[
  {
    "path": ".gitignore",
    "content": "### Basic ignore file\n\n# Binaries for programs and plugins\nvsphere-influxdb\n\n# Test binary, build with `go test -c`\n*.test\n\n# Output of the go coverage tool, specifically when used with LiteIDE\n*.out\n\n# Configuration file\nvsphere-influxdb.json\n\n# Vim swap files\n*.swp\n"
  },
  {
    "path": ".travis.yml",
    "content": "language: go\nsudo: required\ngo:\n  - 1.9\nenv:\n  - PATH=/home/travis/gopath/bin:$PATH\nbefore_install:\n  - sudo apt-get -qq update\n  - sudo apt-get install -y ruby ruby-dev build-essential rpm \n  - go get -u github.com/golang/dep/cmd/dep\n  - go get -u github.com/alecthomas/gometalinter\ninstall:\n  - dep ensure\nbefore_script:\n  - gometalinter --install\n    #  - gometalinter --vendor ./...\nscript:\n  - git status\nafter_success:\n#  - gem install --no-ri --no-rdoc fpm\n  - test -n \"$TRAVIS_TAG\" && curl -sL https://git.io/goreleaser | bash\n\n"
  },
  {
    "path": "Dockerfile",
    "content": "FROM golang:1.12-alpine3.10 as builder\n\nWORKDIR /go/src/vsphere-influxdb-go\nCOPY . .\nRUN apk --update add --virtual build-deps git \nRUN go get -d -v ./...\nRUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo\n\nFROM alpine:3.10\nRUN apk update \\\n && apk upgrade \\\n && apk add ca-certificates \\\n && addgroup -S spock && adduser -S spock -G spock\nCOPY --from=0 /go/src/vsphere-influxdb-go/vsphere-influxdb-go /vsphere-influxdb-go\n\nUSER spock\n\nCMD [\"/vsphere-influxdb-go\"]\n"
  },
  {
    "path": "Gopkg.toml",
    "content": "\n# Gopkg.toml example\n#\n# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md\n# for detailed Gopkg.toml documentation.\n#\n# required = [\"github.com/user/thing/cmd/thing\"]\n# ignored = [\"github.com/user/project/pkgX\", \"bitbucket.org/user/project/pkgA/pkgY\"]\n#\n# [[constraint]]\n#   name = \"github.com/user/project\"\n#   version = \"1.0.0\"\n#\n# [[constraint]]\n#   name = \"github.com/user/project2\"\n#   branch = \"dev\"\n#   source = \"github.com/myfork/project2\"\n#\n# [[override]]\n#  name = \"github.com/x/y\"\n#  version = \"2.4.0\"\n\n\n[[constraint]]\n  name = \"github.com/davecgh/go-spew\"\n  version = \"1.1.0\"\n\n[[constraint]]\n  name = \"github.com/influxdata/influxdb\"\n  version = \"1.3.6\"\n\n[[constraint]]\n  name = \"github.com/vmware/govmomi\"\n  version = \"0.15.0\"\n\n[[constraint]]\n  branch = \"master\"\n  name = \"golang.org/x/net\"\n"
  },
  {
    "path": "LICENSE.txt",
    "content": "                    GNU GENERAL PUBLIC LICENSE\n                       Version 3, 29 June 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU General Public License is a free, copyleft license for\nsoftware and other kinds of works.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nthe GNU General Public License is intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.  We, the Free Software Foundation, use the\nGNU General Public License for most of our software; it applies also to\nany other work released this way by its authors.  You can apply it to\nyour programs, too.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  To protect your rights, we need to prevent others from denying you\nthese rights or asking you to surrender the rights.  Therefore, you have\ncertain responsibilities if you distribute copies of the software, or if\nyou modify it: responsibilities to respect the freedom of others.\n\n  For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must pass on to the recipients the same\nfreedoms that you received.  You must make sure that they, too, receive\nor can get the source code.  
And you must show them these terms so they\nknow their rights.\n\n  Developers that use the GNU GPL protect your rights with two steps:\n(1) assert copyright on the software, and (2) offer you this License\ngiving you legal permission to copy, distribute and/or modify it.\n\n  For the developers' and authors' protection, the GPL clearly explains\nthat there is no warranty for this free software.  For both users' and\nauthors' sake, the GPL requires that modified versions be marked as\nchanged, so that their problems will not be attributed erroneously to\nauthors of previous versions.\n\n  Some devices are designed to deny users access to install or run\nmodified versions of the software inside them, although the manufacturer\ncan do so.  This is fundamentally incompatible with the aim of\nprotecting users' freedom to change the software.  The systematic\npattern of such abuse occurs in the area of products for individuals to\nuse, which is precisely where it is most unacceptable.  Therefore, we\nhave designed this version of the GPL to prohibit the practice for those\nproducts.  If such problems arise substantially in other domains, we\nstand ready to extend this provision to those domains in future versions\nof the GPL, as needed to protect the freedom of users.\n\n  Finally, every program is threatened constantly by software patents.\nStates should not allow patents to restrict development and use of\nsoftware on general-purpose computers, but in those that do, we wish to\navoid the special danger that patents applied to a free program could\nmake it effectively proprietary.  To prevent this, the GPL assures that\npatents cannot be used to render the program non-free.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. 
Definitions.\n\n  \"This License\" refers to version 3 of the GNU General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  \"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. 
Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. 
Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  
This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. 
Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  
If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  
A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  
Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  
You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  
If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  
If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  
For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  
To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  \"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  
You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Use with the GNU Affero General Public License.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU Affero General Public License into a single\ncombined work, and to convey the resulting work.  
The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the special requirements of the GNU Affero General Public License,\nsection 13, concerning interaction through a network will apply to the\ncombination as such.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU General Public License from time to time.  Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  
It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License\n    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If the program does terminal interaction, make it output a short\nnotice like this when it starts in an interactive mode:\n\n    <program>  Copyright (C) <year>  <name of author>\n    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n    This is free software, and you are welcome to redistribute it\n    under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License.  
Of course, your program's commands\nmight be different; for a GUI interface, you would use an \"about box\".\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU GPL, see\n<http://www.gnu.org/licenses/>.\n\n  The GNU General Public License does not permit incorporating your program\ninto proprietary programs.  If your program is a subroutine library, you\nmay consider it more useful to permit linking proprietary applications with\nthe library.  If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License.  But first, please read\n<http://www.gnu.org/philosophy/why-not-lgpl.html>.\n"
  },
  {
    "path": "README.md",
    "content": "[![Releases](https://img.shields.io/github/release/Oxalide/vsphere-influxdb-go.svg?style=flat-square)](https://github.com/Oxalide/vsphere-influxdb-go/releases/latest) [![Build Status](https://travis-ci.org/Oxalide/vsphere-influxdb-go.svg?branch=master)](https://travis-ci.org/Oxalide/vsphere-influxdb-go) [![Go Report Card](https://goreportcard.com/badge/Oxalide/vsphere-influxdb-go)](https://goreportcard.com/report/github.com/Oxalide/vsphere-influxdb-go)\n\n# Collect VMware vCenter and ESXi performance metrics and send them to InfluxDB\n\n# Screenshots of Grafana dashboards\n![screenshot](https://grafana.com/api/dashboards/3556/images/2224/image)\n![screenshot](https://grafana.com/api/dashboards/3556/images/2227/image)\n![screenshot](https://grafana.com/api/dashboards/3556/images/2230/image)\n![screenshot](https://grafana.com/api/dashboards/3571/images/2245/image)\n![screenshot](https://grafana.com/api/dashboards/3571/images/2251/image)\n![screenshot](https://grafana.com/api/dashboards/3571/images/2254/image)\n\n# Description and Features\nThis is a tool written in Go that helps you do your own custom tailored monitoring, capacity planning and performance debugging of VMware based infrastructures. 
It collects all possible metrics from vCenters and ESXi hypervisors about hosts, clusters, resource pools, datastores and virtual machines and sends them to an [InfluxDB database](https://github.com/influxdata/influxdb) (a popular open source time series database project written in Go), which you can then visualise in Grafana (links to sample dashboards [below](#example-dashboards)) or Chronograf, and use Grafana, Kapacitor or custom scripts to do alerting based on your needs, KPIs, capacity plannings/expectations.\n\n# Install \nGrab the [latest release](https://github.com/Oxalide/vsphere-influxdb-go/releases/latest) for your OS (deb, rpm packages, exes, archives for Linux, Darwin, Windows, FreeBSD on amd64, arm6, arm7, arm64 are available) and install it.\n\nFor Debian/Ubuntu on amd64:\n```\ncurl -L -O $(curl -s https://api.github.com/repos/Oxalide/vsphere-influxdb-go/releases | grep browser_download_url | grep '64[.]deb' | head -n 1 | cut -d '\"' -f 4)\ndpkg -i vsphere-influxdb-go*.deb\n```\n\nCentOS/Red Hat on amd64:\n```\ncurl -L -O $(curl -s https://api.github.com/repos/Oxalide/vsphere-influxdb-go/releases | grep browser_download_url | grep '64[.]rpm' | head -n 1 | cut -d '\"' -f 4)\nrpm -i vsphere-influxdb-go*.rpm\n```\n\nThis will install vsphere-influxdb-go in /usr/local/bin/vsphere-influxdb-go and an example configuration file in /etc/vsphere-influxdb-go.json that needs to be edited.\n\n\n# Configure\n\nThe JSON configuration file in /etc/vsphere-influxdb-go.json contains all your vCenters/ESXi to connect to, the InfluxDB connection details(url, username/password, database to use), and the metrics to collect(full list [here](http://www.virten.net/2015/05/vsphere-6-0-performance-counter-description/) ).\n\n**Note: Not all metrics are available directly, you might need to change your metric collection level.**\nA table with the level needed for each metric is available 
[here](http://www.virten.net/2015/05/which-performance-counters-are-available-in-each-statistic-level/), and you can find a python script to change the collect level in the [tools folder of the project](./tools/).\n\nAdditionally, you can provide a vCenter/ESXi server and InfluxDB connection details via environment variables, which is extremely helpful when running inside a container:\n\nFor InfluxDB:\n* INFLUX\\_HOSTNAME\n* INFLUX\\_USERNAME\n* INFLUX\\_PASSWORD\n* INFLUX\\_DATABASE\n\nFor vSphere:\n* VSPHERE\\_HOSTNAME\n* VSPHERE\\_USERNAME\n* VSPHERE\\_PASSWORD \n\nKeep in mind, that currently only one vCenter/ESXi can be added via environment variable.\n\nIf you set a domain, it will be automatically removed from the names of the found objects.\n\nMetrics collected are defined by associating ObjectType groups with Metric groups.\n\nThere have been reports of the script not working correctly when the time is incorrect on the vsphere or vcenter. Make sure that the time is valid or activate the NTP service on the machine.\n\n# Run as a service\n\nCreate a crontab to run it every X minutes(one minute is fine - in our case, ~30 vCenters, ~100 ESXi and ~1400 VMs take approximately 25s to collect all metrics - rather impressive, i might add).\n```\n* * * * * /usr/local/bin/vsphere-influxdb-go\n```\n\n# Example dashboards\n* https://grafana.com/dashboards/1299 (thanks to @exbane )\n* https://grafana.com/dashboards/3556 (VMware cloud overview, mostly provisioning/global cloud usage stats)\n* https://grafana.com/dashboards/3571 (VMware performance, mostly VM oriented performance stats)\n\nContributions welcome!\n\n\n# Compile from source\n\n```\n\ngo get github.com/oxalide/vsphere-influxdb-go\n\n```\nThis will install the project in your $GOBIN($GOPATH/bin). If you have appended $GOBIN to your $PATH, you will be able to call it directly. 
Otherwise, you'll have to call it with its full path.\nExample:\n```\nvsphere-influxdb-go\n```\nor :\n```\n$GOBIN/vsphere-influxdb-go\n```\n\n# TODO before v1.0\n* Add service discovery(or probably something like [Viper](https://github.com/spf13/viper) for easier and more flexible configuration with multiple backends)\n* Daemonize\n* Provide a ready to use Dockerfile\n\n# Contributing\nYou are welcome to contribute!\n\n# License \n\nThe original project, upon which this one is based, is written by cblomart, sends the data to Graphite, and is available [here](https://github.com/cblomart/vsphere-graphite). \n\nThis one is licensed under GPLv3. You can find a copy of the license in [LICENSE.txt](./LICENSE.txt)\n\n\n"
  },
  {
    "path": "goreleaser.yml",
    "content": "project_name: vsphere-influxdb-go\nbuilds:\n  - binary: vsphere-influxdb-go\n    goos:\n      - windows\n      - darwin\n      - linux\n      - freebsd\n    goarch:\n      - amd64\n      - arm\n      - arm64\n    goarm:\n      - 6\n      - 7\n\narchive:\n  format: tar.gz\n  files:\n    - LICENSE.txt\n    - README.md\nnfpm:\n  # Your app's vendor.\n  # Default is empty.\n  vendor: Oxalide\n  # Your app's homepage.\n  homepage: https://github.com/Oxalide/vsphere-influxdb-go\n\n  # Your app's maintainer (probably you).\n  maintainer: Adrian Todorov <ato@oxalide.com>\n\n  # Your app's description.\n  description: Collect VMware vSphere, vCenter and ESXi performance metrics and send them to InfluxDB\n\n  # Your app's license.\n  license: GPL 3.0\n\n  # Formats to be generated.\n  formats:\n    - deb\n    - rpm\n  # Files or directories to add to your package (beyond the binary).\n  # Keys are source paths to get the files from.\n  # Values are the destination locations of the files in the package.\n  files:\n    \"vsphere-influxdb.json.sample\": \"/etc/vsphere-influxdb-go.json\"\n    \n"
  },
  {
    "path": "tools/README.md",
    "content": "# Change vCenter metric collection level\n\n```\ngit clone https://github.com/Oxalide/vsphere-influxdb-go.git\npip install -r tools/requirements.txt\n./tools/change_metric_collection_level.py\n```\n"
  },
  {
    "path": "tools/change_metric_collection_level.py",
    "content": "#!/usr/bin/python\n#============================================\n# Script: change_metric_collection_level.py \n# Description: Change the metric collection level of an interval in a vCenter\n# Copyright 2017 Adrian Todorov, Oxalide ato@oxalide.com\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n# You should have received a copy of the GNU General Public License\n# along with this program.  If not, see <http://www.gnu.org/licenses/>.\n#\n#============================================\n\nfrom pyVim.connect import SmartConnect, Disconnect\nfrom pyVmomi import vim\nimport atexit\nimport sys\nimport requests\nimport argparse\nimport getpass\nimport linecache\n\nrequests.packages.urllib3.disable_warnings()\n\ndef PrintException():\n\t exc_type, exc_obj, tb = sys.exc_info()\n\t f = tb.tb_frame\n\t lineno = tb.tb_lineno\n\t filename = f.f_code.co_filename\n\t linecache.checkcache(filename)\n\t line = linecache.getline(filename, lineno, f.f_globals)\n\t print 'EXCEPTION IN ({}, LINE {} \"{}\"): {}'.format(filename, lineno, line.strip(), exc_obj)\n\n\ndef get_args():\n    parser = argparse.ArgumentParser(description='Arguments for talking to vCenter and modifying a PerfManager collection interval')\n\n    parser.add_argument('-s', '--host', required=True,action='store',help='vSpehre service to connect to')\n    parser.add_argument('-o', '--port', type=int, default=443, action='store', help='Port to connect on')\n    parser.add_argument('-u', '--user', required=True, action='store', help='User name to use')\n    
parser.add_argument('-p', '--password', required=False, action='store', help='Password to use')\n    parser.add_argument('--interval-name', required=False, action='store', dest='intervalName', help='The name of the interval to modify')\n    parser.add_argument('--interval-key', required=False, action='store', dest='intervalKey', help='The key of the interval to modify')\n    parser.add_argument('--interval-level', type=int, required=True, default=4, action='store', dest='intervalLevel', help='The collection level wanted for the interval')\n\n    args = parser.parse_args()\n\n    if not args.password:\n        args.password = getpass.getpass(prompt='Enter password:\\n')\n\tif not args.intervalName and not args.intervalKey:\n\t\tprint \"An interval name or key is needed\"\n\t\texit(2)\n\t\n\treturn args\n\ndef change_level(host, user, pwd, port, level, key, name):\n\ttry:\n\t\tprint user\n\t\tprint pwd\n\t\tprint host\n\t\tserviceInstance = SmartConnect(host=host,user=user,pwd=pwd,port=port)\n\t\tatexit.register(Disconnect, serviceInstance)\n\t\tcontent = serviceInstance.RetrieveContent()\n\t\tpm  = content.perfManager\n\n\t\tfor hi in pm.historicalInterval:\n\t\t\tif (key and int(hi.key) == int(key)) or (name and str(hi.name) == str(name)):\n\t\t\t\tprint \"Changing interval '\"  + str(hi.name) + \"'\"\n\t\t\t\tnewobj = hi\n\t\t\t\tnewobj.level = level\n\t\t\t\tpm.UpdatePerfInterval(newobj)\n\n\t\tprint \"Intervals are now configured as follows: \"\n\t\tprint \"Name | Level\"\n\t\tpm2  = content.perfManager\n\t\tfor hi2 in  pm2.historicalInterval:\n\t\t\tprint hi2.name + \" | \" + str(hi2.level)\n\n\texcept Exception, e:\n\t\tprint \"Error: %s \" % (e)\n\t\tPrintException()\n\t\texit(2)\n\n\nif __name__ == \"__main__\":\n\targs = get_args()\n\tchange_level(args.host, args.user, args.password, args.port, args.intervalLevel, args.intervalKey, args.intervalName)\n\n\n"
  },
  {
    "path": "tools/requirements.txt",
    "content": "pyVmomi\nrequests\nargparse\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/.gitignore",
    "content": "# Compiled Object files, Static and Dynamic libs (Shared Objects)\n*.o\n*.a\n*.so\n\n# Folders\n_obj\n_test\n\n# Architecture specific extensions/prefixes\n*.[568vq]\n[568vq].out\n\n*.cgo1.go\n*.cgo2.c\n_cgo_defun.c\n_cgo_gotypes.go\n_cgo_export.*\n\n_testmain.go\n\n*.exe\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/.travis.yml",
    "content": "language: go\ngo:\n    - 1.5.4\n    - 1.6.3\n    - 1.7\ninstall:\n    - go get -v golang.org/x/tools/cmd/cover\nscript:\n    - go test -v -tags=safe ./spew\n    - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov\nafter_success:\n    - go get -v github.com/mattn/goveralls\n    - export PATH=$PATH:$HOME/gopath/bin\n    - goveralls -coverprofile=profile.cov -service=travis-ci\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/LICENSE",
    "content": "ISC License\n\nCopyright (c) 2012-2016 Dave Collins <dave@davec.name>\n\nPermission to use, copy, modify, and distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/README.md",
    "content": "go-spew\n=======\n\n[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)]\n(https://travis-ci.org/davecgh/go-spew) [![ISC License]\n(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status]\n(https://img.shields.io/coveralls/davecgh/go-spew.svg)]\n(https://coveralls.io/r/davecgh/go-spew?branch=master)\n\n\nGo-spew implements a deep pretty printer for Go data structures to aid in\ndebugging.  A comprehensive suite of tests with 100% test coverage is provided\nto ensure proper functionality.  See `test_coverage.txt` for the gocov coverage\nreport.  Go-spew is licensed under the liberal ISC license, so it may be used in\nopen source or commercial projects.\n\nIf you're interested in reading about how this package came to life and some\nof the challenges involved in providing a deep pretty printer, there is a blog\npost about it\n[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).\n\n## Documentation\n\n[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)]\n(http://godoc.org/github.com/davecgh/go-spew/spew)\n\nFull `go doc` style documentation for the project can be viewed online without\ninstalling this package by using the excellent GoDoc site here:\nhttp://godoc.org/github.com/davecgh/go-spew/spew\n\nYou can also view the documentation locally once the package is installed with\nthe `godoc` tool by running `godoc -http=\":6060\"` and pointing your browser to\nhttp://localhost:6060/pkg/github.com/davecgh/go-spew/spew\n\n## Installation\n\n```bash\n$ go get -u github.com/davecgh/go-spew/spew\n```\n\n## Quick Start\n\nAdd this import line to the file you're working in:\n\n```Go\nimport \"github.com/davecgh/go-spew/spew\"\n```\n\nTo dump a variable with full newlines, indentation, type, and pointer\ninformation use Dump, Fdump, or Sdump:\n\n```Go\nspew.Dump(myVar1, myVar2, ...)\nspew.Fdump(someWriter, myVar1, 
myVar2, ...)\nstr := spew.Sdump(myVar1, myVar2, ...)\n```\n\nAlternatively, if you would prefer to use format strings with a compacted inline\nprinting style, use the convenience wrappers Printf, Fprintf, etc with %v (most\ncompact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types\nand pointer addresses): \n\n```Go\nspew.Printf(\"myVar1: %v -- myVar2: %+v\", myVar1, myVar2)\nspew.Printf(\"myVar3: %#v -- myVar4: %#+v\", myVar3, myVar4)\nspew.Fprintf(someWriter, \"myVar1: %v -- myVar2: %+v\", myVar1, myVar2)\nspew.Fprintf(someWriter, \"myVar3: %#v -- myVar4: %#+v\", myVar3, myVar4)\n```\n\n## Debugging a Web Application Example\n\nHere is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production.\n\n```Go\npackage main\n\nimport (\n    \"fmt\"\n    \"html\"\n    \"net/http\"\n\n    \"github.com/davecgh/go-spew/spew\"\n)\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n    w.Header().Set(\"Content-Type\", \"text/html\")\n    fmt.Fprintf(w, \"Hi there, %s!\", r.URL.Path[1:])\n    fmt.Fprintf(w, \"<!--\\n\" + html.EscapeString(spew.Sdump(w)) + \"\\n-->\")\n}\n\nfunc main() {\n    http.HandleFunc(\"/\", handler)\n    http.ListenAndServe(\":8080\", nil)\n}\n```\n\n## Sample Dump Output\n\n```\n(main.Foo) {\n unexportedField: (*main.Bar)(0xf84002e210)({\n  flag: (main.Flag) flagTwo,\n  data: (uintptr) <nil>\n }),\n ExportedField: (map[interface {}]interface {}) {\n  (string) \"one\": (bool) true\n }\n}\n([]uint8) {\n 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... 
|\n 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!\"#$%&'()*+,-./0|\n 00000020  31 32                                             |12|\n}\n```\n\n## Sample Formatter Output\n\nDouble pointer to a uint8:\n```\n\t  %v: <**>5\n\t %+v: <**>(0xf8400420d0->0xf8400420c8)5\n\t %#v: (**uint8)5\n\t%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5\n```\n\nPointer to circular struct with a uint8 field and a pointer to itself:\n```\n\t  %v: <*>{1 <*><shown>}\n\t %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}\n\t %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}\n\t%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}\n```\n\n## Configuration Options\n\nConfiguration of spew is handled by fields in the ConfigState type. For\nconvenience, all of the top-level functions use a global state available via the\nspew.Config global.\n\nIt is also possible to create a ConfigState instance that provides methods\nequivalent to the top-level functions. This allows concurrent configuration\noptions. See the ConfigState documentation for more details.\n\n```\n* Indent\n\tString to use for each indentation level for Dump functions.\n\tIt is a single space by default.  A popular alternative is \"\\t\".\n\n* MaxDepth\n\tMaximum number of levels to descend into nested data structures.\n\tThere is no limit by default.\n\n* DisableMethods\n\tDisables invocation of error and Stringer interface methods.\n\tMethod invocation is enabled by default.\n\n* DisablePointerMethods\n\tDisables invocation of error and Stringer interface methods on types\n\twhich only accept pointer receivers from non-pointer variables.  
This option\n\trelies on access to the unsafe package, so it will not have any effect when\n\trunning in environments without access to the unsafe package such as Google\n\tApp Engine or with the \"safe\" build tag specified.\n\tPointer method invocation is enabled by default.\n\n* DisablePointerAddresses\n\tDisablePointerAddresses specifies whether to disable the printing of\n\tpointer addresses. This is useful when diffing data structures in tests.\n\n* DisableCapacities\n\tDisableCapacities specifies whether to disable the printing of capacities\n\tfor arrays, slices, maps and channels. This is useful when diffing data\n\tstructures in tests.\n\n* ContinueOnMethod\n\tEnables recursion into types after invoking error and Stringer interface\n\tmethods. Recursion after method invocation is disabled by default.\n\n* SortKeys\n\tSpecifies map keys should be sorted before being printed. Use\n\tthis to have a more deterministic, diffable output.  Note that\n\tonly native types (bool, int, uint, floats, uintptr and string)\n\tand types which implement error or Stringer interfaces are supported,\n\twith other types sorted according to the reflect.Value.String() output\n\twhich guarantees display stability.  Natural map order is used by\n\tdefault.\n\n* SpewKeys\n\tSpewKeys specifies that, as a last resort attempt, map keys should be\n\tspewed to strings and sorted by those strings.  This is only considered\n\tif SortKeys is true.\n\n```\n\n## Unsafe Package Dependency\n\nThis package relies on the unsafe package to perform some of the more advanced\nfeatures, however it also supports a \"limited\" mode which allows it to work in\nenvironments where the unsafe package is not available.  By default, it will\noperate in this mode on Google App Engine and when compiled with GopherJS.  
The\n\"safe\" build tag may also be specified to force the package to build without\nusing the unsafe package.\n\n## License\n\nGo-spew is licensed under the [copyfree](http://copyfree.org) ISC License.\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/cov_report.sh",
    "content": "#!/bin/sh\n\n# This script uses gocov to generate a test coverage report.\n# The gocov tool my be obtained with the following command:\n#   go get github.com/axw/gocov/gocov\n#\n# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.\n\n# Check for gocov.\nif ! type gocov >/dev/null 2>&1; then\n\techo >&2 \"This script requires the gocov tool.\"\n\techo >&2 \"You may obtain it with the following command:\"\n\techo >&2 \"go get github.com/axw/gocov/gocov\"\n\texit 1\nfi\n\n# Only run the cgo tests if gcc is installed.\nif type gcc >/dev/null 2>&1; then\n\t(cd spew && gocov test -tags testcgo | gocov report)\nelse\n\t(cd spew && gocov test | gocov report)\nfi\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/bypass.go",
    "content": "// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>\n//\n// Permission to use, copy, modify, and distribute this software for any\n// purpose with or without fee is hereby granted, provided that the above\n// copyright notice and this permission notice appear in all copies.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n// NOTE: Due to the following build constraints, this file will only be compiled\n// when the code is not running on Google App Engine, compiled by GopherJS, and\n// \"-tags safe\" is not added to the go build command line.  The \"disableunsafe\"\n// tag is deprecated and thus should not be used.\n// +build !js,!appengine,!safe,!disableunsafe\n\npackage spew\n\nimport (\n\t\"reflect\"\n\t\"unsafe\"\n)\n\nconst (\n\t// UnsafeDisabled is a build-time constant which specifies whether or\n\t// not access to the unsafe package is available.\n\tUnsafeDisabled = false\n\n\t// ptrSize is the size of a pointer on the current arch.\n\tptrSize = unsafe.Sizeof((*byte)(nil))\n)\n\nvar (\n\t// offsetPtr, offsetScalar, and offsetFlag are the offsets for the\n\t// internal reflect.Value fields.  These values are valid before golang\n\t// commit ecccf07e7f9d which changed the format.  The are also valid\n\t// after commit 82f48826c6c7 which changed the format again to mirror\n\t// the original format.  
Code in the init function updates these offsets\n\t// as necessary.\n\toffsetPtr    = uintptr(ptrSize)\n\toffsetScalar = uintptr(0)\n\toffsetFlag   = uintptr(ptrSize * 2)\n\n\t// flagKindWidth and flagKindShift indicate various bits that the\n\t// reflect package uses internally to track kind information.\n\t//\n\t// flagRO indicates whether or not the value field of a reflect.Value is\n\t// read-only.\n\t//\n\t// flagIndir indicates whether the value field of a reflect.Value is\n\t// the actual data or a pointer to the data.\n\t//\n\t// These values are valid before golang commit 90a7c3c86944 which\n\t// changed their positions.  Code in the init function updates these\n\t// flags as necessary.\n\tflagKindWidth = uintptr(5)\n\tflagKindShift = uintptr(flagKindWidth - 1)\n\tflagRO        = uintptr(1 << 0)\n\tflagIndir     = uintptr(1 << 1)\n)\n\nfunc init() {\n\t// Older versions of reflect.Value stored small integers directly in the\n\t// ptr field (which is named val in the older versions).  Versions\n\t// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named\n\t// scalar for this purpose which unfortunately came before the flag\n\t// field, so the offset of the flag field is different for those\n\t// versions.\n\t//\n\t// This code constructs a new reflect.Value from a known small integer\n\t// and checks if the size of the reflect.Value struct indicates it has\n\t// the scalar field. When it does, the offsets are updated accordingly.\n\tvv := reflect.ValueOf(0xf00)\n\tif unsafe.Sizeof(vv) == (ptrSize * 4) {\n\t\toffsetScalar = ptrSize * 2\n\t\toffsetFlag = ptrSize * 3\n\t}\n\n\t// Commit 90a7c3c86944 changed the flag positions such that the low\n\t// order bits are the kind.  This code extracts the kind from the flags\n\t// field and ensures it's the correct type.  
When it's not, the flag\n\t// order has been changed to the newer format, so the flags are updated\n\t// accordingly.\n\tupf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)\n\tupfv := *(*uintptr)(upf)\n\tflagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)\n\tif (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {\n\t\tflagKindShift = 0\n\t\tflagRO = 1 << 5\n\t\tflagIndir = 1 << 6\n\n\t\t// Commit adf9b30e5594 modified the flags to separate the\n\t\t// flagRO flag into two bits which specifies whether or not the\n\t\t// field is embedded.  This causes flagIndir to move over a bit\n\t\t// and means that flagRO is the combination of either of the\n\t\t// original flagRO bit and the new bit.\n\t\t//\n\t\t// This code detects the change by extracting what used to be\n\t\t// the indirect bit to ensure it's set.  When it's not, the flag\n\t\t// order has been changed to the newer format, so the flags are\n\t\t// updated accordingly.\n\t\tif upfv&flagIndir == 0 {\n\t\t\tflagRO = 3 << 5\n\t\t\tflagIndir = 1 << 7\n\t\t}\n\t}\n}\n\n// unsafeReflectValue converts the passed reflect.Value into a one that bypasses\n// the typical safety restrictions preventing access to unaddressable and\n// unexported data.  
It works by digging the raw pointer to the underlying\n// value out of the protected value and generating a new unprotected (unsafe)\n// reflect.Value to it.\n//\n// This allows us to check for implementations of the Stringer and error\n// interfaces to be used for pretty printing ordinarily unaddressable and\n// inaccessible values such as unexported struct fields.\nfunc unsafeReflectValue(v reflect.Value) (rv reflect.Value) {\n\tindirects := 1\n\tvt := v.Type()\n\tupv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)\n\trvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))\n\tif rvf&flagIndir != 0 {\n\t\tvt = reflect.PtrTo(v.Type())\n\t\tindirects++\n\t} else if offsetScalar != 0 {\n\t\t// The value is in the scalar field when it's not one of the\n\t\t// reference types.\n\t\tswitch vt.Kind() {\n\t\tcase reflect.Uintptr:\n\t\tcase reflect.Chan:\n\t\tcase reflect.Func:\n\t\tcase reflect.Map:\n\t\tcase reflect.Ptr:\n\t\tcase reflect.UnsafePointer:\n\t\tdefault:\n\t\t\tupv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +\n\t\t\t\toffsetScalar)\n\t\t}\n\t}\n\n\tpv := reflect.NewAt(vt, upv)\n\trv = pv\n\tfor i := 0; i < indirects; i++ {\n\t\trv = rv.Elem()\n\t}\n\treturn rv\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/bypasssafe.go",
    "content": "// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>\n//\n// Permission to use, copy, modify, and distribute this software for any\n// purpose with or without fee is hereby granted, provided that the above\n// copyright notice and this permission notice appear in all copies.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n// NOTE: Due to the following build constraints, this file will only be compiled\n// when the code is running on Google App Engine, compiled by GopherJS, or\n// \"-tags safe\" is added to the go build command line.  The \"disableunsafe\"\n// tag is deprecated and thus should not be used.\n// +build js appengine safe disableunsafe\n\npackage spew\n\nimport \"reflect\"\n\nconst (\n\t// UnsafeDisabled is a build-time constant which specifies whether or\n\t// not access to the unsafe package is available.\n\tUnsafeDisabled = true\n)\n\n// unsafeReflectValue typically converts the passed reflect.Value into a one\n// that bypasses the typical safety restrictions preventing access to\n// unaddressable and unexported data.  However, doing this relies on access to\n// the unsafe package.  This is a stub version which simply returns the passed\n// reflect.Value when the unsafe package is not available.\nfunc unsafeReflectValue(v reflect.Value) reflect.Value {\n\treturn v\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/common.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\npackage spew\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strconv\"\n)\n\n// Some constants in the form of bytes to avoid string overhead.  This mirrors\n// the technique used in the fmt package.\nvar (\n\tpanicBytes            = []byte(\"(PANIC=\")\n\tplusBytes             = []byte(\"+\")\n\tiBytes                = []byte(\"i\")\n\ttrueBytes             = []byte(\"true\")\n\tfalseBytes            = []byte(\"false\")\n\tinterfaceBytes        = []byte(\"(interface {})\")\n\tcommaNewlineBytes     = []byte(\",\\n\")\n\tnewlineBytes          = []byte(\"\\n\")\n\topenBraceBytes        = []byte(\"{\")\n\topenBraceNewlineBytes = []byte(\"{\\n\")\n\tcloseBraceBytes       = []byte(\"}\")\n\tasteriskBytes         = []byte(\"*\")\n\tcolonBytes            = []byte(\":\")\n\tcolonSpaceBytes       = []byte(\": \")\n\topenParenBytes        = []byte(\"(\")\n\tcloseParenBytes       = []byte(\")\")\n\tspaceBytes            = []byte(\" \")\n\tpointerChainBytes     = []byte(\"->\")\n\tnilAngleBytes         = []byte(\"<nil>\")\n\tmaxNewlineBytes       = []byte(\"<max depth reached>\\n\")\n\tmaxShortBytes         = 
[]byte(\"<max>\")\n\tcircularBytes         = []byte(\"<already shown>\")\n\tcircularShortBytes    = []byte(\"<shown>\")\n\tinvalidAngleBytes     = []byte(\"<invalid>\")\n\topenBracketBytes      = []byte(\"[\")\n\tcloseBracketBytes     = []byte(\"]\")\n\tpercentBytes          = []byte(\"%\")\n\tprecisionBytes        = []byte(\".\")\n\topenAngleBytes        = []byte(\"<\")\n\tcloseAngleBytes       = []byte(\">\")\n\topenMapBytes          = []byte(\"map[\")\n\tcloseMapBytes         = []byte(\"]\")\n\tlenEqualsBytes        = []byte(\"len=\")\n\tcapEqualsBytes        = []byte(\"cap=\")\n)\n\n// hexDigits is used to map a decimal value to a hex digit.\nvar hexDigits = \"0123456789abcdef\"\n\n// catchPanic handles any panics that might occur during the handleMethods\n// calls.\nfunc catchPanic(w io.Writer, v reflect.Value) {\n\tif err := recover(); err != nil {\n\t\tw.Write(panicBytes)\n\t\tfmt.Fprintf(w, \"%v\", err)\n\t\tw.Write(closeParenBytes)\n\t}\n}\n\n// handleMethods attempts to call the Error and String methods on the underlying\n// type the passed reflect.Value represents and outputes the result to Writer w.\n//\n// It handles panics in any called methods by catching and displaying the error\n// as the formatted value.\nfunc handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {\n\t// We need an interface to check if the type implements the error or\n\t// Stringer interface.  However, the reflect package won't give us an\n\t// interface on certain things like unexported struct fields in order\n\t// to enforce visibility rules.  
We use unsafe, when it's available,\n\t// to bypass these restrictions since this package does not mutate the\n\t// values.\n\tif !v.CanInterface() {\n\t\tif UnsafeDisabled {\n\t\t\treturn false\n\t\t}\n\n\t\tv = unsafeReflectValue(v)\n\t}\n\n\t// Choose whether or not to do error and Stringer interface lookups against\n\t// the base type or a pointer to the base type depending on settings.\n\t// Technically calling one of these methods with a pointer receiver can\n\t// mutate the value, however, types which choose to satisify an error or\n\t// Stringer interface with a pointer receiver should not be mutating their\n\t// state inside these interface methods.\n\tif !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {\n\t\tv = unsafeReflectValue(v)\n\t}\n\tif v.CanAddr() {\n\t\tv = v.Addr()\n\t}\n\n\t// Is it an error or Stringer?\n\tswitch iface := v.Interface().(type) {\n\tcase error:\n\t\tdefer catchPanic(w, v)\n\t\tif cs.ContinueOnMethod {\n\t\t\tw.Write(openParenBytes)\n\t\t\tw.Write([]byte(iface.Error()))\n\t\t\tw.Write(closeParenBytes)\n\t\t\tw.Write(spaceBytes)\n\t\t\treturn false\n\t\t}\n\n\t\tw.Write([]byte(iface.Error()))\n\t\treturn true\n\n\tcase fmt.Stringer:\n\t\tdefer catchPanic(w, v)\n\t\tif cs.ContinueOnMethod {\n\t\t\tw.Write(openParenBytes)\n\t\t\tw.Write([]byte(iface.String()))\n\t\t\tw.Write(closeParenBytes)\n\t\t\tw.Write(spaceBytes)\n\t\t\treturn false\n\t\t}\n\t\tw.Write([]byte(iface.String()))\n\t\treturn true\n\t}\n\treturn false\n}\n\n// printBool outputs a boolean value as true or false to Writer w.\nfunc printBool(w io.Writer, val bool) {\n\tif val {\n\t\tw.Write(trueBytes)\n\t} else {\n\t\tw.Write(falseBytes)\n\t}\n}\n\n// printInt outputs a signed integer value to Writer w.\nfunc printInt(w io.Writer, val int64, base int) {\n\tw.Write([]byte(strconv.FormatInt(val, base)))\n}\n\n// printUint outputs an unsigned integer value to Writer w.\nfunc printUint(w io.Writer, val uint64, base int) 
{\n\tw.Write([]byte(strconv.FormatUint(val, base)))\n}\n\n// printFloat outputs a floating point value using the specified precision,\n// which is expected to be 32 or 64bit, to Writer w.\nfunc printFloat(w io.Writer, val float64, precision int) {\n\tw.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))\n}\n\n// printComplex outputs a complex value using the specified float precision\n// for the real and imaginary parts to Writer w.\nfunc printComplex(w io.Writer, c complex128, floatPrecision int) {\n\tr := real(c)\n\tw.Write(openParenBytes)\n\tw.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))\n\ti := imag(c)\n\tif i >= 0 {\n\t\tw.Write(plusBytes)\n\t}\n\tw.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))\n\tw.Write(iBytes)\n\tw.Write(closeParenBytes)\n}\n\n// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'\n// prefix to Writer w.\nfunc printHexPtr(w io.Writer, p uintptr) {\n\t// Null pointer.\n\tnum := uint64(p)\n\tif num == 0 {\n\t\tw.Write(nilAngleBytes)\n\t\treturn\n\t}\n\n\t// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix\n\tbuf := make([]byte, 18)\n\n\t// It's simpler to construct the hex string right to left.\n\tbase := uint64(16)\n\ti := len(buf) - 1\n\tfor num >= base {\n\t\tbuf[i] = hexDigits[num%base]\n\t\tnum /= base\n\t\ti--\n\t}\n\tbuf[i] = hexDigits[num]\n\n\t// Add '0x' prefix.\n\ti--\n\tbuf[i] = 'x'\n\ti--\n\tbuf[i] = '0'\n\n\t// Strip unused leading bytes.\n\tbuf = buf[i:]\n\tw.Write(buf)\n}\n\n// valuesSorter implements sort.Interface to allow a slice of reflect.Value\n// elements to be sorted.\ntype valuesSorter struct {\n\tvalues  []reflect.Value\n\tstrings []string // either nil or same len and values\n\tcs      *ConfigState\n}\n\n// newValuesSorter initializes a valuesSorter instance, which holds a set of\n// surrogate keys on which the data should be sorted.  
It uses flags in\n// ConfigState to decide if and how to populate those surrogate keys.\nfunc newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {\n\tvs := &valuesSorter{values: values, cs: cs}\n\tif canSortSimply(vs.values[0].Kind()) {\n\t\treturn vs\n\t}\n\tif !cs.DisableMethods {\n\t\tvs.strings = make([]string, len(values))\n\t\tfor i := range vs.values {\n\t\t\tb := bytes.Buffer{}\n\t\t\tif !handleMethods(cs, &b, vs.values[i]) {\n\t\t\t\tvs.strings = nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvs.strings[i] = b.String()\n\t\t}\n\t}\n\tif vs.strings == nil && cs.SpewKeys {\n\t\tvs.strings = make([]string, len(values))\n\t\tfor i := range vs.values {\n\t\t\tvs.strings[i] = Sprintf(\"%#v\", vs.values[i].Interface())\n\t\t}\n\t}\n\treturn vs\n}\n\n// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted\n// directly, or whether it should be considered for sorting by surrogate keys\n// (if the ConfigState allows it).\nfunc canSortSimply(kind reflect.Kind) bool {\n\t// This switch parallels valueSortLess, except for the default case.\n\tswitch kind {\n\tcase reflect.Bool:\n\t\treturn true\n\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:\n\t\treturn true\n\tcase reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:\n\t\treturn true\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn true\n\tcase reflect.String:\n\t\treturn true\n\tcase reflect.Uintptr:\n\t\treturn true\n\tcase reflect.Array:\n\t\treturn true\n\t}\n\treturn false\n}\n\n// Len returns the number of values in the slice.  It is part of the\n// sort.Interface implementation.\nfunc (s *valuesSorter) Len() int {\n\treturn len(s.values)\n}\n\n// Swap swaps the values at the passed indices.  
It is part of the\n// sort.Interface implementation.\nfunc (s *valuesSorter) Swap(i, j int) {\n\ts.values[i], s.values[j] = s.values[j], s.values[i]\n\tif s.strings != nil {\n\t\ts.strings[i], s.strings[j] = s.strings[j], s.strings[i]\n\t}\n}\n\n// valueSortLess returns whether the first value should sort before the second\n// value.  It is used by valueSorter.Less as part of the sort.Interface\n// implementation.\nfunc valueSortLess(a, b reflect.Value) bool {\n\tswitch a.Kind() {\n\tcase reflect.Bool:\n\t\treturn !a.Bool() && b.Bool()\n\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:\n\t\treturn a.Int() < b.Int()\n\tcase reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:\n\t\treturn a.Uint() < b.Uint()\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn a.Float() < b.Float()\n\tcase reflect.String:\n\t\treturn a.String() < b.String()\n\tcase reflect.Uintptr:\n\t\treturn a.Uint() < b.Uint()\n\tcase reflect.Array:\n\t\t// Compare the contents of both arrays.\n\t\tl := a.Len()\n\t\tfor i := 0; i < l; i++ {\n\t\t\tav := a.Index(i)\n\t\t\tbv := b.Index(i)\n\t\t\tif av.Interface() == bv.Interface() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn valueSortLess(av, bv)\n\t\t}\n\t}\n\treturn a.String() < b.String()\n}\n\n// Less returns whether the value at index i should sort before the\n// value at index j.  It is part of the sort.Interface implementation.\nfunc (s *valuesSorter) Less(i, j int) bool {\n\tif s.strings == nil {\n\t\treturn valueSortLess(s.values[i], s.values[j])\n\t}\n\treturn s.strings[i] < s.strings[j]\n}\n\n// sortValues is a sort function that handles both native types and any type that\n// can be converted to error or Stringer.  Other inputs are sorted according to\n// their Value.String() value to ensure display stability.\nfunc sortValues(values []reflect.Value, cs *ConfigState) {\n\tif len(values) == 0 {\n\t\treturn\n\t}\n\tsort.Sort(newValuesSorter(values, cs))\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/common_test.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\npackage spew_test\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n)\n\n// custom type to test Stinger interface on non-pointer receiver.\ntype stringer string\n\n// String implements the Stringer interface for testing invocation of custom\n// stringers on types with non-pointer receivers.\nfunc (s stringer) String() string {\n\treturn \"stringer \" + string(s)\n}\n\n// custom type to test Stinger interface on pointer receiver.\ntype pstringer string\n\n// String implements the Stringer interface for testing invocation of custom\n// stringers on types with only pointer receivers.\nfunc (s *pstringer) String() string {\n\treturn \"stringer \" + string(*s)\n}\n\n// xref1 and xref2 are cross referencing structs for testing circular reference\n// detection.\ntype xref1 struct {\n\tps2 *xref2\n}\ntype xref2 struct {\n\tps1 *xref1\n}\n\n// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular\n// reference for testing detection.\ntype indirCir1 struct {\n\tps2 *indirCir2\n}\ntype indirCir2 struct {\n\tps3 *indirCir3\n}\ntype indirCir3 struct {\n\tps1 *indirCir1\n}\n\n// embed is used to 
test embedded structures.\ntype embed struct {\n\ta string\n}\n\n// embedwrap is used to test embedded structures.\ntype embedwrap struct {\n\t*embed\n\te *embed\n}\n\n// panicer is used to intentionally cause a panic for testing spew properly\n// handles them\ntype panicer int\n\nfunc (p panicer) String() string {\n\tpanic(\"test panic\")\n}\n\n// customError is used to test custom error interface invocation.\ntype customError int\n\nfunc (e customError) Error() string {\n\treturn fmt.Sprintf(\"error: %d\", int(e))\n}\n\n// stringizeWants converts a slice of wanted test output into a format suitable\n// for a test error message.\nfunc stringizeWants(wants []string) string {\n\ts := \"\"\n\tfor i, want := range wants {\n\t\tif i > 0 {\n\t\t\ts += fmt.Sprintf(\"want%d: %s\", i+1, want)\n\t\t} else {\n\t\t\ts += \"want: \" + want\n\t\t}\n\t}\n\treturn s\n}\n\n// testFailed returns whether or not a test failed by checking if the result\n// of the test is in the slice of wanted strings.\nfunc testFailed(result string, wants []string) bool {\n\tfor _, want := range wants {\n\t\tif result == want {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\ntype sortableStruct struct {\n\tx int\n}\n\nfunc (ss sortableStruct) String() string {\n\treturn fmt.Sprintf(\"ss.%d\", ss.x)\n}\n\ntype unsortableStruct struct {\n\tx int\n}\n\ntype sortTestCase struct {\n\tinput    []reflect.Value\n\texpected []reflect.Value\n}\n\nfunc helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) {\n\tgetInterfaces := func(values []reflect.Value) []interface{} {\n\t\tinterfaces := []interface{}{}\n\t\tfor _, v := range values {\n\t\t\tinterfaces = append(interfaces, v.Interface())\n\t\t}\n\t\treturn interfaces\n\t}\n\n\tfor _, test := range tests {\n\t\tspew.SortValues(test.input, cs)\n\t\t// reflect.DeepEqual cannot really make sense of reflect.Value,\n\t\t// probably because of all the pointer tricks. For instance,\n\t\t// v(2.0) != v(2.0) on a 32-bits system. 
Turn them into interface{}\n\t\t// instead.\n\t\tinput := getInterfaces(test.input)\n\t\texpected := getInterfaces(test.expected)\n\t\tif !reflect.DeepEqual(input, expected) {\n\t\t\tt.Errorf(\"Sort mismatch:\\n %v != %v\", input, expected)\n\t\t}\n\t}\n}\n\n// TestSortValues ensures the sort functionality for relect.Value based sorting\n// works as intended.\nfunc TestSortValues(t *testing.T) {\n\tv := reflect.ValueOf\n\n\ta := v(\"a\")\n\tb := v(\"b\")\n\tc := v(\"c\")\n\tembedA := v(embed{\"a\"})\n\tembedB := v(embed{\"b\"})\n\tembedC := v(embed{\"c\"})\n\ttests := []sortTestCase{\n\t\t// No values.\n\t\t{\n\t\t\t[]reflect.Value{},\n\t\t\t[]reflect.Value{},\n\t\t},\n\t\t// Bools.\n\t\t{\n\t\t\t[]reflect.Value{v(false), v(true), v(false)},\n\t\t\t[]reflect.Value{v(false), v(false), v(true)},\n\t\t},\n\t\t// Ints.\n\t\t{\n\t\t\t[]reflect.Value{v(2), v(1), v(3)},\n\t\t\t[]reflect.Value{v(1), v(2), v(3)},\n\t\t},\n\t\t// Uints.\n\t\t{\n\t\t\t[]reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))},\n\t\t\t[]reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))},\n\t\t},\n\t\t// Floats.\n\t\t{\n\t\t\t[]reflect.Value{v(2.0), v(1.0), v(3.0)},\n\t\t\t[]reflect.Value{v(1.0), v(2.0), v(3.0)},\n\t\t},\n\t\t// Strings.\n\t\t{\n\t\t\t[]reflect.Value{b, a, c},\n\t\t\t[]reflect.Value{a, b, c},\n\t\t},\n\t\t// Array\n\t\t{\n\t\t\t[]reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})},\n\t\t\t[]reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})},\n\t\t},\n\t\t// Uintptrs.\n\t\t{\n\t\t\t[]reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))},\n\t\t\t[]reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))},\n\t\t},\n\t\t// SortableStructs.\n\t\t{\n\t\t\t// Note: not sorted - DisableMethods is set.\n\t\t\t[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},\n\t\t\t[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},\n\t\t},\n\t\t// UnsortableStructs.\n\t\t{\n\t\t\t// Note: 
not sorted - SpewKeys is false.\n\t\t\t[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},\n\t\t\t[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},\n\t\t},\n\t\t// Invalid.\n\t\t{\n\t\t\t[]reflect.Value{embedB, embedA, embedC},\n\t\t\t[]reflect.Value{embedB, embedA, embedC},\n\t\t},\n\t}\n\tcs := spew.ConfigState{DisableMethods: true, SpewKeys: false}\n\thelpTestSortValues(tests, &cs, t)\n}\n\n// TestSortValuesWithMethods ensures the sort functionality for relect.Value\n// based sorting works as intended when using string methods.\nfunc TestSortValuesWithMethods(t *testing.T) {\n\tv := reflect.ValueOf\n\n\ta := v(\"a\")\n\tb := v(\"b\")\n\tc := v(\"c\")\n\ttests := []sortTestCase{\n\t\t// Ints.\n\t\t{\n\t\t\t[]reflect.Value{v(2), v(1), v(3)},\n\t\t\t[]reflect.Value{v(1), v(2), v(3)},\n\t\t},\n\t\t// Strings.\n\t\t{\n\t\t\t[]reflect.Value{b, a, c},\n\t\t\t[]reflect.Value{a, b, c},\n\t\t},\n\t\t// SortableStructs.\n\t\t{\n\t\t\t[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},\n\t\t\t[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},\n\t\t},\n\t\t// UnsortableStructs.\n\t\t{\n\t\t\t// Note: not sorted - SpewKeys is false.\n\t\t\t[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},\n\t\t\t[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},\n\t\t},\n\t}\n\tcs := spew.ConfigState{DisableMethods: false, SpewKeys: false}\n\thelpTestSortValues(tests, &cs, t)\n}\n\n// TestSortValuesWithSpew ensures the sort functionality for relect.Value\n// based sorting works as intended when using spew to stringify keys.\nfunc TestSortValuesWithSpew(t *testing.T) {\n\tv := reflect.ValueOf\n\n\ta := v(\"a\")\n\tb := v(\"b\")\n\tc := v(\"c\")\n\ttests := []sortTestCase{\n\t\t// Ints.\n\t\t{\n\t\t\t[]reflect.Value{v(2), v(1), v(3)},\n\t\t\t[]reflect.Value{v(1), v(2), 
v(3)},\n\t\t},\n\t\t// Strings.\n\t\t{\n\t\t\t[]reflect.Value{b, a, c},\n\t\t\t[]reflect.Value{a, b, c},\n\t\t},\n\t\t// SortableStructs.\n\t\t{\n\t\t\t[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},\n\t\t\t[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},\n\t\t},\n\t\t// UnsortableStructs.\n\t\t{\n\t\t\t[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},\n\t\t\t[]reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})},\n\t\t},\n\t}\n\tcs := spew.ConfigState{DisableMethods: true, SpewKeys: true}\n\thelpTestSortValues(tests, &cs, t)\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/config.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\npackage spew\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\n// ConfigState houses the configuration options used by spew to format and\n// display values.  There is a global instance, Config, that is used to control\n// all top-level Formatter and Dump functionality.  Each ConfigState instance\n// provides methods equivalent to the top-level functions.\n//\n// The zero value for ConfigState provides no indentation.  You would typically\n// want to set it to a space or a tab.\n//\n// Alternatively, you can use NewDefaultConfig to get a ConfigState instance\n// with default settings.  See the documentation of NewDefaultConfig for default\n// values.\ntype ConfigState struct {\n\t// Indent specifies the string to use for each indentation level.  The\n\t// global config instance that all top-level functions use set this to a\n\t// single space by default.  If you would like more indentation, you might\n\t// set this to a tab with \"\\t\" or perhaps two spaces with \"  \".\n\tIndent string\n\n\t// MaxDepth controls the maximum number of levels to descend into nested\n\t// data structures.  
The default, 0, means there is no limit.\n\t//\n\t// NOTE: Circular data structures are properly detected, so it is not\n\t// necessary to set this value unless you specifically want to limit deeply\n\t// nested data structures.\n\tMaxDepth int\n\n\t// DisableMethods specifies whether or not error and Stringer interfaces are\n\t// invoked for types that implement them.\n\tDisableMethods bool\n\n\t// DisablePointerMethods specifies whether or not to check for and invoke\n\t// error and Stringer interfaces on types which only accept a pointer\n\t// receiver when the current type is not a pointer.\n\t//\n\t// NOTE: This might be an unsafe action since calling one of these methods\n\t// with a pointer receiver could technically mutate the value, however,\n\t// in practice, types which choose to satisify an error or Stringer\n\t// interface with a pointer receiver should not be mutating their state\n\t// inside these interface methods.  As a result, this option relies on\n\t// access to the unsafe package, so it will not have any effect when\n\t// running in environments without access to the unsafe package such as\n\t// Google App Engine or with the \"safe\" build tag specified.\n\tDisablePointerMethods bool\n\n\t// DisablePointerAddresses specifies whether to disable the printing of\n\t// pointer addresses. This is useful when diffing data structures in tests.\n\tDisablePointerAddresses bool\n\n\t// DisableCapacities specifies whether to disable the printing of capacities\n\t// for arrays, slices, maps and channels. This is useful when diffing\n\t// data structures in tests.\n\tDisableCapacities bool\n\n\t// ContinueOnMethod specifies whether or not recursion should continue once\n\t// a custom error or Stringer interface is invoked.  
The default, false,\n\t// means it will print the results of invoking the custom error or Stringer\n\t// interface and return immediately instead of continuing to recurse into\n\t// the internals of the data type.\n\t//\n\t// NOTE: This flag does not have any effect if method invocation is disabled\n\t// via the DisableMethods or DisablePointerMethods options.\n\tContinueOnMethod bool\n\n\t// SortKeys specifies map keys should be sorted before being printed. Use\n\t// this to have a more deterministic, diffable output.  Note that only\n\t// native types (bool, int, uint, floats, uintptr and string) and types\n\t// that support the error or Stringer interfaces (if methods are\n\t// enabled) are supported, with other types sorted according to the\n\t// reflect.Value.String() output which guarantees display stability.\n\tSortKeys bool\n\n\t// SpewKeys specifies that, as a last resort attempt, map keys should\n\t// be spewed to strings and sorted by those strings.  This is only\n\t// considered if SortKeys is true.\n\tSpewKeys bool\n}\n\n// Config is the active configuration of the top-level functions.\n// The configuration can be changed by modifying the contents of spew.Config.\nvar Config = ConfigState{Indent: \" \"}\n\n// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were\n// passed with a Formatter interface returned by c.NewFormatter.  It returns\n// the formatted string as a value that satisfies error.  See NewFormatter\n// for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {\n\treturn fmt.Errorf(format, c.convertArgs(a)...)\n}\n\n// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were\n// passed with a Formatter interface returned by c.NewFormatter.  It returns\n// the number of bytes written and any write error encountered.  
See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {\n\treturn fmt.Fprint(w, c.convertArgs(a)...)\n}\n\n// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were\n// passed with a Formatter interface returned by c.NewFormatter.  It returns\n// the number of bytes written and any write error encountered.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {\n\treturn fmt.Fprintf(w, format, c.convertArgs(a)...)\n}\n\n// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it\n// passed with a Formatter interface returned by c.NewFormatter.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {\n\treturn fmt.Fprintln(w, c.convertArgs(a)...)\n}\n\n// Print is a wrapper for fmt.Print that treats each argument as if it were\n// passed with a Formatter interface returned by c.NewFormatter.  It returns\n// the number of bytes written and any write error encountered.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Print(c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Print(a ...interface{}) (n int, err error) {\n\treturn fmt.Print(c.convertArgs(a)...)\n}\n\n// Printf is a wrapper for fmt.Printf that treats each argument as if it were\n// passed with a Formatter interface returned by c.NewFormatter.  
It returns\n// the number of bytes written and any write error encountered.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {\n\treturn fmt.Printf(format, c.convertArgs(a)...)\n}\n\n// Println is a wrapper for fmt.Println that treats each argument as if it were\n// passed with a Formatter interface returned by c.NewFormatter.  It returns\n// the number of bytes written and any write error encountered.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Println(c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Println(a ...interface{}) (n int, err error) {\n\treturn fmt.Println(c.convertArgs(a)...)\n}\n\n// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were\n// passed with a Formatter interface returned by c.NewFormatter.  It returns\n// the resulting string.  See NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Sprint(a ...interface{}) string {\n\treturn fmt.Sprint(c.convertArgs(a)...)\n}\n\n// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were\n// passed with a Formatter interface returned by c.NewFormatter.  It returns\n// the resulting string.  See NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Sprintf(format string, a ...interface{}) string {\n\treturn fmt.Sprintf(format, c.convertArgs(a)...)\n}\n\n// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it\n// were passed with a Formatter interface returned by c.NewFormatter.  
It\n// returns the resulting string.  See NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Sprintln(a ...interface{}) string {\n\treturn fmt.Sprintln(c.convertArgs(a)...)\n}\n\n/*\nNewFormatter returns a custom formatter that satisfies the fmt.Formatter\ninterface.  As a result, it integrates cleanly with standard fmt package\nprinting functions.  The formatter is useful for inline printing of smaller data\ntypes similar to the standard %v format specifier.\n\nThe custom formatter only responds to the %v (most compact), %+v (adds pointer\naddresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb\ncombinations.  Any other verbs such as %x and %q will be sent to the the\nstandard fmt package for formatting.  In addition, the custom formatter ignores\nthe width and precision arguments (however they will still work on the format\nspecifiers not handled by the custom formatter).\n\nTypically this function shouldn't be called directly.  It is much easier to make\nuse of the custom formatter by calling one of the convenience functions such as\nc.Printf, c.Println, or c.Printf.\n*/\nfunc (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {\n\treturn newFormatter(c, v)\n}\n\n// Fdump formats and displays the passed arguments to io.Writer w.  It formats\n// exactly the same as Dump.\nfunc (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {\n\tfdump(c, w, a...)\n}\n\n/*\nDump displays the passed parameters to standard out with newlines, customizable\nindentation, and additional debug information such as complete types and all\npointer addresses used to indirect to the final value.  
It provides the\nfollowing features over the built-in printing facilities provided by the fmt\npackage:\n\n\t* Pointers are dereferenced and followed\n\t* Circular data structures are detected and handled properly\n\t* Custom Stringer/error interfaces are optionally invoked, including\n\t  on unexported types\n\t* Custom types which only implement the Stringer/error interfaces via\n\t  a pointer receiver are optionally invoked when passing non-pointer\n\t  variables\n\t* Byte arrays and slices are dumped like the hexdump -C command which\n\t  includes offsets, byte values in hex, and ASCII output\n\nThe configuration options are controlled by modifying the public members\nof c.  See ConfigState for options documentation.\n\nSee Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to\nget the formatted result as a string.\n*/\nfunc (c *ConfigState) Dump(a ...interface{}) {\n\tfdump(c, os.Stdout, a...)\n}\n\n// Sdump returns a string with the passed arguments formatted exactly the same\n// as Dump.\nfunc (c *ConfigState) Sdump(a ...interface{}) string {\n\tvar buf bytes.Buffer\n\tfdump(c, &buf, a...)\n\treturn buf.String()\n}\n\n// convertArgs accepts a slice of arguments and returns a slice of the same\n// length with each argument converted to a spew Formatter interface using\n// the ConfigState associated with s.\nfunc (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {\n\tformatters = make([]interface{}, len(args))\n\tfor index, arg := range args {\n\t\tformatters[index] = newFormatter(c, arg)\n\t}\n\treturn formatters\n}\n\n// NewDefaultConfig returns a ConfigState with the following default settings.\n//\n// \tIndent: \" \"\n// \tMaxDepth: 0\n// \tDisableMethods: false\n// \tDisablePointerMethods: false\n// \tContinueOnMethod: false\n// \tSortKeys: false\nfunc NewDefaultConfig() *ConfigState {\n\treturn &ConfigState{Indent: \" \"}\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/doc.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\n/*\nPackage spew implements a deep pretty printer for Go data structures to aid in\ndebugging.\n\nA quick overview of the additional features spew provides over the built-in\nprinting facilities for Go data types are as follows:\n\n\t* Pointers are dereferenced and followed\n\t* Circular data structures are detected and handled properly\n\t* Custom Stringer/error interfaces are optionally invoked, including\n\t  on unexported types\n\t* Custom types which only implement the Stringer/error interfaces via\n\t  a pointer receiver are optionally invoked when passing non-pointer\n\t  variables\n\t* Byte arrays and slices are dumped like the hexdump -C command which\n\t  includes offsets, byte values in hex, and ASCII output (only when using\n\t  Dump style)\n\nThere are two different approaches spew allows for dumping Go data structures:\n\n\t* Dump style which prints with newlines, customizable indentation,\n\t  and additional debug information such as types and all pointer addresses\n\t  used to indirect to the final value\n\t* A custom Formatter interface that integrates cleanly with the standard fmt\n\t  package and replaces %v, %+v, %#v, and %#+v to 
provide inline printing\n\t  similar to the default %v while providing the additional functionality\n\t  outlined above and passing unsupported format verbs such as %x and %q\n\t  along to fmt\n\nQuick Start\n\nThis section demonstrates how to quickly get started with spew.  See the\nsections below for further details on formatting and configuration options.\n\nTo dump a variable with full newlines, indentation, type, and pointer\ninformation use Dump, Fdump, or Sdump:\n\tspew.Dump(myVar1, myVar2, ...)\n\tspew.Fdump(someWriter, myVar1, myVar2, ...)\n\tstr := spew.Sdump(myVar1, myVar2, ...)\n\nAlternatively, if you would prefer to use format strings with a compacted inline\nprinting style, use the convenience wrappers Printf, Fprintf, etc with\n%v (most compact), %+v (adds pointer addresses), %#v (adds types), or\n%#+v (adds types and pointer addresses):\n\tspew.Printf(\"myVar1: %v -- myVar2: %+v\", myVar1, myVar2)\n\tspew.Printf(\"myVar3: %#v -- myVar4: %#+v\", myVar3, myVar4)\n\tspew.Fprintf(someWriter, \"myVar1: %v -- myVar2: %+v\", myVar1, myVar2)\n\tspew.Fprintf(someWriter, \"myVar3: %#v -- myVar4: %#+v\", myVar3, myVar4)\n\nConfiguration Options\n\nConfiguration of spew is handled by fields in the ConfigState type.  For\nconvenience, all of the top-level functions use a global state available\nvia the spew.Config global.\n\nIt is also possible to create a ConfigState instance that provides methods\nequivalent to the top-level functions.  This allows concurrent configuration\noptions.  See the ConfigState documentation for more details.\n\nThe following configuration options are available:\n\t* Indent\n\t\tString to use for each indentation level for Dump functions.\n\t\tIt is a single space by default.  
A popular alternative is \"\\t\".\n\n\t* MaxDepth\n\t\tMaximum number of levels to descend into nested data structures.\n\t\tThere is no limit by default.\n\n\t* DisableMethods\n\t\tDisables invocation of error and Stringer interface methods.\n\t\tMethod invocation is enabled by default.\n\n\t* DisablePointerMethods\n\t\tDisables invocation of error and Stringer interface methods on types\n\t\twhich only accept pointer receivers from non-pointer variables.\n\t\tPointer method invocation is enabled by default.\n\n\t* DisablePointerAddresses\n\t\tDisablePointerAddresses specifies whether to disable the printing of\n\t\tpointer addresses. This is useful when diffing data structures in tests.\n\n\t* DisableCapacities\n\t\tDisableCapacities specifies whether to disable the printing of\n\t\tcapacities for arrays, slices, maps and channels. This is useful when\n\t\tdiffing data structures in tests.\n\n\t* ContinueOnMethod\n\t\tEnables recursion into types after invoking error and Stringer interface\n\t\tmethods. Recursion after method invocation is disabled by default.\n\n\t* SortKeys\n\t\tSpecifies map keys should be sorted before being printed. Use\n\t\tthis to have a more deterministic, diffable output.  Note that\n\t\tonly native types (bool, int, uint, floats, uintptr and string)\n\t\tand types which implement error or Stringer interfaces are\n\t\tsupported with other types sorted according to the\n\t\treflect.Value.String() output which guarantees display\n\t\tstability.  Natural map order is used by default.\n\n\t* SpewKeys\n\t\tSpecifies that, as a last resort attempt, map keys should be\n\t\tspewed to strings and sorted by those strings.  This is only\n\t\tconsidered if SortKeys is true.\n\nDump Usage\n\nSimply call spew.Dump with a list of variables you want to dump:\n\n\tspew.Dump(myVar1, myVar2, ...)\n\nYou may also call spew.Fdump if you would prefer to output to an arbitrary\nio.Writer.  
For example, to dump to standard error:\n\n\tspew.Fdump(os.Stderr, myVar1, myVar2, ...)\n\nA third option is to call spew.Sdump to get the formatted output as a string:\n\n\tstr := spew.Sdump(myVar1, myVar2, ...)\n\nSample Dump Output\n\nSee the Dump example for details on the setup of the types and variables being\nshown here.\n\n\t(main.Foo) {\n\t unexportedField: (*main.Bar)(0xf84002e210)({\n\t  flag: (main.Flag) flagTwo,\n\t  data: (uintptr) <nil>\n\t }),\n\t ExportedField: (map[interface {}]interface {}) (len=1) {\n\t  (string) (len=3) \"one\": (bool) true\n\t }\n\t}\n\nByte (and uint8) arrays and slices are displayed uniquely like the hexdump -C\ncommand as shown.\n\t([]uint8) (len=32 cap=32) {\n\t 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |\n\t 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!\"#$%&'()*+,-./0|\n\t 00000020  31 32                                             |12|\n\t}\n\nCustom Formatter\n\nSpew provides a custom formatter that implements the fmt.Formatter interface\nso that it integrates cleanly with standard fmt package printing functions. The\nformatter is useful for inline printing of smaller data types similar to the\nstandard %v format specifier.\n\nThe custom formatter only responds to the %v (most compact), %+v (adds pointer\naddresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb\ncombinations.  Any other verbs such as %x and %q will be sent to the the\nstandard fmt package for formatting.  In addition, the custom formatter ignores\nthe width and precision arguments (however they will still work on the format\nspecifiers not handled by the custom formatter).\n\nCustom Formatter Usage\n\nThe simplest way to make use of the spew custom formatter is to call one of the\nconvenience functions such as spew.Printf, spew.Println, or spew.Printf.  
The\nfunctions have syntax you are most likely already familiar with:\n\n\tspew.Printf(\"myVar1: %v -- myVar2: %+v\", myVar1, myVar2)\n\tspew.Printf(\"myVar3: %#v -- myVar4: %#+v\", myVar3, myVar4)\n\tspew.Println(myVar, myVar2)\n\tspew.Fprintf(os.Stderr, \"myVar1: %v -- myVar2: %+v\", myVar1, myVar2)\n\tspew.Fprintf(os.Stderr, \"myVar3: %#v -- myVar4: %#+v\", myVar3, myVar4)\n\nSee the Index for the full list convenience functions.\n\nSample Formatter Output\n\nDouble pointer to a uint8:\n\t  %v: <**>5\n\t %+v: <**>(0xf8400420d0->0xf8400420c8)5\n\t %#v: (**uint8)5\n\t%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5\n\nPointer to circular struct with a uint8 field and a pointer to itself:\n\t  %v: <*>{1 <*><shown>}\n\t %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}\n\t %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}\n\t%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}\n\nSee the Printf example for details on the setup of variables being shown\nhere.\n\nErrors\n\nSince it is possible for custom Stringer/error interfaces to panic, spew\ndetects them and handles them internally by printing the panic information\ninline with the output.  Since spew is intended to provide deep pretty printing\ncapabilities on structures, it intentionally does not return any errors.\n*/\npackage spew\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/dump.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\npackage spew\n\nimport (\n\t\"bytes\"\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\t// uint8Type is a reflect.Type representing a uint8.  It is used to\n\t// convert cgo types to uint8 slices for hexdumping.\n\tuint8Type = reflect.TypeOf(uint8(0))\n\n\t// cCharRE is a regular expression that matches a cgo char.\n\t// It is used to detect character arrays to hexdump them.\n\tcCharRE = regexp.MustCompile(\"^.*\\\\._Ctype_char$\")\n\n\t// cUnsignedCharRE is a regular expression that matches a cgo unsigned\n\t// char.  
It is used to detect unsigned character arrays to hexdump\n\t// them.\n\tcUnsignedCharRE = regexp.MustCompile(\"^.*\\\\._Ctype_unsignedchar$\")\n\n\t// cUint8tCharRE is a regular expression that matches a cgo uint8_t.\n\t// It is used to detect uint8_t arrays to hexdump them.\n\tcUint8tCharRE = regexp.MustCompile(\"^.*\\\\._Ctype_uint8_t$\")\n)\n\n// dumpState contains information about the state of a dump operation.\ntype dumpState struct {\n\tw                io.Writer\n\tdepth            int\n\tpointers         map[uintptr]int\n\tignoreNextType   bool\n\tignoreNextIndent bool\n\tcs               *ConfigState\n}\n\n// indent performs indentation according to the depth level and cs.Indent\n// option.\nfunc (d *dumpState) indent() {\n\tif d.ignoreNextIndent {\n\t\td.ignoreNextIndent = false\n\t\treturn\n\t}\n\td.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))\n}\n\n// unpackValue returns values inside of non-nil interfaces when possible.\n// This is useful for data types like structs, arrays, slices, and maps which\n// can contain varying types packed inside an interface.\nfunc (d *dumpState) unpackValue(v reflect.Value) reflect.Value {\n\tif v.Kind() == reflect.Interface && !v.IsNil() {\n\t\tv = v.Elem()\n\t}\n\treturn v\n}\n\n// dumpPtr handles formatting of pointers by indirecting them as necessary.\nfunc (d *dumpState) dumpPtr(v reflect.Value) {\n\t// Remove pointers at or below the current depth from map used to detect\n\t// circular refs.\n\tfor k, depth := range d.pointers {\n\t\tif depth >= d.depth {\n\t\t\tdelete(d.pointers, k)\n\t\t}\n\t}\n\n\t// Keep list of all dereferenced pointers to show later.\n\tpointerChain := make([]uintptr, 0)\n\n\t// Figure out how many levels of indirection there are by dereferencing\n\t// pointers and unpacking interfaces down the chain while detecting circular\n\t// references.\n\tnilFound := false\n\tcycleFound := false\n\tindirects := 0\n\tve := v\n\tfor ve.Kind() == reflect.Ptr {\n\t\tif ve.IsNil() {\n\t\t\tnilFound = 
true\n\t\t\tbreak\n\t\t}\n\t\tindirects++\n\t\taddr := ve.Pointer()\n\t\tpointerChain = append(pointerChain, addr)\n\t\tif pd, ok := d.pointers[addr]; ok && pd < d.depth {\n\t\t\tcycleFound = true\n\t\t\tindirects--\n\t\t\tbreak\n\t\t}\n\t\td.pointers[addr] = d.depth\n\n\t\tve = ve.Elem()\n\t\tif ve.Kind() == reflect.Interface {\n\t\t\tif ve.IsNil() {\n\t\t\t\tnilFound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tve = ve.Elem()\n\t\t}\n\t}\n\n\t// Display type information.\n\td.w.Write(openParenBytes)\n\td.w.Write(bytes.Repeat(asteriskBytes, indirects))\n\td.w.Write([]byte(ve.Type().String()))\n\td.w.Write(closeParenBytes)\n\n\t// Display pointer information.\n\tif !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {\n\t\td.w.Write(openParenBytes)\n\t\tfor i, addr := range pointerChain {\n\t\t\tif i > 0 {\n\t\t\t\td.w.Write(pointerChainBytes)\n\t\t\t}\n\t\t\tprintHexPtr(d.w, addr)\n\t\t}\n\t\td.w.Write(closeParenBytes)\n\t}\n\n\t// Display dereferenced value.\n\td.w.Write(openParenBytes)\n\tswitch {\n\tcase nilFound == true:\n\t\td.w.Write(nilAngleBytes)\n\n\tcase cycleFound == true:\n\t\td.w.Write(circularBytes)\n\n\tdefault:\n\t\td.ignoreNextType = true\n\t\td.dump(ve)\n\t}\n\td.w.Write(closeParenBytes)\n}\n\n// dumpSlice handles formatting of arrays and slices.  Byte (uint8 under\n// reflection) arrays and slices are dumped in hexdump -C fashion.\nfunc (d *dumpState) dumpSlice(v reflect.Value) {\n\t// Determine whether this type should be hex dumped or not.  
Also,\n\t// for types which should be hexdumped, try to use the underlying data\n\t// first, then fall back to trying to convert them to a uint8 slice.\n\tvar buf []uint8\n\tdoConvert := false\n\tdoHexDump := false\n\tnumEntries := v.Len()\n\tif numEntries > 0 {\n\t\tvt := v.Index(0).Type()\n\t\tvts := vt.String()\n\t\tswitch {\n\t\t// C types that need to be converted.\n\t\tcase cCharRE.MatchString(vts):\n\t\t\tfallthrough\n\t\tcase cUnsignedCharRE.MatchString(vts):\n\t\t\tfallthrough\n\t\tcase cUint8tCharRE.MatchString(vts):\n\t\t\tdoConvert = true\n\n\t\t// Try to use existing uint8 slices and fall back to converting\n\t\t// and copying if that fails.\n\t\tcase vt.Kind() == reflect.Uint8:\n\t\t\t// We need an addressable interface to convert the type\n\t\t\t// to a byte slice.  However, the reflect package won't\n\t\t\t// give us an interface on certain things like\n\t\t\t// unexported struct fields in order to enforce\n\t\t\t// visibility rules.  We use unsafe, when available, to\n\t\t\t// bypass these restrictions since this package does not\n\t\t\t// mutate the values.\n\t\t\tvs := v\n\t\t\tif !vs.CanInterface() || !vs.CanAddr() {\n\t\t\t\tvs = unsafeReflectValue(vs)\n\t\t\t}\n\t\t\tif !UnsafeDisabled {\n\t\t\t\tvs = vs.Slice(0, numEntries)\n\n\t\t\t\t// Use the existing uint8 slice if it can be\n\t\t\t\t// type asserted.\n\t\t\t\tiface := vs.Interface()\n\t\t\t\tif slice, ok := iface.([]uint8); ok {\n\t\t\t\t\tbuf = slice\n\t\t\t\t\tdoHexDump = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// The underlying data needs to be converted if it can't\n\t\t\t// be type asserted to a uint8 slice.\n\t\t\tdoConvert = true\n\t\t}\n\n\t\t// Copy and convert the underlying type if needed.\n\t\tif doConvert && vt.ConvertibleTo(uint8Type) {\n\t\t\t// Convert and copy each element into a uint8 byte\n\t\t\t// slice.\n\t\t\tbuf = make([]uint8, numEntries)\n\t\t\tfor i := 0; i < numEntries; i++ {\n\t\t\t\tvv := v.Index(i)\n\t\t\t\tbuf[i] = 
uint8(vv.Convert(uint8Type).Uint())\n\t\t\t}\n\t\t\tdoHexDump = true\n\t\t}\n\t}\n\n\t// Hexdump the entire slice as needed.\n\tif doHexDump {\n\t\tindent := strings.Repeat(d.cs.Indent, d.depth)\n\t\tstr := indent + hex.Dump(buf)\n\t\tstr = strings.Replace(str, \"\\n\", \"\\n\"+indent, -1)\n\t\tstr = strings.TrimRight(str, d.cs.Indent)\n\t\td.w.Write([]byte(str))\n\t\treturn\n\t}\n\n\t// Recursively call dump for each item.\n\tfor i := 0; i < numEntries; i++ {\n\t\td.dump(d.unpackValue(v.Index(i)))\n\t\tif i < (numEntries - 1) {\n\t\t\td.w.Write(commaNewlineBytes)\n\t\t} else {\n\t\t\td.w.Write(newlineBytes)\n\t\t}\n\t}\n}\n\n// dump is the main workhorse for dumping a value.  It uses the passed reflect\n// value to figure out what kind of object we are dealing with and formats it\n// appropriately.  It is a recursive function, however circular data structures\n// are detected and handled properly.\nfunc (d *dumpState) dump(v reflect.Value) {\n\t// Handle invalid reflect values immediately.\n\tkind := v.Kind()\n\tif kind == reflect.Invalid {\n\t\td.w.Write(invalidAngleBytes)\n\t\treturn\n\t}\n\n\t// Handle pointers specially.\n\tif kind == reflect.Ptr {\n\t\td.indent()\n\t\td.dumpPtr(v)\n\t\treturn\n\t}\n\n\t// Print type information unless already handled elsewhere.\n\tif !d.ignoreNextType {\n\t\td.indent()\n\t\td.w.Write(openParenBytes)\n\t\td.w.Write([]byte(v.Type().String()))\n\t\td.w.Write(closeParenBytes)\n\t\td.w.Write(spaceBytes)\n\t}\n\td.ignoreNextType = false\n\n\t// Display length and capacity if the built-in len and cap functions\n\t// work with the value's kind and the len/cap itself is non-zero.\n\tvalueLen, valueCap := 0, 0\n\tswitch v.Kind() {\n\tcase reflect.Array, reflect.Slice, reflect.Chan:\n\t\tvalueLen, valueCap = v.Len(), v.Cap()\n\tcase reflect.Map, reflect.String:\n\t\tvalueLen = v.Len()\n\t}\n\tif valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {\n\t\td.w.Write(openParenBytes)\n\t\tif valueLen != 0 
{\n\t\t\td.w.Write(lenEqualsBytes)\n\t\t\tprintInt(d.w, int64(valueLen), 10)\n\t\t}\n\t\tif !d.cs.DisableCapacities && valueCap != 0 {\n\t\t\tif valueLen != 0 {\n\t\t\t\td.w.Write(spaceBytes)\n\t\t\t}\n\t\t\td.w.Write(capEqualsBytes)\n\t\t\tprintInt(d.w, int64(valueCap), 10)\n\t\t}\n\t\td.w.Write(closeParenBytes)\n\t\td.w.Write(spaceBytes)\n\t}\n\n\t// Call Stringer/error interfaces if they exist and the handle methods flag\n\t// is enabled\n\tif !d.cs.DisableMethods {\n\t\tif (kind != reflect.Invalid) && (kind != reflect.Interface) {\n\t\t\tif handled := handleMethods(d.cs, d.w, v); handled {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch kind {\n\tcase reflect.Invalid:\n\t\t// Do nothing.  We should never get here since invalid has already\n\t\t// been handled above.\n\n\tcase reflect.Bool:\n\t\tprintBool(d.w, v.Bool())\n\n\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:\n\t\tprintInt(d.w, v.Int(), 10)\n\n\tcase reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:\n\t\tprintUint(d.w, v.Uint(), 10)\n\n\tcase reflect.Float32:\n\t\tprintFloat(d.w, v.Float(), 32)\n\n\tcase reflect.Float64:\n\t\tprintFloat(d.w, v.Float(), 64)\n\n\tcase reflect.Complex64:\n\t\tprintComplex(d.w, v.Complex(), 32)\n\n\tcase reflect.Complex128:\n\t\tprintComplex(d.w, v.Complex(), 64)\n\n\tcase reflect.Slice:\n\t\tif v.IsNil() {\n\t\t\td.w.Write(nilAngleBytes)\n\t\t\tbreak\n\t\t}\n\t\tfallthrough\n\n\tcase reflect.Array:\n\t\td.w.Write(openBraceNewlineBytes)\n\t\td.depth++\n\t\tif (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {\n\t\t\td.indent()\n\t\t\td.w.Write(maxNewlineBytes)\n\t\t} else {\n\t\t\td.dumpSlice(v)\n\t\t}\n\t\td.depth--\n\t\td.indent()\n\t\td.w.Write(closeBraceBytes)\n\n\tcase reflect.String:\n\t\td.w.Write([]byte(strconv.Quote(v.String())))\n\n\tcase reflect.Interface:\n\t\t// The only time we should get here is for nil interfaces due to\n\t\t// unpackValue calls.\n\t\tif v.IsNil() 
{\n\t\t\td.w.Write(nilAngleBytes)\n\t\t}\n\n\tcase reflect.Ptr:\n\t\t// Do nothing.  We should never get here since pointers have already\n\t\t// been handled above.\n\n\tcase reflect.Map:\n\t\t// nil maps should be indicated as different than empty maps\n\t\tif v.IsNil() {\n\t\t\td.w.Write(nilAngleBytes)\n\t\t\tbreak\n\t\t}\n\n\t\td.w.Write(openBraceNewlineBytes)\n\t\td.depth++\n\t\tif (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {\n\t\t\td.indent()\n\t\t\td.w.Write(maxNewlineBytes)\n\t\t} else {\n\t\t\tnumEntries := v.Len()\n\t\t\tkeys := v.MapKeys()\n\t\t\tif d.cs.SortKeys {\n\t\t\t\tsortValues(keys, d.cs)\n\t\t\t}\n\t\t\tfor i, key := range keys {\n\t\t\t\td.dump(d.unpackValue(key))\n\t\t\t\td.w.Write(colonSpaceBytes)\n\t\t\t\td.ignoreNextIndent = true\n\t\t\t\td.dump(d.unpackValue(v.MapIndex(key)))\n\t\t\t\tif i < (numEntries - 1) {\n\t\t\t\t\td.w.Write(commaNewlineBytes)\n\t\t\t\t} else {\n\t\t\t\t\td.w.Write(newlineBytes)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\td.depth--\n\t\td.indent()\n\t\td.w.Write(closeBraceBytes)\n\n\tcase reflect.Struct:\n\t\td.w.Write(openBraceNewlineBytes)\n\t\td.depth++\n\t\tif (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {\n\t\t\td.indent()\n\t\t\td.w.Write(maxNewlineBytes)\n\t\t} else {\n\t\t\tvt := v.Type()\n\t\t\tnumFields := v.NumField()\n\t\t\tfor i := 0; i < numFields; i++ {\n\t\t\t\td.indent()\n\t\t\t\tvtf := vt.Field(i)\n\t\t\t\td.w.Write([]byte(vtf.Name))\n\t\t\t\td.w.Write(colonSpaceBytes)\n\t\t\t\td.ignoreNextIndent = true\n\t\t\t\td.dump(d.unpackValue(v.Field(i)))\n\t\t\t\tif i < (numFields - 1) {\n\t\t\t\t\td.w.Write(commaNewlineBytes)\n\t\t\t\t} else {\n\t\t\t\t\td.w.Write(newlineBytes)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\td.depth--\n\t\td.indent()\n\t\td.w.Write(closeBraceBytes)\n\n\tcase reflect.Uintptr:\n\t\tprintHexPtr(d.w, uintptr(v.Uint()))\n\n\tcase reflect.UnsafePointer, reflect.Chan, reflect.Func:\n\t\tprintHexPtr(d.w, v.Pointer())\n\n\t// There were not any other types at the time this code was written, 
but\n\t// fall back to letting the default fmt package handle it in case any new\n\t// types are added.\n\tdefault:\n\t\tif v.CanInterface() {\n\t\t\tfmt.Fprintf(d.w, \"%v\", v.Interface())\n\t\t} else {\n\t\t\tfmt.Fprintf(d.w, \"%v\", v.String())\n\t\t}\n\t}\n}\n\n// fdump is a helper function to consolidate the logic from the various public\n// methods which take varying writers and config states.\nfunc fdump(cs *ConfigState, w io.Writer, a ...interface{}) {\n\tfor _, arg := range a {\n\t\tif arg == nil {\n\t\t\tw.Write(interfaceBytes)\n\t\t\tw.Write(spaceBytes)\n\t\t\tw.Write(nilAngleBytes)\n\t\t\tw.Write(newlineBytes)\n\t\t\tcontinue\n\t\t}\n\n\t\td := dumpState{w: w, cs: cs}\n\t\td.pointers = make(map[uintptr]int)\n\t\td.dump(reflect.ValueOf(arg))\n\t\td.w.Write(newlineBytes)\n\t}\n}\n\n// Fdump formats and displays the passed arguments to io.Writer w.  It formats\n// exactly the same as Dump.\nfunc Fdump(w io.Writer, a ...interface{}) {\n\tfdump(&Config, w, a...)\n}\n\n// Sdump returns a string with the passed arguments formatted exactly the same\n// as Dump.\nfunc Sdump(a ...interface{}) string {\n\tvar buf bytes.Buffer\n\tfdump(&Config, &buf, a...)\n\treturn buf.String()\n}\n\n/*\nDump displays the passed parameters to standard out with newlines, customizable\nindentation, and additional debug information such as complete types and all\npointer addresses used to indirect to the final value.  
It provides the\nfollowing features over the built-in printing facilities provided by the fmt\npackage:\n\n\t* Pointers are dereferenced and followed\n\t* Circular data structures are detected and handled properly\n\t* Custom Stringer/error interfaces are optionally invoked, including\n\t  on unexported types\n\t* Custom types which only implement the Stringer/error interfaces via\n\t  a pointer receiver are optionally invoked when passing non-pointer\n\t  variables\n\t* Byte arrays and slices are dumped like the hexdump -C command which\n\t  includes offsets, byte values in hex, and ASCII output\n\nThe configuration options are controlled by an exported package global,\nspew.Config.  See ConfigState for options documentation.\n\nSee Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to\nget the formatted result as a string.\n*/\nfunc Dump(a ...interface{}) {\n\tfdump(&Config, os.Stdout, a...)\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/dump_test.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\n/*\nTest Summary:\nNOTE: For each test, a nil pointer, a single pointer and double pointer to the\nbase test element are also tested to ensure proper indirection across all types.\n\n- Max int8, int16, int32, int64, int\n- Max uint8, uint16, uint32, uint64, uint\n- Boolean true and false\n- Standard complex64 and complex128\n- Array containing standard ints\n- Array containing type with custom formatter on pointer receiver only\n- Array containing interfaces\n- Array containing bytes\n- Slice containing standard float32 values\n- Slice containing type with custom formatter on pointer receiver only\n- Slice containing interfaces\n- Slice containing bytes\n- Nil slice\n- Standard string\n- Nil interface\n- Sub-interface\n- Map with string keys and int vals\n- Map with custom formatter type on pointer receiver only keys and vals\n- Map with interface keys and values\n- Map with nil interface value\n- Struct with primitives\n- Struct that contains another struct\n- Struct that contains custom type with Stringer pointer interface via both\n  exported and unexported fields\n- Struct that contains embedded struct and field to same struct\n- Uintptr to 0 (null 
pointer)\n- Uintptr address of real variable\n- Unsafe.Pointer to 0 (null pointer)\n- Unsafe.Pointer to address of real variable\n- Nil channel\n- Standard int channel\n- Function with no params and no returns\n- Function with param and no returns\n- Function with multiple params and multiple returns\n- Struct that is circular through self referencing\n- Structs that are circular through cross referencing\n- Structs that are indirectly circular\n- Type that panics in its Stringer interface\n*/\n\npackage spew_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n\t\"unsafe\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n)\n\n// dumpTest is used to describe a test to be performed against the Dump method.\ntype dumpTest struct {\n\tin    interface{}\n\twants []string\n}\n\n// dumpTests houses all of the tests to be performed against the Dump method.\nvar dumpTests = make([]dumpTest, 0)\n\n// addDumpTest is a helper method to append the passed input and desired result\n// to dumpTests\nfunc addDumpTest(in interface{}, wants ...string) {\n\ttest := dumpTest{in, wants}\n\tdumpTests = append(dumpTests, test)\n}\n\nfunc addIntDumpTests() {\n\t// Max int8.\n\tv := int8(127)\n\tnv := (*int8)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"int8\"\n\tvs := \"127\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Max int16.\n\tv2 := int16(32767)\n\tnv2 := (*int16)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"int16\"\n\tv2s := \"32767\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(nv2, 
\"(*\"+v2t+\")(<nil>)\\n\")\n\n\t// Max int32.\n\tv3 := int32(2147483647)\n\tnv3 := (*int32)(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"int32\"\n\tv3s := \"2147483647\"\n\taddDumpTest(v3, \"(\"+v3t+\") \"+v3s+\"\\n\")\n\taddDumpTest(pv3, \"(*\"+v3t+\")(\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(&pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(nv3, \"(*\"+v3t+\")(<nil>)\\n\")\n\n\t// Max int64.\n\tv4 := int64(9223372036854775807)\n\tnv4 := (*int64)(nil)\n\tpv4 := &v4\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"int64\"\n\tv4s := \"9223372036854775807\"\n\taddDumpTest(v4, \"(\"+v4t+\") \"+v4s+\"\\n\")\n\taddDumpTest(pv4, \"(*\"+v4t+\")(\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(&pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(nv4, \"(*\"+v4t+\")(<nil>)\\n\")\n\n\t// Max int.\n\tv5 := int(2147483647)\n\tnv5 := (*int)(nil)\n\tpv5 := &v5\n\tv5Addr := fmt.Sprintf(\"%p\", pv5)\n\tpv5Addr := fmt.Sprintf(\"%p\", &pv5)\n\tv5t := \"int\"\n\tv5s := \"2147483647\"\n\taddDumpTest(v5, \"(\"+v5t+\") \"+v5s+\"\\n\")\n\taddDumpTest(pv5, \"(*\"+v5t+\")(\"+v5Addr+\")(\"+v5s+\")\\n\")\n\taddDumpTest(&pv5, \"(**\"+v5t+\")(\"+pv5Addr+\"->\"+v5Addr+\")(\"+v5s+\")\\n\")\n\taddDumpTest(nv5, \"(*\"+v5t+\")(<nil>)\\n\")\n}\n\nfunc addUintDumpTests() {\n\t// Max uint8.\n\tv := uint8(255)\n\tnv := (*uint8)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"uint8\"\n\tvs := \"255\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Max uint16.\n\tv2 := uint16(65535)\n\tnv2 := (*uint16)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", 
&pv2)\n\tv2t := \"uint16\"\n\tv2s := \"65535\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(nv2, \"(*\"+v2t+\")(<nil>)\\n\")\n\n\t// Max uint32.\n\tv3 := uint32(4294967295)\n\tnv3 := (*uint32)(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"uint32\"\n\tv3s := \"4294967295\"\n\taddDumpTest(v3, \"(\"+v3t+\") \"+v3s+\"\\n\")\n\taddDumpTest(pv3, \"(*\"+v3t+\")(\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(&pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(nv3, \"(*\"+v3t+\")(<nil>)\\n\")\n\n\t// Max uint64.\n\tv4 := uint64(18446744073709551615)\n\tnv4 := (*uint64)(nil)\n\tpv4 := &v4\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"uint64\"\n\tv4s := \"18446744073709551615\"\n\taddDumpTest(v4, \"(\"+v4t+\") \"+v4s+\"\\n\")\n\taddDumpTest(pv4, \"(*\"+v4t+\")(\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(&pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(nv4, \"(*\"+v4t+\")(<nil>)\\n\")\n\n\t// Max uint.\n\tv5 := uint(4294967295)\n\tnv5 := (*uint)(nil)\n\tpv5 := &v5\n\tv5Addr := fmt.Sprintf(\"%p\", pv5)\n\tpv5Addr := fmt.Sprintf(\"%p\", &pv5)\n\tv5t := \"uint\"\n\tv5s := \"4294967295\"\n\taddDumpTest(v5, \"(\"+v5t+\") \"+v5s+\"\\n\")\n\taddDumpTest(pv5, \"(*\"+v5t+\")(\"+v5Addr+\")(\"+v5s+\")\\n\")\n\taddDumpTest(&pv5, \"(**\"+v5t+\")(\"+pv5Addr+\"->\"+v5Addr+\")(\"+v5s+\")\\n\")\n\taddDumpTest(nv5, \"(*\"+v5t+\")(<nil>)\\n\")\n}\n\nfunc addBoolDumpTests() {\n\t// Boolean true.\n\tv := bool(true)\n\tnv := (*bool)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"bool\"\n\tvs := \"true\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, 
\"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Boolean false.\n\tv2 := bool(false)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"bool\"\n\tv2s := \"false\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n}\n\nfunc addFloatDumpTests() {\n\t// Standard float32.\n\tv := float32(3.1415)\n\tnv := (*float32)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"float32\"\n\tvs := \"3.1415\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Standard float64.\n\tv2 := float64(3.1415926)\n\tnv2 := (*float64)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"float64\"\n\tv2s := \"3.1415926\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(nv2, \"(*\"+v2t+\")(<nil>)\\n\")\n}\n\nfunc addComplexDumpTests() {\n\t// Standard complex64.\n\tv := complex(float32(6), -2)\n\tnv := (*complex64)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"complex64\"\n\tvs := \"(6-2i)\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Standard complex128.\n\tv2 := 
complex(float64(-6), 2)\n\tnv2 := (*complex128)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"complex128\"\n\tv2s := \"(-6+2i)\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(nv2, \"(*\"+v2t+\")(<nil>)\\n\")\n}\n\nfunc addArrayDumpTests() {\n\t// Array containing standard ints.\n\tv := [3]int{1, 2, 3}\n\tvLen := fmt.Sprintf(\"%d\", len(v))\n\tvCap := fmt.Sprintf(\"%d\", cap(v))\n\tnv := (*[3]int)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"int\"\n\tvs := \"(len=\" + vLen + \" cap=\" + vCap + \") {\\n (\" + vt + \") 1,\\n (\" +\n\t\tvt + \") 2,\\n (\" + vt + \") 3\\n}\"\n\taddDumpTest(v, \"([3]\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*[3]\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**[3]\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*[3]\"+vt+\")(<nil>)\\n\")\n\n\t// Array containing type with custom formatter on pointer receiver only.\n\tv2i0 := pstringer(\"1\")\n\tv2i1 := pstringer(\"2\")\n\tv2i2 := pstringer(\"3\")\n\tv2 := [3]pstringer{v2i0, v2i1, v2i2}\n\tv2i0Len := fmt.Sprintf(\"%d\", len(v2i0))\n\tv2i1Len := fmt.Sprintf(\"%d\", len(v2i1))\n\tv2i2Len := fmt.Sprintf(\"%d\", len(v2i2))\n\tv2Len := fmt.Sprintf(\"%d\", len(v2))\n\tv2Cap := fmt.Sprintf(\"%d\", cap(v2))\n\tnv2 := (*[3]pstringer)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"spew_test.pstringer\"\n\tv2sp := \"(len=\" + v2Len + \" cap=\" + v2Cap + \") {\\n (\" + v2t +\n\t\t\") (len=\" + v2i0Len + \") stringer 1,\\n (\" + v2t +\n\t\t\") (len=\" + v2i1Len + \") stringer 2,\\n (\" + v2t +\n\t\t\") (len=\" + v2i2Len + \") \" + \"stringer 3\\n}\"\n\tv2s := v2sp\n\tif spew.UnsafeDisabled {\n\t\tv2s = \"(len=\" + 
v2Len + \" cap=\" + v2Cap + \") {\\n (\" + v2t +\n\t\t\t\") (len=\" + v2i0Len + \") \\\"1\\\",\\n (\" + v2t + \") (len=\" +\n\t\t\tv2i1Len + \") \\\"2\\\",\\n (\" + v2t + \") (len=\" + v2i2Len +\n\t\t\t\") \" + \"\\\"3\\\"\\n}\"\n\t}\n\taddDumpTest(v2, \"([3]\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*[3]\"+v2t+\")(\"+v2Addr+\")(\"+v2sp+\")\\n\")\n\taddDumpTest(&pv2, \"(**[3]\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2sp+\")\\n\")\n\taddDumpTest(nv2, \"(*[3]\"+v2t+\")(<nil>)\\n\")\n\n\t// Array containing interfaces.\n\tv3i0 := \"one\"\n\tv3 := [3]interface{}{v3i0, int(2), uint(3)}\n\tv3i0Len := fmt.Sprintf(\"%d\", len(v3i0))\n\tv3Len := fmt.Sprintf(\"%d\", len(v3))\n\tv3Cap := fmt.Sprintf(\"%d\", cap(v3))\n\tnv3 := (*[3]interface{})(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"[3]interface {}\"\n\tv3t2 := \"string\"\n\tv3t3 := \"int\"\n\tv3t4 := \"uint\"\n\tv3s := \"(len=\" + v3Len + \" cap=\" + v3Cap + \") {\\n (\" + v3t2 + \") \" +\n\t\t\"(len=\" + v3i0Len + \") \\\"one\\\",\\n (\" + v3t3 + \") 2,\\n (\" +\n\t\tv3t4 + \") 3\\n}\"\n\taddDumpTest(v3, \"(\"+v3t+\") \"+v3s+\"\\n\")\n\taddDumpTest(pv3, \"(*\"+v3t+\")(\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(&pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(nv3, \"(*\"+v3t+\")(<nil>)\\n\")\n\n\t// Array containing bytes.\n\tv4 := [34]byte{\n\t\t0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,\n\t\t0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,\n\t\t0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,\n\t\t0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,\n\t\t0x31, 0x32,\n\t}\n\tv4Len := fmt.Sprintf(\"%d\", len(v4))\n\tv4Cap := fmt.Sprintf(\"%d\", cap(v4))\n\tnv4 := (*[34]byte)(nil)\n\tpv4 := &v4\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"[34]uint8\"\n\tv4s := \"(len=\" + v4Len + \" cap=\" + v4Cap + \") \" +\n\t\t\"{\\n 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 
1e 1f 20\" +\n\t\t\"  |............... |\\n\" +\n\t\t\" 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30\" +\n\t\t\"  |!\\\"#$%&'()*+,-./0|\\n\" +\n\t\t\" 00000020  31 32                                           \" +\n\t\t\"  |12|\\n}\"\n\taddDumpTest(v4, \"(\"+v4t+\") \"+v4s+\"\\n\")\n\taddDumpTest(pv4, \"(*\"+v4t+\")(\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(&pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(nv4, \"(*\"+v4t+\")(<nil>)\\n\")\n}\n\nfunc addSliceDumpTests() {\n\t// Slice containing standard float32 values.\n\tv := []float32{3.14, 6.28, 12.56}\n\tvLen := fmt.Sprintf(\"%d\", len(v))\n\tvCap := fmt.Sprintf(\"%d\", cap(v))\n\tnv := (*[]float32)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"float32\"\n\tvs := \"(len=\" + vLen + \" cap=\" + vCap + \") {\\n (\" + vt + \") 3.14,\\n (\" +\n\t\tvt + \") 6.28,\\n (\" + vt + \") 12.56\\n}\"\n\taddDumpTest(v, \"([]\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*[]\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**[]\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*[]\"+vt+\")(<nil>)\\n\")\n\n\t// Slice containing type with custom formatter on pointer receiver only.\n\tv2i0 := pstringer(\"1\")\n\tv2i1 := pstringer(\"2\")\n\tv2i2 := pstringer(\"3\")\n\tv2 := []pstringer{v2i0, v2i1, v2i2}\n\tv2i0Len := fmt.Sprintf(\"%d\", len(v2i0))\n\tv2i1Len := fmt.Sprintf(\"%d\", len(v2i1))\n\tv2i2Len := fmt.Sprintf(\"%d\", len(v2i2))\n\tv2Len := fmt.Sprintf(\"%d\", len(v2))\n\tv2Cap := fmt.Sprintf(\"%d\", cap(v2))\n\tnv2 := (*[]pstringer)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"spew_test.pstringer\"\n\tv2s := \"(len=\" + v2Len + \" cap=\" + v2Cap + \") {\\n (\" + v2t + \") (len=\" +\n\t\tv2i0Len + \") stringer 1,\\n (\" + v2t + \") (len=\" + v2i1Len +\n\t\t\") stringer 2,\\n (\" + v2t + \") (len=\" + v2i2Len + \") \" 
+\n\t\t\"stringer 3\\n}\"\n\taddDumpTest(v2, \"([]\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*[]\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**[]\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(nv2, \"(*[]\"+v2t+\")(<nil>)\\n\")\n\n\t// Slice containing interfaces.\n\tv3i0 := \"one\"\n\tv3 := []interface{}{v3i0, int(2), uint(3), nil}\n\tv3i0Len := fmt.Sprintf(\"%d\", len(v3i0))\n\tv3Len := fmt.Sprintf(\"%d\", len(v3))\n\tv3Cap := fmt.Sprintf(\"%d\", cap(v3))\n\tnv3 := (*[]interface{})(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"[]interface {}\"\n\tv3t2 := \"string\"\n\tv3t3 := \"int\"\n\tv3t4 := \"uint\"\n\tv3t5 := \"interface {}\"\n\tv3s := \"(len=\" + v3Len + \" cap=\" + v3Cap + \") {\\n (\" + v3t2 + \") \" +\n\t\t\"(len=\" + v3i0Len + \") \\\"one\\\",\\n (\" + v3t3 + \") 2,\\n (\" +\n\t\tv3t4 + \") 3,\\n (\" + v3t5 + \") <nil>\\n}\"\n\taddDumpTest(v3, \"(\"+v3t+\") \"+v3s+\"\\n\")\n\taddDumpTest(pv3, \"(*\"+v3t+\")(\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(&pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(nv3, \"(*\"+v3t+\")(<nil>)\\n\")\n\n\t// Slice containing bytes.\n\tv4 := []byte{\n\t\t0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,\n\t\t0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,\n\t\t0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,\n\t\t0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,\n\t\t0x31, 0x32,\n\t}\n\tv4Len := fmt.Sprintf(\"%d\", len(v4))\n\tv4Cap := fmt.Sprintf(\"%d\", cap(v4))\n\tnv4 := (*[]byte)(nil)\n\tpv4 := &v4\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"[]uint8\"\n\tv4s := \"(len=\" + v4Len + \" cap=\" + v4Cap + \") \" +\n\t\t\"{\\n 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20\" +\n\t\t\"  |............... 
|\\n\" +\n\t\t\" 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30\" +\n\t\t\"  |!\\\"#$%&'()*+,-./0|\\n\" +\n\t\t\" 00000020  31 32                                           \" +\n\t\t\"  |12|\\n}\"\n\taddDumpTest(v4, \"(\"+v4t+\") \"+v4s+\"\\n\")\n\taddDumpTest(pv4, \"(*\"+v4t+\")(\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(&pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(nv4, \"(*\"+v4t+\")(<nil>)\\n\")\n\n\t// Nil slice.\n\tv5 := []int(nil)\n\tnv5 := (*[]int)(nil)\n\tpv5 := &v5\n\tv5Addr := fmt.Sprintf(\"%p\", pv5)\n\tpv5Addr := fmt.Sprintf(\"%p\", &pv5)\n\tv5t := \"[]int\"\n\tv5s := \"<nil>\"\n\taddDumpTest(v5, \"(\"+v5t+\") \"+v5s+\"\\n\")\n\taddDumpTest(pv5, \"(*\"+v5t+\")(\"+v5Addr+\")(\"+v5s+\")\\n\")\n\taddDumpTest(&pv5, \"(**\"+v5t+\")(\"+pv5Addr+\"->\"+v5Addr+\")(\"+v5s+\")\\n\")\n\taddDumpTest(nv5, \"(*\"+v5t+\")(<nil>)\\n\")\n}\n\nfunc addStringDumpTests() {\n\t// Standard string.\n\tv := \"test\"\n\tvLen := fmt.Sprintf(\"%d\", len(v))\n\tnv := (*string)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"string\"\n\tvs := \"(len=\" + vLen + \") \\\"test\\\"\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n}\n\nfunc addInterfaceDumpTests() {\n\t// Nil interface.\n\tvar v interface{}\n\tnv := (*interface{})(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"interface {}\"\n\tvs := \"<nil>\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Sub-interface.\n\tv2 := interface{}(uint16(65535))\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", 
pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"uint16\"\n\tv2s := \"65535\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n}\n\nfunc addMapDumpTests() {\n\t// Map with string keys and int vals.\n\tk := \"one\"\n\tkk := \"two\"\n\tm := map[string]int{k: 1, kk: 2}\n\tklen := fmt.Sprintf(\"%d\", len(k)) // not kLen to shut golint up\n\tkkLen := fmt.Sprintf(\"%d\", len(kk))\n\tmLen := fmt.Sprintf(\"%d\", len(m))\n\tnilMap := map[string]int(nil)\n\tnm := (*map[string]int)(nil)\n\tpm := &m\n\tmAddr := fmt.Sprintf(\"%p\", pm)\n\tpmAddr := fmt.Sprintf(\"%p\", &pm)\n\tmt := \"map[string]int\"\n\tmt1 := \"string\"\n\tmt2 := \"int\"\n\tms := \"(len=\" + mLen + \") {\\n (\" + mt1 + \") (len=\" + klen + \") \" +\n\t\t\"\\\"one\\\": (\" + mt2 + \") 1,\\n (\" + mt1 + \") (len=\" + kkLen +\n\t\t\") \\\"two\\\": (\" + mt2 + \") 2\\n}\"\n\tms2 := \"(len=\" + mLen + \") {\\n (\" + mt1 + \") (len=\" + kkLen + \") \" +\n\t\t\"\\\"two\\\": (\" + mt2 + \") 2,\\n (\" + mt1 + \") (len=\" + klen +\n\t\t\") \\\"one\\\": (\" + mt2 + \") 1\\n}\"\n\taddDumpTest(m, \"(\"+mt+\") \"+ms+\"\\n\", \"(\"+mt+\") \"+ms2+\"\\n\")\n\taddDumpTest(pm, \"(*\"+mt+\")(\"+mAddr+\")(\"+ms+\")\\n\",\n\t\t\"(*\"+mt+\")(\"+mAddr+\")(\"+ms2+\")\\n\")\n\taddDumpTest(&pm, \"(**\"+mt+\")(\"+pmAddr+\"->\"+mAddr+\")(\"+ms+\")\\n\",\n\t\t\"(**\"+mt+\")(\"+pmAddr+\"->\"+mAddr+\")(\"+ms2+\")\\n\")\n\taddDumpTest(nm, \"(*\"+mt+\")(<nil>)\\n\")\n\taddDumpTest(nilMap, \"(\"+mt+\") <nil>\\n\")\n\n\t// Map with custom formatter type on pointer receiver only keys and vals.\n\tk2 := pstringer(\"one\")\n\tv2 := pstringer(\"1\")\n\tm2 := map[pstringer]pstringer{k2: v2}\n\tk2Len := fmt.Sprintf(\"%d\", len(k2))\n\tv2Len := fmt.Sprintf(\"%d\", len(v2))\n\tm2Len := fmt.Sprintf(\"%d\", len(m2))\n\tnilMap2 := map[pstringer]pstringer(nil)\n\tnm2 := 
(*map[pstringer]pstringer)(nil)\n\tpm2 := &m2\n\tm2Addr := fmt.Sprintf(\"%p\", pm2)\n\tpm2Addr := fmt.Sprintf(\"%p\", &pm2)\n\tm2t := \"map[spew_test.pstringer]spew_test.pstringer\"\n\tm2t1 := \"spew_test.pstringer\"\n\tm2t2 := \"spew_test.pstringer\"\n\tm2s := \"(len=\" + m2Len + \") {\\n (\" + m2t1 + \") (len=\" + k2Len + \") \" +\n\t\t\"stringer one: (\" + m2t2 + \") (len=\" + v2Len + \") stringer 1\\n}\"\n\tif spew.UnsafeDisabled {\n\t\tm2s = \"(len=\" + m2Len + \") {\\n (\" + m2t1 + \") (len=\" + k2Len +\n\t\t\t\") \" + \"\\\"one\\\": (\" + m2t2 + \") (len=\" + v2Len +\n\t\t\t\") \\\"1\\\"\\n}\"\n\t}\n\taddDumpTest(m2, \"(\"+m2t+\") \"+m2s+\"\\n\")\n\taddDumpTest(pm2, \"(*\"+m2t+\")(\"+m2Addr+\")(\"+m2s+\")\\n\")\n\taddDumpTest(&pm2, \"(**\"+m2t+\")(\"+pm2Addr+\"->\"+m2Addr+\")(\"+m2s+\")\\n\")\n\taddDumpTest(nm2, \"(*\"+m2t+\")(<nil>)\\n\")\n\taddDumpTest(nilMap2, \"(\"+m2t+\") <nil>\\n\")\n\n\t// Map with interface keys and values.\n\tk3 := \"one\"\n\tk3Len := fmt.Sprintf(\"%d\", len(k3))\n\tm3 := map[interface{}]interface{}{k3: 1}\n\tm3Len := fmt.Sprintf(\"%d\", len(m3))\n\tnilMap3 := map[interface{}]interface{}(nil)\n\tnm3 := (*map[interface{}]interface{})(nil)\n\tpm3 := &m3\n\tm3Addr := fmt.Sprintf(\"%p\", pm3)\n\tpm3Addr := fmt.Sprintf(\"%p\", &pm3)\n\tm3t := \"map[interface {}]interface {}\"\n\tm3t1 := \"string\"\n\tm3t2 := \"int\"\n\tm3s := \"(len=\" + m3Len + \") {\\n (\" + m3t1 + \") (len=\" + k3Len + \") \" +\n\t\t\"\\\"one\\\": (\" + m3t2 + \") 1\\n}\"\n\taddDumpTest(m3, \"(\"+m3t+\") \"+m3s+\"\\n\")\n\taddDumpTest(pm3, \"(*\"+m3t+\")(\"+m3Addr+\")(\"+m3s+\")\\n\")\n\taddDumpTest(&pm3, \"(**\"+m3t+\")(\"+pm3Addr+\"->\"+m3Addr+\")(\"+m3s+\")\\n\")\n\taddDumpTest(nm3, \"(*\"+m3t+\")(<nil>)\\n\")\n\taddDumpTest(nilMap3, \"(\"+m3t+\") <nil>\\n\")\n\n\t// Map with nil interface value.\n\tk4 := \"nil\"\n\tk4Len := fmt.Sprintf(\"%d\", len(k4))\n\tm4 := map[string]interface{}{k4: nil}\n\tm4Len := fmt.Sprintf(\"%d\", len(m4))\n\tnilMap4 := 
map[string]interface{}(nil)\n\tnm4 := (*map[string]interface{})(nil)\n\tpm4 := &m4\n\tm4Addr := fmt.Sprintf(\"%p\", pm4)\n\tpm4Addr := fmt.Sprintf(\"%p\", &pm4)\n\tm4t := \"map[string]interface {}\"\n\tm4t1 := \"string\"\n\tm4t2 := \"interface {}\"\n\tm4s := \"(len=\" + m4Len + \") {\\n (\" + m4t1 + \") (len=\" + k4Len + \")\" +\n\t\t\" \\\"nil\\\": (\" + m4t2 + \") <nil>\\n}\"\n\taddDumpTest(m4, \"(\"+m4t+\") \"+m4s+\"\\n\")\n\taddDumpTest(pm4, \"(*\"+m4t+\")(\"+m4Addr+\")(\"+m4s+\")\\n\")\n\taddDumpTest(&pm4, \"(**\"+m4t+\")(\"+pm4Addr+\"->\"+m4Addr+\")(\"+m4s+\")\\n\")\n\taddDumpTest(nm4, \"(*\"+m4t+\")(<nil>)\\n\")\n\taddDumpTest(nilMap4, \"(\"+m4t+\") <nil>\\n\")\n}\n\nfunc addStructDumpTests() {\n\t// Struct with primitives.\n\ttype s1 struct {\n\t\ta int8\n\t\tb uint8\n\t}\n\tv := s1{127, 255}\n\tnv := (*s1)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"spew_test.s1\"\n\tvt2 := \"int8\"\n\tvt3 := \"uint8\"\n\tvs := \"{\\n a: (\" + vt2 + \") 127,\\n b: (\" + vt3 + \") 255\\n}\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Struct that contains another struct.\n\ttype s2 struct {\n\t\ts1 s1\n\t\tb  bool\n\t}\n\tv2 := s2{s1{127, 255}, true}\n\tnv2 := (*s2)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"spew_test.s2\"\n\tv2t2 := \"spew_test.s1\"\n\tv2t3 := \"int8\"\n\tv2t4 := \"uint8\"\n\tv2t5 := \"bool\"\n\tv2s := \"{\\n s1: (\" + v2t2 + \") {\\n  a: (\" + v2t3 + \") 127,\\n  b: (\" +\n\t\tv2t4 + \") 255\\n },\\n b: (\" + v2t5 + \") true\\n}\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, 
\"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(nv2, \"(*\"+v2t+\")(<nil>)\\n\")\n\n\t// Struct that contains custom type with Stringer pointer interface via both\n\t// exported and unexported fields.\n\ttype s3 struct {\n\t\ts pstringer\n\t\tS pstringer\n\t}\n\tv3 := s3{\"test\", \"test2\"}\n\tnv3 := (*s3)(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"spew_test.s3\"\n\tv3t2 := \"spew_test.pstringer\"\n\tv3s := \"{\\n s: (\" + v3t2 + \") (len=4) stringer test,\\n S: (\" + v3t2 +\n\t\t\") (len=5) stringer test2\\n}\"\n\tv3sp := v3s\n\tif spew.UnsafeDisabled {\n\t\tv3s = \"{\\n s: (\" + v3t2 + \") (len=4) \\\"test\\\",\\n S: (\" +\n\t\t\tv3t2 + \") (len=5) \\\"test2\\\"\\n}\"\n\t\tv3sp = \"{\\n s: (\" + v3t2 + \") (len=4) \\\"test\\\",\\n S: (\" +\n\t\t\tv3t2 + \") (len=5) stringer test2\\n}\"\n\t}\n\taddDumpTest(v3, \"(\"+v3t+\") \"+v3s+\"\\n\")\n\taddDumpTest(pv3, \"(*\"+v3t+\")(\"+v3Addr+\")(\"+v3sp+\")\\n\")\n\taddDumpTest(&pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")(\"+v3sp+\")\\n\")\n\taddDumpTest(nv3, \"(*\"+v3t+\")(<nil>)\\n\")\n\n\t// Struct that contains embedded struct and field to same struct.\n\te := embed{\"embedstr\"}\n\teLen := fmt.Sprintf(\"%d\", len(\"embedstr\"))\n\tv4 := embedwrap{embed: &e, e: &e}\n\tnv4 := (*embedwrap)(nil)\n\tpv4 := &v4\n\teAddr := fmt.Sprintf(\"%p\", &e)\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"spew_test.embedwrap\"\n\tv4t2 := \"spew_test.embed\"\n\tv4t3 := \"string\"\n\tv4s := \"{\\n embed: (*\" + v4t2 + \")(\" + eAddr + \")({\\n  a: (\" + v4t3 +\n\t\t\") (len=\" + eLen + \") \\\"embedstr\\\"\\n }),\\n e: (*\" + v4t2 +\n\t\t\")(\" + eAddr + \")({\\n  a: (\" + v4t3 + \") (len=\" + eLen + \")\" +\n\t\t\" \\\"embedstr\\\"\\n })\\n}\"\n\taddDumpTest(v4, \"(\"+v4t+\") \"+v4s+\"\\n\")\n\taddDumpTest(pv4, \"(*\"+v4t+\")(\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(&pv4, 
\"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(nv4, \"(*\"+v4t+\")(<nil>)\\n\")\n}\n\nfunc addUintptrDumpTests() {\n\t// Null pointer.\n\tv := uintptr(0)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"uintptr\"\n\tvs := \"<nil>\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\n\t// Address of real variable.\n\ti := 1\n\tv2 := uintptr(unsafe.Pointer(&i))\n\tnv2 := (*uintptr)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"uintptr\"\n\tv2s := fmt.Sprintf(\"%p\", &i)\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(nv2, \"(*\"+v2t+\")(<nil>)\\n\")\n}\n\nfunc addUnsafePointerDumpTests() {\n\t// Null pointer.\n\tv := unsafe.Pointer(uintptr(0))\n\tnv := (*unsafe.Pointer)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"unsafe.Pointer\"\n\tvs := \"<nil>\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Address of real variable.\n\ti := 1\n\tv2 := unsafe.Pointer(&i)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"unsafe.Pointer\"\n\tv2s := fmt.Sprintf(\"%p\", &i)\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n}\n\nfunc addChanDumpTests() {\n\t// 
Nil channel.\n\tvar v chan int\n\tpv := &v\n\tnv := (*chan int)(nil)\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"chan int\"\n\tvs := \"<nil>\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Real channel.\n\tv2 := make(chan int)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"chan int\"\n\tv2s := fmt.Sprintf(\"%p\", v2)\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n}\n\nfunc addFuncDumpTests() {\n\t// Function with no params and no returns.\n\tv := addIntDumpTests\n\tnv := (*func())(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"func()\"\n\tvs := fmt.Sprintf(\"%p\", v)\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Function with param and no returns.\n\tv2 := TestDump\n\tnv2 := (*func(*testing.T))(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"func(*testing.T)\"\n\tv2s := fmt.Sprintf(\"%p\", v2)\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(nv2, \"(*\"+v2t+\")(<nil>)\\n\")\n\n\t// Function with multiple params and multiple returns.\n\tvar v3 = func(i int, s string) (b bool, err error) {\n\t\treturn true, nil\n\t}\n\tnv3 := (*func(int, string) (bool, 
error))(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"func(int, string) (bool, error)\"\n\tv3s := fmt.Sprintf(\"%p\", v3)\n\taddDumpTest(v3, \"(\"+v3t+\") \"+v3s+\"\\n\")\n\taddDumpTest(pv3, \"(*\"+v3t+\")(\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(&pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(nv3, \"(*\"+v3t+\")(<nil>)\\n\")\n}\n\nfunc addCircularDumpTests() {\n\t// Struct that is circular through self referencing.\n\ttype circular struct {\n\t\tc *circular\n\t}\n\tv := circular{nil}\n\tv.c = &v\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"spew_test.circular\"\n\tvs := \"{\\n c: (*\" + vt + \")(\" + vAddr + \")({\\n  c: (*\" + vt + \")(\" +\n\t\tvAddr + \")(<already shown>)\\n })\\n}\"\n\tvs2 := \"{\\n c: (*\" + vt + \")(\" + vAddr + \")(<already shown>)\\n}\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs2+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs2+\")\\n\")\n\n\t// Structs that are circular through cross referencing.\n\tv2 := xref1{nil}\n\tts2 := xref2{&v2}\n\tv2.ps2 = &ts2\n\tpv2 := &v2\n\tts2Addr := fmt.Sprintf(\"%p\", &ts2)\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"spew_test.xref1\"\n\tv2t2 := \"spew_test.xref2\"\n\tv2s := \"{\\n ps2: (*\" + v2t2 + \")(\" + ts2Addr + \")({\\n  ps1: (*\" + v2t +\n\t\t\")(\" + v2Addr + \")({\\n   ps2: (*\" + v2t2 + \")(\" + ts2Addr +\n\t\t\")(<already shown>)\\n  })\\n })\\n}\"\n\tv2s2 := \"{\\n ps2: (*\" + v2t2 + \")(\" + ts2Addr + \")({\\n  ps1: (*\" + v2t +\n\t\t\")(\" + v2Addr + \")(<already shown>)\\n })\\n}\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s2+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s2+\")\\n\")\n\n\t// Structs that are 
indirectly circular.\n\tv3 := indirCir1{nil}\n\ttic2 := indirCir2{nil}\n\ttic3 := indirCir3{&v3}\n\ttic2.ps3 = &tic3\n\tv3.ps2 = &tic2\n\tpv3 := &v3\n\ttic2Addr := fmt.Sprintf(\"%p\", &tic2)\n\ttic3Addr := fmt.Sprintf(\"%p\", &tic3)\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"spew_test.indirCir1\"\n\tv3t2 := \"spew_test.indirCir2\"\n\tv3t3 := \"spew_test.indirCir3\"\n\tv3s := \"{\\n ps2: (*\" + v3t2 + \")(\" + tic2Addr + \")({\\n  ps3: (*\" + v3t3 +\n\t\t\")(\" + tic3Addr + \")({\\n   ps1: (*\" + v3t + \")(\" + v3Addr +\n\t\t\")({\\n    ps2: (*\" + v3t2 + \")(\" + tic2Addr +\n\t\t\")(<already shown>)\\n   })\\n  })\\n })\\n}\"\n\tv3s2 := \"{\\n ps2: (*\" + v3t2 + \")(\" + tic2Addr + \")({\\n  ps3: (*\" + v3t3 +\n\t\t\")(\" + tic3Addr + \")({\\n   ps1: (*\" + v3t + \")(\" + v3Addr +\n\t\t\")(<already shown>)\\n  })\\n })\\n}\"\n\taddDumpTest(v3, \"(\"+v3t+\") \"+v3s+\"\\n\")\n\taddDumpTest(pv3, \"(*\"+v3t+\")(\"+v3Addr+\")(\"+v3s2+\")\\n\")\n\taddDumpTest(&pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")(\"+v3s2+\")\\n\")\n}\n\nfunc addPanicDumpTests() {\n\t// Type that panics in its Stringer interface.\n\tv := panicer(127)\n\tnv := (*panicer)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"spew_test.panicer\"\n\tvs := \"(PANIC=test panic)127\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n}\n\nfunc addErrorDumpTests() {\n\t// Type that has a custom Error interface.\n\tv := customError(127)\n\tnv := (*customError)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"spew_test.customError\"\n\tvs := \"error: 127\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, 
\"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n}\n\n// TestDump executes all of the tests described by dumpTests.\nfunc TestDump(t *testing.T) {\n\t// Setup tests.\n\taddIntDumpTests()\n\taddUintDumpTests()\n\taddBoolDumpTests()\n\taddFloatDumpTests()\n\taddComplexDumpTests()\n\taddArrayDumpTests()\n\taddSliceDumpTests()\n\taddStringDumpTests()\n\taddInterfaceDumpTests()\n\taddMapDumpTests()\n\taddStructDumpTests()\n\taddUintptrDumpTests()\n\taddUnsafePointerDumpTests()\n\taddChanDumpTests()\n\taddFuncDumpTests()\n\taddCircularDumpTests()\n\taddPanicDumpTests()\n\taddErrorDumpTests()\n\taddCgoDumpTests()\n\n\tt.Logf(\"Running %d tests\", len(dumpTests))\n\tfor i, test := range dumpTests {\n\t\tbuf := new(bytes.Buffer)\n\t\tspew.Fdump(buf, test.in)\n\t\ts := buf.String()\n\t\tif testFailed(s, test.wants) {\n\t\t\tt.Errorf(\"Dump #%d\\n got: %s %s\", i, s, stringizeWants(test.wants))\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc TestDumpSortedKeys(t *testing.T) {\n\tcfg := spew.ConfigState{SortKeys: true}\n\ts := cfg.Sdump(map[int]string{1: \"1\", 3: \"3\", 2: \"2\"})\n\texpected := \"(map[int]string) (len=3) {\\n(int) 1: (string) (len=1) \" +\n\t\t\"\\\"1\\\",\\n(int) 2: (string) (len=1) \\\"2\\\",\\n(int) 3: (string) \" +\n\t\t\"(len=1) \\\"3\\\"\\n\" +\n\t\t\"}\\n\"\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch:\\n  %v %v\", s, expected)\n\t}\n\n\ts = cfg.Sdump(map[stringer]int{\"1\": 1, \"3\": 3, \"2\": 2})\n\texpected = \"(map[spew_test.stringer]int) (len=3) {\\n\" +\n\t\t\"(spew_test.stringer) (len=1) stringer 1: (int) 1,\\n\" +\n\t\t\"(spew_test.stringer) (len=1) stringer 2: (int) 2,\\n\" +\n\t\t\"(spew_test.stringer) (len=1) stringer 3: (int) 3\\n\" +\n\t\t\"}\\n\"\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch:\\n  %v %v\", s, expected)\n\t}\n\n\ts = cfg.Sdump(map[pstringer]int{pstringer(\"1\"): 1, pstringer(\"3\"): 3, 
pstringer(\"2\"): 2})\n\texpected = \"(map[spew_test.pstringer]int) (len=3) {\\n\" +\n\t\t\"(spew_test.pstringer) (len=1) stringer 1: (int) 1,\\n\" +\n\t\t\"(spew_test.pstringer) (len=1) stringer 2: (int) 2,\\n\" +\n\t\t\"(spew_test.pstringer) (len=1) stringer 3: (int) 3\\n\" +\n\t\t\"}\\n\"\n\tif spew.UnsafeDisabled {\n\t\texpected = \"(map[spew_test.pstringer]int) (len=3) {\\n\" +\n\t\t\t\"(spew_test.pstringer) (len=1) \\\"1\\\": (int) 1,\\n\" +\n\t\t\t\"(spew_test.pstringer) (len=1) \\\"2\\\": (int) 2,\\n\" +\n\t\t\t\"(spew_test.pstringer) (len=1) \\\"3\\\": (int) 3\\n\" +\n\t\t\t\"}\\n\"\n\t}\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch:\\n  %v %v\", s, expected)\n\t}\n\n\ts = cfg.Sdump(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2})\n\texpected = \"(map[spew_test.customError]int) (len=3) {\\n\" +\n\t\t\"(spew_test.customError) error: 1: (int) 1,\\n\" +\n\t\t\"(spew_test.customError) error: 2: (int) 2,\\n\" +\n\t\t\"(spew_test.customError) error: 3: (int) 3\\n\" +\n\t\t\"}\\n\"\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch:\\n  %v %v\", s, expected)\n\t}\n\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go",
    "content": "// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n//\n// Permission to use, copy, modify, and distribute this software for any\n// purpose with or without fee is hereby granted, provided that the above\n// copyright notice and this permission notice appear in all copies.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n// NOTE: Due to the following build constraints, this file will only be compiled\n// when both cgo is supported and \"-tags testcgo\" is added to the go test\n// command line.  This means the cgo tests are only added (and hence run) when\n// specifially requested.  This configuration is used because spew itself\n// does not require cgo to run even though it does handle certain cgo types\n// specially.  
Rather than forcing all clients to require cgo and an external\n// C compiler just to run the tests, this scheme makes them optional.\n// +build cgo,testcgo\n\npackage spew_test\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/davecgh/go-spew/spew/testdata\"\n)\n\nfunc addCgoDumpTests() {\n\t// C char pointer.\n\tv := testdata.GetCgoCharPointer()\n\tnv := testdata.GetCgoNullCharPointer()\n\tpv := &v\n\tvcAddr := fmt.Sprintf(\"%p\", v)\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"*testdata._Ctype_char\"\n\tvs := \"116\"\n\taddDumpTest(v, \"(\"+vt+\")(\"+vcAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\"->\"+vcAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\"->\"+vcAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(\"+vt+\")(<nil>)\\n\")\n\n\t// C char array.\n\tv2, v2l, v2c := testdata.GetCgoCharArray()\n\tv2Len := fmt.Sprintf(\"%d\", v2l)\n\tv2Cap := fmt.Sprintf(\"%d\", v2c)\n\tv2t := \"[6]testdata._Ctype_char\"\n\tv2s := \"(len=\" + v2Len + \" cap=\" + v2Cap + \") \" +\n\t\t\"{\\n 00000000  74 65 73 74 32 00                               \" +\n\t\t\"  |test2.|\\n}\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\n\t// C unsigned char array.\n\tv3, v3l, v3c := testdata.GetCgoUnsignedCharArray()\n\tv3Len := fmt.Sprintf(\"%d\", v3l)\n\tv3Cap := fmt.Sprintf(\"%d\", v3c)\n\tv3t := \"[6]testdata._Ctype_unsignedchar\"\n\tv3t2 := \"[6]testdata._Ctype_uchar\"\n\tv3s := \"(len=\" + v3Len + \" cap=\" + v3Cap + \") \" +\n\t\t\"{\\n 00000000  74 65 73 74 33 00                               \" +\n\t\t\"  |test3.|\\n}\"\n\taddDumpTest(v3, \"(\"+v3t+\") \"+v3s+\"\\n\", \"(\"+v3t2+\") \"+v3s+\"\\n\")\n\n\t// C signed char array.\n\tv4, v4l, v4c := testdata.GetCgoSignedCharArray()\n\tv4Len := fmt.Sprintf(\"%d\", v4l)\n\tv4Cap := fmt.Sprintf(\"%d\", v4c)\n\tv4t := \"[6]testdata._Ctype_schar\"\n\tv4t2 := \"testdata._Ctype_schar\"\n\tv4s := \"(len=\" + v4Len + \" cap=\" + v4Cap + \") \" 
+\n\t\t\"{\\n (\" + v4t2 + \") 116,\\n (\" + v4t2 + \") 101,\\n (\" + v4t2 +\n\t\t\") 115,\\n (\" + v4t2 + \") 116,\\n (\" + v4t2 + \") 52,\\n (\" + v4t2 +\n\t\t\") 0\\n}\"\n\taddDumpTest(v4, \"(\"+v4t+\") \"+v4s+\"\\n\")\n\n\t// C uint8_t array.\n\tv5, v5l, v5c := testdata.GetCgoUint8tArray()\n\tv5Len := fmt.Sprintf(\"%d\", v5l)\n\tv5Cap := fmt.Sprintf(\"%d\", v5c)\n\tv5t := \"[6]testdata._Ctype_uint8_t\"\n\tv5s := \"(len=\" + v5Len + \" cap=\" + v5Cap + \") \" +\n\t\t\"{\\n 00000000  74 65 73 74 35 00                               \" +\n\t\t\"  |test5.|\\n}\"\n\taddDumpTest(v5, \"(\"+v5t+\") \"+v5s+\"\\n\")\n\n\t// C typedefed unsigned char array.\n\tv6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()\n\tv6Len := fmt.Sprintf(\"%d\", v6l)\n\tv6Cap := fmt.Sprintf(\"%d\", v6c)\n\tv6t := \"[6]testdata._Ctype_custom_uchar_t\"\n\tv6s := \"(len=\" + v6Len + \" cap=\" + v6Cap + \") \" +\n\t\t\"{\\n 00000000  74 65 73 74 36 00                               \" +\n\t\t\"  |test6.|\\n}\"\n\taddDumpTest(v6, \"(\"+v6t+\") \"+v6s+\"\\n\")\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go",
    "content": "// Copyright (c) 2013 Dave Collins <dave@davec.name>\n//\n// Permission to use, copy, modify, and distribute this software for any\n// purpose with or without fee is hereby granted, provided that the above\n// copyright notice and this permission notice appear in all copies.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n// NOTE: Due to the following build constraints, this file will only be compiled\n// when either cgo is not supported or \"-tags testcgo\" is not added to the go\n// test command line.  This file intentionally does not setup any cgo tests in\n// this scenario.\n// +build !cgo !testcgo\n\npackage spew_test\n\nfunc addCgoDumpTests() {\n\t// Don't add any tests for cgo since this file is only compiled when\n\t// there should not be any cgo tests.\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/example_test.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\npackage spew_test\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n)\n\ntype Flag int\n\nconst (\n\tflagOne Flag = iota\n\tflagTwo\n)\n\nvar flagStrings = map[Flag]string{\n\tflagOne: \"flagOne\",\n\tflagTwo: \"flagTwo\",\n}\n\nfunc (f Flag) String() string {\n\tif s, ok := flagStrings[f]; ok {\n\t\treturn s\n\t}\n\treturn fmt.Sprintf(\"Unknown flag (%d)\", int(f))\n}\n\ntype Bar struct {\n\tdata uintptr\n}\n\ntype Foo struct {\n\tunexportedField Bar\n\tExportedField   map[interface{}]interface{}\n}\n\n// This example demonstrates how to use Dump to dump variables to stdout.\nfunc ExampleDump() {\n\t// The following package level declarations are assumed for this example:\n\t/*\n\t\ttype Flag int\n\n\t\tconst (\n\t\t\tflagOne Flag = iota\n\t\t\tflagTwo\n\t\t)\n\n\t\tvar flagStrings = map[Flag]string{\n\t\t\tflagOne: \"flagOne\",\n\t\t\tflagTwo: \"flagTwo\",\n\t\t}\n\n\t\tfunc (f Flag) String() string {\n\t\t\tif s, ok := flagStrings[f]; ok {\n\t\t\t\treturn s\n\t\t\t}\n\t\t\treturn fmt.Sprintf(\"Unknown flag (%d)\", int(f))\n\t\t}\n\n\t\ttype Bar struct {\n\t\t\tdata uintptr\n\t\t}\n\n\t\ttype Foo struct {\n\t\t\tunexportedField 
Bar\n\t\t\tExportedField   map[interface{}]interface{}\n\t\t}\n\t*/\n\n\t// Setup some sample data structures for the example.\n\tbar := Bar{uintptr(0)}\n\ts1 := Foo{bar, map[interface{}]interface{}{\"one\": true}}\n\tf := Flag(5)\n\tb := []byte{\n\t\t0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,\n\t\t0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,\n\t\t0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,\n\t\t0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,\n\t\t0x31, 0x32,\n\t}\n\n\t// Dump!\n\tspew.Dump(s1, f, b)\n\n\t// Output:\n\t// (spew_test.Foo) {\n\t//  unexportedField: (spew_test.Bar) {\n\t//   data: (uintptr) <nil>\n\t//  },\n\t//  ExportedField: (map[interface {}]interface {}) (len=1) {\n\t//   (string) (len=3) \"one\": (bool) true\n\t//  }\n\t// }\n\t// (spew_test.Flag) Unknown flag (5)\n\t// ([]uint8) (len=34 cap=34) {\n\t//  00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |\n\t//  00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!\"#$%&'()*+,-./0|\n\t//  00000020  31 32                                             |12|\n\t// }\n\t//\n}\n\n// This example demonstrates how to use Printf to display a variable with a\n// format string and inline formatting.\nfunc ExamplePrintf() {\n\t// Create a double pointer to a uint 8.\n\tui8 := uint8(5)\n\tpui8 := &ui8\n\tppui8 := &pui8\n\n\t// Create a circular data type.\n\ttype circular struct {\n\t\tui8 uint8\n\t\tc   *circular\n\t}\n\tc := circular{ui8: 1}\n\tc.c = &c\n\n\t// Print!\n\tspew.Printf(\"ppui8: %v\\n\", ppui8)\n\tspew.Printf(\"circular: %v\\n\", c)\n\n\t// Output:\n\t// ppui8: <**>5\n\t// circular: {1 <*>{1 <*><shown>}}\n}\n\n// This example demonstrates how to use a ConfigState.\nfunc ExampleConfigState() {\n\t// Modify the indent level of the ConfigState only.  
The global\n\t// configuration is not modified.\n\tscs := spew.ConfigState{Indent: \"\\t\"}\n\n\t// Output using the ConfigState instance.\n\tv := map[string]int{\"one\": 1}\n\tscs.Printf(\"v: %v\\n\", v)\n\tscs.Dump(v)\n\n\t// Output:\n\t// v: map[one:1]\n\t// (map[string]int) (len=1) {\n\t// \t(string) (len=3) \"one\": (int) 1\n\t// }\n}\n\n// This example demonstrates how to use ConfigState.Dump to dump variables to\n// stdout\nfunc ExampleConfigState_Dump() {\n\t// See the top-level Dump example for details on the types used in this\n\t// example.\n\n\t// Create two ConfigState instances with different indentation.\n\tscs := spew.ConfigState{Indent: \"\\t\"}\n\tscs2 := spew.ConfigState{Indent: \" \"}\n\n\t// Setup some sample data structures for the example.\n\tbar := Bar{uintptr(0)}\n\ts1 := Foo{bar, map[interface{}]interface{}{\"one\": true}}\n\n\t// Dump using the ConfigState instances.\n\tscs.Dump(s1)\n\tscs2.Dump(s1)\n\n\t// Output:\n\t// (spew_test.Foo) {\n\t// \tunexportedField: (spew_test.Bar) {\n\t// \t\tdata: (uintptr) <nil>\n\t// \t},\n\t// \tExportedField: (map[interface {}]interface {}) (len=1) {\n\t//\t\t(string) (len=3) \"one\": (bool) true\n\t// \t}\n\t// }\n\t// (spew_test.Foo) {\n\t//  unexportedField: (spew_test.Bar) {\n\t//   data: (uintptr) <nil>\n\t//  },\n\t//  ExportedField: (map[interface {}]interface {}) (len=1) {\n\t//   (string) (len=3) \"one\": (bool) true\n\t//  }\n\t// }\n\t//\n}\n\n// This example demonstrates how to use ConfigState.Printf to display a variable\n// with a format string and inline formatting.\nfunc ExampleConfigState_Printf() {\n\t// See the top-level Dump example for details on the types used in this\n\t// example.\n\n\t// Create two ConfigState instances and modify the method handling of the\n\t// first ConfigState only.\n\tscs := spew.NewDefaultConfig()\n\tscs2 := spew.NewDefaultConfig()\n\tscs.DisableMethods = true\n\n\t// Alternatively\n\t// scs := spew.ConfigState{Indent: \" \", DisableMethods: true}\n\t// 
scs2 := spew.ConfigState{Indent: \" \"}\n\n\t// This is of type Flag which implements a Stringer and has raw value 1.\n\tf := flagTwo\n\n\t// Dump using the ConfigState instances.\n\tscs.Printf(\"f: %v\\n\", f)\n\tscs2.Printf(\"f: %v\\n\", f)\n\n\t// Output:\n\t// f: 1\n\t// f: flagTwo\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/format.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\npackage spew\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// supportedFlags is a list of all the character flags supported by fmt package.\nconst supportedFlags = \"0-+# \"\n\n// formatState implements the fmt.Formatter interface and contains information\n// about the state of a formatting operation.  The NewFormatter function can\n// be used to get a new Formatter which can be used directly as arguments\n// in standard fmt package printing calls.\ntype formatState struct {\n\tvalue          interface{}\n\tfs             fmt.State\n\tdepth          int\n\tpointers       map[uintptr]int\n\tignoreNextType bool\n\tcs             *ConfigState\n}\n\n// buildDefaultFormat recreates the original format string without precision\n// and width information to pass in to fmt.Sprintf in the case of an\n// unrecognized type.  
Unless new types are added to the language, this\n// function won't ever be called.\nfunc (f *formatState) buildDefaultFormat() (format string) {\n\tbuf := bytes.NewBuffer(percentBytes)\n\n\tfor _, flag := range supportedFlags {\n\t\tif f.fs.Flag(int(flag)) {\n\t\t\tbuf.WriteRune(flag)\n\t\t}\n\t}\n\n\tbuf.WriteRune('v')\n\n\tformat = buf.String()\n\treturn format\n}\n\n// constructOrigFormat recreates the original format string including precision\n// and width information to pass along to the standard fmt package.  This allows\n// automatic deferral of all format strings this package doesn't support.\nfunc (f *formatState) constructOrigFormat(verb rune) (format string) {\n\tbuf := bytes.NewBuffer(percentBytes)\n\n\tfor _, flag := range supportedFlags {\n\t\tif f.fs.Flag(int(flag)) {\n\t\t\tbuf.WriteRune(flag)\n\t\t}\n\t}\n\n\tif width, ok := f.fs.Width(); ok {\n\t\tbuf.WriteString(strconv.Itoa(width))\n\t}\n\n\tif precision, ok := f.fs.Precision(); ok {\n\t\tbuf.Write(precisionBytes)\n\t\tbuf.WriteString(strconv.Itoa(precision))\n\t}\n\n\tbuf.WriteRune(verb)\n\n\tformat = buf.String()\n\treturn format\n}\n\n// unpackValue returns values inside of non-nil interfaces when possible and\n// ensures that types for values which have been unpacked from an interface\n// are displayed when the show types flag is also set.\n// This is useful for data types like structs, arrays, slices, and maps which\n// can contain varying types packed inside an interface.\nfunc (f *formatState) unpackValue(v reflect.Value) reflect.Value {\n\tif v.Kind() == reflect.Interface {\n\t\tf.ignoreNextType = false\n\t\tif !v.IsNil() {\n\t\t\tv = v.Elem()\n\t\t}\n\t}\n\treturn v\n}\n\n// formatPtr handles formatting of pointers by indirecting them as necessary.\nfunc (f *formatState) formatPtr(v reflect.Value) {\n\t// Display nil if top level pointer is nil.\n\tshowTypes := f.fs.Flag('#')\n\tif v.IsNil() && (!showTypes || f.ignoreNextType) {\n\t\tf.fs.Write(nilAngleBytes)\n\t\treturn\n\t}\n\n\t// 
Remove pointers at or below the current depth from map used to detect\n\t// circular refs.\n\tfor k, depth := range f.pointers {\n\t\tif depth >= f.depth {\n\t\t\tdelete(f.pointers, k)\n\t\t}\n\t}\n\n\t// Keep list of all dereferenced pointers to possibly show later.\n\tpointerChain := make([]uintptr, 0)\n\n\t// Figure out how many levels of indirection there are by derferencing\n\t// pointers and unpacking interfaces down the chain while detecting circular\n\t// references.\n\tnilFound := false\n\tcycleFound := false\n\tindirects := 0\n\tve := v\n\tfor ve.Kind() == reflect.Ptr {\n\t\tif ve.IsNil() {\n\t\t\tnilFound = true\n\t\t\tbreak\n\t\t}\n\t\tindirects++\n\t\taddr := ve.Pointer()\n\t\tpointerChain = append(pointerChain, addr)\n\t\tif pd, ok := f.pointers[addr]; ok && pd < f.depth {\n\t\t\tcycleFound = true\n\t\t\tindirects--\n\t\t\tbreak\n\t\t}\n\t\tf.pointers[addr] = f.depth\n\n\t\tve = ve.Elem()\n\t\tif ve.Kind() == reflect.Interface {\n\t\t\tif ve.IsNil() {\n\t\t\t\tnilFound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tve = ve.Elem()\n\t\t}\n\t}\n\n\t// Display type or indirection level depending on flags.\n\tif showTypes && !f.ignoreNextType {\n\t\tf.fs.Write(openParenBytes)\n\t\tf.fs.Write(bytes.Repeat(asteriskBytes, indirects))\n\t\tf.fs.Write([]byte(ve.Type().String()))\n\t\tf.fs.Write(closeParenBytes)\n\t} else {\n\t\tif nilFound || cycleFound {\n\t\t\tindirects += strings.Count(ve.Type().String(), \"*\")\n\t\t}\n\t\tf.fs.Write(openAngleBytes)\n\t\tf.fs.Write([]byte(strings.Repeat(\"*\", indirects)))\n\t\tf.fs.Write(closeAngleBytes)\n\t}\n\n\t// Display pointer information depending on flags.\n\tif f.fs.Flag('+') && (len(pointerChain) > 0) {\n\t\tf.fs.Write(openParenBytes)\n\t\tfor i, addr := range pointerChain {\n\t\t\tif i > 0 {\n\t\t\t\tf.fs.Write(pointerChainBytes)\n\t\t\t}\n\t\t\tprintHexPtr(f.fs, addr)\n\t\t}\n\t\tf.fs.Write(closeParenBytes)\n\t}\n\n\t// Display dereferenced value.\n\tswitch {\n\tcase nilFound == 
true:\n\t\tf.fs.Write(nilAngleBytes)\n\n\tcase cycleFound == true:\n\t\tf.fs.Write(circularShortBytes)\n\n\tdefault:\n\t\tf.ignoreNextType = true\n\t\tf.format(ve)\n\t}\n}\n\n// format is the main workhorse for providing the Formatter interface.  It\n// uses the passed reflect value to figure out what kind of object we are\n// dealing with and formats it appropriately.  It is a recursive function,\n// however circular data structures are detected and handled properly.\nfunc (f *formatState) format(v reflect.Value) {\n\t// Handle invalid reflect values immediately.\n\tkind := v.Kind()\n\tif kind == reflect.Invalid {\n\t\tf.fs.Write(invalidAngleBytes)\n\t\treturn\n\t}\n\n\t// Handle pointers specially.\n\tif kind == reflect.Ptr {\n\t\tf.formatPtr(v)\n\t\treturn\n\t}\n\n\t// Print type information unless already handled elsewhere.\n\tif !f.ignoreNextType && f.fs.Flag('#') {\n\t\tf.fs.Write(openParenBytes)\n\t\tf.fs.Write([]byte(v.Type().String()))\n\t\tf.fs.Write(closeParenBytes)\n\t}\n\tf.ignoreNextType = false\n\n\t// Call Stringer/error interfaces if they exist and the handle methods\n\t// flag is enabled.\n\tif !f.cs.DisableMethods {\n\t\tif (kind != reflect.Invalid) && (kind != reflect.Interface) {\n\t\t\tif handled := handleMethods(f.cs, f.fs, v); handled {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch kind {\n\tcase reflect.Invalid:\n\t\t// Do nothing.  
We should never get here since invalid has already\n\t\t// been handled above.\n\n\tcase reflect.Bool:\n\t\tprintBool(f.fs, v.Bool())\n\n\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:\n\t\tprintInt(f.fs, v.Int(), 10)\n\n\tcase reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:\n\t\tprintUint(f.fs, v.Uint(), 10)\n\n\tcase reflect.Float32:\n\t\tprintFloat(f.fs, v.Float(), 32)\n\n\tcase reflect.Float64:\n\t\tprintFloat(f.fs, v.Float(), 64)\n\n\tcase reflect.Complex64:\n\t\tprintComplex(f.fs, v.Complex(), 32)\n\n\tcase reflect.Complex128:\n\t\tprintComplex(f.fs, v.Complex(), 64)\n\n\tcase reflect.Slice:\n\t\tif v.IsNil() {\n\t\t\tf.fs.Write(nilAngleBytes)\n\t\t\tbreak\n\t\t}\n\t\tfallthrough\n\n\tcase reflect.Array:\n\t\tf.fs.Write(openBracketBytes)\n\t\tf.depth++\n\t\tif (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {\n\t\t\tf.fs.Write(maxShortBytes)\n\t\t} else {\n\t\t\tnumEntries := v.Len()\n\t\t\tfor i := 0; i < numEntries; i++ {\n\t\t\t\tif i > 0 {\n\t\t\t\t\tf.fs.Write(spaceBytes)\n\t\t\t\t}\n\t\t\t\tf.ignoreNextType = true\n\t\t\t\tf.format(f.unpackValue(v.Index(i)))\n\t\t\t}\n\t\t}\n\t\tf.depth--\n\t\tf.fs.Write(closeBracketBytes)\n\n\tcase reflect.String:\n\t\tf.fs.Write([]byte(v.String()))\n\n\tcase reflect.Interface:\n\t\t// The only time we should get here is for nil interfaces due to\n\t\t// unpackValue calls.\n\t\tif v.IsNil() {\n\t\t\tf.fs.Write(nilAngleBytes)\n\t\t}\n\n\tcase reflect.Ptr:\n\t\t// Do nothing.  
We should never get here since pointers have already\n\t\t// been handled above.\n\n\tcase reflect.Map:\n\t\t// nil maps should be indicated as different than empty maps\n\t\tif v.IsNil() {\n\t\t\tf.fs.Write(nilAngleBytes)\n\t\t\tbreak\n\t\t}\n\n\t\tf.fs.Write(openMapBytes)\n\t\tf.depth++\n\t\tif (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {\n\t\t\tf.fs.Write(maxShortBytes)\n\t\t} else {\n\t\t\tkeys := v.MapKeys()\n\t\t\tif f.cs.SortKeys {\n\t\t\t\tsortValues(keys, f.cs)\n\t\t\t}\n\t\t\tfor i, key := range keys {\n\t\t\t\tif i > 0 {\n\t\t\t\t\tf.fs.Write(spaceBytes)\n\t\t\t\t}\n\t\t\t\tf.ignoreNextType = true\n\t\t\t\tf.format(f.unpackValue(key))\n\t\t\t\tf.fs.Write(colonBytes)\n\t\t\t\tf.ignoreNextType = true\n\t\t\t\tf.format(f.unpackValue(v.MapIndex(key)))\n\t\t\t}\n\t\t}\n\t\tf.depth--\n\t\tf.fs.Write(closeMapBytes)\n\n\tcase reflect.Struct:\n\t\tnumFields := v.NumField()\n\t\tf.fs.Write(openBraceBytes)\n\t\tf.depth++\n\t\tif (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {\n\t\t\tf.fs.Write(maxShortBytes)\n\t\t} else {\n\t\t\tvt := v.Type()\n\t\t\tfor i := 0; i < numFields; i++ {\n\t\t\t\tif i > 0 {\n\t\t\t\t\tf.fs.Write(spaceBytes)\n\t\t\t\t}\n\t\t\t\tvtf := vt.Field(i)\n\t\t\t\tif f.fs.Flag('+') || f.fs.Flag('#') {\n\t\t\t\t\tf.fs.Write([]byte(vtf.Name))\n\t\t\t\t\tf.fs.Write(colonBytes)\n\t\t\t\t}\n\t\t\t\tf.format(f.unpackValue(v.Field(i)))\n\t\t\t}\n\t\t}\n\t\tf.depth--\n\t\tf.fs.Write(closeBraceBytes)\n\n\tcase reflect.Uintptr:\n\t\tprintHexPtr(f.fs, uintptr(v.Uint()))\n\n\tcase reflect.UnsafePointer, reflect.Chan, reflect.Func:\n\t\tprintHexPtr(f.fs, v.Pointer())\n\n\t// There were not any other types at the time this code was written, but\n\t// fall back to letting the default fmt package handle it if any get added.\n\tdefault:\n\t\tformat := f.buildDefaultFormat()\n\t\tif v.CanInterface() {\n\t\t\tfmt.Fprintf(f.fs, format, v.Interface())\n\t\t} else {\n\t\t\tfmt.Fprintf(f.fs, format, v.String())\n\t\t}\n\t}\n}\n\n// Format satisfies the 
fmt.Formatter interface. See NewFormatter for usage\n// details.\nfunc (f *formatState) Format(fs fmt.State, verb rune) {\n\tf.fs = fs\n\n\t// Use standard formatting for verbs that are not v.\n\tif verb != 'v' {\n\t\tformat := f.constructOrigFormat(verb)\n\t\tfmt.Fprintf(fs, format, f.value)\n\t\treturn\n\t}\n\n\tif f.value == nil {\n\t\tif fs.Flag('#') {\n\t\t\tfs.Write(interfaceBytes)\n\t\t}\n\t\tfs.Write(nilAngleBytes)\n\t\treturn\n\t}\n\n\tf.format(reflect.ValueOf(f.value))\n}\n\n// newFormatter is a helper function to consolidate the logic from the various\n// public methods which take varying config states.\nfunc newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {\n\tfs := &formatState{value: v, cs: cs}\n\tfs.pointers = make(map[uintptr]int)\n\treturn fs\n}\n\n/*\nNewFormatter returns a custom formatter that satisfies the fmt.Formatter\ninterface.  As a result, it integrates cleanly with standard fmt package\nprinting functions.  The formatter is useful for inline printing of smaller data\ntypes similar to the standard %v format specifier.\n\nThe custom formatter only responds to the %v (most compact), %+v (adds pointer\naddresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb\ncombinations.  Any other verbs such as %x and %q will be sent to the\nstandard fmt package for formatting.  In addition, the custom formatter ignores\nthe width and precision arguments (however they will still work on the format\nspecifiers not handled by the custom formatter).\n\nTypically this function shouldn't be called directly.  It is much easier to make\nuse of the custom formatter by calling one of the convenience functions such as\nPrintf, Println, or Fprintf.\n*/\nfunc NewFormatter(v interface{}) fmt.Formatter {\n\treturn newFormatter(&Config, v)\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/format_test.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\n/*\nTest Summary:\nNOTE: For each test, a nil pointer, a single pointer and double pointer to the\nbase test element are also tested to ensure proper indirection across all types.\n\n- Max int8, int16, int32, int64, int\n- Max uint8, uint16, uint32, uint64, uint\n- Boolean true and false\n- Standard complex64 and complex128\n- Array containing standard ints\n- Array containing type with custom formatter on pointer receiver only\n- Array containing interfaces\n- Slice containing standard float32 values\n- Slice containing type with custom formatter on pointer receiver only\n- Slice containing interfaces\n- Nil slice\n- Standard string\n- Nil interface\n- Sub-interface\n- Map with string keys and int vals\n- Map with custom formatter type on pointer receiver only keys and vals\n- Map with interface keys and values\n- Map with nil interface value\n- Struct with primitives\n- Struct that contains another struct\n- Struct that contains custom type with Stringer pointer interface via both\n  exported and unexported fields\n- Struct that contains embedded struct and field to same struct\n- Uintptr to 0 (null pointer)\n- Uintptr address of real variable\n- 
Unsafe.Pointer to 0 (null pointer)\n- Unsafe.Pointer to address of real variable\n- Nil channel\n- Standard int channel\n- Function with no params and no returns\n- Function with param and no returns\n- Function with multiple params and multiple returns\n- Struct that is circular through self referencing\n- Structs that are circular through cross referencing\n- Structs that are indirectly circular\n- Type that panics in its Stringer interface\n- Type that has a custom Error interface\n- %x passthrough with uint\n- %#x passthrough with uint\n- %f passthrough with precision\n- %f passthrough with width and precision\n- %d passthrough with width\n- %q passthrough with string\n*/\n\npackage spew_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n\t\"unsafe\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n)\n\n// formatterTest is used to describe a test to be performed against NewFormatter.\ntype formatterTest struct {\n\tformat string\n\tin     interface{}\n\twants  []string\n}\n\n// formatterTests houses all of the tests to be performed against NewFormatter.\nvar formatterTests = make([]formatterTest, 0)\n\n// addFormatterTest is a helper method to append the passed input and desired\n// result to formatterTests.\nfunc addFormatterTest(format string, in interface{}, wants ...string) {\n\ttest := formatterTest{format, in, wants}\n\tformatterTests = append(formatterTests, test)\n}\n\nfunc addIntFormatterTests() {\n\t// Max int8.\n\tv := int8(127)\n\tnv := (*int8)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"int8\"\n\tvs := \"127\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, 
\"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Max int16.\n\tv2 := int16(32767)\n\tnv2 := (*int16)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"int16\"\n\tv2s := \"32767\"\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\n\t// Max int32.\n\tv3 := int32(2147483647)\n\tnv3 := (*int32)(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"int32\"\n\tv3s := \"2147483647\"\n\taddFormatterTest(\"%v\", v3, v3s)\n\taddFormatterTest(\"%v\", pv3, \"<*>\"+v3s)\n\taddFormatterTest(\"%v\", &pv3, \"<**>\"+v3s)\n\taddFormatterTest(\"%v\", nv3, 
\"<nil>\")\n\taddFormatterTest(\"%+v\", v3, v3s)\n\taddFormatterTest(\"%+v\", pv3, \"<*>(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", &pv3, \"<**>(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%#v\", v3, \"(\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", pv3, \"(*\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", &pv3, \"(**\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v3, \"(\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#+v\", pv3, \"(*\"+v3t+\")(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%#+v\", &pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\n\t// Max int64.\n\tv4 := int64(9223372036854775807)\n\tnv4 := (*int64)(nil)\n\tpv4 := &v4\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"int64\"\n\tv4s := \"9223372036854775807\"\n\taddFormatterTest(\"%v\", v4, v4s)\n\taddFormatterTest(\"%v\", pv4, \"<*>\"+v4s)\n\taddFormatterTest(\"%v\", &pv4, \"<**>\"+v4s)\n\taddFormatterTest(\"%v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%+v\", v4, v4s)\n\taddFormatterTest(\"%+v\", pv4, \"<*>(\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%+v\", &pv4, \"<**>(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%+v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%#v\", v4, \"(\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", pv4, \"(*\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", &pv4, \"(**\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v4, \"(\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", pv4, \"(*\"+v4t+\")(\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", &pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n\n\t// Max int.\n\tv5 := int(2147483647)\n\tnv5 := (*int)(nil)\n\tpv5 := &v5\n\tv5Addr := fmt.Sprintf(\"%p\", pv5)\n\tpv5Addr := 
fmt.Sprintf(\"%p\", &pv5)\n\tv5t := \"int\"\n\tv5s := \"2147483647\"\n\taddFormatterTest(\"%v\", v5, v5s)\n\taddFormatterTest(\"%v\", pv5, \"<*>\"+v5s)\n\taddFormatterTest(\"%v\", &pv5, \"<**>\"+v5s)\n\taddFormatterTest(\"%v\", nv5, \"<nil>\")\n\taddFormatterTest(\"%+v\", v5, v5s)\n\taddFormatterTest(\"%+v\", pv5, \"<*>(\"+v5Addr+\")\"+v5s)\n\taddFormatterTest(\"%+v\", &pv5, \"<**>(\"+pv5Addr+\"->\"+v5Addr+\")\"+v5s)\n\taddFormatterTest(\"%+v\", nv5, \"<nil>\")\n\taddFormatterTest(\"%#v\", v5, \"(\"+v5t+\")\"+v5s)\n\taddFormatterTest(\"%#v\", pv5, \"(*\"+v5t+\")\"+v5s)\n\taddFormatterTest(\"%#v\", &pv5, \"(**\"+v5t+\")\"+v5s)\n\taddFormatterTest(\"%#v\", nv5, \"(*\"+v5t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v5, \"(\"+v5t+\")\"+v5s)\n\taddFormatterTest(\"%#+v\", pv5, \"(*\"+v5t+\")(\"+v5Addr+\")\"+v5s)\n\taddFormatterTest(\"%#+v\", &pv5, \"(**\"+v5t+\")(\"+pv5Addr+\"->\"+v5Addr+\")\"+v5s)\n\taddFormatterTest(\"%#+v\", nv5, \"(*\"+v5t+\")\"+\"<nil>\")\n}\n\nfunc addUintFormatterTests() {\n\t// Max uint8.\n\tv := uint8(255)\n\tnv := (*uint8)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"uint8\"\n\tvs := \"255\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, 
\"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Max uint16.\n\tv2 := uint16(65535)\n\tnv2 := (*uint16)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"uint16\"\n\tv2s := \"65535\"\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\n\t// Max uint32.\n\tv3 := uint32(4294967295)\n\tnv3 := (*uint32)(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"uint32\"\n\tv3s := \"4294967295\"\n\taddFormatterTest(\"%v\", v3, v3s)\n\taddFormatterTest(\"%v\", pv3, \"<*>\"+v3s)\n\taddFormatterTest(\"%v\", &pv3, \"<**>\"+v3s)\n\taddFormatterTest(\"%v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%+v\", v3, v3s)\n\taddFormatterTest(\"%+v\", pv3, \"<*>(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", &pv3, \"<**>(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%#v\", v3, \"(\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", pv3, \"(*\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", &pv3, 
\"(**\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v3, \"(\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#+v\", pv3, \"(*\"+v3t+\")(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%#+v\", &pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\n\t// Max uint64.\n\tv4 := uint64(18446744073709551615)\n\tnv4 := (*uint64)(nil)\n\tpv4 := &v4\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"uint64\"\n\tv4s := \"18446744073709551615\"\n\taddFormatterTest(\"%v\", v4, v4s)\n\taddFormatterTest(\"%v\", pv4, \"<*>\"+v4s)\n\taddFormatterTest(\"%v\", &pv4, \"<**>\"+v4s)\n\taddFormatterTest(\"%v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%+v\", v4, v4s)\n\taddFormatterTest(\"%+v\", pv4, \"<*>(\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%+v\", &pv4, \"<**>(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%+v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%#v\", v4, \"(\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", pv4, \"(*\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", &pv4, \"(**\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v4, \"(\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", pv4, \"(*\"+v4t+\")(\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", &pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n\n\t// Max uint.\n\tv5 := uint(4294967295)\n\tnv5 := (*uint)(nil)\n\tpv5 := &v5\n\tv5Addr := fmt.Sprintf(\"%p\", pv5)\n\tpv5Addr := fmt.Sprintf(\"%p\", &pv5)\n\tv5t := \"uint\"\n\tv5s := \"4294967295\"\n\taddFormatterTest(\"%v\", v5, v5s)\n\taddFormatterTest(\"%v\", pv5, \"<*>\"+v5s)\n\taddFormatterTest(\"%v\", &pv5, \"<**>\"+v5s)\n\taddFormatterTest(\"%v\", nv5, \"<nil>\")\n\taddFormatterTest(\"%+v\", v5, v5s)\n\taddFormatterTest(\"%+v\", pv5, 
\"<*>(\"+v5Addr+\")\"+v5s)\n\taddFormatterTest(\"%+v\", &pv5, \"<**>(\"+pv5Addr+\"->\"+v5Addr+\")\"+v5s)\n\taddFormatterTest(\"%+v\", nv5, \"<nil>\")\n\taddFormatterTest(\"%#v\", v5, \"(\"+v5t+\")\"+v5s)\n\taddFormatterTest(\"%#v\", pv5, \"(*\"+v5t+\")\"+v5s)\n\taddFormatterTest(\"%#v\", &pv5, \"(**\"+v5t+\")\"+v5s)\n\taddFormatterTest(\"%#v\", nv5, \"(*\"+v5t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v5, \"(\"+v5t+\")\"+v5s)\n\taddFormatterTest(\"%#+v\", pv5, \"(*\"+v5t+\")(\"+v5Addr+\")\"+v5s)\n\taddFormatterTest(\"%#+v\", &pv5, \"(**\"+v5t+\")(\"+pv5Addr+\"->\"+v5Addr+\")\"+v5s)\n\taddFormatterTest(\"%#v\", nv5, \"(*\"+v5t+\")\"+\"<nil>\")\n}\n\nfunc addBoolFormatterTests() {\n\t// Boolean true.\n\tv := bool(true)\n\tnv := (*bool)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"bool\"\n\tvs := \"true\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Boolean false.\n\tv2 := bool(false)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"bool\"\n\tv2s := \"false\"\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, 
\"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n}\n\nfunc addFloatFormatterTests() {\n\t// Standard float32.\n\tv := float32(3.1415)\n\tnv := (*float32)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"float32\"\n\tvs := \"3.1415\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Standard float64.\n\tv2 := float64(3.1415926)\n\tnv2 := (*float64)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"float64\"\n\tv2s := \"3.1415926\"\n\taddFormatterTest(\"%v\", v2, 
v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n}\n\nfunc addComplexFormatterTests() {\n\t// Standard complex64.\n\tv := complex(float32(6), -2)\n\tnv := (*complex64)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"complex64\"\n\tvs := \"(6-2i)\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// 
Standard complex128.\n\tv2 := complex(float64(-6), 2)\n\tnv2 := (*complex128)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"complex128\"\n\tv2s := \"(-6+2i)\"\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n}\n\nfunc addArrayFormatterTests() {\n\t// Array containing standard ints.\n\tv := [3]int{1, 2, 3}\n\tnv := (*[3]int)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"[3]int\"\n\tvs := \"[1 2 3]\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, 
\"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Array containing type with custom formatter on pointer receiver only.\n\tv2 := [3]pstringer{\"1\", \"2\", \"3\"}\n\tnv2 := (*[3]pstringer)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"[3]spew_test.pstringer\"\n\tv2sp := \"[stringer 1 stringer 2 stringer 3]\"\n\tv2s := v2sp\n\tif spew.UnsafeDisabled {\n\t\tv2s = \"[1 2 3]\"\n\t}\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2sp)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2sp)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2sp)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2sp)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2sp)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2sp)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2sp)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2sp)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\n\t// Array containing interfaces.\n\tv3 := [3]interface{}{\"one\", int(2), uint(3)}\n\tnv3 := (*[3]interface{})(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"[3]interface {}\"\n\tv3t2 := \"string\"\n\tv3t3 := \"int\"\n\tv3t4 := \"uint\"\n\tv3s := \"[one 2 3]\"\n\tv3s2 := \"[(\" + v3t2 + \")one (\" + v3t3 + \")2 (\" + v3t4 + 
\")3]\"\n\taddFormatterTest(\"%v\", v3, v3s)\n\taddFormatterTest(\"%v\", pv3, \"<*>\"+v3s)\n\taddFormatterTest(\"%v\", &pv3, \"<**>\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%+v\", v3, v3s)\n\taddFormatterTest(\"%+v\", pv3, \"<*>(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", &pv3, \"<**>(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%#v\", v3, \"(\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", pv3, \"(*\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", &pv3, \"(**\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v3, \"(\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", pv3, \"(*\"+v3t+\")(\"+v3Addr+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", &pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n}\n\nfunc addSliceFormatterTests() {\n\t// Slice containing standard float32 values.\n\tv := []float32{3.14, 6.28, 12.56}\n\tnv := (*[]float32)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"[]float32\"\n\tvs := \"[3.14 6.28 12.56]\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, 
\"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Slice containing type with custom formatter on pointer receiver only.\n\tv2 := []pstringer{\"1\", \"2\", \"3\"}\n\tnv2 := (*[]pstringer)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"[]spew_test.pstringer\"\n\tv2s := \"[stringer 1 stringer 2 stringer 3]\"\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\n\t// Slice containing interfaces.\n\tv3 := []interface{}{\"one\", int(2), uint(3), nil}\n\tnv3 := (*[]interface{})(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"[]interface {}\"\n\tv3t2 := \"string\"\n\tv3t3 := \"int\"\n\tv3t4 := \"uint\"\n\tv3t5 := \"interface {}\"\n\tv3s := \"[one 2 3 <nil>]\"\n\tv3s2 := \"[(\" + v3t2 + \")one (\" + v3t3 + \")2 (\" + v3t4 + \")3 (\" + v3t5 +\n\t\t\")<nil>]\"\n\taddFormatterTest(\"%v\", v3, v3s)\n\taddFormatterTest(\"%v\", pv3, \"<*>\"+v3s)\n\taddFormatterTest(\"%v\", &pv3, \"<**>\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, 
\"<nil>\")\n\taddFormatterTest(\"%+v\", v3, v3s)\n\taddFormatterTest(\"%+v\", pv3, \"<*>(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", &pv3, \"<**>(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%#v\", v3, \"(\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", pv3, \"(*\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", &pv3, \"(**\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v3, \"(\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", pv3, \"(*\"+v3t+\")(\"+v3Addr+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", &pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\n\t// Nil slice.\n\tvar v4 []int\n\tnv4 := (*[]int)(nil)\n\tpv4 := &v4\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"[]int\"\n\tv4s := \"<nil>\"\n\taddFormatterTest(\"%v\", v4, v4s)\n\taddFormatterTest(\"%v\", pv4, \"<*>\"+v4s)\n\taddFormatterTest(\"%v\", &pv4, \"<**>\"+v4s)\n\taddFormatterTest(\"%+v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%+v\", v4, v4s)\n\taddFormatterTest(\"%+v\", pv4, \"<*>(\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%+v\", &pv4, \"<**>(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%+v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%#v\", v4, \"(\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", pv4, \"(*\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", &pv4, \"(**\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v4, \"(\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", pv4, \"(*\"+v4t+\")(\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", &pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n}\n\nfunc addStringFormatterTests() {\n\t// Standard string.\n\tv := \"test\"\n\tnv := (*string)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", 
pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"string\"\n\tvs := \"test\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n}\n\nfunc addInterfaceFormatterTests() {\n\t// Nil interface.\n\tvar v interface{}\n\tnv := (*interface{})(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"interface {}\"\n\tvs := \"<nil>\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, 
\"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Sub-interface.\n\tv2 := interface{}(uint16(65535))\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"uint16\"\n\tv2s := \"65535\"\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n}\n\nfunc addMapFormatterTests() {\n\t// Map with string keys and int vals.\n\tv := map[string]int{\"one\": 1, \"two\": 2}\n\tnilMap := map[string]int(nil)\n\tnv := (*map[string]int)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"map[string]int\"\n\tvs := \"map[one:1 two:2]\"\n\tvs2 := \"map[two:2 one:1]\"\n\taddFormatterTest(\"%v\", v, vs, vs2)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs, \"<*>\"+vs2)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs, \"<**>\"+vs2)\n\taddFormatterTest(\"%+v\", nilMap, \"<nil>\")\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs, vs2)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs, \"<*>(\"+vAddr+\")\"+vs2)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs,\n\t\t\"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs2)\n\taddFormatterTest(\"%+v\", nilMap, \"<nil>\")\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs, 
\"(\"+vt+\")\"+vs2)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs, \"(*\"+vt+\")\"+vs2)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs, \"(**\"+vt+\")\"+vs2)\n\taddFormatterTest(\"%#v\", nilMap, \"(\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs, \"(\"+vt+\")\"+vs2)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs,\n\t\t\"(*\"+vt+\")(\"+vAddr+\")\"+vs2)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs,\n\t\t\"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs2)\n\taddFormatterTest(\"%#+v\", nilMap, \"(\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Map with custom formatter type on pointer receiver only keys and vals.\n\tv2 := map[pstringer]pstringer{\"one\": \"1\"}\n\tnv2 := (*map[pstringer]pstringer)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"map[spew_test.pstringer]spew_test.pstringer\"\n\tv2s := \"map[stringer one:stringer 1]\"\n\tif spew.UnsafeDisabled {\n\t\tv2s = \"map[one:1]\"\n\t}\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, 
\"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\n\t// Map with interface keys and values.\n\tv3 := map[interface{}]interface{}{\"one\": 1}\n\tnv3 := (*map[interface{}]interface{})(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"map[interface {}]interface {}\"\n\tv3t1 := \"string\"\n\tv3t2 := \"int\"\n\tv3s := \"map[one:1]\"\n\tv3s2 := \"map[(\" + v3t1 + \")one:(\" + v3t2 + \")1]\"\n\taddFormatterTest(\"%v\", v3, v3s)\n\taddFormatterTest(\"%v\", pv3, \"<*>\"+v3s)\n\taddFormatterTest(\"%v\", &pv3, \"<**>\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%+v\", v3, v3s)\n\taddFormatterTest(\"%+v\", pv3, \"<*>(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", &pv3, \"<**>(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%#v\", v3, \"(\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", pv3, \"(*\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", &pv3, \"(**\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v3, \"(\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", pv3, \"(*\"+v3t+\")(\"+v3Addr+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", &pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\n\t// Map with nil interface value\n\tv4 := map[string]interface{}{\"nil\": nil}\n\tnv4 := (*map[string]interface{})(nil)\n\tpv4 := &v4\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"map[string]interface {}\"\n\tv4t1 := \"interface {}\"\n\tv4s := \"map[nil:<nil>]\"\n\tv4s2 := \"map[nil:(\" + v4t1 + \")<nil>]\"\n\taddFormatterTest(\"%v\", v4, v4s)\n\taddFormatterTest(\"%v\", pv4, \"<*>\"+v4s)\n\taddFormatterTest(\"%v\", &pv4, \"<**>\"+v4s)\n\taddFormatterTest(\"%+v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%+v\", v4, 
v4s)\n\taddFormatterTest(\"%+v\", pv4, \"<*>(\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%+v\", &pv4, \"<**>(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%+v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%#v\", v4, \"(\"+v4t+\")\"+v4s2)\n\taddFormatterTest(\"%#v\", pv4, \"(*\"+v4t+\")\"+v4s2)\n\taddFormatterTest(\"%#v\", &pv4, \"(**\"+v4t+\")\"+v4s2)\n\taddFormatterTest(\"%#v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v4, \"(\"+v4t+\")\"+v4s2)\n\taddFormatterTest(\"%#+v\", pv4, \"(*\"+v4t+\")(\"+v4Addr+\")\"+v4s2)\n\taddFormatterTest(\"%#+v\", &pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s2)\n\taddFormatterTest(\"%#+v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n}\n\nfunc addStructFormatterTests() {\n\t// Struct with primitives.\n\ttype s1 struct {\n\t\ta int8\n\t\tb uint8\n\t}\n\tv := s1{127, 255}\n\tnv := (*s1)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"spew_test.s1\"\n\tvt2 := \"int8\"\n\tvt3 := \"uint8\"\n\tvs := \"{127 255}\"\n\tvs2 := \"{a:127 b:255}\"\n\tvs3 := \"{a:(\" + vt2 + \")127 b:(\" + vt3 + \")255}\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs2)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs2)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs2)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs3)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs3)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs3)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs3)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs3)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs3)\n\taddFormatterTest(\"%#+v\", nv, 
\"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Struct that contains another struct.\n\ttype s2 struct {\n\t\ts1 s1\n\t\tb  bool\n\t}\n\tv2 := s2{s1{127, 255}, true}\n\tnv2 := (*s2)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"spew_test.s2\"\n\tv2t2 := \"spew_test.s1\"\n\tv2t3 := \"int8\"\n\tv2t4 := \"uint8\"\n\tv2t5 := \"bool\"\n\tv2s := \"{{127 255} true}\"\n\tv2s2 := \"{s1:{a:127 b:255} b:true}\"\n\tv2s3 := \"{s1:(\" + v2t2 + \"){a:(\" + v2t3 + \")127 b:(\" + v2t4 + \")255} b:(\" +\n\t\tv2t5 + \")true}\"\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s2)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s2)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s2)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s3)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s3)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s3)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s3)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s3)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s3)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\n\t// Struct that contains custom type with Stringer pointer interface via both\n\t// exported and unexported fields.\n\ttype s3 struct {\n\t\ts pstringer\n\t\tS pstringer\n\t}\n\tv3 := s3{\"test\", \"test2\"}\n\tnv3 := (*s3)(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"spew_test.s3\"\n\tv3t2 := \"spew_test.pstringer\"\n\tv3s := \"{stringer test stringer test2}\"\n\tv3sp := v3s\n\tv3s2 := \"{s:stringer test S:stringer test2}\"\n\tv3s2p := v3s2\n\tv3s3 
:= \"{s:(\" + v3t2 + \")stringer test S:(\" + v3t2 + \")stringer test2}\"\n\tv3s3p := v3s3\n\tif spew.UnsafeDisabled {\n\t\tv3s = \"{test test2}\"\n\t\tv3sp = \"{test stringer test2}\"\n\t\tv3s2 = \"{s:test S:test2}\"\n\t\tv3s2p = \"{s:test S:stringer test2}\"\n\t\tv3s3 = \"{s:(\" + v3t2 + \")test S:(\" + v3t2 + \")test2}\"\n\t\tv3s3p = \"{s:(\" + v3t2 + \")test S:(\" + v3t2 + \")stringer test2}\"\n\t}\n\taddFormatterTest(\"%v\", v3, v3s)\n\taddFormatterTest(\"%v\", pv3, \"<*>\"+v3sp)\n\taddFormatterTest(\"%v\", &pv3, \"<**>\"+v3sp)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%+v\", v3, v3s2)\n\taddFormatterTest(\"%+v\", pv3, \"<*>(\"+v3Addr+\")\"+v3s2p)\n\taddFormatterTest(\"%+v\", &pv3, \"<**>(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s2p)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%#v\", v3, \"(\"+v3t+\")\"+v3s3)\n\taddFormatterTest(\"%#v\", pv3, \"(*\"+v3t+\")\"+v3s3p)\n\taddFormatterTest(\"%#v\", &pv3, \"(**\"+v3t+\")\"+v3s3p)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v3, \"(\"+v3t+\")\"+v3s3)\n\taddFormatterTest(\"%#+v\", pv3, \"(*\"+v3t+\")(\"+v3Addr+\")\"+v3s3p)\n\taddFormatterTest(\"%#+v\", &pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s3p)\n\taddFormatterTest(\"%#+v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\n\t// Struct that contains embedded struct and field to same struct.\n\te := embed{\"embedstr\"}\n\tv4 := embedwrap{embed: &e, e: &e}\n\tnv4 := (*embedwrap)(nil)\n\tpv4 := &v4\n\teAddr := fmt.Sprintf(\"%p\", &e)\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"spew_test.embedwrap\"\n\tv4t2 := \"spew_test.embed\"\n\tv4t3 := \"string\"\n\tv4s := \"{<*>{embedstr} <*>{embedstr}}\"\n\tv4s2 := \"{embed:<*>(\" + eAddr + \"){a:embedstr} e:<*>(\" + eAddr +\n\t\t\"){a:embedstr}}\"\n\tv4s3 := \"{embed:(*\" + v4t2 + \"){a:(\" + v4t3 + \")embedstr} e:(*\" + v4t2 +\n\t\t\"){a:(\" + v4t3 + \")embedstr}}\"\n\tv4s4 := \"{embed:(*\" + 
v4t2 + \")(\" + eAddr + \"){a:(\" + v4t3 +\n\t\t\")embedstr} e:(*\" + v4t2 + \")(\" + eAddr + \"){a:(\" + v4t3 + \")embedstr}}\"\n\taddFormatterTest(\"%v\", v4, v4s)\n\taddFormatterTest(\"%v\", pv4, \"<*>\"+v4s)\n\taddFormatterTest(\"%v\", &pv4, \"<**>\"+v4s)\n\taddFormatterTest(\"%+v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%+v\", v4, v4s2)\n\taddFormatterTest(\"%+v\", pv4, \"<*>(\"+v4Addr+\")\"+v4s2)\n\taddFormatterTest(\"%+v\", &pv4, \"<**>(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s2)\n\taddFormatterTest(\"%+v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%#v\", v4, \"(\"+v4t+\")\"+v4s3)\n\taddFormatterTest(\"%#v\", pv4, \"(*\"+v4t+\")\"+v4s3)\n\taddFormatterTest(\"%#v\", &pv4, \"(**\"+v4t+\")\"+v4s3)\n\taddFormatterTest(\"%#v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v4, \"(\"+v4t+\")\"+v4s4)\n\taddFormatterTest(\"%#+v\", pv4, \"(*\"+v4t+\")(\"+v4Addr+\")\"+v4s4)\n\taddFormatterTest(\"%#+v\", &pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s4)\n\taddFormatterTest(\"%#+v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n}\n\nfunc addUintptrFormatterTests() {\n\t// Null pointer.\n\tv := uintptr(0)\n\tnv := (*uintptr)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"uintptr\"\n\tvs := \"<nil>\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, 
\"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Address of real variable.\n\ti := 1\n\tv2 := uintptr(unsafe.Pointer(&i))\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"uintptr\"\n\tv2s := fmt.Sprintf(\"%p\", &i)\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n}\n\nfunc addUnsafePointerFormatterTests() {\n\t// Null pointer.\n\tv := unsafe.Pointer(uintptr(0))\n\tnv := (*unsafe.Pointer)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"unsafe.Pointer\"\n\tvs := \"<nil>\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, 
\"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Address of real variable.\n\ti := 1\n\tv2 := unsafe.Pointer(&i)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"unsafe.Pointer\"\n\tv2s := fmt.Sprintf(\"%p\", &i)\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n}\n\nfunc addChanFormatterTests() {\n\t// Nil channel.\n\tvar v chan int\n\tpv := &v\n\tnv := (*chan int)(nil)\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"chan int\"\n\tvs := \"<nil>\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, 
\"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Real channel.\n\tv2 := make(chan int)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"chan int\"\n\tv2s := fmt.Sprintf(\"%p\", v2)\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n}\n\nfunc addFuncFormatterTests() {\n\t// Function with no params and no returns.\n\tv := addIntFormatterTests\n\tnv := (*func())(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"func()\"\n\tvs := fmt.Sprintf(\"%p\", v)\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, 
\"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Function with param and no returns.\n\tv2 := TestFormatter\n\tnv2 := (*func(*testing.T))(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"func(*testing.T)\"\n\tv2s := fmt.Sprintf(\"%p\", v2)\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\n\t// Function with multiple params and multiple returns.\n\tvar v3 = func(i int, s string) (b bool, err error) {\n\t\treturn true, nil\n\t}\n\tnv3 := (*func(int, string) (bool, error))(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"func(int, string) (bool, error)\"\n\tv3s := fmt.Sprintf(\"%p\", v3)\n\taddFormatterTest(\"%v\", v3, v3s)\n\taddFormatterTest(\"%v\", pv3, \"<*>\"+v3s)\n\taddFormatterTest(\"%v\", &pv3, 
\"<**>\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%+v\", v3, v3s)\n\taddFormatterTest(\"%+v\", pv3, \"<*>(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", &pv3, \"<**>(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%#v\", v3, \"(\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", pv3, \"(*\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", &pv3, \"(**\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v3, \"(\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#+v\", pv3, \"(*\"+v3t+\")(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%#+v\", &pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%#+v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n}\n\nfunc addCircularFormatterTests() {\n\t// Struct that is circular through self referencing.\n\ttype circular struct {\n\t\tc *circular\n\t}\n\tv := circular{nil}\n\tv.c = &v\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"spew_test.circular\"\n\tvs := \"{<*>{<*><shown>}}\"\n\tvs2 := \"{<*><shown>}\"\n\tvs3 := \"{c:<*>(\" + vAddr + \"){c:<*>(\" + vAddr + \")<shown>}}\"\n\tvs4 := \"{c:<*>(\" + vAddr + \")<shown>}\"\n\tvs5 := \"{c:(*\" + vt + \"){c:(*\" + vt + \")<shown>}}\"\n\tvs6 := \"{c:(*\" + vt + \")<shown>}\"\n\tvs7 := \"{c:(*\" + vt + \")(\" + vAddr + \"){c:(*\" + vt + \")(\" + vAddr +\n\t\t\")<shown>}}\"\n\tvs8 := \"{c:(*\" + vt + \")(\" + vAddr + \")<shown>}\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs2)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs2)\n\taddFormatterTest(\"%+v\", v, vs3)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs4)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs4)\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs5)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs6)\n\taddFormatterTest(\"%#v\", &pv, 
\"(**\"+vt+\")\"+vs6)\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs7)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs8)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs8)\n\n\t// Structs that are circular through cross referencing.\n\tv2 := xref1{nil}\n\tts2 := xref2{&v2}\n\tv2.ps2 = &ts2\n\tpv2 := &v2\n\tts2Addr := fmt.Sprintf(\"%p\", &ts2)\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"spew_test.xref1\"\n\tv2t2 := \"spew_test.xref2\"\n\tv2s := \"{<*>{<*>{<*><shown>}}}\"\n\tv2s2 := \"{<*>{<*><shown>}}\"\n\tv2s3 := \"{ps2:<*>(\" + ts2Addr + \"){ps1:<*>(\" + v2Addr + \"){ps2:<*>(\" +\n\t\tts2Addr + \")<shown>}}}\"\n\tv2s4 := \"{ps2:<*>(\" + ts2Addr + \"){ps1:<*>(\" + v2Addr + \")<shown>}}\"\n\tv2s5 := \"{ps2:(*\" + v2t2 + \"){ps1:(*\" + v2t + \"){ps2:(*\" + v2t2 +\n\t\t\")<shown>}}}\"\n\tv2s6 := \"{ps2:(*\" + v2t2 + \"){ps1:(*\" + v2t + \")<shown>}}\"\n\tv2s7 := \"{ps2:(*\" + v2t2 + \")(\" + ts2Addr + \"){ps1:(*\" + v2t +\n\t\t\")(\" + v2Addr + \"){ps2:(*\" + v2t2 + \")(\" + ts2Addr +\n\t\t\")<shown>}}}\"\n\tv2s8 := \"{ps2:(*\" + v2t2 + \")(\" + ts2Addr + \"){ps1:(*\" + v2t +\n\t\t\")(\" + v2Addr + \")<shown>}}\"\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s2)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s2)\n\taddFormatterTest(\"%+v\", v2, v2s3)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s4)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s4)\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s5)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s6)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s6)\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s7)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s8)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s8)\n\n\t// Structs that are indirectly circular.\n\tv3 := 
indirCir1{nil}\n\ttic2 := indirCir2{nil}\n\ttic3 := indirCir3{&v3}\n\ttic2.ps3 = &tic3\n\tv3.ps2 = &tic2\n\tpv3 := &v3\n\ttic2Addr := fmt.Sprintf(\"%p\", &tic2)\n\ttic3Addr := fmt.Sprintf(\"%p\", &tic3)\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"spew_test.indirCir1\"\n\tv3t2 := \"spew_test.indirCir2\"\n\tv3t3 := \"spew_test.indirCir3\"\n\tv3s := \"{<*>{<*>{<*>{<*><shown>}}}}\"\n\tv3s2 := \"{<*>{<*>{<*><shown>}}}\"\n\tv3s3 := \"{ps2:<*>(\" + tic2Addr + \"){ps3:<*>(\" + tic3Addr + \"){ps1:<*>(\" +\n\t\tv3Addr + \"){ps2:<*>(\" + tic2Addr + \")<shown>}}}}\"\n\tv3s4 := \"{ps2:<*>(\" + tic2Addr + \"){ps3:<*>(\" + tic3Addr + \"){ps1:<*>(\" +\n\t\tv3Addr + \")<shown>}}}\"\n\tv3s5 := \"{ps2:(*\" + v3t2 + \"){ps3:(*\" + v3t3 + \"){ps1:(*\" + v3t +\n\t\t\"){ps2:(*\" + v3t2 + \")<shown>}}}}\"\n\tv3s6 := \"{ps2:(*\" + v3t2 + \"){ps3:(*\" + v3t3 + \"){ps1:(*\" + v3t +\n\t\t\")<shown>}}}\"\n\tv3s7 := \"{ps2:(*\" + v3t2 + \")(\" + tic2Addr + \"){ps3:(*\" + v3t3 + \")(\" +\n\t\ttic3Addr + \"){ps1:(*\" + v3t + \")(\" + v3Addr + \"){ps2:(*\" + v3t2 +\n\t\t\")(\" + tic2Addr + \")<shown>}}}}\"\n\tv3s8 := \"{ps2:(*\" + v3t2 + \")(\" + tic2Addr + \"){ps3:(*\" + v3t3 + \")(\" +\n\t\ttic3Addr + \"){ps1:(*\" + v3t + \")(\" + v3Addr + \")<shown>}}}\"\n\taddFormatterTest(\"%v\", v3, v3s)\n\taddFormatterTest(\"%v\", pv3, \"<*>\"+v3s2)\n\taddFormatterTest(\"%v\", &pv3, \"<**>\"+v3s2)\n\taddFormatterTest(\"%+v\", v3, v3s3)\n\taddFormatterTest(\"%+v\", pv3, \"<*>(\"+v3Addr+\")\"+v3s4)\n\taddFormatterTest(\"%+v\", &pv3, \"<**>(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s4)\n\taddFormatterTest(\"%#v\", v3, \"(\"+v3t+\")\"+v3s5)\n\taddFormatterTest(\"%#v\", pv3, \"(*\"+v3t+\")\"+v3s6)\n\taddFormatterTest(\"%#v\", &pv3, \"(**\"+v3t+\")\"+v3s6)\n\taddFormatterTest(\"%#+v\", v3, \"(\"+v3t+\")\"+v3s7)\n\taddFormatterTest(\"%#+v\", pv3, \"(*\"+v3t+\")(\"+v3Addr+\")\"+v3s8)\n\taddFormatterTest(\"%#+v\", &pv3, 
\"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s8)\n}\n\nfunc addPanicFormatterTests() {\n\t// Type that panics in its Stringer interface.\n\tv := panicer(127)\n\tnv := (*panicer)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"spew_test.panicer\"\n\tvs := \"(PANIC=test panic)127\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n}\n\nfunc addErrorFormatterTests() {\n\t// Type that has a custom Error interface.\n\tv := customError(127)\n\tnv := (*customError)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"spew_test.customError\"\n\tvs := \"error: 127\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", 
&pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n}\n\nfunc addPassthroughFormatterTests() {\n\t// %x passthrough with uint.\n\tv := uint(4294967295)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%x\", pv)\n\tpvAddr := fmt.Sprintf(\"%x\", &pv)\n\tvs := \"ffffffff\"\n\taddFormatterTest(\"%x\", v, vs)\n\taddFormatterTest(\"%x\", pv, vAddr)\n\taddFormatterTest(\"%x\", &pv, pvAddr)\n\n\t// %#x passthrough with uint.\n\tv2 := int(2147483647)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%#x\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%#x\", &pv2)\n\tv2s := \"0x7fffffff\"\n\taddFormatterTest(\"%#x\", v2, v2s)\n\taddFormatterTest(\"%#x\", pv2, v2Addr)\n\taddFormatterTest(\"%#x\", &pv2, pv2Addr)\n\n\t// %f passthrough with precision.\n\taddFormatterTest(\"%.2f\", 3.1415, \"3.14\")\n\taddFormatterTest(\"%.3f\", 3.1415, \"3.142\")\n\taddFormatterTest(\"%.4f\", 3.1415, \"3.1415\")\n\n\t// %f passthrough with width and precision.\n\taddFormatterTest(\"%5.2f\", 3.1415, \" 3.14\")\n\taddFormatterTest(\"%6.3f\", 3.1415, \" 3.142\")\n\taddFormatterTest(\"%7.4f\", 3.1415, \" 3.1415\")\n\n\t// %d passthrough with width.\n\taddFormatterTest(\"%3d\", 127, \"127\")\n\taddFormatterTest(\"%4d\", 127, \" 127\")\n\taddFormatterTest(\"%5d\", 127, \"  127\")\n\n\t// %q passthrough with string.\n\taddFormatterTest(\"%q\", \"test\", \"\\\"test\\\"\")\n}\n\n// TestFormatter executes all of the tests described by formatterTests.\nfunc TestFormatter(t *testing.T) {\n\t// Setup 
tests.\n\taddIntFormatterTests()\n\taddUintFormatterTests()\n\taddBoolFormatterTests()\n\taddFloatFormatterTests()\n\taddComplexFormatterTests()\n\taddArrayFormatterTests()\n\taddSliceFormatterTests()\n\taddStringFormatterTests()\n\taddInterfaceFormatterTests()\n\taddMapFormatterTests()\n\taddStructFormatterTests()\n\taddUintptrFormatterTests()\n\taddUnsafePointerFormatterTests()\n\taddChanFormatterTests()\n\taddFuncFormatterTests()\n\taddCircularFormatterTests()\n\taddPanicFormatterTests()\n\taddErrorFormatterTests()\n\taddPassthroughFormatterTests()\n\n\tt.Logf(\"Running %d tests\", len(formatterTests))\n\tfor i, test := range formatterTests {\n\t\tbuf := new(bytes.Buffer)\n\t\tspew.Fprintf(buf, test.format, test.in)\n\t\ts := buf.String()\n\t\tif testFailed(s, test.wants) {\n\t\t\tt.Errorf(\"Formatter #%d format: %s got: %s %s\", i, test.format, s,\n\t\t\t\tstringizeWants(test.wants))\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\ntype testStruct struct {\n\tx int\n}\n\nfunc (ts testStruct) String() string {\n\treturn fmt.Sprintf(\"ts.%d\", ts.x)\n}\n\ntype testStructP struct {\n\tx int\n}\n\nfunc (ts *testStructP) String() string {\n\treturn fmt.Sprintf(\"ts.%d\", ts.x)\n}\n\nfunc TestPrintSortedKeys(t *testing.T) {\n\tcfg := spew.ConfigState{SortKeys: true}\n\ts := cfg.Sprint(map[int]string{1: \"1\", 3: \"3\", 2: \"2\"})\n\texpected := \"map[1:1 2:2 3:3]\"\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch 1:\\n  %v %v\", s, expected)\n\t}\n\n\ts = cfg.Sprint(map[stringer]int{\"1\": 1, \"3\": 3, \"2\": 2})\n\texpected = \"map[stringer 1:1 stringer 2:2 stringer 3:3]\"\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch 2:\\n  %v %v\", s, expected)\n\t}\n\n\ts = cfg.Sprint(map[pstringer]int{pstringer(\"1\"): 1, pstringer(\"3\"): 3, pstringer(\"2\"): 2})\n\texpected = \"map[stringer 1:1 stringer 2:2 stringer 3:3]\"\n\tif spew.UnsafeDisabled {\n\t\texpected = \"map[1:1 2:2 3:3]\"\n\t}\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch 3:\\n  %v %v\", s, 
expected)\n\t}\n\n\ts = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2})\n\texpected = \"map[ts.1:1 ts.2:2 ts.3:3]\"\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch 4:\\n  %v %v\", s, expected)\n\t}\n\n\tif !spew.UnsafeDisabled {\n\t\ts = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2})\n\t\texpected = \"map[ts.1:1 ts.2:2 ts.3:3]\"\n\t\tif s != expected {\n\t\t\tt.Errorf(\"Sorted keys mismatch 5:\\n  %v %v\", s, expected)\n\t\t}\n\t}\n\n\ts = cfg.Sprint(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2})\n\texpected = \"map[error: 1:1 error: 2:2 error: 3:3]\"\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch 6:\\n  %v %v\", s, expected)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/internal_test.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\n/*\nThis test file is part of the spew package rather than than the spew_test\npackage because it needs access to internals to properly test certain cases\nwhich are not possible via the public interface since they should never happen.\n*/\n\npackage spew\n\nimport (\n\t\"bytes\"\n\t\"reflect\"\n\t\"testing\"\n)\n\n// dummyFmtState implements a fake fmt.State to use for testing invalid\n// reflect.Value handling.  This is necessary because the fmt package catches\n// invalid values before invoking the formatter on them.\ntype dummyFmtState struct {\n\tbytes.Buffer\n}\n\nfunc (dfs *dummyFmtState) Flag(f int) bool {\n\tif f == int('+') {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (dfs *dummyFmtState) Precision() (int, bool) {\n\treturn 0, false\n}\n\nfunc (dfs *dummyFmtState) Width() (int, bool) {\n\treturn 0, false\n}\n\n// TestInvalidReflectValue ensures the dump and formatter code handles an\n// invalid reflect value properly.  
This needs access to internal state since it\n// should never happen in real code and therefore can't be tested via the public\n// API.\nfunc TestInvalidReflectValue(t *testing.T) {\n\ti := 1\n\n\t// Dump invalid reflect value.\n\tv := new(reflect.Value)\n\tbuf := new(bytes.Buffer)\n\td := dumpState{w: buf, cs: &Config}\n\td.dump(*v)\n\ts := buf.String()\n\twant := \"<invalid>\"\n\tif s != want {\n\t\tt.Errorf(\"InvalidReflectValue #%d\\n got: %s want: %s\", i, s, want)\n\t}\n\ti++\n\n\t// Formatter invalid reflect value.\n\tbuf2 := new(dummyFmtState)\n\tf := formatState{value: *v, cs: &Config, fs: buf2}\n\tf.format(*v)\n\ts = buf2.String()\n\twant = \"<invalid>\"\n\tif s != want {\n\t\tt.Errorf(\"InvalidReflectValue #%d got: %s want: %s\", i, s, want)\n\t}\n}\n\n// SortValues makes the internal sortValues function available to the test\n// package.\nfunc SortValues(values []reflect.Value, cs *ConfigState) {\n\tsortValues(values, cs)\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go",
    "content": "// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n\n// Permission to use, copy, modify, and distribute this software for any\n// purpose with or without fee is hereby granted, provided that the above\n// copyright notice and this permission notice appear in all copies.\n\n// THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n// NOTE: Due to the following build constraints, this file will only be compiled\n// when the code is not running on Google App Engine, compiled by GopherJS, and\n// \"-tags safe\" is not added to the go build command line.  The \"disableunsafe\"\n// tag is deprecated and thus should not be used.\n// +build !js,!appengine,!safe,!disableunsafe\n\n/*\nThis test file is part of the spew package rather than than the spew_test\npackage because it needs access to internals to properly test certain cases\nwhich are not possible via the public interface since they should never happen.\n*/\n\npackage spew\n\nimport (\n\t\"bytes\"\n\t\"reflect\"\n\t\"testing\"\n\t\"unsafe\"\n)\n\n// changeKind uses unsafe to intentionally change the kind of a reflect.Value to\n// the maximum kind value which does not exist.  
This is needed to test the\n// fallback code which punts to the standard fmt library for new types that\n// might get added to the language.\nfunc changeKind(v *reflect.Value, readOnly bool) {\n\trvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag))\n\t*rvf = *rvf | ((1<<flagKindWidth - 1) << flagKindShift)\n\tif readOnly {\n\t\t*rvf |= flagRO\n\t} else {\n\t\t*rvf &= ^uintptr(flagRO)\n\t}\n}\n\n// TestAddedReflectValue tests functionaly of the dump and formatter code which\n// falls back to the standard fmt library for new types that might get added to\n// the language.\nfunc TestAddedReflectValue(t *testing.T) {\n\ti := 1\n\n\t// Dump using a reflect.Value that is exported.\n\tv := reflect.ValueOf(int8(5))\n\tchangeKind(&v, false)\n\tbuf := new(bytes.Buffer)\n\td := dumpState{w: buf, cs: &Config}\n\td.dump(v)\n\ts := buf.String()\n\twant := \"(int8) 5\"\n\tif s != want {\n\t\tt.Errorf(\"TestAddedReflectValue #%d\\n got: %s want: %s\", i, s, want)\n\t}\n\ti++\n\n\t// Dump using a reflect.Value that is not exported.\n\tchangeKind(&v, true)\n\tbuf.Reset()\n\td.dump(v)\n\ts = buf.String()\n\twant = \"(int8) <int8 Value>\"\n\tif s != want {\n\t\tt.Errorf(\"TestAddedReflectValue #%d\\n got: %s want: %s\", i, s, want)\n\t}\n\ti++\n\n\t// Formatter using a reflect.Value that is exported.\n\tchangeKind(&v, false)\n\tbuf2 := new(dummyFmtState)\n\tf := formatState{value: v, cs: &Config, fs: buf2}\n\tf.format(v)\n\ts = buf2.String()\n\twant = \"5\"\n\tif s != want {\n\t\tt.Errorf(\"TestAddedReflectValue #%d got: %s want: %s\", i, s, want)\n\t}\n\ti++\n\n\t// Formatter using a reflect.Value that is not exported.\n\tchangeKind(&v, true)\n\tbuf2.Reset()\n\tf = formatState{value: v, cs: &Config, fs: buf2}\n\tf.format(v)\n\ts = buf2.String()\n\twant = \"<int8 Value>\"\n\tif s != want {\n\t\tt.Errorf(\"TestAddedReflectValue #%d got: %s want: %s\", i, s, want)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/spew.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\npackage spew\n\nimport (\n\t\"fmt\"\n\t\"io\"\n)\n\n// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were\n// passed with a default Formatter interface returned by NewFormatter.  It\n// returns the formatted string as a value that satisfies error.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Errorf(format string, a ...interface{}) (err error) {\n\treturn fmt.Errorf(format, convertArgs(a)...)\n}\n\n// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were\n// passed with a default Formatter interface returned by NewFormatter.  It\n// returns the number of bytes written and any write error encountered.  
See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Fprint(w io.Writer, a ...interface{}) (n int, err error) {\n\treturn fmt.Fprint(w, convertArgs(a)...)\n}\n\n// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were\n// passed with a default Formatter interface returned by NewFormatter.  It\n// returns the number of bytes written and any write error encountered.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {\n\treturn fmt.Fprintf(w, format, convertArgs(a)...)\n}\n\n// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it\n// passed with a default Formatter interface returned by NewFormatter.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Fprintln(w io.Writer, a ...interface{}) (n int, err error) {\n\treturn fmt.Fprintln(w, convertArgs(a)...)\n}\n\n// Print is a wrapper for fmt.Print that treats each argument as if it were\n// passed with a default Formatter interface returned by NewFormatter.  It\n// returns the number of bytes written and any write error encountered.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Print(a ...interface{}) (n int, err error) {\n\treturn fmt.Print(convertArgs(a)...)\n}\n\n// Printf is a wrapper for fmt.Printf that treats each argument as if it were\n// passed with a default Formatter interface returned by NewFormatter.  It\n// returns the number of bytes written and any write error encountered.  
See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Printf(format string, a ...interface{}) (n int, err error) {\n\treturn fmt.Printf(format, convertArgs(a)...)\n}\n\n// Println is a wrapper for fmt.Println that treats each argument as if it were\n// passed with a default Formatter interface returned by NewFormatter.  It\n// returns the number of bytes written and any write error encountered.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Println(a ...interface{}) (n int, err error) {\n\treturn fmt.Println(convertArgs(a)...)\n}\n\n// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were\n// passed with a default Formatter interface returned by NewFormatter.  It\n// returns the resulting string.  See NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Sprint(a ...interface{}) string {\n\treturn fmt.Sprint(convertArgs(a)...)\n}\n\n// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were\n// passed with a default Formatter interface returned by NewFormatter.  It\n// returns the resulting string.  See NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Sprintf(format string, a ...interface{}) string {\n\treturn fmt.Sprintf(format, convertArgs(a)...)\n}\n\n// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it\n// were passed with a default Formatter interface returned by NewFormatter.  It\n// returns the resulting string.  
See NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Sprintln(a ...interface{}) string {\n\treturn fmt.Sprintln(convertArgs(a)...)\n}\n\n// convertArgs accepts a slice of arguments and returns a slice of the same\n// length with each argument converted to a default spew Formatter interface.\nfunc convertArgs(args []interface{}) (formatters []interface{}) {\n\tformatters = make([]interface{}, len(args))\n\tfor index, arg := range args {\n\t\tformatters[index] = NewFormatter(arg)\n\t}\n\treturn formatters\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/spew_test.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\npackage spew_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n)\n\n// spewFunc is used to identify which public function of the spew package or\n// ConfigState a test applies to.\ntype spewFunc int\n\nconst (\n\tfCSFdump spewFunc = iota\n\tfCSFprint\n\tfCSFprintf\n\tfCSFprintln\n\tfCSPrint\n\tfCSPrintln\n\tfCSSdump\n\tfCSSprint\n\tfCSSprintf\n\tfCSSprintln\n\tfCSErrorf\n\tfCSNewFormatter\n\tfErrorf\n\tfFprint\n\tfFprintln\n\tfPrint\n\tfPrintln\n\tfSdump\n\tfSprint\n\tfSprintf\n\tfSprintln\n)\n\n// Map of spewFunc values to names for pretty printing.\nvar spewFuncStrings = map[spewFunc]string{\n\tfCSFdump:        \"ConfigState.Fdump\",\n\tfCSFprint:       \"ConfigState.Fprint\",\n\tfCSFprintf:      \"ConfigState.Fprintf\",\n\tfCSFprintln:     \"ConfigState.Fprintln\",\n\tfCSSdump:        \"ConfigState.Sdump\",\n\tfCSPrint:        \"ConfigState.Print\",\n\tfCSPrintln:      \"ConfigState.Println\",\n\tfCSSprint:       \"ConfigState.Sprint\",\n\tfCSSprintf:      \"ConfigState.Sprintf\",\n\tfCSSprintln:     \"ConfigState.Sprintln\",\n\tfCSErrorf:       
\"ConfigState.Errorf\",\n\tfCSNewFormatter: \"ConfigState.NewFormatter\",\n\tfErrorf:         \"spew.Errorf\",\n\tfFprint:         \"spew.Fprint\",\n\tfFprintln:       \"spew.Fprintln\",\n\tfPrint:          \"spew.Print\",\n\tfPrintln:        \"spew.Println\",\n\tfSdump:          \"spew.Sdump\",\n\tfSprint:         \"spew.Sprint\",\n\tfSprintf:        \"spew.Sprintf\",\n\tfSprintln:       \"spew.Sprintln\",\n}\n\nfunc (f spewFunc) String() string {\n\tif s, ok := spewFuncStrings[f]; ok {\n\t\treturn s\n\t}\n\treturn fmt.Sprintf(\"Unknown spewFunc (%d)\", int(f))\n}\n\n// spewTest is used to describe a test to be performed against the public\n// functions of the spew package or ConfigState.\ntype spewTest struct {\n\tcs     *spew.ConfigState\n\tf      spewFunc\n\tformat string\n\tin     interface{}\n\twant   string\n}\n\n// spewTests houses the tests to be performed against the public functions of\n// the spew package and ConfigState.\n//\n// These tests are only intended to ensure the public functions are exercised\n// and are intentionally not exhaustive of types.  
The exhaustive type\n// tests are handled in the dump and format tests.\nvar spewTests []spewTest\n\n// redirStdout is a helper function to return the standard output from f as a\n// byte slice.\nfunc redirStdout(f func()) ([]byte, error) {\n\ttempFile, err := ioutil.TempFile(\"\", \"ss-test\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfileName := tempFile.Name()\n\tdefer os.Remove(fileName) // Ignore error\n\n\torigStdout := os.Stdout\n\tos.Stdout = tempFile\n\tf()\n\tos.Stdout = origStdout\n\ttempFile.Close()\n\n\treturn ioutil.ReadFile(fileName)\n}\n\nfunc initSpewTests() {\n\t// Config states with various settings.\n\tscsDefault := spew.NewDefaultConfig()\n\tscsNoMethods := &spew.ConfigState{Indent: \" \", DisableMethods: true}\n\tscsNoPmethods := &spew.ConfigState{Indent: \" \", DisablePointerMethods: true}\n\tscsMaxDepth := &spew.ConfigState{Indent: \" \", MaxDepth: 1}\n\tscsContinue := &spew.ConfigState{Indent: \" \", ContinueOnMethod: true}\n\tscsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true}\n\tscsNoCap := &spew.ConfigState{DisableCapacities: true}\n\n\t// Variables for tests on types which implement Stringer interface with and\n\t// without a pointer receiver.\n\tts := stringer(\"test\")\n\ttps := pstringer(\"test\")\n\n\ttype ptrTester struct {\n\t\ts *struct{}\n\t}\n\ttptr := &ptrTester{s: &struct{}{}}\n\n\t// depthTester is used to test max depth handling for structs, array, slices\n\t// and maps.\n\ttype depthTester struct {\n\t\tic    indirCir1\n\t\tarr   [1]string\n\t\tslice []string\n\t\tm     map[string]int\n\t}\n\tdt := depthTester{indirCir1{nil}, [1]string{\"arr\"}, []string{\"slice\"},\n\t\tmap[string]int{\"one\": 1}}\n\n\t// Variable for tests on types which implement error interface.\n\tte := customError(10)\n\n\tspewTests = []spewTest{\n\t\t{scsDefault, fCSFdump, \"\", int8(127), \"(int8) 127\\n\"},\n\t\t{scsDefault, fCSFprint, \"\", int16(32767), \"32767\"},\n\t\t{scsDefault, fCSFprintf, \"%v\", int32(2147483647), 
\"2147483647\"},\n\t\t{scsDefault, fCSFprintln, \"\", int(2147483647), \"2147483647\\n\"},\n\t\t{scsDefault, fCSPrint, \"\", int64(9223372036854775807), \"9223372036854775807\"},\n\t\t{scsDefault, fCSPrintln, \"\", uint8(255), \"255\\n\"},\n\t\t{scsDefault, fCSSdump, \"\", uint8(64), \"(uint8) 64\\n\"},\n\t\t{scsDefault, fCSSprint, \"\", complex(1, 2), \"(1+2i)\"},\n\t\t{scsDefault, fCSSprintf, \"%v\", complex(float32(3), 4), \"(3+4i)\"},\n\t\t{scsDefault, fCSSprintln, \"\", complex(float64(5), 6), \"(5+6i)\\n\"},\n\t\t{scsDefault, fCSErrorf, \"%#v\", uint16(65535), \"(uint16)65535\"},\n\t\t{scsDefault, fCSNewFormatter, \"%v\", uint32(4294967295), \"4294967295\"},\n\t\t{scsDefault, fErrorf, \"%v\", uint64(18446744073709551615), \"18446744073709551615\"},\n\t\t{scsDefault, fFprint, \"\", float32(3.14), \"3.14\"},\n\t\t{scsDefault, fFprintln, \"\", float64(6.28), \"6.28\\n\"},\n\t\t{scsDefault, fPrint, \"\", true, \"true\"},\n\t\t{scsDefault, fPrintln, \"\", false, \"false\\n\"},\n\t\t{scsDefault, fSdump, \"\", complex(-10, -20), \"(complex128) (-10-20i)\\n\"},\n\t\t{scsDefault, fSprint, \"\", complex(-1, -2), \"(-1-2i)\"},\n\t\t{scsDefault, fSprintf, \"%v\", complex(float32(-3), -4), \"(-3-4i)\"},\n\t\t{scsDefault, fSprintln, \"\", complex(float64(-5), -6), \"(-5-6i)\\n\"},\n\t\t{scsNoMethods, fCSFprint, \"\", ts, \"test\"},\n\t\t{scsNoMethods, fCSFprint, \"\", &ts, \"<*>test\"},\n\t\t{scsNoMethods, fCSFprint, \"\", tps, \"test\"},\n\t\t{scsNoMethods, fCSFprint, \"\", &tps, \"<*>test\"},\n\t\t{scsNoPmethods, fCSFprint, \"\", ts, \"stringer test\"},\n\t\t{scsNoPmethods, fCSFprint, \"\", &ts, \"<*>stringer test\"},\n\t\t{scsNoPmethods, fCSFprint, \"\", tps, \"test\"},\n\t\t{scsNoPmethods, fCSFprint, \"\", &tps, \"<*>stringer test\"},\n\t\t{scsMaxDepth, fCSFprint, \"\", dt, \"{{<max>} [<max>] [<max>] map[<max>]}\"},\n\t\t{scsMaxDepth, fCSFdump, \"\", dt, \"(spew_test.depthTester) {\\n\" +\n\t\t\t\" ic: (spew_test.indirCir1) {\\n  <max depth reached>\\n },\\n\" 
+\n\t\t\t\" arr: ([1]string) (len=1 cap=1) {\\n  <max depth reached>\\n },\\n\" +\n\t\t\t\" slice: ([]string) (len=1 cap=1) {\\n  <max depth reached>\\n },\\n\" +\n\t\t\t\" m: (map[string]int) (len=1) {\\n  <max depth reached>\\n }\\n}\\n\"},\n\t\t{scsContinue, fCSFprint, \"\", ts, \"(stringer test) test\"},\n\t\t{scsContinue, fCSFdump, \"\", ts, \"(spew_test.stringer) \" +\n\t\t\t\"(len=4) (stringer test) \\\"test\\\"\\n\"},\n\t\t{scsContinue, fCSFprint, \"\", te, \"(error: 10) 10\"},\n\t\t{scsContinue, fCSFdump, \"\", te, \"(spew_test.customError) \" +\n\t\t\t\"(error: 10) 10\\n\"},\n\t\t{scsNoPtrAddr, fCSFprint, \"\", tptr, \"<*>{<*>{}}\"},\n\t\t{scsNoPtrAddr, fCSSdump, \"\", tptr, \"(*spew_test.ptrTester)({\\ns: (*struct {})({\\n})\\n})\\n\"},\n\t\t{scsNoCap, fCSSdump, \"\", make([]string, 0, 10), \"([]string) {\\n}\\n\"},\n\t\t{scsNoCap, fCSSdump, \"\", make([]string, 1, 10), \"([]string) (len=1) {\\n(string) \\\"\\\"\\n}\\n\"},\n\t}\n}\n\n// TestSpew executes all of the tests described by spewTests.\nfunc TestSpew(t *testing.T) {\n\tinitSpewTests()\n\n\tt.Logf(\"Running %d tests\", len(spewTests))\n\tfor i, test := range spewTests {\n\t\tbuf := new(bytes.Buffer)\n\t\tswitch test.f {\n\t\tcase fCSFdump:\n\t\t\ttest.cs.Fdump(buf, test.in)\n\n\t\tcase fCSFprint:\n\t\t\ttest.cs.Fprint(buf, test.in)\n\n\t\tcase fCSFprintf:\n\t\t\ttest.cs.Fprintf(buf, test.format, test.in)\n\n\t\tcase fCSFprintln:\n\t\t\ttest.cs.Fprintln(buf, test.in)\n\n\t\tcase fCSPrint:\n\t\t\tb, err := redirStdout(func() { test.cs.Print(test.in) })\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"%v #%d %v\", test.f, i, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbuf.Write(b)\n\n\t\tcase fCSPrintln:\n\t\t\tb, err := redirStdout(func() { test.cs.Println(test.in) })\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"%v #%d %v\", test.f, i, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbuf.Write(b)\n\n\t\tcase fCSSdump:\n\t\t\tstr := test.cs.Sdump(test.in)\n\t\t\tbuf.WriteString(str)\n\n\t\tcase fCSSprint:\n\t\t\tstr := 
test.cs.Sprint(test.in)\n\t\t\tbuf.WriteString(str)\n\n\t\tcase fCSSprintf:\n\t\t\tstr := test.cs.Sprintf(test.format, test.in)\n\t\t\tbuf.WriteString(str)\n\n\t\tcase fCSSprintln:\n\t\t\tstr := test.cs.Sprintln(test.in)\n\t\t\tbuf.WriteString(str)\n\n\t\tcase fCSErrorf:\n\t\t\terr := test.cs.Errorf(test.format, test.in)\n\t\t\tbuf.WriteString(err.Error())\n\n\t\tcase fCSNewFormatter:\n\t\t\tfmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in))\n\n\t\tcase fErrorf:\n\t\t\terr := spew.Errorf(test.format, test.in)\n\t\t\tbuf.WriteString(err.Error())\n\n\t\tcase fFprint:\n\t\t\tspew.Fprint(buf, test.in)\n\n\t\tcase fFprintln:\n\t\t\tspew.Fprintln(buf, test.in)\n\n\t\tcase fPrint:\n\t\t\tb, err := redirStdout(func() { spew.Print(test.in) })\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"%v #%d %v\", test.f, i, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbuf.Write(b)\n\n\t\tcase fPrintln:\n\t\t\tb, err := redirStdout(func() { spew.Println(test.in) })\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"%v #%d %v\", test.f, i, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbuf.Write(b)\n\n\t\tcase fSdump:\n\t\t\tstr := spew.Sdump(test.in)\n\t\t\tbuf.WriteString(str)\n\n\t\tcase fSprint:\n\t\t\tstr := spew.Sprint(test.in)\n\t\t\tbuf.WriteString(str)\n\n\t\tcase fSprintf:\n\t\t\tstr := spew.Sprintf(test.format, test.in)\n\t\t\tbuf.WriteString(str)\n\n\t\tcase fSprintln:\n\t\t\tstr := spew.Sprintln(test.in)\n\t\t\tbuf.WriteString(str)\n\n\t\tdefault:\n\t\t\tt.Errorf(\"%v #%d unrecognized function\", test.f, i)\n\t\t\tcontinue\n\t\t}\n\t\ts := buf.String()\n\t\tif test.want != s {\n\t\t\tt.Errorf(\"ConfigState #%d\\n got: %s want: %s\", i, s, test.want)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go",
    "content": "// Copyright (c) 2013 Dave Collins <dave@davec.name>\n//\n// Permission to use, copy, modify, and distribute this software for any\n// purpose with or without fee is hereby granted, provided that the above\n// copyright notice and this permission notice appear in all copies.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n// NOTE: Due to the following build constraints, this file will only be compiled\n// when both cgo is supported and \"-tags testcgo\" is added to the go test\n// command line.  This code should really only be in the dumpcgo_test.go file,\n// but unfortunately Go will not allow cgo in test files, so this is a\n// workaround to allow cgo types to be tested.  This configuration is used\n// because spew itself does not require cgo to run even though it does handle\n// certain cgo types specially.  
Rather than forcing all clients to require cgo\n// and an external C compiler just to run the tests, this scheme makes them\n// optional.\n// +build cgo,testcgo\n\npackage testdata\n\n/*\n#include <stdint.h>\ntypedef unsigned char custom_uchar_t;\n\nchar            *ncp = 0;\nchar            *cp = \"test\";\nchar             ca[6] = {'t', 'e', 's', 't', '2', '\\0'};\nunsigned char    uca[6] = {'t', 'e', 's', 't', '3', '\\0'};\nsigned char      sca[6] = {'t', 'e', 's', 't', '4', '\\0'};\nuint8_t          ui8ta[6] = {'t', 'e', 's', 't', '5', '\\0'};\ncustom_uchar_t   tuca[6] = {'t', 'e', 's', 't', '6', '\\0'};\n*/\nimport \"C\"\n\n// GetCgoNullCharPointer returns a null char pointer via cgo.  This is only\n// used for tests.\nfunc GetCgoNullCharPointer() interface{} {\n\treturn C.ncp\n}\n\n// GetCgoCharPointer returns a char pointer via cgo.  This is only used for\n// tests.\nfunc GetCgoCharPointer() interface{} {\n\treturn C.cp\n}\n\n// GetCgoCharArray returns a char array via cgo and the array's len and cap.\n// This is only used for tests.\nfunc GetCgoCharArray() (interface{}, int, int) {\n\treturn C.ca, len(C.ca), cap(C.ca)\n}\n\n// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the\n// array's len and cap.  This is only used for tests.\nfunc GetCgoUnsignedCharArray() (interface{}, int, int) {\n\treturn C.uca, len(C.uca), cap(C.uca)\n}\n\n// GetCgoSignedCharArray returns a signed char array via cgo and the array's len\n// and cap.  This is only used for tests.\nfunc GetCgoSignedCharArray() (interface{}, int, int) {\n\treturn C.sca, len(C.sca), cap(C.sca)\n}\n\n// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and\n// cap.  This is only used for tests.\nfunc GetCgoUint8tArray() (interface{}, int, int) {\n\treturn C.ui8ta, len(C.ui8ta), cap(C.ui8ta)\n}\n\n// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via\n// cgo and the array's len and cap.  
This is only used for tests.\nfunc GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) {\n\treturn C.tuca, len(C.tuca), cap(C.tuca)\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/test_coverage.txt",
    "content": "\ngithub.com/davecgh/go-spew/spew/dump.go\t\t dumpState.dump\t\t\t 100.00% (88/88)\ngithub.com/davecgh/go-spew/spew/format.go\t formatState.format\t\t 100.00% (82/82)\ngithub.com/davecgh/go-spew/spew/format.go\t formatState.formatPtr\t\t 100.00% (52/52)\ngithub.com/davecgh/go-spew/spew/dump.go\t\t dumpState.dumpPtr\t\t 100.00% (44/44)\ngithub.com/davecgh/go-spew/spew/dump.go\t\t dumpState.dumpSlice\t\t 100.00% (39/39)\ngithub.com/davecgh/go-spew/spew/common.go\t handleMethods\t\t\t 100.00% (30/30)\ngithub.com/davecgh/go-spew/spew/common.go\t printHexPtr\t\t\t 100.00% (18/18)\ngithub.com/davecgh/go-spew/spew/common.go\t unsafeReflectValue\t\t 100.00% (13/13)\ngithub.com/davecgh/go-spew/spew/format.go\t formatState.constructOrigFormat 100.00% (12/12)\ngithub.com/davecgh/go-spew/spew/dump.go\t\t fdump\t\t\t\t 100.00% (11/11)\ngithub.com/davecgh/go-spew/spew/format.go\t formatState.Format\t\t 100.00% (11/11)\ngithub.com/davecgh/go-spew/spew/common.go\t init\t\t\t\t 100.00% (10/10)\ngithub.com/davecgh/go-spew/spew/common.go\t printComplex\t\t\t 100.00% (9/9)\ngithub.com/davecgh/go-spew/spew/common.go\t valuesSorter.Less\t\t 100.00% (8/8)\ngithub.com/davecgh/go-spew/spew/format.go\t formatState.buildDefaultFormat\t 100.00% (7/7)\ngithub.com/davecgh/go-spew/spew/format.go\t formatState.unpackValue\t 100.00% (5/5)\ngithub.com/davecgh/go-spew/spew/dump.go\t\t dumpState.indent\t\t 100.00% (4/4)\ngithub.com/davecgh/go-spew/spew/common.go\t catchPanic\t\t\t 100.00% (4/4)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.convertArgs\t 100.00% (4/4)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t convertArgs\t\t\t 100.00% (4/4)\ngithub.com/davecgh/go-spew/spew/format.go\t newFormatter\t\t\t 100.00% (3/3)\ngithub.com/davecgh/go-spew/spew/dump.go\t\t Sdump\t\t\t\t 100.00% (3/3)\ngithub.com/davecgh/go-spew/spew/common.go\t printBool\t\t\t 100.00% (3/3)\ngithub.com/davecgh/go-spew/spew/common.go\t sortValues\t\t\t 100.00% 
(3/3)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Sdump\t\t 100.00% (3/3)\ngithub.com/davecgh/go-spew/spew/dump.go\t\t dumpState.unpackValue\t\t 100.00% (3/3)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Printf\t\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Println\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Sprint\t\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Sprintf\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Sprintln\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/common.go\t printFloat\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t NewDefaultConfig\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/common.go\t printInt\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/common.go\t printUint\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/common.go\t valuesSorter.Len\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/common.go\t valuesSorter.Swap\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Errorf\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Fprint\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Fprintf\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Fprintln\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Print\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Printf\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Println\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Sprint\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Sprintf\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Sprintln\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.NewFormatter\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Fdump\t\t 100.00% 
(1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Dump\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/dump.go\t\t Fdump\t\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/dump.go\t\t Dump\t\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Fprintln\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/format.go\t NewFormatter\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Errorf\t\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Fprint\t\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Fprintf\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Print\t\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew\t\t\t ------------------------------- 100.00% (505/505)\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/.dockerignore",
    "content": "build\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/.github/ISSUE_TEMPLATE.md",
    "content": "### Directions\n_GitHub Issues are reserved for actionable bug reports and feature requests._\n_General questions should be sent to the [InfluxDB Community Site](https://community.influxdata.com)._\n\n_Before opening an issue, search for similar bug reports or feature requests on GitHub Issues._\n_If no similar issue can be found, fill out either the \"Bug Report\" or the \"Feature Request\" section below.\n_Erase the other section and everything on and above this line._\n\n### Bug report\n\n__System info:__ [Include InfluxDB version, operating system name, and other relevant details]\n\n__Steps to reproduce:__\n\n1. [First Step]\n2. [Second Step]\n3. [and so on...]\n\n__Expected behavior:__ [What you expected to happen]\n\n__Actual behavior:__ [What actually happened]\n\n__Additional info:__ [Include gist of relevant config, logs, etc.]\n\nAlso, if this is an issue of for performance, locking, etc the following commands are useful to create debug information for the team.\n\n```\ncurl -o profiles.tar.gz \"http://localhost:8086/debug/pprof/all?cpu=true\"\n\ncurl -o vars.txt \"http://localhost:8086/debug/vars\"\niostat -xd 1 30 > iostat.txt\n```\n\n**Please note** It will take at least 30 seconds for the first cURL command above to return a response.\nThis is because it will run a CPU profile as part of its information gathering, which takes 30 seconds to collect.\nIdeally you should run these commands when you're experiencing problems, so we can capture the state of the system at that time.\n\nIf you're concerned about running a CPU profile (which only has a small, temporary impact on performance), then you can set `?cpu=false` or omit `?cpu=true` altogether.\n\nPlease run those if possible and link them from a [gist](http://gist.github.com) or simply attach them as a comment to the issue.\n\n*Please note, the quickest way to fix a bug is to open a Pull Request.*\n\n\n### Feature Request\n\nOpening a feature request kicks off a discussion.\nRequests 
may be closed if we're not actively planning to work on them.\n\n__Proposal:__ [Description of the feature]\n\n__Current behavior:__ [What currently happens]\n\n__Desired behavior:__ [What you would like to happen]\n\n__Use case:__ [Why is this important (helps with prioritizing requests)]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/.github/PULL_REQUEST_TEMPLATE.md",
    "content": "###### Required for all non-trivial PRs\n- [ ] Rebased/mergable\n- [ ] Tests pass\n- [ ] CHANGELOG.md updated\n- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)\n\n###### Required only if applicable\n_You can erase any checkboxes below this note if they are not applicable to your Pull Request._\n- [ ] [InfluxQL Spec](https://github.com/influxdata/influxdb/blob/master/influxql/README.md) updated\n- [ ] Provide example syntax\n- [ ] Update man page when modifying a command\n- [ ] Config changes: update sample config (`etc/config.sample.toml`), server `NewDemoConfig` method, and `Diagnostics` methods reporting config settings, if necessary\n- [ ] [InfluxData Documentation](https://github.com/influxdata/docs.influxdata.com): issue filed or pull request submitted \\<link to issue or pull request\\>\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/.gitignore",
    "content": "# Keep editor-specific, non-project specific ignore rules in global .gitignore:\n# https://help.github.com/articles/ignoring-files/#create-a-global-gitignore\n\n*~\nsrc/\n\nconfig.json\n/bin/\n\n/query/a.out*\n\n# ignore generated files.\ncmd/influxd/version.go\n\n# executables\n\n*.test\n\ninflux_tsm\n**/influx_tsm\n!**/influx_tsm/\n\ninflux_stress\n**/influx_stress\n!**/influx_stress/\n\ninfluxd\n**/influxd\n!**/influxd/\n\ninflux\n**/influx\n!**/influx/\n\ninfluxdb\n**/influxdb\n!**/influxdb/\n\ninflux_inspect\n**/influx_inspect\n!**/influx_inspect/\n\n/benchmark-tool\n/main\n/benchmark-storage\ngodef\ngosym\ngocode\ninspect-raft\n\n# dependencies\nout_rpm/\npackages/\n\n# autconf\nautom4te.cache/\nconfig.log\nconfig.status\n\n# log file\ninfluxdb.log\nbenchmark.log\n\n# config file\nconfig.toml\n\n# test data files\nintegration/migration_data/\n\n# man outputs\nman/*.xml\nman/*.1\nman/*.1.gz\n\n# test outputs\n/test-results.xml\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/.hooks/pre-commit",
    "content": "#!/usr/bin/env bash\n\nfmtcount=`git ls-files | grep '.go$' | xargs gofmt -l 2>&1 | wc -l`\nif [ $fmtcount -gt 0 ]; then\n    echo \"Some files aren't formatted, please run 'go fmt ./...' to format your source code before committing\"\n    exit 1\nfi\n\nvetcount=`go tool vet ./ 2>&1  | wc -l`\nif [ $vetcount -gt 0 ]; then\n    echo \"Some files aren't passing vet heuristics, please run 'go vet ./...' to see the errors it flags and correct your source code before committing\"\n    exit 1\nfi\nexit 0\n\n# Ensure FIXME lines are removed before commit.\nfixme_lines=$(git diff --cached | grep ^+ | grep -v pre-commit | grep FIXME | sed 's_^+\\s*__g')\nif [ \"$fixme_lines\" != \"\" ]; then\n    echo \"Please remove the following lines:\"\n    echo -e \"$fixme_lines\"\n    exit 1\nfi\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/.mention-bot",
    "content": "{\n  \"maxReviewers\": 3,\n  \"fileBlacklist\": [\"CHANGELOG.md\"],\n  \"userBlacklist\": [\"pauldix\", \"toddboom\", \"aviau\", \"mark-rushakoff\"],\n  \"requiredOrgs\": [\"influxdata\"]\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/CHANGELOG.md",
    "content": "## v1.3.6 [unreleased]\n\n### Bugfixes\n\n- [#8770](https://github.com/influxdata/influxdb/pull/8770): Reduce how long it takes to walk the varrefs in an expression.\n- [#8787](https://github.com/influxdata/influxdb/issues/8787): panic: runtime error: invalid memory address or nil pointer dereference.\n- [#8741](https://github.com/influxdata/influxdb/issues/8741): Fix increased memory usage in cache and wal readers\n- [#8848](https://github.com/influxdata/influxdb/issues/8848): Prevent deadlock when doing math on the result of a subquery.\n- [#8842](https://github.com/influxdata/influxdb/issues/8842): Fix several races in the shard and engine.\n- [#8887](https://github.com/influxdata/influxdb/pull/8887): Fix race on cache entry.\n\n## v1.3.5 [2017-08-29]\n\n### Bugfixes\n\n- [#8755](https://github.com/influxdata/influxdb/pull/8755): Fix race condition accessing `seriesByID` map.\n- [#8766](https://github.com/influxdata/influxdb/pull/8766): Fix deadlock when calling `SeriesIDsAllOrByExpr`\n\n## v1.3.4 [2017-08-23]\n\n### Bugfixes\n\n- [#8601](https://github.com/influxdata/influxdb/pull/8601): Fixed time boundaries for continuous queries with time zones.\n- [#8607](https://github.com/influxdata/influxdb/issues/8607): Fix time zone shifts when the shift happens on a time zone boundary.\n- [#8639](https://github.com/influxdata/influxdb/issues/8639): Parse time literals using the time zone in the select statement.\n- [#8701](https://github.com/influxdata/influxdb/pull/8701): Fix drop measurement not dropping all data\n- [#8677](https://github.com/influxdata/influxdb/issues/8677): Fix backups when snapshot is empty.\n- [#8706](https://github.com/influxdata/influxdb/pull/8706): Cursor leak, resulting in an accumulation of `.tsm.tmp` files after compactions.\n- [#8713](https://github.com/influxdata/influxdb/issues/8713): Deadlock when dropping measurement and writing\n- [#8716](https://github.com/influxdata/influxdb/pull/8716): Ensure inputs are closed on 
error. Add runtime GC finalizer as additional guard to close iterators\n- [#8726](https://github.com/influxdata/influxdb/pull/8726): Fix leaking tmp file when large compaction aborted\n\n### Features\n\n- [#8711](https://github.com/influxdata/influxdb/pull/8711): Batch up writes for monitor service\n\n## v1.3.3 [2017-08-10]\n\n### Bugfixes\n\n- [#8681](https://github.com/influxdata/influxdb/pull/8681): Resolves a memory leak when NewReaderIterator creates a nilFloatIterator, the reader is not closed\n\n## v1.3.2 [2017-08-04]\n\n### Bugfixes\n\n- [#8629](https://github.com/influxdata/influxdb/pull/8629): Interrupt in progress TSM compactions\n- [#8630](https://github.com/influxdata/influxdb/pull/8630): Prevent excessive memory usage when dropping series\n- [#8640](https://github.com/influxdata/influxdb/issues/8640): Significantly improve performance of SHOW TAG VALUES.\n\n## v1.3.1 [2017-07-20]\n\n### Bugfixes\n\n- [#8559](https://github.com/influxdata/influxdb/issues/8559): Ensure temporary TSM files get cleaned up when compaction aborted.\n- [#8500](https://github.com/influxdata/influxdb/issues/8500): InfluxDB goes unresponsive\n- [#8531](https://github.com/influxdata/influxdb/issues/8531): Duplicate points generated via INSERT after DELETE\n- [#8569](https://github.com/influxdata/influxdb/issues/8569): Fix the cq start and end times to use unix timestamps.\n\n## v1.3.0 [2017-06-21]\n\n### Removals\n\nThe admin UI is removed and unusable in this release. The `[admin]` configuration section will be ignored.\n\n### Configuration Changes\n\n* The top-level config `bind-address` now defaults to `localhost:8088`.\n  The previous default was just `:8088`, causing the backup and restore port to be bound on all available interfaces (i.e. 
including interfaces on the public internet).\n\n### Features\n\n- [#8143](https://github.com/influxdata/influxdb/pull/8143): Add WAL sync delay\n- [#7977](https://github.com/influxdata/influxdb/issues/7977): Add chunked request processing back into the Go client v2\n- [#7974](https://github.com/influxdata/influxdb/pull/7974): Allow non-admin users to execute SHOW DATABASES.\n- [#7948](https://github.com/influxdata/influxdb/pull/7948): Reduce memory allocations by reusing gzip.Writers across requests\n- [#7776](https://github.com/influxdata/influxdb/issues/7776): Add system information to /debug/vars.\n- [#7553](https://github.com/influxdata/influxdb/issues/7553): Add modulo operator to the query language.\n- [#7856](https://github.com/influxdata/influxdb/issues/7856): Failed points during an import now result in a non-zero exit code.\n- [#7821](https://github.com/influxdata/influxdb/issues/7821): Expose some configuration settings via SHOW DIAGNOSTICS\n- [#8025](https://github.com/influxdata/influxdb/issues/8025): Support single and multiline comments in InfluxQL.\n- [#6541](https://github.com/influxdata/influxdb/issues/6541): Support timezone offsets for queries.\n- [#8194](https://github.com/influxdata/influxdb/pull/8194): Add \"integral\" function to InfluxQL.\n- [#7393](https://github.com/influxdata/influxdb/issues/7393): Add \"non_negative_difference\" function to InfluxQL.\n- [#8042](https://github.com/influxdata/influxdb/issues/8042): Add bitwise AND, OR and XOR operators to the query language.\n- [#8302](https://github.com/influxdata/influxdb/pull/8302): Write throughput/concurrency improvements\n- [#8273](https://github.com/influxdata/influxdb/issues/8273): Remove the admin UI.\n- [#8327](https://github.com/influxdata/influxdb/pull/8327): Update to go1.8.1\n- [#8348](https://github.com/influxdata/influxdb/pull/8348): Add max concurrent compaction limits\n- [#8366](https://github.com/influxdata/influxdb/pull/8366): Add TSI support tooling.\n- 
[#8350](https://github.com/influxdata/influxdb/pull/8350): Track HTTP client requests for /write and /query with /debug/requests.\n- [#8384](https://github.com/influxdata/influxdb/pull/8384): Write and compaction stability\n- [#7861](https://github.com/influxdata/influxdb/pull/7861): Add new profile endpoint for gathering all debug profiles and queries in a single archive.\n- [#8390](https://github.com/influxdata/influxdb/issues/8390): Add nanosecond duration literal support.\n- [#8394](https://github.com/influxdata/influxdb/pull/8394): Optimize top() and bottom() using an incremental aggregator.\n- [#7129](https://github.com/influxdata/influxdb/issues/7129): Maintain the tags of points selected by top() or bottom() when writing the results.\n\n### Bugfixes\n\n- [#8187](https://github.com/influxdata/influxdb/pull/8187): Several statements were missing the DefaultDatabase method\n- [#8231](https://github.com/influxdata/influxdb/pull/8231): Fix spelling mistake in HTTP section of config -- shared-sercret\n- [#8190](https://github.com/influxdata/influxdb/issues/8190): History file should redact passwords before saving to history.\n- [#8122](https://github.com/influxdata/influxdb/pull/8122): Suppress headers in output for influx cli when they are the same.\n- [#8119](https://github.com/influxdata/influxdb/pull/8119): Add chunked/chunk size as setting/options in cli.\n- [#8091](https://github.com/influxdata/influxdb/issues/8091): Do not increment the continuous query statistic if no query is run.\n- [#8064](https://github.com/influxdata/influxdb/issues/8064): Forbid wildcards in binary expressions.\n- [#8148](https://github.com/influxdata/influxdb/issues/8148): Fix fill(linear) when multiple series exist and there are null values.\n- [#7995](https://github.com/influxdata/influxdb/issues/7995): Update liner dependency to handle docker exec.\n- [#7835](https://github.com/influxdata/influxdb/pull/7835): Bind backup and restore port to localhost by default\n- 
[#7811](https://github.com/influxdata/influxdb/issues/7811): Kill query not killing query\n- [#7457](https://github.com/influxdata/influxdb/issues/7457): KILL QUERY should work during all phases of a query\n- [#8155](https://github.com/influxdata/influxdb/pull/8155): Simplify admin user check.\n- [#8118](https://github.com/influxdata/influxdb/issues/8118): Significantly improve DROP DATABASE speed.\n- [#8181](https://github.com/influxdata/influxdb/issues/8181): Return an error when an invalid duration literal is parsed.\n- [#8093](https://github.com/influxdata/influxdb/issues/8093): Fix the time range when an exact timestamp is selected.\n- [#8174](https://github.com/influxdata/influxdb/issues/8174): Fix query parser when using addition and subtraction without spaces.\n- [#8167](https://github.com/influxdata/influxdb/issues/8167): Fix a regression when math was used with selectors.\n- [#8175](https://github.com/influxdata/influxdb/issues/8175): Ensure the input for certain functions in the query engine are ordered.\n- [#8171](https://github.com/influxdata/influxdb/issues/8171): Significantly improve shutdown speed for high cardinality databases.\n- [#8177](https://github.com/influxdata/influxdb/issues/8177): Fix racy integration test.\n- [#8230](https://github.com/influxdata/influxdb/issues/8230): Prevent overflowing or underflowing during window computation.\n- [#8058](https://github.com/influxdata/influxdb/pull/8058): Enabled golint for admin, httpd, subscriber, udp. 
@karlding\n- [#8252](https://github.com/influxdata/influxdb/issues/8252): Implicitly cast null to false in binary expressions with a boolean.\n- [#8067](https://github.com/influxdata/influxdb/issues/8067): Restrict fill(none) and fill(linear) to be usable only with aggregate queries.\n- [#8065](https://github.com/influxdata/influxdb/issues/8065): Restrict top() and bottom() selectors to be used with no other functions.\n- [#8266](https://github.com/influxdata/influxdb/issues/8266): top() and bottom() now returns the time for every point.\n- [#8315](https://github.com/influxdata/influxdb/issues/8315): Remove default upper time bound on DELETE queries.\n- [#8066](https://github.com/influxdata/influxdb/issues/8066): Fix LIMIT and OFFSET for certain aggregate queries.\n- [#8045](https://github.com/influxdata/influxdb/issues/8045): Refactor the subquery code and fix outer condition queries.\n- [#7425](https://github.com/influxdata/influxdb/issues/7425): Fix compaction aborted log messages\n- [#8123](https://github.com/influxdata/influxdb/issues/8123): TSM compaction does not remove .tmp on error\n- [#8343](https://github.com/influxdata/influxdb/issues/8343): Set the CSV output to an empty string for null values.\n- [#8368](https://github.com/influxdata/influxdb/issues/8368): Compaction exhausting disk resources in InfluxDB\n- [#8358](https://github.com/influxdata/influxdb/issues/8358): Small edits to the etc/config.sample.toml file.\n- [#8392](https://github.com/influxdata/influxdb/issues/8393): Points beyond retention policy scope are dropped silently\n- [#8387](https://github.com/influxdata/influxdb/issues/8387): Fix TSM tmp file leaked on disk\n- [#8417](https://github.com/influxdata/influxdb/issues/8417): Fix large field keys preventing snapshot compactions\n- [#7957](https://github.com/influxdata/influxdb/issues/7957): URL query parameter credentials take priority over Authentication header.\n- [#8443](https://github.com/influxdata/influxdb/issues/8443): TSI branch 
has duplicate tag values.\n- [#8470](https://github.com/influxdata/influxdb/issues/8470): index file fd leak in tsi branch\n- [#8468](https://github.com/influxdata/influxdb/pull/8468): Fix TSI non-contiguous compaction panic.\n\n## v1.2.4 [2017-05-08]\n\n### Bugfixes\n\n- [#8338](https://github.com/influxdata/influxdb/pull/8338): Prefix partial write errors with `partial write:` to generalize identification in other subsystems\n\n## v1.2.3 [2017-04-17]\n\n### Bugfixes\n\n- [#8190](https://github.com/influxdata/influxdb/issues/8190): History file should redact passwords before saving to history.\n- [#8187](https://github.com/influxdata/influxdb/pull/8187): Several statements were missing the DefaultDatabase method\n- [#8022](https://github.com/influxdata/influxdb/issues/8022): Segment violation in models.Tags.Get\n- [#8155](https://github.com/influxdata/influxdb/pull/8155): Simplify admin user check.\n- [#8167](https://github.com/influxdata/influxdb/issues/8167): Fix a regression when math was used with selectors.\n- [#8175](https://github.com/influxdata/influxdb/issues/8175): Ensure the input for certain functions in the query engine are ordered.\n- [#8254](https://github.com/influxdata/influxdb/pull/8254): Fix delete time fields creating unparseable points\n\n## v1.2.2 [2017-03-14]\n\n### Release Notes\n\n### Configuration Changes\n\n#### `[http]` Section\n\n* `max-row-limit` now defaults to `0`.  
The previous default was `10000`, but due to a bug, the value in use since `1.0` was `0`.\n\n### Bugfixes\n\n- [#8050](https://github.com/influxdata/influxdb/issues/8050): influxdb & grafana, absence of data on the graphs\n\n## v1.2.1 [2017-03-08]\n\n### Release Notes\n\n### Bugfixes\n\n- [#8100](https://github.com/influxdata/influxdb/issues/8100): Include IsRawQuery in the rewritten statement for meta queries.\n- [#8095](https://github.com/influxdata/influxdb/pull/8095): Fix race in WALEntry.Encode and Values.Deduplicate\n- [#8085](https://github.com/influxdata/influxdb/issues/8085): panic: interface conversion: tsm1.Value is tsm1.IntegerValue, not tsm1.FloatValue.\n- [#8084](https://github.com/influxdata/influxdb/issues/8084): Points missing after compaction\n- [#8080](https://github.com/influxdata/influxdb/issues/8080): Point.UnmarshalBinary() bounds check\n- [#8078](https://github.com/influxdata/influxdb/issues/8078): Map types correctly when selecting a field with multiple measurements where one of the measurements is empty.\n- [#8044](https://github.com/influxdata/influxdb/issues/8044): Treat non-reserved measurement names with underscores as normal measurements.\n- [#8040](https://github.com/influxdata/influxdb/issues/8040): Reduce the expression in a subquery to avoid a panic.\n- [#8028](https://github.com/influxdata/influxdb/issues/8028): Fix panic in collectd when configured to read types DB from directory.\n- [#8001](https://github.com/influxdata/influxdb/issues/8001): Map types correctly when using a regex and one of the measurements is empty.\n- [#7968](https://github.com/influxdata/influxdb/issues/7968): Properly select a tag within a subquery.\n- [#7966](https://github.com/influxdata/influxdb/pull/7966): Prevent a panic when aggregates are used in an inner query with a raw query.\n- [#7946](https://github.com/influxdata/influxdb/issues/7946): Fix authentication when subqueries are present.\n- 
[#7910](https://github.com/influxdata/influxdb/issues/7910): Fix EvalType when a parenthesis expression is used.\n- [#7906](https://github.com/influxdata/influxdb/issues/7906): Anchors not working as expected with case-insensitive regex\n- [#7905](https://github.com/influxdata/influxdb/issues/7905): Fix ORDER BY time DESC with ordering series keys.\n- [#7895](https://github.com/influxdata/influxdb/issues/7895): Fix incorrect math when aggregates that emit different times are used.\n- [#7888](https://github.com/influxdata/influxdb/pull/7888): Expand query dimensions from the subquery.\n- [#7885](https://github.com/influxdata/influxdb/issues/7885): Fix LIMIT and OFFSET when they are used in a subquery.\n- [#7880](https://github.com/influxdata/influxdb/issues/7880): Dividing aggregate functions with different outputs doesn't panic.\n- [#7877](https://github.com/influxdata/influxdb/issues/7877): Fix mapping of types when the measurement uses a regex\n\n## v1.2.0 [2017-01-24]\n\n### Release Notes\n\nThis release introduces a major new querying capability in the form of sub-queries, and provides several performance improvements, including a 50% or better gain in write performance on larger numbers of cores. The release adds some stability and memory-related improvements, as well as several CLI-related bug fixes. If upgrading from a prior version, please read the configuration changes in the following section before upgrading.\n\n### Configuration Changes\n\nThe following new configuration options are available, if upgrading to `1.2.0` from prior versions.\n\n#### `[[collectd]]` Section\n\n* `security-level` which defaults to `\"none\"`. This field also accepts `\"sign\"` and `\"encrypt\"` and enables different levels of transmission security for the collectd plugin.\n* `auth-file` which defaults to `\"/etc/collectd/auth_file\"`. 
Specifies where to locate the authentication file used to authenticate clients when using signed or encrypted mode.\n\n### Deprecations\n\nThe stress tool `influx_stress` will be removed in a subsequent release. We recommend using [`influx-stress`](https://github.com/influxdata/influx-stress) as a replacement.\n\n### Features\n\n- [#7830](https://github.com/influxdata/influxdb/pull/7830): Cache snapshotting performance improvements\n- [#7723](https://github.com/influxdata/influxdb/pull/7723): Remove the override of GOMAXPROCS.\n- [#7709](https://github.com/influxdata/influxdb/pull/7709): Add clear command to cli.\n- [#7688](https://github.com/influxdata/influxdb/pull/7688): Adding ability to use parameters in queries in the v2 client using the `Parameters` map in the `Query` struct.\n- [#7669](https://github.com/influxdata/influxdb/issues/7669): Uncomment section headers from the default configuration file.\n- [#7633](https://github.com/influxdata/influxdb/pull/7633): improve write performance significantly.\n- [#7601](https://github.com/influxdata/influxdb/issues/7601): Prune data in meta store for deleted shards.\n- [#7554](https://github.com/influxdata/influxdb/pull/7554): update latest dependencies with Godeps.\n- [#7368](https://github.com/influxdata/influxdb/pull/7368): Introduce syntax for marking a partial response with chunking.\n- [#7356](https://github.com/influxdata/influxdb/issues/7356): Use X-Forwarded-For IP address in HTTP logger if present.\n- [#7326](https://github.com/influxdata/influxdb/issues/7326): Verbose output for SSL connection errors.\n- [#7323](https://github.com/influxdata/influxdb/pull/7323): Allow add items to array config via ENV\n- [#7066](https://github.com/influxdata/influxdb/issues/7066): Add support for secure transmission via collectd.\n- [#7036](https://github.com/influxdata/influxdb/issues/7036): Switch logging to use structured logging everywhere.\n- [#4619](https://github.com/influxdata/influxdb/issues/4619): Support 
subquery execution in the query language.\n- [#3188](https://github.com/influxdata/influxdb/issues/3188): [CLI feature request] USE retention policy for queries.\n\n### Bugfixes\n\n- [#7845](https://github.com/influxdata/influxdb/issues/7845): Fix race in storage engine.\n- [#7838](https://github.com/influxdata/influxdb/issues/7838): Ensure Subscriber service can be disabled.\n- [#7822](https://github.com/influxdata/influxdb/issues/7822): Drop database will delete /influxdb/data directory\n- [#7814](https://github.com/influxdata/influxdb/issues/7814): InfluxDB should do a partial write on mismatched type errors.\n- [#7812](https://github.com/influxdata/influxdb/issues/7812): Fix slice out of bounds panic when pruning shard groups. Thanks @vladlopes\n- [#7786](https://github.com/influxdata/influxdb/pull/7786): Fix potential race condition in correctness of tsm1_cache memBytes statistic.\n- [#7784](https://github.com/influxdata/influxdb/pull/7784): Fix broken error return on meta client's UpdateUser and DropContinuousQuery methods.\n- [#7741](https://github.com/influxdata/influxdb/pull/7741): Fix string quoting and significantly improve performance of `influx_inspect export`.\n- [#7740](https://github.com/influxdata/influxdb/issues/7740): Fix parse key panic when missing tag value @oiooj\n- [#7698](https://github.com/influxdata/influxdb/pull/7698): CLI was caching db/rp for insert into statements.\n- [#7659](https://github.com/influxdata/influxdb/issues/7659): Fix CLI import bug when using self-signed SSL certificates.\n- [#7656](https://github.com/influxdata/influxdb/issues/7656): Fix cross-platform backup/restore @allenpetersen\n- [#7650](https://github.com/influxdata/influxdb/issues/7650): Ensures that all user privileges associated with a database are removed when the database is dropped.\n- [#7634](https://github.com/influxdata/influxdb/issues/7634): Return the time from a percentile call on an integer.\n- 
[#7621](https://github.com/influxdata/influxdb/issues/7621): Expand string and boolean fields when using a wildcard with `sample()`.\n- [#7616](https://github.com/influxdata/influxdb/pull/7616): Fix chuid argument order in init script @ccasey\n- [#7615](https://github.com/influxdata/influxdb/issues/7615): Reject invalid subscription urls @allenpetersen\n- [#7585](https://github.com/influxdata/influxdb/pull/7585): Return Error instead of panic when decoding point values.\n- [#7563](https://github.com/influxdata/influxdb/issues/7563): RP should not allow `INF` or `0` as a shard duration.\n- [#7396](https://github.com/influxdata/influxdb/issues/7396): CLI should use spaces for alignment, not tabs.\n- [#6527](https://github.com/influxdata/influxdb/issues/6527): 0.12.2 Influx CLI client PRECISION returns \"Unknown precision....\n\n\n## v1.1.5 [2017-04-28]\n\n### Bugfixes\n\n- [#8190](https://github.com/influxdata/influxdb/issues/8190): History file should redact passwords before saving to history.\n- [#8187](https://github.com/influxdata/influxdb/pull/8187): Several statements were missing the DefaultDatabase method\n\n## v1.1.4 [2017-02-27]\n\n### Bugfixes\n\n- [#8063](https://github.com/influxdata/influxdb/pull/8063): Backport #7631 to reduce GC allocations.\n\n## v1.1.3 [2017-02-17]\n\n### Bugfixes\n\n- [#8027](https://github.com/influxdata/influxdb/pull/8027): Remove Tags.shouldCopy, replace with forceCopy on series creation.\n\n## v1.1.2 [2017-02-16]\n\n### Bugfixes\n\n- [#7832](https://github.com/influxdata/influxdb/pull/7832): Fix memory leak when writing new series over HTTP\n- [#7929](https://github.com/influxdata/influxdb/issues/7929): Fix series tag iteration segfault. 
(#7922)\n- [#8011](https://github.com/influxdata/influxdb/issues/8011): Fix tag dereferencing panic.\n\n## v1.1.1 [2016-12-06]\n\n### Features\n\n- [#7684](https://github.com/influxdata/influxdb/issues/7684): Update Go version to 1.7.4.\n\n### Bugfixes\n\n- [#7679](https://github.com/influxdata/influxdb/pull/7679): Fix string fields w/ trailing slashes\n- [#7661](https://github.com/influxdata/influxdb/pull/7661): Quote the empty string as an ident.\n- [#7625](https://github.com/influxdata/influxdb/issues/7625): Fix incorrect tag value in error message.\n\n### Security\n\n[Go 1.7.4](https://golang.org/doc/devel/release.html#go1.7.minor) was released to address two security issues.  This release includes these security fixes.\n\n## v1.1.0 [2016-11-14]\n\n### Release Notes\n\nThis release is built with go 1.7.3 and provides many performance optimizations, stability changes and a few new query capabilities.  If upgrading from a prior version, please read the configuration changes below section before upgrading.\n\n### Deprecations\n\nThe admin interface is deprecated and will be removed in a subsequent release.  The configuration setting to enable the admin UI is now disabled by default, but can be enabled if necessary.  We recommend using [Chronograf](https://github.com/influxdata/chronograf) or [Grafana](https://github.com/grafana/grafana) as a replacement.\n\n### Configuration Changes\n\nThe following configuration changes may need to be changed before upgrading to `1.1.0` from prior versions.\n\n#### `[admin]` Section\n\n* `enabled` now defaults to false.  If you are currently using the admin interface, you will need to change this value to `true` to re-enable it.  The admin interface is currently deprecated and will be removed in a subsequent release.\n\n#### `[data]` Section\n\n* `max-values-per-tag` was added with a default of 100,000, but can be disabled by setting it to `0`.  
Existing measurements with tags that exceed this limit will continue to load, but writes that would cause the tags cardinality to increase will be dropped and a `partial write` error will be returned to the caller.  This limit can be used to prevent high cardinality tag values from being written to a measurement.\n* `cache-max-memory-size` has been increased from `524288000` to `1048576000`.  This setting is the maximum amount of RAM, in bytes, a shard cache can use before it rejects writes with an error.  Setting this value to `0` disables the limit.\n* `cache-snapshot-write-cold-duration` has been decreased from `1h` to `10m`.  This setting determines how long values will stay in the shard cache while the shard is cold for writes.\n* `compact-full-write-cold-duration` has been decreased from `24h` to `4h`.  The shorter duration allows cold shards to be compacted to an optimal state more quickly.\n\n### Features\n\nThe query language has been extended with a few new features:\n\n- [#7442](https://github.com/influxdata/influxdb/pull/7442): Support regex on fields keys in select clause\n- [#7403](https://github.com/influxdata/influxdb/pull/7403): New `linear` fill option\n- [#7388](https://github.com/influxdata/influxdb/pull/7388): New `cumulative_sum` function\n- [#7295](https://github.com/influxdata/influxdb/pull/7295): Support `ON` for `SHOW` commands\n\n\nAll Changes:\n\n- [#7496](https://github.com/influxdata/influxdb/pull/7496): Filter out series within shards that do not have data for that series.\n- [#7495](https://github.com/influxdata/influxdb/pull/7495): Rewrite regexes of the form host = /^server-a$/ to host = 'server-a', to take advantage of the tsdb index.\n- [#7480](https://github.com/influxdata/influxdb/pull/7480): Improve compaction planning performance by caching tsm file stats.\n- [#7473](https://github.com/influxdata/influxdb/pull/7473): Align binary math expression streams by time.\n- [#7470](https://github.com/influxdata/influxdb/pull/7470): 
Reduce map allocations when computing the TagSet of a measurement.\n- [#7463](https://github.com/influxdata/influxdb/pull/7463): Make input plugin services open/close idempotent.\n- [#7441](https://github.com/influxdata/influxdb/pull/7441): Speed up shutdown by closing shards concurrently.\n- [#7415](https://github.com/influxdata/influxdb/pull/7415): Add sample function to query language.\n- [#7403](https://github.com/influxdata/influxdb/pull/7403): Add `fill(linear)` to query language.\n- [#7388](https://github.com/influxdata/influxdb/pull/7388): Implement cumulative_sum() function.\n- [#7320](https://github.com/influxdata/influxdb/issues/7320): Update defaults in config for latest best practices\n- [#7305](https://github.com/influxdata/influxdb/pull/7305): UDP Client: Split large points. Thanks @vlasad\n- [#7281](https://github.com/influxdata/influxdb/pull/7281): Add stats for active compactions, compaction errors.\n- [#7268](https://github.com/influxdata/influxdb/pull/7268): More man pages for the other tools we package and compress man pages fully.\n- [#7146](https://github.com/influxdata/influxdb/issues/7146): Add max-values-per-tag to limit high tag cardinality data\n- [#7136](https://github.com/influxdata/influxdb/pull/7136): Update jwt-go dependency to version 3.\n- [#7135](https://github.com/influxdata/influxdb/pull/7135): Support enable HTTP service over unix domain socket. 
Thanks @oiooj\n- [#7120](https://github.com/influxdata/influxdb/issues/7120): Add additional statistics to query executor.\n- [#7115](https://github.com/influxdata/influxdb/issues/7115): Feature request: `influx inspect -export` should dump WAL files.\n- [#7099](https://github.com/influxdata/influxdb/pull/7099): Implement text/csv content encoding for the response writer.\n- [#6992](https://github.com/influxdata/influxdb/issues/6992): Support tools for running async queries.\n- [#6962](https://github.com/influxdata/influxdb/issues/6962): Support ON and use default database for SHOW commands.\n- [#6896](https://github.com/influxdata/influxdb/issues/6896): Correctly read in input from a non-interactive stream for the CLI.\n- [#6894](https://github.com/influxdata/influxdb/issues/6894): Support `INFLUX_USERNAME` and `INFLUX_PASSWORD` for setting username/password in the CLI.\n- [#6704](https://github.com/influxdata/influxdb/issues/6704): Optimize first/last when no group by interval is present.\n- [#5955](https://github.com/influxdata/influxdb/issues/5955): Make regex work on field and dimension keys in SELECT clause.\n- [#4461](https://github.com/influxdata/influxdb/issues/4461): Change default time boundaries for raw queries.\n- [#3634](https://github.com/influxdata/influxdb/issues/3634): Support mixed duration units.\n\n### Bugfixes\n\n- [#7606](https://github.com/influxdata/influxdb/pull/7606): Avoid deadlock when `max-row-limit` is hit.\n- [#7564](https://github.com/influxdata/influxdb/issues/7564): Fix incorrect grouping when multiple aggregates are used with sparse data.\n- [#7548](https://github.com/influxdata/influxdb/issues/7548): Fix output duration units for SHOW QUERIES.\n- [#7526](https://github.com/influxdata/influxdb/issues/7526): Truncate the version string when linking to the documentation.\n- [#7494](https://github.com/influxdata/influxdb/issues/7494): influx_inspect: export does not escape field keys.\n- 
[#7482](https://github.com/influxdata/influxdb/issues/7482): Fix issue where point would be written to wrong shard.\n- [#7448](https://github.com/influxdata/influxdb/pull/7448): Fix Retention Policy Inconsistencies\n- [#7436](https://github.com/influxdata/influxdb/issues/7436): Remove accidentally added string support for the stddev call.\n- [#7431](https://github.com/influxdata/influxdb/issues/7431): Remove /data/process_continuous_queries endpoint.\n- [#7392](https://github.com/influxdata/influxdb/pull/7392): Enable https subscriptions to work with custom CA certificates.\n- [#7385](https://github.com/influxdata/influxdb/pull/7385): Reduce query planning allocations\n- [#7382](https://github.com/influxdata/influxdb/issues/7382): Shard stats include wal path tag so disk bytes make more sense.\n- [#7334](https://github.com/influxdata/influxdb/issues/7334): Panic with unread show series iterators during drop database\n- [#7297](https://github.com/influxdata/influxdb/issues/7297): Use consistent column output from the CLI for column formatted responses.\n- [#7285](https://github.com/influxdata/influxdb/issues/7285): Correctly use password-type field in Admin UI. 
Thanks @dandv!\n- [#7231](https://github.com/influxdata/influxdb/issues/7231): Duplicate parsing bug in ALTER RETENTION POLICY.\n- [#7226](https://github.com/influxdata/influxdb/issues/7226): Fix database locked up when deleting shards\n- [#7196](https://github.com/influxdata/influxdb/issues/7196): Fix mmap dereferencing, fixes #7183, #7180\n- [#7177](https://github.com/influxdata/influxdb/issues/7177): Fix base64 encoding issue with /debug/vars stats.\n- [#7161](https://github.com/influxdata/influxdb/issues/7161): Drop measurement causes cache max memory exceeded error.\n- [#7152](https://github.com/influxdata/influxdb/issues/7152): Decrement number of measurements only once when deleting the last series from a measurement.\n- [#7053](https://github.com/influxdata/influxdb/issues/7053): Delete statement returns an error when retention policy or database is specified\n- [#7013](https://github.com/influxdata/influxdb/issues/7013): Fix the dollar sign so it properly handles reserved keywords.\n- [#2792](https://github.com/influxdata/influxdb/issues/2792): Exceeding max retention policy duration gives incorrect error message\n- [#1834](https://github.com/influxdata/influxdb/issues/1834): Drop time when used as a tag or field key.\n\n## v1.0.2 [2016-10-05]\n\n### Bugfixes\n\n- [#7391](https://github.com/influxdata/influxdb/issues/7391): Fix RLE integer decoding producing negative numbers\n- [#7335](https://github.com/influxdata/influxdb/pull/7335): Avoid stat syscall when planning compactions\n- [#7330](https://github.com/influxdata/influxdb/issues/7330): Subscription data loss under high write load\n- [#7150](https://github.com/influxdata/influxdb/issues/7150): Do not automatically reset the shard duration when using ALTER RETENTION POLICY\n- [#5878](https://github.com/influxdata/influxdb/issues/5878): Ensure correct shard groups created when retention policy has been altered.\n\n## v1.0.1 [2016-09-26]\n\n### Bugfixes\n\n- 
[#7315](https://github.com/influxdata/influxdb/issues/7315): Prevent users from manually using system queries since incorrect use would result in a panic.\n- [#7299](https://github.com/influxdata/influxdb/issues/7299): Ensure fieldsCreated stat available in shard measurement.\n- [#7272](https://github.com/influxdata/influxdb/issues/7272): Report cmdline and memstats in /debug/vars.\n- [#7271](https://github.com/influxdata/influxdb/issues/7271): Fixing typo within example configuration file. Thanks @andyfeller!\n- [#7270](https://github.com/influxdata/influxdb/issues/7270): Implement time math for lazy time literals.\n- [#7226](https://github.com/influxdata/influxdb/issues/7226): Fix database locked up when deleting shards\n- [#7110](https://github.com/influxdata/influxdb/issues/7110): Skip past points at the same time in derivative call within a merged series.\n- [#6846](https://github.com/influxdata/influxdb/issues/6846): Read an invalid JSON response as an error in the influx client.\n\n## v1.0.0 [2016-09-08]\n\n### Release Notes\n\n### Breaking changes\n\n* `max-series-per-database` was added with a default of 1M but can be disabled by setting it to `0`. Existing databases with series that exceed this limit will continue to load but writes that would create new series will fail.\n* Config option `[cluster]` has been replaced with `[coordinator]`\n* Support for config options `[collectd]` and `[opentsdb]` has been removed; use `[[collectd]]` and `[[opentsdb]]` instead.\n* Config option `data-logging-enabled` within the `[data]` section, has been renamed to `trace-logging-enabled`, and defaults to `false`.\n* The keywords `IF`, `EXISTS`, and `NOT` where removed for this release.  This means you no longer need to specify `IF NOT EXISTS` for `DROP DATABASE` or `IF EXISTS` for `CREATE DATABASE`.  
If these are specified, a query parse error is returned.\n* The Shard `writePointsFail` stat has been renamed to `writePointsErr` for consistency with other stats.\n\nWith this release the systemd configuration files for InfluxDB will use the system configured default for logging and will no longer write files to `/var/log/influxdb` by default. On most systems, the logs will be directed to the systemd journal and can be accessed by `journalctl -u influxdb.service`. Consult the systemd journald documentation for configuring journald.\n\n### Features\n\n- [#7199](https://github.com/influxdata/influxdb/pull/7199): Add mode function. Thanks @agaurav.\n- [#7194](https://github.com/influxdata/influxdb/issues/7194): Support negative timestamps for the query engine.\n- [#7172](https://github.com/influxdata/influxdb/pull/7172): Write path stats\n- [#7095](https://github.com/influxdata/influxdb/pull/7095): Add MaxSeriesPerDatabase config setting.\n- [#7065](https://github.com/influxdata/influxdb/issues/7065): Remove IF EXISTS/IF NOT EXISTS from influxql language.\n- [#7050](https://github.com/influxdata/influxdb/pull/7050): Update go package library dependencies.\n- [#7046](https://github.com/influxdata/influxdb/pull/7046): Add tsm file export to influx_inspect tool.\n- [#7011](https://github.com/influxdata/influxdb/issues/7011): Create man pages for commands.\n- [#6959](https://github.com/influxdata/influxdb/issues/6959): Return 403 Forbidden when authentication succeeds but authorization fails.\n- [#6938](https://github.com/influxdata/influxdb/issues/6938): Added favicon\n- [#6928](https://github.com/influxdata/influxdb/issues/6928): Run continuous query for multiple buckets rather than one per bucket.\n- [#6909](https://github.com/influxdata/influxdb/issues/6909): Log the CQ execution time when continuous query logging is enabled.\n- [#6900](https://github.com/influxdata/influxdb/pull/6900): Trim BOM from Windows Notepad-saved config files.\n- 
[#6889](https://github.com/influxdata/influxdb/pull/6889): Update help and remove unused config options from the configuration file.\n- [#6820](https://github.com/influxdata/influxdb/issues/6820): Add NodeID to execution options\n- [#6812](https://github.com/influxdata/influxdb/pull/6812): Make httpd logger closer to Common (& combined) Log Format.\n- [#6805](https://github.com/influxdata/influxdb/issues/6805): Allow any variant of the help option to trigger the help.\n- [#6713](https://github.com/influxdata/influxdb/pull/6713): Reduce allocations during query parsing.\n- [#6686](https://github.com/influxdata/influxdb/pull/6686): Optimize timestamp run-length decoding\n- [#6664](https://github.com/influxdata/influxdb/pull/6664): Adds monitoring statistic for on-disk shard size.\n- [#6655](https://github.com/influxdata/influxdb/issues/6655): Add HTTP(s) based subscriptions.\n- [#6654](https://github.com/influxdata/influxdb/pull/6654): Add new HTTP statistics to monitoring\n- [#6623](https://github.com/influxdata/influxdb/pull/6623): Speed up drop database\n- [#6621](https://github.com/influxdata/influxdb/pull/6621): Add Holt-Winter forecasting function.\n- [#6609](https://github.com/influxdata/influxdb/pull/6609): Add support for JWT token authentication.\n- [#6593](https://github.com/influxdata/influxdb/pull/6593): Add ability to create snapshots of shards.\n- [#6585](https://github.com/influxdata/influxdb/pull/6585): Parallelize iterators\n- [#6559](https://github.com/influxdata/influxdb/issues/6559): Teach the http service how to enforce connection limits.\n- [#6519](https://github.com/influxdata/influxdb/issues/6519): Support cast syntax for selecting a specific type.\n- [#6507](https://github.com/influxdata/influxdb/issues/6507): Refactor monitor service to avoid expvar and write monitor statistics on a truncated time interval.\n- [#5906](https://github.com/influxdata/influxdb/issues/5906): Dynamically update the documentation link in the admin UI.\n- 
[#5750](https://github.com/influxdata/influxdb/issues/5750): Support wildcards in aggregate functions.\n- [#5655](https://github.com/influxdata/influxdb/issues/5655): Support specifying a retention policy for the graphite service.\n- [#5500](https://github.com/influxdata/influxdb/issues/5500): Add extra trace logging to tsm engine.\n- [#5499](https://github.com/influxdata/influxdb/issues/5499): Add stats and diagnostics to the TSM engine.\n- [#4532](https://github.com/influxdata/influxdb/issues/4532): Support regex selection in SHOW TAG VALUES for the key.\n- [#3733](https://github.com/influxdata/influxdb/issues/3733): Modify the default retention policy name and make it configurable.\n- [#3541](https://github.com/influxdata/influxdb/issues/3451): Update SHOW FIELD KEYS to return the field type with the field key.\n- [#2926](https://github.com/influxdata/influxdb/issues/2926): Support bound parameters in the parser.\n- [#1310](https://github.com/influxdata/influxdb/issues/1310): Add https-private-key option to httpd config.\n- [#1110](https://github.com/influxdata/influxdb/issues/1110): Support loading a folder for collectd typesdb files.\n\n### Bugfixes\n\n- [#7243](https://github.com/influxdata/influxdb/issues/7243): Optimize queries that compare a tag value to an empty string.\n- [#7240](https://github.com/influxdata/influxdb/issues/7240): Allow blank lines in the line protocol input.\n- [#7225](https://github.com/influxdata/influxdb/issues/7225): runtime: goroutine stack exceeds 1000000000-byte limit\n- [#7218](https://github.com/influxdata/influxdb/issues/7218): Fix alter retention policy when all options are used.\n- [#7127](https://github.com/influxdata/influxdb/pull/7127): Concurrent series limit\n- [#7125](https://github.com/influxdata/influxdb/pull/7125): Ensure gzip writer is closed in influx_inspect export\n- [#7119](https://github.com/influxdata/influxdb/pull/7119): Fix CREATE DATABASE when dealing with default values.\n- 
[#7088](https://github.com/influxdata/influxdb/pull/7088): Fix UDP pointsRx being incremented twice.\n- [#7084](https://github.com/influxdata/influxdb/pull/7084): Tombstone memory improvements\n- [#7081](https://github.com/influxdata/influxdb/issues/7081): Hardcode auto generated RP names to autogen\n- [#7080](https://github.com/influxdata/influxdb/pull/7080): Ensure IDs can't clash when managing Continuous Queries.\n- [#7074](https://github.com/influxdata/influxdb/issues/7074): Continuous full compactions\n- [#7043](https://github.com/influxdata/influxdb/pull/7043): Remove limiter from walkShards\n- [#7032](https://github.com/influxdata/influxdb/pull/7032): Copy tags in influx_stress to avoid a concurrent write panic on a map.\n- [#7028](https://github.com/influxdata/influxdb/pull/7028): Do not run continuous queries that have no time span.\n- [#7025](https://github.com/influxdata/influxdb/issues/7025): Move the CQ interval by the group by offset.\n- [#6990](https://github.com/influxdata/influxdb/issues/6990): Fix panic parsing empty key\n- [#6986](https://github.com/influxdata/influxdb/pull/6986): update connection settings when changing hosts in cli.\n- [#6968](https://github.com/influxdata/influxdb/issues/6968): Always use the demo config when outputting a new config.\n- [#6965](https://github.com/influxdata/influxdb/pull/6965): Minor improvements to init script. 
Removes sysvinit-utils as package dependency.\n- [#6952](https://github.com/influxdata/influxdb/pull/6952): Fix compaction planning with large TSM files\n- [#6946](https://github.com/influxdata/influxdb/issues/6946): Duplicate data for the same timestamp\n- [#6942](https://github.com/influxdata/influxdb/pull/6942): Fix panic: truncate the slice when merging the caches.\n- [#6934](https://github.com/influxdata/influxdb/pull/6934): Fix regex binary encoding for a measurement.\n- [#6911](https://github.com/influxdata/influxdb/issues/6911): Fix fill(previous) when used with math operators.\n- [#6883](https://github.com/influxdata/influxdb/pull/6883): Rename dumptsmdev to dumptsm in influx_inspect.\n- [#6882](https://github.com/influxdata/influxdb/pull/6882): Remove a double lock in the tsm1 index writer.\n- [#6869](https://github.com/influxdata/influxdb/issues/6869): Remove FieldCodec from tsdb package.\n- [#6864](https://github.com/influxdata/influxdb/pull/6864): Allow a non-admin to call \"use\" for the influx cli.\n- [#6859](https://github.com/influxdata/influxdb/issues/6859): Set the condition cursor instead of aux iterator when creating a nil condition cursor.\n- [#6855](https://github.com/influxdata/influxdb/pull/6855): Update `stress/v2` to work with clusters, ssl, and username/password auth. 
Code cleanup\n- [#6850](https://github.com/influxdata/influxdb/pull/6850): Modify the max nanosecond time to be one nanosecond less.\n- [#6835](https://github.com/influxdata/influxdb/pull/6835): Include sysvinit-tools as an rpm dependency.\n- [#6834](https://github.com/influxdata/influxdb/pull/6834): Add port to all graphite log output to help with debugging multiple endpoints\n- [#6829](https://github.com/influxdata/influxdb/issues/6829): Fix panic: runtime error: index out of range\n- [#6824](https://github.com/influxdata/influxdb/issues/6824): Remove systemd output redirection.\n- [#6819](https://github.com/influxdata/influxdb/issues/6819): Database unresponsive after DROP MEASUREMENT\n- [#6796](https://github.com/influxdata/influxdb/issues/6796): Out of Memory Error when Dropping Measurement\n- [#6771](https://github.com/influxdata/influxdb/issues/6771): Fix the point validation parser to identify and sort tags correctly.\n- [#6760](https://github.com/influxdata/influxdb/issues/6760): Prevent panic in concurrent auth cache write\n- [#6756](https://github.com/influxdata/influxdb/issues/6756): Set X-Influxdb-Version header on every request (even 404 requests).\n- [#6753](https://github.com/influxdata/influxdb/issues/6753): Prevent panic if there are no values.\n- [#6738](https://github.com/influxdata/influxdb/issues/6738): Time sorting broken with overwritten points\n- [#6727](https://github.com/influxdata/influxdb/issues/6727): queries with strings that look like dates end up with date types, not string types\n- [#6720](https://github.com/influxdata/influxdb/issues/6720): Concurrent map read write panic. 
Thanks @arussellsaw\n- [#6708](https://github.com/influxdata/influxdb/issues/6708): Drop writes from before the retention policy time window.\n- [#6702](https://github.com/influxdata/influxdb/issues/6702): Fix SELECT statement required privileges.\n- [#6701](https://github.com/influxdata/influxdb/issues/6701): Filter out sources that do not match the shard database/retention policy.\n- [#6693](https://github.com/influxdata/influxdb/pull/6693): Truncate the shard group end time if it exceeds MaxNanoTime.\n- [#6685](https://github.com/influxdata/influxdb/issues/6685): Batch SELECT INTO / CQ writes\n- [#6683](https://github.com/influxdata/influxdb/issues/6683): Fix compaction planning re-compacting large TSM files\n- [#6676](https://github.com/influxdata/influxdb/issues/6676): Ensures client sends correct precision when inserting points.\n- [#6672](https://github.com/influxdata/influxdb/issues/6672): Accept points with trailing whitespace.\n- [#6663](https://github.com/influxdata/influxdb/issues/6663): Fixing panic in SHOW FIELD KEYS.\n- [#6661](https://github.com/influxdata/influxdb/issues/6661): Disable limit optimization when using an aggregate.\n- [#6652](https://github.com/influxdata/influxdb/issues/6652): Fix panic: interface conversion: tsm1.Value is \\*tsm1.StringValue, not \\*tsm1.FloatValue\n- [#6650](https://github.com/influxdata/influxdb/issues/6650): Data race when dropping a database immediately after writing to it\n- [#6648](https://github.com/influxdata/influxdb/issues/6648): Make sure admin exists before authenticating query.\n- [#6644](https://github.com/influxdata/influxdb/issues/6644): Print the query executor's stack trace on a panic to the log.\n- [#6641](https://github.com/influxdata/influxdb/issues/6641): Fix read tombstones: EOF\n- [#6629](https://github.com/influxdata/influxdb/issues/6629): query-log-enabled in config not ignored anymore.\n- [#6624](https://github.com/influxdata/influxdb/issues/6624): Ensure clients requesting gzip encoded 
bodies don't receive empty body\n- [#6618](https://github.com/influxdata/influxdb/pull/6618): Optimize shard loading\n- [#6611](https://github.com/influxdata/influxdb/issues/6611): Queries slow down hundreds times after overwriting points\n- [#6607](https://github.com/influxdata/influxdb/issues/6607): SHOW TAG VALUES accepts != and !~ in WHERE clause.\n- [#6604](https://github.com/influxdata/influxdb/pull/6604): Remove old cluster code\n- [#6599](https://github.com/influxdata/influxdb/issues/6599): Ensure that future points considered in SHOW queries.\n- [#6595](https://github.com/influxdata/influxdb/issues/6595): Fix full compactions conflicting with level compactions\n- [#6557](https://github.com/influxdata/influxdb/issues/6557): Overwriting points on large series can cause memory spikes during compactions\n- [#6543](https://github.com/influxdata/influxdb/issues/6543): Fix parseFill to check for fill ident before attempting to parse an expression.\n- [#6406](https://github.com/influxdata/influxdb/issues/6406): Max index entries exceeded\n- [#6250](https://github.com/influxdata/influxdb/issues/6250): Slow startup time\n- [#6235](https://github.com/influxdata/influxdb/issues/6235): Fix measurement field panic in tsm1 engine.\n- [#5501](https://github.com/influxdata/influxdb/issues/5501): Queries against files that have just been compacted need to point to new files\n- [#2048](https://github.com/influxdata/influxdb/issues/2048): Check that retention policies exist before creating CQ\n\n## v0.13.0 [2016-05-12]\n\n### Release Notes\n\nWith this release InfluxDB is moving to Go v1.6.\n\n### Features\n\n- [#6534](https://github.com/influxdata/influxdb/pull/6534): Move to Go v1.6.2 (over Go v1.4.3)\n- [#6533](https://github.com/influxdata/influxdb/issues/6533): Optimize SHOW SERIES\n- [#6522](https://github.com/influxdata/influxdb/pull/6522): Dump TSM files to line protocol\n- [#6502](https://github.com/influxdata/influxdb/pull/6502): Add ability to copy shard via rpc 
calls.  Remove deprecated copier service.\n- [#6494](https://github.com/influxdata/influxdb/issues/6494): Support booleans for min() and max().\n- [#6484](https://github.com/influxdata/influxdb/pull/6484): Query language support for DELETE\n- [#6483](https://github.com/influxdata/influxdb/pull/6483): Delete series support for TSM\n- [#6444](https://github.com/influxdata/influxdb/pull/6444): Allow setting the config path through an environment variable and default config path.\n- [#6429](https://github.com/influxdata/influxdb/issues/6429): Log slow queries if they pass a configurable threshold.\n- [#6394](https://github.com/influxdata/influxdb/pull/6394): Allow time math with integer timestamps.\n- [#6334](https://github.com/influxdata/influxdb/pull/6334): Allow environment variables to be set per input type.\n- [#6292](https://github.com/influxdata/influxdb/issues/6292): Allow percentile to be used as a selector.\n- [#6290](https://github.com/influxdata/influxdb/issues/6290): Add POST /query endpoint and warning messages for using GET with write operations.\n- [#6263](https://github.com/influxdata/influxdb/pull/6263): Reduce UDP Service allocation size.\n- [#6237](https://github.com/influxdata/influxdb/issues/6237): Enable continuous integration testing on Windows platform via AppVeyor. 
Thanks @mvadu\n- [#6228](https://github.com/influxdata/influxdb/pull/6228): Support for multiple listeners for collectd and OpenTSDB inputs.\n- [#6213](https://github.com/influxdata/influxdb/pull/6213): Make logging output location more programmatically configurable.\n- [#5707](https://github.com/influxdata/influxdb/issues/5707): Return a deprecated message when IF NOT EXISTS is used.\n- [#5502](https://github.com/influxdata/influxdb/issues/5502): Add checksum verification to TSM inspect tool\n- [#4675](https://github.com/influxdata/influxdb/issues/4675): Allow derivative() function to be used with ORDER BY desc.\n- [#3558](https://github.com/influxdata/influxdb/issues/3558): Support field math inside a WHERE clause.\n- [#3247](https://github.com/influxdata/influxdb/issues/3247): Implement derivatives across intervals for aggregate queries.\n- [#3166](https://github.com/influxdata/influxdb/issues/3166): Sort the series keys inside of a tag set so output is deterministic.\n- [#2074](https://github.com/influxdata/influxdb/issues/2074): Support offset argument in the GROUP BY time(...) 
call.\n- [#1856](https://github.com/influxdata/influxdb/issues/1856): Add `elapsed` function that returns the time delta between subsequent points.\n\n### Bugfixes\n\n- [#6505](https://github.com/influxdata/influxdb/issues/6505): Add regex literal to InfluxQL spec for FROM clause.\n- [#6496](https://github.com/influxdata/influxdb/issues/6496): Fix parsing escaped series key when loading database index\n- [#6495](https://github.com/influxdata/influxdb/issues/6495): Fix aggregate returns when data is missing from some shards.\n- [#6491](https://github.com/influxdata/influxdb/pull/6491): Fix the CLI not to enter an infinite loop when the liner has an error.\n- [#6480](https://github.com/influxdata/influxdb/issues/6480): Fix SHOW statements' rewriting bug\n- [#6477](https://github.com/influxdata/influxdb/pull/6477): Don't catch SIGQUIT or SIGHUP signals.\n- [#6470](https://github.com/influxdata/influxdb/pull/6470): Remove SHOW SERVERS & DROP SERVER support\n- [#6468](https://github.com/influxdata/influxdb/issues/6468): Panic with truncated wal segments\n- [#6462](https://github.com/influxdata/influxdb/pull/6462): Add safer locking to CreateFieldIfNotExists\n- [#6458](https://github.com/influxdata/influxdb/pull/6458): Make it clear when the CLI version is unknown.\n- [#6457](https://github.com/influxdata/influxdb/issues/6457): Retention policy cleanup does not remove series\n- [#6439](https://github.com/influxdata/influxdb/issues/6439): Overwriting points returning old values\n- [#6427](https://github.com/influxdata/influxdb/pull/6427): Fix setting uint config options via env vars\n- [#6425](https://github.com/influxdata/influxdb/pull/6425): Close idle tcp connections in HTTP client to prevent tcp conn leak.\n- [#6419](https://github.com/influxdata/influxdb/issues/6419): Fix panic in transform iterator on division. 
@thbourlove\n- [#6398](https://github.com/influxdata/influxdb/issues/6398): Fix CREATE RETENTION POLICY parsing so it doesn't consume tokens it shouldn't.\n- [#6382](https://github.com/influxdata/influxdb/pull/6382): Removed dead code from the old query engine.\n- [#6361](https://github.com/influxdata/influxdb/pull/6361): Fix cluster/pool release of connection\n- [#6296](https://github.com/influxdata/influxdb/issues/6296): Allow the implicit time field to be renamed again.\n- [#6294](https://github.com/influxdata/influxdb/issues/6294): Fix panic running influx_inspect info.\n- [#6287](https://github.com/influxdata/influxdb/issues/6287): Fix data race in Influx Client.\n- [#6283](https://github.com/influxdata/influxdb/pull/6283): Fix GROUP BY tag to produce consistent results when a series has no tags.\n- [#6277](https://github.com/influxdata/influxdb/pull/6277): Fix deadlock in tsm1/file_store\n- [#6270](https://github.com/influxdata/influxdb/issues/6270): tsm1 query engine alloc reduction\n- [#6261](https://github.com/influxdata/influxdb/issues/6261): High CPU usage and slow query with DISTINCT\n- [#6252](https://github.com/influxdata/influxdb/pull/6252): Remove TSDB listener accept message @simnv\n- [#6202](https://github.com/influxdata/influxdb/pull/6202): Check default SHARD DURATION when recreating the same database.\n- [#6109](https://github.com/influxdata/influxdb/issues/6109): Cache maximum memory size exceeded on startup\n- [#5890](https://github.com/influxdata/influxdb/issues/5890): Return the time with a selector when there is no group by interval.\n- [#3883](https://github.com/influxdata/influxdb/issues/3883): Improve query sanitization to prevent a password leak in the logs.\n- [#3773](https://github.com/influxdata/influxdb/issues/3773): Support empty tags for all WHERE equality operations.\n- [#3369](https://github.com/influxdata/influxdb/issues/3369): Detect when a timer literal will overflow or underflow the query engine.\n\n## v0.12.2 
[2016-04-20]\n\n### Bugfixes\n\n- [#6431](https://github.com/influxdata/influxdb/pull/6431): Fix panic in transform iterator on division. @thbourlove\n- [#6414](https://github.com/influxdata/influxdb/pull/6414): Send \"Connection: close\" header for queries.\n- [#6413](https://github.com/influxdata/influxdb/pull/6413): Prevent goroutine leak from persistent http connections. Thanks @aaronknister.\n- [#6383](https://github.com/influxdata/influxdb/pull/6383): Recover from a panic during query execution.\n- [#6379](https://github.com/influxdata/influxdb/issues/6379): Validate the first argument to percentile() is a variable.\n- [#6271](https://github.com/influxdata/influxdb/issues/6271): Fixed deadlock in tsm1 file store.\n\n## v0.12.1 [2016-04-08]\n\n### Bugfixes\n\n- [#6257](https://github.com/influxdata/influxdb/issues/6257): CreateShardGroup was incrementing meta data index even when it was idempotent.\n- [#6248](https://github.com/influxdata/influxdb/issues/6248): Panic using incorrectly quoted \"queries\" field key.\n- [#6229](https://github.com/influxdata/influxdb/issues/6229): Fixed aggregate queries with no GROUP BY to include the end time.\n- [#6225](https://github.com/influxdata/influxdb/pull/6225): Refresh admin assets.\n- [#6223](https://github.com/influxdata/influxdb/issues/6223): Failure to start/run on Windows. Thanks @mvadu\n- [#6206](https://github.com/influxdata/influxdb/issues/6206): Handle nil values from the tsm1 cursor correctly.\n- [#6190](https://github.com/influxdata/influxdb/pull/6190): Fix race on measurementFields.\n\n\n## v0.12.0 [2016-04-05]\n### Release Notes\nUpgrading to this release requires a little more than just installing the new binary and starting it up. The upgrade process is very quick and should only require a minute of downtime or less. Details on [upgrading to 0.12 are here](https://docs.influxdata.com/influxdb/v0.12/administration/upgrading/).\n\nThis release removes all of the old clustering code. 
It operates as a standalone server. For a free open source HA setup see the [InfluxDB Relay](https://github.com/influxdata/influxdb-relay).\n\n### Features\n\n- [#6193](https://github.com/influxdata/influxdb/pull/6193): Fix TypeError when processing empty results in admin UI. Thanks @jonseymour!\n- [#6166](https://github.com/influxdata/influxdb/pull/6166): Teach influxdb client how to use chunked queries and use in the CLI.\n- [#6158](https://github.com/influxdata/influxdb/pull/6158): Update influxd to detect an upgrade from `0.11` to `0.12`.  Minor restore bug fixes.\n- [#6149](https://github.com/influxdata/influxdb/pull/6149): Kill running queries when server is shutdown.\n- [#6148](https://github.com/influxdata/influxdb/pull/6148): Build script is now compatible with Python 3. Added ability to create detached signatures for packages. Build script now uses Python logging facility for messages.\n- [#6116](https://github.com/influxdata/influxdb/pull/6116): Allow `httpd` service to be extensible for routes\n- [#6115](https://github.com/influxdata/influxdb/issues/6115): Support chunking query results mid-series. Limit non-chunked output.\n- [#6112](https://github.com/influxdata/influxdb/issues/6112): Implement simple moving average function.\n- [#6111](https://github.com/influxdata/influxdb/pull/6111): Add ability to build static assets. 
Improved handling of TAR and ZIP package outputs.\n- [#6102](https://github.com/influxdata/influxdb/issues/6102): Limit series count in selection\n- [#6079](https://github.com/influxdata/influxdb/issues/6079): Limit the maximum number of concurrent queries.\n- [#6078](https://github.com/influxdata/influxdb/issues/6078): Limit bucket count in selection.\n- [#6077](https://github.com/influxdata/influxdb/issues/6077): Limit point count in selection.\n- [#6075](https://github.com/influxdata/influxdb/issues/6075): Limit the maximum running time of a query.\n- [#6073](https://github.com/influxdata/influxdb/pull/6073): Iterator stats\n- [#6060](https://github.com/influxdata/influxdb/pull/6060): Add configurable shard duration to retention policies\n- [#6025](https://github.com/influxdata/influxdb/pull/6025): Remove deprecated JSON write path.\n- [#6012](https://github.com/influxdata/influxdb/pull/6012): Add DROP SHARD support.\n- [#5939](https://github.com/influxdata/influxdb/issues/5939): Support viewing and killing running queries.\n- [#5744](https://github.com/influxdata/influxdb/issues/5744): Add integer literal support to the query language.\n- [#5372](https://github.com/influxdata/influxdb/pull/5372): Faster shard loading\n- [#1825](https://github.com/influxdata/influxdb/issues/1825): Implement difference function.\n\n### Bugfixes\n\n- [#6178](https://github.com/influxdata/influxdb/issues/6178): Ensure SHARD DURATION is checked when recreating a retention policy\n- [#6153](https://github.com/influxdata/influxdb/issues/6153): Check SHARD DURATION when recreating the same database\n- [#6152](https://github.com/influxdata/influxdb/issues/6152): Allow SHARD DURATION to be specified in isolation when creating a database\n- [#6140](https://github.com/influxdata/influxdb/issues/6140): Ensure Shard engine not accessed when closed.\n- [#6131](https://github.com/influxdata/influxdb/issues/6131): Fix write throughput regression with large number of measurements\n- 
[#6110](https://github.com/influxdata/influxdb/issues/6110): Fix for 0.9 upgrade path when using RPM\n- [#6094](https://github.com/influxdata/influxdb/issues/6094): Ensure CREATE RETENTION POLICY and CREATE CONTINUOUS QUERY are idempotent in the correct way.\n- [#6065](https://github.com/influxdata/influxdb/pull/6065):  Wait for a process termination on influxdb restart @simnv\n- [#6061](https://github.com/influxdata/influxdb/issues/6061): [0.12 / master] POST to /write does not write points if request has header 'Content-Type: application/x-www-form-urlencoded'\n- [#5728](https://github.com/influxdata/influxdb/issues/5728): Properly handle semi-colons as part of the main query loop.\n- [#5554](https://github.com/influxdata/influxdb/issues/5554): Can't run in alpine linux\n- [#5252](https://github.com/influxdata/influxdb/issues/5252): Release tarballs contain specific attributes on '.'\n- [#5152](https://github.com/influxdata/influxdb/issues/5152): Fix where filters when a tag and a filter are combined with OR.\n\n## v0.11.1 [2016-03-31]\n\n### Bugfixes\n\n- [#6168](https://github.com/influxdata/influxdb/pull/6168): Remove per measurement statistics\n- [#6129](https://github.com/influxdata/influxdb/pull/6129): Fix default continuous query lease host\n- [#6121](https://github.com/influxdata/influxdb/issues/6121): Fix panic: slice index out of bounds in TSM index\n- [#6092](https://github.com/influxdata/influxdb/issues/6092): Upgrading directly from 0.9.6.1 to 0.11.0 fails\n- [#3932](https://github.com/influxdata/influxdb/issues/3932): Invalid timestamp format should throw an error.\n\n## v0.11.0 [2016-03-22]\n\n### Release Notes\n\nThere were some important breaking changes in this release. Here's a list of the important things to know before upgrading:\n\n* [SHOW SERIES output has changed](https://github.com/influxdata/influxdb/pull/5937). 
See [new output in this test diff](https://github.com/influxdata/influxdb/pull/5937/files#diff-0cb24c2b7420b4db507ee3496c371845L263).\n* [SHOW TAG VALUES output has changed](https://github.com/influxdata/influxdb/pull/5853)\n* JSON write endpoint is disabled by default and will be removed in the next release. You can [turn it back on](https://github.com/influxdata/influxdb/pull/5512) in this release.\n* b1/bz1 shards are no longer supported. You must migrate all old shards to TSM using [the migration tool](https://github.com/influxdata/influxdb/blob/master/cmd/influx_tsm/README.md).\n* On queries to create databases, retention policies, and users, the default behavior has changed to create `IF NOT EXISTS`. If they already exist, no error will be returned.\n* On queries with a selector like `min`, `max`, `first`, and `last` the time returned will be the time for the bucket of the group by window. [Selectors for the time for the specific point](https://github.com/influxdata/influxdb/issues/5926) will be added later.\n\n### Features\n\n- [#5994](https://github.com/influxdata/influxdb/issues/5994): Single server\n- [#5862](https://github.com/influxdata/influxdb/pull/5862): Make Admin UI dynamically fetch both client and server versions\n- [#5844](https://github.com/influxdata/influxdb/pull/5844): Tag TSM engine stats with database and retention policy\n- [#5758](https://github.com/influxdata/influxdb/pull/5758): TSM engine stats for cache, WAL, and filestore. Thanks @jonseymour\n- [#5737](https://github.com/influxdata/influxdb/pull/5737): Admin UI: Display results of multiple queries, not just the first query. 
Thanks @Vidhuran!\n- [#5720](https://github.com/influxdata/influxdb/pull/5720): Admin UI: New button to generate permalink to queries\n- [#5706](https://github.com/influxdata/influxdb/pull/5706): Cluster setup cleanup\n- [#5691](https://github.com/influxdata/influxdb/pull/5691): Remove associated shard data when retention policies are dropped.\n- [#5681](https://github.com/influxdata/influxdb/pull/5681): Stats: Add durations, number currently active to httpd and query executor\n- [#5666](https://github.com/influxdata/influxdb/pull/5666): Manage dependencies with gdm\n- [#5602](https://github.com/influxdata/influxdb/pull/5602): Simplify cluster startup for scripting and deployment\n- [#5598](https://github.com/influxdata/influxdb/pull/5598): Client: Add Ping to v2 client @PSUdaemon\n- [#5596](https://github.com/influxdata/influxdb/pull/5596): Build improvements for ARM architectures. Also removed `--goarm` and `--pkgarch` build flags.\n- [#5593](https://github.com/influxdata/influxdb/issues/5593): Modify `SHOW TAG VALUES` output for the new query engine to normalize the output.\n- [#5562](https://github.com/influxdata/influxdb/pull/5562): Graphite: Support matching fields multiple times (@chrusty)\n- [#5550](https://github.com/influxdata/influxdb/pull/5550): Enabled golint for tsdb/engine/wal. @gabelev\n- [#5541](https://github.com/influxdata/influxdb/pull/5541): Client: Support for adding custom TLS Config for HTTP client.\n- [#5512](https://github.com/influxdata/influxdb/pull/5512): HTTP: Add config option to enable HTTP JSON write path which is now disabled by default.\n- [#5419](https://github.com/influxdata/influxdb/pull/5419): Graphite: Support matching tags multiple times Thanks @m4ce\n- [#5336](https://github.com/influxdata/influxdb/pull/5366): Enabled golint for influxql. @gabelev\n- [#4299](https://github.com/influxdata/influxdb/pull/4299): Client: Reject uint64 Client.Point.Field values. 
Thanks @arussellsaw\n- [#4125](https://github.com/influxdata/influxdb/pull/4125): Admin UI: Fetch and display server version on connect. Thanks @alexiri!\n- [#2715](https://github.com/influxdata/influxdb/issues/2715): Support using field regex comparisons in the WHERE clause\n\n### Bugfixes\n\n- [#6042](https://github.com/influxdata/influxdb/issues/6042): CreateDatabase failure on Windows, regression from v0.11.0 RC @mvadu\n- [#6006](https://github.com/influxdata/influxdb/pull/6006): Fix deadlock while running backups\n- [#5965](https://github.com/influxdata/influxdb/issues/5965): InfluxDB panic crashes while parsing \"-\" as Float\n- [#5963](https://github.com/influxdata/influxdb/pull/5963): Fix possible deadlock\n- [#5949](https://github.com/influxdata/influxdb/issues/5949): Return error message when improper types are used in SELECT\n- [#5937](https://github.com/influxdata/influxdb/pull/5937): Rewrite SHOW SERIES to use query engine\n- [#5924](https://github.com/influxdata/influxdb/issues/5924): Missing data after using influx\\_tsm\n- [#5889](https://github.com/influxdata/influxdb/issues/5889): Fix writing partial TSM index when flush file fails\n- [#5880](https://github.com/influxdata/influxdb/issues/5880): TCP connection closed after write (regression/change from 0.9.6)\n- [#5865](https://github.com/influxdata/influxdb/issues/5865): Conversion to tsm fails with exceeds max index value\n- [#5854](https://github.com/influxdata/influxdb/issues/5854): failures of tests in tsdb/engine/tsm1 when compiled with go master\n- [#5842](https://github.com/influxdata/influxdb/issues/5842): Add SeriesList binary marshaling\n- [#5841](https://github.com/influxdata/influxdb/pull/5841): Reduce tsm allocations by converting time.Time to int64\n- [#5835](https://github.com/influxdata/influxdb/issues/5835): Make CREATE USER default to IF NOT EXISTS\n- [#5832](https://github.com/influxdata/influxdb/issues/5832): tsm: cache: need to check that snapshot has been sorted 
@jonseymour\n- [#5814](https://github.com/influxdata/influxdb/issues/5814): Run CQs with the same name from different databases\n- [#5787](https://github.com/influxdata/influxdb/pull/5787): HTTP: Add QueryAuthorizer instance to httpd service’s handler. @chris-ramon\n- [#5754](https://github.com/influxdata/influxdb/issues/5754): Adding a node as meta only results in a data node also being registered\n- [#5753](https://github.com/influxdata/influxdb/pull/5753): Ensures that drop-type commands work correctly in a cluster\n- [#5724](https://github.com/influxdata/influxdb/issues/5724): influx\\_tsm doesn't close file handles properly\n- [#5719](https://github.com/influxdata/influxdb/issues/5719): Fix cache not deduplicating points\n- [#5716](https://github.com/influxdata/influxdb/pull/5716): models: improve handling of points with empty field names or with no fields.\n- [#5699](https://github.com/influxdata/influxdb/issues/5699): Fix potential thread safety issue in cache @jonseymour\n- [#5696](https://github.com/influxdata/influxdb/issues/5696): Do not drop the database when creating with a retention policy\n- [#5695](https://github.com/influxdata/influxdb/pull/5695): Remove meta servers from node.json\n- [#5664](https://github.com/influxdata/influxdb/issues/5664): panic in model.Points.scanTo #5664\n- [#5656](https://github.com/influxdata/influxdb/issues/5656): influx\\_tsm: panic during conversion\n- [#5628](https://github.com/influxdata/influxdb/issues/5628): Crashed the server with a bad derivative query\n- [#5624](https://github.com/influxdata/influxdb/pull/5624): Fix golint issues in client v2 package @PSUDaemon\n- [#5610](https://github.com/influxdata/influxdb/issues/5610): Write into fully-replicated cluster is not replicated across all shards\n- [#5606](https://github.com/influxdata/influxdb/issues/5606): TSM conversion reproducibly drops data silently\n- [#5594](https://github.com/influxdata/influxdb/pull/5594): Fix missing url params on lease redirect - 
@oldmantaiter\n- [#5590](https://github.com/influxdata/influxdb/pull/5590): Fix panic when dropping subscription for unknown retention policy.\n- [#5557](https://github.com/influxdata/influxdb/issues/5630): Fixes panic when surrounding the select statement arguments in brackets\n- [#5535](https://github.com/influxdata/influxdb/pull/5535): Update README for referring to Collectd\n- [#5532](https://github.com/influxdata/influxdb/issues/5532): user passwords not changeable in cluster\n- [#5510](https://github.com/influxdata/influxdb/pull/5510): Optimize ReducePercentile @bsideup\n- [#5489](https://github.com/influxdata/influxdb/pull/5489): Fixes multiple issues causing tests to fail on windows. Thanks @runner-mei\n- [#5376](https://github.com/influxdata/influxdb/pull/5376): Fix golint issues in models package. @nuss-justin\n- [#5375](https://github.com/influxdata/influxdb/pull/5375): Lint tsdb and tsdb/engine package @nuss-justin\n- [#5182](https://github.com/influxdata/influxdb/pull/5182): Graphite: Fix an issue where the default template would be used instead of a more specific one. 
Thanks @flisky\n- [#4688](https://github.com/influxdata/influxdb/issues/4688): admin UI doesn't display results for some SHOW queries\n\n## v0.10.3 [2016-03-09]\n\n### Bugfixes\n\n- [#5924](https://github.com/influxdata/influxdb/issues/5924): Missing data after using influx\\_tsm\n- [#5716](https://github.com/influxdata/influxdb/pull/5716): models: improve handling of points with empty field names or with no fields.\n- [#5594](https://github.com/influxdata/influxdb/pull/5594): Fix missing url params on lease redirect - @oldmantaiter\n\n## v0.10.2 [2016-03-03]\n\n### Bugfixes\n\n- [#5880](https://github.com/influxdata/influxdb/issues/5880): TCP connection closed after write (regression/change from 0.9.6)\n- [#5865](https://github.com/influxdata/influxdb/issues/5865): Conversion to tsm fails with exceeds max index value\n- [#5861](https://github.com/influxdata/influxdb/pull/5861): Fix panic when dropping subscription for unknown retention policy.\n- [#5857](https://github.com/influxdata/influxdb/issues/5857): panic in tsm1.Values.Deduplicate\n- [#5832](https://github.com/influxdata/influxdb/issues/5832): tsm: cache: need to check that snapshot has been sorted @jonseymour\n- [#5719](https://github.com/influxdata/influxdb/issues/5719): Fix cache not deduplicating points\n- [#5699](https://github.com/influxdata/influxdb/issues/5699): Fix potential thread safety issue in cache @jonseymour\n\n## v0.10.1 [2016-02-18]\n\n### Bugfixes\n\n- [#5724](https://github.com/influxdata/influxdb/issues/5724): influx\\_tsm doesn't close file handles properly\n- [#5696](https://github.com/influxdata/influxdb/issues/5696): Do not drop the database when creating with a retention policy\n- [#5656](https://github.com/influxdata/influxdb/issues/5656): influx\\_tsm: panic during conversion\n- [#5606](https://github.com/influxdata/influxdb/issues/5606): TSM conversion reproducibly drops data silently\n- [#5303](https://github.com/influxdata/influxdb/issues/5303): Protect against stateful 
mappers returning nothing in the raw executor\n\n## v0.10.0 [2016-02-04]\n\n### Release Notes\n\nThis release now uses the TSM storage engine. Old bz1 and b1 shards can still be read, but in a future release you will be required to migrate old shards to TSM. For new shards getting created, or new installations, the TSM storage engine will be used.\n\nThis release also changes how clusters are setup. The config file has changed so have a look at the new example. Also, upgrading a single node works, but for upgrading clusters, you'll need help from us. Send us a note at contact@influxdb.com if you need assistance upgrading a cluster.\n\n### Features\n\n- [#5565](https://github.com/influxdata/influxdb/pull/5565): Add configuration for time precision with UDP services. - @tpitale\n- [#5522](https://github.com/influxdata/influxdb/pull/5522): Optimize tsm1 cache to reduce memory consumption and GC scan time.\n- [#5460](https://github.com/influxdata/influxdb/pull/5460): Prevent exponential growth in CLI history. Thanks @sczk!\n- [#5459](https://github.com/influxdata/influxdb/pull/5459): Create `/status` endpoint for health checks.\n- [#5226](https://github.com/influxdata/influxdb/pull/5226): b\*1 to tsm1 shard conversion tool.\n- [#5224](https://github.com/influxdata/influxdb/pull/5224): Online backup/incremental backup. Restore (for TSM).\n- [#5201](https://github.com/influxdata/influxdb/pull/5201): Allow max UDP buffer size to be configurable. Thanks @sebito91\n- [#5194](https://github.com/influxdata/influxdb/pull/5194): Custom continuous query options per query rather than per node.\n- [#5183](https://github.com/influxdata/influxdb/pull/5183): CLI confirms database exists when USE executed. 
Thanks @pires\n\n### Bugfixes\n\n- [#5505](https://github.com/influxdata/influxdb/issues/5505): Clear authCache in meta.Client when password changes.\n- [#5504](https://github.com/influxdata/influxdb/issues/5504): create retention policy on unexistant DB crash InfluxDB\n- [#5479](https://github.com/influxdata/influxdb/issues/5479): Bringing up a node as a meta only node causes panic\n- [#5478](https://github.com/influxdata/influxdb/issues/5478): panic: interface conversion: interface is float64, not int64\n- [#5475](https://github.com/influxdata/influxdb/issues/5475): Ensure appropriate exit code returned for non-interactive use of CLI.\n- [#5469](https://github.com/influxdata/influxdb/issues/5469): Conversion from bz1 to tsm doesn't work as described\n- [#5455](https://github.com/influxdata/influxdb/issues/5455): panic: runtime error: slice bounds out of range when loading corrupted wal segment\n- [#5449](https://github.com/influxdata/influxdb/issues/5449): panic when dropping collectd points\n- [#5382](https://github.com/influxdata/influxdb/pull/5382): Fixes some escaping bugs with tag keys and values.\n- [#5350](https://github.com/influxdata/influxdb/issues/5350): 'influxd backup' should create backup directory\n- [#5349](https://github.com/influxdata/influxdb/issues/5349): Validate metadata blob for 'influxd backup'\n- [#5264](https://github.com/influxdata/influxdb/pull/5264): Fix panic: runtime error: slice bounds out of range\n- [#5262](https://github.com/influxdata/influxdb/issues/5262): Fix a panic when a tag value was empty.\n- [#5244](https://github.com/influxdata/influxdb/issues/5244): panic: ensure it's safe to close engine multiple times.\n- [#5193](https://github.com/influxdata/influxdb/issues/5193): Missing data a minute before current time. Comes back later.\n- [#5186](https://github.com/influxdata/influxdb/pull/5186): Fix database creation with retention statement parsing. Fixes [#5077](https://github.com/influxdata/influxdb/issues/5077). 
Thanks @pires\n- [#5178](https://github.com/influxdata/influxdb/pull/5178): SHOW FIELD shouldn't consider VALUES to be valid. Thanks @pires\n- [#5158](https://github.com/influxdata/influxdb/pull/5158): Fix panic when writing invalid input to the line protocol.\n- [#5129](https://github.com/influxdata/influxdb/pull/5129): Ensure precision flag is respected by CLI. Thanks @e-dard\n- [#5079](https://github.com/influxdata/influxdb/pull/5079): Ensure tsm WAL encoding buffer can handle large batches.\n- [#5078](https://github.com/influxdata/influxdb/issues/5078): influx non-interactive mode - INSERT must be handled. Thanks @grange74\n- [#5064](https://github.com/influxdata/influxdb/pull/5064): Full support for parenthesis in SELECT clause, fixes [#5054](https://github.com/influxdata/influxdb/issues/5054). Thanks @mengjinglei\n- [#5059](https://github.com/influxdata/influxdb/pull/5059): Fix unmarshal of database error by client code. Thanks @farshidtz\n- [#5042](https://github.com/influxdata/influxdb/issues/5042): Count with fill(none) will drop 0 valued intervals.\n- [#5016](https://github.com/influxdata/influxdb/pull/5016): Don't panic if Meta data directory not writable. Thanks @oiooj\n- [#4940](https://github.com/influxdata/influxdb/pull/4940): Fix distributed aggregate query error. Thanks @li-ang\n- [#4735](https://github.com/influxdata/influxdb/issues/4735): Fix panic when merging empty results.\n- [#4622](https://github.com/influxdata/influxdb/issues/4622): Fix panic when passing too large of timestamps to OpenTSDB input.\n- [#4303](https://github.com/influxdata/influxdb/issues/4303): Don't drop measurements or series from multiple databases.\n\n## v0.9.6 [2015-12-09]\n\n### Release Notes\nThis release has an updated design and implementation of the TSM storage engine. 
If you had been using tsm1 as your storage engine prior to this release (either 0.9.5.x or 0.9.6 nightly builds) you will have to start with a fresh database.\n\nIf you had TSM configuration options set, those have been updated. See the updated sample configuration for more details: https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml#L98-L125\n\n### Features\n\n- [#4790](https://github.com/influxdata/influxdb/pull/4790): Allow openTSDB point-level error logging to be disabled\n- [#4728](https://github.com/influxdata/influxdb/pull/4728): SHOW SHARD GROUPS. By @mateuszdyminski\n- [#4841](https://github.com/influxdata/influxdb/pull/4841): Improve point parsing speed. Lint models package. Thanks @e-dard!\n- [#4889](https://github.com/influxdata/influxdb/pull/4889): Implement close notifier and timeout on executors\n- [#2676](https://github.com/influxdata/influxdb/issues/2676), [#4866](https://github.com/influxdata/influxdb/pull/4866): Add support for specifying default retention policy in database create. Thanks @pires!\n- [#4848](https://github.com/influxdata/influxdb/pull/4848): Added framework for cluster integration testing.\n- [#4872](https://github.com/influxdata/influxdb/pull/4872): Add option to disable logging for meta service.\n- [#4787](https://github.com/influxdata/influxdb/issues/4787): Now builds on Solaris\n\n### Bugfixes\n\n- [#4849](https://github.com/influxdata/influxdb/issues/4849): Derivative works with count, mean, median, sum, first, last, max, min, and percentile.\n- [#4984](https://github.com/influxdata/influxdb/pull/4984): Allow math on fields, fixes regression. Thanks @mengjinglei\n- [#4666](https://github.com/influxdata/influxdb/issues/4666): Fix panic in derivative with invalid values.\n- [#4404](https://github.com/influxdata/influxdb/issues/4404): Return better error for currently unsupported DELETE queries.\n- [#4858](https://github.com/influxdata/influxdb/pull/4858): Validate nested aggregations in queries. 
Thanks @viru\n- [#4921](https://github.com/influxdata/influxdb/pull/4921): Error responses should be JSON-formatted. Thanks @pires\n- [#4974](https://github.com/influxdata/influxdb/issues/4974): Fix Data Race in TSDB when setting measurement field name\n- [#4876](https://github.com/influxdata/influxdb/pull/4876): Complete lint for monitor and services packages. Thanks @e-dard!\n- [#4833](https://github.com/influxdata/influxdb/pull/4833), [#4927](https://github.com/influxdata/influxdb/pull/4927): Fix SHOW MEASUREMENTS for clusters. Thanks @li-ang!\n- [#4918](https://github.com/influxdata/influxdb/pull/4918): Restore can hang. Fix [issue #4806](https://github.com/influxdata/influxdb/issues/4806). Thanks @oiooj\n- [#4855](https://github.com/influxdata/influxdb/pull/4855): Fix race in TCP proxy shutdown. Thanks @runner-mei!\n- [#4411](https://github.com/influxdata/influxdb/pull/4411): Add Access-Control-Expose-Headers to HTTP responses\n- [#4768](https://github.com/influxdata/influxdb/pull/4768): CLI history skips blank lines. Thanks @pires\n- [#4766](https://github.com/influxdata/influxdb/pull/4766): Update CLI usage output. Thanks @aneshas\n- [#4804](https://github.com/influxdata/influxdb/pull/4804): Complete lint for services/admin. Thanks @nii236\n- [#4796](https://github.com/influxdata/influxdb/pull/4796): Check point without fields. Thanks @CrazyJvm\n- [#4815](https://github.com/influxdata/influxdb/pull/4815): Added `Time` field into aggregate output across the cluster. Thanks @li-ang\n- [#4817](https://github.com/influxdata/influxdb/pull/4817): Fix Min,Max,Top,Bottom function when query distributed node. 
Thanks @mengjinglei\n- [#4878](https://github.com/influxdata/influxdb/pull/4878): Fix String() function for several InfluxQL statement types\n- [#4913](https://github.com/influxdata/influxdb/pull/4913): Fix b1 flush deadlock\n- [#3170](https://github.com/influxdata/influxdb/issues/3170), [#4921](https://github.com/influxdata/influxdb/pull/4921): Database does not exist error is now JSON. Thanks @pires!\n- [#5029](https://github.com/influxdata/influxdb/pull/5029): Drop UDP point on bad parse.\n\n## v0.9.5 [2015-11-20]\n\n### Release Notes\n\n- Field names for the internal stats have been changed to be more inline with Go style.\n- 0.9.5 is reverting to Go 1.4.2 due to unresolved issues with Go 1.5.1.\n\nThere are breaking changes in this release:\n- The filesystem hierarchy for packages has been changed, namely:\n  - Binaries are now located in `/usr/bin` (previously `/opt/influxdb`)\n  - Configuration files are now located in `/etc/influxdb` (previously `/etc/opt/influxdb`)\n  - Data directories are now located in `/var/lib/influxdb` (previously `/var/opt/influxdb`)\n  - Scripts are now located in `/usr/lib/influxdb/scripts` (previously `/opt/influxdb`)\n\n### Features\n\n- [#4702](https://github.com/influxdata/influxdb/pull/4702): Support 'history' command at CLI\n- [#4098](https://github.com/influxdata/influxdb/issues/4098): Enable `golint` on the code base - uuid subpackage\n- [#4141](https://github.com/influxdata/influxdb/pull/4141): Control whether each query should be logged\n- [#4065](https://github.com/influxdata/influxdb/pull/4065): Added precision support in cmd client. 
Thanks @sbouchex\n- [#4140](https://github.com/influxdata/influxdb/pull/4140): Make storage engine configurable\n- [#4161](https://github.com/influxdata/influxdb/pull/4161): Implement bottom selector function\n- [#4204](https://github.com/influxdata/influxdb/pull/4204): Allow module-level selection for SHOW STATS\n- [#4208](https://github.com/influxdata/influxdb/pull/4208): Allow module-level selection for SHOW DIAGNOSTICS\n- [#4196](https://github.com/influxdata/influxdb/pull/4196): Export tsdb.Iterator\n- [#4198](https://github.com/influxdata/influxdb/pull/4198): Add basic cluster-service stats\n- [#4262](https://github.com/influxdata/influxdb/pull/4262): Allow configuration of UDP retention policy\n- [#4265](https://github.com/influxdata/influxdb/pull/4265): Add statistics for Hinted-Handoff\n- [#4284](https://github.com/influxdata/influxdb/pull/4284): Add exponential backoff for hinted-handoff failures\n- [#4310](https://github.com/influxdata/influxdb/pull/4310): Support dropping non-Raft nodes. Work mostly by @corylanou\n- [#4348](https://github.com/influxdata/influxdb/pull/4348): Public ApplyTemplate function for graphite parser.\n- [#4178](https://github.com/influxdata/influxdb/pull/4178): Support fields in graphite parser. 
Thanks @roobert!\n- [#4409](https://github.com/influxdata/influxdb/pull/4409): wire up INTO queries.\n- [#4379](https://github.com/influxdata/influxdb/pull/4379): Auto-create database for UDP input.\n- [#4375](https://github.com/influxdata/influxdb/pull/4375): Add Subscriptions so data can be 'forked' out of InfluxDB to another third party.\n- [#4506](https://github.com/influxdata/influxdb/pull/4506): Register with Enterprise service and upload stats, if token is available.\n- [#4516](https://github.com/influxdata/influxdb/pull/4516): Hinted-handoff refactor, with new statistics and diagnostics\n- [#4501](https://github.com/influxdata/influxdb/pull/4501): Allow filtering SHOW MEASUREMENTS by regex.\n- [#4547](https://github.com/influxdata/influxdb/pull/4547): Allow any node to be dropped, even a raft node (even the leader).\n- [#4600](https://github.com/influxdata/influxdb/pull/4600): ping endpoint can wait for leader\n- [#4648](https://github.com/influxdata/influxdb/pull/4648): UDP Client (v2 client)\n- [#4690](https://github.com/influxdata/influxdb/pull/4690): SHOW SHARDS now includes database and policy. Thanks @pires\n- [#4676](https://github.com/influxdata/influxdb/pull/4676): UDP service listener performance enhancements\n- [#4659](https://github.com/influxdata/influxdb/pull/4659): Support IF EXISTS for DROP DATABASE. Thanks @ch33hau\n- [#4721](https://github.com/influxdata/influxdb/pull/4721): Export tsdb.InterfaceValues\n- [#4681](https://github.com/influxdata/influxdb/pull/4681): Increase default buffer size for collectd and graphite listeners\n- [#4685](https://github.com/influxdata/influxdb/pull/4685): Automatically promote node to raft peer if drop server results in removing a raft peer.\n- [#4846](https://github.com/influxdata/influxdb/pull/4846): Allow NaN as a valid value on the graphite service; discard these points silently (graphite compatibility). 
Thanks @jsternberg!\n\n### Bugfixes\n\n- [#4193](https://github.com/influxdata/influxdb/issues/4193): Less than or equal to inequality is not inclusive for time in where clause\n- [#4235](https://github.com/influxdata/influxdb/issues/4235): \"ORDER BY DESC\" doesn't properly order\n- [#4789](https://github.com/influxdata/influxdb/pull/4789): Decode WHERE fields during aggregates. Fix [issue #4701](https://github.com/influxdata/influxdb/issues/4701).\n- [#4778](https://github.com/influxdata/influxdb/pull/4778): If there are no points to count, count is 0.\n- [#4715](https://github.com/influxdata/influxdb/pull/4715): Fix panic during Raft-close. Fix [issue #4707](https://github.com/influxdata/influxdb/issues/4707). Thanks @oiooj\n- [#4643](https://github.com/influxdata/influxdb/pull/4643): Fix panic during backup restoration. Thanks @oiooj\n- [#4632](https://github.com/influxdata/influxdb/pull/4632): Fix parsing of IPv6 hosts in client package. Thanks @miguelxpn\n- [#4389](https://github.com/influxdata/influxdb/pull/4389): Don't add a new segment file on each hinted-handoff purge cycle.\n- [#4166](https://github.com/influxdata/influxdb/pull/4166): Fix parser error on invalid SHOW\n- [#3457](https://github.com/influxdata/influxdb/issues/3457): [0.9.3] cannot select field names with prefix + \".\" that match the measurement name\n- [#4704](https://github.com/influxdata/influxdb/pull/4704). Tighten up command parsing within CLI. Thanks @pires\n- [#4225](https://github.com/influxdata/influxdb/pull/4225): Always display diags in name-sorted order\n- [#4111](https://github.com/influxdata/influxdb/pull/4111): Update pre-commit hook for go vet composites\n- [#4136](https://github.com/influxdata/influxdb/pull/4136): Return an error-on-write if target retention policy does not exist. 
Thanks for the report @ymettier\n- [#4228](https://github.com/influxdata/influxdb/pull/4228): Add build timestamp to version information.\n- [#4124](https://github.com/influxdata/influxdb/issues/4124): Missing defer/recover/panic idiom in HTTPD service\n- [#4238](https://github.com/influxdata/influxdb/pull/4238): Fully disable hinted-handoff service if so requested.\n- [#4165](https://github.com/influxdata/influxdb/pull/4165): Tag all Go runtime stats when writing to internal database.\n- [#4586](https://github.com/influxdata/influxdb/pull/4586): Exit when invalid engine is selected\n- [#4118](https://github.com/influxdata/influxdb/issues/4118): Return consistent, correct result for SHOW MEASUREMENTS with multiple AND conditions\n- [#4191](https://github.com/influxdata/influxdb/pull/4191): Correctly marshal remote mapper responses. Fixes [#4170](https://github.com/influxdata/influxdb/issues/4170)\n- [#4222](https://github.com/influxdata/influxdb/pull/4222): Graphite TCP connections should not block shutdown\n- [#4180](https://github.com/influxdata/influxdb/pull/4180): Cursor & SelectMapper Refactor\n- [#1577](https://github.com/influxdata/influxdb/issues/1577): selectors (e.g. min, max, first, last) should have equivalents to return the actual point\n- [#4264](https://github.com/influxdata/influxdb/issues/4264): Refactor map functions to use list of values\n- [#4278](https://github.com/influxdata/influxdb/pull/4278): Fix error marshalling across the cluster\n- [#4149](https://github.com/influxdata/influxdb/pull/4149): Fix derivative unnecessarily requires aggregate function.  Thanks @peekeri!\n- [#4674](https://github.com/influxdata/influxdb/pull/4674): Fix panic during restore. 
Thanks @simcap.\n- [#4725](https://github.com/influxdata/influxdb/pull/4725): Don't list deleted shards during SHOW SHARDS.\n- [#4237](https://github.com/influxdata/influxdb/issues/4237): DERIVATIVE() edge conditions\n- [#4263](https://github.com/influxdata/influxdb/issues/4263): derivative does not work when data is missing\n- [#4293](https://github.com/influxdata/influxdb/pull/4293): Ensure shell is invoked when touching PID file. Thanks @christopherjdickson\n- [#4296](https://github.com/influxdata/influxdb/pull/4296): Reject line protocol ending with '-'. Fixes [#4272](https://github.com/influxdata/influxdb/issues/4272)\n- [#4333](https://github.com/influxdata/influxdb/pull/4333): Retry monitor storage creation and storage only on Leader.\n- [#4276](https://github.com/influxdata/influxdb/issues/4276): Walk DropSeriesStatement & check for empty sources\n- [#4465](https://github.com/influxdata/influxdb/pull/4465): Actually display a message if the CLI can't connect to the database.\n- [#4342](https://github.com/influxdata/influxdb/pull/4342): Fix mixing aggregates and math with non-aggregates. Thanks @kostya-sh.\n- [#4349](https://github.com/influxdata/influxdb/issues/4349): If HH can't unmarshal a block, skip that block.\n- [#4502](https://github.com/influxdata/influxdb/pull/4502): Don't crash on Graphite close, if Graphite not fully open. Thanks for the report @ranjib\n- [#4354](https://github.com/influxdata/influxdb/pull/4353): Fully lock node queues during hinted handoff. 
Fixes one cause of missing data on clusters.\n- [#4357](https://github.com/influxdata/influxdb/issues/4357): Fix similar float values encoding overflow Thanks @dgryski!\n- [#4344](https://github.com/influxdata/influxdb/issues/4344): Make client.Write default to client.precision if none is given.\n- [#3429](https://github.com/influxdata/influxdb/issues/3429): Incorrect parsing of regex containing '/'\n- [#4374](https://github.com/influxdata/influxdb/issues/4374): Add tsm1 quickcheck tests\n- [#4644](https://github.com/influxdata/influxdb/pull/4644): Check for response errors during token check, fixes issue [#4641](https://github.com/influxdata/influxdb/issues/4641)\n- [#4377](https://github.com/influxdata/influxdb/pull/4377): Hinted handoff should not process dropped nodes\n- [#4365](https://github.com/influxdata/influxdb/issues/4365): Prevent panic in DecodeSameTypeBlock\n- [#4280](https://github.com/influxdata/influxdb/issues/4280): Only drop points matching WHERE clause\n- [#4443](https://github.com/influxdata/influxdb/pull/4443): Fix race condition while listing store's shards. Fixes [#4442](https://github.com/influxdata/influxdb/issues/4442)\n- [#4410](https://github.com/influxdata/influxdb/pull/4410): Fix infinite recursion in statement string(). Thanks @kostya-sh\n- [#4360](https://github.com/influxdata/influxdb/issues/4360): Aggregate Selectors overwrite values during post-processing\n- [#4421](https://github.com/influxdata/influxdb/issues/4421): Fix line protocol accepting tags with no values\n- [#4434](https://github.com/influxdata/influxdb/pull/4434): Allow 'E' for scientific values. Fixes [#4433](https://github.com/influxdata/influxdb/issues/4433)\n- [#4431](https://github.com/influxdata/influxdb/issues/4431): Add tsm1 WAL QuickCheck\n- [#4438](https://github.com/influxdata/influxdb/pull/4438): openTSDB service shutdown fixes\n- [#4447](https://github.com/influxdata/influxdb/pull/4447): Fixes to logrotate file. 
Thanks @linsomniac.\n- [#3820](https://github.com/influxdata/influxdb/issues/3820): Fix js error in admin UI.\n- [#4460](https://github.com/influxdata/influxdb/issues/4460): tsm1 meta lint\n- [#4415](https://github.com/influxdata/influxdb/issues/4415): Selector (like max, min, first, etc) return a string instead of timestamp\n- [#4472](https://github.com/influxdata/influxdb/issues/4472): Fix 'too many points in GROUP BY interval' error\n- [#4475](https://github.com/influxdata/influxdb/issues/4475): Fix SHOW TAG VALUES error message.\n- [#4486](https://github.com/influxdata/influxdb/pull/4486): Fix missing comments for runner package\n- [#4497](https://github.com/influxdata/influxdb/pull/4497): Fix sequence in meta proto\n- [#3367](https://github.com/influxdata/influxdb/issues/3367): Negative timestamps are parsed correctly by the line protocol.\n- [#4563](https://github.com/influxdata/influxdb/pull/4536): Fix broken subscriptions updates.\n- [#4538](https://github.com/influxdata/influxdb/issues/4538): Dropping database under a write load causes panics\n- [#4582](https://github.com/influxdata/influxdb/pull/4582): Correct logging tags in cluster and TCP package. Thanks @oiooj\n- [#4513](https://github.com/influxdata/influxdb/issues/4513): TSM1: panic: runtime error: index out of range\n- [#4521](https://github.com/influxdata/influxdb/issues/4521): TSM1: panic: decode of short block: got 1, exp 9\n- [#4587](https://github.com/influxdata/influxdb/pull/4587): Prevent NaN float values from being stored\n- [#4596](https://github.com/influxdata/influxdb/pull/4596): Skip empty string for start position when parsing line protocol. Thanks @ch33hau\n- [#4610](https://github.com/influxdata/influxdb/pull/4610): Make internal stats names consistent with Go style.\n- [#4625](https://github.com/influxdata/influxdb/pull/4625): Correctly handle bad write requests. 
Thanks @oiooj.\n- [#4650](https://github.com/influxdata/influxdb/issues/4650): Importer should skip empty lines\n- [#4651](https://github.com/influxdata/influxdb/issues/4651): Importer doesn't flush out last batch\n- [#4602](https://github.com/influxdata/influxdb/issues/4602): Fixes data race between PointsWriter and Subscriber services.\n- [#4691](https://github.com/influxdata/influxdb/issues/4691): Enable toml test `TestConfig_Encode`.\n- [#4283](https://github.com/influxdata/influxdb/pull/4283): Disable HintedHandoff if configuration is not set.\n- [#4703](https://github.com/influxdata/influxdb/pull/4703): Complete lint for cmd/influx. Thanks @pablolmiranda\n\n## v0.9.4 [2015-09-14]\n\n### Release Notes\nWith this release InfluxDB is moving to Go 1.5.\n\n### Features\n\n- [#4050](https://github.com/influxdata/influxdb/pull/4050): Add stats to collectd\n- [#3771](https://github.com/influxdata/influxdb/pull/3771): Close idle Graphite TCP connections\n- [#3755](https://github.com/influxdata/influxdb/issues/3755): Add option to build script. Thanks @fg2it\n- [#3863](https://github.com/influxdata/influxdb/pull/3863): Move to Go 1.5\n- [#3892](https://github.com/influxdata/influxdb/pull/3892): Support IF NOT EXISTS for CREATE DATABASE\n- [#3916](https://github.com/influxdata/influxdb/pull/3916): New statistics and diagnostics support. 
Graphite first to be instrumented.\n- [#3901](https://github.com/influxdata/influxdb/pull/3901): Add consistency level option to influx cli Thanks @takayuki\n- [#4048](https://github.com/influxdata/influxdb/pull/4048): Add statistics to Continuous Query service\n- [#4049](https://github.com/influxdata/influxdb/pull/4049): Add stats to the UDP input\n- [#3876](https://github.com/influxdata/influxdb/pull/3876): Allow the following syntax in CQs: INTO \"1hPolicy\".:MEASUREMENT\n- [#3975](https://github.com/influxdata/influxdb/pull/3975): Add shard copy service\n- [#3986](https://github.com/influxdata/influxdb/pull/3986): Support sorting by time desc\n- [#3930](https://github.com/influxdata/influxdb/pull/3930): Wire up TOP aggregate function - fixes [#1821](https://github.com/influxdata/influxdb/issues/1821)\n- [#4045](https://github.com/influxdata/influxdb/pull/4045): Instrument cluster-level points writer\n- [#3996](https://github.com/influxdata/influxdb/pull/3996): Add statistics to httpd package\n- [#4003](https://github.com/influxdata/influxdb/pull/4033): Add logrotate configuration.\n- [#4043](https://github.com/influxdata/influxdb/pull/4043): Add stats and batching to openTSDB input\n- [#4042](https://github.com/influxdata/influxdb/pull/4042): Add pending batches control to batcher\n- [#4006](https://github.com/influxdata/influxdb/pull/4006): Add basic statistics for shards\n- [#4072](https://github.com/influxdata/influxdb/pull/4072): Add statistics for the WAL.\n\n### Bugfixes\n\n- [#4042](https://github.com/influxdata/influxdb/pull/4042): Set UDP input batching defaults as needed.\n- [#3785](https://github.com/influxdata/influxdb/issues/3785): Invalid time stamp in graphite metric causes panic\n- [#3804](https://github.com/influxdata/influxdb/pull/3804): init.d script fixes, fixes issue 3803.\n- [#3823](https://github.com/influxdata/influxdb/pull/3823): Deterministic ordering for first() and last()\n- 
[#3869](https://github.com/influxdata/influxdb/issues/3869): Seemingly deadlocked when ingesting metrics via graphite plugin\n- [#3856](https://github.com/influxdata/influxdb/pull/3856): Minor changes to retention enforcement.\n- [#3884](https://github.com/influxdata/influxdb/pull/3884): Fix two panics in WAL that can happen at server startup\n- [#3868](https://github.com/influxdata/influxdb/pull/3868): Add shell option to start the daemon on CentOS. Thanks @SwannCroiset.\n- [#3886](https://github.com/influxdata/influxdb/pull/3886): Prevent write timeouts due to lock contention in WAL\n- [#3574](https://github.com/influxdata/influxdb/issues/3574): Querying data node causes panic\n- [#3913](https://github.com/influxdata/influxdb/issues/3913): Convert meta shard owners to objects\n- [#4026](https://github.com/influxdata/influxdb/pull/4026): Support multiple Graphite inputs. Fixes issue [#3636](https://github.com/influxdata/influxdb/issues/3636)\n- [#3927](https://github.com/influxdata/influxdb/issues/3927): Add WAL lock to prevent timing lock contention\n- [#3928](https://github.com/influxdata/influxdb/issues/3928): Write fails for multiple points when tag starts with quote\n- [#3901](https://github.com/influxdata/influxdb/pull/3901): Unblock relaxed write consistency level Thanks @takayuki!\n- [#3950](https://github.com/influxdata/influxdb/pull/3950): Limit bz1 quickcheck tests to 10 iterations on CI\n- [#3977](https://github.com/influxdata/influxdb/pull/3977): Silence wal logging during testing\n- [#3931](https://github.com/influxdata/influxdb/pull/3931): Don't precreate shard groups entirely in the past\n- [#3960](https://github.com/influxdata/influxdb/issues/3960): possible \"catch up\" bug with nodes down in a cluster\n- [#3980](https://github.com/influxdata/influxdb/pull/3980): 'service stop' waits until service actually stops. 
Fixes issue #3548.\n- [#4016](https://github.com/influxdata/influxdb/pull/4016): Shutdown Graphite UDP on SIGTERM.\n- [#4034](https://github.com/influxdata/influxdb/pull/4034): Rollback bolt tx on mapper open error\n- [#3848](https://github.com/influxdata/influxdb/issues/3848): restart influxdb causing panic\n- [#3881](https://github.com/influxdata/influxdb/issues/3881): panic: runtime error: invalid memory address or nil pointer dereference\n- [#3926](https://github.com/influxdata/influxdb/issues/3926): First or last value of `GROUP BY time(x)` is often null. Fixed by [#4038](https://github.com/influxdata/influxdb/pull/4038)\n- [#4053](https://github.com/influxdata/influxdb/pull/4053): Prohibit dropping default retention policy.\n- [#4060](https://github.com/influxdata/influxdb/pull/4060): Don't log EOF error in openTSDB input.\n- [#3978](https://github.com/influxdata/influxdb/issues/3978): [0.9.3] (regression) cannot use GROUP BY * with more than a single field in SELECT clause\n- [#4058](https://github.com/influxdata/influxdb/pull/4058): Disable bz1 recompression\n- [#3902](https://github.com/influxdata/influxdb/issues/3902): [0.9.3] DB should not crash when using invalid expression \"GROUP BY time\"\n- [#3718](https://github.com/influxdata/influxdb/issues/3718): Derivative query with group by time but no aggregate function should fail parse\n\n## v0.9.3 [2015-08-26]\n\n### Release Notes\n\nThere are breaking changes in this release.\n - To store data points as integers you must now append `i` to the number if using the line protocol.\n - If you have a UDP input configured, you should check the UDP section of [the new sample configuration file](https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml) to learn how to modify existing configuration files, as 0.9.3 now expects multiple UDP inputs.\n - Configuration files must now have an entry for `wal-dir` in the `[data]` section. 
Check [new sample configuration file](https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml) for more details.\n - The implicit `GROUP BY *` that was added to every `SELECT *` has been removed. Instead any tags in the data are now part of the columns in the returned query.\n\nPlease see the *Features* section below for full details.\n\n### Features\n\n- [#3376](https://github.com/influxdata/influxdb/pull/3376): Support for remote shard query mapping\n- [#3372](https://github.com/influxdata/influxdb/pull/3372): Support joining nodes to existing cluster\n- [#3426](https://github.com/influxdata/influxdb/pull/3426): Additional logging for continuous queries. Thanks @jhorwit2\n- [#3478](https://github.com/influxdata/influxdb/pull/3478): Support incremental cluster joins\n- [#3519](https://github.com/influxdata/influxdb/pull/3519): **--BREAKING CHANGE--** Update line protocol to require trailing i for field values that are integers\n- [#3529](https://github.com/influxdata/influxdb/pull/3529): Add TLS support for OpenTSDB plugin. Thanks @nathanielc\n- [#3421](https://github.com/influxdata/influxdb/issues/3421): Should update metastore and cluster if IP or hostname changes\n- [#3502](https://github.com/influxdata/influxdb/pull/3502): Importer for 0.8.9 data via the CLI\n- [#3564](https://github.com/influxdata/influxdb/pull/3564): Fix alias, maintain column sort order\n- [#3585](https://github.com/influxdata/influxdb/pull/3585): Additional test coverage for non-existent fields\n- [#3246](https://github.com/influxdata/influxdb/issues/3246): Allow overriding of configuration parameters using environment variables\n- [#3599](https://github.com/influxdata/influxdb/pull/3599): **--BREAKING CHANGE--** Support multiple UDP inputs. 
Thanks @tpitale\n- [#3639](https://github.com/influxdata/influxdb/pull/3639): Cap auto-created retention policy replica count at 3\n- [#3641](https://github.com/influxdata/influxdb/pull/3641): Logging enhancements and single-node rename\n- [#3635](https://github.com/influxdata/influxdb/pull/3635): Add build branch to version output.\n- [#3115](https://github.com/influxdata/influxdb/pull/3115): Various init.d script improvements. Thanks @KoeSystems.\n- [#3628](https://github.com/influxdata/influxdb/pull/3628): Wildcard expansion of tags and fields for raw queries\n- [#3721](https://github.com/influxdata/influxdb/pull/3721): interpret number literals compared against time as nanoseconds from epoch\n- [#3514](https://github.com/influxdata/influxdb/issues/3514): Implement WAL outside BoltDB with compaction\n- [#3544](https://github.com/influxdata/influxdb/pull/3544): Implement compression on top of BoltDB\n- [#3795](https://github.com/influxdata/influxdb/pull/3795): Throttle import\n- [#3584](https://github.com/influxdata/influxdb/pull/3584): Import/export documentation\n\n### Bugfixes\n\n- [#3405](https://github.com/influxdata/influxdb/pull/3405): Prevent database panic when fields are missing. Thanks @jhorwit2\n- [#3411](https://github.com/influxdata/influxdb/issues/3411): 500 timeout on write\n- [#3420](https://github.com/influxdata/influxdb/pull/3420): Catch opentsdb malformed tags. Thanks @nathanielc.\n- [#3404](https://github.com/influxdata/influxdb/pull/3404): Added support for escaped single quotes in query string. 
Thanks @jhorwit2\n- [#3414](https://github.com/influxdata/influxdb/issues/3414): Shard mappers perform query re-writing\n- [#3525](https://github.com/influxdata/influxdb/pull/3525): check if fields are valid during parse time.\n- [#3511](https://github.com/influxdata/influxdb/issues/3511): Sending a large number of tag causes panic\n- [#3288](https://github.com/influxdata/influxdb/issues/3288): Run go fuzz on the line-protocol input\n- [#3545](https://github.com/influxdata/influxdb/issues/3545): Fix parsing string fields with newlines\n- [#3579](https://github.com/influxdata/influxdb/issues/3579): Revert breaking change to `client.NewClient` function\n- [#3580](https://github.com/influxdata/influxdb/issues/3580): Do not allow wildcards with fields in select statements\n- [#3530](https://github.com/influxdata/influxdb/pull/3530): Aliasing a column no longer works\n- [#3436](https://github.com/influxdata/influxdb/issues/3436): Fix panic in hinted handoff queue processor\n- [#3401](https://github.com/influxdata/influxdb/issues/3401): Derivative on non-numeric fields panics db\n- [#3583](https://github.com/influxdata/influxdb/issues/3583): Inserting value in scientific notation with a trailing i causes panic\n- [#3611](https://github.com/influxdata/influxdb/pull/3611): Fix query arithmetic with integers\n- [#3326](https://github.com/influxdata/influxdb/issues/3326): simple regex query fails with cryptic error\n- [#3618](https://github.com/influxdata/influxdb/pull/3618): Fix collectd stats panic on i386. 
Thanks @richterger\n- [#3625](https://github.com/influxdata/influxdb/pull/3625): Don't panic when aggregate and raw queries are in a single statement\n- [#3629](https://github.com/influxdata/influxdb/pull/3629): Use sensible batching defaults for Graphite.\n- [#3638](https://github.com/influxdata/influxdb/pull/3638): Cluster config fixes and removal of meta.peers config field\n- [#3640](https://github.com/influxdata/influxdb/pull/3640): Shutdown Graphite service when signal received.\n- [#3632](https://github.com/influxdata/influxdb/issues/3632): Make single-node host renames more seamless\n- [#3656](https://github.com/influxdata/influxdb/issues/3656): Silence snapshotter logger for testing\n- [#3651](https://github.com/influxdata/influxdb/pull/3651): Fully remove series when dropped.\n- [#3517](https://github.com/influxdata/influxdb/pull/3517): Batch CQ writes to avoid timeouts. Thanks @dim.\n- [#3522](https://github.com/influxdata/influxdb/pull/3522): Consume CQ results on request timeouts. Thanks @dim.\n- [#3646](https://github.com/influxdata/influxdb/pull/3646): Fix nil FieldCodec panic.\n- [#3672](https://github.com/influxdata/influxdb/pull/3672): Reduce in-memory index by 20%-30%\n- [#3673](https://github.com/influxdata/influxdb/pull/3673): Improve query performance by removing unnecessary tagset sorting.\n- [#3676](https://github.com/influxdata/influxdb/pull/3676): Improve query performance by memoizing mapper output keys.\n- [#3686](https://github.com/influxdata/influxdb/pull/3686): Ensure 'p' parameter is not logged, even on OPTIONS requests.\n- [#3687](https://github.com/influxdata/influxdb/issues/3687): Fix panic: runtime error: makeslice: len out of range in hinted handoff\n- [#3697](https://github.com/influxdata/influxdb/issues/3697):  Correctly merge non-chunked results for same series. 
Fix issue #3242.\n- [#3708](https://github.com/influxdata/influxdb/issues/3708): Fix double escaping measurement name during cluster replication\n- [#3704](https://github.com/influxdata/influxdb/issues/3704): cluster replication issue for measurement name containing backslash\n- [#3681](https://github.com/influxdata/influxdb/issues/3681): Quoted measurement names fail\n- [#3682](https://github.com/influxdata/influxdb/issues/3682): Fix inserting string value with backslashes\n- [#3735](https://github.com/influxdata/influxdb/issues/3735): Append to small bz1 blocks\n- [#3736](https://github.com/influxdata/influxdb/pull/3736): Update shard group duration with retention policy changes. Thanks for the report @papylhomme\n- [#3539](https://github.com/influxdata/influxdb/issues/3539): parser incorrectly accepts NaN as numerical value, but not always\n- [#3790](https://github.com/influxdata/influxdb/pull/3790): Fix line protocol parsing equals in measurements and NaN values\n- [#3778](https://github.com/influxdata/influxdb/pull/3778): Don't panic if SELECT on time.\n- [#3824](https://github.com/influxdata/influxdb/issues/3824): tsdb.Point.MarshalBinary needs to support all number types\n- [#3828](https://github.com/influxdata/influxdb/pull/3828): Support all number types when decoding a point\n- [#3853](https://github.com/influxdata/influxdb/pull/3853): Use 4KB default block size for bz1\n- [#3607](https://github.com/influxdata/influxdb/issues/3607): Fix unable to query influxdb due to deadlock in metastore.  Thanks @ccutrer!\n\n## v0.9.2 [2015-07-24]\n\n### Features\n- [#3177](https://github.com/influxdata/influxdb/pull/3177): Client supports making HTTPS requests. Thanks @jipperinbham\n- [#3299](https://github.com/influxdata/influxdb/pull/3299): Refactor query engine for distributed query support.\n- [#3334](https://github.com/influxdata/influxdb/pull/3334): Clean shutdown of influxd. 
Thanks @mcastilho\n\n### Bugfixes\n\n- [#3180](https://github.com/influxdata/influxdb/pull/3180): Log GOMAXPROCS, version, and commit on startup.\n- [#3218](https://github.com/influxdata/influxdb/pull/3218): Allow write timeouts to be configurable.\n- [#3184](https://github.com/influxdata/influxdb/pull/3184): Support basic auth in admin interface. Thanks @jipperinbham!\n- [#3236](https://github.com/influxdata/influxdb/pull/3236): Fix display issues in admin interface.\n- [#3232](https://github.com/influxdata/influxdb/pull/3232): Set logging prefix for metastore.\n- [#3230](https://github.com/influxdata/influxdb/issues/3230): panic: unable to parse bool value\n- [#3245](https://github.com/influxdata/influxdb/issues/3245): Error using graphite plugin with multiple filters\n- [#3223](https://github.com/influxdata/influxdb/issues/3223): default graphite template cannot have extra tags\n- [#3255](https://github.com/influxdata/influxdb/pull/3255): Flush WAL on start-up as soon as possible.\n- [#3289](https://github.com/influxdata/influxdb/issues/3289): InfluxDB crashes on floats without decimal\n- [#3298](https://github.com/influxdata/influxdb/pull/3298): Corrected WAL & flush parameters in default config. Thanks @jhorwit2\n- [#3159](https://github.com/influxdata/influxdb/issues/3159): High CPU Usage with unsorted writes\n- [#3307](https://github.com/influxdata/influxdb/pull/3307): Fix regression parsing boolean values True/False\n- [#3304](https://github.com/influxdata/influxdb/pull/3304): Fixed httpd logger to log user from query params. Thanks @jhorwit2\n- [#3332](https://github.com/influxdata/influxdb/pull/3332): Add SLIMIT and SOFFSET to string version of AST.\n- [#3335](https://github.com/influxdata/influxdb/pull/3335): Don't drop all data on DROP DATABASE. 
Thanks to @PierreF for the report\n- [#2761](https://github.com/influxdata/influxdb/issues/2761): Make SHOW RETENTION POLICIES consistent with other queries.\n- [#3356](https://github.com/influxdata/influxdb/pull/3356): Disregard semicolons after database name in use command. Thanks @timraymond.\n- [#3351](https://github.com/influxdata/influxdb/pull/3351): Handle malformed regex comparisons during parsing. Thanks @rnubel\n- [#3244](https://github.com/influxdata/influxdb/pull/3244): Wire up admin privilege grant and revoke.\n- [#3259](https://github.com/influxdata/influxdb/issues/3259): Respect privileges for queries.\n- [#3256](https://github.com/influxdata/influxdb/pull/3256): Remove unnecessary timeout in WaitForLeader(). Thanks @cannium.\n- [#3380](https://github.com/influxdata/influxdb/issues/3380): Parser fix, only allow ORDER BY ASC and ORDER BY time ASC.\n- [#3319](https://github.com/influxdata/influxdb/issues/3319): restarting process irrevocably BREAKS measurements with spaces\n- [#3453](https://github.com/influxdata/influxdb/issues/3453): Remove outdated `dump` command from CLI.\n- [#3463](https://github.com/influxdata/influxdb/issues/3463): Fix aggregate queries and time precision on where clauses.\n\n## v0.9.1 [2015-07-02]\n\n### Features\n\n- [2650](https://github.com/influxdata/influxdb/pull/2650): Add SHOW GRANTS FOR USER statement. 
Thanks @n1tr0g.\n- [3125](https://github.com/influxdata/influxdb/pull/3125): Graphite Input Protocol Parsing\n- [2746](https://github.com/influxdata/influxdb/pull/2746): New Admin UI/interface\n- [3036](https://github.com/influxdata/influxdb/pull/3036): Write Ahead Log (WAL)\n- [3014](https://github.com/influxdata/influxdb/issues/3014): Implement Raft snapshots\n\n### Bugfixes\n\n- [3013](https://github.com/influxdata/influxdb/issues/3013): Panic error with inserting values with commas\n- [#2956](https://github.com/influxdata/influxdb/issues/2956): Type mismatch in derivative\n- [#2908](https://github.com/influxdata/influxdb/issues/2908): Field mismatch error messages need to be updated\n- [#2931](https://github.com/influxdata/influxdb/pull/2931): Services and reporting should wait until cluster has leader.\n- [#2943](https://github.com/influxdata/influxdb/issues/2943): Ensure default retention policies are fully replicated\n- [#2948](https://github.com/influxdata/influxdb/issues/2948): Field mismatch error message to include measurement name\n- [#2919](https://github.com/influxdata/influxdb/issues/2919): Unable to insert negative floats\n- [#2935](https://github.com/influxdata/influxdb/issues/2935): Hook CPU and memory profiling back up.\n- [#2960](https://github.com/influxdata/influxdb/issues/2960): Cluster Write Errors.\n- [#2928](https://github.com/influxdata/influxdb/pull/2928): Start work to set InfluxDB version in HTTP response headers. 
Thanks @neonstalwart.\n- [#2969](https://github.com/influxdata/influxdb/pull/2969): Actually set HTTP version in responses.\n- [#2993](https://github.com/influxdata/influxdb/pull/2993): Don't log each UDP batch.\n- [#2994](https://github.com/influxdata/influxdb/pull/2994): Don't panic during wildcard expansion if no default database specified.\n- [#3002](https://github.com/influxdata/influxdb/pull/3002): Remove measurement from shard's index on DROP MEASUREMENT.\n- [#3021](https://github.com/influxdata/influxdb/pull/3021): Correct set HTTP write trace logging. Thanks @vladlopes.\n- [#3027](https://github.com/influxdata/influxdb/pull/3027): Enforce minimum retention policy duration of 1 hour.\n- [#3030](https://github.com/influxdata/influxdb/pull/3030): Fix excessive logging of shard creation.\n- [#3038](https://github.com/influxdata/influxdb/pull/3038): Don't check deleted shards for precreation. Thanks @vladlopes.\n- [#3033](https://github.com/influxdata/influxdb/pull/3033): Add support for marshaling `uint64` in client.\n- [#3090](https://github.com/influxdata/influxdb/pull/3090): Remove database from TSDB index on DROP DATABASE.\n- [#2944](https://github.com/influxdata/influxdb/issues/2944): Don't require \"WHERE time\" when creating continuous queries.\n- [#3075](https://github.com/influxdata/influxdb/pull/3075): GROUP BY correctly when different tags have same value.\n- [#3078](https://github.com/influxdata/influxdb/pull/3078): Fix CLI panic on malformed INSERT.\n- [#2102](https://github.com/influxdata/influxdb/issues/2102): Re-work Graphite input and metric processing\n- [#2996](https://github.com/influxdata/influxdb/issues/2996): Graphite Input Parsing\n- [#3136](https://github.com/influxdata/influxdb/pull/3136): Fix various issues with init.d script. 
Thanks @miguelcnf.\n- [#2996](https://github.com/influxdata/influxdb/issues/2996): Graphite Input Parsing\n- [#3127](https://github.com/influxdata/influxdb/issues/3127): Trying to insert a number larger than the largest signed 64-bit number kills influxd\n- [#3131](https://github.com/influxdata/influxdb/pull/3131): Copy batch tags to each point before marshalling\n- [#3155](https://github.com/influxdata/influxdb/pull/3155): Instantiate UDP batcher before listening for UDP traffic, otherwise a panic may result.\n- [#2678](https://github.com/influxdata/influxdb/issues/2678): Server allows tags with an empty string for the key and/or value\n- [#3061](https://github.com/influxdata/influxdb/issues/3061): syntactically incorrect line protocol insert panics the database\n- [#2608](https://github.com/influxdata/influxdb/issues/2608): drop measurement while writing points to that measurement has race condition that can panic\n- [#3183](https://github.com/influxdata/influxdb/issues/3183): using line protocol measurement names cannot contain commas\n- [#3193](https://github.com/influxdata/influxdb/pull/3193): Fix panic for SHOW STATS and in collectd\n- [#3102](https://github.com/influxdata/influxdb/issues/3102): Add authentication cache\n- [#3209](https://github.com/influxdata/influxdb/pull/3209): Dump Run() errors to stderr\n- [#3217](https://github.com/influxdata/influxdb/pull/3217): Allow WAL partition flush delay to be configurable.\n\n## v0.9.0 [2015-06-11]\n\n### Bugfixes\n\n- [#2869](https://github.com/influxdata/influxdb/issues/2869): Adding field to existing measurement causes panic\n- [#2849](https://github.com/influxdata/influxdb/issues/2849): RC32: Frequent write errors\n- [#2700](https://github.com/influxdata/influxdb/issues/2700): Incorrect error message in database EncodeFields\n- [#2897](https://github.com/influxdata/influxdb/pull/2897): Ensure target Graphite database exists\n- [#2898](https://github.com/influxdata/influxdb/pull/2898): Ensure target openTSDB 
database exists\n- [#2895](https://github.com/influxdata/influxdb/pull/2895): Use Graphite input defaults where necessary\n- [#2900](https://github.com/influxdata/influxdb/pull/2900): Use openTSDB input defaults where necessary\n- [#2886](https://github.com/influxdata/influxdb/issues/2886): Refactor backup & restore\n- [#2804](https://github.com/influxdata/influxdb/pull/2804): BREAKING: change time literals to be single quoted in InfluxQL. Thanks @nvcook42!\n- [#2906](https://github.com/influxdata/influxdb/pull/2906): Restrict replication factor to the cluster size\n- [#2905](https://github.com/influxdata/influxdb/pull/2905): Restrict clusters to 3 peers\n- [#2904](https://github.com/influxdata/influxdb/pull/2904): Re-enable server reporting.\n- [#2917](https://github.com/influxdata/influxdb/pull/2917): Fix int64 field values.\n- [#2920](https://github.com/influxdata/influxdb/issues/2920): Ensure collectd database exists\n\n## v0.9.0-rc33 [2015-06-09]\n\n### Bugfixes\n\n- [#2816](https://github.com/influxdata/influxdb/pull/2816): Enable UDP service. Thanks @renan-\n- [#2824](https://github.com/influxdata/influxdb/pull/2824): Add missing call to WaitGroup.Done in execConn. Thanks @liyichao\n- [#2823](https://github.com/influxdata/influxdb/pull/2823): Convert OpenTSDB to a service.\n- [#2838](https://github.com/influxdata/influxdb/pull/2838): Set auto-created retention policy period to infinite.\n- [#2829](https://github.com/influxdata/influxdb/pull/2829): Re-enable Graphite support as a new Service-style component.\n- [#2814](https://github.com/influxdata/influxdb/issues/2814): Convert collectd to a service.\n- [#2852](https://github.com/influxdata/influxdb/pull/2852): Don't panic when altering retention policies. 
Thanks for the report @huhongbo\n- [#2857](https://github.com/influxdata/influxdb/issues/2857): Fix parsing commas in string field values.\n- [#2833](https://github.com/influxdata/influxdb/pull/2833): Make the default config valid.\n- [#2859](https://github.com/influxdata/influxdb/pull/2859): Fix panic on aggregate functions.\n- [#2878](https://github.com/influxdata/influxdb/pull/2878): Re-enable shard precreation.\n- [2865](https://github.com/influxdata/influxdb/pull/2865) -- Return an empty set of results if database does not exist in shard metadata.\n\n### Features\n- [2858](https://github.com/influxdata/influxdb/pull/2858): Support setting openTSDB write consistency.\n\n## v0.9.0-rc32 [2015-06-07]\n\n### Release Notes\n\nThis released introduced an updated write path and clustering design. The data format has also changed, so you'll need to wipe out your data to upgrade from RC31. There should be no other data changes before v0.9.0 is released.\n\n### Features\n- [#1997](https://github.com/influxdata/influxdb/pull/1997): Update SELECT * to return tag values.\n- [#2599](https://github.com/influxdata/influxdb/issues/2599): Add \"epoch\" URL param and return JSON time values as epoch instead of date strings.\n- [#2682](https://github.com/influxdata/influxdb/issues/2682): Adding pr checklist to CONTRIBUTING.md\n- [#2683](https://github.com/influxdata/influxdb/issues/2683): Add batching support to Graphite inputs.\n- [#2687](https://github.com/influxdata/influxdb/issues/2687): Add batching support to Collectd inputs.\n- [#2696](https://github.com/influxdata/influxdb/pull/2696): Add line protocol. This is now the preferred way to write data.\n- [#2751](https://github.com/influxdata/influxdb/pull/2751): Add UDP input. UDP only supports the line protocol now.\n- [#2684](https://github.com/influxdata/influxdb/pull/2684): Include client timeout configuration. 
Thanks @vladlopes!\n\n### Bugfixes\n- [#2776](https://github.com/influxdata/influxdb/issues/2776): Re-implement retention policy enforcement.\n- [#2635](https://github.com/influxdata/influxdb/issues/2635): Fix querying against boolean field in WHERE clause.\n- [#2644](https://github.com/influxdata/influxdb/issues/2644): Make SHOW queries work with FROM /<regex>/.\n- [#2501](https://github.com/influxdata/influxdb/issues/2501): Name the FlagSet for the shell and add a version flag. Thanks @neonstalwart\n- [#2647](https://github.com/influxdata/influxdb/issues/2647): Fixes typos in sample config file - thanks @claws!\n\n## v0.9.0-rc31 [2015-05-21]\n\n### Features\n- [#1822](https://github.com/influxdata/influxdb/issues/1822): Wire up DERIVATIVE aggregate\n- [#1477](https://github.com/influxdata/influxdb/issues/1477): Wire up non_negative_derivative function\n- [#2557](https://github.com/influxdata/influxdb/issues/2557): Fix false positive error with `GROUP BY time`\n- [#1891](https://github.com/influxdata/influxdb/issues/1891): Wire up COUNT DISTINCT aggregate\n- [#1989](https://github.com/influxdata/influxdb/issues/1989): Implement `SELECT tagName FROM m`\n\n### Bugfixes\n- [#2545](https://github.com/influxdata/influxdb/pull/2545): Use \"value\" as the field name for graphite input. 
Thanks @cannium.\n- [#2558](https://github.com/influxdata/influxdb/pull/2558): Fix client response check - thanks @vladlopes!\n- [#2566](https://github.com/influxdata/influxdb/pull/2566): Wait until each data write has been committed by the Raft cluster.\n- [#2602](https://github.com/influxdata/influxdb/pull/2602): CLI execute command exits without cleaning up liner package.\n- [#2610](https://github.com/influxdata/influxdb/pull/2610): Fix shard group creation\n- [#2596](https://github.com/influxdata/influxdb/pull/2596): RC30: `panic: runtime error: index out of range` when insert data points.\n- [#2592](https://github.com/influxdata/influxdb/pull/2592): Should return an error if user attempts to group by a field.\n- [#2499](https://github.com/influxdata/influxdb/pull/2499): Issuing a select query with tag as a values causes panic.\n- [#2612](https://github.com/influxdata/influxdb/pull/2612): Query planner should validate distinct is passed a field.\n- [#2531](https://github.com/influxdata/influxdb/issues/2531): Fix select with 3 or more terms in where clause.\n- [#2564](https://github.com/influxdata/influxdb/issues/2564): Change \"name\" to \"measurement\" in JSON for writes.\n\n## PRs\n- [#2569](https://github.com/influxdata/influxdb/pull/2569): Add derivative functions\n- [#2598](https://github.com/influxdata/influxdb/pull/2598): Implement tag support in SELECT statements\n- [#2624](https://github.com/influxdata/influxdb/pull/2624): Remove references to SeriesID in `DROP SERIES` handlers.\n\n## v0.9.0-rc30 [2015-05-12]\n\n### Release Notes\n\nThis release has a breaking API change for writes -- the field previously called `timestamp` has been renamed to `time`.\n\n### Features\n- [#2254](https://github.com/influxdata/influxdb/pull/2254): Add Support for OpenTSDB HTTP interface. 
Thanks @tcolgate\n- [#2525](https://github.com/influxdata/influxdb/pull/2525): Serve broker diagnostics over HTTP\n- [#2186](https://github.com/influxdata/influxdb/pull/2186): The default status code for queries is now `200 OK`\n- [#2298](https://github.com/influxdata/influxdb/pull/2298): Successful writes now return a status code of `204 No Content` - thanks @neonstalwart!\n- [#2549](https://github.com/influxdata/influxdb/pull/2549): Raft election timeout to 5 seconds, so system is more forgiving of CPU loads.\n- [#2568](https://github.com/influxdata/influxdb/pull/2568): Wire up SELECT DISTINCT.\n\n### Bugfixes\n- [#2535](https://github.com/influxdata/influxdb/pull/2535): Return exit status 0 if influxd already running. Thanks @haim0n.\n- [#2521](https://github.com/influxdata/influxdb/pull/2521): Don't truncate topic data until fully replicated.\n- [#2509](https://github.com/influxdata/influxdb/pull/2509): Parse config file correctly during restore. Thanks @neonstalwart\n- [#2532](https://github.com/influxdata/influxdb/issues/2532): Set leader ID on restart of single-node cluster.\n- [#2448](https://github.com/influxdata/influxdb/pull/2448): Fix inconsistent data type - thanks @cannium!\n- [#2108](https://github.com/influxdata/influxdb/issues/2108): Change `timestamp` to `time` - thanks @neonstalwart!\n- [#2539](https://github.com/influxdata/influxdb/issues/2539): Add additional vote request logging.\n- [#2541](https://github.com/influxdata/influxdb/issues/2541): Update messaging client connection index with every message.\n- [#2542](https://github.com/influxdata/influxdb/issues/2542): Throw parser error for invalid aggregate without where time.\n- [#2548](https://github.com/influxdata/influxdb/issues/2548): Return an error when numeric aggregate applied to non-numeric data.\n- [#2487](https://github.com/influxdata/influxdb/issues/2487): Aggregate query with exact timestamp causes panic. 
Thanks @neonstalwart!\n- [#2552](https://github.com/influxdata/influxdb/issues/2552): Run CQ that is actually passed into go-routine.\n- [#2553](https://github.com/influxdata/influxdb/issues/2553): Fix race condition during CQ execution.\n- [#2557](https://github.com/influxdata/influxdb/issues/2557): RC30 WHERE time filter Regression.\n\n## v0.9.0-rc29 [2015-05-05]\n\n### Features\n- [#2410](https://github.com/influxdata/influxdb/pull/2410): If needed, brokers respond with data nodes for peer shard replication.\n- [#2469](https://github.com/influxdata/influxdb/pull/2469): Reduce default max topic size from 1GB to 50MB.\n- [#1824](https://github.com/influxdata/influxdb/pull/1824): Wire up MEDIAN aggregate. Thanks @neonstalwart!\n\n### Bugfixes\n- [#2446](https://github.com/influxdata/influxdb/pull/2446): Correctly count number of queries executed. Thanks @neonstalwart\n- [#2452](https://github.com/influxdata/influxdb/issues/2452): Fix panic with shard stats on multiple clusters\n- [#2453](https://github.com/influxdata/influxdb/pull/2453): Do not require snapshot on Log.WriteEntriesTo().\n- [#2460](https://github.com/influxdata/influxdb/issues/2460): Collectd input should use \"value\" for fields values. Fixes 2412. Thanks @josh-padnick\n- [#2465](https://github.com/influxdata/influxdb/pull/2465): HTTP response logging paniced with chunked requests. Thanks @Jackkoz\n- [#2475](https://github.com/influxdata/influxdb/pull/2475): RLock server when checking if shards groups are required during write.\n- [#2471](https://github.com/influxdata/influxdb/issues/2471): Function calls normalized to be lower case. Fixes percentile not working when called uppercase. 
Thanks @neonstalwart\n- [#2281](https://github.com/influxdata/influxdb/issues/2281): Fix Bad Escape error when parsing regex\n\n## v0.9.0-rc28 [2015-04-27]\n\n### Features\n- [#2410](https://github.com/influxdata/influxdb/pull/2410) Allow configuration of Raft timers\n- [#2354](https://github.com/influxdata/influxdb/pull/2354) Wire up STDDEV. Thanks @neonstalwart!\n\n### Bugfixes\n- [#2374](https://github.com/influxdata/influxdb/issues/2374): Two different panics during SELECT percentile\n- [#2404](https://github.com/influxdata/influxdb/pull/2404): Mean and percentile function fixes\n- [#2408](https://github.com/influxdata/influxdb/pull/2408): Fix snapshot 500 error\n- [#1896](https://github.com/influxdata/influxdb/issues/1896): Excessive heartbeater logging of \"connection refused\" on cluster node stop\n- [#2418](https://github.com/influxdata/influxdb/pull/2418): Fix raft node getting stuck in candidate state\n- [#2415](https://github.com/influxdata/influxdb/pull/2415): Raft leader ID now set on election after failover. Thanks @xiaost\n- [#2426](https://github.com/influxdata/influxdb/pull/2426): Fix race condition around listener address in openTSDB server.\n- [#2426](https://github.com/influxdata/influxdb/pull/2426): Fix race condition around listener address in Graphite server.\n- [#2429](https://github.com/influxdata/influxdb/pull/2429): Ensure no field value is null.\n- [#2431](https://github.com/influxdata/influxdb/pull/2431): Always append shard path in diags. 
Thanks @marcosnils\n- [#2441](https://github.com/influxdata/influxdb/pull/2441): Correctly release server RLock during \"drop series\".\n- [#2445](https://github.com/influxdata/influxdb/pull/2445): Read locks and data race fixes\n\n## v0.9.0-rc27 [04-23-2015]\n\n### Features\n- [#2398](https://github.com/influxdata/influxdb/pull/2398) Track more stats and report errors for shards.\n\n### Bugfixes\n- [#2370](https://github.com/influxdata/influxdb/pull/2370): Fix data race in openTSDB endpoint.\n- [#2371](https://github.com/influxdata/influxdb/pull/2371): Don't set client to nil when closing broker Fixes #2352\n- [#2372](https://github.com/influxdata/influxdb/pull/2372): Fix data race in graphite endpoint.\n- [#2373](https://github.com/influxdata/influxdb/pull/2373): Actually allow HTTP logging to be controlled.\n- [#2376](https://github.com/influxdata/influxdb/pull/2376): Encode all types of integers. Thanks @jtakkala.\n- [#2376](https://github.com/influxdata/influxdb/pull/2376): Add shard path to existing diags value. 
Fix issue #2369.\n- [#2386](https://github.com/influxdata/influxdb/pull/2386): Fix shard datanodes stats getting appended too many times\n- [#2393](https://github.com/influxdata/influxdb/pull/2393): Fix default hostname for connecting to cluster.\n- [#2390](https://github.com/influxdata/influxdb/pull/2390): Handle large sums when calculating means - thanks @neonstalwart!\n- [#2391](https://github.com/influxdata/influxdb/pull/2391): Unable to write points through Go client when authentication enabled\n- [#2400](https://github.com/influxdata/influxdb/pull/2400): Always send auth headers for client requests if present\n\n## v0.9.0-rc26 [04-21-2015]\n\n### Features\n- [#2301](https://github.com/influxdata/influxdb/pull/2301): Distributed query load balancing and failover\n- [#2336](https://github.com/influxdata/influxdb/pull/2336): Handle distributed queries when shards != data nodes\n- [#2353](https://github.com/influxdata/influxdb/pull/2353): Distributed Query/Clustering Fixes\n\n### Bugfixes\n- [#2297](https://github.com/influxdata/influxdb/pull/2297): create /var/run during startup. 
Thanks @neonstalwart.\n- [#2312](https://github.com/influxdata/influxdb/pull/2312): Re-use httpclient for continuous queries\n- [#2318](https://github.com/influxdata/influxdb/pull/2318): Remove pointless use of 'done' channel for collectd.\n- [#2242](https://github.com/influxdata/influxdb/pull/2242): Distributed Query should balance requests\n- [#2243](https://github.com/influxdata/influxdb/pull/2243): Use Limit Reader instead of fixed 1MB/1GB slice for DQ\n- [#2190](https://github.com/influxdata/influxdb/pull/2190): Implement failover to other data nodes for distributed queries\n- [#2324](https://github.com/influxdata/influxdb/issues/2324): Race in Broker.Close()/Broker.RunContinousQueryProcessing()\n- [#2325](https://github.com/influxdata/influxdb/pull/2325): Cluster open fixes\n- [#2326](https://github.com/influxdata/influxdb/pull/2326): Fix parse error in CREATE CONTINUOUS QUERY\n- [#2300](https://github.com/influxdata/influxdb/pull/2300): Refactor integration tests.  Properly close Graphite/OpenTSDB listeners.\n- [#2338](https://github.com/influxdata/influxdb/pull/2338): Fix panic if tag key isn't double quoted when it should have been\n- [#2340](https://github.com/influxdata/influxdb/pull/2340): Fix SHOW DIAGNOSTICS panic if any shard was non-local.\n- [#2351](https://github.com/influxdata/influxdb/pull/2351): Fix data race by rlocking shard during diagnostics.\n- [#2348](https://github.com/influxdata/influxdb/pull/2348): Data node fail to join cluster in 0.9.0rc25\n- [#2343](https://github.com/influxdata/influxdb/pull/2343): Node falls behind Metastore updates\n- [#2334](https://github.com/influxdata/influxdb/pull/2334): Test Partial replication is very problematic\n- [#2272](https://github.com/influxdata/influxdb/pull/2272): clustering: influxdb 0.9.0-rc23 panics when doing a GET with merge_metrics in a\n- [#2350](https://github.com/influxdata/influxdb/pull/2350): Issue fix for :influxd -hostname localhost.\n- 
[#2367](https://github.com/influxdata/influxdb/pull/2367): PR for issue #2350 - Always use localhost, not host name.\n\n## v0.9.0-rc25 [2015-04-15]\n\n### Bugfixes\n- [#2282](https://github.com/influxdata/influxdb/pull/2282): Use \"value\" as field name for OpenTSDB input.\n- [#2283](https://github.com/influxdata/influxdb/pull/2283): Fix bug when restarting an entire existing cluster.\n- [#2293](https://github.com/influxdata/influxdb/pull/2293): Open cluster listener before starting broker.\n- [#2287](https://github.com/influxdata/influxdb/pull/2287): Fix data race during SHOW RETENTION POLICIES.\n- [#2288](https://github.com/influxdata/influxdb/pull/2288): Fix expression parsing bug.\n- [#2294](https://github.com/influxdata/influxdb/pull/2294): Fix async response flushing (invalid chunked response error).\n\n## Features\n- [#2276](https://github.com/influxdata/influxdb/pull/2276): Broker topic truncation.\n- [#2292](https://github.com/influxdata/influxdb/pull/2292): Wire up drop CQ statement - thanks @neonstalwart!\n- [#2290](https://github.com/influxdata/influxdb/pull/2290): Allow hostname argument to override default config - thanks @neonstalwart!\n- [#2295](https://github.com/influxdata/influxdb/pull/2295): Use nil as default return value for MapCount - thanks @neonstalwart!\n- [#2246](https://github.com/influxdata/influxdb/pull/2246): Allow HTTP logging to be controlled.\n\n## v0.9.0-rc24 [2015-04-13]\n\n### Bugfixes\n- [#2255](https://github.com/influxdata/influxdb/pull/2255): Fix panic when changing default retention policy.\n- [#2257](https://github.com/influxdata/influxdb/pull/2257): Add \"snapshotting\" pseudo state & log entry cache.\n- [#2261](https://github.com/influxdata/influxdb/pull/2261): Support int64 value types.\n- [#2191](https://github.com/influxdata/influxdb/pull/2191): Case-insensitive check for \"fill\"\n- [#2274](https://github.com/influxdata/influxdb/pull/2274): Snapshot and HTTP API endpoints\n- 
[#2265](https://github.com/influxdata/influxdb/pull/2265): Fix auth for CLI.\n\n## v0.9.0-rc23 [2015-04-11]\n\n### Features\n- [#2202](https://github.com/influxdata/influxdb/pull/2202): Initial implementation of Distributed Queries\n- [#2202](https://github.com/influxdata/influxdb/pull/2202): 64-bit Series IDs. INCOMPATIBLE WITH PREVIOUS DATASTORES.\n\n### Bugfixes\n- [#2225](https://github.com/influxdata/influxdb/pull/2225): Make keywords completely case insensitive\n- [#2228](https://github.com/influxdata/influxdb/pull/2228): Accept keyword default unquoted in ALTER RETENTION POLICY statement\n- [#2236](https://github.com/influxdata/influxdb/pull/2236): Immediate term changes, fix stale write issue, net/http/pprof\n- [#2213](https://github.com/influxdata/influxdb/pull/2213): Seed random number generator for election timeout. Thanks @cannium.\n\n## v0.9.0-rc22 [2015-04-09]\n\n### Features\n- [#2214](https://github.com/influxdata/influxdb/pull/2214): Added the option to influx CLI to execute single command and exit. Thanks @n1tr0g\n\n### Bugfixes\n- [#2223](https://github.com/influxdata/influxdb/pull/2223): Always notify term change on RequestVote\n\n## v0.9.0-rc21 [2015-04-09]\n\n### Features\n- [#870](https://github.com/influxdata/influxdb/pull/870): Add support for OpenTSDB telnet input protocol. Thanks @tcolgate\n- [#2180](https://github.com/influxdata/influxdb/pull/2180): Allow http write handler to decode gzipped body\n- [#2175](https://github.com/influxdata/influxdb/pull/2175): Separate broker and data nodes\n- [#2158](https://github.com/influxdata/influxdb/pull/2158): Allow user password to be changed. 
Thanks @n1tr0g\n- [#2201](https://github.com/influxdata/influxdb/pull/2201): Bring back config join URLs\n- [#2121](https://github.com/influxdata/influxdb/pull/2121): Parser refactor\n\n### Bugfixes\n- [#2181](https://github.com/influxdata/influxdb/pull/2181): Fix panic on \"SHOW DIAGNOSTICS\".\n- [#2170](https://github.com/influxdata/influxdb/pull/2170): Make sure queries on missing tags return 200 status.\n- [#2197](https://github.com/influxdata/influxdb/pull/2197): Lock server during Open().\n- [#2200](https://github.com/influxdata/influxdb/pull/2200): Re-enable Continuous Queries.\n- [#2203](https://github.com/influxdata/influxdb/pull/2203): Fix race condition on continuous queries.\n- [#2217](https://github.com/influxdata/influxdb/pull/2217): Only revert to follower if new term is greater.\n- [#2219](https://github.com/influxdata/influxdb/pull/2219): Persist term change to disk when candidate. Thanks @cannium\n\n## v0.9.0-rc20 [2015-04-04]\n\n### Features\n- [#2128](https://github.com/influxdata/influxdb/pull/2128): Data node discovery from brokers\n- [#2142](https://github.com/influxdata/influxdb/pull/2142): Support chunked queries\n- [#2154](https://github.com/influxdata/influxdb/pull/2154): Node redirection\n- [#2168](https://github.com/influxdata/influxdb/pull/2168): Return raft term from vote, add term logging\n\n### Bugfixes\n- [#2147](https://github.com/influxdata/influxdb/pull/2147): Set Go Max procs in a better location\n- [#2137](https://github.com/influxdata/influxdb/pull/2137): Refactor `results` to `response`. 
Breaking Go Client change.\n- [#2151](https://github.com/influxdata/influxdb/pull/2151): Ignore replay commands on the metastore.\n- [#2152](https://github.com/influxdata/influxdb/issues/2152): Influxd process with stats enabled crashing with 'Unsuported protocol scheme for \"\"'\n- [#2156](https://github.com/influxdata/influxdb/pull/2156): Propagate error when resolving UDP address in Graphite UDP server.\n- [#2163](https://github.com/influxdata/influxdb/pull/2163): Fix up paths for default data and run storage.\n- [#2164](https://github.com/influxdata/influxdb/pull/2164): Append STDOUT/STDERR in initscript.\n- [#2165](https://github.com/influxdata/influxdb/pull/2165): Better name for config section for stats and diags.\n- [#2165](https://github.com/influxdata/influxdb/pull/2165): Monitoring database and retention policy are not configurable.\n- [#2167](https://github.com/influxdata/influxdb/pull/2167): Add broker log recovery.\n- [#2166](https://github.com/influxdata/influxdb/pull/2166): Don't panic if presented with a field of unknown type.\n- [#2149](https://github.com/influxdata/influxdb/pull/2149): Fix unit tests for win32 when directory doesn't exist.\n- [#2150](https://github.com/influxdata/influxdb/pull/2150): Fix unit tests for win32 when a connection is refused.\n\n## v0.9.0-rc19 [2015-04-01]\n\n### Features\n- [#2143](https://github.com/influxdata/influxdb/pull/2143): Add raft term logging.\n\n### Bugfixes\n- [#2145](https://github.com/influxdata/influxdb/pull/2145): Encode toml durations correctly which fixes default configuration generation `influxd config`.\n\n## v0.9.0-rc18 [2015-03-31]\n\n### Bugfixes\n- [#2100](https://github.com/influxdata/influxdb/pull/2100): Use channel to synchronize collectd shutdown.\n- [#2100](https://github.com/influxdata/influxdb/pull/2100): Synchronize access to shard index.\n- [#2131](https://github.com/influxdata/influxdb/pull/2131): Optimize marshalTags().\n- [#2130](https://github.com/influxdata/influxdb/pull/2130): 
Make fewer calls to marshalTags().\n- [#2105](https://github.com/influxdata/influxdb/pull/2105): Support != for tag values. Fix issue #2097, thanks to @smonkewitz for bug report.\n- [#2105](https://github.com/influxdata/influxdb/pull/2105): Support !~ tags values.\n- [#2138](https://github.com/influxdata/influxdb/pull/2136): Use map for marshaledTags cache.\n\n## v0.9.0-rc17 [2015-03-29]\n\n### Features\n- [#2076](https://github.com/influxdata/influxdb/pull/2076): Separate stdout and stderr output in init.d script\n- [#2091](https://github.com/influxdata/influxdb/pull/2091): Support disabling snapshot endpoint.\n- [#2081](https://github.com/influxdata/influxdb/pull/2081): Support writing diagnostic data into the internal database.\n- [#2095](https://github.com/influxdata/influxdb/pull/2095): Improved InfluxDB client docs. Thanks @derailed\n\n### Bugfixes\n- [#2093](https://github.com/influxdata/influxdb/pull/2093): Point precision not marshalled correctly. Thanks @derailed\n- [#2084](https://github.com/influxdata/influxdb/pull/2084): Allowing leading underscores in identifiers.\n- [#2080](https://github.com/influxdata/influxdb/pull/2080): Graphite logs in seconds, not milliseconds.\n- [#2101](https://github.com/influxdata/influxdb/pull/2101): SHOW DATABASES should name returned series \"databases\".\n- [#2104](https://github.com/influxdata/influxdb/pull/2104): Include NEQ when calculating field filters.\n- [#2112](https://github.com/influxdata/influxdb/pull/2112): Set GOMAXPROCS on startup. This may have been causing extra leader elections, which would cause a number of other bugs or instability.\n- [#2111](https://github.com/influxdata/influxdb/pull/2111) and [#2025](https://github.com/influxdata/influxdb/issues/2025): Raft stability fixes. 
Non-contiguous log error and others.\n- [#2114](https://github.com/influxdata/influxdb/pull/2114): Correctly start influxd on platforms without start-stop-daemon.\n\n## v0.9.0-rc16 [2015-03-24]\n\n### Features\n- [#2058](https://github.com/influxdata/influxdb/pull/2058): Track number of queries executed in stats.\n- [#2059](https://github.com/influxdata/influxdb/pull/2059): Retention policies sorted by name on return to client.\n- [#2061](https://github.com/influxdata/influxdb/pull/2061): Implement SHOW DIAGNOSTICS.\n- [#2064](https://github.com/influxdata/influxdb/pull/2064): Allow init.d script to return influxd version.\n- [#2053](https://github.com/influxdata/influxdb/pull/2053): Implement backup and restore.\n- [#1631](https://github.com/influxdata/influxdb/pull/1631): Wire up DROP CONTINUOUS QUERY.\n\n### Bugfixes\n- [#2037](https://github.com/influxdata/influxdb/pull/2037): Don't check 'configExists' at Run() level.\n- [#2039](https://github.com/influxdata/influxdb/pull/2039): Don't panic if getting current user fails.\n- [#2034](https://github.com/influxdata/influxdb/pull/2034): GROUP BY should require an aggregate.\n- [#2040](https://github.com/influxdata/influxdb/pull/2040): Add missing top-level help for config command.\n- [#2057](https://github.com/influxdata/influxdb/pull/2057): Move racy \"in order\" test to integration test suite.\n- [#2060](https://github.com/influxdata/influxdb/pull/2060): Reload server shard map on restart.\n- [#2068](https://github.com/influxdata/influxdb/pull/2068): Fix misspelled JSON field.\n- [#2067](https://github.com/influxdata/influxdb/pull/2067): Fixed issue where some queries didn't properly pull back data (introduced in RC15). Fixing intervals for GROUP BY.\n\n## v0.9.0-rc15 [2015-03-19]\n\n### Features\n- [#2000](https://github.com/influxdata/influxdb/pull/2000): Log broker path when broker fails to start. 
Thanks @gst.\n- [#2007](https://github.com/influxdata/influxdb/pull/2007): Track shard-level stats.\n\n### Bugfixes\n- [#2001](https://github.com/influxdata/influxdb/pull/2001): Ensure measurement not found returns status code 200.\n- [#1985](https://github.com/influxdata/influxdb/pull/1985): Set content-type JSON header before actually writing header. Thanks @dstrek.\n- [#2003](https://github.com/influxdata/influxdb/pull/2003): Set timestamp when writing monitoring stats.\n- [#2004](https://github.com/influxdata/influxdb/pull/2004): Limit group by to MaxGroupByPoints (currently 100,000).\n- [#2016](https://github.com/influxdata/influxdb/pull/2016): Fixing bucket alignment for group by. Thanks @jnutzmann\n- [#2021](https://github.com/influxdata/influxdb/pull/2021): Remove unnecessary formatting from log message. Thanks @simonkern\n\n\n## v0.9.0-rc14 [2015-03-18]\n\n### Bugfixes\n- [#1999](https://github.com/influxdata/influxdb/pull/1999): Return status code 200 for measurement not found errors on show series.\n\n## v0.9.0-rc13 [2015-03-17]\n\n### Features\n- [#1974](https://github.com/influxdata/influxdb/pull/1974): Add time taken for request to the http server logs.\n\n### Bugfixes\n- [#1971](https://github.com/influxdata/influxdb/pull/1971): Fix leader id initialization.\n- [#1975](https://github.com/influxdata/influxdb/pull/1975): Require `q` parameter for query endpoint.\n- [#1969](https://github.com/influxdata/influxdb/pull/1969): Print loaded config.\n- [#1987](https://github.com/influxdata/influxdb/pull/1987): Fix config print startup statement for when no config is provided.\n- [#1990](https://github.com/influxdata/influxdb/pull/1990): Drop measurement was taking too long due to transactions.\n\n## v0.9.0-rc12 [2015-03-15]\n\n### Bugfixes\n- [#1942](https://github.com/influxdata/influxdb/pull/1942): Sort wildcard names.\n- [#1957](https://github.com/influxdata/influxdb/pull/1957): Graphite numbers are always float64.\n- 
[#1955](https://github.com/influxdata/influxdb/pull/1955): Prohibit creation of databases with no name. Thanks @dullgiulio\n- [#1952](https://github.com/influxdata/influxdb/pull/1952): Handle delete statement with an error. Thanks again to @dullgiulio\n\n### Features\n- [#1935](https://github.com/influxdata/influxdb/pull/1935): Implement stateless broker for Raft.\n- [#1936](https://github.com/influxdata/influxdb/pull/1936): Implement \"SHOW STATS\" and self-monitoring\n\n### Features\n- [#1909](https://github.com/influxdata/influxdb/pull/1909): Implement a dump command.\n\n## v0.9.0-rc11 [2015-03-13]\n\n### Bugfixes\n- [#1917](https://github.com/influxdata/influxdb/pull/1902): Creating Infinite Retention Policy Failed.\n- [#1758](https://github.com/influxdata/influxdb/pull/1758): Add Graphite Integration Test.\n- [#1929](https://github.com/influxdata/influxdb/pull/1929): Default Retention Policy incorrectly auto created.\n- [#1930](https://github.com/influxdata/influxdb/pull/1930): Auto create database for graphite if not specified.\n- [#1908](https://github.com/influxdata/influxdb/pull/1908): Cosmetic CLI output fixes.\n- [#1931](https://github.com/influxdata/influxdb/pull/1931): Add default column to SHOW RETENTION POLICIES.\n- [#1937](https://github.com/influxdata/influxdb/pull/1937): OFFSET should be allowed to be 0.\n\n### Features\n- [#1902](https://github.com/influxdata/influxdb/pull/1902): Enforce retention policies to have a minimum duration.\n- [#1906](https://github.com/influxdata/influxdb/pull/1906): Add show servers to query language.\n- [#1925](https://github.com/influxdata/influxdb/pull/1925): Add `fill(none)`, `fill(previous)`, and `fill(<num>)` to queries.\n\n## v0.9.0-rc10 [2015-03-09]\n\n### Bugfixes\n- [#1867](https://github.com/influxdata/influxdb/pull/1867): Fix race accessing topic replicas map\n- [#1864](https://github.com/influxdata/influxdb/pull/1864): fix race in startStateLoop\n- 
[#1753](https://github.com/influxdata/influxdb/pull/1874): Do Not Panic on Missing Dirs\n- [#1877](https://github.com/influxdata/influxdb/pull/1877): Broker clients track broker leader\n- [#1862](https://github.com/influxdata/influxdb/pull/1862): Fix memory leak in `httpd.serveWait`. Thanks @mountkin\n- [#1883](https://github.com/influxdata/influxdb/pull/1883): RLock server during retention policy enforcement. Thanks @grisha\n- [#1868](https://github.com/influxdata/influxdb/pull/1868): Use `BatchPoints` for `client.Write` method. Thanks @vladlopes, @georgmu, @d2g, @evanphx, @akolosov.\n- [#1881](https://github.com/influxdata/influxdb/pull/1881): Update documentation for `client` package.  Misc library tweaks.\n- Fix queries with multiple where clauses on tags, times and fields. Fix queries that have where clauses on fields not in the select\n\n### Features\n- [#1875](https://github.com/influxdata/influxdb/pull/1875): Support trace logging of Raft.\n- [#1895](https://github.com/influxdata/influxdb/pull/1895): Auto-create a retention policy when a database is created.\n- [#1897](https://github.com/influxdata/influxdb/pull/1897): Pre-create shard groups.\n- [#1900](https://github.com/influxdata/influxdb/pull/1900): Change `LIMIT` to `SLIMIT` and implement `LIMIT` and `OFFSET`\n\n## v0.9.0-rc9 [2015-03-06]\n\n### Bugfixes\n- [#1872](https://github.com/influxdata/influxdb/pull/1872): Fix \"stale term\" errors with raft\n\n## v0.9.0-rc8 [2015-03-05]\n\n### Bugfixes\n- [#1836](https://github.com/influxdata/influxdb/pull/1836): Store each parsed shell command in history file.\n- [#1789](https://github.com/influxdata/influxdb/pull/1789): add --config-files option to fpm command. 
Thanks @kylezh\n- [#1859](https://github.com/influxdata/influxdb/pull/1859): Queries with a `GROUP BY *` clause were returning a 500 if done against a measurement that didn't exist\n\n### Features\n- [#1755](https://github.com/influxdata/influxdb/pull/1848): Support JSON data ingest over UDP\n- [#1857](https://github.com/influxdata/influxdb/pull/1857): Support retention policies with infinite duration\n- [#1858](https://github.com/influxdata/influxdb/pull/1858): Enable detailed tracing of write path\n\n## v0.9.0-rc7 [2015-03-02]\n\n### Features\n- [#1813](https://github.com/influxdata/influxdb/pull/1813): Queries for missing measurements or fields now return a 200 with an error message in the series JSON.\n- [#1826](https://github.com/influxdata/influxdb/pull/1826), [#1827](https://github.com/influxdata/influxdb/pull/1827): Fixed queries with `WHERE` clauses against fields.\n\n### Bugfixes\n\n- [#1744](https://github.com/influxdata/influxdb/pull/1744): Allow retention policies to be modified without specifying replication factor. Thanks @kylezh\n- [#1809](https://github.com/influxdata/influxdb/pull/1809): Packaging post-install script unconditionally removes init.d symlink. Thanks @sineos\n\n## v0.9.0-rc6 [2015-02-27]\n\n### Bugfixes\n\n- [#1780](https://github.com/influxdata/influxdb/pull/1780): Malformed identifiers get through the parser\n- [#1775](https://github.com/influxdata/influxdb/pull/1775): Panic \"index out of range\" on some queries\n- [#1744](https://github.com/influxdata/influxdb/pull/1744): Select shard groups which completely encompass time range. Thanks @kylezh.\n\n## v0.9.0-rc5 [2015-02-27]\n\n### Bugfixes\n\n- [#1752](https://github.com/influxdata/influxdb/pull/1752): remove debug log output from collectd.\n- [#1720](https://github.com/influxdata/influxdb/pull/1720): Parse Series IDs as unsigned 32-bits.\n- [#1767](https://github.com/influxdata/influxdb/pull/1767): Drop Series was failing across shards.  
Issue #1761.\n- [#1773](https://github.com/influxdata/influxdb/pull/1773): Fix bug when merging series together that have unequal number of points in a group by interval\n- [#1771](https://github.com/influxdata/influxdb/pull/1771): Make `SHOW SERIES` return IDs and support `LIMIT` and `OFFSET`\n\n### Features\n\n- [#1698](https://github.com/influxdata/influxdb/pull/1698): Wire up DROP MEASUREMENT\n\n## v0.9.0-rc4 [2015-02-24]\n\n### Bugfixes\n\n- Fix authentication issue with continuous queries\n- Print version in the log on startup\n\n## v0.9.0-rc3 [2015-02-23]\n\n### Features\n\n- [#1659](https://github.com/influxdata/influxdb/pull/1659): WHERE against regexes: `WHERE =~ '.*asdf'`\n- [#1580](https://github.com/influxdata/influxdb/pull/1580): Add support for fields with bool, int, or string data types\n- [#1687](https://github.com/influxdata/influxdb/pull/1687): Change `Rows` to `Series` in results output. BREAKING API CHANGE\n- [#1629](https://github.com/influxdata/influxdb/pull/1629): Add support for `DROP SERIES` queries\n- [#1632](https://github.com/influxdata/influxdb/pull/1632): Add support for `GROUP BY *` to return all series within a measurement\n- [#1689](https://github.com/influxdata/influxdb/pull/1689): Change `SHOW TAG VALUES WITH KEY=\"foo\"` to use the key name in the result. BREAKING API CHANGE\n- [#1699](https://github.com/influxdata/influxdb/pull/1699): Add CPU and memory profiling options to daemon\n- [#1672](https://github.com/influxdata/influxdb/pull/1672): Add index tracking to metastore. 
Makes downed node recovery actually work\n- [#1591](https://github.com/influxdata/influxdb/pull/1591): Add `spread` aggregate function\n- [#1576](https://github.com/influxdata/influxdb/pull/1576): Add `first` and `last` aggregate functions\n- [#1573](https://github.com/influxdata/influxdb/pull/1573): Add `stddev` aggregate function\n- [#1565](https://github.com/influxdata/influxdb/pull/1565): Add the admin interface back into the server and update for new API\n- [#1562](https://github.com/influxdata/influxdb/pull/1562): Enforce retention policies\n- [#1700](https://github.com/influxdata/influxdb/pull/1700): Change `Values` to `Fields` on writes.  BREAKING API CHANGE\n- [#1706](https://github.com/influxdata/influxdb/pull/1706): Add support for `LIMIT` and `OFFSET`, which work on the number of series returned in a query. To limit the number of data points use a `WHERE time` clause\n\n### Bugfixes\n\n- [#1636](https://github.com/influxdata/influxdb/issues/1636): Don't store number of fields in raw data. THIS IS A BREAKING DATA CHANGE. YOU MUST START WITH A FRESH DATABASE\n- [#1701](https://github.com/influxdata/influxdb/pull/1701), [#1667](https://github.com/influxdata/influxdb/pull/1667), [#1663](https://github.com/influxdata/influxdb/pull/1663), [#1615](https://github.com/influxdata/influxdb/pull/1615): Raft fixes\n- [#1644](https://github.com/influxdata/influxdb/pull/1644): Add batching support for significantly improved write performance\n- [#1704](https://github.com/influxdata/influxdb/pull/1704): Fix queries that pull back raw data (i.e. ones without aggregate functions)\n- [#1718](https://github.com/influxdata/influxdb/pull/1718): Return an error on write if any of the points don't have at least one field\n- [#1806](https://github.com/influxdata/influxdb/pull/1806): Fix regex parsing.  
Change regex syntax to use / delimiters.\n\n\n## v0.9.0-rc1,2 [no public release]\n\n### Features\n\n- Support for tags added\n- New queries for showing measurement names, tag keys, and tag values\n- Renamed shard spaces to retention policies\n- Deprecated matching against regex in favor of explicit writing and querying on retention policies\n- Pure Go InfluxQL parser\n- Switch to BoltDB as underlying datastore\n- BoltDB backed metastore to store schema information\n- Updated HTTP API to only have two endpoints `/query` and `/write`\n- Added all administrative functions to the query language\n- Change cluster architecture to have brokers and data nodes\n- Switch to streaming Raft implementation\n- In memory inverted index of the tag data\n- Pure Go implementation!\n\n## v0.8.6 [2014-11-15]\n\n### Features\n\n- [Issue #973](https://github.com/influxdata/influxdb/issues/973). Support\n  joining using a regex or list of time series\n- [Issue #1068](https://github.com/influxdata/influxdb/issues/1068). Print\n  the processor chain when the query is started\n\n### Bugfixes\n\n- [Issue #584](https://github.com/influxdata/influxdb/issues/584). Don't\n  panic if the process died while initializing\n- [Issue #663](https://github.com/influxdata/influxdb/issues/663). Make\n  sure all sub services are closed when we are stopping InfluxDB\n- [Issue #671](https://github.com/influxdata/influxdb/issues/671). Fix\n  the Makefile package target for Mac OSX\n- [Issue #800](https://github.com/influxdata/influxdb/issues/800). Use\n  su instead of sudo in the init script. This fixes the startup problem\n  on RHEL 6.\n- [Issue #925](https://github.com/influxdata/influxdb/issues/925). Don't\n  generate invalid query strings for single point queries\n- [Issue #943](https://github.com/influxdata/influxdb/issues/943). Don't\n  take two snapshots at the same time\n- [Issue #947](https://github.com/influxdata/influxdb/issues/947). 
Exit\n  nicely if the daemon doesn't have permission to write to the log.\n- [Issue #959](https://github.com/influxdata/influxdb/issues/959). Stop using\n  closed connections in the protobuf client.\n- [Issue #978](https://github.com/influxdata/influxdb/issues/978). Check\n  for valgrind and mercurial in the configure script\n- [Issue #996](https://github.com/influxdata/influxdb/issues/996). Fill should\n  fill the time range even if no points exists in the given time range\n- [Issue #1008](https://github.com/influxdata/influxdb/issues/1008). Return\n  an appropriate exit status code depending on whether the process exits\n  due to an error or exits gracefully.\n- [Issue #1024](https://github.com/influxdata/influxdb/issues/1024). Hitting\n  open files limit causes influxdb to create shards in loop.\n- [Issue #1069](https://github.com/influxdata/influxdb/issues/1069). Fix\n  deprecated interface endpoint in Admin UI.\n- [Issue #1076](https://github.com/influxdata/influxdb/issues/1076). Fix\n  the timestamps of data points written by the collectd plugin. (Thanks,\n  @renchap for reporting this bug)\n- [Issue #1078](https://github.com/influxdata/influxdb/issues/1078). Make sure\n  we don't resurrect shard directories for shards that have already expired\n- [Issue #1085](https://github.com/influxdata/influxdb/issues/1085). Set\n  the connection string of the local raft node\n- [Issue #1092](https://github.com/influxdata/influxdb/issues/1093). Set\n  the connection string of the local node in the raft snapshot.\n- [Issue #1100](https://github.com/influxdata/influxdb/issues/1100). Removing\n  a non-existent shard space causes the cluster to panic.\n- [Issue #1113](https://github.com/influxdata/influxdb/issues/1113). A nil\n  engine.ProcessorChain causes a panic.\n\n## v0.8.5 [2014-10-27]\n\n### Features\n\n- [Issue #1055](https://github.com/influxdata/influxdb/issues/1055). 
Allow\n  graphite and collectd input plugins to have separate binding address\n\n### Bugfixes\n\n- [Issue #1058](https://github.com/influxdata/influxdb/issues/1058). Use\n  the query language instead of the continuous query endpoints that\n  were removed in 0.8.4\n- [Issue #1022](https://github.com/influxdata/influxdb/issues/1022). Return\n  an +Inf or NaN instead of panicking when we encounter a divide by zero\n- [Issue #821](https://github.com/influxdata/influxdb/issues/821). Don't\n  scan through points when we hit the limit\n- [Issue #1051](https://github.com/influxdata/influxdb/issues/1051). Fix\n  timestamps when the collectd is used and low resolution timestamps\n  is set.\n\n## v0.8.4 [2014-10-24]\n\n### Bugfixes\n\n- Remove the continuous query api endpoints since the query language\n  has all the features needed to list and delete continuous queries.\n- [Issue #778](https://github.com/influxdata/influxdb/issues/778). Selecting\n  from a non-existent series should give a better error message indicating\n  that the series doesn't exist\n- [Issue #988](https://github.com/influxdata/influxdb/issues/988). Check\n  the arguments of `top()` and `bottom()`\n- [Issue #1021](https://github.com/influxdata/influxdb/issues/1021). Make\n  redirecting to standard output and standard error optional instead of\n  going to `/dev/null`. This can now be configured by setting `$STDOUT`\n  in `/etc/default/influxdb`\n- [Issue #985](https://github.com/influxdata/influxdb/issues/985). Make\n  sure we drop a shard only when there's no one using it. Otherwise, the\n  shard can be closed when another goroutine is writing to it which will\n  cause random errors and possibly corruption of the database.\n\n### Features\n\n- [Issue #1047](https://github.com/influxdata/influxdb/issues/1047). Allow\n  merge() to take a list of series (as opposed to a regex in #72)\n\n## v0.8.4-rc.1 [2014-10-21]\n\n### Bugfixes\n\n- [Issue #1040](https://github.com/influxdata/influxdb/issues/1040). 
Revert\n  to older raft snapshot if the latest one is corrupted\n- [Issue #1004](https://github.com/influxdata/influxdb/issues/1004). Querying\n  for data outside of existing shards returns an empty response instead of\n  throwing a `Couldn't lookup columns` error\n- [Issue #1020](https://github.com/influxdata/influxdb/issues/1020). Change\n  init script exit codes to conform to the lsb standards. (Thanks, @spuder)\n- [Issue #1011](https://github.com/influxdata/influxdb/issues/1011). Fix\n  the tarball for homebrew so that rocksdb is included and the directory\n  structure is clean\n- [Issue #1007](https://github.com/influxdata/influxdb/issues/1007). Fix\n  the content type when an error occurs and the client requests\n  compression.\n- [Issue #916](https://github.com/influxdata/influxdb/issues/916). Set\n  the ulimit in the init script with a way to override the limit\n- [Issue #742](https://github.com/influxdata/influxdb/issues/742). Fix\n  rocksdb for Mac OSX\n- [Issue #387](https://github.com/influxdata/influxdb/issues/387). Aggregations\n  with group by time(1w), time(1m) and time(1y) (for week, month and\n  year respectively) will cause the start time and end time of the bucket\n  to fall on the logical boundaries of the week, month or year.\n- [Issue #334](https://github.com/influxdata/influxdb/issues/334). Derivative\n  for queries with group by time() and fill(), will take the difference\n  between the first value in the bucket and the first value of the next\n  bucket.\n- [Issue #972](https://github.com/influxdata/influxdb/issues/972). Don't\n  assign duplicate server ids\n\n### Features\n\n- [Issue #722](https://github.com/influxdata/influxdb/issues/722). Add\n  an install target to the Makefile\n- [Issue #1032](https://github.com/influxdata/influxdb/issues/1032). Include\n  the admin ui static assets in the binary\n- [Issue #1019](https://github.com/influxdata/influxdb/issues/1019). 
Upgrade\n  to rocksdb 3.5.1\n- [Issue #992](https://github.com/influxdata/influxdb/issues/992). Add\n  an input plugin for collectd. (Thanks, @kimor79)\n- [Issue #72](https://github.com/influxdata/influxdb/issues/72). Support merge\n  for multiple series using regex syntax\n\n## v0.8.3 [2014-09-24]\n\n### Bugfixes\n\n- [Issue #885](https://github.com/influxdata/influxdb/issues/885). Multiple\n  queries separated by semicolons work as expected. Queries are processed\n  sequentially\n- [Issue #652](https://github.com/influxdata/influxdb/issues/652). Return an\n  error if an invalid column is used in the where clause\n- [Issue #794](https://github.com/influxdata/influxdb/issues/794). Fix case\n  insensitive regex matching\n- [Issue #853](https://github.com/influxdata/influxdb/issues/853). Move\n  cluster config from raft to API.\n- [Issue #714](https://github.com/influxdata/influxdb/issues/714). Don't\n  panic on invalid boolean operators.\n- [Issue #843](https://github.com/influxdata/influxdb/issues/843). Prevent blank database names\n- [Issue #780](https://github.com/influxdata/influxdb/issues/780). Fix\n  fill() for all aggregators\n- [Issue #923](https://github.com/influxdata/influxdb/issues/923). Enclose\n  table names in double quotes in the result of GetQueryString()\n- [Issue #923](https://github.com/influxdata/influxdb/issues/923). Enclose\n  table names in double quotes in the result of GetQueryString()\n- [Issue #967](https://github.com/influxdata/influxdb/issues/967). Return an\n  error if the storage engine can't be created\n- [Issue #954](https://github.com/influxdata/influxdb/issues/954). Don't automatically\n  create shards which was causing too many shards to be created when used with\n  grafana\n- [Issue #939](https://github.com/influxdata/influxdb/issues/939). Aggregation should\n  ignore null values and invalid values, e.g. strings with mean().\n- [Issue #964](https://github.com/influxdata/influxdb/issues/964). 
Parse\n  big int in queries properly.\n\n## v0.8.2 [2014-09-05]\n\n### Bugfixes\n\n- [Issue #886](https://github.com/influxdata/influxdb/issues/886). Update shard space to not set defaults\n\n- [Issue #867](https://github.com/influxdata/influxdb/issues/867). Add option to return shard space mappings in list series\n\n### Bugfixes\n\n- [Issue #652](https://github.com/influxdata/influxdb/issues/652). Return\n  a meaningful error if an invalid column is used in where clause\n  after joining multiple series\n\n## v0.8.2 [2014-09-08]\n\n### Features\n\n- Added API endpoint to update shard space definitions\n\n### Bugfixes\n\n- [Issue #886](https://github.com/influxdata/influxdb/issues/886). Shard space regexes reset after restart of InfluxDB\n\n## v0.8.1 [2014-09-03]\n\n- [Issue #896](https://github.com/influxdata/influxdb/issues/896). Allow logging to syslog. Thanks @malthe\n\n### Bugfixes\n\n- [Issue #868](https://github.com/influxdata/influxdb/issues/868). Don't panic when upgrading a snapshot from 0.7.x\n- [Issue #887](https://github.com/influxdata/influxdb/issues/887). The first continuous query shouldn't trigger backfill if it had backfill disabled\n- [Issue #674](https://github.com/influxdata/influxdb/issues/674). Graceful exit when config file is invalid. (Thanks, @DavidBord)\n- [Issue #857](https://github.com/influxdata/influxdb/issues/857). More informative list servers api. (Thanks, @oliveagle)\n\n## v0.8.0 [2014-08-22]\n\n### Features\n\n- [Issue #850](https://github.com/influxdata/influxdb/issues/850). Makes the server listing more informative\n\n### Bugfixes\n\n- [Issue #779](https://github.com/influxdata/influxdb/issues/779). Deleting expired shards isn't thread safe.\n- [Issue #860](https://github.com/influxdata/influxdb/issues/860). Load database config should validate shard spaces.\n- [Issue #862](https://github.com/influxdata/influxdb/issues/862). 
Data migrator should have option to set delay time.\n\n## v0.8.0-rc.5 [2014-08-15]\n\n### Features\n\n- [Issue #376](https://github.com/influxdata/influxdb/issues/376). List series should support regex filtering\n- [Issue #745](https://github.com/influxdata/influxdb/issues/745). Add continuous queries to the database config\n- [Issue #746](https://github.com/influxdata/influxdb/issues/746). Add data migration tool for 0.8.0\n\n### Bugfixes\n\n- [Issue #426](https://github.com/influxdata/influxdb/issues/426). Fill should fill the entire time range that is requested\n- [Issue #740](https://github.com/influxdata/influxdb/issues/740). Don't emit non existent fields when joining series with different fields\n- [Issue #744](https://github.com/influxdata/influxdb/issues/744). Admin site should have all assets locally\n- [Issue #767](https://github.com/influxdata/influxdb/issues/767). Remove shards whenever they expire\n- [Issue #781](https://github.com/influxdata/influxdb/issues/781). Don't emit non existent fields when joining series with different fields\n- [Issue #791](https://github.com/influxdata/influxdb/issues/791). Move database config loader to be an API endpoint\n- [Issue #809](https://github.com/influxdata/influxdb/issues/809). Migration path from 0.7 -> 0.8\n- [Issue #811](https://github.com/influxdata/influxdb/issues/811). Gogoprotobuf removed `ErrWrongType`, which is depended on by Raft\n- [Issue #820](https://github.com/influxdata/influxdb/issues/820). Query non-local shard with time range to avoid getting back points not in time range\n- [Issue #827](https://github.com/influxdata/influxdb/issues/827). Don't leak file descriptors in the WAL\n- [Issue #830](https://github.com/influxdata/influxdb/issues/830). List series should return series in lexicographic sorted order\n- [Issue #831](https://github.com/influxdata/influxdb/issues/831). 
Move create shard space to be db specific\n\n## v0.8.0-rc.4 [2014-07-29]\n\n### Bugfixes\n\n- [Issue #774](https://github.com/influxdata/influxdb/issues/774). Don't try to parse \"inf\" shard retention policy\n- [Issue #769](https://github.com/influxdata/influxdb/issues/769). Use retention duration when determining expired shards. (Thanks, @shugo)\n- [Issue #736](https://github.com/influxdata/influxdb/issues/736). Only db admins should be able to drop a series\n- [Issue #713](https://github.com/influxdata/influxdb/issues/713). Null should be a valid fill value\n- [Issue #644](https://github.com/influxdata/influxdb/issues/644). Graphite api should write data in batches to the coordinator\n- [Issue #740](https://github.com/influxdata/influxdb/issues/740). Panic when distinct fields are selected from an inner join\n- [Issue #781](https://github.com/influxdata/influxdb/issues/781). Panic when distinct fields are added after an inner join\n\n## v0.8.0-rc.3 [2014-07-21]\n\n### Bugfixes\n\n- [Issue #752](https://github.com/influxdata/influxdb/issues/752). `./configure` should use goroot to find gofmt\n- [Issue #758](https://github.com/influxdata/influxdb/issues/758). Clarify the reason behind graphite input plugin not starting. (Thanks, @otoolep)\n- [Issue #759](https://github.com/influxdata/influxdb/issues/759). Don't revert the regex in the shard space. (Thanks, @shugo)\n- [Issue #760](https://github.com/influxdata/influxdb/issues/760). Removing a server should remove it from the shard server ids. (Thanks, @shugo)\n- [Issue #772](https://github.com/influxdata/influxdb/issues/772). Add sentinel values to all db. 
This caused the last key in the db to not be fetched properly.\n\n\n## v0.8.0-rc.2 [2014-07-15]\n\n- This release is to fix a build error in rc1 which caused rocksdb to not be available\n- Bump up the `max-open-files` option to 1000 on all storage engines\n- Lower the `write-buffer-size` to 1000\n\n## v0.8.0-rc.1 [2014-07-15]\n\n### Features\n\n- [Issue #643](https://github.com/influxdata/influxdb/issues/643). Support pretty print json. (Thanks, @otoolep)\n- [Issue #641](https://github.com/influxdata/influxdb/issues/641). Support multiple storage engines\n- [Issue #665](https://github.com/influxdata/influxdb/issues/665). Make build tmp directory configurable in the make file. (Thanks, @dgnorton)\n- [Issue #667](https://github.com/influxdata/influxdb/issues/667). Enable compression on all GET requests and when writing data\n- [Issue #648](https://github.com/influxdata/influxdb/issues/648). Return permissions when listing db users. (Thanks, @nicolai86)\n- [Issue #682](https://github.com/influxdata/influxdb/issues/682). Allow continuous queries to run without backfill (Thanks, @dhammika)\n- [Issue #689](https://github.com/influxdata/influxdb/issues/689). **REQUIRES DATA MIGRATION** Move metadata into raft\n- [Issue #255](https://github.com/influxdata/influxdb/issues/255). Support millisecond precision using `ms` suffix\n- [Issue #95](https://github.com/influxdata/influxdb/issues/95). Drop database should not be synchronous\n- [Issue #571](https://github.com/influxdata/influxdb/issues/571). Add support for arbitrary number of shard spaces and retention policies\n- Default storage engine changed to RocksDB\n\n### Bugfixes\n\n- [Issue #651](https://github.com/influxdata/influxdb/issues/651). Change permissions of symlink which fix some installation issues. (Thanks, @Dieterbe)\n- [Issue #670](https://github.com/influxdata/influxdb/issues/670). Don't warn on missing influxdb user on fresh installs\n- [Issue #676](https://github.com/influxdata/influxdb/issues/676). 
Allow storing high precision integer values without losing any information\n- [Issue #695](https://github.com/influxdata/influxdb/issues/695). Prevent having duplicate field names in the write payload. (Thanks, @seunglee150)\n- [Issue #731](https://github.com/influxdata/influxdb/issues/731). Don't enable the udp plugin if the `enabled` option is set to false\n- [Issue #733](https://github.com/influxdata/influxdb/issues/733). Print an `INFO` message when the input plugin is disabled\n- [Issue #707](https://github.com/influxdata/influxdb/issues/707). Graphite input plugin should work payload delimited by any whitespace character\n- [Issue #734](https://github.com/influxdata/influxdb/issues/734). Don't buffer non replicated writes\n- [Issue #465](https://github.com/influxdata/influxdb/issues/465). Recreating a currently deleting db or series doesn't bring back the old data anymore\n- [Issue #358](https://github.com/influxdata/influxdb/issues/358). **BREAKING** List series should return as a single series\n- [Issue #499](https://github.com/influxdata/influxdb/issues/499). **BREAKING** Querying non-existent database or series will return an error\n- [Issue #570](https://github.com/influxdata/influxdb/issues/570). InfluxDB crashes during delete/drop of database\n- [Issue #592](https://github.com/influxdata/influxdb/issues/592). Drop series is inefficient\n\n## v0.7.3 [2014-06-13]\n\n### Bugfixes\n\n- [Issue #637](https://github.com/influxdata/influxdb/issues/637). Truncate log files if the last request wasn't written properly\n- [Issue #646](https://github.com/influxdata/influxdb/issues/646). CRITICAL: Duplicate shard ids for new shards if old shards are deleted.\n\n## v0.7.2 [2014-05-30]\n\n### Features\n\n- [Issue #521](https://github.com/influxdata/influxdb/issues/521). MODE works on all datatypes (Thanks, @richthegeek)\n\n### Bugfixes\n\n- [Issue #418](https://github.com/influxdata/influxdb/pull/418). 
Requests or responses larger than MAX_REQUEST_SIZE break things.\n- [Issue #606](https://github.com/influxdata/influxdb/issues/606). InfluxDB will fail to start with invalid permission if log.txt didn't exist\n- [Issue #602](https://github.com/influxdata/influxdb/issues/602). Merge will fail to work across shards\n\n### Features\n\n## v0.7.1 [2014-05-29]\n\n### Bugfixes\n\n- [Issue #579](https://github.com/influxdata/influxdb/issues/579). Reject writes to nonexistent databases\n- [Issue #597](https://github.com/influxdata/influxdb/issues/597). Force compaction after deleting data\n\n### Features\n\n- [Issue #476](https://github.com/influxdata/influxdb/issues/476). Support ARM architecture\n- [Issue #578](https://github.com/influxdata/influxdb/issues/578). Support aliasing for expressions in parenthesis\n- [Issue #544](https://github.com/influxdata/influxdb/pull/544). Support forcing node removal from a cluster\n- [Issue #591](https://github.com/influxdata/influxdb/pull/591). Support multiple udp input plugins (Thanks, @tpitale)\n- [Issue #600](https://github.com/influxdata/influxdb/pull/600). Report version, os, arch, and raftName once per day.\n\n## v0.7.0 [2014-05-23]\n\n### Bugfixes\n\n- [Issue #557](https://github.com/influxdata/influxdb/issues/557). Group by time(1y) doesn't work while time(365d) works\n- [Issue #547](https://github.com/influxdata/influxdb/issues/547). Add difference function (Thanks, @mboelstra)\n- [Issue #550](https://github.com/influxdata/influxdb/issues/550). Fix tests on 32-bit ARM\n- [Issue #524](https://github.com/influxdata/influxdb/issues/524). Arithmetic operators and where conditions don't play nice together\n- [Issue #561](https://github.com/influxdata/influxdb/issues/561). Fix missing query in parsing errors\n- [Issue #563](https://github.com/influxdata/influxdb/issues/563). Add sample config for graphite over udp\n- [Issue #537](https://github.com/influxdata/influxdb/issues/537). 
Incorrect query syntax causes internal error\n- [Issue #565](https://github.com/influxdata/influxdb/issues/565). Empty series names shouldn't cause a panic\n- [Issue #575](https://github.com/influxdata/influxdb/issues/575). Single point select doesn't interpret timestamps correctly\n- [Issue #576](https://github.com/influxdata/influxdb/issues/576). We shouldn't set timestamps and sequence numbers when listing cq\n- [Issue #560](https://github.com/influxdata/influxdb/issues/560). Use /dev/urandom instead of /dev/random\n- [Issue #502](https://github.com/influxdata/influxdb/issues/502). Fix a\n  race condition in assigning id to db+series+field (Thanks @ohurvitz\n  for reporting this bug and providing a script to repro)\n\n### Features\n\n- [Issue #567](https://github.com/influxdata/influxdb/issues/567). Allow selecting from multiple series names by separating them with commas (Thanks, @peekeri)\n\n### Deprecated\n\n- [Issue #460](https://github.com/influxdata/influxdb/issues/460). Don't start automatically after installing\n- [Issue #529](https://github.com/influxdata/influxdb/issues/529). Don't run influxdb as root\n- [Issue #443](https://github.com/influxdata/influxdb/issues/443). Use `name` instead of `username` when returning cluster admins\n\n## v0.6.5 [2014-05-19]\n\n### Features\n\n- [Issue #551](https://github.com/influxdata/influxdb/issues/551). Add TOP and BOTTOM aggregate functions (Thanks, @chobie)\n\n### Bugfixes\n\n- [Issue #555](https://github.com/influxdata/influxdb/issues/555). Fix a regression introduced in the raft snapshot format\n\n## v0.6.4 [2014-05-16]\n\n### Features\n\n- Make the write batch size configurable (also applies to deletes)\n- Optimize writing to multiple series\n- [Issue #546](https://github.com/influxdata/influxdb/issues/546). 
Add UDP support for Graphite API (Thanks, @peekeri)\n\n### Bugfixes\n\n- Fix a bug in shard logic that caused short term shards to be clobbered with long term shards\n- [Issue #489](https://github.com/influxdata/influxdb/issues/489). Remove replication factor from CreateDatabase command\n\n## v0.6.3 [2014-05-13]\n\n### Features\n\n- [Issue #505](https://github.com/influxdata/influxdb/issues/505). Return a version header with the http response (Thanks, @majst01)\n- [Issue #520](https://github.com/influxdata/influxdb/issues/520). Print the version to the log file\n\n### Bugfixes\n\n- [Issue #516](https://github.com/influxdata/influxdb/issues/516). Close WAL log/index files when they aren't being used\n- [Issue #532](https://github.com/influxdata/influxdb/issues/532). Don't log graphite connection EOF as an error\n- [Issue #535](https://github.com/influxdata/influxdb/issues/535). WAL Replay hangs if response isn't received\n- [Issue #538](https://github.com/influxdata/influxdb/issues/538). Don't panic if the same series existed twice in the request with different columns\n- [Issue #536](https://github.com/influxdata/influxdb/issues/536). Joining the cluster after shards are created shouldn't cause new nodes to panic\n- [Issue #539](https://github.com/influxdata/influxdb/issues/539). count(distinct()) with fill shouldn't panic on empty groups\n- [Issue #534](https://github.com/influxdata/influxdb/issues/534). Create a new series when interpolating\n\n## v0.6.2 [2014-05-09]\n\n### Bugfixes\n\n- [Issue #511](https://github.com/influxdata/influxdb/issues/511). Don't automatically create the database when a db user is created\n- [Issue #512](https://github.com/influxdata/influxdb/issues/512). Group by should respect null values\n- [Issue #518](https://github.com/influxdata/influxdb/issues/518). Filter Infinities and NaNs from the returned json\n- [Issue #522](https://github.com/influxdata/influxdb/issues/522). 
Committing requests while replaying caused the WAL to skip some log files\n- [Issue #369](https://github.com/influxdata/influxdb/issues/369). Fix some edge cases with WAL recovery\n\n## v0.6.1 [2014-05-06]\n\n### Bugfixes\n\n- [Issue #500](https://github.com/influxdata/influxdb/issues/500). Support `y` suffix in time durations\n- [Issue #501](https://github.com/influxdata/influxdb/issues/501). Writes with invalid payload should be rejected\n- [Issue #507](https://github.com/influxdata/influxdb/issues/507). New cluster admin passwords don't propagate properly to other nodes in a cluster\n- [Issue #508](https://github.com/influxdata/influxdb/issues/508). Don't replay WAL entries for servers with no shards\n- [Issue #464](https://github.com/influxdata/influxdb/issues/464). Admin UI shouldn't draw graphs for string columns\n- [Issue #480](https://github.com/influxdata/influxdb/issues/480). Large values on the y-axis get cut off\n\n## v0.6.0 [2014-05-02]\n\n### Feature\n\n- [Issue #477](https://github.com/influxdata/influxdb/issues/477). Add a udp json interface (Thanks, Julien Ammous)\n- [Issue #491](https://github.com/influxdata/influxdb/issues/491). Make initial root password settable through env variable (Thanks, Edward Muller)\n\n### Bugfixes\n\n- [Issue #469](https://github.com/influxdata/influxdb/issues/469). Drop continuous queries when a database is dropped\n- [Issue #431](https://github.com/influxdata/influxdb/issues/431). Don't log to standard output if a log file is specified in the config file\n- [Issue #483](https://github.com/influxdata/influxdb/issues/483). Return 409 if a database already exists (Thanks, Edward Muller)\n- [Issue #486](https://github.com/influxdata/influxdb/issues/486). Columns used in the target of continuous query shouldn't be inserted in the time series\n- [Issue #490](https://github.com/influxdata/influxdb/issues/490). 
Database user passwords cannot be changed (Thanks, Edward Muller)\n- [Issue #495](https://github.com/influxdata/influxdb/issues/495). Enforce write permissions properly\n\n## v0.5.12 [2014-04-29]\n\n### Bugfixes\n\n- [Issue #419](https://github.com/influxdata/influxdb/issues/419),[Issue #478](https://github.com/influxdata/influxdb/issues/478). Allow hostname, raft and protobuf ports to be changed, without requiring manual intervention from the user\n\n## v0.5.11 [2014-04-25]\n\n### Features\n\n- [Issue #471](https://github.com/influxdata/influxdb/issues/471). Read and write permissions should be settable through the http api\n\n### Bugfixes\n\n- [Issue #323](https://github.com/influxdata/influxdb/issues/323). Continuous queries should guard against data loops\n- [Issue #473](https://github.com/influxdata/influxdb/issues/473). Engine memory optimization\n\n## v0.5.10 [2014-04-22]\n\n### Features\n\n- [Issue #463](https://github.com/influxdata/influxdb/issues/463). Allow series names to use any character (escape by wrapping in double quotes)\n- [Issue #447](https://github.com/influxdata/influxdb/issues/447). Allow @ in usernames\n- [Issue #466](https://github.com/influxdata/influxdb/issues/466). Allow column names to use any character (escape by wrapping in double quotes)\n\n### Bugfixes\n\n- [Issue #458](https://github.com/influxdata/influxdb/issues/458). Continuous queries with group by time() and a column should insert sequence numbers of 1\n- [Issue #457](https://github.com/influxdata/influxdb/issues/457). Deleting series that start with capital letters should work\n\n## v0.5.9 [2014-04-18]\n\n### Bugfixes\n\n- [Issue #446](https://github.com/influxdata/influxdb/issues/446). Check for (de)serialization errors\n- [Issue #456](https://github.com/influxdata/influxdb/issues/456). Continuous queries failed if one of the group by columns had null value\n- [Issue #455](https://github.com/influxdata/influxdb/issues/455). 
Comparison operators should ignore null values\n\n## v0.5.8 [2014-04-17]\n\n- Renamed config.toml.sample to config.sample.toml\n\n### Bugfixes\n\n- [Issue #244](https://github.com/influxdata/influxdb/issues/244). Reconstruct the query from the ast\n- [Issue #449](https://github.com/influxdata/influxdb/issues/449). Heartbeat timeouts can cause reading from connection to lock up\n- [Issue #451](https://github.com/influxdata/influxdb/issues/451). Reduce the aggregation state that is kept in memory so that\n  aggregation queries over large periods of time don't take an insane amount of memory\n\n## v0.5.7 [2014-04-15]\n\n### Features\n\n- Queries are now logged as INFO in the log file before they run\n\n### Bugfixes\n\n- [Issue #328](https://github.com/influxdata/influxdb/issues/328). Join queries with math expressions don't work\n- [Issue #440](https://github.com/influxdata/influxdb/issues/440). Heartbeat timeouts in logs\n- [Issue #442](https://github.com/influxdata/influxdb/issues/442). shouldQuerySequentially didn't work as expected\n  causing count(*) queries on large time series to use\n  lots of memory\n- [Issue #437](https://github.com/influxdata/influxdb/issues/437). Queries with negative constants don't parse properly\n- [Issue #432](https://github.com/influxdata/influxdb/issues/432). Deleted data using a delete query is resurrected after a server restart\n- [Issue #439](https://github.com/influxdata/influxdb/issues/439). Report the right location of the error in the query\n- Fix some bugs with the WAL recovery on startup\n\n## v0.5.6 [2014-04-08]\n\n### Features\n\n- [Issue #310](https://github.com/influxdata/influxdb/issues/310). Request should support multiple timeseries\n- [Issue #416](https://github.com/influxdata/influxdb/issues/416). Improve the time it takes to drop database\n\n### Bugfixes\n\n- [Issue #413](https://github.com/influxdata/influxdb/issues/413). 
Don't assume that group by interval is greater than a second\n- [Issue #415](https://github.com/influxdata/influxdb/issues/415). Include the database when sending an auth error back to the user\n- [Issue #421](https://github.com/influxdata/influxdb/issues/421). Make read timeout a config option\n- [Issue #392](https://github.com/influxdata/influxdb/issues/392). Different columns in different shards returns invalid results when a query spans those shards\n\n### Bugfixes\n\n## v0.5.5 [2014-04-04]\n\n- Upgrade leveldb 1.10 -> 1.15\n\n  This should be a backward compatible change, but is here for documentation only\n\n### Feature\n\n- Add a command line option to repair corrupted leveldb databases on startup\n- [Issue #401](https://github.com/influxdata/influxdb/issues/401). No limit on the number of columns in the group by clause\n\n### Bugfixes\n\n- [Issue #398](https://github.com/influxdata/influxdb/issues/398). Support now() and NOW() in the query lang\n- [Issue #403](https://github.com/influxdata/influxdb/issues/403). Filtering should work with join queries\n- [Issue #404](https://github.com/influxdata/influxdb/issues/404). Filtering with invalid condition shouldn't crash the server\n- [Issue #405](https://github.com/influxdata/influxdb/issues/405). Percentile shouldn't crash for small number of values\n- [Issue #408](https://github.com/influxdata/influxdb/issues/408). Make InfluxDB recover from internal bugs and panics\n- [Issue #390](https://github.com/influxdata/influxdb/issues/390). Multiple response.WriteHeader when querying as admin\n- [Issue #407](https://github.com/influxdata/influxdb/issues/407). Start processing continuous queries only after the WAL is initialized\n- Close leveldb databases properly if we couldn't create a new Shard. See leveldb\\_shard\\_datastore\\_test:131\n\n## v0.5.4 [2014-04-02]\n\n### Bugfixes\n\n- [Issue #386](https://github.com/influxdata/influxdb/issues/386). 
Drop series should work with series containing dots\n- [Issue #389](https://github.com/influxdata/influxdb/issues/389). Filtering shouldn't stop prematurely\n- [Issue #341](https://github.com/influxdata/influxdb/issues/341). Make the number of shards that are queried in parallel configurable\n- [Issue #394](https://github.com/influxdata/influxdb/issues/394). Support count(distinct) and count(DISTINCT)\n- [Issue #362](https://github.com/influxdata/influxdb/issues/362). Limit should be enforced after aggregation\n\n## v0.5.3 [2014-03-31]\n\n### Bugfixes\n\n- [Issue #378](https://github.com/influxdata/influxdb/issues/378). Indexing should return if there are no requests added since the last index\n- [Issue #370](https://github.com/influxdata/influxdb/issues/370). Filtering and limit should be enforced on the shards\n- [Issue #379](https://github.com/influxdata/influxdb/issues/379). Boolean columns should be usable in where clauses\n- [Issue #381](https://github.com/influxdata/influxdb/issues/381). Should be able to do deletes as a cluster admin\n\n## v0.5.2 [2014-03-28]\n\n### Bugfixes\n\n- [Issue #342](https://github.com/influxdata/influxdb/issues/342). Data resurrected after a server restart\n- [Issue #367](https://github.com/influxdata/influxdb/issues/367). Influxdb won't start if the api port is commented out\n- [Issue #355](https://github.com/influxdata/influxdb/issues/355). Return an error on wrong time strings\n- [Issue #331](https://github.com/influxdata/influxdb/issues/331). Allow negative time values in the where clause\n- [Issue #371](https://github.com/influxdata/influxdb/issues/371). Series index isn't deleted when the series is dropped\n- [Issue #360](https://github.com/influxdata/influxdb/issues/360). 
Store and recover continuous queries\n\n## v0.5.1 [2014-03-24]\n\n### Bugfixes\n\n- Revert the version of goraft due to a bug found in the latest version\n\n## v0.5.0 [2014-03-24]\n\n### Features\n\n- [Issue #293](https://github.com/influxdata/influxdb/pull/293). Implement a Graphite listener\n\n### Bugfixes\n\n- [Issue #340](https://github.com/influxdata/influxdb/issues/340). Writing many requests while replaying seems to cause commits out of order\n\n## v0.5.0-rc.6 [2014-03-20]\n\n### Bugfixes\n\n- Increase raft election timeout to avoid unnecessary re-elections\n- Sort points before writing them to avoid an explosion in the request\n  number when the points are written randomly\n- [Issue #335](https://github.com/influxdata/influxdb/issues/335). Fixes regexp for interpolating more than one column value in continuous queries\n- [Issue #318](https://github.com/influxdata/influxdb/pull/318). Support EXPLAIN queries\n- [Issue #333](https://github.com/influxdata/influxdb/pull/333). Fail\n  when the password is too short or too long instead of passing it to\n  the crypto library\n\n## v0.5.0-rc.5 [2014-03-11]\n\n### Bugfixes\n\n- [Issue #312](https://github.com/influxdata/influxdb/issues/312). WAL should wait for server id to be set before recovering\n- [Issue #301](https://github.com/influxdata/influxdb/issues/301). Use ref counting to guard against race conditions in the shard cache\n- [Issue #319](https://github.com/influxdata/influxdb/issues/319). Propagate engine creation error correctly to the user\n- [Issue #316](https://github.com/influxdata/influxdb/issues/316). Make\n  sure we don't starve goroutines if we get an access denied error\n  from one of the shards\n- [Issue #306](https://github.com/influxdata/influxdb/issues/306). Deleting/Dropping database takes a lot of memory\n- [Issue #302](https://github.com/influxdata/influxdb/issues/302). Should be able to set negative timestamps on points\n- [Issue #327](https://github.com/influxdata/influxdb/issues/327). 
Make delete queries not use WAL. This addresses #315, #317 and #314\n- [Issue #321](https://github.com/influxdata/influxdb/issues/321). Make sure we split points on shards properly\n\n## v0.5.0-rc.4 [2014-03-07]\n\n### Bugfixes\n\n- [Issue #298](https://github.com/influxdata/influxdb/issues/298). Fix limit when querying multiple shards\n- [Issue #305](https://github.com/influxdata/influxdb/issues/305). Shard ids not unique after restart\n- [Issue #309](https://github.com/influxdata/influxdb/issues/309). Don't relog the requests on the remote server\n- Fix few bugs in the WAL and refactor the way it works (this requires purging the WAL from previous rc)\n\n## v0.5.0-rc.3 [2014-03-03]\n\n### Bugfixes\n- [Issue #69](https://github.com/influxdata/influxdb/issues/69). Support column aliases\n- [Issue #287](https://github.com/influxdata/influxdb/issues/287). Make the lru cache size configurable\n- [Issue #38](https://github.com/influxdata/influxdb/issues/38). Fix a memory leak discussed in this story\n- [Issue #286](https://github.com/influxdata/influxdb/issues/286). Make the number of open shards configurable\n- Make LevelDB use the max open files configuration option.\n\n## v0.5.0-rc.2 [2014-02-27]\n\n### Bugfixes\n\n- [Issue #274](https://github.com/influxdata/influxdb/issues/274). Crash after restart\n- [Issue #277](https://github.com/influxdata/influxdb/issues/277). Ensure duplicate shards won't be created\n- [Issue #279](https://github.com/influxdata/influxdb/issues/279). Limits not working on regex queries\n- [Issue #281](https://github.com/influxdata/influxdb/issues/281). `./influxdb -v` should print the sha when building from source\n- [Issue #283](https://github.com/influxdata/influxdb/issues/283). Dropping shard and restart in cluster causes panic.\n- [Issue #288](https://github.com/influxdata/influxdb/issues/288). 
Sequence numbers should be unique per server id\n\n## v0.5.0-rc.1 [2014-02-25]\n\n### Bugfixes\n\n- Ensure large deletes don't take too much memory\n- [Issue #240](https://github.com/influxdata/influxdb/pull/240). Unable to query against columns with `.` in the name.\n- [Issue #250](https://github.com/influxdata/influxdb/pull/250). different result between normal and continuous query with \"group by\" clause\n- [Issue #216](https://github.com/influxdata/influxdb/pull/216). Results with no points should exclude columns and points\n\n### Features\n\n- [Issue #243](https://github.com/influxdata/influxdb/issues/243). Should have endpoint to GET a user's attributes.\n- [Issue #269](https://github.com/influxdata/influxdb/pull/269), [Issue #65](https://github.com/influxdata/influxdb/issues/65) New clustering architecture (see docs), with the side effect that queries can be distributed between multiple shards\n- [Issue #164](https://github.com/influxdata/influxdb/pull/269),[Issue #103](https://github.com/influxdata/influxdb/pull/269),[Issue #166](https://github.com/influxdata/influxdb/pull/269),[Issue #165](https://github.com/influxdata/influxdb/pull/269),[Issue #132](https://github.com/influxdata/influxdb/pull/269) Make request log a log file instead of leveldb with recovery on startup\n\n### Deprecated\n\n- [Issue #189](https://github.com/influxdata/influxdb/issues/189). `/cluster_admins` and `/db/:db/users` return usernames in a `name` key instead of `username` key.\n- [Issue #216](https://github.com/influxdata/influxdb/pull/216). Results with no points should exclude columns and points\n\n## v0.4.4 [2014-02-05]\n\n### Features\n\n- Make the leveldb max open files configurable in the toml file\n\n## v0.4.3 [2014-01-31]\n\n### Bugfixes\n\n- [Issue #225](https://github.com/influxdata/influxdb/issues/225). Remove a hard limit on the points returned by the datastore\n- [Issue #223](https://github.com/influxdata/influxdb/issues/223). 
Null values caused count(distinct()) to panic\n- [Issue #224](https://github.com/influxdata/influxdb/issues/224). Null values broke replication due to protobuf limitation\n\n## v0.4.1 [2014-01-30]\n\n### Features\n\n- [Issue #193](https://github.com/influxdata/influxdb/issues/193). Allow logging to stdout. Thanks @schmurfy\n- [Issue #190](https://github.com/influxdata/influxdb/pull/190). Add support for SSL.\n- [Issue #194](https://github.com/influxdata/influxdb/pull/194). Should be able to disable Admin interface.\n\n### Bugfixes\n\n- [Issue #33](https://github.com/influxdata/influxdb/issues/33). Don't call WriteHeader more than once per request\n- [Issue #195](https://github.com/influxdata/influxdb/issues/195). Allow the bind address to be configurable, Thanks @schmurfy.\n- [Issue #199](https://github.com/influxdata/influxdb/issues/199). Make the test timeout configurable\n- [Issue #200](https://github.com/influxdata/influxdb/issues/200). Selecting `time` or `sequence_number` silently fail\n- [Issue #215](https://github.com/influxdata/influxdb/pull/215). Server fails to start up after Raft log compaction and restart.\n\n## v0.4.0 [2014-01-17]\n\n## Features\n\n- [Issue #86](https://github.com/influxdata/influxdb/issues/86). Support arithmetic expressions in select clause\n- [Issue #92](https://github.com/influxdata/influxdb/issues/92). Change '==' to '=' and '!=' to '<>'\n- [Issue #88](https://github.com/influxdata/influxdb/issues/88). Support datetime strings\n- [Issue #64](https://github.com/influxdata/influxdb/issues/64). Shard writes and queries across cluster with replay for briefly downed nodes (< 24 hrs)\n- [Issue #78](https://github.com/influxdata/influxdb/issues/78). Sequence numbers persist across restarts so they're not reused\n- [Issue #102](https://github.com/influxdata/influxdb/issues/102). Support expressions in where condition\n- [Issue #101](https://github.com/influxdata/influxdb/issues/101). 
Support expressions in aggregates\n- [Issue #62](https://github.com/influxdata/influxdb/issues/62). Support updating and deleting column values\n- [Issue #96](https://github.com/influxdata/influxdb/issues/96). Replicate deletes in a cluster\n- [Issue #94](https://github.com/influxdata/influxdb/issues/94). delete queries\n- [Issue #116](https://github.com/influxdata/influxdb/issues/116). Use proper logging\n- [Issue #40](https://github.com/influxdata/influxdb/issues/40). Use TOML instead of JSON in the config file\n- [Issue #99](https://github.com/influxdata/influxdb/issues/99). Support list series in the query language\n- [Issue #149](https://github.com/influxdata/influxdb/issues/149). Cluster admins should be able to perform reads and writes.\n- [Issue #108](https://github.com/influxdata/influxdb/issues/108). Querying one point using `time =`\n- [Issue #114](https://github.com/influxdata/influxdb/issues/114). Servers should periodically check that they're consistent.\n- [Issue #93](https://github.com/influxdata/influxdb/issues/93). Should be able to drop a time series\n- [Issue #177](https://github.com/influxdata/influxdb/issues/177). Support drop series in the query language.\n- [Issue #184](https://github.com/influxdata/influxdb/issues/184). Implement Raft log compaction.\n- [Issue #153](https://github.com/influxdata/influxdb/issues/153). Implement continuous queries\n\n### Bugfixes\n\n- [Issue #90](https://github.com/influxdata/influxdb/issues/90). Group by multiple columns panic\n- [Issue #89](https://github.com/influxdata/influxdb/issues/89). 'Group by' combined with 'where' not working\n- [Issue #106](https://github.com/influxdata/influxdb/issues/106). Don't panic if we only see one point and can't calculate derivative\n- [Issue #105](https://github.com/influxdata/influxdb/issues/105). Panic when using a where clause that reference columns with null values\n- [Issue #61](https://github.com/influxdata/influxdb/issues/61). 
Remove default limits from queries\n- [Issue #118](https://github.com/influxdata/influxdb/issues/118). Make column names starting with '_' legal\n- [Issue #121](https://github.com/influxdata/influxdb/issues/121). Don't fall back to the cluster admin auth if the db user auth fails\n- [Issue #127](https://github.com/influxdata/influxdb/issues/127). Return error on delete queries with where condition that don't have time\n- [Issue #117](https://github.com/influxdata/influxdb/issues/117). Fill empty groups with default values\n- [Issue #150](https://github.com/influxdata/influxdb/pull/150). Fix parser for when multiple divisions look like a regex.\n- [Issue #158](https://github.com/influxdata/influxdb/issues/158). Logged deletes should be stored with the time range if missing.\n- [Issue #136](https://github.com/influxdata/influxdb/issues/136). Make sure writes are replicated in order to avoid triggering replays\n- [Issue #145](https://github.com/influxdata/influxdb/issues/145). Server fails to join cluster if all starting at same time.\n- [Issue #176](https://github.com/influxdata/influxdb/issues/176). Drop database should take effect on all nodes\n- [Issue #180](https://github.com/influxdata/influxdb/issues/180). Column names not returned when running multi-node cluster and writing more than one point.\n- [Issue #182](https://github.com/influxdata/influxdb/issues/182). 
Queries with invalid limit clause crash the server\n\n### Deprecated\n\n- deprecate '==' and '!=' in favor of '=' and '<>', respectively\n- deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint\n- deprecate `username` field for a more consistent `name` field in `/db/:db/users` and `/cluster_admins`\n- deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should\n  be used to update user flags, password, etc.\n- Querying for column names that don't exist no longer throws an error.\n\n## v0.3.2\n\n## Features\n\n- [Issue #82](https://github.com/influxdata/influxdb/issues/82). Add endpoint for listing available admin interfaces.\n- [Issue #80](https://github.com/influxdata/influxdb/issues/80). Support durations when specifying start and end time\n- [Issue #81](https://github.com/influxdata/influxdb/issues/81). Add support for IN\n\n## Bugfixes\n\n- [Issue #75](https://github.com/influxdata/influxdb/issues/75). Don't allow time series names that start with underscore\n- [Issue #85](https://github.com/influxdata/influxdb/issues/85). Non-existing columns exist after they have been queried before\n\n## v0.3.0\n\n## Features\n\n- [Issue #51](https://github.com/influxdata/influxdb/issues/51). Implement first and last aggregates\n- [Issue #35](https://github.com/influxdata/influxdb/issues/35). Support table aliases in Join Queries\n- [Issue #71](https://github.com/influxdata/influxdb/issues/71). Add WillReturnSingleSeries to the Query\n- [Issue #61](https://github.com/influxdata/influxdb/issues/61). Limit should default to 10k\n- [Issue #59](https://github.com/influxdata/influxdb/issues/59). Add histogram aggregate function\n\n## Bugfixes\n\n- Fix join and merges when the query is a descending order query\n- [Issue #57](https://github.com/influxdata/influxdb/issues/57). Don't panic when type of time != float\n- [Issue #63](https://github.com/influxdata/influxdb/issues/63). 
Aggregate queries should not have a sequence_number column\n\n## v0.2.0\n\n### Features\n\n- [Issue #37](https://github.com/influxdata/influxdb/issues/37). Support the negation of the regex matcher !~\n- [Issue #47](https://github.com/influxdata/influxdb/issues/47). Spill out query and database detail at the time of bug report\n\n### Bugfixes\n\n- [Issue #36](https://github.com/influxdata/influxdb/issues/36). The regex operator should be =~ not ~=\n- [Issue #39](https://github.com/influxdata/influxdb/issues/39). Return proper content types from the http api\n- [Issue #42](https://github.com/influxdata/influxdb/issues/42). Make the api consistent with the docs\n- [Issue #41](https://github.com/influxdata/influxdb/issues/41). Table/Points not deleted when database is dropped\n- [Issue #45](https://github.com/influxdata/influxdb/issues/45). Aggregation shouldn't mess up the order of the points\n- [Issue #44](https://github.com/influxdata/influxdb/issues/44). Fix crashes on RHEL 5.9\n- [Issue #34](https://github.com/influxdata/influxdb/issues/34). Ascending order always return null for columns that have a null value\n- [Issue #55](https://github.com/influxdata/influxdb/issues/55). Limit should limit the points that match the Where clause\n- [Issue #53](https://github.com/influxdata/influxdb/issues/53). Writing null values via HTTP API fails\n\n### Deprecated\n\n- Preparing to deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint\n- Preparing to deprecate `username` field for a more consistent `name` field in the `/db/:db/users`\n- Preparing to deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should\n  be used to update user flags, password, etc.\n\n## v0.1.0\n\n### Features\n\n- [Issue #29](https://github.com/influxdata/influxdb/issues/29). Semicolon is now optional in queries\n- [Issue #31](https://github.com/influxdata/influxdb/issues/31). 
Support Basic Auth as well as query params for authentication.\n\n### Bugfixes\n\n- Don't allow creating users with empty username\n- [Issue #22](https://github.com/influxdata/influxdb/issues/22). Don't set goroot if it was set\n- [Issue #25](https://github.com/influxdata/influxdb/issues/25). Fix queries that use the median aggregator\n- [Issue #26](https://github.com/influxdata/influxdb/issues/26). Default log and db directories should be in /opt/influxdb/shared/data\n- [Issue #27](https://github.com/influxdata/influxdb/issues/27). Group by should not blow up if the one of the columns in group by has null values\n- [Issue #30](https://github.com/influxdata/influxdb/issues/30). Column indexes/names getting off somehow\n- [Issue #32](https://github.com/influxdata/influxdb/issues/32). Fix many typos in the codebase. Thanks @pborreli\n\n## v0.0.9\n\n#### Features\n\n- Add stddev(...) support\n- Better docs, thanks @auxesis and @d-snp.\n\n#### Bugfixes\n\n- Set PYTHONPATH and CC appropriately on mac os x.\n- [Issue #18](https://github.com/influxdata/influxdb/issues/18). Fix 386 debian and redhat packages\n- [Issue #23](https://github.com/influxdata/influxdb/issues/23). Fix the init scripts on redhat\n\n## v0.0.8\n\n#### Features\n\n- Add a way to reset the root password from the command line.\n- Add distinct(..) and derivative(...) 
support\n- Print test coverage if running go1.2\n\n#### Bugfixes\n\n- Fix the default admin site path in the .deb and .rpm packages.\n- Fix the configuration filename in the .tar.gz package.\n\n## v0.0.7\n\n#### Features\n\n- include the admin site in the repo to make it easier for newcomers.\n\n## v0.0.6\n\n#### Features\n\n- Add count(distinct(..)) support\n\n#### Bugfixes\n\n- Reuse levigo read/write options.\n\n## v0.0.5\n\n#### Features\n\n- Cache passwords in memory to speed up password verification\n- Add MERGE and INNER JOIN support\n\n#### Bugfixes\n\n- All columns should be returned if `select *` was used\n- Read/Write benchmarks\n\n## v0.0.2\n\n#### Features\n\n- Add an admin UI\n- Deb and RPM packages\n\n#### Bugfixes\n\n- Fix some nil pointer dereferences\n- Cleanup the aggregators implementation\n\n## v0.0.1 [2013-10-22]\n\n  * Initial Release\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/CODING_GUIDELINES.md",
    "content": "_This document is currently in draft form._\n\n# Background\n\nThe goal of this guide is to capture some Do and Don'ts of Go code for the InfluxDB database. When it comes to Go, writing good code is often achieved with the help of tools like `go fmt` and `go vet`. However there are still some practices not enforceable by any tools. This guide lists some specific practices to follow when writing code for the database.\n\n*Like everything, one needs to use good judgment.* There will always be times when it doesn't make sense to follow a guideline outlined in this document. If that case arises, be ready to justify your choices.\n\n# The Guidelines\n\n## Try not to use third-party libraries\n\nA third-party package is defined as one that is not part of the standard Go distribution. Generally speaking we prefer to minimize our use of third-party packages, and avoid them unless absolutely necessary. We'll often write a little bit of code rather than pull in a third-party package. Of course, we do use some third-party packages -- most importantly we use [BoltDB](https://github.com/boltdb/bolt) in some storage engines. So to maximise the chance your change will be accepted by us, use only the standard libraries, or the third-party packages we have decided to use.\n\nFor rationale, check out the post [The Case Against Third Party Libraries](http://blog.gopheracademy.com/advent-2014/case-against-3pl/).\n\n## Always include a default case in a 'switch' statement\nThe lack of a `default` case in a `switch` statement can be a significant source of bugs. This is particularly true in the case of a type-assertions switch. 
So always include a `default` statement unless you have an explicit reason not to.\n\n## When -- and when not -- set a channel to 'nil'\n\n## Use defer with anonymous functions to handle complex locking\nConsider a block of code like the following.\n```\n    mu.Lock()\n    if foo == \"quit\" {\n        mu.Unlock()\n        return\n    } else if foo == \"continue\" {\n        if bar == \"quit\" {\n            mu.Unlock()\n            return\n        }\n        bar = \"still going\"\n    } else {\n        qux = \"here at last\"\n        mu.Unlock()\n        return\n    }\n    foo = \"more to do\"\n    bar = \"still more to do\"\n    mu.Unlock()\n\n    qux = \"finished now\"\n    return\n```\nWhile this is obviously contrived, complex lock control like this is sometimes required, and doesn't lend itself to `defer`. But as the code evolves, it's easy to introduce new cases, and forget to release locks. One way to address this is to use an anonymous function like so:\n```\n    more := func() bool {\n        mu.Lock()\n        defer mu.Unlock()\n        if foo == \"quit\" {\n            return false\n        } else if foo == \"continue\" {\n            if bar == \"quit\" {\n                return false\n            }\n            bar = \"still going\"\n        } else {\n            qux = \"here at last\"\n            return false\n        }\n        foo = \"more to do\"\n        bar = \"still more to do\"\n        return true\n    }()\n\n    if more {\n        qux = \"finished\"\n    }\n    return\n```\nThis allows us to use `defer` but ensures that if any new cases are added to the logic within the anonymous function, the lock will always be released. 
Another advantage of this approach is that `defer` will still run even in the event of a panic, ensuring the locks will be released even in that case.\n\n## When to call 'panic()'\n\n# Useful links\n- [Useful techniques in Go](http://arslan.io/ten-useful-techniques-in-go)\n- [Go in production](http://peter.bourgon.org/go-in-production/)\n- [Principles of designing Go APIs with channels](https://inconshreveable.com/07-08-2014/principles-of-designing-go-apis-with-channels/)\n- [Common mistakes in Golang](http://soryy.com/blog/2014/common-mistakes-with-go-lang/). Especially this section `Loops, Closures, and Local Variables`\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/CONTRIBUTING.md",
    "content": "Contributing to InfluxDB\n========================\n\nBug reports\n---------------\nBefore you file an issue, please search existing issues in case it has already been filed, or perhaps even fixed. If you file an issue, please include the following.\n* Full details of your operating system (or distribution) e.g. 64-bit Ubuntu 14.04.\n* The version of InfluxDB you are running\n* Whether you installed it using a pre-built package, or built it from source.\n* A small test case, if applicable, that demonstrates the issues.\n\nRemember the golden rule of bug reports: **The easier you make it for us to reproduce the problem, the faster it will get fixed.**\nIf you have never written a bug report before, or if you want to brush up on your bug reporting skills, we recommend reading [Simon Tatham's essay \"How to Report Bugs Effectively.\"](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html)\n\nTest cases should be in the form of `curl` commands. For example:\n```bash\n# create database\ncurl -G http://localhost:8086/query --data-urlencode \"q=CREATE DATABASE mydb\"\n\n# create retention policy\ncurl -G http://localhost:8086/query --data-urlencode \"q=CREATE RETENTION POLICY myrp ON mydb DURATION 365d REPLICATION 1 DEFAULT\"\n\n# write data\ncurl -X POST http://localhost:8086/write --data-urlencode \"db=mydb\" --data-binary \"cpu,region=useast,host=server_1,service=redis value=61\"\n\n# Delete a Measurement\ncurl -G http://localhost:8086/query  --data-urlencode 'db=mydb' --data-urlencode 'q=DROP MEASUREMENT cpu'\n\n# Query the Measurement\n# Bug: expected it to return no data, but data comes back.\ncurl -G http://localhost:8086/query  --data-urlencode 'db=mydb' --data-urlencode 'q=SELECT * from cpu'\n```\n**If you don't include a clear test case like this, your issue may not be investigated, and may even be closed**. 
If writing the data is too difficult, please zip up your data directory and include a link to it in your bug report.\n\nPlease note that issues are *not the place to file general questions* such as \"how do I use collectd with InfluxDB?\" Questions of this nature should be sent to the [InfluxData Community](https://community.influxdata.com/), not filed as issues. Issues like this will be closed.\n\nFeature requests\n---------------\nWe really like to receive feature requests, as it helps us prioritize our work. Please be clear about your requirements, as incomplete feature requests may simply be closed if we don't understand what you would like to see added to InfluxDB.\n\nContributing to the source code\n---------------\n\nInfluxDB follows standard Go project structure. This means that all your Go development is done in `$GOPATH/src`. GOPATH can be any directory under which InfluxDB and all its dependencies will be cloned. For full details on the project structure, follow along below.\n\nYou should also read our [coding guide](https://github.com/influxdata/influxdb/blob/master/CODING_GUIDELINES.md), to understand better how to write code for InfluxDB.\n\nSubmitting a pull request\n------------\nTo submit a pull request you should fork the InfluxDB repository, and make your change on a feature branch of your fork. Then generate a pull request from your branch against *master* of the InfluxDB repository. Include in your pull request details of your change -- the why *and* the how -- as well as the testing you performed. Also, be sure to run the test suite with your change in place. 
Changes that cause tests to fail cannot be merged.\n\nThere will usually be some back and forth as we finalize the change, but once that completes it may be merged.\n\nTo assist in review for the PR, please add the following to your pull request comment:\n\n```md\n- [ ] CHANGELOG.md updated\n- [ ] Rebased/mergable\n- [ ] Tests pass\n- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)\n```\n\nSigning the CLA\n---------------\n\nIf you are going to be contributing back to InfluxDB please take a\nsecond to sign our CLA, which can be found\n[on our website](https://influxdata.com/community/cla/).\n\nInstalling Go\n-------------\nInfluxDB requires Go 1.8.3\n\nAt InfluxDB we find gvm, a Go version manager, useful for installing Go. For instructions\non how to install it see [the gvm page on github](https://github.com/moovweb/gvm).\n\nAfter installing gvm you can install and set the default go version by\nrunning the following:\n\n    gvm install go1.8.3\n    gvm use go1.8.3 --default\n\nInstalling GDM\n-------------\nInfluxDB uses [gdm](https://github.com/sparrc/gdm) to manage dependencies.  Install it by running the following:\n\n    go get github.com/sparrc/gdm\n\nRevision Control Systems\n-------------\nGo has the ability to import remote packages via revision control systems with the `go get` command.  
To ensure that you can retrieve any remote package, be sure to install the following rcs software to your system.\nCurrently the project only depends on `git` and `mercurial`.\n\n* [Install Git](http://git-scm.com/book/en/Getting-Started-Installing-Git)\n* [Install Mercurial](http://mercurial.selenic.com/wiki/Download)\n\nGetting the source\n------\nSetup the project structure and fetch the repo like so:\n\n```bash\n    mkdir $HOME/gocodez\n    export GOPATH=$HOME/gocodez\n    go get github.com/influxdata/influxdb\n```\n\nYou can add the line `export GOPATH=$HOME/gocodez` to your bash/zsh file to be set for every shell instead of having to manually run it everytime.\n\nCloning a fork\n-------------\nIf you wish to work with fork of InfluxDB, your own fork for example, you must still follow the directory structure above. But instead of cloning the main repo, instead clone your fork. Follow the steps below to work with a fork:\n\n```bash\n    export GOPATH=$HOME/gocodez\n    mkdir -p $GOPATH/src/github.com/influxdata\n    cd $GOPATH/src/github.com/influxdata\n    git clone git@github.com:<username>/influxdb\n```\n\nRetaining the directory structure `$GOPATH/src/github.com/influxdata` is necessary so that Go imports work correctly.\n\nBuild and Test\n-----\n\nMake sure you have Go installed and the project structure as shown above. To then get the dependencies for the project, execute the following commands:\n\n```bash\ncd $GOPATH/src/github.com/influxdata/influxdb\ngdm restore\n```\n\nTo then build and install the binaries, run the following command.\n```bash\ngo clean ./...\ngo install ./...\n```\nThe binaries will be located in `$GOPATH/bin`. 
Please note that the InfluxDB binary is named `influxd`, not `influxdb`.\n\nTo set the version and commit flags during the build pass the following to the **install** command:\n\n```bash\n-ldflags=\"-X main.version=$VERSION -X main.branch=$BRANCH -X main.commit=$COMMIT\"\n```\n\nwhere `$VERSION` is the version, `$BRANCH` is the branch, and `$COMMIT` is the git commit hash.\n\nIf you want to build packages, see `build.py` usage information:\n\n```bash\npython build.py --help\n\n# Or to build a package for your current system\npython build.py --package\n```\n\nTo run the tests, execute the following command:\n\n```bash\ncd $GOPATH/src/github.com/influxdata/influxdb\ngo test -v ./...\n\n# run tests that match some pattern\ngo test -run=TestDatabase . -v\n\n# run tests and show coverage\ngo test -coverprofile /tmp/cover . && go tool cover -html /tmp/cover\n```\n\nTo install go cover, run the following command:\n```\ngo get golang.org/x/tools/cmd/cover\n```\n\nGenerated Google Protobuf code\n-----------------\nMost changes to the source do not require that the generated protocol buffer code be changed. But if you need to modify the protocol buffer code, you'll first need to install the protocol buffers toolchain.\n\nFirst install the [protocol buffer compiler](https://developers.google.com/protocol-buffers/\n) 2.6.1 or later for your OS:\n\nThen install the go plugins:\n\n```bash\ngo get github.com/gogo/protobuf/proto\ngo get github.com/gogo/protobuf/protoc-gen-gogo\ngo get github.com/gogo/protobuf/gogoproto\n```\n\nFinally run, `go generate` after updating any `*.proto` file:\n\n```bash\ngo generate ./...\n```\n**Troubleshooting**\n\nIf generating the protobuf code is failing for you, check each of the following:\n* Ensure the protobuf library can be found. Make sure that `LD_LIBRARY_PATH` includes the directory in which the library `libprotoc.so` has been installed.\n* Ensure the command `protoc-gen-gogo`, found in `GOPATH/bin`, is on your path. 
This can be done by adding `GOPATH/bin` to `PATH`.\n\n\nGenerated Go Templates\n----------------------\n\nThe query engine requires optimized data structures for each data type so\ninstead of writing each implementation several times we use templates. _Do not\nchange code that ends in a `.gen.go` extension!_ Instead you must edit the\n`.gen.go.tmpl` file that was used to generate it.\n\nOnce you've edited the template file, you'll need the [`tmpl`][tmpl] utility\nto generate the code:\n\n```sh\n$ go get github.com/benbjohnson/tmpl\n```\n\nThen you can regenerate all templates in the project:\n\n```sh\n$ go generate ./...\n```\n\n[tmpl]: https://github.com/benbjohnson/tmpl\n\n\nPre-commit checks\n-------------\n\nWe have a pre-commit hook to make sure code is formatted properly and vetted before you commit any changes. We strongly recommend using the pre-commit hook to guard against accidentally committing unformatted code. To use the pre-commit hook, run the following:\n```bash\n    cd $GOPATH/src/github.com/influxdata/influxdb\n    cp .hooks/pre-commit .git/hooks/\n```\nIn case the commit is rejected because it's not formatted you can run\nthe following to format the code:\n\n```\ngo fmt ./...\ngo vet ./...\n```\n\nTo install go vet, run the following command:\n```\ngo get golang.org/x/tools/cmd/vet\n```\n\nNOTE: If you have not installed mercurial, the above command will fail.  See [Revision Control Systems](#revision-control-systems) above.\n\nFor more information on `go vet`, [read the GoDoc](https://godoc.org/golang.org/x/tools/cmd/vet).\n\nProfiling\n-----\nWhen troubleshooting problems with CPU or memory the Go toolchain can be helpful. You can start InfluxDB with CPU and memory profiling turned on. 
For example:\n\n```sh\n# start influx with profiling\n./influxd -cpuprofile influxdcpu.prof -memprof influxdmem.prof\n# run queries, writes, whatever you're testing\n# Quit out of influxd and influxd.prof will then be written.\n# open up pprof to examine the profiling data.\ngo tool pprof ./influxd influxd.prof\n# once inside run \"web\", opens up browser with the CPU graph\n# can also run \"web <function name>\" to zoom in. Or \"list <function name>\" to see specific lines\n```\nNote that when you pass the binary to `go tool pprof` *you must specify the path to the binary*.\n\nIf you are profiling benchmarks built with the `testing` package, you may wish\nto use the [`github.com/pkg/profile`](github.com/pkg/profile) package to limit\nthe code being profiled:\n\n```go\nfunc BenchmarkSomething(b *testing.B) {\n  // do something intensive like fill database with data...\n  defer profile.Start(profile.ProfilePath(\"/tmp\"), profile.MemProfile).Stop()\n  // do something that you want to profile...\n}\n```\n\nContinuous Integration testing\n-----\nInfluxDB uses CircleCI for continuous integration testing. To see how the code is built and tested, check out [this file](https://github.com/influxdata/influxdb/blob/master/circle-test.sh). It closely follows the build and test process outlined above. You can see the exact version of Go InfluxDB uses for testing by consulting that file.\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32",
    "content": "FROM ioft/i386-ubuntu:14.04\n\nRUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \\\n    python-software-properties \\\n    software-properties-common \\\n    wget \\\n    git \\\n    mercurial \\\n    make \\\n    ruby \\\n    ruby-dev \\\n    rpm \\\n    zip \\\n    python \\\n    python-boto\n\nRUN gem install fpm\n\n# Install go\nENV GOPATH /root/go\nENV GO_VERSION 1.8.3\nENV GO_ARCH 386\nRUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \\\n   tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \\\n   rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz\nENV PATH /usr/local/go/bin:$PATH\n\nENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb\nENV PATH $GOPATH/bin:$PATH\nRUN mkdir -p $PROJECT_DIR\nWORKDIR $PROJECT_DIR\n\nVOLUME $PROJECT_DIR\n\nENTRYPOINT [ \"/root/go/src/github.com/influxdata/influxdb/build.py\" ]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64",
    "content": "FROM ubuntu:trusty\n\nRUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \\\n    python-software-properties \\\n    software-properties-common \\\n    wget \\\n    git \\\n    mercurial \\\n    make \\\n    ruby \\\n    ruby-dev \\\n    rpm \\\n    zip \\\n    python \\\n    python-boto \\\n    asciidoc \\\n    xmlto \\\n    docbook-xsl\n\nRUN gem install fpm\n\n# Install go\nENV GOPATH /root/go\nENV GO_VERSION 1.8.3\nENV GO_ARCH amd64\nRUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \\\n   tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \\\n   rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz\nENV PATH /usr/local/go/bin:$PATH\n\nENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb\nENV PATH $GOPATH/bin:$PATH\nRUN mkdir -p $PROJECT_DIR\nWORKDIR $PROJECT_DIR\n\nVOLUME $PROJECT_DIR\n\nENTRYPOINT [ \"/root/go/src/github.com/influxdata/influxdb/build.py\" ]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git",
    "content": "FROM ubuntu:trusty\n\nRUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \\\n    python-software-properties \\\n    software-properties-common \\\n    wget \\\n    git \\\n    mercurial \\\n    make \\\n    ruby \\\n    ruby-dev \\\n    rpm \\\n    zip \\\n    python \\\n    python-boto\n\nRUN gem install fpm\n\n# Setup env\nENV GOPATH /root/go\nENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb\nENV PATH $GOPATH/bin:$PATH\nRUN mkdir -p $PROJECT_DIR\n\nVOLUME $PROJECT_DIR\n\n\n# Install go\nENV GO_VERSION 1.8.3\nENV GO_ARCH amd64\nRUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \\\n   tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \\\n   rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz\n\n# Clone Go tip for compilation\nENV GOROOT_BOOTSTRAP /usr/local/go\nRUN git clone https://go.googlesource.com/go\nENV PATH /go/bin:$PATH\n\n# Add script for compiling go\nENV GO_CHECKOUT master\nADD ./gobuild.sh /gobuild.sh\nENTRYPOINT [ \"/gobuild.sh\" ]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/Dockerfile_test_ubuntu32",
    "content": "FROM 32bit/ubuntu:14.04\n\nRUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y python-software-properties software-properties-common git\nRUN add-apt-repository ppa:evarlast/golang1.4\nRUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y -o Dpkg::Options::=\"--force-overwrite\" golang-go\n\nENV GOPATH=/root/go\nRUN mkdir -p /root/go/src/github.com/influxdata/influxdb\nRUN mkdir -p /tmp/artifacts\n\nVOLUME /root/go/src/github.com/influxdata/influxdb\nVOLUME /tmp/artifacts\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/Godeps",
    "content": "collectd.org e84e8af5356e7f47485bbc95c96da6dd7984a67e\ngithub.com/BurntSushi/toml 99064174e013895bbd9b025c31100bd1d9b590ca\ngithub.com/bmizerany/pat c068ca2f0aacee5ac3681d68e4d0a003b7d1fd2c\ngithub.com/boltdb/bolt 4b1ebc1869ad66568b313d0dc410e2be72670dda\ngithub.com/cespare/xxhash 4a94f899c20bc44d4f5f807cb14529e72aca99d6\ngithub.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76\ngithub.com/dgrijalva/jwt-go 24c63f56522a87ec5339cc3567883f1039378fdb\ngithub.com/dgryski/go-bits 2ad8d707cc05b1815ce6ff2543bb5e8d8f9298ef\ngithub.com/dgryski/go-bitstream 7d46cd22db7004f0cceb6f7975824b560cf0e486\ngithub.com/gogo/protobuf 30433562cfbf487fe1df7cd26c7bab168d2f14d0\ngithub.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380\ngithub.com/google/go-cmp 18107e6c56edb2d51f965f7d68e59404f0daee54\ngithub.com/influxdata/usage-client 6d3895376368aa52a3a81d2a16e90f0f52371967\ngithub.com/jwilder/encoding 27894731927e49b0a9023f00312be26733744815\ngithub.com/paulbellamy/ratecounter 5a11f585a31379765c190c033b6ad39956584447\ngithub.com/peterh/liner 88609521dc4b6c858fd4c98b628147da928ce4ac\ngithub.com/retailnext/hllpp 38a7bb71b483e855d35010808143beaf05b67f9d\ngithub.com/spaolacci/murmur3 0d12bf811670bf6a1a63828dfbd003eded177fce\ngithub.com/uber-go/atomic 74ca5ec650841aee9f289dce76e928313a37cbc6\ngithub.com/uber-go/zap fbae0281ffd546fa6d1959fec6075ac5da7fb577\ngolang.org/x/crypto 9477e0b78b9ac3d0b03822fd95422e2fe07627cd\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/LICENSE",
    "content": "The MIT License (MIT)\n\nCopyright (c) 2013-2016 Errplane Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md",
    "content": "# List\n- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE)\n- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)\n- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING)\n- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license)\n- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)\n- github.com/cespare/xxhash [MIT LICENSE](https://github.com/cespare/xxhash/blob/master/LICENSE.txt)\n- github.com/clarkduvall/hyperloglog [MIT LICENSE](https://github.com/clarkduvall/hyperloglog/blob/master/LICENSE)\n- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE)\n- github.com/dgrijalva/jwt-go [MIT LICENSE](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE)\n- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE)\n- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE)\n- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)\n- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)\n- github.com/google/go-cmp [BSD LICENSE](https://github.com/google/go-cmp/blob/master/LICENSE)\n- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt)\n- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE)\n- github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE)\n- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING)\n- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE)\n- github.com/retailnext/hllpp [BSD 
LICENSE](https://github.com/retailnext/hllpp/blob/master/LICENSE)\n- github.com/uber-go/atomic [MIT LICENSE](https://github.com/uber-go/atomic/blob/master/LICENSE.txt)\n- github.com/uber-go/zap [MIT LICENSE](https://github.com/uber-go/zap/blob/master/LICENSE.txt)\n- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)\n- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/Makefile",
    "content": "PACKAGES=$(shell find . -name '*.go' -print0 | xargs -0 -n1 dirname | sort --unique)\n\ndefault:\n\nmetalint: deadcode cyclo aligncheck defercheck structcheck lint errcheck\n\ndeadcode:\n\t@deadcode $(PACKAGES) 2>&1\n\ncyclo:\n\t@gocyclo -over 10 $(PACKAGES)\n\naligncheck:\n\t@aligncheck $(PACKAGES)\n\ndefercheck:\n\t@defercheck $(PACKAGES)\n\n\nstructcheck:\n\t@structcheck $(PACKAGES)\n\nlint:\n\t@for pkg in $(PACKAGES); do golint $$pkg; done\n\nerrcheck:\n\t@for pkg in $(PACKAGES); do \\\n\t  errcheck -ignorepkg=bytes,fmt -ignore=\":(Rollback|Close)\" $$pkg \\\n\tdone\n\ntools:\n\tgo get github.com/remyoudompheng/go-misc/deadcode\n\tgo get github.com/alecthomas/gocyclo\n\tgo get github.com/opennota/check/...\n\tgo get github.com/golang/lint/golint\n\tgo get github.com/kisielk/errcheck\n\tgo get github.com/sparrc/gdm\n\n.PHONY: default,metalint,deadcode,cyclo,aligncheck,defercheck,structcheck,lint,errcheck,tools\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/QUERIES.md",
    "content": "The top level name is called a measurement. These names can contain any characters. Then there are field names, field values, tag keys and tag values, which can also contain any characters. However, if the measurement, field, or tag contains any character other than [A-Z,a-z,0-9,_], or if it starts with a digit, it must be double-quoted. Therefore anywhere a measurement name, field key, or tag key appears it should be wrapped in double quotes.\n\n# Databases & retention policies\n\n```sql\n-- create a database\nCREATE DATABASE <name>\n\n-- create a retention policy\nCREATE RETENTION POLICY <rp-name> ON <db-name> DURATION <duration> REPLICATION <n> [DEFAULT]\n\n-- alter retention policy\nALTER RETENTION POLICY <rp-name> ON <db-name> (DURATION <duration> | REPLICATION <n> | DEFAULT)+\n\n-- drop a database\nDROP DATABASE <name>\n\n-- drop a retention policy\nDROP RETENTION POLICY <rp-name> ON <db-name>\n```\nwhere `<duration>` is either `INF` for infinite retention, or an integer followed by the desired unit of time: u,ms,s,m,h,d,w for microseconds, milliseconds, seconds, minutes, hours, days, or weeks, respectively. 
`<replication>` must be an integer.\n\nIf present, `DEFAULT` sets the retention policy as the default retention policy for writes and reads.\n\n# Users and permissions\n\n```sql\n-- create user\nCREATE USER <name> WITH PASSWORD '<password>'\n\n-- grant privilege on a database\nGRANT <privilege> ON <db> TO <user>\n\n-- grant cluster admin privileges\nGRANT ALL [PRIVILEGES] TO <user>\n\n-- revoke privilege\nREVOKE <privilege> ON <db> FROM <user>\n\n-- revoke all privileges for a DB\nREVOKE ALL [PRIVILEGES] ON <db> FROM <user>\n\n-- revoke all privileges including cluster admin\nREVOKE ALL [PRIVILEGES] FROM <user>\n\n-- combine db creation with privilege assignment (user must already exist)\nCREATE DATABASE <name> GRANT <privilege> TO <user>\nCREATE DATABASE <name> REVOKE <privilege> FROM <user>\n\n-- delete a user\nDROP USER <name>\n\n\n```\nwhere `<privilege> := READ | WRITE | All `. \n\nAuthentication must be enabled in the influxdb.conf file for user permissions to be in effect.\n\nBy default, newly created users have no privileges to any databases.\n\nCluster administration privileges automatically grant full read and write permissions to all databases, regardless of subsequent database-specific privilege revocation statements.\n\n# Select\n\n```sql\nSELECT mean(value) from cpu WHERE host = 'serverA' AND time > now() - 4h GROUP BY time(5m)\n\nSELECT mean(value) from cpu WHERE time > now() - 4h GROUP BY time(5m), region\n```\n\n## Group By\n\n# Delete\n\n# Series\n\n## Destroy\n\n```sql\nDROP MEASUREMENT <name>\nDROP MEASUREMENT cpu WHERE region = 'uswest'\n```\n\n## Show\n\nShow series queries are for pulling out individual series from measurement names and tag data. 
They're useful for discovery.\n\n```sql\n-- show all databases\nSHOW DATABASES\n\n-- show measurement names\nSHOW MEASUREMENTS\nSHOW MEASUREMENTS LIMIT 15\nSHOW MEASUREMENTS LIMIT 10 OFFSET 40\nSHOW MEASUREMENTS WHERE service = 'redis'\n-- LIMIT and OFFSET can be applied to any of the SHOW type queries\n\n-- show all series across all measurements/tagsets\nSHOW SERIES\n\n-- get a show of all series for any measurements where tag key region = tag value 'uswest'\nSHOW SERIES WHERE region = 'uswest'\n\nSHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10\n\n-- returns the 100 - 109 rows in the result. In the case of SHOW SERIES, which returns \n-- series split into measurements. Each series counts as a row. So you could see only a \n-- single measurement returned, but 10 series within it.\nSHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 OFFSET 100\n\n-- show all retention policies on a database\nSHOW RETENTION POLICIES ON mydb\n\n-- get a show of all tag keys across all measurements\nSHOW TAG KEYS\n\n-- show all the tag keys for a given measurement\nSHOW TAG KEYS FROM cpu\nSHOW TAG KEYS FROM temperature, wind_speed\n\n-- show all the tag values. note that a single WHERE TAG KEY = '...' 
clause is required\nSHOW TAG VALUES WITH TAG KEY = 'region'\nSHOW TAG VALUES FROM cpu WHERE region = 'uswest' WITH TAG KEY = 'host'\n\n-- and you can do stuff against fields\nSHOW FIELD KEYS FROM cpu\n\n-- but you can't do this\nSHOW FIELD VALUES\n-- we don't index field values, so this query should be invalid.\n\n-- show all users\nSHOW USERS\n```\n\nNote that `FROM` and `WHERE` are optional clauses in most of the show series queries.\n\nAnd the show series output looks like this:\n\n```json\n[\n    {\n        \"name\": \"cpu\",\n        \"columns\": [\"id\", \"region\", \"host\"],\n        \"values\": [\n            1, \"uswest\", \"servera\",\n            2, \"uswest\", \"serverb\"\n        ]\n    },\n    {\n        \"name\": \"reponse_time\",\n        \"columns\": [\"id\", \"application\", \"host\"],\n        \"values\": [\n            3, \"myRailsApp\", \"servera\"\n        ]\n    }\n]\n```\n\n# Continuous Queries\n\nContinuous queries are going to be inspired by MySQL `TRIGGER` syntax:\n\nhttp://dev.mysql.com/doc/refman/5.0/en/trigger-syntax.html\n\nInstead of having automatically-assigned ids, named continuous queries allows for some level of duplication prevention,\nparticularly in the case where creation is scripted.\n\n## Create\n\n    CREATE CONTINUOUS QUERY <name> AS SELECT ... FROM ...\n\n## Destroy\n\n    DROP CONTINUOUS QUERY <name>\n\n## List\n\n    SHOW CONTINUOUS QUERIES\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/README.md",
    "content": "# InfluxDB [![Circle CI](https://circleci.com/gh/influxdata/influxdb/tree/master.svg?style=svg)](https://circleci.com/gh/influxdata/influxdb/tree/master) [![Go Report Card](https://goreportcard.com/badge/github.com/influxdata/influxdb)](https://goreportcard.com/report/github.com/influxdata/influxdb) [![Docker pulls](https://img.shields.io/docker/pulls/library/influxdb.svg)](https://hub.docker.com/_/influxdb/)\n\n## An Open-Source Time Series Database\n\nInfluxDB is an open source **time series database** with\n**no external dependencies**. It's useful for recording metrics,\nevents, and performing analytics.\n\n## Features\n\n* Built-in [HTTP API](https://docs.influxdata.com/influxdb/latest/guides/writing_data/) so you don't have to write any server side code to get up and running.\n* Data can be tagged, allowing very flexible querying.\n* SQL-like query language.\n* Simple to install and manage, and fast to get data in and out.\n* It aims to answer queries in real-time. That means every data point is\n  indexed as it comes in and is immediately available in queries that\n  should return in < 100ms.\n\n## Installation\n\nWe recommend installing InfluxDB using one of the [pre-built packages](https://influxdata.com/downloads/#influxdb). Then start InfluxDB using:\n\n* `service influxdb start` if you have installed InfluxDB using an official Debian or RPM package.\n* `systemctl start influxdb` if you have installed InfluxDB using an official Debian or RPM package, and are running a distro with `systemd`. 
For example, Ubuntu 15 or later.\n* `$GOPATH/bin/influxd` if you have built InfluxDB from source.\n\n## Getting Started\n\n### Create your first database\n\n```\ncurl -XPOST 'http://localhost:8086/query' --data-urlencode \"q=CREATE DATABASE mydb\"\n```\n\n### Insert some data\n```\ncurl -XPOST 'http://localhost:8086/write?db=mydb' \\\n-d 'cpu,host=server01,region=uswest load=42 1434055562000000000'\n\ncurl -XPOST 'http://localhost:8086/write?db=mydb' \\\n-d 'cpu,host=server02,region=uswest load=78 1434055562000000000'\n\ncurl -XPOST 'http://localhost:8086/write?db=mydb' \\\n-d 'cpu,host=server03,region=useast load=15.4 1434055562000000000'\n```\n\n### Query for the data\n```JSON\ncurl -G http://localhost:8086/query?pretty=true --data-urlencode \"db=mydb\" \\\n--data-urlencode \"q=SELECT * FROM cpu WHERE host='server01' AND time < now() - 1d\"\n```\n\n### Analyze the data\n```JSON\ncurl -G http://localhost:8086/query?pretty=true --data-urlencode \"db=mydb\" \\\n--data-urlencode \"q=SELECT mean(load) FROM cpu WHERE region='uswest'\"\n```\n\n## Documentation\n\n* Read more about the [design goals and motivations of the project](https://docs.influxdata.com/influxdb/latest/).\n* Follow the [getting started guide](https://docs.influxdata.com/influxdb/latest/introduction/getting_started/) to learn the basics in just a few minutes.\n* Learn more about [InfluxDB's key concepts](https://docs.influxdata.com/influxdb/latest/guides/writing_data/).\n\n## Contributing\n\nIf you're feeling adventurous and want to contribute to InfluxDB, see our [contributing doc](https://github.com/influxdata/influxdb/blob/master/CONTRIBUTING.md) for info on how to make feature requests, build from source, and run tests.\n\n## Looking for Support?\n\nInfluxDB offers a number of services to help your project succeed. 
We offer Developer Support for organizations in active development, Managed Hosting to make it easy to move into production, and Enterprise Support for companies requiring the best response times, SLAs, and technical fixes. Visit our [support page](https://influxdata.com/services/) or contact [sales@influxdb.com](mailto:sales@influxdb.com) to learn how we can best help you succeed.\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/TODO.md",
    "content": "# TODO\n\n## v2\n\nTODO list for v2. Here is a list of things we want to add to v1, but can't because they would be a breaking change.\n\n- [#1834](https://github.com/influxdata/influxdb/issues/1834): Disallow using time as a tag key or field key.\n- [#2124](https://github.com/influxdata/influxdb/issues/2124): Prohibit writes with precision, but without an explicit timestamp.\n- [#4461](https://github.com/influxdata/influxdb/issues/4461): Change default time boundaries.\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/appveyor.yml",
    "content": "version: 0.{build}\r\npull_requests:\r\n  do_not_increment_build_number: true\r\nbranches:\r\n  only:\r\n  - master\r\n\r\nos: Windows Server 2012 R2\r\n\r\n# Custom clone folder (variables are not expanded here).\r\nclone_folder: c:\\gopath\\src\\github.com\\influxdata\\influxdb\r\n\r\n# Environment variables\r\nenvironment:\r\n  GOROOT: C:\\go17\r\n  GOPATH: C:\\gopath\r\n\r\n# Scripts that run after cloning repository\r\ninstall:\r\n - set PATH=%GOROOT%\\bin;%GOPATH%\\bin;%PATH%\r\n - rmdir c:\\go /s /q\r\n - echo %PATH%\r\n - echo %GOPATH%\r\n - cd C:\\gopath\\src\\github.com\\influxdata\\influxdb\r\n - go version\r\n - go env\r\n - go get github.com/sparrc/gdm\r\n - cd C:\\gopath\\src\\github.com\\influxdata\\influxdb\r\n - gdm restore\r\n\r\n# To run your custom scripts instead of automatic MSBuild\r\nbuild_script:\r\n - go get -t -v ./...\r\n - go test -race -v ./...\r\n\r\n# To disable deployment\r\ndeploy: off\r\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/build.py",
    "content": "#!/usr/bin/python2.7 -u\n\nimport sys\nimport os\nimport subprocess\nimport time\nfrom datetime import datetime\nimport shutil\nimport tempfile\nimport hashlib\nimport re\nimport logging\nimport argparse\n\n################\n#### InfluxDB Variables\n################\n\n# Packaging variables\nPACKAGE_NAME = \"influxdb\"\nINSTALL_ROOT_DIR = \"/usr/bin\"\nLOG_DIR = \"/var/log/influxdb\"\nDATA_DIR = \"/var/lib/influxdb\"\nSCRIPT_DIR = \"/usr/lib/influxdb/scripts\"\nCONFIG_DIR = \"/etc/influxdb\"\nLOGROTATE_DIR = \"/etc/logrotate.d\"\nMAN_DIR = \"/usr/share/man\"\n\nINIT_SCRIPT = \"scripts/init.sh\"\nSYSTEMD_SCRIPT = \"scripts/influxdb.service\"\nPREINST_SCRIPT = \"scripts/pre-install.sh\"\nPOSTINST_SCRIPT = \"scripts/post-install.sh\"\nPOSTUNINST_SCRIPT = \"scripts/post-uninstall.sh\"\nLOGROTATE_SCRIPT = \"scripts/logrotate\"\nDEFAULT_CONFIG = \"etc/config.sample.toml\"\n\n# Default AWS S3 bucket for uploads\nDEFAULT_BUCKET = \"dl.influxdata.com/influxdb/artifacts\"\n\nCONFIGURATION_FILES = [\n    CONFIG_DIR + '/influxdb.conf',\n    LOGROTATE_DIR + '/influxdb',\n]\n\nPACKAGE_LICENSE = \"MIT\"\nPACKAGE_URL = \"https://github.com/influxdata/influxdb\"\nMAINTAINER = \"support@influxdb.com\"\nVENDOR = \"InfluxData\"\nDESCRIPTION = \"Distributed time-series database.\"\n\nprereqs = [ 'git', 'go' ]\ngo_vet_command = \"go tool vet ./\"\noptional_prereqs = [ 'fpm', 'rpmbuild', 'gpg' ]\n\nfpm_common_args = \"-f -s dir --log error \\\n--vendor {} \\\n--url {} \\\n--after-install {} \\\n--before-install {} \\\n--after-remove {} \\\n--license {} \\\n--maintainer {} \\\n--directories {} \\\n--directories {} \\\n--directories {} \\\n--description \\\"{}\\\"\".format(\n     VENDOR,\n     PACKAGE_URL,\n     POSTINST_SCRIPT,\n     PREINST_SCRIPT,\n     POSTUNINST_SCRIPT,\n     PACKAGE_LICENSE,\n     MAINTAINER,\n     LOG_DIR,\n     DATA_DIR,\n     MAN_DIR,\n     DESCRIPTION)\n\nfor f in CONFIGURATION_FILES:\n    fpm_common_args += \" --config-files 
{}\".format(f)\n\ntargets = {\n    'influx' : './cmd/influx',\n    'influxd' : './cmd/influxd',\n    'influx_stress' : './cmd/influx_stress',\n    'influx_inspect' : './cmd/influx_inspect',\n    'influx_tsm' : './cmd/influx_tsm',\n}\n\nsupported_builds = {\n    'darwin': [ \"amd64\" ],\n    'windows': [ \"amd64\" ],\n    'linux': [ \"amd64\", \"i386\", \"armhf\", \"arm64\", \"armel\", \"static_i386\", \"static_amd64\" ]\n}\n\nsupported_packages = {\n    \"darwin\": [ \"tar\" ],\n    \"linux\": [ \"deb\", \"rpm\", \"tar\" ],\n    \"windows\": [ \"zip\" ],\n}\n\n################\n#### InfluxDB Functions\n################\n\ndef print_banner():\n    logging.info(\"\"\"\n  ___       __ _          ___  ___\n |_ _|_ _  / _| |_  ___ _|   \\\\| _ )\n  | || ' \\\\|  _| | || \\\\ \\\\ / |) | _ \\\\\n |___|_||_|_| |_|\\\\_,_/_\\\\_\\\\___/|___/\n  Build Script\n\"\"\")\n\ndef create_package_fs(build_root):\n    \"\"\"Create a filesystem structure to mimic the package filesystem.\n    \"\"\"\n    logging.debug(\"Creating package filesystem at location: {}\".format(build_root))\n    # Using [1:] for the path names due to them being absolute\n    # (will overwrite previous paths, per 'os.path.join' documentation)\n    dirs = [ INSTALL_ROOT_DIR[1:],\n             LOG_DIR[1:],\n             DATA_DIR[1:],\n             SCRIPT_DIR[1:],\n             CONFIG_DIR[1:],\n             LOGROTATE_DIR[1:],\n             MAN_DIR[1:] ]\n    for d in dirs:\n        os.makedirs(os.path.join(build_root, d))\n        os.chmod(os.path.join(build_root, d), 0o755)\n\ndef package_scripts(build_root, config_only=False, windows=False):\n    \"\"\"Copy the necessary scripts and configuration files to the package\n    filesystem.\n    \"\"\"\n    if config_only:\n        logging.debug(\"Copying configuration to build directory.\")\n        shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, \"influxdb.conf\"))\n        os.chmod(os.path.join(build_root, \"influxdb.conf\"), 0o644)\n    else:\n        
logging.debug(\"Copying scripts and sample configuration to build directory.\")\n        shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))\n        os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644)\n        shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))\n        os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644)\n        shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], \"influxdb\"))\n        os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], \"influxdb\"), 0o644)\n        shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], \"influxdb.conf\"))\n        os.chmod(os.path.join(build_root, CONFIG_DIR[1:], \"influxdb.conf\"), 0o644)\n\ndef package_man_files(build_root):\n    \"\"\"Copy and gzip man pages to the package filesystem.\"\"\"\n    logging.debug(\"Installing man pages.\")\n    run(\"make -C man/ clean install DESTDIR={}/usr\".format(build_root))\n    for path, dir, files in os.walk(os.path.join(build_root, MAN_DIR[1:])):\n        for f in files:\n            run(\"gzip -9n {}\".format(os.path.join(path, f)))\n\ndef go_get(branch, update=False, no_uncommitted=False):\n    \"\"\"Retrieve build dependencies or restore pinned dependencies.\n    \"\"\"\n    if local_changes() and no_uncommitted:\n        logging.error(\"There are uncommitted changes in the current directory.\")\n        return False\n    if not check_path_for(\"gdm\"):\n        logging.info(\"Downloading `gdm`...\")\n        get_command = \"go get github.com/sparrc/gdm\"\n        run(get_command)\n    logging.info(\"Retrieving dependencies with `gdm`...\")\n    sys.stdout.flush()\n    run(\"{}/bin/gdm restore -v\".format(os.environ.get(\"GOPATH\")))\n    return True\n\ndef run_tests(race, parallel, timeout, no_vet, junit=False):\n    \"\"\"Run the Go test suite on binary 
output.\n    \"\"\"\n    logging.info(\"Starting tests...\")\n    if race:\n        logging.info(\"Race is enabled.\")\n    if parallel is not None:\n        logging.info(\"Using parallel: {}\".format(parallel))\n    if timeout is not None:\n        logging.info(\"Using timeout: {}\".format(timeout))\n    out = run(\"go fmt ./...\")\n    if len(out) > 0:\n        logging.error(\"Code not formatted. Please use 'go fmt ./...' to fix formatting errors.\")\n        logging.error(\"{}\".format(out))\n        return False\n    if not no_vet:\n        logging.info(\"Running 'go vet'...\")\n        out = run(go_vet_command)\n        if len(out) > 0:\n            logging.error(\"Go vet failed. Please run 'go vet ./...' and fix any errors.\")\n            logging.error(\"{}\".format(out))\n            return False\n    else:\n        logging.info(\"Skipping 'go vet' call...\")\n    test_command = \"go test -v\"\n    if race:\n        test_command += \" -race\"\n    if parallel is not None:\n        test_command += \" -parallel {}\".format(parallel)\n    if timeout is not None:\n        test_command += \" -timeout {}\".format(timeout)\n    test_command += \" ./...\"\n    if junit:\n        logging.info(\"Retrieving go-junit-report...\")\n        run(\"go get github.com/jstemmer/go-junit-report\")\n\n        # Retrieve the output from this command.\n        logging.info(\"Running tests...\")\n        logging.debug(\"{}\".format(test_command))\n        proc = subprocess.Popen(test_command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n        output, unused_err = proc.communicate()\n        output = output.decode('utf-8').strip()\n\n        # Process the output through go-junit-report.\n        with open('test-results.xml', 'w') as f:\n            logging.debug(\"{}\".format(\"go-junit-report\"))\n            junit_proc = subprocess.Popen([\"go-junit-report\"], stdin=subprocess.PIPE, stdout=f, stderr=subprocess.PIPE)\n            unused_output, err = 
junit_proc.communicate(output.encode('ascii', 'ignore'))\n            if junit_proc.returncode != 0:\n                logging.error(\"Command '{}' failed with error: {}\".format(\"go-junit-report\", err))\n                sys.exit(1)\n\n        if proc.returncode != 0:\n            logging.error(\"Command '{}' failed with error: {}\".format(test_command, output.encode('ascii', 'ignore')))\n            sys.exit(1)\n    else:\n        logging.info(\"Running tests...\")\n        output = run(test_command)\n        logging.debug(\"Test output:\\n{}\".format(out.encode('ascii', 'ignore')))\n    return True\n\n################\n#### All InfluxDB-specific content above this line\n################\n\ndef run(command, allow_failure=False, shell=False):\n    \"\"\"Run shell command (convenience wrapper around subprocess).\n    \"\"\"\n    out = None\n    logging.debug(\"{}\".format(command))\n    try:\n        if shell:\n            out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell)\n        else:\n            out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)\n        out = out.decode('utf-8').strip()\n        # logging.debug(\"Command output: {}\".format(out))\n    except subprocess.CalledProcessError as e:\n        if allow_failure:\n            logging.warn(\"Command '{}' failed with error: {}\".format(command, e.output))\n            return None\n        else:\n            logging.error(\"Command '{}' failed with error: {}\".format(command, e.output))\n            sys.exit(1)\n    except OSError as e:\n        if allow_failure:\n            logging.warn(\"Command '{}' failed with error: {}\".format(command, e))\n            return out\n        else:\n            logging.error(\"Command '{}' failed with error: {}\".format(command, e))\n            sys.exit(1)\n    else:\n        return out\n\ndef create_temp_dir(prefix = None):\n    \"\"\" Create temporary directory with optional prefix.\n    \"\"\"\n    if prefix is 
None:\n        return tempfile.mkdtemp(prefix=\"{}-build.\".format(PACKAGE_NAME))\n    else:\n        return tempfile.mkdtemp(prefix=prefix)\n\ndef increment_minor_version(version):\n    \"\"\"Return the version with the minor version incremented and patch\n    version set to zero.\n    \"\"\"\n    ver_list = version.split('.')\n    if len(ver_list) != 3:\n        logging.warn(\"Could not determine how to increment version '{}', will just use provided version.\".format(version))\n        return version\n    ver_list[1] = str(int(ver_list[1]) + 1)\n    ver_list[2] = str(0)\n    inc_version = '.'.join(ver_list)\n    logging.debug(\"Incremented version from '{}' to '{}'.\".format(version, inc_version))\n    return inc_version\n\ndef get_current_version_tag():\n    \"\"\"Retrieve the raw git version tag.\n    \"\"\"\n    version = run(\"git describe --always --tags --abbrev=0\")\n    return version\n\ndef get_current_version():\n    \"\"\"Parse version information from git tag output.\n    \"\"\"\n    version_tag = get_current_version_tag()\n    # Remove leading 'v'\n    if version_tag[0] == 'v':\n        version_tag = version_tag[1:]\n    # Replace any '-'/'_' with '~'\n    if '-' in version_tag:\n        version_tag = version_tag.replace(\"-\",\"~\")\n    if '_' in version_tag:\n        version_tag = version_tag.replace(\"_\",\"~\")\n    return version_tag\n\ndef get_current_commit(short=False):\n    \"\"\"Retrieve the current git commit.\n    \"\"\"\n    command = None\n    if short:\n        command = \"git log --pretty=format:'%h' -n 1\"\n    else:\n        command = \"git rev-parse HEAD\"\n    out = run(command)\n    return out.strip('\\'\\n\\r ')\n\ndef get_current_branch():\n    \"\"\"Retrieve the current git branch.\n    \"\"\"\n    command = \"git rev-parse --abbrev-ref HEAD\"\n    out = run(command)\n    return out.strip()\n\ndef local_changes():\n    \"\"\"Return True if there are local un-committed changes.\n    \"\"\"\n    output = run(\"git diff-files 
--ignore-submodules --\").strip()\n    if len(output) > 0:\n        return True\n    return False\n\ndef get_system_arch():\n    \"\"\"Retrieve current system architecture.\n    \"\"\"\n    arch = os.uname()[4]\n    if arch == \"x86_64\":\n        arch = \"amd64\"\n    elif arch == \"386\":\n        arch = \"i386\"\n    elif arch == \"aarch64\":\n        arch = \"arm64\"\n    elif 'arm' in arch:\n        # Prevent uname from reporting full ARM arch (eg 'armv7l')\n        arch = \"arm\"\n    return arch\n\ndef get_system_platform():\n    \"\"\"Retrieve current system platform.\n    \"\"\"\n    if sys.platform.startswith(\"linux\"):\n        return \"linux\"\n    else:\n        return sys.platform\n\ndef get_go_version():\n    \"\"\"Retrieve version information for Go.\n    \"\"\"\n    out = run(\"go version\")\n    matches = re.search('go version go(\\S+)', out)\n    if matches is not None:\n        return matches.groups()[0].strip()\n    return None\n\ndef check_path_for(b):\n    \"\"\"Check the the user's path for the provided binary.\n    \"\"\"\n    def is_exe(fpath):\n        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n    for path in os.environ[\"PATH\"].split(os.pathsep):\n        path = path.strip('\"')\n        full_path = os.path.join(path, b)\n        if os.path.isfile(full_path) and os.access(full_path, os.X_OK):\n            return full_path\n\ndef check_environ(build_dir = None):\n    \"\"\"Check environment for common Go variables.\n    \"\"\"\n    logging.info(\"Checking environment...\")\n    for v in [ \"GOPATH\", \"GOBIN\", \"GOROOT\" ]:\n        logging.debug(\"Using '{}' for {}\".format(os.environ.get(v), v))\n\n    cwd = os.getcwd()\n    if build_dir is None and os.environ.get(\"GOPATH\") and os.environ.get(\"GOPATH\") not in cwd:\n        logging.warn(\"Your current directory is not under your GOPATH. 
This may lead to build failures.\")\n    return True\n\ndef check_prereqs():\n    \"\"\"Check user path for required dependencies.\n    \"\"\"\n    logging.info(\"Checking for dependencies...\")\n    for req in prereqs:\n        if not check_path_for(req):\n            logging.error(\"Could not find dependency: {}\".format(req))\n            return False\n    return True\n\ndef upload_packages(packages, bucket_name=None, overwrite=False):\n    \"\"\"Upload provided package output to AWS S3.\n    \"\"\"\n    logging.debug(\"Uploading files to bucket '{}': {}\".format(bucket_name, packages))\n    try:\n        import boto\n        from boto.s3.key import Key\n        from boto.s3.connection import OrdinaryCallingFormat\n        logging.getLogger(\"boto\").setLevel(logging.WARNING)\n    except ImportError:\n        logging.warn(\"Cannot upload packages without 'boto' Python library!\")\n        return False\n    logging.info(\"Connecting to AWS S3...\")\n    # Up the number of attempts to 10 from default of 1\n    boto.config.add_section(\"Boto\")\n    boto.config.set(\"Boto\", \"metadata_service_num_attempts\", \"10\")\n    c = boto.connect_s3(calling_format=OrdinaryCallingFormat())\n    if bucket_name is None:\n        bucket_name = DEFAULT_BUCKET\n    bucket = c.get_bucket(bucket_name.split('/')[0])\n    for p in packages:\n        if '/' in bucket_name:\n            # Allow for nested paths within the bucket name (ex:\n            # bucket/folder). 
Assuming forward-slashes as path\n            # delimiter.\n            name = os.path.join('/'.join(bucket_name.split('/')[1:]),\n                                os.path.basename(p))\n        else:\n            name = os.path.basename(p)\n        logging.debug(\"Using key: {}\".format(name))\n        if bucket.get_key(name) is None or overwrite:\n            logging.info(\"Uploading file {}\".format(name))\n            k = Key(bucket)\n            k.key = name\n            if overwrite:\n                n = k.set_contents_from_filename(p, replace=True)\n            else:\n                n = k.set_contents_from_filename(p, replace=False)\n            k.make_public()\n        else:\n            logging.warn(\"Not uploading file {}, as it already exists in the target bucket.\".format(name))\n    return True\n\ndef go_list(vendor=False, relative=False):\n    \"\"\"\n    Return a list of packages\n    If vendor is False vendor package are not included\n    If relative is True the package prefix defined by PACKAGE_URL is stripped\n    \"\"\"\n    p = subprocess.Popen([\"go\", \"list\", \"./...\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    out, err = p.communicate()\n    packages = out.split('\\n')\n    if packages[-1] == '':\n        packages = packages[:-1]\n    if not vendor:\n        non_vendor = []\n        for p in packages:\n            if '/vendor/' not in p:\n                non_vendor.append(p)\n        packages = non_vendor\n    if relative:\n        relative_pkgs = []\n        for p in packages:\n            r = p.replace(PACKAGE_URL, '.')\n            if r != '.':\n                relative_pkgs.append(r)\n        packages = relative_pkgs\n    return packages\n\ndef build(version=None,\n          platform=None,\n          arch=None,\n          nightly=False,\n          race=False,\n          clean=False,\n          outdir=\".\",\n          tags=[],\n          static=False):\n    \"\"\"Build each target for the specified architecture and 
platform.\n    \"\"\"\n    logging.info(\"Starting build for {}/{}...\".format(platform, arch))\n    logging.info(\"Using Go version: {}\".format(get_go_version()))\n    logging.info(\"Using git branch: {}\".format(get_current_branch()))\n    logging.info(\"Using git commit: {}\".format(get_current_commit()))\n    if static:\n        logging.info(\"Using statically-compiled output.\")\n    if race:\n        logging.info(\"Race is enabled.\")\n    if len(tags) > 0:\n        logging.info(\"Using build tags: {}\".format(','.join(tags)))\n\n    logging.info(\"Sending build output to: {}\".format(outdir))\n    if not os.path.exists(outdir):\n        os.makedirs(outdir)\n    elif clean and outdir != '/' and outdir != \".\":\n        logging.info(\"Cleaning build directory '{}' before building.\".format(outdir))\n        shutil.rmtree(outdir)\n        os.makedirs(outdir)\n\n    logging.info(\"Using version '{}' for build.\".format(version))\n\n    for target, path in targets.items():\n        logging.info(\"Building target: {}\".format(target))\n        build_command = \"\"\n\n        # Handle static binary output\n        if static is True or \"static_\" in arch:\n            if \"static_\" in arch:\n                static = True\n                arch = arch.replace(\"static_\", \"\")\n            build_command += \"CGO_ENABLED=0 \"\n\n        # Handle variations in architecture output\n        if arch == \"i386\" or arch == \"i686\":\n            arch = \"386\"\n        elif \"arm\" in arch:\n            arch = \"arm\"\n        build_command += \"GOOS={} GOARCH={} \".format(platform, arch)\n\n        if \"arm\" in arch:\n            if arch == \"armel\":\n                build_command += \"GOARM=5 \"\n            elif arch == \"armhf\" or arch == \"arm\":\n                build_command += \"GOARM=6 \"\n            elif arch == \"arm64\":\n                # TODO(rossmcdonald) - Verify this is the correct setting for arm64\n                build_command += \"GOARM=7 \"\n  
          else:\n                logging.error(\"Invalid ARM architecture specified: {}\".format(arch))\n                logging.error(\"Please specify either 'armel', 'armhf', or 'arm64'.\")\n                return False\n        if platform == 'windows':\n            target = target + '.exe'\n        build_command += \"go build -o {} \".format(os.path.join(outdir, target))\n        if race:\n            build_command += \"-race \"\n        if len(tags) > 0:\n            build_command += \"-tags {} \".format(','.join(tags))\n        if \"1.4\" in get_go_version():\n            if static:\n                build_command += \"-ldflags=\\\"-s -X main.version {} -X main.branch {} -X main.commit {}\\\" \".format(version,\n                                                                                                                  get_current_branch(),\n                                                                                                                  get_current_commit())\n            else:\n                build_command += \"-ldflags=\\\"-X main.version {} -X main.branch {} -X main.commit {}\\\" \".format(version,\n                                                                                                               get_current_branch(),\n                                                                                                               get_current_commit())\n\n        else:\n            # Starting with Go 1.5, the linker flag arguments changed to 'name=value' from 'name value'\n            if static:\n                build_command += \"-ldflags=\\\"-s -X main.version={} -X main.branch={} -X main.commit={}\\\" \".format(version,\n                                                                                                                  get_current_branch(),\n                                                                                                                  get_current_commit())\n            else:\n                
build_command += \"-ldflags=\\\"-X main.version={} -X main.branch={} -X main.commit={}\\\" \".format(version,\n                                                                                                               get_current_branch(),\n                                                                                                               get_current_commit())\n        if static:\n            build_command += \"-a -installsuffix cgo \"\n        build_command += path\n        start_time = datetime.utcnow()\n        run(build_command, shell=True)\n        end_time = datetime.utcnow()\n        logging.info(\"Time taken: {}s\".format((end_time - start_time).total_seconds()))\n    return True\n\ndef generate_md5_from_file(path):\n    \"\"\"Generate MD5 signature based on the contents of the file at path.\n    \"\"\"\n    m = hashlib.md5()\n    with open(path, 'rb') as f:\n        for chunk in iter(lambda: f.read(4096), b\"\"):\n            m.update(chunk)\n    return m.hexdigest()\n\ndef generate_sig_from_file(path):\n    \"\"\"Generate a detached GPG signature from the file at path.\n    \"\"\"\n    logging.debug(\"Generating GPG signature for file: {}\".format(path))\n    gpg_path = check_path_for('gpg')\n    if gpg_path is None:\n        logging.warn(\"gpg binary not found on path! 
Skipping signature creation.\")\n        return False\n    if os.environ.get(\"GNUPG_HOME\") is not None:\n        run('gpg --homedir {} --armor --yes --detach-sign {}'.format(os.environ.get(\"GNUPG_HOME\"), path))\n    else:\n        run('gpg --armor --detach-sign --yes {}'.format(path))\n    return True\n\ndef package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False):\n    \"\"\"Package the output of the build process.\n    \"\"\"\n    outfiles = []\n    tmp_build_dir = create_temp_dir()\n    logging.debug(\"Packaging for build output: {}\".format(build_output))\n    logging.info(\"Using temporary directory: {}\".format(tmp_build_dir))\n    try:\n        for platform in build_output:\n            # Create top-level folder displaying which platform (linux, etc)\n            os.makedirs(os.path.join(tmp_build_dir, platform))\n            for arch in build_output[platform]:\n                logging.info(\"Creating packages for {}/{}\".format(platform, arch))\n                # Create second-level directory displaying the architecture (amd64, etc)\n                current_location = build_output[platform][arch]\n\n                # Create directory tree to mimic file system of package\n                build_root = os.path.join(tmp_build_dir,\n                                          platform,\n                                          arch,\n                                          '{}-{}-{}'.format(PACKAGE_NAME, version, iteration))\n                os.makedirs(build_root)\n\n                # Copy packaging scripts to build directory\n                if platform == \"windows\":\n                    # For windows and static builds, just copy\n                    # binaries to root of package (no other scripts or\n                    # directories)\n                    package_scripts(build_root, config_only=True, windows=True)\n                elif static or \"static_\" in arch:\n                    
package_scripts(build_root, config_only=True)\n                else:\n                    create_package_fs(build_root)\n                    package_scripts(build_root)\n\n                if platform != \"windows\":\n                    package_man_files(build_root)\n\n                for binary in targets:\n                    # Copy newly-built binaries to packaging directory\n                    if platform == 'windows':\n                        binary = binary + '.exe'\n                    if platform == 'windows' or static or \"static_\" in arch:\n                        # Where the binary should go in the package filesystem\n                        to = os.path.join(build_root, binary)\n                        # Where the binary currently is located\n                        fr = os.path.join(current_location, binary)\n                    else:\n                        # Where the binary currently is located\n                        fr = os.path.join(current_location, binary)\n                        # Where the binary should go in the package filesystem\n                        to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary)\n                    shutil.copy(fr, to)\n\n                for package_type in supported_packages[platform]:\n                    # Package the directory structure for each package type for the platform\n                    logging.debug(\"Packaging directory '{}' as '{}'.\".format(build_root, package_type))\n                    name = pkg_name\n                    # Reset version, iteration, and current location on each run\n                    # since they may be modified below.\n                    package_version = version\n                    package_iteration = iteration\n                    if \"static_\" in arch:\n                        # Remove the \"static_\" from the displayed arch on the package\n                        package_arch = arch.replace(\"static_\", \"\")\n                    else:\n                    
    package_arch = arch\n                    if not release and not nightly:\n                        # For non-release builds, just use the commit hash as the version\n                        package_version = \"{}~{}\".format(version,\n                                                         get_current_commit(short=True))\n                        package_iteration = \"0\"\n                    package_build_root = build_root\n                    current_location = build_output[platform][arch]\n\n                    if package_type in ['zip', 'tar']:\n                        # For tars and zips, start the packaging one folder above\n                        # the build root (to include the package name)\n                        package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1]))\n                        if nightly:\n                            if static or \"static_\" in arch:\n                                name = '{}-static-nightly_{}_{}'.format(name,\n                                                                        platform,\n                                                                        package_arch)\n                            else:\n                                name = '{}-nightly_{}_{}'.format(name,\n                                                                 platform,\n                                                                 package_arch)\n                        else:\n                            if static or \"static_\" in arch:\n                                name = '{}-{}-static_{}_{}'.format(name,\n                                                                   package_version,\n                                                                   platform,\n                                                                   package_arch)\n                            else:\n                                name = '{}-{}_{}_{}'.format(name,\n                                                         
   package_version,\n                                                            platform,\n                                                            package_arch)\n                        current_location = os.path.join(os.getcwd(), current_location)\n                        if package_type == 'tar':\n                            tar_command = \"cd {} && tar -cvzf {}.tar.gz ./*\".format(package_build_root, name)\n                            run(tar_command, shell=True)\n                            run(\"mv {}.tar.gz {}\".format(os.path.join(package_build_root, name), current_location), shell=True)\n                            outfile = os.path.join(current_location, name + \".tar.gz\")\n                            outfiles.append(outfile)\n                        elif package_type == 'zip':\n                            zip_command = \"cd {} && zip -r {}.zip ./*\".format(package_build_root, name)\n                            run(zip_command, shell=True)\n                            run(\"mv {}.zip {}\".format(os.path.join(package_build_root, name), current_location), shell=True)\n                            outfile = os.path.join(current_location, name + \".zip\")\n                            outfiles.append(outfile)\n                    elif package_type not in ['zip', 'tar'] and static or \"static_\" in arch:\n                        logging.info(\"Skipping package type '{}' for static builds.\".format(package_type))\n                    else:\n                        fpm_command = \"fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} \".format(\n                            fpm_common_args,\n                            name,\n                            package_arch,\n                            package_type,\n                            package_version,\n                            package_iteration,\n                            package_build_root,\n                            current_location)\n                        if package_type == 
\"rpm\":\n                            fpm_command += \"--depends coreutils --rpm-posttrans {}\".format(POSTINST_SCRIPT)\n                        out = run(fpm_command, shell=True)\n                        matches = re.search(':path=>\"(.*)\"', out)\n                        outfile = None\n                        if matches is not None:\n                            outfile = matches.groups()[0]\n                        if outfile is None:\n                            logging.warn(\"Could not determine output from packaging output!\")\n                        else:\n                            if nightly:\n                                # Strip nightly version from package name\n                                new_outfile = outfile.replace(\"{}-{}\".format(package_version, package_iteration), \"nightly\")\n                                os.rename(outfile, new_outfile)\n                                outfile = new_outfile\n                            else:\n                                if package_type == 'rpm':\n                                    # rpm's convert any dashes to underscores\n                                    package_version = package_version.replace(\"-\", \"_\")\n                                new_outfile = outfile.replace(\"{}-{}\".format(package_version, package_iteration), package_version)\n                                os.rename(outfile, new_outfile)\n                                outfile = new_outfile\n                            outfiles.append(os.path.join(os.getcwd(), outfile))\n        logging.debug(\"Produced package files: {}\".format(outfiles))\n        return outfiles\n    finally:\n        # Cleanup\n        shutil.rmtree(tmp_build_dir)\n\ndef main(args):\n    global PACKAGE_NAME\n\n    if args.release and args.nightly:\n        logging.error(\"Cannot be both a nightly and a release.\")\n        return 1\n\n    if args.nightly:\n        args.version = increment_minor_version(args.version)\n        args.version = 
\"{}~n{}\".format(args.version,\n                                       datetime.utcnow().strftime(\"%Y%m%d%H%M\"))\n        args.iteration = 0\n\n    # Pre-build checks\n    check_environ()\n    if not check_prereqs():\n        return 1\n    if args.build_tags is None:\n        args.build_tags = []\n    else:\n        args.build_tags = args.build_tags.split(',')\n\n    orig_commit = get_current_commit(short=True)\n    orig_branch = get_current_branch()\n\n    if args.platform not in supported_builds and args.platform != 'all':\n        logging.error(\"Invalid build platform: {}\".format(args.platform))\n        return 1\n\n    build_output = {}\n\n    if args.branch != orig_branch and args.commit != orig_commit:\n        logging.error(\"Can only specify one branch or commit to build from.\")\n        return 1\n    elif args.branch != orig_branch:\n        logging.info(\"Moving to git branch: {}\".format(args.branch))\n        run(\"git checkout {}\".format(args.branch))\n    elif args.commit != orig_commit:\n        logging.info(\"Moving to git commit: {}\".format(args.commit))\n        run(\"git checkout {}\".format(args.commit))\n\n    if not args.no_get:\n        if not go_get(args.branch, update=args.update, no_uncommitted=args.no_uncommitted):\n            return 1\n\n    if args.test:\n        if not run_tests(args.race, args.parallel, args.timeout, args.no_vet, args.junit_report):\n            return 1\n\n    platforms = []\n    single_build = True\n    if args.platform == 'all':\n        platforms = supported_builds.keys()\n        single_build = False\n    else:\n        platforms = [args.platform]\n\n    for platform in platforms:\n        build_output.update( { platform : {} } )\n        archs = []\n        if args.arch == \"all\":\n            single_build = False\n            archs = supported_builds.get(platform)\n        else:\n            archs = [args.arch]\n\n        for arch in archs:\n            od = args.outdir\n            if not 
single_build:\n                od = os.path.join(args.outdir, platform, arch)\n            if not build(version=args.version,\n                         platform=platform,\n                         arch=arch,\n                         nightly=args.nightly,\n                         race=args.race,\n                         clean=args.clean,\n                         outdir=od,\n                         tags=args.build_tags,\n                         static=args.static):\n                return 1\n            build_output.get(platform).update( { arch : od } )\n\n    # Build packages\n    if args.package:\n        if not check_path_for(\"fpm\"):\n            logging.error(\"FPM ruby gem required for packaging. Stopping.\")\n            return 1\n        packages = package(build_output,\n                           args.name,\n                           args.version,\n                           nightly=args.nightly,\n                           iteration=args.iteration,\n                           static=args.static,\n                           release=args.release)\n        if args.sign:\n            logging.debug(\"Generating GPG signatures for packages: {}\".format(packages))\n            sigs = [] # retain signatures so they can be uploaded with packages\n            for p in packages:\n                if generate_sig_from_file(p):\n                    sigs.append(p + '.asc')\n                else:\n                    logging.error(\"Creation of signature for package [{}] failed!\".format(p))\n                    return 1\n            packages += sigs\n        if args.upload:\n            logging.debug(\"Files staged for upload: {}\".format(packages))\n            if args.nightly:\n                args.upload_overwrite = True\n            if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite):\n                return 1\n        logging.info(\"Packages created:\")\n        for p in packages:\n            logging.info(\"{} 
(MD5={})\".format(p.split('/')[-1:][0],\n                                              generate_md5_from_file(p)))\n    if orig_branch != get_current_branch():\n        logging.info(\"Moving back to original git branch: {}\".format(orig_branch))\n        run(\"git checkout {}\".format(orig_branch))\n\n    return 0\n\nif __name__ == '__main__':\n    LOG_LEVEL = logging.INFO\n    if '--debug' in sys.argv[1:]:\n        LOG_LEVEL = logging.DEBUG\n    log_format = '[%(levelname)s] %(funcName)s: %(message)s'\n    logging.basicConfig(level=LOG_LEVEL,\n                        format=log_format)\n\n    parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.')\n    parser.add_argument('--verbose','-v','--debug',\n                        action='store_true',\n                        help='Use debug output')\n    parser.add_argument('--outdir', '-o',\n                        metavar='<output directory>',\n                        default='./build/',\n                        type=os.path.abspath,\n                        help='Output directory')\n    parser.add_argument('--name', '-n',\n                        metavar='<name>',\n                        default=PACKAGE_NAME,\n                        type=str,\n                        help='Name to use for package name (when package is specified)')\n    parser.add_argument('--arch',\n                        metavar='<amd64|i386|armhf|arm64|armel|all>',\n                        type=str,\n                        default=get_system_arch(),\n                        help='Target architecture for build output')\n    parser.add_argument('--platform',\n                        metavar='<linux|darwin|windows|all>',\n                        type=str,\n                        default=get_system_platform(),\n                        help='Target platform for build output')\n    parser.add_argument('--branch',\n                        metavar='<branch>',\n                        type=str,\n                        
default=get_current_branch(),\n                        help='Build from a specific branch')\n    parser.add_argument('--commit',\n                        metavar='<commit>',\n                        type=str,\n                        default=get_current_commit(short=True),\n                        help='Build from a specific commit')\n    parser.add_argument('--version',\n                        metavar='<version>',\n                        type=str,\n                        default=get_current_version(),\n                        help='Version information to apply to build output (ex: 0.12.0)')\n    parser.add_argument('--iteration',\n                        metavar='<package iteration>',\n                        type=str,\n                        default=\"1\",\n                        help='Package iteration to apply to build output (defaults to 1)')\n    parser.add_argument('--stats',\n                        action='store_true',\n                        help='Emit build metrics (requires InfluxDB Python client)')\n    parser.add_argument('--stats-server',\n                        metavar='<hostname:port>',\n                        type=str,\n                        help='Send build stats to InfluxDB using provided hostname and port')\n    parser.add_argument('--stats-db',\n                        metavar='<database name>',\n                        type=str,\n                        help='Send build stats to InfluxDB using provided database name')\n    parser.add_argument('--nightly',\n                        action='store_true',\n                        help='Mark build output as nightly build (will increment the minor version)')\n    parser.add_argument('--update',\n                        action='store_true',\n                        help='Update build dependencies prior to building')\n    parser.add_argument('--package',\n                        action='store_true',\n                        help='Package binary output')\n    
parser.add_argument('--release',\n                        action='store_true',\n                        help='Mark build output as release')\n    parser.add_argument('--clean',\n                        action='store_true',\n                        help='Clean output directory before building')\n    parser.add_argument('--no-get',\n                        action='store_true',\n                        help='Do not retrieve pinned dependencies when building')\n    parser.add_argument('--no-uncommitted',\n                        action='store_true',\n                        help='Fail if uncommitted changes exist in the working directory')\n    parser.add_argument('--upload',\n                        action='store_true',\n                        help='Upload output packages to AWS S3')\n    parser.add_argument('--upload-overwrite','-w',\n                        action='store_true',\n                        help='Overwrite existing packages on AWS S3 when uploading')\n    parser.add_argument('--bucket',\n                        metavar='<S3 bucket name>',\n                        type=str,\n                        default=DEFAULT_BUCKET,\n                        help='Destination bucket for uploads')\n    parser.add_argument('--build-tags',\n                        metavar='<tags>',\n                        help='Optional build tags to use for compilation')\n    parser.add_argument('--static',\n                        action='store_true',\n                        help='Create statically-compiled binary output')\n    parser.add_argument('--sign',\n                        action='store_true',\n                        help='Create GPG detached signatures for packages (when package is specified)')\n    parser.add_argument('--test',\n                        action='store_true',\n                        help='Run tests (does not produce build output)')\n    parser.add_argument('--junit-report',\n                        action='store_true',\n                        help='Output tests in the JUnit 
XML format')\n    parser.add_argument('--no-vet',\n                        action='store_true',\n                        help='Do not run \"go vet\" when running tests')\n    parser.add_argument('--race',\n                        action='store_true',\n                        help='Enable race flag for build output')\n    parser.add_argument('--parallel',\n                        metavar='<num threads>',\n                        type=int,\n                        help='Number of tests to run simultaneously')\n    parser.add_argument('--timeout',\n                        metavar='<timeout>',\n                        type=str,\n                        help='Timeout for tests before failing')\n    args = parser.parse_args()\n    print_banner()\n    sys.exit(main(args))\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/build.sh",
    "content": "#!/bin/bash\n# Run the build utility via Docker\n\nset -e\n\n# Make sure our working dir is the dir of the script\nDIR=$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)\ncd $DIR\n\n\n# Build new docker image\ndocker build -f Dockerfile_build_ubuntu64 -t influxdb-builder $DIR\necho \"Running build.py\"\n# Run docker\ndocker run --rm \\\n    -e AWS_ACCESS_KEY_ID=\"$AWS_ACCESS_KEY_ID\" \\\n    -e AWS_SECRET_ACCESS_KEY=\"$AWS_SECRET_ACCESS_KEY\" \\\n    -v $HOME/.aws.conf:/root/.aws.conf \\\n    -v $DIR:/root/go/src/github.com/influxdata/influxdb \\\n    influxdb-builder \\\n    \"$@\"\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/circle-test.sh",
    "content": "#!/bin/bash\n#\n# This is the InfluxDB test script for CircleCI, it is a light wrapper around ./test.sh.\n\n# Exit if any command fails\nset -e\n\n# Get dir of script and make it is our working directory.\nDIR=$(cd $(dirname \"${BASH_SOURCE[0]}\") && pwd)\ncd $DIR\n\nexport OUTPUT_DIR=\"$CIRCLE_ARTIFACTS\"\n# Don't delete the container since CircleCI doesn't have permission to do so.\nexport DOCKER_RM=\"false\"\n\n# Get number of test environments.\ncount=$(./test.sh count)\n# Check that we aren't wasting CircleCI nodes.\nif [ $CIRCLE_NODE_INDEX -gt $((count - 1)) ]\nthen\n    echo \"More CircleCI nodes allocated than tests environments to run!\"\n    exit 0\nfi\n\n# Map CircleCI nodes to test environments.\ntests=$(seq 0 $((count - 1)))\nfor i in $tests\ndo\n    mine=$(( $i % $CIRCLE_NODE_TOTAL ))\n    if [ $mine -eq $CIRCLE_NODE_INDEX ]\n    then\n        echo \"Running test env index: $i\"\n        ./test.sh $i\n    fi\ndone\n\n# Copy the JUnit test XML to the test reports folder.\nmkdir -p $CIRCLE_TEST_REPORTS/reports\ncp test-results.xml $CIRCLE_TEST_REPORTS/reports/test-results.xml\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/circle.yml",
    "content": "machine:\n    services:\n        - docker\n    environment:\n      GODIST: \"go1.8.3.linux-amd64.tar.gz\"\n    post:\n      - mkdir -p download\n      - test -e download/$GODIST || curl -o download/$GODIST https://storage.googleapis.com/golang/$GODIST\n      - sudo rm -rf /usr/local/go\n      - sudo tar -C /usr/local -xzf download/$GODIST\n\ndependencies:\n    cache_directories:\n        - \"~/docker\"\n        - ~/download\n    override:\n      - ./test.sh save:\n          # building the docker images can take a long time, hence caching\n          timeout: 1800\n\ntest:\n    override:\n        - bash circle-test.sh:\n            parallel: true\n            # Race tests using 960s timeout\n            timeout: 960\n\ndeployment:\n  release:\n    tag: /^v[0-9]+(\\.[0-9]+)*(\\S*)$/\n    commands:\n      - >\n        docker run\n        -e \"AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID\"\n        -e \"AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY\"\n        -v $(pwd):/root/go/src/github.com/influxdata/influxdb\n        influxdb_build_ubuntu64\n        --release\n        --package\n        --platform all\n        --arch all\n        --upload\n        --bucket dl.influxdata.com/influxdb/releases\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/client/README.md",
    "content": "# InfluxDB Client\n\n[![GoDoc](https://godoc.org/github.com/influxdata/influxdb?status.svg)](http://godoc.org/github.com/influxdata/influxdb/client/v2)\n\n## Description\n\n**NOTE:** The Go client library now has a \"v2\" version, with the old version\nbeing deprecated. The new version can be imported at\n`import \"github.com/influxdata/influxdb/client/v2\"`. It is not backwards-compatible.\n\nA Go client library written and maintained by the **InfluxDB** team.\nThis package provides convenience functions to read and write time series data.\nIt uses the HTTP protocol to communicate with your **InfluxDB** cluster.\n\n\n## Getting Started\n\n### Connecting To Your Database\n\nConnecting to an **InfluxDB** database is straightforward. You will need a host\nname, a port and the cluster user credentials if applicable. The default port is\n8086. You can customize these settings to your specific installation via the\n**InfluxDB** configuration file.\n\nThough not necessary for experimentation, you may want to create a new user\nand authenticate the connection to your database.\n\nFor more information please check out the\n[Admin Docs](https://docs.influxdata.com/influxdb/latest/administration/).\n\nFor the impatient, you can create a new admin user _bubba_ by firing off the\n[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).\n\n```shell\ninflux\n> create user bubba with password 'bumblebeetuna'\n> grant all privileges to bubba\n```\n\nAnd now for good measure set the credentials in you shell environment.\nIn the example below we will use $INFLUX_USER and $INFLUX_PWD\n\nNow with the administrivia out of the way, let's connect to our database.\n\nNOTE: If you've opted out of creating a user, you can omit Username and Password in\nthe configuration below.\n\n```go\npackage main\n\nimport (\n\t\"log\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/client/v2\"\n)\n\nconst (\n\tMyDB = \"square_holes\"\n\tusername = 
\"bubba\"\n\tpassword = \"bumblebeetuna\"\n)\n\n\nfunc main() {\n\t// Create a new HTTPClient\n\tc, err := client.NewHTTPClient(client.HTTPConfig{\n\t\tAddr:     \"http://localhost:8086\",\n\t\tUsername: username,\n\t\tPassword: password,\n\t})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Create a new point batch\n\tbp, err := client.NewBatchPoints(client.BatchPointsConfig{\n\t\tDatabase:  MyDB,\n\t\tPrecision: \"s\",\n\t})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Create a point and add to batch\n\ttags := map[string]string{\"cpu\": \"cpu-total\"}\n\tfields := map[string]interface{}{\n\t\t\"idle\":   10.1,\n\t\t\"system\": 53.3,\n\t\t\"user\":   46.6,\n\t}\n\n\tpt, err := client.NewPoint(\"cpu_usage\", tags, fields, time.Now())\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tbp.AddPoint(pt)\n\n\t// Write the batch\n\tif err := c.Write(bp); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\n```\n\n### Inserting Data\n\nTime series data aka *points* are written to the database using batch inserts.\nThe mechanism is to create one or more points and then create a batch aka\n*batch points* and write these to a given database and series. A series is a\ncombination of a measurement (time/values) and a set of tags.\n\nIn this sample we will create a batch of a 1,000 points. Each point has a time and\na single value as well as 2 tags indicating a shape and color. We write these points\nto a database called _square_holes_ using a measurement named _shapes_.\n\nNOTE: You can specify a RetentionPolicy as part of the batch points. 
If not\nprovided InfluxDB will use the database _default_ retention policy.\n\n```go\n\nfunc writePoints(clnt client.Client) {\n\tsampleSize := 1000\n\n\tbp, err := client.NewBatchPoints(client.BatchPointsConfig{\n\t\tDatabase:  \"systemstats\",\n\t\tPrecision: \"us\",\n\t})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n    rand.Seed(time.Now().UnixNano())\n\tfor i := 0; i < sampleSize; i++ {\n\t\tregions := []string{\"us-west1\", \"us-west2\", \"us-west3\", \"us-east1\"}\n\t\ttags := map[string]string{\n\t\t\t\"cpu\":    \"cpu-total\",\n\t\t\t\"host\":   fmt.Sprintf(\"host%d\", rand.Intn(1000)),\n\t\t\t\"region\": regions[rand.Intn(len(regions))],\n\t\t}\n\n\t\tidle := rand.Float64() * 100.0\n\t\tfields := map[string]interface{}{\n\t\t\t\"idle\": idle,\n\t\t\t\"busy\": 100.0 - idle,\n\t\t}\n\n\t\tpt, err := client.NewPoint(\n\t\t\t\"cpu_usage\",\n\t\t\ttags,\n\t\t\tfields,\n\t\t\ttime.Now(),\n\t\t)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tbp.AddPoint(pt)\n\t}\n\n\tif err := clnt.Write(bp); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n```\n\n### Querying Data\n\nOne nice advantage of using **InfluxDB** the ability to query your data using familiar\nSQL constructs. 
In this example we can create a convenience function to query the database\nas follows:\n\n```go\n// queryDB convenience function to query the database\nfunc queryDB(clnt client.Client, cmd string) (res []client.Result, err error) {\n\tq := client.Query{\n\t\tCommand:  cmd,\n\t\tDatabase: MyDB,\n\t}\n\tif response, err := clnt.Query(q); err == nil {\n\t\tif response.Error() != nil {\n\t\t\treturn res, response.Error()\n\t\t}\n\t\tres = response.Results\n\t} else {\n\t\treturn res, err\n\t}\n\treturn res, nil\n}\n```\n\n#### Creating a Database\n\n```go\n_, err := queryDB(clnt, fmt.Sprintf(\"CREATE DATABASE %s\", MyDB))\nif err != nil {\n\tlog.Fatal(err)\n}\n```\n\n#### Count Records\n\n```go\nq := fmt.Sprintf(\"SELECT count(%s) FROM %s\", \"value\", MyMeasurement)\nres, err := queryDB(clnt, q)\nif err != nil {\n\tlog.Fatal(err)\n}\ncount := res[0].Series[0].Values[0][1]\nlog.Printf(\"Found a total of %v records\\n\", count)\n```\n\n#### Find the last 10 _shapes_ records\n\n```go\nq := fmt.Sprintf(\"SELECT * FROM %s LIMIT %d\", MyMeasurement, 20)\nres, err = queryDB(clnt, q)\nif err != nil {\n\tlog.Fatal(err)\n}\n\nfor i, row := range res[0].Series[0].Values {\n\tt, err := time.Parse(time.RFC3339, row[0].(string))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tval := row[1].(string)\n\tlog.Printf(\"[%2d] %s: %s\\n\", i, t.Format(time.Stamp), val)\n}\n```\n\n### Using the UDP Client\n\nThe **InfluxDB** client also supports writing over UDP.\n\n```go\nfunc WriteUDP() {\n\t// Make client\n\tc, err := client.NewUDPClient(\"localhost:8089\")\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\t\n\t// Create a new point batch\n\tbp, _ := client.NewBatchPoints(client.BatchPointsConfig{\n\t\tPrecision: \"s\",\n\t})\n\n\t// Create a point and add to batch\n\ttags := map[string]string{\"cpu\": \"cpu-total\"}\n\tfields := map[string]interface{}{\n\t\t\"idle\":   10.1,\n\t\t\"system\": 53.3,\n\t\t\"user\":   46.6,\n\t}\n\tpt, err := client.NewPoint(\"cpu_usage\", tags, fields, 
time.Now())\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tbp.AddPoint(pt)\n\n\t// Write the batch\n\tc.Write(bp)\n}\n```\n\n### Point Splitting\n\nThe UDP client now supports splitting single points that exceed the configured\npayload size. The logic for processing each point is listed here, starting with\nan empty payload.\n\n1. If adding the point to the current (non-empty) payload would exceed the\n   configured size, send the current payload. Otherwise, add it to the current\n   payload.\n1. If the point is smaller than the configured size, add it to the payload.\n1. If the point has no timestamp, just try to send the entire point as a single\n   UDP payload, and process the next point.\n1. Since the point has a timestamp, re-use the existing measurement name,\n   tagset, and timestamp and create multiple new points by splitting up the\n   fields. The per-point length will be kept close to the configured size,\n   staying under it if possible. This does mean that one large field, maybe a\n   long string, could be sent as a larger-than-configured payload.\n\nThe above logic attempts to respect configured payload sizes, but not sacrifice\nany data integrity. Points without a timestamp can't be split, as that may\ncause fields to have differing timestamps when processed by the server.\n\n## Go Docs\n\nPlease refer to\n[http://godoc.org/github.com/influxdata/influxdb/client/v2](http://godoc.org/github.com/influxdata/influxdb/client/v2)\nfor documentation.\n\n## See Also\n\nYou can also examine how the client library is used by the\n[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go).\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/client/example_test.go",
    "content": "package client_test\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math/rand\"\n\t\"net/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/client\"\n)\n\nfunc ExampleNewClient() {\n\thost, err := url.Parse(fmt.Sprintf(\"http://%s:%d\", \"localhost\", 8086))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// NOTE: this assumes you've setup a user and have setup shell env variables,\n\t// namely INFLUX_USER/INFLUX_PWD. If not just omit Username/Password below.\n\tconf := client.Config{\n\t\tURL:      *host,\n\t\tUsername: os.Getenv(\"INFLUX_USER\"),\n\t\tPassword: os.Getenv(\"INFLUX_PWD\"),\n\t}\n\tcon, err := client.NewClient(conf)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Println(\"Connection\", con)\n}\n\nfunc ExampleClient_Ping() {\n\thost, err := url.Parse(fmt.Sprintf(\"http://%s:%d\", \"localhost\", 8086))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tcon, err := client.NewClient(client.Config{URL: *host})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdur, ver, err := con.Ping()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Printf(\"Happy as a hippo! 
%v, %s\", dur, ver)\n}\n\nfunc ExampleClient_Query() {\n\thost, err := url.Parse(fmt.Sprintf(\"http://%s:%d\", \"localhost\", 8086))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tcon, err := client.NewClient(client.Config{URL: *host})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tq := client.Query{\n\t\tCommand:  \"select count(value) from shapes\",\n\t\tDatabase: \"square_holes\",\n\t}\n\tif response, err := con.Query(q); err == nil && response.Error() == nil {\n\t\tlog.Println(response.Results)\n\t}\n}\n\nfunc ExampleClient_Write() {\n\thost, err := url.Parse(fmt.Sprintf(\"http://%s:%d\", \"localhost\", 8086))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tcon, err := client.NewClient(client.Config{URL: *host})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar (\n\t\tshapes     = []string{\"circle\", \"rectangle\", \"square\", \"triangle\"}\n\t\tcolors     = []string{\"red\", \"blue\", \"green\"}\n\t\tsampleSize = 1000\n\t\tpts        = make([]client.Point, sampleSize)\n\t)\n\n\trand.Seed(42)\n\tfor i := 0; i < sampleSize; i++ {\n\t\tpts[i] = client.Point{\n\t\t\tMeasurement: \"shapes\",\n\t\t\tTags: map[string]string{\n\t\t\t\t\"color\": strconv.Itoa(rand.Intn(len(colors))),\n\t\t\t\t\"shape\": strconv.Itoa(rand.Intn(len(shapes))),\n\t\t\t},\n\t\t\tFields: map[string]interface{}{\n\t\t\t\t\"value\": rand.Intn(sampleSize),\n\t\t\t},\n\t\t\tTime:      time.Now(),\n\t\t\tPrecision: \"s\",\n\t\t}\n\t}\n\n\tbps := client.BatchPoints{\n\t\tPoints:          pts,\n\t\tDatabase:        \"BumbeBeeTuna\",\n\t\tRetentionPolicy: \"default\",\n\t}\n\t_, err = con.Write(bps)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/client/influxdb.go",
    "content": "// Package client implements a now-deprecated client for InfluxDB;\n// use github.com/influxdata/influxdb/client/v2 instead.\npackage client // import \"github.com/influxdata/influxdb/client\"\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n)\n\nconst (\n\t// DefaultHost is the default host used to connect to an InfluxDB instance\n\tDefaultHost = \"localhost\"\n\n\t// DefaultPort is the default port used to connect to an InfluxDB instance\n\tDefaultPort = 8086\n\n\t// DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance\n\tDefaultTimeout = 0\n)\n\n// Query is used to send a command to the server. Both Command and Database are required.\ntype Query struct {\n\tCommand  string\n\tDatabase string\n\n\t// Chunked tells the server to send back chunked responses. This places\n\t// less load on the server by sending back chunks of the response rather\n\t// than waiting for the entire response all at once.\n\tChunked bool\n\n\t// ChunkSize sets the maximum number of rows that will be returned per\n\t// chunk. 
Chunks are either divided based on their series or if they hit\n\t// the chunk size limit.\n\t//\n\t// Chunked must be set to true for this option to be used.\n\tChunkSize int\n}\n\n// ParseConnectionString will parse a string to create a valid connection URL\nfunc ParseConnectionString(path string, ssl bool) (url.URL, error) {\n\tvar host string\n\tvar port int\n\n\th, p, err := net.SplitHostPort(path)\n\tif err != nil {\n\t\tif path == \"\" {\n\t\t\thost = DefaultHost\n\t\t} else {\n\t\t\thost = path\n\t\t}\n\t\t// If they didn't specify a port, always use the default port\n\t\tport = DefaultPort\n\t} else {\n\t\thost = h\n\t\tport, err = strconv.Atoi(p)\n\t\tif err != nil {\n\t\t\treturn url.URL{}, fmt.Errorf(\"invalid port number %q: %s\\n\", path, err)\n\t\t}\n\t}\n\n\tu := url.URL{\n\t\tScheme: \"http\",\n\t}\n\tif ssl {\n\t\tu.Scheme = \"https\"\n\t}\n\n\tu.Host = net.JoinHostPort(host, strconv.Itoa(port))\n\n\treturn u, nil\n}\n\n// Config is used to specify what server to connect to.\n// URL: The URL of the server connecting to.\n// Username/Password are optional. 
They will be passed via basic auth if provided.\n// UserAgent: If not provided, will default \"InfluxDBClient\",\n// Timeout: If not provided, will default to 0 (no timeout)\ntype Config struct {\n\tURL              url.URL\n\tUnixSocket       string\n\tUsername         string\n\tPassword         string\n\tUserAgent        string\n\tTimeout          time.Duration\n\tPrecision        string\n\tWriteConsistency string\n\tUnsafeSsl        bool\n}\n\n// NewConfig will create a config to be used in connecting to the client\nfunc NewConfig() Config {\n\treturn Config{\n\t\tTimeout: DefaultTimeout,\n\t}\n}\n\n// Client is used to make calls to the server.\ntype Client struct {\n\turl        url.URL\n\tunixSocket string\n\tusername   string\n\tpassword   string\n\thttpClient *http.Client\n\tuserAgent  string\n\tprecision  string\n}\n\nconst (\n\t// ConsistencyOne requires at least one data node acknowledged a write.\n\tConsistencyOne = \"one\"\n\n\t// ConsistencyAll requires all data nodes to acknowledge a write.\n\tConsistencyAll = \"all\"\n\n\t// ConsistencyQuorum requires a quorum of data nodes to acknowledge a write.\n\tConsistencyQuorum = \"quorum\"\n\n\t// ConsistencyAny allows for hinted hand off, potentially no write happened yet.\n\tConsistencyAny = \"any\"\n)\n\n// NewClient will instantiate and return a connected client to issue commands to the server.\nfunc NewClient(c Config) (*Client, error) {\n\ttlsConfig := &tls.Config{\n\t\tInsecureSkipVerify: c.UnsafeSsl,\n\t}\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: tlsConfig,\n\t}\n\n\tif c.UnixSocket != \"\" {\n\t\t// No need for compression in local communications.\n\t\ttr.DisableCompression = true\n\n\t\ttr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {\n\t\t\treturn net.Dial(\"unix\", c.UnixSocket)\n\t\t}\n\t}\n\n\tclient := Client{\n\t\turl:        c.URL,\n\t\tunixSocket: c.UnixSocket,\n\t\tusername:   c.Username,\n\t\tpassword:   c.Password,\n\t\thttpClient: &http.Client{Timeout: 
c.Timeout, Transport: tr},\n\t\tuserAgent:  c.UserAgent,\n\t\tprecision:  c.Precision,\n\t}\n\tif client.userAgent == \"\" {\n\t\tclient.userAgent = \"InfluxDBClient\"\n\t}\n\treturn &client, nil\n}\n\n// SetAuth will update the username and passwords\nfunc (c *Client) SetAuth(u, p string) {\n\tc.username = u\n\tc.password = p\n}\n\n// SetPrecision will update the precision\nfunc (c *Client) SetPrecision(precision string) {\n\tc.precision = precision\n}\n\n// Query sends a command to the server and returns the Response\nfunc (c *Client) Query(q Query) (*Response, error) {\n\tu := c.url\n\n\tu.Path = \"query\"\n\tvalues := u.Query()\n\tvalues.Set(\"q\", q.Command)\n\tvalues.Set(\"db\", q.Database)\n\tif q.Chunked {\n\t\tvalues.Set(\"chunked\", \"true\")\n\t\tif q.ChunkSize > 0 {\n\t\t\tvalues.Set(\"chunk_size\", strconv.Itoa(q.ChunkSize))\n\t\t}\n\t}\n\tif c.precision != \"\" {\n\t\tvalues.Set(\"epoch\", c.precision)\n\t}\n\tu.RawQuery = values.Encode()\n\n\treq, err := http.NewRequest(\"POST\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"User-Agent\", c.userAgent)\n\tif c.username != \"\" {\n\t\treq.SetBasicAuth(c.username, c.password)\n\t}\n\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar response Response\n\tif q.Chunked {\n\t\tcr := NewChunkedResponse(resp.Body)\n\t\tfor {\n\t\t\tr, err := cr.NextResponse()\n\t\t\tif err != nil {\n\t\t\t\t// If we got an error while decoding the response, send that back.\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif r == nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tresponse.Results = append(response.Results, r.Results...)\n\t\t\tif r.Err != nil {\n\t\t\t\tresponse.Err = r.Err\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t} else {\n\t\tdec := json.NewDecoder(resp.Body)\n\t\tdec.UseNumber()\n\t\tif err := dec.Decode(&response); err != nil {\n\t\t\t// Ignore EOF errors if we got an invalid status code.\n\t\t\tif !(err == io.EOF && 
resp.StatusCode != http.StatusOK) {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\t// If we don't have an error in our json response, and didn't get StatusOK,\n\t// then send back an error.\n\tif resp.StatusCode != http.StatusOK && response.Error() == nil {\n\t\treturn &response, fmt.Errorf(\"received status code %d from server\", resp.StatusCode)\n\t}\n\treturn &response, nil\n}\n\n// Write takes BatchPoints and allows for writing of multiple points with defaults\n// If successful, error is nil and Response is nil\n// If an error occurs, Response may contain additional information if populated.\nfunc (c *Client) Write(bp BatchPoints) (*Response, error) {\n\tu := c.url\n\tu.Path = \"write\"\n\n\tvar b bytes.Buffer\n\tfor _, p := range bp.Points {\n\t\terr := checkPointTypes(p)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif p.Raw != \"\" {\n\t\t\tif _, err := b.WriteString(p.Raw); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\tfor k, v := range bp.Tags {\n\t\t\t\tif p.Tags == nil {\n\t\t\t\t\tp.Tags = make(map[string]string, len(bp.Tags))\n\t\t\t\t}\n\t\t\t\tp.Tags[k] = v\n\t\t\t}\n\n\t\t\tif _, err := b.WriteString(p.MarshalString()); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tif err := b.WriteByte('\\n'); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(\"POST\", u.String(), &b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Content-Type\", \"\")\n\treq.Header.Set(\"User-Agent\", c.userAgent)\n\tif c.username != \"\" {\n\t\treq.SetBasicAuth(c.username, c.password)\n\t}\n\n\tprecision := bp.Precision\n\tif precision == \"\" {\n\t\tprecision = c.precision\n\t}\n\n\tparams := req.URL.Query()\n\tparams.Set(\"db\", bp.Database)\n\tparams.Set(\"rp\", bp.RetentionPolicy)\n\tparams.Set(\"precision\", precision)\n\tparams.Set(\"consistency\", bp.WriteConsistency)\n\treq.URL.RawQuery = params.Encode()\n\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn 
nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar response Response\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {\n\t\tvar err = fmt.Errorf(string(body))\n\t\tresponse.Err = err\n\t\treturn &response, err\n\t}\n\n\treturn nil, nil\n}\n\n// WriteLineProtocol takes a string with line returns to delimit each write\n// If successful, error is nil and Response is nil\n// If an error occurs, Response may contain additional information if populated.\nfunc (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) {\n\tu := c.url\n\tu.Path = \"write\"\n\n\tr := strings.NewReader(data)\n\n\treq, err := http.NewRequest(\"POST\", u.String(), r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Content-Type\", \"\")\n\treq.Header.Set(\"User-Agent\", c.userAgent)\n\tif c.username != \"\" {\n\t\treq.SetBasicAuth(c.username, c.password)\n\t}\n\tparams := req.URL.Query()\n\tparams.Set(\"db\", database)\n\tparams.Set(\"rp\", retentionPolicy)\n\tparams.Set(\"precision\", precision)\n\tparams.Set(\"consistency\", writeConsistency)\n\treq.URL.RawQuery = params.Encode()\n\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar response Response\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {\n\t\terr := fmt.Errorf(string(body))\n\t\tresponse.Err = err\n\t\treturn &response, err\n\t}\n\n\treturn nil, nil\n}\n\n// Ping will check to see if the server is up\n// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.\nfunc (c *Client) Ping() (time.Duration, string, error) {\n\tnow := time.Now()\n\tu := c.url\n\tu.Path = \"ping\"\n\n\treq, err := 
http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn 0, \"\", err\n\t}\n\treq.Header.Set(\"User-Agent\", c.userAgent)\n\tif c.username != \"\" {\n\t\treq.SetBasicAuth(c.username, c.password)\n\t}\n\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn 0, \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tversion := resp.Header.Get(\"X-Influxdb-Version\")\n\treturn time.Since(now), version, nil\n}\n\n// Structs\n\n// Message represents a user message.\ntype Message struct {\n\tLevel string `json:\"level,omitempty\"`\n\tText  string `json:\"text,omitempty\"`\n}\n\n// Result represents a resultset returned from a single statement.\ntype Result struct {\n\tSeries   []models.Row\n\tMessages []*Message\n\tErr      error\n}\n\n// MarshalJSON encodes the result into JSON.\nfunc (r *Result) MarshalJSON() ([]byte, error) {\n\t// Define a struct that outputs \"error\" as a string.\n\tvar o struct {\n\t\tSeries   []models.Row `json:\"series,omitempty\"`\n\t\tMessages []*Message   `json:\"messages,omitempty\"`\n\t\tErr      string       `json:\"error,omitempty\"`\n\t}\n\n\t// Copy fields to output struct.\n\to.Series = r.Series\n\to.Messages = r.Messages\n\tif r.Err != nil {\n\t\to.Err = r.Err.Error()\n\t}\n\n\treturn json.Marshal(&o)\n}\n\n// UnmarshalJSON decodes the data into the Result struct\nfunc (r *Result) UnmarshalJSON(b []byte) error {\n\tvar o struct {\n\t\tSeries   []models.Row `json:\"series,omitempty\"`\n\t\tMessages []*Message   `json:\"messages,omitempty\"`\n\t\tErr      string       `json:\"error,omitempty\"`\n\t}\n\n\tdec := json.NewDecoder(bytes.NewBuffer(b))\n\tdec.UseNumber()\n\terr := dec.Decode(&o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.Series = o.Series\n\tr.Messages = o.Messages\n\tif o.Err != \"\" {\n\t\tr.Err = errors.New(o.Err)\n\t}\n\treturn nil\n}\n\n// Response represents a list of statement results.\ntype Response struct {\n\tResults []Result\n\tErr     error\n}\n\n// MarshalJSON encodes the response into 
JSON.\nfunc (r *Response) MarshalJSON() ([]byte, error) {\n\t// Define a struct that outputs \"error\" as a string.\n\tvar o struct {\n\t\tResults []Result `json:\"results,omitempty\"`\n\t\tErr     string   `json:\"error,omitempty\"`\n\t}\n\n\t// Copy fields to output struct.\n\to.Results = r.Results\n\tif r.Err != nil {\n\t\to.Err = r.Err.Error()\n\t}\n\n\treturn json.Marshal(&o)\n}\n\n// UnmarshalJSON decodes the data into the Response struct\nfunc (r *Response) UnmarshalJSON(b []byte) error {\n\tvar o struct {\n\t\tResults []Result `json:\"results,omitempty\"`\n\t\tErr     string   `json:\"error,omitempty\"`\n\t}\n\n\tdec := json.NewDecoder(bytes.NewBuffer(b))\n\tdec.UseNumber()\n\terr := dec.Decode(&o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.Results = o.Results\n\tif o.Err != \"\" {\n\t\tr.Err = errors.New(o.Err)\n\t}\n\treturn nil\n}\n\n// Error returns the first error from any statement.\n// Returns nil if no errors occurred on any statements.\nfunc (r *Response) Error() error {\n\tif r.Err != nil {\n\t\treturn r.Err\n\t}\n\tfor _, result := range r.Results {\n\t\tif result.Err != nil {\n\t\t\treturn result.Err\n\t\t}\n\t}\n\treturn nil\n}\n\n// duplexReader reads responses and writes it to another writer while\n// satisfying the reader interface.\ntype duplexReader struct {\n\tr io.Reader\n\tw io.Writer\n}\n\nfunc (r *duplexReader) Read(p []byte) (n int, err error) {\n\tn, err = r.r.Read(p)\n\tif err == nil {\n\t\tr.w.Write(p[:n])\n\t}\n\treturn n, err\n}\n\n// ChunkedResponse represents a response from the server that\n// uses chunking to stream the output.\ntype ChunkedResponse struct {\n\tdec    *json.Decoder\n\tduplex *duplexReader\n\tbuf    bytes.Buffer\n}\n\n// NewChunkedResponse reads a stream and produces responses from the stream.\nfunc NewChunkedResponse(r io.Reader) *ChunkedResponse {\n\tresp := &ChunkedResponse{}\n\tresp.duplex = &duplexReader{r: r, w: &resp.buf}\n\tresp.dec = json.NewDecoder(resp.duplex)\n\tresp.dec.UseNumber()\n\treturn 
resp\n}\n\n// NextResponse reads the next line of the stream and returns a response.\nfunc (r *ChunkedResponse) NextResponse() (*Response, error) {\n\tvar response Response\n\tif err := r.dec.Decode(&response); err != nil {\n\t\tif err == io.EOF {\n\t\t\treturn nil, nil\n\t\t}\n\t\t// A decoding error happened. This probably means the server crashed\n\t\t// and sent a last-ditch error message to us. Ensure we have read the\n\t\t// entirety of the connection to get any remaining error text.\n\t\tio.Copy(ioutil.Discard, r.duplex)\n\t\treturn nil, errors.New(strings.TrimSpace(r.buf.String()))\n\t}\n\tr.buf.Reset()\n\treturn &response, nil\n}\n\n// Point defines the fields that will be written to the database\n// Measurement, Time, and Fields are required\n// Precision can be specified if the time is in epoch format (integer).\n// Valid values for Precision are n, u, ms, s, m, and h\ntype Point struct {\n\tMeasurement string\n\tTags        map[string]string\n\tTime        time.Time\n\tFields      map[string]interface{}\n\tPrecision   string\n\tRaw         string\n}\n\n// MarshalJSON will format the time in RFC3339Nano\n// Precision is also ignored as it is only used for writing, not reading\n// Or another way to say it is we always send back in nanosecond precision\nfunc (p *Point) MarshalJSON() ([]byte, error) {\n\tpoint := struct {\n\t\tMeasurement string                 `json:\"measurement,omitempty\"`\n\t\tTags        map[string]string      `json:\"tags,omitempty\"`\n\t\tTime        string                 `json:\"time,omitempty\"`\n\t\tFields      map[string]interface{} `json:\"fields,omitempty\"`\n\t\tPrecision   string                 `json:\"precision,omitempty\"`\n\t}{\n\t\tMeasurement: p.Measurement,\n\t\tTags:        p.Tags,\n\t\tFields:      p.Fields,\n\t\tPrecision:   p.Precision,\n\t}\n\t// Let it omit empty if it's really zero\n\tif !p.Time.IsZero() {\n\t\tpoint.Time = p.Time.UTC().Format(time.RFC3339Nano)\n\t}\n\treturn json.Marshal(&point)\n}\n\n// 
MarshalString renders string representation of a Point with specified\n// precision. The default precision is nanoseconds.\nfunc (p *Point) MarshalString() string {\n\tpt, err := models.NewPoint(p.Measurement, models.NewTags(p.Tags), p.Fields, p.Time)\n\tif err != nil {\n\t\treturn \"# ERROR: \" + err.Error() + \" \" + p.Measurement\n\t}\n\tif p.Precision == \"\" || p.Precision == \"ns\" || p.Precision == \"n\" {\n\t\treturn pt.String()\n\t}\n\treturn pt.PrecisionString(p.Precision)\n}\n\n// UnmarshalJSON decodes the data into the Point struct\nfunc (p *Point) UnmarshalJSON(b []byte) error {\n\tvar normal struct {\n\t\tMeasurement string                 `json:\"measurement\"`\n\t\tTags        map[string]string      `json:\"tags\"`\n\t\tTime        time.Time              `json:\"time\"`\n\t\tPrecision   string                 `json:\"precision\"`\n\t\tFields      map[string]interface{} `json:\"fields\"`\n\t}\n\tvar epoch struct {\n\t\tMeasurement string                 `json:\"measurement\"`\n\t\tTags        map[string]string      `json:\"tags\"`\n\t\tTime        *int64                 `json:\"time\"`\n\t\tPrecision   string                 `json:\"precision\"`\n\t\tFields      map[string]interface{} `json:\"fields\"`\n\t}\n\n\tif err := func() error {\n\t\tvar err error\n\t\tdec := json.NewDecoder(bytes.NewBuffer(b))\n\t\tdec.UseNumber()\n\t\tif err = dec.Decode(&epoch); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Convert from epoch to time.Time, but only if Time\n\t\t// was actually set.\n\t\tvar ts time.Time\n\t\tif epoch.Time != nil {\n\t\t\tts, err = EpochToTime(*epoch.Time, epoch.Precision)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tp.Measurement = epoch.Measurement\n\t\tp.Tags = epoch.Tags\n\t\tp.Time = ts\n\t\tp.Precision = epoch.Precision\n\t\tp.Fields = normalizeFields(epoch.Fields)\n\t\treturn nil\n\t}(); err == nil {\n\t\treturn nil\n\t}\n\n\tdec := json.NewDecoder(bytes.NewBuffer(b))\n\tdec.UseNumber()\n\tif err := 
dec.Decode(&normal); err != nil {\n\t\treturn err\n\t}\n\tnormal.Time = SetPrecision(normal.Time, normal.Precision)\n\tp.Measurement = normal.Measurement\n\tp.Tags = normal.Tags\n\tp.Time = normal.Time\n\tp.Precision = normal.Precision\n\tp.Fields = normalizeFields(normal.Fields)\n\n\treturn nil\n}\n\n// Remove any notion of json.Number\nfunc normalizeFields(fields map[string]interface{}) map[string]interface{} {\n\tnewFields := map[string]interface{}{}\n\n\tfor k, v := range fields {\n\t\tswitch v := v.(type) {\n\t\tcase json.Number:\n\t\t\tjv, e := v.Float64()\n\t\t\tif e != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"unable to convert json.Number to float64: %s\", e))\n\t\t\t}\n\t\t\tnewFields[k] = jv\n\t\tdefault:\n\t\t\tnewFields[k] = v\n\t\t}\n\t}\n\treturn newFields\n}\n\n// BatchPoints is used to send batched data in a single write.\n// Database and Points are required\n// If no retention policy is specified, it will use the databases default retention policy.\n// If tags are specified, they will be \"merged\" with all points. 
If a point already has that tag, it will be ignored.\n// If time is specified, it will be applied to any point with an empty time.\n// Precision can be specified if the time is in epoch format (integer).\n// Valid values for Precision are n, u, ms, s, m, and h\ntype BatchPoints struct {\n\tPoints           []Point           `json:\"points,omitempty\"`\n\tDatabase         string            `json:\"database,omitempty\"`\n\tRetentionPolicy  string            `json:\"retentionPolicy,omitempty\"`\n\tTags             map[string]string `json:\"tags,omitempty\"`\n\tTime             time.Time         `json:\"time,omitempty\"`\n\tPrecision        string            `json:\"precision,omitempty\"`\n\tWriteConsistency string            `json:\"-\"`\n}\n\n// UnmarshalJSON decodes the data into the BatchPoints struct\nfunc (bp *BatchPoints) UnmarshalJSON(b []byte) error {\n\tvar normal struct {\n\t\tPoints          []Point           `json:\"points\"`\n\t\tDatabase        string            `json:\"database\"`\n\t\tRetentionPolicy string            `json:\"retentionPolicy\"`\n\t\tTags            map[string]string `json:\"tags\"`\n\t\tTime            time.Time         `json:\"time\"`\n\t\tPrecision       string            `json:\"precision\"`\n\t}\n\tvar epoch struct {\n\t\tPoints          []Point           `json:\"points\"`\n\t\tDatabase        string            `json:\"database\"`\n\t\tRetentionPolicy string            `json:\"retentionPolicy\"`\n\t\tTags            map[string]string `json:\"tags\"`\n\t\tTime            *int64            `json:\"time\"`\n\t\tPrecision       string            `json:\"precision\"`\n\t}\n\n\tif err := func() error {\n\t\tvar err error\n\t\tif err = json.Unmarshal(b, &epoch); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Convert from epoch to time.Time\n\t\tvar ts time.Time\n\t\tif epoch.Time != nil {\n\t\t\tts, err = EpochToTime(*epoch.Time, epoch.Precision)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tbp.Points = 
epoch.Points\n\t\tbp.Database = epoch.Database\n\t\tbp.RetentionPolicy = epoch.RetentionPolicy\n\t\tbp.Tags = epoch.Tags\n\t\tbp.Time = ts\n\t\tbp.Precision = epoch.Precision\n\t\treturn nil\n\t}(); err == nil {\n\t\treturn nil\n\t}\n\n\tif err := json.Unmarshal(b, &normal); err != nil {\n\t\treturn err\n\t}\n\tnormal.Time = SetPrecision(normal.Time, normal.Precision)\n\tbp.Points = normal.Points\n\tbp.Database = normal.Database\n\tbp.RetentionPolicy = normal.RetentionPolicy\n\tbp.Tags = normal.Tags\n\tbp.Time = normal.Time\n\tbp.Precision = normal.Precision\n\n\treturn nil\n}\n\n// utility functions\n\n// Addr provides the current url as a string of the server the client is connected to.\nfunc (c *Client) Addr() string {\n\tif c.unixSocket != \"\" {\n\t\treturn c.unixSocket\n\t}\n\treturn c.url.String()\n}\n\n// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found.\nfunc checkPointTypes(p Point) error {\n\tfor _, v := range p.Fields {\n\t\tswitch v.(type) {\n\t\tcase int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool, string, nil:\n\t\t\treturn nil\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unsupported point type: %T\", v)\n\t\t}\n\t}\n\treturn nil\n}\n\n// helper functions\n\n// EpochToTime takes a unix epoch time and uses precision to return back a time.Time\nfunc EpochToTime(epoch int64, precision string) (time.Time, error) {\n\tif precision == \"\" {\n\t\tprecision = \"s\"\n\t}\n\tvar t time.Time\n\tswitch precision {\n\tcase \"h\":\n\t\tt = time.Unix(0, epoch*int64(time.Hour))\n\tcase \"m\":\n\t\tt = time.Unix(0, epoch*int64(time.Minute))\n\tcase \"s\":\n\t\tt = time.Unix(0, epoch*int64(time.Second))\n\tcase \"ms\":\n\t\tt = time.Unix(0, epoch*int64(time.Millisecond))\n\tcase \"u\":\n\t\tt = time.Unix(0, epoch*int64(time.Microsecond))\n\tcase \"n\":\n\t\tt = time.Unix(0, epoch)\n\tdefault:\n\t\treturn time.Time{}, fmt.Errorf(\"Unknown precision %q\", precision)\n\t}\n\treturn 
t, nil\n}\n\n// SetPrecision will round a time to the specified precision\nfunc SetPrecision(t time.Time, precision string) time.Time {\n\tswitch precision {\n\tcase \"n\":\n\tcase \"u\":\n\t\treturn t.Round(time.Microsecond)\n\tcase \"ms\":\n\t\treturn t.Round(time.Millisecond)\n\tcase \"s\":\n\t\treturn t.Round(time.Second)\n\tcase \"m\":\n\t\treturn t.Round(time.Minute)\n\tcase \"h\":\n\t\treturn t.Round(time.Hour)\n\t}\n\treturn t\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/client/influxdb_test.go",
    "content": "package client_test\n\nimport (\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/client\"\n)\n\nfunc BenchmarkWrite(b *testing.B) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar data client.Response\n\t\tw.WriteHeader(http.StatusNoContent)\n\t\t_ = json.NewEncoder(w).Encode(data)\n\t}))\n\tdefer ts.Close()\n\n\tu, _ := url.Parse(ts.URL)\n\tconfig := client.Config{URL: *u}\n\tc, err := client.NewClient(config)\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\n\tbp := client.BatchPoints{\n\t\tPoints: []client.Point{\n\t\t\t{Fields: map[string]interface{}{\"value\": 101}}},\n\t}\n\tfor i := 0; i < b.N; i++ {\n\t\tr, err := c.Write(bp)\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t\t}\n\t\tif r != nil {\n\t\t\tb.Fatalf(\"unexpected response. 
expected %v, actual %v\", nil, r)\n\t\t}\n\t}\n}\n\nfunc BenchmarkUnmarshalJSON2Tags(b *testing.B) {\n\tvar bp client.BatchPoints\n\tdata := []byte(`\n{\n    \"database\": \"foo\",\n    \"retentionPolicy\": \"bar\",\n    \"points\": [\n        {\n            \"name\": \"cpu\",\n            \"tags\": {\n                \"host\": \"server01\",\n                \"region\": \"us-east1\"\n            },\n            \"time\": 14244733039069373,\n            \"precision\": \"n\",\n            \"fields\": {\n                    \"value\": 4541770385657154000\n            }\n        }\n    ]\n}\n`)\n\n\tfor i := 0; i < b.N; i++ {\n\t\tif err := json.Unmarshal(data, &bp); err != nil {\n\t\t\tb.Errorf(\"unable to unmarshal nanosecond data: %s\", err.Error())\n\t\t}\n\t\tb.SetBytes(int64(len(data)))\n\t}\n}\n\nfunc BenchmarkUnmarshalJSON10Tags(b *testing.B) {\n\tvar bp client.BatchPoints\n\tdata := []byte(`\n{\n    \"database\": \"foo\",\n    \"retentionPolicy\": \"bar\",\n    \"points\": [\n        {\n            \"name\": \"cpu\",\n            \"tags\": {\n                \"host\": \"server01\",\n                \"region\": \"us-east1\",\n                \"tag1\": \"value1\",\n                \"tag2\": \"value2\",\n                \"tag2\": \"value3\",\n                \"tag4\": \"value4\",\n                \"tag5\": \"value5\",\n                \"tag6\": \"value6\",\n                \"tag7\": \"value7\",\n                \"tag8\": \"value8\"\n            },\n            \"time\": 14244733039069373,\n            \"precision\": \"n\",\n            \"fields\": {\n                    \"value\": 4541770385657154000\n            }\n        }\n    ]\n}\n`)\n\n\tfor i := 0; i < b.N; i++ {\n\t\tif err := json.Unmarshal(data, &bp); err != nil {\n\t\t\tb.Errorf(\"unable to unmarshal nanosecond data: %s\", err.Error())\n\t\t}\n\t\tb.SetBytes(int64(len(data)))\n\t}\n}\n\nfunc TestNewClient(t *testing.T) {\n\tconfig := client.Config{}\n\t_, err := client.NewClient(config)\n\tif err != 
nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n}\n\nfunc TestClient_Ping(t *testing.T) {\n\tts := emptyTestServer()\n\tdefer ts.Close()\n\n\tu, _ := url.Parse(ts.URL)\n\tconfig := client.Config{URL: *u}\n\tc, err := client.NewClient(config)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\td, version, err := c.Ping()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\tif d.Nanoseconds() == 0 {\n\t\tt.Fatalf(\"expected a duration greater than zero.  actual %v\", d.Nanoseconds())\n\t}\n\tif version != \"x.x\" {\n\t\tt.Fatalf(\"unexpected version.  expected %s,  actual %v\", \"x.x\", version)\n\t}\n}\n\nfunc TestClient_Query(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar data client.Response\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_ = json.NewEncoder(w).Encode(data)\n\t}))\n\tdefer ts.Close()\n\n\tu, _ := url.Parse(ts.URL)\n\tconfig := client.Config{URL: *u}\n\tc, err := client.NewClient(config)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\n\tquery := client.Query{}\n\t_, err = c.Query(query)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n}\n\nfunc TestClient_ChunkedQuery(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar data client.Response\n\t\tw.WriteHeader(http.StatusOK)\n\t\tenc := json.NewEncoder(w)\n\t\t_ = enc.Encode(data)\n\t\t_ = enc.Encode(data)\n\t}))\n\tdefer ts.Close()\n\n\tu, _ := url.Parse(ts.URL)\n\tconfig := client.Config{URL: *u}\n\tc, err := client.NewClient(config)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\n\tquery := client.Query{Chunked: true}\n\t_, err = c.Query(query)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  
expected %v, actual %v\", nil, err)\n\t}\n}\n\nfunc TestClient_BasicAuth(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tu, p, ok := r.BasicAuth()\n\n\t\tif !ok {\n\t\t\tt.Errorf(\"basic auth error\")\n\t\t}\n\t\tif u != \"username\" {\n\t\t\tt.Errorf(\"unexpected username, expected %q, actual %q\", \"username\", u)\n\t\t}\n\t\tif p != \"password\" {\n\t\t\tt.Errorf(\"unexpected password, expected %q, actual %q\", \"password\", p)\n\t\t}\n\t\tw.WriteHeader(http.StatusNoContent)\n\t}))\n\tdefer ts.Close()\n\n\tu, _ := url.Parse(ts.URL)\n\tu.User = url.UserPassword(\"username\", \"password\")\n\tconfig := client.Config{URL: *u, Username: \"username\", Password: \"password\"}\n\tc, err := client.NewClient(config)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\n\t_, _, err = c.Ping()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n}\n\nfunc TestClient_Write(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar data client.Response\n\t\tw.WriteHeader(http.StatusNoContent)\n\t\t_ = json.NewEncoder(w).Encode(data)\n\t}))\n\tdefer ts.Close()\n\n\tu, _ := url.Parse(ts.URL)\n\tconfig := client.Config{URL: *u}\n\tc, err := client.NewClient(config)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\n\tbp := client.BatchPoints{}\n\tr, err := c.Write(bp)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\tif r != nil {\n\t\tt.Fatalf(\"unexpected response. 
expected %v, actual %v\", nil, r)\n\t}\n}\n\nfunc TestClient_UserAgent(t *testing.T) {\n\treceivedUserAgent := \"\"\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\treceivedUserAgent = r.UserAgent()\n\n\t\tvar data client.Response\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_ = json.NewEncoder(w).Encode(data)\n\t}))\n\tdefer ts.Close()\n\n\t_, err := http.Get(ts.URL)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\n\ttests := []struct {\n\t\tname      string\n\t\tuserAgent string\n\t\texpected  string\n\t}{\n\t\t{\n\t\t\tname:      \"Empty user agent\",\n\t\t\tuserAgent: \"\",\n\t\t\texpected:  \"InfluxDBClient\",\n\t\t},\n\t\t{\n\t\t\tname:      \"Custom user agent\",\n\t\t\tuserAgent: \"Test Influx Client\",\n\t\t\texpected:  \"Test Influx Client\",\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tu, _ := url.Parse(ts.URL)\n\t\tconfig := client.Config{URL: *u, UserAgent: test.userAgent}\n\t\tc, err := client.NewClient(config)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t\t}\n\n\t\treceivedUserAgent = \"\"\n\t\tquery := client.Query{}\n\t\t_, err = c.Query(query)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t\t}\n\t\tif !strings.HasPrefix(receivedUserAgent, test.expected) {\n\t\t\tt.Fatalf(\"Unexpected user agent. expected %v, actual %v\", test.expected, receivedUserAgent)\n\t\t}\n\n\t\treceivedUserAgent = \"\"\n\t\tbp := client.BatchPoints{}\n\t\t_, err = c.Write(bp)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t\t}\n\t\tif !strings.HasPrefix(receivedUserAgent, test.expected) {\n\t\t\tt.Fatalf(\"Unexpected user agent. expected %v, actual %v\", test.expected, receivedUserAgent)\n\t\t}\n\n\t\treceivedUserAgent = \"\"\n\t\t_, _, err = c.Ping()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error.  
expected %v, actual %v\", nil, err)\n\t\t}\n\t\tif receivedUserAgent != test.expected {\n\t\t\tt.Fatalf(\"Unexpected user agent. expected %v, actual %v\", test.expected, receivedUserAgent)\n\t\t}\n\t}\n}\n\nfunc TestClient_Messages(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(`{\"results\":[{\"messages\":[{\"level\":\"warning\",\"text\":\"deprecation test\"}]}]}`))\n\t}))\n\tdefer ts.Close()\n\n\tu, _ := url.Parse(ts.URL)\n\tconfig := client.Config{URL: *u}\n\tc, err := client.NewClient(config)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\n\tquery := client.Query{}\n\tresp, err := c.Query(query)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\n\tif got, exp := len(resp.Results), 1; got != exp {\n\t\tt.Fatalf(\"unexpected number of results.  expected %v, actual %v\", exp, got)\n\t}\n\n\tr := resp.Results[0]\n\tif got, exp := len(r.Messages), 1; got != exp {\n\t\tt.Fatalf(\"unexpected number of messages.  expected %v, actual %v\", exp, got)\n\t}\n\n\tm := r.Messages[0]\n\tif got, exp := m.Level, \"warning\"; got != exp {\n\t\tt.Errorf(\"unexpected message level.  expected %v, actual %v\", exp, got)\n\t}\n\tif got, exp := m.Text, \"deprecation test\"; got != exp {\n\t\tt.Errorf(\"unexpected message text.  
expected %v, actual %v\", exp, got)\n\t}\n}\n\nfunc TestPoint_UnmarshalEpoch(t *testing.T) {\n\tnow := time.Now()\n\ttests := []struct {\n\t\tname      string\n\t\tepoch     int64\n\t\tprecision string\n\t\texpected  time.Time\n\t}{\n\t\t{\n\t\t\tname:      \"nanoseconds\",\n\t\t\tepoch:     now.UnixNano(),\n\t\t\tprecision: \"n\",\n\t\t\texpected:  now,\n\t\t},\n\t\t{\n\t\t\tname:      \"microseconds\",\n\t\t\tepoch:     now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond),\n\t\t\tprecision: \"u\",\n\t\t\texpected:  now.Round(time.Microsecond),\n\t\t},\n\t\t{\n\t\t\tname:      \"milliseconds\",\n\t\t\tepoch:     now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond),\n\t\t\tprecision: \"ms\",\n\t\t\texpected:  now.Round(time.Millisecond),\n\t\t},\n\t\t{\n\t\t\tname:      \"seconds\",\n\t\t\tepoch:     now.Round(time.Second).UnixNano() / int64(time.Second),\n\t\t\tprecision: \"s\",\n\t\t\texpected:  now.Round(time.Second),\n\t\t},\n\t\t{\n\t\t\tname:      \"minutes\",\n\t\t\tepoch:     now.Round(time.Minute).UnixNano() / int64(time.Minute),\n\t\t\tprecision: \"m\",\n\t\t\texpected:  now.Round(time.Minute),\n\t\t},\n\t\t{\n\t\t\tname:      \"hours\",\n\t\t\tepoch:     now.Round(time.Hour).UnixNano() / int64(time.Hour),\n\t\t\tprecision: \"h\",\n\t\t\texpected:  now.Round(time.Hour),\n\t\t},\n\t\t{\n\t\t\tname:      \"max int64\",\n\t\t\tepoch:     9223372036854775807,\n\t\t\tprecision: \"n\",\n\t\t\texpected:  time.Unix(0, 9223372036854775807),\n\t\t},\n\t\t{\n\t\t\tname:      \"100 years from now\",\n\t\t\tepoch:     now.Add(time.Hour * 24 * 365 * 100).UnixNano(),\n\t\t\tprecision: \"n\",\n\t\t\texpected:  now.Add(time.Hour * 24 * 365 * 100),\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Logf(\"testing %q\\n\", test.name)\n\t\tdata := []byte(fmt.Sprintf(`{\"time\": %d, \"precision\":\"%s\"}`, test.epoch, test.precision))\n\t\tt.Logf(\"json: %s\", string(data))\n\t\tvar p client.Point\n\t\terr := json.Unmarshal(data, &p)\n\t\tif err != 
nil {\n\t\t\tt.Fatalf(\"unexpected error.  exptected: %v, actual: %v\", nil, err)\n\t\t}\n\t\tif !p.Time.Equal(test.expected) {\n\t\t\tt.Fatalf(\"Unexpected time.  expected: %v, actual: %v\", test.expected, p.Time)\n\t\t}\n\t}\n}\n\nfunc TestPoint_UnmarshalRFC(t *testing.T) {\n\tnow := time.Now().UTC()\n\ttests := []struct {\n\t\tname     string\n\t\trfc      string\n\t\tnow      time.Time\n\t\texpected time.Time\n\t}{\n\t\t{\n\t\t\tname:     \"RFC3339Nano\",\n\t\t\trfc:      time.RFC3339Nano,\n\t\t\tnow:      now,\n\t\t\texpected: now,\n\t\t},\n\t\t{\n\t\t\tname:     \"RFC3339\",\n\t\t\trfc:      time.RFC3339,\n\t\t\tnow:      now.Round(time.Second),\n\t\t\texpected: now.Round(time.Second),\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Logf(\"testing %q\\n\", test.name)\n\t\tts := test.now.Format(test.rfc)\n\t\tdata := []byte(fmt.Sprintf(`{\"time\": %q}`, ts))\n\t\tt.Logf(\"json: %s\", string(data))\n\t\tvar p client.Point\n\t\terr := json.Unmarshal(data, &p)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error.  exptected: %v, actual: %v\", nil, err)\n\t\t}\n\t\tif !p.Time.Equal(test.expected) {\n\t\t\tt.Fatalf(\"Unexpected time.  
expected: %v, actual: %v\", test.expected, p.Time)\n\t\t}\n\t}\n}\n\nfunc TestPoint_MarshalOmitempty(t *testing.T) {\n\tnow := time.Now().UTC()\n\ttests := []struct {\n\t\tname     string\n\t\tpoint    client.Point\n\t\tnow      time.Time\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tname:     \"all empty\",\n\t\t\tpoint:    client.Point{Measurement: \"cpu\", Fields: map[string]interface{}{\"value\": 1.1}},\n\t\t\tnow:      now,\n\t\t\texpected: `{\"measurement\":\"cpu\",\"fields\":{\"value\":1.1}}`,\n\t\t},\n\t\t{\n\t\t\tname:     \"with time\",\n\t\t\tpoint:    client.Point{Measurement: \"cpu\", Fields: map[string]interface{}{\"value\": 1.1}, Time: now},\n\t\t\tnow:      now,\n\t\t\texpected: fmt.Sprintf(`{\"measurement\":\"cpu\",\"time\":\"%s\",\"fields\":{\"value\":1.1}}`, now.Format(time.RFC3339Nano)),\n\t\t},\n\t\t{\n\t\t\tname:     \"with tags\",\n\t\t\tpoint:    client.Point{Measurement: \"cpu\", Fields: map[string]interface{}{\"value\": 1.1}, Tags: map[string]string{\"foo\": \"bar\"}},\n\t\t\tnow:      now,\n\t\t\texpected: `{\"measurement\":\"cpu\",\"tags\":{\"foo\":\"bar\"},\"fields\":{\"value\":1.1}}`,\n\t\t},\n\t\t{\n\t\t\tname:     \"with precision\",\n\t\t\tpoint:    client.Point{Measurement: \"cpu\", Fields: map[string]interface{}{\"value\": 1.1}, Precision: \"ms\"},\n\t\t\tnow:      now,\n\t\t\texpected: `{\"measurement\":\"cpu\",\"fields\":{\"value\":1.1},\"precision\":\"ms\"}`,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Logf(\"testing %q\\n\", test.name)\n\t\tb, err := json.Marshal(&test.point)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error.  exptected: %v, actual: %v\", nil, err)\n\t\t}\n\t\tif test.expected != string(b) {\n\t\t\tt.Fatalf(\"Unexpected result.  
expected: %v, actual: %v\", test.expected, string(b))\n\t\t}\n\t}\n}\n\nfunc TestEpochToTime(t *testing.T) {\n\tnow := time.Now()\n\n\ttests := []struct {\n\t\tname      string\n\t\tepoch     int64\n\t\tprecision string\n\t\texpected  time.Time\n\t}{\n\t\t{name: \"nanoseconds\", epoch: now.UnixNano(), precision: \"n\", expected: now},\n\t\t{name: \"microseconds\", epoch: now.Round(time.Microsecond).UnixNano() / int64(time.Microsecond), precision: \"u\", expected: now.Round(time.Microsecond)},\n\t\t{name: \"milliseconds\", epoch: now.Round(time.Millisecond).UnixNano() / int64(time.Millisecond), precision: \"ms\", expected: now.Round(time.Millisecond)},\n\t\t{name: \"seconds\", epoch: now.Round(time.Second).UnixNano() / int64(time.Second), precision: \"s\", expected: now.Round(time.Second)},\n\t\t{name: \"minutes\", epoch: now.Round(time.Minute).UnixNano() / int64(time.Minute), precision: \"m\", expected: now.Round(time.Minute)},\n\t\t{name: \"hours\", epoch: now.Round(time.Hour).UnixNano() / int64(time.Hour), precision: \"h\", expected: now.Round(time.Hour)},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Logf(\"testing %q\\n\", test.name)\n\t\ttm, e := client.EpochToTime(test.epoch, test.precision)\n\t\tif e != nil {\n\t\t\tt.Fatalf(\"unexpected error: expected %v, actual: %v\", nil, e)\n\t\t}\n\t\tif tm != test.expected {\n\t\t\tt.Fatalf(\"unexpected time: expected %v, actual %v\", test.expected, tm)\n\t\t}\n\t}\n}\n\n// helper functions\n\nfunc emptyTestServer() *httptest.Server {\n\treturn httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttime.Sleep(50 * time.Millisecond)\n\t\tw.Header().Set(\"X-Influxdb-Version\", \"x.x\")\n\t\treturn\n\t}))\n}\n\n// Ensure that data with epoch times can be decoded.\nfunc TestBatchPoints_Normal(t *testing.T) {\n\tvar bp client.BatchPoints\n\tdata := []byte(`\n{\n    \"database\": \"foo\",\n    \"retentionPolicy\": \"bar\",\n    \"points\": [\n        {\n            \"name\": \"cpu\",\n       
     \"tags\": {\n                \"host\": \"server01\"\n            },\n            \"time\": 14244733039069373,\n            \"precision\": \"n\",\n            \"values\": {\n                    \"value\": 4541770385657154000\n            }\n        },\n        {\n            \"name\": \"cpu\",\n             \"tags\": {\n                \"host\": \"server01\"\n            },\n            \"time\": 14244733039069380,\n            \"precision\": \"n\",\n            \"values\": {\n                    \"value\": 7199311900554737000\n            }\n        }\n    ]\n}\n`)\n\n\tif err := json.Unmarshal(data, &bp); err != nil {\n\t\tt.Errorf(\"unable to unmarshal nanosecond data: %s\", err.Error())\n\t}\n}\n\nfunc TestClient_Timeout(t *testing.T) {\n\tdone := make(chan bool)\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t<-done\n\t}))\n\tdefer ts.Close()\n\tdefer func() { done <- true }()\n\tu, _ := url.Parse(ts.URL)\n\tconfig := client.Config{URL: *u, Timeout: 500 * time.Millisecond}\n\tc, err := client.NewClient(config)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error. expected %v, actual %v\", nil, err)\n\t}\n\tquery := client.Query{}\n\t_, err = c.Query(query)\n\tif err == nil {\n\t\tt.Fatalf(\"unexpected success. expected timeout error\")\n\t} else if !strings.Contains(err.Error(), \"request canceled\") &&\n\t\t!strings.Contains(err.Error(), \"use of closed network connection\") {\n\t\tt.Fatalf(\"unexpected error. 
expected 'request canceled' error, got %v\", err)\n\t}\n}\n\nfunc TestClient_NoTimeout(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping in short mode\")\n\t}\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttime.Sleep(1 * time.Second)\n\t\tvar data client.Response\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_ = json.NewEncoder(w).Encode(data)\n\t}))\n\tdefer ts.Close()\n\n\tu, _ := url.Parse(ts.URL)\n\tconfig := client.Config{URL: *u}\n\tc, err := client.NewClient(config)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\n\tquery := client.Query{}\n\t_, err = c.Query(query)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n}\n\nfunc TestClient_WriteUint64(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar data client.Response\n\t\tw.WriteHeader(http.StatusNoContent)\n\t\t_ = json.NewEncoder(w).Encode(data)\n\t}))\n\tdefer ts.Close()\n\n\tu, _ := url.Parse(ts.URL)\n\tconfig := client.Config{URL: *u}\n\tc, err := client.NewClient(config)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\tbp := client.BatchPoints{\n\t\tPoints: []client.Point{\n\t\t\t{\n\t\t\t\tFields: map[string]interface{}{\"value\": uint64(10)},\n\t\t\t},\n\t\t},\n\t}\n\tr, err := c.Write(bp)\n\tif err == nil {\n\t\tt.Fatalf(\"unexpected error. expected err, actual %v\", err)\n\t}\n\tif r != nil {\n\t\tt.Fatalf(\"unexpected response. 
expected %v, actual %v\", nil, r)\n\t}\n}\n\nfunc TestClient_ParseConnectionString_IPv6(t *testing.T) {\n\tpath := \"[fdf5:9ede:1875:0:a9ee:a600:8fe3:d495]:8086\"\n\tu, err := client.ParseConnectionString(path, false)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error, expected %v, actual %v\", nil, err)\n\t}\n\tif u.Host != path {\n\t\tt.Fatalf(\"ipv6 parse failed, expected %s, actual %s\", path, u.Host)\n\t}\n}\n\nfunc TestClient_CustomCertificates(t *testing.T) {\n\t// generated with:\n\t// openssl req -x509 -newkey rsa:2048 -keyout key.pem -out cert.pem -days 3650 -nodes -config influx.cnf\n\t// influx.cnf:\n\t// [req]\n\t// distinguished_name = req_distinguished_name\n\t// x509_extensions = v3_req\n\t// prompt = no\n\t// [req_distinguished_name]\n\t// C = US\n\t// ST = CA\n\t// L = San Francisco\n\t// O = InfluxDB\n\t// CN = github.com/influxdata\n\t// [v3_req]\n\t// keyUsage = keyEncipherment, dataEncipherment\n\t// extendedKeyUsage = serverAuth\n\t// subjectAltName = @alt_names\n\t// [alt_names]\n\t// IP.1 = 127.0.0.1\n\t//\n\tkey := `\n-----BEGIN PRIVATE 
KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDLswqKJLxfhBRi\n4qdj7+jpBxTAi4MewrcMPp+9YlbLke3F7w2DPrZVkYVeWmg8LyTPAigrXeadK6hv\nqjRr05a7sMc5+ynivGbWUySWT+u17V85x6VR5TMIkJEOqpiIU8aYk0l+3UcrzVjS\n1QZCUBoxVwAVaSR6AXTA8YrVXdk/AI3f22dYiBjFmV4LJJkGjTaCnlDKu54hMU1t\nWTyFcoY9TBzZ1XA+ng5RQ/QADeL2PYrTW4s/mLI3jfKKD53EI4uM2FjW37ZfuxTa\nmhCR7/lxM4COg9K70y5uebfqJvuoXAwXLOzVbdfF5b9fJFbL67kaK2tiMT3Wt39m\nhXzclLTDAgMBAAECggEAK8mpElkjRUUXPMqMQSdpYe5rv5g973bb8n3jyMpC7i/I\ndSwWM4hfmbVWfhnhHk7kErvb9raQxGiGJLrp2eP6Gw69RPGA54SodpoY21cCzHDi\nb4FDQH+MoOKyy/xQHb4kitfejK70ha320huI5OhjOQgCtJeNh8yYVIGX3pX2BVyu\n36UB9tfX1S5pbiHeih3vZGd322Muj/joNzIelnYRBnoO0xqvQ0S1Dk+dLCTHO0/m\nu9AZN8c2TsRWZpJPMWwBv8LuABbE0e66/TSsrfklAn86ELCo44lZURDE7uPZ4pIH\nFWtmf+nW5Hy6aPhy60E40MqotlejhWwB3ktY/m3JAQKBgQDuB4nhxzJA9lH9EaCt\nbyvJ9wGVvI3k79hOwc/Z2R3dNe+Ma+TJy+aBppvsLF4qz83aWC+canyasbHcPNR/\nvXQGlsgKfucrmd1PfMV7uvOIkfOjK0E6mRC+jMuKtNTQrdtM1BU/Z7LY0iy0fNJ6\naNqhFdlJmmk0g+4bR4SAWB6FkwKBgQDbE/7r1u+GdJk/mhdjTi1aegr9lXb0l7L6\nBCvOYhs/Z/pXfsaYPSXhgk2w+LiGk6BaEA2/4Sr0YS2MAAaIhBVeFBIXVpNrXB3K\nYg1jOEeLQ3qoVBeJFhJNrN9ZQx33HANC1W/Y1apMwaYqCRUGVQkrdcsN2KNea1z0\n3qeYeCCSEQKBgCKZKeuNfrp+k1BLnaVYAW9r3ekb7SwXyMM53LJ3oqWiz10D2c+T\nOcAirYtYr59dcTiJlPIRcGcz6PxwQxsGOLU0eYM9CvEFfmutYS8o73ksbdOL2AFi\nelKYOIXC3yQuATBbq3L56b8mXaUmd5mfYBgGCv1t2ljtzFBext248UbNAoGBAIv1\n2V24YiwnH6THf/ucfVMZNx5Mt8OJivk5YvcmLDw05HWzc5LdNe89PP871z963u3K\n5c3ZP4UC9INFnOboY3JIJkqsr9/d6NZcECt8UBDDmoAhwSt+Y1EmiUZQn7s4NUkk\nbKE919/Ts6GVTc5O013lkkUVS0HOG4QBH1dEH6LRAoGAStl11WA9tuKXiBl5XG/C\ncq9mFPNJK3pEgd6YH874vEnYEEqENR4MFK3uWXus9Nm+VYxbUbPEzFF4kpsfukDg\n/JAVqY4lUam7g6fyyaoIIPQEp7jGjbsUf46IjnUjFcaojOugA3EAfn9awREUDuJZ\ncvh4WzEegcExTppINW1NB5E=\n-----END PRIVATE KEY-----\n`\n\tcert := `\n-----BEGIN 
CERTIFICATE-----\nMIIDdjCCAl6gAwIBAgIJAMYGAwkxUV51MA0GCSqGSIb3DQEBCwUAMFgxCzAJBgNV\nBAYTAlVTMQswCQYDVQQIDAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzERMA8G\nA1UECgwISW5mbHV4REIxETAPBgNVBAMMCGluZmx1eGRiMB4XDTE1MTIyOTAxNTg1\nNloXDTI1MTIyNjAxNTg1NlowWDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRYw\nFAYDVQQHDA1TYW4gRnJhbmNpc2NvMREwDwYDVQQKDAhJbmZsdXhEQjERMA8GA1UE\nAwwIaW5mbHV4ZGIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLswqK\nJLxfhBRi4qdj7+jpBxTAi4MewrcMPp+9YlbLke3F7w2DPrZVkYVeWmg8LyTPAigr\nXeadK6hvqjRr05a7sMc5+ynivGbWUySWT+u17V85x6VR5TMIkJEOqpiIU8aYk0l+\n3UcrzVjS1QZCUBoxVwAVaSR6AXTA8YrVXdk/AI3f22dYiBjFmV4LJJkGjTaCnlDK\nu54hMU1tWTyFcoY9TBzZ1XA+ng5RQ/QADeL2PYrTW4s/mLI3jfKKD53EI4uM2FjW\n37ZfuxTamhCR7/lxM4COg9K70y5uebfqJvuoXAwXLOzVbdfF5b9fJFbL67kaK2ti\nMT3Wt39mhXzclLTDAgMBAAGjQzBBMAwGA1UdEwQFMAMBAf8wCwYDVR0PBAQDAgQw\nMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZIhvcN\nAQELBQADggEBAJxgHeduV9q2BuKnrt+sjXLGn/HwbMbgGbgFK6kUKJBWtv6Pa7JJ\nm4teDmTMWiaeB2g4N2bmaWTuEZzzShNKG5roFeWm1ilFMAyzkb+VifN4YuDKH62F\n3e259qsytiGbbJF3F//4sjfMw8qZVEPvspG1zKsASo0PpSOOUFmxcj0oMAXhnMrk\nrRcbk6fufhyq0iZGl8ZLKTCrkjk0b3qlNs6UaRD9/XBB59VlQ8I338sfjV06edwY\njn5Amab0uyoFNEp70Y4WGxrxUTS1GAC1LCA13S7EnidD440UrnWALTarjmHAK6aW\nwar3JNM1mGB3o2iAtuOJlFIKLpI1x+1e8pI=\n-----END CERTIFICATE-----\n`\n\tcer, err := tls.X509KeyPair([]byte(cert), []byte(key))\n\n\tif err != nil {\n\t\tt.Fatalf(\"Received error: %v\", err)\n\t}\n\n\tserver := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar data client.Response\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_ = json.NewEncoder(w).Encode(data)\n\t}))\n\tserver.TLS = &tls.Config{Certificates: []tls.Certificate{cer}}\n\tserver.TLS.BuildNameToCertificate()\n\tserver.StartTLS()\n\tdefer server.Close()\n\n\tcertFile, _ := ioutil.TempFile(\"\", \"influx-cert-\")\n\tcertFile.WriteString(cert)\n\tcertFile.Close()\n\tdefer os.Remove(certFile.Name())\n\n\tu, _ := url.Parse(server.URL)\n\n\ttests := []struct {\n\t\tname      
string\n\t\tunsafeSsl bool\n\t\texpected  error\n\t}{\n\t\t{name: \"validate certificates\", unsafeSsl: false, expected: errors.New(\"error\")},\n\t\t{name: \"not validate certificates\", unsafeSsl: true, expected: nil},\n\t}\n\n\tfor _, test := range tests {\n\t\tconfig := client.Config{URL: *u, UnsafeSsl: test.unsafeSsl}\n\t\tc, err := client.NewClient(config)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error. expected %v, actual %v\", nil, err)\n\t\t}\n\t\tquery := client.Query{}\n\t\t_, err = c.Query(query)\n\n\t\tif (test.expected == nil) != (err == nil) {\n\t\t\tt.Fatalf(\"%s: expected %v. got %v. unsafeSsl: %v\", test.name, test.expected, err, test.unsafeSsl)\n\t\t}\n\t}\n}\n\nfunc TestChunkedResponse(t *testing.T) {\n\ts := `{\"results\":[{},{}]}{\"results\":[{}]}`\n\tr := client.NewChunkedResponse(strings.NewReader(s))\n\tresp, err := r.NextResponse()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t} else if actual := len(resp.Results); actual != 2 {\n\t\tt.Fatalf(\"unexpected number of results.  expected %v, actual %v\", 2, actual)\n\t}\n\n\tresp, err = r.NextResponse()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t} else if actual := len(resp.Results); actual != 1 {\n\t\tt.Fatalf(\"unexpected number of results.  expected %v, actual %v\", 1, actual)\n\t}\n\n\tresp, err = r.NextResponse()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t} else if resp != nil {\n\t\tt.Fatalf(\"unexpected response.  expected %v, actual %v\", nil, resp)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/client/v2/client.go",
    "content": "// Package client (v2) is the current official Go client for InfluxDB.\npackage client // import \"github.com/influxdata/influxdb/client/v2\"\n\nimport (\n\t\"bytes\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n)\n\n// HTTPConfig is the config data needed to create an HTTP Client.\ntype HTTPConfig struct {\n\t// Addr should be of the form \"http://host:port\"\n\t// or \"http://[ipv6-host%zone]:port\".\n\tAddr string\n\n\t// Username is the influxdb username, optional.\n\tUsername string\n\n\t// Password is the influxdb password, optional.\n\tPassword string\n\n\t// UserAgent is the http User Agent, defaults to \"InfluxDBClient\".\n\tUserAgent string\n\n\t// Timeout for influxdb writes, defaults to no timeout.\n\tTimeout time.Duration\n\n\t// InsecureSkipVerify gets passed to the http client, if true, it will\n\t// skip https certificate verification. Defaults to false.\n\tInsecureSkipVerify bool\n\n\t// TLSConfig allows the user to set their own TLS config for the HTTP\n\t// Client. 
If set, this option overrides InsecureSkipVerify.\n\tTLSConfig *tls.Config\n}\n\n// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct.\ntype BatchPointsConfig struct {\n\t// Precision is the write precision of the points, defaults to \"ns\".\n\tPrecision string\n\n\t// Database is the database to write points to.\n\tDatabase string\n\n\t// RetentionPolicy is the retention policy of the points.\n\tRetentionPolicy string\n\n\t// Write consistency is the number of servers required to confirm write.\n\tWriteConsistency string\n}\n\n// Client is a client interface for writing & querying the database.\ntype Client interface {\n\t// Ping checks that status of cluster, and will always return 0 time and no\n\t// error for UDP clients.\n\tPing(timeout time.Duration) (time.Duration, string, error)\n\n\t// Write takes a BatchPoints object and writes all Points to InfluxDB.\n\tWrite(bp BatchPoints) error\n\n\t// Query makes an InfluxDB Query on the database. 
This will fail if using\n\t// the UDP client.\n\tQuery(q Query) (*Response, error)\n\n\t// Close releases any resources a Client may be using.\n\tClose() error\n}\n\n// NewHTTPClient returns a new Client from the provided config.\n// Client is safe for concurrent use by multiple goroutines.\nfunc NewHTTPClient(conf HTTPConfig) (Client, error) {\n\tif conf.UserAgent == \"\" {\n\t\tconf.UserAgent = \"InfluxDBClient\"\n\t}\n\n\tu, err := url.Parse(conf.Addr)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if u.Scheme != \"http\" && u.Scheme != \"https\" {\n\t\tm := fmt.Sprintf(\"Unsupported protocol scheme: %s, your address\"+\n\t\t\t\" must start with http:// or https://\", u.Scheme)\n\t\treturn nil, errors.New(m)\n\t}\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: conf.InsecureSkipVerify,\n\t\t},\n\t}\n\tif conf.TLSConfig != nil {\n\t\ttr.TLSClientConfig = conf.TLSConfig\n\t}\n\treturn &client{\n\t\turl:       *u,\n\t\tusername:  conf.Username,\n\t\tpassword:  conf.Password,\n\t\tuseragent: conf.UserAgent,\n\t\thttpClient: &http.Client{\n\t\t\tTimeout:   conf.Timeout,\n\t\t\tTransport: tr,\n\t\t},\n\t\ttransport: tr,\n\t}, nil\n}\n\n// Ping will check to see if the server is up with an optional timeout on waiting for leader.\n// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.\nfunc (c *client) Ping(timeout time.Duration) (time.Duration, string, error) {\n\tnow := time.Now()\n\tu := c.url\n\tu.Path = \"ping\"\n\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn 0, \"\", err\n\t}\n\n\treq.Header.Set(\"User-Agent\", c.useragent)\n\n\tif c.username != \"\" {\n\t\treq.SetBasicAuth(c.username, c.password)\n\t}\n\n\tif timeout > 0 {\n\t\tparams := req.URL.Query()\n\t\tparams.Set(\"wait_for_leader\", fmt.Sprintf(\"%.0fs\", timeout.Seconds()))\n\t\treq.URL.RawQuery = params.Encode()\n\t}\n\n\tresp, err := 
c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn 0, \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn 0, \"\", err\n\t}\n\n\tif resp.StatusCode != http.StatusNoContent {\n\t\tvar err = fmt.Errorf(string(body))\n\t\treturn 0, \"\", err\n\t}\n\n\tversion := resp.Header.Get(\"X-Influxdb-Version\")\n\treturn time.Since(now), version, nil\n}\n\n// Close releases the client's resources.\nfunc (c *client) Close() error {\n\tc.transport.CloseIdleConnections()\n\treturn nil\n}\n\n// client is safe for concurrent use as the fields are all read-only\n// once the client is instantiated.\ntype client struct {\n\t// N.B - if url.UserInfo is accessed in future modifications to the\n\t// methods on client, you will need to syncronise access to url.\n\turl        url.URL\n\tusername   string\n\tpassword   string\n\tuseragent  string\n\thttpClient *http.Client\n\ttransport  *http.Transport\n}\n\n// BatchPoints is an interface into a batched grouping of points to write into\n// InfluxDB together. 
BatchPoints is NOT thread-safe, you must create a separate\n// batch for each goroutine.\ntype BatchPoints interface {\n\t// AddPoint adds the given point to the Batch of points.\n\tAddPoint(p *Point)\n\t// AddPoints adds the given points to the Batch of points.\n\tAddPoints(ps []*Point)\n\t// Points lists the points in the Batch.\n\tPoints() []*Point\n\n\t// Precision returns the currently set precision of this Batch.\n\tPrecision() string\n\t// SetPrecision sets the precision of this batch.\n\tSetPrecision(s string) error\n\n\t// Database returns the currently set database of this Batch.\n\tDatabase() string\n\t// SetDatabase sets the database of this Batch.\n\tSetDatabase(s string)\n\n\t// WriteConsistency returns the currently set write consistency of this Batch.\n\tWriteConsistency() string\n\t// SetWriteConsistency sets the write consistency of this Batch.\n\tSetWriteConsistency(s string)\n\n\t// RetentionPolicy returns the currently set retention policy of this Batch.\n\tRetentionPolicy() string\n\t// SetRetentionPolicy sets the retention policy of this Batch.\n\tSetRetentionPolicy(s string)\n}\n\n// NewBatchPoints returns a BatchPoints interface based on the given config.\nfunc NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) {\n\tif conf.Precision == \"\" {\n\t\tconf.Precision = \"ns\"\n\t}\n\tif _, err := time.ParseDuration(\"1\" + conf.Precision); err != nil {\n\t\treturn nil, err\n\t}\n\tbp := &batchpoints{\n\t\tdatabase:         conf.Database,\n\t\tprecision:        conf.Precision,\n\t\tretentionPolicy:  conf.RetentionPolicy,\n\t\twriteConsistency: conf.WriteConsistency,\n\t}\n\treturn bp, nil\n}\n\ntype batchpoints struct {\n\tpoints           []*Point\n\tdatabase         string\n\tprecision        string\n\tretentionPolicy  string\n\twriteConsistency string\n}\n\nfunc (bp *batchpoints) AddPoint(p *Point) {\n\tbp.points = append(bp.points, p)\n}\n\nfunc (bp *batchpoints) AddPoints(ps []*Point) {\n\tbp.points = append(bp.points, 
ps...)\n}\n\nfunc (bp *batchpoints) Points() []*Point {\n\treturn bp.points\n}\n\nfunc (bp *batchpoints) Precision() string {\n\treturn bp.precision\n}\n\nfunc (bp *batchpoints) Database() string {\n\treturn bp.database\n}\n\nfunc (bp *batchpoints) WriteConsistency() string {\n\treturn bp.writeConsistency\n}\n\nfunc (bp *batchpoints) RetentionPolicy() string {\n\treturn bp.retentionPolicy\n}\n\nfunc (bp *batchpoints) SetPrecision(p string) error {\n\tif _, err := time.ParseDuration(\"1\" + p); err != nil {\n\t\treturn err\n\t}\n\tbp.precision = p\n\treturn nil\n}\n\nfunc (bp *batchpoints) SetDatabase(db string) {\n\tbp.database = db\n}\n\nfunc (bp *batchpoints) SetWriteConsistency(wc string) {\n\tbp.writeConsistency = wc\n}\n\nfunc (bp *batchpoints) SetRetentionPolicy(rp string) {\n\tbp.retentionPolicy = rp\n}\n\n// Point represents a single data point.\ntype Point struct {\n\tpt models.Point\n}\n\n// NewPoint returns a point with the given timestamp. If a timestamp is not\n// given, then data is sent to the database without a timestamp, in which case\n// the server will assign local time upon reception. 
NOTE: it is recommended to\n// send data with a timestamp.\nfunc NewPoint(\n\tname string,\n\ttags map[string]string,\n\tfields map[string]interface{},\n\tt ...time.Time,\n) (*Point, error) {\n\tvar T time.Time\n\tif len(t) > 0 {\n\t\tT = t[0]\n\t}\n\n\tpt, err := models.NewPoint(name, models.NewTags(tags), fields, T)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Point{\n\t\tpt: pt,\n\t}, nil\n}\n\n// String returns a line-protocol string of the Point.\nfunc (p *Point) String() string {\n\treturn p.pt.String()\n}\n\n// PrecisionString returns a line-protocol string of the Point,\n// with the timestamp formatted for the given precision.\nfunc (p *Point) PrecisionString(precison string) string {\n\treturn p.pt.PrecisionString(precison)\n}\n\n// Name returns the measurement name of the point.\nfunc (p *Point) Name() string {\n\treturn string(p.pt.Name())\n}\n\n// Tags returns the tags associated with the point.\nfunc (p *Point) Tags() map[string]string {\n\treturn p.pt.Tags().Map()\n}\n\n// Time return the timestamp for the point.\nfunc (p *Point) Time() time.Time {\n\treturn p.pt.Time()\n}\n\n// UnixNano returns timestamp of the point in nanoseconds since Unix epoch.\nfunc (p *Point) UnixNano() int64 {\n\treturn p.pt.UnixNano()\n}\n\n// Fields returns the fields for the point.\nfunc (p *Point) Fields() (map[string]interface{}, error) {\n\treturn p.pt.Fields()\n}\n\n// NewPointFrom returns a point from the provided models.Point.\nfunc NewPointFrom(pt models.Point) *Point {\n\treturn &Point{pt: pt}\n}\n\nfunc (c *client) Write(bp BatchPoints) error {\n\tvar b bytes.Buffer\n\n\tfor _, p := range bp.Points() {\n\t\tif _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := b.WriteByte('\\n'); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tu := c.url\n\tu.Path = \"write\"\n\treq, err := http.NewRequest(\"POST\", u.String(), &b)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\treq.Header.Set(\"Content-Type\", \"\")\n\treq.Header.Set(\"User-Agent\", c.useragent)\n\tif c.username != \"\" {\n\t\treq.SetBasicAuth(c.username, c.password)\n\t}\n\n\tparams := req.URL.Query()\n\tparams.Set(\"db\", bp.Database())\n\tparams.Set(\"rp\", bp.RetentionPolicy())\n\tparams.Set(\"precision\", bp.Precision())\n\tparams.Set(\"consistency\", bp.WriteConsistency())\n\treq.URL.RawQuery = params.Encode()\n\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {\n\t\tvar err = fmt.Errorf(string(body))\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// Query defines a query to send to the server.\ntype Query struct {\n\tCommand    string\n\tDatabase   string\n\tPrecision  string\n\tChunked    bool\n\tChunkSize  int\n\tParameters map[string]interface{}\n}\n\n// NewQuery returns a query object.\n// The database and precision arguments can be empty strings if they are not needed for the query.\nfunc NewQuery(command, database, precision string) Query {\n\treturn Query{\n\t\tCommand:    command,\n\t\tDatabase:   database,\n\t\tPrecision:  precision,\n\t\tParameters: make(map[string]interface{}),\n\t}\n}\n\n// NewQueryWithParameters returns a query object.\n// The database and precision arguments can be empty strings if they are not needed for the query.\n// parameters is a map of the parameter names used in the command to their values.\nfunc NewQueryWithParameters(command, database, precision string, parameters map[string]interface{}) Query {\n\treturn Query{\n\t\tCommand:    command,\n\t\tDatabase:   database,\n\t\tPrecision:  precision,\n\t\tParameters: parameters,\n\t}\n}\n\n// Response represents a list of statement results.\ntype Response struct {\n\tResults []Result\n\tErr     string `json:\"error,omitempty\"`\n}\n\n// Error returns the 
first error from any statement.\n// It returns nil if no errors occurred on any statements.\nfunc (r *Response) Error() error {\n\tif r.Err != \"\" {\n\t\treturn fmt.Errorf(r.Err)\n\t}\n\tfor _, result := range r.Results {\n\t\tif result.Err != \"\" {\n\t\t\treturn fmt.Errorf(result.Err)\n\t\t}\n\t}\n\treturn nil\n}\n\n// Message represents a user message.\ntype Message struct {\n\tLevel string\n\tText  string\n}\n\n// Result represents a resultset returned from a single statement.\ntype Result struct {\n\tSeries   []models.Row\n\tMessages []*Message\n\tErr      string `json:\"error,omitempty\"`\n}\n\n// Query sends a command to the server and returns the Response.\nfunc (c *client) Query(q Query) (*Response, error) {\n\tu := c.url\n\tu.Path = \"query\"\n\n\tjsonParameters, err := json.Marshal(q.Parameters)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"\")\n\treq.Header.Set(\"User-Agent\", c.useragent)\n\n\tif c.username != \"\" {\n\t\treq.SetBasicAuth(c.username, c.password)\n\t}\n\n\tparams := req.URL.Query()\n\tparams.Set(\"q\", q.Command)\n\tparams.Set(\"db\", q.Database)\n\tparams.Set(\"params\", string(jsonParameters))\n\tif q.Chunked {\n\t\tparams.Set(\"chunked\", \"true\")\n\t\tif q.ChunkSize > 0 {\n\t\t\tparams.Set(\"chunk_size\", strconv.Itoa(q.ChunkSize))\n\t\t}\n\t}\n\n\tif q.Precision != \"\" {\n\t\tparams.Set(\"epoch\", q.Precision)\n\t}\n\treq.URL.RawQuery = params.Encode()\n\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar response Response\n\tif q.Chunked {\n\t\tcr := NewChunkedResponse(resp.Body)\n\t\tfor {\n\t\t\tr, err := cr.NextResponse()\n\t\t\tif err != nil {\n\t\t\t\t// If we got an error while decoding the response, send that back.\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif r == nil 
{\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tresponse.Results = append(response.Results, r.Results...)\n\t\t\tif r.Err != \"\" {\n\t\t\t\tresponse.Err = r.Err\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t} else {\n\t\tdec := json.NewDecoder(resp.Body)\n\t\tdec.UseNumber()\n\t\tdecErr := dec.Decode(&response)\n\n\t\t// ignore this error if we got an invalid status code\n\t\tif decErr != nil && decErr.Error() == \"EOF\" && resp.StatusCode != http.StatusOK {\n\t\t\tdecErr = nil\n\t\t}\n\t\t// If we got a valid decode error, send that back\n\t\tif decErr != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to decode json: received status code %d err: %s\", resp.StatusCode, decErr)\n\t\t}\n\t}\n\t// If we don't have an error in our json response, and didn't get statusOK\n\t// then send back an error\n\tif resp.StatusCode != http.StatusOK && response.Error() == nil {\n\t\treturn &response, fmt.Errorf(\"received status code %d from server\",\n\t\t\tresp.StatusCode)\n\t}\n\treturn &response, nil\n}\n\n// duplexReader reads responses and writes it to another writer while\n// satisfying the reader interface.\ntype duplexReader struct {\n\tr io.Reader\n\tw io.Writer\n}\n\nfunc (r *duplexReader) Read(p []byte) (n int, err error) {\n\tn, err = r.r.Read(p)\n\tif err == nil {\n\t\tr.w.Write(p[:n])\n\t}\n\treturn n, err\n}\n\n// ChunkedResponse represents a response from the server that\n// uses chunking to stream the output.\ntype ChunkedResponse struct {\n\tdec    *json.Decoder\n\tduplex *duplexReader\n\tbuf    bytes.Buffer\n}\n\n// NewChunkedResponse reads a stream and produces responses from the stream.\nfunc NewChunkedResponse(r io.Reader) *ChunkedResponse {\n\tresp := &ChunkedResponse{}\n\tresp.duplex = &duplexReader{r: r, w: &resp.buf}\n\tresp.dec = json.NewDecoder(resp.duplex)\n\tresp.dec.UseNumber()\n\treturn resp\n}\n\n// NextResponse reads the next line of the stream and returns a response.\nfunc (r *ChunkedResponse) NextResponse() (*Response, error) {\n\tvar response Response\n\n\tif err := 
r.dec.Decode(&response); err != nil {\n\t\tif err == io.EOF {\n\t\t\treturn nil, nil\n\t\t}\n\t\t// A decoding error happened. This probably means the server crashed\n\t\t// and sent a last-ditch error message to us. Ensure we have read the\n\t\t// entirety of the connection to get any remaining error text.\n\t\tio.Copy(ioutil.Discard, r.duplex)\n\t\treturn nil, errors.New(strings.TrimSpace(r.buf.String()))\n\t}\n\n\tr.buf.Reset()\n\treturn &response, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/client/v2/client_test.go",
    "content": "package client\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestUDPClient_Query(t *testing.T) {\n\tconfig := UDPConfig{Addr: \"localhost:8089\"}\n\tc, err := NewUDPClient(config)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\tdefer c.Close()\n\tquery := Query{}\n\t_, err = c.Query(query)\n\tif err == nil {\n\t\tt.Error(\"Querying UDP client should fail\")\n\t}\n}\n\nfunc TestUDPClient_Ping(t *testing.T) {\n\tconfig := UDPConfig{Addr: \"localhost:8089\"}\n\tc, err := NewUDPClient(config)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\tdefer c.Close()\n\n\trtt, version, err := c.Ping(0)\n\tif rtt != 0 || version != \"\" || err != nil {\n\t\tt.Errorf(\"unexpected error.  expected (%v, '%v', %v), actual (%v, '%v', %v)\", 0, \"\", nil, rtt, version, err)\n\t}\n}\n\nfunc TestUDPClient_Write(t *testing.T) {\n\tconfig := UDPConfig{Addr: \"localhost:8089\"}\n\tc, err := NewUDPClient(config)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\tdefer c.Close()\n\n\tbp, err := NewBatchPoints(BatchPointsConfig{})\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\n\tfields := make(map[string]interface{})\n\tfields[\"value\"] = 1.0\n\tpt, _ := NewPoint(\"cpu\", make(map[string]string), fields)\n\tbp.AddPoint(pt)\n\n\terr = c.Write(bp)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error.  
expected %v, actual %v\", nil, err)\n\t}\n}\n\nfunc TestUDPClient_BadAddr(t *testing.T) {\n\tconfig := UDPConfig{Addr: \"foobar@wahoo\"}\n\tc, err := NewUDPClient(config)\n\tif err == nil {\n\t\tdefer c.Close()\n\t\tt.Error(\"Expected resolve error\")\n\t}\n}\n\nfunc TestUDPClient_Batches(t *testing.T) {\n\tvar logger writeLogger\n\tvar cl udpclient\n\n\tcl.conn = &logger\n\tcl.payloadSize = 20 // should allow for two points per batch\n\n\t// expected point should look like this: \"cpu a=1i\"\n\tfields := map[string]interface{}{\"a\": 1}\n\n\tp, _ := NewPoint(\"cpu\", nil, fields, time.Time{})\n\n\tbp, _ := NewBatchPoints(BatchPointsConfig{})\n\n\tfor i := 0; i < 9; i++ {\n\t\tbp.AddPoint(p)\n\t}\n\n\tif err := cl.Write(bp); err != nil {\n\t\tt.Fatalf(\"Unexpected error during Write: %v\", err)\n\t}\n\n\tif len(logger.writes) != 5 {\n\t\tt.Errorf(\"Mismatched write count: got %v, exp %v\", len(logger.writes), 5)\n\t}\n}\n\nfunc TestUDPClient_Split(t *testing.T) {\n\tvar logger writeLogger\n\tvar cl udpclient\n\n\tcl.conn = &logger\n\tcl.payloadSize = 1 // force one field per point\n\n\tfields := map[string]interface{}{\"a\": 1, \"b\": 2, \"c\": 3, \"d\": 4}\n\n\tp, _ := NewPoint(\"cpu\", nil, fields, time.Unix(1, 0))\n\n\tbp, _ := NewBatchPoints(BatchPointsConfig{})\n\n\tbp.AddPoint(p)\n\n\tif err := cl.Write(bp); err != nil {\n\t\tt.Fatalf(\"Unexpected error during Write: %v\", err)\n\t}\n\n\tif len(logger.writes) != len(fields) {\n\t\tt.Errorf(\"Mismatched write count: got %v, exp %v\", len(logger.writes), len(fields))\n\t}\n}\n\ntype writeLogger struct {\n\twrites [][]byte\n}\n\nfunc (w *writeLogger) Write(b []byte) (int, error) {\n\tw.writes = append(w.writes, append([]byte(nil), b...))\n\treturn len(b), nil\n}\n\nfunc (w *writeLogger) Close() error { return nil }\n\nfunc TestClient_Query(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar data Response\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_ = 
json.NewEncoder(w).Encode(data)\n\t}))\n\tdefer ts.Close()\n\n\tconfig := HTTPConfig{Addr: ts.URL}\n\tc, _ := NewHTTPClient(config)\n\tdefer c.Close()\n\n\tquery := Query{}\n\t_, err := c.Query(query)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n}\n\nfunc TestClient_ChunkedQuery(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar data Response\n\t\tw.WriteHeader(http.StatusOK)\n\t\tenc := json.NewEncoder(w)\n\t\t_ = enc.Encode(data)\n\t\t_ = enc.Encode(data)\n\t}))\n\tdefer ts.Close()\n\n\tconfig := HTTPConfig{Addr: ts.URL}\n\tc, err := NewHTTPClient(config)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\n\tquery := Query{Chunked: true}\n\t_, err = c.Query(query)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n}\n\nfunc TestClient_BoundParameters(t *testing.T) {\n\tvar parameterString string\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar data Response\n\t\tr.ParseForm()\n\t\tparameterString = r.FormValue(\"params\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_ = json.NewEncoder(w).Encode(data)\n\t}))\n\tdefer ts.Close()\n\n\tconfig := HTTPConfig{Addr: ts.URL}\n\tc, _ := NewHTTPClient(config)\n\tdefer c.Close()\n\n\texpectedParameters := map[string]interface{}{\n\t\t\"testStringParameter\": \"testStringValue\",\n\t\t\"testNumberParameter\": 12.3,\n\t}\n\n\tquery := Query{\n\t\tParameters: expectedParameters,\n\t}\n\n\t_, err := c.Query(query)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\n\tvar actualParameters map[string]interface{}\n\n\terr = json.Unmarshal([]byte(parameterString), &actualParameters)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error. 
expected %v, actual %v\", nil, err)\n\t}\n\n\tif !reflect.DeepEqual(expectedParameters, actualParameters) {\n\t\tt.Errorf(\"unexpected parameters. expected %v, actual %v\", expectedParameters, actualParameters)\n\t}\n}\n\nfunc TestClient_BasicAuth(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tu, p, ok := r.BasicAuth()\n\n\t\tif !ok {\n\t\t\tt.Errorf(\"basic auth error\")\n\t\t}\n\t\tif u != \"username\" {\n\t\t\tt.Errorf(\"unexpected username, expected %q, actual %q\", \"username\", u)\n\t\t}\n\t\tif p != \"password\" {\n\t\t\tt.Errorf(\"unexpected password, expected %q, actual %q\", \"password\", p)\n\t\t}\n\t\tvar data Response\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_ = json.NewEncoder(w).Encode(data)\n\t}))\n\tdefer ts.Close()\n\n\tconfig := HTTPConfig{Addr: ts.URL, Username: \"username\", Password: \"password\"}\n\tc, _ := NewHTTPClient(config)\n\tdefer c.Close()\n\n\tquery := Query{}\n\t_, err := c.Query(query)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n}\n\nfunc TestClient_Ping(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar data Response\n\t\tw.WriteHeader(http.StatusNoContent)\n\t\t_ = json.NewEncoder(w).Encode(data)\n\t}))\n\tdefer ts.Close()\n\n\tconfig := HTTPConfig{Addr: ts.URL}\n\tc, _ := NewHTTPClient(config)\n\tdefer c.Close()\n\n\t_, _, err := c.Ping(0)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error.  
expected %v, actual %v\", nil, err)\n\t}\n}\n\nfunc TestClient_Concurrent_Use(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(`{}`))\n\t}))\n\tdefer ts.Close()\n\n\tconfig := HTTPConfig{Addr: ts.URL}\n\tc, _ := NewHTTPClient(config)\n\tdefer c.Close()\n\n\tvar wg sync.WaitGroup\n\twg.Add(3)\n\tn := 1000\n\n\terrC := make(chan error)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tbp, err := NewBatchPoints(BatchPointsConfig{})\n\t\tif err != nil {\n\t\t\terrC <- fmt.Errorf(\"got error %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tfor i := 0; i < n; i++ {\n\t\t\tif err = c.Write(bp); err != nil {\n\t\t\t\terrC <- fmt.Errorf(\"got error %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tvar q Query\n\t\tfor i := 0; i < n; i++ {\n\t\t\tif _, err := c.Query(q); err != nil {\n\t\t\t\terrC <- fmt.Errorf(\"got error %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor i := 0; i < n; i++ {\n\t\t\tc.Ping(time.Second)\n\t\t}\n\t}()\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(errC)\n\t}()\n\n\tfor err := range errC {\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n}\n\nfunc TestClient_Write(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar data Response\n\t\tw.WriteHeader(http.StatusNoContent)\n\t\t_ = json.NewEncoder(w).Encode(data)\n\t}))\n\tdefer ts.Close()\n\n\tconfig := HTTPConfig{Addr: ts.URL}\n\tc, _ := NewHTTPClient(config)\n\tdefer c.Close()\n\n\tbp, err := NewBatchPoints(BatchPointsConfig{})\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\terr = c.Write(bp)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error.  
expected %v, actual %v\", nil, err)\n\t}\n}\n\nfunc TestClient_UserAgent(t *testing.T) {\n\treceivedUserAgent := \"\"\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\treceivedUserAgent = r.UserAgent()\n\n\t\tvar data Response\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_ = json.NewEncoder(w).Encode(data)\n\t}))\n\tdefer ts.Close()\n\n\t_, err := http.Get(ts.URL)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t}\n\n\ttests := []struct {\n\t\tname      string\n\t\tuserAgent string\n\t\texpected  string\n\t}{\n\t\t{\n\t\t\tname:      \"Empty user agent\",\n\t\t\tuserAgent: \"\",\n\t\t\texpected:  \"InfluxDBClient\",\n\t\t},\n\t\t{\n\t\t\tname:      \"Custom user agent\",\n\t\t\tuserAgent: \"Test Influx Client\",\n\t\t\texpected:  \"Test Influx Client\",\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\n\t\tconfig := HTTPConfig{Addr: ts.URL, UserAgent: test.userAgent}\n\t\tc, _ := NewHTTPClient(config)\n\t\tdefer c.Close()\n\n\t\treceivedUserAgent = \"\"\n\t\tquery := Query{}\n\t\t_, err = c.Query(query)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t\t}\n\t\tif !strings.HasPrefix(receivedUserAgent, test.expected) {\n\t\t\tt.Errorf(\"Unexpected user agent. expected %v, actual %v\", test.expected, receivedUserAgent)\n\t\t}\n\n\t\treceivedUserAgent = \"\"\n\t\tbp, _ := NewBatchPoints(BatchPointsConfig{})\n\t\terr = c.Write(bp)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"unexpected error.  expected %v, actual %v\", nil, err)\n\t\t}\n\t\tif !strings.HasPrefix(receivedUserAgent, test.expected) {\n\t\t\tt.Errorf(\"Unexpected user agent. expected %v, actual %v\", test.expected, receivedUserAgent)\n\t\t}\n\n\t\treceivedUserAgent = \"\"\n\t\t_, err := c.Query(query)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"unexpected error.  
expected %v, actual %v\", nil, err)\n\t\t}\n\t\tif receivedUserAgent != test.expected {\n\t\t\tt.Errorf(\"Unexpected user agent. expected %v, actual %v\", test.expected, receivedUserAgent)\n\t\t}\n\t}\n}\n\nfunc TestClient_PointString(t *testing.T) {\n\tconst shortForm = \"2006-Jan-02\"\n\ttime1, _ := time.Parse(shortForm, \"2013-Feb-03\")\n\ttags := map[string]string{\"cpu\": \"cpu-total\"}\n\tfields := map[string]interface{}{\"idle\": 10.1, \"system\": 50.9, \"user\": 39.0}\n\tp, _ := NewPoint(\"cpu_usage\", tags, fields, time1)\n\n\ts := \"cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000000000\"\n\tif p.String() != s {\n\t\tt.Errorf(\"Point String Error, got %s, expected %s\", p.String(), s)\n\t}\n\n\ts = \"cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39 1359849600000\"\n\tif p.PrecisionString(\"ms\") != s {\n\t\tt.Errorf(\"Point String Error, got %s, expected %s\",\n\t\t\tp.PrecisionString(\"ms\"), s)\n\t}\n}\n\nfunc TestClient_PointWithoutTimeString(t *testing.T) {\n\ttags := map[string]string{\"cpu\": \"cpu-total\"}\n\tfields := map[string]interface{}{\"idle\": 10.1, \"system\": 50.9, \"user\": 39.0}\n\tp, _ := NewPoint(\"cpu_usage\", tags, fields)\n\n\ts := \"cpu_usage,cpu=cpu-total idle=10.1,system=50.9,user=39\"\n\tif p.String() != s {\n\t\tt.Errorf(\"Point String Error, got %s, expected %s\", p.String(), s)\n\t}\n\n\tif p.PrecisionString(\"ms\") != s {\n\t\tt.Errorf(\"Point String Error, got %s, expected %s\",\n\t\t\tp.PrecisionString(\"ms\"), s)\n\t}\n}\n\nfunc TestClient_PointName(t *testing.T) {\n\ttags := map[string]string{\"cpu\": \"cpu-total\"}\n\tfields := map[string]interface{}{\"idle\": 10.1, \"system\": 50.9, \"user\": 39.0}\n\tp, _ := NewPoint(\"cpu_usage\", tags, fields)\n\n\texp := \"cpu_usage\"\n\tif p.Name() != exp {\n\t\tt.Errorf(\"Error, got %s, expected %s\",\n\t\t\tp.Name(), exp)\n\t}\n}\n\nfunc TestClient_PointTags(t *testing.T) {\n\ttags := map[string]string{\"cpu\": \"cpu-total\"}\n\tfields := 
map[string]interface{}{\"idle\": 10.1, \"system\": 50.9, \"user\": 39.0}\n\tp, _ := NewPoint(\"cpu_usage\", tags, fields)\n\n\tif !reflect.DeepEqual(tags, p.Tags()) {\n\t\tt.Errorf(\"Error, got %v, expected %v\",\n\t\t\tp.Tags(), tags)\n\t}\n}\n\nfunc TestClient_PointUnixNano(t *testing.T) {\n\tconst shortForm = \"2006-Jan-02\"\n\ttime1, _ := time.Parse(shortForm, \"2013-Feb-03\")\n\ttags := map[string]string{\"cpu\": \"cpu-total\"}\n\tfields := map[string]interface{}{\"idle\": 10.1, \"system\": 50.9, \"user\": 39.0}\n\tp, _ := NewPoint(\"cpu_usage\", tags, fields, time1)\n\n\texp := int64(1359849600000000000)\n\tif p.UnixNano() != exp {\n\t\tt.Errorf(\"Error, got %d, expected %d\",\n\t\t\tp.UnixNano(), exp)\n\t}\n}\n\nfunc TestClient_PointFields(t *testing.T) {\n\ttags := map[string]string{\"cpu\": \"cpu-total\"}\n\tfields := map[string]interface{}{\"idle\": 10.1, \"system\": 50.9, \"user\": 39.0}\n\tp, _ := NewPoint(\"cpu_usage\", tags, fields)\n\n\tpfields, err := p.Fields()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !reflect.DeepEqual(fields, pfields) {\n\t\tt.Errorf(\"Error, got %v, expected %v\",\n\t\t\tpfields, fields)\n\t}\n}\n\nfunc TestBatchPoints_PrecisionError(t *testing.T) {\n\t_, err := NewBatchPoints(BatchPointsConfig{Precision: \"foobar\"})\n\tif err == nil {\n\t\tt.Errorf(\"Precision: foobar should have errored\")\n\t}\n\n\tbp, _ := NewBatchPoints(BatchPointsConfig{Precision: \"ns\"})\n\terr = bp.SetPrecision(\"foobar\")\n\tif err == nil {\n\t\tt.Errorf(\"Precision: foobar should have errored\")\n\t}\n}\n\nfunc TestBatchPoints_SettersGetters(t *testing.T) {\n\tbp, _ := NewBatchPoints(BatchPointsConfig{\n\t\tPrecision:        \"ns\",\n\t\tDatabase:         \"db\",\n\t\tRetentionPolicy:  \"rp\",\n\t\tWriteConsistency: \"wc\",\n\t})\n\tif bp.Precision() != \"ns\" {\n\t\tt.Errorf(\"Expected: %s, got %s\", bp.Precision(), \"ns\")\n\t}\n\tif bp.Database() != \"db\" {\n\t\tt.Errorf(\"Expected: %s, got %s\", bp.Database(), \"db\")\n\t}\n\tif 
bp.RetentionPolicy() != \"rp\" {\n\t\tt.Errorf(\"Expected: %s, got %s\", bp.RetentionPolicy(), \"rp\")\n\t}\n\tif bp.WriteConsistency() != \"wc\" {\n\t\tt.Errorf(\"Expected: %s, got %s\", bp.WriteConsistency(), \"wc\")\n\t}\n\n\tbp.SetDatabase(\"db2\")\n\tbp.SetRetentionPolicy(\"rp2\")\n\tbp.SetWriteConsistency(\"wc2\")\n\terr := bp.SetPrecision(\"s\")\n\tif err != nil {\n\t\tt.Errorf(\"Did not expect error: %s\", err.Error())\n\t}\n\n\tif bp.Precision() != \"s\" {\n\t\tt.Errorf(\"Expected: %s, got %s\", bp.Precision(), \"s\")\n\t}\n\tif bp.Database() != \"db2\" {\n\t\tt.Errorf(\"Expected: %s, got %s\", bp.Database(), \"db2\")\n\t}\n\tif bp.RetentionPolicy() != \"rp2\" {\n\t\tt.Errorf(\"Expected: %s, got %s\", bp.RetentionPolicy(), \"rp2\")\n\t}\n\tif bp.WriteConsistency() != \"wc2\" {\n\t\tt.Errorf(\"Expected: %s, got %s\", bp.WriteConsistency(), \"wc2\")\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/client/v2/example_test.go",
    "content": "package client_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/client/v2\"\n)\n\n// Create a new client\nfunc ExampleClient() {\n\t// NOTE: this assumes you've setup a user and have setup shell env variables,\n\t// namely INFLUX_USER/INFLUX_PWD. If not just omit Username/Password below.\n\t_, err := client.NewHTTPClient(client.HTTPConfig{\n\t\tAddr:     \"http://localhost:8086\",\n\t\tUsername: os.Getenv(\"INFLUX_USER\"),\n\t\tPassword: os.Getenv(\"INFLUX_PWD\"),\n\t})\n\tif err != nil {\n\t\tfmt.Println(\"Error creating InfluxDB Client: \", err.Error())\n\t}\n}\n\n// Write a point using the UDP client\nfunc ExampleClient_uDP() {\n\t// Make client\n\tconfig := client.UDPConfig{Addr: \"localhost:8089\"}\n\tc, err := client.NewUDPClient(config)\n\tif err != nil {\n\t\tfmt.Println(\"Error: \", err.Error())\n\t}\n\tdefer c.Close()\n\n\t// Create a new point batch\n\tbp, _ := client.NewBatchPoints(client.BatchPointsConfig{\n\t\tPrecision: \"s\",\n\t})\n\n\t// Create a point and add to batch\n\ttags := map[string]string{\"cpu\": \"cpu-total\"}\n\tfields := map[string]interface{}{\n\t\t\"idle\":   10.1,\n\t\t\"system\": 53.3,\n\t\t\"user\":   46.6,\n\t}\n\tpt, err := client.NewPoint(\"cpu_usage\", tags, fields, time.Now())\n\tif err != nil {\n\t\tfmt.Println(\"Error: \", err.Error())\n\t}\n\tbp.AddPoint(pt)\n\n\t// Write the batch\n\tc.Write(bp)\n}\n\n// Ping the cluster using the HTTP client\nfunc ExampleClient_Ping() {\n\t// Make client\n\tc, err := client.NewHTTPClient(client.HTTPConfig{\n\t\tAddr: \"http://localhost:8086\",\n\t})\n\tif err != nil {\n\t\tfmt.Println(\"Error creating InfluxDB Client: \", err.Error())\n\t}\n\tdefer c.Close()\n\n\t_, _, err = c.Ping(0)\n\tif err != nil {\n\t\tfmt.Println(\"Error pinging InfluxDB Cluster: \", err.Error())\n\t}\n}\n\n// Write a point using the HTTP client\nfunc ExampleClient_write() {\n\t// Make client\n\tc, err := 
client.NewHTTPClient(client.HTTPConfig{\n\t\tAddr: \"http://localhost:8086\",\n\t})\n\tif err != nil {\n\t\tfmt.Println(\"Error creating InfluxDB Client: \", err.Error())\n\t}\n\tdefer c.Close()\n\n\t// Create a new point batch\n\tbp, _ := client.NewBatchPoints(client.BatchPointsConfig{\n\t\tDatabase:  \"BumbleBeeTuna\",\n\t\tPrecision: \"s\",\n\t})\n\n\t// Create a point and add to batch\n\ttags := map[string]string{\"cpu\": \"cpu-total\"}\n\tfields := map[string]interface{}{\n\t\t\"idle\":   10.1,\n\t\t\"system\": 53.3,\n\t\t\"user\":   46.6,\n\t}\n\tpt, err := client.NewPoint(\"cpu_usage\", tags, fields, time.Now())\n\tif err != nil {\n\t\tfmt.Println(\"Error: \", err.Error())\n\t}\n\tbp.AddPoint(pt)\n\n\t// Write the batch\n\tc.Write(bp)\n}\n\n// Create a batch and add a point\nfunc ExampleBatchPoints() {\n\t// Create a new point batch\n\tbp, _ := client.NewBatchPoints(client.BatchPointsConfig{\n\t\tDatabase:  \"BumbleBeeTuna\",\n\t\tPrecision: \"s\",\n\t})\n\n\t// Create a point and add to batch\n\ttags := map[string]string{\"cpu\": \"cpu-total\"}\n\tfields := map[string]interface{}{\n\t\t\"idle\":   10.1,\n\t\t\"system\": 53.3,\n\t\t\"user\":   46.6,\n\t}\n\tpt, err := client.NewPoint(\"cpu_usage\", tags, fields, time.Now())\n\tif err != nil {\n\t\tfmt.Println(\"Error: \", err.Error())\n\t}\n\tbp.AddPoint(pt)\n}\n\n// Using the BatchPoints setter functions\nfunc ExampleBatchPoints_setters() {\n\t// Create a new point batch\n\tbp, _ := client.NewBatchPoints(client.BatchPointsConfig{})\n\tbp.SetDatabase(\"BumbleBeeTuna\")\n\tbp.SetPrecision(\"ms\")\n\n\t// Create a point and add to batch\n\ttags := map[string]string{\"cpu\": \"cpu-total\"}\n\tfields := map[string]interface{}{\n\t\t\"idle\":   10.1,\n\t\t\"system\": 53.3,\n\t\t\"user\":   46.6,\n\t}\n\tpt, err := client.NewPoint(\"cpu_usage\", tags, fields, time.Now())\n\tif err != nil {\n\t\tfmt.Println(\"Error: \", err.Error())\n\t}\n\tbp.AddPoint(pt)\n}\n\n// Create a new point with a timestamp\nfunc 
ExamplePoint() {\n\ttags := map[string]string{\"cpu\": \"cpu-total\"}\n\tfields := map[string]interface{}{\n\t\t\"idle\":   10.1,\n\t\t\"system\": 53.3,\n\t\t\"user\":   46.6,\n\t}\n\tpt, err := client.NewPoint(\"cpu_usage\", tags, fields, time.Now())\n\tif err == nil {\n\t\tfmt.Println(\"We created a point: \", pt.String())\n\t}\n}\n\n// Create a new point without a timestamp\nfunc ExamplePoint_withoutTime() {\n\ttags := map[string]string{\"cpu\": \"cpu-total\"}\n\tfields := map[string]interface{}{\n\t\t\"idle\":   10.1,\n\t\t\"system\": 53.3,\n\t\t\"user\":   46.6,\n\t}\n\tpt, err := client.NewPoint(\"cpu_usage\", tags, fields)\n\tif err == nil {\n\t\tfmt.Println(\"We created a point w/o time: \", pt.String())\n\t}\n}\n\n// Write 1000 points\nfunc ExampleClient_write1000() {\n\tsampleSize := 1000\n\n\t// Make client\n\tc, err := client.NewHTTPClient(client.HTTPConfig{\n\t\tAddr: \"http://localhost:8086\",\n\t})\n\tif err != nil {\n\t\tfmt.Println(\"Error creating InfluxDB Client: \", err.Error())\n\t}\n\tdefer c.Close()\n\n\trand.Seed(42)\n\n\tbp, _ := client.NewBatchPoints(client.BatchPointsConfig{\n\t\tDatabase:  \"systemstats\",\n\t\tPrecision: \"us\",\n\t})\n\n\tfor i := 0; i < sampleSize; i++ {\n\t\tregions := []string{\"us-west1\", \"us-west2\", \"us-west3\", \"us-east1\"}\n\t\ttags := map[string]string{\n\t\t\t\"cpu\":    \"cpu-total\",\n\t\t\t\"host\":   fmt.Sprintf(\"host%d\", rand.Intn(1000)),\n\t\t\t\"region\": regions[rand.Intn(len(regions))],\n\t\t}\n\n\t\tidle := rand.Float64() * 100.0\n\t\tfields := map[string]interface{}{\n\t\t\t\"idle\": idle,\n\t\t\t\"busy\": 100.0 - idle,\n\t\t}\n\n\t\tpt, err := client.NewPoint(\n\t\t\t\"cpu_usage\",\n\t\t\ttags,\n\t\t\tfields,\n\t\t\ttime.Now(),\n\t\t)\n\t\tif err != nil {\n\t\t\tprintln(\"Error:\", err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tbp.AddPoint(pt)\n\t}\n\n\terr = c.Write(bp)\n\tif err != nil {\n\t\tfmt.Println(\"Error: \", err.Error())\n\t}\n}\n\n// Make a Query\nfunc ExampleClient_query() {\n\t// 
Make client\n\tc, err := client.NewHTTPClient(client.HTTPConfig{\n\t\tAddr: \"http://localhost:8086\",\n\t})\n\tif err != nil {\n\t\tfmt.Println(\"Error creating InfluxDB Client: \", err.Error())\n\t}\n\tdefer c.Close()\n\n\tq := client.NewQuery(\"SELECT count(value) FROM shapes\", \"square_holes\", \"ns\")\n\tif response, err := c.Query(q); err == nil && response.Error() == nil {\n\t\tfmt.Println(response.Results)\n\t}\n}\n\n// Create a Database with a query\nfunc ExampleClient_createDatabase() {\n\t// Make client\n\tc, err := client.NewHTTPClient(client.HTTPConfig{\n\t\tAddr: \"http://localhost:8086\",\n\t})\n\tif err != nil {\n\t\tfmt.Println(\"Error creating InfluxDB Client: \", err.Error())\n\t}\n\tdefer c.Close()\n\n\tq := client.NewQuery(\"CREATE DATABASE telegraf\", \"\", \"\")\n\tif response, err := c.Query(q); err == nil && response.Error() == nil {\n\t\tfmt.Println(response.Results)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/client/v2/udp.go",
    "content": "package client\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"time\"\n)\n\nconst (\n\t// UDPPayloadSize is a reasonable default payload size for UDP packets that\n\t// could be travelling over the internet.\n\tUDPPayloadSize = 512\n)\n\n// UDPConfig is the config data needed to create a UDP Client.\ntype UDPConfig struct {\n\t// Addr should be of the form \"host:port\"\n\t// or \"[ipv6-host%zone]:port\".\n\tAddr string\n\n\t// PayloadSize is the maximum size of a UDP client message, optional\n\t// Tune this based on your network. Defaults to UDPPayloadSize.\n\tPayloadSize int\n}\n\n// NewUDPClient returns a client interface for writing to an InfluxDB UDP\n// service from the given config.\nfunc NewUDPClient(conf UDPConfig) (Client, error) {\n\tvar udpAddr *net.UDPAddr\n\tudpAddr, err := net.ResolveUDPAddr(\"udp\", conf.Addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconn, err := net.DialUDP(\"udp\", nil, udpAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpayloadSize := conf.PayloadSize\n\tif payloadSize == 0 {\n\t\tpayloadSize = UDPPayloadSize\n\t}\n\n\treturn &udpclient{\n\t\tconn:        conn,\n\t\tpayloadSize: payloadSize,\n\t}, nil\n}\n\n// Close releases the udpclient's resources.\nfunc (uc *udpclient) Close() error {\n\treturn uc.conn.Close()\n}\n\ntype udpclient struct {\n\tconn        io.WriteCloser\n\tpayloadSize int\n}\n\nfunc (uc *udpclient) Write(bp BatchPoints) error {\n\tvar b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed\n\tvar d, _ = time.ParseDuration(\"1\" + bp.Precision())\n\n\tvar delayedError error\n\n\tvar checkBuffer = func(n int) {\n\t\tif len(b) > 0 && len(b)+n > uc.payloadSize {\n\t\t\tif _, err := uc.conn.Write(b); err != nil {\n\t\t\t\tdelayedError = err\n\t\t\t}\n\t\t\tb = b[:0]\n\t\t}\n\t}\n\n\tfor _, p := range bp.Points() {\n\t\tp.pt.Round(d)\n\t\tpointSize := p.pt.StringSize() + 1 // include newline in size\n\t\t//point := p.pt.RoundedString(d) + 
\"\\n\"\n\n\t\tcheckBuffer(pointSize)\n\n\t\tif p.Time().IsZero() || pointSize <= uc.payloadSize {\n\t\t\tb = p.pt.AppendString(b)\n\t\t\tb = append(b, '\\n')\n\t\t\tcontinue\n\t\t}\n\n\t\tpoints := p.pt.Split(uc.payloadSize - 1) // account for newline character\n\t\tfor _, sp := range points {\n\t\t\tcheckBuffer(sp.StringSize() + 1)\n\t\t\tb = sp.AppendString(b)\n\t\t\tb = append(b, '\\n')\n\t\t}\n\t}\n\n\tif len(b) > 0 {\n\t\tif _, err := uc.conn.Write(b); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn delayedError\n}\n\nfunc (uc *udpclient) Query(q Query) (*Response, error) {\n\treturn nil, fmt.Errorf(\"Querying via UDP is not supported\")\n}\n\nfunc (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) {\n\treturn 0, \"\", nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go",
    "content": "// Package cli contains the logic of the influx command line client.\npackage cli // import \"github.com/influxdata/influxdb/cmd/influx/cli\"\n\nimport (\n\t\"bytes\"\n\t\"encoding/csv\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"os/signal\"\n\t\"path/filepath\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"text/tabwriter\"\n\n\t\"golang.org/x/crypto/ssh/terminal\"\n\n\t\"github.com/influxdata/influxdb/client\"\n\t\"github.com/influxdata/influxdb/importer/v8\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/peterh/liner\"\n)\n\n// ErrBlankCommand is returned when a parsed command is empty.\nvar ErrBlankCommand = errors.New(\"empty input\")\n\n// CommandLine holds CLI configuration and state.\ntype CommandLine struct {\n\tLine            *liner.State\n\tHost            string\n\tPort            int\n\tDatabase        string\n\tSsl             bool\n\tRetentionPolicy string\n\tClientVersion   string\n\tServerVersion   string\n\tPretty          bool   // controls pretty print for json\n\tFormat          string // controls the output format.  
Valid values are json, csv, or column\n\tExecute         string\n\tShowVersion     bool\n\tImport          bool\n\tChunked         bool\n\tChunkSize       int\n\tQuit            chan struct{}\n\tIgnoreSignals   bool // Ignore signals normally caught by this process (used primarily for testing)\n\tForceTTY        bool // Force the CLI to act as if it were connected to a TTY\n\tosSignals       chan os.Signal\n\thistoryFilePath string\n\n\tClient         *client.Client\n\tClientConfig   client.Config // Client config options.\n\tImporterConfig v8.Config     // Importer configuration options.\n}\n\n// New returns an instance of CommandLine with the specified client version.\nfunc New(version string) *CommandLine {\n\treturn &CommandLine{\n\t\tClientVersion: version,\n\t\tQuit:          make(chan struct{}, 1),\n\t\tosSignals:     make(chan os.Signal, 1),\n\t\tChunked:       true,\n\t}\n}\n\n// Run executes the CLI.\nfunc (c *CommandLine) Run() error {\n\thasTTY := c.ForceTTY || terminal.IsTerminal(int(os.Stdin.Fd()))\n\n\tvar promptForPassword bool\n\t// determine if they set the password flag but provided no value\n\tfor _, v := range os.Args {\n\t\tv = strings.ToLower(v)\n\t\tif (strings.HasPrefix(v, \"-password\") || strings.HasPrefix(v, \"--password\")) && c.ClientConfig.Password == \"\" {\n\t\t\tpromptForPassword = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Check if we will be able to prompt for the password later.\n\tif promptForPassword && !hasTTY {\n\t\treturn errors.New(\"Unable to prompt for a password with no TTY.\")\n\t}\n\n\t// Read environment variables for username/password.\n\tif c.ClientConfig.Username == \"\" {\n\t\tc.ClientConfig.Username = os.Getenv(\"INFLUX_USERNAME\")\n\t}\n\t// If we are going to be prompted for a password, always use the entered password.\n\tif promptForPassword {\n\t\t// Open the liner (temporarily) and prompt for the password.\n\t\tp, e := func() (string, error) {\n\t\t\tl := liner.NewLiner()\n\t\t\tdefer l.Close()\n\t\t\treturn 
l.PasswordPrompt(\"password: \")\n\t\t}()\n\t\tif e != nil {\n\t\t\treturn errors.New(\"Unable to parse password\")\n\t\t}\n\t\tc.ClientConfig.Password = p\n\t} else if c.ClientConfig.Password == \"\" {\n\t\tc.ClientConfig.Password = os.Getenv(\"INFLUX_PASSWORD\")\n\t}\n\n\tif err := c.Connect(\"\"); err != nil {\n\t\tmsg := \"Please check your connection settings and ensure 'influxd' is running.\"\n\t\tif !c.Ssl && strings.Contains(err.Error(), \"malformed HTTP response\") {\n\t\t\t// Attempt to connect with SSL and disable secure SSL for this test.\n\t\t\tc.Ssl = true\n\t\t\tunsafeSsl := c.ClientConfig.UnsafeSsl\n\t\t\tc.ClientConfig.UnsafeSsl = true\n\t\t\tif err := c.Connect(\"\"); err == nil {\n\t\t\t\tmsg = \"Please use the -ssl flag to connect using SSL.\"\n\t\t\t}\n\t\t\tc.Ssl = false\n\t\t\tc.ClientConfig.UnsafeSsl = unsafeSsl\n\t\t} else if c.Ssl && !c.ClientConfig.UnsafeSsl && strings.Contains(err.Error(), \"certificate is valid for\") {\n\t\t\t// Attempt to connect with an insecure connection just to see if it works.\n\t\t\tc.ClientConfig.UnsafeSsl = true\n\t\t\tif err := c.Connect(\"\"); err == nil {\n\t\t\t\tmsg = \"You may use -unsafeSsl to connect anyway, but the SSL connection will not be secure.\"\n\t\t\t}\n\t\t\tc.ClientConfig.UnsafeSsl = false\n\t\t}\n\t\treturn fmt.Errorf(\"Failed to connect to %s: %s\\n%s\", c.Client.Addr(), err.Error(), msg)\n\t}\n\n\t// Modify precision.\n\tc.SetPrecision(c.ClientConfig.Precision)\n\n\tif c.Execute != \"\" {\n\t\t// Make the non-interactive mode send everything through the CLI's parser\n\t\t// the same way the interactive mode works\n\t\tlines := strings.Split(c.Execute, \"\\n\")\n\t\tfor _, line := range lines {\n\t\t\tif err := c.ParseCommand(line); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tif c.Import {\n\t\taddr := net.JoinHostPort(c.Host, strconv.Itoa(c.Port))\n\t\tu, e := client.ParseConnectionString(addr, c.Ssl)\n\t\tif e != nil {\n\t\t\treturn e\n\t\t}\n\n\t\t// Copy 
the latest importer config and inject the latest client config\n\t\t// into it.\n\t\tconfig := c.ImporterConfig\n\t\tconfig.Config = c.ClientConfig\n\t\tconfig.URL = u\n\n\t\ti := v8.NewImporter(config)\n\t\tif err := i.Import(); err != nil {\n\t\t\terr = fmt.Errorf(\"ERROR: %s\\n\", err)\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tif !hasTTY {\n\t\tcmd, err := ioutil.ReadAll(os.Stdin)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn c.ExecuteQuery(string(cmd))\n\t}\n\n\tif !c.IgnoreSignals {\n\t\t// register OS signals for graceful termination\n\t\tsignal.Notify(c.osSignals, syscall.SIGINT, syscall.SIGTERM)\n\t}\n\n\tc.Line = liner.NewLiner()\n\tdefer c.Line.Close()\n\n\tc.Line.SetMultiLineMode(true)\n\n\tfmt.Printf(\"Connected to %s version %s\\n\", c.Client.Addr(), c.ServerVersion)\n\n\tc.Version()\n\n\t// Only load/write history if HOME environment variable is set.\n\tif homeDir := os.Getenv(\"HOME\"); homeDir != \"\" {\n\t\t// Attempt to load the history file.\n\t\tc.historyFilePath = filepath.Join(homeDir, \".influx_history\")\n\t\tif historyFile, err := os.Open(c.historyFilePath); err == nil {\n\t\t\tc.Line.ReadHistory(historyFile)\n\t\t\thistoryFile.Close()\n\t\t}\n\t}\n\n\t// read from prompt until exit is run\n\treturn c.mainLoop()\n}\n\n// mainLoop runs the main prompt loop for the CLI.\nfunc (c *CommandLine) mainLoop() error {\n\tfor {\n\t\tselect {\n\t\tcase <-c.osSignals:\n\t\t\tc.exit()\n\t\t\treturn nil\n\t\tcase <-c.Quit:\n\t\t\tc.exit()\n\t\t\treturn nil\n\t\tdefault:\n\t\t\tl, e := c.Line.Prompt(\"> \")\n\t\t\tif e == io.EOF {\n\t\t\t\t// Instead of die, register that someone exited the program gracefully\n\t\t\t\tl = \"exit\"\n\t\t\t} else if e != nil {\n\t\t\t\tc.exit()\n\t\t\t\treturn e\n\t\t\t}\n\t\t\tif err := c.ParseCommand(l); err != ErrBlankCommand && !strings.HasPrefix(strings.TrimSpace(l), \"auth\") {\n\t\t\t\tl = influxql.Sanitize(l)\n\t\t\t\tc.Line.AppendHistory(l)\n\t\t\t\tc.saveHistory()\n\t\t\t}\n\t\t}\n\t}\n}\n\n// 
ParseCommand parses an instruction and calls the related method\n// or executes the command as a query against InfluxDB.\nfunc (c *CommandLine) ParseCommand(cmd string) error {\n\tlcmd := strings.TrimSpace(strings.ToLower(cmd))\n\ttokens := strings.Fields(lcmd)\n\n\tif len(tokens) > 0 {\n\t\tswitch tokens[0] {\n\t\tcase \"exit\", \"quit\":\n\t\t\tclose(c.Quit)\n\t\tcase \"gopher\":\n\t\t\tc.gopher()\n\t\tcase \"connect\":\n\t\t\treturn c.Connect(cmd)\n\t\tcase \"auth\":\n\t\t\tc.SetAuth(cmd)\n\t\tcase \"help\":\n\t\t\tc.help()\n\t\tcase \"history\":\n\t\t\tc.history()\n\t\tcase \"format\":\n\t\t\tc.SetFormat(cmd)\n\t\tcase \"precision\":\n\t\t\tc.SetPrecision(cmd)\n\t\tcase \"consistency\":\n\t\t\tc.SetWriteConsistency(cmd)\n\t\tcase \"settings\":\n\t\t\tc.Settings()\n\t\tcase \"chunked\":\n\t\t\tc.Chunked = !c.Chunked\n\t\t\tif c.Chunked {\n\t\t\t\tfmt.Println(\"chunked responses enabled\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"chunked reponses disabled\")\n\t\t\t}\n\t\tcase \"chunk\":\n\t\t\tc.SetChunkSize(cmd)\n\t\tcase \"pretty\":\n\t\t\tc.Pretty = !c.Pretty\n\t\t\tif c.Pretty {\n\t\t\t\tfmt.Println(\"Pretty print enabled\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Pretty print disabled\")\n\t\t\t}\n\t\tcase \"use\":\n\t\t\tc.use(cmd)\n\t\tcase \"insert\":\n\t\t\treturn c.Insert(cmd)\n\t\tcase \"clear\":\n\t\t\tc.clear(cmd)\n\t\tdefault:\n\t\t\treturn c.ExecuteQuery(cmd)\n\t\t}\n\n\t\treturn nil\n\t}\n\treturn ErrBlankCommand\n}\n\n// Connect connects to a server.\nfunc (c *CommandLine) Connect(cmd string) error {\n\t// Remove the \"connect\" keyword if it exists\n\taddr := strings.TrimSpace(strings.Replace(cmd, \"connect\", \"\", -1))\n\tif addr == \"\" {\n\t\t// If they didn't provide a connection string, use the current settings\n\t\taddr = net.JoinHostPort(c.Host, strconv.Itoa(c.Port))\n\t}\n\n\tURL, err := client.ParseConnectionString(addr, c.Ssl)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create copy of the current client config and create a new 
client.\n\tClientConfig := c.ClientConfig\n\tClientConfig.UserAgent = \"InfluxDBShell/\" + c.ClientVersion\n\tClientConfig.URL = URL\n\n\tclient, err := client.NewClient(ClientConfig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not create client %s\", err)\n\t}\n\tc.Client = client\n\n\t_, v, err := c.Client.Ping()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.ServerVersion = v\n\n\t// Update the command with the current connection information\n\tif host, port, err := net.SplitHostPort(ClientConfig.URL.Host); err == nil {\n\t\tc.Host = host\n\t\tif i, err := strconv.Atoi(port); err == nil {\n\t\t\tc.Port = i\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// SetAuth sets client authentication credentials.\nfunc (c *CommandLine) SetAuth(cmd string) {\n\t// If they pass in the entire command, we should parse it\n\t// auth <username> <password>\n\targs := strings.Fields(cmd)\n\tif len(args) == 3 {\n\t\targs = args[1:]\n\t} else {\n\t\targs = []string{}\n\t}\n\n\tif len(args) == 2 {\n\t\tc.ClientConfig.Username = args[0]\n\t\tc.ClientConfig.Password = args[1]\n\t} else {\n\t\tu, e := c.Line.Prompt(\"username: \")\n\t\tif e != nil {\n\t\t\tfmt.Printf(\"Unable to process input: %s\", e)\n\t\t\treturn\n\t\t}\n\t\tc.ClientConfig.Username = strings.TrimSpace(u)\n\t\tp, e := c.Line.PasswordPrompt(\"password: \")\n\t\tif e != nil {\n\t\t\tfmt.Printf(\"Unable to process input: %s\", e)\n\t\t\treturn\n\t\t}\n\t\tc.ClientConfig.Password = p\n\t}\n\n\t// Update the client as well\n\tc.Client.SetAuth(c.ClientConfig.Username, c.ClientConfig.Password)\n}\n\nfunc (c *CommandLine) clear(cmd string) {\n\targs := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), \";\"), \" \")\n\tv := strings.ToLower(strings.Join(args[1:], \" \"))\n\tswitch v {\n\tcase \"database\", \"db\":\n\t\tc.Database = \"\"\n\t\tfmt.Println(\"database context cleared\")\n\t\treturn\n\tcase \"retention policy\", \"rp\":\n\t\tc.RetentionPolicy = \"\"\n\t\tfmt.Println(\"retention policy context 
cleared\")\n\t\treturn\n\tdefault:\n\t\tif len(args) > 1 {\n\t\t\tfmt.Printf(\"invalid command %q.\\n\", v)\n\t\t}\n\t\tfmt.Println(`Possible commands for 'clear' are:\n    # Clear the database context\n    clear database\n    clear db\n\n    # Clear the retention policy context\n    clear retention policy\n    clear rp\n\t\t`)\n\t}\n}\n\nfunc (c *CommandLine) use(cmd string) {\n\targs := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), \";\"), \" \")\n\tif len(args) != 2 {\n\t\tfmt.Printf(\"Could not parse database name from %q.\\n\", cmd)\n\t\treturn\n\t}\n\n\tstmt := args[1]\n\tdb, rp, err := parseDatabaseAndRetentionPolicy([]byte(stmt))\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to parse database or retention policy from %s\", stmt)\n\t\treturn\n\t}\n\n\tif !c.databaseExists(db) {\n\t\treturn\n\t}\n\n\tc.Database = db\n\tfmt.Printf(\"Using database %s\\n\", db)\n\n\tif rp != \"\" {\n\t\tif !c.retentionPolicyExists(db, rp) {\n\t\t\treturn\n\t\t}\n\t\tc.RetentionPolicy = rp\n\t\tfmt.Printf(\"Using retention policy %s\\n\", rp)\n\t}\n}\n\nfunc (c *CommandLine) databaseExists(db string) bool {\n\t// Validate if specified database exists\n\tresponse, err := c.Client.Query(client.Query{Command: \"SHOW DATABASES\"})\n\tif err != nil {\n\t\tfmt.Printf(\"ERR: %s\\n\", err)\n\t\treturn false\n\t} else if err := response.Error(); err != nil {\n\t\tif c.ClientConfig.Username == \"\" {\n\t\t\tfmt.Printf(\"ERR: %s\\n\", err)\n\t\t\treturn false\n\t\t}\n\t\t// TODO(jsternberg): Fix SHOW DATABASES to be user-aware #6397.\n\t\t// If we are unable to run SHOW DATABASES, display a warning and use the\n\t\t// database anyway in case the person doesn't have permission to run the\n\t\t// command, but does have permission to use the database.\n\t\tfmt.Printf(\"WARN: %s\\n\", err)\n\t} else {\n\t\t// Verify the provided database exists\n\t\tif databaseExists := func() bool {\n\t\t\tfor _, result := range response.Results {\n\t\t\t\tfor _, row := range result.Series 
{\n\t\t\t\t\tif row.Name == \"databases\" {\n\t\t\t\t\t\tfor _, values := range row.Values {\n\t\t\t\t\t\t\tfor _, database := range values {\n\t\t\t\t\t\t\t\tif database == db {\n\t\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}(); !databaseExists {\n\t\t\tfmt.Printf(\"ERR: Database %s doesn't exist. Run SHOW DATABASES for a list of existing databases.\\n\", db)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (c *CommandLine) retentionPolicyExists(db, rp string) bool {\n\t// Validate if specified database exists\n\tresponse, err := c.Client.Query(client.Query{Command: fmt.Sprintf(\"SHOW RETENTION POLICIES ON %q\", db)})\n\tif err != nil {\n\t\tfmt.Printf(\"ERR: %s\\n\", err)\n\t\treturn false\n\t} else if err := response.Error(); err != nil {\n\t\tif c.ClientConfig.Username == \"\" {\n\t\t\tfmt.Printf(\"ERR: %s\\n\", err)\n\t\t\treturn false\n\t\t}\n\t\tfmt.Printf(\"WARN: %s\\n\", err)\n\t} else {\n\t\t// Verify the provided database exists\n\t\tif retentionPolicyExists := func() bool {\n\t\t\tfor _, result := range response.Results {\n\t\t\t\tfor _, row := range result.Series {\n\t\t\t\t\tfor _, values := range row.Values {\n\t\t\t\t\t\tfor i, v := range values {\n\t\t\t\t\t\t\tif i != 0 {\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif v == rp {\n\t\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}(); !retentionPolicyExists {\n\t\t\tfmt.Printf(\"ERR: RETENTION POLICY %s doesn't exist. 
Run SHOW RETENTION POLICIES ON %q for a list of existing retention polices.\\n\", rp, db)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// SetChunkSize sets the chunk size\n// 0 sets it back to the default\nfunc (c *CommandLine) SetChunkSize(cmd string) {\n\t// normalize cmd\n\tcmd = strings.ToLower(cmd)\n\tcmd = strings.Join(strings.Fields(cmd), \" \")\n\n\t// Remove the \"chunk size\" keyword if it exists\n\tcmd = strings.TrimPrefix(cmd, \"chunk size \")\n\n\t// Remove the \"chunk\" keyword if it exists\n\t// allows them to use `chunk 50` as a shortcut\n\tcmd = strings.TrimPrefix(cmd, \"chunk \")\n\n\tif n, err := strconv.ParseInt(cmd, 10, 64); err == nil {\n\t\tc.ChunkSize = int(n)\n\t\tif c.ChunkSize <= 0 {\n\t\t\tc.ChunkSize = 0\n\t\t}\n\t\tfmt.Printf(\"chunk size set to %d\\n\", c.ChunkSize)\n\t} else {\n\t\tfmt.Printf(\"unable to parse chunk size from %q\\n\", cmd)\n\t}\n}\n\n// SetPrecision sets client precision.\nfunc (c *CommandLine) SetPrecision(cmd string) {\n\t// normalize cmd\n\tcmd = strings.ToLower(cmd)\n\n\t// Remove the \"precision\" keyword if it exists\n\tcmd = strings.TrimSpace(strings.Replace(cmd, \"precision\", \"\", -1))\n\n\tswitch cmd {\n\tcase \"h\", \"m\", \"s\", \"ms\", \"u\", \"ns\":\n\t\tc.ClientConfig.Precision = cmd\n\t\tc.Client.SetPrecision(c.ClientConfig.Precision)\n\tcase \"rfc3339\":\n\t\tc.ClientConfig.Precision = \"\"\n\t\tc.Client.SetPrecision(c.ClientConfig.Precision)\n\tdefault:\n\t\tfmt.Printf(\"Unknown precision %q. Please use rfc3339, h, m, s, ms, u or ns.\\n\", cmd)\n\t}\n}\n\n// SetFormat sets output format.\nfunc (c *CommandLine) SetFormat(cmd string) {\n\t// Remove the \"format\" keyword if it exists\n\tcmd = strings.TrimSpace(strings.Replace(cmd, \"format\", \"\", -1))\n\t// normalize cmd\n\tcmd = strings.ToLower(cmd)\n\n\tswitch cmd {\n\tcase \"json\", \"csv\", \"column\":\n\t\tc.Format = cmd\n\tdefault:\n\t\tfmt.Printf(\"Unknown format %q. 
Please use json, csv, or column.\\n\", cmd)\n\t}\n}\n\n// SetWriteConsistency sets write consistency level.\nfunc (c *CommandLine) SetWriteConsistency(cmd string) {\n\t// Remove the \"consistency\" keyword if it exists\n\tcmd = strings.TrimSpace(strings.Replace(cmd, \"consistency\", \"\", -1))\n\t// normalize cmd\n\tcmd = strings.ToLower(cmd)\n\n\t_, err := models.ParseConsistencyLevel(cmd)\n\tif err != nil {\n\t\tfmt.Printf(\"Unknown consistency level %q. Please use any, one, quorum, or all.\\n\", cmd)\n\t\treturn\n\t}\n\tc.ClientConfig.WriteConsistency = cmd\n}\n\n// isWhitespace returns true if the rune is a space, tab, or newline.\nfunc isWhitespace(ch rune) bool { return ch == ' ' || ch == '\\t' || ch == '\\n' }\n\n// isLetter returns true if the rune is a letter.\nfunc isLetter(ch rune) bool { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') }\n\n// isDigit returns true if the rune is a digit.\nfunc isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') }\n\n// isIdentFirstChar returns true if the rune can be used as the first char in an unquoted identifer.\nfunc isIdentFirstChar(ch rune) bool { return isLetter(ch) || ch == '_' }\n\n// isIdentChar returns true if the rune can be used in an unquoted identifier.\nfunc isNotIdentChar(ch rune) bool { return !(isLetter(ch) || isDigit(ch) || ch == '_') }\n\nfunc parseUnquotedIdentifier(stmt string) (string, string) {\n\tif fields := strings.FieldsFunc(stmt, isNotIdentChar); len(fields) > 0 {\n\t\treturn fields[0], strings.TrimPrefix(stmt, fields[0])\n\t}\n\treturn \"\", stmt\n}\n\nfunc parseDoubleQuotedIdentifier(stmt string) (string, string) {\n\tescapeNext := false\n\tfields := strings.FieldsFunc(stmt, func(ch rune) bool {\n\t\tif ch == '\\\\' {\n\t\t\tescapeNext = true\n\t\t} else if ch == '\"' {\n\t\t\tif !escapeNext {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tescapeNext = false\n\t\t}\n\t\treturn false\n\t})\n\tif len(fields) > 0 {\n\t\treturn fields[0], strings.TrimPrefix(stmt, 
\"\\\"\"+fields[0]+\"\\\"\")\n\t}\n\treturn \"\", stmt\n}\n\nfunc parseNextIdentifier(stmt string) (ident, remainder string) {\n\tif len(stmt) > 0 {\n\t\tswitch {\n\t\tcase isWhitespace(rune(stmt[0])):\n\t\t\treturn parseNextIdentifier(stmt[1:])\n\t\tcase isIdentFirstChar(rune(stmt[0])):\n\t\t\treturn parseUnquotedIdentifier(stmt)\n\t\tcase stmt[0] == '\"':\n\t\t\treturn parseDoubleQuotedIdentifier(stmt)\n\t\t}\n\t}\n\treturn \"\", stmt\n}\n\nfunc (c *CommandLine) parseInto(stmt string) *client.BatchPoints {\n\tident, stmt := parseNextIdentifier(stmt)\n\tdb, rp := c.Database, c.RetentionPolicy\n\tif strings.HasPrefix(stmt, \".\") {\n\t\tdb = ident\n\t\tident, stmt = parseNextIdentifier(stmt[1:])\n\t}\n\tif strings.HasPrefix(stmt, \" \") {\n\t\trp = ident\n\t\tstmt = stmt[1:]\n\t}\n\n\treturn &client.BatchPoints{\n\t\tPoints: []client.Point{\n\t\t\tclient.Point{Raw: stmt},\n\t\t},\n\t\tDatabase:         db,\n\t\tRetentionPolicy:  rp,\n\t\tPrecision:        c.ClientConfig.Precision,\n\t\tWriteConsistency: c.ClientConfig.WriteConsistency,\n\t}\n}\n\nfunc (c *CommandLine) parseInsert(stmt string) (*client.BatchPoints, error) {\n\ti, point := parseNextIdentifier(stmt)\n\tif !strings.EqualFold(i, \"insert\") {\n\t\treturn nil, fmt.Errorf(\"found %s, expected INSERT\\n\", i)\n\t}\n\tif i, r := parseNextIdentifier(point); strings.EqualFold(i, \"into\") {\n\t\tbp := c.parseInto(r)\n\t\treturn bp, nil\n\t}\n\treturn &client.BatchPoints{\n\t\tPoints: []client.Point{\n\t\t\tclient.Point{Raw: point},\n\t\t},\n\t\tDatabase:         c.Database,\n\t\tRetentionPolicy:  c.RetentionPolicy,\n\t\tPrecision:        c.ClientConfig.Precision,\n\t\tWriteConsistency: c.ClientConfig.WriteConsistency,\n\t}, nil\n}\n\n// Insert runs an INSERT statement.\nfunc (c *CommandLine) Insert(stmt string) error {\n\tbp, err := c.parseInsert(stmt)\n\tif err != nil {\n\t\tfmt.Printf(\"ERR: %s\\n\", err)\n\t\treturn nil\n\t}\n\tif _, err := c.Client.Write(*bp); err != nil {\n\t\tfmt.Printf(\"ERR: %s\\n\", 
err)\n\t\tif c.Database == \"\" {\n\t\t\tfmt.Println(\"Note: error may be due to not setting a database or retention policy.\")\n\t\t\tfmt.Println(`Please set a database with the command \"use <database>\" or`)\n\t\t\tfmt.Println(\"INSERT INTO <database>.<retention-policy> <point>\")\n\t\t}\n\t}\n\treturn nil\n}\n\n// query creates a query struct to be used with the client.\nfunc (c *CommandLine) query(query string) client.Query {\n\treturn client.Query{\n\t\tCommand:   query,\n\t\tDatabase:  c.Database,\n\t\tChunked:   c.Chunked,\n\t\tChunkSize: c.ChunkSize,\n\t}\n}\n\n// ExecuteQuery runs any query statement.\nfunc (c *CommandLine) ExecuteQuery(query string) error {\n\t// If we have a retention policy, we need to rewrite the statement sources\n\tif c.RetentionPolicy != \"\" {\n\t\tpq, err := influxql.NewParser(strings.NewReader(query)).ParseQuery()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"ERR: %s\\n\", err)\n\t\t\treturn err\n\t\t}\n\t\tfor _, stmt := range pq.Statements {\n\t\t\tif selectStatement, ok := stmt.(*influxql.SelectStatement); ok {\n\t\t\t\tinfluxql.WalkFunc(selectStatement.Sources, func(n influxql.Node) {\n\t\t\t\t\tif t, ok := n.(*influxql.Measurement); ok {\n\t\t\t\t\t\tif t.Database == \"\" && c.Database != \"\" {\n\t\t\t\t\t\t\tt.Database = c.Database\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif t.RetentionPolicy == \"\" && c.RetentionPolicy != \"\" {\n\t\t\t\t\t\t\tt.RetentionPolicy = c.RetentionPolicy\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t\tquery = pq.String()\n\t}\n\tresponse, err := c.Client.Query(c.query(query))\n\tif err != nil {\n\t\tfmt.Printf(\"ERR: %s\\n\", err)\n\t\treturn err\n\t}\n\tc.FormatResponse(response, os.Stdout)\n\tif err := response.Error(); err != nil {\n\t\tfmt.Printf(\"ERR: %s\\n\", response.Error())\n\t\tif c.Database == \"\" {\n\t\t\tfmt.Println(\"Warning: It is possible this error is due to not setting a database.\")\n\t\t\tfmt.Println(`Please set a database with the command \"use 
<database>\".`)\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// FormatResponse formats output to the previously chosen format.\nfunc (c *CommandLine) FormatResponse(response *client.Response, w io.Writer) {\n\tswitch c.Format {\n\tcase \"json\":\n\t\tc.writeJSON(response, w)\n\tcase \"csv\":\n\t\tc.writeCSV(response, w)\n\tcase \"column\":\n\t\tc.writeColumns(response, w)\n\tdefault:\n\t\tfmt.Fprintf(w, \"Unknown output format %q.\\n\", c.Format)\n\t}\n}\n\nfunc (c *CommandLine) writeJSON(response *client.Response, w io.Writer) {\n\tvar data []byte\n\tvar err error\n\tif c.Pretty {\n\t\tdata, err = json.MarshalIndent(response, \"\", \"    \")\n\t} else {\n\t\tdata, err = json.Marshal(response)\n\t}\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Unable to parse json: %s\\n\", err)\n\t\treturn\n\t}\n\tfmt.Fprintln(w, string(data))\n}\n\nfunc tagsEqual(prev, current map[string]string) bool {\n\treturn reflect.DeepEqual(prev, current)\n}\n\nfunc columnsEqual(prev, current []string) bool {\n\treturn reflect.DeepEqual(prev, current)\n}\n\nfunc headersEqual(prev, current models.Row) bool {\n\tif prev.Name != current.Name {\n\t\treturn false\n\t}\n\treturn tagsEqual(prev.Tags, current.Tags) && columnsEqual(prev.Columns, current.Columns)\n}\n\nfunc (c *CommandLine) writeCSV(response *client.Response, w io.Writer) {\n\tcsvw := csv.NewWriter(w)\n\tvar previousHeaders models.Row\n\tfor _, result := range response.Results {\n\t\tsuppressHeaders := len(result.Series) > 0 && headersEqual(previousHeaders, result.Series[0])\n\t\tif !suppressHeaders && len(result.Series) > 0 {\n\t\t\tpreviousHeaders = models.Row{\n\t\t\t\tName:    result.Series[0].Name,\n\t\t\t\tTags:    result.Series[0].Tags,\n\t\t\t\tColumns: result.Series[0].Columns,\n\t\t\t}\n\t\t}\n\n\t\t// Create a tabbed writer for each result as they won't always line up\n\t\trows := c.formatResults(result, \"\\t\", suppressHeaders)\n\t\tfor _, r := range rows {\n\t\t\tcsvw.Write(strings.Split(r, 
\"\\t\"))\n\t\t}\n\t}\n\tcsvw.Flush()\n}\n\nfunc (c *CommandLine) writeColumns(response *client.Response, w io.Writer) {\n\t// Create a tabbed writer for each result as they won't always line up\n\twriter := new(tabwriter.Writer)\n\twriter.Init(w, 0, 8, 1, ' ', 0)\n\n\tvar previousHeaders models.Row\n\tfor i, result := range response.Results {\n\t\t// Print out all messages first\n\t\tfor _, m := range result.Messages {\n\t\t\tfmt.Fprintf(w, \"%s: %s.\\n\", m.Level, m.Text)\n\t\t}\n\t\t// Check to see if the headers are the same as the previous row.  If so, suppress them in the output\n\t\tsuppressHeaders := len(result.Series) > 0 && headersEqual(previousHeaders, result.Series[0])\n\t\tif !suppressHeaders && len(result.Series) > 0 {\n\t\t\tpreviousHeaders = models.Row{\n\t\t\t\tName:    result.Series[0].Name,\n\t\t\t\tTags:    result.Series[0].Tags,\n\t\t\t\tColumns: result.Series[0].Columns,\n\t\t\t}\n\t\t}\n\n\t\t// If we are suppressing headers, don't output the extra line return. If we\n\t\t// aren't suppressing headers, then we put out line returns between results\n\t\t// (not before the first result, and not after the last result).\n\t\tif !suppressHeaders && i > 0 {\n\t\t\tfmt.Fprintln(writer, \"\")\n\t\t}\n\n\t\trows := c.formatResults(result, \"\\t\", suppressHeaders)\n\t\tfor _, r := range rows {\n\t\t\tfmt.Fprintln(writer, r)\n\t\t}\n\n\t}\n\twriter.Flush()\n}\n\n// formatResults will behave differently if you are formatting for columns or csv\nfunc (c *CommandLine) formatResults(result client.Result, separator string, suppressHeaders bool) []string {\n\trows := []string{}\n\t// Create a tabbed writer for each result as they won't always line up\n\tfor i, row := range result.Series {\n\t\t// gather tags\n\t\ttags := []string{}\n\t\tfor k, v := range row.Tags {\n\t\t\ttags = append(tags, fmt.Sprintf(\"%s=%s\", k, v))\n\t\t\tsort.Strings(tags)\n\t\t}\n\n\t\tcolumnNames := []string{}\n\n\t\t// Only put name/tags in a column if format is csv\n\t\tif c.Format 
== \"csv\" {\n\t\t\tif len(tags) > 0 {\n\t\t\t\tcolumnNames = append([]string{\"tags\"}, columnNames...)\n\t\t\t}\n\n\t\t\tif row.Name != \"\" {\n\t\t\t\tcolumnNames = append([]string{\"name\"}, columnNames...)\n\t\t\t}\n\t\t}\n\n\t\tcolumnNames = append(columnNames, row.Columns...)\n\n\t\t// Output a line separator if we have more than one set or results and format is column\n\t\tif i > 0 && c.Format == \"column\" && !suppressHeaders {\n\t\t\trows = append(rows, \"\")\n\t\t}\n\n\t\t// If we are column format, we break out the name/tag to separate lines\n\t\tif c.Format == \"column\" && !suppressHeaders {\n\t\t\tif row.Name != \"\" {\n\t\t\t\tn := fmt.Sprintf(\"name: %s\", row.Name)\n\t\t\t\trows = append(rows, n)\n\t\t\t}\n\t\t\tif len(tags) > 0 {\n\t\t\t\tt := fmt.Sprintf(\"tags: %s\", (strings.Join(tags, \", \")))\n\t\t\t\trows = append(rows, t)\n\t\t\t}\n\t\t}\n\n\t\tif !suppressHeaders {\n\t\t\trows = append(rows, strings.Join(columnNames, separator))\n\t\t}\n\n\t\t// if format is column, write dashes under each column\n\t\tif c.Format == \"column\" && !suppressHeaders {\n\t\t\tlines := []string{}\n\t\t\tfor _, columnName := range columnNames {\n\t\t\t\tlines = append(lines, strings.Repeat(\"-\", len(columnName)))\n\t\t\t}\n\t\t\trows = append(rows, strings.Join(lines, separator))\n\t\t}\n\n\t\tfor _, v := range row.Values {\n\t\t\tvar values []string\n\t\t\tif c.Format == \"csv\" {\n\t\t\t\tif row.Name != \"\" {\n\t\t\t\t\tvalues = append(values, row.Name)\n\t\t\t\t}\n\t\t\t\tif len(tags) > 0 {\n\t\t\t\t\tvalues = append(values, strings.Join(tags, \",\"))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, vv := range v {\n\t\t\t\tvalues = append(values, interfaceToString(vv))\n\t\t\t}\n\t\t\trows = append(rows, strings.Join(values, separator))\n\t\t}\n\t}\n\treturn rows\n}\n\nfunc interfaceToString(v interface{}) string {\n\tswitch t := v.(type) {\n\tcase nil:\n\t\treturn \"\"\n\tcase bool:\n\t\treturn fmt.Sprintf(\"%v\", v)\n\tcase int, int8, int16, int32, int64, uint, 
uint8, uint16, uint32, uint64, uintptr:\n\t\treturn fmt.Sprintf(\"%d\", t)\n\tcase float32, float64:\n\t\treturn fmt.Sprintf(\"%v\", t)\n\tdefault:\n\t\treturn fmt.Sprintf(\"%v\", t)\n\t}\n}\n\n// Settings prints current settings.\nfunc (c *CommandLine) Settings() {\n\tw := new(tabwriter.Writer)\n\tw.Init(os.Stdout, 0, 1, 1, ' ', 0)\n\tfmt.Fprintln(w, \"Setting\\tValue\")\n\tfmt.Fprintln(w, \"--------\\t--------\")\n\tif c.Port > 0 {\n\t\tfmt.Fprintf(w, \"Host\\t%s:%d\\n\", c.Host, c.Port)\n\t} else {\n\t\tfmt.Fprintf(w, \"Host\\t%s\\n\", c.Host)\n\t}\n\tfmt.Fprintf(w, \"Username\\t%s\\n\", c.ClientConfig.Username)\n\tfmt.Fprintf(w, \"Database\\t%s\\n\", c.Database)\n\tfmt.Fprintf(w, \"RetentionPolicy\\t%s\\n\", c.RetentionPolicy)\n\tfmt.Fprintf(w, \"Pretty\\t%v\\n\", c.Pretty)\n\tfmt.Fprintf(w, \"Format\\t%s\\n\", c.Format)\n\tfmt.Fprintf(w, \"Write Consistency\\t%s\\n\", c.ClientConfig.WriteConsistency)\n\tfmt.Fprintf(w, \"Chunked\\t%v\\n\", c.Chunked)\n\tfmt.Fprintf(w, \"Chunk Size\\t%d\\n\", c.ChunkSize)\n\tfmt.Fprintln(w)\n\tw.Flush()\n}\n\nfunc (c *CommandLine) help() {\n\tfmt.Println(`Usage:\n        connect <host:port>   connects to another node specified by host:port\n        auth                  prompts for username and password\n        pretty                toggles pretty print for the json format\n        chunked               turns on chunked responses from server\n        chunk size <size>     sets the size of the chunked responses.  
Set to 0 to reset to the default chunked size\n        use <db_name>         sets current database\n        format <format>       specifies the format of the server responses: json, csv, or column\n        precision <format>    specifies the format of the timestamp: rfc3339, h, m, s, ms, u or ns\n        consistency <level>   sets write consistency level: any, one, quorum, or all\n        history               displays command history\n        settings              outputs the current settings for the shell\n        clear                 clears settings such as database or retention policy.  run 'clear' for help\n        exit/quit/ctrl+d      quits the influx shell\n\n        show databases        show database names\n        show series           show series information\n        show measurements     show measurement information\n        show tag keys         show tag key information\n        show field keys       show field key information\n\n        A full list of influxql commands can be found at:\n        https://docs.influxdata.com/influxdb/latest/query_language/spec/\n`)\n}\n\nfunc (c *CommandLine) history() {\n\tvar buf bytes.Buffer\n\tc.Line.WriteHistory(&buf)\n\tfmt.Print(buf.String())\n}\n\nfunc (c *CommandLine) saveHistory() {\n\tif historyFile, err := os.Create(c.historyFilePath); err != nil {\n\t\tfmt.Printf(\"There was an error writing history file: %s\\n\", err)\n\t} else {\n\t\tc.Line.WriteHistory(historyFile)\n\t\thistoryFile.Close()\n\t}\n}\n\nfunc (c *CommandLine) gopher() {\n\tfmt.Println(`\n                                          .-::-::://:-::-    .:/++/'\n                                     '://:-''/oo+//++o+/.://o-    ./+:\n                                  .:-.    '++-         .o/ '+yydhy'  o-\n                               .:/.      .h:         :osoys  .smMN-  :/\n                            -/:.'        s-         /MMMymh.   
'/y/  s'\n                         -+s:''''        d          -mMMms//     '-/o:\n                       -/++/++/////:.    o:          '... s-        :s.\n                     :+-+s-'       ':/'  's-             /+          'o:\n                   '+-'o:        /ydhsh.  '//.        '-o-             o-\n                  .y. o:        .MMMdm+y    ':+++:::/+:.'               s:\n                .-h/  y-        'sdmds'h -+ydds:::-.'                   'h.\n             .//-.d'  o:          '.' 'dsNMMMNh:.:++'                    :y\n            +y.  'd   's.            .s:mddds:     ++                     o/\n           'N-  odd    'o/.       './o-s-'   .---+++'                      o-\n           'N'  yNd      .://:/:::::. -s   -+/s/./s'                       'o/'\n            so'  .h         ''''       ////s: '+. .s                         +y'\n             os/-.y'                       's' 'y::+                          +d'\n               '.:o/                        -+:-:.'                            so.---.'\n                   o'                                                          'd-.''/s'\n                   .s'                                                          :y.''.y\n                    -s                                                           mo:::'\n                     ::                                                          yh\n                      //                                      ''''               /M'\n                       o+                                    .s///:/.            'N:\n                        :+                                   /:    -s'            ho\n                         's-                               -/s/:+/.+h'            +h\n                           ys'                            ':'    '-.              
-d\n                            oh                                                    .h\n                             /o                                                   .s\n                              s.                                                  .h\n                              -y                                                  .d\n                               m/                                                 -h\n                               +d                                                 /o\n                               'N-                                                y:\n                                h:                                                m.\n                                s-                                               -d\n                                o-                                               s+\n                                +-                                              'm'\n                                s/                                              oo--.\n                                y-                                             /s  ':+'\n                                s'                                           'od--' .d:\n                                -+                                         ':o: ':+-/+\n                                 y-                                      .:+-      '\n                                //o-                                 '.:+/.\n                                .-:+/'                           ''-/+/.\n                                    ./:'                    ''.:o+/-'\n                                      .+o:/:/+-'      ''.-+ooo/-'\n                                         o:   -h///++////-.\n                                        /:   .o/\n                                       //+  'y\n                                       ./sooy.\n\n`)\n}\n\n// Version prints the CLI version.\nfunc (c *CommandLine) Version() {\n\tfmt.Println(\"InfluxDB shell 
version:\", c.ClientVersion)\n}\n\nfunc (c *CommandLine) exit() {\n\t// write to history file\n\tc.saveHistory()\n\t// release line resources\n\tc.Line.Close()\n\tc.Line = nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_internal_test.go",
    "content": "package cli\n\nimport \"testing\"\n\nfunc TestParseCommand_InsertInto(t *testing.T) {\n\tt.Parallel()\n\n\tc := CommandLine{}\n\n\ttests := []struct {\n\t\tcmd, db, rp string\n\t}{\n\t\t{\n\t\t\tcmd: `INSERT INTO test cpu,host=serverA,region=us-west value=1.0`,\n\t\t\tdb:  \"\",\n\t\t\trp:  \"test\",\n\t\t},\n\t\t{\n\t\t\tcmd: ` INSERT INTO .test cpu,host=serverA,region=us-west value=1.0`,\n\t\t\tdb:  \"\",\n\t\t\trp:  \"test\",\n\t\t},\n\t\t{\n\t\t\tcmd: `INSERT INTO   \"test test\" cpu,host=serverA,region=us-west value=1.0`,\n\t\t\tdb:  \"\",\n\t\t\trp:  \"test test\",\n\t\t},\n\t\t{\n\t\t\tcmd: `Insert iNTO test.test cpu,host=serverA,region=us-west value=1.0`,\n\t\t\tdb:  \"test\",\n\t\t\trp:  \"test\",\n\t\t},\n\t\t{\n\t\t\tcmd: `insert into \"test test\" cpu,host=serverA,region=us-west value=1.0`,\n\t\t\tdb:  \"\",\n\t\t\trp:  \"test test\",\n\t\t},\n\t\t{\n\t\t\tcmd: `insert into \"d b\".\"test test\" cpu,host=serverA,region=us-west value=1.0`,\n\t\t\tdb:  \"d b\",\n\t\t\trp:  \"test test\",\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Logf(\"command: %s\", test.cmd)\n\t\tbp, err := c.parseInsert(test.cmd)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif bp.Database != test.db {\n\t\t\tt.Fatalf(`Command \"insert into\" db parsing failed, expected: %q, actual: %q`, test.db, bp.Database)\n\t\t}\n\t\tif bp.RetentionPolicy != test.rp {\n\t\t\tt.Fatalf(`Command \"insert into\" rp parsing failed, expected: %q, actual: %q`, test.rp, bp.RetentionPolicy)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_test.go",
    "content": "package cli_test\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/client\"\n\t\"github.com/influxdata/influxdb/cmd/influx/cli\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/peterh/liner\"\n)\n\nconst (\n\tCLIENT_VERSION = \"y.y\"\n\tSERVER_VERSION = \"x.x\"\n)\n\nfunc TestNewCLI(t *testing.T) {\n\tt.Parallel()\n\tc := cli.New(CLIENT_VERSION)\n\n\tif c == nil {\n\t\tt.Fatal(\"CommandLine shouldn't be nil.\")\n\t}\n\n\tif c.ClientVersion != CLIENT_VERSION {\n\t\tt.Fatalf(\"CommandLine version is %s but should be %s\", c.ClientVersion, CLIENT_VERSION)\n\t}\n}\n\nfunc TestRunCLI(t *testing.T) {\n\tt.Parallel()\n\tts := emptyTestServer()\n\tdefer ts.Close()\n\n\tu, _ := url.Parse(ts.URL)\n\th, p, _ := net.SplitHostPort(u.Host)\n\tc := cli.New(CLIENT_VERSION)\n\tc.Host = h\n\tc.Port, _ = strconv.Atoi(p)\n\tc.IgnoreSignals = true\n\tc.ForceTTY = true\n\tgo func() {\n\t\tclose(c.Quit)\n\t}()\n\tif err := c.Run(); err != nil {\n\t\tt.Fatalf(\"Run failed with error: %s\", err)\n\t}\n}\n\nfunc TestRunCLI_ExecuteInsert(t *testing.T) {\n\tt.Parallel()\n\tts := emptyTestServer()\n\tdefer ts.Close()\n\n\tu, _ := url.Parse(ts.URL)\n\th, p, _ := net.SplitHostPort(u.Host)\n\tc := cli.New(CLIENT_VERSION)\n\tc.Host = h\n\tc.Port, _ = strconv.Atoi(p)\n\tc.ClientConfig.Precision = \"ms\"\n\tc.Execute = \"INSERT sensor,floor=1 value=2\"\n\tc.IgnoreSignals = true\n\tc.ForceTTY = true\n\tif err := c.Run(); err != nil {\n\t\tt.Fatalf(\"Run failed with error: %s\", err)\n\t}\n}\n\nfunc TestSetAuth(t *testing.T) {\n\tt.Parallel()\n\tc := cli.New(CLIENT_VERSION)\n\tconfig := client.NewConfig()\n\tclient, _ := client.NewClient(config)\n\tc.Client = client\n\tu := \"userx\"\n\tp := \"pwdy\"\n\tc.SetAuth(\"auth \" + u + \" \" + p)\n\n\t// validate CLI configuration\n\tif c.ClientConfig.Username != u 
{\n\t\tt.Fatalf(\"Username is %s but should be %s\", c.ClientConfig.Username, u)\n\t}\n\tif c.ClientConfig.Password != p {\n\t\tt.Fatalf(\"Password is %s but should be %s\", c.ClientConfig.Password, p)\n\t}\n}\n\nfunc TestSetPrecision(t *testing.T) {\n\tt.Parallel()\n\tc := cli.New(CLIENT_VERSION)\n\tconfig := client.NewConfig()\n\tclient, _ := client.NewClient(config)\n\tc.Client = client\n\n\t// validate set non-default precision\n\tp := \"ns\"\n\tc.SetPrecision(\"precision \" + p)\n\tif c.ClientConfig.Precision != p {\n\t\tt.Fatalf(\"Precision is %s but should be %s\", c.ClientConfig.Precision, p)\n\t}\n\n\t// validate set default precision which equals empty string\n\tp = \"rfc3339\"\n\tc.SetPrecision(\"precision \" + p)\n\tif c.ClientConfig.Precision != \"\" {\n\t\tt.Fatalf(\"Precision is %s but should be empty\", c.ClientConfig.Precision)\n\t}\n}\n\nfunc TestSetFormat(t *testing.T) {\n\tt.Parallel()\n\tc := cli.New(CLIENT_VERSION)\n\tconfig := client.NewConfig()\n\tclient, _ := client.NewClient(config)\n\tc.Client = client\n\n\t// validate set non-default format\n\tf := \"json\"\n\tc.SetFormat(\"format \" + f)\n\tif c.Format != f {\n\t\tt.Fatalf(\"Format is %s but should be %s\", c.Format, f)\n\t}\n}\n\nfunc Test_SetChunked(t *testing.T) {\n\tt.Parallel()\n\tc := cli.New(CLIENT_VERSION)\n\tconfig := client.NewConfig()\n\tclient, _ := client.NewClient(config)\n\tc.Client = client\n\n\t// make sure chunked is on by default\n\tif got, exp := c.Chunked, true; got != exp {\n\t\tt.Fatalf(\"chunked should be on by default.  got %v, exp %v\", got, exp)\n\t}\n\n\t// turn chunked off\n\tif err := c.ParseCommand(\"Chunked\"); err != nil {\n\t\tt.Fatalf(\"setting chunked failed: err: %s\", err)\n\t}\n\n\tif got, exp := c.Chunked, false; got != exp {\n\t\tt.Fatalf(\"setting chunked failed.  
got %v, exp %v\", got, exp)\n\t}\n\n\t// turn chunked back on\n\tif err := c.ParseCommand(\"Chunked\"); err != nil {\n\t\tt.Fatalf(\"setting chunked failed: err: %s\", err)\n\t}\n\n\tif got, exp := c.Chunked, true; got != exp {\n\t\tt.Fatalf(\"setting chunked failed.  got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc Test_SetChunkSize(t *testing.T) {\n\tt.Parallel()\n\tc := cli.New(CLIENT_VERSION)\n\tconfig := client.NewConfig()\n\tclient, _ := client.NewClient(config)\n\tc.Client = client\n\n\t// check default chunk size\n\tif got, exp := c.ChunkSize, 0; got != exp {\n\t\tt.Fatalf(\"unexpected chunk size.  got %d, exp %d\", got, exp)\n\t}\n\n\ttests := []struct {\n\t\tcommand string\n\t\texp     int\n\t}{\n\t\t{\"chunk size 20\", 20},\n\t\t{\"   CHunk     siZE  55    \", 55},\n\t\t{\"chunk 10\", 10},\n\t\t{\"     chuNK     15\", 15},\n\t\t{\"chunk size -60\", 0},\n\t\t{\"chunk size 10\", 10},\n\t\t{\"chunk size 0\", 0},\n\t\t{\"chunk size 10\", 10},\n\t\t{\"chunk size junk\", 10},\n\t}\n\n\tfor _, test := range tests {\n\t\tif err := c.ParseCommand(test.command); err != nil {\n\t\t\tt.Logf(\"command: %q\", test.command)\n\t\t\tt.Fatalf(\"setting chunked failed: err: %s\", err)\n\t\t}\n\n\t\tif got, exp := c.ChunkSize, test.exp; got != exp {\n\t\t\tt.Logf(\"command: %q\", test.command)\n\t\t\tt.Fatalf(\"unexpected chunk size.  
got %d, exp %d\", got, exp)\n\t\t}\n\t}\n}\n\nfunc TestSetWriteConsistency(t *testing.T) {\n\tt.Parallel()\n\tc := cli.New(CLIENT_VERSION)\n\tconfig := client.NewConfig()\n\tclient, _ := client.NewClient(config)\n\tc.Client = client\n\n\t// set valid write consistency\n\tconsistency := \"all\"\n\tc.SetWriteConsistency(\"consistency \" + consistency)\n\tif c.ClientConfig.WriteConsistency != consistency {\n\t\tt.Fatalf(\"WriteConsistency is %s but should be %s\", c.ClientConfig.WriteConsistency, consistency)\n\t}\n\n\t// set different valid write consistency and validate change\n\tconsistency = \"quorum\"\n\tc.SetWriteConsistency(\"consistency \" + consistency)\n\tif c.ClientConfig.WriteConsistency != consistency {\n\t\tt.Fatalf(\"WriteConsistency is %s but should be %s\", c.ClientConfig.WriteConsistency, consistency)\n\t}\n\n\t// set invalid write consistency and verify there was no change\n\tinvalidConsistency := \"invalid_consistency\"\n\tc.SetWriteConsistency(\"consistency \" + invalidConsistency)\n\tif c.ClientConfig.WriteConsistency == invalidConsistency {\n\t\tt.Fatalf(\"WriteConsistency is %s but should be %s\", c.ClientConfig.WriteConsistency, consistency)\n\t}\n}\n\nfunc TestParseCommand_CommandsExist(t *testing.T) {\n\tt.Parallel()\n\tc, err := client.NewClient(client.Config{})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error %v\", err)\n\t}\n\n\tm := cli.CommandLine{Client: c, Line: liner.NewLiner()}\n\ttests := []struct {\n\t\tcmd string\n\t}{\n\t\t{cmd: \"gopher\"},\n\t\t{cmd: \"auth\"},\n\t\t{cmd: \"help\"},\n\t\t{cmd: \"format\"},\n\t\t{cmd: \"precision\"},\n\t\t{cmd: \"settings\"},\n\t}\n\tfor _, test := range tests {\n\t\tif err := m.ParseCommand(test.cmd); err != nil {\n\t\t\tt.Fatalf(`Got error %v for command %q, expected nil`, err, test.cmd)\n\t\t}\n\t}\n}\n\nfunc TestParseCommand_Connect(t *testing.T) {\n\tt.Parallel()\n\tts := emptyTestServer()\n\tdefer ts.Close()\n\n\tu, _ := url.Parse(ts.URL)\n\tcmd := \"connect \" + u.Host\n\tc := 
cli.CommandLine{}\n\n\t// assert connection is established\n\tif err := c.ParseCommand(cmd); err != nil {\n\t\tt.Fatalf(\"There was an error while connecting to %v: %v\", u.Path, err)\n\t}\n\n\t// assert server version is populated\n\tif c.ServerVersion != SERVER_VERSION {\n\t\tt.Fatalf(\"Server version is %s but should be %s.\", c.ServerVersion, SERVER_VERSION)\n\t}\n}\n\nfunc TestParseCommand_TogglePretty(t *testing.T) {\n\tt.Parallel()\n\tc := cli.CommandLine{}\n\tif c.Pretty {\n\t\tt.Fatalf(`Pretty should be false.`)\n\t}\n\tc.ParseCommand(\"pretty\")\n\tif !c.Pretty {\n\t\tt.Fatalf(`Pretty should be true.`)\n\t}\n\tc.ParseCommand(\"pretty\")\n\tif c.Pretty {\n\t\tt.Fatalf(`Pretty should be false.`)\n\t}\n}\n\nfunc TestParseCommand_Exit(t *testing.T) {\n\tt.Parallel()\n\ttests := []struct {\n\t\tcmd string\n\t}{\n\t\t{cmd: \"exit\"},\n\t\t{cmd: \" exit\"},\n\t\t{cmd: \"exit \"},\n\t\t{cmd: \"Exit \"},\n\t}\n\n\tfor _, test := range tests {\n\t\tc := cli.CommandLine{Quit: make(chan struct{}, 1)}\n\t\tc.ParseCommand(test.cmd)\n\t\t// channel should be closed\n\t\tif _, ok := <-c.Quit; ok {\n\t\t\tt.Fatalf(`Command \"exit\" failed for %q.`, test.cmd)\n\t\t}\n\t}\n}\n\nfunc TestParseCommand_Quit(t *testing.T) {\n\tt.Parallel()\n\ttests := []struct {\n\t\tcmd string\n\t}{\n\t\t{cmd: \"quit\"},\n\t\t{cmd: \" quit\"},\n\t\t{cmd: \"quit \"},\n\t\t{cmd: \"Quit \"},\n\t}\n\n\tfor _, test := range tests {\n\t\tc := cli.CommandLine{Quit: make(chan struct{}, 1)}\n\t\tc.ParseCommand(test.cmd)\n\t\t// channel should be closed\n\t\tif _, ok := <-c.Quit; ok {\n\t\t\tt.Fatalf(`Command \"quit\" failed for %q.`, test.cmd)\n\t\t}\n\t}\n}\n\nfunc TestParseCommand_Use(t *testing.T) {\n\tt.Parallel()\n\tts := emptyTestServer()\n\tdefer ts.Close()\n\n\tu, _ := url.Parse(ts.URL)\n\tconfig := client.Config{URL: *u}\n\tc, err := client.NewClient(config)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  
expected %v, actual %v\", nil, err)\n\t}\n\n\ttests := []struct {\n\t\tcmd string\n\t}{\n\t\t{cmd: \"use db\"},\n\t\t{cmd: \" use db\"},\n\t\t{cmd: \"use db \"},\n\t\t{cmd: \"use db;\"},\n\t\t{cmd: \"use db; \"},\n\t\t{cmd: \"Use db\"},\n\t}\n\n\tfor _, test := range tests {\n\t\tm := cli.CommandLine{Client: c}\n\t\tif err := m.ParseCommand(test.cmd); err != nil {\n\t\t\tt.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd)\n\t\t}\n\n\t\tif m.Database != \"db\" {\n\t\t\tt.Fatalf(`Command \"use\" changed database to %q. Expected db`, m.Database)\n\t\t}\n\t}\n}\n\nfunc TestParseCommand_UseAuth(t *testing.T) {\n\tt.Parallel()\n\tts := emptyTestServer()\n\tdefer ts.Close()\n\n\tu, _ := url.Parse(ts.URL)\n\ttests := []struct {\n\t\tcmd      string\n\t\tuser     string\n\t\tdatabase string\n\t}{\n\t\t{\n\t\t\tcmd:      \"use db\",\n\t\t\tuser:     \"admin\",\n\t\t\tdatabase: \"db\",\n\t\t},\n\t\t{\n\t\t\tcmd:      \"use blank\",\n\t\t\tuser:     \"admin\",\n\t\t\tdatabase: \"\",\n\t\t},\n\t\t{\n\t\t\tcmd:      \"use db\",\n\t\t\tuser:     \"anonymous\",\n\t\t\tdatabase: \"db\",\n\t\t},\n\t\t{\n\t\t\tcmd:      \"use blank\",\n\t\t\tuser:     \"anonymous\",\n\t\t\tdatabase: \"blank\",\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tconfig := client.Config{URL: *u, Username: tt.user}\n\t\tfmt.Println(\"using auth:\", tt.user)\n\t\tc, err := client.NewClient(config)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%d. unexpected error.  expected %v, actual %v\", i, nil, err)\n\t\t\tcontinue\n\t\t}\n\t\tm := cli.CommandLine{Client: c}\n\t\tm.ClientConfig.Username = tt.user\n\n\t\tif err := m.ParseCommand(tt.cmd); err != nil {\n\t\t\tt.Fatalf(`%d. Got error %v for command %q, expected nil.`, i, err, tt.cmd)\n\t\t}\n\n\t\tif m.Database != tt.database {\n\t\t\tt.Fatalf(`%d. Command \"use\" changed database to %q. 
Expected %q`, i, m.Database, tt.database)\n\t\t}\n\t}\n}\n\nfunc TestParseCommand_Consistency(t *testing.T) {\n\tt.Parallel()\n\tc := cli.CommandLine{}\n\ttests := []struct {\n\t\tcmd string\n\t}{\n\t\t{cmd: \"consistency one\"},\n\t\t{cmd: \" consistency one\"},\n\t\t{cmd: \"consistency one \"},\n\t\t{cmd: \"consistency one;\"},\n\t\t{cmd: \"consistency one; \"},\n\t\t{cmd: \"Consistency one\"},\n\t}\n\n\tfor _, test := range tests {\n\t\tif err := c.ParseCommand(test.cmd); err != nil {\n\t\t\tt.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd)\n\t\t}\n\n\t\tif c.ClientConfig.WriteConsistency != \"one\" {\n\t\t\tt.Fatalf(`Command \"consistency\" changed consistency to %q. Expected one`, c.ClientConfig.WriteConsistency)\n\t\t}\n\t}\n}\n\nfunc TestParseCommand_Insert(t *testing.T) {\n\tt.Parallel()\n\tts := emptyTestServer()\n\tdefer ts.Close()\n\n\tu, _ := url.Parse(ts.URL)\n\tconfig := client.Config{URL: *u}\n\tc, err := client.NewClient(config)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error.  
expected %v, actual %v\", nil, err)\n\t}\n\tm := cli.CommandLine{Client: c}\n\n\ttests := []struct {\n\t\tcmd string\n\t}{\n\t\t{cmd: \"INSERT cpu,host=serverA,region=us-west value=1.0\"},\n\t\t{cmd: \" INSERT cpu,host=serverA,region=us-west value=1.0\"},\n\t\t{cmd: \"INSERT   cpu,host=serverA,region=us-west value=1.0\"},\n\t\t{cmd: \"insert cpu,host=serverA,region=us-west    value=1.0    \"},\n\t\t{cmd: \"insert\"},\n\t\t{cmd: \"Insert \"},\n\t\t{cmd: \"insert c\"},\n\t\t{cmd: \"insert int\"},\n\t}\n\n\tfor _, test := range tests {\n\t\tif err := m.ParseCommand(test.cmd); err != nil {\n\t\t\tt.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd)\n\t\t}\n\t}\n}\n\nfunc TestParseCommand_History(t *testing.T) {\n\tt.Parallel()\n\tc := cli.CommandLine{Line: liner.NewLiner()}\n\tdefer c.Line.Close()\n\n\t// append one entry to history\n\tc.Line.AppendHistory(\"abc\")\n\n\ttests := []struct {\n\t\tcmd string\n\t}{\n\t\t{cmd: \"history\"},\n\t\t{cmd: \" history\"},\n\t\t{cmd: \"history \"},\n\t\t{cmd: \"History \"},\n\t}\n\n\tfor _, test := range tests {\n\t\tif err := c.ParseCommand(test.cmd); err != nil {\n\t\t\tt.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd)\n\t\t}\n\t}\n\n\t// buf size should be at least 1\n\tvar buf bytes.Buffer\n\tc.Line.WriteHistory(&buf)\n\tif buf.Len() < 1 {\n\t\tt.Fatal(\"History is borked\")\n\t}\n}\n\nfunc TestParseCommand_HistoryWithBlankCommand(t *testing.T) {\n\tt.Parallel()\n\tc := cli.CommandLine{Line: liner.NewLiner()}\n\tdefer c.Line.Close()\n\n\t// append one entry to history\n\tc.Line.AppendHistory(\"x\")\n\n\ttests := []struct {\n\t\tcmd string\n\t\terr error\n\t}{\n\t\t{cmd: \"history\"},\n\t\t{cmd: \" history\"},\n\t\t{cmd: \"history \"},\n\t\t{cmd: \"\", err: cli.ErrBlankCommand},      // shouldn't be persisted in history\n\t\t{cmd: \" \", err: cli.ErrBlankCommand},     // shouldn't be persisted in history\n\t\t{cmd: \"     \", err: cli.ErrBlankCommand}, // shouldn't be persisted in 
history\n\t}\n\n\t// a blank command will return cli.ErrBlankCommand.\n\tfor _, test := range tests {\n\t\tif err := c.ParseCommand(test.cmd); err != test.err {\n\t\t\tt.Errorf(`Got error %v for command %q, expected %v`, err, test.cmd, test.err)\n\t\t}\n\t}\n\n\t// buf shall not contain empty commands\n\tvar buf bytes.Buffer\n\tc.Line.WriteHistory(&buf)\n\tscanner := bufio.NewScanner(&buf)\n\tfor scanner.Scan() {\n\t\tif strings.TrimSpace(scanner.Text()) == \"\" {\n\t\t\tt.Fatal(\"Empty commands should not be persisted in history.\")\n\t\t}\n\t}\n}\n\n// helper methods\n\nfunc emptyTestServer() *httptest.Server {\n\treturn httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"X-Influxdb-Version\", SERVER_VERSION)\n\n\t\t// Fake authorization entirely based on the username.\n\t\tauthorized := false\n\t\tuser, _, _ := r.BasicAuth()\n\t\tswitch user {\n\t\tcase \"\", \"admin\":\n\t\t\tauthorized = true\n\t\t}\n\n\t\tswitch r.URL.Path {\n\t\tcase \"/query\":\n\t\t\tvalues := r.URL.Query()\n\t\t\tparser := influxql.NewParser(bytes.NewBufferString(values.Get(\"q\")))\n\t\t\tq, err := parser.ParseQuery()\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstmt := q.Statements[0]\n\n\t\t\tswitch stmt.(type) {\n\t\t\tcase *influxql.ShowDatabasesStatement:\n\t\t\t\tif authorized {\n\t\t\t\t\tio.WriteString(w, `{\"results\":[{\"series\":[{\"name\":\"databases\",\"columns\":[\"name\"],\"values\":[[\"db\"]]}]}]}`)\n\t\t\t\t} else {\n\t\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t\t\tio.WriteString(w, fmt.Sprintf(`{\"error\":\"error authorizing query: %s not authorized to execute statement 'SHOW DATABASES', requires admin privilege\"}`, user))\n\t\t\t\t}\n\t\t\tcase *influxql.ShowDiagnosticsStatement:\n\t\t\t\tio.WriteString(w, `{\"results\":[{}]}`)\n\t\t\t}\n\t\tcase \"/write\":\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t}\n\t}))\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx/cli/parser.go",
    "content": "package cli\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n)\n\nfunc parseDatabaseAndRetentionPolicy(stmt []byte) (string, string, error) {\n\tvar db, rp []byte\n\tvar quoted bool\n\tvar seperatorCount int\n\n\tstmt = bytes.TrimSpace(stmt)\n\n\tfor _, b := range stmt {\n\t\tif b == '\"' {\n\t\t\tquoted = !quoted\n\t\t\tcontinue\n\t\t}\n\t\tif b == '.' && !quoted {\n\t\t\tseperatorCount++\n\t\t\tif seperatorCount > 1 {\n\t\t\t\treturn \"\", \"\", fmt.Errorf(\"unable to parse database and retention policy from %s\", string(stmt))\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif seperatorCount == 1 {\n\t\t\trp = append(rp, b)\n\t\t\tcontinue\n\t\t}\n\t\tdb = append(db, b)\n\t}\n\treturn string(db), string(rp), nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx/cli/parser_internal_test.go",
    "content": "package cli\n\nimport (\n\t\"errors\"\n\t\"testing\"\n)\n\nfunc Test_parseDatabaseAndretentionPolicy(t *testing.T) {\n\ttests := []struct {\n\t\tstmt string\n\t\tdb   string\n\t\trp   string\n\t\terr  error\n\t}{\n\t\t{\n\t\t\tstmt: `foo`,\n\t\t\tdb:   \"foo\",\n\t\t},\n\t\t{\n\t\t\tstmt: `\"foo.bar\"`,\n\t\t\tdb:   \"foo.bar\",\n\t\t},\n\t\t{\n\t\t\tstmt: `\"foo.bar\".`,\n\t\t\tdb:   \"foo.bar\",\n\t\t},\n\t\t{\n\t\t\tstmt: `.\"foo.bar\"`,\n\t\t\trp:   \"foo.bar\",\n\t\t},\n\t\t{\n\t\t\tstmt: `foo.bar`,\n\t\t\tdb:   \"foo\",\n\t\t\trp:   \"bar\",\n\t\t},\n\t\t{\n\t\t\tstmt: `\"foo\".bar`,\n\t\t\tdb:   \"foo\",\n\t\t\trp:   \"bar\",\n\t\t},\n\t\t{\n\t\t\tstmt: `\"foo\".\"bar\"`,\n\t\t\tdb:   \"foo\",\n\t\t\trp:   \"bar\",\n\t\t},\n\t\t{\n\t\t\tstmt: `\"foo.bin\".\"bar\"`,\n\t\t\tdb:   \"foo.bin\",\n\t\t\trp:   \"bar\",\n\t\t},\n\t\t{\n\t\t\tstmt: `\"foo.bin\".\"bar.baz....\"`,\n\t\t\tdb:   \"foo.bin\",\n\t\t\trp:   \"bar.baz....\",\n\t\t},\n\t\t{\n\t\t\tstmt: `  \"foo.bin\".\"bar.baz....\"  `,\n\t\t\tdb:   \"foo.bin\",\n\t\t\trp:   \"bar.baz....\",\n\t\t},\n\n\t\t{\n\t\t\tstmt: `\"foo.bin\".\"bar\".boom`,\n\t\t\terr:  errors.New(\"foo\"),\n\t\t},\n\t\t{\n\t\t\tstmt: \"foo.bar.\",\n\t\t\terr:  errors.New(\"foo\"),\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tdb, rp, err := parseDatabaseAndRetentionPolicy([]byte(test.stmt))\n\t\tif err != nil && test.err == nil {\n\t\t\tt.Errorf(\"unexpected error: got %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif test.err != nil && err == nil {\n\t\t\tt.Errorf(\"expected err: got: nil, exp: %s\", test.err)\n\t\t\tcontinue\n\t\t}\n\t\tif db != test.db {\n\t\t\tt.Errorf(\"unexpected database: got: %s, exp: %s\", db, test.db)\n\t\t}\n\t\tif rp != test.rp {\n\t\t\tt.Errorf(\"unexpected retention policy: got: %s, exp: %s\", rp, test.rp)\n\t\t}\n\t}\n\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx/main.go",
    "content": "// The influx command is a CLI client to InfluxDB.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/influxdata/influxdb/client\"\n\t\"github.com/influxdata/influxdb/cmd/influx/cli\"\n)\n\n// These variables are populated via the Go linker.\nvar (\n\tversion string\n)\n\nconst (\n\t// defaultFormat is the default format of the results when issuing queries\n\tdefaultFormat = \"column\"\n\n\t// defaultPrecision is the default timestamp format of the results when issuing queries\n\tdefaultPrecision = \"ns\"\n\n\t// defaultPPS is the default points per second that the import will throttle at\n\t// by default it's 0, which means it will not throttle\n\tdefaultPPS = 0\n)\n\nfunc init() {\n\t// If version is not set, make that clear.\n\tif version == \"\" {\n\t\tversion = \"unknown\"\n\t}\n}\n\nfunc main() {\n\tc := cli.New(version)\n\n\tfs := flag.NewFlagSet(\"InfluxDB shell version \"+version, flag.ExitOnError)\n\tfs.StringVar(&c.Host, \"host\", client.DefaultHost, \"Influxdb host to connect to.\")\n\tfs.IntVar(&c.Port, \"port\", client.DefaultPort, \"Influxdb port to connect to.\")\n\tfs.StringVar(&c.ClientConfig.UnixSocket, \"socket\", \"\", \"Influxdb unix socket to connect to.\")\n\tfs.StringVar(&c.ClientConfig.Username, \"username\", \"\", \"Username to connect to the server.\")\n\tfs.StringVar(&c.ClientConfig.Password, \"password\", \"\", `Password to connect to the server.  
Leaving blank will prompt for password (--password=\"\").`)\n\tfs.StringVar(&c.Database, \"database\", c.Database, \"Database to connect to the server.\")\n\tfs.BoolVar(&c.Ssl, \"ssl\", false, \"Use https for connecting to cluster.\")\n\tfs.BoolVar(&c.ClientConfig.UnsafeSsl, \"unsafeSsl\", false, \"Set this when connecting to the cluster using https and not use SSL verification.\")\n\tfs.StringVar(&c.Format, \"format\", defaultFormat, \"Format specifies the format of the server responses:  json, csv, or column.\")\n\tfs.StringVar(&c.ClientConfig.Precision, \"precision\", defaultPrecision, \"Precision specifies the format of the timestamp:  rfc3339,h,m,s,ms,u or ns.\")\n\tfs.StringVar(&c.ClientConfig.WriteConsistency, \"consistency\", \"all\", \"Set write consistency level: any, one, quorum, or all.\")\n\tfs.BoolVar(&c.Pretty, \"pretty\", false, \"Turns on pretty print for the json format.\")\n\tfs.StringVar(&c.Execute, \"execute\", c.Execute, \"Execute command and quit.\")\n\tfs.BoolVar(&c.ShowVersion, \"version\", false, \"Displays the InfluxDB version.\")\n\tfs.BoolVar(&c.Import, \"import\", false, \"Import a previous database.\")\n\tfs.IntVar(&c.ImporterConfig.PPS, \"pps\", defaultPPS, \"How many points per second the import will allow.  By default it is zero and will not throttle importing.\")\n\tfs.StringVar(&c.ImporterConfig.Path, \"path\", \"\", \"path to the file to import\")\n\tfs.BoolVar(&c.ImporterConfig.Compressed, \"compressed\", false, \"set to true if the import file is compressed\")\n\n\t// Define our own custom usage to print\n\tfs.Usage = func() {\n\t\tfmt.Println(`Usage of influx:\n  -version\n       Display the version and exit.\n  -host 'host name'\n       Host to connect to.\n  -port 'port #'\n       Port to connect to.\n  -socket 'unix domain socket'\n       Unix socket to connect to.\n  -database 'database name'\n       Database to connect to the server.\n  -password 'password'\n      Password to connect to the server.  
Leaving blank will prompt for password (--password '').\n  -username 'username'\n       Username to connect to the server.\n  -ssl\n        Use https for requests.\n  -unsafeSsl\n        Set this when connecting to the cluster using https and not use SSL verification.\n  -execute 'command'\n       Execute command and quit.\n  -format 'json|csv|column'\n       Format specifies the format of the server responses:  json, csv, or column.\n  -precision 'rfc3339|h|m|s|ms|u|ns'\n       Precision specifies the format of the timestamp:  rfc3339, h, m, s, ms, u or ns.\n  -consistency 'any|one|quorum|all'\n       Set write consistency level: any, one, quorum, or all\n  -pretty\n       Turns on pretty print for the json format.\n  -import\n       Import a previous database export from file\n  -pps\n       How many points per second the import will allow.  By default it is zero and will not throttle importing.\n  -path\n       Path to file to import\n  -compressed\n       Set to true if the import file is compressed\n\nExamples:\n\n    # Use influx in a non-interactive mode to query the database \"metrics\" and pretty print json:\n    $ influx -database 'metrics' -execute 'select * from cpu' -format 'json' -pretty\n\n    # Connect to a specific database on startup and set database context:\n    $ influx -database 'metrics' -host 'localhost' -port '8086'\n`)\n\t}\n\tfs.Parse(os.Args[1:])\n\n\tif c.ShowVersion {\n\t\tc.Version()\n\t\tos.Exit(0)\n\t}\n\n\tif err := c.Run(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx_stress/README.md",
    "content": "# `influx_stress`\n\nIf you run into any issues with this tool please mention @jackzampolin when you create an issue.\n\n## Ways to run\n\n### `influx_stress`\nThis runs a basic stress test with the [default config](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml) For more information on the configuration file please see the default.\n\n### `influx_stress -config someConfig.toml`\nThis runs the stress test with a valid configuration file located at `someConfig.tom`\n\n### `influx_stress -v2 -config someConfig.iql`\nThis runs the stress test with a valid `v2` configuration file. For more information about the `v2` stress test see the [v2 stress README](https://github.com/influxdata/influxdb/blob/master/stress/v2/README.md).\n\n## Flags\n\nIf flags are defined they overwrite the config from any file passed in.\n\n### `-addr` string\nIP address and port of database where response times will persist (e.g., localhost:8086)\n\n`default` = \"http://localhost:8086\"\n\n### `-config` string\nThe relative path to the stress test configuration file.\n\n`default` = [config](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml)\n\n### `-cpuprofile` filename\nWrites the result of Go's cpu profile to filename\n\n`default` = no profiling\n\n### `-database` string\nName of database on `-addr` that `influx_stress` will persist write and query response times\n\n`default` = \"stress\"\n\n### `-tags` value\nA comma separated list of tags to add to write and query response times.\n\n`default` = \"\"\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx_stress/examples/template.toml",
    "content": "# This section can be removed\n[provision]\n  # The basic provisioner simply deletes and creates database.\n  # If `reset_database` is false, it will not attempt to delete the database\n  [provision.basic]\n    # If enabled the provisioner will actually run\n    enabled = true\n    # Address of the instance that is to be provisioned\n    address = \"localhost:8086\"\n    # Database the will be created/deleted\n    database = \"stress\"\n    # Attempt to delete database\n    reset_database = true\n\n# This section cannot be commented out\n# To prevent writes set `enabled=false`\n# in [write.influx_client.basic]\n[write]\n  [write.point_generator]\n    # The basic point generator will generate points of the form\n    # `cpu,host=server-%v,location=us-west value=234 123456`\n    [write.point_generator.basic]\n      # number of points that will be written for each of the series\n      point_count = 100\n      # number of series\n      series_count = 100000\n      # How much time between each timestamp\n      tick = \"10s\"\n      # Randomize timestamp a bit (not functional)\n      jitter = true\n      # Precision of points that are being written\n      precision = \"s\"\n      # name of the measurement that will be written\n      measurement = \"cpu\"\n      # The date for the first point that is written into influx\n      start_date = \"2006-Jan-02\"\n      # Defines a tag for a series\n      [[write.point_generator.basic.tag]]\n        key = \"host\"\n        value = \"server\"\n      [[write.point_generator.basic.tag]]\n        key = \"location\"\n        value = \"us-west\"\n      # Defines a field for a series\n      [[write.point_generator.basic.field]]\n        key = \"value\"\n        value = \"float64\" # supported types: float64, int, bool\n\n\n  [write.influx_client]\n    [write.influx_client.basic]\n      # If enabled the writer will actually write\n      enabled = true\n      # Addresses is an array of the Influxdb instances\n      
addresses = [\"localhost:8086\"] # stress_test_server runs on port 1234\n      # Database that is being written to\n      database = \"stress\"\n      # Precision of points that are being written\n      precision = \"s\"\n      # Size of batches that are sent to db\n      batch_size = 10000\n      # Interval between each batch\n      batch_interval = \"0s\"\n      # How many concurrent writers to the db\n      concurrency = 10\n      # ssl enabled?\n      ssl = false\n      # format of points that are written to influxdb\n      format = \"line_http\" # line_udp (not supported yet), graphite_tcp (not supported yet), graphite_udp (not supported yet)\n\n# This section can be removed\n[read]\n  [read.query_generator]\n    [read.query_generator.basic]\n      # Template of the query that will be ran against the instance\n      template = \"SELECT count(value) FROM cpu where host='server-%v'\"\n      # How many times the templated query will be ran\n      query_count = 250\n\n  [read.query_client]\n    [read.query_client.basic]\n      # if enabled the reader will actually read\n      enabled = true\n      # Address of the instance that will be queried\n      addresses = [\"localhost:8086\"]\n      # Database that will be queried\n      database = \"stress\"\n      # Interval bewteen queries\n      query_interval = \"100ms\"\n      # Number of concurrent queriers\n      concurrency = 1\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx_stress/influx_stress.go",
    "content": "// Command influx_stress is deprecated; use github.com/influxdata/influx-stress instead.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"runtime/pprof\"\n\n\t\"github.com/influxdata/influxdb/stress\"\n\tv2 \"github.com/influxdata/influxdb/stress/v2\"\n)\n\nvar (\n\tuseV2      = flag.Bool(\"v2\", false, \"Use version 2 of stress tool\")\n\tconfig     = flag.String(\"config\", \"\", \"The stress test file\")\n\tcpuprofile = flag.String(\"cpuprofile\", \"\", \"Write the cpu profile to `filename`\")\n\tdb         = flag.String(\"db\", \"\", \"target database within test system for write and query load\")\n)\n\nfunc main() {\n\to := stress.NewOutputConfig()\n\tflag.Parse()\n\n\tif *cpuprofile != \"\" {\n\t\tf, err := os.Create(*cpuprofile)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\t\tdefer pprof.StopCPUProfile()\n\t}\n\n\tif *useV2 {\n\t\tif *config != \"\" {\n\t\t\tv2.RunStress(*config)\n\t\t} else {\n\t\t\tv2.RunStress(\"stress/v2/iql/file.iql\")\n\t\t}\n\t} else {\n\n\t\tc, err := stress.NewConfig(*config)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\n\t\tif *db != \"\" {\n\t\t\tc.Provision.Basic.Database = *db\n\t\t\tc.Write.InfluxClients.Basic.Database = *db\n\t\t\tc.Read.QueryClients.Basic.Database = *db\n\t\t}\n\n\t\tw := stress.NewWriter(c.Write.PointGenerators.Basic, &c.Write.InfluxClients.Basic)\n\t\tr := stress.NewQuerier(&c.Read.QueryGenerators.Basic, &c.Read.QueryClients.Basic)\n\t\ts := stress.NewStressTest(&c.Provision.Basic, w, r)\n\n\t\tbw := stress.NewBroadcastChannel()\n\t\tbw.Register(c.Write.InfluxClients.Basic.BasicWriteHandler)\n\t\tbw.Register(o.HTTPHandler(\"write\"))\n\n\t\tbr := stress.NewBroadcastChannel()\n\t\tbr.Register(c.Read.QueryClients.Basic.BasicReadHandler)\n\t\tbr.Register(o.HTTPHandler(\"read\"))\n\n\t\ts.Start(bw.Handle, br.Handle)\n\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx_tsm/README.md",
    "content": "# Converting b1 and bz1 shards to tsm1\n\n`influx_tsm` is a tool for converting b1 and bz1 shards to tsm1\nformat. Converting shards to tsm1 format results in a very significant\nreduction in disk usage, and significantly improved write-throughput,\nwhen writing data into those shards.\n\nConversion can be controlled on a database-by-database basis. By\ndefault a database is backed up before it is converted, allowing you\nto roll back any changes. Because of the backup process, ensure the\nhost system has at least as much free disk space as the disk space\nconsumed by the _data_ directory of your InfluxDB system.\n\nThe tool automatically ignores tsm1 shards, and can be run\nidempotently on any database.\n\nConversion is an offline process, and the InfluxDB system must be\nstopped during conversion. However the conversion process reads and\nwrites shards directly on disk and should be fast.\n\n## Steps\n\nFollow these steps to perform a conversion.\n\n* Identify the databases you wish to convert. You can convert one or more databases at a time. By default all databases are converted.\n* Decide on parallel operation. By default the conversion operation peforms each operation in a serial manner. This minimizes load on the host system performing the conversion, but also takes the most time. If you wish to minimize the time conversion takes, enable parallel mode. Conversion will then perform as many operations as possible in parallel, but the process may place significant load on the host system (CPU, disk, and RAM, usage will all increase).\n* Stop all write-traffic to your InfluxDB system.\n* Restart the InfluxDB service and wait until all WAL data is flushed to disk -- this has completed when the system responds to queries. This is to ensure all data is present in shards.\n* Stop the InfluxDB service. It should not be restarted until conversion is complete.\n* Run conversion tool. 
Depending on the size of the data directory, this might be a lengthy operation. Consider running the conversion tool under a \"screen\" session to avoid any interruptions.\n* Unless you ran the conversion tool as the same user as that which runs InfluxDB, then you may need to set the correct read-and-write permissions on the new tsm1 directories.\n* Restart node and ensure data looks correct.\n* If everything looks OK, you may then wish to remove or archive the backed-up databases.\n* Restart write traffic.\n\n## Example session\n\nBelow is an example session, showing a database being converted.\n\n```\n$ # Create a backup location that the `influxdb` user has full access to\n$ mkdir -m 0777 /path/to/influxdb_backup\n$ sudo -u influxdb influx_tsm -backup /path/to/influxdb_backup -parallel /var/lib/influxdb/data\n\nb1 and bz1 shard conversion.\n-----------------------------------\nData directory is:                  /var/lib/influxdb/data\nBackup directory is:                /path/to/influxdb_backup\nDatabases specified:                all\nDatabase backups enabled:           yes\nParallel mode enabled (GOMAXPROCS): yes (8)\n\n\nFound 1 shards that will be converted.\n\nDatabase        Retention       Path                                                    Engine  Size\n_internal       monitor         /var/lib/influxdb/data/_internal/monitor/1           bz1     65536\n\nThese shards will be converted. Proceed? 
y/N: y\nConversion starting....\nBacking up 1 databases...\n2016/01/28 12:23:43.699266 Backup of databse '_internal' started\n2016/01/28 12:23:43.699883 Backing up file /var/lib/influxdb/data/_internal/monitor/1\n2016/01/28 12:23:43.700052 Database _internal backed up (851.776µs)\n2016/01/28 12:23:43.700320 Starting conversion of shard: /var/lib/influxdb/data/_internal/monitor/1\n2016/01/28 12:23:43.706276 Conversion of /var/lib/influxdb/data/_internal/monitor/1 successful (6.040148ms)\n\nSummary statistics\n========================================\nDatabases converted:                 1\nShards converted:                    1\nTSM files created:                   1\nPoints read:                         369\nPoints written:                      369\nNaN filtered:                        0\nInf filtered:                        0\nPoints without fields filtered:      0\nDisk usage pre-conversion (bytes):   65536\nDisk usage post-conversion (bytes):  11000\nReduction factor:                    83%\nBytes per TSM point:                 29.81\nTotal conversion time:               7.330443ms\n\n$ # restart node, verify data\n$ sudo rm -r /path/to/influxdb_backup\n```\n\nNote that the tool first lists the shards that will be converted,\nbefore asking for confirmation. You can abort the conversion process\nat this step if you just wish to see what would be converted, or if\nthe list of shards does not look correct.\n\n__WARNING:__ If you run the `influx_tsm` tool as a user other than the\n`influxdb` user (or the user that the InfluxDB process runs under),\nplease make sure to verify the shard permissions are correct prior to\nstarting InfluxDB. If needed, shard permissions can be corrected with\nthe `chown` command. 
For example:\n\n```\nsudo chown -R influxdb:influxdb /var/lib/influxdb\n```\n\n## Rolling back a conversion\n\nAfter a successful backup (the message `Database XYZ backed up` was\nlogged), you have a duplicate of that database in the _backup_\ndirectory you provided on the command line. If, when checking your\ndata after a successful conversion, you notice things missing or\nsomething just isn't right, you can \"undo\" the conversion:\n\n- Shut down your node (this is very important)\n- Remove the database's directory from the influxdb `data` directory (default: `~/.influxdb/data/XYZ` for binary installations or `/var/lib/influxdb/data/XYZ` for packaged installations)\n- Copy (to really make sure the shard is preserved) the database's directory from the backup directory you created into the `data` directory.\n\nUsing the same directories as above, and assuming a database named `stats`:\n\n```\n$ sudo rm -r /var/lib/influxdb/data/stats\n$ sudo cp -r /path/to/influxdb_backup/stats /var/lib/influxdb/data/\n$ # restart influxd node\n```\n\n#### How to avoid downtime when upgrading shards\n\n*Identify non-`tsm1` shards*\n\nNon-`tsm1` shards are files of the form: `data/<database>/<retention_policy>/<shard_id>`.\n\n`tsm1` shards are files of the form: `data/<database>/<retention_policy>/<shard_id>/<file>.tsm`.\n\n*Determine which `bz`/`bz1` shards are cold for writes*\n\nRun the `SHOW SHARDS` query to see the start and end dates for shards.\nIf the date range for a shard does not span the current time then the shard is said to be cold for writes.\nThis means that no new points are expected to be added to the shard.\nThe shard whose date range spans now is said to be hot for writes.\nYou can only safely convert cold shards without stopping the InfluxDB process.\n\n*Convert cold shards*\n\n1. Copy each of the cold shards you'd like to convert to a new directory with the structure `/tmp/data/<database>/<retention_policy>/<shard_id>`.\n2. 
Run the `influx_tsm` tool on the copied files:\n```\ninflux_tsm -parallel /tmp/data/\n```\n3. Remove the existing cold `b1`/`bz1` shards from the production data directory.\n4. Move the new `tsm1` shards into the original directory, overwriting the existing `b1`/`bz1` shards of the same name. Do this simultaneously with step 3 to avoid any query errors.\n5. Wait an hour, a day, or a week (depending on your retention period) for any hot `b1`/`bz1` shards to become cold and repeat steps 1 through 4 on the newly cold shards.\n\n> **Note:** Any points written to the cold shards after making a copy will be lost when the `tsm1` shard overwrites the existing cold shard.\nNothing in InfluxDB will prevent writes to cold shards, they are merely unexpected, not impossible.\nIt is your responsibility to prevent writes to cold shards to prevent data loss.\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx_tsm/b1/reader.go",
    "content": "// Package b1 reads data from b1 shards.\npackage b1 // import \"github.com/influxdata/influxdb/cmd/influx_tsm/b1\"\n\nimport (\n\t\"encoding/binary\"\n\t\"math\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com/boltdb/bolt\"\n\t\"github.com/influxdata/influxdb/cmd/influx_tsm/stats\"\n\t\"github.com/influxdata/influxdb/cmd/influx_tsm/tsdb\"\n\t\"github.com/influxdata/influxdb/tsdb/engine/tsm1\"\n)\n\n// DefaultChunkSize is the size of chunks read from the b1 shard\nconst DefaultChunkSize int = 1000\n\nvar excludedBuckets = map[string]bool{\n\t\"fields\": true,\n\t\"meta\":   true,\n\t\"series\": true,\n\t\"wal\":    true,\n}\n\n// Reader is used to read all data from a b1 shard.\ntype Reader struct {\n\tpath string\n\tdb   *bolt.DB\n\ttx   *bolt.Tx\n\n\tcursors    []*cursor\n\tcurrCursor int\n\n\tkeyBuf   string\n\tvalues   []tsm1.Value\n\tvaluePos int\n\n\tfields map[string]*tsdb.MeasurementFields\n\tcodecs map[string]*tsdb.FieldCodec\n\n\tstats *stats.Stats\n}\n\n// NewReader returns a reader for the b1 shard at path.\nfunc NewReader(path string, stats *stats.Stats, chunkSize int) *Reader {\n\tr := &Reader{\n\t\tpath:   path,\n\t\tfields: make(map[string]*tsdb.MeasurementFields),\n\t\tcodecs: make(map[string]*tsdb.FieldCodec),\n\t\tstats:  stats,\n\t}\n\n\tif chunkSize <= 0 {\n\t\tchunkSize = DefaultChunkSize\n\t}\n\n\tr.values = make([]tsm1.Value, chunkSize)\n\n\treturn r\n}\n\n// Open opens the reader.\nfunc (r *Reader) Open() error {\n\t// Open underlying storage.\n\tdb, err := bolt.Open(r.path, 0666, &bolt.Options{Timeout: 1 * time.Second})\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.db = db\n\n\t// Load fields.\n\tif err := r.db.View(func(tx *bolt.Tx) error {\n\t\tmeta := tx.Bucket([]byte(\"fields\"))\n\t\tc := meta.Cursor()\n\n\t\tfor k, v := c.First(); k != nil; k, v = c.Next() {\n\t\t\tmf := &tsdb.MeasurementFields{}\n\t\t\tif err := mf.UnmarshalBinary(v); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tr.fields[string(k)] = 
mf\n\t\t\tr.codecs[string(k)] = tsdb.NewFieldCodec(mf.Fields)\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tseriesSet := make(map[string]bool)\n\n\t// ignore series index and find all series in this shard\n\tif err := r.db.View(func(tx *bolt.Tx) error {\n\t\ttx.ForEach(func(name []byte, _ *bolt.Bucket) error {\n\t\t\tkey := string(name)\n\t\t\tif !excludedBuckets[key] {\n\t\t\t\tseriesSet[key] = true\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tr.tx, err = r.db.Begin(false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create cursor for each field of each series.\n\tfor s := range seriesSet {\n\t\tmeasurement := tsdb.MeasurementFromSeriesKey(s)\n\t\tfields := r.fields[measurement]\n\t\tif fields == nil {\n\t\t\tr.stats.IncrFiltered()\n\t\t\tcontinue\n\t\t}\n\t\tfor _, f := range fields.Fields {\n\t\t\tc := newCursor(r.tx, s, f.Name, r.codecs[measurement])\n\t\t\tc.SeekTo(0)\n\t\t\tr.cursors = append(r.cursors, c)\n\t\t}\n\t}\n\tsort.Sort(cursors(r.cursors))\n\n\treturn nil\n}\n\n// Next returns whether any data remains to be read. It must be called before\n// the next call to Read().\nfunc (r *Reader) Next() bool {\n\tr.valuePos = 0\nOUTER:\n\tfor {\n\t\tif r.currCursor >= len(r.cursors) {\n\t\t\t// All cursors drained. No more data remains.\n\t\t\treturn false\n\t\t}\n\n\t\tcc := r.cursors[r.currCursor]\n\t\tr.keyBuf = tsm1.SeriesFieldKey(cc.series, cc.field)\n\n\t\tfor {\n\t\t\tk, v := cc.Next()\n\t\t\tif k == -1 {\n\t\t\t\t// Go to next cursor and try again.\n\t\t\t\tr.currCursor++\n\t\t\t\tif r.valuePos == 0 {\n\t\t\t\t\t// The previous cursor had no data. Instead of returning\n\t\t\t\t\t// just go immediately to the next cursor.\n\t\t\t\t\tcontinue OUTER\n\t\t\t\t}\n\t\t\t\t// There is some data available. 
Indicate that it should be read.\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif f, ok := v.(float64); ok {\n\t\t\t\tif math.IsInf(f, 0) {\n\t\t\t\t\tr.stats.AddPointsRead(1)\n\t\t\t\t\tr.stats.IncrInf()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif math.IsNaN(f) {\n\t\t\t\t\tr.stats.AddPointsRead(1)\n\t\t\t\t\tr.stats.IncrNaN()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tr.values[r.valuePos] = tsm1.NewValue(k, v)\n\t\t\tr.valuePos++\n\n\t\t\tif r.valuePos >= len(r.values) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Read returns the next chunk of data in the shard, converted to tsm1 values. Data is\n// emitted completely for every field, in every series, before the next field is processed.\n// Data from Read() adheres to the requirements for writing to tsm1 shards\nfunc (r *Reader) Read() (string, []tsm1.Value, error) {\n\treturn r.keyBuf, r.values[:r.valuePos], nil\n}\n\n// Close closes the reader.\nfunc (r *Reader) Close() error {\n\tr.tx.Rollback()\n\treturn r.db.Close()\n}\n\n// cursor provides ordered iteration across a series.\ntype cursor struct {\n\t// Bolt cursor and readahead buffer.\n\tcursor *bolt.Cursor\n\tkeyBuf int64\n\tvalBuf interface{}\n\n\tseries string\n\tfield  string\n\tdec    *tsdb.FieldCodec\n}\n\n// Cursor returns an iterator for a key over a single field.\nfunc newCursor(tx *bolt.Tx, series string, field string, dec *tsdb.FieldCodec) *cursor {\n\tcur := &cursor{\n\t\tkeyBuf: -2,\n\t\tseries: series,\n\t\tfield:  field,\n\t\tdec:    dec,\n\t}\n\n\t// Retrieve series bucket.\n\tb := tx.Bucket([]byte(series))\n\tif b != nil {\n\t\tcur.cursor = b.Cursor()\n\t}\n\n\treturn cur\n}\n\n// Seek moves the cursor to a position.\nfunc (c *cursor) SeekTo(seek int64) {\n\tvar seekBytes [8]byte\n\tbinary.BigEndian.PutUint64(seekBytes[:], uint64(seek))\n\tk, v := c.cursor.Seek(seekBytes[:])\n\tc.keyBuf, c.valBuf = tsdb.DecodeKeyValue(c.field, c.dec, k, v)\n}\n\n// Next returns the next key/value pair from the cursor.\nfunc (c *cursor) Next() (key 
int64, value interface{}) {\n\tfor {\n\t\tk, v := func() (int64, interface{}) {\n\t\t\tif c.keyBuf != -2 {\n\t\t\t\tk, v := c.keyBuf, c.valBuf\n\t\t\t\tc.keyBuf = -2\n\t\t\t\treturn k, v\n\t\t\t}\n\n\t\t\tk, v := c.cursor.Next()\n\t\t\tif k == nil {\n\t\t\t\treturn -1, nil\n\t\t\t}\n\t\t\treturn tsdb.DecodeKeyValue(c.field, c.dec, k, v)\n\t\t}()\n\n\t\tif k != -1 && v == nil {\n\t\t\t// There is a point in the series at the next timestamp,\n\t\t\t// but not for this cursor's field. Go to the next point.\n\t\t\tcontinue\n\t\t}\n\t\treturn k, v\n\t}\n}\n\n// Sort b1 cursors in correct order for writing to TSM files.\n\ntype cursors []*cursor\n\nfunc (a cursors) Len() int      { return len(a) }\nfunc (a cursors) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a cursors) Less(i, j int) bool {\n\tif a[i].series == a[j].series {\n\t\treturn a[i].field < a[j].field\n\t}\n\treturn a[i].series < a[j].series\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx_tsm/bz1/reader.go",
    "content": "// Package bz1 reads data from bz1 shards.\npackage bz1 // import \"github.com/influxdata/influxdb/cmd/influx_tsm/bz1\"\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com/boltdb/bolt\"\n\t\"github.com/golang/snappy\"\n\t\"github.com/influxdata/influxdb/cmd/influx_tsm/stats\"\n\t\"github.com/influxdata/influxdb/cmd/influx_tsm/tsdb\"\n\t\"github.com/influxdata/influxdb/tsdb/engine/tsm1\"\n)\n\n// DefaultChunkSize is the size of chunks read from the bz1 shard\nconst DefaultChunkSize = 1000\n\n// Reader is used to read all data from a bz1 shard.\ntype Reader struct {\n\tpath string\n\tdb   *bolt.DB\n\ttx   *bolt.Tx\n\n\tcursors    []*cursor\n\tcurrCursor int\n\n\tkeyBuf   string\n\tvalues   []tsm1.Value\n\tvaluePos int\n\n\tfields map[string]*tsdb.MeasurementFields\n\tcodecs map[string]*tsdb.FieldCodec\n\n\tstats *stats.Stats\n}\n\n// NewReader returns a reader for the bz1 shard at path.\nfunc NewReader(path string, stats *stats.Stats, chunkSize int) *Reader {\n\tr := &Reader{\n\t\tpath:   path,\n\t\tfields: make(map[string]*tsdb.MeasurementFields),\n\t\tcodecs: make(map[string]*tsdb.FieldCodec),\n\t\tstats:  stats,\n\t}\n\n\tif chunkSize <= 0 {\n\t\tchunkSize = DefaultChunkSize\n\t}\n\n\tr.values = make([]tsm1.Value, chunkSize)\n\n\treturn r\n}\n\n// Open opens the reader.\nfunc (r *Reader) Open() error {\n\t// Open underlying storage.\n\tdb, err := bolt.Open(r.path, 0666, &bolt.Options{Timeout: 1 * time.Second})\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.db = db\n\n\tseriesSet := make(map[string]bool)\n\n\tif err := r.db.View(func(tx *bolt.Tx) error {\n\t\tvar data []byte\n\n\t\tmeta := tx.Bucket([]byte(\"meta\"))\n\t\tif meta == nil {\n\t\t\t// No data in this shard.\n\t\t\treturn nil\n\t\t}\n\n\t\tpointsBucket := tx.Bucket([]byte(\"points\"))\n\t\tif pointsBucket == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tif err := pointsBucket.ForEach(func(key, _ []byte) error 
{\n\t\t\tseriesSet[string(key)] = true\n\t\t\treturn nil\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuf := meta.Get([]byte(\"fields\"))\n\t\tif buf == nil {\n\t\t\t// No data in this shard.\n\t\t\treturn nil\n\t\t}\n\n\t\tdata, err = snappy.Decode(nil, buf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := json.Unmarshal(data, &r.fields); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\t// Build the codec for each measurement.\n\tfor k, v := range r.fields {\n\t\tr.codecs[k] = tsdb.NewFieldCodec(v.Fields)\n\t}\n\n\tr.tx, err = r.db.Begin(false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create cursor for each field of each series.\n\tfor s := range seriesSet {\n\t\tmeasurement := tsdb.MeasurementFromSeriesKey(s)\n\t\tfields := r.fields[measurement]\n\t\tif fields == nil {\n\t\t\tr.stats.IncrFiltered()\n\t\t\tcontinue\n\t\t}\n\t\tfor _, f := range fields.Fields {\n\t\t\tc := newCursor(r.tx, s, f.Name, r.codecs[measurement])\n\t\t\tif c == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.SeekTo(0)\n\t\t\tr.cursors = append(r.cursors, c)\n\t\t}\n\t}\n\tsort.Sort(cursors(r.cursors))\n\n\treturn nil\n}\n\n// Next returns whether there is any more data to be read.\nfunc (r *Reader) Next() bool {\n\tr.valuePos = 0\nOUTER:\n\tfor {\n\t\tif r.currCursor >= len(r.cursors) {\n\t\t\t// All cursors drained. No more data remains.\n\t\t\treturn false\n\t\t}\n\n\t\tcc := r.cursors[r.currCursor]\n\t\tr.keyBuf = tsm1.SeriesFieldKey(cc.series, cc.field)\n\n\t\tfor {\n\t\t\tk, v := cc.Next()\n\t\t\tif k == -1 {\n\t\t\t\t// Go to next cursor and try again.\n\t\t\t\tr.currCursor++\n\t\t\t\tif r.valuePos == 0 {\n\t\t\t\t\t// The previous cursor had no data. Instead of returning\n\t\t\t\t\t// just go immediately to the next cursor.\n\t\t\t\t\tcontinue OUTER\n\t\t\t\t}\n\t\t\t\t// There is some data available. 
Indicate that it should be read.\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\tif f, ok := v.(float64); ok {\n\t\t\t\tif math.IsInf(f, 0) {\n\t\t\t\t\tr.stats.AddPointsRead(1)\n\t\t\t\t\tr.stats.IncrInf()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif math.IsNaN(f) {\n\t\t\t\t\tr.stats.AddPointsRead(1)\n\t\t\t\t\tr.stats.IncrNaN()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tr.values[r.valuePos] = tsm1.NewValue(k, v)\n\t\t\tr.valuePos++\n\n\t\t\tif r.valuePos >= len(r.values) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Read returns the next chunk of data in the shard, converted to tsm1 values. Data is\n// emitted completely for every field, in every series, before the next field is processed.\n// Data from Read() adheres to the requirements for writing to tsm1 shards\nfunc (r *Reader) Read() (string, []tsm1.Value, error) {\n\treturn r.keyBuf, r.values[:r.valuePos], nil\n}\n\n// Close closes the reader.\nfunc (r *Reader) Close() error {\n\tr.tx.Rollback()\n\treturn r.db.Close()\n}\n\n// cursor provides ordered iteration across a series.\ntype cursor struct {\n\tcursor       *bolt.Cursor\n\tbuf          []byte // uncompressed buffer\n\toff          int    // buffer offset\n\tfieldIndices []int\n\tindex        int\n\n\tseries string\n\tfield  string\n\tdec    *tsdb.FieldCodec\n\n\tkeyBuf int64\n\tvalBuf interface{}\n}\n\n// newCursor returns an instance of a bz1 cursor.\nfunc newCursor(tx *bolt.Tx, series string, field string, dec *tsdb.FieldCodec) *cursor {\n\t// Retrieve points bucket. 
Ignore if there is no bucket.\n\tb := tx.Bucket([]byte(\"points\")).Bucket([]byte(series))\n\tif b == nil {\n\t\treturn nil\n\t}\n\n\treturn &cursor{\n\t\tcursor: b.Cursor(),\n\t\tseries: series,\n\t\tfield:  field,\n\t\tdec:    dec,\n\t\tkeyBuf: -2,\n\t}\n}\n\n// Seek moves the cursor to a position.\nfunc (c *cursor) SeekTo(seek int64) {\n\tvar seekBytes [8]byte\n\tbinary.BigEndian.PutUint64(seekBytes[:], uint64(seek))\n\n\t// Move cursor to appropriate block and set to buffer.\n\tk, v := c.cursor.Seek(seekBytes[:])\n\tif v == nil { // get the last block, it might have this time\n\t\t_, v = c.cursor.Last()\n\t} else if seek < int64(binary.BigEndian.Uint64(k)) { // the seek key is less than this block, go back one and check\n\t\t_, v = c.cursor.Prev()\n\n\t\t// if the previous block max time is less than the seek value, reset to where we were originally\n\t\tif v == nil || seek > int64(binary.BigEndian.Uint64(v[0:8])) {\n\t\t\t_, v = c.cursor.Seek(seekBytes[:])\n\t\t}\n\t}\n\tc.setBuf(v)\n\n\t// Read current block up to seek position.\n\tc.seekBuf(seekBytes[:])\n\n\t// Return current entry.\n\tc.keyBuf, c.valBuf = c.read()\n}\n\n// seekBuf moves the cursor to a position within the current buffer.\nfunc (c *cursor) seekBuf(seek []byte) (key, value []byte) {\n\tfor {\n\t\t// Slice off the current entry.\n\t\tbuf := c.buf[c.off:]\n\n\t\t// Exit if current entry's timestamp is on or after the seek.\n\t\tif len(buf) == 0 {\n\t\t\treturn\n\t\t}\n\n\t\tif bytes.Compare(buf[0:8], seek) != -1 {\n\t\t\treturn\n\t\t}\n\n\t\tc.off += entryHeaderSize + entryDataSize(buf)\n\t}\n}\n\n// Next returns the next key/value pair from the cursor. 
If there are no values\n// remaining, -1 is returned.\nfunc (c *cursor) Next() (int64, interface{}) {\n\tfor {\n\t\tk, v := func() (int64, interface{}) {\n\t\t\tif c.keyBuf != -2 {\n\t\t\t\tk, v := c.keyBuf, c.valBuf\n\t\t\t\tc.keyBuf = -2\n\t\t\t\treturn k, v\n\t\t\t}\n\n\t\t\t// Ignore if there is no buffer.\n\t\t\tif len(c.buf) == 0 {\n\t\t\t\treturn -1, nil\n\t\t\t}\n\n\t\t\t// Move forward to next entry.\n\t\t\tc.off += entryHeaderSize + entryDataSize(c.buf[c.off:])\n\n\t\t\t// If no items left then read first item from next block.\n\t\t\tif c.off >= len(c.buf) {\n\t\t\t\t_, v := c.cursor.Next()\n\t\t\t\tc.setBuf(v)\n\t\t\t}\n\n\t\t\treturn c.read()\n\t\t}()\n\n\t\tif k != -1 && v == nil {\n\t\t\t// There is a point in the series at the next timestamp,\n\t\t\t// but not for this cursor's field. Go to the next point.\n\t\t\tcontinue\n\t\t}\n\t\treturn k, v\n\t}\n}\n\n// setBuf saves a compressed block to the buffer.\nfunc (c *cursor) setBuf(block []byte) {\n\t// Clear if the block is empty.\n\tif len(block) == 0 {\n\t\tc.buf, c.off, c.fieldIndices, c.index = c.buf[0:0], 0, c.fieldIndices[0:0], 0\n\t\treturn\n\t}\n\n\t// Otherwise decode block into buffer.\n\t// Skip over the first 8 bytes since they are the max timestamp.\n\tbuf, err := snappy.Decode(nil, block[8:])\n\tif err != nil {\n\t\tc.buf = c.buf[0:0]\n\t\tfmt.Printf(\"block decode error: %s\\n\", err)\n\t}\n\n\tc.buf, c.off = buf, 0\n}\n\n// read reads the current key and value from the current block.\nfunc (c *cursor) read() (key int64, value interface{}) {\n\t// Return nil if the offset is at the end of the buffer.\n\tif c.off >= len(c.buf) {\n\t\treturn -1, nil\n\t}\n\n\t// Otherwise read the current entry.\n\tbuf := c.buf[c.off:]\n\tdataSize := entryDataSize(buf)\n\n\treturn tsdb.DecodeKeyValue(c.field, c.dec, buf[0:8], buf[entryHeaderSize:entryHeaderSize+dataSize])\n}\n\n// Sort bz1 cursors in correct order for writing to TSM files.\n\ntype cursors []*cursor\n\nfunc (a cursors) Len() int      { 
return len(a) }\nfunc (a cursors) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a cursors) Less(i, j int) bool {\n\tif a[i].series == a[j].series {\n\t\treturn a[i].field < a[j].field\n\t}\n\treturn a[i].series < a[j].series\n}\n\n// entryHeaderSize is the number of bytes required for the header.\nconst entryHeaderSize = 8 + 4\n\n// entryDataSize returns the size of an entry's data field, in bytes.\nfunc entryDataSize(v []byte) int { return int(binary.BigEndian.Uint32(v[8:12])) }\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx_tsm/converter.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\n\t\"github.com/influxdata/influxdb/cmd/influx_tsm/stats\"\n\t\"github.com/influxdata/influxdb/tsdb/engine/tsm1\"\n)\n\nconst (\n\tmaxBlocksPerKey = 65535\n)\n\n// KeyIterator is used to iterate over b* keys for conversion to tsm keys\ntype KeyIterator interface {\n\tNext() bool\n\tRead() (string, []tsm1.Value, error)\n}\n\n// Converter encapsulates the logic for converting b*1 shards to tsm1 shards.\ntype Converter struct {\n\tpath           string\n\tmaxTSMFileSize uint32\n\tsequence       int\n\tstats          *stats.Stats\n}\n\n// NewConverter returns a new instance of the Converter.\nfunc NewConverter(path string, sz uint32, stats *stats.Stats) *Converter {\n\treturn &Converter{\n\t\tpath:           path,\n\t\tmaxTSMFileSize: sz,\n\t\tstats:          stats,\n\t}\n}\n\n// Process writes the data provided by iter to a tsm1 shard.\nfunc (c *Converter) Process(iter KeyIterator) error {\n\t// Ensure the tsm1 directory exists.\n\tif err := os.MkdirAll(c.path, 0777); err != nil {\n\t\treturn err\n\t}\n\n\t// Iterate until no more data remains.\n\tvar w tsm1.TSMWriter\n\tvar keyCount map[string]int\n\n\tfor iter.Next() {\n\t\tk, v, err := iter.Read()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif w == nil {\n\t\t\tw, err = c.nextTSMWriter()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tkeyCount = map[string]int{}\n\t\t}\n\t\tif err := w.Write(k, v); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tkeyCount[k]++\n\n\t\tc.stats.AddPointsRead(len(v))\n\t\tc.stats.AddPointsWritten(len(v))\n\n\t\t// If we have a max file size configured and we're over it, start a new TSM file.\n\t\tif w.Size() > c.maxTSMFileSize || keyCount[k] == maxBlocksPerKey {\n\t\t\tif err := w.WriteIndex(); err != nil && err != tsm1.ErrNoValues {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tc.stats.AddTSMBytes(w.Size())\n\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tw = 
nil\n\t\t}\n\t}\n\n\tif w != nil {\n\t\tif err := w.WriteIndex(); err != nil && err != tsm1.ErrNoValues {\n\t\t\treturn err\n\t\t}\n\t\tc.stats.AddTSMBytes(w.Size())\n\n\t\tif err := w.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// nextTSMWriter returns the next TSMWriter for the Converter.\nfunc (c *Converter) nextTSMWriter() (tsm1.TSMWriter, error) {\n\tc.sequence++\n\tfileName := filepath.Join(c.path, fmt.Sprintf(\"%09d-%09d.%s\", 1, c.sequence, tsm1.TSMFileExtension))\n\n\tfd, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create the writer for the new TSM file.\n\tw, err := tsm1.NewTSMWriter(fd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.stats.IncrTSMFileCount()\n\treturn w, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx_tsm/main.go",
    "content": "// Command influx_tsm converts b1 or bz1 shards (from InfluxDB releases earlier than v0.11)\n// to the current tsm1 format.\npackage main\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"runtime/pprof\"\n\t\"sort\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\t\"time\"\n\n\t\"net/http\"\n\t_ \"net/http/pprof\"\n\n\t\"github.com/influxdata/influxdb/cmd/influx_tsm/b1\"\n\t\"github.com/influxdata/influxdb/cmd/influx_tsm/bz1\"\n\t\"github.com/influxdata/influxdb/cmd/influx_tsm/tsdb\"\n)\n\n// ShardReader reads b* shards and converts to tsm shards\ntype ShardReader interface {\n\tKeyIterator\n\tOpen() error\n\tClose() error\n}\n\nconst (\n\ttsmExt = \"tsm\"\n)\n\nvar description = `\nConvert a database from b1 or bz1 format to tsm1 format.\n\nThis tool will backup the directories before conversion (if not disabled).\nThe backed-up files must be removed manually, generally after starting up the\nnode again to make sure all of data has been converted correctly.\n\nTo restore a backup:\n  Shut down the node, remove the converted directory, and\n  copy the backed-up directory to the original location.`\n\ntype options struct {\n\tDataPath       string\n\tBackupPath     string\n\tDBs            []string\n\tDebugAddr      string\n\tTSMSize        uint64\n\tParallel       bool\n\tSkipBackup     bool\n\tUpdateInterval time.Duration\n\tYes            bool\n\tCPUFile        string\n}\n\nfunc (o *options) Parse() error {\n\tfs := flag.NewFlagSet(os.Args[0], flag.ExitOnError)\n\n\tvar dbs string\n\n\tfs.StringVar(&dbs, \"dbs\", \"\", \"Comma-delimited list of databases to convert. Default is to convert all databases.\")\n\tfs.Uint64Var(&opts.TSMSize, \"sz\", maxTSMSz, \"Maximum size of individual TSM files.\")\n\tfs.BoolVar(&opts.Parallel, \"parallel\", false, \"Perform parallel conversion. 
(up to GOMAXPROCS shards at once)\")\n\tfs.BoolVar(&opts.SkipBackup, \"nobackup\", false, \"Disable database backups. Not recommended.\")\n\tfs.StringVar(&opts.BackupPath, \"backup\", \"\", \"The location to backup up the current databases. Must not be within the data directory.\")\n\tfs.StringVar(&opts.DebugAddr, \"debug\", \"\", \"If set, http debugging endpoints will be enabled on the given address\")\n\tfs.DurationVar(&opts.UpdateInterval, \"interval\", 5*time.Second, \"How often status updates are printed.\")\n\tfs.BoolVar(&opts.Yes, \"y\", false, \"Don't ask, just convert\")\n\tfs.StringVar(&opts.CPUFile, \"profile\", \"\", \"CPU Profile location\")\n\tfs.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %v [options] <data-path> \\n\", os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\\nOptions:\\n\", description)\n\t\tfs.PrintDefaults()\n\t\tfmt.Fprintf(os.Stderr, \"\\n\")\n\t}\n\n\tif err := fs.Parse(os.Args[1:]); err != nil {\n\t\treturn err\n\t}\n\n\tif len(fs.Args()) < 1 {\n\t\treturn errors.New(\"no data directory specified\")\n\t}\n\tvar err error\n\tif o.DataPath, err = filepath.Abs(fs.Args()[0]); err != nil {\n\t\treturn err\n\t}\n\tif o.DataPath, err = filepath.EvalSymlinks(filepath.Clean(o.DataPath)); err != nil {\n\t\treturn err\n\t}\n\n\tif o.TSMSize > maxTSMSz {\n\t\treturn fmt.Errorf(\"bad TSM file size, maximum TSM file size is %d\", maxTSMSz)\n\t}\n\n\t// Check if specific databases were requested.\n\to.DBs = strings.Split(dbs, \",\")\n\tif len(o.DBs) == 1 && o.DBs[0] == \"\" {\n\t\to.DBs = nil\n\t}\n\n\tif !o.SkipBackup {\n\t\tif o.BackupPath == \"\" {\n\t\t\treturn errors.New(\"either -nobackup or -backup DIR must be set\")\n\t\t}\n\t\tif o.BackupPath, err = filepath.Abs(o.BackupPath); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif o.BackupPath, err = filepath.EvalSymlinks(filepath.Clean(o.BackupPath)); err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\treturn errors.New(\"backup directory must already exist\")\n\t\t\t}\n\t\t\treturn 
err\n\t\t}\n\n\t\tif strings.HasPrefix(o.BackupPath, o.DataPath) {\n\t\t\tfmt.Println(o.BackupPath, o.DataPath)\n\t\t\treturn errors.New(\"backup directory cannot be contained within data directory\")\n\t\t}\n\t}\n\n\tif o.DebugAddr != \"\" {\n\t\tlog.Printf(\"Starting debugging server on http://%v\", o.DebugAddr)\n\t\tgo func() {\n\t\t\tlog.Fatal(http.ListenAndServe(o.DebugAddr, nil))\n\t\t}()\n\t}\n\n\treturn nil\n}\n\nvar opts options\n\nconst maxTSMSz uint64 = 2 * 1024 * 1024 * 1024\n\nfunc init() {\n\tlog.SetOutput(os.Stderr)\n\tlog.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)\n}\n\nfunc main() {\n\tif err := opts.Parse(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Determine the list of databases\n\tdbs, err := ioutil.ReadDir(opts.DataPath)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to access data directory at %v: %v\\n\", opts.DataPath, err)\n\t}\n\tfmt.Println() // Cleanly separate output from start of program.\n\n\tif opts.Parallel {\n\t\tif !isEnvSet(\"GOMAXPROCS\") {\n\t\t\t// Only modify GOMAXPROCS if it wasn't set in the environment\n\t\t\t// This means 'GOMAXPROCS=1 influx_tsm -parallel' will not actually\n\t\t\t// run in parallel\n\t\t\truntime.GOMAXPROCS(runtime.NumCPU())\n\t\t}\n\t}\n\n\tvar badUser string\n\tif opts.SkipBackup {\n\t\tbadUser = \"(NOT RECOMMENDED)\"\n\t}\n\n\t// Dump summary of what is about to happen.\n\tfmt.Println(\"b1 and bz1 shard conversion.\")\n\tfmt.Println(\"-----------------------------------\")\n\tfmt.Println(\"Data directory is:                 \", opts.DataPath)\n\tif !opts.SkipBackup {\n\t\tfmt.Println(\"Backup directory is:               \", opts.BackupPath)\n\t}\n\tfmt.Println(\"Databases specified:               \", allDBs(opts.DBs))\n\tfmt.Println(\"Database backups enabled:          \", yesno(!opts.SkipBackup), badUser)\n\tfmt.Printf(\"Parallel mode enabled (GOMAXPROCS): %s (%d)\\n\", yesno(opts.Parallel), runtime.GOMAXPROCS(0))\n\tfmt.Println()\n\n\tshards := collectShards(dbs)\n\n\t// Anything to 
convert?\n\tfmt.Printf(\"\\nFound %d shards that will be converted.\\n\", len(shards))\n\tif len(shards) == 0 {\n\t\tfmt.Println(\"Nothing to do.\")\n\t\treturn\n\t}\n\n\t// Display list of convertible shards.\n\tfmt.Println()\n\tw := new(tabwriter.Writer)\n\tw.Init(os.Stdout, 0, 8, 1, '\\t', 0)\n\tfmt.Fprintln(w, \"Database\\tRetention\\tPath\\tEngine\\tSize\")\n\tfor _, si := range shards {\n\t\tfmt.Fprintf(w, \"%v\\t%v\\t%v\\t%v\\t%d\\n\", si.Database, si.RetentionPolicy, si.FullPath(opts.DataPath), si.FormatAsString(), si.Size)\n\t}\n\tw.Flush()\n\n\tif !opts.Yes {\n\t\t// Get confirmation from user.\n\t\tfmt.Printf(\"\\nThese shards will be converted. Proceed? y/N: \")\n\t\tliner := bufio.NewReader(os.Stdin)\n\t\tyn, err := liner.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to read response: %v\", err)\n\t\t}\n\t\tyn = strings.TrimRight(strings.ToLower(yn), \"\\n\")\n\t\tif yn != \"y\" {\n\t\t\tlog.Fatal(\"Conversion aborted.\")\n\t\t}\n\t}\n\tfmt.Println(\"Conversion starting....\")\n\n\tif opts.CPUFile != \"\" {\n\t\tf, err := os.Create(opts.CPUFile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif err = pprof.StartCPUProfile(f); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdefer pprof.StopCPUProfile()\n\t}\n\n\ttr := newTracker(shards, opts)\n\n\tif err := tr.Run(); err != nil {\n\t\tlog.Fatalf(\"Error occurred preventing completion: %v\\n\", err)\n\t}\n\n\ttr.PrintStats()\n}\n\nfunc collectShards(dbs []os.FileInfo) tsdb.ShardInfos {\n\t// Get the list of shards for conversion.\n\tvar shards tsdb.ShardInfos\n\tfor _, db := range dbs {\n\t\td := tsdb.NewDatabase(filepath.Join(opts.DataPath, db.Name()))\n\t\tshs, err := d.Shards()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to access shards for database %v: %v\\n\", d.Name(), err)\n\t\t}\n\t\tshards = append(shards, shs...)\n\t}\n\n\tsort.Sort(shards)\n\tshards = shards.FilterFormat(tsdb.TSM1)\n\tif len(dbs) > 0 {\n\t\tshards = 
shards.ExclusiveDatabases(opts.DBs)\n\t}\n\n\treturn shards\n}\n\n// backupDatabase backs up the database named db\nfunc backupDatabase(db string) error {\n\tcopyFile := func(path string, info os.FileInfo, err error) error {\n\t\t// Strip the DataPath from the path and replace with BackupPath.\n\t\ttoPath := strings.Replace(path, opts.DataPath, opts.BackupPath, 1)\n\n\t\tif info.IsDir() {\n\t\t\treturn os.MkdirAll(toPath, info.Mode())\n\t\t}\n\n\t\tin, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer in.Close()\n\n\t\tsrcInfo, err := os.Stat(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tout, err := os.OpenFile(toPath, os.O_CREATE|os.O_WRONLY, info.Mode())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer out.Close()\n\n\t\tdstInfo, err := os.Stat(toPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif dstInfo.Size() == srcInfo.Size() {\n\t\t\tlog.Printf(\"Backup file already found for %v with correct size, skipping.\", path)\n\t\t\treturn nil\n\t\t}\n\n\t\tif dstInfo.Size() > srcInfo.Size() {\n\t\t\tlog.Printf(\"Invalid backup file found for %v, replacing with good copy.\", path)\n\t\t\tif err := out.Truncate(0); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := out.Seek(0, io.SeekStart); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif dstInfo.Size() > 0 {\n\t\t\tlog.Printf(\"Resuming backup of file %v, starting at %v bytes\", path, dstInfo.Size())\n\t\t}\n\n\t\toff, err := out.Seek(0, io.SeekEnd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := in.Seek(off, io.SeekStart); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Printf(\"Backing up file %v\", path)\n\n\t\t_, err = io.Copy(out, in)\n\n\t\treturn err\n\t}\n\n\treturn filepath.Walk(filepath.Join(opts.DataPath, db), copyFile)\n}\n\n// convertShard converts the shard in-place.\nfunc convertShard(si *tsdb.ShardInfo, tr *tracker) error {\n\tsrc := si.FullPath(opts.DataPath)\n\tdst := fmt.Sprintf(\"%v.%v\", src, 
tsmExt)\n\n\tvar reader ShardReader\n\tswitch si.Format {\n\tcase tsdb.BZ1:\n\t\treader = bz1.NewReader(src, &tr.Stats, 0)\n\tcase tsdb.B1:\n\t\treader = b1.NewReader(src, &tr.Stats, 0)\n\tdefault:\n\t\treturn fmt.Errorf(\"Unsupported shard format: %v\", si.FormatAsString())\n\t}\n\n\t// Open the shard, and create a converter.\n\tif err := reader.Open(); err != nil {\n\t\treturn fmt.Errorf(\"Failed to open %v for conversion: %v\", src, err)\n\t}\n\tdefer reader.Close()\n\tconverter := NewConverter(dst, uint32(opts.TSMSize), &tr.Stats)\n\n\t// Perform the conversion.\n\tif err := converter.Process(reader); err != nil {\n\t\treturn fmt.Errorf(\"Conversion of %v failed: %v\", src, err)\n\t}\n\n\t// Delete source shard, and rename new tsm1 shard.\n\tif err := reader.Close(); err != nil {\n\t\treturn fmt.Errorf(\"Conversion of %v failed due to close: %v\", src, err)\n\t}\n\n\tif err := os.RemoveAll(si.FullPath(opts.DataPath)); err != nil {\n\t\treturn fmt.Errorf(\"Deletion of %v failed: %v\", src, err)\n\t}\n\tif err := os.Rename(dst, src); err != nil {\n\t\treturn fmt.Errorf(\"Rename of %v to %v failed: %v\", dst, src, err)\n\t}\n\n\treturn nil\n}\n\n// ParallelGroup allows the maximum parrallelism of a set of operations to be controlled.\ntype ParallelGroup chan struct{}\n\n// NewParallelGroup returns a group which allows n operations to run in parallel. 
A value of 0\n// means no operations will ever run.\nfunc NewParallelGroup(n int) ParallelGroup {\n\treturn make(chan struct{}, n)\n}\n\n// Do executes one operation of the ParallelGroup\nfunc (p ParallelGroup) Do(f func()) {\n\tp <- struct{}{} // acquire working slot\n\tdefer func() { <-p }()\n\n\tf()\n}\n\n// yesno returns \"yes\" for true, \"no\" for false.\nfunc yesno(b bool) string {\n\tif b {\n\t\treturn \"yes\"\n\t}\n\treturn \"no\"\n}\n\n// allDBs returns \"all\" if all databases are requested for conversion.\nfunc allDBs(dbs []string) string {\n\tif dbs == nil {\n\t\treturn \"all\"\n\t}\n\treturn fmt.Sprintf(\"%v\", dbs)\n}\n\n// isEnvSet checks to see if a variable was set in the environment\nfunc isEnvSet(name string) bool {\n\tfor _, s := range os.Environ() {\n\t\tif strings.SplitN(s, \"=\", 2)[0] == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx_tsm/stats/stats.go",
    "content": "// Package stats contains statistics for converting non-TSM shards to TSM.\npackage stats\n\nimport (\n\t\"sync/atomic\"\n\t\"time\"\n)\n\n// Stats are the statistics captured while converting non-TSM shards to TSM\ntype Stats struct {\n\tNanFiltered     uint64\n\tInfFiltered     uint64\n\tFieldsFiltered  uint64\n\tPointsWritten   uint64\n\tPointsRead      uint64\n\tTsmFilesCreated uint64\n\tTsmBytesWritten uint64\n\tCompletedShards uint64\n\tTotalTime       time.Duration\n}\n\n// AddPointsRead increments the number of read points.\nfunc (s *Stats) AddPointsRead(n int) {\n\tatomic.AddUint64(&s.PointsRead, uint64(n))\n}\n\n// AddPointsWritten increments the number of written points.\nfunc (s *Stats) AddPointsWritten(n int) {\n\tatomic.AddUint64(&s.PointsWritten, uint64(n))\n}\n\n// AddTSMBytes increments the number of TSM Bytes.\nfunc (s *Stats) AddTSMBytes(n uint32) {\n\tatomic.AddUint64(&s.TsmBytesWritten, uint64(n))\n}\n\n// IncrTSMFileCount increments the number of TSM files created.\nfunc (s *Stats) IncrTSMFileCount() {\n\tatomic.AddUint64(&s.TsmFilesCreated, 1)\n}\n\n// IncrNaN increments the number of NaNs filtered.\nfunc (s *Stats) IncrNaN() {\n\tatomic.AddUint64(&s.NanFiltered, 1)\n}\n\n// IncrInf increments the number of Infs filtered.\nfunc (s *Stats) IncrInf() {\n\tatomic.AddUint64(&s.InfFiltered, 1)\n}\n\n// IncrFiltered increments the number of fields filtered.\nfunc (s *Stats) IncrFiltered() {\n\tatomic.AddUint64(&s.FieldsFiltered, 1)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tracker.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/cmd/influx_tsm/stats\"\n\t\"github.com/influxdata/influxdb/cmd/influx_tsm/tsdb\"\n)\n\n// tracker will orchestrate and track the conversions of non-TSM shards to TSM\ntype tracker struct {\n\tStats stats.Stats\n\n\tshards tsdb.ShardInfos\n\topts   options\n\n\tpg ParallelGroup\n\twg sync.WaitGroup\n}\n\n// newTracker will setup and return a clean tracker instance\nfunc newTracker(shards tsdb.ShardInfos, opts options) *tracker {\n\tt := &tracker{\n\t\tshards: shards,\n\t\topts:   opts,\n\t\tpg:     NewParallelGroup(runtime.GOMAXPROCS(0)),\n\t}\n\n\treturn t\n}\n\nfunc (t *tracker) Run() error {\n\tconversionStart := time.Now()\n\n\t// Backup each directory.\n\tif !opts.SkipBackup {\n\t\tdatabases := t.shards.Databases()\n\t\tfmt.Printf(\"Backing up %d databases...\\n\", len(databases))\n\t\tt.wg.Add(len(databases))\n\t\tfor i := range databases {\n\t\t\tdb := databases[i]\n\t\t\tgo t.pg.Do(func() {\n\t\t\t\tdefer t.wg.Done()\n\n\t\t\t\tstart := time.Now()\n\t\t\t\tlog.Printf(\"Backup of database '%v' started\", db)\n\t\t\t\terr := backupDatabase(db)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"Backup of database %v failed: %v\\n\", db, err)\n\t\t\t\t}\n\t\t\t\tlog.Printf(\"Database %v backed up (%v)\\n\", db, time.Since(start))\n\t\t\t})\n\t\t}\n\t\tt.wg.Wait()\n\t} else {\n\t\tfmt.Println(\"Database backup disabled.\")\n\t}\n\n\tt.wg.Add(len(t.shards))\n\tfor i := range t.shards {\n\t\tsi := t.shards[i]\n\t\tgo t.pg.Do(func() {\n\t\t\tdefer func() {\n\t\t\t\tatomic.AddUint64(&t.Stats.CompletedShards, 1)\n\t\t\t\tt.wg.Done()\n\t\t\t}()\n\n\t\t\tstart := time.Now()\n\t\t\tlog.Printf(\"Starting conversion of shard: %v\", si.FullPath(opts.DataPath))\n\t\t\tif err := convertShard(si, t); err != nil {\n\t\t\t\tlog.Fatalf(\"Failed to convert %v: %v\\n\", si.FullPath(opts.DataPath), 
err)\n\t\t\t}\n\t\t\tlog.Printf(\"Conversion of %v successful (%v)\\n\", si.FullPath(opts.DataPath), time.Since(start))\n\t\t})\n\t}\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tt.wg.Wait()\n\t\tclose(done)\n\t}()\n\nWAIT_LOOP:\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\tbreak WAIT_LOOP\n\t\tcase <-time.After(opts.UpdateInterval):\n\t\t\tt.StatusUpdate()\n\t\t}\n\t}\n\n\tt.Stats.TotalTime = time.Since(conversionStart)\n\n\treturn nil\n}\n\nfunc (t *tracker) StatusUpdate() {\n\tshardCount := atomic.LoadUint64(&t.Stats.CompletedShards)\n\tpointCount := atomic.LoadUint64(&t.Stats.PointsRead)\n\tpointWritten := atomic.LoadUint64(&t.Stats.PointsWritten)\n\n\tlog.Printf(\"Still Working: Completed Shards: %d/%d Points read/written: %d/%d\", shardCount, len(t.shards), pointCount, pointWritten)\n}\n\nfunc (t *tracker) PrintStats() {\n\tpreSize := t.shards.Size()\n\tpostSize := int64(t.Stats.TsmBytesWritten)\n\n\tfmt.Printf(\"\\nSummary statistics\\n========================================\\n\")\n\tfmt.Printf(\"Databases converted:                 %d\\n\", len(t.shards.Databases()))\n\tfmt.Printf(\"Shards converted:                    %d\\n\", len(t.shards))\n\tfmt.Printf(\"TSM files created:                   %d\\n\", t.Stats.TsmFilesCreated)\n\tfmt.Printf(\"Points read:                         %d\\n\", t.Stats.PointsRead)\n\tfmt.Printf(\"Points written:                      %d\\n\", t.Stats.PointsWritten)\n\tfmt.Printf(\"NaN filtered:                        %d\\n\", t.Stats.NanFiltered)\n\tfmt.Printf(\"Inf filtered:                        %d\\n\", t.Stats.InfFiltered)\n\tfmt.Printf(\"Points without fields filtered:      %d\\n\", t.Stats.FieldsFiltered)\n\tfmt.Printf(\"Disk usage pre-conversion (bytes):   %d\\n\", preSize)\n\tfmt.Printf(\"Disk usage post-conversion (bytes):  %d\\n\", postSize)\n\tfmt.Printf(\"Reduction factor:                    %d%%\\n\", 100*(preSize-postSize)/preSize)\n\tfmt.Printf(\"Bytes per TSM point:                 %.2f\\n\", 
float64(postSize)/float64(t.Stats.PointsWritten))\n\tfmt.Printf(\"Total conversion time:               %v\\n\", t.Stats.TotalTime)\n\tfmt.Println()\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/codec.go",
    "content": "package tsdb\n\nimport (\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n)\n\nconst (\n\tfieldFloat   = 1\n\tfieldInteger = 2\n\tfieldBoolean = 3\n\tfieldString  = 4\n)\n\nvar (\n\t// ErrFieldNotFound is returned when a field cannot be found.\n\tErrFieldNotFound = errors.New(\"field not found\")\n\n\t// ErrFieldUnmappedID is returned when the system is presented, during decode, with a field ID\n\t// there is no mapping for.\n\tErrFieldUnmappedID = errors.New(\"field ID not mapped\")\n)\n\n// FieldCodec provides encoding and decoding functionality for the fields of a given\n// Measurement.\ntype FieldCodec struct {\n\tfieldsByID   map[uint8]*Field\n\tfieldsByName map[string]*Field\n}\n\n// NewFieldCodec returns a FieldCodec for the given Measurement. Must be called with\n// a RLock that protects the Measurement.\nfunc NewFieldCodec(fields map[string]*Field) *FieldCodec {\n\tfieldsByID := make(map[uint8]*Field, len(fields))\n\tfieldsByName := make(map[string]*Field, len(fields))\n\tfor _, f := range fields {\n\t\tfieldsByID[f.ID] = f\n\t\tfieldsByName[f.Name] = f\n\t}\n\treturn &FieldCodec{fieldsByID: fieldsByID, fieldsByName: fieldsByName}\n}\n\n// FieldIDByName returns the ID for the given field.\nfunc (f *FieldCodec) FieldIDByName(s string) (uint8, error) {\n\tfi := f.fieldsByName[s]\n\tif fi == nil {\n\t\treturn 0, ErrFieldNotFound\n\t}\n\treturn fi.ID, nil\n}\n\n// DecodeByID scans a byte slice for a field with the given ID, converts it to its\n// expected type, and return that value.\nfunc (f *FieldCodec) DecodeByID(targetID uint8, b []byte) (interface{}, error) {\n\tvar value interface{}\n\tfor {\n\t\tif len(b) == 0 {\n\t\t\t// No more bytes.\n\t\t\treturn nil, ErrFieldNotFound\n\t\t}\n\n\t\tfield := f.fieldsByID[b[0]]\n\t\tif field == nil {\n\t\t\t// This can happen, though is very unlikely. 
If this node receives encoded data, to be written\n\t\t\t// to disk, and is queried for that data before its metastore is updated, there will be no field\n\t\t\t// mapping for the data during decode. All this can happen because data is encoded by the node\n\t\t\t// that first received the write request, not the node that actually writes the data to disk.\n\t\t\t// So if this happens, the read must be aborted.\n\t\t\treturn nil, ErrFieldUnmappedID\n\t\t}\n\n\t\tswitch field.Type {\n\t\tcase fieldFloat:\n\t\t\tif field.ID == targetID {\n\t\t\t\tvalue = math.Float64frombits(binary.BigEndian.Uint64(b[1:9]))\n\t\t\t}\n\t\t\tb = b[9:]\n\t\tcase fieldInteger:\n\t\t\tif field.ID == targetID {\n\t\t\t\tvalue = int64(binary.BigEndian.Uint64(b[1:9]))\n\t\t\t}\n\t\t\tb = b[9:]\n\t\tcase fieldBoolean:\n\t\t\tif field.ID == targetID {\n\t\t\t\tvalue = b[1] == 1\n\t\t\t}\n\t\t\tb = b[2:]\n\t\tcase fieldString:\n\t\t\tlength := binary.BigEndian.Uint16(b[1:3])\n\t\t\tif field.ID == targetID {\n\t\t\t\tvalue = string(b[3 : 3+length])\n\t\t\t}\n\t\t\tb = b[3+length:]\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"unsupported value type during decode by id: %T\", field.Type))\n\t\t}\n\n\t\tif value != nil {\n\t\t\treturn value, nil\n\t\t}\n\t}\n}\n\n// DecodeByName scans a byte slice for a field with the given name, converts it to its\n// expected type, and return that value.\nfunc (f *FieldCodec) DecodeByName(name string, b []byte) (interface{}, error) {\n\tfi := f.FieldByName(name)\n\tif fi == nil {\n\t\treturn 0, ErrFieldNotFound\n\t}\n\treturn f.DecodeByID(fi.ID, b)\n}\n\n// FieldByName returns the field by its name. It will return a nil if not found\nfunc (f *FieldCodec) FieldByName(name string) *Field {\n\treturn f.fieldsByName[name]\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/database.go",
    "content": "// Pacage tsdb abstracts the various shard types supported by the influx_tsm command.\npackage tsdb // import \"github.com/influxdata/influxdb/cmd/influx_tsm/tsdb\"\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com/boltdb/bolt\"\n\t\"github.com/influxdata/influxdb/pkg/slices\"\n)\n\n// Flags for differentiating between engines\nconst (\n\tB1 = iota\n\tBZ1\n\tTSM1\n)\n\n// EngineFormat holds the flag for the engine\ntype EngineFormat int\n\n// String returns the string format of the engine.\nfunc (e EngineFormat) String() string {\n\tswitch e {\n\tcase TSM1:\n\t\treturn \"tsm1\"\n\tcase B1:\n\t\treturn \"b1\"\n\tcase BZ1:\n\t\treturn \"bz1\"\n\tdefault:\n\t\tpanic(\"unrecognized shard engine format\")\n\t}\n}\n\n// ShardInfo is the description of a shard on disk.\ntype ShardInfo struct {\n\tDatabase        string\n\tRetentionPolicy string\n\tPath            string\n\tFormat          EngineFormat\n\tSize            int64\n}\n\n// FormatAsString returns the format of the shard as a string.\nfunc (s *ShardInfo) FormatAsString() string {\n\treturn s.Format.String()\n}\n\n// FullPath returns the full path to the shard, given the data directory root.\nfunc (s *ShardInfo) FullPath(dataPath string) string {\n\treturn filepath.Join(dataPath, s.Database, s.RetentionPolicy, s.Path)\n}\n\n// ShardInfos is an array of ShardInfo\ntype ShardInfos []*ShardInfo\n\nfunc (s ShardInfos) Len() int      { return len(s) }\nfunc (s ShardInfos) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\nfunc (s ShardInfos) Less(i, j int) bool {\n\tif s[i].Database == s[j].Database {\n\t\tif s[i].RetentionPolicy == s[j].RetentionPolicy {\n\t\t\treturn s[i].Path < s[j].Path\n\t\t}\n\n\t\treturn s[i].RetentionPolicy < s[j].RetentionPolicy\n\t}\n\n\treturn s[i].Database < s[j].Database\n}\n\n// Databases returns the sorted unique set of databases for the shards.\nfunc (s ShardInfos) Databases() []string {\n\tdbm := 
make(map[string]bool)\n\tfor _, ss := range s {\n\t\tdbm[ss.Database] = true\n\t}\n\n\tvar dbs []string\n\tfor k := range dbm {\n\t\tdbs = append(dbs, k)\n\t}\n\tsort.Strings(dbs)\n\treturn dbs\n}\n\n// FilterFormat returns a copy of the ShardInfos, with shards of the given\n// format removed.\nfunc (s ShardInfos) FilterFormat(fmt EngineFormat) ShardInfos {\n\tvar a ShardInfos\n\tfor _, si := range s {\n\t\tif si.Format != fmt {\n\t\t\ta = append(a, si)\n\t\t}\n\t}\n\treturn a\n}\n\n// Size returns the space on disk consumed by the shards.\nfunc (s ShardInfos) Size() int64 {\n\tvar sz int64\n\tfor _, si := range s {\n\t\tsz += si.Size\n\t}\n\treturn sz\n}\n\n// ExclusiveDatabases returns a copy of the ShardInfo, with shards associated\n// with the given databases present. If the given set is empty, all databases\n// are returned.\nfunc (s ShardInfos) ExclusiveDatabases(exc []string) ShardInfos {\n\tvar a ShardInfos\n\n\t// Empty set? Return everything.\n\tif len(exc) == 0 {\n\t\ta = make(ShardInfos, len(s))\n\t\tcopy(a, s)\n\t\treturn a\n\t}\n\n\tfor _, si := range s {\n\t\tif slices.Exists(exc, si.Database) {\n\t\t\ta = append(a, si)\n\t\t}\n\t}\n\treturn a\n}\n\n// Database represents an entire database on disk.\ntype Database struct {\n\tpath string\n}\n\n// NewDatabase creates a database instance using data at path.\nfunc NewDatabase(path string) *Database {\n\treturn &Database{path: path}\n}\n\n// Name returns the name of the database.\nfunc (d *Database) Name() string {\n\treturn path.Base(d.path)\n}\n\n// Path returns the path to the database.\nfunc (d *Database) Path() string {\n\treturn d.path\n}\n\n// Shards returns information for every shard in the database.\nfunc (d *Database) Shards() ([]*ShardInfo, error) {\n\tfd, err := os.Open(d.path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get each retention policy.\n\trps, err := fd.Readdirnames(-1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Process each retention policy.\n\tvar shardInfos 
[]*ShardInfo\n\tfor _, rp := range rps {\n\t\trpfd, err := os.Open(filepath.Join(d.path, rp))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Process each shard\n\t\tshards, err := rpfd.Readdirnames(-1)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, sh := range shards {\n\t\t\tfmt, sz, err := shardFormat(filepath.Join(d.path, rp, sh))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tsi := &ShardInfo{\n\t\t\t\tDatabase:        d.Name(),\n\t\t\t\tRetentionPolicy: path.Base(rp),\n\t\t\t\tPath:            sh,\n\t\t\t\tFormat:          fmt,\n\t\t\t\tSize:            sz,\n\t\t\t}\n\t\t\tshardInfos = append(shardInfos, si)\n\t\t}\n\t}\n\n\tsort.Sort(ShardInfos(shardInfos))\n\treturn shardInfos, nil\n}\n\n// shardFormat returns the format and size on disk of the shard at path.\nfunc shardFormat(path string) (EngineFormat, int64, error) {\n\t// If it's a directory then it's a tsm1 engine\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\tif fi.Mode().IsDir() {\n\t\treturn TSM1, fi.Size(), nil\n\t}\n\n\t// It must be a BoltDB-based engine.\n\tdb, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 1 * time.Second})\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\tdefer db.Close()\n\n\tvar format EngineFormat\n\terr = db.View(func(tx *bolt.Tx) error {\n\t\t// Retrieve the meta bucket.\n\t\tb := tx.Bucket([]byte(\"meta\"))\n\n\t\t// If no format is specified then it must be an original b1 database.\n\t\tif b == nil {\n\t\t\tformat = B1\n\t\t\treturn nil\n\t\t}\n\n\t\t// There is an actual format indicator.\n\t\tswitch f := string(b.Get([]byte(\"format\"))); f {\n\t\tcase \"b1\", \"v1\":\n\t\t\tformat = B1\n\t\tcase \"bz1\":\n\t\t\tformat = BZ1\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unrecognized engine format: %s\", f)\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn format, fi.Size(), err\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal/meta.pb.go",
    "content": "// Code generated by protoc-gen-gogo.\n// source: internal/meta.proto\n// DO NOT EDIT!\n\n/*\nPackage internal is a generated protocol buffer package.\n\nIt is generated from these files:\n\tinternal/meta.proto\n\nIt has these top-level messages:\n\tSeries\n\tTag\n\tMeasurementFields\n\tField\n*/\npackage internal\n\nimport proto \"github.com/gogo/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\ntype Series struct {\n\tKey              *string `protobuf:\"bytes,1,req,name=Key\" json:\"Key,omitempty\"`\n\tTags             []*Tag  `protobuf:\"bytes,2,rep,name=Tags\" json:\"Tags,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *Series) Reset()         { *m = Series{} }\nfunc (m *Series) String() string { return proto.CompactTextString(m) }\nfunc (*Series) ProtoMessage()    {}\n\nfunc (m *Series) GetKey() string {\n\tif m != nil && m.Key != nil {\n\t\treturn *m.Key\n\t}\n\treturn \"\"\n}\n\nfunc (m *Series) GetTags() []*Tag {\n\tif m != nil {\n\t\treturn m.Tags\n\t}\n\treturn nil\n}\n\ntype Tag struct {\n\tKey              *string `protobuf:\"bytes,1,req,name=Key\" json:\"Key,omitempty\"`\n\tValue            *string `protobuf:\"bytes,2,req,name=Value\" json:\"Value,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *Tag) Reset()         { *m = Tag{} }\nfunc (m *Tag) String() string { return proto.CompactTextString(m) }\nfunc (*Tag) ProtoMessage()    {}\n\nfunc (m *Tag) GetKey() string {\n\tif m != nil && m.Key != nil {\n\t\treturn *m.Key\n\t}\n\treturn \"\"\n}\n\nfunc (m *Tag) GetValue() string {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn \"\"\n}\n\ntype MeasurementFields struct {\n\tFields           []*Field `protobuf:\"bytes,1,rep,name=Fields\" json:\"Fields,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m 
*MeasurementFields) Reset()         { *m = MeasurementFields{} }\nfunc (m *MeasurementFields) String() string { return proto.CompactTextString(m) }\nfunc (*MeasurementFields) ProtoMessage()    {}\n\nfunc (m *MeasurementFields) GetFields() []*Field {\n\tif m != nil {\n\t\treturn m.Fields\n\t}\n\treturn nil\n}\n\ntype Field struct {\n\tID               *int32  `protobuf:\"varint,1,req,name=ID\" json:\"ID,omitempty\"`\n\tName             *string `protobuf:\"bytes,2,req,name=Name\" json:\"Name,omitempty\"`\n\tType             *int32  `protobuf:\"varint,3,req,name=Type\" json:\"Type,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *Field) Reset()         { *m = Field{} }\nfunc (m *Field) String() string { return proto.CompactTextString(m) }\nfunc (*Field) ProtoMessage()    {}\n\nfunc (m *Field) GetID() int32 {\n\tif m != nil && m.ID != nil {\n\t\treturn *m.ID\n\t}\n\treturn 0\n}\n\nfunc (m *Field) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *Field) GetType() int32 {\n\tif m != nil && m.Type != nil {\n\t\treturn *m.Type\n\t}\n\treturn 0\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/types.go",
    "content": "package tsdb\n\nimport (\n\t\"encoding/binary\"\n\t\"strings\"\n\n\t\"github.com/influxdata/influxdb/cmd/influx_tsm/tsdb/internal\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\n\t\"github.com/gogo/protobuf/proto\"\n)\n\n// Field represents an encoded field.\ntype Field struct {\n\tID   uint8             `json:\"id,omitempty\"`\n\tName string            `json:\"name,omitempty\"`\n\tType influxql.DataType `json:\"type,omitempty\"`\n}\n\n// MeasurementFields is a mapping from measurements to its fields.\ntype MeasurementFields struct {\n\tFields map[string]*Field `json:\"fields\"`\n\tCodec  *FieldCodec\n}\n\n// UnmarshalBinary decodes the object from a binary format.\nfunc (m *MeasurementFields) UnmarshalBinary(buf []byte) error {\n\tvar pb internal.MeasurementFields\n\tif err := proto.Unmarshal(buf, &pb); err != nil {\n\t\treturn err\n\t}\n\tm.Fields = make(map[string]*Field)\n\tfor _, f := range pb.Fields {\n\t\tm.Fields[f.GetName()] = &Field{ID: uint8(f.GetID()), Name: f.GetName(), Type: influxql.DataType(f.GetType())}\n\t}\n\treturn nil\n}\n\n// Series represents a series in the shard.\ntype Series struct {\n\tKey  string\n\tTags map[string]string\n}\n\n// MeasurementFromSeriesKey returns the Measurement name for a given series.\nfunc MeasurementFromSeriesKey(key string) string {\n\treturn strings.SplitN(key, \",\", 2)[0]\n}\n\n// DecodeKeyValue decodes the key and value from bytes.\nfunc DecodeKeyValue(field string, dec *FieldCodec, k, v []byte) (int64, interface{}) {\n\t// Convert key to a timestamp.\n\tkey := int64(binary.BigEndian.Uint64(k[0:8]))\n\n\tdecValue, err := dec.DecodeByName(field, v)\n\tif err != nil {\n\t\treturn key, nil\n\t}\n\treturn key, decValue\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go",
    "content": "// Package backup is the backup subcommand for the influxd command.\npackage backup\n\nimport (\n\t\"encoding/binary\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/services/snapshotter\"\n\t\"github.com/influxdata/influxdb/tcp\"\n)\n\nconst (\n\t// Suffix is a suffix added to the backup while it's in-process.\n\tSuffix = \".pending\"\n\n\t// Metafile is the base name given to the metastore backups.\n\tMetafile = \"meta\"\n\n\t// BackupFilePattern is the beginning of the pattern for a backup\n\t// file. They follow the scheme <database>.<retention>.<shardID>.<increment>\n\tBackupFilePattern = \"%s.%s.%05d\"\n)\n\n// Command represents the program execution for \"influxd backup\".\ntype Command struct {\n\t// The logger passed to the ticker during execution.\n\tLogger *log.Logger\n\n\t// Standard input/output, overridden for testing.\n\tStderr io.Writer\n\tStdout io.Writer\n\n\thost     string\n\tpath     string\n\tdatabase string\n}\n\n// NewCommand returns a new instance of Command with default settings.\nfunc NewCommand() *Command {\n\treturn &Command{\n\t\tStderr: os.Stderr,\n\t\tStdout: os.Stdout,\n\t}\n}\n\n// Run executes the program.\nfunc (cmd *Command) Run(args ...string) error {\n\t// Set up logger.\n\tcmd.Logger = log.New(cmd.Stderr, \"\", log.LstdFlags)\n\n\t// Parse command line arguments.\n\tretentionPolicy, shardID, since, err := cmd.parseFlags(args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// based on the arguments passed in we only backup the minimum\n\tif shardID != \"\" {\n\t\t// always backup the metastore\n\t\tif err := cmd.backupMetastore(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = cmd.backupShard(retentionPolicy, shardID, since)\n\t} else if retentionPolicy != \"\" {\n\t\terr = cmd.backupRetentionPolicy(retentionPolicy, since)\n\t} else if cmd.database != 
\"\" {\n\t\terr = cmd.backupDatabase(since)\n\t} else {\n\t\terr = cmd.backupMetastore()\n\t}\n\n\tif err != nil {\n\t\tcmd.Logger.Printf(\"backup failed: %v\", err)\n\t\treturn err\n\t}\n\n\tcmd.Logger.Println(\"backup complete\")\n\n\treturn nil\n}\n\n// parseFlags parses and validates the command line arguments into a request object.\nfunc (cmd *Command) parseFlags(args []string) (retentionPolicy, shardID string, since time.Time, err error) {\n\tfs := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\n\tfs.StringVar(&cmd.host, \"host\", \"localhost:8088\", \"\")\n\tfs.StringVar(&cmd.database, \"database\", \"\", \"\")\n\tfs.StringVar(&retentionPolicy, \"retention\", \"\", \"\")\n\tfs.StringVar(&shardID, \"shard\", \"\", \"\")\n\tvar sinceArg string\n\tfs.StringVar(&sinceArg, \"since\", \"\", \"\")\n\n\tfs.SetOutput(cmd.Stderr)\n\tfs.Usage = cmd.printUsage\n\n\terr = fs.Parse(args)\n\tif err != nil {\n\t\treturn\n\t}\n\tif sinceArg != \"\" {\n\t\tsince, err = time.Parse(time.RFC3339, sinceArg)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Ensure that only one arg is specified.\n\tif fs.NArg() == 0 {\n\t\treturn \"\", \"\", time.Unix(0, 0), errors.New(\"backup destination path required\")\n\t} else if fs.NArg() != 1 {\n\t\treturn \"\", \"\", time.Unix(0, 0), errors.New(\"only one backup path allowed\")\n\t}\n\tcmd.path = fs.Arg(0)\n\n\terr = os.MkdirAll(cmd.path, 0700)\n\n\treturn\n}\n\n// backupShard will write a tar archive of the passed in shard with any TSM files that have been\n// created since the time passed in\nfunc (cmd *Command) backupShard(retentionPolicy string, shardID string, since time.Time) error {\n\tid, err := strconv.ParseUint(shardID, 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tshardArchivePath, err := cmd.nextPath(filepath.Join(cmd.path, fmt.Sprintf(BackupFilePattern, cmd.database, retentionPolicy, id)))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.Logger.Printf(\"backing up db=%v rp=%v shard=%v to %s since 
%s\",\n\t\tcmd.database, retentionPolicy, shardID, shardArchivePath, since)\n\n\treq := &snapshotter.Request{\n\t\tType:            snapshotter.RequestShardBackup,\n\t\tDatabase:        cmd.database,\n\t\tRetentionPolicy: retentionPolicy,\n\t\tShardID:         id,\n\t\tSince:           since,\n\t}\n\n\t// TODO: verify shard backup data\n\treturn cmd.downloadAndVerify(req, shardArchivePath, nil)\n}\n\n// backupDatabase will request the database information from the server and then backup the metastore and\n// every shard in every retention policy in the database. Each shard will be written to a separate tar.\nfunc (cmd *Command) backupDatabase(since time.Time) error {\n\tcmd.Logger.Printf(\"backing up db=%s since %s\", cmd.database, since)\n\n\treq := &snapshotter.Request{\n\t\tType:     snapshotter.RequestDatabaseInfo,\n\t\tDatabase: cmd.database,\n\t}\n\n\tresponse, err := cmd.requestInfo(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn cmd.backupResponsePaths(response, since)\n}\n\n// backupRetentionPolicy will request the retention policy information from the server and then backup\n// the metastore and every shard in the retention policy. 
Each shard will be written to a separate tar.\nfunc (cmd *Command) backupRetentionPolicy(retentionPolicy string, since time.Time) error {\n\tcmd.Logger.Printf(\"backing up rp=%s since %s\", retentionPolicy, since)\n\n\treq := &snapshotter.Request{\n\t\tType:            snapshotter.RequestRetentionPolicyInfo,\n\t\tDatabase:        cmd.database,\n\t\tRetentionPolicy: retentionPolicy,\n\t}\n\n\tresponse, err := cmd.requestInfo(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn cmd.backupResponsePaths(response, since)\n}\n\n// backupResponsePaths will backup the metastore and all shard paths in the response struct\nfunc (cmd *Command) backupResponsePaths(response *snapshotter.Response, since time.Time) error {\n\tif err := cmd.backupMetastore(); err != nil {\n\t\treturn err\n\t}\n\n\t// loop through the returned paths and back up each shard\n\tfor _, path := range response.Paths {\n\t\trp, id, err := retentionAndShardFromPath(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := cmd.backupShard(rp, id, since); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// backupMetastore will backup the metastore on the host to the passed in path. 
Database and retention policy backups\n// will force a backup of the metastore as well as requesting a specific shard backup from the command line\nfunc (cmd *Command) backupMetastore() error {\n\tmetastoreArchivePath, err := cmd.nextPath(filepath.Join(cmd.path, Metafile))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.Logger.Printf(\"backing up metastore to %s\", metastoreArchivePath)\n\n\treq := &snapshotter.Request{\n\t\tType: snapshotter.RequestMetastoreBackup,\n\t}\n\n\treturn cmd.downloadAndVerify(req, metastoreArchivePath, func(file string) error {\n\t\tbinData, err := ioutil.ReadFile(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tmagic := binary.BigEndian.Uint64(binData[:8])\n\t\tif magic != snapshotter.BackupMagicHeader {\n\t\t\tcmd.Logger.Println(\"Invalid metadata blob, ensure the metadata service is running (default port 8088)\")\n\t\t\treturn errors.New(\"invalid metadata received\")\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\n// nextPath returns the next file to write to.\nfunc (cmd *Command) nextPath(path string) (string, error) {\n\t// Iterate through incremental files until one is available.\n\tfor i := 0; ; i++ {\n\t\ts := fmt.Sprintf(path+\".%02d\", i)\n\t\tif _, err := os.Stat(s); os.IsNotExist(err) {\n\t\t\treturn s, nil\n\t\t} else if err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n}\n\n// downloadAndVerify will download either the metastore or shard to a temp file and then\n// rename it to a good backup file name after complete\nfunc (cmd *Command) downloadAndVerify(req *snapshotter.Request, path string, validator func(string) error) error {\n\ttmppath := path + Suffix\n\tif err := cmd.download(req, tmppath); err != nil {\n\t\treturn err\n\t}\n\n\tif validator != nil {\n\t\tif err := validator(tmppath); err != nil {\n\t\t\tif rmErr := os.Remove(tmppath); rmErr != nil {\n\t\t\t\tcmd.Logger.Printf(\"Error cleaning up temporary file: %v\", rmErr)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\n\tf, err := os.Stat(tmppath)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\n\t// There was nothing downloaded, don't create an empty backup file.\n\tif f.Size() == 0 {\n\t\treturn os.Remove(tmppath)\n\t}\n\n\t// Rename temporary file to final path.\n\tif err := os.Rename(tmppath, path); err != nil {\n\t\treturn fmt.Errorf(\"rename: %s\", err)\n\t}\n\n\treturn nil\n}\n\n// download downloads a snapshot of either the metastore or a shard from a host to a given path.\nfunc (cmd *Command) download(req *snapshotter.Request, path string) error {\n\t// Create local file to write to.\n\tf, err := os.Create(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"open temp file: %s\", err)\n\t}\n\tdefer f.Close()\n\n\tfor i := 0; i < 10; i++ {\n\t\tif err = func() error {\n\t\t\t// Connect to snapshotter service.\n\t\t\tconn, err := tcp.Dial(\"tcp\", cmd.host, snapshotter.MuxHeader)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer conn.Close()\n\n\t\t\t// Write the request\n\t\t\tif err := json.NewEncoder(conn).Encode(req); err != nil {\n\t\t\t\treturn fmt.Errorf(\"encode snapshot request: %s\", err)\n\t\t\t}\n\n\t\t\t// Read snapshot from the connection\n\t\t\tif n, err := io.Copy(f, conn); err != nil || n == 0 {\n\t\t\t\treturn fmt.Errorf(\"copy backup to file: err=%v, n=%d\", err, n)\n\t\t\t}\n\t\t\treturn nil\n\t\t}(); err == nil {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tcmd.Logger.Printf(\"Download shard %v failed %s.  
Retrying (%d)...\\n\", req.ShardID, err, i)\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}\n\n\treturn err\n}\n\n// requestInfo will request the database or retention policy information from the host\nfunc (cmd *Command) requestInfo(request *snapshotter.Request) (*snapshotter.Response, error) {\n\t// Connect to snapshotter service.\n\tconn, err := tcp.Dial(\"tcp\", cmd.host, snapshotter.MuxHeader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.Close()\n\n\t// Write the request\n\tif err := json.NewEncoder(conn).Encode(request); err != nil {\n\t\treturn nil, fmt.Errorf(\"encode snapshot request: %s\", err)\n\t}\n\n\t// Read the response\n\tvar r snapshotter.Response\n\tif err := json.NewDecoder(conn).Decode(&r); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &r, nil\n}\n\n// printUsage prints the usage message to STDERR.\nfunc (cmd *Command) printUsage() {\n\tfmt.Fprintf(cmd.Stdout, `Downloads a snapshot of a data node and saves it to disk.\n\nUsage: influxd backup [flags] PATH\n\n    -host <host:port>\n            The host to connect to snapshot. Defaults to 127.0.0.1:8088.\n    -database <name>\n            The database to backup.\n    -retention <name>\n            Optional. The retention policy to backup.\n    -shard <id>\n            Optional. The shard id to backup. If specified, retention is required.\n    -since <2015-12-24T08:12:23>\n            Optional. Do an incremental backup since the passed in RFC3339\n            formatted time.\n\n`)\n}\n\n// retentionAndShardFromPath will take the shard relative path and split it into the\n// retention policy name and shard ID. The first part of the path should be the database name.\nfunc retentionAndShardFromPath(path string) (retention, shard string, err error) {\n\ta := strings.Split(path, string(filepath.Separator))\n\tif len(a) != 3 {\n\t\treturn \"\", \"\", fmt.Errorf(\"expected database, retention policy, and shard id in path: %s\", path)\n\t}\n\n\treturn a[1], a[2], nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influxd/help/help.go",
    "content": "// Package help is the help subcommand of the influxd command.\npackage help\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n)\n\n// Command displays help for command-line sub-commands.\ntype Command struct {\n\tStdout io.Writer\n}\n\n// NewCommand returns a new instance of Command.\nfunc NewCommand() *Command {\n\treturn &Command{\n\t\tStdout: os.Stdout,\n\t}\n}\n\n// Run executes the command.\nfunc (cmd *Command) Run(args ...string) error {\n\tfmt.Fprintln(cmd.Stdout, strings.TrimSpace(usage))\n\treturn nil\n}\n\nconst usage = `\nConfigure and start an InfluxDB server.\n\nUsage: influxd [[command] [arguments]]\n\nThe commands are:\n\n    backup               downloads a snapshot of a data node and saves it to disk\n    config               display the default configuration\n    help                 display this help message\n    restore              uses a snapshot of a data node to rebuild a cluster\n    run                  run node with existing configuration\n    version              displays the InfluxDB version\n\n\"run\" is the default command.\n\nUse \"influxd [command] -help\" for more information about a command.\n`\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influxd/main.go",
    "content": "// Command influxd is the InfluxDB server.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"math/rand\"\n\t\"os\"\n\t\"os/signal\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/cmd\"\n\t\"github.com/influxdata/influxdb/cmd/influxd/backup\"\n\t\"github.com/influxdata/influxdb/cmd/influxd/help\"\n\t\"github.com/influxdata/influxdb/cmd/influxd/restore\"\n\t\"github.com/influxdata/influxdb/cmd/influxd/run\"\n\t\"github.com/uber-go/zap\"\n)\n\n// These variables are populated via the Go linker.\nvar (\n\tversion string\n\tcommit  string\n\tbranch  string\n)\n\nfunc init() {\n\t// If commit, branch, or build time are not set, make that clear.\n\tif version == \"\" {\n\t\tversion = \"unknown\"\n\t}\n\tif commit == \"\" {\n\t\tcommit = \"unknown\"\n\t}\n\tif branch == \"\" {\n\t\tbranch = \"unknown\"\n\t}\n}\n\nfunc main() {\n\trand.Seed(time.Now().UnixNano())\n\n\tm := NewMain()\n\tif err := m.Run(os.Args[1:]...); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}\n\n// Main represents the program execution.\ntype Main struct {\n\tLogger zap.Logger\n\n\tStdin  io.Reader\n\tStdout io.Writer\n\tStderr io.Writer\n}\n\n// NewMain return a new instance of Main.\nfunc NewMain() *Main {\n\treturn &Main{\n\t\tLogger: zap.New(\n\t\t\tzap.NewTextEncoder(),\n\t\t\tzap.Output(os.Stderr),\n\t\t),\n\t\tStdin:  os.Stdin,\n\t\tStdout: os.Stdout,\n\t\tStderr: os.Stderr,\n\t}\n}\n\n// Run determines and runs the command specified by the CLI args.\nfunc (m *Main) Run(args ...string) error {\n\tname, args := cmd.ParseCommandName(args)\n\n\t// Extract name from args.\n\tswitch name {\n\tcase \"\", \"run\":\n\t\tcmd := run.NewCommand()\n\n\t\t// Tell the server the build details.\n\t\tcmd.Version = version\n\t\tcmd.Commit = commit\n\t\tcmd.Branch = branch\n\t\tcmd.Logger = m.Logger\n\n\t\tif err := cmd.Run(args...); err != nil {\n\t\t\treturn fmt.Errorf(\"run: %s\", err)\n\t\t}\n\n\t\tsignalCh := make(chan os.Signal, 
1)\n\t\tsignal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)\n\t\tm.Logger.Info(\"Listening for signals\")\n\n\t\t// Block until one of the signals above is received\n\t\t<-signalCh\n\t\tm.Logger.Info(\"Signal received, initializing clean shutdown...\")\n\t\tgo cmd.Close()\n\n\t\t// Block again until another signal is received, a shutdown timeout elapses,\n\t\t// or the Command is gracefully closed\n\t\tm.Logger.Info(\"Waiting for clean shutdown...\")\n\t\tselect {\n\t\tcase <-signalCh:\n\t\t\tm.Logger.Info(\"second signal received, initializing hard shutdown\")\n\t\tcase <-time.After(time.Second * 30):\n\t\t\tm.Logger.Info(\"time limit reached, initializing hard shutdown\")\n\t\tcase <-cmd.Closed:\n\t\t\tm.Logger.Info(\"server shutdown completed\")\n\t\t}\n\n\t\t// goodbye.\n\n\tcase \"backup\":\n\t\tname := backup.NewCommand()\n\t\tif err := name.Run(args...); err != nil {\n\t\t\treturn fmt.Errorf(\"backup: %s\", err)\n\t\t}\n\tcase \"restore\":\n\t\tname := restore.NewCommand()\n\t\tif err := name.Run(args...); err != nil {\n\t\t\treturn fmt.Errorf(\"restore: %s\", err)\n\t\t}\n\tcase \"config\":\n\t\tif err := run.NewPrintConfigCommand().Run(args...); err != nil {\n\t\t\treturn fmt.Errorf(\"config: %s\", err)\n\t\t}\n\tcase \"version\":\n\t\tif err := NewVersionCommand().Run(args...); err != nil {\n\t\t\treturn fmt.Errorf(\"version: %s\", err)\n\t\t}\n\tcase \"help\":\n\t\tif err := help.NewCommand().Run(args...); err != nil {\n\t\t\treturn fmt.Errorf(\"help: %s\", err)\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(`unknown command \"%s\"`+\"\\n\"+`Run 'influxd help' for usage`+\"\\n\\n\", name)\n\t}\n\n\treturn nil\n}\n\n// VersionCommand represents the command executed by \"influxd version\".\ntype VersionCommand struct {\n\tStdout io.Writer\n\tStderr io.Writer\n}\n\n// NewVersionCommand return a new instance of VersionCommand.\nfunc NewVersionCommand() *VersionCommand {\n\treturn &VersionCommand{\n\t\tStdout: os.Stdout,\n\t\tStderr: os.Stderr,\n\t}\n}\n\n// 
Run prints the current version and commit info.\nfunc (cmd *VersionCommand) Run(args ...string) error {\n\t// Parse flags in case -h is specified.\n\tfs := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tfs.Usage = func() { fmt.Fprintln(cmd.Stderr, versionUsage) }\n\tif err := fs.Parse(args); err != nil {\n\t\treturn err\n\t}\n\n\t// Print version info.\n\tfmt.Fprintf(cmd.Stdout, \"InfluxDB v%s (git: %s %s)\\n\", version, branch, commit)\n\n\treturn nil\n}\n\nvar versionUsage = `Displays the InfluxDB version, build branch and git commit hash.\n\nUsage: influxd version\n`\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go",
    "content": "// Package restore is the restore subcommand for the influxd command,\n// for restoring from a backup.\npackage restore\n\nimport (\n\t\"archive/tar\"\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strconv\"\n\n\t\"github.com/influxdata/influxdb/cmd/influxd/backup\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/services/snapshotter\"\n)\n\n// Command represents the program execution for \"influxd restore\".\ntype Command struct {\n\tStdout io.Writer\n\tStderr io.Writer\n\n\tbackupFilesPath string\n\tmetadir         string\n\tdatadir         string\n\tdatabase        string\n\tretention       string\n\tshard           string\n\n\t// TODO: when the new meta stuff is done this should not be exported or be gone\n\tMetaConfig *meta.Config\n}\n\n// NewCommand returns a new instance of Command with default settings.\nfunc NewCommand() *Command {\n\treturn &Command{\n\t\tStdout:     os.Stdout,\n\t\tStderr:     os.Stderr,\n\t\tMetaConfig: meta.NewConfig(),\n\t}\n}\n\n// Run executes the program.\nfunc (cmd *Command) Run(args ...string) error {\n\tif err := cmd.parseFlags(args); err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.metadir != \"\" {\n\t\tif err := cmd.unpackMeta(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif cmd.shard != \"\" {\n\t\treturn cmd.unpackShard(cmd.shard)\n\t} else if cmd.retention != \"\" {\n\t\treturn cmd.unpackRetention()\n\t} else if cmd.datadir != \"\" {\n\t\treturn cmd.unpackDatabase()\n\t}\n\treturn nil\n}\n\n// parseFlags parses and validates the command line arguments.\nfunc (cmd *Command) parseFlags(args []string) error {\n\tfs := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tfs.StringVar(&cmd.metadir, \"metadir\", \"\", \"\")\n\tfs.StringVar(&cmd.datadir, \"datadir\", \"\", \"\")\n\tfs.StringVar(&cmd.database, \"database\", \"\", \"\")\n\tfs.StringVar(&cmd.retention, \"retention\", \"\", 
\"\")\n\tfs.StringVar(&cmd.shard, \"shard\", \"\", \"\")\n\tfs.SetOutput(cmd.Stdout)\n\tfs.Usage = cmd.printUsage\n\tif err := fs.Parse(args); err != nil {\n\t\treturn err\n\t}\n\n\tcmd.MetaConfig = meta.NewConfig()\n\tcmd.MetaConfig.Dir = cmd.metadir\n\n\t// Require output path.\n\tcmd.backupFilesPath = fs.Arg(0)\n\tif cmd.backupFilesPath == \"\" {\n\t\treturn fmt.Errorf(\"path with backup files required\")\n\t}\n\n\t// validate the arguments\n\tif cmd.metadir == \"\" && cmd.database == \"\" {\n\t\treturn fmt.Errorf(\"-metadir or -database are required to restore\")\n\t}\n\n\tif cmd.database != \"\" && cmd.datadir == \"\" {\n\t\treturn fmt.Errorf(\"-datadir is required to restore\")\n\t}\n\n\tif cmd.shard != \"\" {\n\t\tif cmd.database == \"\" {\n\t\t\treturn fmt.Errorf(\"-database is required to restore shard\")\n\t\t}\n\t\tif cmd.retention == \"\" {\n\t\t\treturn fmt.Errorf(\"-retention is required to restore shard\")\n\t\t}\n\t} else if cmd.retention != \"\" && cmd.database == \"\" {\n\t\treturn fmt.Errorf(\"-database is required to restore retention policy\")\n\t}\n\n\treturn nil\n}\n\n// unpackMeta reads the metadata from the backup directory and initializes a raft\n// cluster and replaces the root metadata.\nfunc (cmd *Command) unpackMeta() error {\n\t// find the meta file\n\tmetaFiles, err := filepath.Glob(filepath.Join(cmd.backupFilesPath, backup.Metafile+\".*\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(metaFiles) == 0 {\n\t\treturn fmt.Errorf(\"no metastore backups in %s\", cmd.backupFilesPath)\n\t}\n\n\tlatest := metaFiles[len(metaFiles)-1]\n\n\tfmt.Fprintf(cmd.Stdout, \"Using metastore snapshot: %v\\n\", latest)\n\t// Read the metastore backup\n\tf, err := os.Open(latest)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar buf bytes.Buffer\n\tif _, err := io.Copy(&buf, f); err != nil {\n\t\treturn fmt.Errorf(\"copy: %s\", err)\n\t}\n\n\tb := buf.Bytes()\n\tvar i int\n\n\t// Make sure the file is actually a meta store backup file\n\tmagic := 
binary.BigEndian.Uint64(b[:8])\n\tif magic != snapshotter.BackupMagicHeader {\n\t\treturn fmt.Errorf(\"invalid metadata file\")\n\t}\n\ti += 8\n\n\t// Size of the meta store bytes\n\tlength := int(binary.BigEndian.Uint64(b[i : i+8]))\n\ti += 8\n\tmetaBytes := b[i : i+length]\n\ti += int(length)\n\n\t// Size of the node.json bytes\n\tlength = int(binary.BigEndian.Uint64(b[i : i+8]))\n\ti += 8\n\tnodeBytes := b[i : i+length]\n\n\t// Unpack into metadata.\n\tvar data meta.Data\n\tif err := data.UnmarshalBinary(metaBytes); err != nil {\n\t\treturn fmt.Errorf(\"unmarshal: %s\", err)\n\t}\n\n\t// Copy meta config and remove peers so it starts in single mode.\n\tc := cmd.MetaConfig\n\tc.Dir = cmd.metadir\n\n\t// Create the meta dir\n\tif os.MkdirAll(c.Dir, 0700); err != nil {\n\t\treturn err\n\t}\n\n\t// Write node.json back to meta dir\n\tif err := ioutil.WriteFile(filepath.Join(c.Dir, \"node.json\"), nodeBytes, 0655); err != nil {\n\t\treturn err\n\t}\n\n\tclient := meta.NewClient(c)\n\tif err := client.Open(); err != nil {\n\t\treturn err\n\t}\n\tdefer client.Close()\n\n\t// Force set the full metadata.\n\tif err := client.SetData(&data); err != nil {\n\t\treturn fmt.Errorf(\"set data: %s\", err)\n\t}\n\n\t// remove the raft.db file if it exists\n\terr = os.Remove(filepath.Join(cmd.metadir, \"raft.db\"))\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\t// remove the node.json file if it exists\n\terr = os.Remove(filepath.Join(cmd.metadir, \"node.json\"))\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// unpackShard will look for all backup files in the path matching this shard ID\n// and restore them to the data dir\nfunc (cmd *Command) unpackShard(shardID string) error {\n\t// make sure the shard isn't already there so we don't clobber anything\n\trestorePath := filepath.Join(cmd.datadir, cmd.database, cmd.retention, shardID)\n\tif _, err := 
os.Stat(restorePath); err != nil && !os.IsNotExist(err) {\n\t\treturn fmt.Errorf(\"shard already present: %s\", restorePath)\n\t}\n\n\tid, err := strconv.ParseUint(shardID, 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// find the shard backup files\n\tpat := filepath.Join(cmd.backupFilesPath, fmt.Sprintf(backup.BackupFilePattern, cmd.database, cmd.retention, id))\n\treturn cmd.unpackFiles(pat + \".*\")\n}\n\n// unpackDatabase will look for all backup files in the path matching this database\n// and restore them to the data dir\nfunc (cmd *Command) unpackDatabase() error {\n\t// make sure the shard isn't already there so we don't clobber anything\n\trestorePath := filepath.Join(cmd.datadir, cmd.database)\n\tif _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) {\n\t\treturn fmt.Errorf(\"database already present: %s\", restorePath)\n\t}\n\n\t// find the database backup files\n\tpat := filepath.Join(cmd.backupFilesPath, cmd.database)\n\treturn cmd.unpackFiles(pat + \".*\")\n}\n\n// unpackRetention will look for all backup files in the path matching this retention\n// and restore them to the data dir\nfunc (cmd *Command) unpackRetention() error {\n\t// make sure the shard isn't already there so we don't clobber anything\n\trestorePath := filepath.Join(cmd.datadir, cmd.database, cmd.retention)\n\tif _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) {\n\t\treturn fmt.Errorf(\"retention already present: %s\", restorePath)\n\t}\n\n\t// find the retention backup files\n\tpat := filepath.Join(cmd.backupFilesPath, cmd.database)\n\treturn cmd.unpackFiles(fmt.Sprintf(\"%s.%s.*\", pat, cmd.retention))\n}\n\n// unpackFiles will look for backup files matching the pattern and restore them to the data dir\nfunc (cmd *Command) unpackFiles(pat string) error {\n\tfmt.Printf(\"Restoring from backup %s\\n\", pat)\n\n\tbackupFiles, err := filepath.Glob(pat)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(backupFiles) == 0 {\n\t\treturn 
fmt.Errorf(\"no backup files for %s in %s\", pat, cmd.backupFilesPath)\n\t}\n\n\tfor _, fn := range backupFiles {\n\t\tif err := cmd.unpackTar(fn); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// unpackTar will restore a single tar archive to the data dir\nfunc (cmd *Command) unpackTar(tarFile string) error {\n\tf, err := os.Open(tarFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\ttr := tar.NewReader(f)\n\n\tfor {\n\t\thdr, err := tr.Next()\n\t\tif err == io.EOF {\n\t\t\treturn nil\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := cmd.unpackFile(tr, hdr.Name); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n// unpackFile will copy the current file from the tar archive to the data dir\nfunc (cmd *Command) unpackFile(tr *tar.Reader, fileName string) error {\n\tnativeFileName := filepath.FromSlash(fileName)\n\tfn := filepath.Join(cmd.datadir, nativeFileName)\n\tfmt.Printf(\"unpacking %s\\n\", fn)\n\n\tif err := os.MkdirAll(filepath.Dir(fn), 0777); err != nil {\n\t\treturn fmt.Errorf(\"error making restore dir: %s\", err.Error())\n\t}\n\n\tff, err := os.Create(fn)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer ff.Close()\n\n\tif _, err := io.Copy(ff, tr); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// printUsage prints the usage message to STDERR.\nfunc (cmd *Command) printUsage() {\n\tfmt.Fprintf(cmd.Stdout, `Uses backups from the PATH to restore the metastore, databases,\nretention policies, or specific shards. The InfluxDB process must not be\nrunning during a restore.\n\nUsage: influxd restore [flags] PATH\n\n    -metadir <path>\n            Optional. If set the metastore will be recovered to the given path.\n    -datadir <path>\n            Optional. If set the restore process will recover the specified\n            database, retention policy or shard to the given directory.\n    -database <name>\n            Optional. Required if no metadir given. 
Will restore the database\n            TSM files.\n    -retention <name>\n            Optional. If given, database is required. Will restore the retention policy's\n            TSM files.\n    -shard <id>\n            Optional. If given, database and retention are required. Will restore the shard's\n            TSM files.\n\n`)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go",
    "content": "// Package run is the run (default) subcommand for the influxd command.\npackage run\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/uber-go/zap\"\n)\n\nconst logo = `\n 8888888           .d888 888                   8888888b.  888888b.\n   888            d88P\"  888                   888  \"Y88b 888  \"88b\n   888            888    888                   888    888 888  .88P\n   888   88888b.  888888 888 888  888 888  888 888    888 8888888K.\n   888   888 \"88b 888    888 888  888  Y8bd8P' 888    888 888  \"Y88b\n   888   888  888 888    888 888  888   X88K   888    888 888    888\n   888   888  888 888    888 Y88b 888 .d8\"\"8b. 888  .d88P 888   d88P\n 8888888 888  888 888    888  \"Y88888 888  888 8888888P\"  8888888P\"\n\n`\n\n// Command represents the command executed by \"influxd run\".\ntype Command struct {\n\tVersion   string\n\tBranch    string\n\tCommit    string\n\tBuildTime string\n\n\tclosing chan struct{}\n\tClosed  chan struct{}\n\n\tStdin  io.Reader\n\tStdout io.Writer\n\tStderr io.Writer\n\tLogger zap.Logger\n\n\tServer *Server\n}\n\n// NewCommand return a new instance of Command.\nfunc NewCommand() *Command {\n\treturn &Command{\n\t\tclosing: make(chan struct{}),\n\t\tClosed:  make(chan struct{}),\n\t\tStdin:   os.Stdin,\n\t\tStdout:  os.Stdout,\n\t\tStderr:  os.Stderr,\n\t\tLogger:  zap.New(zap.NullEncoder()),\n\t}\n}\n\n// Run parses the config from args and runs the server.\nfunc (cmd *Command) Run(args ...string) error {\n\t// Parse the command line flags.\n\toptions, err := cmd.ParseFlags(args...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Print sweet InfluxDB logo.\n\tfmt.Print(logo)\n\n\t// Mark start-up in log.\n\tcmd.Logger.Info(fmt.Sprintf(\"InfluxDB starting, version %s, branch %s, commit %s\",\n\t\tcmd.Version, cmd.Branch, cmd.Commit))\n\tcmd.Logger.Info(fmt.Sprintf(\"Go version %s, GOMAXPROCS set to 
%d\", runtime.Version(), runtime.GOMAXPROCS(0)))\n\n\t// Write the PID file.\n\tif err := cmd.writePIDFile(options.PIDFile); err != nil {\n\t\treturn fmt.Errorf(\"write pid file: %s\", err)\n\t}\n\n\t// Parse config\n\tconfig, err := cmd.ParseConfig(options.GetConfigPath())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse config: %s\", err)\n\t}\n\n\t// Apply any environment variables on top of the parsed config\n\tif err := config.ApplyEnvOverrides(); err != nil {\n\t\treturn fmt.Errorf(\"apply env config: %v\", err)\n\t}\n\n\t// Validate the configuration.\n\tif err := config.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`\", err)\n\t}\n\n\tif config.HTTPD.PprofEnabled {\n\t\t// Turn on block profiling to debug stuck databases\n\t\truntime.SetBlockProfileRate(int(1 * time.Second))\n\t}\n\n\t// Create server from config and start it.\n\tbuildInfo := &BuildInfo{\n\t\tVersion: cmd.Version,\n\t\tCommit:  cmd.Commit,\n\t\tBranch:  cmd.Branch,\n\t\tTime:    cmd.BuildTime,\n\t}\n\ts, err := NewServer(config, buildInfo)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"create server: %s\", err)\n\t}\n\ts.Logger = cmd.Logger\n\ts.CPUProfile = options.CPUProfile\n\ts.MemProfile = options.MemProfile\n\tif err := s.Open(); err != nil {\n\t\treturn fmt.Errorf(\"open server: %s\", err)\n\t}\n\tcmd.Server = s\n\n\t// Begin monitoring the server's error channel.\n\tgo cmd.monitorServerErrors()\n\n\treturn nil\n}\n\n// Close shuts down the server.\nfunc (cmd *Command) Close() error {\n\tdefer close(cmd.Closed)\n\tclose(cmd.closing)\n\tif cmd.Server != nil {\n\t\treturn cmd.Server.Close()\n\t}\n\treturn nil\n}\n\nfunc (cmd *Command) monitorServerErrors() {\n\tlogger := log.New(cmd.Stderr, \"\", log.LstdFlags)\n\tfor {\n\t\tselect {\n\t\tcase err := <-cmd.Server.Err():\n\t\t\tlogger.Println(err)\n\t\tcase <-cmd.closing:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// ParseFlags parses the command line flags 
from args and returns an options set.\nfunc (cmd *Command) ParseFlags(args ...string) (Options, error) {\n\tvar options Options\n\tfs := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tfs.StringVar(&options.ConfigPath, \"config\", \"\", \"\")\n\tfs.StringVar(&options.PIDFile, \"pidfile\", \"\", \"\")\n\t// Ignore hostname option.\n\t_ = fs.String(\"hostname\", \"\", \"\")\n\tfs.StringVar(&options.CPUProfile, \"cpuprofile\", \"\", \"\")\n\tfs.StringVar(&options.MemProfile, \"memprofile\", \"\", \"\")\n\tfs.Usage = func() { fmt.Fprintln(cmd.Stderr, usage) }\n\tif err := fs.Parse(args); err != nil {\n\t\treturn Options{}, err\n\t}\n\treturn options, nil\n}\n\n// writePIDFile writes the process ID to path.\nfunc (cmd *Command) writePIDFile(path string) error {\n\t// Ignore if path is not set.\n\tif path == \"\" {\n\t\treturn nil\n\t}\n\n\t// Ensure the required directory structure exists.\n\terr := os.MkdirAll(filepath.Dir(path), 0777)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"mkdir: %s\", err)\n\t}\n\n\t// Retrieve the PID and write it.\n\tpid := strconv.Itoa(os.Getpid())\n\tif err := ioutil.WriteFile(path, []byte(pid), 0666); err != nil {\n\t\treturn fmt.Errorf(\"write file: %s\", err)\n\t}\n\n\treturn nil\n}\n\n// ParseConfig parses the config at path.\n// It returns a demo configuration if path is blank.\nfunc (cmd *Command) ParseConfig(path string) (*Config, error) {\n\t// Use demo configuration if no config path is specified.\n\tif path == \"\" {\n\t\tcmd.Logger.Info(\"no configuration provided, using default settings\")\n\t\treturn NewDemoConfig()\n\t}\n\n\tcmd.Logger.Info(fmt.Sprintf(\"Using configuration at: %s\", path))\n\n\tconfig := NewConfig()\n\tif err := config.FromTomlFile(path); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn config, nil\n}\n\nconst usage = `Runs the InfluxDB server.\n\nUsage: influxd run [flags]\n\n    -config <path>\n            Set the path to the configuration file.\n            This defaults to the environment variable 
INFLUXDB_CONFIG_PATH,\n            ~/.influxdb/influxdb.conf, or /etc/influxdb/influxdb.conf if a file\n            is present at any of these locations.\n            Disable the automatic loading of a configuration file using\n            the null device (such as /dev/null).\n    -pidfile <path>\n            Write process ID to a file.\n    -cpuprofile <path>\n            Write CPU profiling information to a file.\n    -memprofile <path>\n            Write memory usage information to a file.\n`\n\n// Options represents the command line options that can be parsed.\ntype Options struct {\n\tConfigPath string\n\tPIDFile    string\n\tCPUProfile string\n\tMemProfile string\n}\n\n// GetConfigPath returns the config path from the options.\n// It will return a path by searching in this order:\n//   1. The CLI option in ConfigPath\n//   2. The environment variable INFLUXDB_CONFIG_PATH\n//   3. The first influxdb.conf file on the path:\n//        - ~/.influxdb\n//        - /etc/influxdb\nfunc (opt *Options) GetConfigPath() string {\n\tif opt.ConfigPath != \"\" {\n\t\tif opt.ConfigPath == os.DevNull {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn opt.ConfigPath\n\t} else if envVar := os.Getenv(\"INFLUXDB_CONFIG_PATH\"); envVar != \"\" {\n\t\treturn envVar\n\t}\n\n\tfor _, path := range []string{\n\t\tos.ExpandEnv(\"${HOME}/.influxdb/influxdb.conf\"),\n\t\t\"/etc/influxdb/influxdb.conf\",\n\t} {\n\t\tif _, err := os.Stat(path); err == nil {\n\t\t\treturn path\n\t\t}\n\t}\n\treturn \"\"\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go",
    "content": "package run\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os/user\"\n\t\"path/filepath\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/influxdata/influxdb/coordinator\"\n\t\"github.com/influxdata/influxdb/monitor\"\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n\t\"github.com/influxdata/influxdb/services/collectd\"\n\t\"github.com/influxdata/influxdb/services/continuous_querier\"\n\t\"github.com/influxdata/influxdb/services/graphite\"\n\t\"github.com/influxdata/influxdb/services/httpd\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/services/opentsdb\"\n\t\"github.com/influxdata/influxdb/services/precreator\"\n\t\"github.com/influxdata/influxdb/services/retention\"\n\t\"github.com/influxdata/influxdb/services/subscriber\"\n\t\"github.com/influxdata/influxdb/services/udp\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n)\n\nconst (\n\t// DefaultBindAddress is the default address for various RPC services.\n\tDefaultBindAddress = \"127.0.0.1:8088\"\n)\n\n// Config represents the configuration format for the influxd binary.\ntype Config struct {\n\tMeta        *meta.Config       `toml:\"meta\"`\n\tData        tsdb.Config        `toml:\"data\"`\n\tCoordinator coordinator.Config `toml:\"coordinator\"`\n\tRetention   retention.Config   `toml:\"retention\"`\n\tPrecreator  precreator.Config  `toml:\"shard-precreation\"`\n\n\tMonitor        monitor.Config    `toml:\"monitor\"`\n\tSubscriber     subscriber.Config `toml:\"subscriber\"`\n\tHTTPD          httpd.Config      `toml:\"http\"`\n\tGraphiteInputs []graphite.Config `toml:\"graphite\"`\n\tCollectdInputs []collectd.Config `toml:\"collectd\"`\n\tOpenTSDBInputs []opentsdb.Config `toml:\"opentsdb\"`\n\tUDPInputs      []udp.Config      `toml:\"udp\"`\n\n\tContinuousQuery continuous_querier.Config `toml:\"continuous_queries\"`\n\n\t// Server 
reporting\n\tReportingDisabled bool `toml:\"reporting-disabled\"`\n\n\t// BindAddress is the address that all TCP services use (Raft, Snapshot, Cluster, etc.)\n\tBindAddress string `toml:\"bind-address\"`\n}\n\n// NewConfig returns an instance of Config with reasonable defaults.\nfunc NewConfig() *Config {\n\tc := &Config{}\n\tc.Meta = meta.NewConfig()\n\tc.Data = tsdb.NewConfig()\n\tc.Coordinator = coordinator.NewConfig()\n\tc.Precreator = precreator.NewConfig()\n\n\tc.Monitor = monitor.NewConfig()\n\tc.Subscriber = subscriber.NewConfig()\n\tc.HTTPD = httpd.NewConfig()\n\n\tc.GraphiteInputs = []graphite.Config{graphite.NewConfig()}\n\tc.CollectdInputs = []collectd.Config{collectd.NewConfig()}\n\tc.OpenTSDBInputs = []opentsdb.Config{opentsdb.NewConfig()}\n\tc.UDPInputs = []udp.Config{udp.NewConfig()}\n\n\tc.ContinuousQuery = continuous_querier.NewConfig()\n\tc.Retention = retention.NewConfig()\n\tc.BindAddress = DefaultBindAddress\n\n\treturn c\n}\n\n// NewDemoConfig returns the config that runs when no config is specified.\nfunc NewDemoConfig() (*Config, error) {\n\tc := NewConfig()\n\n\tvar homeDir string\n\t// By default, store meta and data files in current users home directory\n\tu, err := user.Current()\n\tif err == nil {\n\t\thomeDir = u.HomeDir\n\t} else if os.Getenv(\"HOME\") != \"\" {\n\t\thomeDir = os.Getenv(\"HOME\")\n\t} else {\n\t\treturn nil, fmt.Errorf(\"failed to determine current user for storage\")\n\t}\n\n\tc.Meta.Dir = filepath.Join(homeDir, \".influxdb/meta\")\n\tc.Data.Dir = filepath.Join(homeDir, \".influxdb/data\")\n\tc.Data.WALDir = filepath.Join(homeDir, \".influxdb/wal\")\n\n\treturn c, nil\n}\n\n// trimBOM trims the Byte-Order-Marks from the beginning of the file.\n// This is for Windows compatability only.\n// See https://github.com/influxdata/telegraf/issues/1378.\nfunc trimBOM(f []byte) []byte {\n\treturn bytes.TrimPrefix(f, []byte(\"\\xef\\xbb\\xbf\"))\n}\n\n// FromTomlFile loads the config from a TOML file.\nfunc (c *Config) 
FromTomlFile(fpath string) error {\n\tbs, err := ioutil.ReadFile(fpath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbs = trimBOM(bs)\n\treturn c.FromToml(string(bs))\n}\n\n// FromToml loads the config from TOML.\nfunc (c *Config) FromToml(input string) error {\n\t// Replace deprecated [cluster] with [coordinator]\n\tre := regexp.MustCompile(`(?m)^\\s*\\[cluster\\]`)\n\tinput = re.ReplaceAllStringFunc(input, func(in string) string {\n\t\tin = strings.TrimSpace(in)\n\t\tout := \"[coordinator]\"\n\t\tlog.Printf(\"deprecated config option %s replaced with %s; %s will not be supported in a future release\\n\", in, out, in)\n\t\treturn out\n\t})\n\n\t_, err := toml.Decode(input, c)\n\treturn err\n}\n\n// Validate returns an error if the config is invalid.\nfunc (c *Config) Validate() error {\n\tif err := c.Meta.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.Data.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.Monitor.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.ContinuousQuery.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.Retention.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.Precreator.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.Subscriber.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, graphite := range c.GraphiteInputs {\n\t\tif err := graphite.Validate(); err != nil {\n\t\t\treturn fmt.Errorf(\"invalid graphite config: %v\", err)\n\t\t}\n\t}\n\n\tfor _, collectd := range c.CollectdInputs {\n\t\tif err := collectd.Validate(); err != nil {\n\t\t\treturn fmt.Errorf(\"invalid collectd config: %v\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ApplyEnvOverrides apply the environment configuration on top of the config.\nfunc (c *Config) ApplyEnvOverrides() error {\n\treturn c.applyEnvOverrides(\"INFLUXDB\", reflect.ValueOf(c), \"\")\n}\n\nfunc (c *Config) applyEnvOverrides(prefix string, spec reflect.Value, structKey string) error {\n\t// If we have a pointer, 
dereference it\n\telement := spec\n\tif spec.Kind() == reflect.Ptr {\n\t\telement = spec.Elem()\n\t}\n\n\tvalue := os.Getenv(prefix)\n\n\tswitch element.Kind() {\n\tcase reflect.String:\n\t\tif len(value) == 0 {\n\t\t\treturn nil\n\t\t}\n\t\telement.SetString(value)\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\tvar intValue int64\n\n\t\t// Handle toml.Duration\n\t\tif element.Type().Name() == \"Duration\" {\n\t\t\tdur, err := time.ParseDuration(value)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to apply %v to %v using type %v and value '%v'\", prefix, structKey, element.Type().String(), value)\n\t\t\t}\n\t\t\tintValue = dur.Nanoseconds()\n\t\t} else {\n\t\t\tvar err error\n\t\t\tintValue, err = strconv.ParseInt(value, 0, element.Type().Bits())\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to apply %v to %v using type %v and value '%v'\", prefix, structKey, element.Type().String(), value)\n\t\t\t}\n\t\t}\n\t\telement.SetInt(intValue)\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\tintValue, err := strconv.ParseUint(value, 0, element.Type().Bits())\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to apply %v to %v using type %v and value '%v'\", prefix, structKey, element.Type().String(), value)\n\t\t}\n\t\telement.SetUint(intValue)\n\tcase reflect.Bool:\n\t\tboolValue, err := strconv.ParseBool(value)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to apply %v to %v using type %v and value '%v'\", prefix, structKey, element.Type().String(), value)\n\t\t}\n\t\telement.SetBool(boolValue)\n\tcase reflect.Float32, reflect.Float64:\n\t\tfloatValue, err := strconv.ParseFloat(value, element.Type().Bits())\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to apply %v to %v using type %v and value '%v'\", prefix, structKey, element.Type().String(), value)\n\t\t}\n\t\telement.SetFloat(floatValue)\n\tcase reflect.Slice:\n\t\t// If the type is s slice, 
apply to each using the index as a suffix, e.g. GRAPHITE_0, GRAPHITE_0_TEMPLATES_0 or GRAPHITE_0_TEMPLATES=\"item1,item2\"\n\t\tfor j := 0; j < element.Len(); j++ {\n\t\t\tf := element.Index(j)\n\t\t\tif err := c.applyEnvOverrides(prefix, f, structKey); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := c.applyEnvOverrides(fmt.Sprintf(\"%s_%d\", prefix, j), f, structKey); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// If the type is s slice but have value not parsed as slice e.g. GRAPHITE_0_TEMPLATES=\"item1,item2\"\n\t\tif element.Len() == 0 && len(value) > 0 {\n\t\t\trules := strings.Split(value, \",\")\n\n\t\t\tfor _, rule := range rules {\n\t\t\t\telement.Set(reflect.Append(element, reflect.ValueOf(rule)))\n\t\t\t}\n\t\t}\n\tcase reflect.Struct:\n\t\ttypeOfSpec := element.Type()\n\t\tfor i := 0; i < element.NumField(); i++ {\n\t\t\tfield := element.Field(i)\n\n\t\t\t// Skip any fields that we cannot set\n\t\t\tif !field.CanSet() && field.Kind() != reflect.Slice {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfieldName := typeOfSpec.Field(i).Name\n\n\t\t\tconfigName := typeOfSpec.Field(i).Tag.Get(\"toml\")\n\t\t\t// Replace hyphens with underscores to avoid issues with shells\n\t\t\tconfigName = strings.Replace(configName, \"-\", \"_\", -1)\n\n\t\t\tenvKey := strings.ToUpper(configName)\n\t\t\tif prefix != \"\" {\n\t\t\t\tenvKey = strings.ToUpper(fmt.Sprintf(\"%s_%s\", prefix, configName))\n\t\t\t}\n\n\t\t\t// If it's a sub-config, recursively apply\n\t\t\tif field.Kind() == reflect.Struct || field.Kind() == reflect.Ptr ||\n\t\t\t\tfield.Kind() == reflect.Slice || field.Kind() == reflect.Array {\n\t\t\t\tif err := c.applyEnvOverrides(envKey, field, fieldName); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvalue := os.Getenv(envKey)\n\t\t\t// Skip any fields we don't have a value to set\n\t\t\tif len(value) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err := c.applyEnvOverrides(envKey, field, fieldName); err != nil 
{\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// Diagnostics returns a diagnostics representation of Config.\nfunc (c *Config) Diagnostics() (*diagnostics.Diagnostics, error) {\n\treturn diagnostics.RowFromMap(map[string]interface{}{\n\t\t\"reporting-disabled\": c.ReportingDisabled,\n\t\t\"bind-address\":       c.BindAddress,\n\t}), nil\n}\n\nfunc (c *Config) diagnosticsClients() map[string]diagnostics.Client {\n\t// Config settings that are always present.\n\tm := map[string]diagnostics.Client{\n\t\t\"config\": c,\n\n\t\t\"config-data\":        c.Data,\n\t\t\"config-meta\":        c.Meta,\n\t\t\"config-coordinator\": c.Coordinator,\n\t\t\"config-retention\":   c.Retention,\n\t\t\"config-precreator\":  c.Precreator,\n\n\t\t\"config-monitor\":    c.Monitor,\n\t\t\"config-subscriber\": c.Subscriber,\n\t\t\"config-httpd\":      c.HTTPD,\n\n\t\t\"config-cqs\": c.ContinuousQuery,\n\t}\n\n\t// Config settings that can be repeated and can be disabled.\n\tif g := graphite.Configs(c.GraphiteInputs); g.Enabled() {\n\t\tm[\"config-graphite\"] = g\n\t}\n\tif cc := collectd.Configs(c.CollectdInputs); cc.Enabled() {\n\t\tm[\"config-collectd\"] = cc\n\t}\n\tif t := opentsdb.Configs(c.OpenTSDBInputs); t.Enabled() {\n\t\tm[\"config-opentsdb\"] = t\n\t}\n\tif u := udp.Configs(c.UDPInputs); u.Enabled() {\n\t\tm[\"config-udp\"] = u\n\t}\n\n\treturn m\n}\n\n// registerDiagnostics registers the config settings with the Monitor.\nfunc (c *Config) registerDiagnostics(m *monitor.Monitor) {\n\tfor name, dc := range c.diagnosticsClients() {\n\t\tm.RegisterDiagnosticsClient(name, dc)\n\t}\n}\n\n// registerDiagnostics deregisters the config settings from the Monitor.\nfunc (c *Config) deregisterDiagnostics(m *monitor.Monitor) {\n\tfor name := range c.diagnosticsClients() {\n\t\tm.DeregisterDiagnosticsClient(name)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_command.go",
    "content": "package run\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t\"github.com/BurntSushi/toml\"\n)\n\n// PrintConfigCommand represents the command executed by \"influxd config\".\ntype PrintConfigCommand struct {\n\tStdin  io.Reader\n\tStdout io.Writer\n\tStderr io.Writer\n}\n\n// NewPrintConfigCommand return a new instance of PrintConfigCommand.\nfunc NewPrintConfigCommand() *PrintConfigCommand {\n\treturn &PrintConfigCommand{\n\t\tStdin:  os.Stdin,\n\t\tStdout: os.Stdout,\n\t\tStderr: os.Stderr,\n\t}\n}\n\n// Run parses and prints the current config loaded.\nfunc (cmd *PrintConfigCommand) Run(args ...string) error {\n\t// Parse command flags.\n\tfs := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tconfigPath := fs.String(\"config\", \"\", \"\")\n\tfs.Usage = func() { fmt.Fprintln(cmd.Stderr, printConfigUsage) }\n\tif err := fs.Parse(args); err != nil {\n\t\treturn err\n\t}\n\n\t// Parse config from path.\n\topt := Options{ConfigPath: *configPath}\n\tconfig, err := cmd.parseConfig(opt.GetConfigPath())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse config: %s\", err)\n\t}\n\n\t// Apply any environment variables on top of the parsed config\n\tif err := config.ApplyEnvOverrides(); err != nil {\n\t\treturn fmt.Errorf(\"apply env config: %v\", err)\n\t}\n\n\t// Validate the configuration.\n\tif err := config.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"%s. 
To generate a valid configuration file run `influxd config > influxdb.generated.conf`\", err)\n\t}\n\n\ttoml.NewEncoder(cmd.Stdout).Encode(config)\n\tfmt.Fprint(cmd.Stdout, \"\\n\")\n\n\treturn nil\n}\n\n// ParseConfig parses the config at path.\n// Returns a demo configuration if path is blank.\nfunc (cmd *PrintConfigCommand) parseConfig(path string) (*Config, error) {\n\tconfig, err := NewDemoConfig()\n\tif err != nil {\n\t\tconfig = NewConfig()\n\t}\n\n\tif path == \"\" {\n\t\treturn config, nil\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"Merging with configuration at: %s\\n\", path)\n\n\tif err := config.FromTomlFile(path); err != nil {\n\t\treturn nil, err\n\t}\n\treturn config, nil\n}\n\nvar printConfigUsage = `Displays the default configuration.\n\nUsage: influxd config [flags]\n\n    -config <path>\n            Set the path to the initial configuration file.\n            This defaults to the environment variable INFLUXDB_CONFIG_PATH,\n            ~/.influxdb/influxdb.conf, or /etc/influxdb/influxdb.conf if a file\n            is present at any of these locations.\n            Disable the automatic loading of a configuration file using\n            the null device (such as /dev/null).\n`\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go",
    "content": "package run_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/influxdata/influxdb/cmd/influxd/run\"\n)\n\n// Ensure the configuration can be parsed.\nfunc TestConfig_Parse(t *testing.T) {\n\t// Parse configuration.\n\tvar c run.Config\n\tif err := c.FromToml(`\n[meta]\ndir = \"/tmp/meta\"\n\n[data]\ndir = \"/tmp/data\"\n\n[coordinator]\n\n[http]\nbind-address = \":8087\"\n\n[[graphite]]\nprotocol = \"udp\"\n\n[[graphite]]\nprotocol = \"tcp\"\n\n[[collectd]]\nbind-address = \":1000\"\n\n[[collectd]]\nbind-address = \":1010\"\n\n[[opentsdb]]\nbind-address = \":2000\"\n\n[[opentsdb]]\nbind-address = \":2010\"\n\n[[opentsdb]]\nbind-address = \":2020\"\n\n[[udp]]\nbind-address = \":4444\"\n\n[monitoring]\nenabled = true\n\n[subscriber]\nenabled = true\n\n[continuous_queries]\nenabled = true\n`); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Validate configuration.\n\tif c.Meta.Dir != \"/tmp/meta\" {\n\t\tt.Fatalf(\"unexpected meta dir: %s\", c.Meta.Dir)\n\t} else if c.Data.Dir != \"/tmp/data\" {\n\t\tt.Fatalf(\"unexpected data dir: %s\", c.Data.Dir)\n\t} else if c.HTTPD.BindAddress != \":8087\" {\n\t\tt.Fatalf(\"unexpected api bind address: %s\", c.HTTPD.BindAddress)\n\t} else if len(c.GraphiteInputs) != 2 {\n\t\tt.Fatalf(\"unexpected graphiteInputs count: %d\", len(c.GraphiteInputs))\n\t} else if c.GraphiteInputs[0].Protocol != \"udp\" {\n\t\tt.Fatalf(\"unexpected graphite protocol(0): %s\", c.GraphiteInputs[0].Protocol)\n\t} else if c.GraphiteInputs[1].Protocol != \"tcp\" {\n\t\tt.Fatalf(\"unexpected graphite protocol(1): %s\", c.GraphiteInputs[1].Protocol)\n\t} else if c.CollectdInputs[0].BindAddress != \":1000\" {\n\t\tt.Fatalf(\"unexpected collectd bind address: %s\", c.CollectdInputs[0].BindAddress)\n\t} else if c.CollectdInputs[1].BindAddress != \":1010\" {\n\t\tt.Fatalf(\"unexpected collectd bind address: %s\", c.CollectdInputs[1].BindAddress)\n\t} else if c.OpenTSDBInputs[0].BindAddress != 
\":2000\" {\n\t\tt.Fatalf(\"unexpected opentsdb bind address: %s\", c.OpenTSDBInputs[0].BindAddress)\n\t} else if c.OpenTSDBInputs[1].BindAddress != \":2010\" {\n\t\tt.Fatalf(\"unexpected opentsdb bind address: %s\", c.OpenTSDBInputs[1].BindAddress)\n\t} else if c.OpenTSDBInputs[2].BindAddress != \":2020\" {\n\t\tt.Fatalf(\"unexpected opentsdb bind address: %s\", c.OpenTSDBInputs[2].BindAddress)\n\t} else if c.UDPInputs[0].BindAddress != \":4444\" {\n\t\tt.Fatalf(\"unexpected udp bind address: %s\", c.UDPInputs[0].BindAddress)\n\t} else if c.Subscriber.Enabled != true {\n\t\tt.Fatalf(\"unexpected subscriber enabled: %v\", c.Subscriber.Enabled)\n\t} else if c.ContinuousQuery.Enabled != true {\n\t\tt.Fatalf(\"unexpected continuous query enabled: %v\", c.ContinuousQuery.Enabled)\n\t}\n}\n\n// Ensure the configuration can be parsed.\nfunc TestConfig_Parse_EnvOverride(t *testing.T) {\n\t// Parse configuration.\n\tvar c run.Config\n\tif _, err := toml.Decode(`\n[meta]\ndir = \"/tmp/meta\"\n\n[data]\ndir = \"/tmp/data\"\n\n[coordinator]\n\n[admin]\nbind-address = \":8083\"\n\n[http]\nbind-address = \":8087\"\n\n[[graphite]]\nprotocol = \"udp\"\ntemplates = [\n  \"default.* .template.in.config\"\n]\n\n[[graphite]]\nprotocol = \"tcp\"\n\n[[collectd]]\nbind-address = \":1000\"\n\n[[collectd]]\nbind-address = \":1010\"\n\n[[opentsdb]]\nbind-address = \":2000\"\n\n[[opentsdb]]\nbind-address = \":2010\"\n\n[[udp]]\nbind-address = \":4444\"\n\n[[udp]]\n\n[monitoring]\nenabled = true\n\n[continuous_queries]\nenabled = true\n`, &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := os.Setenv(\"INFLUXDB_UDP_BIND_ADDRESS\", \":1234\"); err != nil {\n\t\tt.Fatalf(\"failed to set env var: %v\", err)\n\t}\n\n\tif err := os.Setenv(\"INFLUXDB_UDP_0_BIND_ADDRESS\", \":5555\"); err != nil {\n\t\tt.Fatalf(\"failed to set env var: %v\", err)\n\t}\n\n\tif err := os.Setenv(\"INFLUXDB_GRAPHITE_0_TEMPLATES_0\", \"overide.* .template.0\"); err != nil {\n\t\tt.Fatalf(\"failed to set env var: 
%v\", err)\n\t}\n\n\tif err := os.Setenv(\"INFLUXDB_GRAPHITE_1_TEMPLATES\", \"overide.* .template.1.1,overide.* .template.1.2\"); err != nil {\n\t\tt.Fatalf(\"failed to set env var: %v\", err)\n\t}\n\n\tif err := os.Setenv(\"INFLUXDB_GRAPHITE_1_PROTOCOL\", \"udp\"); err != nil {\n\t\tt.Fatalf(\"failed to set env var: %v\", err)\n\t}\n\n\tif err := os.Setenv(\"INFLUXDB_COLLECTD_1_BIND_ADDRESS\", \":1020\"); err != nil {\n\t\tt.Fatalf(\"failed to set env var: %v\", err)\n\t}\n\n\tif err := os.Setenv(\"INFLUXDB_OPENTSDB_0_BIND_ADDRESS\", \":2020\"); err != nil {\n\t\tt.Fatalf(\"failed to set env var: %v\", err)\n\t}\n\n\t// uint64 type\n\tif err := os.Setenv(\"INFLUXDB_DATA_CACHE_MAX_MEMORY_SIZE\", \"1000\"); err != nil {\n\t\tt.Fatalf(\"failed to set env var: %v\", err)\n\t}\n\n\tif err := c.ApplyEnvOverrides(); err != nil {\n\t\tt.Fatalf(\"failed to apply env overrides: %v\", err)\n\t}\n\n\tif c.UDPInputs[0].BindAddress != \":5555\" {\n\t\tt.Fatalf(\"unexpected udp bind address: %s\", c.UDPInputs[0].BindAddress)\n\t}\n\n\tif c.UDPInputs[1].BindAddress != \":1234\" {\n\t\tt.Fatalf(\"unexpected udp bind address: %s\", c.UDPInputs[1].BindAddress)\n\t}\n\n\tif len(c.GraphiteInputs[0].Templates) != 1 || c.GraphiteInputs[0].Templates[0] != \"overide.* .template.0\" {\n\t\tt.Fatalf(\"unexpected graphite 0 templates: %+v\", c.GraphiteInputs[0].Templates)\n\t}\n\n\tif len(c.GraphiteInputs[1].Templates) != 2 || c.GraphiteInputs[1].Templates[1] != \"overide.* .template.1.2\" {\n\t\tt.Fatalf(\"unexpected graphite 1 templates: %+v\", c.GraphiteInputs[1].Templates)\n\t}\n\n\tif c.GraphiteInputs[1].Protocol != \"udp\" {\n\t\tt.Fatalf(\"unexpected graphite protocol: %s\", c.GraphiteInputs[1].Protocol)\n\t}\n\n\tif c.CollectdInputs[1].BindAddress != \":1020\" {\n\t\tt.Fatalf(\"unexpected collectd bind address: %s\", c.CollectdInputs[1].BindAddress)\n\t}\n\n\tif c.OpenTSDBInputs[0].BindAddress != \":2020\" {\n\t\tt.Fatalf(\"unexpected opentsdb bind address: %s\", 
c.OpenTSDBInputs[0].BindAddress)\n\t}\n\n\tif c.Data.CacheMaxMemorySize != 1000 {\n\t\tt.Fatalf(\"unexpected cache max memory size: %v\", c.Data.CacheMaxMemorySize)\n\t}\n}\n\nfunc TestConfig_ValidateNoServiceConfigured(t *testing.T) {\n\tvar c run.Config\n\tif _, err := toml.Decode(`\n[meta]\nenabled = false\n\n[data]\nenabled = false\n`, &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif e := c.Validate(); e == nil {\n\t\tt.Fatalf(\"got nil, expected error\")\n\t}\n}\n\nfunc TestConfig_ValidateMonitorStore_MetaOnly(t *testing.T) {\n\tc := run.NewConfig()\n\tif _, err := toml.Decode(`\n[monitor]\nstore-enabled = true\n\n[meta]\ndir = \"foo\"\n\n[data]\nenabled = false\n`, &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := c.Validate(); err == nil {\n\t\tt.Fatalf(\"got nil, expected error\")\n\t}\n}\n\nfunc TestConfig_DeprecatedOptions(t *testing.T) {\n\t// Parse configuration.\n\tvar c run.Config\n\tif err := c.FromToml(`\n[cluster]\nmax-select-point = 100\n`); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Validate configuration.\n\tif c.Coordinator.MaxSelectPointN != 100 {\n\t\tt.Fatalf(\"unexpected coordinator max select points: %d\", c.Coordinator.MaxSelectPointN)\n\n\t}\n}\n\n// Ensure that Config.Validate correctly validates the individual subsections.\nfunc TestConfig_InvalidSubsections(t *testing.T) {\n\t// Precondition: NewDemoConfig must validate correctly.\n\tc, err := run.NewDemoConfig()\n\tif err != nil {\n\t\tt.Fatalf(\"error creating demo config: %s\", err)\n\t}\n\tif err := c.Validate(); err != nil {\n\t\tt.Fatalf(\"new demo config failed validation: %s\", err)\n\t}\n\n\t// For each subsection, load a config with a single invalid setting.\n\tfor _, tc := range []struct {\n\t\tsection string\n\t\tkv      string\n\t}{\n\t\t{\"meta\", `dir = \"\"`},\n\t\t{\"data\", `dir = \"\"`},\n\t\t{\"monitor\", `store-database = \"\"`},\n\t\t{\"continuous_queries\", `run-interval = \"0s\"`},\n\t\t{\"subscriber\", `http-timeout = \"0s\"`},\n\t\t{\"retention\", 
`check-interval = \"0s\"`},\n\t\t{\"shard-precreation\", `advance-period = \"0s\"`},\n\t} {\n\t\tc, err := run.NewDemoConfig()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"error creating demo config: %s\", err)\n\t\t}\n\n\t\ts := fmt.Sprintf(\"\\n[%s]\\n%s\\n\", tc.section, tc.kv)\n\t\tif err := c.FromToml(s); err != nil {\n\t\t\tt.Fatalf(\"error loading toml %q: %s\", s, err)\n\t\t}\n\n\t\tif err := c.Validate(); err == nil {\n\t\t\tt.Fatalf(\"expected error but got nil for config: %s\", s)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go",
    "content": "package run\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"runtime/pprof\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb\"\n\t\"github.com/influxdata/influxdb/coordinator\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/monitor\"\n\t\"github.com/influxdata/influxdb/services/collectd\"\n\t\"github.com/influxdata/influxdb/services/continuous_querier\"\n\t\"github.com/influxdata/influxdb/services/graphite\"\n\t\"github.com/influxdata/influxdb/services/httpd\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/services/opentsdb\"\n\t\"github.com/influxdata/influxdb/services/precreator\"\n\t\"github.com/influxdata/influxdb/services/retention\"\n\t\"github.com/influxdata/influxdb/services/snapshotter\"\n\t\"github.com/influxdata/influxdb/services/subscriber\"\n\t\"github.com/influxdata/influxdb/services/udp\"\n\t\"github.com/influxdata/influxdb/tcp\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\tclient \"github.com/influxdata/usage-client/v1\"\n\t\"github.com/uber-go/zap\"\n\n\t// Initialize the engine & index packages\n\t_ \"github.com/influxdata/influxdb/tsdb/engine\"\n\t_ \"github.com/influxdata/influxdb/tsdb/index\"\n)\n\nvar startTime time.Time\n\nfunc init() {\n\tstartTime = time.Now().UTC()\n}\n\n// BuildInfo represents the build details for the server code.\ntype BuildInfo struct {\n\tVersion string\n\tCommit  string\n\tBranch  string\n\tTime    string\n}\n\n// Server represents a container for the metadata and storage data and services.\n// It is built using a Config and it manages the startup and shutdown of all\n// services in the proper order.\ntype Server struct {\n\tbuildInfo BuildInfo\n\n\terr     chan error\n\tclosing chan struct{}\n\n\tBindAddress string\n\tListener    net.Listener\n\n\tLogger zap.Logger\n\n\tMetaClient *meta.Client\n\n\tTSDBStore     
*tsdb.Store\n\tQueryExecutor *influxql.QueryExecutor\n\tPointsWriter  *coordinator.PointsWriter\n\tSubscriber    *subscriber.Service\n\n\tServices []Service\n\n\t// These references are required for the tcp muxer.\n\tSnapshotterService *snapshotter.Service\n\n\tMonitor *monitor.Monitor\n\n\t// Server reporting and registration\n\treportingDisabled bool\n\n\t// Profiling\n\tCPUProfile string\n\tMemProfile string\n\n\t// httpAPIAddr is the host:port combination for the main HTTP API for querying and writing data\n\thttpAPIAddr string\n\n\t// httpUseTLS specifies if we should use a TLS connection to the http servers\n\thttpUseTLS bool\n\n\t// tcpAddr is the host:port combination for the TCP listener that services mux onto\n\ttcpAddr string\n\n\tconfig *Config\n}\n\n// NewServer returns a new instance of Server built from a config.\nfunc NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) {\n\t// We need to ensure that a meta directory always exists even if\n\t// we don't start the meta store.  node.json is always stored under\n\t// the meta directory.\n\tif err := os.MkdirAll(c.Meta.Dir, 0777); err != nil {\n\t\treturn nil, fmt.Errorf(\"mkdir all: %s\", err)\n\t}\n\n\t// 0.10-rc1 and prior would sometimes put the node.json at the root\n\t// dir which breaks backup/restore and restarting nodes.  This moves\n\t// the file from the root so it's always under the meta dir.\n\toldPath := filepath.Join(filepath.Dir(c.Meta.Dir), \"node.json\")\n\tnewPath := filepath.Join(c.Meta.Dir, \"node.json\")\n\n\tif _, err := os.Stat(oldPath); err == nil {\n\t\tif err := os.Rename(oldPath, newPath); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t_, err := influxdb.LoadNode(c.Meta.Dir)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif err := raftDBExists(c.Meta.Dir); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// In 0.10.0 bind-address got moved to the top level. 
Check\n\t// The old location to keep things backwards compatible\n\tbind := c.BindAddress\n\n\ts := &Server{\n\t\tbuildInfo: *buildInfo,\n\t\terr:       make(chan error),\n\t\tclosing:   make(chan struct{}),\n\n\t\tBindAddress: bind,\n\n\t\tLogger: zap.New(\n\t\t\tzap.NewTextEncoder(),\n\t\t\tzap.Output(os.Stderr),\n\t\t),\n\n\t\tMetaClient: meta.NewClient(c.Meta),\n\n\t\treportingDisabled: c.ReportingDisabled,\n\n\t\thttpAPIAddr: c.HTTPD.BindAddress,\n\t\thttpUseTLS:  c.HTTPD.HTTPSEnabled,\n\t\ttcpAddr:     bind,\n\n\t\tconfig: c,\n\t}\n\ts.Monitor = monitor.New(s, c.Monitor)\n\ts.config.registerDiagnostics(s.Monitor)\n\n\tif err := s.MetaClient.Open(); err != nil {\n\t\treturn nil, err\n\t}\n\n\ts.TSDBStore = tsdb.NewStore(c.Data.Dir)\n\ts.TSDBStore.EngineOptions.Config = c.Data\n\n\t// Copy TSDB configuration.\n\ts.TSDBStore.EngineOptions.EngineVersion = c.Data.Engine\n\ts.TSDBStore.EngineOptions.IndexVersion = c.Data.Index\n\n\t// Create the Subscriber service\n\ts.Subscriber = subscriber.NewService(c.Subscriber)\n\n\t// Initialize points writer.\n\ts.PointsWriter = coordinator.NewPointsWriter()\n\ts.PointsWriter.WriteTimeout = time.Duration(c.Coordinator.WriteTimeout)\n\ts.PointsWriter.TSDBStore = s.TSDBStore\n\ts.PointsWriter.Subscriber = s.Subscriber\n\n\t// Initialize query executor.\n\ts.QueryExecutor = influxql.NewQueryExecutor()\n\ts.QueryExecutor.StatementExecutor = &coordinator.StatementExecutor{\n\t\tMetaClient:  s.MetaClient,\n\t\tTaskManager: s.QueryExecutor.TaskManager,\n\t\tTSDBStore:   coordinator.LocalTSDBStore{Store: s.TSDBStore},\n\t\tShardMapper: &coordinator.LocalShardMapper{\n\t\t\tMetaClient: s.MetaClient,\n\t\t\tTSDBStore:  coordinator.LocalTSDBStore{Store: s.TSDBStore},\n\t\t},\n\t\tMonitor:           s.Monitor,\n\t\tPointsWriter:      s.PointsWriter,\n\t\tMaxSelectPointN:   c.Coordinator.MaxSelectPointN,\n\t\tMaxSelectSeriesN:  c.Coordinator.MaxSelectSeriesN,\n\t\tMaxSelectBucketsN: 
c.Coordinator.MaxSelectBucketsN,\n\t}\n\ts.QueryExecutor.TaskManager.QueryTimeout = time.Duration(c.Coordinator.QueryTimeout)\n\ts.QueryExecutor.TaskManager.LogQueriesAfter = time.Duration(c.Coordinator.LogQueriesAfter)\n\ts.QueryExecutor.TaskManager.MaxConcurrentQueries = c.Coordinator.MaxConcurrentQueries\n\n\t// Initialize the monitor\n\ts.Monitor.Version = s.buildInfo.Version\n\ts.Monitor.Commit = s.buildInfo.Commit\n\ts.Monitor.Branch = s.buildInfo.Branch\n\ts.Monitor.BuildTime = s.buildInfo.Time\n\ts.Monitor.PointsWriter = (*monitorPointsWriter)(s.PointsWriter)\n\treturn s, nil\n}\n\n// Statistics returns statistics for the services running in the Server.\nfunc (s *Server) Statistics(tags map[string]string) []models.Statistic {\n\tvar statistics []models.Statistic\n\tstatistics = append(statistics, s.QueryExecutor.Statistics(tags)...)\n\tstatistics = append(statistics, s.TSDBStore.Statistics(tags)...)\n\tstatistics = append(statistics, s.PointsWriter.Statistics(tags)...)\n\tstatistics = append(statistics, s.Subscriber.Statistics(tags)...)\n\tfor _, srv := range s.Services {\n\t\tif m, ok := srv.(monitor.Reporter); ok {\n\t\t\tstatistics = append(statistics, m.Statistics(tags)...)\n\t\t}\n\t}\n\treturn statistics\n}\n\nfunc (s *Server) appendSnapshotterService() {\n\tsrv := snapshotter.NewService()\n\tsrv.TSDBStore = s.TSDBStore\n\tsrv.MetaClient = s.MetaClient\n\ts.Services = append(s.Services, srv)\n\ts.SnapshotterService = srv\n}\n\n// SetLogOutput sets the logger used for all messages. 
It must not be called\n// after the Open method has been called.\nfunc (s *Server) SetLogOutput(w io.Writer) {\n\ts.Logger = zap.New(zap.NewTextEncoder(), zap.Output(zap.AddSync(w)))\n}\n\nfunc (s *Server) appendMonitorService() {\n\ts.Services = append(s.Services, s.Monitor)\n}\n\nfunc (s *Server) appendRetentionPolicyService(c retention.Config) {\n\tif !c.Enabled {\n\t\treturn\n\t}\n\tsrv := retention.NewService(c)\n\tsrv.MetaClient = s.MetaClient\n\tsrv.TSDBStore = s.TSDBStore\n\ts.Services = append(s.Services, srv)\n}\n\nfunc (s *Server) appendHTTPDService(c httpd.Config) {\n\tif !c.Enabled {\n\t\treturn\n\t}\n\tsrv := httpd.NewService(c)\n\tsrv.Handler.MetaClient = s.MetaClient\n\tsrv.Handler.QueryAuthorizer = meta.NewQueryAuthorizer(s.MetaClient)\n\tsrv.Handler.WriteAuthorizer = meta.NewWriteAuthorizer(s.MetaClient)\n\tsrv.Handler.QueryExecutor = s.QueryExecutor\n\tsrv.Handler.Monitor = s.Monitor\n\tsrv.Handler.PointsWriter = s.PointsWriter\n\tsrv.Handler.Version = s.buildInfo.Version\n\n\ts.Services = append(s.Services, srv)\n}\n\nfunc (s *Server) appendCollectdService(c collectd.Config) {\n\tif !c.Enabled {\n\t\treturn\n\t}\n\tsrv := collectd.NewService(c)\n\tsrv.MetaClient = s.MetaClient\n\tsrv.PointsWriter = s.PointsWriter\n\ts.Services = append(s.Services, srv)\n}\n\nfunc (s *Server) appendOpenTSDBService(c opentsdb.Config) error {\n\tif !c.Enabled {\n\t\treturn nil\n\t}\n\tsrv, err := opentsdb.NewService(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsrv.PointsWriter = s.PointsWriter\n\tsrv.MetaClient = s.MetaClient\n\ts.Services = append(s.Services, srv)\n\treturn nil\n}\n\nfunc (s *Server) appendGraphiteService(c graphite.Config) error {\n\tif !c.Enabled {\n\t\treturn nil\n\t}\n\tsrv, err := graphite.NewService(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsrv.PointsWriter = s.PointsWriter\n\tsrv.MetaClient = s.MetaClient\n\tsrv.Monitor = s.Monitor\n\ts.Services = append(s.Services, srv)\n\treturn nil\n}\n\nfunc (s *Server) appendPrecreatorService(c 
precreator.Config) error {\n\tif !c.Enabled {\n\t\treturn nil\n\t}\n\tsrv, err := precreator.NewService(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsrv.MetaClient = s.MetaClient\n\ts.Services = append(s.Services, srv)\n\treturn nil\n}\n\nfunc (s *Server) appendUDPService(c udp.Config) {\n\tif !c.Enabled {\n\t\treturn\n\t}\n\tsrv := udp.NewService(c)\n\tsrv.PointsWriter = s.PointsWriter\n\tsrv.MetaClient = s.MetaClient\n\ts.Services = append(s.Services, srv)\n}\n\nfunc (s *Server) appendContinuousQueryService(c continuous_querier.Config) {\n\tif !c.Enabled {\n\t\treturn\n\t}\n\tsrv := continuous_querier.NewService(c)\n\tsrv.MetaClient = s.MetaClient\n\tsrv.QueryExecutor = s.QueryExecutor\n\ts.Services = append(s.Services, srv)\n}\n\n// Err returns an error channel that multiplexes all out of band errors received from all services.\nfunc (s *Server) Err() <-chan error { return s.err }\n\n// Open opens the meta and data store and all services.\nfunc (s *Server) Open() error {\n\t// Start profiling, if set.\n\tstartProfile(s.CPUProfile, s.MemProfile)\n\n\t// Open shared TCP connection.\n\tln, err := net.Listen(\"tcp\", s.BindAddress)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"listen: %s\", err)\n\t}\n\ts.Listener = ln\n\n\t// Multiplex listener.\n\tmux := tcp.NewMux()\n\tgo mux.Serve(ln)\n\n\t// Append services.\n\ts.appendMonitorService()\n\ts.appendPrecreatorService(s.config.Precreator)\n\ts.appendSnapshotterService()\n\ts.appendContinuousQueryService(s.config.ContinuousQuery)\n\ts.appendHTTPDService(s.config.HTTPD)\n\ts.appendRetentionPolicyService(s.config.Retention)\n\tfor _, i := range s.config.GraphiteInputs {\n\t\tif err := s.appendGraphiteService(i); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor _, i := range s.config.CollectdInputs {\n\t\ts.appendCollectdService(i)\n\t}\n\tfor _, i := range s.config.OpenTSDBInputs {\n\t\tif err := s.appendOpenTSDBService(i); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor _, i := range s.config.UDPInputs 
{\n\t\ts.appendUDPService(i)\n\t}\n\n\ts.Subscriber.MetaClient = s.MetaClient\n\ts.Subscriber.MetaClient = s.MetaClient\n\ts.PointsWriter.MetaClient = s.MetaClient\n\ts.Monitor.MetaClient = s.MetaClient\n\n\ts.SnapshotterService.Listener = mux.Listen(snapshotter.MuxHeader)\n\n\t// Configure logging for all services and clients.\n\tif s.config.Meta.LoggingEnabled {\n\t\ts.MetaClient.WithLogger(s.Logger)\n\t}\n\ts.TSDBStore.WithLogger(s.Logger)\n\tif s.config.Data.QueryLogEnabled {\n\t\ts.QueryExecutor.WithLogger(s.Logger)\n\t}\n\ts.PointsWriter.WithLogger(s.Logger)\n\ts.Subscriber.WithLogger(s.Logger)\n\tfor _, svc := range s.Services {\n\t\tsvc.WithLogger(s.Logger)\n\t}\n\ts.SnapshotterService.WithLogger(s.Logger)\n\ts.Monitor.WithLogger(s.Logger)\n\n\t// Open TSDB store.\n\tif err := s.TSDBStore.Open(); err != nil {\n\t\treturn fmt.Errorf(\"open tsdb store: %s\", err)\n\t}\n\n\t// Open the subcriber service\n\tif err := s.Subscriber.Open(); err != nil {\n\t\treturn fmt.Errorf(\"open subscriber: %s\", err)\n\t}\n\n\t// Open the points writer service\n\tif err := s.PointsWriter.Open(); err != nil {\n\t\treturn fmt.Errorf(\"open points writer: %s\", err)\n\t}\n\n\tfor _, service := range s.Services {\n\t\tif err := service.Open(); err != nil {\n\t\t\treturn fmt.Errorf(\"open service: %s\", err)\n\t\t}\n\t}\n\n\t// Start the reporting service, if not disabled.\n\tif !s.reportingDisabled {\n\t\tgo s.startServerReporting()\n\t}\n\n\treturn nil\n}\n\n// Close shuts down the meta and data stores and all services.\nfunc (s *Server) Close() error {\n\tstopProfile()\n\n\t// Close the listener first to stop any new connections\n\tif s.Listener != nil {\n\t\ts.Listener.Close()\n\t}\n\n\t// Close services to allow any inflight requests to complete\n\t// and prevent new requests from being accepted.\n\tfor _, service := range s.Services {\n\t\tservice.Close()\n\t}\n\n\ts.config.deregisterDiagnostics(s.Monitor)\n\n\tif s.PointsWriter != nil 
{\n\t\ts.PointsWriter.Close()\n\t}\n\n\tif s.QueryExecutor != nil {\n\t\ts.QueryExecutor.Close()\n\t}\n\n\t// Close the TSDBStore, no more reads or writes at this point\n\tif s.TSDBStore != nil {\n\t\ts.TSDBStore.Close()\n\t}\n\n\tif s.Subscriber != nil {\n\t\ts.Subscriber.Close()\n\t}\n\n\tif s.MetaClient != nil {\n\t\ts.MetaClient.Close()\n\t}\n\n\tclose(s.closing)\n\treturn nil\n}\n\n// startServerReporting starts periodic server reporting.\nfunc (s *Server) startServerReporting() {\n\ts.reportServer()\n\n\tticker := time.NewTicker(24 * time.Hour)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-s.closing:\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\ts.reportServer()\n\t\t}\n\t}\n}\n\n// reportServer reports usage statistics about the system.\nfunc (s *Server) reportServer() {\n\tdbs := s.MetaClient.Databases()\n\tnumDatabases := len(dbs)\n\n\tvar (\n\t\tnumMeasurements int64\n\t\tnumSeries       int64\n\t)\n\n\tfor _, db := range dbs {\n\t\tname := db.Name\n\t\tn, err := s.TSDBStore.SeriesCardinality(name)\n\t\tif err != nil {\n\t\t\ts.Logger.Error(fmt.Sprintf(\"Unable to get series cardinality for database %s: %v\", name, err))\n\t\t} else {\n\t\t\tnumSeries += n\n\t\t}\n\n\t\tn, err = s.TSDBStore.MeasurementsCardinality(name)\n\t\tif err != nil {\n\t\t\ts.Logger.Error(fmt.Sprintf(\"Unable to get measurement cardinality for database %s: %v\", name, err))\n\t\t} else {\n\t\t\tnumMeasurements += n\n\t\t}\n\t}\n\n\tclusterID := s.MetaClient.ClusterID()\n\tcl := client.New(\"\")\n\tusage := client.Usage{\n\t\tProduct: \"influxdb\",\n\t\tData: []client.UsageData{\n\t\t\t{\n\t\t\t\tValues: client.Values{\n\t\t\t\t\t\"os\":               runtime.GOOS,\n\t\t\t\t\t\"arch\":             runtime.GOARCH,\n\t\t\t\t\t\"version\":          s.buildInfo.Version,\n\t\t\t\t\t\"cluster_id\":       fmt.Sprintf(\"%v\", clusterID),\n\t\t\t\t\t\"num_series\":       numSeries,\n\t\t\t\t\t\"num_measurements\": numMeasurements,\n\t\t\t\t\t\"num_databases\":    
numDatabases,\n\t\t\t\t\t\"uptime\":           time.Since(startTime).Seconds(),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\ts.Logger.Info(\"Sending usage statistics to usage.influxdata.com\")\n\n\tgo cl.Save(usage)\n}\n\n// Service represents a service attached to the server.\ntype Service interface {\n\tWithLogger(log zap.Logger)\n\tOpen() error\n\tClose() error\n}\n\n// prof stores the file locations of active profiles.\nvar prof struct {\n\tcpu *os.File\n\tmem *os.File\n}\n\n// StartProfile initializes the cpu and memory profile, if specified.\nfunc startProfile(cpuprofile, memprofile string) {\n\tif cpuprofile != \"\" {\n\t\tf, err := os.Create(cpuprofile)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"cpuprofile: %v\", err)\n\t\t}\n\t\tlog.Printf(\"writing CPU profile to: %s\\n\", cpuprofile)\n\t\tprof.cpu = f\n\t\tpprof.StartCPUProfile(prof.cpu)\n\t}\n\n\tif memprofile != \"\" {\n\t\tf, err := os.Create(memprofile)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"memprofile: %v\", err)\n\t\t}\n\t\tlog.Printf(\"writing mem profile to: %s\\n\", memprofile)\n\t\tprof.mem = f\n\t\truntime.MemProfileRate = 4096\n\t}\n\n}\n\n// StopProfile closes the cpu and memory profiles if they are running.\nfunc stopProfile() {\n\tif prof.cpu != nil {\n\t\tpprof.StopCPUProfile()\n\t\tprof.cpu.Close()\n\t\tlog.Println(\"CPU profile stopped\")\n\t}\n\tif prof.mem != nil {\n\t\tpprof.Lookup(\"heap\").WriteTo(prof.mem, 0)\n\t\tprof.mem.Close()\n\t\tlog.Println(\"mem profile stopped\")\n\t}\n}\n\n// monitorPointsWriter is a wrapper around `coordinator.PointsWriter` that helps\n// to prevent a circular dependency between the `cluster` and `monitor` packages.\ntype monitorPointsWriter coordinator.PointsWriter\n\nfunc (pw *monitorPointsWriter) WritePoints(database, retentionPolicy string, points models.Points) error {\n\treturn (*coordinator.PointsWriter)(pw).WritePointsPrivileged(database, retentionPolicy, models.ConsistencyLevelAny, points)\n}\n\nfunc raftDBExists(dir string) error {\n\t// Check to 
see if there is a raft db, if so, error out with a message\n\t// to downgrade, export, and then import the meta data\n\traftFile := filepath.Join(dir, \"raft.db\")\n\tif _, err := os.Stat(raftFile); err == nil {\n\t\treturn fmt.Errorf(\"detected %s. To proceed, you'll need to either 1) downgrade to v0.11.x, export your metadata, upgrade to the current version again, and then import the metadata or 2) delete the file, which will effectively reset your database. For more assistance with the upgrade, see: https://docs.influxdata.com/influxdb/v0.12/administration/upgrading/\", raftFile)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/cmd/parse.go",
    "content": "// Package cmd is the root package of the various command-line utilities for InfluxDB.\npackage cmd\n\nimport \"strings\"\n\n// ParseCommandName extracts the command name and args from the args list.\nfunc ParseCommandName(args []string) (string, []string) {\n\t// Retrieve command name as first argument.\n\tvar name string\n\tif len(args) > 0 {\n\t\tif !strings.HasPrefix(args[0], \"-\") {\n\t\t\tname = args[0]\n\t\t} else if args[0] == \"-h\" || args[0] == \"-help\" || args[0] == \"--help\" {\n\t\t\t// Special case -h immediately following binary name\n\t\t\tname = \"help\"\n\t\t}\n\t}\n\n\t// If command is \"help\" and has an argument then rewrite args to use \"-h\".\n\tif name == \"help\" && len(args) > 2 && !strings.HasPrefix(args[1], \"-\") {\n\t\treturn args[1], []string{\"-h\"}\n\t}\n\n\t// If a named command is specified then return it with its arguments.\n\tif name != \"\" {\n\t\treturn name, args[1:]\n\t}\n\treturn \"\", args\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/coordinator/config.go",
    "content": "// Package coordinator contains abstractions for writing points, executing statements,\n// and accessing meta data.\npackage coordinator\n\nimport (\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n\t\"github.com/influxdata/influxdb/toml\"\n)\n\nconst (\n\t// DefaultWriteTimeout is the default timeout for a complete write to succeed.\n\tDefaultWriteTimeout = 10 * time.Second\n\n\t// DefaultMaxConcurrentQueries is the maximum number of running queries.\n\t// A value of zero will make the maximum query limit unlimited.\n\tDefaultMaxConcurrentQueries = 0\n\n\t// DefaultMaxSelectPointN is the maximum number of points a SELECT can process.\n\t// A value of zero will make the maximum point count unlimited.\n\tDefaultMaxSelectPointN = 0\n\n\t// DefaultMaxSelectSeriesN is the maximum number of series a SELECT can run.\n\t// A value of zero will make the maximum series count unlimited.\n\tDefaultMaxSelectSeriesN = 0\n)\n\n// Config represents the configuration for the coordinator service.\ntype Config struct {\n\tWriteTimeout         toml.Duration `toml:\"write-timeout\"`\n\tMaxConcurrentQueries int           `toml:\"max-concurrent-queries\"`\n\tQueryTimeout         toml.Duration `toml:\"query-timeout\"`\n\tLogQueriesAfter      toml.Duration `toml:\"log-queries-after\"`\n\tMaxSelectPointN      int           `toml:\"max-select-point\"`\n\tMaxSelectSeriesN     int           `toml:\"max-select-series\"`\n\tMaxSelectBucketsN    int           `toml:\"max-select-buckets\"`\n}\n\n// NewConfig returns an instance of Config with defaults.\nfunc NewConfig() Config {\n\treturn Config{\n\t\tWriteTimeout:         toml.Duration(DefaultWriteTimeout),\n\t\tQueryTimeout:         toml.Duration(influxql.DefaultQueryTimeout),\n\t\tMaxConcurrentQueries: DefaultMaxConcurrentQueries,\n\t\tMaxSelectPointN:      DefaultMaxSelectPointN,\n\t\tMaxSelectSeriesN:     DefaultMaxSelectSeriesN,\n\t}\n}\n\n// Diagnostics 
returns a diagnostics representation of a subset of the Config.\nfunc (c Config) Diagnostics() (*diagnostics.Diagnostics, error) {\n\treturn diagnostics.RowFromMap(map[string]interface{}{\n\t\t\"write-timeout\":          c.WriteTimeout,\n\t\t\"max-concurrent-queries\": c.MaxConcurrentQueries,\n\t\t\"query-timeout\":          c.QueryTimeout,\n\t\t\"log-queries-after\":      c.LogQueriesAfter,\n\t\t\"max-select-point\":       c.MaxSelectPointN,\n\t\t\"max-select-series\":      c.MaxSelectSeriesN,\n\t\t\"max-select-buckets\":     c.MaxSelectBucketsN,\n\t}), nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/coordinator/config_test.go",
    "content": "package coordinator_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/influxdata/influxdb/coordinator\"\n)\n\nfunc TestConfig_Parse(t *testing.T) {\n\t// Parse configuration.\n\tvar c coordinator.Config\n\tif _, err := toml.Decode(`\nwrite-timeout = \"20s\"\n`, &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Validate configuration.\n\tif time.Duration(c.WriteTimeout) != 20*time.Second {\n\t\tt.Fatalf(\"unexpected write timeout s: %s\", c.WriteTimeout)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/coordinator/meta_client.go",
    "content": "package coordinator\n\nimport (\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n)\n\n// MetaClient is an interface for accessing meta data.\ntype MetaClient interface {\n\tCreateContinuousQuery(database, name, query string) error\n\tCreateDatabase(name string) (*meta.DatabaseInfo, error)\n\tCreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error)\n\tCreateRetentionPolicy(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error)\n\tCreateSubscription(database, rp, name, mode string, destinations []string) error\n\tCreateUser(name, password string, admin bool) (meta.User, error)\n\tDatabase(name string) *meta.DatabaseInfo\n\tDatabases() []meta.DatabaseInfo\n\tDropShard(id uint64) error\n\tDropContinuousQuery(database, name string) error\n\tDropDatabase(name string) error\n\tDropRetentionPolicy(database, name string) error\n\tDropSubscription(database, rp, name string) error\n\tDropUser(name string) error\n\tRetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error)\n\tSetAdminPrivilege(username string, admin bool) error\n\tSetPrivilege(username, database string, p influxql.Privilege) error\n\tShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error)\n\tUpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error\n\tUpdateUser(name, password string) error\n\tUserPrivilege(username, database string) (*influxql.Privilege, error)\n\tUserPrivileges(username string) (map[string]influxql.Privilege, error)\n\tUsers() []meta.UserInfo\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/coordinator/meta_client_test.go",
    "content": "package coordinator_test\n\nimport (\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n)\n\n// MetaClient is a mockable implementation of cluster.MetaClient.\ntype MetaClient struct {\n\tCreateContinuousQueryFn             func(database, name, query string) error\n\tCreateDatabaseFn                    func(name string) (*meta.DatabaseInfo, error)\n\tCreateDatabaseWithRetentionPolicyFn func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error)\n\tCreateRetentionPolicyFn             func(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error)\n\tCreateSubscriptionFn                func(database, rp, name, mode string, destinations []string) error\n\tCreateUserFn                        func(name, password string, admin bool) (meta.User, error)\n\tDatabaseFn                          func(name string) *meta.DatabaseInfo\n\tDatabasesFn                         func() []meta.DatabaseInfo\n\tDataNodeFn                          func(id uint64) (*meta.NodeInfo, error)\n\tDataNodesFn                         func() ([]meta.NodeInfo, error)\n\tDeleteDataNodeFn                    func(id uint64) error\n\tDeleteMetaNodeFn                    func(id uint64) error\n\tDropContinuousQueryFn               func(database, name string) error\n\tDropDatabaseFn                      func(name string) error\n\tDropRetentionPolicyFn               func(database, name string) error\n\tDropSubscriptionFn                  func(database, rp, name string) error\n\tDropShardFn                         func(id uint64) error\n\tDropUserFn                          func(name string) error\n\tMetaNodesFn                         func() ([]meta.NodeInfo, error)\n\tRetentionPolicyFn                   func(database, name string) (rpi *meta.RetentionPolicyInfo, err error)\n\tSetAdminPrivilegeFn                 func(username string, admin bool) error\n\tSetPrivilegeFn   
                   func(username, database string, p influxql.Privilege) error\n\tShardGroupsByTimeRangeFn            func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error)\n\tUpdateRetentionPolicyFn             func(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error\n\tUpdateUserFn                        func(name, password string) error\n\tUserPrivilegeFn                     func(username, database string) (*influxql.Privilege, error)\n\tUserPrivilegesFn                    func(username string) (map[string]influxql.Privilege, error)\n\tUsersFn                             func() []meta.UserInfo\n}\n\nfunc (c *MetaClient) CreateContinuousQuery(database, name, query string) error {\n\treturn c.CreateContinuousQueryFn(database, name, query)\n}\n\nfunc (c *MetaClient) CreateDatabase(name string) (*meta.DatabaseInfo, error) {\n\treturn c.CreateDatabaseFn(name)\n}\n\nfunc (c *MetaClient) CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) {\n\treturn c.CreateDatabaseWithRetentionPolicyFn(name, spec)\n}\n\nfunc (c *MetaClient) CreateRetentionPolicy(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error) {\n\treturn c.CreateRetentionPolicyFn(database, spec, makeDefault)\n}\n\nfunc (c *MetaClient) DropShard(id uint64) error {\n\treturn c.DropShardFn(id)\n}\n\nfunc (c *MetaClient) CreateSubscription(database, rp, name, mode string, destinations []string) error {\n\treturn c.CreateSubscriptionFn(database, rp, name, mode, destinations)\n}\n\nfunc (c *MetaClient) CreateUser(name, password string, admin bool) (meta.User, error) {\n\treturn c.CreateUserFn(name, password, admin)\n}\n\nfunc (c *MetaClient) Database(name string) *meta.DatabaseInfo {\n\treturn c.DatabaseFn(name)\n}\n\nfunc (c *MetaClient) Databases() []meta.DatabaseInfo {\n\treturn c.DatabasesFn()\n}\n\nfunc (c *MetaClient) DataNode(id uint64) 
(*meta.NodeInfo, error) {\n\treturn c.DataNodeFn(id)\n}\n\nfunc (c *MetaClient) DataNodes() ([]meta.NodeInfo, error) {\n\treturn c.DataNodesFn()\n}\n\nfunc (c *MetaClient) DeleteDataNode(id uint64) error {\n\treturn c.DeleteDataNodeFn(id)\n}\n\nfunc (c *MetaClient) DeleteMetaNode(id uint64) error {\n\treturn c.DeleteMetaNodeFn(id)\n}\n\nfunc (c *MetaClient) DropContinuousQuery(database, name string) error {\n\treturn c.DropContinuousQueryFn(database, name)\n}\n\nfunc (c *MetaClient) DropDatabase(name string) error {\n\treturn c.DropDatabaseFn(name)\n}\n\nfunc (c *MetaClient) DropRetentionPolicy(database, name string) error {\n\treturn c.DropRetentionPolicyFn(database, name)\n}\n\nfunc (c *MetaClient) DropSubscription(database, rp, name string) error {\n\treturn c.DropSubscriptionFn(database, rp, name)\n}\n\nfunc (c *MetaClient) DropUser(name string) error {\n\treturn c.DropUserFn(name)\n}\n\nfunc (c *MetaClient) MetaNodes() ([]meta.NodeInfo, error) {\n\treturn c.MetaNodesFn()\n}\n\nfunc (c *MetaClient) RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) {\n\treturn c.RetentionPolicyFn(database, name)\n}\n\nfunc (c *MetaClient) SetAdminPrivilege(username string, admin bool) error {\n\treturn c.SetAdminPrivilegeFn(username, admin)\n}\n\nfunc (c *MetaClient) SetPrivilege(username, database string, p influxql.Privilege) error {\n\treturn c.SetPrivilegeFn(username, database, p)\n}\n\nfunc (c *MetaClient) ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) {\n\treturn c.ShardGroupsByTimeRangeFn(database, policy, min, max)\n}\n\nfunc (c *MetaClient) UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error {\n\treturn c.UpdateRetentionPolicyFn(database, name, rpu, makeDefault)\n}\n\nfunc (c *MetaClient) UpdateUser(name, password string) error {\n\treturn c.UpdateUserFn(name, password)\n}\n\nfunc (c *MetaClient) UserPrivilege(username, database 
string) (*influxql.Privilege, error) {\n\treturn c.UserPrivilegeFn(username, database)\n}\n\nfunc (c *MetaClient) UserPrivileges(username string) (map[string]influxql.Privilege, error) {\n\treturn c.UserPrivilegesFn(username)\n}\n\nfunc (c *MetaClient) Users() []meta.UserInfo {\n\treturn c.UsersFn()\n}\n\n// DefaultMetaClientDatabaseFn returns a single database (db0) with a retention policy.\nfunc DefaultMetaClientDatabaseFn(name string) *meta.DatabaseInfo {\n\treturn &meta.DatabaseInfo{\n\t\tName: DefaultDatabase,\n\t\tDefaultRetentionPolicy: DefaultRetentionPolicy,\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/coordinator/points_writer.go",
    "content": "package coordinator\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t\"github.com/uber-go/zap\"\n)\n\n// The keys for statistics generated by the \"write\" module.\nconst (\n\tstatWriteReq           = \"req\"\n\tstatPointWriteReq      = \"pointReq\"\n\tstatPointWriteReqLocal = \"pointReqLocal\"\n\tstatWriteOK            = \"writeOk\"\n\tstatWriteDrop          = \"writeDrop\"\n\tstatWriteTimeout       = \"writeTimeout\"\n\tstatWriteErr           = \"writeError\"\n\tstatSubWriteOK         = \"subWriteOk\"\n\tstatSubWriteDrop       = \"subWriteDrop\"\n)\n\nvar (\n\t// ErrTimeout is returned when a write times out.\n\tErrTimeout = errors.New(\"timeout\")\n\n\t// ErrPartialWrite is returned when a write partially succeeds but does\n\t// not meet the requested consistency level.\n\tErrPartialWrite = errors.New(\"partial write\")\n\n\t// ErrWriteFailed is returned when no writes succeeded.\n\tErrWriteFailed = errors.New(\"write failed\")\n)\n\n// PointsWriter handles writes across multiple local and remote data nodes.\ntype PointsWriter struct {\n\tmu           sync.RWMutex\n\tclosing      chan struct{}\n\tWriteTimeout time.Duration\n\tLogger       zap.Logger\n\n\tNode *influxdb.Node\n\n\tMetaClient interface {\n\t\tDatabase(name string) (di *meta.DatabaseInfo)\n\t\tRetentionPolicy(database, policy string) (*meta.RetentionPolicyInfo, error)\n\t\tCreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error)\n\t}\n\n\tTSDBStore interface {\n\t\tCreateShard(database, retentionPolicy string, shardID uint64, enabled bool) error\n\t\tWriteToShard(shardID uint64, points []models.Point) error\n\t}\n\n\tSubscriber interface {\n\t\tPoints() chan<- *WritePointsRequest\n\t}\n\tsubPoints chan<- 
*WritePointsRequest\n\n\tstats *WriteStatistics\n}\n\n// WritePointsRequest represents a request to write point data to the cluster.\ntype WritePointsRequest struct {\n\tDatabase        string\n\tRetentionPolicy string\n\tPoints          []models.Point\n}\n\n// AddPoint adds a point to the WritePointRequest with field key 'value'\nfunc (w *WritePointsRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) {\n\tpt, err := models.NewPoint(\n\t\tname, models.NewTags(tags), map[string]interface{}{\"value\": value}, timestamp,\n\t)\n\tif err != nil {\n\t\treturn\n\t}\n\tw.Points = append(w.Points, pt)\n}\n\n// NewPointsWriter returns a new instance of PointsWriter for a node.\nfunc NewPointsWriter() *PointsWriter {\n\treturn &PointsWriter{\n\t\tclosing:      make(chan struct{}),\n\t\tWriteTimeout: DefaultWriteTimeout,\n\t\tLogger:       zap.New(zap.NullEncoder()),\n\t\tstats:        &WriteStatistics{},\n\t}\n}\n\n// ShardMapping contains a mapping of shards to points.\ntype ShardMapping struct {\n\tn       int\n\tPoints  map[uint64][]models.Point  // The points associated with a shard ID\n\tShards  map[uint64]*meta.ShardInfo // The shards that have been mapped, keyed by shard ID\n\tDropped []models.Point             // Points that were dropped\n}\n\n// NewShardMapping creates an empty ShardMapping.\nfunc NewShardMapping(n int) *ShardMapping {\n\treturn &ShardMapping{\n\t\tn:      n,\n\t\tPoints: map[uint64][]models.Point{},\n\t\tShards: map[uint64]*meta.ShardInfo{},\n\t}\n}\n\n// MapPoint adds the point to the ShardMapping, associated with the given shardInfo.\nfunc (s *ShardMapping) MapPoint(shardInfo *meta.ShardInfo, p models.Point) {\n\tif cap(s.Points[shardInfo.ID]) < s.n {\n\t\ts.Points[shardInfo.ID] = make([]models.Point, 0, s.n)\n\t}\n\ts.Points[shardInfo.ID] = append(s.Points[shardInfo.ID], p)\n\ts.Shards[shardInfo.ID] = shardInfo\n}\n\n// Open opens the communication channel with the point writer.\nfunc (w *PointsWriter) 
Open() error {\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\tw.closing = make(chan struct{})\n\tif w.Subscriber != nil {\n\t\tw.subPoints = w.Subscriber.Points()\n\t}\n\treturn nil\n}\n\n// Close closes the communication channel with the point writer.\nfunc (w *PointsWriter) Close() error {\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\tif w.closing != nil {\n\t\tclose(w.closing)\n\t}\n\tif w.subPoints != nil {\n\t\t// 'nil' channels always block so this makes the\n\t\t// select statement in WritePoints hit its default case\n\t\t// dropping any in-flight writes.\n\t\tw.subPoints = nil\n\t}\n\treturn nil\n}\n\n// WithLogger sets the Logger on w.\nfunc (w *PointsWriter) WithLogger(log zap.Logger) {\n\tw.Logger = log.With(zap.String(\"service\", \"write\"))\n}\n\n// WriteStatistics keeps statistics related to the PointsWriter.\ntype WriteStatistics struct {\n\tWriteReq           int64\n\tPointWriteReq      int64\n\tPointWriteReqLocal int64\n\tWriteOK            int64\n\tWriteDropped       int64\n\tWriteTimeout       int64\n\tWriteErr           int64\n\tSubWriteOK         int64\n\tSubWriteDrop       int64\n}\n\n// Statistics returns statistics for periodic monitoring.\nfunc (w *PointsWriter) Statistics(tags map[string]string) []models.Statistic {\n\treturn []models.Statistic{{\n\t\tName: \"write\",\n\t\tTags: tags,\n\t\tValues: map[string]interface{}{\n\t\t\tstatWriteReq:           atomic.LoadInt64(&w.stats.WriteReq),\n\t\t\tstatPointWriteReq:      atomic.LoadInt64(&w.stats.PointWriteReq),\n\t\t\tstatPointWriteReqLocal: atomic.LoadInt64(&w.stats.PointWriteReqLocal),\n\t\t\tstatWriteOK:            atomic.LoadInt64(&w.stats.WriteOK),\n\t\t\tstatWriteDrop:          atomic.LoadInt64(&w.stats.WriteDropped),\n\t\t\tstatWriteTimeout:       atomic.LoadInt64(&w.stats.WriteTimeout),\n\t\t\tstatWriteErr:           atomic.LoadInt64(&w.stats.WriteErr),\n\t\t\tstatSubWriteOK:         atomic.LoadInt64(&w.stats.SubWriteOK),\n\t\t\tstatSubWriteDrop:       
atomic.LoadInt64(&w.stats.SubWriteDrop),\n\t\t},\n\t}}\n}\n\n// MapShards maps the points contained in wp to a ShardMapping.  If a point\n// maps to a shard group or shard that does not currently exist, it will be\n// created before returning the mapping.\nfunc (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error) {\n\trp, err := w.MetaClient.RetentionPolicy(wp.Database, wp.RetentionPolicy)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if rp == nil {\n\t\treturn nil, influxdb.ErrRetentionPolicyNotFound(wp.RetentionPolicy)\n\t}\n\n\t// Holds all the shard groups and shards that are required for writes.\n\tlist := make(sgList, 0, 8)\n\tmin := time.Unix(0, models.MinNanoTime)\n\tif rp.Duration > 0 {\n\t\tmin = time.Now().Add(-rp.Duration)\n\t}\n\n\tfor _, p := range wp.Points {\n\t\t// Either the point is outside the scope of the RP, or we already have\n\t\t// a suitable shard group for the point.\n\t\tif p.Time().Before(min) || list.Covers(p.Time()) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// No shard groups overlap with the point's time, so we will create\n\t\t// a new shard group for this point.\n\t\tsg, err := w.MetaClient.CreateShardGroup(wp.Database, wp.RetentionPolicy, p.Time())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif sg == nil {\n\t\t\treturn nil, errors.New(\"nil shard group\")\n\t\t}\n\t\tlist = list.Append(*sg)\n\t}\n\n\tmapping := NewShardMapping(len(wp.Points))\n\tfor _, p := range wp.Points {\n\t\tsg := list.ShardGroupAt(p.Time())\n\t\tif sg == nil {\n\t\t\t// We didn't create a shard group because the point was outside the\n\t\t\t// scope of the RP.\n\t\t\tmapping.Dropped = append(mapping.Dropped, p)\n\t\t\tatomic.AddInt64(&w.stats.WriteDropped, 1)\n\t\t\tcontinue\n\t\t}\n\n\t\tsh := sg.ShardFor(p.HashID())\n\t\tmapping.MapPoint(&sh, p)\n\t}\n\treturn mapping, nil\n}\n\n// sgList is a wrapper around a meta.ShardGroupInfos where we can also check\n// if a given time is covered by any of the shard groups in the 
list.\ntype sgList meta.ShardGroupInfos\n\nfunc (l sgList) Covers(t time.Time) bool {\n\tif len(l) == 0 {\n\t\treturn false\n\t}\n\treturn l.ShardGroupAt(t) != nil\n}\n\n// ShardGroupAt attempts to find a shard group that could contain a point\n// at the given time.\n//\n// Shard groups are sorted first according to end time, and then according\n// to start time. Therefore, if there are multiple shard groups that match\n// this point's time they will be preferred in this order:\n//\n//  - a shard group with the earliest end time;\n//  - (assuming identical end times) the shard group with the earliest start time.\nfunc (l sgList) ShardGroupAt(t time.Time) *meta.ShardGroupInfo {\n\tidx := sort.Search(len(l), func(i int) bool { return l[i].EndTime.After(t) })\n\n\t// We couldn't find a shard group the point falls into.\n\tif idx == len(l) || t.Before(l[idx].StartTime) {\n\t\treturn nil\n\t}\n\treturn &l[idx]\n}\n\n// Append appends a shard group to the list, and returns a sorted list.\nfunc (l sgList) Append(sgi meta.ShardGroupInfo) sgList {\n\tnext := append(l, sgi)\n\tsort.Sort(meta.ShardGroupInfos(next))\n\treturn next\n}\n\n// WritePointsInto is a copy of WritePoints that uses a tsdb structure instead of\n// a cluster structure for information. This is to avoid a circular dependency.\nfunc (w *PointsWriter) WritePointsInto(p *IntoWriteRequest) error {\n\treturn w.WritePointsPrivileged(p.Database, p.RetentionPolicy, models.ConsistencyLevelOne, p.Points)\n}\n\n// WritePoints writes the data to the underlying storage. 
consitencyLevel and user are only used for clustered scenarios\nfunc (w *PointsWriter) WritePoints(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, user meta.User, points []models.Point) error {\n\treturn w.WritePointsPrivileged(database, retentionPolicy, consistencyLevel, points)\n}\n\n// WritePointsPrivileged writes the data to the underlying storage, consitencyLevel is only used for clustered scenarios\nfunc (w *PointsWriter) WritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {\n\tatomic.AddInt64(&w.stats.WriteReq, 1)\n\tatomic.AddInt64(&w.stats.PointWriteReq, int64(len(points)))\n\n\tif retentionPolicy == \"\" {\n\t\tdb := w.MetaClient.Database(database)\n\t\tif db == nil {\n\t\t\treturn influxdb.ErrDatabaseNotFound(database)\n\t\t}\n\t\tretentionPolicy = db.DefaultRetentionPolicy\n\t}\n\n\tshardMappings, err := w.MapShards(&WritePointsRequest{Database: database, RetentionPolicy: retentionPolicy, Points: points})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Write each shard in it's own goroutine and return as soon as one fails.\n\tch := make(chan error, len(shardMappings.Points))\n\tfor shardID, points := range shardMappings.Points {\n\t\tgo func(shard *meta.ShardInfo, database, retentionPolicy string, points []models.Point) {\n\t\t\tch <- w.writeToShard(shard, database, retentionPolicy, points)\n\t\t}(shardMappings.Shards[shardID], database, retentionPolicy, points)\n\t}\n\n\t// Send points to subscriptions if possible.\n\tok := false\n\t// We need to lock just in case the channel is about to be nil'ed\n\tw.mu.RLock()\n\tselect {\n\tcase w.subPoints <- &WritePointsRequest{Database: database, RetentionPolicy: retentionPolicy, Points: points}:\n\t\tok = true\n\tdefault:\n\t}\n\tw.mu.RUnlock()\n\tif ok {\n\t\tatomic.AddInt64(&w.stats.SubWriteOK, 1)\n\t} else {\n\t\tatomic.AddInt64(&w.stats.SubWriteDrop, 1)\n\t}\n\n\tif err == nil && len(shardMappings.Dropped) 
> 0 {\n\t\terr = tsdb.PartialWriteError{Reason: \"points beyond retention policy\", Dropped: len(shardMappings.Dropped)}\n\n\t}\n\ttimeout := time.NewTimer(w.WriteTimeout)\n\tdefer timeout.Stop()\n\tfor range shardMappings.Points {\n\t\tselect {\n\t\tcase <-w.closing:\n\t\t\treturn ErrWriteFailed\n\t\tcase <-timeout.C:\n\t\t\tatomic.AddInt64(&w.stats.WriteTimeout, 1)\n\t\t\t// return timeout error to caller\n\t\t\treturn ErrTimeout\n\t\tcase err := <-ch:\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn err\n}\n\n// writeToShards writes points to a shard.\nfunc (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPolicy string, points []models.Point) error {\n\tatomic.AddInt64(&w.stats.PointWriteReqLocal, int64(len(points)))\n\n\terr := w.TSDBStore.WriteToShard(shard.ID, points)\n\tif err == nil {\n\t\tatomic.AddInt64(&w.stats.WriteOK, 1)\n\t\treturn nil\n\t}\n\n\t// If this is a partial write error, that is also ok.\n\tif _, ok := err.(tsdb.PartialWriteError); ok {\n\t\tatomic.AddInt64(&w.stats.WriteErr, 1)\n\t\treturn err\n\t}\n\n\t// If we've written to shard that should exist on the current node, but the store has\n\t// not actually created this shard, tell it to create it and retry the write\n\tif err == tsdb.ErrShardNotFound {\n\t\terr = w.TSDBStore.CreateShard(database, retentionPolicy, shard.ID, true)\n\t\tif err != nil {\n\t\t\tw.Logger.Info(fmt.Sprintf(\"write failed for shard %d: %v\", shard.ID, err))\n\n\t\t\tatomic.AddInt64(&w.stats.WriteErr, 1)\n\t\t\treturn err\n\t\t}\n\t}\n\terr = w.TSDBStore.WriteToShard(shard.ID, points)\n\tif err != nil {\n\t\tw.Logger.Info(fmt.Sprintf(\"write failed for shard %d: %v\", shard.ID, err))\n\t\tatomic.AddInt64(&w.stats.WriteErr, 1)\n\t\treturn err\n\t}\n\n\tatomic.AddInt64(&w.stats.WriteOK, 1)\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/coordinator/points_writer_internal_test.go",
    "content": "package coordinator\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestSgList_ShardGroupAt(t *testing.T) {\n\tbase := time.Date(2016, 10, 19, 0, 0, 0, 0, time.UTC)\n\tday := func(n int) time.Time {\n\t\treturn base.Add(time.Duration(24*n) * time.Hour)\n\t}\n\n\tlist := sgList{\n\t\t{ID: 1, StartTime: day(0), EndTime: day(1)},\n\t\t{ID: 2, StartTime: day(1), EndTime: day(2)},\n\t\t{ID: 3, StartTime: day(2), EndTime: day(3)},\n\t\t// SG day 3 to day 4 missing...\n\t\t{ID: 4, StartTime: day(4), EndTime: day(5)},\n\t\t{ID: 5, StartTime: day(5), EndTime: day(6)},\n\t}\n\n\texamples := []struct {\n\t\tT            time.Time\n\t\tShardGroupID uint64 // 0 will indicate we don't expect a shard group\n\t}{\n\t\t{T: base.Add(-time.Minute), ShardGroupID: 0}, // Before any SG\n\t\t{T: day(0), ShardGroupID: 1},\n\t\t{T: day(0).Add(time.Minute), ShardGroupID: 1},\n\t\t{T: day(1), ShardGroupID: 2},\n\t\t{T: day(3).Add(time.Minute), ShardGroupID: 0}, // No matching SG\n\t\t{T: day(5).Add(time.Hour), ShardGroupID: 5},\n\t}\n\n\tfor i, example := range examples {\n\t\tsg := list.ShardGroupAt(example.T)\n\t\tvar id uint64\n\t\tif sg != nil {\n\t\t\tid = sg.ID\n\t\t}\n\n\t\tif got, exp := id, example.ShardGroupID; got != exp {\n\t\t\tt.Errorf(\"[Example %d] got %v, expected %v\", i+1, got, exp)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/coordinator/points_writer_test.go",
    "content": "package coordinator_test\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb\"\n\t\"github.com/influxdata/influxdb/coordinator\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n)\n\n// TODO(benbjohnson): Rewrite tests to use cluster_test.MetaClient.\n\n// Ensures the points writer maps a single point to a single shard.\nfunc TestPointsWriter_MapShards_One(t *testing.T) {\n\tms := PointsWriterMetaClient{}\n\trp := NewRetentionPolicy(\"myp\", time.Hour, 3)\n\n\tms.NodeIDFn = func() uint64 { return 1 }\n\tms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) {\n\t\treturn rp, nil\n\t}\n\n\tms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {\n\t\treturn &rp.ShardGroups[0], nil\n\t}\n\n\tc := coordinator.PointsWriter{MetaClient: ms}\n\tpr := &coordinator.WritePointsRequest{\n\t\tDatabase:        \"mydb\",\n\t\tRetentionPolicy: \"myrp\",\n\t}\n\tpr.AddPoint(\"cpu\", 1.0, time.Now(), nil)\n\n\tvar (\n\t\tshardMappings *coordinator.ShardMapping\n\t\terr           error\n\t)\n\tif shardMappings, err = c.MapShards(pr); err != nil {\n\t\tt.Fatalf(\"unexpected an error: %v\", err)\n\t}\n\n\tif exp := 1; len(shardMappings.Points) != exp {\n\t\tt.Errorf(\"MapShards() len mismatch. 
got %v, exp %v\", len(shardMappings.Points), exp)\n\t}\n}\n\n// Ensures the points writer maps to a new shard group when the shard duration\n// is changed.\nfunc TestPointsWriter_MapShards_AlterShardDuration(t *testing.T) {\n\tms := PointsWriterMetaClient{}\n\trp := NewRetentionPolicy(\"myp\", time.Hour, 3)\n\n\tms.NodeIDFn = func() uint64 { return 1 }\n\tms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) {\n\t\treturn rp, nil\n\t}\n\n\tvar (\n\t\ti   int\n\t\tnow = time.Now()\n\t)\n\n\tms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {\n\t\tsg := []meta.ShardGroupInfo{\n\t\t\tmeta.ShardGroupInfo{\n\t\t\t\tShards:    make([]meta.ShardInfo, 1),\n\t\t\t\tStartTime: now, EndTime: now.Add(rp.Duration).Add(-1),\n\t\t\t},\n\t\t\tmeta.ShardGroupInfo{\n\t\t\t\tShards:    make([]meta.ShardInfo, 1),\n\t\t\t\tStartTime: now.Add(time.Hour), EndTime: now.Add(3 * time.Hour).Add(rp.Duration).Add(-1),\n\t\t\t},\n\t\t}[i]\n\t\ti++\n\t\treturn &sg, nil\n\t}\n\n\tc := coordinator.NewPointsWriter()\n\tc.MetaClient = ms\n\n\tpr := &coordinator.WritePointsRequest{\n\t\tDatabase:        \"mydb\",\n\t\tRetentionPolicy: \"myrp\",\n\t}\n\tpr.AddPoint(\"cpu\", 1.0, now, nil)\n\tpr.AddPoint(\"cpu\", 2.0, now.Add(2*time.Second), nil)\n\n\tvar (\n\t\tshardMappings *coordinator.ShardMapping\n\t\terr           error\n\t)\n\tif shardMappings, err = c.MapShards(pr); err != nil {\n\t\tt.Fatalf(\"unexpected an error: %v\", err)\n\t}\n\n\tif got, exp := len(shardMappings.Points[0]), 2; got != exp {\n\t\tt.Fatalf(\"got %d point(s), expected %d\", got, exp)\n\t}\n\n\tif got, exp := len(shardMappings.Shards), 1; got != exp {\n\t\tt.Errorf(\"got %d shard(s), expected %d\", got, exp)\n\t}\n\n\t// Now we alter the retention policy duration.\n\trp.ShardGroupDuration = 3 * time.Hour\n\n\tpr = &coordinator.WritePointsRequest{\n\t\tDatabase:        \"mydb\",\n\t\tRetentionPolicy: 
\"myrp\",\n\t}\n\tpr.AddPoint(\"cpu\", 1.0, now.Add(2*time.Hour), nil)\n\n\t// Point is beyond previous shard group so a new shard group should be\n\t// created.\n\tif shardMappings, err = c.MapShards(pr); err != nil {\n\t\tt.Fatalf(\"unexpected an error: %v\", err)\n\t}\n\n\t// We can check value of i since it's only incremeneted when a shard group\n\t// is created.\n\tif got, exp := i, 2; got != exp {\n\t\tt.Fatal(\"new shard group was not created, expected it to be\")\n\t}\n}\n\n// Ensures the points writer maps a multiple points across shard group boundaries.\nfunc TestPointsWriter_MapShards_Multiple(t *testing.T) {\n\tms := PointsWriterMetaClient{}\n\trp := NewRetentionPolicy(\"myp\", time.Hour, 3)\n\trp.ShardGroupDuration = time.Hour\n\tAttachShardGroupInfo(rp, []meta.ShardOwner{\n\t\t{NodeID: 1},\n\t\t{NodeID: 2},\n\t\t{NodeID: 3},\n\t})\n\tAttachShardGroupInfo(rp, []meta.ShardOwner{\n\t\t{NodeID: 1},\n\t\t{NodeID: 2},\n\t\t{NodeID: 3},\n\t})\n\n\tms.NodeIDFn = func() uint64 { return 1 }\n\tms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) {\n\t\treturn rp, nil\n\t}\n\n\tms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {\n\t\tfor i, sg := range rp.ShardGroups {\n\t\t\tif timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) {\n\t\t\t\treturn &rp.ShardGroups[i], nil\n\t\t\t}\n\t\t}\n\t\tpanic(\"should not get here\")\n\t}\n\n\tc := coordinator.NewPointsWriter()\n\tc.MetaClient = ms\n\tdefer c.Close()\n\tpr := &coordinator.WritePointsRequest{\n\t\tDatabase:        \"mydb\",\n\t\tRetentionPolicy: \"myrp\",\n\t}\n\n\t// Three points that range over the shardGroup duration (1h) and should map to two\n\t// distinct shards\n\tpr.AddPoint(\"cpu\", 1.0, time.Now(), nil)\n\tpr.AddPoint(\"cpu\", 2.0, time.Now().Add(time.Hour), nil)\n\tpr.AddPoint(\"cpu\", 3.0, time.Now().Add(time.Hour+time.Second), nil)\n\n\tvar 
(\n\t\tshardMappings *coordinator.ShardMapping\n\t\terr           error\n\t)\n\tif shardMappings, err = c.MapShards(pr); err != nil {\n\t\tt.Fatalf(\"unexpected an error: %v\", err)\n\t}\n\n\tif exp := 2; len(shardMappings.Points) != exp {\n\t\tt.Errorf(\"MapShards() len mismatch. got %v, exp %v\", len(shardMappings.Points), exp)\n\t}\n\n\tfor _, points := range shardMappings.Points {\n\t\t// First shard should have 1 point w/ first point added\n\t\tif len(points) == 1 && points[0].Time() != pr.Points[0].Time() {\n\t\t\tt.Fatalf(\"MapShards() value mismatch. got %v, exp %v\", points[0].Time(), pr.Points[0].Time())\n\t\t}\n\n\t\t// Second shard should have the last two points added\n\t\tif len(points) == 2 && points[0].Time() != pr.Points[1].Time() {\n\t\t\tt.Fatalf(\"MapShards() value mismatch. got %v, exp %v\", points[0].Time(), pr.Points[1].Time())\n\t\t}\n\n\t\tif len(points) == 2 && points[1].Time() != pr.Points[2].Time() {\n\t\t\tt.Fatalf(\"MapShards() value mismatch. got %v, exp %v\", points[1].Time(), pr.Points[2].Time())\n\t\t}\n\t}\n}\n\n// Ensures the points writer does not map points beyond the retention policy.\nfunc TestPointsWriter_MapShards_Invalid(t *testing.T) {\n\tms := PointsWriterMetaClient{}\n\trp := NewRetentionPolicy(\"myp\", time.Hour, 3)\n\n\tms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) {\n\t\treturn rp, nil\n\t}\n\n\tms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {\n\t\treturn &rp.ShardGroups[0], nil\n\t}\n\n\tc := coordinator.NewPointsWriter()\n\tc.MetaClient = ms\n\tdefer c.Close()\n\tpr := &coordinator.WritePointsRequest{\n\t\tDatabase:        \"mydb\",\n\t\tRetentionPolicy: \"myrp\",\n\t}\n\n\t// Add a point that goes beyond the current retention policy.\n\tpr.AddPoint(\"cpu\", 1.0, time.Now().Add(-2*time.Hour), nil)\n\n\tvar (\n\t\tshardMappings *coordinator.ShardMapping\n\t\terr           error\n\t)\n\tif shardMappings, 
err = c.MapShards(pr); err != nil {\n\t\tt.Fatalf(\"unexpected an error: %v\", err)\n\t}\n\n\tif got, exp := len(shardMappings.Points), 0; got != exp {\n\t\tt.Errorf(\"MapShards() len mismatch. got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := len(shardMappings.Dropped), 1; got != exp {\n\t\tt.Fatalf(\"MapShard() dropped mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestPointsWriter_WritePoints(t *testing.T) {\n\ttests := []struct {\n\t\tname            string\n\t\tdatabase        string\n\t\tretentionPolicy string\n\n\t\t// the responses returned by each shard write call.  node ID 1 = pos 0\n\t\terr    []error\n\t\texpErr error\n\t}{\n\t\t{\n\t\t\tname:            \"write one success\",\n\t\t\tdatabase:        \"mydb\",\n\t\t\tretentionPolicy: \"myrp\",\n\t\t\terr:             []error{nil, nil, nil},\n\t\t\texpErr:          nil,\n\t\t},\n\n\t\t// Write to non-existent database\n\t\t{\n\t\t\tname:            \"write to non-existent database\",\n\t\t\tdatabase:        \"doesnt_exist\",\n\t\t\tretentionPolicy: \"\",\n\t\t\terr:             []error{nil, nil, nil},\n\t\t\texpErr:          fmt.Errorf(\"database not found: doesnt_exist\"),\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\n\t\tpr := &coordinator.WritePointsRequest{\n\t\t\tDatabase:        test.database,\n\t\t\tRetentionPolicy: test.retentionPolicy,\n\t\t}\n\n\t\t// Ensure that the test shard groups are created before the points\n\t\t// are created.\n\t\tms := NewPointsWriterMetaClient()\n\n\t\t// Three points that range over the shardGroup duration (1h) and should map to two\n\t\t// distinct shards\n\t\tpr.AddPoint(\"cpu\", 1.0, time.Now(), nil)\n\t\tpr.AddPoint(\"cpu\", 2.0, time.Now().Add(time.Hour), nil)\n\t\tpr.AddPoint(\"cpu\", 3.0, time.Now().Add(time.Hour+time.Second), nil)\n\n\t\t// copy to prevent data race\n\t\ttheTest := test\n\t\tsm := coordinator.NewShardMapping(16)\n\t\tsm.MapPoint(\n\t\t\t&meta.ShardInfo{ID: uint64(1), Owners: []meta.ShardOwner{\n\t\t\t\t{NodeID: 
1},\n\t\t\t\t{NodeID: 2},\n\t\t\t\t{NodeID: 3},\n\t\t\t}},\n\t\t\tpr.Points[0])\n\t\tsm.MapPoint(\n\t\t\t&meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{\n\t\t\t\t{NodeID: 1},\n\t\t\t\t{NodeID: 2},\n\t\t\t\t{NodeID: 3},\n\t\t\t}},\n\t\t\tpr.Points[1])\n\t\tsm.MapPoint(\n\t\t\t&meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{\n\t\t\t\t{NodeID: 1},\n\t\t\t\t{NodeID: 2},\n\t\t\t\t{NodeID: 3},\n\t\t\t}},\n\t\t\tpr.Points[2])\n\n\t\t// Local coordinator.Node ShardWriter\n\t\t// lock on the write increment since these functions get called in parallel\n\t\tvar mu sync.Mutex\n\n\t\tstore := &fakeStore{\n\t\t\tWriteFn: func(shardID uint64, points []models.Point) error {\n\t\t\t\tmu.Lock()\n\t\t\t\tdefer mu.Unlock()\n\t\t\t\treturn theTest.err[0]\n\t\t\t},\n\t\t}\n\n\t\tms.DatabaseFn = func(database string) *meta.DatabaseInfo {\n\t\t\treturn nil\n\t\t}\n\t\tms.NodeIDFn = func() uint64 { return 1 }\n\n\t\tsubPoints := make(chan *coordinator.WritePointsRequest, 1)\n\t\tsub := Subscriber{}\n\t\tsub.PointsFn = func() chan<- *coordinator.WritePointsRequest {\n\t\t\treturn subPoints\n\t\t}\n\n\t\tc := coordinator.NewPointsWriter()\n\t\tc.MetaClient = ms\n\t\tc.TSDBStore = store\n\t\tc.Subscriber = sub\n\t\tc.Node = &influxdb.Node{ID: 1}\n\n\t\tc.Open()\n\t\tdefer c.Close()\n\n\t\terr := c.WritePointsPrivileged(pr.Database, pr.RetentionPolicy, models.ConsistencyLevelOne, pr.Points)\n\t\tif err == nil && test.expErr != nil {\n\t\t\tt.Errorf(\"PointsWriter.WritePointsPrivileged(): '%s' error: got %v, exp %v\", test.name, err, test.expErr)\n\t\t}\n\n\t\tif err != nil && test.expErr == nil {\n\t\t\tt.Errorf(\"PointsWriter.WritePointsPrivileged(): '%s' error: got %v, exp %v\", test.name, err, test.expErr)\n\t\t}\n\t\tif err != nil && test.expErr != nil && err.Error() != test.expErr.Error() {\n\t\t\tt.Errorf(\"PointsWriter.WritePointsPrivileged(): '%s' error: got %v, exp %v\", test.name, err, test.expErr)\n\t\t}\n\t\tif test.expErr == nil {\n\t\t\tselect {\n\t\t\tcase p 
:= <-subPoints:\n\t\t\t\tif !reflect.DeepEqual(p, pr) {\n\t\t\t\t\tt.Errorf(\"PointsWriter.WritePointsPrivileged(): '%s' error: unexpected WritePointsRequest got %v, exp %v\", test.name, p, pr)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tt.Errorf(\"PointsWriter.WritePointsPrivileged(): '%s' error: Subscriber.Points not called\", test.name)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestPointsWriter_WritePoints_Dropped(t *testing.T) {\n\tpr := &coordinator.WritePointsRequest{\n\t\tDatabase:        \"mydb\",\n\t\tRetentionPolicy: \"myrp\",\n\t}\n\n\t// Ensure that the test shard groups are created before the points\n\t// are created.\n\tms := NewPointsWriterMetaClient()\n\n\t// Three points that range over the shardGroup duration (1h) and should map to two\n\t// distinct shards\n\tpr.AddPoint(\"cpu\", 1.0, time.Now().Add(-24*time.Hour), nil)\n\n\t// copy to prevent data race\n\tsm := coordinator.NewShardMapping(16)\n\n\t// ShardMapper dropped this point\n\tsm.Dropped = append(sm.Dropped, pr.Points[0])\n\n\t// Local coordinator.Node ShardWriter\n\t// lock on the write increment since these functions get called in parallel\n\tvar mu sync.Mutex\n\n\tstore := &fakeStore{\n\t\tWriteFn: func(shardID uint64, points []models.Point) error {\n\t\t\tmu.Lock()\n\t\t\tdefer mu.Unlock()\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tms.DatabaseFn = func(database string) *meta.DatabaseInfo {\n\t\treturn nil\n\t}\n\tms.NodeIDFn = func() uint64 { return 1 }\n\n\tsubPoints := make(chan *coordinator.WritePointsRequest, 1)\n\tsub := Subscriber{}\n\tsub.PointsFn = func() chan<- *coordinator.WritePointsRequest {\n\t\treturn subPoints\n\t}\n\n\tc := coordinator.NewPointsWriter()\n\tc.MetaClient = ms\n\tc.TSDBStore = store\n\tc.Subscriber = sub\n\tc.Node = &influxdb.Node{ID: 1}\n\n\tc.Open()\n\tdefer c.Close()\n\n\terr := c.WritePointsPrivileged(pr.Database, pr.RetentionPolicy, models.ConsistencyLevelOne, pr.Points)\n\tif _, ok := err.(tsdb.PartialWriteError); !ok {\n\t\tt.Errorf(\"PointsWriter.WritePoints(): got %v, 
exp %v\", err, tsdb.PartialWriteError{})\n\t}\n}\n\ntype fakePointsWriter struct {\n\tWritePointsIntoFn func(*coordinator.IntoWriteRequest) error\n}\n\nfunc (f *fakePointsWriter) WritePointsInto(req *coordinator.IntoWriteRequest) error {\n\treturn f.WritePointsIntoFn(req)\n}\n\nfunc TestBufferedPointsWriter(t *testing.T) {\n\tdb := \"db0\"\n\trp := \"rp0\"\n\tcapacity := 10000\n\n\twritePointsIntoCnt := 0\n\tpointsWritten := []models.Point{}\n\n\treset := func() {\n\t\twritePointsIntoCnt = 0\n\t\tpointsWritten = pointsWritten[:0]\n\t}\n\n\tfakeWriter := &fakePointsWriter{\n\t\tWritePointsIntoFn: func(req *coordinator.IntoWriteRequest) error {\n\t\t\twritePointsIntoCnt++\n\t\t\tpointsWritten = append(pointsWritten, req.Points...)\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tw := coordinator.NewBufferedPointsWriter(fakeWriter, db, rp, capacity)\n\n\t// Test that capacity and length are correct for new buffered writer.\n\tif w.Cap() != capacity {\n\t\tt.Fatalf(\"exp %d, got %d\", capacity, w.Cap())\n\t} else if w.Len() != 0 {\n\t\tt.Fatalf(\"exp %d, got %d\", 0, w.Len())\n\t}\n\n\t// Test flushing an empty buffer.\n\tif err := w.Flush(); err != nil {\n\t\tt.Fatal(err)\n\t} else if writePointsIntoCnt > 0 {\n\t\tt.Fatalf(\"exp 0, got %d\", writePointsIntoCnt)\n\t}\n\n\t// Test writing zero points.\n\tif err := w.WritePointsInto(&coordinator.IntoWriteRequest{\n\t\tDatabase:        db,\n\t\tRetentionPolicy: rp,\n\t\tPoints:          []models.Point{},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t} else if writePointsIntoCnt > 0 {\n\t\tt.Fatalf(\"exp 0, got %d\", writePointsIntoCnt)\n\t} else if w.Len() > 0 {\n\t\tt.Fatalf(\"exp 0, got %d\", w.Len())\n\t}\n\n\t// Test writing single large bunch of points points.\n\treq := coordinator.WritePointsRequest{\n\t\tDatabase:        db,\n\t\tRetentionPolicy: rp,\n\t}\n\n\tnumPoints := int(float64(capacity) * 5.5)\n\tfor i := 0; i < numPoints; i++ {\n\t\treq.AddPoint(\"cpu\", float64(i), time.Now().Add(time.Duration(i)*time.Second), 
nil)\n\t}\n\n\tr := coordinator.IntoWriteRequest(req)\n\tif err := w.WritePointsInto(&r); err != nil {\n\t\tt.Fatal(err)\n\t} else if writePointsIntoCnt != 5 {\n\t\tt.Fatalf(\"exp 5, got %d\", writePointsIntoCnt)\n\t} else if w.Len() != capacity/2 {\n\t\tt.Fatalf(\"exp %d, got %d\", capacity/2, w.Len())\n\t} else if len(pointsWritten) != numPoints-capacity/2 {\n\t\tt.Fatalf(\"exp %d, got %d\", numPoints-capacity/2, len(pointsWritten))\n\t}\n\n\tif err := w.Flush(); err != nil {\n\t\tt.Fatal(err)\n\t} else if writePointsIntoCnt != 6 {\n\t\tt.Fatalf(\"exp 6, got %d\", writePointsIntoCnt)\n\t} else if w.Len() != 0 {\n\t\tt.Fatalf(\"exp 0, got %d\", w.Len())\n\t} else if len(pointsWritten) != numPoints {\n\t\tt.Fatalf(\"exp %d, got %d\", numPoints, len(pointsWritten))\n\t} else if !reflect.DeepEqual(r.Points, pointsWritten) {\n\t\tt.Fatal(\"points don't match\")\n\t}\n\n\treset()\n\n\t// Test writing points one at a time.\n\tfor i, _ := range r.Points {\n\t\tif err := w.WritePointsInto(&coordinator.IntoWriteRequest{\n\t\t\tDatabase:        db,\n\t\t\tRetentionPolicy: rp,\n\t\t\tPoints:          r.Points[i : i+1],\n\t\t}); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\tif err := w.Flush(); err != nil {\n\t\tt.Fatal(err)\n\t} else if writePointsIntoCnt != 6 {\n\t\tt.Fatalf(\"exp 6, got %d\", writePointsIntoCnt)\n\t} else if w.Len() != 0 {\n\t\tt.Fatalf(\"exp 0, got %d\", w.Len())\n\t} else if len(pointsWritten) != numPoints {\n\t\tt.Fatalf(\"exp %d, got %d\", numPoints, len(pointsWritten))\n\t} else if !reflect.DeepEqual(r.Points, pointsWritten) {\n\t\tt.Fatal(\"points don't match\")\n\t}\n}\n\nvar shardID uint64\n\ntype fakeStore struct {\n\tWriteFn       func(shardID uint64, points []models.Point) error\n\tCreateShardfn func(database, retentionPolicy string, shardID uint64, enabled bool) error\n}\n\nfunc (f *fakeStore) WriteToShard(shardID uint64, points []models.Point) error {\n\treturn f.WriteFn(shardID, points)\n}\n\nfunc (f *fakeStore) CreateShard(database, 
retentionPolicy string, shardID uint64, enabled bool) error {\n\treturn f.CreateShardfn(database, retentionPolicy, shardID, enabled)\n}\n\nfunc NewPointsWriterMetaClient() *PointsWriterMetaClient {\n\tms := &PointsWriterMetaClient{}\n\trp := NewRetentionPolicy(\"myp\", time.Hour, 3)\n\tAttachShardGroupInfo(rp, []meta.ShardOwner{\n\t\t{NodeID: 1},\n\t\t{NodeID: 2},\n\t\t{NodeID: 3},\n\t})\n\tAttachShardGroupInfo(rp, []meta.ShardOwner{\n\t\t{NodeID: 1},\n\t\t{NodeID: 2},\n\t\t{NodeID: 3},\n\t})\n\n\tms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) {\n\t\treturn rp, nil\n\t}\n\n\tms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {\n\t\tfor i, sg := range rp.ShardGroups {\n\t\t\tif timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) {\n\t\t\t\treturn &rp.ShardGroups[i], nil\n\t\t\t}\n\t\t}\n\t\tpanic(\"should not get here\")\n\t}\n\treturn ms\n}\n\ntype PointsWriterMetaClient struct {\n\tNodeIDFn                      func() uint64\n\tRetentionPolicyFn             func(database, name string) (*meta.RetentionPolicyInfo, error)\n\tCreateShardGroupIfNotExistsFn func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error)\n\tDatabaseFn                    func(database string) *meta.DatabaseInfo\n\tShardOwnerFn                  func(shardID uint64) (string, string, *meta.ShardGroupInfo)\n}\n\nfunc (m PointsWriterMetaClient) NodeID() uint64 { return m.NodeIDFn() }\n\nfunc (m PointsWriterMetaClient) RetentionPolicy(database, name string) (*meta.RetentionPolicyInfo, error) {\n\treturn m.RetentionPolicyFn(database, name)\n}\n\nfunc (m PointsWriterMetaClient) CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {\n\treturn m.CreateShardGroupIfNotExistsFn(database, policy, timestamp)\n}\n\nfunc (m PointsWriterMetaClient) Database(database string) *meta.DatabaseInfo 
{\n\treturn m.DatabaseFn(database)\n}\n\nfunc (m PointsWriterMetaClient) ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) {\n\treturn m.ShardOwnerFn(shardID)\n}\n\ntype Subscriber struct {\n\tPointsFn func() chan<- *coordinator.WritePointsRequest\n}\n\nfunc (s Subscriber) Points() chan<- *coordinator.WritePointsRequest {\n\treturn s.PointsFn()\n}\n\nfunc NewRetentionPolicy(name string, duration time.Duration, nodeCount int) *meta.RetentionPolicyInfo {\n\tshards := []meta.ShardInfo{}\n\towners := []meta.ShardOwner{}\n\tfor i := 1; i <= nodeCount; i++ {\n\t\towners = append(owners, meta.ShardOwner{NodeID: uint64(i)})\n\t}\n\n\t// each node is fully replicated with each other\n\tshards = append(shards, meta.ShardInfo{\n\t\tID:     nextShardID(),\n\t\tOwners: owners,\n\t})\n\n\tstart := time.Now()\n\trp := &meta.RetentionPolicyInfo{\n\t\tName:               \"myrp\",\n\t\tReplicaN:           nodeCount,\n\t\tDuration:           duration,\n\t\tShardGroupDuration: duration,\n\t\tShardGroups: []meta.ShardGroupInfo{\n\t\t\tmeta.ShardGroupInfo{\n\t\t\t\tID:        nextShardID(),\n\t\t\t\tStartTime: start,\n\t\t\t\tEndTime:   start.Add(duration).Add(-1),\n\t\t\t\tShards:    shards,\n\t\t\t},\n\t\t},\n\t}\n\treturn rp\n}\n\nfunc AttachShardGroupInfo(rp *meta.RetentionPolicyInfo, owners []meta.ShardOwner) {\n\tvar startTime, endTime time.Time\n\tif len(rp.ShardGroups) == 0 {\n\t\tstartTime = time.Now()\n\t} else {\n\t\tstartTime = rp.ShardGroups[len(rp.ShardGroups)-1].StartTime.Add(rp.ShardGroupDuration)\n\t}\n\tendTime = startTime.Add(rp.ShardGroupDuration).Add(-1)\n\n\tsh := meta.ShardGroupInfo{\n\t\tID:        uint64(len(rp.ShardGroups) + 1),\n\t\tStartTime: startTime,\n\t\tEndTime:   endTime,\n\t\tShards: []meta.ShardInfo{\n\t\t\tmeta.ShardInfo{\n\t\t\t\tID:     nextShardID(),\n\t\t\t\tOwners: owners,\n\t\t\t},\n\t\t},\n\t}\n\trp.ShardGroups = append(rp.ShardGroups, sh)\n}\n\nfunc nextShardID() uint64 {\n\treturn atomic.AddUint64(&shardID, 1)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/coordinator/shard_mapper.go",
    "content": "package coordinator\n\nimport (\n\t\"io\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n)\n\n// IteratorCreator is an interface that combines mapping fields and creating iterators.\ntype IteratorCreator interface {\n\tinfluxql.IteratorCreator\n\tinfluxql.FieldMapper\n\tio.Closer\n}\n\n// ShardMapper retrieves and maps shards into an IteratorCreator that can later be\n// used for executing queries.\ntype ShardMapper interface {\n\tMapShards(sources influxql.Sources, opt *influxql.SelectOptions) (IteratorCreator, error)\n}\n\n// LocalShardMapper implements a ShardMapper for local shards.\ntype LocalShardMapper struct {\n\tMetaClient interface {\n\t\tShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error)\n\t}\n\n\tTSDBStore interface {\n\t\tShardGroup(ids []uint64) tsdb.ShardGroup\n\t}\n}\n\n// MapShards maps the sources to the appropriate shards into an IteratorCreator.\nfunc (e *LocalShardMapper) MapShards(sources influxql.Sources, opt *influxql.SelectOptions) (IteratorCreator, error) {\n\ta := &LocalShardMapping{\n\t\tShardMap: make(map[Source]tsdb.ShardGroup),\n\t}\n\n\tif err := e.mapShards(a, sources, opt); err != nil {\n\t\treturn nil, err\n\t}\n\treturn a, nil\n}\n\nfunc (e *LocalShardMapper) mapShards(a *LocalShardMapping, sources influxql.Sources, opt *influxql.SelectOptions) error {\n\tfor _, s := range sources {\n\t\tswitch s := s.(type) {\n\t\tcase *influxql.Measurement:\n\t\t\tsource := Source{\n\t\t\t\tDatabase:        s.Database,\n\t\t\t\tRetentionPolicy: s.RetentionPolicy,\n\t\t\t}\n\n\t\t\t// Retrieve the list of shards for this database. 
This list of\n\t\t\t// shards is always the same regardless of which measurement we are\n\t\t\t// using.\n\t\t\tif _, ok := a.ShardMap[source]; !ok {\n\t\t\t\tgroups, err := e.MetaClient.ShardGroupsByTimeRange(s.Database, s.RetentionPolicy, opt.MinTime, opt.MaxTime)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tif len(groups) == 0 {\n\t\t\t\t\ta.ShardMap[source] = nil\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tshardIDs := make([]uint64, 0, len(groups[0].Shards)*len(groups))\n\t\t\t\tfor _, g := range groups {\n\t\t\t\t\tfor _, si := range g.Shards {\n\t\t\t\t\t\tshardIDs = append(shardIDs, si.ID)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ta.ShardMap[source] = e.TSDBStore.ShardGroup(shardIDs)\n\t\t\t}\n\t\tcase *influxql.SubQuery:\n\t\t\tif err := e.mapShards(a, s.Statement.Sources, opt); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// ShardMapper maps data sources to a list of shard information.\ntype LocalShardMapping struct {\n\tShardMap map[Source]tsdb.ShardGroup\n}\n\nfunc (a *LocalShardMapping) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {\n\tsource := Source{\n\t\tDatabase:        m.Database,\n\t\tRetentionPolicy: m.RetentionPolicy,\n\t}\n\n\tsg := a.ShardMap[source]\n\tif sg == nil {\n\t\treturn\n\t}\n\n\tfields = make(map[string]influxql.DataType)\n\tdimensions = make(map[string]struct{})\n\n\tvar measurements []string\n\tif m.Regex != nil {\n\t\tmeasurements = sg.MeasurementsByRegex(m.Regex.Val)\n\t} else {\n\t\tmeasurements = []string{m.Name}\n\t}\n\n\tf, d, err := sg.FieldDimensions(measurements)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tfor k, typ := range f {\n\t\tfields[k] = typ\n\t}\n\tfor k := range d {\n\t\tdimensions[k] = struct{}{}\n\t}\n\treturn\n}\n\nfunc (a *LocalShardMapping) MapType(m *influxql.Measurement, field string) influxql.DataType {\n\tsource := Source{\n\t\tDatabase:        
m.Database,\n\t\tRetentionPolicy: m.RetentionPolicy,\n\t}\n\n\tsg := a.ShardMap[source]\n\tif sg == nil {\n\t\treturn influxql.Unknown\n\t}\n\n\tvar names []string\n\tif m.Regex != nil {\n\t\tnames = sg.MeasurementsByRegex(m.Regex.Val)\n\t} else {\n\t\tnames = []string{m.Name}\n\t}\n\n\tvar typ influxql.DataType\n\tfor _, name := range names {\n\t\tt := sg.MapType(name, field)\n\t\tif typ.LessThan(t) {\n\t\t\ttyp = t\n\t\t}\n\t}\n\treturn typ\n}\n\nfunc (a *LocalShardMapping) CreateIterator(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\tsource := Source{\n\t\tDatabase:        m.Database,\n\t\tRetentionPolicy: m.RetentionPolicy,\n\t}\n\n\tsg := a.ShardMap[source]\n\tif sg == nil {\n\t\treturn nil, nil\n\t}\n\n\tif m.Regex != nil {\n\t\tmeasurements := sg.MeasurementsByRegex(m.Regex.Val)\n\t\tinputs := make([]influxql.Iterator, 0, len(measurements))\n\t\tif err := func() error {\n\t\t\tfor _, measurement := range measurements {\n\t\t\t\tinput, err := sg.CreateIterator(measurement, opt)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tinputs = append(inputs, input)\n\t\t\t}\n\t\t\treturn nil\n\t\t}(); err != nil {\n\t\t\tinfluxql.Iterators(inputs).Close()\n\t\t\treturn nil, err\n\t\t}\n\t\treturn influxql.Iterators(inputs).Merge(opt)\n\t}\n\treturn sg.CreateIterator(m.Name, opt)\n}\n\n// Close does nothing for a LocalShardMapping.\nfunc (a *LocalShardMapping) Close() error {\n\treturn nil\n}\n\n// Source contains the database and retention policy source for data.\ntype Source struct {\n\tDatabase        string\n\tRetentionPolicy string\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/coordinator/shard_mapper_test.go",
    "content": "package coordinator_test\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/coordinator\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n)\n\nfunc TestLocalShardMapper(t *testing.T) {\n\tvar metaClient MetaClient\n\tmetaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) ([]meta.ShardGroupInfo, error) {\n\t\tif database != \"db0\" {\n\t\t\tt.Errorf(\"unexpected database: %s\", database)\n\t\t}\n\t\tif policy != \"rp0\" {\n\t\t\tt.Errorf(\"unexpected retention policy: %s\", policy)\n\t\t}\n\t\treturn []meta.ShardGroupInfo{\n\t\t\t{ID: 1, Shards: []meta.ShardInfo{\n\t\t\t\t{ID: 1, Owners: []meta.ShardOwner{{NodeID: 0}}},\n\t\t\t\t{ID: 2, Owners: []meta.ShardOwner{{NodeID: 0}}},\n\t\t\t}},\n\t\t\t{ID: 2, Shards: []meta.ShardInfo{\n\t\t\t\t{ID: 3, Owners: []meta.ShardOwner{{NodeID: 0}}},\n\t\t\t\t{ID: 4, Owners: []meta.ShardOwner{{NodeID: 0}}},\n\t\t\t}},\n\t\t}, nil\n\t}\n\n\tvar tsdbStore TSDBStore\n\ttsdbStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup {\n\t\tif !reflect.DeepEqual(ids, []uint64{1, 2, 3, 4}) {\n\t\t\tt.Errorf(\"unexpected shard ids: %#v\", ids)\n\t\t}\n\n\t\tvar sh MockShard\n\t\tsh.CreateIteratorFn = func(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\t\tif measurement != \"cpu\" {\n\t\t\t\tt.Errorf(\"unexpected measurement: %s\", measurement)\n\t\t\t}\n\t\t\treturn &FloatIterator{}, nil\n\t\t}\n\t\treturn &sh\n\t}\n\n\t// Initialize the shard mapper.\n\tshardMapper := &coordinator.LocalShardMapper{\n\t\tMetaClient: &metaClient,\n\t\tTSDBStore:  &tsdbStore,\n\t}\n\n\t// Normal measurement.\n\tmeasurement := &influxql.Measurement{\n\t\tDatabase:        \"db0\",\n\t\tRetentionPolicy: \"rp0\",\n\t\tName:            \"cpu\",\n\t}\n\tic, err := shardMapper.MapShards([]influxql.Source{measurement}, &influxql.SelectOptions{})\n\tif 
err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n\n\t// This should be a LocalShardMapping.\n\tm, ok := ic.(*coordinator.LocalShardMapping)\n\tif !ok {\n\t\tt.Fatalf(\"unexpected mapping type: %T\", ic)\n\t} else if len(m.ShardMap) != 1 {\n\t\tt.Fatalf(\"unexpected number of shard mappings: %d\", len(m.ShardMap))\n\t}\n\n\tif _, err := ic.CreateIterator(measurement, influxql.IteratorOptions{}); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n\n\t// Subquery.\n\tsubquery := &influxql.SubQuery{\n\t\tStatement: &influxql.SelectStatement{\n\t\t\tSources: []influxql.Source{measurement},\n\t\t},\n\t}\n\tic, err = shardMapper.MapShards([]influxql.Source{subquery}, &influxql.SelectOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n\n\t// This should be a LocalShardMapping.\n\tm, ok = ic.(*coordinator.LocalShardMapping)\n\tif !ok {\n\t\tt.Fatalf(\"unexpected mapping type: %T\", ic)\n\t} else if len(m.ShardMap) != 1 {\n\t\tt.Fatalf(\"unexpected number of shard mappings: %d\", len(m.ShardMap))\n\t}\n\n\tif _, err := ic.CreateIterator(measurement, influxql.IteratorOptions{}); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/coordinator/statement_executor.go",
    "content": "package coordinator\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/monitor\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n)\n\n// ErrDatabaseNameRequired is returned when executing statements that require a database,\n// when a database has not been provided.\nvar ErrDatabaseNameRequired = errors.New(\"database name required\")\n\ntype pointsWriter interface {\n\tWritePointsInto(*IntoWriteRequest) error\n}\n\n// StatementExecutor executes a statement in the query.\ntype StatementExecutor struct {\n\tMetaClient MetaClient\n\n\t// TaskManager holds the StatementExecutor that handles task-related commands.\n\tTaskManager influxql.StatementExecutor\n\n\t// TSDB storage for local node.\n\tTSDBStore TSDBStore\n\n\t// ShardMapper for mapping shards when executing a SELECT statement.\n\tShardMapper ShardMapper\n\n\t// Holds monitoring data for SHOW STATS and SHOW DIAGNOSTICS.\n\tMonitor *monitor.Monitor\n\n\t// Used for rewriting points back into system for SELECT INTO statements.\n\tPointsWriter pointsWriter\n\n\t// Select statement limits\n\tMaxSelectPointN   int\n\tMaxSelectSeriesN  int\n\tMaxSelectBucketsN int\n}\n\n// ExecuteStatement executes the given statement with the given execution context.\nfunc (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t// Select statements are handled separately so that they can be streamed.\n\tif stmt, ok := stmt.(*influxql.SelectStatement); ok {\n\t\treturn e.executeSelectStatement(stmt, &ctx)\n\t}\n\n\tvar rows models.Rows\n\tvar messages []*influxql.Message\n\tvar err error\n\tswitch stmt := stmt.(type) {\n\tcase *influxql.AlterRetentionPolicyStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = 
append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeAlterRetentionPolicyStatement(stmt)\n\tcase *influxql.CreateContinuousQueryStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeCreateContinuousQueryStatement(stmt)\n\tcase *influxql.CreateDatabaseStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeCreateDatabaseStatement(stmt)\n\tcase *influxql.CreateRetentionPolicyStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeCreateRetentionPolicyStatement(stmt)\n\tcase *influxql.CreateSubscriptionStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeCreateSubscriptionStatement(stmt)\n\tcase *influxql.CreateUserStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeCreateUserStatement(stmt)\n\tcase *influxql.DeleteSeriesStatement:\n\t\terr = e.executeDeleteSeriesStatement(stmt, ctx.Database)\n\tcase *influxql.DropContinuousQueryStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeDropContinuousQueryStatement(stmt)\n\tcase *influxql.DropDatabaseStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeDropDatabaseStatement(stmt)\n\tcase *influxql.DropMeasurementStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeDropMeasurementStatement(stmt, ctx.Database)\n\tcase *influxql.DropSeriesStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, 
influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeDropSeriesStatement(stmt, ctx.Database)\n\tcase *influxql.DropRetentionPolicyStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeDropRetentionPolicyStatement(stmt)\n\tcase *influxql.DropShardStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeDropShardStatement(stmt)\n\tcase *influxql.DropSubscriptionStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeDropSubscriptionStatement(stmt)\n\tcase *influxql.DropUserStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeDropUserStatement(stmt)\n\tcase *influxql.GrantStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeGrantStatement(stmt)\n\tcase *influxql.GrantAdminStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeGrantAdminStatement(stmt)\n\tcase *influxql.RevokeStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeRevokeStatement(stmt)\n\tcase *influxql.RevokeAdminStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeRevokeAdminStatement(stmt)\n\tcase *influxql.ShowContinuousQueriesStatement:\n\t\trows, err = e.executeShowContinuousQueriesStatement(stmt)\n\tcase *influxql.ShowDatabasesStatement:\n\t\trows, err = e.executeShowDatabasesStatement(stmt, &ctx)\n\tcase *influxql.ShowDiagnosticsStatement:\n\t\trows, err = e.executeShowDiagnosticsStatement(stmt)\n\tcase 
*influxql.ShowGrantsForUserStatement:\n\t\trows, err = e.executeShowGrantsForUserStatement(stmt)\n\tcase *influxql.ShowMeasurementsStatement:\n\t\treturn e.executeShowMeasurementsStatement(stmt, &ctx)\n\tcase *influxql.ShowRetentionPoliciesStatement:\n\t\trows, err = e.executeShowRetentionPoliciesStatement(stmt)\n\tcase *influxql.ShowShardsStatement:\n\t\trows, err = e.executeShowShardsStatement(stmt)\n\tcase *influxql.ShowShardGroupsStatement:\n\t\trows, err = e.executeShowShardGroupsStatement(stmt)\n\tcase *influxql.ShowStatsStatement:\n\t\trows, err = e.executeShowStatsStatement(stmt)\n\tcase *influxql.ShowSubscriptionsStatement:\n\t\trows, err = e.executeShowSubscriptionsStatement(stmt)\n\tcase *influxql.ShowTagValuesStatement:\n\t\treturn e.executeShowTagValues(stmt, &ctx)\n\tcase *influxql.ShowUsersStatement:\n\t\trows, err = e.executeShowUsersStatement(stmt)\n\tcase *influxql.SetPasswordUserStatement:\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\t\terr = e.executeSetPasswordUserStatement(stmt)\n\tcase *influxql.ShowQueriesStatement, *influxql.KillQueryStatement:\n\t\t// Send query related statements to the task manager.\n\t\treturn e.TaskManager.ExecuteStatement(stmt, ctx)\n\tdefault:\n\t\treturn influxql.ErrInvalidQuery\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ctx.Send(&influxql.Result{\n\t\tStatementID: ctx.StatementID,\n\t\tSeries:      rows,\n\t\tMessages:    messages,\n\t})\n}\n\nfunc (e *StatementExecutor) executeAlterRetentionPolicyStatement(stmt *influxql.AlterRetentionPolicyStatement) error {\n\trpu := &meta.RetentionPolicyUpdate{\n\t\tDuration:           stmt.Duration,\n\t\tReplicaN:           stmt.Replication,\n\t\tShardGroupDuration: stmt.ShardGroupDuration,\n\t}\n\n\t// Update the retention policy.\n\tif err := e.MetaClient.UpdateRetentionPolicy(stmt.Database, stmt.Name, rpu, stmt.Default); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (e 
*StatementExecutor) executeCreateContinuousQueryStatement(q *influxql.CreateContinuousQueryStatement) error {\n\t// Verify that retention policies exist.\n\tvar err error\n\tverifyRPFn := func(n influxql.Node) {\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tswitch m := n.(type) {\n\t\tcase *influxql.Measurement:\n\t\t\tvar rp *meta.RetentionPolicyInfo\n\t\t\tif rp, err = e.MetaClient.RetentionPolicy(m.Database, m.RetentionPolicy); err != nil {\n\t\t\t\treturn\n\t\t\t} else if rp == nil {\n\t\t\t\terr = fmt.Errorf(\"%s: %s.%s\", meta.ErrRetentionPolicyNotFound, m.Database, m.RetentionPolicy)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n\n\tinfluxql.WalkFunc(q, verifyRPFn)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn e.MetaClient.CreateContinuousQuery(q.Database, q.Name, q.String())\n}\n\nfunc (e *StatementExecutor) executeCreateDatabaseStatement(stmt *influxql.CreateDatabaseStatement) error {\n\tif !meta.ValidName(stmt.Name) {\n\t\t// TODO This should probably be in `(*meta.Data).CreateDatabase`\n\t\t// but can't go there until 1.1 is used everywhere\n\t\treturn meta.ErrInvalidName\n\t}\n\n\tif !stmt.RetentionPolicyCreate {\n\t\t_, err := e.MetaClient.CreateDatabase(stmt.Name)\n\t\treturn err\n\t}\n\n\t// If we're doing, for example, CREATE DATABASE \"db\" WITH DURATION 1d then\n\t// the name will not yet be set. 
We only need to validate non-empty\n\t// retention policy names, such as in the statement:\n\t// \tCREATE DATABASE \"db\" WITH DURATION 1d NAME \"xyz\"\n\tif stmt.RetentionPolicyName != \"\" && !meta.ValidName(stmt.RetentionPolicyName) {\n\t\treturn meta.ErrInvalidName\n\t}\n\n\tspec := meta.RetentionPolicySpec{\n\t\tName:               stmt.RetentionPolicyName,\n\t\tDuration:           stmt.RetentionPolicyDuration,\n\t\tReplicaN:           stmt.RetentionPolicyReplication,\n\t\tShardGroupDuration: stmt.RetentionPolicyShardGroupDuration,\n\t}\n\t_, err := e.MetaClient.CreateDatabaseWithRetentionPolicy(stmt.Name, &spec)\n\treturn err\n}\n\nfunc (e *StatementExecutor) executeCreateRetentionPolicyStatement(stmt *influxql.CreateRetentionPolicyStatement) error {\n\tif !meta.ValidName(stmt.Name) {\n\t\t// TODO This should probably be in `(*meta.Data).CreateRetentionPolicy`\n\t\t// but can't go there until 1.1 is used everywhere\n\t\treturn meta.ErrInvalidName\n\t}\n\n\tspec := meta.RetentionPolicySpec{\n\t\tName:               stmt.Name,\n\t\tDuration:           &stmt.Duration,\n\t\tReplicaN:           &stmt.Replication,\n\t\tShardGroupDuration: stmt.ShardGroupDuration,\n\t}\n\n\t// Create new retention policy.\n\t_, err := e.MetaClient.CreateRetentionPolicy(stmt.Database, &spec, stmt.Default)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (e *StatementExecutor) executeCreateSubscriptionStatement(q *influxql.CreateSubscriptionStatement) error {\n\treturn e.MetaClient.CreateSubscription(q.Database, q.RetentionPolicy, q.Name, q.Mode, q.Destinations)\n}\n\nfunc (e *StatementExecutor) executeCreateUserStatement(q *influxql.CreateUserStatement) error {\n\t_, err := e.MetaClient.CreateUser(q.Name, q.Password, q.Admin)\n\treturn err\n}\n\nfunc (e *StatementExecutor) executeDeleteSeriesStatement(stmt *influxql.DeleteSeriesStatement, database string) error {\n\tif dbi := e.MetaClient.Database(database); dbi == nil {\n\t\treturn 
influxql.ErrDatabaseNotFound(database)\n\t}\n\n\t// Convert \"now()\" to current time.\n\tstmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: time.Now().UTC()})\n\n\t// Locally delete the series.\n\treturn e.TSDBStore.DeleteSeries(database, stmt.Sources, stmt.Condition)\n}\n\nfunc (e *StatementExecutor) executeDropContinuousQueryStatement(q *influxql.DropContinuousQueryStatement) error {\n\treturn e.MetaClient.DropContinuousQuery(q.Database, q.Name)\n}\n\n// executeDropDatabaseStatement drops a database from the cluster.\n// It does not return an error if the database was not found on any of\n// the nodes, or in the Meta store.\nfunc (e *StatementExecutor) executeDropDatabaseStatement(stmt *influxql.DropDatabaseStatement) error {\n\tif e.MetaClient.Database(stmt.Name) == nil {\n\t\treturn nil\n\t}\n\n\t// Locally delete the database.\n\tif err := e.TSDBStore.DeleteDatabase(stmt.Name); err != nil {\n\t\treturn err\n\t}\n\n\t// Remove the database from the Meta Store.\n\treturn e.MetaClient.DropDatabase(stmt.Name)\n}\n\nfunc (e *StatementExecutor) executeDropMeasurementStatement(stmt *influxql.DropMeasurementStatement, database string) error {\n\tif dbi := e.MetaClient.Database(database); dbi == nil {\n\t\treturn influxql.ErrDatabaseNotFound(database)\n\t}\n\n\t// Locally drop the measurement\n\treturn e.TSDBStore.DeleteMeasurement(database, stmt.Name)\n}\n\nfunc (e *StatementExecutor) executeDropSeriesStatement(stmt *influxql.DropSeriesStatement, database string) error {\n\tif dbi := e.MetaClient.Database(database); dbi == nil {\n\t\treturn influxql.ErrDatabaseNotFound(database)\n\t}\n\n\t// Check for time in WHERE clause (not supported).\n\tif influxql.HasTimeExpr(stmt.Condition) {\n\t\treturn errors.New(\"DROP SERIES doesn't support time in WHERE clause\")\n\t}\n\n\t// Locally drop the series.\n\treturn e.TSDBStore.DeleteSeries(database, stmt.Sources, stmt.Condition)\n}\n\nfunc (e *StatementExecutor) executeDropShardStatement(stmt 
*influxql.DropShardStatement) error {\n\t// Locally delete the shard.\n\tif err := e.TSDBStore.DeleteShard(stmt.ID); err != nil {\n\t\treturn err\n\t}\n\n\t// Remove the shard reference from the Meta Store.\n\treturn e.MetaClient.DropShard(stmt.ID)\n}\n\nfunc (e *StatementExecutor) executeDropRetentionPolicyStatement(stmt *influxql.DropRetentionPolicyStatement) error {\n\tdbi := e.MetaClient.Database(stmt.Database)\n\tif dbi == nil {\n\t\treturn nil\n\t}\n\n\tif dbi.RetentionPolicy(stmt.Name) == nil {\n\t\treturn nil\n\t}\n\n\t// Locally drop the retention policy.\n\tif err := e.TSDBStore.DeleteRetentionPolicy(stmt.Database, stmt.Name); err != nil {\n\t\treturn err\n\t}\n\n\treturn e.MetaClient.DropRetentionPolicy(stmt.Database, stmt.Name)\n}\n\nfunc (e *StatementExecutor) executeDropSubscriptionStatement(q *influxql.DropSubscriptionStatement) error {\n\treturn e.MetaClient.DropSubscription(q.Database, q.RetentionPolicy, q.Name)\n}\n\nfunc (e *StatementExecutor) executeDropUserStatement(q *influxql.DropUserStatement) error {\n\treturn e.MetaClient.DropUser(q.Name)\n}\n\nfunc (e *StatementExecutor) executeGrantStatement(stmt *influxql.GrantStatement) error {\n\treturn e.MetaClient.SetPrivilege(stmt.User, stmt.On, stmt.Privilege)\n}\n\nfunc (e *StatementExecutor) executeGrantAdminStatement(stmt *influxql.GrantAdminStatement) error {\n\treturn e.MetaClient.SetAdminPrivilege(stmt.User, true)\n}\n\nfunc (e *StatementExecutor) executeRevokeStatement(stmt *influxql.RevokeStatement) error {\n\tpriv := influxql.NoPrivileges\n\n\t// Revoking all privileges means there's no need to look at existing user privileges.\n\tif stmt.Privilege != influxql.AllPrivileges {\n\t\tp, err := e.MetaClient.UserPrivilege(stmt.User, stmt.On)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Bit clear (AND NOT) the user's privilege with the revoked privilege.\n\t\tpriv = *p &^ stmt.Privilege\n\t}\n\n\treturn e.MetaClient.SetPrivilege(stmt.User, stmt.On, priv)\n}\n\nfunc (e 
*StatementExecutor) executeRevokeAdminStatement(stmt *influxql.RevokeAdminStatement) error {\n\treturn e.MetaClient.SetAdminPrivilege(stmt.User, false)\n}\n\nfunc (e *StatementExecutor) executeSetPasswordUserStatement(q *influxql.SetPasswordUserStatement) error {\n\treturn e.MetaClient.UpdateUser(q.Name, q.Password)\n}\n\nfunc (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatement, ctx *influxql.ExecutionContext) error {\n\titrs, stmt, err := e.createIterators(stmt, ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Generate a row emitter from the iterator set.\n\tem := influxql.NewEmitter(itrs, stmt.TimeAscending(), ctx.ChunkSize)\n\tem.Columns = stmt.ColumnNames()\n\tif stmt.Location != nil {\n\t\tem.Location = stmt.Location\n\t}\n\tem.OmitTime = stmt.OmitTime\n\tdefer em.Close()\n\n\t// Emit rows to the results channel.\n\tvar writeN int64\n\tvar emitted bool\n\n\tvar pointsWriter *BufferedPointsWriter\n\tif stmt.Target != nil {\n\t\tpointsWriter = NewBufferedPointsWriter(e.PointsWriter, stmt.Target.Measurement.Database, stmt.Target.Measurement.RetentionPolicy, 10000)\n\t}\n\n\tfor {\n\t\trow, partial, err := em.Emit()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t} else if row == nil {\n\t\t\t// Check if the query was interrupted while emitting.\n\t\t\tselect {\n\t\t\tcase <-ctx.InterruptCh:\n\t\t\t\treturn influxql.ErrQueryInterrupted\n\t\t\tdefault:\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\t// Write points back into system for INTO statements.\n\t\tif stmt.Target != nil {\n\t\t\tif err := e.writeInto(pointsWriter, stmt, row); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\twriteN += int64(len(row.Values))\n\t\t\tcontinue\n\t\t}\n\n\t\tresult := &influxql.Result{\n\t\t\tStatementID: ctx.StatementID,\n\t\t\tSeries:      []*models.Row{row},\n\t\t\tPartial:     partial,\n\t\t}\n\n\t\t// Send results or exit if closing.\n\t\tif err := ctx.Send(result); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\temitted = true\n\t}\n\n\t// Flush remaining points 
and emit write count if an INTO statement.\n\tif stmt.Target != nil {\n\t\tif err := pointsWriter.Flush(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar messages []*influxql.Message\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, influxql.ReadOnlyWarning(stmt.String()))\n\t\t}\n\n\t\treturn ctx.Send(&influxql.Result{\n\t\t\tStatementID: ctx.StatementID,\n\t\t\tMessages:    messages,\n\t\t\tSeries: []*models.Row{{\n\t\t\t\tName:    \"result\",\n\t\t\t\tColumns: []string{\"time\", \"written\"},\n\t\t\t\tValues:  [][]interface{}{{time.Unix(0, 0).UTC(), writeN}},\n\t\t\t}},\n\t\t})\n\t}\n\n\t// Always emit at least one result.\n\tif !emitted {\n\t\treturn ctx.Send(&influxql.Result{\n\t\t\tStatementID: ctx.StatementID,\n\t\t\tSeries:      make([]*models.Row, 0),\n\t\t})\n\t}\n\n\treturn nil\n}\n\nfunc (e *StatementExecutor) createIterators(stmt *influxql.SelectStatement, ctx *influxql.ExecutionContext) ([]influxql.Iterator, *influxql.SelectStatement, error) {\n\t// It is important to \"stamp\" this time so that everywhere we evaluate `now()` in the statement is EXACTLY the same `now`\n\tnow := time.Now().UTC()\n\topt := influxql.SelectOptions{\n\t\tInterruptCh: ctx.InterruptCh,\n\t\tNodeID:      ctx.ExecutionOptions.NodeID,\n\t\tMaxSeriesN:  e.MaxSelectSeriesN,\n\t\tAuthorizer:  ctx.Authorizer,\n\t}\n\n\t// Replace instances of \"now()\" with the current time, and check the resultant times.\n\tnowValuer := influxql.NowValuer{Now: now, Location: stmt.Location}\n\tstmt = stmt.Reduce(&nowValuer)\n\n\tvar err error\n\topt.MinTime, opt.MaxTime, err = influxql.TimeRange(stmt.Condition, stmt.Location)\n\tif err != nil {\n\t\treturn nil, stmt, err\n\t}\n\n\tif opt.MaxTime.IsZero() {\n\t\topt.MaxTime = time.Unix(0, influxql.MaxTime)\n\t}\n\tif opt.MinTime.IsZero() {\n\t\topt.MinTime = time.Unix(0, influxql.MinTime).UTC()\n\t}\n\n\t// Convert DISTINCT into a call.\n\tstmt.RewriteDistinct()\n\n\t// Remove \"time\" from fields list.\n\tstmt.RewriteTimeFields()\n\n\t// 
Rewrite time condition.\n\tif err := stmt.RewriteTimeCondition(now); err != nil {\n\t\treturn nil, stmt, err\n\t}\n\n\t// Rewrite any regex conditions that could make use of the index.\n\tstmt.RewriteRegexConditions()\n\n\t// Create an iterator creator based on the shards in the cluster.\n\tic, err := e.ShardMapper.MapShards(stmt.Sources, &opt)\n\tif err != nil {\n\t\treturn nil, stmt, err\n\t}\n\tdefer ic.Close()\n\n\t// Rewrite wildcards, if any exist.\n\ttmp, err := stmt.RewriteFields(ic)\n\tif err != nil {\n\t\treturn nil, stmt, err\n\t}\n\tstmt = tmp\n\n\tif e.MaxSelectBucketsN > 0 && !stmt.IsRawQuery {\n\t\tinterval, err := stmt.GroupByInterval()\n\t\tif err != nil {\n\t\t\treturn nil, stmt, err\n\t\t}\n\n\t\tif interval > 0 {\n\t\t\t// Determine the start and end time matched to the interval (may not match the actual times).\n\t\t\tmin := opt.MinTime.Truncate(interval)\n\t\t\tmax := opt.MaxTime.Truncate(interval).Add(interval)\n\n\t\t\t// Determine the number of buckets by finding the time span and dividing by the interval.\n\t\t\tbuckets := int64(max.Sub(min)) / int64(interval)\n\t\t\tif int(buckets) > e.MaxSelectBucketsN {\n\t\t\t\treturn nil, stmt, fmt.Errorf(\"max-select-buckets limit exceeded: (%d/%d)\", buckets, e.MaxSelectBucketsN)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Create a set of iterators from a selection.\n\titrs, err := influxql.Select(stmt, ic, &opt)\n\tif err != nil {\n\t\treturn nil, stmt, err\n\t}\n\n\tif e.MaxSelectPointN > 0 {\n\t\tmonitor := influxql.PointLimitMonitor(itrs, influxql.DefaultStatsInterval, e.MaxSelectPointN)\n\t\tctx.Query.Monitor(monitor)\n\t}\n\treturn itrs, stmt, nil\n}\n\nfunc (e *StatementExecutor) executeShowContinuousQueriesStatement(stmt *influxql.ShowContinuousQueriesStatement) (models.Rows, error) {\n\tdis := e.MetaClient.Databases()\n\n\trows := []*models.Row{}\n\tfor _, di := range dis {\n\t\trow := &models.Row{Columns: []string{\"name\", \"query\"}, Name: di.Name}\n\t\tfor _, cqi := range di.ContinuousQueries 
{\n\t\t\trow.Values = append(row.Values, []interface{}{cqi.Name, cqi.Query})\n\t\t}\n\t\trows = append(rows, row)\n\t}\n\treturn rows, nil\n}\n\nfunc (e *StatementExecutor) executeShowDatabasesStatement(q *influxql.ShowDatabasesStatement, ctx *influxql.ExecutionContext) (models.Rows, error) {\n\tdis := e.MetaClient.Databases()\n\ta := ctx.ExecutionOptions.Authorizer\n\n\trow := &models.Row{Name: \"databases\", Columns: []string{\"name\"}}\n\tfor _, di := range dis {\n\t\t// Only include databases that the user is authorized to read or write.\n\t\tif a.AuthorizeDatabase(influxql.ReadPrivilege, di.Name) || a.AuthorizeDatabase(influxql.WritePrivilege, di.Name) {\n\t\t\trow.Values = append(row.Values, []interface{}{di.Name})\n\t\t}\n\t}\n\treturn []*models.Row{row}, nil\n}\n\nfunc (e *StatementExecutor) executeShowDiagnosticsStatement(stmt *influxql.ShowDiagnosticsStatement) (models.Rows, error) {\n\tdiags, err := e.Monitor.Diagnostics()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get a sorted list of diagnostics keys.\n\tsortedKeys := make([]string, 0, len(diags))\n\tfor k := range diags {\n\t\tsortedKeys = append(sortedKeys, k)\n\t}\n\tsort.Strings(sortedKeys)\n\n\trows := make([]*models.Row, 0, len(diags))\n\tfor _, k := range sortedKeys {\n\t\tif stmt.Module != \"\" && k != stmt.Module {\n\t\t\tcontinue\n\t\t}\n\n\t\trow := &models.Row{Name: k}\n\n\t\trow.Columns = diags[k].Columns\n\t\trow.Values = diags[k].Rows\n\t\trows = append(rows, row)\n\t}\n\treturn rows, nil\n}\n\nfunc (e *StatementExecutor) executeShowGrantsForUserStatement(q *influxql.ShowGrantsForUserStatement) (models.Rows, error) {\n\tpriv, err := e.MetaClient.UserPrivileges(q.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trow := &models.Row{Columns: []string{\"database\", \"privilege\"}}\n\tfor d, p := range priv {\n\t\trow.Values = append(row.Values, []interface{}{d, p.String()})\n\t}\n\treturn []*models.Row{row}, nil\n}\n\nfunc (e *StatementExecutor) 
executeShowMeasurementsStatement(q *influxql.ShowMeasurementsStatement, ctx *influxql.ExecutionContext) error {\n\tif q.Database == \"\" {\n\t\treturn ErrDatabaseNameRequired\n\t}\n\n\tnames, err := e.TSDBStore.MeasurementNames(q.Database, q.Condition)\n\tif err != nil || len(names) == 0 {\n\t\treturn ctx.Send(&influxql.Result{\n\t\t\tStatementID: ctx.StatementID,\n\t\t\tErr:         err,\n\t\t})\n\t}\n\n\tif q.Offset > 0 {\n\t\tif q.Offset >= len(names) {\n\t\t\tnames = nil\n\t\t} else {\n\t\t\tnames = names[q.Offset:]\n\t\t}\n\t}\n\n\tif q.Limit > 0 {\n\t\tif q.Limit < len(names) {\n\t\t\tnames = names[:q.Limit]\n\t\t}\n\t}\n\n\tvalues := make([][]interface{}, len(names))\n\tfor i, name := range names {\n\t\tvalues[i] = []interface{}{string(name)}\n\t}\n\n\tif len(values) == 0 {\n\t\treturn ctx.Send(&influxql.Result{\n\t\t\tStatementID: ctx.StatementID,\n\t\t})\n\t}\n\n\treturn ctx.Send(&influxql.Result{\n\t\tStatementID: ctx.StatementID,\n\t\tSeries: []*models.Row{{\n\t\t\tName:    \"measurements\",\n\t\t\tColumns: []string{\"name\"},\n\t\t\tValues:  values,\n\t\t}},\n\t})\n}\n\nfunc (e *StatementExecutor) executeShowRetentionPoliciesStatement(q *influxql.ShowRetentionPoliciesStatement) (models.Rows, error) {\n\tif q.Database == \"\" {\n\t\treturn nil, ErrDatabaseNameRequired\n\t}\n\n\tdi := e.MetaClient.Database(q.Database)\n\tif di == nil {\n\t\treturn nil, influxdb.ErrDatabaseNotFound(q.Database)\n\t}\n\n\trow := &models.Row{Columns: []string{\"name\", \"duration\", \"shardGroupDuration\", \"replicaN\", \"default\"}}\n\tfor _, rpi := range di.RetentionPolicies {\n\t\trow.Values = append(row.Values, []interface{}{rpi.Name, rpi.Duration.String(), rpi.ShardGroupDuration.String(), rpi.ReplicaN, di.DefaultRetentionPolicy == rpi.Name})\n\t}\n\treturn []*models.Row{row}, nil\n}\n\nfunc (e *StatementExecutor) executeShowShardsStatement(stmt *influxql.ShowShardsStatement) (models.Rows, error) {\n\tdis := e.MetaClient.Databases()\n\n\trows := []*models.Row{}\n\tfor _, 
di := range dis {\n\t\trow := &models.Row{Columns: []string{\"id\", \"database\", \"retention_policy\", \"shard_group\", \"start_time\", \"end_time\", \"expiry_time\", \"owners\"}, Name: di.Name}\n\t\tfor _, rpi := range di.RetentionPolicies {\n\t\t\tfor _, sgi := range rpi.ShardGroups {\n\t\t\t\t// Shards associated with deleted shard groups are effectively deleted.\n\t\t\t\t// Don't list them.\n\t\t\t\tif sgi.Deleted() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfor _, si := range sgi.Shards {\n\t\t\t\t\townerIDs := make([]uint64, len(si.Owners))\n\t\t\t\t\tfor i, owner := range si.Owners {\n\t\t\t\t\t\townerIDs[i] = owner.NodeID\n\t\t\t\t\t}\n\n\t\t\t\t\trow.Values = append(row.Values, []interface{}{\n\t\t\t\t\t\tsi.ID,\n\t\t\t\t\t\tdi.Name,\n\t\t\t\t\t\trpi.Name,\n\t\t\t\t\t\tsgi.ID,\n\t\t\t\t\t\tsgi.StartTime.UTC().Format(time.RFC3339),\n\t\t\t\t\t\tsgi.EndTime.UTC().Format(time.RFC3339),\n\t\t\t\t\t\tsgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339),\n\t\t\t\t\t\tjoinUint64(ownerIDs),\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\trows = append(rows, row)\n\t}\n\treturn rows, nil\n}\n\nfunc (e *StatementExecutor) executeShowShardGroupsStatement(stmt *influxql.ShowShardGroupsStatement) (models.Rows, error) {\n\tdis := e.MetaClient.Databases()\n\n\trow := &models.Row{Columns: []string{\"id\", \"database\", \"retention_policy\", \"start_time\", \"end_time\", \"expiry_time\"}, Name: \"shard groups\"}\n\tfor _, di := range dis {\n\t\tfor _, rpi := range di.RetentionPolicies {\n\t\t\tfor _, sgi := range rpi.ShardGroups {\n\t\t\t\t// Shards associated with deleted shard groups are effectively deleted.\n\t\t\t\t// Don't list them.\n\t\t\t\tif sgi.Deleted() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\trow.Values = append(row.Values, 
[]interface{}{\n\t\t\t\t\tsgi.ID,\n\t\t\t\t\tdi.Name,\n\t\t\t\t\trpi.Name,\n\t\t\t\t\tsgi.StartTime.UTC().Format(time.RFC3339),\n\t\t\t\t\tsgi.EndTime.UTC().Format(time.RFC3339),\n\t\t\t\t\tsgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339),\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn []*models.Row{row}, nil\n}\n\nfunc (e *StatementExecutor) executeShowStatsStatement(stmt *influxql.ShowStatsStatement) (models.Rows, error) {\n\tstats, err := e.Monitor.Statistics(nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar rows []*models.Row\n\tfor _, stat := range stats {\n\t\tif stmt.Module != \"\" && stat.Name != stmt.Module {\n\t\t\tcontinue\n\t\t}\n\t\trow := &models.Row{Name: stat.Name, Tags: stat.Tags}\n\n\t\tvalues := make([]interface{}, 0, len(stat.Values))\n\t\tfor _, k := range stat.ValueNames() {\n\t\t\trow.Columns = append(row.Columns, k)\n\t\t\tvalues = append(values, stat.Values[k])\n\t\t}\n\t\trow.Values = [][]interface{}{values}\n\t\trows = append(rows, row)\n\t}\n\treturn rows, nil\n}\n\nfunc (e *StatementExecutor) executeShowSubscriptionsStatement(stmt *influxql.ShowSubscriptionsStatement) (models.Rows, error) {\n\tdis := e.MetaClient.Databases()\n\n\trows := []*models.Row{}\n\tfor _, di := range dis {\n\t\trow := &models.Row{Columns: []string{\"retention_policy\", \"name\", \"mode\", \"destinations\"}, Name: di.Name}\n\t\tfor _, rpi := range di.RetentionPolicies {\n\t\t\tfor _, si := range rpi.Subscriptions {\n\t\t\t\trow.Values = append(row.Values, []interface{}{rpi.Name, si.Name, si.Mode, si.Destinations})\n\t\t\t}\n\t\t}\n\t\tif len(row.Values) > 0 {\n\t\t\trows = append(rows, row)\n\t\t}\n\t}\n\treturn rows, nil\n}\n\nfunc (e *StatementExecutor) executeShowTagValues(q *influxql.ShowTagValuesStatement, ctx *influxql.ExecutionContext) error {\n\tif q.Database == \"\" {\n\t\treturn ErrDatabaseNameRequired\n\t}\n\n\ttagValues, err := e.TSDBStore.TagValues(q.Database, q.Condition)\n\tif err != nil {\n\t\treturn 
ctx.Send(&influxql.Result{\n\t\t\tStatementID: ctx.StatementID,\n\t\t\tErr:         err,\n\t\t})\n\t}\n\n\temitted := false\n\tfor _, m := range tagValues {\n\t\tvalues := m.Values\n\n\t\tif q.Offset > 0 {\n\t\t\tif q.Offset >= len(values) {\n\t\t\t\tvalues = nil\n\t\t\t} else {\n\t\t\t\tvalues = values[q.Offset:]\n\t\t\t}\n\t\t}\n\n\t\tif q.Limit > 0 {\n\t\t\tif q.Limit < len(values) {\n\t\t\t\tvalues = values[:q.Limit]\n\t\t\t}\n\t\t}\n\n\t\tif len(values) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\trow := &models.Row{\n\t\t\tName:    m.Measurement,\n\t\t\tColumns: []string{\"key\", \"value\"},\n\t\t\tValues:  make([][]interface{}, len(values)),\n\t\t}\n\t\tfor i, v := range values {\n\t\t\trow.Values[i] = []interface{}{v.Key, v.Value}\n\t\t}\n\n\t\tif err := ctx.Send(&influxql.Result{\n\t\t\tStatementID: ctx.StatementID,\n\t\t\tSeries:      []*models.Row{row},\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\temitted = true\n\t}\n\n\t// Ensure at least one result is emitted.\n\tif !emitted {\n\t\treturn ctx.Send(&influxql.Result{\n\t\t\tStatementID: ctx.StatementID,\n\t\t})\n\t}\n\treturn nil\n}\n\nfunc (e *StatementExecutor) executeShowUsersStatement(q *influxql.ShowUsersStatement) (models.Rows, error) {\n\trow := &models.Row{Columns: []string{\"user\", \"admin\"}}\n\tfor _, ui := range e.MetaClient.Users() {\n\t\trow.Values = append(row.Values, []interface{}{ui.Name, ui.Admin})\n\t}\n\treturn []*models.Row{row}, nil\n}\n\n// BufferedPointsWriter adds buffering to a pointsWriter so that SELECT INTO queries\n// write their points to the destination in batches.\ntype BufferedPointsWriter struct {\n\tw               pointsWriter\n\tbuf             []models.Point\n\tdatabase        string\n\tretentionPolicy string\n}\n\n// NewBufferedPointsWriter returns a new BufferedPointsWriter.\nfunc NewBufferedPointsWriter(w pointsWriter, database, retentionPolicy string, capacity int) *BufferedPointsWriter {\n\treturn &BufferedPointsWriter{\n\t\tw:               w,\n\t\tbuf:       
      make([]models.Point, 0, capacity),\n\t\tdatabase:        database,\n\t\tretentionPolicy: retentionPolicy,\n\t}\n}\n\n// WritePointsInto implements pointsWriter for BufferedPointsWriter.\nfunc (w *BufferedPointsWriter) WritePointsInto(req *IntoWriteRequest) error {\n\t// Make sure we're buffering points only for the expected destination.\n\tif req.Database != w.database || req.RetentionPolicy != w.retentionPolicy {\n\t\treturn fmt.Errorf(\"writer for %s.%s can't write into %s.%s\", w.database, w.retentionPolicy, req.Database, req.RetentionPolicy)\n\t}\n\n\tfor i := 0; i < len(req.Points); {\n\t\t// Get the available space in the buffer.\n\t\tavail := cap(w.buf) - len(w.buf)\n\n\t\t// Calculate number of points to copy into the buffer.\n\t\tn := len(req.Points[i:])\n\t\tif n > avail {\n\t\t\tn = avail\n\t\t}\n\n\t\t// Copy points into buffer.\n\t\tw.buf = append(w.buf, req.Points[i:n+i]...)\n\n\t\t// Advance the index by number of points copied.\n\t\ti += n\n\n\t\t// If buffer is full, flush points to underlying writer.\n\t\tif len(w.buf) == cap(w.buf) {\n\t\t\tif err := w.Flush(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// Flush writes all buffered points to the underlying writer.\nfunc (w *BufferedPointsWriter) Flush() error {\n\tif len(w.buf) == 0 {\n\t\treturn nil\n\t}\n\n\tif err := w.w.WritePointsInto(&IntoWriteRequest{\n\t\tDatabase:        w.database,\n\t\tRetentionPolicy: w.retentionPolicy,\n\t\tPoints:          w.buf,\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\t// Clear the buffer.\n\tw.buf = w.buf[:0]\n\n\treturn nil\n}\n\n// Len returns the number of points buffered.\nfunc (w *BufferedPointsWriter) Len() int { return len(w.buf) }\n\n// Cap returns the capacity (in points) of the buffer.\nfunc (w *BufferedPointsWriter) Cap() int { return cap(w.buf) }\n\nfunc (e *StatementExecutor) writeInto(w pointsWriter, stmt *influxql.SelectStatement, row *models.Row) error {\n\tif stmt.Target.Measurement.Database == \"\" 
{\n\t\treturn errNoDatabaseInTarget\n\t}\n\n\t// It might seem a bit weird that this is where we do this, since we will have to\n\t// convert rows back to points. The Executors (both aggregate and raw) are complex\n\t// enough that changing them to write back to the DB is going to be clumsy\n\t//\n\t// it might seem weird to have the write be in the QueryExecutor, but the interweaving of\n\t// limitedRowWriter and ExecuteAggregate/Raw makes it ridiculously hard to make sure that the\n\t// results will be the same as when queried normally.\n\tname := stmt.Target.Measurement.Name\n\tif name == \"\" {\n\t\tname = row.Name\n\t}\n\n\tpoints, err := convertRowToPoints(name, row)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := w.WritePointsInto(&IntoWriteRequest{\n\t\tDatabase:        stmt.Target.Measurement.Database,\n\t\tRetentionPolicy: stmt.Target.Measurement.RetentionPolicy,\n\t\tPoints:          points,\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nvar errNoDatabaseInTarget = errors.New(\"no database in target\")\n\n// convertRowToPoints will convert a query result Row into Points that can be written back in.\nfunc convertRowToPoints(measurementName string, row *models.Row) ([]models.Point, error) {\n\t// figure out which parts of the result are the time and which are the fields\n\ttimeIndex := -1\n\tfieldIndexes := make(map[string]int)\n\tfor i, c := range row.Columns {\n\t\tif c == \"time\" {\n\t\t\ttimeIndex = i\n\t\t} else {\n\t\t\tfieldIndexes[c] = i\n\t\t}\n\t}\n\n\tif timeIndex == -1 {\n\t\treturn nil, errors.New(\"error finding time index in result\")\n\t}\n\n\tpoints := make([]models.Point, 0, len(row.Values))\n\tfor _, v := range row.Values {\n\t\tvals := make(map[string]interface{})\n\t\tfor fieldName, fieldIndex := range fieldIndexes {\n\t\t\tval := v[fieldIndex]\n\t\t\tif val != nil {\n\t\t\t\tvals[fieldName] = v[fieldIndex]\n\t\t\t}\n\t\t}\n\n\t\tp, err := models.NewPoint(measurementName, models.NewTags(row.Tags), vals, 
v[timeIndex].(time.Time))\n\t\tif err != nil {\n\t\t\t// Drop points that can't be stored\n\t\t\tcontinue\n\t\t}\n\n\t\tpoints = append(points, p)\n\t}\n\n\treturn points, nil\n}\n\n// NormalizeStatement adds a default database and policy to the measurements in statement.\nfunc (e *StatementExecutor) NormalizeStatement(stmt influxql.Statement, defaultDatabase string) (err error) {\n\tinfluxql.WalkFunc(stmt, func(node influxql.Node) {\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tswitch node := node.(type) {\n\t\tcase *influxql.ShowRetentionPoliciesStatement:\n\t\t\tif node.Database == \"\" {\n\t\t\t\tnode.Database = defaultDatabase\n\t\t\t}\n\t\tcase *influxql.ShowMeasurementsStatement:\n\t\t\tif node.Database == \"\" {\n\t\t\t\tnode.Database = defaultDatabase\n\t\t\t}\n\t\tcase *influxql.ShowTagValuesStatement:\n\t\t\tif node.Database == \"\" {\n\t\t\t\tnode.Database = defaultDatabase\n\t\t\t}\n\t\tcase *influxql.Measurement:\n\t\t\tswitch stmt.(type) {\n\t\t\tcase *influxql.DropSeriesStatement, *influxql.DeleteSeriesStatement:\n\t\t\t// DB and RP not supported by these statements so don't rewrite into invalid\n\t\t\t// statements\n\t\t\tdefault:\n\t\t\t\terr = e.normalizeMeasurement(node, defaultDatabase)\n\t\t\t}\n\t\t}\n\t})\n\treturn\n}\n\nfunc (e *StatementExecutor) normalizeMeasurement(m *influxql.Measurement, defaultDatabase string) error {\n\t// Targets (measurements in an INTO clause) can have blank names, which means it will be\n\t// the same as the measurement name it came from in the FROM clause.\n\tif !m.IsTarget && m.Name == \"\" && m.Regex == nil {\n\t\treturn errors.New(\"invalid measurement\")\n\t}\n\n\t// Measurement does not have an explicit database? 
Insert default.\n\tif m.Database == \"\" {\n\t\tm.Database = defaultDatabase\n\t}\n\n\t// The database must now be specified by this point.\n\tif m.Database == \"\" {\n\t\treturn ErrDatabaseNameRequired\n\t}\n\n\t// Find database.\n\tdi := e.MetaClient.Database(m.Database)\n\tif di == nil {\n\t\treturn influxdb.ErrDatabaseNotFound(m.Database)\n\t}\n\n\t// If no retention policy was specified, use the default.\n\tif m.RetentionPolicy == \"\" {\n\t\tif di.DefaultRetentionPolicy == \"\" {\n\t\t\treturn fmt.Errorf(\"default retention policy not set for: %s\", di.Name)\n\t\t}\n\t\tm.RetentionPolicy = di.DefaultRetentionPolicy\n\t}\n\n\treturn nil\n}\n\n// IntoWriteRequest is a partial copy of cluster.WriteRequest\ntype IntoWriteRequest struct {\n\tDatabase        string\n\tRetentionPolicy string\n\tPoints          []models.Point\n}\n\n// TSDBStore is an interface for accessing the time series data store.\ntype TSDBStore interface {\n\tCreateShard(database, policy string, shardID uint64, enabled bool) error\n\tWriteToShard(shardID uint64, points []models.Point) error\n\n\tRestoreShard(id uint64, r io.Reader) error\n\tBackupShard(id uint64, since time.Time, w io.Writer) error\n\n\tDeleteDatabase(name string) error\n\tDeleteMeasurement(database, name string) error\n\tDeleteRetentionPolicy(database, name string) error\n\tDeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error\n\tDeleteShard(id uint64) error\n\n\tMeasurementNames(database string, cond influxql.Expr) ([][]byte, error)\n\tTagValues(database string, cond influxql.Expr) ([]tsdb.TagValues, error)\n}\n\nvar _ TSDBStore = LocalTSDBStore{}\n\n// LocalTSDBStore embeds a tsdb.Store and implements IteratorCreator\n// to satisfy the TSDBStore interface.\ntype LocalTSDBStore struct {\n\t*tsdb.Store\n}\n\n// ShardIteratorCreator is an interface for creating an IteratorCreator to access a specific shard.\ntype ShardIteratorCreator interface {\n\tShardIteratorCreator(id uint64) 
influxql.IteratorCreator\n}\n\n// joinUint64 returns a comma-delimited string of uint64 numbers.\nfunc joinUint64(a []uint64) string {\n\tvar buf bytes.Buffer\n\tfor i, x := range a {\n\t\tbuf.WriteString(strconv.FormatUint(x, 10))\n\t\tif i < len(a)-1 {\n\t\t\tbuf.WriteRune(',')\n\t\t}\n\t}\n\treturn buf.String()\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/coordinator/statement_executor_test.go",
    "content": "package coordinator_test\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n\t\"github.com/influxdata/influxdb/coordinator\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/internal\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t\"github.com/uber-go/zap\"\n)\n\nconst (\n\t// DefaultDatabase is the default database name used in tests.\n\tDefaultDatabase = \"db0\"\n\n\t// DefaultRetentionPolicy is the default retention policy name used in tests.\n\tDefaultRetentionPolicy = \"rp0\"\n)\n\n// Ensure query executor can execute a simple SELECT statement.\nfunc TestQueryExecutor_ExecuteQuery_SelectStatement(t *testing.T) {\n\te := DefaultQueryExecutor()\n\n\t// The meta client should return a single shard owned by the local node.\n\te.MetaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) {\n\t\treturn []meta.ShardGroupInfo{\n\t\t\t{ID: 1, Shards: []meta.ShardInfo{\n\t\t\t\t{ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}},\n\t\t\t}},\n\t\t}, nil\n\t}\n\n\t// The TSDB store should return an IteratorCreator for shard.\n\t// This IteratorCreator returns a single iterator with \"value\" in the aux fields.\n\te.TSDBStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup {\n\t\tif !reflect.DeepEqual(ids, []uint64{100}) {\n\t\t\tt.Fatalf(\"unexpected shard ids: %v\", ids)\n\t\t}\n\n\t\tvar sh MockShard\n\t\tsh.CreateIteratorFn = func(m string, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}},\n\t\t\t\t{Name: \"cpu\", Time: int64(1 * time.Second), Aux: []interface{}{float64(200)}},\n\t\t\t}}, 
nil\n\t\t}\n\t\tsh.FieldDimensionsFn = func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {\n\t\t\tif !reflect.DeepEqual(measurements, []string{\"cpu\"}) {\n\t\t\t\tt.Fatalf(\"unexpected source: %#v\", measurements)\n\t\t\t}\n\t\t\treturn map[string]influxql.DataType{\"value\": influxql.Float}, nil, nil\n\t\t}\n\t\treturn &sh\n\t}\n\n\t// Verify all results from the query.\n\tif a := ReadAllResults(e.ExecuteQuery(`SELECT * FROM cpu`, \"db0\", 0)); !reflect.DeepEqual(a, []*influxql.Result{\n\t\t{\n\t\t\tStatementID: 0,\n\t\t\tSeries: []*models.Row{{\n\t\t\t\tName:    \"cpu\",\n\t\t\t\tColumns: []string{\"time\", \"value\"},\n\t\t\t\tValues: [][]interface{}{\n\t\t\t\t\t{time.Unix(0, 0).UTC(), float64(100)},\n\t\t\t\t\t{time.Unix(1, 0).UTC(), float64(200)},\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t}) {\n\t\tt.Fatalf(\"unexpected results: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure query executor can enforce a maximum bucket selection count.\nfunc TestQueryExecutor_ExecuteQuery_MaxSelectBucketsN(t *testing.T) {\n\te := DefaultQueryExecutor()\n\te.StatementExecutor.MaxSelectBucketsN = 3\n\n\t// The meta client should return a single shards on the local node.\n\te.MetaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) {\n\t\treturn []meta.ShardGroupInfo{\n\t\t\t{ID: 1, Shards: []meta.ShardInfo{\n\t\t\t\t{ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}},\n\t\t\t}},\n\t\t}, nil\n\t}\n\n\te.TSDBStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup {\n\t\tif !reflect.DeepEqual(ids, []uint64{100}) {\n\t\t\tt.Fatalf(\"unexpected shard ids: %v\", ids)\n\t\t}\n\n\t\tvar sh MockShard\n\t\tsh.CreateIteratorFn = func(m string, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\t\treturn &FloatIterator{\n\t\t\t\tPoints: []influxql.FloatPoint{{Name: \"cpu\", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}}},\n\t\t\t}, 
nil\n\t\t}\n\t\tsh.FieldDimensionsFn = func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {\n\t\t\tif !reflect.DeepEqual(measurements, []string{\"cpu\"}) {\n\t\t\t\tt.Fatalf(\"unexpected source: %#v\", measurements)\n\t\t\t}\n\t\t\treturn map[string]influxql.DataType{\"value\": influxql.Float}, nil, nil\n\t\t}\n\t\treturn &sh\n\t}\n\n\t// Verify all results from the query.\n\tif a := ReadAllResults(e.ExecuteQuery(`SELECT count(value) FROM cpu WHERE time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:35Z' GROUP BY time(10s)`, \"db0\", 0)); !reflect.DeepEqual(a, []*influxql.Result{\n\t\t{\n\t\t\tStatementID: 0,\n\t\t\tErr:         errors.New(\"max-select-buckets limit exceeded: (4/3)\"),\n\t\t},\n\t}) {\n\t\tt.Fatalf(\"unexpected results: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestStatementExecutor_NormalizeDropSeries(t *testing.T) {\n\tq, err := influxql.ParseQuery(\"DROP SERIES FROM cpu\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error parsing query: %v\", err)\n\t}\n\n\tstmt := q.Statements[0].(*influxql.DropSeriesStatement)\n\n\ts := &coordinator.StatementExecutor{\n\t\tMetaClient: &internal.MetaClientMock{\n\t\t\tDatabaseFn: func(name string) *meta.DatabaseInfo {\n\t\t\t\tt.Fatal(\"meta client should not be called\")\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\tif err := s.NormalizeStatement(stmt, \"foo\"); err != nil {\n\t\tt.Fatalf(\"unexpected error normalizing statement: %v\", err)\n\t}\n\n\tm := stmt.Sources[0].(*influxql.Measurement)\n\tif m.Database != \"\" {\n\t\tt.Fatalf(\"database rewritten when not supposed to: %v\", m.Database)\n\t}\n\tif m.RetentionPolicy != \"\" {\n\t\tt.Fatalf(\"database rewritten when not supposed to: %v\", m.RetentionPolicy)\n\t}\n\n\tif exp, got := \"DROP SERIES FROM cpu\", q.String(); exp != got {\n\t\tt.Fatalf(\"generated query does match parsed: exp %v, got %v\", exp, got)\n\t}\n}\n\nfunc TestStatementExecutor_NormalizeDeleteSeries(t *testing.T) {\n\tq, 
err := influxql.ParseQuery(\"DELETE FROM cpu\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error parsing query: %v\", err)\n\t}\n\n\tstmt := q.Statements[0].(*influxql.DeleteSeriesStatement)\n\n\ts := &coordinator.StatementExecutor{\n\t\tMetaClient: &internal.MetaClientMock{\n\t\t\tDatabaseFn: func(name string) *meta.DatabaseInfo {\n\t\t\t\tt.Fatal(\"meta client should not be called\")\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\tif err := s.NormalizeStatement(stmt, \"foo\"); err != nil {\n\t\tt.Fatalf(\"unexpected error normalizing statement: %v\", err)\n\t}\n\n\tm := stmt.Sources[0].(*influxql.Measurement)\n\tif m.Database != \"\" {\n\t\tt.Fatalf(\"database rewritten when not supposed to: %v\", m.Database)\n\t}\n\tif m.RetentionPolicy != \"\" {\n\t\tt.Fatalf(\"database rewritten when not supposed to: %v\", m.RetentionPolicy)\n\t}\n\n\tif exp, got := \"DELETE FROM cpu\", q.String(); exp != got {\n\t\tt.Fatalf(\"generated query does match parsed: exp %v, got %v\", exp, got)\n\t}\n}\n\ntype mockAuthorizer struct {\n\tAuthorizeDatabaseFn func(influxql.Privilege, string) bool\n}\n\nfunc (a *mockAuthorizer) AuthorizeDatabase(p influxql.Privilege, name string) bool {\n\treturn a.AuthorizeDatabaseFn(p, name)\n}\n\nfunc (m *mockAuthorizer) AuthorizeQuery(database string, query *influxql.Query) error {\n\tpanic(\"fail\")\n}\n\nfunc (m *mockAuthorizer) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool {\n\tpanic(\"fail\")\n}\n\nfunc (m *mockAuthorizer) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool {\n\tpanic(\"fail\")\n}\n\nfunc TestQueryExecutor_ExecuteQuery_ShowDatabases(t *testing.T) {\n\tqe := influxql.NewQueryExecutor()\n\tqe.StatementExecutor = &coordinator.StatementExecutor{\n\t\tMetaClient: &internal.MetaClientMock{\n\t\t\tDatabasesFn: func() []meta.DatabaseInfo {\n\t\t\t\treturn []meta.DatabaseInfo{\n\t\t\t\t\t{Name: \"db1\"}, {Name: \"db2\"}, {Name: \"db3\"}, {Name: 
\"db4\"},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\topt := influxql.ExecutionOptions{\n\t\tAuthorizer: &mockAuthorizer{\n\t\t\tAuthorizeDatabaseFn: func(p influxql.Privilege, name string) bool {\n\t\t\t\treturn name == \"db2\" || name == \"db4\"\n\t\t\t},\n\t\t},\n\t}\n\n\tq, err := influxql.ParseQuery(\"SHOW DATABASES\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tresults := ReadAllResults(qe.ExecuteQuery(q, opt, make(chan struct{})))\n\texp := []*influxql.Result{\n\t\t{\n\t\t\tStatementID: 0,\n\t\t\tSeries: []*models.Row{{\n\t\t\t\tName:    \"databases\",\n\t\t\t\tColumns: []string{\"name\"},\n\t\t\t\tValues: [][]interface{}{\n\t\t\t\t\t{\"db2\"}, {\"db4\"},\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t}\n\tif !reflect.DeepEqual(results, exp) {\n\t\tt.Fatalf(\"unexpected results: exp %s, got %s\", spew.Sdump(exp), spew.Sdump(results))\n\t}\n}\n\n// QueryExecutor is a test wrapper for coordinator.QueryExecutor.\ntype QueryExecutor struct {\n\t*influxql.QueryExecutor\n\n\tMetaClient        MetaClient\n\tTSDBStore         TSDBStore\n\tStatementExecutor *coordinator.StatementExecutor\n\tLogOutput         bytes.Buffer\n}\n\n// NewQueryExecutor returns a new instance of QueryExecutor.\n// This query executor always has a node id of 0.\nfunc NewQueryExecutor() *QueryExecutor {\n\te := &QueryExecutor{\n\t\tQueryExecutor: influxql.NewQueryExecutor(),\n\t}\n\te.StatementExecutor = &coordinator.StatementExecutor{\n\t\tMetaClient: &e.MetaClient,\n\t\tTSDBStore:  &e.TSDBStore,\n\t\tShardMapper: &coordinator.LocalShardMapper{\n\t\t\tMetaClient: &e.MetaClient,\n\t\t\tTSDBStore:  &e.TSDBStore,\n\t\t},\n\t}\n\te.QueryExecutor.StatementExecutor = e.StatementExecutor\n\n\tvar out io.Writer = &e.LogOutput\n\tif testing.Verbose() {\n\t\tout = io.MultiWriter(out, os.Stderr)\n\t}\n\te.QueryExecutor.WithLogger(zap.New(\n\t\tzap.NewTextEncoder(),\n\t\tzap.Output(zap.AddSync(out)),\n\t))\n\n\treturn e\n}\n\n// DefaultQueryExecutor returns a QueryExecutor with a database (db0) and retention policy 
(rp0).\nfunc DefaultQueryExecutor() *QueryExecutor {\n\te := NewQueryExecutor()\n\te.MetaClient.DatabaseFn = DefaultMetaClientDatabaseFn\n\treturn e\n}\n\n// ExecuteQuery parses query and executes against the database.\nfunc (e *QueryExecutor) ExecuteQuery(query, database string, chunkSize int) <-chan *influxql.Result {\n\treturn e.QueryExecutor.ExecuteQuery(MustParseQuery(query), influxql.ExecutionOptions{\n\t\tDatabase:  database,\n\t\tChunkSize: chunkSize,\n\t}, make(chan struct{}))\n}\n\n// TSDBStore is a mockable implementation of coordinator.TSDBStore.\ntype TSDBStore struct {\n\tCreateShardFn  func(database, policy string, shardID uint64, enabled bool) error\n\tWriteToShardFn func(shardID uint64, points []models.Point) error\n\n\tRestoreShardFn func(id uint64, r io.Reader) error\n\tBackupShardFn  func(id uint64, since time.Time, w io.Writer) error\n\n\tDeleteDatabaseFn        func(name string) error\n\tDeleteMeasurementFn     func(database, name string) error\n\tDeleteRetentionPolicyFn func(database, name string) error\n\tDeleteShardFn           func(id uint64) error\n\tDeleteSeriesFn          func(database string, sources []influxql.Source, condition influxql.Expr) error\n\tShardGroupFn            func(ids []uint64) tsdb.ShardGroup\n}\n\nfunc (s *TSDBStore) CreateShard(database, policy string, shardID uint64, enabled bool) error {\n\tif s.CreateShardFn == nil {\n\t\treturn nil\n\t}\n\treturn s.CreateShardFn(database, policy, shardID, enabled)\n}\n\nfunc (s *TSDBStore) WriteToShard(shardID uint64, points []models.Point) error {\n\treturn s.WriteToShardFn(shardID, points)\n}\n\nfunc (s *TSDBStore) RestoreShard(id uint64, r io.Reader) error {\n\treturn s.RestoreShardFn(id, r)\n}\n\nfunc (s *TSDBStore) BackupShard(id uint64, since time.Time, w io.Writer) error {\n\treturn s.BackupShardFn(id, since, w)\n}\n\nfunc (s *TSDBStore) DeleteDatabase(name string) error {\n\treturn s.DeleteDatabaseFn(name)\n}\n\nfunc (s *TSDBStore) DeleteMeasurement(database, name 
string) error {\n\treturn s.DeleteMeasurementFn(database, name)\n}\n\nfunc (s *TSDBStore) DeleteRetentionPolicy(database, name string) error {\n\treturn s.DeleteRetentionPolicyFn(database, name)\n}\n\nfunc (s *TSDBStore) DeleteShard(id uint64) error {\n\treturn s.DeleteShardFn(id)\n}\n\nfunc (s *TSDBStore) DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error {\n\treturn s.DeleteSeriesFn(database, sources, condition)\n}\n\nfunc (s *TSDBStore) ShardGroup(ids []uint64) tsdb.ShardGroup {\n\treturn s.ShardGroupFn(ids)\n}\n\nfunc (s *TSDBStore) Measurements(database string, cond influxql.Expr) ([]string, error) {\n\treturn nil, nil\n}\n\nfunc (s *TSDBStore) MeasurementNames(database string, cond influxql.Expr) ([][]byte, error) {\n\treturn nil, nil\n}\n\nfunc (s *TSDBStore) TagValues(database string, cond influxql.Expr) ([]tsdb.TagValues, error) {\n\treturn nil, nil\n}\n\ntype MockShard struct {\n\tMeasurements      []string\n\tFieldDimensionsFn func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error)\n\tCreateIteratorFn  func(m string, opt influxql.IteratorOptions) (influxql.Iterator, error)\n\tExpandSourcesFn   func(sources influxql.Sources) (influxql.Sources, error)\n}\n\nfunc (sh *MockShard) MeasurementsByRegex(re *regexp.Regexp) []string {\n\tnames := make([]string, 0, len(sh.Measurements))\n\tfor _, name := range sh.Measurements {\n\t\tif re.MatchString(name) {\n\t\t\tnames = append(names, name)\n\t\t}\n\t}\n\treturn names\n}\n\nfunc (sh *MockShard) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {\n\treturn sh.FieldDimensionsFn(measurements)\n}\n\nfunc (sh *MockShard) MapType(measurement, field string) influxql.DataType {\n\tf, d, err := sh.FieldDimensions([]string{measurement})\n\tif err != nil {\n\t\treturn influxql.Unknown\n\t}\n\n\tif typ, ok := f[field]; ok {\n\t\treturn typ\n\t} else if _, ok := 
d[field]; ok {\n\t\treturn influxql.Tag\n\t}\n\treturn influxql.Unknown\n}\n\nfunc (sh *MockShard) CreateIterator(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\treturn sh.CreateIteratorFn(measurement, opt)\n}\n\nfunc (sh *MockShard) ExpandSources(sources influxql.Sources) (influxql.Sources, error) {\n\treturn sh.ExpandSourcesFn(sources)\n}\n\n// MustParseQuery parses s into a query. Panic on error.\nfunc MustParseQuery(s string) *influxql.Query {\n\tq, err := influxql.ParseQuery(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn q\n}\n\n// ReadAllResults reads all results from c and returns as a slice.\nfunc ReadAllResults(c <-chan *influxql.Result) []*influxql.Result {\n\tvar a []*influxql.Result\n\tfor result := range c {\n\t\ta = append(a, result)\n\t}\n\treturn a\n}\n\n// FloatIterator is a represents an iterator that reads from a slice.\ntype FloatIterator struct {\n\tPoints []influxql.FloatPoint\n\tstats  influxql.IteratorStats\n}\n\nfunc (itr *FloatIterator) Stats() influxql.IteratorStats { return itr.stats }\nfunc (itr *FloatIterator) Close() error                  { return nil }\n\n// Next returns the next value and shifts it off the beginning of the points slice.\nfunc (itr *FloatIterator) Next() (*influxql.FloatPoint, error) {\n\tif len(itr.Points) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tv := &itr.Points[0]\n\titr.Points = itr.Points[1:]\n\treturn v, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/errors.go",
    "content": "package influxdb\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n)\n\n// ErrFieldTypeConflict is returned when a new field already exists with a\n// different type.\nvar ErrFieldTypeConflict = errors.New(\"field type conflict\")\n\n// ErrDatabaseNotFound indicates that a database operation failed on the\n// specified database because the specified database does not exist.\nfunc ErrDatabaseNotFound(name string) error { return fmt.Errorf(\"database not found: %s\", name) }\n\n// ErrRetentionPolicyNotFound indicates that the named retention policy could\n// not be found in the database.\nfunc ErrRetentionPolicyNotFound(name string) error {\n\treturn fmt.Errorf(\"retention policy not found: %s\", name)\n}\n\n// IsAuthorizationError indicates whether an error is due to an authorization failure\nfunc IsAuthorizationError(err error) bool {\n\te, ok := err.(interface {\n\t\tAuthorizationFailed() bool\n\t})\n\treturn ok && e.AuthorizationFailed()\n}\n\n// IsClientError indicates whether an error is a known client error.\nfunc IsClientError(err error) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\n\tif strings.HasPrefix(err.Error(), ErrFieldTypeConflict.Error()) {\n\t\treturn true\n\t}\n\n\treturn false\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/etc/burn-in/.rvmrc",
    "content": "rvm use ruby-2.1.0@burn-in --create\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile",
    "content": "source 'https://rubygems.org'\n\ngem \"colorize\"\ngem \"influxdb\"\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/etc/burn-in/Gemfile.lock",
    "content": "GEM\n  remote: https://rubygems.org/\n  specs:\n    colorize (0.6.0)\n    influxdb (0.0.16)\n      json\n    json (1.8.1)\n\nPLATFORMS\n  ruby\n\nDEPENDENCIES\n  colorize\n  influxdb\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/etc/burn-in/burn-in.rb",
    "content": "require \"influxdb\"\nrequire \"colorize\"\nrequire \"benchmark\"\n\nrequire_relative \"log\"\nrequire_relative \"random_gaussian\"\n\nBATCH_SIZE = 10_000\n\nLog.info \"Starting burn-in suite\"\nmaster = InfluxDB::Client.new\nmaster.delete_database(\"burn-in\") rescue nil\nmaster.create_database(\"burn-in\")\nmaster.create_database_user(\"burn-in\", \"user\", \"pass\")\n\nmaster.database = \"burn-in\"\n# master.query \"select * from test1 into test2;\"\n# master.query \"select count(value) from test1 group by time(1m) into test2;\"\n\ninfluxdb = InfluxDB::Client.new \"burn-in\", username: \"user\", password: \"pass\"\n\nLog.success \"Connected to server #{influxdb.host}:#{influxdb.port}\"\n\nLog.log \"Creating RandomGaussian(500, 25)\"\ngaussian = RandomGaussian.new(500, 25)\npoint_count = 0\n\nwhile true\n  Log.log \"Generating 10,000 points..\"\n  points = []\n  BATCH_SIZE.times do |n|\n    points << {value: gaussian.rand.to_i.abs}\n  end\n  point_count += points.length\n\n  Log.info \"Sending points to server..\"\n  begin\n    st = Time.now\n    foo = influxdb.write_point(\"test1\", points)\n    et = Time.now\n    Log.log foo.inspect\n    Log.log \"#{et-st} seconds elapsed\"\n    Log.success \"Write successful.\"\n  rescue => e\n    Log.failure \"Write failed:\"\n    Log.log e\n  end\n  sleep 0.5\n\n  Log.info \"Checking regular points\"\n  st = Time.now\n  response = influxdb.query(\"select count(value) from test1;\")\n  et = Time.now\n\n  Log.log \"#{et-st} seconds elapsed\"\n\n  response_count = response[\"test1\"].first[\"count\"]\n  if point_count == response_count\n    Log.success \"Point counts match: #{point_count} == #{response_count}\"\n  else\n    Log.failure \"Point counts don't match: #{point_count} != #{response_count}\"\n  end\n\n  # Log.info \"Checking continuous query points for test2\"\n  # st = Time.now\n  # response = influxdb.query(\"select count(value) from test2;\")\n  # et = Time.now\n\n  # Log.log \"#{et-st} seconds 
elapsed\"\n\n  # response_count = response[\"test2\"].first[\"count\"]\n  # if point_count == response_count\n    # Log.success \"Point counts match: #{point_count} == #{response_count}\"\n  # else\n    # Log.failure \"Point counts don't match: #{point_count} != #{response_count}\"\n  # end\nend\n\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/etc/burn-in/log.rb",
    "content": "module Log\n  def self.info(msg)\n    print Time.now.strftime(\"%r\") + \" | \"\n    puts msg.to_s.colorize(:yellow)\n  end\n\n  def self.success(msg)\n    print Time.now.strftime(\"%r\") + \" | \"\n    puts msg.to_s.colorize(:green)\n  end\n\n  def self.failure(msg)\n    print Time.now.strftime(\"%r\") + \" | \"\n    puts msg.to_s.colorize(:red)\n  end\n\n  def self.log(msg)\n    print Time.now.strftime(\"%r\") + \" | \"\n    puts msg.to_s\n  end\nend\n\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/etc/burn-in/random_gaussian.rb",
    "content": "class RandomGaussian\n  def initialize(mean, stddev, rand_helper = lambda { Kernel.rand })\n    @rand_helper = rand_helper\n    @mean = mean\n    @stddev = stddev\n    @valid = false\n    @next = 0\n  end\n\n  def rand\n    if @valid then\n      @valid = false\n      return @next\n    else\n      @valid = true\n      x, y = self.class.gaussian(@mean, @stddev, @rand_helper)\n      @next = y\n      return x\n    end\n  end\n\n  private\n  def self.gaussian(mean, stddev, rand)\n    theta = 2 * Math::PI * rand.call\n    rho = Math.sqrt(-2 * Math.log(1 - rand.call))\n    scale = stddev * rho\n    x = mean + scale * Math.cos(theta)\n    y = mean + scale * Math.sin(theta)\n    return x, y\n  end\nend\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/etc/burn-in/random_points.rb",
    "content": "require \"influxdb\"\n\nONE_WEEK_IN_SECONDS = 7*24*60*60\nNUM_POINTS = 10_000\nBATCHES = 100\n\nmaster = InfluxDB::Client.new\nmaster.delete_database(\"ctx\") rescue nil\nmaster.create_database(\"ctx\")\n\ninfluxdb = InfluxDB::Client.new \"ctx\"\ninfluxdb.time_precision = \"s\"\n\nnames = [\"foo\", \"bar\", \"baz\", \"quu\", \"qux\"]\n\nst = Time.now\nBATCHES.times do |m|\n  points = []\n\n  puts \"Writing #{NUM_POINTS} points, time ##{m}..\"\n  NUM_POINTS.times do |n|\n    timestamp = Time.now.to_i - rand(ONE_WEEK_IN_SECONDS)\n    points << {value: names.sample, time: timestamp}\n  end\n\n  influxdb.write_point(\"ct1\", points)\nend\nputs st\nputs Time.now\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/etc/config.sample.toml",
    "content": "### Welcome to the InfluxDB configuration file.\n\n# The values in this file override the default values used by the system if\n# a config option is not specified. The commented out lines are the configuration\n# field and the default value used. Uncommenting a line and changing the value\n# will change the value used at runtime when the process is restarted.\n\n# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com\n# The data includes a random ID, os, arch, version, the number of series and other\n# usage data. No data from user databases is ever transmitted.\n# Change this option to true to disable reporting.\n# reporting-disabled = false\n\n# Bind address to use for the RPC service for backup and restore.\n# bind-address = \"127.0.0.1:8088\"\n\n###\n### [meta]\n###\n### Controls the parameters for the Raft consensus group that stores metadata\n### about the InfluxDB cluster.\n###\n\n[meta]\n  # Where the metadata/raft database is stored\n  dir = \"/var/lib/influxdb/meta\"\n\n  # Automatically create a default retention policy when creating a database.\n  # retention-autocreate = true\n\n  # If log messages are printed for the meta service\n  # logging-enabled = true\n\n###\n### [data]\n###\n### Controls where the actual shard data for InfluxDB lives and how it is\n### flushed from the WAL. \"dir\" may need to be changed to a suitable place\n### for your system, but the WAL settings are an advanced configuration. The\n### defaults should work for most systems.\n###\n\n[data]\n  # The directory where the TSM storage engine stores TSM files.\n  dir = \"/var/lib/influxdb/data\"\n\n  # The directory where the TSM storage engine stores WAL files.\n  wal-dir = \"/var/lib/influxdb/wal\"\n\n  # The amount of time that a write will wait before fsyncing.  A duration\n  # greater than 0 can be used to batch up multiple fsync calls.  This is useful for slower\n  # disks or when WAL write contention is seen.  
A value of 0s fsyncs every write to the WAL.\n  # Values in the range of 0-100ms are recommended for non-SSD disks.\n  # wal-fsync-delay = \"0s\"\n\n\n  # The type of shard index to use for new shards.  The default is an in-memory index that is\n  # recreated at startup.  A value of \"tsi1\" will use a disk based index that supports higher\n  # cardinality datasets.\n  # index-version = \"inmem\"\n\n  # Trace logging provides more verbose output around the tsm engine. Turning\n  # this on can provide more useful output for debugging tsm engine issues.\n  # trace-logging-enabled = false\n\n  # Whether queries should be logged before execution. Very useful for troubleshooting, but will\n  # log any sensitive data contained within a query.\n  # query-log-enabled = true\n\n  # Settings for the TSM engine\n\n  # CacheMaxMemorySize is the maximum size a shard's cache can\n  # reach before it starts rejecting writes.\n  # cache-max-memory-size = 1048576000\n\n  # CacheSnapshotMemorySize is the size at which the engine will\n  # snapshot the cache and write it to a TSM file, freeing up memory\n  # cache-snapshot-memory-size = 26214400\n\n  # CacheSnapshotWriteColdDuration is the length of time at\n  # which the engine will snapshot the cache and write it to\n  # a new TSM file if the shard hasn't received writes or deletes\n  # cache-snapshot-write-cold-duration = \"10m\"\n\n  # CompactFullWriteColdDuration is the duration at which the engine\n  # will compact all TSM files in a shard if it hasn't received a\n  # write or delete\n  # compact-full-write-cold-duration = \"4h\"\n\n  # The maximum number of concurrent full and level compactions that can run at one time.  A\n  # value of 0 results in runtime.GOMAXPROCS(0) used at runtime.  This setting does not apply\n  # to cache snapshotting.\n  # max-concurrent-compactions = 0\n\n  # The maximum series allowed per database before writes are dropped.  This limit can prevent\n  # high cardinality issues at the database level.  
This limit can be disabled by setting it to\n  # 0.\n  # max-series-per-database = 1000000\n\n  # The maximum number of tag values per tag that are allowed before writes are dropped.  This limit\n  # can prevent high cardinality tag values from being written to a measurement.  This limit can be\n  # disabled by setting it to 0.\n  # max-values-per-tag = 100000\n\n###\n### [coordinator]\n###\n### Controls the clustering service configuration.\n###\n\n[coordinator]\n  # The default time a write request will wait until a \"timeout\" error is returned to the caller.\n  # write-timeout = \"10s\"\n\n  # The maximum number of concurrent queries allowed to be executing at one time.  If a query is\n  # executed and exceeds this limit, an error is returned to the caller.  This limit can be disabled\n  # by setting it to 0.\n  # max-concurrent-queries = 0\n\n  # The maximum time a query will is allowed to execute before being killed by the system.  This limit\n  # can help prevent run away queries.  Setting the value to 0 disables the limit.\n  # query-timeout = \"0s\"\n\n  # The time threshold when a query will be logged as a slow query.  This limit can be set to help\n  # discover slow or resource intensive queries.  Setting the value to 0 disables the slow query logging.\n  # log-queries-after = \"0s\"\n\n  # The maximum number of points a SELECT can process.  A value of 0 will make\n  # the maximum point count unlimited.  This will only be checked every 10 seconds so queries will not\n  # be aborted immediately when hitting the limit.\n  # max-select-point = 0\n\n  # The maximum number of series a SELECT can run.  A value of 0 will make the maximum series\n  # count unlimited.\n  # max-select-series = 0\n\n  # The maxium number of group by time bucket a SELECT can create.  
A value of zero will max the maximum\n  # number of buckets unlimited.\n  # max-select-buckets = 0\n\n###\n### [retention]\n###\n### Controls the enforcement of retention policies for evicting old data.\n###\n\n[retention]\n  # Determines whether retention policy enforcement enabled.\n  # enabled = true\n\n  # The interval of time when retention policy enforcement checks run.\n  # check-interval = \"30m\"\n\n###\n### [shard-precreation]\n###\n### Controls the precreation of shards, so they are available before data arrives.\n### Only shards that, after creation, will have both a start- and end-time in the\n### future, will ever be created. Shards are never precreated that would be wholly\n### or partially in the past.\n\n[shard-precreation]\n  # Determines whether shard pre-creation service is enabled.\n  # enabled = true\n\n  # The interval of time when the check to pre-create new shards runs.\n  # check-interval = \"10m\"\n\n  # The default period ahead of the endtime of a shard group that its successor\n  # group is created.\n  # advance-period = \"30m\"\n\n###\n### Controls the system self-monitoring, statistics and diagnostics.\n###\n### The internal database for monitoring data is created automatically if\n### if it does not already exist. The target retention within this database\n### is called 'monitor' and is also created with a retention period of 7 days\n### and a replication factor of 1, if it does not exist. In all cases the\n### this retention policy is configured as the default for the database.\n\n[monitor]\n  # Whether to record statistics internally.\n  # store-enabled = true\n\n  # The destination database for recorded statistics\n  # store-database = \"_internal\"\n\n  # The interval at which to record statistics\n  # store-interval = \"10s\"\n\n###\n### [http]\n###\n### Controls how the HTTP endpoints are configured. 
These are the primary\n### mechanism for getting data into and out of InfluxDB.\n###\n\n[http]\n  # Determines whether HTTP endpoint is enabled.\n  # enabled = true\n\n  # The bind address used by the HTTP service.\n  # bind-address = \":8086\"\n\n  # Determines whether user authentication is enabled over HTTP/HTTPS.\n  # auth-enabled = false\n\n  # The default realm sent back when issuing a basic auth challenge.\n  # realm = \"InfluxDB\"\n\n  # Determines whether HTTP request logging is enabled.\n  # log-enabled = true\n\n  # Determines whether detailed write logging is enabled.\n  # write-tracing = false\n\n  # Determines whether the pprof endpoint is enabled.  This endpoint is used for\n  # troubleshooting and monitoring.\n  # pprof-enabled = true\n\n  # Determines whether HTTPS is enabled.\n  # https-enabled = false\n\n  # The SSL certificate to use when HTTPS is enabled.\n  # https-certificate = \"/etc/ssl/influxdb.pem\"\n\n  # Use a separate private key location.\n  # https-private-key = \"\"\n\n  # The JWT auth shared secret to validate requests using JSON web tokens.\n  # shared-secret = \"\"\n\n  # The default chunk size for result sets that should be chunked.\n  # max-row-limit = 0\n\n  # The maximum number of HTTP connections that may be open at once.  New connections that\n  # would exceed this limit are dropped.  Setting this value to 0 disables the limit.\n  # max-connection-limit = 0\n\n  # Enable http service over unix domain socket\n  # unix-socket-enabled = false\n\n  # The path of the unix domain socket.\n  # bind-socket = \"/var/run/influxdb.sock\"\n\n###\n### [subscriber]\n###\n### Controls the subscriptions, which can be used to fork a copy of all data\n### received by the InfluxDB host.\n###\n\n[subscriber]\n  # Determines whether the subscriber service is enabled.\n  # enabled = true\n\n  # The default timeout for HTTP writes to subscribers.\n  # http-timeout = \"30s\"\n\n  # Allows insecure HTTPS connections to subscribers.  
This is useful when testing with self-\n  # signed certificates.\n  # insecure-skip-verify = false\n\n  # The path to the PEM encoded CA certs file. If the empty string, the default system certs will be used\n  # ca-certs = \"\"\n\n  # The number of writer goroutines processing the write channel.\n  # write-concurrency = 40\n\n  # The number of in-flight writes buffered in the write channel.\n  # write-buffer-size = 1000\n\n\n###\n### [[graphite]]\n###\n### Controls one or many listeners for Graphite data.\n###\n\n[[graphite]]\n  # Determines whether the graphite endpoint is enabled.\n  # enabled = false\n  # database = \"graphite\"\n  # retention-policy = \"\"\n  # bind-address = \":2003\"\n  # protocol = \"tcp\"\n  # consistency-level = \"one\"\n\n  # These next lines control how batching works. You should have this enabled\n  # otherwise you could get dropped metrics or poor performance. Batching\n  # will buffer points in memory if you have many coming in.\n\n  # Flush if this many points get buffered\n  # batch-size = 5000\n\n  # number of batches that may be pending in memory\n  # batch-pending = 10\n\n  # Flush at least this often even if we haven't hit buffer limit\n  # batch-timeout = \"1s\"\n\n  # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.\n  # udp-read-buffer = 0\n\n  ### This string joins multiple matching 'measurement' values providing more control over the final measurement name.\n  # separator = \".\"\n\n  ### Default tags that will be added to all metrics.  These can be overridden at the template level\n  ### or by tags extracted from metric\n  # tags = [\"region=us-east\", \"zone=1c\"]\n\n  ### Each template line requires a template pattern.  It can have an optional\n  ### filter before the template and separated by spaces.  It can also have optional extra\n  ### tags following the template.  Multiple tags should be separated by commas and no spaces\n  ### similar to the line protocol format.  
There can be only one default template.\n  # templates = [\n  #   \"*.app env.service.resource.measurement\",\n  #   # Default template\n  #   \"server.*\",\n  # ]\n\n###\n### [collectd]\n###\n### Controls one or many listeners for collectd data.\n###\n\n[[collectd]]\n  # enabled = false\n  # bind-address = \":25826\"\n  # database = \"collectd\"\n  # retention-policy = \"\"\n  #\n  # The collectd service supports either scanning a directory for multiple types\n  # db files, or specifying a single db file.\n  # typesdb = \"/usr/local/share/collectd\"\n  #\n  # security-level = \"none\"\n  # auth-file = \"/etc/collectd/auth_file\"\n\n  # These next lines control how batching works. You should have this enabled\n  # otherwise you could get dropped metrics or poor performance. Batching\n  # will buffer points in memory if you have many coming in.\n\n  # Flush if this many points get buffered\n  # batch-size = 5000\n\n  # Number of batches that may be pending in memory\n  # batch-pending = 10\n\n  # Flush at least this often even if we haven't hit buffer limit\n  # batch-timeout = \"10s\"\n\n  # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.\n  # read-buffer = 0\n\n###\n### [opentsdb]\n###\n### Controls one or many listeners for OpenTSDB data.\n###\n\n[[opentsdb]]\n  # enabled = false\n  # bind-address = \":4242\"\n  # database = \"opentsdb\"\n  # retention-policy = \"\"\n  # consistency-level = \"one\"\n  # tls-enabled = false\n  # certificate= \"/etc/ssl/influxdb.pem\"\n\n  # Log an error for every malformed point.\n  # log-point-errors = true\n\n  # These next lines control how batching works. You should have this enabled\n  # otherwise you could get dropped metrics or poor performance. 
Only points\n  # metrics received over the telnet protocol undergo batching.\n\n  # Flush if this many points get buffered\n  # batch-size = 1000\n\n  # Number of batches that may be pending in memory\n  # batch-pending = 5\n\n  # Flush at least this often even if we haven't hit buffer limit\n  # batch-timeout = \"1s\"\n\n###\n### [[udp]]\n###\n### Controls the listeners for InfluxDB line protocol data via UDP.\n###\n\n[[udp]]\n  # enabled = false\n  # bind-address = \":8089\"\n  # database = \"udp\"\n  # retention-policy = \"\"\n\n  # These next lines control how batching works. You should have this enabled\n  # otherwise you could get dropped metrics or poor performance. Batching\n  # will buffer points in memory if you have many coming in.\n\n  # Flush if this many points get buffered\n  # batch-size = 5000\n\n  # Number of batches that may be pending in memory\n  # batch-pending = 10\n\n  # Will flush at least this often even if we haven't hit buffer limit\n  # batch-timeout = \"1s\"\n\n  # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.\n  # read-buffer = 0\n\n###\n### [continuous_queries]\n###\n### Controls how continuous queries are run within InfluxDB.\n###\n\n[continuous_queries]\n  # Determines whether the continuous query service is enabled.\n  # enabled = true\n\n  # Controls whether queries are logged when executed by the CQ service.\n  # log-enabled = true\n\n  # interval for how often continuous queries will be checked if they need to run\n  # run-interval = \"1s\"\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/gobuild.sh",
    "content": "#!/bin/bash\n# This script run inside the Dockerfile_build_ubuntu64_git container and\n# gets the latests Go source code and compiles it.\n# Then passes control over to the normal build.py script\n\nset -e\n\ncd /go/src\ngit fetch --all\ngit checkout $GO_CHECKOUT\n# Merge in recent changes if we are on a branch\n# if we checked out a tag just ignore the error\ngit pull || true\n./make.bash\n\n# Run normal build.py\ncd \"$PROJECT_DIR\"\nexec ./build.py \"$@\"\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/importer/README.md",
    "content": "# Import/Export\n\n## Exporting from 0.8.9\n\nVersion `0.8.9` of InfluxDB adds support to export your data to a format that can be imported into `0.9.3` and later.\n\nNote that `0.8.9` can be found here:\n\n```\nhttp://get.influxdb.org.s3.amazonaws.com/influxdb_0.8.9_amd64.deb\nhttp://get.influxdb.org.s3.amazonaws.com/influxdb-0.8.9-1.x86_64.rpm\n```\n\n### Design\n\n`0.8.9` exports raw data to a flat file that includes two sections, `DDL` and `DML`.  You can choose to export them independently (see below).\n\nThe `DDL` section contains the sql commands to create databases and retention policies.  the `DML` section is [line protocol](https://github.com/influxdata/influxdb/blob/master/tsdb/README.md) and can be directly posted to the [http endpoint](https://docs.influxdata.com/influxdb/v0.10/guides/writing_data) in `0.10`.  Remember that batching is important and we don't recommend batch sizes over 5k without further testing.\n\nExample export file:\n```\n# DDL\nCREATE DATABASE db0\nCREATE DATABASE db1\nCREATE RETENTION POLICY rp1 ON db1 DURATION 1h REPLICATION 1\n\n# DML\n# CONTEXT-DATABASE:db0\n# CONTEXT-RETENTION-POLICY:autogen\ncpu,host=server1 value=33.3 1464026335000000000\ncpu,host=server1 value=43.3 1464026395000000000\ncpu,host=server1 value=63.3 1464026575000000000\n\n# CONTEXT-DATABASE:db1\n# CONTEXT-RETENTION-POLICY:rp1\ncpu,host=server1 value=73.3 1464026335000000000\ncpu,host=server1 value=83.3 1464026395000000000\ncpu,host=server1 value=93.3 1464026575000000000\n```\n\nYou need to specify a database and shard group when you export.\n\nTo list out your shards, use the following http endpoint:\n\n`/cluster/shard_spaces`\n\nexample:\n```sh\nhttp://username:password@localhost:8086/cluster/shard_spaces\n```\n\nThen, to export a database with then name \"metrics\" and a shard space with the name \"default\", issue the following curl command:\n\n```sh\ncurl -o export 
http://username:password@localhost:8086/export/metrics/default\n```\n\nCompression is supported, and will result in a significantly smaller file size.\n\nUse the following command for compression:\n```sh\ncurl -o export.gz --compressed http://username:password@localhost:8086/export/metrics/default\n```\n\nYou can also export just the `DDL` with this option:\n\n```sh\ncurl -o export.ddl http://username:password@localhost:8086/export/metrics/default?l=ddl\n```\n\nOr just the `DML` with this option:\n\n```sh\ncurl -o export.dml.gz --compressed http://username:password@localhost:8086/export/metrics/default?l=dml\n```\n\n### Assumptions\n\n- Series name mapping follows these [guidelines](https://docs.influxdata.com/influxdb/v0.8/advanced_topics/schema_design/)\n- Database name will map directly from `0.8` to `0.10`\n- Shard Spaces map to Retention Policies\n- Shard Space Duration is ignored, as in `0.10` we determine shard size automatically\n- Regex is used to match the correct series names and only exports that data for the database\n- Duration becomes the new Retention Policy duration\n\n- Users are not migrated due to inability to get passwords.  Anyone using users will need to manually set these back up in `0.10`\n\n### Upgrade Recommendations\n\nIt's recommended that you upgrade to `0.9.3` or later first and have all your writes going there.  Then, on the `0.8.X` instances, upgrade to `0.8.9`.\n\nIt is important that when exporting you change your config to allow for the http endpoints not timing out.  To do so, make this change in your config:\n\n```toml\n# Configure the http api\n[api]\nread-timeout = \"0s\"\n```\n\n### Exceptions\n\nIf a series can't be exported to tags based on the guidelines mentioned above,\nwe will insert the entire series name as the measurement name.  
You can either \nallow that to import into the new InfluxDB instance, or you can do your own \ndata massage on it prior to importing it.\n\nFor example, if you have the following series name:\n\n```\nmetric.disk.c.host.server01.single\n```\n\nIt will export exactly that as the measurement name and no tags:\n\n```\nmetric.disk.c.host.server01.single\n```\n\n### Export Metrics\n\nWhen you export, you will now get comments inline in the `DML`:\n\n`# Found 999 Series for export`\n\nAs well as count totals for each series exported:\n\n`# Series FOO - Points Exported: 999`\n\nWith a total at the bottom:\n\n`# Points Exported: 999`\n\nYou can grep the file that was exported at the end to get all the export metrics:\n\n`cat myexport | grep Exported`\n\n## Importing\n\nVersion `0.9.3` of InfluxDB adds support to import your data from version `0.8.9`.\n\n## Caveats\n\nFor the export/import to work, all requisites have to be met.  For export, all series names in `0.8` should be in the following format:\n\n```\n<tagName>.<tagValue>.<tagName>.<tagValue>.<measurement>\n```\nfor example:\n```\naz.us-west-1.host.serverA.cpu\n```\nor any number of tags \n```\nbuilding.2.temperature\n```\n\nAdditionally, the fields need to have a consistent type (all float64, int64, etc) for every write in `0.8`.  
Otherwise they have the potential to fail writes in the import.\nSee below for more information.\n\n## Running the import command\n \n To import via the cli, you can specify the following command:\n\n ```sh\n influx -import -path=metrics-default.gz -compressed\n ```\n\n If the file is not compressed you can issue it without the `-compressed` flag:\n\n ```sh\n influx -import -path=metrics-default\n ```\n\n To redirect failed import lines to another file, run this command:\n\n ```sh\n influx -import -path=metrics-default.gz -compressed > failures\n ```\n\n The import will use the line protocol in batches of 5,000 lines per batch when sending data to the server.\n \n### Throttling the import\n \n If you need to throttle the import so the database has time to ingest, you can use the `-pps` flag.  This will limit the points per second that will be sent to the server.\n \n  ```sh\n influx -import -path=metrics-default.gz -compressed -pps 50000 > failures\n ```\n \n Which is stating that you don't want MORE than 50,000 points per second to write to the database. Due to the processing that is taking place however, you will likely never get exactly 50,000 pps, more like 35,000 pps, etc. \n\n## Understanding the results of the import\n\nDuring the import, a status message will write out for every 100,000 points imported and report stats on the progress of the import:\n\n```\n2015/08/21 14:48:01 Processed 3100000 lines.  Time elapsed: 56.740578415s.  
Points per second (PPS): 54634\n```\n\n The batch will give some basic stats when finished:\n\n ```sh\n 2015/07/29 23:15:20 Processed 2 commands\n 2015/07/29 23:15:20 Processed 70207923 inserts\n 2015/07/29 23:15:20 Failed 29785000 inserts\n ```\n\n Most inserts fail due to the following types of error:\n\n ```sh\n 2015/07/29 22:18:28 error writing batch:  write failed: field type conflict: input field \"value\" on measurement \"metric\" is type float64, already exists as type integer\n ```\n\n This is due to the fact that in `0.8` a field could get created and saved as int or float types for independent writes.  In `0.9` and greater the field has to have a consistent type.\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/importer/v8/importer.go",
    "content": "// Package v8 contains code for importing data from 0.8 instances of InfluxDB.\npackage v8 // import \"github.com/influxdata/influxdb/importer/v8\"\n\nimport (\n\t\"bufio\"\n\t\"compress/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/client\"\n)\n\nconst batchSize = 5000\n\n// Config is the config used to initialize a Importer importer\ntype Config struct {\n\tPath       string // Path to import data.\n\tVersion    string\n\tCompressed bool // Whether import data is gzipped.\n\tPPS        int  // points per second importer imports with.\n\n\tclient.Config\n}\n\n// NewConfig returns an initialized *Config\nfunc NewConfig() Config {\n\treturn Config{Config: client.NewConfig()}\n}\n\n// Importer is the importer used for importing 0.8 data\ntype Importer struct {\n\tclient                *client.Client\n\tdatabase              string\n\tretentionPolicy       string\n\tconfig                Config\n\tbatch                 []string\n\ttotalInserts          int\n\tfailedInserts         int\n\ttotalCommands         int\n\tthrottlePointsWritten int\n\tlastWrite             time.Time\n\tthrottle              *time.Ticker\n}\n\n// NewImporter will return an intialized Importer struct\nfunc NewImporter(config Config) *Importer {\n\tconfig.UserAgent = fmt.Sprintf(\"influxDB importer/%s\", config.Version)\n\treturn &Importer{\n\t\tconfig: config,\n\t\tbatch:  make([]string, 0, batchSize),\n\t}\n}\n\n// Import processes the specified file in the Config and writes the data to the databases in chunks specified by batchSize\nfunc (i *Importer) Import() error {\n\t// Create a client and try to connect.\n\tcl, err := client.NewClient(i.config.Config)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not create client %s\", err)\n\t}\n\ti.client = cl\n\tif _, _, e := i.client.Ping(); e != nil {\n\t\treturn fmt.Errorf(\"failed to connect to %s\\n\", i.client.Addr())\n\t}\n\n\t// Validate args\n\tif 
i.config.Path == \"\" {\n\t\treturn fmt.Errorf(\"file argument required\")\n\t}\n\n\tdefer func() {\n\t\tif i.totalInserts > 0 {\n\t\t\tlog.Printf(\"Processed %d commands\\n\", i.totalCommands)\n\t\t\tlog.Printf(\"Processed %d inserts\\n\", i.totalInserts)\n\t\t\tlog.Printf(\"Failed %d inserts\\n\", i.failedInserts)\n\t\t}\n\t}()\n\n\t// Open the file\n\tf, err := os.Open(i.config.Path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tvar r io.Reader\n\n\t// If gzipped, wrap in a gzip reader\n\tif i.config.Compressed {\n\t\tgr, err := gzip.NewReader(f)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer gr.Close()\n\t\t// Set the reader to the gzip reader\n\t\tr = gr\n\t} else {\n\t\t// Standard text file so our reader can just be the file\n\t\tr = f\n\t}\n\n\t// Get our reader\n\tscanner := bufio.NewScanner(r)\n\n\t// Process the DDL\n\ti.processDDL(scanner)\n\n\t// Set up our throttle channel.  Since there is effectively no other activity at this point\n\t// the smaller resolution gets us much closer to the requested PPS\n\ti.throttle = time.NewTicker(time.Microsecond)\n\tdefer i.throttle.Stop()\n\n\t// Prime the last write\n\ti.lastWrite = time.Now()\n\n\t// Process the DML\n\ti.processDML(scanner)\n\n\t// Check if we had any errors scanning the file\n\tif err := scanner.Err(); err != nil {\n\t\treturn fmt.Errorf(\"reading standard input: %s\", err)\n\t}\n\n\t// If there were any failed inserts then return an error so that a non-zero\n\t// exit code can be returned.\n\tif i.failedInserts > 0 {\n\t\tplural := \" was\"\n\t\tif i.failedInserts > 1 {\n\t\t\tplural = \"s were\"\n\t\t}\n\n\t\treturn fmt.Errorf(\"%d point%s not inserted\", i.failedInserts, plural)\n\t}\n\n\treturn nil\n}\n\nfunc (i *Importer) processDDL(scanner *bufio.Scanner) {\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\t// If we find the DML token, we are done with DDL\n\t\tif strings.HasPrefix(line, \"# DML\") {\n\t\t\treturn\n\t\t}\n\t\tif strings.HasPrefix(line, 
\"#\") {\n\t\t\tcontinue\n\t\t}\n\t\t// Skip blank lines\n\t\tif strings.TrimSpace(line) == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\ti.queryExecutor(line)\n\t}\n}\n\nfunc (i *Importer) processDML(scanner *bufio.Scanner) {\n\tstart := time.Now()\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tif strings.HasPrefix(line, \"# CONTEXT-DATABASE:\") {\n\t\t\ti.database = strings.TrimSpace(strings.Split(line, \":\")[1])\n\t\t}\n\t\tif strings.HasPrefix(line, \"# CONTEXT-RETENTION-POLICY:\") {\n\t\t\ti.retentionPolicy = strings.TrimSpace(strings.Split(line, \":\")[1])\n\t\t}\n\t\tif strings.HasPrefix(line, \"#\") {\n\t\t\tcontinue\n\t\t}\n\t\t// Skip blank lines\n\t\tif strings.TrimSpace(line) == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\ti.batchAccumulator(line, start)\n\t}\n\t// Call batchWrite one last time to flush anything out in the batch\n\ti.batchWrite()\n}\n\nfunc (i *Importer) execute(command string) {\n\tresponse, err := i.client.Query(client.Query{Command: command, Database: i.database})\n\tif err != nil {\n\t\tlog.Printf(\"error: %s\\n\", err)\n\t\treturn\n\t}\n\tif err := response.Error(); err != nil {\n\t\tlog.Printf(\"error: %s\\n\", response.Error())\n\t}\n}\n\nfunc (i *Importer) queryExecutor(command string) {\n\ti.totalCommands++\n\ti.execute(command)\n}\n\nfunc (i *Importer) batchAccumulator(line string, start time.Time) {\n\ti.batch = append(i.batch, line)\n\tif len(i.batch) == batchSize {\n\t\ti.batchWrite()\n\t\ti.batch = i.batch[:0]\n\t\t// Give some status feedback every 100000 lines processed\n\t\tprocessed := i.totalInserts + i.failedInserts\n\t\tif processed%100000 == 0 {\n\t\t\tsince := time.Since(start)\n\t\t\tpps := float64(processed) / since.Seconds()\n\t\t\tlog.Printf(\"Processed %d lines.  Time elapsed: %s.  
Points per second (PPS): %d\", processed, since.String(), int64(pps))\n\t\t}\n\t}\n}\n\nfunc (i *Importer) batchWrite() {\n\t// Accumulate the batch size to see how many points we have written this second\n\ti.throttlePointsWritten += len(i.batch)\n\n\t// Find out when we last wrote data\n\tsince := time.Since(i.lastWrite)\n\n\t// Check to see if we've exceeded our points per second for the current timeframe\n\tvar currentPPS int\n\tif since.Seconds() > 0 {\n\t\tcurrentPPS = int(float64(i.throttlePointsWritten) / since.Seconds())\n\t} else {\n\t\tcurrentPPS = i.throttlePointsWritten\n\t}\n\n\t// If our currentPPS is greater than the PPS specified, then we wait and retry\n\tif int(currentPPS) > i.config.PPS && i.config.PPS != 0 {\n\t\t// Wait for the next tick\n\t\t<-i.throttle.C\n\n\t\t// Decrement the batch size back out as it is going to get called again\n\t\ti.throttlePointsWritten -= len(i.batch)\n\t\ti.batchWrite()\n\t\treturn\n\t}\n\n\t_, e := i.client.WriteLineProtocol(strings.Join(i.batch, \"\\n\"), i.database, i.retentionPolicy, i.config.Precision, i.config.WriteConsistency)\n\tif e != nil {\n\t\tlog.Println(\"error writing batch: \", e)\n\t\t// Output failed lines to STDOUT so users can capture lines that failed to import\n\t\tfmt.Println(strings.Join(i.batch, \"\\n\"))\n\t\ti.failedInserts += len(i.batch)\n\t} else {\n\t\ti.totalInserts += len(i.batch)\n\t}\n\ti.throttlePointsWritten = 0\n\ti.lastWrite = time.Now()\n\treturn\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxdb.go",
    "content": "// Package influxdb is the root package of InfluxDB,\n// the scalable datastore for metrics, events, and real-time analytics.\n//\n// If you're looking for the Go HTTP client for InfluxDB,\n// see package github.com/influxdata/influxdb/client/v2.\npackage influxdb // import \"github.com/influxdata/influxdb\"\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/README.md",
    "content": "# The Influx Query Language Specification\n\n## Introduction\n\nThis is a reference for the Influx Query Language (\"InfluxQL\").\n\nInfluxQL is a SQL-like query language for interacting with InfluxDB.  It has\nbeen lovingly crafted to feel familiar to those coming from other SQL or\nSQL-like environments while providing features specific to storing and analyzing\ntime series data.\n\n\n## Notation\n\nThe syntax is specified using Extended Backus-Naur Form (\"EBNF\").  EBNF is the\nsame notation used in the [Go](http://golang.org) programming language\nspecification, which can be found [here](https://golang.org/ref/spec).  Not so\ncoincidentally, InfluxDB is written in Go.\n\n```\nProduction  = production_name \"=\" [ Expression ] \".\" .\nExpression  = Alternative { \"|\" Alternative } .\nAlternative = Term { Term } .\nTerm        = production_name | token [ \"…\" token ] | Group | Option | Repetition .\nGroup       = \"(\" Expression \")\" .\nOption      = \"[\" Expression \"]\" .\nRepetition  = \"{\" Expression \"}\" .\n```\n\nNotation operators in order of increasing precedence:\n\n```\n|   alternation\n()  grouping\n[]  option (0 or 1 times)\n{}  repetition (0 to n times)\n```\n\n## Comments\n\nBoth single and multiline comments are supported. A comment is treated\nthe same as whitespace by the parser.\n\n```\n-- single line comment\n/*\n    multiline comment\n*/\n```\n\nSingle line comments will skip all text until the scanner hits a\nnewline. Multiline comments will skip all text until the end comment\nmarker is hit. 
Nested multiline comments are not supported so the\nfollowing does not work:\n\n```\n/* /* this does not work */ */\n```\n\n## Query representation\n\n### Characters\n\nInfluxQL is Unicode text encoded in [UTF-8](http://en.wikipedia.org/wiki/UTF-8).\n\n```\nnewline             = /* the Unicode code point U+000A */ .\nunicode_char        = /* an arbitrary Unicode code point except newline */ .\n```\n\n## Letters and digits\n\nLetters are the set of ASCII characters plus the underscore character _ (U+005F)\nis considered a letter.\n\nOnly decimal digits are supported.\n\n```\nletter              = ascii_letter | \"_\" .\nascii_letter        = \"A\" … \"Z\" | \"a\" … \"z\" .\ndigit               = \"0\" … \"9\" .\n```\n\n## Identifiers\n\nIdentifiers are tokens which refer to database names, retention policy names,\nuser names, measurement names, tag keys, and field keys.\n\nThe rules:\n\n- double quoted identifiers can contain any unicode character other than a new line\n- double quoted identifiers can contain escaped `\"` characters (i.e., `\\\"`)\n- double quoted identifiers can contain InfluxQL keywords\n- unquoted identifiers must start with an upper or lowercase ASCII character or \"_\"\n- unquoted identifiers may contain only ASCII letters, decimal digits, and \"_\"\n\n```\nidentifier          = unquoted_identifier | quoted_identifier .\nunquoted_identifier = ( letter ) { letter | digit } .\nquoted_identifier   = `\"` unicode_char { unicode_char } `\"` .\n```\n\n#### Examples:\n\n```\ncpu\n_cpu_stats\n\"1h\"\n\"anything really\"\n\"1_Crazy-1337.identifier>NAME👍\"\n```\n\n## Keywords\n\n```\nALL           ALTER         ANY           AS            ASC           BEGIN\nBY            CREATE        CONTINUOUS    DATABASE      DATABASES     DEFAULT\nDELETE        DESC          DESTINATIONS  DIAGNOSTICS   DISTINCT      DROP\nDURATION      END           EVERY         EXPLAIN       FIELD         FOR\nFROM          GRANT         GRANTS        GROUP         GROUPS        
IN\nINF           INSERT        INTO          KEY           KEYS          KILL\nLIMIT         SHOW          MEASUREMENT   MEASUREMENTS  NAME          OFFSET\nON            ORDER         PASSWORD      POLICY        POLICIES      PRIVILEGES\nQUERIES       QUERY         READ          REPLICATION   RESAMPLE      RETENTION\nREVOKE        SELECT        SERIES        SET           SHARD         SHARDS\nSLIMIT        SOFFSET       STATS         SUBSCRIPTION  SUBSCRIPTIONS TAG\nTO            USER          USERS         VALUES        WHERE         WITH\nWRITE\n```\n\n## Literals\n\n### Integers\n\nInfluxQL supports decimal integer literals.  Hexadecimal and octal literals are\nnot currently supported.\n\n```\nint_lit             = [ \"+\" | \"-\" ] ( \"1\" … \"9\" ) { digit } .\n```\n\n### Floats\n\nInfluxQL supports floating-point literals.  Exponents are not currently supported.\n\n```\nfloat_lit           = [ \"+\" | \"-\" ] ( \".\" digit { digit } | digit { digit } \".\" { digit } ) .\n```\n\n### Strings\n\nString literals must be surrounded by single quotes. Strings may contain `'`\ncharacters as long as they are escaped (i.e., `\\'`).\n\n```\nstring_lit          = `'` { unicode_char } `'` .\n```\n\n### Durations\n\nDuration literals specify a length of time.  
An integer literal followed\nimmediately (with no spaces) by a duration unit listed below is interpreted as\na duration literal.\n\n### Duration units\n| Units  | Meaning                                 |\n|--------|-----------------------------------------|\n| u or µ | microseconds (1 millionth of a second)  |\n| ms     | milliseconds (1 thousandth of a second) |\n| s      | second                                  |\n| m      | minute                                  |\n| h      | hour                                    |\n| d      | day                                     |\n| w      | week                                    |\n\n```\nduration_lit        = int_lit duration_unit .\nduration_unit       = \"u\" | \"µ\" | \"ms\" | \"s\" | \"m\" | \"h\" | \"d\" | \"w\" .\n```\n\n### Dates & Times\n\nThe date and time literal format is not specified in EBNF like the rest of this document.  It is specified using Go's date / time parsing format, which is a reference date written in the format required by InfluxQL.  
The reference date time is:\n\nInfluxQL reference date time: January 2nd, 2006 at 3:04:05 PM\n\n```\ntime_lit            = \"2006-01-02 15:04:05.999999\" | \"2006-01-02\" .\n```\n\n### Booleans\n\n```\nbool_lit            = TRUE | FALSE .\n```\n\n### Regular Expressions\n\n```\nregex_lit           = \"/\" { unicode_char } \"/\" .\n```\n\n**Comparators:**\n`=~` matches against\n`!~` doesn't match against\n\n> **Note:** Use regular expressions to match measurements and tags.\nYou cannot use regular expressions to match databases, retention policies, or fields.\n\n## Queries\n\nA query is composed of one or more statements separated by a semicolon.\n\n```\nquery               = statement { \";\" statement } .\n\nstatement           = alter_retention_policy_stmt |\n                      create_continuous_query_stmt |\n                      create_database_stmt |\n                      create_retention_policy_stmt |\n                      create_subscription_stmt |\n                      create_user_stmt |\n                      delete_stmt |\n                      drop_continuous_query_stmt |\n                      drop_database_stmt |\n                      drop_measurement_stmt |\n                      drop_retention_policy_stmt |\n                      drop_series_stmt |\n                      drop_shard_stmt |\n                      drop_subscription_stmt |\n                      drop_user_stmt |\n                      grant_stmt |\n                      kill_query_statement |\n                      show_continuous_queries_stmt |\n                      show_databases_stmt |\n                      show_field_keys_stmt |\n                      show_grants_stmt |\n                      show_measurements_stmt |\n                      show_queries_stmt |\n                      show_retention_policies |\n                      show_series_stmt |\n                      show_shard_groups_stmt |\n                      show_shards_stmt |\n                      
show_subscriptions_stmt|\n                      show_tag_keys_stmt |\n                      show_tag_values_stmt |\n                      show_users_stmt |\n                      revoke_stmt |\n                      select_stmt .\n```\n\n## Statements\n\n### ALTER RETENTION POLICY\n\n```\nalter_retention_policy_stmt  = \"ALTER RETENTION POLICY\" policy_name on_clause\n                               retention_policy_option\n                               [ retention_policy_option ]\n                               [ retention_policy_option ]\n                               [ retention_policy_option ] .\n```\n\n> Replication factors do not serve a purpose with single node instances.\n\n#### Examples:\n\n```sql\n-- Set default retention policy for mydb to 1h.cpu.\nALTER RETENTION POLICY \"1h.cpu\" ON \"mydb\" DEFAULT\n\n-- Change duration and replication factor.\nALTER RETENTION POLICY \"policy1\" ON \"somedb\" DURATION 1h REPLICATION 4\n```\n\n### CREATE CONTINUOUS QUERY\n\n```\ncreate_continuous_query_stmt = \"CREATE CONTINUOUS QUERY\" query_name on_clause\n                               [ \"RESAMPLE\" resample_opts ]\n                               \"BEGIN\" select_stmt \"END\" .\n\nquery_name                   = identifier .\n\nresample_opts                = (every_stmt for_stmt | every_stmt | for_stmt) .\nevery_stmt                   = \"EVERY\" duration_lit\nfor_stmt                     = \"FOR\" duration_lit\n```\n\n#### Examples:\n\n```sql\n-- selects from DEFAULT retention policy and writes into 6_months retention policy\nCREATE CONTINUOUS QUERY \"10m_event_count\"\nON \"db_name\"\nBEGIN\n  SELECT count(\"value\")\n  INTO \"6_months\".\"events\"\n  FROM \"events\"\n  GROUP BY time(10m)\nEND;\n\n-- this selects from the output of one continuous query in one retention policy and outputs to another series in another retention policy\nCREATE CONTINUOUS QUERY \"1h_event_count\"\nON \"db_name\"\nBEGIN\n  SELECT sum(\"count\") as \"count\"\n  INTO 
\"2_years\".\"events\"\n  FROM \"6_months\".\"events\"\n  GROUP BY time(1h)\nEND;\n\n-- this customizes the resample interval so the interval is queried every 10s and intervals are resampled until 2m after their start time\n-- when resample is used, at least one of \"EVERY\" or \"FOR\" must be used\nCREATE CONTINUOUS QUERY \"cpu_mean\"\nON \"db_name\"\nRESAMPLE EVERY 10s FOR 2m\nBEGIN\n  SELECT mean(\"value\")\n  INTO \"cpu_mean\"\n  FROM \"cpu\"\n  GROUP BY time(1m)\nEND;\n```\n\n### CREATE DATABASE\n\n```\ncreate_database_stmt = \"CREATE DATABASE\" db_name\n                       [ WITH\n                           [ retention_policy_duration ]\n                           [ retention_policy_replication ]\n                           [ retention_policy_shard_group_duration ]\n                           [ retention_policy_name ]\n                       ] .\n```\n\n> Replication factors do not serve a purpose with single node instances.\n\n#### Examples:\n\n```sql\n-- Create a database called foo\nCREATE DATABASE \"foo\"\n\n-- Create a database called bar with a new DEFAULT retention policy and specify the duration, replication, shard group duration, and name of that retention policy\nCREATE DATABASE \"bar\" WITH DURATION 1d REPLICATION 1 SHARD DURATION 30m NAME \"myrp\"\n\n-- Create a database called mydb with a new DEFAULT retention policy and specify the name of that retention policy\nCREATE DATABASE \"mydb\" WITH NAME \"myrp\"\n```\n\n### CREATE RETENTION POLICY\n\n```\ncreate_retention_policy_stmt = \"CREATE RETENTION POLICY\" policy_name on_clause\n                               retention_policy_duration\n                               retention_policy_replication\n                               [ retention_policy_shard_group_duration ]\n                               [ \"DEFAULT\" ] .\n```\n\n> Replication factors do not serve a purpose with single node instances.\n\n#### Examples\n\n```sql\n-- Create a retention policy.\nCREATE RETENTION POLICY \"10m.events\" 
ON \"somedb\" DURATION 60m REPLICATION 2\n\n-- Create a retention policy and set it as the DEFAULT.\nCREATE RETENTION POLICY \"10m.events\" ON \"somedb\" DURATION 60m REPLICATION 2 DEFAULT\n\n-- Create a retention policy and specify the shard group duration.\nCREATE RETENTION POLICY \"10m.events\" ON \"somedb\" DURATION 60m REPLICATION 2 SHARD DURATION 30m\n```\n\n### CREATE SUBSCRIPTION\n\nSubscriptions tell InfluxDB to send all the data it receives to Kapacitor or other third parties.\n\n```\ncreate_subscription_stmt = \"CREATE SUBSCRIPTION\" subscription_name \"ON\" db_name \".\" retention_policy \"DESTINATIONS\" (\"ANY\"|\"ALL\") host { \",\" host} .\n```\n\n#### Examples:\n\n```sql\n-- Create a SUBSCRIPTION on database 'mydb' and retention policy 'autogen' that send data to 'example.com:9090' via UDP.\nCREATE SUBSCRIPTION \"sub0\" ON \"mydb\".\"autogen\" DESTINATIONS ALL 'udp://example.com:9090'\n\n-- Create a SUBSCRIPTION on database 'mydb' and retention policy 'autogen' that round robins the data to 'h1.example.com:9090' and 'h2.example.com:9090'.\nCREATE SUBSCRIPTION \"sub0\" ON \"mydb\".\"autogen\" DESTINATIONS ANY 'udp://h1.example.com:9090', 'udp://h2.example.com:9090'\n```\n\n### CREATE USER\n\n```\ncreate_user_stmt = \"CREATE USER\" user_name \"WITH PASSWORD\" password\n                   [ \"WITH ALL PRIVILEGES\" ] .\n```\n\n#### Examples:\n\n```sql\n-- Create a normal database user.\nCREATE USER \"jdoe\" WITH PASSWORD '1337password'\n\n-- Create an admin user.\n-- Note: Unlike the GRANT statement, the \"PRIVILEGES\" keyword is required here.\nCREATE USER \"jdoe\" WITH PASSWORD '1337password' WITH ALL PRIVILEGES\n```\n\n> **Note:** The password string must be wrapped in single quotes.\n\n### DELETE\n\n```\ndelete_stmt = \"DELETE\" ( from_clause | where_clause | from_clause where_clause ) .\n```\n\n#### Examples:\n\n```sql\nDELETE FROM \"cpu\"\nDELETE FROM \"cpu\" WHERE time < '2000-01-01T00:00:00Z'\nDELETE WHERE time < 
'2000-01-01T00:00:00Z'\n```\n\n### DROP CONTINUOUS QUERY\n\n```\ndrop_continuous_query_stmt = \"DROP CONTINUOUS QUERY\" query_name on_clause .\n```\n\n#### Example:\n\n```sql\nDROP CONTINUOUS QUERY \"myquery\" ON \"mydb\"\n```\n\n### DROP DATABASE\n\n```\ndrop_database_stmt = \"DROP DATABASE\" db_name .\n```\n\n#### Example:\n\n```sql\nDROP DATABASE \"mydb\"\n```\n\n### DROP MEASUREMENT\n\n```\ndrop_measurement_stmt = \"DROP MEASUREMENT\" measurement .\n```\n\n#### Examples:\n\n```sql\n-- drop the cpu measurement\nDROP MEASUREMENT \"cpu\"\n```\n\n### DROP RETENTION POLICY\n\n```\ndrop_retention_policy_stmt = \"DROP RETENTION POLICY\" policy_name on_clause .\n```\n\n#### Example:\n\n```sql\n-- drop the retention policy named 1h.cpu from mydb\nDROP RETENTION POLICY \"1h.cpu\" ON \"mydb\"\n```\n\n### DROP SERIES\n\n```\ndrop_series_stmt = \"DROP SERIES\" ( from_clause | where_clause | from_clause where_clause ) .\n```\n\n#### Example:\n\n```sql\nDROP SERIES FROM \"telegraf\".\"autogen\".\"cpu\" WHERE cpu = 'cpu8'\n\n```\n\n### DROP SHARD\n\n```\ndrop_shard_stmt = \"DROP SHARD\" ( shard_id ) .\n```\n\n#### Example:\n\n```\nDROP SHARD 1\n```\n\n### DROP SUBSCRIPTION\n\n```\ndrop_subscription_stmt = \"DROP SUBSCRIPTION\" subscription_name \"ON\" db_name \".\" retention_policy .\n```\n\n#### Example:\n\n```sql\nDROP SUBSCRIPTION \"sub0\" ON \"mydb\".\"autogen\"\n```\n\n### DROP USER\n\n```\ndrop_user_stmt = \"DROP USER\" user_name .\n```\n\n#### Example:\n\n```sql\nDROP USER \"jdoe\"\n```\n\n### GRANT\n\n> **NOTE:** Users can be granted privileges on databases that do not exist.\n\n```\ngrant_stmt = \"GRANT\" privilege [ on_clause ] to_clause .\n```\n\n#### Examples:\n\n```sql\n-- grant admin privileges\nGRANT ALL TO \"jdoe\"\n\n-- grant read access to a database\nGRANT READ ON \"mydb\" TO \"jdoe\"\n```\n\n### KILL QUERY\n\n```\nkill_query_statement = \"KILL QUERY\" query_id .\n```\n\n#### Examples:\n\n```\n--- kill a query with the query_id 36\nKILL QUERY 36\n```\n\n> 
**NOTE:** Identify the `query_id` from the `SHOW QUERIES` output.\n\n### SHOW CONTINUOUS QUERIES\n\n```\nshow_continuous_queries_stmt = \"SHOW CONTINUOUS QUERIES\" .\n```\n\n#### Example:\n\n```sql\n-- show all continuous queries\nSHOW CONTINUOUS QUERIES\n```\n\n### SHOW DATABASES\n\n```\nshow_databases_stmt = \"SHOW DATABASES\" .\n```\n\n#### Example:\n\n```sql\n-- show all databases\nSHOW DATABASES\n```\n\n### SHOW FIELD KEYS\n\n```\nshow_field_keys_stmt = \"SHOW FIELD KEYS\" [ from_clause ] .\n```\n\n#### Examples:\n\n```sql\n-- show field keys and field value data types from all measurements\nSHOW FIELD KEYS\n\n-- show field keys and field value data types from specified measurement\nSHOW FIELD KEYS FROM \"cpu\"\n```\n\n### SHOW GRANTS\n\n```\nshow_grants_stmt = \"SHOW GRANTS FOR\" user_name .\n```\n\n#### Example:\n\n```sql\n-- show grants for jdoe\nSHOW GRANTS FOR \"jdoe\"\n```\n\n### SHOW MEASUREMENTS\n\n```\nshow_measurements_stmt = \"SHOW MEASUREMENTS\" [ with_measurement_clause ] [ where_clause ] [ limit_clause ] [ offset_clause ] .\n```\n\n#### Examples:\n\n```sql\n-- show all measurements\nSHOW MEASUREMENTS\n\n-- show measurements where region tag = 'uswest' AND host tag = 'serverA'\nSHOW MEASUREMENTS WHERE \"region\" = 'uswest' AND \"host\" = 'serverA'\n\n-- show measurements that start with 'h2o'\nSHOW MEASUREMENTS WITH MEASUREMENT =~ /h2o.*/\n```\n\n### SHOW QUERIES\n\n```\nshow_queries_stmt = \"SHOW QUERIES\" .\n```\n\n#### Example:\n\n```sql\n-- show all currently-running queries\nSHOW QUERIES\n```\n\n### SHOW RETENTION POLICIES\n\n```\nshow_retention_policies = \"SHOW RETENTION POLICIES\" on_clause .\n```\n\n#### Example:\n\n```sql\n-- show all retention policies on a database\nSHOW RETENTION POLICIES ON \"mydb\"\n```\n\n### SHOW SERIES\n\n```\nshow_series_stmt = \"SHOW SERIES\" [ from_clause ] [ where_clause ] [ limit_clause ] [ offset_clause ] .\n```\n\n#### Example:\n\n```sql\nSHOW SERIES FROM \"telegraf\".\"autogen\".\"cpu\" WHERE cpu = 
'cpu8'\n```\n\n### SHOW SHARD GROUPS\n\n```\nshow_shard_groups_stmt = \"SHOW SHARD GROUPS\" .\n```\n\n#### Example:\n\n```sql\nSHOW SHARD GROUPS\n```\n\n### SHOW SHARDS\n\n```\nshow_shards_stmt = \"SHOW SHARDS\" .\n```\n\n#### Example:\n\n```sql\nSHOW SHARDS\n```\n\n### SHOW SUBSCRIPTIONS\n\n```\nshow_subscriptions_stmt = \"SHOW SUBSCRIPTIONS\" .\n```\n\n#### Example:\n\n```sql\nSHOW SUBSCRIPTIONS\n```\n\n### SHOW TAG KEYS\n\n```\nshow_tag_keys_stmt = \"SHOW TAG KEYS\" [ from_clause ] [ where_clause ] [ group_by_clause ]\n                     [ limit_clause ] [ offset_clause ] .\n```\n\n#### Examples:\n\n```sql\n-- show all tag keys\nSHOW TAG KEYS\n\n-- show all tag keys from the cpu measurement\nSHOW TAG KEYS FROM \"cpu\"\n\n-- show all tag keys from the cpu measurement where the region key = 'uswest'\nSHOW TAG KEYS FROM \"cpu\" WHERE \"region\" = 'uswest'\n\n-- show all tag keys where the host key = 'serverA'\nSHOW TAG KEYS WHERE \"host\" = 'serverA'\n```\n\n### SHOW TAG VALUES\n\n```\nshow_tag_values_stmt = \"SHOW TAG VALUES\" [ from_clause ] with_tag_clause [ where_clause ]\n                       [ group_by_clause ] [ limit_clause ] [ offset_clause ] .\n```\n\n#### Examples:\n\n```sql\n-- show all tag values across all measurements for the region tag\nSHOW TAG VALUES WITH KEY = \"region\"\n\n-- show tag values from the cpu measurement for the region tag\nSHOW TAG VALUES FROM \"cpu\" WITH KEY = \"region\"\n\n-- show tag values across all measurements for all tag keys that do not include the letter c\nSHOW TAG VALUES WITH KEY !~ /.*c.*/\n\n-- show tag values from the cpu measurement for region & host tag keys where service = 'redis'\nSHOW TAG VALUES FROM \"cpu\" WITH KEY IN (\"region\", \"host\") WHERE \"service\" = 'redis'\n```\n\n### SHOW USERS\n\n```\nshow_users_stmt = \"SHOW USERS\" .\n```\n\n#### Example:\n\n```sql\n-- show all users\nSHOW USERS\n```\n\n### REVOKE\n\n```\nrevoke_stmt = \"REVOKE\" privilege [ on_clause ] \"FROM\" user_name .\n```\n\n#### 
Examples:\n\n```sql\n-- revoke admin privileges from jdoe\nREVOKE ALL PRIVILEGES FROM \"jdoe\"\n\n-- revoke read privileges from jdoe on mydb\nREVOKE READ ON \"mydb\" FROM \"jdoe\"\n```\n\n### SELECT\n\n```\nselect_stmt = \"SELECT\" fields from_clause [ into_clause ] [ where_clause ]\n              [ group_by_clause ] [ order_by_clause ] [ limit_clause ]\n              [ offset_clause ] [ slimit_clause ] [ soffset_clause ]\n              [ timezone_clause ] .\n```\n\n#### Examples:\n\n```sql\n-- select mean value from the cpu measurement where region = 'uswest' grouped by 10 minute intervals\nSELECT mean(\"value\") FROM \"cpu\" WHERE \"region\" = 'uswest' GROUP BY time(10m) fill(0)\n\n-- select from all measurements beginning with cpu into the same measurement name in the cpu_1h retention policy\nSELECT mean(\"value\") INTO \"cpu_1h\".:MEASUREMENT FROM /cpu.*/\n\n-- select from measurements grouped by the day with a timezone\nSELECT mean(\"value\") FROM \"cpu\" GROUP BY region, time(1d) fill(0) tz(\"America/Chicago\")\n```\n\n## Clauses\n\n```\nfrom_clause     = \"FROM\" measurements .\n\ngroup_by_clause = \"GROUP BY\" dimensions fill(fill_option).\n\ninto_clause     = \"INTO\" ( measurement | back_ref ).\n\nlimit_clause    = \"LIMIT\" int_lit .\n\noffset_clause   = \"OFFSET\" int_lit .\n\nslimit_clause   = \"SLIMIT\" int_lit .\n\nsoffset_clause  = \"SOFFSET\" int_lit .\n\ntimezone_clause = tz(string_lit) .\n\non_clause       = \"ON\" db_name .\n\norder_by_clause = \"ORDER BY\" sort_fields .\n\nto_clause       = \"TO\" user_name .\n\nwhere_clause    = \"WHERE\" expr .\n\nwith_measurement_clause = \"WITH MEASUREMENT\" ( \"=\" measurement | \"=~\" regex_lit ) .\n\nwith_tag_clause = \"WITH KEY\" ( \"=\" tag_key | \"!=\" tag_key | \"=~\" regex_lit | \"IN (\" tag_keys \")\"  ) .\n```\n\n## Expressions\n\n```\nbinary_op        = \"+\" | \"-\" | \"*\" | \"/\" | \"%\" | \"&\" | \"|\" | \"^\" | \"AND\" |\n                   \"OR\" | \"=\" | \"!=\" | \"<>\" | \"<\" | \"<=\" 
| \">\" | \">=\" .\n\nexpr             = unary_expr { binary_op unary_expr } .\n\nunary_expr       = \"(\" expr \")\" | var_ref | time_lit | string_lit | int_lit |\n                   float_lit | bool_lit | duration_lit | regex_lit .\n```\n\n## Other\n\n```\nalias            = \"AS\" identifier .\n\nback_ref         = ( policy_name \".:MEASUREMENT\" ) |\n                   ( db_name \".\" [ policy_name ] \".:MEASUREMENT\" ) .\n\ndb_name          = identifier .\n\ndimension        = expr .\n\ndimensions       = dimension { \",\" dimension } .\n\nfield_key        = identifier .\n\nfield            = expr [ alias ] .\n\nfields           = field { \",\" field } .\n\nfill_option      = \"null\" | \"none\" | \"previous\" | \"linear\" | int_lit | float_lit .\n\nhost             = string_lit .\n\nmeasurement      = measurement_name |\n                   ( policy_name \".\" measurement_name ) |\n                   ( db_name \".\" [ policy_name ] \".\" measurement_name ) .\n\nmeasurements     = measurement { \",\" measurement } .\n\nmeasurement_name = identifier | regex_lit .\n\npassword         = string_lit .\n\npolicy_name      = identifier .\n\nprivilege        = \"ALL\" [ \"PRIVILEGES\" ] | \"READ\" | \"WRITE\" .\n\nquery_id         = int_lit .\n\nquery_name       = identifier .\n\nretention_policy = identifier .\n\nretention_policy_option      = retention_policy_duration |\n                               retention_policy_replication |\n                               retention_policy_shard_group_duration |\n                               \"DEFAULT\" .\n\nretention_policy_duration    = \"DURATION\" duration_lit .\n\nretention_policy_replication = \"REPLICATION\" int_lit .\n\nretention_policy_shard_group_duration = \"SHARD DURATION\" duration_lit .\n\nretention_policy_name = \"NAME\" identifier .\n\nseries_id        = int_lit .\n\nshard_id         = int_lit .\n\nsort_field       = field_key [ ASC | DESC ] .\n\nsort_fields      = sort_field { \",\" sort_field } 
.\n\nsubscription_name = identifier .\n\ntag_key          = identifier .\n\ntag_keys         = tag_key { \",\" tag_key } .\n\nuser_name        = identifier .\n\nvar_ref          = measurement .\n```\n\n## Query Engine Internals\n\nOnce you understand the language itself, it's important to know how these\nlanguage constructs are implemented in the query engine. This gives you an\nintuitive sense for how results will be processed and how to create efficient\nqueries.\n\nThe life cycle of a query looks like this:\n\n1. InfluxQL query string is tokenized and then parsed into an abstract syntax\n   tree (AST). This is the code representation of the query itself.\n\n2. The AST is passed to the `QueryExecutor` which directs queries to the\n   appropriate handlers. For example, queries related to meta data are executed\n   by the meta service and `SELECT` statements are executed by the shards\n   themselves.\n\n3. The query engine then determines the shards that match the `SELECT`\n   statement's time range. From these shards, iterators are created for each\n   field in the statement.\n\n4. Iterators are passed to the emitter which drains them and joins the resulting\n   points. The emitter's job is to convert simple time/value points into the\n   more complex result objects that are returned to the client.\n\n\n### Understanding Iterators\n\nIterators are at the heart of the query engine. They provide a simple interface\nfor looping over a set of points. 
For example, this is an iterator over Float\npoints:\n\n```\ntype FloatIterator interface {\n    Next() (*FloatPoint, error)\n}\n```\n\nThese iterators are created through the `IteratorCreator` interface:\n\n```\ntype IteratorCreator interface {\n    CreateIterator(m *Measurement, opt IteratorOptions) (Iterator, error)\n}\n```\n\nThe `IteratorOptions` provide arguments about field selection, time ranges,\nand dimensions that the iterator creator can use when planning an iterator.\nThe `IteratorCreator` interface is used at many levels such as the `Shards`,\n`Shard`, and `Engine`. This allows optimizations to be performed when applicable\nsuch as returning a precomputed `COUNT()`.\n\nIterators aren't just for reading raw data from storage though. Iterators can be\ncomposed so that they provided additional functionality around an input\niterator. For example, a `DistinctIterator` can compute the distinct values for\neach time window for an input iterator. Or a `FillIterator` can generate\nadditional points that are missing from an input iterator.\n\nThis composition also lends itself well to aggregation. For example, a statement\nsuch as this:\n\n```\nSELECT MEAN(value) FROM cpu GROUP BY time(10m)\n```\n\nIn this case, `MEAN(value)` is a `MeanIterator` wrapping an iterator from the\nunderlying shards. However, if we can add an additional iterator to determine\nthe derivative of the mean:\n\n```\nSELECT DERIVATIVE(MEAN(value), 20m) FROM cpu GROUP BY time(10m)\n```\n\n\n### Understanding Auxiliary Fields\n\nBecause InfluxQL allows users to use selector functions such as `FIRST()`,\n`LAST()`, `MIN()`, and `MAX()`, the engine must provide a way to return related\ndata at the same time with the selected point.\n\nFor example, in this query:\n\n```\nSELECT FIRST(value), host FROM cpu GROUP BY time(1h)\n```\n\nWe are selecting the first `value` that occurs every hour but we also want to\nretrieve the `host` associated with that point. 
Since the `Point` types only\nspecify a single typed `Value` for efficiency, we push the `host` into the\nauxiliary fields of the point. These auxiliary fields are attached to the point\nuntil it is passed to the emitter where the fields get split off to their own\niterator.\n\n\n### Built-in Iterators\n\nThere are many helper iterators that let us build queries:\n\n* Merge Iterator - This iterator combines one or more iterators into a single\n  new iterator of the same type. This iterator guarantees that all points\n  within a window will be output before starting the next window but does not\n  provide ordering guarantees within the window. This allows for fast access\n  for aggregate queries which do not need stronger sorting guarantees.\n\n* Sorted Merge Iterator - This iterator also combines one or more iterators\n  into a new iterator of the same type. However, this iterator guarantees\n  time ordering of every point. This makes it slower than the `MergeIterator`\n  but this ordering guarantee is required for non-aggregate queries which\n  return the raw data points.\n\n* Limit Iterator - This iterator limits the number of points per name/tag\n  group. This is the implementation of the `LIMIT` & `OFFSET` syntax.\n\n* Fill Iterator - This iterator injects extra points if they are missing from\n  the input iterator. It can provide `null` points, points with the previous\n  value, or points with a specific value.\n\n* Buffered Iterator - This iterator provides the ability to \"unread\" a point\n  back onto a buffer so it can be read again next time. This is used extensively\n  to provide lookahead for windowing.\n\n* Reduce Iterator - This iterator calls a reduction function for each point in\n  a window. When the window is complete then all points for that window are\n  output. 
This is used for simple aggregate functions such as `COUNT()`.\n\n* Reduce Slice Iterator - This iterator collects all points for a window first\n  and then passes them all to a reduction function at once. The results are\n  returned from the iterator. This is used for aggregate functions such as\n  `DERIVATIVE()`.\n\n* Transform Iterator - This iterator calls a transform function for each point\n  from an input iterator. This is used for executing binary expressions.\n\n* Dedupe Iterator - This iterator only outputs unique points. It is resource\n  intensive so it is only used for small queries such as meta query statements.\n\n\n### Call Iterators\n\nFunction calls in InfluxQL are implemented at two levels. Some calls can be\nwrapped at multiple layers to improve efficiency. For example, a `COUNT()` can\nbe performed at the shard level and then multiple `CountIterator`s can be\nwrapped with another `CountIterator` to compute the count of all shards. These\niterators can be created using `NewCallIterator()`.\n\nSome iterators are more complex or need to be implemented at a higher level.\nFor example, the `DERIVATIVE()` needs to retrieve all points for a window first\nbefore performing the calculation. This iterator is created by the engine itself\nand is never requested to be created by the lower levels.\n\n### Subqueries\n\nSubqueries are built on top of iterators. Most of the work involved in\nsupporting subqueries is in organizing how data is streamed to the\niterators that will process the data.\n\nThe final ordering of the stream has to output all points from one\nseries before moving to the next series and it also needs to ensure\nthose points are printed in order. 
So there are two separate concepts we\nneed to consider when creating an iterator: ordering and grouping.\n\nWhen an inner query has a different grouping than the outermost query,\nwe still need to group together related points into buckets, but we do\nnot have to ensure that all points from one buckets are output before\nthe points in another bucket. In fact, if we do that, we will be unable\nto perform the grouping for the outer query correctly. Instead, we group\nall points by the outermost query for an interval and then, within that\ninterval, we group the points for the inner query. For example, here are\nseries keys and times in seconds (fields are omitted since they don't\nmatter in this example):\n\n    cpu,host=server01 0\n    cpu,host=server01 10\n    cpu,host=server01 20\n    cpu,host=server01 30\n    cpu,host=server02 0\n    cpu,host=server02 10\n    cpu,host=server02 20\n    cpu,host=server02 30\n\nWith the following query:\n\n    SELECT mean(max) FROM (SELECT max(value) FROM cpu GROUP BY host, time(20s)) GROUP BY time(20s)\n\nThe final grouping keeps all of the points together which means we need\nto group `server01` with `server02`. That means we output the points\nfrom the underlying engine like this:\n\n    cpu,host=server01 0\n    cpu,host=server01 10\n    cpu,host=server02 0\n    cpu,host=server02 10\n    cpu,host=server01 20\n    cpu,host=server01 30\n    cpu,host=server02 20\n    cpu,host=server02 30\n\nWithin each one of those time buckets, we calculate the `max()` value\nfor each unique host so the output stream gets transformed to look like\nthis:\n\n    cpu,host=server01 0\n    cpu,host=server02 0\n    cpu,host=server01 20\n    cpu,host=server02 20\n\nThen we can process the `mean()` on this stream of data instead and it\nwill be output in the correct order. 
This is true of any order of\ngrouping since grouping can only go from more specific to less specific.\n\nWhen it comes to ordering, unordered data is faster to process, but we\nalways need to produce ordered data. When processing a raw query with no\naggregates, we need to ensure data coming from the engine is ordered so\nthe output is ordered. When we have an aggregate, we know one point is\nbeing emitted for each interval and will always produce ordered output.\nSo for aggregates, we can take unordered data as the input and get\nordered output. Any ordered data as input will always result in ordered\ndata so we just need to look at how an iterator processes unordered\ndata.\n\n|                 | raw query        | selector (without group by time) | selector (with group by time) | aggregator     |\n|-----------------|------------------|----------------------------------|-------------------------------|----------------|\n| ordered input   | ordered output   | ordered output                   | ordered output                | ordered output |\n| unordered input | unordered output | unordered output                 | ordered output                | ordered output |\n\nSince we always need ordered output, we just need to work backwards and\ndetermine which pattern of input gives us ordered output. If both\nordered and unordered input produce ordered output, we prefer unordered\ninput since it is faster.\n\nThere are also certain aggregates that require ordered input like\n`median()` and `percentile()`. These functions will explicitly request\nordered input. It is also important to realize that selectors that are\ngrouped by time are the equivalent of an aggregator. It is only\nselectors without a group by time that are different.\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/ast.go",
    "content": "package influxql\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"regexp\"\n\t\"regexp/syntax\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/gogo/protobuf/proto\"\n\tinternal \"github.com/influxdata/influxdb/influxql/internal\"\n)\n\n// DataType represents the primitive data types available in InfluxQL.\ntype DataType int\n\nconst (\n\t// Unknown primitive data type.\n\tUnknown DataType = 0\n\t// Float means the data type is a float.\n\tFloat = 1\n\t// Integer means the data type is an integer.\n\tInteger = 2\n\t// String means the data type is a string of text.\n\tString = 3\n\t// Boolean means the data type is a boolean.\n\tBoolean = 4\n\t// Time means the data type is a time.\n\tTime = 5\n\t// Duration means the data type is a duration of time.\n\tDuration = 6\n\t// Tag means the data type is a tag.\n\tTag = 7\n\t// AnyField means the data type is any field.\n\tAnyField = 8\n)\n\nvar (\n\t// ErrInvalidTime is returned when the timestamp string used to\n\t// compare against time field is invalid.\n\tErrInvalidTime = errors.New(\"invalid timestamp string\")\n)\n\n// InspectDataType returns the data type of a given value.\nfunc InspectDataType(v interface{}) DataType {\n\tswitch v.(type) {\n\tcase float64:\n\t\treturn Float\n\tcase int64, int32, int:\n\t\treturn Integer\n\tcase string:\n\t\treturn String\n\tcase bool:\n\t\treturn Boolean\n\tcase time.Time:\n\t\treturn Time\n\tcase time.Duration:\n\t\treturn Duration\n\tdefault:\n\t\treturn Unknown\n\t}\n}\n\n// InspectDataTypes returns all of the data types for an interface slice.\nfunc InspectDataTypes(a []interface{}) []DataType {\n\tdta := make([]DataType, len(a))\n\tfor i, v := range a {\n\t\tdta[i] = InspectDataType(v)\n\t}\n\treturn dta\n}\n\n// LessThan returns true if the other DataType has greater precedence than the\n// current data type. 
Unknown has the lowest precedence.\n//\n// NOTE: This is not the same as using the `<` or `>` operator because the\n// integers used decrease with higher precedence, but Unknown is the lowest\n// precedence at the zero value.\nfunc (d DataType) LessThan(other DataType) bool {\n\treturn d == Unknown || (other != Unknown && other < d)\n}\n\n// String returns the human-readable string representation of the DataType.\nfunc (d DataType) String() string {\n\tswitch d {\n\tcase Float:\n\t\treturn \"float\"\n\tcase Integer:\n\t\treturn \"integer\"\n\tcase String:\n\t\treturn \"string\"\n\tcase Boolean:\n\t\treturn \"boolean\"\n\tcase Time:\n\t\treturn \"time\"\n\tcase Duration:\n\t\treturn \"duration\"\n\tcase Tag:\n\t\treturn \"tag\"\n\tcase AnyField:\n\t\treturn \"field\"\n\t}\n\treturn \"unknown\"\n}\n\n// Node represents a node in the InfluxDB abstract syntax tree.\ntype Node interface {\n\t// node is unexported to ensure implementations of Node\n\t// can only originate in this package.\n\tnode()\n\tString() string\n}\n\nfunc (*Query) node()     {}\nfunc (Statements) node() {}\n\nfunc (*AlterRetentionPolicyStatement) node()  {}\nfunc (*CreateContinuousQueryStatement) node() {}\nfunc (*CreateDatabaseStatement) node()        {}\nfunc (*CreateRetentionPolicyStatement) node() {}\nfunc (*CreateSubscriptionStatement) node()    {}\nfunc (*CreateUserStatement) node()            {}\nfunc (*Distinct) node()                       {}\nfunc (*DeleteSeriesStatement) node()          {}\nfunc (*DeleteStatement) node()                {}\nfunc (*DropContinuousQueryStatement) node()   {}\nfunc (*DropDatabaseStatement) node()          {}\nfunc (*DropMeasurementStatement) node()       {}\nfunc (*DropRetentionPolicyStatement) node()   {}\nfunc (*DropSeriesStatement) node()            {}\nfunc (*DropShardStatement) node()             {}\nfunc (*DropSubscriptionStatement) node()      {}\nfunc (*DropUserStatement) node()              {}\nfunc (*GrantStatement) node()                 {}\nfunc 
(*GrantAdminStatement) node()            {}\nfunc (*KillQueryStatement) node()             {}\nfunc (*RevokeStatement) node()                {}\nfunc (*RevokeAdminStatement) node()           {}\nfunc (*SelectStatement) node()                {}\nfunc (*SetPasswordUserStatement) node()       {}\nfunc (*ShowContinuousQueriesStatement) node() {}\nfunc (*ShowGrantsForUserStatement) node()     {}\nfunc (*ShowDatabasesStatement) node()         {}\nfunc (*ShowFieldKeysStatement) node()         {}\nfunc (*ShowRetentionPoliciesStatement) node() {}\nfunc (*ShowMeasurementsStatement) node()      {}\nfunc (*ShowQueriesStatement) node()           {}\nfunc (*ShowSeriesStatement) node()            {}\nfunc (*ShowShardGroupsStatement) node()       {}\nfunc (*ShowShardsStatement) node()            {}\nfunc (*ShowStatsStatement) node()             {}\nfunc (*ShowSubscriptionsStatement) node()     {}\nfunc (*ShowDiagnosticsStatement) node()       {}\nfunc (*ShowTagKeysStatement) node()           {}\nfunc (*ShowTagValuesStatement) node()         {}\nfunc (*ShowUsersStatement) node()             {}\n\nfunc (*BinaryExpr) node()      {}\nfunc (*BooleanLiteral) node()  {}\nfunc (*Call) node()            {}\nfunc (*Dimension) node()       {}\nfunc (Dimensions) node()       {}\nfunc (*DurationLiteral) node() {}\nfunc (*IntegerLiteral) node()  {}\nfunc (*Field) node()           {}\nfunc (Fields) node()           {}\nfunc (*Measurement) node()     {}\nfunc (Measurements) node()     {}\nfunc (*nilLiteral) node()      {}\nfunc (*NumberLiteral) node()   {}\nfunc (*ParenExpr) node()       {}\nfunc (*RegexLiteral) node()    {}\nfunc (*ListLiteral) node()     {}\nfunc (*SortField) node()       {}\nfunc (SortFields) node()       {}\nfunc (Sources) node()          {}\nfunc (*StringLiteral) node()   {}\nfunc (*SubQuery) node()        {}\nfunc (*Target) node()          {}\nfunc (*TimeLiteral) node()     {}\nfunc (*VarRef) node()          {}\nfunc (*Wildcard) node()        {}\n\n// Query represents a 
collection of ordered statements.\ntype Query struct {\n\tStatements Statements\n}\n\n// String returns a string representation of the query.\nfunc (q *Query) String() string { return q.Statements.String() }\n\n// Statements represents a list of statements.\ntype Statements []Statement\n\n// String returns a string representation of the statements.\nfunc (a Statements) String() string {\n\tvar str []string\n\tfor _, stmt := range a {\n\t\tstr = append(str, stmt.String())\n\t}\n\treturn strings.Join(str, \";\\n\")\n}\n\n// Statement represents a single command in InfluxQL.\ntype Statement interface {\n\tNode\n\t// stmt is unexported to ensure implementations of Statement\n\t// can only originate in this package.\n\tstmt()\n\tRequiredPrivileges() (ExecutionPrivileges, error)\n}\n\n// HasDefaultDatabase provides an interface to get the default database from a Statement.\ntype HasDefaultDatabase interface {\n\tNode\n\t// stmt is unexported to ensure implementations of HasDefaultDatabase\n\t// can only originate in this package.\n\tstmt()\n\tDefaultDatabase() string\n}\n\n// ExecutionPrivilege is a privilege required for a user to execute\n// a statement on a database or resource.\ntype ExecutionPrivilege struct {\n\t// Admin privilege required.\n\tAdmin bool\n\n\t// Name of the database.\n\tName string\n\n\t// Database privilege required.\n\tPrivilege Privilege\n}\n\n// ExecutionPrivileges is a list of privileges required to execute a statement.\ntype ExecutionPrivileges []ExecutionPrivilege\n\nfunc (*AlterRetentionPolicyStatement) stmt()  {}\nfunc (*CreateContinuousQueryStatement) stmt() {}\nfunc (*CreateDatabaseStatement) stmt()        {}\nfunc (*CreateRetentionPolicyStatement) stmt() {}\nfunc (*CreateSubscriptionStatement) stmt()    {}\nfunc (*CreateUserStatement) stmt()            {}\nfunc (*DeleteSeriesStatement) stmt()          {}\nfunc (*DeleteStatement) stmt()                {}\nfunc (*DropContinuousQueryStatement) stmt()   {}\nfunc (*DropDatabaseStatement) 
stmt()          {}\nfunc (*DropMeasurementStatement) stmt()       {}\nfunc (*DropRetentionPolicyStatement) stmt()   {}\nfunc (*DropSeriesStatement) stmt()            {}\nfunc (*DropSubscriptionStatement) stmt()      {}\nfunc (*DropUserStatement) stmt()              {}\nfunc (*GrantStatement) stmt()                 {}\nfunc (*GrantAdminStatement) stmt()            {}\nfunc (*KillQueryStatement) stmt()             {}\nfunc (*ShowContinuousQueriesStatement) stmt() {}\nfunc (*ShowGrantsForUserStatement) stmt()     {}\nfunc (*ShowDatabasesStatement) stmt()         {}\nfunc (*ShowFieldKeysStatement) stmt()         {}\nfunc (*ShowMeasurementsStatement) stmt()      {}\nfunc (*ShowQueriesStatement) stmt()           {}\nfunc (*ShowRetentionPoliciesStatement) stmt() {}\nfunc (*ShowSeriesStatement) stmt()            {}\nfunc (*ShowShardGroupsStatement) stmt()       {}\nfunc (*ShowShardsStatement) stmt()            {}\nfunc (*ShowStatsStatement) stmt()             {}\nfunc (*DropShardStatement) stmt()             {}\nfunc (*ShowSubscriptionsStatement) stmt()     {}\nfunc (*ShowDiagnosticsStatement) stmt()       {}\nfunc (*ShowTagKeysStatement) stmt()           {}\nfunc (*ShowTagValuesStatement) stmt()         {}\nfunc (*ShowUsersStatement) stmt()             {}\nfunc (*RevokeStatement) stmt()                {}\nfunc (*RevokeAdminStatement) stmt()           {}\nfunc (*SelectStatement) stmt()                {}\nfunc (*SetPasswordUserStatement) stmt()       {}\n\n// Expr represents an expression that can be evaluated to a value.\ntype Expr interface {\n\tNode\n\t// expr is unexported to ensure implementations of Expr\n\t// can only originate in this package.\n\texpr()\n}\n\nfunc (*BinaryExpr) expr()      {}\nfunc (*BooleanLiteral) expr()  {}\nfunc (*Call) expr()            {}\nfunc (*Distinct) expr()        {}\nfunc (*DurationLiteral) expr() {}\nfunc (*IntegerLiteral) expr()  {}\nfunc (*nilLiteral) expr()      {}\nfunc (*NumberLiteral) expr()   {}\nfunc (*ParenExpr) expr()       
{}\nfunc (*RegexLiteral) expr()    {}\nfunc (*ListLiteral) expr()     {}\nfunc (*StringLiteral) expr()   {}\nfunc (*TimeLiteral) expr()     {}\nfunc (*VarRef) expr()          {}\nfunc (*Wildcard) expr()        {}\n\n// Literal represents a static literal.\ntype Literal interface {\n\tExpr\n\t// literal is unexported to ensure implementations of Literal\n\t// can only originate in this package.\n\tliteral()\n}\n\nfunc (*BooleanLiteral) literal()  {}\nfunc (*DurationLiteral) literal() {}\nfunc (*IntegerLiteral) literal()  {}\nfunc (*nilLiteral) literal()      {}\nfunc (*NumberLiteral) literal()   {}\nfunc (*RegexLiteral) literal()    {}\nfunc (*ListLiteral) literal()     {}\nfunc (*StringLiteral) literal()   {}\nfunc (*TimeLiteral) literal()     {}\n\n// Source represents a source of data for a statement.\ntype Source interface {\n\tNode\n\t// source is unexported to ensure implementations of Source\n\t// can only originate in this package.\n\tsource()\n}\n\nfunc (*Measurement) source() {}\nfunc (*SubQuery) source()    {}\n\n// Sources represents a list of sources.\ntype Sources []Source\n\n// Names returns a list of source names.\nfunc (a Sources) Names() []string {\n\tnames := make([]string, 0, len(a))\n\tfor _, s := range a {\n\t\tswitch s := s.(type) {\n\t\tcase *Measurement:\n\t\t\tnames = append(names, s.Name)\n\t\t}\n\t}\n\treturn names\n}\n\n// Filter returns a list of source names filtered by the database/retention policy.\nfunc (a Sources) Filter(database, retentionPolicy string) []Source {\n\tsources := make([]Source, 0, len(a))\n\tfor _, s := range a {\n\t\tswitch s := s.(type) {\n\t\tcase *Measurement:\n\t\t\tif s.Database == database && s.RetentionPolicy == retentionPolicy {\n\t\t\t\tsources = append(sources, s)\n\t\t\t}\n\t\tcase *SubQuery:\n\t\t\tfilteredSources := s.Statement.Sources.Filter(database, retentionPolicy)\n\t\t\tsources = append(sources, filteredSources...)\n\t\t}\n\t}\n\treturn sources\n}\n\n// HasSystemSource returns true if any of the 
sources are internal, system sources.\nfunc (a Sources) HasSystemSource() bool {\n\tfor _, s := range a {\n\t\tswitch s := s.(type) {\n\t\tcase *Measurement:\n\t\t\tif IsSystemName(s.Name) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\n// HasRegex returns true if any of the sources are regex measurements.\nfunc (a Sources) HasRegex() bool {\n\tfor _, s := range a {\n\t\tswitch s := s.(type) {\n\t\tcase *Measurement:\n\t\t\tif s.Regex != nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\n// String returns a string representation of a Sources array.\nfunc (a Sources) String() string {\n\tvar buf bytes.Buffer\n\n\tubound := len(a) - 1\n\tfor i, src := range a {\n\t\t_, _ = buf.WriteString(src.String())\n\t\tif i < ubound {\n\t\t\t_, _ = buf.WriteString(\", \")\n\t\t}\n\t}\n\n\treturn buf.String()\n}\n\n// Measurements returns all measurements including ones embedded in subqueries.\nfunc (a Sources) Measurements() []*Measurement {\n\tmms := make([]*Measurement, 0, len(a))\n\tfor _, src := range a {\n\t\tswitch src := src.(type) {\n\t\tcase *Measurement:\n\t\t\tmms = append(mms, src)\n\t\tcase *SubQuery:\n\t\t\tmms = append(mms, src.Statement.Sources.Measurements()...)\n\t\t}\n\t}\n\treturn mms\n}\n\n// MarshalBinary encodes a list of sources to a binary format.\nfunc (a Sources) MarshalBinary() ([]byte, error) {\n\tvar pb internal.Measurements\n\tpb.Items = make([]*internal.Measurement, len(a))\n\tfor i, source := range a {\n\t\tpb.Items[i] = encodeMeasurement(source.(*Measurement))\n\t}\n\treturn proto.Marshal(&pb)\n}\n\n// UnmarshalBinary decodes binary data into a list of sources.\nfunc (a *Sources) UnmarshalBinary(buf []byte) error {\n\tvar pb internal.Measurements\n\tif err := proto.Unmarshal(buf, &pb); err != nil {\n\t\treturn err\n\t}\n\t*a = make(Sources, len(pb.GetItems()))\n\tfor i := range pb.GetItems() {\n\t\tmm, err := decodeMeasurement(pb.GetItems()[i])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t(*a)[i] = 
mm\n\t}\n\treturn nil\n}\n\n// IsSystemName returns true if name is an internal system name.\nfunc IsSystemName(name string) bool {\n\tswitch name {\n\tcase \"_fieldKeys\",\n\t\t\"_measurements\",\n\t\t\"_series\",\n\t\t\"_tagKeys\",\n\t\t\"_tags\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n// SortField represents a field to sort results by.\ntype SortField struct {\n\t// Name of the field.\n\tName string\n\n\t// Sort order.\n\tAscending bool\n}\n\n// String returns a string representation of a sort field.\nfunc (field *SortField) String() string {\n\tvar buf bytes.Buffer\n\tif field.Name != \"\" {\n\t\t_, _ = buf.WriteString(field.Name)\n\t\t_, _ = buf.WriteString(\" \")\n\t}\n\tif field.Ascending {\n\t\t_, _ = buf.WriteString(\"ASC\")\n\t} else {\n\t\t_, _ = buf.WriteString(\"DESC\")\n\t}\n\treturn buf.String()\n}\n\n// SortFields represents an ordered list of ORDER BY fields.\ntype SortFields []*SortField\n\n// String returns a string representation of sort fields.\nfunc (a SortFields) String() string {\n\tfields := make([]string, 0, len(a))\n\tfor _, field := range a {\n\t\tfields = append(fields, field.String())\n\t}\n\treturn strings.Join(fields, \", \")\n}\n\n// CreateDatabaseStatement represents a command for creating a new database.\ntype CreateDatabaseStatement struct {\n\t// Name of the database to be created.\n\tName string\n\n\t// RetentionPolicyCreate indicates whether the user explicitly wants to create a retention policy.\n\tRetentionPolicyCreate bool\n\n\t// RetentionPolicyDuration indicates retention duration for the new database.\n\tRetentionPolicyDuration *time.Duration\n\n\t// RetentionPolicyReplication indicates retention replication for the new database.\n\tRetentionPolicyReplication *int\n\n\t// RetentionPolicyName indicates retention name for the new database.\n\tRetentionPolicyName string\n\n\t// RetentionPolicyShardGroupDuration indicates shard group duration for the new database.\n\tRetentionPolicyShardGroupDuration 
time.Duration\n}\n\n// String returns a string representation of the create database statement.\nfunc (s *CreateDatabaseStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"CREATE DATABASE \")\n\t_, _ = buf.WriteString(QuoteIdent(s.Name))\n\tif s.RetentionPolicyCreate {\n\t\t_, _ = buf.WriteString(\" WITH\")\n\t\tif s.RetentionPolicyDuration != nil {\n\t\t\t_, _ = buf.WriteString(\" DURATION \")\n\t\t\t_, _ = buf.WriteString(s.RetentionPolicyDuration.String())\n\t\t}\n\t\tif s.RetentionPolicyReplication != nil {\n\t\t\t_, _ = buf.WriteString(\" REPLICATION \")\n\t\t\t_, _ = buf.WriteString(strconv.Itoa(*s.RetentionPolicyReplication))\n\t\t}\n\t\tif s.RetentionPolicyShardGroupDuration > 0 {\n\t\t\t_, _ = buf.WriteString(\" SHARD DURATION \")\n\t\t\t_, _ = buf.WriteString(s.RetentionPolicyShardGroupDuration.String())\n\t\t}\n\t\tif s.RetentionPolicyName != \"\" {\n\t\t\t_, _ = buf.WriteString(\" NAME \")\n\t\t\t_, _ = buf.WriteString(QuoteIdent(s.RetentionPolicyName))\n\t\t}\n\t}\n\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a CreateDatabaseStatement.\nfunc (s *CreateDatabaseStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", Privilege: AllPrivileges}}, nil\n}\n\n// DropDatabaseStatement represents a command to drop a database.\ntype DropDatabaseStatement struct {\n\t// Name of the database to be dropped.\n\tName string\n}\n\n// String returns a string representation of the drop database statement.\nfunc (s *DropDatabaseStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"DROP DATABASE \")\n\t_, _ = buf.WriteString(QuoteIdent(s.Name))\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a DropDatabaseStatement.\nfunc (s *DropDatabaseStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", 
Privilege: AllPrivileges}}, nil\n}\n\n// DropRetentionPolicyStatement represents a command to drop a retention policy from a database.\ntype DropRetentionPolicyStatement struct {\n\t// Name of the policy to drop.\n\tName string\n\n\t// Name of the database to drop the policy from.\n\tDatabase string\n}\n\n// String returns a string representation of the drop retention policy statement.\nfunc (s *DropRetentionPolicyStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"DROP RETENTION POLICY \")\n\t_, _ = buf.WriteString(QuoteIdent(s.Name))\n\t_, _ = buf.WriteString(\" ON \")\n\t_, _ = buf.WriteString(QuoteIdent(s.Database))\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a DropRetentionPolicyStatement.\nfunc (s *DropRetentionPolicyStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: WritePrivilege}}, nil\n}\n\n// DefaultDatabase returns the default database from the statement.\nfunc (s *DropRetentionPolicyStatement) DefaultDatabase() string {\n\treturn s.Database\n}\n\n// CreateUserStatement represents a command for creating a new user.\ntype CreateUserStatement struct {\n\t// Name of the user to be created.\n\tName string\n\n\t// User's password.\n\tPassword string\n\n\t// User's admin privilege.\n\tAdmin bool\n}\n\n// String returns a string representation of the create user statement.\nfunc (s *CreateUserStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"CREATE USER \")\n\t_, _ = buf.WriteString(QuoteIdent(s.Name))\n\t_, _ = buf.WriteString(\" WITH PASSWORD \")\n\t_, _ = buf.WriteString(\"[REDACTED]\")\n\tif s.Admin {\n\t\t_, _ = buf.WriteString(\" WITH ALL PRIVILEGES\")\n\t}\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege(s) required to execute a CreateUserStatement.\nfunc (s *CreateUserStatement) RequiredPrivileges() (ExecutionPrivileges, error) 
{\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", Privilege: AllPrivileges}}, nil\n}\n\n// DropUserStatement represents a command for dropping a user.\ntype DropUserStatement struct {\n\t// Name of the user to drop.\n\tName string\n}\n\n// String returns a string representation of the drop user statement.\nfunc (s *DropUserStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"DROP USER \")\n\t_, _ = buf.WriteString(QuoteIdent(s.Name))\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege(s) required to execute a DropUserStatement.\nfunc (s *DropUserStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", Privilege: AllPrivileges}}, nil\n}\n\n// Privilege is a type of action a user can be granted the right to use.\ntype Privilege int\n\nconst (\n\t// NoPrivileges means no privileges required / granted / revoked.\n\tNoPrivileges Privilege = iota\n\t// ReadPrivilege means read privilege required / granted / revoked.\n\tReadPrivilege\n\t// WritePrivilege means write privilege required / granted / revoked.\n\tWritePrivilege\n\t// AllPrivileges means all privileges required / granted / revoked.\n\tAllPrivileges\n)\n\n// NewPrivilege returns an initialized *Privilege.\nfunc NewPrivilege(p Privilege) *Privilege { return &p }\n\n// String returns a string representation of a Privilege.\nfunc (p Privilege) String() string {\n\tswitch p {\n\tcase NoPrivileges:\n\t\treturn \"NO PRIVILEGES\"\n\tcase ReadPrivilege:\n\t\treturn \"READ\"\n\tcase WritePrivilege:\n\t\treturn \"WRITE\"\n\tcase AllPrivileges:\n\t\treturn \"ALL PRIVILEGES\"\n\t}\n\treturn \"\"\n}\n\n// GrantStatement represents a command for granting a privilege.\ntype GrantStatement struct {\n\t// The privilege to be granted.\n\tPrivilege Privilege\n\n\t// Database to grant the privilege to.\n\tOn string\n\n\t// Who to grant the privilege to.\n\tUser string\n}\n\n// String returns a string representation 
of the grant statement.\nfunc (s *GrantStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"GRANT \")\n\t_, _ = buf.WriteString(s.Privilege.String())\n\t_, _ = buf.WriteString(\" ON \")\n\t_, _ = buf.WriteString(QuoteIdent(s.On))\n\t_, _ = buf.WriteString(\" TO \")\n\t_, _ = buf.WriteString(QuoteIdent(s.User))\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a GrantStatement.\nfunc (s *GrantStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", Privilege: AllPrivileges}}, nil\n}\n\n// DefaultDatabase returns the default database from the statement.\nfunc (s *GrantStatement) DefaultDatabase() string {\n\treturn s.On\n}\n\n// GrantAdminStatement represents a command for granting admin privilege.\ntype GrantAdminStatement struct {\n\t// Who to grant the privilege to.\n\tUser string\n}\n\n// String returns a string representation of the grant admin statement.\nfunc (s *GrantAdminStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"GRANT ALL PRIVILEGES TO \")\n\t_, _ = buf.WriteString(QuoteIdent(s.User))\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a GrantAdminStatement.\nfunc (s *GrantAdminStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", Privilege: AllPrivileges}}, nil\n}\n\n// KillQueryStatement represents a command for killing a query.\ntype KillQueryStatement struct {\n\t// The query to kill.\n\tQueryID uint64\n\n\t// The host to delegate the kill to.\n\tHost string\n}\n\n// String returns a string representation of the kill query statement.\nfunc (s *KillQueryStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"KILL QUERY \")\n\t_, _ = buf.WriteString(strconv.FormatUint(s.QueryID, 10))\n\tif s.Host != \"\" {\n\t\t_, _ = buf.WriteString(\" ON \")\n\t\t_, _ = 
buf.WriteString(QuoteIdent(s.Host))\n\t}\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a KillQueryStatement.\nfunc (s *KillQueryStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", Privilege: AllPrivileges}}, nil\n}\n\n// SetPasswordUserStatement represents a command for changing user password.\ntype SetPasswordUserStatement struct {\n\t// Plain-text password.\n\tPassword string\n\n\t// Who to grant the privilege to.\n\tName string\n}\n\n// String returns a string representation of the set password statement.\nfunc (s *SetPasswordUserStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"SET PASSWORD FOR \")\n\t_, _ = buf.WriteString(QuoteIdent(s.Name))\n\t_, _ = buf.WriteString(\" = \")\n\t_, _ = buf.WriteString(\"[REDACTED]\")\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a SetPasswordUserStatement.\nfunc (s *SetPasswordUserStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", Privilege: AllPrivileges}}, nil\n}\n\n// RevokeStatement represents a command to revoke a privilege from a user.\ntype RevokeStatement struct {\n\t// The privilege to be revoked.\n\tPrivilege Privilege\n\n\t// Database to revoke the privilege from.\n\tOn string\n\n\t// Who to revoke privilege from.\n\tUser string\n}\n\n// String returns a string representation of the revoke statement.\nfunc (s *RevokeStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"REVOKE \")\n\t_, _ = buf.WriteString(s.Privilege.String())\n\t_, _ = buf.WriteString(\" ON \")\n\t_, _ = buf.WriteString(QuoteIdent(s.On))\n\t_, _ = buf.WriteString(\" FROM \")\n\t_, _ = buf.WriteString(QuoteIdent(s.User))\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a RevokeStatement.\nfunc (s *RevokeStatement) 
RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", Privilege: AllPrivileges}}, nil\n}\n\n// DefaultDatabase returns the default database from the statement.\nfunc (s *RevokeStatement) DefaultDatabase() string {\n\treturn s.On\n}\n\n// RevokeAdminStatement represents a command to revoke admin privilege from a user.\ntype RevokeAdminStatement struct {\n\t// Who to revoke admin privilege from.\n\tUser string\n}\n\n// String returns a string representation of the revoke admin statement.\nfunc (s *RevokeAdminStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"REVOKE ALL PRIVILEGES FROM \")\n\t_, _ = buf.WriteString(QuoteIdent(s.User))\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a RevokeAdminStatement.\nfunc (s *RevokeAdminStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", Privilege: AllPrivileges}}, nil\n}\n\n// CreateRetentionPolicyStatement represents a command to create a retention policy.\ntype CreateRetentionPolicyStatement struct {\n\t// Name of policy to create.\n\tName string\n\n\t// Name of database this policy belongs to.\n\tDatabase string\n\n\t// Duration data written to this policy will be retained.\n\tDuration time.Duration\n\n\t// Replication factor for data written to this policy.\n\tReplication int\n\n\t// Should this policy be set as default for the database?\n\tDefault bool\n\n\t// Shard Duration.\n\tShardGroupDuration time.Duration\n}\n\n// String returns a string representation of the create retention policy.\nfunc (s *CreateRetentionPolicyStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"CREATE RETENTION POLICY \")\n\t_, _ = buf.WriteString(QuoteIdent(s.Name))\n\t_, _ = buf.WriteString(\" ON \")\n\t_, _ = buf.WriteString(QuoteIdent(s.Database))\n\t_, _ = buf.WriteString(\" DURATION \")\n\t_, _ = 
buf.WriteString(FormatDuration(s.Duration))\n\t_, _ = buf.WriteString(\" REPLICATION \")\n\t_, _ = buf.WriteString(strconv.Itoa(s.Replication))\n\tif s.ShardGroupDuration > 0 {\n\t\t_, _ = buf.WriteString(\" SHARD DURATION \")\n\t\t_, _ = buf.WriteString(FormatDuration(s.ShardGroupDuration))\n\t}\n\tif s.Default {\n\t\t_, _ = buf.WriteString(\" DEFAULT\")\n\t}\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a CreateRetentionPolicyStatement.\nfunc (s *CreateRetentionPolicyStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", Privilege: AllPrivileges}}, nil\n}\n\n// DefaultDatabase returns the default database from the statement.\nfunc (s *CreateRetentionPolicyStatement) DefaultDatabase() string {\n\treturn s.Database\n}\n\n// AlterRetentionPolicyStatement represents a command to alter an existing retention policy.\ntype AlterRetentionPolicyStatement struct {\n\t// Name of policy to alter.\n\tName string\n\n\t// Name of the database this policy belongs to.\n\tDatabase string\n\n\t// Duration data written to this policy will be retained.\n\tDuration *time.Duration\n\n\t// Replication factor for data written to this policy.\n\tReplication *int\n\n\t// Should this policy be set as defalut for the database?\n\tDefault bool\n\n\t// Duration of the Shard.\n\tShardGroupDuration *time.Duration\n}\n\n// String returns a string representation of the alter retention policy statement.\nfunc (s *AlterRetentionPolicyStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"ALTER RETENTION POLICY \")\n\t_, _ = buf.WriteString(QuoteIdent(s.Name))\n\t_, _ = buf.WriteString(\" ON \")\n\t_, _ = buf.WriteString(QuoteIdent(s.Database))\n\n\tif s.Duration != nil {\n\t\t_, _ = buf.WriteString(\" DURATION \")\n\t\t_, _ = buf.WriteString(FormatDuration(*s.Duration))\n\t}\n\n\tif s.Replication != nil {\n\t\t_, _ = buf.WriteString(\" REPLICATION \")\n\t\t_, _ = 
buf.WriteString(strconv.Itoa(*s.Replication))\n\t}\n\n\tif s.ShardGroupDuration != nil {\n\t\t_, _ = buf.WriteString(\" SHARD DURATION \")\n\t\t_, _ = buf.WriteString(FormatDuration(*s.ShardGroupDuration))\n\t}\n\n\tif s.Default {\n\t\t_, _ = buf.WriteString(\" DEFAULT\")\n\t}\n\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute an AlterRetentionPolicyStatement.\nfunc (s *AlterRetentionPolicyStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", Privilege: AllPrivileges}}, nil\n}\n\n// DefaultDatabase returns the default database from the statement.\nfunc (s *AlterRetentionPolicyStatement) DefaultDatabase() string {\n\treturn s.Database\n}\n\n// FillOption represents different options for filling aggregate windows.\ntype FillOption int\n\nconst (\n\t// NullFill means that empty aggregate windows will just have null values.\n\tNullFill FillOption = iota\n\t// NoFill means that empty aggregate windows will be purged from the result.\n\tNoFill\n\t// NumberFill means that empty aggregate windows will be filled with a provided number.\n\tNumberFill\n\t// PreviousFill means that empty aggregate windows will be filled with whatever the previous aggregate window had.\n\tPreviousFill\n\t// LinearFill means that empty aggregate windows will be filled with whatever a linear value between non null windows.\n\tLinearFill\n)\n\n// SelectStatement represents a command for extracting data from the database.\ntype SelectStatement struct {\n\t// Expressions returned from the selection.\n\tFields Fields\n\n\t// Target (destination) for the result of a SELECT INTO query.\n\tTarget *Target\n\n\t// Expressions used for grouping the selection.\n\tDimensions Dimensions\n\n\t// Data sources (measurements) that fields are extracted from.\n\tSources Sources\n\n\t// An expression evaluated on data point.\n\tCondition Expr\n\n\t// Fields to sort results by.\n\tSortFields SortFields\n\n\t// 
Maximum number of rows to be returned. Unlimited if zero.\n\tLimit int\n\n\t// Returns rows starting at an offset from the first row.\n\tOffset int\n\n\t// Maxiumum number of series to be returned. Unlimited if zero.\n\tSLimit int\n\n\t// Returns series starting at an offset from the first one.\n\tSOffset int\n\n\t// Memoized group by interval from GroupBy().\n\tgroupByInterval time.Duration\n\n\t// Whether it's a query for raw data values (i.e. not an aggregate).\n\tIsRawQuery bool\n\n\t// What fill option the select statement uses, if any.\n\tFill FillOption\n\n\t// The value to fill empty aggregate buckets with, if any.\n\tFillValue interface{}\n\n\t// The timezone for the query, if any.\n\tLocation *time.Location\n\n\t// Renames the implicit time field name.\n\tTimeAlias string\n\n\t// Removes the \"time\" column from the output.\n\tOmitTime bool\n\n\t// Removes duplicate rows from raw queries.\n\tDedupe bool\n}\n\n// HasDerivative returns true if any function call in the statement is a\n// derivative aggregate.\nfunc (s *SelectStatement) HasDerivative() bool {\n\tfor _, f := range s.FunctionCalls() {\n\t\tif f.Name == \"derivative\" || f.Name == \"non_negative_derivative\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// IsSimpleDerivative return true if any function call is a derivative function with a\n// variable ref as the first arg.\nfunc (s *SelectStatement) IsSimpleDerivative() bool {\n\tfor _, f := range s.FunctionCalls() {\n\t\tif f.Name == \"derivative\" || f.Name == \"non_negative_derivative\" {\n\t\t\t// it's nested if the first argument is an aggregate function\n\t\t\tif _, ok := f.Args[0].(*VarRef); ok {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\n// HasSelector returns true if there is exactly one selector.\nfunc (s *SelectStatement) HasSelector() bool {\n\tvar selector *Call\n\tfor _, f := range s.Fields {\n\t\tif call, ok := f.Expr.(*Call); ok {\n\t\t\tif selector != nil || !IsSelector(call) {\n\t\t\t\t// This is 
an aggregate call or there is already a selector.\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tselector = call\n\t\t}\n\t}\n\treturn selector != nil\n}\n\n// TimeAscending returns true if the time field is sorted in chronological order.\nfunc (s *SelectStatement) TimeAscending() bool {\n\treturn len(s.SortFields) == 0 || s.SortFields[0].Ascending\n}\n\n// TimeFieldName returns the name of the time field.\nfunc (s *SelectStatement) TimeFieldName() string {\n\tif s.TimeAlias != \"\" {\n\t\treturn s.TimeAlias\n\t}\n\treturn \"time\"\n}\n\n// Clone returns a deep copy of the statement.\nfunc (s *SelectStatement) Clone() *SelectStatement {\n\tclone := *s\n\tclone.Fields = make(Fields, 0, len(s.Fields))\n\tclone.Dimensions = make(Dimensions, 0, len(s.Dimensions))\n\tclone.Sources = cloneSources(s.Sources)\n\tclone.SortFields = make(SortFields, 0, len(s.SortFields))\n\tclone.Condition = CloneExpr(s.Condition)\n\n\tif s.Target != nil {\n\t\tclone.Target = &Target{\n\t\t\tMeasurement: &Measurement{\n\t\t\t\tDatabase:        s.Target.Measurement.Database,\n\t\t\t\tRetentionPolicy: s.Target.Measurement.RetentionPolicy,\n\t\t\t\tName:            s.Target.Measurement.Name,\n\t\t\t\tRegex:           CloneRegexLiteral(s.Target.Measurement.Regex),\n\t\t\t},\n\t\t}\n\t}\n\tfor _, f := range s.Fields {\n\t\tclone.Fields = append(clone.Fields, &Field{Expr: CloneExpr(f.Expr), Alias: f.Alias})\n\t}\n\tfor _, d := range s.Dimensions {\n\t\tclone.Dimensions = append(clone.Dimensions, &Dimension{Expr: CloneExpr(d.Expr)})\n\t}\n\tfor _, f := range s.SortFields {\n\t\tclone.SortFields = append(clone.SortFields, &SortField{Name: f.Name, Ascending: f.Ascending})\n\t}\n\treturn &clone\n}\n\nfunc cloneSources(sources Sources) Sources {\n\tclone := make(Sources, 0, len(sources))\n\tfor _, s := range sources {\n\t\tclone = append(clone, cloneSource(s))\n\t}\n\treturn clone\n}\n\nfunc cloneSource(s Source) Source {\n\tif s == nil {\n\t\treturn nil\n\t}\n\n\tswitch s := s.(type) {\n\tcase 
*Measurement:\n\t\tm := &Measurement{Database: s.Database, RetentionPolicy: s.RetentionPolicy, Name: s.Name}\n\t\tif s.Regex != nil {\n\t\t\tm.Regex = &RegexLiteral{Val: regexp.MustCompile(s.Regex.Val.String())}\n\t\t}\n\t\treturn m\n\tcase *SubQuery:\n\t\treturn &SubQuery{Statement: s.Statement.Clone()}\n\tdefault:\n\t\tpanic(\"unreachable\")\n\t}\n}\n\n// RewriteFields returns the re-written form of the select statement. Any wildcard query\n// fields are replaced with the supplied fields, and any wildcard GROUP BY fields are replaced\n// with the supplied dimensions. Any fields with no type specifier are rewritten with the\n// appropriate type.\nfunc (s *SelectStatement) RewriteFields(m FieldMapper) (*SelectStatement, error) {\n\t// Clone the statement so we aren't rewriting the original.\n\tother := s.Clone()\n\n\t// Iterate through the sources and rewrite any subqueries first.\n\tfor _, src := range other.Sources {\n\t\tswitch src := src.(type) {\n\t\tcase *SubQuery:\n\t\t\tstmt, err := src.Statement.RewriteFields(m)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tsrc.Statement = stmt\n\t\t}\n\t}\n\n\t// Rewrite all variable references in the fields with their types if one\n\t// hasn't been specified.\n\trewrite := func(n Node) {\n\t\tref, ok := n.(*VarRef)\n\t\tif !ok || (ref.Type != Unknown && ref.Type != AnyField) {\n\t\t\treturn\n\t\t}\n\n\t\ttyp := EvalType(ref, other.Sources, m)\n\t\tif typ == Tag && ref.Type == AnyField {\n\t\t\treturn\n\t\t}\n\t\tref.Type = typ\n\t}\n\tWalkFunc(other.Fields, rewrite)\n\tWalkFunc(other.Condition, rewrite)\n\n\t// Ignore if there are no wildcards.\n\thasFieldWildcard := other.HasFieldWildcard()\n\thasDimensionWildcard := other.HasDimensionWildcard()\n\tif !hasFieldWildcard && !hasDimensionWildcard {\n\t\treturn other, nil\n\t}\n\n\tfieldSet, dimensionSet, err := FieldDimensions(other.Sources, m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If there are no dimension wildcards then merge dimensions 
to fields.\n\tif !hasDimensionWildcard {\n\t\t// Remove the dimensions present in the group by so they don't get added as fields.\n\t\tfor _, d := range other.Dimensions {\n\t\t\tswitch expr := d.Expr.(type) {\n\t\t\tcase *VarRef:\n\t\t\t\tif _, ok := dimensionSet[expr.Val]; ok {\n\t\t\t\t\tdelete(dimensionSet, expr.Val)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Sort the field and dimension names for wildcard expansion.\n\tvar fields []VarRef\n\tif len(fieldSet) > 0 {\n\t\tfields = make([]VarRef, 0, len(fieldSet))\n\t\tfor name, typ := range fieldSet {\n\t\t\tfields = append(fields, VarRef{Val: name, Type: typ})\n\t\t}\n\t\tif !hasDimensionWildcard {\n\t\t\tfor name := range dimensionSet {\n\t\t\t\tfields = append(fields, VarRef{Val: name, Type: Tag})\n\t\t\t}\n\t\t\tdimensionSet = nil\n\t\t}\n\t\tsort.Sort(VarRefs(fields))\n\t}\n\tdimensions := stringSetSlice(dimensionSet)\n\n\t// Rewrite all wildcard query fields\n\tif hasFieldWildcard {\n\t\t// Allocate a slice assuming there is exactly one wildcard for efficiency.\n\t\trwFields := make(Fields, 0, len(other.Fields)+len(fields)-1)\n\t\tfor _, f := range other.Fields {\n\t\t\tswitch expr := f.Expr.(type) {\n\t\t\tcase *Wildcard:\n\t\t\t\tfor _, ref := range fields {\n\t\t\t\t\tif expr.Type == FIELD && ref.Type == Tag {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if expr.Type == TAG && ref.Type != Tag {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\trwFields = append(rwFields, &Field{Expr: &VarRef{Val: ref.Val, Type: ref.Type}})\n\t\t\t\t}\n\t\t\tcase *RegexLiteral:\n\t\t\t\tfor _, ref := range fields {\n\t\t\t\t\tif expr.Val.MatchString(ref.Val) {\n\t\t\t\t\t\trwFields = append(rwFields, &Field{Expr: &VarRef{Val: ref.Val, Type: ref.Type}})\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase *Call:\n\t\t\t\t// Clone a template that we can modify and use for new fields.\n\t\t\t\ttemplate := CloneExpr(expr).(*Call)\n\n\t\t\t\t// Search for the call with a wildcard by continuously descending until\n\t\t\t\t// we no longer have a 
call.\n\t\t\t\tcall := template\n\t\t\t\tfor len(call.Args) > 0 {\n\t\t\t\t\targ, ok := call.Args[0].(*Call)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tcall = arg\n\t\t\t\t}\n\n\t\t\t\t// Check if this field value is a wildcard.\n\t\t\t\tif len(call.Args) == 0 {\n\t\t\t\t\trwFields = append(rwFields, f)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Retrieve if this is a wildcard or a regular expression.\n\t\t\t\tvar re *regexp.Regexp\n\t\t\t\tswitch expr := call.Args[0].(type) {\n\t\t\t\tcase *Wildcard:\n\t\t\t\t\tif expr.Type == TAG {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"unable to use tag wildcard in %s()\", call.Name)\n\t\t\t\t\t}\n\t\t\t\tcase *RegexLiteral:\n\t\t\t\t\tre = expr.Val\n\t\t\t\tdefault:\n\t\t\t\t\trwFields = append(rwFields, f)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// All types that can expand wildcards support float and integer.\n\t\t\t\tsupportedTypes := map[DataType]struct{}{\n\t\t\t\t\tFloat:   struct{}{},\n\t\t\t\t\tInteger: struct{}{},\n\t\t\t\t}\n\n\t\t\t\t// Add additional types for certain functions.\n\t\t\t\tswitch call.Name {\n\t\t\t\tcase \"count\", \"first\", \"last\", \"distinct\", \"elapsed\", \"mode\", \"sample\":\n\t\t\t\t\tsupportedTypes[String] = struct{}{}\n\t\t\t\t\tfallthrough\n\t\t\t\tcase \"min\", \"max\":\n\t\t\t\t\tsupportedTypes[Boolean] = struct{}{}\n\t\t\t\t}\n\n\t\t\t\tfor _, ref := range fields {\n\t\t\t\t\t// Do not expand tags within a function call. 
It likely won't do anything\n\t\t\t\t\t// anyway and will be the wrong thing in 99% of cases.\n\t\t\t\t\tif ref.Type == Tag {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if _, ok := supportedTypes[ref.Type]; !ok {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if re != nil && !re.MatchString(ref.Val) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t// Make a new expression and replace the wildcard within this cloned expression.\n\t\t\t\t\tcall.Args[0] = &VarRef{Val: ref.Val, Type: ref.Type}\n\t\t\t\t\trwFields = append(rwFields, &Field{\n\t\t\t\t\t\tExpr:  CloneExpr(template),\n\t\t\t\t\t\tAlias: fmt.Sprintf(\"%s_%s\", f.Name(), ref.Val),\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\tcase *BinaryExpr:\n\t\t\t\t// Search for regexes or wildcards within the binary\n\t\t\t\t// expression. If we find any, throw an error indicating that\n\t\t\t\t// it's illegal.\n\t\t\t\tvar regex, wildcard bool\n\t\t\t\tWalkFunc(expr, func(n Node) {\n\t\t\t\t\tswitch n.(type) {\n\t\t\t\t\tcase *RegexLiteral:\n\t\t\t\t\t\tregex = true\n\t\t\t\t\tcase *Wildcard:\n\t\t\t\t\t\twildcard = true\n\t\t\t\t\t}\n\t\t\t\t})\n\n\t\t\t\tif wildcard {\n\t\t\t\t\treturn nil, fmt.Errorf(\"unsupported expression with wildcard: %s\", f.Expr)\n\t\t\t\t} else if regex {\n\t\t\t\t\treturn nil, fmt.Errorf(\"unsupported expression with regex field: %s\", f.Expr)\n\t\t\t\t}\n\t\t\t\trwFields = append(rwFields, f)\n\t\t\tdefault:\n\t\t\t\trwFields = append(rwFields, f)\n\t\t\t}\n\t\t}\n\t\tother.Fields = rwFields\n\t}\n\n\t// Rewrite all wildcard GROUP BY fields\n\tif hasDimensionWildcard {\n\t\t// Allocate a slice assuming there is exactly one wildcard for efficiency.\n\t\trwDimensions := make(Dimensions, 0, len(other.Dimensions)+len(dimensions)-1)\n\t\tfor _, d := range other.Dimensions {\n\t\t\tswitch expr := d.Expr.(type) {\n\t\t\tcase *Wildcard:\n\t\t\t\tfor _, name := range dimensions {\n\t\t\t\t\trwDimensions = append(rwDimensions, &Dimension{Expr: &VarRef{Val: name}})\n\t\t\t\t}\n\t\t\tcase *RegexLiteral:\n\t\t\t\tfor _, name 
:= range dimensions {\n\t\t\t\t\tif expr.Val.MatchString(name) {\n\t\t\t\t\t\trwDimensions = append(rwDimensions, &Dimension{Expr: &VarRef{Val: name}})\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\trwDimensions = append(rwDimensions, d)\n\t\t\t}\n\t\t}\n\t\tother.Dimensions = rwDimensions\n\t}\n\n\treturn other, nil\n}\n\n// RewriteRegexConditions rewrites regex conditions to make better use of the\n// database index.\n//\n// Conditions that can currently be simplified are:\n//\n//     - host =~ /^foo$/ becomes host = 'foo'\n//     - host !~ /^foo$/ becomes host != 'foo'\n//\n// Note: if the regex contains groups, character classes, repetition or\n// similar, it's likely it won't be rewritten. In order to support rewriting\n// regexes with these characters would be a lot more work.\nfunc (s *SelectStatement) RewriteRegexConditions() {\n\ts.Condition = RewriteExpr(s.Condition, func(e Expr) Expr {\n\t\tbe, ok := e.(*BinaryExpr)\n\t\tif !ok || (be.Op != EQREGEX && be.Op != NEQREGEX) {\n\t\t\t// This expression is not a binary condition or doesn't have a\n\t\t\t// regex based operator.\n\t\t\treturn e\n\t\t}\n\n\t\t// Handle regex-based condition.\n\t\trhs := be.RHS.(*RegexLiteral) // This must be a regex.\n\n\t\tval, ok := matchExactRegex(rhs.Val.String())\n\t\tif !ok {\n\t\t\t// Regex didn't match.\n\t\t\treturn e\n\t\t}\n\n\t\t// Remove leading and trailing ^ and $.\n\t\tbe.RHS = &StringLiteral{Val: val}\n\n\t\t// Update the condition operator.\n\t\tif be.Op == EQREGEX {\n\t\t\tbe.Op = EQ\n\t\t} else {\n\t\t\tbe.Op = NEQ\n\t\t}\n\t\treturn be\n\t})\n}\n\n// matchExactRegex matches regexes that have the following form: /^foo$/. 
It\n// considers /^$/ to be a matching regex.\nfunc matchExactRegex(v string) (string, bool) {\n\tre, err := syntax.Parse(v, syntax.Perl)\n\tif err != nil {\n\t\t// Nothing we can do or log.\n\t\treturn \"\", false\n\t}\n\n\tif re.Op != syntax.OpConcat {\n\t\treturn \"\", false\n\t}\n\n\tif len(re.Sub) < 2 || len(re.Sub) > 3 {\n\t\t// Regex has too few or too many subexpressions.\n\t\treturn \"\", false\n\t}\n\n\tstart := re.Sub[0]\n\tif !(start.Op == syntax.OpBeginLine || start.Op == syntax.OpBeginText) {\n\t\t// Regex does not begin with ^\n\t\treturn \"\", false\n\t}\n\n\tend := re.Sub[len(re.Sub)-1]\n\tif !(end.Op == syntax.OpEndLine || end.Op == syntax.OpEndText) {\n\t\t// Regex does not end with $\n\t\treturn \"\", false\n\t}\n\n\tif len(re.Sub) == 3 {\n\t\tmiddle := re.Sub[1]\n\t\tif middle.Op != syntax.OpLiteral || middle.Flags^syntax.Perl != 0 {\n\t\t\t// Regex does not contain a literal op.\n\t\t\treturn \"\", false\n\t\t}\n\n\t\t// We can rewrite this regex.\n\t\treturn string(middle.Rune), true\n\t}\n\n\t// The regex /^$/\n\treturn \"\", true\n}\n\n// RewriteDistinct rewrites the expression to be a call for map/reduce to work correctly.\n// This method assumes all validation has passed.\nfunc (s *SelectStatement) RewriteDistinct() {\n\tWalkFunc(s.Fields, func(n Node) {\n\t\tswitch n := n.(type) {\n\t\tcase *Field:\n\t\t\tif expr, ok := n.Expr.(*Distinct); ok {\n\t\t\t\tn.Expr = expr.NewCall()\n\t\t\t\ts.IsRawQuery = false\n\t\t\t}\n\t\tcase *Call:\n\t\t\tfor i, arg := range n.Args {\n\t\t\t\tif arg, ok := arg.(*Distinct); ok {\n\t\t\t\t\tn.Args[i] = arg.NewCall()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n}\n\n// RewriteTimeFields removes any \"time\" field references.\nfunc (s *SelectStatement) RewriteTimeFields() {\n\tfor i := 0; i < len(s.Fields); i++ {\n\t\tswitch expr := s.Fields[i].Expr.(type) {\n\t\tcase *VarRef:\n\t\t\tif expr.Val == \"time\" {\n\t\t\t\ts.TimeAlias = s.Fields[i].Alias\n\t\t\t\ts.Fields = append(s.Fields[:i], 
s.Fields[i+1:]...)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// RewriteTimeCondition adds time constraints to aggregate queries.\nfunc (s *SelectStatement) RewriteTimeCondition(now time.Time) error {\n\tinterval, err := s.GroupByInterval()\n\tif err != nil {\n\t\treturn err\n\t} else if interval > 0 && s.Condition != nil {\n\t\t_, tmax, err := TimeRange(s.Condition, s.Location)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif tmax.IsZero() {\n\t\t\ts.Condition = &BinaryExpr{\n\t\t\t\tOp:  AND,\n\t\t\t\tLHS: s.Condition,\n\t\t\t\tRHS: &BinaryExpr{\n\t\t\t\t\tOp:  LTE,\n\t\t\t\t\tLHS: &VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &TimeLiteral{Val: now},\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, source := range s.Sources {\n\t\tswitch source := source.(type) {\n\t\tcase *SubQuery:\n\t\t\tif err := source.Statement.RewriteTimeCondition(now); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// ColumnNames will walk all fields and functions and return the appropriate field names for the select statement\n// while maintaining order of the field names.\nfunc (s *SelectStatement) ColumnNames() []string {\n\t// First walk each field to determine the number of columns.\n\tcolumnFields := Fields{}\n\tfor _, field := range s.Fields {\n\t\tcolumnFields = append(columnFields, field)\n\n\t\tswitch f := field.Expr.(type) {\n\t\tcase *Call:\n\t\t\tif s.Target == nil && (f.Name == \"top\" || f.Name == \"bottom\") {\n\t\t\t\tfor _, arg := range f.Args[1:] {\n\t\t\t\t\tref, ok := arg.(*VarRef)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tcolumnFields = append(columnFields, &Field{Expr: ref})\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Determine if we should add an extra column for an implicit time.\n\toffset := 0\n\tif !s.OmitTime {\n\t\toffset++\n\t}\n\n\tcolumnNames := make([]string, len(columnFields)+offset)\n\tif !s.OmitTime {\n\t\t// Add the implicit time if requested.\n\t\tcolumnNames[0] = s.TimeFieldName()\n\t}\n\n\t// Keep track of the encountered column 
names.\n\tnames := make(map[string]int)\n\n\t// Resolve aliases first.\n\tfor i, col := range columnFields {\n\t\tif col.Alias != \"\" {\n\t\t\tcolumnNames[i+offset] = col.Alias\n\t\t\tnames[col.Alias] = 1\n\t\t}\n\t}\n\n\t// Resolve any generated names and resolve conflicts.\n\tfor i, col := range columnFields {\n\t\tif columnNames[i+offset] != \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tname := col.Name()\n\t\tcount, conflict := names[name]\n\t\tif conflict {\n\t\t\tfor {\n\t\t\t\tresolvedName := fmt.Sprintf(\"%s_%d\", name, count)\n\t\t\t\t_, conflict = names[resolvedName]\n\t\t\t\tif !conflict {\n\t\t\t\t\tnames[name] = count + 1\n\t\t\t\t\tname = resolvedName\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\t\tnames[name]++\n\t\tcolumnNames[i+offset] = name\n\t}\n\treturn columnNames\n}\n\n// FieldExprByName returns the expression that matches the field name and the\n// index where this was found. If the name matches one of the arguments to\n// \"top\" or \"bottom\", the variable reference inside of the function is returned\n// and the index is of the function call rather than the variable reference.\n// If no expression is found, -1 is returned for the index and the expression\n// will be nil.\nfunc (s *SelectStatement) FieldExprByName(name string) (int, Expr) {\n\tfor i, f := range s.Fields {\n\t\tif f.Name() == name {\n\t\t\treturn i, f.Expr\n\t\t} else if call, ok := f.Expr.(*Call); ok && (call.Name == \"top\" || call.Name == \"bottom\") && len(call.Args) > 2 {\n\t\t\tfor _, arg := range call.Args[1 : len(call.Args)-1] {\n\t\t\t\tif arg, ok := arg.(*VarRef); ok && arg.Val == name {\n\t\t\t\t\treturn i, arg\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn -1, nil\n}\n\n// Reduce calls the Reduce function on the different components of the\n// SelectStatement to reduce the statement.\nfunc (s *SelectStatement) Reduce(valuer Valuer) *SelectStatement {\n\tstmt := s.Clone()\n\tstmt.Condition = Reduce(stmt.Condition, valuer)\n\tfor _, d := range stmt.Dimensions 
{\n\t\td.Expr = Reduce(d.Expr, valuer)\n\t}\n\n\tfor _, source := range stmt.Sources {\n\t\tswitch source := source.(type) {\n\t\tcase *SubQuery:\n\t\t\tsource.Statement = source.Statement.Reduce(valuer)\n\t\t}\n\t}\n\treturn stmt\n}\n\n// HasTimeFieldSpecified will walk all fields and determine if the user explicitly asked for time.\n// This is needed to determine re-write behaviors for functions like TOP and BOTTOM.\nfunc (s *SelectStatement) HasTimeFieldSpecified() bool {\n\tfor _, f := range s.Fields {\n\t\tif f.Name() == \"time\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// String returns a string representation of the select statement.\nfunc (s *SelectStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"SELECT \")\n\t_, _ = buf.WriteString(s.Fields.String())\n\n\tif s.Target != nil {\n\t\t_, _ = buf.WriteString(\" \")\n\t\t_, _ = buf.WriteString(s.Target.String())\n\t}\n\tif len(s.Sources) > 0 {\n\t\t_, _ = buf.WriteString(\" FROM \")\n\t\t_, _ = buf.WriteString(s.Sources.String())\n\t}\n\tif s.Condition != nil {\n\t\t_, _ = buf.WriteString(\" WHERE \")\n\t\t_, _ = buf.WriteString(s.Condition.String())\n\t}\n\tif len(s.Dimensions) > 0 {\n\t\t_, _ = buf.WriteString(\" GROUP BY \")\n\t\t_, _ = buf.WriteString(s.Dimensions.String())\n\t}\n\tswitch s.Fill {\n\tcase NoFill:\n\t\t_, _ = buf.WriteString(\" fill(none)\")\n\tcase NumberFill:\n\t\t_, _ = buf.WriteString(fmt.Sprintf(\" fill(%v)\", s.FillValue))\n\tcase LinearFill:\n\t\t_, _ = buf.WriteString(\" fill(linear)\")\n\tcase PreviousFill:\n\t\t_, _ = buf.WriteString(\" fill(previous)\")\n\t}\n\tif len(s.SortFields) > 0 {\n\t\t_, _ = buf.WriteString(\" ORDER BY \")\n\t\t_, _ = buf.WriteString(s.SortFields.String())\n\t}\n\tif s.Limit > 0 {\n\t\t_, _ = fmt.Fprintf(&buf, \" LIMIT %d\", s.Limit)\n\t}\n\tif s.Offset > 0 {\n\t\t_, _ = buf.WriteString(\" OFFSET \")\n\t\t_, _ = buf.WriteString(strconv.Itoa(s.Offset))\n\t}\n\tif s.SLimit > 0 {\n\t\t_, _ = fmt.Fprintf(&buf, \" 
SLIMIT %d\", s.SLimit)\n\t}\n\tif s.SOffset > 0 {\n\t\t_, _ = fmt.Fprintf(&buf, \" SOFFSET %d\", s.SOffset)\n\t}\n\tif s.Location != nil {\n\t\t_, _ = fmt.Fprintf(&buf, ` TZ('%s')`, s.Location)\n\t}\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute the SelectStatement.\n// NOTE: Statement should be normalized first (database name(s) in Sources and\n// Target should be populated). If the statement has not been normalized, an\n// empty string will be returned for the database name and it is up to the caller\n// to interpret that as the default database.\nfunc (s *SelectStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\tep := ExecutionPrivileges{}\n\tfor _, source := range s.Sources {\n\t\tswitch source := source.(type) {\n\t\tcase *Measurement:\n\t\t\tep = append(ep, ExecutionPrivilege{\n\t\t\t\tName:      source.Database,\n\t\t\t\tPrivilege: ReadPrivilege,\n\t\t\t})\n\t\tcase *SubQuery:\n\t\t\tprivs, err := source.Statement.RequiredPrivileges()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tep = append(ep, privs...)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid source: %s\", source)\n\t\t}\n\t}\n\n\tif s.Target != nil {\n\t\tp := ExecutionPrivilege{Admin: false, Name: s.Target.Measurement.Database, Privilege: WritePrivilege}\n\t\tep = append(ep, p)\n\t}\n\treturn ep, nil\n}\n\n// HasWildcard returns whether or not the select statement has at least 1 wildcard.\nfunc (s *SelectStatement) HasWildcard() bool {\n\treturn s.HasFieldWildcard() || s.HasDimensionWildcard()\n}\n\n// HasFieldWildcard returns whether or not the select statement has at least 1 wildcard in the fields.\nfunc (s *SelectStatement) HasFieldWildcard() (hasWildcard bool) {\n\tWalkFunc(s.Fields, func(n Node) {\n\t\tif hasWildcard {\n\t\t\treturn\n\t\t}\n\t\tswitch n.(type) {\n\t\tcase *Wildcard, *RegexLiteral:\n\t\t\thasWildcard = true\n\t\t}\n\t})\n\treturn hasWildcard\n}\n\n// HasDimensionWildcard returns whether or not 
the select statement has\n// at least 1 wildcard in the dimensions aka `GROUP BY`.\nfunc (s *SelectStatement) HasDimensionWildcard() bool {\n\tfor _, d := range s.Dimensions {\n\t\tswitch d.Expr.(type) {\n\t\tcase *Wildcard, *RegexLiteral:\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (s *SelectStatement) validate(tr targetRequirement) error {\n\tif err := s.validateFields(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := s.validateDimensions(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := s.validateDistinct(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := s.validateTopBottom(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := s.validateAggregates(tr); err != nil {\n\t\treturn err\n\t}\n\n\tif err := s.validateFill(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *SelectStatement) validateFields() error {\n\tns := s.NamesInSelect()\n\tif len(ns) == 1 && ns[0] == \"time\" {\n\t\treturn fmt.Errorf(\"at least 1 non-time field must be queried\")\n\t}\n\n\tfor _, f := range s.Fields {\n\t\tswitch expr := f.Expr.(type) {\n\t\tcase *BinaryExpr:\n\t\t\tif err := expr.validate(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *SelectStatement) validateDimensions() error {\n\tvar dur time.Duration\n\tfor _, dim := range s.Dimensions {\n\t\tswitch expr := dim.Expr.(type) {\n\t\tcase *Call:\n\t\t\t// Ensure the call is time() and it has one or two duration arguments.\n\t\t\t// If we already have a duration\n\t\t\tif expr.Name != \"time\" {\n\t\t\t\treturn errors.New(\"only time() calls allowed in dimensions\")\n\t\t\t} else if got := len(expr.Args); got < 1 || got > 2 {\n\t\t\t\treturn errors.New(\"time dimension expected 1 or 2 arguments\")\n\t\t\t} else if lit, ok := expr.Args[0].(*DurationLiteral); !ok {\n\t\t\t\treturn errors.New(\"time dimension must have duration argument\")\n\t\t\t} else if dur != 0 {\n\t\t\t\treturn errors.New(\"multiple time dimensions not allowed\")\n\t\t\t} else 
{\n\t\t\t\tdur = lit.Val\n\t\t\t\tif len(expr.Args) == 2 {\n\t\t\t\t\tswitch lit := expr.Args[1].(type) {\n\t\t\t\t\tcase *DurationLiteral:\n\t\t\t\t\t\t// noop\n\t\t\t\t\tcase *Call:\n\t\t\t\t\t\tif lit.Name != \"now\" {\n\t\t\t\t\t\t\treturn errors.New(\"time dimension offset function must be now()\")\n\t\t\t\t\t\t} else if len(lit.Args) != 0 {\n\t\t\t\t\t\t\treturn errors.New(\"time dimension offset now() function requires no arguments\")\n\t\t\t\t\t\t}\n\t\t\t\t\tdefault:\n\t\t\t\t\t\treturn errors.New(\"time dimension offset must be duration or now()\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase *VarRef:\n\t\t\tif strings.ToLower(expr.Val) == \"time\" {\n\t\t\t\treturn errors.New(\"time() is a function and expects at least one argument\")\n\t\t\t}\n\t\tcase *Wildcard:\n\t\tcase *RegexLiteral:\n\t\tdefault:\n\t\t\treturn errors.New(\"only time and tag dimensions allowed\")\n\t\t}\n\t}\n\treturn nil\n}\n\n// validSelectWithAggregate determines if a SELECT statement has the correct\n// combination of aggregate functions combined with selected fields and tags\n// Currently we don't have support for all aggregates, but aggregates that\n// can be combined with fields/tags are:\n//  TOP, BOTTOM, MAX, MIN, FIRST, LAST\nfunc (s *SelectStatement) validSelectWithAggregate() error {\n\tcalls := map[string]struct{}{}\n\tnumAggregates := 0\n\tfor _, f := range s.Fields {\n\t\tfieldCalls := walkFunctionCalls(f.Expr)\n\t\tfor _, c := range fieldCalls {\n\t\t\tcalls[c.Name] = struct{}{}\n\t\t}\n\t\tif len(fieldCalls) != 0 {\n\t\t\tnumAggregates++\n\t\t}\n\t}\n\t// For TOP, BOTTOM, MAX, MIN, FIRST, LAST, PERCENTILE (selector functions) it is ok to ask for fields and tags\n\t// but only if one function is specified.  
Combining multiple functions and fields and tags is not currently supported\n\tonlySelectors := true\n\tfor k := range calls {\n\t\tswitch k {\n\t\tcase \"top\", \"bottom\", \"max\", \"min\", \"first\", \"last\", \"percentile\", \"sample\":\n\t\tdefault:\n\t\t\tonlySelectors = false\n\t\t\tbreak\n\t\t}\n\t}\n\tif onlySelectors {\n\t\t// If they only have one selector, they can have as many fields or tags as they want\n\t\tif numAggregates == 1 {\n\t\t\treturn nil\n\t\t}\n\t\t// If they have multiple selectors, they are not allowed to have any other fields or tags specified\n\t\tif numAggregates > 1 && len(s.Fields) != numAggregates {\n\t\t\treturn fmt.Errorf(\"mixing multiple selector functions with tags or fields is not supported\")\n\t\t}\n\t}\n\n\tif numAggregates != 0 && numAggregates != len(s.Fields) {\n\t\treturn fmt.Errorf(\"mixing aggregate and non-aggregate queries is not supported\")\n\t}\n\treturn nil\n}\n\n// validTopBottomAggr determines if TOP or BOTTOM aggregates have valid arguments.\nfunc (s *SelectStatement) validTopBottomAggr(expr *Call) error {\n\tif exp, got := 2, len(expr.Args); got < exp {\n\t\treturn fmt.Errorf(\"invalid number of arguments for %s, expected at least %d, got %d\", expr.Name, exp, got)\n\t}\n\tif len(expr.Args) > 1 {\n\t\tcallLimit, ok := expr.Args[len(expr.Args)-1].(*IntegerLiteral)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"expected integer as last argument in %s(), found %s\", expr.Name, expr.Args[len(expr.Args)-1])\n\t\t}\n\t\t// Check if they asked for a limit smaller than what they passed into the call\n\t\tif int64(callLimit.Val) > int64(s.Limit) && s.Limit != 0 {\n\t\t\treturn fmt.Errorf(\"limit (%d) in %s function can not be larger than the LIMIT (%d) in the select statement\", int64(callLimit.Val), expr.Name, int64(s.Limit))\n\t\t}\n\n\t\tfor _, v := range expr.Args[:len(expr.Args)-1] {\n\t\t\tif _, ok := v.(*VarRef); !ok {\n\t\t\t\treturn fmt.Errorf(\"only fields or tags are allowed in %s(), found %s\", expr.Name, 
v)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// validPercentileAggr determines if the call to PERCENTILE has valid arguments.\nfunc (s *SelectStatement) validPercentileAggr(expr *Call) error {\n\tif err := s.validSelectWithAggregate(); err != nil {\n\t\treturn err\n\t}\n\tif exp, got := 2, len(expr.Args); got != exp {\n\t\treturn fmt.Errorf(\"invalid number of arguments for %s, expected %d, got %d\", expr.Name, exp, got)\n\t}\n\n\tswitch expr.Args[0].(type) {\n\tcase *VarRef, *RegexLiteral, *Wildcard:\n\t\t// do nothing\n\tdefault:\n\t\treturn fmt.Errorf(\"expected field argument in percentile()\")\n\t}\n\n\tswitch expr.Args[1].(type) {\n\tcase *IntegerLiteral, *NumberLiteral:\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"expected float argument in percentile()\")\n\t}\n}\n\n// validPercentileAggr determines if the call to SAMPLE has valid arguments.\nfunc (s *SelectStatement) validSampleAggr(expr *Call) error {\n\tif err := s.validSelectWithAggregate(); err != nil {\n\t\treturn err\n\t}\n\tif exp, got := 2, len(expr.Args); got != exp {\n\t\treturn fmt.Errorf(\"invalid number of arguments for %s, expected %d, got %d\", expr.Name, exp, got)\n\t}\n\n\tswitch expr.Args[0].(type) {\n\tcase *VarRef, *RegexLiteral, *Wildcard:\n\t\t// do nothing\n\tdefault:\n\t\treturn fmt.Errorf(\"expected field argument in sample()\")\n\t}\n\n\tswitch expr.Args[1].(type) {\n\tcase *IntegerLiteral:\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"expected integer argument in sample()\")\n\t}\n}\n\nfunc (s *SelectStatement) validateAggregates(tr targetRequirement) error {\n\tfor _, f := range s.Fields {\n\t\tfor _, expr := range walkFunctionCalls(f.Expr) {\n\t\t\tswitch expr.Name {\n\t\t\tcase \"derivative\", \"non_negative_derivative\", \"difference\", \"non_negative_difference\", \"moving_average\", \"cumulative_sum\", \"elapsed\":\n\t\t\t\tif err := s.validSelectWithAggregate(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tswitch expr.Name {\n\t\t\t\tcase 
\"derivative\", \"non_negative_derivative\", \"elapsed\":\n\t\t\t\t\tif min, max, got := 1, 2, len(expr.Args); got > max || got < min {\n\t\t\t\t\t\treturn fmt.Errorf(\"invalid number of arguments for %s, expected at least %d but no more than %d, got %d\", expr.Name, min, max, got)\n\t\t\t\t\t}\n\t\t\t\t\t// If a duration arg is passed, make sure it's a duration\n\t\t\t\t\tif len(expr.Args) == 2 {\n\t\t\t\t\t\t// Second must be a duration .e.g (1h)\n\t\t\t\t\t\tif _, ok := expr.Args[1].(*DurationLiteral); !ok {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"second argument to %s must be a duration, got %T\", expr.Name, expr.Args[1])\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase \"difference\", \"non_negative_difference\", \"cumulative_sum\":\n\t\t\t\t\tif got := len(expr.Args); got != 1 {\n\t\t\t\t\t\treturn fmt.Errorf(\"invalid number of arguments for %s, expected 1, got %d\", expr.Name, got)\n\t\t\t\t\t}\n\t\t\t\tcase \"moving_average\":\n\t\t\t\t\tif got := len(expr.Args); got != 2 {\n\t\t\t\t\t\treturn fmt.Errorf(\"invalid number of arguments for moving_average, expected 2, got %d\", got)\n\t\t\t\t\t}\n\n\t\t\t\t\tif lit, ok := expr.Args[1].(*IntegerLiteral); !ok {\n\t\t\t\t\t\treturn fmt.Errorf(\"second argument for moving_average must be an integer, got %T\", expr.Args[1])\n\t\t\t\t\t} else if lit.Val <= 1 {\n\t\t\t\t\t\treturn fmt.Errorf(\"moving_average window must be greater than 1, got %d\", lit.Val)\n\t\t\t\t\t} else if int64(int(lit.Val)) != lit.Val {\n\t\t\t\t\t\treturn fmt.Errorf(\"moving_average window too large, got %d\", lit.Val)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Validate that if they have grouping by time, they need a sub-call like min/max, etc.\n\t\t\t\tgroupByInterval, err := s.GroupByInterval()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"invalid group interval: %v\", err)\n\t\t\t\t}\n\n\t\t\t\tif c, ok := expr.Args[0].(*Call); ok && groupByInterval == 0 && tr != targetSubquery {\n\t\t\t\t\treturn fmt.Errorf(\"%s aggregate requires a GROUP BY 
interval\", expr.Name)\n\t\t\t\t} else if !ok && groupByInterval > 0 {\n\t\t\t\t\treturn fmt.Errorf(\"aggregate function required inside the call to %s\", expr.Name)\n\t\t\t\t} else if ok {\n\t\t\t\t\tswitch c.Name {\n\t\t\t\t\tcase \"top\", \"bottom\":\n\t\t\t\t\t\tif err := s.validTopBottomAggr(c); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\tcase \"percentile\":\n\t\t\t\t\t\tif err := s.validPercentileAggr(c); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tif exp, got := 1, len(c.Args); got != exp {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"invalid number of arguments for %s, expected %d, got %d\", c.Name, exp, got)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tswitch fc := c.Args[0].(type) {\n\t\t\t\t\t\tcase *VarRef, *Wildcard, *RegexLiteral:\n\t\t\t\t\t\t\t// do nothing\n\t\t\t\t\t\tcase *Call:\n\t\t\t\t\t\t\tif fc.Name != \"distinct\" || expr.Name != \"count\" {\n\t\t\t\t\t\t\t\treturn fmt.Errorf(\"expected field argument in %s()\", c.Name)\n\t\t\t\t\t\t\t} else if exp, got := 1, len(fc.Args); got != exp {\n\t\t\t\t\t\t\t\treturn fmt.Errorf(\"count(distinct %s) can only have %d argument(s), got %d\", fc.Name, exp, got)\n\t\t\t\t\t\t\t} else if _, ok := fc.Args[0].(*VarRef); !ok {\n\t\t\t\t\t\t\t\treturn fmt.Errorf(\"expected field argument in distinct()\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tcase *Distinct:\n\t\t\t\t\t\t\tif expr.Name != \"count\" {\n\t\t\t\t\t\t\t\treturn fmt.Errorf(\"expected field argument in %s()\", c.Name)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\treturn fmt.Errorf(\"expected field argument in %s()\", c.Name)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase \"top\", \"bottom\":\n\t\t\t\tif err := s.validTopBottomAggr(expr); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tcase \"percentile\":\n\t\t\t\tif err := s.validPercentileAggr(expr); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tcase \"sample\":\n\t\t\t\tif err := s.validSampleAggr(expr); err != nil {\n\t\t\t\t\treturn 
err\n\t\t\t\t}\n\t\t\tcase \"integral\":\n\t\t\t\tif err := s.validSelectWithAggregate(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif min, max, got := 1, 2, len(expr.Args); got > max || got < min {\n\t\t\t\t\treturn fmt.Errorf(\"invalid number of arguments for %s, expected at least %d but no more than %d, got %d\", expr.Name, min, max, got)\n\t\t\t\t}\n\t\t\t\t// If a duration arg is passed, make sure it's a duration\n\t\t\t\tif len(expr.Args) == 2 {\n\t\t\t\t\t// Second must be a duration .e.g (1h)\n\t\t\t\t\tif _, ok := expr.Args[1].(*DurationLiteral); !ok {\n\t\t\t\t\t\treturn errors.New(\"second argument must be a duration\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase \"holt_winters\", \"holt_winters_with_fit\":\n\t\t\t\tif exp, got := 3, len(expr.Args); got != exp {\n\t\t\t\t\treturn fmt.Errorf(\"invalid number of arguments for %s, expected %d, got %d\", expr.Name, exp, got)\n\t\t\t\t}\n\t\t\t\t// Validate that if they have grouping by time, they need a sub-call like min/max, etc.\n\t\t\t\tgroupByInterval, err := s.GroupByInterval()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"invalid group interval: %v\", err)\n\t\t\t\t}\n\n\t\t\t\tif _, ok := expr.Args[0].(*Call); ok && groupByInterval == 0 && tr != targetSubquery {\n\t\t\t\t\treturn fmt.Errorf(\"%s aggregate requires a GROUP BY interval\", expr.Name)\n\t\t\t\t} else if !ok {\n\t\t\t\t\treturn fmt.Errorf(\"must use aggregate function with %s\", expr.Name)\n\t\t\t\t}\n\t\t\t\tif arg, ok := expr.Args[1].(*IntegerLiteral); !ok {\n\t\t\t\t\treturn fmt.Errorf(\"expected integer argument as second arg in %s\", expr.Name)\n\t\t\t\t} else if arg.Val <= 0 {\n\t\t\t\t\treturn fmt.Errorf(\"second arg to %s must be greater than 0, got %d\", expr.Name, arg.Val)\n\t\t\t\t}\n\t\t\t\tif _, ok := expr.Args[2].(*IntegerLiteral); !ok {\n\t\t\t\t\treturn fmt.Errorf(\"expected integer argument as third arg in %s\", expr.Name)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tif err := s.validSelectWithAggregate(); err != nil 
{\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif exp, got := 1, len(expr.Args); got != exp {\n\t\t\t\t\t// Special error message if distinct was used as the argument.\n\t\t\t\t\tif expr.Name == \"count\" && got >= 1 {\n\t\t\t\t\t\tif _, ok := expr.Args[0].(*Distinct); ok {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"count(distinct <field>) can only have one argument\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn fmt.Errorf(\"invalid number of arguments for %s, expected %d, got %d\", expr.Name, exp, got)\n\t\t\t\t}\n\t\t\t\tswitch fc := expr.Args[0].(type) {\n\t\t\t\tcase *VarRef, *Wildcard, *RegexLiteral:\n\t\t\t\t\t// do nothing\n\t\t\t\tcase *Call:\n\t\t\t\t\tif fc.Name != \"distinct\" || expr.Name != \"count\" {\n\t\t\t\t\t\treturn fmt.Errorf(\"expected field argument in %s()\", expr.Name)\n\t\t\t\t\t} else if exp, got := 1, len(fc.Args); got != exp {\n\t\t\t\t\t\treturn fmt.Errorf(\"count(distinct <field>) can only have one argument\")\n\t\t\t\t\t} else if _, ok := fc.Args[0].(*VarRef); !ok {\n\t\t\t\t\t\treturn fmt.Errorf(\"expected field argument in distinct()\")\n\t\t\t\t\t}\n\t\t\t\tcase *Distinct:\n\t\t\t\t\tif expr.Name != \"count\" {\n\t\t\t\t\t\treturn fmt.Errorf(\"expected field argument in %s()\", expr.Name)\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\treturn fmt.Errorf(\"expected field argument in %s()\", expr.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check that we have valid duration and where clauses for aggregates\n\n\t// fetch the group by duration\n\tgroupByDuration, _ := s.GroupByInterval()\n\n\t// If we have a group by interval, but no aggregate function, it's an invalid statement\n\tif s.IsRawQuery && groupByDuration > 0 {\n\t\treturn fmt.Errorf(\"GROUP BY requires at least one aggregate function\")\n\t}\n\n\t// If we have an aggregate function with a group by time without a where clause, it's an invalid statement\n\tif tr == targetNotRequired { // ignore create continuous query statements\n\t\tif err := s.validateTimeExpression(); err != nil 
{\n\t\t\treturn err\n\t\t}\n\t}\n\tif tr != targetSubquery {\n\t\tif err := s.validateGroupByInterval(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// validateFill ensures that the fill option matches the query type.\nfunc (s *SelectStatement) validateFill() error {\n\tinfo := newSelectInfo(s)\n\tif len(info.calls) == 0 {\n\t\tswitch s.Fill {\n\t\tcase NoFill:\n\t\t\treturn errors.New(\"fill(none) must be used with a function\")\n\t\tcase LinearFill:\n\t\t\treturn errors.New(\"fill(linear) must be used with a function\")\n\t\t}\n\t}\n\treturn nil\n}\n\n// validateTimeExpression ensures that any select statements that have a group\n// by interval either have a time expression limiting the time range or have a\n// parent query that does that.\nfunc (s *SelectStatement) validateTimeExpression() error {\n\t// If we have a time expression, we and all subqueries are fine.\n\tif HasTimeExpr(s.Condition) {\n\t\treturn nil\n\t}\n\n\t// Check if this is not a raw query and if the group by duration exists.\n\t// If these are true, then we have an error.\n\tinterval, err := s.GroupByInterval()\n\tif err != nil {\n\t\treturn err\n\t} else if !s.IsRawQuery && interval > 0 {\n\t\treturn fmt.Errorf(\"aggregate functions with GROUP BY time require a WHERE time clause\")\n\t}\n\n\t// Validate the subqueries. If we have a time expression in this select\n\t// statement, we don't need to do this because parent time ranges propagate\n\t// to children. 
So we only execute this when there is no time condition in\n\t// the parent.\n\tfor _, source := range s.Sources {\n\t\tswitch source := source.(type) {\n\t\tcase *SubQuery:\n\t\t\tif err := source.Statement.validateTimeExpression(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// validateGroupByInterval ensures that a select statement is grouped by an\n// interval if it contains certain functions.\nfunc (s *SelectStatement) validateGroupByInterval() error {\n\tinterval, err := s.GroupByInterval()\n\tif err != nil {\n\t\treturn err\n\t} else if interval > 0 {\n\t\t// If we have an interval here, that means the interval will propagate\n\t\t// into any subqueries and we can just stop looking.\n\t\treturn nil\n\t}\n\n\t// Check inside of the fields for any of the specific functions that ned a group by interval.\n\tfor _, f := range s.Fields {\n\t\tswitch expr := f.Expr.(type) {\n\t\tcase *Call:\n\t\t\tswitch expr.Name {\n\t\t\tcase \"derivative\", \"non_negative_derivative\", \"difference\", \"non_negative_difference\", \"moving_average\", \"cumulative_sum\", \"elapsed\", \"holt_winters\", \"holt_winters_with_fit\":\n\t\t\t\t// If the first argument is a call, we needed a group by interval and we don't have one.\n\t\t\t\tif _, ok := expr.Args[0].(*Call); ok {\n\t\t\t\t\treturn fmt.Errorf(\"%s aggregate requires a GROUP BY interval\", expr.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Validate the subqueries.\n\tfor _, source := range s.Sources {\n\t\tswitch source := source.(type) {\n\t\tcase *SubQuery:\n\t\t\tif err := source.Statement.validateGroupByInterval(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// HasDistinct checks if a select statement contains a call to DISTINCT.\nfunc (s *SelectStatement) HasDistinct() bool {\n\tfor _, f := range s.Fields {\n\t\tswitch c := f.Expr.(type) {\n\t\tcase *Call:\n\t\t\tif c.Name == \"distinct\" {\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase *Distinct:\n\t\t\treturn 
true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (s *SelectStatement) validateDistinct() error {\n\tif !s.HasDistinct() {\n\t\treturn nil\n\t}\n\n\tif len(s.Fields) > 1 {\n\t\treturn fmt.Errorf(\"aggregate function distinct() cannot be combined with other functions or fields\")\n\t}\n\n\tswitch c := s.Fields[0].Expr.(type) {\n\tcase *Call:\n\t\tif len(c.Args) == 0 {\n\t\t\treturn fmt.Errorf(\"distinct function requires at least one argument\")\n\t\t}\n\n\t\tif len(c.Args) != 1 {\n\t\t\treturn fmt.Errorf(\"distinct function can only have one argument\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *SelectStatement) validateTopBottom() error {\n\t// Ensure there are not multiple calls if top/bottom is present.\n\tinfo := newSelectInfo(s)\n\tif len(info.calls) > 1 {\n\t\tfor call := range info.calls {\n\t\t\tif call.Name == \"top\" || call.Name == \"bottom\" {\n\t\t\t\treturn fmt.Errorf(\"selector function %s() cannot be combined with other functions\", call.Name)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// GroupByInterval extracts the time interval, if specified.\nfunc (s *SelectStatement) GroupByInterval() (time.Duration, error) {\n\t// return if we've already pulled it out\n\tif s.groupByInterval != 0 {\n\t\treturn s.groupByInterval, nil\n\t}\n\n\t// Ignore if there are no dimensions.\n\tif len(s.Dimensions) == 0 {\n\t\treturn 0, nil\n\t}\n\n\tfor _, d := range s.Dimensions {\n\t\tif call, ok := d.Expr.(*Call); ok && call.Name == \"time\" {\n\t\t\t// Make sure there is exactly one argument.\n\t\t\tif got := len(call.Args); got < 1 || got > 2 {\n\t\t\t\treturn 0, errors.New(\"time dimension expected 1 or 2 arguments\")\n\t\t\t}\n\n\t\t\t// Ensure the argument is a duration.\n\t\t\tlit, ok := call.Args[0].(*DurationLiteral)\n\t\t\tif !ok {\n\t\t\t\treturn 0, errors.New(\"time dimension must have duration argument\")\n\t\t\t}\n\t\t\ts.groupByInterval = lit.Val\n\t\t\treturn lit.Val, nil\n\t\t}\n\t}\n\treturn 0, nil\n}\n\n// GroupByOffset extracts the time interval offset, if 
specified.\nfunc (s *SelectStatement) GroupByOffset() (time.Duration, error) {\n\tinterval, err := s.GroupByInterval()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// Ignore if there are no dimensions.\n\tif len(s.Dimensions) == 0 {\n\t\treturn 0, nil\n\t}\n\n\tfor _, d := range s.Dimensions {\n\t\tif call, ok := d.Expr.(*Call); ok && call.Name == \"time\" {\n\t\t\tif len(call.Args) == 2 {\n\t\t\t\tswitch expr := call.Args[1].(type) {\n\t\t\t\tcase *DurationLiteral:\n\t\t\t\t\treturn expr.Val % interval, nil\n\t\t\t\tcase *TimeLiteral:\n\t\t\t\t\treturn expr.Val.Sub(expr.Val.Truncate(interval)), nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn 0, fmt.Errorf(\"invalid time dimension offset: %s\", expr)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn 0, nil\n\t\t}\n\t}\n\treturn 0, nil\n}\n\n// SetTimeRange sets the start and end time of the select statement to [start, end). i.e. start inclusive, end exclusive.\n// This is used commonly for continuous queries so the start and end are in buckets.\nfunc (s *SelectStatement) SetTimeRange(start, end time.Time) error {\n\tcond := fmt.Sprintf(\"time >= '%s' AND time < '%s'\", start.UTC().Format(time.RFC3339Nano), end.UTC().Format(time.RFC3339Nano))\n\tif s.Condition != nil {\n\t\tcond = fmt.Sprintf(\"%s AND %s\", s.rewriteWithoutTimeDimensions(), cond)\n\t}\n\n\texpr, err := NewParser(strings.NewReader(cond)).ParseExpr()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Fold out any previously replaced time dimensions and set the condition.\n\ts.Condition = Reduce(expr, nil)\n\n\treturn nil\n}\n\n// rewriteWithoutTimeDimensions will remove any WHERE time... 
clauses from the select statement.\n// This is necessary when setting an explicit time range to override any that previously existed.\nfunc (s *SelectStatement) rewriteWithoutTimeDimensions() string {\n\tn := RewriteFunc(s.Condition, func(n Node) Node {\n\t\tswitch n := n.(type) {\n\t\tcase *BinaryExpr:\n\t\t\tif n.LHS.String() == \"time\" {\n\t\t\t\treturn &BooleanLiteral{Val: true}\n\t\t\t}\n\t\t\treturn n\n\t\tcase *Call:\n\t\t\treturn &BooleanLiteral{Val: true}\n\t\tdefault:\n\t\t\treturn n\n\t\t}\n\t})\n\n\treturn n.String()\n}\n\n// NamesInWhere returns the field and tag names (idents) referenced in the where clause.\nfunc (s *SelectStatement) NamesInWhere() []string {\n\tvar a []string\n\tif s.Condition != nil {\n\t\ta = walkNames(s.Condition)\n\t}\n\treturn a\n}\n\n// NamesInSelect returns the field and tag names (idents) in the select clause.\nfunc (s *SelectStatement) NamesInSelect() []string {\n\tvar a []string\n\n\tfor _, f := range s.Fields {\n\t\ta = append(a, walkNames(f.Expr)...)\n\t}\n\n\treturn a\n}\n\n// NamesInDimension returns the field and tag names (idents) in the group by clause.\nfunc (s *SelectStatement) NamesInDimension() []string {\n\tvar a []string\n\n\tfor _, d := range s.Dimensions {\n\t\ta = append(a, walkNames(d.Expr)...)\n\t}\n\n\treturn a\n}\n\n// LimitTagSets returns a tag set list with SLIMIT and SOFFSET applied.\nfunc LimitTagSets(a []*TagSet, slimit, soffset int) []*TagSet {\n\t// Ignore if no limit or offset is specified.\n\tif slimit == 0 && soffset == 0 {\n\t\treturn a\n\t}\n\n\t// If offset is beyond the number of tag sets then return nil.\n\tif soffset > len(a) {\n\t\treturn nil\n\t}\n\n\t// Clamp limit to the max number of tag sets.\n\tif soffset+slimit > len(a) {\n\t\tslimit = len(a) - soffset\n\t}\n\treturn a[soffset : soffset+slimit]\n}\n\n// walkNames will walk the Expr and return the identifier names used.\nfunc walkNames(exp Expr) []string {\n\tswitch expr := exp.(type) {\n\tcase *VarRef:\n\t\treturn 
[]string{expr.Val}\n\tcase *Call:\n\t\tvar a []string\n\t\tfor _, expr := range expr.Args {\n\t\t\tif ref, ok := expr.(*VarRef); ok {\n\t\t\t\ta = append(a, ref.Val)\n\t\t\t}\n\t\t}\n\t\treturn a\n\tcase *BinaryExpr:\n\t\tvar ret []string\n\t\tret = append(ret, walkNames(expr.LHS)...)\n\t\tret = append(ret, walkNames(expr.RHS)...)\n\t\treturn ret\n\tcase *ParenExpr:\n\t\treturn walkNames(expr.Expr)\n\t}\n\n\treturn nil\n}\n\n// walkRefs will walk the Expr and return the var refs used.\nfunc walkRefs(exp Expr) []VarRef {\n\trefs := make(map[VarRef]struct{})\n\tvar walk func(exp Expr)\n\twalk = func(exp Expr) {\n\t\tswitch expr := exp.(type) {\n\t\tcase *VarRef:\n\t\t\trefs[*expr] = struct{}{}\n\t\tcase *Call:\n\t\t\tfor _, expr := range expr.Args {\n\t\t\t\tif ref, ok := expr.(*VarRef); ok {\n\t\t\t\t\trefs[*ref] = struct{}{}\n\t\t\t\t}\n\t\t\t}\n\t\tcase *BinaryExpr:\n\t\t\twalk(expr.LHS)\n\t\t\twalk(expr.RHS)\n\t\tcase *ParenExpr:\n\t\t\twalk(expr.Expr)\n\t\t}\n\t}\n\twalk(exp)\n\n\t// Turn the map into a slice.\n\ta := make([]VarRef, 0, len(refs))\n\tfor ref := range refs {\n\t\ta = append(a, ref)\n\t}\n\treturn a\n}\n\n// ExprNames returns a list of non-\"time\" field names from an expression.\nfunc ExprNames(expr Expr) []VarRef {\n\tm := make(map[VarRef]struct{})\n\tfor _, ref := range walkRefs(expr) {\n\t\tif ref.Val == \"time\" {\n\t\t\tcontinue\n\t\t}\n\t\tm[ref] = struct{}{}\n\t}\n\n\ta := make([]VarRef, 0, len(m))\n\tfor k := range m {\n\t\ta = append(a, k)\n\t}\n\tsort.Sort(VarRefs(a))\n\n\treturn a\n}\n\n// FunctionCalls returns the Call objects from the query.\nfunc (s *SelectStatement) FunctionCalls() []*Call {\n\tvar a []*Call\n\tfor _, f := range s.Fields {\n\t\ta = append(a, walkFunctionCalls(f.Expr)...)\n\t}\n\treturn a\n}\n\n// FunctionCallsByPosition returns the Call objects from the query in the order they appear in the select statement.\nfunc (s *SelectStatement) FunctionCallsByPosition() [][]*Call {\n\tvar a [][]*Call\n\tfor _, f := range 
s.Fields {\n\t\ta = append(a, walkFunctionCalls(f.Expr))\n\t}\n\treturn a\n}\n\n// walkFunctionCalls walks the Expr and returns any function calls made.\nfunc walkFunctionCalls(exp Expr) []*Call {\n\tswitch expr := exp.(type) {\n\tcase *VarRef:\n\t\treturn nil\n\tcase *Call:\n\t\treturn []*Call{expr}\n\tcase *BinaryExpr:\n\t\tvar ret []*Call\n\t\tret = append(ret, walkFunctionCalls(expr.LHS)...)\n\t\tret = append(ret, walkFunctionCalls(expr.RHS)...)\n\t\treturn ret\n\tcase *ParenExpr:\n\t\treturn walkFunctionCalls(expr.Expr)\n\t}\n\n\treturn nil\n}\n\n// MatchSource returns the source name that matches a field name.\n// It returns a blank string if no sources match.\nfunc MatchSource(sources Sources, name string) string {\n\tfor _, src := range sources {\n\t\tswitch src := src.(type) {\n\t\tcase *Measurement:\n\t\t\tif strings.HasPrefix(name, src.Name) {\n\t\t\t\treturn src.Name\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n\n// Target represents a target (destination) policy, measurement, and DB.\ntype Target struct {\n\t// Measurement to write into.\n\tMeasurement *Measurement\n}\n\n// String returns a string representation of the Target.\nfunc (t *Target) String() string {\n\tif t == nil {\n\t\treturn \"\"\n\t}\n\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"INTO \")\n\t_, _ = buf.WriteString(t.Measurement.String())\n\tif t.Measurement.Name == \"\" {\n\t\t_, _ = buf.WriteString(\":MEASUREMENT\")\n\t}\n\n\treturn buf.String()\n}\n\n// DeleteStatement represents a command for deleting data from the database.\ntype DeleteStatement struct {\n\t// Data source that values are removed from.\n\tSource Source\n\n\t// An expression evaluated on data point.\n\tCondition Expr\n}\n\n// String returns a string representation of the delete statement.\nfunc (s *DeleteStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"DELETE FROM \")\n\t_, _ = buf.WriteString(s.Source.String())\n\tif s.Condition != nil {\n\t\t_, _ = buf.WriteString(\" WHERE 
\")\n\t\t_, _ = buf.WriteString(s.Condition.String())\n\t}\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a DeleteStatement.\nfunc (s *DeleteStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: false, Name: \"\", Privilege: WritePrivilege}}, nil\n}\n\n// DefaultDatabase returns the default database from the statement.\nfunc (s *DeleteStatement) DefaultDatabase() string {\n\tif m, ok := s.Source.(*Measurement); ok {\n\t\treturn m.Database\n\t}\n\treturn \"\"\n}\n\n// ShowSeriesStatement represents a command for listing series in the database.\ntype ShowSeriesStatement struct {\n\t// Database to query. If blank, use the default database.\n\t// The database can also be specified per source in the Sources.\n\tDatabase string\n\n\t// Measurement(s) the series are listed for.\n\tSources Sources\n\n\t// An expression evaluated on a series name or tag.\n\tCondition Expr\n\n\t// Fields to sort results by\n\tSortFields SortFields\n\n\t// Maximum number of rows to be returned.\n\t// Unlimited if zero.\n\tLimit int\n\n\t// Returns rows starting at an offset from the first row.\n\tOffset int\n}\n\n// String returns a string representation of the list series statement.\nfunc (s *ShowSeriesStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"SHOW SERIES\")\n\n\tif s.Database != \"\" {\n\t\t_, _ = buf.WriteString(\" ON \")\n\t\t_, _ = buf.WriteString(QuoteIdent(s.Database))\n\t}\n\tif s.Sources != nil {\n\t\t_, _ = buf.WriteString(\" FROM \")\n\t\t_, _ = buf.WriteString(s.Sources.String())\n\t}\n\n\tif s.Condition != nil {\n\t\t_, _ = buf.WriteString(\" WHERE \")\n\t\t_, _ = buf.WriteString(s.Condition.String())\n\t}\n\tif len(s.SortFields) > 0 {\n\t\t_, _ = buf.WriteString(\" ORDER BY \")\n\t\t_, _ = buf.WriteString(s.SortFields.String())\n\t}\n\tif s.Limit > 0 {\n\t\t_, _ = buf.WriteString(\" LIMIT \")\n\t\t_, _ = 
buf.WriteString(strconv.Itoa(s.Limit))\n\t}\n\tif s.Offset > 0 {\n\t\t_, _ = buf.WriteString(\" OFFSET \")\n\t\t_, _ = buf.WriteString(strconv.Itoa(s.Offset))\n\t}\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a ShowSeriesStatement.\nfunc (s *ShowSeriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: false, Name: \"\", Privilege: ReadPrivilege}}, nil\n}\n\n// DefaultDatabase returns the default database from the statement.\nfunc (s *ShowSeriesStatement) DefaultDatabase() string {\n\treturn s.Database\n}\n\n// DropSeriesStatement represents a command for removing a series from the database.\ntype DropSeriesStatement struct {\n\t// Data source that fields are extracted from (optional)\n\tSources Sources\n\n\t// An expression evaluated on data point (optional)\n\tCondition Expr\n}\n\n// String returns a string representation of the drop series statement.\nfunc (s *DropSeriesStatement) String() string {\n\tvar buf bytes.Buffer\n\tbuf.WriteString(\"DROP SERIES\")\n\n\tif s.Sources != nil {\n\t\tbuf.WriteString(\" FROM \")\n\t\tbuf.WriteString(s.Sources.String())\n\t}\n\tif s.Condition != nil {\n\t\tbuf.WriteString(\" WHERE \")\n\t\tbuf.WriteString(s.Condition.String())\n\t}\n\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a DropSeriesStatement.\nfunc (s DropSeriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: false, Name: \"\", Privilege: WritePrivilege}}, nil\n}\n\n// DeleteSeriesStatement represents a command for deleting all or part of a series from a database.\ntype DeleteSeriesStatement struct {\n\t// Data source that fields are extracted from (optional)\n\tSources Sources\n\n\t// An expression evaluated on data point (optional)\n\tCondition Expr\n}\n\n// String returns a string representation of the delete series statement.\nfunc (s *DeleteSeriesStatement) String() 
string {\n\tvar buf bytes.Buffer\n\tbuf.WriteString(\"DELETE\")\n\n\tif s.Sources != nil {\n\t\tbuf.WriteString(\" FROM \")\n\t\tbuf.WriteString(s.Sources.String())\n\t}\n\tif s.Condition != nil {\n\t\tbuf.WriteString(\" WHERE \")\n\t\tbuf.WriteString(s.Condition.String())\n\t}\n\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a DeleteSeriesStatement.\nfunc (s DeleteSeriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: false, Name: \"\", Privilege: WritePrivilege}}, nil\n}\n\n// DropShardStatement represents a command for removing a shard from\n// the node.\ntype DropShardStatement struct {\n\t// ID of the shard to be dropped.\n\tID uint64\n}\n\n// String returns a string representation of the drop series statement.\nfunc (s *DropShardStatement) String() string {\n\tvar buf bytes.Buffer\n\tbuf.WriteString(\"DROP SHARD \")\n\tbuf.WriteString(strconv.FormatUint(s.ID, 10))\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a\n// DropShardStatement.\nfunc (s *DropShardStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", Privilege: AllPrivileges}}, nil\n}\n\n// ShowContinuousQueriesStatement represents a command for listing continuous queries.\ntype ShowContinuousQueriesStatement struct{}\n\n// String returns a string representation of the show continuous queries statement.\nfunc (s *ShowContinuousQueriesStatement) String() string { return \"SHOW CONTINUOUS QUERIES\" }\n\n// RequiredPrivileges returns the privilege required to execute a ShowContinuousQueriesStatement.\nfunc (s *ShowContinuousQueriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: false, Name: \"\", Privilege: ReadPrivilege}}, nil\n}\n\n// ShowGrantsForUserStatement represents a command for listing user privileges.\ntype ShowGrantsForUserStatement 
struct {\n\t// Name of the user to display privileges.\n\tName string\n}\n\n// String returns a string representation of the show grants for user.\nfunc (s *ShowGrantsForUserStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"SHOW GRANTS FOR \")\n\t_, _ = buf.WriteString(QuoteIdent(s.Name))\n\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a ShowGrantsForUserStatement\nfunc (s *ShowGrantsForUserStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", Privilege: AllPrivileges}}, nil\n}\n\n// ShowDatabasesStatement represents a command for listing all databases in the cluster.\ntype ShowDatabasesStatement struct{}\n\n// String returns a string representation of the show databases command.\nfunc (s *ShowDatabasesStatement) String() string { return \"SHOW DATABASES\" }\n\n// RequiredPrivileges returns the privilege required to execute a ShowDatabasesStatement.\nfunc (s *ShowDatabasesStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\t// SHOW DATABASES is one of few statements that have no required privileges.\n\t// Anyone is allowed to execute it, but the returned results depend on the user's\n\t// individual database permissions.\n\treturn ExecutionPrivileges{{Admin: false, Name: \"\", Privilege: NoPrivileges}}, nil\n}\n\n// CreateContinuousQueryStatement represents a command for creating a continuous query.\ntype CreateContinuousQueryStatement struct {\n\t// Name of the continuous query to be created.\n\tName string\n\n\t// Name of the database to create the continuous query on.\n\tDatabase string\n\n\t// Source of data (SELECT statement).\n\tSource *SelectStatement\n\n\t// Interval to resample previous queries.\n\tResampleEvery time.Duration\n\n\t// Maximum duration to resample previous queries.\n\tResampleFor time.Duration\n}\n\n// String returns a string representation of the statement.\nfunc (s 
*CreateContinuousQueryStatement) String() string {\n\tvar buf bytes.Buffer\n\tfmt.Fprintf(&buf, \"CREATE CONTINUOUS QUERY %s ON %s \", QuoteIdent(s.Name), QuoteIdent(s.Database))\n\n\tif s.ResampleEvery > 0 || s.ResampleFor > 0 {\n\t\tbuf.WriteString(\"RESAMPLE \")\n\t\tif s.ResampleEvery > 0 {\n\t\t\tfmt.Fprintf(&buf, \"EVERY %s \", FormatDuration(s.ResampleEvery))\n\t\t}\n\t\tif s.ResampleFor > 0 {\n\t\t\tfmt.Fprintf(&buf, \"FOR %s \", FormatDuration(s.ResampleFor))\n\t\t}\n\t}\n\tfmt.Fprintf(&buf, \"BEGIN %s END\", s.Source.String())\n\treturn buf.String()\n}\n\n// DefaultDatabase returns the default database from the statement.\nfunc (s *CreateContinuousQueryStatement) DefaultDatabase() string {\n\treturn s.Database\n}\n\n// RequiredPrivileges returns the privilege required to execute a CreateContinuousQueryStatement.\nfunc (s *CreateContinuousQueryStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\tep := ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: ReadPrivilege}}\n\n\t// Selecting into a database that's different from the source?\n\tif s.Source.Target.Measurement.Database != \"\" {\n\t\t// Change source database privilege requirement to read.\n\t\tep[0].Privilege = ReadPrivilege\n\n\t\t// Add destination database privilege requirement and set it to write.\n\t\tp := ExecutionPrivilege{\n\t\t\tAdmin:     false,\n\t\t\tName:      s.Source.Target.Measurement.Database,\n\t\t\tPrivilege: WritePrivilege,\n\t\t}\n\t\tep = append(ep, p)\n\t}\n\n\treturn ep, nil\n}\n\nfunc (s *CreateContinuousQueryStatement) validate() error {\n\tinterval, err := s.Source.GroupByInterval()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif s.ResampleFor != 0 {\n\t\tif s.ResampleEvery != 0 && s.ResampleEvery > interval {\n\t\t\tinterval = s.ResampleEvery\n\t\t}\n\t\tif interval > s.ResampleFor {\n\t\t\treturn fmt.Errorf(\"FOR duration must be >= GROUP BY time duration: must be a minimum of %s, got %s\", FormatDuration(interval), 
FormatDuration(s.ResampleFor))\n\t\t}\n\t}\n\treturn nil\n}\n\n// DropContinuousQueryStatement represents a command for removing a continuous query.\ntype DropContinuousQueryStatement struct {\n\tName     string\n\tDatabase string\n}\n\n// String returns a string representation of the statement.\nfunc (s *DropContinuousQueryStatement) String() string {\n\treturn fmt.Sprintf(\"DROP CONTINUOUS QUERY %s ON %s\", QuoteIdent(s.Name), QuoteIdent(s.Database))\n}\n\n// RequiredPrivileges returns the privilege(s) required to execute a DropContinuousQueryStatement\nfunc (s *DropContinuousQueryStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: false, Name: \"\", Privilege: WritePrivilege}}, nil\n}\n\n// DefaultDatabase returns the default database from the statement.\nfunc (s *DropContinuousQueryStatement) DefaultDatabase() string {\n\treturn s.Database\n}\n\n// ShowMeasurementsStatement represents a command for listing measurements.\ntype ShowMeasurementsStatement struct {\n\t// Database to query. 
If blank, use the default database.\n\tDatabase string\n\n\t// Measurement name or regex.\n\tSource Source\n\n\t// An expression evaluated on data point.\n\tCondition Expr\n\n\t// Fields to sort results by\n\tSortFields SortFields\n\n\t// Maximum number of rows to be returned.\n\t// Unlimited if zero.\n\tLimit int\n\n\t// Returns rows starting at an offset from the first row.\n\tOffset int\n}\n\n// String returns a string representation of the statement.\nfunc (s *ShowMeasurementsStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"SHOW MEASUREMENTS\")\n\n\tif s.Database != \"\" {\n\t\t_, _ = buf.WriteString(\" ON \")\n\t\t_, _ = buf.WriteString(s.Database)\n\t}\n\tif s.Source != nil {\n\t\t_, _ = buf.WriteString(\" WITH MEASUREMENT \")\n\t\tif m, ok := s.Source.(*Measurement); ok && m.Regex != nil {\n\t\t\t_, _ = buf.WriteString(\"=~ \")\n\t\t} else {\n\t\t\t_, _ = buf.WriteString(\"= \")\n\t\t}\n\t\t_, _ = buf.WriteString(s.Source.String())\n\t}\n\tif s.Condition != nil {\n\t\t_, _ = buf.WriteString(\" WHERE \")\n\t\t_, _ = buf.WriteString(s.Condition.String())\n\t}\n\tif len(s.SortFields) > 0 {\n\t\t_, _ = buf.WriteString(\" ORDER BY \")\n\t\t_, _ = buf.WriteString(s.SortFields.String())\n\t}\n\tif s.Limit > 0 {\n\t\t_, _ = buf.WriteString(\" LIMIT \")\n\t\t_, _ = buf.WriteString(strconv.Itoa(s.Limit))\n\t}\n\tif s.Offset > 0 {\n\t\t_, _ = buf.WriteString(\" OFFSET \")\n\t\t_, _ = buf.WriteString(strconv.Itoa(s.Offset))\n\t}\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege(s) required to execute a ShowMeasurementsStatement.\nfunc (s *ShowMeasurementsStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: false, Name: \"\", Privilege: ReadPrivilege}}, nil\n}\n\n// DefaultDatabase returns the default database from the statement.\nfunc (s *ShowMeasurementsStatement) DefaultDatabase() string {\n\treturn s.Database\n}\n\n// DropMeasurementStatement represents a command to 
drop a measurement.\ntype DropMeasurementStatement struct {\n\t// Name of the measurement to be dropped.\n\tName string\n}\n\n// String returns a string representation of the drop measurement statement.\nfunc (s *DropMeasurementStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"DROP MEASUREMENT \")\n\t_, _ = buf.WriteString(QuoteIdent(s.Name))\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege(s) required to execute a DropMeasurementStatement\nfunc (s *DropMeasurementStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", Privilege: AllPrivileges}}, nil\n}\n\n// ShowQueriesStatement represents a command for listing all running queries.\ntype ShowQueriesStatement struct{}\n\n// String returns a string representation of the show queries statement.\nfunc (s *ShowQueriesStatement) String() string {\n\treturn \"SHOW QUERIES\"\n}\n\n// RequiredPrivileges returns the privilege required to execute a ShowQueriesStatement.\nfunc (s *ShowQueriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: false, Name: \"\", Privilege: ReadPrivilege}}, nil\n}\n\n// ShowRetentionPoliciesStatement represents a command for listing retention policies.\ntype ShowRetentionPoliciesStatement struct {\n\t// Name of the database to list policies for.\n\tDatabase string\n}\n\n// String returns a string representation of a ShowRetentionPoliciesStatement.\nfunc (s *ShowRetentionPoliciesStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"SHOW RETENTION POLICIES\")\n\tif s.Database != \"\" {\n\t\t_, _ = buf.WriteString(\" ON \")\n\t\t_, _ = buf.WriteString(QuoteIdent(s.Database))\n\t}\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege(s) required to execute a ShowRetentionPoliciesStatement\nfunc (s *ShowRetentionPoliciesStatement) RequiredPrivileges() (ExecutionPrivileges, error) 
{\n\treturn ExecutionPrivileges{{Admin: false, Name: \"\", Privilege: ReadPrivilege}}, nil\n}\n\n// DefaultDatabase returns the default database from the statement.\nfunc (s *ShowRetentionPoliciesStatement) DefaultDatabase() string {\n\treturn s.Database\n}\n\n// ShowStatsStatement displays statistics for a given module.\ntype ShowStatsStatement struct {\n\tModule string\n}\n\n// String returns a string representation of a ShowStatsStatement.\nfunc (s *ShowStatsStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"SHOW STATS\")\n\tif s.Module != \"\" {\n\t\t_, _ = buf.WriteString(\" FOR \")\n\t\t_, _ = buf.WriteString(QuoteString(s.Module))\n\t}\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege(s) required to execute a ShowStatsStatement\nfunc (s *ShowStatsStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", Privilege: AllPrivileges}}, nil\n}\n\n// ShowShardGroupsStatement represents a command for displaying shard groups in the cluster.\ntype ShowShardGroupsStatement struct{}\n\n// String returns a string representation of the SHOW SHARD GROUPS command.\nfunc (s *ShowShardGroupsStatement) String() string { return \"SHOW SHARD GROUPS\" }\n\n// RequiredPrivileges returns the privileges required to execute the statement.\nfunc (s *ShowShardGroupsStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", Privilege: AllPrivileges}}, nil\n}\n\n// ShowShardsStatement represents a command for displaying shards in the cluster.\ntype ShowShardsStatement struct{}\n\n// String returns a string representation.\nfunc (s *ShowShardsStatement) String() string { return \"SHOW SHARDS\" }\n\n// RequiredPrivileges returns the privileges required to execute the statement.\nfunc (s *ShowShardsStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", 
Privilege: AllPrivileges}}, nil\n}\n\n// ShowDiagnosticsStatement represents a command for show node diagnostics.\ntype ShowDiagnosticsStatement struct {\n\t// Module\n\tModule string\n}\n\n// String returns a string representation of the ShowDiagnosticsStatement.\nfunc (s *ShowDiagnosticsStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"SHOW DIAGNOSTICS\")\n\tif s.Module != \"\" {\n\t\t_, _ = buf.WriteString(\" FOR \")\n\t\t_, _ = buf.WriteString(QuoteString(s.Module))\n\t}\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a ShowDiagnosticsStatement\nfunc (s *ShowDiagnosticsStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: true, Name: \"\", Privilege: AllPrivileges}}, nil\n}\n\n// CreateSubscriptionStatement represents a command to add a subscription to the incoming data stream.\ntype CreateSubscriptionStatement struct {\n\tName            string\n\tDatabase        string\n\tRetentionPolicy string\n\tDestinations    []string\n\tMode            string\n}\n\n// String returns a string representation of the CreateSubscriptionStatement.\nfunc (s *CreateSubscriptionStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"CREATE SUBSCRIPTION \")\n\t_, _ = buf.WriteString(QuoteIdent(s.Name))\n\t_, _ = buf.WriteString(\" ON \")\n\t_, _ = buf.WriteString(QuoteIdent(s.Database))\n\t_, _ = buf.WriteString(\".\")\n\t_, _ = buf.WriteString(QuoteIdent(s.RetentionPolicy))\n\t_, _ = buf.WriteString(\" DESTINATIONS \")\n\t_, _ = buf.WriteString(s.Mode)\n\t_, _ = buf.WriteString(\" \")\n\tfor i, dest := range s.Destinations {\n\t\tif i != 0 {\n\t\t\t_, _ = buf.WriteString(\", \")\n\t\t}\n\t\t_, _ = buf.WriteString(QuoteString(dest))\n\t}\n\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege required to execute a CreateSubscriptionStatement.\nfunc (s *CreateSubscriptionStatement) RequiredPrivileges() 
// NOTE(review): the fragment below is the tail of a RequiredPrivileges method
// whose signature begins before this chunk (presumably on
// *CreateSubscriptionStatement — confirm against the preceding lines).
(ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// DefaultDatabase returns the default database from the statement.
func (s *CreateSubscriptionStatement) DefaultDatabase() string {
	return s.Database
}

// DropSubscriptionStatement represents a command to drop a subscription to the incoming data stream.
type DropSubscriptionStatement struct {
	Name            string
	Database        string
	RetentionPolicy string
}

// String returns a string representation of the DropSubscriptionStatement.
func (s *DropSubscriptionStatement) String() string {
	return fmt.Sprintf(`DROP SUBSCRIPTION %s ON %s.%s`, QuoteIdent(s.Name), QuoteIdent(s.Database), QuoteIdent(s.RetentionPolicy))
}

// RequiredPrivileges returns the privilege required to execute a DropSubscriptionStatement.
func (s *DropSubscriptionStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// DefaultDatabase returns the default database from the statement.
func (s *DropSubscriptionStatement) DefaultDatabase() string {
	return s.Database
}

// ShowSubscriptionsStatement represents a command to show a list of subscriptions.
type ShowSubscriptionsStatement struct {
}

// String returns a string representation of the ShowSubscriptionsStatement.
func (s *ShowSubscriptionsStatement) String() string {
	return "SHOW SUBSCRIPTIONS"
}

// RequiredPrivileges returns the privilege required to execute a ShowSubscriptionsStatement.
func (s *ShowSubscriptionsStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// ShowTagKeysStatement represents a command for listing tag keys.
type ShowTagKeysStatement struct {
	// Database to query. If blank, use the default database.
	// The database can also be specified per source in the Sources.
	Database string

	// Data sources that fields are extracted from.
	Sources Sources

	// An expression evaluated on data point.
	Condition Expr

	// Fields to sort results by.
	SortFields SortFields

	// Maximum number of tag keys per measurement. Unlimited if zero.
	Limit int

	// Returns tag keys starting at an offset from the first row.
	Offset int

	// Maximum number of series to be returned. Unlimited if zero.
	SLimit int

	// Returns series starting at an offset from the first one.
	SOffset int
}

// String returns a string representation of the statement.
// Clauses are emitted in InfluxQL grammar order so that the output
// re-parses to an equivalent statement.
func (s *ShowTagKeysStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("SHOW TAG KEYS")

	if s.Database != "" {
		_, _ = buf.WriteString(" ON ")
		_, _ = buf.WriteString(QuoteIdent(s.Database))
	}
	if s.Sources != nil {
		_, _ = buf.WriteString(" FROM ")
		_, _ = buf.WriteString(s.Sources.String())
	}
	if s.Condition != nil {
		_, _ = buf.WriteString(" WHERE ")
		_, _ = buf.WriteString(s.Condition.String())
	}
	if len(s.SortFields) > 0 {
		_, _ = buf.WriteString(" ORDER BY ")
		_, _ = buf.WriteString(s.SortFields.String())
	}
	if s.Limit > 0 {
		_, _ = buf.WriteString(" LIMIT ")
		_, _ = buf.WriteString(strconv.Itoa(s.Limit))
	}
	if s.Offset > 0 {
		_, _ = buf.WriteString(" OFFSET ")
		_, _ = buf.WriteString(strconv.Itoa(s.Offset))
	}
	if s.SLimit > 0 {
		_, _ = buf.WriteString(" SLIMIT ")
		_, _ = buf.WriteString(strconv.Itoa(s.SLimit))
	}
	if s.SOffset > 0 {
		_, _ = buf.WriteString(" SOFFSET ")
		_, _ = buf.WriteString(strconv.Itoa(s.SOffset))
	}
	return buf.String()
}

// RequiredPrivileges returns the privilege(s) required to execute a ShowTagKeysStatement.
func (s *ShowTagKeysStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil
}

// DefaultDatabase returns the default database from the statement.
func (s *ShowTagKeysStatement) DefaultDatabase() string {
	return s.Database
}

// ShowTagValuesStatement represents a command for listing tag values.
type ShowTagValuesStatement struct {
	// Database to query. If blank, use the default database.
	// The database can also be specified per source in the Sources.
	Database string

	// Data source that fields are extracted from.
	Sources Sources

	// Operation to use when selecting tag key(s).
	Op Token

	// Literal to compare the tag key(s) with.
	TagKeyExpr Literal

	// An expression evaluated on data point.
	Condition Expr

	// Fields to sort results by.
	SortFields SortFields

	// Maximum number of rows to be returned.
	// Unlimited if zero.
	Limit int

	// Returns rows starting at an offset from the first row.
	Offset int
}

// String returns a string representation of the statement.
func (s *ShowTagValuesStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("SHOW TAG VALUES")

	if s.Database != "" {
		_, _ = buf.WriteString(" ON ")
		_, _ = buf.WriteString(QuoteIdent(s.Database))
	}
	if s.Sources != nil {
		_, _ = buf.WriteString(" FROM ")
		_, _ = buf.WriteString(s.Sources.String())
	}
	_, _ = buf.WriteString(" WITH KEY ")
	_, _ = buf.WriteString(s.Op.String())
	_, _ = buf.WriteString(" ")
	// A plain string tag key is re-quoted as an identifier; any other
	// literal (e.g. a regex or list) prints in its own form.
	if lit, ok := s.TagKeyExpr.(*StringLiteral); ok {
		_, _ = buf.WriteString(QuoteIdent(lit.Val))
	} else {
		_, _ = buf.WriteString(s.TagKeyExpr.String())
	}
	if s.Condition != nil {
		_, _ = buf.WriteString(" WHERE ")
		_, _ = buf.WriteString(s.Condition.String())
	}
	if len(s.SortFields) > 0 {
		_, _ = buf.WriteString(" ORDER BY ")
		_, _ = buf.WriteString(s.SortFields.String())
	}
	if s.Limit > 0 {
		_, _ = buf.WriteString(" LIMIT ")
		_, _ = buf.WriteString(strconv.Itoa(s.Limit))
	}
	if s.Offset > 0 {
		_, _ = buf.WriteString(" OFFSET ")
		_, _ = buf.WriteString(strconv.Itoa(s.Offset))
	}
	return buf.String()
}

// RequiredPrivileges returns the privilege(s) required to execute a ShowTagValuesStatement.
func (s *ShowTagValuesStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil
}

// DefaultDatabase returns the default database from the statement.
func (s *ShowTagValuesStatement) DefaultDatabase() string {
	return s.Database
}

// ShowUsersStatement represents a command for listing users.
type ShowUsersStatement struct{}

// String returns a string representation of the ShowUsersStatement.
func (s *ShowUsersStatement) String() string {
	return "SHOW USERS"
}

// RequiredPrivileges returns the privilege(s) required to execute a ShowUsersStatement.
func (s *ShowUsersStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// ShowFieldKeysStatement represents a command for listing field keys.
type ShowFieldKeysStatement struct {
	// Database to query.
If blank, use the default database.\n\t// The database can also be specified per source in the Sources.\n\tDatabase string\n\n\t// Data sources that fields are extracted from.\n\tSources Sources\n\n\t// Fields to sort results by\n\tSortFields SortFields\n\n\t// Maximum number of rows to be returned.\n\t// Unlimited if zero.\n\tLimit int\n\n\t// Returns rows starting at an offset from the first row.\n\tOffset int\n}\n\n// String returns a string representation of the statement.\nfunc (s *ShowFieldKeysStatement) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"SHOW FIELD KEYS\")\n\n\tif s.Database != \"\" {\n\t\t_, _ = buf.WriteString(\" ON \")\n\t\t_, _ = buf.WriteString(QuoteIdent(s.Database))\n\t}\n\tif s.Sources != nil {\n\t\t_, _ = buf.WriteString(\" FROM \")\n\t\t_, _ = buf.WriteString(s.Sources.String())\n\t}\n\tif len(s.SortFields) > 0 {\n\t\t_, _ = buf.WriteString(\" ORDER BY \")\n\t\t_, _ = buf.WriteString(s.SortFields.String())\n\t}\n\tif s.Limit > 0 {\n\t\t_, _ = buf.WriteString(\" LIMIT \")\n\t\t_, _ = buf.WriteString(strconv.Itoa(s.Limit))\n\t}\n\tif s.Offset > 0 {\n\t\t_, _ = buf.WriteString(\" OFFSET \")\n\t\t_, _ = buf.WriteString(strconv.Itoa(s.Offset))\n\t}\n\treturn buf.String()\n}\n\n// RequiredPrivileges returns the privilege(s) required to execute a ShowFieldKeysStatement.\nfunc (s *ShowFieldKeysStatement) RequiredPrivileges() (ExecutionPrivileges, error) {\n\treturn ExecutionPrivileges{{Admin: false, Name: \"\", Privilege: ReadPrivilege}}, nil\n}\n\n// DefaultDatabase returns the default database from the statement.\nfunc (s *ShowFieldKeysStatement) DefaultDatabase() string {\n\treturn s.Database\n}\n\n// Fields represents a list of fields.\ntype Fields []*Field\n\n// AliasNames returns a list of calculated field names in\n// order of alias, function name, then field.\nfunc (a Fields) AliasNames() []string {\n\tnames := []string{}\n\tfor _, f := range a {\n\t\tnames = append(names, f.Name())\n\t}\n\treturn names\n}\n\n// 
Names returns a list of field names.\nfunc (a Fields) Names() []string {\n\tnames := []string{}\n\tfor _, f := range a {\n\t\tswitch expr := f.Expr.(type) {\n\t\tcase *Call:\n\t\t\tnames = append(names, expr.Name)\n\t\tcase *VarRef:\n\t\t\tnames = append(names, expr.Val)\n\t\tcase *BinaryExpr:\n\t\t\tnames = append(names, walkNames(expr)...)\n\t\tcase *ParenExpr:\n\t\t\tnames = append(names, walkNames(expr)...)\n\t\t}\n\t}\n\treturn names\n}\n\n// String returns a string representation of the fields.\nfunc (a Fields) String() string {\n\tvar str []string\n\tfor _, f := range a {\n\t\tstr = append(str, f.String())\n\t}\n\treturn strings.Join(str, \", \")\n}\n\n// Field represents an expression retrieved from a select statement.\ntype Field struct {\n\tExpr  Expr\n\tAlias string\n}\n\n// Name returns the name of the field. Returns alias, if set.\n// Otherwise uses the function name or variable name.\nfunc (f *Field) Name() string {\n\t// Return alias, if set.\n\tif f.Alias != \"\" {\n\t\treturn f.Alias\n\t}\n\n\t// Return the function name or variable name, if available.\n\tswitch expr := f.Expr.(type) {\n\tcase *Call:\n\t\treturn expr.Name\n\tcase *BinaryExpr:\n\t\treturn BinaryExprName(expr)\n\tcase *ParenExpr:\n\t\tf := Field{Expr: expr.Expr}\n\t\treturn f.Name()\n\tcase *VarRef:\n\t\treturn expr.Val\n\t}\n\n\t// Otherwise return a blank name.\n\treturn \"\"\n}\n\n// String returns a string representation of the field.\nfunc (f *Field) String() string {\n\tstr := f.Expr.String()\n\n\tif f.Alias == \"\" {\n\t\treturn str\n\t}\n\treturn fmt.Sprintf(\"%s AS %s\", str, QuoteIdent(f.Alias))\n}\n\n// Len implements sort.Interface.\nfunc (a Fields) Len() int { return len(a) }\n\n// Less implements sort.Interface.\nfunc (a Fields) Less(i, j int) bool { return a[i].Name() < a[j].Name() }\n\n// Swap implements sort.Interface.\nfunc (a Fields) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\n// Dimensions represents a list of dimensions.\ntype Dimensions []*Dimension\n\n// 
String returns a string representation of the dimensions.\nfunc (a Dimensions) String() string {\n\tvar str []string\n\tfor _, d := range a {\n\t\tstr = append(str, d.String())\n\t}\n\treturn strings.Join(str, \", \")\n}\n\n// Normalize returns the interval and tag dimensions separately.\n// Returns 0 if no time interval is specified.\nfunc (a Dimensions) Normalize() (time.Duration, []string) {\n\tvar dur time.Duration\n\tvar tags []string\n\n\tfor _, dim := range a {\n\t\tswitch expr := dim.Expr.(type) {\n\t\tcase *Call:\n\t\t\tlit, _ := expr.Args[0].(*DurationLiteral)\n\t\t\tdur = lit.Val\n\t\tcase *VarRef:\n\t\t\ttags = append(tags, expr.Val)\n\t\t}\n\t}\n\n\treturn dur, tags\n}\n\n// Dimension represents an expression that a select statement is grouped by.\ntype Dimension struct {\n\tExpr Expr\n}\n\n// String returns a string representation of the dimension.\nfunc (d *Dimension) String() string { return d.Expr.String() }\n\n// Measurements represents a list of measurements.\ntype Measurements []*Measurement\n\n// String returns a string representation of the measurements.\nfunc (a Measurements) String() string {\n\tvar str []string\n\tfor _, m := range a {\n\t\tstr = append(str, m.String())\n\t}\n\treturn strings.Join(str, \", \")\n}\n\n// Measurement represents a single measurement used as a datasource.\ntype Measurement struct {\n\tDatabase        string\n\tRetentionPolicy string\n\tName            string\n\tRegex           *RegexLiteral\n\tIsTarget        bool\n}\n\n// String returns a string representation of the measurement.\nfunc (m *Measurement) String() string {\n\tvar buf bytes.Buffer\n\tif m.Database != \"\" {\n\t\t_, _ = buf.WriteString(QuoteIdent(m.Database))\n\t\t_, _ = buf.WriteString(\".\")\n\t}\n\n\tif m.RetentionPolicy != \"\" {\n\t\t_, _ = buf.WriteString(QuoteIdent(m.RetentionPolicy))\n\t}\n\n\tif m.Database != \"\" || m.RetentionPolicy != \"\" {\n\t\t_, _ = buf.WriteString(`.`)\n\t}\n\n\tif m.Name != \"\" {\n\t\t_, _ = 
buf.WriteString(QuoteIdent(m.Name))\n\t} else if m.Regex != nil {\n\t\t_, _ = buf.WriteString(m.Regex.String())\n\t}\n\n\treturn buf.String()\n}\n\nfunc encodeMeasurement(mm *Measurement) *internal.Measurement {\n\tpb := &internal.Measurement{\n\t\tDatabase:        proto.String(mm.Database),\n\t\tRetentionPolicy: proto.String(mm.RetentionPolicy),\n\t\tName:            proto.String(mm.Name),\n\t\tIsTarget:        proto.Bool(mm.IsTarget),\n\t}\n\tif mm.Regex != nil {\n\t\tpb.Regex = proto.String(mm.Regex.Val.String())\n\t}\n\treturn pb\n}\n\nfunc decodeMeasurement(pb *internal.Measurement) (*Measurement, error) {\n\tmm := &Measurement{\n\t\tDatabase:        pb.GetDatabase(),\n\t\tRetentionPolicy: pb.GetRetentionPolicy(),\n\t\tName:            pb.GetName(),\n\t\tIsTarget:        pb.GetIsTarget(),\n\t}\n\n\tif pb.Regex != nil {\n\t\tregex, err := regexp.Compile(pb.GetRegex())\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid binary measurement regex: value=%q, err=%s\", pb.GetRegex(), err)\n\t\t}\n\t\tmm.Regex = &RegexLiteral{Val: regex}\n\t}\n\n\treturn mm, nil\n}\n\n// SubQuery is a source with a SelectStatement as the backing store.\ntype SubQuery struct {\n\tStatement *SelectStatement\n}\n\n// String returns a string representation of the subquery.\nfunc (s *SubQuery) String() string {\n\treturn fmt.Sprintf(\"(%s)\", s.Statement.String())\n}\n\n// VarRef represents a reference to a variable.\ntype VarRef struct {\n\tVal  string\n\tType DataType\n}\n\n// String returns a string representation of the variable reference.\nfunc (r *VarRef) String() string {\n\tbuf := bytes.NewBufferString(QuoteIdent(r.Val))\n\tif r.Type != Unknown {\n\t\tbuf.WriteString(\"::\")\n\t\tbuf.WriteString(r.Type.String())\n\t}\n\treturn buf.String()\n}\n\n// VarRefs represents a slice of VarRef types.\ntype VarRefs []VarRef\n\n// Len implements sort.Interface.\nfunc (a VarRefs) Len() int { return len(a) }\n\n// Less implements sort.Interface.\nfunc (a VarRefs) Less(i, j int) bool 
{\n\tif a[i].Val != a[j].Val {\n\t\treturn a[i].Val < a[j].Val\n\t}\n\treturn a[i].Type < a[j].Type\n}\n\n// Swap implements sort.Interface.\nfunc (a VarRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\n// Strings returns a slice of the variable names.\nfunc (a VarRefs) Strings() []string {\n\ts := make([]string, len(a))\n\tfor i, ref := range a {\n\t\ts[i] = ref.Val\n\t}\n\treturn s\n}\n\n// Call represents a function call.\ntype Call struct {\n\tName string\n\tArgs []Expr\n}\n\n// String returns a string representation of the call.\nfunc (c *Call) String() string {\n\t// Join arguments.\n\tvar str []string\n\tfor _, arg := range c.Args {\n\t\tstr = append(str, arg.String())\n\t}\n\n\t// Write function name and args.\n\treturn fmt.Sprintf(\"%s(%s)\", c.Name, strings.Join(str, \", \"))\n}\n\n// Distinct represents a DISTINCT expression.\ntype Distinct struct {\n\t// Identifier following DISTINCT\n\tVal string\n}\n\n// String returns a string representation of the expression.\nfunc (d *Distinct) String() string {\n\treturn fmt.Sprintf(\"DISTINCT %s\", d.Val)\n}\n\n// NewCall returns a new call expression from this expressions.\nfunc (d *Distinct) NewCall() *Call {\n\treturn &Call{\n\t\tName: \"distinct\",\n\t\tArgs: []Expr{\n\t\t\t&VarRef{Val: d.Val},\n\t\t},\n\t}\n}\n\n// NumberLiteral represents a numeric literal.\ntype NumberLiteral struct {\n\tVal float64\n}\n\n// String returns a string representation of the literal.\nfunc (l *NumberLiteral) String() string { return strconv.FormatFloat(l.Val, 'f', 3, 64) }\n\n// IntegerLiteral represents an integer literal.\ntype IntegerLiteral struct {\n\tVal int64\n}\n\n// String returns a string representation of the literal.\nfunc (l *IntegerLiteral) String() string { return fmt.Sprintf(\"%d\", l.Val) }\n\n// BooleanLiteral represents a boolean literal.\ntype BooleanLiteral struct {\n\tVal bool\n}\n\n// String returns a string representation of the literal.\nfunc (l *BooleanLiteral) String() string {\n\tif l.Val 
{\n\t\treturn \"true\"\n\t}\n\treturn \"false\"\n}\n\n// isTrueLiteral returns true if the expression is a literal \"true\" value.\nfunc isTrueLiteral(expr Expr) bool {\n\tif expr, ok := expr.(*BooleanLiteral); ok {\n\t\treturn expr.Val == true\n\t}\n\treturn false\n}\n\n// isFalseLiteral returns true if the expression is a literal \"false\" value.\nfunc isFalseLiteral(expr Expr) bool {\n\tif expr, ok := expr.(*BooleanLiteral); ok {\n\t\treturn expr.Val == false\n\t}\n\treturn false\n}\n\n// ListLiteral represents a list of tag key literals.\ntype ListLiteral struct {\n\tVals []string\n}\n\n// String returns a string representation of the literal.\nfunc (s *ListLiteral) String() string {\n\tvar buf bytes.Buffer\n\t_, _ = buf.WriteString(\"(\")\n\tfor idx, tagKey := range s.Vals {\n\t\tif idx != 0 {\n\t\t\t_, _ = buf.WriteString(\", \")\n\t\t}\n\t\t_, _ = buf.WriteString(QuoteIdent(tagKey))\n\t}\n\t_, _ = buf.WriteString(\")\")\n\treturn buf.String()\n}\n\n// StringLiteral represents a string literal.\ntype StringLiteral struct {\n\tVal string\n}\n\n// String returns a string representation of the literal.\nfunc (l *StringLiteral) String() string { return QuoteString(l.Val) }\n\n// IsTimeLiteral returns if this string can be interpreted as a time literal.\nfunc (l *StringLiteral) IsTimeLiteral() bool {\n\treturn isDateTimeString(l.Val) || isDateString(l.Val)\n}\n\n// ToTimeLiteral returns a time literal if this string can be converted to a time literal.\nfunc (l *StringLiteral) ToTimeLiteral(loc *time.Location) (*TimeLiteral, error) {\n\tif loc == nil {\n\t\tloc = time.UTC\n\t}\n\n\tif isDateTimeString(l.Val) {\n\t\tt, err := time.ParseInLocation(DateTimeFormat, l.Val, loc)\n\t\tif err != nil {\n\t\t\t// try to parse it as an RFCNano time\n\t\t\tt, err = time.ParseInLocation(time.RFC3339Nano, l.Val, loc)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, ErrInvalidTime\n\t\t\t}\n\t\t}\n\t\treturn &TimeLiteral{Val: t}, nil\n\t} else if isDateString(l.Val) {\n\t\tt, err := 
time.ParseInLocation(DateFormat, l.Val, loc)\n\t\tif err != nil {\n\t\t\treturn nil, ErrInvalidTime\n\t\t}\n\t\treturn &TimeLiteral{Val: t}, nil\n\t}\n\treturn nil, ErrInvalidTime\n}\n\n// TimeLiteral represents a point-in-time literal.\ntype TimeLiteral struct {\n\tVal time.Time\n}\n\n// String returns a string representation of the literal.\nfunc (l *TimeLiteral) String() string {\n\treturn `'` + l.Val.UTC().Format(time.RFC3339Nano) + `'`\n}\n\n// DurationLiteral represents a duration literal.\ntype DurationLiteral struct {\n\tVal time.Duration\n}\n\n// String returns a string representation of the literal.\nfunc (l *DurationLiteral) String() string { return FormatDuration(l.Val) }\n\n// nilLiteral represents a nil literal.\n// This is not available to the query language itself. It's only used internally.\ntype nilLiteral struct{}\n\n// String returns a string representation of the literal.\nfunc (l *nilLiteral) String() string { return `nil` }\n\n// BinaryExpr represents an operation between two expressions.\ntype BinaryExpr struct {\n\tOp  Token\n\tLHS Expr\n\tRHS Expr\n}\n\n// String returns a string representation of the binary expression.\nfunc (e *BinaryExpr) String() string {\n\treturn fmt.Sprintf(\"%s %s %s\", e.LHS.String(), e.Op.String(), e.RHS.String())\n}\n\nfunc (e *BinaryExpr) validate() error {\n\tv := binaryExprValidator{}\n\tWalk(&v, e)\n\tif v.err != nil {\n\t\treturn v.err\n\t} else if v.calls && v.refs {\n\t\treturn errors.New(\"binary expressions cannot mix aggregates and raw fields\")\n\t}\n\treturn nil\n}\n\ntype binaryExprValidator struct {\n\tcalls bool\n\trefs  bool\n\terr   error\n}\n\nfunc (v *binaryExprValidator) Visit(n Node) Visitor {\n\tif v.err != nil {\n\t\treturn nil\n\t}\n\n\tswitch n := n.(type) {\n\tcase *Call:\n\t\tv.calls = true\n\n\t\tif n.Name == \"top\" || n.Name == \"bottom\" {\n\t\t\tv.err = fmt.Errorf(\"cannot use %s() inside of a binary expression\", n.Name)\n\t\t\treturn nil\n\t\t}\n\n\t\tfor _, expr := range n.Args 
{\n\t\t\tswitch e := expr.(type) {\n\t\t\tcase *BinaryExpr:\n\t\t\t\tv.err = e.validate()\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn nil\n\tcase *VarRef:\n\t\tv.refs = true\n\t\treturn nil\n\t}\n\treturn v\n}\n\n// BinaryExprName returns the name of a binary expression by concatenating\n// the variables in the binary expression with underscores.\nfunc BinaryExprName(expr *BinaryExpr) string {\n\tv := binaryExprNameVisitor{}\n\tWalk(&v, expr)\n\treturn strings.Join(v.names, \"_\")\n}\n\ntype binaryExprNameVisitor struct {\n\tnames []string\n}\n\nfunc (v *binaryExprNameVisitor) Visit(n Node) Visitor {\n\tswitch n := n.(type) {\n\tcase *VarRef:\n\t\tv.names = append(v.names, n.Val)\n\tcase *Call:\n\t\tv.names = append(v.names, n.Name)\n\t\treturn nil\n\t}\n\treturn v\n}\n\n// ParenExpr represents a parenthesized expression.\ntype ParenExpr struct {\n\tExpr Expr\n}\n\n// String returns a string representation of the parenthesized expression.\nfunc (e *ParenExpr) String() string { return fmt.Sprintf(\"(%s)\", e.Expr.String()) }\n\n// RegexLiteral represents a regular expression.\ntype RegexLiteral struct {\n\tVal *regexp.Regexp\n}\n\n// String returns a string representation of the literal.\nfunc (r *RegexLiteral) String() string {\n\tif r.Val != nil {\n\t\treturn fmt.Sprintf(\"/%s/\", strings.Replace(r.Val.String(), `/`, `\\/`, -1))\n\t}\n\treturn \"\"\n}\n\n// CloneRegexLiteral returns a clone of the RegexLiteral.\nfunc CloneRegexLiteral(r *RegexLiteral) *RegexLiteral {\n\tif r == nil {\n\t\treturn nil\n\t}\n\n\tclone := &RegexLiteral{}\n\tif r.Val != nil {\n\t\tclone.Val = regexp.MustCompile(r.Val.String())\n\t}\n\n\treturn clone\n}\n\n// Wildcard represents a wild card expression.\ntype Wildcard struct {\n\tType Token\n}\n\n// String returns a string representation of the wildcard.\nfunc (e *Wildcard) String() string {\n\tswitch e.Type {\n\tcase FIELD:\n\t\treturn \"*::field\"\n\tcase TAG:\n\t\treturn \"*::tag\"\n\tdefault:\n\t\treturn \"*\"\n\t}\n}\n\n// 
CloneExpr returns a deep copy of the expression.\nfunc CloneExpr(expr Expr) Expr {\n\tif expr == nil {\n\t\treturn nil\n\t}\n\tswitch expr := expr.(type) {\n\tcase *BinaryExpr:\n\t\treturn &BinaryExpr{Op: expr.Op, LHS: CloneExpr(expr.LHS), RHS: CloneExpr(expr.RHS)}\n\tcase *BooleanLiteral:\n\t\treturn &BooleanLiteral{Val: expr.Val}\n\tcase *Call:\n\t\targs := make([]Expr, len(expr.Args))\n\t\tfor i, arg := range expr.Args {\n\t\t\targs[i] = CloneExpr(arg)\n\t\t}\n\t\treturn &Call{Name: expr.Name, Args: args}\n\tcase *Distinct:\n\t\treturn &Distinct{Val: expr.Val}\n\tcase *DurationLiteral:\n\t\treturn &DurationLiteral{Val: expr.Val}\n\tcase *IntegerLiteral:\n\t\treturn &IntegerLiteral{Val: expr.Val}\n\tcase *NumberLiteral:\n\t\treturn &NumberLiteral{Val: expr.Val}\n\tcase *ParenExpr:\n\t\treturn &ParenExpr{Expr: CloneExpr(expr.Expr)}\n\tcase *RegexLiteral:\n\t\treturn &RegexLiteral{Val: expr.Val}\n\tcase *StringLiteral:\n\t\treturn &StringLiteral{Val: expr.Val}\n\tcase *TimeLiteral:\n\t\treturn &TimeLiteral{Val: expr.Val}\n\tcase *VarRef:\n\t\treturn &VarRef{Val: expr.Val, Type: expr.Type}\n\tcase *Wildcard:\n\t\treturn &Wildcard{Type: expr.Type}\n\t}\n\tpanic(\"unreachable\")\n}\n\n// HasTimeExpr returns true if the expression has a time term.\nfunc HasTimeExpr(expr Expr) bool {\n\tswitch n := expr.(type) {\n\tcase *BinaryExpr:\n\t\tif n.Op == AND || n.Op == OR {\n\t\t\treturn HasTimeExpr(n.LHS) || HasTimeExpr(n.RHS)\n\t\t}\n\t\tif ref, ok := n.LHS.(*VarRef); ok && strings.ToLower(ref.Val) == \"time\" {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\tcase *ParenExpr:\n\t\t// walk down the tree\n\t\treturn HasTimeExpr(n.Expr)\n\tdefault:\n\t\treturn false\n\t}\n}\n\n// OnlyTimeExpr returns true if the expression only has time constraints.\nfunc OnlyTimeExpr(expr Expr) bool {\n\tif expr == nil {\n\t\treturn false\n\t}\n\tswitch n := expr.(type) {\n\tcase *BinaryExpr:\n\t\tif n.Op == AND || n.Op == OR {\n\t\t\treturn OnlyTimeExpr(n.LHS) && 
				OnlyTimeExpr(n.RHS)
		}
		if ref, ok := n.LHS.(*VarRef); ok && strings.ToLower(ref.Val) == "time" {
			return true
		}
		return false
	case *ParenExpr:
		// walk down the tree
		return OnlyTimeExpr(n.Expr)
	default:
		return false
	}
}

// TimeRange returns the minimum and maximum times specified by an expression.
// It returns zero times if there is no bound.
func TimeRange(expr Expr, loc *time.Location) (min, max time.Time, err error) {
	WalkFunc(expr, func(n Node) {
		if err != nil {
			return
		}

		if n, ok := n.(*BinaryExpr); ok {
			// Extract literal expression & operator on LHS.
			// Check for "time" on the left-hand side first.
			// Otherwise check the right-hand side and flip the operator.
			op := n.Op
			var value time.Time
			value, err = timeExprValue(n.LHS, n.RHS, loc)
			if err != nil {
				return
			} else if value.IsZero() {
				if value, err = timeExprValue(n.RHS, n.LHS, loc); value.IsZero() || err != nil {
					return
				} else if op == LT {
					op = GT
				} else if op == LTE {
					op = GTE
				} else if op == GT {
					op = LT
				} else if op == GTE {
					op = LTE
				}
			}

			// Update the min/max depending on the operator.
			// GT & LT adjust the value by +/- 1ns to make them "not equal".
			switch op {
			case GT:
				if min.IsZero() || value.After(min) {
					min = value.Add(time.Nanosecond)
				}
			case GTE:
				if min.IsZero() || value.After(min) {
					min = value
				}
			case LT:
				if max.IsZero() || value.Before(max) {
					max = value.Add(-time.Nanosecond)
				}
			case LTE:
				if max.IsZero() || value.Before(max) {
					max = value
				}
			case EQ:
				// Equality pins both ends of the range to the same instant.
				if min.IsZero() || value.After(min) {
					min = value
				}
				if max.IsZero() || value.Before(max) {
					max = value
				}
			}
		}
	})
	return
}

// TimeRangeAsEpochNano returns the minimum and maximum times, as epoch nano, specified by
// an expression. If there is no lower bound, the minimum time is returned
// for minimum. If there is no higher bound, the maximum time is returned.
func TimeRangeAsEpochNano(expr Expr) (min, max int64, err error) {
	tmin, tmax, err := TimeRange(expr, nil)
	if err != nil {
		return 0, 0, err
	}

	if tmin.IsZero() {
		min = time.Unix(0, MinTime).UnixNano()
	} else {
		min = tmin.UnixNano()
	}
	if tmax.IsZero() {
		max = time.Unix(0, MaxTime).UnixNano()
	} else {
		max = tmax.UnixNano()
	}
	return
}

// timeExprValue returns the time literal value of a "time == <TimeLiteral>" expression.
// Returns zero time if the expression is not a time expression.
func timeExprValue(ref Expr, lit Expr, loc *time.Location) (t time.Time, err error) {
	if ref, ok := ref.(*VarRef); ok && strings.ToLower(ref.Val) == "time" {
		// If literal looks like a date time then parse it as a time literal.
		if strlit, ok := lit.(*StringLiteral); ok {
			if strlit.IsTimeLiteral() {
				t, err := strlit.ToTimeLiteral(loc)
				if err != nil {
					return time.Time{}, err
				}
				lit = t
			}
		}

		switch lit := lit.(type) {
		case *TimeLiteral:
			if lit.Val.After(time.Unix(0, MaxTime)) {
				return time.Time{}, fmt.Errorf("time %s overflows time literal", lit.Val.Format(time.RFC3339))
			} else if lit.Val.Before(time.Unix(0, MinTime+1)) {
				// The minimum allowable time literal is one greater than the minimum time because the minimum time
				// is a sentinel value only used internally.
				return time.Time{}, fmt.Errorf("time %s underflows time literal", lit.Val.Format(time.RFC3339))
			}
			return lit.Val, nil
		case *DurationLiteral:
			// Durations and numbers are interpreted as epoch-nanosecond offsets.
			return time.Unix(0, int64(lit.Val)).UTC(), nil
		case *NumberLiteral:
			return time.Unix(0, int64(lit.Val)).UTC(), nil
		case *IntegerLiteral:
			return time.Unix(0, lit.Val).UTC(), nil
		default:
			return time.Time{}, fmt.Errorf("invalid operation: time and %T are not compatible", lit)
		}
	}
	return time.Time{}, nil
}

// Visitor can be called by Walk to traverse an AST hierarchy.
// The Visit() function is called once per node.
type Visitor interface {
	Visit(Node) Visitor
}

// Walk traverses a node hierarchy in depth-first order.
// A nil return from Visit prunes the subtree below that node.
func Walk(v Visitor, node Node) {
	if node == nil {
		return
	}

	if v = v.Visit(node); v == nil {
		return
	}

	switch n := node.(type) {
	case *BinaryExpr:
		Walk(v, n.LHS)
		Walk(v, n.RHS)

	case *Call:
		for _, expr := range n.Args {
			Walk(v, expr)
		}

	case *CreateContinuousQueryStatement:
		Walk(v, n.Source)

	case *Dimension:
		Walk(v, n.Expr)

	case Dimensions:
		for _, c := range n {
			Walk(v, c)
		}

	case *DeleteSeriesStatement:
		Walk(v, n.Sources)
		Walk(v, n.Condition)

	case *DropSeriesStatement:
		Walk(v, n.Sources)
		Walk(v, n.Condition)

	case *Field:
		Walk(v, n.Expr)

	case Fields:
		for _, c := range n {
			Walk(v, c)
		}

	case *ParenExpr:
		Walk(v, n.Expr)

	case *Query:
		Walk(v, n.Statements)

	case *SelectStatement:
		Walk(v, n.Fields)
		Walk(v, n.Target)
		Walk(v, n.Dimensions)
		Walk(v, n.Sources)
		Walk(v, n.Condition)
		Walk(v, n.SortFields)

	case *ShowSeriesStatement:
		Walk(v, n.Sources)
		Walk(v, n.Condition)

	case *ShowTagKeysStatement:
		Walk(v, n.Sources)
		Walk(v, n.Condition)
		Walk(v, n.SortFields)

	case *ShowTagValuesStatement:
		Walk(v, n.Sources)
		Walk(v, n.Condition)
		Walk(v, n.SortFields)

	case *ShowFieldKeysStatement:
		Walk(v, n.Sources)
		Walk(v, n.SortFields)

	case SortFields:
		for _, sf := range n {
			Walk(v, sf)
		}

	case Sources:
		for _, s := range n {
			Walk(v, s)
		}

	case *SubQuery:
		Walk(v, n.Statement)

	case Statements:
		for _, s := range n {
			Walk(v, s)
		}

	case *Target:
		if n != nil {
			Walk(v, n.Measurement)
		}
	}
}

// WalkFunc traverses a node hierarchy in depth-first order.
func WalkFunc(node Node, fn func(Node)) {
	Walk(walkFuncVisitor(fn), node)
}

// walkFuncVisitor adapts a plain function to the Visitor interface.
type walkFuncVisitor func(Node)

func (fn walkFuncVisitor) Visit(n Node) Visitor { fn(n); return fn }

// Rewriter can be called by Rewrite to replace nodes in the AST hierarchy.
// The Rewrite() function is called once per node.
type Rewriter interface {
	Rewrite(Node) Node
}

// Rewrite recursively invokes the rewriter to replace each node.
// Nodes are traversed depth-first and rewritten from leaf to root.
func Rewrite(r Rewriter, node Node) Node {
	switch n := node.(type) {
	case *Query:
		n.Statements = Rewrite(r, n.Statements).(Statements)

	case Statements:
		for i, s := range n {
			n[i] = Rewrite(r, s).(Statement)
		}

	case *SelectStatement:
		n.Fields = Rewrite(r, n.Fields).(Fields)
		n.Dimensions = Rewrite(r, n.Dimensions).(Dimensions)
		n.Sources = Rewrite(r, n.Sources).(Sources)

		// Rewrite may return nil. Nil does not satisfy the Expr
		// interface. We only assert the rewritten result to be an
		// Expr if it is not nil:
		if cond := Rewrite(r, n.Condition); cond != nil {
			n.Condition = cond.(Expr)
		} else {
			n.Condition = nil
		}

	case *SubQuery:
		n.Statement = Rewrite(r, n.Statement).(*SelectStatement)

	case Fields:
		for i, f := range n {
			n[i] = Rewrite(r, f).(*Field)
		}

	case *Field:
		n.Expr = Rewrite(r, n.Expr).(Expr)

	case Dimensions:
		for i, d := range n {
			n[i] = Rewrite(r, d).(*Dimension)
		}

	case *Dimension:
		n.Expr = Rewrite(r, n.Expr).(Expr)

	case *BinaryExpr:
		n.LHS = Rewrite(r, n.LHS).(Expr)
		n.RHS = Rewrite(r, n.RHS).(Expr)

	case *ParenExpr:
		n.Expr = Rewrite(r, n.Expr).(Expr)

	case *Call:
		for i, expr := range n.Args {
			n.Args[i] = Rewrite(r, expr).(Expr)
		}
	}

	// Children are rewritten above; the node itself is rewritten last.
	return r.Rewrite(node)
}

// RewriteFunc rewrites a node hierarchy.
func RewriteFunc(node Node, fn func(Node) Node) Node {
	return Rewrite(rewriterFunc(fn), node)
}

// rewriterFunc adapts a plain function to the Rewriter interface.
type rewriterFunc func(Node) Node

func (fn rewriterFunc) Rewrite(n Node) Node { return fn(n) }

// RewriteExpr recursively invokes the function to replace each expr.
// Nodes are traversed depth-first and rewritten from leaf to root.
// A child rewritten to nil collapses its parent: a binary expression
// keeps its surviving side, and an empty paren expression is dropped.
func RewriteExpr(expr Expr, fn func(Expr) Expr) Expr {
	switch e := expr.(type) {
	case *BinaryExpr:
		e.LHS = RewriteExpr(e.LHS, fn)
		e.RHS = RewriteExpr(e.RHS, fn)
		if e.LHS != nil && e.RHS == nil {
			expr = e.LHS
		} else if e.RHS != nil && e.LHS == nil {
			expr = e.RHS
		} else if e.LHS == nil && e.RHS == nil {
			return nil
		}

	case *ParenExpr:
		e.Expr = RewriteExpr(e.Expr, fn)
		if e.Expr == nil {
			return nil
		}

	case *Call:
		for i, expr := range e.Args {
			e.Args[i] = RewriteExpr(expr, fn)
		}
	}

	return fn(expr)
}

// Eval evaluates expr against a map.
// Literals evaluate to their value, variable references are looked up in m,
// and unsupported expression types evaluate to nil.
func Eval(expr Expr, m map[string]interface{}) interface{} {
	if expr == nil {
		return nil
	}

	switch expr := expr.(type) {
	case *BinaryExpr:
		return evalBinaryExpr(expr, m)
	case *BooleanLiteral:
		return expr.Val
	case *IntegerLiteral:
		return expr.Val
	case *NumberLiteral:
		return expr.Val
	case *ParenExpr:
		return Eval(expr.Expr, m)
	case *RegexLiteral:
		return expr.Val
	case *StringLiteral:
		return expr.Val
	case *VarRef:
		return m[expr.Val]
	default:
		return nil
	}
}

// evalBinaryExpr evaluates a binary expression after evaluating both sides,
// dispatching on the runtime type of the left-hand operand.
// (NOTE(review): this function continues past the end of this chunk.)
func evalBinaryExpr(expr *BinaryExpr, m map[string]interface{}) interface{} {
	lhs := Eval(expr.LHS, m)
	rhs := Eval(expr.RHS, m)
	if lhs == nil && rhs != nil {
		// When the LHS is nil and the RHS is a boolean, implicitly cast the
		// nil to false.
		if _, ok := rhs.(bool); ok {
			lhs = false
		}
	} else if lhs != nil && rhs == nil {
		// Implicit cast of the RHS nil to false when the LHS is a boolean.
		if _, ok := lhs.(bool); ok {
			rhs = false
		}
	}

	// Evaluate if both sides are simple types.
	switch lhs := lhs.(type) {
	case bool:
		rhs, ok := rhs.(bool)
		switch expr.Op {
		case AND:
			return ok && (lhs && rhs)
		case OR:
			return ok && (lhs || rhs)
		case BITWISE_AND:
			return ok && (lhs && rhs)
		case BITWISE_OR:
			return ok && (lhs || rhs)
		case BITWISE_XOR:
			return ok && (lhs != rhs)
		case EQ:
			return ok && (lhs == rhs)
		case NEQ:
			return ok && (lhs != rhs)
		}
	case float64:
		// Try the rhs as a float64 or int64
		rhsf, ok := rhs.(float64)
		if !ok {
			var rhsi int64
			if rhsi, ok = rhs.(int64); ok {
				rhsf = float64(rhsi)
			}
		}

		rhs := rhsf
		switch expr.Op {
		case EQ:
			return ok && (lhs == rhs)
		case NEQ:
			return ok && (lhs != rhs)
		case LT:
			return ok && (lhs < rhs)
		case LTE:
			return ok && (lhs <= rhs)
		case GT:
			return ok && (lhs > rhs)
		case GTE:
			return ok && (lhs >= rhs)
		case ADD:
			if !ok {
				return nil
			}
			return lhs + rhs
		case SUB:
			if !ok {
				return nil
			}
			return lhs - rhs
		case MUL:
			if !ok {
				return nil
			}
			return lhs * rhs
		case DIV:
			if !ok {
				return nil
			} else if rhs == 0 {
				// Division by zero yields 0 rather than +Inf/NaN.
				return float64(0)
			}
			return lhs / rhs
		case MOD:
			if !ok {
				return nil
			}
			return math.Mod(lhs, rhs)
		}
	case int64:
		// Try as a float64 to see if a float cast is required.
		rhsf, ok := rhs.(float64)
		if ok {
			lhs := float64(lhs)
			rhs := rhsf
			switch expr.Op {
			case EQ:
				return lhs == rhs
			case NEQ:
				return lhs != rhs
			case LT:
				return lhs < rhs
			case LTE:
				return lhs <= rhs
			case GT:
				return lhs > rhs
			case GTE:
				return lhs >= rhs
			case ADD:
				return lhs + rhs
			case SUB:
				return lhs - rhs
			case MUL:
				return lhs * rhs
			case DIV:
				if rhs == 0 {
					return float64(0)
				}
				return lhs / rhs
			case MOD:
				return math.Mod(lhs, rhs)
			}
		} else {
			rhs, ok := rhs.(int64)
			switch expr.Op {
			case EQ:
				return ok && (lhs == rhs)
			case NEQ:
				return ok && (lhs != rhs)
			case LT:
				return ok && (lhs < rhs)
			case LTE:
				return ok && (lhs <= rhs)
			case GT:
				return ok && (lhs > rhs)
			case GTE:
				return ok && (lhs >= rhs)
			case ADD:
				if !ok {
					return nil
				}
				return lhs + rhs
			case SUB:
				if !ok {
					return nil
				}
				return lhs - rhs
			case MUL:
				if !ok {
					return nil
				}
				return lhs * rhs
			case DIV:
				if !ok {
					return nil
				}
else if rhs == 0 {\n\t\t\t\t\treturn float64(0)\n\t\t\t\t}\n\t\t\t\treturn lhs / rhs\n\t\t\tcase MOD:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil\n\t\t\t\t} else if rhs == 0 {\n\t\t\t\t\treturn int64(0)\n\t\t\t\t}\n\t\t\t\treturn lhs % rhs\n\t\t\tcase BITWISE_AND:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\treturn lhs & rhs\n\t\t\tcase BITWISE_OR:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\treturn lhs | rhs\n\t\t\tcase BITWISE_XOR:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\treturn lhs ^ rhs\n\t\t\t}\n\t\t}\n\tcase string:\n\t\tswitch expr.Op {\n\t\tcase EQ:\n\t\t\trhs, ok := rhs.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn lhs == rhs\n\t\tcase NEQ:\n\t\t\trhs, ok := rhs.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn lhs != rhs\n\t\tcase EQREGEX:\n\t\t\trhs, ok := rhs.(*regexp.Regexp)\n\t\t\tif !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn rhs.MatchString(lhs)\n\t\tcase NEQREGEX:\n\t\t\trhs, ok := rhs.(*regexp.Regexp)\n\t\t\tif !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn !rhs.MatchString(lhs)\n\t\t}\n\t}\n\treturn nil\n}\n\n// EvalBool evaluates expr and returns true if result is a boolean true.\n// Otherwise returns false.\nfunc EvalBool(expr Expr, m map[string]interface{}) bool {\n\tv, _ := Eval(expr, m).(bool)\n\treturn v\n}\n\n// TypeMapper maps a data type to the measurement and field.\ntype TypeMapper interface {\n\tMapType(measurement *Measurement, field string) DataType\n}\n\ntype nilTypeMapper struct{}\n\nfunc (nilTypeMapper) MapType(*Measurement, string) DataType { return Unknown }\n\n// EvalType evaluates the expression's type.\nfunc EvalType(expr Expr, sources Sources, typmap TypeMapper) DataType {\n\tif typmap == nil {\n\t\ttypmap = nilTypeMapper{}\n\t}\n\n\tswitch expr := expr.(type) {\n\tcase *VarRef:\n\t\t// If this variable already has an assigned type, just use that.\n\t\tif expr.Type != Unknown && expr.Type != AnyField {\n\t\t\treturn 
expr.Type\n\t\t}\n\n\t\tvar typ DataType\n\t\tfor _, src := range sources {\n\t\t\tswitch src := src.(type) {\n\t\t\tcase *Measurement:\n\t\t\t\tif t := typmap.MapType(src, expr.Val); typ.LessThan(t) {\n\t\t\t\t\ttyp = t\n\t\t\t\t}\n\t\t\tcase *SubQuery:\n\t\t\t\t_, e := src.Statement.FieldExprByName(expr.Val)\n\t\t\t\tif e != nil {\n\t\t\t\t\tif t := EvalType(e, src.Statement.Sources, typmap); typ.LessThan(t) {\n\t\t\t\t\t\ttyp = t\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif typ == Unknown {\n\t\t\t\t\tfor _, d := range src.Statement.Dimensions {\n\t\t\t\t\t\tif d, ok := d.Expr.(*VarRef); ok && expr.Val == d.Val {\n\t\t\t\t\t\t\ttyp = Tag\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn typ\n\tcase *Call:\n\t\tswitch expr.Name {\n\t\tcase \"mean\", \"median\", \"integral\":\n\t\t\treturn Float\n\t\tcase \"count\":\n\t\t\treturn Integer\n\t\tdefault:\n\t\t\treturn EvalType(expr.Args[0], sources, typmap)\n\t\t}\n\tcase *ParenExpr:\n\t\treturn EvalType(expr.Expr, sources, typmap)\n\tcase *NumberLiteral:\n\t\treturn Float\n\tcase *IntegerLiteral:\n\t\treturn Integer\n\tcase *StringLiteral:\n\t\treturn String\n\tcase *BooleanLiteral:\n\t\treturn Boolean\n\tcase *BinaryExpr:\n\t\tlhs := EvalType(expr.LHS, sources, typmap)\n\t\trhs := EvalType(expr.RHS, sources, typmap)\n\t\tif lhs != Unknown && rhs != Unknown {\n\t\t\tif lhs < rhs {\n\t\t\t\treturn lhs\n\t\t\t} else {\n\t\t\t\treturn rhs\n\t\t\t}\n\t\t} else if lhs != Unknown {\n\t\t\treturn lhs\n\t\t} else {\n\t\t\treturn rhs\n\t\t}\n\t}\n\treturn Unknown\n}\n\nfunc FieldDimensions(sources Sources, m FieldMapper) (fields map[string]DataType, dimensions map[string]struct{}, err error) {\n\tfields = make(map[string]DataType)\n\tdimensions = make(map[string]struct{})\n\n\tfor _, src := range sources {\n\t\tswitch src := src.(type) {\n\t\tcase *Measurement:\n\t\t\tf, d, err := m.FieldDimensions(src)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tfor k, typ := range f {\n\t\t\t\tif _, ok 
:= fields[k]; typ != Unknown && (!ok || typ < fields[k]) {\n\t\t\t\t\tfields[k] = typ\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor k := range d {\n\t\t\t\tdimensions[k] = struct{}{}\n\t\t\t}\n\t\tcase *SubQuery:\n\t\t\tfor _, f := range src.Statement.Fields {\n\t\t\t\tk := f.Name()\n\t\t\t\ttyp := EvalType(f.Expr, src.Statement.Sources, m)\n\n\t\t\t\tif _, ok := fields[k]; typ != Unknown && (!ok || typ < fields[k]) {\n\t\t\t\t\tfields[k] = typ\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, d := range src.Statement.Dimensions {\n\t\t\t\tif expr, ok := d.Expr.(*VarRef); ok {\n\t\t\t\t\tdimensions[expr.Val] = struct{}{}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n// Reduce evaluates expr using the available values in valuer.\n// References that don't exist in valuer are ignored.\nfunc Reduce(expr Expr, valuer Valuer) Expr {\n\texpr = reduce(expr, valuer)\n\n\t// Unwrap parens at top level.\n\tif expr, ok := expr.(*ParenExpr); ok {\n\t\treturn expr.Expr\n\t}\n\treturn expr\n}\n\nfunc reduce(expr Expr, valuer Valuer) Expr {\n\tif expr == nil {\n\t\treturn nil\n\t}\n\n\tswitch expr := expr.(type) {\n\tcase *BinaryExpr:\n\t\treturn reduceBinaryExpr(expr, valuer)\n\tcase *Call:\n\t\treturn reduceCall(expr, valuer)\n\tcase *ParenExpr:\n\t\treturn reduceParenExpr(expr, valuer)\n\tcase *VarRef:\n\t\treturn reduceVarRef(expr, valuer)\n\tcase *nilLiteral:\n\t\treturn expr\n\tdefault:\n\t\treturn CloneExpr(expr)\n\t}\n}\n\nfunc reduceBinaryExpr(expr *BinaryExpr, valuer Valuer) Expr {\n\t// Reduce both sides first.\n\top := expr.Op\n\tlhs := reduce(expr.LHS, valuer)\n\trhs := reduce(expr.RHS, valuer)\n\n\tloc := time.UTC\n\tif v, ok := valuer.(ZoneValuer); ok {\n\t\tloc = v.Zone()\n\t}\n\n\t// Do not evaluate if one side is nil.\n\tif lhs == nil || rhs == nil {\n\t\treturn &BinaryExpr{LHS: lhs, RHS: rhs, Op: expr.Op}\n\t}\n\n\t// If we have a logical operator (AND, OR) and one side is a boolean literal\n\t// then we need to have special handling.\n\tif op == AND {\n\t\tif isFalseLiteral(lhs) || 
isFalseLiteral(rhs) {\n\t\t\treturn &BooleanLiteral{Val: false}\n\t\t} else if isTrueLiteral(lhs) {\n\t\t\treturn rhs\n\t\t} else if isTrueLiteral(rhs) {\n\t\t\treturn lhs\n\t\t}\n\t} else if op == OR {\n\t\tif isTrueLiteral(lhs) || isTrueLiteral(rhs) {\n\t\t\treturn &BooleanLiteral{Val: true}\n\t\t} else if isFalseLiteral(lhs) {\n\t\t\treturn rhs\n\t\t} else if isFalseLiteral(rhs) {\n\t\t\treturn lhs\n\t\t}\n\t}\n\n\t// Evaluate if both sides are simple types.\n\tswitch lhs := lhs.(type) {\n\tcase *BooleanLiteral:\n\t\treturn reduceBinaryExprBooleanLHS(op, lhs, rhs)\n\tcase *DurationLiteral:\n\t\treturn reduceBinaryExprDurationLHS(op, lhs, rhs, loc)\n\tcase *IntegerLiteral:\n\t\treturn reduceBinaryExprIntegerLHS(op, lhs, rhs, loc)\n\tcase *nilLiteral:\n\t\treturn reduceBinaryExprNilLHS(op, lhs, rhs)\n\tcase *NumberLiteral:\n\t\treturn reduceBinaryExprNumberLHS(op, lhs, rhs)\n\tcase *StringLiteral:\n\t\treturn reduceBinaryExprStringLHS(op, lhs, rhs, loc)\n\tcase *TimeLiteral:\n\t\treturn reduceBinaryExprTimeLHS(op, lhs, rhs, loc)\n\tdefault:\n\t\treturn &BinaryExpr{Op: op, LHS: lhs, RHS: rhs}\n\t}\n}\n\nfunc reduceBinaryExprBooleanLHS(op Token, lhs *BooleanLiteral, rhs Expr) Expr {\n\tswitch rhs := rhs.(type) {\n\tcase *BooleanLiteral:\n\t\tswitch op {\n\t\tcase EQ:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val == rhs.Val}\n\t\tcase NEQ:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val != rhs.Val}\n\t\tcase AND:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val && rhs.Val}\n\t\tcase OR:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val || rhs.Val}\n\t\tcase BITWISE_AND:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val && rhs.Val}\n\t\tcase BITWISE_OR:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val || rhs.Val}\n\t\tcase BITWISE_XOR:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val != rhs.Val}\n\t\t}\n\tcase *nilLiteral:\n\t\treturn &BooleanLiteral{Val: false}\n\t}\n\treturn &BinaryExpr{Op: op, LHS: lhs, RHS: rhs}\n}\n\nfunc reduceBinaryExprDurationLHS(op Token, lhs *DurationLiteral, rhs Expr, loc 
*time.Location) Expr {\n\tswitch rhs := rhs.(type) {\n\tcase *DurationLiteral:\n\t\tswitch op {\n\t\tcase ADD:\n\t\t\treturn &DurationLiteral{Val: lhs.Val + rhs.Val}\n\t\tcase SUB:\n\t\t\treturn &DurationLiteral{Val: lhs.Val - rhs.Val}\n\t\tcase EQ:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val == rhs.Val}\n\t\tcase NEQ:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val != rhs.Val}\n\t\tcase GT:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val > rhs.Val}\n\t\tcase GTE:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val >= rhs.Val}\n\t\tcase LT:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val < rhs.Val}\n\t\tcase LTE:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val <= rhs.Val}\n\t\t}\n\tcase *NumberLiteral:\n\t\tswitch op {\n\t\tcase MUL:\n\t\t\treturn &DurationLiteral{Val: lhs.Val * time.Duration(rhs.Val)}\n\t\tcase DIV:\n\t\t\tif rhs.Val == 0 {\n\t\t\t\treturn &DurationLiteral{Val: 0}\n\t\t\t}\n\t\t\treturn &DurationLiteral{Val: lhs.Val / time.Duration(rhs.Val)}\n\t\t}\n\tcase *IntegerLiteral:\n\t\tswitch op {\n\t\tcase MUL:\n\t\t\treturn &DurationLiteral{Val: lhs.Val * time.Duration(rhs.Val)}\n\t\tcase DIV:\n\t\t\tif rhs.Val == 0 {\n\t\t\t\treturn &DurationLiteral{Val: 0}\n\t\t\t}\n\t\t\treturn &DurationLiteral{Val: lhs.Val / time.Duration(rhs.Val)}\n\t\t}\n\tcase *TimeLiteral:\n\t\tswitch op {\n\t\tcase ADD:\n\t\t\treturn &TimeLiteral{Val: rhs.Val.Add(lhs.Val)}\n\t\t}\n\tcase *StringLiteral:\n\t\tt, err := rhs.ToTimeLiteral(loc)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\texpr := reduceBinaryExprDurationLHS(op, lhs, t, loc)\n\n\t\t// If the returned expression is still a binary expr, that means\n\t\t// we couldn't reduce it so this wasn't used in a time literal context.\n\t\tif _, ok := expr.(*BinaryExpr); !ok {\n\t\t\treturn expr\n\t\t}\n\tcase *nilLiteral:\n\t\treturn &BooleanLiteral{Val: false}\n\t}\n\treturn &BinaryExpr{Op: op, LHS: lhs, RHS: rhs}\n}\n\nfunc reduceBinaryExprIntegerLHS(op Token, lhs *IntegerLiteral, rhs Expr, loc *time.Location) Expr {\n\tswitch rhs := rhs.(type) {\n\tcase 
*NumberLiteral:\n\t\treturn reduceBinaryExprNumberLHS(op, &NumberLiteral{Val: float64(lhs.Val)}, rhs)\n\tcase *IntegerLiteral:\n\t\tswitch op {\n\t\tcase ADD:\n\t\t\treturn &IntegerLiteral{Val: lhs.Val + rhs.Val}\n\t\tcase SUB:\n\t\t\treturn &IntegerLiteral{Val: lhs.Val - rhs.Val}\n\t\tcase MUL:\n\t\t\treturn &IntegerLiteral{Val: lhs.Val * rhs.Val}\n\t\tcase DIV:\n\t\t\tif rhs.Val == 0 {\n\t\t\t\treturn &NumberLiteral{Val: 0}\n\t\t\t}\n\t\t\treturn &NumberLiteral{Val: float64(lhs.Val) / float64(rhs.Val)}\n\t\tcase MOD:\n\t\t\tif rhs.Val == 0 {\n\t\t\t\treturn &IntegerLiteral{Val: 0}\n\t\t\t}\n\t\t\treturn &IntegerLiteral{Val: lhs.Val % rhs.Val}\n\t\tcase BITWISE_AND:\n\t\t\treturn &IntegerLiteral{Val: lhs.Val & rhs.Val}\n\t\tcase BITWISE_OR:\n\t\t\treturn &IntegerLiteral{Val: lhs.Val | rhs.Val}\n\t\tcase BITWISE_XOR:\n\t\t\treturn &IntegerLiteral{Val: lhs.Val ^ rhs.Val}\n\t\tcase EQ:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val == rhs.Val}\n\t\tcase NEQ:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val != rhs.Val}\n\t\tcase GT:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val > rhs.Val}\n\t\tcase GTE:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val >= rhs.Val}\n\t\tcase LT:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val < rhs.Val}\n\t\tcase LTE:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val <= rhs.Val}\n\t\t}\n\tcase *DurationLiteral:\n\t\t// Treat the integer as a timestamp.\n\t\tswitch op {\n\t\tcase ADD:\n\t\t\treturn &TimeLiteral{Val: time.Unix(0, lhs.Val).Add(rhs.Val)}\n\t\tcase SUB:\n\t\t\treturn &TimeLiteral{Val: time.Unix(0, lhs.Val).Add(-rhs.Val)}\n\t\t}\n\tcase *TimeLiteral:\n\t\td := &DurationLiteral{Val: time.Duration(lhs.Val)}\n\t\texpr := reduceBinaryExprDurationLHS(op, d, rhs, loc)\n\t\tif _, ok := expr.(*BinaryExpr); !ok {\n\t\t\treturn expr\n\t\t}\n\tcase *StringLiteral:\n\t\tt, err := rhs.ToTimeLiteral(loc)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\td := &DurationLiteral{Val: time.Duration(lhs.Val)}\n\t\texpr := reduceBinaryExprDurationLHS(op, d, t, loc)\n\t\tif _, ok 
:= expr.(*BinaryExpr); !ok {\n\t\t\treturn expr\n\t\t}\n\tcase *nilLiteral:\n\t\treturn &BooleanLiteral{Val: false}\n\t}\n\treturn &BinaryExpr{Op: op, LHS: lhs, RHS: rhs}\n}\n\nfunc reduceBinaryExprNilLHS(op Token, lhs *nilLiteral, rhs Expr) Expr {\n\tswitch op {\n\tcase EQ, NEQ:\n\t\treturn &BooleanLiteral{Val: false}\n\t}\n\treturn &BinaryExpr{Op: op, LHS: lhs, RHS: rhs}\n}\n\nfunc reduceBinaryExprNumberLHS(op Token, lhs *NumberLiteral, rhs Expr) Expr {\n\tswitch rhs := rhs.(type) {\n\tcase *NumberLiteral:\n\t\tswitch op {\n\t\tcase ADD:\n\t\t\treturn &NumberLiteral{Val: lhs.Val + rhs.Val}\n\t\tcase SUB:\n\t\t\treturn &NumberLiteral{Val: lhs.Val - rhs.Val}\n\t\tcase MUL:\n\t\t\treturn &NumberLiteral{Val: lhs.Val * rhs.Val}\n\t\tcase DIV:\n\t\t\tif rhs.Val == 0 {\n\t\t\t\treturn &NumberLiteral{Val: 0}\n\t\t\t}\n\t\t\treturn &NumberLiteral{Val: lhs.Val / rhs.Val}\n\t\tcase MOD:\n\t\t\treturn &NumberLiteral{Val: math.Mod(lhs.Val, rhs.Val)}\n\t\tcase EQ:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val == rhs.Val}\n\t\tcase NEQ:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val != rhs.Val}\n\t\tcase GT:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val > rhs.Val}\n\t\tcase GTE:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val >= rhs.Val}\n\t\tcase LT:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val < rhs.Val}\n\t\tcase LTE:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val <= rhs.Val}\n\t\t}\n\tcase *IntegerLiteral:\n\t\tswitch op {\n\t\tcase ADD:\n\t\t\treturn &NumberLiteral{Val: lhs.Val + float64(rhs.Val)}\n\t\tcase SUB:\n\t\t\treturn &NumberLiteral{Val: lhs.Val - float64(rhs.Val)}\n\t\tcase MUL:\n\t\t\treturn &NumberLiteral{Val: lhs.Val * float64(rhs.Val)}\n\t\tcase DIV:\n\t\t\tif float64(rhs.Val) == 0 {\n\t\t\t\treturn &NumberLiteral{Val: 0}\n\t\t\t}\n\t\t\treturn &NumberLiteral{Val: lhs.Val / float64(rhs.Val)}\n\t\tcase MOD:\n\t\t\treturn &NumberLiteral{Val: math.Mod(lhs.Val, float64(rhs.Val))}\n\t\tcase EQ:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val == float64(rhs.Val)}\n\t\tcase NEQ:\n\t\t\treturn 
&BooleanLiteral{Val: lhs.Val != float64(rhs.Val)}\n\t\tcase GT:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val > float64(rhs.Val)}\n\t\tcase GTE:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val >= float64(rhs.Val)}\n\t\tcase LT:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val < float64(rhs.Val)}\n\t\tcase LTE:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val <= float64(rhs.Val)}\n\t\t}\n\tcase *nilLiteral:\n\t\treturn &BooleanLiteral{Val: false}\n\t}\n\treturn &BinaryExpr{Op: op, LHS: lhs, RHS: rhs}\n}\n\nfunc reduceBinaryExprStringLHS(op Token, lhs *StringLiteral, rhs Expr, loc *time.Location) Expr {\n\tswitch rhs := rhs.(type) {\n\tcase *StringLiteral:\n\t\tswitch op {\n\t\tcase EQ:\n\t\t\tvar expr Expr = &BooleanLiteral{Val: lhs.Val == rhs.Val}\n\t\t\t// This might be a comparison between time literals.\n\t\t\t// If it is, parse the time literals and then compare since it\n\t\t\t// could be a different result if they use different formats\n\t\t\t// for the same time.\n\t\t\tif lhs.IsTimeLiteral() && rhs.IsTimeLiteral() {\n\t\t\t\ttlhs, err := lhs.ToTimeLiteral(loc)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn expr\n\t\t\t\t}\n\n\t\t\t\ttrhs, err := rhs.ToTimeLiteral(loc)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn expr\n\t\t\t\t}\n\n\t\t\t\tt := reduceBinaryExprTimeLHS(op, tlhs, trhs, loc)\n\t\t\t\tif _, ok := t.(*BinaryExpr); !ok {\n\t\t\t\t\texpr = t\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn expr\n\t\tcase NEQ:\n\t\t\tvar expr Expr = &BooleanLiteral{Val: lhs.Val != rhs.Val}\n\t\t\t// This might be a comparison between time literals.\n\t\t\t// If it is, parse the time literals and then compare since it\n\t\t\t// could be a different result if they use different formats\n\t\t\t// for the same time.\n\t\t\tif lhs.IsTimeLiteral() && rhs.IsTimeLiteral() {\n\t\t\t\ttlhs, err := lhs.ToTimeLiteral(loc)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn expr\n\t\t\t\t}\n\n\t\t\t\ttrhs, err := rhs.ToTimeLiteral(loc)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn expr\n\t\t\t\t}\n\n\t\t\t\tt := 
reduceBinaryExprTimeLHS(op, tlhs, trhs, loc)\n\t\t\t\tif _, ok := t.(*BinaryExpr); !ok {\n\t\t\t\t\texpr = t\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn expr\n\t\tcase ADD:\n\t\t\treturn &StringLiteral{Val: lhs.Val + rhs.Val}\n\t\tdefault:\n\t\t\t// Attempt to convert the string literal to a time literal.\n\t\t\tt, err := lhs.ToTimeLiteral(loc)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\texpr := reduceBinaryExprTimeLHS(op, t, rhs, loc)\n\n\t\t\t// If the returned expression is still a binary expr, that means\n\t\t\t// we couldn't reduce it so this wasn't used in a time literal context.\n\t\t\tif _, ok := expr.(*BinaryExpr); !ok {\n\t\t\t\treturn expr\n\t\t\t}\n\t\t}\n\tcase *DurationLiteral:\n\t\t// Attempt to convert the string literal to a time literal.\n\t\tt, err := lhs.ToTimeLiteral(loc)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\texpr := reduceBinaryExprTimeLHS(op, t, rhs, loc)\n\n\t\t// If the returned expression is still a binary expr, that means\n\t\t// we couldn't reduce it so this wasn't used in a time literal context.\n\t\tif _, ok := expr.(*BinaryExpr); !ok {\n\t\t\treturn expr\n\t\t}\n\tcase *TimeLiteral:\n\t\t// Attempt to convert the string literal to a time literal.\n\t\tt, err := lhs.ToTimeLiteral(loc)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\texpr := reduceBinaryExprTimeLHS(op, t, rhs, loc)\n\n\t\t// If the returned expression is still a binary expr, that means\n\t\t// we couldn't reduce it so this wasn't used in a time literal context.\n\t\tif _, ok := expr.(*BinaryExpr); !ok {\n\t\t\treturn expr\n\t\t}\n\tcase *IntegerLiteral:\n\t\t// Attempt to convert the string literal to a time literal.\n\t\tt, err := lhs.ToTimeLiteral(loc)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\texpr := reduceBinaryExprTimeLHS(op, t, rhs, loc)\n\n\t\t// If the returned expression is still a binary expr, that means\n\t\t// we couldn't reduce it so this wasn't used in a time literal context.\n\t\tif _, ok := expr.(*BinaryExpr); !ok {\n\t\t\treturn 
expr\n\t\t}\n\tcase *nilLiteral:\n\t\tswitch op {\n\t\tcase EQ, NEQ:\n\t\t\treturn &BooleanLiteral{Val: false}\n\t\t}\n\t}\n\treturn &BinaryExpr{Op: op, LHS: lhs, RHS: rhs}\n}\n\nfunc reduceBinaryExprTimeLHS(op Token, lhs *TimeLiteral, rhs Expr, loc *time.Location) Expr {\n\tswitch rhs := rhs.(type) {\n\tcase *DurationLiteral:\n\t\tswitch op {\n\t\tcase ADD:\n\t\t\treturn &TimeLiteral{Val: lhs.Val.Add(rhs.Val)}\n\t\tcase SUB:\n\t\t\treturn &TimeLiteral{Val: lhs.Val.Add(-rhs.Val)}\n\t\t}\n\tcase *IntegerLiteral:\n\t\td := &DurationLiteral{Val: time.Duration(rhs.Val)}\n\t\texpr := reduceBinaryExprTimeLHS(op, lhs, d, loc)\n\t\tif _, ok := expr.(*BinaryExpr); !ok {\n\t\t\treturn expr\n\t\t}\n\tcase *TimeLiteral:\n\t\tswitch op {\n\t\tcase SUB:\n\t\t\treturn &DurationLiteral{Val: lhs.Val.Sub(rhs.Val)}\n\t\tcase EQ:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val.Equal(rhs.Val)}\n\t\tcase NEQ:\n\t\t\treturn &BooleanLiteral{Val: !lhs.Val.Equal(rhs.Val)}\n\t\tcase GT:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val.After(rhs.Val)}\n\t\tcase GTE:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val.After(rhs.Val) || lhs.Val.Equal(rhs.Val)}\n\t\tcase LT:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val.Before(rhs.Val)}\n\t\tcase LTE:\n\t\t\treturn &BooleanLiteral{Val: lhs.Val.Before(rhs.Val) || lhs.Val.Equal(rhs.Val)}\n\t\t}\n\tcase *StringLiteral:\n\t\tt, err := rhs.ToTimeLiteral(loc)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\texpr := reduceBinaryExprTimeLHS(op, lhs, t, loc)\n\n\t\t// If the returned expression is still a binary expr, that means\n\t\t// we couldn't reduce it so this wasn't used in a time literal context.\n\t\tif _, ok := expr.(*BinaryExpr); !ok {\n\t\t\treturn expr\n\t\t}\n\tcase *nilLiteral:\n\t\treturn &BooleanLiteral{Val: false}\n\t}\n\treturn &BinaryExpr{Op: op, LHS: lhs, RHS: rhs}\n}\n\nfunc reduceCall(expr *Call, valuer Valuer) Expr {\n\t// Evaluate \"now()\" if valuer is set.\n\tif expr.Name == \"now\" && len(expr.Args) == 0 && valuer != nil {\n\t\tif v, ok := 
valuer.Value(\"now()\"); ok {\n\t\t\tv, _ := v.(time.Time)\n\t\t\treturn &TimeLiteral{Val: v}\n\t\t}\n\t}\n\n\t// Otherwise reduce arguments.\n\targs := make([]Expr, len(expr.Args))\n\tfor i, arg := range expr.Args {\n\t\targs[i] = reduce(arg, valuer)\n\t}\n\treturn &Call{Name: expr.Name, Args: args}\n}\n\nfunc reduceParenExpr(expr *ParenExpr, valuer Valuer) Expr {\n\tsubexpr := reduce(expr.Expr, valuer)\n\tif subexpr, ok := subexpr.(*BinaryExpr); ok {\n\t\treturn &ParenExpr{Expr: subexpr}\n\t}\n\treturn subexpr\n}\n\nfunc reduceVarRef(expr *VarRef, valuer Valuer) Expr {\n\t// Ignore if there is no valuer.\n\tif valuer == nil {\n\t\treturn &VarRef{Val: expr.Val, Type: expr.Type}\n\t}\n\n\t// Retrieve the value of the ref.\n\t// Ignore if the value doesn't exist.\n\tv, ok := valuer.Value(expr.Val)\n\tif !ok {\n\t\treturn &VarRef{Val: expr.Val, Type: expr.Type}\n\t}\n\n\t// Return the value as a literal.\n\tswitch v := v.(type) {\n\tcase bool:\n\t\treturn &BooleanLiteral{Val: v}\n\tcase time.Duration:\n\t\treturn &DurationLiteral{Val: v}\n\tcase float64:\n\t\treturn &NumberLiteral{Val: v}\n\tcase string:\n\t\treturn &StringLiteral{Val: v}\n\tcase time.Time:\n\t\treturn &TimeLiteral{Val: v}\n\tdefault:\n\t\treturn &nilLiteral{}\n\t}\n}\n\n// Valuer is the interface that wraps the Value() method.\ntype Valuer interface {\n\t// Value returns the value and existence flag for a given key.\n\tValue(key string) (interface{}, bool)\n}\n\n// ZoneValuer is the interface that specifies the current time zone.\ntype ZoneValuer interface {\n\t// Zone returns the time zone location.\n\tZone() *time.Location\n}\n\n// NowValuer returns only the value for \"now()\".\ntype NowValuer struct {\n\tNow      time.Time\n\tLocation *time.Location\n}\n\n// Value is a method that returns the value and existence flag for a given key.\nfunc (v *NowValuer) Value(key string) (interface{}, bool) {\n\tif key == \"now()\" {\n\t\treturn v.Now, true\n\t}\n\treturn nil, false\n}\n\n// Zone is a method 
that returns the time.Location.\nfunc (v *NowValuer) Zone() *time.Location {\n\tif v.Location != nil {\n\t\treturn v.Location\n\t}\n\treturn time.UTC\n}\n\n// ContainsVarRef returns true if expr is a VarRef or contains one.\nfunc ContainsVarRef(expr Expr) bool {\n\tvar v containsVarRefVisitor\n\tWalk(&v, expr)\n\treturn v.contains\n}\n\ntype containsVarRefVisitor struct {\n\tcontains bool\n}\n\nfunc (v *containsVarRefVisitor) Visit(n Node) Visitor {\n\tswitch n.(type) {\n\tcase *Call:\n\t\treturn nil\n\tcase *VarRef:\n\t\tv.contains = true\n\t}\n\treturn v\n}\n\nfunc IsSelector(expr Expr) bool {\n\tif call, ok := expr.(*Call); ok {\n\t\tswitch call.Name {\n\t\tcase \"first\", \"last\", \"min\", \"max\", \"percentile\", \"sample\", \"top\", \"bottom\":\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/ast_test.go",
    "content": "package influxql_test\n\nimport (\n\t\"fmt\"\n\t\"go/importer\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n)\n\nfunc BenchmarkQuery_String(b *testing.B) {\n\tp := influxql.NewParser(strings.NewReader(`SELECT foo AS zoo, a AS b FROM bar WHERE value > 10 AND q = 'hello'`))\n\tq, _ := p.ParseStatement()\n\tfor i := 0; i < b.N; i++ {\n\t\t_ = q.String()\n\t}\n}\n\n// Ensure a value's data type can be retrieved.\nfunc TestInspectDataType(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\tv   interface{}\n\t\ttyp influxql.DataType\n\t}{\n\t\t{float64(100), influxql.Float},\n\t\t{int64(100), influxql.Integer},\n\t\t{int32(100), influxql.Integer},\n\t\t{100, influxql.Integer},\n\t\t{true, influxql.Boolean},\n\t\t{\"string\", influxql.String},\n\t\t{time.Now(), influxql.Time},\n\t\t{time.Second, influxql.Duration},\n\t\t{nil, influxql.Unknown},\n\t} {\n\t\tif typ := influxql.InspectDataType(tt.v); tt.typ != typ {\n\t\t\tt.Errorf(\"%d. %v (%s): unexpected type: %s\", i, tt.v, tt.typ, typ)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc TestDataType_String(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\ttyp influxql.DataType\n\t\tv   string\n\t}{\n\t\t{influxql.Float, \"float\"},\n\t\t{influxql.Integer, \"integer\"},\n\t\t{influxql.Boolean, \"boolean\"},\n\t\t{influxql.String, \"string\"},\n\t\t{influxql.Time, \"time\"},\n\t\t{influxql.Duration, \"duration\"},\n\t\t{influxql.Tag, \"tag\"},\n\t\t{influxql.Unknown, \"unknown\"},\n\t} {\n\t\tif v := tt.typ.String(); tt.v != v {\n\t\t\tt.Errorf(\"%d. 
%v (%s): unexpected string: %s\", i, tt.typ, tt.v, v)\n\t\t}\n\t}\n}\n\nfunc TestDataType_LessThan(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\ttyp   influxql.DataType\n\t\tother influxql.DataType\n\t\texp   bool\n\t}{\n\t\t{typ: influxql.Unknown, other: influxql.Unknown, exp: true},\n\t\t{typ: influxql.Unknown, other: influxql.Float, exp: true},\n\t\t{typ: influxql.Unknown, other: influxql.Integer, exp: true},\n\t\t{typ: influxql.Unknown, other: influxql.String, exp: true},\n\t\t{typ: influxql.Unknown, other: influxql.Boolean, exp: true},\n\t\t{typ: influxql.Unknown, other: influxql.Tag, exp: true},\n\t\t{typ: influxql.Float, other: influxql.Unknown, exp: false},\n\t\t{typ: influxql.Integer, other: influxql.Unknown, exp: false},\n\t\t{typ: influxql.String, other: influxql.Unknown, exp: false},\n\t\t{typ: influxql.Boolean, other: influxql.Unknown, exp: false},\n\t\t{typ: influxql.Tag, other: influxql.Unknown, exp: false},\n\t\t{typ: influxql.Float, other: influxql.Float, exp: false},\n\t\t{typ: influxql.Float, other: influxql.Integer, exp: false},\n\t\t{typ: influxql.Float, other: influxql.String, exp: false},\n\t\t{typ: influxql.Float, other: influxql.Boolean, exp: false},\n\t\t{typ: influxql.Float, other: influxql.Tag, exp: false},\n\t\t{typ: influxql.Integer, other: influxql.Float, exp: true},\n\t\t{typ: influxql.Integer, other: influxql.Integer, exp: false},\n\t\t{typ: influxql.Integer, other: influxql.String, exp: false},\n\t\t{typ: influxql.Integer, other: influxql.Boolean, exp: false},\n\t\t{typ: influxql.Integer, other: influxql.Tag, exp: false},\n\t\t{typ: influxql.String, other: influxql.Float, exp: true},\n\t\t{typ: influxql.String, other: influxql.Integer, exp: true},\n\t\t{typ: influxql.String, other: influxql.String, exp: false},\n\t\t{typ: influxql.String, other: influxql.Boolean, exp: false},\n\t\t{typ: influxql.String, other: influxql.Tag, exp: false},\n\t\t{typ: influxql.Boolean, other: influxql.Float, exp: true},\n\t\t{typ: 
influxql.Boolean, other: influxql.Integer, exp: true},\n\t\t{typ: influxql.Boolean, other: influxql.String, exp: true},\n\t\t{typ: influxql.Boolean, other: influxql.Boolean, exp: false},\n\t\t{typ: influxql.Boolean, other: influxql.Tag, exp: false},\n\t\t{typ: influxql.Tag, other: influxql.Float, exp: true},\n\t\t{typ: influxql.Tag, other: influxql.Integer, exp: true},\n\t\t{typ: influxql.Tag, other: influxql.String, exp: true},\n\t\t{typ: influxql.Tag, other: influxql.Boolean, exp: true},\n\t\t{typ: influxql.Tag, other: influxql.Tag, exp: false},\n\t} {\n\t\tif got, exp := tt.typ.LessThan(tt.other), tt.exp; got != exp {\n\t\t\tt.Errorf(\"%d. %q.LessThan(%q) = %v; exp = %v\", i, tt.typ, tt.other, got, exp)\n\t\t}\n\t}\n}\n\n// Ensure the SELECT statement can extract GROUP BY interval.\nfunc TestSelectStatement_GroupByInterval(t *testing.T) {\n\tq := \"SELECT sum(value) from foo  where time < now() GROUP BY time(10m)\"\n\tstmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement()\n\tif err != nil {\n\t\tt.Fatalf(\"invalid statement: %q: %s\", stmt, err)\n\t}\n\n\ts := stmt.(*influxql.SelectStatement)\n\td, err := s.GroupByInterval()\n\tif d != 10*time.Minute {\n\t\tt.Fatalf(\"group by interval not equal:\\nexp=%s\\ngot=%s\", 10*time.Minute, d)\n\t}\n\tif err != nil {\n\t\tt.Fatalf(\"error parsing group by interval: %s\", err.Error())\n\t}\n}\n\n// Ensure the SELECT statement can have its start and end time set\nfunc TestSelectStatement_SetTimeRange(t *testing.T) {\n\tq := \"SELECT sum(value) from foo where time < now() GROUP BY time(10m)\"\n\tstmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement()\n\tif err != nil {\n\t\tt.Fatalf(\"invalid statement: %q: %s\", stmt, err)\n\t}\n\n\ts := stmt.(*influxql.SelectStatement)\n\tstart := time.Now().Add(-20 * time.Hour).Round(time.Second).UTC()\n\tend := time.Now().Add(10 * time.Hour).Round(time.Second).UTC()\n\ts.SetTimeRange(start, end)\n\tmin, max := MustTimeRange(s.Condition)\n\n\tif min != 
start {\n\t\tt.Fatalf(\"start time wasn't set properly.\\n  exp: %s\\n  got: %s\", start, min)\n\t}\n\t// the end range is actually one nanosecond before the given one since end is exclusive\n\tend = end.Add(-time.Nanosecond)\n\tif max != end {\n\t\tt.Fatalf(\"end time wasn't set properly.\\n  exp: %s\\n  got: %s\", end, max)\n\t}\n\n\t// ensure we can set a time on a select that already has one set\n\tstart = time.Now().Add(-20 * time.Hour).Round(time.Second).UTC()\n\tend = time.Now().Add(10 * time.Hour).Round(time.Second).UTC()\n\tq = fmt.Sprintf(\"SELECT sum(value) from foo WHERE time >= %ds and time <= %ds GROUP BY time(10m)\", start.Unix(), end.Unix())\n\tstmt, err = influxql.NewParser(strings.NewReader(q)).ParseStatement()\n\tif err != nil {\n\t\tt.Fatalf(\"invalid statement: %q: %s\", stmt, err)\n\t}\n\n\ts = stmt.(*influxql.SelectStatement)\n\tmin, max = MustTimeRange(s.Condition)\n\tif start != min || end != max {\n\t\tt.Fatalf(\"start and end times weren't equal:\\n  exp: %s\\n  got: %s\\n  exp: %s\\n  got:%s\\n\", start, min, end, max)\n\t}\n\n\t// update and ensure it saves it\n\tstart = time.Now().Add(-40 * time.Hour).Round(time.Second).UTC()\n\tend = time.Now().Add(20 * time.Hour).Round(time.Second).UTC()\n\ts.SetTimeRange(start, end)\n\tmin, max = MustTimeRange(s.Condition)\n\n\t// TODO: right now the SetTimeRange can't override the start time if it's more recent than what they're trying to set it to.\n\t//       shouldn't matter for our purposes with continuous queries, but fix this later\n\n\tif min != start {\n\t\tt.Fatalf(\"start time wasn't set properly.\\n  exp: %s\\n  got: %s\", start, min)\n\t}\n\t// the end range is actually one nanosecond before the given one since end is exclusive\n\tend = end.Add(-time.Nanosecond)\n\tif max != end {\n\t\tt.Fatalf(\"end time wasn't set properly.\\n  exp: %s\\n  got: %s\", end, max)\n\t}\n\n\t// ensure that when we set a time range other where clause conditions are still there\n\tq = \"SELECT sum(value) 
from foo WHERE foo = 'bar' and time < now() GROUP BY time(10m)\"\n\tstmt, err = influxql.NewParser(strings.NewReader(q)).ParseStatement()\n\tif err != nil {\n\t\tt.Fatalf(\"invalid statement: %q: %s\", stmt, err)\n\t}\n\n\ts = stmt.(*influxql.SelectStatement)\n\n\t// update and ensure it saves it\n\tstart = time.Now().Add(-40 * time.Hour).Round(time.Second).UTC()\n\tend = time.Now().Add(20 * time.Hour).Round(time.Second).UTC()\n\ts.SetTimeRange(start, end)\n\tmin, max = MustTimeRange(s.Condition)\n\n\tif min != start {\n\t\tt.Fatalf(\"start time wasn't set properly.\\n  exp: %s\\n  got: %s\", start, min)\n\t}\n\t// the end range is actually one nanosecond before the given one since end is exclusive\n\tend = end.Add(-time.Nanosecond)\n\tif max != end {\n\t\tt.Fatalf(\"end time wasn't set properly.\\n  exp: %s\\n  got: %s\", end, max)\n\t}\n\n\t// ensure the where clause is there\n\thasWhere := false\n\tinfluxql.WalkFunc(s.Condition, func(n influxql.Node) {\n\t\tif ex, ok := n.(*influxql.BinaryExpr); ok {\n\t\t\tif lhs, ok := ex.LHS.(*influxql.VarRef); ok {\n\t\t\t\tif lhs.Val == \"foo\" {\n\t\t\t\t\tif rhs, ok := ex.RHS.(*influxql.StringLiteral); ok {\n\t\t\t\t\t\tif rhs.Val == \"bar\" {\n\t\t\t\t\t\t\thasWhere = true\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\tif !hasWhere {\n\t\tt.Fatal(\"set time range cleared out the where clause\")\n\t}\n}\n\n// Ensure the idents from the select clause can come out\nfunc TestSelect_NamesInSelect(t *testing.T) {\n\ts := MustParseSelectStatement(\"select count(asdf), count(bar) from cpu\")\n\ta := s.NamesInSelect()\n\tif !reflect.DeepEqual(a, []string{\"asdf\", \"bar\"}) {\n\t\tt.Fatal(\"expected names asdf and bar\")\n\t}\n}\n\n// Ensure the idents from the where clause can come out\nfunc TestSelect_NamesInWhere(t *testing.T) {\n\ts := MustParseSelectStatement(\"select * from cpu where time > 23s AND (asdf = 'jkl' OR (foo = 'bar' AND baz = 'bar'))\")\n\ta := s.NamesInWhere()\n\tif !reflect.DeepEqual(a, 
[]string{\"time\", \"asdf\", \"foo\", \"baz\"}) {\n\t\tt.Fatalf(\"exp: time,asdf,foo,baz\\ngot: %s\\n\", strings.Join(a, \",\"))\n\t}\n}\n\nfunc TestSelectStatement_HasWildcard(t *testing.T) {\n\tvar tests = []struct {\n\t\tstmt     string\n\t\twildcard bool\n\t}{\n\t\t// No wildcards\n\t\t{\n\t\t\tstmt:     `SELECT value FROM cpu`,\n\t\t\twildcard: false,\n\t\t},\n\n\t\t// Query wildcard\n\t\t{\n\t\t\tstmt:     `SELECT * FROM cpu`,\n\t\t\twildcard: true,\n\t\t},\n\n\t\t// No GROUP BY wildcards\n\t\t{\n\t\t\tstmt:     `SELECT value FROM cpu GROUP BY host`,\n\t\t\twildcard: false,\n\t\t},\n\n\t\t// No GROUP BY wildcards, time only\n\t\t{\n\t\t\tstmt:     `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`,\n\t\t\twildcard: false,\n\t\t},\n\n\t\t// GROUP BY wildcard\n\t\t{\n\t\t\tstmt:     `SELECT value FROM cpu GROUP BY *`,\n\t\t\twildcard: true,\n\t\t},\n\n\t\t// GROUP BY wildcard with time\n\t\t{\n\t\t\tstmt:     `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m)`,\n\t\t\twildcard: true,\n\t\t},\n\n\t\t// GROUP BY wildcard with explicit\n\t\t{\n\t\t\tstmt:     `SELECT value FROM cpu GROUP BY *,host`,\n\t\t\twildcard: true,\n\t\t},\n\n\t\t// GROUP BY multiple wildcards\n\t\t{\n\t\t\tstmt:     `SELECT value FROM cpu GROUP BY *,*`,\n\t\t\twildcard: true,\n\t\t},\n\n\t\t// Combo\n\t\t{\n\t\t\tstmt:     `SELECT * FROM cpu GROUP BY *`,\n\t\t\twildcard: true,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\t// Parse statement.\n\t\tstmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"invalid statement: %q: %s\", tt.stmt, err)\n\t\t}\n\n\t\t// Test wildcard detection.\n\t\tif w := stmt.(*influxql.SelectStatement).HasWildcard(); tt.wildcard != w {\n\t\t\tt.Errorf(\"%d. 
%q: unexpected wildcard detection:\\n\\nexp=%v\\n\\ngot=%v\\n\\n\", i, tt.stmt, tt.wildcard, w)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\n// Test SELECT statement field rewrite.\nfunc TestSelectStatement_RewriteFields(t *testing.T) {\n\tvar tests = []struct {\n\t\tstmt    string\n\t\trewrite string\n\t\terr     string\n\t}{\n\t\t// No wildcards\n\t\t{\n\t\t\tstmt:    `SELECT value FROM cpu`,\n\t\t\trewrite: `SELECT value FROM cpu`,\n\t\t},\n\n\t\t// Query wildcard\n\t\t{\n\t\t\tstmt:    `SELECT * FROM cpu`,\n\t\t\trewrite: `SELECT host::tag, region::tag, value1::float, value2::integer FROM cpu`,\n\t\t},\n\n\t\t// Parser fundamentally prohibits multiple query sources\n\n\t\t// Query wildcard with explicit\n\t\t{\n\t\t\tstmt:    `SELECT *,value1 FROM cpu`,\n\t\t\trewrite: `SELECT host::tag, region::tag, value1::float, value2::integer, value1::float FROM cpu`,\n\t\t},\n\n\t\t// Query multiple wildcards\n\t\t{\n\t\t\tstmt:    `SELECT *,* FROM cpu`,\n\t\t\trewrite: `SELECT host::tag, region::tag, value1::float, value2::integer, host::tag, region::tag, value1::float, value2::integer FROM cpu`,\n\t\t},\n\n\t\t// Query wildcards with group by\n\t\t{\n\t\t\tstmt:    `SELECT * FROM cpu GROUP BY host`,\n\t\t\trewrite: `SELECT region::tag, value1::float, value2::integer FROM cpu GROUP BY host`,\n\t\t},\n\n\t\t// No GROUP BY wildcards\n\t\t{\n\t\t\tstmt:    `SELECT value FROM cpu GROUP BY host`,\n\t\t\trewrite: `SELECT value FROM cpu GROUP BY host`,\n\t\t},\n\n\t\t// No GROUP BY wildcards, time only\n\t\t{\n\t\t\tstmt:    `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`,\n\t\t\trewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY time(5ms)`,\n\t\t},\n\n\t\t// GROUP BY wildcard\n\t\t{\n\t\t\tstmt:    `SELECT value FROM cpu GROUP BY *`,\n\t\t\trewrite: `SELECT value FROM cpu GROUP BY host, region`,\n\t\t},\n\n\t\t// GROUP BY wildcard with time\n\t\t{\n\t\t\tstmt:    `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m)`,\n\t\t\trewrite: 
`SELECT mean(value) FROM cpu WHERE time < now() GROUP BY host, region, time(1m)`,\n\t\t},\n\n\t\t// GROUP BY wildcard with fill\n\t\t{\n\t\t\tstmt:    `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m) fill(0)`,\n\t\t\trewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY host, region, time(1m) fill(0)`,\n\t\t},\n\n\t\t// GROUP BY wildcard with explicit\n\t\t{\n\t\t\tstmt:    `SELECT value FROM cpu GROUP BY *,host`,\n\t\t\trewrite: `SELECT value FROM cpu GROUP BY host, region, host`,\n\t\t},\n\n\t\t// GROUP BY multiple wildcards\n\t\t{\n\t\t\tstmt:    `SELECT value FROM cpu GROUP BY *,*`,\n\t\t\trewrite: `SELECT value FROM cpu GROUP BY host, region, host, region`,\n\t\t},\n\n\t\t// Combo\n\t\t{\n\t\t\tstmt:    `SELECT * FROM cpu GROUP BY *`,\n\t\t\trewrite: `SELECT value1::float, value2::integer FROM cpu GROUP BY host, region`,\n\t\t},\n\n\t\t// Wildcard function with all fields.\n\t\t{\n\t\t\tstmt:    `SELECT mean(*) FROM cpu`,\n\t\t\trewrite: `SELECT mean(value1::float) AS mean_value1, mean(value2::integer) AS mean_value2 FROM cpu`,\n\t\t},\n\n\t\t{\n\t\t\tstmt:    `SELECT distinct(*) FROM strings`,\n\t\t\trewrite: `SELECT distinct(string::string) AS distinct_string, distinct(value::float) AS distinct_value FROM strings`,\n\t\t},\n\n\t\t{\n\t\t\tstmt:    `SELECT distinct(*) FROM bools`,\n\t\t\trewrite: `SELECT distinct(bool::boolean) AS distinct_bool, distinct(value::float) AS distinct_value FROM bools`,\n\t\t},\n\n\t\t// Wildcard function with some fields excluded.\n\t\t{\n\t\t\tstmt:    `SELECT mean(*) FROM strings`,\n\t\t\trewrite: `SELECT mean(value::float) AS mean_value FROM strings`,\n\t\t},\n\n\t\t{\n\t\t\tstmt:    `SELECT mean(*) FROM bools`,\n\t\t\trewrite: `SELECT mean(value::float) AS mean_value FROM bools`,\n\t\t},\n\n\t\t// Wildcard function with an alias.\n\t\t{\n\t\t\tstmt:    `SELECT mean(*) AS alias FROM cpu`,\n\t\t\trewrite: `SELECT mean(value1::float) AS alias_value1, mean(value2::integer) AS alias_value2 FROM 
cpu`,\n\t\t},\n\n\t\t// Query regex\n\t\t{\n\t\t\tstmt:    `SELECT /1/ FROM cpu`,\n\t\t\trewrite: `SELECT value1::float FROM cpu`,\n\t\t},\n\n\t\t{\n\t\t\tstmt:    `SELECT value1 FROM cpu GROUP BY /h/`,\n\t\t\trewrite: `SELECT value1::float FROM cpu GROUP BY host`,\n\t\t},\n\n\t\t// Query regex\n\t\t{\n\t\t\tstmt:    `SELECT mean(/1/) FROM cpu`,\n\t\t\trewrite: `SELECT mean(value1::float) AS mean_value1 FROM cpu`,\n\t\t},\n\n\t\t// Rewrite subquery\n\t\t{\n\t\t\tstmt:    `SELECT * FROM (SELECT mean(value1) FROM cpu GROUP BY host) GROUP BY *`,\n\t\t\trewrite: `SELECT mean::float FROM (SELECT mean(value1::float) FROM cpu GROUP BY host) GROUP BY host`,\n\t\t},\n\n\t\t// Invalid queries that can't be rewritten should return an error (to\n\t\t// avoid a panic in the query engine)\n\t\t{\n\t\t\tstmt: `SELECT count(*) / 2 FROM cpu`,\n\t\t\terr:  `unsupported expression with wildcard: count(*) / 2`,\n\t\t},\n\n\t\t{\n\t\t\tstmt: `SELECT * / 2 FROM (SELECT count(*) FROM cpu)`,\n\t\t\terr:  `unsupported expression with wildcard: * / 2`,\n\t\t},\n\n\t\t{\n\t\t\tstmt: `SELECT count(/value/) / 2 FROM cpu`,\n\t\t\terr:  `unsupported expression with regex field: count(/value/) / 2`,\n\t\t},\n\n\t\t// This one should be possible though since there's no wildcard in the\n\t\t// binary expression.\n\t\t{\n\t\t\tstmt:    `SELECT value1 + value2, * FROM cpu`,\n\t\t\trewrite: `SELECT value1::float + value2::integer, host::tag, region::tag, value1::float, value2::integer FROM cpu`,\n\t\t},\n\n\t\t{\n\t\t\tstmt:    `SELECT value1 + value2, /value/ FROM cpu`,\n\t\t\trewrite: `SELECT value1::float + value2::integer, value1::float, value2::integer FROM cpu`,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\t// Parse statement.\n\t\tstmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"invalid statement: %q: %s\", tt.stmt, err)\n\t\t}\n\n\t\tvar ic IteratorCreator\n\t\tic.FieldDimensionsFn = func(m *influxql.Measurement) (fields 
map[string]influxql.DataType, dimensions map[string]struct{}, err error) {\n\t\t\tswitch m.Name {\n\t\t\tcase \"cpu\":\n\t\t\t\tfields = map[string]influxql.DataType{\n\t\t\t\t\t\"value1\": influxql.Float,\n\t\t\t\t\t\"value2\": influxql.Integer,\n\t\t\t\t}\n\t\t\tcase \"strings\":\n\t\t\t\tfields = map[string]influxql.DataType{\n\t\t\t\t\t\"value\":  influxql.Float,\n\t\t\t\t\t\"string\": influxql.String,\n\t\t\t\t}\n\t\t\tcase \"bools\":\n\t\t\t\tfields = map[string]influxql.DataType{\n\t\t\t\t\t\"value\": influxql.Float,\n\t\t\t\t\t\"bool\":  influxql.Boolean,\n\t\t\t\t}\n\t\t\t}\n\t\t\tdimensions = map[string]struct{}{\"host\": struct{}{}, \"region\": struct{}{}}\n\t\t\treturn\n\t\t}\n\n\t\t// Rewrite statement.\n\t\trw, err := stmt.(*influxql.SelectStatement).RewriteFields(&ic)\n\t\tif tt.err != \"\" {\n\t\t\tif err != nil && err.Error() != tt.err {\n\t\t\t\tt.Errorf(\"%d. %q: unexpected error: %s != %s\", i, tt.stmt, err.Error(), tt.err)\n\t\t\t} else if err == nil {\n\t\t\t\tt.Errorf(\"%d. %q: expected error\", i, tt.stmt)\n\t\t\t}\n\t\t} else {\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"%d. %q: error: %s\", i, tt.stmt, err)\n\t\t\t} else if rw == nil && tt.err == \"\" {\n\t\t\t\tt.Errorf(\"%d. %q: unexpected nil statement\", i, tt.stmt)\n\t\t\t} else if rw := rw.String(); tt.rewrite != rw {\n\t\t\t\tt.Errorf(\"%d. 
%q: unexpected rewrite:\\n\\nexp=%s\\n\\ngot=%s\\n\\n\", i, tt.stmt, tt.rewrite, rw)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Test SELECT statement regex conditions rewrite.\nfunc TestSelectStatement_RewriteRegexConditions(t *testing.T) {\n\tvar tests = []struct {\n\t\tin  string\n\t\tout string\n\t}{\n\t\t{in: `SELECT value FROM cpu`, out: `SELECT value FROM cpu`},\n\t\t{in: `SELECT value FROM cpu WHERE host='server-1'`, out: `SELECT value FROM cpu WHERE host='server-1'`},\n\t\t{in: `SELECT value FROM cpu WHERE host = 'server-1'`, out: `SELECT value FROM cpu WHERE host = 'server-1'`},\n\t\t{in: `SELECT value FROM cpu WHERE host != 'server-1'`, out: `SELECT value FROM cpu WHERE host != 'server-1'`},\n\n\t\t// Non matching regex\n\t\t{in: `SELECT value FROM cpu WHERE host =~ /server-1|server-2|server-3/`, out: `SELECT value FROM cpu WHERE host =~ /server-1|server-2|server-3/`},\n\t\t{in: `SELECT value FROM cpu WHERE host =~ /server-1/`, out: `SELECT value FROM cpu WHERE host =~ /server-1/`},\n\t\t{in: `SELECT value FROM cpu WHERE host !~ /server-1/`, out: `SELECT value FROM cpu WHERE host !~ /server-1/`},\n\t\t{in: `SELECT value FROM cpu WHERE host =~ /^server-1/`, out: `SELECT value FROM cpu WHERE host =~ /^server-1/`},\n\t\t{in: `SELECT value FROM cpu WHERE host =~ /server-1$/`, out: `SELECT value FROM cpu WHERE host =~ /server-1$/`},\n\t\t{in: `SELECT value FROM cpu WHERE host !~ /\\^server-1$/`, out: `SELECT value FROM cpu WHERE host !~ /\\^server-1$/`},\n\t\t{in: `SELECT value FROM cpu WHERE host !~ /\\^$/`, out: `SELECT value FROM cpu WHERE host !~ /\\^$/`},\n\t\t{in: `SELECT value FROM cpu WHERE host !~ /^server-1\\$/`, out: `SELECT value FROM cpu WHERE host !~ /^server-1\\$/`},\n\t\t{in: `SELECT value FROM cpu WHERE host =~ /^\\$/`, out: `SELECT value FROM cpu WHERE host =~ /^\\$/`},\n\t\t{in: `SELECT value FROM cpu WHERE host !~ /^a/`, out: `SELECT value FROM cpu WHERE host !~ /^a/`},\n\n\t\t// These regexes are not supported due to the presence of escaped or meta 
characters.\n\t\t{in: `SELECT value FROM cpu WHERE host !~ /^(foo|bar)$/`, out: `SELECT value FROM cpu WHERE host !~ /^(foo|bar)$/`},\n\t\t{in: `SELECT value FROM cpu WHERE host !~ /^?a$/`, out: `SELECT value FROM cpu WHERE host !~ /^?a$/`},\n\t\t{in: `SELECT value FROM cpu WHERE host !~ /^[a-z]$/`, out: `SELECT value FROM cpu WHERE host !~ /^[a-z]$/`},\n\t\t{in: `SELECT value FROM cpu WHERE host !~ /^\\d$/`, out: `SELECT value FROM cpu WHERE host !~ /^\\d$/`},\n\t\t{in: `SELECT value FROM cpu WHERE host !~ /^a*$/`, out: `SELECT value FROM cpu WHERE host !~ /^a*$/`},\n\t\t{in: `SELECT value FROM cpu WHERE host !~ /^a.b$/`, out: `SELECT value FROM cpu WHERE host !~ /^a.b$/`},\n\t\t{in: `SELECT value FROM cpu WHERE host !~ /^ab+$/`, out: `SELECT value FROM cpu WHERE host !~ /^ab+$/`},\n\t\t{in: `SELECT value FROM cpu WHERE host =~ /^hello\\world$/`, out: `SELECT value FROM cpu WHERE host =~ /^hello\\world$/`},\n\n\t\t// These regexes all match and will be rewritten.\n\t\t{in: `SELECT value FROM cpu WHERE host !~ /^a[2]$/`, out: `SELECT value FROM cpu WHERE host != 'a2'`},\n\t\t{in: `SELECT value FROM cpu WHERE host =~ /^server-1$/`, out: `SELECT value FROM cpu WHERE host = 'server-1'`},\n\t\t{in: `SELECT value FROM cpu WHERE host !~ /^server-1$/`, out: `SELECT value FROM cpu WHERE host != 'server-1'`},\n\t\t{in: `SELECT value FROM cpu WHERE host =~ /^server 1$/`, out: `SELECT value FROM cpu WHERE host = 'server 1'`},\n\t\t{in: `SELECT value FROM cpu WHERE host =~ /^$/`, out: `SELECT value FROM cpu WHERE host = ''`},\n\t\t{in: `SELECT value FROM cpu WHERE host !~ /^$/`, out: `SELECT value FROM cpu WHERE host != ''`},\n\t\t{in: `SELECT value FROM cpu WHERE host =~ /^server-1$/ OR host =~ /^server-2$/`, out: `SELECT value FROM cpu WHERE host = 'server-1' OR host = 'server-2'`},\n\t\t{in: `SELECT value FROM cpu WHERE host =~ /^server-1$/ OR host =~ /^server]a$/`, out: `SELECT value FROM cpu WHERE host = 'server-1' OR host = 'server]a'`},\n\t\t{in: `SELECT value FROM cpu 
WHERE host =~ /^hello\\?$/`, out: `SELECT value FROM cpu WHERE host = 'hello?'`},\n\t\t{in: `SELECT value FROM cpu WHERE host !~ /^\\\\$/`, out: `SELECT value FROM cpu WHERE host != '\\\\'`},\n\t\t{in: `SELECT value FROM cpu WHERE host !~ /^\\\\\\$$/`, out: `SELECT value FROM cpu WHERE host != '\\\\$'`},\n\t}\n\n\tfor i, test := range tests {\n\t\tstmt, err := influxql.NewParser(strings.NewReader(test.in)).ParseStatement()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"[Example %d], %v\", i, err)\n\t\t}\n\n\t\t// Rewrite any supported regex conditions.\n\t\tstmt.(*influxql.SelectStatement).RewriteRegexConditions()\n\n\t\t// Get the expected rewritten statement.\n\t\texpStmt, err := influxql.NewParser(strings.NewReader(test.out)).ParseStatement()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"[Example %d], %v\", i, err)\n\t\t}\n\n\t\t// Compare the (potentially) rewritten AST to the expected AST.\n\t\tif got, exp := stmt, expStmt; !reflect.DeepEqual(got, exp) {\n\t\t\tt.Errorf(\"[Example %d]\\nattempting %v\\ngot %v\\n%s\\n\\nexpected %v\\n%s\\n\", i+1, test.in, got, mustMarshalJSON(got), exp, mustMarshalJSON(exp))\n\t\t}\n\t}\n}\n\n// Test SELECT statement time field rewrite.\nfunc TestSelectStatement_RewriteTimeFields(t *testing.T) {\n\tvar tests = []struct {\n\t\ts    string\n\t\tstmt influxql.Statement\n\t}{\n\t\t{\n\t\t\ts: `SELECT time, field1 FROM cpu`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"field1\"}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{\n\t\t\t\t\t&influxql.Measurement{Name: \"cpu\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `SELECT time AS timestamp, field1 FROM cpu`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"field1\"}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{\n\t\t\t\t\t&influxql.Measurement{Name: \"cpu\"},\n\t\t\t\t},\n\t\t\t\tTimeAlias: 
\"timestamp\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\t// Parse statement.\n\t\tstmt, err := influxql.NewParser(strings.NewReader(tt.s)).ParseStatement()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"invalid statement: %q: %s\", tt.s, err)\n\t\t}\n\n\t\t// Rewrite statement.\n\t\tstmt.(*influxql.SelectStatement).RewriteTimeFields()\n\t\tif !reflect.DeepEqual(tt.stmt, stmt) {\n\t\t\tt.Logf(\"\\n# %s\\nexp=%s\\ngot=%s\\n\", tt.s, mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt))\n\t\t\tt.Logf(\"\\nSQL exp=%s\\nSQL got=%s\\n\", tt.stmt.String(), stmt.String())\n\t\t\tt.Errorf(\"%d. %q\\n\\nstmt mismatch:\\n\\nexp=%#v\\n\\ngot=%#v\\n\\n\", i, tt.s, tt.stmt, stmt)\n\t\t}\n\t}\n}\n\n// Ensure that the IsRawQuery flag gets set properly\nfunc TestSelectStatement_IsRawQuerySet(t *testing.T) {\n\tvar tests = []struct {\n\t\tstmt  string\n\t\tisRaw bool\n\t}{\n\t\t{\n\t\t\tstmt:  \"select * from foo\",\n\t\t\tisRaw: true,\n\t\t},\n\t\t{\n\t\t\tstmt:  \"select value1,value2 from foo\",\n\t\t\tisRaw: true,\n\t\t},\n\t\t{\n\t\t\tstmt:  \"select value1,value2 from foo, time(10m)\",\n\t\t\tisRaw: true,\n\t\t},\n\t\t{\n\t\t\tstmt:  \"select mean(value) from foo where time < now() group by time(5m)\",\n\t\t\tisRaw: false,\n\t\t},\n\t\t{\n\t\t\tstmt:  \"select mean(value) from foo group by bar\",\n\t\t\tisRaw: false,\n\t\t},\n\t\t{\n\t\t\tstmt:  \"select mean(value) from foo group by *\",\n\t\t\tisRaw: false,\n\t\t},\n\t\t{\n\t\t\tstmt:  \"select mean(value) from foo group by *\",\n\t\t\tisRaw: false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\ts := MustParseSelectStatement(tt.stmt)\n\t\tif s.IsRawQuery != tt.isRaw {\n\t\t\tt.Errorf(\"'%s', IsRawQuery should be %v\", tt.stmt, tt.isRaw)\n\t\t}\n\t}\n}\n\nfunc TestSelectStatement_HasDerivative(t *testing.T) {\n\tvar tests = []struct {\n\t\tstmt       string\n\t\tderivative bool\n\t}{\n\t\t// No derivatives\n\t\t{\n\t\t\tstmt:       `SELECT value FROM cpu`,\n\t\t\tderivative: false,\n\t\t},\n\n\t\t// Query 
derivative\n\t\t{\n\t\t\tstmt:       `SELECT derivative(value) FROM cpu`,\n\t\t\tderivative: true,\n\t\t},\n\n\t\t// No GROUP BY time only\n\t\t{\n\t\t\tstmt:       `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`,\n\t\t\tderivative: false,\n\t\t},\n\n\t\t// No GROUP BY derivatives, time only\n\t\t{\n\t\t\tstmt:       `SELECT derivative(mean(value)) FROM cpu where time < now() GROUP BY time(5ms)`,\n\t\t\tderivative: true,\n\t\t},\n\n\t\t{\n\t\t\tstmt:       `SELECT value FROM cpu`,\n\t\t\tderivative: false,\n\t\t},\n\n\t\t// Query derivative\n\t\t{\n\t\t\tstmt:       `SELECT non_negative_derivative(value) FROM cpu`,\n\t\t\tderivative: true,\n\t\t},\n\n\t\t// No GROUP BY derivatives, time only\n\t\t{\n\t\t\tstmt:       `SELECT non_negative_derivative(mean(value)) FROM cpu where time < now() GROUP BY time(5ms)`,\n\t\t\tderivative: true,\n\t\t},\n\n\t\t// Invalid derivative function name\n\t\t{\n\t\t\tstmt:       `SELECT typoDerivative(value) FROM cpu where time < now()`,\n\t\t\tderivative: false,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\t// Parse statement.\n\t\tt.Logf(\"index: %d, statement: %s\", i, tt.stmt)\n\t\tstmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"invalid statement: %q: %s\", tt.stmt, err)\n\t\t}\n\n\t\t// Test derivative detection.\n\t\tif d := stmt.(*influxql.SelectStatement).HasDerivative(); tt.derivative != d {\n\t\t\tt.Errorf(\"%d. 
%q: unexpected derivative detection:\\n\\nexp=%v\\n\\ngot=%v\\n\\n\", i, tt.stmt, tt.derivative, d)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc TestSelectStatement_IsSimpleDerivative(t *testing.T) {\n\tvar tests = []struct {\n\t\tstmt       string\n\t\tderivative bool\n\t}{\n\t\t// No derivatives\n\t\t{\n\t\t\tstmt:       `SELECT value FROM cpu`,\n\t\t\tderivative: false,\n\t\t},\n\n\t\t// Query derivative\n\t\t{\n\t\t\tstmt:       `SELECT derivative(value) FROM cpu`,\n\t\t\tderivative: true,\n\t\t},\n\n\t\t// Query derivative\n\t\t{\n\t\t\tstmt:       `SELECT non_negative_derivative(value) FROM cpu`,\n\t\t\tderivative: true,\n\t\t},\n\n\t\t// No GROUP BY time only\n\t\t{\n\t\t\tstmt:       `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`,\n\t\t\tderivative: false,\n\t\t},\n\n\t\t// No GROUP BY derivatives, time only\n\t\t{\n\t\t\tstmt:       `SELECT non_negative_derivative(mean(value)) FROM cpu where time < now() GROUP BY time(5ms)`,\n\t\t\tderivative: false,\n\t\t},\n\n\t\t// Invalid derivative function name\n\t\t{\n\t\t\tstmt:       `SELECT typoDerivative(value) FROM cpu where time < now()`,\n\t\t\tderivative: false,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\t// Parse statement.\n\t\tt.Logf(\"index: %d, statement: %s\", i, tt.stmt)\n\t\tstmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"invalid statement: %q: %s\", tt.stmt, err)\n\t\t}\n\n\t\t// Test derivative detection.\n\t\tif d := stmt.(*influxql.SelectStatement).IsSimpleDerivative(); tt.derivative != d {\n\t\t\tt.Errorf(\"%d. 
%q: unexpected derivative detection:\\n\\nexp=%v\\n\\ngot=%v\\n\\n\", i, tt.stmt, tt.derivative, d)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\n// Ensure binary expression names can be evaluated.\nfunc TestBinaryExprName(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\texpr string\n\t\tname string\n\t}{\n\t\t{expr: `value + 1`, name: `value`},\n\t\t{expr: `\"user\" / total`, name: `user_total`},\n\t\t{expr: `(\"user\" + total) / total`, name: `user_total_total`},\n\t} {\n\t\texpr := influxql.MustParseExpr(tt.expr)\n\t\tswitch expr := expr.(type) {\n\t\tcase *influxql.BinaryExpr:\n\t\t\tname := influxql.BinaryExprName(expr)\n\t\t\tif name != tt.name {\n\t\t\t\tt.Errorf(\"%d. unexpected name %s, got %s\", i, name, tt.name)\n\t\t\t}\n\t\tdefault:\n\t\t\tt.Errorf(\"%d. unexpected expr type: %T\", i, expr)\n\t\t}\n\t}\n}\n\n// Ensure the time range of an expression can be extracted.\nfunc TestTimeRange(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\texpr          string\n\t\tmin, max, err string\n\t\tloc           string\n\t}{\n\t\t// LHS VarRef\n\t\t{expr: `time > '2000-01-01 00:00:00'`, min: `2000-01-01T00:00:00.000000001Z`, max: `0001-01-01T00:00:00Z`},\n\t\t{expr: `time >= '2000-01-01 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`},\n\t\t{expr: `time < '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `1999-12-31T23:59:59.999999999Z`},\n\t\t{expr: `time <= '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `2000-01-01T00:00:00Z`},\n\n\t\t// RHS VarRef\n\t\t{expr: `'2000-01-01 00:00:00' > time`, min: `0001-01-01T00:00:00Z`, max: `1999-12-31T23:59:59.999999999Z`},\n\t\t{expr: `'2000-01-01 00:00:00' >= time`, min: `0001-01-01T00:00:00Z`, max: `2000-01-01T00:00:00Z`},\n\t\t{expr: `'2000-01-01 00:00:00' < time`, min: `2000-01-01T00:00:00.000000001Z`, max: `0001-01-01T00:00:00Z`},\n\t\t{expr: `'2000-01-01 00:00:00' <= time`, min: `2000-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`},\n\n\t\t// number literal\n\t\t{expr: `time < 10`, 
min: `0001-01-01T00:00:00Z`, max: `1970-01-01T00:00:00.000000009Z`},\n\n\t\t// Equality\n\t\t{expr: `time = '2000-01-01 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `2000-01-01T00:00:00Z`},\n\n\t\t// Multiple time expressions.\n\t\t{expr: `time >= '2000-01-01 00:00:00' AND time < '2000-01-02 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `2000-01-01T23:59:59.999999999Z`},\n\n\t\t// Min/max crossover\n\t\t{expr: `time >= '2000-01-01 00:00:00' AND time <= '1999-01-01 00:00:00'`, min: `2000-01-01T00:00:00Z`, max: `1999-01-01T00:00:00Z`},\n\n\t\t// Absolute time\n\t\t{expr: `time = 1388534400s`, min: `2014-01-01T00:00:00Z`, max: `2014-01-01T00:00:00Z`},\n\n\t\t// Non-comparative expressions.\n\t\t{expr: `time`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`},\n\t\t{expr: `time + 2`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`},\n\t\t{expr: `time - '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`},\n\t\t{expr: `time AND '2000-01-01 00:00:00'`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`},\n\n\t\t// Invalid time expressions.\n\t\t{expr: `time > \"2000-01-01 00:00:00\"`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`, err: `invalid operation: time and *influxql.VarRef are not compatible`},\n\t\t{expr: `time > '2262-04-11 23:47:17'`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`, err: `time 2262-04-11T23:47:17Z overflows time literal`},\n\t\t{expr: `time > '1677-09-20 19:12:43'`, min: `0001-01-01T00:00:00Z`, max: `0001-01-01T00:00:00Z`, err: `time 1677-09-20T19:12:43Z underflows time literal`},\n\n\t\t// Time zone expressions.\n\t\t{expr: `time >= '2000-01-01'`, loc: `America/Los_Angeles`, min: `2000-01-01T00:00:00-08:00`, max: `0001-01-01T00:00:00Z`},\n\t\t{expr: `time <= '2000-01-01'`, loc: `America/Los_Angeles`, min: `0001-01-01T00:00:00Z`, max: `2000-01-01T00:00:00-08:00`},\n\t\t{expr: `time >= '2000-01-01 03:17:00'`, loc: `America/Los_Angeles`, min: `2000-01-01T03:17:00-08:00`, 
max: `0001-01-01T00:00:00Z`},\n\t\t{expr: `time <= '2000-01-01 03:17:00'`, loc: `America/Los_Angeles`, min: `0001-01-01T00:00:00Z`, max: `2000-01-01T03:17:00-08:00`},\n\t} {\n\t\tt.Run(tt.expr, func(t *testing.T) {\n\t\t\t// Load the time zone if one was specified.\n\t\t\tvar loc *time.Location\n\t\t\tif tt.loc != \"\" {\n\t\t\t\tl, err := time.LoadLocation(tt.loc)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"unable to load time zone %s: %s\", tt.loc, err)\n\t\t\t\t}\n\t\t\t\tloc = l\n\t\t\t}\n\n\t\t\t// Extract time range.\n\t\t\texpr := MustParseExpr(tt.expr)\n\t\t\tmin, max, err := influxql.TimeRange(expr, loc)\n\n\t\t\t// Compare with expected min/max.\n\t\t\tif min := min.Format(time.RFC3339Nano); tt.min != min {\n\t\t\t\tt.Fatalf(\"%d. %s: unexpected min:\\n\\nexp=%s\\n\\ngot=%s\\n\\n\", i, tt.expr, tt.min, min)\n\t\t\t}\n\t\t\tif max := max.Format(time.RFC3339Nano); tt.max != max {\n\t\t\t\tt.Fatalf(\"%d. %s: unexpected max:\\n\\nexp=%s\\n\\ngot=%s\\n\\n\", i, tt.expr, tt.max, max)\n\t\t\t}\n\t\t\tif (err != nil && err.Error() != tt.err) || (err == nil && tt.err != \"\") {\n\t\t\t\tt.Fatalf(\"%d. 
%s: unexpected error:\\n\\nexp=%s\\n\\ngot=%s\\n\\n\", i, tt.expr, tt.err, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// Ensure that we see if a where clause has only time limitations\nfunc TestOnlyTimeExpr(t *testing.T) {\n\tvar tests = []struct {\n\t\tstmt string\n\t\texp  bool\n\t}{\n\t\t{\n\t\t\tstmt: `SELECT value FROM myseries WHERE value > 1`,\n\t\t\texp:  false,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT value FROM foo WHERE time >= '2000-01-01T00:00:05Z'`,\n\t\t\texp:  true,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT value FROM foo WHERE time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:05Z'`,\n\t\t\texp:  true,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT value FROM foo WHERE time >= '2000-01-01T00:00:05Z' AND asdf = 'bar'`,\n\t\t\texp:  false,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT value FROM foo WHERE asdf = 'jkl' AND (time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:05Z')`,\n\t\t\texp:  false,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\t// Parse statement.\n\t\tstmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"invalid statement: %q: %s\", tt.stmt, err)\n\t\t}\n\t\tif influxql.OnlyTimeExpr(stmt.(*influxql.SelectStatement).Condition) != tt.exp {\n\t\t\tt.Fatalf(\"%d. 
expected statement to return only time dimension to be %t: %s\", i, tt.exp, tt.stmt)\n\t\t}\n\t}\n}\n\n// Ensure an AST node can be rewritten.\nfunc TestRewrite(t *testing.T) {\n\texpr := MustParseExpr(`time > 1 OR foo = 2`)\n\n\t// Flip LHS & RHS in all binary expressions.\n\tact := influxql.RewriteFunc(expr, func(n influxql.Node) influxql.Node {\n\t\tswitch n := n.(type) {\n\t\tcase *influxql.BinaryExpr:\n\t\t\treturn &influxql.BinaryExpr{Op: n.Op, LHS: n.RHS, RHS: n.LHS}\n\t\tdefault:\n\t\t\treturn n\n\t\t}\n\t})\n\n\t// Verify that everything is flipped.\n\tif act := act.String(); act != `2 = foo OR 1 > time` {\n\t\tt.Fatalf(\"unexpected result: %s\", act)\n\t}\n}\n\n// Ensure an Expr can be rewritten handling nils.\nfunc TestRewriteExpr(t *testing.T) {\n\texpr := MustParseExpr(`(time > 1 AND time < 10) OR foo = 2`)\n\n\t// Remove all time expressions.\n\tact := influxql.RewriteExpr(expr, func(e influxql.Expr) influxql.Expr {\n\t\tswitch e := e.(type) {\n\t\tcase *influxql.BinaryExpr:\n\t\t\tif lhs, ok := e.LHS.(*influxql.VarRef); ok && lhs.Val == \"time\" {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn e\n\t})\n\n\t// Verify that everything is flipped.\n\tif act := act.String(); act != `foo = 2` {\n\t\tt.Fatalf(\"unexpected result: %s\", act)\n\t}\n}\n\n// Ensure that the String() value of a statement is parseable\nfunc TestParseString(t *testing.T) {\n\tvar tests = []struct {\n\t\tstmt string\n\t}{\n\t\t{\n\t\t\tstmt: `SELECT \"cpu load\" FROM myseries`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT \"cpu load\" FROM \"my series\"`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT \"cpu\\\"load\" FROM myseries`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT \"cpu'load\" FROM myseries`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT \"cpu load\" FROM \"my\\\"series\"`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT \"field with spaces\" FROM \"\\\"ugly\\\" db\".\"\\\"ugly\\\" rp\".\"\\\"ugly\\\" measurement\"`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT * FROM myseries`,\n\t\t},\n\t\t{\n\t\t\tstmt: `DROP DATABASE 
\"!\"`,\n\t\t},\n\t\t{\n\t\t\tstmt: `DROP RETENTION POLICY \"my rp\" ON \"a database\"`,\n\t\t},\n\t\t{\n\t\t\tstmt: `CREATE RETENTION POLICY \"my rp\" ON \"a database\" DURATION 1d REPLICATION 1`,\n\t\t},\n\t\t{\n\t\t\tstmt: `ALTER RETENTION POLICY \"my rp\" ON \"a database\" DEFAULT`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW RETENTION POLICIES ON \"a database\"`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW TAG VALUES WITH KEY IN (\"a long name\", short)`,\n\t\t},\n\t\t{\n\t\t\tstmt: `DROP CONTINUOUS QUERY \"my query\" ON \"my database\"`,\n\t\t},\n\t\t// See issues https://github.com/influxdata/influxdb/issues/1647\n\t\t// and https://github.com/influxdata/influxdb/issues/4404\n\t\t//{\n\t\t//\tstmt: `DELETE FROM \"my db\".\"my rp\".\"my measurement\"`,\n\t\t//},\n\t\t{\n\t\t\tstmt: `DROP SUBSCRIPTION \"ugly \\\"subscription\\\" name\" ON \"\\\"my\\\" db\".\"\\\"my\\\" rp\"`,\n\t\t},\n\t\t{\n\t\t\tstmt: `CREATE SUBSCRIPTION \"ugly \\\"subscription\\\" name\" ON \"\\\"my\\\" db\".\"\\\"my\\\" rp\" DESTINATIONS ALL 'my host', 'my other host'`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW MEASUREMENTS WITH MEASUREMENT =~ /foo/`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW MEASUREMENTS WITH MEASUREMENT = \"and/or\"`,\n\t\t},\n\t\t{\n\t\t\tstmt: `DROP USER \"user with spaces\"`,\n\t\t},\n\t\t{\n\t\t\tstmt: `GRANT ALL PRIVILEGES ON \"db with spaces\" TO \"user with spaces\"`,\n\t\t},\n\t\t{\n\t\t\tstmt: `GRANT ALL PRIVILEGES TO \"user with spaces\"`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW GRANTS FOR \"user with spaces\"`,\n\t\t},\n\t\t{\n\t\t\tstmt: `REVOKE ALL PRIVILEGES ON \"db with spaces\" FROM \"user with spaces\"`,\n\t\t},\n\t\t{\n\t\t\tstmt: `REVOKE ALL PRIVILEGES FROM \"user with spaces\"`,\n\t\t},\n\t\t{\n\t\t\tstmt: `CREATE DATABASE \"db with spaces\"`,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\t// Parse statement.\n\t\tstmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"invalid statement: %q: %s\", tt.stmt, 
err)\n\t\t}\n\n\t\tstmtCopy, err := influxql.NewParser(strings.NewReader(stmt.String())).ParseStatement()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to parse string: %v\\norig: %v\\ngot: %v\", err, tt.stmt, stmt.String())\n\t\t}\n\n\t\tif !reflect.DeepEqual(stmt, stmtCopy) {\n\t\t\tt.Fatalf(\"statement changed after stringifying and re-parsing:\\noriginal : %v\\nre-parsed: %v\\n\", tt.stmt, stmtCopy.String())\n\t\t}\n\t}\n}\n\n// Ensure an expression can be reduced.\nfunc TestEval(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\tin   string\n\t\tout  interface{}\n\t\tdata map[string]interface{}\n\t}{\n\t\t// Number literals.\n\t\t{in: `1 + 2`, out: int64(3)},\n\t\t{in: `(foo*2) + ( (4/2) + (3 * 5) - 0.5 )`, out: float64(26.5), data: map[string]interface{}{\"foo\": float64(5)}},\n\t\t{in: `foo / 2`, out: float64(2), data: map[string]interface{}{\"foo\": float64(4)}},\n\t\t{in: `4 = 4`, out: true},\n\t\t{in: `4 <> 4`, out: false},\n\t\t{in: `6 > 4`, out: true},\n\t\t{in: `4 >= 4`, out: true},\n\t\t{in: `4 < 6`, out: true},\n\t\t{in: `4 <= 4`, out: true},\n\t\t{in: `4 AND 5`, out: nil},\n\t\t{in: `0 = 'test'`, out: false},\n\t\t{in: `1.0 = 1`, out: true},\n\t\t{in: `1.2 = 1`, out: false},\n\n\t\t// Boolean literals.\n\t\t{in: `true AND false`, out: false},\n\t\t{in: `true OR false`, out: true},\n\t\t{in: `false = 4`, out: false},\n\n\t\t// String literals.\n\t\t{in: `'foo' = 'bar'`, out: false},\n\t\t{in: `'foo' = 'foo'`, out: true},\n\t\t{in: `'' = 4`, out: nil},\n\n\t\t// Regex literals.\n\t\t{in: `'foo' =~ /f.*/`, out: true},\n\t\t{in: `'foo' =~ /b.*/`, out: false},\n\t\t{in: `'foo' !~ /f.*/`, out: false},\n\t\t{in: `'foo' !~ /b.*/`, out: true},\n\n\t\t// Variable references.\n\t\t{in: `foo`, out: \"bar\", data: map[string]interface{}{\"foo\": \"bar\"}},\n\t\t{in: `foo = 'bar'`, out: true, data: map[string]interface{}{\"foo\": \"bar\"}},\n\t\t{in: `foo = 'bar'`, out: nil, data: map[string]interface{}{\"foo\": nil}},\n\t\t{in: `'bar' = foo`, out: nil, data: 
map[string]interface{}{\"foo\": nil}},\n\t\t{in: `foo <> 'bar'`, out: true, data: map[string]interface{}{\"foo\": \"xxx\"}},\n\t\t{in: `foo =~ /b.*/`, out: true, data: map[string]interface{}{\"foo\": \"bar\"}},\n\t\t{in: `foo !~ /b.*/`, out: false, data: map[string]interface{}{\"foo\": \"bar\"}},\n\t\t{in: `foo > 2 OR bar > 3`, out: true, data: map[string]interface{}{\"foo\": float64(4)}},\n\t\t{in: `foo > 2 OR bar > 3`, out: true, data: map[string]interface{}{\"bar\": float64(4)}},\n\t} {\n\t\t// Evaluate expression.\n\t\tout := influxql.Eval(MustParseExpr(tt.in), tt.data)\n\n\t\t// Compare with expected output.\n\t\tif !reflect.DeepEqual(tt.out, out) {\n\t\t\tt.Errorf(\"%d. %s: unexpected output:\\n\\nexp=%#v\\n\\ngot=%#v\\n\\n\", i, tt.in, tt.out, out)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\ntype EvalFixture map[string]map[string]influxql.DataType\n\nfunc (e EvalFixture) MapType(measurement *influxql.Measurement, field string) influxql.DataType {\n\tm := e[measurement.Name]\n\tif m == nil {\n\t\treturn influxql.Unknown\n\t}\n\treturn m[field]\n}\n\nfunc TestEvalType(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\tname string\n\t\tin   string\n\t\ttyp  influxql.DataType\n\t\tdata EvalFixture\n\t}{\n\t\t{\n\t\t\tname: `a single data type`,\n\t\t\tin:   `min(value)`,\n\t\t\ttyp:  influxql.Integer,\n\t\t\tdata: EvalFixture{\n\t\t\t\t\"cpu\": map[string]influxql.DataType{\n\t\t\t\t\t\"value\": influxql.Integer,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: `multiple data types`,\n\t\t\tin:   `min(value)`,\n\t\t\ttyp:  influxql.Integer,\n\t\t\tdata: EvalFixture{\n\t\t\t\t\"cpu\": map[string]influxql.DataType{\n\t\t\t\t\t\"value\": influxql.Integer,\n\t\t\t\t},\n\t\t\t\t\"mem\": map[string]influxql.DataType{\n\t\t\t\t\t\"value\": influxql.String,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: `count() with a float`,\n\t\t\tin:   `count(value)`,\n\t\t\ttyp:  influxql.Integer,\n\t\t\tdata: EvalFixture{\n\t\t\t\t\"cpu\": 
map[string]influxql.DataType{\n\t\t\t\t\t\"value\": influxql.Float,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: `mean() with an integer`,\n\t\t\tin:   `mean(value)`,\n\t\t\ttyp:  influxql.Float,\n\t\t\tdata: EvalFixture{\n\t\t\t\t\"cpu\": map[string]influxql.DataType{\n\t\t\t\t\t\"value\": influxql.Integer,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: `value inside a parenthesis`,\n\t\t\tin:   `(value)`,\n\t\t\ttyp:  influxql.Float,\n\t\t\tdata: EvalFixture{\n\t\t\t\t\"cpu\": map[string]influxql.DataType{\n\t\t\t\t\t\"value\": influxql.Float,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t} {\n\t\tsources := make([]influxql.Source, 0, len(tt.data))\n\t\tfor src := range tt.data {\n\t\t\tsources = append(sources, &influxql.Measurement{Name: src})\n\t\t}\n\n\t\texpr := influxql.MustParseExpr(tt.in)\n\t\ttyp := influxql.EvalType(expr, sources, tt.data)\n\t\tif typ != tt.typ {\n\t\t\tt.Errorf(\"%d. %s: unexpected type:\\n\\nexp=%#v\\n\\ngot=%#v\\n\\n\", i, tt.name, tt.typ, typ)\n\t\t}\n\t}\n}\n\n// Ensure an expression can be reduced.\nfunc TestReduce(t *testing.T) {\n\tnow := mustParseTime(\"2000-01-01T00:00:00Z\")\n\n\tfor i, tt := range []struct {\n\t\tin   string\n\t\tout  string\n\t\tdata Valuer\n\t}{\n\t\t// Number literals.\n\t\t{in: `1 + 2`, out: `3`},\n\t\t{in: `(foo*2) + ( (4/2) + (3 * 5) - 0.5 )`, out: `(foo * 2) + 16.500`},\n\t\t{in: `foo(bar(2 + 3), 4)`, out: `foo(bar(5), 4)`},\n\t\t{in: `4 / 0`, out: `0.000`},\n\t\t{in: `1 / 2`, out: `0.500`},\n\t\t{in: `2 % 3`, out: `2`},\n\t\t{in: `5 % 2`, out: `1`},\n\t\t{in: `2 % 0`, out: `0`},\n\t\t{in: `2.5 % 0`, out: `NaN`},\n\t\t{in: `254 & 3`, out: `2`},\n\t\t{in: `254 | 3`, out: `255`},\n\t\t{in: `254 ^ 3`, out: `253`},\n\t\t{in: `-3 & 3`, out: `1`},\n\t\t{in: `8 & -3`, out: `8`},\n\t\t{in: `8.5 & -3`, out: `8.500 & -3`},\n\t\t{in: `4 = 4`, out: `true`},\n\t\t{in: `4 <> 4`, out: `false`},\n\t\t{in: `6 > 4`, out: `true`},\n\t\t{in: `4 >= 4`, out: `true`},\n\t\t{in: `4 < 6`, out: `true`},\n\t\t{in: `4 <= 4`, out: 
`true`},\n\t\t{in: `4 AND 5`, out: `4 AND 5`},\n\n\t\t// Boolean literals.\n\t\t{in: `true AND false`, out: `false`},\n\t\t{in: `true OR false`, out: `true`},\n\t\t{in: `true OR (foo = bar AND 1 > 2)`, out: `true`},\n\t\t{in: `(foo = bar AND 1 > 2) OR true`, out: `true`},\n\t\t{in: `false OR (foo = bar AND 1 > 2)`, out: `false`},\n\t\t{in: `(foo = bar AND 1 > 2) OR false`, out: `false`},\n\t\t{in: `true = false`, out: `false`},\n\t\t{in: `true <> false`, out: `true`},\n\t\t{in: `true + false`, out: `true + false`},\n\n\t\t// Time literals with now().\n\t\t{in: `now() + 2h`, out: `'2000-01-01T02:00:00Z'`, data: map[string]interface{}{\"now()\": now}},\n\t\t{in: `now() / 2h`, out: `'2000-01-01T00:00:00Z' / 2h`, data: map[string]interface{}{\"now()\": now}},\n\t\t{in: `4µ + now()`, out: `'2000-01-01T00:00:00.000004Z'`, data: map[string]interface{}{\"now()\": now}},\n\t\t{in: `now() + 2000000000`, out: `'2000-01-01T00:00:02Z'`, data: map[string]interface{}{\"now()\": now}},\n\t\t{in: `2000000000 + now()`, out: `'2000-01-01T00:00:02Z'`, data: map[string]interface{}{\"now()\": now}},\n\t\t{in: `now() - 2000000000`, out: `'1999-12-31T23:59:58Z'`, data: map[string]interface{}{\"now()\": now}},\n\t\t{in: `now() = now()`, out: `true`, data: map[string]interface{}{\"now()\": now}},\n\t\t{in: `now() <> now()`, out: `false`, data: map[string]interface{}{\"now()\": now}},\n\t\t{in: `now() < now() + 1h`, out: `true`, data: map[string]interface{}{\"now()\": now}},\n\t\t{in: `now() <= now() + 1h`, out: `true`, data: map[string]interface{}{\"now()\": now}},\n\t\t{in: `now() >= now() - 1h`, out: `true`, data: map[string]interface{}{\"now()\": now}},\n\t\t{in: `now() > now() - 1h`, out: `true`, data: map[string]interface{}{\"now()\": now}},\n\t\t{in: `now() - (now() - 60s)`, out: `1m`, data: map[string]interface{}{\"now()\": now}},\n\t\t{in: `now() AND now()`, out: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`, data: map[string]interface{}{\"now()\": now}},\n\t\t{in: `now()`, 
out: `now()`},\n\t\t{in: `946684800000000000 + 2h`, out: `'2000-01-01T02:00:00Z'`},\n\n\t\t// Time literals.\n\t\t{in: `'2000-01-01T00:00:00Z' + 2h`, out: `'2000-01-01T02:00:00Z'`},\n\t\t{in: `'2000-01-01T00:00:00Z' / 2h`, out: `'2000-01-01T00:00:00Z' / 2h`},\n\t\t{in: `4µ + '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:00.000004Z'`},\n\t\t{in: `'2000-01-01T00:00:00Z' + 2000000000`, out: `'2000-01-01T00:00:02Z'`},\n\t\t{in: `2000000000 + '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:02Z'`},\n\t\t{in: `'2000-01-01T00:00:00Z' - 2000000000`, out: `'1999-12-31T23:59:58Z'`},\n\t\t{in: `'2000-01-01T00:00:00Z' = '2000-01-01T00:00:00Z'`, out: `true`},\n\t\t{in: `'2000-01-01T00:00:00.000000000Z' = '2000-01-01T00:00:00Z'`, out: `true`},\n\t\t{in: `'2000-01-01T00:00:00Z' <> '2000-01-01T00:00:00Z'`, out: `false`},\n\t\t{in: `'2000-01-01T00:00:00.000000000Z' <> '2000-01-01T00:00:00Z'`, out: `false`},\n\t\t{in: `'2000-01-01T00:00:00Z' < '2000-01-01T00:00:00Z' + 1h`, out: `true`},\n\t\t{in: `'2000-01-01T00:00:00.000000000Z' < '2000-01-01T00:00:00Z' + 1h`, out: `true`},\n\t\t{in: `'2000-01-01T00:00:00Z' <= '2000-01-01T00:00:00Z' + 1h`, out: `true`},\n\t\t{in: `'2000-01-01T00:00:00.000000000Z' <= '2000-01-01T00:00:00Z' + 1h`, out: `true`},\n\t\t{in: `'2000-01-01T00:00:00Z' > '2000-01-01T00:00:00Z' - 1h`, out: `true`},\n\t\t{in: `'2000-01-01T00:00:00.000000000Z' > '2000-01-01T00:00:00Z' - 1h`, out: `true`},\n\t\t{in: `'2000-01-01T00:00:00Z' >= '2000-01-01T00:00:00Z' - 1h`, out: `true`},\n\t\t{in: `'2000-01-01T00:00:00.000000000Z' >= '2000-01-01T00:00:00Z' - 1h`, out: `true`},\n\t\t{in: `'2000-01-01T00:00:00Z' - ('2000-01-01T00:00:00Z' - 60s)`, out: `1m`},\n\t\t{in: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`},\n\n\t\t// Duration literals.\n\t\t{in: `10m + 1h - 60s`, out: `69m`},\n\t\t{in: `(10m / 2) * 5`, out: `25m`},\n\t\t{in: `60s = 1m`, out: `true`},\n\t\t{in: `60s <> 1m`, out: `false`},\n\t\t{in: `60s < 1h`, 
out: `true`},\n\t\t{in: `60s <= 1h`, out: `true`},\n\t\t{in: `60s > 12s`, out: `true`},\n\t\t{in: `60s >= 1m`, out: `true`},\n\t\t{in: `60s AND 1m`, out: `1m AND 1m`},\n\t\t{in: `60m / 0`, out: `0s`},\n\t\t{in: `60m + 50`, out: `1h + 50`},\n\n\t\t// String literals.\n\t\t{in: `'foo' + 'bar'`, out: `'foobar'`},\n\n\t\t// Variable references.\n\t\t{in: `foo`, out: `'bar'`, data: map[string]interface{}{\"foo\": \"bar\"}},\n\t\t{in: `foo = 'bar'`, out: `true`, data: map[string]interface{}{\"foo\": \"bar\"}},\n\t\t{in: `foo = 'bar'`, out: `false`, data: map[string]interface{}{\"foo\": nil}},\n\t\t{in: `foo <> 'bar'`, out: `false`, data: map[string]interface{}{\"foo\": nil}},\n\t} {\n\t\t// Fold expression.\n\t\texpr := influxql.Reduce(MustParseExpr(tt.in), tt.data)\n\n\t\t// Compare with expected output.\n\t\tif out := expr.String(); tt.out != out {\n\t\t\tt.Errorf(\"%d. %s: unexpected expr:\\n\\nexp=%s\\n\\ngot=%s\\n\\n\", i, tt.in, tt.out, out)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc Test_fieldsNames(t *testing.T) {\n\tfor _, test := range []struct {\n\t\tin    []string\n\t\tout   []string\n\t\talias []string\n\t}{\n\t\t{ //case: binary expr(valRef)\n\t\t\tin:    []string{\"value+value\"},\n\t\t\tout:   []string{\"value\", \"value\"},\n\t\t\talias: []string{\"value_value\"},\n\t\t},\n\t\t{ //case: binary expr + valRef\n\t\t\tin:    []string{\"value+value\", \"temperature\"},\n\t\t\tout:   []string{\"value\", \"value\", \"temperature\"},\n\t\t\talias: []string{\"value_value\", \"temperature\"},\n\t\t},\n\t\t{ //case: aggregate expr\n\t\t\tin:    []string{\"mean(value)\"},\n\t\t\tout:   []string{\"mean\"},\n\t\t\talias: []string{\"mean\"},\n\t\t},\n\t\t{ //case: binary expr(aggregate expr)\n\t\t\tin:    []string{\"mean(value) + max(value)\"},\n\t\t\tout:   []string{\"value\", \"value\"},\n\t\t\talias: []string{\"mean_max\"},\n\t\t},\n\t\t{ //case: binary expr(aggregate expr) + valRef\n\t\t\tin:    []string{\"mean(value) + max(value)\", \"temperature\"},\n\t\t\tout:   
[]string{\"value\", \"value\", \"temperature\"},\n\t\t\talias: []string{\"mean_max\", \"temperature\"},\n\t\t},\n\t\t{ //case: mixed aggregate and varRef\n\t\t\tin:    []string{\"mean(value) + temperature\"},\n\t\t\tout:   []string{\"value\", \"temperature\"},\n\t\t\talias: []string{\"mean_temperature\"},\n\t\t},\n\t\t{ //case: ParenExpr(varRef)\n\t\t\tin:    []string{\"(value)\"},\n\t\t\tout:   []string{\"value\"},\n\t\t\talias: []string{\"value\"},\n\t\t},\n\t\t{ //case: ParenExpr(varRef + varRef)\n\t\t\tin:    []string{\"(value + value)\"},\n\t\t\tout:   []string{\"value\", \"value\"},\n\t\t\talias: []string{\"value_value\"},\n\t\t},\n\t\t{ //case: ParenExpr(aggregate)\n\t\t\tin:    []string{\"(mean(value))\"},\n\t\t\tout:   []string{\"value\"},\n\t\t\talias: []string{\"mean\"},\n\t\t},\n\t\t{ //case: ParenExpr(aggregate + aggregate)\n\t\t\tin:    []string{\"(mean(value) + max(value))\"},\n\t\t\tout:   []string{\"value\", \"value\"},\n\t\t\talias: []string{\"mean_max\"},\n\t\t},\n\t} {\n\t\tfields := influxql.Fields{}\n\t\tfor _, s := range test.in {\n\t\t\texpr := MustParseExpr(s)\n\t\t\tfields = append(fields, &influxql.Field{Expr: expr})\n\t\t}\n\t\tgot := fields.Names()\n\t\tif !reflect.DeepEqual(got, test.out) {\n\t\t\tt.Errorf(\"get fields name:\\nexp=%v\\ngot=%v\\n\", test.out, got)\n\t\t}\n\t\talias := fields.AliasNames()\n\t\tif !reflect.DeepEqual(alias, test.alias) {\n\t\t\tt.Errorf(\"get fields alias name:\\nexp=%v\\ngot=%v\\n\", test.alias, alias)\n\t\t}\n\t}\n\n}\n\nfunc TestSelect_ColumnNames(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\tstmt    *influxql.SelectStatement\n\t\tcolumns []string\n\t}{\n\t\t{\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tFields: influxql.Fields([]*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"value\"}},\n\t\t\t\t}),\n\t\t\t},\n\t\t\tcolumns: []string{\"time\", \"value\"},\n\t\t},\n\t\t{\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tFields: influxql.Fields([]*influxql.Field{\n\t\t\t\t\t{Expr: 
&influxql.VarRef{Val: \"value\"}},\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"value\"}},\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"value_1\"}},\n\t\t\t\t}),\n\t\t\t},\n\t\t\tcolumns: []string{\"time\", \"value\", \"value_1\", \"value_1_1\"},\n\t\t},\n\t\t{\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tFields: influxql.Fields([]*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"value\"}},\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"value_1\"}},\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"value\"}},\n\t\t\t\t}),\n\t\t\t},\n\t\t\tcolumns: []string{\"time\", \"value\", \"value_1\", \"value_2\"},\n\t\t},\n\t\t{\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tFields: influxql.Fields([]*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"value\"}},\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"total\"}, Alias: \"value\"},\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"value\"}},\n\t\t\t\t}),\n\t\t\t},\n\t\t\tcolumns: []string{\"time\", \"value_1\", \"value\", \"value_2\"},\n\t\t},\n\t\t{\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tFields: influxql.Fields([]*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"value\"}},\n\t\t\t\t}),\n\t\t\t\tTimeAlias: \"timestamp\",\n\t\t\t},\n\t\t\tcolumns: []string{\"timestamp\", \"value\"},\n\t\t},\n\t} {\n\t\tcolumns := tt.stmt.ColumnNames()\n\t\tif !reflect.DeepEqual(columns, tt.columns) {\n\t\t\tt.Errorf(\"%d. 
expected %s, got %s\", i, tt.columns, columns)\n\t\t}\n\t}\n}\n\nfunc TestSelect_Privileges(t *testing.T) {\n\tstmt := &influxql.SelectStatement{\n\t\tTarget: &influxql.Target{\n\t\t\tMeasurement: &influxql.Measurement{Database: \"db2\"},\n\t\t},\n\t\tSources: []influxql.Source{\n\t\t\t&influxql.Measurement{Database: \"db0\"},\n\t\t\t&influxql.Measurement{Database: \"db1\"},\n\t\t},\n\t}\n\n\texp := influxql.ExecutionPrivileges{\n\t\tinfluxql.ExecutionPrivilege{Name: \"db0\", Privilege: influxql.ReadPrivilege},\n\t\tinfluxql.ExecutionPrivilege{Name: \"db1\", Privilege: influxql.ReadPrivilege},\n\t\tinfluxql.ExecutionPrivilege{Name: \"db2\", Privilege: influxql.WritePrivilege},\n\t}\n\n\tgot, err := stmt.RequiredPrivileges()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !reflect.DeepEqual(exp, got) {\n\t\tt.Errorf(\"exp: %v, got: %v\", exp, got)\n\t}\n}\n\nfunc TestSelect_SubqueryPrivileges(t *testing.T) {\n\tstmt := &influxql.SelectStatement{\n\t\tTarget: &influxql.Target{\n\t\t\tMeasurement: &influxql.Measurement{Database: \"db2\"},\n\t\t},\n\t\tSources: []influxql.Source{\n\t\t\t&influxql.Measurement{Database: \"db0\"},\n\t\t\t&influxql.SubQuery{\n\t\t\t\tStatement: &influxql.SelectStatement{\n\t\t\t\t\tSources: []influxql.Source{\n\t\t\t\t\t\t&influxql.Measurement{Database: \"db1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\texp := influxql.ExecutionPrivileges{\n\t\tinfluxql.ExecutionPrivilege{Name: \"db0\", Privilege: influxql.ReadPrivilege},\n\t\tinfluxql.ExecutionPrivilege{Name: \"db1\", Privilege: influxql.ReadPrivilege},\n\t\tinfluxql.ExecutionPrivilege{Name: \"db2\", Privilege: influxql.WritePrivilege},\n\t}\n\n\tgot, err := stmt.RequiredPrivileges()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !reflect.DeepEqual(exp, got) {\n\t\tt.Errorf(\"exp: %v, got: %v\", exp, got)\n\t}\n}\n\nfunc TestShow_Privileges(t *testing.T) {\n\tfor _, c := range []struct {\n\t\tstmt influxql.Statement\n\t\texp  
influxql.ExecutionPrivileges\n\t}{\n\t\t{\n\t\t\tstmt: &influxql.ShowDatabasesStatement{},\n\t\t\texp:  influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.NoPrivileges}},\n\t\t},\n\t\t{\n\t\t\tstmt: &influxql.ShowFieldKeysStatement{},\n\t\t\texp:  influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}},\n\t\t},\n\t\t{\n\t\t\tstmt: &influxql.ShowMeasurementsStatement{},\n\t\t\texp:  influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}},\n\t\t},\n\t\t{\n\t\t\tstmt: &influxql.ShowQueriesStatement{},\n\t\t\texp:  influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}},\n\t\t},\n\t\t{\n\t\t\tstmt: &influxql.ShowRetentionPoliciesStatement{},\n\t\t\texp:  influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}},\n\t\t},\n\t\t{\n\t\t\tstmt: &influxql.ShowSeriesStatement{},\n\t\t\texp:  influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}},\n\t\t},\n\t\t{\n\t\t\tstmt: &influxql.ShowShardGroupsStatement{},\n\t\t\texp:  influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}},\n\t\t},\n\t\t{\n\t\t\tstmt: &influxql.ShowShardsStatement{},\n\t\t\texp:  influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}},\n\t\t},\n\t\t{\n\t\t\tstmt: &influxql.ShowStatsStatement{},\n\t\t\texp:  influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}},\n\t\t},\n\t\t{\n\t\t\tstmt: &influxql.ShowSubscriptionsStatement{},\n\t\t\texp:  influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}},\n\t\t},\n\t\t{\n\t\t\tstmt: &influxql.ShowDiagnosticsStatement{},\n\t\t\texp:  influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}},\n\t\t},\n\t\t{\n\t\t\tstmt: &influxql.ShowTagKeysStatement{},\n\t\t\texp:  influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}},\n\t\t},\n\t\t{\n\t\t\tstmt: &influxql.ShowTagValuesStatement{},\n\t\t\texp:  
influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}},\n\t\t},\n\t\t{\n\t\t\tstmt: &influxql.ShowUsersStatement{},\n\t\t\texp:  influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}},\n\t\t},\n\t} {\n\t\tgot, err := c.stmt.RequiredPrivileges()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif !reflect.DeepEqual(c.exp, got) {\n\t\t\tt.Errorf(\"exp: %v, got: %v\", c.exp, got)\n\t\t}\n\t}\n}\n\nfunc TestSources_Names(t *testing.T) {\n\tsources := influxql.Sources([]influxql.Source{\n\t\t&influxql.Measurement{\n\t\t\tName: \"cpu\",\n\t\t},\n\t\t&influxql.Measurement{\n\t\t\tName: \"mem\",\n\t\t},\n\t})\n\n\tnames := sources.Names()\n\tif names[0] != \"cpu\" {\n\t\tt.Errorf(\"expected cpu, got %s\", names[0])\n\t}\n\tif names[1] != \"mem\" {\n\t\tt.Errorf(\"expected mem, got %s\", names[1])\n\t}\n}\n\nfunc TestSources_HasSystemSource(t *testing.T) {\n\tsources := influxql.Sources([]influxql.Source{\n\t\t&influxql.Measurement{\n\t\t\tName: \"_measurements\",\n\t\t},\n\t})\n\n\tok := sources.HasSystemSource()\n\tif !ok {\n\t\tt.Errorf(\"expected to find a system source, found none\")\n\t}\n\n\tsources = influxql.Sources([]influxql.Source{\n\t\t&influxql.Measurement{\n\t\t\tName: \"cpu\",\n\t\t},\n\t})\n\n\tok = sources.HasSystemSource()\n\tif ok {\n\t\tt.Errorf(\"expected to find no system source, found one\")\n\t}\n}\n\n// Parse statements that might appear valid but should return an error.\n// If allowed to execute, at least some of these statements would result in a panic.\nfunc TestParse_Errors(t *testing.T) {\n\tfor _, tt := range []struct {\n\t\ttmpl string\n\t\tgood string\n\t\tbad  string\n\t}{\n\t\t// Second argument to derivative must be duration\n\t\t{tmpl: `SELECT derivative(f, %s) FROM m`, good: \"1h\", bad: \"true\"},\n\t} {\n\t\tgood := fmt.Sprintf(tt.tmpl, tt.good)\n\t\tif _, err := influxql.ParseStatement(good); err != nil {\n\t\t\tt.Fatalf(\"statement %q should have parsed correctly but returned 
error: %s\", good, err)\n\t\t}\n\n\t\tbad := fmt.Sprintf(tt.tmpl, tt.bad)\n\t\tif _, err := influxql.ParseStatement(bad); err == nil {\n\t\t\tt.Fatalf(\"statement %q should have resulted in a parse error but did not\", bad)\n\t\t}\n\t}\n}\n\n// This test checks to ensure that we have given thought to the database\n// context required for security checks.  If a new statement is added, this\n// test will fail until it is categorized into the correct bucket below.\nfunc Test_EnforceHasDefaultDatabase(t *testing.T) {\n\tpkg, err := importer.Default().Import(\"github.com/influxdata/influxdb/influxql\")\n\tif err != nil {\n\t\tfmt.Printf(\"error: %s\\n\", err.Error())\n\t\treturn\n\t}\n\tstatements := []string{}\n\n\t// this is a list of statements that do not have a database context\n\texemptStatements := []string{\n\t\t\"CreateDatabaseStatement\",\n\t\t\"CreateUserStatement\",\n\t\t\"DeleteSeriesStatement\",\n\t\t\"DropDatabaseStatement\",\n\t\t\"DropMeasurementStatement\",\n\t\t\"DropSeriesStatement\",\n\t\t\"DropShardStatement\",\n\t\t\"DropUserStatement\",\n\t\t\"GrantAdminStatement\",\n\t\t\"KillQueryStatement\",\n\t\t\"RevokeAdminStatement\",\n\t\t\"SelectStatement\",\n\t\t\"SetPasswordUserStatement\",\n\t\t\"ShowContinuousQueriesStatement\",\n\t\t\"ShowDatabasesStatement\",\n\t\t\"ShowDiagnosticsStatement\",\n\t\t\"ShowGrantsForUserStatement\",\n\t\t\"ShowQueriesStatement\",\n\t\t\"ShowShardGroupsStatement\",\n\t\t\"ShowShardsStatement\",\n\t\t\"ShowStatsStatement\",\n\t\t\"ShowSubscriptionsStatement\",\n\t\t\"ShowUsersStatement\",\n\t}\n\n\texists := func(stmt string) bool {\n\t\tswitch stmt {\n\t\t// These are functions with the word statement in them, and can be ignored\n\t\tcase \"Statement\", \"MustParseStatement\", \"ParseStatement\", \"RewriteStatement\":\n\t\t\treturn true\n\t\tdefault:\n\t\t\t// check the exempt statements\n\t\t\tfor _, s := range exemptStatements {\n\t\t\t\tif s == stmt {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\t// check the 
statements that passed the interface test for HasDefaultDatabase\n\t\t\tfor _, s := range statements {\n\t\t\t\tif s == stmt {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\n\tneedsHasDefault := []interface{}{\n\t\t&influxql.AlterRetentionPolicyStatement{},\n\t\t&influxql.CreateContinuousQueryStatement{},\n\t\t&influxql.CreateRetentionPolicyStatement{},\n\t\t&influxql.CreateSubscriptionStatement{},\n\t\t&influxql.DeleteStatement{},\n\t\t&influxql.DropContinuousQueryStatement{},\n\t\t&influxql.DropRetentionPolicyStatement{},\n\t\t&influxql.DropSubscriptionStatement{},\n\t\t&influxql.GrantStatement{},\n\t\t&influxql.RevokeStatement{},\n\t\t&influxql.ShowFieldKeysStatement{},\n\t\t&influxql.ShowMeasurementsStatement{},\n\t\t&influxql.ShowRetentionPoliciesStatement{},\n\t\t&influxql.ShowSeriesStatement{},\n\t\t&influxql.ShowTagKeysStatement{},\n\t\t&influxql.ShowTagValuesStatement{},\n\t}\n\n\tfor _, stmt := range needsHasDefault {\n\t\tstatements = append(statements, strings.TrimPrefix(fmt.Sprintf(\"%T\", stmt), \"*influxql.\"))\n\t\tif _, ok := stmt.(influxql.HasDefaultDatabase); !ok {\n\t\t\tt.Errorf(\"%T was expected to declare DefaultDatabase method\", stmt)\n\t\t}\n\n\t}\n\n\tfor _, declName := range pkg.Scope().Names() {\n\t\tif strings.HasSuffix(declName, \"Statement\") {\n\t\t\tif !exists(declName) {\n\t\t\t\tt.Errorf(\"unchecked statement %s.  please update this test to determine if this statement needs to declare 'DefaultDatabase'\", declName)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Valuer represents a simple wrapper around a map to implement the influxql.Valuer interface.\ntype Valuer map[string]interface{}\n\n// Value returns the value and existence of a key.\nfunc (o Valuer) Value(key string) (v interface{}, ok bool) {\n\tv, ok = o[key]\n\treturn\n}\n\n// MustTimeRange will parse a time range. 
Panic on error.\nfunc MustTimeRange(expr influxql.Expr) (min, max time.Time) {\n\tmin, max, err := influxql.TimeRange(expr, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn min, max\n}\n\n// mustParseTime parses an IS0-8601 string. Panic on error.\nfunc mustParseTime(s string) time.Time {\n\tt, err := time.Parse(time.RFC3339, s)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn t\n}\n\n// BenchmarkExprNames benchmarks how long it takes to run ExprNames.\nfunc BenchmarkExprNames(b *testing.B) {\n\texprs := make([]string, 100)\n\tfor i := range exprs {\n\t\texprs[i] = fmt.Sprintf(\"host = 'server%02d'\", i)\n\t}\n\tcondition := MustParseExpr(strings.Join(exprs, \" OR \"))\n\n\tb.ResetTimer()\n\tb.ReportAllocs()\n\n\tfor i := 0; i < b.N; i++ {\n\t\trefs := influxql.ExprNames(condition)\n\t\tif have, want := refs, []influxql.VarRef{{Val: \"host\"}}; !reflect.DeepEqual(have, want) {\n\t\t\tb.Fatalf(\"unexpected expression names: have=%s want=%s\", have, want)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/call_iterator.go",
    "content": "package influxql\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"time\"\n)\n\n/*\nThis file contains iterator implementations for each function call available\nin InfluxQL. Call iterators are separated into two groups:\n\n1. Map/reduce-style iterators - these are passed to IteratorCreator so that\n   processing can be at the low-level storage and aggregates are returned.\n\n2. Raw aggregate iterators - these require the full set of data for a window.\n   These are handled by the select() function and raw points are streamed in\n   from the low-level storage.\n\nThere are helpers to aid in building aggregate iterators. For simple map/reduce\niterators, you can use the reduceIterator types and pass a reduce function. This\nreduce function is passed a previous and current value and the new timestamp,\nvalue, and auxilary fields are returned from it.\n\nFor raw aggregate iterators, you can use the reduceSliceIterators which pass\nin a slice of all points to the function and return a point. 
For more complex\niterator types, you may need to create your own iterators by hand.\n\nOnce your iterator is complete, you'll need to add it to the NewCallIterator()\nfunction if it is to be available to IteratorCreators and add it to the select()\nfunction to allow it to be included during planning.\n*/\n\n// NewCallIterator returns a new iterator for a Call.\nfunc NewCallIterator(input Iterator, opt IteratorOptions) (Iterator, error) {\n\tname := opt.Expr.(*Call).Name\n\tswitch name {\n\tcase \"count\":\n\t\treturn newCountIterator(input, opt)\n\tcase \"min\":\n\t\treturn newMinIterator(input, opt)\n\tcase \"max\":\n\t\treturn newMaxIterator(input, opt)\n\tcase \"sum\":\n\t\treturn newSumIterator(input, opt)\n\tcase \"first\":\n\t\treturn newFirstIterator(input, opt)\n\tcase \"last\":\n\t\treturn newLastIterator(input, opt)\n\tcase \"mean\":\n\t\treturn newMeanIterator(input, opt)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported function call: %s\", name)\n\t}\n}\n\n// newCountIterator returns an iterator for operating on a count() call.\nfunc newCountIterator(input Iterator, opt IteratorOptions) (Iterator, error) {\n\t// FIXME: Wrap iterator in int-type iterator and always output int value.\n\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewFloatFuncIntegerReducer(FloatCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime})\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatReduceIntegerIterator(input, opt, createFn), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewIntegerFuncReducer(IntegerCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime})\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerReduceIntegerIterator(input, opt, createFn), nil\n\tcase StringIterator:\n\t\tcreateFn := func() (StringPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewStringFuncIntegerReducer(StringCountReduce, 
&IntegerPoint{Value: 0, Time: ZeroTime})\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newStringReduceIntegerIterator(input, opt, createFn), nil\n\tcase BooleanIterator:\n\t\tcreateFn := func() (BooleanPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewBooleanFuncIntegerReducer(BooleanCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime})\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newBooleanReduceIntegerIterator(input, opt, createFn), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported count iterator type: %T\", input)\n\t}\n}\n\n// FloatCountReduce returns the count of points.\nfunc FloatCountReduce(prev *IntegerPoint, curr *FloatPoint) (int64, int64, []interface{}) {\n\tif prev == nil {\n\t\treturn ZeroTime, 1, nil\n\t}\n\treturn ZeroTime, prev.Value + 1, nil\n}\n\n// IntegerCountReduce returns the count of points.\nfunc IntegerCountReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) {\n\tif prev == nil {\n\t\treturn ZeroTime, 1, nil\n\t}\n\treturn ZeroTime, prev.Value + 1, nil\n}\n\n// StringCountReduce returns the count of points.\nfunc StringCountReduce(prev *IntegerPoint, curr *StringPoint) (int64, int64, []interface{}) {\n\tif prev == nil {\n\t\treturn ZeroTime, 1, nil\n\t}\n\treturn ZeroTime, prev.Value + 1, nil\n}\n\n// BooleanCountReduce returns the count of points.\nfunc BooleanCountReduce(prev *IntegerPoint, curr *BooleanPoint) (int64, int64, []interface{}) {\n\tif prev == nil {\n\t\treturn ZeroTime, 1, nil\n\t}\n\treturn ZeroTime, prev.Value + 1, nil\n}\n\n// newMinIterator returns an iterator for operating on a min() call.\nfunc newMinIterator(input Iterator, opt IteratorOptions) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatFuncReducer(FloatMinReduce, nil)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatReduceFloatIterator(input, opt, createFn), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() 
(IntegerPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewIntegerFuncReducer(IntegerMinReduce, nil)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerReduceIntegerIterator(input, opt, createFn), nil\n\tcase BooleanIterator:\n\t\tcreateFn := func() (BooleanPointAggregator, BooleanPointEmitter) {\n\t\t\tfn := NewBooleanFuncReducer(BooleanMinReduce, nil)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newBooleanReduceBooleanIterator(input, opt, createFn), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported min iterator type: %T\", input)\n\t}\n}\n\n// FloatMinReduce returns the minimum value between prev & curr.\nfunc FloatMinReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) {\n\tif prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) {\n\t\treturn curr.Time, curr.Value, cloneAux(curr.Aux)\n\t}\n\treturn prev.Time, prev.Value, prev.Aux\n}\n\n// IntegerMinReduce returns the minimum value between prev & curr.\nfunc IntegerMinReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) {\n\tif prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) {\n\t\treturn curr.Time, curr.Value, cloneAux(curr.Aux)\n\t}\n\treturn prev.Time, prev.Value, prev.Aux\n}\n\n// BooleanMinReduce returns the minimum value between prev & curr.\nfunc BooleanMinReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) {\n\tif prev == nil || (curr.Value != prev.Value && !curr.Value) || (curr.Value == prev.Value && curr.Time < prev.Time) {\n\t\treturn curr.Time, curr.Value, cloneAux(curr.Aux)\n\t}\n\treturn prev.Time, prev.Value, prev.Aux\n}\n\n// newMaxIterator returns an iterator for operating on a max() call.\nfunc newMaxIterator(input Iterator, opt IteratorOptions) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatFuncReducer(FloatMaxReduce, nil)\n\t\t\treturn fn, 
fn\n\t\t}\n\t\treturn newFloatReduceFloatIterator(input, opt, createFn), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewIntegerFuncReducer(IntegerMaxReduce, nil)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerReduceIntegerIterator(input, opt, createFn), nil\n\tcase BooleanIterator:\n\t\tcreateFn := func() (BooleanPointAggregator, BooleanPointEmitter) {\n\t\t\tfn := NewBooleanFuncReducer(BooleanMaxReduce, nil)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newBooleanReduceBooleanIterator(input, opt, createFn), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported max iterator type: %T\", input)\n\t}\n}\n\n// FloatMaxReduce returns the maximum value between prev & curr.\nfunc FloatMaxReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) {\n\tif prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) {\n\t\treturn curr.Time, curr.Value, cloneAux(curr.Aux)\n\t}\n\treturn prev.Time, prev.Value, prev.Aux\n}\n\n// IntegerMaxReduce returns the maximum value between prev & curr.\nfunc IntegerMaxReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) {\n\tif prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) {\n\t\treturn curr.Time, curr.Value, cloneAux(curr.Aux)\n\t}\n\treturn prev.Time, prev.Value, prev.Aux\n}\n\n// BooleanMaxReduce returns the minimum value between prev & curr.\nfunc BooleanMaxReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) {\n\tif prev == nil || (curr.Value != prev.Value && curr.Value) || (curr.Value == prev.Value && curr.Time < prev.Time) {\n\t\treturn curr.Time, curr.Value, cloneAux(curr.Aux)\n\t}\n\treturn prev.Time, prev.Value, prev.Aux\n}\n\n// newSumIterator returns an iterator for operating on a sum() call.\nfunc newSumIterator(input Iterator, opt IteratorOptions) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() 
(FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatFuncReducer(FloatSumReduce, &FloatPoint{Value: 0, Time: ZeroTime})\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatReduceFloatIterator(input, opt, createFn), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewIntegerFuncReducer(IntegerSumReduce, &IntegerPoint{Value: 0, Time: ZeroTime})\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerReduceIntegerIterator(input, opt, createFn), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported sum iterator type: %T\", input)\n\t}\n}\n\n// FloatSumReduce returns the sum prev value & curr value.\nfunc FloatSumReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) {\n\tif prev == nil {\n\t\treturn ZeroTime, curr.Value, nil\n\t}\n\treturn prev.Time, prev.Value + curr.Value, nil\n}\n\n// IntegerSumReduce returns the sum prev value & curr value.\nfunc IntegerSumReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) {\n\tif prev == nil {\n\t\treturn ZeroTime, curr.Value, nil\n\t}\n\treturn prev.Time, prev.Value + curr.Value, nil\n}\n\n// newFirstIterator returns an iterator for operating on a first() call.\nfunc newFirstIterator(input Iterator, opt IteratorOptions) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatFuncReducer(FloatFirstReduce, nil)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatReduceFloatIterator(input, opt, createFn), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewIntegerFuncReducer(IntegerFirstReduce, nil)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerReduceIntegerIterator(input, opt, createFn), nil\n\tcase StringIterator:\n\t\tcreateFn := func() (StringPointAggregator, StringPointEmitter) {\n\t\t\tfn := NewStringFuncReducer(StringFirstReduce, nil)\n\t\t\treturn fn, 
fn\n\t\t}\n\t\treturn newStringReduceStringIterator(input, opt, createFn), nil\n\tcase BooleanIterator:\n\t\tcreateFn := func() (BooleanPointAggregator, BooleanPointEmitter) {\n\t\t\tfn := NewBooleanFuncReducer(BooleanFirstReduce, nil)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newBooleanReduceBooleanIterator(input, opt, createFn), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported first iterator type: %T\", input)\n\t}\n}\n\n// FloatFirstReduce returns the first point sorted by time.\nfunc FloatFirstReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) {\n\tif prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) {\n\t\treturn curr.Time, curr.Value, cloneAux(curr.Aux)\n\t}\n\treturn prev.Time, prev.Value, prev.Aux\n}\n\n// IntegerFirstReduce returns the first point sorted by time.\nfunc IntegerFirstReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) {\n\tif prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) {\n\t\treturn curr.Time, curr.Value, cloneAux(curr.Aux)\n\t}\n\treturn prev.Time, prev.Value, prev.Aux\n}\n\n// StringFirstReduce returns the first point sorted by time.\nfunc StringFirstReduce(prev, curr *StringPoint) (int64, string, []interface{}) {\n\tif prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) {\n\t\treturn curr.Time, curr.Value, cloneAux(curr.Aux)\n\t}\n\treturn prev.Time, prev.Value, prev.Aux\n}\n\n// BooleanFirstReduce returns the first point sorted by time.\nfunc BooleanFirstReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) {\n\tif prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && !curr.Value && prev.Value) {\n\t\treturn curr.Time, curr.Value, cloneAux(curr.Aux)\n\t}\n\treturn prev.Time, prev.Value, prev.Aux\n}\n\n// newLastIterator returns an iterator for operating on a last() call.\nfunc newLastIterator(input Iterator, opt IteratorOptions) (Iterator, error) {\n\tswitch 
input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatFuncReducer(FloatLastReduce, nil)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatReduceFloatIterator(input, opt, createFn), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewIntegerFuncReducer(IntegerLastReduce, nil)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerReduceIntegerIterator(input, opt, createFn), nil\n\tcase StringIterator:\n\t\tcreateFn := func() (StringPointAggregator, StringPointEmitter) {\n\t\t\tfn := NewStringFuncReducer(StringLastReduce, nil)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newStringReduceStringIterator(input, opt, createFn), nil\n\tcase BooleanIterator:\n\t\tcreateFn := func() (BooleanPointAggregator, BooleanPointEmitter) {\n\t\t\tfn := NewBooleanFuncReducer(BooleanLastReduce, nil)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newBooleanReduceBooleanIterator(input, opt, createFn), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported last iterator type: %T\", input)\n\t}\n}\n\n// FloatLastReduce returns the last point sorted by time.\nfunc FloatLastReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) {\n\tif prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) {\n\t\treturn curr.Time, curr.Value, cloneAux(curr.Aux)\n\t}\n\treturn prev.Time, prev.Value, prev.Aux\n}\n\n// IntegerLastReduce returns the last point sorted by time.\nfunc IntegerLastReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) {\n\tif prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) {\n\t\treturn curr.Time, curr.Value, cloneAux(curr.Aux)\n\t}\n\treturn prev.Time, prev.Value, prev.Aux\n}\n\n// StringLastReduce returns the first point sorted by time.\nfunc StringLastReduce(prev, curr *StringPoint) (int64, string, []interface{}) {\n\tif prev == nil || curr.Time > prev.Time 
|| (curr.Time == prev.Time && curr.Value > prev.Value) {\n\t\treturn curr.Time, curr.Value, cloneAux(curr.Aux)\n\t}\n\treturn prev.Time, prev.Value, prev.Aux\n}\n\n// BooleanLastReduce returns the first point sorted by time.\nfunc BooleanLastReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) {\n\tif prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value && !prev.Value) {\n\t\treturn curr.Time, curr.Value, cloneAux(curr.Aux)\n\t}\n\treturn prev.Time, prev.Value, prev.Aux\n}\n\n// NewDistinctIterator returns an iterator for operating on a distinct() call.\nfunc NewDistinctIterator(input Iterator, opt IteratorOptions) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatDistinctReducer()\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatReduceFloatIterator(input, opt, createFn), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewIntegerDistinctReducer()\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerReduceIntegerIterator(input, opt, createFn), nil\n\tcase StringIterator:\n\t\tcreateFn := func() (StringPointAggregator, StringPointEmitter) {\n\t\t\tfn := NewStringDistinctReducer()\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newStringReduceStringIterator(input, opt, createFn), nil\n\tcase BooleanIterator:\n\t\tcreateFn := func() (BooleanPointAggregator, BooleanPointEmitter) {\n\t\t\tfn := NewBooleanDistinctReducer()\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newBooleanReduceBooleanIterator(input, opt, createFn), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported distinct iterator type: %T\", input)\n\t}\n}\n\n// newMeanIterator returns an iterator for operating on a mean() call.\nfunc newMeanIterator(input Iterator, opt IteratorOptions) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, 
FloatPointEmitter) {\n\t\t\tfn := NewFloatMeanReducer()\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatReduceFloatIterator(input, opt, createFn), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewIntegerMeanReducer()\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerReduceFloatIterator(input, opt, createFn), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported mean iterator type: %T\", input)\n\t}\n}\n\n// NewMedianIterator returns an iterator for operating on a median() call.\nfunc NewMedianIterator(input Iterator, opt IteratorOptions) (Iterator, error) {\n\treturn newMedianIterator(input, opt)\n}\n\n// newMedianIterator returns an iterator for operating on a median() call.\nfunc newMedianIterator(input Iterator, opt IteratorOptions) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatSliceFuncReducer(FloatMedianReduceSlice)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatReduceFloatIterator(input, opt, createFn), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewIntegerSliceFuncFloatReducer(IntegerMedianReduceSlice)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerReduceFloatIterator(input, opt, createFn), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported median iterator type: %T\", input)\n\t}\n}\n\n// FloatMedianReduceSlice returns the median value within a window.\nfunc FloatMedianReduceSlice(a []FloatPoint) []FloatPoint {\n\tif len(a) == 1 {\n\t\treturn a\n\t}\n\n\t// OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1.\n\n\t// Return the middle value from the points.\n\t// If there are an even number of points then return the mean of the two middle points.\n\tsort.Sort(floatPointsByValue(a))\n\tif len(a)%2 == 0 {\n\t\tlo, hi := a[len(a)/2-1], a[(len(a)/2)]\n\t\treturn []FloatPoint{{Time: ZeroTime, 
Value: lo.Value + (hi.Value-lo.Value)/2}}\n\t}\n\treturn []FloatPoint{{Time: ZeroTime, Value: a[len(a)/2].Value}}\n}\n\n// IntegerMedianReduceSlice returns the median value within a window.\nfunc IntegerMedianReduceSlice(a []IntegerPoint) []FloatPoint {\n\tif len(a) == 1 {\n\t\treturn []FloatPoint{{Time: ZeroTime, Value: float64(a[0].Value)}}\n\t}\n\n\t// OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1.\n\n\t// Return the middle value from the points.\n\t// If there are an even number of points then return the mean of the two middle points.\n\tsort.Sort(integerPointsByValue(a))\n\tif len(a)%2 == 0 {\n\t\tlo, hi := a[len(a)/2-1], a[(len(a)/2)]\n\t\treturn []FloatPoint{{Time: ZeroTime, Value: float64(lo.Value) + float64(hi.Value-lo.Value)/2}}\n\t}\n\treturn []FloatPoint{{Time: ZeroTime, Value: float64(a[len(a)/2].Value)}}\n}\n\n// newModeIterator returns an iterator for operating on a mode() call.\nfunc NewModeIterator(input Iterator, opt IteratorOptions) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatSliceFuncReducer(FloatModeReduceSlice)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatReduceFloatIterator(input, opt, createFn), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewIntegerSliceFuncReducer(IntegerModeReduceSlice)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerReduceIntegerIterator(input, opt, createFn), nil\n\tcase StringIterator:\n\t\tcreateFn := func() (StringPointAggregator, StringPointEmitter) {\n\t\t\tfn := NewStringSliceFuncReducer(StringModeReduceSlice)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newStringReduceStringIterator(input, opt, createFn), nil\n\tcase BooleanIterator:\n\t\tcreateFn := func() (BooleanPointAggregator, BooleanPointEmitter) {\n\t\t\tfn := NewBooleanSliceFuncReducer(BooleanModeReduceSlice)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn 
newBooleanReduceBooleanIterator(input, opt, createFn), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported median iterator type: %T\", input)\n\t}\n}\n\n// FloatModeReduceSlice returns the mode value within a window.\nfunc FloatModeReduceSlice(a []FloatPoint) []FloatPoint {\n\tif len(a) == 1 {\n\t\treturn a\n\t}\n\n\tsort.Sort(floatPointsByValue(a))\n\n\tmostFreq := 0\n\tcurrFreq := 0\n\tcurrMode := a[0].Value\n\tmostMode := a[0].Value\n\tmostTime := a[0].Time\n\tcurrTime := a[0].Time\n\n\tfor _, p := range a {\n\t\tif p.Value != currMode {\n\t\t\tcurrFreq = 1\n\t\t\tcurrMode = p.Value\n\t\t\tcurrTime = p.Time\n\t\t\tcontinue\n\t\t}\n\t\tcurrFreq++\n\t\tif mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) {\n\t\t\tcontinue\n\t\t}\n\t\tmostFreq = currFreq\n\t\tmostMode = p.Value\n\t\tmostTime = p.Time\n\t}\n\n\treturn []FloatPoint{{Time: ZeroTime, Value: mostMode}}\n}\n\n// IntegerModeReduceSlice returns the mode value within a window.\nfunc IntegerModeReduceSlice(a []IntegerPoint) []IntegerPoint {\n\tif len(a) == 1 {\n\t\treturn a\n\t}\n\tsort.Sort(integerPointsByValue(a))\n\n\tmostFreq := 0\n\tcurrFreq := 0\n\tcurrMode := a[0].Value\n\tmostMode := a[0].Value\n\tmostTime := a[0].Time\n\tcurrTime := a[0].Time\n\n\tfor _, p := range a {\n\t\tif p.Value != currMode {\n\t\t\tcurrFreq = 1\n\t\t\tcurrMode = p.Value\n\t\t\tcurrTime = p.Time\n\t\t\tcontinue\n\t\t}\n\t\tcurrFreq++\n\t\tif mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) {\n\t\t\tcontinue\n\t\t}\n\t\tmostFreq = currFreq\n\t\tmostMode = p.Value\n\t\tmostTime = p.Time\n\t}\n\n\treturn []IntegerPoint{{Time: ZeroTime, Value: mostMode}}\n}\n\n// StringModeReduceSlice returns the mode value within a window.\nfunc StringModeReduceSlice(a []StringPoint) []StringPoint {\n\tif len(a) == 1 {\n\t\treturn a\n\t}\n\n\tsort.Sort(stringPointsByValue(a))\n\n\tmostFreq := 0\n\tcurrFreq := 0\n\tcurrMode := a[0].Value\n\tmostMode := a[0].Value\n\tmostTime := a[0].Time\n\tcurrTime := 
a[0].Time\n\n\tfor _, p := range a {\n\t\tif p.Value != currMode {\n\t\t\tcurrFreq = 1\n\t\t\tcurrMode = p.Value\n\t\t\tcurrTime = p.Time\n\t\t\tcontinue\n\t\t}\n\t\tcurrFreq++\n\t\tif mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) {\n\t\t\tcontinue\n\t\t}\n\t\tmostFreq = currFreq\n\t\tmostMode = p.Value\n\t\tmostTime = p.Time\n\t}\n\n\treturn []StringPoint{{Time: ZeroTime, Value: mostMode}}\n}\n\n// BooleanModeReduceSlice returns the mode value within a window.\nfunc BooleanModeReduceSlice(a []BooleanPoint) []BooleanPoint {\n\tif len(a) == 1 {\n\t\treturn a\n\t}\n\n\ttrueFreq := 0\n\tfalsFreq := 0\n\tmostMode := false\n\n\tfor _, p := range a {\n\t\tif p.Value {\n\t\t\ttrueFreq++\n\t\t} else {\n\t\t\tfalsFreq++\n\t\t}\n\t}\n\t// In case either of true or false are mode then retuned mode value wont be\n\t// of metric with oldest timestamp\n\tif trueFreq >= falsFreq {\n\t\tmostMode = true\n\t}\n\n\treturn []BooleanPoint{{Time: ZeroTime, Value: mostMode}}\n}\n\n// newStddevIterator returns an iterator for operating on a stddev() call.\nfunc newStddevIterator(input Iterator, opt IteratorOptions) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatSliceFuncReducer(FloatStddevReduceSlice)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatReduceFloatIterator(input, opt, createFn), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewIntegerSliceFuncFloatReducer(IntegerStddevReduceSlice)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerReduceFloatIterator(input, opt, createFn), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported stddev iterator type: %T\", input)\n\t}\n}\n\n// FloatStddevReduceSlice returns the stddev value within a window.\nfunc FloatStddevReduceSlice(a []FloatPoint) []FloatPoint {\n\t// If there is only one point then return 0.\n\tif len(a) < 2 
{\n\t\treturn []FloatPoint{{Time: ZeroTime, Nil: true}}\n\t}\n\n\t// Calculate the mean.\n\tvar mean float64\n\tvar count int\n\tfor _, p := range a {\n\t\tif math.IsNaN(p.Value) {\n\t\t\tcontinue\n\t\t}\n\t\tcount++\n\t\tmean += (p.Value - mean) / float64(count)\n\t}\n\n\t// Calculate the variance.\n\tvar variance float64\n\tfor _, p := range a {\n\t\tif math.IsNaN(p.Value) {\n\t\t\tcontinue\n\t\t}\n\t\tvariance += math.Pow(p.Value-mean, 2)\n\t}\n\treturn []FloatPoint{{\n\t\tTime:  ZeroTime,\n\t\tValue: math.Sqrt(variance / float64(count-1)),\n\t}}\n}\n\n// IntegerStddevReduceSlice returns the stddev value within a window.\nfunc IntegerStddevReduceSlice(a []IntegerPoint) []FloatPoint {\n\t// If there is only one point then return 0.\n\tif len(a) < 2 {\n\t\treturn []FloatPoint{{Time: ZeroTime, Nil: true}}\n\t}\n\n\t// Calculate the mean.\n\tvar mean float64\n\tvar count int\n\tfor _, p := range a {\n\t\tcount++\n\t\tmean += (float64(p.Value) - mean) / float64(count)\n\t}\n\n\t// Calculate the variance.\n\tvar variance float64\n\tfor _, p := range a {\n\t\tvariance += math.Pow(float64(p.Value)-mean, 2)\n\t}\n\treturn []FloatPoint{{\n\t\tTime:  ZeroTime,\n\t\tValue: math.Sqrt(variance / float64(count-1)),\n\t}}\n}\n\n// newSpreadIterator returns an iterator for operating on a spread() call.\nfunc newSpreadIterator(input Iterator, opt IteratorOptions) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatSliceFuncReducer(FloatSpreadReduceSlice)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatReduceFloatIterator(input, opt, createFn), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewIntegerSliceFuncReducer(IntegerSpreadReduceSlice)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerReduceIntegerIterator(input, opt, createFn), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported spread 
iterator type: %T\", input)\n\t}\n}\n\n// FloatSpreadReduceSlice returns the spread value within a window.\nfunc FloatSpreadReduceSlice(a []FloatPoint) []FloatPoint {\n\t// Find min & max values.\n\tmin, max := a[0].Value, a[0].Value\n\tfor _, p := range a[1:] {\n\t\tmin = math.Min(min, p.Value)\n\t\tmax = math.Max(max, p.Value)\n\t}\n\treturn []FloatPoint{{Time: ZeroTime, Value: max - min}}\n}\n\n// IntegerSpreadReduceSlice returns the spread value within a window.\nfunc IntegerSpreadReduceSlice(a []IntegerPoint) []IntegerPoint {\n\t// Find min & max values.\n\tmin, max := a[0].Value, a[0].Value\n\tfor _, p := range a[1:] {\n\t\tif p.Value < min {\n\t\t\tmin = p.Value\n\t\t}\n\t\tif p.Value > max {\n\t\t\tmax = p.Value\n\t\t}\n\t}\n\treturn []IntegerPoint{{Time: ZeroTime, Value: max - min}}\n}\n\nfunc newTopIterator(input Iterator, opt IteratorOptions, n int, keepTags bool) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatTopReducer(n)\n\t\t\treturn fn, fn\n\t\t}\n\t\titr := newFloatReduceFloatIterator(input, opt, createFn)\n\t\titr.keepTags = keepTags\n\t\treturn itr, nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewIntegerTopReducer(n)\n\t\t\treturn fn, fn\n\t\t}\n\t\titr := newIntegerReduceIntegerIterator(input, opt, createFn)\n\t\titr.keepTags = keepTags\n\t\treturn itr, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported top iterator type: %T\", input)\n\t}\n}\n\nfunc newBottomIterator(input Iterator, opt IteratorOptions, n int, keepTags bool) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatBottomReducer(n)\n\t\t\treturn fn, fn\n\t\t}\n\t\titr := newFloatReduceFloatIterator(input, opt, createFn)\n\t\titr.keepTags = keepTags\n\t\treturn itr, nil\n\tcase 
IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewIntegerBottomReducer(n)\n\t\t\treturn fn, fn\n\t\t}\n\t\titr := newIntegerReduceIntegerIterator(input, opt, createFn)\n\t\titr.keepTags = keepTags\n\t\treturn itr, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported bottom iterator type: %T\", input)\n\t}\n}\n\n// newPercentileIterator returns an iterator for operating on a percentile() call.\nfunc newPercentileIterator(input Iterator, opt IteratorOptions, percentile float64) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tfloatPercentileReduceSlice := NewFloatPercentileReduceSliceFunc(percentile)\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatSliceFuncReducer(floatPercentileReduceSlice)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatReduceFloatIterator(input, opt, createFn), nil\n\tcase IntegerIterator:\n\t\tintegerPercentileReduceSlice := NewIntegerPercentileReduceSliceFunc(percentile)\n\t\tcreateFn := func() (IntegerPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewIntegerSliceFuncReducer(integerPercentileReduceSlice)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerReduceIntegerIterator(input, opt, createFn), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported percentile iterator type: %T\", input)\n\t}\n}\n\n// NewFloatPercentileReduceSliceFunc returns the percentile value within a window.\nfunc NewFloatPercentileReduceSliceFunc(percentile float64) FloatReduceSliceFunc {\n\treturn func(a []FloatPoint) []FloatPoint {\n\t\tlength := len(a)\n\t\ti := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1\n\n\t\tif i < 0 || i >= length {\n\t\t\treturn nil\n\t\t}\n\n\t\tsort.Sort(floatPointsByValue(a))\n\t\treturn []FloatPoint{{Time: a[i].Time, Value: a[i].Value, Aux: cloneAux(a[i].Aux)}}\n\t}\n}\n\n// NewIntegerPercentileReduceSliceFunc returns the percentile value within a window.\nfunc 
NewIntegerPercentileReduceSliceFunc(percentile float64) IntegerReduceSliceFunc {\n\treturn func(a []IntegerPoint) []IntegerPoint {\n\t\tlength := len(a)\n\t\ti := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1\n\n\t\tif i < 0 || i >= length {\n\t\t\treturn nil\n\t\t}\n\n\t\tsort.Sort(integerPointsByValue(a))\n\t\treturn []IntegerPoint{{Time: a[i].Time, Value: a[i].Value, Aux: cloneAux(a[i].Aux)}}\n\t}\n}\n\n// newDerivativeIterator returns an iterator for operating on a derivative() call.\nfunc newDerivativeIterator(input Iterator, opt IteratorOptions, interval Interval, isNonNegative bool) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatDerivativeReducer(interval, isNonNegative, opt.Ascending)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatStreamFloatIterator(input, createFn, opt), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewIntegerDerivativeReducer(interval, isNonNegative, opt.Ascending)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerStreamFloatIterator(input, createFn, opt), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported derivative iterator type: %T\", input)\n\t}\n}\n\n// newDifferenceIterator returns an iterator for operating on a difference() call.\nfunc newDifferenceIterator(input Iterator, opt IteratorOptions, isNonNegative bool) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatDifferenceReducer(isNonNegative)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatStreamFloatIterator(input, createFn, opt), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewIntegerDifferenceReducer(isNonNegative)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerStreamIntegerIterator(input, 
createFn, opt), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported difference iterator type: %T\", input)\n\t}\n}\n\n// newElapsedIterator returns an iterator for operating on a elapsed() call.\nfunc newElapsedIterator(input Iterator, opt IteratorOptions, interval Interval) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewFloatElapsedReducer(interval)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatStreamIntegerIterator(input, createFn, opt), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewIntegerElapsedReducer(interval)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerStreamIntegerIterator(input, createFn, opt), nil\n\tcase BooleanIterator:\n\t\tcreateFn := func() (BooleanPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewBooleanElapsedReducer(interval)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newBooleanStreamIntegerIterator(input, createFn, opt), nil\n\tcase StringIterator:\n\t\tcreateFn := func() (StringPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewStringElapsedReducer(interval)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newStringStreamIntegerIterator(input, createFn, opt), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported elapsed iterator type: %T\", input)\n\t}\n}\n\n// newMovingAverageIterator returns an iterator for operating on a moving_average() call.\nfunc newMovingAverageIterator(input Iterator, n int, opt IteratorOptions) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatMovingAverageReducer(n)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatStreamFloatIterator(input, createFn, opt), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, FloatPointEmitter) {\n\t\t\tfn := 
NewIntegerMovingAverageReducer(n)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerStreamFloatIterator(input, createFn, opt), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported moving average iterator type: %T\", input)\n\t}\n}\n\n// newCumulativeSumIterator returns an iterator for operating on a cumulative_sum() call.\nfunc newCumulativeSumIterator(input Iterator, opt IteratorOptions) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatCumulativeSumReducer()\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatStreamFloatIterator(input, createFn, opt), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewIntegerCumulativeSumReducer()\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerStreamIntegerIterator(input, createFn, opt), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported cumulative sum iterator type: %T\", input)\n\t}\n}\n\n// newHoltWintersIterator returns an iterator for operating on a holt_winters() call.\nfunc newHoltWintersIterator(input Iterator, opt IteratorOptions, h, m int, includeFitData bool, interval time.Duration) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatHoltWintersReducer(h, m, includeFitData, interval)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatReduceFloatIterator(input, opt, createFn), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatHoltWintersReducer(h, m, includeFitData, interval)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerReduceFloatIterator(input, opt, createFn), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported elapsed iterator type: %T\", input)\n\t}\n}\n\n// NewSampleIterator returns an iterator for operating on a sample() call (exported 
for use in test).\nfunc NewSampleIterator(input Iterator, opt IteratorOptions, size int) (Iterator, error) {\n\treturn newSampleIterator(input, opt, size)\n}\n\n// newSampleIterator returns an iterator for operating on a sample() call.\nfunc newSampleIterator(input Iterator, opt IteratorOptions, size int) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatSampleReducer(size)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatReduceFloatIterator(input, opt, createFn), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, IntegerPointEmitter) {\n\t\t\tfn := NewIntegerSampleReducer(size)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerReduceIntegerIterator(input, opt, createFn), nil\n\tcase StringIterator:\n\t\tcreateFn := func() (StringPointAggregator, StringPointEmitter) {\n\t\t\tfn := NewStringSampleReducer(size)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newStringReduceStringIterator(input, opt, createFn), nil\n\tcase BooleanIterator:\n\t\tcreateFn := func() (BooleanPointAggregator, BooleanPointEmitter) {\n\t\t\tfn := NewBooleanSampleReducer(size)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newBooleanReduceBooleanIterator(input, opt, createFn), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported elapsed iterator type: %T\", input)\n\t}\n}\n\n// newIntegralIterator returns an iterator for operating on a integral() call.\nfunc newIntegralIterator(input Iterator, opt IteratorOptions, interval Interval) (Iterator, error) {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\tcreateFn := func() (FloatPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewFloatIntegralReducer(interval, opt)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newFloatStreamFloatIterator(input, createFn, opt), nil\n\tcase IntegerIterator:\n\t\tcreateFn := func() (IntegerPointAggregator, FloatPointEmitter) {\n\t\t\tfn := NewIntegerIntegralReducer(interval, 
opt)\n\t\t\treturn fn, fn\n\t\t}\n\t\treturn newIntegerStreamFloatIterator(input, createFn, opt), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported integral iterator type: %T\", input)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/call_iterator_test.go",
    "content": "package influxql_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/pkg/deep\"\n)\n\n// Ensure that a float iterator can be created for a count() call.\nfunc TestCallIterator_Count_Float(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 0, Value: 15, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Name: \"cpu\", Time: 2, Value: 10, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Name: \"cpu\", Time: 1, Value: 10, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Name: \"cpu\", Time: 5, Value: 20, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Name: \"cpu\", Time: 1, Value: 11, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Name: \"cpu\", Time: 23, Value: 8, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Name: \"mem\", Time: 23, Value: 10, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`count(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0, Value: 3, Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 5, Value: 1, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0, Value: 1, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 20, Value: 1, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Name: 
\"mem\", Time: 20, Value: 1, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that an integer iterator can be created for a count() call.\nfunc TestCallIterator_Count_Integer(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Time: 0, Value: 15, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Name: \"cpu\", Time: 2, Value: 10, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Name: \"cpu\", Time: 1, Value: 10, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Name: \"cpu\", Time: 5, Value: 20, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Name: \"cpu\", Time: 1, Value: 11, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Name: \"cpu\", Time: 23, Value: 8, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Name: \"mem\", Time: 23, Value: 10, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`count(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0, Value: 3, Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 5, Value: 1, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0, Value: 1, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 20, Value: 1, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"mem\", Time: 20, Value: 1, Tags: 
ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a string iterator can be created for a count() call.\nfunc TestCallIterator_Count_String(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&StringIterator{Points: []influxql.StringPoint{\n\t\t\t{Name: \"cpu\", Time: 0, Value: \"d\", Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Name: \"cpu\", Time: 2, Value: \"b\", Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Name: \"cpu\", Time: 1, Value: \"b\", Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Name: \"cpu\", Time: 5, Value: \"e\", Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Name: \"cpu\", Time: 1, Value: \"c\", Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Name: \"cpu\", Time: 23, Value: \"a\", Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Name: \"mem\", Time: 23, Value: \"b\", Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`count(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0, Value: 3, Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 5, Value: 1, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0, Value: 1, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 20, Value: 1, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"mem\", Time: 20, Value: 1, Tags: ParseTags(\"host=hostB\"), Aggregated: 
1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a boolean iterator can be created for a count() call.\nfunc TestCallIterator_Count_Boolean(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&BooleanIterator{Points: []influxql.BooleanPoint{\n\t\t\t{Name: \"cpu\", Time: 0, Value: true, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Name: \"cpu\", Time: 2, Value: false, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Name: \"cpu\", Time: 1, Value: true, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Name: \"cpu\", Time: 5, Value: false, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Name: \"cpu\", Time: 1, Value: true, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Name: \"cpu\", Time: 23, Value: false, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Name: \"mem\", Time: 23, Value: true, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`count(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0, Value: 3, Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 5, Value: 1, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0, Value: 1, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 20, Value: 1, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"mem\", Time: 20, Value: 1, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected 
points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a float iterator can be created for a min() call.\nfunc TestCallIterator_Min_Float(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Time: 0, Value: 15, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 2, Value: 10, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 4, Value: 12, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 1, Value: 10, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Time: 5, Value: 20, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Time: 1, Value: 11, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Time: 23, Value: 8, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`min(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Time: 1, Value: 10, Tags: ParseTags(\"host=hostA\"), Aggregated: 4}},\n\t\t{&influxql.FloatPoint{Time: 5, Value: 20, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a integer iterator can be created for a min() call.\nfunc TestCallIterator_Min_Integer(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Time: 0, Value: 15, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 2, Value: 10, 
Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 4, Value: 12, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 1, Value: 10, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Time: 5, Value: 20, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Time: 1, Value: 11, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Time: 23, Value: 8, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`min(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Time: 1, Value: 10, Tags: ParseTags(\"host=hostA\"), Aggregated: 4}},\n\t\t{&influxql.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a boolean iterator can be created for a min() call.\nfunc TestCallIterator_Min_Boolean(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&BooleanIterator{Points: []influxql.BooleanPoint{\n\t\t\t{Time: 0, Value: true, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 2, Value: false, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 1, Value: true, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Time: 5, Value: false, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Time: 1, Value: false, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Time: 23, Value: true, Tags: 
ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`min(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.BooleanPoint{Time: 2, Value: false, Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.BooleanPoint{Time: 5, Value: false, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.BooleanPoint{Time: 1, Value: false, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.BooleanPoint{Time: 23, Value: true, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a float iterator can be created for a max() call.\nfunc TestCallIterator_Max_Float(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Time: 0, Value: 15, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 2, Value: 10, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 1, Value: 10, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Time: 5, Value: 20, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Time: 1, Value: 11, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Time: 23, Value: 8, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`max(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected 
error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Time: 0, Value: 15, Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.FloatPoint{Time: 5, Value: 20, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a integer iterator can be created for a max() call.\nfunc TestCallIterator_Max_Integer(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Time: 0, Value: 15, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 2, Value: 10, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 1, Value: 10, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Time: 5, Value: 20, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Time: 1, Value: 11, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Time: 23, Value: 8, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`max(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Time: 0, Value: 15, Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Time: 23, Value: 8, Tags: 
ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a boolean iterator can be created for a max() call.\nfunc TestCallIterator_Max_Boolean(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&BooleanIterator{Points: []influxql.BooleanPoint{\n\t\t\t{Time: 0, Value: true, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 2, Value: false, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 1, Value: true, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Time: 5, Value: false, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Time: 1, Value: false, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Time: 23, Value: true, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`max(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.BooleanPoint{Time: 0, Value: true, Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.BooleanPoint{Time: 5, Value: false, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.BooleanPoint{Time: 1, Value: false, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.BooleanPoint{Time: 23, Value: true, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a float iterator can be created for a sum() call.\nfunc TestCallIterator_Sum_Float(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Time: 0, Value: 15, Tags: 
ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 2, Value: 10, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 1, Value: 10, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Time: 5, Value: 20, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Time: 1, Value: 11, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Time: 23, Value: 8, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`sum(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Time: 0, Value: 35, Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.FloatPoint{Time: 5, Value: 20, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Time: 0, Value: 11, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Time: 20, Value: 8, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that an integer iterator can be created for a sum() call.\nfunc TestCallIterator_Sum_Integer(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Time: 0, Value: 15, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 2, Value: 10, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 1, Value: 10, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Time: 5, Value: 20, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Time: 1, Value: 11, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Time: 23, Value: 8, Tags: 
ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`sum(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Time: 0, Value: 35, Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Time: 0, Value: 11, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Time: 20, Value: 8, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a float iterator can be created for a first() call.\nfunc TestCallIterator_First_Float(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Time: 2, Value: 10, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 0, Value: 15, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 1, Value: 10, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Time: 6, Value: 20, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Time: 1, Value: 11, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Time: 23, Value: 8, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`first(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: 
%s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Time: 0, Value: 15, Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.FloatPoint{Time: 6, Value: 20, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that an integer iterator can be created for a first() call.\nfunc TestCallIterator_First_Integer(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Time: 2, Value: 10, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 0, Value: 15, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 1, Value: 10, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Time: 6, Value: 20, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Time: 1, Value: 11, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Time: 23, Value: 8, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`first(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Time: 0, Value: 15, Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.IntegerPoint{Time: 6, Value: 20, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Time: 23, Value: 8, Tags: 
ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a string iterator can be created for a first() call.\nfunc TestCallIterator_First_String(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&StringIterator{Points: []influxql.StringPoint{\n\t\t\t{Time: 2, Value: \"b\", Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 0, Value: \"d\", Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 1, Value: \"b\", Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Time: 6, Value: \"e\", Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Time: 1, Value: \"c\", Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Time: 23, Value: \"a\", Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`first(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.StringPoint{Time: 0, Value: \"d\", Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.StringPoint{Time: 6, Value: \"e\", Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.StringPoint{Time: 1, Value: \"c\", Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.StringPoint{Time: 23, Value: \"a\", Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a boolean iterator can be created for a first() call.\nfunc TestCallIterator_First_Boolean(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&BooleanIterator{Points: []influxql.BooleanPoint{\n\t\t\t{Time: 2, Value: false, Tags: 
ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 0, Value: true, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 1, Value: false, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Time: 6, Value: false, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Time: 1, Value: true, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Time: 23, Value: false, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`first(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.BooleanPoint{Time: 0, Value: true, Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.BooleanPoint{Time: 6, Value: false, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.BooleanPoint{Time: 1, Value: true, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.BooleanPoint{Time: 23, Value: false, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a float iterator can be created for a last() call.\nfunc TestCallIterator_Last_Float(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Time: 2, Value: 10, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 0, Value: 15, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 1, Value: 10, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Time: 6, Value: 20, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Time: 1, Value: 11, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Time: 23, Value: 8, Tags: 
ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`last(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Time: 2, Value: 10, Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.FloatPoint{Time: 6, Value: 20, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that an integer iterator can be created for a last() call.\nfunc TestCallIterator_Last_Integer(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Time: 2, Value: 10, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 0, Value: 15, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 1, Value: 10, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Time: 6, Value: 20, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Time: 1, Value: 11, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Time: 23, Value: 8, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`last(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: 
%s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Time: 2, Value: 10, Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.IntegerPoint{Time: 6, Value: 20, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a string iterator can be created for a last() call.\nfunc TestCallIterator_Last_String(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&StringIterator{Points: []influxql.StringPoint{\n\t\t\t{Time: 2, Value: \"b\", Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 0, Value: \"d\", Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 1, Value: \"b\", Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Time: 6, Value: \"e\", Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Time: 1, Value: \"c\", Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Time: 23, Value: \"a\", Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`last(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.StringPoint{Time: 2, Value: \"b\", Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.StringPoint{Time: 6, Value: \"e\", Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.StringPoint{Time: 1, Value: \"c\", Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.StringPoint{Time: 23, 
Value: \"a\", Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a boolean iterator can be created for a last() call.\nfunc TestCallIterator_Last_Boolean(t *testing.T) {\n\titr, _ := influxql.NewCallIterator(\n\t\t&BooleanIterator{Points: []influxql.BooleanPoint{\n\t\t\t{Time: 2, Value: false, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 0, Value: true, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t\t{Time: 1, Value: false, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t\t{Time: 6, Value: false, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t\t{Time: 1, Value: true, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t\t{Time: 23, Value: false, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`last(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.BooleanPoint{Time: 2, Value: false, Tags: ParseTags(\"host=hostA\"), Aggregated: 3}},\n\t\t{&influxql.BooleanPoint{Time: 6, Value: false, Tags: ParseTags(\"host=hostA\"), Aggregated: 1}},\n\t\t{&influxql.BooleanPoint{Time: 1, Value: true, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t\t{&influxql.BooleanPoint{Time: 23, Value: false, Tags: ParseTags(\"host=hostB\"), Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a float iterator can be created for a mode() call.\nfunc TestCallIterator_Mode_Float(t *testing.T) {\n\titr, _ := influxql.NewModeIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t{Time: 0, Value: 15, Tags: 
ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 1, Value: 10, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t{Time: 2, Value: 10, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 3, Value: 10, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 4, Value: 10, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 6, Value: 20, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 7, Value: 21, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 8, Value: 21, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t{Time: 1, Value: 11, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t{Time: 22, Value: 8, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t{Time: 23, Value: 8, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t{Time: 24, Value: 25, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`mode(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Time: 0, Value: 10, Tags: ParseTags(\"host=hostA\"), Aggregated: 0}},\n\t\t{&influxql.FloatPoint{Time: 5, Value: 21, Tags: ParseTags(\"host=hostA\"), Aggregated: 0}},\n\t\t{&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags(\"host=hostB\"), Aggregated: 0}},\n\t\t{&influxql.FloatPoint{Time: 20, Value: 8, Tags: ParseTags(\"host=hostB\"), Aggregated: 0}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a integer iterator can be created for a mode() call.\nfunc TestCallIterator_Mode_Integer(t *testing.T) {\n\titr, _ := influxql.NewModeIterator(&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t{Time: 0, Value: 
15, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 1, Value: 10, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t{Time: 2, Value: 10, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 3, Value: 10, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 4, Value: 10, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 6, Value: 20, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 7, Value: 21, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 8, Value: 21, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 1, Value: 11, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t{Time: 22, Value: 8, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t{Time: 23, Value: 8, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t{Time: 24, Value: 25, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`mode(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Time: 0, Value: 10, Tags: ParseTags(\"host=hostA\")}},\n\t\t{&influxql.IntegerPoint{Time: 5, Value: 21, Tags: ParseTags(\"host=hostA\")}},\n\t\t{&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags(\"host=hostB\")}},\n\t\t{&influxql.IntegerPoint{Time: 20, Value: 8, Tags: ParseTags(\"host=hostB\")}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a string iterator can be created for a mode() call.\nfunc TestCallIterator_Mode_String(t *testing.T) {\n\titr, _ := influxql.NewModeIterator(&StringIterator{Points: []influxql.StringPoint{\n\t\t{Time: 0, Value: \"15\", Tags: 
ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 1, Value: \"10\", Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t{Time: 2, Value: \"10\", Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 3, Value: \"10\", Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 4, Value: \"10\", Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 6, Value: \"20\", Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 7, Value: \"21\", Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 7, Value: \"21\", Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t{Time: 1, Value: \"11\", Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t{Time: 22, Value: \"8\", Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t{Time: 23, Value: \"8\", Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t{Time: 24, Value: \"25\", Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`mode(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.StringPoint{Time: 0, Value: \"10\", Tags: ParseTags(\"host=hostA\")}},\n\t\t{&influxql.StringPoint{Time: 5, Value: \"21\", Tags: ParseTags(\"host=hostA\")}},\n\t\t{&influxql.StringPoint{Time: 1, Value: \"11\", Tags: ParseTags(\"host=hostB\")}},\n\t\t{&influxql.StringPoint{Time: 20, Value: \"8\", Tags: ParseTags(\"host=hostB\")}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure that a boolean iterator can be created for a modBooleanl.\nfunc TestCallIterator_Mode_Boolean(t *testing.T) {\n\titr, _ := influxql.NewModeIterator(&BooleanIterator{Points: []influxql.BooleanPoint{\n\t\t{Time: 0, 
Value: true, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 1, Value: true, Tags: ParseTags(\"region=us-west,host=hostA\")},\n\t\t{Time: 2, Value: true, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 3, Value: true, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 4, Value: false, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 6, Value: false, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 7, Value: false, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\t\t{Time: 8, Value: false, Tags: ParseTags(\"region=us-east,host=hostA\")},\n\n\t\t{Time: 1, Value: false, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t{Time: 22, Value: false, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t{Time: 23, Value: true, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t\t{Time: 24, Value: true, Tags: ParseTags(\"region=us-west,host=hostB\")},\n\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr:       MustParseExpr(`mode(\"value\")`),\n\t\t\tDimensions: []string{\"host\"},\n\t\t\tInterval:   influxql.Interval{Duration: 5 * time.Nanosecond},\n\t\t\tOrdered:    true,\n\t\t\tAscending:  true,\n\t\t},\n\t)\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.BooleanPoint{Time: 0, Value: true, Tags: ParseTags(\"host=hostA\")}},\n\t\t{&influxql.BooleanPoint{Time: 5, Value: false, Tags: ParseTags(\"host=hostA\")}},\n\t\t{&influxql.BooleanPoint{Time: 1, Value: false, Tags: ParseTags(\"host=hostB\")}},\n\t\t{&influxql.BooleanPoint{Time: 20, Value: true, Tags: ParseTags(\"host=hostB\")}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestNewCallIterator_UnsupportedExprName(t *testing.T) {\n\t_, err := influxql.NewCallIterator(\n\t\t&FloatIterator{},\n\t\tinfluxql.IteratorOptions{\n\t\t\tExpr: MustParseExpr(`foobar(\"value\")`),\n\t\t},\n\t)\n\n\tif 
err == nil || err.Error() != \"unsupported function call: foobar\" {\n\t\tt.Errorf(\"unexpected error: %s\", err)\n\t}\n}\n\nfunc BenchmarkCountIterator_1K(b *testing.B)   { benchmarkCountIterator(b, 1000) }\nfunc BenchmarkCountIterator_100K(b *testing.B) { benchmarkCountIterator(b, 100000) }\nfunc BenchmarkCountIterator_1M(b *testing.B)   { benchmarkCountIterator(b, 1000000) }\n\nfunc benchmarkCountIterator(b *testing.B, pointN int) {\n\tbenchmarkCallIterator(b, influxql.IteratorOptions{\n\t\tExpr:      MustParseExpr(\"count(value)\"),\n\t\tStartTime: influxql.MinTime,\n\t\tEndTime:   influxql.MaxTime,\n\t}, pointN)\n}\n\nfunc benchmarkCallIterator(b *testing.B, opt influxql.IteratorOptions, pointN int) {\n\tb.ReportAllocs()\n\n\tfor i := 0; i < b.N; i++ {\n\t\t// Create a lightweight point generator.\n\t\tp := influxql.FloatPoint{Name: \"cpu\", Value: 100}\n\t\tinput := FloatPointGenerator{\n\t\t\tN:  pointN,\n\t\t\tFn: func(i int) *influxql.FloatPoint { return &p },\n\t\t}\n\n\t\t// Execute call against input.\n\t\titr, err := influxql.NewCallIterator(&input, opt)\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tinfluxql.DrainIterator(itr)\n\t}\n}\n\nfunc BenchmarkSampleIterator_1k(b *testing.B)   { benchmarkSampleIterator(b, 1000) }\nfunc BenchmarkSampleIterator_100k(b *testing.B) { benchmarkSampleIterator(b, 100000) }\nfunc BenchmarkSampleIterator_1M(b *testing.B)   { benchmarkSampleIterator(b, 1000000) }\n\nfunc benchmarkSampleIterator(b *testing.B, pointN int) {\n\tb.ReportAllocs()\n\n\t// Create a lightweight point generator.\n\tp := influxql.FloatPoint{Name: \"cpu\"}\n\tinput := FloatPointGenerator{\n\t\tN: pointN,\n\t\tFn: func(i int) *influxql.FloatPoint {\n\t\t\tp.Value = float64(i)\n\t\t\treturn &p\n\t\t},\n\t}\n\n\tfor i := 0; i < b.N; i++ {\n\t\t// Execute call against input.\n\t\titr, err := influxql.NewSampleIterator(&input, influxql.IteratorOptions{}, 100)\n\t\tif err != nil 
{\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tinfluxql.DrainIterator(itr)\n\t}\n}\n\nfunc BenchmarkDistinctIterator_1K(b *testing.B)   { benchmarkDistinctIterator(b, 1000) }\nfunc BenchmarkDistinctIterator_100K(b *testing.B) { benchmarkDistinctIterator(b, 100000) }\nfunc BenchmarkDistinctIterator_1M(b *testing.B)   { benchmarkDistinctIterator(b, 1000000) }\n\nfunc benchmarkDistinctIterator(b *testing.B, pointN int) {\n\tb.ReportAllocs()\n\n\tfor i := 0; i < b.N; i++ {\n\t\t// Create a lightweight point generator.\n\t\tp := influxql.FloatPoint{Name: \"cpu\"}\n\t\tinput := FloatPointGenerator{\n\t\t\tN: pointN,\n\t\t\tFn: func(i int) *influxql.FloatPoint {\n\t\t\t\tp.Value = float64(i % 10)\n\t\t\t\treturn &p\n\t\t\t},\n\t\t}\n\n\t\t// Execute call against input.\n\t\titr, err := influxql.NewDistinctIterator(&input, influxql.IteratorOptions{})\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tinfluxql.DrainIterator(itr)\n\t}\n}\n\nfunc BenchmarkModeIterator_1K(b *testing.B)   { benchmarkModeIterator(b, 1000) }\nfunc BenchmarkModeIterator_100K(b *testing.B) { benchmarkModeIterator(b, 100000) }\nfunc BenchmarkModeIterator_1M(b *testing.B)   { benchmarkModeIterator(b, 1000000) }\n\nfunc benchmarkModeIterator(b *testing.B, pointN int) {\n\tb.ReportAllocs()\n\n\tfor i := 0; i < b.N; i++ {\n\t\t// Create a lightweight point generator.\n\t\tp := influxql.FloatPoint{Name: \"cpu\"}\n\t\tinput := FloatPointGenerator{\n\t\t\tN: pointN,\n\t\t\tFn: func(i int) *influxql.FloatPoint {\n\t\t\t\tp.Value = float64(10)\n\t\t\t\treturn &p\n\t\t\t},\n\t\t}\n\n\t\t// Execute call against input.\n\t\titr, err := influxql.NewModeIterator(&input, influxql.IteratorOptions{})\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tinfluxql.DrainIterator(itr)\n\t}\n}\n\ntype FloatPointGenerator struct {\n\ti  int\n\tN  int\n\tFn func(i int) *influxql.FloatPoint\n}\n\nfunc (g *FloatPointGenerator) Close() error                  { return nil }\nfunc (g *FloatPointGenerator) Stats() influxql.IteratorStats { 
return influxql.IteratorStats{} }\n\nfunc (g *FloatPointGenerator) Next() (*influxql.FloatPoint, error) {\n\tif g.i == g.N {\n\t\treturn nil, nil\n\t}\n\tp := g.Fn(g.i)\n\tg.i++\n\treturn p, nil\n}\n\nfunc MustCallIterator(input influxql.Iterator, opt influxql.IteratorOptions) influxql.Iterator {\n\titr, err := influxql.NewCallIterator(input, opt)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn itr\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/cast.go",
    "content": "package influxql\n\nfunc castToFloat(v interface{}) float64 {\n\tswitch v := v.(type) {\n\tcase float64:\n\t\treturn v\n\tcase int64:\n\t\treturn float64(v)\n\tdefault:\n\t\treturn float64(0)\n\t}\n}\n\nfunc castToInteger(v interface{}) int64 {\n\tswitch v := v.(type) {\n\tcase float64:\n\t\treturn int64(v)\n\tcase int64:\n\t\treturn v\n\tdefault:\n\t\treturn int64(0)\n\t}\n}\n\nfunc castToString(v interface{}) string {\n\tswitch v := v.(type) {\n\tcase string:\n\t\treturn v\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\nfunc castToBoolean(v interface{}) bool {\n\tswitch v := v.(type) {\n\tcase bool:\n\t\treturn v\n\tdefault:\n\t\treturn false\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/doc.go",
    "content": "/*\nPackage influxql implements a parser for the InfluxDB query language.\n\nInfluxQL is a DML and DDL language for the InfluxDB time series database.\nIt provides the ability to query for aggregate statistics as well as create\nand configure the InfluxDB server.\n\nSee https://docs.influxdata.com/influxdb/latest/query_language/\nfor a reference on using InfluxQL.\n\n*/\npackage influxql\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/emitter.go",
    "content": "package influxql\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n)\n\n// Emitter groups values together by name, tags, and time.\ntype Emitter struct {\n\tbuf       []Point\n\titrs      []Iterator\n\tascending bool\n\tchunkSize int\n\n\ttags Tags\n\trow  *models.Row\n\n\t// The columns to attach to each row.\n\tColumns []string\n\n\t// The time zone location.\n\tLocation *time.Location\n\n\t// Removes the \"time\" column from output.\n\t// Used for meta queries where time does not apply.\n\tOmitTime bool\n}\n\n// NewEmitter returns a new instance of Emitter that pulls from itrs.\nfunc NewEmitter(itrs []Iterator, ascending bool, chunkSize int) *Emitter {\n\treturn &Emitter{\n\t\tbuf:       make([]Point, len(itrs)),\n\t\titrs:      itrs,\n\t\tascending: ascending,\n\t\tchunkSize: chunkSize,\n\t\tLocation:  time.UTC,\n\t}\n}\n\n// Close closes the underlying iterators.\nfunc (e *Emitter) Close() error {\n\treturn Iterators(e.itrs).Close()\n}\n\n// Emit returns the next row from the iterators.\nfunc (e *Emitter) Emit() (*models.Row, bool, error) {\n\t// Immediately end emission if there are no iterators.\n\tif len(e.itrs) == 0 {\n\t\treturn nil, false, nil\n\t}\n\n\t// Continually read from iterators until they are exhausted.\n\tfor {\n\t\t// Fill buffer. 
Return row if no more points remain.\n\t\tt, name, tags, err := e.loadBuf()\n\t\tif err != nil {\n\t\t\treturn nil, false, err\n\t\t} else if t == ZeroTime {\n\t\t\trow := e.row\n\t\t\te.row = nil\n\t\t\treturn row, false, nil\n\t\t}\n\n\t\t// Read next set of values from all iterators at a given time/name/tags.\n\t\t// If no values are returned then return row.\n\t\tvalues := e.readAt(t, name, tags)\n\t\tif values == nil {\n\t\t\trow := e.row\n\t\t\te.row = nil\n\t\t\treturn row, false, nil\n\t\t}\n\n\t\t// If there's no row yet then create one.\n\t\t// If the name and tags match the existing row, append to that row if\n\t\t// the number of values doesn't exceed the chunk size.\n\t\t// Otherwise return existing row and add values to next emitted row.\n\t\tif e.row == nil {\n\t\t\te.createRow(name, tags, values)\n\t\t} else if e.row.Name == name && e.tags.Equals(&tags) {\n\t\t\tif e.chunkSize > 0 && len(e.row.Values) >= e.chunkSize {\n\t\t\t\trow := e.row\n\t\t\t\trow.Partial = true\n\t\t\t\te.createRow(name, tags, values)\n\t\t\t\treturn row, true, nil\n\t\t\t}\n\t\t\te.row.Values = append(e.row.Values, values)\n\t\t} else {\n\t\t\trow := e.row\n\t\t\te.createRow(name, tags, values)\n\t\t\treturn row, true, nil\n\t\t}\n\t}\n}\n\n// loadBuf reads in points into empty buffer slots.\n// Returns the next time/name/tags to emit for.\nfunc (e *Emitter) loadBuf() (t int64, name string, tags Tags, err error) {\n\tt = ZeroTime\n\n\tfor i := range e.itrs {\n\t\t// Load buffer, if empty.\n\t\tif e.buf[i] == nil {\n\t\t\te.buf[i], err = e.readIterator(e.itrs[i])\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// Skip if buffer is empty.\n\t\tp := e.buf[i]\n\t\tif p == nil {\n\t\t\tcontinue\n\t\t}\n\t\titrTime, itrName, itrTags := p.time(), p.name(), p.tags()\n\n\t\t// Initialize range values if not set.\n\t\tif t == ZeroTime {\n\t\t\tt, name, tags = itrTime, itrName, itrTags\n\t\t\tcontinue\n\t\t}\n\n\t\t// Update range values if lower and emitter is in time 
ascending order.\n\t\tif e.ascending {\n\t\t\tif (itrName < name) || (itrName == name && itrTags.ID() < tags.ID()) || (itrName == name && itrTags.ID() == tags.ID() && itrTime < t) {\n\t\t\t\tt, name, tags = itrTime, itrName, itrTags\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// Update range values if higher and emitter is in time descending order.\n\t\tif (itrName > name) || (itrName == name && itrTags.ID() > tags.ID()) || (itrName == name && itrTags.ID() == tags.ID() && itrTime > t) {\n\t\t\tt, name, tags = itrTime, itrName, itrTags\n\t\t}\n\t}\n\treturn\n}\n\n// createRow creates a new row attached to the emitter.\nfunc (e *Emitter) createRow(name string, tags Tags, values []interface{}) {\n\te.tags = tags\n\te.row = &models.Row{\n\t\tName:    name,\n\t\tTags:    tags.KeyValues(),\n\t\tColumns: e.Columns,\n\t\tValues:  [][]interface{}{values},\n\t}\n}\n\n// readAt returns the next slice of values from the iterators at time/name/tags.\n// Returns nil values once the iterators are exhausted.\nfunc (e *Emitter) readAt(t int64, name string, tags Tags) []interface{} {\n\toffset := 1\n\tif e.OmitTime {\n\t\toffset = 0\n\t}\n\n\tvalues := make([]interface{}, len(e.itrs)+offset)\n\tif !e.OmitTime {\n\t\tvalues[0] = time.Unix(0, t).In(e.Location)\n\t}\n\te.readInto(t, name, tags, values[offset:])\n\treturn values\n}\n\nfunc (e *Emitter) readInto(t int64, name string, tags Tags, values []interface{}) {\n\tfor i, p := range e.buf {\n\t\t// Skip if buffer is empty.\n\t\tif p == nil {\n\t\t\tvalues[i] = nil\n\t\t\tcontinue\n\t\t}\n\n\t\t// Skip point if it doesn't match time/name/tags.\n\t\tpTags := p.tags()\n\t\tif p.time() != t || p.name() != name || !pTags.Equals(&tags) {\n\t\t\tvalues[i] = nil\n\t\t\tcontinue\n\t\t}\n\n\t\t// Read point value.\n\t\tvalues[i] = p.value()\n\n\t\t// Clear buffer.\n\t\te.buf[i] = nil\n\t}\n}\n\n// readIterator reads the next point from itr.\nfunc (e *Emitter) readIterator(itr Iterator) (Point, error) {\n\tif itr == nil {\n\t\treturn nil, 
nil\n\t}\n\n\tswitch itr := itr.(type) {\n\tcase FloatIterator:\n\t\tif p, err := itr.Next(); err != nil {\n\t\t\treturn nil, err\n\t\t} else if p != nil {\n\t\t\treturn p, nil\n\t\t}\n\tcase IntegerIterator:\n\t\tif p, err := itr.Next(); err != nil {\n\t\t\treturn nil, err\n\t\t} else if p != nil {\n\t\t\treturn p, nil\n\t\t}\n\tcase StringIterator:\n\t\tif p, err := itr.Next(); err != nil {\n\t\t\treturn nil, err\n\t\t} else if p != nil {\n\t\t\treturn p, nil\n\t\t}\n\tcase BooleanIterator:\n\t\tif p, err := itr.Next(); err != nil {\n\t\t\treturn nil, err\n\t\t} else if p != nil {\n\t\t\treturn p, nil\n\t\t}\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported iterator: %T\", itr))\n\t}\n\treturn nil, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/emitter_test.go",
    "content": "package influxql_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/deep\"\n)\n\n// Ensure the emitter can group iterators together into rows.\nfunc TestEmitter_Emit(t *testing.T) {\n\t// Build an emitter that pulls from two iterators.\n\te := influxql.NewEmitter([]influxql.Iterator{\n\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 0, Value: 1},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 1, Value: 2},\n\t\t}},\n\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 1, Value: 4},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=north\"), Time: 0, Value: 4},\n\t\t\t{Name: \"mem\", Time: 4, Value: 5},\n\t\t}},\n\t}, true, 0)\n\te.Columns = []string{\"col1\", \"col2\"}\n\n\t// Verify the cpu region=west is emitted first.\n\tif row, _, err := e.Emit(); err != nil {\n\t\tt.Fatalf(\"unexpected error(0): %s\", err)\n\t} else if !deep.Equal(row, &models.Row{\n\t\tName:    \"cpu\",\n\t\tTags:    map[string]string{\"region\": \"west\"},\n\t\tColumns: []string{\"col1\", \"col2\"},\n\t\tValues: [][]interface{}{\n\t\t\t{time.Unix(0, 0).UTC(), float64(1), nil},\n\t\t\t{time.Unix(0, 1).UTC(), float64(2), float64(4)},\n\t\t},\n\t}) {\n\t\tt.Fatalf(\"unexpected row(0): %s\", spew.Sdump(row))\n\t}\n\n\t// Verify the cpu region=north is emitted next.\n\tif row, _, err := e.Emit(); err != nil {\n\t\tt.Fatalf(\"unexpected error(1): %s\", err)\n\t} else if !deep.Equal(row, &models.Row{\n\t\tName:    \"cpu\",\n\t\tTags:    map[string]string{\"region\": \"north\"},\n\t\tColumns: []string{\"col1\", \"col2\"},\n\t\tValues: [][]interface{}{\n\t\t\t{time.Unix(0, 0).UTC(), nil, float64(4)},\n\t\t},\n\t}) {\n\t\tt.Fatalf(\"unexpected row(1): %s\", spew.Sdump(row))\n\t}\n\n\t// 
Verify the mem series is emitted last.\n\tif row, _, err := e.Emit(); err != nil {\n\t\tt.Fatalf(\"unexpected error(2): %s\", err)\n\t} else if !deep.Equal(row, &models.Row{\n\t\tName:    \"mem\",\n\t\tColumns: []string{\"col1\", \"col2\"},\n\t\tValues: [][]interface{}{\n\t\t\t{time.Unix(0, 4).UTC(), nil, float64(5)},\n\t\t},\n\t}) {\n\t\tt.Fatalf(\"unexpected row(2): %s\", spew.Sdump(row))\n\t}\n\n\t// Verify EOF.\n\tif row, _, err := e.Emit(); err != nil {\n\t\tt.Fatalf(\"unexpected error(eof): %s\", err)\n\t} else if row != nil {\n\t\tt.Fatalf(\"unexpected eof: %s\", spew.Sdump(row))\n\t}\n}\n\n// Ensure the emitter will limit the chunked output from a series.\nfunc TestEmitter_ChunkSize(t *testing.T) {\n\t// Build an emitter that pulls from one iterator with multiple points in the same series.\n\te := influxql.NewEmitter([]influxql.Iterator{\n\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 0, Value: 1},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 1, Value: 2},\n\t\t}},\n\t}, true, 1)\n\te.Columns = []string{\"col1\"}\n\n\t// Verify the cpu region=west is emitted first.\n\tif row, _, err := e.Emit(); err != nil {\n\t\tt.Fatalf(\"unexpected error(0): %s\", err)\n\t} else if !deep.Equal(row, &models.Row{\n\t\tName:    \"cpu\",\n\t\tTags:    map[string]string{\"region\": \"west\"},\n\t\tColumns: []string{\"col1\"},\n\t\tValues: [][]interface{}{\n\t\t\t{time.Unix(0, 0).UTC(), float64(1)},\n\t\t},\n\t\tPartial: true,\n\t}) {\n\t\tt.Fatalf(\"unexpected row(0): %s\", spew.Sdump(row))\n\t}\n\n\t// Verify the cpu region=north is emitted next.\n\tif row, _, err := e.Emit(); err != nil {\n\t\tt.Fatalf(\"unexpected error(1): %s\", err)\n\t} else if !deep.Equal(row, &models.Row{\n\t\tName:    \"cpu\",\n\t\tTags:    map[string]string{\"region\": \"west\"},\n\t\tColumns: []string{\"col1\"},\n\t\tValues: [][]interface{}{\n\t\t\t{time.Unix(0, 1).UTC(), float64(2)},\n\t\t},\n\t}) 
{\n\t\tt.Fatalf(\"unexpected row(1): %s\", spew.Sdump(row))\n\t}\n\n\t// Verify EOF.\n\tif row, _, err := e.Emit(); err != nil {\n\t\tt.Fatalf(\"unexpected error(eof): %s\", err)\n\t} else if row != nil {\n\t\tt.Fatalf(\"unexpected eof: %s\", spew.Sdump(row))\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/functions.gen.go",
    "content": "// Generated by tmpl\n// https://github.com/benbjohnson/tmpl\n//\n// DO NOT EDIT!\n// Source: functions.gen.go.tmpl\n\npackage influxql\n\nimport (\n\t\"math/rand\"\n\t\"sort\"\n\t\"time\"\n)\n\n// FloatPointAggregator aggregates points to produce a single point.\ntype FloatPointAggregator interface {\n\tAggregateFloat(p *FloatPoint)\n}\n\n// FloatBulkPointAggregator aggregates multiple points at a time.\ntype FloatBulkPointAggregator interface {\n\tAggregateFloatBulk(points []FloatPoint)\n}\n\n// AggregateFloatPoints feeds a slice of FloatPoint into an\n// aggregator. If the aggregator is a FloatBulkPointAggregator, it will\n// use the AggregateBulk method.\nfunc AggregateFloatPoints(a FloatPointAggregator, points []FloatPoint) {\n\tswitch a := a.(type) {\n\tcase FloatBulkPointAggregator:\n\t\ta.AggregateFloatBulk(points)\n\tdefault:\n\t\tfor _, p := range points {\n\t\t\ta.AggregateFloat(&p)\n\t\t}\n\t}\n}\n\n// FloatPointEmitter produces a single point from an aggregate.\ntype FloatPointEmitter interface {\n\tEmit() []FloatPoint\n}\n\n// FloatReduceFunc is the function called by a FloatPoint reducer.\ntype FloatReduceFunc func(prev *FloatPoint, curr *FloatPoint) (t int64, v float64, aux []interface{})\n\n// FloatFuncReducer is a reducer that reduces\n// the passed in points to a single point using a reduce function.\ntype FloatFuncReducer struct {\n\tprev *FloatPoint\n\tfn   FloatReduceFunc\n}\n\n// NewFloatFuncReducer creates a new FloatFuncFloatReducer.\nfunc NewFloatFuncReducer(fn FloatReduceFunc, prev *FloatPoint) *FloatFuncReducer {\n\treturn &FloatFuncReducer{fn: fn, prev: prev}\n}\n\n// AggregateFloat takes a FloatPoint and invokes the reduce function with the\n// current and new point to modify the current point.\nfunc (r *FloatFuncReducer) AggregateFloat(p *FloatPoint) {\n\tt, v, aux := r.fn(r.prev, p)\n\tif r.prev == nil {\n\t\tr.prev = &FloatPoint{}\n\t}\n\tr.prev.Time = t\n\tr.prev.Value = v\n\tr.prev.Aux = aux\n\tif p.Aggregated > 1 
{\n\t\tr.prev.Aggregated += p.Aggregated\n\t} else {\n\t\tr.prev.Aggregated++\n\t}\n}\n\n// Emit emits the point that was generated when reducing the points fed in with AggregateFloat.\nfunc (r *FloatFuncReducer) Emit() []FloatPoint {\n\treturn []FloatPoint{*r.prev}\n}\n\n// FloatReduceSliceFunc is the function called by a FloatPoint reducer.\ntype FloatReduceSliceFunc func(a []FloatPoint) []FloatPoint\n\n// FloatSliceFuncReducer is a reducer that aggregates\n// the passed in points and then invokes the function to reduce the points when they are emitted.\ntype FloatSliceFuncReducer struct {\n\tpoints []FloatPoint\n\tfn     FloatReduceSliceFunc\n}\n\n// NewFloatSliceFuncReducer creates a new FloatSliceFuncReducer.\nfunc NewFloatSliceFuncReducer(fn FloatReduceSliceFunc) *FloatSliceFuncReducer {\n\treturn &FloatSliceFuncReducer{fn: fn}\n}\n\n// AggregateFloat copies the FloatPoint into the internal slice to be passed\n// to the reduce function when Emit is called.\nfunc (r *FloatSliceFuncReducer) AggregateFloat(p *FloatPoint) {\n\tr.points = append(r.points, *p.Clone())\n}\n\n// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice.\n// This is a more efficient version of calling AggregateFloat on each point.\nfunc (r *FloatSliceFuncReducer) AggregateFloatBulk(points []FloatPoint) {\n\tr.points = append(r.points, points...)\n}\n\n// Emit invokes the reduce function on the aggregated points to generate the aggregated points.\n// This method does not clear the points from the internal slice.\nfunc (r *FloatSliceFuncReducer) Emit() []FloatPoint {\n\treturn r.fn(r.points)\n}\n\n// FloatReduceIntegerFunc is the function called by a FloatPoint reducer.\ntype FloatReduceIntegerFunc func(prev *IntegerPoint, curr *FloatPoint) (t int64, v int64, aux []interface{})\n\n// FloatFuncIntegerReducer is a reducer that reduces\n// the passed in points to a single point using a reduce function.\ntype FloatFuncIntegerReducer struct {\n\tprev *IntegerPoint\n\tfn  
 FloatReduceIntegerFunc\n}\n\n// NewFloatFuncIntegerReducer creates a new FloatFuncIntegerReducer.\nfunc NewFloatFuncIntegerReducer(fn FloatReduceIntegerFunc, prev *IntegerPoint) *FloatFuncIntegerReducer {\n\treturn &FloatFuncIntegerReducer{fn: fn, prev: prev}\n}\n\n// AggregateFloat takes a FloatPoint and invokes the reduce function with the\n// current and new point to modify the current point.\nfunc (r *FloatFuncIntegerReducer) AggregateFloat(p *FloatPoint) {\n\tt, v, aux := r.fn(r.prev, p)\n\tif r.prev == nil {\n\t\tr.prev = &IntegerPoint{}\n\t}\n\tr.prev.Time = t\n\tr.prev.Value = v\n\tr.prev.Aux = aux\n\tif p.Aggregated > 1 {\n\t\tr.prev.Aggregated += p.Aggregated\n\t} else {\n\t\tr.prev.Aggregated++\n\t}\n}\n\n// Emit emits the point that was generated when reducing the points fed in with AggregateFloat.\nfunc (r *FloatFuncIntegerReducer) Emit() []IntegerPoint {\n\treturn []IntegerPoint{*r.prev}\n}\n\n// FloatReduceIntegerSliceFunc is the function called by a FloatPoint reducer.\ntype FloatReduceIntegerSliceFunc func(a []FloatPoint) []IntegerPoint\n\n// FloatSliceFuncIntegerReducer is a reducer that aggregates\n// the passed in points and then invokes the function to reduce the points when they are emitted.\ntype FloatSliceFuncIntegerReducer struct {\n\tpoints []FloatPoint\n\tfn     FloatReduceIntegerSliceFunc\n}\n\n// NewFloatSliceFuncIntegerReducer creates a new FloatSliceFuncIntegerReducer.\nfunc NewFloatSliceFuncIntegerReducer(fn FloatReduceIntegerSliceFunc) *FloatSliceFuncIntegerReducer {\n\treturn &FloatSliceFuncIntegerReducer{fn: fn}\n}\n\n// AggregateFloat copies the FloatPoint into the internal slice to be passed\n// to the reduce function when Emit is called.\nfunc (r *FloatSliceFuncIntegerReducer) AggregateFloat(p *FloatPoint) {\n\tr.points = append(r.points, *p.Clone())\n}\n\n// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice.\n// This is a more efficient version of calling AggregateFloat on each point.\nfunc (r 
*FloatSliceFuncIntegerReducer) AggregateFloatBulk(points []FloatPoint) {\n\tr.points = append(r.points, points...)\n}\n\n// Emit invokes the reduce function on the aggregated points to generate the aggregated points.\n// This method does not clear the points from the internal slice.\nfunc (r *FloatSliceFuncIntegerReducer) Emit() []IntegerPoint {\n\treturn r.fn(r.points)\n}\n\n// FloatReduceStringFunc is the function called by a FloatPoint reducer.\ntype FloatReduceStringFunc func(prev *StringPoint, curr *FloatPoint) (t int64, v string, aux []interface{})\n\n// FloatFuncStringReducer is a reducer that reduces\n// the passed in points to a single point using a reduce function.\ntype FloatFuncStringReducer struct {\n\tprev *StringPoint\n\tfn   FloatReduceStringFunc\n}\n\n// NewFloatFuncStringReducer creates a new FloatFuncStringReducer.\nfunc NewFloatFuncStringReducer(fn FloatReduceStringFunc, prev *StringPoint) *FloatFuncStringReducer {\n\treturn &FloatFuncStringReducer{fn: fn, prev: prev}\n}\n\n// AggregateFloat takes a FloatPoint and invokes the reduce function with the\n// current and new point to modify the current point.\nfunc (r *FloatFuncStringReducer) AggregateFloat(p *FloatPoint) {\n\tt, v, aux := r.fn(r.prev, p)\n\tif r.prev == nil {\n\t\tr.prev = &StringPoint{}\n\t}\n\tr.prev.Time = t\n\tr.prev.Value = v\n\tr.prev.Aux = aux\n\tif p.Aggregated > 1 {\n\t\tr.prev.Aggregated += p.Aggregated\n\t} else {\n\t\tr.prev.Aggregated++\n\t}\n}\n\n// Emit emits the point that was generated when reducing the points fed in with AggregateFloat.\nfunc (r *FloatFuncStringReducer) Emit() []StringPoint {\n\treturn []StringPoint{*r.prev}\n}\n\n// FloatReduceStringSliceFunc is the function called by a FloatPoint reducer.\ntype FloatReduceStringSliceFunc func(a []FloatPoint) []StringPoint\n\n// FloatSliceFuncStringReducer is a reducer that aggregates\n// the passed in points and then invokes the function to reduce the points when they are emitted.\ntype 
FloatSliceFuncStringReducer struct {\n\tpoints []FloatPoint\n\tfn     FloatReduceStringSliceFunc\n}\n\n// NewFloatSliceFuncStringReducer creates a new FloatSliceFuncStringReducer.\nfunc NewFloatSliceFuncStringReducer(fn FloatReduceStringSliceFunc) *FloatSliceFuncStringReducer {\n\treturn &FloatSliceFuncStringReducer{fn: fn}\n}\n\n// AggregateFloat copies the FloatPoint into the internal slice to be passed\n// to the reduce function when Emit is called.\nfunc (r *FloatSliceFuncStringReducer) AggregateFloat(p *FloatPoint) {\n\tr.points = append(r.points, *p.Clone())\n}\n\n// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice.\n// This is a more efficient version of calling AggregateFloat on each point.\nfunc (r *FloatSliceFuncStringReducer) AggregateFloatBulk(points []FloatPoint) {\n\tr.points = append(r.points, points...)\n}\n\n// Emit invokes the reduce function on the aggregated points to generate the aggregated points.\n// This method does not clear the points from the internal slice.\nfunc (r *FloatSliceFuncStringReducer) Emit() []StringPoint {\n\treturn r.fn(r.points)\n}\n\n// FloatReduceBooleanFunc is the function called by a FloatPoint reducer.\ntype FloatReduceBooleanFunc func(prev *BooleanPoint, curr *FloatPoint) (t int64, v bool, aux []interface{})\n\n// FloatFuncBooleanReducer is a reducer that reduces\n// the passed in points to a single point using a reduce function.\ntype FloatFuncBooleanReducer struct {\n\tprev *BooleanPoint\n\tfn   FloatReduceBooleanFunc\n}\n\n// NewFloatFuncBooleanReducer creates a new FloatFuncBooleanReducer.\nfunc NewFloatFuncBooleanReducer(fn FloatReduceBooleanFunc, prev *BooleanPoint) *FloatFuncBooleanReducer {\n\treturn &FloatFuncBooleanReducer{fn: fn, prev: prev}\n}\n\n// AggregateFloat takes a FloatPoint and invokes the reduce function with the\n// current and new point to modify the current point.\nfunc (r *FloatFuncBooleanReducer) AggregateFloat(p *FloatPoint) {\n\tt, v, aux := r.fn(r.prev, 
p)\n\tif r.prev == nil {\n\t\tr.prev = &BooleanPoint{}\n\t}\n\tr.prev.Time = t\n\tr.prev.Value = v\n\tr.prev.Aux = aux\n\tif p.Aggregated > 1 {\n\t\tr.prev.Aggregated += p.Aggregated\n\t} else {\n\t\tr.prev.Aggregated++\n\t}\n}\n\n// Emit emits the point that was generated when reducing the points fed in with AggregateFloat.\nfunc (r *FloatFuncBooleanReducer) Emit() []BooleanPoint {\n\treturn []BooleanPoint{*r.prev}\n}\n\n// FloatReduceBooleanSliceFunc is the function called by a FloatPoint reducer.\ntype FloatReduceBooleanSliceFunc func(a []FloatPoint) []BooleanPoint\n\n// FloatSliceFuncBooleanReducer is a reducer that aggregates\n// the passed in points and then invokes the function to reduce the points when they are emitted.\ntype FloatSliceFuncBooleanReducer struct {\n\tpoints []FloatPoint\n\tfn     FloatReduceBooleanSliceFunc\n}\n\n// NewFloatSliceFuncBooleanReducer creates a new FloatSliceFuncBooleanReducer.\nfunc NewFloatSliceFuncBooleanReducer(fn FloatReduceBooleanSliceFunc) *FloatSliceFuncBooleanReducer {\n\treturn &FloatSliceFuncBooleanReducer{fn: fn}\n}\n\n// AggregateFloat copies the FloatPoint into the internal slice to be passed\n// to the reduce function when Emit is called.\nfunc (r *FloatSliceFuncBooleanReducer) AggregateFloat(p *FloatPoint) {\n\tr.points = append(r.points, *p.Clone())\n}\n\n// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice.\n// This is a more efficient version of calling AggregateFloat on each point.\nfunc (r *FloatSliceFuncBooleanReducer) AggregateFloatBulk(points []FloatPoint) {\n\tr.points = append(r.points, points...)\n}\n\n// Emit invokes the reduce function on the aggregated points to generate the aggregated points.\n// This method does not clear the points from the internal slice.\nfunc (r *FloatSliceFuncBooleanReducer) Emit() []BooleanPoint {\n\treturn r.fn(r.points)\n}\n\n// FloatDistinctReducer returns the distinct points in a series.\ntype FloatDistinctReducer struct {\n\tm 
map[float64]FloatPoint\n}\n\n// NewFloatDistinctReducer creates a new FloatDistinctReducer.\nfunc NewFloatDistinctReducer() *FloatDistinctReducer {\n\treturn &FloatDistinctReducer{m: make(map[float64]FloatPoint)}\n}\n\n// AggregateFloat aggregates a point into the reducer.\nfunc (r *FloatDistinctReducer) AggregateFloat(p *FloatPoint) {\n\tif _, ok := r.m[p.Value]; !ok {\n\t\tr.m[p.Value] = *p\n\t}\n}\n\n// Emit emits the distinct points that have been aggregated into the reducer.\nfunc (r *FloatDistinctReducer) Emit() []FloatPoint {\n\tpoints := make([]FloatPoint, 0, len(r.m))\n\tfor _, p := range r.m {\n\t\tpoints = append(points, FloatPoint{Time: p.Time, Value: p.Value})\n\t}\n\tsort.Sort(floatPoints(points))\n\treturn points\n}\n\n// FloatElapsedReducer calculates the elapsed of the aggregated points.\ntype FloatElapsedReducer struct {\n\tunitConversion int64\n\tprev           FloatPoint\n\tcurr           FloatPoint\n}\n\n// NewFloatElapsedReducer creates a new FloatElapsedReducer.\nfunc NewFloatElapsedReducer(interval Interval) *FloatElapsedReducer {\n\treturn &FloatElapsedReducer{\n\t\tunitConversion: int64(interval.Duration),\n\t\tprev:           FloatPoint{Nil: true},\n\t\tcurr:           FloatPoint{Nil: true},\n\t}\n}\n\n// AggregateFloat aggregates a point into the reducer and updates the current window.\nfunc (r *FloatElapsedReducer) AggregateFloat(p *FloatPoint) {\n\tr.prev = r.curr\n\tr.curr = *p\n}\n\n// Emit emits the elapsed of the reducer at the current point.\nfunc (r *FloatElapsedReducer) Emit() []IntegerPoint {\n\tif !r.prev.Nil {\n\t\telapsed := (r.curr.Time - r.prev.Time) / r.unitConversion\n\t\treturn []IntegerPoint{\n\t\t\t{Time: r.curr.Time, Value: elapsed},\n\t\t}\n\t}\n\treturn nil\n}\n\n// FloatSampleReducer implements a reservoir sampling to calculate a random subset of points\ntype FloatSampleReducer struct {\n\tcount int        // how many points we've iterated over\n\trng   *rand.Rand // random number generator for each 
reducer\n\n\tpoints floatPoints // the reservoir\n}\n\n// NewFloatSampleReducer creates a new FloatSampleReducer\nfunc NewFloatSampleReducer(size int) *FloatSampleReducer {\n\treturn &FloatSampleReducer{\n\t\trng:    rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/\n\t\tpoints: make(floatPoints, size),\n\t}\n}\n\n// AggregateFloat aggregates a point into the reducer.\nfunc (r *FloatSampleReducer) AggregateFloat(p *FloatPoint) {\n\tr.count++\n\t// Fill the reservoir with the first n points\n\tif r.count-1 < len(r.points) {\n\t\tp.CopyTo(&r.points[r.count-1])\n\t\treturn\n\t}\n\n\t// Generate a random integer between 1 and the count and\n\t// if that number is less than the length of the slice\n\t// replace the point at that index rnd with p.\n\trnd := r.rng.Intn(r.count)\n\tif rnd < len(r.points) {\n\t\tp.CopyTo(&r.points[rnd])\n\t}\n}\n\n// Emit emits the reservoir sample as many points.\nfunc (r *FloatSampleReducer) Emit() []FloatPoint {\n\tmin := len(r.points)\n\tif r.count < min {\n\t\tmin = r.count\n\t}\n\tpts := r.points[:min]\n\tsort.Sort(pts)\n\treturn pts\n}\n\n// IntegerPointAggregator aggregates points to produce a single point.\ntype IntegerPointAggregator interface {\n\tAggregateInteger(p *IntegerPoint)\n}\n\n// IntegerBulkPointAggregator aggregates multiple points at a time.\ntype IntegerBulkPointAggregator interface {\n\tAggregateIntegerBulk(points []IntegerPoint)\n}\n\n// AggregateIntegerPoints feeds a slice of IntegerPoint into an\n// aggregator. 
If the aggregator is a IntegerBulkPointAggregator, it will\n// use the AggregateBulk method.\nfunc AggregateIntegerPoints(a IntegerPointAggregator, points []IntegerPoint) {\n\tswitch a := a.(type) {\n\tcase IntegerBulkPointAggregator:\n\t\ta.AggregateIntegerBulk(points)\n\tdefault:\n\t\tfor _, p := range points {\n\t\t\ta.AggregateInteger(&p)\n\t\t}\n\t}\n}\n\n// IntegerPointEmitter produces a single point from an aggregate.\ntype IntegerPointEmitter interface {\n\tEmit() []IntegerPoint\n}\n\n// IntegerReduceFloatFunc is the function called by a IntegerPoint reducer.\ntype IntegerReduceFloatFunc func(prev *FloatPoint, curr *IntegerPoint) (t int64, v float64, aux []interface{})\n\n// IntegerFuncFloatReducer is a reducer that reduces\n// the passed in points to a single point using a reduce function.\ntype IntegerFuncFloatReducer struct {\n\tprev *FloatPoint\n\tfn   IntegerReduceFloatFunc\n}\n\n// NewIntegerFuncFloatReducer creates a new IntegerFuncFloatReducer.\nfunc NewIntegerFuncFloatReducer(fn IntegerReduceFloatFunc, prev *FloatPoint) *IntegerFuncFloatReducer {\n\treturn &IntegerFuncFloatReducer{fn: fn, prev: prev}\n}\n\n// AggregateInteger takes a IntegerPoint and invokes the reduce function with the\n// current and new point to modify the current point.\nfunc (r *IntegerFuncFloatReducer) AggregateInteger(p *IntegerPoint) {\n\tt, v, aux := r.fn(r.prev, p)\n\tif r.prev == nil {\n\t\tr.prev = &FloatPoint{}\n\t}\n\tr.prev.Time = t\n\tr.prev.Value = v\n\tr.prev.Aux = aux\n\tif p.Aggregated > 1 {\n\t\tr.prev.Aggregated += p.Aggregated\n\t} else {\n\t\tr.prev.Aggregated++\n\t}\n}\n\n// Emit emits the point that was generated when reducing the points fed in with AggregateInteger.\nfunc (r *IntegerFuncFloatReducer) Emit() []FloatPoint {\n\treturn []FloatPoint{*r.prev}\n}\n\n// IntegerReduceFloatSliceFunc is the function called by a IntegerPoint reducer.\ntype IntegerReduceFloatSliceFunc func(a []IntegerPoint) []FloatPoint\n\n// IntegerSliceFuncFloatReducer is a reducer 
that aggregates\n// the passed in points and then invokes the function to reduce the points when they are emitted.\ntype IntegerSliceFuncFloatReducer struct {\n\tpoints []IntegerPoint\n\tfn     IntegerReduceFloatSliceFunc\n}\n\n// NewIntegerSliceFuncFloatReducer creates a new IntegerSliceFuncFloatReducer.\nfunc NewIntegerSliceFuncFloatReducer(fn IntegerReduceFloatSliceFunc) *IntegerSliceFuncFloatReducer {\n\treturn &IntegerSliceFuncFloatReducer{fn: fn}\n}\n\n// AggregateInteger copies the IntegerPoint into the internal slice to be passed\n// to the reduce function when Emit is called.\nfunc (r *IntegerSliceFuncFloatReducer) AggregateInteger(p *IntegerPoint) {\n\tr.points = append(r.points, *p.Clone())\n}\n\n// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice.\n// This is a more efficient version of calling AggregateInteger on each point.\nfunc (r *IntegerSliceFuncFloatReducer) AggregateIntegerBulk(points []IntegerPoint) {\n\tr.points = append(r.points, points...)\n}\n\n// Emit invokes the reduce function on the aggregated points to generate the aggregated points.\n// This method does not clear the points from the internal slice.\nfunc (r *IntegerSliceFuncFloatReducer) Emit() []FloatPoint {\n\treturn r.fn(r.points)\n}\n\n// IntegerReduceFunc is the function called by a IntegerPoint reducer.\ntype IntegerReduceFunc func(prev *IntegerPoint, curr *IntegerPoint) (t int64, v int64, aux []interface{})\n\n// IntegerFuncReducer is a reducer that reduces\n// the passed in points to a single point using a reduce function.\ntype IntegerFuncReducer struct {\n\tprev *IntegerPoint\n\tfn   IntegerReduceFunc\n}\n\n// NewIntegerFuncReducer creates a new IntegerFuncIntegerReducer.\nfunc NewIntegerFuncReducer(fn IntegerReduceFunc, prev *IntegerPoint) *IntegerFuncReducer {\n\treturn &IntegerFuncReducer{fn: fn, prev: prev}\n}\n\n// AggregateInteger takes a IntegerPoint and invokes the reduce function with the\n// current and new point to modify the 
current point.\nfunc (r *IntegerFuncReducer) AggregateInteger(p *IntegerPoint) {\n\tt, v, aux := r.fn(r.prev, p)\n\tif r.prev == nil {\n\t\tr.prev = &IntegerPoint{}\n\t}\n\tr.prev.Time = t\n\tr.prev.Value = v\n\tr.prev.Aux = aux\n\tif p.Aggregated > 1 {\n\t\tr.prev.Aggregated += p.Aggregated\n\t} else {\n\t\tr.prev.Aggregated++\n\t}\n}\n\n// Emit emits the point that was generated when reducing the points fed in with AggregateInteger.\nfunc (r *IntegerFuncReducer) Emit() []IntegerPoint {\n\treturn []IntegerPoint{*r.prev}\n}\n\n// IntegerReduceSliceFunc is the function called by a IntegerPoint reducer.\ntype IntegerReduceSliceFunc func(a []IntegerPoint) []IntegerPoint\n\n// IntegerSliceFuncReducer is a reducer that aggregates\n// the passed in points and then invokes the function to reduce the points when they are emitted.\ntype IntegerSliceFuncReducer struct {\n\tpoints []IntegerPoint\n\tfn     IntegerReduceSliceFunc\n}\n\n// NewIntegerSliceFuncReducer creates a new IntegerSliceFuncReducer.\nfunc NewIntegerSliceFuncReducer(fn IntegerReduceSliceFunc) *IntegerSliceFuncReducer {\n\treturn &IntegerSliceFuncReducer{fn: fn}\n}\n\n// AggregateInteger copies the IntegerPoint into the internal slice to be passed\n// to the reduce function when Emit is called.\nfunc (r *IntegerSliceFuncReducer) AggregateInteger(p *IntegerPoint) {\n\tr.points = append(r.points, *p.Clone())\n}\n\n// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice.\n// This is a more efficient version of calling AggregateInteger on each point.\nfunc (r *IntegerSliceFuncReducer) AggregateIntegerBulk(points []IntegerPoint) {\n\tr.points = append(r.points, points...)\n}\n\n// Emit invokes the reduce function on the aggregated points to generate the aggregated points.\n// This method does not clear the points from the internal slice.\nfunc (r *IntegerSliceFuncReducer) Emit() []IntegerPoint {\n\treturn r.fn(r.points)\n}\n\n// IntegerReduceStringFunc is the function called by a 
IntegerPoint reducer.\ntype IntegerReduceStringFunc func(prev *StringPoint, curr *IntegerPoint) (t int64, v string, aux []interface{})\n\n// IntegerFuncStringReducer is a reducer that reduces\n// the passed in points to a single point using a reduce function.\ntype IntegerFuncStringReducer struct {\n\tprev *StringPoint\n\tfn   IntegerReduceStringFunc\n}\n\n// NewIntegerFuncStringReducer creates a new IntegerFuncStringReducer.\nfunc NewIntegerFuncStringReducer(fn IntegerReduceStringFunc, prev *StringPoint) *IntegerFuncStringReducer {\n\treturn &IntegerFuncStringReducer{fn: fn, prev: prev}\n}\n\n// AggregateInteger takes a IntegerPoint and invokes the reduce function with the\n// current and new point to modify the current point.\nfunc (r *IntegerFuncStringReducer) AggregateInteger(p *IntegerPoint) {\n\tt, v, aux := r.fn(r.prev, p)\n\tif r.prev == nil {\n\t\tr.prev = &StringPoint{}\n\t}\n\tr.prev.Time = t\n\tr.prev.Value = v\n\tr.prev.Aux = aux\n\tif p.Aggregated > 1 {\n\t\tr.prev.Aggregated += p.Aggregated\n\t} else {\n\t\tr.prev.Aggregated++\n\t}\n}\n\n// Emit emits the point that was generated when reducing the points fed in with AggregateInteger.\nfunc (r *IntegerFuncStringReducer) Emit() []StringPoint {\n\treturn []StringPoint{*r.prev}\n}\n\n// IntegerReduceStringSliceFunc is the function called by a IntegerPoint reducer.\ntype IntegerReduceStringSliceFunc func(a []IntegerPoint) []StringPoint\n\n// IntegerSliceFuncStringReducer is a reducer that aggregates\n// the passed in points and then invokes the function to reduce the points when they are emitted.\ntype IntegerSliceFuncStringReducer struct {\n\tpoints []IntegerPoint\n\tfn     IntegerReduceStringSliceFunc\n}\n\n// NewIntegerSliceFuncStringReducer creates a new IntegerSliceFuncStringReducer.\nfunc NewIntegerSliceFuncStringReducer(fn IntegerReduceStringSliceFunc) *IntegerSliceFuncStringReducer {\n\treturn &IntegerSliceFuncStringReducer{fn: fn}\n}\n\n// AggregateInteger copies the IntegerPoint into the 
internal slice to be passed\n// to the reduce function when Emit is called.\nfunc (r *IntegerSliceFuncStringReducer) AggregateInteger(p *IntegerPoint) {\n\tr.points = append(r.points, *p.Clone())\n}\n\n// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice.\n// This is a more efficient version of calling AggregateInteger on each point.\nfunc (r *IntegerSliceFuncStringReducer) AggregateIntegerBulk(points []IntegerPoint) {\n\tr.points = append(r.points, points...)\n}\n\n// Emit invokes the reduce function on the aggregated points to generate the aggregated points.\n// This method does not clear the points from the internal slice.\nfunc (r *IntegerSliceFuncStringReducer) Emit() []StringPoint {\n\treturn r.fn(r.points)\n}\n\n// IntegerReduceBooleanFunc is the function called by a IntegerPoint reducer.\ntype IntegerReduceBooleanFunc func(prev *BooleanPoint, curr *IntegerPoint) (t int64, v bool, aux []interface{})\n\n// IntegerFuncBooleanReducer is a reducer that reduces\n// the passed in points to a single point using a reduce function.\ntype IntegerFuncBooleanReducer struct {\n\tprev *BooleanPoint\n\tfn   IntegerReduceBooleanFunc\n}\n\n// NewIntegerFuncBooleanReducer creates a new IntegerFuncBooleanReducer.\nfunc NewIntegerFuncBooleanReducer(fn IntegerReduceBooleanFunc, prev *BooleanPoint) *IntegerFuncBooleanReducer {\n\treturn &IntegerFuncBooleanReducer{fn: fn, prev: prev}\n}\n\n// AggregateInteger takes a IntegerPoint and invokes the reduce function with the\n// current and new point to modify the current point.\nfunc (r *IntegerFuncBooleanReducer) AggregateInteger(p *IntegerPoint) {\n\tt, v, aux := r.fn(r.prev, p)\n\tif r.prev == nil {\n\t\tr.prev = &BooleanPoint{}\n\t}\n\tr.prev.Time = t\n\tr.prev.Value = v\n\tr.prev.Aux = aux\n\tif p.Aggregated > 1 {\n\t\tr.prev.Aggregated += p.Aggregated\n\t} else {\n\t\tr.prev.Aggregated++\n\t}\n}\n\n// Emit emits the point that was generated when reducing the points fed in with 
AggregateInteger.\nfunc (r *IntegerFuncBooleanReducer) Emit() []BooleanPoint {\n\treturn []BooleanPoint{*r.prev}\n}\n\n// IntegerReduceBooleanSliceFunc is the function called by a IntegerPoint reducer.\ntype IntegerReduceBooleanSliceFunc func(a []IntegerPoint) []BooleanPoint\n\n// IntegerSliceFuncBooleanReducer is a reducer that aggregates\n// the passed in points and then invokes the function to reduce the points when they are emitted.\ntype IntegerSliceFuncBooleanReducer struct {\n\tpoints []IntegerPoint\n\tfn     IntegerReduceBooleanSliceFunc\n}\n\n// NewIntegerSliceFuncBooleanReducer creates a new IntegerSliceFuncBooleanReducer.\nfunc NewIntegerSliceFuncBooleanReducer(fn IntegerReduceBooleanSliceFunc) *IntegerSliceFuncBooleanReducer {\n\treturn &IntegerSliceFuncBooleanReducer{fn: fn}\n}\n\n// AggregateInteger copies the IntegerPoint into the internal slice to be passed\n// to the reduce function when Emit is called.\nfunc (r *IntegerSliceFuncBooleanReducer) AggregateInteger(p *IntegerPoint) {\n\tr.points = append(r.points, *p.Clone())\n}\n\n// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice.\n// This is a more efficient version of calling AggregateInteger on each point.\nfunc (r *IntegerSliceFuncBooleanReducer) AggregateIntegerBulk(points []IntegerPoint) {\n\tr.points = append(r.points, points...)\n}\n\n// Emit invokes the reduce function on the aggregated points to generate the aggregated points.\n// This method does not clear the points from the internal slice.\nfunc (r *IntegerSliceFuncBooleanReducer) Emit() []BooleanPoint {\n\treturn r.fn(r.points)\n}\n\n// IntegerDistinctReducer returns the distinct points in a series.\ntype IntegerDistinctReducer struct {\n\tm map[int64]IntegerPoint\n}\n\n// NewIntegerDistinctReducer creates a new IntegerDistinctReducer.\nfunc NewIntegerDistinctReducer() *IntegerDistinctReducer {\n\treturn &IntegerDistinctReducer{m: make(map[int64]IntegerPoint)}\n}\n\n// AggregateInteger aggregates a 
point into the reducer.\nfunc (r *IntegerDistinctReducer) AggregateInteger(p *IntegerPoint) {\n\tif _, ok := r.m[p.Value]; !ok {\n\t\tr.m[p.Value] = *p\n\t}\n}\n\n// Emit emits the distinct points that have been aggregated into the reducer.\nfunc (r *IntegerDistinctReducer) Emit() []IntegerPoint {\n\tpoints := make([]IntegerPoint, 0, len(r.m))\n\tfor _, p := range r.m {\n\t\tpoints = append(points, IntegerPoint{Time: p.Time, Value: p.Value})\n\t}\n\tsort.Sort(integerPoints(points))\n\treturn points\n}\n\n// IntegerElapsedReducer calculates the elapsed of the aggregated points.\ntype IntegerElapsedReducer struct {\n\tunitConversion int64\n\tprev           IntegerPoint\n\tcurr           IntegerPoint\n}\n\n// NewIntegerElapsedReducer creates a new IntegerElapsedReducer.\nfunc NewIntegerElapsedReducer(interval Interval) *IntegerElapsedReducer {\n\treturn &IntegerElapsedReducer{\n\t\tunitConversion: int64(interval.Duration),\n\t\tprev:           IntegerPoint{Nil: true},\n\t\tcurr:           IntegerPoint{Nil: true},\n\t}\n}\n\n// AggregateInteger aggregates a point into the reducer and updates the current window.\nfunc (r *IntegerElapsedReducer) AggregateInteger(p *IntegerPoint) {\n\tr.prev = r.curr\n\tr.curr = *p\n}\n\n// Emit emits the elapsed of the reducer at the current point.\nfunc (r *IntegerElapsedReducer) Emit() []IntegerPoint {\n\tif !r.prev.Nil {\n\t\telapsed := (r.curr.Time - r.prev.Time) / r.unitConversion\n\t\treturn []IntegerPoint{\n\t\t\t{Time: r.curr.Time, Value: elapsed},\n\t\t}\n\t}\n\treturn nil\n}\n\n// IntegerSampleReducer implements a reservoir sampling to calculate a random subset of points\ntype IntegerSampleReducer struct {\n\tcount int        // how many points we've iterated over\n\trng   *rand.Rand // random number generator for each reducer\n\n\tpoints integerPoints // the reservoir\n}\n\n// NewIntegerSampleReducer creates a new IntegerSampleReducer\nfunc NewIntegerSampleReducer(size int) *IntegerSampleReducer {\n\treturn 
&IntegerSampleReducer{\n\t\trng:    rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/\n\t\tpoints: make(integerPoints, size),\n\t}\n}\n\n// AggregateInteger aggregates a point into the reducer.\nfunc (r *IntegerSampleReducer) AggregateInteger(p *IntegerPoint) {\n\tr.count++\n\t// Fill the reservoir with the first n points\n\tif r.count-1 < len(r.points) {\n\t\tp.CopyTo(&r.points[r.count-1])\n\t\treturn\n\t}\n\n\t// Generate a random integer between 1 and the count and\n\t// if that number is less than the length of the slice\n\t// replace the point at that index rnd with p.\n\trnd := r.rng.Intn(r.count)\n\tif rnd < len(r.points) {\n\t\tp.CopyTo(&r.points[rnd])\n\t}\n}\n\n// Emit emits the reservoir sample as many points.\nfunc (r *IntegerSampleReducer) Emit() []IntegerPoint {\n\tmin := len(r.points)\n\tif r.count < min {\n\t\tmin = r.count\n\t}\n\tpts := r.points[:min]\n\tsort.Sort(pts)\n\treturn pts\n}\n\n// StringPointAggregator aggregates points to produce a single point.\ntype StringPointAggregator interface {\n\tAggregateString(p *StringPoint)\n}\n\n// StringBulkPointAggregator aggregates multiple points at a time.\ntype StringBulkPointAggregator interface {\n\tAggregateStringBulk(points []StringPoint)\n}\n\n// AggregateStringPoints feeds a slice of StringPoint into an\n// aggregator. 
If the aggregator is a StringBulkPointAggregator, it will\n// use the AggregateBulk method.\nfunc AggregateStringPoints(a StringPointAggregator, points []StringPoint) {\n\tswitch a := a.(type) {\n\tcase StringBulkPointAggregator:\n\t\ta.AggregateStringBulk(points)\n\tdefault:\n\t\tfor _, p := range points {\n\t\t\ta.AggregateString(&p)\n\t\t}\n\t}\n}\n\n// StringPointEmitter produces a single point from an aggregate.\ntype StringPointEmitter interface {\n\tEmit() []StringPoint\n}\n\n// StringReduceFloatFunc is the function called by a StringPoint reducer.\ntype StringReduceFloatFunc func(prev *FloatPoint, curr *StringPoint) (t int64, v float64, aux []interface{})\n\n// StringFuncFloatReducer is a reducer that reduces\n// the passed in points to a single point using a reduce function.\ntype StringFuncFloatReducer struct {\n\tprev *FloatPoint\n\tfn   StringReduceFloatFunc\n}\n\n// NewStringFuncFloatReducer creates a new StringFuncFloatReducer.\nfunc NewStringFuncFloatReducer(fn StringReduceFloatFunc, prev *FloatPoint) *StringFuncFloatReducer {\n\treturn &StringFuncFloatReducer{fn: fn, prev: prev}\n}\n\n// AggregateString takes a StringPoint and invokes the reduce function with the\n// current and new point to modify the current point.\nfunc (r *StringFuncFloatReducer) AggregateString(p *StringPoint) {\n\tt, v, aux := r.fn(r.prev, p)\n\tif r.prev == nil {\n\t\tr.prev = &FloatPoint{}\n\t}\n\tr.prev.Time = t\n\tr.prev.Value = v\n\tr.prev.Aux = aux\n\tif p.Aggregated > 1 {\n\t\tr.prev.Aggregated += p.Aggregated\n\t} else {\n\t\tr.prev.Aggregated++\n\t}\n}\n\n// Emit emits the point that was generated when reducing the points fed in with AggregateString.\nfunc (r *StringFuncFloatReducer) Emit() []FloatPoint {\n\treturn []FloatPoint{*r.prev}\n}\n\n// StringReduceFloatSliceFunc is the function called by a StringPoint reducer.\ntype StringReduceFloatSliceFunc func(a []StringPoint) []FloatPoint\n\n// StringSliceFuncFloatReducer is a reducer that aggregates\n// the passed in 
points and then invokes the function to reduce the points when they are emitted.\ntype StringSliceFuncFloatReducer struct {\n\tpoints []StringPoint\n\tfn     StringReduceFloatSliceFunc\n}\n\n// NewStringSliceFuncFloatReducer creates a new StringSliceFuncFloatReducer.\nfunc NewStringSliceFuncFloatReducer(fn StringReduceFloatSliceFunc) *StringSliceFuncFloatReducer {\n\treturn &StringSliceFuncFloatReducer{fn: fn}\n}\n\n// AggregateString copies the StringPoint into the internal slice to be passed\n// to the reduce function when Emit is called.\nfunc (r *StringSliceFuncFloatReducer) AggregateString(p *StringPoint) {\n\tr.points = append(r.points, *p.Clone())\n}\n\n// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice.\n// This is a more efficient version of calling AggregateString on each point.\nfunc (r *StringSliceFuncFloatReducer) AggregateStringBulk(points []StringPoint) {\n\tr.points = append(r.points, points...)\n}\n\n// Emit invokes the reduce function on the aggregated points to generate the aggregated points.\n// This method does not clear the points from the internal slice.\nfunc (r *StringSliceFuncFloatReducer) Emit() []FloatPoint {\n\treturn r.fn(r.points)\n}\n\n// StringReduceIntegerFunc is the function called by a StringPoint reducer.\ntype StringReduceIntegerFunc func(prev *IntegerPoint, curr *StringPoint) (t int64, v int64, aux []interface{})\n\n// StringFuncIntegerReducer is a reducer that reduces\n// the passed in points to a single point using a reduce function.\ntype StringFuncIntegerReducer struct {\n\tprev *IntegerPoint\n\tfn   StringReduceIntegerFunc\n}\n\n// NewStringFuncIntegerReducer creates a new StringFuncIntegerReducer.\nfunc NewStringFuncIntegerReducer(fn StringReduceIntegerFunc, prev *IntegerPoint) *StringFuncIntegerReducer {\n\treturn &StringFuncIntegerReducer{fn: fn, prev: prev}\n}\n\n// AggregateString takes a StringPoint and invokes the reduce function with the\n// current and new point to modify the 
current point.\nfunc (r *StringFuncIntegerReducer) AggregateString(p *StringPoint) {\n\tt, v, aux := r.fn(r.prev, p)\n\tif r.prev == nil {\n\t\tr.prev = &IntegerPoint{}\n\t}\n\tr.prev.Time = t\n\tr.prev.Value = v\n\tr.prev.Aux = aux\n\tif p.Aggregated > 1 {\n\t\tr.prev.Aggregated += p.Aggregated\n\t} else {\n\t\tr.prev.Aggregated++\n\t}\n}\n\n// Emit emits the point that was generated when reducing the points fed in with AggregateString.\nfunc (r *StringFuncIntegerReducer) Emit() []IntegerPoint {\n\treturn []IntegerPoint{*r.prev}\n}\n\n// StringReduceIntegerSliceFunc is the function called by a StringPoint reducer.\ntype StringReduceIntegerSliceFunc func(a []StringPoint) []IntegerPoint\n\n// StringSliceFuncIntegerReducer is a reducer that aggregates\n// the passed in points and then invokes the function to reduce the points when they are emitted.\ntype StringSliceFuncIntegerReducer struct {\n\tpoints []StringPoint\n\tfn     StringReduceIntegerSliceFunc\n}\n\n// NewStringSliceFuncIntegerReducer creates a new StringSliceFuncIntegerReducer.\nfunc NewStringSliceFuncIntegerReducer(fn StringReduceIntegerSliceFunc) *StringSliceFuncIntegerReducer {\n\treturn &StringSliceFuncIntegerReducer{fn: fn}\n}\n\n// AggregateString copies the StringPoint into the internal slice to be passed\n// to the reduce function when Emit is called.\nfunc (r *StringSliceFuncIntegerReducer) AggregateString(p *StringPoint) {\n\tr.points = append(r.points, *p.Clone())\n}\n\n// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice.\n// This is a more efficient version of calling AggregateString on each point.\nfunc (r *StringSliceFuncIntegerReducer) AggregateStringBulk(points []StringPoint) {\n\tr.points = append(r.points, points...)\n}\n\n// Emit invokes the reduce function on the aggregated points to generate the aggregated points.\n// This method does not clear the points from the internal slice.\nfunc (r *StringSliceFuncIntegerReducer) Emit() []IntegerPoint {\n\treturn 
r.fn(r.points)\n}\n\n// StringReduceFunc is the function called by a StringPoint reducer.\ntype StringReduceFunc func(prev *StringPoint, curr *StringPoint) (t int64, v string, aux []interface{})\n\n// StringFuncReducer is a reducer that reduces\n// the passed in points to a single point using a reduce function.\ntype StringFuncReducer struct {\n\tprev *StringPoint\n\tfn   StringReduceFunc\n}\n\n// NewStringFuncReducer creates a new StringFuncStringReducer.\nfunc NewStringFuncReducer(fn StringReduceFunc, prev *StringPoint) *StringFuncReducer {\n\treturn &StringFuncReducer{fn: fn, prev: prev}\n}\n\n// AggregateString takes a StringPoint and invokes the reduce function with the\n// current and new point to modify the current point.\nfunc (r *StringFuncReducer) AggregateString(p *StringPoint) {\n\tt, v, aux := r.fn(r.prev, p)\n\tif r.prev == nil {\n\t\tr.prev = &StringPoint{}\n\t}\n\tr.prev.Time = t\n\tr.prev.Value = v\n\tr.prev.Aux = aux\n\tif p.Aggregated > 1 {\n\t\tr.prev.Aggregated += p.Aggregated\n\t} else {\n\t\tr.prev.Aggregated++\n\t}\n}\n\n// Emit emits the point that was generated when reducing the points fed in with AggregateString.\nfunc (r *StringFuncReducer) Emit() []StringPoint {\n\treturn []StringPoint{*r.prev}\n}\n\n// StringReduceSliceFunc is the function called by a StringPoint reducer.\ntype StringReduceSliceFunc func(a []StringPoint) []StringPoint\n\n// StringSliceFuncReducer is a reducer that aggregates\n// the passed in points and then invokes the function to reduce the points when they are emitted.\ntype StringSliceFuncReducer struct {\n\tpoints []StringPoint\n\tfn     StringReduceSliceFunc\n}\n\n// NewStringSliceFuncReducer creates a new StringSliceFuncReducer.\nfunc NewStringSliceFuncReducer(fn StringReduceSliceFunc) *StringSliceFuncReducer {\n\treturn &StringSliceFuncReducer{fn: fn}\n}\n\n// AggregateString copies the StringPoint into the internal slice to be passed\n// to the reduce function when Emit is called.\nfunc (r 
*StringSliceFuncReducer) AggregateString(p *StringPoint) {\n\tr.points = append(r.points, *p.Clone())\n}\n\n// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice.\n// This is a more efficient version of calling AggregateString on each point.\nfunc (r *StringSliceFuncReducer) AggregateStringBulk(points []StringPoint) {\n\tr.points = append(r.points, points...)\n}\n\n// Emit invokes the reduce function on the aggregated points to generate the aggregated points.\n// This method does not clear the points from the internal slice.\nfunc (r *StringSliceFuncReducer) Emit() []StringPoint {\n\treturn r.fn(r.points)\n}\n\n// StringReduceBooleanFunc is the function called by a StringPoint reducer.\ntype StringReduceBooleanFunc func(prev *BooleanPoint, curr *StringPoint) (t int64, v bool, aux []interface{})\n\n// StringFuncBooleanReducer is a reducer that reduces\n// the passed in points to a single point using a reduce function.\ntype StringFuncBooleanReducer struct {\n\tprev *BooleanPoint\n\tfn   StringReduceBooleanFunc\n}\n\n// NewStringFuncBooleanReducer creates a new StringFuncBooleanReducer.\nfunc NewStringFuncBooleanReducer(fn StringReduceBooleanFunc, prev *BooleanPoint) *StringFuncBooleanReducer {\n\treturn &StringFuncBooleanReducer{fn: fn, prev: prev}\n}\n\n// AggregateString takes a StringPoint and invokes the reduce function with the\n// current and new point to modify the current point.\nfunc (r *StringFuncBooleanReducer) AggregateString(p *StringPoint) {\n\tt, v, aux := r.fn(r.prev, p)\n\tif r.prev == nil {\n\t\tr.prev = &BooleanPoint{}\n\t}\n\tr.prev.Time = t\n\tr.prev.Value = v\n\tr.prev.Aux = aux\n\tif p.Aggregated > 1 {\n\t\tr.prev.Aggregated += p.Aggregated\n\t} else {\n\t\tr.prev.Aggregated++\n\t}\n}\n\n// Emit emits the point that was generated when reducing the points fed in with AggregateString.\nfunc (r *StringFuncBooleanReducer) Emit() []BooleanPoint {\n\treturn []BooleanPoint{*r.prev}\n}\n\n// StringReduceBooleanSliceFunc 
is the function called by a StringPoint reducer.\ntype StringReduceBooleanSliceFunc func(a []StringPoint) []BooleanPoint\n\n// StringSliceFuncBooleanReducer is a reducer that aggregates\n// the passed in points and then invokes the function to reduce the points when they are emitted.\ntype StringSliceFuncBooleanReducer struct {\n\tpoints []StringPoint\n\tfn     StringReduceBooleanSliceFunc\n}\n\n// NewStringSliceFuncBooleanReducer creates a new StringSliceFuncBooleanReducer.\nfunc NewStringSliceFuncBooleanReducer(fn StringReduceBooleanSliceFunc) *StringSliceFuncBooleanReducer {\n\treturn &StringSliceFuncBooleanReducer{fn: fn}\n}\n\n// AggregateString copies the StringPoint into the internal slice to be passed\n// to the reduce function when Emit is called.\nfunc (r *StringSliceFuncBooleanReducer) AggregateString(p *StringPoint) {\n\tr.points = append(r.points, *p.Clone())\n}\n\n// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice.\n// This is a more efficient version of calling AggregateString on each point.\nfunc (r *StringSliceFuncBooleanReducer) AggregateStringBulk(points []StringPoint) {\n\tr.points = append(r.points, points...)\n}\n\n// Emit invokes the reduce function on the aggregated points to generate the aggregated points.\n// This method does not clear the points from the internal slice.\nfunc (r *StringSliceFuncBooleanReducer) Emit() []BooleanPoint {\n\treturn r.fn(r.points)\n}\n\n// StringDistinctReducer returns the distinct points in a series.\ntype StringDistinctReducer struct {\n\tm map[string]StringPoint\n}\n\n// NewStringDistinctReducer creates a new StringDistinctReducer.\nfunc NewStringDistinctReducer() *StringDistinctReducer {\n\treturn &StringDistinctReducer{m: make(map[string]StringPoint)}\n}\n\n// AggregateString aggregates a point into the reducer.\nfunc (r *StringDistinctReducer) AggregateString(p *StringPoint) {\n\tif _, ok := r.m[p.Value]; !ok {\n\t\tr.m[p.Value] = *p\n\t}\n}\n\n// Emit emits the distinct 
points that have been aggregated into the reducer.\nfunc (r *StringDistinctReducer) Emit() []StringPoint {\n\tpoints := make([]StringPoint, 0, len(r.m))\n\tfor _, p := range r.m {\n\t\tpoints = append(points, StringPoint{Time: p.Time, Value: p.Value})\n\t}\n\tsort.Sort(stringPoints(points))\n\treturn points\n}\n\n// StringElapsedReducer calculates the elapsed of the aggregated points.\ntype StringElapsedReducer struct {\n\tunitConversion int64\n\tprev           StringPoint\n\tcurr           StringPoint\n}\n\n// NewStringElapsedReducer creates a new StringElapsedReducer.\nfunc NewStringElapsedReducer(interval Interval) *StringElapsedReducer {\n\treturn &StringElapsedReducer{\n\t\tunitConversion: int64(interval.Duration),\n\t\tprev:           StringPoint{Nil: true},\n\t\tcurr:           StringPoint{Nil: true},\n\t}\n}\n\n// AggregateString aggregates a point into the reducer and updates the current window.\nfunc (r *StringElapsedReducer) AggregateString(p *StringPoint) {\n\tr.prev = r.curr\n\tr.curr = *p\n}\n\n// Emit emits the elapsed of the reducer at the current point.\nfunc (r *StringElapsedReducer) Emit() []IntegerPoint {\n\tif !r.prev.Nil {\n\t\telapsed := (r.curr.Time - r.prev.Time) / r.unitConversion\n\t\treturn []IntegerPoint{\n\t\t\t{Time: r.curr.Time, Value: elapsed},\n\t\t}\n\t}\n\treturn nil\n}\n\n// StringSampleReducer implements a reservoir sampling to calculate a random subset of points\ntype StringSampleReducer struct {\n\tcount int        // how many points we've iterated over\n\trng   *rand.Rand // random number generator for each reducer\n\n\tpoints stringPoints // the reservoir\n}\n\n// NewStringSampleReducer creates a new StringSampleReducer\nfunc NewStringSampleReducer(size int) *StringSampleReducer {\n\treturn &StringSampleReducer{\n\t\trng:    rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/\n\t\tpoints: make(stringPoints, size),\n\t}\n}\n\n// AggregateString 
aggregates a point into the reducer.\nfunc (r *StringSampleReducer) AggregateString(p *StringPoint) {\n\tr.count++\n\t// Fill the reservoir with the first n points\n\tif r.count-1 < len(r.points) {\n\t\tp.CopyTo(&r.points[r.count-1])\n\t\treturn\n\t}\n\n\t// Generate a random integer between 1 and the count and\n\t// if that number is less than the length of the slice\n\t// replace the point at that index rnd with p.\n\trnd := r.rng.Intn(r.count)\n\tif rnd < len(r.points) {\n\t\tp.CopyTo(&r.points[rnd])\n\t}\n}\n\n// Emit emits the reservoir sample as many points.\nfunc (r *StringSampleReducer) Emit() []StringPoint {\n\tmin := len(r.points)\n\tif r.count < min {\n\t\tmin = r.count\n\t}\n\tpts := r.points[:min]\n\tsort.Sort(pts)\n\treturn pts\n}\n\n// BooleanPointAggregator aggregates points to produce a single point.\ntype BooleanPointAggregator interface {\n\tAggregateBoolean(p *BooleanPoint)\n}\n\n// BooleanBulkPointAggregator aggregates multiple points at a time.\ntype BooleanBulkPointAggregator interface {\n\tAggregateBooleanBulk(points []BooleanPoint)\n}\n\n// AggregateBooleanPoints feeds a slice of BooleanPoint into an\n// aggregator. 
If the aggregator is a BooleanBulkPointAggregator, it will\n// use the AggregateBulk method.\nfunc AggregateBooleanPoints(a BooleanPointAggregator, points []BooleanPoint) {\n\tswitch a := a.(type) {\n\tcase BooleanBulkPointAggregator:\n\t\ta.AggregateBooleanBulk(points)\n\tdefault:\n\t\tfor _, p := range points {\n\t\t\ta.AggregateBoolean(&p)\n\t\t}\n\t}\n}\n\n// BooleanPointEmitter produces a single point from an aggregate.\ntype BooleanPointEmitter interface {\n\tEmit() []BooleanPoint\n}\n\n// BooleanReduceFloatFunc is the function called by a BooleanPoint reducer.\ntype BooleanReduceFloatFunc func(prev *FloatPoint, curr *BooleanPoint) (t int64, v float64, aux []interface{})\n\n// BooleanFuncFloatReducer is a reducer that reduces\n// the passed in points to a single point using a reduce function.\ntype BooleanFuncFloatReducer struct {\n\tprev *FloatPoint\n\tfn   BooleanReduceFloatFunc\n}\n\n// NewBooleanFuncFloatReducer creates a new BooleanFuncFloatReducer.\nfunc NewBooleanFuncFloatReducer(fn BooleanReduceFloatFunc, prev *FloatPoint) *BooleanFuncFloatReducer {\n\treturn &BooleanFuncFloatReducer{fn: fn, prev: prev}\n}\n\n// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the\n// current and new point to modify the current point.\nfunc (r *BooleanFuncFloatReducer) AggregateBoolean(p *BooleanPoint) {\n\tt, v, aux := r.fn(r.prev, p)\n\tif r.prev == nil {\n\t\tr.prev = &FloatPoint{}\n\t}\n\tr.prev.Time = t\n\tr.prev.Value = v\n\tr.prev.Aux = aux\n\tif p.Aggregated > 1 {\n\t\tr.prev.Aggregated += p.Aggregated\n\t} else {\n\t\tr.prev.Aggregated++\n\t}\n}\n\n// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean.\nfunc (r *BooleanFuncFloatReducer) Emit() []FloatPoint {\n\treturn []FloatPoint{*r.prev}\n}\n\n// BooleanReduceFloatSliceFunc is the function called by a BooleanPoint reducer.\ntype BooleanReduceFloatSliceFunc func(a []BooleanPoint) []FloatPoint\n\n// BooleanSliceFuncFloatReducer is a reducer 
that aggregates\n// the passed in points and then invokes the function to reduce the points when they are emitted.\ntype BooleanSliceFuncFloatReducer struct {\n\tpoints []BooleanPoint\n\tfn     BooleanReduceFloatSliceFunc\n}\n\n// NewBooleanSliceFuncFloatReducer creates a new BooleanSliceFuncFloatReducer.\nfunc NewBooleanSliceFuncFloatReducer(fn BooleanReduceFloatSliceFunc) *BooleanSliceFuncFloatReducer {\n\treturn &BooleanSliceFuncFloatReducer{fn: fn}\n}\n\n// AggregateBoolean copies the BooleanPoint into the internal slice to be passed\n// to the reduce function when Emit is called.\nfunc (r *BooleanSliceFuncFloatReducer) AggregateBoolean(p *BooleanPoint) {\n\tr.points = append(r.points, *p.Clone())\n}\n\n// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice.\n// This is a more efficient version of calling AggregateBoolean on each point.\nfunc (r *BooleanSliceFuncFloatReducer) AggregateBooleanBulk(points []BooleanPoint) {\n\tr.points = append(r.points, points...)\n}\n\n// Emit invokes the reduce function on the aggregated points to generate the aggregated points.\n// This method does not clear the points from the internal slice.\nfunc (r *BooleanSliceFuncFloatReducer) Emit() []FloatPoint {\n\treturn r.fn(r.points)\n}\n\n// BooleanReduceIntegerFunc is the function called by a BooleanPoint reducer.\ntype BooleanReduceIntegerFunc func(prev *IntegerPoint, curr *BooleanPoint) (t int64, v int64, aux []interface{})\n\n// BooleanFuncIntegerReducer is a reducer that reduces\n// the passed in points to a single point using a reduce function.\ntype BooleanFuncIntegerReducer struct {\n\tprev *IntegerPoint\n\tfn   BooleanReduceIntegerFunc\n}\n\n// NewBooleanFuncIntegerReducer creates a new BooleanFuncIntegerReducer.\nfunc NewBooleanFuncIntegerReducer(fn BooleanReduceIntegerFunc, prev *IntegerPoint) *BooleanFuncIntegerReducer {\n\treturn &BooleanFuncIntegerReducer{fn: fn, prev: prev}\n}\n\n// AggregateBoolean takes a BooleanPoint and invokes the 
reduce function with the\n// current and new point to modify the current point.\nfunc (r *BooleanFuncIntegerReducer) AggregateBoolean(p *BooleanPoint) {\n\tt, v, aux := r.fn(r.prev, p)\n\tif r.prev == nil {\n\t\tr.prev = &IntegerPoint{}\n\t}\n\tr.prev.Time = t\n\tr.prev.Value = v\n\tr.prev.Aux = aux\n\tif p.Aggregated > 1 {\n\t\tr.prev.Aggregated += p.Aggregated\n\t} else {\n\t\tr.prev.Aggregated++\n\t}\n}\n\n// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean.\nfunc (r *BooleanFuncIntegerReducer) Emit() []IntegerPoint {\n\treturn []IntegerPoint{*r.prev}\n}\n\n// BooleanReduceIntegerSliceFunc is the function called by a BooleanPoint reducer.\ntype BooleanReduceIntegerSliceFunc func(a []BooleanPoint) []IntegerPoint\n\n// BooleanSliceFuncIntegerReducer is a reducer that aggregates\n// the passed in points and then invokes the function to reduce the points when they are emitted.\ntype BooleanSliceFuncIntegerReducer struct {\n\tpoints []BooleanPoint\n\tfn     BooleanReduceIntegerSliceFunc\n}\n\n// NewBooleanSliceFuncIntegerReducer creates a new BooleanSliceFuncIntegerReducer.\nfunc NewBooleanSliceFuncIntegerReducer(fn BooleanReduceIntegerSliceFunc) *BooleanSliceFuncIntegerReducer {\n\treturn &BooleanSliceFuncIntegerReducer{fn: fn}\n}\n\n// AggregateBoolean copies the BooleanPoint into the internal slice to be passed\n// to the reduce function when Emit is called.\nfunc (r *BooleanSliceFuncIntegerReducer) AggregateBoolean(p *BooleanPoint) {\n\tr.points = append(r.points, *p.Clone())\n}\n\n// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice.\n// This is a more efficient version of calling AggregateBoolean on each point.\nfunc (r *BooleanSliceFuncIntegerReducer) AggregateBooleanBulk(points []BooleanPoint) {\n\tr.points = append(r.points, points...)\n}\n\n// Emit invokes the reduce function on the aggregated points to generate the aggregated points.\n// This method does not clear the points from 
the internal slice.\nfunc (r *BooleanSliceFuncIntegerReducer) Emit() []IntegerPoint {\n\treturn r.fn(r.points)\n}\n\n// BooleanReduceStringFunc is the function called by a BooleanPoint reducer.\ntype BooleanReduceStringFunc func(prev *StringPoint, curr *BooleanPoint) (t int64, v string, aux []interface{})\n\n// BooleanFuncStringReducer is a reducer that reduces\n// the passed in points to a single point using a reduce function.\ntype BooleanFuncStringReducer struct {\n\tprev *StringPoint\n\tfn   BooleanReduceStringFunc\n}\n\n// NewBooleanFuncStringReducer creates a new BooleanFuncStringReducer.\nfunc NewBooleanFuncStringReducer(fn BooleanReduceStringFunc, prev *StringPoint) *BooleanFuncStringReducer {\n\treturn &BooleanFuncStringReducer{fn: fn, prev: prev}\n}\n\n// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the\n// current and new point to modify the current point.\nfunc (r *BooleanFuncStringReducer) AggregateBoolean(p *BooleanPoint) {\n\tt, v, aux := r.fn(r.prev, p)\n\tif r.prev == nil {\n\t\tr.prev = &StringPoint{}\n\t}\n\tr.prev.Time = t\n\tr.prev.Value = v\n\tr.prev.Aux = aux\n\tif p.Aggregated > 1 {\n\t\tr.prev.Aggregated += p.Aggregated\n\t} else {\n\t\tr.prev.Aggregated++\n\t}\n}\n\n// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean.\nfunc (r *BooleanFuncStringReducer) Emit() []StringPoint {\n\treturn []StringPoint{*r.prev}\n}\n\n// BooleanReduceStringSliceFunc is the function called by a BooleanPoint reducer.\ntype BooleanReduceStringSliceFunc func(a []BooleanPoint) []StringPoint\n\n// BooleanSliceFuncStringReducer is a reducer that aggregates\n// the passed in points and then invokes the function to reduce the points when they are emitted.\ntype BooleanSliceFuncStringReducer struct {\n\tpoints []BooleanPoint\n\tfn     BooleanReduceStringSliceFunc\n}\n\n// NewBooleanSliceFuncStringReducer creates a new BooleanSliceFuncStringReducer.\nfunc NewBooleanSliceFuncStringReducer(fn 
BooleanReduceStringSliceFunc) *BooleanSliceFuncStringReducer {\n\treturn &BooleanSliceFuncStringReducer{fn: fn}\n}\n\n// AggregateBoolean copies the BooleanPoint into the internal slice to be passed\n// to the reduce function when Emit is called.\nfunc (r *BooleanSliceFuncStringReducer) AggregateBoolean(p *BooleanPoint) {\n\tr.points = append(r.points, *p.Clone())\n}\n\n// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice.\n// This is a more efficient version of calling AggregateBoolean on each point.\nfunc (r *BooleanSliceFuncStringReducer) AggregateBooleanBulk(points []BooleanPoint) {\n\tr.points = append(r.points, points...)\n}\n\n// Emit invokes the reduce function on the aggregated points to generate the aggregated points.\n// This method does not clear the points from the internal slice.\nfunc (r *BooleanSliceFuncStringReducer) Emit() []StringPoint {\n\treturn r.fn(r.points)\n}\n\n// BooleanReduceFunc is the function called by a BooleanPoint reducer.\ntype BooleanReduceFunc func(prev *BooleanPoint, curr *BooleanPoint) (t int64, v bool, aux []interface{})\n\n// BooleanFuncReducer is a reducer that reduces\n// the passed in points to a single point using a reduce function.\ntype BooleanFuncReducer struct {\n\tprev *BooleanPoint\n\tfn   BooleanReduceFunc\n}\n\n// NewBooleanFuncReducer creates a new BooleanFuncBooleanReducer.\nfunc NewBooleanFuncReducer(fn BooleanReduceFunc, prev *BooleanPoint) *BooleanFuncReducer {\n\treturn &BooleanFuncReducer{fn: fn, prev: prev}\n}\n\n// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the\n// current and new point to modify the current point.\nfunc (r *BooleanFuncReducer) AggregateBoolean(p *BooleanPoint) {\n\tt, v, aux := r.fn(r.prev, p)\n\tif r.prev == nil {\n\t\tr.prev = &BooleanPoint{}\n\t}\n\tr.prev.Time = t\n\tr.prev.Value = v\n\tr.prev.Aux = aux\n\tif p.Aggregated > 1 {\n\t\tr.prev.Aggregated += p.Aggregated\n\t} else {\n\t\tr.prev.Aggregated++\n\t}\n}\n\n// 
Emit emits the point that was generated when reducing the points fed in with AggregateBoolean.\nfunc (r *BooleanFuncReducer) Emit() []BooleanPoint {\n\treturn []BooleanPoint{*r.prev}\n}\n\n// BooleanReduceSliceFunc is the function called by a BooleanPoint reducer.\ntype BooleanReduceSliceFunc func(a []BooleanPoint) []BooleanPoint\n\n// BooleanSliceFuncReducer is a reducer that aggregates\n// the passed in points and then invokes the function to reduce the points when they are emitted.\ntype BooleanSliceFuncReducer struct {\n\tpoints []BooleanPoint\n\tfn     BooleanReduceSliceFunc\n}\n\n// NewBooleanSliceFuncReducer creates a new BooleanSliceFuncReducer.\nfunc NewBooleanSliceFuncReducer(fn BooleanReduceSliceFunc) *BooleanSliceFuncReducer {\n\treturn &BooleanSliceFuncReducer{fn: fn}\n}\n\n// AggregateBoolean copies the BooleanPoint into the internal slice to be passed\n// to the reduce function when Emit is called.\nfunc (r *BooleanSliceFuncReducer) AggregateBoolean(p *BooleanPoint) {\n\tr.points = append(r.points, *p.Clone())\n}\n\n// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice.\n// This is a more efficient version of calling AggregateBoolean on each point.\nfunc (r *BooleanSliceFuncReducer) AggregateBooleanBulk(points []BooleanPoint) {\n\tr.points = append(r.points, points...)\n}\n\n// Emit invokes the reduce function on the aggregated points to generate the aggregated points.\n// This method does not clear the points from the internal slice.\nfunc (r *BooleanSliceFuncReducer) Emit() []BooleanPoint {\n\treturn r.fn(r.points)\n}\n\n// BooleanDistinctReducer returns the distinct points in a series.\ntype BooleanDistinctReducer struct {\n\tm map[bool]BooleanPoint\n}\n\n// NewBooleanDistinctReducer creates a new BooleanDistinctReducer.\nfunc NewBooleanDistinctReducer() *BooleanDistinctReducer {\n\treturn &BooleanDistinctReducer{m: make(map[bool]BooleanPoint)}\n}\n\n// AggregateBoolean aggregates a point into the reducer.\nfunc (r 
*BooleanDistinctReducer) AggregateBoolean(p *BooleanPoint) {\n\tif _, ok := r.m[p.Value]; !ok {\n\t\tr.m[p.Value] = *p\n\t}\n}\n\n// Emit emits the distinct points that have been aggregated into the reducer.\nfunc (r *BooleanDistinctReducer) Emit() []BooleanPoint {\n\tpoints := make([]BooleanPoint, 0, len(r.m))\n\tfor _, p := range r.m {\n\t\tpoints = append(points, BooleanPoint{Time: p.Time, Value: p.Value})\n\t}\n\tsort.Sort(booleanPoints(points))\n\treturn points\n}\n\n// BooleanElapsedReducer calculates the elapsed of the aggregated points.\ntype BooleanElapsedReducer struct {\n\tunitConversion int64\n\tprev           BooleanPoint\n\tcurr           BooleanPoint\n}\n\n// NewBooleanElapsedReducer creates a new BooleanElapsedReducer.\nfunc NewBooleanElapsedReducer(interval Interval) *BooleanElapsedReducer {\n\treturn &BooleanElapsedReducer{\n\t\tunitConversion: int64(interval.Duration),\n\t\tprev:           BooleanPoint{Nil: true},\n\t\tcurr:           BooleanPoint{Nil: true},\n\t}\n}\n\n// AggregateBoolean aggregates a point into the reducer and updates the current window.\nfunc (r *BooleanElapsedReducer) AggregateBoolean(p *BooleanPoint) {\n\tr.prev = r.curr\n\tr.curr = *p\n}\n\n// Emit emits the elapsed of the reducer at the current point.\nfunc (r *BooleanElapsedReducer) Emit() []IntegerPoint {\n\tif !r.prev.Nil {\n\t\telapsed := (r.curr.Time - r.prev.Time) / r.unitConversion\n\t\treturn []IntegerPoint{\n\t\t\t{Time: r.curr.Time, Value: elapsed},\n\t\t}\n\t}\n\treturn nil\n}\n\n// BooleanSampleReducer implements a reservoir sampling to calculate a random subset of points\ntype BooleanSampleReducer struct {\n\tcount int        // how many points we've iterated over\n\trng   *rand.Rand // random number generator for each reducer\n\n\tpoints booleanPoints // the reservoir\n}\n\n// NewBooleanSampleReducer creates a new BooleanSampleReducer\nfunc NewBooleanSampleReducer(size int) *BooleanSampleReducer {\n\treturn &BooleanSampleReducer{\n\t\trng:    
rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/\n\t\tpoints: make(booleanPoints, size),\n\t}\n}\n\n// AggregateBoolean aggregates a point into the reducer.\nfunc (r *BooleanSampleReducer) AggregateBoolean(p *BooleanPoint) {\n\tr.count++\n\t// Fill the reservoir with the first n points\n\tif r.count-1 < len(r.points) {\n\t\tp.CopyTo(&r.points[r.count-1])\n\t\treturn\n\t}\n\n\t// Generate a random integer between 1 and the count and\n\t// if that number is less than the length of the slice\n\t// replace the point at that index rnd with p.\n\trnd := r.rng.Intn(r.count)\n\tif rnd < len(r.points) {\n\t\tp.CopyTo(&r.points[rnd])\n\t}\n}\n\n// Emit emits the reservoir sample as many points.\nfunc (r *BooleanSampleReducer) Emit() []BooleanPoint {\n\tmin := len(r.points)\n\tif r.count < min {\n\t\tmin = r.count\n\t}\n\tpts := r.points[:min]\n\tsort.Sort(pts)\n\treturn pts\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/functions.gen.go.tmpl",
    "content": "package influxql\n\nimport (\n\"sort\"\n\"time\"\n\"math/rand\"\n)\n\n{{with $types := .}}{{range $k := $types}}\n\n// {{$k.Name}}PointAggregator aggregates points to produce a single point.\ntype {{$k.Name}}PointAggregator interface {\n\tAggregate{{$k.Name}}(p *{{$k.Name}}Point)\n}\n\n// {{$k.Name}}BulkPointAggregator aggregates multiple points at a time.\ntype {{$k.Name}}BulkPointAggregator interface {\n\tAggregate{{$k.Name}}Bulk(points []{{$k.Name}}Point)\n}\n\n// Aggregate{{$k.Name}}Points feeds a slice of {{$k.Name}}Point into an\n// aggregator. If the aggregator is a {{$k.Name}}BulkPointAggregator, it will\n// use the AggregateBulk method.\nfunc Aggregate{{$k.Name}}Points(a {{$k.Name}}PointAggregator, points []{{$k.Name}}Point) {\n\tswitch a := a.(type) {\n\tcase {{$k.Name}}BulkPointAggregator:\n\t\ta.Aggregate{{$k.Name}}Bulk(points)\n\tdefault:\n\t\tfor _, p := range points {\n\t\t\ta.Aggregate{{$k.Name}}(&p)\n\t\t}\n\t}\n}\n\n// {{$k.Name}}PointEmitter produces a single point from an aggregate.\ntype {{$k.Name}}PointEmitter interface {\n\tEmit() []{{$k.Name}}Point\n}\n\n{{range $v := $types}}\n\n// {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func is the function called by a {{$k.Name}}Point reducer.\ntype {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func func(prev *{{$v.Name}}Point, curr *{{$k.Name}}Point) (t int64, v {{$v.Type}}, aux []interface{})\n\n// {{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer is a reducer that reduces\n// the passed in points to a single point using a reduce function.\ntype {{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer struct {\n\tprev *{{$v.Name}}Point\n\tfn   {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func\n}\n\n// New{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer creates a new {{$k.Name}}Func{{$v.Name}}Reducer.\nfunc New{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer(fn {{$k.Name}}Reduce{{if 
ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func, prev *{{$v.Name}}Point) *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer {\n\treturn &{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer{fn: fn, prev: prev}\n}\n\n// Aggregate{{$k.Name}} takes a {{$k.Name}}Point and invokes the reduce function with the\n// current and new point to modify the current point.\nfunc (r *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) {\n\tt, v, aux := r.fn(r.prev, p)\n\tif r.prev == nil {\n\t\tr.prev = &{{$v.Name}}Point{}\n\t}\n\tr.prev.Time = t\n\tr.prev.Value = v\n\tr.prev.Aux = aux\n\tif p.Aggregated > 1 {\n\t\tr.prev.Aggregated += p.Aggregated\n\t} else {\n\t\tr.prev.Aggregated++\n\t}\n}\n\n// Emit emits the point that was generated when reducing the points fed in with Aggregate{{$k.Name}}.\nfunc (r *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Emit() []{{$v.Name}}Point {\n\treturn []{{$v.Name}}Point{*r.prev}\n}\n\n// {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc is the function called by a {{$k.Name}}Point reducer.\ntype {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc func(a []{{$k.Name}}Point) []{{$v.Name}}Point\n\n// {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer is a reducer that aggregates\n// the passed in points and then invokes the function to reduce the points when they are emitted.\ntype {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer struct {\n\tpoints []{{$k.Name}}Point\n\tfn     {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc\n}\n\n// New{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer creates a new {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer.\nfunc New{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer(fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc) 
*{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer {\n\treturn &{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer{fn: fn}\n}\n\n// Aggregate{{$k.Name}} copies the {{$k.Name}}Point into the internal slice to be passed\n// to the reduce function when Emit is called.\nfunc (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) {\n\tr.points = append(r.points, *p.Clone())\n}\n\n// Aggregate{{$k.Name}}Bulk performs a bulk copy of {{$k.Name}}Points into the internal slice.\n// This is a more efficient version of calling Aggregate{{$k.Name}} on each point.\nfunc (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}Bulk(points []{{$k.Name}}Point) {\n\tr.points = append(r.points, points...)\n}\n\n// Emit invokes the reduce function on the aggregated points to generate the aggregated points.\n// This method does not clear the points from the internal slice.\nfunc (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Emit() []{{$v.Name}}Point {\n\treturn r.fn(r.points)\n}\n{{end}}\n\n// {{$k.Name}}DistinctReducer returns the distinct points in a series.\ntype {{$k.Name}}DistinctReducer struct {\n\tm map[{{$k.Type}}]{{$k.Name}}Point\n}\n\n// New{{$k.Name}}DistinctReducer creates a new {{$k.Name}}DistinctReducer.\nfunc New{{$k.Name}}DistinctReducer() *{{$k.Name}}DistinctReducer {\n\treturn &{{$k.Name}}DistinctReducer{m: make(map[{{$k.Type}}]{{$k.Name}}Point)}\n}\n\n// Aggregate{{$k.Name}} aggregates a point into the reducer.\nfunc (r *{{$k.Name}}DistinctReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) {\n\tif _, ok := r.m[p.Value]; !ok {\n\t\tr.m[p.Value] = *p\n\t}\n}\n\n// Emit emits the distinct points that have been aggregated into the reducer.\nfunc (r *{{$k.Name}}DistinctReducer) Emit() []{{$k.Name}}Point {\n\tpoints := make([]{{$k.Name}}Point, 0, len(r.m))\n\tfor _, p := range r.m {\n\t\tpoints = 
append(points, {{$k.Name}}Point{Time: p.Time, Value: p.Value})\n\t}\n\tsort.Sort({{$k.name}}Points(points))\n\treturn points\n}\n\n// {{$k.Name}}ElapsedReducer calculates the elapsed of the aggregated points.\ntype {{$k.Name}}ElapsedReducer struct {\n\tunitConversion int64\n\tprev           {{$k.Name}}Point\n\tcurr           {{$k.Name}}Point\n}\n\n// New{{$k.Name}}ElapsedReducer creates a new {{$k.Name}}ElapsedReducer.\nfunc New{{$k.Name}}ElapsedReducer(interval Interval) *{{$k.Name}}ElapsedReducer {\n\treturn &{{$k.Name}}ElapsedReducer{\n\t\tunitConversion: int64(interval.Duration),\n\t\tprev:           {{$k.Name}}Point{Nil: true},\n\t\tcurr:           {{$k.Name}}Point{Nil: true},\n\t}\n}\n\n// Aggregate{{$k.Name}} aggregates a point into the reducer and updates the current window.\nfunc (r *{{$k.Name}}ElapsedReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) {\n\tr.prev = r.curr\n\tr.curr = *p\n}\n\n// Emit emits the elapsed of the reducer at the current point.\nfunc (r *{{$k.Name}}ElapsedReducer) Emit() []IntegerPoint {\n\tif !r.prev.Nil {\n\t\telapsed := (r.curr.Time - r.prev.Time) / r.unitConversion\n\t\treturn []IntegerPoint{\n\t\t\t{Time: r.curr.Time, Value: elapsed},\n\t\t}\n\t}\n\treturn nil\n}\n\n// {{$k.Name}}SampleReducer implements a reservoir sampling to calculate a random subset of points\ntype {{$k.Name}}SampleReducer struct {\n\tcount int // how many points we've iterated over\n\trng   *rand.Rand // random number generator for each reducer\n\n\tpoints {{$k.name}}Points // the reservoir\n}\n\n// New{{$k.Name}}SampleReducer creates a new {{$k.Name}}SampleReducer\nfunc New{{$k.Name}}SampleReducer(size int) *{{$k.Name}}SampleReducer {\n\treturn &{{$k.Name}}SampleReducer{\n\t\trng:    rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/\n\t\tpoints: make({{$k.name}}Points, size),\n\t}\n}\n\n// Aggregate{{$k.Name}} aggregates a point into the reducer.\nfunc (r 
*{{$k.Name}}SampleReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) {\n\tr.count++\n\t// Fill the reservoir with the first n points\n\tif r.count-1 < len(r.points) {\n\t\tp.CopyTo(&r.points[r.count-1])\n\t\treturn\n\t}\n\n\t// Generate a random integer between 1 and the count and\n\t// if that number is less than the length of the slice\n\t// replace the point at that index rnd with p.\n\trnd := r.rng.Intn(r.count)\n\tif rnd < len(r.points) {\n\t\tp.CopyTo(&r.points[rnd])\n\t}\n}\n\n// Emit emits the reservoir sample as many points.\nfunc (r *{{$k.Name}}SampleReducer) Emit() []{{$k.Name}}Point {\n\tmin := len(r.points)\n\tif r.count < min {\n\t\tmin = r.count\n\t}\n\tpts := r.points[:min]\n\tsort.Sort(pts)\n\treturn pts\n}\n\n\n{{end}}{{end}}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/functions.go",
    "content": "package influxql\n\nimport (\n\t\"container/heap\"\n\t\"math\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql/neldermead\"\n)\n\n// FloatMeanReducer calculates the mean of the aggregated points.\ntype FloatMeanReducer struct {\n\tsum   float64\n\tcount uint32\n}\n\n// NewFloatMeanReducer creates a new FloatMeanReducer.\nfunc NewFloatMeanReducer() *FloatMeanReducer {\n\treturn &FloatMeanReducer{}\n}\n\n// AggregateFloat aggregates a point into the reducer.\nfunc (r *FloatMeanReducer) AggregateFloat(p *FloatPoint) {\n\tif p.Aggregated >= 2 {\n\t\tr.sum += p.Value * float64(p.Aggregated)\n\t\tr.count += p.Aggregated\n\t} else {\n\t\tr.sum += p.Value\n\t\tr.count++\n\t}\n}\n\n// Emit emits the mean of the aggregated points as a single point.\nfunc (r *FloatMeanReducer) Emit() []FloatPoint {\n\treturn []FloatPoint{{\n\t\tTime:       ZeroTime,\n\t\tValue:      r.sum / float64(r.count),\n\t\tAggregated: r.count,\n\t}}\n}\n\n// IntegerMeanReducer calculates the mean of the aggregated points.\ntype IntegerMeanReducer struct {\n\tsum   int64\n\tcount uint32\n}\n\n// NewIntegerMeanReducer creates a new IntegerMeanReducer.\nfunc NewIntegerMeanReducer() *IntegerMeanReducer {\n\treturn &IntegerMeanReducer{}\n}\n\n// AggregateInteger aggregates a point into the reducer.\nfunc (r *IntegerMeanReducer) AggregateInteger(p *IntegerPoint) {\n\tif p.Aggregated >= 2 {\n\t\tr.sum += p.Value * int64(p.Aggregated)\n\t\tr.count += p.Aggregated\n\t} else {\n\t\tr.sum += p.Value\n\t\tr.count++\n\t}\n}\n\n// Emit emits the mean of the aggregated points as a single point.\nfunc (r *IntegerMeanReducer) Emit() []FloatPoint {\n\treturn []FloatPoint{{\n\t\tTime:       ZeroTime,\n\t\tValue:      float64(r.sum) / float64(r.count),\n\t\tAggregated: r.count,\n\t}}\n}\n\n// FloatDerivativeReducer calculates the derivative of the aggregated points.\ntype FloatDerivativeReducer struct {\n\tinterval      Interval\n\tprev          FloatPoint\n\tcurr          
FloatPoint\n\tisNonNegative bool\n\tascending     bool\n}\n\n// NewFloatDerivativeReducer creates a new FloatDerivativeReducer.\nfunc NewFloatDerivativeReducer(interval Interval, isNonNegative, ascending bool) *FloatDerivativeReducer {\n\treturn &FloatDerivativeReducer{\n\t\tinterval:      interval,\n\t\tisNonNegative: isNonNegative,\n\t\tascending:     ascending,\n\t\tprev:          FloatPoint{Nil: true},\n\t\tcurr:          FloatPoint{Nil: true},\n\t}\n}\n\n// AggregateFloat aggregates a point into the reducer and updates the current window.\nfunc (r *FloatDerivativeReducer) AggregateFloat(p *FloatPoint) {\n\t// Skip past a point when it does not advance the stream. A joined series\n\t// may have multiple points at the same time so we will discard anything\n\t// except the first point we encounter.\n\tif !r.curr.Nil && r.curr.Time == p.Time {\n\t\treturn\n\t}\n\n\tr.prev = r.curr\n\tr.curr = *p\n}\n\n// Emit emits the derivative of the reducer at the current point.\nfunc (r *FloatDerivativeReducer) Emit() []FloatPoint {\n\tif !r.prev.Nil {\n\t\t// Calculate the derivative of successive points by dividing the\n\t\t// difference of each value by the elapsed time normalized to the interval.\n\t\tdiff := r.curr.Value - r.prev.Value\n\t\telapsed := r.curr.Time - r.prev.Time\n\t\tif !r.ascending {\n\t\t\telapsed = -elapsed\n\t\t}\n\t\tvalue := diff / (float64(elapsed) / float64(r.interval.Duration))\n\n\t\t// Mark this point as read by changing the previous point to nil.\n\t\tr.prev.Nil = true\n\n\t\t// Drop negative values for non-negative derivatives.\n\t\tif r.isNonNegative && diff < 0 {\n\t\t\treturn nil\n\t\t}\n\t\treturn []FloatPoint{{Time: r.curr.Time, Value: value}}\n\t}\n\treturn nil\n}\n\n// IntegerDerivativeReducer calculates the derivative of the aggregated points.\ntype IntegerDerivativeReducer struct {\n\tinterval      Interval\n\tprev          IntegerPoint\n\tcurr          IntegerPoint\n\tisNonNegative bool\n\tascending     bool\n}\n\n// 
NewIntegerDerivativeReducer creates a new IntegerDerivativeReducer.\nfunc NewIntegerDerivativeReducer(interval Interval, isNonNegative, ascending bool) *IntegerDerivativeReducer {\n\treturn &IntegerDerivativeReducer{\n\t\tinterval:      interval,\n\t\tisNonNegative: isNonNegative,\n\t\tascending:     ascending,\n\t\tprev:          IntegerPoint{Nil: true},\n\t\tcurr:          IntegerPoint{Nil: true},\n\t}\n}\n\n// AggregateInteger aggregates a point into the reducer and updates the current window.\nfunc (r *IntegerDerivativeReducer) AggregateInteger(p *IntegerPoint) {\n\t// Skip past a point when it does not advance the stream. A joined series\n\t// may have multiple points at the same time so we will discard anything\n\t// except the first point we encounter.\n\tif !r.curr.Nil && r.curr.Time == p.Time {\n\t\treturn\n\t}\n\n\tr.prev = r.curr\n\tr.curr = *p\n}\n\n// Emit emits the derivative of the reducer at the current point.\nfunc (r *IntegerDerivativeReducer) Emit() []FloatPoint {\n\tif !r.prev.Nil {\n\t\t// Calculate the derivative of successive points by dividing the\n\t\t// difference of each value by the elapsed time normalized to the interval.\n\t\tdiff := float64(r.curr.Value - r.prev.Value)\n\t\telapsed := r.curr.Time - r.prev.Time\n\t\tif !r.ascending {\n\t\t\telapsed = -elapsed\n\t\t}\n\t\tvalue := diff / (float64(elapsed) / float64(r.interval.Duration))\n\n\t\t// Mark this point as read by changing the previous point to nil.\n\t\tr.prev.Nil = true\n\n\t\t// Drop negative values for non-negative derivatives.\n\t\tif r.isNonNegative && diff < 0 {\n\t\t\treturn nil\n\t\t}\n\t\treturn []FloatPoint{{Time: r.curr.Time, Value: value}}\n\t}\n\treturn nil\n}\n\n// FloatDifferenceReducer calculates the derivative of the aggregated points.\ntype FloatDifferenceReducer struct {\n\tisNonNegative bool\n\tprev          FloatPoint\n\tcurr          FloatPoint\n}\n\n// NewFloatDifferenceReducer creates a new FloatDifferenceReducer.\nfunc 
NewFloatDifferenceReducer(isNonNegative bool) *FloatDifferenceReducer {\n\treturn &FloatDifferenceReducer{\n\t\tisNonNegative: isNonNegative,\n\t\tprev:          FloatPoint{Nil: true},\n\t\tcurr:          FloatPoint{Nil: true},\n\t}\n}\n\n// AggregateFloat aggregates a point into the reducer and updates the current window.\nfunc (r *FloatDifferenceReducer) AggregateFloat(p *FloatPoint) {\n\t// Skip past a point when it does not advance the stream. A joined series\n\t// may have multiple points at the same time so we will discard anything\n\t// except the first point we encounter.\n\tif !r.curr.Nil && r.curr.Time == p.Time {\n\t\treturn\n\t}\n\n\tr.prev = r.curr\n\tr.curr = *p\n}\n\n// Emit emits the difference of the reducer at the current point.\nfunc (r *FloatDifferenceReducer) Emit() []FloatPoint {\n\tif !r.prev.Nil {\n\t\t// Calculate the difference of successive points.\n\t\tvalue := r.curr.Value - r.prev.Value\n\n\t\t// If it is non_negative_difference discard any negative value. Since\n\t\t// prev is still marked as unread. 
The correctness can be ensured.\n\t\tif r.isNonNegative && value < 0 {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Mark this point as read by changing the previous point to nil.\n\t\tr.prev.Nil = true\n\t\treturn []FloatPoint{{Time: r.curr.Time, Value: value}}\n\t}\n\treturn nil\n}\n\n// IntegerDifferenceReducer calculates the derivative of the aggregated points.\ntype IntegerDifferenceReducer struct {\n\tisNonNegative bool\n\tprev          IntegerPoint\n\tcurr          IntegerPoint\n}\n\n// NewIntegerDifferenceReducer creates a new IntegerDifferenceReducer.\nfunc NewIntegerDifferenceReducer(isNonNegative bool) *IntegerDifferenceReducer {\n\treturn &IntegerDifferenceReducer{\n\t\tisNonNegative: isNonNegative,\n\t\tprev:          IntegerPoint{Nil: true},\n\t\tcurr:          IntegerPoint{Nil: true},\n\t}\n}\n\n// AggregateInteger aggregates a point into the reducer and updates the current window.\nfunc (r *IntegerDifferenceReducer) AggregateInteger(p *IntegerPoint) {\n\t// Skip past a point when it does not advance the stream. A joined series\n\t// may have multiple points at the same time so we will discard anything\n\t// except the first point we encounter.\n\tif !r.curr.Nil && r.curr.Time == p.Time {\n\t\treturn\n\t}\n\n\tr.prev = r.curr\n\tr.curr = *p\n}\n\n// Emit emits the difference of the reducer at the current point.\nfunc (r *IntegerDifferenceReducer) Emit() []IntegerPoint {\n\tif !r.prev.Nil {\n\t\t// Calculate the difference of successive points.\n\t\tvalue := r.curr.Value - r.prev.Value\n\n\t\t// If it is non_negative_difference discard any negative value. Since\n\t\t// prev is still marked as unread. 
The correctness can be ensured.\n\t\tif r.isNonNegative && value < 0 {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Mark this point as read by changing the previous point to nil.\n\t\tr.prev.Nil = true\n\n\t\treturn []IntegerPoint{{Time: r.curr.Time, Value: value}}\n\t}\n\treturn nil\n}\n\n// FloatMovingAverageReducer calculates the moving average of the aggregated points.\ntype FloatMovingAverageReducer struct {\n\tpos  int\n\tsum  float64\n\ttime int64\n\tbuf  []float64\n}\n\n// NewFloatMovingAverageReducer creates a new FloatMovingAverageReducer.\nfunc NewFloatMovingAverageReducer(n int) *FloatMovingAverageReducer {\n\treturn &FloatMovingAverageReducer{\n\t\tbuf: make([]float64, 0, n),\n\t}\n}\n\n// AggregateFloat aggregates a point into the reducer and updates the current window.\nfunc (r *FloatMovingAverageReducer) AggregateFloat(p *FloatPoint) {\n\tif len(r.buf) != cap(r.buf) {\n\t\tr.buf = append(r.buf, p.Value)\n\t} else {\n\t\tr.sum -= r.buf[r.pos]\n\t\tr.buf[r.pos] = p.Value\n\t}\n\tr.sum += p.Value\n\tr.time = p.Time\n\tr.pos++\n\tif r.pos >= cap(r.buf) {\n\t\tr.pos = 0\n\t}\n}\n\n// Emit emits the moving average of the current window. 
Emit should be called\n// after every call to AggregateFloat and it will produce one point if there\n// is enough data to fill a window, otherwise it will produce zero points.\nfunc (r *FloatMovingAverageReducer) Emit() []FloatPoint {\n\tif len(r.buf) != cap(r.buf) {\n\t\treturn []FloatPoint{}\n\t}\n\treturn []FloatPoint{\n\t\t{\n\t\t\tValue:      r.sum / float64(len(r.buf)),\n\t\t\tTime:       r.time,\n\t\t\tAggregated: uint32(len(r.buf)),\n\t\t},\n\t}\n}\n\n// IntegerMovingAverageReducer calculates the moving average of the aggregated points.\ntype IntegerMovingAverageReducer struct {\n\tpos  int\n\tsum  int64\n\ttime int64\n\tbuf  []int64\n}\n\n// NewIntegerMovingAverageReducer creates a new IntegerMovingAverageReducer.\nfunc NewIntegerMovingAverageReducer(n int) *IntegerMovingAverageReducer {\n\treturn &IntegerMovingAverageReducer{\n\t\tbuf: make([]int64, 0, n),\n\t}\n}\n\n// AggregateInteger aggregates a point into the reducer and updates the current window.\nfunc (r *IntegerMovingAverageReducer) AggregateInteger(p *IntegerPoint) {\n\tif len(r.buf) != cap(r.buf) {\n\t\tr.buf = append(r.buf, p.Value)\n\t} else {\n\t\tr.sum -= r.buf[r.pos]\n\t\tr.buf[r.pos] = p.Value\n\t}\n\tr.sum += p.Value\n\tr.time = p.Time\n\tr.pos++\n\tif r.pos >= cap(r.buf) {\n\t\tr.pos = 0\n\t}\n}\n\n// Emit emits the moving average of the current window. 
Emit should be called\n// after every call to AggregateInteger and it will produce one point if there\n// is enough data to fill a window, otherwise it will produce zero points.\nfunc (r *IntegerMovingAverageReducer) Emit() []FloatPoint {\n\tif len(r.buf) != cap(r.buf) {\n\t\treturn []FloatPoint{}\n\t}\n\treturn []FloatPoint{\n\t\t{\n\t\t\tValue:      float64(r.sum) / float64(len(r.buf)),\n\t\t\tTime:       r.time,\n\t\t\tAggregated: uint32(len(r.buf)),\n\t\t},\n\t}\n}\n\n// FloatCumulativeSumReducer cumulates the values from each point.\ntype FloatCumulativeSumReducer struct {\n\tcurr FloatPoint\n}\n\n// NewFloatCumulativeSumReducer creates a new FloatCumulativeSumReducer.\nfunc NewFloatCumulativeSumReducer() *FloatCumulativeSumReducer {\n\treturn &FloatCumulativeSumReducer{\n\t\tcurr: FloatPoint{Nil: true},\n\t}\n}\n\nfunc (r *FloatCumulativeSumReducer) AggregateFloat(p *FloatPoint) {\n\tr.curr.Value += p.Value\n\tr.curr.Time = p.Time\n\tr.curr.Nil = false\n}\n\nfunc (r *FloatCumulativeSumReducer) Emit() []FloatPoint {\n\tvar pts []FloatPoint\n\tif !r.curr.Nil {\n\t\tpts = []FloatPoint{r.curr}\n\t}\n\treturn pts\n}\n\n// IntegerCumulativeSumReducer cumulates the values from each point.\ntype IntegerCumulativeSumReducer struct {\n\tcurr IntegerPoint\n}\n\n// NewIntegerCumulativeSumReducer creates a new IntegerCumulativeSumReducer.\nfunc NewIntegerCumulativeSumReducer() *IntegerCumulativeSumReducer {\n\treturn &IntegerCumulativeSumReducer{\n\t\tcurr: IntegerPoint{Nil: true},\n\t}\n}\n\nfunc (r *IntegerCumulativeSumReducer) AggregateInteger(p *IntegerPoint) {\n\tr.curr.Value += p.Value\n\tr.curr.Time = p.Time\n\tr.curr.Nil = false\n}\n\nfunc (r *IntegerCumulativeSumReducer) Emit() []IntegerPoint {\n\tvar pts []IntegerPoint\n\tif !r.curr.Nil {\n\t\tpts = []IntegerPoint{r.curr}\n\t}\n\treturn pts\n}\n\n// FloatHoltWintersReducer forecasts a series into the future.\n// This is done using the Holt-Winters damped method.\n//    1. 
Using the series the initial values are calculated using a SSE.\n//    2. The series is forecasted into the future using the iterative relations.\ntype FloatHoltWintersReducer struct {\n\t// Season period\n\tm        int\n\tseasonal bool\n\n\t// Horizon\n\th int\n\n\t// Interval between points\n\tinterval int64\n\t// interval / 2 -- used to perform rounding\n\thalfInterval int64\n\n\t// Whether to include all data or only future values\n\tincludeFitData bool\n\n\t// NelderMead optimizer\n\toptim *neldermead.Optimizer\n\t// Small difference bound for the optimizer\n\tepsilon float64\n\n\ty      []float64\n\tpoints []FloatPoint\n}\n\nconst (\n\t// Arbitrary weight for initializing some intial guesses.\n\t// This should be in the  range [0,1]\n\thwWeight = 0.5\n\t// Epsilon value for the minimization process\n\thwDefaultEpsilon = 1.0e-4\n\t// Define a grid of initial guesses for the parameters: alpha, beta, gamma, and phi.\n\t// Keep in mind that this grid is N^4 so we should keep N small\n\t// The starting lower guess\n\thwGuessLower = 0.3\n\t//  The upper bound on the grid\n\thwGuessUpper = 1.0\n\t// The step between guesses\n\thwGuessStep = 0.4\n)\n\n// NewFloatHoltWintersReducer creates a new FloatHoltWintersReducer.\nfunc NewFloatHoltWintersReducer(h, m int, includeFitData bool, interval time.Duration) *FloatHoltWintersReducer {\n\tseasonal := true\n\tif m < 2 {\n\t\tseasonal = false\n\t}\n\treturn &FloatHoltWintersReducer{\n\t\th:              h,\n\t\tm:              m,\n\t\tseasonal:       seasonal,\n\t\tincludeFitData: includeFitData,\n\t\tinterval:       int64(interval),\n\t\thalfInterval:   int64(interval) / 2,\n\t\toptim:          neldermead.New(),\n\t\tepsilon:        hwDefaultEpsilon,\n\t}\n}\n\nfunc (r *FloatHoltWintersReducer) aggregate(time int64, value float64) {\n\tr.points = append(r.points, FloatPoint{\n\t\tTime:  time,\n\t\tValue: value,\n\t})\n}\n\n// AggregateFloat aggregates a point into the reducer and updates the current window.\nfunc (r 
*FloatHoltWintersReducer) AggregateFloat(p *FloatPoint) {\n\tr.aggregate(p.Time, p.Value)\n}\n\n// AggregateInteger aggregates a point into the reducer and updates the current window.\nfunc (r *FloatHoltWintersReducer) AggregateInteger(p *IntegerPoint) {\n\tr.aggregate(p.Time, float64(p.Value))\n}\n\nfunc (r *FloatHoltWintersReducer) roundTime(t int64) int64 {\n\t// Overflow safe round function\n\tremainder := t % r.interval\n\tif remainder > r.halfInterval {\n\t\t// Round up\n\t\treturn (t/r.interval + 1) * r.interval\n\t}\n\t// Round down\n\treturn (t / r.interval) * r.interval\n}\n\n// Emit returns the points generated by the HoltWinters algorithm.\nfunc (r *FloatHoltWintersReducer) Emit() []FloatPoint {\n\tif l := len(r.points); l < 2 || r.seasonal && l < r.m || r.h <= 0 {\n\t\treturn nil\n\t}\n\t// First fill in r.y with values and NaNs for missing values\n\tstart, stop := r.roundTime(r.points[0].Time), r.roundTime(r.points[len(r.points)-1].Time)\n\tcount := (stop - start) / r.interval\n\tif count <= 0 {\n\t\treturn nil\n\t}\n\tr.y = make([]float64, 1, count)\n\tr.y[0] = r.points[0].Value\n\tt := r.roundTime(r.points[0].Time)\n\tfor _, p := range r.points[1:] {\n\t\trounded := r.roundTime(p.Time)\n\t\tif rounded <= t {\n\t\t\t// Drop values that occur for the same time bucket\n\t\t\tcontinue\n\t\t}\n\t\tt += r.interval\n\t\t// Add any missing values before the next point\n\t\tfor rounded != t {\n\t\t\t// Add in a NaN so we can skip it later.\n\t\t\tr.y = append(r.y, math.NaN())\n\t\t\tt += r.interval\n\t\t}\n\t\tr.y = append(r.y, p.Value)\n\t}\n\n\t// Seasonality\n\tm := r.m\n\n\t// Starting guesses\n\t// NOTE: Since these values are guesses\n\t// in the cases where we were missing data,\n\t// we can just skip the value and call it good.\n\n\tl0 := 0.0\n\tif r.seasonal {\n\t\tfor i := 0; i < m; i++ {\n\t\t\tif !math.IsNaN(r.y[i]) {\n\t\t\t\tl0 += (1 / float64(m)) * r.y[i]\n\t\t\t}\n\t\t}\n\t} else {\n\t\tl0 += hwWeight * r.y[0]\n\t}\n\n\tb0 := 0.0\n\tif 
r.seasonal {\n\t\tfor i := 0; i < m && m+i < len(r.y); i++ {\n\t\t\tif !math.IsNaN(r.y[i]) && !math.IsNaN(r.y[m+i]) {\n\t\t\t\tb0 += 1 / float64(m*m) * (r.y[m+i] - r.y[i])\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif !math.IsNaN(r.y[1]) {\n\t\t\tb0 = hwWeight * (r.y[1] - r.y[0])\n\t\t}\n\t}\n\n\tvar s []float64\n\tif r.seasonal {\n\t\ts = make([]float64, m)\n\t\tfor i := 0; i < m; i++ {\n\t\t\tif !math.IsNaN(r.y[i]) {\n\t\t\t\ts[i] = r.y[i] / l0\n\t\t\t} else {\n\t\t\t\ts[i] = 0\n\t\t\t}\n\t\t}\n\t}\n\n\tparameters := make([]float64, 6+len(s))\n\tparameters[4] = l0\n\tparameters[5] = b0\n\to := len(parameters) - len(s)\n\tfor i := range s {\n\t\tparameters[i+o] = s[i]\n\t}\n\n\t// Determine best fit for the various parameters\n\tminSSE := math.Inf(1)\n\tvar bestParams []float64\n\tfor alpha := hwGuessLower; alpha < hwGuessUpper; alpha += hwGuessStep {\n\t\tfor beta := hwGuessLower; beta < hwGuessUpper; beta += hwGuessStep {\n\t\t\tfor gamma := hwGuessLower; gamma < hwGuessUpper; gamma += hwGuessStep {\n\t\t\t\tfor phi := hwGuessLower; phi < hwGuessUpper; phi += hwGuessStep {\n\t\t\t\t\tparameters[0] = alpha\n\t\t\t\t\tparameters[1] = beta\n\t\t\t\t\tparameters[2] = gamma\n\t\t\t\t\tparameters[3] = phi\n\t\t\t\t\tsse, params := r.optim.Optimize(r.sse, parameters, r.epsilon, 1)\n\t\t\t\t\tif sse < minSSE || bestParams == nil {\n\t\t\t\t\t\tminSSE = sse\n\t\t\t\t\t\tbestParams = params\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Forecast\n\tforecasted := r.forecast(r.h, bestParams)\n\tvar points []FloatPoint\n\tif r.includeFitData {\n\t\tstart := r.points[0].Time\n\t\tpoints = make([]FloatPoint, 0, len(forecasted))\n\t\tfor i, v := range forecasted {\n\t\t\tif !math.IsNaN(v) {\n\t\t\t\tt := start + r.interval*(int64(i))\n\t\t\t\tpoints = append(points, FloatPoint{\n\t\t\t\t\tValue: v,\n\t\t\t\t\tTime:  t,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t} else {\n\t\tstop := r.points[len(r.points)-1].Time\n\t\tpoints = make([]FloatPoint, 0, r.h)\n\t\tfor i, v := range 
forecasted[len(r.y):] {\n\t\t\tif !math.IsNaN(v) {\n\t\t\t\tt := stop + r.interval*(int64(i)+1)\n\t\t\t\tpoints = append(points, FloatPoint{\n\t\t\t\t\tValue: v,\n\t\t\t\t\tTime:  t,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\t// Clear data set\n\tr.y = r.y[0:0]\n\treturn points\n}\n\n// Using the recursive relations compute the next values\nfunc (r *FloatHoltWintersReducer) next(alpha, beta, gamma, phi, phiH, yT, lTp, bTp, sTm, sTmh float64) (yTh, lT, bT, sT float64) {\n\tlT = alpha*(yT/sTm) + (1-alpha)*(lTp+phi*bTp)\n\tbT = beta*(lT-lTp) + (1-beta)*phi*bTp\n\tsT = gamma*(yT/(lTp+phi*bTp)) + (1-gamma)*sTm\n\tyTh = (lT + phiH*bT) * sTmh\n\treturn\n}\n\n// Forecast the data h points into the future.\nfunc (r *FloatHoltWintersReducer) forecast(h int, params []float64) []float64 {\n\t// Constrain parameters\n\tr.constrain(params)\n\n\tyT := r.y[0]\n\n\tphi := params[3]\n\tphiH := phi\n\n\tlT := params[4]\n\tbT := params[5]\n\n\t// seasonals is a ring buffer of past sT values\n\tvar seasonals []float64\n\tvar m, so int\n\tif r.seasonal {\n\t\tseasonals = params[6:]\n\t\tm = len(params[6:])\n\t\tif m == 1 {\n\t\t\tseasonals[0] = 1\n\t\t}\n\t\t// Season index offset\n\t\tso = m - 1\n\t}\n\n\tforecasted := make([]float64, len(r.y)+h)\n\tforecasted[0] = yT\n\tl := len(r.y)\n\tvar hm int\n\tstm, stmh := 1.0, 1.0\n\tfor t := 1; t < l+h; t++ {\n\t\tif r.seasonal {\n\t\t\thm = t % m\n\t\t\tstm = seasonals[(t-m+so)%m]\n\t\t\tstmh = seasonals[(t-m+hm+so)%m]\n\t\t}\n\t\tvar sT float64\n\t\tyT, lT, bT, sT = r.next(\n\t\t\tparams[0], // alpha\n\t\t\tparams[1], // beta\n\t\t\tparams[2], // gamma\n\t\t\tphi,\n\t\t\tphiH,\n\t\t\tyT,\n\t\t\tlT,\n\t\t\tbT,\n\t\t\tstm,\n\t\t\tstmh,\n\t\t)\n\t\tphiH += math.Pow(phi, float64(t))\n\n\t\tif r.seasonal {\n\t\t\tseasonals[(t+so)%m] = sT\n\t\t\tso++\n\t\t}\n\n\t\tforecasted[t] = yT\n\t}\n\treturn forecasted\n}\n\n// Compute sum squared error for the given parameters.\nfunc (r *FloatHoltWintersReducer) sse(params []float64) float64 {\n\tsse := 
0.0\n\tforecasted := r.forecast(0, params)\n\tfor i := range forecasted {\n\t\t// Skip missing values since we cannot use them to compute an error.\n\t\tif !math.IsNaN(r.y[i]) {\n\t\t\t// Compute error\n\t\t\tif math.IsNaN(forecasted[i]) {\n\t\t\t\t// Penalize forecasted NaNs\n\t\t\t\treturn math.Inf(1)\n\t\t\t}\n\t\t\tdiff := forecasted[i] - r.y[i]\n\t\t\tsse += diff * diff\n\t\t}\n\t}\n\treturn sse\n}\n\n// Constrain alpha, beta, gamma, phi in the range [0, 1]\nfunc (r *FloatHoltWintersReducer) constrain(x []float64) {\n\t// alpha\n\tif x[0] > 1 {\n\t\tx[0] = 1\n\t}\n\tif x[0] < 0 {\n\t\tx[0] = 0\n\t}\n\t// beta\n\tif x[1] > 1 {\n\t\tx[1] = 1\n\t}\n\tif x[1] < 0 {\n\t\tx[1] = 0\n\t}\n\t// gamma\n\tif x[2] > 1 {\n\t\tx[2] = 1\n\t}\n\tif x[2] < 0 {\n\t\tx[2] = 0\n\t}\n\t// phi\n\tif x[3] > 1 {\n\t\tx[3] = 1\n\t}\n\tif x[3] < 0 {\n\t\tx[3] = 0\n\t}\n}\n\n// FloatIntegralReducer calculates the time-integral of the aggregated points.\ntype FloatIntegralReducer struct {\n\tinterval Interval\n\tsum      float64\n\tprev     FloatPoint\n\twindow   struct {\n\t\tstart int64\n\t\tend   int64\n\t}\n\tch  chan FloatPoint\n\topt IteratorOptions\n}\n\n// NewFloatIntegralReducer creates a new FloatIntegralReducer.\nfunc NewFloatIntegralReducer(interval Interval, opt IteratorOptions) *FloatIntegralReducer {\n\treturn &FloatIntegralReducer{\n\t\tinterval: interval,\n\t\tprev:     FloatPoint{Nil: true},\n\t\tch:       make(chan FloatPoint, 1),\n\t\topt:      opt,\n\t}\n}\n\n// AggregateFloat aggregates a point into the reducer.\nfunc (r *FloatIntegralReducer) AggregateFloat(p *FloatPoint) {\n\t// If this is the first point, just save it\n\tif r.prev.Nil {\n\t\tr.prev = *p\n\t\tif !r.opt.Interval.IsZero() {\n\t\t\t// Record the end of the time interval.\n\t\t\t// We do not care for whether the last number is inclusive or exclusive\n\t\t\t// because we treat both the same for the involved math.\n\t\t\tif r.opt.Ascending {\n\t\t\t\tr.window.start, r.window.end = 
r.opt.Window(p.Time)\n\t\t\t} else {\n\t\t\t\tr.window.end, r.window.start = r.opt.Window(p.Time)\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\t// If this point has the same timestamp as the previous one,\n\t// skip the point. Points sent into this reducer are expected\n\t// to be fed in order.\n\tif r.prev.Time == p.Time {\n\t\tr.prev = *p\n\t\treturn\n\t} else if !r.opt.Interval.IsZero() && ((r.opt.Ascending && p.Time >= r.window.end) || (!r.opt.Ascending && p.Time <= r.window.end)) {\n\t\t// If our previous time is not equal to the window, we need to\n\t\t// interpolate the area at the end of this interval.\n\t\tif r.prev.Time != r.window.end {\n\t\t\tvalue := linearFloat(r.window.end, r.prev.Time, p.Time, r.prev.Value, p.Value)\n\t\t\telapsed := float64(r.window.end-r.prev.Time) / float64(r.interval.Duration)\n\t\t\tr.sum += 0.5 * (value + r.prev.Value) * elapsed\n\n\t\t\tr.prev.Value = value\n\t\t\tr.prev.Time = r.window.end\n\t\t}\n\n\t\t// Emit the current point through the channel and then clear it.\n\t\tr.ch <- FloatPoint{Time: r.window.start, Value: r.sum}\n\t\tif r.opt.Ascending {\n\t\t\tr.window.start, r.window.end = r.opt.Window(p.Time)\n\t\t} else {\n\t\t\tr.window.end, r.window.start = r.opt.Window(p.Time)\n\t\t}\n\t\tr.sum = 0.0\n\t}\n\n\t// Normal operation: update the sum using the trapezium rule\n\telapsed := float64(p.Time-r.prev.Time) / float64(r.interval.Duration)\n\tr.sum += 0.5 * (p.Value + r.prev.Value) * elapsed\n\tr.prev = *p\n}\n\n// Emit emits the time-integral of the aggregated points as a single point.\n// InfluxQL convention dictates that outside a group-by-time clause we return\n// a timestamp of zero.  
Within a group-by-time, we can set the time to ZeroTime\n// and a higher level will change it to the start of the time group.\nfunc (r *FloatIntegralReducer) Emit() []FloatPoint {\n\tselect {\n\tcase pt, ok := <-r.ch:\n\t\tif !ok {\n\t\t\treturn nil\n\t\t}\n\t\treturn []FloatPoint{pt}\n\tdefault:\n\t\treturn nil\n\t}\n}\n\n// Close flushes any in progress points to ensure any remaining points are\n// emitted.\nfunc (r *FloatIntegralReducer) Close() error {\n\t// If our last point is at the start time, then discard this point since\n\t// there is no area within this bucket. Otherwise, send off what we\n\t// currently have as the final point.\n\tif !r.prev.Nil && r.prev.Time != r.window.start {\n\t\tr.ch <- FloatPoint{Time: r.window.start, Value: r.sum}\n\t}\n\tclose(r.ch)\n\treturn nil\n}\n\n// IntegerIntegralReducer calculates the time-integral of the aggregated points.\ntype IntegerIntegralReducer struct {\n\tinterval Interval\n\tsum      float64\n\tprev     IntegerPoint\n\twindow   struct {\n\t\tstart int64\n\t\tend   int64\n\t}\n\tch  chan FloatPoint\n\topt IteratorOptions\n}\n\n// NewIntegerIntegralReducer creates a new IntegerIntegralReducer.\nfunc NewIntegerIntegralReducer(interval Interval, opt IteratorOptions) *IntegerIntegralReducer {\n\treturn &IntegerIntegralReducer{\n\t\tinterval: interval,\n\t\tprev:     IntegerPoint{Nil: true},\n\t\tch:       make(chan FloatPoint, 1),\n\t\topt:      opt,\n\t}\n}\n\n// AggregateInteger aggregates a point into the reducer.\nfunc (r *IntegerIntegralReducer) AggregateInteger(p *IntegerPoint) {\n\t// If this is the first point, just save it\n\tif r.prev.Nil {\n\t\tr.prev = *p\n\n\t\t// Record the end of the time interval.\n\t\t// We do not care for whether the last number is inclusive or exclusive\n\t\t// because we treat both the same for the involved math.\n\t\tif r.opt.Ascending {\n\t\t\tr.window.start, r.window.end = r.opt.Window(p.Time)\n\t\t} else {\n\t\t\tr.window.end, r.window.start = 
r.opt.Window(p.Time)\n\t\t}\n\n\t\t// If we see the minimum allowable time, set the time to zero so we don't\n\t\t// break the default returned time for aggregate queries without times.\n\t\tif r.window.start == MinTime {\n\t\t\tr.window.start = 0\n\t\t}\n\t\treturn\n\t}\n\n\t// If this point has the same timestamp as the previous one,\n\t// skip the point. Points sent into this reducer are expected\n\t// to be fed in order.\n\tvalue := float64(p.Value)\n\tif r.prev.Time == p.Time {\n\t\tr.prev = *p\n\t\treturn\n\t} else if (r.opt.Ascending && p.Time >= r.window.end) || (!r.opt.Ascending && p.Time <= r.window.end) {\n\t\t// If our previous time is not equal to the window, we need to\n\t\t// interpolate the area at the end of this interval.\n\t\tif r.prev.Time != r.window.end {\n\t\t\tvalue = linearFloat(r.window.end, r.prev.Time, p.Time, float64(r.prev.Value), value)\n\t\t\telapsed := float64(r.window.end-r.prev.Time) / float64(r.interval.Duration)\n\t\t\tr.sum += 0.5 * (value + float64(r.prev.Value)) * elapsed\n\n\t\t\tr.prev.Time = r.window.end\n\t\t}\n\n\t\t// Emit the current point through the channel and then clear it.\n\t\tr.ch <- FloatPoint{Time: r.window.start, Value: r.sum}\n\t\tif r.opt.Ascending {\n\t\t\tr.window.start, r.window.end = r.opt.Window(p.Time)\n\t\t} else {\n\t\t\tr.window.end, r.window.start = r.opt.Window(p.Time)\n\t\t}\n\t\tr.sum = 0.0\n\t}\n\n\t// Normal operation: update the sum using the trapezium rule\n\telapsed := float64(p.Time-r.prev.Time) / float64(r.interval.Duration)\n\tr.sum += 0.5 * (value + float64(r.prev.Value)) * elapsed\n\tr.prev = *p\n}\n\n// Emit emits the time-integral of the aggregated points as a single FLOAT point\n// InfluxQL convention dictates that outside a group-by-time clause we return\n// a timestamp of zero.  
Within a group-by-time, we can set the time to ZeroTime\n// and a higher level will change it to the start of the time group.\nfunc (r *IntegerIntegralReducer) Emit() []FloatPoint {\n\tselect {\n\tcase pt, ok := <-r.ch:\n\t\tif !ok {\n\t\t\treturn nil\n\t\t}\n\t\treturn []FloatPoint{pt}\n\tdefault:\n\t\treturn nil\n\t}\n}\n\n// Close flushes any in progress points to ensure any remaining points are\n// emitted.\nfunc (r *IntegerIntegralReducer) Close() error {\n\t// If our last point is at the start time, then discard this point since\n\t// there is no area within this bucket. Otherwise, send off what we\n\t// currently have as the final point.\n\tif !r.prev.Nil && r.prev.Time != r.window.start {\n\t\tr.ch <- FloatPoint{Time: r.window.start, Value: r.sum}\n\t}\n\tclose(r.ch)\n\treturn nil\n}\n\ntype FloatTopReducer struct {\n\th *floatPointsByFunc\n}\n\nfunc NewFloatTopReducer(n int) *FloatTopReducer {\n\treturn &FloatTopReducer{\n\t\th: floatPointsSortBy(make([]FloatPoint, 0, n), func(a, b *FloatPoint) bool {\n\t\t\tif a.Value != b.Value {\n\t\t\t\treturn a.Value < b.Value\n\t\t\t}\n\t\t\treturn a.Time > b.Time\n\t\t}),\n\t}\n}\n\nfunc (r *FloatTopReducer) AggregateFloat(p *FloatPoint) {\n\tif r.h.Len() == cap(r.h.points) {\n\t\t// Compare the minimum point and the aggregated point. If our value is\n\t\t// larger, replace the current min value.\n\t\tif !r.h.cmp(&r.h.points[0], p) {\n\t\t\treturn\n\t\t}\n\t\tr.h.points[0] = *p\n\t\theap.Fix(r.h, 0)\n\t\treturn\n\t}\n\theap.Push(r.h, *p)\n}\n\nfunc (r *FloatTopReducer) Emit() []FloatPoint {\n\t// Ensure the points are sorted with the maximum value last. 
While the\n\t// first point may be the minimum value, the rest is not guaranteed to be\n\t// in any particular order while it is a heap.\n\tpoints := make([]FloatPoint, len(r.h.points))\n\tfor i, p := range r.h.points {\n\t\tp.Aggregated = 0\n\t\tpoints[i] = p\n\t}\n\th := floatPointsByFunc{points: points, cmp: r.h.cmp}\n\tsort.Sort(sort.Reverse(&h))\n\treturn points\n}\n\ntype IntegerTopReducer struct {\n\th *integerPointsByFunc\n}\n\nfunc NewIntegerTopReducer(n int) *IntegerTopReducer {\n\treturn &IntegerTopReducer{\n\t\th: integerPointsSortBy(make([]IntegerPoint, 0, n), func(a, b *IntegerPoint) bool {\n\t\t\tif a.Value != b.Value {\n\t\t\t\treturn a.Value < b.Value\n\t\t\t}\n\t\t\treturn a.Time > b.Time\n\t\t}),\n\t}\n}\n\nfunc (r *IntegerTopReducer) AggregateInteger(p *IntegerPoint) {\n\tif r.h.Len() == cap(r.h.points) {\n\t\t// Compare the minimum point and the aggregated point. If our value is\n\t\t// larger, replace the current min value.\n\t\tif !r.h.cmp(&r.h.points[0], p) {\n\t\t\treturn\n\t\t}\n\t\tr.h.points[0] = *p\n\t\theap.Fix(r.h, 0)\n\t\treturn\n\t}\n\theap.Push(r.h, *p)\n}\n\nfunc (r *IntegerTopReducer) Emit() []IntegerPoint {\n\t// Ensure the points are sorted with the maximum value last. 
While the\n\t// first point may be the minimum value, the rest is not guaranteed to be\n\t// in any particular order while it is a heap.\n\tpoints := make([]IntegerPoint, len(r.h.points))\n\tfor i, p := range r.h.points {\n\t\tp.Aggregated = 0\n\t\tpoints[i] = p\n\t}\n\th := integerPointsByFunc{points: points, cmp: r.h.cmp}\n\tsort.Sort(sort.Reverse(&h))\n\treturn points\n}\n\ntype FloatBottomReducer struct {\n\th *floatPointsByFunc\n}\n\nfunc NewFloatBottomReducer(n int) *FloatBottomReducer {\n\treturn &FloatBottomReducer{\n\t\th: floatPointsSortBy(make([]FloatPoint, 0, n), func(a, b *FloatPoint) bool {\n\t\t\tif a.Value != b.Value {\n\t\t\t\treturn a.Value > b.Value\n\t\t\t}\n\t\t\treturn a.Time > b.Time\n\t\t}),\n\t}\n}\n\nfunc (r *FloatBottomReducer) AggregateFloat(p *FloatPoint) {\n\tif r.h.Len() == cap(r.h.points) {\n\t\t// Compare the minimum point and the aggregated point. If our value is\n\t\t// larger, replace the current min value.\n\t\tif !r.h.cmp(&r.h.points[0], p) {\n\t\t\treturn\n\t\t}\n\t\tr.h.points[0] = *p\n\t\theap.Fix(r.h, 0)\n\t\treturn\n\t}\n\theap.Push(r.h, *p)\n}\n\nfunc (r *FloatBottomReducer) Emit() []FloatPoint {\n\t// Ensure the points are sorted with the maximum value last. 
While the\n\t// first point may be the minimum value, the rest is not guaranteed to be\n\t// in any particular order while it is a heap.\n\tpoints := make([]FloatPoint, len(r.h.points))\n\tfor i, p := range r.h.points {\n\t\tp.Aggregated = 0\n\t\tpoints[i] = p\n\t}\n\th := floatPointsByFunc{points: points, cmp: r.h.cmp}\n\tsort.Sort(sort.Reverse(&h))\n\treturn points\n}\n\ntype IntegerBottomReducer struct {\n\th *integerPointsByFunc\n}\n\nfunc NewIntegerBottomReducer(n int) *IntegerBottomReducer {\n\treturn &IntegerBottomReducer{\n\t\th: integerPointsSortBy(make([]IntegerPoint, 0, n), func(a, b *IntegerPoint) bool {\n\t\t\tif a.Value != b.Value {\n\t\t\t\treturn a.Value > b.Value\n\t\t\t}\n\t\t\treturn a.Time > b.Time\n\t\t}),\n\t}\n}\n\nfunc (r *IntegerBottomReducer) AggregateInteger(p *IntegerPoint) {\n\tif r.h.Len() == cap(r.h.points) {\n\t\t// Compare the minimum point and the aggregated point. If our value is\n\t\t// larger, replace the current min value.\n\t\tif !r.h.cmp(&r.h.points[0], p) {\n\t\t\treturn\n\t\t}\n\t\tr.h.points[0] = *p\n\t\theap.Fix(r.h, 0)\n\t\treturn\n\t}\n\theap.Push(r.h, *p)\n}\n\nfunc (r *IntegerBottomReducer) Emit() []IntegerPoint {\n\t// Ensure the points are sorted with the maximum value last. While the\n\t// first point may be the minimum value, the rest is not guaranteed to be\n\t// in any particular order while it is a heap.\n\tpoints := make([]IntegerPoint, len(r.h.points))\n\tfor i, p := range r.h.points {\n\t\tp.Aggregated = 0\n\t\tpoints[i] = p\n\t}\n\th := integerPointsByFunc{points: points, cmp: r.h.cmp}\n\tsort.Sort(sort.Reverse(&h))\n\treturn points\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/functions_test.go",
    "content": "package influxql_test\n\nimport (\n\t\"math\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/pkg/deep\"\n)\n\nfunc almostEqual(got, exp float64) bool {\n\treturn math.Abs(got-exp) < 1e-5 && !math.IsNaN(got)\n}\n\nfunc TestHoltWinters_AusTourists(t *testing.T) {\n\thw := influxql.NewFloatHoltWintersReducer(10, 4, false, 1)\n\t// Dataset from http://www.inside-r.org/packages/cran/fpp/docs/austourists\n\taustourists := []influxql.FloatPoint{\n\t\t{Time: 1, Value: 30.052513},\n\t\t{Time: 2, Value: 19.148496},\n\t\t{Time: 3, Value: 25.317692},\n\t\t{Time: 4, Value: 27.591437},\n\t\t{Time: 5, Value: 32.076456},\n\t\t{Time: 6, Value: 23.487961},\n\t\t{Time: 7, Value: 28.47594},\n\t\t{Time: 8, Value: 35.123753},\n\t\t{Time: 9, Value: 36.838485},\n\t\t{Time: 10, Value: 25.007017},\n\t\t{Time: 11, Value: 30.72223},\n\t\t{Time: 12, Value: 28.693759},\n\t\t{Time: 13, Value: 36.640986},\n\t\t{Time: 14, Value: 23.824609},\n\t\t{Time: 15, Value: 29.311683},\n\t\t{Time: 16, Value: 31.770309},\n\t\t{Time: 17, Value: 35.177877},\n\t\t{Time: 18, Value: 19.775244},\n\t\t{Time: 19, Value: 29.60175},\n\t\t{Time: 20, Value: 34.538842},\n\t\t{Time: 21, Value: 41.273599},\n\t\t{Time: 22, Value: 26.655862},\n\t\t{Time: 23, Value: 28.279859},\n\t\t{Time: 24, Value: 35.191153},\n\t\t{Time: 25, Value: 41.727458},\n\t\t{Time: 26, Value: 24.04185},\n\t\t{Time: 27, Value: 32.328103},\n\t\t{Time: 28, Value: 37.328708},\n\t\t{Time: 29, Value: 46.213153},\n\t\t{Time: 30, Value: 29.346326},\n\t\t{Time: 31, Value: 36.48291},\n\t\t{Time: 32, Value: 42.977719},\n\t\t{Time: 33, Value: 48.901525},\n\t\t{Time: 34, Value: 31.180221},\n\t\t{Time: 35, Value: 37.717881},\n\t\t{Time: 36, Value: 40.420211},\n\t\t{Time: 37, Value: 51.206863},\n\t\t{Time: 38, Value: 31.887228},\n\t\t{Time: 39, Value: 40.978263},\n\t\t{Time: 40, Value: 43.772491},\n\t\t{Time: 41, Value: 55.558567},\n\t\t{Time: 42, 
Value: 33.850915},\n\t\t{Time: 43, Value: 42.076383},\n\t\t{Time: 44, Value: 45.642292},\n\t\t{Time: 45, Value: 59.76678},\n\t\t{Time: 46, Value: 35.191877},\n\t\t{Time: 47, Value: 44.319737},\n\t\t{Time: 48, Value: 47.913736},\n\t}\n\n\tfor _, p := range austourists {\n\t\thw.AggregateFloat(&p)\n\t}\n\tpoints := hw.Emit()\n\n\tforecasted := []influxql.FloatPoint{\n\t\t{Time: 49, Value: 51.85064132137853},\n\t\t{Time: 50, Value: 43.26055282315273},\n\t\t{Time: 51, Value: 41.827258044814464},\n\t\t{Time: 52, Value: 54.3990354591749},\n\t\t{Time: 53, Value: 54.62334472770803},\n\t\t{Time: 54, Value: 45.57155693625209},\n\t\t{Time: 55, Value: 44.06051240252263},\n\t\t{Time: 56, Value: 57.30029870759433},\n\t\t{Time: 57, Value: 57.53591513519172},\n\t\t{Time: 58, Value: 47.999008139396096},\n\t}\n\n\tif exp, got := len(forecasted), len(points); exp != got {\n\t\tt.Fatalf(\"unexpected number of points emitted: got %d exp %d\", got, exp)\n\t}\n\n\tfor i := range forecasted {\n\t\tif exp, got := forecasted[i].Time, points[i].Time; got != exp {\n\t\t\tt.Errorf(\"unexpected time on points[%d] got %v exp %v\", i, got, exp)\n\t\t}\n\t\tif exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {\n\t\t\tt.Errorf(\"unexpected value on points[%d] got %v exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestHoltWinters_AusTourists_Missing(t *testing.T) {\n\thw := influxql.NewFloatHoltWintersReducer(10, 4, false, 1)\n\t// Dataset from http://www.inside-r.org/packages/cran/fpp/docs/austourists\n\taustourists := []influxql.FloatPoint{\n\t\t{Time: 1, Value: 30.052513},\n\t\t{Time: 3, Value: 25.317692},\n\t\t{Time: 4, Value: 27.591437},\n\t\t{Time: 5, Value: 32.076456},\n\t\t{Time: 6, Value: 23.487961},\n\t\t{Time: 7, Value: 28.47594},\n\t\t{Time: 9, Value: 36.838485},\n\t\t{Time: 10, Value: 25.007017},\n\t\t{Time: 11, Value: 30.72223},\n\t\t{Time: 12, Value: 28.693759},\n\t\t{Time: 13, Value: 36.640986},\n\t\t{Time: 14, Value: 23.824609},\n\t\t{Time: 15, Value: 
29.311683},\n\t\t{Time: 16, Value: 31.770309},\n\t\t{Time: 17, Value: 35.177877},\n\t\t{Time: 19, Value: 29.60175},\n\t\t{Time: 20, Value: 34.538842},\n\t\t{Time: 21, Value: 41.273599},\n\t\t{Time: 22, Value: 26.655862},\n\t\t{Time: 23, Value: 28.279859},\n\t\t{Time: 24, Value: 35.191153},\n\t\t{Time: 25, Value: 41.727458},\n\t\t{Time: 26, Value: 24.04185},\n\t\t{Time: 27, Value: 32.328103},\n\t\t{Time: 28, Value: 37.328708},\n\t\t{Time: 30, Value: 29.346326},\n\t\t{Time: 31, Value: 36.48291},\n\t\t{Time: 32, Value: 42.977719},\n\t\t{Time: 34, Value: 31.180221},\n\t\t{Time: 35, Value: 37.717881},\n\t\t{Time: 36, Value: 40.420211},\n\t\t{Time: 37, Value: 51.206863},\n\t\t{Time: 38, Value: 31.887228},\n\t\t{Time: 41, Value: 55.558567},\n\t\t{Time: 42, Value: 33.850915},\n\t\t{Time: 43, Value: 42.076383},\n\t\t{Time: 44, Value: 45.642292},\n\t\t{Time: 45, Value: 59.76678},\n\t\t{Time: 46, Value: 35.191877},\n\t\t{Time: 47, Value: 44.319737},\n\t\t{Time: 48, Value: 47.913736},\n\t}\n\n\tfor _, p := range austourists {\n\t\thw.AggregateFloat(&p)\n\t}\n\tpoints := hw.Emit()\n\n\tforecasted := []influxql.FloatPoint{\n\t\t{Time: 49, Value: 54.84533610387743},\n\t\t{Time: 50, Value: 41.19329421863249},\n\t\t{Time: 51, Value: 45.71673175112451},\n\t\t{Time: 52, Value: 56.05759298805955},\n\t\t{Time: 53, Value: 59.32337460282217},\n\t\t{Time: 54, Value: 44.75280096850461},\n\t\t{Time: 55, Value: 49.98865098113751},\n\t\t{Time: 56, Value: 61.86084934967605},\n\t\t{Time: 57, Value: 65.95805633454883},\n\t\t{Time: 58, Value: 50.1502170480547},\n\t}\n\n\tif exp, got := len(forecasted), len(points); exp != got {\n\t\tt.Fatalf(\"unexpected number of points emitted: got %d exp %d\", got, exp)\n\t}\n\n\tfor i := range forecasted {\n\t\tif exp, got := forecasted[i].Time, points[i].Time; got != exp {\n\t\t\tt.Errorf(\"unexpected time on points[%d] got %v exp %v\", i, got, exp)\n\t\t}\n\t\tif exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) 
{\n\t\t\tt.Errorf(\"unexpected value on points[%d] got %v exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestHoltWinters_USPopulation(t *testing.T) {\n\tseries := []influxql.FloatPoint{\n\t\t{Time: 1, Value: 3.93},\n\t\t{Time: 2, Value: 5.31},\n\t\t{Time: 3, Value: 7.24},\n\t\t{Time: 4, Value: 9.64},\n\t\t{Time: 5, Value: 12.90},\n\t\t{Time: 6, Value: 17.10},\n\t\t{Time: 7, Value: 23.20},\n\t\t{Time: 8, Value: 31.40},\n\t\t{Time: 9, Value: 39.80},\n\t\t{Time: 10, Value: 50.20},\n\t\t{Time: 11, Value: 62.90},\n\t\t{Time: 12, Value: 76.00},\n\t\t{Time: 13, Value: 92.00},\n\t\t{Time: 14, Value: 105.70},\n\t\t{Time: 15, Value: 122.80},\n\t\t{Time: 16, Value: 131.70},\n\t\t{Time: 17, Value: 151.30},\n\t\t{Time: 18, Value: 179.30},\n\t\t{Time: 19, Value: 203.20},\n\t}\n\thw := influxql.NewFloatHoltWintersReducer(10, 0, true, 1)\n\tfor _, p := range series {\n\t\thw.AggregateFloat(&p)\n\t}\n\tpoints := hw.Emit()\n\n\tforecasted := []influxql.FloatPoint{\n\t\t{Time: 1, Value: 3.93},\n\t\t{Time: 2, Value: 4.957405463559748},\n\t\t{Time: 3, Value: 7.012210102535647},\n\t\t{Time: 4, Value: 10.099589257439924},\n\t\t{Time: 5, Value: 14.229926188104242},\n\t\t{Time: 6, Value: 19.418878968703797},\n\t\t{Time: 7, Value: 25.68749172281409},\n\t\t{Time: 8, Value: 33.062351305731305},\n\t\t{Time: 9, Value: 41.575791076125206},\n\t\t{Time: 10, Value: 51.26614395589263},\n\t\t{Time: 11, Value: 62.178047564264595},\n\t\t{Time: 12, Value: 74.36280483872488},\n\t\t{Time: 13, Value: 87.87880423073163},\n\t\t{Time: 14, Value: 102.79200429905801},\n\t\t{Time: 15, Value: 119.17648832929542},\n\t\t{Time: 16, Value: 137.11509549747296},\n\t\t{Time: 17, Value: 156.70013608313175},\n\t\t{Time: 18, Value: 178.03419933863566},\n\t\t{Time: 19, Value: 201.23106385518594},\n\t\t{Time: 20, Value: 226.4167216525905},\n\t\t{Time: 21, Value: 253.73052878285205},\n\t\t{Time: 22, Value: 283.32649700397553},\n\t\t{Time: 23, Value: 315.37474308085984},\n\t\t{Time: 24, Value: 350.06311454009256},\n\t\t{Time: 
25, Value: 387.59901328556873},\n\t\t{Time: 26, Value: 428.21144141893404},\n\t\t{Time: 27, Value: 472.1532969569147},\n\t\t{Time: 28, Value: 519.7039509590035},\n\t\t{Time: 29, Value: 571.1721419458248},\n\t}\n\n\tif exp, got := len(forecasted), len(points); exp != got {\n\t\tt.Fatalf(\"unexpected number of points emitted: got %d exp %d\", got, exp)\n\t}\n\tfor i := range forecasted {\n\t\tif exp, got := forecasted[i].Time, points[i].Time; got != exp {\n\t\t\tt.Errorf(\"unexpected time on points[%d] got %v exp %v\", i, got, exp)\n\t\t}\n\t\tif exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {\n\t\t\tt.Errorf(\"unexpected value on points[%d] got %v exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestHoltWinters_USPopulation_Missing(t *testing.T) {\n\tseries := []influxql.FloatPoint{\n\t\t{Time: 1, Value: 3.93},\n\t\t{Time: 2, Value: 5.31},\n\t\t{Time: 3, Value: 7.24},\n\t\t{Time: 4, Value: 9.64},\n\t\t{Time: 5, Value: 12.90},\n\t\t{Time: 6, Value: 17.10},\n\t\t{Time: 7, Value: 23.20},\n\t\t{Time: 8, Value: 31.40},\n\t\t{Time: 10, Value: 50.20},\n\t\t{Time: 11, Value: 62.90},\n\t\t{Time: 12, Value: 76.00},\n\t\t{Time: 13, Value: 92.00},\n\t\t{Time: 15, Value: 122.80},\n\t\t{Time: 16, Value: 131.70},\n\t\t{Time: 17, Value: 151.30},\n\t\t{Time: 19, Value: 203.20},\n\t}\n\thw := influxql.NewFloatHoltWintersReducer(10, 0, true, 1)\n\tfor _, p := range series {\n\t\thw.AggregateFloat(&p)\n\t}\n\tpoints := hw.Emit()\n\n\tforecasted := []influxql.FloatPoint{\n\t\t{Time: 1, Value: 3.93},\n\t\t{Time: 2, Value: 4.8931364428135105},\n\t\t{Time: 3, Value: 6.962653629047061},\n\t\t{Time: 4, Value: 10.056207765903274},\n\t\t{Time: 5, Value: 14.18435088129532},\n\t\t{Time: 6, Value: 19.362939306110846},\n\t\t{Time: 7, Value: 25.613247940326584},\n\t\t{Time: 8, Value: 32.96213087008264},\n\t\t{Time: 9, Value: 41.442230043017204},\n\t\t{Time: 10, Value: 51.09223428526052},\n\t\t{Time: 11, Value: 61.95719155158485},\n\t\t{Time: 12, Value: 
74.08887794968567},\n\t\t{Time: 13, Value: 87.54622778052787},\n\t\t{Time: 14, Value: 102.39582960014131},\n\t\t{Time: 15, Value: 118.7124941463221},\n\t\t{Time: 16, Value: 136.57990089987464},\n\t\t{Time: 17, Value: 156.09133107941278},\n\t\t{Time: 18, Value: 177.35049601833734},\n\t\t{Time: 19, Value: 200.472471161683},\n\t\t{Time: 20, Value: 225.58474737097785},\n\t\t{Time: 21, Value: 252.82841286206823},\n\t\t{Time: 22, Value: 282.35948095261017},\n\t\t{Time: 23, Value: 314.3503808953992},\n\t\t{Time: 24, Value: 348.99163145856954},\n\t\t{Time: 25, Value: 386.49371962730555},\n\t\t{Time: 26, Value: 427.08920989407727},\n\t\t{Time: 27, Value: 471.0351131332573},\n\t\t{Time: 28, Value: 518.615548088049},\n\t\t{Time: 29, Value: 570.1447331101863},\n\t}\n\n\tif exp, got := len(forecasted), len(points); exp != got {\n\t\tt.Fatalf(\"unexpected number of points emitted: got %d exp %d\", got, exp)\n\t}\n\tfor i := range forecasted {\n\t\tif exp, got := forecasted[i].Time, points[i].Time; got != exp {\n\t\t\tt.Errorf(\"unexpected time on points[%d] got %v exp %v\", i, got, exp)\n\t\t}\n\t\tif exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {\n\t\t\tt.Errorf(\"unexpected value on points[%d] got %v exp %v\", i, got, exp)\n\t\t}\n\t}\n}\nfunc TestHoltWinters_RoundTime(t *testing.T) {\n\tmaxTime := time.Unix(0, influxql.MaxTime).Round(time.Second).UnixNano()\n\tdata := []influxql.FloatPoint{\n\t\t{Time: maxTime - int64(5*time.Second), Value: 1},\n\t\t{Time: maxTime - int64(4*time.Second+103*time.Millisecond), Value: 10},\n\t\t{Time: maxTime - int64(3*time.Second+223*time.Millisecond), Value: 2},\n\t\t{Time: maxTime - int64(2*time.Second+481*time.Millisecond), Value: 11},\n\t}\n\thw := influxql.NewFloatHoltWintersReducer(2, 2, true, time.Second)\n\tfor _, p := range data {\n\t\thw.AggregateFloat(&p)\n\t}\n\tpoints := hw.Emit()\n\n\tforecasted := []influxql.FloatPoint{\n\t\t{Time: maxTime - int64(5*time.Second), Value: 1},\n\t\t{Time: maxTime - 
int64(4*time.Second), Value: 10.006729104838234},\n\t\t{Time: maxTime - int64(3*time.Second), Value: 1.998341814469269},\n\t\t{Time: maxTime - int64(2*time.Second), Value: 10.997858830631172},\n\t\t{Time: maxTime - int64(1*time.Second), Value: 4.085860238030013},\n\t\t{Time: maxTime - int64(0*time.Second), Value: 11.35713604403339},\n\t}\n\n\tif exp, got := len(forecasted), len(points); exp != got {\n\t\tt.Fatalf(\"unexpected number of points emitted: got %d exp %d\", got, exp)\n\t}\n\tfor i := range forecasted {\n\t\tif exp, got := forecasted[i].Time, points[i].Time; got != exp {\n\t\t\tt.Errorf(\"unexpected time on points[%d] got %v exp %v\", i, got, exp)\n\t\t}\n\t\tif exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {\n\t\t\tt.Errorf(\"unexpected value on points[%d] got %v exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestHoltWinters_MaxTime(t *testing.T) {\n\tdata := []influxql.FloatPoint{\n\t\t{Time: influxql.MaxTime - 1, Value: 1},\n\t\t{Time: influxql.MaxTime, Value: 2},\n\t}\n\thw := influxql.NewFloatHoltWintersReducer(1, 0, true, 1)\n\tfor _, p := range data {\n\t\thw.AggregateFloat(&p)\n\t}\n\tpoints := hw.Emit()\n\n\tforecasted := []influxql.FloatPoint{\n\t\t{Time: influxql.MaxTime - 1, Value: 1},\n\t\t{Time: influxql.MaxTime, Value: 2.001516944066403},\n\t\t{Time: influxql.MaxTime + 1, Value: 2.5365248972488343},\n\t}\n\n\tif exp, got := len(forecasted), len(points); exp != got {\n\t\tt.Fatalf(\"unexpected number of points emitted: got %d exp %d\", got, exp)\n\t}\n\tfor i := range forecasted {\n\t\tif exp, got := forecasted[i].Time, points[i].Time; got != exp {\n\t\t\tt.Errorf(\"unexpected time on points[%d] got %v exp %v\", i, got, exp)\n\t\t}\n\t\tif exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) {\n\t\t\tt.Errorf(\"unexpected value on points[%d] got %v exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\n// TestSample_AllSamplesSeen attempts to verify that it is possible\n// to get every subsample in a 
reasonable number of iterations.\n//\n// The idea here is that 30 iterations should be enough to hit every possible\n// sequence at least once.\nfunc TestSample_AllSamplesSeen(t *testing.T) {\n\tps := []influxql.FloatPoint{\n\t\t{Time: 1, Value: 1},\n\t\t{Time: 2, Value: 2},\n\t\t{Time: 3, Value: 3},\n\t}\n\n\t// List of all the possible subsamples\n\tsamples := [][]influxql.FloatPoint{\n\t\t{\n\t\t\t{Time: 1, Value: 1},\n\t\t\t{Time: 2, Value: 2},\n\t\t},\n\t\t{\n\t\t\t{Time: 1, Value: 1},\n\t\t\t{Time: 3, Value: 3},\n\t\t},\n\t\t{\n\t\t\t{Time: 2, Value: 2},\n\t\t\t{Time: 3, Value: 3},\n\t\t},\n\t}\n\n\t// 30 iterations should be sufficient to guarantee that\n\t// we hit every possible subsample.\n\tfor i := 0; i < 30; i++ {\n\t\ts := influxql.NewFloatSampleReducer(2)\n\t\tfor _, p := range ps {\n\t\t\ts.AggregateFloat(&p)\n\t\t}\n\n\t\tpoints := s.Emit()\n\n\t\tfor i, sample := range samples {\n\t\t\t// if we find a sample that it matches, remove it from\n\t\t\t// this list of possible samples\n\t\t\tif deep.Equal(sample, points) {\n\t\t\t\tsamples = append(samples[:i], samples[i+1:]...)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// if samples is empty we've seen every sample, so we're done\n\t\tif len(samples) == 0 {\n\t\t\treturn\n\t\t}\n\n\t\t// The FloatSampleReducer is seeded with time.Now().UnixNano(), and without this sleep,\n\t\t// this test will fail on machines where UnixNano doesn't return full resolution.\n\t\t// Specifically, some Windows machines will only return timestamps accurate to 100ns.\n\t\t// While iterating through this test without an explicit sleep,\n\t\t// we would only see one or two unique seeds across all the calls to NewFloatSampleReducer.\n\t\ttime.Sleep(time.Millisecond)\n\t}\n\n\t// If we missed a sample, report the error\n\tif len(samples) != 0 {\n\t\tt.Fatalf(\"expected all samples to be seen; unseen samples: %#v\", samples)\n\t}\n}\n\nfunc TestSample_SampleSizeLessThanNumPoints(t *testing.T) {\n\ts := 
influxql.NewFloatSampleReducer(2)\n\n\tps := []influxql.FloatPoint{\n\t\t{Time: 1, Value: 1},\n\t\t{Time: 2, Value: 2},\n\t\t{Time: 3, Value: 3},\n\t}\n\n\tfor _, p := range ps {\n\t\ts.AggregateFloat(&p)\n\t}\n\n\tpoints := s.Emit()\n\n\tif exp, got := 2, len(points); exp != got {\n\t\tt.Fatalf(\"unexpected number of points emitted: got %d exp %d\", got, exp)\n\t}\n}\n\nfunc TestSample_SampleSizeGreaterThanNumPoints(t *testing.T) {\n\ts := influxql.NewFloatSampleReducer(4)\n\n\tps := []influxql.FloatPoint{\n\t\t{Time: 1, Value: 1},\n\t\t{Time: 2, Value: 2},\n\t\t{Time: 3, Value: 3},\n\t}\n\n\tfor _, p := range ps {\n\t\ts.AggregateFloat(&p)\n\t}\n\n\tpoints := s.Emit()\n\n\tif exp, got := len(ps), len(points); exp != got {\n\t\tt.Fatalf(\"unexpected number of points emitted: got %d exp %d\", got, exp)\n\t}\n\n\tif !deep.Equal(ps, points) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(points))\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/influxql.go",
    "content": "package influxql // import \"github.com/influxdata/influxdb/influxql\"\n\n//go:generate tmpl -data=@tmpldata iterator.gen.go.tmpl\n//go:generate tmpl -data=@tmpldata point.gen.go.tmpl\n//go:generate tmpl -data=@tmpldata functions.gen.go.tmpl\n\n//go:generate protoc --gogo_out=. internal/internal.proto\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/internal/internal.pb.go",
    "content": "// Code generated by protoc-gen-gogo.\n// source: internal/internal.proto\n// DO NOT EDIT!\n\n/*\nPackage influxql is a generated protocol buffer package.\n\nIt is generated from these files:\n\tinternal/internal.proto\n\nIt has these top-level messages:\n\tPoint\n\tAux\n\tIteratorOptions\n\tMeasurements\n\tMeasurement\n\tInterval\n\tIteratorStats\n\tVarRef\n*/\npackage influxql\n\nimport proto \"github.com/gogo/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\n// This is a compile-time assertion to ensure that this generated file\n// is compatible with the proto package it is being compiled against.\n// A compilation error at this line likely means your copy of the\n// proto package needs to be updated.\nconst _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package\n\ntype Point struct {\n\tName             *string        `protobuf:\"bytes,1,req,name=Name\" json:\"Name,omitempty\"`\n\tTags             *string        `protobuf:\"bytes,2,req,name=Tags\" json:\"Tags,omitempty\"`\n\tTime             *int64         `protobuf:\"varint,3,req,name=Time\" json:\"Time,omitempty\"`\n\tNil              *bool          `protobuf:\"varint,4,req,name=Nil\" json:\"Nil,omitempty\"`\n\tAux              []*Aux         `protobuf:\"bytes,5,rep,name=Aux\" json:\"Aux,omitempty\"`\n\tAggregated       *uint32        `protobuf:\"varint,6,opt,name=Aggregated\" json:\"Aggregated,omitempty\"`\n\tFloatValue       *float64       `protobuf:\"fixed64,7,opt,name=FloatValue\" json:\"FloatValue,omitempty\"`\n\tIntegerValue     *int64         `protobuf:\"varint,8,opt,name=IntegerValue\" json:\"IntegerValue,omitempty\"`\n\tStringValue      *string        `protobuf:\"bytes,9,opt,name=StringValue\" json:\"StringValue,omitempty\"`\n\tBooleanValue     *bool          `protobuf:\"varint,10,opt,name=BooleanValue\" 
json:\"BooleanValue,omitempty\"`\n\tStats            *IteratorStats `protobuf:\"bytes,11,opt,name=Stats\" json:\"Stats,omitempty\"`\n\tXXX_unrecognized []byte         `json:\"-\"`\n}\n\nfunc (m *Point) Reset()                    { *m = Point{} }\nfunc (m *Point) String() string            { return proto.CompactTextString(m) }\nfunc (*Point) ProtoMessage()               {}\nfunc (*Point) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{0} }\n\nfunc (m *Point) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *Point) GetTags() string {\n\tif m != nil && m.Tags != nil {\n\t\treturn *m.Tags\n\t}\n\treturn \"\"\n}\n\nfunc (m *Point) GetTime() int64 {\n\tif m != nil && m.Time != nil {\n\t\treturn *m.Time\n\t}\n\treturn 0\n}\n\nfunc (m *Point) GetNil() bool {\n\tif m != nil && m.Nil != nil {\n\t\treturn *m.Nil\n\t}\n\treturn false\n}\n\nfunc (m *Point) GetAux() []*Aux {\n\tif m != nil {\n\t\treturn m.Aux\n\t}\n\treturn nil\n}\n\nfunc (m *Point) GetAggregated() uint32 {\n\tif m != nil && m.Aggregated != nil {\n\t\treturn *m.Aggregated\n\t}\n\treturn 0\n}\n\nfunc (m *Point) GetFloatValue() float64 {\n\tif m != nil && m.FloatValue != nil {\n\t\treturn *m.FloatValue\n\t}\n\treturn 0\n}\n\nfunc (m *Point) GetIntegerValue() int64 {\n\tif m != nil && m.IntegerValue != nil {\n\t\treturn *m.IntegerValue\n\t}\n\treturn 0\n}\n\nfunc (m *Point) GetStringValue() string {\n\tif m != nil && m.StringValue != nil {\n\t\treturn *m.StringValue\n\t}\n\treturn \"\"\n}\n\nfunc (m *Point) GetBooleanValue() bool {\n\tif m != nil && m.BooleanValue != nil {\n\t\treturn *m.BooleanValue\n\t}\n\treturn false\n}\n\nfunc (m *Point) GetStats() *IteratorStats {\n\tif m != nil {\n\t\treturn m.Stats\n\t}\n\treturn nil\n}\n\ntype Aux struct {\n\tDataType         *int32   `protobuf:\"varint,1,req,name=DataType\" json:\"DataType,omitempty\"`\n\tFloatValue       *float64 `protobuf:\"fixed64,2,opt,name=FloatValue\" 
json:\"FloatValue,omitempty\"`\n\tIntegerValue     *int64   `protobuf:\"varint,3,opt,name=IntegerValue\" json:\"IntegerValue,omitempty\"`\n\tStringValue      *string  `protobuf:\"bytes,4,opt,name=StringValue\" json:\"StringValue,omitempty\"`\n\tBooleanValue     *bool    `protobuf:\"varint,5,opt,name=BooleanValue\" json:\"BooleanValue,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *Aux) Reset()                    { *m = Aux{} }\nfunc (m *Aux) String() string            { return proto.CompactTextString(m) }\nfunc (*Aux) ProtoMessage()               {}\nfunc (*Aux) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{1} }\n\nfunc (m *Aux) GetDataType() int32 {\n\tif m != nil && m.DataType != nil {\n\t\treturn *m.DataType\n\t}\n\treturn 0\n}\n\nfunc (m *Aux) GetFloatValue() float64 {\n\tif m != nil && m.FloatValue != nil {\n\t\treturn *m.FloatValue\n\t}\n\treturn 0\n}\n\nfunc (m *Aux) GetIntegerValue() int64 {\n\tif m != nil && m.IntegerValue != nil {\n\t\treturn *m.IntegerValue\n\t}\n\treturn 0\n}\n\nfunc (m *Aux) GetStringValue() string {\n\tif m != nil && m.StringValue != nil {\n\t\treturn *m.StringValue\n\t}\n\treturn \"\"\n}\n\nfunc (m *Aux) GetBooleanValue() bool {\n\tif m != nil && m.BooleanValue != nil {\n\t\treturn *m.BooleanValue\n\t}\n\treturn false\n}\n\ntype IteratorOptions struct {\n\tExpr             *string        `protobuf:\"bytes,1,opt,name=Expr\" json:\"Expr,omitempty\"`\n\tAux              []string       `protobuf:\"bytes,2,rep,name=Aux\" json:\"Aux,omitempty\"`\n\tFields           []*VarRef      `protobuf:\"bytes,17,rep,name=Fields\" json:\"Fields,omitempty\"`\n\tSources          []*Measurement `protobuf:\"bytes,3,rep,name=Sources\" json:\"Sources,omitempty\"`\n\tInterval         *Interval      `protobuf:\"bytes,4,opt,name=Interval\" json:\"Interval,omitempty\"`\n\tDimensions       []string       `protobuf:\"bytes,5,rep,name=Dimensions\" json:\"Dimensions,omitempty\"`\n\tGroupBy          []string       
`protobuf:\"bytes,19,rep,name=GroupBy\" json:\"GroupBy,omitempty\"`\n\tFill             *int32         `protobuf:\"varint,6,opt,name=Fill\" json:\"Fill,omitempty\"`\n\tFillValue        *float64       `protobuf:\"fixed64,7,opt,name=FillValue\" json:\"FillValue,omitempty\"`\n\tCondition        *string        `protobuf:\"bytes,8,opt,name=Condition\" json:\"Condition,omitempty\"`\n\tStartTime        *int64         `protobuf:\"varint,9,opt,name=StartTime\" json:\"StartTime,omitempty\"`\n\tEndTime          *int64         `protobuf:\"varint,10,opt,name=EndTime\" json:\"EndTime,omitempty\"`\n\tLocation         *string        `protobuf:\"bytes,21,opt,name=Location\" json:\"Location,omitempty\"`\n\tAscending        *bool          `protobuf:\"varint,11,opt,name=Ascending\" json:\"Ascending,omitempty\"`\n\tLimit            *int64         `protobuf:\"varint,12,opt,name=Limit\" json:\"Limit,omitempty\"`\n\tOffset           *int64         `protobuf:\"varint,13,opt,name=Offset\" json:\"Offset,omitempty\"`\n\tSLimit           *int64         `protobuf:\"varint,14,opt,name=SLimit\" json:\"SLimit,omitempty\"`\n\tSOffset          *int64         `protobuf:\"varint,15,opt,name=SOffset\" json:\"SOffset,omitempty\"`\n\tDedupe           *bool          `protobuf:\"varint,16,opt,name=Dedupe\" json:\"Dedupe,omitempty\"`\n\tMaxSeriesN       *int64         `protobuf:\"varint,18,opt,name=MaxSeriesN\" json:\"MaxSeriesN,omitempty\"`\n\tOrdered          *bool          `protobuf:\"varint,20,opt,name=Ordered\" json:\"Ordered,omitempty\"`\n\tXXX_unrecognized []byte         `json:\"-\"`\n}\n\nfunc (m *IteratorOptions) Reset()                    { *m = IteratorOptions{} }\nfunc (m *IteratorOptions) String() string            { return proto.CompactTextString(m) }\nfunc (*IteratorOptions) ProtoMessage()               {}\nfunc (*IteratorOptions) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{2} }\n\nfunc (m *IteratorOptions) GetExpr() string {\n\tif m != nil && m.Expr != nil 
{\n\t\treturn *m.Expr\n\t}\n\treturn \"\"\n}\n\nfunc (m *IteratorOptions) GetAux() []string {\n\tif m != nil {\n\t\treturn m.Aux\n\t}\n\treturn nil\n}\n\nfunc (m *IteratorOptions) GetFields() []*VarRef {\n\tif m != nil {\n\t\treturn m.Fields\n\t}\n\treturn nil\n}\n\nfunc (m *IteratorOptions) GetSources() []*Measurement {\n\tif m != nil {\n\t\treturn m.Sources\n\t}\n\treturn nil\n}\n\nfunc (m *IteratorOptions) GetInterval() *Interval {\n\tif m != nil {\n\t\treturn m.Interval\n\t}\n\treturn nil\n}\n\nfunc (m *IteratorOptions) GetDimensions() []string {\n\tif m != nil {\n\t\treturn m.Dimensions\n\t}\n\treturn nil\n}\n\nfunc (m *IteratorOptions) GetGroupBy() []string {\n\tif m != nil {\n\t\treturn m.GroupBy\n\t}\n\treturn nil\n}\n\nfunc (m *IteratorOptions) GetFill() int32 {\n\tif m != nil && m.Fill != nil {\n\t\treturn *m.Fill\n\t}\n\treturn 0\n}\n\nfunc (m *IteratorOptions) GetFillValue() float64 {\n\tif m != nil && m.FillValue != nil {\n\t\treturn *m.FillValue\n\t}\n\treturn 0\n}\n\nfunc (m *IteratorOptions) GetCondition() string {\n\tif m != nil && m.Condition != nil {\n\t\treturn *m.Condition\n\t}\n\treturn \"\"\n}\n\nfunc (m *IteratorOptions) GetStartTime() int64 {\n\tif m != nil && m.StartTime != nil {\n\t\treturn *m.StartTime\n\t}\n\treturn 0\n}\n\nfunc (m *IteratorOptions) GetEndTime() int64 {\n\tif m != nil && m.EndTime != nil {\n\t\treturn *m.EndTime\n\t}\n\treturn 0\n}\n\nfunc (m *IteratorOptions) GetLocation() string {\n\tif m != nil && m.Location != nil {\n\t\treturn *m.Location\n\t}\n\treturn \"\"\n}\n\nfunc (m *IteratorOptions) GetAscending() bool {\n\tif m != nil && m.Ascending != nil {\n\t\treturn *m.Ascending\n\t}\n\treturn false\n}\n\nfunc (m *IteratorOptions) GetLimit() int64 {\n\tif m != nil && m.Limit != nil {\n\t\treturn *m.Limit\n\t}\n\treturn 0\n}\n\nfunc (m *IteratorOptions) GetOffset() int64 {\n\tif m != nil && m.Offset != nil {\n\t\treturn *m.Offset\n\t}\n\treturn 0\n}\n\nfunc (m *IteratorOptions) GetSLimit() int64 {\n\tif m != nil && 
m.SLimit != nil {\n\t\treturn *m.SLimit\n\t}\n\treturn 0\n}\n\nfunc (m *IteratorOptions) GetSOffset() int64 {\n\tif m != nil && m.SOffset != nil {\n\t\treturn *m.SOffset\n\t}\n\treturn 0\n}\n\nfunc (m *IteratorOptions) GetDedupe() bool {\n\tif m != nil && m.Dedupe != nil {\n\t\treturn *m.Dedupe\n\t}\n\treturn false\n}\n\nfunc (m *IteratorOptions) GetMaxSeriesN() int64 {\n\tif m != nil && m.MaxSeriesN != nil {\n\t\treturn *m.MaxSeriesN\n\t}\n\treturn 0\n}\n\nfunc (m *IteratorOptions) GetOrdered() bool {\n\tif m != nil && m.Ordered != nil {\n\t\treturn *m.Ordered\n\t}\n\treturn false\n}\n\ntype Measurements struct {\n\tItems            []*Measurement `protobuf:\"bytes,1,rep,name=Items\" json:\"Items,omitempty\"`\n\tXXX_unrecognized []byte         `json:\"-\"`\n}\n\nfunc (m *Measurements) Reset()                    { *m = Measurements{} }\nfunc (m *Measurements) String() string            { return proto.CompactTextString(m) }\nfunc (*Measurements) ProtoMessage()               {}\nfunc (*Measurements) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{3} }\n\nfunc (m *Measurements) GetItems() []*Measurement {\n\tif m != nil {\n\t\treturn m.Items\n\t}\n\treturn nil\n}\n\ntype Measurement struct {\n\tDatabase         *string `protobuf:\"bytes,1,opt,name=Database\" json:\"Database,omitempty\"`\n\tRetentionPolicy  *string `protobuf:\"bytes,2,opt,name=RetentionPolicy\" json:\"RetentionPolicy,omitempty\"`\n\tName             *string `protobuf:\"bytes,3,opt,name=Name\" json:\"Name,omitempty\"`\n\tRegex            *string `protobuf:\"bytes,4,opt,name=Regex\" json:\"Regex,omitempty\"`\n\tIsTarget         *bool   `protobuf:\"varint,5,opt,name=IsTarget\" json:\"IsTarget,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *Measurement) Reset()                    { *m = Measurement{} }\nfunc (m *Measurement) String() string            { return proto.CompactTextString(m) }\nfunc (*Measurement) ProtoMessage()               {}\nfunc (*Measurement) 
Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{4} }\n\nfunc (m *Measurement) GetDatabase() string {\n\tif m != nil && m.Database != nil {\n\t\treturn *m.Database\n\t}\n\treturn \"\"\n}\n\nfunc (m *Measurement) GetRetentionPolicy() string {\n\tif m != nil && m.RetentionPolicy != nil {\n\t\treturn *m.RetentionPolicy\n\t}\n\treturn \"\"\n}\n\nfunc (m *Measurement) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *Measurement) GetRegex() string {\n\tif m != nil && m.Regex != nil {\n\t\treturn *m.Regex\n\t}\n\treturn \"\"\n}\n\nfunc (m *Measurement) GetIsTarget() bool {\n\tif m != nil && m.IsTarget != nil {\n\t\treturn *m.IsTarget\n\t}\n\treturn false\n}\n\ntype Interval struct {\n\tDuration         *int64 `protobuf:\"varint,1,opt,name=Duration\" json:\"Duration,omitempty\"`\n\tOffset           *int64 `protobuf:\"varint,2,opt,name=Offset\" json:\"Offset,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Interval) Reset()                    { *m = Interval{} }\nfunc (m *Interval) String() string            { return proto.CompactTextString(m) }\nfunc (*Interval) ProtoMessage()               {}\nfunc (*Interval) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{5} }\n\nfunc (m *Interval) GetDuration() int64 {\n\tif m != nil && m.Duration != nil {\n\t\treturn *m.Duration\n\t}\n\treturn 0\n}\n\nfunc (m *Interval) GetOffset() int64 {\n\tif m != nil && m.Offset != nil {\n\t\treturn *m.Offset\n\t}\n\treturn 0\n}\n\ntype IteratorStats struct {\n\tSeriesN          *int64 `protobuf:\"varint,1,opt,name=SeriesN\" json:\"SeriesN,omitempty\"`\n\tPointN           *int64 `protobuf:\"varint,2,opt,name=PointN\" json:\"PointN,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *IteratorStats) Reset()                    { *m = IteratorStats{} }\nfunc (m *IteratorStats) String() string            { return proto.CompactTextString(m) }\nfunc (*IteratorStats) 
ProtoMessage()               {}\nfunc (*IteratorStats) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{6} }\n\nfunc (m *IteratorStats) GetSeriesN() int64 {\n\tif m != nil && m.SeriesN != nil {\n\t\treturn *m.SeriesN\n\t}\n\treturn 0\n}\n\nfunc (m *IteratorStats) GetPointN() int64 {\n\tif m != nil && m.PointN != nil {\n\t\treturn *m.PointN\n\t}\n\treturn 0\n}\n\ntype VarRef struct {\n\tVal              *string `protobuf:\"bytes,1,req,name=Val\" json:\"Val,omitempty\"`\n\tType             *int32  `protobuf:\"varint,2,opt,name=Type\" json:\"Type,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *VarRef) Reset()                    { *m = VarRef{} }\nfunc (m *VarRef) String() string            { return proto.CompactTextString(m) }\nfunc (*VarRef) ProtoMessage()               {}\nfunc (*VarRef) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{7} }\n\nfunc (m *VarRef) GetVal() string {\n\tif m != nil && m.Val != nil {\n\t\treturn *m.Val\n\t}\n\treturn \"\"\n}\n\nfunc (m *VarRef) GetType() int32 {\n\tif m != nil && m.Type != nil {\n\t\treturn *m.Type\n\t}\n\treturn 0\n}\n\nfunc init() {\n\tproto.RegisterType((*Point)(nil), \"influxql.Point\")\n\tproto.RegisterType((*Aux)(nil), \"influxql.Aux\")\n\tproto.RegisterType((*IteratorOptions)(nil), \"influxql.IteratorOptions\")\n\tproto.RegisterType((*Measurements)(nil), \"influxql.Measurements\")\n\tproto.RegisterType((*Measurement)(nil), \"influxql.Measurement\")\n\tproto.RegisterType((*Interval)(nil), \"influxql.Interval\")\n\tproto.RegisterType((*IteratorStats)(nil), \"influxql.IteratorStats\")\n\tproto.RegisterType((*VarRef)(nil), \"influxql.VarRef\")\n}\n\nfunc init() { proto.RegisterFile(\"internal/internal.proto\", fileDescriptorInternal) }\n\nvar fileDescriptorInternal = []byte{\n\t// 737 bytes of a gzipped FileDescriptorProto\n\t0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0xdd, 0x6a, 0xdb, 0x4a,\n\t0x10, 0x46, 0x92, 0xe5, 0x58, 0xeb, 
0xf8, 0x24, 0x67, 0x4f, 0x7e, 0x96, 0xc3, 0xe1, 0x54, 0xe8,\n\t0x4a, 0x50, 0xea, 0x40, 0x6e, 0x0b, 0x05, 0xa7, 0x49, 0x8a, 0x21, 0x71, 0xc2, 0x2a, 0xe4, 0x7e,\n\t0x6b, 0x8d, 0xc5, 0x82, 0x2c, 0xb9, 0xab, 0x55, 0x71, 0x1e, 0xa5, 0xcf, 0xd0, 0x87, 0xe9, 0xab,\n\t0xf4, 0x11, 0xca, 0xce, 0x4a, 0x96, 0x92, 0x42, 0x73, 0xa5, 0xf9, 0xbe, 0x99, 0x1d, 0xed, 0xcc,\n\t0x37, 0x3b, 0xe4, 0x54, 0x16, 0x1a, 0x54, 0x21, 0xf2, 0xb3, 0xd6, 0x98, 0x6e, 0x54, 0xa9, 0x4b,\n\t0x3a, 0x92, 0xc5, 0x2a, 0xaf, 0xb7, 0x5f, 0xf2, 0xe8, 0x87, 0x4b, 0xfc, 0xfb, 0x52, 0x16, 0x9a,\n\t0x52, 0x32, 0x58, 0x88, 0x35, 0x30, 0x27, 0x74, 0xe3, 0x80, 0xa3, 0x6d, 0xb8, 0x07, 0x91, 0x55,\n\t0xcc, 0xb5, 0x9c, 0xb1, 0x91, 0x93, 0x6b, 0x60, 0x5e, 0xe8, 0xc6, 0x1e, 0x47, 0x9b, 0x1e, 0x12,\n\t0x6f, 0x21, 0x73, 0x36, 0x08, 0xdd, 0x78, 0xc4, 0x8d, 0x49, 0xdf, 0x10, 0x6f, 0x56, 0x6f, 0x99,\n\t0x1f, 0x7a, 0xf1, 0xf8, 0x7c, 0x32, 0x6d, 0xff, 0x37, 0x9d, 0xd5, 0x5b, 0x6e, 0x3c, 0xf4, 0x7f,\n\t0x42, 0x66, 0x59, 0xa6, 0x20, 0x13, 0x1a, 0x52, 0x36, 0x0c, 0x9d, 0x78, 0xc2, 0x7b, 0x8c, 0xf1,\n\t0x5f, 0xe7, 0xa5, 0xd0, 0x8f, 0x22, 0xaf, 0x81, 0xed, 0x85, 0x4e, 0xec, 0xf0, 0x1e, 0x43, 0x23,\n\t0xb2, 0x3f, 0x2f, 0x34, 0x64, 0xa0, 0x6c, 0xc4, 0x28, 0x74, 0x62, 0x8f, 0x3f, 0xe3, 0x68, 0x48,\n\t0xc6, 0x89, 0x56, 0xb2, 0xc8, 0x6c, 0x48, 0x10, 0x3a, 0x71, 0xc0, 0xfb, 0x94, 0xc9, 0x72, 0x51,\n\t0x96, 0x39, 0x88, 0xc2, 0x86, 0x90, 0xd0, 0x89, 0x47, 0xfc, 0x19, 0x47, 0xdf, 0x11, 0x3f, 0xd1,\n\t0x42, 0x57, 0x6c, 0x1c, 0x3a, 0xf1, 0xf8, 0xfc, 0xb4, 0x2b, 0x66, 0xae, 0x41, 0x09, 0x5d, 0x2a,\n\t0x74, 0x73, 0x1b, 0x15, 0x7d, 0x77, 0xb0, 0x74, 0xfa, 0x2f, 0x19, 0x5d, 0x0a, 0x2d, 0x1e, 0x9e,\n\t0x36, 0xb6, 0xa7, 0x3e, 0xdf, 0xe1, 0x17, 0xc5, 0xb9, 0xaf, 0x16, 0xe7, 0xbd, 0x5e, 0xdc, 0xe0,\n\t0xf5, 0xe2, 0xfc, 0xdf, 0x8b, 0x8b, 0x7e, 0x0e, 0xc8, 0x41, 0x5b, 0xc6, 0xdd, 0x46, 0xcb, 0xb2,\n\t0x40, 0x85, 0xaf, 0xb6, 0x1b, 0xc5, 0x1c, 0x4c, 0x89, 0xb6, 0x51, 0xd8, 0xe8, 0xe9, 0x86, 0x5e,\n\t0x1c, 0x58, 0x01, 0x63, 0x32, 0xbc, 0x96, 0x90, 0xa7, 
0x15, 0xfb, 0x1b, 0x45, 0x3e, 0xec, 0xfa,\n\t0xf2, 0x28, 0x14, 0x87, 0x15, 0x6f, 0xfc, 0xf4, 0x8c, 0xec, 0x25, 0x65, 0xad, 0x96, 0x50, 0x31,\n\t0x0f, 0x43, 0x8f, 0xbb, 0xd0, 0x5b, 0x10, 0x55, 0xad, 0x60, 0x0d, 0x85, 0xe6, 0x6d, 0x14, 0x9d,\n\t0x92, 0x91, 0x29, 0x55, 0x7d, 0x15, 0x39, 0xd6, 0x35, 0x3e, 0xa7, 0xbd, 0xa6, 0x37, 0x1e, 0xbe,\n\t0x8b, 0x31, 0xed, 0xbc, 0x94, 0x6b, 0x28, 0x2a, 0x73, 0x7d, 0x9c, 0xb9, 0x80, 0xf7, 0x18, 0xca,\n\t0xc8, 0xde, 0x27, 0x55, 0xd6, 0x9b, 0x8b, 0x27, 0xf6, 0x0f, 0x3a, 0x5b, 0x68, 0x4a, 0xbd, 0x96,\n\t0x79, 0x8e, 0xf3, 0xe7, 0x73, 0xb4, 0xe9, 0x7f, 0x24, 0x30, 0xdf, 0xfe, 0xe0, 0x75, 0x84, 0xf1,\n\t0x7e, 0x2c, 0x8b, 0x54, 0x9a, 0x56, 0xe1, 0xd0, 0x05, 0xbc, 0x23, 0x8c, 0x37, 0xd1, 0x42, 0x69,\n\t0x7c, 0x21, 0x01, 0xaa, 0xd6, 0x11, 0xe6, 0x1e, 0x57, 0x45, 0x8a, 0x3e, 0x82, 0xbe, 0x16, 0x9a,\n\t0x61, 0xb9, 0x29, 0x97, 0x02, 0x93, 0x1e, 0x63, 0xd2, 0x1d, 0x36, 0x39, 0x67, 0xd5, 0x12, 0x8a,\n\t0x54, 0x16, 0x19, 0xce, 0xe0, 0x88, 0x77, 0x04, 0x3d, 0x22, 0xfe, 0x8d, 0x5c, 0x4b, 0xcd, 0xf6,\n\t0x31, 0xa3, 0x05, 0xf4, 0x84, 0x0c, 0xef, 0x56, 0xab, 0x0a, 0x34, 0x9b, 0x20, 0xdd, 0x20, 0xc3,\n\t0x27, 0x36, 0xfc, 0x2f, 0xcb, 0x5b, 0x64, 0x6e, 0x96, 0x34, 0x07, 0x0e, 0xec, 0xcd, 0x92, 0xee,\n\t0xc4, 0x25, 0xa4, 0xf5, 0x06, 0xd8, 0x21, 0xfe, 0xba, 0x41, 0xa6, 0xe7, 0xb7, 0x62, 0x9b, 0x80,\n\t0x92, 0x50, 0x2d, 0x18, 0xc5, 0x43, 0x3d, 0xc6, 0x64, 0xbc, 0x53, 0x29, 0x28, 0x48, 0xd9, 0x11,\n\t0x1e, 0x6c, 0x61, 0xf4, 0x9e, 0xec, 0xf7, 0x54, 0xaf, 0xe8, 0x5b, 0xe2, 0xcf, 0x35, 0xac, 0x2b,\n\t0xe6, 0xfc, 0x69, 0x38, 0x6c, 0x4c, 0xf4, 0xcd, 0x21, 0xe3, 0x1e, 0xdd, 0xbe, 0xb2, 0xcf, 0xa2,\n\t0x82, 0x66, 0x5e, 0x77, 0x98, 0xc6, 0xe4, 0x80, 0x83, 0x86, 0xc2, 0x74, 0xf1, 0xbe, 0xcc, 0xe5,\n\t0xf2, 0x09, 0x9f, 0x5a, 0xc0, 0x5f, 0xd2, 0xbb, 0xdd, 0xe7, 0xd9, 0x89, 0xc7, 0xdd, 0x77, 0x44,\n\t0x7c, 0x0e, 0x19, 0x6c, 0x9b, 0x97, 0x65, 0x81, 0xf9, 0xdf, 0xbc, 0x7a, 0x10, 0x2a, 0x03, 0xdd,\n\t0xbc, 0xa7, 0x1d, 0x8e, 0x3e, 0x74, 0x63, 0x8b, 0xf7, 0xaa, 0x95, 0x15, 
0xd4, 0xc1, 0xe6, 0xec,\n\t0x70, 0x4f, 0x1c, 0xb7, 0x2f, 0x4e, 0x34, 0x23, 0x93, 0x67, 0x1b, 0x05, 0x55, 0x69, 0x1a, 0xec,\n\t0x34, 0xaa, 0x34, 0xdd, 0x3d, 0x21, 0x43, 0xdc, 0xda, 0x8b, 0x36, 0x85, 0x45, 0xd1, 0x94, 0x0c,\n\t0xed, 0xe3, 0x33, 0x0f, 0xf6, 0x51, 0xe4, 0xcd, 0x36, 0x37, 0x26, 0x2e, 0x6e, 0xb3, 0x8c, 0x5c,\n\t0x3b, 0xeb, 0xc6, 0xfe, 0x15, 0x00, 0x00, 0xff, 0xff, 0xca, 0x3e, 0x5e, 0x08, 0x22, 0x06, 0x00,\n\t0x00,\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/internal/internal.proto",
    "content": "syntax = \"proto2\";\npackage influxql;\n\nmessage Point {\n    required string Name       = 1;\n    required string Tags       = 2;\n    required int64  Time       = 3;\n    required bool   Nil        = 4;\n    repeated Aux    Aux        = 5;\n    optional uint32 Aggregated = 6;\n\n    optional double FloatValue   = 7;\n    optional int64  IntegerValue = 8;\n    optional string StringValue  = 9;\n    optional bool   BooleanValue = 10;\n\n    optional IteratorStats Stats = 11;\n}\n\nmessage Aux {\n    required int32  DataType     = 1;\n    optional double FloatValue   = 2;\n    optional int64  IntegerValue = 3;\n    optional string StringValue  = 4;\n    optional bool   BooleanValue = 5;\n}\n\nmessage IteratorOptions {\n    optional string      Expr       = 1;\n    repeated string      Aux        = 2;\n    repeated VarRef      Fields     = 17;\n    repeated Measurement Sources    = 3;\n    optional Interval    Interval   = 4;\n    repeated string      Dimensions = 5;\n    repeated string      GroupBy    = 19;\n    optional int32       Fill       = 6;\n    optional double      FillValue  = 7;\n    optional string      Condition  = 8;\n    optional int64       StartTime  = 9;\n    optional int64       EndTime    = 10;\n    optional string      Location   = 21;\n    optional bool        Ascending  = 11;\n    optional int64       Limit      = 12;\n    optional int64       Offset     = 13;\n    optional int64       SLimit     = 14;\n    optional int64       SOffset    = 15;\n    optional bool        Dedupe     = 16;\n    optional int64       MaxSeriesN = 18;\n    optional bool        Ordered    = 20;\n}\n\nmessage Measurements {\n    repeated Measurement Items = 1;\n}\n\nmessage Measurement {\n    optional string Database        = 1;\n    optional string RetentionPolicy = 2;\n    optional string Name            = 3;\n    optional string Regex           = 4;\n    optional bool   IsTarget        = 5;\n}\n\nmessage Interval {\n    optional int64 Duration = 
1;\n    optional int64 Offset   = 2;\n}\n\nmessage IteratorStats {\n    optional int64 SeriesN = 1;\n    optional int64 PointN  = 2;\n}\n\nmessage VarRef {\n    required string Val  = 1;\n    optional int32  Type = 2;\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go",
    "content": "// Generated by tmpl\n// https://github.com/benbjohnson/tmpl\n//\n// DO NOT EDIT!\n// Source: iterator.gen.go.tmpl\n\npackage influxql\n\nimport (\n\t\"container/heap\"\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/gogo/protobuf/proto\"\n\tinternal \"github.com/influxdata/influxdb/influxql/internal\"\n)\n\n// DefaultStatsInterval is the default value for IteratorEncoder.StatsInterval.\nconst DefaultStatsInterval = 10 * time.Second\n\n// FloatIterator represents a stream of float points.\ntype FloatIterator interface {\n\tIterator\n\tNext() (*FloatPoint, error)\n}\n\n// newFloatIterators converts a slice of Iterator to a slice of FloatIterator.\n// Drop and closes any iterator in itrs that is not a FloatIterator and cannot\n// be cast to a FloatIterator.\nfunc newFloatIterators(itrs []Iterator) []FloatIterator {\n\ta := make([]FloatIterator, 0, len(itrs))\n\tfor _, itr := range itrs {\n\t\tswitch itr := itr.(type) {\n\t\tcase FloatIterator:\n\t\t\ta = append(a, itr)\n\n\t\tcase IntegerIterator:\n\t\t\ta = append(a, &integerFloatCastIterator{input: itr})\n\n\t\tdefault:\n\t\t\titr.Close()\n\t\t}\n\t}\n\treturn a\n}\n\n// bufFloatIterator represents a buffered FloatIterator.\ntype bufFloatIterator struct {\n\titr FloatIterator\n\tbuf *FloatPoint\n}\n\n// newBufFloatIterator returns a buffered FloatIterator.\nfunc newBufFloatIterator(itr FloatIterator) *bufFloatIterator {\n\treturn &bufFloatIterator{itr: itr}\n}\n\n// Stats returns statistics from the input iterator.\nfunc (itr *bufFloatIterator) Stats() IteratorStats { return itr.itr.Stats() }\n\n// Close closes the underlying iterator.\nfunc (itr *bufFloatIterator) Close() error { return itr.itr.Close() }\n\n// peek returns the next point without removing it from the iterator.\nfunc (itr *bufFloatIterator) peek() (*FloatPoint, error) {\n\tp, err := itr.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\titr.unread(p)\n\treturn p, nil\n}\n\n// 
peekTime returns the time of the next point.\n// Returns zero time if no more points available.\nfunc (itr *bufFloatIterator) peekTime() (int64, error) {\n\tp, err := itr.peek()\n\tif p == nil || err != nil {\n\t\treturn ZeroTime, err\n\t}\n\treturn p.Time, nil\n}\n\n// Next returns the current buffer, if exists, or calls the underlying iterator.\nfunc (itr *bufFloatIterator) Next() (*FloatPoint, error) {\n\tbuf := itr.buf\n\tif buf != nil {\n\t\titr.buf = nil\n\t\treturn buf, nil\n\t}\n\treturn itr.itr.Next()\n}\n\n// NextInWindow returns the next value if it is between [startTime, endTime).\n// If the next value is outside the range then it is moved to the buffer.\nfunc (itr *bufFloatIterator) NextInWindow(startTime, endTime int64) (*FloatPoint, error) {\n\tv, err := itr.Next()\n\tif v == nil || err != nil {\n\t\treturn nil, err\n\t} else if t := v.Time; t >= endTime || t < startTime {\n\t\titr.unread(v)\n\t\treturn nil, nil\n\t}\n\treturn v, nil\n}\n\n// unread sets v to the buffer. It is read on the next call to Next().\nfunc (itr *bufFloatIterator) unread(v *FloatPoint) { itr.buf = v }\n\n// floatMergeIterator represents an iterator that combines multiple float iterators.\ntype floatMergeIterator struct {\n\tinputs []FloatIterator\n\theap   *floatMergeHeap\n\tinit   bool\n\n\t// Current iterator and window.\n\tcurr   *floatMergeHeapItem\n\twindow struct {\n\t\tname      string\n\t\ttags      string\n\t\tstartTime int64\n\t\tendTime   int64\n\t}\n}\n\n// newFloatMergeIterator returns a new instance of floatMergeIterator.\nfunc newFloatMergeIterator(inputs []FloatIterator, opt IteratorOptions) *floatMergeIterator {\n\titr := &floatMergeIterator{\n\t\tinputs: inputs,\n\t\theap: &floatMergeHeap{\n\t\t\titems: make([]*floatMergeHeapItem, 0, len(inputs)),\n\t\t\topt:   opt,\n\t\t},\n\t}\n\n\t// Initialize heap items.\n\tfor _, input := range inputs {\n\t\t// Wrap in buffer, ignore any inputs without anymore points.\n\t\tbufInput := 
newBufFloatIterator(input)\n\n\t\t// Append to the heap.\n\t\titr.heap.items = append(itr.heap.items, &floatMergeHeapItem{itr: bufInput})\n\t}\n\n\treturn itr\n}\n\n// Stats returns an aggregation of stats from the underlying iterators.\nfunc (itr *floatMergeIterator) Stats() IteratorStats {\n\tvar stats IteratorStats\n\tfor _, input := range itr.inputs {\n\t\tstats.Add(input.Stats())\n\t}\n\treturn stats\n}\n\n// Close closes the underlying iterators.\nfunc (itr *floatMergeIterator) Close() error {\n\tfor _, input := range itr.inputs {\n\t\tinput.Close()\n\t}\n\titr.curr = nil\n\titr.inputs = nil\n\titr.heap.items = nil\n\treturn nil\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *floatMergeIterator) Next() (*FloatPoint, error) {\n\t// Initialize the heap. This needs to be done lazily on the first call to this iterator\n\t// so that iterator initialization done through the Select() call returns quickly.\n\t// Queries can only be interrupted after the Select() call completes so any operations\n\t// done during iterator creation cannot be interrupted, which is why we do it here\n\t// instead so an interrupt can happen while initializing the heap.\n\tif !itr.init {\n\t\titems := itr.heap.items\n\t\titr.heap.items = make([]*floatMergeHeapItem, 0, len(items))\n\t\tfor _, item := range items {\n\t\t\tif p, err := item.itr.peek(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else if p == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\titr.heap.items = append(itr.heap.items, item)\n\t\t}\n\t\theap.Init(itr.heap)\n\t\titr.init = true\n\t}\n\n\tfor {\n\t\t// Retrieve the next iterator if we don't have one.\n\t\tif itr.curr == nil {\n\t\t\tif len(itr.heap.items) == 0 {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t\titr.curr = heap.Pop(itr.heap).(*floatMergeHeapItem)\n\n\t\t\t// Read point and set current window.\n\t\t\tp, err := itr.curr.itr.Next()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ttags := 
p.Tags.Subset(itr.heap.opt.Dimensions)\n\t\t\titr.window.name, itr.window.tags = p.Name, tags.ID()\n\t\t\titr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time)\n\t\t\treturn p, nil\n\t\t}\n\n\t\t// Read the next point from the current iterator.\n\t\tp, err := itr.curr.itr.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If there are no more points then remove iterator from heap and find next.\n\t\tif p == nil {\n\t\t\titr.curr = nil\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check if the point is inside of our current window.\n\t\tinWindow := true\n\t\tif window := itr.window; window.name != p.Name {\n\t\t\tinWindow = false\n\t\t} else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() {\n\t\t\tinWindow = false\n\t\t} else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime {\n\t\t\tinWindow = false\n\t\t} else if !opt.Ascending && p.Time < window.startTime {\n\t\t\tinWindow = false\n\t\t}\n\n\t\t// If it's outside our window then push iterator back on the heap and find new iterator.\n\t\tif !inWindow {\n\t\t\titr.curr.itr.unread(p)\n\t\t\theap.Push(itr.heap, itr.curr)\n\t\t\titr.curr = nil\n\t\t\tcontinue\n\t\t}\n\n\t\treturn p, nil\n\t}\n}\n\n// floatMergeHeap represents a heap of floatMergeHeapItems.\n// Items are sorted by their next window and then by name/tags.\ntype floatMergeHeap struct {\n\topt   IteratorOptions\n\titems []*floatMergeHeapItem\n}\n\nfunc (h *floatMergeHeap) Len() int      { return len(h.items) }\nfunc (h *floatMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }\nfunc (h *floatMergeHeap) Less(i, j int) bool {\n\tx, err := h.items[i].itr.peek()\n\tif err != nil {\n\t\treturn true\n\t}\n\ty, err := h.items[j].itr.peek()\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tif h.opt.Ascending {\n\t\tif x.Name != y.Name {\n\t\t\treturn x.Name < y.Name\n\t\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != 
yTags.ID() {\n\t\t\treturn xTags.ID() < yTags.ID()\n\t\t}\n\t} else {\n\t\tif x.Name != y.Name {\n\t\t\treturn x.Name > y.Name\n\t\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {\n\t\t\treturn xTags.ID() > yTags.ID()\n\t\t}\n\t}\n\n\txt, _ := h.opt.Window(x.Time)\n\tyt, _ := h.opt.Window(y.Time)\n\n\tif h.opt.Ascending {\n\t\treturn xt < yt\n\t}\n\treturn xt > yt\n}\n\nfunc (h *floatMergeHeap) Push(x interface{}) {\n\th.items = append(h.items, x.(*floatMergeHeapItem))\n}\n\nfunc (h *floatMergeHeap) Pop() interface{} {\n\told := h.items\n\tn := len(old)\n\titem := old[n-1]\n\th.items = old[0 : n-1]\n\treturn item\n}\n\ntype floatMergeHeapItem struct {\n\titr *bufFloatIterator\n}\n\n// floatSortedMergeIterator is an iterator that sorts and merges multiple iterators into one.\ntype floatSortedMergeIterator struct {\n\tinputs []FloatIterator\n\theap   *floatSortedMergeHeap\n\tinit   bool\n}\n\n// newFloatSortedMergeIterator returns an instance of floatSortedMergeIterator.\nfunc newFloatSortedMergeIterator(inputs []FloatIterator, opt IteratorOptions) Iterator {\n\titr := &floatSortedMergeIterator{\n\t\tinputs: inputs,\n\t\theap: &floatSortedMergeHeap{\n\t\t\titems: make([]*floatSortedMergeHeapItem, 0, len(inputs)),\n\t\t\topt:   opt,\n\t\t},\n\t}\n\n\t// Initialize heap items.\n\tfor _, input := range inputs {\n\t\t// Append to the heap.\n\t\titr.heap.items = append(itr.heap.items, &floatSortedMergeHeapItem{itr: input})\n\t}\n\n\treturn itr\n}\n\n// Stats returns an aggregation of stats from the underlying iterators.\nfunc (itr *floatSortedMergeIterator) Stats() IteratorStats {\n\tvar stats IteratorStats\n\tfor _, input := range itr.inputs {\n\t\tstats.Add(input.Stats())\n\t}\n\treturn stats\n}\n\n// Close closes the underlying iterators.\nfunc (itr *floatSortedMergeIterator) Close() error {\n\tfor _, input := range itr.inputs {\n\t\tinput.Close()\n\t}\n\treturn nil\n}\n\n// Next returns the next 
points from the iterator.\nfunc (itr *floatSortedMergeIterator) Next() (*FloatPoint, error) { return itr.pop() }\n\n// pop returns the next point from the heap.\n// Reads the next point from item's cursor and puts it back on the heap.\nfunc (itr *floatSortedMergeIterator) pop() (*FloatPoint, error) {\n\t// Initialize the heap. See the MergeIterator to see why this has to be done lazily.\n\tif !itr.init {\n\t\titems := itr.heap.items\n\t\titr.heap.items = make([]*floatSortedMergeHeapItem, 0, len(items))\n\t\tfor _, item := range items {\n\t\t\tvar err error\n\t\t\tif item.point, err = item.itr.Next(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else if item.point == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\titr.heap.items = append(itr.heap.items, item)\n\t\t}\n\t\theap.Init(itr.heap)\n\t\titr.init = true\n\t}\n\n\tif len(itr.heap.items) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// Read the next item from the heap.\n\titem := heap.Pop(itr.heap).(*floatSortedMergeHeapItem)\n\tif item.err != nil {\n\t\treturn nil, item.err\n\t} else if item.point == nil {\n\t\treturn nil, nil\n\t}\n\n\t// Copy the point for return.\n\tp := item.point.Clone()\n\n\t// Read the next item from the cursor. 
Push back to heap if one exists.\n\tif item.point, item.err = item.itr.Next(); item.point != nil {\n\t\theap.Push(itr.heap, item)\n\t}\n\n\treturn p, nil\n}\n\n// floatSortedMergeHeap represents a heap of floatSortedMergeHeapItems.\ntype floatSortedMergeHeap struct {\n\topt   IteratorOptions\n\titems []*floatSortedMergeHeapItem\n}\n\nfunc (h *floatSortedMergeHeap) Len() int      { return len(h.items) }\nfunc (h *floatSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }\nfunc (h *floatSortedMergeHeap) Less(i, j int) bool {\n\tx, y := h.items[i].point, h.items[j].point\n\n\tif h.opt.Ascending {\n\t\tif x.Name != y.Name {\n\t\t\treturn x.Name < y.Name\n\t\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {\n\t\t\treturn xTags.ID() < yTags.ID()\n\t\t}\n\t\treturn x.Time < y.Time\n\t}\n\n\tif x.Name != y.Name {\n\t\treturn x.Name > y.Name\n\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {\n\t\treturn xTags.ID() > yTags.ID()\n\t}\n\treturn x.Time > y.Time\n}\n\nfunc (h *floatSortedMergeHeap) Push(x interface{}) {\n\th.items = append(h.items, x.(*floatSortedMergeHeapItem))\n}\n\nfunc (h *floatSortedMergeHeap) Pop() interface{} {\n\told := h.items\n\tn := len(old)\n\titem := old[n-1]\n\th.items = old[0 : n-1]\n\treturn item\n}\n\ntype floatSortedMergeHeapItem struct {\n\tpoint *FloatPoint\n\terr   error\n\titr   FloatIterator\n}\n\n// floatParallelIterator represents an iterator that pulls data in a separate goroutine.\ntype floatParallelIterator struct {\n\tinput FloatIterator\n\tch    chan floatPointError\n\n\tonce    sync.Once\n\tclosing chan struct{}\n\twg      sync.WaitGroup\n}\n\n// newFloatParallelIterator returns a new instance of floatParallelIterator.\nfunc newFloatParallelIterator(input FloatIterator) *floatParallelIterator {\n\titr := &floatParallelIterator{\n\t\tinput:   input,\n\t\tch:      
make(chan floatPointError, 256),\n\t\tclosing: make(chan struct{}),\n\t}\n\titr.wg.Add(1)\n\tgo itr.monitor()\n\treturn itr\n}\n\n// Stats returns stats from the underlying iterator.\nfunc (itr *floatParallelIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the underlying iterators.\nfunc (itr *floatParallelIterator) Close() error {\n\titr.once.Do(func() { close(itr.closing) })\n\titr.wg.Wait()\n\treturn itr.input.Close()\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *floatParallelIterator) Next() (*FloatPoint, error) {\n\tv, ok := <-itr.ch\n\tif !ok {\n\t\treturn nil, io.EOF\n\t}\n\treturn v.point, v.err\n}\n\n// monitor runs in a separate goroutine and actively pulls the next point.\nfunc (itr *floatParallelIterator) monitor() {\n\tdefer close(itr.ch)\n\tdefer itr.wg.Done()\n\n\tfor {\n\t\t// Read next point.\n\t\tp, err := itr.input.Next()\n\t\tif p != nil {\n\t\t\tp = p.Clone()\n\t\t}\n\n\t\tselect {\n\t\tcase <-itr.closing:\n\t\t\treturn\n\t\tcase itr.ch <- floatPointError{point: p, err: err}:\n\t\t}\n\t}\n}\n\ntype floatPointError struct {\n\tpoint *FloatPoint\n\terr   error\n}\n\n// floatLimitIterator represents an iterator that limits points per group.\ntype floatLimitIterator struct {\n\tinput FloatIterator\n\topt   IteratorOptions\n\tn     int\n\n\tprev struct {\n\t\tname string\n\t\ttags Tags\n\t}\n}\n\n// newFloatLimitIterator returns a new instance of floatLimitIterator.\nfunc newFloatLimitIterator(input FloatIterator, opt IteratorOptions) *floatLimitIterator {\n\treturn &floatLimitIterator{\n\t\tinput: input,\n\t\topt:   opt,\n\t}\n}\n\n// Stats returns stats from the underlying iterator.\nfunc (itr *floatLimitIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the underlying iterators.\nfunc (itr *floatLimitIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next point from the iterator.\nfunc (itr *floatLimitIterator) Next() (*FloatPoint, error) 
{\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif p == nil || err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Reset window and counter if a new window is encountered.\n\t\tif p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) {\n\t\t\titr.prev.name = p.Name\n\t\t\titr.prev.tags = p.Tags\n\t\t\titr.n = 0\n\t\t}\n\n\t\t// Increment counter.\n\t\titr.n++\n\n\t\t// Read next point if not beyond the offset.\n\t\tif itr.n <= itr.opt.Offset {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Read next point if we're beyond the limit.\n\t\tif itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn p, nil\n\t}\n}\n\ntype floatFillIterator struct {\n\tinput     *bufFloatIterator\n\tprev      FloatPoint\n\tstartTime int64\n\tendTime   int64\n\tauxFields []interface{}\n\tinit      bool\n\topt       IteratorOptions\n\n\twindow struct {\n\t\tname   string\n\t\ttags   Tags\n\t\ttime   int64\n\t\toffset int64\n\t}\n}\n\nfunc newFloatFillIterator(input FloatIterator, expr Expr, opt IteratorOptions) *floatFillIterator {\n\tif opt.Fill == NullFill {\n\t\tif expr, ok := expr.(*Call); ok && expr.Name == \"count\" {\n\t\t\topt.Fill = NumberFill\n\t\t\topt.FillValue = float64(0)\n\t\t}\n\t}\n\n\tvar startTime, endTime int64\n\tif opt.Ascending {\n\t\tstartTime, _ = opt.Window(opt.StartTime)\n\t\tendTime, _ = opt.Window(opt.EndTime)\n\t} else {\n\t\tstartTime, _ = opt.Window(opt.EndTime)\n\t\tendTime, _ = opt.Window(opt.StartTime)\n\t}\n\n\tvar auxFields []interface{}\n\tif len(opt.Aux) > 0 {\n\t\tauxFields = make([]interface{}, len(opt.Aux))\n\t}\n\n\treturn &floatFillIterator{\n\t\tinput:     newBufFloatIterator(input),\n\t\tprev:      FloatPoint{Nil: true},\n\t\tstartTime: startTime,\n\t\tendTime:   endTime,\n\t\tauxFields: auxFields,\n\t\topt:       opt,\n\t}\n}\n\nfunc (itr *floatFillIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *floatFillIterator) Close() error         { return itr.input.Close() }\n\nfunc (itr 
*floatFillIterator) Next() (*FloatPoint, error) {\n\tif !itr.init {\n\t\tp, err := itr.input.peek()\n\t\tif p == nil || err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\titr.window.name, itr.window.tags = p.Name, p.Tags\n\t\titr.window.time = itr.startTime\n\t\tif itr.opt.Location != nil {\n\t\t\t_, itr.window.offset = itr.opt.Zone(itr.window.time)\n\t\t}\n\t\titr.init = true\n\t}\n\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check if the next point is outside of our window or is nil.\n\tfor p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() {\n\t\t// If we are inside of an interval, unread the point and continue below to\n\t\t// constructing a new point.\n\t\tif itr.opt.Ascending {\n\t\t\tif itr.window.time <= itr.endTime {\n\t\t\t\titr.input.unread(p)\n\t\t\t\tp = nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tif itr.window.time >= itr.endTime {\n\t\t\t\titr.input.unread(p)\n\t\t\t\tp = nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// We are *not* in a current interval. 
If there is no next point,\n\t\t// we are at the end of all intervals.\n\t\tif p == nil {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\t// Set the new interval.\n\t\titr.window.name, itr.window.tags = p.Name, p.Tags\n\t\titr.window.time = itr.startTime\n\t\tif itr.opt.Location != nil {\n\t\t\t_, itr.window.offset = itr.opt.Zone(itr.window.time)\n\t\t}\n\t\titr.prev = FloatPoint{Nil: true}\n\t\tbreak\n\t}\n\n\t// Check if the point is our next expected point.\n\tif p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) {\n\t\tif p != nil {\n\t\t\titr.input.unread(p)\n\t\t}\n\n\t\tp = &FloatPoint{\n\t\t\tName: itr.window.name,\n\t\t\tTags: itr.window.tags,\n\t\t\tTime: itr.window.time,\n\t\t\tAux:  itr.auxFields,\n\t\t}\n\n\t\tswitch itr.opt.Fill {\n\t\tcase LinearFill:\n\t\t\tif !itr.prev.Nil {\n\t\t\t\tnext, err := itr.input.peek()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t} else if next != nil && next.Name == itr.window.name && next.Tags.ID() == itr.window.tags.ID() {\n\t\t\t\t\tinterval := int64(itr.opt.Interval.Duration)\n\t\t\t\t\tstart := itr.window.time / interval\n\t\t\t\t\tp.Value = linearFloat(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value)\n\t\t\t\t} else {\n\t\t\t\t\tp.Nil = true\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tp.Nil = true\n\t\t\t}\n\n\t\tcase NullFill:\n\t\t\tp.Nil = true\n\t\tcase NumberFill:\n\t\t\tp.Value = castToFloat(itr.opt.FillValue)\n\t\tcase PreviousFill:\n\t\t\tif !itr.prev.Nil {\n\t\t\t\tp.Value = itr.prev.Value\n\t\t\t\tp.Nil = itr.prev.Nil\n\t\t\t} else {\n\t\t\t\tp.Nil = true\n\t\t\t}\n\t\t}\n\t} else {\n\t\titr.prev = *p\n\t}\n\n\t// Advance the expected time. 
Do not advance to a new window here\n\t// as there may be lingering points with the same timestamp in the previous\n\t// window.\n\tif itr.opt.Ascending {\n\t\titr.window.time += int64(itr.opt.Interval.Duration)\n\t} else {\n\t\titr.window.time -= int64(itr.opt.Interval.Duration)\n\t}\n\n\t// Check to see if we have passed over an offset change and adjust the time\n\t// to account for this new offset.\n\tif itr.opt.Location != nil {\n\t\tif _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset {\n\t\t\tdiff := itr.window.offset - offset\n\t\t\tif abs(diff) < int64(itr.opt.Interval.Duration) {\n\t\t\t\titr.window.time += diff\n\t\t\t}\n\t\t\titr.window.offset = offset\n\t\t}\n\t}\n\treturn p, nil\n}\n\n// floatIntervalIterator represents a float implementation of IntervalIterator.\ntype floatIntervalIterator struct {\n\tinput FloatIterator\n\topt   IteratorOptions\n}\n\nfunc newFloatIntervalIterator(input FloatIterator, opt IteratorOptions) *floatIntervalIterator {\n\treturn &floatIntervalIterator{input: input, opt: opt}\n}\n\nfunc (itr *floatIntervalIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *floatIntervalIterator) Close() error         { return itr.input.Close() }\n\nfunc (itr *floatIntervalIterator) Next() (*FloatPoint, error) {\n\tp, err := itr.input.Next()\n\tif p == nil || err != nil {\n\t\treturn nil, err\n\t}\n\tp.Time, _ = itr.opt.Window(p.Time)\n\t// If we see the minimum allowable time, set the time to zero so we don't\n\t// break the default returned time for aggregate queries without times.\n\tif p.Time == MinTime {\n\t\tp.Time = 0\n\t}\n\treturn p, nil\n}\n\n// floatInterruptIterator represents a float implementation of InterruptIterator.\ntype floatInterruptIterator struct {\n\tinput   FloatIterator\n\tclosing <-chan struct{}\n\tcount   int\n}\n\nfunc newFloatInterruptIterator(input FloatIterator, closing <-chan struct{}) *floatInterruptIterator {\n\treturn &floatInterruptIterator{input: input, 
closing: closing}\n}\n\nfunc (itr *floatInterruptIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *floatInterruptIterator) Close() error         { return itr.input.Close() }\n\nfunc (itr *floatInterruptIterator) Next() (*FloatPoint, error) {\n\t// Only check if the channel is closed every N points. This\n\t// intentionally checks on both 0 and N so that if the iterator\n\t// has been interrupted before the first point is emitted it will\n\t// not emit any points.\n\tif itr.count&0xFF == 0xFF {\n\t\tselect {\n\t\tcase <-itr.closing:\n\t\t\treturn nil, itr.Close()\n\t\tdefault:\n\t\t\t// Reset iterator count to zero and fall through to emit the next point.\n\t\t\titr.count = 0\n\t\t}\n\t}\n\n\t// Increment the counter for every point read.\n\titr.count++\n\treturn itr.input.Next()\n}\n\n// floatCloseInterruptIterator represents a float implementation of CloseInterruptIterator.\ntype floatCloseInterruptIterator struct {\n\tinput   FloatIterator\n\tclosing <-chan struct{}\n\tdone    chan struct{}\n\tonce    sync.Once\n}\n\nfunc newFloatCloseInterruptIterator(input FloatIterator, closing <-chan struct{}) *floatCloseInterruptIterator {\n\titr := &floatCloseInterruptIterator{\n\t\tinput:   input,\n\t\tclosing: closing,\n\t\tdone:    make(chan struct{}),\n\t}\n\tgo itr.monitor()\n\treturn itr\n}\n\nfunc (itr *floatCloseInterruptIterator) monitor() {\n\tselect {\n\tcase <-itr.closing:\n\t\titr.Close()\n\tcase <-itr.done:\n\t}\n}\n\nfunc (itr *floatCloseInterruptIterator) Stats() IteratorStats {\n\treturn itr.input.Stats()\n}\n\nfunc (itr *floatCloseInterruptIterator) Close() error {\n\titr.once.Do(func() {\n\t\tclose(itr.done)\n\t\titr.input.Close()\n\t})\n\treturn nil\n}\n\nfunc (itr *floatCloseInterruptIterator) Next() (*FloatPoint, error) {\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\t// Check if the iterator was closed.\n\t\tselect {\n\t\tcase <-itr.done:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn p, 
nil\n}\n\n// auxFloatPoint represents a combination of a point and an error for the AuxIterator.\ntype auxFloatPoint struct {\n\tpoint *FloatPoint\n\terr   error\n}\n\n// floatAuxIterator represents a float implementation of AuxIterator.\ntype floatAuxIterator struct {\n\tinput      *bufFloatIterator\n\toutput     chan auxFloatPoint\n\tfields     *auxIteratorFields\n\tbackground bool\n}\n\nfunc newFloatAuxIterator(input FloatIterator, opt IteratorOptions) *floatAuxIterator {\n\treturn &floatAuxIterator{\n\t\tinput:  newBufFloatIterator(input),\n\t\toutput: make(chan auxFloatPoint, 1),\n\t\tfields: newAuxIteratorFields(opt),\n\t}\n}\n\nfunc (itr *floatAuxIterator) Background() {\n\titr.background = true\n\titr.Start()\n\tgo DrainIterator(itr)\n}\n\nfunc (itr *floatAuxIterator) Start()               { go itr.stream() }\nfunc (itr *floatAuxIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *floatAuxIterator) Close() error         { return itr.input.Close() }\nfunc (itr *floatAuxIterator) Next() (*FloatPoint, error) {\n\tp := <-itr.output\n\treturn p.point, p.err\n}\nfunc (itr *floatAuxIterator) Iterator(name string, typ DataType) Iterator {\n\treturn itr.fields.iterator(name, typ)\n}\n\nfunc (itr *floatAuxIterator) stream() {\n\tfor {\n\t\t// Read next point.\n\t\tp, err := itr.input.Next()\n\t\tif err != nil {\n\t\t\titr.output <- auxFloatPoint{err: err}\n\t\t\titr.fields.sendError(err)\n\t\t\tbreak\n\t\t} else if p == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Send point to output and to each field iterator.\n\t\titr.output <- auxFloatPoint{point: p}\n\t\tif ok := itr.fields.send(p); !ok && itr.background {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tclose(itr.output)\n\titr.fields.close()\n}\n\n// floatChanIterator represents a new instance of floatChanIterator.\ntype floatChanIterator struct {\n\tbuf struct {\n\t\ti      int\n\t\tfilled bool\n\t\tpoints [2]FloatPoint\n\t}\n\terr  error\n\tcond *sync.Cond\n\tdone bool\n}\n\nfunc (itr *floatChanIterator) Stats() 
IteratorStats { return IteratorStats{} }\n\nfunc (itr *floatChanIterator) Close() error {\n\titr.cond.L.Lock()\n\t// Mark the channel iterator as done and signal all waiting goroutines to start again.\n\titr.done = true\n\titr.cond.Broadcast()\n\t// Do not defer the unlock so we don't create an unnecessary allocation.\n\titr.cond.L.Unlock()\n\treturn nil\n}\n\nfunc (itr *floatChanIterator) setBuf(name string, tags Tags, time int64, value interface{}) bool {\n\titr.cond.L.Lock()\n\tdefer itr.cond.L.Unlock()\n\n\t// Wait for either the iterator to be done (so we don't have to set the value)\n\t// or for the buffer to have been read and ready for another write.\n\tfor !itr.done && itr.buf.filled {\n\t\titr.cond.Wait()\n\t}\n\n\t// Do not set the value and return false to signal that the iterator is closed.\n\t// Do this after the above wait as the above for loop may have exited because\n\t// the iterator was closed.\n\tif itr.done {\n\t\treturn false\n\t}\n\n\tswitch v := value.(type) {\n\tcase float64:\n\t\titr.buf.points[itr.buf.i] = FloatPoint{Name: name, Tags: tags, Time: time, Value: v}\n\n\tcase int64:\n\t\titr.buf.points[itr.buf.i] = FloatPoint{Name: name, Tags: tags, Time: time, Value: float64(v)}\n\n\tdefault:\n\t\titr.buf.points[itr.buf.i] = FloatPoint{Name: name, Tags: tags, Time: time, Nil: true}\n\t}\n\titr.buf.filled = true\n\n\t// Signal to all waiting goroutines that a new value is ready to read.\n\titr.cond.Signal()\n\treturn true\n}\n\nfunc (itr *floatChanIterator) setErr(err error) {\n\titr.cond.L.Lock()\n\tdefer itr.cond.L.Unlock()\n\titr.err = err\n\n\t// Signal to all waiting goroutines that a new value is ready to read.\n\titr.cond.Signal()\n}\n\nfunc (itr *floatChanIterator) Next() (*FloatPoint, error) {\n\titr.cond.L.Lock()\n\tdefer itr.cond.L.Unlock()\n\n\t// Check for an error and return one if there.\n\tif itr.err != nil {\n\t\treturn nil, itr.err\n\t}\n\n\t// Wait until either a value is available in the buffer or\n\t// the iterator is 
closed.\n\tfor !itr.done && !itr.buf.filled {\n\t\titr.cond.Wait()\n\t}\n\n\t// Return nil once the channel is done and the buffer is empty.\n\tif itr.done && !itr.buf.filled {\n\t\treturn nil, nil\n\t}\n\n\t// Always read from the buffer if it exists, even if the iterator\n\t// is closed. This prevents the last value from being truncated by\n\t// the parent iterator.\n\tp := &itr.buf.points[itr.buf.i]\n\titr.buf.i = (itr.buf.i + 1) % len(itr.buf.points)\n\titr.buf.filled = false\n\titr.cond.Signal()\n\treturn p, nil\n}\n\n// floatReduceFloatIterator executes a reducer for every interval and buffers the result.\ntype floatReduceFloatIterator struct {\n\tinput    *bufFloatIterator\n\tcreate   func() (FloatPointAggregator, FloatPointEmitter)\n\tdims     []string\n\topt      IteratorOptions\n\tpoints   []FloatPoint\n\tkeepTags bool\n}\n\nfunc newFloatReduceFloatIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, FloatPointEmitter)) *floatReduceFloatIterator {\n\treturn &floatReduceFloatIterator{\n\t\tinput:  newBufFloatIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *floatReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *floatReduceFloatIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *floatReduceFloatIterator) Next() (*FloatPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// floatReduceFloatPoint stores the reduced data for a 
name/tag combination.\ntype floatReduceFloatPoint struct {\n\tName       string\n\tTags       Tags\n\tAggregator FloatPointAggregator\n\tEmitter    FloatPointEmitter\n}\n\n// reduce executes fn once for every point in the next window.\n// The previous value for the dimension is passed to fn.\nfunc (itr *floatReduceFloatIterator) reduce() ([]FloatPoint, error) {\n\t// Calculate next window.\n\tvar (\n\t\tstartTime, endTime int64\n\t\twindow             struct {\n\t\t\tname string\n\t\t\ttags string\n\t\t}\n\t)\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t} else if p.Nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unread the point so it can be processed.\n\t\titr.input.unread(p)\n\t\tstartTime, endTime = itr.opt.Window(p.Time)\n\t\twindow.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()\n\t\tbreak\n\t}\n\n\t// Create points by tags.\n\tm := make(map[string]*floatReduceFloatPoint)\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.NextInWindow(startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr == nil {\n\t\t\tbreak\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t} else if curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Ensure this point is within the same final window.\n\t\tif curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Retrieve the tags on this point for this level of the query.\n\t\t// This may be different than the bucket dimensions.\n\t\ttags := curr.Tags.Subset(itr.dims)\n\t\tid := tags.ID()\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &floatReduceFloatPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       
tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\tm[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateFloat(curr)\n\t}\n\n\t// Reverse sort points by name & tag if our output is supposed to be ordered.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tif len(keys) > 1 && itr.opt.Ordered {\n\t\tsort.Sort(reverseStringSlice(keys))\n\t}\n\n\t// Assume the points are already sorted until proven otherwise.\n\tsortedByTime := true\n\t// Emit the points for each name & tag combination.\n\ta := make([]FloatPoint, 0, len(m))\n\tfor _, k := range keys {\n\t\trp := m[k]\n\t\tpoints := rp.Emitter.Emit()\n\t\tfor i := len(points) - 1; i >= 0; i-- {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tif !itr.keepTags {\n\t\t\t\tpoints[i].Tags = rp.Tags\n\t\t\t}\n\t\t\t// Set the points time to the interval time if the reducer didn't provide one.\n\t\t\tif points[i].Time == ZeroTime {\n\t\t\t\tpoints[i].Time = startTime\n\t\t\t} else {\n\t\t\t\tsortedByTime = false\n\t\t\t}\n\t\t\ta = append(a, points[i])\n\t\t}\n\t}\n\n\t// Points may be out of order. 
Perform a stable sort by time if requested.\n\tif !sortedByTime && itr.opt.Ordered {\n\t\tsort.Stable(sort.Reverse(floatPointsByTime(a)))\n\t}\n\n\treturn a, nil\n}\n\n// floatStreamFloatIterator streams inputs into the iterator and emits points gradually.\ntype floatStreamFloatIterator struct {\n\tinput  *bufFloatIterator\n\tcreate func() (FloatPointAggregator, FloatPointEmitter)\n\tdims   []string\n\topt    IteratorOptions\n\tm      map[string]*floatReduceFloatPoint\n\tpoints []FloatPoint\n}\n\n// newFloatStreamFloatIterator returns a new instance of floatStreamFloatIterator.\nfunc newFloatStreamFloatIterator(input FloatIterator, createFn func() (FloatPointAggregator, FloatPointEmitter), opt IteratorOptions) *floatStreamFloatIterator {\n\treturn &floatStreamFloatIterator{\n\t\tinput:  newBufFloatIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t\tm:      make(map[string]*floatReduceFloatPoint),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *floatStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *floatStreamFloatIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next value for the stream iterator.\nfunc (itr *floatStreamFloatIterator) Next() (*FloatPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// reduce creates and manages aggregators for every point from the input.\n// After aggregating a point, it always tries to emit a value using the emitter.\nfunc (itr *floatStreamFloatIterator) reduce() ([]FloatPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := 
itr.input.Next()\n\t\tif curr == nil {\n\t\t\t// Close all of the aggregators to flush any remaining points to emit.\n\t\t\tvar points []FloatPoint\n\t\t\tfor _, rp := range itr.m {\n\t\t\t\tif aggregator, ok := rp.Aggregator.(io.Closer); ok {\n\t\t\t\t\tif err := aggregator.Close(); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tpts := rp.Emitter.Emit()\n\t\t\t\t\tif len(pts) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor i := range pts {\n\t\t\t\t\t\tpts[i].Name = rp.Name\n\t\t\t\t\t\tpts[i].Tags = rp.Tags\n\t\t\t\t\t}\n\t\t\t\t\tpoints = append(points, pts...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Eliminate the aggregators and emitters.\n\t\t\titr.m = nil\n\t\t\treturn points, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags := curr.Tags.Subset(itr.dims)\n\n\t\tid := curr.Name\n\t\tif len(tags.m) > 0 {\n\t\t\tid += \"\\x00\" + tags.ID()\n\t\t}\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := itr.m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &floatReduceFloatPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\titr.m[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateFloat(curr)\n\n\t\t// Attempt to emit points from the aggregator.\n\t\tpoints := rp.Emitter.Emit()\n\t\tif len(points) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range points {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tpoints[i].Tags = rp.Tags\n\t\t}\n\t\treturn points, nil\n\t}\n}\n\n// floatExprIterator executes a function to modify an existing point\n// for every output of the input iterator.\ntype floatExprIterator struct {\n\tleft      *bufFloatIterator\n\tright     *bufFloatIterator\n\tfn        floatExprFunc\n\tpoints    []FloatPoint // must be size 2\n\tstorePrev bool\n}\n\nfunc newFloatExprIterator(left, right FloatIterator, opt 
IteratorOptions, fn func(a, b float64) float64) *floatExprIterator {\n\tvar points []FloatPoint\n\tswitch opt.Fill {\n\tcase NullFill, PreviousFill:\n\t\tpoints = []FloatPoint{{Nil: true}, {Nil: true}}\n\tcase NumberFill:\n\t\tvalue := castToFloat(opt.FillValue)\n\t\tpoints = []FloatPoint{{Value: value}, {Value: value}}\n\t}\n\treturn &floatExprIterator{\n\t\tleft:      newBufFloatIterator(left),\n\t\tright:     newBufFloatIterator(right),\n\t\tpoints:    points,\n\t\tfn:        fn,\n\t\tstorePrev: opt.Fill == PreviousFill,\n\t}\n}\n\nfunc (itr *floatExprIterator) Stats() IteratorStats {\n\tstats := itr.left.Stats()\n\tstats.Add(itr.right.Stats())\n\treturn stats\n}\n\nfunc (itr *floatExprIterator) Close() error {\n\titr.left.Close()\n\titr.right.Close()\n\treturn nil\n}\n\nfunc (itr *floatExprIterator) Next() (*FloatPoint, error) {\n\tfor {\n\t\ta, b, err := itr.next()\n\t\tif err != nil || (a == nil && b == nil) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If any of these are nil and we are using fill(none), skip these points.\n\t\tif (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If one of the two points is nil, we need to fill it with a fake nil\n\t\t// point that has the same name, tags, and time as the other point.\n\t\t// There should never be a time when both of these are nil.\n\t\tif a == nil {\n\t\t\tp := *b\n\t\t\ta = &p\n\t\t\ta.Value = 0\n\t\t\ta.Nil = true\n\t\t} else if b == nil {\n\t\t\tp := *a\n\t\t\tb = &p\n\t\t\tb.Value = 0\n\t\t\tb.Nil = true\n\t\t}\n\n\t\t// If a value is nil, use the fill values if the fill value is non-nil.\n\t\tif a.Nil && !itr.points[0].Nil {\n\t\t\ta.Value = itr.points[0].Value\n\t\t\ta.Nil = false\n\t\t}\n\t\tif b.Nil && !itr.points[1].Nil {\n\t\t\tb.Value = itr.points[1].Value\n\t\t\tb.Nil = false\n\t\t}\n\n\t\tif itr.storePrev {\n\t\t\titr.points[0], itr.points[1] = *a, *b\n\t\t}\n\n\t\tif a.Nil {\n\t\t\treturn a, nil\n\t\t} else if b.Nil {\n\t\t\treturn b, 
nil\n\t\t}\n\t\ta.Value = itr.fn(a.Value, b.Value)\n\t\treturn a, nil\n\n\t}\n}\n\n// next returns the next points within each iterator. If the iterators are\n// uneven, it organizes them so only matching points are returned.\nfunc (itr *floatExprIterator) next() (a, b *FloatPoint, err error) {\n\t// Retrieve the next value for both the left and right.\n\ta, err = itr.left.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tb, err = itr.right.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// If we have a point from both, make sure that they match each other.\n\tif a != nil && b != nil {\n\t\tif a.Name > b.Name {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Name < b.Name {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if ltags < rtags {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif a.Time > b.Time {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Time < b.Time {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\t}\n\treturn a, b, nil\n}\n\n// floatExprFunc creates or modifies a point by combining two\n// points. The point passed in may be modified and returned rather than\n// allocating a new point if possible. 
One of the points may be nil, but at\n// least one of the points will be non-nil.\ntype floatExprFunc func(a, b float64) float64\n\n// floatReduceIntegerIterator executes a reducer for every interval and buffers the result.\ntype floatReduceIntegerIterator struct {\n\tinput    *bufFloatIterator\n\tcreate   func() (FloatPointAggregator, IntegerPointEmitter)\n\tdims     []string\n\topt      IteratorOptions\n\tpoints   []IntegerPoint\n\tkeepTags bool\n}\n\nfunc newFloatReduceIntegerIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, IntegerPointEmitter)) *floatReduceIntegerIterator {\n\treturn &floatReduceIntegerIterator{\n\t\tinput:  newBufFloatIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *floatReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *floatReduceIntegerIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *floatReduceIntegerIterator) Next() (*IntegerPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// floatReduceIntegerPoint stores the reduced data for a name/tag combination.\ntype floatReduceIntegerPoint struct {\n\tName       string\n\tTags       Tags\n\tAggregator FloatPointAggregator\n\tEmitter    IntegerPointEmitter\n}\n\n// reduce executes fn once for every point in the next window.\n// The previous value for the dimension is passed to fn.\nfunc (itr *floatReduceIntegerIterator) reduce() ([]IntegerPoint, error) {\n\t// 
Calculate next window.\n\tvar (\n\t\tstartTime, endTime int64\n\t\twindow             struct {\n\t\t\tname string\n\t\t\ttags string\n\t\t}\n\t)\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t} else if p.Nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unread the point so it can be processed.\n\t\titr.input.unread(p)\n\t\tstartTime, endTime = itr.opt.Window(p.Time)\n\t\twindow.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()\n\t\tbreak\n\t}\n\n\t// Create points by tags.\n\tm := make(map[string]*floatReduceIntegerPoint)\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.NextInWindow(startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr == nil {\n\t\t\tbreak\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t} else if curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Ensure this point is within the same final window.\n\t\tif curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Retrieve the tags on this point for this level of the query.\n\t\t// This may be different than the bucket dimensions.\n\t\ttags := curr.Tags.Subset(itr.dims)\n\t\tid := tags.ID()\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &floatReduceIntegerPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\tm[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateFloat(curr)\n\t}\n\n\t// Reverse sort points by name & tag if our output is supposed to be ordered.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tif len(keys) > 1 && itr.opt.Ordered 
{\n\t\tsort.Sort(reverseStringSlice(keys))\n\t}\n\n\t// Assume the points are already sorted until proven otherwise.\n\tsortedByTime := true\n\t// Emit the points for each name & tag combination.\n\ta := make([]IntegerPoint, 0, len(m))\n\tfor _, k := range keys {\n\t\trp := m[k]\n\t\tpoints := rp.Emitter.Emit()\n\t\tfor i := len(points) - 1; i >= 0; i-- {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tif !itr.keepTags {\n\t\t\t\tpoints[i].Tags = rp.Tags\n\t\t\t}\n\t\t\t// Set the points time to the interval time if the reducer didn't provide one.\n\t\t\tif points[i].Time == ZeroTime {\n\t\t\t\tpoints[i].Time = startTime\n\t\t\t} else {\n\t\t\t\tsortedByTime = false\n\t\t\t}\n\t\t\ta = append(a, points[i])\n\t\t}\n\t}\n\n\t// Points may be out of order. Perform a stable sort by time if requested.\n\tif !sortedByTime && itr.opt.Ordered {\n\t\tsort.Stable(sort.Reverse(integerPointsByTime(a)))\n\t}\n\n\treturn a, nil\n}\n\n// floatStreamIntegerIterator streams inputs into the iterator and emits points gradually.\ntype floatStreamIntegerIterator struct {\n\tinput  *bufFloatIterator\n\tcreate func() (FloatPointAggregator, IntegerPointEmitter)\n\tdims   []string\n\topt    IteratorOptions\n\tm      map[string]*floatReduceIntegerPoint\n\tpoints []IntegerPoint\n}\n\n// newFloatStreamIntegerIterator returns a new instance of floatStreamIntegerIterator.\nfunc newFloatStreamIntegerIterator(input FloatIterator, createFn func() (FloatPointAggregator, IntegerPointEmitter), opt IteratorOptions) *floatStreamIntegerIterator {\n\treturn &floatStreamIntegerIterator{\n\t\tinput:  newBufFloatIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t\tm:      make(map[string]*floatReduceIntegerPoint),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *floatStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *floatStreamIntegerIterator) Close() error { 
return itr.input.Close() }\n\n// Next returns the next value for the stream iterator.\nfunc (itr *floatStreamIntegerIterator) Next() (*IntegerPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// reduce creates and manages aggregators for every point from the input.\n// After aggregating a point, it always tries to emit a value using the emitter.\nfunc (itr *floatStreamIntegerIterator) reduce() ([]IntegerPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.Next()\n\t\tif curr == nil {\n\t\t\t// Close all of the aggregators to flush any remaining points to emit.\n\t\t\tvar points []IntegerPoint\n\t\t\tfor _, rp := range itr.m {\n\t\t\t\tif aggregator, ok := rp.Aggregator.(io.Closer); ok {\n\t\t\t\t\tif err := aggregator.Close(); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tpts := rp.Emitter.Emit()\n\t\t\t\t\tif len(pts) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor i := range pts {\n\t\t\t\t\t\tpts[i].Name = rp.Name\n\t\t\t\t\t\tpts[i].Tags = rp.Tags\n\t\t\t\t\t}\n\t\t\t\t\tpoints = append(points, pts...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Eliminate the aggregators and emitters.\n\t\t\titr.m = nil\n\t\t\treturn points, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags := curr.Tags.Subset(itr.dims)\n\n\t\tid := curr.Name\n\t\tif len(tags.m) > 0 {\n\t\t\tid += \"\\x00\" + tags.ID()\n\t\t}\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := itr.m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &floatReduceIntegerPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       
tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\titr.m[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateFloat(curr)\n\n\t\t// Attempt to emit points from the aggregator.\n\t\tpoints := rp.Emitter.Emit()\n\t\tif len(points) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range points {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tpoints[i].Tags = rp.Tags\n\t\t}\n\t\treturn points, nil\n\t}\n}\n\n// floatIntegerExprIterator executes a function to modify an existing point\n// for every output of the input iterator.\ntype floatIntegerExprIterator struct {\n\tleft      *bufFloatIterator\n\tright     *bufFloatIterator\n\tfn        floatIntegerExprFunc\n\tpoints    []FloatPoint // must be size 2\n\tstorePrev bool\n}\n\nfunc newFloatIntegerExprIterator(left, right FloatIterator, opt IteratorOptions, fn func(a, b float64) int64) *floatIntegerExprIterator {\n\tvar points []FloatPoint\n\tswitch opt.Fill {\n\tcase NullFill, PreviousFill:\n\t\tpoints = []FloatPoint{{Nil: true}, {Nil: true}}\n\tcase NumberFill:\n\t\tvalue := castToFloat(opt.FillValue)\n\t\tpoints = []FloatPoint{{Value: value}, {Value: value}}\n\t}\n\treturn &floatIntegerExprIterator{\n\t\tleft:      newBufFloatIterator(left),\n\t\tright:     newBufFloatIterator(right),\n\t\tpoints:    points,\n\t\tfn:        fn,\n\t\tstorePrev: opt.Fill == PreviousFill,\n\t}\n}\n\nfunc (itr *floatIntegerExprIterator) Stats() IteratorStats {\n\tstats := itr.left.Stats()\n\tstats.Add(itr.right.Stats())\n\treturn stats\n}\n\nfunc (itr *floatIntegerExprIterator) Close() error {\n\titr.left.Close()\n\titr.right.Close()\n\treturn nil\n}\n\nfunc (itr *floatIntegerExprIterator) Next() (*IntegerPoint, error) {\n\tfor {\n\t\ta, b, err := itr.next()\n\t\tif err != nil || (a == nil && b == nil) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If any of these are nil and we are using fill(none), skip these points.\n\t\tif (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If one 
of the two points is nil, we need to fill it with a fake nil\n\t\t// point that has the same name, tags, and time as the other point.\n\t\t// There should never be a time when both of these are nil.\n\t\tif a == nil {\n\t\t\tp := *b\n\t\t\ta = &p\n\t\t\ta.Value = 0\n\t\t\ta.Nil = true\n\t\t} else if b == nil {\n\t\t\tp := *a\n\t\t\tb = &p\n\t\t\tb.Value = 0\n\t\t\tb.Nil = true\n\t\t}\n\n\t\t// If a value is nil, use the fill values if the fill value is non-nil.\n\t\tif a.Nil && !itr.points[0].Nil {\n\t\t\ta.Value = itr.points[0].Value\n\t\t\ta.Nil = false\n\t\t}\n\t\tif b.Nil && !itr.points[1].Nil {\n\t\t\tb.Value = itr.points[1].Value\n\t\t\tb.Nil = false\n\t\t}\n\n\t\tif itr.storePrev {\n\t\t\titr.points[0], itr.points[1] = *a, *b\n\t\t}\n\n\t\tp := &IntegerPoint{\n\t\t\tName:       a.Name,\n\t\t\tTags:       a.Tags,\n\t\t\tTime:       a.Time,\n\t\t\tNil:        a.Nil || b.Nil,\n\t\t\tAggregated: a.Aggregated,\n\t\t}\n\t\tif !p.Nil {\n\t\t\tp.Value = itr.fn(a.Value, b.Value)\n\t\t}\n\t\treturn p, nil\n\n\t}\n}\n\n// next returns the next points within each iterator. 
If the iterators are\n// uneven, it organizes them so only matching points are returned.\nfunc (itr *floatIntegerExprIterator) next() (a, b *FloatPoint, err error) {\n\t// Retrieve the next value for both the left and right.\n\ta, err = itr.left.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tb, err = itr.right.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// If we have a point from both, make sure that they match each other.\n\tif a != nil && b != nil {\n\t\tif a.Name > b.Name {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Name < b.Name {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if ltags < rtags {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif a.Time > b.Time {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Time < b.Time {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\t}\n\treturn a, b, nil\n}\n\n// floatIntegerExprFunc creates or modifies a point by combining two\n// points. The point passed in may be modified and returned rather than\n// allocating a new point if possible. 
One of the points may be nil, but at\n// least one of the points will be non-nil.\ntype floatIntegerExprFunc func(a, b float64) int64\n\n// floatReduceStringIterator executes a reducer for every interval and buffers the result.\ntype floatReduceStringIterator struct {\n\tinput    *bufFloatIterator\n\tcreate   func() (FloatPointAggregator, StringPointEmitter)\n\tdims     []string\n\topt      IteratorOptions\n\tpoints   []StringPoint\n\tkeepTags bool\n}\n\nfunc newFloatReduceStringIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, StringPointEmitter)) *floatReduceStringIterator {\n\treturn &floatReduceStringIterator{\n\t\tinput:  newBufFloatIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *floatReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *floatReduceStringIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *floatReduceStringIterator) Next() (*StringPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// floatReduceStringPoint stores the reduced data for a name/tag combination.\ntype floatReduceStringPoint struct {\n\tName       string\n\tTags       Tags\n\tAggregator FloatPointAggregator\n\tEmitter    StringPointEmitter\n}\n\n// reduce executes fn once for every point in the next window.\n// The previous value for the dimension is passed to fn.\nfunc (itr *floatReduceStringIterator) reduce() ([]StringPoint, error) {\n\t// Calculate 
next window.\n\tvar (\n\t\tstartTime, endTime int64\n\t\twindow             struct {\n\t\t\tname string\n\t\t\ttags string\n\t\t}\n\t)\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t} else if p.Nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unread the point so it can be processed.\n\t\titr.input.unread(p)\n\t\tstartTime, endTime = itr.opt.Window(p.Time)\n\t\twindow.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()\n\t\tbreak\n\t}\n\n\t// Create points by tags.\n\tm := make(map[string]*floatReduceStringPoint)\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.NextInWindow(startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr == nil {\n\t\t\tbreak\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t} else if curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Ensure this point is within the same final window.\n\t\tif curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Retrieve the tags on this point for this level of the query.\n\t\t// This may be different than the bucket dimensions.\n\t\ttags := curr.Tags.Subset(itr.dims)\n\t\tid := tags.ID()\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &floatReduceStringPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\tm[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateFloat(curr)\n\t}\n\n\t// Reverse sort points by name & tag if our output is supposed to be ordered.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tif len(keys) > 1 && itr.opt.Ordered 
{\n\t\tsort.Sort(reverseStringSlice(keys))\n\t}\n\n\t// Assume the points are already sorted until proven otherwise.\n\tsortedByTime := true\n\t// Emit the points for each name & tag combination.\n\ta := make([]StringPoint, 0, len(m))\n\tfor _, k := range keys {\n\t\trp := m[k]\n\t\tpoints := rp.Emitter.Emit()\n\t\tfor i := len(points) - 1; i >= 0; i-- {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tif !itr.keepTags {\n\t\t\t\tpoints[i].Tags = rp.Tags\n\t\t\t}\n\t\t\t// Set the points time to the interval time if the reducer didn't provide one.\n\t\t\tif points[i].Time == ZeroTime {\n\t\t\t\tpoints[i].Time = startTime\n\t\t\t} else {\n\t\t\t\tsortedByTime = false\n\t\t\t}\n\t\t\ta = append(a, points[i])\n\t\t}\n\t}\n\n\t// Points may be out of order. Perform a stable sort by time if requested.\n\tif !sortedByTime && itr.opt.Ordered {\n\t\tsort.Stable(sort.Reverse(stringPointsByTime(a)))\n\t}\n\n\treturn a, nil\n}\n\n// floatStreamStringIterator streams inputs into the iterator and emits points gradually.\ntype floatStreamStringIterator struct {\n\tinput  *bufFloatIterator\n\tcreate func() (FloatPointAggregator, StringPointEmitter)\n\tdims   []string\n\topt    IteratorOptions\n\tm      map[string]*floatReduceStringPoint\n\tpoints []StringPoint\n}\n\n// newFloatStreamStringIterator returns a new instance of floatStreamStringIterator.\nfunc newFloatStreamStringIterator(input FloatIterator, createFn func() (FloatPointAggregator, StringPointEmitter), opt IteratorOptions) *floatStreamStringIterator {\n\treturn &floatStreamStringIterator{\n\t\tinput:  newBufFloatIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t\tm:      make(map[string]*floatReduceStringPoint),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *floatStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *floatStreamStringIterator) Close() error { return 
itr.input.Close() }\n\n// Next returns the next value for the stream iterator.\nfunc (itr *floatStreamStringIterator) Next() (*StringPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// reduce creates and manages aggregators for every point from the input.\n// After aggregating a point, it always tries to emit a value using the emitter.\nfunc (itr *floatStreamStringIterator) reduce() ([]StringPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.Next()\n\t\tif curr == nil {\n\t\t\t// Close all of the aggregators to flush any remaining points to emit.\n\t\t\tvar points []StringPoint\n\t\t\tfor _, rp := range itr.m {\n\t\t\t\tif aggregator, ok := rp.Aggregator.(io.Closer); ok {\n\t\t\t\t\tif err := aggregator.Close(); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tpts := rp.Emitter.Emit()\n\t\t\t\t\tif len(pts) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor i := range pts {\n\t\t\t\t\t\tpts[i].Name = rp.Name\n\t\t\t\t\t\tpts[i].Tags = rp.Tags\n\t\t\t\t\t}\n\t\t\t\t\tpoints = append(points, pts...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Eliminate the aggregators and emitters.\n\t\t\titr.m = nil\n\t\t\treturn points, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags := curr.Tags.Subset(itr.dims)\n\n\t\tid := curr.Name\n\t\tif len(tags.m) > 0 {\n\t\t\tid += \"\\x00\" + tags.ID()\n\t\t}\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := itr.m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &floatReduceStringPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       
tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\titr.m[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateFloat(curr)\n\n\t\t// Attempt to emit points from the aggregator.\n\t\tpoints := rp.Emitter.Emit()\n\t\tif len(points) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range points {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tpoints[i].Tags = rp.Tags\n\t\t}\n\t\treturn points, nil\n\t}\n}\n\n// floatStringExprIterator executes a function to modify an existing point\n// for every output of the input iterator.\ntype floatStringExprIterator struct {\n\tleft      *bufFloatIterator\n\tright     *bufFloatIterator\n\tfn        floatStringExprFunc\n\tpoints    []FloatPoint // must be size 2\n\tstorePrev bool\n}\n\nfunc newFloatStringExprIterator(left, right FloatIterator, opt IteratorOptions, fn func(a, b float64) string) *floatStringExprIterator {\n\tvar points []FloatPoint\n\tswitch opt.Fill {\n\tcase NullFill, PreviousFill:\n\t\tpoints = []FloatPoint{{Nil: true}, {Nil: true}}\n\tcase NumberFill:\n\t\tvalue := castToFloat(opt.FillValue)\n\t\tpoints = []FloatPoint{{Value: value}, {Value: value}}\n\t}\n\treturn &floatStringExprIterator{\n\t\tleft:      newBufFloatIterator(left),\n\t\tright:     newBufFloatIterator(right),\n\t\tpoints:    points,\n\t\tfn:        fn,\n\t\tstorePrev: opt.Fill == PreviousFill,\n\t}\n}\n\nfunc (itr *floatStringExprIterator) Stats() IteratorStats {\n\tstats := itr.left.Stats()\n\tstats.Add(itr.right.Stats())\n\treturn stats\n}\n\nfunc (itr *floatStringExprIterator) Close() error {\n\titr.left.Close()\n\titr.right.Close()\n\treturn nil\n}\n\nfunc (itr *floatStringExprIterator) Next() (*StringPoint, error) {\n\tfor {\n\t\ta, b, err := itr.next()\n\t\tif err != nil || (a == nil && b == nil) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If any of these are nil and we are using fill(none), skip these points.\n\t\tif (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If one of the two 
points is nil, we need to fill it with a fake nil\n\t\t// point that has the same name, tags, and time as the other point.\n\t\t// There should never be a time when both of these are nil.\n\t\tif a == nil {\n\t\t\tp := *b\n\t\t\ta = &p\n\t\t\ta.Value = 0\n\t\t\ta.Nil = true\n\t\t} else if b == nil {\n\t\t\tp := *a\n\t\t\tb = &p\n\t\t\tb.Value = 0\n\t\t\tb.Nil = true\n\t\t}\n\n\t\t// If a value is nil, use the fill values if the fill value is non-nil.\n\t\tif a.Nil && !itr.points[0].Nil {\n\t\t\ta.Value = itr.points[0].Value\n\t\t\ta.Nil = false\n\t\t}\n\t\tif b.Nil && !itr.points[1].Nil {\n\t\t\tb.Value = itr.points[1].Value\n\t\t\tb.Nil = false\n\t\t}\n\n\t\tif itr.storePrev {\n\t\t\titr.points[0], itr.points[1] = *a, *b\n\t\t}\n\n\t\tp := &StringPoint{\n\t\t\tName:       a.Name,\n\t\t\tTags:       a.Tags,\n\t\t\tTime:       a.Time,\n\t\t\tNil:        a.Nil || b.Nil,\n\t\t\tAggregated: a.Aggregated,\n\t\t}\n\t\tif !p.Nil {\n\t\t\tp.Value = itr.fn(a.Value, b.Value)\n\t\t}\n\t\treturn p, nil\n\n\t}\n}\n\n// next returns the next points within each iterator. 
If the iterators are\n// uneven, it organizes them so only matching points are returned.\nfunc (itr *floatStringExprIterator) next() (a, b *FloatPoint, err error) {\n\t// Retrieve the next value for both the left and right.\n\ta, err = itr.left.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tb, err = itr.right.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// If we have a point from both, make sure that they match each other.\n\tif a != nil && b != nil {\n\t\tif a.Name > b.Name {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Name < b.Name {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if ltags < rtags {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif a.Time > b.Time {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Time < b.Time {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\t}\n\treturn a, b, nil\n}\n\n// floatStringExprFunc creates or modifies a point by combining two\n// points. The point passed in may be modified and returned rather than\n// allocating a new point if possible. 
One of the points may be nil, but at\n// least one of the points will be non-nil.\ntype floatStringExprFunc func(a, b float64) string\n\n// floatReduceBooleanIterator executes a reducer for every interval and buffers the result.\ntype floatReduceBooleanIterator struct {\n\tinput    *bufFloatIterator\n\tcreate   func() (FloatPointAggregator, BooleanPointEmitter)\n\tdims     []string\n\topt      IteratorOptions\n\tpoints   []BooleanPoint\n\tkeepTags bool\n}\n\nfunc newFloatReduceBooleanIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, BooleanPointEmitter)) *floatReduceBooleanIterator {\n\treturn &floatReduceBooleanIterator{\n\t\tinput:  newBufFloatIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *floatReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *floatReduceBooleanIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *floatReduceBooleanIterator) Next() (*BooleanPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// floatReduceBooleanPoint stores the reduced data for a name/tag combination.\ntype floatReduceBooleanPoint struct {\n\tName       string\n\tTags       Tags\n\tAggregator FloatPointAggregator\n\tEmitter    BooleanPointEmitter\n}\n\n// reduce executes fn once for every point in the next window.\n// The previous value for the dimension is passed to fn.\nfunc (itr *floatReduceBooleanIterator) reduce() ([]BooleanPoint, error) 
{\n\t// Calculate next window.\n\tvar (\n\t\tstartTime, endTime int64\n\t\twindow             struct {\n\t\t\tname string\n\t\t\ttags string\n\t\t}\n\t)\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t} else if p.Nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unread the point so it can be processed.\n\t\titr.input.unread(p)\n\t\tstartTime, endTime = itr.opt.Window(p.Time)\n\t\twindow.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()\n\t\tbreak\n\t}\n\n\t// Create points by tags.\n\tm := make(map[string]*floatReduceBooleanPoint)\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.NextInWindow(startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr == nil {\n\t\t\tbreak\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t} else if curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Ensure this point is within the same final window.\n\t\tif curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Retrieve the tags on this point for this level of the query.\n\t\t// This may be different than the bucket dimensions.\n\t\ttags := curr.Tags.Subset(itr.dims)\n\t\tid := tags.ID()\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &floatReduceBooleanPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\tm[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateFloat(curr)\n\t}\n\n\t// Reverse sort points by name & tag if our output is supposed to be ordered.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tif len(keys) > 1 && itr.opt.Ordered 
{\n\t\tsort.Sort(reverseStringSlice(keys))\n\t}\n\n\t// Assume the points are already sorted until proven otherwise.\n\tsortedByTime := true\n\t// Emit the points for each name & tag combination.\n\ta := make([]BooleanPoint, 0, len(m))\n\tfor _, k := range keys {\n\t\trp := m[k]\n\t\tpoints := rp.Emitter.Emit()\n\t\tfor i := len(points) - 1; i >= 0; i-- {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tif !itr.keepTags {\n\t\t\t\tpoints[i].Tags = rp.Tags\n\t\t\t}\n\t\t\t// Set the points time to the interval time if the reducer didn't provide one.\n\t\t\tif points[i].Time == ZeroTime {\n\t\t\t\tpoints[i].Time = startTime\n\t\t\t} else {\n\t\t\t\tsortedByTime = false\n\t\t\t}\n\t\t\ta = append(a, points[i])\n\t\t}\n\t}\n\n\t// Points may be out of order. Perform a stable sort by time if requested.\n\tif !sortedByTime && itr.opt.Ordered {\n\t\tsort.Stable(sort.Reverse(booleanPointsByTime(a)))\n\t}\n\n\treturn a, nil\n}\n\n// floatStreamBooleanIterator streams inputs into the iterator and emits points gradually.\ntype floatStreamBooleanIterator struct {\n\tinput  *bufFloatIterator\n\tcreate func() (FloatPointAggregator, BooleanPointEmitter)\n\tdims   []string\n\topt    IteratorOptions\n\tm      map[string]*floatReduceBooleanPoint\n\tpoints []BooleanPoint\n}\n\n// newFloatStreamBooleanIterator returns a new instance of floatStreamBooleanIterator.\nfunc newFloatStreamBooleanIterator(input FloatIterator, createFn func() (FloatPointAggregator, BooleanPointEmitter), opt IteratorOptions) *floatStreamBooleanIterator {\n\treturn &floatStreamBooleanIterator{\n\t\tinput:  newBufFloatIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t\tm:      make(map[string]*floatReduceBooleanPoint),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *floatStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *floatStreamBooleanIterator) Close() error { 
return itr.input.Close() }\n\n// Next returns the next value for the stream iterator.\nfunc (itr *floatStreamBooleanIterator) Next() (*BooleanPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// reduce creates and manages aggregators for every point from the input.\n// After aggregating a point, it always tries to emit a value using the emitter.\nfunc (itr *floatStreamBooleanIterator) reduce() ([]BooleanPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.Next()\n\t\tif curr == nil {\n\t\t\t// Close all of the aggregators to flush any remaining points to emit.\n\t\t\tvar points []BooleanPoint\n\t\t\tfor _, rp := range itr.m {\n\t\t\t\tif aggregator, ok := rp.Aggregator.(io.Closer); ok {\n\t\t\t\t\tif err := aggregator.Close(); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tpts := rp.Emitter.Emit()\n\t\t\t\t\tif len(pts) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor i := range pts {\n\t\t\t\t\t\tpts[i].Name = rp.Name\n\t\t\t\t\t\tpts[i].Tags = rp.Tags\n\t\t\t\t\t}\n\t\t\t\t\tpoints = append(points, pts...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Eliminate the aggregators and emitters.\n\t\t\titr.m = nil\n\t\t\treturn points, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags := curr.Tags.Subset(itr.dims)\n\n\t\tid := curr.Name\n\t\tif len(tags.m) > 0 {\n\t\t\tid += \"\\x00\" + tags.ID()\n\t\t}\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := itr.m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &floatReduceBooleanPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       
tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\titr.m[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateFloat(curr)\n\n\t\t// Attempt to emit points from the aggregator.\n\t\tpoints := rp.Emitter.Emit()\n\t\tif len(points) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range points {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tpoints[i].Tags = rp.Tags\n\t\t}\n\t\treturn points, nil\n\t}\n}\n\n// floatBooleanExprIterator executes a function to modify an existing point\n// for every output of the input iterator.\ntype floatBooleanExprIterator struct {\n\tleft      *bufFloatIterator\n\tright     *bufFloatIterator\n\tfn        floatBooleanExprFunc\n\tpoints    []FloatPoint // must be size 2\n\tstorePrev bool\n}\n\nfunc newFloatBooleanExprIterator(left, right FloatIterator, opt IteratorOptions, fn func(a, b float64) bool) *floatBooleanExprIterator {\n\tvar points []FloatPoint\n\tswitch opt.Fill {\n\tcase NullFill, PreviousFill:\n\t\tpoints = []FloatPoint{{Nil: true}, {Nil: true}}\n\tcase NumberFill:\n\t\tvalue := castToFloat(opt.FillValue)\n\t\tpoints = []FloatPoint{{Value: value}, {Value: value}}\n\t}\n\treturn &floatBooleanExprIterator{\n\t\tleft:      newBufFloatIterator(left),\n\t\tright:     newBufFloatIterator(right),\n\t\tpoints:    points,\n\t\tfn:        fn,\n\t\tstorePrev: opt.Fill == PreviousFill,\n\t}\n}\n\nfunc (itr *floatBooleanExprIterator) Stats() IteratorStats {\n\tstats := itr.left.Stats()\n\tstats.Add(itr.right.Stats())\n\treturn stats\n}\n\nfunc (itr *floatBooleanExprIterator) Close() error {\n\titr.left.Close()\n\titr.right.Close()\n\treturn nil\n}\n\nfunc (itr *floatBooleanExprIterator) Next() (*BooleanPoint, error) {\n\tfor {\n\t\ta, b, err := itr.next()\n\t\tif err != nil || (a == nil && b == nil) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If any of these are nil and we are using fill(none), skip these points.\n\t\tif (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If one of 
the two points is nil, we need to fill it with a fake nil\n\t\t// point that has the same name, tags, and time as the other point.\n\t\t// There should never be a time when both of these are nil.\n\t\tif a == nil {\n\t\t\tp := *b\n\t\t\ta = &p\n\t\t\ta.Value = 0\n\t\t\ta.Nil = true\n\t\t} else if b == nil {\n\t\t\tp := *a\n\t\t\tb = &p\n\t\t\tb.Value = 0\n\t\t\tb.Nil = true\n\t\t}\n\n\t\t// If a value is nil, use the fill values if the fill value is non-nil.\n\t\tif a.Nil && !itr.points[0].Nil {\n\t\t\ta.Value = itr.points[0].Value\n\t\t\ta.Nil = false\n\t\t}\n\t\tif b.Nil && !itr.points[1].Nil {\n\t\t\tb.Value = itr.points[1].Value\n\t\t\tb.Nil = false\n\t\t}\n\n\t\tif itr.storePrev {\n\t\t\titr.points[0], itr.points[1] = *a, *b\n\t\t}\n\n\t\tp := &BooleanPoint{\n\t\t\tName:       a.Name,\n\t\t\tTags:       a.Tags,\n\t\t\tTime:       a.Time,\n\t\t\tNil:        a.Nil || b.Nil,\n\t\t\tAggregated: a.Aggregated,\n\t\t}\n\t\tif !p.Nil {\n\t\t\tp.Value = itr.fn(a.Value, b.Value)\n\t\t}\n\t\treturn p, nil\n\n\t}\n}\n\n// next returns the next points within each iterator. 
If the iterators are\n// uneven, it organizes them so only matching points are returned.\nfunc (itr *floatBooleanExprIterator) next() (a, b *FloatPoint, err error) {\n\t// Retrieve the next value for both the left and right.\n\ta, err = itr.left.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tb, err = itr.right.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// If we have a point from both, make sure that they match each other.\n\tif a != nil && b != nil {\n\t\tif a.Name > b.Name {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Name < b.Name {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if ltags < rtags {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif a.Time > b.Time {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Time < b.Time {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\t}\n\treturn a, b, nil\n}\n\n// floatBooleanExprFunc creates or modifies a point by combining two\n// points. The point passed in may be modified and returned rather than\n// allocating a new point if possible. 
One of the points may be nil, but at\n// least one of the points will be non-nil.\ntype floatBooleanExprFunc func(a, b float64) bool\n\n// floatTransformIterator executes a function to modify an existing point for every\n// output of the input iterator.\ntype floatTransformIterator struct {\n\tinput FloatIterator\n\tfn    floatTransformFunc\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *floatTransformIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *floatTransformIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *floatTransformIterator) Next() (*FloatPoint, error) {\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if p != nil {\n\t\tp = itr.fn(p)\n\t}\n\treturn p, nil\n}\n\n// floatTransformFunc creates or modifies a point.\n// The point passed in may be modified and returned rather than allocating a\n// new point if possible.\ntype floatTransformFunc func(p *FloatPoint) *FloatPoint\n\n// floatBoolTransformIterator executes a function to modify an existing point for every\n// output of the input iterator.\ntype floatBoolTransformIterator struct {\n\tinput FloatIterator\n\tfn    floatBoolTransformFunc\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *floatBoolTransformIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *floatBoolTransformIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *floatBoolTransformIterator) Next() (*BooleanPoint, error) {\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if p != nil {\n\t\treturn itr.fn(p), nil\n\t}\n\treturn nil, nil\n}\n\n// floatBoolTransformFunc creates or modifies a point.\n// The point passed in may be 
modified and returned rather than allocating a\n// new point if possible.\ntype floatBoolTransformFunc func(p *FloatPoint) *BooleanPoint\n\n// floatDedupeIterator only outputs unique points.\n// This differs from the DistinctIterator in that it compares all aux fields too.\n// This iterator is relatively inefficient and should only be used on small\n// datasets such as meta query results.\ntype floatDedupeIterator struct {\n\tinput FloatIterator\n\tm     map[string]struct{} // lookup of points already sent\n}\n\ntype floatIteratorMapper struct {\n\te      *Emitter\n\tbuf    []interface{}\n\tdriver IteratorMap   // which iterator to use for the primary value, can be nil\n\tfields []IteratorMap // which iterator to use for an aux field\n\tpoint  FloatPoint\n}\n\nfunc newFloatIteratorMapper(itrs []Iterator, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *floatIteratorMapper {\n\te := NewEmitter(itrs, opt.Ascending, 0)\n\te.OmitTime = true\n\treturn &floatIteratorMapper{\n\t\te:      e,\n\t\tbuf:    make([]interface{}, len(itrs)),\n\t\tdriver: driver,\n\t\tfields: fields,\n\t\tpoint: FloatPoint{\n\t\t\tAux: make([]interface{}, len(fields)),\n\t\t},\n\t}\n}\n\nfunc (itr *floatIteratorMapper) Next() (*FloatPoint, error) {\n\tt, name, tags, err := itr.e.loadBuf()\n\tif err != nil || t == ZeroTime {\n\t\treturn nil, err\n\t}\n\titr.point.Time = t\n\titr.point.Name = name\n\titr.point.Tags = tags\n\n\titr.e.readInto(t, name, tags, itr.buf)\n\tif itr.driver != nil {\n\t\tif v := itr.driver.Value(tags, itr.buf); v != nil {\n\t\t\tif v, ok := v.(float64); ok {\n\t\t\t\titr.point.Value = v\n\t\t\t\titr.point.Nil = false\n\t\t\t} else {\n\t\t\t\titr.point.Value = 0\n\t\t\t\titr.point.Nil = true\n\t\t\t}\n\t\t} else {\n\t\t\titr.point.Value = 0\n\t\t\titr.point.Nil = true\n\t\t}\n\t}\n\tfor i, f := range itr.fields {\n\t\titr.point.Aux[i] = f.Value(tags, itr.buf)\n\t}\n\treturn &itr.point, nil\n}\n\nfunc (itr *floatIteratorMapper) Stats() IteratorStats {\n\tstats 
:= IteratorStats{}\n\tfor _, itr := range itr.e.itrs {\n\t\tstats.Add(itr.Stats())\n\t}\n\treturn stats\n}\n\nfunc (itr *floatIteratorMapper) Close() error {\n\treturn itr.e.Close()\n}\n\ntype floatFilterIterator struct {\n\tinput FloatIterator\n\tcond  Expr\n\topt   IteratorOptions\n\tm     map[string]interface{}\n}\n\nfunc newFloatFilterIterator(input FloatIterator, cond Expr, opt IteratorOptions) FloatIterator {\n\t// Strip out time conditions from the WHERE clause.\n\t// TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct.\n\tn := RewriteFunc(CloneExpr(cond), func(n Node) Node {\n\t\tswitch n := n.(type) {\n\t\tcase *BinaryExpr:\n\t\t\tif n.LHS.String() == \"time\" {\n\t\t\t\treturn &BooleanLiteral{Val: true}\n\t\t\t}\n\t\t}\n\t\treturn n\n\t})\n\n\tcond, _ = n.(Expr)\n\tif cond == nil {\n\t\treturn input\n\t} else if n, ok := cond.(*BooleanLiteral); ok && n.Val {\n\t\treturn input\n\t}\n\n\treturn &floatFilterIterator{\n\t\tinput: input,\n\t\tcond:  cond,\n\t\topt:   opt,\n\t\tm:     make(map[string]interface{}),\n\t}\n}\n\nfunc (itr *floatFilterIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *floatFilterIterator) Close() error         { return itr.input.Close() }\n\nfunc (itr *floatFilterIterator) Next() (*FloatPoint, error) {\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor i, ref := range itr.opt.Aux {\n\t\t\titr.m[ref.Val] = p.Aux[i]\n\t\t}\n\t\tfor k, v := range p.Tags.KeyValues() {\n\t\t\titr.m[k] = v\n\t\t}\n\n\t\tif !EvalBool(itr.cond, itr.m) {\n\t\t\tcontinue\n\t\t}\n\t\treturn p, nil\n\t}\n}\n\n// newFloatDedupeIterator returns a new instance of floatDedupeIterator.\nfunc newFloatDedupeIterator(input FloatIterator) *floatDedupeIterator {\n\treturn &floatDedupeIterator{\n\t\tinput: input,\n\t\tm:     make(map[string]struct{}),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *floatDedupeIterator) 
Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *floatDedupeIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next unique point from the input iterator.\nfunc (itr *floatDedupeIterator) Next() (*FloatPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\tp, err := itr.input.Next()\n\t\tif p == nil || err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Serialize to bytes to store in lookup.\n\t\tbuf, err := proto.Marshal(encodeFloatPoint(p))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If the point has already been output then move to the next point.\n\t\tif _, ok := itr.m[string(buf)]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Otherwise mark it as emitted and return point.\n\t\titr.m[string(buf)] = struct{}{}\n\t\treturn p, nil\n\t}\n}\n\n// floatReaderIterator represents an iterator that streams from a reader.\ntype floatReaderIterator struct {\n\tr   io.Reader\n\tdec *FloatPointDecoder\n}\n\n// newFloatReaderIterator returns a new instance of floatReaderIterator.\nfunc newFloatReaderIterator(r io.Reader, stats IteratorStats) *floatReaderIterator {\n\tdec := NewFloatPointDecoder(r)\n\tdec.stats = stats\n\n\treturn &floatReaderIterator{\n\t\tr:   r,\n\t\tdec: dec,\n\t}\n}\n\n// Stats returns stats about points processed.\nfunc (itr *floatReaderIterator) Stats() IteratorStats { return itr.dec.stats }\n\n// Close closes the underlying reader, if applicable.\nfunc (itr *floatReaderIterator) Close() error {\n\tif r, ok := itr.r.(io.ReadCloser); ok {\n\t\treturn r.Close()\n\t}\n\treturn nil\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *floatReaderIterator) Next() (*FloatPoint, error) {\n\t// OPTIMIZE(benbjohnson): Reuse point on iterator.\n\n\t// Unmarshal next point.\n\tp := &FloatPoint{}\n\tif err := itr.dec.DecodeFloatPoint(p); err == io.EOF {\n\t\treturn nil, nil\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, 
nil\n}\n\n// IntegerIterator represents a stream of integer points.\ntype IntegerIterator interface {\n\tIterator\n\tNext() (*IntegerPoint, error)\n}\n\n// newIntegerIterators converts a slice of Iterator to a slice of IntegerIterator.\n// Drop and closes any iterator in itrs that is not a IntegerIterator and cannot\n// be cast to a IntegerIterator.\nfunc newIntegerIterators(itrs []Iterator) []IntegerIterator {\n\ta := make([]IntegerIterator, 0, len(itrs))\n\tfor _, itr := range itrs {\n\t\tswitch itr := itr.(type) {\n\t\tcase IntegerIterator:\n\t\t\ta = append(a, itr)\n\n\t\tdefault:\n\t\t\titr.Close()\n\t\t}\n\t}\n\treturn a\n}\n\n// bufIntegerIterator represents a buffered IntegerIterator.\ntype bufIntegerIterator struct {\n\titr IntegerIterator\n\tbuf *IntegerPoint\n}\n\n// newBufIntegerIterator returns a buffered IntegerIterator.\nfunc newBufIntegerIterator(itr IntegerIterator) *bufIntegerIterator {\n\treturn &bufIntegerIterator{itr: itr}\n}\n\n// Stats returns statistics from the input iterator.\nfunc (itr *bufIntegerIterator) Stats() IteratorStats { return itr.itr.Stats() }\n\n// Close closes the underlying iterator.\nfunc (itr *bufIntegerIterator) Close() error { return itr.itr.Close() }\n\n// peek returns the next point without removing it from the iterator.\nfunc (itr *bufIntegerIterator) peek() (*IntegerPoint, error) {\n\tp, err := itr.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\titr.unread(p)\n\treturn p, nil\n}\n\n// peekTime returns the time of the next point.\n// Returns zero time if no more points available.\nfunc (itr *bufIntegerIterator) peekTime() (int64, error) {\n\tp, err := itr.peek()\n\tif p == nil || err != nil {\n\t\treturn ZeroTime, err\n\t}\n\treturn p.Time, nil\n}\n\n// Next returns the current buffer, if exists, or calls the underlying iterator.\nfunc (itr *bufIntegerIterator) Next() (*IntegerPoint, error) {\n\tbuf := itr.buf\n\tif buf != nil {\n\t\titr.buf = nil\n\t\treturn buf, nil\n\t}\n\treturn itr.itr.Next()\n}\n\n// 
NextInWindow returns the next value if it is between [startTime, endTime).\n// If the next value is outside the range then it is moved to the buffer.\nfunc (itr *bufIntegerIterator) NextInWindow(startTime, endTime int64) (*IntegerPoint, error) {\n\tv, err := itr.Next()\n\tif v == nil || err != nil {\n\t\treturn nil, err\n\t} else if t := v.Time; t >= endTime || t < startTime {\n\t\titr.unread(v)\n\t\treturn nil, nil\n\t}\n\treturn v, nil\n}\n\n// unread sets v to the buffer. It is read on the next call to Next().\nfunc (itr *bufIntegerIterator) unread(v *IntegerPoint) { itr.buf = v }\n\n// integerMergeIterator represents an iterator that combines multiple integer iterators.\ntype integerMergeIterator struct {\n\tinputs []IntegerIterator\n\theap   *integerMergeHeap\n\tinit   bool\n\n\t// Current iterator and window.\n\tcurr   *integerMergeHeapItem\n\twindow struct {\n\t\tname      string\n\t\ttags      string\n\t\tstartTime int64\n\t\tendTime   int64\n\t}\n}\n\n// newIntegerMergeIterator returns a new instance of integerMergeIterator.\nfunc newIntegerMergeIterator(inputs []IntegerIterator, opt IteratorOptions) *integerMergeIterator {\n\titr := &integerMergeIterator{\n\t\tinputs: inputs,\n\t\theap: &integerMergeHeap{\n\t\t\titems: make([]*integerMergeHeapItem, 0, len(inputs)),\n\t\t\topt:   opt,\n\t\t},\n\t}\n\n\t// Initialize heap items.\n\tfor _, input := range inputs {\n\t\t// Wrap in buffer, ignore any inputs without anymore points.\n\t\tbufInput := newBufIntegerIterator(input)\n\n\t\t// Append to the heap.\n\t\titr.heap.items = append(itr.heap.items, &integerMergeHeapItem{itr: bufInput})\n\t}\n\n\treturn itr\n}\n\n// Stats returns an aggregation of stats from the underlying iterators.\nfunc (itr *integerMergeIterator) Stats() IteratorStats {\n\tvar stats IteratorStats\n\tfor _, input := range itr.inputs {\n\t\tstats.Add(input.Stats())\n\t}\n\treturn stats\n}\n\n// Close closes the underlying iterators.\nfunc (itr *integerMergeIterator) Close() error {\n\tfor _, 
input := range itr.inputs {\n\t\tinput.Close()\n\t}\n\titr.curr = nil\n\titr.inputs = nil\n\titr.heap.items = nil\n\treturn nil\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *integerMergeIterator) Next() (*IntegerPoint, error) {\n\t// Initialize the heap. This needs to be done lazily on the first call to this iterator\n\t// so that iterator initialization done through the Select() call returns quickly.\n\t// Queries can only be interrupted after the Select() call completes so any operations\n\t// done during iterator creation cannot be interrupted, which is why we do it here\n\t// instead so an interrupt can happen while initializing the heap.\n\tif !itr.init {\n\t\titems := itr.heap.items\n\t\titr.heap.items = make([]*integerMergeHeapItem, 0, len(items))\n\t\tfor _, item := range items {\n\t\t\tif p, err := item.itr.peek(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else if p == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\titr.heap.items = append(itr.heap.items, item)\n\t\t}\n\t\theap.Init(itr.heap)\n\t\titr.init = true\n\t}\n\n\tfor {\n\t\t// Retrieve the next iterator if we don't have one.\n\t\tif itr.curr == nil {\n\t\t\tif len(itr.heap.items) == 0 {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t\titr.curr = heap.Pop(itr.heap).(*integerMergeHeapItem)\n\n\t\t\t// Read point and set current window.\n\t\t\tp, err := itr.curr.itr.Next()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ttags := p.Tags.Subset(itr.heap.opt.Dimensions)\n\t\t\titr.window.name, itr.window.tags = p.Name, tags.ID()\n\t\t\titr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time)\n\t\t\treturn p, nil\n\t\t}\n\n\t\t// Read the next point from the current iterator.\n\t\tp, err := itr.curr.itr.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If there are no more points then remove iterator from heap and find next.\n\t\tif p == nil {\n\t\t\titr.curr = nil\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check if the point is inside of our current 
window.\n\t\tinWindow := true\n\t\tif window := itr.window; window.name != p.Name {\n\t\t\tinWindow = false\n\t\t} else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() {\n\t\t\tinWindow = false\n\t\t} else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime {\n\t\t\tinWindow = false\n\t\t} else if !opt.Ascending && p.Time < window.startTime {\n\t\t\tinWindow = false\n\t\t}\n\n\t\t// If it's outside our window then push iterator back on the heap and find new iterator.\n\t\tif !inWindow {\n\t\t\titr.curr.itr.unread(p)\n\t\t\theap.Push(itr.heap, itr.curr)\n\t\t\titr.curr = nil\n\t\t\tcontinue\n\t\t}\n\n\t\treturn p, nil\n\t}\n}\n\n// integerMergeHeap represents a heap of integerMergeHeapItems.\n// Items are sorted by their next window and then by name/tags.\ntype integerMergeHeap struct {\n\topt   IteratorOptions\n\titems []*integerMergeHeapItem\n}\n\nfunc (h *integerMergeHeap) Len() int      { return len(h.items) }\nfunc (h *integerMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }\nfunc (h *integerMergeHeap) Less(i, j int) bool {\n\tx, err := h.items[i].itr.peek()\n\tif err != nil {\n\t\treturn true\n\t}\n\ty, err := h.items[j].itr.peek()\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tif h.opt.Ascending {\n\t\tif x.Name != y.Name {\n\t\t\treturn x.Name < y.Name\n\t\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {\n\t\t\treturn xTags.ID() < yTags.ID()\n\t\t}\n\t} else {\n\t\tif x.Name != y.Name {\n\t\t\treturn x.Name > y.Name\n\t\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {\n\t\t\treturn xTags.ID() > yTags.ID()\n\t\t}\n\t}\n\n\txt, _ := h.opt.Window(x.Time)\n\tyt, _ := h.opt.Window(y.Time)\n\n\tif h.opt.Ascending {\n\t\treturn xt < yt\n\t}\n\treturn xt > yt\n}\n\nfunc (h *integerMergeHeap) Push(x interface{}) {\n\th.items = append(h.items, 
x.(*integerMergeHeapItem))\n}\n\nfunc (h *integerMergeHeap) Pop() interface{} {\n\told := h.items\n\tn := len(old)\n\titem := old[n-1]\n\th.items = old[0 : n-1]\n\treturn item\n}\n\ntype integerMergeHeapItem struct {\n\titr *bufIntegerIterator\n}\n\n// integerSortedMergeIterator is an iterator that sorts and merges multiple iterators into one.\ntype integerSortedMergeIterator struct {\n\tinputs []IntegerIterator\n\theap   *integerSortedMergeHeap\n\tinit   bool\n}\n\n// newIntegerSortedMergeIterator returns an instance of integerSortedMergeIterator.\nfunc newIntegerSortedMergeIterator(inputs []IntegerIterator, opt IteratorOptions) Iterator {\n\titr := &integerSortedMergeIterator{\n\t\tinputs: inputs,\n\t\theap: &integerSortedMergeHeap{\n\t\t\titems: make([]*integerSortedMergeHeapItem, 0, len(inputs)),\n\t\t\topt:   opt,\n\t\t},\n\t}\n\n\t// Initialize heap items.\n\tfor _, input := range inputs {\n\t\t// Append to the heap.\n\t\titr.heap.items = append(itr.heap.items, &integerSortedMergeHeapItem{itr: input})\n\t}\n\n\treturn itr\n}\n\n// Stats returns an aggregation of stats from the underlying iterators.\nfunc (itr *integerSortedMergeIterator) Stats() IteratorStats {\n\tvar stats IteratorStats\n\tfor _, input := range itr.inputs {\n\t\tstats.Add(input.Stats())\n\t}\n\treturn stats\n}\n\n// Close closes the underlying iterators.\nfunc (itr *integerSortedMergeIterator) Close() error {\n\tfor _, input := range itr.inputs {\n\t\tinput.Close()\n\t}\n\treturn nil\n}\n\n// Next returns the next points from the iterator.\nfunc (itr *integerSortedMergeIterator) Next() (*IntegerPoint, error) { return itr.pop() }\n\n// pop returns the next point from the heap.\n// Reads the next point from item's cursor and puts it back on the heap.\nfunc (itr *integerSortedMergeIterator) pop() (*IntegerPoint, error) {\n\t// Initialize the heap. 
See the MergeIterator to see why this has to be done lazily.\n\tif !itr.init {\n\t\titems := itr.heap.items\n\t\titr.heap.items = make([]*integerSortedMergeHeapItem, 0, len(items))\n\t\tfor _, item := range items {\n\t\t\tvar err error\n\t\t\tif item.point, err = item.itr.Next(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else if item.point == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\titr.heap.items = append(itr.heap.items, item)\n\t\t}\n\t\theap.Init(itr.heap)\n\t\titr.init = true\n\t}\n\n\tif len(itr.heap.items) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// Read the next item from the heap.\n\titem := heap.Pop(itr.heap).(*integerSortedMergeHeapItem)\n\tif item.err != nil {\n\t\treturn nil, item.err\n\t} else if item.point == nil {\n\t\treturn nil, nil\n\t}\n\n\t// Copy the point for return.\n\tp := item.point.Clone()\n\n\t// Read the next item from the cursor. Push back to heap if one exists.\n\tif item.point, item.err = item.itr.Next(); item.point != nil {\n\t\theap.Push(itr.heap, item)\n\t}\n\n\treturn p, nil\n}\n\n// integerSortedMergeHeap represents a heap of integerSortedMergeHeapItems.\ntype integerSortedMergeHeap struct {\n\topt   IteratorOptions\n\titems []*integerSortedMergeHeapItem\n}\n\nfunc (h *integerSortedMergeHeap) Len() int      { return len(h.items) }\nfunc (h *integerSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }\nfunc (h *integerSortedMergeHeap) Less(i, j int) bool {\n\tx, y := h.items[i].point, h.items[j].point\n\n\tif h.opt.Ascending {\n\t\tif x.Name != y.Name {\n\t\t\treturn x.Name < y.Name\n\t\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {\n\t\t\treturn xTags.ID() < yTags.ID()\n\t\t}\n\t\treturn x.Time < y.Time\n\t}\n\n\tif x.Name != y.Name {\n\t\treturn x.Name > y.Name\n\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {\n\t\treturn xTags.ID() > yTags.ID()\n\t}\n\treturn 
x.Time > y.Time\n}\n\nfunc (h *integerSortedMergeHeap) Push(x interface{}) {\n\th.items = append(h.items, x.(*integerSortedMergeHeapItem))\n}\n\nfunc (h *integerSortedMergeHeap) Pop() interface{} {\n\told := h.items\n\tn := len(old)\n\titem := old[n-1]\n\th.items = old[0 : n-1]\n\treturn item\n}\n\ntype integerSortedMergeHeapItem struct {\n\tpoint *IntegerPoint\n\terr   error\n\titr   IntegerIterator\n}\n\n// integerParallelIterator represents an iterator that pulls data in a separate goroutine.\ntype integerParallelIterator struct {\n\tinput IntegerIterator\n\tch    chan integerPointError\n\n\tonce    sync.Once\n\tclosing chan struct{}\n\twg      sync.WaitGroup\n}\n\n// newIntegerParallelIterator returns a new instance of integerParallelIterator.\nfunc newIntegerParallelIterator(input IntegerIterator) *integerParallelIterator {\n\titr := &integerParallelIterator{\n\t\tinput:   input,\n\t\tch:      make(chan integerPointError, 256),\n\t\tclosing: make(chan struct{}),\n\t}\n\titr.wg.Add(1)\n\tgo itr.monitor()\n\treturn itr\n}\n\n// Stats returns stats from the underlying iterator.\nfunc (itr *integerParallelIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the underlying iterators.\nfunc (itr *integerParallelIterator) Close() error {\n\titr.once.Do(func() { close(itr.closing) })\n\titr.wg.Wait()\n\treturn itr.input.Close()\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *integerParallelIterator) Next() (*IntegerPoint, error) {\n\tv, ok := <-itr.ch\n\tif !ok {\n\t\treturn nil, io.EOF\n\t}\n\treturn v.point, v.err\n}\n\n// monitor runs in a separate goroutine and actively pulls the next point.\nfunc (itr *integerParallelIterator) monitor() {\n\tdefer close(itr.ch)\n\tdefer itr.wg.Done()\n\n\tfor {\n\t\t// Read next point.\n\t\tp, err := itr.input.Next()\n\t\tif p != nil {\n\t\t\tp = p.Clone()\n\t\t}\n\n\t\tselect {\n\t\tcase <-itr.closing:\n\t\t\treturn\n\t\tcase itr.ch <- integerPointError{point: p, err: 
err}:\n\t\t}\n\t}\n}\n\ntype integerPointError struct {\n\tpoint *IntegerPoint\n\terr   error\n}\n\n// integerLimitIterator represents an iterator that limits points per group.\ntype integerLimitIterator struct {\n\tinput IntegerIterator\n\topt   IteratorOptions\n\tn     int\n\n\tprev struct {\n\t\tname string\n\t\ttags Tags\n\t}\n}\n\n// newIntegerLimitIterator returns a new instance of integerLimitIterator.\nfunc newIntegerLimitIterator(input IntegerIterator, opt IteratorOptions) *integerLimitIterator {\n\treturn &integerLimitIterator{\n\t\tinput: input,\n\t\topt:   opt,\n\t}\n}\n\n// Stats returns stats from the underlying iterator.\nfunc (itr *integerLimitIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the underlying iterators.\nfunc (itr *integerLimitIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next point from the iterator.\nfunc (itr *integerLimitIterator) Next() (*IntegerPoint, error) {\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif p == nil || err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Reset window and counter if a new window is encountered.\n\t\tif p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) {\n\t\t\titr.prev.name = p.Name\n\t\t\titr.prev.tags = p.Tags\n\t\t\titr.n = 0\n\t\t}\n\n\t\t// Increment counter.\n\t\titr.n++\n\n\t\t// Read next point if not beyond the offset.\n\t\tif itr.n <= itr.opt.Offset {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Read next point if we're beyond the limit.\n\t\tif itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn p, nil\n\t}\n}\n\ntype integerFillIterator struct {\n\tinput     *bufIntegerIterator\n\tprev      IntegerPoint\n\tstartTime int64\n\tendTime   int64\n\tauxFields []interface{}\n\tinit      bool\n\topt       IteratorOptions\n\n\twindow struct {\n\t\tname   string\n\t\ttags   Tags\n\t\ttime   int64\n\t\toffset int64\n\t}\n}\n\nfunc newIntegerFillIterator(input IntegerIterator, expr Expr, opt 
IteratorOptions) *integerFillIterator {\n\tif opt.Fill == NullFill {\n\t\tif expr, ok := expr.(*Call); ok && expr.Name == \"count\" {\n\t\t\topt.Fill = NumberFill\n\t\t\topt.FillValue = int64(0)\n\t\t}\n\t}\n\n\tvar startTime, endTime int64\n\tif opt.Ascending {\n\t\tstartTime, _ = opt.Window(opt.StartTime)\n\t\tendTime, _ = opt.Window(opt.EndTime)\n\t} else {\n\t\tstartTime, _ = opt.Window(opt.EndTime)\n\t\tendTime, _ = opt.Window(opt.StartTime)\n\t}\n\n\tvar auxFields []interface{}\n\tif len(opt.Aux) > 0 {\n\t\tauxFields = make([]interface{}, len(opt.Aux))\n\t}\n\n\treturn &integerFillIterator{\n\t\tinput:     newBufIntegerIterator(input),\n\t\tprev:      IntegerPoint{Nil: true},\n\t\tstartTime: startTime,\n\t\tendTime:   endTime,\n\t\tauxFields: auxFields,\n\t\topt:       opt,\n\t}\n}\n\nfunc (itr *integerFillIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *integerFillIterator) Close() error         { return itr.input.Close() }\n\nfunc (itr *integerFillIterator) Next() (*IntegerPoint, error) {\n\tif !itr.init {\n\t\tp, err := itr.input.peek()\n\t\tif p == nil || err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\titr.window.name, itr.window.tags = p.Name, p.Tags\n\t\titr.window.time = itr.startTime\n\t\tif itr.opt.Location != nil {\n\t\t\t_, itr.window.offset = itr.opt.Zone(itr.window.time)\n\t\t}\n\t\titr.init = true\n\t}\n\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check if the next point is outside of our window or is nil.\n\tfor p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() {\n\t\t// If we are inside of an interval, unread the point and continue below to\n\t\t// constructing a new point.\n\t\tif itr.opt.Ascending {\n\t\t\tif itr.window.time <= itr.endTime {\n\t\t\t\titr.input.unread(p)\n\t\t\t\tp = nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tif itr.window.time >= itr.endTime {\n\t\t\t\titr.input.unread(p)\n\t\t\t\tp = nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// 
We are *not* in a current interval. If there is no next point,\n\t\t// we are at the end of all intervals.\n\t\tif p == nil {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\t// Set the new interval.\n\t\titr.window.name, itr.window.tags = p.Name, p.Tags\n\t\titr.window.time = itr.startTime\n\t\tif itr.opt.Location != nil {\n\t\t\t_, itr.window.offset = itr.opt.Zone(itr.window.time)\n\t\t}\n\t\titr.prev = IntegerPoint{Nil: true}\n\t\tbreak\n\t}\n\n\t// Check if the point is our next expected point.\n\tif p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) {\n\t\tif p != nil {\n\t\t\titr.input.unread(p)\n\t\t}\n\n\t\tp = &IntegerPoint{\n\t\t\tName: itr.window.name,\n\t\t\tTags: itr.window.tags,\n\t\t\tTime: itr.window.time,\n\t\t\tAux:  itr.auxFields,\n\t\t}\n\n\t\tswitch itr.opt.Fill {\n\t\tcase LinearFill:\n\t\t\tif !itr.prev.Nil {\n\t\t\t\tnext, err := itr.input.peek()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t} else if next != nil && next.Name == itr.window.name && next.Tags.ID() == itr.window.tags.ID() {\n\t\t\t\t\tinterval := int64(itr.opt.Interval.Duration)\n\t\t\t\t\tstart := itr.window.time / interval\n\t\t\t\t\tp.Value = linearInteger(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value)\n\t\t\t\t} else {\n\t\t\t\t\tp.Nil = true\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tp.Nil = true\n\t\t\t}\n\n\t\tcase NullFill:\n\t\t\tp.Nil = true\n\t\tcase NumberFill:\n\t\t\tp.Value = castToInteger(itr.opt.FillValue)\n\t\tcase PreviousFill:\n\t\t\tif !itr.prev.Nil {\n\t\t\t\tp.Value = itr.prev.Value\n\t\t\t\tp.Nil = itr.prev.Nil\n\t\t\t} else {\n\t\t\t\tp.Nil = true\n\t\t\t}\n\t\t}\n\t} else {\n\t\titr.prev = *p\n\t}\n\n\t// Advance the expected time. 
Do not advance to a new window here\n\t// as there may be lingering points with the same timestamp in the previous\n\t// window.\n\tif itr.opt.Ascending {\n\t\titr.window.time += int64(itr.opt.Interval.Duration)\n\t} else {\n\t\titr.window.time -= int64(itr.opt.Interval.Duration)\n\t}\n\n\t// Check to see if we have passed over an offset change and adjust the time\n\t// to account for this new offset.\n\tif itr.opt.Location != nil {\n\t\tif _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset {\n\t\t\tdiff := itr.window.offset - offset\n\t\t\tif abs(diff) < int64(itr.opt.Interval.Duration) {\n\t\t\t\titr.window.time += diff\n\t\t\t}\n\t\t\titr.window.offset = offset\n\t\t}\n\t}\n\treturn p, nil\n}\n\n// integerIntervalIterator represents a integer implementation of IntervalIterator.\ntype integerIntervalIterator struct {\n\tinput IntegerIterator\n\topt   IteratorOptions\n}\n\nfunc newIntegerIntervalIterator(input IntegerIterator, opt IteratorOptions) *integerIntervalIterator {\n\treturn &integerIntervalIterator{input: input, opt: opt}\n}\n\nfunc (itr *integerIntervalIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *integerIntervalIterator) Close() error         { return itr.input.Close() }\n\nfunc (itr *integerIntervalIterator) Next() (*IntegerPoint, error) {\n\tp, err := itr.input.Next()\n\tif p == nil || err != nil {\n\t\treturn nil, err\n\t}\n\tp.Time, _ = itr.opt.Window(p.Time)\n\t// If we see the minimum allowable time, set the time to zero so we don't\n\t// break the default returned time for aggregate queries without times.\n\tif p.Time == MinTime {\n\t\tp.Time = 0\n\t}\n\treturn p, nil\n}\n\n// integerInterruptIterator represents a integer implementation of InterruptIterator.\ntype integerInterruptIterator struct {\n\tinput   IntegerIterator\n\tclosing <-chan struct{}\n\tcount   int\n}\n\nfunc newIntegerInterruptIterator(input IntegerIterator, closing <-chan struct{}) *integerInterruptIterator {\n\treturn 
&integerInterruptIterator{input: input, closing: closing}\n}\n\nfunc (itr *integerInterruptIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *integerInterruptIterator) Close() error         { return itr.input.Close() }\n\nfunc (itr *integerInterruptIterator) Next() (*IntegerPoint, error) {\n\t// Only check if the channel is closed every N points. This\n\t// intentionally checks on both 0 and N so that if the iterator\n\t// has been interrupted before the first point is emitted it will\n\t// not emit any points.\n\tif itr.count&0xFF == 0xFF {\n\t\tselect {\n\t\tcase <-itr.closing:\n\t\t\treturn nil, itr.Close()\n\t\tdefault:\n\t\t\t// Reset iterator count to zero and fall through to emit the next point.\n\t\t\titr.count = 0\n\t\t}\n\t}\n\n\t// Increment the counter for every point read.\n\titr.count++\n\treturn itr.input.Next()\n}\n\n// integerCloseInterruptIterator represents a integer implementation of CloseInterruptIterator.\ntype integerCloseInterruptIterator struct {\n\tinput   IntegerIterator\n\tclosing <-chan struct{}\n\tdone    chan struct{}\n\tonce    sync.Once\n}\n\nfunc newIntegerCloseInterruptIterator(input IntegerIterator, closing <-chan struct{}) *integerCloseInterruptIterator {\n\titr := &integerCloseInterruptIterator{\n\t\tinput:   input,\n\t\tclosing: closing,\n\t\tdone:    make(chan struct{}),\n\t}\n\tgo itr.monitor()\n\treturn itr\n}\n\nfunc (itr *integerCloseInterruptIterator) monitor() {\n\tselect {\n\tcase <-itr.closing:\n\t\titr.Close()\n\tcase <-itr.done:\n\t}\n}\n\nfunc (itr *integerCloseInterruptIterator) Stats() IteratorStats {\n\treturn itr.input.Stats()\n}\n\nfunc (itr *integerCloseInterruptIterator) Close() error {\n\titr.once.Do(func() {\n\t\tclose(itr.done)\n\t\titr.input.Close()\n\t})\n\treturn nil\n}\n\nfunc (itr *integerCloseInterruptIterator) Next() (*IntegerPoint, error) {\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\t// Check if the iterator was closed.\n\t\tselect {\n\t\tcase 
<-itr.done:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn p, nil\n}\n\n// auxIntegerPoint represents a combination of a point and an error for the AuxIterator.\ntype auxIntegerPoint struct {\n\tpoint *IntegerPoint\n\terr   error\n}\n\n// integerAuxIterator represents a integer implementation of AuxIterator.\ntype integerAuxIterator struct {\n\tinput      *bufIntegerIterator\n\toutput     chan auxIntegerPoint\n\tfields     *auxIteratorFields\n\tbackground bool\n}\n\nfunc newIntegerAuxIterator(input IntegerIterator, opt IteratorOptions) *integerAuxIterator {\n\treturn &integerAuxIterator{\n\t\tinput:  newBufIntegerIterator(input),\n\t\toutput: make(chan auxIntegerPoint, 1),\n\t\tfields: newAuxIteratorFields(opt),\n\t}\n}\n\nfunc (itr *integerAuxIterator) Background() {\n\titr.background = true\n\titr.Start()\n\tgo DrainIterator(itr)\n}\n\nfunc (itr *integerAuxIterator) Start()               { go itr.stream() }\nfunc (itr *integerAuxIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *integerAuxIterator) Close() error         { return itr.input.Close() }\nfunc (itr *integerAuxIterator) Next() (*IntegerPoint, error) {\n\tp := <-itr.output\n\treturn p.point, p.err\n}\nfunc (itr *integerAuxIterator) Iterator(name string, typ DataType) Iterator {\n\treturn itr.fields.iterator(name, typ)\n}\n\nfunc (itr *integerAuxIterator) stream() {\n\tfor {\n\t\t// Read next point.\n\t\tp, err := itr.input.Next()\n\t\tif err != nil {\n\t\t\titr.output <- auxIntegerPoint{err: err}\n\t\t\titr.fields.sendError(err)\n\t\t\tbreak\n\t\t} else if p == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Send point to output and to each field iterator.\n\t\titr.output <- auxIntegerPoint{point: p}\n\t\tif ok := itr.fields.send(p); !ok && itr.background {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tclose(itr.output)\n\titr.fields.close()\n}\n\n// integerChanIterator represents a new instance of integerChanIterator.\ntype integerChanIterator struct {\n\tbuf struct {\n\t\ti 
     int\n\t\tfilled bool\n\t\tpoints [2]IntegerPoint\n\t}\n\terr  error\n\tcond *sync.Cond\n\tdone bool\n}\n\nfunc (itr *integerChanIterator) Stats() IteratorStats { return IteratorStats{} }\n\nfunc (itr *integerChanIterator) Close() error {\n\titr.cond.L.Lock()\n\t// Mark the channel iterator as done and signal all waiting goroutines to start again.\n\titr.done = true\n\titr.cond.Broadcast()\n\t// Do not defer the unlock so we don't create an unnecessary allocation.\n\titr.cond.L.Unlock()\n\treturn nil\n}\n\nfunc (itr *integerChanIterator) setBuf(name string, tags Tags, time int64, value interface{}) bool {\n\titr.cond.L.Lock()\n\tdefer itr.cond.L.Unlock()\n\n\t// Wait for either the iterator to be done (so we don't have to set the value)\n\t// or for the buffer to have been read and ready for another write.\n\tfor !itr.done && itr.buf.filled {\n\t\titr.cond.Wait()\n\t}\n\n\t// Do not set the value and return false to signal that the iterator is closed.\n\t// Do this after the above wait as the above for loop may have exited because\n\t// the iterator was closed.\n\tif itr.done {\n\t\treturn false\n\t}\n\n\tswitch v := value.(type) {\n\tcase int64:\n\t\titr.buf.points[itr.buf.i] = IntegerPoint{Name: name, Tags: tags, Time: time, Value: v}\n\n\tdefault:\n\t\titr.buf.points[itr.buf.i] = IntegerPoint{Name: name, Tags: tags, Time: time, Nil: true}\n\t}\n\titr.buf.filled = true\n\n\t// Signal to all waiting goroutines that a new value is ready to read.\n\titr.cond.Signal()\n\treturn true\n}\n\nfunc (itr *integerChanIterator) setErr(err error) {\n\titr.cond.L.Lock()\n\tdefer itr.cond.L.Unlock()\n\titr.err = err\n\n\t// Signal to all waiting goroutines that a new value is ready to read.\n\titr.cond.Signal()\n}\n\nfunc (itr *integerChanIterator) Next() (*IntegerPoint, error) {\n\titr.cond.L.Lock()\n\tdefer itr.cond.L.Unlock()\n\n\t// Check for an error and return one if there.\n\tif itr.err != nil {\n\t\treturn nil, itr.err\n\t}\n\n\t// Wait until either a value is 
available in the buffer or\n\t// the iterator is closed.\n\tfor !itr.done && !itr.buf.filled {\n\t\titr.cond.Wait()\n\t}\n\n\t// Return nil once the channel is done and the buffer is empty.\n\tif itr.done && !itr.buf.filled {\n\t\treturn nil, nil\n\t}\n\n\t// Always read from the buffer if it exists, even if the iterator\n\t// is closed. This prevents the last value from being truncated by\n\t// the parent iterator.\n\tp := &itr.buf.points[itr.buf.i]\n\titr.buf.i = (itr.buf.i + 1) % len(itr.buf.points)\n\titr.buf.filled = false\n\titr.cond.Signal()\n\treturn p, nil\n}\n\n// integerReduceFloatIterator executes a reducer for every interval and buffers the result.\ntype integerReduceFloatIterator struct {\n\tinput    *bufIntegerIterator\n\tcreate   func() (IntegerPointAggregator, FloatPointEmitter)\n\tdims     []string\n\topt      IteratorOptions\n\tpoints   []FloatPoint\n\tkeepTags bool\n}\n\nfunc newIntegerReduceFloatIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, FloatPointEmitter)) *integerReduceFloatIterator {\n\treturn &integerReduceFloatIterator{\n\t\tinput:  newBufIntegerIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *integerReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *integerReduceFloatIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *integerReduceFloatIterator) Next() (*FloatPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn 
p, nil\n}\n\n// integerReduceFloatPoint stores the reduced data for a name/tag combination.\ntype integerReduceFloatPoint struct {\n\tName       string\n\tTags       Tags\n\tAggregator IntegerPointAggregator\n\tEmitter    FloatPointEmitter\n}\n\n// reduce executes fn once for every point in the next window.\n// The previous value for the dimension is passed to fn.\nfunc (itr *integerReduceFloatIterator) reduce() ([]FloatPoint, error) {\n\t// Calculate next window.\n\tvar (\n\t\tstartTime, endTime int64\n\t\twindow             struct {\n\t\t\tname string\n\t\t\ttags string\n\t\t}\n\t)\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t} else if p.Nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unread the point so it can be processed.\n\t\titr.input.unread(p)\n\t\tstartTime, endTime = itr.opt.Window(p.Time)\n\t\twindow.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()\n\t\tbreak\n\t}\n\n\t// Create points by tags.\n\tm := make(map[string]*integerReduceFloatPoint)\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.NextInWindow(startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr == nil {\n\t\t\tbreak\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t} else if curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Ensure this point is within the same final window.\n\t\tif curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Retrieve the tags on this point for this level of the query.\n\t\t// This may be different than the bucket dimensions.\n\t\ttags := curr.Tags.Subset(itr.dims)\n\t\tid := tags.ID()\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = 
&integerReduceFloatPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\tm[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateInteger(curr)\n\t}\n\n\t// Reverse sort points by name & tag if our output is supposed to be ordered.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tif len(keys) > 1 && itr.opt.Ordered {\n\t\tsort.Sort(reverseStringSlice(keys))\n\t}\n\n\t// Assume the points are already sorted until proven otherwise.\n\tsortedByTime := true\n\t// Emit the points for each name & tag combination.\n\ta := make([]FloatPoint, 0, len(m))\n\tfor _, k := range keys {\n\t\trp := m[k]\n\t\tpoints := rp.Emitter.Emit()\n\t\tfor i := len(points) - 1; i >= 0; i-- {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tif !itr.keepTags {\n\t\t\t\tpoints[i].Tags = rp.Tags\n\t\t\t}\n\t\t\t// Set the points time to the interval time if the reducer didn't provide one.\n\t\t\tif points[i].Time == ZeroTime {\n\t\t\t\tpoints[i].Time = startTime\n\t\t\t} else {\n\t\t\t\tsortedByTime = false\n\t\t\t}\n\t\t\ta = append(a, points[i])\n\t\t}\n\t}\n\n\t// Points may be out of order. 
Perform a stable sort by time if requested.\n\tif !sortedByTime && itr.opt.Ordered {\n\t\tsort.Stable(sort.Reverse(floatPointsByTime(a)))\n\t}\n\n\treturn a, nil\n}\n\n// integerStreamFloatIterator streams inputs into the iterator and emits points gradually.\ntype integerStreamFloatIterator struct {\n\tinput  *bufIntegerIterator\n\tcreate func() (IntegerPointAggregator, FloatPointEmitter)\n\tdims   []string\n\topt    IteratorOptions\n\tm      map[string]*integerReduceFloatPoint\n\tpoints []FloatPoint\n}\n\n// newIntegerStreamFloatIterator returns a new instance of integerStreamFloatIterator.\nfunc newIntegerStreamFloatIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, FloatPointEmitter), opt IteratorOptions) *integerStreamFloatIterator {\n\treturn &integerStreamFloatIterator{\n\t\tinput:  newBufIntegerIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t\tm:      make(map[string]*integerReduceFloatPoint),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *integerStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *integerStreamFloatIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next value for the stream iterator.\nfunc (itr *integerStreamFloatIterator) Next() (*FloatPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// reduce creates and manages aggregators for every point from the input.\n// After aggregating a point, it always tries to emit a value using the emitter.\nfunc (itr *integerStreamFloatIterator) reduce() ([]FloatPoint, error) {\n\tfor {\n\t\t// 
Read next point.\n\t\tcurr, err := itr.input.Next()\n\t\tif curr == nil {\n\t\t\t// Close all of the aggregators to flush any remaining points to emit.\n\t\t\tvar points []FloatPoint\n\t\t\tfor _, rp := range itr.m {\n\t\t\t\tif aggregator, ok := rp.Aggregator.(io.Closer); ok {\n\t\t\t\t\tif err := aggregator.Close(); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tpts := rp.Emitter.Emit()\n\t\t\t\t\tif len(pts) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor i := range pts {\n\t\t\t\t\t\tpts[i].Name = rp.Name\n\t\t\t\t\t\tpts[i].Tags = rp.Tags\n\t\t\t\t\t}\n\t\t\t\t\tpoints = append(points, pts...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Eliminate the aggregators and emitters.\n\t\t\titr.m = nil\n\t\t\treturn points, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags := curr.Tags.Subset(itr.dims)\n\n\t\tid := curr.Name\n\t\tif len(tags.m) > 0 {\n\t\t\tid += \"\\x00\" + tags.ID()\n\t\t}\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := itr.m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &integerReduceFloatPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\titr.m[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateInteger(curr)\n\n\t\t// Attempt to emit points from the aggregator.\n\t\tpoints := rp.Emitter.Emit()\n\t\tif len(points) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range points {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tpoints[i].Tags = rp.Tags\n\t\t}\n\t\treturn points, nil\n\t}\n}\n\n// integerFloatExprIterator executes a function to modify an existing point\n// for every output of the input iterator.\ntype integerFloatExprIterator struct {\n\tleft      *bufIntegerIterator\n\tright     *bufIntegerIterator\n\tfn        integerFloatExprFunc\n\tpoints    []IntegerPoint // must be size 2\n\tstorePrev bool\n}\n\nfunc 
newIntegerFloatExprIterator(left, right IntegerIterator, opt IteratorOptions, fn func(a, b int64) float64) *integerFloatExprIterator {\n\tvar points []IntegerPoint\n\tswitch opt.Fill {\n\tcase NullFill, PreviousFill:\n\t\tpoints = []IntegerPoint{{Nil: true}, {Nil: true}}\n\tcase NumberFill:\n\t\tvalue := castToInteger(opt.FillValue)\n\t\tpoints = []IntegerPoint{{Value: value}, {Value: value}}\n\t}\n\treturn &integerFloatExprIterator{\n\t\tleft:      newBufIntegerIterator(left),\n\t\tright:     newBufIntegerIterator(right),\n\t\tpoints:    points,\n\t\tfn:        fn,\n\t\tstorePrev: opt.Fill == PreviousFill,\n\t}\n}\n\nfunc (itr *integerFloatExprIterator) Stats() IteratorStats {\n\tstats := itr.left.Stats()\n\tstats.Add(itr.right.Stats())\n\treturn stats\n}\n\nfunc (itr *integerFloatExprIterator) Close() error {\n\titr.left.Close()\n\titr.right.Close()\n\treturn nil\n}\n\nfunc (itr *integerFloatExprIterator) Next() (*FloatPoint, error) {\n\tfor {\n\t\ta, b, err := itr.next()\n\t\tif err != nil || (a == nil && b == nil) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If any of these are nil and we are using fill(none), skip these points.\n\t\tif (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If one of the two points is nil, we need to fill it with a fake nil\n\t\t// point that has the same name, tags, and time as the other point.\n\t\t// There should never be a time when both of these are nil.\n\t\tif a == nil {\n\t\t\tp := *b\n\t\t\ta = &p\n\t\t\ta.Value = 0\n\t\t\ta.Nil = true\n\t\t} else if b == nil {\n\t\t\tp := *a\n\t\t\tb = &p\n\t\t\tb.Value = 0\n\t\t\tb.Nil = true\n\t\t}\n\n\t\t// If a value is nil, use the fill values if the fill value is non-nil.\n\t\tif a.Nil && !itr.points[0].Nil {\n\t\t\ta.Value = itr.points[0].Value\n\t\t\ta.Nil = false\n\t\t}\n\t\tif b.Nil && !itr.points[1].Nil {\n\t\t\tb.Value = itr.points[1].Value\n\t\t\tb.Nil = false\n\t\t}\n\n\t\tif itr.storePrev {\n\t\t\titr.points[0], itr.points[1] = *a, 
*b\n\t\t}\n\n\t\tp := &FloatPoint{\n\t\t\tName:       a.Name,\n\t\t\tTags:       a.Tags,\n\t\t\tTime:       a.Time,\n\t\t\tNil:        a.Nil || b.Nil,\n\t\t\tAggregated: a.Aggregated,\n\t\t}\n\t\tif !p.Nil {\n\t\t\tp.Value = itr.fn(a.Value, b.Value)\n\t\t}\n\t\treturn p, nil\n\n\t}\n}\n\n// next returns the next points within each iterator. If the iterators are\n// uneven, it organizes them so only matching points are returned.\nfunc (itr *integerFloatExprIterator) next() (a, b *IntegerPoint, err error) {\n\t// Retrieve the next value for both the left and right.\n\ta, err = itr.left.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tb, err = itr.right.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// If we have a point from both, make sure that they match each other.\n\tif a != nil && b != nil {\n\t\tif a.Name > b.Name {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Name < b.Name {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if ltags < rtags {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif a.Time > b.Time {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Time < b.Time {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\t}\n\treturn a, b, nil\n}\n\n// integerFloatExprFunc creates or modifies a point by combining two\n// points. The point passed in may be modified and returned rather than\n// allocating a new point if possible. 
One of the points may be nil, but at\n// least one of the points will be non-nil.\ntype integerFloatExprFunc func(a, b int64) float64\n\n// integerReduceIntegerIterator executes a reducer for every interval and buffers the result.\ntype integerReduceIntegerIterator struct {\n\tinput    *bufIntegerIterator\n\tcreate   func() (IntegerPointAggregator, IntegerPointEmitter)\n\tdims     []string\n\topt      IteratorOptions\n\tpoints   []IntegerPoint\n\tkeepTags bool\n}\n\nfunc newIntegerReduceIntegerIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, IntegerPointEmitter)) *integerReduceIntegerIterator {\n\treturn &integerReduceIntegerIterator{\n\t\tinput:  newBufIntegerIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *integerReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *integerReduceIntegerIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *integerReduceIntegerIterator) Next() (*IntegerPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// integerReduceIntegerPoint stores the reduced data for a name/tag combination.\ntype integerReduceIntegerPoint struct {\n\tName       string\n\tTags       Tags\n\tAggregator IntegerPointAggregator\n\tEmitter    IntegerPointEmitter\n}\n\n// reduce executes fn once for every point in the next window.\n// The previous value for the dimension is passed to fn.\nfunc (itr *integerReduceIntegerIterator) 
reduce() ([]IntegerPoint, error) {\n\t// Calculate next window.\n\tvar (\n\t\tstartTime, endTime int64\n\t\twindow             struct {\n\t\t\tname string\n\t\t\ttags string\n\t\t}\n\t)\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t} else if p.Nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unread the point so it can be processed.\n\t\titr.input.unread(p)\n\t\tstartTime, endTime = itr.opt.Window(p.Time)\n\t\twindow.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()\n\t\tbreak\n\t}\n\n\t// Create points by tags.\n\tm := make(map[string]*integerReduceIntegerPoint)\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.NextInWindow(startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr == nil {\n\t\t\tbreak\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t} else if curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Ensure this point is within the same final window.\n\t\tif curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Retrieve the tags on this point for this level of the query.\n\t\t// This may be different than the bucket dimensions.\n\t\ttags := curr.Tags.Subset(itr.dims)\n\t\tid := tags.ID()\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &integerReduceIntegerPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\tm[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateInteger(curr)\n\t}\n\n\t// Reverse sort points by name & tag if our output is supposed to be ordered.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tif 
len(keys) > 1 && itr.opt.Ordered {\n\t\tsort.Sort(reverseStringSlice(keys))\n\t}\n\n\t// Assume the points are already sorted until proven otherwise.\n\tsortedByTime := true\n\t// Emit the points for each name & tag combination.\n\ta := make([]IntegerPoint, 0, len(m))\n\tfor _, k := range keys {\n\t\trp := m[k]\n\t\tpoints := rp.Emitter.Emit()\n\t\tfor i := len(points) - 1; i >= 0; i-- {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tif !itr.keepTags {\n\t\t\t\tpoints[i].Tags = rp.Tags\n\t\t\t}\n\t\t\t// Set the points time to the interval time if the reducer didn't provide one.\n\t\t\tif points[i].Time == ZeroTime {\n\t\t\t\tpoints[i].Time = startTime\n\t\t\t} else {\n\t\t\t\tsortedByTime = false\n\t\t\t}\n\t\t\ta = append(a, points[i])\n\t\t}\n\t}\n\n\t// Points may be out of order. Perform a stable sort by time if requested.\n\tif !sortedByTime && itr.opt.Ordered {\n\t\tsort.Stable(sort.Reverse(integerPointsByTime(a)))\n\t}\n\n\treturn a, nil\n}\n\n// integerStreamIntegerIterator streams inputs into the iterator and emits points gradually.\ntype integerStreamIntegerIterator struct {\n\tinput  *bufIntegerIterator\n\tcreate func() (IntegerPointAggregator, IntegerPointEmitter)\n\tdims   []string\n\topt    IteratorOptions\n\tm      map[string]*integerReduceIntegerPoint\n\tpoints []IntegerPoint\n}\n\n// newIntegerStreamIntegerIterator returns a new instance of integerStreamIntegerIterator.\nfunc newIntegerStreamIntegerIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, IntegerPointEmitter), opt IteratorOptions) *integerStreamIntegerIterator {\n\treturn &integerStreamIntegerIterator{\n\t\tinput:  newBufIntegerIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t\tm:      make(map[string]*integerReduceIntegerPoint),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *integerStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child 
iterators.\nfunc (itr *integerStreamIntegerIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next value for the stream iterator.\nfunc (itr *integerStreamIntegerIterator) Next() (*IntegerPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// reduce creates and manages aggregators for every point from the input.\n// After aggregating a point, it always tries to emit a value using the emitter.\nfunc (itr *integerStreamIntegerIterator) reduce() ([]IntegerPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.Next()\n\t\tif curr == nil {\n\t\t\t// Close all of the aggregators to flush any remaining points to emit.\n\t\t\tvar points []IntegerPoint\n\t\t\tfor _, rp := range itr.m {\n\t\t\t\tif aggregator, ok := rp.Aggregator.(io.Closer); ok {\n\t\t\t\t\tif err := aggregator.Close(); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tpts := rp.Emitter.Emit()\n\t\t\t\t\tif len(pts) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor i := range pts {\n\t\t\t\t\t\tpts[i].Name = rp.Name\n\t\t\t\t\t\tpts[i].Tags = rp.Tags\n\t\t\t\t\t}\n\t\t\t\t\tpoints = append(points, pts...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Eliminate the aggregators and emitters.\n\t\t\titr.m = nil\n\t\t\treturn points, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags := curr.Tags.Subset(itr.dims)\n\n\t\tid := curr.Name\n\t\tif len(tags.m) > 0 {\n\t\t\tid += \"\\x00\" + tags.ID()\n\t\t}\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := itr.m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = 
&integerReduceIntegerPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\titr.m[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateInteger(curr)\n\n\t\t// Attempt to emit points from the aggregator.\n\t\tpoints := rp.Emitter.Emit()\n\t\tif len(points) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range points {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tpoints[i].Tags = rp.Tags\n\t\t}\n\t\treturn points, nil\n\t}\n}\n\n// integerExprIterator executes a function to modify an existing point\n// for every output of the input iterator.\ntype integerExprIterator struct {\n\tleft      *bufIntegerIterator\n\tright     *bufIntegerIterator\n\tfn        integerExprFunc\n\tpoints    []IntegerPoint // must be size 2\n\tstorePrev bool\n}\n\nfunc newIntegerExprIterator(left, right IntegerIterator, opt IteratorOptions, fn func(a, b int64) int64) *integerExprIterator {\n\tvar points []IntegerPoint\n\tswitch opt.Fill {\n\tcase NullFill, PreviousFill:\n\t\tpoints = []IntegerPoint{{Nil: true}, {Nil: true}}\n\tcase NumberFill:\n\t\tvalue := castToInteger(opt.FillValue)\n\t\tpoints = []IntegerPoint{{Value: value}, {Value: value}}\n\t}\n\treturn &integerExprIterator{\n\t\tleft:      newBufIntegerIterator(left),\n\t\tright:     newBufIntegerIterator(right),\n\t\tpoints:    points,\n\t\tfn:        fn,\n\t\tstorePrev: opt.Fill == PreviousFill,\n\t}\n}\n\nfunc (itr *integerExprIterator) Stats() IteratorStats {\n\tstats := itr.left.Stats()\n\tstats.Add(itr.right.Stats())\n\treturn stats\n}\n\nfunc (itr *integerExprIterator) Close() error {\n\titr.left.Close()\n\titr.right.Close()\n\treturn nil\n}\n\nfunc (itr *integerExprIterator) Next() (*IntegerPoint, error) {\n\tfor {\n\t\ta, b, err := itr.next()\n\t\tif err != nil || (a == nil && b == nil) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If any of these are nil and we are using fill(none), skip these points.\n\t\tif (a == nil || a.Nil || b == nil || b.Nil) && 
itr.points == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If one of the two points is nil, we need to fill it with a fake nil\n\t\t// point that has the same name, tags, and time as the other point.\n\t\t// There should never be a time when both of these are nil.\n\t\tif a == nil {\n\t\t\tp := *b\n\t\t\ta = &p\n\t\t\ta.Value = 0\n\t\t\ta.Nil = true\n\t\t} else if b == nil {\n\t\t\tp := *a\n\t\t\tb = &p\n\t\t\tb.Value = 0\n\t\t\tb.Nil = true\n\t\t}\n\n\t\t// If a value is nil, use the fill values if the fill value is non-nil.\n\t\tif a.Nil && !itr.points[0].Nil {\n\t\t\ta.Value = itr.points[0].Value\n\t\t\ta.Nil = false\n\t\t}\n\t\tif b.Nil && !itr.points[1].Nil {\n\t\t\tb.Value = itr.points[1].Value\n\t\t\tb.Nil = false\n\t\t}\n\n\t\tif itr.storePrev {\n\t\t\titr.points[0], itr.points[1] = *a, *b\n\t\t}\n\n\t\tif a.Nil {\n\t\t\treturn a, nil\n\t\t} else if b.Nil {\n\t\t\treturn b, nil\n\t\t}\n\t\ta.Value = itr.fn(a.Value, b.Value)\n\t\treturn a, nil\n\n\t}\n}\n\n// next returns the next points within each iterator. 
If the iterators are\n// uneven, it organizes them so only matching points are returned.\nfunc (itr *integerExprIterator) next() (a, b *IntegerPoint, err error) {\n\t// Retrieve the next value for both the left and right.\n\ta, err = itr.left.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tb, err = itr.right.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// If we have a point from both, make sure that they match each other.\n\tif a != nil && b != nil {\n\t\tif a.Name > b.Name {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Name < b.Name {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if ltags < rtags {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif a.Time > b.Time {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Time < b.Time {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\t}\n\treturn a, b, nil\n}\n\n// integerExprFunc creates or modifies a point by combining two\n// points. The point passed in may be modified and returned rather than\n// allocating a new point if possible. 
One of the points may be nil, but at\n// least one of the points will be non-nil.\ntype integerExprFunc func(a, b int64) int64\n\n// integerReduceStringIterator executes a reducer for every interval and buffers the result.\ntype integerReduceStringIterator struct {\n\tinput    *bufIntegerIterator\n\tcreate   func() (IntegerPointAggregator, StringPointEmitter)\n\tdims     []string\n\topt      IteratorOptions\n\tpoints   []StringPoint\n\tkeepTags bool\n}\n\nfunc newIntegerReduceStringIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, StringPointEmitter)) *integerReduceStringIterator {\n\treturn &integerReduceStringIterator{\n\t\tinput:  newBufIntegerIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *integerReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *integerReduceStringIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *integerReduceStringIterator) Next() (*StringPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// integerReduceStringPoint stores the reduced data for a name/tag combination.\ntype integerReduceStringPoint struct {\n\tName       string\n\tTags       Tags\n\tAggregator IntegerPointAggregator\n\tEmitter    StringPointEmitter\n}\n\n// reduce executes fn once for every point in the next window.\n// The previous value for the dimension is passed to fn.\nfunc (itr *integerReduceStringIterator) reduce() ([]StringPoint, 
error) {\n\t// Calculate next window.\n\tvar (\n\t\tstartTime, endTime int64\n\t\twindow             struct {\n\t\t\tname string\n\t\t\ttags string\n\t\t}\n\t)\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t} else if p.Nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unread the point so it can be processed.\n\t\titr.input.unread(p)\n\t\tstartTime, endTime = itr.opt.Window(p.Time)\n\t\twindow.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()\n\t\tbreak\n\t}\n\n\t// Create points by tags.\n\tm := make(map[string]*integerReduceStringPoint)\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.NextInWindow(startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr == nil {\n\t\t\tbreak\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t} else if curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Ensure this point is within the same final window.\n\t\tif curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Retrieve the tags on this point for this level of the query.\n\t\t// This may be different than the bucket dimensions.\n\t\ttags := curr.Tags.Subset(itr.dims)\n\t\tid := tags.ID()\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &integerReduceStringPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\tm[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateInteger(curr)\n\t}\n\n\t// Reverse sort points by name & tag if our output is supposed to be ordered.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tif len(keys) > 1 && 
itr.opt.Ordered {\n\t\tsort.Sort(reverseStringSlice(keys))\n\t}\n\n\t// Assume the points are already sorted until proven otherwise.\n\tsortedByTime := true\n\t// Emit the points for each name & tag combination.\n\ta := make([]StringPoint, 0, len(m))\n\tfor _, k := range keys {\n\t\trp := m[k]\n\t\tpoints := rp.Emitter.Emit()\n\t\tfor i := len(points) - 1; i >= 0; i-- {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tif !itr.keepTags {\n\t\t\t\tpoints[i].Tags = rp.Tags\n\t\t\t}\n\t\t\t// Set the points time to the interval time if the reducer didn't provide one.\n\t\t\tif points[i].Time == ZeroTime {\n\t\t\t\tpoints[i].Time = startTime\n\t\t\t} else {\n\t\t\t\tsortedByTime = false\n\t\t\t}\n\t\t\ta = append(a, points[i])\n\t\t}\n\t}\n\n\t// Points may be out of order. Perform a stable sort by time if requested.\n\tif !sortedByTime && itr.opt.Ordered {\n\t\tsort.Stable(sort.Reverse(stringPointsByTime(a)))\n\t}\n\n\treturn a, nil\n}\n\n// integerStreamStringIterator streams inputs into the iterator and emits points gradually.\ntype integerStreamStringIterator struct {\n\tinput  *bufIntegerIterator\n\tcreate func() (IntegerPointAggregator, StringPointEmitter)\n\tdims   []string\n\topt    IteratorOptions\n\tm      map[string]*integerReduceStringPoint\n\tpoints []StringPoint\n}\n\n// newIntegerStreamStringIterator returns a new instance of integerStreamStringIterator.\nfunc newIntegerStreamStringIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, StringPointEmitter), opt IteratorOptions) *integerStreamStringIterator {\n\treturn &integerStreamStringIterator{\n\t\tinput:  newBufIntegerIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t\tm:      make(map[string]*integerReduceStringPoint),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *integerStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr 
*integerStreamStringIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next value for the stream iterator.\nfunc (itr *integerStreamStringIterator) Next() (*StringPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// reduce creates and manages aggregators for every point from the input.\n// After aggregating a point, it always tries to emit a value using the emitter.\nfunc (itr *integerStreamStringIterator) reduce() ([]StringPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.Next()\n\t\tif curr == nil {\n\t\t\t// Close all of the aggregators to flush any remaining points to emit.\n\t\t\tvar points []StringPoint\n\t\t\tfor _, rp := range itr.m {\n\t\t\t\tif aggregator, ok := rp.Aggregator.(io.Closer); ok {\n\t\t\t\t\tif err := aggregator.Close(); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tpts := rp.Emitter.Emit()\n\t\t\t\t\tif len(pts) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor i := range pts {\n\t\t\t\t\t\tpts[i].Name = rp.Name\n\t\t\t\t\t\tpts[i].Tags = rp.Tags\n\t\t\t\t\t}\n\t\t\t\t\tpoints = append(points, pts...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Eliminate the aggregators and emitters.\n\t\t\titr.m = nil\n\t\t\treturn points, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags := curr.Tags.Subset(itr.dims)\n\n\t\tid := curr.Name\n\t\tif len(tags.m) > 0 {\n\t\t\tid += \"\\x00\" + tags.ID()\n\t\t}\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := itr.m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = 
&integerReduceStringPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\titr.m[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateInteger(curr)\n\n\t\t// Attempt to emit points from the aggregator.\n\t\tpoints := rp.Emitter.Emit()\n\t\tif len(points) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range points {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tpoints[i].Tags = rp.Tags\n\t\t}\n\t\treturn points, nil\n\t}\n}\n\n// integerStringExprIterator executes a function to modify an existing point\n// for every output of the input iterator.\ntype integerStringExprIterator struct {\n\tleft      *bufIntegerIterator\n\tright     *bufIntegerIterator\n\tfn        integerStringExprFunc\n\tpoints    []IntegerPoint // must be size 2\n\tstorePrev bool\n}\n\nfunc newIntegerStringExprIterator(left, right IntegerIterator, opt IteratorOptions, fn func(a, b int64) string) *integerStringExprIterator {\n\tvar points []IntegerPoint\n\tswitch opt.Fill {\n\tcase NullFill, PreviousFill:\n\t\tpoints = []IntegerPoint{{Nil: true}, {Nil: true}}\n\tcase NumberFill:\n\t\tvalue := castToInteger(opt.FillValue)\n\t\tpoints = []IntegerPoint{{Value: value}, {Value: value}}\n\t}\n\treturn &integerStringExprIterator{\n\t\tleft:      newBufIntegerIterator(left),\n\t\tright:     newBufIntegerIterator(right),\n\t\tpoints:    points,\n\t\tfn:        fn,\n\t\tstorePrev: opt.Fill == PreviousFill,\n\t}\n}\n\nfunc (itr *integerStringExprIterator) Stats() IteratorStats {\n\tstats := itr.left.Stats()\n\tstats.Add(itr.right.Stats())\n\treturn stats\n}\n\nfunc (itr *integerStringExprIterator) Close() error {\n\titr.left.Close()\n\titr.right.Close()\n\treturn nil\n}\n\nfunc (itr *integerStringExprIterator) Next() (*StringPoint, error) {\n\tfor {\n\t\ta, b, err := itr.next()\n\t\tif err != nil || (a == nil && b == nil) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If any of these are nil and we are using fill(none), skip these 
points.\n\t\tif (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If one of the two points is nil, we need to fill it with a fake nil\n\t\t// point that has the same name, tags, and time as the other point.\n\t\t// There should never be a time when both of these are nil.\n\t\tif a == nil {\n\t\t\tp := *b\n\t\t\ta = &p\n\t\t\ta.Value = 0\n\t\t\ta.Nil = true\n\t\t} else if b == nil {\n\t\t\tp := *a\n\t\t\tb = &p\n\t\t\tb.Value = 0\n\t\t\tb.Nil = true\n\t\t}\n\n\t\t// If a value is nil, use the fill values if the fill value is non-nil.\n\t\tif a.Nil && !itr.points[0].Nil {\n\t\t\ta.Value = itr.points[0].Value\n\t\t\ta.Nil = false\n\t\t}\n\t\tif b.Nil && !itr.points[1].Nil {\n\t\t\tb.Value = itr.points[1].Value\n\t\t\tb.Nil = false\n\t\t}\n\n\t\tif itr.storePrev {\n\t\t\titr.points[0], itr.points[1] = *a, *b\n\t\t}\n\n\t\tp := &StringPoint{\n\t\t\tName:       a.Name,\n\t\t\tTags:       a.Tags,\n\t\t\tTime:       a.Time,\n\t\t\tNil:        a.Nil || b.Nil,\n\t\t\tAggregated: a.Aggregated,\n\t\t}\n\t\tif !p.Nil {\n\t\t\tp.Value = itr.fn(a.Value, b.Value)\n\t\t}\n\t\treturn p, nil\n\n\t}\n}\n\n// next returns the next points within each iterator. 
If the iterators are\n// uneven, it organizes them so only matching points are returned.\nfunc (itr *integerStringExprIterator) next() (a, b *IntegerPoint, err error) {\n\t// Retrieve the next value for both the left and right.\n\ta, err = itr.left.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tb, err = itr.right.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// If we have a point from both, make sure that they match each other.\n\tif a != nil && b != nil {\n\t\tif a.Name > b.Name {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Name < b.Name {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if ltags < rtags {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif a.Time > b.Time {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Time < b.Time {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\t}\n\treturn a, b, nil\n}\n\n// integerStringExprFunc creates or modifies a point by combining two\n// points. The point passed in may be modified and returned rather than\n// allocating a new point if possible. 
One of the points may be nil, but at\n// least one of the points will be non-nil.\ntype integerStringExprFunc func(a, b int64) string\n\n// integerReduceBooleanIterator executes a reducer for every interval and buffers the result.\ntype integerReduceBooleanIterator struct {\n\tinput    *bufIntegerIterator\n\tcreate   func() (IntegerPointAggregator, BooleanPointEmitter)\n\tdims     []string\n\topt      IteratorOptions\n\tpoints   []BooleanPoint\n\tkeepTags bool\n}\n\nfunc newIntegerReduceBooleanIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, BooleanPointEmitter)) *integerReduceBooleanIterator {\n\treturn &integerReduceBooleanIterator{\n\t\tinput:  newBufIntegerIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *integerReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *integerReduceBooleanIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *integerReduceBooleanIterator) Next() (*BooleanPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// integerReduceBooleanPoint stores the reduced data for a name/tag combination.\ntype integerReduceBooleanPoint struct {\n\tName       string\n\tTags       Tags\n\tAggregator IntegerPointAggregator\n\tEmitter    BooleanPointEmitter\n}\n\n// reduce executes fn once for every point in the next window.\n// The previous value for the dimension is passed to fn.\nfunc (itr *integerReduceBooleanIterator) 
reduce() ([]BooleanPoint, error) {\n\t// Calculate next window.\n\tvar (\n\t\tstartTime, endTime int64\n\t\twindow             struct {\n\t\t\tname string\n\t\t\ttags string\n\t\t}\n\t)\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t} else if p.Nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unread the point so it can be processed.\n\t\titr.input.unread(p)\n\t\tstartTime, endTime = itr.opt.Window(p.Time)\n\t\twindow.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()\n\t\tbreak\n\t}\n\n\t// Create points by tags.\n\tm := make(map[string]*integerReduceBooleanPoint)\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.NextInWindow(startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr == nil {\n\t\t\tbreak\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t} else if curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Ensure this point is within the same final window.\n\t\tif curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Retrieve the tags on this point for this level of the query.\n\t\t// This may be different than the bucket dimensions.\n\t\ttags := curr.Tags.Subset(itr.dims)\n\t\tid := tags.ID()\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &integerReduceBooleanPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\tm[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateInteger(curr)\n\t}\n\n\t// Reverse sort points by name & tag if our output is supposed to be ordered.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tif 
len(keys) > 1 && itr.opt.Ordered {\n\t\tsort.Sort(reverseStringSlice(keys))\n\t}\n\n\t// Assume the points are already sorted until proven otherwise.\n\tsortedByTime := true\n\t// Emit the points for each name & tag combination.\n\ta := make([]BooleanPoint, 0, len(m))\n\tfor _, k := range keys {\n\t\trp := m[k]\n\t\tpoints := rp.Emitter.Emit()\n\t\tfor i := len(points) - 1; i >= 0; i-- {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tif !itr.keepTags {\n\t\t\t\tpoints[i].Tags = rp.Tags\n\t\t\t}\n\t\t\t// Set the points time to the interval time if the reducer didn't provide one.\n\t\t\tif points[i].Time == ZeroTime {\n\t\t\t\tpoints[i].Time = startTime\n\t\t\t} else {\n\t\t\t\tsortedByTime = false\n\t\t\t}\n\t\t\ta = append(a, points[i])\n\t\t}\n\t}\n\n\t// Points may be out of order. Perform a stable sort by time if requested.\n\tif !sortedByTime && itr.opt.Ordered {\n\t\tsort.Stable(sort.Reverse(booleanPointsByTime(a)))\n\t}\n\n\treturn a, nil\n}\n\n// integerStreamBooleanIterator streams inputs into the iterator and emits points gradually.\ntype integerStreamBooleanIterator struct {\n\tinput  *bufIntegerIterator\n\tcreate func() (IntegerPointAggregator, BooleanPointEmitter)\n\tdims   []string\n\topt    IteratorOptions\n\tm      map[string]*integerReduceBooleanPoint\n\tpoints []BooleanPoint\n}\n\n// newIntegerStreamBooleanIterator returns a new instance of integerStreamBooleanIterator.\nfunc newIntegerStreamBooleanIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, BooleanPointEmitter), opt IteratorOptions) *integerStreamBooleanIterator {\n\treturn &integerStreamBooleanIterator{\n\t\tinput:  newBufIntegerIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t\tm:      make(map[string]*integerReduceBooleanPoint),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *integerStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child 
iterators.\nfunc (itr *integerStreamBooleanIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next value for the stream iterator.\nfunc (itr *integerStreamBooleanIterator) Next() (*BooleanPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// reduce creates and manages aggregators for every point from the input.\n// After aggregating a point, it always tries to emit a value using the emitter.\nfunc (itr *integerStreamBooleanIterator) reduce() ([]BooleanPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.Next()\n\t\tif curr == nil {\n\t\t\t// Close all of the aggregators to flush any remaining points to emit.\n\t\t\tvar points []BooleanPoint\n\t\t\tfor _, rp := range itr.m {\n\t\t\t\tif aggregator, ok := rp.Aggregator.(io.Closer); ok {\n\t\t\t\t\tif err := aggregator.Close(); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tpts := rp.Emitter.Emit()\n\t\t\t\t\tif len(pts) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor i := range pts {\n\t\t\t\t\t\tpts[i].Name = rp.Name\n\t\t\t\t\t\tpts[i].Tags = rp.Tags\n\t\t\t\t\t}\n\t\t\t\t\tpoints = append(points, pts...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Eliminate the aggregators and emitters.\n\t\t\titr.m = nil\n\t\t\treturn points, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags := curr.Tags.Subset(itr.dims)\n\n\t\tid := curr.Name\n\t\tif len(tags.m) > 0 {\n\t\t\tid += \"\\x00\" + tags.ID()\n\t\t}\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := itr.m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = 
&integerReduceBooleanPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\titr.m[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateInteger(curr)\n\n\t\t// Attempt to emit points from the aggregator.\n\t\tpoints := rp.Emitter.Emit()\n\t\tif len(points) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range points {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tpoints[i].Tags = rp.Tags\n\t\t}\n\t\treturn points, nil\n\t}\n}\n\n// integerBooleanExprIterator executes a function to modify an existing point\n// for every output of the input iterator.\ntype integerBooleanExprIterator struct {\n\tleft      *bufIntegerIterator\n\tright     *bufIntegerIterator\n\tfn        integerBooleanExprFunc\n\tpoints    []IntegerPoint // must be size 2\n\tstorePrev bool\n}\n\nfunc newIntegerBooleanExprIterator(left, right IntegerIterator, opt IteratorOptions, fn func(a, b int64) bool) *integerBooleanExprIterator {\n\tvar points []IntegerPoint\n\tswitch opt.Fill {\n\tcase NullFill, PreviousFill:\n\t\tpoints = []IntegerPoint{{Nil: true}, {Nil: true}}\n\tcase NumberFill:\n\t\tvalue := castToInteger(opt.FillValue)\n\t\tpoints = []IntegerPoint{{Value: value}, {Value: value}}\n\t}\n\treturn &integerBooleanExprIterator{\n\t\tleft:      newBufIntegerIterator(left),\n\t\tright:     newBufIntegerIterator(right),\n\t\tpoints:    points,\n\t\tfn:        fn,\n\t\tstorePrev: opt.Fill == PreviousFill,\n\t}\n}\n\nfunc (itr *integerBooleanExprIterator) Stats() IteratorStats {\n\tstats := itr.left.Stats()\n\tstats.Add(itr.right.Stats())\n\treturn stats\n}\n\nfunc (itr *integerBooleanExprIterator) Close() error {\n\titr.left.Close()\n\titr.right.Close()\n\treturn nil\n}\n\nfunc (itr *integerBooleanExprIterator) Next() (*BooleanPoint, error) {\n\tfor {\n\t\ta, b, err := itr.next()\n\t\tif err != nil || (a == nil && b == nil) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If any of these are nil and we are using fill(none), skip these 
points.\n\t\tif (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If one of the two points is nil, we need to fill it with a fake nil\n\t\t// point that has the same name, tags, and time as the other point.\n\t\t// There should never be a time when both of these are nil.\n\t\tif a == nil {\n\t\t\tp := *b\n\t\t\ta = &p\n\t\t\ta.Value = 0\n\t\t\ta.Nil = true\n\t\t} else if b == nil {\n\t\t\tp := *a\n\t\t\tb = &p\n\t\t\tb.Value = 0\n\t\t\tb.Nil = true\n\t\t}\n\n\t\t// If a value is nil, use the fill values if the fill value is non-nil.\n\t\tif a.Nil && !itr.points[0].Nil {\n\t\t\ta.Value = itr.points[0].Value\n\t\t\ta.Nil = false\n\t\t}\n\t\tif b.Nil && !itr.points[1].Nil {\n\t\t\tb.Value = itr.points[1].Value\n\t\t\tb.Nil = false\n\t\t}\n\n\t\tif itr.storePrev {\n\t\t\titr.points[0], itr.points[1] = *a, *b\n\t\t}\n\n\t\tp := &BooleanPoint{\n\t\t\tName:       a.Name,\n\t\t\tTags:       a.Tags,\n\t\t\tTime:       a.Time,\n\t\t\tNil:        a.Nil || b.Nil,\n\t\t\tAggregated: a.Aggregated,\n\t\t}\n\t\tif !p.Nil {\n\t\t\tp.Value = itr.fn(a.Value, b.Value)\n\t\t}\n\t\treturn p, nil\n\n\t}\n}\n\n// next returns the next points within each iterator. 
If the iterators are\n// uneven, it organizes them so only matching points are returned.\nfunc (itr *integerBooleanExprIterator) next() (a, b *IntegerPoint, err error) {\n\t// Retrieve the next value for both the left and right.\n\ta, err = itr.left.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tb, err = itr.right.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// If we have a point from both, make sure that they match each other.\n\tif a != nil && b != nil {\n\t\tif a.Name > b.Name {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Name < b.Name {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if ltags < rtags {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif a.Time > b.Time {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Time < b.Time {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\t}\n\treturn a, b, nil\n}\n\n// integerBooleanExprFunc creates or modifies a point by combining two\n// points. The point passed in may be modified and returned rather than\n// allocating a new point if possible. 
One of the points may be nil, but at\n// least one of the points will be non-nil.\ntype integerBooleanExprFunc func(a, b int64) bool\n\n// integerTransformIterator executes a function to modify an existing point for every\n// output of the input iterator.\ntype integerTransformIterator struct {\n\tinput IntegerIterator\n\tfn    integerTransformFunc\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *integerTransformIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *integerTransformIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *integerTransformIterator) Next() (*IntegerPoint, error) {\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if p != nil {\n\t\tp = itr.fn(p)\n\t}\n\treturn p, nil\n}\n\n// integerTransformFunc creates or modifies a point.\n// The point passed in may be modified and returned rather than allocating a\n// new point if possible.\ntype integerTransformFunc func(p *IntegerPoint) *IntegerPoint\n\n// integerBoolTransformIterator executes a function to modify an existing point for every\n// output of the input iterator.\ntype integerBoolTransformIterator struct {\n\tinput IntegerIterator\n\tfn    integerBoolTransformFunc\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *integerBoolTransformIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *integerBoolTransformIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *integerBoolTransformIterator) Next() (*BooleanPoint, error) {\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if p != nil {\n\t\treturn itr.fn(p), nil\n\t}\n\treturn nil, nil\n}\n\n// integerBoolTransformFunc creates or modifies a 
point.\n// The point passed in may be modified and returned rather than allocating a\n// new point if possible.\ntype integerBoolTransformFunc func(p *IntegerPoint) *BooleanPoint\n\n// integerDedupeIterator only outputs unique points.\n// This differs from the DistinctIterator in that it compares all aux fields too.\n// This iterator is relatively inefficient and should only be used on small\n// datasets such as meta query results.\ntype integerDedupeIterator struct {\n\tinput IntegerIterator\n\tm     map[string]struct{} // lookup of points already sent\n}\n\ntype integerIteratorMapper struct {\n\te      *Emitter\n\tbuf    []interface{}\n\tdriver IteratorMap   // which iterator to use for the primary value, can be nil\n\tfields []IteratorMap // which iterator to use for an aux field\n\tpoint  IntegerPoint\n}\n\nfunc newIntegerIteratorMapper(itrs []Iterator, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *integerIteratorMapper {\n\te := NewEmitter(itrs, opt.Ascending, 0)\n\te.OmitTime = true\n\treturn &integerIteratorMapper{\n\t\te:      e,\n\t\tbuf:    make([]interface{}, len(itrs)),\n\t\tdriver: driver,\n\t\tfields: fields,\n\t\tpoint: IntegerPoint{\n\t\t\tAux: make([]interface{}, len(fields)),\n\t\t},\n\t}\n}\n\nfunc (itr *integerIteratorMapper) Next() (*IntegerPoint, error) {\n\tt, name, tags, err := itr.e.loadBuf()\n\tif err != nil || t == ZeroTime {\n\t\treturn nil, err\n\t}\n\titr.point.Time = t\n\titr.point.Name = name\n\titr.point.Tags = tags\n\n\titr.e.readInto(t, name, tags, itr.buf)\n\tif itr.driver != nil {\n\t\tif v := itr.driver.Value(tags, itr.buf); v != nil {\n\t\t\tif v, ok := v.(int64); ok {\n\t\t\t\titr.point.Value = v\n\t\t\t\titr.point.Nil = false\n\t\t\t} else {\n\t\t\t\titr.point.Value = 0\n\t\t\t\titr.point.Nil = true\n\t\t\t}\n\t\t} else {\n\t\t\titr.point.Value = 0\n\t\t\titr.point.Nil = true\n\t\t}\n\t}\n\tfor i, f := range itr.fields {\n\t\titr.point.Aux[i] = f.Value(tags, itr.buf)\n\t}\n\treturn &itr.point, 
nil\n}\n\nfunc (itr *integerIteratorMapper) Stats() IteratorStats {\n\tstats := IteratorStats{}\n\tfor _, itr := range itr.e.itrs {\n\t\tstats.Add(itr.Stats())\n\t}\n\treturn stats\n}\n\nfunc (itr *integerIteratorMapper) Close() error {\n\treturn itr.e.Close()\n}\n\ntype integerFilterIterator struct {\n\tinput IntegerIterator\n\tcond  Expr\n\topt   IteratorOptions\n\tm     map[string]interface{}\n}\n\nfunc newIntegerFilterIterator(input IntegerIterator, cond Expr, opt IteratorOptions) IntegerIterator {\n\t// Strip out time conditions from the WHERE clause.\n\t// TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct.\n\tn := RewriteFunc(CloneExpr(cond), func(n Node) Node {\n\t\tswitch n := n.(type) {\n\t\tcase *BinaryExpr:\n\t\t\tif n.LHS.String() == \"time\" {\n\t\t\t\treturn &BooleanLiteral{Val: true}\n\t\t\t}\n\t\t}\n\t\treturn n\n\t})\n\n\tcond, _ = n.(Expr)\n\tif cond == nil {\n\t\treturn input\n\t} else if n, ok := cond.(*BooleanLiteral); ok && n.Val {\n\t\treturn input\n\t}\n\n\treturn &integerFilterIterator{\n\t\tinput: input,\n\t\tcond:  cond,\n\t\topt:   opt,\n\t\tm:     make(map[string]interface{}),\n\t}\n}\n\nfunc (itr *integerFilterIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *integerFilterIterator) Close() error         { return itr.input.Close() }\n\nfunc (itr *integerFilterIterator) Next() (*IntegerPoint, error) {\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor i, ref := range itr.opt.Aux {\n\t\t\titr.m[ref.Val] = p.Aux[i]\n\t\t}\n\t\tfor k, v := range p.Tags.KeyValues() {\n\t\t\titr.m[k] = v\n\t\t}\n\n\t\tif !EvalBool(itr.cond, itr.m) {\n\t\t\tcontinue\n\t\t}\n\t\treturn p, nil\n\t}\n}\n\n// newIntegerDedupeIterator returns a new instance of integerDedupeIterator.\nfunc newIntegerDedupeIterator(input IntegerIterator) *integerDedupeIterator {\n\treturn &integerDedupeIterator{\n\t\tinput: input,\n\t\tm:     
make(map[string]struct{}),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *integerDedupeIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *integerDedupeIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next unique point from the input iterator.\nfunc (itr *integerDedupeIterator) Next() (*IntegerPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\tp, err := itr.input.Next()\n\t\tif p == nil || err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Serialize to bytes to store in lookup.\n\t\tbuf, err := proto.Marshal(encodeIntegerPoint(p))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If the point has already been output then move to the next point.\n\t\tif _, ok := itr.m[string(buf)]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Otherwise mark it as emitted and return point.\n\t\titr.m[string(buf)] = struct{}{}\n\t\treturn p, nil\n\t}\n}\n\n// integerReaderIterator represents an iterator that streams from a reader.\ntype integerReaderIterator struct {\n\tr   io.Reader\n\tdec *IntegerPointDecoder\n}\n\n// newIntegerReaderIterator returns a new instance of integerReaderIterator.\nfunc newIntegerReaderIterator(r io.Reader, stats IteratorStats) *integerReaderIterator {\n\tdec := NewIntegerPointDecoder(r)\n\tdec.stats = stats\n\n\treturn &integerReaderIterator{\n\t\tr:   r,\n\t\tdec: dec,\n\t}\n}\n\n// Stats returns stats about points processed.\nfunc (itr *integerReaderIterator) Stats() IteratorStats { return itr.dec.stats }\n\n// Close closes the underlying reader, if applicable.\nfunc (itr *integerReaderIterator) Close() error {\n\tif r, ok := itr.r.(io.ReadCloser); ok {\n\t\treturn r.Close()\n\t}\n\treturn nil\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *integerReaderIterator) Next() (*IntegerPoint, error) {\n\t// OPTIMIZE(benbjohnson): Reuse point on iterator.\n\n\t// Unmarshal next point.\n\tp := 
&IntegerPoint{}\n\tif err := itr.dec.DecodeIntegerPoint(p); err == io.EOF {\n\t\treturn nil, nil\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}\n\n// StringIterator represents a stream of string points.\ntype StringIterator interface {\n\tIterator\n\tNext() (*StringPoint, error)\n}\n\n// newStringIterators converts a slice of Iterator to a slice of StringIterator.\n// Drop and closes any iterator in itrs that is not a StringIterator and cannot\n// be cast to a StringIterator.\nfunc newStringIterators(itrs []Iterator) []StringIterator {\n\ta := make([]StringIterator, 0, len(itrs))\n\tfor _, itr := range itrs {\n\t\tswitch itr := itr.(type) {\n\t\tcase StringIterator:\n\t\t\ta = append(a, itr)\n\n\t\tdefault:\n\t\t\titr.Close()\n\t\t}\n\t}\n\treturn a\n}\n\n// bufStringIterator represents a buffered StringIterator.\ntype bufStringIterator struct {\n\titr StringIterator\n\tbuf *StringPoint\n}\n\n// newBufStringIterator returns a buffered StringIterator.\nfunc newBufStringIterator(itr StringIterator) *bufStringIterator {\n\treturn &bufStringIterator{itr: itr}\n}\n\n// Stats returns statistics from the input iterator.\nfunc (itr *bufStringIterator) Stats() IteratorStats { return itr.itr.Stats() }\n\n// Close closes the underlying iterator.\nfunc (itr *bufStringIterator) Close() error { return itr.itr.Close() }\n\n// peek returns the next point without removing it from the iterator.\nfunc (itr *bufStringIterator) peek() (*StringPoint, error) {\n\tp, err := itr.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\titr.unread(p)\n\treturn p, nil\n}\n\n// peekTime returns the time of the next point.\n// Returns zero time if no more points available.\nfunc (itr *bufStringIterator) peekTime() (int64, error) {\n\tp, err := itr.peek()\n\tif p == nil || err != nil {\n\t\treturn ZeroTime, err\n\t}\n\treturn p.Time, nil\n}\n\n// Next returns the current buffer, if exists, or calls the underlying iterator.\nfunc (itr *bufStringIterator) Next() 
(*StringPoint, error) {\n\tbuf := itr.buf\n\tif buf != nil {\n\t\titr.buf = nil\n\t\treturn buf, nil\n\t}\n\treturn itr.itr.Next()\n}\n\n// NextInWindow returns the next value if it is between [startTime, endTime).\n// If the next value is outside the range then it is moved to the buffer.\nfunc (itr *bufStringIterator) NextInWindow(startTime, endTime int64) (*StringPoint, error) {\n\tv, err := itr.Next()\n\tif v == nil || err != nil {\n\t\treturn nil, err\n\t} else if t := v.Time; t >= endTime || t < startTime {\n\t\titr.unread(v)\n\t\treturn nil, nil\n\t}\n\treturn v, nil\n}\n\n// unread sets v to the buffer. It is read on the next call to Next().\nfunc (itr *bufStringIterator) unread(v *StringPoint) { itr.buf = v }\n\n// stringMergeIterator represents an iterator that combines multiple string iterators.\ntype stringMergeIterator struct {\n\tinputs []StringIterator\n\theap   *stringMergeHeap\n\tinit   bool\n\n\t// Current iterator and window.\n\tcurr   *stringMergeHeapItem\n\twindow struct {\n\t\tname      string\n\t\ttags      string\n\t\tstartTime int64\n\t\tendTime   int64\n\t}\n}\n\n// newStringMergeIterator returns a new instance of stringMergeIterator.\nfunc newStringMergeIterator(inputs []StringIterator, opt IteratorOptions) *stringMergeIterator {\n\titr := &stringMergeIterator{\n\t\tinputs: inputs,\n\t\theap: &stringMergeHeap{\n\t\t\titems: make([]*stringMergeHeapItem, 0, len(inputs)),\n\t\t\topt:   opt,\n\t\t},\n\t}\n\n\t// Initialize heap items.\n\tfor _, input := range inputs {\n\t\t// Wrap in buffer, ignore any inputs without anymore points.\n\t\tbufInput := newBufStringIterator(input)\n\n\t\t// Append to the heap.\n\t\titr.heap.items = append(itr.heap.items, &stringMergeHeapItem{itr: bufInput})\n\t}\n\n\treturn itr\n}\n\n// Stats returns an aggregation of stats from the underlying iterators.\nfunc (itr *stringMergeIterator) Stats() IteratorStats {\n\tvar stats IteratorStats\n\tfor _, input := range itr.inputs 
{\n\t\tstats.Add(input.Stats())\n\t}\n\treturn stats\n}\n\n// Close closes the underlying iterators.\nfunc (itr *stringMergeIterator) Close() error {\n\tfor _, input := range itr.inputs {\n\t\tinput.Close()\n\t}\n\titr.curr = nil\n\titr.inputs = nil\n\titr.heap.items = nil\n\treturn nil\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *stringMergeIterator) Next() (*StringPoint, error) {\n\t// Initialize the heap. This needs to be done lazily on the first call to this iterator\n\t// so that iterator initialization done through the Select() call returns quickly.\n\t// Queries can only be interrupted after the Select() call completes so any operations\n\t// done during iterator creation cannot be interrupted, which is why we do it here\n\t// instead so an interrupt can happen while initializing the heap.\n\tif !itr.init {\n\t\titems := itr.heap.items\n\t\titr.heap.items = make([]*stringMergeHeapItem, 0, len(items))\n\t\tfor _, item := range items {\n\t\t\tif p, err := item.itr.peek(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else if p == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\titr.heap.items = append(itr.heap.items, item)\n\t\t}\n\t\theap.Init(itr.heap)\n\t\titr.init = true\n\t}\n\n\tfor {\n\t\t// Retrieve the next iterator if we don't have one.\n\t\tif itr.curr == nil {\n\t\t\tif len(itr.heap.items) == 0 {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t\titr.curr = heap.Pop(itr.heap).(*stringMergeHeapItem)\n\n\t\t\t// Read point and set current window.\n\t\t\tp, err := itr.curr.itr.Next()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ttags := p.Tags.Subset(itr.heap.opt.Dimensions)\n\t\t\titr.window.name, itr.window.tags = p.Name, tags.ID()\n\t\t\titr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time)\n\t\t\treturn p, nil\n\t\t}\n\n\t\t// Read the next point from the current iterator.\n\t\tp, err := itr.curr.itr.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If there are no more points then remove 
iterator from heap and find next.\n\t\tif p == nil {\n\t\t\titr.curr = nil\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check if the point is inside of our current window.\n\t\tinWindow := true\n\t\tif window := itr.window; window.name != p.Name {\n\t\t\tinWindow = false\n\t\t} else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() {\n\t\t\tinWindow = false\n\t\t} else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime {\n\t\t\tinWindow = false\n\t\t} else if !opt.Ascending && p.Time < window.startTime {\n\t\t\tinWindow = false\n\t\t}\n\n\t\t// If it's outside our window then push iterator back on the heap and find new iterator.\n\t\tif !inWindow {\n\t\t\titr.curr.itr.unread(p)\n\t\t\theap.Push(itr.heap, itr.curr)\n\t\t\titr.curr = nil\n\t\t\tcontinue\n\t\t}\n\n\t\treturn p, nil\n\t}\n}\n\n// stringMergeHeap represents a heap of stringMergeHeapItems.\n// Items are sorted by their next window and then by name/tags.\ntype stringMergeHeap struct {\n\topt   IteratorOptions\n\titems []*stringMergeHeapItem\n}\n\nfunc (h *stringMergeHeap) Len() int      { return len(h.items) }\nfunc (h *stringMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }\nfunc (h *stringMergeHeap) Less(i, j int) bool {\n\tx, err := h.items[i].itr.peek()\n\tif err != nil {\n\t\treturn true\n\t}\n\ty, err := h.items[j].itr.peek()\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tif h.opt.Ascending {\n\t\tif x.Name != y.Name {\n\t\t\treturn x.Name < y.Name\n\t\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {\n\t\t\treturn xTags.ID() < yTags.ID()\n\t\t}\n\t} else {\n\t\tif x.Name != y.Name {\n\t\t\treturn x.Name > y.Name\n\t\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {\n\t\t\treturn xTags.ID() > yTags.ID()\n\t\t}\n\t}\n\n\txt, _ := h.opt.Window(x.Time)\n\tyt, _ := h.opt.Window(y.Time)\n\n\tif h.opt.Ascending 
{\n\t\treturn xt < yt\n\t}\n\treturn xt > yt\n}\n\nfunc (h *stringMergeHeap) Push(x interface{}) {\n\th.items = append(h.items, x.(*stringMergeHeapItem))\n}\n\nfunc (h *stringMergeHeap) Pop() interface{} {\n\told := h.items\n\tn := len(old)\n\titem := old[n-1]\n\th.items = old[0 : n-1]\n\treturn item\n}\n\ntype stringMergeHeapItem struct {\n\titr *bufStringIterator\n}\n\n// stringSortedMergeIterator is an iterator that sorts and merges multiple iterators into one.\ntype stringSortedMergeIterator struct {\n\tinputs []StringIterator\n\theap   *stringSortedMergeHeap\n\tinit   bool\n}\n\n// newStringSortedMergeIterator returns an instance of stringSortedMergeIterator.\nfunc newStringSortedMergeIterator(inputs []StringIterator, opt IteratorOptions) Iterator {\n\titr := &stringSortedMergeIterator{\n\t\tinputs: inputs,\n\t\theap: &stringSortedMergeHeap{\n\t\t\titems: make([]*stringSortedMergeHeapItem, 0, len(inputs)),\n\t\t\topt:   opt,\n\t\t},\n\t}\n\n\t// Initialize heap items.\n\tfor _, input := range inputs {\n\t\t// Append to the heap.\n\t\titr.heap.items = append(itr.heap.items, &stringSortedMergeHeapItem{itr: input})\n\t}\n\n\treturn itr\n}\n\n// Stats returns an aggregation of stats from the underlying iterators.\nfunc (itr *stringSortedMergeIterator) Stats() IteratorStats {\n\tvar stats IteratorStats\n\tfor _, input := range itr.inputs {\n\t\tstats.Add(input.Stats())\n\t}\n\treturn stats\n}\n\n// Close closes the underlying iterators.\nfunc (itr *stringSortedMergeIterator) Close() error {\n\tfor _, input := range itr.inputs {\n\t\tinput.Close()\n\t}\n\treturn nil\n}\n\n// Next returns the next points from the iterator.\nfunc (itr *stringSortedMergeIterator) Next() (*StringPoint, error) { return itr.pop() }\n\n// pop returns the next point from the heap.\n// Reads the next point from item's cursor and puts it back on the heap.\nfunc (itr *stringSortedMergeIterator) pop() (*StringPoint, error) {\n\t// Initialize the heap. 
See the MergeIterator to see why this has to be done lazily.\n\tif !itr.init {\n\t\titems := itr.heap.items\n\t\titr.heap.items = make([]*stringSortedMergeHeapItem, 0, len(items))\n\t\tfor _, item := range items {\n\t\t\tvar err error\n\t\t\tif item.point, err = item.itr.Next(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else if item.point == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\titr.heap.items = append(itr.heap.items, item)\n\t\t}\n\t\theap.Init(itr.heap)\n\t\titr.init = true\n\t}\n\n\tif len(itr.heap.items) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// Read the next item from the heap.\n\titem := heap.Pop(itr.heap).(*stringSortedMergeHeapItem)\n\tif item.err != nil {\n\t\treturn nil, item.err\n\t} else if item.point == nil {\n\t\treturn nil, nil\n\t}\n\n\t// Copy the point for return.\n\tp := item.point.Clone()\n\n\t// Read the next item from the cursor. Push back to heap if one exists.\n\tif item.point, item.err = item.itr.Next(); item.point != nil {\n\t\theap.Push(itr.heap, item)\n\t}\n\n\treturn p, nil\n}\n\n// stringSortedMergeHeap represents a heap of stringSortedMergeHeapItems.\ntype stringSortedMergeHeap struct {\n\topt   IteratorOptions\n\titems []*stringSortedMergeHeapItem\n}\n\nfunc (h *stringSortedMergeHeap) Len() int      { return len(h.items) }\nfunc (h *stringSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }\nfunc (h *stringSortedMergeHeap) Less(i, j int) bool {\n\tx, y := h.items[i].point, h.items[j].point\n\n\tif h.opt.Ascending {\n\t\tif x.Name != y.Name {\n\t\t\treturn x.Name < y.Name\n\t\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {\n\t\t\treturn xTags.ID() < yTags.ID()\n\t\t}\n\t\treturn x.Time < y.Time\n\t}\n\n\tif x.Name != y.Name {\n\t\treturn x.Name > y.Name\n\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {\n\t\treturn xTags.ID() > yTags.ID()\n\t}\n\treturn x.Time > 
y.Time\n}\n\nfunc (h *stringSortedMergeHeap) Push(x interface{}) {\n\th.items = append(h.items, x.(*stringSortedMergeHeapItem))\n}\n\nfunc (h *stringSortedMergeHeap) Pop() interface{} {\n\told := h.items\n\tn := len(old)\n\titem := old[n-1]\n\th.items = old[0 : n-1]\n\treturn item\n}\n\ntype stringSortedMergeHeapItem struct {\n\tpoint *StringPoint\n\terr   error\n\titr   StringIterator\n}\n\n// stringParallelIterator represents an iterator that pulls data in a separate goroutine.\ntype stringParallelIterator struct {\n\tinput StringIterator\n\tch    chan stringPointError\n\n\tonce    sync.Once\n\tclosing chan struct{}\n\twg      sync.WaitGroup\n}\n\n// newStringParallelIterator returns a new instance of stringParallelIterator.\nfunc newStringParallelIterator(input StringIterator) *stringParallelIterator {\n\titr := &stringParallelIterator{\n\t\tinput:   input,\n\t\tch:      make(chan stringPointError, 256),\n\t\tclosing: make(chan struct{}),\n\t}\n\titr.wg.Add(1)\n\tgo itr.monitor()\n\treturn itr\n}\n\n// Stats returns stats from the underlying iterator.\nfunc (itr *stringParallelIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the underlying iterators.\nfunc (itr *stringParallelIterator) Close() error {\n\titr.once.Do(func() { close(itr.closing) })\n\titr.wg.Wait()\n\treturn itr.input.Close()\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *stringParallelIterator) Next() (*StringPoint, error) {\n\tv, ok := <-itr.ch\n\tif !ok {\n\t\treturn nil, io.EOF\n\t}\n\treturn v.point, v.err\n}\n\n// monitor runs in a separate goroutine and actively pulls the next point.\nfunc (itr *stringParallelIterator) monitor() {\n\tdefer close(itr.ch)\n\tdefer itr.wg.Done()\n\n\tfor {\n\t\t// Read next point.\n\t\tp, err := itr.input.Next()\n\t\tif p != nil {\n\t\t\tp = p.Clone()\n\t\t}\n\n\t\tselect {\n\t\tcase <-itr.closing:\n\t\t\treturn\n\t\tcase itr.ch <- stringPointError{point: p, err: err}:\n\t\t}\n\t}\n}\n\ntype stringPointError 
struct {\n\tpoint *StringPoint\n\terr   error\n}\n\n// stringLimitIterator represents an iterator that limits points per group.\ntype stringLimitIterator struct {\n\tinput StringIterator\n\topt   IteratorOptions\n\tn     int\n\n\tprev struct {\n\t\tname string\n\t\ttags Tags\n\t}\n}\n\n// newStringLimitIterator returns a new instance of stringLimitIterator.\nfunc newStringLimitIterator(input StringIterator, opt IteratorOptions) *stringLimitIterator {\n\treturn &stringLimitIterator{\n\t\tinput: input,\n\t\topt:   opt,\n\t}\n}\n\n// Stats returns stats from the underlying iterator.\nfunc (itr *stringLimitIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the underlying iterators.\nfunc (itr *stringLimitIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next point from the iterator.\nfunc (itr *stringLimitIterator) Next() (*StringPoint, error) {\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif p == nil || err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Reset window and counter if a new window is encountered.\n\t\tif p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) {\n\t\t\titr.prev.name = p.Name\n\t\t\titr.prev.tags = p.Tags\n\t\t\titr.n = 0\n\t\t}\n\n\t\t// Increment counter.\n\t\titr.n++\n\n\t\t// Read next point if not beyond the offset.\n\t\tif itr.n <= itr.opt.Offset {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Read next point if we're beyond the limit.\n\t\tif itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn p, nil\n\t}\n}\n\ntype stringFillIterator struct {\n\tinput     *bufStringIterator\n\tprev      StringPoint\n\tstartTime int64\n\tendTime   int64\n\tauxFields []interface{}\n\tinit      bool\n\topt       IteratorOptions\n\n\twindow struct {\n\t\tname   string\n\t\ttags   Tags\n\t\ttime   int64\n\t\toffset int64\n\t}\n}\n\nfunc newStringFillIterator(input StringIterator, expr Expr, opt IteratorOptions) *stringFillIterator {\n\tif opt.Fill == NullFill 
{\n\t\tif expr, ok := expr.(*Call); ok && expr.Name == \"count\" {\n\t\t\topt.Fill = NumberFill\n\t\t\topt.FillValue = \"\"\n\t\t}\n\t}\n\n\tvar startTime, endTime int64\n\tif opt.Ascending {\n\t\tstartTime, _ = opt.Window(opt.StartTime)\n\t\tendTime, _ = opt.Window(opt.EndTime)\n\t} else {\n\t\tstartTime, _ = opt.Window(opt.EndTime)\n\t\tendTime, _ = opt.Window(opt.StartTime)\n\t}\n\n\tvar auxFields []interface{}\n\tif len(opt.Aux) > 0 {\n\t\tauxFields = make([]interface{}, len(opt.Aux))\n\t}\n\n\treturn &stringFillIterator{\n\t\tinput:     newBufStringIterator(input),\n\t\tprev:      StringPoint{Nil: true},\n\t\tstartTime: startTime,\n\t\tendTime:   endTime,\n\t\tauxFields: auxFields,\n\t\topt:       opt,\n\t}\n}\n\nfunc (itr *stringFillIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *stringFillIterator) Close() error         { return itr.input.Close() }\n\nfunc (itr *stringFillIterator) Next() (*StringPoint, error) {\n\tif !itr.init {\n\t\tp, err := itr.input.peek()\n\t\tif p == nil || err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\titr.window.name, itr.window.tags = p.Name, p.Tags\n\t\titr.window.time = itr.startTime\n\t\tif itr.opt.Location != nil {\n\t\t\t_, itr.window.offset = itr.opt.Zone(itr.window.time)\n\t\t}\n\t\titr.init = true\n\t}\n\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check if the next point is outside of our window or is nil.\n\tfor p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() {\n\t\t// If we are inside of an interval, unread the point and continue below to\n\t\t// constructing a new point.\n\t\tif itr.opt.Ascending {\n\t\t\tif itr.window.time <= itr.endTime {\n\t\t\t\titr.input.unread(p)\n\t\t\t\tp = nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tif itr.window.time >= itr.endTime {\n\t\t\t\titr.input.unread(p)\n\t\t\t\tp = nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// We are *not* in a current interval. 
If there is no next point,\n\t\t// we are at the end of all intervals.\n\t\tif p == nil {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\t// Set the new interval.\n\t\titr.window.name, itr.window.tags = p.Name, p.Tags\n\t\titr.window.time = itr.startTime\n\t\tif itr.opt.Location != nil {\n\t\t\t_, itr.window.offset = itr.opt.Zone(itr.window.time)\n\t\t}\n\t\titr.prev = StringPoint{Nil: true}\n\t\tbreak\n\t}\n\n\t// Check if the point is our next expected point.\n\tif p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) {\n\t\tif p != nil {\n\t\t\titr.input.unread(p)\n\t\t}\n\n\t\tp = &StringPoint{\n\t\t\tName: itr.window.name,\n\t\t\tTags: itr.window.tags,\n\t\t\tTime: itr.window.time,\n\t\t\tAux:  itr.auxFields,\n\t\t}\n\n\t\tswitch itr.opt.Fill {\n\t\tcase LinearFill:\n\t\t\tfallthrough\n\t\tcase NullFill:\n\t\t\tp.Nil = true\n\t\tcase NumberFill:\n\t\t\tp.Value = castToString(itr.opt.FillValue)\n\t\tcase PreviousFill:\n\t\t\tif !itr.prev.Nil {\n\t\t\t\tp.Value = itr.prev.Value\n\t\t\t\tp.Nil = itr.prev.Nil\n\t\t\t} else {\n\t\t\t\tp.Nil = true\n\t\t\t}\n\t\t}\n\t} else {\n\t\titr.prev = *p\n\t}\n\n\t// Advance the expected time. 
Do not advance to a new window here\n\t// as there may be lingering points with the same timestamp in the previous\n\t// window.\n\tif itr.opt.Ascending {\n\t\titr.window.time += int64(itr.opt.Interval.Duration)\n\t} else {\n\t\titr.window.time -= int64(itr.opt.Interval.Duration)\n\t}\n\n\t// Check to see if we have passed over an offset change and adjust the time\n\t// to account for this new offset.\n\tif itr.opt.Location != nil {\n\t\tif _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset {\n\t\t\tdiff := itr.window.offset - offset\n\t\t\tif abs(diff) < int64(itr.opt.Interval.Duration) {\n\t\t\t\titr.window.time += diff\n\t\t\t}\n\t\t\titr.window.offset = offset\n\t\t}\n\t}\n\treturn p, nil\n}\n\n// stringIntervalIterator represents a string implementation of IntervalIterator.\ntype stringIntervalIterator struct {\n\tinput StringIterator\n\topt   IteratorOptions\n}\n\nfunc newStringIntervalIterator(input StringIterator, opt IteratorOptions) *stringIntervalIterator {\n\treturn &stringIntervalIterator{input: input, opt: opt}\n}\n\nfunc (itr *stringIntervalIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *stringIntervalIterator) Close() error         { return itr.input.Close() }\n\nfunc (itr *stringIntervalIterator) Next() (*StringPoint, error) {\n\tp, err := itr.input.Next()\n\tif p == nil || err != nil {\n\t\treturn nil, err\n\t}\n\tp.Time, _ = itr.opt.Window(p.Time)\n\t// If we see the minimum allowable time, set the time to zero so we don't\n\t// break the default returned time for aggregate queries without times.\n\tif p.Time == MinTime {\n\t\tp.Time = 0\n\t}\n\treturn p, nil\n}\n\n// stringInterruptIterator represents a string implementation of InterruptIterator.\ntype stringInterruptIterator struct {\n\tinput   StringIterator\n\tclosing <-chan struct{}\n\tcount   int\n}\n\nfunc newStringInterruptIterator(input StringIterator, closing <-chan struct{}) *stringInterruptIterator {\n\treturn 
&stringInterruptIterator{input: input, closing: closing}\n}\n\nfunc (itr *stringInterruptIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *stringInterruptIterator) Close() error         { return itr.input.Close() }\n\nfunc (itr *stringInterruptIterator) Next() (*StringPoint, error) {\n\t// Only check if the channel is closed every N points. This\n\t// intentionally checks on both 0 and N so that if the iterator\n\t// has been interrupted before the first point is emitted it will\n\t// not emit any points.\n\tif itr.count&0xFF == 0xFF {\n\t\tselect {\n\t\tcase <-itr.closing:\n\t\t\treturn nil, itr.Close()\n\t\tdefault:\n\t\t\t// Reset iterator count to zero and fall through to emit the next point.\n\t\t\titr.count = 0\n\t\t}\n\t}\n\n\t// Increment the counter for every point read.\n\titr.count++\n\treturn itr.input.Next()\n}\n\n// stringCloseInterruptIterator represents a string implementation of CloseInterruptIterator.\ntype stringCloseInterruptIterator struct {\n\tinput   StringIterator\n\tclosing <-chan struct{}\n\tdone    chan struct{}\n\tonce    sync.Once\n}\n\nfunc newStringCloseInterruptIterator(input StringIterator, closing <-chan struct{}) *stringCloseInterruptIterator {\n\titr := &stringCloseInterruptIterator{\n\t\tinput:   input,\n\t\tclosing: closing,\n\t\tdone:    make(chan struct{}),\n\t}\n\tgo itr.monitor()\n\treturn itr\n}\n\nfunc (itr *stringCloseInterruptIterator) monitor() {\n\tselect {\n\tcase <-itr.closing:\n\t\titr.Close()\n\tcase <-itr.done:\n\t}\n}\n\nfunc (itr *stringCloseInterruptIterator) Stats() IteratorStats {\n\treturn itr.input.Stats()\n}\n\nfunc (itr *stringCloseInterruptIterator) Close() error {\n\titr.once.Do(func() {\n\t\tclose(itr.done)\n\t\titr.input.Close()\n\t})\n\treturn nil\n}\n\nfunc (itr *stringCloseInterruptIterator) Next() (*StringPoint, error) {\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\t// Check if the iterator was closed.\n\t\tselect {\n\t\tcase <-itr.done:\n\t\t\treturn nil, 
nil\n\t\tdefault:\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn p, nil\n}\n\n// auxStringPoint represents a combination of a point and an error for the AuxIterator.\ntype auxStringPoint struct {\n\tpoint *StringPoint\n\terr   error\n}\n\n// stringAuxIterator represents a string implementation of AuxIterator.\ntype stringAuxIterator struct {\n\tinput      *bufStringIterator\n\toutput     chan auxStringPoint\n\tfields     *auxIteratorFields\n\tbackground bool\n}\n\nfunc newStringAuxIterator(input StringIterator, opt IteratorOptions) *stringAuxIterator {\n\treturn &stringAuxIterator{\n\t\tinput:  newBufStringIterator(input),\n\t\toutput: make(chan auxStringPoint, 1),\n\t\tfields: newAuxIteratorFields(opt),\n\t}\n}\n\nfunc (itr *stringAuxIterator) Background() {\n\titr.background = true\n\titr.Start()\n\tgo DrainIterator(itr)\n}\n\nfunc (itr *stringAuxIterator) Start()               { go itr.stream() }\nfunc (itr *stringAuxIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *stringAuxIterator) Close() error         { return itr.input.Close() }\nfunc (itr *stringAuxIterator) Next() (*StringPoint, error) {\n\tp := <-itr.output\n\treturn p.point, p.err\n}\nfunc (itr *stringAuxIterator) Iterator(name string, typ DataType) Iterator {\n\treturn itr.fields.iterator(name, typ)\n}\n\nfunc (itr *stringAuxIterator) stream() {\n\tfor {\n\t\t// Read next point.\n\t\tp, err := itr.input.Next()\n\t\tif err != nil {\n\t\t\titr.output <- auxStringPoint{err: err}\n\t\t\titr.fields.sendError(err)\n\t\t\tbreak\n\t\t} else if p == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Send point to output and to each field iterator.\n\t\titr.output <- auxStringPoint{point: p}\n\t\tif ok := itr.fields.send(p); !ok && itr.background {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tclose(itr.output)\n\titr.fields.close()\n}\n\n// stringChanIterator represents a new instance of stringChanIterator.\ntype stringChanIterator struct {\n\tbuf struct {\n\t\ti      int\n\t\tfilled bool\n\t\tpoints 
[2]StringPoint\n\t}\n\terr  error\n\tcond *sync.Cond\n\tdone bool\n}\n\nfunc (itr *stringChanIterator) Stats() IteratorStats { return IteratorStats{} }\n\nfunc (itr *stringChanIterator) Close() error {\n\titr.cond.L.Lock()\n\t// Mark the channel iterator as done and signal all waiting goroutines to start again.\n\titr.done = true\n\titr.cond.Broadcast()\n\t// Do not defer the unlock so we don't create an unnecessary allocation.\n\titr.cond.L.Unlock()\n\treturn nil\n}\n\nfunc (itr *stringChanIterator) setBuf(name string, tags Tags, time int64, value interface{}) bool {\n\titr.cond.L.Lock()\n\tdefer itr.cond.L.Unlock()\n\n\t// Wait for either the iterator to be done (so we don't have to set the value)\n\t// or for the buffer to have been read and ready for another write.\n\tfor !itr.done && itr.buf.filled {\n\t\titr.cond.Wait()\n\t}\n\n\t// Do not set the value and return false to signal that the iterator is closed.\n\t// Do this after the above wait as the above for loop may have exited because\n\t// the iterator was closed.\n\tif itr.done {\n\t\treturn false\n\t}\n\n\tswitch v := value.(type) {\n\tcase string:\n\t\titr.buf.points[itr.buf.i] = StringPoint{Name: name, Tags: tags, Time: time, Value: v}\n\n\tdefault:\n\t\titr.buf.points[itr.buf.i] = StringPoint{Name: name, Tags: tags, Time: time, Nil: true}\n\t}\n\titr.buf.filled = true\n\n\t// Signal to all waiting goroutines that a new value is ready to read.\n\titr.cond.Signal()\n\treturn true\n}\n\nfunc (itr *stringChanIterator) setErr(err error) {\n\titr.cond.L.Lock()\n\tdefer itr.cond.L.Unlock()\n\titr.err = err\n\n\t// Signal to all waiting goroutines that a new value is ready to read.\n\titr.cond.Signal()\n}\n\nfunc (itr *stringChanIterator) Next() (*StringPoint, error) {\n\titr.cond.L.Lock()\n\tdefer itr.cond.L.Unlock()\n\n\t// Check for an error and return one if there.\n\tif itr.err != nil {\n\t\treturn nil, itr.err\n\t}\n\n\t// Wait until either a value is available in the buffer or\n\t// the iterator is 
closed.\n\tfor !itr.done && !itr.buf.filled {\n\t\titr.cond.Wait()\n\t}\n\n\t// Return nil once the channel is done and the buffer is empty.\n\tif itr.done && !itr.buf.filled {\n\t\treturn nil, nil\n\t}\n\n\t// Always read from the buffer if it exists, even if the iterator\n\t// is closed. This prevents the last value from being truncated by\n\t// the parent iterator.\n\tp := &itr.buf.points[itr.buf.i]\n\titr.buf.i = (itr.buf.i + 1) % len(itr.buf.points)\n\titr.buf.filled = false\n\titr.cond.Signal()\n\treturn p, nil\n}\n\n// stringReduceFloatIterator executes a reducer for every interval and buffers the result.\ntype stringReduceFloatIterator struct {\n\tinput    *bufStringIterator\n\tcreate   func() (StringPointAggregator, FloatPointEmitter)\n\tdims     []string\n\topt      IteratorOptions\n\tpoints   []FloatPoint\n\tkeepTags bool\n}\n\nfunc newStringReduceFloatIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, FloatPointEmitter)) *stringReduceFloatIterator {\n\treturn &stringReduceFloatIterator{\n\t\tinput:  newBufStringIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *stringReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *stringReduceFloatIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *stringReduceFloatIterator) Next() (*FloatPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// stringReduceFloatPoint stores the reduced 
data for a name/tag combination.\ntype stringReduceFloatPoint struct {\n\tName       string\n\tTags       Tags\n\tAggregator StringPointAggregator\n\tEmitter    FloatPointEmitter\n}\n\n// reduce executes fn once for every point in the next window.\n// The previous value for the dimension is passed to fn.\nfunc (itr *stringReduceFloatIterator) reduce() ([]FloatPoint, error) {\n\t// Calculate next window.\n\tvar (\n\t\tstartTime, endTime int64\n\t\twindow             struct {\n\t\t\tname string\n\t\t\ttags string\n\t\t}\n\t)\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t} else if p.Nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unread the point so it can be processed.\n\t\titr.input.unread(p)\n\t\tstartTime, endTime = itr.opt.Window(p.Time)\n\t\twindow.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()\n\t\tbreak\n\t}\n\n\t// Create points by tags.\n\tm := make(map[string]*stringReduceFloatPoint)\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.NextInWindow(startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr == nil {\n\t\t\tbreak\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t} else if curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Ensure this point is within the same final window.\n\t\tif curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Retrieve the tags on this point for this level of the query.\n\t\t// This may be different than the bucket dimensions.\n\t\ttags := curr.Tags.Subset(itr.dims)\n\t\tid := tags.ID()\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &stringReduceFloatPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:    
   tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\tm[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateString(curr)\n\t}\n\n\t// Reverse sort points by name & tag if our output is supposed to be ordered.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tif len(keys) > 1 && itr.opt.Ordered {\n\t\tsort.Sort(reverseStringSlice(keys))\n\t}\n\n\t// Assume the points are already sorted until proven otherwise.\n\tsortedByTime := true\n\t// Emit the points for each name & tag combination.\n\ta := make([]FloatPoint, 0, len(m))\n\tfor _, k := range keys {\n\t\trp := m[k]\n\t\tpoints := rp.Emitter.Emit()\n\t\tfor i := len(points) - 1; i >= 0; i-- {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tif !itr.keepTags {\n\t\t\t\tpoints[i].Tags = rp.Tags\n\t\t\t}\n\t\t\t// Set the points time to the interval time if the reducer didn't provide one.\n\t\t\tif points[i].Time == ZeroTime {\n\t\t\t\tpoints[i].Time = startTime\n\t\t\t} else {\n\t\t\t\tsortedByTime = false\n\t\t\t}\n\t\t\ta = append(a, points[i])\n\t\t}\n\t}\n\n\t// Points may be out of order. 
Perform a stable sort by time if requested.\n\tif !sortedByTime && itr.opt.Ordered {\n\t\tsort.Stable(sort.Reverse(floatPointsByTime(a)))\n\t}\n\n\treturn a, nil\n}\n\n// stringStreamFloatIterator streams inputs into the iterator and emits points gradually.\ntype stringStreamFloatIterator struct {\n\tinput  *bufStringIterator\n\tcreate func() (StringPointAggregator, FloatPointEmitter)\n\tdims   []string\n\topt    IteratorOptions\n\tm      map[string]*stringReduceFloatPoint\n\tpoints []FloatPoint\n}\n\n// newStringStreamFloatIterator returns a new instance of stringStreamFloatIterator.\nfunc newStringStreamFloatIterator(input StringIterator, createFn func() (StringPointAggregator, FloatPointEmitter), opt IteratorOptions) *stringStreamFloatIterator {\n\treturn &stringStreamFloatIterator{\n\t\tinput:  newBufStringIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t\tm:      make(map[string]*stringReduceFloatPoint),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *stringStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *stringStreamFloatIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next value for the stream iterator.\nfunc (itr *stringStreamFloatIterator) Next() (*FloatPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// reduce creates and manages aggregators for every point from the input.\n// After aggregating a point, it always tries to emit a value using the emitter.\nfunc (itr *stringStreamFloatIterator) reduce() ([]FloatPoint, error) {\n\tfor {\n\t\t// Read next 
point.\n\t\tcurr, err := itr.input.Next()\n\t\tif curr == nil {\n\t\t\t// Close all of the aggregators to flush any remaining points to emit.\n\t\t\tvar points []FloatPoint\n\t\t\tfor _, rp := range itr.m {\n\t\t\t\tif aggregator, ok := rp.Aggregator.(io.Closer); ok {\n\t\t\t\t\tif err := aggregator.Close(); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tpts := rp.Emitter.Emit()\n\t\t\t\t\tif len(pts) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor i := range pts {\n\t\t\t\t\t\tpts[i].Name = rp.Name\n\t\t\t\t\t\tpts[i].Tags = rp.Tags\n\t\t\t\t\t}\n\t\t\t\t\tpoints = append(points, pts...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Eliminate the aggregators and emitters.\n\t\t\titr.m = nil\n\t\t\treturn points, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags := curr.Tags.Subset(itr.dims)\n\n\t\tid := curr.Name\n\t\tif len(tags.m) > 0 {\n\t\t\tid += \"\\x00\" + tags.ID()\n\t\t}\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := itr.m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &stringReduceFloatPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\titr.m[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateString(curr)\n\n\t\t// Attempt to emit points from the aggregator.\n\t\tpoints := rp.Emitter.Emit()\n\t\tif len(points) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range points {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tpoints[i].Tags = rp.Tags\n\t\t}\n\t\treturn points, nil\n\t}\n}\n\n// stringFloatExprIterator executes a function to modify an existing point\n// for every output of the input iterator.\ntype stringFloatExprIterator struct {\n\tleft      *bufStringIterator\n\tright     *bufStringIterator\n\tfn        stringFloatExprFunc\n\tpoints    []StringPoint // must be size 2\n\tstorePrev bool\n}\n\nfunc 
newStringFloatExprIterator(left, right StringIterator, opt IteratorOptions, fn func(a, b string) float64) *stringFloatExprIterator {\n\tvar points []StringPoint\n\tswitch opt.Fill {\n\tcase NullFill, PreviousFill:\n\t\tpoints = []StringPoint{{Nil: true}, {Nil: true}}\n\tcase NumberFill:\n\t\tvalue := castToString(opt.FillValue)\n\t\tpoints = []StringPoint{{Value: value}, {Value: value}}\n\t}\n\treturn &stringFloatExprIterator{\n\t\tleft:      newBufStringIterator(left),\n\t\tright:     newBufStringIterator(right),\n\t\tpoints:    points,\n\t\tfn:        fn,\n\t\tstorePrev: opt.Fill == PreviousFill,\n\t}\n}\n\nfunc (itr *stringFloatExprIterator) Stats() IteratorStats {\n\tstats := itr.left.Stats()\n\tstats.Add(itr.right.Stats())\n\treturn stats\n}\n\nfunc (itr *stringFloatExprIterator) Close() error {\n\titr.left.Close()\n\titr.right.Close()\n\treturn nil\n}\n\nfunc (itr *stringFloatExprIterator) Next() (*FloatPoint, error) {\n\tfor {\n\t\ta, b, err := itr.next()\n\t\tif err != nil || (a == nil && b == nil) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If any of these are nil and we are using fill(none), skip these points.\n\t\tif (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If one of the two points is nil, we need to fill it with a fake nil\n\t\t// point that has the same name, tags, and time as the other point.\n\t\t// There should never be a time when both of these are nil.\n\t\tif a == nil {\n\t\t\tp := *b\n\t\t\ta = &p\n\t\t\ta.Value = \"\"\n\t\t\ta.Nil = true\n\t\t} else if b == nil {\n\t\t\tp := *a\n\t\t\tb = &p\n\t\t\tb.Value = \"\"\n\t\t\tb.Nil = true\n\t\t}\n\n\t\t// If a value is nil, use the fill values if the fill value is non-nil.\n\t\tif a.Nil && !itr.points[0].Nil {\n\t\t\ta.Value = itr.points[0].Value\n\t\t\ta.Nil = false\n\t\t}\n\t\tif b.Nil && !itr.points[1].Nil {\n\t\t\tb.Value = itr.points[1].Value\n\t\t\tb.Nil = false\n\t\t}\n\n\t\tif itr.storePrev {\n\t\t\titr.points[0], itr.points[1] = *a, 
*b\n\t\t}\n\n\t\tp := &FloatPoint{\n\t\t\tName:       a.Name,\n\t\t\tTags:       a.Tags,\n\t\t\tTime:       a.Time,\n\t\t\tNil:        a.Nil || b.Nil,\n\t\t\tAggregated: a.Aggregated,\n\t\t}\n\t\tif !p.Nil {\n\t\t\tp.Value = itr.fn(a.Value, b.Value)\n\t\t}\n\t\treturn p, nil\n\n\t}\n}\n\n// next returns the next points within each iterator. If the iterators are\n// uneven, it organizes them so only matching points are returned.\nfunc (itr *stringFloatExprIterator) next() (a, b *StringPoint, err error) {\n\t// Retrieve the next value for both the left and right.\n\ta, err = itr.left.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tb, err = itr.right.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// If we have a point from both, make sure that they match each other.\n\tif a != nil && b != nil {\n\t\tif a.Name > b.Name {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Name < b.Name {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if ltags < rtags {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif a.Time > b.Time {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Time < b.Time {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\t}\n\treturn a, b, nil\n}\n\n// stringFloatExprFunc creates or modifies a point by combining two\n// points. The point passed in may be modified and returned rather than\n// allocating a new point if possible. 
One of the points may be nil, but at\n// least one of the points will be non-nil.\ntype stringFloatExprFunc func(a, b string) float64\n\n// stringReduceIntegerIterator executes a reducer for every interval and buffers the result.\ntype stringReduceIntegerIterator struct {\n\tinput    *bufStringIterator\n\tcreate   func() (StringPointAggregator, IntegerPointEmitter)\n\tdims     []string\n\topt      IteratorOptions\n\tpoints   []IntegerPoint\n\tkeepTags bool\n}\n\nfunc newStringReduceIntegerIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, IntegerPointEmitter)) *stringReduceIntegerIterator {\n\treturn &stringReduceIntegerIterator{\n\t\tinput:  newBufStringIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *stringReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *stringReduceIntegerIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *stringReduceIntegerIterator) Next() (*IntegerPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// stringReduceIntegerPoint stores the reduced data for a name/tag combination.\ntype stringReduceIntegerPoint struct {\n\tName       string\n\tTags       Tags\n\tAggregator StringPointAggregator\n\tEmitter    IntegerPointEmitter\n}\n\n// reduce executes fn once for every point in the next window.\n// The previous value for the dimension is passed to fn.\nfunc (itr *stringReduceIntegerIterator) reduce() 
([]IntegerPoint, error) {\n\t// Calculate next window.\n\tvar (\n\t\tstartTime, endTime int64\n\t\twindow             struct {\n\t\t\tname string\n\t\t\ttags string\n\t\t}\n\t)\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t} else if p.Nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unread the point so it can be processed.\n\t\titr.input.unread(p)\n\t\tstartTime, endTime = itr.opt.Window(p.Time)\n\t\twindow.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()\n\t\tbreak\n\t}\n\n\t// Create points by tags.\n\tm := make(map[string]*stringReduceIntegerPoint)\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.NextInWindow(startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr == nil {\n\t\t\tbreak\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t} else if curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Ensure this point is within the same final window.\n\t\tif curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Retrieve the tags on this point for this level of the query.\n\t\t// This may be different than the bucket dimensions.\n\t\ttags := curr.Tags.Subset(itr.dims)\n\t\tid := tags.ID()\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &stringReduceIntegerPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\tm[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateString(curr)\n\t}\n\n\t// Reverse sort points by name & tag if our output is supposed to be ordered.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tif len(keys) > 1 
&& itr.opt.Ordered {\n\t\tsort.Sort(reverseStringSlice(keys))\n\t}\n\n\t// Assume the points are already sorted until proven otherwise.\n\tsortedByTime := true\n\t// Emit the points for each name & tag combination.\n\ta := make([]IntegerPoint, 0, len(m))\n\tfor _, k := range keys {\n\t\trp := m[k]\n\t\tpoints := rp.Emitter.Emit()\n\t\tfor i := len(points) - 1; i >= 0; i-- {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tif !itr.keepTags {\n\t\t\t\tpoints[i].Tags = rp.Tags\n\t\t\t}\n\t\t\t// Set the points time to the interval time if the reducer didn't provide one.\n\t\t\tif points[i].Time == ZeroTime {\n\t\t\t\tpoints[i].Time = startTime\n\t\t\t} else {\n\t\t\t\tsortedByTime = false\n\t\t\t}\n\t\t\ta = append(a, points[i])\n\t\t}\n\t}\n\n\t// Points may be out of order. Perform a stable sort by time if requested.\n\tif !sortedByTime && itr.opt.Ordered {\n\t\tsort.Stable(sort.Reverse(integerPointsByTime(a)))\n\t}\n\n\treturn a, nil\n}\n\n// stringStreamIntegerIterator streams inputs into the iterator and emits points gradually.\ntype stringStreamIntegerIterator struct {\n\tinput  *bufStringIterator\n\tcreate func() (StringPointAggregator, IntegerPointEmitter)\n\tdims   []string\n\topt    IteratorOptions\n\tm      map[string]*stringReduceIntegerPoint\n\tpoints []IntegerPoint\n}\n\n// newStringStreamIntegerIterator returns a new instance of stringStreamIntegerIterator.\nfunc newStringStreamIntegerIterator(input StringIterator, createFn func() (StringPointAggregator, IntegerPointEmitter), opt IteratorOptions) *stringStreamIntegerIterator {\n\treturn &stringStreamIntegerIterator{\n\t\tinput:  newBufStringIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t\tm:      make(map[string]*stringReduceIntegerPoint),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *stringStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr 
*stringStreamIntegerIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next value for the stream iterator.\nfunc (itr *stringStreamIntegerIterator) Next() (*IntegerPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// reduce creates and manages aggregators for every point from the input.\n// After aggregating a point, it always tries to emit a value using the emitter.\nfunc (itr *stringStreamIntegerIterator) reduce() ([]IntegerPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.Next()\n\t\tif curr == nil {\n\t\t\t// Close all of the aggregators to flush any remaining points to emit.\n\t\t\tvar points []IntegerPoint\n\t\t\tfor _, rp := range itr.m {\n\t\t\t\tif aggregator, ok := rp.Aggregator.(io.Closer); ok {\n\t\t\t\t\tif err := aggregator.Close(); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tpts := rp.Emitter.Emit()\n\t\t\t\t\tif len(pts) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor i := range pts {\n\t\t\t\t\t\tpts[i].Name = rp.Name\n\t\t\t\t\t\tpts[i].Tags = rp.Tags\n\t\t\t\t\t}\n\t\t\t\t\tpoints = append(points, pts...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Eliminate the aggregators and emitters.\n\t\t\titr.m = nil\n\t\t\treturn points, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags := curr.Tags.Subset(itr.dims)\n\n\t\tid := curr.Name\n\t\tif len(tags.m) > 0 {\n\t\t\tid += \"\\x00\" + tags.ID()\n\t\t}\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := itr.m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = 
&stringReduceIntegerPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\titr.m[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateString(curr)\n\n\t\t// Attempt to emit points from the aggregator.\n\t\tpoints := rp.Emitter.Emit()\n\t\tif len(points) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range points {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tpoints[i].Tags = rp.Tags\n\t\t}\n\t\treturn points, nil\n\t}\n}\n\n// stringIntegerExprIterator executes a function to modify an existing point\n// for every output of the input iterator.\ntype stringIntegerExprIterator struct {\n\tleft      *bufStringIterator\n\tright     *bufStringIterator\n\tfn        stringIntegerExprFunc\n\tpoints    []StringPoint // must be size 2\n\tstorePrev bool\n}\n\nfunc newStringIntegerExprIterator(left, right StringIterator, opt IteratorOptions, fn func(a, b string) int64) *stringIntegerExprIterator {\n\tvar points []StringPoint\n\tswitch opt.Fill {\n\tcase NullFill, PreviousFill:\n\t\tpoints = []StringPoint{{Nil: true}, {Nil: true}}\n\tcase NumberFill:\n\t\tvalue := castToString(opt.FillValue)\n\t\tpoints = []StringPoint{{Value: value}, {Value: value}}\n\t}\n\treturn &stringIntegerExprIterator{\n\t\tleft:      newBufStringIterator(left),\n\t\tright:     newBufStringIterator(right),\n\t\tpoints:    points,\n\t\tfn:        fn,\n\t\tstorePrev: opt.Fill == PreviousFill,\n\t}\n}\n\nfunc (itr *stringIntegerExprIterator) Stats() IteratorStats {\n\tstats := itr.left.Stats()\n\tstats.Add(itr.right.Stats())\n\treturn stats\n}\n\nfunc (itr *stringIntegerExprIterator) Close() error {\n\titr.left.Close()\n\titr.right.Close()\n\treturn nil\n}\n\nfunc (itr *stringIntegerExprIterator) Next() (*IntegerPoint, error) {\n\tfor {\n\t\ta, b, err := itr.next()\n\t\tif err != nil || (a == nil && b == nil) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If any of these are nil and we are using fill(none), skip these points.\n\t\tif (a == 
nil || a.Nil || b == nil || b.Nil) && itr.points == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If one of the two points is nil, we need to fill it with a fake nil\n\t\t// point that has the same name, tags, and time as the other point.\n\t\t// There should never be a time when both of these are nil.\n\t\tif a == nil {\n\t\t\tp := *b\n\t\t\ta = &p\n\t\t\ta.Value = \"\"\n\t\t\ta.Nil = true\n\t\t} else if b == nil {\n\t\t\tp := *a\n\t\t\tb = &p\n\t\t\tb.Value = \"\"\n\t\t\tb.Nil = true\n\t\t}\n\n\t\t// If a value is nil, use the fill values if the fill value is non-nil.\n\t\tif a.Nil && !itr.points[0].Nil {\n\t\t\ta.Value = itr.points[0].Value\n\t\t\ta.Nil = false\n\t\t}\n\t\tif b.Nil && !itr.points[1].Nil {\n\t\t\tb.Value = itr.points[1].Value\n\t\t\tb.Nil = false\n\t\t}\n\n\t\tif itr.storePrev {\n\t\t\titr.points[0], itr.points[1] = *a, *b\n\t\t}\n\n\t\tp := &IntegerPoint{\n\t\t\tName:       a.Name,\n\t\t\tTags:       a.Tags,\n\t\t\tTime:       a.Time,\n\t\t\tNil:        a.Nil || b.Nil,\n\t\t\tAggregated: a.Aggregated,\n\t\t}\n\t\tif !p.Nil {\n\t\t\tp.Value = itr.fn(a.Value, b.Value)\n\t\t}\n\t\treturn p, nil\n\n\t}\n}\n\n// next returns the next points within each iterator. 
If the iterators are\n// uneven, it organizes them so only matching points are returned.\nfunc (itr *stringIntegerExprIterator) next() (a, b *StringPoint, err error) {\n\t// Retrieve the next value for both the left and right.\n\ta, err = itr.left.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tb, err = itr.right.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// If we have a point from both, make sure that they match each other.\n\tif a != nil && b != nil {\n\t\tif a.Name > b.Name {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Name < b.Name {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if ltags < rtags {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif a.Time > b.Time {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Time < b.Time {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\t}\n\treturn a, b, nil\n}\n\n// stringIntegerExprFunc creates or modifies a point by combining two\n// points. The point passed in may be modified and returned rather than\n// allocating a new point if possible. 
One of the points may be nil, but at\n// least one of the points will be non-nil.\ntype stringIntegerExprFunc func(a, b string) int64\n\n// stringReduceStringIterator executes a reducer for every interval and buffers the result.\ntype stringReduceStringIterator struct {\n\tinput    *bufStringIterator\n\tcreate   func() (StringPointAggregator, StringPointEmitter)\n\tdims     []string\n\topt      IteratorOptions\n\tpoints   []StringPoint\n\tkeepTags bool\n}\n\nfunc newStringReduceStringIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, StringPointEmitter)) *stringReduceStringIterator {\n\treturn &stringReduceStringIterator{\n\t\tinput:  newBufStringIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *stringReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *stringReduceStringIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *stringReduceStringIterator) Next() (*StringPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// stringReduceStringPoint stores the reduced data for a name/tag combination.\ntype stringReduceStringPoint struct {\n\tName       string\n\tTags       Tags\n\tAggregator StringPointAggregator\n\tEmitter    StringPointEmitter\n}\n\n// reduce executes fn once for every point in the next window.\n// The previous value for the dimension is passed to fn.\nfunc (itr *stringReduceStringIterator) reduce() ([]StringPoint, error) 
{\n\t// Calculate next window.\n\tvar (\n\t\tstartTime, endTime int64\n\t\twindow             struct {\n\t\t\tname string\n\t\t\ttags string\n\t\t}\n\t)\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t} else if p.Nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unread the point so it can be processed.\n\t\titr.input.unread(p)\n\t\tstartTime, endTime = itr.opt.Window(p.Time)\n\t\twindow.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()\n\t\tbreak\n\t}\n\n\t// Create points by tags.\n\tm := make(map[string]*stringReduceStringPoint)\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.NextInWindow(startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr == nil {\n\t\t\tbreak\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t} else if curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Ensure this point is within the same final window.\n\t\tif curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Retrieve the tags on this point for this level of the query.\n\t\t// This may be different than the bucket dimensions.\n\t\ttags := curr.Tags.Subset(itr.dims)\n\t\tid := tags.ID()\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &stringReduceStringPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\tm[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateString(curr)\n\t}\n\n\t// Reverse sort points by name & tag if our output is supposed to be ordered.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tif len(keys) > 1 && itr.opt.Ordered 
{\n\t\tsort.Sort(reverseStringSlice(keys))\n\t}\n\n\t// Assume the points are already sorted until proven otherwise.\n\tsortedByTime := true\n\t// Emit the points for each name & tag combination.\n\ta := make([]StringPoint, 0, len(m))\n\tfor _, k := range keys {\n\t\trp := m[k]\n\t\tpoints := rp.Emitter.Emit()\n\t\tfor i := len(points) - 1; i >= 0; i-- {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tif !itr.keepTags {\n\t\t\t\tpoints[i].Tags = rp.Tags\n\t\t\t}\n\t\t\t// Set the points time to the interval time if the reducer didn't provide one.\n\t\t\tif points[i].Time == ZeroTime {\n\t\t\t\tpoints[i].Time = startTime\n\t\t\t} else {\n\t\t\t\tsortedByTime = false\n\t\t\t}\n\t\t\ta = append(a, points[i])\n\t\t}\n\t}\n\n\t// Points may be out of order. Perform a stable sort by time if requested.\n\tif !sortedByTime && itr.opt.Ordered {\n\t\tsort.Stable(sort.Reverse(stringPointsByTime(a)))\n\t}\n\n\treturn a, nil\n}\n\n// stringStreamStringIterator streams inputs into the iterator and emits points gradually.\ntype stringStreamStringIterator struct {\n\tinput  *bufStringIterator\n\tcreate func() (StringPointAggregator, StringPointEmitter)\n\tdims   []string\n\topt    IteratorOptions\n\tm      map[string]*stringReduceStringPoint\n\tpoints []StringPoint\n}\n\n// newStringStreamStringIterator returns a new instance of stringStreamStringIterator.\nfunc newStringStreamStringIterator(input StringIterator, createFn func() (StringPointAggregator, StringPointEmitter), opt IteratorOptions) *stringStreamStringIterator {\n\treturn &stringStreamStringIterator{\n\t\tinput:  newBufStringIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t\tm:      make(map[string]*stringReduceStringPoint),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *stringStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *stringStreamStringIterator) Close() error { 
return itr.input.Close() }\n\n// Next returns the next value for the stream iterator.\nfunc (itr *stringStreamStringIterator) Next() (*StringPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// reduce creates and manages aggregators for every point from the input.\n// After aggregating a point, it always tries to emit a value using the emitter.\nfunc (itr *stringStreamStringIterator) reduce() ([]StringPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.Next()\n\t\tif curr == nil {\n\t\t\t// Close all of the aggregators to flush any remaining points to emit.\n\t\t\tvar points []StringPoint\n\t\t\tfor _, rp := range itr.m {\n\t\t\t\tif aggregator, ok := rp.Aggregator.(io.Closer); ok {\n\t\t\t\t\tif err := aggregator.Close(); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tpts := rp.Emitter.Emit()\n\t\t\t\t\tif len(pts) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor i := range pts {\n\t\t\t\t\t\tpts[i].Name = rp.Name\n\t\t\t\t\t\tpts[i].Tags = rp.Tags\n\t\t\t\t\t}\n\t\t\t\t\tpoints = append(points, pts...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Eliminate the aggregators and emitters.\n\t\t\titr.m = nil\n\t\t\treturn points, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags := curr.Tags.Subset(itr.dims)\n\n\t\tid := curr.Name\n\t\tif len(tags.m) > 0 {\n\t\t\tid += \"\\x00\" + tags.ID()\n\t\t}\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := itr.m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &stringReduceStringPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       
tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\titr.m[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateString(curr)\n\n\t\t// Attempt to emit points from the aggregator.\n\t\tpoints := rp.Emitter.Emit()\n\t\tif len(points) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range points {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tpoints[i].Tags = rp.Tags\n\t\t}\n\t\treturn points, nil\n\t}\n}\n\n// stringExprIterator executes a function to modify an existing point\n// for every output of the input iterator.\ntype stringExprIterator struct {\n\tleft      *bufStringIterator\n\tright     *bufStringIterator\n\tfn        stringExprFunc\n\tpoints    []StringPoint // must be size 2\n\tstorePrev bool\n}\n\nfunc newStringExprIterator(left, right StringIterator, opt IteratorOptions, fn func(a, b string) string) *stringExprIterator {\n\tvar points []StringPoint\n\tswitch opt.Fill {\n\tcase NullFill, PreviousFill:\n\t\tpoints = []StringPoint{{Nil: true}, {Nil: true}}\n\tcase NumberFill:\n\t\tvalue := castToString(opt.FillValue)\n\t\tpoints = []StringPoint{{Value: value}, {Value: value}}\n\t}\n\treturn &stringExprIterator{\n\t\tleft:      newBufStringIterator(left),\n\t\tright:     newBufStringIterator(right),\n\t\tpoints:    points,\n\t\tfn:        fn,\n\t\tstorePrev: opt.Fill == PreviousFill,\n\t}\n}\n\nfunc (itr *stringExprIterator) Stats() IteratorStats {\n\tstats := itr.left.Stats()\n\tstats.Add(itr.right.Stats())\n\treturn stats\n}\n\nfunc (itr *stringExprIterator) Close() error {\n\titr.left.Close()\n\titr.right.Close()\n\treturn nil\n}\n\nfunc (itr *stringExprIterator) Next() (*StringPoint, error) {\n\tfor {\n\t\ta, b, err := itr.next()\n\t\tif err != nil || (a == nil && b == nil) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If any of these are nil and we are using fill(none), skip these points.\n\t\tif (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If one of the two points is nil, we need to fill it 
with a fake nil\n\t\t// point that has the same name, tags, and time as the other point.\n\t\t// There should never be a time when both of these are nil.\n\t\tif a == nil {\n\t\t\tp := *b\n\t\t\ta = &p\n\t\t\ta.Value = \"\"\n\t\t\ta.Nil = true\n\t\t} else if b == nil {\n\t\t\tp := *a\n\t\t\tb = &p\n\t\t\tb.Value = \"\"\n\t\t\tb.Nil = true\n\t\t}\n\n\t\t// If a value is nil, use the fill values if the fill value is non-nil.\n\t\tif a.Nil && !itr.points[0].Nil {\n\t\t\ta.Value = itr.points[0].Value\n\t\t\ta.Nil = false\n\t\t}\n\t\tif b.Nil && !itr.points[1].Nil {\n\t\t\tb.Value = itr.points[1].Value\n\t\t\tb.Nil = false\n\t\t}\n\n\t\tif itr.storePrev {\n\t\t\titr.points[0], itr.points[1] = *a, *b\n\t\t}\n\n\t\tif a.Nil {\n\t\t\treturn a, nil\n\t\t} else if b.Nil {\n\t\t\treturn b, nil\n\t\t}\n\t\ta.Value = itr.fn(a.Value, b.Value)\n\t\treturn a, nil\n\n\t}\n}\n\n// next returns the next points within each iterator. If the iterators are\n// uneven, it organizes them so only matching points are returned.\nfunc (itr *stringExprIterator) next() (a, b *StringPoint, err error) {\n\t// Retrieve the next value for both the left and right.\n\ta, err = itr.left.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tb, err = itr.right.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// If we have a point from both, make sure that they match each other.\n\tif a != nil && b != nil {\n\t\tif a.Name > b.Name {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Name < b.Name {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if ltags < rtags {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif a.Time > b.Time {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Time < b.Time {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\t}\n\treturn a, b, nil\n}\n\n// 
stringExprFunc creates or modifies a point by combining two\n// points. The point passed in may be modified and returned rather than\n// allocating a new point if possible. One of the points may be nil, but at\n// least one of the points will be non-nil.\ntype stringExprFunc func(a, b string) string\n\n// stringReduceBooleanIterator executes a reducer for every interval and buffers the result.\ntype stringReduceBooleanIterator struct {\n\tinput    *bufStringIterator\n\tcreate   func() (StringPointAggregator, BooleanPointEmitter)\n\tdims     []string\n\topt      IteratorOptions\n\tpoints   []BooleanPoint\n\tkeepTags bool\n}\n\nfunc newStringReduceBooleanIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, BooleanPointEmitter)) *stringReduceBooleanIterator {\n\treturn &stringReduceBooleanIterator{\n\t\tinput:  newBufStringIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *stringReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *stringReduceBooleanIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *stringReduceBooleanIterator) Next() (*BooleanPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// stringReduceBooleanPoint stores the reduced data for a name/tag combination.\ntype stringReduceBooleanPoint struct {\n\tName       string\n\tTags       Tags\n\tAggregator StringPointAggregator\n\tEmitter    BooleanPointEmitter\n}\n\n// reduce 
executes fn once for every point in the next window.\n// The previous value for the dimension is passed to fn.\nfunc (itr *stringReduceBooleanIterator) reduce() ([]BooleanPoint, error) {\n\t// Calculate next window.\n\tvar (\n\t\tstartTime, endTime int64\n\t\twindow             struct {\n\t\t\tname string\n\t\t\ttags string\n\t\t}\n\t)\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t} else if p.Nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unread the point so it can be processed.\n\t\titr.input.unread(p)\n\t\tstartTime, endTime = itr.opt.Window(p.Time)\n\t\twindow.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()\n\t\tbreak\n\t}\n\n\t// Create points by tags.\n\tm := make(map[string]*stringReduceBooleanPoint)\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.NextInWindow(startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr == nil {\n\t\t\tbreak\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t} else if curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Ensure this point is within the same final window.\n\t\tif curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Retrieve the tags on this point for this level of the query.\n\t\t// This may be different than the bucket dimensions.\n\t\ttags := curr.Tags.Subset(itr.dims)\n\t\tid := tags.ID()\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &stringReduceBooleanPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\tm[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateString(curr)\n\t}\n\n\t// Reverse sort points by 
name & tag if our output is supposed to be ordered.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tif len(keys) > 1 && itr.opt.Ordered {\n\t\tsort.Sort(reverseStringSlice(keys))\n\t}\n\n\t// Assume the points are already sorted until proven otherwise.\n\tsortedByTime := true\n\t// Emit the points for each name & tag combination.\n\ta := make([]BooleanPoint, 0, len(m))\n\tfor _, k := range keys {\n\t\trp := m[k]\n\t\tpoints := rp.Emitter.Emit()\n\t\tfor i := len(points) - 1; i >= 0; i-- {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tif !itr.keepTags {\n\t\t\t\tpoints[i].Tags = rp.Tags\n\t\t\t}\n\t\t\t// Set the points time to the interval time if the reducer didn't provide one.\n\t\t\tif points[i].Time == ZeroTime {\n\t\t\t\tpoints[i].Time = startTime\n\t\t\t} else {\n\t\t\t\tsortedByTime = false\n\t\t\t}\n\t\t\ta = append(a, points[i])\n\t\t}\n\t}\n\n\t// Points may be out of order. Perform a stable sort by time if requested.\n\tif !sortedByTime && itr.opt.Ordered {\n\t\tsort.Stable(sort.Reverse(booleanPointsByTime(a)))\n\t}\n\n\treturn a, nil\n}\n\n// stringStreamBooleanIterator streams inputs into the iterator and emits points gradually.\ntype stringStreamBooleanIterator struct {\n\tinput  *bufStringIterator\n\tcreate func() (StringPointAggregator, BooleanPointEmitter)\n\tdims   []string\n\topt    IteratorOptions\n\tm      map[string]*stringReduceBooleanPoint\n\tpoints []BooleanPoint\n}\n\n// newStringStreamBooleanIterator returns a new instance of stringStreamBooleanIterator.\nfunc newStringStreamBooleanIterator(input StringIterator, createFn func() (StringPointAggregator, BooleanPointEmitter), opt IteratorOptions) *stringStreamBooleanIterator {\n\treturn &stringStreamBooleanIterator{\n\t\tinput:  newBufStringIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t\tm:      make(map[string]*stringReduceBooleanPoint),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc 
(itr *stringStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *stringStreamBooleanIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next value for the stream iterator.\nfunc (itr *stringStreamBooleanIterator) Next() (*BooleanPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// reduce creates and manages aggregators for every point from the input.\n// After aggregating a point, it always tries to emit a value using the emitter.\nfunc (itr *stringStreamBooleanIterator) reduce() ([]BooleanPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.Next()\n\t\tif curr == nil {\n\t\t\t// Close all of the aggregators to flush any remaining points to emit.\n\t\t\tvar points []BooleanPoint\n\t\t\tfor _, rp := range itr.m {\n\t\t\t\tif aggregator, ok := rp.Aggregator.(io.Closer); ok {\n\t\t\t\t\tif err := aggregator.Close(); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tpts := rp.Emitter.Emit()\n\t\t\t\t\tif len(pts) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor i := range pts {\n\t\t\t\t\t\tpts[i].Name = rp.Name\n\t\t\t\t\t\tpts[i].Tags = rp.Tags\n\t\t\t\t\t}\n\t\t\t\t\tpoints = append(points, pts...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Eliminate the aggregators and emitters.\n\t\t\titr.m = nil\n\t\t\treturn points, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags := curr.Tags.Subset(itr.dims)\n\n\t\tid := curr.Name\n\t\tif len(tags.m) > 0 {\n\t\t\tid += \"\\x00\" + tags.ID()\n\t\t}\n\n\t\t// Retrieve the aggregator for this name/tag 
combination or create one.\n\t\trp := itr.m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &stringReduceBooleanPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\titr.m[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateString(curr)\n\n\t\t// Attempt to emit points from the aggregator.\n\t\tpoints := rp.Emitter.Emit()\n\t\tif len(points) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range points {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tpoints[i].Tags = rp.Tags\n\t\t}\n\t\treturn points, nil\n\t}\n}\n\n// stringBooleanExprIterator executes a function to modify an existing point\n// for every output of the input iterator.\ntype stringBooleanExprIterator struct {\n\tleft      *bufStringIterator\n\tright     *bufStringIterator\n\tfn        stringBooleanExprFunc\n\tpoints    []StringPoint // must be size 2\n\tstorePrev bool\n}\n\nfunc newStringBooleanExprIterator(left, right StringIterator, opt IteratorOptions, fn func(a, b string) bool) *stringBooleanExprIterator {\n\tvar points []StringPoint\n\tswitch opt.Fill {\n\tcase NullFill, PreviousFill:\n\t\tpoints = []StringPoint{{Nil: true}, {Nil: true}}\n\tcase NumberFill:\n\t\tvalue := castToString(opt.FillValue)\n\t\tpoints = []StringPoint{{Value: value}, {Value: value}}\n\t}\n\treturn &stringBooleanExprIterator{\n\t\tleft:      newBufStringIterator(left),\n\t\tright:     newBufStringIterator(right),\n\t\tpoints:    points,\n\t\tfn:        fn,\n\t\tstorePrev: opt.Fill == PreviousFill,\n\t}\n}\n\nfunc (itr *stringBooleanExprIterator) Stats() IteratorStats {\n\tstats := itr.left.Stats()\n\tstats.Add(itr.right.Stats())\n\treturn stats\n}\n\nfunc (itr *stringBooleanExprIterator) Close() error {\n\titr.left.Close()\n\titr.right.Close()\n\treturn nil\n}\n\nfunc (itr *stringBooleanExprIterator) Next() (*BooleanPoint, error) {\n\tfor {\n\t\ta, b, err := itr.next()\n\t\tif err != nil || (a == nil && b == nil) 
{\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If any of these are nil and we are using fill(none), skip these points.\n\t\tif (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If one of the two points is nil, we need to fill it with a fake nil\n\t\t// point that has the same name, tags, and time as the other point.\n\t\t// There should never be a time when both of these are nil.\n\t\tif a == nil {\n\t\t\tp := *b\n\t\t\ta = &p\n\t\t\ta.Value = \"\"\n\t\t\ta.Nil = true\n\t\t} else if b == nil {\n\t\t\tp := *a\n\t\t\tb = &p\n\t\t\tb.Value = \"\"\n\t\t\tb.Nil = true\n\t\t}\n\n\t\t// If a value is nil, use the fill values if the fill value is non-nil.\n\t\tif a.Nil && !itr.points[0].Nil {\n\t\t\ta.Value = itr.points[0].Value\n\t\t\ta.Nil = false\n\t\t}\n\t\tif b.Nil && !itr.points[1].Nil {\n\t\t\tb.Value = itr.points[1].Value\n\t\t\tb.Nil = false\n\t\t}\n\n\t\tif itr.storePrev {\n\t\t\titr.points[0], itr.points[1] = *a, *b\n\t\t}\n\n\t\tp := &BooleanPoint{\n\t\t\tName:       a.Name,\n\t\t\tTags:       a.Tags,\n\t\t\tTime:       a.Time,\n\t\t\tNil:        a.Nil || b.Nil,\n\t\t\tAggregated: a.Aggregated,\n\t\t}\n\t\tif !p.Nil {\n\t\t\tp.Value = itr.fn(a.Value, b.Value)\n\t\t}\n\t\treturn p, nil\n\n\t}\n}\n\n// next returns the next points within each iterator. 
If the iterators are\n// uneven, it organizes them so only matching points are returned.\nfunc (itr *stringBooleanExprIterator) next() (a, b *StringPoint, err error) {\n\t// Retrieve the next value for both the left and right.\n\ta, err = itr.left.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tb, err = itr.right.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// If we have a point from both, make sure that they match each other.\n\tif a != nil && b != nil {\n\t\tif a.Name > b.Name {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Name < b.Name {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if ltags < rtags {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif a.Time > b.Time {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Time < b.Time {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\t}\n\treturn a, b, nil\n}\n\n// stringBooleanExprFunc creates or modifies a point by combining two\n// points. The point passed in may be modified and returned rather than\n// allocating a new point if possible. 
One of the points may be nil, but at\n// least one of the points will be non-nil.\ntype stringBooleanExprFunc func(a, b string) bool\n\n// stringTransformIterator executes a function to modify an existing point for every\n// output of the input iterator.\ntype stringTransformIterator struct {\n\tinput StringIterator\n\tfn    stringTransformFunc\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *stringTransformIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *stringTransformIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *stringTransformIterator) Next() (*StringPoint, error) {\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if p != nil {\n\t\tp = itr.fn(p)\n\t}\n\treturn p, nil\n}\n\n// stringTransformFunc creates or modifies a point.\n// The point passed in may be modified and returned rather than allocating a\n// new point if possible.\ntype stringTransformFunc func(p *StringPoint) *StringPoint\n\n// stringBoolTransformIterator executes a function to modify an existing point for every\n// output of the input iterator.\ntype stringBoolTransformIterator struct {\n\tinput StringIterator\n\tfn    stringBoolTransformFunc\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *stringBoolTransformIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *stringBoolTransformIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *stringBoolTransformIterator) Next() (*BooleanPoint, error) {\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if p != nil {\n\t\treturn itr.fn(p), nil\n\t}\n\treturn nil, nil\n}\n\n// stringBoolTransformFunc creates or modifies a point.\n// The point 
passed in may be modified and returned rather than allocating a\n// new point if possible.\ntype stringBoolTransformFunc func(p *StringPoint) *BooleanPoint\n\n// stringDedupeIterator only outputs unique points.\n// This differs from the DistinctIterator in that it compares all aux fields too.\n// This iterator is relatively inefficient and should only be used on small\n// datasets such as meta query results.\ntype stringDedupeIterator struct {\n\tinput StringIterator\n\tm     map[string]struct{} // lookup of points already sent\n}\n\ntype stringIteratorMapper struct {\n\te      *Emitter\n\tbuf    []interface{}\n\tdriver IteratorMap   // which iterator to use for the primary value, can be nil\n\tfields []IteratorMap // which iterator to use for an aux field\n\tpoint  StringPoint\n}\n\nfunc newStringIteratorMapper(itrs []Iterator, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *stringIteratorMapper {\n\te := NewEmitter(itrs, opt.Ascending, 0)\n\te.OmitTime = true\n\treturn &stringIteratorMapper{\n\t\te:      e,\n\t\tbuf:    make([]interface{}, len(itrs)),\n\t\tdriver: driver,\n\t\tfields: fields,\n\t\tpoint: StringPoint{\n\t\t\tAux: make([]interface{}, len(fields)),\n\t\t},\n\t}\n}\n\nfunc (itr *stringIteratorMapper) Next() (*StringPoint, error) {\n\tt, name, tags, err := itr.e.loadBuf()\n\tif err != nil || t == ZeroTime {\n\t\treturn nil, err\n\t}\n\titr.point.Time = t\n\titr.point.Name = name\n\titr.point.Tags = tags\n\n\titr.e.readInto(t, name, tags, itr.buf)\n\tif itr.driver != nil {\n\t\tif v := itr.driver.Value(tags, itr.buf); v != nil {\n\t\t\tif v, ok := v.(string); ok {\n\t\t\t\titr.point.Value = v\n\t\t\t\titr.point.Nil = false\n\t\t\t} else {\n\t\t\t\titr.point.Value = \"\"\n\t\t\t\titr.point.Nil = true\n\t\t\t}\n\t\t} else {\n\t\t\titr.point.Value = \"\"\n\t\t\titr.point.Nil = true\n\t\t}\n\t}\n\tfor i, f := range itr.fields {\n\t\titr.point.Aux[i] = f.Value(tags, itr.buf)\n\t}\n\treturn &itr.point, nil\n}\n\nfunc (itr 
*stringIteratorMapper) Stats() IteratorStats {\n\tstats := IteratorStats{}\n\tfor _, itr := range itr.e.itrs {\n\t\tstats.Add(itr.Stats())\n\t}\n\treturn stats\n}\n\nfunc (itr *stringIteratorMapper) Close() error {\n\treturn itr.e.Close()\n}\n\ntype stringFilterIterator struct {\n\tinput StringIterator\n\tcond  Expr\n\topt   IteratorOptions\n\tm     map[string]interface{}\n}\n\nfunc newStringFilterIterator(input StringIterator, cond Expr, opt IteratorOptions) StringIterator {\n\t// Strip out time conditions from the WHERE clause.\n\t// TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct.\n\tn := RewriteFunc(CloneExpr(cond), func(n Node) Node {\n\t\tswitch n := n.(type) {\n\t\tcase *BinaryExpr:\n\t\t\tif n.LHS.String() == \"time\" {\n\t\t\t\treturn &BooleanLiteral{Val: true}\n\t\t\t}\n\t\t}\n\t\treturn n\n\t})\n\n\tcond, _ = n.(Expr)\n\tif cond == nil {\n\t\treturn input\n\t} else if n, ok := cond.(*BooleanLiteral); ok && n.Val {\n\t\treturn input\n\t}\n\n\treturn &stringFilterIterator{\n\t\tinput: input,\n\t\tcond:  cond,\n\t\topt:   opt,\n\t\tm:     make(map[string]interface{}),\n\t}\n}\n\nfunc (itr *stringFilterIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *stringFilterIterator) Close() error         { return itr.input.Close() }\n\nfunc (itr *stringFilterIterator) Next() (*StringPoint, error) {\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor i, ref := range itr.opt.Aux {\n\t\t\titr.m[ref.Val] = p.Aux[i]\n\t\t}\n\t\tfor k, v := range p.Tags.KeyValues() {\n\t\t\titr.m[k] = v\n\t\t}\n\n\t\tif !EvalBool(itr.cond, itr.m) {\n\t\t\tcontinue\n\t\t}\n\t\treturn p, nil\n\t}\n}\n\n// newStringDedupeIterator returns a new instance of stringDedupeIterator.\nfunc newStringDedupeIterator(input StringIterator) *stringDedupeIterator {\n\treturn &stringDedupeIterator{\n\t\tinput: input,\n\t\tm:     make(map[string]struct{}),\n\t}\n}\n\n// Stats 
returns stats from the input iterator.\nfunc (itr *stringDedupeIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *stringDedupeIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next unique point from the input iterator.\nfunc (itr *stringDedupeIterator) Next() (*StringPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\tp, err := itr.input.Next()\n\t\tif p == nil || err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Serialize to bytes to store in lookup.\n\t\tbuf, err := proto.Marshal(encodeStringPoint(p))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If the point has already been output then move to the next point.\n\t\tif _, ok := itr.m[string(buf)]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Otherwise mark it as emitted and return point.\n\t\titr.m[string(buf)] = struct{}{}\n\t\treturn p, nil\n\t}\n}\n\n// stringReaderIterator represents an iterator that streams from a reader.\ntype stringReaderIterator struct {\n\tr   io.Reader\n\tdec *StringPointDecoder\n}\n\n// newStringReaderIterator returns a new instance of stringReaderIterator.\nfunc newStringReaderIterator(r io.Reader, stats IteratorStats) *stringReaderIterator {\n\tdec := NewStringPointDecoder(r)\n\tdec.stats = stats\n\n\treturn &stringReaderIterator{\n\t\tr:   r,\n\t\tdec: dec,\n\t}\n}\n\n// Stats returns stats about points processed.\nfunc (itr *stringReaderIterator) Stats() IteratorStats { return itr.dec.stats }\n\n// Close closes the underlying reader, if applicable.\nfunc (itr *stringReaderIterator) Close() error {\n\tif r, ok := itr.r.(io.ReadCloser); ok {\n\t\treturn r.Close()\n\t}\n\treturn nil\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *stringReaderIterator) Next() (*StringPoint, error) {\n\t// OPTIMIZE(benbjohnson): Reuse point on iterator.\n\n\t// Unmarshal next point.\n\tp := &StringPoint{}\n\tif err := itr.dec.DecodeStringPoint(p); err == io.EOF 
{\n\t\treturn nil, nil\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}\n\n// BooleanIterator represents a stream of boolean points.\ntype BooleanIterator interface {\n\tIterator\n\tNext() (*BooleanPoint, error)\n}\n\n// newBooleanIterators converts a slice of Iterator to a slice of BooleanIterator.\n// Drop and closes any iterator in itrs that is not a BooleanIterator and cannot\n// be cast to a BooleanIterator.\nfunc newBooleanIterators(itrs []Iterator) []BooleanIterator {\n\ta := make([]BooleanIterator, 0, len(itrs))\n\tfor _, itr := range itrs {\n\t\tswitch itr := itr.(type) {\n\t\tcase BooleanIterator:\n\t\t\ta = append(a, itr)\n\n\t\tdefault:\n\t\t\titr.Close()\n\t\t}\n\t}\n\treturn a\n}\n\n// bufBooleanIterator represents a buffered BooleanIterator.\ntype bufBooleanIterator struct {\n\titr BooleanIterator\n\tbuf *BooleanPoint\n}\n\n// newBufBooleanIterator returns a buffered BooleanIterator.\nfunc newBufBooleanIterator(itr BooleanIterator) *bufBooleanIterator {\n\treturn &bufBooleanIterator{itr: itr}\n}\n\n// Stats returns statistics from the input iterator.\nfunc (itr *bufBooleanIterator) Stats() IteratorStats { return itr.itr.Stats() }\n\n// Close closes the underlying iterator.\nfunc (itr *bufBooleanIterator) Close() error { return itr.itr.Close() }\n\n// peek returns the next point without removing it from the iterator.\nfunc (itr *bufBooleanIterator) peek() (*BooleanPoint, error) {\n\tp, err := itr.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\titr.unread(p)\n\treturn p, nil\n}\n\n// peekTime returns the time of the next point.\n// Returns zero time if no more points available.\nfunc (itr *bufBooleanIterator) peekTime() (int64, error) {\n\tp, err := itr.peek()\n\tif p == nil || err != nil {\n\t\treturn ZeroTime, err\n\t}\n\treturn p.Time, nil\n}\n\n// Next returns the current buffer, if exists, or calls the underlying iterator.\nfunc (itr *bufBooleanIterator) Next() (*BooleanPoint, error) {\n\tbuf := itr.buf\n\tif buf != 
nil {\n\t\titr.buf = nil\n\t\treturn buf, nil\n\t}\n\treturn itr.itr.Next()\n}\n\n// NextInWindow returns the next value if it is between [startTime, endTime).\n// If the next value is outside the range then it is moved to the buffer.\nfunc (itr *bufBooleanIterator) NextInWindow(startTime, endTime int64) (*BooleanPoint, error) {\n\tv, err := itr.Next()\n\tif v == nil || err != nil {\n\t\treturn nil, err\n\t} else if t := v.Time; t >= endTime || t < startTime {\n\t\titr.unread(v)\n\t\treturn nil, nil\n\t}\n\treturn v, nil\n}\n\n// unread sets v to the buffer. It is read on the next call to Next().\nfunc (itr *bufBooleanIterator) unread(v *BooleanPoint) { itr.buf = v }\n\n// booleanMergeIterator represents an iterator that combines multiple boolean iterators.\ntype booleanMergeIterator struct {\n\tinputs []BooleanIterator\n\theap   *booleanMergeHeap\n\tinit   bool\n\n\t// Current iterator and window.\n\tcurr   *booleanMergeHeapItem\n\twindow struct {\n\t\tname      string\n\t\ttags      string\n\t\tstartTime int64\n\t\tendTime   int64\n\t}\n}\n\n// newBooleanMergeIterator returns a new instance of booleanMergeIterator.\nfunc newBooleanMergeIterator(inputs []BooleanIterator, opt IteratorOptions) *booleanMergeIterator {\n\titr := &booleanMergeIterator{\n\t\tinputs: inputs,\n\t\theap: &booleanMergeHeap{\n\t\t\titems: make([]*booleanMergeHeapItem, 0, len(inputs)),\n\t\t\topt:   opt,\n\t\t},\n\t}\n\n\t// Initialize heap items.\n\tfor _, input := range inputs {\n\t\t// Wrap in buffer, ignore any inputs without anymore points.\n\t\tbufInput := newBufBooleanIterator(input)\n\n\t\t// Append to the heap.\n\t\titr.heap.items = append(itr.heap.items, &booleanMergeHeapItem{itr: bufInput})\n\t}\n\n\treturn itr\n}\n\n// Stats returns an aggregation of stats from the underlying iterators.\nfunc (itr *booleanMergeIterator) Stats() IteratorStats {\n\tvar stats IteratorStats\n\tfor _, input := range itr.inputs {\n\t\tstats.Add(input.Stats())\n\t}\n\treturn stats\n}\n\n// Close closes 
the underlying iterators.\nfunc (itr *booleanMergeIterator) Close() error {\n\tfor _, input := range itr.inputs {\n\t\tinput.Close()\n\t}\n\titr.curr = nil\n\titr.inputs = nil\n\titr.heap.items = nil\n\treturn nil\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *booleanMergeIterator) Next() (*BooleanPoint, error) {\n\t// Initialize the heap. This needs to be done lazily on the first call to this iterator\n\t// so that iterator initialization done through the Select() call returns quickly.\n\t// Queries can only be interrupted after the Select() call completes so any operations\n\t// done during iterator creation cannot be interrupted, which is why we do it here\n\t// instead so an interrupt can happen while initializing the heap.\n\tif !itr.init {\n\t\titems := itr.heap.items\n\t\titr.heap.items = make([]*booleanMergeHeapItem, 0, len(items))\n\t\tfor _, item := range items {\n\t\t\tif p, err := item.itr.peek(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else if p == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\titr.heap.items = append(itr.heap.items, item)\n\t\t}\n\t\theap.Init(itr.heap)\n\t\titr.init = true\n\t}\n\n\tfor {\n\t\t// Retrieve the next iterator if we don't have one.\n\t\tif itr.curr == nil {\n\t\t\tif len(itr.heap.items) == 0 {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t\titr.curr = heap.Pop(itr.heap).(*booleanMergeHeapItem)\n\n\t\t\t// Read point and set current window.\n\t\t\tp, err := itr.curr.itr.Next()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ttags := p.Tags.Subset(itr.heap.opt.Dimensions)\n\t\t\titr.window.name, itr.window.tags = p.Name, tags.ID()\n\t\t\titr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time)\n\t\t\treturn p, nil\n\t\t}\n\n\t\t// Read the next point from the current iterator.\n\t\tp, err := itr.curr.itr.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If there are no more points then remove iterator from heap and find next.\n\t\tif p == nil {\n\t\t\titr.curr = 
nil\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check if the point is inside of our current window.\n\t\tinWindow := true\n\t\tif window := itr.window; window.name != p.Name {\n\t\t\tinWindow = false\n\t\t} else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() {\n\t\t\tinWindow = false\n\t\t} else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime {\n\t\t\tinWindow = false\n\t\t} else if !opt.Ascending && p.Time < window.startTime {\n\t\t\tinWindow = false\n\t\t}\n\n\t\t// If it's outside our window then push iterator back on the heap and find new iterator.\n\t\tif !inWindow {\n\t\t\titr.curr.itr.unread(p)\n\t\t\theap.Push(itr.heap, itr.curr)\n\t\t\titr.curr = nil\n\t\t\tcontinue\n\t\t}\n\n\t\treturn p, nil\n\t}\n}\n\n// booleanMergeHeap represents a heap of booleanMergeHeapItems.\n// Items are sorted by their next window and then by name/tags.\ntype booleanMergeHeap struct {\n\topt   IteratorOptions\n\titems []*booleanMergeHeapItem\n}\n\nfunc (h *booleanMergeHeap) Len() int      { return len(h.items) }\nfunc (h *booleanMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }\nfunc (h *booleanMergeHeap) Less(i, j int) bool {\n\tx, err := h.items[i].itr.peek()\n\tif err != nil {\n\t\treturn true\n\t}\n\ty, err := h.items[j].itr.peek()\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tif h.opt.Ascending {\n\t\tif x.Name != y.Name {\n\t\t\treturn x.Name < y.Name\n\t\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {\n\t\t\treturn xTags.ID() < yTags.ID()\n\t\t}\n\t} else {\n\t\tif x.Name != y.Name {\n\t\t\treturn x.Name > y.Name\n\t\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {\n\t\t\treturn xTags.ID() > yTags.ID()\n\t\t}\n\t}\n\n\txt, _ := h.opt.Window(x.Time)\n\tyt, _ := h.opt.Window(y.Time)\n\n\tif h.opt.Ascending {\n\t\treturn xt < yt\n\t}\n\treturn xt > yt\n}\n\nfunc (h 
*booleanMergeHeap) Push(x interface{}) {\n\th.items = append(h.items, x.(*booleanMergeHeapItem))\n}\n\nfunc (h *booleanMergeHeap) Pop() interface{} {\n\told := h.items\n\tn := len(old)\n\titem := old[n-1]\n\th.items = old[0 : n-1]\n\treturn item\n}\n\ntype booleanMergeHeapItem struct {\n\titr *bufBooleanIterator\n}\n\n// booleanSortedMergeIterator is an iterator that sorts and merges multiple iterators into one.\ntype booleanSortedMergeIterator struct {\n\tinputs []BooleanIterator\n\theap   *booleanSortedMergeHeap\n\tinit   bool\n}\n\n// newBooleanSortedMergeIterator returns an instance of booleanSortedMergeIterator.\nfunc newBooleanSortedMergeIterator(inputs []BooleanIterator, opt IteratorOptions) Iterator {\n\titr := &booleanSortedMergeIterator{\n\t\tinputs: inputs,\n\t\theap: &booleanSortedMergeHeap{\n\t\t\titems: make([]*booleanSortedMergeHeapItem, 0, len(inputs)),\n\t\t\topt:   opt,\n\t\t},\n\t}\n\n\t// Initialize heap items.\n\tfor _, input := range inputs {\n\t\t// Append to the heap.\n\t\titr.heap.items = append(itr.heap.items, &booleanSortedMergeHeapItem{itr: input})\n\t}\n\n\treturn itr\n}\n\n// Stats returns an aggregation of stats from the underlying iterators.\nfunc (itr *booleanSortedMergeIterator) Stats() IteratorStats {\n\tvar stats IteratorStats\n\tfor _, input := range itr.inputs {\n\t\tstats.Add(input.Stats())\n\t}\n\treturn stats\n}\n\n// Close closes the underlying iterators.\nfunc (itr *booleanSortedMergeIterator) Close() error {\n\tfor _, input := range itr.inputs {\n\t\tinput.Close()\n\t}\n\treturn nil\n}\n\n// Next returns the next points from the iterator.\nfunc (itr *booleanSortedMergeIterator) Next() (*BooleanPoint, error) { return itr.pop() }\n\n// pop returns the next point from the heap.\n// Reads the next point from item's cursor and puts it back on the heap.\nfunc (itr *booleanSortedMergeIterator) pop() (*BooleanPoint, error) {\n\t// Initialize the heap. 
See the MergeIterator to see why this has to be done lazily.\n\tif !itr.init {\n\t\titems := itr.heap.items\n\t\titr.heap.items = make([]*booleanSortedMergeHeapItem, 0, len(items))\n\t\tfor _, item := range items {\n\t\t\tvar err error\n\t\t\tif item.point, err = item.itr.Next(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else if item.point == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\titr.heap.items = append(itr.heap.items, item)\n\t\t}\n\t\theap.Init(itr.heap)\n\t\titr.init = true\n\t}\n\n\tif len(itr.heap.items) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// Read the next item from the heap.\n\titem := heap.Pop(itr.heap).(*booleanSortedMergeHeapItem)\n\tif item.err != nil {\n\t\treturn nil, item.err\n\t} else if item.point == nil {\n\t\treturn nil, nil\n\t}\n\n\t// Copy the point for return.\n\tp := item.point.Clone()\n\n\t// Read the next item from the cursor. Push back to heap if one exists.\n\tif item.point, item.err = item.itr.Next(); item.point != nil {\n\t\theap.Push(itr.heap, item)\n\t}\n\n\treturn p, nil\n}\n\n// booleanSortedMergeHeap represents a heap of booleanSortedMergeHeapItems.\ntype booleanSortedMergeHeap struct {\n\topt   IteratorOptions\n\titems []*booleanSortedMergeHeapItem\n}\n\nfunc (h *booleanSortedMergeHeap) Len() int      { return len(h.items) }\nfunc (h *booleanSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }\nfunc (h *booleanSortedMergeHeap) Less(i, j int) bool {\n\tx, y := h.items[i].point, h.items[j].point\n\n\tif h.opt.Ascending {\n\t\tif x.Name != y.Name {\n\t\t\treturn x.Name < y.Name\n\t\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {\n\t\t\treturn xTags.ID() < yTags.ID()\n\t\t}\n\t\treturn x.Time < y.Time\n\t}\n\n\tif x.Name != y.Name {\n\t\treturn x.Name > y.Name\n\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {\n\t\treturn xTags.ID() > yTags.ID()\n\t}\n\treturn 
x.Time > y.Time\n}\n\nfunc (h *booleanSortedMergeHeap) Push(x interface{}) {\n\th.items = append(h.items, x.(*booleanSortedMergeHeapItem))\n}\n\nfunc (h *booleanSortedMergeHeap) Pop() interface{} {\n\told := h.items\n\tn := len(old)\n\titem := old[n-1]\n\th.items = old[0 : n-1]\n\treturn item\n}\n\ntype booleanSortedMergeHeapItem struct {\n\tpoint *BooleanPoint\n\terr   error\n\titr   BooleanIterator\n}\n\n// booleanParallelIterator represents an iterator that pulls data in a separate goroutine.\ntype booleanParallelIterator struct {\n\tinput BooleanIterator\n\tch    chan booleanPointError\n\n\tonce    sync.Once\n\tclosing chan struct{}\n\twg      sync.WaitGroup\n}\n\n// newBooleanParallelIterator returns a new instance of booleanParallelIterator.\nfunc newBooleanParallelIterator(input BooleanIterator) *booleanParallelIterator {\n\titr := &booleanParallelIterator{\n\t\tinput:   input,\n\t\tch:      make(chan booleanPointError, 256),\n\t\tclosing: make(chan struct{}),\n\t}\n\titr.wg.Add(1)\n\tgo itr.monitor()\n\treturn itr\n}\n\n// Stats returns stats from the underlying iterator.\nfunc (itr *booleanParallelIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the underlying iterators.\nfunc (itr *booleanParallelIterator) Close() error {\n\titr.once.Do(func() { close(itr.closing) })\n\titr.wg.Wait()\n\treturn itr.input.Close()\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *booleanParallelIterator) Next() (*BooleanPoint, error) {\n\tv, ok := <-itr.ch\n\tif !ok {\n\t\treturn nil, io.EOF\n\t}\n\treturn v.point, v.err\n}\n\n// monitor runs in a separate goroutine and actively pulls the next point.\nfunc (itr *booleanParallelIterator) monitor() {\n\tdefer close(itr.ch)\n\tdefer itr.wg.Done()\n\n\tfor {\n\t\t// Read next point.\n\t\tp, err := itr.input.Next()\n\t\tif p != nil {\n\t\t\tp = p.Clone()\n\t\t}\n\n\t\tselect {\n\t\tcase <-itr.closing:\n\t\t\treturn\n\t\tcase itr.ch <- booleanPointError{point: p, err: 
err}:\n\t\t}\n\t}\n}\n\ntype booleanPointError struct {\n\tpoint *BooleanPoint\n\terr   error\n}\n\n// booleanLimitIterator represents an iterator that limits points per group.\ntype booleanLimitIterator struct {\n\tinput BooleanIterator\n\topt   IteratorOptions\n\tn     int\n\n\tprev struct {\n\t\tname string\n\t\ttags Tags\n\t}\n}\n\n// newBooleanLimitIterator returns a new instance of booleanLimitIterator.\nfunc newBooleanLimitIterator(input BooleanIterator, opt IteratorOptions) *booleanLimitIterator {\n\treturn &booleanLimitIterator{\n\t\tinput: input,\n\t\topt:   opt,\n\t}\n}\n\n// Stats returns stats from the underlying iterator.\nfunc (itr *booleanLimitIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the underlying iterators.\nfunc (itr *booleanLimitIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next point from the iterator.\nfunc (itr *booleanLimitIterator) Next() (*BooleanPoint, error) {\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif p == nil || err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Reset window and counter if a new window is encountered.\n\t\tif p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) {\n\t\t\titr.prev.name = p.Name\n\t\t\titr.prev.tags = p.Tags\n\t\t\titr.n = 0\n\t\t}\n\n\t\t// Increment counter.\n\t\titr.n++\n\n\t\t// Read next point if not beyond the offset.\n\t\tif itr.n <= itr.opt.Offset {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Read next point if we're beyond the limit.\n\t\tif itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn p, nil\n\t}\n}\n\ntype booleanFillIterator struct {\n\tinput     *bufBooleanIterator\n\tprev      BooleanPoint\n\tstartTime int64\n\tendTime   int64\n\tauxFields []interface{}\n\tinit      bool\n\topt       IteratorOptions\n\n\twindow struct {\n\t\tname   string\n\t\ttags   Tags\n\t\ttime   int64\n\t\toffset int64\n\t}\n}\n\nfunc newBooleanFillIterator(input BooleanIterator, expr Expr, opt 
IteratorOptions) *booleanFillIterator {\n\tif opt.Fill == NullFill {\n\t\tif expr, ok := expr.(*Call); ok && expr.Name == \"count\" {\n\t\t\topt.Fill = NumberFill\n\t\t\topt.FillValue = false\n\t\t}\n\t}\n\n\tvar startTime, endTime int64\n\tif opt.Ascending {\n\t\tstartTime, _ = opt.Window(opt.StartTime)\n\t\tendTime, _ = opt.Window(opt.EndTime)\n\t} else {\n\t\tstartTime, _ = opt.Window(opt.EndTime)\n\t\tendTime, _ = opt.Window(opt.StartTime)\n\t}\n\n\tvar auxFields []interface{}\n\tif len(opt.Aux) > 0 {\n\t\tauxFields = make([]interface{}, len(opt.Aux))\n\t}\n\n\treturn &booleanFillIterator{\n\t\tinput:     newBufBooleanIterator(input),\n\t\tprev:      BooleanPoint{Nil: true},\n\t\tstartTime: startTime,\n\t\tendTime:   endTime,\n\t\tauxFields: auxFields,\n\t\topt:       opt,\n\t}\n}\n\nfunc (itr *booleanFillIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *booleanFillIterator) Close() error         { return itr.input.Close() }\n\nfunc (itr *booleanFillIterator) Next() (*BooleanPoint, error) {\n\tif !itr.init {\n\t\tp, err := itr.input.peek()\n\t\tif p == nil || err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\titr.window.name, itr.window.tags = p.Name, p.Tags\n\t\titr.window.time = itr.startTime\n\t\tif itr.opt.Location != nil {\n\t\t\t_, itr.window.offset = itr.opt.Zone(itr.window.time)\n\t\t}\n\t\titr.init = true\n\t}\n\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check if the next point is outside of our window or is nil.\n\tfor p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() {\n\t\t// If we are inside of an interval, unread the point and continue below to\n\t\t// constructing a new point.\n\t\tif itr.opt.Ascending {\n\t\t\tif itr.window.time <= itr.endTime {\n\t\t\t\titr.input.unread(p)\n\t\t\t\tp = nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tif itr.window.time >= itr.endTime {\n\t\t\t\titr.input.unread(p)\n\t\t\t\tp = nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// We 
are *not* in a current interval. If there is no next point,\n\t\t// we are at the end of all intervals.\n\t\tif p == nil {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\t// Set the new interval.\n\t\titr.window.name, itr.window.tags = p.Name, p.Tags\n\t\titr.window.time = itr.startTime\n\t\tif itr.opt.Location != nil {\n\t\t\t_, itr.window.offset = itr.opt.Zone(itr.window.time)\n\t\t}\n\t\titr.prev = BooleanPoint{Nil: true}\n\t\tbreak\n\t}\n\n\t// Check if the point is our next expected point.\n\tif p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) {\n\t\tif p != nil {\n\t\t\titr.input.unread(p)\n\t\t}\n\n\t\tp = &BooleanPoint{\n\t\t\tName: itr.window.name,\n\t\t\tTags: itr.window.tags,\n\t\t\tTime: itr.window.time,\n\t\t\tAux:  itr.auxFields,\n\t\t}\n\n\t\tswitch itr.opt.Fill {\n\t\tcase LinearFill:\n\t\t\tfallthrough\n\t\tcase NullFill:\n\t\t\tp.Nil = true\n\t\tcase NumberFill:\n\t\t\tp.Value = castToBoolean(itr.opt.FillValue)\n\t\tcase PreviousFill:\n\t\t\tif !itr.prev.Nil {\n\t\t\t\tp.Value = itr.prev.Value\n\t\t\t\tp.Nil = itr.prev.Nil\n\t\t\t} else {\n\t\t\t\tp.Nil = true\n\t\t\t}\n\t\t}\n\t} else {\n\t\titr.prev = *p\n\t}\n\n\t// Advance the expected time. 
Do not advance to a new window here\n\t// as there may be lingering points with the same timestamp in the previous\n\t// window.\n\tif itr.opt.Ascending {\n\t\titr.window.time += int64(itr.opt.Interval.Duration)\n\t} else {\n\t\titr.window.time -= int64(itr.opt.Interval.Duration)\n\t}\n\n\t// Check to see if we have passed over an offset change and adjust the time\n\t// to account for this new offset.\n\tif itr.opt.Location != nil {\n\t\tif _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset {\n\t\t\tdiff := itr.window.offset - offset\n\t\t\tif abs(diff) < int64(itr.opt.Interval.Duration) {\n\t\t\t\titr.window.time += diff\n\t\t\t}\n\t\t\titr.window.offset = offset\n\t\t}\n\t}\n\treturn p, nil\n}\n\n// booleanIntervalIterator represents a boolean implementation of IntervalIterator.\ntype booleanIntervalIterator struct {\n\tinput BooleanIterator\n\topt   IteratorOptions\n}\n\nfunc newBooleanIntervalIterator(input BooleanIterator, opt IteratorOptions) *booleanIntervalIterator {\n\treturn &booleanIntervalIterator{input: input, opt: opt}\n}\n\nfunc (itr *booleanIntervalIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *booleanIntervalIterator) Close() error         { return itr.input.Close() }\n\nfunc (itr *booleanIntervalIterator) Next() (*BooleanPoint, error) {\n\tp, err := itr.input.Next()\n\tif p == nil || err != nil {\n\t\treturn nil, err\n\t}\n\tp.Time, _ = itr.opt.Window(p.Time)\n\t// If we see the minimum allowable time, set the time to zero so we don't\n\t// break the default returned time for aggregate queries without times.\n\tif p.Time == MinTime {\n\t\tp.Time = 0\n\t}\n\treturn p, nil\n}\n\n// booleanInterruptIterator represents a boolean implementation of InterruptIterator.\ntype booleanInterruptIterator struct {\n\tinput   BooleanIterator\n\tclosing <-chan struct{}\n\tcount   int\n}\n\nfunc newBooleanInterruptIterator(input BooleanIterator, closing <-chan struct{}) *booleanInterruptIterator {\n\treturn 
&booleanInterruptIterator{input: input, closing: closing}\n}\n\nfunc (itr *booleanInterruptIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *booleanInterruptIterator) Close() error         { return itr.input.Close() }\n\nfunc (itr *booleanInterruptIterator) Next() (*BooleanPoint, error) {\n\t// Only check if the channel is closed every N points. This\n\t// intentionally checks on both 0 and N so that if the iterator\n\t// has been interrupted before the first point is emitted it will\n\t// not emit any points.\n\tif itr.count&0xFF == 0xFF {\n\t\tselect {\n\t\tcase <-itr.closing:\n\t\t\treturn nil, itr.Close()\n\t\tdefault:\n\t\t\t// Reset iterator count to zero and fall through to emit the next point.\n\t\t\titr.count = 0\n\t\t}\n\t}\n\n\t// Increment the counter for every point read.\n\titr.count++\n\treturn itr.input.Next()\n}\n\n// booleanCloseInterruptIterator represents a boolean implementation of CloseInterruptIterator.\ntype booleanCloseInterruptIterator struct {\n\tinput   BooleanIterator\n\tclosing <-chan struct{}\n\tdone    chan struct{}\n\tonce    sync.Once\n}\n\nfunc newBooleanCloseInterruptIterator(input BooleanIterator, closing <-chan struct{}) *booleanCloseInterruptIterator {\n\titr := &booleanCloseInterruptIterator{\n\t\tinput:   input,\n\t\tclosing: closing,\n\t\tdone:    make(chan struct{}),\n\t}\n\tgo itr.monitor()\n\treturn itr\n}\n\nfunc (itr *booleanCloseInterruptIterator) monitor() {\n\tselect {\n\tcase <-itr.closing:\n\t\titr.Close()\n\tcase <-itr.done:\n\t}\n}\n\nfunc (itr *booleanCloseInterruptIterator) Stats() IteratorStats {\n\treturn itr.input.Stats()\n}\n\nfunc (itr *booleanCloseInterruptIterator) Close() error {\n\titr.once.Do(func() {\n\t\tclose(itr.done)\n\t\titr.input.Close()\n\t})\n\treturn nil\n}\n\nfunc (itr *booleanCloseInterruptIterator) Next() (*BooleanPoint, error) {\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\t// Check if the iterator was closed.\n\t\tselect {\n\t\tcase 
<-itr.done:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn p, nil\n}\n\n// auxBooleanPoint represents a combination of a point and an error for the AuxIterator.\ntype auxBooleanPoint struct {\n\tpoint *BooleanPoint\n\terr   error\n}\n\n// booleanAuxIterator represents a boolean implementation of AuxIterator.\ntype booleanAuxIterator struct {\n\tinput      *bufBooleanIterator\n\toutput     chan auxBooleanPoint\n\tfields     *auxIteratorFields\n\tbackground bool\n}\n\nfunc newBooleanAuxIterator(input BooleanIterator, opt IteratorOptions) *booleanAuxIterator {\n\treturn &booleanAuxIterator{\n\t\tinput:  newBufBooleanIterator(input),\n\t\toutput: make(chan auxBooleanPoint, 1),\n\t\tfields: newAuxIteratorFields(opt),\n\t}\n}\n\nfunc (itr *booleanAuxIterator) Background() {\n\titr.background = true\n\titr.Start()\n\tgo DrainIterator(itr)\n}\n\nfunc (itr *booleanAuxIterator) Start()               { go itr.stream() }\nfunc (itr *booleanAuxIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *booleanAuxIterator) Close() error         { return itr.input.Close() }\nfunc (itr *booleanAuxIterator) Next() (*BooleanPoint, error) {\n\tp := <-itr.output\n\treturn p.point, p.err\n}\nfunc (itr *booleanAuxIterator) Iterator(name string, typ DataType) Iterator {\n\treturn itr.fields.iterator(name, typ)\n}\n\nfunc (itr *booleanAuxIterator) stream() {\n\tfor {\n\t\t// Read next point.\n\t\tp, err := itr.input.Next()\n\t\tif err != nil {\n\t\t\titr.output <- auxBooleanPoint{err: err}\n\t\t\titr.fields.sendError(err)\n\t\t\tbreak\n\t\t} else if p == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Send point to output and to each field iterator.\n\t\titr.output <- auxBooleanPoint{point: p}\n\t\tif ok := itr.fields.send(p); !ok && itr.background {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tclose(itr.output)\n\titr.fields.close()\n}\n\n// booleanChanIterator represents a new instance of booleanChanIterator.\ntype booleanChanIterator struct {\n\tbuf struct {\n\t\ti 
     int\n\t\tfilled bool\n\t\tpoints [2]BooleanPoint\n\t}\n\terr  error\n\tcond *sync.Cond\n\tdone bool\n}\n\nfunc (itr *booleanChanIterator) Stats() IteratorStats { return IteratorStats{} }\n\nfunc (itr *booleanChanIterator) Close() error {\n\titr.cond.L.Lock()\n\t// Mark the channel iterator as done and signal all waiting goroutines to start again.\n\titr.done = true\n\titr.cond.Broadcast()\n\t// Do not defer the unlock so we don't create an unnecessary allocation.\n\titr.cond.L.Unlock()\n\treturn nil\n}\n\nfunc (itr *booleanChanIterator) setBuf(name string, tags Tags, time int64, value interface{}) bool {\n\titr.cond.L.Lock()\n\tdefer itr.cond.L.Unlock()\n\n\t// Wait for either the iterator to be done (so we don't have to set the value)\n\t// or for the buffer to have been read and ready for another write.\n\tfor !itr.done && itr.buf.filled {\n\t\titr.cond.Wait()\n\t}\n\n\t// Do not set the value and return false to signal that the iterator is closed.\n\t// Do this after the above wait as the above for loop may have exited because\n\t// the iterator was closed.\n\tif itr.done {\n\t\treturn false\n\t}\n\n\tswitch v := value.(type) {\n\tcase bool:\n\t\titr.buf.points[itr.buf.i] = BooleanPoint{Name: name, Tags: tags, Time: time, Value: v}\n\n\tdefault:\n\t\titr.buf.points[itr.buf.i] = BooleanPoint{Name: name, Tags: tags, Time: time, Nil: true}\n\t}\n\titr.buf.filled = true\n\n\t// Signal to all waiting goroutines that a new value is ready to read.\n\titr.cond.Signal()\n\treturn true\n}\n\nfunc (itr *booleanChanIterator) setErr(err error) {\n\titr.cond.L.Lock()\n\tdefer itr.cond.L.Unlock()\n\titr.err = err\n\n\t// Signal to all waiting goroutines that a new value is ready to read.\n\titr.cond.Signal()\n}\n\nfunc (itr *booleanChanIterator) Next() (*BooleanPoint, error) {\n\titr.cond.L.Lock()\n\tdefer itr.cond.L.Unlock()\n\n\t// Check for an error and return one if there.\n\tif itr.err != nil {\n\t\treturn nil, itr.err\n\t}\n\n\t// Wait until either a value is 
available in the buffer or\n\t// the iterator is closed.\n\tfor !itr.done && !itr.buf.filled {\n\t\titr.cond.Wait()\n\t}\n\n\t// Return nil once the channel is done and the buffer is empty.\n\tif itr.done && !itr.buf.filled {\n\t\treturn nil, nil\n\t}\n\n\t// Always read from the buffer if it exists, even if the iterator\n\t// is closed. This prevents the last value from being truncated by\n\t// the parent iterator.\n\tp := &itr.buf.points[itr.buf.i]\n\titr.buf.i = (itr.buf.i + 1) % len(itr.buf.points)\n\titr.buf.filled = false\n\titr.cond.Signal()\n\treturn p, nil\n}\n\n// booleanReduceFloatIterator executes a reducer for every interval and buffers the result.\ntype booleanReduceFloatIterator struct {\n\tinput    *bufBooleanIterator\n\tcreate   func() (BooleanPointAggregator, FloatPointEmitter)\n\tdims     []string\n\topt      IteratorOptions\n\tpoints   []FloatPoint\n\tkeepTags bool\n}\n\nfunc newBooleanReduceFloatIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, FloatPointEmitter)) *booleanReduceFloatIterator {\n\treturn &booleanReduceFloatIterator{\n\t\tinput:  newBufBooleanIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *booleanReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *booleanReduceFloatIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *booleanReduceFloatIterator) Next() (*FloatPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn 
p, nil\n}\n\n// booleanReduceFloatPoint stores the reduced data for a name/tag combination.\ntype booleanReduceFloatPoint struct {\n\tName       string\n\tTags       Tags\n\tAggregator BooleanPointAggregator\n\tEmitter    FloatPointEmitter\n}\n\n// reduce executes fn once for every point in the next window.\n// The previous value for the dimension is passed to fn.\nfunc (itr *booleanReduceFloatIterator) reduce() ([]FloatPoint, error) {\n\t// Calculate next window.\n\tvar (\n\t\tstartTime, endTime int64\n\t\twindow             struct {\n\t\t\tname string\n\t\t\ttags string\n\t\t}\n\t)\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t} else if p.Nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unread the point so it can be processed.\n\t\titr.input.unread(p)\n\t\tstartTime, endTime = itr.opt.Window(p.Time)\n\t\twindow.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()\n\t\tbreak\n\t}\n\n\t// Create points by tags.\n\tm := make(map[string]*booleanReduceFloatPoint)\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.NextInWindow(startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr == nil {\n\t\t\tbreak\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t} else if curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Ensure this point is within the same final window.\n\t\tif curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Retrieve the tags on this point for this level of the query.\n\t\t// This may be different than the bucket dimensions.\n\t\ttags := curr.Tags.Subset(itr.dims)\n\t\tid := tags.ID()\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = 
&booleanReduceFloatPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\tm[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateBoolean(curr)\n\t}\n\n\t// Reverse sort points by name & tag if our output is supposed to be ordered.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tif len(keys) > 1 && itr.opt.Ordered {\n\t\tsort.Sort(reverseStringSlice(keys))\n\t}\n\n\t// Assume the points are already sorted until proven otherwise.\n\tsortedByTime := true\n\t// Emit the points for each name & tag combination.\n\ta := make([]FloatPoint, 0, len(m))\n\tfor _, k := range keys {\n\t\trp := m[k]\n\t\tpoints := rp.Emitter.Emit()\n\t\tfor i := len(points) - 1; i >= 0; i-- {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tif !itr.keepTags {\n\t\t\t\tpoints[i].Tags = rp.Tags\n\t\t\t}\n\t\t\t// Set the points time to the interval time if the reducer didn't provide one.\n\t\t\tif points[i].Time == ZeroTime {\n\t\t\t\tpoints[i].Time = startTime\n\t\t\t} else {\n\t\t\t\tsortedByTime = false\n\t\t\t}\n\t\t\ta = append(a, points[i])\n\t\t}\n\t}\n\n\t// Points may be out of order. 
Perform a stable sort by time if requested.\n\tif !sortedByTime && itr.opt.Ordered {\n\t\tsort.Stable(sort.Reverse(floatPointsByTime(a)))\n\t}\n\n\treturn a, nil\n}\n\n// booleanStreamFloatIterator streams inputs into the iterator and emits points gradually.\ntype booleanStreamFloatIterator struct {\n\tinput  *bufBooleanIterator\n\tcreate func() (BooleanPointAggregator, FloatPointEmitter)\n\tdims   []string\n\topt    IteratorOptions\n\tm      map[string]*booleanReduceFloatPoint\n\tpoints []FloatPoint\n}\n\n// newBooleanStreamFloatIterator returns a new instance of booleanStreamFloatIterator.\nfunc newBooleanStreamFloatIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, FloatPointEmitter), opt IteratorOptions) *booleanStreamFloatIterator {\n\treturn &booleanStreamFloatIterator{\n\t\tinput:  newBufBooleanIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t\tm:      make(map[string]*booleanReduceFloatPoint),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *booleanStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *booleanStreamFloatIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next value for the stream iterator.\nfunc (itr *booleanStreamFloatIterator) Next() (*FloatPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// reduce creates and manages aggregators for every point from the input.\n// After aggregating a point, it always tries to emit a value using the emitter.\nfunc (itr *booleanStreamFloatIterator) reduce() ([]FloatPoint, error) {\n\tfor {\n\t\t// 
Read next point.\n\t\tcurr, err := itr.input.Next()\n\t\tif curr == nil {\n\t\t\t// Close all of the aggregators to flush any remaining points to emit.\n\t\t\tvar points []FloatPoint\n\t\t\tfor _, rp := range itr.m {\n\t\t\t\tif aggregator, ok := rp.Aggregator.(io.Closer); ok {\n\t\t\t\t\tif err := aggregator.Close(); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tpts := rp.Emitter.Emit()\n\t\t\t\t\tif len(pts) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor i := range pts {\n\t\t\t\t\t\tpts[i].Name = rp.Name\n\t\t\t\t\t\tpts[i].Tags = rp.Tags\n\t\t\t\t\t}\n\t\t\t\t\tpoints = append(points, pts...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Eliminate the aggregators and emitters.\n\t\t\titr.m = nil\n\t\t\treturn points, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags := curr.Tags.Subset(itr.dims)\n\n\t\tid := curr.Name\n\t\tif len(tags.m) > 0 {\n\t\t\tid += \"\\x00\" + tags.ID()\n\t\t}\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := itr.m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &booleanReduceFloatPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\titr.m[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateBoolean(curr)\n\n\t\t// Attempt to emit points from the aggregator.\n\t\tpoints := rp.Emitter.Emit()\n\t\tif len(points) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range points {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tpoints[i].Tags = rp.Tags\n\t\t}\n\t\treturn points, nil\n\t}\n}\n\n// booleanFloatExprIterator executes a function to modify an existing point\n// for every output of the input iterator.\ntype booleanFloatExprIterator struct {\n\tleft      *bufBooleanIterator\n\tright     *bufBooleanIterator\n\tfn        booleanFloatExprFunc\n\tpoints    []BooleanPoint // must be size 2\n\tstorePrev bool\n}\n\nfunc 
newBooleanFloatExprIterator(left, right BooleanIterator, opt IteratorOptions, fn func(a, b bool) float64) *booleanFloatExprIterator {\n\tvar points []BooleanPoint\n\tswitch opt.Fill {\n\tcase NullFill, PreviousFill:\n\t\tpoints = []BooleanPoint{{Nil: true}, {Nil: true}}\n\tcase NumberFill:\n\t\tvalue := castToBoolean(opt.FillValue)\n\t\tpoints = []BooleanPoint{{Value: value}, {Value: value}}\n\t}\n\treturn &booleanFloatExprIterator{\n\t\tleft:      newBufBooleanIterator(left),\n\t\tright:     newBufBooleanIterator(right),\n\t\tpoints:    points,\n\t\tfn:        fn,\n\t\tstorePrev: opt.Fill == PreviousFill,\n\t}\n}\n\nfunc (itr *booleanFloatExprIterator) Stats() IteratorStats {\n\tstats := itr.left.Stats()\n\tstats.Add(itr.right.Stats())\n\treturn stats\n}\n\nfunc (itr *booleanFloatExprIterator) Close() error {\n\titr.left.Close()\n\titr.right.Close()\n\treturn nil\n}\n\nfunc (itr *booleanFloatExprIterator) Next() (*FloatPoint, error) {\n\tfor {\n\t\ta, b, err := itr.next()\n\t\tif err != nil || (a == nil && b == nil) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If any of these are nil and we are using fill(none), skip these points.\n\t\tif (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If one of the two points is nil, we need to fill it with a fake nil\n\t\t// point that has the same name, tags, and time as the other point.\n\t\t// There should never be a time when both of these are nil.\n\t\tif a == nil {\n\t\t\tp := *b\n\t\t\ta = &p\n\t\t\ta.Value = false\n\t\t\ta.Nil = true\n\t\t} else if b == nil {\n\t\t\tp := *a\n\t\t\tb = &p\n\t\t\tb.Value = false\n\t\t\tb.Nil = true\n\t\t}\n\n\t\t// If a value is nil, use the fill values if the fill value is non-nil.\n\t\tif a.Nil && !itr.points[0].Nil {\n\t\t\ta.Value = itr.points[0].Value\n\t\t\ta.Nil = false\n\t\t}\n\t\tif b.Nil && !itr.points[1].Nil {\n\t\t\tb.Value = itr.points[1].Value\n\t\t\tb.Nil = false\n\t\t}\n\n\t\tif itr.storePrev {\n\t\t\titr.points[0], 
itr.points[1] = *a, *b\n\t\t}\n\n\t\tp := &FloatPoint{\n\t\t\tName:       a.Name,\n\t\t\tTags:       a.Tags,\n\t\t\tTime:       a.Time,\n\t\t\tNil:        a.Nil || b.Nil,\n\t\t\tAggregated: a.Aggregated,\n\t\t}\n\t\tif !p.Nil {\n\t\t\tp.Value = itr.fn(a.Value, b.Value)\n\t\t}\n\t\treturn p, nil\n\n\t}\n}\n\n// next returns the next points within each iterator. If the iterators are\n// uneven, it organizes them so only matching points are returned.\nfunc (itr *booleanFloatExprIterator) next() (a, b *BooleanPoint, err error) {\n\t// Retrieve the next value for both the left and right.\n\ta, err = itr.left.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tb, err = itr.right.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// If we have a point from both, make sure that they match each other.\n\tif a != nil && b != nil {\n\t\tif a.Name > b.Name {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Name < b.Name {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if ltags < rtags {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif a.Time > b.Time {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Time < b.Time {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\t}\n\treturn a, b, nil\n}\n\n// booleanFloatExprFunc creates or modifies a point by combining two\n// points. The point passed in may be modified and returned rather than\n// allocating a new point if possible. 
One of the points may be nil, but at\n// least one of the points will be non-nil.\ntype booleanFloatExprFunc func(a, b bool) float64\n\n// booleanReduceIntegerIterator executes a reducer for every interval and buffers the result.\ntype booleanReduceIntegerIterator struct {\n\tinput    *bufBooleanIterator\n\tcreate   func() (BooleanPointAggregator, IntegerPointEmitter)\n\tdims     []string\n\topt      IteratorOptions\n\tpoints   []IntegerPoint\n\tkeepTags bool\n}\n\nfunc newBooleanReduceIntegerIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, IntegerPointEmitter)) *booleanReduceIntegerIterator {\n\treturn &booleanReduceIntegerIterator{\n\t\tinput:  newBufBooleanIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *booleanReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *booleanReduceIntegerIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *booleanReduceIntegerIterator) Next() (*IntegerPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// booleanReduceIntegerPoint stores the reduced data for a name/tag combination.\ntype booleanReduceIntegerPoint struct {\n\tName       string\n\tTags       Tags\n\tAggregator BooleanPointAggregator\n\tEmitter    IntegerPointEmitter\n}\n\n// reduce executes fn once for every point in the next window.\n// The previous value for the dimension is passed to fn.\nfunc (itr *booleanReduceIntegerIterator) 
reduce() ([]IntegerPoint, error) {\n\t// Calculate next window.\n\tvar (\n\t\tstartTime, endTime int64\n\t\twindow             struct {\n\t\t\tname string\n\t\t\ttags string\n\t\t}\n\t)\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t} else if p.Nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unread the point so it can be processed.\n\t\titr.input.unread(p)\n\t\tstartTime, endTime = itr.opt.Window(p.Time)\n\t\twindow.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()\n\t\tbreak\n\t}\n\n\t// Create points by tags.\n\tm := make(map[string]*booleanReduceIntegerPoint)\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.NextInWindow(startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr == nil {\n\t\t\tbreak\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t} else if curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Ensure this point is within the same final window.\n\t\tif curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Retrieve the tags on this point for this level of the query.\n\t\t// This may be different than the bucket dimensions.\n\t\ttags := curr.Tags.Subset(itr.dims)\n\t\tid := tags.ID()\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &booleanReduceIntegerPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\tm[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateBoolean(curr)\n\t}\n\n\t// Reverse sort points by name & tag if our output is supposed to be ordered.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tif 
len(keys) > 1 && itr.opt.Ordered {\n\t\tsort.Sort(reverseStringSlice(keys))\n\t}\n\n\t// Assume the points are already sorted until proven otherwise.\n\tsortedByTime := true\n\t// Emit the points for each name & tag combination.\n\ta := make([]IntegerPoint, 0, len(m))\n\tfor _, k := range keys {\n\t\trp := m[k]\n\t\tpoints := rp.Emitter.Emit()\n\t\tfor i := len(points) - 1; i >= 0; i-- {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tif !itr.keepTags {\n\t\t\t\tpoints[i].Tags = rp.Tags\n\t\t\t}\n\t\t\t// Set the points time to the interval time if the reducer didn't provide one.\n\t\t\tif points[i].Time == ZeroTime {\n\t\t\t\tpoints[i].Time = startTime\n\t\t\t} else {\n\t\t\t\tsortedByTime = false\n\t\t\t}\n\t\t\ta = append(a, points[i])\n\t\t}\n\t}\n\n\t// Points may be out of order. Perform a stable sort by time if requested.\n\tif !sortedByTime && itr.opt.Ordered {\n\t\tsort.Stable(sort.Reverse(integerPointsByTime(a)))\n\t}\n\n\treturn a, nil\n}\n\n// booleanStreamIntegerIterator streams inputs into the iterator and emits points gradually.\ntype booleanStreamIntegerIterator struct {\n\tinput  *bufBooleanIterator\n\tcreate func() (BooleanPointAggregator, IntegerPointEmitter)\n\tdims   []string\n\topt    IteratorOptions\n\tm      map[string]*booleanReduceIntegerPoint\n\tpoints []IntegerPoint\n}\n\n// newBooleanStreamIntegerIterator returns a new instance of booleanStreamIntegerIterator.\nfunc newBooleanStreamIntegerIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, IntegerPointEmitter), opt IteratorOptions) *booleanStreamIntegerIterator {\n\treturn &booleanStreamIntegerIterator{\n\t\tinput:  newBufBooleanIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t\tm:      make(map[string]*booleanReduceIntegerPoint),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *booleanStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child 
iterators.\nfunc (itr *booleanStreamIntegerIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next value for the stream iterator.\nfunc (itr *booleanStreamIntegerIterator) Next() (*IntegerPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// reduce creates and manages aggregators for every point from the input.\n// After aggregating a point, it always tries to emit a value using the emitter.\nfunc (itr *booleanStreamIntegerIterator) reduce() ([]IntegerPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.Next()\n\t\tif curr == nil {\n\t\t\t// Close all of the aggregators to flush any remaining points to emit.\n\t\t\tvar points []IntegerPoint\n\t\t\tfor _, rp := range itr.m {\n\t\t\t\tif aggregator, ok := rp.Aggregator.(io.Closer); ok {\n\t\t\t\t\tif err := aggregator.Close(); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tpts := rp.Emitter.Emit()\n\t\t\t\t\tif len(pts) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor i := range pts {\n\t\t\t\t\t\tpts[i].Name = rp.Name\n\t\t\t\t\t\tpts[i].Tags = rp.Tags\n\t\t\t\t\t}\n\t\t\t\t\tpoints = append(points, pts...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Eliminate the aggregators and emitters.\n\t\t\titr.m = nil\n\t\t\treturn points, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags := curr.Tags.Subset(itr.dims)\n\n\t\tid := curr.Name\n\t\tif len(tags.m) > 0 {\n\t\t\tid += \"\\x00\" + tags.ID()\n\t\t}\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := itr.m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = 
&booleanReduceIntegerPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\titr.m[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateBoolean(curr)\n\n\t\t// Attempt to emit points from the aggregator.\n\t\tpoints := rp.Emitter.Emit()\n\t\tif len(points) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range points {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tpoints[i].Tags = rp.Tags\n\t\t}\n\t\treturn points, nil\n\t}\n}\n\n// booleanIntegerExprIterator executes a function to modify an existing point\n// for every output of the input iterator.\ntype booleanIntegerExprIterator struct {\n\tleft      *bufBooleanIterator\n\tright     *bufBooleanIterator\n\tfn        booleanIntegerExprFunc\n\tpoints    []BooleanPoint // must be size 2\n\tstorePrev bool\n}\n\nfunc newBooleanIntegerExprIterator(left, right BooleanIterator, opt IteratorOptions, fn func(a, b bool) int64) *booleanIntegerExprIterator {\n\tvar points []BooleanPoint\n\tswitch opt.Fill {\n\tcase NullFill, PreviousFill:\n\t\tpoints = []BooleanPoint{{Nil: true}, {Nil: true}}\n\tcase NumberFill:\n\t\tvalue := castToBoolean(opt.FillValue)\n\t\tpoints = []BooleanPoint{{Value: value}, {Value: value}}\n\t}\n\treturn &booleanIntegerExprIterator{\n\t\tleft:      newBufBooleanIterator(left),\n\t\tright:     newBufBooleanIterator(right),\n\t\tpoints:    points,\n\t\tfn:        fn,\n\t\tstorePrev: opt.Fill == PreviousFill,\n\t}\n}\n\nfunc (itr *booleanIntegerExprIterator) Stats() IteratorStats {\n\tstats := itr.left.Stats()\n\tstats.Add(itr.right.Stats())\n\treturn stats\n}\n\nfunc (itr *booleanIntegerExprIterator) Close() error {\n\titr.left.Close()\n\titr.right.Close()\n\treturn nil\n}\n\nfunc (itr *booleanIntegerExprIterator) Next() (*IntegerPoint, error) {\n\tfor {\n\t\ta, b, err := itr.next()\n\t\tif err != nil || (a == nil && b == nil) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If any of these are nil and we are using fill(none), skip these 
points.\n\t\tif (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If one of the two points is nil, we need to fill it with a fake nil\n\t\t// point that has the same name, tags, and time as the other point.\n\t\t// There should never be a time when both of these are nil.\n\t\tif a == nil {\n\t\t\tp := *b\n\t\t\ta = &p\n\t\t\ta.Value = false\n\t\t\ta.Nil = true\n\t\t} else if b == nil {\n\t\t\tp := *a\n\t\t\tb = &p\n\t\t\tb.Value = false\n\t\t\tb.Nil = true\n\t\t}\n\n\t\t// If a value is nil, use the fill values if the fill value is non-nil.\n\t\tif a.Nil && !itr.points[0].Nil {\n\t\t\ta.Value = itr.points[0].Value\n\t\t\ta.Nil = false\n\t\t}\n\t\tif b.Nil && !itr.points[1].Nil {\n\t\t\tb.Value = itr.points[1].Value\n\t\t\tb.Nil = false\n\t\t}\n\n\t\tif itr.storePrev {\n\t\t\titr.points[0], itr.points[1] = *a, *b\n\t\t}\n\n\t\tp := &IntegerPoint{\n\t\t\tName:       a.Name,\n\t\t\tTags:       a.Tags,\n\t\t\tTime:       a.Time,\n\t\t\tNil:        a.Nil || b.Nil,\n\t\t\tAggregated: a.Aggregated,\n\t\t}\n\t\tif !p.Nil {\n\t\t\tp.Value = itr.fn(a.Value, b.Value)\n\t\t}\n\t\treturn p, nil\n\n\t}\n}\n\n// next returns the next points within each iterator. 
If the iterators are\n// uneven, it organizes them so only matching points are returned.\nfunc (itr *booleanIntegerExprIterator) next() (a, b *BooleanPoint, err error) {\n\t// Retrieve the next value for both the left and right.\n\ta, err = itr.left.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tb, err = itr.right.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// If we have a point from both, make sure that they match each other.\n\tif a != nil && b != nil {\n\t\tif a.Name > b.Name {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Name < b.Name {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if ltags < rtags {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif a.Time > b.Time {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Time < b.Time {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\t}\n\treturn a, b, nil\n}\n\n// booleanIntegerExprFunc creates or modifies a point by combining two\n// points. The point passed in may be modified and returned rather than\n// allocating a new point if possible. 
One of the points may be nil, but at\n// least one of the points will be non-nil.\ntype booleanIntegerExprFunc func(a, b bool) int64\n\n// booleanReduceStringIterator executes a reducer for every interval and buffers the result.\ntype booleanReduceStringIterator struct {\n\tinput    *bufBooleanIterator\n\tcreate   func() (BooleanPointAggregator, StringPointEmitter)\n\tdims     []string\n\topt      IteratorOptions\n\tpoints   []StringPoint\n\tkeepTags bool\n}\n\nfunc newBooleanReduceStringIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, StringPointEmitter)) *booleanReduceStringIterator {\n\treturn &booleanReduceStringIterator{\n\t\tinput:  newBufBooleanIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *booleanReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *booleanReduceStringIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *booleanReduceStringIterator) Next() (*StringPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// booleanReduceStringPoint stores the reduced data for a name/tag combination.\ntype booleanReduceStringPoint struct {\n\tName       string\n\tTags       Tags\n\tAggregator BooleanPointAggregator\n\tEmitter    StringPointEmitter\n}\n\n// reduce executes fn once for every point in the next window.\n// The previous value for the dimension is passed to fn.\nfunc (itr *booleanReduceStringIterator) reduce() 
([]StringPoint, error) {\n\t// Calculate next window.\n\tvar (\n\t\tstartTime, endTime int64\n\t\twindow             struct {\n\t\t\tname string\n\t\t\ttags string\n\t\t}\n\t)\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t} else if p.Nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unread the point so it can be processed.\n\t\titr.input.unread(p)\n\t\tstartTime, endTime = itr.opt.Window(p.Time)\n\t\twindow.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()\n\t\tbreak\n\t}\n\n\t// Create points by tags.\n\tm := make(map[string]*booleanReduceStringPoint)\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.NextInWindow(startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr == nil {\n\t\t\tbreak\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t} else if curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Ensure this point is within the same final window.\n\t\tif curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Retrieve the tags on this point for this level of the query.\n\t\t// This may be different than the bucket dimensions.\n\t\ttags := curr.Tags.Subset(itr.dims)\n\t\tid := tags.ID()\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &booleanReduceStringPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\tm[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateBoolean(curr)\n\t}\n\n\t// Reverse sort points by name & tag if our output is supposed to be ordered.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tif len(keys) > 1 
&& itr.opt.Ordered {\n\t\tsort.Sort(reverseStringSlice(keys))\n\t}\n\n\t// Assume the points are already sorted until proven otherwise.\n\tsortedByTime := true\n\t// Emit the points for each name & tag combination.\n\ta := make([]StringPoint, 0, len(m))\n\tfor _, k := range keys {\n\t\trp := m[k]\n\t\tpoints := rp.Emitter.Emit()\n\t\tfor i := len(points) - 1; i >= 0; i-- {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tif !itr.keepTags {\n\t\t\t\tpoints[i].Tags = rp.Tags\n\t\t\t}\n\t\t\t// Set the points time to the interval time if the reducer didn't provide one.\n\t\t\tif points[i].Time == ZeroTime {\n\t\t\t\tpoints[i].Time = startTime\n\t\t\t} else {\n\t\t\t\tsortedByTime = false\n\t\t\t}\n\t\t\ta = append(a, points[i])\n\t\t}\n\t}\n\n\t// Points may be out of order. Perform a stable sort by time if requested.\n\tif !sortedByTime && itr.opt.Ordered {\n\t\tsort.Stable(sort.Reverse(stringPointsByTime(a)))\n\t}\n\n\treturn a, nil\n}\n\n// booleanStreamStringIterator streams inputs into the iterator and emits points gradually.\ntype booleanStreamStringIterator struct {\n\tinput  *bufBooleanIterator\n\tcreate func() (BooleanPointAggregator, StringPointEmitter)\n\tdims   []string\n\topt    IteratorOptions\n\tm      map[string]*booleanReduceStringPoint\n\tpoints []StringPoint\n}\n\n// newBooleanStreamStringIterator returns a new instance of booleanStreamStringIterator.\nfunc newBooleanStreamStringIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, StringPointEmitter), opt IteratorOptions) *booleanStreamStringIterator {\n\treturn &booleanStreamStringIterator{\n\t\tinput:  newBufBooleanIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t\tm:      make(map[string]*booleanReduceStringPoint),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *booleanStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr 
*booleanStreamStringIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next value for the stream iterator.\nfunc (itr *booleanStreamStringIterator) Next() (*StringPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// reduce creates and manages aggregators for every point from the input.\n// After aggregating a point, it always tries to emit a value using the emitter.\nfunc (itr *booleanStreamStringIterator) reduce() ([]StringPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.Next()\n\t\tif curr == nil {\n\t\t\t// Close all of the aggregators to flush any remaining points to emit.\n\t\t\tvar points []StringPoint\n\t\t\tfor _, rp := range itr.m {\n\t\t\t\tif aggregator, ok := rp.Aggregator.(io.Closer); ok {\n\t\t\t\t\tif err := aggregator.Close(); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tpts := rp.Emitter.Emit()\n\t\t\t\t\tif len(pts) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor i := range pts {\n\t\t\t\t\t\tpts[i].Name = rp.Name\n\t\t\t\t\t\tpts[i].Tags = rp.Tags\n\t\t\t\t\t}\n\t\t\t\t\tpoints = append(points, pts...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Eliminate the aggregators and emitters.\n\t\t\titr.m = nil\n\t\t\treturn points, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags := curr.Tags.Subset(itr.dims)\n\n\t\tid := curr.Name\n\t\tif len(tags.m) > 0 {\n\t\t\tid += \"\\x00\" + tags.ID()\n\t\t}\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := itr.m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = 
&booleanReduceStringPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\titr.m[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateBoolean(curr)\n\n\t\t// Attempt to emit points from the aggregator.\n\t\tpoints := rp.Emitter.Emit()\n\t\tif len(points) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range points {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tpoints[i].Tags = rp.Tags\n\t\t}\n\t\treturn points, nil\n\t}\n}\n\n// booleanStringExprIterator executes a function to modify an existing point\n// for every output of the input iterator.\ntype booleanStringExprIterator struct {\n\tleft      *bufBooleanIterator\n\tright     *bufBooleanIterator\n\tfn        booleanStringExprFunc\n\tpoints    []BooleanPoint // must be size 2\n\tstorePrev bool\n}\n\nfunc newBooleanStringExprIterator(left, right BooleanIterator, opt IteratorOptions, fn func(a, b bool) string) *booleanStringExprIterator {\n\tvar points []BooleanPoint\n\tswitch opt.Fill {\n\tcase NullFill, PreviousFill:\n\t\tpoints = []BooleanPoint{{Nil: true}, {Nil: true}}\n\tcase NumberFill:\n\t\tvalue := castToBoolean(opt.FillValue)\n\t\tpoints = []BooleanPoint{{Value: value}, {Value: value}}\n\t}\n\treturn &booleanStringExprIterator{\n\t\tleft:      newBufBooleanIterator(left),\n\t\tright:     newBufBooleanIterator(right),\n\t\tpoints:    points,\n\t\tfn:        fn,\n\t\tstorePrev: opt.Fill == PreviousFill,\n\t}\n}\n\nfunc (itr *booleanStringExprIterator) Stats() IteratorStats {\n\tstats := itr.left.Stats()\n\tstats.Add(itr.right.Stats())\n\treturn stats\n}\n\nfunc (itr *booleanStringExprIterator) Close() error {\n\titr.left.Close()\n\titr.right.Close()\n\treturn nil\n}\n\nfunc (itr *booleanStringExprIterator) Next() (*StringPoint, error) {\n\tfor {\n\t\ta, b, err := itr.next()\n\t\tif err != nil || (a == nil && b == nil) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If any of these are nil and we are using fill(none), skip these 
points.\n\t\tif (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If one of the two points is nil, we need to fill it with a fake nil\n\t\t// point that has the same name, tags, and time as the other point.\n\t\t// There should never be a time when both of these are nil.\n\t\tif a == nil {\n\t\t\tp := *b\n\t\t\ta = &p\n\t\t\ta.Value = false\n\t\t\ta.Nil = true\n\t\t} else if b == nil {\n\t\t\tp := *a\n\t\t\tb = &p\n\t\t\tb.Value = false\n\t\t\tb.Nil = true\n\t\t}\n\n\t\t// If a value is nil, use the fill values if the fill value is non-nil.\n\t\tif a.Nil && !itr.points[0].Nil {\n\t\t\ta.Value = itr.points[0].Value\n\t\t\ta.Nil = false\n\t\t}\n\t\tif b.Nil && !itr.points[1].Nil {\n\t\t\tb.Value = itr.points[1].Value\n\t\t\tb.Nil = false\n\t\t}\n\n\t\tif itr.storePrev {\n\t\t\titr.points[0], itr.points[1] = *a, *b\n\t\t}\n\n\t\tp := &StringPoint{\n\t\t\tName:       a.Name,\n\t\t\tTags:       a.Tags,\n\t\t\tTime:       a.Time,\n\t\t\tNil:        a.Nil || b.Nil,\n\t\t\tAggregated: a.Aggregated,\n\t\t}\n\t\tif !p.Nil {\n\t\t\tp.Value = itr.fn(a.Value, b.Value)\n\t\t}\n\t\treturn p, nil\n\n\t}\n}\n\n// next returns the next points within each iterator. 
If the iterators are\n// uneven, it organizes them so only matching points are returned.\nfunc (itr *booleanStringExprIterator) next() (a, b *BooleanPoint, err error) {\n\t// Retrieve the next value for both the left and right.\n\ta, err = itr.left.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tb, err = itr.right.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// If we have a point from both, make sure that they match each other.\n\tif a != nil && b != nil {\n\t\tif a.Name > b.Name {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Name < b.Name {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if ltags < rtags {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif a.Time > b.Time {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Time < b.Time {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\t}\n\treturn a, b, nil\n}\n\n// booleanStringExprFunc creates or modifies a point by combining two\n// points. The point passed in may be modified and returned rather than\n// allocating a new point if possible. 
One of the points may be nil, but at\n// least one of the points will be non-nil.\ntype booleanStringExprFunc func(a, b bool) string\n\n// booleanReduceBooleanIterator executes a reducer for every interval and buffers the result.\ntype booleanReduceBooleanIterator struct {\n\tinput    *bufBooleanIterator\n\tcreate   func() (BooleanPointAggregator, BooleanPointEmitter)\n\tdims     []string\n\topt      IteratorOptions\n\tpoints   []BooleanPoint\n\tkeepTags bool\n}\n\nfunc newBooleanReduceBooleanIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, BooleanPointEmitter)) *booleanReduceBooleanIterator {\n\treturn &booleanReduceBooleanIterator{\n\t\tinput:  newBufBooleanIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *booleanReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *booleanReduceBooleanIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *booleanReduceBooleanIterator) Next() (*BooleanPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// booleanReduceBooleanPoint stores the reduced data for a name/tag combination.\ntype booleanReduceBooleanPoint struct {\n\tName       string\n\tTags       Tags\n\tAggregator BooleanPointAggregator\n\tEmitter    BooleanPointEmitter\n}\n\n// reduce executes fn once for every point in the next window.\n// The previous value for the dimension is passed to fn.\nfunc (itr *booleanReduceBooleanIterator) 
reduce() ([]BooleanPoint, error) {\n\t// Calculate next window.\n\tvar (\n\t\tstartTime, endTime int64\n\t\twindow             struct {\n\t\t\tname string\n\t\t\ttags string\n\t\t}\n\t)\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t} else if p.Nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unread the point so it can be processed.\n\t\titr.input.unread(p)\n\t\tstartTime, endTime = itr.opt.Window(p.Time)\n\t\twindow.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()\n\t\tbreak\n\t}\n\n\t// Create points by tags.\n\tm := make(map[string]*booleanReduceBooleanPoint)\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.NextInWindow(startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr == nil {\n\t\t\tbreak\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t} else if curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Ensure this point is within the same final window.\n\t\tif curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Retrieve the tags on this point for this level of the query.\n\t\t// This may be different than the bucket dimensions.\n\t\ttags := curr.Tags.Subset(itr.dims)\n\t\tid := tags.ID()\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &booleanReduceBooleanPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\tm[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateBoolean(curr)\n\t}\n\n\t// Reverse sort points by name & tag if our output is supposed to be ordered.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tif 
len(keys) > 1 && itr.opt.Ordered {\n\t\tsort.Sort(reverseStringSlice(keys))\n\t}\n\n\t// Assume the points are already sorted until proven otherwise.\n\tsortedByTime := true\n\t// Emit the points for each name & tag combination.\n\ta := make([]BooleanPoint, 0, len(m))\n\tfor _, k := range keys {\n\t\trp := m[k]\n\t\tpoints := rp.Emitter.Emit()\n\t\tfor i := len(points) - 1; i >= 0; i-- {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tif !itr.keepTags {\n\t\t\t\tpoints[i].Tags = rp.Tags\n\t\t\t}\n\t\t\t// Set the points time to the interval time if the reducer didn't provide one.\n\t\t\tif points[i].Time == ZeroTime {\n\t\t\t\tpoints[i].Time = startTime\n\t\t\t} else {\n\t\t\t\tsortedByTime = false\n\t\t\t}\n\t\t\ta = append(a, points[i])\n\t\t}\n\t}\n\n\t// Points may be out of order. Perform a stable sort by time if requested.\n\tif !sortedByTime && itr.opt.Ordered {\n\t\tsort.Stable(sort.Reverse(booleanPointsByTime(a)))\n\t}\n\n\treturn a, nil\n}\n\n// booleanStreamBooleanIterator streams inputs into the iterator and emits points gradually.\ntype booleanStreamBooleanIterator struct {\n\tinput  *bufBooleanIterator\n\tcreate func() (BooleanPointAggregator, BooleanPointEmitter)\n\tdims   []string\n\topt    IteratorOptions\n\tm      map[string]*booleanReduceBooleanPoint\n\tpoints []BooleanPoint\n}\n\n// newBooleanStreamBooleanIterator returns a new instance of booleanStreamBooleanIterator.\nfunc newBooleanStreamBooleanIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, BooleanPointEmitter), opt IteratorOptions) *booleanStreamBooleanIterator {\n\treturn &booleanStreamBooleanIterator{\n\t\tinput:  newBufBooleanIterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t\tm:      make(map[string]*booleanReduceBooleanPoint),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *booleanStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child 
iterators.\nfunc (itr *booleanStreamBooleanIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next value for the stream iterator.\nfunc (itr *booleanStreamBooleanIterator) Next() (*BooleanPoint, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// reduce creates and manages aggregators for every point from the input.\n// After aggregating a point, it always tries to emit a value using the emitter.\nfunc (itr *booleanStreamBooleanIterator) reduce() ([]BooleanPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.Next()\n\t\tif curr == nil {\n\t\t\t// Close all of the aggregators to flush any remaining points to emit.\n\t\t\tvar points []BooleanPoint\n\t\t\tfor _, rp := range itr.m {\n\t\t\t\tif aggregator, ok := rp.Aggregator.(io.Closer); ok {\n\t\t\t\t\tif err := aggregator.Close(); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tpts := rp.Emitter.Emit()\n\t\t\t\t\tif len(pts) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor i := range pts {\n\t\t\t\t\t\tpts[i].Name = rp.Name\n\t\t\t\t\t\tpts[i].Tags = rp.Tags\n\t\t\t\t\t}\n\t\t\t\t\tpoints = append(points, pts...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Eliminate the aggregators and emitters.\n\t\t\titr.m = nil\n\t\t\treturn points, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags := curr.Tags.Subset(itr.dims)\n\n\t\tid := curr.Name\n\t\tif len(tags.m) > 0 {\n\t\t\tid += \"\\x00\" + tags.ID()\n\t\t}\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := itr.m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = 
&booleanReduceBooleanPoint{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\titr.m[id] = rp\n\t\t}\n\t\trp.Aggregator.AggregateBoolean(curr)\n\n\t\t// Attempt to emit points from the aggregator.\n\t\tpoints := rp.Emitter.Emit()\n\t\tif len(points) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range points {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tpoints[i].Tags = rp.Tags\n\t\t}\n\t\treturn points, nil\n\t}\n}\n\n// booleanExprIterator executes a function to modify an existing point\n// for every output of the input iterator.\ntype booleanExprIterator struct {\n\tleft      *bufBooleanIterator\n\tright     *bufBooleanIterator\n\tfn        booleanExprFunc\n\tpoints    []BooleanPoint // must be size 2\n\tstorePrev bool\n}\n\nfunc newBooleanExprIterator(left, right BooleanIterator, opt IteratorOptions, fn func(a, b bool) bool) *booleanExprIterator {\n\tvar points []BooleanPoint\n\tswitch opt.Fill {\n\tcase NullFill, PreviousFill:\n\t\tpoints = []BooleanPoint{{Nil: true}, {Nil: true}}\n\tcase NumberFill:\n\t\tvalue := castToBoolean(opt.FillValue)\n\t\tpoints = []BooleanPoint{{Value: value}, {Value: value}}\n\t}\n\treturn &booleanExprIterator{\n\t\tleft:      newBufBooleanIterator(left),\n\t\tright:     newBufBooleanIterator(right),\n\t\tpoints:    points,\n\t\tfn:        fn,\n\t\tstorePrev: opt.Fill == PreviousFill,\n\t}\n}\n\nfunc (itr *booleanExprIterator) Stats() IteratorStats {\n\tstats := itr.left.Stats()\n\tstats.Add(itr.right.Stats())\n\treturn stats\n}\n\nfunc (itr *booleanExprIterator) Close() error {\n\titr.left.Close()\n\titr.right.Close()\n\treturn nil\n}\n\nfunc (itr *booleanExprIterator) Next() (*BooleanPoint, error) {\n\tfor {\n\t\ta, b, err := itr.next()\n\t\tif err != nil || (a == nil && b == nil) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If any of these are nil and we are using fill(none), skip these points.\n\t\tif (a == nil || a.Nil || b == nil || b.Nil) && 
itr.points == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If one of the two points is nil, we need to fill it with a fake nil\n\t\t// point that has the same name, tags, and time as the other point.\n\t\t// There should never be a time when both of these are nil.\n\t\tif a == nil {\n\t\t\tp := *b\n\t\t\ta = &p\n\t\t\ta.Value = false\n\t\t\ta.Nil = true\n\t\t} else if b == nil {\n\t\t\tp := *a\n\t\t\tb = &p\n\t\t\tb.Value = false\n\t\t\tb.Nil = true\n\t\t}\n\n\t\t// If a value is nil, use the fill values if the fill value is non-nil.\n\t\tif a.Nil && !itr.points[0].Nil {\n\t\t\ta.Value = itr.points[0].Value\n\t\t\ta.Nil = false\n\t\t}\n\t\tif b.Nil && !itr.points[1].Nil {\n\t\t\tb.Value = itr.points[1].Value\n\t\t\tb.Nil = false\n\t\t}\n\n\t\tif itr.storePrev {\n\t\t\titr.points[0], itr.points[1] = *a, *b\n\t\t}\n\n\t\tif a.Nil {\n\t\t\treturn a, nil\n\t\t} else if b.Nil {\n\t\t\treturn b, nil\n\t\t}\n\t\ta.Value = itr.fn(a.Value, b.Value)\n\t\treturn a, nil\n\n\t}\n}\n\n// next returns the next points within each iterator. 
If the iterators are\n// uneven, it organizes them so only matching points are returned.\nfunc (itr *booleanExprIterator) next() (a, b *BooleanPoint, err error) {\n\t// Retrieve the next value for both the left and right.\n\ta, err = itr.left.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tb, err = itr.right.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// If we have a point from both, make sure that they match each other.\n\tif a != nil && b != nil {\n\t\tif a.Name > b.Name {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Name < b.Name {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if ltags < rtags {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif a.Time > b.Time {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Time < b.Time {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\t}\n\treturn a, b, nil\n}\n\n// booleanExprFunc creates or modifies a point by combining two\n// points. The point passed in may be modified and returned rather than\n// allocating a new point if possible. 
One of the points may be nil, but at\n// least one of the points will be non-nil.\ntype booleanExprFunc func(a, b bool) bool\n\n// booleanTransformIterator executes a function to modify an existing point for every\n// output of the input iterator.\ntype booleanTransformIterator struct {\n\tinput BooleanIterator\n\tfn    booleanTransformFunc\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *booleanTransformIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *booleanTransformIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *booleanTransformIterator) Next() (*BooleanPoint, error) {\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if p != nil {\n\t\tp = itr.fn(p)\n\t}\n\treturn p, nil\n}\n\n// booleanTransformFunc creates or modifies a point.\n// The point passed in may be modified and returned rather than allocating a\n// new point if possible.\ntype booleanTransformFunc func(p *BooleanPoint) *BooleanPoint\n\n// booleanBoolTransformIterator executes a function to modify an existing point for every\n// output of the input iterator.\ntype booleanBoolTransformIterator struct {\n\tinput BooleanIterator\n\tfn    booleanBoolTransformFunc\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *booleanBoolTransformIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *booleanBoolTransformIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *booleanBoolTransformIterator) Next() (*BooleanPoint, error) {\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if p != nil {\n\t\treturn itr.fn(p), nil\n\t}\n\treturn nil, nil\n}\n\n// booleanBoolTransformFunc creates or modifies a 
point.\n// The point passed in may be modified and returned rather than allocating a\n// new point if possible.\ntype booleanBoolTransformFunc func(p *BooleanPoint) *BooleanPoint\n\n// booleanDedupeIterator only outputs unique points.\n// This differs from the DistinctIterator in that it compares all aux fields too.\n// This iterator is relatively inefficient and should only be used on small\n// datasets such as meta query results.\ntype booleanDedupeIterator struct {\n\tinput BooleanIterator\n\tm     map[string]struct{} // lookup of points already sent\n}\n\ntype booleanIteratorMapper struct {\n\te      *Emitter\n\tbuf    []interface{}\n\tdriver IteratorMap   // which iterator to use for the primary value, can be nil\n\tfields []IteratorMap // which iterator to use for an aux field\n\tpoint  BooleanPoint\n}\n\nfunc newBooleanIteratorMapper(itrs []Iterator, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *booleanIteratorMapper {\n\te := NewEmitter(itrs, opt.Ascending, 0)\n\te.OmitTime = true\n\treturn &booleanIteratorMapper{\n\t\te:      e,\n\t\tbuf:    make([]interface{}, len(itrs)),\n\t\tdriver: driver,\n\t\tfields: fields,\n\t\tpoint: BooleanPoint{\n\t\t\tAux: make([]interface{}, len(fields)),\n\t\t},\n\t}\n}\n\nfunc (itr *booleanIteratorMapper) Next() (*BooleanPoint, error) {\n\tt, name, tags, err := itr.e.loadBuf()\n\tif err != nil || t == ZeroTime {\n\t\treturn nil, err\n\t}\n\titr.point.Time = t\n\titr.point.Name = name\n\titr.point.Tags = tags\n\n\titr.e.readInto(t, name, tags, itr.buf)\n\tif itr.driver != nil {\n\t\tif v := itr.driver.Value(tags, itr.buf); v != nil {\n\t\t\tif v, ok := v.(bool); ok {\n\t\t\t\titr.point.Value = v\n\t\t\t\titr.point.Nil = false\n\t\t\t} else {\n\t\t\t\titr.point.Value = false\n\t\t\t\titr.point.Nil = true\n\t\t\t}\n\t\t} else {\n\t\t\titr.point.Value = false\n\t\t\titr.point.Nil = true\n\t\t}\n\t}\n\tfor i, f := range itr.fields {\n\t\titr.point.Aux[i] = f.Value(tags, itr.buf)\n\t}\n\treturn &itr.point, 
nil\n}\n\nfunc (itr *booleanIteratorMapper) Stats() IteratorStats {\n\tstats := IteratorStats{}\n\tfor _, itr := range itr.e.itrs {\n\t\tstats.Add(itr.Stats())\n\t}\n\treturn stats\n}\n\nfunc (itr *booleanIteratorMapper) Close() error {\n\treturn itr.e.Close()\n}\n\ntype booleanFilterIterator struct {\n\tinput BooleanIterator\n\tcond  Expr\n\topt   IteratorOptions\n\tm     map[string]interface{}\n}\n\nfunc newBooleanFilterIterator(input BooleanIterator, cond Expr, opt IteratorOptions) BooleanIterator {\n\t// Strip out time conditions from the WHERE clause.\n\t// TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct.\n\tn := RewriteFunc(CloneExpr(cond), func(n Node) Node {\n\t\tswitch n := n.(type) {\n\t\tcase *BinaryExpr:\n\t\t\tif n.LHS.String() == \"time\" {\n\t\t\t\treturn &BooleanLiteral{Val: true}\n\t\t\t}\n\t\t}\n\t\treturn n\n\t})\n\n\tcond, _ = n.(Expr)\n\tif cond == nil {\n\t\treturn input\n\t} else if n, ok := cond.(*BooleanLiteral); ok && n.Val {\n\t\treturn input\n\t}\n\n\treturn &booleanFilterIterator{\n\t\tinput: input,\n\t\tcond:  cond,\n\t\topt:   opt,\n\t\tm:     make(map[string]interface{}),\n\t}\n}\n\nfunc (itr *booleanFilterIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *booleanFilterIterator) Close() error         { return itr.input.Close() }\n\nfunc (itr *booleanFilterIterator) Next() (*BooleanPoint, error) {\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor i, ref := range itr.opt.Aux {\n\t\t\titr.m[ref.Val] = p.Aux[i]\n\t\t}\n\t\tfor k, v := range p.Tags.KeyValues() {\n\t\t\titr.m[k] = v\n\t\t}\n\n\t\tif !EvalBool(itr.cond, itr.m) {\n\t\t\tcontinue\n\t\t}\n\t\treturn p, nil\n\t}\n}\n\n// newBooleanDedupeIterator returns a new instance of booleanDedupeIterator.\nfunc newBooleanDedupeIterator(input BooleanIterator) *booleanDedupeIterator {\n\treturn &booleanDedupeIterator{\n\t\tinput: input,\n\t\tm:     
make(map[string]struct{}),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *booleanDedupeIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *booleanDedupeIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next unique point from the input iterator.\nfunc (itr *booleanDedupeIterator) Next() (*BooleanPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\tp, err := itr.input.Next()\n\t\tif p == nil || err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Serialize to bytes to store in lookup.\n\t\tbuf, err := proto.Marshal(encodeBooleanPoint(p))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If the point has already been output then move to the next point.\n\t\tif _, ok := itr.m[string(buf)]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Otherwise mark it as emitted and return point.\n\t\titr.m[string(buf)] = struct{}{}\n\t\treturn p, nil\n\t}\n}\n\n// booleanReaderIterator represents an iterator that streams from a reader.\ntype booleanReaderIterator struct {\n\tr   io.Reader\n\tdec *BooleanPointDecoder\n}\n\n// newBooleanReaderIterator returns a new instance of booleanReaderIterator.\nfunc newBooleanReaderIterator(r io.Reader, stats IteratorStats) *booleanReaderIterator {\n\tdec := NewBooleanPointDecoder(r)\n\tdec.stats = stats\n\n\treturn &booleanReaderIterator{\n\t\tr:   r,\n\t\tdec: dec,\n\t}\n}\n\n// Stats returns stats about points processed.\nfunc (itr *booleanReaderIterator) Stats() IteratorStats { return itr.dec.stats }\n\n// Close closes the underlying reader, if applicable.\nfunc (itr *booleanReaderIterator) Close() error {\n\tif r, ok := itr.r.(io.ReadCloser); ok {\n\t\treturn r.Close()\n\t}\n\treturn nil\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *booleanReaderIterator) Next() (*BooleanPoint, error) {\n\t// OPTIMIZE(benbjohnson): Reuse point on iterator.\n\n\t// Unmarshal next point.\n\tp := 
&BooleanPoint{}\n\tif err := itr.dec.DecodeBooleanPoint(p); err == io.EOF {\n\t\treturn nil, nil\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}\n\n// IteratorEncoder is an encoder for encoding an iterator's points to w.\ntype IteratorEncoder struct {\n\tw io.Writer\n\n\t// Frequency with which stats are emitted.\n\tStatsInterval time.Duration\n}\n\n// NewIteratorEncoder encodes an iterator's points to w.\nfunc NewIteratorEncoder(w io.Writer) *IteratorEncoder {\n\treturn &IteratorEncoder{\n\t\tw: w,\n\n\t\tStatsInterval: DefaultStatsInterval,\n\t}\n}\n\n// EncodeIterator encodes and writes all of itr's points to the underlying writer.\nfunc (enc *IteratorEncoder) EncodeIterator(itr Iterator) error {\n\tswitch itr := itr.(type) {\n\tcase FloatIterator:\n\t\treturn enc.encodeFloatIterator(itr)\n\tcase IntegerIterator:\n\t\treturn enc.encodeIntegerIterator(itr)\n\tcase StringIterator:\n\t\treturn enc.encodeStringIterator(itr)\n\tcase BooleanIterator:\n\t\treturn enc.encodeBooleanIterator(itr)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported iterator for encoder: %T\", itr))\n\t}\n}\n\n// encodeFloatIterator encodes all points from itr to the underlying writer.\nfunc (enc *IteratorEncoder) encodeFloatIterator(itr FloatIterator) error {\n\tticker := time.NewTicker(enc.StatsInterval)\n\tdefer ticker.Stop()\n\n\t// Emit initial stats.\n\tif err := enc.encodeStats(itr.Stats()); err != nil {\n\t\treturn err\n\t}\n\n\t// Continually stream points from the iterator into the encoder.\n\tpenc := NewFloatPointEncoder(enc.w)\n\tfor {\n\t\t// Emit stats periodically.\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif err := enc.encodeStats(itr.Stats()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t}\n\n\t\t// Retrieve the next point from the iterator.\n\t\tp, err := itr.Next()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t} else if p == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Write the point to the point encoder.\n\t\tif err := 
penc.EncodeFloatPoint(p); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Emit final stats.\n\tif err := enc.encodeStats(itr.Stats()); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// encodeIntegerIterator encodes all points from itr to the underlying writer.\nfunc (enc *IteratorEncoder) encodeIntegerIterator(itr IntegerIterator) error {\n\tticker := time.NewTicker(enc.StatsInterval)\n\tdefer ticker.Stop()\n\n\t// Emit initial stats.\n\tif err := enc.encodeStats(itr.Stats()); err != nil {\n\t\treturn err\n\t}\n\n\t// Continually stream points from the iterator into the encoder.\n\tpenc := NewIntegerPointEncoder(enc.w)\n\tfor {\n\t\t// Emit stats periodically.\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif err := enc.encodeStats(itr.Stats()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t}\n\n\t\t// Retrieve the next point from the iterator.\n\t\tp, err := itr.Next()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t} else if p == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Write the point to the point encoder.\n\t\tif err := penc.EncodeIntegerPoint(p); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Emit final stats.\n\tif err := enc.encodeStats(itr.Stats()); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// encodeStringIterator encodes all points from itr to the underlying writer.\nfunc (enc *IteratorEncoder) encodeStringIterator(itr StringIterator) error {\n\tticker := time.NewTicker(enc.StatsInterval)\n\tdefer ticker.Stop()\n\n\t// Emit initial stats.\n\tif err := enc.encodeStats(itr.Stats()); err != nil {\n\t\treturn err\n\t}\n\n\t// Continually stream points from the iterator into the encoder.\n\tpenc := NewStringPointEncoder(enc.w)\n\tfor {\n\t\t// Emit stats periodically.\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif err := enc.encodeStats(itr.Stats()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t}\n\n\t\t// Retrieve the next point from the iterator.\n\t\tp, err := itr.Next()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t} 
else if p == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Write the point to the point encoder.\n\t\tif err := penc.EncodeStringPoint(p); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Emit final stats.\n\tif err := enc.encodeStats(itr.Stats()); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// encodeBooleanIterator encodes all points from itr to the underlying writer.\nfunc (enc *IteratorEncoder) encodeBooleanIterator(itr BooleanIterator) error {\n\tticker := time.NewTicker(enc.StatsInterval)\n\tdefer ticker.Stop()\n\n\t// Emit initial stats.\n\tif err := enc.encodeStats(itr.Stats()); err != nil {\n\t\treturn err\n\t}\n\n\t// Continually stream points from the iterator into the encoder.\n\tpenc := NewBooleanPointEncoder(enc.w)\n\tfor {\n\t\t// Emit stats periodically.\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif err := enc.encodeStats(itr.Stats()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t}\n\n\t\t// Retrieve the next point from the iterator.\n\t\tp, err := itr.Next()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t} else if p == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Write the point to the point encoder.\n\t\tif err := penc.EncodeBooleanPoint(p); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Emit final stats.\n\tif err := enc.encodeStats(itr.Stats()); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// encode a stats object in the point stream.\nfunc (enc *IteratorEncoder) encodeStats(stats IteratorStats) error {\n\tbuf, err := proto.Marshal(&internal.Point{\n\t\tName: proto.String(\"\"),\n\t\tTags: proto.String(\"\"),\n\t\tTime: proto.Int64(0),\n\t\tNil:  proto.Bool(false),\n\n\t\tStats: encodeIteratorStats(&stats),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {\n\t\treturn err\n\t}\n\tif _, err := enc.w.Write(buf); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl",
    "content": "package influxql\n\nimport (\n\t\"container/heap\"\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/gogo/protobuf/proto\"\n\tinternal \"github.com/influxdata/influxdb/influxql/internal\"\n)\n\n// DefaultStatsInterval is the default value for IteratorEncoder.StatsInterval.\nconst DefaultStatsInterval = 10 * time.Second\n\n{{with $types := .}}{{range $k := $types}}\n\n// {{$k.Name}}Iterator represents a stream of {{$k.name}} points.\ntype {{$k.Name}}Iterator interface {\n\tIterator\n\tNext() (*{{$k.Name}}Point, error)\n}\n\n// new{{$k.Name}}Iterators converts a slice of Iterator to a slice of {{$k.Name}}Iterator.\n// Drop and closes any iterator in itrs that is not a {{$k.Name}}Iterator and cannot\n// be cast to a {{$k.Name}}Iterator.\nfunc new{{$k.Name}}Iterators(itrs []Iterator) []{{$k.Name}}Iterator {\n\ta := make([]{{$k.Name}}Iterator, 0, len(itrs))\n\tfor _, itr := range itrs {\n\t\tswitch itr := itr.(type) {\n\t\tcase {{$k.Name}}Iterator:\n\t\t\ta = append(a, itr)\n{{if eq .Name \"Float\"}}\n\t\tcase IntegerIterator:\n\t\t\ta = append(a, &integerFloatCastIterator{input: itr})\n{{end}}\n\t\tdefault:\n\t\t\titr.Close()\n\t\t}\n\t}\n\treturn a\n}\n\n\n// buf{{$k.Name}}Iterator represents a buffered {{$k.Name}}Iterator.\ntype buf{{$k.Name}}Iterator struct {\n\titr {{$k.Name}}Iterator\n\tbuf *{{$k.Name}}Point\n}\n\n// newBuf{{$k.Name}}Iterator returns a buffered {{$k.Name}}Iterator.\nfunc newBuf{{$k.Name}}Iterator(itr {{$k.Name}}Iterator) *buf{{$k.Name}}Iterator {\n\treturn &buf{{$k.Name}}Iterator{itr: itr}\n}\n\n// Stats returns statistics from the input iterator.\nfunc (itr *buf{{$k.Name}}Iterator) Stats() IteratorStats { return itr.itr.Stats() }\n\n// Close closes the underlying iterator.\nfunc (itr *buf{{$k.Name}}Iterator) Close() error { return itr.itr.Close() }\n\n// peek returns the next point without removing it from the iterator.\nfunc (itr *buf{{$k.Name}}Iterator) peek() (*{{$k.Name}}Point, 
error) {\n\tp, err := itr.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\titr.unread(p)\n\treturn p, nil\n}\n\n// peekTime returns the time of the next point.\n// Returns zero time if no more points available.\nfunc (itr *buf{{$k.Name}}Iterator) peekTime() (int64, error) {\n\tp, err := itr.peek()\n\tif p == nil || err != nil {\n\t\treturn ZeroTime, err\n\t}\n\treturn p.Time, nil\n}\n\n// Next returns the current buffer, if exists, or calls the underlying iterator.\nfunc (itr *buf{{$k.Name}}Iterator) Next() (*{{$k.Name}}Point, error) {\n\tbuf := itr.buf\n\tif buf != nil {\n\t\titr.buf = nil\n\t\treturn buf, nil\n\t}\n\treturn itr.itr.Next()\n}\n\n// NextInWindow returns the next value if it is between [startTime, endTime).\n// If the next value is outside the range then it is moved to the buffer.\nfunc (itr *buf{{$k.Name}}Iterator) NextInWindow(startTime, endTime int64) (*{{$k.Name}}Point, error) {\n\tv, err := itr.Next()\n\tif v == nil || err != nil {\n\t\treturn nil, err\n\t} else if t := v.Time; t >= endTime || t < startTime {\n\t\titr.unread(v)\n\t\treturn nil, nil\n\t}\n\treturn v, nil\n}\n\n// unread sets v to the buffer. 
It is read on the next call to Next().\nfunc (itr *buf{{$k.Name}}Iterator) unread(v *{{$k.Name}}Point) { itr.buf = v }\n\n// {{$k.name}}MergeIterator represents an iterator that combines multiple {{$k.name}} iterators.\ntype {{$k.name}}MergeIterator struct {\n\tinputs []{{$k.Name}}Iterator\n\theap   *{{$k.name}}MergeHeap\n\tinit   bool\n\n\t// Current iterator and window.\n\tcurr   *{{$k.name}}MergeHeapItem\n\twindow struct {\n\t\tname      string\n\t\ttags      string\n\t\tstartTime int64\n\t\tendTime   int64\n\t}\n}\n\n// new{{$k.Name}}MergeIterator returns a new instance of {{$k.name}}MergeIterator.\nfunc new{{$k.Name}}MergeIterator(inputs []{{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}MergeIterator {\n\titr := &{{$k.name}}MergeIterator{\n\t\tinputs: inputs,\n\t\theap: &{{$k.name}}MergeHeap{\n\t\t\titems: make([]*{{$k.name}}MergeHeapItem, 0, len(inputs)),\n\t\t\topt:   opt,\n\t\t},\n\t}\n\n\t// Initialize heap items.\n\tfor _, input := range inputs {\n\t\t// Wrap in buffer, ignore any inputs without anymore points.\n\t\tbufInput := newBuf{{$k.Name}}Iterator(input)\n\n\t\t// Append to the heap.\n\t\titr.heap.items = append(itr.heap.items, &{{$k.name}}MergeHeapItem{itr: bufInput})\n\t}\n\n\treturn itr\n}\n\n// Stats returns an aggregation of stats from the underlying iterators.\nfunc (itr *{{$k.name}}MergeIterator) Stats() IteratorStats {\n\tvar stats IteratorStats\n\tfor _, input := range itr.inputs {\n\t\tstats.Add(input.Stats())\n\t}\n\treturn stats\n}\n\n// Close closes the underlying iterators.\nfunc (itr *{{$k.name}}MergeIterator) Close() error {\n\tfor _, input := range itr.inputs {\n\t\tinput.Close()\n\t}\n\titr.curr = nil\n\titr.inputs = nil\n\titr.heap.items = nil\n\treturn nil\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *{{$k.name}}MergeIterator) Next() (*{{$k.Name}}Point, error) {\n\t// Initialize the heap. 
This needs to be done lazily on the first call to this iterator\n\t// so that iterator initialization done through the Select() call returns quickly.\n\t// Queries can only be interrupted after the Select() call completes so any operations\n\t// done during iterator creation cannot be interrupted, which is why we do it here\n\t// instead so an interrupt can happen while initializing the heap.\n\tif !itr.init {\n\t\titems := itr.heap.items\n\t\titr.heap.items = make([]*{{$k.name}}MergeHeapItem, 0, len(items))\n\t\tfor _, item := range items {\n\t\t\tif p, err := item.itr.peek(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else if p == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\titr.heap.items = append(itr.heap.items, item)\n\t\t}\n\t\theap.Init(itr.heap)\n\t\titr.init = true\n\t}\n\n\tfor {\n\t\t// Retrieve the next iterator if we don't have one.\n\t\tif itr.curr == nil {\n\t\t\tif len(itr.heap.items) == 0 {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t\titr.curr = heap.Pop(itr.heap).(*{{$k.name}}MergeHeapItem)\n\n\t\t\t// Read point and set current window.\n\t\t\tp, err := itr.curr.itr.Next()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ttags := p.Tags.Subset(itr.heap.opt.Dimensions)\n\t\t\titr.window.name, itr.window.tags = p.Name, tags.ID()\n\t\t\titr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time)\n\t\t\treturn p, nil\n\t\t}\n\n\t\t// Read the next point from the current iterator.\n\t\tp, err := itr.curr.itr.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If there are no more points then remove iterator from heap and find next.\n\t\tif p == nil {\n\t\t\titr.curr = nil\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check if the point is inside of our current window.\n\t\tinWindow := true\n\t\tif window := itr.window; window.name != p.Name {\n\t\t\tinWindow = false\n\t\t} else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() {\n\t\t\tinWindow = false\n\t\t} else if opt := itr.heap.opt; opt.Ascending && 
p.Time >= window.endTime {\n\t\t\tinWindow = false\n\t\t} else if !opt.Ascending && p.Time < window.startTime {\n\t\t\tinWindow = false\n\t\t}\n\n\t\t// If it's outside our window then push iterator back on the heap and find new iterator.\n\t\tif !inWindow {\n\t\t\titr.curr.itr.unread(p)\n\t\t\theap.Push(itr.heap, itr.curr)\n\t\t\titr.curr = nil\n\t\t\tcontinue\n\t\t}\n\n\t\treturn p, nil\n\t}\n}\n\n// {{$k.name}}MergeHeap represents a heap of {{$k.name}}MergeHeapItems.\n// Items are sorted by their next window and then by name/tags.\ntype {{$k.name}}MergeHeap struct {\n\topt   IteratorOptions\n\titems []*{{$k.name}}MergeHeapItem\n}\n\nfunc (h *{{$k.name}}MergeHeap) Len() int      { return len(h.items) }\nfunc (h *{{$k.name}}MergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }\nfunc (h *{{$k.name}}MergeHeap) Less(i, j int) bool {\n\tx, err := h.items[i].itr.peek()\n\tif err != nil {\n\t\treturn true\n\t}\n\ty, err := h.items[j].itr.peek()\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tif h.opt.Ascending {\n\t\tif x.Name != y.Name {\n\t\t\treturn x.Name < y.Name\n\t\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {\n\t\t\treturn xTags.ID() < yTags.ID()\n\t\t}\n\t} else {\n\t\tif x.Name != y.Name {\n\t\t\treturn x.Name > y.Name\n\t\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() {\n\t\t\treturn xTags.ID() > yTags.ID()\n\t\t}\n\t}\n\n\txt, _ := h.opt.Window(x.Time)\n\tyt, _ := h.opt.Window(y.Time)\n\n\tif h.opt.Ascending {\n\t\treturn xt < yt\n\t}\n\treturn xt > yt\n}\n\n\nfunc (h *{{$k.name}}MergeHeap) Push(x interface{}) {\n\th.items = append(h.items, x.(*{{$k.name}}MergeHeapItem))\n}\n\nfunc (h *{{$k.name}}MergeHeap) Pop() interface{} {\n\told := h.items\n\tn := len(old)\n\titem := old[n-1]\n\th.items = old[0 : n-1]\n\treturn item\n}\n\ntype {{$k.name}}MergeHeapItem struct {\n\titr 
*buf{{$k.Name}}Iterator\n}\n\n// {{$k.name}}SortedMergeIterator is an iterator that sorts and merges multiple iterators into one.\ntype {{$k.name}}SortedMergeIterator struct {\n\tinputs []{{$k.Name}}Iterator\n\theap   *{{$k.name}}SortedMergeHeap\n\tinit   bool\n}\n\n// new{{$k.Name}}SortedMergeIterator returns an instance of {{$k.name}}SortedMergeIterator.\nfunc new{{$k.Name}}SortedMergeIterator(inputs []{{$k.Name}}Iterator, opt IteratorOptions) Iterator {\n\titr := &{{$k.name}}SortedMergeIterator{\n\t\tinputs: inputs,\n\t\theap:   &{{$k.name}}SortedMergeHeap{\n\t\t\titems: make([]*{{$k.name}}SortedMergeHeapItem, 0, len(inputs)),\n\t\t\topt:   opt,\n\t\t},\n\t}\n\n\t// Initialize heap items.\n\tfor _, input := range inputs {\n\t\t// Append to the heap.\n\t\titr.heap.items = append(itr.heap.items, &{{$k.name}}SortedMergeHeapItem{itr: input})\n\t}\n\n\treturn itr\n}\n\n// Stats returns an aggregation of stats from the underlying iterators.\nfunc (itr *{{$k.name}}SortedMergeIterator) Stats() IteratorStats {\n\tvar stats IteratorStats\n\tfor _, input := range itr.inputs {\n\t\tstats.Add(input.Stats())\n\t}\n\treturn stats\n}\n\n// Close closes the underlying iterators.\nfunc (itr *{{$k.name}}SortedMergeIterator) Close() error {\n\tfor _, input := range itr.inputs {\n\t\tinput.Close()\n\t}\n\treturn nil\n}\n\n// Next returns the next points from the iterator.\nfunc (itr *{{$k.name}}SortedMergeIterator) Next() (*{{$k.Name}}Point, error) { return itr.pop() }\n\n// pop returns the next point from the heap.\n// Reads the next point from item's cursor and puts it back on the heap.\nfunc (itr *{{$k.name}}SortedMergeIterator) pop() (*{{$k.Name}}Point, error) {\n\t// Initialize the heap. 
See the MergeIterator to see why this has to be done lazily.\n\tif !itr.init {\n\t\titems := itr.heap.items\n\t\titr.heap.items = make([]*{{$k.name}}SortedMergeHeapItem, 0, len(items))\n\t\tfor _, item := range items {\n\t\t\tvar err error\n\t\t\tif item.point, err = item.itr.Next(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else if item.point == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\titr.heap.items = append(itr.heap.items, item)\n\t\t}\n\t\theap.Init(itr.heap)\n\t\titr.init = true\n\t}\n\n\tif len(itr.heap.items) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// Read the next item from the heap.\n\titem := heap.Pop(itr.heap).(*{{$k.name}}SortedMergeHeapItem)\n\tif item.err != nil {\n\t\treturn nil, item.err\n\t} else if item.point == nil {\n\t\treturn nil, nil\n\t}\n\n\t// Copy the point for return.\n\tp := item.point.Clone()\n\n\t// Read the next item from the cursor. Push back to heap if one exists.\n\tif item.point, item.err = item.itr.Next(); item.point != nil {\n\t\theap.Push(itr.heap, item)\n\t}\n\n\treturn p, nil\n}\n\n// {{$k.name}}SortedMergeHeap represents a heap of {{$k.name}}SortedMergeHeapItems.\ntype {{$k.name}}SortedMergeHeap struct {\n\topt   IteratorOptions\n\titems []*{{$k.name}}SortedMergeHeapItem\n}\n\nfunc (h *{{$k.name}}SortedMergeHeap) Len() int      { return len(h.items) }\nfunc (h *{{$k.name}}SortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] }\nfunc (h *{{$k.name}}SortedMergeHeap) Less(i, j int) bool {\n\tx, y := h.items[i].point, h.items[j].point\n\n\tif h.opt.Ascending {\n\t\tif x.Name != y.Name {\n\t\t\treturn x.Name < y.Name\n\t\t} else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {\n\t\t\treturn xTags.ID() < yTags.ID()\n\t\t}\n\t\treturn x.Time < y.Time\n\t}\n\n\tif x.Name != y.Name {\n\t\treturn x.Name > y.Name\n  } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) {\n\t\treturn 
xTags.ID() > yTags.ID()\n\t}\n\treturn x.Time > y.Time\n}\n\nfunc (h *{{$k.name}}SortedMergeHeap) Push(x interface{}) {\n\th.items = append(h.items, x.(*{{$k.name}}SortedMergeHeapItem))\n}\n\nfunc (h *{{$k.name}}SortedMergeHeap) Pop() interface{} {\n\told := h.items\n\tn := len(old)\n\titem := old[n-1]\n\th.items = old[0 : n-1]\n\treturn item\n}\n\ntype {{$k.name}}SortedMergeHeapItem struct {\n\tpoint     *{{$k.Name}}Point\n\terr       error\n\titr       {{$k.Name}}Iterator\n}\n\n// {{$k.name}}ParallelIterator represents an iterator that pulls data in a separate goroutine.\ntype {{$k.name}}ParallelIterator struct {\n\tinput   {{$k.Name}}Iterator\n\tch      chan {{$k.name}}PointError\n\n\tonce    sync.Once\n\tclosing chan struct{}\n\twg sync.WaitGroup\n}\n\n// new{{$k.Name}}ParallelIterator returns a new instance of {{$k.name}}ParallelIterator.\nfunc new{{$k.Name}}ParallelIterator(input {{$k.Name}}Iterator) *{{$k.name}}ParallelIterator {\n\titr := &{{$k.name}}ParallelIterator{\n\t\tinput:   input,\n\t\tch:      make(chan {{$k.name}}PointError, 256),\n\t\tclosing: make(chan struct{}),\n\t}\n\titr.wg.Add(1)\n\tgo itr.monitor()\n\treturn itr\n}\n\n// Stats returns stats from the underlying iterator.\nfunc (itr *{{$k.name}}ParallelIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the underlying iterators.\nfunc (itr *{{$k.name}}ParallelIterator) Close() error {\n\titr.once.Do(func() { close(itr.closing) })\n\titr.wg.Wait()\n\treturn itr.input.Close()\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *{{$k.name}}ParallelIterator) Next() (*{{$k.Name}}Point, error) {\n\tv, ok := <-itr.ch\n\tif !ok {\n\t\treturn nil, io.EOF\n\t}\n\treturn v.point, v.err\n}\n\n// monitor runs in a separate goroutine and actively pulls the next point.\nfunc (itr *{{$k.name}}ParallelIterator) monitor()  {\n\tdefer close(itr.ch)\n\tdefer itr.wg.Done()\n\n\tfor {\n\t\t// Read next point.\n\t\tp, err := itr.input.Next()\n\t\tif p != nil {\n\t\t\tp = 
p.Clone()\n\t\t}\n\n\t\tselect {\n\t\tcase <-itr.closing:\n\t\t\treturn\n\t\tcase itr.ch <- {{$k.name}}PointError{point: p, err: err}:\n\t\t}\n\t}\n}\n\ntype {{$k.name}}PointError struct {\n\tpoint *{{$k.Name}}Point\n\terr   error\n}\n\n// {{$k.name}}LimitIterator represents an iterator that limits points per group.\ntype {{$k.name}}LimitIterator struct {\n\tinput {{$k.Name}}Iterator\n\topt   IteratorOptions\n\tn     int\n\n\tprev struct {\n\t\tname string\n\t\ttags Tags\n\t}\n}\n\n// new{{$k.Name}}LimitIterator returns a new instance of {{$k.name}}LimitIterator.\nfunc new{{$k.Name}}LimitIterator(input {{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}LimitIterator {\n\treturn &{{$k.name}}LimitIterator{\n\t\tinput: input,\n\t\topt:   opt,\n\t}\n}\n\n// Stats returns stats from the underlying iterator.\nfunc (itr *{{$k.name}}LimitIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the underlying iterators.\nfunc (itr *{{$k.name}}LimitIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next point from the iterator.\nfunc (itr *{{$k.name}}LimitIterator) Next() (*{{$k.Name}}Point, error) {\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif p == nil || err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Reset window and counter if a new window is encountered.\n\t\tif p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) {\n\t\t\titr.prev.name = p.Name\n\t\t\titr.prev.tags = p.Tags\n\t\t\titr.n = 0\n\t\t}\n\n\t\t// Increment counter.\n\t\titr.n++\n\n\t\t// Read next point if not beyond the offset.\n\t\tif itr.n <= itr.opt.Offset {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Read next point if we're beyond the limit.\n\t\tif itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn p, nil\n\t}\n}\n\ntype {{$k.name}}FillIterator struct {\n\tinput     *buf{{$k.Name}}Iterator\n\tprev      {{$k.Name}}Point\n\tstartTime int64\n\tendTime   int64\n\tauxFields []interface{}\n\tinit      
bool\n\topt       IteratorOptions\n\n\twindow struct {\n\t\tname   string\n\t\ttags   Tags\n\t\ttime   int64\n\t\toffset int64\n\t}\n}\n\nfunc new{{$k.Name}}FillIterator(input {{$k.Name}}Iterator, expr Expr, opt IteratorOptions) *{{$k.name}}FillIterator {\n\tif opt.Fill == NullFill {\n\t\tif expr, ok := expr.(*Call); ok && expr.Name == \"count\" {\n\t\t\topt.Fill = NumberFill\n\t\t\topt.FillValue = {{$k.Zero}}\n\t\t}\n\t}\n\n\tvar startTime, endTime int64\n\tif opt.Ascending {\n\t\tstartTime, _ = opt.Window(opt.StartTime)\n\t\tendTime, _ = opt.Window(opt.EndTime)\n\t} else {\n\t\tstartTime, _ = opt.Window(opt.EndTime)\n\t\tendTime, _ = opt.Window(opt.StartTime)\n\t}\n\n\tvar auxFields []interface{}\n\tif len(opt.Aux) > 0 {\n\t\tauxFields = make([]interface{}, len(opt.Aux))\n\t}\n\n\treturn &{{$k.name}}FillIterator{\n\t\tinput:     newBuf{{$k.Name}}Iterator(input),\n\t\tprev:      {{$k.Name}}Point{Nil: true},\n\t\tstartTime: startTime,\n\t\tendTime:   endTime,\n\t\tauxFields: auxFields,\n\t\topt:       opt,\n\t}\n}\n\nfunc (itr *{{$k.name}}FillIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *{{$k.name}}FillIterator) Close() error { return itr.input.Close() }\n\nfunc (itr *{{$k.name}}FillIterator) Next() (*{{$k.Name}}Point, error) {\n\tif !itr.init {\n\t\tp, err := itr.input.peek()\n\t\tif p == nil || err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\titr.window.name, itr.window.tags = p.Name, p.Tags\n\t\titr.window.time = itr.startTime\n\t\tif itr.opt.Location != nil {\n\t\t\t_, itr.window.offset = itr.opt.Zone(itr.window.time)\n\t\t}\n\t\titr.init = true\n\t}\n\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check if the next point is outside of our window or is nil.\n\tfor p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() {\n\t\t// If we are inside of an interval, unread the point and continue below to\n\t\t// constructing a new point.\n\t\tif itr.opt.Ascending {\n\t\t\tif 
itr.window.time <= itr.endTime {\n\t\t\t\titr.input.unread(p)\n\t\t\t\tp = nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tif itr.window.time >= itr.endTime {\n\t\t\t\titr.input.unread(p)\n\t\t\t\tp = nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// We are *not* in a current interval. If there is no next point,\n\t\t// we are at the end of all intervals.\n\t\tif p == nil {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\t// Set the new interval.\n\t\titr.window.name, itr.window.tags = p.Name, p.Tags\n\t\titr.window.time = itr.startTime\n\t\tif itr.opt.Location != nil {\n\t\t\t_, itr.window.offset = itr.opt.Zone(itr.window.time)\n\t\t}\n\t\titr.prev = {{$k.Name}}Point{Nil: true}\n\t\tbreak\n\t}\n\n\t// Check if the point is our next expected point.\n\tif p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) {\n\t\tif p != nil {\n\t\t\titr.input.unread(p)\n\t\t}\n\n\t\tp = &{{$k.Name}}Point{\n\t\t\tName: itr.window.name,\n\t\t\tTags: itr.window.tags,\n\t\t\tTime: itr.window.time,\n\t\t\tAux:  itr.auxFields,\n\t\t}\n\n\t\tswitch itr.opt.Fill {\n\t\tcase LinearFill:\n\t\t\t{{- if or (eq $k.Name \"Float\") (eq $k.Name \"Integer\")}}\n\t\t\tif !itr.prev.Nil {\n\t\t\t\tnext, err := itr.input.peek()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t} else if next != nil && next.Name == itr.window.name && next.Tags.ID() == itr.window.tags.ID() {\n\t\t\t\t\tinterval := int64(itr.opt.Interval.Duration)\n\t\t\t\t\tstart := itr.window.time / interval\n\t\t\t\t\tp.Value = linear{{$k.Name}}(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value)\n\t\t\t\t} else {\n\t\t\t\t\tp.Nil = true\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tp.Nil = true\n\t\t\t}\n\t\t\t{{else}}\n\t\t\tfallthrough\n\t\t\t{{- end}}\n\t\tcase NullFill:\n\t\t\tp.Nil = true\n\t\tcase NumberFill:\n\t\t\tp.Value = castTo{{$k.Name}}(itr.opt.FillValue)\n\t\tcase PreviousFill:\n\t\t\tif !itr.prev.Nil {\n\t\t\t\tp.Value = 
itr.prev.Value\n\t\t\t\tp.Nil = itr.prev.Nil\n\t\t\t} else {\n\t\t\t\tp.Nil = true\n\t\t\t}\n\t\t}\n\t} else {\n\t\titr.prev = *p\n\t}\n\n\t// Advance the expected time. Do not advance to a new window here\n\t// as there may be lingering points with the same timestamp in the previous\n\t// window.\n\tif itr.opt.Ascending {\n\t\titr.window.time += int64(itr.opt.Interval.Duration)\n\t} else {\n\t\titr.window.time -= int64(itr.opt.Interval.Duration)\n\t}\n\n\t// Check to see if we have passed over an offset change and adjust the time\n\t// to account for this new offset.\n\tif itr.opt.Location != nil {\n\t\tif _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset {\n\t\t\tdiff := itr.window.offset - offset\n\t\t\tif abs(diff) < int64(itr.opt.Interval.Duration) {\n\t\t\t\titr.window.time += diff\n\t\t\t}\n\t\t\titr.window.offset = offset\n\t\t}\n\t}\n\treturn p, nil\n}\n\n// {{$k.name}}IntervalIterator represents a {{$k.name}} implementation of IntervalIterator.\ntype {{$k.name}}IntervalIterator struct {\n\tinput {{$k.Name}}Iterator\n\topt   IteratorOptions\n}\n\nfunc new{{$k.Name}}IntervalIterator(input {{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}IntervalIterator {\n\treturn &{{$k.name}}IntervalIterator{input: input, opt: opt}\n}\n\nfunc (itr *{{$k.name}}IntervalIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *{{$k.name}}IntervalIterator) Close() error { return itr.input.Close() }\n\nfunc (itr *{{$k.name}}IntervalIterator) Next() (*{{$k.Name}}Point, error) {\n\tp, err := itr.input.Next()\n\tif p == nil || err != nil {\n\t\treturn nil, err\n\t}\n\tp.Time, _ = itr.opt.Window(p.Time)\n\t// If we see the minimum allowable time, set the time to zero so we don't\n\t// break the default returned time for aggregate queries without times.\n\tif p.Time == MinTime {\n\t\tp.Time = 0\n\t}\n\treturn p, nil\n}\n\n// {{$k.name}}InterruptIterator represents a {{$k.name}} implementation of InterruptIterator.\ntype 
{{$k.name}}InterruptIterator struct {\n\tinput   {{$k.Name}}Iterator\n\tclosing <-chan struct{}\n\tcount   int\n}\n\nfunc new{{$k.Name}}InterruptIterator(input {{$k.Name}}Iterator, closing <-chan struct{}) *{{$k.name}}InterruptIterator {\n\treturn &{{$k.name}}InterruptIterator{input: input, closing: closing}\n}\n\nfunc (itr *{{$k.name}}InterruptIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *{{$k.name}}InterruptIterator) Close() error { return itr.input.Close() }\n\nfunc (itr *{{$k.name}}InterruptIterator) Next() (*{{$k.Name}}Point, error) {\n\t// Only check if the channel is closed every N points. This\n\t// intentionally checks on both 0 and N so that if the iterator\n\t// has been interrupted before the first point is emitted it will\n\t// not emit any points.\n\tif itr.count & 0xFF == 0xFF {\n\t\tselect {\n\t\tcase <-itr.closing:\n\t\t\treturn nil, itr.Close()\n\t\tdefault:\n\t\t\t// Reset iterator count to zero and fall through to emit the next point.\n\t\t\titr.count = 0\n\t\t}\n\t}\n\n\t// Increment the counter for every point read.\n\titr.count++\n\treturn itr.input.Next()\n}\n\n// {{$k.name}}CloseInterruptIterator represents a {{$k.name}} implementation of CloseInterruptIterator.\ntype {{$k.name}}CloseInterruptIterator struct {\n\tinput   {{$k.Name}}Iterator\n\tclosing <-chan struct{}\n\tdone    chan struct{}\n\tonce    sync.Once\n}\n\nfunc new{{$k.Name}}CloseInterruptIterator(input {{$k.Name}}Iterator, closing <-chan struct{}) *{{$k.name}}CloseInterruptIterator {\n\titr := &{{$k.name}}CloseInterruptIterator{\n\t\tinput:   input,\n\t\tclosing: closing,\n\t\tdone:    make(chan struct{}),\n\t}\n\tgo itr.monitor()\n\treturn itr\n}\n\nfunc (itr *{{$k.name}}CloseInterruptIterator) monitor() {\n\tselect {\n\tcase <-itr.closing:\n\t\titr.Close()\n\tcase <-itr.done:\n\t}\n}\n\nfunc (itr *{{$k.name}}CloseInterruptIterator) Stats() IteratorStats {\n\treturn itr.input.Stats()\n}\n\nfunc (itr *{{$k.name}}CloseInterruptIterator) Close() error 
{\n\titr.once.Do(func() {\n\t\tclose(itr.done)\n\t\titr.input.Close()\n\t})\n\treturn nil\n}\n\nfunc (itr *{{$k.name}}CloseInterruptIterator) Next() (*{{$k.Name}}Point, error) {\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\t// Check if the iterator was closed.\n\t\tselect {\n\t\tcase <-itr.done:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn p, nil\n}\n\n// aux{{$k.Name}}Point represents a combination of a point and an error for the AuxIterator.\ntype aux{{$k.Name}}Point struct {\n\tpoint *{{$k.Name}}Point\n\terr   error\n}\n\n// {{$k.name}}AuxIterator represents a {{$k.name}} implementation of AuxIterator.\ntype {{$k.name}}AuxIterator struct {\n\tinput      *buf{{$k.Name}}Iterator\n\toutput     chan aux{{$k.Name}}Point\n\tfields     *auxIteratorFields\n\tbackground bool\n}\n\nfunc new{{$k.Name}}AuxIterator(input {{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}AuxIterator {\n\treturn &{{$k.name}}AuxIterator{\n\t\tinput:  newBuf{{$k.Name}}Iterator(input),\n\t\toutput: make(chan aux{{$k.Name}}Point, 1),\n\t\tfields: newAuxIteratorFields(opt),\n\t}\n}\n\nfunc (itr *{{$k.name}}AuxIterator) Background() {\n\titr.background = true\n\titr.Start()\n\tgo DrainIterator(itr)\n}\n\nfunc (itr *{{$k.name}}AuxIterator) Start()                           { go itr.stream() }\nfunc (itr *{{$k.name}}AuxIterator) Stats() IteratorStats             { return itr.input.Stats() }\nfunc (itr *{{$k.name}}AuxIterator) Close() error                     { return itr.input.Close() }\nfunc (itr *{{$k.name}}AuxIterator) Next() (*{{$k.Name}}Point, error) {\n\tp := <-itr.output\n\treturn p.point, p.err\n}\nfunc (itr *{{$k.name}}AuxIterator) Iterator(name string, typ DataType) Iterator    { return itr.fields.iterator(name, typ) }\n\nfunc (itr *{{.name}}AuxIterator) stream() {\n\tfor {\n\t\t// Read next point.\n\t\tp, err := itr.input.Next()\n\t\tif err != nil {\n\t\t\titr.output <- aux{{$k.Name}}Point{err: 
err}\n\t\t\titr.fields.sendError(err)\n\t\t\tbreak\n\t\t} else if p == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Send point to output and to each field iterator.\n\t\titr.output <- aux{{$k.Name}}Point{point: p}\n\t\tif ok := itr.fields.send(p); !ok && itr.background {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tclose(itr.output)\n\titr.fields.close()\n}\n\n// {{$k.name}}ChanIterator represents a new instance of {{$k.name}}ChanIterator.\ntype {{$k.name}}ChanIterator struct {\n\tbuf struct {\n\t\ti      int\n\t\tfilled bool\n\t\tpoints [2]{{$k.Name}}Point\n\t}\n\terr  error\n\tcond *sync.Cond\n\tdone bool\n}\n\nfunc (itr *{{$k.name}}ChanIterator) Stats() IteratorStats { return IteratorStats{} }\n\nfunc (itr *{{$k.name}}ChanIterator) Close() error {\n\titr.cond.L.Lock()\n\t// Mark the channel iterator as done and signal all waiting goroutines to start again.\n\titr.done = true\n\titr.cond.Broadcast()\n\t// Do not defer the unlock so we don't create an unnecessary allocation.\n\titr.cond.L.Unlock()\n\treturn nil\n}\n\nfunc (itr *{{$k.name}}ChanIterator) setBuf(name string, tags Tags, time int64, value interface{}) bool {\n\titr.cond.L.Lock()\n\tdefer itr.cond.L.Unlock()\n\n\t// Wait for either the iterator to be done (so we don't have to set the value)\n\t// or for the buffer to have been read and ready for another write.\n\tfor !itr.done && itr.buf.filled {\n\t\titr.cond.Wait()\n\t}\n\n\t// Do not set the value and return false to signal that the iterator is closed.\n\t// Do this after the above wait as the above for loop may have exited because\n\t// the iterator was closed.\n\tif itr.done {\n\t\treturn false\n\t}\n\n\tswitch v := value.(type) {\n\tcase {{$k.Type}}:\n\t\titr.buf.points[itr.buf.i] = {{$k.Name}}Point{Name: name, Tags: tags, Time: time, Value: v}\n{{if eq $k.Name \"Float\"}}\n\tcase int64:\n\t\titr.buf.points[itr.buf.i] = {{$k.Name}}Point{Name: name, Tags: tags, Time: time, Value: float64(v)}\n{{end}}\n\tdefault:\n\t\titr.buf.points[itr.buf.i] = {{$k.Name}}Point{Name: 
name, Tags: tags, Time: time, Nil: true}\n\t}\n\titr.buf.filled = true\n\n\t// Signal to all waiting goroutines that a new value is ready to read.\n\titr.cond.Signal()\n\treturn true\n}\n\nfunc (itr *{{$k.name}}ChanIterator) setErr(err error) {\n\titr.cond.L.Lock()\n\tdefer itr.cond.L.Unlock()\n\titr.err = err\n\n\t// Signal to all waiting goroutines that a new value is ready to read.\n\titr.cond.Signal()\n}\n\nfunc (itr *{{$k.name}}ChanIterator) Next() (*{{$k.Name}}Point, error) {\n\titr.cond.L.Lock()\n\tdefer itr.cond.L.Unlock()\n\n\t// Check for an error and return one if there.\n\tif itr.err != nil {\n\t\treturn nil, itr.err\n\t}\n\n\t// Wait until either a value is available in the buffer or\n\t// the iterator is closed.\n\tfor !itr.done && !itr.buf.filled {\n\t\titr.cond.Wait()\n\t}\n\n\t// Return nil once the channel is done and the buffer is empty.\n\tif itr.done && !itr.buf.filled {\n\t\treturn nil, nil\n\t}\n\n\t// Always read from the buffer if it exists, even if the iterator\n\t// is closed. 
This prevents the last value from being truncated by\n\t// the parent iterator.\n\tp := &itr.buf.points[itr.buf.i]\n\titr.buf.i = (itr.buf.i + 1) % len(itr.buf.points)\n\titr.buf.filled = false\n\titr.cond.Signal()\n\treturn p, nil\n}\n\n{{range $v := $types}}\n\n// {{$k.name}}Reduce{{$v.Name}}Iterator executes a reducer for every interval and buffers the result.\ntype {{$k.name}}Reduce{{$v.Name}}Iterator struct {\n\tinput    *buf{{$k.Name}}Iterator\n\tcreate   func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter)\n\tdims     []string\n\topt      IteratorOptions\n\tpoints   []{{$v.Name}}Point\n\tkeepTags bool\n}\n\nfunc new{{$k.Name}}Reduce{{$v.Name}}Iterator(input {{$k.Name}}Iterator, opt IteratorOptions, createFn func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter)) *{{$k.name}}Reduce{{$v.Name}}Iterator {\n\treturn &{{$k.name}}Reduce{{$v.Name}}Iterator{\n\t\tinput:  newBuf{{$k.Name}}Iterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Next() (*{{$v.Name}}Point, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// {{$k.name}}Reduce{{$v.Name}}Point stores the reduced data for a name/tag combination.\ntype {{$k.name}}Reduce{{$v.Name}}Point struct {\n\tName       string\n\tTags       
Tags\n\tAggregator {{$k.Name}}PointAggregator\n\tEmitter    {{$v.Name}}PointEmitter\n}\n\n// reduce executes fn once for every point in the next window.\n// The previous value for the dimension is passed to fn.\nfunc (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, error) {\n\t// Calculate next window.\n\tvar (\n\t\tstartTime, endTime int64\n\t\twindow             struct {\n\t\t\tname string\n\t\t\ttags string\n\t\t}\n\t)\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t} else if p.Nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unread the point so it can be processed.\n\t\titr.input.unread(p)\n\t\tstartTime, endTime = itr.opt.Window(p.Time)\n\t\twindow.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()\n\t\tbreak\n\t}\n\n\t// Create points by tags.\n\tm := make(map[string]*{{$k.name}}Reduce{{$v.Name}}Point)\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.NextInWindow(startTime, endTime)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr == nil {\n\t\t\tbreak\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t} else if curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Ensure this point is within the same final window.\n\t\tif curr.Name != window.name {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {\n\t\t\titr.input.unread(curr)\n\t\t\tbreak\n\t\t}\n\n\t\t// Retrieve the tags on this point for this level of the query.\n\t\t// This may be different than the bucket dimensions.\n\t\ttags := curr.Tags.Subset(itr.dims)\n\t\tid := tags.ID()\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &{{$k.name}}Reduce{{$v.Name}}Point{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: 
aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\tm[id] = rp\n\t\t}\n\t\trp.Aggregator.Aggregate{{$k.Name}}(curr)\n\t}\n\n\t// Reverse sort points by name & tag if our output is supposed to be ordered.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tif len(keys) > 1 && itr.opt.Ordered {\n\t\tsort.Sort(reverseStringSlice(keys))\n\t}\n\n\t// Assume the points are already sorted until proven otherwise.\n\tsortedByTime := true\n\t// Emit the points for each name & tag combination.\n\ta := make([]{{$v.Name}}Point, 0, len(m))\n\tfor _, k := range keys {\n\t\trp := m[k]\n\t\tpoints := rp.Emitter.Emit()\n\t\tfor i := len(points)-1; i >= 0; i-- {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tif !itr.keepTags {\n\t\t\t\tpoints[i].Tags = rp.Tags\n\t\t\t}\n\t\t\t// Set the points time to the interval time if the reducer didn't provide one.\n\t\t\tif points[i].Time == ZeroTime {\n\t\t\t\tpoints[i].Time = startTime\n\t\t\t} else {\n\t\t\t\tsortedByTime = false\n\t\t\t}\n\t\t\ta = append(a, points[i])\n\t\t}\n\t}\n\n\t// Points may be out of order. 
Perform a stable sort by time if requested.\n\tif !sortedByTime && itr.opt.Ordered {\n\t\tsort.Stable(sort.Reverse({{$v.name}}PointsByTime(a)))\n\t}\n\n\treturn a, nil\n}\n\n// {{$k.name}}Stream{{$v.Name}}Iterator streams inputs into the iterator and emits points gradually.\ntype {{$k.name}}Stream{{$v.Name}}Iterator struct {\n\tinput  *buf{{$k.Name}}Iterator\n\tcreate func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter)\n\tdims   []string\n\topt    IteratorOptions\n\tm      map[string]*{{$k.name}}Reduce{{$v.Name}}Point\n\tpoints []{{$v.Name}}Point\n}\n\n// new{{$k.Name}}Stream{{$v.Name}}Iterator returns a new instance of {{$k.name}}Stream{{$v.Name}}Iterator.\nfunc new{{$k.Name}}Stream{{$v.Name}}Iterator(input {{$k.Name}}Iterator, createFn func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter), opt IteratorOptions) *{{$k.name}}Stream{{$v.Name}}Iterator {\n\treturn &{{$k.name}}Stream{{$v.Name}}Iterator{\n\t\tinput:  newBuf{{$k.Name}}Iterator(input),\n\t\tcreate: createFn,\n\t\tdims:   opt.GetDimensions(),\n\t\topt:    opt,\n\t\tm:      make(map[string]*{{$k.name}}Reduce{{$v.Name}}Point),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Close() error { return itr.input.Close() }\n\n// Next returns the next value for the stream iterator.\nfunc (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Next() (*{{$v.Name}}Point, error) {\n\t// Calculate next window if we have no more points.\n\tif len(itr.points) == 0 {\n\t\tvar err error\n\t\titr.points, err = itr.reduce()\n\t\tif len(itr.points) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Pop next point off the stack.\n\tp := &itr.points[len(itr.points)-1]\n\titr.points = itr.points[:len(itr.points)-1]\n\treturn p, nil\n}\n\n// reduce creates and manages aggregators for every point from the 
input.\n// After aggregating a point, it always tries to emit a value using the emitter.\nfunc (itr *{{$k.name}}Stream{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, error) {\n\tfor {\n\t\t// Read next point.\n\t\tcurr, err := itr.input.Next()\n\t\tif curr == nil {\n\t\t\t// Close all of the aggregators to flush any remaining points to emit.\n\t\t\tvar points []{{$v.Name}}Point\n\t\t\tfor _, rp := range itr.m {\n\t\t\t\tif aggregator, ok := rp.Aggregator.(io.Closer); ok {\n\t\t\t\t\tif err := aggregator.Close(); err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tpts := rp.Emitter.Emit()\n\t\t\t\t\tif len(pts) == 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tfor i := range pts {\n\t\t\t\t\t\tpts[i].Name = rp.Name\n\t\t\t\t\t\tpts[i].Tags = rp.Tags\n\t\t\t\t\t}\n\t\t\t\t\tpoints = append(points, pts...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Eliminate the aggregators and emitters.\n\t\t\titr.m = nil\n\t\t\treturn points, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if curr.Nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags := curr.Tags.Subset(itr.dims)\n\n\t\tid := curr.Name\n\t\tif len(tags.m) > 0 {\n\t\t\tid += \"\\x00\" + tags.ID()\n\t\t}\n\n\t\t// Retrieve the aggregator for this name/tag combination or create one.\n\t\trp := itr.m[id]\n\t\tif rp == nil {\n\t\t\taggregator, emitter := itr.create()\n\t\t\trp = &{{$k.name}}Reduce{{.Name}}Point{\n\t\t\t\tName:       curr.Name,\n\t\t\t\tTags:       tags,\n\t\t\t\tAggregator: aggregator,\n\t\t\t\tEmitter:    emitter,\n\t\t\t}\n\t\t\titr.m[id] = rp\n\t\t}\n\t\trp.Aggregator.Aggregate{{$k.Name}}(curr)\n\n\t\t// Attempt to emit points from the aggregator.\n\t\tpoints := rp.Emitter.Emit()\n\t\tif len(points) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range points {\n\t\t\tpoints[i].Name = rp.Name\n\t\t\tpoints[i].Tags = rp.Tags\n\t\t}\n\t\treturn points, nil\n\t}\n}\n\n// {{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator executes a function to modify an existing point\n// 
for every output of the input iterator.\ntype {{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator struct {\n\tleft      *buf{{$k.Name}}Iterator\n\tright     *buf{{$k.Name}}Iterator\n\tfn        {{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprFunc\n\tpoints    []{{$k.Name}}Point // must be size 2\n\tstorePrev bool\n}\n\nfunc new{{$k.Name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator(left, right {{$k.Name}}Iterator, opt IteratorOptions, fn func(a, b {{$k.Type}}) {{$v.Type}}) *{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator {\n\tvar points []{{$k.Name}}Point\n\tswitch opt.Fill {\n\tcase NullFill, PreviousFill:\n\t\tpoints = []{{$k.Name}}Point{ {Nil: true}, {Nil: true} }\n\tcase NumberFill:\n\t\tvalue := castTo{{$k.Name}}(opt.FillValue)\n\t\tpoints = []{{$k.Name}}Point{ {Value: value}, {Value: value} }\n\t}\n\treturn &{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator{\n\t\tleft:      newBuf{{$k.Name}}Iterator(left),\n\t\tright:     newBuf{{$k.Name}}Iterator(right),\n\t\tpoints:    points,\n\t\tfn:        fn,\n\t\tstorePrev: opt.Fill == PreviousFill,\n\t}\n}\n\nfunc (itr *{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator) Stats() IteratorStats {\n\tstats := itr.left.Stats()\n\tstats.Add(itr.right.Stats())\n\treturn stats\n}\n\nfunc (itr *{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator) Close() error {\n\titr.left.Close()\n\titr.right.Close()\n\treturn nil\n}\n\nfunc (itr *{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator) Next() (*{{$v.Name}}Point, error) {\n\tfor {\n\t\ta, b, err := itr.next()\n\t\tif err != nil || (a == nil && b == nil) {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If any of these are nil and we are using fill(none), skip these points.\n\t\tif (a == nil || a.Nil || b == nil || b.Nil) && itr.points == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If one of the two points is nil, we need to fill it with a fake nil\n\t\t// point that has the 
same name, tags, and time as the other point.\n\t\t// There should never be a time when both of these are nil.\n\t\tif a == nil {\n\t\t\tp := *b\n\t\t\ta = &p\n\t\t\ta.Value = {{$k.Nil}}\n\t\t\ta.Nil = true\n\t\t} else if b == nil {\n\t\t\tp := *a\n\t\t\tb = &p\n\t\t\tb.Value = {{$k.Nil}}\n\t\t\tb.Nil = true\n\t\t}\n\n\t\t// If a value is nil, use the fill values if the fill value is non-nil.\n\t\tif a.Nil && !itr.points[0].Nil {\n\t\t\ta.Value = itr.points[0].Value\n\t\t\ta.Nil = false\n\t\t}\n\t\tif b.Nil && !itr.points[1].Nil {\n\t\t\tb.Value = itr.points[1].Value\n\t\t\tb.Nil = false\n\t\t}\n\n\t\tif itr.storePrev {\n\t\t\titr.points[0], itr.points[1] = *a, *b\n\t\t}\n\n{{if eq $k.Name $v.Name}}\n\t\tif a.Nil {\n\t\t\treturn a, nil\n\t\t} else if b.Nil {\n\t\t\treturn b, nil\n\t\t}\n\t\ta.Value = itr.fn(a.Value, b.Value)\n\t\treturn a, nil\n{{else}}\n\t\tp := &{{$v.Name}}Point{\n\t\t\tName: a.Name,\n\t\t\tTags: a.Tags,\n\t\t\tTime: a.Time,\n\t\t\tNil:  a.Nil || b.Nil,\n\t\t\tAggregated: a.Aggregated,\n\t\t}\n\t\tif !p.Nil {\n\t\t\tp.Value = itr.fn(a.Value, b.Value)\n\t\t}\n\t\treturn p, nil\n{{end}}\n\t}\n}\n\n// next returns the next points within each iterator. 
If the iterators are\n// uneven, it organizes them so only matching points are returned.\nfunc (itr *{{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprIterator) next() (a, b *{{$k.Name}}Point, err error) {\n\t// Retrieve the next value for both the left and right.\n\ta, err = itr.left.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tb, err = itr.right.Next()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// If we have a point from both, make sure that they match each other.\n\tif a != nil && b != nil {\n\t\tif a.Name > b.Name {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Name < b.Name {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif ltags, rtags := a.Tags.ID(), b.Tags.ID(); ltags > rtags {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if ltags < rtags {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\n\t\tif a.Time > b.Time {\n\t\t\titr.left.unread(a)\n\t\t\treturn nil, b, nil\n\t\t} else if a.Time < b.Time {\n\t\t\titr.right.unread(b)\n\t\t\treturn a, nil, nil\n\t\t}\n\t}\n\treturn a, b, nil\n}\n\n// {{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprFunc creates or modifies a point by combining two\n// points. The point passed in may be modified and returned rather than\n// allocating a new point if possible. 
One of the points may be nil, but at\n// least one of the points will be non-nil.\ntype {{$k.name}}{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}ExprFunc func(a, b {{$k.Type}}) {{$v.Type}}\n{{end}}\n\n// {{$k.name}}TransformIterator executes a function to modify an existing point for every\n// output of the input iterator.\ntype {{$k.name}}TransformIterator struct {\n\tinput {{$k.Name}}Iterator\n\tfn    {{$k.name}}TransformFunc\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *{{$k.name}}TransformIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *{{$k.name}}TransformIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *{{$k.name}}TransformIterator) Next() (*{{$k.Name}}Point, error) {\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if p != nil {\n\t\tp = itr.fn(p)\n\t}\n\treturn p, nil\n}\n\n// {{$k.name}}TransformFunc creates or modifies a point.\n// The point passed in may be modified and returned rather than allocating a\n// new point if possible.\ntype {{$k.name}}TransformFunc func(p *{{$k.Name}}Point) *{{$k.Name}}Point\n\n// {{$k.name}}BoolTransformIterator executes a function to modify an existing point for every\n// output of the input iterator.\ntype {{$k.name}}BoolTransformIterator struct {\n\tinput {{$k.Name}}Iterator\n\tfn    {{$k.name}}BoolTransformFunc\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *{{$k.name}}BoolTransformIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *{{$k.name}}BoolTransformIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *{{$k.name}}BoolTransformIterator) Next() (*BooleanPoint, error) {\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\treturn 
nil, err\n\t} else if p != nil {\n\t\treturn itr.fn(p), nil\n\t}\n\treturn nil, nil\n}\n\n// {{$k.name}}BoolTransformFunc creates or modifies a point.\n// The point passed in may be modified and returned rather than allocating a\n// new point if possible.\ntype {{$k.name}}BoolTransformFunc func(p *{{$k.Name}}Point) *BooleanPoint\n\n// {{$k.name}}DedupeIterator only outputs unique points.\n// This differs from the DistinctIterator in that it compares all aux fields too.\n// This iterator is relatively inefficient and should only be used on small\n// datasets such as meta query results.\ntype {{$k.name}}DedupeIterator struct {\n\tinput {{$k.Name}}Iterator\n\tm     map[string]struct{} // lookup of points already sent\n}\n\ntype {{$k.name}}IteratorMapper struct {\n\te         *Emitter\n\tbuf       []interface{}\n\tdriver    IteratorMap   // which iterator to use for the primary value, can be nil\n\tfields    []IteratorMap // which iterator to use for an aux field\n\tpoint     {{$k.Name}}Point\n}\n\nfunc new{{$k.Name}}IteratorMapper(itrs []Iterator, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *{{$k.name}}IteratorMapper {\n\te := NewEmitter(itrs, opt.Ascending, 0)\n\te.OmitTime = true\n\treturn &{{$k.name}}IteratorMapper{\n\t\te:         e,\n\t\tbuf:       make([]interface{}, len(itrs)),\n\t\tdriver:    driver,\n\t\tfields:    fields,\n\t\tpoint:  {{$k.Name}}Point{\n\t\t\tAux: make([]interface{}, len(fields)),\n\t\t},\n\t}\n}\n\nfunc (itr *{{$k.name}}IteratorMapper) Next() (*{{$k.Name}}Point, error) {\n\tt, name, tags, err := itr.e.loadBuf()\n\tif err != nil || t == ZeroTime {\n\t\treturn nil, err\n\t}\n\titr.point.Time = t\n\titr.point.Name = name\n\titr.point.Tags = tags\n\n\titr.e.readInto(t, name, tags, itr.buf)\n\tif itr.driver != nil {\n\tif v := itr.driver.Value(tags, itr.buf); v != nil {\n\t\t\tif v, ok := v.({{$k.Type}}); ok {\n\t\t\t\titr.point.Value = v\n\t\t\t\titr.point.Nil = false\n\t\t\t} else {\n\t\t\t\titr.point.Value = 
{{$k.Nil}}\n\t\t\t\titr.point.Nil = true\n\t\t\t}\n\t\t} else {\n\t\t\titr.point.Value = {{$k.Nil}}\n\t\t\titr.point.Nil = true\n\t\t}\n\t}\n\tfor i, f := range itr.fields {\n\t\titr.point.Aux[i] = f.Value(tags, itr.buf)\n\t}\n\treturn &itr.point, nil\n}\n\nfunc (itr *{{$k.name}}IteratorMapper) Stats() IteratorStats {\n\tstats := IteratorStats{}\n\tfor _, itr := range itr.e.itrs {\n\t\tstats.Add(itr.Stats())\n\t}\n\treturn stats\n}\n\nfunc (itr *{{$k.name}}IteratorMapper) Close() error {\n\treturn itr.e.Close()\n}\n\ntype {{$k.name}}FilterIterator struct {\n\tinput {{$k.Name}}Iterator\n\tcond  Expr\n\topt   IteratorOptions\n\tm     map[string]interface{}\n}\n\nfunc new{{$k.Name}}FilterIterator(input {{$k.Name}}Iterator, cond Expr, opt IteratorOptions) {{$k.Name}}Iterator {\n\t// Strip out time conditions from the WHERE clause.\n\t// TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct.\n\tn := RewriteFunc(CloneExpr(cond), func(n Node) Node {\n\t\tswitch n := n.(type) {\n\t\tcase *BinaryExpr:\n\t\t\tif n.LHS.String() == \"time\" {\n\t\t\t\treturn &BooleanLiteral{Val: true}\n\t\t\t}\n\t\t}\n\t\treturn n\n\t})\n\n\tcond, _ = n.(Expr)\n\tif cond == nil {\n\t\treturn input\n\t} else if n, ok := cond.(*BooleanLiteral); ok && n.Val {\n\t\treturn input\n\t}\n\n\treturn &{{$k.name}}FilterIterator{\n\t\tinput: input,\n\t\tcond:  cond,\n\t\topt:   opt,\n\t\tm:     make(map[string]interface{}),\n\t}\n}\n\nfunc (itr *{{$k.name}}FilterIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *{{$k.name}}FilterIterator) Close() error { return itr.input.Close() }\n\nfunc (itr *{{$k.name}}FilterIterator) Next() (*{{$k.Name}}Point, error) {\n\tfor {\n\t\tp, err := itr.input.Next()\n\t\tif err != nil || p == nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor i, ref := range itr.opt.Aux {\n\t\t\titr.m[ref.Val] = p.Aux[i]\n\t\t}\n\t\tfor k, v := range p.Tags.KeyValues() {\n\t\t\titr.m[k] = v\n\t\t}\n\n\t\tif !EvalBool(itr.cond, itr.m) 
{\n\t\t\tcontinue\n\t\t}\n\t\treturn p, nil\n\t}\n}\n\n// new{{$k.Name}}DedupeIterator returns a new instance of {{$k.name}}DedupeIterator.\nfunc new{{$k.Name}}DedupeIterator(input {{$k.Name}}Iterator) *{{$k.name}}DedupeIterator {\n\treturn &{{$k.name}}DedupeIterator{\n\t\tinput: input,\n\t\tm:     make(map[string]struct{}),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *{{$k.name}}DedupeIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *{{$k.name}}DedupeIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next unique point from the input iterator.\nfunc (itr *{{$k.name}}DedupeIterator) Next() (*{{$k.Name}}Point, error) {\n\tfor {\n\t\t// Read next point.\n\t\tp, err := itr.input.Next()\n\t\tif p == nil || err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Serialize to bytes to store in lookup.\n\t\tbuf, err := proto.Marshal(encode{{$k.Name}}Point(p))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// If the point has already been output then move to the next point.\n\t\tif _, ok := itr.m[string(buf)]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Otherwise mark it as emitted and return point.\n\t\titr.m[string(buf)] = struct{}{}\n\t\treturn p, nil\n\t}\n}\n\n// {{$k.name}}ReaderIterator represents an iterator that streams from a reader.\ntype {{$k.name}}ReaderIterator struct {\n\tr     io.Reader\n\tdec   *{{$k.Name}}PointDecoder\n}\n\n// new{{$k.Name}}ReaderIterator returns a new instance of {{$k.name}}ReaderIterator.\nfunc new{{$k.Name}}ReaderIterator(r io.Reader, stats IteratorStats) *{{$k.name}}ReaderIterator {\n\tdec := New{{$k.Name}}PointDecoder(r)\n\tdec.stats = stats\n\n\treturn &{{$k.name}}ReaderIterator{\n\t\tr:     r,\n    dec:   dec,\n\t}\n}\n\n// Stats returns stats about points processed.\nfunc (itr *{{$k.name}}ReaderIterator) Stats() IteratorStats { return itr.dec.stats }\n\n// Close closes the underlying reader, if 
applicable.\nfunc (itr *{{$k.name}}ReaderIterator) Close() error {\n\tif r, ok := itr.r.(io.ReadCloser); ok {\n\t\treturn r.Close()\n\t}\n\treturn nil\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *{{$k.name}}ReaderIterator) Next() (*{{$k.Name}}Point, error) {\n\t// OPTIMIZE(benbjohnson): Reuse point on iterator.\n\n\t// Unmarshal next point.\n\tp := &{{$k.Name}}Point{}\n\tif err := itr.dec.Decode{{$k.Name}}Point(p); err == io.EOF {\n\t\treturn nil, nil\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}\n{{end}}\n\n\n// IteratorEncoder is an encoder for encoding an iterator's points to w.\ntype IteratorEncoder struct {\n\tw io.Writer\n\n\t// Frequency with which stats are emitted.\n\tStatsInterval time.Duration\n}\n\n// NewIteratorEncoder encodes an iterator's points to w.\nfunc NewIteratorEncoder(w io.Writer) *IteratorEncoder {\n\treturn &IteratorEncoder{\n\t\tw: w,\n\n\t\tStatsInterval: DefaultStatsInterval,\n\t}\n}\n\n// EncodeIterator encodes and writes all of itr's points to the underlying writer.\nfunc (enc *IteratorEncoder) EncodeIterator(itr Iterator) error {\n\tswitch itr := itr.(type) {\n\tcase FloatIterator:\n\t\treturn enc.encodeFloatIterator(itr)\n\tcase IntegerIterator:\n\t\treturn enc.encodeIntegerIterator(itr)\n\tcase StringIterator:\n\t\treturn enc.encodeStringIterator(itr)\n\tcase BooleanIterator:\n\t\treturn enc.encodeBooleanIterator(itr)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported iterator for encoder: %T\", itr))\n\t}\n}\n\n{{range .}}\n// encode{{.Name}}Iterator encodes all points from itr to the underlying writer.\nfunc (enc *IteratorEncoder) encode{{.Name}}Iterator(itr {{.Name}}Iterator) error {\n\tticker := time.NewTicker(enc.StatsInterval)\n\tdefer ticker.Stop()\n\n\t// Emit initial stats.\n\tif err := enc.encodeStats(itr.Stats()); err != nil {\n\t\treturn err\n\t}\n\n\t// Continually stream points from the iterator into the encoder.\n\tpenc := New{{.Name}}PointEncoder(enc.w)\n\tfor {\n\t\t// 
Emit stats periodically.\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif err := enc.encodeStats(itr.Stats()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t}\n\n\t\t// Retrieve the next point from the iterator.\n\t\tp, err := itr.Next()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t} else if p == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Write the point to the point encoder.\n\t\tif err := penc.Encode{{.Name}}Point(p); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Emit final stats.\n\tif err := enc.encodeStats(itr.Stats()); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n{{end}}\n\n// encode a stats object in the point stream.\nfunc (enc *IteratorEncoder) encodeStats(stats IteratorStats) error {\n\tbuf, err := proto.Marshal(&internal.Point{\n\t\tName: proto.String(\"\"),\n\t\tTags: proto.String(\"\"),\n\t\tTime: proto.Int64(0),\n\t\tNil:  proto.Bool(false),\n\n\t\tStats: encodeIteratorStats(&stats),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {\n\t\treturn err\n\t}\n\tif _, err := enc.w.Write(buf); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n{{end}}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/iterator.go",
    "content": "package influxql\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\n\t\"github.com/gogo/protobuf/proto\"\n\tinternal \"github.com/influxdata/influxdb/influxql/internal\"\n)\n\n// ErrUnknownCall is returned when operating on an unknown function call.\nvar ErrUnknownCall = errors.New(\"unknown call\")\n\nconst (\n\t// MinTime is used as the minimum time value when computing an unbounded range.\n\t// This time is one less than the MinNanoTime so that the first minimum\n\t// time can be used as a sentinel value to signify that it is the default\n\t// value rather than explicitly set by the user.\n\tMinTime = models.MinNanoTime - 1\n\n\t// MaxTime is used as the maximum time value when computing an unbounded range.\n\t// This time is 2262-04-11 23:47:16.854775806 +0000 UTC\n\tMaxTime = models.MaxNanoTime\n\n\t// secToNs is the number of nanoseconds in a second.\n\tsecToNs = int64(time.Second)\n)\n\n// Iterator represents a generic interface for all Iterators.\n// Most iterator operations are done on the typed sub-interfaces.\ntype Iterator interface {\n\tStats() IteratorStats\n\tClose() error\n}\n\n// Iterators represents a list of iterators.\ntype Iterators []Iterator\n\n// Stats returns the aggregation of all iterator stats.\nfunc (a Iterators) Stats() IteratorStats {\n\tvar stats IteratorStats\n\tfor _, itr := range a {\n\t\tstats.Add(itr.Stats())\n\t}\n\treturn stats\n}\n\n// Close closes all iterators.\nfunc (a Iterators) Close() error {\n\tfor _, itr := range a {\n\t\titr.Close()\n\t}\n\treturn nil\n}\n\n// filterNonNil returns a slice of iterators that removes all nil iterators.\nfunc (a Iterators) filterNonNil() []Iterator {\n\tother := make([]Iterator, 0, len(a))\n\tfor _, itr := range a {\n\t\tif itr == nil {\n\t\t\tcontinue\n\t\t}\n\t\tother = append(other, itr)\n\t}\n\treturn other\n}\n\n// castType determines what type to cast the set of iterators to.\n// An iterator type 
is chosen using this hierarchy:\n//   float > integer > string > boolean\nfunc (a Iterators) castType() DataType {\n\tif len(a) == 0 {\n\t\treturn Unknown\n\t}\n\n\ttyp := DataType(Boolean)\n\tfor _, input := range a {\n\t\tswitch input.(type) {\n\t\tcase FloatIterator:\n\t\t\t// Once a float iterator is found, short circuit the end.\n\t\t\treturn Float\n\t\tcase IntegerIterator:\n\t\t\tif typ > Integer {\n\t\t\t\ttyp = Integer\n\t\t\t}\n\t\tcase StringIterator:\n\t\t\tif typ > String {\n\t\t\t\ttyp = String\n\t\t\t}\n\t\tcase BooleanIterator:\n\t\t\t// Boolean is the lowest type.\n\t\t}\n\t}\n\treturn typ\n}\n\n// cast casts an array of iterators to a single type.\n// Iterators that are not compatible or cannot be cast to the\n// chosen iterator type are closed and dropped.\nfunc (a Iterators) cast() interface{} {\n\ttyp := a.castType()\n\tswitch typ {\n\tcase Float:\n\t\treturn newFloatIterators(a)\n\tcase Integer:\n\t\treturn newIntegerIterators(a)\n\tcase String:\n\t\treturn newStringIterators(a)\n\tcase Boolean:\n\t\treturn newBooleanIterators(a)\n\t}\n\treturn a\n}\n\n// Merge combines all iterators into a single iterator.\n// A sorted merge iterator or a merge iterator can be used based on opt.\nfunc (a Iterators) Merge(opt IteratorOptions) (Iterator, error) {\n\t// Check if this is a call expression.\n\tcall, ok := opt.Expr.(*Call)\n\n\t// Merge into a single iterator.\n\tif !ok && opt.MergeSorted() {\n\t\titr := NewSortedMergeIterator(a, opt)\n\t\tif itr != nil && opt.InterruptCh != nil {\n\t\t\titr = NewInterruptIterator(itr, opt.InterruptCh)\n\t\t}\n\t\treturn itr, nil\n\t}\n\n\t// We do not need an ordered output so use a merge iterator.\n\titr := NewMergeIterator(a, opt)\n\tif itr == nil {\n\t\treturn nil, nil\n\t}\n\n\tif opt.InterruptCh != nil {\n\t\titr = NewInterruptIterator(itr, opt.InterruptCh)\n\t}\n\n\tif !ok {\n\t\t// This is not a call expression so do not use a call iterator.\n\t\treturn itr, nil\n\t}\n\n\t// When merging the count() 
function, use sum() to sum the counted points.\n\tif call.Name == \"count\" {\n\t\topt.Expr = &Call{\n\t\t\tName: \"sum\",\n\t\t\tArgs: call.Args,\n\t\t}\n\t}\n\treturn NewCallIterator(itr, opt)\n}\n\n// NewMergeIterator returns an iterator to merge itrs into one.\n// Inputs must either be merge iterators or only contain a single name/tag in\n// sorted order. The iterator will output all points by window, name/tag, then\n// time. This iterator is useful when you need all of the points for an\n// interval.\nfunc NewMergeIterator(inputs []Iterator, opt IteratorOptions) Iterator {\n\tinputs = Iterators(inputs).filterNonNil()\n\tif n := len(inputs); n == 0 {\n\t\treturn nil\n\t} else if n == 1 {\n\t\treturn inputs[0]\n\t}\n\n\t// Aggregate functions can use a more relaxed sorting so that points\n\t// within a window are grouped. This is much more efficient.\n\tswitch inputs := Iterators(inputs).cast().(type) {\n\tcase []FloatIterator:\n\t\treturn newFloatMergeIterator(inputs, opt)\n\tcase []IntegerIterator:\n\t\treturn newIntegerMergeIterator(inputs, opt)\n\tcase []StringIterator:\n\t\treturn newStringMergeIterator(inputs, opt)\n\tcase []BooleanIterator:\n\t\treturn newBooleanMergeIterator(inputs, opt)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported merge iterator type: %T\", inputs))\n\t}\n}\n\n// NewParallelMergeIterator returns an iterator that breaks input iterators\n// into groups and processes them in parallel.\nfunc NewParallelMergeIterator(inputs []Iterator, opt IteratorOptions, parallelism int) Iterator {\n\tinputs = Iterators(inputs).filterNonNil()\n\tif len(inputs) == 0 {\n\t\treturn nil\n\t} else if len(inputs) == 1 {\n\t\treturn inputs[0]\n\t}\n\n\t// Limit parallelism to the number of inputs.\n\tif len(inputs) < parallelism {\n\t\tparallelism = len(inputs)\n\t}\n\n\t// Determine the number of inputs per output iterator.\n\tn := len(inputs) / parallelism\n\n\t// Group iterators together.\n\toutputs := make([]Iterator, parallelism)\n\tfor i := range 
outputs {\n\t\tvar slice []Iterator\n\t\tif i < len(outputs)-1 {\n\t\t\tslice = inputs[i*n : (i+1)*n]\n\t\t} else {\n\t\t\tslice = inputs[i*n:]\n\t\t}\n\n\t\toutputs[i] = newParallelIterator(NewMergeIterator(slice, opt))\n\t}\n\n\t// Merge all groups together.\n\treturn NewMergeIterator(outputs, opt)\n}\n\n// NewSortedMergeIterator returns an iterator to merge itrs into one.\n// Inputs must either be sorted merge iterators or only contain a single\n// name/tag in sorted order. The iterator will output all points by name/tag,\n// then time. This iterator is useful when you need all points for a name/tag\n// to be in order.\nfunc NewSortedMergeIterator(inputs []Iterator, opt IteratorOptions) Iterator {\n\tinputs = Iterators(inputs).filterNonNil()\n\tif len(inputs) == 0 {\n\t\treturn nil\n\t} else if len(inputs) == 1 {\n\t\treturn inputs[0]\n\t}\n\n\tswitch inputs := Iterators(inputs).cast().(type) {\n\tcase []FloatIterator:\n\t\treturn newFloatSortedMergeIterator(inputs, opt)\n\tcase []IntegerIterator:\n\t\treturn newIntegerSortedMergeIterator(inputs, opt)\n\tcase []StringIterator:\n\t\treturn newStringSortedMergeIterator(inputs, opt)\n\tcase []BooleanIterator:\n\t\treturn newBooleanSortedMergeIterator(inputs, opt)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported sorted merge iterator type: %T\", inputs))\n\t}\n}\n\n// newParallelIterator returns an iterator that runs in a separate goroutine.\nfunc newParallelIterator(input Iterator) Iterator {\n\tif input == nil {\n\t\treturn nil\n\t}\n\n\tswitch itr := input.(type) {\n\tcase FloatIterator:\n\t\treturn newFloatParallelIterator(itr)\n\tcase IntegerIterator:\n\t\treturn newIntegerParallelIterator(itr)\n\tcase StringIterator:\n\t\treturn newStringParallelIterator(itr)\n\tcase BooleanIterator:\n\t\treturn newBooleanParallelIterator(itr)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported parallel iterator type: %T\", itr))\n\t}\n}\n\n// NewLimitIterator returns an iterator that limits the number of points per 
grouping.\nfunc NewLimitIterator(input Iterator, opt IteratorOptions) Iterator {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\treturn newFloatLimitIterator(input, opt)\n\tcase IntegerIterator:\n\t\treturn newIntegerLimitIterator(input, opt)\n\tcase StringIterator:\n\t\treturn newStringLimitIterator(input, opt)\n\tcase BooleanIterator:\n\t\treturn newBooleanLimitIterator(input, opt)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported limit iterator type: %T\", input))\n\t}\n}\n\n// NewFilterIterator returns an iterator that filters the points based on the\n// condition. This iterator is not nearly as efficient as filtering points\n// within the query engine and is only used when filtering subqueries.\nfunc NewFilterIterator(input Iterator, cond Expr, opt IteratorOptions) Iterator {\n\tif input == nil {\n\t\treturn nil\n\t}\n\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\treturn newFloatFilterIterator(input, cond, opt)\n\tcase IntegerIterator:\n\t\treturn newIntegerFilterIterator(input, cond, opt)\n\tcase StringIterator:\n\t\treturn newStringFilterIterator(input, cond, opt)\n\tcase BooleanIterator:\n\t\treturn newBooleanFilterIterator(input, cond, opt)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported filter iterator type: %T\", input))\n\t}\n}\n\n// NewDedupeIterator returns an iterator that only outputs unique points.\n// This iterator maintains a serialized copy of each row so it is inefficient\n// to use on large datasets. 
It is intended for small datasets such as meta queries.\nfunc NewDedupeIterator(input Iterator) Iterator {\n\tif input == nil {\n\t\treturn nil\n\t}\n\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\treturn newFloatDedupeIterator(input)\n\tcase IntegerIterator:\n\t\treturn newIntegerDedupeIterator(input)\n\tcase StringIterator:\n\t\treturn newStringDedupeIterator(input)\n\tcase BooleanIterator:\n\t\treturn newBooleanDedupeIterator(input)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported dedupe iterator type: %T\", input))\n\t}\n}\n\n// NewFillIterator returns an iterator that fills in missing points in an aggregate.\nfunc NewFillIterator(input Iterator, expr Expr, opt IteratorOptions) Iterator {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\treturn newFloatFillIterator(input, expr, opt)\n\tcase IntegerIterator:\n\t\treturn newIntegerFillIterator(input, expr, opt)\n\tcase StringIterator:\n\t\treturn newStringFillIterator(input, expr, opt)\n\tcase BooleanIterator:\n\t\treturn newBooleanFillIterator(input, expr, opt)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported fill iterator type: %T\", input))\n\t}\n}\n\n// NewIntervalIterator returns an iterator that sets the time on each point to the interval.\nfunc NewIntervalIterator(input Iterator, opt IteratorOptions) Iterator {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\treturn newFloatIntervalIterator(input, opt)\n\tcase IntegerIterator:\n\t\treturn newIntegerIntervalIterator(input, opt)\n\tcase StringIterator:\n\t\treturn newStringIntervalIterator(input, opt)\n\tcase BooleanIterator:\n\t\treturn newBooleanIntervalIterator(input, opt)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported fill iterator type: %T\", input))\n\t}\n}\n\n// NewInterruptIterator returns an iterator that will stop producing output\n// when the passed-in channel is closed.\nfunc NewInterruptIterator(input Iterator, closing <-chan struct{}) Iterator {\n\tswitch input := input.(type) {\n\tcase 
FloatIterator:\n\t\treturn newFloatInterruptIterator(input, closing)\n\tcase IntegerIterator:\n\t\treturn newIntegerInterruptIterator(input, closing)\n\tcase StringIterator:\n\t\treturn newStringInterruptIterator(input, closing)\n\tcase BooleanIterator:\n\t\treturn newBooleanInterruptIterator(input, closing)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported interrupt iterator type: %T\", input))\n\t}\n}\n\n// NewCloseInterruptIterator returns an iterator that will invoke the Close() method on an\n// iterator when the passed-in channel has been closed.\nfunc NewCloseInterruptIterator(input Iterator, closing <-chan struct{}) Iterator {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\treturn newFloatCloseInterruptIterator(input, closing)\n\tcase IntegerIterator:\n\t\treturn newIntegerCloseInterruptIterator(input, closing)\n\tcase StringIterator:\n\t\treturn newStringCloseInterruptIterator(input, closing)\n\tcase BooleanIterator:\n\t\treturn newBooleanCloseInterruptIterator(input, closing)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported close iterator iterator type: %T\", input))\n\t}\n}\n\n// AuxIterator represents an iterator that can split off separate auxiliary iterators.\ntype AuxIterator interface {\n\tIterator\n\n\t// Auxilary iterator\n\tIterator(name string, typ DataType) Iterator\n\n\t// Start starts writing to the created iterators.\n\tStart()\n\n\t// Backgrounds the iterator so that, when start is called, it will\n\t// continuously read from the iterator.\n\tBackground()\n}\n\n// NewAuxIterator returns a new instance of AuxIterator.\nfunc NewAuxIterator(input Iterator, opt IteratorOptions) AuxIterator {\n\tswitch input := input.(type) {\n\tcase FloatIterator:\n\t\treturn newFloatAuxIterator(input, opt)\n\tcase IntegerIterator:\n\t\treturn newIntegerAuxIterator(input, opt)\n\tcase StringIterator:\n\t\treturn newStringAuxIterator(input, opt)\n\tcase BooleanIterator:\n\t\treturn newBooleanAuxIterator(input, 
opt)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported aux iterator type: %T\", input))\n\t}\n}\n\n// auxIteratorField represents an auxilary field within an AuxIterator.\ntype auxIteratorField struct {\n\tname string     // field name\n\ttyp  DataType   // detected data type\n\titrs []Iterator // auxillary iterators\n\tmu   sync.Mutex\n\topt  IteratorOptions\n}\n\nfunc (f *auxIteratorField) append(itr Iterator) {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\tf.itrs = append(f.itrs, itr)\n}\n\nfunc (f *auxIteratorField) close() {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\tfor _, itr := range f.itrs {\n\t\titr.Close()\n\t}\n}\n\ntype auxIteratorFields struct {\n\tfields     []*auxIteratorField\n\tdimensions []string\n}\n\n// newAuxIteratorFields returns a new instance of auxIteratorFields from a list of field names.\nfunc newAuxIteratorFields(opt IteratorOptions) *auxIteratorFields {\n\tfields := make([]*auxIteratorField, len(opt.Aux))\n\tfor i, ref := range opt.Aux {\n\t\tfields[i] = &auxIteratorField{name: ref.Val, typ: ref.Type, opt: opt}\n\t}\n\treturn &auxIteratorFields{\n\t\tfields:     fields,\n\t\tdimensions: opt.GetDimensions(),\n\t}\n}\n\nfunc (a *auxIteratorFields) close() {\n\tfor _, f := range a.fields {\n\t\tf.close()\n\t}\n}\n\n// iterator creates a new iterator for a named auxilary field.\nfunc (a *auxIteratorFields) iterator(name string, typ DataType) Iterator {\n\tfor _, f := range a.fields {\n\t\t// Skip field if it's name doesn't match.\n\t\t// Exit if no points were received by the iterator.\n\t\tif f.name != name || (typ != Unknown && f.typ != typ) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Create channel iterator by data type.\n\t\tswitch f.typ {\n\t\tcase Float:\n\t\t\titr := &floatChanIterator{cond: sync.NewCond(&sync.Mutex{})}\n\t\t\tf.append(itr)\n\t\t\treturn itr\n\t\tcase Integer:\n\t\t\titr := &integerChanIterator{cond: sync.NewCond(&sync.Mutex{})}\n\t\t\tf.append(itr)\n\t\t\treturn itr\n\t\tcase String, Tag:\n\t\t\titr := &stringChanIterator{cond: 
sync.NewCond(&sync.Mutex{})}\n\t\t\tf.append(itr)\n\t\t\treturn itr\n\t\tcase Boolean:\n\t\t\titr := &booleanChanIterator{cond: sync.NewCond(&sync.Mutex{})}\n\t\t\tf.append(itr)\n\t\t\treturn itr\n\t\tdefault:\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn &nilFloatIterator{}\n}\n\n// send sends a point to all field iterators.\nfunc (a *auxIteratorFields) send(p Point) (ok bool) {\n\tvalues := p.aux()\n\tfor i, f := range a.fields {\n\t\tvar v interface{}\n\t\tif i < len(values) {\n\t\t\tv = values[i]\n\t\t}\n\n\t\ttags := p.tags()\n\t\ttags = tags.Subset(a.dimensions)\n\n\t\t// Send new point for each aux iterator.\n\t\t// Primitive pointers represent nil values.\n\t\tfor _, itr := range f.itrs {\n\t\t\tswitch itr := itr.(type) {\n\t\t\tcase *floatChanIterator:\n\t\t\t\tok = itr.setBuf(p.name(), tags, p.time(), v) || ok\n\t\t\tcase *integerChanIterator:\n\t\t\t\tok = itr.setBuf(p.name(), tags, p.time(), v) || ok\n\t\t\tcase *stringChanIterator:\n\t\t\t\tok = itr.setBuf(p.name(), tags, p.time(), v) || ok\n\t\t\tcase *booleanChanIterator:\n\t\t\t\tok = itr.setBuf(p.name(), tags, p.time(), v) || ok\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"invalid aux itr type: %T\", itr))\n\t\t\t}\n\t\t}\n\t}\n\treturn ok\n}\n\nfunc (a *auxIteratorFields) sendError(err error) {\n\tfor _, f := range a.fields {\n\t\tfor _, itr := range f.itrs {\n\t\t\tswitch itr := itr.(type) {\n\t\t\tcase *floatChanIterator:\n\t\t\t\titr.setErr(err)\n\t\t\tcase *integerChanIterator:\n\t\t\t\titr.setErr(err)\n\t\t\tcase *stringChanIterator:\n\t\t\t\titr.setErr(err)\n\t\t\tcase *booleanChanIterator:\n\t\t\t\titr.setErr(err)\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"invalid aux itr type: %T\", itr))\n\t\t\t}\n\t\t}\n\t}\n}\n\n// DrainIterator reads and discards all points from itr.\nfunc DrainIterator(itr Iterator) {\n\tdefer itr.Close()\n\tswitch itr := itr.(type) {\n\tcase FloatIterator:\n\t\tfor p, _ := itr.Next(); p != nil; p, _ = itr.Next() {\n\t\t}\n\tcase IntegerIterator:\n\t\tfor p, _ := itr.Next(); 
p != nil; p, _ = itr.Next() {\n\t\t}\n\tcase StringIterator:\n\t\tfor p, _ := itr.Next(); p != nil; p, _ = itr.Next() {\n\t\t}\n\tcase BooleanIterator:\n\t\tfor p, _ := itr.Next(); p != nil; p, _ = itr.Next() {\n\t\t}\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported iterator type for draining: %T\", itr))\n\t}\n}\n\n// DrainIterators reads and discards all points from itrs.\nfunc DrainIterators(itrs []Iterator) {\n\tdefer Iterators(itrs).Close()\n\tfor {\n\t\tvar hasData bool\n\n\t\tfor _, itr := range itrs {\n\t\t\tswitch itr := itr.(type) {\n\t\t\tcase FloatIterator:\n\t\t\t\tif p, _ := itr.Next(); p != nil {\n\t\t\t\t\thasData = true\n\t\t\t\t}\n\t\t\tcase IntegerIterator:\n\t\t\t\tif p, _ := itr.Next(); p != nil {\n\t\t\t\t\thasData = true\n\t\t\t\t}\n\t\t\tcase StringIterator:\n\t\t\t\tif p, _ := itr.Next(); p != nil {\n\t\t\t\t\thasData = true\n\t\t\t\t}\n\t\t\tcase BooleanIterator:\n\t\t\t\tif p, _ := itr.Next(); p != nil {\n\t\t\t\t\thasData = true\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"unsupported iterator type for draining: %T\", itr))\n\t\t\t}\n\t\t}\n\n\t\t// Exit once all iterators return a nil point.\n\t\tif !hasData {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\n// NewReaderIterator returns an iterator that streams from a reader.\nfunc NewReaderIterator(r io.Reader, typ DataType, stats IteratorStats) Iterator {\n\tswitch typ {\n\tcase Float:\n\t\treturn newFloatReaderIterator(r, stats)\n\tcase Integer:\n\t\treturn newIntegerReaderIterator(r, stats)\n\tcase String:\n\t\treturn newStringReaderIterator(r, stats)\n\tcase Boolean:\n\t\treturn newBooleanReaderIterator(r, stats)\n\tdefault:\n\t\treturn &nilFloatReaderIterator{r: r}\n\t}\n}\n\n// IteratorCreator is an interface to create Iterators.\ntype IteratorCreator interface {\n\t// Creates a simple iterator for use in an InfluxQL query.\n\tCreateIterator(source *Measurement, opt IteratorOptions) (Iterator, error)\n}\n\n// FieldMapper returns the data type for the field inside of the 
measurement.\ntype FieldMapper interface {\n\tFieldDimensions(m *Measurement) (fields map[string]DataType, dimensions map[string]struct{}, err error)\n\n\tTypeMapper\n}\n\n// IteratorOptions is an object passed to CreateIterator to specify creation options.\ntype IteratorOptions struct {\n\t// Expression to iterate for.\n\t// This can be VarRef or a Call.\n\tExpr Expr\n\n\t// Auxilary tags or values to also retrieve for the point.\n\tAux []VarRef\n\n\t// Data sources from which to receive data. This is only used for encoding\n\t// measurements over RPC and is no longer used in the open source version.\n\tSources []Source\n\n\t// Group by interval and tags.\n\tInterval   Interval\n\tDimensions []string            // The final dimensions of the query (stays the same even in subqueries).\n\tGroupBy    map[string]struct{} // Dimensions to group points by in intermediate iterators.\n\tLocation   *time.Location\n\n\t// Fill options.\n\tFill      FillOption\n\tFillValue interface{}\n\n\t// Condition to filter by.\n\tCondition Expr\n\n\t// Time range for the iterator.\n\tStartTime int64\n\tEndTime   int64\n\n\t// Sorted in time ascending order if true.\n\tAscending bool\n\n\t// Limits the number of points per series.\n\tLimit, Offset int\n\n\t// Limits the number of series.\n\tSLimit, SOffset int\n\n\t// Removes duplicate rows from raw queries.\n\tDedupe bool\n\n\t// Determines if this is a query for raw data or an aggregate/selector.\n\tOrdered bool\n\n\t// Limits on the creation of iterators.\n\tMaxSeriesN int\n\n\t// If this channel is set and is closed, the iterator should try to exit\n\t// and close as soon as possible.\n\tInterruptCh <-chan struct{}\n\n\t// Authorizer can limit access to data\n\tAuthorizer Authorizer\n}\n\n// newIteratorOptionsStmt creates the iterator options from stmt.\nfunc newIteratorOptionsStmt(stmt *SelectStatement, sopt *SelectOptions) (opt IteratorOptions, err error) {\n\n\t// Determine time range from the condition.\n\tstartTime, endTime, 
err := TimeRange(stmt.Condition, stmt.Location)\n\tif err != nil {\n\t\treturn IteratorOptions{}, err\n\t}\n\n\tif !startTime.IsZero() {\n\t\topt.StartTime = startTime.UnixNano()\n\t} else {\n\t\tif sopt != nil {\n\t\t\topt.StartTime = sopt.MinTime.UnixNano()\n\t\t} else {\n\t\t\topt.StartTime = MinTime\n\t\t}\n\t}\n\tif !endTime.IsZero() {\n\t\topt.EndTime = endTime.UnixNano()\n\t} else {\n\t\tif sopt != nil {\n\t\t\topt.EndTime = sopt.MaxTime.UnixNano()\n\t\t} else {\n\t\t\topt.EndTime = MaxTime\n\t\t}\n\t}\n\topt.Location = stmt.Location\n\n\t// Determine group by interval.\n\tinterval, err := stmt.GroupByInterval()\n\tif err != nil {\n\t\treturn opt, err\n\t}\n\t// Set duration to zero if a negative interval has been used.\n\tif interval < 0 {\n\t\tinterval = 0\n\t} else if interval > 0 {\n\t\topt.Interval.Offset, err = stmt.GroupByOffset()\n\t\tif err != nil {\n\t\t\treturn opt, err\n\t\t}\n\t}\n\topt.Interval.Duration = interval\n\n\t// Always request an ordered output for the top level iterators.\n\t// The emitter will always emit points as ordered.\n\topt.Ordered = true\n\n\t// Determine dimensions.\n\topt.GroupBy = make(map[string]struct{}, len(opt.Dimensions))\n\tfor _, d := range stmt.Dimensions {\n\t\tif d, ok := d.Expr.(*VarRef); ok {\n\t\t\topt.Dimensions = append(opt.Dimensions, d.Val)\n\t\t\topt.GroupBy[d.Val] = struct{}{}\n\t\t}\n\t}\n\n\topt.Condition = stmt.Condition\n\topt.Ascending = stmt.TimeAscending()\n\topt.Dedupe = stmt.Dedupe\n\n\topt.Fill, opt.FillValue = stmt.Fill, stmt.FillValue\n\tif opt.Fill == NullFill && stmt.Target != nil {\n\t\t// Set the fill option to none if a target has been given.\n\t\t// Null values will get ignored when being written to the target\n\t\t// so fill(null) wouldn't write any null values to begin with.\n\t\topt.Fill = NoFill\n\t}\n\topt.Limit, opt.Offset = stmt.Limit, stmt.Offset\n\topt.SLimit, opt.SOffset = stmt.SLimit, stmt.SOffset\n\tif sopt != nil {\n\t\topt.MaxSeriesN = sopt.MaxSeriesN\n\t\topt.InterruptCh 
= sopt.InterruptCh\n\t\topt.Authorizer = sopt.Authorizer\n\t}\n\n\treturn opt, nil\n}\n\nfunc newIteratorOptionsSubstatement(stmt *SelectStatement, opt IteratorOptions) (IteratorOptions, error) {\n\tsubOpt, err := newIteratorOptionsStmt(stmt, nil)\n\tif err != nil {\n\t\treturn IteratorOptions{}, err\n\t}\n\n\tif subOpt.StartTime < opt.StartTime {\n\t\tsubOpt.StartTime = opt.StartTime\n\t}\n\tif subOpt.EndTime > opt.EndTime {\n\t\tsubOpt.EndTime = opt.EndTime\n\t}\n\t// Propagate the dimensions to the inner subquery.\n\tsubOpt.Dimensions = opt.Dimensions\n\tfor d := range opt.GroupBy {\n\t\tsubOpt.GroupBy[d] = struct{}{}\n\t}\n\tsubOpt.InterruptCh = opt.InterruptCh\n\n\t// Propagate the SLIMIT and SOFFSET from the outer query.\n\tsubOpt.SLimit += opt.SLimit\n\tsubOpt.SOffset += opt.SOffset\n\n\t// If the inner query uses a null fill option and is not a raw query,\n\t// switch it to none so we don't hit an unnecessary penalty from the\n\t// fill iterator. Null values will end up getting stripped by an outer\n\t// query anyway so there's no point in having them here. We still need\n\t// all other types of fill iterators because they can affect the result\n\t// of the outer query. 
We also do not do this for raw queries because\n\t// there is no fill iterator for them and fill(none) doesn't work with\n\t// raw queries.\n\tif !stmt.IsRawQuery && subOpt.Fill == NullFill {\n\t\tsubOpt.Fill = NoFill\n\t}\n\n\t// Inherit the ordering method from the outer query.\n\tsubOpt.Ordered = opt.Ordered\n\n\t// If there is no interval for this subquery, but the outer query has an\n\t// interval, inherit the parent interval.\n\tinterval, err := stmt.GroupByInterval()\n\tif err != nil {\n\t\treturn IteratorOptions{}, err\n\t} else if interval == 0 {\n\t\tsubOpt.Interval = opt.Interval\n\t}\n\treturn subOpt, nil\n}\n\n// MergeSorted returns true if the options require a sorted merge.\nfunc (opt IteratorOptions) MergeSorted() bool {\n\treturn opt.Ordered\n}\n\n// SeekTime returns the time the iterator should start from.\n// For ascending iterators this is the start time, for descending iterators it's the end time.\nfunc (opt IteratorOptions) SeekTime() int64 {\n\tif opt.Ascending {\n\t\treturn opt.StartTime\n\t}\n\treturn opt.EndTime\n}\n\n// Window returns the time window [start,end) that t falls within.\nfunc (opt IteratorOptions) Window(t int64) (start, end int64) {\n\tif opt.Interval.IsZero() {\n\t\treturn opt.StartTime, opt.EndTime + 1\n\t}\n\n\t// Subtract the offset to the time so we calculate the correct base interval.\n\tt -= int64(opt.Interval.Offset)\n\n\t// Retrieve the zone offset for the start time.\n\tvar zone int64\n\tif opt.Location != nil {\n\t\t_, zone = opt.Zone(t)\n\t}\n\n\t// Truncate time by duration.\n\tdt := (t + zone) % int64(opt.Interval.Duration)\n\tif dt < 0 {\n\t\t// Negative modulo rounds up instead of down, so offset\n\t\t// with the duration.\n\t\tdt += int64(opt.Interval.Duration)\n\t}\n\n\t// Find the start time.\n\tif MinTime+dt >= t {\n\t\tstart = MinTime\n\t} else {\n\t\tstart = t - dt\n\t}\n\n\t// Look for the start offset again because the first time may have been\n\t// after the offset switch. 
Now that we are at midnight in UTC, we can\n\t// lookup the zone offset again to get the real starting offset.\n\tif opt.Location != nil {\n\t\t_, startOffset := opt.Zone(start)\n\t\t// Do not adjust the offset if the offset change is greater than or\n\t\t// equal to the duration.\n\t\tif o := zone - startOffset; o != 0 && abs(o) < int64(opt.Interval.Duration) {\n\t\t\tstart += o\n\t\t}\n\t}\n\tstart += int64(opt.Interval.Offset)\n\n\t// Find the end time.\n\tif dt := int64(opt.Interval.Duration) - dt; MaxTime-dt <= t {\n\t\tend = MaxTime\n\t} else {\n\t\tend = t + dt\n\t}\n\n\t// Retrieve the zone offset for the end time.\n\tif opt.Location != nil {\n\t\t_, endOffset := opt.Zone(end)\n\t\t// Adjust the end time if the offset is different from the start offset.\n\t\t// Only apply the offset if it is smaller than the duration.\n\t\t// This prevents going back in time and creating time windows\n\t\t// that don't make any sense.\n\t\tif o := zone - endOffset; o != 0 && abs(o) < int64(opt.Interval.Duration) {\n\t\t\t// If the offset is greater than 0, that means we are adding time.\n\t\t\t// Added time goes into the previous interval because the clocks\n\t\t\t// move backwards. If the offset is less than 0, then we are skipping\n\t\t\t// time. Skipped time comes after the switch so if we have a time\n\t\t\t// interval that lands on the switch, it comes from the next\n\t\t\t// interval and not the current one. For this reason, we need to know\n\t\t\t// when the actual switch happens by seeing if the time switch is within\n\t\t\t// the current interval. We calculate the zone offset with the offset\n\t\t\t// and see if the value is the same. 
If it is, we apply the\n\t\t\t// offset.\n\t\t\tif o > 0 {\n\t\t\t\tend += o\n\t\t\t} else if _, z := opt.Zone(end + o); z == endOffset {\n\t\t\t\tend += o\n\t\t\t}\n\t\t}\n\t}\n\tend += int64(opt.Interval.Offset)\n\treturn\n}\n\n// DerivativeInterval returns the time interval for the derivative function.\nfunc (opt IteratorOptions) DerivativeInterval() Interval {\n\t// Use the interval on the derivative() call, if specified.\n\tif expr, ok := opt.Expr.(*Call); ok && len(expr.Args) == 2 {\n\t\treturn Interval{Duration: expr.Args[1].(*DurationLiteral).Val}\n\t}\n\n\t// Otherwise use the group by interval, if specified.\n\tif opt.Interval.Duration > 0 {\n\t\treturn Interval{Duration: opt.Interval.Duration}\n\t}\n\n\treturn Interval{Duration: time.Second}\n}\n\n// ElapsedInterval returns the time interval for the elapsed function.\nfunc (opt IteratorOptions) ElapsedInterval() Interval {\n\t// Use the interval on the elapsed() call, if specified.\n\tif expr, ok := opt.Expr.(*Call); ok && len(expr.Args) == 2 {\n\t\treturn Interval{Duration: expr.Args[1].(*DurationLiteral).Val}\n\t}\n\n\treturn Interval{Duration: time.Nanosecond}\n}\n\n// IntegralInterval returns the time interval for the integral function.\nfunc (opt IteratorOptions) IntegralInterval() Interval {\n\t// Use the interval on the integral() call, if specified.\n\tif expr, ok := opt.Expr.(*Call); ok && len(expr.Args) == 2 {\n\t\treturn Interval{Duration: expr.Args[1].(*DurationLiteral).Val}\n\t}\n\n\treturn Interval{Duration: time.Second}\n}\n\n// GetDimensions retrieves the dimensions for this query.\nfunc (opt IteratorOptions) GetDimensions() []string {\n\tif len(opt.GroupBy) > 0 {\n\t\tdimensions := make([]string, 0, len(opt.GroupBy))\n\t\tfor dim := range opt.GroupBy {\n\t\t\tdimensions = append(dimensions, dim)\n\t\t}\n\t\treturn dimensions\n\t}\n\treturn opt.Dimensions\n}\n\n// Zone returns the zone information for the given time. 
The offset is in nanoseconds.\nfunc (opt *IteratorOptions) Zone(ns int64) (string, int64) {\n\tif opt.Location == nil {\n\t\treturn \"\", 0\n\t}\n\n\tt := time.Unix(0, ns).In(opt.Location)\n\tname, offset := t.Zone()\n\treturn name, secToNs * int64(offset)\n}\n\n// MarshalBinary encodes opt into a binary format.\nfunc (opt *IteratorOptions) MarshalBinary() ([]byte, error) {\n\treturn proto.Marshal(encodeIteratorOptions(opt))\n}\n\n// UnmarshalBinary decodes from a binary format in to opt.\nfunc (opt *IteratorOptions) UnmarshalBinary(buf []byte) error {\n\tvar pb internal.IteratorOptions\n\tif err := proto.Unmarshal(buf, &pb); err != nil {\n\t\treturn err\n\t}\n\n\tother, err := decodeIteratorOptions(&pb)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*opt = *other\n\n\treturn nil\n}\n\nfunc encodeIteratorOptions(opt *IteratorOptions) *internal.IteratorOptions {\n\tpb := &internal.IteratorOptions{\n\t\tInterval:   encodeInterval(opt.Interval),\n\t\tDimensions: opt.Dimensions,\n\t\tFill:       proto.Int32(int32(opt.Fill)),\n\t\tStartTime:  proto.Int64(opt.StartTime),\n\t\tEndTime:    proto.Int64(opt.EndTime),\n\t\tAscending:  proto.Bool(opt.Ascending),\n\t\tLimit:      proto.Int64(int64(opt.Limit)),\n\t\tOffset:     proto.Int64(int64(opt.Offset)),\n\t\tSLimit:     proto.Int64(int64(opt.SLimit)),\n\t\tSOffset:    proto.Int64(int64(opt.SOffset)),\n\t\tDedupe:     proto.Bool(opt.Dedupe),\n\t\tMaxSeriesN: proto.Int64(int64(opt.MaxSeriesN)),\n\t\tOrdered:    proto.Bool(opt.Ordered),\n\t}\n\n\t// Set expression, if set.\n\tif opt.Expr != nil {\n\t\tpb.Expr = proto.String(opt.Expr.String())\n\t}\n\n\t// Set the location, if set.\n\tif opt.Location != nil {\n\t\tpb.Location = proto.String(opt.Location.String())\n\t}\n\n\t// Convert and encode aux fields as variable references.\n\tif opt.Aux != nil {\n\t\tpb.Fields = make([]*internal.VarRef, len(opt.Aux))\n\t\tpb.Aux = make([]string, len(opt.Aux))\n\t\tfor i, ref := range opt.Aux {\n\t\t\tpb.Fields[i] = 
encodeVarRef(ref)\n\t\t\tpb.Aux[i] = ref.Val\n\t\t}\n\t}\n\n\t// Encode group by dimensions from a map.\n\tif opt.GroupBy != nil {\n\t\tdimensions := make([]string, 0, len(opt.GroupBy))\n\t\tfor dim := range opt.GroupBy {\n\t\t\tdimensions = append(dimensions, dim)\n\t\t}\n\t\tpb.GroupBy = dimensions\n\t}\n\n\t// Convert and encode sources to measurements.\n\tif opt.Sources != nil {\n\t\tsources := make([]*internal.Measurement, len(opt.Sources))\n\t\tfor i, source := range opt.Sources {\n\t\t\tmm := source.(*Measurement)\n\t\t\tsources[i] = encodeMeasurement(mm)\n\t\t}\n\t\tpb.Sources = sources\n\t}\n\n\t// Fill value can only be a number. Set it if available.\n\tif v, ok := opt.FillValue.(float64); ok {\n\t\tpb.FillValue = proto.Float64(v)\n\t}\n\n\t// Set condition, if set.\n\tif opt.Condition != nil {\n\t\tpb.Condition = proto.String(opt.Condition.String())\n\t}\n\n\treturn pb\n}\n\nfunc decodeIteratorOptions(pb *internal.IteratorOptions) (*IteratorOptions, error) {\n\topt := &IteratorOptions{\n\t\tInterval:   decodeInterval(pb.GetInterval()),\n\t\tDimensions: pb.GetDimensions(),\n\t\tFill:       FillOption(pb.GetFill()),\n\t\tStartTime:  pb.GetStartTime(),\n\t\tEndTime:    pb.GetEndTime(),\n\t\tAscending:  pb.GetAscending(),\n\t\tLimit:      int(pb.GetLimit()),\n\t\tOffset:     int(pb.GetOffset()),\n\t\tSLimit:     int(pb.GetSLimit()),\n\t\tSOffset:    int(pb.GetSOffset()),\n\t\tDedupe:     pb.GetDedupe(),\n\t\tMaxSeriesN: int(pb.GetMaxSeriesN()),\n\t\tOrdered:    pb.GetOrdered(),\n\t}\n\n\t// Set expression, if set.\n\tif pb.Expr != nil {\n\t\texpr, err := ParseExpr(pb.GetExpr())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\topt.Expr = expr\n\t}\n\n\tif pb.Location != nil {\n\t\tloc, err := time.LoadLocation(pb.GetLocation())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\topt.Location = loc\n\t}\n\n\t// Convert and decode variable references.\n\tif fields := pb.GetFields(); fields != nil {\n\t\topt.Aux = make([]VarRef, len(fields))\n\t\tfor 
i, ref := range fields {\n\t\t\topt.Aux[i] = decodeVarRef(ref)\n\t\t}\n\t} else if aux := pb.GetAux(); aux != nil {\n\t\topt.Aux = make([]VarRef, len(aux))\n\t\tfor i, name := range aux {\n\t\t\topt.Aux[i] = VarRef{Val: name}\n\t\t}\n\t}\n\n\t// Convert and decode sources to measurements.\n\tif pb.Sources != nil {\n\t\tsources := make([]Source, len(pb.GetSources()))\n\t\tfor i, source := range pb.GetSources() {\n\t\t\tmm, err := decodeMeasurement(source)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tsources[i] = mm\n\t\t}\n\t\topt.Sources = sources\n\t}\n\n\t// Convert group by dimensions to a map.\n\tif pb.GroupBy != nil {\n\t\tdimensions := make(map[string]struct{}, len(pb.GroupBy))\n\t\tfor _, dim := range pb.GetGroupBy() {\n\t\t\tdimensions[dim] = struct{}{}\n\t\t}\n\t\topt.GroupBy = dimensions\n\t}\n\n\t// Set the fill value, if set.\n\tif pb.FillValue != nil {\n\t\topt.FillValue = pb.GetFillValue()\n\t}\n\n\t// Set condition, if set.\n\tif pb.Condition != nil {\n\t\texpr, err := ParseExpr(pb.GetCondition())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\topt.Condition = expr\n\t}\n\n\treturn opt, nil\n}\n\n// selectInfo represents an object that stores info about select fields.\ntype selectInfo struct {\n\tcalls map[*Call]struct{}\n\trefs  map[*VarRef]struct{}\n}\n\n// newSelectInfo creates a object with call and var ref info from stmt.\nfunc newSelectInfo(stmt *SelectStatement) *selectInfo {\n\tinfo := &selectInfo{\n\t\tcalls: make(map[*Call]struct{}),\n\t\trefs:  make(map[*VarRef]struct{}),\n\t}\n\tWalk(info, stmt.Fields)\n\treturn info\n}\n\nfunc (v *selectInfo) Visit(n Node) Visitor {\n\tswitch n := n.(type) {\n\tcase *Call:\n\t\tv.calls[n] = struct{}{}\n\t\treturn nil\n\tcase *VarRef:\n\t\tv.refs[n] = struct{}{}\n\t\treturn nil\n\t}\n\treturn v\n}\n\n// FindSelector returns a selector from the selectInfo. 
This will only\n// return a selector if the Call is a selector and it's the only function\n// in the selectInfo.\nfunc (v *selectInfo) FindSelector() *Call {\n\tif len(v.calls) != 1 {\n\t\treturn nil\n\t}\n\n\tfor s := range v.calls {\n\t\tif IsSelector(s) {\n\t\t\treturn s\n\t\t}\n\t}\n\treturn nil\n}\n\n// Interval represents a repeating interval for a query.\ntype Interval struct {\n\tDuration time.Duration\n\tOffset   time.Duration\n}\n\n// IsZero returns true if the interval has no duration.\nfunc (i Interval) IsZero() bool { return i.Duration == 0 }\n\nfunc encodeInterval(i Interval) *internal.Interval {\n\treturn &internal.Interval{\n\t\tDuration: proto.Int64(i.Duration.Nanoseconds()),\n\t\tOffset:   proto.Int64(i.Offset.Nanoseconds()),\n\t}\n}\n\nfunc decodeInterval(pb *internal.Interval) Interval {\n\treturn Interval{\n\t\tDuration: time.Duration(pb.GetDuration()),\n\t\tOffset:   time.Duration(pb.GetOffset()),\n\t}\n}\n\nfunc encodeVarRef(ref VarRef) *internal.VarRef {\n\treturn &internal.VarRef{\n\t\tVal:  proto.String(ref.Val),\n\t\tType: proto.Int32(int32(ref.Type)),\n\t}\n}\n\nfunc decodeVarRef(pb *internal.VarRef) VarRef {\n\treturn VarRef{\n\t\tVal:  pb.GetVal(),\n\t\tType: DataType(pb.GetType()),\n\t}\n}\n\ntype nilFloatIterator struct{}\n\nfunc (*nilFloatIterator) Stats() IteratorStats       { return IteratorStats{} }\nfunc (*nilFloatIterator) Close() error               { return nil }\nfunc (*nilFloatIterator) Next() (*FloatPoint, error) { return nil, nil }\n\ntype nilFloatReaderIterator struct {\n\tr io.Reader\n}\n\nfunc (*nilFloatReaderIterator) Stats() IteratorStats { return IteratorStats{} }\nfunc (itr *nilFloatReaderIterator) Close() error {\n\tif r, ok := itr.r.(io.ReadCloser); ok {\n\t\titr.r = nil\n\t\treturn r.Close()\n\t}\n\treturn nil\n}\nfunc (*nilFloatReaderIterator) Next() (*FloatPoint, error) { return nil, nil }\n\n// integerFloatTransformIterator executes a function to modify an existing point for every\n// output of the input 
iterator.\ntype integerFloatTransformIterator struct {\n\tinput IntegerIterator\n\tfn    integerFloatTransformFunc\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *integerFloatTransformIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *integerFloatTransformIterator) Close() error { return itr.input.Close() }\n\n// Next returns the minimum value for the next available interval.\nfunc (itr *integerFloatTransformIterator) Next() (*FloatPoint, error) {\n\tp, err := itr.input.Next()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if p != nil {\n\t\treturn itr.fn(p), nil\n\t}\n\treturn nil, nil\n}\n\n// integerFloatTransformFunc creates or modifies a point.\n// The point passed in may be modified and returned rather than allocating a\n// new point if possible.\ntype integerFloatTransformFunc func(p *IntegerPoint) *FloatPoint\n\ntype integerFloatCastIterator struct {\n\tinput IntegerIterator\n}\n\nfunc (itr *integerFloatCastIterator) Stats() IteratorStats { return itr.input.Stats() }\nfunc (itr *integerFloatCastIterator) Close() error         { return itr.input.Close() }\nfunc (itr *integerFloatCastIterator) Next() (*FloatPoint, error) {\n\tp, err := itr.input.Next()\n\tif p == nil || err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &FloatPoint{\n\t\tName:  p.Name,\n\t\tTags:  p.Tags,\n\t\tTime:  p.Time,\n\t\tNil:   p.Nil,\n\t\tValue: float64(p.Value),\n\t\tAux:   p.Aux,\n\t}, nil\n}\n\n// IteratorStats represents statistics about an iterator.\n// Some statistics are available immediately upon iterator creation while\n// some are derived as the iterator processes data.\ntype IteratorStats struct {\n\tSeriesN int // series represented\n\tPointN  int // points returned\n}\n\n// Add aggregates fields from s and other together. 
Overwrites s.\nfunc (s *IteratorStats) Add(other IteratorStats) {\n\ts.SeriesN += other.SeriesN\n\ts.PointN += other.PointN\n}\n\nfunc encodeIteratorStats(stats *IteratorStats) *internal.IteratorStats {\n\treturn &internal.IteratorStats{\n\t\tSeriesN: proto.Int64(int64(stats.SeriesN)),\n\t\tPointN:  proto.Int64(int64(stats.PointN)),\n\t}\n}\n\nfunc decodeIteratorStats(pb *internal.IteratorStats) IteratorStats {\n\treturn IteratorStats{\n\t\tSeriesN: int(pb.GetSeriesN()),\n\t\tPointN:  int(pb.GetPointN()),\n\t}\n}\n\n// floatFastDedupeIterator outputs unique points where the point has a single aux field.\ntype floatFastDedupeIterator struct {\n\tinput FloatIterator\n\tm     map[fastDedupeKey]struct{} // lookup of points already sent\n}\n\n// newFloatFastDedupeIterator returns a new instance of floatFastDedupeIterator.\nfunc newFloatFastDedupeIterator(input FloatIterator) *floatFastDedupeIterator {\n\treturn &floatFastDedupeIterator{\n\t\tinput: input,\n\t\tm:     make(map[fastDedupeKey]struct{}),\n\t}\n}\n\n// Stats returns stats from the input iterator.\nfunc (itr *floatFastDedupeIterator) Stats() IteratorStats { return itr.input.Stats() }\n\n// Close closes the iterator and all child iterators.\nfunc (itr *floatFastDedupeIterator) Close() error { return itr.input.Close() }\n\n// Next returns the next unique point from the input iterator.\nfunc (itr *floatFastDedupeIterator) Next() (*FloatPoint, error) {\n\tfor {\n\t\t// Read next point.\n\t\t// Skip if there are not any aux fields.\n\t\tp, err := itr.input.Next()\n\t\tif p == nil || err != nil {\n\t\t\treturn nil, err\n\t\t} else if len(p.Aux) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If the point has already been output then move to the next point.\n\t\tkey := fastDedupeKey{name: p.Name}\n\t\tkey.values[0] = p.Aux[0]\n\t\tif len(p.Aux) > 1 {\n\t\t\tkey.values[1] = p.Aux[1]\n\t\t}\n\t\tif _, ok := itr.m[key]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Otherwise mark it as emitted and return point.\n\t\titr.m[key] = 
struct{}{}\n\t\treturn p, nil\n\t}\n}\n\ntype fastDedupeKey struct {\n\tname   string\n\tvalues [2]interface{}\n}\n\ntype reverseStringSlice []string\n\nfunc (p reverseStringSlice) Len() int           { return len(p) }\nfunc (p reverseStringSlice) Less(i, j int) bool { return p[i] > p[j] }\nfunc (p reverseStringSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }\n\nfunc abs(v int64) int64 {\n\tif v < 0 {\n\t\treturn -v\n\t}\n\treturn v\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/iterator_mapper.go",
    "content": "package influxql\n\nimport \"fmt\"\n\ntype IteratorMap interface {\n\tValue(tags Tags, buf []interface{}) interface{}\n}\n\ntype FieldMap int\n\nfunc (i FieldMap) Value(tags Tags, buf []interface{}) interface{} { return buf[i] }\n\ntype TagMap string\n\nfunc (s TagMap) Value(tags Tags, buf []interface{}) interface{} { return tags.Value(string(s)) }\n\ntype NullMap struct{}\n\nfunc (NullMap) Value(tags Tags, buf []interface{}) interface{} { return nil }\n\nfunc NewIteratorMapper(itrs []Iterator, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) Iterator {\n\tif driver != nil {\n\t\tswitch driver := driver.(type) {\n\t\tcase FieldMap:\n\t\t\tswitch itrs[int(driver)].(type) {\n\t\t\tcase FloatIterator:\n\t\t\t\treturn newFloatIteratorMapper(itrs, driver, fields, opt)\n\t\t\tcase IntegerIterator:\n\t\t\t\treturn newIntegerIteratorMapper(itrs, driver, fields, opt)\n\t\t\tcase StringIterator:\n\t\t\t\treturn newStringIteratorMapper(itrs, driver, fields, opt)\n\t\t\tcase BooleanIterator:\n\t\t\t\treturn newBooleanIteratorMapper(itrs, driver, fields, opt)\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"unable to map iterator type: %T\", itrs[int(driver)]))\n\t\t\t}\n\t\tcase TagMap:\n\t\t\treturn newStringIteratorMapper(itrs, driver, fields, opt)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"unable to create iterator mapper with driveression type: %T\", driver))\n\t\t}\n\t}\n\treturn newFloatIteratorMapper(itrs, nil, fields, opt)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/iterator_mapper_test.go",
    "content": "package influxql_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/pkg/deep\"\n)\n\nfunc TestIteratorMapper(t *testing.T) {\n\tval1itr := &FloatIterator{Points: []influxql.FloatPoint{\n\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: 1},\n\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 5, Value: 3},\n\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 2, Value: 2},\n\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 8, Value: 8},\n\t}}\n\n\tval2itr := &StringIterator{Points: []influxql.StringPoint{\n\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: \"a\"},\n\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 5, Value: \"c\"},\n\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 2, Value: \"b\"},\n\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 8, Value: \"h\"},\n\t}}\n\tinputs := []influxql.Iterator{val1itr, val2itr}\n\n\topt := influxql.IteratorOptions{\n\t\tAscending: true,\n\t\tAux: []influxql.VarRef{\n\t\t\t{Val: \"val1\", Type: influxql.Float},\n\t\t\t{Val: \"val2\", Type: influxql.String},\n\t\t},\n\t}\n\titr := influxql.NewIteratorMapper(inputs, nil, []influxql.IteratorMap{\n\t\tinfluxql.FieldMap(0),\n\t\tinfluxql.FieldMap(1),\n\t\tinfluxql.TagMap(\"host\"),\n\t}, opt)\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Aux: []interface{}{float64(1), \"a\", \"A\"}}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 5, Aux: []interface{}{float64(3), \"c\", \"A\"}}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 2, Aux: []interface{}{float64(2), \"b\", \"B\"}}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), 
Time: 8, Aux: []interface{}{float64(8), \"h\", \"B\"}}},\n\t}) {\n\t\tt.Errorf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n\n\tfor i, input := range inputs {\n\t\tswitch input := input.(type) {\n\t\tcase *FloatIterator:\n\t\t\tif !input.Closed {\n\t\t\t\tt.Errorf(\"iterator %d not closed\", i)\n\t\t\t}\n\t\tcase *StringIterator:\n\t\t\tif !input.Closed {\n\t\t\t\tt.Errorf(\"iterator %d not closed\", i)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/iterator_test.go",
    "content": "package influxql_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/pkg/deep\"\n)\n\n// Ensure that a set of iterators can be merged together, sorted by window and name/tag.\nfunc TestMergeIterator_Float(t *testing.T) {\n\tinputs := []*FloatIterator{\n\t\t{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: 1},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: 3},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: 4},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: 2},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 8},\n\t\t}},\n\t\t{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: 7},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 5},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: 6},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: 9},\n\t\t}},\n\t\t{Points: []influxql.FloatPoint{}},\n\t\t{Points: []influxql.FloatPoint{}},\n\t}\n\n\titr := influxql.NewMergeIterator(FloatIterators(inputs), influxql.IteratorOptions{\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: 10 * time.Nanosecond,\n\t\t},\n\t\tDimensions: []string{\"host\"},\n\t\tAscending:  true,\n\t})\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: 3}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: 7}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: 
ParseTags(\"host=A\"), Time: 30, Value: 4}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 5}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: 6}},\n\t\t{&influxql.FloatPoint{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: 9}},\n\t\t{&influxql.FloatPoint{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 8}},\n\t}) {\n\t\tt.Errorf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n\n\tfor i, input := range inputs {\n\t\tif !input.Closed {\n\t\t\tt.Errorf(\"iterator %d not closed\", i)\n\t\t}\n\t}\n}\n\n// Ensure that a set of iterators can be merged together, sorted by window and name/tag.\nfunc TestMergeIterator_Integer(t *testing.T) {\n\tinputs := []*IntegerIterator{\n\t\t{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: 1},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: 3},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: 4},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: 2},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 8},\n\t\t}},\n\t\t{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: 7},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 5},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: 6},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: 9},\n\t\t}},\n\t\t{Points: []influxql.IntegerPoint{}},\n\t}\n\titr := influxql.NewMergeIterator(IntegerIterators(inputs), influxql.IteratorOptions{\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: 10 * time.Nanosecond,\n\t\t},\n\t\tDimensions: []string{\"host\"},\n\t\tAscending:  true,\n\t})\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil 
{\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: 3}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: 7}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: 4}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: 2}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 5}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: 6}},\n\t\t{&influxql.IntegerPoint{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: 9}},\n\t\t{&influxql.IntegerPoint{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 8}},\n\t}) {\n\t\tt.Errorf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n\n\tfor i, input := range inputs {\n\t\tif !input.Closed {\n\t\t\tt.Errorf(\"iterator %d not closed\", i)\n\t\t}\n\t}\n}\n\n// Ensure that a set of iterators can be merged together, sorted by window and name/tag.\nfunc TestMergeIterator_String(t *testing.T) {\n\tinputs := []*StringIterator{\n\t\t{Points: []influxql.StringPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: \"a\"},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: \"c\"},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: \"d\"},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: \"b\"},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 11, Value: \"h\"},\n\t\t}},\n\t\t{Points: []influxql.StringPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: \"g\"},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: \"e\"},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, 
Value: \"f\"},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: \"i\"},\n\t\t}},\n\t\t{Points: []influxql.StringPoint{}},\n\t}\n\titr := influxql.NewMergeIterator(StringIterators(inputs), influxql.IteratorOptions{\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: 10 * time.Nanosecond,\n\t\t},\n\t\tDimensions: []string{\"host\"},\n\t\tAscending:  true,\n\t})\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: \"a\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: \"c\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: \"g\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: \"d\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: \"b\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: \"e\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: \"f\"}},\n\t\t{&influxql.StringPoint{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: \"i\"}},\n\t\t{&influxql.StringPoint{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 11, Value: \"h\"}},\n\t}) {\n\t\tt.Errorf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n\n\tfor i, input := range inputs {\n\t\tif !input.Closed {\n\t\t\tt.Errorf(\"iterator %d not closed\", i)\n\t\t}\n\t}\n}\n\n// Ensure that a set of iterators can be merged together, sorted by window and name/tag.\nfunc TestMergeIterator_Boolean(t *testing.T) {\n\tinputs := []*BooleanIterator{\n\t\t{Points: []influxql.BooleanPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: true},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: true},\n\t\t\t{Name: 
\"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: false},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: false},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 11, Value: true},\n\t\t}},\n\t\t{Points: []influxql.BooleanPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: true},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: true},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: false},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: false},\n\t\t}},\n\t\t{Points: []influxql.BooleanPoint{}},\n\t}\n\titr := influxql.NewMergeIterator(BooleanIterators(inputs), influxql.IteratorOptions{\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: 10 * time.Nanosecond,\n\t\t},\n\t\tDimensions: []string{\"host\"},\n\t\tAscending:  true,\n\t})\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: true}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: true}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: true}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: false}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: false}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: true}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: false}},\n\t\t{&influxql.BooleanPoint{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: false}},\n\t\t{&influxql.BooleanPoint{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 11, Value: true}},\n\t}) {\n\t\tt.Errorf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n\n\tfor i, 
input := range inputs {\n\t\tif !input.Closed {\n\t\t\tt.Errorf(\"iterator %d not closed\", i)\n\t\t}\n\t}\n}\n\nfunc TestMergeIterator_Nil(t *testing.T) {\n\titr := influxql.NewMergeIterator([]influxql.Iterator{nil}, influxql.IteratorOptions{})\n\tif itr != nil {\n\t\tt.Fatalf(\"unexpected iterator: %#v\", itr)\n\t}\n}\n\nfunc TestMergeIterator_Cast_Float(t *testing.T) {\n\tinputs := []influxql.Iterator{\n\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: 1},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: 3},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: 4},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: 2},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 8},\n\t\t}},\n\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: 7},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 5},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: 6},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: 9},\n\t\t}},\n\t}\n\n\titr := influxql.NewMergeIterator(inputs, influxql.IteratorOptions{\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: 10 * time.Nanosecond,\n\t\t},\n\t\tDimensions: []string{\"host\"},\n\t\tAscending:  true,\n\t})\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: 3}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: 7}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: 4}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: 
ParseTags(\"host=B\"), Time: 1, Value: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 5}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: 6}},\n\t\t{&influxql.FloatPoint{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: 9}},\n\t\t{&influxql.FloatPoint{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 8}},\n\t}) {\n\t\tt.Errorf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n\n\tfor i, input := range inputs {\n\t\tswitch input := input.(type) {\n\t\tcase *FloatIterator:\n\t\t\tif !input.Closed {\n\t\t\t\tt.Errorf(\"iterator %d not closed\", i)\n\t\t\t}\n\t\tcase *IntegerIterator:\n\t\t\tif !input.Closed {\n\t\t\t\tt.Errorf(\"iterator %d not closed\", i)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Ensure that a set of iterators can be merged together, sorted by name/tag.\nfunc TestSortedMergeIterator_Float(t *testing.T) {\n\tinputs := []*FloatIterator{\n\t\t{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: 1},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: 3},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: 4},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: 2},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 4, Value: 8},\n\t\t}},\n\t\t{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: 7},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 5},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: 6},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: 9},\n\t\t}},\n\t\t{Points: []influxql.FloatPoint{}},\n\t}\n\titr := influxql.NewSortedMergeIterator(FloatIterators(inputs), influxql.IteratorOptions{\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: 10 * time.Nanosecond,\n\t\t},\n\t\tDimensions: []string{\"host\"},\n\t\tAscending:  true,\n\t})\n\tif a, 
err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: 3}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: 7}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: 4}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 5}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: 6}},\n\t\t{&influxql.FloatPoint{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: 9}},\n\t\t{&influxql.FloatPoint{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 4, Value: 8}},\n\t}) {\n\t\tt.Errorf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n\n\tfor i, input := range inputs {\n\t\tif !input.Closed {\n\t\t\tt.Errorf(\"iterator %d not closed\", i)\n\t\t}\n\t}\n}\n\n// Ensure that a set of iterators can be merged together, sorted by name/tag.\nfunc TestSortedMergeIterator_Integer(t *testing.T) {\n\tinputs := []*IntegerIterator{\n\t\t{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: 1},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: 3},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: 4},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: 2},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 4, Value: 8},\n\t\t}},\n\t\t{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: 7},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 5},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), 
Time: 13, Value: 6},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: 9},\n\t\t}},\n\t\t{Points: []influxql.IntegerPoint{}},\n\t}\n\titr := influxql.NewSortedMergeIterator(IntegerIterators(inputs), influxql.IteratorOptions{\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: 10 * time.Nanosecond,\n\t\t},\n\t\tDimensions: []string{\"host\"},\n\t\tAscending:  true,\n\t})\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: 3}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: 7}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: 4}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: 2}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 5}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: 6}},\n\t\t{&influxql.IntegerPoint{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: 9}},\n\t\t{&influxql.IntegerPoint{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 4, Value: 8}},\n\t}) {\n\t\tt.Errorf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n\n\tfor i, input := range inputs {\n\t\tif !input.Closed {\n\t\t\tt.Errorf(\"iterator %d not closed\", i)\n\t\t}\n\t}\n}\n\n// Ensure that a set of iterators can be merged together, sorted by name/tag.\nfunc TestSortedMergeIterator_String(t *testing.T) {\n\tinputs := []*StringIterator{\n\t\t{Points: []influxql.StringPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: \"a\"},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: \"c\"},\n\t\t\t{Name: \"cpu\", Tags: 
ParseTags(\"host=A\"), Time: 30, Value: \"d\"},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: \"b\"},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 4, Value: \"h\"},\n\t\t}},\n\t\t{Points: []influxql.StringPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: \"g\"},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: \"e\"},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: \"f\"},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: \"i\"},\n\t\t}},\n\t\t{Points: []influxql.StringPoint{}},\n\t}\n\titr := influxql.NewSortedMergeIterator(StringIterators(inputs), influxql.IteratorOptions{\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: 10 * time.Nanosecond,\n\t\t},\n\t\tDimensions: []string{\"host\"},\n\t\tAscending:  true,\n\t})\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: \"a\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: \"c\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: \"g\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: \"d\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: \"b\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: \"e\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: \"f\"}},\n\t\t{&influxql.StringPoint{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: \"i\"}},\n\t\t{&influxql.StringPoint{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 4, Value: \"h\"}},\n\t}) {\n\t\tt.Errorf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n\n\tfor i, input := range 
inputs {\n\t\tif !input.Closed {\n\t\t\tt.Errorf(\"iterator %d not closed\", i)\n\t\t}\n\t}\n}\n\n// Ensure that a set of iterators can be merged together, sorted by name/tag.\nfunc TestSortedMergeIterator_Boolean(t *testing.T) {\n\tinputs := []*BooleanIterator{\n\t\t{Points: []influxql.BooleanPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: true},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: true},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: false},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: false},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 4, Value: true},\n\t\t}},\n\t\t{Points: []influxql.BooleanPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: true},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: true},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: false},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: true},\n\t\t}},\n\t\t{Points: []influxql.BooleanPoint{}},\n\t}\n\titr := influxql.NewSortedMergeIterator(BooleanIterators(inputs), influxql.IteratorOptions{\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: 10 * time.Nanosecond,\n\t\t},\n\t\tDimensions: []string{\"host\"},\n\t\tAscending:  true,\n\t})\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: true}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: true}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: true}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: false}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: 
false}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: true}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: false}},\n\t\t{&influxql.BooleanPoint{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: true}},\n\t\t{&influxql.BooleanPoint{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 4, Value: true}},\n\t}) {\n\t\tt.Errorf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n\n\tfor i, input := range inputs {\n\t\tif !input.Closed {\n\t\t\tt.Errorf(\"iterator %d not closed\", i)\n\t\t}\n\t}\n}\n\nfunc TestSortedMergeIterator_Nil(t *testing.T) {\n\titr := influxql.NewSortedMergeIterator([]influxql.Iterator{nil}, influxql.IteratorOptions{})\n\tif itr != nil {\n\t\tt.Fatalf(\"unexpected iterator: %#v\", itr)\n\t}\n}\n\nfunc TestSortedMergeIterator_Cast_Float(t *testing.T) {\n\tinputs := []influxql.Iterator{\n\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: 1},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: 3},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: 4},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: 2},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 4, Value: 8},\n\t\t}},\n\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: 7},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 5},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: 6},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: 9},\n\t\t}},\n\t}\n\n\titr := influxql.NewSortedMergeIterator(inputs, influxql.IteratorOptions{\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: 10 * time.Nanosecond,\n\t\t},\n\t\tDimensions: []string{\"host\"},\n\t\tAscending:  true,\n\t})\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil 
{\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12, Value: 3}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20, Value: 7}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30, Value: 4}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 1, Value: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 11, Value: 5}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 13, Value: 6}},\n\t\t{&influxql.FloatPoint{Name: \"mem\", Tags: ParseTags(\"host=A\"), Time: 25, Value: 9}},\n\t\t{&influxql.FloatPoint{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 4, Value: 8}},\n\t}) {\n\t\tt.Errorf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n\n\tfor i, input := range inputs {\n\t\tswitch input := input.(type) {\n\t\tcase *FloatIterator:\n\t\t\tif !input.Closed {\n\t\t\t\tt.Errorf(\"iterator %d not closed\", i)\n\t\t\t}\n\t\tcase *IntegerIterator:\n\t\t\tif !input.Closed {\n\t\t\t\tt.Errorf(\"iterator %d not closed\", i)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Ensure limit iterators work with limit and offset.\nfunc TestLimitIterator_Float(t *testing.T) {\n\tinput := &FloatIterator{Points: []influxql.FloatPoint{\n\t\t{Name: \"cpu\", Time: 0, Value: 1},\n\t\t{Name: \"cpu\", Time: 5, Value: 3},\n\t\t{Name: \"cpu\", Time: 10, Value: 5},\n\t\t{Name: \"mem\", Time: 5, Value: 3},\n\t\t{Name: \"mem\", Time: 7, Value: 8},\n\t}}\n\n\titr := influxql.NewLimitIterator(input, influxql.IteratorOptions{\n\t\tLimit:  1,\n\t\tOffset: 1,\n\t})\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: 
\"cpu\", Time: 5, Value: 3}},\n\t\t{&influxql.FloatPoint{Name: \"mem\", Time: 7, Value: 8}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n\n\tif !input.Closed {\n\t\tt.Error(\"iterator not closed\")\n\t}\n}\n\n// Ensure limit iterators work with limit and offset.\nfunc TestLimitIterator_Integer(t *testing.T) {\n\tinput := &IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t{Name: \"cpu\", Time: 0, Value: 1},\n\t\t{Name: \"cpu\", Time: 5, Value: 3},\n\t\t{Name: \"cpu\", Time: 10, Value: 5},\n\t\t{Name: \"mem\", Time: 5, Value: 3},\n\t\t{Name: \"mem\", Time: 7, Value: 8},\n\t}}\n\n\titr := influxql.NewLimitIterator(input, influxql.IteratorOptions{\n\t\tLimit:  1,\n\t\tOffset: 1,\n\t})\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 5, Value: 3}},\n\t\t{&influxql.IntegerPoint{Name: \"mem\", Time: 7, Value: 8}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n\n\tif !input.Closed {\n\t\tt.Error(\"iterator not closed\")\n\t}\n}\n\n// Ensure limit iterators work with limit and offset.\nfunc TestLimitIterator_String(t *testing.T) {\n\tinput := &StringIterator{Points: []influxql.StringPoint{\n\t\t{Name: \"cpu\", Time: 0, Value: \"a\"},\n\t\t{Name: \"cpu\", Time: 5, Value: \"b\"},\n\t\t{Name: \"cpu\", Time: 10, Value: \"c\"},\n\t\t{Name: \"mem\", Time: 5, Value: \"d\"},\n\t\t{Name: \"mem\", Time: 7, Value: \"e\"},\n\t}}\n\n\titr := influxql.NewLimitIterator(input, influxql.IteratorOptions{\n\t\tLimit:  1,\n\t\tOffset: 1,\n\t})\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.StringPoint{Name: \"cpu\", Time: 5, Value: \"b\"}},\n\t\t{&influxql.StringPoint{Name: \"mem\", Time: 7, Value: \"e\"}},\n\t}) 
{\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n\n\tif !input.Closed {\n\t\tt.Error(\"iterator not closed\")\n\t}\n}\n\n// Ensure limit iterators work with limit and offset.\nfunc TestLimitIterator_Boolean(t *testing.T) {\n\tinput := &BooleanIterator{Points: []influxql.BooleanPoint{\n\t\t{Name: \"cpu\", Time: 0, Value: true},\n\t\t{Name: \"cpu\", Time: 5, Value: false},\n\t\t{Name: \"cpu\", Time: 10, Value: true},\n\t\t{Name: \"mem\", Time: 5, Value: false},\n\t\t{Name: \"mem\", Time: 7, Value: true},\n\t}}\n\n\titr := influxql.NewLimitIterator(input, influxql.IteratorOptions{\n\t\tLimit:  1,\n\t\tOffset: 1,\n\t})\n\n\tif a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Time: 5, Value: false}},\n\t\t{&influxql.BooleanPoint{Name: \"mem\", Time: 7, Value: true}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n\n\tif !input.Closed {\n\t\tt.Error(\"iterator not closed\")\n\t}\n}\n\n// Ensure auxilary iterators can be created for auxilary fields.\nfunc TestFloatAuxIterator(t *testing.T) {\n\titr := influxql.NewAuxIterator(\n\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Time: 0, Value: 1, Aux: []interface{}{float64(100), float64(200)}},\n\t\t\t{Time: 1, Value: 2, Aux: []interface{}{float64(500), math.NaN()}},\n\t\t}},\n\t\tinfluxql.IteratorOptions{Aux: []influxql.VarRef{{Val: \"f0\", Type: influxql.Float}, {Val: \"f1\", Type: influxql.Float}}},\n\t)\n\n\titrs := []influxql.Iterator{\n\t\titr,\n\t\titr.Iterator(\"f0\", influxql.Unknown),\n\t\titr.Iterator(\"f1\", influxql.Unknown),\n\t\titr.Iterator(\"f0\", influxql.Unknown),\n\t}\n\titr.Start()\n\n\tif a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{\n\t\t\t&influxql.FloatPoint{Time: 0, Value: 1, Aux: 
[]interface{}{float64(100), float64(200)}},\n\t\t\t&influxql.FloatPoint{Time: 0, Value: float64(100)},\n\t\t\t&influxql.FloatPoint{Time: 0, Value: float64(200)},\n\t\t\t&influxql.FloatPoint{Time: 0, Value: float64(100)},\n\t\t},\n\t\t{\n\t\t\t&influxql.FloatPoint{Time: 1, Value: 2, Aux: []interface{}{float64(500), math.NaN()}},\n\t\t\t&influxql.FloatPoint{Time: 1, Value: float64(500)},\n\t\t\t&influxql.FloatPoint{Time: 1, Value: math.NaN()},\n\t\t\t&influxql.FloatPoint{Time: 1, Value: float64(500)},\n\t\t},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure limit iterator returns a subset of points.\nfunc TestLimitIterator(t *testing.T) {\n\titr := influxql.NewLimitIterator(\n\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Time: 0, Value: 0},\n\t\t\t{Time: 1, Value: 1},\n\t\t\t{Time: 2, Value: 2},\n\t\t\t{Time: 3, Value: 3},\n\t\t}},\n\t\tinfluxql.IteratorOptions{\n\t\t\tLimit:     2,\n\t\t\tOffset:    1,\n\t\t\tStartTime: influxql.MinTime,\n\t\t\tEndTime:   influxql.MaxTime,\n\t\t},\n\t)\n\n\tif a, err := (Iterators{itr}).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Time: 1, Value: 1}},\n\t\t{&influxql.FloatPoint{Time: 2, Value: 2}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestFillIterator_DST(t *testing.T) {\n\tfor _, tt := range []struct {\n\t\tname       string\n\t\tstart, end time.Time\n\t\tpoints     []time.Duration\n\t\topt        influxql.IteratorOptions\n\t}{\n\t\t{\n\t\t\tname:  \"Start_GroupByDay_Ascending\",\n\t\t\tstart: mustParseTime(\"2000-04-01T00:00:00-08:00\"),\n\t\t\tend:   mustParseTime(\"2000-04-05T00:00:00-07:00\"),\n\t\t\tpoints: []time.Duration{\n\t\t\t\t24 * time.Hour,\n\t\t\t\t47 * time.Hour,\n\t\t\t\t71 * time.Hour,\n\t\t\t},\n\t\t\topt: influxql.IteratorOptions{\n\t\t\t\tInterval: influxql.Interval{\n\t\t\t\t\tDuration: 24 * 
time.Hour,\n\t\t\t\t},\n\t\t\t\tLocation:  LosAngeles,\n\t\t\t\tAscending: true,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:  \"Start_GroupByDay_Descending\",\n\t\t\tstart: mustParseTime(\"2000-04-01T00:00:00-08:00\"),\n\t\t\tend:   mustParseTime(\"2000-04-05T00:00:00-07:00\"),\n\t\t\tpoints: []time.Duration{\n\t\t\t\t71 * time.Hour,\n\t\t\t\t47 * time.Hour,\n\t\t\t\t24 * time.Hour,\n\t\t\t},\n\t\t\topt: influxql.IteratorOptions{\n\t\t\t\tInterval: influxql.Interval{\n\t\t\t\t\tDuration: 24 * time.Hour,\n\t\t\t\t},\n\t\t\t\tLocation:  LosAngeles,\n\t\t\t\tAscending: false,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:  \"Start_GroupByHour_Ascending\",\n\t\t\tstart: mustParseTime(\"2000-04-02T00:00:00-08:00\"),\n\t\t\tend:   mustParseTime(\"2000-04-02T05:00:00-07:00\"),\n\t\t\tpoints: []time.Duration{\n\t\t\t\t1 * time.Hour,\n\t\t\t\t2 * time.Hour,\n\t\t\t\t3 * time.Hour,\n\t\t\t},\n\t\t\topt: influxql.IteratorOptions{\n\t\t\t\tInterval: influxql.Interval{\n\t\t\t\t\tDuration: 1 * time.Hour,\n\t\t\t\t},\n\t\t\t\tLocation:  LosAngeles,\n\t\t\t\tAscending: true,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:  \"Start_GroupByHour_Descending\",\n\t\t\tstart: mustParseTime(\"2000-04-02T00:00:00-08:00\"),\n\t\t\tend:   mustParseTime(\"2000-04-02T05:00:00-07:00\"),\n\t\t\tpoints: []time.Duration{\n\t\t\t\t3 * time.Hour,\n\t\t\t\t2 * time.Hour,\n\t\t\t\t1 * time.Hour,\n\t\t\t},\n\t\t\topt: influxql.IteratorOptions{\n\t\t\t\tInterval: influxql.Interval{\n\t\t\t\t\tDuration: 1 * time.Hour,\n\t\t\t\t},\n\t\t\t\tLocation:  LosAngeles,\n\t\t\t\tAscending: false,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:  \"Start_GroupBy2Hour_Ascending\",\n\t\t\tstart: mustParseTime(\"2000-04-02T00:00:00-08:00\"),\n\t\t\tend:   mustParseTime(\"2000-04-02T07:00:00-07:00\"),\n\t\t\tpoints: []time.Duration{\n\t\t\t\t2 * time.Hour,\n\t\t\t\t3 * time.Hour,\n\t\t\t\t5 * time.Hour,\n\t\t\t},\n\t\t\topt: influxql.IteratorOptions{\n\t\t\t\tInterval: influxql.Interval{\n\t\t\t\t\tDuration: 2 * 
time.Hour,\n\t\t\t\t},\n\t\t\t\tLocation:  LosAngeles,\n\t\t\t\tAscending: true,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:  \"Start_GroupBy2Hour_Descending\",\n\t\t\tstart: mustParseTime(\"2000-04-02T00:00:00-08:00\"),\n\t\t\tend:   mustParseTime(\"2000-04-02T07:00:00-07:00\"),\n\t\t\tpoints: []time.Duration{\n\t\t\t\t5 * time.Hour,\n\t\t\t\t3 * time.Hour,\n\t\t\t\t2 * time.Hour,\n\t\t\t},\n\t\t\topt: influxql.IteratorOptions{\n\t\t\t\tInterval: influxql.Interval{\n\t\t\t\t\tDuration: 2 * time.Hour,\n\t\t\t\t},\n\t\t\t\tLocation:  LosAngeles,\n\t\t\t\tAscending: false,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:  \"End_GroupByDay_Ascending\",\n\t\t\tstart: mustParseTime(\"2000-10-28T00:00:00-07:00\"),\n\t\t\tend:   mustParseTime(\"2000-11-01T00:00:00-08:00\"),\n\t\t\tpoints: []time.Duration{\n\t\t\t\t24 * time.Hour,\n\t\t\t\t49 * time.Hour,\n\t\t\t\t73 * time.Hour,\n\t\t\t},\n\t\t\topt: influxql.IteratorOptions{\n\t\t\t\tInterval: influxql.Interval{\n\t\t\t\t\tDuration: 24 * time.Hour,\n\t\t\t\t},\n\t\t\t\tLocation:  LosAngeles,\n\t\t\t\tAscending: true,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:  \"End_GroupByDay_Descending\",\n\t\t\tstart: mustParseTime(\"2000-10-28T00:00:00-07:00\"),\n\t\t\tend:   mustParseTime(\"2000-11-01T00:00:00-08:00\"),\n\t\t\tpoints: []time.Duration{\n\t\t\t\t73 * time.Hour,\n\t\t\t\t49 * time.Hour,\n\t\t\t\t24 * time.Hour,\n\t\t\t},\n\t\t\topt: influxql.IteratorOptions{\n\t\t\t\tInterval: influxql.Interval{\n\t\t\t\t\tDuration: 24 * time.Hour,\n\t\t\t\t},\n\t\t\t\tLocation:  LosAngeles,\n\t\t\t\tAscending: false,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:  \"End_GroupByHour_Ascending\",\n\t\t\tstart: mustParseTime(\"2000-10-29T00:00:00-07:00\"),\n\t\t\tend:   mustParseTime(\"2000-10-29T03:00:00-08:00\"),\n\t\t\tpoints: []time.Duration{\n\t\t\t\t1 * time.Hour,\n\t\t\t\t2 * time.Hour,\n\t\t\t\t3 * time.Hour,\n\t\t\t},\n\t\t\topt: influxql.IteratorOptions{\n\t\t\t\tInterval: influxql.Interval{\n\t\t\t\t\tDuration: 1 * time.Hour,\n\t\t\t\t},\n\t\t\t\tLocation:  
LosAngeles,\n\t\t\t\tAscending: true,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:  \"End_GroupByHour_Descending\",\n\t\t\tstart: mustParseTime(\"2000-10-29T00:00:00-07:00\"),\n\t\t\tend:   mustParseTime(\"2000-10-29T03:00:00-08:00\"),\n\t\t\tpoints: []time.Duration{\n\t\t\t\t3 * time.Hour,\n\t\t\t\t2 * time.Hour,\n\t\t\t\t1 * time.Hour,\n\t\t\t},\n\t\t\topt: influxql.IteratorOptions{\n\t\t\t\tInterval: influxql.Interval{\n\t\t\t\t\tDuration: 1 * time.Hour,\n\t\t\t\t},\n\t\t\t\tLocation:  LosAngeles,\n\t\t\t\tAscending: false,\n\t\t\t},\n\t\t},\n\t} {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\topt := tt.opt\n\t\t\topt.StartTime = tt.start.UnixNano()\n\t\t\topt.EndTime = tt.end.UnixNano() - 1\n\n\t\t\tpoints := make([][]influxql.Point, 0, len(tt.points)+1)\n\t\t\tif opt.Ascending {\n\t\t\t\tpoints = append(points, []influxql.Point{\n\t\t\t\t\t&influxql.FloatPoint{\n\t\t\t\t\t\tTime: tt.start.UnixNano(),\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t}\n\t\t\tfor _, d := range tt.points {\n\t\t\t\tpoints = append(points, []influxql.Point{\n\t\t\t\t\t&influxql.FloatPoint{\n\t\t\t\t\t\tTime: tt.start.Add(d).UnixNano(),\n\t\t\t\t\t\tNil:  true,\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t}\n\t\t\tif !opt.Ascending {\n\t\t\t\tpoints = append(points, []influxql.Point{\n\t\t\t\t\t&influxql.FloatPoint{\n\t\t\t\t\t\tTime: tt.start.UnixNano(),\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t}\n\t\t\titr := influxql.NewFillIterator(\n\t\t\t\t&FloatIterator{Points: []influxql.FloatPoint{{Time: tt.start.UnixNano(), Value: 0}}},\n\t\t\t\tnil,\n\t\t\t\topt,\n\t\t\t)\n\n\t\t\tif a, err := (Iterators{itr}).ReadAll(); err != nil {\n\t\t\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t\t\t} else if !deep.Equal(a, points) {\n\t\t\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t\t\t}\n\t\t})\n\t}\n}\n\n// Iterators is a test wrapper for iterators.\ntype Iterators []influxql.Iterator\n\n// Next returns the next value from each iterator.\n// Returns nil if any iterator returns a nil.\nfunc (itrs Iterators) Next() 
([]influxql.Point, error) {\n\ta := make([]influxql.Point, len(itrs))\n\tfor i, itr := range itrs {\n\t\tswitch itr := itr.(type) {\n\t\tcase influxql.FloatIterator:\n\t\t\tfp, err := itr.Next()\n\t\t\tif fp == nil || err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ta[i] = fp\n\t\tcase influxql.IntegerIterator:\n\t\t\tip, err := itr.Next()\n\t\t\tif ip == nil || err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ta[i] = ip\n\t\tcase influxql.StringIterator:\n\t\t\tsp, err := itr.Next()\n\t\t\tif sp == nil || err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ta[i] = sp\n\t\tcase influxql.BooleanIterator:\n\t\t\tbp, err := itr.Next()\n\t\t\tif bp == nil || err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ta[i] = bp\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"iterator type not supported: %T\", itr))\n\t\t}\n\t}\n\treturn a, nil\n}\n\n// ReadAll reads all points from all iterators.\nfunc (itrs Iterators) ReadAll() ([][]influxql.Point, error) {\n\tvar a [][]influxql.Point\n\n\t// Read from every iterator until a nil is encountered.\n\tfor {\n\t\tpoints, err := itrs.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if points == nil {\n\t\t\tbreak\n\t\t}\n\t\ta = append(a, influxql.Points(points).Clone())\n\t}\n\n\t// Close all iterators.\n\tinfluxql.Iterators(itrs).Close()\n\n\treturn a, nil\n}\n\nfunc TestIteratorOptions_Window_Interval(t *testing.T) {\n\topt := influxql.IteratorOptions{\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: 10,\n\t\t},\n\t}\n\n\tstart, end := opt.Window(4)\n\tif start != 0 {\n\t\tt.Errorf(\"expected start to be 0, got %d\", start)\n\t}\n\tif end != 10 {\n\t\tt.Errorf(\"expected end to be 10, got %d\", end)\n\t}\n}\n\nfunc TestIteratorOptions_Window_Offset(t *testing.T) {\n\topt := influxql.IteratorOptions{\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: 10,\n\t\t\tOffset:   8,\n\t\t},\n\t}\n\n\tstart, end := opt.Window(14)\n\tif start != 8 {\n\t\tt.Errorf(\"expected start to be 8, got %d\", start)\n\t}\n\tif end 
!= 18 {\n\t\tt.Errorf(\"expected end to be 18, got %d\", end)\n\t}\n}\n\nfunc TestIteratorOptions_Window_Default(t *testing.T) {\n\topt := influxql.IteratorOptions{\n\t\tStartTime: 0,\n\t\tEndTime:   60,\n\t}\n\n\tstart, end := opt.Window(34)\n\tif start != 0 {\n\t\tt.Errorf(\"expected start to be 0, got %d\", start)\n\t}\n\tif end != 61 {\n\t\tt.Errorf(\"expected end to be 61, got %d\", end)\n\t}\n}\n\nfunc TestIteratorOptions_Window_Location(t *testing.T) {\n\tfor _, tt := range []struct {\n\t\tnow        time.Time\n\t\tstart, end time.Time\n\t\tinterval   time.Duration\n\t}{\n\t\t{\n\t\t\tnow:      mustParseTime(\"2000-04-02T12:14:15-07:00\"),\n\t\t\tstart:    mustParseTime(\"2000-04-02T00:00:00-08:00\"),\n\t\t\tend:      mustParseTime(\"2000-04-03T00:00:00-07:00\"),\n\t\t\tinterval: 24 * time.Hour,\n\t\t},\n\t\t{\n\t\t\tnow:      mustParseTime(\"2000-04-02T01:17:12-08:00\"),\n\t\t\tstart:    mustParseTime(\"2000-04-02T00:00:00-08:00\"),\n\t\t\tend:      mustParseTime(\"2000-04-03T00:00:00-07:00\"),\n\t\t\tinterval: 24 * time.Hour,\n\t\t},\n\t\t{\n\t\t\tnow:      mustParseTime(\"2000-04-02T01:14:15-08:00\"),\n\t\t\tstart:    mustParseTime(\"2000-04-02T00:00:00-08:00\"),\n\t\t\tend:      mustParseTime(\"2000-04-02T03:00:00-07:00\"),\n\t\t\tinterval: 2 * time.Hour,\n\t\t},\n\t\t{\n\t\t\tnow:      mustParseTime(\"2000-04-02T03:17:12-07:00\"),\n\t\t\tstart:    mustParseTime(\"2000-04-02T03:00:00-07:00\"),\n\t\t\tend:      mustParseTime(\"2000-04-02T04:00:00-07:00\"),\n\t\t\tinterval: 2 * time.Hour,\n\t\t},\n\t\t{\n\t\t\tnow:      mustParseTime(\"2000-04-02T01:14:15-08:00\"),\n\t\t\tstart:    mustParseTime(\"2000-04-02T01:00:00-08:00\"),\n\t\t\tend:      mustParseTime(\"2000-04-02T03:00:00-07:00\"),\n\t\t\tinterval: 1 * time.Hour,\n\t\t},\n\t\t{\n\t\t\tnow:      mustParseTime(\"2000-04-02T03:17:12-07:00\"),\n\t\t\tstart:    mustParseTime(\"2000-04-02T03:00:00-07:00\"),\n\t\t\tend:      mustParseTime(\"2000-04-02T04:00:00-07:00\"),\n\t\t\tinterval: 1 * 
time.Hour,\n\t\t},\n\t\t{\n\t\t\tnow:      mustParseTime(\"2000-10-29T12:14:15-08:00\"),\n\t\t\tstart:    mustParseTime(\"2000-10-29T00:00:00-07:00\"),\n\t\t\tend:      mustParseTime(\"2000-10-30T00:00:00-08:00\"),\n\t\t\tinterval: 24 * time.Hour,\n\t\t},\n\t\t{\n\t\t\tnow:      mustParseTime(\"2000-10-29T01:17:12-07:00\"),\n\t\t\tstart:    mustParseTime(\"2000-10-29T00:00:00-07:00\"),\n\t\t\tend:      mustParseTime(\"2000-10-30T00:00:00-08:00\"),\n\t\t\tinterval: 24 * time.Hour,\n\t\t},\n\t\t{\n\t\t\tnow:      mustParseTime(\"2000-10-29T01:14:15-07:00\"),\n\t\t\tstart:    mustParseTime(\"2000-10-29T00:00:00-07:00\"),\n\t\t\tend:      mustParseTime(\"2000-10-29T02:00:00-08:00\"),\n\t\t\tinterval: 2 * time.Hour,\n\t\t},\n\t\t{\n\t\t\tnow:      mustParseTime(\"2000-10-29T03:17:12-08:00\"),\n\t\t\tstart:    mustParseTime(\"2000-10-29T02:00:00-08:00\"),\n\t\t\tend:      mustParseTime(\"2000-10-29T04:00:00-08:00\"),\n\t\t\tinterval: 2 * time.Hour,\n\t\t},\n\t\t{\n\t\t\tnow:      mustParseTime(\"2000-10-29T01:14:15-07:00\"),\n\t\t\tstart:    mustParseTime(\"2000-10-29T01:00:00-07:00\"),\n\t\t\tend:      mustParseTime(\"2000-10-29T01:00:00-08:00\"),\n\t\t\tinterval: 1 * time.Hour,\n\t\t},\n\t\t{\n\t\t\tnow:      mustParseTime(\"2000-10-29T02:17:12-07:00\"),\n\t\t\tstart:    mustParseTime(\"2000-10-29T02:00:00-07:00\"),\n\t\t\tend:      mustParseTime(\"2000-10-29T03:00:00-07:00\"),\n\t\t\tinterval: 1 * time.Hour,\n\t\t},\n\t} {\n\t\tt.Run(fmt.Sprintf(\"%s/%s\", tt.now, tt.interval), func(t *testing.T) {\n\t\t\topt := influxql.IteratorOptions{\n\t\t\t\tLocation: LosAngeles,\n\t\t\t\tInterval: influxql.Interval{\n\t\t\t\t\tDuration: tt.interval,\n\t\t\t\t},\n\t\t\t}\n\t\t\tstart, end := opt.Window(tt.now.UnixNano())\n\t\t\tif have, want := time.Unix(0, start).In(LosAngeles), tt.start; !have.Equal(want) {\n\t\t\t\tt.Errorf(\"unexpected start time: %s != %s\", have, want)\n\t\t\t}\n\t\t\tif have, want := time.Unix(0, end).In(LosAngeles), tt.end; !have.Equal(want) 
{\n\t\t\t\tt.Errorf(\"unexpected end time: %s != %s\", have, want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestIteratorOptions_Window_MinTime(t *testing.T) {\n\topt := influxql.IteratorOptions{\n\t\tStartTime: influxql.MinTime,\n\t\tEndTime:   influxql.MaxTime,\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: time.Hour,\n\t\t},\n\t}\n\texpected := time.Unix(0, influxql.MinTime).Add(time.Hour).Truncate(time.Hour)\n\n\tstart, end := opt.Window(influxql.MinTime)\n\tif start != influxql.MinTime {\n\t\tt.Errorf(\"expected start to be %d, got %d\", influxql.MinTime, start)\n\t}\n\tif have, want := end, expected.UnixNano(); have != want {\n\t\tt.Errorf(\"expected end to be %d, got %d\", want, have)\n\t}\n}\n\nfunc TestIteratorOptions_Window_MaxTime(t *testing.T) {\n\topt := influxql.IteratorOptions{\n\t\tStartTime: influxql.MinTime,\n\t\tEndTime:   influxql.MaxTime,\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: time.Hour,\n\t\t},\n\t}\n\texpected := time.Unix(0, influxql.MaxTime).Truncate(time.Hour)\n\n\tstart, end := opt.Window(influxql.MaxTime)\n\tif have, want := start, expected.UnixNano(); have != want {\n\t\tt.Errorf(\"expected start to be %d, got %d\", want, have)\n\t}\n\tif end != influxql.MaxTime {\n\t\tt.Errorf(\"expected end to be %d, got %d\", influxql.MaxTime, end)\n\t}\n}\n\nfunc TestIteratorOptions_SeekTime_Ascending(t *testing.T) {\n\topt := influxql.IteratorOptions{\n\t\tStartTime: 30,\n\t\tEndTime:   60,\n\t\tAscending: true,\n\t}\n\n\ttime := opt.SeekTime()\n\tif time != 30 {\n\t\tt.Errorf(\"expected time to be 30, got %d\", time)\n\t}\n}\n\nfunc TestIteratorOptions_SeekTime_Descending(t *testing.T) {\n\topt := influxql.IteratorOptions{\n\t\tStartTime: 30,\n\t\tEndTime:   60,\n\t\tAscending: false,\n\t}\n\n\ttime := opt.SeekTime()\n\tif time != 60 {\n\t\tt.Errorf(\"expected time to be 60, got %d\", time)\n\t}\n}\n\nfunc TestIteratorOptions_DerivativeInterval_Default(t *testing.T) {\n\topt := influxql.IteratorOptions{}\n\texpected := 
influxql.Interval{Duration: time.Second}\n\tactual := opt.DerivativeInterval()\n\tif actual != expected {\n\t\tt.Errorf(\"expected derivative interval to be %v, got %v\", expected, actual)\n\t}\n}\n\nfunc TestIteratorOptions_DerivativeInterval_GroupBy(t *testing.T) {\n\topt := influxql.IteratorOptions{\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: 10,\n\t\t\tOffset:   2,\n\t\t},\n\t}\n\texpected := influxql.Interval{Duration: 10}\n\tactual := opt.DerivativeInterval()\n\tif actual != expected {\n\t\tt.Errorf(\"expected derivative interval to be %v, got %v\", expected, actual)\n\t}\n}\n\nfunc TestIteratorOptions_DerivativeInterval_Call(t *testing.T) {\n\topt := influxql.IteratorOptions{\n\t\tExpr: &influxql.Call{\n\t\t\tName: \"mean\",\n\t\t\tArgs: []influxql.Expr{\n\t\t\t\t&influxql.VarRef{Val: \"value\"},\n\t\t\t\t&influxql.DurationLiteral{Val: 2 * time.Second},\n\t\t\t},\n\t\t},\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: 10,\n\t\t\tOffset:   2,\n\t\t},\n\t}\n\texpected := influxql.Interval{Duration: 2 * time.Second}\n\tactual := opt.DerivativeInterval()\n\tif actual != expected {\n\t\tt.Errorf(\"expected derivative interval to be %v, got %v\", expected, actual)\n\t}\n}\n\nfunc TestIteratorOptions_ElapsedInterval_Default(t *testing.T) {\n\topt := influxql.IteratorOptions{}\n\texpected := influxql.Interval{Duration: time.Nanosecond}\n\tactual := opt.ElapsedInterval()\n\tif actual != expected {\n\t\tt.Errorf(\"expected elapsed interval to be %v, got %v\", expected, actual)\n\t}\n}\n\nfunc TestIteratorOptions_ElapsedInterval_GroupBy(t *testing.T) {\n\topt := influxql.IteratorOptions{\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: 10,\n\t\t\tOffset:   2,\n\t\t},\n\t}\n\texpected := influxql.Interval{Duration: time.Nanosecond}\n\tactual := opt.ElapsedInterval()\n\tif actual != expected {\n\t\tt.Errorf(\"expected elapsed interval to be %v, got %v\", expected, actual)\n\t}\n}\n\nfunc TestIteratorOptions_ElapsedInterval_Call(t *testing.T) {\n\topt := 
influxql.IteratorOptions{\n\t\tExpr: &influxql.Call{\n\t\t\tName: \"mean\",\n\t\t\tArgs: []influxql.Expr{\n\t\t\t\t&influxql.VarRef{Val: \"value\"},\n\t\t\t\t&influxql.DurationLiteral{Val: 2 * time.Second},\n\t\t\t},\n\t\t},\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: 10,\n\t\t\tOffset:   2,\n\t\t},\n\t}\n\texpected := influxql.Interval{Duration: 2 * time.Second}\n\tactual := opt.ElapsedInterval()\n\tif actual != expected {\n\t\tt.Errorf(\"expected elapsed interval to be %v, got %v\", expected, actual)\n\t}\n}\n\nfunc TestIteratorOptions_IntegralInterval_Default(t *testing.T) {\n\topt := influxql.IteratorOptions{}\n\texpected := influxql.Interval{Duration: time.Second}\n\tactual := opt.IntegralInterval()\n\tif actual != expected {\n\t\tt.Errorf(\"expected default integral interval to be %v, got %v\", expected, actual)\n\t}\n}\n\n// Ensure iterator options can be marshaled to and from a binary format.\nfunc TestIteratorOptions_MarshalBinary(t *testing.T) {\n\topt := &influxql.IteratorOptions{\n\t\tExpr: MustParseExpr(\"count(value)\"),\n\t\tAux:  []influxql.VarRef{{Val: \"a\"}, {Val: \"b\"}, {Val: \"c\"}},\n\t\tInterval: influxql.Interval{\n\t\t\tDuration: 1 * time.Hour,\n\t\t\tOffset:   20 * time.Minute,\n\t\t},\n\t\tDimensions: []string{\"region\", \"host\"},\n\t\tGroupBy: map[string]struct{}{\n\t\t\t\"region\":  {},\n\t\t\t\"host\":    {},\n\t\t\t\"cluster\": {},\n\t\t},\n\t\tFill:      influxql.NumberFill,\n\t\tFillValue: float64(100),\n\t\tCondition: MustParseExpr(`foo = 'bar'`),\n\t\tStartTime: 1000,\n\t\tEndTime:   2000,\n\t\tAscending: true,\n\t\tLimit:     100,\n\t\tOffset:    200,\n\t\tSLimit:    300,\n\t\tSOffset:   400,\n\t\tDedupe:    true,\n\t}\n\n\t// Marshal to binary.\n\tbuf, err := opt.MarshalBinary()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Unmarshal back to an object.\n\tvar other influxql.IteratorOptions\n\tif err := other.UnmarshalBinary(buf); err != nil {\n\t\tt.Fatal(err)\n\t} else if !reflect.DeepEqual(&other, opt) 
{\n\t\tt.Fatalf(\"unexpected options: %s\", spew.Sdump(other))\n\t}\n}\n\n// Ensure iterator can be encoded and decoded over a byte stream.\nfunc TestIterator_EncodeDecode(t *testing.T) {\n\tvar buf bytes.Buffer\n\n\t// Create an iterator with several points & stats.\n\titr := &FloatIterator{\n\t\tPoints: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: 0},\n\t\t\t{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 1, Value: 10},\n\t\t},\n\t\tstats: influxql.IteratorStats{\n\t\t\tSeriesN: 2,\n\t\t\tPointN:  0,\n\t\t},\n\t}\n\n\t// Encode to the buffer.\n\tenc := influxql.NewIteratorEncoder(&buf)\n\tenc.StatsInterval = 100 * time.Millisecond\n\tif err := enc.EncodeIterator(itr); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Decode from the buffer.\n\tdec := influxql.NewReaderIterator(&buf, influxql.Float, itr.Stats())\n\n\t// Initial stats should exist immediately.\n\tfdec := dec.(influxql.FloatIterator)\n\tif stats := fdec.Stats(); !reflect.DeepEqual(stats, influxql.IteratorStats{SeriesN: 2, PointN: 0}) {\n\t\tt.Fatalf(\"unexpected stats(initial): %#v\", stats)\n\t}\n\n\t// Read both points.\n\tif p, err := fdec.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(0): %#v\", err)\n\t} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0, Value: 0}) {\n\t\tt.Fatalf(\"unexpected point(0); %#v\", p)\n\t}\n\tif p, err := fdec.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(1): %#v\", err)\n\t} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: \"mem\", Tags: ParseTags(\"host=B\"), Time: 1, Value: 10}) {\n\t\tt.Fatalf(\"unexpected point(1); %#v\", p)\n\t}\n\tif p, err := fdec.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(eof): %#v\", err)\n\t} else if p != nil {\n\t\tt.Fatalf(\"unexpected point(eof); %#v\", p)\n\t}\n}\n\n// IteratorCreator is a mockable implementation of SelectStatementExecutor.IteratorCreator.\ntype IteratorCreator struct {\n\tCreateIteratorFn  
func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error)\n\tFieldDimensionsFn func(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error)\n}\n\nfunc (ic *IteratorCreator) CreateIterator(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\treturn ic.CreateIteratorFn(m, opt)\n}\n\nfunc (ic *IteratorCreator) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {\n\treturn ic.FieldDimensionsFn(m)\n}\n\nfunc (ic *IteratorCreator) MapType(m *influxql.Measurement, field string) influxql.DataType {\n\tf, d, err := ic.FieldDimensions(m)\n\tif err != nil {\n\t\treturn influxql.Unknown\n\t}\n\n\tif typ, ok := f[field]; ok {\n\t\treturn typ\n\t}\n\tif _, ok := d[field]; ok {\n\t\treturn influxql.Tag\n\t}\n\treturn influxql.Unknown\n}\n\n// Test implementation of influxql.FloatIterator\ntype FloatIterator struct {\n\tPoints []influxql.FloatPoint\n\tClosed bool\n\tstats  influxql.IteratorStats\n}\n\nfunc (itr *FloatIterator) Stats() influxql.IteratorStats { return itr.stats }\nfunc (itr *FloatIterator) Close() error                  { itr.Closed = true; return nil }\n\n// Next returns the next value and shifts it off the beginning of the points slice.\nfunc (itr *FloatIterator) Next() (*influxql.FloatPoint, error) {\n\tif len(itr.Points) == 0 || itr.Closed {\n\t\treturn nil, nil\n\t}\n\n\tv := &itr.Points[0]\n\titr.Points = itr.Points[1:]\n\treturn v, nil\n}\n\nfunc FloatIterators(inputs []*FloatIterator) []influxql.Iterator {\n\titrs := make([]influxql.Iterator, len(inputs))\n\tfor i := range itrs {\n\t\titrs[i] = influxql.Iterator(inputs[i])\n\t}\n\treturn itrs\n}\n\n// Test implementation of influxql.IntegerIterator\ntype IntegerIterator struct {\n\tPoints []influxql.IntegerPoint\n\tClosed bool\n\tstats  influxql.IteratorStats\n}\n\nfunc (itr *IntegerIterator) Stats() influxql.IteratorStats { 
return itr.stats }\nfunc (itr *IntegerIterator) Close() error                  { itr.Closed = true; return nil }\n\n// Next returns the next value and shifts it off the beginning of the points slice.\nfunc (itr *IntegerIterator) Next() (*influxql.IntegerPoint, error) {\n\tif len(itr.Points) == 0 || itr.Closed {\n\t\treturn nil, nil\n\t}\n\n\tv := &itr.Points[0]\n\titr.Points = itr.Points[1:]\n\treturn v, nil\n}\n\nfunc IntegerIterators(inputs []*IntegerIterator) []influxql.Iterator {\n\titrs := make([]influxql.Iterator, len(inputs))\n\tfor i := range itrs {\n\t\titrs[i] = influxql.Iterator(inputs[i])\n\t}\n\treturn itrs\n}\n\n// Test implementation of influxql.StringIterator\ntype StringIterator struct {\n\tPoints []influxql.StringPoint\n\tClosed bool\n\tstats  influxql.IteratorStats\n}\n\nfunc (itr *StringIterator) Stats() influxql.IteratorStats { return itr.stats }\nfunc (itr *StringIterator) Close() error                  { itr.Closed = true; return nil }\n\n// Next returns the next value and shifts it off the beginning of the points slice.\nfunc (itr *StringIterator) Next() (*influxql.StringPoint, error) {\n\tif len(itr.Points) == 0 || itr.Closed {\n\t\treturn nil, nil\n\t}\n\n\tv := &itr.Points[0]\n\titr.Points = itr.Points[1:]\n\treturn v, nil\n}\n\nfunc StringIterators(inputs []*StringIterator) []influxql.Iterator {\n\titrs := make([]influxql.Iterator, len(inputs))\n\tfor i := range itrs {\n\t\titrs[i] = influxql.Iterator(inputs[i])\n\t}\n\treturn itrs\n}\n\n// Test implementation of influxql.BooleanIterator\ntype BooleanIterator struct {\n\tPoints []influxql.BooleanPoint\n\tClosed bool\n\tstats  influxql.IteratorStats\n}\n\nfunc (itr *BooleanIterator) Stats() influxql.IteratorStats { return itr.stats }\nfunc (itr *BooleanIterator) Close() error                  { itr.Closed = true; return nil }\n\n// Next returns the next value and shifts it off the beginning of the points slice.\nfunc (itr *BooleanIterator) Next() (*influxql.BooleanPoint, error) {\n\tif 
len(itr.Points) == 0 || itr.Closed {\n\t\treturn nil, nil\n\t}\n\n\tv := &itr.Points[0]\n\titr.Points = itr.Points[1:]\n\treturn v, nil\n}\n\nfunc BooleanIterators(inputs []*BooleanIterator) []influxql.Iterator {\n\titrs := make([]influxql.Iterator, len(inputs))\n\tfor i := range itrs {\n\t\titrs[i] = influxql.Iterator(inputs[i])\n\t}\n\treturn itrs\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/linear.go",
    "content": "package influxql\n\n// linearFloat computes the the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)\n// and returns the value of the point on the line with time windowTime\n// y = mx + b\nfunc linearFloat(windowTime, previousTime, nextTime int64, previousValue, nextValue float64) float64 {\n\tm := (nextValue - previousValue) / float64(nextTime-previousTime) // the slope of the line\n\tx := float64(windowTime - previousTime)                           // how far into the interval we are\n\tb := previousValue\n\treturn m*x + b\n}\n\n// linearInteger computes the the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue)\n// and returns the value of the point on the line with time windowTime\n// y = mx + b\nfunc linearInteger(windowTime, previousTime, nextTime int64, previousValue, nextValue int64) int64 {\n\tm := float64(nextValue-previousValue) / float64(nextTime-previousTime) // the slope of the line\n\tx := float64(windowTime - previousTime)                                // how far into the interval we are\n\tb := float64(previousValue)\n\treturn int64(m*x + b)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/monitor.go",
    "content": "package influxql\n\nimport \"time\"\n\n// PointLimitMonitor is a query monitor that exits when the number of points\n// emitted exceeds a threshold.\nfunc PointLimitMonitor(itrs Iterators, interval time.Duration, limit int) QueryMonitorFunc {\n\treturn func(closing <-chan struct{}) error {\n\t\tticker := time.NewTicker(interval)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tstats := itrs.Stats()\n\t\t\t\tif stats.PointN >= limit {\n\t\t\t\t\treturn ErrMaxSelectPointsLimitExceeded(stats.PointN, limit)\n\t\t\t\t}\n\t\t\tcase <-closing:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/neldermead/neldermead.go",
    "content": "// Package neldermead is an implementation of the Nelder-Mead optimization method.\n// Based on work by Michael F. Hutt: http://www.mikehutt.com/neldermead.html\npackage neldermead\n\nimport \"math\"\n\nconst (\n\tdefaultMaxIterations = 1000\n\t// reflection coefficient\n\tdefaultAlpha = 1.0\n\t// contraction coefficient\n\tdefaultBeta = 0.5\n\t// expansion coefficient\n\tdefaultGamma = 2.0\n)\n\n// Optimizer represents the parameters to the Nelder-Mead simplex method.\ntype Optimizer struct {\n\t// Maximum number of iterations.\n\tMaxIterations int\n\t// Reflection coefficient.\n\tAlpha,\n\t// Contraction coefficient.\n\tBeta,\n\t// Expansion coefficient.\n\tGamma float64\n}\n\n// New returns a new instance of Optimizer with all values set to the defaults.\nfunc New() *Optimizer {\n\treturn &Optimizer{\n\t\tMaxIterations: defaultMaxIterations,\n\t\tAlpha:         defaultAlpha,\n\t\tBeta:          defaultBeta,\n\t\tGamma:         defaultGamma,\n\t}\n}\n\n// Optimize applies the Nelder-Mead simplex method with the Optimizer's settings.\nfunc (o *Optimizer) Optimize(\n\tobjfunc func([]float64) float64,\n\tstart []float64,\n\tepsilon,\n\tscale float64,\n) (float64, []float64) {\n\tn := len(start)\n\n\t//holds vertices of simplex\n\tv := make([][]float64, n+1)\n\tfor i := range v {\n\t\tv[i] = make([]float64, n)\n\t}\n\n\t//value of function at each vertex\n\tf := make([]float64, n+1)\n\n\t//reflection - coordinates\n\tvr := make([]float64, n)\n\n\t//expansion - coordinates\n\tve := make([]float64, n)\n\n\t//contraction - coordinates\n\tvc := make([]float64, n)\n\n\t//centroid - coordinates\n\tvm := make([]float64, n)\n\n\t// create the initial simplex\n\t// assume one of the vertices is 0,0\n\n\tpn := scale * (math.Sqrt(float64(n+1)) - 1 + float64(n)) / (float64(n) * math.Sqrt(2))\n\tqn := scale * (math.Sqrt(float64(n+1)) - 1) / (float64(n) * math.Sqrt(2))\n\n\tfor i := 0; i < n; i++ {\n\t\tv[0][i] = start[i]\n\t}\n\n\tfor i := 1; i <= n; i++ 
{\n\t\tfor j := 0; j < n; j++ {\n\t\t\tif i-1 == j {\n\t\t\t\tv[i][j] = pn + start[j]\n\t\t\t} else {\n\t\t\t\tv[i][j] = qn + start[j]\n\t\t\t}\n\t\t}\n\t}\n\n\t// find the initial function values\n\tfor j := 0; j <= n; j++ {\n\t\tf[j] = objfunc(v[j])\n\t}\n\n\t// begin the main loop of the minimization\n\tfor itr := 1; itr <= o.MaxIterations; itr++ {\n\n\t\t// find the indexes of the largest and smallest values\n\t\tvg := 0\n\t\tvs := 0\n\t\tfor i := 0; i <= n; i++ {\n\t\t\tif f[i] > f[vg] {\n\t\t\t\tvg = i\n\t\t\t}\n\t\t\tif f[i] < f[vs] {\n\t\t\t\tvs = i\n\t\t\t}\n\t\t}\n\t\t// find the index of the second largest value\n\t\tvh := vs\n\t\tfor i := 0; i <= n; i++ {\n\t\t\tif f[i] > f[vh] && f[i] < f[vg] {\n\t\t\t\tvh = i\n\t\t\t}\n\t\t}\n\n\t\t// calculate the centroid\n\t\tfor i := 0; i <= n-1; i++ {\n\t\t\tcent := 0.0\n\t\t\tfor m := 0; m <= n; m++ {\n\t\t\t\tif m != vg {\n\t\t\t\t\tcent += v[m][i]\n\t\t\t\t}\n\t\t\t}\n\t\t\tvm[i] = cent / float64(n)\n\t\t}\n\n\t\t// reflect vg to new vertex vr\n\t\tfor i := 0; i <= n-1; i++ {\n\t\t\tvr[i] = vm[i] + o.Alpha*(vm[i]-v[vg][i])\n\t\t}\n\n\t\t// value of function at reflection point\n\t\tfr := objfunc(vr)\n\n\t\tif fr < f[vh] && fr >= f[vs] {\n\t\t\tfor i := 0; i <= n-1; i++ {\n\t\t\t\tv[vg][i] = vr[i]\n\t\t\t}\n\t\t\tf[vg] = fr\n\t\t}\n\n\t\t// investigate a step further in this direction\n\t\tif fr < f[vs] {\n\t\t\tfor i := 0; i <= n-1; i++ {\n\t\t\t\tve[i] = vm[i] + o.Gamma*(vr[i]-vm[i])\n\t\t\t}\n\n\t\t\t// value of function at expansion point\n\t\t\tfe := objfunc(ve)\n\n\t\t\t// by making fe < fr as opposed to fe < f[vs],\n\t\t\t// Rosenbrocks function takes 63 iterations as opposed\n\t\t\t// to 64 when using double variables.\n\n\t\t\tif fe < fr {\n\t\t\t\tfor i := 0; i <= n-1; i++ {\n\t\t\t\t\tv[vg][i] = ve[i]\n\t\t\t\t}\n\t\t\t\tf[vg] = fe\n\t\t\t} else {\n\t\t\t\tfor i := 0; i <= n-1; i++ {\n\t\t\t\t\tv[vg][i] = vr[i]\n\t\t\t\t}\n\t\t\t\tf[vg] = fr\n\t\t\t}\n\t\t}\n\n\t\t// check to see if a contraction is 
necessary\n\t\tif fr >= f[vh] {\n\t\t\tif fr < f[vg] && fr >= f[vh] {\n\t\t\t\t// perform outside contraction\n\t\t\t\tfor i := 0; i <= n-1; i++ {\n\t\t\t\t\tvc[i] = vm[i] + o.Beta*(vr[i]-vm[i])\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// perform inside contraction\n\t\t\t\tfor i := 0; i <= n-1; i++ {\n\t\t\t\t\tvc[i] = vm[i] - o.Beta*(vm[i]-v[vg][i])\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// value of function at contraction point\n\t\t\tfc := objfunc(vc)\n\n\t\t\tif fc < f[vg] {\n\t\t\t\tfor i := 0; i <= n-1; i++ {\n\t\t\t\t\tv[vg][i] = vc[i]\n\t\t\t\t}\n\t\t\t\tf[vg] = fc\n\t\t\t} else {\n\t\t\t\t// at this point the contraction is not successful,\n\t\t\t\t// we must halve the distance from vs to all the\n\t\t\t\t// vertices of the simplex and then continue.\n\n\t\t\t\tfor row := 0; row <= n; row++ {\n\t\t\t\t\tif row != vs {\n\t\t\t\t\t\tfor i := 0; i <= n-1; i++ {\n\t\t\t\t\t\t\tv[row][i] = v[vs][i] + (v[row][i]-v[vs][i])/2.0\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tf[vg] = objfunc(v[vg])\n\t\t\t\tf[vh] = objfunc(v[vh])\n\t\t\t}\n\t\t}\n\n\t\t// test for convergence\n\t\tfsum := 0.0\n\t\tfor i := 0; i <= n; i++ {\n\t\t\tfsum += f[i]\n\t\t}\n\t\tfavg := fsum / float64(n+1)\n\t\ts := 0.0\n\t\tfor i := 0; i <= n; i++ {\n\t\t\ts += math.Pow((f[i]-favg), 2.0) / float64(n)\n\t\t}\n\t\ts = math.Sqrt(s)\n\t\tif s < epsilon {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// find the index of the smallest value\n\tvs := 0\n\tfor i := 0; i <= n; i++ {\n\t\tif f[i] < f[vs] {\n\t\t\tvs = i\n\t\t}\n\t}\n\n\tparameters := make([]float64, n)\n\tfor i := 0; i < n; i++ {\n\t\tparameters[i] = v[vs][i]\n\t}\n\n\tmin := objfunc(v[vs])\n\n\treturn min, parameters\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/neldermead/neldermead_test.go",
    "content": "package neldermead_test\n\nimport (\n\t\"math\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/influxql/neldermead\"\n)\n\nfunc round(num float64, precision float64) float64 {\n\trnum := num * math.Pow(10, precision)\n\tvar tnum float64\n\tif rnum < 0 {\n\t\ttnum = math.Floor(rnum - 0.5)\n\t} else {\n\t\ttnum = math.Floor(rnum + 0.5)\n\t}\n\trnum = tnum / math.Pow(10, precision)\n\treturn rnum\n}\n\nfunc almostEqual(a, b, e float64) bool {\n\treturn math.Abs(a-b) < e\n}\n\nfunc Test_Optimize(t *testing.T) {\n\n\tconstraints := func(x []float64) {\n\t\tfor i := range x {\n\t\t\tx[i] = round(x[i], 5)\n\t\t}\n\t}\n\t// 100*(b-a^2)^2 + (1-a)^2\n\t//\n\t// Obvious global minimum at (a,b) = (1,1)\n\t//\n\t// Useful visualization:\n\t// https://www.wolframalpha.com/input/?i=minimize(100*(b-a%5E2)%5E2+%2B+(1-a)%5E2)\n\tf := func(x []float64) float64 {\n\t\tconstraints(x)\n\t\t// a = x[0]\n\t\t// b = x[1]\n\t\treturn 100*(x[1]-x[0]*x[0])*(x[1]-x[0]*x[0]) + (1.0-x[0])*(1.0-x[0])\n\t}\n\n\tstart := []float64{-1.2, 1.0}\n\n\topt := neldermead.New()\n\tepsilon := 1e-5\n\tmin, parameters := opt.Optimize(f, start, epsilon, 1)\n\n\tif !almostEqual(min, 0, epsilon) {\n\t\tt.Errorf(\"unexpected min: got %f exp 0\", min)\n\t}\n\n\tif !almostEqual(parameters[0], 1, 1e-2) {\n\t\tt.Errorf(\"unexpected parameters[0]: got %f exp 1\", parameters[0])\n\t}\n\n\tif !almostEqual(parameters[1], 1, 1e-2) {\n\t\tt.Errorf(\"unexpected parameters[1]: got %f exp 1\", parameters[1])\n\t}\n\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/parser.go",
    "content": "package influxql\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\t// DateFormat represents the format for date literals.\n\tDateFormat = \"2006-01-02\"\n\n\t// DateTimeFormat represents the format for date time literals.\n\tDateTimeFormat = \"2006-01-02 15:04:05.999999\"\n)\n\n// Parser represents an InfluxQL parser.\ntype Parser struct {\n\ts      *bufScanner\n\tparams map[string]interface{}\n}\n\n// NewParser returns a new instance of Parser.\nfunc NewParser(r io.Reader) *Parser {\n\treturn &Parser{s: newBufScanner(r)}\n}\n\n// SetParams sets the parameters that will be used for any bound parameter substitutions.\nfunc (p *Parser) SetParams(params map[string]interface{}) {\n\tp.params = params\n}\n\n// ParseQuery parses a query string and returns its AST representation.\nfunc ParseQuery(s string) (*Query, error) { return NewParser(strings.NewReader(s)).ParseQuery() }\n\n// ParseStatement parses a statement string and returns its AST representation.\nfunc ParseStatement(s string) (Statement, error) {\n\treturn NewParser(strings.NewReader(s)).ParseStatement()\n}\n\n// MustParseStatement parses a statement string and returns its AST. Panic on error.\nfunc MustParseStatement(s string) Statement {\n\tstmt, err := ParseStatement(s)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn stmt\n}\n\n// ParseExpr parses an expression string and returns its AST representation.\nfunc ParseExpr(s string) (Expr, error) { return NewParser(strings.NewReader(s)).ParseExpr() }\n\n// MustParseExpr parses an expression string and returns its AST. 
Panic on error.\nfunc MustParseExpr(s string) Expr {\n\texpr, err := ParseExpr(s)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn expr\n}\n\n// ParseQuery parses an InfluxQL string and returns a Query AST object.\nfunc (p *Parser) ParseQuery() (*Query, error) {\n\tvar statements Statements\n\tsemi := true\n\n\tfor {\n\t\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok == EOF {\n\t\t\treturn &Query{Statements: statements}, nil\n\t\t} else if tok == SEMICOLON {\n\t\t\tsemi = true\n\t\t} else {\n\t\t\tif !semi {\n\t\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\";\"}, pos)\n\t\t\t}\n\t\t\tp.unscan()\n\t\t\ts, err := p.ParseStatement()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tstatements = append(statements, s)\n\t\t\tsemi = false\n\t\t}\n\t}\n}\n\n// ParseStatement parses an InfluxQL string and returns a Statement AST object.\nfunc (p *Parser) ParseStatement() (Statement, error) {\n\t// Inspect the first token.\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\tswitch tok {\n\tcase SELECT:\n\t\treturn p.parseSelectStatement(targetNotRequired)\n\tcase DELETE:\n\t\treturn p.parseDeleteStatement()\n\tcase SHOW:\n\t\treturn p.parseShowStatement()\n\tcase CREATE:\n\t\treturn p.parseCreateStatement()\n\tcase DROP:\n\t\treturn p.parseDropStatement()\n\tcase GRANT:\n\t\treturn p.parseGrantStatement()\n\tcase REVOKE:\n\t\treturn p.parseRevokeStatement()\n\tcase ALTER:\n\t\treturn p.parseAlterStatement()\n\tcase SET:\n\t\treturn p.parseSetPasswordUserStatement()\n\tcase KILL:\n\t\treturn p.parseKillQueryStatement()\n\tdefault:\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"SELECT\", \"DELETE\", \"SHOW\", \"CREATE\", \"DROP\", \"GRANT\", \"REVOKE\", \"ALTER\", \"SET\", \"KILL\"}, pos)\n\t}\n}\n\n// parseShowStatement parses a string and returns a list statement.\n// This function assumes the SHOW token has already been consumed.\nfunc (p *Parser) parseShowStatement() (Statement, error) {\n\ttok, pos, lit := 
p.scanIgnoreWhitespace()\n\tswitch tok {\n\tcase CONTINUOUS:\n\t\treturn p.parseShowContinuousQueriesStatement()\n\tcase GRANTS:\n\t\treturn p.parseGrantsForUserStatement()\n\tcase DATABASES:\n\t\treturn p.parseShowDatabasesStatement()\n\tcase FIELD:\n\t\ttok, pos, lit := p.scanIgnoreWhitespace()\n\t\tif tok == KEYS {\n\t\t\treturn p.parseShowFieldKeysStatement()\n\t\t}\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"KEYS\"}, pos)\n\tcase MEASUREMENTS:\n\t\treturn p.parseShowMeasurementsStatement()\n\tcase QUERIES:\n\t\treturn p.parseShowQueriesStatement()\n\tcase RETENTION:\n\t\ttok, pos, lit := p.scanIgnoreWhitespace()\n\t\tif tok == POLICIES {\n\t\t\treturn p.parseShowRetentionPoliciesStatement()\n\t\t}\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"POLICIES\"}, pos)\n\tcase SERIES:\n\t\treturn p.parseShowSeriesStatement()\n\tcase SHARD:\n\t\ttok, pos, lit := p.scanIgnoreWhitespace()\n\t\tif tok == GROUPS {\n\t\t\treturn p.parseShowShardGroupsStatement()\n\t\t}\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"GROUPS\"}, pos)\n\tcase SHARDS:\n\t\treturn p.parseShowShardsStatement()\n\tcase STATS:\n\t\treturn p.parseShowStatsStatement()\n\tcase DIAGNOSTICS:\n\t\treturn p.parseShowDiagnosticsStatement()\n\tcase TAG:\n\t\ttok, pos, lit := p.scanIgnoreWhitespace()\n\t\tif tok == KEYS {\n\t\t\treturn p.parseShowTagKeysStatement()\n\t\t} else if tok == VALUES {\n\t\t\treturn p.parseShowTagValuesStatement()\n\t\t}\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"KEYS\", \"VALUES\"}, pos)\n\tcase USERS:\n\t\treturn p.parseShowUsersStatement()\n\tcase SUBSCRIPTIONS:\n\t\treturn p.parseShowSubscriptionsStatement()\n\t}\n\n\tshowQueryKeywords := 
[]string{\n\t\t\"CONTINUOUS\",\n\t\t\"DATABASES\",\n\t\t\"FIELD\",\n\t\t\"GRANTS\",\n\t\t\"MEASUREMENTS\",\n\t\t\"QUERIES\",\n\t\t\"RETENTION\",\n\t\t\"SERIES\",\n\t\t\"TAG\",\n\t\t\"USERS\",\n\t\t\"STATS\",\n\t\t\"DIAGNOSTICS\",\n\t\t\"SHARD\",\n\t\t\"SHARDS\",\n\t\t\"SUBSCRIPTIONS\",\n\t}\n\tsort.Strings(showQueryKeywords)\n\n\treturn nil, newParseError(tokstr(tok, lit), showQueryKeywords, pos)\n}\n\n// parseCreateStatement parses a string and returns a create statement.\n// This function assumes the CREATE token has already been consumed.\nfunc (p *Parser) parseCreateStatement() (Statement, error) {\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\tif tok == CONTINUOUS {\n\t\treturn p.parseCreateContinuousQueryStatement()\n\t} else if tok == DATABASE {\n\t\treturn p.parseCreateDatabaseStatement()\n\t} else if tok == USER {\n\t\treturn p.parseCreateUserStatement()\n\t} else if tok == RETENTION {\n\t\ttok, pos, lit = p.scanIgnoreWhitespace()\n\t\tif tok != POLICY {\n\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"POLICY\"}, pos)\n\t\t}\n\t\treturn p.parseCreateRetentionPolicyStatement()\n\t} else if tok == SUBSCRIPTION {\n\t\treturn p.parseCreateSubscriptionStatement()\n\t}\n\n\treturn nil, newParseError(tokstr(tok, lit), []string{\"CONTINUOUS\", \"DATABASE\", \"USER\", \"RETENTION\", \"SUBSCRIPTION\"}, pos)\n}\n\n// parseDropStatement parses a string and returns a drop statement.\n// This function assumes the DROP token has already been consumed.\nfunc (p *Parser) parseDropStatement() (Statement, error) {\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\tswitch tok {\n\tcase CONTINUOUS:\n\t\treturn p.parseDropContinuousQueryStatement()\n\tcase DATABASE:\n\t\treturn p.parseDropDatabaseStatement()\n\tcase MEASUREMENT:\n\t\treturn p.parseDropMeasurementStatement()\n\tcase RETENTION:\n\t\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != POLICY {\n\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"POLICY\"}, pos)\n\t\t}\n\t\treturn 
p.parseDropRetentionPolicyStatement()\n\tcase SERIES:\n\t\treturn p.parseDropSeriesStatement()\n\tcase SHARD:\n\t\treturn p.parseDropShardStatement()\n\tcase SUBSCRIPTION:\n\t\treturn p.parseDropSubscriptionStatement()\n\tcase USER:\n\t\treturn p.parseDropUserStatement()\n\tdefault:\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"CONTINUOUS\", \"MEASUREMENT\", \"RETENTION\", \"SERIES\", \"SHARD\", \"SUBSCRIPTION\", \"USER\"}, pos)\n\t}\n}\n\n// parseAlterStatement parses a string and returns an alter statement.\n// This function assumes the ALTER token has already been consumed.\nfunc (p *Parser) parseAlterStatement() (Statement, error) {\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\tif tok == RETENTION {\n\t\tif tok, pos, lit = p.scanIgnoreWhitespace(); tok != POLICY {\n\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"POLICY\"}, pos)\n\t\t}\n\t\treturn p.parseAlterRetentionPolicyStatement()\n\t}\n\n\treturn nil, newParseError(tokstr(tok, lit), []string{\"RETENTION\"}, pos)\n}\n\n// parseSetPasswordUserStatement parses a string and returns a set statement.\n// This function assumes the SET token has already been consumed.\nfunc (p *Parser) parseSetPasswordUserStatement() (*SetPasswordUserStatement, error) {\n\tstmt := &SetPasswordUserStatement{}\n\n\t// Consume the required PASSWORD FOR tokens.\n\tif err := p.parseTokens([]Token{PASSWORD, FOR}); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse username\n\tident, err := p.parseIdent()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Name = ident\n\n\t// Consume the required = token.\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != EQ {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"=\"}, pos)\n\t}\n\n\t// Parse new user's password\n\tif ident, err = p.parseString(); err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Password = ident\n\n\treturn stmt, nil\n}\n\n// parseKillQueryStatement parses a string and returns a kill statement.\n// This function assumes the 
KILL token has already been consumed.\nfunc (p *Parser) parseKillQueryStatement() (*KillQueryStatement, error) {\n\tif err := p.parseTokens([]Token{QUERY}); err != nil {\n\t\treturn nil, err\n\t}\n\n\tqid, err := p.parseUInt64()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar host string\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == ON {\n\t\thost, err = p.parseIdent()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tp.unscan()\n\t}\n\treturn &KillQueryStatement{QueryID: qid, Host: host}, nil\n}\n\n// parseCreateSubscriptionStatement parses a string and returns a CreateSubscriptionStatement.\n// This function assumes the \"CREATE SUBSCRIPTION\" tokens have already been consumed.\nfunc (p *Parser) parseCreateSubscriptionStatement() (*CreateSubscriptionStatement, error) {\n\tstmt := &CreateSubscriptionStatement{}\n\n\t// Read the id of the subscription to create.\n\tident, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Name = ident\n\n\t// Expect an \"ON\" keyword.\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != ON {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"ON\"}, pos)\n\t}\n\n\t// Read the name of the database.\n\tif ident, err = p.parseIdent(); err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Database = ident\n\n\tif tok, pos, lit := p.scan(); tok != DOT {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\".\"}, pos)\n\t}\n\n\t// Read the name of the retention policy.\n\tif ident, err = p.parseIdent(); err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.RetentionPolicy = ident\n\n\t// Expect a \"DESTINATIONS\" keyword.\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != DESTINATIONS {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"DESTINATIONS\"}, pos)\n\t}\n\n\t// Expect one of \"ANY ALL\" keywords.\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok == ALL || tok == ANY {\n\t\tstmt.Mode = tokens[tok]\n\t} else {\n\t\treturn nil, newParseError(tokstr(tok, 
lit), []string{\"ALL\", \"ANY\"}, pos)\n\t}\n\n\t// Read list of destinations.\n\tvar destinations []string\n\tif destinations, err = p.parseStringList(); err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Destinations = destinations\n\n\treturn stmt, nil\n}\n\n// parseCreateRetentionPolicyStatement parses a string and returns a create retention policy statement.\n// This function assumes the CREATE RETENTION POLICY tokens have already been consumed.\nfunc (p *Parser) parseCreateRetentionPolicyStatement() (*CreateRetentionPolicyStatement, error) {\n\tstmt := &CreateRetentionPolicyStatement{}\n\n\t// Parse the retention policy name.\n\tident, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Name = ident\n\n\t// Consume the required ON token.\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != ON {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"ON\"}, pos)\n\t}\n\n\t// Parse the database name.\n\tident, err = p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Database = ident\n\n\t// Parse required DURATION token.\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != DURATION {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"DURATION\"}, pos)\n\t}\n\n\t// Parse duration value\n\td, err := p.parseDuration()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Duration = d\n\n\t// Parse required REPLICATION token.\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != REPLICATION {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"REPLICATION\"}, pos)\n\t}\n\n\t// Parse replication value.\n\tn, err := p.parseInt(1, math.MaxInt32)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Replication = n\n\n\t// Parse optional SHARD token.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == SHARD {\n\t\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != DURATION {\n\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"DURATION\"}, pos)\n\t\t}\n\n\t\t// Check to see if they used the INF 
keyword\n\t\ttok, pos, _ := p.scanIgnoreWhitespace()\n\t\tif tok == INF {\n\t\t\treturn nil, &ParseError{\n\t\t\t\tMessage: \"invalid duration INF for shard duration\",\n\t\t\t\tPos:     pos,\n\t\t\t}\n\t\t}\n\t\tp.unscan()\n\n\t\td, err := p.parseDuration()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstmt.ShardGroupDuration = d\n\t} else {\n\t\tp.unscan()\n\t}\n\n\t// Parse optional DEFAULT token.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == DEFAULT {\n\t\tstmt.Default = true\n\t} else {\n\t\tp.unscan()\n\t}\n\n\treturn stmt, nil\n}\n\n// parseAlterRetentionPolicyStatement parses a string and returns an alter retention policy statement.\n// This function assumes the ALTER RETENTION POLICY tokens have already been consumed.\nfunc (p *Parser) parseAlterRetentionPolicyStatement() (*AlterRetentionPolicyStatement, error) {\n\tstmt := &AlterRetentionPolicyStatement{}\n\n\t// Parse the retention policy name.\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\tif tok == DEFAULT {\n\t\tstmt.Name = \"default\"\n\t} else if tok == IDENT {\n\t\tstmt.Name = lit\n\t} else {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"identifier\"}, pos)\n\t}\n\n\t// Consume the required ON token.\n\tif tok, pos, lit = p.scanIgnoreWhitespace(); tok != ON {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"ON\"}, pos)\n\t}\n\n\t// Parse the database name.\n\tident, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Database = ident\n\n\t// Loop through option tokens (DURATION, REPLICATION, SHARD DURATION, DEFAULT, etc.).\n\tfound := make(map[Token]struct{})\nLoop:\n\tfor {\n\t\ttok, pos, lit := p.scanIgnoreWhitespace()\n\t\tif _, ok := found[tok]; ok {\n\t\t\treturn nil, &ParseError{\n\t\t\t\tMessage: fmt.Sprintf(\"found duplicate %s option\", tok),\n\t\t\t\tPos:     pos,\n\t\t\t}\n\t\t}\n\n\t\tswitch tok {\n\t\tcase DURATION:\n\t\t\td, err := p.parseDuration()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, 
err\n\t\t\t}\n\t\t\tstmt.Duration = &d\n\t\tcase REPLICATION:\n\t\t\tn, err := p.parseInt(1, math.MaxInt32)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tstmt.Replication = &n\n\t\tcase SHARD:\n\t\t\ttok, pos, lit := p.scanIgnoreWhitespace()\n\t\t\tif tok == DURATION {\n\t\t\t\t// Check to see if they used the INF keyword\n\t\t\t\ttok, pos, _ := p.scanIgnoreWhitespace()\n\t\t\t\tif tok == INF {\n\t\t\t\t\treturn nil, &ParseError{\n\t\t\t\t\t\tMessage: \"invalid duration INF for shard duration\",\n\t\t\t\t\t\tPos:     pos,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tp.unscan()\n\n\t\t\t\td, err := p.parseDuration()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tstmt.ShardGroupDuration = &d\n\t\t\t} else {\n\t\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"DURATION\"}, pos)\n\t\t\t}\n\t\tcase DEFAULT:\n\t\t\tstmt.Default = true\n\t\tdefault:\n\t\t\tif len(found) == 0 {\n\t\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"DURATION\", \"REPLICATION\", \"SHARD\", \"DEFAULT\"}, pos)\n\t\t\t}\n\t\t\tp.unscan()\n\t\t\tbreak Loop\n\t\t}\n\t\tfound[tok] = struct{}{}\n\t}\n\n\treturn stmt, nil\n}\n\n// parseInt parses a string representing a base 10 integer and returns the number.\n// It returns an error if the parsed number is outside the range [min, max].\nfunc (p *Parser) parseInt(min, max int) (int, error) {\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\tif tok != INTEGER {\n\t\treturn 0, newParseError(tokstr(tok, lit), []string{\"integer\"}, pos)\n\t}\n\n\t// Convert string to int.\n\tn, err := strconv.Atoi(lit)\n\tif err != nil {\n\t\treturn 0, &ParseError{Message: err.Error(), Pos: pos}\n\t} else if min > n || n > max {\n\t\treturn 0, &ParseError{\n\t\t\tMessage: fmt.Sprintf(\"invalid value %d: must be %d <= n <= %d\", n, min, max),\n\t\t\tPos:     pos,\n\t\t}\n\t}\n\n\treturn n, nil\n}\n\n// parseUInt64 parses a string and returns a 64-bit unsigned integer literal.\nfunc (p *Parser) parseUInt64() (uint64, error) 
{\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\tif tok != INTEGER {\n\t\treturn 0, newParseError(tokstr(tok, lit), []string{\"integer\"}, pos)\n\t}\n\n\t// Convert string to unsigned 64-bit integer\n\tn, err := strconv.ParseUint(lit, 10, 64)\n\tif err != nil {\n\t\treturn 0, &ParseError{Message: err.Error(), Pos: pos}\n\t}\n\n\treturn uint64(n), nil\n}\n\n// parseDuration parses a string and returns a duration literal.\n// This function assumes the DURATION token has already been consumed.\nfunc (p *Parser) parseDuration() (time.Duration, error) {\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\tif tok != DURATIONVAL && tok != INF {\n\t\treturn 0, newParseError(tokstr(tok, lit), []string{\"duration\"}, pos)\n\t}\n\n\tif tok == INF {\n\t\treturn 0, nil\n\t}\n\n\td, err := ParseDuration(lit)\n\tif err != nil {\n\t\treturn 0, &ParseError{Message: err.Error(), Pos: pos}\n\t}\n\n\treturn d, nil\n}\n\n// parseIdent parses an identifier.\nfunc (p *Parser) parseIdent() (string, error) {\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\tif tok != IDENT {\n\t\treturn \"\", newParseError(tokstr(tok, lit), []string{\"identifier\"}, pos)\n\t}\n\treturn lit, nil\n}\n\n// parseIdentList parses a comma delimited list of identifiers.\nfunc (p *Parser) parseIdentList() ([]string, error) {\n\t// Parse first (required) identifier.\n\tident, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tidents := []string{ident}\n\n\t// Parse remaining (optional) identifiers.\n\tfor {\n\t\tif tok, _, _ := p.scanIgnoreWhitespace(); tok != COMMA {\n\t\t\tp.unscan()\n\t\t\treturn idents, nil\n\t\t}\n\n\t\tif ident, err = p.parseIdent(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tidents = append(idents, ident)\n\t}\n}\n\n// parseSegmentedIdents parses a segmented identifiers.\n// e.g.,  \"db\".\"rp\".measurement  or  \"db\"..measurement\nfunc (p *Parser) parseSegmentedIdents() ([]string, error) {\n\tident, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\tidents := []string{ident}\n\n\t// Parse remaining (optional) identifiers.\n\tfor {\n\t\tif tok, _, _ := p.scan(); tok != DOT {\n\t\t\t// No more segments so we're done.\n\t\t\tp.unscan()\n\t\t\tbreak\n\t\t}\n\n\t\tif ch := p.peekRune(); ch == '/' {\n\t\t\t// Next segment is a regex so we're done.\n\t\t\tbreak\n\t\t} else if ch == ':' {\n\t\t\t// Next segment is context-specific so let caller handle it.\n\t\t\tbreak\n\t\t} else if ch == '.' {\n\t\t\t// Add an empty identifier.\n\t\t\tidents = append(idents, \"\")\n\t\t\tcontinue\n\t\t}\n\n\t\t// Parse the next identifier.\n\t\tif ident, err = p.parseIdent(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tidents = append(idents, ident)\n\t}\n\n\tif len(idents) > 3 {\n\t\tmsg := fmt.Sprintf(\"too many segments in %s\", QuoteIdent(idents...))\n\t\treturn nil, &ParseError{Message: msg}\n\t}\n\n\treturn idents, nil\n}\n\n// parseString parses a string.\nfunc (p *Parser) parseString() (string, error) {\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\tif tok != STRING {\n\t\treturn \"\", newParseError(tokstr(tok, lit), []string{\"string\"}, pos)\n\t}\n\treturn lit, nil\n}\n\n// parseStringList parses a list of strings separated by commas.\nfunc (p *Parser) parseStringList() ([]string, error) {\n\t// Parse first (required) string.\n\tstr, err := p.parseString()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstrs := []string{str}\n\n\t// Parse remaining (optional) strings.\n\tfor {\n\t\tif tok, _, _ := p.scanIgnoreWhitespace(); tok != COMMA {\n\t\t\tp.unscan()\n\t\t\treturn strs, nil\n\t\t}\n\n\t\tif str, err = p.parseString(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tstrs = append(strs, str)\n\t}\n}\n\n// parseRevokeStatement parses a string and returns a revoke statement.\n// This function assumes the REVOKE token has already been consumed.\nfunc (p *Parser) parseRevokeStatement() (Statement, error) {\n\t// Parse the privilege to be revoked.\n\tpriv, err := p.parsePrivilege()\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\n\t// Check for ON or FROM clauses.\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\tif tok == ON {\n\t\tstmt, err := p.parseRevokeOnStatement()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstmt.Privilege = priv\n\t\treturn stmt, nil\n\t} else if tok == FROM {\n\t\t// Admin privilege is only revoked on ALL PRIVILEGES.\n\t\tif priv != AllPrivileges {\n\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"ON\"}, pos)\n\t\t}\n\t\treturn p.parseRevokeAdminStatement()\n\t}\n\n\t// Only ON or FROM clauses are allowed after privilege.\n\tif priv == AllPrivileges {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"ON\", \"FROM\"}, pos)\n\t}\n\treturn nil, newParseError(tokstr(tok, lit), []string{\"ON\"}, pos)\n}\n\n// parseRevokeOnStatement parses a string and returns a revoke statement.\n// This function assumes the [PRIVILEGE] ON tokens have already been consumed.\nfunc (p *Parser) parseRevokeOnStatement() (*RevokeStatement, error) {\n\tstmt := &RevokeStatement{}\n\n\t// Parse the name of the database.\n\tlit, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.On = lit\n\n\t// Parse FROM clause.\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\n\t// Check for required FROM token.\n\tif tok != FROM {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"FROM\"}, pos)\n\t}\n\n\t// Parse the name of the user.\n\tlit, err = p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.User = lit\n\n\treturn stmt, nil\n}\n\n// parseRevokeAdminStatement parses a string and returns a revoke admin statement.\n// This function assumes the ALL [PRVILEGES] FROM token has already been consumed.\nfunc (p *Parser) parseRevokeAdminStatement() (*RevokeAdminStatement, error) {\n\t// Admin privilege is always false when revoke admin clause is called.\n\tstmt := &RevokeAdminStatement{}\n\n\t// Parse the name of the user.\n\tlit, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\tstmt.User = lit\n\n\treturn stmt, nil\n}\n\n// parseGrantStatement parses a string and returns a grant statement.\n// This function assumes the GRANT token has already been consumed.\nfunc (p *Parser) parseGrantStatement() (Statement, error) {\n\t// Parse the privilege to be granted.\n\tpriv, err := p.parsePrivilege()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check for ON or TO clauses.\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\tif tok == ON {\n\t\tstmt, err := p.parseGrantOnStatement()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstmt.Privilege = priv\n\t\treturn stmt, nil\n\t} else if tok == TO {\n\t\t// Admin privilege is only granted on ALL PRIVILEGES.\n\t\tif priv != AllPrivileges {\n\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"ON\"}, pos)\n\t\t}\n\t\treturn p.parseGrantAdminStatement()\n\t}\n\n\t// Only ON or TO clauses are allowed after privilege.\n\tif priv == AllPrivileges {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"ON\", \"TO\"}, pos)\n\t}\n\treturn nil, newParseError(tokstr(tok, lit), []string{\"ON\"}, pos)\n}\n\n// parseGrantOnStatement parses a string and returns a grant statement.\n// This function assumes the [PRIVILEGE] ON tokens have already been consumed.\nfunc (p *Parser) parseGrantOnStatement() (*GrantStatement, error) {\n\tstmt := &GrantStatement{}\n\n\t// Parse the name of the database.\n\tlit, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.On = lit\n\n\t// Parse TO clause.\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\n\t// Check for required TO token.\n\tif tok != TO {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"TO\"}, pos)\n\t}\n\n\t// Parse the name of the user.\n\tlit, err = p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.User = lit\n\n\treturn stmt, nil\n}\n\n// parseGrantAdminStatement parses a string and returns a grant admin statement.\n// This function assumes the ALL [PRVILEGES] TO tokens have 
already been consumed.\nfunc (p *Parser) parseGrantAdminStatement() (*GrantAdminStatement, error) {\n\t// Admin privilege is always true when grant admin clause is called.\n\tstmt := &GrantAdminStatement{}\n\n\t// Parse the name of the user.\n\tlit, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.User = lit\n\n\treturn stmt, nil\n}\n\n// parsePrivilege parses a string and returns a Privilege.\nfunc (p *Parser) parsePrivilege() (Privilege, error) {\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\tswitch tok {\n\tcase READ:\n\t\treturn ReadPrivilege, nil\n\tcase WRITE:\n\t\treturn WritePrivilege, nil\n\tcase ALL:\n\t\t// Consume optional PRIVILEGES token\n\t\ttok, pos, lit = p.scanIgnoreWhitespace()\n\t\tif tok != PRIVILEGES {\n\t\t\tp.unscan()\n\t\t}\n\t\treturn AllPrivileges, nil\n\t}\n\treturn 0, newParseError(tokstr(tok, lit), []string{\"READ\", \"WRITE\", \"ALL [PRIVILEGES]\"}, pos)\n}\n\n// parseSelectStatement parses a select string and returns a Statement AST object.\n// This function assumes the SELECT token has already been consumed.\nfunc (p *Parser) parseSelectStatement(tr targetRequirement) (*SelectStatement, error) {\n\tstmt := &SelectStatement{}\n\tvar err error\n\n\t// Parse fields: \"FIELD+\".\n\tif stmt.Fields, err = p.parseFields(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse target: \"INTO\"\n\tif stmt.Target, err = p.parseTarget(tr); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse source: \"FROM\".\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != FROM {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"FROM\"}, pos)\n\t}\n\tif stmt.Sources, err = p.parseSources(true); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse condition: \"WHERE EXPR\".\n\tif stmt.Condition, err = p.parseCondition(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse dimensions: \"GROUP BY DIMENSION+\".\n\tif stmt.Dimensions, err = p.parseDimensions(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse fill 
options: \"fill(<option>)\"\n\tif stmt.Fill, stmt.FillValue, err = p.parseFill(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse sort: \"ORDER BY FIELD+\".\n\tif stmt.SortFields, err = p.parseOrderBy(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse limit: \"LIMIT <n>\".\n\tif stmt.Limit, err = p.parseOptionalTokenAndInt(LIMIT); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse offset: \"OFFSET <n>\".\n\tif stmt.Offset, err = p.parseOptionalTokenAndInt(OFFSET); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse series limit: \"SLIMIT <n>\".\n\tif stmt.SLimit, err = p.parseOptionalTokenAndInt(SLIMIT); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse series offset: \"SOFFSET <n>\".\n\tif stmt.SOffset, err = p.parseOptionalTokenAndInt(SOFFSET); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse timezone: \"TZ(<timezone>)\".\n\tif stmt.Location, err = p.parseLocation(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Set if the query is a raw data query or one with an aggregate\n\tstmt.IsRawQuery = true\n\tWalkFunc(stmt.Fields, func(n Node) {\n\t\tif _, ok := n.(*Call); ok {\n\t\t\tstmt.IsRawQuery = false\n\t\t}\n\t})\n\n\tif err := stmt.validate(tr); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn stmt, nil\n}\n\n// targetRequirement specifies whether or not a target clause is required.\ntype targetRequirement int\n\nconst (\n\ttargetRequired targetRequirement = iota\n\ttargetNotRequired\n\ttargetSubquery\n)\n\n// parseTarget parses a string and returns a Target.\nfunc (p *Parser) parseTarget(tr targetRequirement) (*Target, error) {\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != INTO {\n\t\tif tr == targetRequired {\n\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"INTO\"}, pos)\n\t\t}\n\t\tp.unscan()\n\t\treturn nil, nil\n\t}\n\n\t// db, rp, and / or measurement\n\tidents, err := p.parseSegmentedIdents()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(idents) < 3 {\n\t\t// Check for source measurement 
reference.\n\t\tif ch := p.peekRune(); ch == ':' {\n\t\t\tif err := p.parseTokens([]Token{COLON, MEASUREMENT}); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// Append empty measurement name.\n\t\t\tidents = append(idents, \"\")\n\t\t}\n\t}\n\n\tt := &Target{Measurement: &Measurement{IsTarget: true}}\n\n\tswitch len(idents) {\n\tcase 1:\n\t\tt.Measurement.Name = idents[0]\n\tcase 2:\n\t\tt.Measurement.RetentionPolicy = idents[0]\n\t\tt.Measurement.Name = idents[1]\n\tcase 3:\n\t\tt.Measurement.Database = idents[0]\n\t\tt.Measurement.RetentionPolicy = idents[1]\n\t\tt.Measurement.Name = idents[2]\n\t}\n\n\treturn t, nil\n}\n\n// parseDeleteStatement parses a string and returns a delete statement.\n// This function assumes the DELETE token has already been consumed.\nfunc (p *Parser) parseDeleteStatement() (Statement, error) {\n\tstmt := &DeleteSeriesStatement{}\n\tvar err error\n\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\n\tif tok == FROM {\n\t\t// Parse source.\n\t\tif stmt.Sources, err = p.parseSources(false); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar err error\n\t\tWalkFunc(stmt.Sources, func(n Node) {\n\t\t\tif t, ok := n.(*Measurement); ok {\n\t\t\t\t// Don't allow database or retention policy in from clause for delete\n\t\t\t\t// statement.  
They apply to the selected database across all retention\n\t\t\t\t// policies.\n\t\t\t\tif t.Database != \"\" {\n\t\t\t\t\terr = &ParseError{Message: \"database not supported\"}\n\t\t\t\t}\n\t\t\t\tif t.RetentionPolicy != \"\" {\n\t\t\t\t\terr = &ParseError{Message: \"retention policy not supported\"}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t} else {\n\t\tp.unscan()\n\t}\n\n\t// Parse condition: \"WHERE EXPR\".\n\tif stmt.Condition, err = p.parseCondition(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If they didn't provide a FROM or a WHERE, this query is invalid\n\tif stmt.Condition == nil && stmt.Sources == nil {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"FROM\", \"WHERE\"}, pos)\n\t}\n\n\treturn stmt, nil\n}\n\n// parseShowSeriesStatement parses a string and returns a ShowSeriesStatement.\n// This function assumes the \"SHOW SERIES\" tokens have already been consumed.\nfunc (p *Parser) parseShowSeriesStatement() (*ShowSeriesStatement, error) {\n\tstmt := &ShowSeriesStatement{}\n\tvar err error\n\n\t// Parse optional ON clause.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == ON {\n\t\t// Parse the database.\n\t\tstmt.Database, err = p.parseIdent()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tp.unscan()\n\t}\n\n\t// Parse optional FROM.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == FROM {\n\t\tif stmt.Sources, err = p.parseSources(false); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tp.unscan()\n\t}\n\n\t// Parse condition: \"WHERE EXPR\".\n\tif stmt.Condition, err = p.parseCondition(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse sort: \"ORDER BY FIELD+\".\n\tif stmt.SortFields, err = p.parseOrderBy(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse limit: \"LIMIT <n>\".\n\tif stmt.Limit, err = p.parseOptionalTokenAndInt(LIMIT); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse offset: \"OFFSET <n>\".\n\tif stmt.Offset, err = 
p.parseOptionalTokenAndInt(OFFSET); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn stmt, nil\n}\n\n// parseShowMeasurementsStatement parses a string and returns a ShowSeriesStatement.\n// This function assumes the \"SHOW MEASUREMENTS\" tokens have already been consumed.\nfunc (p *Parser) parseShowMeasurementsStatement() (*ShowMeasurementsStatement, error) {\n\tstmt := &ShowMeasurementsStatement{}\n\tvar err error\n\n\t// Parse optional ON clause.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == ON {\n\t\t// Parse the database.\n\t\tstmt.Database, err = p.parseIdent()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tp.unscan()\n\t}\n\n\t// Parse optional WITH clause.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == WITH {\n\t\t// Parse required MEASUREMENT token.\n\t\tif err := p.parseTokens([]Token{MEASUREMENT}); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Parse required operator: = or =~.\n\t\ttok, pos, lit := p.scanIgnoreWhitespace()\n\t\tswitch tok {\n\t\tcase EQ, EQREGEX:\n\t\t\t// Parse required source (measurement name or regex).\n\t\t\tif stmt.Source, err = p.parseSource(false); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"=\", \"=~\"}, pos)\n\t\t}\n\t} else {\n\t\t// Not a WITH clause so put the token back.\n\t\tp.unscan()\n\t}\n\n\t// Parse condition: \"WHERE EXPR\".\n\tif stmt.Condition, err = p.parseCondition(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse sort: \"ORDER BY FIELD+\".\n\tif stmt.SortFields, err = p.parseOrderBy(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse limit: \"LIMIT <n>\".\n\tif stmt.Limit, err = p.parseOptionalTokenAndInt(LIMIT); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse offset: \"OFFSET <n>\".\n\tif stmt.Offset, err = p.parseOptionalTokenAndInt(OFFSET); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn stmt, nil\n}\n\n// parseShowQueriesStatement parses a string and returns a 
ShowQueriesStatement.\n// This function assumes the \"SHOW QUERIES\" tokens have been consumed.\nfunc (p *Parser) parseShowQueriesStatement() (*ShowQueriesStatement, error) {\n\treturn &ShowQueriesStatement{}, nil\n}\n\n// parseShowRetentionPoliciesStatement parses a string and returns a ShowRetentionPoliciesStatement.\n// This function assumes the \"SHOW RETENTION POLICIES\" tokens have been consumed.\nfunc (p *Parser) parseShowRetentionPoliciesStatement() (*ShowRetentionPoliciesStatement, error) {\n\tstmt := &ShowRetentionPoliciesStatement{}\n\n\t// Expect an \"ON\" keyword.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == ON {\n\t\t// Parse the database.\n\t\tident, err := p.parseIdent()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstmt.Database = ident\n\t} else {\n\t\tp.unscan()\n\t}\n\n\treturn stmt, nil\n}\n\n// parseShowTagKeysStatement parses a string and returns a ShowSeriesStatement.\n// This function assumes the \"SHOW TAG KEYS\" tokens have already been consumed.\nfunc (p *Parser) parseShowTagKeysStatement() (*ShowTagKeysStatement, error) {\n\tstmt := &ShowTagKeysStatement{}\n\tvar err error\n\n\t// Parse optional ON clause.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == ON {\n\t\t// Parse the database.\n\t\tstmt.Database, err = p.parseIdent()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tp.unscan()\n\t}\n\n\t// Parse optional source.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == FROM {\n\t\tif stmt.Sources, err = p.parseSources(false); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tp.unscan()\n\t}\n\n\t// Parse condition: \"WHERE EXPR\".\n\tif stmt.Condition, err = p.parseCondition(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse sort: \"ORDER BY FIELD+\".\n\tif stmt.SortFields, err = p.parseOrderBy(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse limit: \"LIMIT <n>\".\n\tif stmt.Limit, err = p.parseOptionalTokenAndInt(LIMIT); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse 
offset: \"OFFSET <n>\".\n\tif stmt.Offset, err = p.parseOptionalTokenAndInt(OFFSET); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse series limit: \"SLIMIT <n>\".\n\tif stmt.SLimit, err = p.parseOptionalTokenAndInt(SLIMIT); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse series offset: \"SOFFSET <n>\".\n\tif stmt.SOffset, err = p.parseOptionalTokenAndInt(SOFFSET); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn stmt, nil\n}\n\n// parseShowTagValuesStatement parses a string and returns a ShowSeriesStatement.\n// This function assumes the \"SHOW TAG VALUES\" tokens have already been consumed.\nfunc (p *Parser) parseShowTagValuesStatement() (*ShowTagValuesStatement, error) {\n\tstmt := &ShowTagValuesStatement{}\n\tvar err error\n\n\t// Parse optional ON clause.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == ON {\n\t\t// Parse the database.\n\t\tstmt.Database, err = p.parseIdent()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tp.unscan()\n\t}\n\n\t// Parse optional source.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == FROM {\n\t\tif stmt.Sources, err = p.parseSources(false); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tp.unscan()\n\t}\n\n\t// Parse required WITH KEY.\n\tif stmt.Op, stmt.TagKeyExpr, err = p.parseTagKeyExpr(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse condition: \"WHERE EXPR\".\n\tif stmt.Condition, err = p.parseCondition(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse sort: \"ORDER BY FIELD+\".\n\tif stmt.SortFields, err = p.parseOrderBy(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse limit: \"LIMIT <n>\".\n\tif stmt.Limit, err = p.parseOptionalTokenAndInt(LIMIT); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse offset: \"OFFSET <n>\".\n\tif stmt.Offset, err = p.parseOptionalTokenAndInt(OFFSET); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn stmt, nil\n}\n\n// parseTagKeys parses a string and returns a list of tag keys.\nfunc (p *Parser) parseTagKeyExpr() 
(Token, Literal, error) {\n\tvar err error\n\n\t// Parse required WITH KEY tokens.\n\tif err := p.parseTokens([]Token{WITH, KEY}); err != nil {\n\t\treturn 0, nil, err\n\t}\n\n\t// Parse required IN, EQ, or EQREGEX token.\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\tif tok == IN {\n\t\t// Parse required ( token.\n\t\tif tok, pos, lit = p.scanIgnoreWhitespace(); tok != LPAREN {\n\t\t\treturn 0, nil, newParseError(tokstr(tok, lit), []string{\"(\"}, pos)\n\t\t}\n\n\t\t// Parse tag key list.\n\t\tvar tagKeys []string\n\t\tif tagKeys, err = p.parseIdentList(); err != nil {\n\t\t\treturn 0, nil, err\n\t\t}\n\n\t\t// Parse required ) token.\n\t\tif tok, pos, lit = p.scanIgnoreWhitespace(); tok != RPAREN {\n\t\t\treturn 0, nil, newParseError(tokstr(tok, lit), []string{\")\"}, pos)\n\t\t}\n\t\treturn IN, &ListLiteral{Vals: tagKeys}, nil\n\t} else if tok == EQ || tok == NEQ {\n\t\t// Parse required tag key.\n\t\tident, err := p.parseIdent()\n\t\tif err != nil {\n\t\t\treturn 0, nil, err\n\t\t}\n\t\treturn tok, &StringLiteral{Val: ident}, nil\n\t} else if tok == EQREGEX || tok == NEQREGEX {\n\t\tre, err := p.parseRegex()\n\t\tif err != nil {\n\t\t\treturn 0, nil, err\n\t\t} else if re == nil {\n\t\t\t// parseRegex can return an empty type, but we need it to be present\n\t\t\ttok, pos, lit := p.scanIgnoreWhitespace()\n\t\t\treturn 0, nil, newParseError(tokstr(tok, lit), []string{\"regex\"}, pos)\n\t\t}\n\t\treturn tok, re, nil\n\t}\n\treturn 0, nil, newParseError(tokstr(tok, lit), []string{\"IN\", \"=\", \"=~\"}, pos)\n}\n\n// parseShowUsersStatement parses a string and returns a ShowUsersStatement.\n// This function assumes the \"SHOW USERS\" tokens have been consumed.\nfunc (p *Parser) parseShowUsersStatement() (*ShowUsersStatement, error) {\n\treturn &ShowUsersStatement{}, nil\n}\n\n// parseShowSubscriptionsStatement parses a string and returns a ShowSubscriptionsStatement\n// This function assumes the \"SHOW SUBSCRIPTIONS\" tokens have been consumed.\nfunc (p *Parser) 
parseShowSubscriptionsStatement() (*ShowSubscriptionsStatement, error) {\n\tstmt := &ShowSubscriptionsStatement{}\n\treturn stmt, nil\n}\n\n// parseShowFieldKeysStatement parses a string and returns a ShowSeriesStatement.\n// This function assumes the \"SHOW FIELD KEYS\" tokens have already been consumed.\nfunc (p *Parser) parseShowFieldKeysStatement() (*ShowFieldKeysStatement, error) {\n\tstmt := &ShowFieldKeysStatement{}\n\tvar err error\n\n\t// Parse optional ON clause.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == ON {\n\t\t// Parse the database.\n\t\tstmt.Database, err = p.parseIdent()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tp.unscan()\n\t}\n\n\t// Parse optional source.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == FROM {\n\t\tif stmt.Sources, err = p.parseSources(false); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tp.unscan()\n\t}\n\n\t// Parse sort: \"ORDER BY FIELD+\".\n\tif stmt.SortFields, err = p.parseOrderBy(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse limit: \"LIMIT <n>\".\n\tif stmt.Limit, err = p.parseOptionalTokenAndInt(LIMIT); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse offset: \"OFFSET <n>\".\n\tif stmt.Offset, err = p.parseOptionalTokenAndInt(OFFSET); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn stmt, nil\n}\n\n// parseDropMeasurementStatement parses a string and returns a DropMeasurementStatement.\n// This function assumes the \"DROP MEASUREMENT\" tokens have already been consumed.\nfunc (p *Parser) parseDropMeasurementStatement() (*DropMeasurementStatement, error) {\n\tstmt := &DropMeasurementStatement{}\n\n\t// Parse the name of the measurement to be dropped.\n\tlit, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Name = lit\n\n\treturn stmt, nil\n}\n\n// parseDropSeriesStatement parses a string and returns a DropSeriesStatement.\n// This function assumes the \"DROP SERIES\" tokens have already been consumed.\nfunc (p *Parser) 
parseDropSeriesStatement() (*DropSeriesStatement, error) {\n\tstmt := &DropSeriesStatement{}\n\tvar err error\n\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\n\tif tok == FROM {\n\t\t// Parse source.\n\t\tif stmt.Sources, err = p.parseSources(false); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar err error\n\t\tWalkFunc(stmt.Sources, func(n Node) {\n\t\t\tif t, ok := n.(*Measurement); ok {\n\t\t\t\t// Don't allow database or retention policy in from clause for delete\n\t\t\t\t// statement.  They apply to the selected database across all retention\n\t\t\t\t// policies.\n\t\t\t\tif t.Database != \"\" {\n\t\t\t\t\terr = &ParseError{Message: \"database not supported\"}\n\t\t\t\t}\n\t\t\t\tif t.RetentionPolicy != \"\" {\n\t\t\t\t\terr = &ParseError{Message: \"retention policy not supported\"}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tp.unscan()\n\t}\n\n\t// Parse condition: \"WHERE EXPR\".\n\tif stmt.Condition, err = p.parseCondition(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If they didn't provide a FROM or a WHERE, this query is invalid\n\tif stmt.Condition == nil && stmt.Sources == nil {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"FROM\", \"WHERE\"}, pos)\n\t}\n\n\treturn stmt, nil\n}\n\n// parseDropShardStatement parses a string and returns a\n// DropShardStatement. 
This function assumes the \"DROP SHARD\" tokens\n// have already been consumed.\nfunc (p *Parser) parseDropShardStatement() (*DropShardStatement, error) {\n\tvar err error\n\tstmt := &DropShardStatement{}\n\n\t// Parse the ID of the shard to be dropped.\n\tif stmt.ID, err = p.parseUInt64(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn stmt, nil\n}\n\n// parseShowContinuousQueriesStatement parses a string and returns a ShowContinuousQueriesStatement.\n// This function assumes the \"SHOW CONTINUOUS\" tokens have already been consumed.\nfunc (p *Parser) parseShowContinuousQueriesStatement() (*ShowContinuousQueriesStatement, error) {\n\tstmt := &ShowContinuousQueriesStatement{}\n\n\t// Expect a \"QUERIES\" token.\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != QUERIES {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"QUERIES\"}, pos)\n\t}\n\n\treturn stmt, nil\n}\n\n// parseGrantsForUserStatement parses a string and returns a ShowGrantsForUserStatement.\n// This function assumes the \"SHOW GRANTS\" tokens have already been consumed.\nfunc (p *Parser) parseGrantsForUserStatement() (*ShowGrantsForUserStatement, error) {\n\tstmt := &ShowGrantsForUserStatement{}\n\n\t// Expect a \"FOR\" token.\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != FOR {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"FOR\"}, pos)\n\t}\n\n\t// Parse the name of the user to be displayed.\n\tlit, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Name = lit\n\n\treturn stmt, nil\n}\n\n// parseShowDatabasesStatement parses a string and returns a ShowDatabasesStatement.\n// This function assumes the \"SHOW DATABASE\" tokens have already been consumed.\nfunc (p *Parser) parseShowDatabasesStatement() (*ShowDatabasesStatement, error) {\n\tstmt := &ShowDatabasesStatement{}\n\treturn stmt, nil\n}\n\n// parseCreateContinuousQueriesStatement parses a string and returns a CreateContinuousQueryStatement.\n// This function assumes the \"CREATE 
CONTINUOUS\" tokens have already been consumed.\nfunc (p *Parser) parseCreateContinuousQueryStatement() (*CreateContinuousQueryStatement, error) {\n\tstmt := &CreateContinuousQueryStatement{}\n\n\t// Expect a \"QUERY\" token.\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != QUERY {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"QUERY\"}, pos)\n\t}\n\n\t// Read the id of the query to create.\n\tident, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Name = ident\n\n\t// Expect an \"ON\" keyword.\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != ON {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"ON\"}, pos)\n\t}\n\n\t// Read the name of the database to create the query on.\n\tif ident, err = p.parseIdent(); err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Database = ident\n\n\tif p.parseTokenMaybe(RESAMPLE) {\n\t\tstmt.ResampleEvery, stmt.ResampleFor, err = p.parseResample()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Expect a \"BEGIN SELECT\" tokens.\n\tif err := p.parseTokens([]Token{BEGIN, SELECT}); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Read the select statement to be used as the source.\n\tsource, err := p.parseSelectStatement(targetRequired)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Source = source\n\n\t// validate that the statement has a non-zero group by interval if it is aggregated\n\tif !source.IsRawQuery {\n\t\td, err := source.GroupByInterval()\n\t\tif d == 0 || err != nil {\n\t\t\t// rewind so we can output an error with some info\n\t\t\tp.unscan() // unscan the whitespace\n\t\t\tp.unscan() // unscan the last token\n\t\t\ttok, pos, lit := p.scanIgnoreWhitespace()\n\t\t\texpected := []string{\"GROUP BY time(...)\"}\n\t\t\tif err != nil {\n\t\t\t\texpected = append(expected, err.Error())\n\t\t\t}\n\t\t\treturn nil, newParseError(tokstr(tok, lit), expected, pos)\n\t\t}\n\t}\n\n\t// Expect a \"END\" keyword.\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); 
tok != END {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"END\"}, pos)\n\t}\n\n\tif err := stmt.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn stmt, nil\n}\n\n// parseCreateDatabaseStatement parses a string and returns a CreateDatabaseStatement.\n// This function assumes the \"CREATE DATABASE\" tokens have already been consumed.\nfunc (p *Parser) parseCreateDatabaseStatement() (*CreateDatabaseStatement, error) {\n\tstmt := &CreateDatabaseStatement{}\n\n\t// Parse the name of the database to be created.\n\tlit, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Name = lit\n\n\t// Look for \"WITH\"\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == WITH {\n\t\t// validate that at least one of DURATION, NAME, REPLICATION or SHARD is provided\n\t\ttok, pos, lit := p.scanIgnoreWhitespace()\n\t\tif tok != DURATION && tok != NAME && tok != REPLICATION && tok != SHARD {\n\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"DURATION\", \"NAME\", \"REPLICATION\", \"SHARD\"}, pos)\n\t\t}\n\t\t// rewind\n\t\tp.unscan()\n\n\t\t// mark statement as having a RetentionPolicyInfo defined\n\t\tstmt.RetentionPolicyCreate = true\n\n\t\t// Look for \"DURATION\"\n\t\tif err := p.parseTokens([]Token{DURATION}); err != nil {\n\t\t\tp.unscan()\n\t\t} else {\n\t\t\trpDuration, err := p.parseDuration()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tstmt.RetentionPolicyDuration = &rpDuration\n\t\t}\n\n\t\t// Look for \"REPLICATION\"\n\t\tif err := p.parseTokens([]Token{REPLICATION}); err != nil {\n\t\t\tp.unscan()\n\t\t} else {\n\t\t\trpReplication, err := p.parseInt(1, math.MaxInt32)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tstmt.RetentionPolicyReplication = &rpReplication\n\t\t}\n\n\t\t// Look for \"SHARD\"\n\t\tif err := p.parseTokens([]Token{SHARD}); err != nil {\n\t\t\tp.unscan()\n\t\t} else {\n\t\t\t// Look for \"DURATION\"\n\t\t\ttok, pos, lit := p.scanIgnoreWhitespace()\n\t\t\tif tok 
!= DURATION {\n\t\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"DURATION\"}, pos)\n\t\t\t}\n\t\t\tstmt.RetentionPolicyShardGroupDuration, err = p.parseDuration()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\t// Look for \"NAME\"\n\t\tif err := p.parseTokens([]Token{NAME}); err != nil {\n\t\t\tp.unscan()\n\t\t} else {\n\t\t\tstmt.RetentionPolicyName, err = p.parseIdent()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tp.unscan()\n\t}\n\treturn stmt, nil\n}\n\n// parseDropDatabaseStatement parses a string and returns a DropDatabaseStatement.\n// This function assumes the DROP DATABASE tokens have already been consumed.\nfunc (p *Parser) parseDropDatabaseStatement() (*DropDatabaseStatement, error) {\n\tstmt := &DropDatabaseStatement{}\n\n\t// Parse the name of the database to be dropped.\n\tlit, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Name = lit\n\n\treturn stmt, nil\n}\n\n// parseDropSubscriptionStatement parses a string and returns a DropSubscriptionStatement.\n// This function assumes the \"DROP SUBSCRIPTION\" tokens have already been consumed.\nfunc (p *Parser) parseDropSubscriptionStatement() (*DropSubscriptionStatement, error) {\n\tstmt := &DropSubscriptionStatement{}\n\n\t// Read the id of the subscription to drop.\n\tident, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Name = ident\n\n\t// Expect an \"ON\" keyword.\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != ON {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"ON\"}, pos)\n\t}\n\n\t// Read the name of the database.\n\tif ident, err = p.parseIdent(); err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Database = ident\n\n\tif tok, pos, lit := p.scan(); tok != DOT {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\".\"}, pos)\n\t}\n\n\t// Read the name of the retention policy.\n\tif ident, err = p.parseIdent(); err != nil {\n\t\treturn nil, 
err\n\t}\n\tstmt.RetentionPolicy = ident\n\n\treturn stmt, nil\n}\n\n// parseDropRetentionPolicyStatement parses a string and returns a DropRetentionPolicyStatement.\n// This function assumes the DROP RETENTION POLICY tokens have been consumed.\nfunc (p *Parser) parseDropRetentionPolicyStatement() (*DropRetentionPolicyStatement, error) {\n\tstmt := &DropRetentionPolicyStatement{}\n\n\t// Parse the policy name.\n\tident, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Name = ident\n\n\t// Consume the required ON token.\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != ON {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"ON\"}, pos)\n\t}\n\n\t// Parse the database name.\n\tif stmt.Database, err = p.parseIdent(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn stmt, nil\n}\n\n// parseCreateUserStatement parses a string and returns a CreateUserStatement.\n// This function assumes the \"CREATE USER\" tokens have already been consumed.\nfunc (p *Parser) parseCreateUserStatement() (*CreateUserStatement, error) {\n\tstmt := &CreateUserStatement{}\n\n\t// Parse name of the user to be created.\n\tident, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Name = ident\n\n\t// Consume \"WITH PASSWORD\" tokens\n\tif err := p.parseTokens([]Token{WITH, PASSWORD}); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Parse new user's password\n\tif ident, err = p.parseString(); err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Password = ident\n\n\t// Check for option WITH clause.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok != WITH {\n\t\tp.unscan()\n\t\treturn stmt, nil\n\t}\n\n\t// \"WITH ALL PRIVILEGES\" grants the new user admin privilege.\n\t// Only admin privilege can be set on user creation.\n\tif err := p.parseTokens([]Token{ALL, PRIVILEGES}); err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Admin = true\n\n\treturn stmt, nil\n}\n\n// parseDropUserStatement parses a string and returns a DropUserStatement.\n// 
This function assumes the DROP USER tokens have already been consumed.\nfunc (p *Parser) parseDropUserStatement() (*DropUserStatement, error) {\n\tstmt := &DropUserStatement{}\n\n\t// Parse the name of the user to be dropped.\n\tlit, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Name = lit\n\n\treturn stmt, nil\n}\n\n// parseShowShardGroupsStatement parses a string for \"SHOW SHARD GROUPS\" statement.\n// This function assumes the \"SHOW SHARD GROUPS\" tokens have already been consumed.\nfunc (p *Parser) parseShowShardGroupsStatement() (*ShowShardGroupsStatement, error) {\n\treturn &ShowShardGroupsStatement{}, nil\n}\n\n// parseShowShardsStatement parses a string for \"SHOW SHARDS\" statement.\n// This function assumes the \"SHOW SHARDS\" tokens have already been consumed.\nfunc (p *Parser) parseShowShardsStatement() (*ShowShardsStatement, error) {\n\treturn &ShowShardsStatement{}, nil\n}\n\n// parseShowStatsStatement parses a string and returns a ShowStatsStatement.\n// This function assumes the \"SHOW STATS\" tokens have already been consumed.\nfunc (p *Parser) parseShowStatsStatement() (*ShowStatsStatement, error) {\n\tstmt := &ShowStatsStatement{}\n\tvar err error\n\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == FOR {\n\t\tstmt.Module, err = p.parseString()\n\t} else {\n\t\tp.unscan()\n\t}\n\n\treturn stmt, err\n}\n\n// parseShowDiagnostics parses a string and returns a ShowDiagnosticsStatement.\nfunc (p *Parser) parseShowDiagnosticsStatement() (*ShowDiagnosticsStatement, error) {\n\tstmt := &ShowDiagnosticsStatement{}\n\tvar err error\n\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == FOR {\n\t\tstmt.Module, err = p.parseString()\n\t} else {\n\t\tp.unscan()\n\t}\n\n\treturn stmt, err\n}\n\n// parseDropContinuousQueriesStatement parses a string and returns a DropContinuousQueryStatement.\n// This function assumes the \"DROP CONTINUOUS\" tokens have already been consumed.\nfunc (p *Parser) parseDropContinuousQueryStatement() 
(*DropContinuousQueryStatement, error) {\n\tstmt := &DropContinuousQueryStatement{}\n\n\t// Expect a \"QUERY\" token.\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != QUERY {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"QUERY\"}, pos)\n\t}\n\n\t// Read the id of the query to drop.\n\tident, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Name = ident\n\n\t// Expect an \"ON\" keyword.\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != ON {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"ON\"}, pos)\n\t}\n\n\t// Read the name of the database to remove the query from.\n\tif ident, err = p.parseIdent(); err != nil {\n\t\treturn nil, err\n\t}\n\tstmt.Database = ident\n\n\treturn stmt, nil\n}\n\n// parseFields parses a list of one or more fields.\nfunc (p *Parser) parseFields() (Fields, error) {\n\tvar fields Fields\n\n\tfor {\n\t\t// Parse the field.\n\t\tf, err := p.parseField()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Add new field.\n\t\tfields = append(fields, f)\n\n\t\t// If there's not a comma next then stop parsing fields.\n\t\tif tok, _, _ := p.scan(); tok != COMMA {\n\t\t\tp.unscan()\n\t\t\tbreak\n\t\t}\n\t}\n\treturn fields, nil\n}\n\n// parseField parses a single field.\nfunc (p *Parser) parseField() (*Field, error) {\n\tf := &Field{}\n\n\t// Attempt to parse a regex.\n\tre, err := p.parseRegex()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if re != nil {\n\t\tf.Expr = re\n\t} else {\n\t\t_, pos, _ := p.scanIgnoreWhitespace()\n\t\tp.unscan()\n\t\t// Parse the expression first.\n\t\texpr, err := p.ParseExpr()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar c validateField\n\t\tWalk(&c, expr)\n\t\tif c.foundInvalid {\n\t\t\treturn nil, fmt.Errorf(\"invalid operator %s in SELECT clause at line %d, char %d; operator is intended for WHERE clause\", c.badToken, pos.Line+1, pos.Char+1)\n\t\t}\n\t\tf.Expr = expr\n\t}\n\n\t// Parse the alias if the current and next 
tokens are \"WS AS\".\n\talias, err := p.parseAlias()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tf.Alias = alias\n\n\t// Consume all trailing whitespace.\n\tp.consumeWhitespace()\n\n\treturn f, nil\n}\n\n// validateField checks if the Expr is a valid field. We disallow all binary expression\n// that return a boolean.\ntype validateField struct {\n\tfoundInvalid bool\n\tbadToken     Token\n}\n\nfunc (c *validateField) Visit(n Node) Visitor {\n\te, ok := n.(*BinaryExpr)\n\tif !ok {\n\t\treturn c\n\t}\n\n\tswitch e.Op {\n\tcase EQ, NEQ, EQREGEX,\n\t\tNEQREGEX, LT, LTE, GT, GTE,\n\t\tAND, OR:\n\t\tc.foundInvalid = true\n\t\tc.badToken = e.Op\n\t\treturn nil\n\t}\n\treturn c\n}\n\n// parseAlias parses the \"AS IDENT\" alias for fields and dimensions.\nfunc (p *Parser) parseAlias() (string, error) {\n\t// Check if the next token is \"AS\". If not, then unscan and exit.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok != AS {\n\t\tp.unscan()\n\t\treturn \"\", nil\n\t}\n\n\t// Then we should have the alias identifier.\n\tlit, err := p.parseIdent()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn lit, nil\n}\n\n// parseSources parses a comma delimited list of sources.\nfunc (p *Parser) parseSources(subqueries bool) (Sources, error) {\n\tvar sources Sources\n\n\tfor {\n\t\ts, err := p.parseSource(subqueries)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsources = append(sources, s)\n\n\t\tif tok, _, _ := p.scanIgnoreWhitespace(); tok != COMMA {\n\t\t\tp.unscan()\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn sources, nil\n}\n\n// peekRune returns the next rune that would be read by the scanner.\nfunc (p *Parser) peekRune() rune {\n\tr, _, _ := p.s.s.r.ReadRune()\n\tif r != eof {\n\t\t_ = p.s.s.r.UnreadRune()\n\t}\n\n\treturn r\n}\n\nfunc (p *Parser) parseSource(subqueries bool) (Source, error) {\n\tm := &Measurement{}\n\n\t// Attempt to parse a regex.\n\tre, err := p.parseRegex()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if re != nil {\n\t\tm.Regex = 
re\n\t\t// Regex is always last so we're done.\n\t\treturn m, nil\n\t}\n\n\t// If there is no regular expression, this might be a subquery.\n\t// Parse the subquery if we are in a query that allows them as a source.\n\tif m.Regex == nil && subqueries {\n\t\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == LPAREN {\n\t\t\tif err := p.parseTokens([]Token{SELECT}); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tstmt, err := p.parseSelectStatement(targetSubquery)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif err := p.parseTokens([]Token{RPAREN}); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn &SubQuery{Statement: stmt}, nil\n\t\t} else {\n\t\t\tp.unscan()\n\t\t}\n\t}\n\n\t// Didn't find a regex so parse segmented identifiers.\n\tidents, err := p.parseSegmentedIdents()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If we already have the max allowed idents, we're done.\n\tif len(idents) == 3 {\n\t\tm.Database, m.RetentionPolicy, m.Name = idents[0], idents[1], idents[2]\n\t\treturn m, nil\n\t}\n\t// Check again for regex.\n\tre, err = p.parseRegex()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if re != nil {\n\t\tm.Regex = re\n\t}\n\n\t// Assign identifiers to their proper locations.\n\tswitch len(idents) {\n\tcase 1:\n\t\tif re != nil {\n\t\t\tm.RetentionPolicy = idents[0]\n\t\t} else {\n\t\t\tm.Name = idents[0]\n\t\t}\n\tcase 2:\n\t\tif re != nil {\n\t\t\tm.Database, m.RetentionPolicy = idents[0], idents[1]\n\t\t} else {\n\t\t\tm.RetentionPolicy, m.Name = idents[0], idents[1]\n\t\t}\n\t}\n\n\treturn m, nil\n}\n\n// parseCondition parses the \"WHERE\" clause of the query, if it exists.\nfunc (p *Parser) parseCondition() (Expr, error) {\n\t// Check if the WHERE token exists.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok != WHERE {\n\t\tp.unscan()\n\t\treturn nil, nil\n\t}\n\n\t// Scan the identifier for the source.\n\texpr, err := p.ParseExpr()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn expr, 
nil\n}\n\n// parseDimensions parses the \"GROUP BY\" clause of the query, if it exists.\nfunc (p *Parser) parseDimensions() (Dimensions, error) {\n\t// If the next token is not GROUP then exit.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok != GROUP {\n\t\tp.unscan()\n\t\treturn nil, nil\n\t}\n\n\t// Now the next token should be \"BY\".\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != BY {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"BY\"}, pos)\n\t}\n\n\tvar dimensions Dimensions\n\tfor {\n\t\t// Parse the dimension.\n\t\td, err := p.parseDimension()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Add new dimension.\n\t\tdimensions = append(dimensions, d)\n\n\t\t// If there's not a comma next then stop parsing dimensions.\n\t\tif tok, _, _ := p.scan(); tok != COMMA {\n\t\t\tp.unscan()\n\t\t\tbreak\n\t\t}\n\t}\n\treturn dimensions, nil\n}\n\n// parseDimension parses a single dimension.\nfunc (p *Parser) parseDimension() (*Dimension, error) {\n\tre, err := p.parseRegex()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if re != nil {\n\t\treturn &Dimension{Expr: re}, nil\n\t}\n\n\t// Parse the expression first.\n\texpr, err := p.ParseExpr()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Consume all trailing whitespace.\n\tp.consumeWhitespace()\n\n\treturn &Dimension{Expr: expr}, nil\n}\n\n// parseFill parses the fill call and its options.\nfunc (p *Parser) parseFill() (FillOption, interface{}, error) {\n\t// Parse the expression first.\n\ttok, _, lit := p.scanIgnoreWhitespace()\n\tp.unscan()\n\tif tok != IDENT || strings.ToLower(lit) != \"fill\" {\n\t\treturn NullFill, nil, nil\n\t}\n\n\texpr, err := p.ParseExpr()\n\tif err != nil {\n\t\treturn NullFill, nil, err\n\t}\n\tfill, ok := expr.(*Call)\n\tif !ok {\n\t\treturn NullFill, nil, errors.New(\"fill must be a function call\")\n\t} else if len(fill.Args) != 1 {\n\t\treturn NullFill, nil, errors.New(\"fill requires an argument, e.g.: 0, null, none, previous, 
linear\")\n\t}\n\tswitch fill.Args[0].String() {\n\tcase \"null\":\n\t\treturn NullFill, nil, nil\n\tcase \"none\":\n\t\treturn NoFill, nil, nil\n\tcase \"previous\":\n\t\treturn PreviousFill, nil, nil\n\tcase \"linear\":\n\t\treturn LinearFill, nil, nil\n\tdefault:\n\t\tswitch num := fill.Args[0].(type) {\n\t\tcase *IntegerLiteral:\n\t\t\treturn NumberFill, num.Val, nil\n\t\tcase *NumberLiteral:\n\t\t\treturn NumberFill, num.Val, nil\n\t\tdefault:\n\t\t\treturn NullFill, nil, fmt.Errorf(\"expected number argument in fill()\")\n\t\t}\n\t}\n}\n\n// parseLocation parses the timezone call and its arguments.\nfunc (p *Parser) parseLocation() (*time.Location, error) {\n\t// Parse the expression first.\n\ttok, _, lit := p.scanIgnoreWhitespace()\n\tp.unscan()\n\tif tok != IDENT || strings.ToLower(lit) != \"tz\" {\n\t\treturn nil, nil\n\t}\n\n\texpr, err := p.ParseExpr()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttz, ok := expr.(*Call)\n\tif !ok {\n\t\treturn nil, errors.New(\"tz must be a function call\")\n\t} else if len(tz.Args) != 1 {\n\t\treturn nil, errors.New(\"tz requires exactly one argument\")\n\t}\n\n\ttzname, ok := tz.Args[0].(*StringLiteral)\n\tif !ok {\n\t\treturn nil, errors.New(\"expected string argument in tz()\")\n\t}\n\n\tloc, err := time.LoadLocation(tzname.Val)\n\tif err != nil {\n\t\t// Do not pass the same error message as the error may contain sensitive pathnames.\n\t\treturn nil, fmt.Errorf(\"unable to find time zone %s\", tzname.Val)\n\t}\n\treturn loc, nil\n}\n\n// parseOptionalTokenAndInt parses the specified token followed\n// by an int, if it exists.\nfunc (p *Parser) parseOptionalTokenAndInt(t Token) (int, error) {\n\t// Check if the token exists.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok != t {\n\t\tp.unscan()\n\t\treturn 0, nil\n\t}\n\n\t// Scan the number.\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\tif tok != INTEGER {\n\t\treturn 0, newParseError(tokstr(tok, lit), []string{\"integer\"}, pos)\n\t}\n\n\t// Parse 
number.\n\tn, _ := strconv.ParseInt(lit, 10, 64)\n\tif n < 0 {\n\t\tmsg := fmt.Sprintf(\"%s must be >= 0\", t.String())\n\t\treturn 0, &ParseError{Message: msg, Pos: pos}\n\t}\n\n\treturn int(n), nil\n}\n\n// parseOrderBy parses the \"ORDER BY\" clause of a query, if it exists.\nfunc (p *Parser) parseOrderBy() (SortFields, error) {\n\t// Return nil result and nil error if no ORDER token at this position.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok != ORDER {\n\t\tp.unscan()\n\t\treturn nil, nil\n\t}\n\n\t// Parse the required BY token.\n\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != BY {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"BY\"}, pos)\n\t}\n\n\t// Parse the ORDER BY fields.\n\tfields, err := p.parseSortFields()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn fields, nil\n}\n\n// parseSortFields parses the sort fields for an ORDER BY clause.\nfunc (p *Parser) parseSortFields() (SortFields, error) {\n\tvar fields SortFields\n\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\n\tswitch tok {\n\t// The first field after an order by may not have a field name (e.g. ORDER BY ASC)\n\tcase ASC, DESC:\n\t\tfields = append(fields, &SortField{Ascending: (tok == ASC)})\n\t// If it's a token, parse it as a sort field.  
At least one is required.\n\tcase IDENT:\n\t\tp.unscan()\n\t\tfield, err := p.parseSortField()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif lit != \"time\" {\n\t\t\treturn nil, errors.New(\"only ORDER BY time supported at this time\")\n\t\t}\n\n\t\tfields = append(fields, field)\n\t// Parse error...\n\tdefault:\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"identifier\", \"ASC\", \"DESC\"}, pos)\n\t}\n\n\t// Parse additional fields.\n\tfor {\n\t\ttok, _, _ := p.scanIgnoreWhitespace()\n\n\t\tif tok != COMMA {\n\t\t\tp.unscan()\n\t\t\tbreak\n\t\t}\n\n\t\tfield, err := p.parseSortField()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfields = append(fields, field)\n\t}\n\n\tif len(fields) > 1 {\n\t\treturn nil, errors.New(\"only ORDER BY time supported at this time\")\n\t}\n\n\treturn fields, nil\n}\n\n// parseSortField parses one field of an ORDER BY clause.\nfunc (p *Parser) parseSortField() (*SortField, error) {\n\tfield := &SortField{}\n\n\t// Parse sort field name.\n\tident, err := p.parseIdent()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfield.Name = ident\n\n\t// Check for optional ASC or DESC clause. 
Default is ASC.\n\ttok, _, _ := p.scanIgnoreWhitespace()\n\tif tok != ASC && tok != DESC {\n\t\tp.unscan()\n\t\ttok = ASC\n\t}\n\tfield.Ascending = (tok == ASC)\n\n\treturn field, nil\n}\n\n// parseVarRef parses a reference to a measurement or field.\nfunc (p *Parser) parseVarRef() (*VarRef, error) {\n\t// Parse the segments of the variable ref.\n\tsegments, err := p.parseSegmentedIdents()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar dtype DataType\n\tif tok, _, _ := p.scan(); tok == DOUBLECOLON {\n\t\ttok, pos, lit := p.scan()\n\t\tswitch tok {\n\t\tcase IDENT:\n\t\t\tswitch strings.ToLower(lit) {\n\t\t\tcase \"float\":\n\t\t\t\tdtype = Float\n\t\t\tcase \"integer\":\n\t\t\t\tdtype = Integer\n\t\t\tcase \"string\":\n\t\t\t\tdtype = String\n\t\t\tcase \"boolean\":\n\t\t\t\tdtype = Boolean\n\t\t\tdefault:\n\t\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"float\", \"integer\", \"string\", \"boolean\", \"field\", \"tag\"}, pos)\n\t\t\t}\n\t\tcase FIELD:\n\t\t\tdtype = AnyField\n\t\tcase TAG:\n\t\t\tdtype = Tag\n\t\tdefault:\n\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"float\", \"integer\", \"string\", \"boolean\", \"field\", \"tag\"}, pos)\n\t\t}\n\t} else {\n\t\tp.unscan()\n\t}\n\n\tvr := &VarRef{Val: strings.Join(segments, \".\"), Type: dtype}\n\n\treturn vr, nil\n}\n\n// ParseExpr parses an expression.\nfunc (p *Parser) ParseExpr() (Expr, error) {\n\tvar err error\n\t// Dummy root node.\n\troot := &BinaryExpr{}\n\n\t// Parse a non-binary expression type to start.\n\t// This variable will always be the root of the expression tree.\n\troot.RHS, err = p.parseUnaryExpr()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Loop over operations and unary exprs and build a tree based on precendence.\n\tfor {\n\t\t// If the next token is NOT an operator then return the expression.\n\t\top, _, _ := p.scanIgnoreWhitespace()\n\t\tif !op.isOperator() {\n\t\t\tp.unscan()\n\t\t\treturn root.RHS, nil\n\t\t}\n\n\t\t// Otherwise parse the 
next expression.\n\t\tvar rhs Expr\n\t\tif IsRegexOp(op) {\n\t\t\t// RHS of a regex operator must be a regular expression.\n\t\t\tp.consumeWhitespace()\n\t\t\tif rhs, err = p.parseRegex(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// parseRegex can return an empty type, but we need it to be present\n\t\t\tif rhs.(*RegexLiteral) == nil {\n\t\t\t\ttok, pos, lit := p.scanIgnoreWhitespace()\n\t\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"regex\"}, pos)\n\t\t\t}\n\t\t} else {\n\t\t\tif rhs, err = p.parseUnaryExpr(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\t// Find the right spot in the tree to add the new expression by\n\t\t// descending the RHS of the expression tree until we reach the last\n\t\t// BinaryExpr or a BinaryExpr whose RHS has an operator with\n\t\t// precedence >= the operator being added.\n\t\tfor node := root; ; {\n\t\t\tr, ok := node.RHS.(*BinaryExpr)\n\t\t\tif !ok || r.Op.Precedence() >= op.Precedence() {\n\t\t\t\t// Add the new expression here and break.\n\t\t\t\tnode.RHS = &BinaryExpr{LHS: node.RHS, RHS: rhs, Op: op}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnode = r\n\t\t}\n\t}\n}\n\n// parseUnaryExpr parses an non-binary expression.\nfunc (p *Parser) parseUnaryExpr() (Expr, error) {\n\t// If the first token is a LPAREN then parse it as its own grouped expression.\n\tif tok, _, _ := p.scanIgnoreWhitespace(); tok == LPAREN {\n\t\texpr, err := p.ParseExpr()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Expect an RPAREN at the end.\n\t\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != RPAREN {\n\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\")\"}, pos)\n\t\t}\n\n\t\treturn &ParenExpr{Expr: expr}, nil\n\t}\n\tp.unscan()\n\n\t// Read next token.\n\ttok, pos, lit := p.scanIgnoreWhitespace()\n\tswitch tok {\n\tcase IDENT:\n\t\t// If the next immediate token is a left parentheses, parse as function call.\n\t\t// Otherwise parse as a variable reference.\n\t\tif tok0, _, _ := p.scan(); tok0 
== LPAREN {\n\t\t\treturn p.parseCall(lit)\n\t\t}\n\n\t\tp.unscan() // unscan the last token (wasn't an LPAREN)\n\t\tp.unscan() // unscan the IDENT token\n\n\t\t// Parse it as a VarRef.\n\t\treturn p.parseVarRef()\n\tcase DISTINCT:\n\t\t// If the next immediate token is a left parentheses, parse as function call.\n\t\t// Otherwise parse as a Distinct expression.\n\t\ttok0, pos, lit := p.scan()\n\t\tif tok0 == LPAREN {\n\t\t\treturn p.parseCall(\"distinct\")\n\t\t} else if tok0 == WS {\n\t\t\ttok1, pos, lit := p.scanIgnoreWhitespace()\n\t\t\tif tok1 != IDENT {\n\t\t\t\treturn nil, newParseError(tokstr(tok1, lit), []string{\"identifier\"}, pos)\n\t\t\t}\n\t\t\treturn &Distinct{Val: lit}, nil\n\t\t}\n\n\t\treturn nil, newParseError(tokstr(tok0, lit), []string{\"(\", \"identifier\"}, pos)\n\tcase STRING:\n\t\treturn &StringLiteral{Val: lit}, nil\n\tcase NUMBER:\n\t\tv, err := strconv.ParseFloat(lit, 64)\n\t\tif err != nil {\n\t\t\treturn nil, &ParseError{Message: \"unable to parse number\", Pos: pos}\n\t\t}\n\t\treturn &NumberLiteral{Val: v}, nil\n\tcase INTEGER:\n\t\tv, err := strconv.ParseInt(lit, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, &ParseError{Message: \"unable to parse integer\", Pos: pos}\n\t\t}\n\t\treturn &IntegerLiteral{Val: v}, nil\n\tcase TRUE, FALSE:\n\t\treturn &BooleanLiteral{Val: (tok == TRUE)}, nil\n\tcase DURATIONVAL:\n\t\tv, err := ParseDuration(lit)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &DurationLiteral{Val: v}, nil\n\tcase MUL:\n\t\twc := &Wildcard{}\n\t\tif tok, _, _ := p.scan(); tok == DOUBLECOLON {\n\t\t\ttok, pos, lit := p.scan()\n\t\t\tswitch tok {\n\t\t\tcase FIELD, TAG:\n\t\t\t\twc.Type = tok\n\t\t\tdefault:\n\t\t\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"field\", \"tag\"}, pos)\n\t\t\t}\n\t\t} else {\n\t\t\tp.unscan()\n\t\t}\n\t\treturn wc, nil\n\tcase REGEX:\n\t\tre, err := regexp.Compile(lit)\n\t\tif err != nil {\n\t\t\treturn nil, &ParseError{Message: err.Error(), Pos: 
pos}\n\t\t}\n\t\treturn &RegexLiteral{Val: re}, nil\n\tcase BOUNDPARAM:\n\t\tk := strings.TrimPrefix(lit, \"$\")\n\t\tif len(k) == 0 {\n\t\t\treturn nil, errors.New(\"empty bound parameter\")\n\t\t}\n\n\t\tv, ok := p.params[k]\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"missing parameter: %s\", k)\n\t\t}\n\n\t\tswitch v := v.(type) {\n\t\tcase float64:\n\t\t\treturn &NumberLiteral{Val: v}, nil\n\t\tcase int64:\n\t\t\treturn &IntegerLiteral{Val: v}, nil\n\t\tcase string:\n\t\t\treturn &StringLiteral{Val: v}, nil\n\t\tcase bool:\n\t\t\treturn &BooleanLiteral{Val: v}, nil\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unable to bind parameter with type %T\", v)\n\t\t}\n\tcase ADD, SUB:\n\t\tmul := 1\n\t\tif tok == SUB {\n\t\t\tmul = -1\n\t\t}\n\n\t\ttok0, pos0, lit0 := p.scanIgnoreWhitespace()\n\t\tswitch tok0 {\n\t\tcase NUMBER, INTEGER, DURATIONVAL, LPAREN, IDENT:\n\t\t\t// Unscan the token and use parseUnaryExpr.\n\t\t\tp.unscan()\n\n\t\t\tlit, err := p.parseUnaryExpr()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tswitch lit := lit.(type) {\n\t\t\tcase *NumberLiteral:\n\t\t\t\tlit.Val *= float64(mul)\n\t\t\tcase *IntegerLiteral:\n\t\t\t\tlit.Val *= int64(mul)\n\t\t\tcase *DurationLiteral:\n\t\t\t\tlit.Val *= time.Duration(mul)\n\t\t\tcase *VarRef, *Call, *ParenExpr:\n\t\t\t\t// Multiply the variable.\n\t\t\t\treturn &BinaryExpr{\n\t\t\t\t\tOp:  MUL,\n\t\t\t\t\tLHS: &IntegerLiteral{Val: int64(mul)},\n\t\t\t\t\tRHS: lit,\n\t\t\t\t}, nil\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"unexpected literal: %T\", lit))\n\t\t\t}\n\t\t\treturn lit, nil\n\t\tdefault:\n\t\t\treturn nil, newParseError(tokstr(tok0, lit0), []string{\"identifier\", \"number\", \"duration\", \"(\"}, pos0)\n\t\t}\n\tdefault:\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"identifier\", \"string\", \"number\", \"bool\"}, pos)\n\t}\n}\n\n// parseRegex parses a regular expression.\nfunc (p *Parser) parseRegex() (*RegexLiteral, error) {\n\tnextRune := p.peekRune()\n\tif 
isWhitespace(nextRune) {\n\t\tp.consumeWhitespace()\n\t}\n\n\t// If the next character is not a '/', then return nils.\n\tnextRune = p.peekRune()\n\tif nextRune != '/' {\n\t\treturn nil, nil\n\t}\n\n\ttok, pos, lit := p.s.ScanRegex()\n\n\tif tok == BADESCAPE {\n\t\tmsg := fmt.Sprintf(\"bad escape: %s\", lit)\n\t\treturn nil, &ParseError{Message: msg, Pos: pos}\n\t} else if tok == BADREGEX {\n\t\tmsg := fmt.Sprintf(\"bad regex: %s\", lit)\n\t\treturn nil, &ParseError{Message: msg, Pos: pos}\n\t} else if tok != REGEX {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\"regex\"}, pos)\n\t}\n\n\tre, err := regexp.Compile(lit)\n\tif err != nil {\n\t\treturn nil, &ParseError{Message: err.Error(), Pos: pos}\n\t}\n\n\treturn &RegexLiteral{Val: re}, nil\n}\n\n// parseCall parses a function call.\n// This function assumes the function name and LPAREN have been consumed.\nfunc (p *Parser) parseCall(name string) (*Call, error) {\n\tname = strings.ToLower(name)\n\n\t// Parse first function argument if one exists.\n\tvar args []Expr\n\tre, err := p.parseRegex()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if re != nil {\n\t\targs = append(args, re)\n\t} else {\n\t\t// If there's a right paren then just return immediately.\n\t\tif tok, _, _ := p.scan(); tok == RPAREN {\n\t\t\treturn &Call{Name: name}, nil\n\t\t}\n\t\tp.unscan()\n\n\t\targ, err := p.ParseExpr()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\targs = append(args, arg)\n\t}\n\n\t// Parse additional function arguments if there is a comma.\n\tfor {\n\t\t// If there's not a comma, stop parsing arguments.\n\t\tif tok, _, _ := p.scanIgnoreWhitespace(); tok != COMMA {\n\t\t\tp.unscan()\n\t\t\tbreak\n\t\t}\n\n\t\tre, err := p.parseRegex()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if re != nil {\n\t\t\targs = append(args, re)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Parse an expression argument.\n\t\targ, err := p.ParseExpr()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\targs = 
append(args, arg)\n\t}\n\n\t// There should be a right parentheses at the end.\n\tif tok, pos, lit := p.scan(); tok != RPAREN {\n\t\treturn nil, newParseError(tokstr(tok, lit), []string{\")\"}, pos)\n\t}\n\n\treturn &Call{Name: name, Args: args}, nil\n}\n\n// parseResample parses a RESAMPLE [EVERY <duration>] [FOR <duration>].\n// This function assumes RESAMPLE has already been consumed.\n// EVERY and FOR are optional, but at least one of the two has to be used.\nfunc (p *Parser) parseResample() (time.Duration, time.Duration, error) {\n\tvar interval time.Duration\n\tif p.parseTokenMaybe(EVERY) {\n\t\ttok, pos, lit := p.scanIgnoreWhitespace()\n\t\tif tok != DURATIONVAL {\n\t\t\treturn 0, 0, newParseError(tokstr(tok, lit), []string{\"duration\"}, pos)\n\t\t}\n\n\t\td, err := ParseDuration(lit)\n\t\tif err != nil {\n\t\t\treturn 0, 0, &ParseError{Message: err.Error(), Pos: pos}\n\t\t}\n\t\tinterval = d\n\t}\n\n\tvar maxDuration time.Duration\n\tif p.parseTokenMaybe(FOR) {\n\t\ttok, pos, lit := p.scanIgnoreWhitespace()\n\t\tif tok != DURATIONVAL {\n\t\t\treturn 0, 0, newParseError(tokstr(tok, lit), []string{\"duration\"}, pos)\n\t\t}\n\n\t\td, err := ParseDuration(lit)\n\t\tif err != nil {\n\t\t\treturn 0, 0, &ParseError{Message: err.Error(), Pos: pos}\n\t\t}\n\t\tmaxDuration = d\n\t}\n\n\t// Neither EVERY or FOR were read, so read the next token again\n\t// so we can return a suitable error message.\n\tif interval == 0 && maxDuration == 0 {\n\t\ttok, pos, lit := p.scanIgnoreWhitespace()\n\t\treturn 0, 0, newParseError(tokstr(tok, lit), []string{\"EVERY\", \"FOR\"}, pos)\n\t}\n\treturn interval, maxDuration, nil\n}\n\n// scan returns the next token from the underlying scanner.\nfunc (p *Parser) scan() (tok Token, pos Pos, lit string) { return p.s.Scan() }\n\n// scanIgnoreWhitespace scans the next non-whitespace and non-comment token.\nfunc (p *Parser) scanIgnoreWhitespace() (tok Token, pos Pos, lit string) {\n\tfor {\n\t\ttok, pos, lit = p.scan()\n\t\tif tok == WS || 
tok == COMMENT {\n\t\t\tcontinue\n\t\t}\n\t\treturn\n\t}\n}\n\n// consumeWhitespace scans the next token if it's whitespace.\nfunc (p *Parser) consumeWhitespace() {\n\tif tok, _, _ := p.scan(); tok != WS {\n\t\tp.unscan()\n\t}\n}\n\n// unscan pushes the previously read token back onto the buffer.\nfunc (p *Parser) unscan() { p.s.Unscan() }\n\n// ParseDuration parses a time duration from a string.\n// This is needed instead of time.ParseDuration because this will support\n// the full syntax that InfluxQL supports for specifying durations\n// including weeks and days.\nfunc ParseDuration(s string) (time.Duration, error) {\n\t// Return an error if the string is blank or one character\n\tif len(s) < 2 {\n\t\treturn 0, ErrInvalidDuration\n\t}\n\n\t// Split string into individual runes.\n\ta := []rune(s)\n\n\t// Start with a zero duration.\n\tvar d time.Duration\n\ti := 0\n\n\t// Check for a negative.\n\tisNegative := false\n\tif a[i] == '-' {\n\t\tisNegative = true\n\t\ti++\n\t}\n\n\tvar measure int64\n\tvar unit string\n\n\t// Parsing loop.\n\tfor i < len(a) {\n\t\t// Find the number portion.\n\t\tstart := i\n\t\tfor ; i < len(a) && isDigit(a[i]); i++ {\n\t\t\t// Scan for the digits.\n\t\t}\n\n\t\t// Check if we reached the end of the string prematurely.\n\t\tif i >= len(a) || i == start {\n\t\t\treturn 0, ErrInvalidDuration\n\t\t}\n\n\t\t// Parse the numeric part.\n\t\tn, err := strconv.ParseInt(string(a[start:i]), 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, ErrInvalidDuration\n\t\t}\n\t\tmeasure = n\n\n\t\t// Extract the unit of measure.\n\t\t// If the last two characters are \"ms\" then parse as milliseconds.\n\t\t// Otherwise just use the last character as the unit of measure.\n\t\tunit = string(a[i])\n\t\tswitch a[i] {\n\t\tcase 'n':\n\t\t\tif i+1 < len(a) && a[i+1] == 's' {\n\t\t\t\tunit = string(a[i : i+2])\n\t\t\t\td += time.Duration(n)\n\t\t\t\ti += 2\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn 0, ErrInvalidDuration\n\t\tcase 'u', 'µ':\n\t\t\td += 
time.Duration(n) * time.Microsecond\n\t\tcase 'm':\n\t\t\tif i+1 < len(a) && a[i+1] == 's' {\n\t\t\t\tunit = string(a[i : i+2])\n\t\t\t\td += time.Duration(n) * time.Millisecond\n\t\t\t\ti += 2\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\td += time.Duration(n) * time.Minute\n\t\tcase 's':\n\t\t\td += time.Duration(n) * time.Second\n\t\tcase 'h':\n\t\t\td += time.Duration(n) * time.Hour\n\t\tcase 'd':\n\t\t\td += time.Duration(n) * 24 * time.Hour\n\t\tcase 'w':\n\t\t\td += time.Duration(n) * 7 * 24 * time.Hour\n\t\tdefault:\n\t\t\treturn 0, ErrInvalidDuration\n\t\t}\n\t\ti++\n\t}\n\n\t// Check to see if we overflowed a duration\n\tif d < 0 && !isNegative {\n\t\treturn 0, fmt.Errorf(\"overflowed duration %d%s: choose a smaller duration or INF\", measure, unit)\n\t}\n\n\tif isNegative {\n\t\td = -d\n\t}\n\treturn d, nil\n}\n\n// FormatDuration formats a duration to a string.\nfunc FormatDuration(d time.Duration) string {\n\tif d == 0 {\n\t\treturn \"0s\"\n\t} else if d%(7*24*time.Hour) == 0 {\n\t\treturn fmt.Sprintf(\"%dw\", d/(7*24*time.Hour))\n\t} else if d%(24*time.Hour) == 0 {\n\t\treturn fmt.Sprintf(\"%dd\", d/(24*time.Hour))\n\t} else if d%time.Hour == 0 {\n\t\treturn fmt.Sprintf(\"%dh\", d/time.Hour)\n\t} else if d%time.Minute == 0 {\n\t\treturn fmt.Sprintf(\"%dm\", d/time.Minute)\n\t} else if d%time.Second == 0 {\n\t\treturn fmt.Sprintf(\"%ds\", d/time.Second)\n\t} else if d%time.Millisecond == 0 {\n\t\treturn fmt.Sprintf(\"%dms\", d/time.Millisecond)\n\t}\n\t// Although we accept both \"u\" and \"µ\" when reading microsecond durations,\n\t// we output with \"u\", which can be represented in 1 byte,\n\t// instead of \"µ\", which requires 2 bytes.\n\treturn fmt.Sprintf(\"%du\", d/time.Microsecond)\n}\n\n// parseTokens consumes an expected sequence of tokens.\nfunc (p *Parser) parseTokens(toks []Token) error {\n\tfor _, expected := range toks {\n\t\tif tok, pos, lit := p.scanIgnoreWhitespace(); tok != expected {\n\t\t\treturn newParseError(tokstr(tok, lit), 
[]string{tokens[expected]}, pos)\n\t\t}\n\t}\n\treturn nil\n}\n\n// parseTokenMaybe consumes the next token if it matches the expected one and\n// does nothing if the next token is not the next one.\nfunc (p *Parser) parseTokenMaybe(expected Token) bool {\n\ttok, _, _ := p.scanIgnoreWhitespace()\n\tif tok != expected {\n\t\tp.unscan()\n\t\treturn false\n\t}\n\treturn true\n}\n\nvar (\n\t// Quote String replacer.\n\tqsReplacer = strings.NewReplacer(\"\\n\", `\\n`, `\\`, `\\\\`, `'`, `\\'`)\n\n\t// Quote Ident replacer.\n\tqiReplacer = strings.NewReplacer(\"\\n\", `\\n`, `\\`, `\\\\`, `\"`, `\\\"`)\n)\n\n// QuoteString returns a quoted string.\nfunc QuoteString(s string) string {\n\treturn `'` + qsReplacer.Replace(s) + `'`\n}\n\n// QuoteIdent returns a quoted identifier from multiple bare identifiers.\nfunc QuoteIdent(segments ...string) string {\n\tvar buf bytes.Buffer\n\tfor i, segment := range segments {\n\t\tneedQuote := IdentNeedsQuotes(segment) ||\n\t\t\t((i < len(segments)-1) && segment != \"\") || // not last segment && not \"\"\n\t\t\t((i == 0 || i == len(segments)-1) && segment == \"\") // the first or last segment and an empty string\n\n\t\tif needQuote {\n\t\t\t_ = buf.WriteByte('\"')\n\t\t}\n\n\t\t_, _ = buf.WriteString(qiReplacer.Replace(segment))\n\n\t\tif needQuote {\n\t\t\t_ = buf.WriteByte('\"')\n\t\t}\n\n\t\tif i < len(segments)-1 {\n\t\t\t_ = buf.WriteByte('.')\n\t\t}\n\t}\n\treturn buf.String()\n}\n\n// IdentNeedsQuotes returns true if the ident string given would require quotes.\nfunc IdentNeedsQuotes(ident string) bool {\n\t// check if this identifier is a keyword\n\ttok := Lookup(ident)\n\tif tok != IDENT {\n\t\treturn true\n\t}\n\tfor i, r := range ident {\n\t\tif i == 0 && !isIdentFirstChar(r) {\n\t\t\treturn true\n\t\t} else if i > 0 && !isIdentChar(r) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// isDateString returns true if the string looks like a date-only time literal.\nfunc isDateString(s string) bool { return 
dateStringRegexp.MatchString(s) }\n\n// isDateTimeString returns true if the string looks like a date+time time literal.\nfunc isDateTimeString(s string) bool { return dateTimeStringRegexp.MatchString(s) }\n\nvar dateStringRegexp = regexp.MustCompile(`^\\d{4}-\\d{2}-\\d{2}$`)\nvar dateTimeStringRegexp = regexp.MustCompile(`^\\d{4}-\\d{2}-\\d{2}.+`)\n\n// ErrInvalidDuration is returned when parsing a malformed duration.\nvar ErrInvalidDuration = errors.New(\"invalid duration\")\n\n// ParseError represents an error that occurred during parsing.\ntype ParseError struct {\n\tMessage  string\n\tFound    string\n\tExpected []string\n\tPos      Pos\n}\n\n// newParseError returns a new instance of ParseError.\nfunc newParseError(found string, expected []string, pos Pos) *ParseError {\n\treturn &ParseError{Found: found, Expected: expected, Pos: pos}\n}\n\n// Error returns the string representation of the error.\nfunc (e *ParseError) Error() string {\n\tif e.Message != \"\" {\n\t\treturn fmt.Sprintf(\"%s at line %d, char %d\", e.Message, e.Pos.Line+1, e.Pos.Char+1)\n\t}\n\treturn fmt.Sprintf(\"found %s, expected %s at line %d, char %d\", e.Found, strings.Join(e.Expected, \", \"), e.Pos.Line+1, e.Pos.Char+1)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/parser_test.go",
    "content": "package influxql_test\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n)\n\n// Ensure the parser can parse a multi-statement query.\nfunc TestParser_ParseQuery(t *testing.T) {\n\ts := `SELECT a FROM b; SELECT c FROM d`\n\tq, err := influxql.NewParser(strings.NewReader(s)).ParseQuery()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if len(q.Statements) != 2 {\n\t\tt.Fatalf(\"unexpected statement count: %d\", len(q.Statements))\n\t}\n}\n\nfunc TestParser_ParseQuery_TrailingSemicolon(t *testing.T) {\n\ts := `SELECT value FROM cpu;`\n\tq, err := influxql.NewParser(strings.NewReader(s)).ParseQuery()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if len(q.Statements) != 1 {\n\t\tt.Fatalf(\"unexpected statement count: %d\", len(q.Statements))\n\t}\n}\n\n// Ensure the parser can parse an empty query.\nfunc TestParser_ParseQuery_Empty(t *testing.T) {\n\tq, err := influxql.NewParser(strings.NewReader(``)).ParseQuery()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if len(q.Statements) != 0 {\n\t\tt.Fatalf(\"unexpected statement count: %d\", len(q.Statements))\n\t}\n}\n\n// Ensure the parser will skip comments.\nfunc TestParser_ParseQuery_SkipComments(t *testing.T) {\n\tq, err := influxql.ParseQuery(`SELECT * FROM cpu; -- read from cpu database\n\n/* create continuous query */\nCREATE CONTINUOUS QUERY cq0 ON db0 BEGIN\n\tSELECT mean(*) INTO db1..:MEASUREMENT FROM cpu GROUP BY time(5m)\nEND;\n\n/* just a multline comment\nwhat is this doing here?\n**/\n\n-- should ignore the trailing multiline comment /*\nSELECT mean(value) FROM gpu;\n-- trailing comment at the end`)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if len(q.Statements) != 3 {\n\t\tt.Fatalf(\"unexpected statement count: %d\", len(q.Statements))\n\t}\n}\n\n// Ensure the parser 
can return an error from an malformed statement.\nfunc TestParser_ParseQuery_ParseError(t *testing.T) {\n\t_, err := influxql.NewParser(strings.NewReader(`SELECT`)).ParseQuery()\n\tif err == nil || err.Error() != `found EOF, expected identifier, string, number, bool at line 1, char 8` {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n}\n\nfunc TestParser_ParseQuery_NoSemicolon(t *testing.T) {\n\t_, err := influxql.NewParser(strings.NewReader(`CREATE DATABASE foo CREATE DATABASE bar`)).ParseQuery()\n\tif err == nil || err.Error() != `found CREATE, expected ; at line 1, char 21` {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n}\n\n// Ensure the parser can parse strings into Statement ASTs.\nfunc TestParser_ParseStatement(t *testing.T) {\n\t// For use in various tests.\n\tnow := time.Now()\n\n\tvar tests = []struct {\n\t\tskip   bool\n\t\ts      string\n\t\tparams map[string]interface{}\n\t\tstmt   influxql.Statement\n\t\terr    string\n\t}{\n\t\t// SELECT * statement\n\t\t{\n\t\t\ts: `SELECT * FROM myseries`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Wildcard{}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `SELECT * FROM myseries GROUP BY *`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Wildcard{}},\n\t\t\t\t},\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\tDimensions: []*influxql.Dimension{{Expr: &influxql.Wildcard{}}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `SELECT field1, * FROM myseries GROUP BY *`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"field1\"}},\n\t\t\t\t\t{Expr: &influxql.Wildcard{}},\n\t\t\t\t},\n\t\t\t\tSources:    
[]influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\tDimensions: []*influxql.Dimension{{Expr: &influxql.Wildcard{}}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `SELECT *, field1 FROM myseries GROUP BY *`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Wildcard{}},\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"field1\"}},\n\t\t\t\t},\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\tDimensions: []*influxql.Dimension{{Expr: &influxql.Wildcard{}}},\n\t\t\t},\n\t\t},\n\n\t\t// SELECT statement\n\t\t{\n\t\t\ts: fmt.Sprintf(`SELECT mean(field1), sum(field2) ,count(field3) AS field_x FROM myseries WHERE host = 'hosta.influxdb.org' and time > '%s' GROUP BY time(10h) ORDER BY DESC LIMIT 20 OFFSET 10;`, now.UTC().Format(time.RFC3339Nano)),\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"mean\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}}}},\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"sum\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field2\"}}}},\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"count\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field3\"}}}, Alias: \"field_x\"},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp: influxql.AND,\n\t\t\t\t\tLHS: &influxql.BinaryExpr{\n\t\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\t\tLHS: &influxql.VarRef{Val: \"host\"},\n\t\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"hosta.influxdb.org\"},\n\t\t\t\t\t},\n\t\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\t\tOp:  influxql.GT,\n\t\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\t\tRHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tDimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: 
\"time\", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 10 * time.Hour}}}}},\n\t\t\t\tSortFields: []*influxql.SortField{\n\t\t\t\t\t{Ascending: false},\n\t\t\t\t},\n\t\t\t\tLimit:  20,\n\t\t\t\tOffset: 10,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `SELECT \"foo.bar.baz\" AS foo FROM myseries`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"foo.bar.baz\"}, Alias: \"foo\"},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `SELECT \"foo.bar.baz\" AS foo FROM foo`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"foo.bar.baz\"}, Alias: \"foo\"},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"foo\"}},\n\t\t\t},\n\t\t},\n\n\t\t// sample\n\t\t{\n\t\t\ts: `SELECT sample(field1, 100) FROM myseries;`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"sample\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}, &influxql.IntegerLiteral{Val: 100}}}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t},\n\t\t},\n\n\t\t// derivative\n\t\t{\n\t\t\ts: `SELECT derivative(field1, 1h) FROM myseries;`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"derivative\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}, &influxql.DurationLiteral{Val: time.Hour}}}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: fmt.Sprintf(`SELECT derivative(field1, 1h) FROM myseries WHERE time > '%s'`, now.UTC().Format(time.RFC3339Nano)),\n\t\t\tstmt: 
&influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"derivative\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}, &influxql.DurationLiteral{Val: time.Hour}}}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `SELECT derivative(field1, 1h) / derivative(field2, 1h) FROM myseries`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.BinaryExpr{\n\t\t\t\t\t\t\tLHS: &influxql.Call{\n\t\t\t\t\t\t\t\tName: \"derivative\",\n\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t&influxql.VarRef{Val: \"field1\"},\n\t\t\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: time.Hour},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tRHS: &influxql.Call{\n\t\t\t\t\t\t\t\tName: \"derivative\",\n\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t&influxql.VarRef{Val: \"field2\"},\n\t\t\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: time.Hour},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tOp: influxql.DIV,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{\n\t\t\t\t\t&influxql.Measurement{Name: \"myseries\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// difference\n\t\t{\n\t\t\ts: `SELECT difference(field1) FROM myseries;`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"difference\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}}}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: 
fmt.Sprintf(`SELECT difference(max(field1)) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, now.UTC().Format(time.RFC3339Nano)),\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\tName: \"difference\",\n\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t&influxql.Call{\n\t\t\t\t\t\t\t\t\tName: \"max\",\n\t\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t\t&influxql.VarRef{Val: \"field1\"},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\tDimensions: []*influxql.Dimension{\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: time.Minute},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// non_negative_difference\n\t\t{\n\t\t\ts: `SELECT non_negative_difference(field1) FROM myseries;`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"non_negative_difference\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}}}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: fmt.Sprintf(`SELECT non_negative_difference(max(field1)) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, now.UTC().Format(time.RFC3339Nano)),\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: 
&influxql.Call{\n\t\t\t\t\t\t\tName: \"non_negative_difference\",\n\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t&influxql.Call{\n\t\t\t\t\t\t\t\t\tName: \"max\",\n\t\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t\t&influxql.VarRef{Val: \"field1\"},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\tDimensions: []*influxql.Dimension{\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: time.Minute},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// moving_average\n\t\t{\n\t\t\ts: `SELECT moving_average(field1, 3) FROM myseries;`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"moving_average\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}, &influxql.IntegerLiteral{Val: 3}}}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: fmt.Sprintf(`SELECT moving_average(max(field1), 3) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, now.UTC().Format(time.RFC3339Nano)),\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\tName: \"moving_average\",\n\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t&influxql.Call{\n\t\t\t\t\t\t\t\t\tName: \"max\",\n\t\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t\t&influxql.VarRef{Val: 
\"field1\"},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&influxql.IntegerLiteral{Val: 3},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\tDimensions: []*influxql.Dimension{\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: time.Minute},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// cumulative_sum\n\t\t{\n\t\t\ts: fmt.Sprintf(`SELECT cumulative_sum(field1) FROM myseries WHERE time > '%s'`, now.UTC().Format(time.RFC3339Nano)),\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\tName: \"cumulative_sum\",\n\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t&influxql.VarRef{Val: \"field1\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: fmt.Sprintf(`SELECT cumulative_sum(mean(field1)) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, now.UTC().Format(time.RFC3339Nano)),\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\tName: \"cumulative_sum\",\n\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t&influxql.Call{\n\t\t\t\t\t\t\t\t\tName: \"mean\",\n\t\t\t\t\t\t\t\t\tArgs: 
[]influxql.Expr{\n\t\t\t\t\t\t\t\t\t\t&influxql.VarRef{Val: \"field1\"},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\tDimensions: []*influxql.Dimension{\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: time.Minute},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// holt_winters\n\t\t{\n\t\t\ts: fmt.Sprintf(`SELECT holt_winters(first(field1), 3, 1) FROM myseries WHERE time > '%s' GROUP BY time(1h);`, now.UTC().Format(time.RFC3339Nano)),\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{\n\t\t\t\t\t\tName: \"holt_winters\",\n\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t&influxql.Call{\n\t\t\t\t\t\t\t\tName: \"first\",\n\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t&influxql.VarRef{Val: \"field1\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t&influxql.IntegerLiteral{Val: 3},\n\t\t\t\t\t\t\t&influxql.IntegerLiteral{Val: 1},\n\t\t\t\t\t\t},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\tDimensions: []*influxql.Dimension{\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: 1 * time.Hour},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: 
\"time\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: fmt.Sprintf(`SELECT holt_winters_with_fit(first(field1), 3, 1) FROM myseries WHERE time > '%s' GROUP BY time(1h);`, now.UTC().Format(time.RFC3339Nano)),\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{\n\t\t\t\t\t\tName: \"holt_winters_with_fit\",\n\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t&influxql.Call{\n\t\t\t\t\t\t\t\tName: \"first\",\n\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t&influxql.VarRef{Val: \"field1\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t&influxql.IntegerLiteral{Val: 3},\n\t\t\t\t\t\t\t&influxql.IntegerLiteral{Val: 1},\n\t\t\t\t\t\t}}},\n\t\t\t\t},\n\t\t\t\tDimensions: []*influxql.Dimension{\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: 1 * time.Hour},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: fmt.Sprintf(`SELECT holt_winters(max(field1), 4, 5) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, now.UTC().Format(time.RFC3339Nano)),\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\tName: \"holt_winters\",\n\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t&influxql.Call{\n\t\t\t\t\t\t\t\t\tName: \"max\",\n\t\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t\t&influxql.VarRef{Val: 
\"field1\"},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&influxql.IntegerLiteral{Val: 4},\n\t\t\t\t\t\t\t\t&influxql.IntegerLiteral{Val: 5},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\tDimensions: []*influxql.Dimension{\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: time.Minute},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: fmt.Sprintf(`SELECT holt_winters_with_fit(max(field1), 4, 5) FROM myseries WHERE time > '%s' GROUP BY time(1m)`, now.UTC().Format(time.RFC3339Nano)),\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\tName: \"holt_winters_with_fit\",\n\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t&influxql.Call{\n\t\t\t\t\t\t\t\t\tName: \"max\",\n\t\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t\t&influxql.VarRef{Val: \"field1\"},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&influxql.IntegerLiteral{Val: 4},\n\t\t\t\t\t\t\t\t&influxql.IntegerLiteral{Val: 5},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\tDimensions: []*influxql.Dimension{\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: time.Minute},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCondition: 
&influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// SELECT statement (lowercase)\n\t\t{\n\t\t\ts: `select my_field from myseries`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.VarRef{Val: \"my_field\"}}},\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t},\n\t\t},\n\n\t\t// SELECT statement (lowercase) with quoted field\n\t\t{\n\t\t\ts: `select 'my_field' from myseries`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.StringLiteral{Val: \"my_field\"}}},\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t},\n\t\t},\n\n\t\t// SELECT statement with multiple ORDER BY fields\n\t\t{\n\t\t\tskip: true,\n\t\t\ts:    `SELECT field1 FROM myseries ORDER BY ASC, field1, field2 DESC LIMIT 10`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.VarRef{Val: \"field1\"}}},\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\tSortFields: []*influxql.SortField{\n\t\t\t\t\t{Ascending: true},\n\t\t\t\t\t{Name: \"field1\"},\n\t\t\t\t\t{Name: \"field2\"},\n\t\t\t\t},\n\t\t\t\tLimit: 10,\n\t\t\t},\n\t\t},\n\n\t\t// SELECT statement with SLIMIT and SOFFSET\n\t\t{\n\t\t\ts: `SELECT field1 FROM myseries SLIMIT 10 SOFFSET 5`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.VarRef{Val: \"field1\"}}},\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\tSLimit:     10,\n\t\t\t\tSOffset:    5,\n\t\t\t},\n\t\t},\n\n\t\t// SELECT * FROM cpu WHERE host = 'serverC' AND region =~ 
/.*west.*/\n\t\t{\n\t\t\ts: `SELECT * FROM cpu WHERE host = 'serverC' AND region =~ /.*west.*/`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.Wildcard{}}},\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp: influxql.AND,\n\t\t\t\t\tLHS: &influxql.BinaryExpr{\n\t\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\t\tLHS: &influxql.VarRef{Val: \"host\"},\n\t\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"serverC\"},\n\t\t\t\t\t},\n\t\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\t\tOp:  influxql.EQREGEX,\n\t\t\t\t\t\tLHS: &influxql.VarRef{Val: \"region\"},\n\t\t\t\t\t\tRHS: &influxql.RegexLiteral{Val: regexp.MustCompile(\".*west.*\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// select percentile statements\n\t\t{\n\t\t\ts: `select percentile(\"field1\", 2.0) from cpu`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"percentile\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}, &influxql.NumberLiteral{Val: 2.0}}}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `select percentile(\"field1\", 2.0), field2 from cpu`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"percentile\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}, &influxql.NumberLiteral{Val: 2.0}}}},\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"field2\"}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t},\n\t\t},\n\n\t\t// select top statements\n\t\t{\n\t\t\ts: `select top(\"field1\", 2) from cpu`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: 
&influxql.Call{Name: \"top\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}, &influxql.IntegerLiteral{Val: 2}}}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `select top(field1, 2) from cpu`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"top\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}, &influxql.IntegerLiteral{Val: 2}}}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `select top(field1, 2), tag1 from cpu`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"top\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}, &influxql.IntegerLiteral{Val: 2}}}},\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"tag1\"}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `select top(field1, tag1, 2), tag1 from cpu`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"top\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}, &influxql.VarRef{Val: \"tag1\"}, &influxql.IntegerLiteral{Val: 2}}}},\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"tag1\"}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t},\n\t\t},\n\n\t\t// select distinct statements\n\t\t{\n\t\t\ts: `select distinct(field1) from cpu`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"distinct\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}}}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: 
\"cpu\"}},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `select distinct field2 from network`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Distinct{Val: \"field2\"}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"network\"}},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `select count(distinct field3) from metrics`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"count\", Args: []influxql.Expr{&influxql.Distinct{Val: \"field3\"}}}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"metrics\"}},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `select count(distinct field3), sum(field4) from metrics`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"count\", Args: []influxql.Expr{&influxql.Distinct{Val: \"field3\"}}}},\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"sum\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field4\"}}}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"metrics\"}},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `select count(distinct(field3)), sum(field4) from metrics`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"count\", Args: []influxql.Expr{&influxql.Call{Name: \"distinct\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field3\"}}}}}},\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"sum\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field4\"}}}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"metrics\"}},\n\t\t\t},\n\t\t},\n\n\t\t// SELECT * FROM WHERE time\n\t\t{\n\t\t\ts: fmt.Sprintf(`SELECT * FROM cpu WHERE time > '%s'`, now.UTC().Format(time.RFC3339Nano)),\n\t\t\tstmt: 
&influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.Wildcard{}}},\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// SELECT * FROM WHERE field comparisons\n\t\t{\n\t\t\ts: `SELECT * FROM cpu WHERE load > 100`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.Wildcard{}}},\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"load\"},\n\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 100},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `SELECT * FROM cpu WHERE load >= 100`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.Wildcard{}}},\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GTE,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"load\"},\n\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 100},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `SELECT * FROM cpu WHERE load = 100`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.Wildcard{}}},\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"load\"},\n\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 100},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `SELECT * FROM cpu WHERE load <= 100`,\n\t\t\tstmt: 
&influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.Wildcard{}}},\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.LTE,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"load\"},\n\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 100},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `SELECT * FROM cpu WHERE load < 100`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.Wildcard{}}},\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.LT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"load\"},\n\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 100},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `SELECT * FROM cpu WHERE load != 100`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.Wildcard{}}},\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.NEQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"load\"},\n\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 100},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// SELECT * FROM /<regex>/\n\t\t{\n\t\t\ts: `SELECT * FROM /cpu.*/`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.Wildcard{}}},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{\n\t\t\t\t\tRegex: &influxql.RegexLiteral{Val: regexp.MustCompile(\"cpu.*\")}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// SELECT * FROM \"db\".\"rp\"./<regex>/\n\t\t{\n\t\t\ts: `SELECT * FROM \"db\".\"rp\"./cpu.*/`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.Wildcard{}}},\n\t\t\t\tSources: 
[]influxql.Source{&influxql.Measurement{\n\t\t\t\t\tDatabase:        `db`,\n\t\t\t\t\tRetentionPolicy: `rp`,\n\t\t\t\t\tRegex:           &influxql.RegexLiteral{Val: regexp.MustCompile(\"cpu.*\")}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// SELECT * FROM \"db\"../<regex>/\n\t\t{\n\t\t\ts: `SELECT * FROM \"db\"../cpu.*/`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.Wildcard{}}},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{\n\t\t\t\t\tDatabase: `db`,\n\t\t\t\t\tRegex:    &influxql.RegexLiteral{Val: regexp.MustCompile(\"cpu.*\")}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// SELECT * FROM \"rp\"./<regex>/\n\t\t{\n\t\t\ts: `SELECT * FROM \"rp\"./cpu.*/`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.Wildcard{}}},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{\n\t\t\t\t\tRetentionPolicy: `rp`,\n\t\t\t\t\tRegex:           &influxql.RegexLiteral{Val: regexp.MustCompile(\"cpu.*\")}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// SELECT statement with group by\n\t\t{\n\t\t\ts: `SELECT sum(value) FROM \"kbps\" WHERE time > now() - 120s AND deliveryservice='steam-dns' and cachegroup = 'total' GROUP BY time(60s)`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: false,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.Call{Name: \"sum\", Args: []influxql.Expr{&influxql.VarRef{Val: \"value\"}}}},\n\t\t\t\t},\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"kbps\"}},\n\t\t\t\tDimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: \"time\", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 60 * time.Second}}}}},\n\t\t\t\tCondition: &influxql.BinaryExpr{ // 1\n\t\t\t\t\tOp: influxql.AND,\n\t\t\t\t\tLHS: &influxql.BinaryExpr{ // 2\n\t\t\t\t\t\tOp: influxql.AND,\n\t\t\t\t\t\tLHS: &influxql.BinaryExpr{ //3\n\t\t\t\t\t\t\tOp:  influxql.GT,\n\t\t\t\t\t\t\tLHS: 
&influxql.VarRef{Val: \"time\"},\n\t\t\t\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\t\t\t\tOp:  influxql.SUB,\n\t\t\t\t\t\t\t\tLHS: &influxql.Call{Name: \"now\"},\n\t\t\t\t\t\t\t\tRHS: &influxql.DurationLiteral{Val: mustParseDuration(\"120s\")},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\t\t\tLHS: &influxql.VarRef{Val: \"deliveryservice\"},\n\t\t\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"steam-dns\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\t\tLHS: &influxql.VarRef{Val: \"cachegroup\"},\n\t\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"total\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// SELECT statement with group by and multi digit duration (prevent regression from #731://github.com/influxdata/influxdb/pull/7316)\n\t\t{\n\t\t\ts: fmt.Sprintf(`SELECT count(value) FROM cpu where time < '%s' group by time(500ms)`, now.UTC().Format(time.RFC3339Nano)),\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tFields: []*influxql.Field{{\n\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\tName: \"count\",\n\t\t\t\t\t\tArgs: []influxql.Expr{&influxql.VarRef{Val: \"value\"}}}}},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.LT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},\n\t\t\t\t},\n\t\t\t\tDimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: \"time\", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 500 * time.Millisecond}}}}},\n\t\t\t},\n\t\t},\n\n\t\t// SELECT statement with fill\n\t\t{\n\t\t\ts: fmt.Sprintf(`SELECT mean(value) FROM cpu where time < '%s' GROUP BY time(5m) fill(1)`, now.UTC().Format(time.RFC3339Nano)),\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tFields: []*influxql.Field{{\n\t\t\t\t\tExpr: 
&influxql.Call{\n\t\t\t\t\t\tName: \"mean\",\n\t\t\t\t\t\tArgs: []influxql.Expr{&influxql.VarRef{Val: \"value\"}}}}},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.LT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},\n\t\t\t\t},\n\t\t\t\tDimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: \"time\", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 5 * time.Minute}}}}},\n\t\t\t\tFill:       influxql.NumberFill,\n\t\t\t\tFillValue:  int64(1),\n\t\t\t},\n\t\t},\n\n\t\t// SELECT statement with FILL(none) -- check case insensitivity\n\t\t{\n\t\t\ts: fmt.Sprintf(`SELECT mean(value) FROM cpu where time < '%s' GROUP BY time(5m) FILL(none)`, now.UTC().Format(time.RFC3339Nano)),\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tFields: []*influxql.Field{{\n\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\tName: \"mean\",\n\t\t\t\t\t\tArgs: []influxql.Expr{&influxql.VarRef{Val: \"value\"}}}}},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.LT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},\n\t\t\t\t},\n\t\t\t\tDimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: \"time\", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 5 * time.Minute}}}}},\n\t\t\t\tFill:       influxql.NoFill,\n\t\t\t},\n\t\t},\n\n\t\t// SELECT statement with previous fill\n\t\t{\n\t\t\ts: fmt.Sprintf(`SELECT mean(value) FROM cpu where time < '%s' GROUP BY time(5m) FILL(previous)`, now.UTC().Format(time.RFC3339Nano)),\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tFields: []*influxql.Field{{\n\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\tName: \"mean\",\n\t\t\t\t\t\tArgs: []influxql.Expr{&influxql.VarRef{Val: 
\"value\"}}}}},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.LT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},\n\t\t\t\t},\n\t\t\t\tDimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: \"time\", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 5 * time.Minute}}}}},\n\t\t\t\tFill:       influxql.PreviousFill,\n\t\t\t},\n\t\t},\n\n\t\t// SELECT statement with average fill\n\t\t{\n\t\t\ts: fmt.Sprintf(`SELECT mean(value) FROM cpu where time < '%s' GROUP BY time(5m) FILL(linear)`, now.UTC().Format(time.RFC3339Nano)),\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tFields: []*influxql.Field{{\n\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\tName: \"mean\",\n\t\t\t\t\t\tArgs: []influxql.Expr{&influxql.VarRef{Val: \"value\"}}}}},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.LT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: now.UTC().Format(time.RFC3339Nano)},\n\t\t\t\t},\n\t\t\t\tDimensions: []*influxql.Dimension{{Expr: &influxql.Call{Name: \"time\", Args: []influxql.Expr{&influxql.DurationLiteral{Val: 5 * time.Minute}}}}},\n\t\t\t\tFill:       influxql.LinearFill,\n\t\t\t},\n\t\t},\n\n\t\t// SELECT casts\n\t\t{\n\t\t\ts: `SELECT field1::float, field2::integer, field3::string, field4::boolean, field5::field, tag1::tag FROM cpu`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.VarRef{\n\t\t\t\t\t\t\tVal:  \"field1\",\n\t\t\t\t\t\t\tType: influxql.Float,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.VarRef{\n\t\t\t\t\t\t\tVal:  \"field2\",\n\t\t\t\t\t\t\tType: 
influxql.Integer,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.VarRef{\n\t\t\t\t\t\t\tVal:  \"field3\",\n\t\t\t\t\t\t\tType: influxql.String,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.VarRef{\n\t\t\t\t\t\t\tVal:  \"field4\",\n\t\t\t\t\t\t\tType: influxql.Boolean,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.VarRef{\n\t\t\t\t\t\t\tVal:  \"field5\",\n\t\t\t\t\t\t\tType: influxql.AnyField,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tExpr: &influxql.VarRef{\n\t\t\t\t\t\t\tVal:  \"tag1\",\n\t\t\t\t\t\t\tType: influxql.Tag,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t},\n\t\t},\n\n\t\t// SELECT statement with a bound parameter\n\t\t{\n\t\t\ts: `SELECT value FROM cpu WHERE value > $value`,\n\t\t\tparams: map[string]interface{}{\n\t\t\t\t\"value\": int64(2),\n\t\t\t},\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields: []*influxql.Field{{\n\t\t\t\t\tExpr: &influxql.VarRef{Val: \"value\"}}},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"value\"},\n\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 2},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// SELECT statement with a subquery\n\t\t{\n\t\t\ts: `SELECT sum(derivative) FROM (SELECT derivative(value) FROM cpu GROUP BY host) WHERE time >= now() - 1d GROUP BY time(1h)`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tFields: []*influxql.Field{{\n\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\tName: \"sum\",\n\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t&influxql.VarRef{Val: \"derivative\"},\n\t\t\t\t\t\t}},\n\t\t\t\t}},\n\t\t\t\tDimensions: []*influxql.Dimension{{\n\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\tArgs: 
[]influxql.Expr{\n\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: time.Hour},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t\tSources: []influxql.Source{\n\t\t\t\t\t&influxql.SubQuery{\n\t\t\t\t\t\tStatement: &influxql.SelectStatement{\n\t\t\t\t\t\t\tFields: []*influxql.Field{{\n\t\t\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\t\t\tName: \"derivative\",\n\t\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t\t&influxql.VarRef{Val: \"value\"},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t\tDimensions: []*influxql.Dimension{{\n\t\t\t\t\t\t\t\tExpr: &influxql.VarRef{Val: \"host\"},\n\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t\tSources: []influxql.Source{\n\t\t\t\t\t\t\t\t&influxql.Measurement{Name: \"cpu\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GTE,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\t\tOp:  influxql.SUB,\n\t\t\t\t\t\tLHS: &influxql.Call{Name: \"now\"},\n\t\t\t\t\t\tRHS: &influxql.DurationLiteral{Val: 24 * time.Hour},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `SELECT sum(mean) FROM (SELECT mean(value) FROM cpu GROUP BY time(1h)) WHERE time >= now() - 1d`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tFields: []*influxql.Field{{\n\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\tName: \"sum\",\n\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t&influxql.VarRef{Val: \"mean\"},\n\t\t\t\t\t\t}},\n\t\t\t\t}},\n\t\t\t\tSources: []influxql.Source{\n\t\t\t\t\t&influxql.SubQuery{\n\t\t\t\t\t\tStatement: &influxql.SelectStatement{\n\t\t\t\t\t\t\tFields: []*influxql.Field{{\n\t\t\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\t\t\tName: \"mean\",\n\t\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t\t&influxql.VarRef{Val: \"value\"},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t\tDimensions: []*influxql.Dimension{{\n\t\t\t\t\t\t\t\tExpr: 
&influxql.Call{\n\t\t\t\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: time.Hour},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t\tSources: []influxql.Source{\n\t\t\t\t\t\t\t\t&influxql.Measurement{Name: \"cpu\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GTE,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\t\tOp:  influxql.SUB,\n\t\t\t\t\t\tLHS: &influxql.Call{Name: \"now\"},\n\t\t\t\t\t\tRHS: &influxql.DurationLiteral{Val: 24 * time.Hour},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `SELECT sum(mean) FROM (SELECT mean(value) FROM cpu WHERE time >= now() - 1d GROUP BY time(1h))`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tFields: []*influxql.Field{{\n\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\tName: \"sum\",\n\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t&influxql.VarRef{Val: \"mean\"},\n\t\t\t\t\t\t}},\n\t\t\t\t}},\n\t\t\t\tSources: []influxql.Source{\n\t\t\t\t\t&influxql.SubQuery{\n\t\t\t\t\t\tStatement: &influxql.SelectStatement{\n\t\t\t\t\t\t\tFields: []*influxql.Field{{\n\t\t\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\t\t\tName: \"mean\",\n\t\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t\t&influxql.VarRef{Val: \"value\"},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t\tDimensions: []*influxql.Dimension{{\n\t\t\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: time.Hour},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\t\t\t\tOp:  influxql.GTE,\n\t\t\t\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\t\t\t\t\tOp:  
influxql.SUB,\n\t\t\t\t\t\t\t\t\tLHS: &influxql.Call{Name: \"now\"},\n\t\t\t\t\t\t\t\t\tRHS: &influxql.DurationLiteral{Val: 24 * time.Hour},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSources: []influxql.Source{\n\t\t\t\t\t\t\t\t&influxql.Measurement{Name: \"cpu\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `SELECT sum(derivative) FROM (SELECT derivative(mean(value)) FROM cpu GROUP BY host) WHERE time >= now() - 1d GROUP BY time(1h)`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tFields: []*influxql.Field{{\n\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\tName: \"sum\",\n\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t&influxql.VarRef{Val: \"derivative\"},\n\t\t\t\t\t\t}},\n\t\t\t\t}},\n\t\t\t\tDimensions: []*influxql.Dimension{{\n\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: time.Hour},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t\tSources: []influxql.Source{\n\t\t\t\t\t&influxql.SubQuery{\n\t\t\t\t\t\tStatement: &influxql.SelectStatement{\n\t\t\t\t\t\t\tFields: []*influxql.Field{{\n\t\t\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\t\t\tName: \"derivative\",\n\t\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t\t&influxql.Call{\n\t\t\t\t\t\t\t\t\t\t\tName: \"mean\",\n\t\t\t\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t\t\t\t&influxql.VarRef{Val: \"value\"},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t\tDimensions: []*influxql.Dimension{{\n\t\t\t\t\t\t\t\tExpr: &influxql.VarRef{Val: \"host\"},\n\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t\tSources: []influxql.Source{\n\t\t\t\t\t\t\t\t&influxql.Measurement{Name: \"cpu\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GTE,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: 
\"time\"},\n\t\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\t\tOp:  influxql.SUB,\n\t\t\t\t\t\tLHS: &influxql.Call{Name: \"now\"},\n\t\t\t\t\t\tRHS: &influxql.DurationLiteral{Val: 24 * time.Hour},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// select statements with intertwined comments\n\t\t{\n\t\t\ts: `SELECT \"user\" /*, system, idle */ FROM cpu`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"user\"}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `SELECT /foo\\/*bar/ FROM /foo\\/*bar*/ WHERE x = 1`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tIsRawQuery: true,\n\t\t\t\tFields: []*influxql.Field{\n\t\t\t\t\t{Expr: &influxql.RegexLiteral{Val: regexp.MustCompile(`foo/*bar`)}},\n\t\t\t\t},\n\t\t\t\tSources: []influxql.Source{\n\t\t\t\t\t&influxql.Measurement{\n\t\t\t\t\t\tRegex: &influxql.RegexLiteral{Val: regexp.MustCompile(`foo/*bar*`)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"x\"},\n\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 1},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// SELECT statement with a time zone\n\t\t{\n\t\t\ts: `SELECT mean(value) FROM cpu WHERE time >= now() - 7d GROUP BY time(1d) TZ('America/Los_Angeles')`,\n\t\t\tstmt: &influxql.SelectStatement{\n\t\t\t\tFields: []*influxql.Field{{\n\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\tName: \"mean\",\n\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t&influxql.VarRef{Val: \"value\"}},\n\t\t\t\t\t}}},\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GTE,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\t\tOp:  influxql.SUB,\n\t\t\t\t\t\tLHS: &influxql.Call{Name: \"now\"},\n\t\t\t\t\t\tRHS: 
&influxql.DurationLiteral{Val: 7 * 24 * time.Hour},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tDimensions: []*influxql.Dimension{{\n\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: 24 * time.Hour}}}}},\n\t\t\t\tLocation: LosAngeles,\n\t\t\t},\n\t\t},\n\n\t\t// See issues https://github.com/influxdata/influxdb/issues/1647\n\t\t// and https://github.com/influxdata/influxdb/issues/4404\n\t\t// DELETE statement\n\t\t//{\n\t\t//\ts: `DELETE FROM myseries WHERE host = 'hosta.influxdb.org'`,\n\t\t//\tstmt: &influxql.DeleteStatement{\n\t\t//\t\tSource: &influxql.Measurement{Name: \"myseries\"},\n\t\t//\t\tCondition: &influxql.BinaryExpr{\n\t\t//\t\t\tOp:  influxql.EQ,\n\t\t//\t\t\tLHS: &influxql.VarRef{Val: \"host\"},\n\t\t//\t\t\tRHS: &influxql.StringLiteral{Val: \"hosta.influxdb.org\"},\n\t\t//\t\t},\n\t\t//\t},\n\t\t//},\n\n\t\t// SHOW GRANTS\n\t\t{\n\t\t\ts:    `SHOW GRANTS FOR jdoe`,\n\t\t\tstmt: &influxql.ShowGrantsForUserStatement{Name: \"jdoe\"},\n\t\t},\n\n\t\t// SHOW DATABASES\n\t\t{\n\t\t\ts:    `SHOW DATABASES`,\n\t\t\tstmt: &influxql.ShowDatabasesStatement{},\n\t\t},\n\n\t\t// SHOW SERIES statement\n\t\t{\n\t\t\ts:    `SHOW SERIES`,\n\t\t\tstmt: &influxql.ShowSeriesStatement{},\n\t\t},\n\n\t\t// SHOW SERIES FROM\n\t\t{\n\t\t\ts: `SHOW SERIES FROM cpu`,\n\t\t\tstmt: &influxql.ShowSeriesStatement{\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t},\n\t\t},\n\n\t\t// SHOW SERIES ON db0\n\t\t{\n\t\t\ts: `SHOW SERIES ON db0`,\n\t\t\tstmt: &influxql.ShowSeriesStatement{\n\t\t\t\tDatabase: \"db0\",\n\t\t\t},\n\t\t},\n\n\t\t// SHOW SERIES FROM /<regex>/\n\t\t{\n\t\t\ts: `SHOW SERIES FROM /[cg]pu/`,\n\t\t\tstmt: &influxql.ShowSeriesStatement{\n\t\t\t\tSources: []influxql.Source{\n\t\t\t\t\t&influxql.Measurement{\n\t\t\t\t\t\tRegex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// SHOW 
SERIES with OFFSET 0\n\t\t{\n\t\t\ts:    `SHOW SERIES OFFSET 0`,\n\t\t\tstmt: &influxql.ShowSeriesStatement{Offset: 0},\n\t\t},\n\n\t\t// SHOW SERIES with LIMIT 2 OFFSET 0\n\t\t{\n\t\t\ts:    `SHOW SERIES LIMIT 2 OFFSET 0`,\n\t\t\tstmt: &influxql.ShowSeriesStatement{Offset: 0, Limit: 2},\n\t\t},\n\n\t\t// SHOW SERIES WHERE with ORDER BY and LIMIT\n\t\t{\n\t\t\tskip: true,\n\t\t\ts:    `SHOW SERIES WHERE region = 'order by desc' ORDER BY DESC, field1, field2 DESC LIMIT 10`,\n\t\t\tstmt: &influxql.ShowSeriesStatement{\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"region\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"order by desc\"},\n\t\t\t\t},\n\t\t\t\tSortFields: []*influxql.SortField{\n\t\t\t\t\t&influxql.SortField{Ascending: false},\n\t\t\t\t\t&influxql.SortField{Name: \"field1\", Ascending: true},\n\t\t\t\t\t&influxql.SortField{Name: \"field2\"},\n\t\t\t\t},\n\t\t\t\tLimit: 10,\n\t\t\t},\n\t\t},\n\n\t\t// SHOW MEASUREMENTS WHERE with ORDER BY and LIMIT\n\t\t{\n\t\t\tskip: true,\n\t\t\ts:    `SHOW MEASUREMENTS WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`,\n\t\t\tstmt: &influxql.ShowMeasurementsStatement{\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"region\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"uswest\"},\n\t\t\t\t},\n\t\t\t\tSortFields: []*influxql.SortField{\n\t\t\t\t\t{Ascending: true},\n\t\t\t\t\t{Name: \"field1\"},\n\t\t\t\t\t{Name: \"field2\"},\n\t\t\t\t},\n\t\t\t\tLimit: 10,\n\t\t\t},\n\t\t},\n\n\t\t// SHOW MEASUREMENTS ON db0\n\t\t{\n\t\t\ts: `SHOW MEASUREMENTS ON db0`,\n\t\t\tstmt: &influxql.ShowMeasurementsStatement{\n\t\t\t\tDatabase: \"db0\",\n\t\t\t},\n\t\t},\n\n\t\t// SHOW MEASUREMENTS WITH MEASUREMENT = cpu\n\t\t{\n\t\t\ts: `SHOW MEASUREMENTS WITH MEASUREMENT = cpu`,\n\t\t\tstmt: &influxql.ShowMeasurementsStatement{\n\t\t\t\tSource: &influxql.Measurement{Name: 
\"cpu\"},\n\t\t\t},\n\t\t},\n\n\t\t// SHOW MEASUREMENTS WITH MEASUREMENT =~ /regex/\n\t\t{\n\t\t\ts: `SHOW MEASUREMENTS WITH MEASUREMENT =~ /[cg]pu/`,\n\t\t\tstmt: &influxql.ShowMeasurementsStatement{\n\t\t\t\tSource: &influxql.Measurement{\n\t\t\t\t\tRegex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// SHOW QUERIES\n\t\t{\n\t\t\ts:    `SHOW QUERIES`,\n\t\t\tstmt: &influxql.ShowQueriesStatement{},\n\t\t},\n\n\t\t// KILL QUERY 4\n\t\t{\n\t\t\ts: `KILL QUERY 4`,\n\t\t\tstmt: &influxql.KillQueryStatement{\n\t\t\t\tQueryID: 4,\n\t\t\t},\n\t\t},\n\n\t\t// KILL QUERY 4 ON localhost\n\t\t{\n\t\t\ts: `KILL QUERY 4 ON localhost`,\n\t\t\tstmt: &influxql.KillQueryStatement{\n\t\t\t\tQueryID: 4,\n\t\t\t\tHost:    \"localhost\",\n\t\t\t},\n\t\t},\n\n\t\t// SHOW RETENTION POLICIES\n\t\t{\n\t\t\ts:    `SHOW RETENTION POLICIES`,\n\t\t\tstmt: &influxql.ShowRetentionPoliciesStatement{},\n\t\t},\n\n\t\t// SHOW RETENTION POLICIES ON db0\n\t\t{\n\t\t\ts: `SHOW RETENTION POLICIES ON db0`,\n\t\t\tstmt: &influxql.ShowRetentionPoliciesStatement{\n\t\t\t\tDatabase: \"db0\",\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG KEYS\n\t\t{\n\t\t\ts: `SHOW TAG KEYS FROM src`,\n\t\t\tstmt: &influxql.ShowTagKeysStatement{\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"src\"}},\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG KEYS ON db0\n\t\t{\n\t\t\ts: `SHOW TAG KEYS ON db0`,\n\t\t\tstmt: &influxql.ShowTagKeysStatement{\n\t\t\t\tDatabase: \"db0\",\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG KEYS with LIMIT\n\t\t{\n\t\t\ts: `SHOW TAG KEYS FROM src LIMIT 2`,\n\t\t\tstmt: &influxql.ShowTagKeysStatement{\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"src\"}},\n\t\t\t\tLimit:   2,\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG KEYS with OFFSET\n\t\t{\n\t\t\ts: `SHOW TAG KEYS FROM src OFFSET 1`,\n\t\t\tstmt: &influxql.ShowTagKeysStatement{\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"src\"}},\n\t\t\t\tOffset:  
1,\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG KEYS with LIMIT and OFFSET\n\t\t{\n\t\t\ts: `SHOW TAG KEYS FROM src LIMIT 2 OFFSET 1`,\n\t\t\tstmt: &influxql.ShowTagKeysStatement{\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"src\"}},\n\t\t\t\tLimit:   2,\n\t\t\t\tOffset:  1,\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG KEYS with SLIMIT\n\t\t{\n\t\t\ts: `SHOW TAG KEYS FROM src SLIMIT 2`,\n\t\t\tstmt: &influxql.ShowTagKeysStatement{\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"src\"}},\n\t\t\t\tSLimit:  2,\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG KEYS with SOFFSET\n\t\t{\n\t\t\ts: `SHOW TAG KEYS FROM src SOFFSET 1`,\n\t\t\tstmt: &influxql.ShowTagKeysStatement{\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"src\"}},\n\t\t\t\tSOffset: 1,\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG KEYS with SLIMIT and SOFFSET\n\t\t{\n\t\t\ts: `SHOW TAG KEYS FROM src SLIMIT 2 SOFFSET 1`,\n\t\t\tstmt: &influxql.ShowTagKeysStatement{\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"src\"}},\n\t\t\t\tSLimit:  2,\n\t\t\t\tSOffset: 1,\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG KEYS with LIMIT, OFFSET, SLIMIT, and SOFFSET\n\t\t{\n\t\t\ts: `SHOW TAG KEYS FROM src LIMIT 4 OFFSET 3 SLIMIT 2 SOFFSET 1`,\n\t\t\tstmt: &influxql.ShowTagKeysStatement{\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"src\"}},\n\t\t\t\tLimit:   4,\n\t\t\t\tOffset:  3,\n\t\t\t\tSLimit:  2,\n\t\t\t\tSOffset: 1,\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG KEYS FROM /<regex>/\n\t\t{\n\t\t\ts: `SHOW TAG KEYS FROM /[cg]pu/`,\n\t\t\tstmt: &influxql.ShowTagKeysStatement{\n\t\t\t\tSources: []influxql.Source{\n\t\t\t\t\t&influxql.Measurement{\n\t\t\t\t\t\tRegex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG KEYS\n\t\t{\n\t\t\tskip: true,\n\t\t\ts:    `SHOW TAG KEYS FROM src WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`,\n\t\t\tstmt: 
&influxql.ShowTagKeysStatement{\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"src\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"region\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"uswest\"},\n\t\t\t\t},\n\t\t\t\tSortFields: []*influxql.SortField{\n\t\t\t\t\t{Ascending: true},\n\t\t\t\t\t{Name: \"field1\"},\n\t\t\t\t\t{Name: \"field2\"},\n\t\t\t\t},\n\t\t\t\tLimit: 10,\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG VALUES FROM ... WITH KEY = ...\n\t\t{\n\t\t\tskip: true,\n\t\t\ts:    `SHOW TAG VALUES FROM src WITH KEY = region WHERE region = 'uswest' ORDER BY ASC, field1, field2 DESC LIMIT 10`,\n\t\t\tstmt: &influxql.ShowTagValuesStatement{\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"src\"}},\n\t\t\t\tOp:         influxql.EQ,\n\t\t\t\tTagKeyExpr: &influxql.StringLiteral{Val: \"region\"},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"region\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"uswest\"},\n\t\t\t\t},\n\t\t\t\tSortFields: []*influxql.SortField{\n\t\t\t\t\t{Ascending: true},\n\t\t\t\t\t{Name: \"field1\"},\n\t\t\t\t\t{Name: \"field2\"},\n\t\t\t\t},\n\t\t\t\tLimit: 10,\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG VALUES FROM ... WITH KEY IN...\n\t\t{\n\t\t\ts: `SHOW TAG VALUES FROM cpu WITH KEY IN (region, host) WHERE region = 'uswest'`,\n\t\t\tstmt: &influxql.ShowTagValuesStatement{\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t\tOp:         influxql.IN,\n\t\t\t\tTagKeyExpr: &influxql.ListLiteral{Vals: []string{\"region\", \"host\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"region\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"uswest\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG VALUES ... 
AND TAG KEY =\n\t\t{\n\t\t\ts: `SHOW TAG VALUES FROM cpu WITH KEY IN (region,service,host)WHERE region = 'uswest'`,\n\t\t\tstmt: &influxql.ShowTagValuesStatement{\n\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"cpu\"}},\n\t\t\t\tOp:         influxql.IN,\n\t\t\t\tTagKeyExpr: &influxql.ListLiteral{Vals: []string{\"region\", \"service\", \"host\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"region\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"uswest\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG VALUES WITH KEY = ...\n\t\t{\n\t\t\ts: `SHOW TAG VALUES WITH KEY = host WHERE region = 'uswest'`,\n\t\t\tstmt: &influxql.ShowTagValuesStatement{\n\t\t\t\tOp:         influxql.EQ,\n\t\t\t\tTagKeyExpr: &influxql.StringLiteral{Val: \"host\"},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"region\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"uswest\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG VALUES FROM /<regex>/ WITH KEY = ...\n\t\t{\n\t\t\ts: `SHOW TAG VALUES FROM /[cg]pu/ WITH KEY = host`,\n\t\t\tstmt: &influxql.ShowTagValuesStatement{\n\t\t\t\tSources: []influxql.Source{\n\t\t\t\t\t&influxql.Measurement{\n\t\t\t\t\t\tRegex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tOp:         influxql.EQ,\n\t\t\t\tTagKeyExpr: &influxql.StringLiteral{Val: \"host\"},\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG VALUES WITH KEY = \"...\"\n\t\t{\n\t\t\ts: `SHOW TAG VALUES WITH KEY = \"host\" WHERE region = 'uswest'`,\n\t\t\tstmt: &influxql.ShowTagValuesStatement{\n\t\t\t\tOp:         influxql.EQ,\n\t\t\t\tTagKeyExpr: &influxql.StringLiteral{Val: `host`},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"region\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"uswest\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// SHOW 
TAG VALUES WITH KEY =~ /<regex>/\n\t\t{\n\t\t\ts: `SHOW TAG VALUES WITH KEY =~ /(host|region)/`,\n\t\t\tstmt: &influxql.ShowTagValuesStatement{\n\t\t\t\tOp:         influxql.EQREGEX,\n\t\t\t\tTagKeyExpr: &influxql.RegexLiteral{Val: regexp.MustCompile(`(host|region)`)},\n\t\t\t},\n\t\t},\n\n\t\t// SHOW TAG VALUES ON db0\n\t\t{\n\t\t\ts: `SHOW TAG VALUES ON db0 WITH KEY = \"host\"`,\n\t\t\tstmt: &influxql.ShowTagValuesStatement{\n\t\t\t\tDatabase:   \"db0\",\n\t\t\t\tOp:         influxql.EQ,\n\t\t\t\tTagKeyExpr: &influxql.StringLiteral{Val: \"host\"},\n\t\t\t},\n\t\t},\n\n\t\t// SHOW USERS\n\t\t{\n\t\t\ts:    `SHOW USERS`,\n\t\t\tstmt: &influxql.ShowUsersStatement{},\n\t\t},\n\n\t\t// SHOW FIELD KEYS\n\t\t{\n\t\t\tskip: true,\n\t\t\ts:    `SHOW FIELD KEYS FROM src ORDER BY ASC, field1, field2 DESC LIMIT 10`,\n\t\t\tstmt: &influxql.ShowFieldKeysStatement{\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"src\"}},\n\t\t\t\tSortFields: []*influxql.SortField{\n\t\t\t\t\t{Ascending: true},\n\t\t\t\t\t{Name: \"field1\"},\n\t\t\t\t\t{Name: \"field2\"},\n\t\t\t\t},\n\t\t\t\tLimit: 10,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `SHOW FIELD KEYS FROM /[cg]pu/`,\n\t\t\tstmt: &influxql.ShowFieldKeysStatement{\n\t\t\t\tSources: []influxql.Source{\n\t\t\t\t\t&influxql.Measurement{\n\t\t\t\t\t\tRegex: &influxql.RegexLiteral{Val: regexp.MustCompile(`[cg]pu`)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `SHOW FIELD KEYS ON db0`,\n\t\t\tstmt: &influxql.ShowFieldKeysStatement{\n\t\t\t\tDatabase: \"db0\",\n\t\t\t},\n\t\t},\n\n\t\t// DELETE statement\n\t\t{\n\t\t\ts:    `DELETE FROM src`,\n\t\t\tstmt: &influxql.DeleteSeriesStatement{Sources: []influxql.Source{&influxql.Measurement{Name: \"src\"}}},\n\t\t},\n\t\t{\n\t\t\ts: `DELETE WHERE host = 'hosta.influxdb.org'`,\n\t\t\tstmt: &influxql.DeleteSeriesStatement{\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"host\"},\n\t\t\t\t\tRHS: 
&influxql.StringLiteral{Val: \"hosta.influxdb.org\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `DELETE FROM src WHERE host = 'hosta.influxdb.org'`,\n\t\t\tstmt: &influxql.DeleteSeriesStatement{\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"src\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"host\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"hosta.influxdb.org\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// DROP SERIES statement\n\t\t{\n\t\t\ts:    `DROP SERIES FROM src`,\n\t\t\tstmt: &influxql.DropSeriesStatement{Sources: []influxql.Source{&influxql.Measurement{Name: \"src\"}}},\n\t\t},\n\t\t{\n\t\t\ts: `DROP SERIES WHERE host = 'hosta.influxdb.org'`,\n\t\t\tstmt: &influxql.DropSeriesStatement{\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"host\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"hosta.influxdb.org\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `DROP SERIES FROM src WHERE host = 'hosta.influxdb.org'`,\n\t\t\tstmt: &influxql.DropSeriesStatement{\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"src\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"host\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"hosta.influxdb.org\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// SHOW CONTINUOUS QUERIES statement\n\t\t{\n\t\t\ts:    `SHOW CONTINUOUS QUERIES`,\n\t\t\tstmt: &influxql.ShowContinuousQueriesStatement{},\n\t\t},\n\n\t\t// CREATE CONTINUOUS QUERY ... 
INTO <measurement>\n\t\t{\n\t\t\ts: `CREATE CONTINUOUS QUERY myquery ON testdb RESAMPLE EVERY 1m FOR 1h BEGIN SELECT count(field1) INTO measure1 FROM myseries GROUP BY time(5m) END`,\n\t\t\tstmt: &influxql.CreateContinuousQueryStatement{\n\t\t\t\tName:     \"myquery\",\n\t\t\t\tDatabase: \"testdb\",\n\t\t\t\tSource: &influxql.SelectStatement{\n\t\t\t\t\tFields:  []*influxql.Field{{Expr: &influxql.Call{Name: \"count\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}}}}},\n\t\t\t\t\tTarget:  &influxql.Target{Measurement: &influxql.Measurement{Name: \"measure1\", IsTarget: true}},\n\t\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\t\tDimensions: []*influxql.Dimension{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: 5 * time.Minute},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tResampleEvery: time.Minute,\n\t\t\t\tResampleFor:   time.Hour,\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `CREATE CONTINUOUS QUERY myquery ON testdb RESAMPLE FOR 1h BEGIN SELECT count(field1) INTO measure1 FROM myseries GROUP BY time(5m) END`,\n\t\t\tstmt: &influxql.CreateContinuousQueryStatement{\n\t\t\t\tName:     \"myquery\",\n\t\t\t\tDatabase: \"testdb\",\n\t\t\t\tSource: &influxql.SelectStatement{\n\t\t\t\t\tFields:  []*influxql.Field{{Expr: &influxql.Call{Name: \"count\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}}}}},\n\t\t\t\t\tTarget:  &influxql.Target{Measurement: &influxql.Measurement{Name: \"measure1\", IsTarget: true}},\n\t\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\t\tDimensions: []*influxql.Dimension{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: 5 * 
time.Minute},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tResampleFor: time.Hour,\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `CREATE CONTINUOUS QUERY myquery ON testdb RESAMPLE EVERY 1m BEGIN SELECT count(field1) INTO measure1 FROM myseries GROUP BY time(5m) END`,\n\t\t\tstmt: &influxql.CreateContinuousQueryStatement{\n\t\t\t\tName:     \"myquery\",\n\t\t\t\tDatabase: \"testdb\",\n\t\t\t\tSource: &influxql.SelectStatement{\n\t\t\t\t\tFields:  []*influxql.Field{{Expr: &influxql.Call{Name: \"count\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}}}}},\n\t\t\t\t\tTarget:  &influxql.Target{Measurement: &influxql.Measurement{Name: \"measure1\", IsTarget: true}},\n\t\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\t\tDimensions: []*influxql.Dimension{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: 5 * time.Minute},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tResampleEvery: time.Minute,\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `create continuous query \"this.is-a.test\" on segments begin select * into measure1 from cpu_load_short end`,\n\t\t\tstmt: &influxql.CreateContinuousQueryStatement{\n\t\t\t\tName:     \"this.is-a.test\",\n\t\t\t\tDatabase: \"segments\",\n\t\t\t\tSource: &influxql.SelectStatement{\n\t\t\t\t\tIsRawQuery: true,\n\t\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.Wildcard{}}},\n\t\t\t\t\tTarget:     &influxql.Target{Measurement: &influxql.Measurement{Name: \"measure1\", IsTarget: true}},\n\t\t\t\t\tSources:    []influxql.Source{&influxql.Measurement{Name: \"cpu_load_short\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// CREATE CONTINUOUS QUERY ... 
INTO <retention-policy>.<measurement>\n\t\t{\n\t\t\ts: `CREATE CONTINUOUS QUERY myquery ON testdb BEGIN SELECT count(field1) INTO \"1h.policy1\".\"cpu.load\" FROM myseries GROUP BY time(5m) END`,\n\t\t\tstmt: &influxql.CreateContinuousQueryStatement{\n\t\t\t\tName:     \"myquery\",\n\t\t\t\tDatabase: \"testdb\",\n\t\t\t\tSource: &influxql.SelectStatement{\n\t\t\t\t\tFields: []*influxql.Field{{Expr: &influxql.Call{Name: \"count\", Args: []influxql.Expr{&influxql.VarRef{Val: \"field1\"}}}}},\n\t\t\t\t\tTarget: &influxql.Target{\n\t\t\t\t\t\tMeasurement: &influxql.Measurement{RetentionPolicy: \"1h.policy1\", Name: \"cpu.load\", IsTarget: true},\n\t\t\t\t\t},\n\t\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\t\tDimensions: []*influxql.Dimension{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: 5 * time.Minute},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// CREATE CONTINUOUS QUERY for non-aggregate SELECT stmts\n\t\t{\n\t\t\ts: `CREATE CONTINUOUS QUERY myquery ON testdb BEGIN SELECT value INTO \"policy1\".\"value\" FROM myseries END`,\n\t\t\tstmt: &influxql.CreateContinuousQueryStatement{\n\t\t\t\tName:     \"myquery\",\n\t\t\t\tDatabase: \"testdb\",\n\t\t\t\tSource: &influxql.SelectStatement{\n\t\t\t\t\tIsRawQuery: true,\n\t\t\t\t\tFields:     []*influxql.Field{{Expr: &influxql.VarRef{Val: \"value\"}}},\n\t\t\t\t\tTarget: &influxql.Target{\n\t\t\t\t\t\tMeasurement: &influxql.Measurement{RetentionPolicy: \"policy1\", Name: \"value\", IsTarget: true},\n\t\t\t\t\t},\n\t\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// CREATE CONTINUOUS QUERY for non-aggregate SELECT stmts with multiple values\n\t\t{\n\t\t\ts: `CREATE CONTINUOUS QUERY myquery ON testdb BEGIN SELECT transmit_rx, 
transmit_tx INTO \"policy1\".\"network\" FROM myseries END`,\n\t\t\tstmt: &influxql.CreateContinuousQueryStatement{\n\t\t\t\tName:     \"myquery\",\n\t\t\t\tDatabase: \"testdb\",\n\t\t\t\tSource: &influxql.SelectStatement{\n\t\t\t\t\tIsRawQuery: true,\n\t\t\t\t\tFields: []*influxql.Field{{Expr: &influxql.VarRef{Val: \"transmit_rx\"}},\n\t\t\t\t\t\t{Expr: &influxql.VarRef{Val: \"transmit_tx\"}}},\n\t\t\t\t\tTarget: &influxql.Target{\n\t\t\t\t\t\tMeasurement: &influxql.Measurement{RetentionPolicy: \"policy1\", Name: \"network\", IsTarget: true},\n\t\t\t\t\t},\n\t\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"myseries\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// CREATE CONTINUOUS QUERY with backreference measurement name\n\t\t{\n\t\t\ts: `CREATE CONTINUOUS QUERY myquery ON testdb BEGIN SELECT mean(value) INTO \"policy1\".:measurement FROM /^[a-z]+.*/ GROUP BY time(1m) END`,\n\t\t\tstmt: &influxql.CreateContinuousQueryStatement{\n\t\t\t\tName:     \"myquery\",\n\t\t\t\tDatabase: \"testdb\",\n\t\t\t\tSource: &influxql.SelectStatement{\n\t\t\t\t\tFields: []*influxql.Field{{Expr: &influxql.Call{Name: \"mean\", Args: []influxql.Expr{&influxql.VarRef{Val: \"value\"}}}}},\n\t\t\t\t\tTarget: &influxql.Target{\n\t\t\t\t\t\tMeasurement: &influxql.Measurement{RetentionPolicy: \"policy1\", IsTarget: true},\n\t\t\t\t\t},\n\t\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Regex: &influxql.RegexLiteral{Val: regexp.MustCompile(`^[a-z]+.*`)}}},\n\t\t\t\t\tDimensions: []*influxql.Dimension{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\t\t\t&influxql.DurationLiteral{Val: 1 * time.Minute},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// CREATE DATABASE statement\n\t\t{\n\t\t\ts: `CREATE DATABASE testdb`,\n\t\t\tstmt: &influxql.CreateDatabaseStatement{\n\t\t\t\tName: \"testdb\",\n\t\t\t\tRetentionPolicyCreate: 
false,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `CREATE DATABASE testdb WITH DURATION 24h`,\n\t\t\tstmt: &influxql.CreateDatabaseStatement{\n\t\t\t\tName: \"testdb\",\n\t\t\t\tRetentionPolicyCreate:   true,\n\t\t\t\tRetentionPolicyDuration: duration(24 * time.Hour),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `CREATE DATABASE testdb WITH SHARD DURATION 30m`,\n\t\t\tstmt: &influxql.CreateDatabaseStatement{\n\t\t\t\tName: \"testdb\",\n\t\t\t\tRetentionPolicyCreate:             true,\n\t\t\t\tRetentionPolicyShardGroupDuration: 30 * time.Minute,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `CREATE DATABASE testdb WITH REPLICATION 2`,\n\t\t\tstmt: &influxql.CreateDatabaseStatement{\n\t\t\t\tName: \"testdb\",\n\t\t\t\tRetentionPolicyCreate:      true,\n\t\t\t\tRetentionPolicyReplication: intptr(2),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `CREATE DATABASE testdb WITH NAME test_name`,\n\t\t\tstmt: &influxql.CreateDatabaseStatement{\n\t\t\t\tName: \"testdb\",\n\t\t\t\tRetentionPolicyCreate: true,\n\t\t\t\tRetentionPolicyName:   \"test_name\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `CREATE DATABASE testdb WITH DURATION 24h REPLICATION 2 NAME test_name`,\n\t\t\tstmt: &influxql.CreateDatabaseStatement{\n\t\t\t\tName: \"testdb\",\n\t\t\t\tRetentionPolicyCreate:      true,\n\t\t\t\tRetentionPolicyDuration:    duration(24 * time.Hour),\n\t\t\t\tRetentionPolicyReplication: intptr(2),\n\t\t\t\tRetentionPolicyName:        \"test_name\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `CREATE DATABASE testdb WITH DURATION 24h REPLICATION 2 SHARD DURATION 10m NAME test_name `,\n\t\t\tstmt: &influxql.CreateDatabaseStatement{\n\t\t\t\tName: \"testdb\",\n\t\t\t\tRetentionPolicyCreate:             true,\n\t\t\t\tRetentionPolicyDuration:           duration(24 * time.Hour),\n\t\t\t\tRetentionPolicyReplication:        intptr(2),\n\t\t\t\tRetentionPolicyName:               \"test_name\",\n\t\t\t\tRetentionPolicyShardGroupDuration: 10 * time.Minute,\n\t\t\t},\n\t\t},\n\n\t\t// CREATE USER statement\n\t\t{\n\t\t\ts: `CREATE USER testuser 
WITH PASSWORD 'pwd1337'`,\n\t\t\tstmt: &influxql.CreateUserStatement{\n\t\t\t\tName:     \"testuser\",\n\t\t\t\tPassword: \"pwd1337\",\n\t\t\t},\n\t\t},\n\n\t\t// CREATE USER ... WITH ALL PRIVILEGES\n\t\t{\n\t\t\ts: `CREATE USER testuser WITH PASSWORD 'pwd1337' WITH ALL PRIVILEGES`,\n\t\t\tstmt: &influxql.CreateUserStatement{\n\t\t\t\tName:     \"testuser\",\n\t\t\t\tPassword: \"pwd1337\",\n\t\t\t\tAdmin:    true,\n\t\t\t},\n\t\t},\n\n\t\t// SET PASSWORD FOR USER\n\t\t{\n\t\t\ts: `SET PASSWORD FOR testuser = 'pwd1337'`,\n\t\t\tstmt: &influxql.SetPasswordUserStatement{\n\t\t\t\tName:     \"testuser\",\n\t\t\t\tPassword: \"pwd1337\",\n\t\t\t},\n\t\t},\n\n\t\t// DROP CONTINUOUS QUERY statement\n\t\t{\n\t\t\ts:    `DROP CONTINUOUS QUERY myquery ON foo`,\n\t\t\tstmt: &influxql.DropContinuousQueryStatement{Name: \"myquery\", Database: \"foo\"},\n\t\t},\n\n\t\t// DROP DATABASE statement\n\t\t{\n\t\t\ts: `DROP DATABASE testdb`,\n\t\t\tstmt: &influxql.DropDatabaseStatement{\n\t\t\t\tName: \"testdb\",\n\t\t\t},\n\t\t},\n\n\t\t// DROP MEASUREMENT statement\n\t\t{\n\t\t\ts:    `DROP MEASUREMENT cpu`,\n\t\t\tstmt: &influxql.DropMeasurementStatement{Name: \"cpu\"},\n\t\t},\n\n\t\t// DROP RETENTION POLICY\n\t\t{\n\t\t\ts: `DROP RETENTION POLICY \"1h.cpu\" ON mydb`,\n\t\t\tstmt: &influxql.DropRetentionPolicyStatement{\n\t\t\t\tName:     `1h.cpu`,\n\t\t\t\tDatabase: `mydb`,\n\t\t\t},\n\t\t},\n\n\t\t// DROP USER statement\n\t\t{\n\t\t\ts:    `DROP USER jdoe`,\n\t\t\tstmt: &influxql.DropUserStatement{Name: \"jdoe\"},\n\t\t},\n\n\t\t// GRANT READ\n\t\t{\n\t\t\ts: `GRANT READ ON testdb TO jdoe`,\n\t\t\tstmt: &influxql.GrantStatement{\n\t\t\t\tPrivilege: influxql.ReadPrivilege,\n\t\t\t\tOn:        \"testdb\",\n\t\t\t\tUser:      \"jdoe\",\n\t\t\t},\n\t\t},\n\n\t\t// GRANT WRITE\n\t\t{\n\t\t\ts: `GRANT WRITE ON testdb TO jdoe`,\n\t\t\tstmt: &influxql.GrantStatement{\n\t\t\t\tPrivilege: influxql.WritePrivilege,\n\t\t\t\tOn:        \"testdb\",\n\t\t\t\tUser:      
\"jdoe\",\n\t\t\t},\n\t\t},\n\n\t\t// GRANT ALL\n\t\t{\n\t\t\ts: `GRANT ALL ON testdb TO jdoe`,\n\t\t\tstmt: &influxql.GrantStatement{\n\t\t\t\tPrivilege: influxql.AllPrivileges,\n\t\t\t\tOn:        \"testdb\",\n\t\t\t\tUser:      \"jdoe\",\n\t\t\t},\n\t\t},\n\n\t\t// GRANT ALL PRIVILEGES\n\t\t{\n\t\t\ts: `GRANT ALL PRIVILEGES ON testdb TO jdoe`,\n\t\t\tstmt: &influxql.GrantStatement{\n\t\t\t\tPrivilege: influxql.AllPrivileges,\n\t\t\t\tOn:        \"testdb\",\n\t\t\t\tUser:      \"jdoe\",\n\t\t\t},\n\t\t},\n\n\t\t// GRANT ALL admin privilege\n\t\t{\n\t\t\ts: `GRANT ALL TO jdoe`,\n\t\t\tstmt: &influxql.GrantAdminStatement{\n\t\t\t\tUser: \"jdoe\",\n\t\t\t},\n\t\t},\n\n\t\t// GRANT ALL PRVILEGES admin privilege\n\t\t{\n\t\t\ts: `GRANT ALL PRIVILEGES TO jdoe`,\n\t\t\tstmt: &influxql.GrantAdminStatement{\n\t\t\t\tUser: \"jdoe\",\n\t\t\t},\n\t\t},\n\n\t\t// REVOKE READ\n\t\t{\n\t\t\ts: `REVOKE READ on testdb FROM jdoe`,\n\t\t\tstmt: &influxql.RevokeStatement{\n\t\t\t\tPrivilege: influxql.ReadPrivilege,\n\t\t\t\tOn:        \"testdb\",\n\t\t\t\tUser:      \"jdoe\",\n\t\t\t},\n\t\t},\n\n\t\t// REVOKE WRITE\n\t\t{\n\t\t\ts: `REVOKE WRITE ON testdb FROM jdoe`,\n\t\t\tstmt: &influxql.RevokeStatement{\n\t\t\t\tPrivilege: influxql.WritePrivilege,\n\t\t\t\tOn:        \"testdb\",\n\t\t\t\tUser:      \"jdoe\",\n\t\t\t},\n\t\t},\n\n\t\t// REVOKE ALL\n\t\t{\n\t\t\ts: `REVOKE ALL ON testdb FROM jdoe`,\n\t\t\tstmt: &influxql.RevokeStatement{\n\t\t\t\tPrivilege: influxql.AllPrivileges,\n\t\t\t\tOn:        \"testdb\",\n\t\t\t\tUser:      \"jdoe\",\n\t\t\t},\n\t\t},\n\n\t\t// REVOKE ALL PRIVILEGES\n\t\t{\n\t\t\ts: `REVOKE ALL PRIVILEGES ON testdb FROM jdoe`,\n\t\t\tstmt: &influxql.RevokeStatement{\n\t\t\t\tPrivilege: influxql.AllPrivileges,\n\t\t\t\tOn:        \"testdb\",\n\t\t\t\tUser:      \"jdoe\",\n\t\t\t},\n\t\t},\n\n\t\t// REVOKE ALL admin privilege\n\t\t{\n\t\t\ts: `REVOKE ALL FROM jdoe`,\n\t\t\tstmt: &influxql.RevokeAdminStatement{\n\t\t\t\tUser: 
\"jdoe\",\n\t\t\t},\n\t\t},\n\n\t\t// REVOKE ALL PRIVILEGES admin privilege\n\t\t{\n\t\t\ts: `REVOKE ALL PRIVILEGES FROM jdoe`,\n\t\t\tstmt: &influxql.RevokeAdminStatement{\n\t\t\t\tUser: \"jdoe\",\n\t\t\t},\n\t\t},\n\n\t\t// CREATE RETENTION POLICY\n\t\t{\n\t\t\ts: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 2`,\n\t\t\tstmt: &influxql.CreateRetentionPolicyStatement{\n\t\t\t\tName:        \"policy1\",\n\t\t\t\tDatabase:    \"testdb\",\n\t\t\t\tDuration:    time.Hour,\n\t\t\t\tReplication: 2,\n\t\t\t},\n\t\t},\n\n\t\t// CREATE RETENTION POLICY with infinite retention\n\t\t{\n\t\t\ts: `CREATE RETENTION POLICY policy1 ON testdb DURATION INF REPLICATION 2`,\n\t\t\tstmt: &influxql.CreateRetentionPolicyStatement{\n\t\t\t\tName:        \"policy1\",\n\t\t\t\tDatabase:    \"testdb\",\n\t\t\t\tDuration:    0,\n\t\t\t\tReplication: 2,\n\t\t\t},\n\t\t},\n\n\t\t// CREATE RETENTION POLICY ... DEFAULT\n\t\t{\n\t\t\ts: `CREATE RETENTION POLICY policy1 ON testdb DURATION 2m REPLICATION 4 DEFAULT`,\n\t\t\tstmt: &influxql.CreateRetentionPolicyStatement{\n\t\t\t\tName:        \"policy1\",\n\t\t\t\tDatabase:    \"testdb\",\n\t\t\t\tDuration:    2 * time.Minute,\n\t\t\t\tReplication: 4,\n\t\t\t\tDefault:     true,\n\t\t\t},\n\t\t},\n\t\t// CREATE RETENTION POLICY\n\t\t{\n\t\t\ts: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 2 SHARD DURATION 30m`,\n\t\t\tstmt: &influxql.CreateRetentionPolicyStatement{\n\t\t\t\tName:               \"policy1\",\n\t\t\t\tDatabase:           \"testdb\",\n\t\t\t\tDuration:           time.Hour,\n\t\t\t\tReplication:        2,\n\t\t\t\tShardGroupDuration: 30 * time.Minute,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 2 SHARD DURATION 0s`,\n\t\t\tstmt: &influxql.CreateRetentionPolicyStatement{\n\t\t\t\tName:               \"policy1\",\n\t\t\t\tDatabase:           \"testdb\",\n\t\t\t\tDuration:           time.Hour,\n\t\t\t\tReplication:        
2,\n\t\t\t\tShardGroupDuration: 0,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 2 SHARD DURATION 1s`,\n\t\t\tstmt: &influxql.CreateRetentionPolicyStatement{\n\t\t\t\tName:               \"policy1\",\n\t\t\t\tDatabase:           \"testdb\",\n\t\t\t\tDuration:           time.Hour,\n\t\t\t\tReplication:        2,\n\t\t\t\tShardGroupDuration: time.Second,\n\t\t\t},\n\t\t},\n\n\t\t// ALTER RETENTION POLICY\n\t\t{\n\t\t\ts:    `ALTER RETENTION POLICY policy1 ON testdb DURATION 1m REPLICATION 4 DEFAULT`,\n\t\t\tstmt: newAlterRetentionPolicyStatement(\"policy1\", \"testdb\", time.Minute, -1, 4, true),\n\t\t},\n\n\t\t// ALTER RETENTION POLICY with options in reverse order\n\t\t{\n\t\t\ts:    `ALTER RETENTION POLICY policy1 ON testdb DEFAULT REPLICATION 4 DURATION 1m`,\n\t\t\tstmt: newAlterRetentionPolicyStatement(\"policy1\", \"testdb\", time.Minute, -1, 4, true),\n\t\t},\n\n\t\t// ALTER RETENTION POLICY with infinite retention\n\t\t{\n\t\t\ts:    `ALTER RETENTION POLICY policy1 ON testdb DEFAULT REPLICATION 4 DURATION INF`,\n\t\t\tstmt: newAlterRetentionPolicyStatement(\"policy1\", \"testdb\", 0, -1, 4, true),\n\t\t},\n\n\t\t// ALTER RETENTION POLICY without optional DURATION\n\t\t{\n\t\t\ts:    `ALTER RETENTION POLICY policy1 ON testdb DEFAULT REPLICATION 4`,\n\t\t\tstmt: newAlterRetentionPolicyStatement(\"policy1\", \"testdb\", -1, -1, 4, true),\n\t\t},\n\n\t\t// ALTER RETENTION POLICY without optional REPLICATION\n\t\t{\n\t\t\ts:    `ALTER RETENTION POLICY policy1 ON testdb DEFAULT`,\n\t\t\tstmt: newAlterRetentionPolicyStatement(\"policy1\", \"testdb\", -1, -1, -1, true),\n\t\t},\n\n\t\t// ALTER RETENTION POLICY without optional DEFAULT\n\t\t{\n\t\t\ts:    `ALTER RETENTION POLICY policy1 ON testdb REPLICATION 4`,\n\t\t\tstmt: newAlterRetentionPolicyStatement(\"policy1\", \"testdb\", -1, -1, 4, false),\n\t\t},\n\t\t// ALTER default retention policy unquoted\n\t\t{\n\t\t\ts:    `ALTER RETENTION POLICY default ON 
testdb REPLICATION 4`,\n\t\t\tstmt: newAlterRetentionPolicyStatement(\"default\", \"testdb\", -1, -1, 4, false),\n\t\t},\n\t\t// ALTER RETENTION POLICY with SHARD duration\n\t\t{\n\t\t\ts:    `ALTER RETENTION POLICY policy1 ON testdb REPLICATION 4 SHARD DURATION 10m`,\n\t\t\tstmt: newAlterRetentionPolicyStatement(\"policy1\", \"testdb\", -1, 10*time.Minute, 4, false),\n\t\t},\n\t\t// ALTER RETENTION POLICY with all options\n\t\t{\n\t\t\ts:    `ALTER RETENTION POLICY default ON testdb DURATION 0s REPLICATION 4 SHARD DURATION 10m DEFAULT`,\n\t\t\tstmt: newAlterRetentionPolicyStatement(\"default\", \"testdb\", time.Duration(0), 10*time.Minute, 4, true),\n\t\t},\n\t\t// ALTER RETENTION POLICY with 0s shard duration\n\t\t{\n\t\t\ts:    `ALTER RETENTION POLICY default ON testdb DURATION 0s REPLICATION 1 SHARD DURATION 0s`,\n\t\t\tstmt: newAlterRetentionPolicyStatement(\"default\", \"testdb\", time.Duration(0), 0, 1, false),\n\t\t},\n\n\t\t// SHOW STATS\n\t\t{\n\t\t\ts: `SHOW STATS`,\n\t\t\tstmt: &influxql.ShowStatsStatement{\n\t\t\t\tModule: \"\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `SHOW STATS FOR 'cluster'`,\n\t\t\tstmt: &influxql.ShowStatsStatement{\n\t\t\t\tModule: \"cluster\",\n\t\t\t},\n\t\t},\n\n\t\t// SHOW SHARD GROUPS\n\t\t{\n\t\t\ts:    `SHOW SHARD GROUPS`,\n\t\t\tstmt: &influxql.ShowShardGroupsStatement{},\n\t\t},\n\n\t\t// SHOW SHARDS\n\t\t{\n\t\t\ts:    `SHOW SHARDS`,\n\t\t\tstmt: &influxql.ShowShardsStatement{},\n\t\t},\n\n\t\t// SHOW DIAGNOSTICS\n\t\t{\n\t\t\ts:    `SHOW DIAGNOSTICS`,\n\t\t\tstmt: &influxql.ShowDiagnosticsStatement{},\n\t\t},\n\t\t{\n\t\t\ts: `SHOW DIAGNOSTICS FOR 'build'`,\n\t\t\tstmt: &influxql.ShowDiagnosticsStatement{\n\t\t\t\tModule: \"build\",\n\t\t\t},\n\t\t},\n\n\t\t// CREATE SUBSCRIPTION\n\t\t{\n\t\t\ts: `CREATE SUBSCRIPTION \"name\" ON \"db\".\"rp\" DESTINATIONS ANY 'udp://host1:9093', 'udp://host2:9093'`,\n\t\t\tstmt: &influxql.CreateSubscriptionStatement{\n\t\t\t\tName:            \"name\",\n\t\t\t\tDatabase:        
\"db\",\n\t\t\t\tRetentionPolicy: \"rp\",\n\t\t\t\tDestinations:    []string{\"udp://host1:9093\", \"udp://host2:9093\"},\n\t\t\t\tMode:            \"ANY\",\n\t\t\t},\n\t\t},\n\n\t\t// DROP SUBSCRIPTION\n\t\t{\n\t\t\ts: `DROP SUBSCRIPTION \"name\" ON \"db\".\"rp\"`,\n\t\t\tstmt: &influxql.DropSubscriptionStatement{\n\t\t\t\tName:            \"name\",\n\t\t\t\tDatabase:        \"db\",\n\t\t\t\tRetentionPolicy: \"rp\",\n\t\t\t},\n\t\t},\n\n\t\t// SHOW SUBSCRIPTIONS\n\t\t{\n\t\t\ts:    `SHOW SUBSCRIPTIONS`,\n\t\t\tstmt: &influxql.ShowSubscriptionsStatement{},\n\t\t},\n\n\t\t// Errors\n\t\t{s: ``, err: `found EOF, expected SELECT, DELETE, SHOW, CREATE, DROP, GRANT, REVOKE, ALTER, SET, KILL at line 1, char 1`},\n\t\t{s: `SELECT`, err: `found EOF, expected identifier, string, number, bool at line 1, char 8`},\n\t\t{s: `SELECT time FROM myseries`, err: `at least 1 non-time field must be queried`},\n\t\t{s: `blah blah`, err: `found blah, expected SELECT, DELETE, SHOW, CREATE, DROP, GRANT, REVOKE, ALTER, SET, KILL at line 1, char 1`},\n\t\t{s: `SELECT field1 X`, err: `found X, expected FROM at line 1, char 15`},\n\t\t{s: `SELECT field1 FROM \"series\" WHERE X +;`, err: `found ;, expected identifier, string, number, bool at line 1, char 38`},\n\t\t{s: `SELECT field1 FROM myseries GROUP`, err: `found EOF, expected BY at line 1, char 35`},\n\t\t{s: `SELECT field1 FROM myseries LIMIT`, err: `found EOF, expected integer at line 1, char 35`},\n\t\t{s: `SELECT field1 FROM myseries LIMIT 10.5`, err: `found 10.5, expected integer at line 1, char 35`},\n\t\t{s: `SELECT count(max(value)) FROM myseries`, err: `expected field argument in count()`},\n\t\t{s: `SELECT count(distinct('value')) FROM myseries`, err: `expected field argument in distinct()`},\n\t\t{s: `SELECT distinct('value') FROM myseries`, err: `expected field argument in distinct()`},\n\t\t{s: `SELECT min(max(value)) FROM myseries`, err: `expected field argument in min()`},\n\t\t{s: `SELECT min(distinct(value)) FROM 
myseries`, err: `expected field argument in min()`},\n\t\t{s: `SELECT max(max(value)) FROM myseries`, err: `expected field argument in max()`},\n\t\t{s: `SELECT sum(max(value)) FROM myseries`, err: `expected field argument in sum()`},\n\t\t{s: `SELECT first(max(value)) FROM myseries`, err: `expected field argument in first()`},\n\t\t{s: `SELECT last(max(value)) FROM myseries`, err: `expected field argument in last()`},\n\t\t{s: `SELECT mean(max(value)) FROM myseries`, err: `expected field argument in mean()`},\n\t\t{s: `SELECT median(max(value)) FROM myseries`, err: `expected field argument in median()`},\n\t\t{s: `SELECT mode(max(value)) FROM myseries`, err: `expected field argument in mode()`},\n\t\t{s: `SELECT stddev(max(value)) FROM myseries`, err: `expected field argument in stddev()`},\n\t\t{s: `SELECT spread(max(value)) FROM myseries`, err: `expected field argument in spread()`},\n\t\t{s: `SELECT top() FROM myseries`, err: `invalid number of arguments for top, expected at least 2, got 0`},\n\t\t{s: `SELECT top(field1) FROM myseries`, err: `invalid number of arguments for top, expected at least 2, got 1`},\n\t\t{s: `SELECT top(field1,foo) FROM myseries`, err: `expected integer as last argument in top(), found foo`},\n\t\t{s: `SELECT top(field1,host,'server',foo) FROM myseries`, err: `expected integer as last argument in top(), found foo`},\n\t\t{s: `SELECT top(field1,5,'server',2) FROM myseries`, err: `only fields or tags are allowed in top(), found 5`},\n\t\t{s: `SELECT top(field1,max(foo),'server',2) FROM myseries`, err: `only fields or tags are allowed in top(), found max(foo)`},\n\t\t{s: `SELECT top(value, 10) + count(value) FROM myseries`, err: `cannot use top() inside of a binary expression`},\n\t\t{s: `SELECT top(max(value), 10) FROM myseries`, err: `only fields or tags are allowed in top(), found max(value)`},\n\t\t{s: `SELECT bottom() FROM myseries`, err: `invalid number of arguments for bottom, expected at least 2, got 0`},\n\t\t{s: `SELECT 
bottom(field1) FROM myseries`, err: `invalid number of arguments for bottom, expected at least 2, got 1`},\n\t\t{s: `SELECT bottom(field1,foo) FROM myseries`, err: `expected integer as last argument in bottom(), found foo`},\n\t\t{s: `SELECT bottom(field1,host,'server',foo) FROM myseries`, err: `expected integer as last argument in bottom(), found foo`},\n\t\t{s: `SELECT bottom(field1,5,'server',2) FROM myseries`, err: `only fields or tags are allowed in bottom(), found 5`},\n\t\t{s: `SELECT bottom(field1,max(foo),'server',2) FROM myseries`, err: `only fields or tags are allowed in bottom(), found max(foo)`},\n\t\t{s: `SELECT bottom(value, 10) + count(value) FROM myseries`, err: `cannot use bottom() inside of a binary expression`},\n\t\t{s: `SELECT bottom(max(value), 10) FROM myseries`, err: `only fields or tags are allowed in bottom(), found max(value)`},\n\t\t{s: `SELECT percentile() FROM myseries`, err: `invalid number of arguments for percentile, expected 2, got 0`},\n\t\t{s: `SELECT percentile(field1) FROM myseries`, err: `invalid number of arguments for percentile, expected 2, got 1`},\n\t\t{s: `SELECT percentile(field1, foo) FROM myseries`, err: `expected float argument in percentile()`},\n\t\t{s: `SELECT percentile(max(field1), 75) FROM myseries`, err: `expected field argument in percentile()`},\n\t\t{s: `SELECT field1 FROM myseries OFFSET`, err: `found EOF, expected integer at line 1, char 36`},\n\t\t{s: `SELECT field1 FROM myseries OFFSET 10.5`, err: `found 10.5, expected integer at line 1, char 36`},\n\t\t{s: `SELECT field1 FROM myseries ORDER`, err: `found EOF, expected BY at line 1, char 35`},\n\t\t{s: `SELECT field1 FROM myseries ORDER BY`, err: `found EOF, expected identifier, ASC, DESC at line 1, char 38`},\n\t\t{s: `SELECT field1 FROM myseries ORDER BY /`, err: `found /, expected identifier, ASC, DESC at line 1, char 38`},\n\t\t{s: `SELECT field1 FROM myseries ORDER BY 1`, err: `found 1, expected identifier, ASC, DESC at line 1, char 38`},\n\t\t{s: 
`SELECT field1 FROM myseries ORDER BY time ASC,`, err: `found EOF, expected identifier at line 1, char 47`},\n\t\t{s: `SELECT field1 FROM myseries ORDER BY time, field1`, err: `only ORDER BY time supported at this time`},\n\t\t{s: `SELECT field1 AS`, err: `found EOF, expected identifier at line 1, char 18`},\n\t\t{s: `SELECT field1 FROM foo group by time(1s)`, err: `GROUP BY requires at least one aggregate function`},\n\t\t{s: `SELECT field1 FROM foo fill(none)`, err: `fill(none) must be used with a function`},\n\t\t{s: `SELECT field1 FROM foo fill(linear)`, err: `fill(linear) must be used with a function`},\n\t\t{s: `SELECT count(value), value FROM foo`, err: `mixing aggregate and non-aggregate queries is not supported`},\n\t\t{s: `SELECT count(value)/10, value FROM foo`, err: `mixing aggregate and non-aggregate queries is not supported`},\n\t\t{s: `SELECT count(value) FROM foo group by time(1s)`, err: `aggregate functions with GROUP BY time require a WHERE time clause`},\n\t\t{s: `SELECT count(value) FROM foo group by time(500ms)`, err: `aggregate functions with GROUP BY time require a WHERE time clause`},\n\t\t{s: `SELECT count(value) FROM foo group by time(1s) where host = 'hosta.influxdb.org'`, err: `aggregate functions with GROUP BY time require a WHERE time clause`},\n\t\t{s: `SELECT count(value) FROM foo group by time`, err: `time() is a function and expects at least one argument`},\n\t\t{s: `SELECT count(value) FROM foo group by 'time'`, err: `only time and tag dimensions allowed`},\n\t\t{s: `SELECT count(value) FROM foo where time > now() and time < now() group by time()`, err: `time dimension expected 1 or 2 arguments`},\n\t\t{s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(b)`, err: `time dimension must have duration argument`},\n\t\t{s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(1s), time(2s)`, err: `multiple time dimensions not allowed`},\n\t\t{s: `SELECT count(value) FROM foo 
where time > now() and time < now() group by time(1s, b)`, err: `time dimension offset must be duration or now()`},\n\t\t{s: `SELECT field1 FROM 12`, err: `found 12, expected identifier at line 1, char 20`},\n\t\t{s: `SELECT 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 FROM myseries`, err: `unable to parse integer at line 1, char 8`},\n\t\t{s: `SELECT 10.5h FROM myseries`, err: `found h, expected FROM at line 1, char 12`},\n\t\t{s: `SELECT distinct(field1), sum(field1) FROM myseries`, err: `aggregate function distinct() cannot be combined with other functions or fields`},\n\t\t{s: `SELECT distinct(field1), field2 FROM myseries`, err: `aggregate function distinct() cannot be combined with other functions or fields`},\n\t\t{s: `SELECT distinct(field1, field2) FROM myseries`, err: `distinct function can only have one argument`},\n\t\t{s: `SELECT distinct() FROM myseries`, err: `distinct function requires at least one argument`},\n\t\t{s: `SELECT distinct FROM myseries`, err: `found FROM, expected identifier at line 1, char 17`},\n\t\t{s: `SELECT distinct field1, field2 FROM myseries`, err: `aggregate function distinct() cannot be combined with other functions or fields`},\n\t\t{s: `SELECT count(distinct) FROM myseries`, err: `found ), expected (, identifier at line 1, char 22`},\n\t\t{s: `SELECT count(distinct field1, field2) FROM myseries`, err: `count(distinct <field>) can only have one argument`},\n\t\t{s: `select count(distinct(too, many, arguments)) from myseries`, err: `count(distinct <field>) can only 
have one argument`},\n\t\t{s: `select count() from myseries`, err: `invalid number of arguments for count, expected 1, got 0`},\n\t\t{s: `SELECT derivative(), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`},\n\t\t{s: `select derivative() from myseries`, err: `invalid number of arguments for derivative, expected at least 1 but no more than 2, got 0`},\n\t\t{s: `select derivative(mean(value), 1h, 3) from myseries`, err: `invalid number of arguments for derivative, expected at least 1 but no more than 2, got 3`},\n\t\t{s: `SELECT derivative(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to derivative`},\n\t\t{s: `SELECT derivative(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`},\n\t\t{s: `SELECT derivative(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`},\n\t\t{s: `SELECT derivative(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`},\n\t\t{s: `SELECT derivative(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`},\n\t\t{s: `SELECT derivative(mean(value), 1h) FROM myseries where time < now() and time > now() - 1d`, err: `derivative aggregate requires a GROUP BY interval`},\n\t\t{s: `SELECT min(derivative) FROM (SELECT derivative(mean(value), 1h) FROM myseries) where time < now() and time > now() - 1d`, err: `derivative aggregate requires a GROUP BY interval`},\n\t\t{s: `SELECT non_negative_derivative(), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`},\n\t\t{s: `select non_negative_derivative() from myseries`, err: 
`invalid number of arguments for non_negative_derivative, expected at least 1 but no more than 2, got 0`},\n\t\t{s: `select non_negative_derivative(mean(value), 1h, 3) from myseries`, err: `invalid number of arguments for non_negative_derivative, expected at least 1 but no more than 2, got 3`},\n\t\t{s: `SELECT non_negative_derivative(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to non_negative_derivative`},\n\t\t{s: `SELECT non_negative_derivative(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`},\n\t\t{s: `SELECT non_negative_derivative(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`},\n\t\t{s: `SELECT non_negative_derivative(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`},\n\t\t{s: `SELECT non_negative_derivative(mean(value), 1h) FROM myseries where time < now() and time > now() - 1d`, err: `non_negative_derivative aggregate requires a GROUP BY interval`},\n\t\t{s: `SELECT non_negative_derivative(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`},\n\t\t{s: `SELECT difference(), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`},\n\t\t{s: `SELECT difference() from myseries`, err: `invalid number of arguments for difference, expected 1, got 0`},\n\t\t{s: `SELECT difference(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to difference`},\n\t\t{s: `SELECT difference(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 
2, got 1`},\n\t\t{s: `SELECT difference(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`},\n\t\t{s: `SELECT difference(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`},\n\t\t{s: `SELECT difference(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`},\n\t\t{s: `SELECT difference(mean(value)) FROM myseries where time < now() and time > now() - 1d`, err: `difference aggregate requires a GROUP BY interval`},\n\t\t{s: `SELECT moving_average(), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`},\n\t\t{s: `SELECT moving_average() from myseries`, err: `invalid number of arguments for moving_average, expected 2, got 0`},\n\t\t{s: `SELECT moving_average(value) FROM myseries`, err: `invalid number of arguments for moving_average, expected 2, got 1`},\n\t\t{s: `SELECT moving_average(value, 2) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to moving_average`},\n\t\t{s: `SELECT moving_average(top(value), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`},\n\t\t{s: `SELECT moving_average(bottom(value), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`},\n\t\t{s: `SELECT moving_average(max(), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`},\n\t\t{s: `SELECT moving_average(percentile(value), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments 
for percentile, expected 2, got 1`},\n\t\t{s: `SELECT moving_average(mean(value), 2) FROM myseries where time < now() and time > now() - 1d`, err: `moving_average aggregate requires a GROUP BY interval`},\n\t\t{s: `SELECT cumulative_sum(), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`},\n\t\t{s: `SELECT cumulative_sum() from myseries`, err: `invalid number of arguments for cumulative_sum, expected 1, got 0`},\n\t\t{s: `SELECT cumulative_sum(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to cumulative_sum`},\n\t\t{s: `SELECT cumulative_sum(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`},\n\t\t{s: `SELECT cumulative_sum(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`},\n\t\t{s: `SELECT cumulative_sum(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`},\n\t\t{s: `SELECT cumulative_sum(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`},\n\t\t{s: `SELECT cumulative_sum(mean(value)) FROM myseries where time < now() and time > now() - 1d`, err: `cumulative_sum aggregate requires a GROUP BY interval`},\n\t\t{s: `SELECT holt_winters(value) FROM myseries where time < now() and time > now() - 1d`, err: `invalid number of arguments for holt_winters, expected 3, got 1`},\n\t\t{s: `SELECT holt_winters(value, 10, 2) FROM myseries where time < now() and time > now() - 1d`, err: `must use aggregate function with holt_winters`},\n\t\t{s: `SELECT holt_winters(min(value), 10, 2) FROM myseries where time < now() and time > now() - 1d`, err: `holt_winters aggregate requires a 
GROUP BY interval`},\n\t\t{s: `SELECT holt_winters(min(value), 0, 2) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `second arg to holt_winters must be greater than 0, got 0`},\n\t\t{s: `SELECT holt_winters(min(value), false, 2) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `expected integer argument as second arg in holt_winters`},\n\t\t{s: `SELECT holt_winters(min(value), 10, 'string') FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `expected integer argument as third arg in holt_winters`},\n\t\t{s: `SELECT field1 from myseries WHERE host =~ 'asd' LIMIT 1`, err: `found asd, expected regex at line 1, char 42`},\n\t\t{s: `SELECT value > 2 FROM cpu`, err: `invalid operator > in SELECT clause at line 1, char 8; operator is intended for WHERE clause`},\n\t\t{s: `SELECT value = 2 FROM cpu`, err: `invalid operator = in SELECT clause at line 1, char 8; operator is intended for WHERE clause`},\n\t\t{s: `SELECT s =~ /foo/ FROM cpu`, err: `invalid operator =~ in SELECT clause at line 1, char 8; operator is intended for WHERE clause`},\n\t\t{s: `SELECT mean(value) + value FROM cpu WHERE time < now() and time > now() - 1h GROUP BY time(10m)`, err: `binary expressions cannot mix aggregates and raw fields`},\n\t\t// TODO: Remove this restriction in the future: https://github.com/influxdata/influxdb/issues/5968\n\t\t{s: `SELECT mean(cpu_total - cpu_idle) FROM cpu`, err: `expected field argument in mean()`},\n\t\t{s: `SELECT derivative(mean(cpu_total - cpu_idle), 1s) FROM cpu WHERE time < now() AND time > now() - 1d GROUP BY time(1h)`, err: `expected field argument in mean()`},\n\t\t// TODO: The error message will change when math is allowed inside an aggregate: https://github.com/influxdata/influxdb/pull/5990#issuecomment-195565870\n\t\t{s: `SELECT count(foo + sum(bar)) FROM cpu`, err: `expected field argument in count()`},\n\t\t{s: `SELECT (count(foo + sum(bar))) FROM cpu`, err: 
`expected field argument in count()`},\n\t\t{s: `SELECT sum(value) + count(foo + sum(bar)) FROM cpu`, err: `binary expressions cannot mix aggregates and raw fields`},\n\t\t{s: `SELECT mean(value) FROM cpu FILL + value`, err: `fill must be a function call`},\n\t\t{s: `SELECT sum(mean) FROM (SELECT mean(value) FROM cpu GROUP BY time(1h))`, err: `aggregate functions with GROUP BY time require a WHERE time clause`},\n\t\t{s: `SELECT top(value, 2), max(value) FROM cpu`, err: `selector function top() cannot be combined with other functions`},\n\t\t{s: `SELECT bottom(value, 2), max(value) FROM cpu`, err: `selector function bottom() cannot be combined with other functions`},\n\t\t// See issues https://github.com/influxdata/influxdb/issues/1647\n\t\t// and https://github.com/influxdata/influxdb/issues/4404\n\t\t//{s: `DELETE`, err: `found EOF, expected FROM at line 1, char 8`},\n\t\t//{s: `DELETE FROM`, err: `found EOF, expected identifier at line 1, char 13`},\n\t\t//{s: `DELETE FROM myseries WHERE`, err: `found EOF, expected identifier, string, number, bool at line 1, char 28`},\n\t\t{s: `DELETE`, err: `found EOF, expected FROM, WHERE at line 1, char 8`},\n\t\t{s: `DELETE FROM`, err: `found EOF, expected identifier at line 1, char 13`},\n\t\t{s: `DELETE FROM myseries WHERE`, err: `found EOF, expected identifier, string, number, bool at line 1, char 28`},\n\t\t{s: `DELETE FROM \"foo\".myseries`, err: `retention policy not supported at line 1, char 1`},\n\t\t{s: `DELETE FROM foo..myseries`, err: `database not supported at line 1, char 1`},\n\t\t{s: `DROP MEASUREMENT`, err: `found EOF, expected identifier at line 1, char 18`},\n\t\t{s: `DROP SERIES`, err: `found EOF, expected FROM, WHERE at line 1, char 13`},\n\t\t{s: `DROP SERIES FROM`, err: `found EOF, expected identifier at line 1, char 18`},\n\t\t{s: `DROP SERIES FROM src WHERE`, err: `found EOF, expected identifier, string, number, bool at line 1, char 28`},\n\t\t{s: `DROP SERIES FROM \"foo\".myseries`, err: `retention 
policy not supported at line 1, char 1`},\n\t\t{s: `DROP SERIES FROM foo..myseries`, err: `database not supported at line 1, char 1`},\n\t\t{s: `SHOW CONTINUOUS`, err: `found EOF, expected QUERIES at line 1, char 17`},\n\t\t{s: `SHOW RETENTION`, err: `found EOF, expected POLICIES at line 1, char 16`},\n\t\t{s: `SHOW RETENTION ON`, err: `found ON, expected POLICIES at line 1, char 16`},\n\t\t{s: `SHOW RETENTION POLICIES ON`, err: `found EOF, expected identifier at line 1, char 28`},\n\t\t{s: `SHOW SHARD`, err: `found EOF, expected GROUPS at line 1, char 12`},\n\t\t{s: `SHOW FOO`, err: `found FOO, expected CONTINUOUS, DATABASES, DIAGNOSTICS, FIELD, GRANTS, MEASUREMENTS, QUERIES, RETENTION, SERIES, SHARD, SHARDS, STATS, SUBSCRIPTIONS, TAG, USERS at line 1, char 6`},\n\t\t{s: `SHOW STATS FOR`, err: `found EOF, expected string at line 1, char 16`},\n\t\t{s: `SHOW DIAGNOSTICS FOR`, err: `found EOF, expected string at line 1, char 22`},\n\t\t{s: `SHOW GRANTS`, err: `found EOF, expected FOR at line 1, char 13`},\n\t\t{s: `SHOW GRANTS FOR`, err: `found EOF, expected identifier at line 1, char 17`},\n\t\t{s: `DROP CONTINUOUS`, err: `found EOF, expected QUERY at line 1, char 17`},\n\t\t{s: `DROP CONTINUOUS QUERY`, err: `found EOF, expected identifier at line 1, char 23`},\n\t\t{s: `DROP CONTINUOUS QUERY myquery`, err: `found EOF, expected ON at line 1, char 31`},\n\t\t{s: `DROP CONTINUOUS QUERY myquery ON`, err: `found EOF, expected identifier at line 1, char 34`},\n\t\t{s: `CREATE CONTINUOUS`, err: `found EOF, expected QUERY at line 1, char 19`},\n\t\t{s: `CREATE CONTINUOUS QUERY`, err: `found EOF, expected identifier at line 1, char 25`},\n\t\t{s: `CREATE CONTINUOUS QUERY cq ON db RESAMPLE FOR 5s BEGIN SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(10s) END`, err: `FOR duration must be >= GROUP BY time duration: must be a minimum of 10s, got 5s`},\n\t\t{s: `CREATE CONTINUOUS QUERY cq ON db RESAMPLE EVERY 10s FOR 5s BEGIN SELECT mean(value) INTO cpu_mean FROM cpu 
GROUP BY time(5s) END`, err: `FOR duration must be >= GROUP BY time duration: must be a minimum of 10s, got 5s`},\n\t\t{s: `DROP FOO`, err: `found FOO, expected CONTINUOUS, MEASUREMENT, RETENTION, SERIES, SHARD, SUBSCRIPTION, USER at line 1, char 6`},\n\t\t{s: `CREATE FOO`, err: `found FOO, expected CONTINUOUS, DATABASE, USER, RETENTION, SUBSCRIPTION at line 1, char 8`},\n\t\t{s: `CREATE DATABASE`, err: `found EOF, expected identifier at line 1, char 17`},\n\t\t{s: `CREATE DATABASE \"testdb\" WITH`, err: `found EOF, expected DURATION, NAME, REPLICATION, SHARD at line 1, char 31`},\n\t\t{s: `CREATE DATABASE \"testdb\" WITH DURATION`, err: `found EOF, expected duration at line 1, char 40`},\n\t\t{s: `CREATE DATABASE \"testdb\" WITH REPLICATION`, err: `found EOF, expected integer at line 1, char 43`},\n\t\t{s: `CREATE DATABASE \"testdb\" WITH NAME`, err: `found EOF, expected identifier at line 1, char 36`},\n\t\t{s: `CREATE DATABASE \"testdb\" WITH SHARD`, err: `found EOF, expected DURATION at line 1, char 37`},\n\t\t{s: `DROP DATABASE`, err: `found EOF, expected identifier at line 1, char 15`},\n\t\t{s: `DROP RETENTION`, err: `found EOF, expected POLICY at line 1, char 16`},\n\t\t{s: `DROP RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 23`},\n\t\t{s: `DROP RETENTION POLICY \"1h.cpu\"`, err: `found EOF, expected ON at line 1, char 31`},\n\t\t{s: `DROP RETENTION POLICY \"1h.cpu\" ON`, err: `found EOF, expected identifier at line 1, char 35`},\n\t\t{s: `DROP USER`, err: `found EOF, expected identifier at line 1, char 11`},\n\t\t{s: `DROP SUBSCRIPTION`, err: `found EOF, expected identifier at line 1, char 19`},\n\t\t{s: `DROP SUBSCRIPTION \"name\"`, err: `found EOF, expected ON at line 1, char 25`},\n\t\t{s: `DROP SUBSCRIPTION \"name\" ON `, err: `found EOF, expected identifier at line 1, char 30`},\n\t\t{s: `DROP SUBSCRIPTION \"name\" ON \"db\"`, err: `found EOF, expected . 
at line 1, char 33`},\n\t\t{s: `DROP SUBSCRIPTION \"name\" ON \"db\".`, err: `found EOF, expected identifier at line 1, char 34`},\n\t\t{s: `CREATE USER testuser`, err: `found EOF, expected WITH at line 1, char 22`},\n\t\t{s: `CREATE USER testuser WITH`, err: `found EOF, expected PASSWORD at line 1, char 27`},\n\t\t{s: `CREATE USER testuser WITH PASSWORD`, err: `found EOF, expected string at line 1, char 36`},\n\t\t{s: `CREATE USER testuser WITH PASSWORD 'pwd' WITH`, err: `found EOF, expected ALL at line 1, char 47`},\n\t\t{s: `CREATE USER testuser WITH PASSWORD 'pwd' WITH ALL`, err: `found EOF, expected PRIVILEGES at line 1, char 51`},\n\t\t{s: `CREATE SUBSCRIPTION`, err: `found EOF, expected identifier at line 1, char 21`},\n\t\t{s: `CREATE SUBSCRIPTION \"name\"`, err: `found EOF, expected ON at line 1, char 27`},\n\t\t{s: `CREATE SUBSCRIPTION \"name\" ON `, err: `found EOF, expected identifier at line 1, char 32`},\n\t\t{s: `CREATE SUBSCRIPTION \"name\" ON \"db\"`, err: `found EOF, expected . 
at line 1, char 35`},\n\t\t{s: `CREATE SUBSCRIPTION \"name\" ON \"db\".`, err: `found EOF, expected identifier at line 1, char 36`},\n\t\t{s: `CREATE SUBSCRIPTION \"name\" ON \"db\".\"rp\"`, err: `found EOF, expected DESTINATIONS at line 1, char 40`},\n\t\t{s: `CREATE SUBSCRIPTION \"name\" ON \"db\".\"rp\" DESTINATIONS`, err: `found EOF, expected ALL, ANY at line 1, char 54`},\n\t\t{s: `CREATE SUBSCRIPTION \"name\" ON \"db\".\"rp\" DESTINATIONS ALL `, err: `found EOF, expected string at line 1, char 59`},\n\t\t{s: `GRANT`, err: `found EOF, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 7`},\n\t\t{s: `GRANT BOGUS`, err: `found BOGUS, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 7`},\n\t\t{s: `GRANT READ`, err: `found EOF, expected ON at line 1, char 12`},\n\t\t{s: `GRANT READ FROM`, err: `found FROM, expected ON at line 1, char 12`},\n\t\t{s: `GRANT READ ON`, err: `found EOF, expected identifier at line 1, char 15`},\n\t\t{s: `GRANT READ ON TO`, err: `found TO, expected identifier at line 1, char 15`},\n\t\t{s: `GRANT READ ON testdb`, err: `found EOF, expected TO at line 1, char 22`},\n\t\t{s: `GRANT READ ON testdb TO`, err: `found EOF, expected identifier at line 1, char 25`},\n\t\t{s: `GRANT READ TO`, err: `found TO, expected ON at line 1, char 12`},\n\t\t{s: `GRANT WRITE`, err: `found EOF, expected ON at line 1, char 13`},\n\t\t{s: `GRANT WRITE FROM`, err: `found FROM, expected ON at line 1, char 13`},\n\t\t{s: `GRANT WRITE ON`, err: `found EOF, expected identifier at line 1, char 16`},\n\t\t{s: `GRANT WRITE ON TO`, err: `found TO, expected identifier at line 1, char 16`},\n\t\t{s: `GRANT WRITE ON testdb`, err: `found EOF, expected TO at line 1, char 23`},\n\t\t{s: `GRANT WRITE ON testdb TO`, err: `found EOF, expected identifier at line 1, char 26`},\n\t\t{s: `GRANT WRITE TO`, err: `found TO, expected ON at line 1, char 13`},\n\t\t{s: `GRANT ALL`, err: `found EOF, expected ON, TO at line 1, char 11`},\n\t\t{s: `GRANT ALL PRIVILEGES`, err: `found 
EOF, expected ON, TO at line 1, char 22`},\n\t\t{s: `GRANT ALL FROM`, err: `found FROM, expected ON, TO at line 1, char 11`},\n\t\t{s: `GRANT ALL PRIVILEGES FROM`, err: `found FROM, expected ON, TO at line 1, char 22`},\n\t\t{s: `GRANT ALL ON`, err: `found EOF, expected identifier at line 1, char 14`},\n\t\t{s: `GRANT ALL PRIVILEGES ON`, err: `found EOF, expected identifier at line 1, char 25`},\n\t\t{s: `GRANT ALL ON TO`, err: `found TO, expected identifier at line 1, char 14`},\n\t\t{s: `GRANT ALL PRIVILEGES ON TO`, err: `found TO, expected identifier at line 1, char 25`},\n\t\t{s: `GRANT ALL ON testdb`, err: `found EOF, expected TO at line 1, char 21`},\n\t\t{s: `GRANT ALL PRIVILEGES ON testdb`, err: `found EOF, expected TO at line 1, char 32`},\n\t\t{s: `GRANT ALL ON testdb FROM`, err: `found FROM, expected TO at line 1, char 21`},\n\t\t{s: `GRANT ALL PRIVILEGES ON testdb FROM`, err: `found FROM, expected TO at line 1, char 32`},\n\t\t{s: `GRANT ALL ON testdb TO`, err: `found EOF, expected identifier at line 1, char 24`},\n\t\t{s: `GRANT ALL PRIVILEGES ON testdb TO`, err: `found EOF, expected identifier at line 1, char 35`},\n\t\t{s: `GRANT ALL TO`, err: `found EOF, expected identifier at line 1, char 14`},\n\t\t{s: `GRANT ALL PRIVILEGES TO`, err: `found EOF, expected identifier at line 1, char 25`},\n\t\t{s: `KILL`, err: `found EOF, expected QUERY at line 1, char 6`},\n\t\t{s: `KILL QUERY 10s`, err: `found 10s, expected integer at line 1, char 12`},\n\t\t{s: `KILL QUERY 4 ON 'host'`, err: `found host, expected identifier at line 1, char 16`},\n\t\t{s: `REVOKE`, err: `found EOF, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 8`},\n\t\t{s: `REVOKE BOGUS`, err: `found BOGUS, expected READ, WRITE, ALL [PRIVILEGES] at line 1, char 8`},\n\t\t{s: `REVOKE READ`, err: `found EOF, expected ON at line 1, char 13`},\n\t\t{s: `REVOKE READ TO`, err: `found TO, expected ON at line 1, char 13`},\n\t\t{s: `REVOKE READ ON`, err: `found EOF, expected identifier at line 
1, char 16`},\n\t\t{s: `REVOKE READ ON FROM`, err: `found FROM, expected identifier at line 1, char 16`},\n\t\t{s: `REVOKE READ ON testdb`, err: `found EOF, expected FROM at line 1, char 23`},\n\t\t{s: `REVOKE READ ON testdb FROM`, err: `found EOF, expected identifier at line 1, char 28`},\n\t\t{s: `REVOKE READ FROM`, err: `found FROM, expected ON at line 1, char 13`},\n\t\t{s: `REVOKE WRITE`, err: `found EOF, expected ON at line 1, char 14`},\n\t\t{s: `REVOKE WRITE TO`, err: `found TO, expected ON at line 1, char 14`},\n\t\t{s: `REVOKE WRITE ON`, err: `found EOF, expected identifier at line 1, char 17`},\n\t\t{s: `REVOKE WRITE ON FROM`, err: `found FROM, expected identifier at line 1, char 17`},\n\t\t{s: `REVOKE WRITE ON testdb`, err: `found EOF, expected FROM at line 1, char 24`},\n\t\t{s: `REVOKE WRITE ON testdb FROM`, err: `found EOF, expected identifier at line 1, char 29`},\n\t\t{s: `REVOKE WRITE FROM`, err: `found FROM, expected ON at line 1, char 14`},\n\t\t{s: `REVOKE ALL`, err: `found EOF, expected ON, FROM at line 1, char 12`},\n\t\t{s: `REVOKE ALL PRIVILEGES`, err: `found EOF, expected ON, FROM at line 1, char 23`},\n\t\t{s: `REVOKE ALL TO`, err: `found TO, expected ON, FROM at line 1, char 12`},\n\t\t{s: `REVOKE ALL PRIVILEGES TO`, err: `found TO, expected ON, FROM at line 1, char 23`},\n\t\t{s: `REVOKE ALL ON`, err: `found EOF, expected identifier at line 1, char 15`},\n\t\t{s: `REVOKE ALL PRIVILEGES ON`, err: `found EOF, expected identifier at line 1, char 26`},\n\t\t{s: `REVOKE ALL ON FROM`, err: `found FROM, expected identifier at line 1, char 15`},\n\t\t{s: `REVOKE ALL PRIVILEGES ON FROM`, err: `found FROM, expected identifier at line 1, char 26`},\n\t\t{s: `REVOKE ALL ON testdb`, err: `found EOF, expected FROM at line 1, char 22`},\n\t\t{s: `REVOKE ALL PRIVILEGES ON testdb`, err: `found EOF, expected FROM at line 1, char 33`},\n\t\t{s: `REVOKE ALL ON testdb TO`, err: `found TO, expected FROM at line 1, char 22`},\n\t\t{s: `REVOKE ALL PRIVILEGES 
ON testdb TO`, err: `found TO, expected FROM at line 1, char 33`},\n\t\t{s: `REVOKE ALL ON testdb FROM`, err: `found EOF, expected identifier at line 1, char 27`},\n\t\t{s: `REVOKE ALL PRIVILEGES ON testdb FROM`, err: `found EOF, expected identifier at line 1, char 38`},\n\t\t{s: `REVOKE ALL FROM`, err: `found EOF, expected identifier at line 1, char 17`},\n\t\t{s: `REVOKE ALL PRIVILEGES FROM`, err: `found EOF, expected identifier at line 1, char 28`},\n\t\t{s: `CREATE RETENTION`, err: `found EOF, expected POLICY at line 1, char 18`},\n\t\t{s: `CREATE RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 25`},\n\t\t{s: `CREATE RETENTION POLICY policy1`, err: `found EOF, expected ON at line 1, char 33`},\n\t\t{s: `CREATE RETENTION POLICY policy1 ON`, err: `found EOF, expected identifier at line 1, char 36`},\n\t\t{s: `CREATE RETENTION POLICY policy1 ON testdb`, err: `found EOF, expected DURATION at line 1, char 43`},\n\t\t{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION`, err: `found EOF, expected duration at line 1, char 52`},\n\t\t{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION bad`, err: `found bad, expected duration at line 1, char 52`},\n\t\t{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h`, err: `found EOF, expected REPLICATION at line 1, char 54`},\n\t\t{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION`, err: `found EOF, expected integer at line 1, char 67`},\n\t\t{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 3.14`, err: `found 3.14, expected integer at line 1, char 67`},\n\t\t{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 0`, err: `invalid value 0: must be 1 <= n <= 2147483647 at line 1, char 67`},\n\t\t{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION bad`, err: `found bad, expected integer at line 1, char 67`},\n\t\t{s: `CREATE RETENTION POLICY policy1 ON testdb DURATION 1h REPLICATION 2 SHARD DURATION INF`, err: `invalid 
duration INF for shard duration at line 1, char 84`},\n\t\t{s: `ALTER`, err: `found EOF, expected RETENTION at line 1, char 7`},\n\t\t{s: `ALTER RETENTION`, err: `found EOF, expected POLICY at line 1, char 17`},\n\t\t{s: `ALTER RETENTION POLICY`, err: `found EOF, expected identifier at line 1, char 24`},\n\t\t{s: `ALTER RETENTION POLICY policy1`, err: `found EOF, expected ON at line 1, char 32`}, {s: `ALTER RETENTION POLICY policy1 ON`, err: `found EOF, expected identifier at line 1, char 35`},\n\t\t{s: `ALTER RETENTION POLICY policy1 ON testdb`, err: `found EOF, expected DURATION, REPLICATION, SHARD, DEFAULT at line 1, char 42`},\n\t\t{s: `ALTER RETENTION POLICY policy1 ON testdb REPLICATION 1 REPLICATION 2`, err: `found duplicate REPLICATION option at line 1, char 56`},\n\t\t{s: `ALTER RETENTION POLICY policy1 ON testdb DURATION 15251w`, err: `overflowed duration 15251w: choose a smaller duration or INF at line 1, char 51`},\n\t\t{s: `ALTER RETENTION POLICY policy1 ON testdb DURATION INF SHARD DURATION INF`, err: `invalid duration INF for shard duration at line 1, char 70`},\n\t\t{s: `SET`, err: `found EOF, expected PASSWORD at line 1, char 5`},\n\t\t{s: `SET PASSWORD`, err: `found EOF, expected FOR at line 1, char 14`},\n\t\t{s: `SET PASSWORD something`, err: `found something, expected FOR at line 1, char 14`},\n\t\t{s: `SET PASSWORD FOR`, err: `found EOF, expected identifier at line 1, char 18`},\n\t\t{s: `SET PASSWORD FOR dejan`, err: `found EOF, expected = at line 1, char 24`},\n\t\t{s: `SET PASSWORD FOR dejan =`, err: `found EOF, expected string at line 1, char 25`},\n\t\t{s: `SET PASSWORD FOR dejan = bla`, err: `found bla, expected string at line 1, char 26`},\n\t\t{s: `$SHOW$DATABASES`, err: `found $SHOW, expected SELECT, DELETE, SHOW, CREATE, DROP, GRANT, REVOKE, ALTER, SET, KILL at line 1, char 1`},\n\t\t{s: `SELECT * FROM cpu WHERE \"tagkey\" = $$`, err: `empty bound parameter`},\n\t}\n\n\tfor i, tt := range tests {\n\t\tif tt.skip 
{\n\t\t\tcontinue\n\t\t}\n\t\tp := influxql.NewParser(strings.NewReader(tt.s))\n\t\tif tt.params != nil {\n\t\t\tp.SetParams(tt.params)\n\t\t}\n\t\tstmt, err := p.ParseStatement()\n\n\t\t// We are memoizing a field so for testing we need to...\n\t\tif s, ok := tt.stmt.(*influxql.SelectStatement); ok {\n\t\t\ts.GroupByInterval()\n\t\t\tfor _, source := range s.Sources {\n\t\t\t\tswitch source := source.(type) {\n\t\t\t\tcase *influxql.SubQuery:\n\t\t\t\t\tsource.Statement.GroupByInterval()\n\t\t\t\t}\n\t\t\t}\n\t\t} else if st, ok := stmt.(*influxql.CreateContinuousQueryStatement); ok { // if it's a CQ, there is a non-exported field that gets memoized during parsing that needs to be set\n\t\t\tif st != nil && st.Source != nil {\n\t\t\t\ttt.stmt.(*influxql.CreateContinuousQueryStatement).Source.GroupByInterval()\n\t\t\t}\n\t\t}\n\n\t\tif !reflect.DeepEqual(tt.err, errstring(err)) {\n\t\t\tt.Errorf(\"%d. %q: error mismatch:\\n  exp=%s\\n  got=%s\\n\\n\", i, tt.s, tt.err, err)\n\t\t} else if tt.err == \"\" {\n\t\t\tif !reflect.DeepEqual(tt.stmt, stmt) {\n\t\t\t\tt.Logf(\"\\n# %s\\nexp=%s\\ngot=%s\\n\", tt.s, mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt))\n\t\t\t\tt.Logf(\"\\nSQL exp=%s\\nSQL got=%s\\n\", tt.stmt.String(), stmt.String())\n\t\t\t\tt.Errorf(\"%d. %q\\n\\nstmt mismatch:\\n\\nexp=%#v\\n\\ngot=%#v\\n\\n\", i, tt.s, tt.stmt, stmt)\n\t\t\t} else {\n\t\t\t\t// Attempt to reparse the statement as a string and confirm it parses the same.\n\t\t\t\t// Skip this if we have some kind of statement with a password since those will never be reparsed.\n\t\t\t\tswitch stmt.(type) {\n\t\t\t\tcase *influxql.CreateUserStatement, *influxql.SetPasswordUserStatement:\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tstmt2, err := influxql.ParseStatement(stmt.String())\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"%d. 
%q: unable to parse statement string: %s\", i, stmt.String(), err)\n\t\t\t\t} else if !reflect.DeepEqual(tt.stmt, stmt2) {\n\t\t\t\t\tt.Logf(\"\\n# %s\\nexp=%s\\ngot=%s\\n\", tt.s, mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt2))\n\t\t\t\t\tt.Logf(\"\\nSQL exp=%s\\nSQL got=%s\\n\", tt.stmt.String(), stmt2.String())\n\t\t\t\t\tt.Errorf(\"%d. %q\\n\\nstmt reparse mismatch:\\n\\nexp=%#v\\n\\ngot=%#v\\n\\n\", i, tt.s, tt.stmt, stmt2)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Ensure the parser can parse expressions into an AST.\nfunc TestParser_ParseExpr(t *testing.T) {\n\tvar tests = []struct {\n\t\ts    string\n\t\texpr influxql.Expr\n\t\terr  string\n\t}{\n\t\t// Primitives\n\t\t{s: `100.0`, expr: &influxql.NumberLiteral{Val: 100}},\n\t\t{s: `100`, expr: &influxql.IntegerLiteral{Val: 100}},\n\t\t{s: `-100.0`, expr: &influxql.NumberLiteral{Val: -100}},\n\t\t{s: `-100`, expr: &influxql.IntegerLiteral{Val: -100}},\n\t\t{s: `100.`, expr: &influxql.NumberLiteral{Val: 100}},\n\t\t{s: `-100.`, expr: &influxql.NumberLiteral{Val: -100}},\n\t\t{s: `.23`, expr: &influxql.NumberLiteral{Val: 0.23}},\n\t\t{s: `-.23`, expr: &influxql.NumberLiteral{Val: -0.23}},\n\t\t{s: `1s`, expr: &influxql.DurationLiteral{Val: time.Second}},\n\t\t{s: `-1s`, expr: &influxql.DurationLiteral{Val: -time.Second}},\n\t\t{s: `-+1`, err: `found +, expected identifier, number, duration, ( at line 1, char 2`},\n\t\t{s: `'foo bar'`, expr: &influxql.StringLiteral{Val: \"foo bar\"}},\n\t\t{s: `true`, expr: &influxql.BooleanLiteral{Val: true}},\n\t\t{s: `false`, expr: &influxql.BooleanLiteral{Val: false}},\n\t\t{s: `my_ident`, expr: &influxql.VarRef{Val: \"my_ident\"}},\n\t\t{s: `'2000-01-01 00:00:00'`, expr: &influxql.StringLiteral{Val: \"2000-01-01 00:00:00\"}},\n\t\t{s: `'2000-01-01'`, expr: &influxql.StringLiteral{Val: \"2000-01-01\"}},\n\n\t\t// Simple binary expression\n\t\t{\n\t\t\ts: `1 + 2`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.ADD,\n\t\t\t\tLHS: &influxql.IntegerLiteral{Val: 
1},\n\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 2},\n\t\t\t},\n\t\t},\n\n\t\t// Binary expression with LHS precedence\n\t\t{\n\t\t\ts: `1 * 2 + 3`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp: influxql.ADD,\n\t\t\t\tLHS: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.MUL,\n\t\t\t\t\tLHS: &influxql.IntegerLiteral{Val: 1},\n\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 2},\n\t\t\t\t},\n\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 3},\n\t\t\t},\n\t\t},\n\n\t\t// Binary expression with RHS precedence\n\t\t{\n\t\t\ts: `1 + 2 * 3`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.ADD,\n\t\t\t\tLHS: &influxql.IntegerLiteral{Val: 1},\n\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.MUL,\n\t\t\t\t\tLHS: &influxql.IntegerLiteral{Val: 2},\n\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 3},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// Binary expression with LHS precedence\n\t\t{\n\t\t\ts: `1 / 2 + 3`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp: influxql.ADD,\n\t\t\t\tLHS: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.DIV,\n\t\t\t\t\tLHS: &influxql.IntegerLiteral{Val: 1},\n\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 2},\n\t\t\t\t},\n\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 3},\n\t\t\t},\n\t\t},\n\n\t\t// Binary expression with RHS precedence\n\t\t{\n\t\t\ts: `1 + 2 / 3`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.ADD,\n\t\t\t\tLHS: &influxql.IntegerLiteral{Val: 1},\n\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.DIV,\n\t\t\t\t\tLHS: &influxql.IntegerLiteral{Val: 2},\n\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 3},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// Binary expression with LHS precedence\n\t\t{\n\t\t\ts: `1 % 2 + 3`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp: influxql.ADD,\n\t\t\t\tLHS: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.MOD,\n\t\t\t\t\tLHS: &influxql.IntegerLiteral{Val: 1},\n\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 2},\n\t\t\t\t},\n\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 
3},\n\t\t\t},\n\t\t},\n\n\t\t// Binary expression with RHS precedence\n\t\t{\n\t\t\ts: `1 + 2 % 3`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.ADD,\n\t\t\t\tLHS: &influxql.IntegerLiteral{Val: 1},\n\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.MOD,\n\t\t\t\t\tLHS: &influxql.IntegerLiteral{Val: 2},\n\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 3},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// Binary expression with LHS paren group.\n\t\t{\n\t\t\ts: `(1 + 2) * 3`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp: influxql.MUL,\n\t\t\t\tLHS: &influxql.ParenExpr{\n\t\t\t\t\tExpr: &influxql.BinaryExpr{\n\t\t\t\t\t\tOp:  influxql.ADD,\n\t\t\t\t\t\tLHS: &influxql.IntegerLiteral{Val: 1},\n\t\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 2},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 3},\n\t\t\t},\n\t\t},\n\n\t\t// Binary expression with no precedence, tests left associativity.\n\t\t{\n\t\t\ts: `1 * 2 * 3`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp: influxql.MUL,\n\t\t\t\tLHS: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.MUL,\n\t\t\t\t\tLHS: &influxql.IntegerLiteral{Val: 1},\n\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 2},\n\t\t\t\t},\n\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 3},\n\t\t\t},\n\t\t},\n\n\t\t// Addition and subtraction without whitespace.\n\t\t{\n\t\t\ts: `1+2-3`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp: influxql.SUB,\n\t\t\t\tLHS: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.ADD,\n\t\t\t\t\tLHS: &influxql.IntegerLiteral{Val: 1},\n\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 2},\n\t\t\t\t},\n\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 3},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `time>now()-5m`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.GT,\n\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.SUB,\n\t\t\t\t\tLHS: &influxql.Call{Name: \"now\"},\n\t\t\t\t\tRHS: &influxql.DurationLiteral{Val: 5 * 
time.Minute},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// Simple unary expression.\n\t\t{\n\t\t\ts: `-value`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.MUL,\n\t\t\t\tLHS: &influxql.IntegerLiteral{Val: -1},\n\t\t\t\tRHS: &influxql.VarRef{Val: \"value\"},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: `-mean(value)`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.MUL,\n\t\t\t\tLHS: &influxql.IntegerLiteral{Val: -1},\n\t\t\t\tRHS: &influxql.Call{\n\t\t\t\t\tName: \"mean\",\n\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t&influxql.VarRef{Val: \"value\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// Unary expressions with parenthesis.\n\t\t{\n\t\t\ts: `-(-4)`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.MUL,\n\t\t\t\tLHS: &influxql.IntegerLiteral{Val: -1},\n\t\t\t\tRHS: &influxql.ParenExpr{\n\t\t\t\t\tExpr: &influxql.IntegerLiteral{Val: -4},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// Multiplication with leading subtraction.\n\t\t{\n\t\t\ts: `-2 * 3`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.MUL,\n\t\t\t\tLHS: &influxql.IntegerLiteral{Val: -2},\n\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 3},\n\t\t\t},\n\t\t},\n\n\t\t// Binary expression with regex.\n\t\t{\n\t\t\ts: `region =~ /us.*/`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.EQREGEX,\n\t\t\t\tLHS: &influxql.VarRef{Val: \"region\"},\n\t\t\t\tRHS: &influxql.RegexLiteral{Val: regexp.MustCompile(`us.*`)},\n\t\t\t},\n\t\t},\n\n\t\t// Binary expression with quoted '/' regex.\n\t\t{\n\t\t\ts: `url =~ /http\\:\\/\\/www\\.example\\.com/`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.EQREGEX,\n\t\t\t\tLHS: &influxql.VarRef{Val: \"url\"},\n\t\t\t\tRHS: &influxql.RegexLiteral{Val: regexp.MustCompile(`http\\://www\\.example\\.com`)},\n\t\t\t},\n\t\t},\n\n\t\t// Complex binary expression.\n\t\t{\n\t\t\ts: `value + 3 < 30 AND 1 + 2 OR true`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp: influxql.OR,\n\t\t\t\tLHS: &influxql.BinaryExpr{\n\t\t\t\t\tOp: 
influxql.AND,\n\t\t\t\t\tLHS: &influxql.BinaryExpr{\n\t\t\t\t\t\tOp: influxql.LT,\n\t\t\t\t\t\tLHS: &influxql.BinaryExpr{\n\t\t\t\t\t\t\tOp:  influxql.ADD,\n\t\t\t\t\t\t\tLHS: &influxql.VarRef{Val: \"value\"},\n\t\t\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 3},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 30},\n\t\t\t\t\t},\n\t\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\t\tOp:  influxql.ADD,\n\t\t\t\t\t\tLHS: &influxql.IntegerLiteral{Val: 1},\n\t\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 2},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tRHS: &influxql.BooleanLiteral{Val: true},\n\t\t\t},\n\t\t},\n\n\t\t// Complex binary expression.\n\t\t{\n\t\t\ts: `time > now() - 1d AND time < now() + 1d`,\n\t\t\texpr: &influxql.BinaryExpr{\n\t\t\t\tOp: influxql.AND,\n\t\t\t\tLHS: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.GT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\t\tOp:  influxql.SUB,\n\t\t\t\t\t\tLHS: &influxql.Call{Name: \"now\"},\n\t\t\t\t\t\tRHS: &influxql.DurationLiteral{Val: mustParseDuration(\"1d\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.LT,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\t\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\t\t\tOp:  influxql.ADD,\n\t\t\t\t\t\tLHS: &influxql.Call{Name: \"now\"},\n\t\t\t\t\t\tRHS: &influxql.DurationLiteral{Val: mustParseDuration(\"1d\")},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t// Duration math with an invalid literal.\n\t\t{\n\t\t\ts:   `time > now() - 1y`,\n\t\t\terr: `invalid duration`,\n\t\t},\n\n\t\t// Function call (empty)\n\t\t{\n\t\t\ts: `my_func()`,\n\t\t\texpr: &influxql.Call{\n\t\t\t\tName: \"my_func\",\n\t\t\t},\n\t\t},\n\n\t\t// Function call (multi-arg)\n\t\t{\n\t\t\ts: `my_func(1, 2 + 3)`,\n\t\t\texpr: &influxql.Call{\n\t\t\t\tName: \"my_func\",\n\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t&influxql.IntegerLiteral{Val: 1},\n\t\t\t\t\t&influxql.BinaryExpr{\n\t\t\t\t\t\tOp:  
influxql.ADD,\n\t\t\t\t\t\tLHS: &influxql.IntegerLiteral{Val: 2},\n\t\t\t\t\t\tRHS: &influxql.IntegerLiteral{Val: 3},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\texpr, err := influxql.NewParser(strings.NewReader(tt.s)).ParseExpr()\n\t\tif !reflect.DeepEqual(tt.err, errstring(err)) {\n\t\t\tt.Errorf(\"%d. %q: error mismatch:\\n  exp=%s\\n  got=%s\\n\\n\", i, tt.s, tt.err, err)\n\t\t} else if tt.err == \"\" && !reflect.DeepEqual(tt.expr, expr) {\n\t\t\tt.Errorf(\"%d. %q\\n\\nexpr mismatch:\\n\\nexp=%#v\\n\\ngot=%#v\\n\\n\", i, tt.s, tt.expr, expr)\n\t\t} else if err == nil {\n\t\t\t// Attempt to reparse the expr as a string and confirm it parses the same.\n\t\t\texpr2, err := influxql.ParseExpr(expr.String())\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"%d. %q: unable to parse expr string: %s\", i, expr.String(), err)\n\t\t\t} else if !reflect.DeepEqual(tt.expr, expr2) {\n\t\t\t\tt.Logf(\"\\n# %s\\nexp=%s\\ngot=%s\\n\", tt.s, mustMarshalJSON(tt.expr), mustMarshalJSON(expr2))\n\t\t\t\tt.Logf(\"\\nSQL exp=%s\\nSQL got=%s\\n\", tt.expr.String(), expr2.String())\n\t\t\t\tt.Errorf(\"%d. 
%q\\n\\nexpr reparse mismatch:\\n\\nexp=%#v\\n\\ngot=%#v\\n\\n\", i, tt.s, tt.expr, expr2)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Ensure a time duration can be parsed.\nfunc TestParseDuration(t *testing.T) {\n\tvar tests = []struct {\n\t\ts   string\n\t\td   time.Duration\n\t\terr string\n\t}{\n\t\t{s: `10ns`, d: 10},\n\t\t{s: `10u`, d: 10 * time.Microsecond},\n\t\t{s: `10µ`, d: 10 * time.Microsecond},\n\t\t{s: `15ms`, d: 15 * time.Millisecond},\n\t\t{s: `100s`, d: 100 * time.Second},\n\t\t{s: `2m`, d: 2 * time.Minute},\n\t\t{s: `2h`, d: 2 * time.Hour},\n\t\t{s: `2d`, d: 2 * 24 * time.Hour},\n\t\t{s: `2w`, d: 2 * 7 * 24 * time.Hour},\n\t\t{s: `1h30m`, d: time.Hour + 30*time.Minute},\n\t\t{s: `30ms3000u`, d: 30*time.Millisecond + 3000*time.Microsecond},\n\t\t{s: `-5s`, d: -5 * time.Second},\n\t\t{s: `-5m30s`, d: -5*time.Minute - 30*time.Second},\n\n\t\t{s: ``, err: \"invalid duration\"},\n\t\t{s: `3`, err: \"invalid duration\"},\n\t\t{s: `1000`, err: \"invalid duration\"},\n\t\t{s: `w`, err: \"invalid duration\"},\n\t\t{s: `ms`, err: \"invalid duration\"},\n\t\t{s: `1.2w`, err: \"invalid duration\"},\n\t\t{s: `10x`, err: \"invalid duration\"},\n\t\t{s: `10n`, err: \"invalid duration\"},\n\t}\n\n\tfor i, tt := range tests {\n\t\td, err := influxql.ParseDuration(tt.s)\n\t\tif !reflect.DeepEqual(tt.err, errstring(err)) {\n\t\t\tt.Errorf(\"%d. %q: error mismatch:\\n  exp=%s\\n  got=%s\\n\\n\", i, tt.s, tt.err, err)\n\t\t} else if tt.d != d {\n\t\t\tt.Errorf(\"%d. 
%q\\n\\nduration mismatch:\\n\\nexp=%#v\\n\\ngot=%#v\\n\\n\", i, tt.s, tt.d, d)\n\t\t}\n\t}\n}\n\n// Ensure a time duration can be formatted.\nfunc TestFormatDuration(t *testing.T) {\n\tvar tests = []struct {\n\t\td time.Duration\n\t\ts string\n\t}{\n\t\t{d: 3 * time.Microsecond, s: `3u`},\n\t\t{d: 1001 * time.Microsecond, s: `1001u`},\n\t\t{d: 15 * time.Millisecond, s: `15ms`},\n\t\t{d: 100 * time.Second, s: `100s`},\n\t\t{d: 2 * time.Minute, s: `2m`},\n\t\t{d: 2 * time.Hour, s: `2h`},\n\t\t{d: 2 * 24 * time.Hour, s: `2d`},\n\t\t{d: 2 * 7 * 24 * time.Hour, s: `2w`},\n\t}\n\n\tfor i, tt := range tests {\n\t\ts := influxql.FormatDuration(tt.d)\n\t\tif tt.s != s {\n\t\t\tt.Errorf(\"%d. %v: mismatch: %s != %s\", i, tt.d, tt.s, s)\n\t\t}\n\t}\n}\n\n// Ensure a string can be quoted.\nfunc TestQuote(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\tin  string\n\t\tout string\n\t}{\n\t\t{``, `''`},\n\t\t{`foo`, `'foo'`},\n\t\t{\"foo\\nbar\", `'foo\\nbar'`},\n\t\t{`foo bar\\\\`, `'foo bar\\\\\\\\'`},\n\t\t{`'foo'`, `'\\'foo\\''`},\n\t} {\n\t\tif out := influxql.QuoteString(tt.in); tt.out != out {\n\t\t\tt.Errorf(\"%d. %s: mismatch: %s != %s\", i, tt.in, tt.out, out)\n\t\t}\n\t}\n}\n\n// Ensure an identifier's segments can be quoted.\nfunc TestQuoteIdent(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\tident []string\n\t\ts     string\n\t}{\n\t\t{[]string{``}, `\"\"`},\n\t\t{[]string{`select`}, `\"select\"`},\n\t\t{[]string{`in-bytes`}, `\"in-bytes\"`},\n\t\t{[]string{`foo`, `bar`}, `\"foo\".bar`},\n\t\t{[]string{`foo`, ``, `bar`}, `\"foo\"..bar`},\n\t\t{[]string{`foo bar`, `baz`}, `\"foo bar\".baz`},\n\t\t{[]string{`foo.bar`, `baz`}, `\"foo.bar\".baz`},\n\t\t{[]string{`foo.bar`, `rp`, `baz`}, `\"foo.bar\".\"rp\".baz`},\n\t\t{[]string{`foo.bar`, `rp`, `1baz`}, `\"foo.bar\".\"rp\".\"1baz\"`},\n\t} {\n\t\tif s := influxql.QuoteIdent(tt.ident...); tt.s != s {\n\t\t\tt.Errorf(\"%d. 
%s: mismatch: %s != %s\", i, tt.ident, tt.s, s)\n\t\t}\n\t}\n}\n\n// Ensure DeleteSeriesStatement can convert to a string\nfunc TestDeleteSeriesStatement_String(t *testing.T) {\n\tvar tests = []struct {\n\t\ts    string\n\t\tstmt influxql.Statement\n\t}{\n\t\t{\n\t\t\ts:    `DELETE FROM src`,\n\t\t\tstmt: &influxql.DeleteSeriesStatement{Sources: []influxql.Source{&influxql.Measurement{Name: \"src\"}}},\n\t\t},\n\t\t{\n\t\t\ts: `DELETE FROM src WHERE host = 'hosta.influxdb.org'`,\n\t\t\tstmt: &influxql.DeleteSeriesStatement{\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"src\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"host\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"hosta.influxdb.org\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `DELETE FROM src WHERE host = 'hosta.influxdb.org'`,\n\t\t\tstmt: &influxql.DeleteSeriesStatement{\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"src\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"host\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"hosta.influxdb.org\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `DELETE WHERE host = 'hosta.influxdb.org'`,\n\t\t\tstmt: &influxql.DeleteSeriesStatement{\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"host\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"hosta.influxdb.org\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\ts := test.stmt.String()\n\t\tif s != test.s {\n\t\t\tt.Errorf(\"error rendering string. 
expected %s, actual: %s\", test.s, s)\n\t\t}\n\t}\n}\n\n// Ensure DropSeriesStatement can convert to a string\nfunc TestDropSeriesStatement_String(t *testing.T) {\n\tvar tests = []struct {\n\t\ts    string\n\t\tstmt influxql.Statement\n\t}{\n\t\t{\n\t\t\ts:    `DROP SERIES FROM src`,\n\t\t\tstmt: &influxql.DropSeriesStatement{Sources: []influxql.Source{&influxql.Measurement{Name: \"src\"}}},\n\t\t},\n\t\t{\n\t\t\ts: `DROP SERIES FROM src WHERE host = 'hosta.influxdb.org'`,\n\t\t\tstmt: &influxql.DropSeriesStatement{\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"src\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"host\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"hosta.influxdb.org\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `DROP SERIES FROM src WHERE host = 'hosta.influxdb.org'`,\n\t\t\tstmt: &influxql.DropSeriesStatement{\n\t\t\t\tSources: []influxql.Source{&influxql.Measurement{Name: \"src\"}},\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"host\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"hosta.influxdb.org\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ts: `DROP SERIES WHERE host = 'hosta.influxdb.org'`,\n\t\t\tstmt: &influxql.DropSeriesStatement{\n\t\t\t\tCondition: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"host\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"hosta.influxdb.org\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\ts := test.stmt.String()\n\t\tif s != test.s {\n\t\t\tt.Errorf(\"error rendering string. 
expected %s, actual: %s\", test.s, s)\n\t\t}\n\t}\n}\n\nfunc BenchmarkParserParseStatement(b *testing.B) {\n\tb.ReportAllocs()\n\ts := `SELECT \"field\" FROM \"series\" WHERE value > 10`\n\tfor i := 0; i < b.N; i++ {\n\t\tif stmt, err := influxql.NewParser(strings.NewReader(s)).ParseStatement(); err != nil {\n\t\t\tb.Fatalf(\"unexpected error: %s\", err)\n\t\t} else if stmt == nil {\n\t\t\tb.Fatalf(\"expected statement: %s\", stmt)\n\t\t}\n\t}\n\tb.SetBytes(int64(len(s)))\n}\n\n// MustParseSelectStatement parses a select statement. Panic on error.\nfunc MustParseSelectStatement(s string) *influxql.SelectStatement {\n\tstmt, err := influxql.NewParser(strings.NewReader(s)).ParseStatement()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn stmt.(*influxql.SelectStatement)\n}\n\n// MustParseExpr parses an expression. Panic on error.\nfunc MustParseExpr(s string) influxql.Expr {\n\texpr, err := influxql.NewParser(strings.NewReader(s)).ParseExpr()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn expr\n}\n\n// errstring converts an error to its string representation.\nfunc errstring(err error) string {\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\treturn \"\"\n}\n\n// newAlterRetentionPolicyStatement creates an initialized AlterRetentionPolicyStatement.\nfunc newAlterRetentionPolicyStatement(name string, DB string, d, sd time.Duration, replication int, dfault bool) *influxql.AlterRetentionPolicyStatement {\n\tstmt := &influxql.AlterRetentionPolicyStatement{\n\t\tName:     name,\n\t\tDatabase: DB,\n\t\tDefault:  dfault,\n\t}\n\n\tif d > -1 {\n\t\tstmt.Duration = &d\n\t}\n\n\tif sd > -1 {\n\t\tstmt.ShardGroupDuration = &sd\n\t}\n\n\tif replication > -1 {\n\t\tstmt.Replication = &replication\n\t}\n\n\treturn stmt\n}\n\n// mustMarshalJSON encodes a value to JSON.\nfunc mustMarshalJSON(v interface{}) []byte {\n\tb, err := json.MarshalIndent(v, \"\", \"  \")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn b\n}\n\nfunc mustParseDuration(s string) time.Duration {\n\td, 
err := influxql.ParseDuration(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn d\n}\n\nfunc mustLoadLocation(s string) *time.Location {\n\tl, err := time.LoadLocation(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn l\n}\n\nvar LosAngeles = mustLoadLocation(\"America/Los_Angeles\")\n\nfunc duration(v time.Duration) *time.Duration {\n\treturn &v\n}\n\nfunc intptr(v int) *int {\n\treturn &v\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/point.gen.go",
    "content": "// Generated by tmpl\n// https://github.com/benbjohnson/tmpl\n//\n// DO NOT EDIT!\n// Source: point.gen.go.tmpl\n\npackage influxql\n\nimport (\n\t\"encoding/binary\"\n\t\"io\"\n\n\t\"github.com/gogo/protobuf/proto\"\n\tinternal \"github.com/influxdata/influxdb/influxql/internal\"\n)\n\n// FloatPoint represents a point with a float64 value.\n// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT.\n// See TestPoint_Fields in influxql/point_test.go for more details.\ntype FloatPoint struct {\n\tName string\n\tTags Tags\n\n\tTime  int64\n\tNil   bool\n\tValue float64\n\tAux   []interface{}\n\n\t// Total number of points that were combined into this point from an aggregate.\n\t// If this is zero, the point is not the result of an aggregate function.\n\tAggregated uint32\n}\n\nfunc (v *FloatPoint) name() string { return v.Name }\nfunc (v *FloatPoint) tags() Tags   { return v.Tags }\nfunc (v *FloatPoint) time() int64  { return v.Time }\nfunc (v *FloatPoint) nil() bool    { return v.Nil }\nfunc (v *FloatPoint) value() interface{} {\n\tif v.Nil {\n\t\treturn nil\n\t}\n\treturn v.Value\n}\nfunc (v *FloatPoint) aux() []interface{} { return v.Aux }\n\n// Clone returns a copy of v.\nfunc (v *FloatPoint) Clone() *FloatPoint {\n\tif v == nil {\n\t\treturn nil\n\t}\n\n\tother := *v\n\tif v.Aux != nil {\n\t\tother.Aux = make([]interface{}, len(v.Aux))\n\t\tcopy(other.Aux, v.Aux)\n\t}\n\n\treturn &other\n}\n\n// CopyTo makes a deep copy into the point.\nfunc (v *FloatPoint) CopyTo(other *FloatPoint) {\n\t*other = *v\n\tif v.Aux != nil {\n\t\tother.Aux = make([]interface{}, len(v.Aux))\n\t\tcopy(other.Aux, v.Aux)\n\t}\n}\n\nfunc encodeFloatPoint(p *FloatPoint) *internal.Point {\n\treturn &internal.Point{\n\t\tName:       proto.String(p.Name),\n\t\tTags:       proto.String(p.Tags.ID()),\n\t\tTime:       proto.Int64(p.Time),\n\t\tNil:        proto.Bool(p.Nil),\n\t\tAux:        encodeAux(p.Aux),\n\t\tAggregated: proto.Uint32(p.Aggregated),\n\n\t\tFloatValue: 
proto.Float64(p.Value),\n\t}\n}\n\nfunc decodeFloatPoint(pb *internal.Point) *FloatPoint {\n\treturn &FloatPoint{\n\t\tName:       pb.GetName(),\n\t\tTags:       newTagsID(pb.GetTags()),\n\t\tTime:       pb.GetTime(),\n\t\tNil:        pb.GetNil(),\n\t\tAux:        decodeAux(pb.Aux),\n\t\tAggregated: pb.GetAggregated(),\n\t\tValue:      pb.GetFloatValue(),\n\t}\n}\n\n// floatPoints represents a slice of points sortable by value.\ntype floatPoints []FloatPoint\n\nfunc (a floatPoints) Len() int { return len(a) }\nfunc (a floatPoints) Less(i, j int) bool {\n\tif a[i].Time != a[j].Time {\n\t\treturn a[i].Time < a[j].Time\n\t}\n\treturn a[i].Value < a[j].Value\n}\nfunc (a floatPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\n// floatPointsByValue represents a slice of points sortable by value.\ntype floatPointsByValue []FloatPoint\n\nfunc (a floatPointsByValue) Len() int { return len(a) }\n\nfunc (a floatPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value }\n\nfunc (a floatPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\n// floatPointsByTime represents a slice of points sortable by value.\ntype floatPointsByTime []FloatPoint\n\nfunc (a floatPointsByTime) Len() int           { return len(a) }\nfunc (a floatPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time }\nfunc (a floatPointsByTime) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\n\n// floatPointByFunc represents a slice of points sortable by a function.\ntype floatPointsByFunc struct {\n\tpoints []FloatPoint\n\tcmp    func(a, b *FloatPoint) bool\n}\n\nfunc (a *floatPointsByFunc) Len() int           { return len(a.points) }\nfunc (a *floatPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }\nfunc (a *floatPointsByFunc) Swap(i, j int)      { a.points[i], a.points[j] = a.points[j], a.points[i] }\n\nfunc (a *floatPointsByFunc) Push(x interface{}) {\n\ta.points = append(a.points, x.(FloatPoint))\n}\n\nfunc (a *floatPointsByFunc) Pop() interface{} 
{\n\tp := a.points[len(a.points)-1]\n\ta.points = a.points[:len(a.points)-1]\n\treturn p\n}\n\nfunc floatPointsSortBy(points []FloatPoint, cmp func(a, b *FloatPoint) bool) *floatPointsByFunc {\n\treturn &floatPointsByFunc{\n\t\tpoints: points,\n\t\tcmp:    cmp,\n\t}\n}\n\n// FloatPointEncoder encodes FloatPoint points to a writer.\ntype FloatPointEncoder struct {\n\tw io.Writer\n}\n\n// NewFloatPointEncoder returns a new instance of FloatPointEncoder that writes to w.\nfunc NewFloatPointEncoder(w io.Writer) *FloatPointEncoder {\n\treturn &FloatPointEncoder{w: w}\n}\n\n// EncodeFloatPoint marshals and writes p to the underlying writer.\nfunc (enc *FloatPointEncoder) EncodeFloatPoint(p *FloatPoint) error {\n\t// Marshal to bytes.\n\tbuf, err := proto.Marshal(encodeFloatPoint(p))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Write the length.\n\tif err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {\n\t\treturn err\n\t}\n\n\t// Write the encoded point.\n\tif _, err := enc.w.Write(buf); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// FloatPointDecoder decodes FloatPoint points from a reader.\ntype FloatPointDecoder struct {\n\tr     io.Reader\n\tstats IteratorStats\n}\n\n// NewFloatPointDecoder returns a new instance of FloatPointDecoder that reads from r.\nfunc NewFloatPointDecoder(r io.Reader) *FloatPointDecoder {\n\treturn &FloatPointDecoder{r: r}\n}\n\n// Stats returns iterator stats embedded within the stream.\nfunc (dec *FloatPointDecoder) Stats() IteratorStats { return dec.stats }\n\n// DecodeFloatPoint reads from the underlying reader and unmarshals into p.\nfunc (dec *FloatPointDecoder) DecodeFloatPoint(p *FloatPoint) error {\n\tfor {\n\t\t// Read length.\n\t\tvar sz uint32\n\t\tif err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Read point data.\n\t\tbuf := make([]byte, sz)\n\t\tif _, err := io.ReadFull(dec.r, buf); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Unmarshal 
into point.\n\t\tvar pb internal.Point\n\t\tif err := proto.Unmarshal(buf, &pb); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// If the point contains stats then read stats and retry.\n\t\tif pb.Stats != nil {\n\t\t\tdec.stats = decodeIteratorStats(pb.Stats)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Decode into point object.\n\t\t*p = *decodeFloatPoint(&pb)\n\n\t\treturn nil\n\t}\n}\n\n// IntegerPoint represents a point with a int64 value.\n// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT.\n// See TestPoint_Fields in influxql/point_test.go for more details.\ntype IntegerPoint struct {\n\tName string\n\tTags Tags\n\n\tTime  int64\n\tNil   bool\n\tValue int64\n\tAux   []interface{}\n\n\t// Total number of points that were combined into this point from an aggregate.\n\t// If this is zero, the point is not the result of an aggregate function.\n\tAggregated uint32\n}\n\nfunc (v *IntegerPoint) name() string { return v.Name }\nfunc (v *IntegerPoint) tags() Tags   { return v.Tags }\nfunc (v *IntegerPoint) time() int64  { return v.Time }\nfunc (v *IntegerPoint) nil() bool    { return v.Nil }\nfunc (v *IntegerPoint) value() interface{} {\n\tif v.Nil {\n\t\treturn nil\n\t}\n\treturn v.Value\n}\nfunc (v *IntegerPoint) aux() []interface{} { return v.Aux }\n\n// Clone returns a copy of v.\nfunc (v *IntegerPoint) Clone() *IntegerPoint {\n\tif v == nil {\n\t\treturn nil\n\t}\n\n\tother := *v\n\tif v.Aux != nil {\n\t\tother.Aux = make([]interface{}, len(v.Aux))\n\t\tcopy(other.Aux, v.Aux)\n\t}\n\n\treturn &other\n}\n\n// CopyTo makes a deep copy into the point.\nfunc (v *IntegerPoint) CopyTo(other *IntegerPoint) {\n\t*other = *v\n\tif v.Aux != nil {\n\t\tother.Aux = make([]interface{}, len(v.Aux))\n\t\tcopy(other.Aux, v.Aux)\n\t}\n}\n\nfunc encodeIntegerPoint(p *IntegerPoint) *internal.Point {\n\treturn &internal.Point{\n\t\tName:       proto.String(p.Name),\n\t\tTags:       proto.String(p.Tags.ID()),\n\t\tTime:       proto.Int64(p.Time),\n\t\tNil:        proto.Bool(p.Nil),\n\t\tAux:        
encodeAux(p.Aux),\n\t\tAggregated: proto.Uint32(p.Aggregated),\n\n\t\tIntegerValue: proto.Int64(p.Value),\n\t}\n}\n\nfunc decodeIntegerPoint(pb *internal.Point) *IntegerPoint {\n\treturn &IntegerPoint{\n\t\tName:       pb.GetName(),\n\t\tTags:       newTagsID(pb.GetTags()),\n\t\tTime:       pb.GetTime(),\n\t\tNil:        pb.GetNil(),\n\t\tAux:        decodeAux(pb.Aux),\n\t\tAggregated: pb.GetAggregated(),\n\t\tValue:      pb.GetIntegerValue(),\n\t}\n}\n\n// integerPoints represents a slice of points sortable by value.\ntype integerPoints []IntegerPoint\n\nfunc (a integerPoints) Len() int { return len(a) }\nfunc (a integerPoints) Less(i, j int) bool {\n\tif a[i].Time != a[j].Time {\n\t\treturn a[i].Time < a[j].Time\n\t}\n\treturn a[i].Value < a[j].Value\n}\nfunc (a integerPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\n// integerPointsByValue represents a slice of points sortable by value.\ntype integerPointsByValue []IntegerPoint\n\nfunc (a integerPointsByValue) Len() int { return len(a) }\n\nfunc (a integerPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value }\n\nfunc (a integerPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\n// integerPointsByTime represents a slice of points sortable by value.\ntype integerPointsByTime []IntegerPoint\n\nfunc (a integerPointsByTime) Len() int           { return len(a) }\nfunc (a integerPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time }\nfunc (a integerPointsByTime) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\n\n// integerPointByFunc represents a slice of points sortable by a function.\ntype integerPointsByFunc struct {\n\tpoints []IntegerPoint\n\tcmp    func(a, b *IntegerPoint) bool\n}\n\nfunc (a *integerPointsByFunc) Len() int           { return len(a.points) }\nfunc (a *integerPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }\nfunc (a *integerPointsByFunc) Swap(i, j int)      { a.points[i], a.points[j] = a.points[j], a.points[i] }\n\nfunc (a 
*integerPointsByFunc) Push(x interface{}) {\n\ta.points = append(a.points, x.(IntegerPoint))\n}\n\nfunc (a *integerPointsByFunc) Pop() interface{} {\n\tp := a.points[len(a.points)-1]\n\ta.points = a.points[:len(a.points)-1]\n\treturn p\n}\n\nfunc integerPointsSortBy(points []IntegerPoint, cmp func(a, b *IntegerPoint) bool) *integerPointsByFunc {\n\treturn &integerPointsByFunc{\n\t\tpoints: points,\n\t\tcmp:    cmp,\n\t}\n}\n\n// IntegerPointEncoder encodes IntegerPoint points to a writer.\ntype IntegerPointEncoder struct {\n\tw io.Writer\n}\n\n// NewIntegerPointEncoder returns a new instance of IntegerPointEncoder that writes to w.\nfunc NewIntegerPointEncoder(w io.Writer) *IntegerPointEncoder {\n\treturn &IntegerPointEncoder{w: w}\n}\n\n// EncodeIntegerPoint marshals and writes p to the underlying writer.\nfunc (enc *IntegerPointEncoder) EncodeIntegerPoint(p *IntegerPoint) error {\n\t// Marshal to bytes.\n\tbuf, err := proto.Marshal(encodeIntegerPoint(p))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Write the length.\n\tif err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {\n\t\treturn err\n\t}\n\n\t// Write the encoded point.\n\tif _, err := enc.w.Write(buf); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// IntegerPointDecoder decodes IntegerPoint points from a reader.\ntype IntegerPointDecoder struct {\n\tr     io.Reader\n\tstats IteratorStats\n}\n\n// NewIntegerPointDecoder returns a new instance of IntegerPointDecoder that reads from r.\nfunc NewIntegerPointDecoder(r io.Reader) *IntegerPointDecoder {\n\treturn &IntegerPointDecoder{r: r}\n}\n\n// Stats returns iterator stats embedded within the stream.\nfunc (dec *IntegerPointDecoder) Stats() IteratorStats { return dec.stats }\n\n// DecodeIntegerPoint reads from the underlying reader and unmarshals into p.\nfunc (dec *IntegerPointDecoder) DecodeIntegerPoint(p *IntegerPoint) error {\n\tfor {\n\t\t// Read length.\n\t\tvar sz uint32\n\t\tif err := binary.Read(dec.r, 
binary.BigEndian, &sz); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Read point data.\n\t\tbuf := make([]byte, sz)\n\t\tif _, err := io.ReadFull(dec.r, buf); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Unmarshal into point.\n\t\tvar pb internal.Point\n\t\tif err := proto.Unmarshal(buf, &pb); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// If the point contains stats then read stats and retry.\n\t\tif pb.Stats != nil {\n\t\t\tdec.stats = decodeIteratorStats(pb.Stats)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Decode into point object.\n\t\t*p = *decodeIntegerPoint(&pb)\n\n\t\treturn nil\n\t}\n}\n\n// StringPoint represents a point with a string value.\n// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT.\n// See TestPoint_Fields in influxql/point_test.go for more details.\ntype StringPoint struct {\n\tName string\n\tTags Tags\n\n\tTime  int64\n\tNil   bool\n\tValue string\n\tAux   []interface{}\n\n\t// Total number of points that were combined into this point from an aggregate.\n\t// If this is zero, the point is not the result of an aggregate function.\n\tAggregated uint32\n}\n\nfunc (v *StringPoint) name() string { return v.Name }\nfunc (v *StringPoint) tags() Tags   { return v.Tags }\nfunc (v *StringPoint) time() int64  { return v.Time }\nfunc (v *StringPoint) nil() bool    { return v.Nil }\nfunc (v *StringPoint) value() interface{} {\n\tif v.Nil {\n\t\treturn nil\n\t}\n\treturn v.Value\n}\nfunc (v *StringPoint) aux() []interface{} { return v.Aux }\n\n// Clone returns a copy of v.\nfunc (v *StringPoint) Clone() *StringPoint {\n\tif v == nil {\n\t\treturn nil\n\t}\n\n\tother := *v\n\tif v.Aux != nil {\n\t\tother.Aux = make([]interface{}, len(v.Aux))\n\t\tcopy(other.Aux, v.Aux)\n\t}\n\n\treturn &other\n}\n\n// CopyTo makes a deep copy into the point.\nfunc (v *StringPoint) CopyTo(other *StringPoint) {\n\t*other = *v\n\tif v.Aux != nil {\n\t\tother.Aux = make([]interface{}, len(v.Aux))\n\t\tcopy(other.Aux, v.Aux)\n\t}\n}\n\nfunc encodeStringPoint(p *StringPoint) 
*internal.Point {\n\treturn &internal.Point{\n\t\tName:       proto.String(p.Name),\n\t\tTags:       proto.String(p.Tags.ID()),\n\t\tTime:       proto.Int64(p.Time),\n\t\tNil:        proto.Bool(p.Nil),\n\t\tAux:        encodeAux(p.Aux),\n\t\tAggregated: proto.Uint32(p.Aggregated),\n\n\t\tStringValue: proto.String(p.Value),\n\t}\n}\n\nfunc decodeStringPoint(pb *internal.Point) *StringPoint {\n\treturn &StringPoint{\n\t\tName:       pb.GetName(),\n\t\tTags:       newTagsID(pb.GetTags()),\n\t\tTime:       pb.GetTime(),\n\t\tNil:        pb.GetNil(),\n\t\tAux:        decodeAux(pb.Aux),\n\t\tAggregated: pb.GetAggregated(),\n\t\tValue:      pb.GetStringValue(),\n\t}\n}\n\n// stringPoints represents a slice of points sortable by value.\ntype stringPoints []StringPoint\n\nfunc (a stringPoints) Len() int { return len(a) }\nfunc (a stringPoints) Less(i, j int) bool {\n\tif a[i].Time != a[j].Time {\n\t\treturn a[i].Time < a[j].Time\n\t}\n\treturn a[i].Value < a[j].Value\n}\nfunc (a stringPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\n// stringPointsByValue represents a slice of points sortable by value.\ntype stringPointsByValue []StringPoint\n\nfunc (a stringPointsByValue) Len() int { return len(a) }\n\nfunc (a stringPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value }\n\nfunc (a stringPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\n// stringPointsByTime represents a slice of points sortable by value.\ntype stringPointsByTime []StringPoint\n\nfunc (a stringPointsByTime) Len() int           { return len(a) }\nfunc (a stringPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time }\nfunc (a stringPointsByTime) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\n\n// stringPointByFunc represents a slice of points sortable by a function.\ntype stringPointsByFunc struct {\n\tpoints []StringPoint\n\tcmp    func(a, b *StringPoint) bool\n}\n\nfunc (a *stringPointsByFunc) Len() int           { return len(a.points) }\nfunc (a 
*stringPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }\nfunc (a *stringPointsByFunc) Swap(i, j int)      { a.points[i], a.points[j] = a.points[j], a.points[i] }\n\nfunc (a *stringPointsByFunc) Push(x interface{}) {\n\ta.points = append(a.points, x.(StringPoint))\n}\n\nfunc (a *stringPointsByFunc) Pop() interface{} {\n\tp := a.points[len(a.points)-1]\n\ta.points = a.points[:len(a.points)-1]\n\treturn p\n}\n\nfunc stringPointsSortBy(points []StringPoint, cmp func(a, b *StringPoint) bool) *stringPointsByFunc {\n\treturn &stringPointsByFunc{\n\t\tpoints: points,\n\t\tcmp:    cmp,\n\t}\n}\n\n// StringPointEncoder encodes StringPoint points to a writer.\ntype StringPointEncoder struct {\n\tw io.Writer\n}\n\n// NewStringPointEncoder returns a new instance of StringPointEncoder that writes to w.\nfunc NewStringPointEncoder(w io.Writer) *StringPointEncoder {\n\treturn &StringPointEncoder{w: w}\n}\n\n// EncodeStringPoint marshals and writes p to the underlying writer.\nfunc (enc *StringPointEncoder) EncodeStringPoint(p *StringPoint) error {\n\t// Marshal to bytes.\n\tbuf, err := proto.Marshal(encodeStringPoint(p))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Write the length.\n\tif err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {\n\t\treturn err\n\t}\n\n\t// Write the encoded point.\n\tif _, err := enc.w.Write(buf); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// StringPointDecoder decodes StringPoint points from a reader.\ntype StringPointDecoder struct {\n\tr     io.Reader\n\tstats IteratorStats\n}\n\n// NewStringPointDecoder returns a new instance of StringPointDecoder that reads from r.\nfunc NewStringPointDecoder(r io.Reader) *StringPointDecoder {\n\treturn &StringPointDecoder{r: r}\n}\n\n// Stats returns iterator stats embedded within the stream.\nfunc (dec *StringPointDecoder) Stats() IteratorStats { return dec.stats }\n\n// DecodeStringPoint reads from the underlying reader and unmarshals into 
p.\nfunc (dec *StringPointDecoder) DecodeStringPoint(p *StringPoint) error {\n\tfor {\n\t\t// Read length.\n\t\tvar sz uint32\n\t\tif err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Read point data.\n\t\tbuf := make([]byte, sz)\n\t\tif _, err := io.ReadFull(dec.r, buf); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Unmarshal into point.\n\t\tvar pb internal.Point\n\t\tif err := proto.Unmarshal(buf, &pb); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// If the point contains stats then read stats and retry.\n\t\tif pb.Stats != nil {\n\t\t\tdec.stats = decodeIteratorStats(pb.Stats)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Decode into point object.\n\t\t*p = *decodeStringPoint(&pb)\n\n\t\treturn nil\n\t}\n}\n\n// BooleanPoint represents a point with a bool value.\n// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT.\n// See TestPoint_Fields in influxql/point_test.go for more details.\ntype BooleanPoint struct {\n\tName string\n\tTags Tags\n\n\tTime  int64\n\tNil   bool\n\tValue bool\n\tAux   []interface{}\n\n\t// Total number of points that were combined into this point from an aggregate.\n\t// If this is zero, the point is not the result of an aggregate function.\n\tAggregated uint32\n}\n\nfunc (v *BooleanPoint) name() string { return v.Name }\nfunc (v *BooleanPoint) tags() Tags   { return v.Tags }\nfunc (v *BooleanPoint) time() int64  { return v.Time }\nfunc (v *BooleanPoint) nil() bool    { return v.Nil }\nfunc (v *BooleanPoint) value() interface{} {\n\tif v.Nil {\n\t\treturn nil\n\t}\n\treturn v.Value\n}\nfunc (v *BooleanPoint) aux() []interface{} { return v.Aux }\n\n// Clone returns a copy of v.\nfunc (v *BooleanPoint) Clone() *BooleanPoint {\n\tif v == nil {\n\t\treturn nil\n\t}\n\n\tother := *v\n\tif v.Aux != nil {\n\t\tother.Aux = make([]interface{}, len(v.Aux))\n\t\tcopy(other.Aux, v.Aux)\n\t}\n\n\treturn &other\n}\n\n// CopyTo makes a deep copy into the point.\nfunc (v *BooleanPoint) CopyTo(other *BooleanPoint) 
{\n\t*other = *v\n\tif v.Aux != nil {\n\t\tother.Aux = make([]interface{}, len(v.Aux))\n\t\tcopy(other.Aux, v.Aux)\n\t}\n}\n\nfunc encodeBooleanPoint(p *BooleanPoint) *internal.Point {\n\treturn &internal.Point{\n\t\tName:       proto.String(p.Name),\n\t\tTags:       proto.String(p.Tags.ID()),\n\t\tTime:       proto.Int64(p.Time),\n\t\tNil:        proto.Bool(p.Nil),\n\t\tAux:        encodeAux(p.Aux),\n\t\tAggregated: proto.Uint32(p.Aggregated),\n\n\t\tBooleanValue: proto.Bool(p.Value),\n\t}\n}\n\nfunc decodeBooleanPoint(pb *internal.Point) *BooleanPoint {\n\treturn &BooleanPoint{\n\t\tName:       pb.GetName(),\n\t\tTags:       newTagsID(pb.GetTags()),\n\t\tTime:       pb.GetTime(),\n\t\tNil:        pb.GetNil(),\n\t\tAux:        decodeAux(pb.Aux),\n\t\tAggregated: pb.GetAggregated(),\n\t\tValue:      pb.GetBooleanValue(),\n\t}\n}\n\n// booleanPoints represents a slice of points sortable by value.\ntype booleanPoints []BooleanPoint\n\nfunc (a booleanPoints) Len() int { return len(a) }\nfunc (a booleanPoints) Less(i, j int) bool {\n\tif a[i].Time != a[j].Time {\n\t\treturn a[i].Time < a[j].Time\n\t}\n\treturn !a[i].Value\n}\nfunc (a booleanPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\n// booleanPointsByValue represents a slice of points sortable by value.\ntype booleanPointsByValue []BooleanPoint\n\nfunc (a booleanPointsByValue) Len() int { return len(a) }\n\nfunc (a booleanPointsByValue) Less(i, j int) bool { return !a[i].Value }\n\nfunc (a booleanPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\n// booleanPointsByTime represents a slice of points sortable by value.\ntype booleanPointsByTime []BooleanPoint\n\nfunc (a booleanPointsByTime) Len() int           { return len(a) }\nfunc (a booleanPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time }\nfunc (a booleanPointsByTime) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\n\n// booleanPointByFunc represents a slice of points sortable by a function.\ntype booleanPointsByFunc struct 
{\n\tpoints []BooleanPoint\n\tcmp    func(a, b *BooleanPoint) bool\n}\n\nfunc (a *booleanPointsByFunc) Len() int           { return len(a.points) }\nfunc (a *booleanPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }\nfunc (a *booleanPointsByFunc) Swap(i, j int)      { a.points[i], a.points[j] = a.points[j], a.points[i] }\n\nfunc (a *booleanPointsByFunc) Push(x interface{}) {\n\ta.points = append(a.points, x.(BooleanPoint))\n}\n\nfunc (a *booleanPointsByFunc) Pop() interface{} {\n\tp := a.points[len(a.points)-1]\n\ta.points = a.points[:len(a.points)-1]\n\treturn p\n}\n\nfunc booleanPointsSortBy(points []BooleanPoint, cmp func(a, b *BooleanPoint) bool) *booleanPointsByFunc {\n\treturn &booleanPointsByFunc{\n\t\tpoints: points,\n\t\tcmp:    cmp,\n\t}\n}\n\n// BooleanPointEncoder encodes BooleanPoint points to a writer.\ntype BooleanPointEncoder struct {\n\tw io.Writer\n}\n\n// NewBooleanPointEncoder returns a new instance of BooleanPointEncoder that writes to w.\nfunc NewBooleanPointEncoder(w io.Writer) *BooleanPointEncoder {\n\treturn &BooleanPointEncoder{w: w}\n}\n\n// EncodeBooleanPoint marshals and writes p to the underlying writer.\nfunc (enc *BooleanPointEncoder) EncodeBooleanPoint(p *BooleanPoint) error {\n\t// Marshal to bytes.\n\tbuf, err := proto.Marshal(encodeBooleanPoint(p))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Write the length.\n\tif err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {\n\t\treturn err\n\t}\n\n\t// Write the encoded point.\n\tif _, err := enc.w.Write(buf); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// BooleanPointDecoder decodes BooleanPoint points from a reader.\ntype BooleanPointDecoder struct {\n\tr     io.Reader\n\tstats IteratorStats\n}\n\n// NewBooleanPointDecoder returns a new instance of BooleanPointDecoder that reads from r.\nfunc NewBooleanPointDecoder(r io.Reader) *BooleanPointDecoder {\n\treturn &BooleanPointDecoder{r: r}\n}\n\n// Stats returns iterator 
stats embedded within the stream.\nfunc (dec *BooleanPointDecoder) Stats() IteratorStats { return dec.stats }\n\n// DecodeBooleanPoint reads from the underlying reader and unmarshals into p.\nfunc (dec *BooleanPointDecoder) DecodeBooleanPoint(p *BooleanPoint) error {\n\tfor {\n\t\t// Read length.\n\t\tvar sz uint32\n\t\tif err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Read point data.\n\t\tbuf := make([]byte, sz)\n\t\tif _, err := io.ReadFull(dec.r, buf); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Unmarshal into point.\n\t\tvar pb internal.Point\n\t\tif err := proto.Unmarshal(buf, &pb); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// If the point contains stats then read stats and retry.\n\t\tif pb.Stats != nil {\n\t\t\tdec.stats = decodeIteratorStats(pb.Stats)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Decode into point object.\n\t\t*p = *decodeBooleanPoint(&pb)\n\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/point.gen.go.tmpl",
    "content": "package influxql\n\nimport (\n\t\"encoding/binary\"\n\t\"io\"\n\n\t\"github.com/gogo/protobuf/proto\"\n\tinternal \"github.com/influxdata/influxdb/influxql/internal\"\n)\n\n{{range .}}\n\n// {{.Name}}Point represents a point with a {{.Type}} value.\n// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT.\n// See TestPoint_Fields in influxql/point_test.go for more details.\ntype {{.Name}}Point struct {\n\tName string\n\tTags Tags\n\n\tTime  int64\n\tNil   bool\n\tValue {{.Type}}\n\tAux   []interface{}\n\n\t// Total number of points that were combined into this point from an aggregate.\n\t// If this is zero, the point is not the result of an aggregate function.\n\tAggregated uint32\n}\n\nfunc (v *{{.Name}}Point) name() string       { return v.Name }\nfunc (v *{{.Name}}Point) tags() Tags         { return v.Tags }\nfunc (v *{{.Name}}Point) time() int64        { return v.Time }\nfunc (v *{{.Name}}Point) nil() bool          { return v.Nil }\nfunc (v *{{.Name}}Point) value() interface{} {\n\tif v.Nil {\n\t\treturn nil\n\t}\n\treturn v.Value\n}\nfunc (v *{{.Name}}Point) aux() []interface{} { return v.Aux }\n\n// Clone returns a copy of v.\nfunc (v *{{.Name}}Point) Clone() *{{.Name}}Point {\n\tif v == nil {\n\t\treturn nil\n\t}\n\n\tother := *v\n\tif v.Aux != nil {\n\t\tother.Aux = make([]interface{}, len(v.Aux))\n\t\tcopy(other.Aux, v.Aux)\n\t}\n\n\treturn &other\n}\n\n// CopyTo makes a deep copy into the point.\nfunc (v *{{.Name}}Point) CopyTo(other *{{.Name}}Point) {\n\t*other = *v\n\tif v.Aux != nil {\n\t\tother.Aux = make([]interface{}, len(v.Aux))\n\t\tcopy(other.Aux, v.Aux)\n\t}\n}\n\nfunc encode{{.Name}}Point(p *{{.Name}}Point) *internal.Point {\n  return &internal.Point{\n    Name:       proto.String(p.Name),\n    Tags:       proto.String(p.Tags.ID()),\n    Time:       proto.Int64(p.Time),\n    Nil:        proto.Bool(p.Nil),\n    Aux:        encodeAux(p.Aux),\n\t\tAggregated: proto.Uint32(p.Aggregated),\n\n    {{if eq .Name \"Float\"}}\n      FloatValue: 
proto.Float64(p.Value),\n    {{else if eq .Name \"Integer\"}}\n      IntegerValue: proto.Int64(p.Value),\n    {{else if eq .Name \"String\"}}\n      StringValue: proto.String(p.Value),\n    {{else if eq .Name \"Boolean\"}}\n      BooleanValue: proto.Bool(p.Value),\n    {{end}}\n  }\n}\n\nfunc decode{{.Name}}Point(pb *internal.Point) *{{.Name}}Point {\n  return &{{.Name}}Point{\n    Name:       pb.GetName(),\n    Tags:       newTagsID(pb.GetTags()),\n    Time:       pb.GetTime(),\n    Nil:        pb.GetNil(),\n    Aux:        decodeAux(pb.Aux),\n\t\tAggregated: pb.GetAggregated(),\n    Value:      pb.Get{{.Name}}Value(),\n  }\n}\n\n// {{.name}}Points represents a slice of points sortable by value.\ntype {{.name}}Points []{{.Name}}Point\n\nfunc (a {{.name}}Points) Len() int { return len(a) }\nfunc (a {{.name}}Points) Less(i, j int) bool {\n\tif a[i].Time != a[j].Time {\n\t\treturn a[i].Time < a[j].Time\n\t}\n\treturn {{if ne .Name \"Boolean\"}}a[i].Value < a[j].Value{{else}}!a[i].Value{{end}}\n}\nfunc (a {{.name}}Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\n// {{.name}}PointsByValue represents a slice of points sortable by value.\ntype {{.name}}PointsByValue []{{.Name}}Point\n\nfunc (a {{.name}}PointsByValue) Len() int           { return len(a) }\n{{if eq .Name \"Boolean\"}}\nfunc (a {{.name}}PointsByValue) Less(i, j int) bool { return !a[i].Value }\n{{else}}\nfunc (a {{.name}}PointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value }\n{{end}}\nfunc (a {{.name}}PointsByValue) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\n\n// {{.name}}PointsByTime represents a slice of points sortable by value.\ntype {{.name}}PointsByTime []{{.Name}}Point\n\nfunc (a {{.name}}PointsByTime) Len() int           { return len(a) }\nfunc (a {{.name}}PointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time }\nfunc (a {{.name}}PointsByTime) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\n\n// {{.name}}PointByFunc represents a slice of points sortable by a 
function.\ntype {{.name}}PointsByFunc struct {\n\tpoints []{{.Name}}Point\n\tcmp    func(a, b *{{.Name}}Point) bool\n}\n\nfunc (a *{{.name}}PointsByFunc) Len() int           { return len(a.points) }\nfunc (a *{{.name}}PointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) }\nfunc (a *{{.name}}PointsByFunc) Swap(i, j int)      { a.points[i], a.points[j] = a.points[j], a.points[i] }\n\nfunc (a *{{.name}}PointsByFunc) Push(x interface{}) {\n\ta.points = append(a.points, x.({{.Name}}Point))\n}\n\nfunc (a *{{.name}}PointsByFunc) Pop() interface{} {\n\tp := a.points[len(a.points)-1]\n\ta.points = a.points[:len(a.points)-1]\n\treturn p\n}\n\nfunc {{.name}}PointsSortBy(points []{{.Name}}Point, cmp func(a, b *{{.Name}}Point) bool) *{{.name}}PointsByFunc {\n\treturn &{{.name}}PointsByFunc{\n\t\tpoints: points,\n\t\tcmp: cmp,\n\t}\n}\n\n// {{.Name}}PointEncoder encodes {{.Name}}Point points to a writer.\ntype {{.Name}}PointEncoder struct {\n\tw io.Writer\n}\n\n// New{{.Name}}PointEncoder returns a new instance of {{.Name}}PointEncoder that writes to w.\nfunc New{{.Name}}PointEncoder(w io.Writer) *{{.Name}}PointEncoder {\n\treturn &{{.Name}}PointEncoder{w: w}\n}\n\n// Encode{{.Name}}Point marshals and writes p to the underlying writer.\nfunc (enc *{{.Name}}PointEncoder) Encode{{.Name}}Point(p *{{.Name}}Point) error {\n\t// Marshal to bytes.\n\tbuf, err := proto.Marshal(encode{{.Name}}Point(p))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Write the length.\n\tif err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil {\n\t\treturn err\n\t}\n\n\t// Write the encoded point.\n\tif _, err := enc.w.Write(buf); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\n// {{.Name}}PointDecoder decodes {{.Name}}Point points from a reader.\ntype {{.Name}}PointDecoder struct {\n\tr     io.Reader\n\tstats IteratorStats\n}\n\n// New{{.Name}}PointDecoder returns a new instance of {{.Name}}PointDecoder that reads from r.\nfunc 
New{{.Name}}PointDecoder(r io.Reader) *{{.Name}}PointDecoder {\n\treturn &{{.Name}}PointDecoder{r: r}\n}\n\n// Stats returns iterator stats embedded within the stream.\nfunc (dec *{{.Name}}PointDecoder) Stats() IteratorStats { return dec.stats }\n\n// Decode{{.Name}}Point reads from the underlying reader and unmarshals into p.\nfunc (dec *{{.Name}}PointDecoder) Decode{{.Name}}Point(p *{{.Name}}Point) error {\n\tfor {\n\t\t// Read length.\n\t\tvar sz uint32\n\t\tif err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Read point data.\n\t\tbuf := make([]byte, sz)\n\t\tif _, err := io.ReadFull(dec.r, buf); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Unmarshal into point.\n\t\tvar pb internal.Point\n\t\tif err := proto.Unmarshal(buf, &pb); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// If the point contains stats then read stats and retry.\n\t\tif pb.Stats != nil {\n\t\t\tdec.stats = decodeIteratorStats(pb.Stats)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Decode into point object.\n\t\t*p = *decode{{.Name}}Point(&pb)\n\n\t\treturn nil\n\t}\n}\n\n{{end}}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/point.go",
    "content": "package influxql\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"sort\"\n\n\t\"github.com/gogo/protobuf/proto\"\n\tinternal \"github.com/influxdata/influxdb/influxql/internal\"\n)\n\n// ZeroTime is the Unix nanosecond timestamp for no time.\n// This time is not used by the query engine or the storage engine as a valid time.\nconst ZeroTime = int64(math.MinInt64)\n\n// Point represents a value in a series that occurred at a given time.\ntype Point interface {\n\t// Name and tags uniquely identify the series the value belongs to.\n\tname() string\n\ttags() Tags\n\n\t// The time that the value occurred at.\n\ttime() int64\n\n\t// The value at the given time.\n\tvalue() interface{}\n\n\t// Auxillary values passed along with the value.\n\taux() []interface{}\n}\n\n// Points represents a list of points.\ntype Points []Point\n\n// Clone returns a deep copy of a.\nfunc (a Points) Clone() []Point {\n\tother := make([]Point, len(a))\n\tfor i, p := range a {\n\t\tif p == nil {\n\t\t\tother[i] = nil\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch p := p.(type) {\n\t\tcase *FloatPoint:\n\t\t\tother[i] = p.Clone()\n\t\tcase *IntegerPoint:\n\t\t\tother[i] = p.Clone()\n\t\tcase *StringPoint:\n\t\t\tother[i] = p.Clone()\n\t\tcase *BooleanPoint:\n\t\t\tother[i] = p.Clone()\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"unable to clone point: %T\", p))\n\t\t}\n\t}\n\treturn other\n}\n\n// Tags represent a map of keys and values.\n// It memoizes its key so it can be used efficiently during query execution.\ntype Tags struct {\n\tid string\n\tm  map[string]string\n}\n\n// NewTags returns a new instance of Tags.\nfunc NewTags(m map[string]string) Tags {\n\tif len(m) == 0 {\n\t\treturn Tags{}\n\t}\n\treturn Tags{\n\t\tid: string(encodeTags(m)),\n\t\tm:  m,\n\t}\n}\n\n// newTagsID returns a new instance of Tags by parsing the given tag ID.\nfunc newTagsID(id string) Tags {\n\tm := decodeTags([]byte(id))\n\tif len(m) == 0 {\n\t\treturn 
Tags{}\n\t}\n\treturn Tags{id: id, m: m}\n}\n\n// ID returns the string identifier for the tags.\nfunc (t Tags) ID() string { return t.id }\n\n// KeyValues returns the underlying map for the tags.\nfunc (t Tags) KeyValues() map[string]string { return t.m }\n\n// Keys returns a sorted list of all keys on the tag.\nfunc (t *Tags) Keys() []string {\n\tif t == nil {\n\t\treturn nil\n\t}\n\n\tvar a []string\n\tfor k := range t.m {\n\t\ta = append(a, k)\n\t}\n\tsort.Strings(a)\n\treturn a\n}\n\n// Value returns the value for a given key.\nfunc (t *Tags) Value(k string) string {\n\tif t == nil {\n\t\treturn \"\"\n\t}\n\treturn t.m[k]\n}\n\n// Subset returns a new tags object with a subset of the keys.\nfunc (t *Tags) Subset(keys []string) Tags {\n\tif len(keys) == 0 {\n\t\treturn Tags{}\n\t}\n\n\t// If keys match existing keys, simply return this tagset.\n\tif keysMatch(t.m, keys) {\n\t\treturn *t\n\t}\n\n\t// Otherwise create new tag set.\n\tm := make(map[string]string, len(keys))\n\tfor _, k := range keys {\n\t\tm[k] = t.m[k]\n\t}\n\treturn NewTags(m)\n}\n\n// Equals returns true if t equals other.\nfunc (t *Tags) Equals(other *Tags) bool {\n\tif t == nil && other == nil {\n\t\treturn true\n\t} else if t == nil || other == nil {\n\t\treturn false\n\t}\n\treturn t.id == other.id\n}\n\n// keysMatch returns true if m has exactly the same keys as listed in keys.\nfunc keysMatch(m map[string]string, keys []string) bool {\n\tif len(keys) != len(m) {\n\t\treturn false\n\t}\n\n\tfor _, k := range keys {\n\t\tif _, ok := m[k]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\n// encodeTags converts a map of strings to an identifier.\nfunc encodeTags(m map[string]string) []byte {\n\t// Empty maps marshal to empty bytes.\n\tif len(m) == 0 {\n\t\treturn nil\n\t}\n\n\t// Extract keys and determine final size.\n\tsz := (len(m) * 2) - 1 // separators\n\tkeys := make([]string, 0, len(m))\n\tfor k, v := range m {\n\t\tkeys = append(keys, k)\n\t\tsz += len(k) + 
len(v)\n\t}\n\tsort.Strings(keys)\n\n\t// Generate marshaled bytes.\n\tb := make([]byte, sz)\n\tbuf := b\n\tfor _, k := range keys {\n\t\tcopy(buf, k)\n\t\tbuf[len(k)] = '\\x00'\n\t\tbuf = buf[len(k)+1:]\n\t}\n\tfor i, k := range keys {\n\t\tv := m[k]\n\t\tcopy(buf, v)\n\t\tif i < len(keys)-1 {\n\t\t\tbuf[len(v)] = '\\x00'\n\t\t\tbuf = buf[len(v)+1:]\n\t\t}\n\t}\n\treturn b\n}\n\n// decodeTags parses an identifier into a map of tags.\nfunc decodeTags(id []byte) map[string]string {\n\ta := bytes.Split(id, []byte{'\\x00'})\n\n\t// There must be an even number of segments.\n\tif len(a) > 0 && len(a)%2 == 1 {\n\t\ta = a[:len(a)-1]\n\t}\n\n\t// Return nil if there are no segments.\n\tif len(a) == 0 {\n\t\treturn nil\n\t}\n\tmid := len(a) / 2\n\n\t// Decode key/value tags.\n\tm := make(map[string]string)\n\tfor i := 0; i < mid; i++ {\n\t\tm[string(a[i])] = string(a[i+mid])\n\t}\n\treturn m\n}\n\nfunc encodeAux(aux []interface{}) []*internal.Aux {\n\tpb := make([]*internal.Aux, len(aux))\n\tfor i := range aux {\n\t\tswitch v := aux[i].(type) {\n\t\tcase float64:\n\t\t\tpb[i] = &internal.Aux{DataType: proto.Int32(Float), FloatValue: proto.Float64(v)}\n\t\tcase *float64:\n\t\t\tpb[i] = &internal.Aux{DataType: proto.Int32(Float)}\n\t\tcase int64:\n\t\t\tpb[i] = &internal.Aux{DataType: proto.Int32(Integer), IntegerValue: proto.Int64(v)}\n\t\tcase *int64:\n\t\t\tpb[i] = &internal.Aux{DataType: proto.Int32(Integer)}\n\t\tcase string:\n\t\t\tpb[i] = &internal.Aux{DataType: proto.Int32(String), StringValue: proto.String(v)}\n\t\tcase *string:\n\t\t\tpb[i] = &internal.Aux{DataType: proto.Int32(String)}\n\t\tcase bool:\n\t\t\tpb[i] = &internal.Aux{DataType: proto.Int32(Boolean), BooleanValue: proto.Bool(v)}\n\t\tcase *bool:\n\t\t\tpb[i] = &internal.Aux{DataType: proto.Int32(Boolean)}\n\t\tdefault:\n\t\t\tpb[i] = &internal.Aux{DataType: proto.Int32(int32(Unknown))}\n\t\t}\n\t}\n\treturn pb\n}\n\nfunc decodeAux(pb []*internal.Aux) []interface{} {\n\tif len(pb) == 0 {\n\t\treturn 
nil\n\t}\n\n\taux := make([]interface{}, len(pb))\n\tfor i := range pb {\n\t\tswitch pb[i].GetDataType() {\n\t\tcase Float:\n\t\t\tif pb[i].FloatValue != nil {\n\t\t\t\taux[i] = *pb[i].FloatValue\n\t\t\t} else {\n\t\t\t\taux[i] = (*float64)(nil)\n\t\t\t}\n\t\tcase Integer:\n\t\t\tif pb[i].IntegerValue != nil {\n\t\t\t\taux[i] = *pb[i].IntegerValue\n\t\t\t} else {\n\t\t\t\taux[i] = (*int64)(nil)\n\t\t\t}\n\t\tcase String:\n\t\t\tif pb[i].StringValue != nil {\n\t\t\t\taux[i] = *pb[i].StringValue\n\t\t\t} else {\n\t\t\t\taux[i] = (*string)(nil)\n\t\t\t}\n\t\tcase Boolean:\n\t\t\tif pb[i].BooleanValue != nil {\n\t\t\t\taux[i] = *pb[i].BooleanValue\n\t\t\t} else {\n\t\t\t\taux[i] = (*bool)(nil)\n\t\t\t}\n\t\tdefault:\n\t\t\taux[i] = nil\n\t\t}\n\t}\n\treturn aux\n}\n\nfunc cloneAux(src []interface{}) []interface{} {\n\tif src == nil {\n\t\treturn src\n\t}\n\tdest := make([]interface{}, len(src))\n\tcopy(dest, src)\n\treturn dest\n}\n\n// PointDecoder decodes generic points from a reader.\ntype PointDecoder struct {\n\tr     io.Reader\n\tstats IteratorStats\n}\n\n// NewPointDecoder returns a new instance of PointDecoder that reads from r.\nfunc NewPointDecoder(r io.Reader) *PointDecoder {\n\treturn &PointDecoder{r: r}\n}\n\n// Stats returns iterator stats embedded within the stream.\nfunc (dec *PointDecoder) Stats() IteratorStats { return dec.stats }\n\n// DecodePoint reads from the underlying reader and unmarshals into p.\nfunc (dec *PointDecoder) DecodePoint(p *Point) error {\n\tfor {\n\t\t// Read length.\n\t\tvar sz uint32\n\t\tif err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Read point data.\n\t\tbuf := make([]byte, sz)\n\t\tif _, err := io.ReadFull(dec.r, buf); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Unmarshal into point.\n\t\tvar pb internal.Point\n\t\tif err := proto.Unmarshal(buf, &pb); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// If the point contains stats then read stats and retry.\n\t\tif pb.Stats 
!= nil {\n\t\t\tdec.stats = decodeIteratorStats(pb.Stats)\n\t\t\tcontinue\n\t\t}\n\n\t\tif pb.IntegerValue != nil {\n\t\t\t*p = decodeIntegerPoint(&pb)\n\t\t} else if pb.StringValue != nil {\n\t\t\t*p = decodeStringPoint(&pb)\n\t\t} else if pb.BooleanValue != nil {\n\t\t\t*p = decodeBooleanPoint(&pb)\n\t\t} else {\n\t\t\t*p = decodeFloatPoint(&pb)\n\t\t}\n\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/point_test.go",
    "content": "package influxql_test\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/pkg/deep\"\n)\n\nfunc TestPoint_Clone_Float(t *testing.T) {\n\tp := &influxql.FloatPoint{\n\t\tName:  \"cpu\",\n\t\tTags:  ParseTags(\"host=server01\"),\n\t\tTime:  5,\n\t\tValue: 2,\n\t\tAux:   []interface{}{float64(45)},\n\t}\n\tc := p.Clone()\n\tif p == c {\n\t\tt.Errorf(\"clone has the same address as the original: %v == %v\", p, c)\n\t}\n\tif !deep.Equal(p, c) {\n\t\tt.Errorf(\"mismatched point: %s\", spew.Sdump(c))\n\t}\n\tif &p.Aux[0] == &c.Aux[0] {\n\t\tt.Errorf(\"aux values share the same address: %v == %v\", p.Aux, c.Aux)\n\t} else if !deep.Equal(p.Aux, c.Aux) {\n\t\tt.Errorf(\"mismatched aux fields: %v != %v\", p.Aux, c.Aux)\n\t}\n}\n\nfunc TestPoint_Clone_Integer(t *testing.T) {\n\tp := &influxql.IntegerPoint{\n\t\tName:  \"cpu\",\n\t\tTags:  ParseTags(\"host=server01\"),\n\t\tTime:  5,\n\t\tValue: 2,\n\t\tAux:   []interface{}{float64(45)},\n\t}\n\tc := p.Clone()\n\tif p == c {\n\t\tt.Errorf(\"clone has the same address as the original: %v == %v\", p, c)\n\t}\n\tif !deep.Equal(p, c) {\n\t\tt.Errorf(\"mismatched point: %s\", spew.Sdump(c))\n\t}\n\tif &p.Aux[0] == &c.Aux[0] {\n\t\tt.Errorf(\"aux values share the same address: %v == %v\", p.Aux, c.Aux)\n\t} else if !deep.Equal(p.Aux, c.Aux) {\n\t\tt.Errorf(\"mismatched aux fields: %v != %v\", p.Aux, c.Aux)\n\t}\n}\n\nfunc TestPoint_Clone_String(t *testing.T) {\n\tp := &influxql.StringPoint{\n\t\tName:  \"cpu\",\n\t\tTags:  ParseTags(\"host=server01\"),\n\t\tTime:  5,\n\t\tValue: \"clone\",\n\t\tAux:   []interface{}{float64(45)},\n\t}\n\tc := p.Clone()\n\tif p == c {\n\t\tt.Errorf(\"clone has the same address as the original: %v == %v\", p, c)\n\t}\n\tif !deep.Equal(p, c) {\n\t\tt.Errorf(\"mismatched point: %s\", spew.Sdump(c))\n\t}\n\tif &p.Aux[0] == &c.Aux[0] {\n\t\tt.Errorf(\"aux values 
share the same address: %v == %v\", p.Aux, c.Aux)\n\t} else if !deep.Equal(p.Aux, c.Aux) {\n\t\tt.Errorf(\"mismatched aux fields: %v != %v\", p.Aux, c.Aux)\n\t}\n}\n\nfunc TestPoint_Clone_Boolean(t *testing.T) {\n\tp := &influxql.BooleanPoint{\n\t\tName:  \"cpu\",\n\t\tTags:  ParseTags(\"host=server01\"),\n\t\tTime:  5,\n\t\tValue: true,\n\t\tAux:   []interface{}{float64(45)},\n\t}\n\tc := p.Clone()\n\tif p == c {\n\t\tt.Errorf(\"clone has the same address as the original: %v == %v\", p, c)\n\t}\n\tif !deep.Equal(p, c) {\n\t\tt.Errorf(\"mismatched point: %s\", spew.Sdump(c))\n\t}\n\tif &p.Aux[0] == &c.Aux[0] {\n\t\tt.Errorf(\"aux values share the same address: %v == %v\", p.Aux, c.Aux)\n\t} else if !deep.Equal(p.Aux, c.Aux) {\n\t\tt.Errorf(\"mismatched aux fields: %v != %v\", p.Aux, c.Aux)\n\t}\n}\n\nfunc TestPoint_Clone_Nil(t *testing.T) {\n\tvar fp *influxql.FloatPoint\n\tif p := fp.Clone(); p != nil {\n\t\tt.Errorf(\"expected nil, got %v\", p)\n\t}\n\n\tvar ip *influxql.IntegerPoint\n\tif p := ip.Clone(); p != nil {\n\t\tt.Errorf(\"expected nil, got %v\", p)\n\t}\n\n\tvar sp *influxql.StringPoint\n\tif p := sp.Clone(); p != nil {\n\t\tt.Errorf(\"expected nil, got %v\", p)\n\t}\n\n\tvar bp *influxql.BooleanPoint\n\tif p := bp.Clone(); p != nil {\n\t\tt.Errorf(\"expected nil, got %v\", p)\n\t}\n}\n\n// TestPoint_Fields ensures that no additional fields are added to the point structs.\n// This struct is very sensitive and can effect performance unless handled carefully.\n// To avoid the struct becoming a dumping ground for every function that needs to store\n// miscellaneous information, this test is meant to ensure that new fields don't slip\n// into the struct.\nfunc TestPoint_Fields(t *testing.T) {\n\tallowedFields := map[string]bool{\n\t\t\"Name\":       true,\n\t\t\"Tags\":       true,\n\t\t\"Time\":       true,\n\t\t\"Nil\":        true,\n\t\t\"Value\":      true,\n\t\t\"Aux\":        true,\n\t\t\"Aggregated\": true,\n\t}\n\n\tfor _, typ := range 
[]reflect.Type{\n\t\treflect.TypeOf(influxql.FloatPoint{}),\n\t\treflect.TypeOf(influxql.IntegerPoint{}),\n\t\treflect.TypeOf(influxql.StringPoint{}),\n\t\treflect.TypeOf(influxql.BooleanPoint{}),\n\t} {\n\t\tf, ok := typ.FieldByNameFunc(func(name string) bool {\n\t\t\treturn !allowedFields[name]\n\t\t})\n\t\tif ok {\n\t\t\tt.Errorf(\"found an unallowed field in %s: %s %s\", typ, f.Name, f.Type)\n\t\t}\n\t}\n}\n\n// Ensure that tags can return a unique id.\nfunc TestTags_ID(t *testing.T) {\n\ttags := influxql.NewTags(map[string]string{\"foo\": \"bar\", \"baz\": \"bat\"})\n\tif id := tags.ID(); id != \"baz\\x00foo\\x00bat\\x00bar\" {\n\t\tt.Fatalf(\"unexpected id: %q\", id)\n\t}\n}\n\n// Ensure that a subset can be created from a tag set.\nfunc TestTags_Subset(t *testing.T) {\n\ttags := influxql.NewTags(map[string]string{\"a\": \"0\", \"b\": \"1\", \"c\": \"2\"})\n\tsubset := tags.Subset([]string{\"b\", \"c\", \"d\"})\n\tif keys := subset.Keys(); !reflect.DeepEqual(keys, []string{\"b\", \"c\", \"d\"}) {\n\t\tt.Fatalf(\"unexpected keys: %+v\", keys)\n\t} else if v := subset.Value(\"a\"); v != \"\" {\n\t\tt.Fatalf(\"unexpected 'a' value: %s\", v)\n\t} else if v := subset.Value(\"b\"); v != \"1\" {\n\t\tt.Fatalf(\"unexpected 'b' value: %s\", v)\n\t} else if v := subset.Value(\"c\"); v != \"2\" {\n\t\tt.Fatalf(\"unexpected 'c' value: %s\", v)\n\t} else if v := subset.Value(\"d\"); v != \"\" {\n\t\tt.Fatalf(\"unexpected 'd' value: %s\", v)\n\t}\n}\n\n// ParseTags returns an instance of Tags for a comma-delimited list of key/values.\nfunc ParseTags(s string) influxql.Tags {\n\tm := make(map[string]string)\n\tfor _, kv := range strings.Split(s, \",\") {\n\t\ta := strings.Split(kv, \"=\")\n\t\tm[a[0]] = a[1]\n\t}\n\treturn influxql.NewTags(m)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/query_executor.go",
    "content": "package influxql\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"runtime/debug\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/uber-go/zap\"\n)\n\nvar (\n\t// ErrInvalidQuery is returned when executing an unknown query type.\n\tErrInvalidQuery = errors.New(\"invalid query\")\n\n\t// ErrNotExecuted is returned when a statement is not executed in a query.\n\t// This can occur when a previous statement in the same query has errored.\n\tErrNotExecuted = errors.New(\"not executed\")\n\n\t// ErrQueryInterrupted is an error returned when the query is interrupted.\n\tErrQueryInterrupted = errors.New(\"query interrupted\")\n\n\t// ErrQueryAborted is an error returned when the query is aborted.\n\tErrQueryAborted = errors.New(\"query aborted\")\n\n\t// ErrQueryEngineShutdown is an error sent when the query cannot be\n\t// created because the query engine was shutdown.\n\tErrQueryEngineShutdown = errors.New(\"query engine shutdown\")\n\n\t// ErrQueryTimeoutLimitExceeded is an error when a query hits the max time allowed to run.\n\tErrQueryTimeoutLimitExceeded = errors.New(\"query-timeout limit exceeded\")\n)\n\n// Statistics for the QueryExecutor\nconst (\n\tstatQueriesActive          = \"queriesActive\"   // Number of queries currently being executed\n\tstatQueriesExecuted        = \"queriesExecuted\" // Number of queries that have been executed (started).\n\tstatQueriesFinished        = \"queriesFinished\" // Number of queries that have finished.\n\tstatQueryExecutionDuration = \"queryDurationNs\" // Total (wall) time spent executing queries\n)\n\n// ErrDatabaseNotFound returns a database not found error for the given database name.\nfunc ErrDatabaseNotFound(name string) error { return fmt.Errorf(\"database not found: %s\", name) }\n\n// ErrMaxSelectPointsLimitExceeded is an error when a query hits the maximum number of points.\nfunc ErrMaxSelectPointsLimitExceeded(n, limit int) error {\n\treturn 
fmt.Errorf(\"max-select-point limit exceeed: (%d/%d)\", n, limit)\n}\n\n// ErrMaxConcurrentQueriesLimitExceeded is an error when a query cannot be run\n// because the maximum number of queries has been reached.\nfunc ErrMaxConcurrentQueriesLimitExceeded(n, limit int) error {\n\treturn fmt.Errorf(\"max-concurrent-queries limit exceeded(%d, %d)\", n, limit)\n}\n\n// Authorizer reports whether certain operations are authorized.\ntype Authorizer interface {\n\t// AuthorizeDatabase indicates whether the given Privilege is authorized on the database with the given name.\n\tAuthorizeDatabase(p Privilege, name string) bool\n\n\t// AuthorizeQuery returns an error if the query cannot be executed\n\tAuthorizeQuery(database string, query *Query) error\n\n\t// AuthorizeSeriesRead determines if a series is authorized for reading\n\tAuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool\n\n\t// AuthorizeSeriesWrite determines if a series is authorized for writing\n\tAuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool\n}\n\n// OpenAuthorizer is the Authorizer used when authorization is disabled.\n// It allows all operations.\ntype OpenAuthorizer struct{}\n\nvar _ Authorizer = OpenAuthorizer{}\n\n// AuthorizeDatabase returns true to allow any operation on a database.\nfunc (_ OpenAuthorizer) AuthorizeDatabase(Privilege, string) bool { return true }\n\nfunc (_ OpenAuthorizer) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool {\n\treturn true\n}\n\nfunc (_ OpenAuthorizer) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool {\n\treturn true\n}\n\nfunc (_ OpenAuthorizer) AuthorizeQuery(_ string, _ *Query) error { return nil }\n\n// ExecutionOptions contains the options for executing a query.\ntype ExecutionOptions struct {\n\t// The database the query is running against.\n\tDatabase string\n\n\t// How to determine whether the query is allowed to execute,\n\t// what resources 
can be returned in SHOW queries, etc.\n\tAuthorizer Authorizer\n\n\t// The requested maximum number of points to return in each result.\n\tChunkSize int\n\n\t// If this query is being executed in a read-only context.\n\tReadOnly bool\n\n\t// Node to execute on.\n\tNodeID uint64\n\n\t// Quiet suppresses non-essential output from the query executor.\n\tQuiet bool\n\n\t// AbortCh is a channel that signals when results are no longer desired by the caller.\n\tAbortCh <-chan struct{}\n}\n\n// ExecutionContext contains state that the query is currently executing with.\ntype ExecutionContext struct {\n\t// The statement ID of the executing query.\n\tStatementID int\n\n\t// The query ID of the executing query.\n\tQueryID uint64\n\n\t// The query task information available to the StatementExecutor.\n\tQuery *QueryTask\n\n\t// Output channel where results and errors should be sent.\n\tResults chan *Result\n\n\t// Hold the query executor's logger.\n\tLog zap.Logger\n\n\t// A channel that is closed when the query is interrupted.\n\tInterruptCh <-chan struct{}\n\n\t// Options used to start this query.\n\tExecutionOptions\n}\n\n// send sends a Result to the Results channel and will exit if the query has\n// been aborted.\nfunc (ctx *ExecutionContext) send(result *Result) error {\n\tselect {\n\tcase <-ctx.AbortCh:\n\t\treturn ErrQueryAborted\n\tcase ctx.Results <- result:\n\t}\n\treturn nil\n}\n\n// Send sends a Result to the Results channel and will exit if the query has\n// been interrupted or aborted.\nfunc (ctx *ExecutionContext) Send(result *Result) error {\n\tselect {\n\tcase <-ctx.InterruptCh:\n\t\treturn ErrQueryInterrupted\n\tcase <-ctx.AbortCh:\n\t\treturn ErrQueryAborted\n\tcase ctx.Results <- result:\n\t}\n\treturn nil\n}\n\n// StatementExecutor executes a statement within the QueryExecutor.\ntype StatementExecutor interface {\n\t// ExecuteStatement executes a statement. 
Results should be sent to the\n\t// results channel in the ExecutionContext.\n\tExecuteStatement(stmt Statement, ctx ExecutionContext) error\n}\n\n// StatementNormalizer normalizes a statement before it is executed.\ntype StatementNormalizer interface {\n\t// NormalizeStatement adds a default database and policy to the\n\t// measurements in the statement.\n\tNormalizeStatement(stmt Statement, database string) error\n}\n\n// QueryExecutor executes every statement in an Query.\ntype QueryExecutor struct {\n\t// Used for executing a statement in the query.\n\tStatementExecutor StatementExecutor\n\n\t// Used for tracking running queries.\n\tTaskManager *TaskManager\n\n\t// Logger to use for all logging.\n\t// Defaults to discarding all log output.\n\tLogger zap.Logger\n\n\t// expvar-based stats.\n\tstats *QueryStatistics\n}\n\n// NewQueryExecutor returns a new instance of QueryExecutor.\nfunc NewQueryExecutor() *QueryExecutor {\n\treturn &QueryExecutor{\n\t\tTaskManager: NewTaskManager(),\n\t\tLogger:      zap.New(zap.NullEncoder()),\n\t\tstats:       &QueryStatistics{},\n\t}\n}\n\n// QueryStatistics keeps statistics related to the QueryExecutor.\ntype QueryStatistics struct {\n\tActiveQueries          int64\n\tExecutedQueries        int64\n\tFinishedQueries        int64\n\tQueryExecutionDuration int64\n}\n\n// Statistics returns statistics for periodic monitoring.\nfunc (e *QueryExecutor) Statistics(tags map[string]string) []models.Statistic {\n\treturn []models.Statistic{{\n\t\tName: \"queryExecutor\",\n\t\tTags: tags,\n\t\tValues: map[string]interface{}{\n\t\t\tstatQueriesActive:          atomic.LoadInt64(&e.stats.ActiveQueries),\n\t\t\tstatQueriesExecuted:        atomic.LoadInt64(&e.stats.ExecutedQueries),\n\t\t\tstatQueriesFinished:        atomic.LoadInt64(&e.stats.FinishedQueries),\n\t\t\tstatQueryExecutionDuration: atomic.LoadInt64(&e.stats.QueryExecutionDuration),\n\t\t},\n\t}}\n}\n\n// Close kills all running queries and prevents new queries from being 
attached.\nfunc (e *QueryExecutor) Close() error {\n\treturn e.TaskManager.Close()\n}\n\n// SetLogOutput sets the writer to which all logs are written. It must not be\n// called after Open is called.\nfunc (e *QueryExecutor) WithLogger(log zap.Logger) {\n\te.Logger = log.With(zap.String(\"service\", \"query\"))\n\te.TaskManager.Logger = e.Logger\n}\n\n// ExecuteQuery executes each statement within a query.\nfunc (e *QueryExecutor) ExecuteQuery(query *Query, opt ExecutionOptions, closing chan struct{}) <-chan *Result {\n\tresults := make(chan *Result)\n\tgo e.executeQuery(query, opt, closing, results)\n\treturn results\n}\n\nfunc (e *QueryExecutor) executeQuery(query *Query, opt ExecutionOptions, closing <-chan struct{}, results chan *Result) {\n\tdefer close(results)\n\tdefer e.recover(query, results)\n\n\tatomic.AddInt64(&e.stats.ActiveQueries, 1)\n\tatomic.AddInt64(&e.stats.ExecutedQueries, 1)\n\tdefer func(start time.Time) {\n\t\tatomic.AddInt64(&e.stats.ActiveQueries, -1)\n\t\tatomic.AddInt64(&e.stats.FinishedQueries, 1)\n\t\tatomic.AddInt64(&e.stats.QueryExecutionDuration, time.Since(start).Nanoseconds())\n\t}(time.Now())\n\n\tqid, task, err := e.TaskManager.AttachQuery(query, opt.Database, closing)\n\tif err != nil {\n\t\tselect {\n\t\tcase results <- &Result{Err: err}:\n\t\tcase <-opt.AbortCh:\n\t\t}\n\t\treturn\n\t}\n\tdefer e.TaskManager.KillQuery(qid)\n\n\t// Setup the execution context that will be used when executing statements.\n\tctx := ExecutionContext{\n\t\tQueryID:          qid,\n\t\tQuery:            task,\n\t\tResults:          results,\n\t\tLog:              e.Logger,\n\t\tInterruptCh:      task.closing,\n\t\tExecutionOptions: opt,\n\t}\n\n\tvar i int\nLOOP:\n\tfor ; i < len(query.Statements); i++ {\n\t\tctx.StatementID = i\n\t\tstmt := query.Statements[i]\n\n\t\t// If a default database wasn't passed in by the caller, check the statement.\n\t\tdefaultDB := opt.Database\n\t\tif defaultDB == \"\" {\n\t\t\tif s, ok := stmt.(HasDefaultDatabase); ok 
{\n\t\t\t\tdefaultDB = s.DefaultDatabase()\n\t\t\t}\n\t\t}\n\n\t\t// Do not let queries manually use the system measurements. If we find\n\t\t// one, return an error. This prevents a person from using the\n\t\t// measurement incorrectly and causing a panic.\n\t\tif stmt, ok := stmt.(*SelectStatement); ok {\n\t\t\tfor _, s := range stmt.Sources {\n\t\t\t\tswitch s := s.(type) {\n\t\t\t\tcase *Measurement:\n\t\t\t\t\tif IsSystemName(s.Name) {\n\t\t\t\t\t\tcommand := \"the appropriate meta command\"\n\t\t\t\t\t\tswitch s.Name {\n\t\t\t\t\t\tcase \"_fieldKeys\":\n\t\t\t\t\t\t\tcommand = \"SHOW FIELD KEYS\"\n\t\t\t\t\t\tcase \"_measurements\":\n\t\t\t\t\t\t\tcommand = \"SHOW MEASUREMENTS\"\n\t\t\t\t\t\tcase \"_series\":\n\t\t\t\t\t\t\tcommand = \"SHOW SERIES\"\n\t\t\t\t\t\tcase \"_tagKeys\":\n\t\t\t\t\t\t\tcommand = \"SHOW TAG KEYS\"\n\t\t\t\t\t\tcase \"_tags\":\n\t\t\t\t\t\t\tcommand = \"SHOW TAG VALUES\"\n\t\t\t\t\t\t}\n\t\t\t\t\t\tresults <- &Result{\n\t\t\t\t\t\t\tErr: fmt.Errorf(\"unable to use system source '%s': use %s instead\", s.Name, command),\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak LOOP\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Rewrite statements, if necessary.\n\t\t// This can occur on meta read statements which convert to SELECT statements.\n\t\tnewStmt, err := RewriteStatement(stmt)\n\t\tif err != nil {\n\t\t\tresults <- &Result{Err: err}\n\t\t\tbreak\n\t\t}\n\t\tstmt = newStmt\n\n\t\t// Normalize each statement if possible.\n\t\tif normalizer, ok := e.StatementExecutor.(StatementNormalizer); ok {\n\t\t\tif err := normalizer.NormalizeStatement(stmt, defaultDB); err != nil {\n\t\t\t\tif err := ctx.send(&Result{Err: err}); err == ErrQueryAborted {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// Log each normalized statement.\n\t\tif !ctx.Quiet {\n\t\t\te.Logger.Info(stmt.String())\n\t\t}\n\n\t\t// Send any other statements to the underlying statement executor.\n\t\terr = e.StatementExecutor.ExecuteStatement(stmt, ctx)\n\t\tif err == 
ErrQueryInterrupted {\n\t\t\t// Query was interrupted so retrieve the real interrupt error from\n\t\t\t// the query task if there is one.\n\t\t\tif qerr := task.Error(); qerr != nil {\n\t\t\t\terr = qerr\n\t\t\t}\n\t\t}\n\n\t\t// Send an error for this result if it failed for some reason.\n\t\tif err != nil {\n\t\t\tif err := ctx.send(&Result{\n\t\t\t\tStatementID: i,\n\t\t\t\tErr:         err,\n\t\t\t}); err == ErrQueryAborted {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// Stop after the first error.\n\t\t\tbreak\n\t\t}\n\n\t\t// Check if the query was interrupted during an uninterruptible statement.\n\t\tinterrupted := false\n\t\tif ctx.InterruptCh != nil {\n\t\t\tselect {\n\t\t\tcase <-ctx.InterruptCh:\n\t\t\t\tinterrupted = true\n\t\t\tdefault:\n\t\t\t\t// Query has not been interrupted.\n\t\t\t}\n\t\t}\n\n\t\tif interrupted {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Send error results for any statements which were not executed.\n\tfor ; i < len(query.Statements)-1; i++ {\n\t\tif err := ctx.send(&Result{\n\t\t\tStatementID: i,\n\t\t\tErr:         ErrNotExecuted,\n\t\t}); err == ErrQueryAborted {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (e *QueryExecutor) recover(query *Query, results chan *Result) {\n\tif err := recover(); err != nil {\n\t\te.Logger.Error(fmt.Sprintf(\"%s [panic:%s] %s\", query.String(), err, debug.Stack()))\n\t\tresults <- &Result{\n\t\t\tStatementID: -1,\n\t\t\tErr:         fmt.Errorf(\"%s [panic:%s]\", query.String(), err),\n\t\t}\n\t}\n}\n\n// QueryMonitorFunc is a function that will be called to check if a query\n// is currently healthy. 
If the query needs to be interrupted for some reason,\n// the error should be returned by this function.\ntype QueryMonitorFunc func(<-chan struct{}) error\n\n// QueryTask is the internal data structure for managing queries.\n// For the public use data structure that gets returned, see QueryTask.\ntype QueryTask struct {\n\tquery     string\n\tdatabase  string\n\tstartTime time.Time\n\tclosing   chan struct{}\n\tmonitorCh chan error\n\terr       error\n\tmu        sync.Mutex\n}\n\n// Monitor starts a new goroutine that will monitor a query. The function\n// will be passed in a channel to signal when the query has been finished\n// normally. If the function returns with an error and the query is still\n// running, the query will be terminated.\nfunc (q *QueryTask) Monitor(fn QueryMonitorFunc) {\n\tgo q.monitor(fn)\n}\n\n// Error returns any asynchronous error that may have occured while executing\n// the query.\nfunc (q *QueryTask) Error() error {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\treturn q.err\n}\n\nfunc (q *QueryTask) setError(err error) {\n\tq.mu.Lock()\n\tq.err = err\n\tq.mu.Unlock()\n}\n\nfunc (q *QueryTask) monitor(fn QueryMonitorFunc) {\n\tif err := fn(q.closing); err != nil {\n\t\tselect {\n\t\tcase <-q.closing:\n\t\tcase q.monitorCh <- err:\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/query_executor_test.go",
    "content": "package influxql_test\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n)\n\nvar errUnexpected = errors.New(\"unexpected error\")\n\ntype StatementExecutor struct {\n\tExecuteStatementFn func(stmt influxql.Statement, ctx influxql.ExecutionContext) error\n}\n\nfunc (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\treturn e.ExecuteStatementFn(stmt, ctx)\n}\n\nfunc NewQueryExecutor() *influxql.QueryExecutor {\n\treturn influxql.NewQueryExecutor()\n}\n\nfunc TestQueryExecutor_AttachQuery(t *testing.T) {\n\tq, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\te := NewQueryExecutor()\n\te.StatementExecutor = &StatementExecutor{\n\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\tif ctx.QueryID != 1 {\n\t\t\t\tt.Errorf(\"incorrect query id: exp=1 got=%d\", ctx.QueryID)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tdiscardOutput(e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil))\n}\n\nfunc TestQueryExecutor_KillQuery(t *testing.T) {\n\tq, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tqid := make(chan uint64)\n\n\te := NewQueryExecutor()\n\te.StatementExecutor = &StatementExecutor{\n\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\tswitch stmt.(type) {\n\t\t\tcase *influxql.KillQueryStatement:\n\t\t\t\treturn e.TaskManager.ExecuteStatement(stmt, ctx)\n\t\t\t}\n\n\t\t\tqid <- ctx.QueryID\n\t\t\tselect {\n\t\t\tcase <-ctx.InterruptCh:\n\t\t\t\treturn influxql.ErrQueryInterrupted\n\t\t\tcase <-time.After(100 * time.Millisecond):\n\t\t\t\tt.Error(\"killing the query did not close the channel after 100 milliseconds\")\n\t\t\t\treturn errUnexpected\n\t\t\t}\n\t\t},\n\t}\n\n\tresults := e.ExecuteQuery(q, 
influxql.ExecutionOptions{}, nil)\n\tq, err = influxql.ParseQuery(fmt.Sprintf(\"KILL QUERY %d\", <-qid))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdiscardOutput(e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil))\n\n\tresult := <-results\n\tif result.Err != influxql.ErrQueryInterrupted {\n\t\tt.Errorf(\"unexpected error: %s\", result.Err)\n\t}\n}\n\nfunc TestQueryExecutor_Interrupt(t *testing.T) {\n\tq, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\te := NewQueryExecutor()\n\te.StatementExecutor = &StatementExecutor{\n\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\tselect {\n\t\t\tcase <-ctx.InterruptCh:\n\t\t\t\treturn influxql.ErrQueryInterrupted\n\t\t\tcase <-time.After(100 * time.Millisecond):\n\t\t\t\tt.Error(\"killing the query did not close the channel after 100 milliseconds\")\n\t\t\t\treturn errUnexpected\n\t\t\t}\n\t\t},\n\t}\n\n\tclosing := make(chan struct{})\n\tresults := e.ExecuteQuery(q, influxql.ExecutionOptions{}, closing)\n\tclose(closing)\n\tresult := <-results\n\tif result.Err != influxql.ErrQueryInterrupted {\n\t\tt.Errorf(\"unexpected error: %s\", result.Err)\n\t}\n}\n\nfunc TestQueryExecutor_Abort(t *testing.T) {\n\tq, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tch1 := make(chan struct{})\n\tch2 := make(chan struct{})\n\n\te := NewQueryExecutor()\n\te.StatementExecutor = &StatementExecutor{\n\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\t<-ch1\n\t\t\tif err := ctx.Send(&influxql.Result{Err: errUnexpected}); err != influxql.ErrQueryAborted {\n\t\t\t\tt.Errorf(\"unexpected error: %v\", err)\n\t\t\t}\n\t\t\tclose(ch2)\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tdone := make(chan struct{})\n\tclose(done)\n\n\tresults := e.ExecuteQuery(q, influxql.ExecutionOptions{AbortCh: done}, 
nil)\n\tclose(ch1)\n\n\t<-ch2\n\tdiscardOutput(results)\n}\n\nfunc TestQueryExecutor_ShowQueries(t *testing.T) {\n\te := NewQueryExecutor()\n\te.StatementExecutor = &StatementExecutor{\n\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\tswitch stmt.(type) {\n\t\t\tcase *influxql.ShowQueriesStatement:\n\t\t\t\treturn e.TaskManager.ExecuteStatement(stmt, ctx)\n\t\t\t}\n\n\t\t\tt.Errorf(\"unexpected statement: %s\", stmt)\n\t\t\treturn errUnexpected\n\t\t},\n\t}\n\n\tq, err := influxql.ParseQuery(`SHOW QUERIES`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tresults := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)\n\tresult := <-results\n\tif len(result.Series) != 1 {\n\t\tt.Errorf(\"expected %d rows, got %d\", 1, len(result.Series))\n\t}\n\tif result.Err != nil {\n\t\tt.Errorf(\"unexpected error: %s\", result.Err)\n\t}\n}\n\nfunc TestQueryExecutor_Limit_Timeout(t *testing.T) {\n\tq, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\te := NewQueryExecutor()\n\te.StatementExecutor = &StatementExecutor{\n\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\tselect {\n\t\t\tcase <-ctx.InterruptCh:\n\t\t\t\treturn influxql.ErrQueryInterrupted\n\t\t\tcase <-time.After(time.Second):\n\t\t\t\tt.Errorf(\"timeout has not killed the query\")\n\t\t\t\treturn errUnexpected\n\t\t\t}\n\t\t},\n\t}\n\te.TaskManager.QueryTimeout = time.Nanosecond\n\n\tresults := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)\n\tresult := <-results\n\tif result.Err == nil || !strings.Contains(result.Err.Error(), \"query-timeout\") {\n\t\tt.Errorf(\"unexpected error: %s\", result.Err)\n\t}\n}\n\nfunc TestQueryExecutor_Limit_ConcurrentQueries(t *testing.T) {\n\tq, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tqid := make(chan uint64)\n\n\te := NewQueryExecutor()\n\te.StatementExecutor = 
&StatementExecutor{\n\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\tqid <- ctx.QueryID\n\t\t\t<-ctx.InterruptCh\n\t\t\treturn influxql.ErrQueryInterrupted\n\t\t},\n\t}\n\te.TaskManager.MaxConcurrentQueries = 1\n\tdefer e.Close()\n\n\t// Start first query and wait for it to be executing.\n\tgo discardOutput(e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil))\n\t<-qid\n\n\t// Start second query and expect for it to fail.\n\tresults := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)\n\n\tselect {\n\tcase result := <-results:\n\t\tif len(result.Series) != 0 {\n\t\t\tt.Errorf(\"expected %d rows, got %d\", 0, len(result.Series))\n\t\t}\n\t\tif result.Err == nil || !strings.Contains(result.Err.Error(), \"max-concurrent-queries\") {\n\t\t\tt.Errorf(\"unexpected error: %s\", result.Err)\n\t\t}\n\tcase <-qid:\n\t\tt.Errorf(\"unexpected statement execution for the second query\")\n\t}\n}\n\nfunc TestQueryExecutor_Close(t *testing.T) {\n\tq, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tch1 := make(chan struct{})\n\tch2 := make(chan struct{})\n\n\te := NewQueryExecutor()\n\te.StatementExecutor = &StatementExecutor{\n\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\tclose(ch1)\n\t\t\t<-ctx.InterruptCh\n\t\t\treturn influxql.ErrQueryInterrupted\n\t\t},\n\t}\n\n\tresults := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)\n\tgo func(results <-chan *influxql.Result) {\n\t\tresult := <-results\n\t\tif result.Err != influxql.ErrQueryEngineShutdown {\n\t\t\tt.Errorf(\"unexpected error: %s\", result.Err)\n\t\t}\n\t\tclose(ch2)\n\t}(results)\n\n\t// Wait for the statement to start executing.\n\t<-ch1\n\n\t// Close the query executor.\n\te.Close()\n\n\t// Check that the statement gets interrupted and finishes.\n\tselect {\n\tcase <-ch2:\n\tcase <-time.After(100 * time.Millisecond):\n\t\tt.Fatal(\"closing the query 
manager did not kill the query after 100 milliseconds\")\n\t}\n\n\tresults = e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)\n\tresult := <-results\n\tif len(result.Series) != 0 {\n\t\tt.Errorf(\"expected %d rows, got %d\", 0, len(result.Series))\n\t}\n\tif result.Err != influxql.ErrQueryEngineShutdown {\n\t\tt.Errorf(\"unexpected error: %s\", result.Err)\n\t}\n}\n\nfunc TestQueryExecutor_Panic(t *testing.T) {\n\tq, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\te := NewQueryExecutor()\n\te.StatementExecutor = &StatementExecutor{\n\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\tpanic(\"test error\")\n\t\t},\n\t}\n\n\tresults := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)\n\tresult := <-results\n\tif len(result.Series) != 0 {\n\t\tt.Errorf(\"expected %d rows, got %d\", 0, len(result.Series))\n\t}\n\tif result.Err == nil || result.Err.Error() != \"SELECT count(value) FROM cpu [panic:test error]\" {\n\t\tt.Errorf(\"unexpected error: %s\", result.Err)\n\t}\n}\n\nfunc TestQueryExecutor_InvalidSource(t *testing.T) {\n\te := NewQueryExecutor()\n\te.StatementExecutor = &StatementExecutor{\n\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\treturn errors.New(\"statement executed unexpectedly\")\n\t\t},\n\t}\n\n\tfor i, tt := range []struct {\n\t\tq   string\n\t\terr string\n\t}{\n\t\t{\n\t\t\tq:   `SELECT fieldKey, fieldType FROM _fieldKeys`,\n\t\t\terr: `unable to use system source '_fieldKeys': use SHOW FIELD KEYS instead`,\n\t\t},\n\t\t{\n\t\t\tq:   `SELECT \"name\" FROM _measurements`,\n\t\t\terr: `unable to use system source '_measurements': use SHOW MEASUREMENTS instead`,\n\t\t},\n\t\t{\n\t\t\tq:   `SELECT \"key\" FROM _series`,\n\t\t\terr: `unable to use system source '_series': use SHOW SERIES instead`,\n\t\t},\n\t\t{\n\t\t\tq:   `SELECT tagKey FROM _tagKeys`,\n\t\t\terr: `unable to use system 
source '_tagKeys': use SHOW TAG KEYS instead`,\n\t\t},\n\t\t{\n\t\t\tq:   `SELECT \"key\", value FROM _tags`,\n\t\t\terr: `unable to use system source '_tags': use SHOW TAG VALUES instead`,\n\t\t},\n\t} {\n\t\tq, err := influxql.ParseQuery(tt.q)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%d. unable to parse: %s\", i, tt.q)\n\t\t\tcontinue\n\t\t}\n\n\t\tresults := e.ExecuteQuery(q, influxql.ExecutionOptions{}, nil)\n\t\tresult := <-results\n\t\tif len(result.Series) != 0 {\n\t\t\tt.Errorf(\"%d. expected %d rows, got %d\", i, 0, len(result.Series))\n\t\t}\n\t\tif result.Err == nil || result.Err.Error() != tt.err {\n\t\t\tt.Errorf(\"%d. unexpected error: %s\", i, result.Err)\n\t\t}\n\t}\n}\n\nfunc discardOutput(results <-chan *influxql.Result) {\n\tfor range results {\n\t\t// Read all results and discard.\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/result.go",
    "content": "package influxql\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com/influxdata/influxdb/models\"\n)\n\nconst (\n\t// WarningLevel is the message level for a warning.\n\tWarningLevel = \"warning\"\n)\n\n// TagSet is a fundamental concept within the query system. It represents a composite series,\n// composed of multiple individual series that share a set of tag attributes.\ntype TagSet struct {\n\tTags       map[string]string\n\tFilters    []Expr\n\tSeriesKeys []string\n\tKey        []byte\n}\n\n// AddFilter adds a series-level filter to the Tagset.\nfunc (t *TagSet) AddFilter(key string, filter Expr) {\n\tt.SeriesKeys = append(t.SeriesKeys, key)\n\tt.Filters = append(t.Filters, filter)\n}\n\nfunc (t *TagSet) Len() int           { return len(t.SeriesKeys) }\nfunc (t *TagSet) Less(i, j int) bool { return t.SeriesKeys[i] < t.SeriesKeys[j] }\nfunc (t *TagSet) Swap(i, j int) {\n\tt.SeriesKeys[i], t.SeriesKeys[j] = t.SeriesKeys[j], t.SeriesKeys[i]\n\tt.Filters[i], t.Filters[j] = t.Filters[j], t.Filters[i]\n}\n\n// Reverse reverses the order of series keys and filters in the TagSet.\nfunc (t *TagSet) Reverse() {\n\tfor i, j := 0, len(t.Filters)-1; i < j; i, j = i+1, j-1 {\n\t\tt.Filters[i], t.Filters[j] = t.Filters[j], t.Filters[i]\n\t\tt.SeriesKeys[i], t.SeriesKeys[j] = t.SeriesKeys[j], t.SeriesKeys[i]\n\t}\n}\n\n// Message represents a user-facing message to be included with the result.\ntype Message struct {\n\tLevel string `json:\"level\"`\n\tText  string `json:\"text\"`\n}\n\n// ReadOnlyWarning generates a warning message that tells the user the command\n// they are using is being used for writing in a read only context.\n//\n// This is a temporary method while to be used while transitioning to read only\n// operations for issue #6290.\nfunc ReadOnlyWarning(stmt string) *Message {\n\treturn &Message{\n\t\tLevel: WarningLevel,\n\t\tText:  fmt.Sprintf(\"deprecated use of '%s' in a read only context, please use a POST request 
instead\", stmt),\n\t}\n}\n\n// Result represents a resultset returned from a single statement.\n// Rows represents a list of rows that can be sorted consistently by name/tag.\ntype Result struct {\n\t// StatementID is just the statement's position in the query. It's used\n\t// to combine statement results if they're being buffered in memory.\n\tStatementID int\n\tSeries      models.Rows\n\tMessages    []*Message\n\tPartial     bool\n\tErr         error\n}\n\n// MarshalJSON encodes the result into JSON.\nfunc (r *Result) MarshalJSON() ([]byte, error) {\n\t// Define a struct that outputs \"error\" as a string.\n\tvar o struct {\n\t\tStatementID int           `json:\"statement_id\"`\n\t\tSeries      []*models.Row `json:\"series,omitempty\"`\n\t\tMessages    []*Message    `json:\"messages,omitempty\"`\n\t\tPartial     bool          `json:\"partial,omitempty\"`\n\t\tErr         string        `json:\"error,omitempty\"`\n\t}\n\n\t// Copy fields to output struct.\n\to.StatementID = r.StatementID\n\to.Series = r.Series\n\to.Messages = r.Messages\n\to.Partial = r.Partial\n\tif r.Err != nil {\n\t\to.Err = r.Err.Error()\n\t}\n\n\treturn json.Marshal(&o)\n}\n\n// UnmarshalJSON decodes the data into the Result struct\nfunc (r *Result) UnmarshalJSON(b []byte) error {\n\tvar o struct {\n\t\tStatementID int           `json:\"statement_id\"`\n\t\tSeries      []*models.Row `json:\"series,omitempty\"`\n\t\tMessages    []*Message    `json:\"messages,omitempty\"`\n\t\tPartial     bool          `json:\"partial,omitempty\"`\n\t\tErr         string        `json:\"error,omitempty\"`\n\t}\n\n\terr := json.Unmarshal(b, &o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.StatementID = o.StatementID\n\tr.Series = o.Series\n\tr.Messages = o.Messages\n\tr.Partial = o.Partial\n\tif o.Err != \"\" {\n\t\tr.Err = errors.New(o.Err)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/sanitize.go",
    "content": "package influxql\n\nimport (\n\t\"bytes\"\n\t\"regexp\"\n)\n\nvar (\n\tsanitizeSetPassword = regexp.MustCompile(`(?i)password\\s+for[^=]*=\\s+([\"']?[^\\s\"]+[\"']?)`)\n\n\tsanitizeCreatePassword = regexp.MustCompile(`(?i)with\\s+password\\s+([\"']?[^\\s\"]+[\"']?)`)\n)\n\n// Sanitize attempts to sanitize passwords out of a raw query.\n// It looks for patterns that may be related to the SET PASSWORD and CREATE USER\n// statements and will redact the password that should be there. It will attempt\n// to redact information from common invalid queries too, but it's not guaranteed\n// to succeed on improper queries.\n//\n// This function works on the raw query and attempts to retain the original input\n// as much as possible.\nfunc Sanitize(query string) string {\n\tif matches := sanitizeSetPassword.FindAllStringSubmatchIndex(query, -1); matches != nil {\n\t\tvar buf bytes.Buffer\n\t\ti := 0\n\t\tfor _, match := range matches {\n\t\t\tbuf.WriteString(query[i:match[2]])\n\t\t\tbuf.WriteString(\"[REDACTED]\")\n\t\t\ti = match[3]\n\t\t}\n\t\tbuf.WriteString(query[i:])\n\t\tquery = buf.String()\n\t}\n\n\tif matches := sanitizeCreatePassword.FindAllStringSubmatchIndex(query, -1); matches != nil {\n\t\tvar buf bytes.Buffer\n\t\ti := 0\n\t\tfor _, match := range matches {\n\t\t\tbuf.WriteString(query[i:match[2]])\n\t\t\tbuf.WriteString(\"[REDACTED]\")\n\t\t\ti = match[3]\n\t\t}\n\t\tbuf.WriteString(query[i:])\n\t\tquery = buf.String()\n\t}\n\treturn query\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/sanitize_test.go",
    "content": "package influxql_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n)\n\nfunc TestSanitize(t *testing.T) {\n\tvar tests = []struct {\n\t\ts    string\n\t\tstmt string\n\t}{\n\t\t// Proper statements that should be redacted.\n\t\t{\n\t\t\ts:    `create user \"admin\" with password 'admin'`,\n\t\t\tstmt: `create user \"admin\" with password [REDACTED]`,\n\t\t},\n\t\t{\n\t\t\ts:    `set password for \"admin\" = 'admin'`,\n\t\t\tstmt: `set password for \"admin\" = [REDACTED]`,\n\t\t},\n\n\t\t// Common invalid statements that should still be redacted.\n\t\t{\n\t\t\ts:    `create user \"admin\" with password \"admin\"`,\n\t\t\tstmt: `create user \"admin\" with password [REDACTED]`,\n\t\t},\n\t\t{\n\t\t\ts:    `set password for \"admin\" = \"admin\"`,\n\t\t\tstmt: `set password for \"admin\" = [REDACTED]`,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tstmt := influxql.Sanitize(tt.s)\n\t\tif tt.stmt != stmt {\n\t\t\tt.Errorf(\"%d. %q\\n\\nsanitize mismatch:\\n\\nexp=%#v\\n\\ngot=%#v\\n\\n\", i, tt.s, tt.stmt, stmt)\n\t\t}\n\t}\n}\n\nfunc BenchmarkSanitize(b *testing.B) {\n\tb.ReportAllocs()\n\tq := `create user \"admin\" with password 'admin'; set password for \"admin\" = 'admin'`\n\tfor i := 0; i < b.N; i++ {\n\t\tinfluxql.Sanitize(q)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/scanner.go",
    "content": "package influxql\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n)\n\n// Scanner represents a lexical scanner for InfluxQL.\ntype Scanner struct {\n\tr *reader\n}\n\n// NewScanner returns a new instance of Scanner.\nfunc NewScanner(r io.Reader) *Scanner {\n\treturn &Scanner{r: &reader{r: bufio.NewReader(r)}}\n}\n\n// Scan returns the next token and position from the underlying reader.\n// Also returns the literal text read for strings, numbers, and duration tokens\n// since these token types can have different literal representations.\nfunc (s *Scanner) Scan() (tok Token, pos Pos, lit string) {\n\t// Read next code point.\n\tch0, pos := s.r.read()\n\n\t// If we see whitespace then consume all contiguous whitespace.\n\t// If we see a letter, or certain acceptable special characters, then consume\n\t// as an ident or reserved word.\n\tif isWhitespace(ch0) {\n\t\treturn s.scanWhitespace()\n\t} else if isLetter(ch0) || ch0 == '_' {\n\t\ts.r.unread()\n\t\treturn s.scanIdent(true)\n\t} else if isDigit(ch0) {\n\t\treturn s.scanNumber()\n\t}\n\n\t// Otherwise parse individual characters.\n\tswitch ch0 {\n\tcase eof:\n\t\treturn EOF, pos, \"\"\n\tcase '\"':\n\t\ts.r.unread()\n\t\treturn s.scanIdent(true)\n\tcase '\\'':\n\t\treturn s.scanString()\n\tcase '.':\n\t\tch1, _ := s.r.read()\n\t\ts.r.unread()\n\t\tif isDigit(ch1) {\n\t\t\treturn s.scanNumber()\n\t\t}\n\t\treturn DOT, pos, \"\"\n\tcase '$':\n\t\ttok, _, lit = s.scanIdent(false)\n\t\tif tok != IDENT {\n\t\t\treturn tok, pos, \"$\" + lit\n\t\t}\n\t\treturn BOUNDPARAM, pos, \"$\" + lit\n\tcase '+':\n\t\treturn ADD, pos, \"\"\n\tcase '-':\n\t\tch1, _ := s.r.read()\n\t\tif ch1 == '-' {\n\t\t\ts.skipUntilNewline()\n\t\t\treturn COMMENT, pos, \"\"\n\t\t}\n\t\ts.r.unread()\n\t\treturn SUB, pos, \"\"\n\tcase '*':\n\t\treturn MUL, pos, \"\"\n\tcase '/':\n\t\tch1, _ := s.r.read()\n\t\tif ch1 == '*' {\n\t\t\tif err := s.skipUntilEndComment(); err != nil {\n\t\t\t\treturn ILLEGAL, pos, 
\"\"\n\t\t\t}\n\t\t\treturn COMMENT, pos, \"\"\n\t\t} else {\n\t\t\ts.r.unread()\n\t\t}\n\t\treturn DIV, pos, \"\"\n\tcase '%':\n\t\treturn MOD, pos, \"\"\n\tcase '&':\n\t\treturn BITWISE_AND, pos, \"\"\n\tcase '|':\n\t\treturn BITWISE_OR, pos, \"\"\n\tcase '^':\n\t\treturn BITWISE_XOR, pos, \"\"\n\tcase '=':\n\t\tif ch1, _ := s.r.read(); ch1 == '~' {\n\t\t\treturn EQREGEX, pos, \"\"\n\t\t}\n\t\ts.r.unread()\n\t\treturn EQ, pos, \"\"\n\tcase '!':\n\t\tif ch1, _ := s.r.read(); ch1 == '=' {\n\t\t\treturn NEQ, pos, \"\"\n\t\t} else if ch1 == '~' {\n\t\t\treturn NEQREGEX, pos, \"\"\n\t\t}\n\t\ts.r.unread()\n\tcase '>':\n\t\tif ch1, _ := s.r.read(); ch1 == '=' {\n\t\t\treturn GTE, pos, \"\"\n\t\t}\n\t\ts.r.unread()\n\t\treturn GT, pos, \"\"\n\tcase '<':\n\t\tif ch1, _ := s.r.read(); ch1 == '=' {\n\t\t\treturn LTE, pos, \"\"\n\t\t} else if ch1 == '>' {\n\t\t\treturn NEQ, pos, \"\"\n\t\t}\n\t\ts.r.unread()\n\t\treturn LT, pos, \"\"\n\tcase '(':\n\t\treturn LPAREN, pos, \"\"\n\tcase ')':\n\t\treturn RPAREN, pos, \"\"\n\tcase ',':\n\t\treturn COMMA, pos, \"\"\n\tcase ';':\n\t\treturn SEMICOLON, pos, \"\"\n\tcase ':':\n\t\tif ch1, _ := s.r.read(); ch1 == ':' {\n\t\t\treturn DOUBLECOLON, pos, \"\"\n\t\t}\n\t\ts.r.unread()\n\t\treturn COLON, pos, \"\"\n\t}\n\n\treturn ILLEGAL, pos, string(ch0)\n}\n\n// scanWhitespace consumes the current rune and all contiguous whitespace.\nfunc (s *Scanner) scanWhitespace() (tok Token, pos Pos, lit string) {\n\t// Create a buffer and read the current character into it.\n\tvar buf bytes.Buffer\n\tch, pos := s.r.curr()\n\t_, _ = buf.WriteRune(ch)\n\n\t// Read every subsequent whitespace character into the buffer.\n\t// Non-whitespace characters and EOF will cause the loop to exit.\n\tfor {\n\t\tch, _ = s.r.read()\n\t\tif ch == eof {\n\t\t\tbreak\n\t\t} else if !isWhitespace(ch) {\n\t\t\ts.r.unread()\n\t\t\tbreak\n\t\t} else {\n\t\t\t_, _ = buf.WriteRune(ch)\n\t\t}\n\t}\n\n\treturn WS, pos, buf.String()\n}\n\n// skipUntilNewline skips characters 
until it reaches a newline.\nfunc (s *Scanner) skipUntilNewline() {\n\tfor {\n\t\tif ch, _ := s.r.read(); ch == '\\n' || ch == eof {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// skipUntilEndComment skips characters until it reaches a '*/' symbol.\nfunc (s *Scanner) skipUntilEndComment() error {\n\tfor {\n\t\tif ch1, _ := s.r.read(); ch1 == '*' {\n\t\t\t// We might be at the end.\n\t\tstar:\n\t\t\tch2, _ := s.r.read()\n\t\t\tif ch2 == '/' {\n\t\t\t\treturn nil\n\t\t\t} else if ch2 == '*' {\n\t\t\t\t// We are back in the state machine since we see a star.\n\t\t\t\tgoto star\n\t\t\t} else if ch2 == eof {\n\t\t\t\treturn io.EOF\n\t\t\t}\n\t\t} else if ch1 == eof {\n\t\t\treturn io.EOF\n\t\t}\n\t}\n}\n\nfunc (s *Scanner) scanIdent(lookup bool) (tok Token, pos Pos, lit string) {\n\t// Save the starting position of the identifier.\n\t_, pos = s.r.read()\n\ts.r.unread()\n\n\tvar buf bytes.Buffer\n\tfor {\n\t\tif ch, _ := s.r.read(); ch == eof {\n\t\t\tbreak\n\t\t} else if ch == '\"' {\n\t\t\ttok0, pos0, lit0 := s.scanString()\n\t\t\tif tok0 == BADSTRING || tok0 == BADESCAPE {\n\t\t\t\treturn tok0, pos0, lit0\n\t\t\t}\n\t\t\treturn IDENT, pos, lit0\n\t\t} else if isIdentChar(ch) {\n\t\t\ts.r.unread()\n\t\t\tbuf.WriteString(ScanBareIdent(s.r))\n\t\t} else {\n\t\t\ts.r.unread()\n\t\t\tbreak\n\t\t}\n\t}\n\tlit = buf.String()\n\n\t// If the literal matches a keyword then return that keyword.\n\tif lookup {\n\t\tif tok = Lookup(lit); tok != IDENT {\n\t\t\treturn tok, pos, \"\"\n\t\t}\n\t}\n\treturn IDENT, pos, lit\n}\n\n// scanString consumes a contiguous string of non-quote characters.\n// Quote characters can be consumed if they're first escaped with a backslash.\nfunc (s *Scanner) scanString() (tok Token, pos Pos, lit string) {\n\ts.r.unread()\n\t_, pos = s.r.curr()\n\n\tvar err error\n\tlit, err = ScanString(s.r)\n\tif err == errBadString {\n\t\treturn BADSTRING, pos, lit\n\t} else if err == errBadEscape {\n\t\t_, pos = s.r.curr()\n\t\treturn BADESCAPE, pos, lit\n\t}\n\treturn STRING, 
pos, lit\n}\n\n// ScanRegex consumes a token to find escapes\nfunc (s *Scanner) ScanRegex() (tok Token, pos Pos, lit string) {\n\t_, pos = s.r.curr()\n\n\t// Start & end sentinels.\n\tstart, end := '/', '/'\n\t// Valid escape chars.\n\tescapes := map[rune]rune{'/': '/'}\n\n\tb, err := ScanDelimited(s.r, start, end, escapes, true)\n\n\tif err == errBadEscape {\n\t\t_, pos = s.r.curr()\n\t\treturn BADESCAPE, pos, lit\n\t} else if err != nil {\n\t\treturn BADREGEX, pos, lit\n\t}\n\treturn REGEX, pos, string(b)\n}\n\n// scanNumber consumes anything that looks like the start of a number.\nfunc (s *Scanner) scanNumber() (tok Token, pos Pos, lit string) {\n\tvar buf bytes.Buffer\n\n\t// Check if the initial rune is a \".\".\n\tch, pos := s.r.curr()\n\tif ch == '.' {\n\t\t// Peek and see if the next rune is a digit.\n\t\tch1, _ := s.r.read()\n\t\ts.r.unread()\n\t\tif !isDigit(ch1) {\n\t\t\treturn ILLEGAL, pos, \".\"\n\t\t}\n\n\t\t// Unread the full stop so we can read it later.\n\t\ts.r.unread()\n\t} else {\n\t\ts.r.unread()\n\t}\n\n\t// Read as many digits as possible.\n\t_, _ = buf.WriteString(s.scanDigits())\n\n\t// If next code points are a full stop and digit then consume them.\n\tisDecimal := false\n\tif ch0, _ := s.r.read(); ch0 == '.' 
{\n\t\tisDecimal = true\n\t\tif ch1, _ := s.r.read(); isDigit(ch1) {\n\t\t\t_, _ = buf.WriteRune(ch0)\n\t\t\t_, _ = buf.WriteRune(ch1)\n\t\t\t_, _ = buf.WriteString(s.scanDigits())\n\t\t} else {\n\t\t\ts.r.unread()\n\t\t}\n\t} else {\n\t\ts.r.unread()\n\t}\n\n\t// Read as a duration or integer if it doesn't have a fractional part.\n\tif !isDecimal {\n\t\t// If the next rune is a letter then this is a duration token.\n\t\tif ch0, _ := s.r.read(); isLetter(ch0) || ch0 == 'µ' {\n\t\t\t_, _ = buf.WriteRune(ch0)\n\t\t\tfor {\n\t\t\t\tch1, _ := s.r.read()\n\t\t\t\tif !isLetter(ch1) && ch1 != 'µ' {\n\t\t\t\t\ts.r.unread()\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\t_, _ = buf.WriteRune(ch1)\n\t\t\t}\n\n\t\t\t// Continue reading digits and letters as part of this token.\n\t\t\tfor {\n\t\t\t\tif ch0, _ := s.r.read(); isLetter(ch0) || ch0 == 'µ' || isDigit(ch0) {\n\t\t\t\t\t_, _ = buf.WriteRune(ch0)\n\t\t\t\t} else {\n\t\t\t\t\ts.r.unread()\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn DURATIONVAL, pos, buf.String()\n\t\t} else {\n\t\t\ts.r.unread()\n\t\t\treturn INTEGER, pos, buf.String()\n\t\t}\n\t}\n\treturn NUMBER, pos, buf.String()\n}\n\n// scanDigits consumes a contiguous series of digits.\nfunc (s *Scanner) scanDigits() string {\n\tvar buf bytes.Buffer\n\tfor {\n\t\tch, _ := s.r.read()\n\t\tif !isDigit(ch) {\n\t\t\ts.r.unread()\n\t\t\tbreak\n\t\t}\n\t\t_, _ = buf.WriteRune(ch)\n\t}\n\treturn buf.String()\n}\n\n// isWhitespace returns true if the rune is a space, tab, or newline.\nfunc isWhitespace(ch rune) bool { return ch == ' ' || ch == '\\t' || ch == '\\n' }\n\n// isLetter returns true if the rune is a letter.\nfunc isLetter(ch rune) bool { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') }\n\n// isDigit returns true if the rune is a digit.\nfunc isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') }\n\n// isIdentChar returns true if the rune can be used in an unquoted identifier.\nfunc isIdentChar(ch rune) bool { return isLetter(ch) || isDigit(ch) || 
ch == '_' }\n\n// isIdentFirstChar returns true if the rune can be used as the first char in an unquoted identifer.\nfunc isIdentFirstChar(ch rune) bool { return isLetter(ch) || ch == '_' }\n\n// bufScanner represents a wrapper for scanner to add a buffer.\n// It provides a fixed-length circular buffer that can be unread.\ntype bufScanner struct {\n\ts   *Scanner\n\ti   int // buffer index\n\tn   int // buffer size\n\tbuf [3]struct {\n\t\ttok Token\n\t\tpos Pos\n\t\tlit string\n\t}\n}\n\n// newBufScanner returns a new buffered scanner for a reader.\nfunc newBufScanner(r io.Reader) *bufScanner {\n\treturn &bufScanner{s: NewScanner(r)}\n}\n\n// Scan reads the next token from the scanner.\nfunc (s *bufScanner) Scan() (tok Token, pos Pos, lit string) {\n\treturn s.scanFunc(s.s.Scan)\n}\n\n// ScanRegex reads a regex token from the scanner.\nfunc (s *bufScanner) ScanRegex() (tok Token, pos Pos, lit string) {\n\treturn s.scanFunc(s.s.ScanRegex)\n}\n\n// scanFunc uses the provided function to scan the next token.\nfunc (s *bufScanner) scanFunc(scan func() (Token, Pos, string)) (tok Token, pos Pos, lit string) {\n\t// If we have unread tokens then read them off the buffer first.\n\tif s.n > 0 {\n\t\ts.n--\n\t\treturn s.curr()\n\t}\n\n\t// Move buffer position forward and save the token.\n\ts.i = (s.i + 1) % len(s.buf)\n\tbuf := &s.buf[s.i]\n\tbuf.tok, buf.pos, buf.lit = scan()\n\n\treturn s.curr()\n}\n\n// Unscan pushes the previously token back onto the buffer.\nfunc (s *bufScanner) Unscan() { s.n++ }\n\n// curr returns the last read token.\nfunc (s *bufScanner) curr() (tok Token, pos Pos, lit string) {\n\tbuf := &s.buf[(s.i-s.n+len(s.buf))%len(s.buf)]\n\treturn buf.tok, buf.pos, buf.lit\n}\n\n// reader represents a buffered rune reader used by the scanner.\n// It provides a fixed-length circular buffer that can be unread.\ntype reader struct {\n\tr   io.RuneScanner\n\ti   int // buffer index\n\tn   int // buffer char count\n\tpos Pos // last read rune position\n\tbuf 
[3]struct {\n\t\tch  rune\n\t\tpos Pos\n\t}\n\teof bool // true if reader has ever seen eof.\n}\n\n// ReadRune reads the next rune from the reader.\n// This is a wrapper function to implement the io.RuneReader interface.\n// Note that this function does not return size.\nfunc (r *reader) ReadRune() (ch rune, size int, err error) {\n\tch, _ = r.read()\n\tif ch == eof {\n\t\terr = io.EOF\n\t}\n\treturn\n}\n\n// UnreadRune pushes the previously read rune back onto the buffer.\n// This is a wrapper function to implement the io.RuneScanner interface.\nfunc (r *reader) UnreadRune() error {\n\tr.unread()\n\treturn nil\n}\n\n// read reads the next rune from the reader.\nfunc (r *reader) read() (ch rune, pos Pos) {\n\t// If we have unread characters then read them off the buffer first.\n\tif r.n > 0 {\n\t\tr.n--\n\t\treturn r.curr()\n\t}\n\n\t// Read next rune from underlying reader.\n\t// Any error (including io.EOF) should return as EOF.\n\tch, _, err := r.r.ReadRune()\n\tif err != nil {\n\t\tch = eof\n\t} else if ch == '\\r' {\n\t\tif ch, _, err := r.r.ReadRune(); err != nil {\n\t\t\t// nop\n\t\t} else if ch != '\\n' {\n\t\t\t_ = r.r.UnreadRune()\n\t\t}\n\t\tch = '\\n'\n\t}\n\n\t// Save character and position to the buffer.\n\tr.i = (r.i + 1) % len(r.buf)\n\tbuf := &r.buf[r.i]\n\tbuf.ch, buf.pos = ch, r.pos\n\n\t// Update position.\n\t// Only count EOF once.\n\tif ch == '\\n' {\n\t\tr.pos.Line++\n\t\tr.pos.Char = 0\n\t} else if !r.eof {\n\t\tr.pos.Char++\n\t}\n\n\t// Mark the reader as EOF.\n\t// This is used so we don't double count EOF characters.\n\tif ch == eof {\n\t\tr.eof = true\n\t}\n\n\treturn r.curr()\n}\n\n// unread pushes the previously read rune back onto the buffer.\nfunc (r *reader) unread() {\n\tr.n++\n}\n\n// curr returns the last read character and position.\nfunc (r *reader) curr() (ch rune, pos Pos) {\n\ti := (r.i - r.n + len(r.buf)) % len(r.buf)\n\tbuf := &r.buf[i]\n\treturn buf.ch, buf.pos\n}\n\n// eof is a marker code point to signify that the 
reader can't read any more.\nconst eof = rune(0)\n\n// ScanDelimited reads a delimited set of runes\nfunc ScanDelimited(r io.RuneScanner, start, end rune, escapes map[rune]rune, escapesPassThru bool) ([]byte, error) {\n\t// Scan start delimiter.\n\tif ch, _, err := r.ReadRune(); err != nil {\n\t\treturn nil, err\n\t} else if ch != start {\n\t\treturn nil, fmt.Errorf(\"expected %s; found %s\", string(start), string(ch))\n\t}\n\n\tvar buf bytes.Buffer\n\tfor {\n\t\tch0, _, err := r.ReadRune()\n\t\tif ch0 == end {\n\t\t\treturn buf.Bytes(), nil\n\t\t} else if err != nil {\n\t\t\treturn buf.Bytes(), err\n\t\t} else if ch0 == '\\n' {\n\t\t\treturn nil, errors.New(\"delimited text contains new line\")\n\t\t} else if ch0 == '\\\\' {\n\t\t\t// If the next character is an escape then write the escaped char.\n\t\t\t// If it's not a valid escape then return an error.\n\t\t\tch1, _, err := r.ReadRune()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tc, ok := escapes[ch1]\n\t\t\tif !ok {\n\t\t\t\tif escapesPassThru {\n\t\t\t\t\t// Unread ch1 (char after the \\)\n\t\t\t\t\t_ = r.UnreadRune()\n\t\t\t\t\t// Write ch0 (\\) to the output buffer.\n\t\t\t\t\t_, _ = buf.WriteRune(ch0)\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\tbuf.Reset()\n\t\t\t\t\t_, _ = buf.WriteRune(ch0)\n\t\t\t\t\t_, _ = buf.WriteRune(ch1)\n\t\t\t\t\treturn buf.Bytes(), errBadEscape\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t_, _ = buf.WriteRune(c)\n\t\t} else {\n\t\t\t_, _ = buf.WriteRune(ch0)\n\t\t}\n\t}\n}\n\n// ScanString reads a quoted string from a rune reader.\nfunc ScanString(r io.RuneScanner) (string, error) {\n\tending, _, err := r.ReadRune()\n\tif err != nil {\n\t\treturn \"\", errBadString\n\t}\n\n\tvar buf bytes.Buffer\n\tfor {\n\t\tch0, _, err := r.ReadRune()\n\t\tif ch0 == ending {\n\t\t\treturn buf.String(), nil\n\t\t} else if err != nil || ch0 == '\\n' {\n\t\t\treturn buf.String(), errBadString\n\t\t} else if ch0 == '\\\\' {\n\t\t\t// If the next character is an escape then write the 
escaped char.\n\t\t\t// If it's not a valid escape then return an error.\n\t\t\tch1, _, _ := r.ReadRune()\n\t\t\tif ch1 == 'n' {\n\t\t\t\t_, _ = buf.WriteRune('\\n')\n\t\t\t} else if ch1 == '\\\\' {\n\t\t\t\t_, _ = buf.WriteRune('\\\\')\n\t\t\t} else if ch1 == '\"' {\n\t\t\t\t_, _ = buf.WriteRune('\"')\n\t\t\t} else if ch1 == '\\'' {\n\t\t\t\t_, _ = buf.WriteRune('\\'')\n\t\t\t} else {\n\t\t\t\treturn string(ch0) + string(ch1), errBadEscape\n\t\t\t}\n\t\t} else {\n\t\t\t_, _ = buf.WriteRune(ch0)\n\t\t}\n\t}\n}\n\nvar errBadString = errors.New(\"bad string\")\nvar errBadEscape = errors.New(\"bad escape\")\n\n// ScanBareIdent reads bare identifier from a rune reader.\nfunc ScanBareIdent(r io.RuneScanner) string {\n\t// Read every ident character into the buffer.\n\t// Non-ident characters and EOF will cause the loop to exit.\n\tvar buf bytes.Buffer\n\tfor {\n\t\tch, _, err := r.ReadRune()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t} else if !isIdentChar(ch) {\n\t\t\tr.UnreadRune()\n\t\t\tbreak\n\t\t} else {\n\t\t\t_, _ = buf.WriteRune(ch)\n\t\t}\n\t}\n\treturn buf.String()\n}\n\n// IsRegexOp returns true if the operator accepts a regex operand.\nfunc IsRegexOp(t Token) bool {\n\treturn (t == EQREGEX || t == NEQREGEX)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/scanner_test.go",
    "content": "package influxql_test\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n)\n\n// Ensure the scanner can scan tokens correctly.\nfunc TestScanner_Scan(t *testing.T) {\n\tvar tests = []struct {\n\t\ts   string\n\t\ttok influxql.Token\n\t\tlit string\n\t\tpos influxql.Pos\n\t}{\n\t\t// Special tokens (EOF, ILLEGAL, WS)\n\t\t{s: ``, tok: influxql.EOF},\n\t\t{s: `#`, tok: influxql.ILLEGAL, lit: `#`},\n\t\t{s: ` `, tok: influxql.WS, lit: \" \"},\n\t\t{s: \"\\t\", tok: influxql.WS, lit: \"\\t\"},\n\t\t{s: \"\\n\", tok: influxql.WS, lit: \"\\n\"},\n\t\t{s: \"\\r\", tok: influxql.WS, lit: \"\\n\"},\n\t\t{s: \"\\r\\n\", tok: influxql.WS, lit: \"\\n\"},\n\t\t{s: \"\\rX\", tok: influxql.WS, lit: \"\\n\"},\n\t\t{s: \"\\n\\r\", tok: influxql.WS, lit: \"\\n\\n\"},\n\t\t{s: \" \\n\\t \\r\\n\\t\", tok: influxql.WS, lit: \" \\n\\t \\n\\t\"},\n\t\t{s: \" foo\", tok: influxql.WS, lit: \" \"},\n\n\t\t// Numeric operators\n\t\t{s: `+`, tok: influxql.ADD},\n\t\t{s: `-`, tok: influxql.SUB},\n\t\t{s: `*`, tok: influxql.MUL},\n\t\t{s: `/`, tok: influxql.DIV},\n\t\t{s: `%`, tok: influxql.MOD},\n\n\t\t// Logical operators\n\t\t{s: `AND`, tok: influxql.AND},\n\t\t{s: `and`, tok: influxql.AND},\n\t\t{s: `OR`, tok: influxql.OR},\n\t\t{s: `or`, tok: influxql.OR},\n\n\t\t{s: `=`, tok: influxql.EQ},\n\t\t{s: `<>`, tok: influxql.NEQ},\n\t\t{s: `! 
`, tok: influxql.ILLEGAL, lit: \"!\"},\n\t\t{s: `<`, tok: influxql.LT},\n\t\t{s: `<=`, tok: influxql.LTE},\n\t\t{s: `>`, tok: influxql.GT},\n\t\t{s: `>=`, tok: influxql.GTE},\n\n\t\t// Misc tokens\n\t\t{s: `(`, tok: influxql.LPAREN},\n\t\t{s: `)`, tok: influxql.RPAREN},\n\t\t{s: `,`, tok: influxql.COMMA},\n\t\t{s: `;`, tok: influxql.SEMICOLON},\n\t\t{s: `.`, tok: influxql.DOT},\n\t\t{s: `=~`, tok: influxql.EQREGEX},\n\t\t{s: `!~`, tok: influxql.NEQREGEX},\n\t\t{s: `:`, tok: influxql.COLON},\n\t\t{s: `::`, tok: influxql.DOUBLECOLON},\n\n\t\t// Identifiers\n\t\t{s: `foo`, tok: influxql.IDENT, lit: `foo`},\n\t\t{s: `_foo`, tok: influxql.IDENT, lit: `_foo`},\n\t\t{s: `Zx12_3U_-`, tok: influxql.IDENT, lit: `Zx12_3U_`},\n\t\t{s: `\"foo\"`, tok: influxql.IDENT, lit: `foo`},\n\t\t{s: `\"foo\\\\bar\"`, tok: influxql.IDENT, lit: `foo\\bar`},\n\t\t{s: `\"foo\\bar\"`, tok: influxql.BADESCAPE, lit: `\\b`, pos: influxql.Pos{Line: 0, Char: 5}},\n\t\t{s: `\"foo\\\"bar\\\"\"`, tok: influxql.IDENT, lit: `foo\"bar\"`},\n\t\t{s: `test\"`, tok: influxql.BADSTRING, lit: \"\", pos: influxql.Pos{Line: 0, Char: 3}},\n\t\t{s: `\"test`, tok: influxql.BADSTRING, lit: `test`},\n\t\t{s: `$host`, tok: influxql.BOUNDPARAM, lit: `$host`},\n\t\t{s: `$\"host param\"`, tok: influxql.BOUNDPARAM, lit: `$host param`},\n\n\t\t{s: `true`, tok: influxql.TRUE},\n\t\t{s: `false`, tok: influxql.FALSE},\n\n\t\t// Strings\n\t\t{s: `'testing 123!'`, tok: influxql.STRING, lit: `testing 123!`},\n\t\t{s: `'foo\\nbar'`, tok: influxql.STRING, lit: \"foo\\nbar\"},\n\t\t{s: `'foo\\\\bar'`, tok: influxql.STRING, lit: \"foo\\\\bar\"},\n\t\t{s: `'test`, tok: influxql.BADSTRING, lit: `test`},\n\t\t{s: \"'test\\nfoo\", tok: influxql.BADSTRING, lit: `test`},\n\t\t{s: `'test\\g'`, tok: influxql.BADESCAPE, lit: `\\g`, pos: influxql.Pos{Line: 0, Char: 6}},\n\n\t\t// Numbers\n\t\t{s: `100`, tok: influxql.INTEGER, lit: `100`},\n\t\t{s: `100.23`, tok: influxql.NUMBER, lit: `100.23`},\n\t\t{s: `.23`, tok: influxql.NUMBER, lit: 
`.23`},\n\t\t//{s: `.`, tok: influxql.ILLEGAL, lit: `.`},\n\t\t{s: `10.3s`, tok: influxql.NUMBER, lit: `10.3`},\n\n\t\t// Durations\n\t\t{s: `10u`, tok: influxql.DURATIONVAL, lit: `10u`},\n\t\t{s: `10µ`, tok: influxql.DURATIONVAL, lit: `10µ`},\n\t\t{s: `10ms`, tok: influxql.DURATIONVAL, lit: `10ms`},\n\t\t{s: `1s`, tok: influxql.DURATIONVAL, lit: `1s`},\n\t\t{s: `10m`, tok: influxql.DURATIONVAL, lit: `10m`},\n\t\t{s: `10h`, tok: influxql.DURATIONVAL, lit: `10h`},\n\t\t{s: `10d`, tok: influxql.DURATIONVAL, lit: `10d`},\n\t\t{s: `10w`, tok: influxql.DURATIONVAL, lit: `10w`},\n\t\t{s: `10x`, tok: influxql.DURATIONVAL, lit: `10x`}, // non-duration unit, but scanned as a duration value\n\n\t\t// Keywords\n\t\t{s: `ALL`, tok: influxql.ALL},\n\t\t{s: `ALTER`, tok: influxql.ALTER},\n\t\t{s: `AS`, tok: influxql.AS},\n\t\t{s: `ASC`, tok: influxql.ASC},\n\t\t{s: `BEGIN`, tok: influxql.BEGIN},\n\t\t{s: `BY`, tok: influxql.BY},\n\t\t{s: `CREATE`, tok: influxql.CREATE},\n\t\t{s: `CONTINUOUS`, tok: influxql.CONTINUOUS},\n\t\t{s: `DATABASE`, tok: influxql.DATABASE},\n\t\t{s: `DATABASES`, tok: influxql.DATABASES},\n\t\t{s: `DEFAULT`, tok: influxql.DEFAULT},\n\t\t{s: `DELETE`, tok: influxql.DELETE},\n\t\t{s: `DESC`, tok: influxql.DESC},\n\t\t{s: `DROP`, tok: influxql.DROP},\n\t\t{s: `DURATION`, tok: influxql.DURATION},\n\t\t{s: `END`, tok: influxql.END},\n\t\t{s: `EVERY`, tok: influxql.EVERY},\n\t\t{s: `EXPLAIN`, tok: influxql.EXPLAIN},\n\t\t{s: `FIELD`, tok: influxql.FIELD},\n\t\t{s: `FROM`, tok: influxql.FROM},\n\t\t{s: `GRANT`, tok: influxql.GRANT},\n\t\t{s: `GROUP`, tok: influxql.GROUP},\n\t\t{s: `GROUPS`, tok: influxql.GROUPS},\n\t\t{s: `INSERT`, tok: influxql.INSERT},\n\t\t{s: `INTO`, tok: influxql.INTO},\n\t\t{s: `KEY`, tok: influxql.KEY},\n\t\t{s: `KEYS`, tok: influxql.KEYS},\n\t\t{s: `KILL`, tok: influxql.KILL},\n\t\t{s: `LIMIT`, tok: influxql.LIMIT},\n\t\t{s: `SHOW`, tok: influxql.SHOW},\n\t\t{s: `SHARD`, tok: influxql.SHARD},\n\t\t{s: `SHARDS`, tok: 
influxql.SHARDS},\n\t\t{s: `MEASUREMENT`, tok: influxql.MEASUREMENT},\n\t\t{s: `MEASUREMENTS`, tok: influxql.MEASUREMENTS},\n\t\t{s: `OFFSET`, tok: influxql.OFFSET},\n\t\t{s: `ON`, tok: influxql.ON},\n\t\t{s: `ORDER`, tok: influxql.ORDER},\n\t\t{s: `PASSWORD`, tok: influxql.PASSWORD},\n\t\t{s: `POLICY`, tok: influxql.POLICY},\n\t\t{s: `POLICIES`, tok: influxql.POLICIES},\n\t\t{s: `PRIVILEGES`, tok: influxql.PRIVILEGES},\n\t\t{s: `QUERIES`, tok: influxql.QUERIES},\n\t\t{s: `QUERY`, tok: influxql.QUERY},\n\t\t{s: `READ`, tok: influxql.READ},\n\t\t{s: `REPLICATION`, tok: influxql.REPLICATION},\n\t\t{s: `RESAMPLE`, tok: influxql.RESAMPLE},\n\t\t{s: `RETENTION`, tok: influxql.RETENTION},\n\t\t{s: `REVOKE`, tok: influxql.REVOKE},\n\t\t{s: `SELECT`, tok: influxql.SELECT},\n\t\t{s: `SERIES`, tok: influxql.SERIES},\n\t\t{s: `TAG`, tok: influxql.TAG},\n\t\t{s: `TO`, tok: influxql.TO},\n\t\t{s: `USER`, tok: influxql.USER},\n\t\t{s: `USERS`, tok: influxql.USERS},\n\t\t{s: `VALUES`, tok: influxql.VALUES},\n\t\t{s: `WHERE`, tok: influxql.WHERE},\n\t\t{s: `WITH`, tok: influxql.WITH},\n\t\t{s: `WRITE`, tok: influxql.WRITE},\n\t\t{s: `explain`, tok: influxql.EXPLAIN}, // case insensitive\n\t\t{s: `seLECT`, tok: influxql.SELECT},   // case insensitive\n\t}\n\n\tfor i, tt := range tests {\n\t\ts := influxql.NewScanner(strings.NewReader(tt.s))\n\t\ttok, pos, lit := s.Scan()\n\t\tif tt.tok != tok {\n\t\t\tt.Errorf(\"%d. %q token mismatch: exp=%q got=%q <%q>\", i, tt.s, tt.tok, tok, lit)\n\t\t} else if tt.pos.Line != pos.Line || tt.pos.Char != pos.Char {\n\t\t\tt.Errorf(\"%d. %q pos mismatch: exp=%#v got=%#v\", i, tt.s, tt.pos, pos)\n\t\t} else if tt.lit != lit {\n\t\t\tt.Errorf(\"%d. 
%q literal mismatch: exp=%q got=%q\", i, tt.s, tt.lit, lit)\n\t\t}\n\t}\n}\n\n// Ensure the scanner can scan a series of tokens correctly.\nfunc TestScanner_Scan_Multi(t *testing.T) {\n\ttype result struct {\n\t\ttok influxql.Token\n\t\tpos influxql.Pos\n\t\tlit string\n\t}\n\texp := []result{\n\t\t{tok: influxql.SELECT, pos: influxql.Pos{Line: 0, Char: 0}, lit: \"\"},\n\t\t{tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 6}, lit: \" \"},\n\t\t{tok: influxql.IDENT, pos: influxql.Pos{Line: 0, Char: 7}, lit: \"value\"},\n\t\t{tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 12}, lit: \" \"},\n\t\t{tok: influxql.FROM, pos: influxql.Pos{Line: 0, Char: 13}, lit: \"\"},\n\t\t{tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 17}, lit: \" \"},\n\t\t{tok: influxql.IDENT, pos: influxql.Pos{Line: 0, Char: 18}, lit: \"myseries\"},\n\t\t{tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 26}, lit: \" \"},\n\t\t{tok: influxql.WHERE, pos: influxql.Pos{Line: 0, Char: 27}, lit: \"\"},\n\t\t{tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 32}, lit: \" \"},\n\t\t{tok: influxql.IDENT, pos: influxql.Pos{Line: 0, Char: 33}, lit: \"a\"},\n\t\t{tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 34}, lit: \" \"},\n\t\t{tok: influxql.EQ, pos: influxql.Pos{Line: 0, Char: 35}, lit: \"\"},\n\t\t{tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 36}, lit: \" \"},\n\t\t{tok: influxql.STRING, pos: influxql.Pos{Line: 0, Char: 36}, lit: \"b\"},\n\t\t{tok: influxql.EOF, pos: influxql.Pos{Line: 0, Char: 40}, lit: \"\"},\n\t}\n\n\t// Create a scanner.\n\tv := `SELECT value from myseries WHERE a = 'b'`\n\ts := influxql.NewScanner(strings.NewReader(v))\n\n\t// Continually scan until we reach the end.\n\tvar act []result\n\tfor {\n\t\ttok, pos, lit := s.Scan()\n\t\tact = append(act, result{tok, pos, lit})\n\t\tif tok == influxql.EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Verify the token counts match.\n\tif len(exp) != len(act) {\n\t\tt.Fatalf(\"token count mismatch: exp=%d, got=%d\", len(exp), 
len(act))\n\t}\n\n\t// Verify each token matches.\n\tfor i := range exp {\n\t\tif !reflect.DeepEqual(exp[i], act[i]) {\n\t\t\tt.Fatalf(\"%d. token mismatch:\\n\\nexp=%#v\\n\\ngot=%#v\", i, exp[i], act[i])\n\t\t}\n\t}\n}\n\n// Ensure the library can correctly scan strings.\nfunc TestScanString(t *testing.T) {\n\tvar tests = []struct {\n\t\tin  string\n\t\tout string\n\t\terr string\n\t}{\n\t\t{in: `\"\"`, out: ``},\n\t\t{in: `\"foo bar\"`, out: `foo bar`},\n\t\t{in: `'foo bar'`, out: `foo bar`},\n\t\t{in: `\"foo\\nbar\"`, out: \"foo\\nbar\"},\n\t\t{in: `\"foo\\\\bar\"`, out: `foo\\bar`},\n\t\t{in: `\"foo\\\"bar\"`, out: `foo\"bar`},\n\t\t{in: `'foo\\'bar'`, out: `foo'bar`},\n\n\t\t{in: `\"foo` + \"\\n\", out: `foo`, err: \"bad string\"}, // newline in string\n\t\t{in: `\"foo`, out: `foo`, err: \"bad string\"},        // unclosed quotes\n\t\t{in: `\"foo\\xbar\"`, out: `\\x`, err: \"bad escape\"},   // invalid escape\n\t}\n\n\tfor i, tt := range tests {\n\t\tout, err := influxql.ScanString(strings.NewReader(tt.in))\n\t\tif tt.err != errstring(err) {\n\t\t\tt.Errorf(\"%d. %s: error: exp=%s, got=%s\", i, tt.in, tt.err, err)\n\t\t} else if tt.out != out {\n\t\t\tt.Errorf(\"%d. %s: out: exp=%s, got=%s\", i, tt.in, tt.out, out)\n\t\t}\n\t}\n}\n\n// Test scanning regex\nfunc TestScanRegex(t *testing.T) {\n\tvar tests = []struct {\n\t\tin  string\n\t\ttok influxql.Token\n\t\tlit string\n\t\terr string\n\t}{\n\t\t{in: `/^payments\\./`, tok: influxql.REGEX, lit: `^payments\\.`},\n\t\t{in: `/foo\\/bar/`, tok: influxql.REGEX, lit: `foo/bar`},\n\t\t{in: `/foo\\\\/bar/`, tok: influxql.REGEX, lit: `foo\\/bar`},\n\t\t{in: `/foo\\\\bar/`, tok: influxql.REGEX, lit: `foo\\\\bar`},\n\t\t{in: `/http\\:\\/\\/www\\.example\\.com/`, tok: influxql.REGEX, lit: `http\\://www\\.example\\.com`},\n\t}\n\n\tfor i, tt := range tests {\n\t\ts := influxql.NewScanner(strings.NewReader(tt.in))\n\t\ttok, _, lit := s.ScanRegex()\n\t\tif tok != tt.tok {\n\t\t\tt.Errorf(\"%d. 
%s: error:\\n\\texp=%s\\n\\tgot=%s\\n\", i, tt.in, tt.tok.String(), tok.String())\n\t\t}\n\t\tif lit != tt.lit {\n\t\t\tt.Errorf(\"%d. %s: error:\\n\\texp=%s\\n\\tgot=%s\\n\", i, tt.in, tt.lit, lit)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/select.go",
    "content": "package influxql\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"time\"\n)\n\n// SelectOptions are options that customize the select call.\ntype SelectOptions struct {\n\t// Authorizer is used to limit access to data\n\tAuthorizer Authorizer\n\n\t// The lower bound for a select call.\n\tMinTime time.Time\n\n\t// The upper bound for a select call.\n\tMaxTime time.Time\n\n\t// Node to exclusively read from.\n\t// If zero, all nodes are used.\n\tNodeID uint64\n\n\t// An optional channel that, if closed, signals that the select should be\n\t// interrupted.\n\tInterruptCh <-chan struct{}\n\n\t// Maximum number of concurrent series.\n\tMaxSeriesN int\n}\n\n// Select executes stmt against ic and returns a list of iterators to stream from.\n//\n// Statements should have all rewriting performed before calling select(). This\n// includes wildcard and source expansion.\nfunc Select(stmt *SelectStatement, ic IteratorCreator, sopt *SelectOptions) ([]Iterator, error) {\n\t// Determine base options for iterators.\n\topt, err := newIteratorOptionsStmt(stmt, sopt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buildIterators(stmt, ic, opt)\n}\n\nfunc buildIterators(stmt *SelectStatement, ic IteratorCreator, opt IteratorOptions) ([]Iterator, error) {\n\t// Retrieve refs for each call and var ref.\n\tinfo := newSelectInfo(stmt)\n\tif len(info.calls) > 1 && len(info.refs) > 0 {\n\t\treturn nil, errors.New(\"cannot select fields when selecting multiple aggregates\")\n\t}\n\n\t// Determine auxiliary fields to be selected.\n\topt.Aux = make([]VarRef, 0, len(info.refs))\n\tfor ref := range info.refs {\n\t\topt.Aux = append(opt.Aux, *ref)\n\t}\n\tsort.Sort(VarRefs(opt.Aux))\n\n\t// If there are multiple auxilary fields and no calls then construct an aux iterator.\n\tif len(info.calls) == 0 && len(info.refs) > 0 {\n\t\treturn buildAuxIterators(stmt.Fields, ic, stmt.Sources, opt)\n\t}\n\n\t// Include auxiliary fields from top() and bottom() when not 
writing the results.\n\tfields := stmt.Fields\n\tif stmt.Target == nil {\n\t\textraFields := 0\n\t\tfor call := range info.calls {\n\t\t\tif call.Name == \"top\" || call.Name == \"bottom\" {\n\t\t\t\tfor i := 1; i < len(call.Args)-1; i++ {\n\t\t\t\t\tref := call.Args[i].(*VarRef)\n\t\t\t\t\topt.Aux = append(opt.Aux, *ref)\n\t\t\t\t\textraFields++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif extraFields > 0 {\n\t\t\t// Rebuild the list of fields if any extra fields are being implicitly added\n\t\t\tfields = make([]*Field, 0, len(stmt.Fields)+extraFields)\n\t\t\tfor _, f := range stmt.Fields {\n\t\t\t\tfields = append(fields, f)\n\t\t\t\tswitch expr := f.Expr.(type) {\n\t\t\t\tcase *Call:\n\t\t\t\t\tif expr.Name == \"top\" || expr.Name == \"bottom\" {\n\t\t\t\t\t\tfor i := 1; i < len(expr.Args)-1; i++ {\n\t\t\t\t\t\t\tfields = append(fields, &Field{Expr: expr.Args[i]})\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Determine if there is one call and it is a selector.\n\tselector := false\n\tif len(info.calls) == 1 {\n\t\tfor call := range info.calls {\n\t\t\tselector = IsSelector(call)\n\t\t}\n\t}\n\n\treturn buildFieldIterators(fields, ic, stmt.Sources, opt, selector, stmt.Target != nil)\n}\n\n// buildAuxIterators creates a set of iterators from a single combined auxiliary iterator.\nfunc buildAuxIterators(fields Fields, ic IteratorCreator, sources Sources, opt IteratorOptions) ([]Iterator, error) {\n\t// Create the auxiliary iterators for each source.\n\tinputs := make([]Iterator, 0, len(sources))\n\tif err := func() error {\n\t\tfor _, source := range sources {\n\t\t\tswitch source := source.(type) {\n\t\t\tcase *Measurement:\n\t\t\t\tinput, err := ic.CreateIterator(source, opt)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tinputs = append(inputs, input)\n\t\t\tcase *SubQuery:\n\t\t\t\tb := subqueryBuilder{\n\t\t\t\t\tic:   ic,\n\t\t\t\t\tstmt: source.Statement,\n\t\t\t\t}\n\n\t\t\t\tinput, err := 
b.buildAuxIterator(opt)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tinputs = append(inputs, input)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}(); err != nil {\n\t\tIterators(inputs).Close()\n\t\treturn nil, err\n\t}\n\n\t// Merge iterators to read auxilary fields.\n\tinput, err := Iterators(inputs).Merge(opt)\n\tif err != nil {\n\t\tIterators(inputs).Close()\n\t\treturn nil, err\n\t} else if input == nil {\n\t\tinput = &nilFloatIterator{}\n\t}\n\n\t// Filter out duplicate rows, if required.\n\tif opt.Dedupe {\n\t\t// If there is no group by and it is a float iterator, see if we can use a fast dedupe.\n\t\tif itr, ok := input.(FloatIterator); ok && len(opt.Dimensions) == 0 {\n\t\t\tif sz := len(fields); sz > 0 && sz < 3 {\n\t\t\t\tinput = newFloatFastDedupeIterator(itr)\n\t\t\t} else {\n\t\t\t\tinput = NewDedupeIterator(itr)\n\t\t\t}\n\t\t} else {\n\t\t\tinput = NewDedupeIterator(input)\n\t\t}\n\t}\n\n\t// Apply limit & offset.\n\tif opt.Limit > 0 || opt.Offset > 0 {\n\t\tinput = NewLimitIterator(input, opt)\n\t}\n\n\t// Wrap in an auxiliary iterator to separate the fields.\n\taitr := NewAuxIterator(input, opt)\n\n\t// Generate iterators for each field.\n\titrs := make([]Iterator, len(fields))\n\tif err := func() error {\n\t\tfor i, f := range fields {\n\t\t\texpr := Reduce(f.Expr, nil)\n\t\t\titr, err := buildAuxIterator(expr, aitr, opt)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\titrs[i] = itr\n\t\t}\n\t\treturn nil\n\t}(); err != nil {\n\t\tIterators(Iterators(itrs).filterNonNil()).Close()\n\t\taitr.Close()\n\t\treturn nil, err\n\t}\n\n\t// Background the primary iterator since there is no reader for it.\n\taitr.Background()\n\n\treturn itrs, nil\n}\n\n// buildAuxIterator constructs an Iterator for an expression from an AuxIterator.\nfunc buildAuxIterator(expr Expr, aitr AuxIterator, opt IteratorOptions) (Iterator, error) {\n\tswitch expr := expr.(type) {\n\tcase *VarRef:\n\t\treturn aitr.Iterator(expr.Val, expr.Type), nil\n\tcase 
*BinaryExpr:\n\t\tif rhs, ok := expr.RHS.(Literal); ok {\n\t\t\t// The right hand side is a literal. It is more common to have the RHS be a literal,\n\t\t\t// so we check that one first and have this be the happy path.\n\t\t\tif lhs, ok := expr.LHS.(Literal); ok {\n\t\t\t\t// We have two literals that couldn't be combined by Reduce.\n\t\t\t\treturn nil, fmt.Errorf(\"unable to construct an iterator from two literals: LHS: %T, RHS: %T\", lhs, rhs)\n\t\t\t}\n\n\t\t\tlhs, err := buildAuxIterator(expr.LHS, aitr, opt)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn buildRHSTransformIterator(lhs, rhs, expr.Op, opt)\n\t\t} else if lhs, ok := expr.LHS.(Literal); ok {\n\t\t\trhs, err := buildAuxIterator(expr.RHS, aitr, opt)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn buildLHSTransformIterator(lhs, rhs, expr.Op, opt)\n\t\t} else {\n\t\t\t// We have two iterators. Combine them into a single iterator.\n\t\t\tlhs, err := buildAuxIterator(expr.LHS, aitr, opt)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\trhs, err := buildAuxIterator(expr.RHS, aitr, opt)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn buildTransformIterator(lhs, rhs, expr.Op, opt)\n\t\t}\n\tcase *ParenExpr:\n\t\treturn buildAuxIterator(expr.Expr, aitr, opt)\n\tcase *nilLiteral:\n\t\treturn &nilFloatIterator{}, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"invalid expression type: %T\", expr)\n\t}\n}\n\n// buildFieldIterators creates an iterator for each field expression.\nfunc buildFieldIterators(fields Fields, ic IteratorCreator, sources Sources, opt IteratorOptions, selector, writeMode bool) ([]Iterator, error) {\n\t// Create iterators from fields against the iterator creator.\n\titrs := make([]Iterator, len(fields))\n\n\tif err := func() error {\n\t\thasAuxFields := false\n\n\t\tvar input Iterator\n\t\tfor i, f := range fields {\n\t\t\t// Build iterators for calls first and save the iterator.\n\t\t\t// We do this so we 
can keep the ordering provided by the user, but\n\t\t\t// still build the Call's iterator first.\n\t\t\tif ContainsVarRef(f.Expr) {\n\t\t\t\thasAuxFields = true\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\texpr := Reduce(f.Expr, nil)\n\t\t\titr, err := buildExprIterator(expr, ic, sources, opt, selector, writeMode)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t} else if itr == nil {\n\t\t\t\titr = &nilFloatIterator{}\n\t\t\t}\n\n\t\t\t// If there is a limit or offset then apply it.\n\t\t\tif opt.Limit > 0 || opt.Offset > 0 {\n\t\t\t\titr = NewLimitIterator(itr, opt)\n\t\t\t}\n\t\t\titrs[i] = itr\n\t\t\tinput = itr\n\t\t}\n\n\t\tif input == nil || !hasAuxFields {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Build the aux iterators. Previous validation should ensure that only one\n\t\t// call was present so we build an AuxIterator from that input.\n\t\taitr := NewAuxIterator(input, opt)\n\t\tfor i, f := range fields {\n\t\t\tif itrs[i] != nil {\n\t\t\t\titrs[i] = aitr\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\texpr := Reduce(f.Expr, nil)\n\t\t\titr, err := buildAuxIterator(expr, aitr, opt)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t} else if itr == nil {\n\t\t\t\titr = &nilFloatIterator{}\n\t\t\t}\n\t\t\titrs[i] = itr\n\t\t}\n\t\taitr.Start()\n\t\treturn nil\n\n\t}(); err != nil {\n\t\tIterators(Iterators(itrs).filterNonNil()).Close()\n\t\treturn nil, err\n\t}\n\n\treturn itrs, nil\n}\n\n// buildExprIterator creates an iterator for an expression.\nfunc buildExprIterator(expr Expr, ic IteratorCreator, sources Sources, opt IteratorOptions, selector, writeMode bool) (Iterator, error) {\n\topt.Expr = expr\n\tb := exprIteratorBuilder{\n\t\tic:        ic,\n\t\tsources:   sources,\n\t\topt:       opt,\n\t\tselector:  selector,\n\t\twriteMode: writeMode,\n\t}\n\n\tswitch expr := expr.(type) {\n\tcase *VarRef:\n\t\treturn b.buildVarRefIterator(expr)\n\tcase *Call:\n\t\treturn b.buildCallIterator(expr)\n\tcase *BinaryExpr:\n\t\treturn b.buildBinaryExprIterator(expr)\n\tcase 
*ParenExpr:\n\t\treturn buildExprIterator(expr.Expr, ic, sources, opt, selector, writeMode)\n\tcase *nilLiteral:\n\t\treturn &nilFloatIterator{}, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"invalid expression type: %T\", expr)\n\t}\n}\n\ntype exprIteratorBuilder struct {\n\tic        IteratorCreator\n\tsources   Sources\n\topt       IteratorOptions\n\tselector  bool\n\twriteMode bool\n}\n\nfunc (b *exprIteratorBuilder) buildVarRefIterator(expr *VarRef) (Iterator, error) {\n\tinputs := make([]Iterator, 0, len(b.sources))\n\tif err := func() error {\n\t\tfor _, source := range b.sources {\n\t\t\tswitch source := source.(type) {\n\t\t\tcase *Measurement:\n\t\t\t\tinput, err := b.ic.CreateIterator(source, b.opt)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tinputs = append(inputs, input)\n\t\t\tcase *SubQuery:\n\t\t\t\tsubquery := subqueryBuilder{\n\t\t\t\t\tic:   b.ic,\n\t\t\t\t\tstmt: source.Statement,\n\t\t\t\t}\n\n\t\t\t\tinput, err := subquery.buildVarRefIterator(expr, b.opt)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tinputs = append(inputs, input)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}(); err != nil {\n\t\tIterators(inputs).Close()\n\t\treturn nil, err\n\t}\n\n\t// Variable references in this section will always go into some call\n\t// iterator. Combine it with a merge iterator.\n\titr := NewMergeIterator(inputs, b.opt)\n\tif itr == nil {\n\t\titr = &nilFloatIterator{}\n\t}\n\n\tif b.opt.InterruptCh != nil {\n\t\titr = NewInterruptIterator(itr, b.opt.InterruptCh)\n\t}\n\treturn itr, nil\n}\n\nfunc (b *exprIteratorBuilder) buildCallIterator(expr *Call) (Iterator, error) {\n\t// TODO(jsternberg): Refactor this. This section needs to die in a fire.\n\topt := b.opt\n\t// Eliminate limits and offsets if they were previously set. 
These are handled by the caller.\n\topt.Limit, opt.Offset = 0, 0\n\tswitch expr.Name {\n\tcase \"distinct\":\n\t\topt.Ordered = true\n\t\tinput, err := buildExprIterator(expr.Args[0].(*VarRef), b.ic, b.sources, opt, b.selector, false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tinput, err = NewDistinctIterator(input, opt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn NewIntervalIterator(input, opt), nil\n\tcase \"sample\":\n\t\topt.Ordered = true\n\t\tinput, err := buildExprIterator(expr.Args[0], b.ic, b.sources, opt, b.selector, false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsize := expr.Args[1].(*IntegerLiteral)\n\n\t\treturn newSampleIterator(input, opt, int(size.Val))\n\tcase \"holt_winters\", \"holt_winters_with_fit\":\n\t\topt.Ordered = true\n\t\tinput, err := buildExprIterator(expr.Args[0], b.ic, b.sources, opt, b.selector, false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\th := expr.Args[1].(*IntegerLiteral)\n\t\tm := expr.Args[2].(*IntegerLiteral)\n\n\t\tincludeFitData := \"holt_winters_with_fit\" == expr.Name\n\n\t\tinterval := opt.Interval.Duration\n\t\t// Redefine interval to be unbounded to capture all aggregate results\n\t\topt.StartTime = MinTime\n\t\topt.EndTime = MaxTime\n\t\topt.Interval = Interval{}\n\n\t\treturn newHoltWintersIterator(input, opt, int(h.Val), int(m.Val), includeFitData, interval)\n\tcase \"derivative\", \"non_negative_derivative\", \"difference\", \"non_negative_difference\", \"moving_average\", \"elapsed\":\n\t\tif !opt.Interval.IsZero() {\n\t\t\tif opt.Ascending {\n\t\t\t\topt.StartTime -= int64(opt.Interval.Duration)\n\t\t\t} else {\n\t\t\t\topt.EndTime += int64(opt.Interval.Duration)\n\t\t\t}\n\t\t}\n\t\topt.Ordered = true\n\n\t\tinput, err := buildExprIterator(expr.Args[0], b.ic, b.sources, opt, b.selector, false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tswitch expr.Name {\n\t\tcase \"derivative\", \"non_negative_derivative\":\n\t\t\tinterval := 
opt.DerivativeInterval()\n\t\t\tisNonNegative := (expr.Name == \"non_negative_derivative\")\n\t\t\treturn newDerivativeIterator(input, opt, interval, isNonNegative)\n\t\tcase \"elapsed\":\n\t\t\tinterval := opt.ElapsedInterval()\n\t\t\treturn newElapsedIterator(input, opt, interval)\n\t\tcase \"difference\", \"non_negative_difference\":\n\t\t\tisNonNegative := (expr.Name == \"non_negative_difference\")\n\t\t\treturn newDifferenceIterator(input, opt, isNonNegative)\n\t\tcase \"moving_average\":\n\t\t\tn := expr.Args[1].(*IntegerLiteral)\n\t\t\tif n.Val > 1 && !opt.Interval.IsZero() {\n\t\t\t\tif opt.Ascending {\n\t\t\t\t\topt.StartTime -= int64(opt.Interval.Duration) * (n.Val - 1)\n\t\t\t\t} else {\n\t\t\t\t\topt.EndTime += int64(opt.Interval.Duration) * (n.Val - 1)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn newMovingAverageIterator(input, int(n.Val), opt)\n\t\t}\n\t\tpanic(fmt.Sprintf(\"invalid series aggregate function: %s\", expr.Name))\n\tcase \"cumulative_sum\":\n\t\topt.Ordered = true\n\t\tinput, err := buildExprIterator(expr.Args[0], b.ic, b.sources, opt, b.selector, false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn newCumulativeSumIterator(input, opt)\n\tcase \"integral\":\n\t\topt.Ordered = true\n\t\tinput, err := buildExprIterator(expr.Args[0].(*VarRef), b.ic, b.sources, opt, false, false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tinterval := opt.IntegralInterval()\n\t\treturn newIntegralIterator(input, opt, interval)\n\tcase \"top\":\n\t\tif len(expr.Args) < 2 {\n\t\t\treturn nil, fmt.Errorf(\"top() requires 2 or more arguments, got %d\", len(expr.Args))\n\t\t}\n\n\t\tvar input Iterator\n\t\tif len(expr.Args) > 2 {\n\t\t\t// Create a max iterator using the groupings in the arguments.\n\t\t\tdims := make(map[string]struct{}, len(expr.Args)-2+len(opt.GroupBy))\n\t\t\tfor i := 1; i < len(expr.Args)-1; i++ {\n\t\t\t\tref := expr.Args[i].(*VarRef)\n\t\t\t\tdims[ref.Val] = struct{}{}\n\t\t\t}\n\t\t\tfor dim := range opt.GroupBy 
{\n\t\t\t\tdims[dim] = struct{}{}\n\t\t\t}\n\n\t\t\tcall := &Call{\n\t\t\t\tName: \"max\",\n\t\t\t\tArgs: expr.Args[:1],\n\t\t\t}\n\t\t\tcallOpt := opt\n\t\t\tcallOpt.Expr = call\n\t\t\tcallOpt.GroupBy = dims\n\t\t\tcallOpt.Fill = NoFill\n\n\t\t\tbuilder := *b\n\t\t\tbuilder.opt = callOpt\n\t\t\tbuilder.selector = true\n\t\t\tbuilder.writeMode = false\n\n\t\t\ti, err := builder.callIterator(call, callOpt)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tinput = i\n\t\t} else {\n\t\t\t// There are no arguments so do not organize the points by tags.\n\t\t\tbuilder := *b\n\t\t\tbuilder.opt.Expr = expr.Args[0]\n\t\t\tbuilder.selector = true\n\t\t\tbuilder.writeMode = false\n\n\t\t\tref := expr.Args[0].(*VarRef)\n\t\t\ti, err := builder.buildVarRefIterator(ref)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tinput = i\n\t\t}\n\n\t\tn := expr.Args[len(expr.Args)-1].(*IntegerLiteral)\n\t\treturn newTopIterator(input, opt, int(n.Val), b.writeMode)\n\tcase \"bottom\":\n\t\tif len(expr.Args) < 2 {\n\t\t\treturn nil, fmt.Errorf(\"bottom() requires 2 or more arguments, got %d\", len(expr.Args))\n\t\t}\n\n\t\tvar input Iterator\n\t\tif len(expr.Args) > 2 {\n\t\t\t// Create a max iterator using the groupings in the arguments.\n\t\t\tdims := make(map[string]struct{}, len(expr.Args)-2)\n\t\t\tfor i := 1; i < len(expr.Args)-1; i++ {\n\t\t\t\tref := expr.Args[i].(*VarRef)\n\t\t\t\tdims[ref.Val] = struct{}{}\n\t\t\t}\n\t\t\tfor dim := range opt.GroupBy {\n\t\t\t\tdims[dim] = struct{}{}\n\t\t\t}\n\n\t\t\tcall := &Call{\n\t\t\t\tName: \"min\",\n\t\t\t\tArgs: expr.Args[:1],\n\t\t\t}\n\t\t\tcallOpt := opt\n\t\t\tcallOpt.Expr = call\n\t\t\tcallOpt.GroupBy = dims\n\t\t\tcallOpt.Fill = NoFill\n\n\t\t\tbuilder := *b\n\t\t\tbuilder.opt = callOpt\n\t\t\tbuilder.selector = true\n\t\t\tbuilder.writeMode = false\n\n\t\t\ti, err := builder.callIterator(call, callOpt)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tinput = i\n\t\t} else {\n\t\t\t// 
There are no arguments so do not organize the points by tags.\n\t\t\tbuilder := *b\n\t\t\tbuilder.opt.Expr = expr.Args[0]\n\t\t\tbuilder.selector = true\n\t\t\tbuilder.writeMode = false\n\n\t\t\tref := expr.Args[0].(*VarRef)\n\t\t\ti, err := builder.buildVarRefIterator(ref)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tinput = i\n\t\t}\n\n\t\tn := expr.Args[len(expr.Args)-1].(*IntegerLiteral)\n\t\treturn newBottomIterator(input, b.opt, int(n.Val), b.writeMode)\n\t}\n\n\titr, err := func() (Iterator, error) {\n\t\tswitch expr.Name {\n\t\tcase \"count\":\n\t\t\tswitch arg0 := expr.Args[0].(type) {\n\t\t\tcase *Call:\n\t\t\t\tif arg0.Name == \"distinct\" {\n\t\t\t\t\tinput, err := buildExprIterator(arg0, b.ic, b.sources, opt, b.selector, false)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\treturn newCountIterator(input, opt)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfallthrough\n\t\tcase \"min\", \"max\", \"sum\", \"first\", \"last\", \"mean\":\n\t\t\treturn b.callIterator(expr, opt)\n\t\tcase \"median\":\n\t\t\topt.Ordered = true\n\t\t\tinput, err := buildExprIterator(expr.Args[0].(*VarRef), b.ic, b.sources, opt, false, false)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn newMedianIterator(input, opt)\n\t\tcase \"mode\":\n\t\t\tinput, err := buildExprIterator(expr.Args[0].(*VarRef), b.ic, b.sources, opt, false, false)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn NewModeIterator(input, opt)\n\t\tcase \"stddev\":\n\t\t\tinput, err := buildExprIterator(expr.Args[0].(*VarRef), b.ic, b.sources, opt, false, false)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn newStddevIterator(input, opt)\n\t\tcase \"spread\":\n\t\t\t// OPTIMIZE(benbjohnson): convert to map/reduce\n\t\t\tinput, err := buildExprIterator(expr.Args[0].(*VarRef), b.ic, b.sources, opt, false, false)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn newSpreadIterator(input, 
opt)\n\t\tcase \"percentile\":\n\t\t\topt.Ordered = true\n\t\t\tinput, err := buildExprIterator(expr.Args[0].(*VarRef), b.ic, b.sources, opt, false, false)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tvar percentile float64\n\t\t\tswitch arg := expr.Args[1].(type) {\n\t\t\tcase *NumberLiteral:\n\t\t\t\tpercentile = arg.Val\n\t\t\tcase *IntegerLiteral:\n\t\t\t\tpercentile = float64(arg.Val)\n\t\t\t}\n\t\t\treturn newPercentileIterator(input, opt, percentile)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unsupported call: %s\", expr.Name)\n\t\t}\n\t}()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !b.selector || !opt.Interval.IsZero() {\n\t\titr = NewIntervalIterator(itr, opt)\n\t\tif !opt.Interval.IsZero() && opt.Fill != NoFill {\n\t\t\titr = NewFillIterator(itr, expr, opt)\n\t\t}\n\t}\n\tif opt.InterruptCh != nil {\n\t\titr = NewInterruptIterator(itr, opt.InterruptCh)\n\t}\n\treturn itr, nil\n}\n\nfunc (b *exprIteratorBuilder) buildBinaryExprIterator(expr *BinaryExpr) (Iterator, error) {\n\tif rhs, ok := expr.RHS.(Literal); ok {\n\t\t// The right hand side is a literal. It is more common to have the RHS be a literal,\n\t\t// so we check that one first and have this be the happy path.\n\t\tif lhs, ok := expr.LHS.(Literal); ok {\n\t\t\t// We have two literals that couldn't be combined by Reduce.\n\t\t\treturn nil, fmt.Errorf(\"unable to construct an iterator from two literals: LHS: %T, RHS: %T\", lhs, rhs)\n\t\t}\n\n\t\tlhs, err := buildExprIterator(expr.LHS, b.ic, b.sources, b.opt, b.selector, false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn buildRHSTransformIterator(lhs, rhs, expr.Op, b.opt)\n\t} else if lhs, ok := expr.LHS.(Literal); ok {\n\t\trhs, err := buildExprIterator(expr.RHS, b.ic, b.sources, b.opt, b.selector, false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn buildLHSTransformIterator(lhs, rhs, expr.Op, b.opt)\n\t} else {\n\t\t// We have two iterators. 
Combine them into a single iterator.\n\t\tlhs, err := buildExprIterator(expr.LHS, b.ic, b.sources, b.opt, false, false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trhs, err := buildExprIterator(expr.RHS, b.ic, b.sources, b.opt, false, false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn buildTransformIterator(lhs, rhs, expr.Op, b.opt)\n\t}\n}\n\nfunc (b *exprIteratorBuilder) callIterator(expr *Call, opt IteratorOptions) (Iterator, error) {\n\tinputs := make([]Iterator, 0, len(b.sources))\n\tif err := func() error {\n\t\tfor _, source := range b.sources {\n\t\t\tswitch source := source.(type) {\n\t\t\tcase *Measurement:\n\t\t\t\tinput, err := b.ic.CreateIterator(source, opt)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tinputs = append(inputs, input)\n\t\t\tcase *SubQuery:\n\t\t\t\t// Identify the name of the field we are using.\n\t\t\t\targ0 := expr.Args[0].(*VarRef)\n\n\t\t\t\tinput, err := buildExprIterator(arg0, b.ic, []Source{source}, opt, b.selector, false)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// Wrap the result in a call iterator.\n\t\t\t\ti, err := NewCallIterator(input, opt)\n\t\t\t\tif err != nil {\n\t\t\t\t\tinput.Close()\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tinputs = append(inputs, i)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}(); err != nil {\n\t\tIterators(inputs).Close()\n\t\treturn nil, err\n\t}\n\n\titr, err := Iterators(inputs).Merge(opt)\n\tif err != nil {\n\t\tIterators(inputs).Close()\n\t\treturn nil, err\n\t} else if itr == nil {\n\t\titr = &nilFloatIterator{}\n\t}\n\treturn itr, nil\n}\n\nfunc buildRHSTransformIterator(lhs Iterator, rhs Literal, op Token, opt IteratorOptions) (Iterator, error) {\n\tfn := binaryExprFunc(iteratorDataType(lhs), literalDataType(rhs), op)\n\tswitch fn := fn.(type) {\n\tcase func(float64, float64) float64:\n\t\tvar input FloatIterator\n\t\tswitch lhs := lhs.(type) {\n\t\tcase FloatIterator:\n\t\t\tinput = lhs\n\t\tcase 
IntegerIterator:\n\t\t\tinput = &integerFloatCastIterator{input: lhs}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, unable to use %T as a FloatIterator\", lhs)\n\t\t}\n\n\t\tvar val float64\n\t\tswitch rhs := rhs.(type) {\n\t\tcase *NumberLiteral:\n\t\t\tval = rhs.Val\n\t\tcase *IntegerLiteral:\n\t\t\tval = float64(rhs.Val)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on RHS, unable to use %T as a NumberLiteral\", rhs)\n\t\t}\n\t\treturn &floatTransformIterator{\n\t\t\tinput: input,\n\t\t\tfn: func(p *FloatPoint) *FloatPoint {\n\t\t\t\tif p == nil {\n\t\t\t\t\treturn nil\n\t\t\t\t} else if p.Nil {\n\t\t\t\t\treturn p\n\t\t\t\t}\n\t\t\t\tp.Value = fn(p.Value, val)\n\t\t\t\treturn p\n\t\t\t},\n\t\t}, nil\n\tcase func(int64, int64) float64:\n\t\tinput, ok := lhs.(IntegerIterator)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, unable to use %T as a IntegerIterator\", lhs)\n\t\t}\n\n\t\tvar val int64\n\t\tswitch rhs := rhs.(type) {\n\t\tcase *IntegerLiteral:\n\t\t\tval = rhs.Val\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on RHS, unable to use %T as a IntegerLiteral\", rhs)\n\t\t}\n\t\treturn &integerFloatTransformIterator{\n\t\t\tinput: input,\n\t\t\tfn: func(p *IntegerPoint) *FloatPoint {\n\t\t\t\tif p == nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tfp := &FloatPoint{\n\t\t\t\t\tName: p.Name,\n\t\t\t\t\tTags: p.Tags,\n\t\t\t\t\tTime: p.Time,\n\t\t\t\t\tAux:  p.Aux,\n\t\t\t\t}\n\t\t\t\tif p.Nil {\n\t\t\t\t\tfp.Nil = true\n\t\t\t\t} else {\n\t\t\t\t\tfp.Value = fn(p.Value, val)\n\t\t\t\t}\n\t\t\t\treturn fp\n\t\t\t},\n\t\t}, nil\n\tcase func(float64, float64) bool:\n\t\tvar input FloatIterator\n\t\tswitch lhs := lhs.(type) {\n\t\tcase FloatIterator:\n\t\t\tinput = lhs\n\t\tcase IntegerIterator:\n\t\t\tinput = &integerFloatCastIterator{input: lhs}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, unable to use %T as a FloatIterator\", lhs)\n\t\t}\n\n\t\tvar val 
float64\n\t\tswitch rhs := rhs.(type) {\n\t\tcase *NumberLiteral:\n\t\t\tval = rhs.Val\n\t\tcase *IntegerLiteral:\n\t\t\tval = float64(rhs.Val)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on RHS, unable to use %T as a NumberLiteral\", rhs)\n\t\t}\n\t\treturn &floatBoolTransformIterator{\n\t\t\tinput: input,\n\t\t\tfn: func(p *FloatPoint) *BooleanPoint {\n\t\t\t\tif p == nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tbp := &BooleanPoint{\n\t\t\t\t\tName: p.Name,\n\t\t\t\t\tTags: p.Tags,\n\t\t\t\t\tTime: p.Time,\n\t\t\t\t\tAux:  p.Aux,\n\t\t\t\t}\n\t\t\t\tif p.Nil {\n\t\t\t\t\tbp.Nil = true\n\t\t\t\t} else {\n\t\t\t\t\tbp.Value = fn(p.Value, val)\n\t\t\t\t}\n\t\t\t\treturn bp\n\t\t\t},\n\t\t}, nil\n\tcase func(int64, int64) int64:\n\t\tvar input IntegerIterator\n\t\tswitch lhs := lhs.(type) {\n\t\tcase IntegerIterator:\n\t\t\tinput = lhs\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, unable to use %T as an IntegerIterator\", lhs)\n\t\t}\n\n\t\tvar val int64\n\t\tswitch rhs := rhs.(type) {\n\t\tcase *IntegerLiteral:\n\t\t\tval = rhs.Val\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on RHS, unable to use %T as an IntegerLiteral\", rhs)\n\t\t}\n\t\treturn &integerTransformIterator{\n\t\t\tinput: input,\n\t\t\tfn: func(p *IntegerPoint) *IntegerPoint {\n\t\t\t\tif p == nil {\n\t\t\t\t\treturn nil\n\t\t\t\t} else if p.Nil {\n\t\t\t\t\treturn p\n\t\t\t\t}\n\t\t\t\tp.Value = fn(p.Value, val)\n\t\t\t\treturn p\n\t\t\t},\n\t\t}, nil\n\tcase func(int64, int64) bool:\n\t\tvar input IntegerIterator\n\t\tswitch lhs := lhs.(type) {\n\t\tcase IntegerIterator:\n\t\t\tinput = lhs\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, unable to use %T as an IntegerIterator\", lhs)\n\t\t}\n\n\t\tvar val int64\n\t\tswitch rhs := rhs.(type) {\n\t\tcase *IntegerLiteral:\n\t\t\tval = rhs.Val\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on RHS, unable to use %T as an IntegerLiteral\", rhs)\n\t\t}\n\t\treturn 
&integerBoolTransformIterator{\n\t\t\tinput: input,\n\t\t\tfn: func(p *IntegerPoint) *BooleanPoint {\n\t\t\t\tif p == nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tbp := &BooleanPoint{\n\t\t\t\t\tName: p.Name,\n\t\t\t\t\tTags: p.Tags,\n\t\t\t\t\tTime: p.Time,\n\t\t\t\t\tAux:  p.Aux,\n\t\t\t\t}\n\t\t\t\tif p.Nil {\n\t\t\t\t\tbp.Nil = true\n\t\t\t\t} else {\n\t\t\t\t\tbp.Value = fn(p.Value, val)\n\t\t\t\t}\n\t\t\t\treturn bp\n\t\t\t},\n\t\t}, nil\n\tcase func(bool, bool) bool:\n\t\tvar input BooleanIterator\n\t\tswitch lhs := lhs.(type) {\n\t\tcase BooleanIterator:\n\t\t\tinput = lhs\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, unable to use %T as an BooleanIterator\", lhs)\n\t\t}\n\n\t\tvar val bool\n\t\tswitch rhs := rhs.(type) {\n\t\tcase *BooleanLiteral:\n\t\t\tval = rhs.Val\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on RHS, unable to use %T as an BooleanLiteral\", rhs)\n\t\t}\n\t\treturn &booleanTransformIterator{\n\t\t\tinput: input,\n\t\t\tfn: func(p *BooleanPoint) *BooleanPoint {\n\t\t\t\tif p == nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tbp := &BooleanPoint{\n\t\t\t\t\tName: p.Name,\n\t\t\t\t\tTags: p.Tags,\n\t\t\t\t\tTime: p.Time,\n\t\t\t\t\tAux:  p.Aux,\n\t\t\t\t}\n\t\t\t\tif p.Nil {\n\t\t\t\t\tbp.Nil = true\n\t\t\t\t} else {\n\t\t\t\t\tbp.Value = fn(p.Value, val)\n\t\t\t\t}\n\t\t\t\treturn bp\n\t\t\t},\n\t\t}, nil\n\t}\n\treturn nil, fmt.Errorf(\"unable to construct rhs transform iterator from %T and %T\", lhs, rhs)\n}\n\nfunc buildLHSTransformIterator(lhs Literal, rhs Iterator, op Token, opt IteratorOptions) (Iterator, error) {\n\tfn := binaryExprFunc(literalDataType(lhs), iteratorDataType(rhs), op)\n\tswitch fn := fn.(type) {\n\tcase func(float64, float64) float64:\n\t\tvar input FloatIterator\n\t\tswitch rhs := rhs.(type) {\n\t\tcase FloatIterator:\n\t\t\tinput = rhs\n\t\tcase IntegerIterator:\n\t\t\tinput = &integerFloatCastIterator{input: rhs}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type 
mismatch on RHS, unable to use %T as a FloatIterator\", rhs)\n\t\t}\n\n\t\tvar val float64\n\t\tswitch lhs := lhs.(type) {\n\t\tcase *NumberLiteral:\n\t\t\tval = lhs.Val\n\t\tcase *IntegerLiteral:\n\t\t\tval = float64(lhs.Val)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, unable to use %T as a NumberLiteral\", lhs)\n\t\t}\n\t\treturn &floatTransformIterator{\n\t\t\tinput: input,\n\t\t\tfn: func(p *FloatPoint) *FloatPoint {\n\t\t\t\tif p == nil {\n\t\t\t\t\treturn nil\n\t\t\t\t} else if p.Nil {\n\t\t\t\t\treturn p\n\t\t\t\t}\n\t\t\t\tp.Value = fn(val, p.Value)\n\t\t\t\treturn p\n\t\t\t},\n\t\t}, nil\n\tcase func(int64, int64) float64:\n\t\tinput, ok := rhs.(IntegerIterator)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on RHS, unable to use %T as a IntegerIterator\", lhs)\n\t\t}\n\n\t\tvar val int64\n\t\tswitch lhs := lhs.(type) {\n\t\tcase *IntegerLiteral:\n\t\t\tval = lhs.Val\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, unable to use %T as a IntegerLiteral\", rhs)\n\t\t}\n\t\treturn &integerFloatTransformIterator{\n\t\t\tinput: input,\n\t\t\tfn: func(p *IntegerPoint) *FloatPoint {\n\t\t\t\tif p == nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tfp := &FloatPoint{\n\t\t\t\t\tName: p.Name,\n\t\t\t\t\tTags: p.Tags,\n\t\t\t\t\tTime: p.Time,\n\t\t\t\t\tAux:  p.Aux,\n\t\t\t\t}\n\t\t\t\tif p.Nil {\n\t\t\t\t\tfp.Nil = true\n\t\t\t\t} else {\n\t\t\t\t\tfp.Value = fn(val, p.Value)\n\t\t\t\t}\n\t\t\t\treturn fp\n\t\t\t},\n\t\t}, nil\n\tcase func(float64, float64) bool:\n\t\tvar input FloatIterator\n\t\tswitch rhs := rhs.(type) {\n\t\tcase FloatIterator:\n\t\t\tinput = rhs\n\t\tcase IntegerIterator:\n\t\t\tinput = &integerFloatCastIterator{input: rhs}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on RHS, unable to use %T as a FloatIterator\", rhs)\n\t\t}\n\n\t\tvar val float64\n\t\tswitch lhs := lhs.(type) {\n\t\tcase *NumberLiteral:\n\t\t\tval = lhs.Val\n\t\tcase *IntegerLiteral:\n\t\t\tval = 
float64(lhs.Val)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, unable to use %T as a NumberLiteral\", lhs)\n\t\t}\n\t\treturn &floatBoolTransformIterator{\n\t\t\tinput: input,\n\t\t\tfn: func(p *FloatPoint) *BooleanPoint {\n\t\t\t\tif p == nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tbp := &BooleanPoint{\n\t\t\t\t\tName: p.Name,\n\t\t\t\t\tTags: p.Tags,\n\t\t\t\t\tTime: p.Time,\n\t\t\t\t\tAux:  p.Aux,\n\t\t\t\t}\n\t\t\t\tif p.Nil {\n\t\t\t\t\tbp.Nil = true\n\t\t\t\t} else {\n\t\t\t\t\tbp.Value = fn(val, p.Value)\n\t\t\t\t}\n\t\t\t\treturn bp\n\t\t\t},\n\t\t}, nil\n\tcase func(int64, int64) int64:\n\t\tvar input IntegerIterator\n\t\tswitch rhs := rhs.(type) {\n\t\tcase IntegerIterator:\n\t\t\tinput = rhs\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on RHS, unable to use %T as an IntegerIterator\", rhs)\n\t\t}\n\n\t\tvar val int64\n\t\tswitch lhs := lhs.(type) {\n\t\tcase *IntegerLiteral:\n\t\t\tval = lhs.Val\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, unable to use %T as an IntegerLiteral\", lhs)\n\t\t}\n\t\treturn &integerTransformIterator{\n\t\t\tinput: input,\n\t\t\tfn: func(p *IntegerPoint) *IntegerPoint {\n\t\t\t\tif p == nil {\n\t\t\t\t\treturn nil\n\t\t\t\t} else if p.Nil {\n\t\t\t\t\treturn p\n\t\t\t\t}\n\t\t\t\tp.Value = fn(val, p.Value)\n\t\t\t\treturn p\n\t\t\t},\n\t\t}, nil\n\tcase func(int64, int64) bool:\n\t\tvar input IntegerIterator\n\t\tswitch rhs := rhs.(type) {\n\t\tcase IntegerIterator:\n\t\t\tinput = rhs\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on RHS, unable to use %T as an IntegerIterator\", rhs)\n\t\t}\n\n\t\tvar val int64\n\t\tswitch lhs := lhs.(type) {\n\t\tcase *IntegerLiteral:\n\t\t\tval = lhs.Val\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, unable to use %T as an IntegerLiteral\", lhs)\n\t\t}\n\t\treturn &integerBoolTransformIterator{\n\t\t\tinput: input,\n\t\t\tfn: func(p *IntegerPoint) *BooleanPoint {\n\t\t\t\tif p == nil 
{\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tbp := &BooleanPoint{\n\t\t\t\t\tName: p.Name,\n\t\t\t\t\tTags: p.Tags,\n\t\t\t\t\tTime: p.Time,\n\t\t\t\t\tAux:  p.Aux,\n\t\t\t\t}\n\t\t\t\tif p.Nil {\n\t\t\t\t\tbp.Nil = true\n\t\t\t\t} else {\n\t\t\t\t\tbp.Value = fn(val, p.Value)\n\t\t\t\t}\n\t\t\t\treturn bp\n\t\t\t},\n\t\t}, nil\n\tcase func(bool, bool) bool:\n\t\tvar input BooleanIterator\n\t\tswitch rhs := rhs.(type) {\n\t\tcase BooleanIterator:\n\t\t\tinput = rhs\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on RHS, unable to use %T as an BooleanIterator\", rhs)\n\t\t}\n\n\t\tvar val bool\n\t\tswitch lhs := lhs.(type) {\n\t\tcase *BooleanLiteral:\n\t\t\tval = lhs.Val\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, unable to use %T as a BooleanLiteral\", lhs)\n\t\t}\n\t\treturn &booleanTransformIterator{\n\t\t\tinput: input,\n\t\t\tfn: func(p *BooleanPoint) *BooleanPoint {\n\t\t\t\tif p == nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tbp := &BooleanPoint{\n\t\t\t\t\tName: p.Name,\n\t\t\t\t\tTags: p.Tags,\n\t\t\t\t\tTime: p.Time,\n\t\t\t\t\tAux:  p.Aux,\n\t\t\t\t}\n\t\t\t\tif p.Nil {\n\t\t\t\t\tbp.Nil = true\n\t\t\t\t} else {\n\t\t\t\t\tbp.Value = fn(val, p.Value)\n\t\t\t\t}\n\t\t\t\treturn bp\n\t\t\t},\n\t\t}, nil\n\t}\n\treturn nil, fmt.Errorf(\"unable to construct lhs transform iterator from %T and %T\", lhs, rhs)\n}\n\nfunc buildTransformIterator(lhs Iterator, rhs Iterator, op Token, opt IteratorOptions) (Iterator, error) {\n\tfn := binaryExprFunc(iteratorDataType(lhs), iteratorDataType(rhs), op)\n\tswitch fn := fn.(type) {\n\tcase func(float64, float64) float64:\n\t\tvar left FloatIterator\n\t\tswitch lhs := lhs.(type) {\n\t\tcase FloatIterator:\n\t\t\tleft = lhs\n\t\tcase IntegerIterator:\n\t\t\tleft = &integerFloatCastIterator{input: lhs}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, unable to use %T as a FloatIterator\", lhs)\n\t\t}\n\n\t\tvar right FloatIterator\n\t\tswitch rhs := rhs.(type) 
{\n\t\tcase FloatIterator:\n\t\t\tright = rhs\n\t\tcase IntegerIterator:\n\t\t\tright = &integerFloatCastIterator{input: rhs}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on RHS, unable to use %T as a FloatIterator\", rhs)\n\t\t}\n\t\treturn newFloatExprIterator(left, right, opt, fn), nil\n\tcase func(int64, int64) float64:\n\t\tleft, ok := lhs.(IntegerIterator)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, unable to use %T as a IntegerIterator\", lhs)\n\t\t}\n\t\tright, ok := rhs.(IntegerIterator)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on RHS, unable to use %T as a IntegerIterator\", rhs)\n\t\t}\n\t\treturn newIntegerFloatExprIterator(left, right, opt, fn), nil\n\tcase func(int64, int64) int64:\n\t\tleft, ok := lhs.(IntegerIterator)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, unable to use %T as a IntegerIterator\", lhs)\n\t\t}\n\t\tright, ok := rhs.(IntegerIterator)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on RHS, unable to use %T as a IntegerIterator\", rhs)\n\t\t}\n\t\treturn newIntegerExprIterator(left, right, opt, fn), nil\n\tcase func(float64, float64) bool:\n\t\tvar left FloatIterator\n\t\tswitch lhs := lhs.(type) {\n\t\tcase FloatIterator:\n\t\t\tleft = lhs\n\t\tcase IntegerIterator:\n\t\t\tleft = &integerFloatCastIterator{input: lhs}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, unable to use %T as a FloatIterator\", lhs)\n\t\t}\n\n\t\tvar right FloatIterator\n\t\tswitch rhs := rhs.(type) {\n\t\tcase FloatIterator:\n\t\t\tright = rhs\n\t\tcase IntegerIterator:\n\t\t\tright = &integerFloatCastIterator{input: rhs}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on RHS, unable to use %T as a FloatIterator\", rhs)\n\t\t}\n\t\treturn newFloatBooleanExprIterator(left, right, opt, fn), nil\n\tcase func(int64, int64) bool:\n\t\tleft, ok := lhs.(IntegerIterator)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, 
unable to use %T as a IntegerIterator\", lhs)\n\t\t}\n\t\tright, ok := rhs.(IntegerIterator)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on RHS, unable to use %T as a IntegerIterator\", rhs)\n\t\t}\n\t\treturn newIntegerBooleanExprIterator(left, right, opt, fn), nil\n\tcase func(bool, bool) bool:\n\t\tleft, ok := lhs.(BooleanIterator)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on LHS, unable to use %T as a BooleanIterator\", lhs)\n\t\t}\n\t\tright, ok := rhs.(BooleanIterator)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"type mismatch on RHS, unable to use %T as a BooleanIterator\", rhs)\n\t\t}\n\t\treturn newBooleanExprIterator(left, right, opt, fn), nil\n\t}\n\treturn nil, fmt.Errorf(\"unable to construct transform iterator from %T and %T\", lhs, rhs)\n}\n\nfunc iteratorDataType(itr Iterator) DataType {\n\tswitch itr.(type) {\n\tcase FloatIterator:\n\t\treturn Float\n\tcase IntegerIterator:\n\t\treturn Integer\n\tcase StringIterator:\n\t\treturn String\n\tcase BooleanIterator:\n\t\treturn Boolean\n\tdefault:\n\t\treturn Unknown\n\t}\n}\n\nfunc literalDataType(lit Literal) DataType {\n\tswitch lit.(type) {\n\tcase *NumberLiteral:\n\t\treturn Float\n\tcase *IntegerLiteral:\n\t\treturn Integer\n\tcase *StringLiteral:\n\t\treturn String\n\tcase *BooleanLiteral:\n\t\treturn Boolean\n\tdefault:\n\t\treturn Unknown\n\t}\n}\n\nfunc binaryExprFunc(typ1 DataType, typ2 DataType, op Token) interface{} {\n\tvar fn interface{}\n\tswitch typ1 {\n\tcase Float:\n\t\tfn = floatBinaryExprFunc(op)\n\tcase Integer:\n\t\tswitch typ2 {\n\t\tcase Float:\n\t\t\tfn = floatBinaryExprFunc(op)\n\t\tdefault:\n\t\t\tfn = integerBinaryExprFunc(op)\n\t\t}\n\tcase Boolean:\n\t\tfn = booleanBinaryExprFunc(op)\n\t}\n\treturn fn\n}\n\nfunc floatBinaryExprFunc(op Token) interface{} {\n\tswitch op {\n\tcase ADD:\n\t\treturn func(lhs, rhs float64) float64 { return lhs + rhs }\n\tcase SUB:\n\t\treturn func(lhs, rhs float64) float64 { return lhs - rhs }\n\tcase 
MUL:\n\t\treturn func(lhs, rhs float64) float64 { return lhs * rhs }\n\tcase DIV:\n\t\treturn func(lhs, rhs float64) float64 {\n\t\t\tif rhs == 0 {\n\t\t\t\treturn float64(0)\n\t\t\t}\n\t\t\treturn lhs / rhs\n\t\t}\n\tcase MOD:\n\t\treturn func(lhs, rhs float64) float64 { return math.Mod(lhs, rhs) }\n\tcase EQ:\n\t\treturn func(lhs, rhs float64) bool { return lhs == rhs }\n\tcase NEQ:\n\t\treturn func(lhs, rhs float64) bool { return lhs != rhs }\n\tcase LT:\n\t\treturn func(lhs, rhs float64) bool { return lhs < rhs }\n\tcase LTE:\n\t\treturn func(lhs, rhs float64) bool { return lhs <= rhs }\n\tcase GT:\n\t\treturn func(lhs, rhs float64) bool { return lhs > rhs }\n\tcase GTE:\n\t\treturn func(lhs, rhs float64) bool { return lhs >= rhs }\n\t}\n\treturn nil\n}\n\nfunc integerBinaryExprFunc(op Token) interface{} {\n\tswitch op {\n\tcase ADD:\n\t\treturn func(lhs, rhs int64) int64 { return lhs + rhs }\n\tcase SUB:\n\t\treturn func(lhs, rhs int64) int64 { return lhs - rhs }\n\tcase MUL:\n\t\treturn func(lhs, rhs int64) int64 { return lhs * rhs }\n\tcase DIV:\n\t\treturn func(lhs, rhs int64) float64 {\n\t\t\tif rhs == 0 {\n\t\t\t\treturn float64(0)\n\t\t\t}\n\t\t\treturn float64(lhs) / float64(rhs)\n\t\t}\n\tcase MOD:\n\t\treturn func(lhs, rhs int64) int64 {\n\t\t\tif rhs == 0 {\n\t\t\t\treturn int64(0)\n\t\t\t}\n\t\t\treturn lhs % rhs\n\t\t}\n\tcase BITWISE_AND:\n\t\treturn func(lhs, rhs int64) int64 { return lhs & rhs }\n\tcase BITWISE_OR:\n\t\treturn func(lhs, rhs int64) int64 { return lhs | rhs }\n\tcase BITWISE_XOR:\n\t\treturn func(lhs, rhs int64) int64 { return lhs ^ rhs }\n\tcase EQ:\n\t\treturn func(lhs, rhs int64) bool { return lhs == rhs }\n\tcase NEQ:\n\t\treturn func(lhs, rhs int64) bool { return lhs != rhs }\n\tcase LT:\n\t\treturn func(lhs, rhs int64) bool { return lhs < rhs }\n\tcase LTE:\n\t\treturn func(lhs, rhs int64) bool { return lhs <= rhs }\n\tcase GT:\n\t\treturn func(lhs, rhs int64) bool { return lhs > rhs }\n\tcase GTE:\n\t\treturn func(lhs, rhs 
int64) bool { return lhs >= rhs }\n\t}\n\treturn nil\n}\n\nfunc booleanBinaryExprFunc(op Token) interface{} {\n\tswitch op {\n\tcase BITWISE_AND:\n\t\treturn func(lhs, rhs bool) bool { return lhs && rhs }\n\tcase BITWISE_OR:\n\t\treturn func(lhs, rhs bool) bool { return lhs || rhs }\n\tcase BITWISE_XOR:\n\t\treturn func(lhs, rhs bool) bool { return lhs != rhs }\n\t}\n\treturn nil\n}\n\n// stringSetSlice returns a sorted slice of keys from a string set.\nfunc stringSetSlice(m map[string]struct{}) []string {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\ta := make([]string, 0, len(m))\n\tfor k := range m {\n\t\ta = append(a, k)\n\t}\n\tsort.Strings(a)\n\treturn a\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/select_test.go",
    "content": "package influxql_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/pkg/deep\"\n)\n\n// Second represents a helper for type converting durations.\nconst Second = int64(time.Second)\n\n// Ensure a SELECT min() query can be executed.\nfunc TestSelect_Min(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\tif !reflect.DeepEqual(opt.Expr, MustParseExpr(`min(value)`)) {\n\t\t\tt.Fatalf(\"unexpected expr: %s\", spew.Sdump(opt.Expr))\n\t\t}\n\n\t\tinput, err := influxql.Iterators{\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn influxql.NewCallIterator(input, opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT min(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected point: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 19, Aggregated: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2, Aggregated: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Value: 100, Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: 10, Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT distinct() query can be executed.\nfunc TestSelect_Distinct_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 1 * Second, Value: 19},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 11 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 12 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute 
selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected point: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 20}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 19}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: 10}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT distinct() query can be executed.\nfunc TestSelect_Distinct_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 1 * Second, Value: 19},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), 
Time: 11 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 12 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected point: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 20}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 19}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: 10}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT distinct() query can be executed.\nfunc TestSelect_Distinct_String(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&StringIterator{Points: []influxql.StringPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: \"a\"},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 1 * Second, Value: \"b\"},\n\t\t\t}},\n\t\t\t&StringIterator{Points: []influxql.StringPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: \"c\"},\n\t\t\t}},\n\t\t\t&StringIterator{Points: []influxql.StringPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 
* Second, Value: \"b\"},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: \"d\"},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 11 * Second, Value: \"d\"},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 12 * Second, Value: \"d\"},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected point: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: \"a\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: \"b\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: \"d\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: \"c\"}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT distinct() query can be executed.\nfunc TestSelect_Distinct_Boolean(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&BooleanIterator{Points: []influxql.BooleanPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: true},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 1 * Second, Value: false},\n\t\t\t}},\n\t\t\t&BooleanIterator{Points: []influxql.BooleanPoint{\n\t\t\t\t{Name: \"cpu\", 
Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: false},\n\t\t\t}},\n\t\t\t&BooleanIterator{Points: []influxql.BooleanPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: true},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: false},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 11 * Second, Value: false},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 12 * Second, Value: true},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected point: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: true}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: false}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: false}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: true}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: false}},\n\t}) {\n\t\tt.Errorf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT mean() query can be executed.\nfunc TestSelect_Mean_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\tinput, err := 
influxql.Iterators{\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn influxql.NewCallIterator(input, opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected point: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 19.5, Aggregated: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: 
ParseTags(\"host=A\"), Time: 10 * Second, Value: 2.5, Aggregated: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Value: 100, Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: 10, Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 50 * Second, Value: 3.2, Aggregated: 5}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT mean() query can be executed.\nfunc TestSelect_Mean_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\tinput, err := influxql.Iterators{\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * 
Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn influxql.NewCallIterator(input, opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 19.5, Aggregated: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2.5, Aggregated: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Value: 100, Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: 10, Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 50 * Second, Value: 3.2, Aggregated: 5}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT mean() query cannot be executed on strings.\nfunc TestSelect_Mean_String(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.NewCallIterator(&StringIterator{}, opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err == nil || 
err.Error() != \"unsupported mean iterator type: *influxql_test.StringIterator\" {\n\t\tt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\tif itrs != nil {\n\t\tinfluxql.Iterators(itrs).Close()\n\t}\n}\n\n// Ensure a SELECT mean() query cannot be executed on booleans.\nfunc TestSelect_Mean_Boolean(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.NewCallIterator(&BooleanIterator{}, opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err == nil || err.Error() != \"unsupported mean iterator type: *influxql_test.BooleanIterator\" {\n\t\tt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\tif itrs != nil {\n\t\tinfluxql.Iterators(itrs).Close()\n\t}\n}\n\n// Ensure a SELECT median() query can be executed.\nfunc TestSelect_Median_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * 
Second, Value: 1},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 19.5}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2.5}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Value: 100}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: 10}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 50 * Second, Value: 3}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT median() query can be executed.\nfunc TestSelect_Median_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" 
{\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 19.5}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, 
Value: 2.5}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Value: 100}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: 10}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 50 * Second, Value: 3}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT median() query cannot be executed on strings.\nfunc TestSelect_Median_String(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &StringIterator{}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err == nil || err.Error() != \"unsupported median iterator type: *influxql_test.StringIterator\" {\n\t\tt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\tif itrs != nil {\n\t\tinfluxql.Iterators(itrs).Close()\n\t}\n}\n\n// Ensure a SELECT median() query cannot be executed on booleans.\nfunc TestSelect_Median_Boolean(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &BooleanIterator{}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err == nil || err.Error() != \"unsupported median iterator type: *influxql_test.BooleanIterator\" {\n\t\tt.Errorf(\"unexpected error: %s\", 
err)\n\t}\n\n\tif itrs != nil {\n\t\tinfluxql.Iterators(itrs).Close()\n\t}\n}\n\n// Ensure a SELECT mode() query can be executed.\nfunc TestSelect_Mode_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := 
Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 10}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Value: 100}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: 10}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 50 * Second, Value: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT mode() query can be executed.\nfunc TestSelect_Mode_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: 
ParseTags(\"region=west,host=B\"), Time: 54 * Second, Value: 5},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 10}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Value: 100}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: 10}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 50 * Second, Value: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT mode() query cannot be executed on strings.\nfunc TestSelect_Mode_String(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&StringIterator{Points: []influxql.StringPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: \"a\"},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 1 * Second, Value: 
\"a\"},\n\t\t\t}},\n\t\t\t&StringIterator{Points: []influxql.StringPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: \"cxxx\"},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 6 * Second, Value: \"zzzz\"},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 7 * Second, Value: \"zzzz\"},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 8 * Second, Value: \"zxxx\"},\n\t\t\t}},\n\t\t\t&StringIterator{Points: []influxql.StringPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: \"b\"},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: \"d\"},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 11 * Second, Value: \"d\"},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 12 * Second, Value: \"d\"},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected point: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: \"a\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: \"d\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: \"zzzz\"}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT mode() query cannot be executed on booleans.\nfunc TestSelect_Mode_Boolean(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt 
influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&BooleanIterator{Points: []influxql.BooleanPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: true},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 1 * Second, Value: false},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 2 * Second, Value: false},\n\t\t\t}},\n\t\t\t&BooleanIterator{Points: []influxql.BooleanPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: true},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 6 * Second, Value: false},\n\t\t\t}},\n\t\t\t&BooleanIterator{Points: []influxql.BooleanPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: false},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: true},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 11 * Second, Value: false},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 12 * Second, Value: true},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected point: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: false}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: true}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: 
ParseTags(\"host=B\"), Time: 0 * Second, Value: true}},\n\t}) {\n\t\tt.Errorf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT top() query can be executed.\nfunc TestSelect_Top_NoTags_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT top(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`), &ic, 
nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 20}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 9 * Second, Value: 19}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 31 * Second, Value: 100}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 5 * Second, Value: 10}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 53 * Second, Value: 5}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 53 * Second, Value: 4}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT top() query can be executed.\nfunc TestSelect_Top_NoTags_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: 
ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT top(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 20}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 9 * Second, Value: 19}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 31 * Second, Value: 100}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 5 * Second, Value: 10}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 53 * Second, Value: 5}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 53 * Second, Value: 4}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT top() query can be executed with tags.\nfunc TestSelect_Top_Tags_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", 
m.Name)\n\t\t}\n\t\tif !reflect.DeepEqual(opt.Expr, MustParseExpr(`max(value::float)`)) {\n\t\t\tt.Fatalf(\"unexpected expr: %s\", spew.Sdump(opt.Expr))\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\tMustCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100, Aux: []interface{}{\"A\"}},\n\t\t\t}}, opt),\n\t\t\tMustCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5, Aux: []interface{}{\"B\"}},\n\t\t\t}}, opt),\n\t\t\tMustCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2, Aux: []interface{}{\"A\"}},\n\t\t\t}}, opt),\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT top(value::float, host::tag, 2) FROM cpu WHERE time >= 
'1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{\n\t\t\t&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 20, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Time: 0 * Second, Value: \"A\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 10, Aux: []interface{}{\"B\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Time: 5 * Second, Value: \"B\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.FloatPoint{Name: \"cpu\", Time: 31 * Second, Value: 100, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Time: 31 * Second, Value: \"A\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.FloatPoint{Name: \"cpu\", Time: 53 * Second, Value: 5, Aux: []interface{}{\"B\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Time: 53 * Second, Value: \"B\"},\n\t\t},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT top() query can be executed with tags.\nfunc TestSelect_Top_Tags_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100, Aux: []interface{}{\"A\"}},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: 
[]influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5, Aux: []interface{}{\"B\"}},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2, Aux: []interface{}{\"A\"}},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT top(value::integer, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{\n\t\t\t&influxql.IntegerPoint{Name: \"cpu\", Time: 0 * Second, Value: 20, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Time: 0 * Second, Value: \"A\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.IntegerPoint{Name: \"cpu\", Time: 5 * Second, Value: 10, Aux: []interface{}{\"B\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Time: 5 * Second, Value: \"B\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.IntegerPoint{Name: \"cpu\", Time: 
31 * Second, Value: 100, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Time: 31 * Second, Value: \"A\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.IntegerPoint{Name: \"cpu\", Time: 53 * Second, Value: 5, Aux: []interface{}{\"B\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Time: 53 * Second, Value: \"B\"},\n\t\t},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT top() query can be executed with tags and group by.\nfunc TestSelect_Top_GroupByTags_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\tif !reflect.DeepEqual(opt.Expr, MustParseExpr(`max(value::float)`)) {\n\t\t\tt.Fatalf(\"unexpected expr: %s\", spew.Sdump(opt.Expr))\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\tMustCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100, Aux: []interface{}{\"A\"}},\n\t\t\t}}, opt),\n\t\t\tMustCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: 
ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5, Aux: []interface{}{\"B\"}},\n\t\t\t}}, opt),\n\t\t\tMustCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2, Aux: []interface{}{\"A\"}},\n\t\t\t}}, opt),\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT top(value::float, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{\n\t\t\t&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"region=east\"), Time: 9 * Second, Value: 19, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"region=east\"), Time: 9 * Second, Value: \"A\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 0 * Second, Value: 20, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 0 * Second, Value: \"A\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 31 * Second, Value: 100, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 31 * Second, Value: \"A\"},\n\t\t},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT top() query can be executed with tags and group by.\nfunc 
TestSelect_Top_GroupByTags_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\tif !reflect.DeepEqual(opt.Expr, MustParseExpr(`max(value::integer)`)) {\n\t\t\tt.Fatalf(\"unexpected expr: %s\", spew.Sdump(opt.Expr))\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\tMustCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100, Aux: []interface{}{\"A\"}},\n\t\t\t}}, opt),\n\t\t\tMustCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5, Aux: []interface{}{\"B\"}},\n\t\t\t}}, opt),\n\t\t\tMustCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: 
ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2, Aux: []interface{}{\"A\"}},\n\t\t\t}}, opt),\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT top(value::integer, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{\n\t\t\t&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"region=east\"), Time: 9 * Second, Value: 19, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"region=east\"), Time: 9 * Second, Value: \"A\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 0 * Second, Value: 20, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 0 * Second, Value: \"A\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 31 * Second, Value: 100, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 31 * Second, Value: \"A\"},\n\t\t},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT bottom() query can be executed.\nfunc TestSelect_Bottom_NoTags_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: 
ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT bottom(value::float, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 11 * Second, Value: 3}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 31 * Second, Value: 100}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 5 * Second, Value: 10}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: 
ParseTags(\"host=B\"), Time: 50 * Second, Value: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 51 * Second, Value: 2}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT bottom() query can be executed.\nfunc TestSelect_Bottom_NoTags_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT bottom(value::integer, 2) 
FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 11 * Second, Value: 3}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 31 * Second, Value: 100}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 5 * Second, Value: 10}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 50 * Second, Value: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 51 * Second, Value: 2}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT bottom() query can be executed with tags.\nfunc TestSelect_Bottom_Tags_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\tif !reflect.DeepEqual(opt.Expr, MustParseExpr(`min(value::float)`)) {\n\t\t\tt.Fatalf(\"unexpected expr: %s\", spew.Sdump(opt.Expr))\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\tMustCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100, Aux: []interface{}{\"A\"}},\n\t\t\t}}, 
opt),\n\t\t\tMustCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5, Aux: []interface{}{\"B\"}},\n\t\t\t}}, opt),\n\t\t\tMustCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2, Aux: []interface{}{\"A\"}},\n\t\t\t}}, opt),\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT bottom(value::float, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{\n\t\t\t&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 10, Aux: []interface{}{\"B\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Time: 5 * Second, Value: \"B\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.FloatPoint{Name: \"cpu\", Time: 10 * Second, Value: 2, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Time: 10 * Second, Value: 
\"A\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.FloatPoint{Name: \"cpu\", Time: 31 * Second, Value: 100, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Time: 31 * Second, Value: \"A\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.FloatPoint{Name: \"cpu\", Time: 50 * Second, Value: 1, Aux: []interface{}{\"B\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Time: 50 * Second, Value: \"B\"},\n\t\t},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT bottom() query can be executed with tags.\nfunc TestSelect_Bottom_Tags_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\tif !reflect.DeepEqual(opt.Expr, MustParseExpr(`min(value::integer)`)) {\n\t\t\tt.Fatalf(\"unexpected expr: %s\", spew.Sdump(opt.Expr))\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\tMustCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100, Aux: []interface{}{\"A\"}},\n\t\t\t}}, opt),\n\t\t\tMustCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3, 
Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5, Aux: []interface{}{\"B\"}},\n\t\t\t}}, opt),\n\t\t\tMustCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2, Aux: []interface{}{\"A\"}},\n\t\t\t}}, opt),\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT bottom(value::integer, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{\n\t\t\t&influxql.IntegerPoint{Name: \"cpu\", Time: 5 * Second, Value: 10, Aux: []interface{}{\"B\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Time: 5 * Second, Value: \"B\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.IntegerPoint{Name: \"cpu\", Time: 10 * Second, Value: 2, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Time: 10 * Second, Value: \"A\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.IntegerPoint{Name: \"cpu\", Time: 31 * Second, Value: 100, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Time: 31 * Second, Value: \"A\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.IntegerPoint{Name: \"cpu\", Time: 50 * Second, Value: 1, Aux: []interface{}{\"B\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Time: 50 * Second, Value: \"B\"},\n\t\t},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT bottom() query can be 
executed with tags and group by.\nfunc TestSelect_Bottom_GroupByTags_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\tif !reflect.DeepEqual(opt.Expr, MustParseExpr(`min(value::float)`)) {\n\t\t\tt.Fatalf(\"unexpected expr: %s\", spew.Sdump(opt.Expr))\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\tMustCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100, Aux: []interface{}{\"A\"}},\n\t\t\t}}, opt),\n\t\t\tMustCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5, Aux: []interface{}{\"B\"}},\n\t\t\t}}, opt),\n\t\t\tMustCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", 
Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2, Aux: []interface{}{\"A\"}},\n\t\t\t}}, opt),\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT bottom(value::float, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{\n\t\t\t&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"region=east\"), Time: 10 * Second, Value: 2, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"region=east\"), Time: 10 * Second, Value: \"A\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 11 * Second, Value: 3, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 11 * Second, Value: \"A\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 50 * Second, Value: 1, Aux: []interface{}{\"B\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 50 * Second, Value: \"B\"},\n\t\t},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT bottom() query can be executed with tags and group by.\nfunc TestSelect_Bottom_GroupByTags_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\tif !reflect.DeepEqual(opt.Expr, MustParseExpr(`min(value::float)`)) {\n\t\t\tt.Fatalf(\"unexpected expr: %s\", spew.Sdump(opt.Expr))\n\t\t}\n\t\treturn 
influxql.Iterators{\n\t\t\tMustCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100, Aux: []interface{}{\"A\"}},\n\t\t\t}}, opt),\n\t\t\tMustCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4, Aux: []interface{}{\"B\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5, Aux: []interface{}{\"B\"}},\n\t\t\t}}, opt),\n\t\t\tMustCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19, Aux: []interface{}{\"A\"}},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2, Aux: []interface{}{\"A\"}},\n\t\t\t}}, opt),\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT bottom(value::float, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := 
Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{\n\t\t\t&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"region=east\"), Time: 10 * Second, Value: 2, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"region=east\"), Time: 10 * Second, Value: \"A\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 11 * Second, Value: 3, Aux: []interface{}{\"A\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 11 * Second, Value: \"A\"},\n\t\t},\n\t\t{\n\t\t\t&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 50 * Second, Value: 1, Aux: []interface{}{\"B\"}},\n\t\t\t&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"region=west\"), Time: 50 * Second, Value: \"B\"},\n\t\t},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT query with a fill(null) statement can be executed.\nfunc TestSelect_Fill_Null_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12 * Second, Value: 2},\n\t\t}}, opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(null)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: 
ParseTags(\"host=A\"), Time: 0 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2, Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 40 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 50 * Second, Nil: true}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT query with a fill(<number>) statement can be executed.\nfunc TestSelect_Fill_Number_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12 * Second, Value: 2},\n\t\t}}, opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(1)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2, Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20 * Second, Value: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: 
ParseTags(\"host=A\"), Time: 30 * Second, Value: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 40 * Second, Value: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 50 * Second, Value: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT query with a fill(previous) statement can be executed.\nfunc TestSelect_Fill_Previous_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12 * Second, Value: 2},\n\t\t}}, opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(previous)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2, Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20 * Second, Value: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Value: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 40 * Second, Value: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 50 * Second, Value: 2}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", 
spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT query with a fill(linear) statement can be executed.\nfunc TestSelect_Fill_Linear_Float_One(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12 * Second, Value: 2},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 32 * Second, Value: 4},\n\t\t}}, opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2, Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20 * Second, Value: 3}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Value: 4, Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 40 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 50 * Second, Nil: true}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Fill_Linear_Float_Many(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif 
m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12 * Second, Value: 2},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 62 * Second, Value: 7},\n\t\t}}, opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2, Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20 * Second, Value: 3}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Value: 4}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 40 * Second, Value: 5}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 50 * Second, Value: 6}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 60 * Second, Value: 7, Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Fill_Linear_Float_MultipleSeries(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Tags: 
ParseTags(\"host=A\"), Time: 12 * Second, Value: 2},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 32 * Second, Value: 4},\n\t\t}}, opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2, Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 40 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 50 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 10 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 20 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 30 * Second, Value: 4, Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 40 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 50 * Second, Nil: true}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT query with a fill(linear) statement can be executed for 
integers.\nfunc TestSelect_Fill_Linear_Integer_One(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.NewCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12 * Second, Value: 1},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 32 * Second, Value: 4},\n\t\t}}, opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT max(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Nil: true}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 1, Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20 * Second, Value: 2}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Value: 4, Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 40 * Second, Nil: true}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 50 * Second, Nil: true}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Fill_Linear_Integer_Many(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", 
m.Name)\n\t\t}\n\t\treturn influxql.NewCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12 * Second, Value: 1},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 72 * Second, Value: 10},\n\t\t}}, opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT max(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:20Z' GROUP BY host, time(10s) fill(linear)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Nil: true}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 1, Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20 * Second, Value: 2}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Value: 4}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 40 * Second, Value: 5}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 50 * Second, Value: 7}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 60 * Second, Value: 8}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 70 * Second, Value: 10, Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Fill_Linear_Integer_MultipleSeries(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn 
influxql.NewCallIterator(&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 12 * Second, Value: 2},\n\t\t\t{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 32 * Second, Value: 4},\n\t\t}}, opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT max(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Nil: true}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2, Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 20 * Second, Nil: true}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Nil: true}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 40 * Second, Nil: true}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 50 * Second, Nil: true}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Nil: true}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 10 * Second, Nil: true}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 20 * Second, Nil: true}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 30 * Second, Value: 4, Aggregated: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 40 * Second, Nil: true}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 50 * Second, Nil: true}},\n\t}) 
{\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT stddev() query can be executed.\nfunc TestSelect_Stddev_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT stddev(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := 
Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 0.7071067811865476}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 0.7071067811865476}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 50 * Second, Value: 1.5811388300841898}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT stddev() query can be executed.\nfunc TestSelect_Stddev_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 
* Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT stddev(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 0.7071067811865476}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 0.7071067811865476}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Nil: true}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 50 * Second, Value: 1.5811388300841898}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT spread() query can be executed.\nfunc TestSelect_Spread_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: 
\"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT spread(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Value: 0}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: 0}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: 
ParseTags(\"host=B\"), Time: 50 * Second, Value: 4}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT spread() query can be executed.\nfunc TestSelect_Spread_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 1},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 5},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT spread(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), 
&ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 1}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Value: 0}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: 0}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 50 * Second, Value: 4}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT percentile() query can be executed.\nfunc TestSelect_Percentile_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 9},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 8},\n\t\t\t\t{Name: \"cpu\", Tags: 
ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 7},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 54 * Second, Value: 6},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 55 * Second, Value: 5},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 56 * Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 57 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 58 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 59 * Second, Value: 1},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT percentile(value, 90) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 20}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 3}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Value: 100}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: 10}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 50 * Second, Value: 9}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT percentile() query can be 
executed.\nfunc TestSelect_Percentile_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 50 * Second, Value: 10},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 51 * Second, Value: 9},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 52 * Second, Value: 8},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 53 * Second, Value: 7},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 54 * Second, Value: 6},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 55 * Second, Value: 5},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 56 * Second, Value: 4},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 57 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 58 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 59 * Second, Value: 1},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: 
ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT percentile(value, 90) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 20}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 3}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Value: 100}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: 10}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 50 * Second, Value: 9}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT sample() query can be executed.\nfunc TestSelect_Sample_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 5 * Second, Value: 10},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=B\"), Time: 10 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=B\"), Time: 15 * Second, Value: 
2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT sample(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 20}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 5 * Second, Value: 10}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 10 * Second, Value: 19}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 15 * Second, Value: 2}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT sample() query can be executed.\nfunc TestSelect_Sample_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 5 * Second, Value: 10},\n\t\t\t}},\n\t\t\t&IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=B\"), Time: 10 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=B\"), Time: 15 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT sample(value, 2) FROM cpu WHERE time >= 
'1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 20}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 5 * Second, Value: 10}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 10 * Second, Value: 19}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 15 * Second, Value: 2}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT sample() query can be executed.\nfunc TestSelect_Sample_Boolean(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&BooleanIterator{Points: []influxql.BooleanPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: true},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 5 * Second, Value: false},\n\t\t\t}},\n\t\t\t&BooleanIterator{Points: []influxql.BooleanPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=B\"), Time: 10 * Second, Value: false},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=B\"), Time: 15 * Second, Value: true},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT sample(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err 
:= Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: true}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 5 * Second, Value: false}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 10 * Second, Value: false}},\n\t\t{&influxql.BooleanPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 15 * Second, Value: true}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT sample() query can be executed.\nfunc TestSelect_Sample_String(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&StringIterator{Points: []influxql.StringPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: \"a\"},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 5 * Second, Value: \"b\"},\n\t\t\t}},\n\t\t\t&StringIterator{Points: []influxql.StringPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=B\"), Time: 10 * Second, Value: \"c\"},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=B\"), Time: 15 * Second, Value: \"d\"},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT sample(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, 
[][]influxql.Point{\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: \"a\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 5 * Second, Value: \"b\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 10 * Second, Value: \"c\"}},\n\t\t{&influxql.StringPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 15 * Second, Value: \"d\"}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a simple raw SELECT statement can be executed.\nfunc TestSelect_Raw(t *testing.T) {\n\t// Mock two iterators -- one for each value in the query.\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\tif !reflect.DeepEqual(opt.Aux, []influxql.VarRef{{Val: \"v1\", Type: influxql.Float}, {Val: \"v2\", Type: influxql.Float}}) {\n\t\t\tt.Fatalf(\"unexpected options: %s\", spew.Sdump(opt.Expr))\n\n\t\t}\n\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Time: 0, Aux: []interface{}{float64(1), nil}},\n\t\t\t{Time: 1, Aux: []interface{}{nil, float64(2)}},\n\t\t\t{Time: 5, Aux: []interface{}{float64(3), float64(4)}},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT v1::float, v2::float FROM cpu`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{\n\t\t\t&influxql.FloatPoint{Time: 0, Value: 1},\n\t\t\t&influxql.FloatPoint{Time: 0, Nil: true},\n\t\t},\n\t\t{\n\t\t\t&influxql.FloatPoint{Time: 1, Nil: true},\n\t\t\t&influxql.FloatPoint{Time: 1, Value: 2},\n\t\t},\n\t\t{\n\t\t\t&influxql.FloatPoint{Time: 5, Value: 3},\n\t\t\t&influxql.FloatPoint{Time: 5, 
Value: 4},\n\t\t},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\n// Ensure a SELECT binary expr queries can be executed as floats.\nfunc TestSelect_BinaryExpr_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\tmakeAuxFields := func(value float64) []interface{} {\n\t\t\taux := make([]interface{}, len(opt.Aux))\n\t\t\tfor i := range aux {\n\t\t\t\taux[i] = value\n\t\t\t}\n\t\t\treturn aux\n\t\t}\n\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20, Aux: makeAuxFields(20)},\n\t\t\t{Name: \"cpu\", Time: 5 * Second, Value: 10, Aux: makeAuxFields(10)},\n\t\t\t{Name: \"cpu\", Time: 9 * Second, Value: 19, Aux: makeAuxFields(19)},\n\t\t}}, nil\n\t}\n\tic.FieldDimensionsFn = func(m *influxql.Measurement) (map[string]influxql.DataType, map[string]struct{}, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn map[string]influxql.DataType{\"value\": influxql.Float}, nil, nil\n\t}\n\n\tfor _, test := range []struct {\n\t\tName      string\n\t\tStatement string\n\t\tPoints    [][]influxql.Point\n\t}{\n\t\t{\n\t\t\tName:      \"rhs binary add number\",\n\t\t\tStatement: `SELECT value + 2.0 FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 22}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 12}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 21}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"rhs binary add integer\",\n\t\t\tStatement: `SELECT value + 2 FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 22}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * 
Second, Value: 12}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 21}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary add number\",\n\t\t\tStatement: `SELECT 2.0 + value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 22}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 12}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 21}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary add integer\",\n\t\t\tStatement: `SELECT 2 + value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 22}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 12}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 21}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"two variable binary add\",\n\t\t\tStatement: `SELECT value + value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 40}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 20}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 38}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"rhs binary multiply number\",\n\t\t\tStatement: `SELECT value * 2.0 FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 40}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 20}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 38}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"rhs binary multiply integer\",\n\t\t\tStatement: `SELECT value * 2 FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 40}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 20}},\n\t\t\t\t{&influxql.FloatPoint{Name: 
\"cpu\", Time: 9 * Second, Value: 38}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary multiply number\",\n\t\t\tStatement: `SELECT 2.0 * value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 40}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 20}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 38}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary multiply integer\",\n\t\t\tStatement: `SELECT 2 * value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 40}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 20}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 38}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"two variable binary multiply\",\n\t\t\tStatement: `SELECT value * value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 400}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 100}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 361}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"rhs binary subtract number\",\n\t\t\tStatement: `SELECT value - 2.0 FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 18}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 8}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 17}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"rhs binary subtract integer\",\n\t\t\tStatement: `SELECT value - 2 FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 18}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 8}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 
17}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary subtract number\",\n\t\t\tStatement: `SELECT 2.0 - value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: -18}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: -8}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: -17}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary subtract integer\",\n\t\t\tStatement: `SELECT 2 - value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: -18}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: -8}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: -17}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"two variable binary subtract\",\n\t\t\tStatement: `SELECT value - value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 0}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 0}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 0}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"rhs binary division number\",\n\t\t\tStatement: `SELECT value / 2.0 FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 10}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 5}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: float64(19) / 2}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"rhs binary division integer\",\n\t\t\tStatement: `SELECT value / 2 FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 10}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 5}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: float64(19) / 
2}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary division number\",\n\t\t\tStatement: `SELECT 38.0 / value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 1.9}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 3.8}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 2}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary division integer\",\n\t\t\tStatement: `SELECT 38 / value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 1.9}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 3.8}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 2}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"two variable binary division\",\n\t\t\tStatement: `SELECT value / value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 1}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 1}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 1}},\n\t\t\t},\n\t\t},\n\t} {\n\t\tstmt, err := MustParseSelectStatement(test.Statement).RewriteFields(&ic)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: rewrite error: %s\", test.Name, err)\n\t\t}\n\n\t\titrs, err := influxql.Select(stmt, &ic, nil)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: parse error: %s\", test.Name, err)\n\t\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\t\tt.Fatalf(\"%s: unexpected error: %s\", test.Name, err)\n\t\t} else if !deep.Equal(a, test.Points) {\n\t\t\tt.Errorf(\"%s: unexpected points: %s\", test.Name, spew.Sdump(a))\n\t\t}\n\t}\n}\n\n// Ensure a SELECT binary expr queries can be executed as integers.\nfunc TestSelect_BinaryExpr_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) 
(influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\tmakeAuxFields := func(value int64) []interface{} {\n\t\t\taux := make([]interface{}, len(opt.Aux))\n\t\t\tfor i := range aux {\n\t\t\t\taux[i] = value\n\t\t\t}\n\t\t\treturn aux\n\t\t}\n\t\treturn &IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20, Aux: makeAuxFields(20)},\n\t\t\t{Name: \"cpu\", Time: 5 * Second, Value: 10, Aux: makeAuxFields(10)},\n\t\t\t{Name: \"cpu\", Time: 9 * Second, Value: 19, Aux: makeAuxFields(19)},\n\t\t}}, nil\n\t}\n\tic.FieldDimensionsFn = func(m *influxql.Measurement) (map[string]influxql.DataType, map[string]struct{}, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn map[string]influxql.DataType{\"value\": influxql.Integer}, nil, nil\n\t}\n\n\tfor _, test := range []struct {\n\t\tName      string\n\t\tStatement string\n\t\tPoints    [][]influxql.Point\n\t}{\n\t\t{\n\t\t\tName:      \"rhs binary add number\",\n\t\t\tStatement: `SELECT value + 2.0 FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 22}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 12}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 21}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"rhs binary add integer\",\n\t\t\tStatement: `SELECT value + 2 FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0 * Second, Value: 22}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 5 * Second, Value: 12}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 9 * Second, Value: 21}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary add number\",\n\t\t\tStatement: `SELECT 2.0 + value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, 
Value: 22}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 12}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 21}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary add integer\",\n\t\t\tStatement: `SELECT 2 + value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0 * Second, Value: 22}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 5 * Second, Value: 12}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 9 * Second, Value: 21}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"two variable binary add\",\n\t\t\tStatement: `SELECT value + value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0 * Second, Value: 40}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 5 * Second, Value: 20}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 9 * Second, Value: 38}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"rhs binary multiply number\",\n\t\t\tStatement: `SELECT value * 2.0 FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 40}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 20}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 38}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"rhs binary multiply integer\",\n\t\t\tStatement: `SELECT value * 2 FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0 * Second, Value: 40}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 5 * Second, Value: 20}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 9 * Second, Value: 38}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary multiply number\",\n\t\t\tStatement: `SELECT 2.0 * value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 
40}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 20}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 38}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary multiply integer\",\n\t\t\tStatement: `SELECT 2 * value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0 * Second, Value: 40}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 5 * Second, Value: 20}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 9 * Second, Value: 38}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"two variable binary multiply\",\n\t\t\tStatement: `SELECT value * value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0 * Second, Value: 400}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 5 * Second, Value: 100}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 9 * Second, Value: 361}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"rhs binary subtract number\",\n\t\t\tStatement: `SELECT value - 2.0 FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 18}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 8}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 17}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"rhs binary subtract integer\",\n\t\t\tStatement: `SELECT value - 2 FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0 * Second, Value: 18}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 5 * Second, Value: 8}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 9 * Second, Value: 17}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary subtract number\",\n\t\t\tStatement: `SELECT 2.0 - value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 
-18}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: -8}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: -17}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary subtract integer\",\n\t\t\tStatement: `SELECT 2 - value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0 * Second, Value: -18}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 5 * Second, Value: -8}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 9 * Second, Value: -17}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"two variable binary subtract\",\n\t\t\tStatement: `SELECT value - value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0 * Second, Value: 0}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 5 * Second, Value: 0}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 9 * Second, Value: 0}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"rhs binary division number\",\n\t\t\tStatement: `SELECT value / 2.0 FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 10}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 5}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 9.5}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"rhs binary division integer\",\n\t\t\tStatement: `SELECT value / 2 FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 10}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 5}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: float64(19) / 2}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary division number\",\n\t\t\tStatement: `SELECT 38.0 / value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 
1.9}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 3.8}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 2.0}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary division integer\",\n\t\t\tStatement: `SELECT 38 / value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 1.9}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 3.8}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 2}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"two variable binary division\",\n\t\t\tStatement: `SELECT value / value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 1}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 1}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 1}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"rhs binary bitwise-and integer\",\n\t\t\tStatement: `SELECT value & 254 FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0 * Second, Value: 20}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 5 * Second, Value: 10}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 9 * Second, Value: 18}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary bitwise-or integer\",\n\t\t\tStatement: `SELECT 4 | value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0 * Second, Value: 20}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 5 * Second, Value: 14}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 9 * Second, Value: 23}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"two variable binary bitwise-xor\",\n\t\t\tStatement: `SELECT value ^ value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0 * Second, Value: 
0}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 5 * Second, Value: 0}},\n\t\t\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 9 * Second, Value: 0}},\n\t\t\t},\n\t\t},\n\t} {\n\t\tstmt, err := MustParseSelectStatement(test.Statement).RewriteFields(&ic)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: rewrite error: %s\", test.Name, err)\n\t\t}\n\n\t\titrs, err := influxql.Select(stmt, &ic, nil)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: parse error: %s\", test.Name, err)\n\t\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\t\tt.Fatalf(\"%s: unexpected error: %s\", test.Name, err)\n\t\t} else if !deep.Equal(a, test.Points) {\n\t\t\tt.Errorf(\"%s: unexpected points: %s\", test.Name, spew.Sdump(a))\n\t\t}\n\t}\n}\n\n// Ensure a SELECT binary expr queries can be executed on mixed iterators.\nfunc TestSelect_BinaryExpr_Mixed(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20, Aux: []interface{}{float64(20), int64(10)}},\n\t\t\t{Name: \"cpu\", Time: 5 * Second, Value: 10, Aux: []interface{}{float64(10), int64(15)}},\n\t\t\t{Name: \"cpu\", Time: 9 * Second, Value: 19, Aux: []interface{}{float64(19), int64(5)}},\n\t\t}}, nil\n\t}\n\tic.FieldDimensionsFn = func(m *influxql.Measurement) (map[string]influxql.DataType, map[string]struct{}, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn map[string]influxql.DataType{\n\t\t\t\"total\": influxql.Float,\n\t\t\t\"value\": influxql.Integer,\n\t\t}, nil, nil\n\t}\n\n\tfor _, test := range []struct {\n\t\tName      string\n\t\tStatement string\n\t\tPoints    [][]influxql.Point\n\t}{\n\t\t{\n\t\t\tName:      \"mixed binary add\",\n\t\t\tStatement: `SELECT 
total + value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 30}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 25}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 24}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"mixed binary subtract\",\n\t\t\tStatement: `SELECT total - value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 10}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: -5}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 14}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"mixed binary multiply\",\n\t\t\tStatement: `SELECT total * value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 200}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 150}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: 95}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"mixed binary division\",\n\t\t\tStatement: `SELECT total / value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 2}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: float64(10) / float64(15)}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Value: float64(19) / float64(5)}},\n\t\t\t},\n\t\t},\n\t} {\n\t\tstmt, err := MustParseSelectStatement(test.Statement).RewriteFields(&ic)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: rewrite error: %s\", test.Name, err)\n\t\t}\n\n\t\titrs, err := influxql.Select(stmt, &ic, nil)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: parse error: %s\", test.Name, err)\n\t\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\t\tt.Fatalf(\"%s: unexpected error: %s\", test.Name, err)\n\t\t} else if !deep.Equal(a, test.Points) 
{\n\t\t\tt.Errorf(\"%s: unexpected points: %s\", test.Name, spew.Sdump(a))\n\t\t}\n\t}\n}\n\n// Ensure a SELECT binary expr queries can be executed as booleans.\nfunc TestSelect_BinaryExpr_Boolean(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\tmakeAuxFields := func(value bool) []interface{} {\n\t\t\taux := make([]interface{}, len(opt.Aux))\n\t\t\tfor i := range aux {\n\t\t\t\taux[i] = value\n\t\t\t}\n\t\t\treturn aux\n\t\t}\n\t\treturn &BooleanIterator{Points: []influxql.BooleanPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: true, Aux: makeAuxFields(true)},\n\t\t\t{Name: \"cpu\", Time: 5 * Second, Value: false, Aux: makeAuxFields(false)},\n\t\t\t{Name: \"cpu\", Time: 9 * Second, Value: true, Aux: makeAuxFields(true)},\n\t\t}}, nil\n\t}\n\tic.FieldDimensionsFn = func(m *influxql.Measurement) (map[string]influxql.DataType, map[string]struct{}, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn map[string]influxql.DataType{\n\t\t\t\"one\": influxql.Boolean,\n\t\t\t\"two\": influxql.Boolean,\n\t\t}, nil, nil\n\t}\n\n\tfor _, test := range []struct {\n\t\tName      string\n\t\tStatement string\n\t\tPoints    [][]influxql.Point\n\t}{\n\t\t{\n\t\t\tName:      \"rhs binary bitwise-xor\",\n\t\t\tStatement: `SELECT one ^ true FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.BooleanPoint{Name: \"cpu\", Time: 0 * Second, Value: false}},\n\t\t\t\t{&influxql.BooleanPoint{Name: \"cpu\", Time: 5 * Second, Value: true}},\n\t\t\t\t{&influxql.BooleanPoint{Name: \"cpu\", Time: 9 * Second, Value: false}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"lhs binary or\",\n\t\t\tStatement: `SELECT true | two FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.BooleanPoint{Name: \"cpu\", Time: 0 * 
Second, Value: true}},\n\t\t\t\t{&influxql.BooleanPoint{Name: \"cpu\", Time: 5 * Second, Value: true}},\n\t\t\t\t{&influxql.BooleanPoint{Name: \"cpu\", Time: 9 * Second, Value: true}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"two series bitwise-and\",\n\t\t\tStatement: `SELECT one & two FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.BooleanPoint{Name: \"cpu\", Time: 0 * Second, Value: true}},\n\t\t\t\t{&influxql.BooleanPoint{Name: \"cpu\", Time: 5 * Second, Value: false}},\n\t\t\t\t{&influxql.BooleanPoint{Name: \"cpu\", Time: 9 * Second, Value: true}},\n\t\t\t},\n\t\t},\n\t} {\n\t\tstmt, err := MustParseSelectStatement(test.Statement).RewriteFields(&ic)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: rewrite error: %s\", test.Name, err)\n\t\t}\n\n\t\titrs, err := influxql.Select(stmt, &ic, nil)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: parse error: %s\", test.Name, err)\n\t\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\t\tt.Fatalf(\"%s: unexpected error: %s\", test.Name, err)\n\t\t} else if !deep.Equal(a, test.Points) {\n\t\t\tt.Errorf(\"%s: unexpected points: %s\", test.Name, spew.Sdump(a))\n\t\t}\n\t}\n}\n\n// Ensure a SELECT binary expr with nil values can be executed.\n// Nil values may be present when a field is missing from one iterator,\n// but not the other.\nfunc TestSelect_BinaryExpr_NilValues(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20, Aux: []interface{}{float64(20), nil}},\n\t\t\t{Name: \"cpu\", Time: 5 * Second, Value: 10, Aux: []interface{}{float64(10), float64(15)}},\n\t\t\t{Name: \"cpu\", Time: 9 * Second, Value: 19, Aux: []interface{}{nil, float64(5)}},\n\t\t}}, nil\n\t}\n\tic.FieldDimensionsFn = func(m 
*influxql.Measurement) (map[string]influxql.DataType, map[string]struct{}, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn map[string]influxql.DataType{\n\t\t\t\"total\": influxql.Float,\n\t\t\t\"value\": influxql.Float,\n\t\t}, nil, nil\n\t}\n\n\tfor _, test := range []struct {\n\t\tName      string\n\t\tStatement string\n\t\tPoints    [][]influxql.Point\n\t}{\n\t\t{\n\t\t\tName:      \"nil binary add\",\n\t\t\tStatement: `SELECT total + value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Nil: true}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 25}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Nil: true}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"nil binary subtract\",\n\t\t\tStatement: `SELECT total - value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Nil: true}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: -5}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Nil: true}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"nil binary multiply\",\n\t\t\tStatement: `SELECT total * value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Nil: true}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: 150}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Nil: true}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:      \"nil binary division\",\n\t\t\tStatement: `SELECT total / value FROM cpu`,\n\t\t\tPoints: [][]influxql.Point{\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Nil: true}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 5 * Second, Value: float64(10) / float64(15)}},\n\t\t\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 9 * Second, Nil: true}},\n\t\t\t},\n\t\t},\n\t} {\n\t\tstmt, 
err := MustParseSelectStatement(test.Statement).RewriteFields(&ic)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: rewrite error: %s\", test.Name, err)\n\t\t}\n\n\t\titrs, err := influxql.Select(stmt, &ic, nil)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: parse error: %s\", test.Name, err)\n\t\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\t\tt.Fatalf(\"%s: unexpected error: %s\", test.Name, err)\n\t\t} else if !deep.Equal(a, test.Points) {\n\t\t\tt.Errorf(\"%s: unexpected points: %s\", test.Name, spew.Sdump(a))\n\t\t}\n\t}\n}\n\n// Ensure a SELECT (...) query can be executed.\nfunc TestSelect_ParenExpr(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\tif !reflect.DeepEqual(opt.Expr, MustParseExpr(`min(value)`)) {\n\t\t\tt.Fatalf(\"unexpected expr: %s\", spew.Sdump(opt.Expr))\n\t\t}\n\n\t\tinput, err := influxql.Iterators{\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 11 * Second, Value: 3},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 31 * Second, Value: 100},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn influxql.NewCallIterator(input, opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := 
influxql.Select(MustParseSelectStatement(`SELECT (min(value)) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 19, Aggregated: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2, Aggregated: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 30 * Second, Value: 100, Aggregated: 1}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: 10, Aggregated: 1}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.Iterators{\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 0 * Second, Value: 20},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=A\"), Time: 1 * Second, Value: 19},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=west,host=B\"), Time: 5 * Second, Value: 10},\n\t\t\t}},\n\t\t\t&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 9 * Second, Value: 19},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 10 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 11 * Second, Value: 2},\n\t\t\t\t{Name: \"cpu\", Tags: ParseTags(\"region=east,host=A\"), Time: 12 * Second, 
Value: 2},\n\t\t\t}},\n\t\t}.Merge(opt)\n\t}\n\n\t// Execute selection.\n\titrs, err = influxql.Select(MustParseSelectStatement(`SELECT (distinct(value)) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 20}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 0 * Second, Value: 19}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 10 * Second, Value: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=B\"), Time: 0 * Second, Value: 10}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Derivative_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 12 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 4 * Second, Value: 
-2.5}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 8 * Second, Value: 2.25}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 12 * Second, Value: -4}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Derivative_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 12 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 4 * Second, Value: -2.5}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 8 * Second, Value: 2.25}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 12 * Second, Value: -4}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Derivative_Desc_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 12 * Second, Value: 3},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 
10},\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z' ORDER BY desc`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Errorf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 8 * Second, Value: 4}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 4 * Second, Value: -2.25}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 2.5}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Derivative_Desc_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Time: 12 * Second, Value: 3},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z' ORDER BY desc`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Errorf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 8 * Second, Value: 4}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 4 * Second, Value: -2.25}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 
2.5}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Derivative_Duplicate_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 4 * Second, Value: -2.5}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Derivative_Duplicate_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err 
!= nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 4 * Second, Value: -2.5}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Difference_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 12 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 4 * Second, Value: -10}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 8 * Second, Value: 9}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 12 * Second, Value: -16}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Difference_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: 
\"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 12 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 4 * Second, Value: -10}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 8 * Second, Value: 9}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 12 * Second, Value: -16}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Difference_Duplicate_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 4 * Second, Value: -10}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc 
TestSelect_Difference_Duplicate_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 4 * Second, Value: -10}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Non_Negative_Difference_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 29},\n\t\t\t{Name: \"cpu\", Time: 12 * Second, Value: 3},\n\t\t\t{Name: \"cpu\", Time: 16 * Second, Value: 39},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT non_negative_difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 8 * Second, Value: 19}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 16 * Second, Value: 36}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Non_Negative_Difference_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 21},\n\t\t\t{Name: \"cpu\", Time: 12 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT non_negative_difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 8 * Second, Value: 11}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Non_Negative_Difference_Duplicate_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 
19},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 3},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 30},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 12 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 12 * Second, Value: 3},\n\t\t\t{Name: \"cpu\", Time: 16 * Second, Value: 40},\n\t\t\t{Name: \"cpu\", Time: 16 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT non_negative_difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 8 * Second, Value: 20}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 16 * Second, Value: 30}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Non_Negative_Difference_Duplicate_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 3},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 30},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 12 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 12 * Second, Value: 3},\n\t\t\t{Name: \"cpu\", Time: 16 * Second, Value: 40},\n\t\t\t{Name: \"cpu\", Time: 16 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// 
Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT non_negative_difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 8 * Second, Value: 20}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 16 * Second, Value: 30}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Elapsed_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 11 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT elapsed(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 4 * Second, Value: 4}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 8 * Second, Value: 4}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 11 * Second, Value: 3}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Elapsed_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m 
*influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 11 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT elapsed(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 4 * Second, Value: 4}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 8 * Second, Value: 4}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 11 * Second, Value: 3}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Elapsed_String(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &StringIterator{Points: []influxql.StringPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: \"a\"},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: \"b\"},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: \"c\"},\n\t\t\t{Name: \"cpu\", Time: 11 * Second, Value: \"d\"},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT elapsed(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := 
Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 4 * Second, Value: 4}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 8 * Second, Value: 4}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 11 * Second, Value: 3}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Elapsed_Boolean(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &BooleanIterator{Points: []influxql.BooleanPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: true},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: false},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: false},\n\t\t\t{Name: \"cpu\", Time: 11 * Second, Value: true},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT elapsed(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 4 * Second, Value: 4}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 8 * Second, Value: 4}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 11 * Second, Value: 3}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Integral_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn 
&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 10 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 15 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 20 * Second, Value: 0},\n\t\t\t{Name: \"cpu\", Time: 30 * Second, Value: -10},\n\t\t}}, nil\n\t}\n\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT integral(value) FROM cpu`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0, Value: 50}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Integral_Float_GroupByTime(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 10 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 15 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 20 * Second, Value: 0},\n\t\t\t{Name: \"cpu\", Time: 30 * Second, Value: -10},\n\t\t}}, nil\n\t}\n\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT integral(value) FROM cpu WHERE time > 0s AND time < 60s GROUP BY time(20s)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0, Value: 100}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 20 * Second, Value: -50}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Integral_Float_InterpolateGroupByTime(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m 
*influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 10 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 15 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 25 * Second, Value: 0},\n\t\t\t{Name: \"cpu\", Time: 30 * Second, Value: -10},\n\t\t}}, nil\n\t}\n\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT integral(value) FROM cpu WHERE time > 0s AND time < 60s GROUP BY time(20s)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0, Value: 112.5}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 20 * Second, Value: -12.5}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Integral_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 5 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 10 * Second, Value: 0},\n\t\t\t{Name: \"cpu\", Time: 20 * Second, Value: -10},\n\t\t}}, nil\n\t}\n\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT integral(value) FROM cpu`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0, Value: 50}},\n\t}) {\n\t\tt.Fatalf(\"unexpected 
points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Integral_Duplicate_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 5 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 5 * Second, Value: 30},\n\t\t\t{Name: \"cpu\", Time: 10 * Second, Value: 40},\n\t\t}}, nil\n\t}\n\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT integral(value) FROM cpu`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0, Value: 250}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_Integral_Duplicate_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 5 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 5 * Second, Value: 30},\n\t\t\t{Name: \"cpu\", Time: 10 * Second, Value: 40},\n\t\t}}, nil\n\t}\n\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT integral(value, 2s) FROM cpu`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0, Value: 
125}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_MovingAverage_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 12 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT moving_average(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 4 * Second, Value: 15, Aggregated: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 8 * Second, Value: 14.5, Aggregated: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 12 * Second, Value: 11, Aggregated: 2}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_MovingAverage_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 12 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// Execute 
selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT moving_average(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 4 * Second, Value: 15, Aggregated: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 8 * Second, Value: 14.5, Aggregated: 2}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 12 * Second, Value: 11, Aggregated: 2}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_CumulativeSum_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 12 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 20}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 4 * Second, Value: 30}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 8 * Second, Value: 49}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 12 * Second, Value: 52}},\n\t}) 
{\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_CumulativeSum_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 8 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 12 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0 * Second, Value: 20}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 4 * Second, Value: 30}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 8 * Second, Value: 49}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 12 * Second, Value: 52}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_CumulativeSum_Duplicate_Float(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// 
Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 20}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 0 * Second, Value: 39}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 4 * Second, Value: 49}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 4 * Second, Value: 52}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_CumulativeSum_Duplicate_Integer(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &IntegerIterator{Points: []influxql.IntegerPoint{\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 20},\n\t\t\t{Name: \"cpu\", Time: 0 * Second, Value: 19},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 4 * Second, Value: 3},\n\t\t}}, nil\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0 * Second, Value: 20}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 0 * Second, Value: 39}},\n\t\t{&influxql.IntegerPoint{Name: \"cpu\", Time: 4 * Second, Value: 49}},\n\t\t{&influxql.IntegerPoint{Name: 
\"cpu\", Time: 4 * Second, Value: 52}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_HoltWinters_GroupBy_Agg(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn influxql.NewCallIterator(&FloatIterator{Points: []influxql.FloatPoint{\n\t\t\t{Name: \"cpu\", Time: 10 * Second, Value: 4},\n\t\t\t{Name: \"cpu\", Time: 11 * Second, Value: 6},\n\n\t\t\t{Name: \"cpu\", Time: 12 * Second, Value: 9},\n\t\t\t{Name: \"cpu\", Time: 13 * Second, Value: 11},\n\n\t\t\t{Name: \"cpu\", Time: 14 * Second, Value: 5},\n\t\t\t{Name: \"cpu\", Time: 15 * Second, Value: 7},\n\n\t\t\t{Name: \"cpu\", Time: 16 * Second, Value: 10},\n\t\t\t{Name: \"cpu\", Time: 17 * Second, Value: 12},\n\n\t\t\t{Name: \"cpu\", Time: 18 * Second, Value: 6},\n\t\t\t{Name: \"cpu\", Time: 19 * Second, Value: 8},\n\t\t}}, opt)\n\t}\n\n\t// Execute selection.\n\titrs, err := influxql.Select(MustParseSelectStatement(`SELECT holt_winters(mean(value), 2, 2) FROM cpu WHERE time >= '1970-01-01T00:00:10Z' AND time < '1970-01-01T00:00:20Z' GROUP BY time(2s)`), &ic, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if a, err := Iterators(itrs).ReadAll(); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if !deep.Equal(a, [][]influxql.Point{\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 20 * Second, Value: 11.960623419918432}},\n\t\t{&influxql.FloatPoint{Name: \"cpu\", Time: 22 * Second, Value: 7.953140268154609}},\n\t}) {\n\t\tt.Fatalf(\"unexpected points: %s\", spew.Sdump(a))\n\t}\n}\n\nfunc TestSelect_UnsupportedCall(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", 
m.Name)\n\t\t}\n\t\treturn &FloatIterator{}, nil\n\t}\n\n\t_, err := influxql.Select(MustParseSelectStatement(`SELECT foobar(value) FROM cpu`), &ic, nil)\n\tif err == nil || err.Error() != \"unsupported call: foobar\" {\n\t\tt.Errorf(\"unexpected error: %s\", err)\n\t}\n}\n\nfunc TestSelect_InvalidQueries(t *testing.T) {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tt.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\treturn &FloatIterator{}, nil\n\t}\n\n\ttests := []struct {\n\t\tq   string\n\t\terr string\n\t}{\n\t\t{\n\t\t\tq:   `SELECT foobar(value) FROM cpu`,\n\t\t\terr: `unsupported call: foobar`,\n\t\t},\n\t\t{\n\t\t\tq:   `SELECT 'value' FROM cpu`,\n\t\t\terr: `invalid expression type: *influxql.StringLiteral`,\n\t\t},\n\t\t{\n\t\t\tq:   `SELECT 'value', value FROM cpu`,\n\t\t\terr: `invalid expression type: *influxql.StringLiteral`,\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\titrs, err := influxql.Select(MustParseSelectStatement(tt.q), &ic, nil)\n\t\tif err == nil || err.Error() != tt.err {\n\t\t\tt.Errorf(\"%d. 
expected error '%s', got '%s'\", i, tt.err, err)\n\t\t}\n\t\tinfluxql.Iterators(itrs).Close()\n\t}\n}\n\nfunc BenchmarkSelect_Raw_1K(b *testing.B)   { benchmarkSelectRaw(b, 1000) }\nfunc BenchmarkSelect_Raw_100K(b *testing.B) { benchmarkSelectRaw(b, 1000000) }\n\nfunc benchmarkSelectRaw(b *testing.B, pointN int) {\n\tbenchmarkSelect(b, MustParseSelectStatement(`SELECT fval FROM cpu`), NewRawBenchmarkIteratorCreator(pointN))\n}\n\nfunc benchmarkSelect(b *testing.B, stmt *influxql.SelectStatement, ic influxql.IteratorCreator) {\n\tb.ReportAllocs()\n\n\tfor i := 0; i < b.N; i++ {\n\t\titrs, err := influxql.Select(stmt, ic, nil)\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tinfluxql.DrainIterators(itrs)\n\t}\n}\n\n// NewRawBenchmarkIteratorCreator returns a new mock iterator creator with generated fields.\nfunc NewRawBenchmarkIteratorCreator(pointN int) *IteratorCreator {\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif opt.Expr != nil {\n\t\t\tpanic(\"unexpected expression\")\n\t\t}\n\n\t\tp := influxql.FloatPoint{\n\t\t\tName: \"cpu\",\n\t\t\tAux:  make([]interface{}, len(opt.Aux)),\n\t\t}\n\n\t\tfor i := range opt.Aux {\n\t\t\tswitch opt.Aux[i].Val {\n\t\t\tcase \"fval\":\n\t\t\t\tp.Aux[i] = float64(100)\n\t\t\tdefault:\n\t\t\t\tpanic(\"unknown iterator expr: \" + opt.Expr.String())\n\t\t\t}\n\t\t}\n\n\t\treturn &FloatPointGenerator{N: pointN, Fn: func(i int) *influxql.FloatPoint {\n\t\t\tp.Time = int64(time.Duration(i) * (10 * time.Second))\n\t\t\treturn &p\n\t\t}}, nil\n\t}\n\treturn &ic\n}\n\nfunc benchmarkSelectDedupe(b *testing.B, seriesN, pointsPerSeries int) {\n\tstmt := MustParseSelectStatement(`SELECT sval::string FROM cpu`)\n\tstmt.Dedupe = true\n\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif opt.Expr != nil {\n\t\t\tpanic(\"unexpected 
expression\")\n\t\t}\n\n\t\tp := influxql.FloatPoint{\n\t\t\tName: \"tags\",\n\t\t\tAux:  []interface{}{nil},\n\t\t}\n\n\t\treturn &FloatPointGenerator{N: seriesN * pointsPerSeries, Fn: func(i int) *influxql.FloatPoint {\n\t\t\tp.Aux[0] = fmt.Sprintf(\"server%d\", i%seriesN)\n\t\t\treturn &p\n\t\t}}, nil\n\t}\n\n\tb.ResetTimer()\n\tbenchmarkSelect(b, stmt, &ic)\n}\n\nfunc BenchmarkSelect_Dedupe_1K(b *testing.B) { benchmarkSelectDedupe(b, 1000, 100) }\n\nfunc benchmarkSelectTop(b *testing.B, seriesN, pointsPerSeries int) {\n\tstmt := MustParseSelectStatement(`SELECT top(sval, 10) FROM cpu`)\n\n\tvar ic IteratorCreator\n\tic.CreateIteratorFn = func(m *influxql.Measurement, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t\tif m.Name != \"cpu\" {\n\t\t\tb.Fatalf(\"unexpected source: %s\", m.Name)\n\t\t}\n\t\tif !reflect.DeepEqual(opt.Expr, MustParseExpr(`sval`)) {\n\t\t\tb.Fatalf(\"unexpected expr: %s\", spew.Sdump(opt.Expr))\n\t\t}\n\n\t\tp := influxql.FloatPoint{\n\t\t\tName: \"cpu\",\n\t\t}\n\n\t\treturn &FloatPointGenerator{N: seriesN * pointsPerSeries, Fn: func(i int) *influxql.FloatPoint {\n\t\t\tp.Value = float64(rand.Int63())\n\t\t\tp.Time = int64(time.Duration(i) * (10 * time.Second))\n\t\t\treturn &p\n\t\t}}, nil\n\t}\n\n\tb.ResetTimer()\n\tbenchmarkSelect(b, stmt, &ic)\n}\n\nfunc BenchmarkSelect_Top_1K(b *testing.B) { benchmarkSelectTop(b, 1000, 1000) }\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/statement_rewriter.go",
    "content": "package influxql\n\nimport \"errors\"\n\n// RewriteStatement rewrites stmt into a new statement, if applicable.\nfunc RewriteStatement(stmt Statement) (Statement, error) {\n\tswitch stmt := stmt.(type) {\n\tcase *ShowFieldKeysStatement:\n\t\treturn rewriteShowFieldKeysStatement(stmt)\n\tcase *ShowMeasurementsStatement:\n\t\treturn rewriteShowMeasurementsStatement(stmt)\n\tcase *ShowSeriesStatement:\n\t\treturn rewriteShowSeriesStatement(stmt)\n\tcase *ShowTagKeysStatement:\n\t\treturn rewriteShowTagKeysStatement(stmt)\n\tcase *ShowTagValuesStatement:\n\t\treturn rewriteShowTagValuesStatement(stmt)\n\tdefault:\n\t\treturn stmt, nil\n\t}\n}\n\nfunc rewriteShowFieldKeysStatement(stmt *ShowFieldKeysStatement) (Statement, error) {\n\treturn &SelectStatement{\n\t\tFields: Fields([]*Field{\n\t\t\t{Expr: &VarRef{Val: \"fieldKey\"}},\n\t\t\t{Expr: &VarRef{Val: \"fieldType\"}},\n\t\t}),\n\t\tSources:    rewriteSources(stmt.Sources, \"_fieldKeys\", stmt.Database),\n\t\tCondition:  rewriteSourcesCondition(stmt.Sources, nil),\n\t\tOffset:     stmt.Offset,\n\t\tLimit:      stmt.Limit,\n\t\tSortFields: stmt.SortFields,\n\t\tOmitTime:   true,\n\t\tDedupe:     true,\n\t\tIsRawQuery: true,\n\t}, nil\n}\n\nfunc rewriteShowMeasurementsStatement(stmt *ShowMeasurementsStatement) (Statement, error) {\n\t// Check for time in WHERE clause (not supported).\n\tif HasTimeExpr(stmt.Condition) {\n\t\treturn nil, errors.New(\"SHOW MEASUREMENTS doesn't support time in WHERE clause\")\n\t}\n\n\tcondition := stmt.Condition\n\tif stmt.Source != nil {\n\t\tcondition = rewriteSourcesCondition(Sources([]Source{stmt.Source}), stmt.Condition)\n\t}\n\treturn &ShowMeasurementsStatement{\n\t\tDatabase:   stmt.Database,\n\t\tCondition:  condition,\n\t\tLimit:      stmt.Limit,\n\t\tOffset:     stmt.Offset,\n\t\tSortFields: stmt.SortFields,\n\t}, nil\n}\n\nfunc rewriteShowSeriesStatement(stmt *ShowSeriesStatement) (Statement, error) {\n\t// Check for time in WHERE clause (not supported).\n\tif 
HasTimeExpr(stmt.Condition) {\n\t\treturn nil, errors.New(\"SHOW SERIES doesn't support time in WHERE clause\")\n\t}\n\n\treturn &SelectStatement{\n\t\tFields: []*Field{\n\t\t\t{Expr: &VarRef{Val: \"key\"}},\n\t\t},\n\t\tSources:    rewriteSources(stmt.Sources, \"_series\", stmt.Database),\n\t\tCondition:  rewriteSourcesCondition(stmt.Sources, stmt.Condition),\n\t\tOffset:     stmt.Offset,\n\t\tLimit:      stmt.Limit,\n\t\tSortFields: stmt.SortFields,\n\t\tOmitTime:   true,\n\t\tDedupe:     true,\n\t\tIsRawQuery: true,\n\t}, nil\n}\n\nfunc rewriteShowTagValuesStatement(stmt *ShowTagValuesStatement) (Statement, error) {\n\t// Check for time in WHERE clause (not supported).\n\tif HasTimeExpr(stmt.Condition) {\n\t\treturn nil, errors.New(\"SHOW TAG VALUES doesn't support time in WHERE clause\")\n\t}\n\n\tcondition := stmt.Condition\n\tvar expr Expr\n\tif list, ok := stmt.TagKeyExpr.(*ListLiteral); ok {\n\t\tfor _, tagKey := range list.Vals {\n\t\t\ttagExpr := &BinaryExpr{\n\t\t\t\tOp:  EQ,\n\t\t\t\tLHS: &VarRef{Val: \"_tagKey\"},\n\t\t\t\tRHS: &StringLiteral{Val: tagKey},\n\t\t\t}\n\n\t\t\tif expr != nil {\n\t\t\t\texpr = &BinaryExpr{\n\t\t\t\t\tOp:  OR,\n\t\t\t\t\tLHS: expr,\n\t\t\t\t\tRHS: tagExpr,\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\texpr = tagExpr\n\t\t\t}\n\t\t}\n\t} else {\n\t\texpr = &BinaryExpr{\n\t\t\tOp:  stmt.Op,\n\t\t\tLHS: &VarRef{Val: \"_tagKey\"},\n\t\t\tRHS: stmt.TagKeyExpr,\n\t\t}\n\t}\n\n\t// Set condition or \"AND\" together.\n\tif condition == nil {\n\t\tcondition = expr\n\t} else {\n\t\tcondition = &BinaryExpr{\n\t\t\tOp:  AND,\n\t\t\tLHS: &ParenExpr{Expr: condition},\n\t\t\tRHS: &ParenExpr{Expr: expr},\n\t\t}\n\t}\n\tcondition = rewriteSourcesCondition(stmt.Sources, condition)\n\n\treturn &ShowTagValuesStatement{\n\t\tDatabase:   stmt.Database,\n\t\tOp:         stmt.Op,\n\t\tTagKeyExpr: stmt.TagKeyExpr,\n\t\tCondition:  condition,\n\t\tSortFields: stmt.SortFields,\n\t\tLimit:      stmt.Limit,\n\t\tOffset:     stmt.Offset,\n\t}, nil\n}\n\nfunc 
rewriteShowTagKeysStatement(stmt *ShowTagKeysStatement) (Statement, error) {\n\t// Check for time in WHERE clause (not supported).\n\tif HasTimeExpr(stmt.Condition) {\n\t\treturn nil, errors.New(\"SHOW TAG KEYS doesn't support time in WHERE clause\")\n\t}\n\n\treturn &SelectStatement{\n\t\tFields: []*Field{\n\t\t\t{Expr: &VarRef{Val: \"tagKey\"}},\n\t\t},\n\t\tSources:    rewriteSources(stmt.Sources, \"_tagKeys\", stmt.Database),\n\t\tCondition:  rewriteSourcesCondition(stmt.Sources, stmt.Condition),\n\t\tOffset:     stmt.Offset,\n\t\tLimit:      stmt.Limit,\n\t\tSortFields: stmt.SortFields,\n\t\tOmitTime:   true,\n\t\tDedupe:     true,\n\t\tIsRawQuery: true,\n\t}, nil\n}\n\n// rewriteSources rewrites sources with previous database and retention policy\nfunc rewriteSources(sources Sources, measurementName, defaultDatabase string) Sources {\n\tnewSources := Sources{}\n\tfor _, src := range sources {\n\t\tif src == nil {\n\t\t\tcontinue\n\t\t}\n\t\tmm := src.(*Measurement)\n\t\tdatabase := mm.Database\n\t\tif database == \"\" {\n\t\t\tdatabase = defaultDatabase\n\t\t}\n\t\tnewSources = append(newSources,\n\t\t\t&Measurement{\n\t\t\t\tDatabase:        database,\n\t\t\t\tRetentionPolicy: mm.RetentionPolicy,\n\t\t\t\tName:            measurementName,\n\t\t\t})\n\t}\n\tif len(newSources) <= 0 {\n\t\treturn append(newSources, &Measurement{\n\t\t\tDatabase: defaultDatabase,\n\t\t\tName:     measurementName,\n\t\t})\n\t}\n\treturn newSources\n}\n\n// rewriteSourcesCondition rewrites sources into `name` expressions.\n// Merges with cond and returns a new condition.\nfunc rewriteSourcesCondition(sources Sources, cond Expr) Expr {\n\tif len(sources) == 0 {\n\t\treturn cond\n\t}\n\n\t// Generate an OR'd set of filters on source name.\n\tvar scond Expr\n\tfor _, source := range sources {\n\t\tmm := source.(*Measurement)\n\n\t\t// Generate a filtering expression on the measurement name.\n\t\tvar expr Expr\n\t\tif mm.Regex != nil {\n\t\t\texpr = &BinaryExpr{\n\t\t\t\tOp:  
EQREGEX,\n\t\t\t\tLHS: &VarRef{Val: \"_name\"},\n\t\t\t\tRHS: &RegexLiteral{Val: mm.Regex.Val},\n\t\t\t}\n\t\t} else if mm.Name != \"\" {\n\t\t\texpr = &BinaryExpr{\n\t\t\t\tOp:  EQ,\n\t\t\t\tLHS: &VarRef{Val: \"_name\"},\n\t\t\t\tRHS: &StringLiteral{Val: mm.Name},\n\t\t\t}\n\t\t}\n\n\t\tif scond == nil {\n\t\t\tscond = expr\n\t\t} else {\n\t\t\tscond = &BinaryExpr{\n\t\t\t\tOp:  OR,\n\t\t\t\tLHS: scond,\n\t\t\t\tRHS: expr,\n\t\t\t}\n\t\t}\n\t}\n\n\tif cond != nil {\n\t\treturn &BinaryExpr{\n\t\t\tOp:  AND,\n\t\t\tLHS: &ParenExpr{Expr: scond},\n\t\t\tRHS: &ParenExpr{Expr: cond},\n\t\t}\n\t}\n\treturn scond\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/statement_rewriter_test.go",
    "content": "package influxql_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n)\n\nfunc TestRewriteStatement(t *testing.T) {\n\ttests := []struct {\n\t\tstmt string\n\t\ts    string\n\t}{\n\t\t{\n\t\t\tstmt: `SHOW FIELD KEYS`,\n\t\t\ts:    `SELECT fieldKey, fieldType FROM _fieldKeys`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW FIELD KEYS ON db0`,\n\t\t\ts:    `SELECT fieldKey, fieldType FROM db0.._fieldKeys`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW FIELD KEYS FROM cpu`,\n\t\t\ts:    `SELECT fieldKey, fieldType FROM _fieldKeys WHERE _name = 'cpu'`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW FIELD KEYS ON db0 FROM cpu`,\n\t\t\ts:    `SELECT fieldKey, fieldType FROM db0.._fieldKeys WHERE _name = 'cpu'`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW FIELD KEYS FROM /c.*/`,\n\t\t\ts:    `SELECT fieldKey, fieldType FROM _fieldKeys WHERE _name =~ /c.*/`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW FIELD KEYS ON db0 FROM /c.*/`,\n\t\t\ts:    `SELECT fieldKey, fieldType FROM db0.._fieldKeys WHERE _name =~ /c.*/`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW FIELD KEYS FROM mydb.myrp2.cpu`,\n\t\t\ts:    `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name = 'cpu'`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW FIELD KEYS ON db0 FROM mydb.myrp2.cpu`,\n\t\t\ts:    `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name = 'cpu'`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW FIELD KEYS FROM mydb.myrp2./c.*/`,\n\t\t\ts:    `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name =~ /c.*/`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW FIELD KEYS ON db0 FROM mydb.myrp2./c.*/`,\n\t\t\ts:    `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name =~ /c.*/`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW SERIES`,\n\t\t\ts:    `SELECT \"key\" FROM _series`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW SERIES ON db0`,\n\t\t\ts:    `SELECT \"key\" FROM db0.._series`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW SERIES FROM cpu`,\n\t\t\ts:    `SELECT \"key\" FROM _series WHERE _name = 'cpu'`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW 
SERIES ON db0 FROM cpu`,\n\t\t\ts:    `SELECT \"key\" FROM db0.._series WHERE _name = 'cpu'`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW SERIES FROM mydb.myrp1.cpu`,\n\t\t\ts:    `SELECT \"key\" FROM mydb.myrp1._series WHERE _name = 'cpu'`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW SERIES ON db0 FROM mydb.myrp1.cpu`,\n\t\t\ts:    `SELECT \"key\" FROM mydb.myrp1._series WHERE _name = 'cpu'`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW SERIES FROM mydb.myrp1./c.*/`,\n\t\t\ts:    `SELECT \"key\" FROM mydb.myrp1._series WHERE _name =~ /c.*/`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW SERIES ON db0 FROM mydb.myrp1./c.*/`,\n\t\t\ts:    `SELECT \"key\" FROM mydb.myrp1._series WHERE _name =~ /c.*/`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW TAG KEYS`,\n\t\t\ts:    `SELECT tagKey FROM _tagKeys`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW TAG KEYS ON db0`,\n\t\t\ts:    `SELECT tagKey FROM db0.._tagKeys`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW TAG KEYS FROM cpu`,\n\t\t\ts:    `SELECT tagKey FROM _tagKeys WHERE _name = 'cpu'`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW TAG KEYS ON db0 FROM cpu`,\n\t\t\ts:    `SELECT tagKey FROM db0.._tagKeys WHERE _name = 'cpu'`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW TAG KEYS FROM /c.*/`,\n\t\t\ts:    `SELECT tagKey FROM _tagKeys WHERE _name =~ /c.*/`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW TAG KEYS ON db0 FROM /c.*/`,\n\t\t\ts:    `SELECT tagKey FROM db0.._tagKeys WHERE _name =~ /c.*/`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW TAG KEYS FROM cpu WHERE region = 'uswest'`,\n\t\t\ts:    `SELECT tagKey FROM _tagKeys WHERE (_name = 'cpu') AND (region = 'uswest')`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW TAG KEYS ON db0 FROM cpu WHERE region = 'uswest'`,\n\t\t\ts:    `SELECT tagKey FROM db0.._tagKeys WHERE (_name = 'cpu') AND (region = 'uswest')`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW TAG KEYS FROM mydb.myrp1.cpu`,\n\t\t\ts:    `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE _name = 'cpu'`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1.cpu`,\n\t\t\ts:    `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE _name = 
'cpu'`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW TAG KEYS FROM mydb.myrp1./c.*/`,\n\t\t\ts:    `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE _name =~ /c.*/`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1./c.*/`,\n\t\t\ts:    `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE _name =~ /c.*/`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW TAG KEYS FROM mydb.myrp1.cpu WHERE region = 'uswest'`,\n\t\t\ts:    `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE (_name = 'cpu') AND (region = 'uswest')`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1.cpu WHERE region = 'uswest'`,\n\t\t\ts:    `SELECT tagKey FROM mydb.myrp1._tagKeys WHERE (_name = 'cpu') AND (region = 'uswest')`,\n\t\t},\n\t\t{\n\t\t\tstmt: `SELECT value FROM cpu`,\n\t\t\ts:    `SELECT value FROM cpu`,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tstmt, err := influxql.ParseStatement(test.stmt)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"error parsing statement: %s\", err)\n\t\t} else {\n\t\t\tstmt, err = influxql.RewriteStatement(stmt)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"error rewriting statement: %s\", err)\n\t\t\t} else if s := stmt.String(); s != test.s {\n\t\t\t\tt.Errorf(\"error rendering string. expected %s, actual: %s\", test.s, s)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/subquery.go",
    "content": "package influxql\n\ntype subqueryBuilder struct {\n\tic   IteratorCreator\n\tstmt *SelectStatement\n}\n\n// buildAuxIterator constructs an auxiliary Iterator from a subquery.\nfunc (b *subqueryBuilder) buildAuxIterator(opt IteratorOptions) (Iterator, error) {\n\t// Retrieve a list of fields needed for conditions.\n\tauxFields := opt.Aux\n\tconds := ExprNames(opt.Condition)\n\tif len(conds) > 0 {\n\t\tauxFields = make([]VarRef, len(opt.Aux)+len(conds))\n\t\tcopy(auxFields, opt.Aux)\n\t\tcopy(auxFields[len(opt.Aux):], conds)\n\t}\n\n\t// Map the desired auxiliary fields from the substatement.\n\tindexes := b.mapAuxFields(auxFields)\n\tsubOpt, err := newIteratorOptionsSubstatement(b.stmt, opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsubOpt.Aux = auxFields\n\n\titrs, err := buildIterators(b.stmt, b.ic, subOpt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Construct the iterators for the subquery.\n\tinput := NewIteratorMapper(itrs, nil, indexes, subOpt)\n\t// If there is a condition, filter it now.\n\tif opt.Condition != nil {\n\t\tinput = NewFilterIterator(input, opt.Condition, subOpt)\n\t}\n\treturn input, nil\n}\n\nfunc (b *subqueryBuilder) mapAuxFields(auxFields []VarRef) []IteratorMap {\n\tindexes := make([]IteratorMap, len(auxFields))\n\tfor i, name := range auxFields {\n\t\tm := b.mapAuxField(&name)\n\t\tif m == nil {\n\t\t\t// If this field doesn't map to anything, use the NullMap so it\n\t\t\t// shows up as null.\n\t\t\tm = NullMap{}\n\t\t}\n\t\tindexes[i] = m\n\t}\n\treturn indexes\n}\n\nfunc (b *subqueryBuilder) mapAuxField(name *VarRef) IteratorMap {\n\toffset := 0\n\tfor i, f := range b.stmt.Fields {\n\t\tif f.Name() == name.Val {\n\t\t\treturn FieldMap(i + offset)\n\t\t} else if call, ok := f.Expr.(*Call); ok && (call.Name == \"top\" || call.Name == \"bottom\") {\n\t\t\t// We may match one of the arguments in \"top\" or \"bottom\".\n\t\t\tif len(call.Args) > 2 {\n\t\t\t\tfor j, arg := range call.Args[1 : len(call.Args)-1] 
{\n\t\t\t\t\tif arg, ok := arg.(*VarRef); ok && arg.Val == name.Val {\n\t\t\t\t\t\treturn FieldMap(i + j + 1)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Increment the offset so we have the correct index for later fields.\n\t\t\t\toffset += len(call.Args) - 2\n\t\t\t}\n\t\t}\n\t}\n\n\t// Unable to find this in the list of fields.\n\t// Look within the dimensions and create a field if we find it.\n\tfor _, d := range b.stmt.Dimensions {\n\t\tif d, ok := d.Expr.(*VarRef); ok && name.Val == d.Val {\n\t\t\treturn TagMap(d.Val)\n\t\t}\n\t}\n\n\t// Unable to find any matches.\n\treturn nil\n}\n\nfunc (b *subqueryBuilder) buildVarRefIterator(expr *VarRef, opt IteratorOptions) (Iterator, error) {\n\t// Look for the field or tag that is driving this query.\n\tdriver := b.mapAuxField(expr)\n\tif driver == nil {\n\t\t// Exit immediately if there is no driver. If there is no driver, there\n\t\t// are no results. Period.\n\t\treturn nil, nil\n\t}\n\n\t// Determine necessary auxiliary fields for this query.\n\tauxFields := opt.Aux\n\tconds := ExprNames(opt.Condition)\n\tif len(conds) > 0 && len(opt.Aux) > 0 {\n\t\t// Combine the auxiliary fields requested with the ones in the condition.\n\t\tauxFields = make([]VarRef, len(opt.Aux)+len(conds))\n\t\tcopy(auxFields, opt.Aux)\n\t\tcopy(auxFields[len(opt.Aux):], conds)\n\t} else if len(conds) > 0 {\n\t\t// Set the auxiliary fields to what is in the condition since we have\n\t\t// requested none in the query itself.\n\t\tauxFields = conds\n\t}\n\n\t// Map the auxiliary fields to their index in the subquery.\n\tindexes := b.mapAuxFields(auxFields)\n\tsubOpt, err := newIteratorOptionsSubstatement(b.stmt, opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsubOpt.Aux = auxFields\n\n\titrs, err := buildIterators(b.stmt, b.ic, subOpt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Construct the iterators for the subquery.\n\tinput := NewIteratorMapper(itrs, driver, indexes, subOpt)\n\t// If there is a condition, filter it now.\n\tif 
opt.Condition != nil {\n\t\tinput = NewFilterIterator(input, opt.Condition, subOpt)\n\t}\n\treturn input, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/task_manager.go",
    "content": "package influxql\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/uber-go/zap\"\n)\n\nconst (\n\t// DefaultQueryTimeout is the default timeout for executing a query.\n\t// A value of zero will have no query timeout.\n\tDefaultQueryTimeout = time.Duration(0)\n)\n\n// TaskManager takes care of all aspects related to managing running queries.\ntype TaskManager struct {\n\t// Query execution timeout.\n\tQueryTimeout time.Duration\n\n\t// Log queries if they are slower than this time.\n\t// If zero, slow queries will never be logged.\n\tLogQueriesAfter time.Duration\n\n\t// Maximum number of concurrent queries.\n\tMaxConcurrentQueries int\n\n\t// Logger to use for all logging.\n\t// Defaults to discarding all log output.\n\tLogger zap.Logger\n\n\t// Used for managing and tracking running queries.\n\tqueries  map[uint64]*QueryTask\n\tnextID   uint64\n\tmu       sync.RWMutex\n\tshutdown bool\n}\n\n// NewTaskManager creates a new TaskManager.\nfunc NewTaskManager() *TaskManager {\n\treturn &TaskManager{\n\t\tQueryTimeout: DefaultQueryTimeout,\n\t\tLogger:       zap.New(zap.NullEncoder()),\n\t\tqueries:      make(map[uint64]*QueryTask),\n\t\tnextID:       1,\n\t}\n}\n\n// ExecuteStatement executes a statement containing one of the task management queries.\nfunc (t *TaskManager) ExecuteStatement(stmt Statement, ctx ExecutionContext) error {\n\tswitch stmt := stmt.(type) {\n\tcase *ShowQueriesStatement:\n\t\trows, err := t.executeShowQueriesStatement(stmt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tctx.Results <- &Result{\n\t\t\tStatementID: ctx.StatementID,\n\t\t\tSeries:      rows,\n\t\t}\n\tcase *KillQueryStatement:\n\t\tvar messages []*Message\n\t\tif ctx.ReadOnly {\n\t\t\tmessages = append(messages, ReadOnlyWarning(stmt.String()))\n\t\t}\n\n\t\tif err := t.executeKillQueryStatement(stmt); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Results <- &Result{\n\t\t\tStatementID: 
ctx.StatementID,\n\t\t\tMessages:    messages,\n\t\t}\n\tdefault:\n\t\treturn ErrInvalidQuery\n\t}\n\treturn nil\n}\n\nfunc (t *TaskManager) executeKillQueryStatement(stmt *KillQueryStatement) error {\n\treturn t.KillQuery(stmt.QueryID)\n}\n\nfunc (t *TaskManager) executeShowQueriesStatement(q *ShowQueriesStatement) (models.Rows, error) {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\tnow := time.Now()\n\n\tvalues := make([][]interface{}, 0, len(t.queries))\n\tfor id, qi := range t.queries {\n\t\td := now.Sub(qi.startTime)\n\n\t\tswitch {\n\t\tcase d >= time.Second:\n\t\t\td = d - (d % time.Second)\n\t\tcase d >= time.Millisecond:\n\t\t\td = d - (d % time.Millisecond)\n\t\tcase d >= time.Microsecond:\n\t\t\td = d - (d % time.Microsecond)\n\t\t}\n\n\t\tvalues = append(values, []interface{}{id, qi.query, qi.database, d.String()})\n\t}\n\n\treturn []*models.Row{{\n\t\tColumns: []string{\"qid\", \"query\", \"database\", \"duration\"},\n\t\tValues:  values,\n\t}}, nil\n}\n\nfunc (t *TaskManager) query(qid uint64) (*QueryTask, bool) {\n\tt.mu.RLock()\n\tquery, ok := t.queries[qid]\n\tt.mu.RUnlock()\n\treturn query, ok\n}\n\n// AttachQuery attaches a running query to be managed by the TaskManager.\n// Returns the query id of the newly attached query or an error if it was\n// unable to assign a query id or attach the query to the TaskManager.\n// This function also returns a channel that will be closed when this\n// query finishes running.\n//\n// After a query finishes running, the system is free to reuse a query id.\nfunc (t *TaskManager) AttachQuery(q *Query, database string, interrupt <-chan struct{}) (uint64, *QueryTask, error) {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tif t.shutdown {\n\t\treturn 0, nil, ErrQueryEngineShutdown\n\t}\n\n\tif t.MaxConcurrentQueries > 0 && len(t.queries) >= t.MaxConcurrentQueries {\n\t\treturn 0, nil, ErrMaxConcurrentQueriesLimitExceeded(len(t.queries), t.MaxConcurrentQueries)\n\t}\n\n\tqid := t.nextID\n\tquery := &QueryTask{\n\t\tquery: 
    q.String(),\n\t\tdatabase:  database,\n\t\tstartTime: time.Now(),\n\t\tclosing:   make(chan struct{}),\n\t\tmonitorCh: make(chan error),\n\t}\n\tt.queries[qid] = query\n\n\tgo t.waitForQuery(qid, query.closing, interrupt, query.monitorCh)\n\tif t.LogQueriesAfter != 0 {\n\t\tgo query.monitor(func(closing <-chan struct{}) error {\n\t\t\ttimer := time.NewTimer(t.LogQueriesAfter)\n\t\t\tdefer timer.Stop()\n\n\t\t\tselect {\n\t\t\tcase <-timer.C:\n\t\t\t\tt.Logger.Warn(fmt.Sprintf(\"Detected slow query: %s (qid: %d, database: %s, threshold: %s)\",\n\t\t\t\t\tquery.query, qid, query.database, t.LogQueriesAfter))\n\t\t\tcase <-closing:\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\tt.nextID++\n\treturn qid, query, nil\n}\n\n// KillQuery stops and removes a query from the TaskManager.\n// This method can be used to forcefully terminate a running query.\nfunc (t *TaskManager) KillQuery(qid uint64) error {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tquery, ok := t.queries[qid]\n\tif !ok {\n\t\treturn fmt.Errorf(\"no such query id: %d\", qid)\n\t}\n\n\tclose(query.closing)\n\tdelete(t.queries, qid)\n\treturn nil\n}\n\n// QueryInfo represents the information for a query.\ntype QueryInfo struct {\n\tID       uint64        `json:\"id\"`\n\tQuery    string        `json:\"query\"`\n\tDatabase string        `json:\"database\"`\n\tDuration time.Duration `json:\"duration\"`\n}\n\n// Queries returns a list of all running queries with information about them.\nfunc (t *TaskManager) Queries() []QueryInfo {\n\tt.mu.RLock()\n\tdefer t.mu.RUnlock()\n\n\tnow := time.Now()\n\tqueries := make([]QueryInfo, 0, len(t.queries))\n\tfor id, qi := range t.queries {\n\t\tqueries = append(queries, QueryInfo{\n\t\t\tID:       id,\n\t\t\tQuery:    qi.query,\n\t\t\tDatabase: qi.database,\n\t\t\tDuration: now.Sub(qi.startTime),\n\t\t})\n\t}\n\treturn queries\n}\n\nfunc (t *TaskManager) waitForQuery(qid uint64, interrupt <-chan struct{}, closing <-chan struct{}, monitorCh <-chan error) {\n\tvar timerCh 
<-chan time.Time\n\tif t.QueryTimeout != 0 {\n\t\ttimer := time.NewTimer(t.QueryTimeout)\n\t\ttimerCh = timer.C\n\t\tdefer timer.Stop()\n\t}\n\n\tselect {\n\tcase <-closing:\n\t\tquery, ok := t.query(qid)\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tquery.setError(ErrQueryInterrupted)\n\tcase err := <-monitorCh:\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tquery, ok := t.query(qid)\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tquery.setError(err)\n\tcase <-timerCh:\n\t\tquery, ok := t.query(qid)\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tquery.setError(ErrQueryTimeoutLimitExceeded)\n\tcase <-interrupt:\n\t\t// Query was manually closed so exit the select.\n\t\treturn\n\t}\n\tt.KillQuery(qid)\n}\n\n// Close kills all running queries and prevents new queries from being attached.\nfunc (t *TaskManager) Close() error {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tt.shutdown = true\n\tfor _, query := range t.queries {\n\t\tquery.setError(ErrQueryEngineShutdown)\n\t\tclose(query.closing)\n\t}\n\tt.queries = nil\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/tmpldata",
    "content": "[\n\t{\n\t\t\"Name\":\"Float\",\n\t\t\"name\":\"float\",\n\t\t\"Type\":\"float64\",\n\t\t\"Nil\":\"0\",\n\t\t\"Zero\":\"float64(0)\"\n\t},\n\t{\n\t\t\"Name\":\"Integer\",\n\t\t\"name\":\"integer\",\n\t\t\"Type\":\"int64\",\n\t\t\"Nil\":\"0\",\n\t\t\"Zero\":\"int64(0)\"\n\t},\n\t{\n\t\t\"Name\":\"String\",\n\t\t\"name\":\"string\",\n\t\t\"Type\":\"string\",\n\t\t\"Nil\":\"\\\"\\\"\",\n\t\t\"Zero\":\"\\\"\\\"\"\n\t},\n\t{\n\t\t\"Name\":\"Boolean\",\n\t\t\"name\":\"boolean\",\n\t\t\"Type\":\"bool\",\n\t\t\"Nil\":\"false\",\n\t\t\"Zero\":\"false\"\n\t}\n]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/influxql/token.go",
    "content": "package influxql\n\nimport (\n\t\"strings\"\n)\n\n// Token is a lexical token of the InfluxQL language.\ntype Token int\n\n// These are a comprehensive list of InfluxQL language tokens.\nconst (\n\t// ILLEGAL Token, EOF, WS are Special InfluxQL tokens.\n\tILLEGAL Token = iota\n\tEOF\n\tWS\n\tCOMMENT\n\n\tliteralBeg\n\t// IDENT and the following are InfluxQL literal tokens.\n\tIDENT       // main\n\tBOUNDPARAM  // $param\n\tNUMBER      // 12345.67\n\tINTEGER     // 12345\n\tDURATIONVAL // 13h\n\tSTRING      // \"abc\"\n\tBADSTRING   // \"abc\n\tBADESCAPE   // \\q\n\tTRUE        // true\n\tFALSE       // false\n\tREGEX       // Regular expressions\n\tBADREGEX    // `.*\n\tliteralEnd\n\n\toperatorBeg\n\t// ADD and the following are InfluxQL Operators\n\tADD         // +\n\tSUB         // -\n\tMUL         // *\n\tDIV         // /\n\tMOD         // %\n\tBITWISE_AND // &\n\tBITWISE_OR  // |\n\tBITWISE_XOR // ^\n\n\tAND // AND\n\tOR  // OR\n\n\tEQ       // =\n\tNEQ      // !=\n\tEQREGEX  // =~\n\tNEQREGEX // !~\n\tLT       // <\n\tLTE      // <=\n\tGT       // >\n\tGTE      // >=\n\toperatorEnd\n\n\tLPAREN      // (\n\tRPAREN      // )\n\tCOMMA       // ,\n\tCOLON       // :\n\tDOUBLECOLON // ::\n\tSEMICOLON   // ;\n\tDOT         // .\n\n\tkeywordBeg\n\t// ALL and the following are InfluxQL 
Keywords\n\tALL\n\tALTER\n\tANY\n\tAS\n\tASC\n\tBEGIN\n\tBY\n\tCREATE\n\tCONTINUOUS\n\tDATABASE\n\tDATABASES\n\tDEFAULT\n\tDELETE\n\tDESC\n\tDESTINATIONS\n\tDIAGNOSTICS\n\tDISTINCT\n\tDROP\n\tDURATION\n\tEND\n\tEVERY\n\tEXPLAIN\n\tFIELD\n\tFOR\n\tFROM\n\tGRANT\n\tGRANTS\n\tGROUP\n\tGROUPS\n\tIN\n\tINF\n\tINSERT\n\tINTO\n\tKEY\n\tKEYS\n\tKILL\n\tLIMIT\n\tMEASUREMENT\n\tMEASUREMENTS\n\tNAME\n\tOFFSET\n\tON\n\tORDER\n\tPASSWORD\n\tPOLICY\n\tPOLICIES\n\tPRIVILEGES\n\tQUERIES\n\tQUERY\n\tREAD\n\tREPLICATION\n\tRESAMPLE\n\tRETENTION\n\tREVOKE\n\tSELECT\n\tSERIES\n\tSET\n\tSHOW\n\tSHARD\n\tSHARDS\n\tSLIMIT\n\tSOFFSET\n\tSTATS\n\tSUBSCRIPTION\n\tSUBSCRIPTIONS\n\tTAG\n\tTO\n\tUSER\n\tUSERS\n\tVALUES\n\tWHERE\n\tWITH\n\tWRITE\n\tkeywordEnd\n)\n\nvar tokens = [...]string{\n\tILLEGAL: \"ILLEGAL\",\n\tEOF:     \"EOF\",\n\tWS:      \"WS\",\n\n\tIDENT:       \"IDENT\",\n\tNUMBER:      \"NUMBER\",\n\tDURATIONVAL: \"DURATIONVAL\",\n\tSTRING:      \"STRING\",\n\tBADSTRING:   \"BADSTRING\",\n\tBADESCAPE:   \"BADESCAPE\",\n\tTRUE:        \"TRUE\",\n\tFALSE:       \"FALSE\",\n\tREGEX:       \"REGEX\",\n\n\tADD:         \"+\",\n\tSUB:         \"-\",\n\tMUL:         \"*\",\n\tDIV:         \"/\",\n\tMOD:         \"%\",\n\tBITWISE_AND: \"&\",\n\tBITWISE_OR:  \"|\",\n\tBITWISE_XOR: \"^\",\n\n\tAND: \"AND\",\n\tOR:  \"OR\",\n\n\tEQ:       \"=\",\n\tNEQ:      \"!=\",\n\tEQREGEX:  \"=~\",\n\tNEQREGEX: \"!~\",\n\tLT:       \"<\",\n\tLTE:      \"<=\",\n\tGT:       \">\",\n\tGTE:      \">=\",\n\n\tLPAREN:      \"(\",\n\tRPAREN:      \")\",\n\tCOMMA:       \",\",\n\tCOLON:       \":\",\n\tDOUBLECOLON: \"::\",\n\tSEMICOLON:   \";\",\n\tDOT:         \".\",\n\n\tALL:           \"ALL\",\n\tALTER:         \"ALTER\",\n\tANY:           \"ANY\",\n\tAS:            \"AS\",\n\tASC:           \"ASC\",\n\tBEGIN:         \"BEGIN\",\n\tBY:            \"BY\",\n\tCREATE:        \"CREATE\",\n\tCONTINUOUS:    \"CONTINUOUS\",\n\tDATABASE:      \"DATABASE\",\n\tDATABASES:     \"DATABASES\",\n\tDEFAULT:       
\"DEFAULT\",\n\tDELETE:        \"DELETE\",\n\tDESC:          \"DESC\",\n\tDESTINATIONS:  \"DESTINATIONS\",\n\tDIAGNOSTICS:   \"DIAGNOSTICS\",\n\tDISTINCT:      \"DISTINCT\",\n\tDROP:          \"DROP\",\n\tDURATION:      \"DURATION\",\n\tEND:           \"END\",\n\tEVERY:         \"EVERY\",\n\tEXPLAIN:       \"EXPLAIN\",\n\tFIELD:         \"FIELD\",\n\tFOR:           \"FOR\",\n\tFROM:          \"FROM\",\n\tGRANT:         \"GRANT\",\n\tGRANTS:        \"GRANTS\",\n\tGROUP:         \"GROUP\",\n\tGROUPS:        \"GROUPS\",\n\tIN:            \"IN\",\n\tINF:           \"INF\",\n\tINSERT:        \"INSERT\",\n\tINTO:          \"INTO\",\n\tKEY:           \"KEY\",\n\tKEYS:          \"KEYS\",\n\tKILL:          \"KILL\",\n\tLIMIT:         \"LIMIT\",\n\tMEASUREMENT:   \"MEASUREMENT\",\n\tMEASUREMENTS:  \"MEASUREMENTS\",\n\tNAME:          \"NAME\",\n\tOFFSET:        \"OFFSET\",\n\tON:            \"ON\",\n\tORDER:         \"ORDER\",\n\tPASSWORD:      \"PASSWORD\",\n\tPOLICY:        \"POLICY\",\n\tPOLICIES:      \"POLICIES\",\n\tPRIVILEGES:    \"PRIVILEGES\",\n\tQUERIES:       \"QUERIES\",\n\tQUERY:         \"QUERY\",\n\tREAD:          \"READ\",\n\tREPLICATION:   \"REPLICATION\",\n\tRESAMPLE:      \"RESAMPLE\",\n\tRETENTION:     \"RETENTION\",\n\tREVOKE:        \"REVOKE\",\n\tSELECT:        \"SELECT\",\n\tSERIES:        \"SERIES\",\n\tSET:           \"SET\",\n\tSHOW:          \"SHOW\",\n\tSHARD:         \"SHARD\",\n\tSHARDS:        \"SHARDS\",\n\tSLIMIT:        \"SLIMIT\",\n\tSOFFSET:       \"SOFFSET\",\n\tSTATS:         \"STATS\",\n\tSUBSCRIPTION:  \"SUBSCRIPTION\",\n\tSUBSCRIPTIONS: \"SUBSCRIPTIONS\",\n\tTAG:           \"TAG\",\n\tTO:            \"TO\",\n\tUSER:          \"USER\",\n\tUSERS:         \"USERS\",\n\tVALUES:        \"VALUES\",\n\tWHERE:         \"WHERE\",\n\tWITH:          \"WITH\",\n\tWRITE:         \"WRITE\",\n}\n\nvar keywords map[string]Token\n\nfunc init() {\n\tkeywords = make(map[string]Token)\n\tfor tok := keywordBeg + 1; tok < keywordEnd; tok++ 
{\n\t\tkeywords[strings.ToLower(tokens[tok])] = tok\n\t}\n\tfor _, tok := range []Token{AND, OR} {\n\t\tkeywords[strings.ToLower(tokens[tok])] = tok\n\t}\n\tkeywords[\"true\"] = TRUE\n\tkeywords[\"false\"] = FALSE\n}\n\n// String returns the string representation of the token.\nfunc (tok Token) String() string {\n\tif tok >= 0 && tok < Token(len(tokens)) {\n\t\treturn tokens[tok]\n\t}\n\treturn \"\"\n}\n\n// Precedence returns the operator precedence of the binary operator token.\nfunc (tok Token) Precedence() int {\n\tswitch tok {\n\tcase OR:\n\t\treturn 1\n\tcase AND:\n\t\treturn 2\n\tcase EQ, NEQ, EQREGEX, NEQREGEX, LT, LTE, GT, GTE:\n\t\treturn 3\n\tcase ADD, SUB, BITWISE_OR, BITWISE_XOR:\n\t\treturn 4\n\tcase MUL, DIV, MOD, BITWISE_AND:\n\t\treturn 5\n\t}\n\treturn 0\n}\n\n// isOperator returns true for operator tokens.\nfunc (tok Token) isOperator() bool { return tok > operatorBeg && tok < operatorEnd }\n\n// tokstr returns a literal if provided, otherwise returns the token string.\nfunc tokstr(tok Token, lit string) string {\n\tif lit != \"\" {\n\t\treturn lit\n\t}\n\treturn tok.String()\n}\n\n// Lookup returns the token associated with a given string.\nfunc Lookup(ident string) Token {\n\tif tok, ok := keywords[strings.ToLower(ident)]; ok {\n\t\treturn tok\n\t}\n\treturn IDENT\n}\n\n// Pos specifies the line and character position of a token.\n// The Char and Line are both zero-based indexes.\ntype Pos struct {\n\tLine int\n\tChar int\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/internal/meta_client.go",
    "content": "package internal\n\nimport (\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n)\n\n// MetaClientMock is a mockable implementation of meta.MetaClient.\ntype MetaClientMock struct {\n\tCloseFn                             func() error\n\tCreateContinuousQueryFn             func(database, name, query string) error\n\tCreateDatabaseFn                    func(name string) (*meta.DatabaseInfo, error)\n\tCreateDatabaseWithRetentionPolicyFn func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error)\n\tCreateRetentionPolicyFn             func(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error)\n\tCreateShardGroupFn                  func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error)\n\tCreateSubscriptionFn                func(database, rp, name, mode string, destinations []string) error\n\tCreateUserFn                        func(name, password string, admin bool) (meta.User, error)\n\n\tDatabaseFn  func(name string) *meta.DatabaseInfo\n\tDatabasesFn func() []meta.DatabaseInfo\n\n\tDataFn                func() meta.Data\n\tDeleteShardGroupFn    func(database string, policy string, id uint64) error\n\tDropContinuousQueryFn func(database, name string) error\n\tDropDatabaseFn        func(name string) error\n\tDropRetentionPolicyFn func(database, name string) error\n\tDropSubscriptionFn    func(database, rp, name string) error\n\tDropShardFn           func(id uint64) error\n\tDropUserFn            func(name string) error\n\n\tOpenFn func() error\n\n\tRetentionPolicyFn func(database, name string) (rpi *meta.RetentionPolicyInfo, err error)\n\n\tAuthenticateFn           func(username, password string) (ui meta.User, err error)\n\tAdminUserExistsFn        func() bool\n\tSetAdminPrivilegeFn      func(username string, admin bool) error\n\tSetDataFn                func(*meta.Data) error\n\tSetPrivilegeFn       
    func(username, database string, p influxql.Privilege) error\n\tShardGroupsByTimeRangeFn func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error)\n\tShardOwnerFn             func(shardID uint64) (database, policy string, sgi *meta.ShardGroupInfo)\n\tUpdateRetentionPolicyFn  func(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error\n\tUpdateUserFn             func(name, password string) error\n\tUserPrivilegeFn          func(username, database string) (*influxql.Privilege, error)\n\tUserPrivilegesFn         func(username string) (map[string]influxql.Privilege, error)\n\tUserFn                   func(username string) (meta.User, error)\n\tUsersFn                  func() []meta.UserInfo\n}\n\nfunc (c *MetaClientMock) Close() error {\n\treturn c.CloseFn()\n}\n\nfunc (c *MetaClientMock) CreateContinuousQuery(database, name, query string) error {\n\treturn c.CreateContinuousQueryFn(database, name, query)\n}\n\nfunc (c *MetaClientMock) CreateDatabase(name string) (*meta.DatabaseInfo, error) {\n\treturn c.CreateDatabaseFn(name)\n}\n\nfunc (c *MetaClientMock) CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) {\n\treturn c.CreateDatabaseWithRetentionPolicyFn(name, spec)\n}\n\nfunc (c *MetaClientMock) CreateRetentionPolicy(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error) {\n\treturn c.CreateRetentionPolicyFn(database, spec, makeDefault)\n}\n\nfunc (c *MetaClientMock) CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {\n\treturn c.CreateShardGroupFn(database, policy, timestamp)\n}\n\nfunc (c *MetaClientMock) CreateSubscription(database, rp, name, mode string, destinations []string) error {\n\treturn c.CreateSubscriptionFn(database, rp, name, mode, destinations)\n}\n\nfunc (c *MetaClientMock) CreateUser(name, password string, admin bool) (meta.User, error) {\n\treturn 
c.CreateUserFn(name, password, admin)\n}\n\nfunc (c *MetaClientMock) Database(name string) *meta.DatabaseInfo {\n\treturn c.DatabaseFn(name)\n}\n\nfunc (c *MetaClientMock) Databases() []meta.DatabaseInfo {\n\treturn c.DatabasesFn()\n}\n\nfunc (c *MetaClientMock) DeleteShardGroup(database string, policy string, id uint64) error {\n\treturn c.DeleteShardGroupFn(database, policy, id)\n}\n\nfunc (c *MetaClientMock) DropContinuousQuery(database, name string) error {\n\treturn c.DropContinuousQueryFn(database, name)\n}\n\nfunc (c *MetaClientMock) DropDatabase(name string) error {\n\treturn c.DropDatabaseFn(name)\n}\n\nfunc (c *MetaClientMock) DropRetentionPolicy(database, name string) error {\n\treturn c.DropRetentionPolicyFn(database, name)\n}\n\nfunc (c *MetaClientMock) DropShard(id uint64) error {\n\treturn c.DropShardFn(id)\n}\n\nfunc (c *MetaClientMock) DropSubscription(database, rp, name string) error {\n\treturn c.DropSubscriptionFn(database, rp, name)\n}\n\nfunc (c *MetaClientMock) DropUser(name string) error {\n\treturn c.DropUserFn(name)\n}\n\nfunc (c *MetaClientMock) RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) {\n\treturn c.RetentionPolicyFn(database, name)\n}\n\nfunc (c *MetaClientMock) SetAdminPrivilege(username string, admin bool) error {\n\treturn c.SetAdminPrivilegeFn(username, admin)\n}\n\nfunc (c *MetaClientMock) SetPrivilege(username, database string, p influxql.Privilege) error {\n\treturn c.SetPrivilegeFn(username, database, p)\n}\n\nfunc (c *MetaClientMock) ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) {\n\treturn c.ShardGroupsByTimeRangeFn(database, policy, min, max)\n}\n\nfunc (c *MetaClientMock) ShardOwner(shardID uint64) (database, policy string, sgi *meta.ShardGroupInfo) {\n\treturn c.ShardOwnerFn(shardID)\n}\n\nfunc (c *MetaClientMock) UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error {\n\treturn 
c.UpdateRetentionPolicyFn(database, name, rpu, makeDefault)\n}\n\nfunc (c *MetaClientMock) UpdateUser(name, password string) error {\n\treturn c.UpdateUserFn(name, password)\n}\n\nfunc (c *MetaClientMock) UserPrivilege(username, database string) (*influxql.Privilege, error) {\n\treturn c.UserPrivilegeFn(username, database)\n}\n\nfunc (c *MetaClientMock) UserPrivileges(username string) (map[string]influxql.Privilege, error) {\n\treturn c.UserPrivilegesFn(username)\n}\n\nfunc (c *MetaClientMock) Authenticate(username, password string) (meta.User, error) {\n\treturn c.AuthenticateFn(username, password)\n}\nfunc (c *MetaClientMock) AdminUserExists() bool { return c.AdminUserExistsFn() }\n\nfunc (c *MetaClientMock) User(username string) (meta.User, error) { return c.UserFn(username) }\nfunc (c *MetaClientMock) Users() []meta.UserInfo                  { return c.UsersFn() }\n\nfunc (c *MetaClientMock) Open() error                { return c.OpenFn() }\nfunc (c *MetaClientMock) Data() meta.Data            { return c.DataFn() }\nfunc (c *MetaClientMock) SetData(d *meta.Data) error { return c.SetDataFn(d) }\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/man/Makefile",
    "content": "#!/usr/bin/make -f\n\nDESTDIR = /usr/local\n\nMAN1_TXT =\nMAN1_TXT += influxd.txt\nMAN1_TXT += influxd-backup.txt\nMAN1_TXT += influxd-config.txt\nMAN1_TXT += influxd-restore.txt\nMAN1_TXT += influxd-run.txt\nMAN1_TXT += influxd-version.txt\nMAN1_TXT += influx.txt\nMAN1_TXT += influx_inspect.txt\nMAN1_TXT += influx_stress.txt\nMAN1_TXT += influx_tsm.txt\n\nMAN_TXT = $(MAN1_TXT)\nMAN_XML = $(patsubst %.txt,%.xml,$(MAN_TXT))\n\nDOC_MAN1 = $(patsubst %.txt,%.1,$(MAN1_TXT))\n\nbuild: $(DOC_MAN1)\n\ninstall: build\n\t@echo '  INSTALL $(DOC_MAN1)' && \\\n\tmkdir -p $(DESTDIR)/share/man/man1 && \\\n\tinstall -m 0644 $(DOC_MAN1) $(DESTDIR)/share/man/man1\n\nclean:\n\trm -f $(MAN_XML) $(DOC_MAN1)\n\n%.xml : %.txt\n\t@echo '  ASCIIDOC $@' && rm -f $@+ && \\\n\tasciidoc -d manpage -b docbook -o $@+ $< && \\\n\tmv $@+ $@\n\n%.1 : %.xml\n\t@echo '  XMLTO $@' && \\\n\txmlto man $< 2> /dev/null\n\n.PHONY: build install clean\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/man/README.md",
    "content": "# Building the Man Pages\n\nThe man pages are created with `asciidoc`, `docbook`, and `xmlto`.\n\n## Debian/Ubuntu\n\nThis is the easiest since Debian and Ubuntu automatically install the\ndependencies correctly.\n\n```bash\n$ sudo apt-get install -y build-essential asciidoc xmlto\n```\n\nYou should then be able to run `make` and the man pages will be\nproduced.\n\n## Mac OS X\n\nMac OS X also has the tools necessary to build the docs, but one of the\ndependencies gets installed incorrectly and you need an environment\nvariable to run it correctly.\n\nUse Homebrew to install the dependencies. There might be other methods\nto get the dependencies, but that's left up to the reader if they want\nto use a different package manager.\n\nIf you have Homebrew installed, you should already have the Xcode tools\nand that should include `make`.\n\n```bash\n$ brew install asciidoc xmlto\n```\n\nThen set the following environment variable everytime you run `make`.\n\n```bash\nexport XML_CATALOG_FILES=/usr/local/etc/xml/catalog\n```\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/man/footer.txt",
    "content": "BUGS\n----\nReport bugs to the GitHub issue tracker <https://github.com/influxdata/influxdb>.\n\nAUTHORS\n-------\nInfluxDB is written and maintained by InfluxData <https://influxdata.com>.\n\nCOPYRIGHT\n---------\nInfluxDB is released under the MIT license.\n\nThis man page is released under Creative Commons Attribution 4.0 International License.\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/man/influx.txt",
    "content": "influx(1)\n=========\n\nNAME\n----\ninflux - InfluxDB client\n\nSYNOPSIS\n--------\n[verse]\n'influx' [options]\n'influx' -execute <command> [options]\n'influx' -import -path <path> (-compressed) [options]\n'influx' -version\n\nDESCRIPTION\n-----------\n'influx' is the command line program for interacting with an InfluxDB server.\n\nIn the first form, the program starts a CLI that can be used to write data or query the database. The command line is described in *COMMAND LINE*.\n\nIn the second form, this will execute a single command, usually a query. This is the equivalent of starting the command line, running one command, and then exiting.\n\nIn the third form, this imports a previously exported database to the database.\n\nThe fourth form outputs the version of the command line and then immediately exits.\n\nOPTIONS\n-------\n-host <host>::\n  Host to connect to. Default is localhost.\n\n-port <port>::\n  Port to use when connecting to the host. Default is 8086.\n\n-database <database>::\n  Database to use when connecting to the database.\n\n-username <username>::\n  Username to connect to the server.\n\n-password <password>::\n  Password to connect to the server. If left blank, this will prompt for a password.\n\n-ssl:\n  Use https for requests.\n\n-unsafeSsl::\n  Set this with '-ssl' to allow unsafe connections.\n\n-execute <command>::\n  Executes the command and exits.\n\n-format <json|csv|column>::\n  Sets the format of the server responses. Default is column.\n\n-precision <rfc3339|h|m|s|ms|u|ns>::\n  Specifies the format of the timestamp. Default is ns.\n\n-consistency <any|one|quorum|all>::\n  Set the write consistency level. Default is one.\n\n-pretty::\n  Turns on pretty print format for the JSON format.\n\n-import::\n  Import a previous database export from a file. If specified, '-path <path>' must also be specified.\n\n-path <path>::\n  Path to the database export file to import. 
Must be used with '-import'.\n\n-pps <n>::\n  How many points per second the import will allow. By default, it is zero and will not throttle importing.\n\n-compressed::\n  Set if the import file is compressed. Must be used with '-import'.\n\n-version::\n  Outputs the version of the influx client.\n\ninclude::footer.txt[]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/man/influx_inspect.txt",
    "content": "influx_inspect(1)\n=================\n\nNAME\n----\ninflux_inspect - Displays detailed information about InfluxDB data files\n\nSYNPOSIS\n--------\n[verse]\n'influx_inspect dumptsm' [options]\n'influx_inspect export' [options]\n'influx_inspect report' [options]\n'influx_inspect verify' [options]\n\nDESCRIPTION\n-----------\nDisplays detailed information about InfluxDB data files through one of the\nfollowing commands.\n\n*dumptsm*::\n  Dumps low-level details about tsm1 files.\n\n*export*::\n  Exports TSM files into InfluxDB line protocol format.\n\n*report*::\n  Displays shard level report.\n\n*verify*::\n  Verifies integrity of TSM files.\n\nDUMPTSM OPTIONS\n---------------\n-all::\n  Dump all data. Caution: This may print a lot of information.\n\n-blocks::\n  Dump raw block data.\n\n-filter-key <string>::\n  Only display index and block data that match this key substring.\n\n-index::\n  Dump raw index data.\n\nEXPORT OPTIONS\n--------------\n-compress::\n  Compress the output.\n\n-db <name>::\n  The database to export. Optional.\n\n-rp <name>::\n  The retention policy to export. Optional. Requires the '-db <name>' option to be specified.\n\n-data-dir <path>::\n  Data storage path. Defaults to '~/.influxdb/data'.\n\n-wal-dir <path>::\n  Wal storage path. Defaults to '~/.influxdb/wal'.\n\n-start <timestamp>::\n  The start time of the export. The timestamp is in RFC3339 format. Optional.\n\n-end <timestamp>::\n  The end time of the export. The timestamp is in RFC3339 format. Optional.\n\n-out <path>::\n  Destination file to write exported data to. Defaults to '~/.influxdb/export'.\n\nREPORT OPTIONS\n--------------\n-detailed::\n  Report detailed cardinality estimates.\n\n-pattern <string>::\n  Include only files matching a pattern.\n\nVERIFY OPTIONS\n--------------\n-dir <path>::\n  Root storage path. Defaults to '~/.influxdb'.\n\ninclude:footer.txt[]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/man/influx_stress.txt",
    "content": "influx_stress(1)\n================\n\nNAME\n----\ninflux_stress - Runs a stress test against one or multiple InfluxDB servers\n\nSYNOPSIS\n--------\n[verse]\n'influx_stress' [options]\n\nDESCRIPTION\n-----------\nRuns write and query stress tests against one or multiple InfluxDB servers to\ncreate reproducible performance benchmarks against InfluxDB.\n\nOPTIONS\n-------\n-addr <addr>::\n  IP address and port of the database where response times will persist. This\n  is not for specifying which database to test against. That option is located\n  inside of the configuration file. The default is 'http://localhost:8086'.\n\n-database <name>::\n  The database where response times will persist. This is not for specifying\n  which database to test against. See '-db' or the configuration file for that\n  option. The default is 'stress'.\n\n-retention-policy <name>::\n  The retention policy where response times will persist. This is not for\n  specifying which retention policy to test against. See the configuration file\n  for that option. The default is an empty string which will use the default\n  retention policy.\n\n-config <path>::\n  The stress configuration file.\n\n-cpuprofile <path>::\n  Write the cpu profile to the path. No cpu profile is written unless this is\n  used. This profiles 'influx_stress', not the InfluxDB server.\n\n-db <name>::\n  The target database within the test system for write and query load.\n\n-tags <values>::\n  A comma separated list of tags.\n\n-v2::\n  Use version 2 of the stress tool. The default is to use version 1.\n\ninclude::footer.txt[]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/man/influx_tsm.txt",
    "content": "influx_tsm(1)\n=============\n\nNAME\n----\ninflux_tsm - Convert a database from b1 or bz1 format to tsm1 format\n\nSYNPOSIS\n--------\n[verse]\n'influx_tsm' [options] <data-path>\n\nDESCRIPTION\n-----------\nThis tool can be used to convert a database from the deprecated b1 or bz1\nformats to tsm1 format. The b1 and bz1 formats were deprecated in 0.10 and\nremoved in 0.12.\n\nThis tool will backup the directories before conversion (if not disabled). The\nbacked-up files must be removed manually, generally after starting up the node\nagain to make sure all of the data has been converted correctly.\n\nTo restore a backup after attempting to convert to tsm1, you shut down the\nnode, remove the converted directory, and copy the backed-up directory to the\noriginal location.\n\nOPTIONS\n-------\n-backup <path>::\n  The location to backup the current databases. Must not be within the data\n  directory.\n\n-dbs <names>::\n  Comma-delimited list of databases to convert. The default is to convert all\n  databases.\n\n-debug <addr>::\n  If set, http debugging endpoints will be enabled on the given address.\n\n-interval <duration>::\n  How often status updates are printed. Default is '5s'.\n\n-nobackup::\n  Disable database backups. Not recommended.\n\n-parallel::\n  Perform parallel conversions (up to GOMAXPROCS shards at once).\n\n-profile <path>::\n  Write a CPU profile to the path.\n\n-sz <size>::\n  Maximum size of individual TSM files. Defaults to 2147483648.\n\n-y::\n  Don't ask, just convert.\n\ninclude::footer.txt[]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/man/influxd-backup.txt",
    "content": "influxd-backup(1)\n=================\n\nNAME\n----\ninfluxd-backup - Downloads a snapshot of a data node and saves it to disk\n\nSYNOPSIS\n--------\n'influxd backup' [options]\n\nDESCRIPTION\n-----------\nDownloads a snapshot of a data node and saves it to disk.\n\nOPTIONS\n-------\n-host <host:port>::\n  The host to connect to and perform a snapshot of. Defaults to '127.0.0.1:8088'.\n\n-database <name>::\n  The database to backup. Required.\n\n-retention <name>::\n  The retention policy to backup. Optional.\n\n-shard <id>::\n  The shard id to backup. Optional. If specified, '-retention <name>' is required.\n\n-since <2015-12-24T08:12:13>::\n  Do an incremental backup since the passed in time. The time needs to be in the RFC3339 format. Optional.\n\nSEE ALSO\n--------\n*influxd-restore*(1)\n\ninclude::footer.txt[]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/man/influxd-config.txt",
    "content": "influxd-config(1)\n=================\n\nNAME\n----\ninfluxd-config - Generate configuration files for InfluxDB\n\nSYNOPSIS\n--------\n[verse]\n'influxd' config (-config <path>)\n'influxd config' -config /dev/null\n\nDESCRIPTION\n-----------\n'influxd config' will generate a configuration file for InfluxDB. The configuration file will be output to standard output and can be written to a file by redirecting the shell output to another file.\n\nWhen a configuration file is specified using '-config <path>', this configuration file will be read and will overwrite the default values for any values that are present. It can be used to provide a configuration fragment with only the options you want to customize and generate a new configuration file from that file. If '-config <path>' is not specified, the command will look for a default configuration file using the same method as *influxd-run*(1).\n\nWhen using this command to regenerate a configuration file in place, be sure to use a temporary file as the output. This command will not work:\n\n===\n# DO NOT USE!\n$ influxd config -config influxdb.conf > influxdb.conf\n\n# PROPER METHOD!\n$ influxd config -config influxdb.conf > influxdb.conf.tmp && \\\n      mv influxdb.conf.tmp influxdb.conf\n===\n\nThe shell will truncate the configuration file before 'influxd config' can read it and you will lose all of your custom options. For safety, redirect output to a temporary file instead and use 'mv' to move the file afterwards.\n\nThe second command version will force 'influxd config' to output the default configuration file. Setting the configuration file to */dev/null* will cause the command to output only the defaults and will not read any values from any existing configuration files.\n\nOPTIONS\n-------\n-config <path>::\n  Customize the default configuration file to load. Disables automatic loading when the path is */dev/null*.\n\ninclude::footer.txt[]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/man/influxd-restore.txt",
    "content": "influxd-restore(1)\n==================\n\nNAME\n----\ninfluxd-restore - Restores the metastore, databases, retention policies, or specific shards\n\nSYNOPSIS\n--------\n'influxd restore' [options] PATH\n\nDESCRIPTION\n-----------\nUses backups from the PATH to restore the metastore, databases, retention policies, or specific shards. The InfluxDB process must not be running during a restore.\n\nOPTIONS\n-------\n-metadir <path>::\n  If set, the metastore will be recovered to the given path. Optional.\n\n-datadir <path>::\n  If set, the restore process will recover the specified database, retention policy, or shard to the given directory. Optional.\n\n-database <name>::\n  Will restore the database TSM files. Required if no metadir is given. Optional.\n\n-retention <name>::\n  Will restore the retention policy's TSM files. If given, database is required. Optional.\n\n-shard <id>::\n  Will restore the shard's TSM files. If given, database and retention are required. Optional.\n\nSEE ALSO\n--------\n*influxd-backup*(1)\n\ninclude::footer.txt[]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/man/influxd-run.txt",
    "content": "influxd-run(1)\n==============\n\nNAME\n----\ninfluxd-run - Configure and start an InfluxDB server\n\nSYNOPSIS\n--------\n[verse]\n'influxd' [-config <path>] [-pidfile <path>] [-cpuprofile <path>] [-memprofile <path>]\n'influxd run' [-config <path>] [-pidfile <path>] [-cpuprofile <path>] [-memprofile <path>]\n\nDESCRIPTION\n-----------\nRuns the InfluxDB server.\n\nOPTIONS\n-------\n-config <path>::\n  Sets the path to the configuration file. This defaults to the environment variable *INFLUXDB_CONFIG_PATH*, *~/.influxdb/influxdb.conf*, or */etc/influxdb/influxdb.conf* if a file is present at any of these locations. Disable the automatic loading of a configuration file by using the null device as the path (such as /dev/null on Linux or Mac OS X).\n\n-pidfile <path>::\n  Write process ID to a file.\n\n-cpuprofile <path>::\n  Write CPU profiling information to a file.\n\n-memprofile <path>::\n  Write memory usage information to a file.\n\ninclude::footer.txt[]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/man/influxd-version.txt",
    "content": "influxd-version(1)\n==================\n\nNAME\n----\ninfluxd-version - Display the version of influxdb\n\nSYNOPSIS\n--------\n[verse]\n'influxd version'\n\nDESCRIPTION\n-----------\n'influxd version' will output the version of the InfluxDB server.\n\ninclude::footer.txt[]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/man/influxd.txt",
    "content": "influxd(1)\n==========\n\nNAME\n----\ninfluxd - InfluxDB server daemon\n\nSYNOPSIS\n--------\n[verse]\n'influxd' [command] [options]\n\nDESCRIPTION\n-----------\n'influxd' is the server daemon for InfluxDB.\n\nCOMMANDS\n--------\nThese commands can be invoked using the 'influxd' program. The default is 'run' if the command parameter is skipped.\n\nbackup::\n  Downloads a snapshot of a data node and saves it to disk.\n\nconfig::\n  Displays the default configuration. This can also read an existing configuration file and output the default values for any missing fields. Default values and existing entries in a configuration file can be customized through environment variables.\n\nrestore::\n  Uses backups to restore the metastore, databases, retention policies, or specific shards. The InfluxDB process must not be running during a restore.\n\nrun::\n  Runs the InfluxDB server. This is the default command if none is specified.\n\nversion::\n  Displays the InfluxDB version, build branch, and git commit hash.\n\nSEE ALSO\n--------\n*influxd-backup*(1), *influxd-config*(1), *influxd-restore*(1), *influxd-run*(1), *influxd-version*(1)\n\ninclude::footer.txt[]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/models/consistency.go",
    "content": "package models\n\nimport (\n\t\"errors\"\n\t\"strings\"\n)\n\n// ConsistencyLevel represent a required replication criteria before a write can\n// be returned as successful.\n//\n// The consistency level is handled in open-source InfluxDB but only applicable to clusters.\ntype ConsistencyLevel int\n\nconst (\n\t// ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet.\n\tConsistencyLevelAny ConsistencyLevel = iota\n\n\t// ConsistencyLevelOne requires at least one data node acknowledged a write.\n\tConsistencyLevelOne\n\n\t// ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write.\n\tConsistencyLevelQuorum\n\n\t// ConsistencyLevelAll requires all data nodes to acknowledge a write.\n\tConsistencyLevelAll\n)\n\nvar (\n\t// ErrInvalidConsistencyLevel is returned when parsing the string version\n\t// of a consistency level.\n\tErrInvalidConsistencyLevel = errors.New(\"invalid consistency level\")\n)\n\n// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const.\nfunc ParseConsistencyLevel(level string) (ConsistencyLevel, error) {\n\tswitch strings.ToLower(level) {\n\tcase \"any\":\n\t\treturn ConsistencyLevelAny, nil\n\tcase \"one\":\n\t\treturn ConsistencyLevelOne, nil\n\tcase \"quorum\":\n\t\treturn ConsistencyLevelQuorum, nil\n\tcase \"all\":\n\t\treturn ConsistencyLevelAll, nil\n\tdefault:\n\t\treturn 0, ErrInvalidConsistencyLevel\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/models/inline_fnv.go",
    "content": "package models // import \"github.com/influxdata/influxdb/models\"\n\n// from stdlib hash/fnv/fnv.go\nconst (\n\tprime64  = 1099511628211\n\toffset64 = 14695981039346656037\n)\n\n// InlineFNV64a is an alloc-free port of the standard library's fnv64a.\n// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function.\ntype InlineFNV64a uint64\n\n// NewInlineFNV64a returns a new instance of InlineFNV64a.\nfunc NewInlineFNV64a() InlineFNV64a {\n\treturn offset64\n}\n\n// Write adds data to the running hash.\nfunc (s *InlineFNV64a) Write(data []byte) (int, error) {\n\thash := uint64(*s)\n\tfor _, c := range data {\n\t\thash ^= uint64(c)\n\t\thash *= prime64\n\t}\n\t*s = InlineFNV64a(hash)\n\treturn len(data), nil\n}\n\n// Sum64 returns the uint64 of the current resulting hash.\nfunc (s *InlineFNV64a) Sum64() uint64 {\n\treturn uint64(*s)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/models/inline_fnv_test.go",
    "content": "package models_test\n\nimport (\n\t\"hash/fnv\"\n\t\"testing\"\n\t\"testing/quick\"\n\n\t\"github.com/influxdata/influxdb/models\"\n)\n\nfunc TestInlineFNV64aEquivalenceFuzz(t *testing.T) {\n\tf := func(data []byte) bool {\n\t\tstdlibFNV := fnv.New64a()\n\t\tstdlibFNV.Write(data)\n\t\twant := stdlibFNV.Sum64()\n\n\t\tinlineFNV := models.NewInlineFNV64a()\n\t\tinlineFNV.Write(data)\n\t\tgot := inlineFNV.Sum64()\n\n\t\treturn want == got\n\t}\n\tcfg := &quick.Config{\n\t\tMaxCount: 10000,\n\t}\n\tif err := quick.Check(f, cfg); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go",
    "content": "package models // import \"github.com/influxdata/influxdb/models\"\n\nimport (\n\t\"reflect\"\n\t\"strconv\"\n\t\"unsafe\"\n)\n\n// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.\nfunc parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {\n\ts := unsafeBytesToString(b)\n\treturn strconv.ParseInt(s, base, bitSize)\n}\n\n// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.\nfunc parseFloatBytes(b []byte, bitSize int) (float64, error) {\n\ts := unsafeBytesToString(b)\n\treturn strconv.ParseFloat(s, bitSize)\n}\n\n// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.\nfunc parseBoolBytes(b []byte) (bool, error) {\n\treturn strconv.ParseBool(unsafeBytesToString(b))\n}\n\n// unsafeBytesToString converts a []byte to a string without a heap allocation.\n//\n// It is unsafe, and is intended to prepare input to short-lived functions\n// that require strings.\nfunc unsafeBytesToString(in []byte) string {\n\tsrc := *(*reflect.SliceHeader)(unsafe.Pointer(&in))\n\tdst := reflect.StringHeader{\n\t\tData: src.Data,\n\t\tLen:  src.Len,\n\t}\n\ts := *(*string)(unsafe.Pointer(&dst))\n\treturn s\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/models/inline_strconv_parse_test.go",
    "content": "package models\n\nimport (\n\t\"strconv\"\n\t\"testing\"\n\t\"testing/quick\"\n)\n\nfunc TestParseIntBytesEquivalenceFuzz(t *testing.T) {\n\tf := func(b []byte, base int, bitSize int) bool {\n\t\texp, expErr := strconv.ParseInt(string(b), base, bitSize)\n\t\tgot, gotErr := parseIntBytes(b, base, bitSize)\n\n\t\treturn exp == got && checkErrs(expErr, gotErr)\n\t}\n\n\tcfg := &quick.Config{\n\t\tMaxCount: 10000,\n\t}\n\n\tif err := quick.Check(f, cfg); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestParseIntBytesValid64bitBase10EquivalenceFuzz(t *testing.T) {\n\tbuf := []byte{}\n\tf := func(n int64) bool {\n\t\tbuf = strconv.AppendInt(buf[:0], n, 10)\n\n\t\texp, expErr := strconv.ParseInt(string(buf), 10, 64)\n\t\tgot, gotErr := parseIntBytes(buf, 10, 64)\n\n\t\treturn exp == got && checkErrs(expErr, gotErr)\n\t}\n\n\tcfg := &quick.Config{\n\t\tMaxCount: 10000,\n\t}\n\n\tif err := quick.Check(f, cfg); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestParseFloatBytesEquivalenceFuzz(t *testing.T) {\n\tf := func(b []byte, bitSize int) bool {\n\t\texp, expErr := strconv.ParseFloat(string(b), bitSize)\n\t\tgot, gotErr := parseFloatBytes(b, bitSize)\n\n\t\treturn exp == got && checkErrs(expErr, gotErr)\n\t}\n\n\tcfg := &quick.Config{\n\t\tMaxCount: 10000,\n\t}\n\n\tif err := quick.Check(f, cfg); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestParseFloatBytesValid64bitEquivalenceFuzz(t *testing.T) {\n\tbuf := []byte{}\n\tf := func(n float64) bool {\n\t\tbuf = strconv.AppendFloat(buf[:0], n, 'f', -1, 64)\n\n\t\texp, expErr := strconv.ParseFloat(string(buf), 64)\n\t\tgot, gotErr := parseFloatBytes(buf, 64)\n\n\t\treturn exp == got && checkErrs(expErr, gotErr)\n\t}\n\n\tcfg := &quick.Config{\n\t\tMaxCount: 10000,\n\t}\n\n\tif err := quick.Check(f, cfg); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestParseBoolBytesEquivalence(t *testing.T) {\n\tvar buf []byte\n\tfor _, s := range []string{\"1\", \"t\", \"T\", \"TRUE\", \"true\", \"True\", \"0\", 
\"f\", \"F\", \"FALSE\", \"false\", \"False\", \"fail\", \"TrUe\", \"FAlSE\", \"numbers\", \"\"} {\n\t\tbuf = append(buf[:0], s...)\n\n\t\texp, expErr := strconv.ParseBool(s)\n\t\tgot, gotErr := parseBoolBytes(buf)\n\n\t\tif got != exp || !checkErrs(expErr, gotErr) {\n\t\t\tt.Errorf(\"Failed to parse boolean value %q correctly: wanted (%t, %v), got (%t, %v)\", s, exp, expErr, got, gotErr)\n\t\t}\n\t}\n}\n\nfunc checkErrs(a, b error) bool {\n\tif (a == nil) != (b == nil) {\n\t\treturn false\n\t}\n\n\treturn a == nil || a.Error() == b.Error()\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/models/points.go",
    "content": "// Package models implements basic objects used throughout the TICK stack.\npackage models // import \"github.com/influxdata/influxdb/models\"\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/pkg/escape\"\n)\n\nvar (\n\tmeasurementEscapeCodes = map[byte][]byte{\n\t\t',': []byte(`\\,`),\n\t\t' ': []byte(`\\ `),\n\t}\n\n\ttagEscapeCodes = map[byte][]byte{\n\t\t',': []byte(`\\,`),\n\t\t' ': []byte(`\\ `),\n\t\t'=': []byte(`\\=`),\n\t}\n\n\t// ErrPointMustHaveAField is returned when operating on a point that does not have any fields.\n\tErrPointMustHaveAField = errors.New(\"point without fields is unsupported\")\n\n\t// ErrInvalidNumber is returned when a number is expected but not provided.\n\tErrInvalidNumber = errors.New(\"invalid number\")\n\n\t// ErrInvalidPoint is returned when a point cannot be parsed correctly.\n\tErrInvalidPoint = errors.New(\"point is invalid\")\n)\n\nconst (\n\t// MaxKeyLength is the largest allowed size of the combined measurement and tag keys.\n\tMaxKeyLength = 65535\n)\n\n// Point defines the values that will be written to the database.\ntype Point interface {\n\t// Name return the measurement name for the point.\n\tName() []byte\n\n\t// SetName updates the measurement name for the point.\n\tSetName(string)\n\n\t// Tags returns the tag set for the point.\n\tTags() Tags\n\n\t// AddTag adds or replaces a tag value for a point.\n\tAddTag(key, value string)\n\n\t// SetTags replaces the tags for the point.\n\tSetTags(tags Tags)\n\n\t// HasTag returns true if the tag exists for the point.\n\tHasTag(tag []byte) bool\n\n\t// Fields returns the fields for the point.\n\tFields() (Fields, error)\n\n\t// Time return the timestamp for the point.\n\tTime() time.Time\n\n\t// SetTime updates the timestamp for the point.\n\tSetTime(t time.Time)\n\n\t// UnixNano returns the timestamp of the point as 
nanoseconds since Unix epoch.\n\tUnixNano() int64\n\n\t// HashID returns a non-cryptographic checksum of the point's key.\n\tHashID() uint64\n\n\t// Key returns the key (measurement joined with tags) of the point.\n\tKey() []byte\n\n\t// String returns a string representation of the point. If there is a\n\t// timestamp associated with the point then it will be specified with the default\n\t// precision of nanoseconds.\n\tString() string\n\n\t// MarshalBinary returns a binary representation of the point.\n\tMarshalBinary() ([]byte, error)\n\n\t// PrecisionString returns a string representation of the point. If there\n\t// is a timestamp associated with the point then it will be specified in the\n\t// given unit.\n\tPrecisionString(precision string) string\n\n\t// RoundedString returns a string representation of the point. If there\n\t// is a timestamp associated with the point, then it will be rounded to the\n\t// given duration.\n\tRoundedString(d time.Duration) string\n\n\t// Split will attempt to return multiple points with the same timestamp whose\n\t// string representations are no longer than size. 
Points with a single field or\n\t// a point without a timestamp may exceed the requested size.\n\tSplit(size int) []Point\n\n\t// Round will round the timestamp of the point to the given duration.\n\tRound(d time.Duration)\n\n\t// StringSize returns the length of the string that would be returned by String().\n\tStringSize() int\n\n\t// AppendString appends the result of String() to the provided buffer and returns\n\t// the result, potentially reducing string allocations.\n\tAppendString(buf []byte) []byte\n\n\t// FieldIterator retuns a FieldIterator that can be used to traverse the\n\t// fields of a point without constructing the in-memory map.\n\tFieldIterator() FieldIterator\n}\n\n// FieldType represents the type of a field.\ntype FieldType int\n\nconst (\n\t// Integer indicates the field's type is integer.\n\tInteger FieldType = iota\n\n\t// Float indicates the field's type is float.\n\tFloat\n\n\t// Boolean indicates the field's type is boolean.\n\tBoolean\n\n\t// String indicates the field's type is string.\n\tString\n\n\t// Empty is used to indicate that there is no field.\n\tEmpty\n)\n\n// FieldIterator provides a low-allocation interface to iterate through a point's fields.\ntype FieldIterator interface {\n\t// Next indicates whether there any fields remaining.\n\tNext() bool\n\n\t// FieldKey returns the key of the current field.\n\tFieldKey() []byte\n\n\t// Type returns the FieldType of the current field.\n\tType() FieldType\n\n\t// StringValue returns the string value of the current field.\n\tStringValue() string\n\n\t// IntegerValue returns the integer value of the current field.\n\tIntegerValue() (int64, error)\n\n\t// BooleanValue returns the boolean value of the current field.\n\tBooleanValue() (bool, error)\n\n\t// FloatValue returns the float value of the current field.\n\tFloatValue() (float64, error)\n\n\t// Reset resets the iterator to its initial state.\n\tReset()\n}\n\n// Points represents a sortable list of points by timestamp.\ntype Points 
[]Point\n\n// Len implements sort.Interface.\nfunc (a Points) Len() int { return len(a) }\n\n// Less implements sort.Interface.\nfunc (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) }\n\n// Swap implements sort.Interface.\nfunc (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\n// point is the default implementation of Point.\ntype point struct {\n\ttime time.Time\n\n\t// text encoding of measurement and tags\n\t// key must always be stored sorted by tags, if the original line was not sorted,\n\t// we need to resort it\n\tkey []byte\n\n\t// text encoding of field data\n\tfields []byte\n\n\t// text encoding of timestamp\n\tts []byte\n\n\t// cached version of parsed fields from data\n\tcachedFields map[string]interface{}\n\n\t// cached version of parsed name from key\n\tcachedName string\n\n\t// cached version of parsed tags\n\tcachedTags Tags\n\n\tit fieldIterator\n}\n\nconst (\n\t// the number of characters for the largest possible int64 (9223372036854775807)\n\tmaxInt64Digits = 19\n\n\t// the number of characters for the smallest possible int64 (-9223372036854775808)\n\tminInt64Digits = 20\n\n\t// the number of characters required for the largest float64 before a range check\n\t// would occur during parsing\n\tmaxFloat64Digits = 25\n\n\t// the number of characters required for smallest float64 before a range check occur\n\t// would occur during parsing\n\tminFloat64Digits = 27\n)\n\n// ParsePoints returns a slice of Points from a text representation of a point\n// with each point separated by newlines.  
If any points fail to parse, a non-nil error\n// will be returned in addition to the points that parsed successfully.\nfunc ParsePoints(buf []byte) ([]Point, error) {\n\treturn ParsePointsWithPrecision(buf, time.Now().UTC(), \"n\")\n}\n\n// ParsePointsString is identical to ParsePoints but accepts a string.\nfunc ParsePointsString(buf string) ([]Point, error) {\n\treturn ParsePoints([]byte(buf))\n}\n\n// ParseKey returns the measurement name and tags from a point.\n//\n// NOTE: to minimize heap allocations, the returned Tags will refer to subslices of buf.\n// This can have the unintended effect preventing buf from being garbage collected.\nfunc ParseKey(buf []byte) (string, Tags) {\n\t// Ignore the error because scanMeasurement returns \"missing fields\" which we ignore\n\t// when just parsing a key\n\tstate, i, _ := scanMeasurement(buf, 0)\n\n\tvar tags Tags\n\tif state == tagKeyState {\n\t\ttags = parseTags(buf)\n\t\t// scanMeasurement returns the location of the comma if there are tags, strip that off\n\t\treturn string(buf[:i-1]), tags\n\t}\n\treturn string(buf[:i]), tags\n}\n\nfunc ParseTags(buf []byte) (Tags, error) {\n\treturn parseTags(buf), nil\n}\n\nfunc ParseName(buf []byte) ([]byte, error) {\n\t// Ignore the error because scanMeasurement returns \"missing fields\" which we ignore\n\t// when just parsing a key\n\tstate, i, _ := scanMeasurement(buf, 0)\n\tif state == tagKeyState {\n\t\treturn buf[:i-1], nil\n\t}\n\treturn buf[:i], nil\n}\n\n// ParsePointsWithPrecision is similar to ParsePoints, but allows the\n// caller to provide a precision for time.\n//\n// NOTE: to minimize heap allocations, the returned Points will refer to subslices of buf.\n// This can have the unintended effect preventing buf from being garbage collected.\nfunc ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) {\n\tpoints := make([]Point, 0, bytes.Count(buf, []byte{'\\n'})+1)\n\tvar (\n\t\tpos    int\n\t\tblock  []byte\n\t\tfailed 
[]string\n\t)\n\tfor pos < len(buf) {\n\t\tpos, block = scanLine(buf, pos)\n\t\tpos++\n\n\t\tif len(block) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// lines which start with '#' are comments\n\t\tstart := skipWhitespace(block, 0)\n\n\t\t// If line is all whitespace, just skip it\n\t\tif start >= len(block) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif block[start] == '#' {\n\t\t\tcontinue\n\t\t}\n\n\t\t// strip the newline if one is present\n\t\tif block[len(block)-1] == '\\n' {\n\t\t\tblock = block[:len(block)-1]\n\t\t}\n\n\t\tpt, err := parsePoint(block[start:], defaultTime, precision)\n\t\tif err != nil {\n\t\t\tfailed = append(failed, fmt.Sprintf(\"unable to parse '%s': %v\", string(block[start:]), err))\n\t\t} else {\n\t\t\tpoints = append(points, pt)\n\t\t}\n\n\t}\n\tif len(failed) > 0 {\n\t\treturn points, fmt.Errorf(\"%s\", strings.Join(failed, \"\\n\"))\n\t}\n\treturn points, nil\n\n}\n\nfunc parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) {\n\t// scan the first block which is measurement[,tag1=value1,tag2=value=2...]\n\tpos, key, err := scanKey(buf, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// measurement name is required\n\tif len(key) == 0 {\n\t\treturn nil, fmt.Errorf(\"missing measurement\")\n\t}\n\n\tif len(key) > MaxKeyLength {\n\t\treturn nil, fmt.Errorf(\"max key length exceeded: %v > %v\", len(key), MaxKeyLength)\n\t}\n\n\t// scan the second block is which is field1=value1[,field2=value2,...]\n\tpos, fields, err := scanFields(buf, pos)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// at least one field is required\n\tif len(fields) == 0 {\n\t\treturn nil, fmt.Errorf(\"missing fields\")\n\t}\n\n\tvar maxKeyErr error\n\twalkFields(fields, func(k, v []byte) bool {\n\t\tif sz := seriesKeySize(key, k); sz > MaxKeyLength {\n\t\t\tmaxKeyErr = fmt.Errorf(\"max key length exceeded: %v > %v\", sz, MaxKeyLength)\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t})\n\n\tif maxKeyErr != nil {\n\t\treturn nil, maxKeyErr\n\t}\n\n\t// 
scan the last block which is an optional integer timestamp\n\tpos, ts, err := scanTime(buf, pos)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpt := &point{\n\t\tkey:    key,\n\t\tfields: fields,\n\t\tts:     ts,\n\t}\n\n\tif len(ts) == 0 {\n\t\tpt.time = defaultTime\n\t\tpt.SetPrecision(precision)\n\t} else {\n\t\tts, err := parseIntBytes(ts, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpt.time, err = SafeCalcTime(ts, precision)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Determine if there are illegal non-whitespace characters after the\n\t\t// timestamp block.\n\t\tfor pos < len(buf) {\n\t\t\tif buf[pos] != ' ' {\n\t\t\t\treturn nil, ErrInvalidPoint\n\t\t\t}\n\t\t\tpos++\n\t\t}\n\t}\n\treturn pt, nil\n}\n\n// GetPrecisionMultiplier will return a multiplier for the precision specified.\nfunc GetPrecisionMultiplier(precision string) int64 {\n\td := time.Nanosecond\n\tswitch precision {\n\tcase \"u\":\n\t\td = time.Microsecond\n\tcase \"ms\":\n\t\td = time.Millisecond\n\tcase \"s\":\n\t\td = time.Second\n\tcase \"m\":\n\t\td = time.Minute\n\tcase \"h\":\n\t\td = time.Hour\n\t}\n\treturn int64(d)\n}\n\n// scanKey scans buf starting at i for the measurement and tag portion of the point.\n// It returns the ending position and the byte slice of key within buf.  If there\n// are tags, they will be sorted if they are not already.\nfunc scanKey(buf []byte, i int) (int, []byte, error) {\n\tstart := skipWhitespace(buf, i)\n\n\ti = start\n\n\t// Determines whether the tags are sort, assume they are\n\tsorted := true\n\n\t// indices holds the indexes within buf of the start of each tag.  
For example,\n\t// a buf of 'cpu,host=a,region=b,zone=c' would have indices slice of [4,11,20]\n\t// which indicates that the first tag starts at buf[4], seconds at buf[11], and\n\t// last at buf[20]\n\tindices := make([]int, 100)\n\n\t// tracks how many commas we've seen so we know how many values are indices.\n\t// Since indices is an arbitrarily large slice,\n\t// we need to know how many values in the buffer are in use.\n\tcommas := 0\n\n\t// First scan the Point's measurement.\n\tstate, i, err := scanMeasurement(buf, i)\n\tif err != nil {\n\t\treturn i, buf[start:i], err\n\t}\n\n\t// Optionally scan tags if needed.\n\tif state == tagKeyState {\n\t\ti, commas, indices, err = scanTags(buf, i, indices)\n\t\tif err != nil {\n\t\t\treturn i, buf[start:i], err\n\t\t}\n\t}\n\n\t// Now we know where the key region is within buf, and the location of tags, we\n\t// need to determine if duplicate tags exist and if the tags are sorted. This iterates\n\t// over the list comparing each tag in the sequence with each other.\n\tfor j := 0; j < commas-1; j++ {\n\t\t// get the left and right tags\n\t\t_, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=')\n\t\t_, right := scanTo(buf[indices[j+1]:indices[j+2]-1], 0, '=')\n\n\t\t// If left is greater than right, the tags are not sorted. We do not have to\n\t\t// continue because the short path no longer works.\n\t\t// If the tags are equal, then there are duplicate tags, and we should abort.\n\t\t// If the tags are not sorted, this pass may not find duplicate tags and we\n\t\t// need to do a more exhaustive search later.\n\t\tif cmp := bytes.Compare(left, right); cmp > 0 {\n\t\t\tsorted = false\n\t\t\tbreak\n\t\t} else if cmp == 0 {\n\t\t\treturn i, buf[start:i], fmt.Errorf(\"duplicate tags\")\n\t\t}\n\t}\n\n\t// If the tags are not sorted, then sort them.  This sort is inline and\n\t// uses the tag indices we created earlier.  The actual buffer is not sorted, the\n\t// indices are using the buffer for value comparison.  
After the indices are sorted,\n\t// the buffer is reconstructed from the sorted indices.\n\tif !sorted && commas > 0 {\n\t\t// Get the measurement name for later\n\t\tmeasurement := buf[start : indices[0]-1]\n\n\t\t// Sort the indices\n\t\tindices := indices[:commas]\n\t\tinsertionSort(0, commas, buf, indices)\n\n\t\t// Create a new key using the measurement and sorted indices\n\t\tb := make([]byte, len(buf[start:i]))\n\t\tpos := copy(b, measurement)\n\t\tfor _, i := range indices {\n\t\t\tb[pos] = ','\n\t\t\tpos++\n\t\t\t_, v := scanToSpaceOr(buf, i, ',')\n\t\t\tpos += copy(b[pos:], v)\n\t\t}\n\n\t\t// Check again for duplicate tags now that the tags are sorted.\n\t\tfor j := 0; j < commas-1; j++ {\n\t\t\t// get the left and right tags\n\t\t\t_, left := scanTo(buf[indices[j]:], 0, '=')\n\t\t\t_, right := scanTo(buf[indices[j+1]:], 0, '=')\n\n\t\t\t// If the tags are equal, then there are duplicate tags, and we should abort.\n\t\t\t// If the tags are not sorted, this pass may not find duplicate tags and we\n\t\t\t// need to do a more exhaustive search later.\n\t\t\tif bytes.Equal(left, right) {\n\t\t\t\treturn i, b, fmt.Errorf(\"duplicate tags\")\n\t\t\t}\n\t\t}\n\n\t\treturn i, b, nil\n\t}\n\n\treturn i, buf[start:i], nil\n}\n\n// The following constants allow us to specify which state to move to\n// next, when scanning sections of a Point.\nconst (\n\ttagKeyState = iota\n\ttagValueState\n\tfieldsState\n)\n\n// scanMeasurement examines the measurement part of a Point, returning\n// the next state to move to, and the current location in the buffer.\nfunc scanMeasurement(buf []byte, i int) (int, int, error) {\n\t// Check first byte of measurement, anything except a comma is fine.\n\t// It can't be a space, since whitespace is stripped prior to this\n\t// function call.\n\tif i >= len(buf) || buf[i] == ',' {\n\t\treturn -1, i, fmt.Errorf(\"missing measurement\")\n\t}\n\n\tfor {\n\t\ti++\n\t\tif i >= len(buf) {\n\t\t\t// cpu\n\t\t\treturn -1, i, fmt.Errorf(\"missing 
fields\")\n\t\t}\n\n\t\tif buf[i-1] == '\\\\' {\n\t\t\t// Skip character (it's escaped).\n\t\t\tcontinue\n\t\t}\n\n\t\t// Unescaped comma; move onto scanning the tags.\n\t\tif buf[i] == ',' {\n\t\t\treturn tagKeyState, i + 1, nil\n\t\t}\n\n\t\t// Unescaped space; move onto scanning the fields.\n\t\tif buf[i] == ' ' {\n\t\t\t// cpu value=1.0\n\t\t\treturn fieldsState, i, nil\n\t\t}\n\t}\n}\n\n// scanTags examines all the tags in a Point, keeping track of and\n// returning the updated indices slice, number of commas and location\n// in buf where to start examining the Point fields.\nfunc scanTags(buf []byte, i int, indices []int) (int, int, []int, error) {\n\tvar (\n\t\terr    error\n\t\tcommas int\n\t\tstate  = tagKeyState\n\t)\n\n\tfor {\n\t\tswitch state {\n\t\tcase tagKeyState:\n\t\t\t// Grow our indices slice if we have too many tags.\n\t\t\tif commas >= len(indices) {\n\t\t\t\tnewIndics := make([]int, cap(indices)*2)\n\t\t\t\tcopy(newIndics, indices)\n\t\t\t\tindices = newIndics\n\t\t\t}\n\t\t\tindices[commas] = i\n\t\t\tcommas++\n\n\t\t\ti, err = scanTagsKey(buf, i)\n\t\t\tstate = tagValueState // tag value always follows a tag key\n\t\tcase tagValueState:\n\t\t\tstate, i, err = scanTagsValue(buf, i)\n\t\tcase fieldsState:\n\t\t\tindices[commas] = i + 1\n\t\t\treturn i, commas, indices, nil\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn i, commas, indices, err\n\t\t}\n\t}\n}\n\n// scanTagsKey scans each character in a tag key.\nfunc scanTagsKey(buf []byte, i int) (int, error) {\n\t// First character of the key.\n\tif i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {\n\t\t// cpu,{'', ' ', ',', '='}\n\t\treturn i, fmt.Errorf(\"missing tag key\")\n\t}\n\n\t// Examine each character in the tag key until we hit an unescaped\n\t// equals (the tag value), or we hit an error (i.e., unescaped\n\t// space or comma).\n\tfor {\n\t\ti++\n\n\t\t// Either we reached the end of the buffer or we hit an\n\t\t// unescaped comma or space.\n\t\tif i >= len(buf) 
||\n\t\t\t((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\\\') {\n\t\t\t// cpu,tag{'', ' ', ','}\n\t\t\treturn i, fmt.Errorf(\"missing tag value\")\n\t\t}\n\n\t\tif buf[i] == '=' && buf[i-1] != '\\\\' {\n\t\t\t// cpu,tag=\n\t\t\treturn i + 1, nil\n\t\t}\n\t}\n}\n\n// scanTagsValue scans each character in a tag value.\nfunc scanTagsValue(buf []byte, i int) (int, int, error) {\n\t// Tag value cannot be empty.\n\tif i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {\n\t\t// cpu,tag={',', ' '}\n\t\treturn -1, i, fmt.Errorf(\"missing tag value\")\n\t}\n\n\t// Examine each character in the tag value until we hit an unescaped\n\t// comma (move onto next tag key), an unescaped space (move onto\n\t// fields), or we error out.\n\tfor {\n\t\ti++\n\t\tif i >= len(buf) {\n\t\t\t// cpu,tag=value\n\t\t\treturn -1, i, fmt.Errorf(\"missing fields\")\n\t\t}\n\n\t\t// An unescaped equals sign is an invalid tag value.\n\t\tif buf[i] == '=' && buf[i-1] != '\\\\' {\n\t\t\t// cpu,tag={'=', 'fo=o'}\n\t\t\treturn -1, i, fmt.Errorf(\"invalid tag format\")\n\t\t}\n\n\t\tif buf[i] == ',' && buf[i-1] != '\\\\' {\n\t\t\t// cpu,tag=foo,\n\t\t\treturn tagKeyState, i + 1, nil\n\t\t}\n\n\t\t// cpu,tag=foo value=1.0\n\t\t// cpu, tag=foo\\= value=1.0\n\t\tif buf[i] == ' ' && buf[i-1] != '\\\\' {\n\t\t\treturn fieldsState, i, nil\n\t\t}\n\t}\n}\n\nfunc insertionSort(l, r int, buf []byte, indices []int) {\n\tfor i := l + 1; i < r; i++ {\n\t\tfor j := i; j > l && less(buf, indices, j, j-1); j-- {\n\t\t\tindices[j], indices[j-1] = indices[j-1], indices[j]\n\t\t}\n\t}\n}\n\nfunc less(buf []byte, indices []int, i, j int) bool {\n\t// This grabs the tag names for i & j, it ignores the values\n\t_, a := scanTo(buf, indices[i], '=')\n\t_, b := scanTo(buf, indices[j], '=')\n\treturn bytes.Compare(a, b) < 0\n}\n\n// scanFields scans buf, starting at i for the fields section of a point.  
It returns\n// the ending position and the byte slice of the fields within buf.\nfunc scanFields(buf []byte, i int) (int, []byte, error) {\n\tstart := skipWhitespace(buf, i)\n\ti = start\n\tquoted := false\n\n\t// tracks how many '=' we've seen\n\tequals := 0\n\n\t// tracks how many commas we've seen\n\tcommas := 0\n\n\tfor {\n\t\t// reached the end of buf?\n\t\tif i >= len(buf) {\n\t\t\tbreak\n\t\t}\n\n\t\t// escaped characters?\n\t\tif buf[i] == '\\\\' && i+1 < len(buf) {\n\t\t\ti += 2\n\t\t\tcontinue\n\t\t}\n\n\t\t// If the value is quoted, scan until we get to the end quote\n\t\t// Only quote values in the field value since quotes are not significant\n\t\t// in the field key\n\t\tif buf[i] == '\"' && equals > commas {\n\t\t\tquoted = !quoted\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\n\t\t// If we see an =, ensure that there is at least on char before and after it\n\t\tif buf[i] == '=' && !quoted {\n\t\t\tequals++\n\n\t\t\t// check for \"... =123\" but allow \"a\\ =123\"\n\t\t\tif buf[i-1] == ' ' && buf[i-2] != '\\\\' {\n\t\t\t\treturn i, buf[start:i], fmt.Errorf(\"missing field key\")\n\t\t\t}\n\n\t\t\t// check for \"...a=123,=456\" but allow \"a=123,a\\,=456\"\n\t\t\tif buf[i-1] == ',' && buf[i-2] != '\\\\' {\n\t\t\t\treturn i, buf[start:i], fmt.Errorf(\"missing field key\")\n\t\t\t}\n\n\t\t\t// check for \"... value=\"\n\t\t\tif i+1 >= len(buf) {\n\t\t\t\treturn i, buf[start:i], fmt.Errorf(\"missing field value\")\n\t\t\t}\n\n\t\t\t// check for \"... 
value=,value2=...\"\n\t\t\tif buf[i+1] == ',' || buf[i+1] == ' ' {\n\t\t\t\treturn i, buf[start:i], fmt.Errorf(\"missing field value\")\n\t\t\t}\n\n\t\t\tif isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {\n\t\t\t\tvar err error\n\t\t\t\ti, err = scanNumber(buf, i+1)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn i, buf[start:i], err\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// If next byte is not a double-quote, the value must be a boolean\n\t\t\tif buf[i+1] != '\"' {\n\t\t\t\tvar err error\n\t\t\t\ti, _, err = scanBoolean(buf, i+1)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn i, buf[start:i], err\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif buf[i] == ',' && !quoted {\n\t\t\tcommas++\n\t\t}\n\n\t\t// reached end of block?\n\t\tif buf[i] == ' ' && !quoted {\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t}\n\n\tif quoted {\n\t\treturn i, buf[start:i], fmt.Errorf(\"unbalanced quotes\")\n\t}\n\n\t// check that all field sections had key and values (e.g. prevent \"a=1,b\"\n\tif equals == 0 || commas != equals-1 {\n\t\treturn i, buf[start:i], fmt.Errorf(\"invalid field format\")\n\t}\n\n\treturn i, buf[start:i], nil\n}\n\n// scanTime scans buf, starting at i for the time section of a point. 
It\n// returns the ending position and the byte slice of the timestamp within buf\n// and and error if the timestamp is not in the correct numeric format.\nfunc scanTime(buf []byte, i int) (int, []byte, error) {\n\tstart := skipWhitespace(buf, i)\n\ti = start\n\n\tfor {\n\t\t// reached the end of buf?\n\t\tif i >= len(buf) {\n\t\t\tbreak\n\t\t}\n\n\t\t// Reached end of block or trailing whitespace?\n\t\tif buf[i] == '\\n' || buf[i] == ' ' {\n\t\t\tbreak\n\t\t}\n\n\t\t// Handle negative timestamps\n\t\tif i == start && buf[i] == '-' {\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\n\t\t// Timestamps should be integers, make sure they are so we don't need\n\t\t// to actually  parse the timestamp until needed.\n\t\tif buf[i] < '0' || buf[i] > '9' {\n\t\t\treturn i, buf[start:i], fmt.Errorf(\"bad timestamp\")\n\t\t}\n\t\ti++\n\t}\n\treturn i, buf[start:i], nil\n}\n\nfunc isNumeric(b byte) bool {\n\treturn (b >= '0' && b <= '9') || b == '.'\n}\n\n// scanNumber returns the end position within buf, start at i after\n// scanning over buf for an integer, or float.  It returns an\n// error if a invalid number is scanned.\nfunc scanNumber(buf []byte, i int) (int, error) {\n\tstart := i\n\tvar isInt bool\n\n\t// Is negative number?\n\tif i < len(buf) && buf[i] == '-' {\n\t\ti++\n\t\t// There must be more characters now, as just '-' is illegal.\n\t\tif i == len(buf) {\n\t\t\treturn i, ErrInvalidNumber\n\t\t}\n\t}\n\n\t// how many decimal points we've see\n\tdecimal := false\n\n\t// indicates the number is float in scientific notation\n\tscientific := false\n\n\tfor {\n\t\tif i >= len(buf) {\n\t\t\tbreak\n\t\t}\n\n\t\tif buf[i] == ',' || buf[i] == ' ' {\n\t\t\tbreak\n\t\t}\n\n\t\tif buf[i] == 'i' && i > start && !isInt {\n\t\t\tisInt = true\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\n\t\tif buf[i] == '.' {\n\t\t\t// Can't have more than 1 decimal (e.g. 
1.1.1 should fail)\n\t\t\tif decimal {\n\t\t\t\treturn i, ErrInvalidNumber\n\t\t\t}\n\t\t\tdecimal = true\n\t\t}\n\n\t\t// `e` is valid for floats but not as the first char\n\t\tif i > start && (buf[i] == 'e' || buf[i] == 'E') {\n\t\t\tscientific = true\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\n\t\t// + and - are only valid at this point if they follow an e (scientific notation)\n\t\tif (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\n\t\t// NaN is an unsupported value\n\t\tif i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {\n\t\t\treturn i, ErrInvalidNumber\n\t\t}\n\n\t\tif !isNumeric(buf[i]) {\n\t\t\treturn i, ErrInvalidNumber\n\t\t}\n\t\ti++\n\t}\n\n\tif isInt && (decimal || scientific) {\n\t\treturn i, ErrInvalidNumber\n\t}\n\n\tnumericDigits := i - start\n\tif isInt {\n\t\tnumericDigits--\n\t}\n\tif decimal {\n\t\tnumericDigits--\n\t}\n\tif buf[start] == '-' {\n\t\tnumericDigits--\n\t}\n\n\tif numericDigits == 0 {\n\t\treturn i, ErrInvalidNumber\n\t}\n\n\t// It's more common that numbers will be within min/max range for their type but we need to prevent\n\t// out or range numbers from being parsed successfully.  This uses some simple heuristics to decide\n\t// if we should parse the number to the actual type.  It does not do it all the time because it incurs\n\t// extra allocations and we end up converting the type again when writing points to disk.\n\tif isInt {\n\t\t// Make sure the last char is an 'i' for integers (e.g. 
9i10 is not valid)\n\t\tif buf[i-1] != 'i' {\n\t\t\treturn i, ErrInvalidNumber\n\t\t}\n\t\t// Parse the int to check bounds the number of digits could be larger than the max range\n\t\t// We subtract 1 from the index to remove the `i` from our tests\n\t\tif len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {\n\t\t\tif _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {\n\t\t\t\treturn i, fmt.Errorf(\"unable to parse integer %s: %s\", buf[start:i-1], err)\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range\n\t\tif scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {\n\t\t\tif _, err := parseFloatBytes(buf[start:i], 10); err != nil {\n\t\t\t\treturn i, fmt.Errorf(\"invalid float\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn i, nil\n}\n\n// scanBoolean returns the end position within buf, start at i after\n// scanning over buf for boolean. Valid values for a boolean are\n// t, T, true, TRUE, f, F, false, FALSE.  
It returns an error if a invalid boolean\n// is scanned.\nfunc scanBoolean(buf []byte, i int) (int, []byte, error) {\n\tstart := i\n\n\tif i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {\n\t\treturn i, buf[start:i], fmt.Errorf(\"invalid boolean\")\n\t}\n\n\ti++\n\tfor {\n\t\tif i >= len(buf) {\n\t\t\tbreak\n\t\t}\n\n\t\tif buf[i] == ',' || buf[i] == ' ' {\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t}\n\n\t// Single char bool (t, T, f, F) is ok\n\tif i-start == 1 {\n\t\treturn i, buf[start:i], nil\n\t}\n\n\t// length must be 4 for true or TRUE\n\tif (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {\n\t\treturn i, buf[start:i], fmt.Errorf(\"invalid boolean\")\n\t}\n\n\t// length must be 5 for false or FALSE\n\tif (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {\n\t\treturn i, buf[start:i], fmt.Errorf(\"invalid boolean\")\n\t}\n\n\t// Otherwise\n\tvalid := false\n\tswitch buf[start] {\n\tcase 't':\n\t\tvalid = bytes.Equal(buf[start:i], []byte(\"true\"))\n\tcase 'f':\n\t\tvalid = bytes.Equal(buf[start:i], []byte(\"false\"))\n\tcase 'T':\n\t\tvalid = bytes.Equal(buf[start:i], []byte(\"TRUE\")) || bytes.Equal(buf[start:i], []byte(\"True\"))\n\tcase 'F':\n\t\tvalid = bytes.Equal(buf[start:i], []byte(\"FALSE\")) || bytes.Equal(buf[start:i], []byte(\"False\"))\n\t}\n\n\tif !valid {\n\t\treturn i, buf[start:i], fmt.Errorf(\"invalid boolean\")\n\t}\n\n\treturn i, buf[start:i], nil\n\n}\n\n// skipWhitespace returns the end position within buf, starting at i after\n// scanning over spaces in tags.\nfunc skipWhitespace(buf []byte, i int) int {\n\tfor i < len(buf) {\n\t\tif buf[i] != ' ' && buf[i] != '\\t' && buf[i] != 0 {\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t}\n\treturn i\n}\n\n// scanLine returns the end position in buf and the next line found within\n// buf.\nfunc scanLine(buf []byte, i int) (int, []byte) {\n\tstart := i\n\tquoted := false\n\tfields := false\n\n\t// tracks how many '=' and commas we've seen\n\t// this duplicates some of 
the functionality in scanFields\n\tequals := 0\n\tcommas := 0\n\tfor {\n\t\t// reached the end of buf?\n\t\tif i >= len(buf) {\n\t\t\tbreak\n\t\t}\n\n\t\t// skip past escaped characters\n\t\tif buf[i] == '\\\\' {\n\t\t\ti += 2\n\t\t\tcontinue\n\t\t}\n\n\t\tif buf[i] == ' ' {\n\t\t\tfields = true\n\t\t}\n\n\t\t// If we see a double quote, makes sure it is not escaped\n\t\tif fields {\n\t\t\tif !quoted && buf[i] == '=' {\n\t\t\t\ti++\n\t\t\t\tequals++\n\t\t\t\tcontinue\n\t\t\t} else if !quoted && buf[i] == ',' {\n\t\t\t\ti++\n\t\t\t\tcommas++\n\t\t\t\tcontinue\n\t\t\t} else if buf[i] == '\"' && equals > commas {\n\t\t\t\ti++\n\t\t\t\tquoted = !quoted\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif buf[i] == '\\n' && !quoted {\n\t\t\tbreak\n\t\t}\n\n\t\ti++\n\t}\n\n\treturn i, buf[start:i]\n}\n\n// scanTo returns the end position in buf and the next consecutive block\n// of bytes, starting from i and ending with stop byte, where stop byte\n// has not been escaped.\n//\n// If there are leading spaces, they are skipped.\nfunc scanTo(buf []byte, i int, stop byte) (int, []byte) {\n\tstart := i\n\tfor {\n\t\t// reached the end of buf?\n\t\tif i >= len(buf) {\n\t\t\tbreak\n\t\t}\n\n\t\t// Reached unescaped stop value?\n\t\tif buf[i] == stop && (i == 0 || buf[i-1] != '\\\\') {\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t}\n\n\treturn i, buf[start:i]\n}\n\n// scanTo returns the end position in buf and the next consecutive block\n// of bytes, starting from i and ending with stop byte.  
If there are leading\n// spaces, they are skipped.\nfunc scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) {\n\tstart := i\n\tif buf[i] == stop || buf[i] == ' ' {\n\t\treturn i, buf[start:i]\n\t}\n\n\tfor {\n\t\ti++\n\t\tif buf[i-1] == '\\\\' {\n\t\t\tcontinue\n\t\t}\n\n\t\t// reached the end of buf?\n\t\tif i >= len(buf) {\n\t\t\treturn i, buf[start:i]\n\t\t}\n\n\t\t// reached end of block?\n\t\tif buf[i] == stop || buf[i] == ' ' {\n\t\t\treturn i, buf[start:i]\n\t\t}\n\t}\n}\n\nfunc scanTagValue(buf []byte, i int) (int, []byte) {\n\tstart := i\n\tfor {\n\t\tif i >= len(buf) {\n\t\t\tbreak\n\t\t}\n\n\t\tif buf[i] == ',' && buf[i-1] != '\\\\' {\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t}\n\tif i > len(buf) {\n\t\treturn i, nil\n\t}\n\treturn i, buf[start:i]\n}\n\nfunc scanFieldValue(buf []byte, i int) (int, []byte) {\n\tstart := i\n\tquoted := false\n\tfor i < len(buf) {\n\t\t// Only escape char for a field value is a double-quote and backslash\n\t\tif buf[i] == '\\\\' && i+1 < len(buf) && (buf[i+1] == '\"' || buf[i+1] == '\\\\') {\n\t\t\ti += 2\n\t\t\tcontinue\n\t\t}\n\n\t\t// Quoted value? (e.g. 
string)\n\t\tif buf[i] == '\"' {\n\t\t\ti++\n\t\t\tquoted = !quoted\n\t\t\tcontinue\n\t\t}\n\n\t\tif buf[i] == ',' && !quoted {\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t}\n\treturn i, buf[start:i]\n}\n\nfunc escapeMeasurement(in []byte) []byte {\n\tfor b, esc := range measurementEscapeCodes {\n\t\tin = bytes.Replace(in, []byte{b}, esc, -1)\n\t}\n\treturn in\n}\n\nfunc unescapeMeasurement(in []byte) []byte {\n\tfor b, esc := range measurementEscapeCodes {\n\t\tin = bytes.Replace(in, esc, []byte{b}, -1)\n\t}\n\treturn in\n}\n\nfunc escapeTag(in []byte) []byte {\n\tfor b, esc := range tagEscapeCodes {\n\t\tif bytes.IndexByte(in, b) != -1 {\n\t\t\tin = bytes.Replace(in, []byte{b}, esc, -1)\n\t\t}\n\t}\n\treturn in\n}\n\nfunc unescapeTag(in []byte) []byte {\n\tif bytes.IndexByte(in, '\\\\') == -1 {\n\t\treturn in\n\t}\n\n\tfor b, esc := range tagEscapeCodes {\n\t\tif bytes.IndexByte(in, b) != -1 {\n\t\t\tin = bytes.Replace(in, esc, []byte{b}, -1)\n\t\t}\n\t}\n\treturn in\n}\n\n// escapeStringFieldReplacer replaces double quotes and backslashes\n// with the same character preceded by a backslash.\n// As of Go 1.7 this benchmarked better in allocations and CPU time\n// compared to iterating through a string byte-by-byte and appending to a new byte slice,\n// calling strings.Replace twice, and better than (*Regex).ReplaceAllString.\nvar escapeStringFieldReplacer = strings.NewReplacer(`\"`, `\\\"`, `\\`, `\\\\`)\n\n// EscapeStringField returns a copy of in with any double quotes or\n// backslashes with escaped values.\nfunc EscapeStringField(in string) string {\n\treturn escapeStringFieldReplacer.Replace(in)\n}\n\n// unescapeStringField returns a copy of in with any escaped double-quotes\n// or backslashes unescaped.\nfunc unescapeStringField(in string) string {\n\tif strings.IndexByte(in, '\\\\') == -1 {\n\t\treturn in\n\t}\n\n\tvar out []byte\n\ti := 0\n\tfor {\n\t\tif i >= len(in) {\n\t\t\tbreak\n\t\t}\n\t\t// unescape backslashes\n\t\tif in[i] == '\\\\' && i+1 < len(in) && 
in[i+1] == '\\\\' {\n\t\t\tout = append(out, '\\\\')\n\t\t\ti += 2\n\t\t\tcontinue\n\t\t}\n\t\t// unescape double-quotes\n\t\tif in[i] == '\\\\' && i+1 < len(in) && in[i+1] == '\"' {\n\t\t\tout = append(out, '\"')\n\t\t\ti += 2\n\t\t\tcontinue\n\t\t}\n\t\tout = append(out, in[i])\n\t\ti++\n\n\t}\n\treturn string(out)\n}\n\n// NewPoint returns a new point with the given measurement name, tags, fields and timestamp.  If\n// an unsupported field value (NaN) or out of range time is passed, this function returns an error.\nfunc NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error) {\n\tkey, err := pointKey(name, tags, fields, t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &point{\n\t\tkey:    key,\n\t\ttime:   t,\n\t\tfields: fields.MarshalBinary(),\n\t}, nil\n}\n\n// pointKey checks some basic requirements for valid points, and returns the\n// key, along with an possible error.\nfunc pointKey(measurement string, tags Tags, fields Fields, t time.Time) ([]byte, error) {\n\tif len(fields) == 0 {\n\t\treturn nil, ErrPointMustHaveAField\n\t}\n\n\tif !t.IsZero() {\n\t\tif err := CheckTime(t); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tfor key, value := range fields {\n\t\tswitch value := value.(type) {\n\t\tcase float64:\n\t\t\t// Ensure the caller validates and handles invalid field values\n\t\t\tif math.IsNaN(value) {\n\t\t\t\treturn nil, fmt.Errorf(\"NaN is an unsupported value for field %s\", key)\n\t\t\t}\n\t\tcase float32:\n\t\t\t// Ensure the caller validates and handles invalid field values\n\t\t\tif math.IsNaN(float64(value)) {\n\t\t\t\treturn nil, fmt.Errorf(\"NaN is an unsupported value for field %s\", key)\n\t\t\t}\n\t\t}\n\t\tif len(key) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"all fields must have non-empty names\")\n\t\t}\n\t}\n\n\tkey := MakeKey([]byte(measurement), tags)\n\tfor field := range fields {\n\t\tsz := seriesKeySize(key, []byte(field))\n\t\tif sz > MaxKeyLength {\n\t\t\treturn nil, fmt.Errorf(\"max key 
length exceeded: %v > %v\", sz, MaxKeyLength)\n\t\t}\n\t}\n\n\treturn key, nil\n}\n\nfunc seriesKeySize(key, field []byte) int {\n\t// 4 is the length of the tsm1.fieldKeySeparator constant.  It's inlined here to avoid a circular\n\t// dependency.\n\treturn len(key) + 4 + len(field)\n}\n\n// NewPointFromBytes returns a new Point from a marshalled Point.\nfunc NewPointFromBytes(b []byte) (Point, error) {\n\tp := &point{}\n\tif err := p.UnmarshalBinary(b); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// This does some basic validation to ensure there are fields and they\n\t// can be unmarshalled as well.\n\titer := p.FieldIterator()\n\tvar hasField bool\n\tfor iter.Next() {\n\t\tif len(iter.FieldKey()) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\thasField = true\n\t\tswitch iter.Type() {\n\t\tcase Float:\n\t\t\t_, err := iter.FloatValue()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"unable to unmarshal field %s: %s\", string(iter.FieldKey()), err)\n\t\t\t}\n\t\tcase Integer:\n\t\t\t_, err := iter.IntegerValue()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"unable to unmarshal field %s: %s\", string(iter.FieldKey()), err)\n\t\t\t}\n\t\tcase String:\n\t\t\t// Skip since this won't return an error\n\t\tcase Boolean:\n\t\t\t_, err := iter.BooleanValue()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"unable to unmarshal field %s: %s\", string(iter.FieldKey()), err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif !hasField {\n\t\treturn nil, ErrPointMustHaveAField\n\t}\n\n\treturn p, nil\n}\n\n// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp.  
If\n// an unsupported field value (NaN) is passed, this function panics.\nfunc MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point {\n\tpt, err := NewPoint(name, tags, fields, time)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn pt\n}\n\n// Key returns the key (measurement joined with tags) of the point.\nfunc (p *point) Key() []byte {\n\treturn p.key\n}\n\nfunc (p *point) name() []byte {\n\t_, name := scanTo(p.key, 0, ',')\n\treturn name\n}\n\nfunc (p *point) Name() []byte {\n\treturn escape.Unescape(p.name())\n}\n\n// SetName updates the measurement name for the point.\nfunc (p *point) SetName(name string) {\n\tp.cachedName = \"\"\n\tp.key = MakeKey([]byte(name), p.Tags())\n}\n\n// Time return the timestamp for the point.\nfunc (p *point) Time() time.Time {\n\treturn p.time\n}\n\n// SetTime updates the timestamp for the point.\nfunc (p *point) SetTime(t time.Time) {\n\tp.time = t\n}\n\n// Round will round the timestamp of the point to the given duration.\nfunc (p *point) Round(d time.Duration) {\n\tp.time = p.time.Round(d)\n}\n\n// Tags returns the tag set for the point.\nfunc (p *point) Tags() Tags {\n\tif p.cachedTags != nil {\n\t\treturn p.cachedTags\n\t}\n\tp.cachedTags = parseTags(p.key)\n\treturn p.cachedTags\n}\n\nfunc (p *point) HasTag(tag []byte) bool {\n\tif len(p.key) == 0 {\n\t\treturn false\n\t}\n\n\tvar exists bool\n\twalkTags(p.key, func(key, value []byte) bool {\n\t\tif bytes.Equal(tag, key) {\n\t\t\texists = true\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t})\n\n\treturn exists\n}\n\nfunc walkTags(buf []byte, fn func(key, value []byte) bool) {\n\tif len(buf) == 0 {\n\t\treturn\n\t}\n\n\tpos, name := scanTo(buf, 0, ',')\n\n\t// it's an empty key, so there are no tags\n\tif len(name) == 0 {\n\t\treturn\n\t}\n\n\thasEscape := bytes.IndexByte(buf, '\\\\') != -1\n\ti := pos + 1\n\tvar key, value []byte\n\tfor {\n\t\tif i >= len(buf) {\n\t\t\tbreak\n\t\t}\n\t\ti, key = scanTo(buf, i, '=')\n\t\ti, value = 
scanTagValue(buf, i+1)\n\n\t\tif len(value) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif hasEscape {\n\t\t\tif !fn(unescapeTag(key), unescapeTag(value)) {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tif !fn(key, value) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\ti++\n\t}\n}\n\n// walkFields walks each field key and value via fn.  If fn returns false, the iteration\n// is stopped.  The values are the raw byte slices and not the converted types.\nfunc walkFields(buf []byte, fn func(key, value []byte) bool) {\n\tvar i int\n\tvar key, val []byte\n\tfor len(buf) > 0 {\n\t\ti, key = scanTo(buf, 0, '=')\n\t\tbuf = buf[i+1:]\n\t\ti, val = scanFieldValue(buf, 0)\n\t\tbuf = buf[i:]\n\t\tif !fn(key, val) {\n\t\t\tbreak\n\t\t}\n\n\t\t// slice off comma\n\t\tif len(buf) > 0 {\n\t\t\tbuf = buf[1:]\n\t\t}\n\t}\n}\n\nfunc parseTags(buf []byte) Tags {\n\tif len(buf) == 0 {\n\t\treturn nil\n\t}\n\n\ttags := make(Tags, 0, bytes.Count(buf, []byte(\",\")))\n\twalkTags(buf, func(key, value []byte) bool {\n\t\ttags = append(tags, NewTag(key, value))\n\t\treturn true\n\t})\n\treturn tags\n}\n\n// MakeKey creates a key for a set of tags.\nfunc MakeKey(name []byte, tags Tags) []byte {\n\t// unescape the name and then re-escape it to avoid double escaping.\n\t// The key should always be stored in escaped form.\n\treturn append(escapeMeasurement(unescapeMeasurement(name)), tags.HashKey()...)\n}\n\n// SetTags replaces the tags for the point.\nfunc (p *point) SetTags(tags Tags) {\n\tp.key = MakeKey(p.Name(), tags)\n\tp.cachedTags = tags\n}\n\n// AddTag adds or replaces a tag value for a point.\nfunc (p *point) AddTag(key, value string) {\n\ttags := p.Tags()\n\ttags = append(tags, Tag{Key: []byte(key), Value: []byte(value)})\n\tsort.Sort(tags)\n\tp.cachedTags = tags\n\tp.key = MakeKey(p.Name(), tags)\n}\n\n// Fields returns the fields for the point.\nfunc (p *point) Fields() (Fields, error) {\n\tif p.cachedFields != nil {\n\t\treturn p.cachedFields, nil\n\t}\n\tcf, err := p.unmarshalBinary()\n\tif err 
!= nil {\n\t\treturn nil, err\n\t}\n\tp.cachedFields = cf\n\treturn p.cachedFields, nil\n}\n\n// SetPrecision will round a time to the specified precision.\nfunc (p *point) SetPrecision(precision string) {\n\tswitch precision {\n\tcase \"n\":\n\tcase \"u\":\n\t\tp.SetTime(p.Time().Truncate(time.Microsecond))\n\tcase \"ms\":\n\t\tp.SetTime(p.Time().Truncate(time.Millisecond))\n\tcase \"s\":\n\t\tp.SetTime(p.Time().Truncate(time.Second))\n\tcase \"m\":\n\t\tp.SetTime(p.Time().Truncate(time.Minute))\n\tcase \"h\":\n\t\tp.SetTime(p.Time().Truncate(time.Hour))\n\t}\n}\n\n// String returns the string representation of the point.\nfunc (p *point) String() string {\n\tif p.Time().IsZero() {\n\t\treturn string(p.Key()) + \" \" + string(p.fields)\n\t}\n\treturn string(p.Key()) + \" \" + string(p.fields) + \" \" + strconv.FormatInt(p.UnixNano(), 10)\n}\n\n// AppendString appends the string representation of the point to buf.\nfunc (p *point) AppendString(buf []byte) []byte {\n\tbuf = append(buf, p.key...)\n\tbuf = append(buf, ' ')\n\tbuf = append(buf, p.fields...)\n\n\tif !p.time.IsZero() {\n\t\tbuf = append(buf, ' ')\n\t\tbuf = strconv.AppendInt(buf, p.UnixNano(), 10)\n\t}\n\n\treturn buf\n}\n\n// StringSize returns the length of the string that would be returned by String().\nfunc (p *point) StringSize() int {\n\tsize := len(p.key) + len(p.fields) + 1\n\n\tif !p.time.IsZero() {\n\t\tdigits := 1 // even \"0\" has one digit\n\t\tt := p.UnixNano()\n\t\tif t < 0 {\n\t\t\t// account for negative sign, then negate\n\t\t\tdigits++\n\t\t\tt = -t\n\t\t}\n\t\tfor t > 9 { // already accounted for one digit\n\t\t\tdigits++\n\t\t\tt /= 10\n\t\t}\n\t\tsize += digits + 1 // digits and a space\n\t}\n\n\treturn size\n}\n\n// MarshalBinary returns a binary representation of the point.\nfunc (p *point) MarshalBinary() ([]byte, error) {\n\tif len(p.fields) == 0 {\n\t\treturn nil, ErrPointMustHaveAField\n\t}\n\n\ttb, err := p.time.MarshalBinary()\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\tb := make([]byte, 8+len(p.key)+len(p.fields)+len(tb))\n\ti := 0\n\n\tbinary.BigEndian.PutUint32(b[i:], uint32(len(p.key)))\n\ti += 4\n\n\ti += copy(b[i:], p.key)\n\n\tbinary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields)))\n\ti += 4\n\n\ti += copy(b[i:], p.fields)\n\n\tcopy(b[i:], tb)\n\treturn b, nil\n}\n\n// UnmarshalBinary decodes a binary representation of the point into a point struct.\nfunc (p *point) UnmarshalBinary(b []byte) error {\n\tvar n int\n\n\t// Read key length.\n\tif len(b) < 4 {\n\t\treturn io.ErrShortBuffer\n\t}\n\tn, b = int(binary.BigEndian.Uint32(b[:4])), b[4:]\n\n\t// Read key.\n\tif len(b) < n {\n\t\treturn io.ErrShortBuffer\n\t}\n\tp.key, b = b[:n], b[n:]\n\n\t// Read fields length.\n\tif len(b) < 4 {\n\t\treturn io.ErrShortBuffer\n\t}\n\tn, b = int(binary.BigEndian.Uint32(b[:4])), b[4:]\n\n\t// Read fields.\n\tif len(b) < n {\n\t\treturn io.ErrShortBuffer\n\t}\n\tp.fields, b = b[:n], b[n:]\n\n\t// Read timestamp.\n\tif err := p.time.UnmarshalBinary(b); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// PrecisionString returns a string representation of the point. If there\n// is a timestamp associated with the point then it will be specified in the\n// given unit.\nfunc (p *point) PrecisionString(precision string) string {\n\tif p.Time().IsZero() {\n\t\treturn fmt.Sprintf(\"%s %s\", p.Key(), string(p.fields))\n\t}\n\treturn fmt.Sprintf(\"%s %s %d\", p.Key(), string(p.fields),\n\t\tp.UnixNano()/GetPrecisionMultiplier(precision))\n}\n\n// RoundedString returns a string representation of the point. 
If there\n// is a timestamp associated with the point, then it will be rounded to the\n// given duration.\nfunc (p *point) RoundedString(d time.Duration) string {\n\tif p.Time().IsZero() {\n\t\treturn fmt.Sprintf(\"%s %s\", p.Key(), string(p.fields))\n\t}\n\treturn fmt.Sprintf(\"%s %s %d\", p.Key(), string(p.fields),\n\t\tp.time.Round(d).UnixNano())\n}\n\nfunc (p *point) unmarshalBinary() (Fields, error) {\n\titer := p.FieldIterator()\n\tfields := make(Fields, 8)\n\tfor iter.Next() {\n\t\tif len(iter.FieldKey()) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tswitch iter.Type() {\n\t\tcase Float:\n\t\t\tv, err := iter.FloatValue()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"unable to unmarshal field %s: %s\", string(iter.FieldKey()), err)\n\t\t\t}\n\t\t\tfields[string(iter.FieldKey())] = v\n\t\tcase Integer:\n\t\t\tv, err := iter.IntegerValue()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"unable to unmarshal field %s: %s\", string(iter.FieldKey()), err)\n\t\t\t}\n\t\t\tfields[string(iter.FieldKey())] = v\n\t\tcase String:\n\t\t\tfields[string(iter.FieldKey())] = iter.StringValue()\n\t\tcase Boolean:\n\t\t\tv, err := iter.BooleanValue()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"unable to unmarshal field %s: %s\", string(iter.FieldKey()), err)\n\t\t\t}\n\t\t\tfields[string(iter.FieldKey())] = v\n\t\t}\n\t}\n\treturn fields, nil\n}\n\n// HashID returns a non-cryptographic checksum of the point's key.\nfunc (p *point) HashID() uint64 {\n\th := NewInlineFNV64a()\n\th.Write(p.key)\n\tsum := h.Sum64()\n\treturn sum\n}\n\n// UnixNano returns the timestamp of the point as nanoseconds since Unix epoch.\nfunc (p *point) UnixNano() int64 {\n\treturn p.Time().UnixNano()\n}\n\n// Split will attempt to return multiple points with the same timestamp whose\n// string representations are no longer than size. 
Points with a single field or\n// a point without a timestamp may exceed the requested size.\nfunc (p *point) Split(size int) []Point {\n\tif p.time.IsZero() || len(p.String()) <= size {\n\t\treturn []Point{p}\n\t}\n\n\t// key string, timestamp string, spaces\n\tsize -= len(p.key) + len(strconv.FormatInt(p.time.UnixNano(), 10)) + 2\n\n\tvar points []Point\n\tvar start, cur int\n\n\tfor cur < len(p.fields) {\n\t\tend, _ := scanTo(p.fields, cur, '=')\n\t\tend, _ = scanFieldValue(p.fields, end+1)\n\n\t\tif cur > start && end-start > size {\n\t\t\tpoints = append(points, &point{\n\t\t\t\tkey:    p.key,\n\t\t\t\ttime:   p.time,\n\t\t\t\tfields: p.fields[start : cur-1],\n\t\t\t})\n\t\t\tstart = cur\n\t\t}\n\n\t\tcur = end + 1\n\t}\n\n\tpoints = append(points, &point{\n\t\tkey:    p.key,\n\t\ttime:   p.time,\n\t\tfields: p.fields[start:],\n\t})\n\n\treturn points\n}\n\n// Tag represents a single key/value tag pair.\ntype Tag struct {\n\tKey   []byte\n\tValue []byte\n}\n\n// NewTag returns a new Tag.\nfunc NewTag(key, value []byte) Tag {\n\treturn Tag{\n\t\tKey:   key,\n\t\tValue: value,\n\t}\n}\n\n// Size returns the size of the key and value.\nfunc (t Tag) Size() int { return len(t.Key) + len(t.Value) }\n\n// Clone returns a shallow copy of Tag.\n//\n// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed.\n// Use Clone to create a Tag with new byte slices that do not refer to the argument to ParsePointsWithPrecision.\nfunc (t Tag) Clone() Tag {\n\tother := Tag{\n\t\tKey:   make([]byte, len(t.Key)),\n\t\tValue: make([]byte, len(t.Value)),\n\t}\n\n\tcopy(other.Key, t.Key)\n\tcopy(other.Value, t.Value)\n\n\treturn other\n}\n\n// String returns the string reprsentation of the tag.\nfunc (t *Tag) String() string {\n\tvar buf bytes.Buffer\n\tbuf.WriteByte('{')\n\tbuf.WriteString(string(t.Key))\n\tbuf.WriteByte(' ')\n\tbuf.WriteString(string(t.Value))\n\tbuf.WriteByte('}')\n\treturn buf.String()\n}\n\n// Tags 
represents a sorted list of tags.\ntype Tags []Tag\n\n// NewTags returns a new Tags from a map.\nfunc NewTags(m map[string]string) Tags {\n\tif len(m) == 0 {\n\t\treturn nil\n\t}\n\ta := make(Tags, 0, len(m))\n\tfor k, v := range m {\n\t\ta = append(a, NewTag([]byte(k), []byte(v)))\n\t}\n\tsort.Sort(a)\n\treturn a\n}\n\n// String returns the string representation of the tags.\nfunc (a Tags) String() string {\n\tvar buf bytes.Buffer\n\tbuf.WriteByte('[')\n\tfor i := range a {\n\t\tbuf.WriteString(a[i].String())\n\t\tif i < len(a)-1 {\n\t\t\tbuf.WriteByte(' ')\n\t\t}\n\t}\n\tbuf.WriteByte(']')\n\treturn buf.String()\n}\n\n// Size returns the number of bytes needed to store all tags. Note, this is\n// the number of bytes needed to store all keys and values and does not account\n// for data structures or delimiters for example.\nfunc (a Tags) Size() int {\n\tvar total int\n\tfor _, t := range a {\n\t\ttotal += t.Size()\n\t}\n\treturn total\n}\n\n// Clone returns a copy of the slice where the elements are a result of calling `Clone` on the original elements\n//\n// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed.\n// Use Clone to create Tags with new byte slices that do not refer to the argument to ParsePointsWithPrecision.\nfunc (a Tags) Clone() Tags {\n\tif len(a) == 0 {\n\t\treturn nil\n\t}\n\n\tothers := make(Tags, len(a))\n\tfor i := range a {\n\t\tothers[i] = a[i].Clone()\n\t}\n\n\treturn others\n}\n\nfunc (a Tags) Len() int           { return len(a) }\nfunc (a Tags) Less(i, j int) bool { return bytes.Compare(a[i].Key, a[j].Key) == -1 }\nfunc (a Tags) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\n\n// Equal returns true if a equals other.\nfunc (a Tags) Equal(other Tags) bool {\n\tif len(a) != len(other) {\n\t\treturn false\n\t}\n\tfor i := range a {\n\t\tif !bytes.Equal(a[i].Key, other[i].Key) || !bytes.Equal(a[i].Value, other[i].Value) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn 
true\n}\n\n// CompareTags returns -1 if a < b, 1 if a > b, and 0 if a == b.\nfunc CompareTags(a, b Tags) int {\n\t// Compare each key & value until a mismatch.\n\tfor i := 0; i < len(a) && i < len(b); i++ {\n\t\tif cmp := bytes.Compare(a[i].Key, b[i].Key); cmp != 0 {\n\t\t\treturn cmp\n\t\t}\n\t\tif cmp := bytes.Compare(a[i].Value, b[i].Value); cmp != 0 {\n\t\t\treturn cmp\n\t\t}\n\t}\n\n\t// If all tags are equal up to this point then return shorter tagset.\n\tif len(a) < len(b) {\n\t\treturn -1\n\t} else if len(a) > len(b) {\n\t\treturn 1\n\t}\n\n\t// All tags are equal.\n\treturn 0\n}\n\n// Get returns the value for a key.\nfunc (a Tags) Get(key []byte) []byte {\n\t// OPTIMIZE: Use sort.Search if tagset is large.\n\n\tfor _, t := range a {\n\t\tif bytes.Equal(t.Key, key) {\n\t\t\treturn t.Value\n\t\t}\n\t}\n\treturn nil\n}\n\n// GetString returns the string value for a string key.\nfunc (a Tags) GetString(key string) string {\n\treturn string(a.Get([]byte(key)))\n}\n\n// Set sets the value for a key.\nfunc (a *Tags) Set(key, value []byte) {\n\tfor i, t := range *a {\n\t\tif bytes.Equal(t.Key, key) {\n\t\t\t(*a)[i].Value = value\n\t\t\treturn\n\t\t}\n\t}\n\t*a = append(*a, Tag{Key: key, Value: value})\n\tsort.Sort(*a)\n}\n\n// SetString sets the string value for a string key.\nfunc (a *Tags) SetString(key, value string) {\n\ta.Set([]byte(key), []byte(value))\n}\n\n// Delete removes a tag by key.\nfunc (a *Tags) Delete(key []byte) {\n\tfor i, t := range *a {\n\t\tif bytes.Equal(t.Key, key) {\n\t\t\tcopy((*a)[i:], (*a)[i+1:])\n\t\t\t(*a)[len(*a)-1] = Tag{}\n\t\t\t*a = (*a)[:len(*a)-1]\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// Map returns a map representation of the tags.\nfunc (a Tags) Map() map[string]string {\n\tm := make(map[string]string, len(a))\n\tfor _, t := range a {\n\t\tm[string(t.Key)] = string(t.Value)\n\t}\n\treturn m\n}\n\n// Merge merges the tags combining the two. 
If both define a tag with the\n// same key, the merged value overwrites the old value.\n// A new map is returned.\nfunc (a Tags) Merge(other map[string]string) Tags {\n\tmerged := make(map[string]string, len(a)+len(other))\n\tfor _, t := range a {\n\t\tmerged[string(t.Key)] = string(t.Value)\n\t}\n\tfor k, v := range other {\n\t\tmerged[k] = v\n\t}\n\treturn NewTags(merged)\n}\n\n// HashKey hashes all of a tag's keys.\nfunc (a Tags) HashKey() []byte {\n\t// Empty maps marshal to empty bytes.\n\tif len(a) == 0 {\n\t\treturn nil\n\t}\n\n\t// Type invariant: Tags are sorted\n\n\tescaped := make(Tags, 0, len(a))\n\tsz := 0\n\tfor _, t := range a {\n\t\tek := escapeTag(t.Key)\n\t\tev := escapeTag(t.Value)\n\n\t\tif len(ev) > 0 {\n\t\t\tescaped = append(escaped, Tag{Key: ek, Value: ev})\n\t\t\tsz += len(ek) + len(ev)\n\t\t}\n\t}\n\n\tsz += len(escaped) + (len(escaped) * 2) // separators\n\n\t// Generate marshaled bytes.\n\tb := make([]byte, sz)\n\tbuf := b\n\tidx := 0\n\tfor _, k := range escaped {\n\t\tbuf[idx] = ','\n\t\tidx++\n\t\tcopy(buf[idx:idx+len(k.Key)], k.Key)\n\t\tidx += len(k.Key)\n\t\tbuf[idx] = '='\n\t\tidx++\n\t\tcopy(buf[idx:idx+len(k.Value)], k.Value)\n\t\tidx += len(k.Value)\n\t}\n\treturn b[:idx]\n}\n\n// CopyTags returns a shallow copy of tags.\nfunc CopyTags(a Tags) Tags {\n\tother := make(Tags, len(a))\n\tcopy(other, a)\n\treturn other\n}\n\n// DeepCopyTags returns a deep copy of tags.\nfunc DeepCopyTags(a Tags) Tags {\n\t// Calculate size of keys/values in bytes.\n\tvar n int\n\tfor _, t := range a {\n\t\tn += len(t.Key) + len(t.Value)\n\t}\n\n\t// Build single allocation for all key/values.\n\tbuf := make([]byte, n)\n\n\t// Copy tags to new set.\n\tother := make(Tags, len(a))\n\tfor i, t := range a {\n\t\tcopy(buf, t.Key)\n\t\tother[i].Key, buf = buf[:len(t.Key)], buf[len(t.Key):]\n\n\t\tcopy(buf, t.Value)\n\t\tother[i].Value, buf = buf[:len(t.Value)], buf[len(t.Value):]\n\t}\n\n\treturn other\n}\n\n// Fields represents a mapping between a Point's 
field names and their\n// values.\ntype Fields map[string]interface{}\n\n// FieldIterator retuns a FieldIterator that can be used to traverse the\n// fields of a point without constructing the in-memory map.\nfunc (p *point) FieldIterator() FieldIterator {\n\tp.Reset()\n\treturn p\n}\n\ntype fieldIterator struct {\n\tstart, end  int\n\tkey, keybuf []byte\n\tvalueBuf    []byte\n\tfieldType   FieldType\n}\n\n// Next indicates whether there any fields remaining.\nfunc (p *point) Next() bool {\n\tp.it.start = p.it.end\n\tif p.it.start >= len(p.fields) {\n\t\treturn false\n\t}\n\n\tp.it.end, p.it.key = scanTo(p.fields, p.it.start, '=')\n\tif escape.IsEscaped(p.it.key) {\n\t\tp.it.keybuf = escape.AppendUnescaped(p.it.keybuf[:0], p.it.key)\n\t\tp.it.key = p.it.keybuf\n\t}\n\n\tp.it.end, p.it.valueBuf = scanFieldValue(p.fields, p.it.end+1)\n\tp.it.end++\n\n\tif len(p.it.valueBuf) == 0 {\n\t\tp.it.fieldType = Empty\n\t\treturn true\n\t}\n\n\tc := p.it.valueBuf[0]\n\n\tif c == '\"' {\n\t\tp.it.fieldType = String\n\t\treturn true\n\t}\n\n\tif strings.IndexByte(`0123456789-.nNiI`, c) >= 0 {\n\t\tif p.it.valueBuf[len(p.it.valueBuf)-1] == 'i' {\n\t\t\tp.it.fieldType = Integer\n\t\t\tp.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1]\n\t\t} else {\n\t\t\tp.it.fieldType = Float\n\t\t}\n\t\treturn true\n\t}\n\n\t// to keep the same behavior that currently exists, default to boolean\n\tp.it.fieldType = Boolean\n\treturn true\n}\n\n// FieldKey returns the key of the current field.\nfunc (p *point) FieldKey() []byte {\n\treturn p.it.key\n}\n\n// Type returns the FieldType of the current field.\nfunc (p *point) Type() FieldType {\n\treturn p.it.fieldType\n}\n\n// StringValue returns the string value of the current field.\nfunc (p *point) StringValue() string {\n\treturn unescapeStringField(string(p.it.valueBuf[1 : len(p.it.valueBuf)-1]))\n}\n\n// IntegerValue returns the integer value of the current field.\nfunc (p *point) IntegerValue() (int64, error) {\n\tn, err := 
parseIntBytes(p.it.valueBuf, 10, 64)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to parse integer value %q: %v\", p.it.valueBuf, err)\n\t}\n\treturn n, nil\n}\n\n// BooleanValue returns the boolean value of the current field.\nfunc (p *point) BooleanValue() (bool, error) {\n\tb, err := parseBoolBytes(p.it.valueBuf)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"unable to parse bool value %q: %v\", p.it.valueBuf, err)\n\t}\n\treturn b, nil\n}\n\n// FloatValue returns the float value of the current field.\nfunc (p *point) FloatValue() (float64, error) {\n\tf, err := parseFloatBytes(p.it.valueBuf, 64)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"unable to parse floating point value %q: %v\", p.it.valueBuf, err)\n\t}\n\treturn f, nil\n}\n\n// Reset resets the iterator to its initial state.\nfunc (p *point) Reset() {\n\tp.it.fieldType = Empty\n\tp.it.key = nil\n\tp.it.valueBuf = nil\n\tp.it.start = 0\n\tp.it.end = 0\n}\n\n// MarshalBinary encodes all the fields to their proper type and returns the binary\n// represenation\n// NOTE: uint64 is specifically not supported due to potential overflow when we decode\n// again later to an int64\n// NOTE2: uint is accepted, and may be 64 bits, and is for some reason accepted...\nfunc (p Fields) MarshalBinary() []byte {\n\tvar b []byte\n\tkeys := make([]string, 0, len(p))\n\n\tfor k := range p {\n\t\tkeys = append(keys, k)\n\t}\n\n\t// Not really necessary, can probably be removed.\n\tsort.Strings(keys)\n\n\tfor i, k := range keys {\n\t\tif i > 0 {\n\t\t\tb = append(b, ',')\n\t\t}\n\t\tb = appendField(b, k, p[k])\n\t}\n\n\treturn b\n}\n\nfunc appendField(b []byte, k string, v interface{}) []byte {\n\tb = append(b, []byte(escape.String(k))...)\n\tb = append(b, '=')\n\n\t// check popular types first\n\tswitch v := v.(type) {\n\tcase float64:\n\t\tb = strconv.AppendFloat(b, v, 'f', -1, 64)\n\tcase int64:\n\t\tb = strconv.AppendInt(b, v, 10)\n\t\tb = append(b, 'i')\n\tcase string:\n\t\tb = append(b, '\"')\n\t\tb = 
append(b, []byte(EscapeStringField(v))...)\n\t\tb = append(b, '\"')\n\tcase bool:\n\t\tb = strconv.AppendBool(b, v)\n\tcase int32:\n\t\tb = strconv.AppendInt(b, int64(v), 10)\n\t\tb = append(b, 'i')\n\tcase int16:\n\t\tb = strconv.AppendInt(b, int64(v), 10)\n\t\tb = append(b, 'i')\n\tcase int8:\n\t\tb = strconv.AppendInt(b, int64(v), 10)\n\t\tb = append(b, 'i')\n\tcase int:\n\t\tb = strconv.AppendInt(b, int64(v), 10)\n\t\tb = append(b, 'i')\n\tcase uint32:\n\t\tb = strconv.AppendInt(b, int64(v), 10)\n\t\tb = append(b, 'i')\n\tcase uint16:\n\t\tb = strconv.AppendInt(b, int64(v), 10)\n\t\tb = append(b, 'i')\n\tcase uint8:\n\t\tb = strconv.AppendInt(b, int64(v), 10)\n\t\tb = append(b, 'i')\n\t// TODO: 'uint' should be considered just as \"dangerous\" as a uint64,\n\t// perhaps the value should be checked and capped at MaxInt64? We could\n\t// then include uint64 as an accepted value\n\tcase uint:\n\t\tb = strconv.AppendInt(b, int64(v), 10)\n\t\tb = append(b, 'i')\n\tcase float32:\n\t\tb = strconv.AppendFloat(b, float64(v), 'f', -1, 32)\n\tcase []byte:\n\t\tb = append(b, v...)\n\tcase nil:\n\t\t// skip\n\tdefault:\n\t\t// Can't determine the type, so convert to string\n\t\tb = append(b, '\"')\n\t\tb = append(b, []byte(EscapeStringField(fmt.Sprintf(\"%v\", v)))...)\n\t\tb = append(b, '\"')\n\n\t}\n\n\treturn b\n}\n\ntype byteSlices [][]byte\n\nfunc (a byteSlices) Len() int           { return len(a) }\nfunc (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 }\nfunc (a byteSlices) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/models/points_internal_test.go",
    "content": "package models\n\nimport \"testing\"\n\nfunc TestMarshalPointNoFields(t *testing.T) {\n\tpoints, err := ParsePointsString(\"m,k=v f=0i\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// It's unclear how this can ever happen, but we've observed points that were marshalled without any fields.\n\tpoints[0].(*point).fields = []byte{}\n\n\tif _, err := points[0].MarshalBinary(); err != ErrPointMustHaveAField {\n\t\tt.Fatalf(\"got error %v, exp %v\", err, ErrPointMustHaveAField)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/models/points_test.go",
    "content": "package models_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"math/rand\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n)\n\nvar (\n\ttags   = models.NewTags(map[string]string{\"foo\": \"bar\", \"apple\": \"orange\", \"host\": \"serverA\", \"region\": \"uswest\"})\n\tfields = models.Fields{\n\t\t\"int64\":         int64(math.MaxInt64),\n\t\t\"uint32\":        uint32(math.MaxUint32),\n\t\t\"string\":        \"String field that has a decent length, probably some log message or something\",\n\t\t\"boolean\":       false,\n\t\t\"float64-tiny\":  float64(math.SmallestNonzeroFloat64),\n\t\t\"float64-large\": float64(math.MaxFloat64),\n\t}\n\tmaxFloat64 = strconv.FormatFloat(math.MaxFloat64, 'f', 1, 64)\n\tminFloat64 = strconv.FormatFloat(-math.MaxFloat64, 'f', 1, 64)\n\n\tsink interface{}\n)\n\nfunc TestMarshal(t *testing.T) {\n\tgot := tags.HashKey()\n\tif exp := \",apple=orange,foo=bar,host=serverA,region=uswest\"; string(got) != exp {\n\t\tt.Log(\"got: \", string(got))\n\t\tt.Log(\"exp: \", exp)\n\t\tt.Error(\"invalid match\")\n\t}\n}\n\nfunc TestTags_HashKey(t *testing.T) {\n\ttags = models.NewTags(map[string]string{\"A FOO\": \"bar\", \"APPLE\": \"orange\", \"host\": \"serverA\", \"region\": \"uswest\"})\n\tgot := tags.HashKey()\n\tif exp := \",A\\\\ FOO=bar,APPLE=orange,host=serverA,region=uswest\"; string(got) != exp {\n\t\tt.Log(\"got: \", string(got))\n\t\tt.Log(\"exp: \", exp)\n\t\tt.Error(\"invalid match\")\n\t}\n}\n\nfunc BenchmarkMarshal(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\ttags.HashKey()\n\t}\n}\n\nfunc TestPoint_StringSize(t *testing.T) {\n\ttestPoint_cube(t, func(p models.Point) {\n\t\tl := p.StringSize()\n\t\ts := p.String()\n\n\t\tif l != len(s) {\n\t\t\tt.Errorf(\"Incorrect length for %q. 
got %v, exp %v\", s, l, len(s))\n\t\t}\n\t})\n\n}\n\nfunc TestPoint_AppendString(t *testing.T) {\n\ttestPoint_cube(t, func(p models.Point) {\n\t\tgot := p.AppendString(nil)\n\t\texp := []byte(p.String())\n\n\t\tif !reflect.DeepEqual(exp, got) {\n\t\t\tt.Errorf(\"AppendString() didn't match String(): got %v, exp %v\", got, exp)\n\t\t}\n\t})\n}\n\nfunc testPoint_cube(t *testing.T, f func(p models.Point)) {\n\t// heard of a table-driven test? let's make a cube-driven test...\n\ttagList := []models.Tags{nil, {models.NewTag([]byte(\"foo\"), []byte(\"bar\"))}, tags}\n\tfieldList := []models.Fields{{\"a\": 42.0}, {\"a\": 42, \"b\": \"things\"}, fields}\n\ttimeList := []time.Time{time.Time{}, time.Unix(0, 0), time.Unix(-34526, 0), time.Unix(231845, 0), time.Now()}\n\n\tfor _, tagSet := range tagList {\n\t\tfor _, fieldSet := range fieldList {\n\t\t\tfor _, pointTime := range timeList {\n\t\t\t\tp, err := models.NewPoint(\"test\", tagSet, fieldSet, pointTime)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"unexpected error creating point: %v\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tf(p)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestTag_Clone(t *testing.T) {\n\ttag := models.NewTag([]byte(\"key\"), []byte(\"value\"))\n\n\tc := tag.Clone()\n\n\tif &c.Key == &tag.Key || !bytes.Equal(c.Key, tag.Key) {\n\t\tt.Fatalf(\"key %s should have been a clone of %s\", c.Key, tag.Key)\n\t}\n\n\tif &c.Value == &tag.Value || !bytes.Equal(c.Value, tag.Value) {\n\t\tt.Fatalf(\"value %s should have been a clone of %s\", c.Value, tag.Value)\n\t}\n}\n\nfunc TestTags_Clone(t *testing.T) {\n\ttags := models.NewTags(map[string]string{\"k1\": \"v1\", \"k2\": \"v2\", \"k3\": \"v3\"})\n\n\tclone := tags.Clone()\n\n\tfor i := range tags {\n\t\ttag := tags[i]\n\t\tc := clone[i]\n\t\tif &c.Key == &tag.Key || !bytes.Equal(c.Key, tag.Key) {\n\t\t\tt.Fatalf(\"key %s should have been a clone of %s\", c.Key, tag.Key)\n\t\t}\n\n\t\tif &c.Value == &tag.Value || !bytes.Equal(c.Value, tag.Value) 
{\n\t\t\tt.Fatalf(\"value %s should have been a clone of %s\", c.Value, tag.Value)\n\t\t}\n\t}\n}\n\nvar p models.Point\n\nfunc BenchmarkNewPoint(b *testing.B) {\n\tts := time.Now()\n\tfor i := 0; i < b.N; i++ {\n\t\tp, _ = models.NewPoint(\"measurement\", tags, fields, ts)\n\t}\n}\n\nfunc BenchmarkNewPointFromBinary(b *testing.B) {\n\tpts, err := models.ParsePointsString(\"cpu value1=1.0,value2=1.0,value3=3.0,value4=4,value5=\\\"five\\\" 1000000000\")\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error ParsePointsString: %v\", err)\n\t}\n\n\tbytes, err := pts[0].MarshalBinary()\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error MarshalBinary: %v\", err)\n\t}\n\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err := models.NewPointFromBytes(bytes)\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"unexpected error NewPointsFromBytes: %v\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkParsePointNoTags5000(b *testing.B) {\n\tvar batch [5000]string\n\tfor i := 0; i < len(batch); i++ {\n\t\tbatch[i] = `cpu value=1i 1000000000`\n\t}\n\tlines := strings.Join(batch[:], \"\\n\")\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tmodels.ParsePoints([]byte(lines))\n\t\tb.SetBytes(int64(len(lines)))\n\t}\n}\n\nfunc BenchmarkParsePointNoTags(b *testing.B) {\n\tline := `cpu value=1i 1000000000`\n\tfor i := 0; i < b.N; i++ {\n\t\tmodels.ParsePoints([]byte(line))\n\t\tb.SetBytes(int64(len(line)))\n\t}\n}\n\nfunc BenchmarkParsePointWithPrecisionN(b *testing.B) {\n\tline := `cpu value=1i 1000000000`\n\tdefaultTime := time.Now().UTC()\n\tfor i := 0; i < b.N; i++ {\n\t\tmodels.ParsePointsWithPrecision([]byte(line), defaultTime, \"n\")\n\t\tb.SetBytes(int64(len(line)))\n\t}\n}\n\nfunc BenchmarkParsePointWithPrecisionU(b *testing.B) {\n\tline := `cpu value=1i 1000000000`\n\tdefaultTime := time.Now().UTC()\n\tfor i := 0; i < b.N; i++ {\n\t\tmodels.ParsePointsWithPrecision([]byte(line), defaultTime, \"u\")\n\t\tb.SetBytes(int64(len(line)))\n\t}\n}\n\nfunc BenchmarkParsePointsTagsSorted2(b *testing.B) {\n\tline := 
`cpu,host=serverA,region=us-west value=1i 1000000000`\n\tfor i := 0; i < b.N; i++ {\n\t\tmodels.ParsePoints([]byte(line))\n\t\tb.SetBytes(int64(len(line)))\n\t}\n}\n\nfunc BenchmarkParsePointsTagsSorted5(b *testing.B) {\n\tline := `cpu,env=prod,host=serverA,region=us-west,target=servers,zone=1c value=1i 1000000000`\n\tfor i := 0; i < b.N; i++ {\n\t\tmodels.ParsePoints([]byte(line))\n\t\tb.SetBytes(int64(len(line)))\n\t}\n}\n\nfunc BenchmarkParsePointsTagsSorted10(b *testing.B) {\n\tline := `cpu,env=prod,host=serverA,region=us-west,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5,target=servers,zone=1c value=1i 1000000000`\n\tfor i := 0; i < b.N; i++ {\n\t\tmodels.ParsePoints([]byte(line))\n\t\tb.SetBytes(int64(len(line)))\n\t}\n}\n\nfunc BenchmarkParsePointsTagsUnSorted2(b *testing.B) {\n\tline := `cpu,region=us-west,host=serverA value=1i 1000000000`\n\tfor i := 0; i < b.N; i++ {\n\t\tpt, _ := models.ParsePoints([]byte(line))\n\t\tb.SetBytes(int64(len(line)))\n\t\tpt[0].Key()\n\t}\n}\n\nfunc BenchmarkParsePointsTagsUnSorted5(b *testing.B) {\n\tline := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c value=1i 1000000000`\n\tfor i := 0; i < b.N; i++ {\n\t\tpt, _ := models.ParsePoints([]byte(line))\n\t\tb.SetBytes(int64(len(line)))\n\t\tpt[0].Key()\n\t}\n}\n\nfunc BenchmarkParsePointsTagsUnSorted10(b *testing.B) {\n\tline := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5 value=1i 1000000000`\n\tfor i := 0; i < b.N; i++ {\n\t\tpt, _ := models.ParsePoints([]byte(line))\n\t\tb.SetBytes(int64(len(line)))\n\t\tpt[0].Key()\n\t}\n}\n\nfunc BenchmarkParseKey(b *testing.B) {\n\tline := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5`\n\tfor i := 0; i < b.N; i++ {\n\t\tmodels.ParseKey([]byte(line))\n\t}\n}\n\n// TestPoint wraps a models.Point but also makes available the raw\n// arguments to the 
Point.\n//\n// This is useful for ensuring that comparisons between results of\n// operations on Points match the expected input data to the Point,\n// since models.Point does not expose the raw input data (e.g., tags)\n// via its API.\ntype TestPoint struct {\n\tRawFields models.Fields\n\tRawTags   models.Tags\n\tRawTime   time.Time\n\tmodels.Point\n}\n\n// NewTestPoint returns a new TestPoint.\n//\n// NewTestPoint panics if it is not a valid models.Point.\nfunc NewTestPoint(name string, tags models.Tags, fields models.Fields, time time.Time) TestPoint {\n\treturn TestPoint{\n\t\tRawTags:   tags,\n\t\tRawFields: fields,\n\t\tRawTime:   time,\n\t\tPoint:     models.MustNewPoint(name, tags, fields, time),\n\t}\n}\n\nfunc test(t *testing.T, line string, point TestPoint) {\n\tpts, err := models.ParsePointsWithPrecision([]byte(line), time.Unix(0, 0), \"n\")\n\tif err != nil {\n\t\tt.Fatalf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, line, err)\n\t}\n\n\tif exp := 1; len(pts) != exp {\n\t\tt.Fatalf(`ParsePoints(\"%s\") len mismatch. got %d, exp %d`, line, len(pts), exp)\n\t}\n\n\tif exp := point.Key(); !bytes.Equal(pts[0].Key(), exp) {\n\t\tt.Errorf(\"ParsePoints(\\\"%s\\\") key mismatch.\\ngot %v\\nexp %v\", line, string(pts[0].Key()), string(exp))\n\t}\n\n\tif exp := len(point.Tags()); len(pts[0].Tags()) != exp {\n\t\tt.Errorf(`ParsePoints(\"%s\") tags mismatch. got %v, exp %v`, line, pts[0].Tags(), exp)\n\t}\n\n\tfor _, tag := range pts[0].Tags() {\n\t\tif !bytes.Equal(tag.Value, point.RawTags.Get(tag.Key)) {\n\t\t\tt.Errorf(`ParsePoints(\"%s\") tags mismatch. 
got %s, exp %s`, line, tag.Value, point.RawTags.Get(tag.Key))\n\t\t}\n\t}\n\n\tfor name, value := range point.RawFields {\n\t\tfields, err := pts[0].Fields()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tval := fields[name]\n\t\texpfval, ok := val.(float64)\n\n\t\tif ok && math.IsNaN(expfval) {\n\t\t\tgotfval, ok := value.(float64)\n\t\t\tif ok && !math.IsNaN(gotfval) {\n\t\t\t\tt.Errorf(`ParsePoints(\"%s\") field '%s' mismatch. exp NaN`, line, name)\n\t\t\t}\n\t\t}\n\t\tif !reflect.DeepEqual(val, value) {\n\t\t\tt.Errorf(`ParsePoints(\"%s\") field '%s' mismatch. got %[3]v (%[3]T), exp %[4]v (%[4]T)`, line, name, val, value)\n\t\t}\n\t}\n\n\tif !pts[0].Time().Equal(point.Time()) {\n\t\tt.Errorf(`ParsePoints(\"%s\") time mismatch. got %v, exp %v`, line, pts[0].Time(), point.Time())\n\t}\n\n\tif !strings.HasPrefix(pts[0].String(), line) {\n\t\tt.Errorf(\"ParsePoints string mismatch.\\ngot: %v\\nexp: %v\", pts[0].String(), line)\n\t}\n}\n\nfunc TestParsePointNoValue(t *testing.T) {\n\tpts, err := models.ParsePointsString(\"\")\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, \"\", err)\n\t}\n\n\tif exp := 0; len(pts) != exp {\n\t\tt.Errorf(`ParsePoints(\"%s\") len mismatch. got %v, exp %v`, \"\", len(pts), exp)\n\t}\n}\n\nfunc TestParsePointWhitespaceValue(t *testing.T) {\n\tpts, err := models.ParsePointsString(\" \")\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, \"\", err)\n\t}\n\n\tif exp := 0; len(pts) != exp {\n\t\tt.Errorf(`ParsePoints(\"%s\") len mismatch. got %v, exp %v`, \"\", len(pts), exp)\n\t}\n}\n\nfunc TestParsePointNoFields(t *testing.T) {\n\texpectedSuffix := \"missing fields\"\n\texamples := []string{\n\t\t\"cpu_load_short,host=server01,region=us-west\",\n\t\t\"cpu\",\n\t\t\"cpu,host==\",\n\t\t\"=\",\n\t}\n\n\tfor i, example := range examples {\n\t\t_, err := models.ParsePointsString(example)\n\t\tif err == nil {\n\t\t\tt.Errorf(`[Example %d] ParsePoints(\"%s\") mismatch. 
got nil, exp error`, i, example)\n\t\t} else if !strings.HasSuffix(err.Error(), expectedSuffix) {\n\t\t\tt.Errorf(`[Example %d] ParsePoints(\"%s\") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix)\n\t\t}\n\t}\n}\n\nfunc TestParsePointNoTimestamp(t *testing.T) {\n\ttest(t, \"cpu value=1\", NewTestPoint(\"cpu\", nil, models.Fields{\"value\": 1.0}, time.Unix(0, 0)))\n}\n\nfunc TestParsePointMissingQuote(t *testing.T) {\n\texpectedSuffix := \"unbalanced quotes\"\n\texamples := []string{\n\t\t`cpu,host=serverA value=\"test`,\n\t\t`cpu,host=serverA value=\"test\"\"`,\n\t}\n\n\tfor i, example := range examples {\n\t\t_, err := models.ParsePointsString(example)\n\t\tif err == nil {\n\t\t\tt.Errorf(`[Example %d] ParsePoints(\"%s\") mismatch. got nil, exp error`, i, example)\n\t\t} else if !strings.HasSuffix(err.Error(), expectedSuffix) {\n\t\t\tt.Errorf(`[Example %d] ParsePoints(\"%s\") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix)\n\t\t}\n\t}\n}\n\nfunc TestParsePointMissingTagKey(t *testing.T) {\n\texpectedSuffix := \"missing tag key\"\n\texamples := []string{\n\t\t`cpu, value=1`,\n\t\t`cpu,`,\n\t\t`cpu,,,`,\n\t\t`cpu,host=serverA,=us-east value=1i`,\n\t\t`cpu,host=serverAa\\,,=us-east value=1i`,\n\t\t`cpu,host=serverA\\,,=us-east value=1i`,\n\t\t`cpu, =serverA value=1i`,\n\t}\n\n\tfor i, example := range examples {\n\t\t_, err := models.ParsePointsString(example)\n\t\tif err == nil {\n\t\t\tt.Errorf(`[Example %d] ParsePoints(\"%s\") mismatch. got nil, exp error`, i, example)\n\t\t} else if !strings.HasSuffix(err.Error(), expectedSuffix) {\n\t\t\tt.Errorf(`[Example %d] ParsePoints(\"%s\") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix)\n\t\t}\n\t}\n\n\t_, err := models.ParsePointsString(`cpu,host=serverA,\\ =us-east value=1i`)\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. 
got %v, exp nil`, `cpu,host=serverA,\\ =us-east value=1i`, err)\n\t}\n}\n\nfunc TestParsePointMissingTagValue(t *testing.T) {\n\texpectedSuffix := \"missing tag value\"\n\texamples := []string{\n\t\t`cpu,host`,\n\t\t`cpu,host,`,\n\t\t`cpu,host=`,\n\t\t`cpu,host value=1i`,\n\t\t`cpu,host=serverA,region value=1i`,\n\t\t`cpu,host=serverA,region= value=1i`,\n\t\t`cpu,host=serverA,region=,zone=us-west value=1i`,\n\t}\n\n\tfor i, example := range examples {\n\t\t_, err := models.ParsePointsString(example)\n\t\tif err == nil {\n\t\t\tt.Errorf(`[Example %d] ParsePoints(\"%s\") mismatch. got nil, exp error`, i, example)\n\t\t} else if !strings.HasSuffix(err.Error(), expectedSuffix) {\n\t\t\tt.Errorf(`[Example %d] ParsePoints(\"%s\") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix)\n\t\t}\n\t}\n}\n\nfunc TestParsePointInvalidTagFormat(t *testing.T) {\n\texpectedSuffix := \"invalid tag format\"\n\texamples := []string{\n\t\t`cpu,host=f=o,`,\n\t\t`cpu,host=f\\==o,`,\n\t}\n\n\tfor i, example := range examples {\n\t\t_, err := models.ParsePointsString(example)\n\t\tif err == nil {\n\t\t\tt.Errorf(`[Example %d] ParsePoints(\"%s\") mismatch. got nil, exp error`, i, example)\n\t\t} else if !strings.HasSuffix(err.Error(), expectedSuffix) {\n\t\t\tt.Errorf(`[Example %d] ParsePoints(\"%s\") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix)\n\t\t}\n\t}\n}\n\nfunc TestParsePointMissingFieldName(t *testing.T) {\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west =`)\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west =`)\n\t}\n\n\t_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west =123i`)\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. 
got nil, exp error`, `cpu,host=serverA,region=us-west =123i`)\n\t}\n\n\t_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west a\\ =123i`)\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west a\\ =123i`)\n\t}\n\t_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=123i,=456i`)\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=123i,=456i`)\n\t}\n}\n\nfunc TestParsePointMissingFieldValue(t *testing.T) {\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=`)\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=`)\n\t}\n\n\t_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value= 1000000000i`)\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value= 1000000000i`)\n\t}\n\n\t_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=,value2=1i`)\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=,value2=1i`)\n\t}\n\n\t_, err = models.ParsePointsString(`cpu,host=server01,region=us-west 1434055562000000000i`)\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got nil, exp error`, `cpu,host=server01,region=us-west 1434055562000000000i`)\n\t}\n\n\t_, err = models.ParsePointsString(`cpu,host=server01,region=us-west value=1i,b`)\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got nil, exp error`, `cpu,host=server01,region=us-west value=1i,b`)\n\t}\n}\n\nfunc TestParsePointBadNumber(t *testing.T) {\n\tfor _, tt := range []string{\n\t\t\"cpu v=- \",\n\t\t\"cpu v=-i \",\n\t\t\"cpu v=-. \",\n\t\t\"cpu v=. 
\",\n\t\t\"cpu v=1.0i \",\n\t\t\"cpu v=1ii \",\n\t\t\"cpu v=1a \",\n\t\t\"cpu v=-e-e-e \",\n\t\t\"cpu v=42+3 \",\n\t\t\"cpu v= \",\n\t} {\n\t\t_, err := models.ParsePointsString(tt)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Point %q should be invalid\", tt)\n\t\t}\n\t}\n}\n\nfunc TestParsePointMaxInt64(t *testing.T) {\n\t// out of range\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9223372036854775808i`)\n\texp := `unable to parse 'cpu,host=serverA,region=us-west value=9223372036854775808i': unable to parse integer 9223372036854775808: strconv.ParseInt: parsing \"9223372036854775808\": value out of range`\n\tif err == nil || (err != nil && err.Error() != exp) {\n\t\tt.Fatalf(\"Error mismatch:\\nexp: %s\\ngot: %v\", exp, err)\n\t}\n\n\t// max int\n\tp, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9223372036854775807i`)\n\tif err != nil {\n\t\tt.Fatalf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=9223372036854775807i`, err)\n\t}\n\tfields, err := p[0].Fields()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif exp, got := int64(9223372036854775807), fields[\"value\"].(int64); exp != got {\n\t\tt.Fatalf(\"ParsePoints Value mismatch. \\nexp: %v\\ngot: %v\", exp, got)\n\t}\n\n\t// leading zeros\n\t_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=0009223372036854775807i`)\n\tif err != nil {\n\t\tt.Fatalf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0009223372036854775807i`, err)\n\t}\n}\n\nfunc TestParsePointMinInt64(t *testing.T) {\n\t// out of range\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-9223372036854775809i`)\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. 
got nil, exp error`, `cpu,host=serverA,region=us-west value=-9223372036854775809i`)\n\t}\n\n\t// min int\n\t_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=-9223372036854775808i`)\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-9223372036854775808i`, err)\n\t}\n\n\t// leading zeros\n\t_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=-0009223372036854775808i`)\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-0009223372036854775808i`, err)\n\t}\n}\n\nfunc TestParsePointMaxFloat64(t *testing.T) {\n\t// out of range\n\t_, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, \"1\"+string(maxFloat64)))\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=...`)\n\t}\n\n\t// max float\n\t_, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, string(maxFloat64)))\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=9223372036854775807`, err)\n\t}\n\n\t// leading zeros\n\t_, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, \"0000\"+string(maxFloat64)))\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0009223372036854775807`, err)\n\t}\n}\n\nfunc TestParsePointMinFloat64(t *testing.T) {\n\t// out of range\n\t_, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, \"-1\"+string(minFloat64)[1:]))\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. 
got nil, exp error`, `cpu,host=serverA,region=us-west value=...`)\n\t}\n\n\t// min float\n\t_, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, string(minFloat64)))\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=...`, err)\n\t}\n\n\t// leading zeros\n\t_, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, \"-0000000\"+string(minFloat64)[1:]))\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=...`, err)\n\t}\n}\n\nfunc TestParsePointNumberNonNumeric(t *testing.T) {\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=.1a`)\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=.1a`)\n\t}\n}\n\nfunc TestParsePointNegativeWrongPlace(t *testing.T) {\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=0.-1`)\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=0.-1`)\n\t}\n}\n\nfunc TestParsePointOnlyNegativeSign(t *testing.T) {\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-`)\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=-`)\n\t}\n}\n\nfunc TestParsePointFloatMultipleDecimals(t *testing.T) {\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.1.1`)\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=1.1.1`)\n\t}\n}\n\nfunc TestParsePointInteger(t *testing.T) {\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1i`)\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. 
got %v, exp nil`, `cpu,host=serverA,region=us-west value=1i`, err)\n\t}\n}\n\nfunc TestParsePointNegativeInteger(t *testing.T) {\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1i`)\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1i`, err)\n\t}\n}\n\nfunc TestParsePointNegativeFloat(t *testing.T) {\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1.0`)\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0`, err)\n\t}\n}\n\nfunc TestParsePointFloatNoLeadingDigit(t *testing.T) {\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=.1`)\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0`, err)\n\t}\n}\n\nfunc TestParsePointFloatScientific(t *testing.T) {\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0e4`)\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e4`, err)\n\t}\n\n\tpts, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1e4`)\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e4`, err)\n\t}\n\n\tfields, err := pts[0].Fields()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif fields[\"value\"] != 1e4 {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1e4`, err)\n\t}\n}\n\nfunc TestParsePointFloatScientificUpper(t *testing.T) {\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0E4`)\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. 
got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0E4`, err)\n\t}\n\n\tpts, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1E4`)\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0E4`, err)\n\t}\n\n\tfields, err := pts[0].Fields()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif fields[\"value\"] != 1e4 {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1E4`, err)\n\t}\n}\n\nfunc TestParsePointFloatScientificDecimal(t *testing.T) {\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0e-4`)\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e-4`, err)\n\t}\n}\n\nfunc TestParsePointFloatNegativeScientific(t *testing.T) {\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1.0e-4`)\n\tif err != nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0e-4`, err)\n\t}\n}\n\nfunc TestParsePointBooleanInvalid(t *testing.T) {\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=a`)\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=a`)\n\t}\n}\n\nfunc TestParsePointScientificIntInvalid(t *testing.T) {\n\t_, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9ie10`)\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=9ie10`)\n\t}\n\n\t_, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=9e10i`)\n\tif err == nil {\n\t\tt.Errorf(`ParsePoints(\"%s\") mismatch. 
got nil, exp error`, `cpu,host=serverA,region=us-west value=9e10i`)\n\t}\n}\n\nfunc TestParsePointWhitespace(t *testing.T) {\n\texamples := []string{\n\t\t`cpu    value=1.0 1257894000000000000`,\n\t\t`cpu value=1.0     1257894000000000000`,\n\t\t`cpu      value=1.0     1257894000000000000`,\n\t\t`cpu value=1.0 1257894000000000000   `,\n\t\t`cpu value=1.0 1257894000000000000\n`,\n\t\t`cpu   value=1.0 1257894000000000000\n`,\n\t}\n\n\texpPoint := NewTestPoint(\"cpu\", models.Tags{}, models.Fields{\"value\": 1.0}, time.Unix(0, 1257894000000000000))\n\tfor i, example := range examples {\n\t\tpts, err := models.ParsePoints([]byte(example))\n\t\tif err != nil {\n\t\t\tt.Fatalf(`[Example %d] ParsePoints(\"%s\") error. got %v, exp nil`, i, example, err)\n\t\t}\n\n\t\tif got, exp := len(pts), 1; got != exp {\n\t\t\tt.Fatalf(\"[Example %d] got %d points, expected %d\", i, got, exp)\n\t\t}\n\n\t\tif got, exp := string(pts[0].Name()), string(expPoint.Name()); got != exp {\n\t\t\tt.Fatalf(\"[Example %d] got %v measurement, expected %v\", i, got, exp)\n\t\t}\n\n\t\tfields, err := pts[0].Fields()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\teFields, err := expPoint.Fields()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif got, exp := len(fields), len(eFields); got != exp {\n\t\t\tt.Fatalf(\"[Example %d] got %d fields, expected %d\", i, got, exp)\n\t\t}\n\n\t\tif got, exp := fields[\"value\"], eFields[\"value\"]; got != exp {\n\t\t\tt.Fatalf(`[Example %d] got %v for field \"value\", expected %v`, i, got, exp)\n\t\t}\n\n\t\tif got, exp := pts[0].Time().UnixNano(), expPoint.Time().UnixNano(); got != exp {\n\t\t\tt.Fatalf(`[Example %d] got %d time, expected %d`, i, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestParsePointUnescape(t *testing.T) {\n\t// commas in measurement name\n\ttest(t, `foo\\,bar value=1i`,\n\t\tNewTestPoint(\n\t\t\t\"foo,bar\", // comma in the name\n\t\t\tmodels.NewTags(map[string]string{}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 
int64(1),\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// comma in measurement name with tags\n\ttest(t, `cpu\\,main,regions=east value=1.0`,\n\t\tNewTestPoint(\n\t\t\t\"cpu,main\", // comma in the name\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"regions\": \"east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// spaces in measurement name\n\ttest(t, `cpu\\ load,region=east value=1.0`,\n\t\tNewTestPoint(\n\t\t\t\"cpu load\", // space in the name\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"region\": \"east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// equals in measurement name\n\ttest(t, `cpu\\=load,region=east value=1.0`,\n\t\tNewTestPoint(\n\t\t\t`cpu\\=load`, // backslash is literal\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"region\": \"east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// equals in measurement name\n\ttest(t, `cpu=load,region=east value=1.0`,\n\t\tNewTestPoint(\n\t\t\t`cpu=load`, // literal equals is fine in measurement name\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"region\": \"east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// commas in tag names\n\ttest(t, `cpu,region\\,zone=east value=1.0`,\n\t\tNewTestPoint(\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"region,zone\": \"east\", // comma in the tag key\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// spaces in tag name\n\ttest(t, `cpu,region\\ zone=east value=1.0`,\n\t\tNewTestPoint(\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"region zone\": \"east\", // space in the tag name\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// backslash with escaped equals in tag name\n\ttest(t, `cpu,reg\\\\=ion=east 
value=1.0`,\n\t\tNewTestPoint(\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t`reg\\=ion`: \"east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// space is tag name\n\ttest(t, `cpu,\\ =east value=1.0`,\n\t\tNewTestPoint(\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\" \": \"east\", // tag name is single space\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// commas in tag values\n\ttest(t, `cpu,regions=east\\,west value=1.0`,\n\t\tNewTestPoint(\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"regions\": \"east,west\", // comma in the tag value\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// backslash literal followed by escaped space\n\ttest(t, `cpu,regions=\\\\ east value=1.0`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"regions\": `\\ east`,\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// backslash literal followed by escaped space\n\ttest(t, `cpu,regions=eas\\\\ t value=1.0`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"regions\": `eas\\ t`,\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// backslash literal followed by trailing space\n\ttest(t, `cpu,regions=east\\\\  value=1.0`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"regions\": `east\\ `,\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// spaces in tag values\n\ttest(t, `cpu,regions=east\\ west value=1.0`,\n\t\tNewTestPoint(\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"regions\": \"east west\", // comma in the tag value\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// 
commas in field keys\n\ttest(t, `cpu,regions=east value\\,ms=1.0`,\n\t\tNewTestPoint(\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"regions\": \"east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value,ms\": 1.0, // comma in the field keys\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// spaces in field keys\n\ttest(t, `cpu,regions=east value\\ ms=1.0`,\n\t\tNewTestPoint(\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"regions\": \"east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value ms\": 1.0, // comma in the field keys\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// tag with no value\n\ttest(t, `cpu,regions=east value=\"1\"`,\n\t\tNewTestPoint(\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"regions\": \"east\",\n\t\t\t\t\"foobar\":  \"\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": \"1\",\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// commas in field values\n\ttest(t, `cpu,regions=east value=\"1,0\"`,\n\t\tNewTestPoint(\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"regions\": \"east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": \"1,0\", // comma in the field value\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// random character escaped\n\ttest(t, `cpu,regions=eas\\t value=1.0`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"regions\": \"eas\\\\t\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// backslash literal followed by escaped characters\n\ttest(t, `cpu,regions=\\\\,\\,\\=east value=1.0`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"regions\": `\\,,=east`,\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// field keys using escape char.\n\ttest(t, `cpu \\a=1i`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"\\\\a\": int64(1), // Left as parsed since it's 
not a known escape sequence.\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n\t// measurement, tag and tag value with equals\n\ttest(t, `cpu=load,equals\\=foo=tag\\=value value=1i`,\n\t\tNewTestPoint(\n\t\t\t\"cpu=load\", // Not escaped\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"equals=foo\": \"tag=value\", // Tag and value unescaped\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": int64(1),\n\t\t\t},\n\t\t\ttime.Unix(0, 0)))\n\n}\n\nfunc TestParsePointWithTags(t *testing.T) {\n\ttest(t,\n\t\t\"cpu,host=serverA,region=us-east value=1.0 1000000000\",\n\t\tNewTestPoint(\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\"host\": \"serverA\", \"region\": \"us-east\"}),\n\t\t\tmodels.Fields{\"value\": 1.0}, time.Unix(1, 0)))\n}\n\nfunc TestParsePointWithDuplicateTags(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\tline string\n\t\terr  string\n\t}{\n\t\t{\n\t\t\tline: `cpu,host=serverA,host=serverB value=1i 1000000000`,\n\t\t\terr:  `unable to parse 'cpu,host=serverA,host=serverB value=1i 1000000000': duplicate tags`,\n\t\t},\n\t\t{\n\t\t\tline: `cpu,b=2,b=1,c=3 value=1i 1000000000`,\n\t\t\terr:  `unable to parse 'cpu,b=2,b=1,c=3 value=1i 1000000000': duplicate tags`,\n\t\t},\n\t\t{\n\t\t\tline: `cpu,b=2,c=3,b=1 value=1i 1000000000`,\n\t\t\terr:  `unable to parse 'cpu,b=2,c=3,b=1 value=1i 1000000000': duplicate tags`,\n\t\t},\n\t} {\n\t\t_, err := models.ParsePointsString(tt.line)\n\t\tif err == nil || tt.err != err.Error() {\n\t\t\tt.Errorf(\"%d. ParsePoint() expected error '%s'. 
got '%s'\", i, tt.err, err)\n\t\t}\n\t}\n}\n\nfunc TestParsePointWithStringField(t *testing.T) {\n\ttest(t, `cpu,host=serverA,region=us-east value=1.0,str=\"foo\",str2=\"bar\" 1000000000`,\n\t\tNewTestPoint(\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"host\":   \"serverA\",\n\t\t\t\t\"region\": \"us-east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t\t\"str\":   \"foo\",\n\t\t\t\t\"str2\":  \"bar\",\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n\n\ttest(t, `cpu,host=serverA,region=us-east str=\"foo \\\" bar\" 1000000000`,\n\t\tNewTestPoint(\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"host\":   \"serverA\",\n\t\t\t\t\"region\": \"us-east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"str\": `foo \" bar`,\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n\n}\n\nfunc TestParsePointWithStringWithSpaces(t *testing.T) {\n\ttest(t, `cpu,host=serverA,region=us-east value=1.0,str=\"foo bar\" 1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"host\":   \"serverA\",\n\t\t\t\t\"region\": \"us-east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t\t\"str\":   \"foo bar\", // spaces in string value\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n}\n\nfunc TestParsePointWithStringWithNewline(t *testing.T) {\n\ttest(t, \"cpu,host=serverA,region=us-east value=1.0,str=\\\"foo\\nbar\\\" 1000000000\",\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"host\":   \"serverA\",\n\t\t\t\t\"region\": \"us-east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t\t\"str\":   \"foo\\nbar\", // newline in string value\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n}\n\nfunc TestParsePointWithStringWithCommas(t *testing.T) {\n\t// escaped comma\n\ttest(t, `cpu,host=serverA,region=us-east value=1.0,str=\"foo\\,bar\" 1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"host\":   
\"serverA\",\n\t\t\t\t\"region\": \"us-east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t\t\"str\":   `foo\\,bar`, // commas in string value\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n\n\t// non-escaped comma\n\ttest(t, `cpu,host=serverA,region=us-east value=1.0,str=\"foo,bar\" 1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"host\":   \"serverA\",\n\t\t\t\t\"region\": \"us-east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t\t\"str\":   \"foo,bar\", // commas in string value\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n\n\t// string w/ trailing escape chars\n\ttest(t, `cpu,host=serverA,region=us-east str=\"foo\\\\\",str2=\"bar\" 1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"host\":   \"serverA\",\n\t\t\t\t\"region\": \"us-east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"str\":  \"foo\\\\\", // trailing escape char\n\t\t\t\t\"str2\": \"bar\",\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n}\n\nfunc TestParsePointQuotedMeasurement(t *testing.T) {\n\t// non-escaped comma\n\ttest(t, `\"cpu\",host=serverA,region=us-east value=1.0 1000000000`,\n\t\tNewTestPoint(\n\t\t\t`\"cpu\"`,\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"host\":   \"serverA\",\n\t\t\t\t\"region\": \"us-east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n}\n\nfunc TestParsePointQuotedTags(t *testing.T) {\n\ttest(t, `cpu,\"host\"=\"serverA\",region=us-east value=1.0 1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t`\"host\"`: `\"serverA\"`,\n\t\t\t\t\"region\": \"us-east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n}\n\nfunc TestParsePointsUnbalancedQuotedTags(t *testing.T) {\n\tpts, err := models.ParsePointsString(\"baz,mytag=\\\"a x=1 1441103862125\\nbaz,mytag=a z=1 1441103862126\")\n\tif err != 
nil {\n\t\tt.Fatalf(\"ParsePoints failed: %v\", err)\n\t}\n\n\tif exp := 2; len(pts) != exp {\n\t\tt.Fatalf(\"ParsePoints count mismatch. got %v, exp %v\", len(pts), exp)\n\t}\n\n\t// Expected \" in the tag value\n\texp := models.MustNewPoint(\"baz\", models.NewTags(map[string]string{\"mytag\": `\"a`}),\n\t\tmodels.Fields{\"x\": float64(1)}, time.Unix(0, 1441103862125))\n\n\tif pts[0].String() != exp.String() {\n\t\tt.Errorf(\"Point mismatch:\\ngot: %v\\nexp: %v\", pts[0].String(), exp.String())\n\t}\n\n\t// Expected two points to ensure we did not overscan the line\n\texp = models.MustNewPoint(\"baz\", models.NewTags(map[string]string{\"mytag\": `a`}),\n\t\tmodels.Fields{\"z\": float64(1)}, time.Unix(0, 1441103862126))\n\n\tif pts[1].String() != exp.String() {\n\t\tt.Errorf(\"Point mismatch:\\ngot: %v\\nexp: %v\", pts[1].String(), exp.String())\n\t}\n\n}\n\nfunc TestParsePointEscapedStringsAndCommas(t *testing.T) {\n\t// non-escaped comma and quotes\n\ttest(t, `cpu,host=serverA,region=us-east value=\"{Hello\\\"{,}\\\" World}\" 1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"host\":   \"serverA\",\n\t\t\t\t\"region\": \"us-east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": `{Hello\"{,}\" World}`,\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n\n\t// escaped comma and quotes\n\ttest(t, `cpu,host=serverA,region=us-east value=\"{Hello\\\"{\\,}\\\" World}\" 1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"host\":   \"serverA\",\n\t\t\t\t\"region\": \"us-east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": `{Hello\"{\\,}\" World}`,\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n}\n\nfunc TestParsePointWithStringWithEquals(t *testing.T) {\n\ttest(t, `cpu,host=serverA,region=us-east str=\"foo=bar\",value=1.0 1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"host\":   \"serverA\",\n\t\t\t\t\"region\": 
\"us-east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t\t\"str\":   \"foo=bar\", // spaces in string value\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n}\n\nfunc TestParsePointWithStringWithBackslash(t *testing.T) {\n\ttest(t, `cpu value=\"test\\\\\\\"\" 1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": `test\\\"`,\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n\n\ttest(t, `cpu value=\"test\\\\\" 1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": `test\\`,\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n\n\ttest(t, `cpu value=\"test\\\\\\\"\" 1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": `test\\\"`,\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n\n\ttest(t, `cpu value=\"test\\\"\" 1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": `test\"`,\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n}\n\nfunc TestParsePointWithBoolField(t *testing.T) {\n\ttest(t, `cpu,host=serverA,region=us-east true=true,t=t,T=T,TRUE=TRUE,True=True,false=false,f=f,F=F,FALSE=FALSE,False=False 1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"host\":   \"serverA\",\n\t\t\t\t\"region\": \"us-east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"t\":     true,\n\t\t\t\t\"T\":     true,\n\t\t\t\t\"true\":  true,\n\t\t\t\t\"True\":  true,\n\t\t\t\t\"TRUE\":  true,\n\t\t\t\t\"f\":     false,\n\t\t\t\t\"F\":     false,\n\t\t\t\t\"false\": false,\n\t\t\t\t\"False\": false,\n\t\t\t\t\"FALSE\": false,\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n}\n\nfunc TestParsePointUnicodeString(t *testing.T) {\n\ttest(t, `cpu,host=serverA,region=us-east value=\"wè\" 
1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{\n\t\t\t\t\"host\":   \"serverA\",\n\t\t\t\t\"region\": \"us-east\",\n\t\t\t}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": \"wè\",\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n}\n\nfunc TestParsePointNegativeTimestamp(t *testing.T) {\n\ttest(t, `cpu value=1 -1`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, -1)),\n\t)\n}\n\nfunc TestParsePointMaxTimestamp(t *testing.T) {\n\ttest(t, fmt.Sprintf(`cpu value=1 %d`, models.MaxNanoTime),\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, models.MaxNanoTime)),\n\t)\n}\n\nfunc TestParsePointMinTimestamp(t *testing.T) {\n\ttest(t, `cpu value=1 -9223372036854775806`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(0, models.MinNanoTime)),\n\t)\n}\n\nfunc TestParsePointInvalidTimestamp(t *testing.T) {\n\texamples := []string{\n\t\t\"cpu value=1 9223372036854775808\",\n\t\t\"cpu value=1 -92233720368547758078\",\n\t\t\"cpu value=1 -\",\n\t\t\"cpu value=1 -/\",\n\t\t\"cpu value=1 -1?\",\n\t\t\"cpu value=1 1-\",\n\t\t\"cpu value=1 9223372036854775807 12\",\n\t}\n\n\tfor i, example := range examples {\n\t\t_, err := models.ParsePointsString(example)\n\t\tif err == nil {\n\t\t\tt.Fatalf(\"[Example %d] ParsePoints failed: %v\", i, err)\n\t\t}\n\t}\n}\n\nfunc TestNewPointFloatWithoutDecimal(t *testing.T) {\n\ttest(t, `cpu value=1 1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n}\nfunc TestNewPointNegativeFloat(t *testing.T) {\n\ttest(t, `cpu value=-0.64 
1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": -0.64,\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n}\n\nfunc TestNewPointFloatNoDecimal(t *testing.T) {\n\ttest(t, `cpu value=1. 1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": 1.0,\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n}\n\nfunc TestNewPointFloatScientific(t *testing.T) {\n\ttest(t, `cpu value=6.632243e+06 1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": float64(6632243),\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n}\n\nfunc TestNewPointLargeInteger(t *testing.T) {\n\ttest(t, `cpu value=6632243i 1000000000`,\n\t\tNewTestPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{}),\n\t\t\tmodels.Fields{\n\t\t\t\t\"value\": int64(6632243), // if incorrectly encoded as a float, it would show up as 6.632243e+06\n\t\t\t},\n\t\t\ttime.Unix(1, 0)),\n\t)\n}\n\nfunc TestParsePointNaN(t *testing.T) {\n\t_, err := models.ParsePointsString(\"cpu value=NaN 1000000000\")\n\tif err == nil {\n\t\tt.Fatalf(\"ParsePoints expected error, got nil\")\n\t}\n\n\t_, err = models.ParsePointsString(\"cpu value=nAn 1000000000\")\n\tif err == nil {\n\t\tt.Fatalf(\"ParsePoints expected error, got nil\")\n\t}\n\n\t_, err = models.ParsePointsString(\"cpu value=NaN\")\n\tif err == nil {\n\t\tt.Fatalf(\"ParsePoints expected error, got nil\")\n\t}\n}\n\nfunc TestNewPointLargeNumberOfTags(t *testing.T) {\n\ttags := \"\"\n\tfor i := 0; i < 255; i++ {\n\t\ttags += fmt.Sprintf(\",tag%d=value%d\", i, i)\n\t}\n\n\tpt, err := models.ParsePointsString(fmt.Sprintf(\"cpu%s value=1\", tags))\n\tif err != nil {\n\t\tt.Fatalf(\"ParsePoints() with max tags failed: %v\", err)\n\t}\n\n\tif len(pt[0].Tags()) != 255 {\n\t\tt.Fatalf(\"expected %d tags, got %d\", 255, len(pt[0].Tags()))\n\t}\n}\n\nfunc 
TestParsePointIntsFloats(t *testing.T) {\n\tpts, err := models.ParsePoints([]byte(`cpu,host=serverA,region=us-east int=10i,float=11.0,float2=12.1 1000000000`))\n\tif err != nil {\n\t\tt.Fatalf(`ParsePoints() failed. got %s`, err)\n\t}\n\n\tif exp := 1; len(pts) != exp {\n\t\tt.Errorf(\"ParsePoint() len mismatch: got %v, exp %v\", len(pts), exp)\n\t}\n\tpt := pts[0]\n\n\tfields, err := pt.Fields()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, ok := fields[\"int\"].(int64); !ok {\n\t\tt.Errorf(\"ParsePoint() int field mismatch: got %T, exp %T\", fields[\"int\"], int64(10))\n\t}\n\n\tif _, ok := fields[\"float\"].(float64); !ok {\n\t\tt.Errorf(\"ParsePoint() float field mismatch: got %T, exp %T\", fields[\"float64\"], float64(11.0))\n\t}\n\n\tif _, ok := fields[\"float2\"].(float64); !ok {\n\t\tt.Errorf(\"ParsePoint() float field mismatch: got %T, exp %T\", fields[\"float64\"], float64(12.1))\n\t}\n}\n\nfunc TestParsePointKeyUnsorted(t *testing.T) {\n\tpts, err := models.ParsePoints([]byte(\"cpu,last=1,first=2 value=1i\"))\n\tif err != nil {\n\t\tt.Fatalf(`ParsePoints() failed. got %s`, err)\n\t}\n\n\tif exp := 1; len(pts) != exp {\n\t\tt.Errorf(\"ParsePoint() len mismatch: got %v, exp %v\", len(pts), exp)\n\t}\n\tpt := pts[0]\n\n\tif exp := \"cpu,first=2,last=1\"; string(pt.Key()) != exp {\n\t\tt.Errorf(\"ParsePoint key not sorted. got %v, exp %v\", string(pt.Key()), exp)\n\t}\n}\n\nfunc TestParsePointToString(t *testing.T) {\n\tline := `cpu,host=serverA,region=us-east bool=false,float=11,float2=12.123,int=10i,str=\"string val\" 1000000000`\n\tpts, err := models.ParsePoints([]byte(line))\n\tif err != nil {\n\t\tt.Fatalf(`ParsePoints() failed. 
got %s`, err)\n\t}\n\tif exp := 1; len(pts) != exp {\n\t\tt.Errorf(\"ParsePoint() len mismatch: got %v, exp %v\", len(pts), exp)\n\t}\n\tpt := pts[0]\n\n\tgot := pt.String()\n\tif line != got {\n\t\tt.Errorf(\"ParsePoint() to string mismatch:\\n got %v\\n exp %v\", got, line)\n\t}\n\n\tpt = models.MustNewPoint(\"cpu\", models.NewTags(map[string]string{\"host\": \"serverA\", \"region\": \"us-east\"}),\n\t\tmodels.Fields{\"int\": 10, \"float\": float64(11.0), \"float2\": float64(12.123), \"bool\": false, \"str\": \"string val\"},\n\t\ttime.Unix(1, 0))\n\n\tgot = pt.String()\n\tif line != got {\n\t\tt.Errorf(\"NewPoint() to string mismatch:\\n got %v\\n exp %v\", got, line)\n\t}\n}\n\nfunc TestParsePointsWithPrecision(t *testing.T) {\n\ttests := []struct {\n\t\tname      string\n\t\tline      string\n\t\tprecision string\n\t\texp       string\n\t}{\n\t\t{\n\t\t\tname:      \"nanosecond by default\",\n\t\t\tline:      `cpu,host=serverA,region=us-east value=1.0 946730096789012345`,\n\t\t\tprecision: \"\",\n\t\t\texp:       \"cpu,host=serverA,region=us-east value=1.0 946730096789012345\",\n\t\t},\n\t\t{\n\t\t\tname:      \"nanosecond\",\n\t\t\tline:      `cpu,host=serverA,region=us-east value=1.0 946730096789012345`,\n\t\t\tprecision: \"n\",\n\t\t\texp:       \"cpu,host=serverA,region=us-east value=1.0 946730096789012345\",\n\t\t},\n\t\t{\n\t\t\tname:      \"microsecond\",\n\t\t\tline:      `cpu,host=serverA,region=us-east value=1.0 946730096789012`,\n\t\t\tprecision: \"u\",\n\t\t\texp:       \"cpu,host=serverA,region=us-east value=1.0 946730096789012000\",\n\t\t},\n\t\t{\n\t\t\tname:      \"millisecond\",\n\t\t\tline:      `cpu,host=serverA,region=us-east value=1.0 946730096789`,\n\t\t\tprecision: \"ms\",\n\t\t\texp:       \"cpu,host=serverA,region=us-east value=1.0 946730096789000000\",\n\t\t},\n\t\t{\n\t\t\tname:      \"second\",\n\t\t\tline:      `cpu,host=serverA,region=us-east value=1.0 946730096`,\n\t\t\tprecision: \"s\",\n\t\t\texp:       
\"cpu,host=serverA,region=us-east value=1.0 946730096000000000\",\n\t\t},\n\t\t{\n\t\t\tname:      \"minute\",\n\t\t\tline:      `cpu,host=serverA,region=us-east value=1.0 15778834`,\n\t\t\tprecision: \"m\",\n\t\t\texp:       \"cpu,host=serverA,region=us-east value=1.0 946730040000000000\",\n\t\t},\n\t\t{\n\t\t\tname:      \"hour\",\n\t\t\tline:      `cpu,host=serverA,region=us-east value=1.0 262980`,\n\t\t\tprecision: \"h\",\n\t\t\texp:       \"cpu,host=serverA,region=us-east value=1.0 946728000000000000\",\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tpts, err := models.ParsePointsWithPrecision([]byte(test.line), time.Now().UTC(), test.precision)\n\t\tif err != nil {\n\t\t\tt.Fatalf(`%s: ParsePoints() failed. got %s`, test.name, err)\n\t\t}\n\t\tif exp := 1; len(pts) != exp {\n\t\t\tt.Errorf(\"%s: ParsePoint() len mismatch: got %v, exp %v\", test.name, len(pts), exp)\n\t\t}\n\t\tpt := pts[0]\n\n\t\tgot := pt.String()\n\t\tif got != test.exp {\n\t\t\tt.Errorf(\"%s: ParsePoint() to string mismatch:\\n got %v\\n exp %v\", test.name, got, test.exp)\n\t\t}\n\t}\n}\n\nfunc TestParsePointsWithPrecisionNoTime(t *testing.T) {\n\tline := `cpu,host=serverA,region=us-east value=1.0`\n\ttm, _ := time.Parse(time.RFC3339Nano, \"2000-01-01T12:34:56.789012345Z\")\n\ttests := []struct {\n\t\tname      string\n\t\tprecision string\n\t\texp       string\n\t}{\n\t\t{\n\t\t\tname:      \"no precision\",\n\t\t\tprecision: \"\",\n\t\t\texp:       \"cpu,host=serverA,region=us-east value=1.0 946730096789012345\",\n\t\t},\n\t\t{\n\t\t\tname:      \"nanosecond precision\",\n\t\t\tprecision: \"n\",\n\t\t\texp:       \"cpu,host=serverA,region=us-east value=1.0 946730096789012345\",\n\t\t},\n\t\t{\n\t\t\tname:      \"microsecond precision\",\n\t\t\tprecision: \"u\",\n\t\t\texp:       \"cpu,host=serverA,region=us-east value=1.0 946730096789012000\",\n\t\t},\n\t\t{\n\t\t\tname:      \"millisecond precision\",\n\t\t\tprecision: \"ms\",\n\t\t\texp:       \"cpu,host=serverA,region=us-east 
value=1.0 946730096789000000\",\n\t\t},\n\t\t{\n\t\t\tname:      \"second precision\",\n\t\t\tprecision: \"s\",\n\t\t\texp:       \"cpu,host=serverA,region=us-east value=1.0 946730096000000000\",\n\t\t},\n\t\t{\n\t\t\tname:      \"minute precision\",\n\t\t\tprecision: \"m\",\n\t\t\texp:       \"cpu,host=serverA,region=us-east value=1.0 946730040000000000\",\n\t\t},\n\t\t{\n\t\t\tname:      \"hour precision\",\n\t\t\tprecision: \"h\",\n\t\t\texp:       \"cpu,host=serverA,region=us-east value=1.0 946728000000000000\",\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tpts, err := models.ParsePointsWithPrecision([]byte(line), tm, test.precision)\n\t\tif err != nil {\n\t\t\tt.Fatalf(`%s: ParsePoints() failed. got %s`, test.name, err)\n\t\t}\n\t\tif exp := 1; len(pts) != exp {\n\t\t\tt.Errorf(\"%s: ParsePoint() len mismatch: got %v, exp %v\", test.name, len(pts), exp)\n\t\t}\n\t\tpt := pts[0]\n\n\t\tgot := pt.String()\n\t\tif got != test.exp {\n\t\t\tt.Errorf(\"%s: ParsePoint() to string mismatch:\\n got %v\\n exp %v\", test.name, got, test.exp)\n\t\t}\n\t}\n}\n\nfunc TestParsePointsWithPrecisionComments(t *testing.T) {\n\ttests := []struct {\n\t\tname      string\n\t\tbatch     string\n\t\texp       string\n\t\tlenPoints int\n\t}{\n\t\t{\n\t\t\tname:      \"comment only\",\n\t\t\tbatch:     `# comment only`,\n\t\t\texp:       \"cpu,host=serverA,region=us-east value=1.0 946730096789012345\",\n\t\t\tlenPoints: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"point with comment above\",\n\t\t\tbatch: `# a point is below\ncpu,host=serverA,region=us-east value=1.0 946730096789012345`,\n\t\t\texp:       \"cpu,host=serverA,region=us-east value=1.0 946730096789012345\",\n\t\t\tlenPoints: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"point with comment below\",\n\t\t\tbatch: `cpu,host=serverA,region=us-east value=1.0 946730096789012345\n# end of points`,\n\t\t\texp:       \"cpu,host=serverA,region=us-east value=1.0 946730096789012345\",\n\t\t\tlenPoints: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"indented 
comment\",\n\t\t\tbatch: `\t# a point is below\ncpu,host=serverA,region=us-east value=1.0 946730096789012345`,\n\t\t\texp:       \"cpu,host=serverA,region=us-east value=1.0 946730096789012345\",\n\t\t\tlenPoints: 1,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tpts, err := models.ParsePointsWithPrecision([]byte(test.batch), time.Now().UTC(), \"\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(`%s: ParsePoints() failed. got %s`, test.name, err)\n\t\t}\n\t\tpointsLength := len(pts)\n\t\tif exp := test.lenPoints; pointsLength != exp {\n\t\t\tt.Errorf(\"%s: ParsePoint() len mismatch: got %v, exp %v\", test.name, pointsLength, exp)\n\t\t}\n\n\t\tif pointsLength > 0 {\n\t\t\tpt := pts[0]\n\n\t\t\tgot := pt.String()\n\t\t\tif got != test.exp {\n\t\t\t\tt.Errorf(\"%s: ParsePoint() to string mismatch:\\n got %v\\n exp %v\", test.name, got, test.exp)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestNewPointEscaped(t *testing.T) {\n\t// commas\n\tpt := models.MustNewPoint(\"cpu,main\", models.NewTags(map[string]string{\"tag,bar\": \"value\"}), models.Fields{\"name,bar\": 1.0}, time.Unix(0, 0))\n\tif exp := `cpu\\,main,tag\\,bar=value name\\,bar=1 0`; pt.String() != exp {\n\t\tt.Errorf(\"NewPoint().String() mismatch.\\ngot %v\\nexp %v\", pt.String(), exp)\n\t}\n\n\t// spaces\n\tpt = models.MustNewPoint(\"cpu main\", models.NewTags(map[string]string{\"tag bar\": \"value\"}), models.Fields{\"name bar\": 1.0}, time.Unix(0, 0))\n\tif exp := `cpu\\ main,tag\\ bar=value name\\ bar=1 0`; pt.String() != exp {\n\t\tt.Errorf(\"NewPoint().String() mismatch.\\ngot %v\\nexp %v\", pt.String(), exp)\n\t}\n\n\t// equals\n\tpt = models.MustNewPoint(\"cpu=main\", models.NewTags(map[string]string{\"tag=bar\": \"value=foo\"}), models.Fields{\"name=bar\": 1.0}, time.Unix(0, 0))\n\tif exp := `cpu=main,tag\\=bar=value\\=foo name\\=bar=1 0`; pt.String() != exp {\n\t\tt.Errorf(\"NewPoint().String() mismatch.\\ngot %v\\nexp %v\", pt.String(), exp)\n\t}\n}\n\nfunc TestNewPointWithoutField(t *testing.T) {\n\t_, err := 
models.NewPoint(\"cpu\", models.NewTags(map[string]string{\"tag\": \"bar\"}), models.Fields{}, time.Unix(0, 0))\n\tif err == nil {\n\t\tt.Fatalf(`NewPoint() expected error. got nil`)\n\t}\n}\n\nfunc TestNewPointUnhandledType(t *testing.T) {\n\t// nil value\n\tpt := models.MustNewPoint(\"cpu\", nil, models.Fields{\"value\": nil}, time.Unix(0, 0))\n\tif exp := `cpu value= 0`; pt.String() != exp {\n\t\tt.Errorf(\"NewPoint().String() mismatch.\\ngot %v\\nexp %v\", pt.String(), exp)\n\t}\n\n\t// unsupported type gets stored as string\n\tnow := time.Unix(0, 0).UTC()\n\tpt = models.MustNewPoint(\"cpu\", nil, models.Fields{\"value\": now}, time.Unix(0, 0))\n\tif exp := `cpu value=\"1970-01-01 00:00:00 +0000 UTC\" 0`; pt.String() != exp {\n\t\tt.Errorf(\"NewPoint().String() mismatch.\\ngot %v\\nexp %v\", pt.String(), exp)\n\t}\n\n\tfields, err := pt.Fields()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif exp := \"1970-01-01 00:00:00 +0000 UTC\"; fields[\"value\"] != exp {\n\t\tt.Errorf(\"NewPoint().String() mismatch.\\ngot %v\\nexp %v\", pt.String(), exp)\n\t}\n}\n\nfunc TestMakeKeyEscaped(t *testing.T) {\n\tif exp, got := `cpu\\ load`, models.MakeKey([]byte(`cpu\\ load`), models.NewTags(map[string]string{})); string(got) != exp {\n\t\tt.Errorf(\"MakeKey() mismatch.\\ngot %v\\nexp %v\", got, exp)\n\t}\n\n\tif exp, got := `cpu\\ load`, models.MakeKey([]byte(`cpu load`), models.NewTags(map[string]string{})); string(got) != exp {\n\t\tt.Errorf(\"MakeKey() mismatch.\\ngot %v\\nexp %v\", got, exp)\n\t}\n\n\tif exp, got := `cpu\\,load`, models.MakeKey([]byte(`cpu\\,load`), models.NewTags(map[string]string{})); string(got) != exp {\n\t\tt.Errorf(\"MakeKey() mismatch.\\ngot %v\\nexp %v\", got, exp)\n\t}\n\n\tif exp, got := `cpu\\,load`, models.MakeKey([]byte(`cpu,load`), models.NewTags(map[string]string{})); string(got) != exp {\n\t\tt.Errorf(\"MakeKey() mismatch.\\ngot %v\\nexp %v\", got, exp)\n\t}\n\n}\n\nfunc TestPrecisionString(t *testing.T) {\n\ttags := 
map[string]interface{}{\"value\": float64(1)}\n\ttm, _ := time.Parse(time.RFC3339Nano, \"2000-01-01T12:34:56.789012345Z\")\n\ttests := []struct {\n\t\tname      string\n\t\tprecision string\n\t\texp       string\n\t}{\n\t\t{\n\t\t\tname:      \"no precision\",\n\t\t\tprecision: \"\",\n\t\t\texp:       \"cpu value=1 946730096789012345\",\n\t\t},\n\t\t{\n\t\t\tname:      \"nanosecond precision\",\n\t\t\tprecision: \"ns\",\n\t\t\texp:       \"cpu value=1 946730096789012345\",\n\t\t},\n\t\t{\n\t\t\tname:      \"microsecond precision\",\n\t\t\tprecision: \"u\",\n\t\t\texp:       \"cpu value=1 946730096789012\",\n\t\t},\n\t\t{\n\t\t\tname:      \"millisecond precision\",\n\t\t\tprecision: \"ms\",\n\t\t\texp:       \"cpu value=1 946730096789\",\n\t\t},\n\t\t{\n\t\t\tname:      \"second precision\",\n\t\t\tprecision: \"s\",\n\t\t\texp:       \"cpu value=1 946730096\",\n\t\t},\n\t\t{\n\t\t\tname:      \"minute precision\",\n\t\t\tprecision: \"m\",\n\t\t\texp:       \"cpu value=1 15778834\",\n\t\t},\n\t\t{\n\t\t\tname:      \"hour precision\",\n\t\t\tprecision: \"h\",\n\t\t\texp:       \"cpu value=1 262980\",\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tpt := models.MustNewPoint(\"cpu\", nil, tags, tm)\n\t\tact := pt.PrecisionString(test.precision)\n\n\t\tif act != test.exp {\n\t\t\tt.Errorf(\"%s: PrecisionString() mismatch:\\n actual:\t%v\\n exp:\t\t%v\",\n\t\t\t\ttest.name, act, test.exp)\n\t\t}\n\t}\n}\n\nfunc TestRoundedString(t *testing.T) {\n\ttags := map[string]interface{}{\"value\": float64(1)}\n\ttm, _ := time.Parse(time.RFC3339Nano, \"2000-01-01T12:34:56.789012345Z\")\n\ttests := []struct {\n\t\tname      string\n\t\tprecision time.Duration\n\t\texp       string\n\t}{\n\t\t{\n\t\t\tname:      \"no precision\",\n\t\t\tprecision: time.Duration(0),\n\t\t\texp:       \"cpu value=1 946730096789012345\",\n\t\t},\n\t\t{\n\t\t\tname:      \"nanosecond precision\",\n\t\t\tprecision: time.Nanosecond,\n\t\t\texp:       \"cpu value=1 
946730096789012345\",\n\t\t},\n\t\t{\n\t\t\tname:      \"microsecond precision\",\n\t\t\tprecision: time.Microsecond,\n\t\t\texp:       \"cpu value=1 946730096789012000\",\n\t\t},\n\t\t{\n\t\t\tname:      \"millisecond precision\",\n\t\t\tprecision: time.Millisecond,\n\t\t\texp:       \"cpu value=1 946730096789000000\",\n\t\t},\n\t\t{\n\t\t\tname:      \"second precision\",\n\t\t\tprecision: time.Second,\n\t\t\texp:       \"cpu value=1 946730097000000000\",\n\t\t},\n\t\t{\n\t\t\tname:      \"minute precision\",\n\t\t\tprecision: time.Minute,\n\t\t\texp:       \"cpu value=1 946730100000000000\",\n\t\t},\n\t\t{\n\t\t\tname:      \"hour precision\",\n\t\t\tprecision: time.Hour,\n\t\t\texp:       \"cpu value=1 946731600000000000\",\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tpt := models.MustNewPoint(\"cpu\", nil, tags, tm)\n\t\tact := pt.RoundedString(test.precision)\n\n\t\tif act != test.exp {\n\t\t\tt.Errorf(\"%s: RoundedString() mismatch:\\n actual:\t%v\\n exp:\t\t%v\",\n\t\t\t\ttest.name, act, test.exp)\n\t\t}\n\t}\n}\n\nfunc TestParsePointsStringWithExtraBuffer(t *testing.T) {\n\tb := make([]byte, 70*5000)\n\tbuf := bytes.NewBuffer(b)\n\tkey := \"cpu,host=A,region=uswest\"\n\tbuf.WriteString(fmt.Sprintf(\"%s value=%.3f 1\\n\", key, rand.Float64()))\n\n\tpoints, err := models.ParsePointsString(buf.String())\n\tif err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\n\tpointKey := string(points[0].Key())\n\n\tif len(key) != len(pointKey) {\n\t\tt.Fatalf(\"expected length of both keys are same but got %d and %d\", len(key), len(pointKey))\n\t}\n\n\tif key != pointKey {\n\t\tt.Fatalf(\"expected both keys are same but got %s and %s\", key, pointKey)\n\t}\n}\n\nfunc TestParsePointsQuotesInFieldKey(t *testing.T) {\n\tbuf := `cpu \"a=1\ncpu value=2 1`\n\tpoints, err := models.ParsePointsString(buf)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\n\tfields, err := points[0].Fields()\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\tvalue, ok := fields[\"\\\"a\"]\n\tif !ok {\n\t\tt.Fatalf(\"expected to parse field '\\\"a'\")\n\t}\n\n\tif value != float64(1) {\n\t\tt.Fatalf(\"expected field value to be 1, got %v\", value)\n\t}\n\n\t// The following input should not parse\n\tbuf = `cpu \"\\, '= \"\\ v=1.0`\n\t_, err = models.ParsePointsString(buf)\n\tif err == nil {\n\t\tt.Fatalf(\"expected parsing failure but got no error\")\n\t}\n}\n\nfunc TestParsePointsQuotesInTags(t *testing.T) {\n\tbuf := `t159,label=hey\\ \"ya a=1i,value=0i\nt159,label=another a=2i,value=1i 1`\n\tpoints, err := models.ParsePointsString(buf)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\n\tif len(points) != 2 {\n\t\tt.Fatalf(\"expected 2 points, got %d\", len(points))\n\t}\n}\n\nfunc TestParsePointsBlankLine(t *testing.T) {\n\tbuf := `cpu value=1i 1000000000\n\ncpu value=2i 2000000000`\n\tpoints, err := models.ParsePointsString(buf)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\n\tif len(points) != 2 {\n\t\tt.Fatalf(\"expected 2 points, got %d\", len(points))\n\t}\n}\n\nfunc TestNewPointsWithBytesWithCorruptData(t *testing.T) {\n\tcorrupted := []byte{0, 0, 0, 3, 102, 111, 111, 0, 0, 0, 4, 61, 34, 65, 34, 1, 0, 0, 0, 14, 206, 86, 119, 24, 32, 72, 233, 168, 2, 148}\n\tp, err := models.NewPointFromBytes(corrupted)\n\tif p != nil || err == nil {\n\t\tt.Fatalf(\"NewPointFromBytes: got: (%v, %v), expected: (nil, error)\", p, err)\n\t}\n}\n\nfunc TestNewPointsWithShortBuffer(t *testing.T) {\n\t_, err := models.NewPointFromBytes([]byte{0, 0, 0, 3, 4})\n\tif err != io.ErrShortBuffer {\n\t\tt.Fatalf(\"NewPointFromBytes: got: (%v, %v), expected: (nil, error)\", p, err)\n\t}\n}\n\nfunc TestNewPointsRejectsEmptyFieldNames(t *testing.T) {\n\tif _, err := models.NewPoint(\"foo\", nil, models.Fields{\"\": 1}, time.Now()); err == nil {\n\t\tt.Fatalf(\"new point with empty field name. 
got: nil, expected: error\")\n\t}\n}\n\nfunc TestNewPointsRejectsMaxKey(t *testing.T) {\n\tvar key string\n\t// tsm field key is point key, separator (4 bytes) and field\n\tfor i := 0; i < models.MaxKeyLength-len(\"value\")-4; i++ {\n\t\tkey += \"a\"\n\t}\n\n\t// Test max key len\n\tif _, err := models.NewPoint(key, nil, models.Fields{\"value\": 1, \"ok\": 2.0}, time.Now()); err != nil {\n\t\tt.Fatalf(\"new point with max key. got: %v, expected: nil\", err)\n\t}\n\n\tif _, err := models.ParsePointsString(fmt.Sprintf(\"%v value=1,ok=2.0\", key)); err != nil {\n\t\tt.Fatalf(\"parse point with max key. got: %v, expected: nil\", err)\n\t}\n\n\t// Test 1 byte over max key len\n\tkey += \"a\"\n\tif _, err := models.NewPoint(key, nil, models.Fields{\"value\": 1, \"ok\": 2.0}, time.Now()); err == nil {\n\t\tt.Fatalf(\"new point with max key. got: nil, expected: error\")\n\t}\n\n\tif _, err := models.ParsePointsString(fmt.Sprintf(\"%v value=1,ok=2.0\", key)); err == nil {\n\t\tt.Fatalf(\"parse point with max key. 
got: nil, expected: error\")\n\t}\n\n}\n\nfunc TestPoint_FieldIterator_Simple(t *testing.T) {\n\n\tp, err := models.ParsePoints([]byte(`m v=42i,f=42 36`))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif len(p) != 1 {\n\t\tt.Fatalf(\"wrong number of points, got %d, exp %d\", len(p), 1)\n\t}\n\n\tfi := p[0].FieldIterator()\n\n\tif !fi.Next() {\n\t\tt.Fatal(\"field iterator terminated before first field\")\n\t}\n\n\tif fi.Type() != models.Integer {\n\t\tt.Fatalf(\"'42i' should be an Integer, got %v\", fi.Type())\n\t}\n\n\tiv, err := fi.IntegerValue()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif exp, got := int64(42), iv; exp != got {\n\t\tt.Fatalf(\"'42i' should be %d, got %d\", exp, got)\n\t}\n\n\tif !fi.Next() {\n\t\tt.Fatalf(\"field iterator terminated before second field\")\n\t}\n\n\tif fi.Type() != models.Float {\n\t\tt.Fatalf(\"'42' should be a Float, got %v\", fi.Type())\n\t}\n\n\tfv, err := fi.FloatValue()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif exp, got := 42.0, fv; exp != got {\n\t\tt.Fatalf(\"'42' should be %f, got %f\", exp, got)\n\t}\n\n\tif fi.Next() {\n\t\tt.Fatal(\"field iterator didn't terminate\")\n\t}\n}\n\nfunc toFields(fi models.FieldIterator) models.Fields {\n\tm := make(models.Fields)\n\tfor fi.Next() {\n\t\tvar v interface{}\n\t\tvar err error\n\t\tswitch fi.Type() {\n\t\tcase models.Float:\n\t\t\tv, err = fi.FloatValue()\n\t\tcase models.Integer:\n\t\t\tv, err = fi.IntegerValue()\n\t\tcase models.String:\n\t\t\tv = fi.StringValue()\n\t\tcase models.Boolean:\n\t\t\tv, err = fi.BooleanValue()\n\t\tcase models.Empty:\n\t\t\tv = nil\n\t\tdefault:\n\t\t\tpanic(\"unknown type\")\n\t\t}\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tm[string(fi.FieldKey())] = v\n\t}\n\treturn m\n}\n\nfunc TestPoint_FieldIterator_FieldMap(t *testing.T) {\n\n\tpoints, err := models.ParsePointsString(`\nm v=42\nm v=42i\nm v=\"string\"\nm v=true\nm v=\"string\\\"with\\\"escapes\"\nm v=42i,f=42,g=42.314\nm 
a=2i,b=3i,c=true,d=\"stuff\",e=-0.23,f=123.456\n`)\n\n\tif err != nil {\n\t\tt.Fatal(\"failed to parse test points:\", err)\n\t}\n\n\tfor _, p := range points {\n\t\texp, err := p.Fields()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tgot := toFields(p.FieldIterator())\n\n\t\tif !reflect.DeepEqual(got, exp) {\n\t\t\tt.Errorf(\"FieldIterator failed for %#q: got %#v, exp %#v\", p.String(), got, exp)\n\t\t}\n\t}\n}\n\nfunc TestEscapeStringField(t *testing.T) {\n\tcases := []struct {\n\t\tin     string\n\t\texpOut string\n\t}{\n\t\t{in: \"abcdefg\", expOut: \"abcdefg\"},\n\t\t{in: `one double quote \" .`, expOut: `one double quote \\\" .`},\n\t\t{in: `quote \" then backslash \\ .`, expOut: `quote \\\" then backslash \\\\ .`},\n\t\t{in: `backslash \\ then quote \" .`, expOut: `backslash \\\\ then quote \\\" .`},\n\t}\n\n\tfor _, c := range cases {\n\t\t// Unescapes as expected.\n\t\tgot := models.EscapeStringField(c.in)\n\t\tif got != c.expOut {\n\t\t\tt.Errorf(\"unexpected result from EscapeStringField(%s)\\ngot [%s]\\nexp [%s]\\n\", c.in, got, c.expOut)\n\t\t\tcontinue\n\t\t}\n\n\t\tpointLine := fmt.Sprintf(`t s=\"%s\"`, got)\n\t\ttest(t, pointLine, NewTestPoint(\n\t\t\t\"t\",\n\t\t\tmodels.NewTags(nil),\n\t\t\tmodels.Fields{\"s\": c.in},\n\t\t\ttime.Unix(0, 0),\n\t\t))\n\t}\n}\n\nfunc BenchmarkEscapeStringField_Plain(b *testing.B) {\n\ts := \"nothing special\"\n\tfor i := 0; i < b.N; i++ {\n\t\tsink = models.EscapeStringField(s)\n\t}\n}\n\nfunc BenchmarkEscapeString_Quotes(b *testing.B) {\n\ts := `Hello, \"world\"`\n\tfor i := 0; i < b.N; i++ {\n\t\tsink = models.EscapeStringField(s)\n\t}\n}\n\nfunc BenchmarkEscapeString_Backslashes(b *testing.B) {\n\ts := `C:\\windows\\system32`\n\tfor i := 0; i < b.N; i++ {\n\t\tsink = models.EscapeStringField(s)\n\t}\n}\n\nfunc BenchmarkEscapeString_QuotesAndBackslashes(b *testing.B) {\n\ts1 := `a quote \" then backslash \\ .`\n\ts2 := `a backslash \\ then quote \" .`\n\tfor i := 0; i < b.N; i++ {\n\t\tsink = 
[...]string{models.EscapeStringField(s1), models.EscapeStringField(s2)}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/models/rows.go",
    "content": "package models\n\nimport (\n\t\"sort\"\n)\n\n// Row represents a single row returned from the execution of a statement.\ntype Row struct {\n\tName    string            `json:\"name,omitempty\"`\n\tTags    map[string]string `json:\"tags,omitempty\"`\n\tColumns []string          `json:\"columns,omitempty\"`\n\tValues  [][]interface{}   `json:\"values,omitempty\"`\n\tPartial bool              `json:\"partial,omitempty\"`\n}\n\n// SameSeries returns true if r contains values for the same series as o.\nfunc (r *Row) SameSeries(o *Row) bool {\n\treturn r.tagsHash() == o.tagsHash() && r.Name == o.Name\n}\n\n// tagsHash returns a hash of tag key/value pairs.\nfunc (r *Row) tagsHash() uint64 {\n\th := NewInlineFNV64a()\n\tkeys := r.tagsKeys()\n\tfor _, k := range keys {\n\t\th.Write([]byte(k))\n\t\th.Write([]byte(r.Tags[k]))\n\t}\n\treturn h.Sum64()\n}\n\n// tagKeys returns a sorted list of tag keys.\nfunc (r *Row) tagsKeys() []string {\n\ta := make([]string, 0, len(r.Tags))\n\tfor k := range r.Tags {\n\t\ta = append(a, k)\n\t}\n\tsort.Strings(a)\n\treturn a\n}\n\n// Rows represents a collection of rows. Rows implements sort.Interface.\ntype Rows []*Row\n\n// Len implements sort.Interface.\nfunc (p Rows) Len() int { return len(p) }\n\n// Less implements sort.Interface.\nfunc (p Rows) Less(i, j int) bool {\n\t// Sort by name first.\n\tif p[i].Name != p[j].Name {\n\t\treturn p[i].Name < p[j].Name\n\t}\n\n\t// Sort by tag set hash. Tags don't have a meaningful sort order so we\n\t// just compute a hash and sort by that instead. This allows the tests\n\t// to receive rows in a predictable order every time.\n\treturn p[i].tagsHash() < p[j].tagsHash()\n}\n\n// Swap implements sort.Interface.\nfunc (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/models/statistic.go",
    "content": "package models\n\n// Statistic is the representation of a statistic used by the monitoring service.\ntype Statistic struct {\n\tName   string                 `json:\"name\"`\n\tTags   map[string]string      `json:\"tags\"`\n\tValues map[string]interface{} `json:\"values\"`\n}\n\n// NewStatistic returns an initialized Statistic.\nfunc NewStatistic(name string) Statistic {\n\treturn Statistic{\n\t\tName:   name,\n\t\tTags:   make(map[string]string),\n\t\tValues: make(map[string]interface{}),\n\t}\n}\n\n// StatisticTags is a map that can be merged with others without causing\n// mutations to either map.\ntype StatisticTags map[string]string\n\n// Merge creates a new map containing the merged contents of tags and t.\n// If both tags and the receiver map contain the same key, the value in tags\n// is used in the resulting map.\n//\n// Merge always returns a usable map.\nfunc (t StatisticTags) Merge(tags map[string]string) map[string]string {\n\t// Add everything in tags to the result.\n\tout := make(map[string]string, len(tags))\n\tfor k, v := range tags {\n\t\tout[k] = v\n\t}\n\n\t// Only add values from t that don't appear in tags.\n\tfor k, v := range t {\n\t\tif _, ok := tags[k]; !ok {\n\t\t\tout[k] = v\n\t\t}\n\t}\n\treturn out\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/models/statistic_test.go",
    "content": "package models_test\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/models\"\n)\n\nfunc TestTags_Merge(t *testing.T) {\n\texamples := []struct {\n\t\tBase   map[string]string\n\t\tArg    map[string]string\n\t\tResult map[string]string\n\t}{\n\t\t{\n\t\t\tBase:   nil,\n\t\t\tArg:    nil,\n\t\t\tResult: map[string]string{},\n\t\t},\n\t\t{\n\t\t\tBase:   nil,\n\t\t\tArg:    map[string]string{\"foo\": \"foo\"},\n\t\t\tResult: map[string]string{\"foo\": \"foo\"},\n\t\t},\n\t\t{\n\t\t\tBase:   map[string]string{\"foo\": \"foo\"},\n\t\t\tArg:    nil,\n\t\t\tResult: map[string]string{\"foo\": \"foo\"},\n\t\t},\n\t\t{\n\t\t\tBase:   map[string]string{\"foo\": \"foo\"},\n\t\t\tArg:    map[string]string{\"bar\": \"bar\"},\n\t\t\tResult: map[string]string{\"foo\": \"foo\", \"bar\": \"bar\"},\n\t\t},\n\t\t{\n\t\t\tBase:   map[string]string{\"foo\": \"foo\", \"bar\": \"bar\"},\n\t\t\tArg:    map[string]string{\"zoo\": \"zoo\"},\n\t\t\tResult: map[string]string{\"foo\": \"foo\", \"bar\": \"bar\", \"zoo\": \"zoo\"},\n\t\t},\n\t\t{\n\t\t\tBase:   map[string]string{\"foo\": \"foo\", \"bar\": \"bar\"},\n\t\t\tArg:    map[string]string{\"bar\": \"newbar\"},\n\t\t\tResult: map[string]string{\"foo\": \"foo\", \"bar\": \"newbar\"},\n\t\t},\n\t}\n\n\tfor i, example := range examples {\n\t\ti++\n\t\tresult := models.StatisticTags(example.Base).Merge(example.Arg)\n\t\tif got, exp := result, example.Result; !reflect.DeepEqual(got, exp) {\n\t\t\tt.Errorf(\"[Example %d] got %#v, expected %#v\", i, got, exp)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/models/time.go",
    "content": "package models\n\n// Helper time methods since parsing time can easily overflow and we only support a\n// specific time range.\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"time\"\n)\n\nconst (\n\t// MinNanoTime is the minumum time that can be represented.\n\t//\n\t// 1677-09-21 00:12:43.145224194 +0000 UTC\n\t//\n\t// The two lowest minimum integers are used as sentinel values.  The\n\t// minimum value needs to be used as a value lower than any other value for\n\t// comparisons and another separate value is needed to act as a sentinel\n\t// default value that is unusable by the user, but usable internally.\n\t// Because these two values need to be used for a special purpose, we do\n\t// not allow users to write points at these two times.\n\tMinNanoTime = int64(math.MinInt64) + 2\n\n\t// MaxNanoTime is the maximum time that can be represented.\n\t//\n\t// 2262-04-11 23:47:16.854775806 +0000 UTC\n\t//\n\t// The highest time represented by a nanosecond needs to be used for an\n\t// exclusive range in the shard group, so the maximum time needs to be one\n\t// less than the possible maximum number of nanoseconds representable by an\n\t// int64 so that we don't lose a point at that one time.\n\tMaxNanoTime = int64(math.MaxInt64) - 1\n)\n\nvar (\n\tminNanoTime = time.Unix(0, MinNanoTime).UTC()\n\tmaxNanoTime = time.Unix(0, MaxNanoTime).UTC()\n\n\t// ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch.\n\tErrTimeOutOfRange = fmt.Errorf(\"time outside range %d - %d\", MinNanoTime, MaxNanoTime)\n)\n\n// SafeCalcTime safely calculates the time given. 
Will return error if the time is outside the\n// supported range.\nfunc SafeCalcTime(timestamp int64, precision string) (time.Time, error) {\n\tmult := GetPrecisionMultiplier(precision)\n\tif t, ok := safeSignedMult(timestamp, mult); ok {\n\t\ttme := time.Unix(0, t).UTC()\n\t\treturn tme, CheckTime(tme)\n\t}\n\n\treturn time.Time{}, ErrTimeOutOfRange\n}\n\n// CheckTime checks that a time is within the safe range.\nfunc CheckTime(t time.Time) error {\n\tif t.Before(minNanoTime) || t.After(maxNanoTime) {\n\t\treturn ErrTimeOutOfRange\n\t}\n\treturn nil\n}\n\n// Perform the multiplication and check to make sure it didn't overflow.\nfunc safeSignedMult(a, b int64) (int64, bool) {\n\tif a == 0 || b == 0 || a == 1 || b == 1 {\n\t\treturn a * b, true\n\t}\n\tif a == MinNanoTime || b == MaxNanoTime {\n\t\treturn 0, false\n\t}\n\tc := a * b\n\treturn c, c/b == a\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/monitor/README.md",
    "content": "# System Monitoring\n_This functionality should be considered experimental and is subject to change._\n\n_System Monitoring_ means all statistical and diagnostic information made availabe to the user of InfluxDB system, about the system itself. Its purpose is to assist with troubleshooting and performance analysis of the database itself.\n\n## Statistics vs. Diagnostics\nA distinction is made between _statistics_ and _diagnostics_ for the purposes of monitoring. Generally a statistical quality is something that is being counted, and for which it makes sense to store persistently for historical analysis. Diagnostic information is not necessarily numerical, and may not make sense to store.\n\nAn example of statistical information would be the number of points received over UDP, or the number of queries executed. Examples of diagnostic information would be a list of current Graphite TCP connections, the version of InfluxDB, or the uptime of the process.\n\n## System Statistics\n`SHOW STATS [FOR <module>]` displays statisics about subsystems within the running `influxd` process. Statistics include points received, points indexed, bytes written to disk, TCP connections handled etc. These statistics are all zero when the InfluxDB process starts. If _module_ is specified, it must be single-quoted. For example `SHOW STATS FOR 'httpd'`.\n\nAll statistics are written, by default, by each node to a \"monitor\" database within the InfluxDB system, allowing analysis of aggregated statistical data using the standard InfluxQL language. This allows users to track the performance of their system. Importantly, this allows cluster-level statistics to be viewed, since by querying the monitor database, statistics from all nodes may be queried. 
This can be a very powerful approach for troubleshooting your InfluxDB system and understanding its behaviour.\n\n## System Diagnostics\n`SHOW DIAGNOSTICS [FOR <module>]` displays various diagnostic information about the `influxd` process. This information is not stored persistently within the InfluxDB system. If _module_ is specified, it must be single-quoted. For example `SHOW STATS FOR 'build'`.\n\n## Standard expvar support\nAll statistical information is available at HTTP API endpoint `/debug/vars`, in [expvar](https://golang.org/pkg/expvar/) format, allowing external systems to monitor an InfluxDB node. By default, the full path to this endpoint is `http://localhost:8086/debug/vars`.\n\n## Configuration\nThe `monitor` module allows the following configuration:\n\n * Whether to write statistical and diagnostic information to an InfluxDB system. This is enabled by default.\n * The name of the database to where this information should be written. Defaults to `_internal`. The information is written to the default retention policy for the given database.\n * The name of the retention policy, along with full configuration control of the retention policy, if the default retention policy is not suitable.\n * The rate at which this information should be written. The default rate is once every 10 seconds.\n\n# Design and Implementation\n\nA new module named `monitor` supports all basic statistics and diagnostic functionality. 
This includes:\n\n * Allowing other modules to register statistics and diagnostics information, allowing it to be accessed on demand by the `monitor` module.\n * Serving the statistics and diagnostic information to the user, in response to commands such as `SHOW DIAGNOSTICS`.\n * Expose standard Go runtime information such as garbage collection statistics.\n * Make all collected expvar data via HTTP, for collection by 3rd-party tools.\n * Writing the statistical information to the \"monitor\" database, for query purposes.\n\n## Registering statistics and diagnostics\n\nTo export statistical information with the `monitor` system, a service should implement the `monitor.Reporter` interface. Services added to the Server will be automatically added to the list of statistics returned. Any service that is not added to the `Services` slice will need to modify the `Server`'s `Statistics(map[string]string)` method to aggregate the call to the service's `Statistics(map[string]string)` method so they are combined into a single response. The `Statistics(map[string]string)` method should return a statistics slice with the passed in tags included. The statistics should be kept inside of an internal structure and should be accessed in a thread-safe way. It is common to create a struct for holding the statistics and using `sync/atomic` instead of locking. If using `sync/atomic`, be sure to align the values in the struct so it works properly on `i386`.\n\nTo register diagnostic information, `monitor.RegisterDiagnosticsClient` is called, passing a `influxdb.monitor.DiagsClient` object to `monitor`. Implementing the `influxdb.monitor.DiagsClient` interface requires that your component have function returning diagnostic information in specific form, so that it can be displayed by the `monitor` system.\n\nStatistical information is reset to its initial state when a server is restarted.\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/monitor/build_info.go",
    "content": "package monitor\n\nimport \"github.com/influxdata/influxdb/monitor/diagnostics\"\n\n// build holds information of the build of the current executable.\ntype build struct {\n\tVersion string\n\tCommit  string\n\tBranch  string\n\tTime    string\n}\n\nfunc (b *build) Diagnostics() (*diagnostics.Diagnostics, error) {\n\td := map[string]interface{}{\n\t\t\"Version\":    b.Version,\n\t\t\"Commit\":     b.Commit,\n\t\t\"Branch\":     b.Branch,\n\t\t\"Build Time\": b.Time,\n\t}\n\n\treturn diagnostics.RowFromMap(d), nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/monitor/config.go",
    "content": "package monitor\n\nimport (\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n\t\"github.com/influxdata/influxdb/toml\"\n)\n\nconst (\n\t// DefaultStoreEnabled is whether the system writes gathered information in\n\t// an InfluxDB system for historical analysis.\n\tDefaultStoreEnabled = true\n\n\t// DefaultStoreDatabase is the name of the database where gathered information is written.\n\tDefaultStoreDatabase = \"_internal\"\n\n\t// DefaultStoreInterval is the period between storing gathered information.\n\tDefaultStoreInterval = 10 * time.Second\n)\n\n// Config represents the configuration for the monitor service.\ntype Config struct {\n\tStoreEnabled  bool          `toml:\"store-enabled\"`\n\tStoreDatabase string        `toml:\"store-database\"`\n\tStoreInterval toml.Duration `toml:\"store-interval\"`\n}\n\n// NewConfig returns an instance of Config with defaults.\nfunc NewConfig() Config {\n\treturn Config{\n\t\tStoreEnabled:  true,\n\t\tStoreDatabase: DefaultStoreDatabase,\n\t\tStoreInterval: toml.Duration(DefaultStoreInterval),\n\t}\n}\n\n// Validate validates that the configuration is acceptable.\nfunc (c Config) Validate() error {\n\tif c.StoreInterval <= 0 {\n\t\treturn errors.New(\"monitor store interval must be positive\")\n\t}\n\tif c.StoreDatabase == \"\" {\n\t\treturn errors.New(\"monitor store database name must not be empty\")\n\t}\n\treturn nil\n}\n\n// Diagnostics returns a diagnostics representation of a subset of the Config.\nfunc (c Config) Diagnostics() (*diagnostics.Diagnostics, error) {\n\tif !c.StoreEnabled {\n\t\treturn diagnostics.RowFromMap(map[string]interface{}{\n\t\t\t\"store-enabled\": false,\n\t\t}), nil\n\t}\n\n\treturn diagnostics.RowFromMap(map[string]interface{}{\n\t\t\"store-enabled\":  true,\n\t\t\"store-database\": c.StoreDatabase,\n\t\t\"store-interval\": c.StoreInterval,\n\t}), nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/monitor/config_test.go",
    "content": "package monitor_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/influxdata/influxdb/monitor\"\n)\n\nfunc TestConfig_Parse(t *testing.T) {\n\t// Parse configuration.\n\tvar c monitor.Config\n\tif _, err := toml.Decode(`\nstore-enabled=true\nstore-database=\"the_db\"\nstore-interval=\"10m\"\n`, &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Validate configuration.\n\tif !c.StoreEnabled {\n\t\tt.Fatalf(\"unexpected store-enabled: %v\", c.StoreEnabled)\n\t} else if c.StoreDatabase != \"the_db\" {\n\t\tt.Fatalf(\"unexpected store-database: %s\", c.StoreDatabase)\n\t} else if time.Duration(c.StoreInterval) != 10*time.Minute {\n\t\tt.Fatalf(\"unexpected store-interval:  %s\", c.StoreInterval)\n\t}\n}\n\nfunc TestConfig_Validate(t *testing.T) {\n\t// NewConfig must validate correctly.\n\tc := monitor.NewConfig()\n\tif err := c.Validate(); err != nil {\n\t\tt.Fatalf(\"unexpected validation error: %s\", err)\n\t}\n\n\t// Non-positive duration is invalid.\n\tc = monitor.NewConfig()\n\tc.StoreInterval *= 0\n\tif err := c.Validate(); err == nil {\n\t\tt.Fatalf(\"unexpected successful validation for %#v\", c)\n\t}\n\n\t// Empty database is invalid.\n\tc = monitor.NewConfig()\n\tc.StoreDatabase = \"\"\n\tif err := c.Validate(); err == nil {\n\t\tt.Fatalf(\"unexpected successful validation for %#v\", c)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/monitor/diagnostics/diagnostics.go",
    "content": "// Package diagnostics provides the diagnostics type so that\n// other packages can provide diagnostics without depending on the monitor package.\npackage diagnostics // import \"github.com/influxdata/influxdb/monitor/diagnostics\"\n\nimport \"sort\"\n\n// Client is the interface modules implement if they register diagnostics with monitor.\ntype Client interface {\n\tDiagnostics() (*Diagnostics, error)\n}\n\n// The ClientFunc type is an adapter to allow the use of\n// ordinary functions as Diagnostics clients.\ntype ClientFunc func() (*Diagnostics, error)\n\n// Diagnostics calls f().\nfunc (f ClientFunc) Diagnostics() (*Diagnostics, error) {\n\treturn f()\n}\n\n// Diagnostics represents a table of diagnostic information. The first value\n// is the name of the columns, the second is a slice of interface slices containing\n// the values for each column, by row. This information is never written to an InfluxDB\n// system and is display-only. An example showing, say, connections follows:\n//\n//     source_ip    source_port       dest_ip     dest_port\n//     182.1.0.2    2890              127.0.0.1   38901\n//     174.33.1.2   2924              127.0.0.1   38902\ntype Diagnostics struct {\n\tColumns []string\n\tRows    [][]interface{}\n}\n\n// NewDiagnostic initialises a new Diagnostics with the specified columns.\nfunc NewDiagnostics(columns []string) *Diagnostics {\n\treturn &Diagnostics{\n\t\tColumns: columns,\n\t\tRows:    make([][]interface{}, 0),\n\t}\n}\n\n// AddRow appends the provided row to the Diagnostics' rows.\nfunc (d *Diagnostics) AddRow(r []interface{}) {\n\td.Rows = append(d.Rows, r)\n}\n\n// RowFromMap returns a new one-row Diagnostics from a map.\nfunc RowFromMap(m map[string]interface{}) *Diagnostics {\n\t// Display columns in deterministic order.\n\tsortedKeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tsortedKeys = append(sortedKeys, k)\n\t}\n\tsort.Strings(sortedKeys)\n\n\td := NewDiagnostics(sortedKeys)\n\trow := 
make([]interface{}, len(sortedKeys))\n\tfor i, k := range sortedKeys {\n\t\trow[i] = m[k]\n\t}\n\td.AddRow(row)\n\n\treturn d\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/monitor/go_runtime.go",
    "content": "package monitor\n\nimport (\n\t\"runtime\"\n\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n)\n\n// goRuntime captures Go runtime diagnostics.\ntype goRuntime struct{}\n\nfunc (g *goRuntime) Diagnostics() (*diagnostics.Diagnostics, error) {\n\td := map[string]interface{}{\n\t\t\"GOARCH\":     runtime.GOARCH,\n\t\t\"GOOS\":       runtime.GOOS,\n\t\t\"GOMAXPROCS\": runtime.GOMAXPROCS(-1),\n\t\t\"version\":    runtime.Version(),\n\t}\n\n\treturn diagnostics.RowFromMap(d), nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/monitor/network.go",
    "content": "package monitor\n\nimport (\n\t\"os\"\n\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n)\n\n// network captures network diagnostics.\ntype network struct{}\n\nfunc (n *network) Diagnostics() (*diagnostics.Diagnostics, error) {\n\th, err := os.Hostname()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td := map[string]interface{}{\n\t\t\"hostname\": h,\n\t}\n\n\treturn diagnostics.RowFromMap(d), nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/monitor/reporter.go",
    "content": "package monitor\n\nimport \"github.com/influxdata/influxdb/models\"\n\n// Reporter is an interface for gathering internal statistics.\ntype Reporter interface {\n\t// Statistics returns the statistics for the reporter,\n\t// with the given tags merged into the result.\n\tStatistics(tags map[string]string) []models.Statistic\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/monitor/service.go",
    "content": "// Package monitor provides a service and associated functionality\n// for InfluxDB to self-monitor internal statistics and diagnostics.\npackage monitor // import \"github.com/influxdata/influxdb/monitor\"\n\nimport (\n\t\"errors\"\n\t\"expvar\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/uber-go/zap\"\n)\n\n// Policy constants.\nconst (\n\t// Name of the retention policy used by the monitor service.\n\tMonitorRetentionPolicy = \"monitor\"\n\n\t// Duration of the monitor retention policy.\n\tMonitorRetentionPolicyDuration = 7 * 24 * time.Hour\n\n\t// Default replication factor to set on the monitor retention policy.\n\tMonitorRetentionPolicyReplicaN = 1\n)\n\n// Monitor represents an instance of the monitor system.\ntype Monitor struct {\n\t// Build information for diagnostics.\n\tVersion   string\n\tCommit    string\n\tBranch    string\n\tBuildTime string\n\n\twg sync.WaitGroup\n\n\tmu                sync.RWMutex\n\tglobalTags        map[string]string\n\tdiagRegistrations map[string]diagnostics.Client\n\treporter          Reporter\n\tdone              chan struct{}\n\tstoreCreated      bool\n\tstoreEnabled      bool\n\n\tstoreDatabase        string\n\tstoreRetentionPolicy string\n\tstoreInterval        time.Duration\n\n\tMetaClient interface {\n\t\tCreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error)\n\t\tDatabase(name string) *meta.DatabaseInfo\n\t}\n\n\t// Writer for pushing stats back into the database.\n\tPointsWriter PointsWriter\n\n\tLogger zap.Logger\n}\n\n// PointsWriter is a simplified interface for writing the points the monitor gathers.\ntype PointsWriter interface {\n\tWritePoints(database, retentionPolicy string, points models.Points) error\n}\n\n// New returns a new 
instance of the monitor system.\nfunc New(r Reporter, c Config) *Monitor {\n\treturn &Monitor{\n\t\tglobalTags:           make(map[string]string),\n\t\tdiagRegistrations:    make(map[string]diagnostics.Client),\n\t\treporter:             r,\n\t\tstoreEnabled:         c.StoreEnabled,\n\t\tstoreDatabase:        c.StoreDatabase,\n\t\tstoreInterval:        time.Duration(c.StoreInterval),\n\t\tstoreRetentionPolicy: MonitorRetentionPolicy,\n\t\tLogger:               zap.New(zap.NullEncoder()),\n\t}\n}\n\n// open returns whether the monitor service is open.\nfunc (m *Monitor) open() bool {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\treturn m.done != nil\n}\n\n// Open opens the monitoring system, using the given clusterID, node ID, and hostname\n// for identification purpose.\nfunc (m *Monitor) Open() error {\n\tif m.open() {\n\t\tm.Logger.Info(\"Monitor is already open\")\n\t\treturn nil\n\t}\n\n\tm.Logger.Info(\"Starting monitor system\")\n\n\t// Self-register various stats and diagnostics.\n\tm.RegisterDiagnosticsClient(\"build\", &build{\n\t\tVersion: m.Version,\n\t\tCommit:  m.Commit,\n\t\tBranch:  m.Branch,\n\t\tTime:    m.BuildTime,\n\t})\n\tm.RegisterDiagnosticsClient(\"runtime\", &goRuntime{})\n\tm.RegisterDiagnosticsClient(\"network\", &network{})\n\tm.RegisterDiagnosticsClient(\"system\", &system{})\n\n\tm.mu.Lock()\n\tm.done = make(chan struct{})\n\tm.mu.Unlock()\n\n\t// If enabled, record stats in a InfluxDB system.\n\tif m.storeEnabled {\n\t\t// Start periodic writes to system.\n\t\tm.wg.Add(1)\n\t\tgo m.storeStatistics()\n\t}\n\n\treturn nil\n}\n\nfunc (m *Monitor) writePoints(p models.Points) error {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\n\tif err := m.PointsWriter.WritePoints(m.storeDatabase, m.storeRetentionPolicy, p); err != nil {\n\t\tm.Logger.Info(fmt.Sprintf(\"failed to store statistics: %s\", err))\n\t}\n\treturn nil\n}\n\n// Close closes the monitor system.\nfunc (m *Monitor) Close() error {\n\tif !m.open() {\n\t\tm.Logger.Info(\"Monitor is already 
closed.\")\n\t\treturn nil\n\t}\n\n\tm.Logger.Info(\"shutting down monitor system\")\n\tm.mu.Lock()\n\tclose(m.done)\n\tm.mu.Unlock()\n\n\tm.wg.Wait()\n\n\tm.mu.Lock()\n\tm.done = nil\n\tm.mu.Unlock()\n\n\tm.DeregisterDiagnosticsClient(\"build\")\n\tm.DeregisterDiagnosticsClient(\"runtime\")\n\tm.DeregisterDiagnosticsClient(\"network\")\n\tm.DeregisterDiagnosticsClient(\"system\")\n\treturn nil\n}\n\n// SetGlobalTag can be used to set tags that will appear on all points\n// written by the Monitor.\nfunc (m *Monitor) SetGlobalTag(key string, value interface{}) {\n\tm.mu.Lock()\n\tm.globalTags[key] = fmt.Sprintf(\"%v\", value)\n\tm.mu.Unlock()\n}\n\n// RemoteWriterConfig represents the configuration of a remote writer.\ntype RemoteWriterConfig struct {\n\tRemoteAddr string\n\tNodeID     string\n\tUsername   string\n\tPassword   string\n\tClusterID  uint64\n}\n\n// SetPointsWriter can be used to set a writer for the monitoring points.\nfunc (m *Monitor) SetPointsWriter(pw PointsWriter) error {\n\tif !m.storeEnabled {\n\t\t// not enabled, nothing to do\n\t\treturn nil\n\t}\n\tm.mu.Lock()\n\tm.PointsWriter = pw\n\tm.mu.Unlock()\n\n\t// Subsequent calls to an already open Monitor are just a no-op.\n\treturn m.Open()\n}\n\n// WithLogger sets the logger for the Monitor.\nfunc (m *Monitor) WithLogger(log zap.Logger) {\n\tm.Logger = log.With(zap.String(\"service\", \"monitor\"))\n}\n\n// RegisterDiagnosticsClient registers a diagnostics client with the given name and tags.\nfunc (m *Monitor) RegisterDiagnosticsClient(name string, client diagnostics.Client) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tm.diagRegistrations[name] = client\n\tm.Logger.Info(fmt.Sprintf(`'%s' registered for diagnostics monitoring`, name))\n}\n\n// DeregisterDiagnosticsClient deregisters a diagnostics client by name.\nfunc (m *Monitor) DeregisterDiagnosticsClient(name string) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tdelete(m.diagRegistrations, name)\n}\n\n// Statistics returns the combined 
statistics for all expvar data. The given\n// tags are added to each of the returned statistics.\nfunc (m *Monitor) Statistics(tags map[string]string) ([]*Statistic, error) {\n\tvar statistics []*Statistic\n\n\texpvar.Do(func(kv expvar.KeyValue) {\n\t\t// Skip built-in expvar stats.\n\t\tif kv.Key == \"memstats\" || kv.Key == \"cmdline\" {\n\t\t\treturn\n\t\t}\n\n\t\tstatistic := &Statistic{\n\t\t\tStatistic: models.NewStatistic(\"\"),\n\t\t}\n\n\t\t// Add any supplied tags.\n\t\tfor k, v := range tags {\n\t\t\tstatistic.Tags[k] = v\n\t\t}\n\n\t\t// Every other top-level expvar value is a map.\n\t\tm := kv.Value.(*expvar.Map)\n\n\t\tm.Do(func(subKV expvar.KeyValue) {\n\t\t\tswitch subKV.Key {\n\t\t\tcase \"name\":\n\t\t\t\t// straight to string name.\n\t\t\t\tu, err := strconv.Unquote(subKV.Value.String())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tstatistic.Name = u\n\t\t\tcase \"tags\":\n\t\t\t\t// string-string tags map.\n\t\t\t\tn := subKV.Value.(*expvar.Map)\n\t\t\t\tn.Do(func(t expvar.KeyValue) {\n\t\t\t\t\tu, err := strconv.Unquote(t.Value.String())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tstatistic.Tags[t.Key] = u\n\t\t\t\t})\n\t\t\tcase \"values\":\n\t\t\t\t// string-interface map.\n\t\t\t\tn := subKV.Value.(*expvar.Map)\n\t\t\t\tn.Do(func(kv expvar.KeyValue) {\n\t\t\t\t\tvar f interface{}\n\t\t\t\t\tvar err error\n\t\t\t\t\tswitch v := kv.Value.(type) {\n\t\t\t\t\tcase *expvar.Float:\n\t\t\t\t\t\tf, err = strconv.ParseFloat(v.String(), 64)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\tcase *expvar.Int:\n\t\t\t\t\t\tf, err = strconv.ParseInt(v.String(), 10, 64)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\tdefault:\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tstatistic.Values[kv.Key] = f\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\n\t\t// If a registered client has no field data, don't include it in the results\n\t\tif len(statistic.Values) == 0 
{\n\t\t\treturn\n\t\t}\n\n\t\tstatistics = append(statistics, statistic)\n\t})\n\n\t// Add Go memstats.\n\tstatistic := &Statistic{\n\t\tStatistic: models.NewStatistic(\"runtime\"),\n\t}\n\n\t// Add any supplied tags to Go memstats\n\tfor k, v := range tags {\n\t\tstatistic.Tags[k] = v\n\t}\n\n\tvar rt runtime.MemStats\n\truntime.ReadMemStats(&rt)\n\tstatistic.Values = map[string]interface{}{\n\t\t\"Alloc\":        int64(rt.Alloc),\n\t\t\"TotalAlloc\":   int64(rt.TotalAlloc),\n\t\t\"Sys\":          int64(rt.Sys),\n\t\t\"Lookups\":      int64(rt.Lookups),\n\t\t\"Mallocs\":      int64(rt.Mallocs),\n\t\t\"Frees\":        int64(rt.Frees),\n\t\t\"HeapAlloc\":    int64(rt.HeapAlloc),\n\t\t\"HeapSys\":      int64(rt.HeapSys),\n\t\t\"HeapIdle\":     int64(rt.HeapIdle),\n\t\t\"HeapInUse\":    int64(rt.HeapInuse),\n\t\t\"HeapReleased\": int64(rt.HeapReleased),\n\t\t\"HeapObjects\":  int64(rt.HeapObjects),\n\t\t\"PauseTotalNs\": int64(rt.PauseTotalNs),\n\t\t\"NumGC\":        int64(rt.NumGC),\n\t\t\"NumGoroutine\": int64(runtime.NumGoroutine()),\n\t}\n\tstatistics = append(statistics, statistic)\n\n\tstatistics = m.gatherStatistics(statistics, tags)\n\treturn statistics, nil\n}\n\nfunc (m *Monitor) gatherStatistics(statistics []*Statistic, tags map[string]string) []*Statistic {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\n\tfor _, s := range m.reporter.Statistics(tags) {\n\t\tstatistics = append(statistics, &Statistic{Statistic: s})\n\t}\n\treturn statistics\n}\n\n// Diagnostics fetches diagnostic information for each registered\n// diagnostic client. 
It skips any clients that return an error when\n// retrieving their diagnostics.\nfunc (m *Monitor) Diagnostics() (map[string]*diagnostics.Diagnostics, error) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tdiags := make(map[string]*diagnostics.Diagnostics, len(m.diagRegistrations))\n\tfor k, v := range m.diagRegistrations {\n\t\td, err := v.Diagnostics()\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdiags[k] = d\n\t}\n\treturn diags, nil\n}\n\n// createInternalStorage ensures the internal storage has been created.\nfunc (m *Monitor) createInternalStorage() {\n\tif m.storeCreated {\n\t\treturn\n\t}\n\n\tif di := m.MetaClient.Database(m.storeDatabase); di == nil {\n\t\tduration := MonitorRetentionPolicyDuration\n\t\treplicaN := MonitorRetentionPolicyReplicaN\n\t\tspec := meta.RetentionPolicySpec{\n\t\t\tName:     MonitorRetentionPolicy,\n\t\t\tDuration: &duration,\n\t\t\tReplicaN: &replicaN,\n\t\t}\n\n\t\tif _, err := m.MetaClient.CreateDatabaseWithRetentionPolicy(m.storeDatabase, &spec); err != nil {\n\t\t\tm.Logger.Info(fmt.Sprintf(\"failed to create database '%s', failed to create storage: %s\",\n\t\t\t\tm.storeDatabase, err.Error()))\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Mark storage creation complete.\n\tm.storeCreated = true\n}\n\n// waitUntilInterval waits until we are on an even interval for the duration.\nfunc (m *Monitor) waitUntilInterval(d time.Duration) error {\n\tnow := time.Now()\n\tuntil := now.Truncate(d).Add(d)\n\ttimer := time.NewTimer(until.Sub(now))\n\tdefer timer.Stop()\n\n\tselect {\n\tcase <-timer.C:\n\t\treturn nil\n\tcase <-m.done:\n\t\treturn errors.New(\"interrupted\")\n\t}\n}\n\n// storeStatistics writes the statistics to an InfluxDB system.\nfunc (m *Monitor) storeStatistics() {\n\tdefer m.wg.Done()\n\tm.Logger.Info(fmt.Sprintf(\"Storing statistics in database '%s' retention policy '%s', at interval %s\",\n\t\tm.storeDatabase, m.storeRetentionPolicy, m.storeInterval))\n\n\thostname, _ := os.Hostname()\n\tm.SetGlobalTag(\"hostname\", 
hostname)\n\n\t// Wait until an even interval to start recording monitor statistics.\n\t// If we are interrupted before the interval for some reason, exit early.\n\tif err := m.waitUntilInterval(m.storeInterval); err != nil {\n\t\treturn\n\t}\n\n\ttick := time.NewTicker(m.storeInterval)\n\tdefer tick.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase now := <-tick.C:\n\t\t\tnow = now.Truncate(m.storeInterval)\n\t\t\tfunc() {\n\t\t\t\tm.mu.Lock()\n\t\t\t\tdefer m.mu.Unlock()\n\t\t\t\tm.createInternalStorage()\n\t\t\t}()\n\n\t\t\tstats, err := m.Statistics(m.globalTags)\n\t\t\tif err != nil {\n\t\t\t\tm.Logger.Info(fmt.Sprintf(\"failed to retrieve registered statistics: %s\", err))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Write all stats in batches\n\t\t\tbatch := make(models.Points, 0, 5000)\n\t\t\tfor _, s := range stats {\n\t\t\t\tpt, err := models.NewPoint(s.Name, models.NewTags(s.Tags), s.Values, now)\n\t\t\t\tif err != nil {\n\t\t\t\t\tm.Logger.Info(fmt.Sprintf(\"Dropping point %v: %v\", s.Name, err))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tbatch = append(batch, pt)\n\t\t\t\tif len(batch) == cap(batch) {\n\t\t\t\t\tm.writePoints(batch)\n\t\t\t\t\tbatch = batch[:0]\n\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Write the last batch\n\t\t\tif len(batch) > 0 {\n\t\t\t\tm.writePoints(batch)\n\t\t\t}\n\t\tcase <-m.done:\n\t\t\tm.Logger.Info(fmt.Sprintf(\"terminating storage of statistics\"))\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// Statistic represents the information returned by a single monitor client.\ntype Statistic struct {\n\tmodels.Statistic\n}\n\n// ValueNames returns a sorted list of the value names, if any.\nfunc (s *Statistic) ValueNames() []string {\n\ta := make([]string, 0, len(s.Values))\n\tfor k := range s.Values {\n\t\ta = append(a, k)\n\t}\n\tsort.Strings(a)\n\treturn a\n}\n\n// Statistics is a slice of sortable statistics.\ntype Statistics []*Statistic\n\n// Len implements sort.Interface.\nfunc (a Statistics) Len() int { return len(a) }\n\n// Less implements sort.Interface.\nfunc 
(a Statistics) Less(i, j int) bool {\n\treturn a[i].Name < a[j].Name\n}\n\n// Swap implements sort.Interface.\nfunc (a Statistics) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/monitor/system.go",
    "content": "package monitor\n\nimport (\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n)\n\nvar startTime time.Time\n\nfunc init() {\n\tstartTime = time.Now().UTC()\n}\n\n// system captures system-level diagnostics.\ntype system struct{}\n\nfunc (s *system) Diagnostics() (*diagnostics.Diagnostics, error) {\n\td := map[string]interface{}{\n\t\t\"PID\":         os.Getpid(),\n\t\t\"currentTime\": time.Now().UTC(),\n\t\t\"started\":     startTime,\n\t\t\"uptime\":      time.Since(startTime).String(),\n\t}\n\n\treturn diagnostics.RowFromMap(d), nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/nightly.sh",
    "content": "#!/bin/bash\n\nSWAKS=\"/root/swaks\"\n\n# Bump this whenever a release branch is created from master\nMASTER_VERSION=0.10.0\n\n# send_failure_notification sends an e-mail with a build failure notification.\nfunction send_failure_notification {\n    smtp=$1\n    user=$2\n    password=$3\n    to=$4\n    version=$5\n    $SWAKS --auth \\\n        --server $smtp \\\n        --au $user \\\n        --ap $password \\\n        --to $to \\\n        --h-Subject: \"Nightly build has FAILED\" \\\n        --body \"The nightly build has failed, version: $version\"\n}\n\nif [ $# -lt 4 ]; then\n    echo \"$0 <smtp server> <user> <password> <to> [RACE_ENABLED]\"\n    exit 1\nfi\nSMTP=$1\nUSER=$2\nPASSWORD=$3\nTO=$4\nRACE_ENABLED=$5\n\nif [ -n \"$RACE_ENABLED\" ]; then\n    race=\"-x\"\n    echo \"Race-detection build enabled.\"\nfi\n\nREPO_DIR=`mktemp -d`\necho \"Using $REPO_DIR for all work...\"\n\ncd $REPO_DIR\nexport GOPATH=`pwd`\nmkdir -p $GOPATH/src/github.com/influxdata\ncd $GOPATH/src/github.com/influxdata\ngit clone https://github.com/influxdata/influxdb.git\n\ncd $GOPATH/src/github.com/influxdata/influxdb\nVERSION=\"$MASTER_VERSION-nightly-`git log --pretty=format:'%h' -n 1`\"\nNIGHTLY_BUILD=true ./package.sh $race $VERSION\n\nif [ $? -ne 0 ]; then\n    # Send notification e-mail.\n    send_failure_notification $SMTP $USER $PASSWORD $TO $VERSION\nfi\n\nrm -rf $REPO_DIR\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/node.go",
    "content": "package influxdb\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strconv\"\n)\n\nconst (\n\tnodeFile      = \"node.json\"\n\toldNodeFile   = \"id\"\n\tpeersFilename = \"peers.json\"\n)\n\ntype Node struct {\n\tpath string\n\tID   uint64\n}\n\n// LoadNode will load the node information from disk if present\nfunc LoadNode(path string) (*Node, error) {\n\t// Always check to see if we are upgrading first\n\tif err := upgradeNodeFile(path); err != nil {\n\t\treturn nil, err\n\t}\n\n\tn := &Node{\n\t\tpath: path,\n\t}\n\n\tf, err := os.Open(filepath.Join(path, nodeFile))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tif err := json.NewDecoder(f).Decode(n); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn n, nil\n}\n\n// NewNode will return a new node\nfunc NewNode(path string) *Node {\n\treturn &Node{\n\t\tpath: path,\n\t}\n}\n\n// Save will save the node file to disk and replace the existing one if present\nfunc (n *Node) Save() error {\n\tfile := filepath.Join(n.path, nodeFile)\n\ttmpFile := file + \"tmp\"\n\n\tf, err := os.Create(tmpFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = json.NewEncoder(f).Encode(n); err != nil {\n\t\tf.Close()\n\t\treturn err\n\t}\n\n\tif err = f.Close(); nil != err {\n\t\treturn err\n\t}\n\n\treturn os.Rename(tmpFile, file)\n}\n\nfunc upgradeNodeFile(path string) error {\n\toldFile := filepath.Join(path, oldNodeFile)\n\tb, err := ioutil.ReadFile(oldFile)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\t// We shouldn't have an empty ID file, but if we do, ignore it\n\tif len(b) == 0 {\n\t\treturn nil\n\t}\n\n\tpeers := []string{}\n\tpb, err := ioutil.ReadFile(filepath.Join(path, peersFilename))\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(pb, &peers)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(peers) > 1 
{\n\t\treturn fmt.Errorf(\"to upgrade a cluster, please contact support at influxdata\")\n\t}\n\n\tn := &Node{\n\t\tpath: path,\n\t}\n\tif n.ID, err = strconv.ParseUint(string(b), 10, 64); err != nil {\n\t\treturn err\n\t}\n\tif err := n.Save(); err != nil {\n\t\treturn err\n\t}\n\tif err := os.Remove(oldFile); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/README.md",
    "content": "pkg/ is a collection of utility packages used by the InfluxDB project without being specific to its internals.\n\nUtility packages are kept separate from the InfluxDB core codebase to keep it as small and concise as possible.  If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the InfluxDB organization, to facilitate re-use by other projects. However that is not the priority.\n\nBecause utility packages are small and neatly separated from the rest of the codebase, they are a good place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them!\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/bloom/bloom.go",
    "content": "package bloom\n\n// NOTE:\n// This package implements a limited bloom filter implementation based on\n// Will Fitzgerald's bloom & bitset packages. It's implemented locally to\n// support zero-copy memory-mapped slices.\n//\n// This also optimizes the filter by always using a bitset size with a power of 2.\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\n\t\"github.com/spaolacci/murmur3\"\n)\n\n// Filter represents a bloom filter.\ntype Filter struct {\n\tk    uint64\n\tb    []byte\n\tmask uint64\n}\n\n// NewFilter returns a new instance of Filter using m bits and k hash functions.\n// If m is not a power of two then it is rounded to the next highest power of 2.\nfunc NewFilter(m uint64, k uint64) *Filter {\n\tm = pow2(m)\n\n\treturn &Filter{\n\t\tk:    k,\n\t\tb:    make([]byte, m/8),\n\t\tmask: m - 1,\n\t}\n}\n\n// NewFilterBuffer returns a new instance of a filter using a backing buffer.\n// The buffer length MUST be a power of 2.\nfunc NewFilterBuffer(buf []byte, k uint64) (*Filter, error) {\n\tm := pow2(uint64(len(buf)) * 8)\n\tif m != uint64(len(buf))*8 {\n\t\treturn nil, fmt.Errorf(\"bloom.Filter: buffer bit count must a power of two: %d/%d\", len(buf)*8, m)\n\t}\n\n\treturn &Filter{\n\t\tk:    k,\n\t\tb:    buf,\n\t\tmask: m - 1,\n\t}, nil\n}\n\n// Len returns the number of bits used in the filter.\nfunc (f *Filter) Len() uint { return uint(len(f.b)) }\n\n// K returns the number of hash functions used in the filter.\nfunc (f *Filter) K() uint64 { return f.k }\n\n// Bytes returns the underlying backing slice.\nfunc (f *Filter) Bytes() []byte { return f.b }\n\n// Insert inserts data to the filter.\nfunc (f *Filter) Insert(v []byte) {\n\th := hash(v)\n\tfor i := uint64(0); i < f.k; i++ {\n\t\tloc := f.location(h, i)\n\t\tf.b[loc/8] |= 1 << (loc % 8)\n\t}\n}\n\n// Contains returns true if the filter possibly contains v.\n// Returns false if the filter definitely does not contain v.\nfunc (f *Filter) Contains(v []byte) bool {\n\th := hash(v)\n\tfor i := 
uint64(0); i < f.k; i++ {\n\t\tloc := f.location(h, i)\n\t\tif f.b[loc/8]&(1<<(loc%8)) == 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// Merge performs an in-place union of other into f.\n// Returns an error if m or k of the filters differs.\nfunc (f *Filter) Merge(other *Filter) error {\n\t// Ensure m & k fields match.\n\tif len(f.b) != len(other.b) {\n\t\treturn fmt.Errorf(\"bloom.Filter.Merge(): m mismatch: %d <> %d\", len(f.b), len(other.b))\n\t} else if f.k != other.k {\n\t\treturn fmt.Errorf(\"bloom.Filter.Merge(): k mismatch: %d <> %d\", f.b, other.b)\n\t}\n\n\t// Perform union of each byte.\n\tfor i := range f.b {\n\t\tf.b[i] |= other.b[i]\n\t}\n\n\treturn nil\n}\n\n// location returns the ith hashed location using the four base hash values.\nfunc (f *Filter) location(h [4]uint64, i uint64) uint {\n\treturn uint((h[i%2] + i*h[2+(((i+(i%2))%4)/2)]) & f.mask)\n}\n\n// Estimate returns an estimated bit count and hash count given the element count and false positive rate.\nfunc Estimate(n uint64, p float64) (m uint64, k uint64) {\n\tm = uint64(math.Ceil(-1 * float64(n) * math.Log(p) / math.Pow(math.Log(2), 2)))\n\tk = uint64(math.Ceil(math.Log(2) * float64(m) / float64(n)))\n\treturn m, k\n}\n\n// pow2 returns the number that is the next highest power of 2.\n// Returns v if it is a power of 2.\nfunc pow2(v uint64) uint64 {\n\tfor i := uint64(8); i < 1<<62; i *= 2 {\n\t\tif i >= v {\n\t\t\treturn i\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}\n\n// hash returns a set of 4 based hashes.\nfunc hash(data []byte) [4]uint64 {\n\th := murmur3.New128()\n\th.Write(data)\n\tv1, v2 := h.Sum128()\n\th.Write([]byte{1})\n\tv3, v4 := h.Sum128()\n\treturn [4]uint64{v1, v2, v3, v4}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/bloom/bloom_test.go",
    "content": "package bloom_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/pkg/bloom\"\n)\n\n// Ensure filter can insert values and verify they exist.\nfunc TestFilter_InsertContains(t *testing.T) {\n\tf := bloom.NewFilter(1000, 4)\n\n\t// Insert value and validate.\n\tf.Insert([]byte(\"Bess\"))\n\tif !f.Contains([]byte(\"Bess\")) {\n\t\tt.Fatal(\"expected true\")\n\t}\n\n\t// Insert another value and test.\n\tf.Insert([]byte(\"Emma\"))\n\tif !f.Contains([]byte(\"Emma\")) {\n\t\tt.Fatal(\"expected true\")\n\t}\n\n\t// Validate that a non-existent value doesn't exist.\n\tif f.Contains([]byte(\"Jane\")) {\n\t\tt.Fatal(\"expected false\")\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil.go",
    "content": "package bytesutil\n\nimport (\n\t\"bytes\"\n\t\"sort\"\n)\n\n// Sort sorts a slice of byte slices.\nfunc Sort(a [][]byte) {\n\tsort.Sort(byteSlices(a))\n}\n\nfunc IsSorted(a [][]byte) bool {\n\treturn sort.IsSorted(byteSlices(a))\n}\n\nfunc SearchBytes(a [][]byte, x []byte) int {\n\treturn sort.Search(len(a), func(i int) bool { return bytes.Compare(a[i], x) >= 0 })\n}\n\n// Union returns the union of a & b in sorted order.\nfunc Union(a, b [][]byte) [][]byte {\n\tn := len(b)\n\tif len(a) > len(b) {\n\t\tn = len(a)\n\t}\n\tother := make([][]byte, 0, n)\n\n\tfor {\n\t\tif len(a) > 0 && len(b) > 0 {\n\t\t\tif cmp := bytes.Compare(a[0], b[0]); cmp == 0 {\n\t\t\t\tother, a, b = append(other, a[0]), a[1:], b[1:]\n\t\t\t} else if cmp == -1 {\n\t\t\t\tother, a = append(other, a[0]), a[1:]\n\t\t\t} else {\n\t\t\t\tother, b = append(other, b[0]), b[1:]\n\t\t\t}\n\t\t} else if len(a) > 0 {\n\t\t\tother, a = append(other, a[0]), a[1:]\n\t\t} else if len(b) > 0 {\n\t\t\tother, b = append(other, b[0]), b[1:]\n\t\t} else {\n\t\t\treturn other\n\t\t}\n\t}\n}\n\n// Intersect returns the intersection of a & b in sorted order.\nfunc Intersect(a, b [][]byte) [][]byte {\n\tn := len(b)\n\tif len(a) > len(b) {\n\t\tn = len(a)\n\t}\n\tother := make([][]byte, 0, n)\n\n\tfor len(a) > 0 && len(b) > 0 {\n\t\tif cmp := bytes.Compare(a[0], b[0]); cmp == 0 {\n\t\t\tother, a, b = append(other, a[0]), a[1:], b[1:]\n\t\t} else if cmp == -1 {\n\t\t\ta = a[1:]\n\t\t} else {\n\t\t\tb = b[1:]\n\t\t}\n\t}\n\treturn other\n}\n\ntype byteSlices [][]byte\n\nfunc (a byteSlices) Len() int           { return len(a) }\nfunc (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 }\nfunc (a byteSlices) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/deep/equal.go",
    "content": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// License.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n// * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n// * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n// * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Package deep provides a deep equality check for use in tests.\npackage deep // import \"github.com/influxdata/influxdb/pkg/deep\"\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"reflect\"\n)\n\n// Equal is a copy of reflect.DeepEqual except that it treats NaN == NaN as true.\nfunc Equal(a1, a2 interface{}) bool {\n\tif a1 == nil || a2 == nil {\n\t\treturn a1 == a2\n\t}\n\tv1 := reflect.ValueOf(a1)\n\tv2 := reflect.ValueOf(a2)\n\tif v1.Type() != v2.Type() {\n\t\treturn false\n\t}\n\treturn deepValueEqual(v1, v2, make(map[visit]bool), 0)\n}\n\n// Tests for deep equality using reflected types. 
The map argument tracks\n// comparisons that have already been seen, which allows short circuiting on\n// recursive types.\nfunc deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool {\n\tif !v1.IsValid() || !v2.IsValid() {\n\t\treturn v1.IsValid() == v2.IsValid()\n\t}\n\tif v1.Type() != v2.Type() {\n\t\treturn false\n\t}\n\n\t// if depth > 10 { panic(\"deepValueEqual\") }\t// for debugging\n\thard := func(k reflect.Kind) bool {\n\t\tswitch k {\n\t\tcase reflect.Array, reflect.Map, reflect.Slice, reflect.Struct:\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tif v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) {\n\t\taddr1 := v1.UnsafeAddr()\n\t\taddr2 := v2.UnsafeAddr()\n\t\tif addr1 > addr2 {\n\t\t\t// Canonicalize order to reduce number of entries in visited.\n\t\t\taddr1, addr2 = addr2, addr1\n\t\t}\n\n\t\t// Short circuit if references are identical ...\n\t\tif addr1 == addr2 {\n\t\t\treturn true\n\t\t}\n\n\t\t// ... or already seen\n\t\ttyp := v1.Type()\n\t\tv := visit{addr1, addr2, typ}\n\t\tif visited[v] {\n\t\t\treturn true\n\t\t}\n\n\t\t// Remember for later.\n\t\tvisited[v] = true\n\t}\n\n\tswitch v1.Kind() {\n\tcase reflect.Array:\n\t\tfor i := 0; i < v1.Len(); i++ {\n\t\t\tif !deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase reflect.Slice:\n\t\tif v1.IsNil() != v2.IsNil() {\n\t\t\treturn false\n\t\t}\n\t\tif v1.Len() != v2.Len() {\n\t\t\treturn false\n\t\t}\n\t\tif v1.Pointer() == v2.Pointer() {\n\t\t\treturn true\n\t\t}\n\t\tfor i := 0; i < v1.Len(); i++ {\n\t\t\tif !deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase reflect.Interface:\n\t\tif v1.IsNil() || v2.IsNil() {\n\t\t\treturn v1.IsNil() == v2.IsNil()\n\t\t}\n\t\treturn deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1)\n\tcase reflect.Ptr:\n\t\treturn deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1)\n\tcase 
reflect.Struct:\n\t\tfor i, n := 0, v1.NumField(); i < n; i++ {\n\t\t\tif !deepValueEqual(v1.Field(i), v2.Field(i), visited, depth+1) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase reflect.Map:\n\t\tif v1.IsNil() != v2.IsNil() {\n\t\t\treturn false\n\t\t}\n\t\tif v1.Len() != v2.Len() {\n\t\t\treturn false\n\t\t}\n\t\tif v1.Pointer() == v2.Pointer() {\n\t\t\treturn true\n\t\t}\n\t\tfor _, k := range v1.MapKeys() {\n\t\t\tif !deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase reflect.Func:\n\t\tif v1.IsNil() && v2.IsNil() {\n\t\t\treturn true\n\t\t}\n\t\t// Can't do better than this:\n\t\treturn false\n\tcase reflect.String:\n\t\treturn v1.String() == v2.String()\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn v1.Int() == v2.Int()\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn v1.Uint() == v2.Uint()\n\tcase reflect.Float32, reflect.Float64:\n\t\t// Special handling for floats so that NaN == NaN is true.\n\t\tf1, f2 := v1.Float(), v2.Float()\n\t\tif math.IsNaN(f1) && math.IsNaN(f2) {\n\t\t\treturn true\n\t\t}\n\t\treturn f1 == f2\n\tcase reflect.Bool:\n\t\treturn v1.Bool() == v2.Bool()\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"cannot compare type: %s\", v1.Kind().String()))\n\t}\n}\n\n// During deepValueEqual, must keep track of checks that are\n// in progress.  The comparison algorithm assumes that all\n// checks in progress are true when it reencounters them.\n// Visited comparisons are stored in a map indexed by visit.\ntype visit struct {\n\ta1  uintptr\n\ta2  uintptr\n\ttyp reflect.Type\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go",
    "content": "// Package escape contains utilities for escaping parts of InfluxQL\n// and InfluxDB line protocol.\npackage escape // import \"github.com/influxdata/influxdb/pkg/escape\"\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n)\n\n// Codes is a map of bytes to be escaped.\nvar Codes = map[byte][]byte{\n\t',': []byte(`\\,`),\n\t'\"': []byte(`\\\"`),\n\t' ': []byte(`\\ `),\n\t'=': []byte(`\\=`),\n}\n\n// Bytes escapes characters on the input slice, as defined by Codes.\nfunc Bytes(in []byte) []byte {\n\tfor b, esc := range Codes {\n\t\tin = bytes.Replace(in, []byte{b}, esc, -1)\n\t}\n\treturn in\n}\n\nconst escapeChars = `,\" =`\n\n// IsEscaped returns whether b has any escaped characters,\n// i.e. whether b seems to have been processed by Bytes.\nfunc IsEscaped(b []byte) bool {\n\tfor len(b) > 0 {\n\t\ti := bytes.IndexByte(b, '\\\\')\n\t\tif i < 0 {\n\t\t\treturn false\n\t\t}\n\n\t\tif i+1 < len(b) && strings.IndexByte(escapeChars, b[i+1]) >= 0 {\n\t\t\treturn true\n\t\t}\n\t\tb = b[i+1:]\n\t}\n\treturn false\n}\n\n// AppendUnescaped appends the unescaped version of src to dst\n// and returns the resulting slice.\nfunc AppendUnescaped(dst, src []byte) []byte {\n\tvar pos int\n\tfor len(src) > 0 {\n\t\tnext := bytes.IndexByte(src[pos:], '\\\\')\n\t\tif next < 0 || pos+next+1 >= len(src) {\n\t\t\treturn append(dst, src...)\n\t\t}\n\n\t\tif pos+next+1 < len(src) && strings.IndexByte(escapeChars, src[pos+next+1]) >= 0 {\n\t\t\tif pos+next > 0 {\n\t\t\t\tdst = append(dst, src[:pos+next]...)\n\t\t\t}\n\t\t\tsrc = src[pos+next+1:]\n\t\t\tpos = 0\n\t\t} else {\n\t\t\tpos += next + 1\n\t\t}\n\t}\n\n\treturn dst\n}\n\n// Unescape returns a new slice containing the unescaped version of in.\nfunc Unescape(in []byte) []byte {\n\tif len(in) == 0 {\n\t\treturn nil\n\t}\n\n\tif bytes.IndexByte(in, '\\\\') == -1 {\n\t\treturn in\n\t}\n\n\ti := 0\n\tinLen := len(in)\n\tvar out []byte\n\n\tfor {\n\t\tif i >= inLen {\n\t\t\tbreak\n\t\t}\n\t\tif in[i] == '\\\\' && i+1 < inLen 
{\n\t\t\tswitch in[i+1] {\n\t\t\tcase ',':\n\t\t\t\tout = append(out, ',')\n\t\t\t\ti += 2\n\t\t\t\tcontinue\n\t\t\tcase '\"':\n\t\t\t\tout = append(out, '\"')\n\t\t\t\ti += 2\n\t\t\t\tcontinue\n\t\t\tcase ' ':\n\t\t\t\tout = append(out, ' ')\n\t\t\t\ti += 2\n\t\t\t\tcontinue\n\t\t\tcase '=':\n\t\t\t\tout = append(out, '=')\n\t\t\t\ti += 2\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tout = append(out, in[i])\n\t\ti += 1\n\t}\n\treturn out\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/escape/bytes_test.go",
    "content": "package escape\n\nimport (\n\t\"bytes\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestUnescape(t *testing.T) {\n\ttests := []struct {\n\t\tin  []byte\n\t\tout []byte\n\t}{\n\t\t{\n\t\t\t[]byte(nil),\n\t\t\t[]byte(nil),\n\t\t},\n\n\t\t{\n\t\t\t[]byte(\"\"),\n\t\t\t[]byte(nil),\n\t\t},\n\n\t\t{\n\t\t\t[]byte(\"\\\\,\\\\\\\"\\\\ \\\\=\"),\n\t\t\t[]byte(\",\\\" =\"),\n\t\t},\n\n\t\t{\n\t\t\t[]byte(\"\\\\\\\\\"),\n\t\t\t[]byte(\"\\\\\\\\\"),\n\t\t},\n\n\t\t{\n\t\t\t[]byte(\"plain and simple\"),\n\t\t\t[]byte(\"plain and simple\"),\n\t\t},\n\t}\n\n\tfor ii, tt := range tests {\n\t\tgot := Unescape(tt.in)\n\t\tif !reflect.DeepEqual(got, tt.out) {\n\t\t\tt.Errorf(\"[%d] Unescape(%#v) = %#v, expected %#v\", ii, string(tt.in), string(got), string(tt.out))\n\t\t}\n\t}\n}\n\nfunc TestAppendUnescaped(t *testing.T) {\n\tcases := strings.Split(strings.TrimSpace(`\nnormal\ninv\\alid\ngoo\\\"d\nsp\\ ace\n\\,\\\"\\ \\=\nf\\\\\\ x\n`), \"\\n\")\n\n\tfor _, c := range cases {\n\t\texp := Unescape([]byte(c))\n\t\tgot := AppendUnescaped(nil, []byte(c))\n\n\t\tif !bytes.Equal(got, exp) {\n\t\t\tt.Errorf(\"AppendUnescaped failed for %#q: got %#q, exp %#q\", c, got, exp)\n\t\t}\n\t}\n\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/escape/strings.go",
    "content": "package escape\n\nimport \"strings\"\n\nvar (\n\tescaper   = strings.NewReplacer(`,`, `\\,`, `\"`, `\\\"`, ` `, `\\ `, `=`, `\\=`)\n\tunescaper = strings.NewReplacer(`\\,`, `,`, `\\\"`, `\"`, `\\ `, ` `, `\\=`, `=`)\n)\n\n// UnescapeString returns unescaped version of in.\nfunc UnescapeString(in string) string {\n\tif strings.IndexByte(in, '\\\\') == -1 {\n\t\treturn in\n\t}\n\treturn unescaper.Replace(in)\n}\n\n// String returns the escaped version of in.\nfunc String(in string) string {\n\treturn escaper.Replace(in)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/escape/strings_test.go",
    "content": "package escape\n\nimport (\n\t\"testing\"\n)\n\nvar s string\n\nfunc BenchmarkStringEscapeNoEscapes(b *testing.B) {\n\tfor n := 0; n < b.N; n++ {\n\t\ts = String(\"no_escapes\")\n\t}\n}\n\nfunc BenchmarkStringUnescapeNoEscapes(b *testing.B) {\n\tfor n := 0; n < b.N; n++ {\n\t\ts = UnescapeString(\"no_escapes\")\n\t}\n}\n\nfunc BenchmarkManyStringEscape(b *testing.B) {\n\ttests := []string{\n\t\t\"this is my special string\",\n\t\t\"a field w=i th == tons of escapes\",\n\t\t\"some,commas,here\",\n\t}\n\n\tfor n := 0; n < b.N; n++ {\n\t\tfor _, test := range tests {\n\t\t\ts = String(test)\n\t\t}\n\t}\n}\n\nfunc BenchmarkManyStringUnescape(b *testing.B) {\n\ttests := []string{\n\t\t`this\\ is\\ my\\ special\\ string`,\n\t\t`a\\ field\\ w\\=i\\ th\\ \\=\\=\\ tons\\ of\\ escapes`,\n\t\t`some\\,commas\\,here`,\n\t}\n\n\tfor n := 0; n < b.N; n++ {\n\t\tfor _, test := range tests {\n\t\t\ts = UnescapeString(test)\n\t\t}\n\t}\n}\n\nfunc TestStringEscape(t *testing.T) {\n\ttests := []struct {\n\t\tin       string\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tin:       \"\",\n\t\t\texpected: \"\",\n\t\t},\n\t\t{\n\t\t\tin:       \"this is my special string\",\n\t\t\texpected: `this\\ is\\ my\\ special\\ string`,\n\t\t},\n\t\t{\n\t\t\tin:       \"a field w=i th == tons of escapes\",\n\t\t\texpected: `a\\ field\\ w\\=i\\ th\\ \\=\\=\\ tons\\ of\\ escapes`,\n\t\t},\n\t\t{\n\t\t\tin:       \"no_escapes\",\n\t\t\texpected: \"no_escapes\",\n\t\t},\n\t\t{\n\t\t\tin:       \"some,commas,here\",\n\t\t\texpected: `some\\,commas\\,here`,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tif test.expected != String(test.in) {\n\t\t\tt.Errorf(\"Got %s, expected %s\", String(test.in), test.expected)\n\t\t}\n\t}\n}\n\nfunc TestStringUnescape(t *testing.T) {\n\ttests := []struct {\n\t\tin       string\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tin:       \"\",\n\t\t\texpected: \"\",\n\t\t},\n\t\t{\n\t\t\tin:       `this\\ is\\ my\\ special\\ string`,\n\t\t\texpected: \"this is my 
special string\",\n\t\t},\n\t\t{\n\t\t\tin:       `a\\ field\\ w\\=i\\ th\\ \\=\\=\\ tons\\ of\\ escapes`,\n\t\t\texpected: \"a field w=i th == tons of escapes\",\n\t\t},\n\t\t{\n\t\t\tin:       \"no_escapes\",\n\t\t\texpected: \"no_escapes\",\n\t\t},\n\t\t{\n\t\t\tin:       `some\\,commas\\,here`,\n\t\t\texpected: \"some,commas,here\",\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tif test.expected != UnescapeString(test.in) {\n\t\t\tt.Errorf(\"Got %s, expected %s\", UnescapeString(test.in), test.expected)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/estimator/hll/compressed.go",
    "content": "package hll\n\nimport \"encoding/binary\"\n\n// Original author of this file is github.com/clarkduvall/hyperloglog\ntype iterable interface {\n\tdecode(i int, last uint32) (uint32, int)\n\tLen() int\n\tIter() *iterator\n}\n\ntype iterator struct {\n\ti    int\n\tlast uint32\n\tv    iterable\n}\n\nfunc (iter *iterator) Next() uint32 {\n\tn, i := iter.v.decode(iter.i, iter.last)\n\titer.last = n\n\titer.i = i\n\treturn n\n}\n\nfunc (iter *iterator) Peek() uint32 {\n\tn, _ := iter.v.decode(iter.i, iter.last)\n\treturn n\n}\n\nfunc (iter iterator) HasNext() bool {\n\treturn iter.i < iter.v.Len()\n}\n\ntype compressedList struct {\n\tcount uint32\n\tlast  uint32\n\tb     variableLengthList\n}\n\nfunc (v *compressedList) Clone() *compressedList {\n\tif v == nil {\n\t\treturn nil\n\t}\n\n\tnewV := &compressedList{\n\t\tcount: v.count,\n\t\tlast:  v.last,\n\t}\n\n\tnewV.b = make(variableLengthList, len(v.b))\n\tcopy(newV.b, v.b)\n\treturn newV\n}\n\nfunc (v *compressedList) MarshalBinary() (data []byte, err error) {\n\t// Marshal the variableLengthList\n\tbdata, err := v.b.MarshalBinary()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// At least 4 bytes for the two fixed sized values plus the size of bdata.\n\tdata = make([]byte, 0, 4+4+len(bdata))\n\n\t// Marshal the count and last values.\n\tdata = append(data, []byte{\n\t\t// Number of items in the list.\n\t\tbyte(v.count >> 24),\n\t\tbyte(v.count >> 16),\n\t\tbyte(v.count >> 8),\n\t\tbyte(v.count),\n\t\t// The last item in the list.\n\t\tbyte(v.last >> 24),\n\t\tbyte(v.last >> 16),\n\t\tbyte(v.last >> 8),\n\t\tbyte(v.last),\n\t}...)\n\n\t// Append the list\n\treturn append(data, bdata...), nil\n}\n\nfunc (v *compressedList) UnmarshalBinary(data []byte) error {\n\t// Set the count.\n\tv.count, data = binary.BigEndian.Uint32(data[:4]), data[4:]\n\n\t// Set the last value.\n\tv.last, data = binary.BigEndian.Uint32(data[:4]), data[4:]\n\n\t// Set the list.\n\tsz, data := 
binary.BigEndian.Uint32(data[:4]), data[4:]\n\tv.b = make([]uint8, sz)\n\tfor i := uint32(0); i < sz; i++ {\n\t\tv.b[i] = uint8(data[i])\n\t}\n\treturn nil\n}\n\nfunc newCompressedList(size int) *compressedList {\n\tv := &compressedList{}\n\tv.b = make(variableLengthList, 0, size)\n\treturn v\n}\n\nfunc (v *compressedList) Len() int {\n\treturn len(v.b)\n}\n\nfunc (v *compressedList) decode(i int, last uint32) (uint32, int) {\n\tn, i := v.b.decode(i, last)\n\treturn n + last, i\n}\n\nfunc (v *compressedList) Append(x uint32) {\n\tv.count++\n\tv.b = v.b.Append(x - v.last)\n\tv.last = x\n}\n\nfunc (v *compressedList) Iter() *iterator {\n\treturn &iterator{0, 0, v}\n}\n\ntype variableLengthList []uint8\n\nfunc (v variableLengthList) MarshalBinary() (data []byte, err error) {\n\t// 4 bytes for the size of the list, and a byte for each element in the\n\t// list.\n\tdata = make([]byte, 0, 4+v.Len())\n\n\t// Length of the list. We only need 32 bits because the size of the set\n\t// couldn't exceed that on 32 bit architectures.\n\tsz := v.Len()\n\tdata = append(data, []byte{\n\t\tbyte(sz >> 24),\n\t\tbyte(sz >> 16),\n\t\tbyte(sz >> 8),\n\t\tbyte(sz),\n\t}...)\n\n\t// Marshal each element in the list.\n\tfor i := 0; i < sz; i++ {\n\t\tdata = append(data, byte(v[i]))\n\t}\n\n\treturn data, nil\n}\n\nfunc (v variableLengthList) Len() int {\n\treturn len(v)\n}\n\nfunc (v *variableLengthList) Iter() *iterator {\n\treturn &iterator{0, 0, v}\n}\n\nfunc (v variableLengthList) decode(i int, last uint32) (uint32, int) {\n\tvar x uint32\n\tj := i\n\tfor ; v[j]&0x80 != 0; j++ {\n\t\tx |= uint32(v[j]&0x7f) << (uint(j-i) * 7)\n\t}\n\tx |= uint32(v[j]) << (uint(j-i) * 7)\n\treturn x, j + 1\n}\n\nfunc (v variableLengthList) Append(x uint32) variableLengthList {\n\tfor x&0xffffff80 != 0 {\n\t\tv = append(v, uint8((x&0x7f)|0x80))\n\t\tx >>= 7\n\t}\n\treturn append(v, uint8(x&0x7f))\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/estimator/hll/empirical_data.go",
    "content": "package hll\n\nvar rawEstimateData = [][]float64{\n\t// precision 4\n\t{11, 11.717, 12.207, 12.7896, 13.2882, 13.8204, 14.3772, 14.9342, 15.5202, 16.161, 16.7722, 17.4636, 18.0396, 18.6766, 19.3566, 20.0454, 20.7936, 21.4856, 22.2666, 22.9946, 23.766, 24.4692, 25.3638, 26.0764, 26.7864, 27.7602, 28.4814, 29.433, 30.2926, 31.0664, 31.9996, 32.7956, 33.5366, 34.5894, 35.5738, 36.2698, 37.3682, 38.0544, 39.2342, 40.0108, 40.7966, 41.9298, 42.8704, 43.6358, 44.5194, 45.773, 46.6772, 47.6174, 48.4888, 49.3304, 50.2506, 51.4996, 52.3824, 53.3078, 54.3984, 55.5838, 56.6618, 57.2174, 58.3514, 59.0802, 60.1482, 61.0376, 62.3598, 62.8078, 63.9744, 64.914, 65.781, 67.1806, 68.0594, 68.8446, 69.7928, 70.8248, 71.8324, 72.8598, 73.6246, 74.7014, 75.393, 76.6708, 77.2394},\n\t// precision 5\n\t{23, 23.1194, 23.8208, 24.2318, 24.77, 25.2436, 25.7774, 26.2848, 26.8224, 27.3742, 27.9336, 28.503, 29.0494, 29.6292, 30.2124, 30.798, 31.367, 31.9728, 32.5944, 33.217, 33.8438, 34.3696, 35.0956, 35.7044, 36.324, 37.0668, 37.6698, 38.3644, 39.049, 39.6918, 40.4146, 41.082, 41.687, 42.5398, 43.2462, 43.857, 44.6606, 45.4168, 46.1248, 46.9222, 47.6804, 48.447, 49.3454, 49.9594, 50.7636, 51.5776, 52.331, 53.19, 53.9676, 54.7564, 55.5314, 56.4442, 57.3708, 57.9774, 58.9624, 59.8796, 60.755, 61.472, 62.2076, 63.1024, 63.8908, 64.7338, 65.7728, 66.629, 67.413, 68.3266, 69.1524, 70.2642, 71.1806, 72.0566, 72.9192, 73.7598, 74.3516, 75.5802, 76.4386, 77.4916, 78.1524, 79.1892, 79.8414, 80.8798, 81.8376, 82.4698, 83.7656, 84.331, 85.5914, 86.6012, 87.7016, 88.5582, 89.3394, 90.3544, 91.4912, 92.308, 93.3552, 93.9746, 95.2052, 95.727, 97.1322, 98.3944, 98.7588, 100.242, 101.1914, 102.2538, 102.8776, 103.6292, 105.1932, 105.9152, 107.0868, 107.6728, 108.7144, 110.3114, 110.8716, 111.245, 112.7908, 113.7064, 114.636, 115.7464, 116.1788, 117.7464, 118.4896, 119.6166, 120.5082, 121.7798, 122.9028, 123.4426, 124.8854, 125.705, 126.4652, 128.3464, 128.3462, 130.0398, 131.0342, 131.0042, 
132.4766, 133.511, 134.7252, 135.425, 136.5172, 138.0572, 138.6694, 139.3712, 140.8598, 141.4594, 142.554, 143.4006, 144.7374, 146.1634, 146.8994, 147.605, 147.9304, 149.1636, 150.2468, 151.5876, 152.2096, 153.7032, 154.7146, 155.807, 156.9228, 157.0372, 158.5852},\n\t// precision 6\n\t{46, 46.1902, 47.271, 47.8358, 48.8142, 49.2854, 50.317, 51.354, 51.8924, 52.9436, 53.4596, 54.5262, 55.6248, 56.1574, 57.2822, 57.837, 58.9636, 60.074, 60.7042, 61.7976, 62.4772, 63.6564, 64.7942, 65.5004, 66.686, 67.291, 68.5672, 69.8556, 70.4982, 71.8204, 72.4252, 73.7744, 75.0786, 75.8344, 77.0294, 77.8098, 79.0794, 80.5732, 81.1878, 82.5648, 83.2902, 84.6784, 85.3352, 86.8946, 88.3712, 89.0852, 90.499, 91.2686, 92.6844, 94.2234, 94.9732, 96.3356, 97.2286, 98.7262, 100.3284, 101.1048, 102.5962, 103.3562, 105.1272, 106.4184, 107.4974, 109.0822, 109.856, 111.48, 113.2834, 114.0208, 115.637, 116.5174, 118.0576, 119.7476, 120.427, 122.1326, 123.2372, 125.2788, 126.6776, 127.7926, 129.1952, 129.9564, 131.6454, 133.87, 134.5428, 136.2, 137.0294, 138.6278, 139.6782, 141.792, 143.3516, 144.2832, 146.0394, 147.0748, 148.4912, 150.849, 151.696, 153.5404, 154.073, 156.3714, 157.7216, 158.7328, 160.4208, 161.4184, 163.9424, 165.2772, 166.411, 168.1308, 168.769, 170.9258, 172.6828, 173.7502, 175.706, 176.3886, 179.0186, 180.4518, 181.927, 183.4172, 184.4114, 186.033, 188.5124, 189.5564, 191.6008, 192.4172, 193.8044, 194.997, 197.4548, 198.8948, 200.2346, 202.3086, 203.1548, 204.8842, 206.6508, 206.6772, 209.7254, 210.4752, 212.7228, 214.6614, 215.1676, 217.793, 218.0006, 219.9052, 221.66, 223.5588, 225.1636, 225.6882, 227.7126, 229.4502, 231.1978, 232.9756, 233.1654, 236.727, 238.1974, 237.7474, 241.1346, 242.3048, 244.1948, 245.3134, 246.879, 249.1204, 249.853, 252.6792, 253.857, 254.4486, 257.2362, 257.9534, 260.0286, 260.5632, 262.663, 264.723, 265.7566, 267.2566, 267.1624, 270.62, 272.8216, 273.2166, 275.2056, 276.2202, 278.3726, 280.3344, 281.9284, 283.9728, 284.1924, 286.4872, 287.587, 
289.807, 291.1206, 292.769, 294.8708, 296.665, 297.1182, 299.4012, 300.6352, 302.1354, 304.1756, 306.1606, 307.3462, 308.5214, 309.4134, 310.8352, 313.9684, 315.837, 316.7796, 318.9858},\n\t// precision 7\n\t{92, 93.4934, 94.9758, 96.4574, 97.9718, 99.4954, 101.5302, 103.0756, 104.6374, 106.1782, 107.7888, 109.9522, 111.592, 113.2532, 114.9086, 116.5938, 118.9474, 120.6796, 122.4394, 124.2176, 125.9768, 128.4214, 130.2528, 132.0102, 133.8658, 135.7278, 138.3044, 140.1316, 142.093, 144.0032, 145.9092, 148.6306, 150.5294, 152.5756, 154.6508, 156.662, 159.552, 161.3724, 163.617, 165.5754, 167.7872, 169.8444, 172.7988, 174.8606, 177.2118, 179.3566, 181.4476, 184.5882, 186.6816, 189.0824, 191.0258, 193.6048, 196.4436, 198.7274, 200.957, 203.147, 205.4364, 208.7592, 211.3386, 213.781, 215.8028, 218.656, 221.6544, 223.996, 226.4718, 229.1544, 231.6098, 234.5956, 237.0616, 239.5758, 242.4878, 244.5244, 248.2146, 250.724, 252.8722, 255.5198, 258.0414, 261.941, 264.9048, 266.87, 269.4304, 272.028, 274.4708, 278.37, 281.0624, 283.4668, 286.5532, 289.4352, 293.2564, 295.2744, 298.2118, 300.7472, 304.1456, 307.2928, 309.7504, 312.5528, 315.979, 318.2102, 322.1834, 324.3494, 327.325, 330.6614, 332.903, 337.2544, 339.9042, 343.215, 345.2864, 348.0814, 352.6764, 355.301, 357.139, 360.658, 363.1732, 366.5902, 369.9538, 373.0828, 375.922, 378.9902, 382.7328, 386.4538, 388.1136, 391.2234, 394.0878, 396.708, 401.1556, 404.1852, 406.6372, 409.6822, 412.7796, 416.6078, 418.4916, 422.131, 424.5376, 428.1988, 432.211, 434.4502, 438.5282, 440.912, 444.0448, 447.7432, 450.8524, 453.7988, 456.7858, 458.8868, 463.9886, 466.5064, 468.9124, 472.6616, 475.4682, 478.582, 481.304, 485.2738, 488.6894, 490.329, 496.106, 497.6908, 501.1374, 504.5322, 506.8848, 510.3324, 513.4512, 516.179, 520.4412, 522.6066, 526.167, 528.7794, 533.379, 536.067, 538.46, 542.9116, 545.692, 547.9546, 552.493, 555.2722, 557.335, 562.449, 564.2014, 569.0738, 571.0974, 574.8564, 578.2996, 581.409, 583.9704, 585.8098, 
589.6528, 594.5998, 595.958, 600.068, 603.3278, 608.2016, 609.9632, 612.864, 615.43, 620.7794, 621.272, 625.8644, 629.206, 633.219, 634.5154, 638.6102},\n\t// precision 8\n\t{184.2152, 187.2454, 190.2096, 193.6652, 196.6312, 199.6822, 203.249, 206.3296, 210.0038, 213.2074, 216.4612, 220.27, 223.5178, 227.4412, 230.8032, 234.1634, 238.1688, 241.6074, 245.6946, 249.2664, 252.8228, 257.0432, 260.6824, 264.9464, 268.6268, 272.2626, 276.8376, 280.4034, 284.8956, 288.8522, 292.7638, 297.3552, 301.3556, 305.7526, 309.9292, 313.8954, 318.8198, 322.7668, 327.298, 331.6688, 335.9466, 340.9746, 345.1672, 349.3474, 354.3028, 358.8912, 364.114, 368.4646, 372.9744, 378.4092, 382.6022, 387.843, 392.5684, 397.1652, 402.5426, 407.4152, 412.5388, 417.3592, 422.1366, 427.486, 432.3918, 437.5076, 442.509, 447.3834, 453.3498, 458.0668, 463.7346, 469.1228, 473.4528, 479.7, 484.644, 491.0518, 495.5774, 500.9068, 506.432, 512.1666, 517.434, 522.6644, 527.4894, 533.6312, 538.3804, 544.292, 550.5496, 556.0234, 562.8206, 566.6146, 572.4188, 579.117, 583.6762, 590.6576, 595.7864, 601.509, 607.5334, 612.9204, 619.772, 624.2924, 630.8654, 636.1836, 642.745, 649.1316, 655.0386, 660.0136, 666.6342, 671.6196, 678.1866, 684.4282, 689.3324, 695.4794, 702.5038, 708.129, 713.528, 720.3204, 726.463, 732.7928, 739.123, 744.7418, 751.2192, 756.5102, 762.6066, 769.0184, 775.2224, 781.4014, 787.7618, 794.1436, 798.6506, 805.6378, 811.766, 819.7514, 824.5776, 828.7322, 837.8048, 843.6302, 849.9336, 854.4798, 861.3388, 867.9894, 873.8196, 880.3136, 886.2308, 892.4588, 899.0816, 905.4076, 912.0064, 917.3878, 923.619, 929.998, 937.3482, 943.9506, 947.991, 955.1144, 962.203, 968.8222, 975.7324, 981.7826, 988.7666, 994.2648, 1000.3128, 1007.4082, 1013.7536, 1020.3376, 1026.7156, 1031.7478, 1037.4292, 1045.393, 1051.2278, 1058.3434, 1062.8726, 1071.884, 1076.806, 1082.9176, 1089.1678, 1095.5032, 1102.525, 1107.2264, 1115.315, 1120.93, 1127.252, 1134.1496, 1139.0408, 1147.5448, 1153.3296, 1158.1974, 1166.5262, 
1174.3328, 1175.657, 1184.4222, 1190.9172, 1197.1292, 1204.4606, 1210.4578, 1218.8728, 1225.3336, 1226.6592, 1236.5768, 1241.363, 1249.4074, 1254.6566, 1260.8014, 1266.5454, 1274.5192},\n\t// precision 9\n\t{369, 374.8294, 381.2452, 387.6698, 394.1464, 400.2024, 406.8782, 413.6598, 420.462, 427.2826, 433.7102, 440.7416, 447.9366, 455.1046, 462.285, 469.0668, 476.306, 483.8448, 491.301, 498.9886, 506.2422, 513.8138, 521.7074, 529.7428, 537.8402, 545.1664, 553.3534, 561.594, 569.6886, 577.7876, 585.65, 594.228, 602.8036, 611.1666, 620.0818, 628.0824, 637.2574, 646.302, 655.1644, 664.0056, 672.3802, 681.7192, 690.5234, 700.2084, 708.831, 718.485, 728.1112, 737.4764, 746.76, 756.3368, 766.5538, 775.5058, 785.2646, 795.5902, 804.3818, 814.8998, 824.9532, 835.2062, 845.2798, 854.4728, 864.9582, 875.3292, 886.171, 896.781, 906.5716, 916.7048, 927.5322, 937.875, 949.3972, 958.3464, 969.7274, 980.2834, 992.1444, 1003.4264, 1013.0166, 1024.018, 1035.0438, 1046.34, 1057.6856, 1068.9836, 1079.0312, 1091.677, 1102.3188, 1113.4846, 1124.4424, 1135.739, 1147.1488, 1158.9202, 1169.406, 1181.5342, 1193.2834, 1203.8954, 1216.3286, 1226.2146, 1239.6684, 1251.9946, 1262.123, 1275.4338, 1285.7378, 1296.076, 1308.9692, 1320.4964, 1333.0998, 1343.9864, 1357.7754, 1368.3208, 1380.4838, 1392.7388, 1406.0758, 1416.9098, 1428.9728, 1440.9228, 1453.9292, 1462.617, 1476.05, 1490.2996, 1500.6128, 1513.7392, 1524.5174, 1536.6322, 1548.2584, 1562.3766, 1572.423, 1587.1232, 1596.5164, 1610.5938, 1622.5972, 1633.1222, 1647.7674, 1658.5044, 1671.57, 1683.7044, 1695.4142, 1708.7102, 1720.6094, 1732.6522, 1747.841, 1756.4072, 1769.9786, 1782.3276, 1797.5216, 1808.3186, 1819.0694, 1834.354, 1844.575, 1856.2808, 1871.1288, 1880.7852, 1893.9622, 1906.3418, 1920.6548, 1932.9302, 1945.8584, 1955.473, 1968.8248, 1980.6446, 1995.9598, 2008.349, 2019.8556, 2033.0334, 2044.0206, 2059.3956, 2069.9174, 2082.6084, 2093.7036, 2106.6108, 2118.9124, 2132.301, 2144.7628, 2159.8422, 2171.0212, 2183.101, 2193.5112, 
2208.052, 2221.3194, 2233.3282, 2247.295, 2257.7222, 2273.342, 2286.5638, 2299.6786, 2310.8114, 2322.3312, 2335.516, 2349.874, 2363.5968, 2373.865, 2387.1918, 2401.8328, 2414.8496, 2424.544, 2436.7592, 2447.1682, 2464.1958, 2474.3438, 2489.0006, 2497.4526, 2513.6586, 2527.19, 2540.7028, 2553.768},\n\t// precision 10\n\t{738.1256, 750.4234, 763.1064, 775.4732, 788.4636, 801.0644, 814.488, 827.9654, 841.0832, 854.7864, 868.1992, 882.2176, 896.5228, 910.1716, 924.7752, 938.899, 953.6126, 968.6492, 982.9474, 998.5214, 1013.1064, 1028.6364, 1044.2468, 1059.4588, 1075.3832, 1091.0584, 1106.8606, 1123.3868, 1139.5062, 1156.1862, 1172.463, 1189.339, 1206.1936, 1223.1292, 1240.1854, 1257.2908, 1275.3324, 1292.8518, 1310.5204, 1328.4854, 1345.9318, 1364.552, 1381.4658, 1400.4256, 1419.849, 1438.152, 1456.8956, 1474.8792, 1494.118, 1513.62, 1532.5132, 1551.9322, 1570.7726, 1590.6086, 1610.5332, 1630.5918, 1650.4294, 1669.7662, 1690.4106, 1710.7338, 1730.9012, 1750.4486, 1770.1556, 1791.6338, 1812.7312, 1833.6264, 1853.9526, 1874.8742, 1896.8326, 1918.1966, 1939.5594, 1961.07, 1983.037, 2003.1804, 2026.071, 2047.4884, 2070.0848, 2091.2944, 2114.333, 2135.9626, 2158.2902, 2181.0814, 2202.0334, 2224.4832, 2246.39, 2269.7202, 2292.1714, 2314.2358, 2338.9346, 2360.891, 2384.0264, 2408.3834, 2430.1544, 2454.8684, 2476.9896, 2501.4368, 2522.8702, 2548.0408, 2570.6738, 2593.5208, 2617.0158, 2640.2302, 2664.0962, 2687.4986, 2714.2588, 2735.3914, 2759.6244, 2781.8378, 2808.0072, 2830.6516, 2856.2454, 2877.2136, 2903.4546, 2926.785, 2951.2294, 2976.468, 3000.867, 3023.6508, 3049.91, 3073.5984, 3098.162, 3121.5564, 3146.2328, 3170.9484, 3195.5902, 3221.3346, 3242.7032, 3271.6112, 3296.5546, 3317.7376, 3345.072, 3369.9518, 3394.326, 3418.1818, 3444.6926, 3469.086, 3494.2754, 3517.8698, 3544.248, 3565.3768, 3588.7234, 3616.979, 3643.7504, 3668.6812, 3695.72, 3719.7392, 3742.6224, 3770.4456, 3795.6602, 3819.9058, 3844.002, 3869.517, 3895.6824, 3920.8622, 3947.1364, 3973.985, 3995.4772, 
4021.62, 4046.628, 4074.65, 4096.2256, 4121.831, 4146.6406, 4173.276, 4195.0744, 4223.9696, 4251.3708, 4272.9966, 4300.8046, 4326.302, 4353.1248, 4374.312, 4403.0322, 4426.819, 4450.0598, 4478.5206, 4504.8116, 4528.8928, 4553.9584, 4578.8712, 4603.8384, 4632.3872, 4655.5128, 4675.821, 4704.6222, 4731.9862, 4755.4174, 4781.2628, 4804.332, 4832.3048, 4862.8752, 4883.4148, 4906.9544, 4935.3516, 4954.3532, 4984.0248, 5011.217, 5035.3258, 5057.3672, 5084.1828},\n\t// precision 11\n\t{1477, 1501.6014, 1526.5802, 1551.7942, 1577.3042, 1603.2062, 1629.8402, 1656.2292, 1682.9462, 1709.9926, 1737.3026, 1765.4252, 1793.0578, 1821.6092, 1849.626, 1878.5568, 1908.527, 1937.5154, 1967.1874, 1997.3878, 2027.37, 2058.1972, 2089.5728, 2120.1012, 2151.9668, 2183.292, 2216.0772, 2247.8578, 2280.6562, 2313.041, 2345.714, 2380.3112, 2414.1806, 2447.9854, 2481.656, 2516.346, 2551.5154, 2586.8378, 2621.7448, 2656.6722, 2693.5722, 2729.1462, 2765.4124, 2802.8728, 2838.898, 2876.408, 2913.4926, 2951.4938, 2989.6776, 3026.282, 3065.7704, 3104.1012, 3143.7388, 3181.6876, 3221.1872, 3261.5048, 3300.0214, 3339.806, 3381.409, 3421.4144, 3461.4294, 3502.2286, 3544.651, 3586.6156, 3627.337, 3670.083, 3711.1538, 3753.5094, 3797.01, 3838.6686, 3882.1678, 3922.8116, 3967.9978, 4009.9204, 4054.3286, 4097.5706, 4140.6014, 4185.544, 4229.5976, 4274.583, 4316.9438, 4361.672, 4406.2786, 4451.8628, 4496.1834, 4543.505, 4589.1816, 4632.5188, 4678.2294, 4724.8908, 4769.0194, 4817.052, 4861.4588, 4910.1596, 4956.4344, 5002.5238, 5048.13, 5093.6374, 5142.8162, 5187.7894, 5237.3984, 5285.6078, 5331.0858, 5379.1036, 5428.6258, 5474.6018, 5522.7618, 5571.5822, 5618.59, 5667.9992, 5714.88, 5763.454, 5808.6982, 5860.3644, 5910.2914, 5953.571, 6005.9232, 6055.1914, 6104.5882, 6154.5702, 6199.7036, 6251.1764, 6298.7596, 6350.0302, 6398.061, 6448.4694, 6495.933, 6548.0474, 6597.7166, 6646.9416, 6695.9208, 6742.6328, 6793.5276, 6842.1934, 6894.2372, 6945.3864, 6996.9228, 7044.2372, 7094.1374, 7142.2272, 7192.2942, 
7238.8338, 7288.9006, 7344.0908, 7394.8544, 7443.5176, 7490.4148, 7542.9314, 7595.6738, 7641.9878, 7694.3688, 7743.0448, 7797.522, 7845.53, 7899.594, 7950.3132, 7996.455, 8050.9442, 8092.9114, 8153.1374, 8197.4472, 8252.8278, 8301.8728, 8348.6776, 8401.4698, 8453.551, 8504.6598, 8553.8944, 8604.1276, 8657.6514, 8710.3062, 8758.908, 8807.8706, 8862.1702, 8910.4668, 8960.77, 9007.2766, 9063.164, 9121.0534, 9164.1354, 9218.1594, 9267.767, 9319.0594, 9372.155, 9419.7126, 9474.3722, 9520.1338, 9572.368, 9622.7702, 9675.8448, 9726.5396, 9778.7378, 9827.6554, 9878.1922, 9928.7782, 9978.3984, 10026.578, 10076.5626, 10137.1618, 10177.5244, 10229.9176},\n\t// precision 12\n\t{2954, 3003.4782, 3053.3568, 3104.3666, 3155.324, 3206.9598, 3259.648, 3312.539, 3366.1474, 3420.2576, 3474.8376, 3530.6076, 3586.451, 3643.38, 3700.4104, 3757.5638, 3815.9676, 3875.193, 3934.838, 3994.8548, 4055.018, 4117.1742, 4178.4482, 4241.1294, 4304.4776, 4367.4044, 4431.8724, 4496.3732, 4561.4304, 4627.5326, 4693.949, 4761.5532, 4828.7256, 4897.6182, 4965.5186, 5034.4528, 5104.865, 5174.7164, 5244.6828, 5316.6708, 5387.8312, 5459.9036, 5532.476, 5604.8652, 5679.6718, 5753.757, 5830.2072, 5905.2828, 5980.0434, 6056.6264, 6134.3192, 6211.5746, 6290.0816, 6367.1176, 6447.9796, 6526.5576, 6606.1858, 6686.9144, 6766.1142, 6847.0818, 6927.9664, 7010.9096, 7091.0816, 7175.3962, 7260.3454, 7344.018, 7426.4214, 7511.3106, 7596.0686, 7679.8094, 7765.818, 7852.4248, 7936.834, 8022.363, 8109.5066, 8200.4554, 8288.5832, 8373.366, 8463.4808, 8549.7682, 8642.0522, 8728.3288, 8820.9528, 8907.727, 9001.0794, 9091.2522, 9179.988, 9269.852, 9362.6394, 9453.642, 9546.9024, 9640.6616, 9732.6622, 9824.3254, 9917.7484, 10007.9392, 10106.7508, 10196.2152, 10289.8114, 10383.5494, 10482.3064, 10576.8734, 10668.7872, 10764.7156, 10862.0196, 10952.793, 11049.9748, 11146.0702, 11241.4492, 11339.2772, 11434.2336, 11530.741, 11627.6136, 11726.311, 11821.5964, 11918.837, 12015.3724, 12113.0162, 12213.0424, 12306.9804, 
12408.4518, 12504.8968, 12604.586, 12700.9332, 12798.705, 12898.5142, 12997.0488, 13094.788, 13198.475, 13292.7764, 13392.9698, 13486.8574, 13590.1616, 13686.5838, 13783.6264, 13887.2638, 13992.0978, 14081.0844, 14189.9956, 14280.0912, 14382.4956, 14486.4384, 14588.1082, 14686.2392, 14782.276, 14888.0284, 14985.1864, 15088.8596, 15187.0998, 15285.027, 15383.6694, 15495.8266, 15591.3736, 15694.2008, 15790.3246, 15898.4116, 15997.4522, 16095.5014, 16198.8514, 16291.7492, 16402.6424, 16499.1266, 16606.2436, 16697.7186, 16796.3946, 16902.3376, 17005.7672, 17100.814, 17206.8282, 17305.8262, 17416.0744, 17508.4092, 17617.0178, 17715.4554, 17816.758, 17920.1748, 18012.9236, 18119.7984, 18223.2248, 18324.2482, 18426.6276, 18525.0932, 18629.8976, 18733.2588, 18831.0466, 18940.1366, 19032.2696, 19131.729, 19243.4864, 19349.6932, 19442.866, 19547.9448, 19653.2798, 19754.4034, 19854.0692, 19965.1224, 20065.1774, 20158.2212, 20253.353, 20366.3264, 20463.22},\n\t// precision 13\n\t{5908.5052, 6007.2672, 6107.347, 6208.5794, 6311.2622, 6414.5514, 6519.3376, 6625.6952, 6732.5988, 6841.3552, 6950.5972, 7061.3082, 7173.5646, 7287.109, 7401.8216, 7516.4344, 7633.3802, 7751.2962, 7870.3784, 7990.292, 8110.79, 8233.4574, 8356.6036, 8482.2712, 8607.7708, 8735.099, 8863.1858, 8993.4746, 9123.8496, 9255.6794, 9388.5448, 9522.7516, 9657.3106, 9792.6094, 9930.5642, 10068.794, 10206.7256, 10347.81, 10490.3196, 10632.0778, 10775.9916, 10920.4662, 11066.124, 11213.073, 11358.0362, 11508.1006, 11659.1716, 11808.7514, 11959.4884, 12112.1314, 12265.037, 12420.3756, 12578.933, 12734.311, 12890.0006, 13047.2144, 13207.3096, 13368.5144, 13528.024, 13689.847, 13852.7528, 14018.3168, 14180.5372, 14346.9668, 14513.5074, 14677.867, 14846.2186, 15017.4186, 15184.9716, 15356.339, 15529.2972, 15697.3578, 15871.8686, 16042.187, 16216.4094, 16389.4188, 16565.9126, 16742.3272, 16919.0042, 17094.7592, 17273.965, 17451.8342, 17634.4254, 17810.5984, 17988.9242, 18171.051, 18354.7938, 18539.466, 18721.0408, 
18904.9972, 19081.867, 19271.9118, 19451.8694, 19637.9816, 19821.2922, 20013.1292, 20199.3858, 20387.8726, 20572.9514, 20770.7764, 20955.1714, 21144.751, 21329.9952, 21520.709, 21712.7016, 21906.3868, 22096.2626, 22286.0524, 22475.051, 22665.5098, 22862.8492, 23055.5294, 23249.6138, 23437.848, 23636.273, 23826.093, 24020.3296, 24213.3896, 24411.7392, 24602.9614, 24805.7952, 24998.1552, 25193.9588, 25389.0166, 25585.8392, 25780.6976, 25981.2728, 26175.977, 26376.5252, 26570.1964, 26773.387, 26962.9812, 27163.0586, 27368.164, 27565.0534, 27758.7428, 27961.1276, 28163.2324, 28362.3816, 28565.7668, 28758.644, 28956.9768, 29163.4722, 29354.7026, 29561.1186, 29767.9948, 29959.9986, 30164.0492, 30366.9818, 30562.5338, 30762.9928, 30976.1592, 31166.274, 31376.722, 31570.3734, 31770.809, 31974.8934, 32179.5286, 32387.5442, 32582.3504, 32794.076, 32989.9528, 33191.842, 33392.4684, 33595.659, 33801.8672, 34000.3414, 34200.0922, 34402.6792, 34610.0638, 34804.0084, 35011.13, 35218.669, 35418.6634, 35619.0792, 35830.6534, 36028.4966, 36229.7902, 36438.6422, 36630.7764, 36833.3102, 37048.6728, 37247.3916, 37453.5904, 37669.3614, 37854.5526, 38059.305, 38268.0936, 38470.2516, 38674.7064, 38876.167, 39068.3794, 39281.9144, 39492.8566, 39684.8628, 39898.4108, 40093.1836, 40297.6858, 40489.7086, 40717.2424},\n\t// precision 14\n\t{11817.475, 12015.0046, 12215.3792, 12417.7504, 12623.1814, 12830.0086, 13040.0072, 13252.503, 13466.178, 13683.2738, 13902.0344, 14123.9798, 14347.394, 14573.7784, 14802.6894, 15033.6824, 15266.9134, 15502.8624, 15741.4944, 15980.7956, 16223.8916, 16468.6316, 16715.733, 16965.5726, 17217.204, 17470.666, 17727.8516, 17986.7886, 18247.6902, 18510.9632, 18775.304, 19044.7486, 19314.4408, 19587.202, 19862.2576, 20135.924, 20417.0324, 20697.9788, 20979.6112, 21265.0274, 21550.723, 21841.6906, 22132.162, 22428.1406, 22722.127, 23020.5606, 23319.7394, 23620.4014, 23925.2728, 24226.9224, 24535.581, 24845.505, 25155.9618, 25470.3828, 25785.9702, 26103.7764, 
26420.4132, 26742.0186, 27062.8852, 27388.415, 27714.6024, 28042.296, 28365.4494, 28701.1526, 29031.8008, 29364.2156, 29704.497, 30037.1458, 30380.111, 30723.8168, 31059.5114, 31404.9498, 31751.6752, 32095.2686, 32444.7792, 32794.767, 33145.204, 33498.4226, 33847.6502, 34209.006, 34560.849, 34919.4838, 35274.9778, 35635.1322, 35996.3266, 36359.1394, 36722.8266, 37082.8516, 37447.7354, 37815.9606, 38191.0692, 38559.4106, 38924.8112, 39294.6726, 39663.973, 40042.261, 40416.2036, 40779.2036, 41161.6436, 41540.9014, 41921.1998, 42294.7698, 42678.5264, 43061.3464, 43432.375, 43818.432, 44198.6598, 44583.0138, 44970.4794, 45353.924, 45729.858, 46118.2224, 46511.5724, 46900.7386, 47280.6964, 47668.1472, 48055.6796, 48446.9436, 48838.7146, 49217.7296, 49613.7796, 50010.7508, 50410.0208, 50793.7886, 51190.2456, 51583.1882, 51971.0796, 52376.5338, 52763.319, 53165.5534, 53556.5594, 53948.2702, 54346.352, 54748.7914, 55138.577, 55543.4824, 55941.1748, 56333.7746, 56745.1552, 57142.7944, 57545.2236, 57935.9956, 58348.5268, 58737.5474, 59158.5962, 59542.6896, 59958.8004, 60349.3788, 60755.0212, 61147.6144, 61548.194, 61946.0696, 62348.6042, 62763.603, 63162.781, 63560.635, 63974.3482, 64366.4908, 64771.5876, 65176.7346, 65597.3916, 65995.915, 66394.0384, 66822.9396, 67203.6336, 67612.2032, 68019.0078, 68420.0388, 68821.22, 69235.8388, 69640.0724, 70055.155, 70466.357, 70863.4266, 71276.2482, 71677.0306, 72080.2006, 72493.0214, 72893.5952, 73314.5856, 73714.9852, 74125.3022, 74521.2122, 74933.6814, 75341.5904, 75743.0244, 76166.0278, 76572.1322, 76973.1028, 77381.6284, 77800.6092, 78189.328, 78607.0962, 79012.2508, 79407.8358, 79825.725, 80238.701, 80646.891, 81035.6436, 81460.0448, 81876.3884},\n\t// precision 15\n\t{23635.0036, 24030.8034, 24431.4744, 24837.1524, 25246.7928, 25661.326, 26081.3532, 26505.2806, 26933.9892, 27367.7098, 27805.318, 28248.799, 28696.4382, 29148.8244, 29605.5138, 30066.8668, 30534.2344, 31006.32, 31480.778, 31962.2418, 32447.3324, 32938.0232, 
33432.731, 33930.728, 34433.9896, 34944.1402, 35457.5588, 35974.5958, 36497.3296, 37021.9096, 37554.326, 38088.0826, 38628.8816, 39171.3192, 39723.2326, 40274.5554, 40832.3142, 41390.613, 41959.5908, 42532.5466, 43102.0344, 43683.5072, 44266.694, 44851.2822, 45440.7862, 46038.0586, 46640.3164, 47241.064, 47846.155, 48454.7396, 49076.9168, 49692.542, 50317.4778, 50939.65, 51572.5596, 52210.2906, 52843.7396, 53481.3996, 54127.236, 54770.406, 55422.6598, 56078.7958, 56736.7174, 57397.6784, 58064.5784, 58730.308, 59404.9784, 60077.0864, 60751.9158, 61444.1386, 62115.817, 62808.7742, 63501.4774, 64187.5454, 64883.6622, 65582.7468, 66274.5318, 66976.9276, 67688.7764, 68402.138, 69109.6274, 69822.9706, 70543.6108, 71265.5202, 71983.3848, 72708.4656, 73433.384, 74158.4664, 74896.4868, 75620.9564, 76362.1434, 77098.3204, 77835.7662, 78582.6114, 79323.9902, 80067.8658, 80814.9246, 81567.0136, 82310.8536, 83061.9952, 83821.4096, 84580.8608, 85335.547, 86092.5802, 86851.6506, 87612.311, 88381.2016, 89146.3296, 89907.8974, 90676.846, 91451.4152, 92224.5518, 92995.8686, 93763.5066, 94551.2796, 95315.1944, 96096.1806, 96881.0918, 97665.679, 98442.68, 99229.3002, 100011.0994, 100790.6386, 101580.1564, 102377.7484, 103152.1392, 103944.2712, 104730.216, 105528.6336, 106324.9398, 107117.6706, 107890.3988, 108695.2266, 109485.238, 110294.7876, 111075.0958, 111878.0496, 112695.2864, 113464.5486, 114270.0474, 115068.608, 115884.3626, 116673.2588, 117483.3716, 118275.097, 119085.4092, 119879.2808, 120687.5868, 121499.9944, 122284.916, 123095.9254, 123912.5038, 124709.0454, 125503.7182, 126323.259, 127138.9412, 127943.8294, 128755.646, 129556.5354, 130375.3298, 131161.4734, 131971.1962, 132787.5458, 133588.1056, 134431.351, 135220.2906, 136023.398, 136846.6558, 137667.0004, 138463.663, 139283.7154, 140074.6146, 140901.3072, 141721.8548, 142543.2322, 143356.1096, 144173.7412, 144973.0948, 145794.3162, 146609.5714, 147420.003, 148237.9784, 149050.5696, 149854.761, 150663.1966, 151494.0754, 
152313.1416, 153112.6902, 153935.7206, 154746.9262, 155559.547, 156401.9746, 157228.7036, 158008.7254, 158820.75, 159646.9184, 160470.4458, 161279.5348, 162093.3114, 162918.542, 163729.2842},\n\t// precision 16\n\t{47271, 48062.3584, 48862.7074, 49673.152, 50492.8416, 51322.9514, 52161.03, 53009.407, 53867.6348, 54734.206, 55610.5144, 56496.2096, 57390.795, 58297.268, 59210.6448, 60134.665, 61068.0248, 62010.4472, 62962.5204, 63923.5742, 64895.0194, 65876.4182, 66862.6136, 67862.6968, 68868.8908, 69882.8544, 70911.271, 71944.0924, 72990.0326, 74040.692, 75100.6336, 76174.7826, 77252.5998, 78340.2974, 79438.2572, 80545.4976, 81657.2796, 82784.6336, 83915.515, 85059.7362, 86205.9368, 87364.4424, 88530.3358, 89707.3744, 90885.9638, 92080.197, 93275.5738, 94479.391, 95695.918, 96919.2236, 98148.4602, 99382.3474, 100625.6974, 101878.0284, 103141.6278, 104409.4588, 105686.2882, 106967.5402, 108261.6032, 109548.1578, 110852.0728, 112162.231, 113479.0072, 114806.2626, 116137.9072, 117469.5048, 118813.5186, 120165.4876, 121516.2556, 122875.766, 124250.5444, 125621.2222, 127003.2352, 128387.848, 129775.2644, 131181.7776, 132577.3086, 133979.9458, 135394.1132, 136800.9078, 138233.217, 139668.5308, 141085.212, 142535.2122, 143969.0684, 145420.2872, 146878.1542, 148332.7572, 149800.3202, 151269.66, 152743.6104, 154213.0948, 155690.288, 157169.4246, 158672.1756, 160160.059, 161650.6854, 163145.7772, 164645.6726, 166159.1952, 167682.1578, 169177.3328, 170700.0118, 172228.8964, 173732.6664, 175265.5556, 176787.799, 178317.111, 179856.6914, 181400.865, 182943.4612, 184486.742, 186033.4698, 187583.7886, 189148.1868, 190688.4526, 192250.1926, 193810.9042, 195354.2972, 196938.7682, 198493.5898, 200079.2824, 201618.912, 203205.5492, 204765.5798, 206356.1124, 207929.3064, 209498.7196, 211086.229, 212675.1324, 214256.7892, 215826.2392, 217412.8474, 218995.6724, 220618.6038, 222207.1166, 223781.0364, 225387.4332, 227005.7928, 228590.4336, 230217.8738, 231805.1054, 233408.9, 234995.3432, 
236601.4956, 238190.7904, 239817.2548, 241411.2832, 243002.4066, 244640.1884, 246255.3128, 247849.3508, 249479.9734, 251106.8822, 252705.027, 254332.9242, 255935.129, 257526.9014, 259154.772, 260777.625, 262390.253, 264004.4906, 265643.59, 267255.4076, 268873.426, 270470.7252, 272106.4804, 273722.4456, 275337.794, 276945.7038, 278592.9154, 280204.3726, 281841.1606, 283489.171, 285130.1716, 286735.3362, 288364.7164, 289961.1814, 291595.5524, 293285.683, 294899.6668, 296499.3434, 298128.0462, 299761.8946, 301394.2424, 302997.6748, 304615.1478, 306269.7724, 307886.114, 309543.1028, 311153.2862, 312782.8546, 314421.2008, 316033.2438, 317692.9636, 319305.2648, 320948.7406, 322566.3364, 324228.4224, 325847.1542},\n\t// precision 17\n\t{94542, 96125.811, 97728.019, 99348.558, 100987.9705, 102646.7565, 104324.5125, 106021.7435, 107736.7865, 109469.272, 111223.9465, 112995.219, 114787.432, 116593.152, 118422.71, 120267.2345, 122134.6765, 124020.937, 125927.2705, 127851.255, 129788.9485, 131751.016, 133726.8225, 135722.592, 137736.789, 139770.568, 141821.518, 143891.343, 145982.1415, 148095.387, 150207.526, 152355.649, 154515.6415, 156696.05, 158887.7575, 161098.159, 163329.852, 165569.053, 167837.4005, 170121.6165, 172420.4595, 174732.6265, 177062.77, 179412.502, 181774.035, 184151.939, 186551.6895, 188965.691, 191402.8095, 193857.949, 196305.0775, 198774.6715, 201271.2585, 203764.78, 206299.3695, 208818.1365, 211373.115, 213946.7465, 216532.076, 219105.541, 221714.5375, 224337.5135, 226977.5125, 229613.0655, 232270.2685, 234952.2065, 237645.3555, 240331.1925, 243034.517, 245756.0725, 248517.6865, 251232.737, 254011.3955, 256785.995, 259556.44, 262368.335, 265156.911, 267965.266, 270785.583, 273616.0495, 276487.4835, 279346.639, 282202.509, 285074.3885, 287942.2855, 290856.018, 293774.0345, 296678.5145, 299603.6355, 302552.6575, 305492.9785, 308466.8605, 311392.581, 314347.538, 317319.4295, 320285.9785, 323301.7325, 326298.3235, 329301.3105, 332301.987, 335309.791, 
338370.762, 341382.923, 344431.1265, 347464.1545, 350507.28, 353619.2345, 356631.2005, 359685.203, 362776.7845, 365886.488, 368958.2255, 372060.6825, 375165.4335, 378237.935, 381328.311, 384430.5225, 387576.425, 390683.242, 393839.648, 396977.8425, 400101.9805, 403271.296, 406409.8425, 409529.5485, 412678.7, 415847.423, 419020.8035, 422157.081, 425337.749, 428479.6165, 431700.902, 434893.1915, 438049.582, 441210.5415, 444379.2545, 447577.356, 450741.931, 453959.548, 457137.0935, 460329.846, 463537.4815, 466732.3345, 469960.5615, 473164.681, 476347.6345, 479496.173, 482813.1645, 486025.6995, 489249.4885, 492460.1945, 495675.8805, 498908.0075, 502131.802, 505374.3855, 508550.9915, 511806.7305, 515026.776, 518217.0005, 521523.9855, 524705.9855, 527950.997, 531210.0265, 534472.497, 537750.7315, 540926.922, 544207.094, 547429.4345, 550666.3745, 553975.3475, 557150.7185, 560399.6165, 563662.697, 566916.7395, 570146.1215, 573447.425, 576689.6245, 579874.5745, 583202.337, 586503.0255, 589715.635, 592910.161, 596214.3885, 599488.035, 602740.92, 605983.0685, 609248.67, 612491.3605, 615787.912, 619107.5245, 622307.9555, 625577.333, 628840.4385, 632085.2155, 635317.6135, 638691.7195, 641887.467, 645139.9405, 648441.546, 651666.252, 654941.845},\n\t// precision 18\n\t{189084, 192250.913, 195456.774, 198696.946, 201977.762, 205294.444, 208651.754, 212042.099, 215472.269, 218941.91, 222443.912, 225996.845, 229568.199, 233193.568, 236844.457, 240543.233, 244279.475, 248044.27, 251854.588, 255693.2, 259583.619, 263494.621, 267445.385, 271454.061, 275468.769, 279549.456, 283646.446, 287788.198, 291966.099, 296181.164, 300431.469, 304718.618, 309024.004, 313393.508, 317760.803, 322209.731, 326675.061, 331160.627, 335654.47, 340241.442, 344841.833, 349467.132, 354130.629, 358819.432, 363574.626, 368296.587, 373118.482, 377914.93, 382782.301, 387680.669, 392601.981, 397544.323, 402529.115, 407546.018, 412593.658, 417638.657, 422762.865, 427886.169, 433017.167, 438213.273, 443441.254, 
448692.421, 453937.533, 459239.049, 464529.569, 469910.083, 475274.03, 480684.473, 486070.26, 491515.237, 496995.651, 502476.617, 507973.609, 513497.19, 519083.233, 524726.509, 530305.505, 535945.728, 541584.404, 547274.055, 552967.236, 558667.862, 564360.216, 570128.148, 575965.08, 581701.952, 587532.523, 593361.144, 599246.128, 605033.418, 610958.779, 616837.117, 622772.818, 628672.04, 634675.369, 640574.831, 646585.739, 652574.547, 658611.217, 664642.684, 670713.914, 676737.681, 682797.313, 688837.897, 694917.874, 701009.882, 707173.648, 713257.254, 719415.392, 725636.761, 731710.697, 737906.209, 744103.074, 750313.39, 756504.185, 762712.579, 768876.985, 775167.859, 781359, 787615.959, 793863.597, 800245.477, 806464.582, 812785.294, 819005.925, 825403.057, 831676.197, 837936.284, 844266.968, 850642.711, 856959.756, 863322.774, 869699.931, 876102.478, 882355.787, 888694.463, 895159.952, 901536.143, 907872.631, 914293.672, 920615.14, 927130.974, 933409.404, 939922.178, 946331.47, 952745.93, 959209.264, 965590.224, 972077.284, 978501.961, 984953.19, 991413.271, 997817.479, 1004222.658, 1010725.676, 1017177.138, 1023612.529, 1030098.236, 1036493.719, 1043112.207, 1049537.036, 1056008.096, 1062476.184, 1068942.337, 1075524.95, 1081932.864, 1088426.025, 1094776.005, 1101327.448, 1107901.673, 1114423.639, 1120884.602, 1127324.923, 1133794.24, 1140328.886, 1146849.376, 1153346.682, 1159836.502, 1166478.703, 1172953.304, 1179391.502, 1185950.982, 1192544.052, 1198913.41, 1205430.994, 1212015.525, 1218674.042, 1225121.683, 1231551.101, 1238126.379, 1244673.795, 1251260.649, 1257697.86, 1264320.983, 1270736.319, 1277274.694, 1283804.95, 1290211.514, 1296858.568, 1303455.691},\n}\n\nvar biasData = [][]float64{\n\t// precision 4\n\t{10, 9.717, 9.207, 8.7896, 8.2882, 7.8204, 7.3772, 6.9342, 6.5202, 6.161, 5.7722, 5.4636, 5.0396, 4.6766, 4.3566, 4.0454, 3.7936, 3.4856, 3.2666, 2.9946, 2.766, 2.4692, 2.3638, 2.0764, 1.7864, 1.7602, 1.4814, 1.433, 1.2926, 1.0664, 
0.999600000000001, 0.7956, 0.5366, 0.589399999999998, 0.573799999999999, 0.269799999999996, 0.368200000000002, 0.0544000000000011, 0.234200000000001, 0.0108000000000033, -0.203400000000002, -0.0701999999999998, -0.129600000000003, -0.364199999999997, -0.480600000000003, -0.226999999999997, -0.322800000000001, -0.382599999999996, -0.511200000000002, -0.669600000000003, -0.749400000000001, -0.500399999999999, -0.617600000000003, -0.6922, -0.601599999999998, -0.416200000000003, -0.338200000000001, -0.782600000000002, -0.648600000000002, -0.919800000000002, -0.851799999999997, -0.962400000000002, -0.6402, -1.1922, -1.0256, -1.086, -1.21899999999999, -0.819400000000002, -0.940600000000003, -1.1554, -1.2072, -1.1752, -1.16759999999999, -1.14019999999999, -1.3754, -1.29859999999999, -1.607, -1.3292, -1.7606},\n\t// precision 5\n\t{22, 21.1194, 20.8208, 20.2318, 19.77, 19.2436, 18.7774, 18.2848, 17.8224, 17.3742, 16.9336, 16.503, 16.0494, 15.6292, 15.2124, 14.798, 14.367, 13.9728, 13.5944, 13.217, 12.8438, 12.3696, 12.0956, 11.7044, 11.324, 11.0668, 10.6698, 10.3644, 10.049, 9.6918, 9.4146, 9.082, 8.687, 8.5398, 8.2462, 7.857, 7.6606, 7.4168, 7.1248, 6.9222, 6.6804, 6.447, 6.3454, 5.9594, 5.7636, 5.5776, 5.331, 5.19, 4.9676, 4.7564, 4.5314, 4.4442, 4.3708, 3.9774, 3.9624, 3.8796, 3.755, 3.472, 3.2076, 3.1024, 2.8908, 2.7338, 2.7728, 2.629, 2.413, 2.3266, 2.1524, 2.2642, 2.1806, 2.0566, 1.9192, 1.7598, 1.3516, 1.5802, 1.43859999999999, 1.49160000000001, 1.1524, 1.1892, 0.841399999999993, 0.879800000000003, 0.837599999999995, 0.469800000000006, 0.765600000000006, 0.331000000000003, 0.591399999999993, 0.601200000000006, 0.701599999999999, 0.558199999999999, 0.339399999999998, 0.354399999999998, 0.491200000000006, 0.308000000000007, 0.355199999999996, -0.0254000000000048, 0.205200000000005, -0.272999999999996, 0.132199999999997, 0.394400000000005, -0.241200000000006, 0.242000000000004, 0.191400000000002, 0.253799999999998, -0.122399999999999, -0.370800000000003, 
0.193200000000004, -0.0848000000000013, 0.0867999999999967, -0.327200000000005, -0.285600000000002, 0.311400000000006, -0.128399999999999, -0.754999999999995, -0.209199999999996, -0.293599999999998, -0.364000000000004, -0.253600000000006, -0.821200000000005, -0.253600000000006, -0.510400000000004, -0.383399999999995, -0.491799999999998, -0.220200000000006, -0.0972000000000008, -0.557400000000001, -0.114599999999996, -0.295000000000002, -0.534800000000004, 0.346399999999988, -0.65379999999999, 0.0398000000000138, 0.0341999999999985, -0.995800000000003, -0.523400000000009, -0.489000000000004, -0.274799999999999, -0.574999999999989, -0.482799999999997, 0.0571999999999946, -0.330600000000004, -0.628800000000012, -0.140199999999993, -0.540600000000012, -0.445999999999998, -0.599400000000003, -0.262599999999992, 0.163399999999996, -0.100599999999986, -0.39500000000001, -1.06960000000001, -0.836399999999998, -0.753199999999993, -0.412399999999991, -0.790400000000005, -0.29679999999999, -0.28540000000001, -0.193000000000012, -0.0772000000000048, -0.962799999999987, -0.414800000000014},\n\t// precision 6\n\t{45, 44.1902, 43.271, 42.8358, 41.8142, 41.2854, 40.317, 39.354, 38.8924, 37.9436, 37.4596, 36.5262, 35.6248, 35.1574, 34.2822, 33.837, 32.9636, 32.074, 31.7042, 30.7976, 30.4772, 29.6564, 28.7942, 28.5004, 27.686, 27.291, 26.5672, 25.8556, 25.4982, 24.8204, 24.4252, 23.7744, 23.0786, 22.8344, 22.0294, 21.8098, 21.0794, 20.5732, 20.1878, 19.5648, 19.2902, 18.6784, 18.3352, 17.8946, 17.3712, 17.0852, 16.499, 16.2686, 15.6844, 15.2234, 14.9732, 14.3356, 14.2286, 13.7262, 13.3284, 13.1048, 12.5962, 12.3562, 12.1272, 11.4184, 11.4974, 11.0822, 10.856, 10.48, 10.2834, 10.0208, 9.637, 9.51739999999999, 9.05759999999999, 8.74760000000001, 8.42700000000001, 8.1326, 8.2372, 8.2788, 7.6776, 7.79259999999999, 7.1952, 6.9564, 6.6454, 6.87, 6.5428, 6.19999999999999, 6.02940000000001, 5.62780000000001, 5.6782, 5.792, 5.35159999999999, 5.28319999999999, 5.0394, 5.07480000000001, 
4.49119999999999, 4.84899999999999, 4.696, 4.54040000000001, 4.07300000000001, 4.37139999999999, 3.7216, 3.7328, 3.42080000000001, 3.41839999999999, 3.94239999999999, 3.27719999999999, 3.411, 3.13079999999999, 2.76900000000001, 2.92580000000001, 2.68279999999999, 2.75020000000001, 2.70599999999999, 2.3886, 3.01859999999999, 2.45179999999999, 2.92699999999999, 2.41720000000001, 2.41139999999999, 2.03299999999999, 2.51240000000001, 2.5564, 2.60079999999999, 2.41720000000001, 1.80439999999999, 1.99700000000001, 2.45480000000001, 1.8948, 2.2346, 2.30860000000001, 2.15479999999999, 1.88419999999999, 1.6508, 0.677199999999999, 1.72540000000001, 1.4752, 1.72280000000001, 1.66139999999999, 1.16759999999999, 1.79300000000001, 1.00059999999999, 0.905200000000008, 0.659999999999997, 1.55879999999999, 1.1636, 0.688199999999995, 0.712600000000009, 0.450199999999995, 1.1978, 0.975599999999986, 0.165400000000005, 1.727, 1.19739999999999, -0.252600000000001, 1.13460000000001, 1.3048, 1.19479999999999, 0.313400000000001, 0.878999999999991, 1.12039999999999, 0.853000000000009, 1.67920000000001, 0.856999999999999, 0.448599999999999, 1.2362, 0.953399999999988, 1.02859999999998, 0.563199999999995, 0.663000000000011, 0.723000000000013, 0.756599999999992, 0.256599999999992, -0.837600000000009, 0.620000000000005, 0.821599999999989, 0.216600000000028, 0.205600000000004, 0.220199999999977, 0.372599999999977, 0.334400000000016, 0.928400000000011, 0.972800000000007, 0.192400000000021, 0.487199999999973, -0.413000000000011, 0.807000000000016, 0.120600000000024, 0.769000000000005, 0.870799999999974, 0.66500000000002, 0.118200000000002, 0.401200000000017, 0.635199999999998, 0.135400000000004, 0.175599999999974, 1.16059999999999, 0.34620000000001, 0.521400000000028, -0.586599999999976, -1.16480000000001, 0.968399999999974, 0.836999999999989, 0.779600000000016, 0.985799999999983},\n\t// precision 7\n\t{91, 89.4934, 87.9758, 86.4574, 84.9718, 83.4954, 81.5302, 80.0756, 78.6374, 77.1782, 75.7888, 
73.9522, 72.592, 71.2532, 69.9086, 68.5938, 66.9474, 65.6796, 64.4394, 63.2176, 61.9768, 60.4214, 59.2528, 58.0102, 56.8658, 55.7278, 54.3044, 53.1316, 52.093, 51.0032, 49.9092, 48.6306, 47.5294, 46.5756, 45.6508, 44.662, 43.552, 42.3724, 41.617, 40.5754, 39.7872, 38.8444, 37.7988, 36.8606, 36.2118, 35.3566, 34.4476, 33.5882, 32.6816, 32.0824, 31.0258, 30.6048, 29.4436, 28.7274, 27.957, 27.147, 26.4364, 25.7592, 25.3386, 24.781, 23.8028, 23.656, 22.6544, 21.996, 21.4718, 21.1544, 20.6098, 19.5956, 19.0616, 18.5758, 18.4878, 17.5244, 17.2146, 16.724, 15.8722, 15.5198, 15.0414, 14.941, 14.9048, 13.87, 13.4304, 13.028, 12.4708, 12.37, 12.0624, 11.4668, 11.5532, 11.4352, 11.2564, 10.2744, 10.2118, 9.74720000000002, 10.1456, 9.2928, 8.75040000000001, 8.55279999999999, 8.97899999999998, 8.21019999999999, 8.18340000000001, 7.3494, 7.32499999999999, 7.66140000000001, 6.90300000000002, 7.25439999999998, 6.9042, 7.21499999999997, 6.28640000000001, 6.08139999999997, 6.6764, 6.30099999999999, 5.13900000000001, 5.65800000000002, 5.17320000000001, 4.59019999999998, 4.9538, 5.08280000000002, 4.92200000000003, 4.99020000000002, 4.7328, 5.4538, 4.11360000000002, 4.22340000000003, 4.08780000000002, 3.70800000000003, 4.15559999999999, 4.18520000000001, 3.63720000000001, 3.68220000000002, 3.77960000000002, 3.6078, 2.49160000000001, 3.13099999999997, 2.5376, 3.19880000000001, 3.21100000000001, 2.4502, 3.52820000000003, 2.91199999999998, 3.04480000000001, 2.7432, 2.85239999999999, 2.79880000000003, 2.78579999999999, 1.88679999999999, 2.98860000000002, 2.50639999999999, 1.91239999999999, 2.66160000000002, 2.46820000000002, 1.58199999999999, 1.30399999999997, 2.27379999999999, 2.68939999999998, 1.32900000000001, 3.10599999999999, 1.69080000000002, 2.13740000000001, 2.53219999999999, 1.88479999999998, 1.33240000000001, 1.45119999999997, 1.17899999999997, 2.44119999999998, 1.60659999999996, 2.16700000000003, 0.77940000000001, 2.37900000000002, 2.06700000000001, 1.46000000000004, 
2.91160000000002, 1.69200000000001, 0.954600000000028, 2.49300000000005, 2.2722, 1.33500000000004, 2.44899999999996, 1.20140000000004, 3.07380000000001, 2.09739999999999, 2.85640000000001, 2.29960000000005, 2.40899999999999, 1.97040000000004, 0.809799999999996, 1.65279999999996, 2.59979999999996, 0.95799999999997, 2.06799999999998, 2.32780000000002, 4.20159999999998, 1.96320000000003, 1.86400000000003, 1.42999999999995, 3.77940000000001, 1.27200000000005, 1.86440000000005, 2.20600000000002, 3.21900000000005, 1.5154, 2.61019999999996},\n\t// precision 8\n\t{183.2152, 180.2454, 177.2096, 173.6652, 170.6312, 167.6822, 164.249, 161.3296, 158.0038, 155.2074, 152.4612, 149.27, 146.5178, 143.4412, 140.8032, 138.1634, 135.1688, 132.6074, 129.6946, 127.2664, 124.8228, 122.0432, 119.6824, 116.9464, 114.6268, 112.2626, 109.8376, 107.4034, 104.8956, 102.8522, 100.7638, 98.3552, 96.3556, 93.7526, 91.9292, 89.8954, 87.8198, 85.7668, 83.298, 81.6688, 79.9466, 77.9746, 76.1672, 74.3474, 72.3028, 70.8912, 69.114, 67.4646, 65.9744, 64.4092, 62.6022, 60.843, 59.5684, 58.1652, 56.5426, 55.4152, 53.5388, 52.3592, 51.1366, 49.486, 48.3918, 46.5076, 45.509, 44.3834, 43.3498, 42.0668, 40.7346, 40.1228, 38.4528, 37.7, 36.644, 36.0518, 34.5774, 33.9068, 32.432, 32.1666, 30.434, 29.6644, 28.4894, 27.6312, 26.3804, 26.292, 25.5496000000001, 25.0234, 24.8206, 22.6146, 22.4188, 22.117, 20.6762, 20.6576, 19.7864, 19.509, 18.5334, 17.9204, 17.772, 16.2924, 16.8654, 15.1836, 15.745, 15.1316, 15.0386, 14.0136, 13.6342, 12.6196, 12.1866, 12.4281999999999, 11.3324, 10.4794000000001, 11.5038, 10.129, 9.52800000000002, 10.3203999999999, 9.46299999999997, 9.79280000000006, 9.12300000000005, 8.74180000000001, 9.2192, 7.51020000000005, 7.60659999999996, 7.01840000000004, 7.22239999999999, 7.40139999999997, 6.76179999999999, 7.14359999999999, 5.65060000000005, 5.63779999999997, 5.76599999999996, 6.75139999999999, 5.57759999999996, 3.73220000000003, 5.8048, 5.63019999999995, 4.93359999999996, 
3.47979999999995, 4.33879999999999, 3.98940000000005, 3.81960000000004, 3.31359999999995, 3.23080000000004, 3.4588, 3.08159999999998, 3.4076, 3.00639999999999, 2.38779999999997, 2.61900000000003, 1.99800000000005, 3.34820000000002, 2.95060000000001, 0.990999999999985, 2.11440000000005, 2.20299999999997, 2.82219999999995, 2.73239999999998, 2.7826, 3.76660000000004, 2.26480000000004, 2.31280000000004, 2.40819999999997, 2.75360000000001, 3.33759999999995, 2.71559999999999, 1.7478000000001, 1.42920000000004, 2.39300000000003, 2.22779999999989, 2.34339999999997, 0.87259999999992, 3.88400000000001, 1.80600000000004, 1.91759999999999, 1.16779999999994, 1.50320000000011, 2.52500000000009, 0.226400000000012, 2.31500000000005, 0.930000000000064, 1.25199999999995, 2.14959999999996, 0.0407999999999902, 2.5447999999999, 1.32960000000003, 0.197400000000016, 2.52620000000002, 3.33279999999991, -1.34300000000007, 0.422199999999975, 0.917200000000093, 1.12920000000008, 1.46060000000011, 1.45779999999991, 2.8728000000001, 3.33359999999993, -1.34079999999994, 1.57680000000005, 0.363000000000056, 1.40740000000005, 0.656600000000026, 0.801400000000058, -0.454600000000028, 1.51919999999996},\n\t// precision 9\n\t{368, 361.8294, 355.2452, 348.6698, 342.1464, 336.2024, 329.8782, 323.6598, 317.462, 311.2826, 305.7102, 299.7416, 293.9366, 288.1046, 282.285, 277.0668, 271.306, 265.8448, 260.301, 254.9886, 250.2422, 244.8138, 239.7074, 234.7428, 229.8402, 225.1664, 220.3534, 215.594, 210.6886, 205.7876, 201.65, 197.228, 192.8036, 188.1666, 184.0818, 180.0824, 176.2574, 172.302, 168.1644, 164.0056, 160.3802, 156.7192, 152.5234, 149.2084, 145.831, 142.485, 139.1112, 135.4764, 131.76, 129.3368, 126.5538, 122.5058, 119.2646, 116.5902, 113.3818, 110.8998, 107.9532, 105.2062, 102.2798, 99.4728, 96.9582, 94.3292, 92.171, 89.7809999999999, 87.5716, 84.7048, 82.5322, 79.875, 78.3972, 75.3464, 73.7274, 71.2834, 70.1444, 68.4263999999999, 66.0166, 64.018, 62.0437999999999, 60.3399999999999, 58.6856, 
57.9836, 55.0311999999999, 54.6769999999999, 52.3188, 51.4846, 49.4423999999999, 47.739, 46.1487999999999, 44.9202, 43.4059999999999, 42.5342000000001, 41.2834, 38.8954000000001, 38.3286000000001, 36.2146, 36.6684, 35.9946, 33.123, 33.4338, 31.7378000000001, 29.076, 28.9692, 27.4964, 27.0998, 25.9864, 26.7754, 24.3208, 23.4838, 22.7388000000001, 24.0758000000001, 21.9097999999999, 20.9728, 19.9228000000001, 19.9292, 16.617, 17.05, 18.2996000000001, 15.6128000000001, 15.7392, 14.5174, 13.6322, 12.2583999999999, 13.3766000000001, 11.423, 13.1232, 9.51639999999998, 10.5938000000001, 9.59719999999993, 8.12220000000002, 9.76739999999995, 7.50440000000003, 7.56999999999994, 6.70440000000008, 6.41419999999994, 6.71019999999999, 5.60940000000005, 4.65219999999999, 6.84099999999989, 3.4072000000001, 3.97859999999991, 3.32760000000007, 5.52160000000003, 3.31860000000006, 2.06940000000009, 4.35400000000004, 1.57500000000005, 0.280799999999999, 2.12879999999996, -0.214799999999968, -0.0378000000000611, -0.658200000000079, 0.654800000000023, -0.0697999999999865, 0.858400000000074, -2.52700000000004, -2.1751999999999, -3.35539999999992, -1.04019999999991, -0.651000000000067, -2.14439999999991, -1.96659999999997, -3.97939999999994, -0.604400000000169, -3.08260000000018, -3.39159999999993, -5.29640000000018, -5.38920000000007, -5.08759999999984, -4.69900000000007, -5.23720000000003, -3.15779999999995, -4.97879999999986, -4.89899999999989, -7.48880000000008, -5.94799999999987, -5.68060000000014, -6.67180000000008, -4.70499999999993, -7.27779999999984, -4.6579999999999, -4.4362000000001, -4.32139999999981, -5.18859999999995, -6.66879999999992, -6.48399999999992, -5.1260000000002, -4.4032000000002, -6.13500000000022, -5.80819999999994, -4.16719999999987, -4.15039999999999, -7.45600000000013, -7.24080000000004, -9.83179999999993, -5.80420000000004, -8.6561999999999, -6.99940000000015, -10.5473999999999, -7.34139999999979, -6.80999999999995, -6.29719999999998, -6.23199999999997},\n\t// 
precision 10\n\t{737.1256, 724.4234, 711.1064, 698.4732, 685.4636, 673.0644, 660.488, 647.9654, 636.0832, 623.7864, 612.1992, 600.2176, 588.5228, 577.1716, 565.7752, 554.899, 543.6126, 532.6492, 521.9474, 511.5214, 501.1064, 490.6364, 480.2468, 470.4588, 460.3832, 451.0584, 440.8606, 431.3868, 422.5062, 413.1862, 404.463, 395.339, 386.1936, 378.1292, 369.1854, 361.2908, 353.3324, 344.8518, 337.5204, 329.4854, 321.9318, 314.552, 306.4658, 299.4256, 292.849, 286.152, 278.8956, 271.8792, 265.118, 258.62, 252.5132, 245.9322, 239.7726, 233.6086, 227.5332, 222.5918, 216.4294, 210.7662, 205.4106, 199.7338, 194.9012, 188.4486, 183.1556, 178.6338, 173.7312, 169.6264, 163.9526, 159.8742, 155.8326, 151.1966, 147.5594, 143.07, 140.037, 134.1804, 131.071, 127.4884, 124.0848, 120.2944, 117.333, 112.9626, 110.2902, 107.0814, 103.0334, 99.4832000000001, 96.3899999999999, 93.7202000000002, 90.1714000000002, 87.2357999999999, 85.9346, 82.8910000000001, 80.0264000000002, 78.3834000000002, 75.1543999999999, 73.8683999999998, 70.9895999999999, 69.4367999999999, 64.8701999999998, 65.0408000000002, 61.6738, 59.5207999999998, 57.0158000000001, 54.2302, 53.0962, 50.4985999999999, 52.2588000000001, 47.3914, 45.6244000000002, 42.8377999999998, 43.0072, 40.6516000000001, 40.2453999999998, 35.2136, 36.4546, 33.7849999999999, 33.2294000000002, 32.4679999999998, 30.8670000000002, 28.6507999999999, 28.9099999999999, 27.5983999999999, 26.1619999999998, 24.5563999999999, 23.2328000000002, 21.9484000000002, 21.5902000000001, 21.3346000000001, 17.7031999999999, 20.6111999999998, 19.5545999999999, 15.7375999999999, 17.0720000000001, 16.9517999999998, 15.326, 13.1817999999998, 14.6925999999999, 13.0859999999998, 13.2754, 10.8697999999999, 11.248, 7.3768, 4.72339999999986, 7.97899999999981, 8.7503999999999, 7.68119999999999, 9.7199999999998, 7.73919999999998, 5.6224000000002, 7.44560000000001, 6.6601999999998, 5.9058, 4.00199999999995, 4.51699999999983, 4.68240000000014, 3.86220000000003, 
5.13639999999987, 5.98500000000013, 2.47719999999981, 2.61999999999989, 1.62800000000016, 4.65000000000009, 0.225599999999758, 0.831000000000131, -0.359400000000278, 1.27599999999984, -2.92559999999958, -0.0303999999996449, 2.37079999999969, -2.0033999999996, 0.804600000000391, 0.30199999999968, 1.1247999999996, -2.6880000000001, 0.0321999999996478, -1.18099999999959, -3.9402, -1.47940000000017, -0.188400000000001, -2.10720000000038, -2.04159999999956, -3.12880000000041, -4.16160000000036, -0.612799999999879, -3.48719999999958, -8.17900000000009, -5.37780000000021, -4.01379999999972, -5.58259999999973, -5.73719999999958, -7.66799999999967, -5.69520000000011, -1.1247999999996, -5.58520000000044, -8.04560000000038, -4.64840000000004, -11.6468000000004, -7.97519999999986, -5.78300000000036, -7.67420000000038, -10.6328000000003, -9.81720000000041},\n\t// precision 11\n\t{1476, 1449.6014, 1423.5802, 1397.7942, 1372.3042, 1347.2062, 1321.8402, 1297.2292, 1272.9462, 1248.9926, 1225.3026, 1201.4252, 1178.0578, 1155.6092, 1132.626, 1110.5568, 1088.527, 1066.5154, 1045.1874, 1024.3878, 1003.37, 982.1972, 962.5728, 942.1012, 922.9668, 903.292, 884.0772, 864.8578, 846.6562, 828.041, 809.714, 792.3112, 775.1806, 757.9854, 740.656, 724.346, 707.5154, 691.8378, 675.7448, 659.6722, 645.5722, 630.1462, 614.4124, 600.8728, 585.898, 572.408, 558.4926, 544.4938, 531.6776, 517.282, 505.7704, 493.1012, 480.7388, 467.6876, 456.1872, 445.5048, 433.0214, 420.806, 411.409, 400.4144, 389.4294, 379.2286, 369.651, 360.6156, 350.337, 342.083, 332.1538, 322.5094, 315.01, 305.6686, 298.1678, 287.8116, 280.9978, 271.9204, 265.3286, 257.5706, 249.6014, 242.544, 235.5976, 229.583, 220.9438, 214.672, 208.2786, 201.8628, 195.1834, 191.505, 186.1816, 178.5188, 172.2294, 167.8908, 161.0194, 158.052, 151.4588, 148.1596, 143.4344, 138.5238, 133.13, 127.6374, 124.8162, 118.7894, 117.3984, 114.6078, 109.0858, 105.1036, 103.6258, 98.6018000000004, 95.7618000000002, 93.5821999999998, 88.5900000000001, 
86.9992000000002, 82.8800000000001, 80.4539999999997, 74.6981999999998, 74.3644000000004, 73.2914000000001, 65.5709999999999, 66.9232000000002, 65.1913999999997, 62.5882000000001, 61.5702000000001, 55.7035999999998, 56.1764000000003, 52.7596000000003, 53.0302000000001, 49.0609999999997, 48.4694, 44.933, 46.0474000000004, 44.7165999999997, 41.9416000000001, 39.9207999999999, 35.6328000000003, 35.5276000000003, 33.1934000000001, 33.2371999999996, 33.3864000000003, 33.9228000000003, 30.2371999999996, 29.1373999999996, 25.2272000000003, 24.2942000000003, 19.8338000000003, 18.9005999999999, 23.0907999999999, 21.8544000000002, 19.5176000000001, 15.4147999999996, 16.9314000000004, 18.6737999999996, 12.9877999999999, 14.3688000000002, 12.0447999999997, 15.5219999999999, 12.5299999999997, 14.5940000000001, 14.3131999999996, 9.45499999999993, 12.9441999999999, 3.91139999999996, 13.1373999999996, 5.44720000000052, 9.82779999999912, 7.87279999999919, 3.67760000000089, 5.46980000000076, 5.55099999999948, 5.65979999999945, 3.89439999999922, 3.1275999999998, 5.65140000000065, 6.3062000000009, 3.90799999999945, 1.87060000000019, 5.17020000000048, 2.46680000000015, 0.770000000000437, -3.72340000000077, 1.16400000000067, 8.05340000000069, 0.135399999999208, 2.15940000000046, 0.766999999999825, 1.0594000000001, 3.15500000000065, -0.287399999999252, 2.37219999999979, -2.86620000000039, -1.63199999999961, -2.22979999999916, -0.15519999999924, -1.46039999999994, -0.262199999999211, -2.34460000000036, -2.8078000000005, -3.22179999999935, -5.60159999999996, -8.42200000000048, -9.43740000000071, 0.161799999999857, -10.4755999999998, -10.0823999999993},\n\t// precision 12\n\t{2953, 2900.4782, 2848.3568, 2796.3666, 2745.324, 2694.9598, 2644.648, 2595.539, 2546.1474, 2498.2576, 2450.8376, 2403.6076, 2357.451, 2311.38, 2266.4104, 2221.5638, 2176.9676, 2134.193, 2090.838, 2048.8548, 2007.018, 1966.1742, 1925.4482, 1885.1294, 1846.4776, 1807.4044, 1768.8724, 1731.3732, 1693.4304, 1657.5326, 
1621.949, 1586.5532, 1551.7256, 1517.6182, 1483.5186, 1450.4528, 1417.865, 1385.7164, 1352.6828, 1322.6708, 1291.8312, 1260.9036, 1231.476, 1201.8652, 1173.6718, 1145.757, 1119.2072, 1092.2828, 1065.0434, 1038.6264, 1014.3192, 988.5746, 965.0816, 940.1176, 917.9796, 894.5576, 871.1858, 849.9144, 827.1142, 805.0818, 783.9664, 763.9096, 742.0816, 724.3962, 706.3454, 688.018, 667.4214, 650.3106, 633.0686, 613.8094, 597.818, 581.4248, 563.834, 547.363, 531.5066, 520.455400000001, 505.583199999999, 488.366, 476.480799999999, 459.7682, 450.0522, 434.328799999999, 423.952799999999, 408.727000000001, 399.079400000001, 387.252200000001, 373.987999999999, 360.852000000001, 351.6394, 339.642, 330.902400000001, 322.661599999999, 311.662200000001, 301.3254, 291.7484, 279.939200000001, 276.7508, 263.215200000001, 254.811400000001, 245.5494, 242.306399999999, 234.8734, 223.787200000001, 217.7156, 212.0196, 200.793, 195.9748, 189.0702, 182.449199999999, 177.2772, 170.2336, 164.741, 158.613600000001, 155.311, 147.5964, 142.837, 137.3724, 132.0162, 130.0424, 121.9804, 120.451800000001, 114.8968, 111.585999999999, 105.933199999999, 101.705, 98.5141999999996, 95.0488000000005, 89.7880000000005, 91.4750000000004, 83.7764000000006, 80.9698000000008, 72.8574000000008, 73.1615999999995, 67.5838000000003, 62.6263999999992, 63.2638000000006, 66.0977999999996, 52.0843999999997, 58.9956000000002, 47.0912000000008, 46.4956000000002, 48.4383999999991, 47.1082000000006, 43.2392, 37.2759999999998, 40.0283999999992, 35.1864000000005, 35.8595999999998, 32.0998, 28.027, 23.6694000000007, 33.8266000000003, 26.3736000000008, 27.2008000000005, 21.3245999999999, 26.4115999999995, 23.4521999999997, 19.5013999999992, 19.8513999999996, 10.7492000000002, 18.6424000000006, 13.1265999999996, 18.2436000000016, 6.71860000000015, 3.39459999999963, 6.33759999999893, 7.76719999999841, 0.813999999998487, 3.82819999999992, 0.826199999999517, 8.07440000000133, -1.59080000000176, 5.01780000000144, 0.455399999998917, 
-0.24199999999837, 0.174800000000687, -9.07640000000174, -4.20160000000033, -3.77520000000004, -4.75179999999818, -5.3724000000002, -8.90680000000066, -6.10239999999976, -5.74120000000039, -9.95339999999851, -3.86339999999836, -13.7304000000004, -16.2710000000006, -7.51359999999841, -3.30679999999847, -13.1339999999982, -10.0551999999989, -6.72019999999975, -8.59660000000076, -10.9307999999983, -1.8775999999998, -4.82259999999951, -13.7788, -21.6470000000008, -10.6735999999983, -15.7799999999988},\n\t// precision 13\n\t{5907.5052, 5802.2672, 5697.347, 5593.5794, 5491.2622, 5390.5514, 5290.3376, 5191.6952, 5093.5988, 4997.3552, 4902.5972, 4808.3082, 4715.5646, 4624.109, 4533.8216, 4444.4344, 4356.3802, 4269.2962, 4183.3784, 4098.292, 4014.79, 3932.4574, 3850.6036, 3771.2712, 3691.7708, 3615.099, 3538.1858, 3463.4746, 3388.8496, 3315.6794, 3244.5448, 3173.7516, 3103.3106, 3033.6094, 2966.5642, 2900.794, 2833.7256, 2769.81, 2707.3196, 2644.0778, 2583.9916, 2523.4662, 2464.124, 2406.073, 2347.0362, 2292.1006, 2238.1716, 2182.7514, 2128.4884, 2077.1314, 2025.037, 1975.3756, 1928.933, 1879.311, 1831.0006, 1783.2144, 1738.3096, 1694.5144, 1649.024, 1606.847, 1564.7528, 1525.3168, 1482.5372, 1443.9668, 1406.5074, 1365.867, 1329.2186, 1295.4186, 1257.9716, 1225.339, 1193.2972, 1156.3578, 1125.8686, 1091.187, 1061.4094, 1029.4188, 1000.9126, 972.3272, 944.004199999999, 915.7592, 889.965, 862.834200000001, 840.4254, 812.598399999999, 785.924200000001, 763.050999999999, 741.793799999999, 721.466, 699.040799999999, 677.997200000002, 649.866999999998, 634.911800000002, 609.8694, 591.981599999999, 570.2922, 557.129199999999, 538.3858, 521.872599999999, 502.951400000002, 495.776399999999, 475.171399999999, 459.751, 439.995200000001, 426.708999999999, 413.7016, 402.3868, 387.262599999998, 372.0524, 357.050999999999, 342.5098, 334.849200000001, 322.529399999999, 311.613799999999, 295.848000000002, 289.273000000001, 274.093000000001, 263.329600000001, 251.389599999999, 245.7392, 
231.9614, 229.7952, 217.155200000001, 208.9588, 199.016599999999, 190.839199999999, 180.6976, 176.272799999999, 166.976999999999, 162.5252, 151.196400000001, 149.386999999999, 133.981199999998, 130.0586, 130.164000000001, 122.053400000001, 110.7428, 108.1276, 106.232400000001, 100.381600000001, 98.7668000000012, 86.6440000000002, 79.9768000000004, 82.4722000000002, 68.7026000000005, 70.1186000000016, 71.9948000000004, 58.998599999999, 59.0492000000013, 56.9818000000014, 47.5338000000011, 42.9928, 51.1591999999982, 37.2740000000013, 42.7220000000016, 31.3734000000004, 26.8090000000011, 25.8934000000008, 26.5286000000015, 29.5442000000003, 19.3503999999994, 26.0760000000009, 17.9527999999991, 14.8419999999969, 10.4683999999979, 8.65899999999965, 9.86720000000059, 4.34139999999752, -0.907800000000861, -3.32080000000133, -0.936199999996461, -11.9916000000012, -8.87000000000262, -6.33099999999831, -11.3366000000024, -15.9207999999999, -9.34659999999712, -15.5034000000014, -19.2097999999969, -15.357799999998, -28.2235999999975, -30.6898000000001, -19.3271999999997, -25.6083999999973, -24.409599999999, -13.6385999999984, -33.4473999999973, -32.6949999999997, -28.9063999999998, -31.7483999999968, -32.2935999999972, -35.8329999999987, -47.620600000002, -39.0855999999985, -33.1434000000008, -46.1371999999974, -37.5892000000022, -46.8164000000033, -47.3142000000007, -60.2914000000019, -37.7575999999972},\n\t// precision 14\n\t{11816.475, 11605.0046, 11395.3792, 11188.7504, 10984.1814, 10782.0086, 10582.0072, 10384.503, 10189.178, 9996.2738, 9806.0344, 9617.9798, 9431.394, 9248.7784, 9067.6894, 8889.6824, 8712.9134, 8538.8624, 8368.4944, 8197.7956, 8031.8916, 7866.6316, 7703.733, 7544.5726, 7386.204, 7230.666, 7077.8516, 6926.7886, 6778.6902, 6631.9632, 6487.304, 6346.7486, 6206.4408, 6070.202, 5935.2576, 5799.924, 5671.0324, 5541.9788, 5414.6112, 5290.0274, 5166.723, 5047.6906, 4929.162, 4815.1406, 4699.127, 4588.5606, 4477.7394, 4369.4014, 4264.2728, 4155.9224, 4055.581, 
3955.505, 3856.9618, 3761.3828, 3666.9702, 3575.7764, 3482.4132, 3395.0186, 3305.8852, 3221.415, 3138.6024, 3056.296, 2970.4494, 2896.1526, 2816.8008, 2740.2156, 2670.497, 2594.1458, 2527.111, 2460.8168, 2387.5114, 2322.9498, 2260.6752, 2194.2686, 2133.7792, 2074.767, 2015.204, 1959.4226, 1898.6502, 1850.006, 1792.849, 1741.4838, 1687.9778, 1638.1322, 1589.3266, 1543.1394, 1496.8266, 1447.8516, 1402.7354, 1361.9606, 1327.0692, 1285.4106, 1241.8112, 1201.6726, 1161.973, 1130.261, 1094.2036, 1048.2036, 1020.6436, 990.901400000002, 961.199800000002, 924.769800000002, 899.526400000002, 872.346400000002, 834.375, 810.432000000001, 780.659800000001, 756.013800000001, 733.479399999997, 707.923999999999, 673.858, 652.222399999999, 636.572399999997, 615.738599999997, 586.696400000001, 564.147199999999, 541.679600000003, 523.943599999999, 505.714599999999, 475.729599999999, 461.779600000002, 449.750800000002, 439.020799999998, 412.7886, 400.245600000002, 383.188199999997, 362.079599999997, 357.533799999997, 334.319000000003, 327.553399999997, 308.559399999998, 291.270199999999, 279.351999999999, 271.791400000002, 252.576999999997, 247.482400000001, 236.174800000001, 218.774599999997, 220.155200000001, 208.794399999999, 201.223599999998, 182.995600000002, 185.5268, 164.547400000003, 176.5962, 150.689599999998, 157.8004, 138.378799999999, 134.021200000003, 117.614399999999, 108.194000000003, 97.0696000000025, 89.6042000000016, 95.6030000000028, 84.7810000000027, 72.635000000002, 77.3482000000004, 59.4907999999996, 55.5875999999989, 50.7346000000034, 61.3916000000027, 50.9149999999936, 39.0384000000049, 58.9395999999979, 29.633600000001, 28.2032000000036, 26.0078000000067, 17.0387999999948, 9.22000000000116, 13.8387999999977, 8.07240000000456, 14.1549999999988, 15.3570000000036, 3.42660000000615, 6.24820000000182, -2.96940000000177, -8.79940000000352, -5.97860000000219, -14.4048000000039, -3.4143999999942, -13.0148000000045, -11.6977999999945, -25.7878000000055, 
-22.3185999999987, -24.409599999999, -31.9756000000052, -18.9722000000038, -22.8678000000073, -30.8972000000067, -32.3715999999986, -22.3907999999938, -43.6720000000059, -35.9038, -39.7492000000057, -54.1641999999993, -45.2749999999942, -42.2989999999991, -44.1089999999967, -64.3564000000042, -49.9551999999967, -42.6116000000038},\n\t// precision 15\n\t{23634.0036, 23210.8034, 22792.4744, 22379.1524, 21969.7928, 21565.326, 21165.3532, 20770.2806, 20379.9892, 19994.7098, 19613.318, 19236.799, 18865.4382, 18498.8244, 18136.5138, 17778.8668, 17426.2344, 17079.32, 16734.778, 16397.2418, 16063.3324, 15734.0232, 15409.731, 15088.728, 14772.9896, 14464.1402, 14157.5588, 13855.5958, 13559.3296, 13264.9096, 12978.326, 12692.0826, 12413.8816, 12137.3192, 11870.2326, 11602.5554, 11340.3142, 11079.613, 10829.5908, 10583.5466, 10334.0344, 10095.5072, 9859.694, 9625.2822, 9395.7862, 9174.0586, 8957.3164, 8738.064, 8524.155, 8313.7396, 8116.9168, 7913.542, 7718.4778, 7521.65, 7335.5596, 7154.2906, 6968.7396, 6786.3996, 6613.236, 6437.406, 6270.6598, 6107.7958, 5945.7174, 5787.6784, 5635.5784, 5482.308, 5337.9784, 5190.0864, 5045.9158, 4919.1386, 4771.817, 4645.7742, 4518.4774, 4385.5454, 4262.6622, 4142.74679999999, 4015.5318, 3897.9276, 3790.7764, 3685.13800000001, 3573.6274, 3467.9706, 3368.61079999999, 3271.5202, 3170.3848, 3076.4656, 2982.38400000001, 2888.4664, 2806.4868, 2711.9564, 2634.1434, 2551.3204, 2469.7662, 2396.61139999999, 2318.9902, 2243.8658, 2171.9246, 2105.01360000001, 2028.8536, 1960.9952, 1901.4096, 1841.86079999999, 1777.54700000001, 1714.5802, 1654.65059999999, 1596.311, 1546.2016, 1492.3296, 1433.8974, 1383.84600000001, 1339.4152, 1293.5518, 1245.8686, 1193.50659999999, 1162.27959999999, 1107.19439999999, 1069.18060000001, 1035.09179999999, 999.679000000004, 957.679999999993, 925.300199999998, 888.099400000006, 848.638600000006, 818.156400000007, 796.748399999997, 752.139200000005, 725.271200000003, 692.216, 671.633600000001, 647.939799999993, 
621.670599999998, 575.398799999995, 561.226599999995, 532.237999999998, 521.787599999996, 483.095799999996, 467.049599999998, 465.286399999997, 415.548599999995, 401.047399999996, 380.607999999993, 377.362599999993, 347.258799999996, 338.371599999999, 310.096999999994, 301.409199999995, 276.280799999993, 265.586800000005, 258.994399999996, 223.915999999997, 215.925399999993, 213.503800000006, 191.045400000003, 166.718200000003, 166.259000000005, 162.941200000001, 148.829400000002, 141.645999999993, 123.535399999993, 122.329800000007, 89.473399999988, 80.1962000000058, 77.5457999999926, 59.1056000000099, 83.3509999999951, 52.2906000000075, 36.3979999999865, 40.6558000000077, 42.0003999999899, 19.6630000000005, 19.7153999999864, -8.38539999999921, -0.692799999989802, 0.854800000000978, 3.23219999999856, -3.89040000000386, -5.25880000001052, -24.9052000000083, -22.6837999999989, -26.4286000000138, -34.997000000003, -37.0216000000073, -43.430400000012, -58.2390000000014, -68.8034000000043, -56.9245999999985, -57.8583999999973, -77.3097999999882, -73.2793999999994, -81.0738000000129, -87.4530000000086, -65.0254000000132, -57.296399999992, -96.2746000000043, -103.25, -96.081600000005, -91.5542000000132, -102.465200000006, -107.688599999994, -101.458000000013, -109.715800000005},\n\t// precision 16\n\t{47270, 46423.3584, 45585.7074, 44757.152, 43938.8416, 43130.9514, 42330.03, 41540.407, 40759.6348, 39988.206, 39226.5144, 38473.2096, 37729.795, 36997.268, 36272.6448, 35558.665, 34853.0248, 34157.4472, 33470.5204, 32793.5742, 32127.0194, 31469.4182, 30817.6136, 30178.6968, 29546.8908, 28922.8544, 28312.271, 27707.0924, 27114.0326, 26526.692, 25948.6336, 25383.7826, 24823.5998, 24272.2974, 23732.2572, 23201.4976, 22674.2796, 22163.6336, 21656.515, 21161.7362, 20669.9368, 20189.4424, 19717.3358, 19256.3744, 18795.9638, 18352.197, 17908.5738, 17474.391, 17052.918, 16637.2236, 16228.4602, 15823.3474, 15428.6974, 15043.0284, 14667.6278, 14297.4588, 13935.2882, 13578.5402, 
13234.6032, 12882.1578, 12548.0728, 12219.231, 11898.0072, 11587.2626, 11279.9072, 10973.5048, 10678.5186, 10392.4876, 10105.2556, 9825.766, 9562.5444, 9294.2222, 9038.2352, 8784.848, 8533.2644, 8301.7776, 8058.30859999999, 7822.94579999999, 7599.11319999999, 7366.90779999999, 7161.217, 6957.53080000001, 6736.212, 6548.21220000001, 6343.06839999999, 6156.28719999999, 5975.15419999999, 5791.75719999999, 5621.32019999999, 5451.66, 5287.61040000001, 5118.09479999999, 4957.288, 4798.4246, 4662.17559999999, 4512.05900000001, 4364.68539999999, 4220.77720000001, 4082.67259999999, 3957.19519999999, 3842.15779999999, 3699.3328, 3583.01180000001, 3473.8964, 3338.66639999999, 3233.55559999999, 3117.799, 3008.111, 2909.69140000001, 2814.86499999999, 2719.46119999999, 2624.742, 2532.46979999999, 2444.7886, 2370.1868, 2272.45259999999, 2196.19260000001, 2117.90419999999, 2023.2972, 1969.76819999999, 1885.58979999999, 1833.2824, 1733.91200000001, 1682.54920000001, 1604.57980000001, 1556.11240000001, 1491.3064, 1421.71960000001, 1371.22899999999, 1322.1324, 1264.7892, 1196.23920000001, 1143.8474, 1088.67240000001, 1073.60380000001, 1023.11660000001, 959.036400000012, 927.433199999999, 906.792799999996, 853.433599999989, 841.873800000001, 791.1054, 756.899999999994, 704.343200000003, 672.495599999995, 622.790399999998, 611.254799999995, 567.283200000005, 519.406599999988, 519.188400000014, 495.312800000014, 451.350799999986, 443.973399999988, 431.882199999993, 392.027000000002, 380.924200000009, 345.128999999986, 298.901400000002, 287.771999999997, 272.625, 247.253000000026, 222.490600000019, 223.590000000026, 196.407599999977, 176.425999999978, 134.725199999986, 132.4804, 110.445599999977, 86.7939999999944, 56.7038000000175, 64.915399999998, 38.3726000000024, 37.1606000000029, 46.170999999973, 49.1716000000015, 15.3362000000197, 6.71639999997569, -34.8185999999987, -39.4476000000141, 12.6830000000191, -12.3331999999937, -50.6565999999875, -59.9538000000175, -65.1054000000004, 
-70.7576000000117, -106.325200000021, -126.852200000023, -110.227599999984, -132.885999999999, -113.897200000007, -142.713800000027, -151.145399999979, -150.799200000009, -177.756200000003, -156.036399999983, -182.735199999996, -177.259399999981, -198.663600000029, -174.577600000019, -193.84580000001},\n\t// precision 17\n\t{94541, 92848.811, 91174.019, 89517.558, 87879.9705, 86262.7565, 84663.5125, 83083.7435, 81521.7865, 79977.272, 78455.9465, 76950.219, 75465.432, 73994.152, 72546.71, 71115.2345, 69705.6765, 68314.937, 66944.2705, 65591.255, 64252.9485, 62938.016, 61636.8225, 60355.592, 59092.789, 57850.568, 56624.518, 55417.343, 54231.1415, 53067.387, 51903.526, 50774.649, 49657.6415, 48561.05, 47475.7575, 46410.159, 45364.852, 44327.053, 43318.4005, 42325.6165, 41348.4595, 40383.6265, 39436.77, 38509.502, 37594.035, 36695.939, 35818.6895, 34955.691, 34115.8095, 33293.949, 32465.0775, 31657.6715, 30877.2585, 30093.78, 29351.3695, 28594.1365, 27872.115, 27168.7465, 26477.076, 25774.541, 25106.5375, 24452.5135, 23815.5125, 23174.0655, 22555.2685, 21960.2065, 21376.3555, 20785.1925, 20211.517, 19657.0725, 19141.6865, 18579.737, 18081.3955, 17578.995, 17073.44, 16608.335, 16119.911, 15651.266, 15194.583, 14749.0495, 14343.4835, 13925.639, 13504.509, 13099.3885, 12691.2855, 12328.018, 11969.0345, 11596.5145, 11245.6355, 10917.6575, 10580.9785, 10277.8605, 9926.58100000001, 9605.538, 9300.42950000003, 8989.97850000003, 8728.73249999998, 8448.3235, 8175.31050000002, 7898.98700000002, 7629.79100000003, 7413.76199999999, 7149.92300000001, 6921.12650000001, 6677.1545, 6443.28000000003, 6278.23450000002, 6014.20049999998, 5791.20299999998, 5605.78450000001, 5438.48800000001, 5234.2255, 5059.6825, 4887.43349999998, 4682.935, 4496.31099999999, 4322.52250000002, 4191.42499999999, 4021.24200000003, 3900.64799999999, 3762.84250000003, 3609.98050000001, 3502.29599999997, 3363.84250000003, 3206.54849999998, 3079.70000000001, 2971.42300000001, 2867.80349999998, 2727.08100000001, 
2630.74900000001, 2496.6165, 2440.902, 2356.19150000002, 2235.58199999999, 2120.54149999999, 2012.25449999998, 1933.35600000003, 1820.93099999998, 1761.54800000001, 1663.09350000002, 1578.84600000002, 1509.48149999999, 1427.3345, 1379.56150000001, 1306.68099999998, 1212.63449999999, 1084.17300000001, 1124.16450000001, 1060.69949999999, 1007.48849999998, 941.194499999983, 879.880500000028, 836.007500000007, 782.802000000025, 748.385499999975, 647.991500000004, 626.730500000005, 570.776000000013, 484.000500000024, 513.98550000001, 418.985499999952, 386.996999999974, 370.026500000036, 355.496999999974, 356.731499999994, 255.92200000002, 259.094000000041, 205.434499999974, 165.374500000034, 197.347500000033, 95.718499999959, 67.6165000000037, 54.6970000000438, 31.7395000000251, -15.8784999999916, 8.42500000004657, -26.3754999999655, -118.425500000012, -66.6629999999423, -42.9745000000112, -107.364999999991, -189.839000000036, -162.611499999999, -164.964999999967, -189.079999999958, -223.931499999948, -235.329999999958, -269.639500000048, -249.087999999989, -206.475499999942, -283.04449999996, -290.667000000016, -304.561499999953, -336.784499999951, -380.386500000022, -283.280499999993, -364.533000000054, -389.059499999974, -364.454000000027, -415.748000000021, -417.155000000028},\n\t// precision 18\n\t{189083, 185696.913, 182348.774, 179035.946, 175762.762, 172526.444, 169329.754, 166166.099, 163043.269, 159958.91, 156907.912, 153906.845, 150924.199, 147996.568, 145093.457, 142239.233, 139421.475, 136632.27, 133889.588, 131174.2, 128511.619, 125868.621, 123265.385, 120721.061, 118181.769, 115709.456, 113252.446, 110840.198, 108465.099, 106126.164, 103823.469, 101556.618, 99308.004, 97124.508, 94937.803, 92833.731, 90745.061, 88677.627, 86617.47, 84650.442, 82697.833, 80769.132, 78879.629, 77014.432, 75215.626, 73384.587, 71652.482, 69895.93, 68209.301, 66553.669, 64921.981, 63310.323, 61742.115, 60205.018, 58698.658, 57190.657, 55760.865, 54331.169, 52908.167, 
51550.273, 50225.254, 48922.421, 47614.533, 46362.049, 45098.569, 43926.083, 42736.03, 41593.473, 40425.26, 39316.237, 38243.651, 37170.617, 36114.609, 35084.19, 34117.233, 33206.509, 32231.505, 31318.728, 30403.404, 29540.0550000001, 28679.236, 27825.862, 26965.216, 26179.148, 25462.08, 24645.952, 23922.523, 23198.144, 22529.128, 21762.4179999999, 21134.779, 20459.117, 19840.818, 19187.04, 18636.3689999999, 17982.831, 17439.7389999999, 16874.547, 16358.2169999999, 15835.684, 15352.914, 14823.681, 14329.313, 13816.897, 13342.874, 12880.882, 12491.648, 12021.254, 11625.392, 11293.7610000001, 10813.697, 10456.209, 10099.074, 9755.39000000001, 9393.18500000006, 9047.57900000003, 8657.98499999999, 8395.85900000005, 8033, 7736.95900000003, 7430.59699999995, 7258.47699999996, 6924.58200000005, 6691.29399999999, 6357.92500000005, 6202.05700000003, 5921.19700000004, 5628.28399999999, 5404.96799999999, 5226.71100000001, 4990.75600000005, 4799.77399999998, 4622.93099999998, 4472.478, 4171.78700000001, 3957.46299999999, 3868.95200000005, 3691.14300000004, 3474.63100000005, 3341.67200000002, 3109.14000000001, 3071.97400000005, 2796.40399999998, 2756.17799999996, 2611.46999999997, 2471.93000000005, 2382.26399999997, 2209.22400000005, 2142.28399999999, 2013.96100000001, 1911.18999999994, 1818.27099999995, 1668.47900000005, 1519.65800000005, 1469.67599999998, 1367.13800000004, 1248.52899999998, 1181.23600000003, 1022.71900000004, 1088.20700000005, 959.03600000008, 876.095999999903, 791.183999999892, 703.337000000058, 731.949999999953, 586.86400000006, 526.024999999907, 323.004999999888, 320.448000000091, 340.672999999952, 309.638999999966, 216.601999999955, 102.922999999952, 19.2399999999907, -0.114000000059605, -32.6240000000689, -89.3179999999702, -153.497999999905, -64.2970000000205, -143.695999999996, -259.497999999905, -253.017999999924, -213.948000000091, -397.590000000084, -434.006000000052, -403.475000000093, -297.958000000101, -404.317000000039, -528.898999999976, 
-506.621000000043, -513.205000000075, -479.351000000024, -596.139999999898, -527.016999999993, -664.681000000099, -680.306000000099, -704.050000000047, -850.486000000034, -757.43200000003, -713.308999999892},\n}\n\n// Threshold describes the point at which HLL++ will use linear counting to\n// determine the cardinality.\nvar threshold = []float64{\n\t10.0,     // precision 4\n\t20.0,     // precision 5\n\t40.0,     // precision 6\n\t80.0,     // precision 7\n\t220.0,    // precision 8\n\t400.0,    // precision 9\n\t900.0,    // precision 10\n\t1800.0,   // precision 11\n\t3100.0,   // precision 12\n\t6500.0,   // precision 13\n\t11500.0,  // precision 14\n\t20000.0,  // precision 15\n\t50000.0,  // precision 16\n\t120000.0, // precision 17\n\t350000.0, // precision 18\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/estimator/hll/hll.go",
    "content": "// Package hll contains a HyperLogLog++ implementation that is adapted (mostly\n// copied) from an implementation provided by Clark DuVall\n// github.com/clarkduvall/hyperloglog.\n//\n// The differences are that the implementation in this package:\n//\n//   * uses an AMD64 optimised xxhash algorithm instead of murmur;\n//   * uses some AMD64 optimisations for things like clz;\n//   * works with []byte rather than a Hash64 interface, to reduce allocations;\n//   * implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler\n//\n// Based on some rough benchmarking, this implementation of HyperLogLog++ is\n// around twice as fast as the github.com/clarkduvall/hyperloglog implementation.\npackage hll\n\nimport (\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\n\t\"github.com/cespare/xxhash\"\n\t\"github.com/dgryski/go-bits\"\n\t\"github.com/influxdata/influxdb/pkg/estimator\"\n)\n\n// Current version of HLL implementation.\nconst version uint8 = 1\n\n// DefaultPrecision is the default precision.\nconst DefaultPrecision = 16\n\n// Plus implements the Hyperloglog++ algorithm, described in the following\n// paper: http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/40671.pdf\n//\n// The HyperLogLog++ algorithm provides cardinality estimations.\ntype Plus struct {\n\t// hash function used to hash values to add to the sketch.\n\thash func([]byte) uint64\n\n\tp  uint8 // precision.\n\tpp uint8 // p' (sparse) precision to be used when p ∈ [4..pp] and pp < 64.\n\n\tm  uint32 // Number of substream used for stochastic averaging of stream.\n\tmp uint32 // m' (sparse) number of substreams.\n\n\talpha float64 // alpha is used for bias correction.\n\n\tsparse bool // Should we use a sparse sketch representation.\n\ttmpSet set\n\n\tdenseList  []uint8         // The dense representation of the HLL.\n\tsparseList *compressedList // values that can be stored in the sparse represenation.\n}\n\n// NewPlus returns 
a new Plus with precision p. p must be between 4 and 18.\nfunc NewPlus(p uint8) (*Plus, error) {\n\tif p > 18 || p < 4 {\n\t\treturn nil, errors.New(\"precision must be between 4 and 18\")\n\t}\n\n\t// p' = 25 is used in the Google paper.\n\tpp := uint8(25)\n\n\thll := &Plus{\n\t\thash:   xxhash.Sum64,\n\t\tp:      p,\n\t\tpp:     pp,\n\t\tm:      1 << p,\n\t\tmp:     1 << pp,\n\t\ttmpSet: set{},\n\t\tsparse: true,\n\t}\n\thll.sparseList = newCompressedList(int(hll.m))\n\n\t// Determine alpha.\n\tswitch hll.m {\n\tcase 16:\n\t\thll.alpha = 0.673\n\tcase 32:\n\t\thll.alpha = 0.697\n\tcase 64:\n\t\thll.alpha = 0.709\n\tdefault:\n\t\thll.alpha = 0.7213 / (1 + 1.079/float64(hll.m))\n\t}\n\n\treturn hll, nil\n}\n\n// NewDefaultPlus creates a new Plus with the default precision.\nfunc NewDefaultPlus() *Plus {\n\tp, err := NewPlus(DefaultPrecision)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn p\n}\n\n// MustNewPlus returns a new Plus with precision p. Panic on error.\nfunc MustNewPlus(p uint8) *Plus {\n\thll, err := NewPlus(p)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn hll\n}\n\n// Clone returns a deep copy of h.\nfunc (h *Plus) Clone() *Plus {\n\tvar hll = &Plus{\n\t\thash:       h.hash,\n\t\tp:          h.p,\n\t\tpp:         h.pp,\n\t\tm:          h.m,\n\t\tmp:         h.mp,\n\t\talpha:      h.alpha,\n\t\tsparse:     h.sparse,\n\t\ttmpSet:     h.tmpSet.Clone(),\n\t\tsparseList: h.sparseList.Clone(),\n\t}\n\n\thll.denseList = make([]uint8, len(h.denseList))\n\tcopy(hll.denseList, h.denseList)\n\treturn hll\n}\n\n// Add adds a new value to the HLL.\nfunc (h *Plus) Add(v []byte) {\n\tx := h.hash(v)\n\tif h.sparse {\n\t\th.tmpSet.add(h.encodeHash(x))\n\n\t\tif uint32(len(h.tmpSet))*100 > h.m {\n\t\t\th.mergeSparse()\n\t\t\tif uint32(h.sparseList.Len()) > h.m {\n\t\t\t\th.toNormal()\n\t\t\t}\n\t\t}\n\t} else {\n\t\ti := bextr(x, 64-h.p, h.p) // {x63,...,x64-p}\n\t\tw := x<<h.p | 1<<(h.p-1)   // {x63-p,...,x0}\n\n\t\trho := uint8(bits.Clz(w)) + 1\n\t\tif rho > 
h.denseList[i] {\n\t\t\th.denseList[i] = rho\n\t\t}\n\t}\n}\n\n// Count returns a cardinality estimate.\nfunc (h *Plus) Count() uint64 {\n\tif h.sparse {\n\t\th.mergeSparse()\n\t\treturn uint64(h.linearCount(h.mp, h.mp-uint32(h.sparseList.count)))\n\t}\n\n\test, zeros := h.e()\n\tif est <= 5.0*float64(h.m) {\n\t\test -= h.estimateBias(est)\n\t}\n\n\tif zeros > 0 {\n\t\tlc := h.linearCount(h.m, zeros)\n\t\tif lc <= threshold[h.p-4] {\n\t\t\treturn uint64(lc)\n\t\t}\n\t}\n\treturn uint64(est)\n}\n\n// Merge takes another HyperLogLogPlus and combines it with HyperLogLogPlus h.\n// If HyperLogLogPlus h is using the sparse representation, it will be converted\n// to the normal representation.\nfunc (h *Plus) Merge(s estimator.Sketch) error {\n\tif s == nil {\n\t\t// Nothing to do\n\t\treturn nil\n\t}\n\n\tother, ok := s.(*Plus)\n\tif !ok {\n\t\treturn fmt.Errorf(\"wrong type for merging: %T\", other)\n\t}\n\n\tif h.p != other.p {\n\t\treturn errors.New(\"precisions must be equal\")\n\t}\n\n\tif h.sparse {\n\t\th.toNormal()\n\t}\n\n\tif other.sparse {\n\t\tfor k := range other.tmpSet {\n\t\t\ti, r := other.decodeHash(k)\n\t\t\tif h.denseList[i] < r {\n\t\t\t\th.denseList[i] = r\n\t\t\t}\n\t\t}\n\n\t\tfor iter := other.sparseList.Iter(); iter.HasNext(); {\n\t\t\ti, r := other.decodeHash(iter.Next())\n\t\t\tif h.denseList[i] < r {\n\t\t\t\th.denseList[i] = r\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfor i, v := range other.denseList {\n\t\t\tif v > h.denseList[i] {\n\t\t\t\th.denseList[i] = v\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// MarshalBinary implements the encoding.BinaryMarshaler interface.\nfunc (h *Plus) MarshalBinary() (data []byte, err error) {\n\t// Marshal a version marker.\n\tdata = append(data, version)\n\n\t// Marshal precision.\n\tdata = append(data, byte(h.p))\n\n\tif h.sparse {\n\t\t// It's using the sparse representation.\n\t\tdata = append(data, byte(1))\n\n\t\t// Add the tmp_set\n\t\ttsdata, err := h.tmpSet.MarshalBinary()\n\t\tif err != nil {\n\t\t\treturn 
nil, err\n\t\t}\n\t\tdata = append(data, tsdata...)\n\n\t\t// Add the sparse representation\n\t\tsdata, err := h.sparseList.MarshalBinary()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn append(data, sdata...), nil\n\t}\n\n\t// It's using the dense representation.\n\tdata = append(data, byte(0))\n\n\t// Add the dense sketch representation.\n\tsz := len(h.denseList)\n\tdata = append(data, []byte{\n\t\tbyte(sz >> 24),\n\t\tbyte(sz >> 16),\n\t\tbyte(sz >> 8),\n\t\tbyte(sz),\n\t}...)\n\n\t// Marshal each element in the list.\n\tfor i := 0; i < len(h.denseList); i++ {\n\t\tdata = append(data, byte(h.denseList[i]))\n\t}\n\n\treturn data, nil\n}\n\n// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.\nfunc (h *Plus) UnmarshalBinary(data []byte) error {\n\t// Unmarshal version. We may need this in the future if we make\n\t// non-compatible changes.\n\t_ = data[0]\n\n\t// Unmarshal precision.\n\tp := uint8(data[1])\n\n\tnewh, err := NewPlus(p)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*h = *newh\n\n\t// h is now initialised with the correct precision. 
We just need to fill the\n\t// rest of the details out.\n\tif data[2] == byte(1) {\n\t\t// Using the sparse representation.\n\t\th.sparse = true\n\n\t\t// Unmarshal the tmp_set.\n\t\ttssz := binary.BigEndian.Uint32(data[3:7])\n\t\th.tmpSet = make(map[uint32]struct{}, tssz)\n\n\t\t// We need to unmarshal tssz values in total, and each value requires us\n\t\t// to read 4 bytes.\n\t\ttsLastByte := int((tssz * 4) + 7)\n\t\tfor i := 7; i < tsLastByte; i += 4 {\n\t\t\tk := binary.BigEndian.Uint32(data[i : i+4])\n\t\t\th.tmpSet[k] = struct{}{}\n\t\t}\n\n\t\t// Unmarshal the sparse representation.\n\t\treturn h.sparseList.UnmarshalBinary(data[tsLastByte:])\n\t}\n\n\t// Using the dense representation.\n\th.sparse = false\n\tdsz := int(binary.BigEndian.Uint32(data[3:7]))\n\th.denseList = make([]uint8, 0, dsz)\n\tfor i := 7; i < dsz+7; i++ {\n\t\th.denseList = append(h.denseList, uint8(data[i]))\n\t}\n\treturn nil\n}\n\nfunc (h *Plus) mergeSparse() {\n\tif len(h.tmpSet) == 0 {\n\t\treturn\n\t}\n\tkeys := make(uint64Slice, 0, len(h.tmpSet))\n\tfor k := range h.tmpSet {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Sort(keys)\n\n\tnewList := newCompressedList(int(h.m))\n\tfor iter, i := h.sparseList.Iter(), 0; iter.HasNext() || i < len(keys); {\n\t\tif !iter.HasNext() {\n\t\t\tnewList.Append(keys[i])\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\n\t\tif i >= len(keys) {\n\t\t\tnewList.Append(iter.Next())\n\t\t\tcontinue\n\t\t}\n\n\t\tx1, x2 := iter.Peek(), keys[i]\n\t\tif x1 == x2 {\n\t\t\tnewList.Append(iter.Next())\n\t\t\ti++\n\t\t} else if x1 > x2 {\n\t\t\tnewList.Append(x2)\n\t\t\ti++\n\t\t} else {\n\t\t\tnewList.Append(iter.Next())\n\t\t}\n\t}\n\n\th.sparseList = newList\n\th.tmpSet = set{}\n}\n\n// Convert from sparse representation to dense representation.\nfunc (h *Plus) toNormal() {\n\tif len(h.tmpSet) > 0 {\n\t\th.mergeSparse()\n\t}\n\n\th.denseList = make([]uint8, h.m)\n\tfor iter := h.sparseList.Iter(); iter.HasNext(); {\n\t\ti, r := h.decodeHash(iter.Next())\n\t\tif 
h.denseList[i] < r {\n\t\t\th.denseList[i] = r\n\t\t}\n\t}\n\n\th.sparse = false\n\th.tmpSet = nil\n\th.sparseList = nil\n}\n\n// Encode a hash to be used in the sparse representation.\nfunc (h *Plus) encodeHash(x uint64) uint32 {\n\tidx := uint32(bextr(x, 64-h.pp, h.pp))\n\tif bextr(x, 64-h.pp, h.pp-h.p) == 0 {\n\t\tzeros := bits.Clz((bextr(x, 0, 64-h.pp)<<h.pp)|(1<<h.pp-1)) + 1\n\t\treturn idx<<7 | uint32(zeros<<1) | 1\n\t}\n\treturn idx << 1\n}\n\n// Decode a hash from the sparse representation.\nfunc (h *Plus) decodeHash(k uint32) (uint32, uint8) {\n\tvar r uint8\n\tif k&1 == 1 {\n\t\tr = uint8(bextr32(k, 1, 6)) + h.pp - h.p\n\t} else {\n\t\t// We can use the 64bit clz implementation and reduce the result\n\t\t// by 32 to get a clz for a 32bit word.\n\t\tr = uint8(bits.Clz(uint64(k<<(32-h.pp+h.p-1))) - 31) // -32 + 1\n\t}\n\treturn h.getIndex(k), r\n}\n\nfunc (h *Plus) getIndex(k uint32) uint32 {\n\tif k&1 == 1 {\n\t\treturn bextr32(k, 32-h.p, h.p)\n\t}\n\treturn bextr32(k, h.pp-h.p+1, h.p)\n}\n\nfunc (h *Plus) linearCount(m uint32, v uint32) float64 {\n\tfm := float64(m)\n\treturn fm * math.Log(fm/float64(v))\n}\n\n// E calculates the raw estimate. 
It also returns the number of zero registers\n// which is useful for later on in a cardinality estimate.\nfunc (h *Plus) e() (float64, uint32) {\n\tsum := 0.0\n\tvar count uint32\n\tfor _, val := range h.denseList {\n\t\tsum += 1.0 / float64(uint32(1)<<val)\n\t\tif val == 0 {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn h.alpha * float64(h.m) * float64(h.m) / sum, count\n}\n\n// Estimates the bias using empirically determined values.\nfunc (h *Plus) estimateBias(est float64) float64 {\n\testTable, biasTable := rawEstimateData[h.p-4], biasData[h.p-4]\n\n\tif estTable[0] > est {\n\t\treturn estTable[0] - biasTable[0]\n\t}\n\n\tlastEstimate := estTable[len(estTable)-1]\n\tif lastEstimate < est {\n\t\treturn lastEstimate - biasTable[len(biasTable)-1]\n\t}\n\n\tvar i int\n\tfor i = 0; i < len(estTable) && estTable[i] < est; i++ {\n\t}\n\n\te1, b1 := estTable[i-1], biasTable[i-1]\n\te2, b2 := estTable[i], biasTable[i]\n\n\tc := (est - e1) / (e2 - e1)\n\treturn b1*(1-c) + b2*c\n}\n\ntype uint64Slice []uint32\n\nfunc (p uint64Slice) Len() int           { return len(p) }\nfunc (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }\nfunc (p uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }\n\ntype set map[uint32]struct{}\n\nfunc (s set) Clone() set {\n\tif s == nil {\n\t\treturn nil\n\t}\n\n\tnewS := make(map[uint32]struct{}, len(s))\n\tfor k, v := range s {\n\t\tnewS[k] = v\n\t}\n\treturn newS\n}\n\nfunc (s set) MarshalBinary() (data []byte, err error) {\n\t// 4 bytes for the size of the set, and 4 bytes for each key.\n\t// list.\n\tdata = make([]byte, 0, 4+(4*len(s)))\n\n\t// Length of the set. 
We only need 32 bits because the size of the set\n\t// couldn't exceed that on 32 bit architectures.\n\tsl := len(s)\n\tdata = append(data, []byte{\n\t\tbyte(sl >> 24),\n\t\tbyte(sl >> 16),\n\t\tbyte(sl >> 8),\n\t\tbyte(sl),\n\t}...)\n\n\t// Marshal each element in the set.\n\tfor k := range s {\n\t\tdata = append(data, []byte{\n\t\t\tbyte(k >> 24),\n\t\t\tbyte(k >> 16),\n\t\t\tbyte(k >> 8),\n\t\t\tbyte(k),\n\t\t}...)\n\t}\n\n\treturn data, nil\n}\n\nfunc (s set) add(v uint32)      { s[v] = struct{}{} }\nfunc (s set) has(v uint32) bool { _, ok := s[v]; return ok }\n\n// bextr performs a bitfield extract on v. start should be the LSB of the field\n// you wish to extract, and length the number of bits to extract.\n//\n// For example: start=0 and length=4 for the following 64-bit word would result\n// in 1111 being returned.\n//\n// <snip 56 bits>00011110\n// returns 1110\nfunc bextr(v uint64, start, length uint8) uint64 {\n\treturn (v >> start) & ((1 << length) - 1)\n}\n\nfunc bextr32(v uint32, start, length uint8) uint32 {\n\treturn (v >> start) & ((1 << length) - 1)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/estimator/hll/hll_test.go",
    "content": "package hll\n\nimport (\n\tcrand \"crypto/rand\"\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"math\"\n\t\"math/rand\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n)\n\nfunc nopHash(buf []byte) uint64 {\n\tif len(buf) != 8 {\n\t\tpanic(fmt.Sprintf(\"unexpected size buffer: %d\", len(buf)))\n\t}\n\treturn binary.BigEndian.Uint64(buf)\n}\n\nfunc toByte(v uint64) []byte {\n\tvar buf [8]byte\n\tbinary.BigEndian.PutUint64(buf[:], v)\n\treturn buf[:]\n}\n\nfunc TestHLLPP_Add_NoSparse(t *testing.T) {\n\th := NewTestPlus(16)\n\th.toNormal()\n\n\th.Add(toByte(0x00010fffffffffff))\n\tn := h.denseList[1]\n\tif n != 5 {\n\t\tt.Error(n)\n\t}\n\n\th.Add(toByte(0x0002ffffffffffff))\n\tn = h.denseList[2]\n\tif n != 1 {\n\t\tt.Error(n)\n\t}\n\n\th.Add(toByte(0x0003000000000000))\n\tn = h.denseList[3]\n\tif n != 49 {\n\t\tt.Error(n)\n\t}\n\n\th.Add(toByte(0x0003000000000001))\n\tn = h.denseList[3]\n\tif n != 49 {\n\t\tt.Error(n)\n\t}\n\n\th.Add(toByte(0xff03700000000000))\n\tn = h.denseList[0xff03]\n\tif n != 2 {\n\t\tt.Error(n)\n\t}\n\n\th.Add(toByte(0xff03080000000000))\n\tn = h.denseList[0xff03]\n\tif n != 5 {\n\t\tt.Error(n)\n\t}\n}\n\nfunc TestHLLPPPrecision_NoSparse(t *testing.T) {\n\th := NewTestPlus(4)\n\th.toNormal()\n\n\th.Add(toByte(0x1fffffffffffffff))\n\tn := h.denseList[1]\n\tif n != 1 {\n\t\tt.Error(n)\n\t}\n\n\th.Add(toByte(0xffffffffffffffff))\n\tn = h.denseList[0xf]\n\tif n != 1 {\n\t\tt.Error(n)\n\t}\n\n\th.Add(toByte(0x00ffffffffffffff))\n\tn = h.denseList[0]\n\tif n != 5 {\n\t\tt.Error(n)\n\t}\n}\n\nfunc TestHLLPP_toNormal(t *testing.T) {\n\th := NewTestPlus(16)\n\th.Add(toByte(0x00010fffffffffff))\n\th.toNormal()\n\tc := h.Count()\n\tif c != 1 {\n\t\tt.Error(c)\n\t}\n\n\tif h.sparse {\n\t\tt.Error(\"toNormal should convert to normal\")\n\t}\n\n\th = NewTestPlus(16)\n\th.hash = 
nopHash\n\th.Add(toByte(0x00010fffffffffff))\n\th.Add(toByte(0x0002ffffffffffff))\n\th.Add(toByte(0x0003000000000000))\n\th.Add(toByte(0x0003000000000001))\n\th.Add(toByte(0xff03700000000000))\n\th.Add(toByte(0xff03080000000000))\n\th.mergeSparse()\n\th.toNormal()\n\n\tn := h.denseList[1]\n\tif n != 5 {\n\t\tt.Error(n)\n\t}\n\tn = h.denseList[2]\n\tif n != 1 {\n\t\tt.Error(n)\n\t}\n\tn = h.denseList[3]\n\tif n != 49 {\n\t\tt.Error(n)\n\t}\n\tn = h.denseList[0xff03]\n\tif n != 5 {\n\t\tt.Error(n)\n\t}\n}\n\nfunc TestHLLPP_estimateBias(t *testing.T) {\n\th := NewTestPlus(4)\n\tb := h.estimateBias(14.0988)\n\tif math.Abs(b-7.5988) > 0.00001 {\n\t\tt.Error(b)\n\t}\n\n\th = NewTestPlus(16)\n\tb = h.estimateBias(55391.4373)\n\tif math.Abs(b-39416.9373) > 0.00001 {\n\t\tt.Error(b)\n\t}\n}\n\nfunc TestHLLPPCount(t *testing.T) {\n\th := NewTestPlus(16)\n\n\tn := h.Count()\n\tif n != 0 {\n\t\tt.Error(n)\n\t}\n\n\th.Add(toByte(0x00010fffffffffff))\n\th.Add(toByte(0x00020fffffffffff))\n\th.Add(toByte(0x00030fffffffffff))\n\th.Add(toByte(0x00040fffffffffff))\n\th.Add(toByte(0x00050fffffffffff))\n\th.Add(toByte(0x00050fffffffffff))\n\n\tn = h.Count()\n\tif n != 5 {\n\t\tt.Error(n)\n\t}\n\n\t// not mutated, still returns correct count\n\tn = h.Count()\n\tif n != 5 {\n\t\tt.Error(n)\n\t}\n\n\th.Add(toByte(0x00060fffffffffff))\n\n\t// mutated\n\tn = h.Count()\n\tif n != 6 {\n\t\tt.Error(n)\n\t}\n}\n\nfunc TestHLLPP_Merge_Error(t *testing.T) {\n\th := NewTestPlus(16)\n\th2 := NewTestPlus(10)\n\n\terr := h.Merge(h2)\n\tif err == nil {\n\t\tt.Error(\"different precision should return error\")\n\t}\n}\n\nfunc TestHLL_Merge_Sparse(t *testing.T) {\n\th := NewTestPlus(16)\n\th.Add(toByte(0x00010fffffffffff))\n\th.Add(toByte(0x00020fffffffffff))\n\th.Add(toByte(0x00030fffffffffff))\n\th.Add(toByte(0x00040fffffffffff))\n\th.Add(toByte(0x00050fffffffffff))\n\th.Add(toByte(0x00050fffffffffff))\n\n\th2 := NewTestPlus(16)\n\th2.Merge(h)\n\tn := h2.Count()\n\tif n != 5 
{\n\t\tt.Error(n)\n\t}\n\n\tif h2.sparse {\n\t\tt.Error(\"Merge should convert to normal\")\n\t}\n\n\tif !h.sparse {\n\t\tt.Error(\"Merge should not modify argument\")\n\t}\n\n\th2.Merge(h)\n\tn = h2.Count()\n\tif n != 5 {\n\t\tt.Error(n)\n\t}\n\n\th.Add(toByte(0x00060fffffffffff))\n\th.Add(toByte(0x00070fffffffffff))\n\th.Add(toByte(0x00080fffffffffff))\n\th.Add(toByte(0x00090fffffffffff))\n\th.Add(toByte(0x000a0fffffffffff))\n\th.Add(toByte(0x000a0fffffffffff))\n\tn = h.Count()\n\tif n != 10 {\n\t\tt.Error(n)\n\t}\n\n\th2.Merge(h)\n\tn = h2.Count()\n\tif n != 10 {\n\t\tt.Error(n)\n\t}\n}\n\nfunc TestHLL_Merge_Normal(t *testing.T) {\n\th := NewTestPlus(16)\n\th.toNormal()\n\th.Add(toByte(0x00010fffffffffff))\n\th.Add(toByte(0x00020fffffffffff))\n\th.Add(toByte(0x00030fffffffffff))\n\th.Add(toByte(0x00040fffffffffff))\n\th.Add(toByte(0x00050fffffffffff))\n\th.Add(toByte(0x00050fffffffffff))\n\n\th2 := NewTestPlus(16)\n\th2.toNormal()\n\th2.Merge(h)\n\tn := h2.Count()\n\tif n != 5 {\n\t\tt.Error(n)\n\t}\n\n\th2.Merge(h)\n\tn = h2.Count()\n\tif n != 5 {\n\t\tt.Error(n)\n\t}\n\n\th.Add(toByte(0x00060fffffffffff))\n\th.Add(toByte(0x00070fffffffffff))\n\th.Add(toByte(0x00080fffffffffff))\n\th.Add(toByte(0x00090fffffffffff))\n\th.Add(toByte(0x000a0fffffffffff))\n\th.Add(toByte(0x000a0fffffffffff))\n\tn = h.Count()\n\tif n != 10 {\n\t\tt.Error(n)\n\t}\n\n\th2.Merge(h)\n\tn = h2.Count()\n\tif n != 10 {\n\t\tt.Error(n)\n\t}\n}\n\nfunc TestHLLPP_Merge(t *testing.T) {\n\th := NewTestPlus(16)\n\n\tk1 := uint64(0xf000017000000000)\n\th.Add(toByte(k1))\n\tif !h.tmpSet.has(h.encodeHash(k1)) {\n\t\tt.Error(\"key not in hash\")\n\t}\n\n\tk2 := uint64(0x000fff8f00000000)\n\th.Add(toByte(k2))\n\tif !h.tmpSet.has(h.encodeHash(k2)) {\n\t\tt.Error(\"key not in hash\")\n\t}\n\n\tif len(h.tmpSet) != 2 {\n\t\tt.Error(h.tmpSet)\n\t}\n\n\th.mergeSparse()\n\tif len(h.tmpSet) != 0 {\n\t\tt.Error(h.tmpSet)\n\t}\n\tif h.sparseList.count != 2 {\n\t\tt.Error(h.sparseList)\n\t}\n\n\titer := 
h.sparseList.Iter()\n\tn := iter.Next()\n\tif n != h.encodeHash(k2) {\n\t\tt.Error(n)\n\t}\n\tn = iter.Next()\n\tif n != h.encodeHash(k1) {\n\t\tt.Error(n)\n\t}\n\n\tk3 := uint64(0x0f00017000000000)\n\th.Add(toByte(k3))\n\tif !h.tmpSet.has(h.encodeHash(k3)) {\n\t\tt.Error(\"key not in hash\")\n\t}\n\n\th.mergeSparse()\n\tif len(h.tmpSet) != 0 {\n\t\tt.Error(h.tmpSet)\n\t}\n\tif h.sparseList.count != 3 {\n\t\tt.Error(h.sparseList)\n\t}\n\n\titer = h.sparseList.Iter()\n\tn = iter.Next()\n\tif n != h.encodeHash(k2) {\n\t\tt.Error(n)\n\t}\n\tn = iter.Next()\n\tif n != h.encodeHash(k3) {\n\t\tt.Error(n)\n\t}\n\tn = iter.Next()\n\tif n != h.encodeHash(k1) {\n\t\tt.Error(n)\n\t}\n\n\th.Add(toByte(k1))\n\tif !h.tmpSet.has(h.encodeHash(k1)) {\n\t\tt.Error(\"key not in hash\")\n\t}\n\n\th.mergeSparse()\n\tif len(h.tmpSet) != 0 {\n\t\tt.Error(h.tmpSet)\n\t}\n\tif h.sparseList.count != 3 {\n\t\tt.Error(h.sparseList)\n\t}\n\n\titer = h.sparseList.Iter()\n\tn = iter.Next()\n\tif n != h.encodeHash(k2) {\n\t\tt.Error(n)\n\t}\n\tn = iter.Next()\n\tif n != h.encodeHash(k3) {\n\t\tt.Error(n)\n\t}\n\tn = iter.Next()\n\tif n != h.encodeHash(k1) {\n\t\tt.Error(n)\n\t}\n}\n\nfunc TestHLLPP_EncodeDecode(t *testing.T) {\n\th := NewTestPlus(8)\n\ti, r := h.decodeHash(h.encodeHash(0xffffff8000000000))\n\tif i != 0xff {\n\t\tt.Error(i)\n\t}\n\tif r != 1 {\n\t\tt.Error(r)\n\t}\n\n\ti, r = h.decodeHash(h.encodeHash(0xff00000000000000))\n\tif i != 0xff {\n\t\tt.Error(i)\n\t}\n\tif r != 57 {\n\t\tt.Error(r)\n\t}\n\n\ti, r = h.decodeHash(h.encodeHash(0xff30000000000000))\n\tif i != 0xff {\n\t\tt.Error(i)\n\t}\n\tif r != 3 {\n\t\tt.Error(r)\n\t}\n\n\ti, r = h.decodeHash(h.encodeHash(0xaa10000000000000))\n\tif i != 0xaa {\n\t\tt.Error(i)\n\t}\n\tif r != 4 {\n\t\tt.Error(r)\n\t}\n\n\ti, r = h.decodeHash(h.encodeHash(0xaa0f000000000000))\n\tif i != 0xaa {\n\t\tt.Error(i)\n\t}\n\tif r != 5 {\n\t\tt.Error(r)\n\t}\n}\n\nfunc TestHLLPP_Error(t *testing.T) {\n\t_, err := NewPlus(3)\n\tif err == nil 
{\n\t\tt.Error(\"precision 3 should return error\")\n\t}\n\n\t_, err = NewPlus(18)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\t_, err = NewPlus(19)\n\tif err == nil {\n\t\tt.Error(\"precision 17 should return error\")\n\t}\n}\n\nfunc TestHLLPP_Marshal_Unmarshal_Sparse(t *testing.T) {\n\th, _ := NewPlus(4)\n\th.sparse = true\n\th.tmpSet = map[uint32]struct{}{26: struct{}{}, 40: struct{}{}}\n\n\t// Add a bunch of values to the sparse representation.\n\tfor i := 0; i < 10; i++ {\n\t\th.sparseList.Append(uint32(rand.Int()))\n\t}\n\n\tdata, err := h.MarshalBinary()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Peeking at the first byte should reveal the version.\n\tif got, exp := data[0], byte(1); got != exp {\n\t\tt.Fatalf(\"got byte %v, expected %v\", got, exp)\n\t}\n\n\tvar res Plus\n\tif err := res.UnmarshalBinary(data); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// reflect.DeepEqual will always return false when comparing non-nil\n\t// functions, so we'll set them to nil.\n\th.hash, res.hash = nil, nil\n\tif got, exp := &res, h; !reflect.DeepEqual(got, exp) {\n\t\tt.Fatalf(\"got %v, wanted %v\", spew.Sdump(got), spew.Sdump(exp))\n\t}\n}\n\nfunc TestHLLPP_Marshal_Unmarshal_Dense(t *testing.T) {\n\th, _ := NewPlus(4)\n\th.sparse = false\n\n\t// Add a bunch of values to the dense representation.\n\tfor i := 0; i < 10; i++ {\n\t\th.denseList = append(h.denseList, uint8(rand.Int()))\n\t}\n\n\tdata, err := h.MarshalBinary()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Peeking at the first byte should reveal the version.\n\tif got, exp := data[0], byte(1); got != exp {\n\t\tt.Fatalf(\"got byte %v, expected %v\", got, exp)\n\t}\n\n\tvar res Plus\n\tif err := res.UnmarshalBinary(data); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// reflect.DeepEqual will always return false when comparing non-nil\n\t// functions, so we'll set them to nil.\n\th.hash, res.hash = nil, nil\n\tif got, exp := &res, h; !reflect.DeepEqual(got, exp) {\n\t\tt.Fatalf(\"got %v, wanted %v\", 
spew.Sdump(got), spew.Sdump(exp))\n\t}\n}\n\n// Tests that a sketch can be serialised / unserialised and keep an accurate\n// cardinality estimate.\nfunc TestHLLPP_Marshal_Unmarshal_Count(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping test in short mode\")\n\t}\n\n\tcount := make(map[string]struct{}, 1000000)\n\th, _ := NewPlus(16)\n\n\tbuf := make([]byte, 8)\n\tfor i := 0; i < 1000000; i++ {\n\t\tif _, err := crand.Read(buf); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tcount[string(buf)] = struct{}{}\n\n\t\t// Add to the sketch.\n\t\th.Add(buf)\n\t}\n\n\tgotC := h.Count()\n\tepsilon := 15000 // 1.5%\n\tif got, exp := math.Abs(float64(int(gotC)-len(count))), epsilon; int(got) > exp {\n\t\tt.Fatalf(\"error was %v for estimation %d and true cardinality %d\", got, gotC, len(count))\n\t}\n\n\t// Serialise the sketch.\n\tsketch, err := h.MarshalBinary()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Deserialise.\n\th = &Plus{}\n\tif err := h.UnmarshalBinary(sketch); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// The count should be the same\n\toldC := gotC\n\tif got, exp := h.Count(), oldC; got != exp {\n\t\tt.Fatalf(\"got %d, expected %d\", got, exp)\n\t}\n\n\t// Add some more values.\n\tfor i := 0; i < 1000000; i++ {\n\t\tif _, err := crand.Read(buf); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tcount[string(buf)] = struct{}{}\n\n\t\t// Add to the sketch.\n\t\th.Add(buf)\n\t}\n\n\t// The sketch should still be working correctly.\n\tgotC = h.Count()\n\tepsilon = 30000 // 1.5%\n\tif got, exp := math.Abs(float64(int(gotC)-len(count))), epsilon; int(got) > exp {\n\t\tt.Fatalf(\"error was %v for estimation %d and true cardinality %d\", got, gotC, len(count))\n\t}\n}\n\nfunc NewTestPlus(p uint8) *Plus {\n\th, _ := NewPlus(p)\n\th.hash = nopHash\n\treturn h\n}\n\n// Generate random data to add to the sketch.\nfunc genData(n int) [][]byte {\n\tout := make([][]byte, 0, n)\n\tbuf := make([]byte, 8)\n\n\tfor i := 0; i < n; i++ {\n\t\t// generate 8 random 
bytes\n\t\tn, err := rand.Read(buf)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t} else if n != 8 {\n\t\t\tpanic(fmt.Errorf(\"only %d bytes generated\", n))\n\t\t}\n\n\t\tout = append(out, buf)\n\t}\n\tif len(out) != n {\n\t\tpanic(fmt.Sprintf(\"wrong size slice: %d\", n))\n\t}\n\treturn out\n}\n\n// Memoises values to be added to a sketch during a benchmark.\nvar benchdata = map[int][][]byte{}\n\nfunc benchmarkPlusAdd(b *testing.B, h *Plus, n int) {\n\tblobs, ok := benchdata[n]\n\tif !ok {\n\t\t// Generate it.\n\t\tbenchdata[n] = genData(n)\n\t\tblobs = benchdata[n]\n\t}\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tfor j := 0; j < len(blobs); j++ {\n\t\t\th.Add(blobs[j])\n\t\t}\n\t}\n\tb.StopTimer()\n}\n\nfunc BenchmarkPlus_Add_100(b *testing.B) {\n\th, _ := NewPlus(16)\n\tbenchmarkPlusAdd(b, h, 100)\n}\n\nfunc BenchmarkPlus_Add_1000(b *testing.B) {\n\th, _ := NewPlus(16)\n\tbenchmarkPlusAdd(b, h, 1000)\n}\n\nfunc BenchmarkPlus_Add_10000(b *testing.B) {\n\th, _ := NewPlus(16)\n\tbenchmarkPlusAdd(b, h, 10000)\n}\n\nfunc BenchmarkPlus_Add_100000(b *testing.B) {\n\th, _ := NewPlus(16)\n\tbenchmarkPlusAdd(b, h, 100000)\n}\n\nfunc BenchmarkPlus_Add_1000000(b *testing.B) {\n\th, _ := NewPlus(16)\n\tbenchmarkPlusAdd(b, h, 1000000)\n}\n\nfunc BenchmarkPlus_Add_10000000(b *testing.B) {\n\th, _ := NewPlus(16)\n\tbenchmarkPlusAdd(b, h, 10000000)\n}\n\nfunc BenchmarkPlus_Add_100000000(b *testing.B) {\n\th, _ := NewPlus(16)\n\tbenchmarkPlusAdd(b, h, 100000000)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/estimator/sketch.go",
    "content": "package estimator\n\nimport \"encoding\"\n\n// Sketch is the interface representing a sketch for estimating cardinality.\ntype Sketch interface {\n\t// Add adds a single value to the sketch.\n\tAdd(v []byte)\n\n\t// Count returns a cardinality estimate for the sketch.\n\tCount() uint64\n\n\t// Merge merges another sketch into this one.\n\tMerge(s Sketch) error\n\n\tencoding.BinaryMarshaler\n\tencoding.BinaryUnmarshaler\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/limiter/fixed.go",
    "content": "// Package limiter provides concurrency limiters.\npackage limiter\n\n// Fixed is a simple channel-based concurrency limiter.  It uses a fixed\n// size channel to limit callers from proceeding until there is a value available\n// in the channel.  If all are in-use, the caller blocks until one is freed.\ntype Fixed chan struct{}\n\nfunc NewFixed(limit int) Fixed {\n\treturn make(Fixed, limit)\n}\n\nfunc (t Fixed) Take() {\n\tt <- struct{}{}\n}\n\nfunc (t Fixed) Release() {\n\t<-t\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_test.go",
    "content": "package mmap_test\n\nimport (\n\t\"bytes\"\n\t\"io/ioutil\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/pkg/mmap\"\n)\n\nfunc TestMap(t *testing.T) {\n\tdata, err := mmap.Map(\"mmap_test.go\")\n\tif err != nil {\n\t\tt.Fatalf(\"Open: %v\", err)\n\t}\n\n\tif exp, err := ioutil.ReadFile(\"mmap_test.go\"); err != nil {\n\t\tt.Fatalf(\"ioutil.ReadFile: %v\", err)\n\t} else if !bytes.Equal(data, exp) {\n\t\tt.Fatalf(\"got %q\\nwant %q\", string(data), string(exp))\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_unix.go",
    "content": "// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris\n\n// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package mmap provides a way to memory-map a file.\npackage mmap\n\nimport (\n\t\"os\"\n\t\"syscall\"\n)\n\n// Map memory-maps a file.\nfunc Map(path string) ([]byte, error) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if fi.Size() == 0 {\n\t\treturn nil, nil\n\t}\n\n\tdata, err := syscall.Mmap(int(f.Fd()), 0, int(fi.Size()), syscall.PROT_READ, syscall.MAP_SHARED)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn data, nil\n}\n\n// Unmap closes the memory-map.\nfunc Unmap(data []byte) error {\n\tif data == nil {\n\t\treturn nil\n\t}\n\treturn syscall.Munmap(data)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_windows.go",
    "content": "package mmap\n\nimport (\n\t\"os\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\n// Map memory-maps a file.\nfunc Map(path string) ([]byte, error) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t} else if fi.Size() == 0 {\n\t\treturn nil, nil\n\t}\n\n\tlo, hi := uint32(fi.Size()), uint32(fi.Size()>>32)\n\tfmap, err := syscall.CreateFileMapping(syscall.Handle(f.Fd()), nil, syscall.PAGE_READONLY, hi, lo, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer syscall.CloseHandle(fmap)\n\n\tptr, err := syscall.MapViewOfFile(fmap, syscall.FILE_MAP_READ, 0, 0, uintptr(fi.Size()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata := (*[1 << 30]byte)(unsafe.Pointer(ptr))[:fi.Size()]\n\n\treturn data, nil\n}\n\n// Unmap closes the memory-map.\nfunc Unmap(data []byte) error {\n\tif data == nil {\n\t\treturn nil\n\t}\n\treturn syscall.UnmapViewOfFile(uintptr(unsafe.Pointer(&data[0])))\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/pool/bytes.go",
    "content": "// Package pool provides pool structures to help reduce garbage collector pressure.\npackage pool\n\n// Bytes is a pool of byte slices that can be re-used.  Slices in\n// this pool will not be garbage collected when not in use.\ntype Bytes struct {\n\tpool chan []byte\n}\n\n// NewBytes returns a Bytes pool with capacity for max byte slices\n// to be pool.\nfunc NewBytes(max int) *Bytes {\n\treturn &Bytes{\n\t\tpool: make(chan []byte, max),\n\t}\n}\n\n// Get returns a byte slice size with at least sz capacity. Items\n// returned may not be in the zero state and should be reset by the\n// caller.\nfunc (p *Bytes) Get(sz int) []byte {\n\tvar c []byte\n\tselect {\n\tcase c = <-p.pool:\n\tdefault:\n\t\treturn make([]byte, sz)\n\t}\n\n\tif cap(c) < sz {\n\t\treturn make([]byte, sz)\n\t}\n\n\treturn c[:sz]\n}\n\n// Put returns a slice back to the pool.  If the pool is full, the byte\n// slice is discarded.\nfunc (p *Bytes) Put(c []byte) {\n\tselect {\n\tcase p.pool <- c:\n\tdefault:\n\t}\n}\n\n// LimitedBytes is a pool of byte slices that can be re-used.  Slices in\n// this pool will not be garbage collected when not in use.  The pool will\n// hold onto a fixed number of byte slices of a maximum size.  If the pool\n// is empty and max pool size has not been allocated yet, it will return a\n// new byte slice.  Byte slices added to the pool that are over the max size\n// are dropped.\ntype LimitedBytes struct {\n\tallocated int64\n\tmaxSize   int\n\tpool      chan []byte\n}\n\n// NewBytes returns a Bytes pool with capacity for max byte slices\n// to be pool.\nfunc NewLimitedBytes(capacity int, maxSize int) *LimitedBytes {\n\treturn &LimitedBytes{\n\t\tpool:    make(chan []byte, capacity),\n\t\tmaxSize: maxSize,\n\t}\n}\n\n// Get returns a byte slice size with at least sz capacity. 
Items\n// returned may not be in the zero state and should be reset by the\n// caller.\nfunc (p *LimitedBytes) Get(sz int) []byte {\n\tvar c []byte\n\n\t// If we have not allocated our capacity, return a new allocation,\n\t// otherwise block until one frees up.\n\tselect {\n\tcase c = <-p.pool:\n\tdefault:\n\t\treturn make([]byte, sz)\n\t}\n\n\tif cap(c) < sz {\n\t\treturn make([]byte, sz)\n\t}\n\n\treturn c[:sz]\n}\n\n// Put returns a slice back to the pool.  If the pool is full, the byte\n// slice is discarded.  If the byte slice is over the configured max size\n// of any byte slice in the pool, it is discared.\nfunc (p *LimitedBytes) Put(c []byte) {\n\t// Drop buffers that are larger than the max size\n\tif cap(c) >= p.maxSize {\n\t\treturn\n\t}\n\n\tselect {\n\tcase p.pool <- c:\n\tdefault:\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/pool/bytes_test.go",
    "content": "package pool_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/pkg/pool\"\n)\n\nfunc TestLimitedBytePool_Put_MaxSize(t *testing.T) {\n\tbp := pool.NewLimitedBytes(1, 10)\n\tbp.Put(make([]byte, 1024)) // should be dropped\n\n\tif got, exp := cap(bp.Get(10)), 10; got != exp {\n\t\tt.Fatalf(\"max cap size exceeded: got %v, exp %v\", got, exp)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/pool/generic.go",
    "content": "package pool\n\n// Generic is a pool of types that can be re-used.  Items in\n// this pool will not be garbage collected when not in use.\ntype Generic struct {\n\tpool chan interface{}\n\tfn   func(sz int) interface{}\n}\n\n// NewGeneric returns a Generic pool with capacity for max items\n// to be pool.\nfunc NewGeneric(max int, fn func(sz int) interface{}) *Generic {\n\treturn &Generic{\n\t\tpool: make(chan interface{}, max),\n\t\tfn:   fn,\n\t}\n}\n\n// Get returns a item from the pool or a new instance if the pool\n// is empty.  Items returned may not be in the zero state and should\n// be reset by the caller.\nfunc (p *Generic) Get(sz int) interface{} {\n\tvar c interface{}\n\tselect {\n\tcase c = <-p.pool:\n\tdefault:\n\t\tc = p.fn(sz)\n\t}\n\n\treturn c\n}\n\n// Put returns an item back to the pool.  If the pool is full, the item\n// is discarded.\nfunc (p *Generic) Put(c interface{}) {\n\tselect {\n\tcase p.pool <- c:\n\tdefault:\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/rhh/rhh.go",
    "content": "package rhh\n\nimport (\n\t\"bytes\"\n\t\"sort\"\n\n\t\"github.com/cespare/xxhash\"\n)\n\n// HashMap represents a hash map that implements Robin Hood Hashing.\n// https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf\ntype HashMap struct {\n\thashes []int64\n\telems  []hashElem\n\n\tn          int64\n\tcapacity   int64\n\tthreshold  int64\n\tmask       int64\n\tloadFactor int\n}\n\nfunc NewHashMap(opt Options) *HashMap {\n\tm := &HashMap{\n\t\tcapacity:   pow2(opt.Capacity), // Limited to 2^64.\n\t\tloadFactor: opt.LoadFactor,\n\t}\n\tm.alloc()\n\treturn m\n}\n\n// Reset clears the values in the map without deallocating the space.\nfunc (m *HashMap) Reset() {\n\tfor i := int64(0); i < m.capacity; i++ {\n\t\tm.hashes[i] = 0\n\t\tm.elems[i].reset()\n\t}\n\tm.n = 0\n}\n\nfunc (m *HashMap) Get(key []byte) interface{} {\n\ti := m.index(key)\n\tif i == -1 {\n\t\treturn nil\n\t}\n\treturn m.elems[i].value\n}\n\nfunc (m *HashMap) Put(key []byte, val interface{}) {\n\t// Grow the map if we've run out of slots.\n\tm.n++\n\tif m.n > m.threshold {\n\t\tm.grow()\n\t}\n\n\t// If the key was overwritten then decrement the size.\n\toverwritten := m.insert(HashKey(key), key, val)\n\tif overwritten {\n\t\tm.n--\n\t}\n}\n\nfunc (m *HashMap) insert(hash int64, key []byte, val interface{}) (overwritten bool) {\n\tpos := hash & m.mask\n\tvar dist int64\n\n\t// Continue searching until we find an empty slot or lower probe distance.\n\tfor {\n\t\te := &m.elems[pos]\n\n\t\t// Empty slot found or matching key, insert and exit.\n\t\tmatch := bytes.Equal(m.elems[pos].key, key)\n\t\tif m.hashes[pos] == 0 || match {\n\t\t\tm.hashes[pos] = hash\n\t\t\te.hash, e.value = hash, val\n\t\t\te.setKey(key)\n\t\t\treturn match\n\t\t}\n\n\t\t// If the existing elem has probed less than us, then swap places with\n\t\t// existing elem, and keep going to find another slot for that elem.\n\t\telemDist := Dist(m.hashes[pos], pos, m.capacity)\n\t\tif elemDist < dist {\n\t\t\t// Swap with 
current position.\n\t\t\thash, m.hashes[pos] = m.hashes[pos], hash\n\t\t\tval, e.value = e.value, val\n\n\t\t\ttmp := make([]byte, len(e.key))\n\t\t\tcopy(tmp, e.key)\n\n\t\t\te.setKey(key)\n\t\t\tkey = tmp\n\n\t\t\t// Update current distance.\n\t\t\tdist = elemDist\n\t\t}\n\n\t\t// Increment position, wrap around on overflow.\n\t\tpos = (pos + 1) & m.mask\n\t\tdist++\n\t}\n}\n\n// alloc elems according to currently set capacity.\nfunc (m *HashMap) alloc() {\n\tm.elems = make([]hashElem, m.capacity)\n\tm.hashes = make([]int64, m.capacity)\n\tm.threshold = (m.capacity * int64(m.loadFactor)) / 100\n\tm.mask = int64(m.capacity - 1)\n}\n\n// grow doubles the capacity and reinserts all existing hashes & elements.\nfunc (m *HashMap) grow() {\n\t// Copy old elements and hashes.\n\telems, hashes := m.elems, m.hashes\n\tcapacity := m.capacity\n\n\t// Double capacity & reallocate.\n\tm.capacity *= 2\n\tm.alloc()\n\n\t// Copy old elements to new hash/elem list.\n\tfor i := int64(0); i < capacity; i++ {\n\t\telem, hash := &elems[i], hashes[i]\n\t\tif hash == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tm.insert(hash, elem.key, elem.value)\n\t}\n}\n\n// index returns the position of key in the hash map.\nfunc (m *HashMap) index(key []byte) int64 {\n\thash := HashKey(key)\n\tpos := hash & m.mask\n\n\tvar dist int64\n\tfor {\n\t\tif m.hashes[pos] == 0 {\n\t\t\treturn -1\n\t\t} else if dist > Dist(m.hashes[pos], pos, m.capacity) {\n\t\t\treturn -1\n\t\t} else if m.hashes[pos] == hash && bytes.Equal(m.elems[pos].key, key) {\n\t\t\treturn pos\n\t\t}\n\n\t\tpos = (pos + 1) & m.mask\n\t\tdist++\n\t}\n}\n\n// Elem returns the i-th key/value pair of the hash map.\nfunc (m *HashMap) Elem(i int64) (key []byte, value interface{}) {\n\tif i >= int64(len(m.elems)) {\n\t\treturn nil, nil\n\t}\n\n\te := &m.elems[i]\n\treturn e.key, e.value\n}\n\n// Len returns the number of key/values set in map.\nfunc (m *HashMap) Len() int64 { return m.n }\n\n// Cap returns the number of key/values set in map.\nfunc (m 
*HashMap) Cap() int64 { return m.capacity }\n\n// AverageProbeCount returns the average number of probes for each element.\nfunc (m *HashMap) AverageProbeCount() float64 {\n\tvar sum float64\n\tfor i := int64(0); i < m.capacity; i++ {\n\t\thash := m.hashes[i]\n\t\tif hash == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tsum += float64(Dist(hash, i, m.capacity))\n\t}\n\treturn sum/float64(m.n) + 1.0\n}\n\n// Keys returns a list of sorted keys.\nfunc (m *HashMap) Keys() [][]byte {\n\ta := make([][]byte, 0, m.Len())\n\tfor i := int64(0); i < m.Cap(); i++ {\n\t\tk, v := m.Elem(i)\n\t\tif v == nil {\n\t\t\tcontinue\n\t\t}\n\t\ta = append(a, k)\n\t}\n\tsort.Sort(byteSlices(a))\n\treturn a\n}\n\ntype hashElem struct {\n\tkey   []byte\n\tvalue interface{}\n\thash  int64\n}\n\n// reset clears the values in the element.\nfunc (e *hashElem) reset() {\n\te.key = e.key[:0]\n\te.value = nil\n\te.hash = 0\n}\n\n// setKey copies v to a key on e.\nfunc (e *hashElem) setKey(v []byte) {\n\t// Shrink or grow key to fit value.\n\tif len(e.key) > len(v) {\n\t\te.key = e.key[:len(v)]\n\t} else if len(e.key) < len(v) {\n\t\te.key = append(e.key, make([]byte, len(v)-len(e.key))...)\n\t}\n\n\t// Copy value to key.\n\tcopy(e.key, v)\n}\n\n// Options represents initialization options that are passed to NewHashMap().\ntype Options struct {\n\tCapacity   int64\n\tLoadFactor int\n}\n\n// DefaultOptions represents a default set of options to pass to NewHashMap().\nvar DefaultOptions = Options{\n\tCapacity:   256,\n\tLoadFactor: 90,\n}\n\n// HashKey computes a hash of key. 
Hash is always non-zero.\nfunc HashKey(key []byte) int64 {\n\th := int64(xxhash.Sum64(key))\n\tif h == 0 {\n\t\th = 1\n\t} else if h < 0 {\n\t\th = 0 - h\n\t}\n\treturn h\n}\n\n// Dist returns the probe distance for a hash in a slot index.\n// NOTE: Capacity must be a power of 2.\nfunc Dist(hash, i, capacity int64) int64 {\n\tmask := capacity - 1\n\tdist := (i + capacity - (hash & mask)) & mask\n\treturn dist\n}\n\n// pow2 returns the number that is the next highest power of 2.\n// Returns v if it is a power of 2.\nfunc pow2(v int64) int64 {\n\tfor i := int64(2); i < 1<<62; i *= 2 {\n\t\tif i >= v {\n\t\t\treturn i\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}\n\ntype byteSlices [][]byte\n\nfunc (a byteSlices) Len() int           { return len(a) }\nfunc (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 }\nfunc (a byteSlices) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/rhh/rhh_test.go",
    "content": "package rhh_test\n\nimport (\n\t\"bytes\"\n\t\"math/rand\"\n\t\"reflect\"\n\t\"testing\"\n\t\"testing/quick\"\n\n\t\"github.com/influxdata/influxdb/pkg/rhh\"\n)\n\n// Ensure hash map can perform basic get/put operations.\nfunc TestHashMap(t *testing.T) {\n\tm := rhh.NewHashMap(rhh.DefaultOptions)\n\tm.Put([]byte(\"foo\"), []byte(\"bar\"))\n\tm.Put([]byte(\"baz\"), []byte(\"bat\"))\n\n\t// Verify values can be retrieved.\n\tif v := m.Get([]byte(\"foo\")); !bytes.Equal(v.([]byte), []byte(\"bar\")) {\n\t\tt.Fatalf(\"unexpected value: %s\", v)\n\t}\n\tif v := m.Get([]byte(\"baz\")); !bytes.Equal(v.([]byte), []byte(\"bat\")) {\n\t\tt.Fatalf(\"unexpected value: %s\", v)\n\t}\n\n\t// Overwrite field & verify.\n\tm.Put([]byte(\"foo\"), []byte(\"XXX\"))\n\tif v := m.Get([]byte(\"foo\")); !bytes.Equal(v.([]byte), []byte(\"XXX\")) {\n\t\tt.Fatalf(\"unexpected value: %s\", v)\n\t}\n}\n\n// Ensure hash map can insert random data.\nfunc TestHashMap_Quick(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"short mode, skipping\")\n\t}\n\n\tif err := quick.Check(func(keys, values [][]byte) bool {\n\t\tm := rhh.NewHashMap(rhh.Options{Capacity: 1000, LoadFactor: 90})\n\t\th := make(map[string][]byte)\n\n\t\t// Insert all key/values into both maps.\n\t\tfor i := range keys {\n\t\t\tkey, value := keys[i], values[i]\n\t\t\th[string(key)] = value\n\t\t\tm.Put(key, value)\n\t\t}\n\n\t\t// Verify the maps are equal.\n\t\tfor k, v := range h {\n\t\t\tif mv := m.Get([]byte(k)); !bytes.Equal(mv.([]byte), v) {\n\t\t\t\tt.Fatalf(\"value mismatch:\\nkey=%x\\ngot=%x\\nexp=%x\\n\\n\", []byte(k), mv, v)\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}, &quick.Config{\n\t\tValues: func(values []reflect.Value, rand *rand.Rand) {\n\t\t\tn := rand.Intn(10000)\n\t\t\tvalues[0] = GenerateByteSlices(rand, n)\n\t\t\tvalues[1] = GenerateByteSlices(rand, n)\n\t\t},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\n// GenerateByteSlices returns a random list of byte slices.\nfunc 
GenerateByteSlices(rand *rand.Rand, n int) reflect.Value {\n\tvar a [][]byte\n\tfor i := 0; i < n; i++ {\n\t\tv, _ := quick.Value(reflect.TypeOf(([]byte)(nil)), rand)\n\t\ta = append(a, v.Interface().([]byte))\n\t}\n\treturn reflect.ValueOf(a)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/pkg/slices/strings.go",
    "content": "// Package slices contains functions to operate on slices treated as sets.\npackage slices // import \"github.com/influxdata/influxdb/pkg/slices\"\n\nimport \"strings\"\n\n// Union combines two string sets.\nfunc Union(setA, setB []string, ignoreCase bool) []string {\n\tfor _, b := range setB {\n\t\tif ignoreCase {\n\t\t\tif !ExistsIgnoreCase(setA, b) {\n\t\t\t\tsetA = append(setA, b)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif !Exists(setA, b) {\n\t\t\tsetA = append(setA, b)\n\t\t}\n\t}\n\treturn setA\n}\n\n// Exists checks if a string is in a set.\nfunc Exists(set []string, find string) bool {\n\tfor _, s := range set {\n\t\tif s == find {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// ExistsIgnoreCase checks if a string is in a set but ignores its case.\nfunc ExistsIgnoreCase(set []string, find string) bool {\n\tfind = strings.ToLower(find)\n\tfor _, s := range set {\n\t\tif strings.ToLower(s) == find {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/releng/README.md",
    "content": "# influxdb/releng\n\nThis directory and its subdirectories contain release engineering scripts to build source tarballs and packages, run unit tests in an isolated environment, and so on.\nThe directory layout typically looks like:\n\n```\n├── Dockerfile\n├── build.bash\n└── fs\n    └── usr\n        └── local\n            └── bin\n                └── influxdb_tarball.bash\n```\n\nWhere you only need to run `build.bash` (or other shell scripts in the root directory) with valid arguments to complete the step.\nAll scripts in the root folders accept the `-h` flag to explain usage.\n\nThe `fs` folder is overlaid on the Docker image so that is clear where each script for the Docker containers reside.\nThose scripts make assumptions about the environment which are controlled in the outer scripts (i.e. `build.bash`), so the scripts not intended to be run outside of Docker.\n\nBy default, these scripts will use the \"current\" Go version as determined by `_go_versions.sh`.\nTo use the \"next\" version of Go, set the environment variable GO_NEXT to a non-empty value.\n\n## source-tarball\n\nGenerates a source tarball of influxdb that can be extracted to a new `GOPATH` such that you can `go build github.com/influxdata/influxdb/cmd/influxd`, etc., without manually setting linker flags or anything.\n\n## raw-binaries\n\nBuilds the raw binaries for the various influxdb commands, and stores them in OS/architecture-specific tarballs in the provided destination path.\n\n## packages\n\nGiven a source tarball and an archive of raw binaries, generates OS/architecture-specific packages (i.e. .deb and .rpm files).\n\n## unit-tests\n\nGiven a source tarball, runs the influxdb unit tests in a clean Docker environment.\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/releng/_go_versions.sh",
    "content": "# These are the current and \"next\" Go versions used to build influxdb.\n# This file is meant to be sourced from other scripts.\n\nexport GO_CURRENT_VERSION=1.8.3\nexport GO_NEXT_VERSION=1.9\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/releng/raw-binaries/Dockerfile",
    "content": "ARG GO_VERSION\nFROM golang:${GO_VERSION}\n\nRUN apt-get update && apt-get install -y --no-install-recommends \\\n  jq \\\n  && rm -rf /var/lib/apt/lists/*\n\nCOPY fs/ /\n\nENTRYPOINT [\"influxdb_raw_binaries.bash\"]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/releng/raw-binaries/build.bash",
    "content": "#!/bin/bash\n\nfunction printHelp() {\n  >&2 echo \"USAGE: $0 -i PATH_TO_SOURCE_TARBALL -o OUTDIR\n\nEmits an archive of influxdb binaries based on the current environment's GOOS and GOARCH.\n\nIf the environment variable GO_NEXT is not empty, builds the binaries with the 'next' version of Go.\n\"\n}\n\nif [ $# -eq 0 ]; then\n  printHelp\n  exit 1\nfi\n\nif [ -z \"$GOOS\" ] || [ -z \"$GOARCH\" ]; then\n  >&2 echo 'The environment variables $GOOS and $GOARCH must both be set.'\n  exit 1\nfi\n\nSRCDIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\nsource \"$SRCDIR/../_go_versions.sh\"\n\nOUTDIR=\"\"\nTARBALL=\"\"\nRACE_FLAG=\"\"\n\nwhile getopts hi:o:r arg; do\n  case \"$arg\" in\n    h) printHelp; exit 1;;\n    i) TARBALL=\"$OPTARG\";;\n    o) OUTDIR=\"$OPTARG\";;\n    r) RACE_FLAG=\"-r\";;\n  esac\ndone\n\nif [ -z \"$OUTDIR\" ] || [ -z \"$TARBALL\" ]; then\n  printHelp\n  exit 1\nfi\n\nif [ -z \"$GO_NEXT\" ]; then\n  DOCKER_TAG=latest\n  GO_VERSION=\"$GO_CURRENT_VERSION\"\nelse\n  DOCKER_TAG=next\n  GO_VERSION=\"$GO_NEXT_VERSION\"\nfi\ndocker build --build-arg \"GO_VERSION=$GO_VERSION\" -t influxdata/influxdb/releng/raw-binaries:\"$DOCKER_TAG\" \"$SRCDIR\"\n\nmkdir -p \"$OUTDIR\"\n\ndocker run --rm \\\n   --mount type=bind,source=\"${OUTDIR}\",destination=/out \\\n   --mount type=bind,source=\"${TARBALL}\",destination=/influxdb-src.tar.gz,ro=1 \\\n   -e GOOS -e GOARCH -e CGO_ENABLED \\\n  influxdata/influxdb/releng/raw-binaries:\"$DOCKER_TAG\" $RACE_FLAG\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/releng/raw-binaries/fs/usr/local/bin/influxdb_raw_binaries.bash",
    "content": "#!/bin/bash\n\nfunction printHelp() {\n  >&2 echo \"USAGE: $0 [-r]\n\nUntars the plutonium source tarball mounted at /plutonium-src.tar.gz,\nthen emits a tarball of plutonium binaries to /out,\nwhich must be a mounted volume if you want to access the file.\n\nRelies upon environment variables GOOS and GOARCH to determine what to build.\nRespects CGO_ENABLED.\n\nTo build with race detection enabled, pass the -r flag.\n\"\n}\n\nRACE_FLAG=\"\"\n\nwhile getopts hr arg; do\n  case \"$arg\" in\n    h) printHelp; exit 1;;\n    r) RACE_FLAG=\"-race\";;\n  esac\ndone\n\n\nif [ -z \"$GOOS\" ] || [ -z \"$GOARCH\" ]; then\n  >&2 echo 'The environment variables $GOOS and $GOARCH must both be set.'\n  exit 1\nfi\n\n\n# Extract tarball into GOPATH.\ntar xz -C \"$GOPATH\" -f /influxdb-src.tar.gz\n\nSHA=$(jq -r .sha < \"$GOPATH/src/github.com/influxdata/influxdb/.metadata.json\")\n\n\nSUFFIX=\nif [ \"$CGO_ENABLED\" == \"0\" ]; then\n  # Only add the static suffix to the filename when explicitly requested.\n  SUFFIX=_static\nelif [ -n \"$RACE_FLAG\" ]; then\n  # -race depends on cgo, so this option is exclusive from CGO_ENABLED.\n  SUFFIX=_race\nfi\n\nTARBALL_NAME=\"influxdb_bin_${GOOS}_${GOARCH}${SUFFIX}-${SHA}.tar.gz\"\n\n# note: according to https://github.com/golang/go/wiki/GoArm\n# we want to support armel using GOARM=5\n# and we want to support armhf using GOARM=6\n# no GOARM setting is necessary for arm64\nif [ $GOARCH == \"armel\" ]; then\n  GOARCH=arm\n  GOARM=5\nfi\n\nif [ $GOARCH == \"armhf\" ]; then\n  GOARCH=arm\n  GOARM=6\nfi\n\n\n\nOUTDIR=$(mktemp -d)\nfor cmd in \\\n  influxdb/cmd/influxd \\\n  influxdb/cmd/influx_stress \\\n  influxdb/cmd/influx \\\n  influxdb/cmd/influx_inspect \\\n  influxdb/cmd/influx_tsm \\\n  ; do\n    go build $RACE_FLAG -i -o \"$OUTDIR/$(basename $cmd)\" \"github.com/influxdata/$cmd\"\ndone\n\n\n(cd \"$OUTDIR\" && tar czf \"/out/$TARBALL_NAME\" ./*)\n(cd /out && md5sum \"$TARBALL_NAME\" > \"$TARBALL_NAME.md5\")\n(cd /out && 
sha256sum \"$TARBALL_NAME\" > \"$TARBALL_NAME.sha256\")\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/releng/source-tarball/Dockerfile",
    "content": "ARG GO_VERSION\nFROM golang:${GO_VERSION}-alpine\n\nRUN apk add --no-cache \\\n      bash \\\n      git \\\n      openssh-client \\\n      tar\n\n# Build the gdm binary and then clean out /go.\nRUN go get github.com/sparrc/gdm && \\\n      mv /go/bin/gdm /usr/local/bin/gdm && \\\n      rm -rf /go/*\n\nCOPY fs/ /\n\nENTRYPOINT [\"influxdb_tarball.bash\"]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/releng/source-tarball/build.bash",
    "content": "#!/bin/bash\n\nfunction printHelp() {\n  >&2 echo \\\n\"USAGE: $0 [-p INFLUXDB_GIT_DIR]\n            -s INFLUXDB_SHA -b INFLUXDB_BRANCH -v INFLUXDB_VERSION -o OUTDIR\n\nEmits a tarball of influxdb source code and dependencies to OUTDIR.\n\nIf using -p flag, directory containing influxdb source code will be used as source of truth.\nThis is helpful if you have local commits that have not been pushed.\n\nIf not using -p, you must provide -S to clone over SSH.\n\"\n}\n\nif [ $# -eq 0 ]; then\n  printHelp\n  exit 1\nfi\n\nSRCDIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\nsource \"$SRCDIR/../_go_versions.sh\"\n\nSHA=\"\"\nBRANCH=\"\"\nVERSION=\"\"\nOUTDIR=\"\"\n\n# These variables may expand to command arguments. Don't double quote them when used later.\nINFLUXDB_GIT_MOUNT=\"\"\n\n\nwhile getopts hs:b:v:o:p:S: arg; do\n  case \"$arg\" in\n    h) printHelp; exit 1;;\n    s) SHA=\"$OPTARG\";;\n    b) BRANCH=\"$OPTARG\";;\n    v) VERSION=\"$OPTARG\";;\n    o) OUTDIR=\"$OPTARG\";;\n    p) INFLUXDB_GIT_MOUNT=\"--mount type=bind,src=$OPTARG,dst=/influxdb-git,ro=1\";;\n  esac\ndone\n\nif [ -z \"$OUTDIR\" ]; then\n  # Not bothering to check the other variables since they're checked in the inner docker script.\n  printHelp\n  exit 1\nfi\n\n# Only build with GO_CURRENT_VERSION. No need to build source tarball with next version of Go.\ndocker build --build-arg \"GO_VERSION=$GO_CURRENT_VERSION\" -t influxdata/influxdb/releng/source-tarball:latest \"$SRCDIR\"\n\nmkdir -p \"$OUTDIR\"\n\ndocker run --rm \\\n  $INFLUXDB_GIT_MOUNT \\\n  --mount \"type=bind,src=${OUTDIR},dst=/out\" \\\n  influxdata/influxdb/releng/source-tarball:latest \\\n  -s \"$SHA\" \\\n  -b \"$BRANCH\" \\\n  -v \"$VERSION\"\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/releng/source-tarball/fs/usr/local/bin/influxdb_tarball.bash",
    "content": "#!/bin/bash\n\nfunction printHelp() {\n  >&2 echo \"USAGE: $0 -s INFLUXDB_SHA -b INFLUXDB_BRANCH -v INFLUXDB_VERSION\n\nEmits a tarball of influxdb source code and dependencies to /out,\nwhich must be a mounted volume if you want to access the file.\n\nIf the directory /influxdb-git exists and is mounted,\nthat will be used as the git repository used when cloning influxdb.\n\"\n}\n\nif [ $# -eq 0 ]; then\n  printHelp\n  exit 1\nfi\n\nSHA=\"\"\nBRANCH=\"\"\nVERSION=\"\"\n\nwhile getopts hs:b:v: arg; do\n  case \"$arg\" in\n    h) printHelp; exit 1;;\n    s) SHA=\"$OPTARG\";;\n    b) BRANCH=\"$OPTARG\";;\n    v) VERSION=\"$OPTARG\";;\n  esac\ndone\n\nif [ -z \"$SHA\" ] || [ -z \"$BRANCH\" ] || [ -z \"$VERSION\" ]; then\n  printHelp\n  exit 1\nfi\n\nIPATH=/go/src/github.com/influxdata\nmkdir -p \"$IPATH\" && cd \"$IPATH\"\nif [ -d /influxdb-git ]; then\n  git clone /influxdb-git \"$IPATH/influxdb\"\nelse\n\n  git clone https://github.com/influxdata/influxdb.git\nfi\n\ncd influxdb\ngit checkout \"$SHA\"\ngdm restore\ncd ..\n\n# Emit version metadata to appropriate files.\n\n# Include machine-parseable metadata JSON file.\nprintf '{\n\"version\": \"%s\",\n\"branch\": \"%s\",\n\"sha\": \"%s\"\n}' \"$VERSION\" \"$BRANCH\" \"$SHA\" > \"./influxdb/.metadata.json\"\n\n# Set version info for influxdb binaries.\n\nprintf 'package main\n\n// Code generated by influxdata/releng tooling. DO NOT EDIT.\n\nfunc init() {\n\tversion = \"%s\"\n\tbranch = \"%s\"\n\tcommit = \"%s\"\n}' \"$VERSION\" \"$BRANCH\" \"$SHA\" > \"./influxdb/cmd/influxd/version.generated.go\"\n\n\n# influx uses just version.\nprintf 'package main\n\n// Code generated by influxdata/releng tooling. 
DO NOT EDIT.\n\n\tfunc init() {\n\tversion = \"%s\"\n}' \"$VERSION\" > \"./influxdb/cmd/influx/version.generated.go\"\n\nTARBALL_NAME=\"influxdb-src-$SHA.tar.gz\"\n(cd /go && tar czf \"/out/$TARBALL_NAME\" --exclude-vcs ./*) # --exclude-vcs is a GNU tar option.\n(cd /out && md5sum \"$TARBALL_NAME\" > \"$TARBALL_NAME.md5\")\n(cd /out && sha256sum \"$TARBALL_NAME\" > \"$TARBALL_NAME.sha256\")\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/releng/unit-tests/Dockerfile",
    "content": "ARG GO_VERSION\nFROM golang:${GO_VERSION}-alpine\n\nRUN apk add --no-cache \\\n      bash \\\n      jq \\\n      git\n\nRUN go get -u github.com/jstemmer/go-junit-report && \\\n      mv /go/bin/go-junit-report /usr/bin/go-junit-report && \\\n      rm -rf /go/*\n\nCOPY fs/ /\n\nENTRYPOINT [\"influxdb_prebuild_tests.bash\"]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/releng/unit-tests/fs/usr/local/bin/influxdb_prebuild_tests.bash",
    "content": "#!/bin/bash\n\n# Extract tarball into GOPATH.\ntar xz -C \"$GOPATH\" -f /influxdb-src.tar.gz\n\ncd \"$GOPATH/src/github.com/influxdata/influxdb\"\ngo test -v ./... 2>&1 | tee /out/tests.log | go-junit-report > /out/influxdb.junit.xml\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/releng/unit-tests/run.bash",
    "content": "#!/bin/bash\n\nfunction printHelp() {\n  >&2 echo \"USAGE: $0 -i PATH_TO_SOURCE_TARBALL -o OUTDIR\n\nRuns unit tests for influxdb.\n\nIf the environment variable GO_NEXT is not empty, tests run with the 'next' version of Go.\n\"\n}\n\nif [ $# -eq 0 ]; then\n  printHelp\n  exit 1\nfi\n\nSRCDIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\nsource \"$SRCDIR/../_go_versions.sh\"\n\nOUTDIR=\"\"\nTARBALL=\"\"\n\nwhile getopts hi:o: arg; do\n  case \"$arg\" in\n    h) printHelp; exit 1;;\n    i) TARBALL=\"$OPTARG\";;\n    o) OUTDIR=\"$OPTARG\";;\n  esac\ndone\n\nif [ -z \"$TARBALL\" ] || [ -z \"$OUTDIR\" ]; then\n  printHelp\n  exit 1\nfi\n\nif [ -z \"$GO_NEXT\" ]; then\n  DOCKER_TAG=latest\n  GO_VERSION=\"$GO_CURRENT_VERSION\"\nelse\n  DOCKER_TAG=next\n  GO_VERSION=\"$GO_NEXT_VERSION\"\nfi\ndocker build --build-arg \"GO_VERSION=$GO_VERSION\" -t influxdata/influxdb/releng/unit-tests:\"$DOCKER_TAG\" \"$SRCDIR\"\n\ndocker run --rm \\\n   --mount type=bind,source=\"$OUTDIR\",destination=/out \\\n   --mount type=bind,source=\"$TARBALL\",destination=/influxdb-src.tar.gz,ro=1 \\\n  influxdata/influxdb/releng/unit-tests:\"$DOCKER_TAG\"\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/scripts/influxdb.service",
    "content": "# If you modify this, please also make sure to edit init.sh\n\n[Unit]\nDescription=InfluxDB is an open-source, distributed, time series database\nDocumentation=https://docs.influxdata.com/influxdb/\nAfter=network-online.target\n\n[Service]\nUser=influxdb\nGroup=influxdb\nLimitNOFILE=65536\nEnvironmentFile=-/etc/default/influxdb\nExecStart=/usr/bin/influxd -config /etc/influxdb/influxdb.conf ${INFLUXD_OPTS}\nKillMode=control-group\nRestart=on-failure\n\n[Install]\nWantedBy=multi-user.target\nAlias=influxd.service\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/scripts/init.sh",
    "content": "#!/bin/bash\n### BEGIN INIT INFO\n# Provides:          influxd\n# Required-Start:    $all\n# Required-Stop:     $remote_fs $syslog\n# Default-Start:     2 3 4 5\n# Default-Stop:      0 1 6\n# Short-Description: Start the InfluxDB process\n### END INIT INFO\n\n# If you modify this, please make sure to also edit influxdb.service\n\n# Command-line options that can be set in /etc/default/influxdb.  These will override\n# any config file values.\nDEFAULT=/etc/default/influxdb\n\n# Daemon options\nINFLUXD_OPTS=\n\n# Process name ( For display )\nNAME=influxdb\n\n# User and group\nUSER=influxdb\nGROUP=influxdb\n\n# Check for sudo or root privileges before continuing\nif [ \"$UID\" != \"0\" ]; then\n    echo \"You must be root to run this script\"\n    exit 1\nfi\n\n# Daemon name, where is the actual executable If the daemon is not\n# there, then exit.\nDAEMON=/usr/bin/influxd\nif [ ! -x $DAEMON ]; then\n    echo \"Executable $DAEMON does not exist!\"\n    exit 5\nfi\n\n# Configuration file\nCONFIG=/etc/influxdb/influxdb.conf\n\n# PID file for the daemon\nPIDFILE=/var/run/influxdb/influxd.pid\nPIDDIR=`dirname $PIDFILE`\nif [ ! -d \"$PIDDIR\" ]; then\n    mkdir -p $PIDDIR\n    chown $USER:$GROUP $PIDDIR\nfi\n\n# Max open files\nOPEN_FILE_LIMIT=65536\n\nif [ -r /lib/lsb/init-functions ]; then\n    source /lib/lsb/init-functions\nfi\n\n# Logging\nif [ -z \"$STDOUT\" ]; then\n    STDOUT=/dev/null\nfi\n\nif [ ! -f \"$STDOUT\" ]; then\n    mkdir -p $(dirname $STDOUT)\nfi\n\nif [ -z \"$STDERR\" ]; then\n    STDERR=/var/log/influxdb/influxd.log\nfi\n\nif [ ! -f \"$STDERR\" ]; then\n    mkdir -p $(dirname $STDERR)\nfi\n\n# Override init script variables with DEFAULT values\nif [ -r $DEFAULT ]; then\n    source $DEFAULT\nfi\n\nfunction log_failure_msg() {\n    echo \"$@\" \"[ FAILED ]\"\n}\n\nfunction log_success_msg() {\n    echo \"$@\" \"[ OK ]\"\n}\n\nfunction start() {\n    # Check if config file exist\n    if [ ! 
-r $CONFIG ]; then\n        log_failure_msg \"config file $CONFIG doesn't exist (or you don't have permission to view)\"\n        exit 4\n    fi\n\n    # Check that the PID file exists, and check the actual status of process\n    if [ -f $PIDFILE ]; then\n        PID=\"$(cat $PIDFILE)\"\n        if kill -0 \"$PID\" &>/dev/null; then\n            # Process is already up\n            log_success_msg \"$NAME process is already running\"\n            return 0\n        fi\n    else\n        su -s /bin/sh -c \"touch $PIDFILE\" $USER &>/dev/null\n        if [ $? -ne 0 ]; then\n            log_failure_msg \"$PIDFILE not writable, check permissions\"\n            exit 5\n        fi\n    fi\n\n    # Bump the file limits, before launching the daemon. These will\n    # carry over to launched processes.\n    ulimit -n $OPEN_FILE_LIMIT\n    if [ $? -ne 0 ]; then\n        log_failure_msg \"Unable to set ulimit to $OPEN_FILE_LIMIT\"\n        exit 1\n    fi\n\n    # Launch process\n    echo \"Starting $NAME...\"\n    if which start-stop-daemon &>/dev/null; then\n        start-stop-daemon \\\n            --chuid $USER:$GROUP \\\n            --start \\\n            --quiet \\\n            --pidfile $PIDFILE \\\n            --exec $DAEMON \\\n            -- \\\n            -pidfile $PIDFILE \\\n            -config $CONFIG \\\n            $INFLUXD_OPTS >>$STDOUT 2>>$STDERR &\n    else\n        local CMD=\"$DAEMON -pidfile $PIDFILE -config $CONFIG $INFLUXD_OPTS >>$STDOUT 2>>$STDERR &\"\n        su -s /bin/sh -c \"$CMD\" $USER\n    fi\n\n    # Sleep to verify process is still up\n    sleep 1\n    if [ -f $PIDFILE ]; then\n        # PIDFILE exists\n        if kill -0 $(cat $PIDFILE) &>/dev/null; then\n            # PID up, service running\n            log_success_msg \"$NAME process was started\"\n            return 0\n        fi\n    fi\n    log_failure_msg \"$NAME process was unable to start\"\n    exit 1\n}\n\nfunction stop() {\n    # Stop the daemon.\n    if [ -f $PIDFILE ]; then\n    
    local PID=\"$(cat $PIDFILE)\"\n        if kill -0 $PID &>/dev/null; then\n            echo \"Stopping $NAME...\"\n            # Process still up, send SIGTERM and remove PIDFILE\n            kill -s TERM $PID &>/dev/null && rm -f \"$PIDFILE\" &>/dev/null\n            n=0\n            while true; do\n                # Enter loop to ensure process is stopped\n                kill -0 $PID &>/dev/null\n                if [ \"$?\" != \"0\" ]; then\n                    # Process stopped, break from loop\n                    log_success_msg \"$NAME process was stopped\"\n                    return 0\n                fi\n\n                # Process still up after signal, sleep and wait\n                sleep 1\n                n=$(expr $n + 1)\n                if [ $n -eq 30 ]; then\n                    # After 30 seconds, send SIGKILL\n                    echo \"Timeout exceeded, sending SIGKILL...\"\n                    kill -s KILL $PID &>/dev/null\n                elif [ $n -eq 40 ]; then\n                    # After 40 seconds, error out\n                    log_failure_msg \"could not stop $NAME process\"\n                    exit 1\n                fi\n            done\n        fi\n    fi\n    log_success_msg \"$NAME process already stopped\"\n}\n\nfunction restart() {\n    # Restart the daemon.\n    stop\n    start\n}\n\nfunction status() {\n    # Check the status of the process.\n    if [ -f $PIDFILE ]; then\n        PID=\"$(cat $PIDFILE)\"\n        if kill -0 $PID &>/dev/null; then\n            log_success_msg \"$NAME process is running\"\n            exit 0\n        fi\n    fi\n    log_failure_msg \"$NAME process is not running\"\n    exit 1\n}\n\ncase $1 in\n    start)\n        start\n        ;;\n\n    stop)\n        stop\n        ;;\n\n    restart)\n        restart\n        ;;\n\n    status)\n        status\n        ;;\n\n    version)\n        $DAEMON version\n        ;;\n\n    *)\n        # For invalid arguments, print the usage message.\n        echo 
\"Usage: $0 {start|stop|restart|status|version}\"\n        exit 2\n        ;;\nesac\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/scripts/logrotate",
    "content": "/var/log/influxdb/influxd.log {\n    daily\n    rotate 7\n    missingok\n    dateext\n    copytruncate\n    compress\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/scripts/post-install.sh",
    "content": "#!/bin/bash\n\nBIN_DIR=/usr/bin\nDATA_DIR=/var/lib/influxdb\nLOG_DIR=/var/log/influxdb\nSCRIPT_DIR=/usr/lib/influxdb/scripts\nLOGROTATE_DIR=/etc/logrotate.d\n\nfunction install_init {\n    cp -f $SCRIPT_DIR/init.sh /etc/init.d/influxdb\n    chmod +x /etc/init.d/influxdb\n}\n\nfunction install_systemd {\n    cp -f $SCRIPT_DIR/influxdb.service /lib/systemd/system/influxdb.service\n    systemctl enable influxdb\n}\n\nfunction install_update_rcd {\n    update-rc.d influxdb defaults\n}\n\nfunction install_chkconfig {\n    chkconfig --add influxdb\n}\n\nid influxdb &>/dev/null\nif [[ $? -ne 0 ]]; then\n    useradd --system -U -M influxdb -s /bin/false -d $DATA_DIR\nfi\n\nchown -R -L influxdb:influxdb $DATA_DIR\nchown -R -L influxdb:influxdb $LOG_DIR\n\n# Add defaults file, if it doesn't exist\nif [[ ! -f /etc/default/influxdb ]]; then\n    touch /etc/default/influxdb\nfi\n\n# Remove legacy symlink, if it exists\nif [[ -L /etc/init.d/influxdb ]]; then\n    rm -f /etc/init.d/influxdb\nfi\n\n# Distribution-specific logic\nif [[ -f /etc/redhat-release ]]; then\n    # RHEL-variant logic\n    which systemctl &>/dev/null\n    if [[ $? -eq 0 ]]; then\n\tinstall_systemd\n    else\n\t# Assuming sysv\n\tinstall_init\n\tinstall_chkconfig\n    fi\nelif [[ -f /etc/debian_version ]]; then\n    # Debian/Ubuntu logic\n    which systemctl &>/dev/null\n    if [[ $? -eq 0 ]]; then\n\tinstall_systemd\n    else\n\t# Assuming sysv\n\tinstall_init\n\tinstall_update_rcd\n    fi\nelif [[ -f /etc/os-release ]]; then\n    source /etc/os-release\n    if [[ $ID = \"amzn\" ]]; then\n\t# Amazon Linux logic\n\tinstall_init\n\tinstall_chkconfig\n    fi\nfi\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/scripts/post-uninstall.sh",
    "content": "#!/bin/bash\n\nfunction disable_systemd {\n    systemctl disable influxdb\n    rm -f /lib/systemd/system/influxdb.service\n}\n\nfunction disable_update_rcd {\n    update-rc.d -f influxdb remove\n    rm -f /etc/init.d/influxdb\n}\n\nfunction disable_chkconfig {\n    chkconfig --del influxdb\n    rm -f /etc/init.d/influxdb\n}\n\nif [[ -f /etc/redhat-release ]]; then\n    # RHEL-variant logic\n    if [[ \"$1\" = \"0\" ]]; then\n\t# InfluxDB is no longer installed, remove from init system\n\trm -f /etc/default/influxdb\n\t\n\twhich systemctl &>/dev/null\n\tif [[ $? -eq 0 ]]; then\n\t    disable_systemd\n\telse\n\t    # Assuming sysv\n\t    disable_chkconfig\n\tfi\n    fi\nelif [[ -f /etc/lsb-release ]]; then\n    # Debian/Ubuntu logic\n    if [[ \"$1\" != \"upgrade\" ]]; then\n\t# Remove/purge\n\trm -f /etc/default/influxdb\n\t\n\twhich systemctl &>/dev/null\n\tif [[ $? -eq 0 ]]; then\n\t    disable_systemd\n\telse\n\t    # Assuming sysv\n\t    disable_update_rcd\n\tfi\n    fi\nelif [[ -f /etc/os-release ]]; then\n    source /etc/os-release\n    if [[ $ID = \"amzn\" ]]; then\n\t# Amazon Linux logic\n\tif [[ \"$1\" = \"0\" ]]; then\n\t    # InfluxDB is no longer installed, remove from init system\n\t    rm -f /etc/default/influxdb\n\t    disable_chkconfig\n\tfi\n    fi\nfi\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/scripts/pre-install.sh",
    "content": "#!/bin/bash\n\nif [[ -d /etc/opt/influxdb ]]; then\n    # Legacy configuration found\n    if [[ ! -d /etc/influxdb ]]; then\n\t# New configuration does not exist, move legacy configuration to new location\n\techo -e \"Please note, InfluxDB's configuration is now located at '/etc/influxdb' (previously '/etc/opt/influxdb').\"\n\tmv -vn /etc/opt/influxdb /etc/influxdb\n\n\tif [[ -f /etc/influxdb/influxdb.conf ]]; then\n\t    backup_name=\"influxdb.conf.$(date +%s).backup\"\n\t    echo \"A backup of your current configuration can be found at: /etc/influxdb/$backup_name\"\n\t    cp -a /etc/influxdb/influxdb.conf /etc/influxdb/$backup_name\n\tfi\n    fi\nfi\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/collectd/README.md",
    "content": "# The collectd Input\n\nThe [collectd](https://collectd.org) input allows InfluxDB to accept data transmitted in collectd native format. This data is transmitted over UDP.\n\n## A note on UDP/IP OS Buffer sizes\n\nIf you're running Linux or FreeBSD, please adjust your OS UDP buffer\nsize limit, [see here for more details.](../udp/README.md#a-note-on-udpip-os-buffer-sizes)\n\n## Configuration\n\nEach collectd input allows the binding address, target database, and target retention policy to be set. If the database does not exist, it will be created automatically when the input is initialized. If the retention policy is not configured, then the default retention policy for the database is used. However if the retention policy is set, the retention policy must be explicitly created. The input will not automatically create it.\n\nEach collectd input also performs internal batching of the points it receives, as batched writes to the database are more efficient. The default batch size is 1000, pending batch factor is 5, with a batch timeout of 1 second. This means the input will write batches of maximum size 1000, but if a batch has not reached 1000 points within 1 second of the first point being added to a batch, it will emit that batch regardless of size. The pending batch factor controls how many batches can be in memory at once, allowing the input to transmit a batch, while still building other batches.\n\nThe path to the collectd types database file may also be set.\n\n## Large UDP packets\n\nPlease note that UDP packets larger than the standard size of 1452 are dropped at the time of ingestion. 
Be sure to set `MaxPacketSize` to 1452 in the collectd configuration.\n\n## Config Example\n\n```\n[[collectd]]\n  enabled = true\n  bind-address = \":25826\" # the bind address\n  database = \"collectd\" # Name of the database that will be written to\n  retention-policy = \"\"\n  batch-size = 5000 # will flush if this many points get buffered\n  batch-pending = 10 # number of batches that may be pending in memory\n  batch-timeout = \"10s\"\n  read-buffer = 0 # UDP read buffer size, 0 means to use OS default\n  typesdb = \"/usr/share/collectd/types.db\"\n  security-level = \"none\" # \"none\", \"sign\", or \"encrypt\"\n  auth-file = \"/etc/collectd/auth_file\"\n```\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/collectd/collectd_test.conf",
    "content": "absolute\t\tvalue:ABSOLUTE:0:U\napache_bytes\t\tvalue:DERIVE:0:U\napache_connections\tvalue:GAUGE:0:65535\napache_idle_workers\tvalue:GAUGE:0:65535\napache_requests\t\tvalue:DERIVE:0:U\napache_scoreboard\tvalue:GAUGE:0:65535\nath_nodes\t\tvalue:GAUGE:0:65535\nath_stat\t\tvalue:DERIVE:0:U\nbackends\t\tvalue:GAUGE:0:65535\nbitrate\t\t\tvalue:GAUGE:0:4294967295\nbytes\t\t\tvalue:GAUGE:0:U\ncache_eviction\t\tvalue:DERIVE:0:U\ncache_operation\t\tvalue:DERIVE:0:U\ncache_ratio\t\tvalue:GAUGE:0:100\ncache_result\t\tvalue:DERIVE:0:U\ncache_size\t\tvalue:GAUGE:0:U\ncharge\t\t\tvalue:GAUGE:0:U\ncompression_ratio\tvalue:GAUGE:0:2\ncompression\t\tuncompressed:DERIVE:0:U, compressed:DERIVE:0:U\nconnections\t\tvalue:DERIVE:0:U\nconntrack\t\tvalue:GAUGE:0:4294967295\ncontextswitch\t\tvalue:DERIVE:0:U\ncounter\t\t\tvalue:COUNTER:U:U\ncpufreq\t\t\tvalue:GAUGE:0:U\ncpu\t\t\tvalue:DERIVE:0:U\ncurrent_connections\tvalue:GAUGE:0:U\ncurrent_sessions\tvalue:GAUGE:0:U\ncurrent\t\t\tvalue:GAUGE:U:U\ndelay\t\t\tvalue:GAUGE:-1000000:1000000\nderive\t\t\tvalue:DERIVE:0:U\ndf_complex\t\tvalue:GAUGE:0:U\ndf_inodes\t\tvalue:GAUGE:0:U\ndf\t\t\tused:GAUGE:0:1125899906842623, free:GAUGE:0:1125899906842623\ndisk_latency\t\tread:GAUGE:0:U, write:GAUGE:0:U\ndisk_merged\t\tread:DERIVE:0:U, write:DERIVE:0:U\ndisk_octets\t\tread:DERIVE:0:U, write:DERIVE:0:U\ndisk_ops_complex\tvalue:DERIVE:0:U\ndisk_ops\t\tread:DERIVE:0:U, write:DERIVE:0:U\ndisk_time\t\tread:DERIVE:0:U, write:DERIVE:0:U\ndns_answer\t\tvalue:DERIVE:0:U\ndns_notify\t\tvalue:DERIVE:0:U\ndns_octets\t\tqueries:DERIVE:0:U, 
responses:DERIVE:0:U\ndns_opcode\t\tvalue:DERIVE:0:U\ndns_qtype_cached\tvalue:GAUGE:0:4294967295\ndns_qtype\t\tvalue:DERIVE:0:U\ndns_query\t\tvalue:DERIVE:0:U\ndns_question\t\tvalue:DERIVE:0:U\ndns_rcode\t\tvalue:DERIVE:0:U\ndns_reject\t\tvalue:DERIVE:0:U\ndns_request\t\tvalue:DERIVE:0:U\ndns_resolver\t\tvalue:DERIVE:0:U\ndns_response\t\tvalue:DERIVE:0:U\ndns_transfer\t\tvalue:DERIVE:0:U\ndns_update\t\tvalue:DERIVE:0:U\ndns_zops\t\tvalue:DERIVE:0:U\nduration\t\tseconds:GAUGE:0:U\nemail_check\t\tvalue:GAUGE:0:U\nemail_count\t\tvalue:GAUGE:0:U\nemail_size\t\tvalue:GAUGE:0:U\nentropy\t\t\tvalue:GAUGE:0:4294967295\nfanspeed\t\tvalue:GAUGE:0:U\nfile_size\t\tvalue:GAUGE:0:U\nfiles\t\t\tvalue:GAUGE:0:U\nflow\t\t\tvalue:GAUGE:0:U\nfork_rate\t\tvalue:DERIVE:0:U\nfrequency_offset\tvalue:GAUGE:-1000000:1000000\nfrequency\t\tvalue:GAUGE:0:U\nfscache_stat\t\tvalue:DERIVE:0:U\ngauge\t\t\tvalue:GAUGE:U:U\nhash_collisions\t\tvalue:DERIVE:0:U\nhttp_request_methods\tvalue:DERIVE:0:U\nhttp_requests\t\tvalue:DERIVE:0:U\nhttp_response_codes\tvalue:DERIVE:0:U\nhumidity\t\tvalue:GAUGE:0:100\nif_collisions\t\tvalue:DERIVE:0:U\nif_dropped\t\trx:DERIVE:0:U, tx:DERIVE:0:U\nif_errors\t\trx:DERIVE:0:U, tx:DERIVE:0:U\nif_multicast\t\tvalue:DERIVE:0:U\nif_octets\t\trx:DERIVE:0:U, tx:DERIVE:0:U\nif_packets\t\trx:DERIVE:0:U, tx:DERIVE:0:U\nif_rx_errors\t\tvalue:DERIVE:0:U\nif_rx_octets\t\tvalue:DERIVE:0:U\nif_tx_errors\t\tvalue:DERIVE:0:U\nif_tx_octets\t\tvalue:DERIVE:0:U\ninvocations\t\tvalue:DERIVE:0:U\nio_octets\t\trx:DERIVE:0:U, tx:DERIVE:0:U\nio_packets\t\trx:DERIVE:0:U, tx:DERIVE:0:U\nipt_bytes\t\tvalue:DERIVE:0:U\nipt_packets\t\tvalue:DERIVE:0:U\nirq\t\t\tvalue:DERIVE:0:U\nlatency\t\t\tvalue:GAUGE:0:U\nlinks\t\t\tvalue:GAUGE:0:U\nload\t\t\tshortterm:GAUGE:0:5000, midterm:GAUGE:0:5000, longterm:GAUGE:0:5000\nmd_disks\t\tvalue:GAUGE:0:U\nmemcached_command\tvalue:DERIVE:0:U\nmemcached_connections\tvalue:GAUGE:0:U\nmemcached_items\t\tvalue:GAUGE:0:U\nmemcached_octets\trx:DERIVE:0:U, 
tx:DERIVE:0:U\nmemcached_ops\t\tvalue:DERIVE:0:U\nmemory\t\t\tvalue:GAUGE:0:281474976710656\nmultimeter\t\tvalue:GAUGE:U:U\nmutex_operations\tvalue:DERIVE:0:U\nmysql_commands\t\tvalue:DERIVE:0:U\nmysql_handler\t\tvalue:DERIVE:0:U\nmysql_locks\t\tvalue:DERIVE:0:U\nmysql_log_position\tvalue:DERIVE:0:U\nmysql_octets\t\trx:DERIVE:0:U, tx:DERIVE:0:U\nnfs_procedure\t\tvalue:DERIVE:0:U\nnginx_connections\tvalue:GAUGE:0:U\nnginx_requests\t\tvalue:DERIVE:0:U\nnode_octets\t\trx:DERIVE:0:U, tx:DERIVE:0:U\nnode_rssi\t\tvalue:GAUGE:0:255\nnode_stat\t\tvalue:DERIVE:0:U\nnode_tx_rate\t\tvalue:GAUGE:0:127\nobjects\t\t\tvalue:GAUGE:0:U\noperations\t\tvalue:DERIVE:0:U\npercent\t\t\tvalue:GAUGE:0:100.1\npercent_bytes\t\tvalue:GAUGE:0:100.1\npercent_inodes\t\tvalue:GAUGE:0:100.1\npf_counters\t\tvalue:DERIVE:0:U\npf_limits\t\tvalue:DERIVE:0:U\npf_source\t\tvalue:DERIVE:0:U\npf_states\t\tvalue:GAUGE:0:U\npf_state\t\tvalue:DERIVE:0:U\npg_blks\t\t\tvalue:DERIVE:0:U\npg_db_size\t\tvalue:GAUGE:0:U\npg_n_tup_c\t\tvalue:DERIVE:0:U\npg_n_tup_g\t\tvalue:GAUGE:0:U\npg_numbackends\t\tvalue:GAUGE:0:U\npg_scan\t\t\tvalue:DERIVE:0:U\npg_xact\t\t\tvalue:DERIVE:0:U\nping_droprate\t\tvalue:GAUGE:0:100\nping_stddev\t\tvalue:GAUGE:0:65535\nping\t\t\tvalue:GAUGE:0:65535\nplayers\t\t\tvalue:GAUGE:0:1000000\npower\t\t\tvalue:GAUGE:0:U\nprotocol_counter\tvalue:DERIVE:0:U\nps_code\t\t\tvalue:GAUGE:0:9223372036854775807\nps_count\t\tprocesses:GAUGE:0:1000000, threads:GAUGE:0:1000000\nps_cputime\t\tuser:DERIVE:0:U, syst:DERIVE:0:U\nps_data\t\t\tvalue:GAUGE:0:9223372036854775807\nps_disk_octets\t\tread:DERIVE:0:U, write:DERIVE:0:U\nps_disk_ops\t\tread:DERIVE:0:U, write:DERIVE:0:U\nps_pagefaults\t\tminflt:DERIVE:0:U, 
majflt:DERIVE:0:U\nps_rss\t\t\tvalue:GAUGE:0:9223372036854775807\nps_stacksize\t\tvalue:GAUGE:0:9223372036854775807\nps_state\t\tvalue:GAUGE:0:65535\nps_vm\t\t\tvalue:GAUGE:0:9223372036854775807\nqueue_length\t\tvalue:GAUGE:0:U\nrecords\t\t\tvalue:GAUGE:0:U\nrequests\t\tvalue:GAUGE:0:U\nresponse_time\t\tvalue:GAUGE:0:U\nresponse_code\t\tvalue:GAUGE:0:U\nroute_etx\t\tvalue:GAUGE:0:U\nroute_metric\t\tvalue:GAUGE:0:U\nroutes\t\t\tvalue:GAUGE:0:U\nserial_octets\t\trx:DERIVE:0:U, tx:DERIVE:0:U\nsignal_noise\t\tvalue:GAUGE:U:0\nsignal_power\t\tvalue:GAUGE:U:0\nsignal_quality\t\tvalue:GAUGE:0:U\nsnr\t\t\tvalue:GAUGE:0:U\nspam_check\t\tvalue:GAUGE:0:U\nspam_score\t\tvalue:GAUGE:U:U\nspl\t\t\tvalue:GAUGE:U:U\nswap_io\t\t\tvalue:DERIVE:0:U\nswap\t\t\tvalue:GAUGE:0:1099511627776\ntcp_connections\t\tvalue:GAUGE:0:4294967295\ntemperature\t\tvalue:GAUGE:U:U\nthreads\t\t\tvalue:GAUGE:0:U\ntime_dispersion\t\tvalue:GAUGE:-1000000:1000000\ntimeleft\t\tvalue:GAUGE:0:U\ntime_offset\t\tvalue:GAUGE:-1000000:1000000\ntotal_bytes\t\tvalue:DERIVE:0:U\ntotal_connections\tvalue:DERIVE:0:U\ntotal_objects\t\tvalue:DERIVE:0:U\ntotal_operations\tvalue:DERIVE:0:U\ntotal_requests\t\tvalue:DERIVE:0:U\ntotal_sessions\t\tvalue:DERIVE:0:U\ntotal_threads\t\tvalue:DERIVE:0:U\ntotal_time_in_ms\tvalue:DERIVE:0:U\ntotal_values\t\tvalue:DERIVE:0:U\nuptime\t\t\tvalue:GAUGE:0:4294967295\nusers\t\t\tvalue:GAUGE:0:65535\nvcl\t\t\tvalue:GAUGE:0:65535\nvcpu\t\t\tvalue:GAUGE:0:U\nvirt_cpu_total\t\tvalue:DERIVE:0:U\nvirt_vcpu\t\tvalue:DERIVE:0:U\nvmpage_action\t\tvalue:DERIVE:0:U\nvmpage_faults\t\tminflt:DERIVE:0:U, majflt:DERIVE:0:U\nvmpage_io\t\tin:DERIVE:0:U, out:DERIVE:0:U\nvmpage_number\t\tvalue:GAUGE:0:4294967295\nvolatile_changes\tvalue:GAUGE:0:U\nvoltage_threshold\tvalue:GAUGE:U:U, threshold:GAUGE:U:U\nvoltage\t\t\tvalue:GAUGE:U:U\nvs_memory\t\tvalue:GAUGE:0:9223372036854775807\nvs_processes\t\tvalue:GAUGE:0:65535\nvs_threads\t\tvalue:GAUGE:0:65535\n\n#\n# Legacy types\n# (required for the v5 upgrade 
target)\n#\narc_counts\t\tdemand_data:COUNTER:0:U, demand_metadata:COUNTER:0:U, prefetch_data:COUNTER:0:U, prefetch_metadata:COUNTER:0:U\narc_l2_bytes\t\tread:COUNTER:0:U, write:COUNTER:0:U\narc_l2_size\t\tvalue:GAUGE:0:U\narc_ratio\t\tvalue:GAUGE:0:U\narc_size\t\tcurrent:GAUGE:0:U, target:GAUGE:0:U, minlimit:GAUGE:0:U, maxlimit:GAUGE:0:U\nmysql_qcache\t\thits:COUNTER:0:U, inserts:COUNTER:0:U, not_cached:COUNTER:0:U, lowmem_prunes:COUNTER:0:U, queries_in_cache:GAUGE:0:U\nmysql_threads\t\trunning:GAUGE:0:U, connected:GAUGE:0:U, cached:GAUGE:0:U, created:COUNTER:0:U\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/collectd/config.go",
    "content": "package collectd\n\nimport (\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n\t\"github.com/influxdata/influxdb/toml\"\n)\n\nconst (\n\t// DefaultBindAddress is the default port to bind to.\n\tDefaultBindAddress = \":25826\"\n\n\t// DefaultDatabase is the default DB to write to.\n\tDefaultDatabase = \"collectd\"\n\n\t// DefaultRetentionPolicy is the default retention policy of the writes.\n\tDefaultRetentionPolicy = \"\"\n\n\t// DefaultBatchSize is the default write batch size.\n\tDefaultBatchSize = 5000\n\n\t// DefaultBatchPending is the default number of pending write batches.\n\tDefaultBatchPending = 10\n\n\t// DefaultBatchDuration is the default batch timeout duration.\n\tDefaultBatchDuration = toml.Duration(10 * time.Second)\n\n\t// DefaultTypesDB is the default location of the collectd types db file.\n\tDefaultTypesDB = \"/usr/share/collectd/types.db\"\n\n\t// DefaultReadBuffer is the default buffer size for the UDP listener.\n\t// Sets the size of the operating system's receive buffer associated with\n\t// the UDP traffic. 
Keep in mind that the OS must be able\n\t// to handle the number set here or the UDP listener will error and exit.\n\t//\n\t// DefaultReadBuffer = 0 means to use the OS default, which is usually too\n\t// small for high UDP performance.\n\t//\n\t// Increasing OS buffer limits:\n\t//     Linux:      sudo sysctl -w net.core.rmem_max=<read-buffer>\n\t//     BSD/Darwin: sudo sysctl -w kern.ipc.maxsockbuf=<read-buffer>\n\tDefaultReadBuffer = 0\n\n\t// DefaultSecurityLevel is the default security level.\n\tDefaultSecurityLevel = \"none\"\n\n\t// DefaultAuthFile is the default location of the user/password file.\n\tDefaultAuthFile = \"/etc/collectd/auth_file\"\n)\n\n// Config represents a configuration for the collectd service.\ntype Config struct {\n\tEnabled         bool          `toml:\"enabled\"`\n\tBindAddress     string        `toml:\"bind-address\"`\n\tDatabase        string        `toml:\"database\"`\n\tRetentionPolicy string        `toml:\"retention-policy\"`\n\tBatchSize       int           `toml:\"batch-size\"`\n\tBatchPending    int           `toml:\"batch-pending\"`\n\tBatchDuration   toml.Duration `toml:\"batch-timeout\"`\n\tReadBuffer      int           `toml:\"read-buffer\"`\n\tTypesDB         string        `toml:\"typesdb\"`\n\tSecurityLevel   string        `toml:\"security-level\"`\n\tAuthFile        string        `toml:\"auth-file\"`\n}\n\n// NewConfig returns a new instance of Config with defaults.\nfunc NewConfig() Config {\n\treturn Config{\n\t\tBindAddress:     DefaultBindAddress,\n\t\tDatabase:        DefaultDatabase,\n\t\tRetentionPolicy: DefaultRetentionPolicy,\n\t\tReadBuffer:      DefaultReadBuffer,\n\t\tBatchSize:       DefaultBatchSize,\n\t\tBatchPending:    DefaultBatchPending,\n\t\tBatchDuration:   DefaultBatchDuration,\n\t\tTypesDB:         DefaultTypesDB,\n\t\tSecurityLevel:   DefaultSecurityLevel,\n\t\tAuthFile:        DefaultAuthFile,\n\t}\n}\n\n// WithDefaults takes the given config and returns a new config with any required\n// 
default values set.\nfunc (c *Config) WithDefaults() *Config {\n\td := *c\n\tif d.BindAddress == \"\" {\n\t\td.BindAddress = DefaultBindAddress\n\t}\n\tif d.Database == \"\" {\n\t\td.Database = DefaultDatabase\n\t}\n\tif d.RetentionPolicy == \"\" {\n\t\td.RetentionPolicy = DefaultRetentionPolicy\n\t}\n\tif d.BatchSize == 0 {\n\t\td.BatchSize = DefaultBatchSize\n\t}\n\tif d.BatchPending == 0 {\n\t\td.BatchPending = DefaultBatchPending\n\t}\n\tif d.BatchDuration == 0 {\n\t\td.BatchDuration = DefaultBatchDuration\n\t}\n\tif d.ReadBuffer == 0 {\n\t\td.ReadBuffer = DefaultReadBuffer\n\t}\n\tif d.TypesDB == \"\" {\n\t\td.TypesDB = DefaultTypesDB\n\t}\n\tif d.SecurityLevel == \"\" {\n\t\td.SecurityLevel = DefaultSecurityLevel\n\t}\n\tif d.AuthFile == \"\" {\n\t\td.AuthFile = DefaultAuthFile\n\t}\n\n\treturn &d\n}\n\n// Validate returns an error if the Config is invalid.\nfunc (c *Config) Validate() error {\n\tswitch c.SecurityLevel {\n\tcase \"none\", \"sign\", \"encrypt\":\n\tdefault:\n\t\treturn errors.New(\"Invalid security level\")\n\t}\n\n\treturn nil\n}\n\n// Configs wraps a slice of Config to aggregate diagnostics.\ntype Configs []Config\n\n// Diagnostics returns one set of diagnostics for all of the Configs.\nfunc (c Configs) Diagnostics() (*diagnostics.Diagnostics, error) {\n\td := &diagnostics.Diagnostics{\n\t\tColumns: []string{\"enabled\", \"bind-address\", \"database\", \"retention-policy\", \"batch-size\", \"batch-pending\", \"batch-timeout\"},\n\t}\n\n\tfor _, cc := range c {\n\t\tif !cc.Enabled {\n\t\t\td.AddRow([]interface{}{false})\n\t\t\tcontinue\n\t\t}\n\n\t\tr := []interface{}{true, cc.BindAddress, cc.Database, cc.RetentionPolicy, cc.BatchSize, cc.BatchPending, cc.BatchDuration}\n\t\td.AddRow(r)\n\t}\n\n\treturn d, nil\n}\n\n// Enabled returns true if any underlying Config is Enabled.\nfunc (c Configs) Enabled() bool {\n\tfor _, cc := range c {\n\t\tif cc.Enabled {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/collectd/config_test.go",
    "content": "package collectd_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/influxdata/influxdb/services/collectd\"\n)\n\nfunc TestConfig_Parse(t *testing.T) {\n\t// Parse configuration.\n\tvar c collectd.Config\n\tif _, err := toml.Decode(`\nenabled = true\nbind-address = \":9000\"\ndatabase = \"xxx\"\ntypesdb = \"yyy\"\n`, &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Validate configuration.\n\tif c.Enabled != true {\n\t\tt.Fatalf(\"unexpected enabled: %v\", c.Enabled)\n\t} else if c.BindAddress != \":9000\" {\n\t\tt.Fatalf(\"unexpected bind address: %s\", c.BindAddress)\n\t} else if c.Database != \"xxx\" {\n\t\tt.Fatalf(\"unexpected database: %s\", c.Database)\n\t} else if c.TypesDB != \"yyy\" {\n\t\tt.Fatalf(\"unexpected types db: %s\", c.TypesDB)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/collectd/service.go",
    "content": "// Package collectd provides a service for InfluxDB to ingest data via the collectd protocol.\npackage collectd // import \"github.com/influxdata/influxdb/services/collectd\"\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"collectd.org/api\"\n\t\"collectd.org/network\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t\"github.com/uber-go/zap\"\n)\n\n// statistics gathered by the collectd service.\nconst (\n\tstatPointsReceived       = \"pointsRx\"\n\tstatBytesReceived        = \"bytesRx\"\n\tstatPointsParseFail      = \"pointsParseFail\"\n\tstatReadFail             = \"readFail\"\n\tstatBatchesTransmitted   = \"batchesTx\"\n\tstatPointsTransmitted    = \"pointsTx\"\n\tstatBatchesTransmitFail  = \"batchesTxFail\"\n\tstatDroppedPointsInvalid = \"droppedPointsInvalid\"\n)\n\n// pointsWriter is an internal interface to make testing easier.\ntype pointsWriter interface {\n\tWritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error\n}\n\n// metaClient is an internal interface to make testing easier.\ntype metaClient interface {\n\tCreateDatabase(name string) (*meta.DatabaseInfo, error)\n}\n\n// TypesDBFile reads a collectd types db from a file.\nfunc TypesDBFile(path string) (typesdb *api.TypesDB, err error) {\n\tvar reader *os.File\n\treader, err = os.Open(path)\n\tif err == nil {\n\t\ttypesdb, err = api.NewTypesDB(reader)\n\t}\n\treturn\n}\n\n// Service represents a UDP server which receives metrics in collectd's binary\n// protocol and stores them in InfluxDB.\ntype Service struct {\n\tConfig       *Config\n\tMetaClient   metaClient\n\tPointsWriter pointsWriter\n\tLogger       zap.Logger\n\n\twg      sync.WaitGroup\n\tconn    *net.UDPConn\n\tbatcher *tsdb.PointBatcher\n\tpopts   
network.ParseOpts\n\taddr    net.Addr\n\n\tmu    sync.RWMutex\n\tready bool          // Has the required database been created?\n\tdone  chan struct{} // Is the service closing or closed?\n\n\t// expvar-based stats.\n\tstats       *Statistics\n\tdefaultTags models.StatisticTags\n}\n\n// NewService returns a new instance of the collectd service.\nfunc NewService(c Config) *Service {\n\ts := Service{\n\t\t// Use defaults where necessary.\n\t\tConfig: c.WithDefaults(),\n\n\t\tLogger:      zap.New(zap.NullEncoder()),\n\t\tstats:       &Statistics{},\n\t\tdefaultTags: models.StatisticTags{\"bind\": c.BindAddress},\n\t}\n\n\treturn &s\n}\n\n// Open starts the service.\nfunc (s *Service) Open() error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif !s.closed() {\n\t\treturn nil // Already open.\n\t}\n\ts.done = make(chan struct{})\n\n\ts.Logger.Info(\"Starting collectd service\")\n\n\tif s.Config.BindAddress == \"\" {\n\t\treturn fmt.Errorf(\"bind address is blank\")\n\t} else if s.Config.Database == \"\" {\n\t\treturn fmt.Errorf(\"database name is blank\")\n\t} else if s.PointsWriter == nil {\n\t\treturn fmt.Errorf(\"PointsWriter is nil\")\n\t}\n\n\tif s.popts.TypesDB == nil {\n\t\t// Open collectd types.\n\t\tif stat, err := os.Stat(s.Config.TypesDB); err != nil {\n\t\t\treturn fmt.Errorf(\"Stat(): %s\", err)\n\t\t} else if stat.IsDir() {\n\t\t\talltypesdb, err := api.NewTypesDB(&bytes.Buffer{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tvar readdir func(path string)\n\t\t\treaddir = func(path string) {\n\t\t\t\tfiles, err := ioutil.ReadDir(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\ts.Logger.Info(fmt.Sprintf(\"Unable to read directory %s: %s\\n\", path, err))\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tfor _, f := range files {\n\t\t\t\t\tfullpath := filepath.Join(path, f.Name())\n\t\t\t\t\tif f.IsDir() {\n\t\t\t\t\t\treaddir(fullpath)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\ts.Logger.Info(fmt.Sprintf(\"Loading %s\\n\", fullpath))\n\t\t\t\t\ttypes, err 
:= TypesDBFile(fullpath)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ts.Logger.Info(fmt.Sprintf(\"Unable to parse collectd types file: %s\\n\", f.Name()))\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\talltypesdb.Merge(types)\n\t\t\t\t}\n\t\t\t}\n\t\t\treaddir(s.Config.TypesDB)\n\t\t\ts.popts.TypesDB = alltypesdb\n\t\t} else {\n\t\t\ts.Logger.Info(fmt.Sprintf(\"Loading %s\\n\", s.Config.TypesDB))\n\t\t\ttypes, err := TypesDBFile(s.Config.TypesDB)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Open(): %s\", err)\n\t\t\t}\n\t\t\ts.popts.TypesDB = types\n\t\t}\n\t}\n\n\t// Sets the security level according to the config.\n\t// Default not necessary because we validate the config.\n\tswitch s.Config.SecurityLevel {\n\tcase \"none\":\n\t\ts.popts.SecurityLevel = network.None\n\tcase \"sign\":\n\t\ts.popts.SecurityLevel = network.Sign\n\tcase \"encrypt\":\n\t\ts.popts.SecurityLevel = network.Encrypt\n\t}\n\n\t// Sets the auth file according to the config.\n\tif s.popts.PasswordLookup == nil {\n\t\ts.popts.PasswordLookup = network.NewAuthFile(s.Config.AuthFile)\n\t}\n\n\t// Resolve our address.\n\taddr, err := net.ResolveUDPAddr(\"udp\", s.Config.BindAddress)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to resolve UDP address: %s\", err)\n\t}\n\ts.addr = addr\n\n\t// Start listening\n\tconn, err := net.ListenUDP(\"udp\", addr)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to listen on UDP: %s\", err)\n\t}\n\n\tif s.Config.ReadBuffer != 0 {\n\t\terr = conn.SetReadBuffer(s.Config.ReadBuffer)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to set UDP read buffer to %d: %s\",\n\t\t\t\ts.Config.ReadBuffer, err)\n\t\t}\n\t}\n\ts.conn = conn\n\n\ts.Logger.Info(fmt.Sprint(\"Listening on UDP: \", conn.LocalAddr().String()))\n\n\t// Start the points batcher.\n\ts.batcher = tsdb.NewPointBatcher(s.Config.BatchSize, s.Config.BatchPending, time.Duration(s.Config.BatchDuration))\n\ts.batcher.Start()\n\n\t// Create waitgroup for signalling goroutines to stop and start 
goroutines\n\t// that process collectd packets.\n\ts.wg.Add(2)\n\tgo func() { defer s.wg.Done(); s.serve() }()\n\tgo func() { defer s.wg.Done(); s.writePoints() }()\n\n\treturn nil\n}\n\n// Close stops the service.\nfunc (s *Service) Close() error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif s.closed() {\n\t\treturn nil // Already closed.\n\t}\n\tclose(s.done)\n\n\t// Close the connection, and wait for the goroutine to exit.\n\tif s.conn != nil {\n\t\ts.conn.Close()\n\t}\n\tif s.batcher != nil {\n\t\ts.batcher.Stop()\n\t}\n\ts.wg.Wait()\n\n\t// Release all remaining resources.\n\ts.conn = nil\n\ts.batcher = nil\n\ts.Logger.Info(\"collectd UDP closed\")\n\ts.done = nil\n\treturn nil\n}\n\nfunc (s *Service) closed() bool {\n\tselect {\n\tcase <-s.done:\n\t\t// Service is closing.\n\t\treturn true\n\tdefault:\n\t}\n\treturn s.done == nil\n}\n\n// createInternalStorage ensures that the required database has been created.\nfunc (s *Service) createInternalStorage() error {\n\ts.mu.RLock()\n\tready := s.ready\n\ts.mu.RUnlock()\n\tif ready {\n\t\treturn nil\n\t}\n\n\tif _, err := s.MetaClient.CreateDatabase(s.Config.Database); err != nil {\n\t\treturn err\n\t}\n\n\t// The service is now ready.\n\ts.mu.Lock()\n\ts.ready = true\n\ts.mu.Unlock()\n\treturn nil\n}\n\n// WithLogger sets the service's logger.\nfunc (s *Service) WithLogger(log zap.Logger) {\n\ts.Logger = log.With(zap.String(\"service\", \"collectd\"))\n}\n\n// Statistics maintains statistics for the collectd service.\ntype Statistics struct {\n\tPointsReceived       int64\n\tBytesReceived        int64\n\tPointsParseFail      int64\n\tReadFail             int64\n\tBatchesTransmitted   int64\n\tPointsTransmitted    int64\n\tBatchesTransmitFail  int64\n\tInvalidDroppedPoints int64\n}\n\n// Statistics returns statistics for periodic monitoring.\nfunc (s *Service) Statistics(tags map[string]string) []models.Statistic {\n\treturn []models.Statistic{{\n\t\tName: \"collectd\",\n\t\tTags: 
s.defaultTags.Merge(tags),\n\t\tValues: map[string]interface{}{\n\t\t\tstatPointsReceived:       atomic.LoadInt64(&s.stats.PointsReceived),\n\t\t\tstatBytesReceived:        atomic.LoadInt64(&s.stats.BytesReceived),\n\t\t\tstatPointsParseFail:      atomic.LoadInt64(&s.stats.PointsParseFail),\n\t\t\tstatReadFail:             atomic.LoadInt64(&s.stats.ReadFail),\n\t\t\tstatBatchesTransmitted:   atomic.LoadInt64(&s.stats.BatchesTransmitted),\n\t\t\tstatPointsTransmitted:    atomic.LoadInt64(&s.stats.PointsTransmitted),\n\t\t\tstatBatchesTransmitFail:  atomic.LoadInt64(&s.stats.BatchesTransmitFail),\n\t\t\tstatDroppedPointsInvalid: atomic.LoadInt64(&s.stats.InvalidDroppedPoints),\n\t\t},\n\t}}\n}\n\n// SetTypes sets collectd types db.\nfunc (s *Service) SetTypes(types string) (err error) {\n\treader := strings.NewReader(types)\n\ts.popts.TypesDB, err = api.NewTypesDB(reader)\n\treturn\n}\n\n// Addr returns the listener's address. It returns nil if listener is closed.\nfunc (s *Service) Addr() net.Addr {\n\treturn s.conn.LocalAddr()\n}\n\nfunc (s *Service) serve() {\n\t// From https://collectd.org/wiki/index.php/Binary_protocol\n\t//   1024 bytes (payload only, not including UDP / IP headers)\n\t//   In versions 4.0 through 4.7, the receive buffer has a fixed size\n\t//   of 1024 bytes. When longer packets are received, the trailing data\n\t//   is simply ignored. Since version 4.8, the buffer size can be\n\t//   configured. 
Version 5.0 will increase the default buffer size to\n\t//   1452 bytes (the maximum payload size when using UDP/IPv6 over\n\t//   Ethernet).\n\tbuffer := make([]byte, 1452)\n\n\tfor {\n\t\tselect {\n\t\tcase <-s.done:\n\t\t\t// We closed the connection, time to go.\n\t\t\treturn\n\t\tdefault:\n\t\t\t// Keep processing.\n\t\t}\n\n\t\tn, _, err := s.conn.ReadFromUDP(buffer)\n\t\tif err != nil {\n\t\t\tatomic.AddInt64(&s.stats.ReadFail, 1)\n\t\t\ts.Logger.Info(fmt.Sprintf(\"collectd ReadFromUDP error: %s\", err))\n\t\t\tcontinue\n\t\t}\n\t\tif n > 0 {\n\t\t\tatomic.AddInt64(&s.stats.BytesReceived, int64(n))\n\t\t\ts.handleMessage(buffer[:n])\n\t\t}\n\t}\n}\n\nfunc (s *Service) handleMessage(buffer []byte) {\n\tvalueLists, err := network.Parse(buffer, s.popts)\n\tif err != nil {\n\t\tatomic.AddInt64(&s.stats.PointsParseFail, 1)\n\t\ts.Logger.Info(fmt.Sprintf(\"Collectd parse error: %s\", err))\n\t\treturn\n\t}\n\tfor _, valueList := range valueLists {\n\t\tpoints := s.UnmarshalValueList(valueList)\n\t\tfor _, p := range points {\n\t\t\ts.batcher.In() <- p\n\t\t}\n\t\tatomic.AddInt64(&s.stats.PointsReceived, int64(len(points)))\n\t}\n}\n\nfunc (s *Service) writePoints() {\n\tfor {\n\t\tselect {\n\t\tcase <-s.done:\n\t\t\treturn\n\t\tcase batch := <-s.batcher.Out():\n\t\t\t// Will attempt to create database if not yet created.\n\t\t\tif err := s.createInternalStorage(); err != nil {\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"Required database %s not yet created: %s\", s.Config.Database, err.Error()))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err := s.PointsWriter.WritePointsPrivileged(s.Config.Database, s.Config.RetentionPolicy, models.ConsistencyLevelAny, batch); err == nil {\n\t\t\t\tatomic.AddInt64(&s.stats.BatchesTransmitted, 1)\n\t\t\t\tatomic.AddInt64(&s.stats.PointsTransmitted, int64(len(batch)))\n\t\t\t} else {\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"failed to write point batch to database %q: %s\", s.Config.Database, 
err))\n\t\t\t\tatomic.AddInt64(&s.stats.BatchesTransmitFail, 1)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// UnmarshalValueList translates a ValueList into InfluxDB data points.\nfunc (s *Service) UnmarshalValueList(vl *api.ValueList) []models.Point {\n\ttimestamp := vl.Time.UTC()\n\n\tvar points []models.Point\n\tfor i := range vl.Values {\n\t\tvar name string\n\t\tname = fmt.Sprintf(\"%s_%s\", vl.Identifier.Plugin, vl.DSName(i))\n\t\ttags := make(map[string]string)\n\t\tfields := make(map[string]interface{})\n\n\t\t// Convert interface back to actual type, then to float64\n\t\tswitch value := vl.Values[i].(type) {\n\t\tcase api.Gauge:\n\t\t\tfields[\"value\"] = float64(value)\n\t\tcase api.Derive:\n\t\t\tfields[\"value\"] = float64(value)\n\t\tcase api.Counter:\n\t\t\tfields[\"value\"] = float64(value)\n\t\t}\n\n\t\tif vl.Identifier.Host != \"\" {\n\t\t\ttags[\"host\"] = vl.Identifier.Host\n\t\t}\n\t\tif vl.Identifier.PluginInstance != \"\" {\n\t\t\ttags[\"instance\"] = vl.Identifier.PluginInstance\n\t\t}\n\t\tif vl.Identifier.Type != \"\" {\n\t\t\ttags[\"type\"] = vl.Identifier.Type\n\t\t}\n\t\tif vl.Identifier.TypeInstance != \"\" {\n\t\t\ttags[\"type_instance\"] = vl.Identifier.TypeInstance\n\t\t}\n\n\t\t// Drop invalid points\n\t\tp, err := models.NewPoint(name, models.NewTags(tags), fields, timestamp)\n\t\tif err != nil {\n\t\t\ts.Logger.Info(fmt.Sprintf(\"Dropping point %v: %v\", name, err))\n\t\t\tatomic.AddInt64(&s.stats.InvalidDroppedPoints, 1)\n\t\t\tcontinue\n\t\t}\n\n\t\tpoints = append(points, p)\n\t}\n\treturn points\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/collectd/service_test.go",
    "content": "package collectd\n\nimport (\n\t\"encoding/hex\"\n\t\"errors\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/internal\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/toml\"\n\t\"github.com/uber-go/zap\"\n)\n\nfunc TestService_OpenClose(t *testing.T) {\n\tservice := NewTestService(1, time.Second)\n\n\t// Closing a closed service is fine.\n\tif err := service.Service.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Closing a closed service again is fine.\n\tif err := service.Service.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := service.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Opening an already open service is fine.\n\tif err := service.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Reopening a previously opened service is fine.\n\tif err := service.Service.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := service.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Tidy up.\n\tif err := service.Service.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\n// Test that the service can read types DB files from a directory.\nfunc TestService_Open_TypesDBDir(t *testing.T) {\n\tt.Parallel()\n\n\t// Make a temp dir to write types.db into.\n\ttmpDir, err := ioutil.TempDir(os.TempDir(), \"\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(tmpDir)\n\n\t// Write types.db.\n\tif err := ioutil.WriteFile(path.Join(tmpDir, \"types.db\"), []byte(typesDBText), 0777); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Setup config to read all files in the temp dir.\n\tc := Config{\n\t\tBindAddress:   \"127.0.0.1:0\",\n\t\tDatabase:      \"collectd_test\",\n\t\tBatchSize:     1000,\n\t\tBatchDuration: toml.Duration(time.Second),\n\t\tTypesDB:       tmpDir,\n\t}\n\n\ts := &TestService{\n\t\tConfig:     c,\n\t\tService:    
NewService(c),\n\t\tMetaClient: &internal.MetaClientMock{},\n\t}\n\n\tif testing.Verbose() {\n\t\ts.Service.WithLogger(zap.New(\n\t\t\tzap.NewTextEncoder(),\n\t\t\tzap.Output(os.Stderr),\n\t\t))\n\t}\n\n\ts.MetaClient.CreateDatabaseFn = func(name string) (*meta.DatabaseInfo, error) {\n\t\treturn nil, nil\n\t}\n\n\ts.Service.PointsWriter = s\n\ts.Service.MetaClient = s.MetaClient\n\n\tif err := s.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := s.Service.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\n// Test that the service checks / creates the target database every time we\n// try to write points.\nfunc TestService_CreatesDatabase(t *testing.T) {\n\tt.Parallel()\n\n\ts := NewTestService(1, time.Second)\n\n\ts.WritePointsFn = func(string, string, models.ConsistencyLevel, []models.Point) error {\n\t\treturn nil\n\t}\n\n\tcalled := make(chan struct{})\n\ts.MetaClient.CreateDatabaseFn = func(name string) (*meta.DatabaseInfo, error) {\n\t\tif name != s.Config.Database {\n\t\t\tt.Errorf(\"\\n\\texp = %s\\n\\tgot = %s\\n\", s.Config.Database, name)\n\t\t}\n\t\t// Allow some time for the caller to return and the ready status to\n\t\t// be set.\n\t\ttime.AfterFunc(10*time.Millisecond, func() { called <- struct{}{} })\n\t\treturn nil, errors.New(\"an error\")\n\t}\n\n\tif err := s.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpoints, err := models.ParsePointsString(`cpu value=1`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ts.Service.batcher.In() <- points[0] // Send a point.\n\ts.Service.batcher.Flush()\n\tselect {\n\tcase <-called:\n\t\t// OK\n\tcase <-time.NewTimer(5 * time.Second).C:\n\t\tt.Fatal(\"Service should have attempted to create database\")\n\t}\n\n\t// ready status should not have been switched due to meta client error.\n\ts.Service.mu.RLock()\n\tready := s.Service.ready\n\ts.Service.mu.RUnlock()\n\n\tif got, exp := ready, false; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// This time MC won't 
cause an error.\n\ts.MetaClient.CreateDatabaseFn = func(name string) (*meta.DatabaseInfo, error) {\n\t\t// Allow some time for the caller to return and the ready status to\n\t\t// be set.\n\t\ttime.AfterFunc(10*time.Millisecond, func() { called <- struct{}{} })\n\t\treturn nil, nil\n\t}\n\n\ts.Service.batcher.In() <- points[0] // Send a point.\n\ts.Service.batcher.Flush()\n\tselect {\n\tcase <-called:\n\t\t// OK\n\tcase <-time.NewTimer(5 * time.Second).C:\n\t\tt.Fatal(\"Service should have attempted to create database\")\n\t}\n\n\t// ready status should not have been switched due to meta client error.\n\ts.Service.mu.RLock()\n\tready = s.Service.ready\n\ts.Service.mu.RUnlock()\n\n\tif got, exp := ready, true; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\ts.Service.Close()\n}\n\n// Test that the collectd service correctly batches points by BatchSize.\nfunc TestService_BatchSize(t *testing.T) {\n\tt.Parallel()\n\n\ttotalPoints := len(expPoints)\n\n\t// Batch sizes that totalTestPoints divide evenly by.\n\tbatchSizes := []int{1, 2, 13}\n\n\tfor _, batchSize := range batchSizes {\n\t\tfunc() {\n\t\t\ts := NewTestService(batchSize, time.Second)\n\n\t\t\tpointCh := make(chan models.Point)\n\t\t\ts.WritePointsFn = func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {\n\t\t\t\tif len(points) != batchSize {\n\t\t\t\t\tt.Errorf(\"\\n\\texp = %d\\n\\tgot = %d\\n\", batchSize, len(points))\n\t\t\t\t}\n\n\t\t\t\tfor _, p := range points {\n\t\t\t\t\tpointCh <- p\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif err := s.Service.Open(); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdefer func() { t.Log(\"closing service\"); s.Service.Close() }()\n\n\t\t\t// Get the address & port the service is listening on for collectd data.\n\t\t\taddr := s.Service.Addr()\n\t\t\tconn, err := net.Dial(\"udp\", addr.String())\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\t// Send the test 
data to the service.\n\t\t\tif n, err := conn.Write(testData); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else if n != len(testData) {\n\t\t\t\tt.Fatalf(\"only sent %d of %d bytes\", n, len(testData))\n\t\t\t}\n\n\t\t\tpoints := []models.Point{}\n\t\tLoop:\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase p := <-pointCh:\n\t\t\t\t\tpoints = append(points, p)\n\t\t\t\t\tif len(points) == totalPoints {\n\t\t\t\t\t\tbreak Loop\n\t\t\t\t\t}\n\t\t\t\tcase <-time.After(time.Second):\n\t\t\t\t\tt.Logf(\"exp %d points, got %d\", totalPoints, len(points))\n\t\t\t\t\tt.Fatal(\"timed out waiting for points from collectd service\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(points) != totalPoints {\n\t\t\t\tt.Fatalf(\"exp %d points, got %d\", totalPoints, len(points))\n\t\t\t}\n\n\t\t\tfor i, exp := range expPoints {\n\t\t\t\tgot := points[i].String()\n\t\t\t\tif got != exp {\n\t\t\t\t\tt.Fatalf(\"\\n\\texp = %s\\n\\tgot = %s\\n\", exp, got)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}\n\n// Test that the collectd service correctly batches points using BatchDuration.\nfunc TestService_BatchDuration(t *testing.T) {\n\tt.Parallel()\n\n\ttotalPoints := len(expPoints)\n\n\ts := NewTestService(5000, 250*time.Millisecond)\n\n\tpointCh := make(chan models.Point, 1000)\n\ts.WritePointsFn = func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {\n\t\tfor _, p := range points {\n\t\t\tpointCh <- p\n\t\t}\n\t\treturn nil\n\t}\n\n\tif err := s.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() { t.Log(\"closing service\"); s.Service.Close() }()\n\n\t// Get the address & port the service is listening on for collectd data.\n\taddr := s.Service.Addr()\n\tconn, err := net.Dial(\"udp\", addr.String())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Send the test data to the service.\n\tif n, err := conn.Write(testData); err != nil {\n\t\tt.Fatal(err)\n\t} else if n != len(testData) {\n\t\tt.Fatalf(\"only sent %d of %d bytes\", n, 
len(testData))\n\t}\n\n\tpoints := []models.Point{}\nLoop:\n\tfor {\n\t\tselect {\n\t\tcase p := <-pointCh:\n\t\t\tpoints = append(points, p)\n\t\t\tif len(points) == totalPoints {\n\t\t\t\tbreak Loop\n\t\t\t}\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Logf(\"exp %d points, got %d\", totalPoints, len(points))\n\t\t\tt.Fatal(\"timed out waiting for points from collectd service\")\n\t\t}\n\t}\n\n\tif len(points) != totalPoints {\n\t\tt.Fatalf(\"exp %d points, got %d\", totalPoints, len(points))\n\t}\n\n\tfor i, exp := range expPoints {\n\t\tgot := points[i].String()\n\t\tif got != exp {\n\t\t\tt.Fatalf(\"\\n\\texp = %s\\n\\tgot = %s\\n\", exp, got)\n\t\t}\n\t}\n}\n\ntype TestService struct {\n\tService       *Service\n\tConfig        Config\n\tMetaClient    *internal.MetaClientMock\n\tWritePointsFn func(string, string, models.ConsistencyLevel, []models.Point) error\n}\n\nfunc NewTestService(batchSize int, batchDuration time.Duration) *TestService {\n\tc := Config{\n\t\tBindAddress:   \"127.0.0.1:0\",\n\t\tDatabase:      \"collectd_test\",\n\t\tBatchSize:     batchSize,\n\t\tBatchDuration: toml.Duration(batchDuration),\n\t}\n\n\ts := &TestService{\n\t\tConfig:     c,\n\t\tService:    NewService(c),\n\t\tMetaClient: &internal.MetaClientMock{},\n\t}\n\n\ts.MetaClient.CreateDatabaseFn = func(name string) (*meta.DatabaseInfo, error) {\n\t\treturn nil, nil\n\t}\n\n\ts.Service.PointsWriter = s\n\ts.Service.MetaClient = s.MetaClient\n\n\t// Set the collectd types using test string.\n\tif err := s.Service.SetTypes(typesDBText); err != nil {\n\t\tpanic(err)\n\t}\n\n\tif testing.Verbose() {\n\t\ts.Service.WithLogger(zap.New(\n\t\t\tzap.NewTextEncoder(),\n\t\t\tzap.Output(os.Stderr),\n\t\t))\n\t}\n\n\treturn s\n}\n\nfunc (w *TestService) WritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {\n\treturn w.WritePointsFn(database, retentionPolicy, consistencyLevel, points)\n}\n\nfunc check(err error) 
{\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n// Raw data sent by collectd, captured using Wireshark.\nvar testData = func() []byte {\n\tdata := []string{\n\t\t\"000000167066312d36322d3231302d39342d313733000001000c00000000544928ff0007000c0000000\",\n\t\t\"0000000050002000c656e74726f7079000004000c656e74726f7079000006000f000101000000000000\",\n\t\t\"7240000200086370750000030006310000040008637075000005000969646c65000006000f000100000\",\n\t\t\"0000000a674620005000977616974000006000f00010000000000000000000002000764660000030005\",\n\t\t\"00000400076466000005000d6c6976652d636f7700000600180002010100000000a090b641000000a0c\",\n\t\t\"b6a2742000200086370750000030006310000040008637075000005000e696e74657272757074000006\",\n\t\t\"000f00010000000000000000fe0005000c736f6674697271000006000f0001000000000000000000000\",\n\t\t\"20007646600000300050000040007646600000500096c69766500000600180002010100000000000000\",\n\t\t\"00000000e0ec972742000200086370750000030006310000040008637075000005000a737465616c000\",\n\t\t\"006000f00010000000000000000000003000632000005000975736572000006000f0001000000000000\",\n\t\t\"005f36000500096e696365000006000f0001000000000000000ad80002000e696e74657266616365000\",\n\t\t\"0030005000004000e69665f6f6374657473000005000b64756d6d793000000600180002000000000000\",\n\t\t\"00000000000000000000041a000200076466000004000764660000050008746d7000000600180002010\",\n\t\t\"1000000000000f240000000a0ea97274200020008637075000003000632000004000863707500000500\",\n\t\t\"0b73797374656d000006000f00010000000000000045d30002000e696e7465726661636500000300050\",\n\t\t\"00004000f69665f7061636b657473000005000b64756d6d793000000600180002000000000000000000\",\n\t\t\"00000000000000000f000200086370750000030006320000040008637075000005000969646c6500000\",\n\t\t\"6000f0001000000000000a66480000200076466000003000500000400076466000005000d72756e2d6c\",\n\t\t\"6f636b000006001800020101000000000000000000000000000054410002000e696e746572666163650\",\n\t\t\"00004000e69665f6572726f7273000005000b64756d6d79300
000060018000200000000000000000000\",\n\t\t\"00000000000000000002000863707500000300063200000400086370750000050009776169740000060\",\n\t\t\"00f00010000000000000000000005000e696e74657272757074000006000f0001000000000000000132\",\n\t}\n\tb, err := hex.DecodeString(strings.Join(data, \"\"))\n\tcheck(err)\n\treturn b\n}()\n\nvar expPoints = []string{\n\t\"entropy_value,host=pf1-62-210-94-173,type=entropy value=288 1414080767000000000\",\n\t\"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=idle value=10908770 1414080767000000000\",\n\t\"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=wait value=0 1414080767000000000\",\n\t\"df_used,host=pf1-62-210-94-173,type=df,type_instance=live-cow value=378576896 1414080767000000000\",\n\t\"df_free,host=pf1-62-210-94-173,type=df,type_instance=live-cow value=50287988736 1414080767000000000\",\n\t\"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=interrupt value=254 1414080767000000000\",\n\t\"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=softirq value=0 1414080767000000000\",\n\t\"df_used,host=pf1-62-210-94-173,type=df,type_instance=live value=0 1414080767000000000\",\n\t\"df_free,host=pf1-62-210-94-173,type=df,type_instance=live value=50666565632 1414080767000000000\",\n\t\"cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=steal value=0 1414080767000000000\",\n\t\"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=user value=24374 1414080767000000000\",\n\t\"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=nice value=2776 1414080767000000000\",\n\t\"interface_rx,host=pf1-62-210-94-173,type=if_octets,type_instance=dummy0 value=0 1414080767000000000\",\n\t\"interface_tx,host=pf1-62-210-94-173,type=if_octets,type_instance=dummy0 value=1050 1414080767000000000\",\n\t\"df_used,host=pf1-62-210-94-173,type=df,type_instance=tmp value=73728 
1414080767000000000\",\n\t\"df_free,host=pf1-62-210-94-173,type=df,type_instance=tmp value=50666491904 1414080767000000000\",\n\t\"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=system value=17875 1414080767000000000\",\n\t\"interface_rx,host=pf1-62-210-94-173,type=if_packets,type_instance=dummy0 value=0 1414080767000000000\",\n\t\"interface_tx,host=pf1-62-210-94-173,type=if_packets,type_instance=dummy0 value=15 1414080767000000000\",\n\t\"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=idle value=10904704 1414080767000000000\",\n\t\"df_used,host=pf1-62-210-94-173,type=df,type_instance=run-lock value=0 1414080767000000000\",\n\t\"df_free,host=pf1-62-210-94-173,type=df,type_instance=run-lock value=5242880 1414080767000000000\",\n\t\"interface_rx,host=pf1-62-210-94-173,type=if_errors,type_instance=dummy0 value=0 1414080767000000000\",\n\t\"interface_tx,host=pf1-62-210-94-173,type=if_errors,type_instance=dummy0 value=0 1414080767000000000\",\n\t\"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=wait value=0 1414080767000000000\",\n\t\"cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=interrupt value=306 1414080767000000000\",\n}\n\n// Taken from /usr/share/collectd/types.db on a Ubuntu system\nvar typesDBText = `\nabsolute\t\tvalue:ABSOLUTE:0:U\napache_bytes\t\tvalue:DERIVE:0:U\napache_connections\tvalue:GAUGE:0:65535\napache_idle_workers\tvalue:GAUGE:0:65535\napache_requests\t\tvalue:DERIVE:0:U\napache_scoreboard\tvalue:GAUGE:0:65535\nath_nodes\t\tvalue:GAUGE:0:65535\nath_stat\t\tvalue:DERIVE:0:U\nbackends\t\tvalue:GAUGE:0:65535\nbitrate\t\t\tvalue:GAUGE:0:4294967295\nbytes\t\t\tvalue:GAUGE:0:U\ncache_eviction\t\tvalue:DERIVE:0:U\ncache_operation\t\tvalue:DERIVE:0:U\ncache_ratio\t\tvalue:GAUGE:0:100\ncache_result\t\tvalue:DERIVE:0:U\ncache_size\t\tvalue:GAUGE:0:4294967295\ncharge\t\t\tvalue:GAUGE:0:U\ncompression_ratio\tvalue:GAUGE:0:2\ncompression\t\tuncompressed:DERIVE:0:U, 
compressed:DERIVE:0:U\nconnections\t\tvalue:DERIVE:0:U\nconntrack\t\tvalue:GAUGE:0:4294967295\ncontextswitch\t\tvalue:DERIVE:0:U\ncounter\t\t\tvalue:COUNTER:U:U\ncpufreq\t\t\tvalue:GAUGE:0:U\ncpu\t\t\tvalue:DERIVE:0:U\ncurrent_connections\tvalue:GAUGE:0:U\ncurrent_sessions\tvalue:GAUGE:0:U\ncurrent\t\t\tvalue:GAUGE:U:U\ndelay\t\t\tvalue:GAUGE:-1000000:1000000\nderive\t\t\tvalue:DERIVE:0:U\ndf_complex\t\tvalue:GAUGE:0:U\ndf_inodes\t\tvalue:GAUGE:0:U\ndf\t\t\tused:GAUGE:0:1125899906842623, free:GAUGE:0:1125899906842623\ndisk_latency\t\tread:GAUGE:0:U, write:GAUGE:0:U\ndisk_merged\t\tread:DERIVE:0:U, write:DERIVE:0:U\ndisk_octets\t\tread:DERIVE:0:U, write:DERIVE:0:U\ndisk_ops_complex\tvalue:DERIVE:0:U\ndisk_ops\t\tread:DERIVE:0:U, write:DERIVE:0:U\ndisk_time\t\tread:DERIVE:0:U, write:DERIVE:0:U\ndns_answer\t\tvalue:DERIVE:0:U\ndns_notify\t\tvalue:DERIVE:0:U\ndns_octets\t\tqueries:DERIVE:0:U, responses:DERIVE:0:U\ndns_opcode\t\tvalue:DERIVE:0:U\ndns_qtype_cached\tvalue:GAUGE:0:4294967295\ndns_qtype\t\tvalue:DERIVE:0:U\ndns_query\t\tvalue:DERIVE:0:U\ndns_question\t\tvalue:DERIVE:0:U\ndns_rcode\t\tvalue:DERIVE:0:U\ndns_reject\t\tvalue:DERIVE:0:U\ndns_request\t\tvalue:DERIVE:0:U\ndns_resolver\t\tvalue:DERIVE:0:U\ndns_response\t\tvalue:DERIVE:0:U\ndns_transfer\t\tvalue:DERIVE:0:U\ndns_update\t\tvalue:DERIVE:0:U\ndns_zops\t\tvalue:DERIVE:0:U\nduration\t\tseconds:GAUGE:0:U\nemail_check\t\tvalue:GAUGE:0:U\nemail_count\t\tvalue:GAUGE:0:U\nemail_size\t\tvalue:GAUGE:0:U\nentropy\t\t\tvalue:GAUGE:0:4294967295\nfanspeed\t\tvalue:GAUGE:0:U\nfile_size\t\tvalue:GAUGE:0:U\nfiles\t\t\tvalue:GAUGE:0:U\nfork_rate\t\tvalue:DERIVE:0:U\nfrequency_offset\tvalue:GAUGE:-1000000:1000000\nfrequency\t\tvalue:GAUGE:0:U\nfscache_stat\t\tvalue:DERIVE:0:U\ngauge\t\t\tvalue:GAUGE:U:U\nhash_collisions\t\tvalue:DERIVE:0:U\nhttp_request_methods\tvalue:DERIVE:0:U\nhttp_requests\t\tvalue:DERIVE:0:U\nhttp_response_codes\tvalue:DERIVE:0:U\nhumidity\t\tvalue:GAUGE:0:100\nif_collisions\t\tvalue:DERIVE:0:U\nif_d
ropped\t\trx:DERIVE:0:U, tx:DERIVE:0:U\nif_errors\t\trx:DERIVE:0:U, tx:DERIVE:0:U\nif_multicast\t\tvalue:DERIVE:0:U\nif_octets\t\trx:DERIVE:0:U, tx:DERIVE:0:U\nif_packets\t\trx:DERIVE:0:U, tx:DERIVE:0:U\nif_rx_errors\t\tvalue:DERIVE:0:U\nif_rx_octets\t\tvalue:DERIVE:0:U\nif_tx_errors\t\tvalue:DERIVE:0:U\nif_tx_octets\t\tvalue:DERIVE:0:U\ninvocations\t\tvalue:DERIVE:0:U\nio_octets\t\trx:DERIVE:0:U, tx:DERIVE:0:U\nio_packets\t\trx:DERIVE:0:U, tx:DERIVE:0:U\nipt_bytes\t\tvalue:DERIVE:0:U\nipt_packets\t\tvalue:DERIVE:0:U\nirq\t\t\tvalue:DERIVE:0:U\nlatency\t\t\tvalue:GAUGE:0:U\nlinks\t\t\tvalue:GAUGE:0:U\nload\t\t\tshortterm:GAUGE:0:5000, midterm:GAUGE:0:5000, longterm:GAUGE:0:5000\nmd_disks\t\tvalue:GAUGE:0:U\nmemcached_command\tvalue:DERIVE:0:U\nmemcached_connections\tvalue:GAUGE:0:U\nmemcached_items\t\tvalue:GAUGE:0:U\nmemcached_octets\trx:DERIVE:0:U, tx:DERIVE:0:U\nmemcached_ops\t\tvalue:DERIVE:0:U\nmemory\t\t\tvalue:GAUGE:0:281474976710656\nmultimeter\t\tvalue:GAUGE:U:U\nmutex_operations\tvalue:DERIVE:0:U\nmysql_commands\t\tvalue:DERIVE:0:U\nmysql_handler\t\tvalue:DERIVE:0:U\nmysql_locks\t\tvalue:DERIVE:0:U\nmysql_log_position\tvalue:DERIVE:0:U\nmysql_octets\t\trx:DERIVE:0:U, tx:DERIVE:0:U\nnfs_procedure\t\tvalue:DERIVE:0:U\nnginx_connections\tvalue:GAUGE:0:U\nnginx_requests\t\tvalue:DERIVE:0:U\nnode_octets\t\trx:DERIVE:0:U, 
tx:DERIVE:0:U\nnode_rssi\t\tvalue:GAUGE:0:255\nnode_stat\t\tvalue:DERIVE:0:U\nnode_tx_rate\t\tvalue:GAUGE:0:127\nobjects\t\t\tvalue:GAUGE:0:U\noperations\t\tvalue:DERIVE:0:U\npercent\t\t\tvalue:GAUGE:0:100.1\npercent_bytes\t\tvalue:GAUGE:0:100.1\npercent_inodes\t\tvalue:GAUGE:0:100.1\npf_counters\t\tvalue:DERIVE:0:U\npf_limits\t\tvalue:DERIVE:0:U\npf_source\t\tvalue:DERIVE:0:U\npf_states\t\tvalue:GAUGE:0:U\npf_state\t\tvalue:DERIVE:0:U\npg_blks\t\t\tvalue:DERIVE:0:U\npg_db_size\t\tvalue:GAUGE:0:U\npg_n_tup_c\t\tvalue:DERIVE:0:U\npg_n_tup_g\t\tvalue:GAUGE:0:U\npg_numbackends\t\tvalue:GAUGE:0:U\npg_scan\t\t\tvalue:DERIVE:0:U\npg_xact\t\t\tvalue:DERIVE:0:U\nping_droprate\t\tvalue:GAUGE:0:100\nping_stddev\t\tvalue:GAUGE:0:65535\nping\t\t\tvalue:GAUGE:0:65535\nplayers\t\t\tvalue:GAUGE:0:1000000\npower\t\t\tvalue:GAUGE:0:U\nprotocol_counter\tvalue:DERIVE:0:U\nps_code\t\t\tvalue:GAUGE:0:9223372036854775807\nps_count\t\tprocesses:GAUGE:0:1000000, threads:GAUGE:0:1000000\nps_cputime\t\tuser:DERIVE:0:U, syst:DERIVE:0:U\nps_data\t\t\tvalue:GAUGE:0:9223372036854775807\nps_disk_octets\t\tread:DERIVE:0:U, write:DERIVE:0:U\nps_disk_ops\t\tread:DERIVE:0:U, write:DERIVE:0:U\nps_pagefaults\t\tminflt:DERIVE:0:U, majflt:DERIVE:0:U\nps_rss\t\t\tvalue:GAUGE:0:9223372036854775807\nps_stacksize\t\tvalue:GAUGE:0:9223372036854775807\nps_state\t\tvalue:GAUGE:0:65535\nps_vm\t\t\tvalue:GAUGE:0:9223372036854775807\nqueue_length\t\tvalue:GAUGE:0:U\nrecords\t\t\tvalue:GAUGE:0:U\nrequests\t\tvalue:GAUGE:0:U\nresponse_time\t\tvalue:GAUGE:0:U\nresponse_code\t\tvalue:GAUGE:0:U\nroute_etx\t\tvalue:GAUGE:0:U\nroute_metric\t\tvalue:GAUGE:0:U\nroutes\t\t\tvalue:GAUGE:0:U\nserial_octets\t\trx:DERIVE:0:U, 
tx:DERIVE:0:U\nsignal_noise\t\tvalue:GAUGE:U:0\nsignal_power\t\tvalue:GAUGE:U:0\nsignal_quality\t\tvalue:GAUGE:0:U\nsnr\t\t\tvalue:GAUGE:0:U\nspam_check\t\tvalue:GAUGE:0:U\nspam_score\t\tvalue:GAUGE:U:U\nspl\t\t\tvalue:GAUGE:U:U\nswap_io\t\t\tvalue:DERIVE:0:U\nswap\t\t\tvalue:GAUGE:0:1099511627776\ntcp_connections\t\tvalue:GAUGE:0:4294967295\ntemperature\t\tvalue:GAUGE:U:U\nthreads\t\t\tvalue:GAUGE:0:U\ntime_dispersion\t\tvalue:GAUGE:-1000000:1000000\ntimeleft\t\tvalue:GAUGE:0:U\ntime_offset\t\tvalue:GAUGE:-1000000:1000000\ntotal_bytes\t\tvalue:DERIVE:0:U\ntotal_connections\tvalue:DERIVE:0:U\ntotal_objects\t\tvalue:DERIVE:0:U\ntotal_operations\tvalue:DERIVE:0:U\ntotal_requests\t\tvalue:DERIVE:0:U\ntotal_sessions\t\tvalue:DERIVE:0:U\ntotal_threads\t\tvalue:DERIVE:0:U\ntotal_time_in_ms\tvalue:DERIVE:0:U\ntotal_values\t\tvalue:DERIVE:0:U\nuptime\t\t\tvalue:GAUGE:0:4294967295\nusers\t\t\tvalue:GAUGE:0:65535\nvcl\t\t\tvalue:GAUGE:0:65535\nvcpu\t\t\tvalue:GAUGE:0:U\nvirt_cpu_total\t\tvalue:DERIVE:0:U\nvirt_vcpu\t\tvalue:DERIVE:0:U\nvmpage_action\t\tvalue:DERIVE:0:U\nvmpage_faults\t\tminflt:DERIVE:0:U, majflt:DERIVE:0:U\nvmpage_io\t\tin:DERIVE:0:U, out:DERIVE:0:U\nvmpage_number\t\tvalue:GAUGE:0:4294967295\nvolatile_changes\tvalue:GAUGE:0:U\nvoltage_threshold\tvalue:GAUGE:U:U, threshold:GAUGE:U:U\nvoltage\t\t\tvalue:GAUGE:U:U\nvs_memory\t\tvalue:GAUGE:0:9223372036854775807\nvs_processes\t\tvalue:GAUGE:0:65535\nvs_threads\t\tvalue:GAUGE:0:65535\n#\n# Legacy types\n# (required for the v5 upgrade target)\n#\narc_counts\t\tdemand_data:COUNTER:0:U, demand_metadata:COUNTER:0:U, prefetch_data:COUNTER:0:U, prefetch_metadata:COUNTER:0:U\narc_l2_bytes\t\tread:COUNTER:0:U, write:COUNTER:0:U\narc_l2_size\t\tvalue:GAUGE:0:U\narc_ratio\t\tvalue:GAUGE:0:U\narc_size\t\tcurrent:GAUGE:0:U, target:GAUGE:0:U, minlimit:GAUGE:0:U, maxlimit:GAUGE:0:U\nmysql_qcache\t\thits:COUNTER:0:U, inserts:COUNTER:0:U, not_cached:COUNTER:0:U, lowmem_prunes:COUNTER:0:U, 
queries_in_cache:GAUGE:0:U\nmysql_threads\t\trunning:GAUGE:0:U, connected:GAUGE:0:U, cached:GAUGE:0:U, created:COUNTER:0:U\n`\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/collectd/test_client/README.md",
    "content": "collectD Client\n============\nThis directory contains code for generating collectd load.\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/collectd/test_client/client.go",
    "content": "package main\n\nimport (\n\t\"collectd.org/api\"\n\t\"collectd.org/network\"\n\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"math/rand\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n)\n\nvar nMeasurments = flag.Int(\"m\", 1, \"Number of measurements\")\nvar tagVariance = flag.Int(\"v\", 1, \"Number of values per tag. Client is fixed at one tag\")\nvar rate = flag.Int(\"r\", 1, \"Number of points per second\")\nvar total = flag.Int(\"t\", -1, \"Total number of points to send (default is no limit)\")\nvar host = flag.String(\"u\", \"127.0.0.1:25826\", \"Destination host in the form host:port\")\n\nfunc main() {\n\tflag.Parse()\n\n\tconn, err := network.Dial(*host, network.ClientOptions{})\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\tdefer conn.Close()\n\n\trateLimiter := make(chan int, *rate)\n\n\tgo func() {\n\t\tticker := time.NewTicker(time.Second)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tfor i := 0; i < *rate; i++ {\n\t\t\t\t\trateLimiter <- i\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tnSent := 0\n\tfor {\n\t\tif nSent >= *total && *total > 0 {\n\t\t\tbreak\n\t\t}\n\t\t<-rateLimiter\n\n\t\tvl := api.ValueList{\n\t\t\tIdentifier: api.Identifier{\n\t\t\t\tHost:   \"tagvalue\" + strconv.Itoa(int(rand.Int31n(int32(*tagVariance)))),\n\t\t\t\tPlugin: \"golang\" + strconv.Itoa(int(rand.Int31n(int32(*nMeasurments)))),\n\t\t\t\tType:   \"gauge\",\n\t\t\t},\n\t\t\tTime:     time.Now(),\n\t\t\tInterval: 10 * time.Second,\n\t\t\tValues:   []api.Value{api.Gauge(42.0)},\n\t\t}\n\t\tctx := context.TODO()\n\t\tif err := conn.Write(ctx, &vl); err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tconn.Flush()\n\t\tnSent = nSent + 1\n\t}\n\n\tfmt.Println(\"Number of points sent:\", nSent)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/continuous_querier/config.go",
    "content": "package continuous_querier\n\nimport (\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n\t\"github.com/influxdata/influxdb/toml\"\n)\n\n// Default values for aspects of interval computation.\nconst (\n\t// The default value of how often to check whether any CQs need to be run.\n\tDefaultRunInterval = time.Second\n)\n\n// Config represents a configuration for the continuous query service.\ntype Config struct {\n\t// Enables logging in CQ service to display when CQ's are processed and how many points are wrote.\n\tLogEnabled bool `toml:\"log-enabled\"`\n\n\t// If this flag is set to false, both the brokers and data nodes should ignore any CQ processing.\n\tEnabled bool `toml:\"enabled\"`\n\n\t// Run interval for checking continuous queries. This should be set to the least common factor\n\t// of the interval for running continuous queries. If you only aggregate continuous queries\n\t// every minute, this should be set to 1 minute. The default is set to '1s' so the interval\n\t// is compatible with most aggregations.\n\tRunInterval toml.Duration `toml:\"run-interval\"`\n}\n\n// NewConfig returns a new instance of Config with defaults.\nfunc NewConfig() Config {\n\treturn Config{\n\t\tLogEnabled:  true,\n\t\tEnabled:     true,\n\t\tRunInterval: toml.Duration(DefaultRunInterval),\n\t}\n}\n\n// Validate returns an error if the Config is invalid.\nfunc (c Config) Validate() error {\n\tif !c.Enabled {\n\t\treturn nil\n\t}\n\n\t// TODO: Should we enforce a minimum interval?\n\t// Polling every nanosecond, for instance, will greatly impact performance.\n\tif c.RunInterval <= 0 {\n\t\treturn errors.New(\"run-interval must be positive\")\n\t}\n\n\treturn nil\n}\n\n// Diagnostics returns a diagnostics representation of a subset of the Config.\nfunc (c Config) Diagnostics() (*diagnostics.Diagnostics, error) {\n\tif !c.Enabled {\n\t\treturn diagnostics.RowFromMap(map[string]interface{}{\n\t\t\t\"enabled\": false,\n\t\t}), 
nil\n\t}\n\n\treturn diagnostics.RowFromMap(map[string]interface{}{\n\t\t\"enabled\":      true,\n\t\t\"run-interval\": c.RunInterval,\n\t}), nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/continuous_querier/config_test.go",
    "content": "package continuous_querier_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/influxdata/influxdb/services/continuous_querier\"\n)\n\nfunc TestConfig_Parse(t *testing.T) {\n\t// Parse configuration.\n\tvar c continuous_querier.Config\n\tif _, err := toml.Decode(`\nrun-interval = \"1m\"\nenabled = true\n`, &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Validate configuration.\n\tif time.Duration(c.RunInterval) != time.Minute {\n\t\tt.Fatalf(\"unexpected run interval: %v\", c.RunInterval)\n\t} else if c.Enabled != true {\n\t\tt.Fatalf(\"unexpected enabled: %v\", c.Enabled)\n\t}\n}\n\nfunc TestConfig_Validate(t *testing.T) {\n\tc := continuous_querier.NewConfig()\n\tif err := c.Validate(); err != nil {\n\t\tt.Fatalf(\"unexpected validation fail from NewConfig: %s\", err)\n\t}\n\n\tc = continuous_querier.NewConfig()\n\tc.RunInterval = 0\n\tif err := c.Validate(); err == nil {\n\t\tt.Fatal(\"expected error for run-interval = 0, got nil\")\n\t}\n\n\tc = continuous_querier.NewConfig()\n\tc.RunInterval *= -1\n\tif err := c.Validate(); err == nil {\n\t\tt.Fatal(\"expected error for negative run-interval, got nil\")\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/continuous_querier/continuous_queries.md",
    "content": "# Continuous Queries\n\nThis document lays out continuous queries and a proposed architecture for how they'll work within an InfluxDB cluster.\n\n## Definition of Continuous Queries\n\nContinuous queries serve two purposes in InfluxDB:\n\n1. Combining many series into a single series (i.e. removing 1 or more tag dimensions to make queries more efficient)\n2. Aggregating and downsampling series\n\nThe purpose of both types of continuous query is to duplicate or downsample data automatically in the background, to make querying their results fast and efficient. Think of them as another way to create indexes on data.\n\nGenerally, there are continuous queries that create copies of data into another measurement or tagset, and queries that downsample and aggregate data. The only difference between the two types is if the query has a `GROUP BY time` clause.\n\nBefore we get to the continuous query examples, we need to define the `INTO` syntax of queries.\n\n### INTO\n\n`INTO` is a method for running a query and having it output into either another measurement name, retention policy, or database. The syntax looks like this:\n\n```sql\nSELECT *\nINTO [<retention policy>.]<measurement> [ON <database>]\nFROM <measurement>\n[WHERE ...]\n[GROUP BY ...]\n```\n\nThe syntax states that the retention policy, database, where clause, and group by clause are all optional. If a retention policy isn't specified, the database's default retention policy will be written into. If the database isn't specified, the database the query is running from will be written into.\n\nBy selecting specific fields, `INTO` can merge many series into one that will go into either a new measurement, retention policy, or database. For example:\n\n```sql\nSELECT mean(value) as value, region\nINTO \"1h.cpu_load\"\nFROM cpu_load\nGROUP BY time(1h), region\n```\n\nThat will give 1h summaries of the mean value of the `cpu_load` for each `region`. 
Specifying `region` in the `GROUP BY` clause is unnecessary since having it in the `SELECT` clause forces it to be grouped by that tag, we've just included it in the example for clarity.\n\nWith `SELECT ... INTO`, fields will be written as fields and tags will be written as tags.\n\n### Continuous Query Syntax\n\nThe `INTO` queries run once. Continuous queries will turn `INTO` queries into something that run in the background in the cluster. They're kind of like triggers in SQL.\n\n```sql\nCREATE CONTINUOUS QUERY \"1h_cpu_load\"\nON database_name\nBEGIN\n  SELECT mean(value) as value, region\n  INTO \"1h.cpu_load\"\n  FROM cpu_load\n  GROUP BY time(1h), region\nEND\n```\n\nOr chain them together:\n\n```sql\nCREATE CONTINUOUS QUERY \"10m_event_count\"\nON database_name\nBEGIN\n  SELECT count(value)\n  INTO \"10m.events\"\n  FROM events\n  GROUP BY time(10m)\nEND\n\n-- this selects from the output of one continuous query and outputs to another series\nCREATE CONTINUOUS QUERY \"1h_event_count\"\nON database_name\nBEGIN\n  SELECT sum(count) as count\n  INTO \"1h.events\"\n  FROM events\n  GROUP BY time(1h)\nEND\n```\n\nOr multiple aggregations from all series in a measurement. This example assumes you have a retention policy named `1h`.\n\n```sql\nCREATE CONTINUOUS QUERY \"1h_cpu_load\"\nON database_name\nBEGIN\n  SELECT mean(value), percentile(80, value) as percentile_80, percentile(95, value) as percentile_95\n  INTO \"1h.cpu_load\"\n  FROM cpu_load\n  GROUP BY time(1h), *\nEND\n```\n\nThe `GROUP BY *` indicates that we want to group by the tagset of the points written in. The same tags will be written to the output series. 
The multiple aggregates in the `SELECT` clause (percentile, mean) will be written in as fields to the resulting series.\n\nShowing what continuous queries we have:\n\n```sql\nSHOW CONTINUOUS QUERIES\n```\n\nDropping continuous queries:\n\n```sql\nDROP CONTINUOUS QUERY <name> ON <database>\n```\n\n### Security\n\nTo create or drop a continuous query, the user must be an admin.\n\n### Limitations\n\nIn order to prevent cycles and endless copying of data, the following limitation is enforced on continuous queries at create time:\n\n*The output of a continuous query must go to either a different measurement or to a different retention policy.*\n\nIn theory they'd still be able to create a cycle with multiple continuous queries. We should check for these and disallow.\n\n## Proposed Architecture\n\nContinuous queries should be stored in the metastore cluster wide. That is, they amount to a database schema that should be stored in every server in a cluster.\n\nContinuous queries will have to be handled in a different way for two different use cases: those that simply copy data (CQs without a group by time) and those that aggregate and downsample data (those with a group by time).\n\n### No GROUP BY time\n\nFor CQs that have no `GROUP BY time` clause, they should be evaluated at the data node as part of the write. The single write should create any other writes for the CQ and submit those in the same request to the brokers to ensure that all writes succeed (both the original and the new CQ writes) or none do.\n\nI imagine the process going something like this:\n\n1. Convert the data point into its compact form `<series id><time><values>`\n2. For each CQ on the measurement and retention policy without a `GROUP BY time`:\n    2.1. Run the data point through a special query engine that will output 0 or 1 data point.\n    2.2. GOTO 1. for each newly generated data point\n    2.3. Write all the data points in a single call to the brokers\n    2.4. 
Return success to the user\n\nNote that for the generated data points, we need to go through and run this process against them since they can feed into different retention policies, measurements, and new tag-sets. On 2.2 I mention that the output will either be a data point or not. That's because of `WHERE` clauses on the query. However, it will never be more than a single data point.\n\nI mention that we'll need a special query engine for these types of queries. In this case, they never have an aggregate function. Any query with an aggregate function also has a group by time, and these queries by definition don't have that.\n\nThe only thing we have to worry about is which fields are being selected, and what the where clause looks like. We should be able to put the raw data point through a simple transform function that either outputs another raw point or doesn't.\n\nI think this transform function should be something separate from the regular query planner and engine. It can be in `influxQL` but it should be something fairly simple since the only purpose of these types of queries is to either filter some data out and output to a new series or transform into a new series by dropping tags.\n\n### Has GROUP BY time\n\nCQs that have a `GROUP BY time` (or aggregate CQs) will need to be handled differently.\n\nOne key point on continuous queries with a `GROUP BY time`, is that all their writes should always be `overwrite = true`. That is, they should only have a single data point for each timestamp. This distinction means that continuous queries for previous blocks of time can be safely run multiple times without duplicating data (i.e. they're idempotent).\n\nThere are two different ideas I have for how CQs with group by time could be handled. The first is through periodic updates handled by the Raft Leader. 
The second would be to expand out writes for each CQ and handle them on the data node.\n\n#### Periodic Updates\n\nIn this approach the management of how CQs run in a cluster will be centrally located on the Raft Leader. It will be responsible for orchestrating which data nodes run CQs and when.\n\nThe naive approach would be to have the leader hand out each CQ for a block of time periodically. The leader could also rerun CQ for periods of time that have recently passed. This would be an easy way to handle the \"lagging data\" problem, but it's not precise.\n\nUnfortunately, there's no easy way to tell cluster wide if there were data points written in an already passed window of time for a CQ. We might be able to add this at the data nodes and have them track it, but it would be quite a bit more work.\n\nThe easy way would just be to have CQs re-execute for periods that recently passed and have some user-configurable window of time that they stop checking after. Then we could give the user the ability to recalculate CQs ranges of time if they need to correct for some problem that occurred or the loading of a bunch of historical data.\n\nWith this approach, we'd have the metadata in the database store the last time each CQ was run. Whenever the Raft leader sent out a command to a data node to handle a CQ, the data node would use this metadata to determine which windows of time it should compute.\n\nThis approach is like what exists in 0.8, with the exception that it will automatically catch data that is lagged behind in a small window of time and give the user the ability to force recalculation.\n\n#### Expanding writes\n\nWhen a write comes into a data node, we could have it evaluated against group by CQs in addition to the non-group by ones. It would then create writes that would then go through the brokers. 
When the CQ writes arrive at the data nodes, they would have to handle each write differently depending on if it was a write to a raw series or if it was a CQ write.\n\nLet's lay out a concrete example.\n\n```sql\nCREATE CONTINUOUS QUERY \"10m_cpu_by_region\"\nON foo\nBEGIN\n  SELECT mean(value)\n  INTO cpu_by_region\n  FROM cpu\n  GROUP BY time(10m), region\nEND\n```\n\nIn this example we write values into `cpu` with the tags `region` and `host`.\n\nHere's another example CQ:\n\n```sql\nCREATE CONTINUOUS QUERY \"1h_cpu\"\nON foo\nBEGIN\n  SELECT mean(value)\n  INTO \"1h.cpu\"\n  FROM raw.cpu\n  GROUP BY time(10m), *\nEND\n```\n\nThat would output one series into the `1h` retention policy for the `cpu` measurement for every series from the `raw` retention policy and the `cpu` measurement.\n\nBoth of these examples would be handled the same way despite one being a big merge of a bunch of series into one and the other being an aggregation of series in a 1-to-1 mapping.\n\nSay we're collecting data for two hosts in a single region. Then we'd have two distinct series like this:\n\n```\n1 - cpu host=serverA region=uswest\n2 - cpu host=serverB region=uswest\n```\n\nWhenever a write came into a server, we'd look at the continuous queries and see if we needed to create new writes. If we had the two CQ examples above, we'd have to expand a single write into two more writes (one for each CQ).\n\nThe first CQ would have to create a new series:\n\n```\n3 - cpu_by_region region=uswest\n```\n\nThe second CQ would use the same series id as the write, but would send it to another retention policy (and thus shard).\n\nWe'd need to keep track of which series + retention policy combinations were the result of a CQ. When the data nodes get writes replicated downward, they would have to handle them like this:\n\n1. If write is normal, write through\n2. 
If write is CQ write, compute based on existing values, write to DB\n\n#### Approach tradeoffs\n\nThe first approach of periodically running queries would almost certainly be the easiest to implement quickly. It also has the added advantage of not putting additional load on the brokers by ballooning up the number of writes that go through the system.\n\nThe second approach is appealing because it would be accurate regardless of when writes come in. However, it would take more work and cause the number of writes going through the brokers to be multiplied by the number of continuous queries, which might not scale to where we need.\n\nAlso, if the data nodes write for every single update, the load on the underlying storage engine would go up significantly as well.\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/continuous_querier/service.go",
    "content": "// Package continuous_querier provides the continuous query service.\npackage continuous_querier // import \"github.com/influxdata/influxdb/services/continuous_querier\"\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/uber-go/zap\"\n)\n\nconst (\n\t// NoChunkingSize specifies when not to chunk results. When planning\n\t// a select statement, passing zero tells it not to chunk results.\n\t// Only applies to raw queries.\n\tNoChunkingSize = 0\n\n\t// idDelimiter is used as a delimiter when creating a unique name for a\n\t// Continuous Query.\n\tidDelimiter = string(rune(31)) // unit separator\n)\n\n// Statistics for the CQ service.\nconst (\n\tstatQueryOK   = \"queryOk\"\n\tstatQueryFail = \"queryFail\"\n)\n\n// ContinuousQuerier represents a service that executes continuous queries.\ntype ContinuousQuerier interface {\n\t// Run executes the named query in the named database.  
Blank database or name matches all.\n\tRun(database, name string, t time.Time) error\n}\n\n// metaClient is an internal interface to make testing easier.\ntype metaClient interface {\n\tAcquireLease(name string) (l *meta.Lease, err error)\n\tDatabases() []meta.DatabaseInfo\n\tDatabase(name string) *meta.DatabaseInfo\n}\n\n// RunRequest is a request to run one or more CQs.\ntype RunRequest struct {\n\t// Now tells the CQ service what the current time is.\n\tNow time.Time\n\t// CQs tells the CQ service which queries to run.\n\t// If nil, all queries will be run.\n\tCQs []string\n}\n\n// matches returns true if the CQ matches one of the requested CQs.\nfunc (rr *RunRequest) matches(cq *meta.ContinuousQueryInfo) bool {\n\tif rr.CQs == nil {\n\t\treturn true\n\t}\n\tfor _, q := range rr.CQs {\n\t\tif q == cq.Name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// Service manages continuous query execution.\ntype Service struct {\n\tMetaClient    metaClient\n\tQueryExecutor *influxql.QueryExecutor\n\tConfig        *Config\n\tRunInterval   time.Duration\n\t// RunCh can be used by clients to signal service to run CQs.\n\tRunCh          chan *RunRequest\n\tLogger         zap.Logger\n\tloggingEnabled bool\n\tstats          *Statistics\n\t// lastRuns maps CQ name to last time it was run.\n\tmu       sync.RWMutex\n\tlastRuns map[string]time.Time\n\tstop     chan struct{}\n\twg       *sync.WaitGroup\n}\n\n// NewService returns a new instance of Service.\nfunc NewService(c Config) *Service {\n\ts := &Service{\n\t\tConfig:         &c,\n\t\tRunInterval:    time.Duration(c.RunInterval),\n\t\tRunCh:          make(chan *RunRequest),\n\t\tloggingEnabled: c.LogEnabled,\n\t\tLogger:         zap.New(zap.NullEncoder()),\n\t\tstats:          &Statistics{},\n\t\tlastRuns:       map[string]time.Time{},\n\t}\n\n\treturn s\n}\n\n// Open starts the service.\nfunc (s *Service) Open() error {\n\ts.Logger.Info(\"Starting continuous query service\")\n\n\tif s.stop != nil {\n\t\treturn 
nil\n\t}\n\n\tassert(s.MetaClient != nil, \"MetaClient is nil\")\n\tassert(s.QueryExecutor != nil, \"QueryExecutor is nil\")\n\n\ts.stop = make(chan struct{})\n\ts.wg = &sync.WaitGroup{}\n\ts.wg.Add(1)\n\tgo s.backgroundLoop()\n\treturn nil\n}\n\n// Close stops the service.\nfunc (s *Service) Close() error {\n\tif s.stop == nil {\n\t\treturn nil\n\t}\n\tclose(s.stop)\n\ts.wg.Wait()\n\ts.wg = nil\n\ts.stop = nil\n\treturn nil\n}\n\n// WithLogger sets the logger on the service.\nfunc (s *Service) WithLogger(log zap.Logger) {\n\ts.Logger = log.With(zap.String(\"service\", \"continuous_querier\"))\n}\n\n// Statistics maintains the statistics for the continuous query service.\ntype Statistics struct {\n\tQueryOK   int64\n\tQueryFail int64\n}\n\n// Statistics returns statistics for periodic monitoring.\nfunc (s *Service) Statistics(tags map[string]string) []models.Statistic {\n\treturn []models.Statistic{{\n\t\tName: \"cq\",\n\t\tTags: tags,\n\t\tValues: map[string]interface{}{\n\t\t\tstatQueryOK:   atomic.LoadInt64(&s.stats.QueryOK),\n\t\t\tstatQueryFail: atomic.LoadInt64(&s.stats.QueryFail),\n\t\t},\n\t}}\n}\n\n// Run runs the specified continuous query, or all CQs if none is specified.\nfunc (s *Service) Run(database, name string, t time.Time) error {\n\tvar dbs []meta.DatabaseInfo\n\n\tif database != \"\" {\n\t\t// Find the requested database.\n\t\tdb := s.MetaClient.Database(database)\n\t\tif db == nil {\n\t\t\treturn influxql.ErrDatabaseNotFound(database)\n\t\t}\n\t\tdbs = append(dbs, *db)\n\t} else {\n\t\t// Get all databases.\n\t\tdbs = s.MetaClient.Databases()\n\t}\n\n\t// Loop through databases.\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tfor _, db := range dbs {\n\t\t// Loop through CQs in each DB executing the ones that match name.\n\t\tfor _, cq := range db.ContinuousQueries {\n\t\t\tif name == \"\" || cq.Name == name {\n\t\t\t\t// Remove the last run time for the CQ\n\t\t\t\tid := fmt.Sprintf(\"%s%s%s\", db.Name, idDelimiter, cq.Name)\n\t\t\t\tif _, ok := 
s.lastRuns[id]; ok {\n\t\t\t\t\tdelete(s.lastRuns, id)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Signal the background routine to run CQs.\n\ts.RunCh <- &RunRequest{Now: t}\n\n\treturn nil\n}\n\n// backgroundLoop runs on a go routine and periodically executes CQs.\nfunc (s *Service) backgroundLoop() {\n\tleaseName := \"continuous_querier\"\n\tt := time.NewTimer(s.RunInterval)\n\tdefer t.Stop()\n\tdefer s.wg.Done()\n\tfor {\n\t\tselect {\n\t\tcase <-s.stop:\n\t\t\ts.Logger.Info(\"continuous query service terminating\")\n\t\t\treturn\n\t\tcase req := <-s.RunCh:\n\t\t\tif !s.hasContinuousQueries() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, err := s.MetaClient.AcquireLease(leaseName); err == nil {\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"running continuous queries by request for time: %v\", req.Now))\n\t\t\t\ts.runContinuousQueries(req)\n\t\t\t}\n\t\tcase <-t.C:\n\t\t\tif !s.hasContinuousQueries() {\n\t\t\t\tt.Reset(s.RunInterval)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, err := s.MetaClient.AcquireLease(leaseName); err == nil {\n\t\t\t\ts.runContinuousQueries(&RunRequest{Now: time.Now()})\n\t\t\t}\n\t\t\tt.Reset(s.RunInterval)\n\t\t}\n\t}\n}\n\n// hasContinuousQueries returns true if any CQs exist.\nfunc (s *Service) hasContinuousQueries() bool {\n\t// Get list of all databases.\n\tdbs := s.MetaClient.Databases()\n\t// Loop through all databases executing CQs.\n\tfor _, db := range dbs {\n\t\tif len(db.ContinuousQueries) > 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// runContinuousQueries gets CQs from the meta store and runs them.\nfunc (s *Service) runContinuousQueries(req *RunRequest) {\n\t// Get list of all databases.\n\tdbs := s.MetaClient.Databases()\n\t// Loop through all databases executing CQs.\n\tfor _, db := range dbs {\n\t\t// TODO: distribute across nodes\n\t\tfor _, cq := range db.ContinuousQueries {\n\t\t\tif !req.matches(&cq) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif ok, err := s.ExecuteContinuousQuery(&db, &cq, req.Now); err != nil 
{\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"error executing query: %s: err = %s\", cq.Query, err))\n\t\t\t\tatomic.AddInt64(&s.stats.QueryFail, 1)\n\t\t\t} else if ok {\n\t\t\t\tatomic.AddInt64(&s.stats.QueryOK, 1)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// ExecuteContinuousQuery may execute a single CQ. This will return false if there were no errors and the CQ was not run.\nfunc (s *Service) ExecuteContinuousQuery(dbi *meta.DatabaseInfo, cqi *meta.ContinuousQueryInfo, now time.Time) (bool, error) {\n\t// TODO: re-enable stats\n\t//s.stats.Inc(\"continuousQueryExecuted\")\n\n\t// Local wrapper / helper.\n\tcq, err := NewContinuousQuery(dbi.Name, cqi)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// Set the time zone on the now time if the CQ has one. Otherwise, force UTC.\n\tnow = now.UTC()\n\tif cq.q.Location != nil {\n\t\tnow = now.In(cq.q.Location)\n\t}\n\n\t// Get the last time this CQ was run from the service's cache.\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tid := fmt.Sprintf(\"%s%s%s\", dbi.Name, idDelimiter, cqi.Name)\n\tcq.LastRun, cq.HasRun = s.lastRuns[id]\n\n\t// Set the retention policy to default if it wasn't specified in the query.\n\tif cq.intoRP() == \"\" {\n\t\tcq.setIntoRP(dbi.DefaultRetentionPolicy)\n\t}\n\n\t// Get the group by interval.\n\tinterval, err := cq.q.GroupByInterval()\n\tif err != nil {\n\t\treturn false, err\n\t} else if interval == 0 {\n\t\treturn false, nil\n\t}\n\n\t// Get the group by offset.\n\toffset, err := cq.q.GroupByOffset()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// See if this query needs to be run.\n\trun, nextRun, err := cq.shouldRunContinuousQuery(now, interval)\n\tif err != nil {\n\t\treturn false, err\n\t} else if !run {\n\t\treturn false, nil\n\t}\n\n\tresampleEvery := interval\n\tif cq.Resample.Every != 0 {\n\t\tresampleEvery = cq.Resample.Every\n\t}\n\n\t// We're about to run the query so store the current time closest to the nearest interval.\n\t// If all is going well, this time should be the same as 
nextRun.\n\tcq.LastRun = truncate(now.Add(-offset), resampleEvery).Add(offset)\n\ts.lastRuns[id] = cq.LastRun\n\n\t// Retrieve the oldest interval we should calculate based on the next time\n\t// interval. We do this instead of using the current time just in case any\n\t// time intervals were missed. The start time of the oldest interval is what\n\t// we use as the start time.\n\tresampleFor := interval\n\tif cq.Resample.For != 0 {\n\t\tresampleFor = cq.Resample.For\n\t} else if interval < resampleEvery {\n\t\tresampleFor = resampleEvery\n\t}\n\n\t// If the resample interval is greater than the interval of the query, use the\n\t// query interval instead.\n\tif interval < resampleEvery {\n\t\tresampleEvery = interval\n\t}\n\n\t// Calculate and set the time range for the query.\n\tstartTime := truncate(nextRun.Add(interval-resampleFor-offset-1), interval).Add(offset)\n\tendTime := truncate(now.Add(interval-resampleEvery-offset), interval).Add(offset)\n\tif !endTime.After(startTime) {\n\t\t// Exit early since there is no time interval.\n\t\treturn false, nil\n\t}\n\n\tif err := cq.q.SetTimeRange(startTime, endTime); err != nil {\n\t\ts.Logger.Info(fmt.Sprintf(\"error setting time range: %s\\n\", err))\n\t\treturn false, err\n\t}\n\n\tvar start time.Time\n\tif s.loggingEnabled {\n\t\ts.Logger.Info(fmt.Sprintf(\"executing continuous query %s (%v to %v)\", cq.Info.Name, startTime, endTime))\n\t\tstart = time.Now()\n\t}\n\n\t// Do the actual processing of the query & writing of results.\n\tif err := s.runContinuousQueryAndWriteResult(cq); err != nil {\n\t\ts.Logger.Info(fmt.Sprintf(\"error: %s. 
running: %s\\n\", err, cq.q.String()))\n\t\treturn false, err\n\t}\n\n\tif s.loggingEnabled {\n\t\ts.Logger.Info(fmt.Sprintf(\"finished continuous query %s (%v to %v) in %s\", cq.Info.Name, startTime, endTime, time.Since(start)))\n\t}\n\treturn true, nil\n}\n\n// runContinuousQueryAndWriteResult will run the query against the cluster and write the results back in\nfunc (s *Service) runContinuousQueryAndWriteResult(cq *ContinuousQuery) error {\n\t// Wrap the CQ's inner SELECT statement in a Query for the QueryExecutor.\n\tq := &influxql.Query{\n\t\tStatements: influxql.Statements([]influxql.Statement{cq.q}),\n\t}\n\n\tclosing := make(chan struct{})\n\tdefer close(closing)\n\n\t// Execute the SELECT.\n\tch := s.QueryExecutor.ExecuteQuery(q, influxql.ExecutionOptions{\n\t\tDatabase: cq.Database,\n\t}, closing)\n\n\t// There is only one statement, so we will only ever receive one result\n\tres, ok := <-ch\n\tif !ok {\n\t\tpanic(\"result channel was closed\")\n\t}\n\tif res.Err != nil {\n\t\treturn res.Err\n\t}\n\treturn nil\n}\n\n// ContinuousQuery is a local wrapper / helper around continuous queries.\ntype ContinuousQuery struct {\n\tDatabase string\n\tInfo     *meta.ContinuousQueryInfo\n\tHasRun   bool\n\tLastRun  time.Time\n\tResample ResampleOptions\n\tq        *influxql.SelectStatement\n}\n\nfunc (cq *ContinuousQuery) intoRP() string      { return cq.q.Target.Measurement.RetentionPolicy }\nfunc (cq *ContinuousQuery) setIntoRP(rp string) { cq.q.Target.Measurement.RetentionPolicy = rp }\n\n// ResampleOptions controls the resampling intervals and duration of this continuous query.\ntype ResampleOptions struct {\n\t// The query will be resampled at this time interval. The first query will be\n\t// performed at this time interval. If this option is not given, the resample\n\t// interval is set to the group by interval.\n\tEvery time.Duration\n\n\t// The query will continue being resampled for this time duration. 
If this\n\t// option is not given, the resample duration is the same as the group by\n\t// interval. A bucket's time is calculated based on the bucket's start time,\n\t// so a 40m resample duration with a group by interval of 10m will resample\n\t// the bucket 4 times (using the default time interval).\n\tFor time.Duration\n}\n\n// NewContinuousQuery returns a ContinuousQuery object with a parsed influxql.CreateContinuousQueryStatement.\nfunc NewContinuousQuery(database string, cqi *meta.ContinuousQueryInfo) (*ContinuousQuery, error) {\n\tstmt, err := influxql.NewParser(strings.NewReader(cqi.Query)).ParseStatement()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tq, ok := stmt.(*influxql.CreateContinuousQueryStatement)\n\tif !ok || q.Source.Target == nil || q.Source.Target.Measurement == nil {\n\t\treturn nil, errors.New(\"query isn't a valid continuous query\")\n\t}\n\n\tcquery := &ContinuousQuery{\n\t\tDatabase: database,\n\t\tInfo:     cqi,\n\t\tResample: ResampleOptions{\n\t\t\tEvery: q.ResampleEvery,\n\t\t\tFor:   q.ResampleFor,\n\t\t},\n\t\tq: q.Source,\n\t}\n\n\treturn cquery, nil\n}\n\n// shouldRunContinuousQuery returns true if the CQ should be schedule to run. 
It will use the\n// lastRunTime of the CQ and the rules for when to run set through the query to determine\n// if this CQ should be run.\nfunc (cq *ContinuousQuery) shouldRunContinuousQuery(now time.Time, interval time.Duration) (bool, time.Time, error) {\n\t// If it's not aggregated, do not run the query.\n\tif cq.q.IsRawQuery {\n\t\treturn false, cq.LastRun, errors.New(\"continuous queries must be aggregate queries\")\n\t}\n\n\t// Override the query's default run interval with the resample options.\n\tresampleEvery := interval\n\tif cq.Resample.Every != 0 {\n\t\tresampleEvery = cq.Resample.Every\n\t}\n\n\t// Determine if we should run the continuous query based on the last time it ran.\n\t// If the query never ran, execute it using the current time.\n\tif cq.HasRun {\n\t\t// Retrieve the zone offset for the previous window.\n\t\t_, startOffset := cq.LastRun.Add(-1).Zone()\n\t\tnextRun := cq.LastRun.Add(resampleEvery)\n\t\t// Retrieve the end zone offset for the end of the current interval.\n\t\tif _, endOffset := nextRun.Add(-1).Zone(); startOffset != endOffset {\n\t\t\tdiff := int64(startOffset-endOffset) * int64(time.Second)\n\t\t\tif abs(diff) < int64(resampleEvery) {\n\t\t\t\tnextRun = nextRun.Add(time.Duration(diff))\n\t\t\t}\n\t\t}\n\t\tif nextRun.UnixNano() <= now.UnixNano() {\n\t\t\treturn true, nextRun, nil\n\t\t}\n\t} else {\n\t\t// Retrieve the location from the CQ.\n\t\tloc := cq.q.Location\n\t\tif loc == nil {\n\t\t\tloc = time.UTC\n\t\t}\n\t\treturn true, now.In(loc), nil\n\t}\n\n\treturn false, cq.LastRun, nil\n}\n\n// assert will panic with a given formatted message if the given condition is false.\nfunc assert(condition bool, msg string, v ...interface{}) {\n\tif !condition {\n\t\tpanic(fmt.Sprintf(\"assert failed: \"+msg, v...))\n\t}\n}\n\n// truncate truncates the time based on the unix timestamp instead of the\n// Go time library. 
The Go time library has the start of the week on Monday\n// while the start of the week for the unix timestamp is a Thursday.\nfunc truncate(ts time.Time, d time.Duration) time.Time {\n\tt := ts.UnixNano()\n\toffset := zone(ts)\n\tdt := (t + offset) % int64(d)\n\tif dt < 0 {\n\t\t// Negative modulo rounds up instead of down, so offset\n\t\t// with the duration.\n\t\tdt += int64(d)\n\t}\n\tts = time.Unix(0, t-dt).In(ts.Location())\n\tif adjustedOffset := zone(ts); adjustedOffset != offset {\n\t\tdiff := offset - adjustedOffset\n\t\tif abs(diff) < int64(d) {\n\t\t\tts = ts.Add(time.Duration(diff))\n\t\t}\n\t}\n\treturn ts\n}\n\nfunc zone(ts time.Time) int64 {\n\t_, offset := ts.Zone()\n\treturn int64(offset) * int64(time.Second)\n}\n\nfunc abs(v int64) int64 {\n\tif v < 0 {\n\t\treturn -v\n\t}\n\treturn v\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/continuous_querier/service_test.go",
    "content": "package continuous_querier\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/uber-go/zap\"\n)\n\nvar (\n\terrExpected   = errors.New(\"expected error\")\n\terrUnexpected = errors.New(\"unexpected error\")\n)\n\n// Test closing never opened, open, open already open, close, and close already closed.\nfunc TestOpenAndClose(t *testing.T) {\n\ts := NewTestService(t)\n\n\tif err := s.Close(); err != nil {\n\t\tt.Error(err)\n\t} else if err = s.Open(); err != nil {\n\t\tt.Error(err)\n\t} else if err = s.Open(); err != nil {\n\t\tt.Error(err)\n\t} else if err = s.Close(); err != nil {\n\t\tt.Error(err)\n\t} else if err = s.Close(); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\n// Test Run method.\nfunc TestContinuousQueryService_Run(t *testing.T) {\n\ts := NewTestService(t)\n\n\t// Set RunInterval high so we can trigger using Run method.\n\ts.RunInterval = 10 * time.Minute\n\n\tdone := make(chan struct{})\n\texpectCallCnt := 3\n\tcallCnt := 0\n\n\t// Set a callback for ExecuteStatement.\n\ts.QueryExecutor.StatementExecutor = &StatementExecutor{\n\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\tcallCnt++\n\t\t\tif callCnt >= expectCallCnt {\n\t\t\t\tdone <- struct{}{}\n\t\t\t}\n\t\t\tctx.Results <- &influxql.Result{}\n\t\t\treturn nil\n\t\t},\n\t}\n\n\t// Use a custom \"now\" time since the internals of last run care about\n\t// what the actual time is. 
Truncate to 10 minutes we are starting on an interval.\n\tnow := time.Now().Truncate(10 * time.Minute)\n\n\ts.Open()\n\t// Trigger service to run all CQs.\n\ts.Run(\"\", \"\", now)\n\t// Shouldn't time out.\n\tif err := wait(done, 100*time.Millisecond); err != nil {\n\t\tt.Error(err)\n\t}\n\t// This time it should timeout because ExecuteQuery should not get called again.\n\tif err := wait(done, 100*time.Millisecond); err == nil {\n\t\tt.Error(\"too many queries executed\")\n\t}\n\ts.Close()\n\n\t// Now test just one query.\n\texpectCallCnt = 1\n\tcallCnt = 0\n\ts.Open()\n\ts.Run(\"db\", \"cq\", now)\n\t// Shouldn't time out.\n\tif err := wait(done, 100*time.Millisecond); err != nil {\n\t\tt.Error(err)\n\t}\n\t// This time it should timeout because ExecuteQuery should not get called again.\n\tif err := wait(done, 100*time.Millisecond); err == nil {\n\t\tt.Error(\"too many queries executed\")\n\t}\n\ts.Close()\n}\n\nfunc TestContinuousQueryService_ResampleOptions(t *testing.T) {\n\ts := NewTestService(t)\n\tmc := NewMetaClient(t)\n\tmc.CreateDatabase(\"db\", \"\")\n\tmc.CreateContinuousQuery(\"db\", \"cq\", `CREATE CONTINUOUS QUERY cq ON db RESAMPLE EVERY 10s FOR 2m BEGIN SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(1m) END`)\n\ts.MetaClient = mc\n\n\tdb := s.MetaClient.Database(\"db\")\n\n\tcq, err := NewContinuousQuery(db.Name, &db.ContinuousQueries[0])\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if cq.Resample.Every != 10*time.Second {\n\t\tt.Errorf(\"expected resample every to be 10s, got %s\", influxql.FormatDuration(cq.Resample.Every))\n\t} else if cq.Resample.For != 2*time.Minute {\n\t\tt.Errorf(\"expected resample for 2m, got %s\", influxql.FormatDuration(cq.Resample.For))\n\t}\n\n\t// Set RunInterval high so we can trigger using Run method.\n\ts.RunInterval = 10 * time.Minute\n\n\tdone := make(chan struct{})\n\tvar expected struct {\n\t\tmin time.Time\n\t\tmax time.Time\n\t}\n\n\t// Set a callback for 
ExecuteStatement.\n\ts.QueryExecutor.StatementExecutor = &StatementExecutor{\n\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\ts := stmt.(*influxql.SelectStatement)\n\t\t\tmin, max, err := influxql.TimeRange(s.Condition, s.Location)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"unexpected error parsing time range: %s\", err)\n\t\t\t} else if !expected.min.Equal(min) || !expected.max.Equal(max) {\n\t\t\t\tt.Errorf(\"mismatched time range: got=(%s, %s) exp=(%s, %s)\", min, max, expected.min, expected.max)\n\t\t\t}\n\t\t\tdone <- struct{}{}\n\t\t\tctx.Results <- &influxql.Result{}\n\t\t\treturn nil\n\t\t},\n\t}\n\n\ts.Open()\n\tdefer s.Close()\n\n\t// Set the 'now' time to the start of a 10 minute interval. Then trigger a run.\n\t// This should trigger two queries (one for the current time interval, one for the previous).\n\tnow := time.Now().UTC().Truncate(10 * time.Minute)\n\texpected.min = now.Add(-2 * time.Minute)\n\texpected.max = now.Add(-1)\n\ts.RunCh <- &RunRequest{Now: now}\n\n\tif err := wait(done, 100*time.Millisecond); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Trigger another run 10 seconds later. Another two queries should happen,\n\t// but it will be a different two queries.\n\texpected.min = expected.min.Add(time.Minute)\n\texpected.max = expected.max.Add(time.Minute)\n\ts.RunCh <- &RunRequest{Now: now.Add(10 * time.Second)}\n\n\tif err := wait(done, 100*time.Millisecond); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Reset the time period and send the initial request at 5 seconds after the\n\t// 10 minute mark. 
There should be exactly one call since the current interval is too\n\t// young and only one interval matches the FOR duration.\n\texpected.min = now.Add(-time.Minute)\n\texpected.max = now.Add(-1)\n\ts.Run(\"\", \"\", now.Add(5*time.Second))\n\n\tif err := wait(done, 100*time.Millisecond); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Send a message 10 minutes later and ensure that the system plays catchup.\n\texpected.max = now.Add(10*time.Minute - 1)\n\ts.RunCh <- &RunRequest{Now: now.Add(10 * time.Minute)}\n\n\tif err := wait(done, 100*time.Millisecond); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// No overflow should be sent.\n\tif err := wait(done, 100*time.Millisecond); err == nil {\n\t\tt.Error(\"too many queries executed\")\n\t}\n}\n\nfunc TestContinuousQueryService_EveryHigherThanInterval(t *testing.T) {\n\ts := NewTestService(t)\n\tms := NewMetaClient(t)\n\tms.CreateDatabase(\"db\", \"\")\n\tms.CreateContinuousQuery(\"db\", \"cq\", `CREATE CONTINUOUS QUERY cq ON db RESAMPLE EVERY 1m BEGIN SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(30s) END`)\n\ts.MetaClient = ms\n\n\t// Set RunInterval high so we can trigger using Run method.\n\ts.RunInterval = 10 * time.Minute\n\n\tdone := make(chan struct{})\n\tvar expected struct {\n\t\tmin time.Time\n\t\tmax time.Time\n\t}\n\n\t// Set a callback for ExecuteQuery.\n\ts.QueryExecutor.StatementExecutor = &StatementExecutor{\n\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\ts := stmt.(*influxql.SelectStatement)\n\t\t\tmin, max, err := influxql.TimeRange(s.Condition, s.Location)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"unexpected error parsing time range: %s\", err)\n\t\t\t} else if !expected.min.Equal(min) || !expected.max.Equal(max) {\n\t\t\t\tt.Errorf(\"mismatched time range: got=(%s, %s) exp=(%s, %s)\", min, max, expected.min, expected.max)\n\t\t\t}\n\t\t\tdone <- struct{}{}\n\t\t\tctx.Results <- &influxql.Result{}\n\t\t\treturn 
nil\n\t\t},\n\t}\n\n\ts.Open()\n\tdefer s.Close()\n\n\t// Set the 'now' time to the start of a 10 minute interval. Then trigger a run.\n\t// This should trigger two queries (one for the current time interval, one for the previous)\n\t// since the default FOR interval should be EVERY, not the GROUP BY interval.\n\tnow := time.Now().Truncate(10 * time.Minute)\n\texpected.min = now.Add(-time.Minute)\n\texpected.max = now.Add(-1)\n\ts.RunCh <- &RunRequest{Now: now}\n\n\tif err := wait(done, 100*time.Millisecond); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Trigger 30 seconds later. Nothing should run.\n\ts.RunCh <- &RunRequest{Now: now.Add(30 * time.Second)}\n\n\tif err := wait(done, 100*time.Millisecond); err == nil {\n\t\tt.Fatal(\"too many queries\")\n\t}\n\n\t// Run again 1 minute later. Another two queries should run.\n\texpected.min = now\n\texpected.max = now.Add(time.Minute - 1)\n\ts.RunCh <- &RunRequest{Now: now.Add(time.Minute)}\n\n\tif err := wait(done, 100*time.Millisecond); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// No overflow should be sent.\n\tif err := wait(done, 100*time.Millisecond); err == nil {\n\t\tt.Error(\"too many queries executed\")\n\t}\n}\n\nfunc TestContinuousQueryService_GroupByOffset(t *testing.T) {\n\ts := NewTestService(t)\n\tmc := NewMetaClient(t)\n\tmc.CreateDatabase(\"db\", \"\")\n\tmc.CreateContinuousQuery(\"db\", \"cq\", `CREATE CONTINUOUS QUERY cq ON db BEGIN SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(1m, 30s) END`)\n\ts.MetaClient = mc\n\n\t// Set RunInterval high so we can trigger using Run method.\n\ts.RunInterval = 10 * time.Minute\n\n\tdone := make(chan struct{})\n\tvar expected struct {\n\t\tmin time.Time\n\t\tmax time.Time\n\t}\n\n\t// Set a callback for ExecuteStatement.\n\ts.QueryExecutor.StatementExecutor = &StatementExecutor{\n\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\ts := stmt.(*influxql.SelectStatement)\n\t\t\tmin, max, err := 
influxql.TimeRange(s.Condition, s.Location)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"unexpected error parsing time range: %s\", err)\n\t\t\t} else if !expected.min.Equal(min) || !expected.max.Equal(max) {\n\t\t\t\tt.Errorf(\"mismatched time range: got=(%s, %s) exp=(%s, %s)\", min, max, expected.min, expected.max)\n\t\t\t}\n\t\t\tdone <- struct{}{}\n\t\t\tctx.Results <- &influxql.Result{}\n\t\t\treturn nil\n\t\t},\n\t}\n\n\ts.Open()\n\tdefer s.Close()\n\n\t// Set the 'now' time to the start of a 10 minute interval with a 30 second offset.\n\t// Then trigger a run. This should trigger two queries (one for the current time\n\t// interval, one for the previous).\n\tnow := time.Now().UTC().Truncate(10 * time.Minute).Add(30 * time.Second)\n\texpected.min = now.Add(-time.Minute)\n\texpected.max = now.Add(-1)\n\ts.RunCh <- &RunRequest{Now: now}\n\n\tif err := wait(done, 100*time.Millisecond); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\n// Test service when not the cluster leader (CQs shouldn't run).\nfunc TestContinuousQueryService_NotLeader(t *testing.T) {\n\ts := NewTestService(t)\n\t// Set RunInterval high so we can test triggering with the RunCh below.\n\ts.RunInterval = 10 * time.Second\n\ts.MetaClient.(*MetaClient).Leader = false\n\n\tdone := make(chan struct{})\n\t// Set a callback for ExecuteStatement. 
Shouldn't get called because we're not the leader.\n\ts.QueryExecutor.StatementExecutor = &StatementExecutor{\n\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\tdone <- struct{}{}\n\t\t\tctx.Results <- &influxql.Result{Err: errUnexpected}\n\t\t\treturn nil\n\t\t},\n\t}\n\n\ts.Open()\n\t// Trigger service to run CQs.\n\ts.RunCh <- &RunRequest{Now: time.Now()}\n\t// Expect timeout error because ExecuteQuery callback wasn't called.\n\tif err := wait(done, 100*time.Millisecond); err == nil {\n\t\tt.Error(err)\n\t}\n\ts.Close()\n}\n\n// Test ExecuteContinuousQuery with invalid queries.\nfunc TestExecuteContinuousQuery_InvalidQueries(t *testing.T) {\n\ts := NewTestService(t)\n\ts.QueryExecutor.StatementExecutor = &StatementExecutor{\n\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\treturn errUnexpected\n\t\t},\n\t}\n\tdbis := s.MetaClient.Databases()\n\tdbi := dbis[0]\n\tcqi := dbi.ContinuousQueries[0]\n\n\tcqi.Query = `this is not a query`\n\tif _, err := s.ExecuteContinuousQuery(&dbi, &cqi, time.Now()); err == nil {\n\t\tt.Error(\"expected error but got nil\")\n\t}\n\n\t// Valid query but invalid continuous query.\n\tcqi.Query = `SELECT * FROM cpu`\n\tif _, err := s.ExecuteContinuousQuery(&dbi, &cqi, time.Now()); err == nil {\n\t\tt.Error(\"expected error but got nil\")\n\t}\n\n\t// Group by requires aggregate.\n\tcqi.Query = `SELECT value INTO other_value FROM cpu WHERE time > now() - 1h GROUP BY time(1s)`\n\tif _, err := s.ExecuteContinuousQuery(&dbi, &cqi, time.Now()); err == nil {\n\t\tt.Error(\"expected error but got nil\")\n\t}\n}\n\n// Test the time range for different CQ durations.\nfunc TestExecuteContinuousQuery_TimeRange(t *testing.T) {\n\t// Choose a start date that is not on an interval border for anyone.\n\tnow := mustParseTime(t, \"2000-01-01T00:00:00Z\")\n\tfor _, tt := range []struct {\n\t\td          string\n\t\tstart, end 
time.Time\n\t}{\n\t\t{\n\t\t\td:     \"10s\",\n\t\t\tstart: mustParseTime(t, \"2000-01-01T00:00:00Z\"),\n\t\t\tend:   mustParseTime(t, \"2000-01-01T00:00:10Z\"),\n\t\t},\n\t\t{\n\t\t\td:     \"1m\",\n\t\t\tstart: mustParseTime(t, \"2000-01-01T00:00:00Z\"),\n\t\t\tend:   mustParseTime(t, \"2000-01-01T00:01:00Z\"),\n\t\t},\n\t\t{\n\t\t\td:     \"10m\",\n\t\t\tstart: mustParseTime(t, \"2000-01-01T00:00:00Z\"),\n\t\t\tend:   mustParseTime(t, \"2000-01-01T00:10:00Z\"),\n\t\t},\n\t\t{\n\t\t\td:     \"30m\",\n\t\t\tstart: mustParseTime(t, \"2000-01-01T00:00:00Z\"),\n\t\t\tend:   mustParseTime(t, \"2000-01-01T00:30:00Z\"),\n\t\t},\n\t\t{\n\t\t\td:     \"1h\",\n\t\t\tstart: mustParseTime(t, \"2000-01-01T00:00:00Z\"),\n\t\t\tend:   mustParseTime(t, \"2000-01-01T01:00:00Z\"),\n\t\t},\n\t\t{\n\t\t\td:     \"2h\",\n\t\t\tstart: mustParseTime(t, \"2000-01-01T00:00:00Z\"),\n\t\t\tend:   mustParseTime(t, \"2000-01-01T02:00:00Z\"),\n\t\t},\n\t\t{\n\t\t\td:     \"12h\",\n\t\t\tstart: mustParseTime(t, \"2000-01-01T00:00:00Z\"),\n\t\t\tend:   mustParseTime(t, \"2000-01-01T12:00:00Z\"),\n\t\t},\n\t\t{\n\t\t\td:     \"1d\",\n\t\t\tstart: mustParseTime(t, \"2000-01-01T00:00:00Z\"),\n\t\t\tend:   mustParseTime(t, \"2000-01-02T00:00:00Z\"),\n\t\t},\n\t\t{\n\t\t\td:     \"1w\",\n\t\t\tstart: mustParseTime(t, \"1999-12-30T00:00:00Z\"),\n\t\t\tend:   mustParseTime(t, \"2000-01-06T00:00:00Z\"),\n\t\t},\n\t} {\n\t\tt.Run(tt.d, func(t *testing.T) {\n\t\t\td, err := influxql.ParseDuration(tt.d)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"unable to parse duration: %s\", err)\n\t\t\t}\n\n\t\t\ts := NewTestService(t)\n\t\t\tmc := NewMetaClient(t)\n\t\t\tmc.CreateDatabase(\"db\", \"\")\n\t\t\tmc.CreateContinuousQuery(\"db\", \"cq\",\n\t\t\t\tfmt.Sprintf(`CREATE CONTINUOUS QUERY cq ON db BEGIN SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(%s) END`, tt.d))\n\t\t\ts.MetaClient = mc\n\n\t\t\t// Set RunInterval high so we can trigger using Run method.\n\t\t\ts.RunInterval = 10 * 
time.Minute\n\t\t\tdone := make(chan struct{})\n\n\t\t\t// Set a callback for ExecuteStatement.\n\t\t\ts.QueryExecutor.StatementExecutor = &StatementExecutor{\n\t\t\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\t\t\ts := stmt.(*influxql.SelectStatement)\n\t\t\t\t\tmin, max, err := influxql.TimeRange(s.Condition, s.Location)\n\t\t\t\t\tmax = max.Add(time.Nanosecond)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Errorf(\"unexpected error parsing time range: %s\", err)\n\t\t\t\t\t} else if !tt.start.Equal(min) || !tt.end.Equal(max) {\n\t\t\t\t\t\tt.Errorf(\"mismatched time range: got=(%s, %s) exp=(%s, %s)\", min, max, tt.start, tt.end)\n\t\t\t\t\t}\n\t\t\t\t\tdone <- struct{}{}\n\t\t\t\t\tctx.Results <- &influxql.Result{}\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t}\n\n\t\t\ts.Open()\n\t\t\tdefer s.Close()\n\n\t\t\t// Send an initial run request one nanosecond after the start to\n\t\t\t// prime the last CQ map.\n\t\t\ts.RunCh <- &RunRequest{Now: now.Add(time.Nanosecond)}\n\t\t\t// Execute the real request after the time interval.\n\t\t\ts.RunCh <- &RunRequest{Now: now.Add(d)}\n\t\t\tif err := wait(done, 100*time.Millisecond); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// Test the time range for different CQ durations.\nfunc TestExecuteContinuousQuery_TimeZone(t *testing.T) {\n\ttype test struct {\n\t\tnow        time.Time\n\t\tstart, end time.Time\n\t}\n\n\t// Choose a start date that is not on an interval border for anyone.\n\tfor _, tt := range []struct {\n\t\tname    string\n\t\td       string\n\t\toptions string\n\t\tinitial time.Time\n\t\ttests   []test\n\t}{\n\t\t{\n\t\t\tname:    \"DaylightSavingsStart/1d\",\n\t\t\td:       \"1d\",\n\t\t\tinitial: mustParseTime(t, \"2000-04-02T00:00:00-05:00\"),\n\t\t\ttests: []test{\n\t\t\t\t{\n\t\t\t\t\tstart: mustParseTime(t, \"2000-04-02T00:00:00-05:00\"),\n\t\t\t\t\tend:   mustParseTime(t, 
\"2000-04-03T00:00:00-04:00\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"DaylightSavingsStart/2h\",\n\t\t\td:       \"2h\",\n\t\t\tinitial: mustParseTime(t, \"2000-04-02T00:00:00-05:00\"),\n\t\t\ttests: []test{\n\t\t\t\t{\n\t\t\t\t\tstart: mustParseTime(t, \"2000-04-02T00:00:00-05:00\"),\n\t\t\t\t\tend:   mustParseTime(t, \"2000-04-02T03:00:00-04:00\"),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tstart: mustParseTime(t, \"2000-04-02T03:00:00-04:00\"),\n\t\t\t\t\tend:   mustParseTime(t, \"2000-04-02T04:00:00-04:00\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"DaylightSavingsEnd/1d\",\n\t\t\td:       \"1d\",\n\t\t\tinitial: mustParseTime(t, \"2000-10-29T00:00:00-04:00\"),\n\t\t\ttests: []test{\n\t\t\t\t{\n\t\t\t\t\tstart: mustParseTime(t, \"2000-10-29T00:00:00-04:00\"),\n\t\t\t\t\tend:   mustParseTime(t, \"2000-10-30T00:00:00-05:00\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"DaylightSavingsEnd/2h\",\n\t\t\td:       \"2h\",\n\t\t\tinitial: mustParseTime(t, \"2000-10-29T00:00:00-04:00\"),\n\t\t\ttests: []test{\n\t\t\t\t{\n\t\t\t\t\tstart: mustParseTime(t, \"2000-10-29T00:00:00-04:00\"),\n\t\t\t\t\tend:   mustParseTime(t, \"2000-10-29T02:00:00-05:00\"),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tstart: mustParseTime(t, \"2000-10-29T02:00:00-05:00\"),\n\t\t\t\t\tend:   mustParseTime(t, \"2000-10-29T04:00:00-05:00\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t} {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\ts := NewTestService(t)\n\t\t\tmc := NewMetaClient(t)\n\t\t\tmc.CreateDatabase(\"db\", \"\")\n\t\t\tmc.CreateContinuousQuery(\"db\", \"cq\",\n\t\t\t\tfmt.Sprintf(`CREATE CONTINUOUS QUERY cq ON db %s BEGIN SELECT mean(value) INTO cpu_mean FROM cpu GROUP BY time(%s) TZ('America/New_York') END`, tt.options, tt.d))\n\t\t\ts.MetaClient = mc\n\n\t\t\t// Set RunInterval high so we can trigger using Run method.\n\t\t\ts.RunInterval = 10 * time.Minute\n\t\t\tdone := make(chan struct{})\n\n\t\t\t// Set a callback for ExecuteStatement.\n\t\t\ttests := make(chan 
test, 1)\n\t\t\ts.QueryExecutor.StatementExecutor = &StatementExecutor{\n\t\t\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\t\t\ttest := <-tests\n\t\t\t\t\ts := stmt.(*influxql.SelectStatement)\n\t\t\t\t\tmin, max, err := influxql.TimeRange(s.Condition, s.Location)\n\t\t\t\t\tmax = max.Add(time.Nanosecond)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Errorf(\"unexpected error parsing time range: %s\", err)\n\t\t\t\t\t} else if !test.start.Equal(min) || !test.end.Equal(max) {\n\t\t\t\t\t\tt.Errorf(\"mismatched time range: got=(%s, %s) exp=(%s, %s)\", min, max, test.start, test.end)\n\t\t\t\t\t}\n\t\t\t\t\tdone <- struct{}{}\n\t\t\t\t\tctx.Results <- &influxql.Result{}\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t}\n\n\t\t\ts.Open()\n\t\t\tdefer s.Close()\n\n\t\t\t// Send an initial run request one nanosecond after the start to\n\t\t\t// prime the last CQ map.\n\t\t\ts.RunCh <- &RunRequest{Now: tt.initial.Add(time.Nanosecond)}\n\t\t\t// Execute each of the tests and ensure the times are correct.\n\t\t\tfor i, test := range tt.tests {\n\t\t\t\ttests <- test\n\t\t\t\tnow := test.now\n\t\t\t\tif now.IsZero() {\n\t\t\t\t\tnow = test.end\n\t\t\t\t}\n\t\t\t\ts.RunCh <- &RunRequest{Now: now}\n\t\t\t\tif err := wait(done, 100*time.Millisecond); err != nil {\n\t\t\t\t\tt.Fatal(fmt.Errorf(\"%d. 
%s\", i+1, err))\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// Test ExecuteContinuousQuery when QueryExecutor returns an error.\nfunc TestExecuteContinuousQuery_QueryExecutor_Error(t *testing.T) {\n\ts := NewTestService(t)\n\ts.QueryExecutor.StatementExecutor = &StatementExecutor{\n\t\tExecuteStatementFn: func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\t\treturn errExpected\n\t\t},\n\t}\n\n\tdbis := s.MetaClient.Databases()\n\tdbi := dbis[0]\n\tcqi := dbi.ContinuousQueries[0]\n\n\tnow := time.Now().Truncate(10 * time.Minute)\n\tif _, err := s.ExecuteContinuousQuery(&dbi, &cqi, now); err != errExpected {\n\t\tt.Errorf(\"exp = %s, got = %v\", errExpected, err)\n\t}\n}\n\n// NewTestService returns a new *Service with default mock object members.\nfunc NewTestService(t *testing.T) *Service {\n\ts := NewService(NewConfig())\n\tms := NewMetaClient(t)\n\ts.MetaClient = ms\n\ts.QueryExecutor = influxql.NewQueryExecutor()\n\ts.RunInterval = time.Millisecond\n\n\t// Set Logger to write to dev/null so stdout isn't polluted.\n\tif testing.Verbose() {\n\t\ts.WithLogger(zap.New(\n\t\t\tzap.NewTextEncoder(),\n\t\t\tzap.Output(os.Stderr),\n\t\t))\n\t}\n\n\t// Add a couple test databases and CQs.\n\tms.CreateDatabase(\"db\", \"rp\")\n\tms.CreateContinuousQuery(\"db\", \"cq\", `CREATE CONTINUOUS QUERY cq ON db BEGIN SELECT count(cpu) INTO cpu_count FROM cpu WHERE time > now() - 1h GROUP BY time(1s) END`)\n\tms.CreateDatabase(\"db2\", \"default\")\n\tms.CreateContinuousQuery(\"db2\", \"cq2\", `CREATE CONTINUOUS QUERY cq2 ON db2 BEGIN SELECT mean(value) INTO cpu_mean FROM cpu WHERE time > now() - 10m GROUP BY time(1m) END`)\n\tms.CreateDatabase(\"db3\", \"default\")\n\tms.CreateContinuousQuery(\"db3\", \"cq3\", `CREATE CONTINUOUS QUERY cq3 ON db3 BEGIN SELECT mean(value) INTO \"1hAverages\".:MEASUREMENT FROM /cpu[0-9]?/ GROUP BY time(10s) END`)\n\n\treturn s\n}\n\n// MetaClient is a mock meta store.\ntype MetaClient struct {\n\tmu            sync.RWMutex\n\tLeader 
       bool\n\tAllowLease    bool\n\tDatabaseInfos []meta.DatabaseInfo\n\tErr           error\n\tt             *testing.T\n\tnodeID        uint64\n}\n\n// NewMetaClient returns a *MetaClient.\nfunc NewMetaClient(t *testing.T) *MetaClient {\n\treturn &MetaClient{\n\t\tLeader:     true,\n\t\tAllowLease: true,\n\t\tt:          t,\n\t\tnodeID:     1,\n\t}\n}\n\n// NodeID returns the client's node ID.\nfunc (ms *MetaClient) NodeID() uint64 { return ms.nodeID }\n\n// AcquireLease attempts to acquire the specified lease.\nfunc (ms *MetaClient) AcquireLease(name string) (l *meta.Lease, err error) {\n\tif ms.Leader {\n\t\tif ms.AllowLease {\n\t\t\treturn &meta.Lease{Name: name}, nil\n\t\t}\n\t\treturn nil, errors.New(\"another node owns the lease\")\n\t}\n\treturn nil, meta.ErrServiceUnavailable\n}\n\n// Databases returns a list of database info about each database in the coordinator.\nfunc (ms *MetaClient) Databases() []meta.DatabaseInfo {\n\tms.mu.RLock()\n\tdefer ms.mu.RUnlock()\n\treturn ms.DatabaseInfos\n}\n\n// Database returns a single database by name.\nfunc (ms *MetaClient) Database(name string) *meta.DatabaseInfo {\n\tms.mu.RLock()\n\tdefer ms.mu.RUnlock()\n\treturn ms.database(name)\n}\n\nfunc (ms *MetaClient) database(name string) *meta.DatabaseInfo {\n\tif ms.Err != nil {\n\t\treturn nil\n\t}\n\tfor i := range ms.DatabaseInfos {\n\t\tif ms.DatabaseInfos[i].Name == name {\n\t\t\treturn &ms.DatabaseInfos[i]\n\t\t}\n\t}\n\treturn nil\n}\n\n// CreateDatabase adds a new database to the meta store.\nfunc (ms *MetaClient) CreateDatabase(name, defaultRetentionPolicy string) error {\n\tms.mu.Lock()\n\tdefer ms.mu.Unlock()\n\tif ms.Err != nil {\n\t\treturn ms.Err\n\t}\n\n\t// See if the database already exists.\n\tfor _, dbi := range ms.DatabaseInfos {\n\t\tif dbi.Name == name {\n\t\t\treturn fmt.Errorf(\"database already exists: %s\", name)\n\t\t}\n\t}\n\n\t// Create database.\n\tms.DatabaseInfos = append(ms.DatabaseInfos, meta.DatabaseInfo{\n\t\tName: 
name,\n\t\tDefaultRetentionPolicy: defaultRetentionPolicy,\n\t})\n\n\treturn nil\n}\n\n// CreateContinuousQuery adds a CQ to the meta store.\nfunc (ms *MetaClient) CreateContinuousQuery(database, name, query string) error {\n\tms.mu.Lock()\n\tdefer ms.mu.Unlock()\n\tif ms.Err != nil {\n\t\treturn ms.Err\n\t}\n\n\tdbi := ms.database(database)\n\tif dbi == nil {\n\t\treturn fmt.Errorf(\"database not found: %s\", database)\n\t}\n\n\t// See if CQ already exists.\n\tfor _, cqi := range dbi.ContinuousQueries {\n\t\tif cqi.Name == name {\n\t\t\treturn fmt.Errorf(\"continuous query already exists: %s\", name)\n\t\t}\n\t}\n\n\t// Create a new CQ and store it.\n\tdbi.ContinuousQueries = append(dbi.ContinuousQueries, meta.ContinuousQueryInfo{\n\t\tName:  name,\n\t\tQuery: query,\n\t})\n\n\treturn nil\n}\n\n// StatementExecutor is a mock statement executor.\ntype StatementExecutor struct {\n\tExecuteStatementFn func(stmt influxql.Statement, ctx influxql.ExecutionContext) error\n}\n\nfunc (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\treturn e.ExecuteStatementFn(stmt, ctx)\n}\n\nfunc wait(c chan struct{}, d time.Duration) (err error) {\n\tselect {\n\tcase <-c:\n\tcase <-time.After(d):\n\t\terr = errors.New(\"timed out\")\n\t}\n\treturn\n}\n\nfunc mustParseTime(t *testing.T, value string) time.Time {\n\tts, err := time.Parse(time.RFC3339, value)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to parse time: %s\", err)\n\t}\n\treturn ts\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/graphite/README.md",
    "content": "# The Graphite Input\n\n## A Note On UDP/IP OS Buffer Sizes\n\nIf you're using UDP input and running Linux or FreeBSD, please adjust your UDP buffer\nsize limit, [see here for more details.](../udp/README.md#a-note-on-udpip-os-buffer-sizes)\n\n## Configuration\n\nEach Graphite input allows the binding address, target database, and protocol to be set. If the database does not exist, it will be created automatically when the input is initialized. The write-consistency-level can also be set. If any write operations do not meet the configured consistency guarantees, an error will occur and the data will not be indexed. The default consistency-level is `ONE`.\n\nEach Graphite input also performs internal batching of the points it receives, as batched writes to the database are more efficient. The default _batch size_ is 1000, _pending batch_ factor is 5, with a _batch timeout_ of 1 second. This means the input will write batches of maximum size 1000, but if a batch has not reached 1000 points within 1 second of the first point being added to a batch, it will emit that batch regardless of size. The pending batch factor controls how many batches can be in memory at once, allowing the input to transmit a batch, while still building other batches.\n\n## Parsing Metrics\n\nThe Graphite plugin allows measurements to be saved using the Graphite line protocol. By default, enabling the Graphite plugin will allow you to collect metrics and store them using the metric name as the measurement.  If you send a metric named `servers.localhost.cpu.loadavg.10`, it will store the full metric name as the measurement with no extracted tags.\n\nWhile this default setup works, it is not the ideal way to store measurements in InfluxDB since it does not take advantage of tags.  
It also will not perform optimally with large dataset sizes since queries will be forced to use regexes which is known to not scale well.\n\nTo extract tags from metrics, one or more templates must be configured to parse metrics into tags and measurements.\n\n## Templates\n\nTemplates allow matching parts of a metric name to be used as tag keys in the stored metric.  They have a similar format to Graphite metric names.  The values in between the separators are used as the tag keys.  The location of the tag key that matches the same position as the Graphite metric section is used as the value.  If there is no value, the Graphite portion is skipped.\n\nThe special value _measurement_ is used to define the measurement name.  It can have a trailing `*` to indicate that the remainder of the metric should be used.  If a _measurement_ is not specified, the full metric name is used.\n\n### Basic Matching\n\n`servers.localhost.cpu.loadavg.10`\n* Template: `.host.resource.measurement*`\n* Output:  _measurement_ =`loadavg.10` _tags_ =`host=localhost resource=cpu`\n\n### Multiple Measurement & Tags Matching\n\nThe _measurement_ can be specified multiple times in a template to provide more control over the measurement name. Tags can also be\nmatched multiple times. Multiple values will be joined together using the _Separator_ config variable.  By default, this value is `.`.\n\n`servers.localhost.localdomain.cpu.cpu0.user`\n* Template: `.host.host.measurement.cpu.measurement`\n* Output: _measurement_ = `cpu.user` _tags_ = `host=localhost.localdomain cpu=cpu0`\n\nSince `.` requires queries on measurements to be double-quoted, you may want to set this to `_` to simplify querying parsed metrics.\n\n`servers.localhost.cpu.cpu0.user`\n* Separator: `_`\n* Template: `.host.measurement.cpu.measurement`\n* Output: _measurement_ = `cpu_user` _tags_ = `host=localhost cpu=cpu0`\n\n### Adding Tags\n\nAdditional tags can be added to a metric if they don't exist on the received metric.  
You can add additional tags by specifying them after the pattern.  Tags have the same format as the line protocol.  Multiple tags are separated by commas.\n\n`servers.localhost.cpu.loadavg.10`\n* Template: `.host.resource.measurement* region=us-west,zone=1a`\n* Output:  _measurement_ = `loadavg.10` _tags_ = `host=localhost resource=cpu region=us-west zone=1a`\n\n### Fields\n\nA field key can be specified by using the keyword _field_. By default if no _field_ keyword is specified then the metric will be written to a field named _value_.\n\nThe field key can also be derived from the second \"half\" of the input metric-name by specifying ```field*``` (eg ```measurement.measurement.field*```). This cannot be used in conjunction with \"measurement*\"!\n\nIt's possible to amend measurement metrics with additional fields, e.g:\n\nInput:\n```\nsensu.metric.net.server0.eth0.rx_packets 461295119435 1444234982\nsensu.metric.net.server0.eth0.tx_bytes 1093086493388480 1444234982\nsensu.metric.net.server0.eth0.rx_bytes 1015633926034834 1444234982\nsensu.metric.net.server0.eth0.tx_errors 0 1444234982\nsensu.metric.net.server0.eth0.rx_errors 0 1444234982\nsensu.metric.net.server0.eth0.tx_dropped 0 1444234982\nsensu.metric.net.server0.eth0.rx_dropped 0 1444234982\n```\n\nWith template:\n```\nsensu.metric.* ..measurement.host.interface.field\n```\n\nBecomes database entry:\n```\n> select * from net\nname: net\n---------\ntime      host  interface rx_bytes    rx_dropped  rx_errors rx_packets    tx_bytes    tx_dropped  tx_errors\n1444234982000000000 server0  eth0    1.015633926034834e+15 0   0   4.61295119435e+11 1.09308649338848e+15  0 0\n```\n\n## Multiple Templates\n\nOne template may not match all metrics.  For example, using multiple plugins with diamond will produce metrics in different formats.  
If you need to use multiple templates, you'll need to define a prefix filter that must match before the template can be applied.\n\n### Filters\n\nFilters have a similar format to templates but work more like wildcard expressions.  When multiple filters would match a metric, the more specific one is chosen.  Filters are configured by adding them before the template.\n\nFor example,\n\n```\nservers.localhost.cpu.loadavg.10\nservers.host123.elasticsearch.cache_hits 100\nservers.host456.mysql.tx_count 10\nservers.host789.prod.mysql.tx_count 10\n```\n* `servers.*` would match all values\n* `servers.*.mysql` would match `servers.host456.mysql.tx_count 10`\n* `servers.localhost.*` would match `servers.localhost.cpu.loadavg`\n* `servers.*.*.mysql` would match `servers.host789.prod.mysql.tx_count 10`\n\n## Default Templates\n\nIf no template filters are defined or you want to just have one basic template, you can define a default template.  This template will apply to any metric that has not already matched a filter.\n\n```\ndev.http.requests.200\nprod.myapp.errors.count\ndev.db.queries.count\n```\n\n* `env.app.measurement*` would create\n  * _measurement_=`requests.200` _tags_=`env=dev,app=http`\n  * _measurement_= `errors.count` _tags_=`env=prod,app=myapp`\n  * _measurement_=`queries.count` _tags_=`env=dev,app=db`\n\n## Global Tags\n\nIf you need to add the same set of tags to all metrics, you can define them globally at the plugin level and not within each template description.\n\n## Minimal Config\n```\n[[graphite]]\n  enabled = true\n  # bind-address = \":2003\"\n  # protocol = \"tcp\"\n  # consistency-level = \"one\"\n\n  ### If matching multiple measurement files, this string will be used to join the matched values.\n  # separator = \".\"\n\n  ### Default tags that will be added to all metrics.  
These can be overridden at the template level\n  ### or by tags extracted from metric\n  # tags = [\"region=us-east\", \"zone=1c\"]\n\n  ### Each template line requires a template pattern.  It can have an optional\n  ### filter before the template and separated by spaces.  It can also have optional extra\n  ### tags following the template.  Multiple tags should be separated by commas and no spaces\n  ### similar to the line protocol format.  There can be only one default template.\n  # templates = [\n  #   \"*.app env.service.resource.measurement\",\n  #   # Default template\n  #   \"server.*\",\n #]\n```\n\n## Customized Config\n```\n[[graphite]]\n   enabled = true\n   separator = \"_\"\n   tags = [\"region=us-east\", \"zone=1c\"]\n   templates = [\n     # filter + template\n     \"*.app env.service.resource.measurement\",\n\n     # filter + template + extra tag\n     \"stats.* .host.measurement* region=us-west,agent=sensu\",\n\n     # filter + template with field key\n     \"stats.* .host.measurement.field\",\n\n     # default template. Ignore the first Graphite component \"servers\"\n     \".measurement*\",\n ]\n```\n\n## Two Graphite Listeners, UDP & TCP, Config\n\n```\n[[graphite]]\n  enabled = true\n  bind-address = \":2003\"\n  protocol = \"tcp\"\n  # consistency-level = \"one\"\n\n[[graphite]]\n  enabled = true\n  bind-address = \":2004\" # the bind address\n  protocol = \"udp\" # protocol to read via\n  udp-read-buffer = 8388608 # (8*1024*1024) UDP read buffer size\n```\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/graphite/config.go",
    "content": "package graphite\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n\t\"github.com/influxdata/influxdb/toml\"\n)\n\nconst (\n\t// DefaultBindAddress is the default binding interface if none is specified.\n\tDefaultBindAddress = \":2003\"\n\n\t// DefaultDatabase is the default database if none is specified.\n\tDefaultDatabase = \"graphite\"\n\n\t// DefaultProtocol is the default IP protocol used by the Graphite input.\n\tDefaultProtocol = \"tcp\"\n\n\t// DefaultConsistencyLevel is the default write consistency for the Graphite input.\n\tDefaultConsistencyLevel = \"one\"\n\n\t// DefaultSeparator is the default join character to use when joining multiple\n\t// measurement parts in a template.\n\tDefaultSeparator = \".\"\n\n\t// DefaultBatchSize is the default write batch size.\n\tDefaultBatchSize = 5000\n\n\t// DefaultBatchPending is the default number of pending write batches.\n\tDefaultBatchPending = 10\n\n\t// DefaultBatchTimeout is the default Graphite batch timeout.\n\tDefaultBatchTimeout = time.Second\n\n\t// DefaultUDPReadBuffer is the default buffer size for the UDP listener.\n\t// Sets the size of the operating system's receive buffer associated with\n\t// the UDP traffic. 
Keep in mind that the OS must be able\n\t// to handle the number set here or the UDP listener will error and exit.\n\t//\n\t// DefaultReadBuffer = 0 means to use the OS default, which is usually too\n\t// small for high UDP performance.\n\t//\n\t// Increasing OS buffer limits:\n\t//     Linux:      sudo sysctl -w net.core.rmem_max=<read-buffer>\n\t//     BSD/Darwin: sudo sysctl -w kern.ipc.maxsockbuf=<read-buffer>\n\tDefaultUDPReadBuffer = 0\n)\n\n// Config represents the configuration for Graphite endpoints.\ntype Config struct {\n\tEnabled          bool          `toml:\"enabled\"`\n\tBindAddress      string        `toml:\"bind-address\"`\n\tDatabase         string        `toml:\"database\"`\n\tRetentionPolicy  string        `toml:\"retention-policy\"`\n\tProtocol         string        `toml:\"protocol\"`\n\tBatchSize        int           `toml:\"batch-size\"`\n\tBatchPending     int           `toml:\"batch-pending\"`\n\tBatchTimeout     toml.Duration `toml:\"batch-timeout\"`\n\tConsistencyLevel string        `toml:\"consistency-level\"`\n\tTemplates        []string      `toml:\"templates\"`\n\tTags             []string      `toml:\"tags\"`\n\tSeparator        string        `toml:\"separator\"`\n\tUDPReadBuffer    int           `toml:\"udp-read-buffer\"`\n}\n\n// NewConfig returns a new instance of Config with defaults.\nfunc NewConfig() Config {\n\treturn Config{\n\t\tBindAddress:      DefaultBindAddress,\n\t\tDatabase:         DefaultDatabase,\n\t\tProtocol:         DefaultProtocol,\n\t\tBatchSize:        DefaultBatchSize,\n\t\tBatchPending:     DefaultBatchPending,\n\t\tBatchTimeout:     toml.Duration(DefaultBatchTimeout),\n\t\tConsistencyLevel: DefaultConsistencyLevel,\n\t\tSeparator:        DefaultSeparator,\n\t}\n}\n\n// WithDefaults takes the given config and returns a new config with any required\n// default values set.\nfunc (c *Config) WithDefaults() *Config {\n\td := *c\n\tif d.BindAddress == \"\" {\n\t\td.BindAddress = DefaultBindAddress\n\t}\n\tif 
d.Database == \"\" {\n\t\td.Database = DefaultDatabase\n\t}\n\tif d.Protocol == \"\" {\n\t\td.Protocol = DefaultProtocol\n\t}\n\tif d.BatchSize == 0 {\n\t\td.BatchSize = DefaultBatchSize\n\t}\n\tif d.BatchPending == 0 {\n\t\td.BatchPending = DefaultBatchPending\n\t}\n\tif d.BatchTimeout == 0 {\n\t\td.BatchTimeout = toml.Duration(DefaultBatchTimeout)\n\t}\n\tif d.ConsistencyLevel == \"\" {\n\t\td.ConsistencyLevel = DefaultConsistencyLevel\n\t}\n\tif d.Separator == \"\" {\n\t\td.Separator = DefaultSeparator\n\t}\n\tif d.UDPReadBuffer == 0 {\n\t\td.UDPReadBuffer = DefaultUDPReadBuffer\n\t}\n\treturn &d\n}\n\n// DefaultTags returns the config's tags.\nfunc (c *Config) DefaultTags() models.Tags {\n\tm := make(map[string]string, len(c.Tags))\n\tfor _, t := range c.Tags {\n\t\tparts := strings.Split(t, \"=\")\n\t\tm[parts[0]] = parts[1]\n\t}\n\treturn models.NewTags(m)\n}\n\n// Validate validates the config's templates and tags.\nfunc (c *Config) Validate() error {\n\tif err := c.validateTemplates(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.validateTags(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *Config) validateTemplates() error {\n\t// map to keep track of filters we see\n\tfilters := map[string]struct{}{}\n\n\tfor i, t := range c.Templates {\n\t\tparts := strings.Fields(t)\n\t\t// Ensure template string is non-empty\n\t\tif len(parts) == 0 {\n\t\t\treturn fmt.Errorf(\"missing template at position: %d\", i)\n\t\t}\n\t\tif len(parts) == 1 && parts[0] == \"\" {\n\t\t\treturn fmt.Errorf(\"missing template at position: %d\", i)\n\t\t}\n\n\t\tif len(parts) > 3 {\n\t\t\treturn fmt.Errorf(\"invalid template format: '%s'\", t)\n\t\t}\n\n\t\ttemplate := t\n\t\tfilter := \"\"\n\t\ttags := \"\"\n\t\tif len(parts) >= 2 {\n\t\t\t// We could have <filter> <template>  or <template> <tags>.  
Equals is only allowed in\n\t\t\t// tags section.\n\t\t\tif strings.Contains(parts[1], \"=\") {\n\t\t\t\ttemplate = parts[0]\n\t\t\t\ttags = parts[1]\n\t\t\t} else {\n\t\t\t\tfilter = parts[0]\n\t\t\t\ttemplate = parts[1]\n\t\t\t}\n\t\t}\n\n\t\tif len(parts) == 3 {\n\t\t\ttags = parts[2]\n\t\t}\n\n\t\t// Validate the template has one and only one measurement\n\t\tif err := c.validateTemplate(template); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Prevent duplicate filters in the config\n\t\tif _, ok := filters[filter]; ok {\n\t\t\treturn fmt.Errorf(\"duplicate filter '%s' found at position: %d\", filter, i)\n\t\t}\n\t\tfilters[filter] = struct{}{}\n\n\t\tif filter != \"\" {\n\t\t\t// Validate filter expression is valid\n\t\t\tif err := c.validateFilter(filter); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif tags != \"\" {\n\t\t\t// Validate tags\n\t\t\tfor _, tagStr := range strings.Split(tags, \",\") {\n\t\t\t\tif err := c.validateTag(tagStr); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *Config) validateTags() error {\n\tfor _, t := range c.Tags {\n\t\tif err := c.validateTag(t); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *Config) validateTemplate(template string) error {\n\thasMeasurement := false\n\tfor _, p := range strings.Split(template, \".\") {\n\t\tif p == \"measurement\" || p == \"measurement*\" {\n\t\t\thasMeasurement = true\n\t\t}\n\t}\n\n\tif !hasMeasurement {\n\t\treturn fmt.Errorf(\"no measurement in template `%s`\", template)\n\t}\n\n\treturn nil\n}\n\nfunc (c *Config) validateFilter(filter string) error {\n\tfor _, p := range strings.Split(filter, \".\") {\n\t\tif p == \"\" {\n\t\t\treturn fmt.Errorf(\"filter contains blank section: %s\", filter)\n\t\t}\n\n\t\tif strings.Contains(p, \"*\") && p != \"*\" {\n\t\t\treturn fmt.Errorf(\"invalid filter wildcard section: %s\", filter)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *Config) validateTag(keyValue string) 
error {\n\tparts := strings.Split(keyValue, \"=\")\n\tif len(parts) != 2 {\n\t\treturn fmt.Errorf(\"invalid template tags: '%s'\", keyValue)\n\t}\n\n\tif parts[0] == \"\" || parts[1] == \"\" {\n\t\treturn fmt.Errorf(\"invalid template tags: %s'\", keyValue)\n\t}\n\n\treturn nil\n}\n\n// Configs wraps a slice of Config to aggregate diagnostics.\ntype Configs []Config\n\n// Diagnostics returns one set of diagnostics for all of the Configs.\nfunc (c Configs) Diagnostics() (*diagnostics.Diagnostics, error) {\n\td := &diagnostics.Diagnostics{\n\t\tColumns: []string{\"enabled\", \"bind-address\", \"protocol\", \"database\", \"retention-policy\", \"batch-size\", \"batch-pending\", \"batch-timeout\"},\n\t}\n\n\tfor _, cc := range c {\n\t\tif !cc.Enabled {\n\t\t\td.AddRow([]interface{}{false})\n\t\t\tcontinue\n\t\t}\n\n\t\tr := []interface{}{true, cc.BindAddress, cc.Protocol, cc.Database, cc.RetentionPolicy, cc.BatchSize, cc.BatchPending, cc.BatchTimeout}\n\t\td.AddRow(r)\n\t}\n\n\treturn d, nil\n}\n\n// Enabled returns true if any underlying Config is Enabled.\nfunc (c Configs) Enabled() bool {\n\tfor _, cc := range c {\n\t\tif cc.Enabled {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/graphite/config_test.go",
    "content": "package graphite_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/influxdata/influxdb/services/graphite\"\n)\n\nfunc TestConfig_Parse(t *testing.T) {\n\t// Parse configuration.\n\tvar c graphite.Config\n\tif _, err := toml.Decode(`\nbind-address = \":8080\"\ndatabase = \"mydb\"\nretention-policy = \"myrp\"\nenabled = true\nprotocol = \"tcp\"\nbatch-size=100\nbatch-pending=77\nbatch-timeout=\"1s\"\nconsistency-level=\"one\"\ntemplates=[\"servers.* .host.measurement*\"]\ntags=[\"region=us-east\"]\n`, &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Validate configuration.\n\tif c.BindAddress != \":8080\" {\n\t\tt.Fatalf(\"unexpected bind address: %s\", c.BindAddress)\n\t} else if c.Database != \"mydb\" {\n\t\tt.Fatalf(\"unexpected database selected: %s\", c.Database)\n\t} else if c.RetentionPolicy != \"myrp\" {\n\t\tt.Fatalf(\"unexpected retention policy selected: %s\", c.RetentionPolicy)\n\t} else if c.Enabled != true {\n\t\tt.Fatalf(\"unexpected graphite enabled: %v\", c.Enabled)\n\t} else if c.Protocol != \"tcp\" {\n\t\tt.Fatalf(\"unexpected graphite protocol: %s\", c.Protocol)\n\t} else if c.BatchSize != 100 {\n\t\tt.Fatalf(\"unexpected graphite batch size: %d\", c.BatchSize)\n\t} else if c.BatchPending != 77 {\n\t\tt.Fatalf(\"unexpected graphite batch pending: %d\", c.BatchPending)\n\t} else if time.Duration(c.BatchTimeout) != time.Second {\n\t\tt.Fatalf(\"unexpected graphite batch timeout: %v\", c.BatchTimeout)\n\t} else if c.ConsistencyLevel != \"one\" {\n\t\tt.Fatalf(\"unexpected graphite consistency setting: %s\", c.ConsistencyLevel)\n\t}\n\n\tif len(c.Templates) != 1 && c.Templates[0] != \"servers.* .host.measurement*\" {\n\t\tt.Fatalf(\"unexpected graphite templates setting: %v\", c.Templates)\n\t}\n\tif len(c.Tags) != 1 && c.Tags[0] != \"regsion=us-east\" {\n\t\tt.Fatalf(\"unexpected graphite templates setting: %v\", c.Tags)\n\t}\n}\n\nfunc TestConfigValidateEmptyTemplate(t *testing.T) {\n\tc 
:= &graphite.Config{}\n\tc.Templates = []string{\"\"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. got nil\")\n\t}\n\n\tc.Templates = []string{\"     \"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. got nil\")\n\t}\n}\n\nfunc TestConfigValidateTooManyField(t *testing.T) {\n\tc := &graphite.Config{}\n\tc.Templates = []string{\"a measurement b c\"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. got nil\")\n\t}\n}\n\nfunc TestConfigValidateTemplatePatterns(t *testing.T) {\n\tc := &graphite.Config{}\n\tc.Templates = []string{\"*measurement\"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. got nil\")\n\t}\n\n\tc.Templates = []string{\".host.region\"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. got nil\")\n\t}\n}\n\nfunc TestConfigValidateFilter(t *testing.T) {\n\tc := &graphite.Config{}\n\tc.Templates = []string{\".server measurement*\"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. got nil\")\n\t}\n\n\tc.Templates = []string{\".    .server measurement*\"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. got nil\")\n\t}\n\n\tc.Templates = []string{\"server* measurement*\"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. got nil\")\n\t}\n}\n\nfunc TestConfigValidateTemplateTags(t *testing.T) {\n\tc := &graphite.Config{}\n\tc.Templates = []string{\"*.server measurement* foo\"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. got nil\")\n\t}\n\n\tc.Templates = []string{\"*.server measurement* foo=bar=\"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. 
got nil\")\n\t}\n\n\tc.Templates = []string{\"*.server measurement* foo=bar,\"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. got nil\")\n\t}\n\n\tc.Templates = []string{\"*.server measurement* =\"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. got nil\")\n\t}\n}\n\nfunc TestConfigValidateDefaultTags(t *testing.T) {\n\tc := &graphite.Config{}\n\tc.Tags = []string{\"foo\"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. got nil\")\n\t}\n\n\tc.Tags = []string{\"foo=bar=\"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. got nil\")\n\t}\n\n\tc.Tags = []string{\"foo=bar\", \"\"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. got nil\")\n\t}\n\n\tc.Tags = []string{\"=\"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. got nil\")\n\t}\n}\n\nfunc TestConfigValidateFilterDuplicates(t *testing.T) {\n\tc := &graphite.Config{}\n\tc.Templates = []string{\"foo measurement*\", \"foo .host.measurement\"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. got nil\")\n\t}\n\n\t// duplicate default templates\n\tc.Templates = []string{\"measurement*\", \".host.measurement\"}\n\tif err := c.Validate(); err == nil {\n\t\tt.Errorf(\"config validate expected error. got nil\")\n\t}\n\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/graphite/errors.go",
    "content": "package graphite\n\nimport \"fmt\"\n\n// An UnsupportedValueError is returned when a parsed value is not\n// supported.\ntype UnsupportedValueError struct {\n\tField string\n\tValue float64\n}\n\nfunc (err *UnsupportedValueError) Error() string {\n\treturn fmt.Sprintf(`field \"%s\" value: \"%v\" is unsupported`, err.Field, err.Value)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/graphite/parser.go",
    "content": "package graphite\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n)\n\n// Minimum and maximum supported dates for timestamps.\nvar (\n\t// The minimum graphite timestamp allowed.\n\tMinDate = time.Date(1901, 12, 13, 0, 0, 0, 0, time.UTC)\n\n\t// The maximum graphite timestamp allowed.\n\tMaxDate = time.Date(2038, 1, 19, 0, 0, 0, 0, time.UTC)\n)\n\nvar defaultTemplate *template\n\nfunc init() {\n\tvar err error\n\tdefaultTemplate, err = NewTemplate(\"measurement*\", nil, DefaultSeparator)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n// Parser encapsulates a Graphite Parser.\ntype Parser struct {\n\tmatcher *matcher\n\ttags    models.Tags\n}\n\n// Options are configurable values that can be provided to a Parser.\ntype Options struct {\n\tSeparator   string\n\tTemplates   []string\n\tDefaultTags models.Tags\n}\n\n// NewParserWithOptions returns a graphite parser using the given options.\nfunc NewParserWithOptions(options Options) (*Parser, error) {\n\n\tmatcher := newMatcher()\n\tmatcher.AddDefaultTemplate(defaultTemplate)\n\n\tfor _, pattern := range options.Templates {\n\n\t\ttemplate := pattern\n\t\tfilter := \"\"\n\t\t// Format is [filter] <template> [tag1=value1,tag2=value2]\n\t\tparts := strings.Fields(pattern)\n\t\tif len(parts) < 1 {\n\t\t\tcontinue\n\t\t} else if len(parts) >= 2 {\n\t\t\tif strings.Contains(parts[1], \"=\") {\n\t\t\t\ttemplate = parts[0]\n\t\t\t} else {\n\t\t\t\tfilter = parts[0]\n\t\t\t\ttemplate = parts[1]\n\t\t\t}\n\t\t}\n\n\t\t// Parse out the default tags specific to this template\n\t\tvar tags models.Tags\n\t\tif strings.Contains(parts[len(parts)-1], \"=\") {\n\t\t\ttagStrs := strings.Split(parts[len(parts)-1], \",\")\n\t\t\tfor _, kv := range tagStrs {\n\t\t\t\tparts := strings.Split(kv, \"=\")\n\t\t\t\ttags.SetString(parts[0], parts[1])\n\t\t\t}\n\t\t}\n\n\t\ttmpl, err := NewTemplate(template, tags, options.Separator)\n\t\tif err 
!= nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmatcher.Add(filter, tmpl)\n\t}\n\treturn &Parser{matcher: matcher, tags: options.DefaultTags}, nil\n}\n\n// NewParser returns a GraphiteParser instance.\nfunc NewParser(templates []string, defaultTags models.Tags) (*Parser, error) {\n\treturn NewParserWithOptions(\n\t\tOptions{\n\t\t\tTemplates:   templates,\n\t\t\tDefaultTags: defaultTags,\n\t\t\tSeparator:   DefaultSeparator,\n\t\t})\n}\n\n// Parse performs Graphite parsing of a single line.\nfunc (p *Parser) Parse(line string) (models.Point, error) {\n\t// Break into 3 fields (name, value, timestamp).\n\tfields := strings.Fields(line)\n\tif len(fields) != 2 && len(fields) != 3 {\n\t\treturn nil, fmt.Errorf(\"received %q which doesn't have required fields\", line)\n\t}\n\n\t// decode the name and tags\n\ttemplate := p.matcher.Match(fields[0])\n\tmeasurement, tags, field, err := template.Apply(fields[0])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Could not extract measurement, use the raw value\n\tif measurement == \"\" {\n\t\tmeasurement = fields[0]\n\t}\n\n\t// Parse value.\n\tv, err := strconv.ParseFloat(fields[1], 64)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(`field \"%s\" value: %s`, fields[0], err)\n\t}\n\n\tif math.IsNaN(v) || math.IsInf(v, 0) {\n\t\treturn nil, &UnsupportedValueError{Field: fields[0], Value: v}\n\t}\n\n\tfieldValues := map[string]interface{}{}\n\tif field != \"\" {\n\t\tfieldValues[field] = v\n\t} else {\n\t\tfieldValues[\"value\"] = v\n\t}\n\n\t// If no 3rd field, use now as timestamp\n\ttimestamp := time.Now().UTC()\n\n\tif len(fields) == 3 {\n\t\t// Parse timestamp.\n\t\tunixTime, err := strconv.ParseFloat(fields[2], 64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(`field \"%s\" time: %s`, fields[0], err)\n\t\t}\n\n\t\t// -1 is a special value that gets converted to current UTC time\n\t\t// See https://github.com/graphite-project/carbon/issues/54\n\t\tif unixTime != float64(-1) {\n\t\t\t// Check if we have fractional 
seconds\n\t\t\ttimestamp = time.Unix(int64(unixTime), int64((unixTime-math.Floor(unixTime))*float64(time.Second)))\n\t\t\tif timestamp.Before(MinDate) || timestamp.After(MaxDate) {\n\t\t\t\treturn nil, fmt.Errorf(\"timestamp out of range\")\n\t\t\t}\n\t\t}\n\t}\n\n\t// Set the default tags on the point if they are not already set\n\tfor _, t := range p.tags {\n\t\tif _, ok := tags[string(t.Key)]; !ok {\n\t\t\ttags[string(t.Key)] = string(t.Value)\n\t\t}\n\t}\n\treturn models.NewPoint(measurement, models.NewTags(tags), fieldValues, timestamp)\n}\n\n// ApplyTemplate extracts the template fields from the given line and\n// returns the measurement name and tags.\nfunc (p *Parser) ApplyTemplate(line string) (string, map[string]string, string, error) {\n\t// Break line into fields (name, value, timestamp), only name is used\n\tfields := strings.Fields(line)\n\tif len(fields) == 0 {\n\t\treturn \"\", make(map[string]string), \"\", nil\n\t}\n\t// decode the name and tags\n\ttemplate := p.matcher.Match(fields[0])\n\tname, tags, field, err := template.Apply(fields[0])\n\t// Set the default tags on the point if they are not already set\n\tfor _, t := range p.tags {\n\t\tif _, ok := tags[string(t.Key)]; !ok {\n\t\t\ttags[string(t.Key)] = string(t.Value)\n\t\t}\n\t}\n\treturn name, tags, field, err\n}\n\n// template represents a pattern and tags to map a graphite metric string to a influxdb Point.\ntype template struct {\n\ttags              []string\n\tdefaultTags       models.Tags\n\tgreedyMeasurement bool\n\tseparator         string\n}\n\n// NewTemplate returns a new template ensuring it has a measurement\n// specified.\nfunc NewTemplate(pattern string, defaultTags models.Tags, separator string) (*template, error) {\n\ttags := strings.Split(pattern, \".\")\n\thasMeasurement := false\n\ttemplate := &template{tags: tags, defaultTags: defaultTags, separator: separator}\n\n\tfor _, tag := range tags {\n\t\tif strings.HasPrefix(tag, \"measurement\") {\n\t\t\thasMeasurement = 
true\n\t\t}\n\t\tif tag == \"measurement*\" {\n\t\t\ttemplate.greedyMeasurement = true\n\t\t}\n\t}\n\n\tif !hasMeasurement {\n\t\treturn nil, fmt.Errorf(\"no measurement specified for template. %q\", pattern)\n\t}\n\n\treturn template, nil\n}\n\n// Apply extracts the template fields from the given line and returns the measurement\n// name and tags.\nfunc (t *template) Apply(line string) (string, map[string]string, string, error) {\n\tfields := strings.Split(line, \".\")\n\tvar (\n\t\tmeasurement            []string\n\t\ttags                   = make(map[string][]string)\n\t\tfield                  string\n\t\thasFieldWildcard       = false\n\t\thasMeasurementWildcard = false\n\t)\n\n\t// Set any default tags\n\tfor _, t := range t.defaultTags {\n\t\ttags[string(t.Key)] = append(tags[string(t.Key)], string(t.Value))\n\t}\n\n\t// See if an invalid combination has been specified in the template:\n\tfor _, tag := range t.tags {\n\t\tif tag == \"measurement*\" {\n\t\t\thasMeasurementWildcard = true\n\t\t} else if tag == \"field*\" {\n\t\t\thasFieldWildcard = true\n\t\t}\n\t}\n\tif hasFieldWildcard && hasMeasurementWildcard {\n\t\treturn \"\", nil, \"\", fmt.Errorf(\"either 'field*' or 'measurement*' can be used in each template (but not both together): %q\", strings.Join(t.tags, t.separator))\n\t}\n\n\tfor i, tag := range t.tags {\n\t\tif i >= len(fields) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif tag == \"measurement\" {\n\t\t\tmeasurement = append(measurement, fields[i])\n\t\t} else if tag == \"field\" {\n\t\t\tif len(field) != 0 {\n\t\t\t\treturn \"\", nil, \"\", fmt.Errorf(\"'field' can only be used once in each template: %q\", line)\n\t\t\t}\n\t\t\tfield = fields[i]\n\t\t} else if tag == \"field*\" {\n\t\t\tfield = strings.Join(fields[i:], t.separator)\n\t\t\tbreak\n\t\t} else if tag == \"measurement*\" {\n\t\t\tmeasurement = append(measurement, fields[i:]...)\n\t\t\tbreak\n\t\t} else if tag != \"\" {\n\t\t\ttags[tag] = append(tags[tag], fields[i])\n\t\t}\n\t}\n\n\t// 
Convert to map of strings.\n\tout_tags := make(map[string]string)\n\tfor k, values := range tags {\n\t\tout_tags[k] = strings.Join(values, t.separator)\n\t}\n\n\treturn strings.Join(measurement, t.separator), out_tags, field, nil\n}\n\n// matcher determines which template should be applied to a given metric\n// based on a filter tree.\ntype matcher struct {\n\troot            *node\n\tdefaultTemplate *template\n}\n\nfunc newMatcher() *matcher {\n\treturn &matcher{\n\t\troot: &node{},\n\t}\n}\n\n// Add inserts the template in the filter tree based the given filter.\nfunc (m *matcher) Add(filter string, template *template) {\n\tif filter == \"\" {\n\t\tm.AddDefaultTemplate(template)\n\t\treturn\n\t}\n\tm.root.Insert(filter, template)\n}\n\nfunc (m *matcher) AddDefaultTemplate(template *template) {\n\tm.defaultTemplate = template\n}\n\n// Match returns the template that matches the given graphite line.\nfunc (m *matcher) Match(line string) *template {\n\ttmpl := m.root.Search(line)\n\tif tmpl != nil {\n\t\treturn tmpl\n\t}\n\n\treturn m.defaultTemplate\n}\n\n// node is an item in a sorted k-ary tree.  Each child is sorted by its value.\n// The special value of \"*\", is always last.\ntype node struct {\n\tvalue    string\n\tchildren nodes\n\ttemplate *template\n}\n\nfunc (n *node) insert(values []string, template *template) {\n\t// Add the end, set the template\n\tif len(values) == 0 {\n\t\tn.template = template\n\t\treturn\n\t}\n\n\t// See if the the current element already exists in the tree. 
If so, insert the\n\t// into that sub-tree\n\tfor _, v := range n.children {\n\t\tif v.value == values[0] {\n\t\t\tv.insert(values[1:], template)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// New element, add it to the tree and sort the children\n\tnewNode := &node{value: values[0]}\n\tn.children = append(n.children, newNode)\n\tsort.Sort(&n.children)\n\n\t// Inherit template if value is wildcard\n\tif values[0] == \"*\" {\n\t\tnewNode.template = n.template\n\t}\n\n\t// Now insert the rest of the tree into the new element\n\tnewNode.insert(values[1:], template)\n}\n\n// Insert inserts the given string template into the tree.  The filter string is separated\n// on \".\" and each part is used as the path in the tree.\nfunc (n *node) Insert(filter string, template *template) {\n\tn.insert(strings.Split(filter, \".\"), template)\n}\n\nfunc (n *node) search(lineParts []string) *template {\n\t// Nothing to search\n\tif len(lineParts) == 0 || len(n.children) == 0 {\n\t\treturn n.template\n\t}\n\n\t// If last element is a wildcard, don't include in this search since it's sorted\n\t// to the end but lexicographically it would not always be and sort.Search assumes\n\t// the slice is sorted.\n\tlength := len(n.children)\n\tif n.children[length-1].value == \"*\" {\n\t\tlength--\n\t}\n\n\t// Find the index of child with an exact match\n\ti := sort.Search(length, func(i int) bool {\n\t\treturn n.children[i].value >= lineParts[0]\n\t})\n\n\t// Found an exact match, so search that child sub-tree\n\tif i < len(n.children) && n.children[i].value == lineParts[0] {\n\t\treturn n.children[i].search(lineParts[1:])\n\t}\n\t// Not an exact match, see if we have a wildcard child to search\n\tif n.children[len(n.children)-1].value == \"*\" {\n\t\treturn n.children[len(n.children)-1].search(lineParts[1:])\n\t}\n\treturn n.template\n}\n\nfunc (n *node) Search(line string) *template {\n\treturn n.search(strings.Split(line, \".\"))\n}\n\ntype nodes []*node\n\n// Less returns a boolean indicating whether 
the filter at position j\n// is less than the filter at position k.  Filters are order by string\n// comparison of each component parts.  A wildcard value \"*\" is never\n// less than a non-wildcard value.\n//\n// For example, the filters:\n//             \"*.*\"\n//             \"servers.*\"\n//             \"servers.localhost\"\n//             \"*.localhost\"\n//\n// Would be sorted as:\n//             \"servers.localhost\"\n//             \"servers.*\"\n//             \"*.localhost\"\n//             \"*.*\"\nfunc (n *nodes) Less(j, k int) bool {\n\tif (*n)[j].value == \"*\" && (*n)[k].value != \"*\" {\n\t\treturn false\n\t}\n\n\tif (*n)[j].value != \"*\" && (*n)[k].value == \"*\" {\n\t\treturn true\n\t}\n\n\treturn (*n)[j].value < (*n)[k].value\n}\n\nfunc (n *nodes) Swap(i, j int) { (*n)[i], (*n)[j] = (*n)[j], (*n)[i] }\nfunc (n *nodes) Len() int      { return len(*n) }\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/graphite/parser_test.go",
    "content": "package graphite_test\n\nimport (\n\t\"reflect\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/services/graphite\"\n)\n\nfunc BenchmarkParse(b *testing.B) {\n\tp, err := graphite.NewParser([]string{\n\t\t\"*.* .wrong.measurement*\",\n\t\t\"servers.* .host.measurement*\",\n\t\t\"servers.localhost .host.measurement*\",\n\t\t\"*.localhost .host.measurement*\",\n\t\t\"*.*.cpu .host.measurement*\",\n\t\t\"a.b.c .host.measurement*\",\n\t\t\"influxd.*.foo .host.measurement*\",\n\t\t\"prod.*.mem .host.measurement*\",\n\t}, nil)\n\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\tfor i := 0; i < b.N; i++ {\n\t\tp.Parse(\"servers.localhost.cpu.load 11 1435077219\")\n\t}\n}\n\nfunc TestTemplateApply(t *testing.T) {\n\tvar tests = []struct {\n\t\ttest        string\n\t\tinput       string\n\t\ttemplate    string\n\t\tmeasurement string\n\t\ttags        map[string]string\n\t\terr         string\n\t}{\n\t\t{\n\t\t\ttest:        \"metric only\",\n\t\t\tinput:       \"cpu\",\n\t\t\ttemplate:    \"measurement\",\n\t\t\tmeasurement: \"cpu\",\n\t\t},\n\t\t{\n\t\t\ttest:        \"metric with single series\",\n\t\t\tinput:       \"cpu.server01\",\n\t\t\ttemplate:    \"measurement.hostname\",\n\t\t\tmeasurement: \"cpu\",\n\t\t\ttags:        map[string]string{\"hostname\": \"server01\"},\n\t\t},\n\t\t{\n\t\t\ttest:        \"metric with multiple series\",\n\t\t\tinput:       \"cpu.us-west.server01\",\n\t\t\ttemplate:    \"measurement.region.hostname\",\n\t\t\tmeasurement: \"cpu\",\n\t\t\ttags:        map[string]string{\"hostname\": \"server01\", \"region\": \"us-west\"},\n\t\t},\n\t\t{\n\t\t\ttest:        \"metric with multiple tags\",\n\t\t\tinput:       \"server01.example.org.cpu.us-west\",\n\t\t\ttemplate:    \"hostname.hostname.hostname.measurement.region\",\n\t\t\tmeasurement: \"cpu\",\n\t\t\ttags:        map[string]string{\"hostname\": 
\"server01.example.org\", \"region\": \"us-west\"},\n\t\t},\n\t\t{\n\t\t\ttest: \"no metric\",\n\t\t\ttags: make(map[string]string),\n\t\t\terr:  `no measurement specified for template. \"\"`,\n\t\t},\n\t\t{\n\t\t\ttest:        \"ignore unnamed\",\n\t\t\tinput:       \"foo.cpu\",\n\t\t\ttemplate:    \"measurement\",\n\t\t\tmeasurement: \"foo\",\n\t\t\ttags:        make(map[string]string),\n\t\t},\n\t\t{\n\t\t\ttest:        \"name shorter than template\",\n\t\t\tinput:       \"foo\",\n\t\t\ttemplate:    \"measurement.A.B.C\",\n\t\t\tmeasurement: \"foo\",\n\t\t\ttags:        make(map[string]string),\n\t\t},\n\t\t{\n\t\t\ttest:        \"wildcard measurement at end\",\n\t\t\tinput:       \"prod.us-west.server01.cpu.load\",\n\t\t\ttemplate:    \"env.zone.host.measurement*\",\n\t\t\tmeasurement: \"cpu.load\",\n\t\t\ttags:        map[string]string{\"env\": \"prod\", \"zone\": \"us-west\", \"host\": \"server01\"},\n\t\t},\n\t\t{\n\t\t\ttest:        \"skip fields\",\n\t\t\tinput:       \"ignore.us-west.ignore-this-too.cpu.load\",\n\t\t\ttemplate:    \".zone..measurement*\",\n\t\t\tmeasurement: \"cpu.load\",\n\t\t\ttags:        map[string]string{\"zone\": \"us-west\"},\n\t\t},\n\t\t{\n\t\t\ttest:        \"conjoined fields\",\n\t\t\tinput:       \"prod.us-west.server01.cpu.util.idle.percent\",\n\t\t\ttemplate:    \"env.zone.host.measurement.measurement.field*\",\n\t\t\tmeasurement: \"cpu.util\",\n\t\t\ttags:        map[string]string{\"env\": \"prod\", \"zone\": \"us-west\", \"host\": \"server01\"},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\ttmpl, err := graphite.NewTemplate(test.template, nil, graphite.DefaultSeparator)\n\t\tif errstr(err) != test.err {\n\t\t\tt.Fatalf(\"err does not match.  
expected %v, got %v\", test.err, err)\n\t\t}\n\t\tif err != nil {\n\t\t\t// If we erred out,it was intended and the following tests won't work\n\t\t\tcontinue\n\t\t}\n\n\t\tmeasurement, tags, _, _ := tmpl.Apply(test.input)\n\t\tif measurement != test.measurement {\n\t\t\tt.Fatalf(\"name parse failer.  expected %v, got %v\", test.measurement, measurement)\n\t\t}\n\t\tif len(tags) != len(test.tags) {\n\t\t\tt.Fatalf(\"unexpected number of tags.  expected %v, got %v\", test.tags, tags)\n\t\t}\n\t\tfor k, v := range test.tags {\n\t\t\tif tags[k] != v {\n\t\t\t\tt.Fatalf(\"unexpected tag value for tags[%s].  expected %q, got %q\", k, v, tags[k])\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestParseMissingMeasurement(t *testing.T) {\n\t_, err := graphite.NewParser([]string{\"a.b.c\"}, nil)\n\tif err == nil {\n\t\tt.Fatalf(\"expected error creating parser, got nil\")\n\t}\n}\n\nfunc TestParse(t *testing.T) {\n\ttestTime := time.Now().Round(time.Second)\n\tepochTime := testTime.Unix()\n\tstrTime := strconv.FormatInt(epochTime, 10)\n\n\tvar tests = []struct {\n\t\ttest        string\n\t\tinput       string\n\t\tmeasurement string\n\t\ttags        map[string]string\n\t\tvalue       float64\n\t\ttime        time.Time\n\t\ttemplate    string\n\t\terr         string\n\t}{\n\t\t{\n\t\t\ttest:        \"normal case\",\n\t\t\tinput:       `cpu.foo.bar 50 ` + strTime,\n\t\t\ttemplate:    \"measurement.foo.bar\",\n\t\t\tmeasurement: \"cpu\",\n\t\t\ttags: map[string]string{\n\t\t\t\t\"foo\": \"foo\",\n\t\t\t\t\"bar\": \"bar\",\n\t\t\t},\n\t\t\tvalue: 50,\n\t\t\ttime:  testTime,\n\t\t},\n\t\t{\n\t\t\ttest:        \"metric only with float value\",\n\t\t\tinput:       `cpu 50.554 ` + strTime,\n\t\t\tmeasurement: \"cpu\",\n\t\t\ttemplate:    \"measurement\",\n\t\t\tvalue:       50.554,\n\t\t\ttime:        testTime,\n\t\t},\n\t\t{\n\t\t\ttest:     \"missing metric\",\n\t\t\tinput:    `1419972457825`,\n\t\t\ttemplate: \"measurement\",\n\t\t\terr:      `received \"1419972457825\" which doesn't have 
required fields`,\n\t\t},\n\t\t{\n\t\t\ttest:     \"should error parsing invalid float\",\n\t\t\tinput:    `cpu 50.554z 1419972457825`,\n\t\t\ttemplate: \"measurement\",\n\t\t\terr:      `field \"cpu\" value: strconv.ParseFloat: parsing \"50.554z\": invalid syntax`,\n\t\t},\n\t\t{\n\t\t\ttest:     \"should error parsing invalid int\",\n\t\t\tinput:    `cpu 50z 1419972457825`,\n\t\t\ttemplate: \"measurement\",\n\t\t\terr:      `field \"cpu\" value: strconv.ParseFloat: parsing \"50z\": invalid syntax`,\n\t\t},\n\t\t{\n\t\t\ttest:     \"should error parsing invalid time\",\n\t\t\tinput:    `cpu 50.554 14199724z57825`,\n\t\t\ttemplate: \"measurement\",\n\t\t\terr:      `field \"cpu\" time: strconv.ParseFloat: parsing \"14199724z57825\": invalid syntax`,\n\t\t},\n\t\t{\n\t\t\ttest:     \"measurement* and field* (invalid)\",\n\t\t\tinput:    `prod.us-west.server01.cpu.util.idle.percent 99.99 1419972457825`,\n\t\t\ttemplate: \"env.zone.host.measurement*.field*\",\n\t\t\terr:      `either 'field*' or 'measurement*' can be used in each template (but not both together): \"env.zone.host.measurement*.field*\"`,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tp, err := graphite.NewParser([]string{test.template}, nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error creating graphite parser: %v\", err)\n\t\t}\n\n\t\tpoint, err := p.Parse(test.input)\n\t\tif errstr(err) != test.err {\n\t\t\tt.Fatalf(\"err does not match.  expected %v, got %v\", test.err, err)\n\t\t}\n\t\tif err != nil {\n\t\t\t// If we erred out,it was intended and the following tests won't work\n\t\t\tcontinue\n\t\t}\n\t\tif string(point.Name()) != test.measurement {\n\t\t\tt.Fatalf(\"name parse failer.  expected %v, got %v\", test.measurement, string(point.Name()))\n\t\t}\n\t\tif len(point.Tags()) != len(test.tags) {\n\t\t\tt.Fatalf(\"tags len mismatch.  
expected %d, got %d\", len(test.tags), len(point.Tags()))\n\t\t}\n\t\tfields, err := point.Fields()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tf := fields[\"value\"].(float64)\n\t\tif fields[\"value\"] != f {\n\t\t\tt.Fatalf(\"floatValue value mismatch.  expected %v, got %v\", test.value, f)\n\t\t}\n\t\tif point.Time().UnixNano()/1000000 != test.time.UnixNano()/1000000 {\n\t\t\tt.Fatalf(\"time value mismatch.  expected %v, got %v\", test.time.UnixNano(), point.Time().UnixNano())\n\t\t}\n\t}\n}\n\nfunc TestParseNaN(t *testing.T) {\n\tp, err := graphite.NewParser([]string{\"measurement*\"}, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\t_, err = p.Parse(\"servers.localhost.cpu_load NaN 1435077219\")\n\tif err == nil {\n\t\tt.Fatalf(\"expected error. got nil\")\n\t}\n\n\tif _, ok := err.(*graphite.UnsupportedValueError); !ok {\n\t\tt.Fatalf(\"expected *graphite.ErrUnsupportedValue, got %v\", reflect.TypeOf(err))\n\t}\n}\n\nfunc TestFilterMatchDefault(t *testing.T) {\n\tp, err := graphite.NewParser([]string{\"servers.localhost .host.measurement*\"}, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\texp := models.MustNewPoint(\"miss.servers.localhost.cpu_load\",\n\t\tmodels.NewTags(map[string]string{}),\n\t\tmodels.Fields{\"value\": float64(11)},\n\t\ttime.Unix(1435077219, 0))\n\n\tpt, err := p.Parse(\"miss.servers.localhost.cpu_load 11 1435077219\")\n\tif err != nil {\n\t\tt.Fatalf(\"parse error: %v\", err)\n\t}\n\n\tif exp.String() != pt.String() {\n\t\tt.Errorf(\"parse mismatch: got %v, exp %v\", pt.String(), exp.String())\n\t}\n}\n\nfunc TestFilterMatchMultipleMeasurement(t *testing.T) {\n\tp, err := graphite.NewParser([]string{\"servers.localhost .host.measurement.measurement*\"}, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\texp := 
models.MustNewPoint(\"cpu.cpu_load.10\",\n\t\tmodels.NewTags(map[string]string{\"host\": \"localhost\"}),\n\t\tmodels.Fields{\"value\": float64(11)},\n\t\ttime.Unix(1435077219, 0))\n\n\tpt, err := p.Parse(\"servers.localhost.cpu.cpu_load.10 11 1435077219\")\n\tif err != nil {\n\t\tt.Fatalf(\"parse error: %v\", err)\n\t}\n\n\tif exp.String() != pt.String() {\n\t\tt.Errorf(\"parse mismatch: got %v, exp %v\", pt.String(), exp.String())\n\t}\n}\n\nfunc TestFilterMatchMultipleMeasurementSeparator(t *testing.T) {\n\tp, err := graphite.NewParserWithOptions(graphite.Options{\n\t\tTemplates: []string{\"servers.localhost .host.measurement.measurement*\"},\n\t\tSeparator: \"_\",\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\texp := models.MustNewPoint(\"cpu_cpu_load_10\",\n\t\tmodels.NewTags(map[string]string{\"host\": \"localhost\"}),\n\t\tmodels.Fields{\"value\": float64(11)},\n\t\ttime.Unix(1435077219, 0))\n\n\tpt, err := p.Parse(\"servers.localhost.cpu.cpu_load.10 11 1435077219\")\n\tif err != nil {\n\t\tt.Fatalf(\"parse error: %v\", err)\n\t}\n\n\tif exp.String() != pt.String() {\n\t\tt.Errorf(\"parse mismatch: got %v, exp %v\", pt.String(), exp.String())\n\t}\n}\n\nfunc TestFilterMatchSingle(t *testing.T) {\n\tp, err := graphite.NewParser([]string{\"servers.localhost .host.measurement*\"}, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\texp := models.MustNewPoint(\"cpu_load\",\n\t\tmodels.NewTags(map[string]string{\"host\": \"localhost\"}),\n\t\tmodels.Fields{\"value\": float64(11)},\n\t\ttime.Unix(1435077219, 0))\n\n\tpt, err := p.Parse(\"servers.localhost.cpu_load 11 1435077219\")\n\tif err != nil {\n\t\tt.Fatalf(\"parse error: %v\", err)\n\t}\n\n\tif exp.String() != pt.String() {\n\t\tt.Errorf(\"parse mismatch: got %v, exp %v\", pt.String(), exp.String())\n\t}\n}\n\nfunc TestParseNoMatch(t *testing.T) {\n\tp, err := graphite.NewParser([]string{\"servers.*.cpu 
.host.measurement.cpu.measurement\"}, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\texp := models.MustNewPoint(\"servers.localhost.memory.VmallocChunk\",\n\t\tmodels.NewTags(map[string]string{}),\n\t\tmodels.Fields{\"value\": float64(11)},\n\t\ttime.Unix(1435077219, 0))\n\n\tpt, err := p.Parse(\"servers.localhost.memory.VmallocChunk 11 1435077219\")\n\tif err != nil {\n\t\tt.Fatalf(\"parse error: %v\", err)\n\t}\n\n\tif exp.String() != pt.String() {\n\t\tt.Errorf(\"parse mismatch: got %v, exp %v\", pt.String(), exp.String())\n\t}\n}\n\nfunc TestFilterMatchWildcard(t *testing.T) {\n\tp, err := graphite.NewParser([]string{\"servers.* .host.measurement*\"}, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\texp := models.MustNewPoint(\"cpu_load\",\n\t\tmodels.NewTags(map[string]string{\"host\": \"localhost\"}),\n\t\tmodels.Fields{\"value\": float64(11)},\n\t\ttime.Unix(1435077219, 0))\n\n\tpt, err := p.Parse(\"servers.localhost.cpu_load 11 1435077219\")\n\tif err != nil {\n\t\tt.Fatalf(\"parse error: %v\", err)\n\t}\n\n\tif exp.String() != pt.String() {\n\t\tt.Errorf(\"parse mismatch: got %v, exp %v\", pt.String(), exp.String())\n\t}\n}\n\nfunc TestFilterMatchExactBeforeWildcard(t *testing.T) {\n\tp, err := graphite.NewParser([]string{\n\t\t\"servers.* .wrong.measurement*\",\n\t\t\"servers.localhost .host.measurement*\"}, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\texp := models.MustNewPoint(\"cpu_load\",\n\t\tmodels.NewTags(map[string]string{\"host\": \"localhost\"}),\n\t\tmodels.Fields{\"value\": float64(11)},\n\t\ttime.Unix(1435077219, 0))\n\n\tpt, err := p.Parse(\"servers.localhost.cpu_load 11 1435077219\")\n\tif err != nil {\n\t\tt.Fatalf(\"parse error: %v\", err)\n\t}\n\n\tif exp.String() != pt.String() {\n\t\tt.Errorf(\"parse mismatch: got %v, exp %v\", pt.String(), exp.String())\n\t}\n}\n\nfunc 
TestFilterMatchMostLongestFilter(t *testing.T) {\n\tp, err := graphite.NewParser([]string{\n\t\t\"*.* .wrong.measurement*\",\n\t\t\"servers.* .wrong.measurement*\",\n\t\t\"servers.localhost .wrong.measurement*\",\n\t\t\"servers.localhost.cpu .host.resource.measurement*\", // should match this\n\t\t\"*.localhost .wrong.measurement*\",\n\t}, nil)\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\texp := models.MustNewPoint(\"cpu_load\",\n\t\tmodels.NewTags(map[string]string{\"host\": \"localhost\", \"resource\": \"cpu\"}),\n\t\tmodels.Fields{\"value\": float64(11)},\n\t\ttime.Unix(1435077219, 0))\n\n\tpt, err := p.Parse(\"servers.localhost.cpu.cpu_load 11 1435077219\")\n\tif err != nil {\n\t\tt.Fatalf(\"parse error: %v\", err)\n\t}\n\n\tif exp.String() != pt.String() {\n\t\tt.Errorf(\"parse mismatch: got %v, exp %v\", pt.String(), exp.String())\n\t}\n}\n\nfunc TestFilterMatchMultipleWildcards(t *testing.T) {\n\tp, err := graphite.NewParser([]string{\n\t\t\"*.* .wrong.measurement*\",\n\t\t\"servers.* .host.measurement*\", // should match this\n\t\t\"servers.localhost .wrong.measurement*\",\n\t\t\"*.localhost .wrong.measurement*\",\n\t}, nil)\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\texp := models.MustNewPoint(\"cpu_load\",\n\t\tmodels.NewTags(map[string]string{\"host\": \"server01\"}),\n\t\tmodels.Fields{\"value\": float64(11)},\n\t\ttime.Unix(1435077219, 0))\n\n\tpt, err := p.Parse(\"servers.server01.cpu_load 11 1435077219\")\n\tif err != nil {\n\t\tt.Fatalf(\"parse error: %v\", err)\n\t}\n\n\tif exp.String() != pt.String() {\n\t\tt.Errorf(\"parse mismatch: got %v, exp %v\", pt.String(), exp.String())\n\t}\n}\n\nfunc TestParseDefaultTags(t *testing.T) {\n\tp, err := graphite.NewParser([]string{\"servers.localhost .host.measurement*\"}, models.NewTags(map[string]string{\n\t\t\"region\": \"us-east\",\n\t\t\"zone\":   \"1c\",\n\t\t\"host\":   \"should not 
set\",\n\t}))\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\texp := models.MustNewPoint(\"cpu_load\",\n\t\tmodels.NewTags(map[string]string{\"host\": \"localhost\", \"region\": \"us-east\", \"zone\": \"1c\"}),\n\t\tmodels.Fields{\"value\": float64(11)},\n\t\ttime.Unix(1435077219, 0))\n\n\tpt, err := p.Parse(\"servers.localhost.cpu_load 11 1435077219\")\n\tif err != nil {\n\t\tt.Fatalf(\"parse error: %v\", err)\n\t}\n\n\tif exp.String() != pt.String() {\n\t\tt.Errorf(\"parse mismatch: got %v, exp %v\", pt.String(), exp.String())\n\t}\n}\n\nfunc TestParseDefaultTemplateTags(t *testing.T) {\n\tp, err := graphite.NewParser([]string{\"servers.localhost .host.measurement* zone=1c\"}, models.NewTags(map[string]string{\n\t\t\"region\": \"us-east\",\n\t\t\"host\":   \"should not set\",\n\t}))\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\texp := models.MustNewPoint(\"cpu_load\",\n\t\tmodels.NewTags(map[string]string{\"host\": \"localhost\", \"region\": \"us-east\", \"zone\": \"1c\"}),\n\t\tmodels.Fields{\"value\": float64(11)},\n\t\ttime.Unix(1435077219, 0))\n\n\tpt, err := p.Parse(\"servers.localhost.cpu_load 11 1435077219\")\n\tif err != nil {\n\t\tt.Fatalf(\"parse error: %v\", err)\n\t}\n\n\tif exp.String() != pt.String() {\n\t\tt.Errorf(\"parse mismatch: got %v, exp %v\", pt.String(), exp.String())\n\t}\n}\n\nfunc TestParseDefaultTemplateTagsOverridGlobal(t *testing.T) {\n\tp, err := graphite.NewParser([]string{\"servers.localhost .host.measurement* zone=1c,region=us-east\"}, models.NewTags(map[string]string{\n\t\t\"region\": \"shot not be set\",\n\t\t\"host\":   \"should not set\",\n\t}))\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\texp := models.MustNewPoint(\"cpu_load\",\n\t\tmodels.NewTags(map[string]string{\"host\": \"localhost\", \"region\": \"us-east\", \"zone\": \"1c\"}),\n\t\tmodels.Fields{\"value\": 
float64(11)},\n\t\ttime.Unix(1435077219, 0))\n\n\tpt, err := p.Parse(\"servers.localhost.cpu_load 11 1435077219\")\n\tif err != nil {\n\t\tt.Fatalf(\"parse error: %v\", err)\n\t}\n\n\tif exp.String() != pt.String() {\n\t\tt.Errorf(\"parse mismatch: got %v, exp %v\", pt.String(), exp.String())\n\t}\n}\n\nfunc TestParseTemplateWhitespace(t *testing.T) {\n\tp, err := graphite.NewParser([]string{\"servers.localhost        .host.measurement*           zone=1c\"}, models.NewTags(map[string]string{\n\t\t\"region\": \"us-east\",\n\t\t\"host\":   \"should not set\",\n\t}))\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\texp := models.MustNewPoint(\"cpu_load\",\n\t\tmodels.NewTags(map[string]string{\"host\": \"localhost\", \"region\": \"us-east\", \"zone\": \"1c\"}),\n\t\tmodels.Fields{\"value\": float64(11)},\n\t\ttime.Unix(1435077219, 0))\n\n\tpt, err := p.Parse(\"servers.localhost.cpu_load 11 1435077219\")\n\tif err != nil {\n\t\tt.Fatalf(\"parse error: %v\", err)\n\t}\n\n\tif exp.String() != pt.String() {\n\t\tt.Errorf(\"parse mismatch: got %v, exp %v\", pt.String(), exp.String())\n\t}\n}\n\n// Test basic functionality of ApplyTemplate\nfunc TestApplyTemplate(t *testing.T) {\n\to := graphite.Options{\n\t\tSeparator: \"_\",\n\t\tTemplates: []string{\"current.* measurement.measurement\"},\n\t}\n\tp, err := graphite.NewParserWithOptions(o)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\tmeasurement, _, _, _ := p.ApplyTemplate(\"current.users\")\n\tif measurement != \"current_users\" {\n\t\tt.Errorf(\"Parser.ApplyTemplate unexpected result. 
got %s, exp %s\",\n\t\t\tmeasurement, \"current_users\")\n\t}\n}\n\n// Test basic functionality of ApplyTemplate\nfunc TestApplyTemplateNoMatch(t *testing.T) {\n\to := graphite.Options{\n\t\tSeparator: \"_\",\n\t\tTemplates: []string{\"foo.bar measurement.measurement\"},\n\t}\n\tp, err := graphite.NewParserWithOptions(o)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\tmeasurement, _, _, _ := p.ApplyTemplate(\"current.users\")\n\tif measurement != \"current.users\" {\n\t\tt.Errorf(\"Parser.ApplyTemplate unexpected result. got %s, exp %s\",\n\t\t\tmeasurement, \"current.users\")\n\t}\n}\n\n// Test that most specific template is chosen\nfunc TestApplyTemplateSpecific(t *testing.T) {\n\to := graphite.Options{\n\t\tSeparator: \"_\",\n\t\tTemplates: []string{\n\t\t\t\"current.* measurement.measurement\",\n\t\t\t\"current.*.* measurement.measurement.service\",\n\t\t},\n\t}\n\tp, err := graphite.NewParserWithOptions(o)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\tmeasurement, tags, _, _ := p.ApplyTemplate(\"current.users.facebook\")\n\tif measurement != \"current_users\" {\n\t\tt.Errorf(\"Parser.ApplyTemplate unexpected result. 
got %s, exp %s\",\n\t\t\tmeasurement, \"current_users\")\n\t}\n\tservice, ok := tags[\"service\"]\n\tif !ok {\n\t\tt.Error(\"Expected for template to apply a 'service' tag, but not found\")\n\t}\n\tif service != \"facebook\" {\n\t\tt.Errorf(\"Expected service='facebook' tag, got service='%s'\", service)\n\t}\n}\n\n// Test that most specific template is N/A\nfunc TestApplyTemplateSpecificIsNA(t *testing.T) {\n\to := graphite.Options{\n\t\tSeparator: \"_\",\n\t\tTemplates: []string{\n\t\t\t\"current.* measurement.service\",\n\t\t\t\"current.*.*.test measurement.measurement.service\",\n\t\t},\n\t}\n\tp, err := graphite.NewParserWithOptions(o)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\tmeasurement, _, _, _ := p.ApplyTemplate(\"current.users.facebook\")\n\tif measurement != \"current\" {\n\t\tt.Errorf(\"Parser.ApplyTemplate unexpected result. got %s, exp %s\",\n\t\t\tmeasurement, \"current\")\n\t}\n}\n\nfunc TestApplyTemplateTags(t *testing.T) {\n\to := graphite.Options{\n\t\tSeparator: \"_\",\n\t\tTemplates: []string{\"current.* measurement.measurement region=us-west\"},\n\t}\n\tp, err := graphite.NewParserWithOptions(o)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\tmeasurement, tags, _, _ := p.ApplyTemplate(\"current.users\")\n\tif measurement != \"current_users\" {\n\t\tt.Errorf(\"Parser.ApplyTemplate unexpected result. 
got %s, exp %s\",\n\t\t\tmeasurement, \"current_users\")\n\t}\n\n\tregion, ok := tags[\"region\"]\n\tif !ok {\n\t\tt.Error(\"Expected for template to apply a 'region' tag, but not found\")\n\t}\n\tif region != \"us-west\" {\n\t\tt.Errorf(\"Expected region='us-west' tag, got region='%s'\", region)\n\t}\n}\n\nfunc TestApplyTemplateField(t *testing.T) {\n\to := graphite.Options{\n\t\tSeparator: \"_\",\n\t\tTemplates: []string{\"current.* measurement.measurement.field\"},\n\t}\n\tp, err := graphite.NewParserWithOptions(o)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\tmeasurement, _, field, err := p.ApplyTemplate(\"current.users.logged_in\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif measurement != \"current_users\" {\n\t\tt.Errorf(\"Parser.ApplyTemplate unexpected result. got %s, exp %s\",\n\t\t\tmeasurement, \"current_users\")\n\t}\n\n\tif field != \"logged_in\" {\n\t\tt.Errorf(\"Parser.ApplyTemplate unexpected result. got %s, exp %s\",\n\t\t\tfield, \"logged_in\")\n\t}\n}\n\nfunc TestApplyTemplateFieldError(t *testing.T) {\n\to := graphite.Options{\n\t\tSeparator: \"_\",\n\t\tTemplates: []string{\"current.* measurement.field.field\"},\n\t}\n\tp, err := graphite.NewParserWithOptions(o)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating parser, got %v\", err)\n\t}\n\n\t_, _, _, err = p.ApplyTemplate(\"current.users.logged_in\")\n\tif err == nil {\n\t\tt.Errorf(\"Parser.ApplyTemplate unexpected result. got %s, exp %s\", err,\n\t\t\t\"'field' can only be used once in each template: current.users.logged_in\")\n\t}\n}\n\n// Test Helpers\nfunc errstr(err error) string {\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\treturn \"\"\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/graphite/service.go",
    "content": "// Package graphite provides a service for InfluxDB to ingest data via the graphite protocol.\npackage graphite // import \"github.com/influxdata/influxdb/services/graphite\"\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"math\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t\"github.com/uber-go/zap\"\n)\n\nconst udpBufferSize = 65536\n\n// statistics gathered by the graphite package.\nconst (\n\tstatPointsReceived      = \"pointsRx\"\n\tstatBytesReceived       = \"bytesRx\"\n\tstatPointsParseFail     = \"pointsParseFail\"\n\tstatPointsNaNFail       = \"pointsNaNFail\"\n\tstatBatchesTransmitted  = \"batchesTx\"\n\tstatPointsTransmitted   = \"pointsTx\"\n\tstatBatchesTransmitFail = \"batchesTxFail\"\n\tstatConnectionsActive   = \"connsActive\"\n\tstatConnectionsHandled  = \"connsHandled\"\n)\n\ntype tcpConnection struct {\n\tconn        net.Conn\n\tconnectTime time.Time\n}\n\nfunc (c *tcpConnection) Close() {\n\tc.conn.Close()\n}\n\n// Service represents a Graphite service.\ntype Service struct {\n\tbindAddress     string\n\tdatabase        string\n\tretentionPolicy string\n\tprotocol        string\n\tbatchSize       int\n\tbatchPending    int\n\tbatchTimeout    time.Duration\n\tudpReadBuffer   int\n\n\tbatcher *tsdb.PointBatcher\n\tparser  *Parser\n\n\tlogger      zap.Logger\n\tstats       *Statistics\n\tdefaultTags models.StatisticTags\n\n\ttcpConnectionsMu sync.Mutex\n\ttcpConnections   map[string]*tcpConnection\n\tdiagsKey         string\n\n\tln      net.Listener\n\taddr    net.Addr\n\tudpConn *net.UDPConn\n\n\twg sync.WaitGroup\n\n\tmu    sync.RWMutex\n\tready bool          // Has the required database been created?\n\tdone  chan struct{} // Is the service closing or closed?\n\n\tMonitor interface 
{\n\t\tRegisterDiagnosticsClient(name string, client diagnostics.Client)\n\t\tDeregisterDiagnosticsClient(name string)\n\t}\n\tPointsWriter interface {\n\t\tWritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error\n\t}\n\tMetaClient interface {\n\t\tCreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error)\n\t\tCreateRetentionPolicy(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error)\n\t\tDatabase(name string) *meta.DatabaseInfo\n\t\tRetentionPolicy(database, name string) (*meta.RetentionPolicyInfo, error)\n\t}\n}\n\n// NewService returns an instance of the Graphite service.\nfunc NewService(c Config) (*Service, error) {\n\t// Use defaults where necessary.\n\td := c.WithDefaults()\n\n\ts := Service{\n\t\tbindAddress:     d.BindAddress,\n\t\tdatabase:        d.Database,\n\t\tretentionPolicy: d.RetentionPolicy,\n\t\tprotocol:        d.Protocol,\n\t\tbatchSize:       d.BatchSize,\n\t\tbatchPending:    d.BatchPending,\n\t\tudpReadBuffer:   d.UDPReadBuffer,\n\t\tbatchTimeout:    time.Duration(d.BatchTimeout),\n\t\tlogger:          zap.New(zap.NullEncoder()),\n\t\tstats:           &Statistics{},\n\t\tdefaultTags:     models.StatisticTags{\"proto\": d.Protocol, \"bind\": d.BindAddress},\n\t\ttcpConnections:  make(map[string]*tcpConnection),\n\t\tdiagsKey:        strings.Join([]string{\"graphite\", d.Protocol, d.BindAddress}, \":\"),\n\t}\n\n\tparser, err := NewParserWithOptions(Options{\n\t\tTemplates:   d.Templates,\n\t\tDefaultTags: d.DefaultTags(),\n\t\tSeparator:   d.Separator})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts.parser = parser\n\n\treturn &s, nil\n}\n\n// Open starts the Graphite input processing data.\nfunc (s *Service) Open() error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif !s.closed() {\n\t\treturn nil // Already open.\n\t}\n\ts.done = make(chan 
struct{})\n\n\ts.logger.Info(fmt.Sprintf(\"Starting graphite service, batch size %d, batch timeout %s\", s.batchSize, s.batchTimeout))\n\n\t// Register diagnostics if a Monitor service is available.\n\tif s.Monitor != nil {\n\t\ts.Monitor.RegisterDiagnosticsClient(s.diagsKey, s)\n\t}\n\n\ts.batcher = tsdb.NewPointBatcher(s.batchSize, s.batchPending, s.batchTimeout)\n\ts.batcher.Start()\n\n\t// Start processing batches.\n\ts.wg.Add(1)\n\tgo s.processBatches(s.batcher)\n\n\tvar err error\n\tif strings.ToLower(s.protocol) == \"tcp\" {\n\t\ts.addr, err = s.openTCPServer()\n\t} else if strings.ToLower(s.protocol) == \"udp\" {\n\t\ts.addr, err = s.openUDPServer()\n\t} else {\n\t\treturn fmt.Errorf(\"unrecognized Graphite input protocol %s\", s.protocol)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.logger.Info(fmt.Sprintf(\"Listening on %s: %s\", strings.ToUpper(s.protocol), s.addr.String()))\n\treturn nil\n}\nfunc (s *Service) closeAllConnections() {\n\ts.tcpConnectionsMu.Lock()\n\tdefer s.tcpConnectionsMu.Unlock()\n\tfor _, c := range s.tcpConnections {\n\t\tc.Close()\n\t}\n}\n\n// Close stops all data processing on the Graphite input.\nfunc (s *Service) Close() error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif s.closed() {\n\t\treturn nil // Already closed.\n\t}\n\tclose(s.done)\n\n\ts.closeAllConnections()\n\n\tif s.ln != nil {\n\t\ts.ln.Close()\n\t}\n\tif s.udpConn != nil {\n\t\ts.udpConn.Close()\n\t}\n\n\tif s.batcher != nil {\n\t\ts.batcher.Stop()\n\t}\n\n\tif s.Monitor != nil {\n\t\ts.Monitor.DeregisterDiagnosticsClient(s.diagsKey)\n\t}\n\n\ts.wg.Wait()\n\ts.done = nil\n\n\treturn nil\n}\n\n// Closed returns true if the service is currently closed.\nfunc (s *Service) Closed() bool {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\treturn s.closed()\n}\n\nfunc (s *Service) closed() bool {\n\tselect {\n\tcase <-s.done:\n\t\t// Service is closing.\n\t\treturn true\n\tdefault:\n\t}\n\treturn s.done == nil\n}\n\n// createInternalStorage ensures that the required 
database has been created.\nfunc (s *Service) createInternalStorage() error {\n\ts.mu.RLock()\n\tready := s.ready\n\ts.mu.RUnlock()\n\tif ready {\n\t\treturn nil\n\t}\n\n\tif db := s.MetaClient.Database(s.database); db != nil {\n\t\tif rp, _ := s.MetaClient.RetentionPolicy(s.database, s.retentionPolicy); rp == nil {\n\t\t\tspec := meta.RetentionPolicySpec{Name: s.retentionPolicy}\n\t\t\tif _, err := s.MetaClient.CreateRetentionPolicy(s.database, &spec, true); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tspec := meta.RetentionPolicySpec{Name: s.retentionPolicy}\n\t\tif _, err := s.MetaClient.CreateDatabaseWithRetentionPolicy(s.database, &spec); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// The service is now ready.\n\ts.mu.Lock()\n\ts.ready = true\n\ts.mu.Unlock()\n\treturn nil\n}\n\n// WithLogger sets the logger on the service.\nfunc (s *Service) WithLogger(log zap.Logger) {\n\ts.logger = log.With(\n\t\tzap.String(\"service\", \"graphite\"),\n\t\tzap.String(\"addr\", s.bindAddress),\n\t)\n}\n\n// Statistics maintains statistics for the graphite service.\ntype Statistics struct {\n\tPointsReceived      int64\n\tBytesReceived       int64\n\tPointsParseFail     int64\n\tPointsNaNFail       int64\n\tBatchesTransmitted  int64\n\tPointsTransmitted   int64\n\tBatchesTransmitFail int64\n\tActiveConnections   int64\n\tHandledConnections  int64\n}\n\n// Statistics returns statistics for periodic monitoring.\nfunc (s *Service) Statistics(tags map[string]string) []models.Statistic {\n\treturn []models.Statistic{{\n\t\tName: \"graphite\",\n\t\tTags: s.defaultTags.Merge(tags),\n\t\tValues: map[string]interface{}{\n\t\t\tstatPointsReceived:      atomic.LoadInt64(&s.stats.PointsReceived),\n\t\t\tstatBytesReceived:       atomic.LoadInt64(&s.stats.BytesReceived),\n\t\t\tstatPointsParseFail:     atomic.LoadInt64(&s.stats.PointsParseFail),\n\t\t\tstatPointsNaNFail:       atomic.LoadInt64(&s.stats.PointsNaNFail),\n\t\t\tstatBatchesTransmitted:  
atomic.LoadInt64(&s.stats.BatchesTransmitted),\n\t\t\tstatPointsTransmitted:   atomic.LoadInt64(&s.stats.PointsTransmitted),\n\t\t\tstatBatchesTransmitFail: atomic.LoadInt64(&s.stats.BatchesTransmitFail),\n\t\t\tstatConnectionsActive:   atomic.LoadInt64(&s.stats.ActiveConnections),\n\t\t\tstatConnectionsHandled:  atomic.LoadInt64(&s.stats.HandledConnections),\n\t\t},\n\t}}\n}\n\n// Addr returns the address the Service binds to.\nfunc (s *Service) Addr() net.Addr {\n\treturn s.addr\n}\n\n// openTCPServer opens the Graphite input in TCP mode and starts processing data.\nfunc (s *Service) openTCPServer() (net.Addr, error) {\n\tln, err := net.Listen(\"tcp\", s.bindAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts.ln = ln\n\n\ts.wg.Add(1)\n\tgo func() {\n\t\tdefer s.wg.Done()\n\t\tfor {\n\t\t\tconn, err := s.ln.Accept()\n\t\t\tif opErr, ok := err.(*net.OpError); ok && !opErr.Temporary() {\n\t\t\t\ts.logger.Info(\"graphite TCP listener closed\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\ts.logger.Info(\"error accepting TCP connection\", zap.Error(err))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ts.wg.Add(1)\n\t\t\tgo s.handleTCPConnection(conn)\n\t\t}\n\t}()\n\treturn ln.Addr(), nil\n}\n\n// handleTCPConnection services an individual TCP connection for the Graphite input.\nfunc (s *Service) handleTCPConnection(conn net.Conn) {\n\tdefer s.wg.Done()\n\tdefer conn.Close()\n\tdefer atomic.AddInt64(&s.stats.ActiveConnections, -1)\n\tdefer s.untrackConnection(conn)\n\tatomic.AddInt64(&s.stats.ActiveConnections, 1)\n\tatomic.AddInt64(&s.stats.HandledConnections, 1)\n\ts.trackConnection(conn)\n\n\treader := bufio.NewReader(conn)\n\n\tfor {\n\t\t// Read up to the next newline.\n\t\tbuf, err := reader.ReadBytes('\\n')\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t// Trim the buffer, even though there should be no padding\n\t\tline := strings.TrimSpace(string(buf))\n\n\t\tatomic.AddInt64(&s.stats.PointsReceived, 1)\n\t\tatomic.AddInt64(&s.stats.BytesReceived, 
int64(len(buf)))\n\t\ts.handleLine(line)\n\t}\n}\n\nfunc (s *Service) trackConnection(c net.Conn) {\n\ts.tcpConnectionsMu.Lock()\n\tdefer s.tcpConnectionsMu.Unlock()\n\ts.tcpConnections[c.RemoteAddr().String()] = &tcpConnection{\n\t\tconn:        c,\n\t\tconnectTime: time.Now().UTC(),\n\t}\n}\nfunc (s *Service) untrackConnection(c net.Conn) {\n\ts.tcpConnectionsMu.Lock()\n\tdefer s.tcpConnectionsMu.Unlock()\n\tdelete(s.tcpConnections, c.RemoteAddr().String())\n}\n\n// openUDPServer opens the Graphite input in UDP mode and starts processing incoming data.\nfunc (s *Service) openUDPServer() (net.Addr, error) {\n\taddr, err := net.ResolveUDPAddr(\"udp\", s.bindAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts.udpConn, err = net.ListenUDP(\"udp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif s.udpReadBuffer != 0 {\n\t\terr = s.udpConn.SetReadBuffer(s.udpReadBuffer)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to set UDP read buffer to %d: %s\",\n\t\t\t\ts.udpReadBuffer, err)\n\t\t}\n\t}\n\n\tbuf := make([]byte, udpBufferSize)\n\ts.wg.Add(1)\n\tgo func() {\n\t\tdefer s.wg.Done()\n\t\tfor {\n\t\t\tn, _, err := s.udpConn.ReadFromUDP(buf)\n\t\t\tif err != nil {\n\t\t\t\ts.udpConn.Close()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlines := strings.Split(string(buf[:n]), \"\\n\")\n\t\t\tfor _, line := range lines {\n\t\t\t\ts.handleLine(line)\n\t\t\t}\n\t\t\tatomic.AddInt64(&s.stats.PointsReceived, int64(len(lines)))\n\t\t\tatomic.AddInt64(&s.stats.BytesReceived, int64(n))\n\t\t}\n\t}()\n\treturn s.udpConn.LocalAddr(), nil\n}\n\nfunc (s *Service) handleLine(line string) {\n\tif line == \"\" {\n\t\treturn\n\t}\n\n\t// Parse it.\n\tpoint, err := s.parser.Parse(line)\n\tif err != nil {\n\t\tswitch err := err.(type) {\n\t\tcase *UnsupportedValueError:\n\t\t\t// Graphite ignores NaN values with no error.\n\t\t\tif math.IsNaN(err.Value) {\n\t\t\t\tatomic.AddInt64(&s.stats.PointsNaNFail, 
1)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\ts.logger.Info(fmt.Sprintf(\"unable to parse line: %s: %s\", line, err))\n\t\tatomic.AddInt64(&s.stats.PointsParseFail, 1)\n\t\treturn\n\t}\n\n\ts.batcher.In() <- point\n}\n\n// processBatches continually drains the given batcher and writes the batches to the database.\nfunc (s *Service) processBatches(batcher *tsdb.PointBatcher) {\n\tdefer s.wg.Done()\n\tfor {\n\t\tselect {\n\t\tcase batch := <-batcher.Out():\n\t\t\t// Will attempt to create database if not yet created.\n\t\t\tif err := s.createInternalStorage(); err != nil {\n\t\t\t\ts.logger.Info(fmt.Sprintf(\"Required database or retention policy do not yet exist: %s\", err.Error()))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err := s.PointsWriter.WritePointsPrivileged(s.database, s.retentionPolicy, models.ConsistencyLevelAny, batch); err == nil {\n\t\t\t\tatomic.AddInt64(&s.stats.BatchesTransmitted, 1)\n\t\t\t\tatomic.AddInt64(&s.stats.PointsTransmitted, int64(len(batch)))\n\t\t\t} else {\n\t\t\t\ts.logger.Info(fmt.Sprintf(\"failed to write point batch to database %q: %s\", s.database, err))\n\t\t\t\tatomic.AddInt64(&s.stats.BatchesTransmitFail, 1)\n\t\t\t}\n\n\t\tcase <-s.done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// Diagnostics returns diagnostics of the graphite service.\nfunc (s *Service) Diagnostics() (*diagnostics.Diagnostics, error) {\n\ts.tcpConnectionsMu.Lock()\n\tdefer s.tcpConnectionsMu.Unlock()\n\n\td := &diagnostics.Diagnostics{\n\t\tColumns: []string{\"local\", \"remote\", \"connect time\"},\n\t\tRows:    make([][]interface{}, 0, len(s.tcpConnections)),\n\t}\n\tfor _, v := range s.tcpConnections {\n\t\td.Rows = append(d.Rows, []interface{}{v.conn.LocalAddr().String(), v.conn.RemoteAddr().String(), v.connectTime})\n\t}\n\treturn d, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/graphite/service_test.go",
    "content": "package graphite\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/internal\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/toml\"\n\t\"github.com/uber-go/zap\"\n)\n\nfunc Test_Service_OpenClose(t *testing.T) {\n\t// Let the OS assign a random port since we are only opening and closing the service,\n\t// not actually connecting to it.\n\tc := Config{BindAddress: \"127.0.0.1:0\"}\n\tservice := NewTestService(&c)\n\n\t// Closing a closed service is fine.\n\tif err := service.Service.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Closing a closed service again is fine.\n\tif err := service.Service.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := service.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Opening an already open service is fine.\n\tif err := service.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Reopening a previously opened service is fine.\n\tif err := service.Service.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := service.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Tidy up.\n\tif err := service.Service.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestService_CreatesDatabase(t *testing.T) {\n\tt.Parallel()\n\n\ts := NewTestService(nil)\n\ts.WritePointsFn = func(string, string, models.ConsistencyLevel, []models.Point) error {\n\t\treturn nil\n\t}\n\n\tcalled := make(chan struct{})\n\ts.MetaClient.CreateDatabaseWithRetentionPolicyFn = func(name string, _ *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) {\n\t\tif name != s.Service.database {\n\t\t\tt.Errorf(\"\\n\\texp = %s\\n\\tgot = %s\\n\", s.Service.database, name)\n\t\t}\n\t\t// Allow some time for the caller to return and the ready status to\n\t\t// be set.\n\t\ttime.AfterFunc(10*time.Millisecond, func() { called <- struct{}{} 
})\n\t\treturn nil, errors.New(\"an error\")\n\t}\n\n\tif err := s.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpoints, err := models.ParsePointsString(`cpu value=1`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ts.Service.batcher.In() <- points[0] // Send a point.\n\ts.Service.batcher.Flush()\n\tselect {\n\tcase <-called:\n\t\t// OK\n\tcase <-time.NewTimer(5 * time.Second).C:\n\t\tt.Fatal(\"Service should have attempted to create database\")\n\t}\n\n\t// ready status should not have been switched due to meta client error.\n\ts.Service.mu.RLock()\n\tready := s.Service.ready\n\ts.Service.mu.RUnlock()\n\n\tif got, exp := ready, false; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// This time MC won't cause an error.\n\ts.MetaClient.CreateDatabaseWithRetentionPolicyFn = func(name string, _ *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) {\n\t\t// Allow some time for the caller to return and the ready status to\n\t\t// be set.\n\t\ttime.AfterFunc(10*time.Millisecond, func() { called <- struct{}{} })\n\t\treturn nil, nil\n\t}\n\n\ts.Service.batcher.In() <- points[0] // Send a point.\n\ts.Service.batcher.Flush()\n\tselect {\n\tcase <-called:\n\t\t// OK\n\tcase <-time.NewTimer(5 * time.Second).C:\n\t\tt.Fatal(\"Service should have attempted to create database\")\n\t}\n\n\t// ready status should now be true.\n\ts.Service.mu.RLock()\n\tready = s.Service.ready\n\ts.Service.mu.RUnlock()\n\n\tif got, exp := ready, true; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\ts.Service.Close()\n}\n\nfunc Test_Service_TCP(t *testing.T) {\n\tt.Parallel()\n\n\tnow := time.Now().UTC().Round(time.Second)\n\n\tconfig := Config{}\n\tconfig.Database = \"graphitedb\"\n\tconfig.BatchSize = 0 // No batching.\n\tconfig.BatchTimeout = toml.Duration(time.Second)\n\tconfig.BindAddress = \":0\"\n\n\tservice := NewTestService(&config)\n\n\t// Allow test to wait until points are written.\n\tvar wg 
sync.WaitGroup\n\twg.Add(1)\n\n\tservice.WritePointsFn = func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {\n\t\tdefer wg.Done()\n\n\t\tpt, _ := models.NewPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{}),\n\t\t\tmap[string]interface{}{\"value\": 23.456},\n\t\t\ttime.Unix(now.Unix(), 0))\n\n\t\tif database != \"graphitedb\" {\n\t\t\tt.Fatalf(\"unexpected database: %s\", database)\n\t\t} else if retentionPolicy != \"\" {\n\t\t\tt.Fatalf(\"unexpected retention policy: %s\", retentionPolicy)\n\t\t} else if len(points) != 1 {\n\t\t\tt.Fatalf(\"expected 1 point, got %d\", len(points))\n\t\t} else if points[0].String() != pt.String() {\n\t\t\tt.Fatalf(\"expected point %v, got %v\", pt.String(), points[0].String())\n\t\t}\n\t\treturn nil\n\t}\n\n\tif err := service.Service.Open(); err != nil {\n\t\tt.Fatalf(\"failed to open Graphite service: %s\", err.Error())\n\t}\n\n\t// Connect to the graphite endpoint we just spun up\n\t_, port, _ := net.SplitHostPort(service.Service.Addr().String())\n\tconn, err := net.Dial(\"tcp\", \"127.0.0.1:\"+port)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdata := []byte(`cpu 23.456 `)\n\tdata = append(data, []byte(fmt.Sprintf(\"%d\", now.Unix()))...)\n\tdata = append(data, '\\n')\n\tdata = append(data, []byte(`memory NaN `)...)\n\tdata = append(data, []byte(fmt.Sprintf(\"%d\", now.Unix()))...)\n\tdata = append(data, '\\n')\n\t_, err = conn.Write(data)\n\tconn.Close()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twg.Wait()\n}\n\nfunc Test_Service_UDP(t *testing.T) {\n\tt.Parallel()\n\n\tnow := time.Now().UTC().Round(time.Second)\n\n\tconfig := Config{}\n\tconfig.Database = \"graphitedb\"\n\tconfig.BatchSize = 0 // No batching.\n\tconfig.BatchTimeout = toml.Duration(time.Second)\n\tconfig.BindAddress = \":10000\"\n\tconfig.Protocol = \"udp\"\n\n\tservice := NewTestService(&config)\n\n\t// Allow test to wait until points are written.\n\tvar wg 
sync.WaitGroup\n\twg.Add(1)\n\n\tservice.WritePointsFn = func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {\n\t\tdefer wg.Done()\n\n\t\tpt, _ := models.NewPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.NewTags(map[string]string{}),\n\t\t\tmap[string]interface{}{\"value\": 23.456},\n\t\t\ttime.Unix(now.Unix(), 0))\n\t\tif database != \"graphitedb\" {\n\t\t\tt.Fatalf(\"unexpected database: %s\", database)\n\t\t} else if retentionPolicy != \"\" {\n\t\t\tt.Fatalf(\"unexpected retention policy: %s\", retentionPolicy)\n\t\t} else if points[0].String() != pt.String() {\n\t\t\tt.Fatalf(\"unexpected points: %#v\", points[0].String())\n\t\t}\n\t\treturn nil\n\t}\n\n\tif err := service.Service.Open(); err != nil {\n\t\tt.Fatalf(\"failed to open Graphite service: %s\", err.Error())\n\t}\n\n\t// Connect to the graphite endpoint we just spun up\n\t_, port, _ := net.SplitHostPort(service.Service.Addr().String())\n\tconn, err := net.Dial(\"udp\", \"127.0.0.1:\"+port)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdata := []byte(`cpu 23.456 `)\n\tdata = append(data, []byte(fmt.Sprintf(\"%d\", now.Unix()))...)\n\tdata = append(data, '\\n')\n\t_, err = conn.Write(data)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twg.Wait()\n\tconn.Close()\n}\n\ntype TestService struct {\n\tService       *Service\n\tMetaClient    *internal.MetaClientMock\n\tWritePointsFn func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error\n}\n\nfunc NewTestService(c *Config) *TestService {\n\tif c == nil {\n\t\tdefaultC := NewConfig()\n\t\tc = &defaultC\n\t}\n\n\tgservice, err := NewService(*c)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tservice := &TestService{\n\t\tService:    gservice,\n\t\tMetaClient: &internal.MetaClientMock{},\n\t}\n\n\tservice.MetaClient.CreateRetentionPolicyFn = func(string, *meta.RetentionPolicySpec, bool) (*meta.RetentionPolicyInfo, error) {\n\t\treturn nil, 
nil\n\t}\n\n\tservice.MetaClient.CreateDatabaseWithRetentionPolicyFn = func(string, *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) {\n\t\treturn nil, nil\n\t}\n\n\tservice.MetaClient.DatabaseFn = func(string) *meta.DatabaseInfo {\n\t\treturn nil\n\t}\n\n\tservice.MetaClient.RetentionPolicyFn = func(string, string) (*meta.RetentionPolicyInfo, error) {\n\t\treturn nil, nil\n\t}\n\n\tif testing.Verbose() {\n\t\tservice.Service.WithLogger(zap.New(\n\t\t\tzap.NewTextEncoder(),\n\t\t\tzap.Output(os.Stderr),\n\t\t))\n\t}\n\n\t// Set the Meta Client and PointsWriter.\n\tservice.Service.MetaClient = service.MetaClient\n\tservice.Service.PointsWriter = service\n\n\treturn service\n}\n\nfunc (s *TestService) WritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {\n\treturn s.WritePointsFn(database, retentionPolicy, consistencyLevel, points)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/httpd/config.go",
    "content": "package httpd\n\nimport \"github.com/influxdata/influxdb/monitor/diagnostics\"\n\nconst (\n\t// DefaultBindAddress is the default address to bind to.\n\tDefaultBindAddress = \":8086\"\n\n\t// DefaultRealm is the default realm sent back when issuing a basic auth challenge.\n\tDefaultRealm = \"InfluxDB\"\n\n\t// DefaultBindSocket is the default unix socket to bind to.\n\tDefaultBindSocket = \"/var/run/influxdb.sock\"\n)\n\n// Config represents a configuration for a HTTP service.\ntype Config struct {\n\tEnabled            bool   `toml:\"enabled\"`\n\tBindAddress        string `toml:\"bind-address\"`\n\tAuthEnabled        bool   `toml:\"auth-enabled\"`\n\tLogEnabled         bool   `toml:\"log-enabled\"`\n\tWriteTracing       bool   `toml:\"write-tracing\"`\n\tPprofEnabled       bool   `toml:\"pprof-enabled\"`\n\tHTTPSEnabled       bool   `toml:\"https-enabled\"`\n\tHTTPSCertificate   string `toml:\"https-certificate\"`\n\tHTTPSPrivateKey    string `toml:\"https-private-key\"`\n\tMaxRowLimit        int    `toml:\"max-row-limit\"`\n\tMaxConnectionLimit int    `toml:\"max-connection-limit\"`\n\tSharedSecret       string `toml:\"shared-secret\"`\n\tRealm              string `toml:\"realm\"`\n\tUnixSocketEnabled  bool   `toml:\"unix-socket-enabled\"`\n\tBindSocket         string `toml:\"bind-socket\"`\n}\n\n// NewConfig returns a new Config with default settings.\nfunc NewConfig() Config {\n\treturn Config{\n\t\tEnabled:           true,\n\t\tBindAddress:       DefaultBindAddress,\n\t\tLogEnabled:        true,\n\t\tPprofEnabled:      true,\n\t\tHTTPSEnabled:      false,\n\t\tHTTPSCertificate:  \"/etc/ssl/influxdb.pem\",\n\t\tMaxRowLimit:       0,\n\t\tRealm:             DefaultRealm,\n\t\tUnixSocketEnabled: false,\n\t\tBindSocket:        DefaultBindSocket,\n\t}\n}\n\n// Diagnostics returns a diagnostics representation of a subset of the Config.\nfunc (c Config) Diagnostics() (*diagnostics.Diagnostics, error) {\n\tif !c.Enabled {\n\t\treturn 
diagnostics.RowFromMap(map[string]interface{}{\n\t\t\t\"enabled\": false,\n\t\t}), nil\n\t}\n\n\treturn diagnostics.RowFromMap(map[string]interface{}{\n\t\t\"enabled\":              true,\n\t\t\"bind-address\":         c.BindAddress,\n\t\t\"https-enabled\":        c.HTTPSEnabled,\n\t\t\"max-row-limit\":        c.MaxRowLimit,\n\t\t\"max-connection-limit\": c.MaxConnectionLimit,\n\t}), nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/httpd/config_test.go",
    "content": "package httpd_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/influxdata/influxdb/services/httpd\"\n)\n\nfunc TestConfig_Parse(t *testing.T) {\n\t// Parse configuration.\n\tvar c httpd.Config\n\tif _, err := toml.Decode(`\nenabled = true\nbind-address = \":8080\"\nauth-enabled = true\nlog-enabled = true\nwrite-tracing = true\nhttps-enabled = true\nhttps-certificate = \"/dev/null\"\nunix-socket-enabled = true\nbind-socket = \"/var/run/influxdb.sock\"\n`, &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Validate configuration.\n\tif c.Enabled != true {\n\t\tt.Fatalf(\"unexpected enabled: %v\", c.Enabled)\n\t} else if c.BindAddress != \":8080\" {\n\t\tt.Fatalf(\"unexpected bind address: %s\", c.BindAddress)\n\t} else if c.AuthEnabled != true {\n\t\tt.Fatalf(\"unexpected auth enabled: %v\", c.AuthEnabled)\n\t} else if c.LogEnabled != true {\n\t\tt.Fatalf(\"unexpected log enabled: %v\", c.LogEnabled)\n\t} else if c.WriteTracing != true {\n\t\tt.Fatalf(\"unexpected write tracing: %v\", c.WriteTracing)\n\t} else if c.HTTPSEnabled != true {\n\t\tt.Fatalf(\"unexpected https enabled: %v\", c.HTTPSEnabled)\n\t} else if c.HTTPSCertificate != \"/dev/null\" {\n\t\tt.Fatalf(\"unexpected https certificate: %v\", c.HTTPSCertificate)\n\t} else if c.UnixSocketEnabled != true {\n\t\tt.Fatalf(\"unexpected unix socket enabled: %v\", c.UnixSocketEnabled)\n\t} else if c.BindSocket != \"/var/run/influxdb.sock\" {\n\t\tt.Fatalf(\"unexpected bind unix socket: %v\", c.BindSocket)\n\t}\n}\n\nfunc TestConfig_WriteTracing(t *testing.T) {\n\tc := httpd.Config{WriteTracing: true}\n\ts := httpd.NewService(c)\n\tif !s.Handler.Config.WriteTracing {\n\t\tt.Fatalf(\"write tracing was not set\")\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/httpd/handler.go",
    "content": "package httpd\n\nimport (\n\t\"bytes\"\n\t\"compress/gzip\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"expvar\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n\t\"runtime/debug\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/bmizerany/pat\"\n\t\"github.com/dgrijalva/jwt-go\"\n\t\"github.com/influxdata/influxdb\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/monitor\"\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t\"github.com/influxdata/influxdb/uuid\"\n\t\"github.com/uber-go/zap\"\n)\n\nconst (\n\t// DefaultChunkSize specifies the maximum number of points that will\n\t// be read before sending results back to the engine.\n\t//\n\t// This has no relation to the number of bytes that are returned.\n\tDefaultChunkSize = 10000\n\n\tDefaultDebugRequestsInterval = 10 * time.Second\n\n\tMaxDebugRequestsInterval = 6 * time.Hour\n)\n\n// AuthenticationMethod defines the type of authentication used.\ntype AuthenticationMethod int\n\n// Supported authentication methods.\nconst (\n\t// Authenticate using basic authentication.\n\tUserAuthentication AuthenticationMethod = iota\n\n\t// Authenticate with jwt.\n\tBearerAuthentication\n)\n\n// TODO: Check HTTP response codes: 400, 401, 403, 409.\n\n// Route specifies how to handle a HTTP verb for a given endpoint.\ntype Route struct {\n\tName           string\n\tMethod         string\n\tPattern        string\n\tGzipped        bool\n\tLoggingEnabled bool\n\tHandlerFunc    interface{}\n}\n\n// Handler represents an HTTP handler for the InfluxDB server.\ntype Handler struct {\n\tmux     *pat.PatternServeMux\n\tVersion string\n\n\tMetaClient interface {\n\t\tDatabase(name string) *meta.DatabaseInfo\n\t\tDatabases() []meta.DatabaseInfo\n\t\tAuthenticate(username, password string) 
(ui meta.User, err error)\n\t\tUser(username string) (meta.User, error)\n\t\tAdminUserExists() bool\n\t}\n\n\tQueryAuthorizer interface {\n\t\tAuthorizeQuery(u meta.User, query *influxql.Query, database string) error\n\t}\n\n\tWriteAuthorizer interface {\n\t\tAuthorizeWrite(username, database string) error\n\t}\n\n\tQueryExecutor *influxql.QueryExecutor\n\n\tMonitor interface {\n\t\tStatistics(tags map[string]string) ([]*monitor.Statistic, error)\n\t\tDiagnostics() (map[string]*diagnostics.Diagnostics, error)\n\t}\n\n\tPointsWriter interface {\n\t\tWritePoints(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, user meta.User, points []models.Point) error\n\t}\n\n\tConfig    *Config\n\tLogger    zap.Logger\n\tCLFLogger *log.Logger\n\tstats     *Statistics\n\n\trequestTracker *RequestTracker\n}\n\n// NewHandler returns a new instance of handler with routes.\nfunc NewHandler(c Config) *Handler {\n\th := &Handler{\n\t\tmux:            pat.New(),\n\t\tConfig:         &c,\n\t\tLogger:         zap.New(zap.NullEncoder()),\n\t\tCLFLogger:      log.New(os.Stderr, \"[httpd] \", 0),\n\t\tstats:          &Statistics{},\n\t\trequestTracker: NewRequestTracker(),\n\t}\n\n\th.AddRoutes([]Route{\n\t\tRoute{\n\t\t\t\"query-options\", // Satisfy CORS checks.\n\t\t\t\"OPTIONS\", \"/query\", false, true, h.serveOptions,\n\t\t},\n\t\tRoute{\n\t\t\t\"query\", // Query serving route.\n\t\t\t\"GET\", \"/query\", true, true, h.serveQuery,\n\t\t},\n\t\tRoute{\n\t\t\t\"query\", // Query serving route.\n\t\t\t\"POST\", \"/query\", true, true, h.serveQuery,\n\t\t},\n\t\tRoute{\n\t\t\t\"write-options\", // Satisfy CORS checks.\n\t\t\t\"OPTIONS\", \"/write\", false, true, h.serveOptions,\n\t\t},\n\t\tRoute{\n\t\t\t\"write\", // Data-ingest route.\n\t\t\t\"POST\", \"/write\", true, true, h.serveWrite,\n\t\t},\n\t\tRoute{ // Ping\n\t\t\t\"ping\",\n\t\t\t\"GET\", \"/ping\", false, true, h.servePing,\n\t\t},\n\t\tRoute{ // Ping\n\t\t\t\"ping-head\",\n\t\t\t\"HEAD\", \"/ping\", 
false, true, h.servePing,\n\t\t},\n\t\tRoute{ // Ping w/ status\n\t\t\t\"status\",\n\t\t\t\"GET\", \"/status\", false, true, h.serveStatus,\n\t\t},\n\t\tRoute{ // Ping w/ status\n\t\t\t\"status-head\",\n\t\t\t\"HEAD\", \"/status\", false, true, h.serveStatus,\n\t\t},\n\t}...)\n\n\treturn h\n}\n\n// Statistics maintains statistics for the httpd service.\ntype Statistics struct {\n\tRequests                     int64\n\tCQRequests                   int64\n\tQueryRequests                int64\n\tWriteRequests                int64\n\tPingRequests                 int64\n\tStatusRequests               int64\n\tWriteRequestBytesReceived    int64\n\tQueryRequestBytesTransmitted int64\n\tPointsWrittenOK              int64\n\tPointsWrittenDropped         int64\n\tPointsWrittenFail            int64\n\tAuthenticationFailures       int64\n\tRequestDuration              int64\n\tQueryRequestDuration         int64\n\tWriteRequestDuration         int64\n\tActiveRequests               int64\n\tActiveWriteRequests          int64\n\tClientErrors                 int64\n\tServerErrors                 int64\n}\n\n// Statistics returns statistics for periodic monitoring.\nfunc (h *Handler) Statistics(tags map[string]string) []models.Statistic {\n\treturn []models.Statistic{{\n\t\tName: \"httpd\",\n\t\tTags: tags,\n\t\tValues: map[string]interface{}{\n\t\t\tstatRequest:                      atomic.LoadInt64(&h.stats.Requests),\n\t\t\tstatQueryRequest:                 atomic.LoadInt64(&h.stats.QueryRequests),\n\t\t\tstatWriteRequest:                 atomic.LoadInt64(&h.stats.WriteRequests),\n\t\t\tstatPingRequest:                  atomic.LoadInt64(&h.stats.PingRequests),\n\t\t\tstatStatusRequest:                atomic.LoadInt64(&h.stats.StatusRequests),\n\t\t\tstatWriteRequestBytesReceived:    atomic.LoadInt64(&h.stats.WriteRequestBytesReceived),\n\t\t\tstatQueryRequestBytesTransmitted: atomic.LoadInt64(&h.stats.QueryRequestBytesTransmitted),\n\t\t\tstatPointsWrittenOK:              
atomic.LoadInt64(&h.stats.PointsWrittenOK),\n\t\t\tstatPointsWrittenDropped:         atomic.LoadInt64(&h.stats.PointsWrittenDropped),\n\t\t\tstatPointsWrittenFail:            atomic.LoadInt64(&h.stats.PointsWrittenFail),\n\t\t\tstatAuthFail:                     atomic.LoadInt64(&h.stats.AuthenticationFailures),\n\t\t\tstatRequestDuration:              atomic.LoadInt64(&h.stats.RequestDuration),\n\t\t\tstatQueryRequestDuration:         atomic.LoadInt64(&h.stats.QueryRequestDuration),\n\t\t\tstatWriteRequestDuration:         atomic.LoadInt64(&h.stats.WriteRequestDuration),\n\t\t\tstatRequestsActive:               atomic.LoadInt64(&h.stats.ActiveRequests),\n\t\t\tstatWriteRequestsActive:          atomic.LoadInt64(&h.stats.ActiveWriteRequests),\n\t\t\tstatClientError:                  atomic.LoadInt64(&h.stats.ClientErrors),\n\t\t\tstatServerError:                  atomic.LoadInt64(&h.stats.ServerErrors),\n\t\t},\n\t}}\n}\n\n// AddRoutes sets the provided routes on the handler.\nfunc (h *Handler) AddRoutes(routes ...Route) {\n\tfor _, r := range routes {\n\t\tvar handler http.Handler\n\n\t\t// If it's a handler func that requires authorization, wrap it in authentication\n\t\tif hf, ok := r.HandlerFunc.(func(http.ResponseWriter, *http.Request, meta.User)); ok {\n\t\t\thandler = authenticate(hf, h, h.Config.AuthEnabled)\n\t\t}\n\n\t\t// This is a normal handler signature and does not require authentication\n\t\tif hf, ok := r.HandlerFunc.(func(http.ResponseWriter, *http.Request)); ok {\n\t\t\thandler = http.HandlerFunc(hf)\n\t\t}\n\n\t\thandler = h.responseWriter(handler)\n\t\tif r.Gzipped {\n\t\t\thandler = gzipFilter(handler)\n\t\t}\n\t\thandler = cors(handler)\n\t\thandler = requestID(handler)\n\t\tif h.Config.LogEnabled && r.LoggingEnabled {\n\t\t\thandler = h.logging(handler, r.Name)\n\t\t}\n\t\thandler = h.recovery(handler, r.Name) // make sure recovery is always last\n\n\t\th.mux.Add(r.Method, r.Pattern, handler)\n\t}\n}\n\n// ServeHTTP responds to HTTP request to 
the handler.\nfunc (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tatomic.AddInt64(&h.stats.Requests, 1)\n\tatomic.AddInt64(&h.stats.ActiveRequests, 1)\n\tdefer atomic.AddInt64(&h.stats.ActiveRequests, -1)\n\tstart := time.Now()\n\n\t// Add version header to all InfluxDB requests.\n\tw.Header().Add(\"X-Influxdb-Version\", h.Version)\n\n\tif strings.HasPrefix(r.URL.Path, \"/debug/pprof\") && h.Config.PprofEnabled {\n\t\th.handleProfiles(w, r)\n\t} else if strings.HasPrefix(r.URL.Path, \"/debug/vars\") {\n\t\th.serveExpvar(w, r)\n\t} else if strings.HasPrefix(r.URL.Path, \"/debug/requests\") {\n\t\th.serveDebugRequests(w, r)\n\t} else {\n\t\th.mux.ServeHTTP(w, r)\n\t}\n\n\tatomic.AddInt64(&h.stats.RequestDuration, time.Since(start).Nanoseconds())\n}\n\n// writeHeader writes the provided status code in the response, and\n// updates relevant http error statistics.\nfunc (h *Handler) writeHeader(w http.ResponseWriter, code int) {\n\tswitch code / 100 {\n\tcase 4:\n\t\tatomic.AddInt64(&h.stats.ClientErrors, 1)\n\tcase 5:\n\t\tatomic.AddInt64(&h.stats.ServerErrors, 1)\n\t}\n\tw.WriteHeader(code)\n}\n\n// serveQuery parses an incoming query and, if valid, executes the query.\nfunc (h *Handler) serveQuery(w http.ResponseWriter, r *http.Request, user meta.User) {\n\tatomic.AddInt64(&h.stats.QueryRequests, 1)\n\tdefer func(start time.Time) {\n\t\tatomic.AddInt64(&h.stats.QueryRequestDuration, time.Since(start).Nanoseconds())\n\t}(time.Now())\n\th.requestTracker.Add(r, user)\n\n\t// Retrieve the underlying ResponseWriter or initialize our own.\n\trw, ok := w.(ResponseWriter)\n\tif !ok {\n\t\trw = NewResponseWriter(w, r)\n\t}\n\n\t// Retrieve the node id the query should be executed on.\n\tnodeID, _ := strconv.ParseUint(r.FormValue(\"node_id\"), 10, 64)\n\n\tvar qr io.Reader\n\t// Attempt to read the form value from the \"q\" form value.\n\tif qp := strings.TrimSpace(r.FormValue(\"q\")); qp != \"\" {\n\t\tqr = strings.NewReader(qp)\n\t} else if 
r.MultipartForm != nil && r.MultipartForm.File != nil {\n\t\t// If we have a multipart/form-data, try to retrieve a file from 'q'.\n\t\tif fhs := r.MultipartForm.File[\"q\"]; len(fhs) > 0 {\n\t\t\tf, err := fhs[0].Open()\n\t\t\tif err != nil {\n\t\t\t\th.httpError(rw, err.Error(), http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer f.Close()\n\t\t\tqr = f\n\t\t}\n\t}\n\n\tif qr == nil {\n\t\th.httpError(rw, `missing required parameter \"q\"`, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tepoch := strings.TrimSpace(r.FormValue(\"epoch\"))\n\n\tp := influxql.NewParser(qr)\n\tdb := r.FormValue(\"db\")\n\n\t// Sanitize the request query params so it doesn't show up in the response logger.\n\t// Do this before anything else so a parsing error doesn't leak passwords.\n\tsanitize(r)\n\n\t// Parse the parameters\n\trawParams := r.FormValue(\"params\")\n\tif rawParams != \"\" {\n\t\tvar params map[string]interface{}\n\t\tdecoder := json.NewDecoder(strings.NewReader(rawParams))\n\t\tdecoder.UseNumber()\n\t\tif err := decoder.Decode(&params); err != nil {\n\t\t\th.httpError(rw, \"error parsing query parameters: \"+err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\t// Convert json.Number into int64 and float64 values\n\t\tfor k, v := range params {\n\t\t\tif v, ok := v.(json.Number); ok {\n\t\t\t\tvar err error\n\t\t\t\tif strings.Contains(string(v), \".\") {\n\t\t\t\t\tparams[k], err = v.Float64()\n\t\t\t\t} else {\n\t\t\t\t\tparams[k], err = v.Int64()\n\t\t\t\t}\n\n\t\t\t\tif err != nil {\n\t\t\t\t\th.httpError(rw, \"error parsing json value: \"+err.Error(), http.StatusBadRequest)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tp.SetParams(params)\n\t}\n\n\t// Parse query from query string.\n\tquery, err := p.ParseQuery()\n\tif err != nil {\n\t\th.httpError(rw, \"error parsing query: \"+err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Check authorization.\n\tif h.Config.AuthEnabled {\n\t\tif err := h.QueryAuthorizer.AuthorizeQuery(user, 
query, db); err != nil {\n\t\t\tif err, ok := err.(meta.ErrAuthorize); ok {\n\t\t\t\th.Logger.Info(fmt.Sprintf(\"Unauthorized request | user: %q | query: %q | database %q\", err.User, err.Query.String(), err.Database))\n\t\t\t}\n\t\t\th.httpError(rw, \"error authorizing query: \"+err.Error(), http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Parse chunk size. Use default if not provided or unparsable.\n\tchunked := r.FormValue(\"chunked\") == \"true\"\n\tchunkSize := DefaultChunkSize\n\tif chunked {\n\t\tif n, err := strconv.ParseInt(r.FormValue(\"chunk_size\"), 10, 64); err == nil && int(n) > 0 {\n\t\t\tchunkSize = int(n)\n\t\t}\n\t}\n\n\t// Parse whether this is an async command.\n\tasync := r.FormValue(\"async\") == \"true\"\n\n\topts := influxql.ExecutionOptions{\n\t\tDatabase:  db,\n\t\tChunkSize: chunkSize,\n\t\tReadOnly:  r.Method == \"GET\",\n\t\tNodeID:    nodeID,\n\t}\n\n\tif h.Config.AuthEnabled {\n\t\t// The current user determines the authorized actions.\n\t\topts.Authorizer = user\n\t} else {\n\t\t// Auth is disabled, so allow everything.\n\t\topts.Authorizer = influxql.OpenAuthorizer{}\n\t}\n\n\t// Make sure if the client disconnects we signal the query to abort\n\tvar closing chan struct{}\n\tif !async {\n\t\tclosing = make(chan struct{})\n\t\tif notifier, ok := w.(http.CloseNotifier); ok {\n\t\t\t// CloseNotify() is not guaranteed to send a notification when the query\n\t\t\t// is closed. 
Use this channel to signal that the query is finished to\n\t\t\t// prevent lingering goroutines that may be stuck.\n\t\t\tdone := make(chan struct{})\n\t\t\tdefer close(done)\n\n\t\t\tnotify := notifier.CloseNotify()\n\t\t\tgo func() {\n\t\t\t\t// Wait for either the request to finish\n\t\t\t\t// or for the client to disconnect\n\t\t\t\tselect {\n\t\t\t\tcase <-done:\n\t\t\t\tcase <-notify:\n\t\t\t\t\tclose(closing)\n\t\t\t\t}\n\t\t\t}()\n\t\t\topts.AbortCh = done\n\t\t} else {\n\t\t\tdefer close(closing)\n\t\t}\n\t}\n\n\t// Execute query.\n\trw.Header().Add(\"Connection\", \"close\")\n\tresults := h.QueryExecutor.ExecuteQuery(query, opts, closing)\n\n\t// If we are running in async mode, open a goroutine to drain the results\n\t// and return with a StatusNoContent.\n\tif async {\n\t\tgo h.async(query, results)\n\t\th.writeHeader(w, http.StatusNoContent)\n\t\treturn\n\t}\n\n\t// if we're not chunking, this will be the in memory buffer for all results before sending to client\n\tresp := Response{Results: make([]*influxql.Result, 0)}\n\n\t// Status header is OK once this point is reached.\n\t// Attempt to flush the header immediately so the client gets the header information\n\t// and knows the query was accepted.\n\th.writeHeader(rw, http.StatusOK)\n\tif w, ok := w.(http.Flusher); ok {\n\t\tw.Flush()\n\t}\n\n\t// pull all results from the channel\n\trows := 0\n\tfor r := range results {\n\t\t// Ignore nil results.\n\t\tif r == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// if requested, convert result timestamps to epoch\n\t\tif epoch != \"\" {\n\t\t\tconvertToEpoch(r, epoch)\n\t\t}\n\n\t\t// Write out result immediately if chunked.\n\t\tif chunked {\n\t\t\tn, _ := rw.WriteResponse(Response{\n\t\t\t\tResults: []*influxql.Result{r},\n\t\t\t})\n\t\t\tatomic.AddInt64(&h.stats.QueryRequestBytesTransmitted, int64(n))\n\t\t\tw.(http.Flusher).Flush()\n\t\t\tcontinue\n\t\t}\n\n\t\t// Limit the number of rows that can be returned in a non-chunked\n\t\t// response.  
This is to prevent the server from going OOM when\n\t\t// returning a large response.  If you want to return more than the\n\t\t// default chunk size, then use chunking to process multiple blobs.\n\t\t// Iterate through the series in this result to count the rows and\n\t\t// truncate any rows we shouldn't return.\n\t\tif h.Config.MaxRowLimit > 0 {\n\t\t\tfor i, series := range r.Series {\n\t\t\t\tn := h.Config.MaxRowLimit - rows\n\t\t\t\tif n < len(series.Values) {\n\t\t\t\t\t// We have reached the maximum number of values. Truncate\n\t\t\t\t\t// the values within this row.\n\t\t\t\t\tseries.Values = series.Values[:n]\n\t\t\t\t\t// Since this was truncated, it will always be a partial return.\n\t\t\t\t\t// Add this so the client knows we truncated the response.\n\t\t\t\t\tseries.Partial = true\n\t\t\t\t}\n\t\t\t\trows += len(series.Values)\n\n\t\t\t\tif rows >= h.Config.MaxRowLimit {\n\t\t\t\t\t// Drop any remaining series since we have already reached the row limit.\n\t\t\t\t\tif i < len(r.Series) {\n\t\t\t\t\t\tr.Series = r.Series[:i+1]\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// It's not chunked so buffer results in memory.\n\t\t// Results for statements need to be combined together.\n\t\t// We need to check if this new result is for the same statement as\n\t\t// the last result, or for the next statement\n\t\tl := len(resp.Results)\n\t\tif l == 0 {\n\t\t\tresp.Results = append(resp.Results, r)\n\t\t} else if resp.Results[l-1].StatementID == r.StatementID {\n\t\t\tif r.Err != nil {\n\t\t\t\tresp.Results[l-1] = r\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcr := resp.Results[l-1]\n\t\t\trowsMerged := 0\n\t\t\tif len(cr.Series) > 0 {\n\t\t\t\tlastSeries := cr.Series[len(cr.Series)-1]\n\n\t\t\t\tfor _, row := range r.Series {\n\t\t\t\t\tif !lastSeries.SameSeries(row) {\n\t\t\t\t\t\t// Next row is for a different series than last.\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\t// Values are for the same series, so append them.\n\t\t\t\t\tlastSeries.Values 
= append(lastSeries.Values, row.Values...)\n\t\t\t\t\trowsMerged++\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Append remaining rows as new rows.\n\t\t\tr.Series = r.Series[rowsMerged:]\n\t\t\tcr.Series = append(cr.Series, r.Series...)\n\t\t\tcr.Messages = append(cr.Messages, r.Messages...)\n\t\t\tcr.Partial = r.Partial\n\t\t} else {\n\t\t\tresp.Results = append(resp.Results, r)\n\t\t}\n\n\t\t// Drop out of this loop and do not process further results when we hit the row limit.\n\t\tif h.Config.MaxRowLimit > 0 && rows >= h.Config.MaxRowLimit {\n\t\t\t// If the result is marked as partial, remove that partial marking\n\t\t\t// here. While the series is partial and we would normally have\n\t\t\t// tried to return the rest in the next chunk, we are not using\n\t\t\t// chunking and are truncating the series so we don't want to\n\t\t\t// signal to the client that we plan on sending another JSON blob\n\t\t\t// with another result.  The series, on the other hand, still\n\t\t\t// returns partial true if it was truncated or had more data to\n\t\t\t// send in a future chunk.\n\t\t\tr.Partial = false\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// If it's not chunked we buffered everything in memory, so write it out\n\tif !chunked {\n\t\tn, _ := rw.WriteResponse(resp)\n\t\tatomic.AddInt64(&h.stats.QueryRequestBytesTransmitted, int64(n))\n\t}\n}\n\n// async drains the results from an async query and logs a message if it fails.\nfunc (h *Handler) async(query *influxql.Query, results <-chan *influxql.Result) {\n\tfor r := range results {\n\t\t// Drain the results and do nothing with them.\n\t\t// If it fails, log the failure so there is at least a record of it.\n\t\tif r.Err != nil {\n\t\t\t// Do not log when a statement was not executed since there would\n\t\t\t// have been an earlier error that was already logged.\n\t\t\tif r.Err == influxql.ErrNotExecuted {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\th.Logger.Info(fmt.Sprintf(\"error while running async query: %s: %s\", query, r.Err))\n\t\t}\n\t}\n}\n\n// 
serveWrite receives incoming series data in line protocol format and writes it to the database.\nfunc (h *Handler) serveWrite(w http.ResponseWriter, r *http.Request, user meta.User) {\n\tatomic.AddInt64(&h.stats.WriteRequests, 1)\n\tatomic.AddInt64(&h.stats.ActiveWriteRequests, 1)\n\tdefer func(start time.Time) {\n\t\tatomic.AddInt64(&h.stats.ActiveWriteRequests, -1)\n\t\tatomic.AddInt64(&h.stats.WriteRequestDuration, time.Since(start).Nanoseconds())\n\t}(time.Now())\n\th.requestTracker.Add(r, user)\n\n\tdatabase := r.URL.Query().Get(\"db\")\n\tif database == \"\" {\n\t\th.httpError(w, \"database is required\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif di := h.MetaClient.Database(database); di == nil {\n\t\th.httpError(w, fmt.Sprintf(\"database not found: %q\", database), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif h.Config.AuthEnabled {\n\t\tif user == nil {\n\t\t\th.httpError(w, fmt.Sprintf(\"user is required to write to database %q\", database), http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\n\t\tif err := h.WriteAuthorizer.AuthorizeWrite(user.ID(), database); err != nil {\n\t\t\th.httpError(w, fmt.Sprintf(\"%q user is not authorized to write to database %q\", user.ID(), database), http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Handle gzip decoding of the body\n\tbody := r.Body\n\tif r.Header.Get(\"Content-Encoding\") == \"gzip\" {\n\t\tb, err := gzip.NewReader(r.Body)\n\t\tif err != nil {\n\t\t\th.httpError(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tdefer b.Close()\n\t\tbody = b\n\t}\n\n\tvar bs []byte\n\tif clStr := r.Header.Get(\"Content-Length\"); clStr != \"\" {\n\t\tif length, err := strconv.Atoi(clStr); err == nil {\n\t\t\t// This will just be an initial hint for the gzip reader, as the\n\t\t\t// bytes.Buffer will grow as needed when ReadFrom is called\n\t\t\tbs = make([]byte, 0, length)\n\t\t}\n\t}\n\tbuf := bytes.NewBuffer(bs)\n\n\t_, err := buf.ReadFrom(body)\n\tif err != nil {\n\t\tif h.Config.WriteTracing 
{\n\t\t\th.Logger.Info(\"Write handler unable to read bytes from request body\")\n\t\t}\n\t\th.httpError(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tatomic.AddInt64(&h.stats.WriteRequestBytesReceived, int64(buf.Len()))\n\n\tif h.Config.WriteTracing {\n\t\th.Logger.Info(fmt.Sprintf(\"Write body received by handler: %s\", buf.Bytes()))\n\t}\n\n\tpoints, parseError := models.ParsePointsWithPrecision(buf.Bytes(), time.Now().UTC(), r.URL.Query().Get(\"precision\"))\n\t// Not points parsed correctly so return the error now\n\tif parseError != nil && len(points) == 0 {\n\t\tif parseError.Error() == \"EOF\" {\n\t\t\th.writeHeader(w, http.StatusOK)\n\t\t\treturn\n\t\t}\n\t\th.httpError(w, parseError.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Determine required consistency level.\n\tlevel := r.URL.Query().Get(\"consistency\")\n\tconsistency := models.ConsistencyLevelOne\n\tif level != \"\" {\n\t\tvar err error\n\t\tconsistency, err = models.ParseConsistencyLevel(level)\n\t\tif err != nil {\n\t\t\th.httpError(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Write points.\n\tif err := h.PointsWriter.WritePoints(database, r.URL.Query().Get(\"rp\"), consistency, user, points); influxdb.IsClientError(err) {\n\t\tatomic.AddInt64(&h.stats.PointsWrittenFail, int64(len(points)))\n\t\th.httpError(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t} else if influxdb.IsAuthorizationError(err) {\n\t\tatomic.AddInt64(&h.stats.PointsWrittenFail, int64(len(points)))\n\t\th.httpError(w, err.Error(), http.StatusForbidden)\n\t\treturn\n\t} else if werr, ok := err.(tsdb.PartialWriteError); ok {\n\t\tatomic.AddInt64(&h.stats.PointsWrittenOK, int64(len(points)-werr.Dropped))\n\t\tatomic.AddInt64(&h.stats.PointsWrittenDropped, int64(werr.Dropped))\n\t\th.httpError(w, werr.Error(), http.StatusBadRequest)\n\t\treturn\n\t} else if err != nil {\n\t\tatomic.AddInt64(&h.stats.PointsWrittenFail, int64(len(points)))\n\t\th.httpError(w, err.Error(), 
http.StatusInternalServerError)\n\t\treturn\n\t} else if parseError != nil {\n\t\t// We wrote some of the points\n\t\tatomic.AddInt64(&h.stats.PointsWrittenOK, int64(len(points)))\n\t\t// The other points failed to parse which means the client sent invalid line protocol.  We return a 400\n\t\t// response code as well as the lines that failed to parse.\n\t\th.httpError(w, tsdb.PartialWriteError{Reason: parseError.Error()}.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tatomic.AddInt64(&h.stats.PointsWrittenOK, int64(len(points)))\n\th.writeHeader(w, http.StatusNoContent)\n}\n\n// serveOptions returns an empty response to comply with OPTIONS pre-flight requests\nfunc (h *Handler) serveOptions(w http.ResponseWriter, r *http.Request) {\n\th.writeHeader(w, http.StatusNoContent)\n}\n\n// servePing returns a simple response to let the client know the server is running.\nfunc (h *Handler) servePing(w http.ResponseWriter, r *http.Request) {\n\tatomic.AddInt64(&h.stats.PingRequests, 1)\n\th.writeHeader(w, http.StatusNoContent)\n}\n\n// serveStatus has been deprecated.\nfunc (h *Handler) serveStatus(w http.ResponseWriter, r *http.Request) {\n\th.Logger.Info(\"WARNING: /status has been deprecated.  
Use /ping instead.\")\n\tatomic.AddInt64(&h.stats.StatusRequests, 1)\n\th.writeHeader(w, http.StatusNoContent)\n}\n\n// convertToEpoch converts result timestamps from time.Time to the specified epoch.\nfunc convertToEpoch(r *influxql.Result, epoch string) {\n\tdivisor := int64(1)\n\n\tswitch epoch {\n\tcase \"u\":\n\t\tdivisor = int64(time.Microsecond)\n\tcase \"ms\":\n\t\tdivisor = int64(time.Millisecond)\n\tcase \"s\":\n\t\tdivisor = int64(time.Second)\n\tcase \"m\":\n\t\tdivisor = int64(time.Minute)\n\tcase \"h\":\n\t\tdivisor = int64(time.Hour)\n\t}\n\n\tfor _, s := range r.Series {\n\t\tfor _, v := range s.Values {\n\t\t\tif ts, ok := v[0].(time.Time); ok {\n\t\t\t\tv[0] = ts.UnixNano() / divisor\n\t\t\t}\n\t\t}\n\t}\n}\n\n// serveExpvar serves internal metrics in /debug/vars format over HTTP.\nfunc (h *Handler) serveExpvar(w http.ResponseWriter, r *http.Request) {\n\t// Retrieve statistics from the monitor.\n\tstats, err := h.Monitor.Statistics(nil)\n\tif err != nil {\n\t\th.httpError(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Retrieve diagnostics from the monitor.\n\tdiags, err := h.Monitor.Diagnostics()\n\tif err != nil {\n\t\th.httpError(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\n\tfirst := true\n\tif val, ok := diags[\"system\"]; ok {\n\t\tjv, err := parseSystemDiagnostics(val)\n\t\tif err != nil {\n\t\t\th.httpError(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tdata, err := json.Marshal(jv)\n\t\tif err != nil {\n\t\t\th.httpError(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tfirst = false\n\t\tfmt.Fprintln(w, \"{\")\n\t\tfmt.Fprintf(w, \"\\\"system\\\": %s\", data)\n\t} else {\n\t\tfmt.Fprintln(w, \"{\")\n\t}\n\n\tif val := expvar.Get(\"cmdline\"); val != nil {\n\t\tif !first {\n\t\t\tfmt.Fprintln(w, \",\")\n\t\t}\n\t\tfirst = false\n\t\tfmt.Fprintf(w, 
\"\\\"cmdline\\\": %s\", val)\n\t}\n\tif val := expvar.Get(\"memstats\"); val != nil {\n\t\tif !first {\n\t\t\tfmt.Fprintln(w, \",\")\n\t\t}\n\t\tfirst = false\n\t\tfmt.Fprintf(w, \"\\\"memstats\\\": %s\", val)\n\t}\n\n\tfor _, s := range stats {\n\t\tval, err := json.Marshal(s)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Very hackily create a unique key.\n\t\tbuf := bytes.NewBufferString(s.Name)\n\t\tif path, ok := s.Tags[\"path\"]; ok {\n\t\t\tfmt.Fprintf(buf, \":%s\", path)\n\t\t\tif id, ok := s.Tags[\"id\"]; ok {\n\t\t\t\tfmt.Fprintf(buf, \":%s\", id)\n\t\t\t}\n\t\t} else if bind, ok := s.Tags[\"bind\"]; ok {\n\t\t\tif proto, ok := s.Tags[\"proto\"]; ok {\n\t\t\t\tfmt.Fprintf(buf, \":%s\", proto)\n\t\t\t}\n\t\t\tfmt.Fprintf(buf, \":%s\", bind)\n\t\t} else if database, ok := s.Tags[\"database\"]; ok {\n\t\t\tfmt.Fprintf(buf, \":%s\", database)\n\t\t\tif rp, ok := s.Tags[\"retention_policy\"]; ok {\n\t\t\t\tfmt.Fprintf(buf, \":%s\", rp)\n\t\t\t\tif name, ok := s.Tags[\"name\"]; ok {\n\t\t\t\t\tfmt.Fprintf(buf, \":%s\", name)\n\t\t\t\t}\n\t\t\t\tif dest, ok := s.Tags[\"destination\"]; ok {\n\t\t\t\t\tfmt.Fprintf(buf, \":%s\", dest)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tkey := buf.String()\n\n\t\tif !first {\n\t\t\tfmt.Fprintln(w, \",\")\n\t\t}\n\t\tfirst = false\n\t\tfmt.Fprintf(w, \"%q: \", key)\n\t\tw.Write(bytes.TrimSpace(val))\n\t}\n\tfmt.Fprintln(w, \"\\n}\")\n}\n\n// serveDebugRequests will track requests for a period of time.\nfunc (h *Handler) serveDebugRequests(w http.ResponseWriter, r *http.Request) {\n\tvar d time.Duration\n\tif s := r.URL.Query().Get(\"seconds\"); s == \"\" {\n\t\td = DefaultDebugRequestsInterval\n\t} else if seconds, err := strconv.ParseInt(s, 10, 64); err != nil {\n\t\th.httpError(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t} else {\n\t\td = time.Duration(seconds) * time.Second\n\t\tif d > MaxDebugRequestsInterval {\n\t\t\th.httpError(w, fmt.Sprintf(\"exceeded maximum interval time: %s > 
%s\",\n\t\t\t\tinfluxql.FormatDuration(d),\n\t\t\t\tinfluxql.FormatDuration(MaxDebugRequestsInterval)),\n\t\t\t\thttp.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t}\n\n\tvar closing <-chan bool\n\tif notifier, ok := w.(http.CloseNotifier); ok {\n\t\tclosing = notifier.CloseNotify()\n\t}\n\n\tprofile := h.requestTracker.TrackRequests()\n\n\ttimer := time.NewTimer(d)\n\tselect {\n\tcase <-timer.C:\n\t\tprofile.Stop()\n\tcase <-closing:\n\t\t// Connection was closed early.\n\t\tprofile.Stop()\n\t\ttimer.Stop()\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tw.Header().Add(\"Connection\", \"close\")\n\n\tfmt.Fprintln(w, \"{\")\n\tfirst := true\n\tfor req, st := range profile.Requests {\n\t\tval, err := json.Marshal(st)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !first {\n\t\t\tfmt.Fprintln(w, \",\")\n\t\t}\n\t\tfirst = false\n\t\tfmt.Fprintf(w, \"%q: \", req.String())\n\t\tw.Write(bytes.TrimSpace(val))\n\t}\n\tfmt.Fprintln(w, \"\\n}\")\n}\n\n// parseSystemDiagnostics converts the system diagnostics into an appropriate\n// format for marshaling to JSON in the /debug/vars format.\nfunc parseSystemDiagnostics(d *diagnostics.Diagnostics) (map[string]interface{}, error) {\n\t// We don't need PID in this case.\n\tm := map[string]interface{}{\"currentTime\": nil, \"started\": nil, \"uptime\": nil}\n\tfor key := range m {\n\t\t// Find the associated column.\n\t\tci := -1\n\t\tfor i, col := range d.Columns {\n\t\t\tif col == key {\n\t\t\t\tci = i\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif ci == -1 {\n\t\t\treturn nil, fmt.Errorf(\"unable to find column %q\", key)\n\t\t}\n\n\t\tif len(d.Rows) < 1 || len(d.Rows[0]) <= ci {\n\t\t\treturn nil, fmt.Errorf(\"no data for column %q\", key)\n\t\t}\n\n\t\tvar res interface{}\n\t\tswitch v := d.Rows[0][ci].(type) {\n\t\tcase time.Time:\n\t\t\tres = v\n\t\tcase string:\n\t\t\t// Should be a string representation of a time.Duration\n\t\t\td, err := time.ParseDuration(v)\n\t\t\tif err != 
nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tres = int64(d.Seconds())\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"value for column %q is not parsable (got %T)\", key, v)\n\t\t}\n\t\tm[key] = res\n\t}\n\treturn m, nil\n}\n\n// httpError writes an error to the client in a standard format.\nfunc (h *Handler) httpError(w http.ResponseWriter, error string, code int) {\n\tif code == http.StatusUnauthorized {\n\t\t// If an unauthorized header will be sent back, add a WWW-Authenticate header\n\t\t// as an authorization challenge.\n\t\tw.Header().Set(\"WWW-Authenticate\", fmt.Sprintf(\"Basic realm=\\\"%s\\\"\", h.Config.Realm))\n\t}\n\n\tresponse := Response{Err: errors.New(error)}\n\tif rw, ok := w.(ResponseWriter); ok {\n\t\th.writeHeader(w, code)\n\t\trw.WriteResponse(response)\n\t\treturn\n\t}\n\n\t// Default implementation if the response writer hasn't been replaced\n\t// with our special response writer type.\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\th.writeHeader(w, code)\n\tb, _ := json.Marshal(response)\n\tw.Write(b)\n}\n\n// Filters and filter helpers\n\ntype credentials struct {\n\tMethod   AuthenticationMethod\n\tUsername string\n\tPassword string\n\tToken    string\n}\n\n// parseCredentials parses a request and returns the authentication credentials.\n// The credentials may be present as URL query params, or as a Basic\n// Authentication header.\n// As params: http://127.0.0.1/query?u=username&p=password\n// As basic auth: http://username:password@127.0.0.1\n// As Bearer token in Authorization header: Bearer <JWT_TOKEN_BLOB>\nfunc parseCredentials(r *http.Request) (*credentials, error) {\n\tq := r.URL.Query()\n\n\t// Check for username and password in URL params.\n\tif u, p := q.Get(\"u\"), q.Get(\"p\"); u != \"\" && p != \"\" {\n\t\treturn &credentials{\n\t\t\tMethod:   UserAuthentication,\n\t\t\tUsername: u,\n\t\t\tPassword: p,\n\t\t}, nil\n\t}\n\n\t// Check for the HTTP Authorization header.\n\tif s := r.Header.Get(\"Authorization\"); 
s != \"\" {\n\t\t// Check for Bearer token.\n\t\tstrs := strings.Split(s, \" \")\n\t\tif len(strs) == 2 && strs[0] == \"Bearer\" {\n\t\t\treturn &credentials{\n\t\t\t\tMethod: BearerAuthentication,\n\t\t\t\tToken:  strs[1],\n\t\t\t}, nil\n\t\t}\n\n\t\t// Check for basic auth.\n\t\tif u, p, ok := r.BasicAuth(); ok {\n\t\t\treturn &credentials{\n\t\t\t\tMethod:   UserAuthentication,\n\t\t\t\tUsername: u,\n\t\t\t\tPassword: p,\n\t\t\t}, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"unable to parse authentication credentials\")\n}\n\n// authenticate wraps a handler and ensures that if user credentials are passed in\n// an attempt is made to authenticate that user. If authentication fails, an error is returned.\n//\n// There is one exception: if there are no users in the system, authentication is not required. This\n// is to facilitate bootstrapping of a system with authentication enabled.\nfunc authenticate(inner func(http.ResponseWriter, *http.Request, meta.User), h *Handler, requireAuthentication bool) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// Return early if we are not authenticating\n\t\tif !requireAuthentication {\n\t\t\tinner(w, r, nil)\n\t\t\treturn\n\t\t}\n\t\tvar user meta.User\n\n\t\t// TODO corylanou: never allow this in the future without users\n\t\tif requireAuthentication && h.MetaClient.AdminUserExists() {\n\t\t\tcreds, err := parseCredentials(r)\n\t\t\tif err != nil {\n\t\t\t\tatomic.AddInt64(&h.stats.AuthenticationFailures, 1)\n\t\t\t\th.httpError(w, err.Error(), http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tswitch creds.Method {\n\t\t\tcase UserAuthentication:\n\t\t\t\tif creds.Username == \"\" {\n\t\t\t\t\tatomic.AddInt64(&h.stats.AuthenticationFailures, 1)\n\t\t\t\t\th.httpError(w, \"username required\", http.StatusUnauthorized)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tuser, err = h.MetaClient.Authenticate(creds.Username, creds.Password)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\tatomic.AddInt64(&h.stats.AuthenticationFailures, 1)\n\t\t\t\t\th.httpError(w, \"authorization failed\", http.StatusUnauthorized)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase BearerAuthentication:\n\t\t\t\tkeyLookupFn := func(token *jwt.Token) (interface{}, error) {\n\t\t\t\t\t// Check for expected signing method.\n\t\t\t\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected signing method: %v\", token.Header[\"alg\"])\n\t\t\t\t\t}\n\t\t\t\t\treturn []byte(h.Config.SharedSecret), nil\n\t\t\t\t}\n\n\t\t\t\t// Parse and validate the token.\n\t\t\t\ttoken, err := jwt.Parse(creds.Token, keyLookupFn)\n\t\t\t\tif err != nil {\n\t\t\t\t\th.httpError(w, err.Error(), http.StatusUnauthorized)\n\t\t\t\t\treturn\n\t\t\t\t} else if !token.Valid {\n\t\t\t\t\th.httpError(w, \"invalid token\", http.StatusUnauthorized)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tclaims, ok := token.Claims.(jwt.MapClaims)\n\t\t\t\tif !ok {\n\t\t\t\t\th.httpError(w, \"problem authenticating token\", http.StatusInternalServerError)\n\t\t\t\t\th.Logger.Info(\"Could not assert JWT token claims as jwt.MapClaims\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Make sure an expiration was set on the token.\n\t\t\t\tif exp, ok := claims[\"exp\"].(float64); !ok || exp <= 0.0 {\n\t\t\t\t\th.httpError(w, \"token expiration required\", http.StatusUnauthorized)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Get the username from the token.\n\t\t\t\tusername, ok := claims[\"username\"].(string)\n\t\t\t\tif !ok {\n\t\t\t\t\th.httpError(w, \"username in token must be a string\", http.StatusUnauthorized)\n\t\t\t\t\treturn\n\t\t\t\t} else if username == \"\" {\n\t\t\t\t\th.httpError(w, \"token must contain a username\", http.StatusUnauthorized)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Lookup user in the metastore.\n\t\t\t\tif user, err = h.MetaClient.User(username); err != nil {\n\t\t\t\t\th.httpError(w, err.Error(), 
http.StatusUnauthorized)\n\t\t\t\t\treturn\n\t\t\t\t} else if user == nil {\n\t\t\t\t\th.httpError(w, meta.ErrUserNotFound.Error(), http.StatusUnauthorized)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\th.httpError(w, \"unsupported authentication\", http.StatusUnauthorized)\n\t\t\t}\n\n\t\t}\n\t\tinner(w, r, user)\n\t})\n}\n\ntype gzipResponseWriter struct {\n\tio.Writer\n\thttp.ResponseWriter\n}\n\n// WriteHeader sets the provided code as the response status. If the\n// specified status is 204 No Content, then the Content-Encoding header\n// is removed from the response, to prevent clients expecting gzipped\n// encoded bodies from trying to deflate an empty response.\nfunc (w gzipResponseWriter) WriteHeader(code int) {\n\tif code != http.StatusNoContent {\n\t\tw.Header().Set(\"Content-Encoding\", \"gzip\")\n\t}\n\tw.ResponseWriter.WriteHeader(code)\n}\n\nfunc (w gzipResponseWriter) Write(b []byte) (int, error) {\n\treturn w.Writer.Write(b)\n}\n\nfunc (w gzipResponseWriter) Flush() {\n\tw.Writer.(*gzip.Writer).Flush()\n\tif w, ok := w.ResponseWriter.(http.Flusher); ok {\n\t\tw.Flush()\n\t}\n}\n\nfunc (w gzipResponseWriter) CloseNotify() <-chan bool {\n\treturn w.ResponseWriter.(http.CloseNotifier).CloseNotify()\n}\n\n// gzipFilter determines if the client can accept compressed responses, and encodes accordingly.\nfunc gzipFilter(inner http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif !strings.Contains(r.Header.Get(\"Accept-Encoding\"), \"gzip\") {\n\t\t\tinner.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\t\tgz := getGzipWriter(w)\n\t\tdefer putGzipWriter(gz)\n\t\tgzw := gzipResponseWriter{Writer: gz, ResponseWriter: w}\n\t\tinner.ServeHTTP(gzw, r)\n\t})\n}\n\nvar gzipWriterPool = sync.Pool{\n\tNew: func() interface{} {\n\t\treturn gzip.NewWriter(nil)\n\t},\n}\n\nfunc getGzipWriter(w io.Writer) *gzip.Writer {\n\tgz := gzipWriterPool.Get().(*gzip.Writer)\n\tgz.Reset(w)\n\treturn gz\n}\n\nfunc 
putGzipWriter(gz *gzip.Writer) {\n\tgz.Close()\n\tgzipWriterPool.Put(gz)\n}\n\n// cors responds to incoming requests and adds the appropriate cors headers\n// TODO: corylanou: add the ability to configure this in our config\nfunc cors(inner http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif origin := r.Header.Get(\"Origin\"); origin != \"\" {\n\t\t\tw.Header().Set(`Access-Control-Allow-Origin`, origin)\n\t\t\tw.Header().Set(`Access-Control-Allow-Methods`, strings.Join([]string{\n\t\t\t\t`DELETE`,\n\t\t\t\t`GET`,\n\t\t\t\t`OPTIONS`,\n\t\t\t\t`POST`,\n\t\t\t\t`PUT`,\n\t\t\t}, \", \"))\n\n\t\t\tw.Header().Set(`Access-Control-Allow-Headers`, strings.Join([]string{\n\t\t\t\t`Accept`,\n\t\t\t\t`Accept-Encoding`,\n\t\t\t\t`Authorization`,\n\t\t\t\t`Content-Length`,\n\t\t\t\t`Content-Type`,\n\t\t\t\t`X-CSRF-Token`,\n\t\t\t\t`X-HTTP-Method-Override`,\n\t\t\t}, \", \"))\n\n\t\t\tw.Header().Set(`Access-Control-Expose-Headers`, strings.Join([]string{\n\t\t\t\t`Date`,\n\t\t\t\t`X-InfluxDB-Version`,\n\t\t\t}, \", \"))\n\t\t}\n\n\t\tif r.Method == \"OPTIONS\" {\n\t\t\treturn\n\t\t}\n\n\t\tinner.ServeHTTP(w, r)\n\t})\n}\n\nfunc requestID(inner http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tuid := uuid.TimeUUID()\n\t\tr.Header.Set(\"Request-Id\", uid.String())\n\t\tw.Header().Set(\"Request-Id\", r.Header.Get(\"Request-Id\"))\n\n\t\tinner.ServeHTTP(w, r)\n\t})\n}\n\nfunc (h *Handler) logging(inner http.Handler, name string) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\t\tl := &responseLogger{w: w}\n\t\tinner.ServeHTTP(l, r)\n\t\th.CLFLogger.Println(buildLogLine(l, r, start))\n\t})\n}\n\nfunc (h *Handler) responseWriter(inner http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw = NewResponseWriter(w, r)\n\t\tinner.ServeHTTP(w, 
r)\n\t})\n}\n\nfunc (h *Handler) recovery(inner http.Handler, name string) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tstart := time.Now()\n\t\tl := &responseLogger{w: w}\n\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tlogLine := buildLogLine(l, r, start)\n\t\t\t\tlogLine = fmt.Sprintf(\"%s [panic:%s] %s\", logLine, err, debug.Stack())\n\t\t\t\th.CLFLogger.Println(logLine)\n\t\t\t}\n\t\t}()\n\n\t\tinner.ServeHTTP(l, r)\n\t})\n}\n\n// Response represents a list of statement results.\ntype Response struct {\n\tResults []*influxql.Result\n\tErr     error\n}\n\n// MarshalJSON encodes a Response struct into JSON.\nfunc (r Response) MarshalJSON() ([]byte, error) {\n\t// Define a struct that outputs \"error\" as a string.\n\tvar o struct {\n\t\tResults []*influxql.Result `json:\"results,omitempty\"`\n\t\tErr     string             `json:\"error,omitempty\"`\n\t}\n\n\t// Copy fields to output struct.\n\to.Results = r.Results\n\tif r.Err != nil {\n\t\to.Err = r.Err.Error()\n\t}\n\n\treturn json.Marshal(&o)\n}\n\n// UnmarshalJSON decodes the data into the Response struct.\nfunc (r *Response) UnmarshalJSON(b []byte) error {\n\tvar o struct {\n\t\tResults []*influxql.Result `json:\"results,omitempty\"`\n\t\tErr     string             `json:\"error,omitempty\"`\n\t}\n\n\terr := json.Unmarshal(b, &o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.Results = o.Results\n\tif o.Err != \"\" {\n\t\tr.Err = errors.New(o.Err)\n\t}\n\treturn nil\n}\n\n// Error returns the first error from any statement.\n// Returns nil if no errors occurred on any statements.\nfunc (r *Response) Error() error {\n\tif r.Err != nil {\n\t\treturn r.Err\n\t}\n\tfor _, rr := range r.Results {\n\t\tif rr.Err != nil {\n\t\t\treturn rr.Err\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/httpd/handler_test.go",
    "content": "package httpd_test\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"mime/multipart\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/internal\"\n\n\t\"github.com/dgrijalva/jwt-go\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/services/httpd\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n)\n\n// Ensure the handler returns results from a query (including nil results).\nfunc TestHandler_Query(t *testing.T) {\n\th := NewHandler(false)\n\th.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\tif stmt.String() != `SELECT * FROM bar` {\n\t\t\tt.Fatalf(\"unexpected query: %s\", stmt.String())\n\t\t} else if ctx.Database != `foo` {\n\t\t\tt.Fatalf(\"unexpected db: %s\", ctx.Database)\n\t\t}\n\t\tctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: \"series0\"}})}\n\t\tctx.Results <- &influxql.Result{StatementID: 2, Series: models.Rows([]*models.Row{{Name: \"series1\"}})}\n\t\treturn nil\n\t}\n\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, MustNewJSONRequest(\"GET\", \"/query?db=foo&q=SELECT+*+FROM+bar\", nil))\n\tif w.Code != http.StatusOK {\n\t\tt.Fatalf(\"unexpected status: %d\", w.Code)\n\t} else if body := strings.TrimSpace(w.Body.String()); body != `{\"results\":[{\"statement_id\":1,\"series\":[{\"name\":\"series0\"}]},{\"statement_id\":2,\"series\":[{\"name\":\"series1\"}]}]}` {\n\t\tt.Fatalf(\"unexpected body: %s\", body)\n\t}\n}\n\n// Ensure the handler returns results from a query passed as a file.\nfunc TestHandler_Query_File(t *testing.T) {\n\th := NewHandler(false)\n\th.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\tif stmt.String() != `SELECT * FROM bar` 
{\n\t\t\tt.Fatalf(\"unexpected query: %s\", stmt.String())\n\t\t} else if ctx.Database != `foo` {\n\t\t\tt.Fatalf(\"unexpected db: %s\", ctx.Database)\n\t\t}\n\t\tctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: \"series0\"}})}\n\t\tctx.Results <- &influxql.Result{StatementID: 2, Series: models.Rows([]*models.Row{{Name: \"series1\"}})}\n\t\treturn nil\n\t}\n\n\tvar body bytes.Buffer\n\twriter := multipart.NewWriter(&body)\n\tpart, err := writer.CreateFormFile(\"q\", \"\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tio.WriteString(part, \"SELECT * FROM bar\")\n\n\tif err := writer.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tr := MustNewJSONRequest(\"POST\", \"/query?db=foo\", &body)\n\tr.Header.Set(\"Content-Type\", writer.FormDataContentType())\n\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, r)\n\tif w.Code != http.StatusOK {\n\t\tt.Fatalf(\"unexpected status: %d\", w.Code)\n\t} else if body := strings.TrimSpace(w.Body.String()); body != `{\"results\":[{\"statement_id\":1,\"series\":[{\"name\":\"series0\"}]},{\"statement_id\":2,\"series\":[{\"name\":\"series1\"}]}]}` {\n\t\tt.Fatalf(\"unexpected body: %s\", body)\n\t}\n}\n\n// Test query with user authentication.\nfunc TestHandler_Query_Auth(t *testing.T) {\n\t// Create the handler to be tested.\n\th := NewHandler(true)\n\n\t// Set mock meta client functions for the handler to use.\n\th.MetaClient.AdminUserExistsFn = func() bool { return true }\n\n\th.MetaClient.UserFn = func(username string) (meta.User, error) {\n\t\tif username != \"user1\" {\n\t\t\treturn nil, meta.ErrUserNotFound\n\t\t}\n\t\treturn &meta.UserInfo{\n\t\t\tName:  \"user1\",\n\t\t\tHash:  \"abcd\",\n\t\t\tAdmin: true,\n\t\t}, nil\n\t}\n\n\th.MetaClient.AuthenticateFn = func(u, p string) (meta.User, error) {\n\t\tif u != \"user1\" {\n\t\t\treturn nil, fmt.Errorf(\"unexpected user: exp: user1, got: %s\", u)\n\t\t} else if p != \"abcd\" {\n\t\t\treturn nil, fmt.Errorf(\"unexpected password: exp: abcd, 
got: %s\", p)\n\t\t}\n\t\treturn h.MetaClient.User(u)\n\t}\n\n\t// Set mock query authorizer for handler to use.\n\th.QueryAuthorizer.AuthorizeQueryFn = func(u meta.User, query *influxql.Query, database string) error {\n\t\treturn nil\n\t}\n\n\t// Set mock statement executor for handler to use.\n\th.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\tif stmt.String() != `SELECT * FROM bar` {\n\t\t\tt.Fatalf(\"unexpected query: %s\", stmt.String())\n\t\t} else if ctx.Database != `foo` {\n\t\t\tt.Fatalf(\"unexpected db: %s\", ctx.Database)\n\t\t}\n\t\tctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: \"series0\"}})}\n\t\tctx.Results <- &influxql.Result{StatementID: 2, Series: models.Rows([]*models.Row{{Name: \"series1\"}})}\n\t\treturn nil\n\t}\n\n\t// Test the handler with valid user and password in the URL parameters.\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, MustNewJSONRequest(\"GET\", \"/query?u=user1&p=abcd&db=foo&q=SELECT+*+FROM+bar\", nil))\n\tif w.Code != http.StatusOK {\n\t\tt.Fatalf(\"unexpected status: %d: %s\", w.Code, w.Body.String())\n\t} else if body := strings.TrimSpace(w.Body.String()); body != `{\"results\":[{\"statement_id\":1,\"series\":[{\"name\":\"series0\"}]},{\"statement_id\":2,\"series\":[{\"name\":\"series1\"}]}]}` {\n\t\tt.Fatalf(\"unexpected body: %s\", body)\n\t}\n\n\t// Test the handler with valid user and password using basic auth.\n\tw = httptest.NewRecorder()\n\tr := MustNewJSONRequest(\"GET\", \"/query?db=foo&q=SELECT+*+FROM+bar\", nil)\n\tr.SetBasicAuth(\"user1\", \"abcd\")\n\th.ServeHTTP(w, r)\n\tif w.Code != http.StatusOK {\n\t\tt.Fatalf(\"unexpected status: %d: %s\", w.Code, w.Body.String())\n\t} else if body := strings.TrimSpace(w.Body.String()); body != `{\"results\":[{\"statement_id\":1,\"series\":[{\"name\":\"series0\"}]},{\"statement_id\":2,\"series\":[{\"name\":\"series1\"}]}]}` {\n\t\tt.Fatalf(\"unexpected body: %s\", 
body)\n\t}\n\n\t// Test the handler with valid JWT bearer token.\n\treq := MustNewJSONRequest(\"GET\", \"/query?db=foo&q=SELECT+*+FROM+bar\", nil)\n\t// Create a signed JWT token string and add it to the request header.\n\t_, signedToken := MustJWTToken(\"user1\", h.Config.SharedSecret, false)\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", signedToken))\n\n\tw = httptest.NewRecorder()\n\th.ServeHTTP(w, req)\n\tif w.Code != http.StatusOK {\n\t\tt.Fatalf(\"unexpected status: %d: %s\", w.Code, w.Body.String())\n\t} else if body := strings.TrimSpace(w.Body.String()); body != `{\"results\":[{\"statement_id\":1,\"series\":[{\"name\":\"series0\"}]},{\"statement_id\":2,\"series\":[{\"name\":\"series1\"}]}]}` {\n\t\tt.Fatalf(\"unexpected body: %s\", body)\n\t}\n\n\t// Test the handler with JWT token signed with invalid key.\n\treq = MustNewJSONRequest(\"GET\", \"/query?db=foo&q=SELECT+*+FROM+bar\", nil)\n\t// Create a signed JWT token string and add it to the request header.\n\t_, signedToken = MustJWTToken(\"user1\", \"invalid key\", false)\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", signedToken))\n\n\tw = httptest.NewRecorder()\n\th.ServeHTTP(w, req)\n\tif w.Code != http.StatusUnauthorized {\n\t\tt.Fatalf(\"unexpected status: %d: %s\", w.Code, w.Body.String())\n\t} else if body := strings.TrimSpace(w.Body.String()); body != `{\"error\":\"signature is invalid\"}` {\n\t\tt.Fatalf(\"unexpected body: %s\", body)\n\t}\n\n\t// Test handler with valid JWT token carrying non-existant user.\n\t_, signedToken = MustJWTToken(\"bad_user\", h.Config.SharedSecret, false)\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", signedToken))\n\n\tw = httptest.NewRecorder()\n\th.ServeHTTP(w, req)\n\tif w.Code != http.StatusUnauthorized {\n\t\tt.Fatalf(\"unexpected status: %d: %s\", w.Code, w.Body.String())\n\t} else if body := strings.TrimSpace(w.Body.String()); body != `{\"error\":\"user not found\"}` {\n\t\tt.Fatalf(\"unexpected body: %s\", 
body)\n\t}\n\n\t// Test handler with expired JWT token.\n\t_, signedToken = MustJWTToken(\"user1\", h.Config.SharedSecret, true)\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", signedToken))\n\n\tw = httptest.NewRecorder()\n\th.ServeHTTP(w, req)\n\tif w.Code != http.StatusUnauthorized {\n\t\tt.Fatalf(\"unexpected status: %d: %s\", w.Code, w.Body.String())\n\t} else if !strings.Contains(w.Body.String(), `{\"error\":\"Token is expired`) {\n\t\tt.Fatalf(\"unexpected body: %s\", w.Body.String())\n\t}\n\n\t// Test handler with JWT token that has no expiration set.\n\ttoken, _ := MustJWTToken(\"user1\", h.Config.SharedSecret, false)\n\tdelete(token.Claims.(jwt.MapClaims), \"exp\")\n\tsignedToken, err := token.SignedString([]byte(h.Config.SharedSecret))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", signedToken))\n\tw = httptest.NewRecorder()\n\th.ServeHTTP(w, req)\n\tif w.Code != http.StatusUnauthorized {\n\t\tt.Fatalf(\"unexpected status: %d: %s\", w.Code, w.Body.String())\n\t} else if body := strings.TrimSpace(w.Body.String()); body != `{\"error\":\"token expiration required\"}` {\n\t\tt.Fatalf(\"unexpected body: %s\", body)\n\t}\n\n\t// Test the handler with valid user and password in the url and invalid in\n\t// basic auth (prioritize url).\n\tw = httptest.NewRecorder()\n\tr = MustNewJSONRequest(\"GET\", \"/query?u=user1&p=abcd&db=foo&q=SELECT+*+FROM+bar\", nil)\n\tr.SetBasicAuth(\"user1\", \"efgh\")\n\th.ServeHTTP(w, r)\n\tif w.Code != http.StatusOK {\n\t\tt.Fatalf(\"unexpected status: %d: %s\", w.Code, w.Body.String())\n\t} else if body := strings.TrimSpace(w.Body.String()); body != `{\"results\":[{\"statement_id\":1,\"series\":[{\"name\":\"series0\"}]},{\"statement_id\":2,\"series\":[{\"name\":\"series1\"}]}]}` {\n\t\tt.Fatalf(\"unexpected body: %s\", body)\n\t}\n}\n\n// Ensure the handler returns results from a query (including nil results).\nfunc TestHandler_QueryRegex(t *testing.T) 
{\n\th := NewHandler(false)\n\th.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\tif stmt.String() != `SELECT * FROM test WHERE url =~ /http\\:\\/\\/www.akamai\\.com/` {\n\t\t\tt.Fatalf(\"unexpected query: %s\", stmt.String())\n\t\t} else if ctx.Database != `test` {\n\t\t\tt.Fatalf(\"unexpected db: %s\", ctx.Database)\n\t\t}\n\t\tctx.Results <- nil\n\t\treturn nil\n\t}\n\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, MustNewRequest(\"GET\", \"/query?db=test&q=SELECT%20%2A%20FROM%20test%20WHERE%20url%20%3D~%20%2Fhttp%5C%3A%5C%2F%5C%2Fwww.akamai%5C.com%2F\", nil))\n}\n\n// Ensure the handler merges results from the same statement.\nfunc TestHandler_Query_MergeResults(t *testing.T) {\n\th := NewHandler(false)\n\th.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\tctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: \"series0\"}})}\n\t\tctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: \"series1\"}})}\n\t\treturn nil\n\t}\n\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, MustNewJSONRequest(\"GET\", \"/query?db=foo&q=SELECT+*+FROM+bar\", nil))\n\tif w.Code != http.StatusOK {\n\t\tt.Fatalf(\"unexpected status: %d\", w.Code)\n\t} else if body := strings.TrimSpace(w.Body.String()); body != `{\"results\":[{\"statement_id\":1,\"series\":[{\"name\":\"series0\"},{\"name\":\"series1\"}]}]}` {\n\t\tt.Fatalf(\"unexpected body: %s\", body)\n\t}\n}\n\n// Ensure the handler merges results from the same statement.\nfunc TestHandler_Query_MergeEmptyResults(t *testing.T) {\n\th := NewHandler(false)\n\th.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\tctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows{}}\n\t\tctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: 
\"series1\"}})}\n\t\treturn nil\n\t}\n\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, MustNewJSONRequest(\"GET\", \"/query?db=foo&q=SELECT+*+FROM+bar\", nil))\n\tif w.Code != http.StatusOK {\n\t\tt.Fatalf(\"unexpected status: %d\", w.Code)\n\t} else if body := strings.TrimSpace(w.Body.String()); body != `{\"results\":[{\"statement_id\":1,\"series\":[{\"name\":\"series1\"}]}]}` {\n\t\tt.Fatalf(\"unexpected body: %s\", body)\n\t}\n}\n\n// Ensure the handler can parse chunked and chunk size query parameters.\nfunc TestHandler_Query_Chunked(t *testing.T) {\n\th := NewHandler(false)\n\th.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\tif ctx.ChunkSize != 2 {\n\t\t\tt.Fatalf(\"unexpected chunk size: %d\", ctx.ChunkSize)\n\t\t}\n\t\tctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: \"series0\"}})}\n\t\tctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: \"series1\"}})}\n\t\treturn nil\n\t}\n\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, MustNewJSONRequest(\"GET\", \"/query?db=foo&q=SELECT+*+FROM+bar&chunked=true&chunk_size=2\", nil))\n\tif w.Code != http.StatusOK {\n\t\tt.Fatalf(\"unexpected status: %d\", w.Code)\n\t} else if w.Body.String() != `{\"results\":[{\"statement_id\":1,\"series\":[{\"name\":\"series0\"}]}]}\n{\"results\":[{\"statement_id\":1,\"series\":[{\"name\":\"series1\"}]}]}\n` {\n\t\tt.Fatalf(\"unexpected body: %s\", w.Body.String())\n\t}\n}\n\n// Ensure the handler can accept an async query.\nfunc TestHandler_Query_Async(t *testing.T) {\n\tdone := make(chan struct{})\n\th := NewHandler(false)\n\th.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\tif stmt.String() != `SELECT * FROM bar` {\n\t\t\tt.Fatalf(\"unexpected query: %s\", stmt.String())\n\t\t} else if ctx.Database != `foo` {\n\t\t\tt.Fatalf(\"unexpected db: %s\", 
ctx.Database)\n\t\t}\n\t\tctx.Results <- &influxql.Result{StatementID: 1, Series: models.Rows([]*models.Row{{Name: \"series0\"}})}\n\t\tctx.Results <- &influxql.Result{StatementID: 2, Series: models.Rows([]*models.Row{{Name: \"series1\"}})}\n\t\tclose(done)\n\t\treturn nil\n\t}\n\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, MustNewJSONRequest(\"GET\", \"/query?db=foo&q=SELECT+*+FROM+bar&async=true\", nil))\n\tif w.Code != http.StatusNoContent {\n\t\tt.Fatalf(\"unexpected status: %d\", w.Code)\n\t} else if body := strings.TrimSpace(w.Body.String()); body != `` {\n\t\tt.Fatalf(\"unexpected body: %s\", body)\n\t}\n\n\t// Wait to make sure the async query runs and completes.\n\ttimer := time.NewTimer(100 * time.Millisecond)\n\tdefer timer.Stop()\n\n\tselect {\n\tcase <-timer.C:\n\t\tt.Fatal(\"timeout while waiting for async query to complete\")\n\tcase <-done:\n\t}\n}\n\n// Ensure the handler returns a status 400 if the query is not passed in.\nfunc TestHandler_Query_ErrQueryRequired(t *testing.T) {\n\th := NewHandler(false)\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, MustNewJSONRequest(\"GET\", \"/query\", nil))\n\tif w.Code != http.StatusBadRequest {\n\t\tt.Fatalf(\"unexpected status: %d\", w.Code)\n\t} else if body := strings.TrimSpace(w.Body.String()); body != `{\"error\":\"missing required parameter \\\"q\\\"\"}` {\n\t\tt.Fatalf(\"unexpected body: %s\", body)\n\t}\n}\n\n// Ensure the handler returns a status 400 if the query cannot be parsed.\nfunc TestHandler_Query_ErrInvalidQuery(t *testing.T) {\n\th := NewHandler(false)\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, MustNewJSONRequest(\"GET\", \"/query?q=SELECT\", nil))\n\tif w.Code != http.StatusBadRequest {\n\t\tt.Fatalf(\"unexpected status: %d\", w.Code)\n\t} else if body := strings.TrimSpace(w.Body.String()); body != `{\"error\":\"error parsing query: found EOF, expected identifier, string, number, bool at line 1, char 8\"}` {\n\t\tt.Fatalf(\"unexpected body: %s\", body)\n\t}\n}\n\n// Ensure the 
handler returns an appropriate 401 or 403 status when authentication or authorization fails.\nfunc TestHandler_Query_ErrAuthorize(t *testing.T) {\n\th := NewHandler(true)\n\th.QueryAuthorizer.AuthorizeQueryFn = func(u meta.User, q *influxql.Query, db string) error {\n\t\treturn errors.New(\"marker\")\n\t}\n\th.MetaClient.AdminUserExistsFn = func() bool { return true }\n\th.MetaClient.AuthenticateFn = func(u, p string) (meta.User, error) {\n\n\t\tusers := []meta.UserInfo{\n\t\t\t{\n\t\t\t\tName:  \"admin\",\n\t\t\t\tHash:  \"admin\",\n\t\t\t\tAdmin: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"user1\",\n\t\t\t\tHash: \"abcd\",\n\t\t\t\tPrivileges: map[string]influxql.Privilege{\n\t\t\t\t\t\"db0\": influxql.ReadPrivilege,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tfor _, user := range users {\n\t\t\tif u == user.Name {\n\t\t\t\tif p == user.Hash {\n\t\t\t\t\treturn &user, nil\n\t\t\t\t}\n\t\t\t\treturn nil, meta.ErrAuthenticate\n\t\t\t}\n\t\t}\n\t\treturn nil, meta.ErrUserNotFound\n\t}\n\n\tfor i, tt := range []struct {\n\t\tuser     string\n\t\tpassword string\n\t\tquery    string\n\t\tcode     int\n\t}{\n\t\t{\n\t\t\tquery: \"/query?q=SHOW+DATABASES\",\n\t\t\tcode:  http.StatusUnauthorized,\n\t\t},\n\t\t{\n\t\t\tuser:     \"user1\",\n\t\t\tpassword: \"abcd\",\n\t\t\tquery:    \"/query?q=SHOW+DATABASES\",\n\t\t\tcode:     http.StatusForbidden,\n\t\t},\n\t\t{\n\t\t\tuser:     \"user2\",\n\t\t\tpassword: \"abcd\",\n\t\t\tquery:    \"/query?q=SHOW+DATABASES\",\n\t\t\tcode:     http.StatusUnauthorized,\n\t\t},\n\t} {\n\t\tw := httptest.NewRecorder()\n\t\tr := MustNewJSONRequest(\"GET\", tt.query, nil)\n\t\tparams := r.URL.Query()\n\t\tif tt.user != \"\" {\n\t\t\tparams.Set(\"u\", tt.user)\n\t\t}\n\t\tif tt.password != \"\" {\n\t\t\tparams.Set(\"p\", tt.password)\n\t\t}\n\t\tr.URL.RawQuery = params.Encode()\n\n\t\th.ServeHTTP(w, r)\n\t\tif w.Code != tt.code {\n\t\t\tt.Errorf(\"%d. 
unexpected status: got=%d exp=%d\\noutput: %s\", i, w.Code, tt.code, w.Body.String())\n\t\t}\n\t}\n}\n\n// Ensure the handler returns a status 200 if an error is returned in the result.\nfunc TestHandler_Query_ErrResult(t *testing.T) {\n\th := NewHandler(false)\n\th.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\treturn errors.New(\"measurement not found\")\n\t}\n\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, MustNewJSONRequest(\"GET\", \"/query?db=foo&q=SHOW+SERIES+from+bin\", nil))\n\tif w.Code != http.StatusOK {\n\t\tt.Fatalf(\"unexpected status: %d\", w.Code)\n\t} else if body := strings.TrimSpace(w.Body.String()); body != `{\"results\":[{\"statement_id\":0,\"error\":\"measurement not found\"}]}` {\n\t\tt.Fatalf(\"unexpected body: %s\", body)\n\t}\n}\n\n// Ensure that closing the HTTP connection causes the query to be interrupted.\nfunc TestHandler_Query_CloseNotify(t *testing.T) {\n\t// Avoid leaking a goroutine when this fails.\n\tdone := make(chan struct{})\n\tdefer close(done)\n\n\tinterrupted := make(chan struct{})\n\th := NewHandler(false)\n\th.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\tselect {\n\t\tcase <-ctx.InterruptCh:\n\t\tcase <-done:\n\t\t}\n\t\tclose(interrupted)\n\t\treturn nil\n\t}\n\n\ts := httptest.NewServer(h)\n\tdefer s.Close()\n\n\t// Parse the URL and generate a query request.\n\tu, err := url.Parse(s.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tu.Path = \"/query\"\n\n\tvalues := url.Values{}\n\tvalues.Set(\"q\", \"SELECT * FROM cpu\")\n\tvalues.Set(\"db\", \"db0\")\n\tvalues.Set(\"rp\", \"rp0\")\n\tvalues.Set(\"chunked\", \"true\")\n\tu.RawQuery = values.Encode()\n\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Perform the request and retrieve the response.\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\n\t// Validate that the interrupted channel has NOT been closed yet.\n\ttimer := time.NewTimer(100 * time.Millisecond)\n\tselect {\n\tcase <-interrupted:\n\t\ttimer.Stop()\n\t\tt.Fatal(\"query interrupted unexpectedly\")\n\tcase <-timer.C:\n\t}\n\n\t// Close the response body which should abort the query in the handler.\n\tresp.Body.Close()\n\n\t// The query should abort within 100 milliseconds.\n\ttimer.Reset(100 * time.Millisecond)\n\tselect {\n\tcase <-interrupted:\n\t\ttimer.Stop()\n\tcase <-timer.C:\n\t\tt.Fatal(\"timeout while waiting for query to abort\")\n\t}\n}\n\n// Ensure the handler handles ping requests correctly.\n// TODO: This should be expanded to verify the MetaClient check in servePing is working correctly\nfunc TestHandler_Ping(t *testing.T) {\n\th := NewHandler(false)\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, MustNewRequest(\"GET\", \"/ping\", nil))\n\tif w.Code != http.StatusNoContent {\n\t\tt.Fatalf(\"unexpected status: %d\", w.Code)\n\t}\n\th.ServeHTTP(w, MustNewRequest(\"HEAD\", \"/ping\", nil))\n\tif w.Code != http.StatusNoContent {\n\t\tt.Fatalf(\"unexpected status: %d\", w.Code)\n\t}\n}\n\n// Ensure the handler returns the version correctly from the different endpoints.\nfunc TestHandler_Version(t *testing.T) {\n\th := NewHandler(false)\n\th.StatementExecutor.ExecuteStatementFn = func(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\t\treturn nil\n\t}\n\ttests := []struct {\n\t\tmethod   string\n\t\tendpoint string\n\t\tbody     io.Reader\n\t}{\n\t\t{\n\t\t\tmethod:   \"GET\",\n\t\t\tendpoint: \"/ping\",\n\t\t\tbody:     nil,\n\t\t},\n\t\t{\n\t\t\tmethod:   \"GET\",\n\t\t\tendpoint: \"/query?db=foo&q=SELECT+*+FROM+bar\",\n\t\t\tbody:     nil,\n\t\t},\n\t\t{\n\t\t\tmethod:   \"POST\",\n\t\t\tendpoint: \"/write\",\n\t\t\tbody:     bytes.NewReader(make([]byte, 10)),\n\t\t},\n\t\t{\n\t\t\tmethod:   \"GET\",\n\t\t\tendpoint: \"/notfound\",\n\t\t\tbody:     nil,\n\t\t},\n\t}\n\n\tfor _, test := 
range tests {\n\t\tw := httptest.NewRecorder()\n\t\th.ServeHTTP(w, MustNewRequest(test.method, test.endpoint, test.body))\n\t\tif v, ok := w.HeaderMap[\"X-Influxdb-Version\"]; ok {\n\t\t\tif v[0] != \"0.0.0\" {\n\t\t\t\tt.Fatalf(\"unexpected version: %s\", v)\n\t\t\t}\n\t\t} else {\n\t\t\tt.Fatalf(\"Header entry 'X-Influxdb-Version' not present\")\n\t\t}\n\t}\n}\n\n// Ensure the handler handles status requests correctly.\nfunc TestHandler_Status(t *testing.T) {\n\th := NewHandler(false)\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, MustNewRequest(\"GET\", \"/status\", nil))\n\tif w.Code != http.StatusNoContent {\n\t\tt.Fatalf(\"unexpected status: %d\", w.Code)\n\t}\n\th.ServeHTTP(w, MustNewRequest(\"HEAD\", \"/status\", nil))\n\tif w.Code != http.StatusNoContent {\n\t\tt.Fatalf(\"unexpected status: %d\", w.Code)\n\t}\n}\n\n// Ensure write endpoint can handle bad requests\nfunc TestHandler_HandleBadRequestBody(t *testing.T) {\n\tb := bytes.NewReader(make([]byte, 10))\n\th := NewHandler(false)\n\tw := httptest.NewRecorder()\n\th.ServeHTTP(w, MustNewRequest(\"POST\", \"/write\", b))\n\tif w.Code != http.StatusBadRequest {\n\t\tt.Fatalf(\"unexpected status: %d\", w.Code)\n\t}\n}\n\n// Ensure X-Forwarded-For header writes the correct log message.\nfunc TestHandler_XForwardedFor(t *testing.T) {\n\tvar buf bytes.Buffer\n\th := NewHandler(false)\n\th.CLFLogger = log.New(&buf, \"\", 0)\n\n\treq := MustNewRequest(\"GET\", \"/query\", nil)\n\treq.Header.Set(\"X-Forwarded-For\", \"192.168.0.1\")\n\treq.RemoteAddr = \"127.0.0.1\"\n\th.ServeHTTP(httptest.NewRecorder(), req)\n\n\tparts := strings.Split(buf.String(), \" \")\n\tif parts[0] != \"192.168.0.1,127.0.0.1\" {\n\t\tt.Errorf(\"unexpected host ip address: %s\", parts[0])\n\t}\n}\n\n// NewHandler represents a test wrapper for httpd.Handler.\ntype Handler struct {\n\t*httpd.Handler\n\tMetaClient        *internal.MetaClientMock\n\tStatementExecutor HandlerStatementExecutor\n\tQueryAuthorizer   
HandlerQueryAuthorizer\n}\n\n// NewHandler returns a new instance of Handler.\nfunc NewHandler(requireAuthentication bool) *Handler {\n\tconfig := httpd.NewConfig()\n\tconfig.AuthEnabled = requireAuthentication\n\tconfig.SharedSecret = \"super secret key\"\n\n\th := &Handler{\n\t\tHandler: httpd.NewHandler(config),\n\t}\n\n\th.MetaClient = &internal.MetaClientMock{}\n\n\th.Handler.MetaClient = h.MetaClient\n\th.Handler.QueryExecutor = influxql.NewQueryExecutor()\n\th.Handler.QueryExecutor.StatementExecutor = &h.StatementExecutor\n\th.Handler.QueryAuthorizer = &h.QueryAuthorizer\n\th.Handler.Version = \"0.0.0\"\n\treturn h\n}\n\n// HandlerStatementExecutor is a mock implementation of Handler.StatementExecutor.\ntype HandlerStatementExecutor struct {\n\tExecuteStatementFn func(stmt influxql.Statement, ctx influxql.ExecutionContext) error\n}\n\nfunc (e *HandlerStatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx influxql.ExecutionContext) error {\n\treturn e.ExecuteStatementFn(stmt, ctx)\n}\n\n// HandlerQueryAuthorizer is a mock implementation of Handler.QueryAuthorizer.\ntype HandlerQueryAuthorizer struct {\n\tAuthorizeQueryFn func(u meta.User, query *influxql.Query, database string) error\n}\n\nfunc (a *HandlerQueryAuthorizer) AuthorizeQuery(u meta.User, query *influxql.Query, database string) error {\n\treturn a.AuthorizeQueryFn(u, query, database)\n}\n\n// MustNewRequest returns a new HTTP request. Panic on error.\nfunc MustNewRequest(method, urlStr string, body io.Reader) *http.Request {\n\tr, err := http.NewRequest(method, urlStr, body)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn r\n}\n\n// MustNewRequest returns a new HTTP request with the content type set. 
Panic on error.\nfunc MustNewJSONRequest(method, urlStr string, body io.Reader) *http.Request {\n\tr := MustNewRequest(method, urlStr, body)\n\tr.Header.Set(\"Accept\", \"application/json\")\n\treturn r\n}\n\n// MustJWTToken returns a new JWT token and signed string or panics trying.\nfunc MustJWTToken(username, secret string, expired bool) (*jwt.Token, string) {\n\ttoken := jwt.New(jwt.GetSigningMethod(\"HS512\"))\n\ttoken.Claims.(jwt.MapClaims)[\"username\"] = username\n\tif expired {\n\t\ttoken.Claims.(jwt.MapClaims)[\"exp\"] = time.Now().Add(-time.Second).Unix()\n\t} else {\n\t\ttoken.Claims.(jwt.MapClaims)[\"exp\"] = time.Now().Add(time.Minute * 10).Unix()\n\t}\n\tsigned, err := token.SignedString([]byte(secret))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn token, signed\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/httpd/listen.go",
    "content": "package httpd\n\nimport (\n\t\"net\"\n\t\"sync\"\n)\n\n// LimitListener returns a Listener that accepts at most n simultaneous\n// connections from the provided Listener and will drop extra connections.\nfunc LimitListener(l net.Listener, n int) net.Listener {\n\treturn &limitListener{Listener: l, sem: make(chan struct{}, n)}\n}\n\n// limitListener is a listener that limits the number of active connections\n// at any given time.\ntype limitListener struct {\n\tnet.Listener\n\tsem chan struct{}\n}\n\nfunc (l *limitListener) release() {\n\t<-l.sem\n}\n\nfunc (l *limitListener) Accept() (net.Conn, error) {\n\tfor {\n\t\tc, err := l.Listener.Accept()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tselect {\n\t\tcase l.sem <- struct{}{}:\n\t\t\treturn &limitListenerConn{Conn: c, release: l.release}, nil\n\t\tdefault:\n\t\t\tc.Close()\n\t\t}\n\t}\n}\n\ntype limitListenerConn struct {\n\tnet.Conn\n\treleaseOnce sync.Once\n\trelease     func()\n}\n\nfunc (l *limitListenerConn) Close() error {\n\terr := l.Conn.Close()\n\tl.releaseOnce.Do(l.release)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/httpd/listen_test.go",
    "content": "package httpd_test\n\nimport (\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/services/httpd\"\n)\n\ntype fakeListener struct {\n\tAcceptFn func() (net.Conn, error)\n}\n\nfunc (l *fakeListener) Accept() (net.Conn, error) {\n\tif l.AcceptFn != nil {\n\t\treturn l.AcceptFn()\n\t}\n\treturn &fakeConn{}, nil\n}\n\nfunc (*fakeListener) Close() error   { return nil }\nfunc (*fakeListener) Addr() net.Addr { return nil }\n\ntype fakeConn struct {\n\tclosed bool\n}\n\nfunc (*fakeConn) Read([]byte) (int, error)    { return 0, io.EOF }\nfunc (*fakeConn) Write(b []byte) (int, error) { return len(b), nil }\nfunc (c *fakeConn) Close() error {\n\tc.closed = true\n\treturn nil\n}\nfunc (*fakeConn) LocalAddr() net.Addr              { return nil }\nfunc (*fakeConn) RemoteAddr() net.Addr             { return nil }\nfunc (*fakeConn) SetDeadline(time.Time) error      { return nil }\nfunc (*fakeConn) SetReadDeadline(time.Time) error  { return nil }\nfunc (*fakeConn) SetWriteDeadline(time.Time) error { return nil }\n\nfunc TestLimitListener(t *testing.T) {\n\tconns := make(chan net.Conn, 2)\n\tl := httpd.LimitListener(&fakeListener{\n\t\tAcceptFn: func() (net.Conn, error) {\n\t\t\tselect {\n\t\t\tcase c := <-conns:\n\t\t\t\tif c != nil {\n\t\t\t\t\treturn c, nil\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t}\n\t\t\treturn nil, io.EOF\n\t\t},\n\t}, 1)\n\tc1, c2 := &fakeConn{}, &fakeConn{}\n\tconns <- c1\n\tconns <- c2\n\n\tvar c net.Conn\n\tvar err error\n\tif c, err = l.Accept(); err != nil {\n\t\tt.Fatalf(\"expected accept to succeed: %s\", err)\n\t}\n\n\tif _, err = l.Accept(); err != io.EOF {\n\t\tt.Fatalf(\"expected eof, got %s\", err)\n\t} else if !c2.closed {\n\t\tt.Fatalf(\"expected connection to be automatically closed\")\n\t}\n\tc.Close()\n\n\tconns <- &fakeConn{}\n\tif _, err = l.Accept(); err != nil {\n\t\tt.Fatalf(\"expeced accept to succeed: %s\", err)\n\t}\n}\n\nfunc BenchmarkLimitListener(b *testing.B) {\n\tvar wg 
sync.WaitGroup\n\twg.Add(b.N)\n\n\tl := httpd.LimitListener(&fakeListener{}, b.N)\n\terrC := make(chan error)\n\tfor i := 0; i < b.N; i++ {\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tc, err := l.Accept()\n\t\t\tif err != nil {\n\t\t\t\terrC <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tc.Close()\n\t\t}()\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(errC)\n\t}()\n\n\tfor err := range errC {\n\t\tif err != nil {\n\t\t\tb.Error(err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/httpd/pprof.go",
    "content": "package httpd\n\nimport (\n\t\"archive/tar\"\n\t\"bytes\"\n\t\"compress/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\thttppprof \"net/http/pprof\"\n\t\"runtime/pprof\"\n\t\"sort\"\n\t\"strconv\"\n\t\"text/tabwriter\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n)\n\n// handleProfiles determines which profile to return to the requester.\nfunc (h *Handler) handleProfiles(w http.ResponseWriter, r *http.Request) {\n\tswitch r.URL.Path {\n\tcase \"/debug/pprof/cmdline\":\n\t\thttppprof.Cmdline(w, r)\n\tcase \"/debug/pprof/profile\":\n\t\thttppprof.Profile(w, r)\n\tcase \"/debug/pprof/symbol\":\n\t\thttppprof.Symbol(w, r)\n\tcase \"/debug/pprof/all\":\n\t\th.archiveProfilesAndQueries(w, r)\n\tdefault:\n\t\thttppprof.Index(w, r)\n\t}\n}\n\n// prof describes a profile name and a debug value, or in the case of a CPU\n// profile, the number of seconds to collect the profile for.\ntype prof struct {\n\tName  string\n\tDebug int64\n}\n\n// archiveProfilesAndQueries collects the following profiles:\n//\t- goroutine profile\n//\t- heap profile\n//\t- blocking profile\n//\t- (optionally) CPU profile\n//\n// It also collects the following query results:\n//\n//  - SHOW SHARDS\n//  - SHOW STATS\n//  - SHOW DIAGNOSTICS\n//\n// All information is added to a tar archive and then compressed, before being\n// returned to the requester as an archive file. Where profiles support debug\n// parameters, the profile is collected with debug=1. To optionally include a\n// CPU profile, the requester should provide a `cpu` query parameter, and can\n// also provide a `seconds` parameter to specify a non-default profile\n// collection time. 
The default CPU profile collection time is 30 seconds.\n//\n// Example request including CPU profile:\n//\n//\thttp://localhost:8086/debug/pprof/all?cpu=true&seconds=45\n//\n// The value after the `cpu` query parameter is not actually important, as long\n// as there is something there.\n//\nfunc (h *Handler) archiveProfilesAndQueries(w http.ResponseWriter, r *http.Request) {\n\tvar allProfs = []*prof{\n\t\t{Name: \"goroutine\", Debug: 1},\n\t\t{Name: \"block\", Debug: 1},\n\t\t{Name: \"heap\", Debug: 1},\n\t}\n\n\t// Capture a CPU profile?\n\tif r.FormValue(\"cpu\") != \"\" {\n\t\tprofile := &prof{Name: \"cpu\"}\n\n\t\t// For a CPU profile we'll use the Debug field to indicate the number of\n\t\t// seconds to capture the profile for.\n\t\tprofile.Debug, _ = strconv.ParseInt(r.FormValue(\"seconds\"), 10, 64)\n\t\tif profile.Debug <= 0 {\n\t\t\tprofile.Debug = 30\n\t\t}\n\t\tallProfs = append([]*prof{profile}, allProfs...) // CPU profile first.\n\t}\n\n\tvar (\n\t\tresp bytes.Buffer // Temporary buffer for entire archive.\n\t\tbuf  bytes.Buffer // Temporary buffer for each profile/query result.\n\t)\n\n\tgz := gzip.NewWriter(&resp)\n\ttw := tar.NewWriter(gz)\n\n\t// Collect and write out profiles.\n\tfor _, profile := range allProfs {\n\t\tif profile.Name == \"cpu\" {\n\t\t\tif err := pprof.StartCPUProfile(&buf); err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tsleep(w, time.Duration(profile.Debug)*time.Second)\n\t\t\tpprof.StopCPUProfile()\n\t\t} else {\n\t\t\tprof := pprof.Lookup(profile.Name)\n\t\t\tif prof == nil {\n\t\t\t\thttp.Error(w, \"unable to find profile \"+profile.Name, http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif err := prof.WriteTo(&buf, int(profile.Debug)); err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// Write the profile file's header.\n\t\terr := tw.WriteHeader(&tar.Header{\n\t\t\tName: 
profile.Name + \".txt\",\n\t\t\tMode: 0600,\n\t\t\tSize: int64(buf.Len()),\n\t\t})\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\n\t\t// Write the profile file's data.\n\t\tif _, err := tw.Write(buf.Bytes()); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\n\t\t// Reset the buffer for the next profile.\n\t\tbuf.Reset()\n\t}\n\n\t// Collect and write out the queries.\n\tvar allQueries = []struct {\n\t\tname string\n\t\tfn   func() ([]*models.Row, error)\n\t}{\n\t\t{\"shards\", h.showShards},\n\t\t{\"stats\", h.showStats},\n\t\t{\"diagnostics\", h.showDiagnostics},\n\t}\n\n\ttabW := tabwriter.NewWriter(&buf, 8, 8, 1, '\\t', 0)\n\tfor _, query := range allQueries {\n\t\trows, err := query.fn()\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\n\t\tfor i, row := range rows {\n\t\t\tvar out []byte\n\t\t\t// Write the columns\n\t\t\tfor _, col := range row.Columns {\n\t\t\t\tout = append(out, []byte(col+\"\\t\")...)\n\t\t\t}\n\t\t\tout = append(out, '\\n')\n\t\t\tif _, err := tabW.Write(out); err != nil {\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t}\n\n\t\t\t// Write all the values\n\t\t\tfor _, val := range row.Values {\n\t\t\t\tout = out[:0]\n\t\t\t\tfor _, v := range val {\n\t\t\t\t\tout = append(out, []byte(fmt.Sprintf(\"%v\\t\", v))...)\n\t\t\t\t}\n\t\t\t\tout = append(out, '\\n')\n\t\t\t\tif _, err := tabW.Write(out); err != nil {\n\t\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Write a final newline\n\t\t\tif i < len(rows)-1 {\n\t\t\t\tif _, err := tabW.Write([]byte(\"\\n\")); err != nil {\n\t\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif err := tabW.Flush(); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\n\t\terr = 
tw.WriteHeader(&tar.Header{\n\t\t\tName: query.name + \".txt\",\n\t\t\tMode: 0600,\n\t\t\tSize: int64(buf.Len()),\n\t\t})\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\n\t\t// Write the query file's data.\n\t\tif _, err := tw.Write(buf.Bytes()); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\n\t\t// Reset the buffer for the next query.\n\t\tbuf.Reset()\n\t}\n\n\t// Close the tar writer.\n\tif err := tw.Close(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\n\t// Close the gzip writer.\n\tif err := gz.Close(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\n\t// Return the gzipped archive.\n\tw.Header().Set(\"Content-Disposition\", \"attachment; filename=profiles.tar.gz\")\n\tw.Header().Set(\"Content-Type\", \"application/gzip\")\n\tio.Copy(w, &resp) // Nothing we can really do about an error at this point.\n}\n\n// showShards generates the same values that a StatementExecutor would if a\n// SHOW SHARDS query was executed.\nfunc (h *Handler) showShards() ([]*models.Row, error) {\n\tdis := h.MetaClient.Databases()\n\n\trows := []*models.Row{}\n\tfor _, di := range dis {\n\t\trow := &models.Row{Columns: []string{\"id\", \"database\", \"retention_policy\", \"shard_group\", \"start_time\", \"end_time\", \"expiry_time\", \"owners\"}, Name: di.Name}\n\t\tfor _, rpi := range di.RetentionPolicies {\n\t\t\tfor _, sgi := range rpi.ShardGroups {\n\t\t\t\t// Shards associated with deleted shard groups are effectively deleted.\n\t\t\t\t// Don't list them.\n\t\t\t\tif sgi.Deleted() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfor _, si := range sgi.Shards {\n\t\t\t\t\townerIDs := make([]uint64, len(si.Owners))\n\t\t\t\t\tfor i, owner := range si.Owners {\n\t\t\t\t\t\townerIDs[i] = owner.NodeID\n\t\t\t\t\t}\n\n\t\t\t\t\trow.Values = append(row.Values, 
[]interface{}{\n\t\t\t\t\t\tsi.ID,\n\t\t\t\t\t\tdi.Name,\n\t\t\t\t\t\trpi.Name,\n\t\t\t\t\t\tsgi.ID,\n\t\t\t\t\t\tsgi.StartTime.UTC().Format(time.RFC3339),\n\t\t\t\t\t\tsgi.EndTime.UTC().Format(time.RFC3339),\n\t\t\t\t\t\tsgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339),\n\t\t\t\t\t\tjoinUint64(ownerIDs),\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\trows = append(rows, row)\n\t}\n\treturn rows, nil\n}\n\n// showDiagnostics generates the same values that a StatementExecutor would if a\n// SHOW DIAGNOSTICS query was executed.\nfunc (h *Handler) showDiagnostics() ([]*models.Row, error) {\n\tdiags, err := h.Monitor.Diagnostics()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get a sorted list of diagnostics keys.\n\tsortedKeys := make([]string, 0, len(diags))\n\tfor k := range diags {\n\t\tsortedKeys = append(sortedKeys, k)\n\t}\n\tsort.Strings(sortedKeys)\n\n\trows := make([]*models.Row, 0, len(diags))\n\tfor _, k := range sortedKeys {\n\t\trow := &models.Row{Name: k}\n\n\t\trow.Columns = diags[k].Columns\n\t\trow.Values = diags[k].Rows\n\t\trows = append(rows, row)\n\t}\n\treturn rows, nil\n}\n\n// showStats generates the same values that a StatementExecutor would if a\n// SHOW STATS query was executed.\nfunc (h *Handler) showStats() ([]*models.Row, error) {\n\tstats, err := h.Monitor.Statistics(nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar rows []*models.Row\n\tfor _, stat := range stats {\n\t\trow := &models.Row{Name: stat.Name, Tags: stat.Tags}\n\n\t\tvalues := make([]interface{}, 0, len(stat.Values))\n\t\tfor _, k := range stat.ValueNames() {\n\t\t\trow.Columns = append(row.Columns, k)\n\t\t\tvalues = append(values, stat.Values[k])\n\t\t}\n\t\trow.Values = [][]interface{}{values}\n\t\trows = append(rows, row)\n\t}\n\treturn rows, nil\n}\n\n// joinUint64 returns a comma-delimited string of uint64 numbers.\nfunc joinUint64(a []uint64) string {\n\tvar buf []byte // Could take a guess at initial size here.\n\tfor i, x := range a {\n\t\tif 
i != 0 {\n\t\t\tbuf = append(buf, ',')\n\t\t}\n\t\tbuf = strconv.AppendUint(buf, x, 10)\n\t}\n\treturn string(buf)\n}\n\n// Taken from net/http/pprof/pprof.go\nfunc sleep(w http.ResponseWriter, d time.Duration) {\n\tvar clientGone <-chan bool\n\tif cn, ok := w.(http.CloseNotifier); ok {\n\t\tclientGone = cn.CloseNotify()\n\t}\n\tselect {\n\tcase <-time.After(d):\n\tcase <-clientGone:\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/httpd/requests.go",
    "content": "package httpd\n\nimport (\n\t\"container/list\"\n\t\"fmt\"\n\t\"net\"\n\t\"net/http\"\n\t\"sync\"\n\t\"sync/atomic\"\n\n\t\"github.com/influxdata/influxdb/services/meta\"\n)\n\ntype RequestInfo struct {\n\tIPAddr   string\n\tUsername string\n}\n\ntype RequestStats struct {\n\tWrites  int64 `json:\"writes\"`\n\tQueries int64 `json:\"queries\"`\n}\n\nfunc (r *RequestInfo) String() string {\n\tif r.Username != \"\" {\n\t\treturn fmt.Sprintf(\"%s:%s\", r.Username, r.IPAddr)\n\t}\n\treturn r.IPAddr\n}\n\ntype RequestProfile struct {\n\ttracker *RequestTracker\n\telem    *list.Element\n\n\tmu       sync.RWMutex\n\tRequests map[RequestInfo]*RequestStats\n}\n\nfunc (p *RequestProfile) AddWrite(info RequestInfo) {\n\tp.add(info, p.addWrite)\n}\n\nfunc (p *RequestProfile) AddQuery(info RequestInfo) {\n\tp.add(info, p.addQuery)\n}\n\nfunc (p *RequestProfile) add(info RequestInfo, fn func(*RequestStats)) {\n\t// Look for a request entry for this request.\n\tp.mu.RLock()\n\tst, ok := p.Requests[info]\n\tp.mu.RUnlock()\n\tif ok {\n\t\tfn(st)\n\t\treturn\n\t}\n\n\t// There is no entry in the request tracker. 
Create one.\n\tp.mu.Lock()\n\tif st, ok := p.Requests[info]; ok {\n\t\t// Something else created this entry while we were waiting for the lock.\n\t\tp.mu.Unlock()\n\t\tfn(st)\n\t\treturn\n\t}\n\n\tst = &RequestStats{}\n\tp.Requests[info] = st\n\tp.mu.Unlock()\n\tfn(st)\n}\n\nfunc (p *RequestProfile) addWrite(st *RequestStats) {\n\tatomic.AddInt64(&st.Writes, 1)\n}\n\nfunc (p *RequestProfile) addQuery(st *RequestStats) {\n\tatomic.AddInt64(&st.Queries, 1)\n}\n\n// Stop informs the RequestTracker to stop collecting statistics for this\n// profile.\nfunc (p *RequestProfile) Stop() {\n\tp.tracker.mu.Lock()\n\tp.tracker.profiles.Remove(p.elem)\n\tp.tracker.mu.Unlock()\n}\n\ntype RequestTracker struct {\n\tmu       sync.RWMutex\n\tprofiles *list.List\n}\n\nfunc NewRequestTracker() *RequestTracker {\n\treturn &RequestTracker{\n\t\tprofiles: list.New(),\n\t}\n}\n\nfunc (rt *RequestTracker) TrackRequests() *RequestProfile {\n\t// Perform the memory allocation outside of the lock.\n\tprofile := &RequestProfile{\n\t\tRequests: make(map[RequestInfo]*RequestStats),\n\t\ttracker:  rt,\n\t}\n\n\trt.mu.Lock()\n\tprofile.elem = rt.profiles.PushBack(profile)\n\trt.mu.Unlock()\n\treturn profile\n}\n\nfunc (rt *RequestTracker) Add(req *http.Request, user meta.User) {\n\trt.mu.RLock()\n\tif rt.profiles.Len() == 0 {\n\t\trt.mu.RUnlock()\n\t\treturn\n\t}\n\tdefer rt.mu.RUnlock()\n\n\tvar info RequestInfo\n\thost, _, err := net.SplitHostPort(req.RemoteAddr)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tinfo.IPAddr = host\n\tif user != nil {\n\t\tinfo.Username = user.ID()\n\t}\n\n\t// Add the request info to the profiles.\n\tfor p := rt.profiles.Front(); p != nil; p = p.Next() {\n\t\tprofile := p.Value.(*RequestProfile)\n\t\tif req.URL.Path == \"/query\" {\n\t\t\tprofile.AddQuery(info)\n\t\t} else if req.URL.Path == \"/write\" {\n\t\t\tprofile.AddWrite(info)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/httpd/response_logger.go",
    "content": "package httpd\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n)\n\n// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP status\n// code and body size\ntype responseLogger struct {\n\tw      http.ResponseWriter\n\tstatus int\n\tsize   int\n}\n\nfunc (l *responseLogger) CloseNotify() <-chan bool {\n\tif notifier, ok := l.w.(http.CloseNotifier); ok {\n\t\treturn notifier.CloseNotify()\n\t}\n\t// needed for response recorder for testing\n\treturn make(<-chan bool)\n}\n\nfunc (l *responseLogger) Header() http.Header {\n\treturn l.w.Header()\n}\n\nfunc (l *responseLogger) Flush() {\n\tl.w.(http.Flusher).Flush()\n}\n\nfunc (l *responseLogger) Write(b []byte) (int, error) {\n\tif l.status == 0 {\n\t\t// Set status if WriteHeader has not been called\n\t\tl.status = http.StatusOK\n\t}\n\tsize, err := l.w.Write(b)\n\tl.size += size\n\treturn size, err\n}\n\nfunc (l *responseLogger) WriteHeader(s int) {\n\tl.w.WriteHeader(s)\n\tl.status = s\n}\n\nfunc (l *responseLogger) Status() int {\n\tif l.status == 0 {\n\t\t// This can happen if we never actually write data, but only set response headers.\n\t\tl.status = http.StatusOK\n\t}\n\treturn l.status\n}\n\nfunc (l *responseLogger) Size() int {\n\treturn l.size\n}\n\n// redact any occurrence of a password parameter, 'p'\nfunc redactPassword(r *http.Request) {\n\tq := r.URL.Query()\n\tif p := q.Get(\"p\"); p != \"\" {\n\t\tq.Set(\"p\", \"[REDACTED]\")\n\t\tr.URL.RawQuery = q.Encode()\n\t}\n}\n\n// Common Log Format: http://en.wikipedia.org/wiki/Common_Log_Format\n\n// buildLogLine creates a common log format\n// in addition to the common fields, we also append referrer, user agent,\n// request ID and response time (microseconds)\n// ie, in apache mod_log_config terms:\n//     %h %l %u %t \\\"%r\\\" %>s %b \\\"%{Referer}i\\\" \\\"%{User-agent}i\\\"\" %L %D\nfunc buildLogLine(l *responseLogger, r 
*http.Request, start time.Time) string {\n\n\tredactPassword(r)\n\n\tusername := parseUsername(r)\n\n\thost, _, err := net.SplitHostPort(r.RemoteAddr)\n\tif err != nil {\n\t\thost = r.RemoteAddr\n\t}\n\n\tif xff := r.Header[\"X-Forwarded-For\"]; xff != nil {\n\t\taddrs := append(xff, host)\n\t\thost = strings.Join(addrs, \",\")\n\t}\n\n\turi := r.URL.RequestURI()\n\n\treferer := r.Referer()\n\n\tuserAgent := r.UserAgent()\n\n\treturn fmt.Sprintf(`%s - %s [%s] \"%s %s %s\" %s %s \"%s\" \"%s\" %s %d`,\n\t\thost,\n\t\tdetect(username, \"-\"),\n\t\tstart.Format(\"02/Jan/2006:15:04:05 -0700\"),\n\t\tr.Method,\n\t\turi,\n\t\tr.Proto,\n\t\tdetect(strconv.Itoa(l.Status()), \"-\"),\n\t\tstrconv.Itoa(l.Size()),\n\t\tdetect(referer, \"-\"),\n\t\tdetect(userAgent, \"-\"),\n\t\tr.Header.Get(\"Request-Id\"),\n\t\t// response time, report in microseconds because this is consistent\n\t\t// with apache's %D parameter in mod_log_config\n\t\tint64(time.Since(start)/time.Microsecond))\n}\n\n// detect detects the first presence of a non blank string and returns it\nfunc detect(values ...string) string {\n\tfor _, v := range values {\n\t\tif v != \"\" {\n\t\t\treturn v\n\t\t}\n\t}\n\treturn \"\"\n}\n\n// parses the username either from the url or auth header\nfunc parseUsername(r *http.Request) string {\n\tvar (\n\t\tusername = \"\"\n\t\turl      = r.URL\n\t)\n\n\t// get username from the url if passed there\n\tif url.User != nil {\n\t\tif name := url.User.Username(); name != \"\" {\n\t\t\tusername = name\n\t\t}\n\t}\n\n\t// Try to get the username from the query param 'u'\n\tq := url.Query()\n\tif u := q.Get(\"u\"); u != \"\" {\n\t\tusername = u\n\t}\n\n\t// Try to get it from the authorization header if set there\n\tif username == \"\" {\n\t\tif u, _, ok := r.BasicAuth(); ok {\n\t\t\tusername = u\n\t\t}\n\t}\n\treturn username\n}\n\n// sanitize redacts passwords from query string for logging.\nfunc sanitize(r *http.Request) {\n\tvalues := r.URL.Query()\n\tfor i, q := range 
values[\"q\"] {\n\t\tvalues[\"q\"][i] = influxql.Sanitize(q)\n\t}\n\tr.URL.RawQuery = values.Encode()\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/httpd/response_writer.go",
    "content": "package httpd\n\nimport (\n\t\"encoding/csv\"\n\t\"encoding/json\"\n\t\"io\"\n\t\"net/http\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n)\n\n// ResponseWriter is an interface for writing a response.\ntype ResponseWriter interface {\n\t// WriteResponse writes a response.\n\tWriteResponse(resp Response) (int, error)\n\n\thttp.ResponseWriter\n}\n\n// NewResponseWriter creates a new ResponseWriter based on the Accept header\n// in the request that wraps the ResponseWriter.\nfunc NewResponseWriter(w http.ResponseWriter, r *http.Request) ResponseWriter {\n\tpretty := r.URL.Query().Get(\"pretty\") == \"true\"\n\trw := &responseWriter{ResponseWriter: w}\n\tswitch r.Header.Get(\"Accept\") {\n\tcase \"application/csv\", \"text/csv\":\n\t\tw.Header().Add(\"Content-Type\", \"text/csv\")\n\t\trw.formatter = &csvFormatter{statementID: -1, Writer: w}\n\tcase \"application/json\":\n\t\tfallthrough\n\tdefault:\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\trw.formatter = &jsonFormatter{Pretty: pretty, Writer: w}\n\t}\n\treturn rw\n}\n\n// WriteError is a convenience function for writing an error response to the ResponseWriter.\nfunc WriteError(w ResponseWriter, err error) (int, error) {\n\treturn w.WriteResponse(Response{Err: err})\n}\n\n// responseWriter is an implementation of ResponseWriter.\ntype responseWriter struct {\n\tformatter interface {\n\t\tWriteResponse(resp Response) (int, error)\n\t}\n\thttp.ResponseWriter\n}\n\n// WriteResponse writes the response using the formatter.\nfunc (w *responseWriter) WriteResponse(resp Response) (int, error) {\n\treturn w.formatter.WriteResponse(resp)\n}\n\n// Flush flushes the ResponseWriter if it has a Flush() method.\nfunc (w *responseWriter) Flush() {\n\tif w, ok := w.ResponseWriter.(http.Flusher); ok {\n\t\tw.Flush()\n\t}\n}\n\n// CloseNotify calls CloseNotify on the underlying http.ResponseWriter if it\n// exists. 
Otherwise, it returns a nil channel that will never notify.\nfunc (w *responseWriter) CloseNotify() <-chan bool {\n\tif notifier, ok := w.ResponseWriter.(http.CloseNotifier); ok {\n\t\treturn notifier.CloseNotify()\n\t}\n\treturn nil\n}\n\ntype jsonFormatter struct {\n\tio.Writer\n\tPretty bool\n}\n\nfunc (w *jsonFormatter) WriteResponse(resp Response) (n int, err error) {\n\tvar b []byte\n\tif w.Pretty {\n\t\tb, err = json.MarshalIndent(resp, \"\", \"    \")\n\t} else {\n\t\tb, err = json.Marshal(resp)\n\t}\n\n\tif err != nil {\n\t\tn, err = io.WriteString(w, err.Error())\n\t} else {\n\t\tn, err = w.Write(b)\n\t}\n\n\tw.Write([]byte(\"\\n\"))\n\tn++\n\treturn n, err\n}\n\ntype csvFormatter struct {\n\tio.Writer\n\tstatementID int\n\tcolumns     []string\n}\n\nfunc (w *csvFormatter) WriteResponse(resp Response) (n int, err error) {\n\tcsv := csv.NewWriter(w)\n\tfor _, result := range resp.Results {\n\t\tif result.StatementID != w.statementID {\n\t\t\t// If there are no series in the result, skip past this result.\n\t\t\tif len(result.Series) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Set the statement id and print out a newline if this is not the first statement.\n\t\t\tif w.statementID >= 0 {\n\t\t\t\t// Flush the csv writer and write a newline.\n\t\t\t\tcsv.Flush()\n\t\t\t\tif err := csv.Error(); err != nil {\n\t\t\t\t\treturn n, err\n\t\t\t\t}\n\n\t\t\t\tout, err := io.WriteString(w, \"\\n\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn n, err\n\t\t\t\t}\n\t\t\t\tn += out\n\t\t\t}\n\t\t\tw.statementID = result.StatementID\n\n\t\t\t// Print out the column headers from the first series.\n\t\t\tw.columns = make([]string, 2+len(result.Series[0].Columns))\n\t\t\tw.columns[0] = \"name\"\n\t\t\tw.columns[1] = \"tags\"\n\t\t\tcopy(w.columns[2:], result.Series[0].Columns)\n\t\t\tif err := csv.Write(w.columns); err != nil {\n\t\t\t\treturn n, err\n\t\t\t}\n\t\t}\n\n\t\tfor _, row := range result.Series {\n\t\t\tw.columns[0] = row.Name\n\t\t\tif len(row.Tags) > 0 
{\n\t\t\t\tw.columns[1] = string(models.NewTags(row.Tags).HashKey()[1:])\n\t\t\t} else {\n\t\t\t\tw.columns[1] = \"\"\n\t\t\t}\n\t\t\tfor _, values := range row.Values {\n\t\t\t\tfor i, value := range values {\n\t\t\t\t\tif value == nil {\n\t\t\t\t\t\tw.columns[i+2] = \"\"\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tswitch v := value.(type) {\n\t\t\t\t\tcase float64:\n\t\t\t\t\t\tw.columns[i+2] = strconv.FormatFloat(v, 'f', -1, 64)\n\t\t\t\t\tcase int64:\n\t\t\t\t\t\tw.columns[i+2] = strconv.FormatInt(v, 10)\n\t\t\t\t\tcase string:\n\t\t\t\t\t\tw.columns[i+2] = v\n\t\t\t\t\tcase bool:\n\t\t\t\t\t\tif v {\n\t\t\t\t\t\t\tw.columns[i+2] = \"true\"\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tw.columns[i+2] = \"false\"\n\t\t\t\t\t\t}\n\t\t\t\t\tcase time.Time:\n\t\t\t\t\t\tw.columns[i+2] = strconv.FormatInt(v.UnixNano(), 10)\n\t\t\t\t\tcase *float64, *int64, *string, *bool:\n\t\t\t\t\t\tw.columns[i+2] = \"\"\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcsv.Write(w.columns)\n\t\t\t}\n\t\t}\n\t}\n\tcsv.Flush()\n\tif err := csv.Error(); err != nil {\n\t\treturn n, err\n\t}\n\treturn n, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/httpd/response_writer_test.go",
    "content": "package httpd_test\n\nimport (\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/services/httpd\"\n)\n\nfunc TestResponseWriter_CSV(t *testing.T) {\n\theader := make(http.Header)\n\theader.Set(\"Accept\", \"text/csv\")\n\tr := &http.Request{\n\t\tHeader: header,\n\t\tURL:    &url.URL{},\n\t}\n\tw := httptest.NewRecorder()\n\n\twriter := httpd.NewResponseWriter(w, r)\n\twriter.WriteResponse(httpd.Response{\n\t\tResults: []*influxql.Result{\n\t\t\t{\n\t\t\t\tStatementID: 0,\n\t\t\t\tSeries: []*models.Row{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"cpu\",\n\t\t\t\t\t\tTags: map[string]string{\n\t\t\t\t\t\t\t\"host\":   \"server01\",\n\t\t\t\t\t\t\t\"region\": \"uswest\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tColumns: []string{\"time\", \"value\"},\n\t\t\t\t\t\tValues: [][]interface{}{\n\t\t\t\t\t\t\t{time.Unix(0, 10), float64(2.5)},\n\t\t\t\t\t\t\t{time.Unix(0, 20), int64(5)},\n\t\t\t\t\t\t\t{time.Unix(0, 30), nil},\n\t\t\t\t\t\t\t{time.Unix(0, 40), \"foobar\"},\n\t\t\t\t\t\t\t{time.Unix(0, 50), true},\n\t\t\t\t\t\t\t{time.Unix(0, 60), false},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n\n\tif got, want := w.Body.String(), `name,tags,time,value\ncpu,\"host=server01,region=uswest\",10,2.5\ncpu,\"host=server01,region=uswest\",20,5\ncpu,\"host=server01,region=uswest\",30,\ncpu,\"host=server01,region=uswest\",40,foobar\ncpu,\"host=server01,region=uswest\",50,true\ncpu,\"host=server01,region=uswest\",60,false\n`; got != want {\n\t\tt.Errorf(\"unexpected output:\\n\\ngot=%v\\nwant=%s\", got, want)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/httpd/service.go",
    "content": "// Package httpd implements the HTTP service and REST API for InfluxDB.\npackage httpd // import \"github.com/influxdata/influxdb/services/httpd\"\n\nimport (\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"path\"\n\t\"runtime\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/uber-go/zap\"\n)\n\n// statistics gathered by the httpd package.\nconst (\n\tstatRequest                      = \"req\"                  // Number of HTTP requests served\n\tstatQueryRequest                 = \"queryReq\"             // Number of query requests served\n\tstatWriteRequest                 = \"writeReq\"             // Number of write requests serverd\n\tstatPingRequest                  = \"pingReq\"              // Number of ping requests served\n\tstatStatusRequest                = \"statusReq\"            // Number of status requests served\n\tstatWriteRequestBytesReceived    = \"writeReqBytes\"        // Sum of all bytes in write requests\n\tstatQueryRequestBytesTransmitted = \"queryRespBytes\"       // Sum of all bytes returned in query reponses\n\tstatPointsWrittenOK              = \"pointsWrittenOK\"      // Number of points written OK\n\tstatPointsWrittenDropped         = \"pointsWrittenDropped\" // Number of points dropped by the storage engine\n\tstatPointsWrittenFail            = \"pointsWrittenFail\"    // Number of points that failed to be written\n\tstatAuthFail                     = \"authFail\"             // Number of authentication failures\n\tstatRequestDuration              = \"reqDurationNs\"        // Number of (wall-time) nanoseconds spent inside requests\n\tstatQueryRequestDuration         = \"queryReqDurationNs\"   // Number of (wall-time) nanoseconds spent inside query requests\n\tstatWriteRequestDuration         = \"writeReqDurationNs\"   // Number of (wall-time) nanoseconds spent inside write requests\n\tstatRequestsActive               = \"reqActive\"       
     // Number of currently active requests\n\tstatWriteRequestsActive          = \"writeReqActive\"       // Number of currently active write requests\n\tstatClientError                  = \"clientError\"          // Number of HTTP responses due to client error\n\tstatServerError                  = \"serverError\"          // Number of HTTP responses due to server error\n)\n\n// Service manages the listener and handler for an HTTP endpoint.\ntype Service struct {\n\tln    net.Listener\n\taddr  string\n\thttps bool\n\tcert  string\n\tkey   string\n\tlimit int\n\terr   chan error\n\n\tunixSocket         bool\n\tbindSocket         string\n\tunixSocketListener net.Listener\n\n\tHandler *Handler\n\n\tLogger zap.Logger\n}\n\n// NewService returns a new instance of Service.\nfunc NewService(c Config) *Service {\n\ts := &Service{\n\t\taddr:       c.BindAddress,\n\t\thttps:      c.HTTPSEnabled,\n\t\tcert:       c.HTTPSCertificate,\n\t\tkey:        c.HTTPSPrivateKey,\n\t\tlimit:      c.MaxConnectionLimit,\n\t\terr:        make(chan error),\n\t\tunixSocket: c.UnixSocketEnabled,\n\t\tbindSocket: c.BindSocket,\n\t\tHandler:    NewHandler(c),\n\t\tLogger:     zap.New(zap.NullEncoder()),\n\t}\n\tif s.key == \"\" {\n\t\ts.key = s.cert\n\t}\n\ts.Handler.Logger = s.Logger\n\treturn s\n}\n\n// Open starts the service.\nfunc (s *Service) Open() error {\n\ts.Logger.Info(\"Starting HTTP service\")\n\ts.Logger.Info(fmt.Sprint(\"Authentication enabled:\", s.Handler.Config.AuthEnabled))\n\n\t// Open listener.\n\tif s.https {\n\t\tcert, err := tls.LoadX509KeyPair(s.cert, s.key)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlistener, err := tls.Listen(\"tcp\", s.addr, &tls.Config{\n\t\t\tCertificates: []tls.Certificate{cert},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ts.Logger.Info(fmt.Sprint(\"Listening on HTTPS:\", listener.Addr().String()))\n\t\ts.ln = listener\n\t} else {\n\t\tlistener, err := net.Listen(\"tcp\", s.addr)\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\n\t\ts.Logger.Info(fmt.Sprint(\"Listening on HTTP:\", listener.Addr().String()))\n\t\ts.ln = listener\n\t}\n\n\t// Open unix socket listener.\n\tif s.unixSocket {\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\treturn fmt.Errorf(\"unable to use unix socket on windows\")\n\t\t}\n\t\tif err := os.MkdirAll(path.Dir(s.bindSocket), 0777); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := syscall.Unlink(s.bindSocket); err != nil && !os.IsNotExist(err) {\n\t\t\treturn err\n\t\t}\n\n\t\tlistener, err := net.Listen(\"unix\", s.bindSocket)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ts.Logger.Info(fmt.Sprint(\"Listening on unix socket:\", listener.Addr().String()))\n\t\ts.unixSocketListener = listener\n\n\t\tgo s.serveUnixSocket()\n\t}\n\n\t// Enforce a connection limit if one has been given.\n\tif s.limit > 0 {\n\t\ts.ln = LimitListener(s.ln, s.limit)\n\t}\n\n\t// wait for the listeners to start\n\ttimeout := time.Now().Add(time.Second)\n\tfor {\n\t\tif s.ln.Addr() != nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif time.Now().After(timeout) {\n\t\t\treturn fmt.Errorf(\"unable to open without http listener running\")\n\t\t}\n\t\ttime.Sleep(10 * time.Millisecond)\n\t}\n\n\t// Begin listening for requests in a separate goroutine.\n\tgo s.serveTCP()\n\treturn nil\n}\n\n// Close closes the underlying listener.\nfunc (s *Service) Close() error {\n\tif s.ln != nil {\n\t\tif err := s.ln.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif s.unixSocketListener != nil {\n\t\tif err := s.unixSocketListener.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// WithLogger sets the logger for the service.\nfunc (s *Service) WithLogger(log zap.Logger) {\n\ts.Logger = log.With(zap.String(\"service\", \"httpd\"))\n\ts.Handler.Logger = s.Logger\n}\n\n// Err returns a channel for fatal errors that occur on the listener.\nfunc (s *Service) Err() <-chan error { return s.err }\n\n// Addr returns the listener's address. 
Returns nil if listener is closed.\nfunc (s *Service) Addr() net.Addr {\n\tif s.ln != nil {\n\t\treturn s.ln.Addr()\n\t}\n\treturn nil\n}\n\n// Statistics returns statistics for periodic monitoring.\nfunc (s *Service) Statistics(tags map[string]string) []models.Statistic {\n\treturn s.Handler.Statistics(models.NewTags(map[string]string{\"bind\": s.addr}).Merge(tags).Map())\n}\n\n// serveTCP serves the handler from the TCP listener.\nfunc (s *Service) serveTCP() {\n\ts.serve(s.ln)\n}\n\n// serveUnixSocket serves the handler from the unix socket listener.\nfunc (s *Service) serveUnixSocket() {\n\ts.serve(s.unixSocketListener)\n}\n\n// serve serves the handler from the listener.\nfunc (s *Service) serve(listener net.Listener) {\n\t// The listener was closed so exit\n\t// See https://github.com/golang/go/issues/4373\n\terr := http.Serve(listener, s.Handler)\n\tif err != nil && !strings.Contains(err.Error(), \"closed\") {\n\t\ts.err <- fmt.Errorf(\"listener failed: addr=%s, err=%s\", s.Addr(), err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/meta/client.go",
    "content": "// Package meta provides control over meta data for InfluxDB,\n// such as controlling databases, retention policies, users, etc.\npackage meta\n\nimport (\n\t\"bytes\"\n\tcrand \"crypto/rand\"\n\t\"crypto/sha256\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"math/rand\"\n\t\"net/http\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/uber-go/zap\"\n\n\t\"golang.org/x/crypto/bcrypt\"\n)\n\nconst (\n\t// SaltBytes is the number of bytes used for salts.\n\tSaltBytes = 32\n\n\tmetaFile = \"meta.db\"\n\n\t// ShardGroupDeletedExpiration is the amount of time before a shard group info will be removed from cached\n\t// data after it has been marked deleted (2 weeks).\n\tShardGroupDeletedExpiration = -2 * 7 * 24 * time.Hour\n)\n\nvar (\n\t// ErrServiceUnavailable is returned when the meta service is unavailable.\n\tErrServiceUnavailable = errors.New(\"meta service unavailable\")\n\n\t// ErrService is returned when the meta service returns an error.\n\tErrService = errors.New(\"meta service error\")\n)\n\n// Client is used to execute commands on and read data from\n// a meta service cluster.\ntype Client struct {\n\tlogger zap.Logger\n\n\tmu        sync.RWMutex\n\tclosing   chan struct{}\n\tchanged   chan struct{}\n\tcacheData *Data\n\n\t// Authentication cache.\n\tauthCache map[string]authUser\n\n\tpath string\n\n\tretentionAutoCreate bool\n}\n\ntype authUser struct {\n\tbhash string\n\tsalt  []byte\n\thash  []byte\n}\n\n// NewClient returns a new *Client.\nfunc NewClient(config *Config) *Client {\n\treturn &Client{\n\t\tcacheData: &Data{\n\t\t\tClusterID: uint64(rand.Int63()),\n\t\t\tIndex:     1,\n\t\t},\n\t\tclosing:             make(chan struct{}),\n\t\tchanged:             make(chan struct{}),\n\t\tlogger:              zap.New(zap.NullEncoder()),\n\t\tauthCache:           make(map[string]authUser, 0),\n\t\tpath:            
    config.Dir,\n\t\tretentionAutoCreate: config.RetentionAutoCreate,\n\t}\n}\n\n// Open a connection to a meta service cluster.\nfunc (c *Client) Open() error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\t// Try to load from disk\n\tif err := c.Load(); err != nil {\n\t\treturn err\n\t}\n\n\t// If this is a brand new instance, persist to disk immediatly.\n\tif c.cacheData.Index == 1 {\n\t\tif err := snapshot(c.path, c.cacheData); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// Close the meta service cluster connection.\nfunc (c *Client) Close() error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif t, ok := http.DefaultTransport.(*http.Transport); ok {\n\t\tt.CloseIdleConnections()\n\t}\n\n\tselect {\n\tcase <-c.closing:\n\t\treturn nil\n\tdefault:\n\t\tclose(c.closing)\n\t}\n\n\treturn nil\n}\n\n// AcquireLease attempts to acquire the specified lease.\n// TODO corylanou remove this for single node\nfunc (c *Client) AcquireLease(name string) (*Lease, error) {\n\tl := Lease{\n\t\tName:       name,\n\t\tExpiration: time.Now().Add(DefaultLeaseDuration),\n\t}\n\treturn &l, nil\n}\n\n// ClusterID returns the ID of the cluster it's connected to.\nfunc (c *Client) ClusterID() uint64 {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\treturn c.cacheData.ClusterID\n}\n\n// Database returns info for the requested database.\nfunc (c *Client) Database(name string) *DatabaseInfo {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\tfor _, d := range c.cacheData.Databases {\n\t\tif d.Name == name {\n\t\t\treturn &d\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// Databases returns a list of all database infos.\nfunc (c *Client) Databases() []DatabaseInfo {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\tdbs := c.cacheData.Databases\n\tif dbs == nil {\n\t\treturn []DatabaseInfo{}\n\t}\n\treturn dbs\n}\n\n// CreateDatabase creates a database or returns it if it already exists.\nfunc (c *Client) CreateDatabase(name string) (*DatabaseInfo, error) {\n\tc.mu.Lock()\n\tdefer 
c.mu.Unlock()\n\n\tdata := c.cacheData.Clone()\n\n\tif db := data.Database(name); db != nil {\n\t\treturn db, nil\n\t}\n\n\tif err := data.CreateDatabase(name); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create default retention policy\n\tif c.retentionAutoCreate {\n\t\trpi := DefaultRetentionPolicyInfo()\n\t\tif err := data.CreateRetentionPolicy(name, rpi, true); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tdb := data.Database(name)\n\n\tif err := c.commit(data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}\n\n// CreateDatabaseWithRetentionPolicy creates a database with the specified\n// retention policy.\n//\n// When creating a database with a retention policy, the retention policy will\n// always be set to default. Therefore if the caller provides a retention policy\n// that already exists on the database, but that retention policy is not the\n// default one, an error will be returned.\n//\n// This call is only idempotent when the caller provides the exact same\n// retention policy, and that retention policy is already the default for the\n// database.\n//\nfunc (c *Client) CreateDatabaseWithRetentionPolicy(name string, spec *RetentionPolicySpec) (*DatabaseInfo, error) {\n\tif spec == nil {\n\t\treturn nil, errors.New(\"CreateDatabaseWithRetentionPolicy called with nil spec\")\n\t}\n\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tdata := c.cacheData.Clone()\n\n\tif spec.Duration != nil && *spec.Duration < MinRetentionPolicyDuration && *spec.Duration != 0 {\n\t\treturn nil, ErrRetentionPolicyDurationTooLow\n\t}\n\n\tdb := data.Database(name)\n\tif db == nil {\n\t\tif err := data.CreateDatabase(name); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdb = data.Database(name)\n\t}\n\n\t// No existing retention policies, so we can create the provided policy as\n\t// the new default policy.\n\trpi := spec.NewRetentionPolicyInfo()\n\tif len(db.RetentionPolicies) == 0 {\n\t\tif err := data.CreateRetentionPolicy(name, rpi, true); err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t} else if !spec.Matches(db.RetentionPolicy(rpi.Name)) {\n\t\t// In this case we already have a retention policy on the database and\n\t\t// the provided retention policy does not match it. Therefore, this call\n\t\t// is not idempotent and we need to return an error.\n\t\treturn nil, ErrRetentionPolicyConflict\n\t}\n\n\t// If a non-default retention policy was passed in that already exists then\n\t// it's an error regardless of if the exact same retention policy is\n\t// provided. CREATE DATABASE WITH RETENTION POLICY should only be used to\n\t// create DEFAULT retention policies.\n\tif db.DefaultRetentionPolicy != rpi.Name {\n\t\treturn nil, ErrRetentionPolicyConflict\n\t}\n\n\t// Commit the changes.\n\tif err := c.commit(data); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Refresh the database info.\n\tdb = data.Database(name)\n\n\treturn db, nil\n}\n\n// DropDatabase deletes a database.\nfunc (c *Client) DropDatabase(name string) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tdata := c.cacheData.Clone()\n\n\tif err := data.DropDatabase(name); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.commit(data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// CreateRetentionPolicy creates a retention policy on the specified database.\nfunc (c *Client) CreateRetentionPolicy(database string, spec *RetentionPolicySpec, makeDefault bool) (*RetentionPolicyInfo, error) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tdata := c.cacheData.Clone()\n\n\tif spec.Duration != nil && *spec.Duration < MinRetentionPolicyDuration && *spec.Duration != 0 {\n\t\treturn nil, ErrRetentionPolicyDurationTooLow\n\t}\n\n\trp := spec.NewRetentionPolicyInfo()\n\tif err := data.CreateRetentionPolicy(database, rp, makeDefault); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := c.commit(data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn rp, nil\n}\n\n// RetentionPolicy returns the requested retention policy info.\nfunc (c *Client) 
RetentionPolicy(database, name string) (rpi *RetentionPolicyInfo, err error) {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\tdb := c.cacheData.Database(database)\n\tif db == nil {\n\t\treturn nil, influxdb.ErrDatabaseNotFound(database)\n\t}\n\n\treturn db.RetentionPolicy(name), nil\n}\n\n// DropRetentionPolicy drops a retention policy from a database.\nfunc (c *Client) DropRetentionPolicy(database, name string) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tdata := c.cacheData.Clone()\n\n\tif err := data.DropRetentionPolicy(database, name); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.commit(data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// UpdateRetentionPolicy updates a retention policy.\nfunc (c *Client) UpdateRetentionPolicy(database, name string, rpu *RetentionPolicyUpdate, makeDefault bool) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tdata := c.cacheData.Clone()\n\n\tif err := data.UpdateRetentionPolicy(database, name, rpu, makeDefault); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.commit(data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// Users returns a slice of UserInfo representing the currently known users.\nfunc (c *Client) Users() []UserInfo {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\tusers := c.cacheData.Users\n\n\tif users == nil {\n\t\treturn []UserInfo{}\n\t}\n\treturn users\n}\n\n// User returns the user with the given name, or ErrUserNotFound.\nfunc (c *Client) User(name string) (User, error) {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\tfor _, u := range c.cacheData.Users {\n\t\tif u.Name == name {\n\t\t\treturn &u, nil\n\t\t}\n\t}\n\n\treturn nil, ErrUserNotFound\n}\n\n// bcryptCost is the cost associated with generating password with bcrypt.\n// This setting is lowered during testing to improve test suite performance.\nvar bcryptCost = bcrypt.DefaultCost\n\n// hashWithSalt returns a salted hash of password using salt.\nfunc (c *Client) hashWithSalt(salt []byte, password string) []byte 
{\n\thasher := sha256.New()\n\thasher.Write(salt)\n\thasher.Write([]byte(password))\n\treturn hasher.Sum(nil)\n}\n\n// saltedHash returns a salt and salted hash of password.\nfunc (c *Client) saltedHash(password string) (salt, hash []byte, err error) {\n\tsalt = make([]byte, SaltBytes)\n\tif _, err := io.ReadFull(crand.Reader, salt); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn salt, c.hashWithSalt(salt, password), nil\n}\n\n// CreateUser adds a user with the given name and password and admin status.\nfunc (c *Client) CreateUser(name, password string, admin bool) (User, error) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tdata := c.cacheData.Clone()\n\n\t// See if the user already exists.\n\tif u := data.user(name); u != nil {\n\t\tif err := bcrypt.CompareHashAndPassword([]byte(u.Hash), []byte(password)); err != nil || u.Admin != admin {\n\t\t\treturn nil, ErrUserExists\n\t\t}\n\t\treturn u, nil\n\t}\n\n\t// Hash the password before serializing it.\n\thash, err := bcrypt.GenerateFromPassword([]byte(password), bcryptCost)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := data.CreateUser(name, string(hash), admin); err != nil {\n\t\treturn nil, err\n\t}\n\n\tu := data.user(name)\n\n\tif err := c.commit(data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn u, nil\n}\n\n// UpdateUser updates the password of an existing user.\nfunc (c *Client) UpdateUser(name, password string) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tdata := c.cacheData.Clone()\n\n\t// Hash the password before serializing it.\n\thash, err := bcrypt.GenerateFromPassword([]byte(password), bcryptCost)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := data.UpdateUser(name, string(hash)); err != nil {\n\t\treturn err\n\t}\n\n\tdelete(c.authCache, name)\n\n\tif err := c.commit(data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// DropUser removes the user with the given name.\nfunc (c *Client) DropUser(name string) error {\n\tc.mu.Lock()\n\tdefer 
c.mu.Unlock()\n\n\tdata := c.cacheData.Clone()\n\n\tif err := data.DropUser(name); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.commit(data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// SetPrivilege sets a privilege for the given user on the given database.\nfunc (c *Client) SetPrivilege(username, database string, p influxql.Privilege) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tdata := c.cacheData.Clone()\n\n\tif err := data.SetPrivilege(username, database, p); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.commit(data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// SetAdminPrivilege sets or unsets admin privilege to the given username.\nfunc (c *Client) SetAdminPrivilege(username string, admin bool) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tdata := c.cacheData.Clone()\n\n\tif err := data.SetAdminPrivilege(username, admin); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.commit(data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// UserPrivileges returns the privileges for a user mapped by database name.\nfunc (c *Client) UserPrivileges(username string) (map[string]influxql.Privilege, error) {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\tp, err := c.cacheData.UserPrivileges(username)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}\n\n// UserPrivilege returns the privilege for the given user on the given database.\nfunc (c *Client) UserPrivilege(username, database string) (*influxql.Privilege, error) {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\tp, err := c.cacheData.UserPrivilege(username, database)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}\n\n// AdminUserExists returns true if any user has admin privilege.\nfunc (c *Client) AdminUserExists() bool {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\treturn c.cacheData.AdminUserExists()\n}\n\n// Authenticate returns a UserInfo if the username and password match an existing entry.\nfunc (c *Client) 
Authenticate(username, password string) (User, error) {\n\t// Find user.\n\tc.mu.RLock()\n\tuserInfo := c.cacheData.user(username)\n\tc.mu.RUnlock()\n\tif userInfo == nil {\n\t\treturn nil, ErrUserNotFound\n\t}\n\n\t// Check the local auth cache first.\n\tc.mu.RLock()\n\tau, ok := c.authCache[username]\n\tc.mu.RUnlock()\n\tif ok {\n\t\t// verify the password using the cached salt and hash\n\t\tif bytes.Equal(c.hashWithSalt(au.salt, password), au.hash) {\n\t\t\treturn userInfo, nil\n\t\t}\n\n\t\t// fall through to requiring a full bcrypt hash for invalid passwords\n\t}\n\n\t// Compare password with user hash.\n\tif err := bcrypt.CompareHashAndPassword([]byte(userInfo.Hash), []byte(password)); err != nil {\n\t\treturn nil, ErrAuthenticate\n\t}\n\n\t// generate a salt and hash of the password for the cache\n\tsalt, hashed, err := c.saltedHash(password)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.mu.Lock()\n\tc.authCache[username] = authUser{salt: salt, hash: hashed, bhash: userInfo.Hash}\n\tc.mu.Unlock()\n\treturn userInfo, nil\n}\n\n// UserCount returns the number of users stored.\nfunc (c *Client) UserCount() int {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\treturn len(c.cacheData.Users)\n}\n\n// ShardIDs returns a list of all shard ids.\nfunc (c *Client) ShardIDs() []uint64 {\n\tc.mu.RLock()\n\n\tvar a []uint64\n\tfor _, dbi := range c.cacheData.Databases {\n\t\tfor _, rpi := range dbi.RetentionPolicies {\n\t\t\tfor _, sgi := range rpi.ShardGroups {\n\t\t\t\tfor _, si := range sgi.Shards {\n\t\t\t\t\ta = append(a, si.ID)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tc.mu.RUnlock()\n\tsort.Sort(uint64Slice(a))\n\treturn a\n}\n\n// ShardGroupsByTimeRange returns a list of all shard groups on a database and policy that may contain data\n// for the specified time range. 
Shard groups are sorted by start time.\nfunc (c *Client) ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []ShardGroupInfo, err error) {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\t// Find retention policy.\n\trpi, err := c.cacheData.RetentionPolicy(database, policy)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if rpi == nil {\n\t\treturn nil, influxdb.ErrRetentionPolicyNotFound(policy)\n\t}\n\tgroups := make([]ShardGroupInfo, 0, len(rpi.ShardGroups))\n\tfor _, g := range rpi.ShardGroups {\n\t\tif g.Deleted() || !g.Overlaps(min, max) {\n\t\t\tcontinue\n\t\t}\n\t\tgroups = append(groups, g)\n\t}\n\treturn groups, nil\n}\n\n// ShardsByTimeRange returns a slice of shards that may contain data in the time range.\nfunc (c *Client) ShardsByTimeRange(sources influxql.Sources, tmin, tmax time.Time) (a []ShardInfo, err error) {\n\tm := make(map[*ShardInfo]struct{})\n\tfor _, mm := range sources.Measurements() {\n\t\tgroups, err := c.ShardGroupsByTimeRange(mm.Database, mm.RetentionPolicy, tmin, tmax)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, g := range groups {\n\t\t\tfor i := range g.Shards {\n\t\t\t\tm[&g.Shards[i]] = struct{}{}\n\t\t\t}\n\t\t}\n\t}\n\n\ta = make([]ShardInfo, 0, len(m))\n\tfor sh := range m {\n\t\ta = append(a, *sh)\n\t}\n\n\treturn a, nil\n}\n\n// DropShard deletes a shard by ID.\nfunc (c *Client) DropShard(id uint64) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tdata := c.cacheData.Clone()\n\tdata.DropShard(id)\n\treturn c.commit(data)\n}\n\n// PruneShardGroups remove deleted shard groups from the data store.\nfunc (c *Client) PruneShardGroups() error {\n\tvar changed bool\n\texpiration := time.Now().Add(ShardGroupDeletedExpiration)\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tdata := c.cacheData.Clone()\n\tfor i, d := range data.Databases {\n\t\tfor j, rp := range d.RetentionPolicies {\n\t\t\tvar remainingShardGroups []ShardGroupInfo\n\t\t\tfor _, sgi := range rp.ShardGroups {\n\t\t\t\tif 
sgi.DeletedAt.IsZero() || !expiration.After(sgi.DeletedAt) {\n\t\t\t\t\tremainingShardGroups = append(remainingShardGroups, sgi)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tchanged = true\n\t\t\t}\n\t\t\tdata.Databases[i].RetentionPolicies[j].ShardGroups = remainingShardGroups\n\t\t}\n\t}\n\tif changed {\n\t\treturn c.commit(data)\n\t}\n\treturn nil\n}\n\n// CreateShardGroup creates a shard group on a database and policy for a given timestamp.\nfunc (c *Client) CreateShardGroup(database, policy string, timestamp time.Time) (*ShardGroupInfo, error) {\n\t// Check under a read-lock\n\tc.mu.RLock()\n\tif sg, _ := c.cacheData.ShardGroupByTimestamp(database, policy, timestamp); sg != nil {\n\t\tc.mu.RUnlock()\n\t\treturn sg, nil\n\t}\n\tc.mu.RUnlock()\n\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\t// Check again under the write lock\n\tdata := c.cacheData.Clone()\n\tif sg, _ := data.ShardGroupByTimestamp(database, policy, timestamp); sg != nil {\n\t\treturn sg, nil\n\t}\n\n\tsgi, err := createShardGroup(data, database, policy, timestamp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := c.commit(data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn sgi, nil\n}\n\nfunc createShardGroup(data *Data, database, policy string, timestamp time.Time) (*ShardGroupInfo, error) {\n\t// It is the responsibility of the caller to check if it exists before calling this method.\n\tif sg, _ := data.ShardGroupByTimestamp(database, policy, timestamp); sg != nil {\n\t\treturn nil, ErrShardGroupExists\n\t}\n\n\tif err := data.CreateShardGroup(database, policy, timestamp); err != nil {\n\t\treturn nil, err\n\t}\n\n\trpi, err := data.RetentionPolicy(database, policy)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if rpi == nil {\n\t\treturn nil, errors.New(\"retention policy deleted after shard group created\")\n\t}\n\n\tsgi := rpi.ShardGroupByTimestamp(timestamp)\n\treturn sgi, nil\n}\n\n// DeleteShardGroup removes a shard group from a database and retention policy by id.\nfunc (c 
*Client) DeleteShardGroup(database, policy string, id uint64) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tdata := c.cacheData.Clone()\n\n\tif err := data.DeleteShardGroup(database, policy, id); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.commit(data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// PrecreateShardGroups creates shard groups whose endtime is before the 'to' time passed in, but\n// is yet to expire before 'from'. This is to avoid the need for these shards to be created when data\n// for the corresponding time range arrives. Shard creation involves Raft consensus, and precreation\n// avoids taking the hit at write-time.\nfunc (c *Client) PrecreateShardGroups(from, to time.Time) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tdata := c.cacheData.Clone()\n\tvar changed bool\n\n\tfor _, di := range data.Databases {\n\t\tfor _, rp := range di.RetentionPolicies {\n\t\t\tif len(rp.ShardGroups) == 0 {\n\t\t\t\t// No data was ever written to this group, or all groups have been deleted.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tg := rp.ShardGroups[len(rp.ShardGroups)-1] // Get the last group in time.\n\t\t\tif !g.Deleted() && g.EndTime.Before(to) && g.EndTime.After(from) {\n\t\t\t\t// Group is not deleted, will end before the future time, but is still yet to expire.\n\t\t\t\t// This last check is important, so the system doesn't create shards groups wholly\n\t\t\t\t// in the past.\n\n\t\t\t\t// Create successive shard group.\n\t\t\t\tnextShardGroupTime := g.EndTime.Add(1 * time.Nanosecond)\n\t\t\t\t// if it already exists, continue\n\t\t\t\tif sg, _ := data.ShardGroupByTimestamp(di.Name, rp.Name, nextShardGroupTime); sg != nil {\n\t\t\t\t\tc.logger.Info(fmt.Sprintf(\"shard group %d exists for database %s, retention policy %s\", sg.ID, di.Name, rp.Name))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tnewGroup, err := createShardGroup(data, di.Name, rp.Name, nextShardGroupTime)\n\t\t\t\tif err != nil {\n\t\t\t\t\tc.logger.Info(fmt.Sprintf(\"failed to 
precreate successive shard group for group %d: %s\", g.ID, err.Error()))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tchanged = true\n\t\t\t\tc.logger.Info(fmt.Sprintf(\"new shard group %d successfully precreated for database %s, retention policy %s\", newGroup.ID, di.Name, rp.Name))\n\t\t\t}\n\t\t}\n\t}\n\n\tif changed {\n\t\tif err := c.commit(data); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ShardOwner returns the owning shard group info for a specific shard.\nfunc (c *Client) ShardOwner(shardID uint64) (database, policy string, sgi *ShardGroupInfo) {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\tfor _, dbi := range c.cacheData.Databases {\n\t\tfor _, rpi := range dbi.RetentionPolicies {\n\t\t\tfor _, g := range rpi.ShardGroups {\n\t\t\t\tif g.Deleted() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfor _, sh := range g.Shards {\n\t\t\t\t\tif sh.ID == shardID {\n\t\t\t\t\t\tdatabase = dbi.Name\n\t\t\t\t\t\tpolicy = rpi.Name\n\t\t\t\t\t\tsgi = &g\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n// CreateContinuousQuery saves a continuous query with the given name for the given database.\nfunc (c *Client) CreateContinuousQuery(database, name, query string) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tdata := c.cacheData.Clone()\n\n\tif err := data.CreateContinuousQuery(database, name, query); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.commit(data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// DropContinuousQuery removes the continuous query with the given name on the given database.\nfunc (c *Client) DropContinuousQuery(database, name string) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tdata := c.cacheData.Clone()\n\n\tif err := data.DropContinuousQuery(database, name); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.commit(data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// CreateSubscription creates a subscription against the given database and retention 
policy.\nfunc (c *Client) CreateSubscription(database, rp, name, mode string, destinations []string) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tdata := c.cacheData.Clone()\n\n\tif err := data.CreateSubscription(database, rp, name, mode, destinations); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.commit(data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// DropSubscription removes the named subscription from the given database and retention policy.\nfunc (c *Client) DropSubscription(database, rp, name string) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tdata := c.cacheData.Clone()\n\n\tif err := data.DropSubscription(database, rp, name); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.commit(data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// SetData overwrites the underlying data in the meta store.\nfunc (c *Client) SetData(data *Data) error {\n\tc.mu.Lock()\n\n\t// reset the index so the commit will fire a change event\n\tc.cacheData.Index = 0\n\n\t// increment the index to force the changed channel to fire\n\td := data.Clone()\n\td.Index++\n\n\tif err := c.commit(d); err != nil {\n\t\treturn err\n\t}\n\n\tc.mu.Unlock()\n\n\treturn nil\n}\n\n// Data returns a clone of the underlying data in the meta store.\nfunc (c *Client) Data() Data {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\td := c.cacheData.Clone()\n\treturn *d\n}\n\n// WaitForDataChanged returns a channel that will get closed when\n// the metastore data has changed.\nfunc (c *Client) WaitForDataChanged() chan struct{} {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\treturn c.changed\n}\n\n// commit writes data to the underlying store.\n// This method assumes c's mutex is already locked.\nfunc (c *Client) commit(data *Data) error {\n\tdata.Index++\n\n\t// try to write to disk before updating in memory\n\tif err := snapshot(c.path, data); err != nil {\n\t\treturn err\n\t}\n\n\t// update in memory\n\tc.cacheData = data\n\n\t// close channels to signal 
changes\n\tclose(c.changed)\n\tc.changed = make(chan struct{})\n\n\treturn nil\n}\n\n// MarshalBinary returns a binary representation of the underlying data.\nfunc (c *Client) MarshalBinary() ([]byte, error) {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\treturn c.cacheData.MarshalBinary()\n}\n\n// WithLogger sets the logger for the client.\nfunc (c *Client) WithLogger(log zap.Logger) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tc.logger = log.With(zap.String(\"service\", \"metaclient\"))\n}\n\n// snapshot saves the current meta data to disk.\nfunc snapshot(path string, data *Data) error {\n\tfile := filepath.Join(path, metaFile)\n\ttmpFile := file + \"tmp\"\n\n\tf, err := os.Create(tmpFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tvar d []byte\n\tif b, err := data.MarshalBinary(); err != nil {\n\t\treturn err\n\t} else {\n\t\td = b\n\t}\n\n\tif _, err := f.Write(d); err != nil {\n\t\treturn err\n\t}\n\n\tif err = f.Sync(); err != nil {\n\t\treturn err\n\t}\n\n\t//close file handle before renaming to support Windows\n\tif err = f.Close(); err != nil {\n\t\treturn err\n\t}\n\n\treturn renameFile(tmpFile, file)\n}\n\n// Load loads the current meta data from disk.\nfunc (c *Client) Load() error {\n\tfile := filepath.Join(c.path, metaFile)\n\n\tf, err := os.Open(file)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tdata, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.cacheData.UnmarshalBinary(data); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\ntype uint64Slice []uint64\n\nfunc (a uint64Slice) Len() int           { return len(a) }\nfunc (a uint64Slice) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a uint64Slice) Less(i, j int) bool { return a[i] < a[j] }\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/meta/client_test.go",
    "content": "package meta_test\n\nimport (\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n)\n\nfunc TestMetaClient_CreateDatabaseOnly(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\tif db, err := c.CreateDatabase(\"db0\"); err != nil {\n\t\tt.Fatal(err)\n\t} else if db.Name != \"db0\" {\n\t\tt.Fatalf(\"database name mismatch.  exp: db0, got %s\", db.Name)\n\t}\n\n\tdb := c.Database(\"db0\")\n\tif db == nil {\n\t\tt.Fatal(\"database not found\")\n\t} else if db.Name != \"db0\" {\n\t\tt.Fatalf(\"db name wrong: %s\", db.Name)\n\t}\n\n\t// Make sure a default retention policy was created.\n\trp, err := c.RetentionPolicy(\"db0\", \"autogen\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if rp == nil {\n\t\tt.Fatal(\"failed to create rp\")\n\t} else if exp, got := \"autogen\", rp.Name; exp != got {\n\t\tt.Fatalf(\"rp name wrong:\\n\\texp: %s\\n\\tgot: %s\", exp, got)\n\t}\n}\n\nfunc TestMetaClient_CreateDatabaseIfNotExists(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\tif _, err := c.CreateDatabase(\"db0\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdb := c.Database(\"db0\")\n\tif db == nil {\n\t\tt.Fatal(\"database not found\")\n\t} else if db.Name != \"db0\" {\n\t\tt.Fatalf(\"db name wrong: %s\", db.Name)\n\t}\n\n\tif _, err := c.CreateDatabase(\"db0\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestMetaClient_CreateDatabaseWithRetentionPolicy(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\t// Calling CreateDatabaseWithRetentionPolicy with a nil spec should return\n\t// an error\n\tif _, err := c.CreateDatabaseWithRetentionPolicy(\"db0\", nil); err == nil {\n\t\tt.Fatal(\"expected 
error\")\n\t}\n\n\tduration := 1 * time.Hour\n\treplicaN := 1\n\tspec := meta.RetentionPolicySpec{\n\t\tName:               \"rp0\",\n\t\tDuration:           &duration,\n\t\tReplicaN:           &replicaN,\n\t\tShardGroupDuration: 60 * time.Minute,\n\t}\n\tif _, err := c.CreateDatabaseWithRetentionPolicy(\"db0\", &spec); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdb := c.Database(\"db0\")\n\tif db == nil {\n\t\tt.Fatal(\"database not found\")\n\t} else if db.Name != \"db0\" {\n\t\tt.Fatalf(\"db name wrong: %s\", db.Name)\n\t}\n\n\trp := db.RetentionPolicy(\"rp0\")\n\tif rp.Name != \"rp0\" {\n\t\tt.Fatalf(\"rp name wrong: %s\", rp.Name)\n\t} else if rp.Duration != time.Hour {\n\t\tt.Fatalf(\"rp duration wrong: %v\", rp.Duration)\n\t} else if rp.ReplicaN != 1 {\n\t\tt.Fatalf(\"rp replication wrong: %d\", rp.ReplicaN)\n\t} else if rp.ShardGroupDuration != 60*time.Minute {\n\t\tt.Fatalf(\"rp shard duration wrong: %v\", rp.ShardGroupDuration)\n\t}\n\n\t// Recreating the exact same database with retention policy is not\n\t// an error.\n\tif _, err := c.CreateDatabaseWithRetentionPolicy(\"db0\", &spec); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// If create database is used by itself, no error should be returned and\n\t// the default retention policy should not be changed.\n\tif dbi, err := c.CreateDatabase(\"db0\"); err != nil {\n\t\tt.Fatalf(\"got %v, but expected %v\", err, nil)\n\t} else if dbi.DefaultRetentionPolicy != \"rp0\" {\n\t\tt.Fatalf(\"got %v, but expected %v\", dbi.DefaultRetentionPolicy, \"rp0\")\n\t} else if got, exp := len(dbi.RetentionPolicies), 1; got != exp {\n\t\t// Ensure no additional retention policies were created.\n\t\tt.Fatalf(\"got %v, but expected %v\", got, exp)\n\t}\n}\n\nfunc TestMetaClient_CreateDatabaseWithRetentionPolicy_Conflict_Fields(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\tduration := 1 * time.Hour\n\treplicaN := 1\n\tspec := meta.RetentionPolicySpec{\n\t\tName:            
   \"rp0\",\n\t\tDuration:           &duration,\n\t\tReplicaN:           &replicaN,\n\t\tShardGroupDuration: 60 * time.Minute,\n\t}\n\tif _, err := c.CreateDatabaseWithRetentionPolicy(\"db0\", &spec); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// If the rp's name is different, and error should be returned.\n\tspec2 := spec\n\tspec2.Name = spec.Name + \"1\"\n\tif _, err := c.CreateDatabaseWithRetentionPolicy(\"db0\", &spec2); err != meta.ErrRetentionPolicyConflict {\n\t\tt.Fatalf(\"got %v, but expected %v\", err, meta.ErrRetentionPolicyConflict)\n\t}\n\n\t// If the rp's duration is different, an error should be returned.\n\tspec2 = spec\n\tduration2 := *spec.Duration + time.Minute\n\tspec2.Duration = &duration2\n\tif _, err := c.CreateDatabaseWithRetentionPolicy(\"db0\", &spec2); err != meta.ErrRetentionPolicyConflict {\n\t\tt.Fatalf(\"got %v, but expected %v\", err, meta.ErrRetentionPolicyConflict)\n\t}\n\n\t// If the rp's replica is different, an error should be returned.\n\tspec2 = spec\n\treplica2 := *spec.ReplicaN + 1\n\tspec2.ReplicaN = &replica2\n\tif _, err := c.CreateDatabaseWithRetentionPolicy(\"db0\", &spec2); err != meta.ErrRetentionPolicyConflict {\n\t\tt.Fatalf(\"got %v, but expected %v\", err, meta.ErrRetentionPolicyConflict)\n\t}\n\n\t// If the rp's shard group duration is different, an error should be returned.\n\tspec2 = spec\n\tspec2.ShardGroupDuration = spec.ShardGroupDuration + time.Minute\n\tif _, err := c.CreateDatabaseWithRetentionPolicy(\"db0\", &spec2); err != meta.ErrRetentionPolicyConflict {\n\t\tt.Fatalf(\"got %v, but expected %v\", err, meta.ErrRetentionPolicyConflict)\n\t}\n}\n\nfunc TestMetaClient_CreateDatabaseWithRetentionPolicy_Conflict_NonDefault(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\tduration := 1 * time.Hour\n\treplicaN := 1\n\tspec := meta.RetentionPolicySpec{\n\t\tName:               \"rp0\",\n\t\tDuration:           &duration,\n\t\tReplicaN:           
&replicaN,\n\t\tShardGroupDuration: 60 * time.Minute,\n\t}\n\n\t// Create a default retention policy.\n\tif _, err := c.CreateDatabaseWithRetentionPolicy(\"db0\", &spec); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Let's create a non-default retention policy.\n\tspec2 := spec\n\tspec2.Name = \"rp1\"\n\tif _, err := c.CreateRetentionPolicy(\"db0\", &spec2, false); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// If we try to create a database with the non-default retention policy then\n\t// it's an error.\n\tif _, err := c.CreateDatabaseWithRetentionPolicy(\"db0\", &spec2); err != meta.ErrRetentionPolicyConflict {\n\t\tt.Fatalf(\"got %v, but expected %v\", err, meta.ErrRetentionPolicyConflict)\n\t}\n}\n\nfunc TestMetaClient_Databases(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\t// Create two databases.\n\tdb, err := c.CreateDatabase(\"db0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if db == nil {\n\t\tt.Fatal(\"database not found\")\n\t} else if db.Name != \"db0\" {\n\t\tt.Fatalf(\"db name wrong: %s\", db.Name)\n\t}\n\n\tdb, err = c.CreateDatabase(\"db1\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if db.Name != \"db1\" {\n\t\tt.Fatalf(\"db name wrong: %s\", db.Name)\n\t}\n\n\tdbs := c.Databases()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(dbs) != 2 {\n\t\tt.Fatalf(\"expected 2 databases but got %d\", len(dbs))\n\t} else if dbs[0].Name != \"db0\" {\n\t\tt.Fatalf(\"db name wrong: %s\", dbs[0].Name)\n\t} else if dbs[1].Name != \"db1\" {\n\t\tt.Fatalf(\"db name wrong: %s\", dbs[1].Name)\n\t}\n}\n\nfunc TestMetaClient_DropDatabase(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\tif _, err := c.CreateDatabase(\"db0\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdb := c.Database(\"db0\")\n\tif db == nil {\n\t\tt.Fatalf(\"database not found\")\n\t} else if db.Name != \"db0\" {\n\t\tt.Fatalf(\"db name wrong: %s\", db.Name)\n\t}\n\n\tif err := 
c.DropDatabase(\"db0\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif db = c.Database(\"db0\"); db != nil {\n\t\tt.Fatalf(\"expected database to not return: %v\", db)\n\t}\n\n\t// Dropping a database that does not exist is not an error.\n\tif err := c.DropDatabase(\"db foo\"); err != nil {\n\t\tt.Fatalf(\"got %v error, but expected no error\", err)\n\t}\n}\n\nfunc TestMetaClient_CreateRetentionPolicy(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\tif _, err := c.CreateDatabase(\"db0\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdb := c.Database(\"db0\")\n\tif db == nil {\n\t\tt.Fatal(\"database not found\")\n\t} else if db.Name != \"db0\" {\n\t\tt.Fatalf(\"db name wrong: %s\", db.Name)\n\t}\n\n\trp0 := meta.RetentionPolicyInfo{\n\t\tName:               \"rp0\",\n\t\tReplicaN:           1,\n\t\tDuration:           2 * time.Hour,\n\t\tShardGroupDuration: 2 * time.Hour,\n\t}\n\n\tif _, err := c.CreateRetentionPolicy(\"db0\", &meta.RetentionPolicySpec{\n\t\tName:               rp0.Name,\n\t\tReplicaN:           &rp0.ReplicaN,\n\t\tDuration:           &rp0.Duration,\n\t\tShardGroupDuration: rp0.ShardGroupDuration,\n\t}, true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tactual, err := c.RetentionPolicy(\"db0\", \"rp0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if got, exp := actual, &rp0; !reflect.DeepEqual(got, exp) {\n\t\tt.Fatalf(\"got %#v, expected %#v\", got, exp)\n\t}\n\n\t// Create the same policy.  
Should not error.\n\tif _, err := c.CreateRetentionPolicy(\"db0\", &meta.RetentionPolicySpec{\n\t\tName:               rp0.Name,\n\t\tReplicaN:           &rp0.ReplicaN,\n\t\tDuration:           &rp0.Duration,\n\t\tShardGroupDuration: rp0.ShardGroupDuration,\n\t}, true); err != nil {\n\t\tt.Fatal(err)\n\t} else if actual, err = c.RetentionPolicy(\"db0\", \"rp0\"); err != nil {\n\t\tt.Fatal(err)\n\t} else if got, exp := actual, &rp0; !reflect.DeepEqual(got, exp) {\n\t\tt.Fatalf(\"got %#v, expected %#v\", got, exp)\n\t}\n\n\t// Creating the same policy, but with a different duration should\n\t// result in an error.\n\trp1 := rp0\n\trp1.Duration = 2 * rp0.Duration\n\n\t_, got := c.CreateRetentionPolicy(\"db0\", &meta.RetentionPolicySpec{\n\t\tName:               rp1.Name,\n\t\tReplicaN:           &rp1.ReplicaN,\n\t\tDuration:           &rp1.Duration,\n\t\tShardGroupDuration: rp1.ShardGroupDuration,\n\t}, true)\n\tif exp := meta.ErrRetentionPolicyExists; got != exp {\n\t\tt.Fatalf(\"got error %v, expected error %v\", got, exp)\n\t}\n\n\t// Creating the same policy, but with a different replica factor\n\t// should also result in an error.\n\trp1 = rp0\n\trp1.ReplicaN = rp0.ReplicaN + 1\n\n\t_, got = c.CreateRetentionPolicy(\"db0\", &meta.RetentionPolicySpec{\n\t\tName:               rp1.Name,\n\t\tReplicaN:           &rp1.ReplicaN,\n\t\tDuration:           &rp1.Duration,\n\t\tShardGroupDuration: rp1.ShardGroupDuration,\n\t}, true)\n\tif exp := meta.ErrRetentionPolicyExists; got != exp {\n\t\tt.Fatalf(\"got error %v, expected error %v\", got, exp)\n\t}\n\n\t// Creating the same policy, but with a different shard group\n\t// duration should also result in an error.\n\trp1 = rp0\n\trp1.ShardGroupDuration = rp0.ShardGroupDuration / 2\n\n\t_, got = c.CreateRetentionPolicy(\"db0\", &meta.RetentionPolicySpec{\n\t\tName:               rp1.Name,\n\t\tReplicaN:           &rp1.ReplicaN,\n\t\tDuration:           &rp1.Duration,\n\t\tShardGroupDuration: rp1.ShardGroupDuration,\n\t}, 
true)\n\tif exp := meta.ErrRetentionPolicyExists; got != exp {\n\t\tt.Fatalf(\"got error %v, expected error %v\", got, exp)\n\t}\n\n\t// Creating a policy with the shard duration being greater than the\n\t// duration should also be an error.\n\trp1 = rp0\n\trp1.Duration = 1 * time.Hour\n\trp1.ShardGroupDuration = 2 * time.Hour\n\n\t_, got = c.CreateRetentionPolicy(\"db0\", &meta.RetentionPolicySpec{\n\t\tName:               rp1.Name,\n\t\tReplicaN:           &rp1.ReplicaN,\n\t\tDuration:           &rp1.Duration,\n\t\tShardGroupDuration: rp1.ShardGroupDuration,\n\t}, true)\n\tif exp := meta.ErrIncompatibleDurations; got != exp {\n\t\tt.Fatalf(\"got error %v, expected error %v\", got, exp)\n\t}\n}\n\nfunc TestMetaClient_DefaultRetentionPolicy(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\tduration := 1 * time.Hour\n\treplicaN := 1\n\tif _, err := c.CreateDatabaseWithRetentionPolicy(\"db0\", &meta.RetentionPolicySpec{\n\t\tName:     \"rp0\",\n\t\tDuration: &duration,\n\t\tReplicaN: &replicaN,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdb := c.Database(\"db0\")\n\tif db == nil {\n\t\tt.Fatal(\"datbase not found\")\n\t} else if db.Name != \"db0\" {\n\t\tt.Fatalf(\"db name wrong: %s\", db.Name)\n\t}\n\n\trp, err := c.RetentionPolicy(\"db0\", \"rp0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if rp.Name != \"rp0\" {\n\t\tt.Fatalf(\"rp name wrong: %s\", rp.Name)\n\t} else if rp.Duration != time.Hour {\n\t\tt.Fatalf(\"rp duration wrong: %s\", rp.Duration.String())\n\t} else if rp.ReplicaN != 1 {\n\t\tt.Fatalf(\"rp replication wrong: %d\", rp.ReplicaN)\n\t}\n\n\t// Make sure default retention policy is now rp0\n\tif exp, got := \"rp0\", db.DefaultRetentionPolicy; exp != got {\n\t\tt.Fatalf(\"rp name wrong: \\n\\texp: %s\\n\\tgot: %s\", exp, db.DefaultRetentionPolicy)\n\t}\n}\n\nfunc TestMetaClient_UpdateRetentionPolicy(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer 
os.RemoveAll(d)\n\tdefer c.Close()\n\n\tif _, err := c.CreateDatabaseWithRetentionPolicy(\"db0\", &meta.RetentionPolicySpec{\n\t\tName:               \"rp0\",\n\t\tShardGroupDuration: 4 * time.Hour,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trpi, err := c.RetentionPolicy(\"db0\", \"rp0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Set the duration to another value and ensure that the shard group duration\n\t// doesn't change.\n\tduration := 2 * rpi.ShardGroupDuration\n\treplicaN := 1\n\tif err := c.UpdateRetentionPolicy(\"db0\", \"rp0\", &meta.RetentionPolicyUpdate{\n\t\tDuration: &duration,\n\t\tReplicaN: &replicaN,\n\t}, true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trpi, err = c.RetentionPolicy(\"db0\", \"rp0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif exp, got := 4*time.Hour, rpi.ShardGroupDuration; exp != got {\n\t\tt.Fatalf(\"shard group duration wrong: \\n\\texp: %s\\n\\tgot: %s\", exp, got)\n\t}\n\n\t// Set the duration to below the shard group duration. This should return an error.\n\tduration = rpi.ShardGroupDuration / 2\n\tif err := c.UpdateRetentionPolicy(\"db0\", \"rp0\", &meta.RetentionPolicyUpdate{\n\t\tDuration: &duration,\n\t}, true); err == nil {\n\t\tt.Fatal(\"expected error\")\n\t} else if err != meta.ErrIncompatibleDurations {\n\t\tt.Fatalf(\"expected error '%s', got '%s'\", meta.ErrIncompatibleDurations, err)\n\t}\n\n\t// Set the shard duration longer than the overall duration. 
This should also return an error.\n\tsgDuration := rpi.Duration * 2\n\tif err := c.UpdateRetentionPolicy(\"db0\", \"rp0\", &meta.RetentionPolicyUpdate{\n\t\tShardGroupDuration: &sgDuration,\n\t}, true); err == nil {\n\t\tt.Fatal(\"expected error\")\n\t} else if err != meta.ErrIncompatibleDurations {\n\t\tt.Fatalf(\"expected error '%s', got '%s'\", meta.ErrIncompatibleDurations, err)\n\t}\n\n\t// Set both values to incompatible values and ensure an error is returned.\n\tduration = rpi.ShardGroupDuration\n\tsgDuration = rpi.Duration\n\tif err := c.UpdateRetentionPolicy(\"db0\", \"rp0\", &meta.RetentionPolicyUpdate{\n\t\tDuration:           &duration,\n\t\tShardGroupDuration: &sgDuration,\n\t}, true); err == nil {\n\t\tt.Fatal(\"expected error\")\n\t} else if err != meta.ErrIncompatibleDurations {\n\t\tt.Fatalf(\"expected error '%s', got '%s'\", meta.ErrIncompatibleDurations, err)\n\t}\n\n\t// Allow any shard duration if the duration is set to zero.\n\tduration = time.Duration(0)\n\tsgDuration = 168 * time.Hour\n\tif err := c.UpdateRetentionPolicy(\"db0\", \"rp0\", &meta.RetentionPolicyUpdate{\n\t\tDuration:           &duration,\n\t\tShardGroupDuration: &sgDuration,\n\t}, true); err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}\n\nfunc TestMetaClient_DropRetentionPolicy(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\tif _, err := c.CreateDatabase(\"db0\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdb := c.Database(\"db0\")\n\tif db == nil {\n\t\tt.Fatal(\"database not found\")\n\t} else if db.Name != \"db0\" {\n\t\tt.Fatalf(\"db name wrong: %s\", db.Name)\n\t}\n\n\tduration := 1 * time.Hour\n\treplicaN := 1\n\tif _, err := c.CreateRetentionPolicy(\"db0\", &meta.RetentionPolicySpec{\n\t\tName:     \"rp0\",\n\t\tDuration: &duration,\n\t\tReplicaN: &replicaN,\n\t}, true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trp, err := c.RetentionPolicy(\"db0\", \"rp0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} 
else if rp.Name != \"rp0\" {\n\t\tt.Fatalf(\"rp name wrong: %s\", rp.Name)\n\t} else if rp.Duration != time.Hour {\n\t\tt.Fatalf(\"rp duration wrong: %s\", rp.Duration.String())\n\t} else if rp.ReplicaN != 1 {\n\t\tt.Fatalf(\"rp replication wrong: %d\", rp.ReplicaN)\n\t}\n\n\tif err := c.DropRetentionPolicy(\"db0\", \"rp0\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trp, err = c.RetentionPolicy(\"db0\", \"rp0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if rp != nil {\n\t\tt.Fatalf(\"rp should have been dropped\")\n\t}\n}\n\nfunc TestMetaClient_CreateUser(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\t// Create an admin user\n\tif _, err := c.CreateUser(\"fred\", \"supersecure\", true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Create a non-admin user\n\tif _, err := c.CreateUser(\"wilma\", \"password\", false); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tu, err := c.User(\"fred\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif exp, got := \"fred\", u.ID(); exp != got {\n\t\tt.Fatalf(\"unexpected user name: exp: %s got: %s\", exp, got)\n\t}\n\tif !u.IsAdmin() {\n\t\tt.Fatalf(\"expected user to be admin\")\n\t}\n\n\tu, err = c.Authenticate(\"fred\", \"supersecure\")\n\tif u == nil || err != nil || u.ID() != \"fred\" {\n\t\tt.Fatalf(\"failed to authenticate\")\n\t}\n\n\t// Auth for bad password should fail\n\tu, err = c.Authenticate(\"fred\", \"badpassword\")\n\tif u != nil || err != meta.ErrAuthenticate {\n\t\tt.Fatalf(\"authentication should fail with %s\", meta.ErrAuthenticate)\n\t}\n\n\t// Auth for no password should fail\n\tu, err = c.Authenticate(\"fred\", \"\")\n\tif u != nil || err != meta.ErrAuthenticate {\n\t\tt.Fatalf(\"authentication should fail with %s\", meta.ErrAuthenticate)\n\t}\n\n\t// Change password should succeed.\n\tif err := c.UpdateUser(\"fred\", \"moresupersecure\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Auth for old password should fail\n\tu, err = c.Authenticate(\"fred\", 
\"supersecure\")\n\tif u != nil || err != meta.ErrAuthenticate {\n\t\tt.Fatalf(\"authentication should fail with %s\", meta.ErrAuthenticate)\n\t}\n\n\t// Auth for new password should succeed.\n\tu, err = c.Authenticate(\"fred\", \"moresupersecure\")\n\tif u == nil || err != nil || u.ID() != \"fred\" {\n\t\tt.Fatalf(\"failed to authenticate\")\n\t}\n\n\t// Auth for unkonwn user should fail\n\tu, err = c.Authenticate(\"foo\", \"\")\n\tif u != nil || err != meta.ErrUserNotFound {\n\t\tt.Fatalf(\"authentication should fail with %s\", meta.ErrUserNotFound)\n\t}\n\n\tu, err = c.User(\"wilma\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif exp, got := \"wilma\", u.ID(); exp != got {\n\t\tt.Fatalf(\"unexpected user name: exp: %s got: %s\", exp, got)\n\t}\n\tif u.IsAdmin() {\n\t\tt.Fatalf(\"expected user not to be an admin\")\n\t}\n\n\tif exp, got := 2, c.UserCount(); exp != got {\n\t\tt.Fatalf(\"unexpected user count.  got: %d exp: %d\", got, exp)\n\t}\n\n\t// Grant privilidges to a non-admin user\n\tif err := c.SetAdminPrivilege(\"wilma\", true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tu, err = c.User(\"wilma\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif exp, got := \"wilma\", u.ID(); exp != got {\n\t\tt.Fatalf(\"unexpected user name: exp: %s got: %s\", exp, got)\n\t}\n\tif !u.IsAdmin() {\n\t\tt.Fatalf(\"expected user to be an admin\")\n\t}\n\n\t// Revoke privilidges from user\n\tif err := c.SetAdminPrivilege(\"wilma\", false); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tu, err = c.User(\"wilma\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif exp, got := \"wilma\", u.ID(); exp != got {\n\t\tt.Fatalf(\"unexpected user name: exp: %s got: %s\", exp, got)\n\t}\n\tif u.IsAdmin() {\n\t\tt.Fatalf(\"expected user not to be an admin\")\n\t}\n\n\t// Create a database to use for assiging privileges to.\n\tif _, err := c.CreateDatabase(\"db0\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdb := c.Database(\"db0\")\n\tif db.Name != \"db0\" {\n\t\tt.Fatalf(\"db name wrong: %s\", 
db.Name)\n\t}\n\n\t// Assign a single privilege at the database level\n\tif err := c.SetPrivilege(\"wilma\", \"db0\", influxql.ReadPrivilege); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tp, err := c.UserPrivilege(\"wilma\", \"db0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif p == nil {\n\t\tt.Fatal(\"expected privilege but was nil\")\n\t}\n\tif exp, got := influxql.ReadPrivilege, *p; exp != got {\n\t\tt.Fatalf(\"unexpected privilege.  exp: %d, got: %d\", exp, got)\n\t}\n\n\t// Remove a single privilege at the database level\n\tif err := c.SetPrivilege(\"wilma\", \"db0\", influxql.NoPrivileges); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tp, err = c.UserPrivilege(\"wilma\", \"db0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif p == nil {\n\t\tt.Fatal(\"expected privilege but was nil\")\n\t}\n\tif exp, got := influxql.NoPrivileges, *p; exp != got {\n\t\tt.Fatalf(\"unexpected privilege.  exp: %d, got: %d\", exp, got)\n\t}\n\n\t// Drop a user\n\tif err := c.DropUser(\"wilma\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tu, err = c.User(\"wilma\")\n\tif err != meta.ErrUserNotFound {\n\t\tt.Fatalf(\"user lookup should fail with %s\", meta.ErrUserNotFound)\n\t}\n\n\tif exp, got := 1, c.UserCount(); exp != got {\n\t\tt.Fatalf(\"unexpected user count.  
got: %d exp: %d\", got, exp)\n\t}\n}\n\nfunc TestMetaClient_UpdateUser(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\t// UpdateUser that doesn't exist should return an error.\n\tif err := c.UpdateUser(\"foo\", \"bar\"); err == nil {\n\t\tt.Fatalf(\"expected error, got nil\")\n\t}\n}\n\nfunc TestMetaClient_ContinuousQueries(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\t// Create a database to use\n\tif _, err := c.CreateDatabase(\"db0\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdb := c.Database(\"db0\")\n\tif db == nil {\n\t\tt.Fatalf(\"database not found\")\n\t} else if db.Name != \"db0\" {\n\t\tt.Fatalf(\"db name wrong: %s\", db.Name)\n\t}\n\n\t// Create a CQ\n\tif err := c.CreateContinuousQuery(\"db0\", \"cq0\", `SELECT count(value) INTO foo_count FROM foo GROUP BY time(10m)`); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Recreating an existing CQ with the exact same query should not\n\t// return an error.\n\tif err := c.CreateContinuousQuery(\"db0\", \"cq0\", `SELECT count(value) INTO foo_count FROM foo GROUP BY time(10m)`); err != nil {\n\t\tt.Fatalf(\"got error %q, but didn't expect one\", err)\n\t}\n\n\t// Recreating an existing CQ with a different query should return\n\t// an error.\n\tif err := c.CreateContinuousQuery(\"db0\", \"cq0\", `SELECT min(value) INTO foo_max FROM foo GROUP BY time(20m)`); err == nil {\n\t\tt.Fatal(\"didn't get and error, but expected one\")\n\t} else if got, exp := err, meta.ErrContinuousQueryExists; got.Error() != exp.Error() {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// Create a few more CQ's\n\tif err := c.CreateContinuousQuery(\"db0\", \"cq1\", `SELECT max(value) INTO foo_max FROM foo GROUP BY time(10m)`); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := c.CreateContinuousQuery(\"db0\", \"cq2\", `SELECT min(value) INTO foo_min FROM foo GROUP BY time(10m)`); err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\n\t// Drop a single CQ\n\tif err := c.DropContinuousQuery(\"db0\", \"cq1\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Dropping a nonexistent CQ should return an error.\n\tif err := c.DropContinuousQuery(\"db0\", \"not-a-cq\"); err == nil {\n\t\tt.Fatal(\"expected an error, got nil\")\n\t}\n}\n\nfunc TestMetaClient_Subscriptions_Create(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\t// Create a database to use\n\tif _, err := c.CreateDatabase(\"db0\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdb := c.Database(\"db0\")\n\tif db == nil {\n\t\tt.Fatal(\"database not found\")\n\t} else if db.Name != \"db0\" {\n\t\tt.Fatalf(\"db name wrong: %s\", db.Name)\n\t}\n\n\t// Create a subscription\n\tif err := c.CreateSubscription(\"db0\", \"autogen\", \"sub0\", \"ALL\", []string{\"udp://example.com:9090\"}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Re-create a subscription\n\terr := c.CreateSubscription(\"db0\", \"autogen\", \"sub0\", \"ALL\", []string{\"udp://example.com:9090\"})\n\tif err == nil || err.Error() != `subscription already exists` {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n\n\t// Create another subscription.\n\tif err := c.CreateSubscription(\"db0\", \"autogen\", \"sub1\", \"ALL\", []string{\"udp://example.com:6060\"}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Create a subscription with invalid scheme\n\terr = c.CreateSubscription(\"db0\", \"autogen\", \"sub2\", \"ALL\", []string{\"bad://example.com:9191\"})\n\tif err == nil || !strings.HasPrefix(err.Error(), \"invalid subscription URL\") {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n\n\t// Create a subscription without port number\n\terr = c.CreateSubscription(\"db0\", \"autogen\", \"sub2\", \"ALL\", []string{\"udp://example.com\"})\n\tif err == nil || !strings.HasPrefix(err.Error(), \"invalid subscription URL\") {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t}\n\n\t// Create an HTTP subscription.\n\tif 
err := c.CreateSubscription(\"db0\", \"autogen\", \"sub3\", \"ALL\", []string{\"http://example.com:9092\"}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Create an HTTPS subscription.\n\tif err := c.CreateSubscription(\"db0\", \"autogen\", \"sub4\", \"ALL\", []string{\"https://example.com:9092\"}); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestMetaClient_Subscriptions_Drop(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\t// Create a database to use\n\tif _, err := c.CreateDatabase(\"db0\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// DROP SUBSCRIPTION returns ErrSubscriptionNotFound when the\n\t// subscription is unknown.\n\terr := c.DropSubscription(\"db0\", \"autogen\", \"foo\")\n\tif got, exp := err, meta.ErrSubscriptionNotFound; got == nil || got.Error() != exp.Error() {\n\t\tt.Fatalf(\"got: %s, exp: %s\", got, exp)\n\t}\n\n\t// Create a subscription.\n\tif err := c.CreateSubscription(\"db0\", \"autogen\", \"sub0\", \"ALL\", []string{\"udp://example.com:9090\"}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// DROP SUBSCRIPTION returns an influxdb.ErrDatabaseNotFound when\n\t// the database is unknown.\n\terr = c.DropSubscription(\"foo\", \"autogen\", \"sub0\")\n\tif got, exp := err, influxdb.ErrDatabaseNotFound(\"foo\"); got.Error() != exp.Error() {\n\t\tt.Fatalf(\"got: %s, exp: %s\", got, exp)\n\t}\n\n\t// DROP SUBSCRIPTION returns an influxdb.ErrRetentionPolicyNotFound\n\t// when the retention policy is unknown.\n\terr = c.DropSubscription(\"db0\", \"foo_policy\", \"sub0\")\n\tif got, exp := err, influxdb.ErrRetentionPolicyNotFound(\"foo_policy\"); got.Error() != exp.Error() {\n\t\tt.Fatalf(\"got: %s, exp: %s\", got, exp)\n\t}\n\n\t// DROP SUBSCRIPTION drops the subsciption if it can find it.\n\terr = c.DropSubscription(\"db0\", \"autogen\", \"sub0\")\n\tif got := err; got != nil {\n\t\tt.Fatalf(\"got: %s, exp: %v\", got, nil)\n\t}\n}\n\nfunc TestMetaClient_Shards(t *testing.T) 
{\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\tif _, err := c.CreateDatabase(\"db0\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Test creating a shard group.\n\ttmin := time.Now()\n\tsg, err := c.CreateShardGroup(\"db0\", \"autogen\", tmin)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if sg == nil {\n\t\tt.Fatalf(\"expected ShardGroup\")\n\t}\n\n\t// Test pre-creating shard groups.\n\tdur := sg.EndTime.Sub(sg.StartTime) + time.Nanosecond\n\ttmax := tmin.Add(dur)\n\tif err := c.PrecreateShardGroups(tmin, tmax); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Test finding shard groups by time range.\n\tgroups, err := c.ShardGroupsByTimeRange(\"db0\", \"autogen\", tmin, tmax)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if len(groups) != 2 {\n\t\tt.Fatalf(\"wrong number of shard groups: %d\", len(groups))\n\t}\n\n\t// Test finding shard owner.\n\tdb, rp, owner := c.ShardOwner(groups[0].Shards[0].ID)\n\tif db != \"db0\" {\n\t\tt.Fatalf(\"wrong db name: %s\", db)\n\t} else if rp != \"autogen\" {\n\t\tt.Fatalf(\"wrong rp name: %s\", rp)\n\t} else if owner.ID != groups[0].ID {\n\t\tt.Fatalf(\"wrong owner: exp %d got %d\", groups[0].ID, owner.ID)\n\t}\n\n\t// Test deleting a shard group.\n\tif err := c.DeleteShardGroup(\"db0\", \"autogen\", groups[0].ID); err != nil {\n\t\tt.Fatal(err)\n\t} else if groups, err = c.ShardGroupsByTimeRange(\"db0\", \"autogen\", tmin, tmax); err != nil {\n\t\tt.Fatal(err)\n\t} else if len(groups) != 1 {\n\t\tt.Fatalf(\"wrong number of shard groups after delete: %d\", len(groups))\n\t}\n}\n\n// Tests that calling CreateShardGroup for the same time range doesn't increment the data.Index\nfunc TestMetaClient_CreateShardGroupIdempotent(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\tif _, err := c.CreateDatabase(\"db0\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// create a shard group.\n\ttmin := time.Now()\n\tsg, err := 
c.CreateShardGroup(\"db0\", \"autogen\", tmin)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if sg == nil {\n\t\tt.Fatalf(\"expected ShardGroup\")\n\t}\n\n\ti := c.Data().Index\n\tt.Log(\"index: \", i)\n\n\t// create the same shard group.\n\tsg, err = c.CreateShardGroup(\"db0\", \"autogen\", tmin)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if sg == nil {\n\t\tt.Fatalf(\"expected ShardGroup\")\n\t}\n\n\tt.Log(\"index: \", i)\n\tif got, exp := c.Data().Index, i; got != exp {\n\t\tt.Fatalf(\"PrecreateShardGroups failed: invalid index, got %d, exp %d\", got, exp)\n\t}\n\n\t// make sure pre-creating is also idempotent\n\t// Test pre-creating shard groups.\n\tdur := sg.EndTime.Sub(sg.StartTime) + time.Nanosecond\n\ttmax := tmin.Add(dur)\n\tif err := c.PrecreateShardGroups(tmin, tmax); err != nil {\n\t\tt.Fatal(err)\n\t}\n\ti = c.Data().Index\n\tt.Log(\"index: \", i)\n\tif err := c.PrecreateShardGroups(tmin, tmax); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Log(\"index: \", i)\n\tif got, exp := c.Data().Index, i; got != exp {\n\t\tt.Fatalf(\"PrecreateShardGroups failed: invalid index, got %d, exp %d\", got, exp)\n\t}\n}\n\nfunc TestMetaClient_PruneShardGroups(t *testing.T) {\n\tt.Parallel()\n\n\td, c := newClient()\n\tdefer os.RemoveAll(d)\n\tdefer c.Close()\n\n\tif _, err := c.CreateDatabase(\"db0\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err := c.CreateDatabase(\"db1\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tduration := 1 * time.Hour\n\treplicaN := 1\n\n\tif _, err := c.CreateRetentionPolicy(\"db1\", &meta.RetentionPolicySpec{\n\t\tName:     \"rp0\",\n\t\tDuration: &duration,\n\t\tReplicaN: &replicaN,\n\t}, true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tsg, err := c.CreateShardGroup(\"db1\", \"autogen\", time.Now())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if sg == nil {\n\t\tt.Fatalf(\"expected ShardGroup\")\n\t}\n\n\tsg, err = c.CreateShardGroup(\"db1\", \"autogen\", time.Now().Add(15*24*time.Hour))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else 
if sg == nil {\n\t\tt.Fatalf(\"expected ShardGroup\")\n\t}\n\n\tsg, err = c.CreateShardGroup(\"db1\", \"rp0\", time.Now())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if sg == nil {\n\t\tt.Fatalf(\"expected ShardGroup\")\n\t}\n\n\texpiration := time.Now().Add(-2 * 7 * 24 * time.Hour).Add(-1 * time.Hour)\n\n\tdata := c.Data()\n\tdata.Databases[1].RetentionPolicies[0].ShardGroups[0].DeletedAt = expiration\n\tdata.Databases[1].RetentionPolicies[0].ShardGroups[1].DeletedAt = expiration\n\n\tif err := c.SetData(&data); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := c.PruneShardGroups(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdata = c.Data()\n\trp, err := data.RetentionPolicy(\"db1\", \"autogen\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, exp := len(rp.ShardGroups), 0; got != exp {\n\t\tt.Fatalf(\"failed to prune shard group. got: %d, exp: %d\", got, exp)\n\t}\n\n\trp, err = data.RetentionPolicy(\"db1\", \"rp0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, exp := len(rp.ShardGroups), 1; got != exp {\n\t\tt.Fatalf(\"failed to prune shard group. 
got: %d, exp: %d\", got, exp)\n\t}\n}\n\nfunc TestMetaClient_PersistClusterIDAfterRestart(t *testing.T) {\n\tt.Parallel()\n\n\tcfg := newConfig()\n\tdefer os.RemoveAll(cfg.Dir)\n\n\tc := meta.NewClient(cfg)\n\tif err := c.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tid := c.ClusterID()\n\tif id == 0 {\n\t\tt.Fatal(\"cluster ID can't be zero\")\n\t}\n\n\tc = meta.NewClient(cfg)\n\tif err := c.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\tidAfter := c.ClusterID()\n\tif idAfter == 0 {\n\t\tt.Fatal(\"cluster ID can't be zero\")\n\t} else if idAfter != id {\n\t\tt.Fatalf(\"cluster id not the same: %d, %d\", idAfter, id)\n\t}\n}\n\nfunc newClient() (string, *meta.Client) {\n\tcfg := newConfig()\n\tc := meta.NewClient(cfg)\n\tif err := c.Open(); err != nil {\n\t\tpanic(err)\n\t}\n\treturn cfg.Dir, c\n}\n\nfunc newConfig() *meta.Config {\n\tcfg := meta.NewConfig()\n\tcfg.Dir = testTempDir(2)\n\treturn cfg\n}\n\nfunc testTempDir(skip int) string {\n\t// Get name of the calling function.\n\tpc, _, _, ok := runtime.Caller(skip)\n\tif !ok {\n\t\tpanic(\"failed to get name of test function\")\n\t}\n\t_, prefix := path.Split(runtime.FuncForPC(pc).Name())\n\t// Make a temp dir prefixed with calling function's name.\n\tdir, err := ioutil.TempDir(os.TempDir(), prefix)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn dir\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/meta/config.go",
    "content": "package meta\n\nimport (\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n)\n\nconst (\n\t// DefaultLeaseDuration is the default duration for leases.\n\tDefaultLeaseDuration = 60 * time.Second\n\n\t// DefaultLoggingEnabled determines if log messages are printed for the meta service.\n\tDefaultLoggingEnabled = true\n)\n\n// Config represents the meta configuration.\ntype Config struct {\n\tDir string `toml:\"dir\"`\n\n\tRetentionAutoCreate bool `toml:\"retention-autocreate\"`\n\tLoggingEnabled      bool `toml:\"logging-enabled\"`\n}\n\n// NewConfig builds a new configuration with default values.\nfunc NewConfig() *Config {\n\treturn &Config{\n\t\tRetentionAutoCreate: true,\n\t\tLoggingEnabled:      DefaultLoggingEnabled,\n\t}\n}\n\n// Validate returns an error if the config is invalid.\nfunc (c *Config) Validate() error {\n\tif c.Dir == \"\" {\n\t\treturn errors.New(\"Meta.Dir must be specified\")\n\t}\n\treturn nil\n}\n\n// Diagnostics returns a diagnostics representation of a subset of the Config.\nfunc (c *Config) Diagnostics() (*diagnostics.Diagnostics, error) {\n\treturn diagnostics.RowFromMap(map[string]interface{}{\n\t\t\"dir\": c.Dir,\n\t}), nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/meta/config_test.go",
    "content": "package meta_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n)\n\nfunc TestConfig_Parse(t *testing.T) {\n\t// Parse configuration.\n\tvar c meta.Config\n\tif _, err := toml.Decode(`\ndir = \"/tmp/foo\"\nlogging-enabled = false\n`, &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Validate configuration.\n\tif c.Dir != \"/tmp/foo\" {\n\t\tt.Fatalf(\"unexpected dir: %s\", c.Dir)\n\t} else if c.LoggingEnabled {\n\t\tt.Fatalf(\"unexpected logging enabled: %v\", c.LoggingEnabled)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/meta/data.go",
    "content": "package meta\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"net/url\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\t\"unicode\"\n\n\t\"github.com/gogo/protobuf/proto\"\n\t\"github.com/influxdata/influxdb\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\tinternal \"github.com/influxdata/influxdb/services/meta/internal\"\n)\n\n//go:generate protoc --gogo_out=. internal/meta.proto\n\nconst (\n\t// DefaultRetentionPolicyReplicaN is the default value of RetentionPolicyInfo.ReplicaN.\n\tDefaultRetentionPolicyReplicaN = 1\n\n\t// DefaultRetentionPolicyDuration is the default value of RetentionPolicyInfo.Duration.\n\tDefaultRetentionPolicyDuration = time.Duration(0)\n\n\t// DefaultRetentionPolicyName is the default name for auto generated retention policies.\n\tDefaultRetentionPolicyName = \"autogen\"\n\n\t// MinRetentionPolicyDuration represents the minimum duration for a policy.\n\tMinRetentionPolicyDuration = time.Hour\n)\n\n// Data represents the top level collection of all metadata.\ntype Data struct {\n\tTerm      uint64 // associated raft term\n\tIndex     uint64 // associated raft index\n\tClusterID uint64\n\tDatabases []DatabaseInfo\n\tUsers     []UserInfo\n\n\t// adminUserExists provides a constant time mechanism for determining\n\t// if there is at least one admin user.\n\tadminUserExists bool\n\n\tMaxShardGroupID uint64\n\tMaxShardID      uint64\n}\n\n// Database returns a DatabaseInfo by the database name.\nfunc (data *Data) Database(name string) *DatabaseInfo {\n\tfor i := range data.Databases {\n\t\tif data.Databases[i].Name == name {\n\t\t\treturn &data.Databases[i]\n\t\t}\n\t}\n\treturn nil\n}\n\n// CloneDatabases returns a copy of the DatabaseInfo.\nfunc (data *Data) CloneDatabases() []DatabaseInfo {\n\tif data.Databases == nil {\n\t\treturn nil\n\t}\n\tdbs := make([]DatabaseInfo, len(data.Databases))\n\tfor i := range data.Databases {\n\t\tdbs[i] = data.Databases[i].clone()\n\t}\n\treturn 
dbs\n}\n\n// CreateDatabase creates a new database.\n// It returns an error if name is blank or if a database with the same name already exists.\nfunc (data *Data) CreateDatabase(name string) error {\n\tif name == \"\" {\n\t\treturn ErrDatabaseNameRequired\n\t} else if data.Database(name) != nil {\n\t\treturn nil\n\t}\n\n\t// Append new node.\n\tdata.Databases = append(data.Databases, DatabaseInfo{Name: name})\n\n\treturn nil\n}\n\n// DropDatabase removes a database by name. It does not return an error\n// if the database cannot be found.\nfunc (data *Data) DropDatabase(name string) error {\n\tfor i := range data.Databases {\n\t\tif data.Databases[i].Name == name {\n\t\t\tdata.Databases = append(data.Databases[:i], data.Databases[i+1:]...)\n\n\t\t\t// Remove all user privileges associated with this database.\n\t\t\tfor i := range data.Users {\n\t\t\t\tdelete(data.Users[i].Privileges, name)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n\n// RetentionPolicy returns a retention policy for a database by name.\nfunc (data *Data) RetentionPolicy(database, name string) (*RetentionPolicyInfo, error) {\n\tdi := data.Database(database)\n\tif di == nil {\n\t\treturn nil, influxdb.ErrDatabaseNotFound(database)\n\t}\n\n\tfor i := range di.RetentionPolicies {\n\t\tif di.RetentionPolicies[i].Name == name {\n\t\t\treturn &di.RetentionPolicies[i], nil\n\t\t}\n\t}\n\treturn nil, nil\n}\n\n// CreateRetentionPolicy creates a new retention policy on a database.\n// It returns an error if name is blank or if the database does not exist.\nfunc (data *Data) CreateRetentionPolicy(database string, rpi *RetentionPolicyInfo, makeDefault bool) error {\n\t// Validate retention policy.\n\tif rpi == nil {\n\t\treturn ErrRetentionPolicyRequired\n\t} else if rpi.Name == \"\" {\n\t\treturn ErrRetentionPolicyNameRequired\n\t} else if rpi.ReplicaN < 1 {\n\t\treturn ErrReplicationFactorTooLow\n\t}\n\n\t// Normalise ShardDuration before comparing to any existing\n\t// retention policies. 
The client is supposed to do this, but\n\t// do it again to verify input.\n\trpi.ShardGroupDuration = normalisedShardDuration(rpi.ShardGroupDuration, rpi.Duration)\n\n\tif rpi.Duration > 0 && rpi.Duration < rpi.ShardGroupDuration {\n\t\treturn ErrIncompatibleDurations\n\t}\n\n\t// Find database.\n\tdi := data.Database(database)\n\tif di == nil {\n\t\treturn influxdb.ErrDatabaseNotFound(database)\n\t} else if rp := di.RetentionPolicy(rpi.Name); rp != nil {\n\t\t// RP with that name already exists. Make sure they're the same.\n\t\tif rp.ReplicaN != rpi.ReplicaN || rp.Duration != rpi.Duration || rp.ShardGroupDuration != rpi.ShardGroupDuration {\n\t\t\treturn ErrRetentionPolicyExists\n\t\t}\n\t\t// if they want to make it default, and it's not the default, it's not an identical command so it's an error\n\t\tif makeDefault && di.DefaultRetentionPolicy != rpi.Name {\n\t\t\treturn ErrRetentionPolicyConflict\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Append copy of new policy.\n\tdi.RetentionPolicies = append(di.RetentionPolicies, *rpi)\n\n\t// Set the default if needed\n\tif makeDefault {\n\t\tdi.DefaultRetentionPolicy = rpi.Name\n\t}\n\n\treturn nil\n}\n\n// DropRetentionPolicy removes a retention policy from a database by name.\nfunc (data *Data) DropRetentionPolicy(database, name string) error {\n\t// Find database.\n\tdi := data.Database(database)\n\tif di == nil {\n\t\t// no database? 
no problem\n\t\treturn nil\n\t}\n\n\t// Remove from list.\n\tfor i := range di.RetentionPolicies {\n\t\tif di.RetentionPolicies[i].Name == name {\n\t\t\tdi.RetentionPolicies = append(di.RetentionPolicies[:i], di.RetentionPolicies[i+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// RetentionPolicyUpdate represents retention policy fields to be updated.\ntype RetentionPolicyUpdate struct {\n\tName               *string\n\tDuration           *time.Duration\n\tReplicaN           *int\n\tShardGroupDuration *time.Duration\n}\n\n// SetName sets the RetentionPolicyUpdate.Name.\nfunc (rpu *RetentionPolicyUpdate) SetName(v string) { rpu.Name = &v }\n\n// SetDuration sets the RetentionPolicyUpdate.Duration.\nfunc (rpu *RetentionPolicyUpdate) SetDuration(v time.Duration) { rpu.Duration = &v }\n\n// SetReplicaN sets the RetentionPolicyUpdate.ReplicaN.\nfunc (rpu *RetentionPolicyUpdate) SetReplicaN(v int) { rpu.ReplicaN = &v }\n\n// SetShardGroupDuration sets the RetentionPolicyUpdate.ShardGroupDuration.\nfunc (rpu *RetentionPolicyUpdate) SetShardGroupDuration(v time.Duration) { rpu.ShardGroupDuration = &v }\n\n// UpdateRetentionPolicy updates an existing retention policy.\nfunc (data *Data) UpdateRetentionPolicy(database, name string, rpu *RetentionPolicyUpdate, makeDefault bool) error {\n\t// Find database.\n\tdi := data.Database(database)\n\tif di == nil {\n\t\treturn influxdb.ErrDatabaseNotFound(database)\n\t}\n\n\t// Find policy.\n\trpi := di.RetentionPolicy(name)\n\tif rpi == nil {\n\t\treturn influxdb.ErrRetentionPolicyNotFound(name)\n\t}\n\n\t// Ensure new policy doesn't match an existing policy.\n\tif rpu.Name != nil && *rpu.Name != name && di.RetentionPolicy(*rpu.Name) != nil {\n\t\treturn ErrRetentionPolicyNameExists\n\t}\n\n\t// Enforce duration of at least MinRetentionPolicyDuration\n\tif rpu.Duration != nil && *rpu.Duration < MinRetentionPolicyDuration && *rpu.Duration != 0 {\n\t\treturn ErrRetentionPolicyDurationTooLow\n\t}\n\n\t// Enforce duration is at 
least the shard duration\n\tif (rpu.Duration != nil && *rpu.Duration > 0 &&\n\t\t((rpu.ShardGroupDuration != nil && *rpu.Duration < *rpu.ShardGroupDuration) ||\n\t\t\t(rpu.ShardGroupDuration == nil && *rpu.Duration < rpi.ShardGroupDuration))) ||\n\t\t(rpu.Duration == nil && rpi.Duration > 0 &&\n\t\t\trpu.ShardGroupDuration != nil && rpi.Duration < *rpu.ShardGroupDuration) {\n\t\treturn ErrIncompatibleDurations\n\t}\n\n\t// Update fields.\n\tif rpu.Name != nil {\n\t\trpi.Name = *rpu.Name\n\t}\n\tif rpu.Duration != nil {\n\t\trpi.Duration = *rpu.Duration\n\t}\n\tif rpu.ReplicaN != nil {\n\t\trpi.ReplicaN = *rpu.ReplicaN\n\t}\n\tif rpu.ShardGroupDuration != nil {\n\t\trpi.ShardGroupDuration = normalisedShardDuration(*rpu.ShardGroupDuration, rpi.Duration)\n\t}\n\n\tif di.DefaultRetentionPolicy != rpi.Name && makeDefault {\n\t\tdi.DefaultRetentionPolicy = rpi.Name\n\t}\n\n\treturn nil\n}\n\n// DropShard removes a shard by ID.\n//\n// DropShard won't return an error if the shard can't be found, which\n// allows the command to be re-run in the case that the meta store\n// succeeds but a data node fails.\nfunc (data *Data) DropShard(id uint64) {\n\tfound := -1\n\tfor dbidx, dbi := range data.Databases {\n\t\tfor rpidx, rpi := range dbi.RetentionPolicies {\n\t\t\tfor sgidx, sg := range rpi.ShardGroups {\n\t\t\t\tfor sidx, s := range sg.Shards {\n\t\t\t\t\tif s.ID == id {\n\t\t\t\t\t\tfound = sidx\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif found > -1 {\n\t\t\t\t\tshards := sg.Shards\n\t\t\t\t\tdata.Databases[dbidx].RetentionPolicies[rpidx].ShardGroups[sgidx].Shards = append(shards[:found], shards[found+1:]...)\n\n\t\t\t\t\tif len(shards) == 1 {\n\t\t\t\t\t\t// We just deleted the last shard in the shard group.\n\t\t\t\t\t\tdata.Databases[dbidx].RetentionPolicies[rpidx].ShardGroups[sgidx].DeletedAt = time.Now()\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n// ShardGroups returns a list of all shard groups on a database and retention 
policy.\nfunc (data *Data) ShardGroups(database, policy string) ([]ShardGroupInfo, error) {\n\t// Find retention policy.\n\trpi, err := data.RetentionPolicy(database, policy)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if rpi == nil {\n\t\treturn nil, influxdb.ErrRetentionPolicyNotFound(policy)\n\t}\n\tgroups := make([]ShardGroupInfo, 0, len(rpi.ShardGroups))\n\tfor _, g := range rpi.ShardGroups {\n\t\tif g.Deleted() {\n\t\t\tcontinue\n\t\t}\n\t\tgroups = append(groups, g)\n\t}\n\treturn groups, nil\n}\n\n// ShardGroupsByTimeRange returns a list of all shard groups on a database and policy that may contain data\n// for the specified time range. Shard groups are sorted by start time.\nfunc (data *Data) ShardGroupsByTimeRange(database, policy string, tmin, tmax time.Time) ([]ShardGroupInfo, error) {\n\t// Find retention policy.\n\trpi, err := data.RetentionPolicy(database, policy)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if rpi == nil {\n\t\treturn nil, influxdb.ErrRetentionPolicyNotFound(policy)\n\t}\n\tgroups := make([]ShardGroupInfo, 0, len(rpi.ShardGroups))\n\tfor _, g := range rpi.ShardGroups {\n\t\tif g.Deleted() || !g.Overlaps(tmin, tmax) {\n\t\t\tcontinue\n\t\t}\n\t\tgroups = append(groups, g)\n\t}\n\treturn groups, nil\n}\n\n// ShardGroupByTimestamp returns the shard group on a database and policy for a given timestamp.\nfunc (data *Data) ShardGroupByTimestamp(database, policy string, timestamp time.Time) (*ShardGroupInfo, error) {\n\t// Find retention policy.\n\trpi, err := data.RetentionPolicy(database, policy)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if rpi == nil {\n\t\treturn nil, influxdb.ErrRetentionPolicyNotFound(policy)\n\t}\n\n\treturn rpi.ShardGroupByTimestamp(timestamp), nil\n}\n\n// CreateShardGroup creates a shard group on a database and policy for a given timestamp.\nfunc (data *Data) CreateShardGroup(database, policy string, timestamp time.Time) error {\n\t// Find retention policy.\n\trpi, err := 
data.RetentionPolicy(database, policy)\n\tif err != nil {\n\t\treturn err\n\t} else if rpi == nil {\n\t\treturn influxdb.ErrRetentionPolicyNotFound(policy)\n\t}\n\n\t// Verify that shard group doesn't already exist for this timestamp.\n\tif rpi.ShardGroupByTimestamp(timestamp) != nil {\n\t\treturn nil\n\t}\n\n\t// Create the shard group.\n\tdata.MaxShardGroupID++\n\tsgi := ShardGroupInfo{}\n\tsgi.ID = data.MaxShardGroupID\n\tsgi.StartTime = timestamp.Truncate(rpi.ShardGroupDuration).UTC()\n\tsgi.EndTime = sgi.StartTime.Add(rpi.ShardGroupDuration).UTC()\n\tif sgi.EndTime.After(time.Unix(0, models.MaxNanoTime)) {\n\t\t// Shard group range is [start, end) so add one to the max time.\n\t\tsgi.EndTime = time.Unix(0, models.MaxNanoTime+1)\n\t}\n\n\tdata.MaxShardID++\n\tsgi.Shards = []ShardInfo{\n\t\t{ID: data.MaxShardID},\n\t}\n\n\t// Retention policy has a new shard group, so update the policy. Shard\n\t// Groups must be stored in sorted order, as other parts of the system\n\t// assume this to be the case.\n\trpi.ShardGroups = append(rpi.ShardGroups, sgi)\n\tsort.Sort(ShardGroupInfos(rpi.ShardGroups))\n\n\treturn nil\n}\n\n// DeleteShardGroup removes a shard group from a database and retention policy by id.\nfunc (data *Data) DeleteShardGroup(database, policy string, id uint64) error {\n\t// Find retention policy.\n\trpi, err := data.RetentionPolicy(database, policy)\n\tif err != nil {\n\t\treturn err\n\t} else if rpi == nil {\n\t\treturn influxdb.ErrRetentionPolicyNotFound(policy)\n\t}\n\n\t// Find shard group by ID and set its deletion timestamp.\n\tfor i := range rpi.ShardGroups {\n\t\tif rpi.ShardGroups[i].ID == id {\n\t\t\trpi.ShardGroups[i].DeletedAt = time.Now().UTC()\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn ErrShardGroupNotFound\n}\n\n// CreateContinuousQuery adds a named continuous query to a database.\nfunc (data *Data) CreateContinuousQuery(database, name, query string) error {\n\tdi := data.Database(database)\n\tif di == nil {\n\t\treturn 
influxdb.ErrDatabaseNotFound(database)\n\t}\n\n\t// Ensure the name doesn't already exist.\n\tfor _, cq := range di.ContinuousQueries {\n\t\tif cq.Name == name {\n\t\t\t// If the query string is the same, we'll silently return,\n\t\t\t// otherwise we'll assume the user might be trying to\n\t\t\t// overwrite an existing CQ with a different query.\n\t\t\tif strings.ToLower(cq.Query) == strings.ToLower(query) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn ErrContinuousQueryExists\n\t\t}\n\t}\n\n\t// Append new query.\n\tdi.ContinuousQueries = append(di.ContinuousQueries, ContinuousQueryInfo{\n\t\tName:  name,\n\t\tQuery: query,\n\t})\n\n\treturn nil\n}\n\n// DropContinuousQuery removes a continuous query.\nfunc (data *Data) DropContinuousQuery(database, name string) error {\n\tdi := data.Database(database)\n\tif di == nil {\n\t\treturn influxdb.ErrDatabaseNotFound(database)\n\t}\n\n\tfor i := range di.ContinuousQueries {\n\t\tif di.ContinuousQueries[i].Name == name {\n\t\t\tdi.ContinuousQueries = append(di.ContinuousQueries[:i], di.ContinuousQueries[i+1:]...)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn ErrContinuousQueryNotFound\n}\n\n// validateURL returns an error if the URL does not have a port or uses a scheme other than UDP or HTTP.\nfunc validateURL(input string) error {\n\tu, err := url.Parse(input)\n\tif err != nil {\n\t\treturn ErrInvalidSubscriptionURL(input)\n\t}\n\n\tif u.Scheme != \"udp\" && u.Scheme != \"http\" && u.Scheme != \"https\" {\n\t\treturn ErrInvalidSubscriptionURL(input)\n\t}\n\n\t_, port, err := net.SplitHostPort(u.Host)\n\tif err != nil || port == \"\" {\n\t\treturn ErrInvalidSubscriptionURL(input)\n\t}\n\n\treturn nil\n}\n\n// CreateSubscription adds a named subscription to a database and retention policy.\nfunc (data *Data) CreateSubscription(database, rp, name, mode string, destinations []string) error {\n\tfor _, d := range destinations {\n\t\tif err := validateURL(d); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\trpi, err := 
data.RetentionPolicy(database, rp)\n\tif err != nil {\n\t\treturn err\n\t} else if rpi == nil {\n\t\treturn influxdb.ErrRetentionPolicyNotFound(rp)\n\t}\n\n\t// Ensure the name doesn't already exist.\n\tfor i := range rpi.Subscriptions {\n\t\tif rpi.Subscriptions[i].Name == name {\n\t\t\treturn ErrSubscriptionExists\n\t\t}\n\t}\n\n\t// Append new query.\n\trpi.Subscriptions = append(rpi.Subscriptions, SubscriptionInfo{\n\t\tName:         name,\n\t\tMode:         mode,\n\t\tDestinations: destinations,\n\t})\n\n\treturn nil\n}\n\n// DropSubscription removes a subscription.\nfunc (data *Data) DropSubscription(database, rp, name string) error {\n\trpi, err := data.RetentionPolicy(database, rp)\n\tif err != nil {\n\t\treturn err\n\t} else if rpi == nil {\n\t\treturn influxdb.ErrRetentionPolicyNotFound(rp)\n\t}\n\n\tfor i := range rpi.Subscriptions {\n\t\tif rpi.Subscriptions[i].Name == name {\n\t\t\trpi.Subscriptions = append(rpi.Subscriptions[:i], rpi.Subscriptions[i+1:]...)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn ErrSubscriptionNotFound\n}\n\nfunc (data *Data) user(username string) *UserInfo {\n\tfor i := range data.Users {\n\t\tif data.Users[i].Name == username {\n\t\t\treturn &data.Users[i]\n\t\t}\n\t}\n\treturn nil\n}\n\n// User returns a user by username.\nfunc (data *Data) User(username string) User {\n\tu := data.user(username)\n\tif u == nil {\n\t\t// prevent non-nil interface with nil pointer\n\t\treturn nil\n\t}\n\treturn u\n}\n\n// CreateUser creates a new user.\nfunc (data *Data) CreateUser(name, hash string, admin bool) error {\n\t// Ensure the user doesn't already exist.\n\tif name == \"\" {\n\t\treturn ErrUsernameRequired\n\t} else if data.User(name) != nil {\n\t\treturn ErrUserExists\n\t}\n\n\t// Append new user.\n\tdata.Users = append(data.Users, UserInfo{\n\t\tName:  name,\n\t\tHash:  hash,\n\t\tAdmin: admin,\n\t})\n\n\t// We know there is now at least one admin user.\n\tif admin {\n\t\tdata.adminUserExists = true\n\t}\n\n\treturn nil\n}\n\n// 
DropUser removes an existing user by name.\nfunc (data *Data) DropUser(name string) error {\n\tfor i := range data.Users {\n\t\tif data.Users[i].Name == name {\n\t\t\twasAdmin := data.Users[i].Admin\n\t\t\tdata.Users = append(data.Users[:i], data.Users[i+1:]...)\n\n\t\t\t// Maybe we dropped the only admin user?\n\t\t\tif wasAdmin {\n\t\t\t\tdata.adminUserExists = data.hasAdminUser()\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn ErrUserNotFound\n}\n\n// UpdateUser updates the password hash of an existing user.\nfunc (data *Data) UpdateUser(name, hash string) error {\n\tfor i := range data.Users {\n\t\tif data.Users[i].Name == name {\n\t\t\tdata.Users[i].Hash = hash\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn ErrUserNotFound\n}\n\n// CloneUsers returns a copy of the user infos.\nfunc (data *Data) CloneUsers() []UserInfo {\n\tif len(data.Users) == 0 {\n\t\treturn []UserInfo{}\n\t}\n\tusers := make([]UserInfo, len(data.Users))\n\tfor i := range data.Users {\n\t\tusers[i] = data.Users[i].clone()\n\t}\n\n\treturn users\n}\n\n// SetPrivilege sets a privilege for a user on a database.\nfunc (data *Data) SetPrivilege(name, database string, p influxql.Privilege) error {\n\tui := data.user(name)\n\tif ui == nil {\n\t\treturn ErrUserNotFound\n\t}\n\n\tif ui.Privileges == nil {\n\t\tui.Privileges = make(map[string]influxql.Privilege)\n\t}\n\tui.Privileges[database] = p\n\n\treturn nil\n}\n\n// SetAdminPrivilege sets the admin privilege for a user.\nfunc (data *Data) SetAdminPrivilege(name string, admin bool) error {\n\tui := data.user(name)\n\tif ui == nil {\n\t\treturn ErrUserNotFound\n\t}\n\n\tui.Admin = admin\n\n\t// We could have promoted or revoked the only admin. 
Check if an admin\n\t// user exists.\n\tdata.adminUserExists = data.hasAdminUser()\n\treturn nil\n}\n\n// AdminUserExists returns true if an admin user exists.\nfunc (data Data) AdminUserExists() bool {\n\treturn data.adminUserExists\n}\n\n// UserPrivileges gets the privileges for a user.\nfunc (data *Data) UserPrivileges(name string) (map[string]influxql.Privilege, error) {\n\tui := data.user(name)\n\tif ui == nil {\n\t\treturn nil, ErrUserNotFound\n\t}\n\n\treturn ui.Privileges, nil\n}\n\n// UserPrivilege gets the privilege for a user on a database.\nfunc (data *Data) UserPrivilege(name, database string) (*influxql.Privilege, error) {\n\tui := data.user(name)\n\tif ui == nil {\n\t\treturn nil, ErrUserNotFound\n\t}\n\n\tfor db, p := range ui.Privileges {\n\t\tif db == database {\n\t\t\treturn &p, nil\n\t\t}\n\t}\n\n\treturn influxql.NewPrivilege(influxql.NoPrivileges), nil\n}\n\n// Clone returns a copy of data with a new version.\nfunc (data *Data) Clone() *Data {\n\tother := *data\n\n\tother.Databases = data.CloneDatabases()\n\tother.Users = data.CloneUsers()\n\n\treturn &other\n}\n\n// marshal serializes data to a protobuf representation.\nfunc (data *Data) marshal() *internal.Data {\n\tpb := &internal.Data{\n\t\tTerm:      proto.Uint64(data.Term),\n\t\tIndex:     proto.Uint64(data.Index),\n\t\tClusterID: proto.Uint64(data.ClusterID),\n\n\t\tMaxShardGroupID: proto.Uint64(data.MaxShardGroupID),\n\t\tMaxShardID:      proto.Uint64(data.MaxShardID),\n\n\t\t// Need this for reverse compatibility\n\t\tMaxNodeID: proto.Uint64(0),\n\t}\n\n\tpb.Databases = make([]*internal.DatabaseInfo, len(data.Databases))\n\tfor i := range data.Databases {\n\t\tpb.Databases[i] = data.Databases[i].marshal()\n\t}\n\n\tpb.Users = make([]*internal.UserInfo, len(data.Users))\n\tfor i := range data.Users {\n\t\tpb.Users[i] = data.Users[i].marshal()\n\t}\n\n\treturn pb\n}\n\n// unmarshal deserializes from a protobuf representation.\nfunc (data *Data) unmarshal(pb *internal.Data) 
{\n\tdata.Term = pb.GetTerm()\n\tdata.Index = pb.GetIndex()\n\tdata.ClusterID = pb.GetClusterID()\n\n\tdata.MaxShardGroupID = pb.GetMaxShardGroupID()\n\tdata.MaxShardID = pb.GetMaxShardID()\n\n\tdata.Databases = make([]DatabaseInfo, len(pb.GetDatabases()))\n\tfor i, x := range pb.GetDatabases() {\n\t\tdata.Databases[i].unmarshal(x)\n\t}\n\n\tdata.Users = make([]UserInfo, len(pb.GetUsers()))\n\tfor i, x := range pb.GetUsers() {\n\t\tdata.Users[i].unmarshal(x)\n\t}\n\n\t// Exhaustively determine if there is an admin user. The marshalled cache\n\t// value may not be correct.\n\tdata.adminUserExists = data.hasAdminUser()\n}\n\n// MarshalBinary encodes the metadata to a binary format.\nfunc (data *Data) MarshalBinary() ([]byte, error) {\n\treturn proto.Marshal(data.marshal())\n}\n\n// UnmarshalBinary decodes the object from a binary format.\nfunc (data *Data) UnmarshalBinary(buf []byte) error {\n\tvar pb internal.Data\n\tif err := proto.Unmarshal(buf, &pb); err != nil {\n\t\treturn err\n\t}\n\tdata.unmarshal(&pb)\n\treturn nil\n}\n\n// hasAdminUser exhaustively checks for the presence of at least one admin\n// user.\nfunc (data *Data) hasAdminUser() bool {\n\tfor _, u := range data.Users {\n\t\tif u.Admin {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// NodeInfo represents information about a single node in the cluster.\ntype NodeInfo struct {\n\tID      uint64\n\tHost    string\n\tTCPHost string\n}\n\n// NodeInfos is a slice of NodeInfo used for sorting\ntype NodeInfos []NodeInfo\n\n// Len implements sort.Interface.\nfunc (n NodeInfos) Len() int { return len(n) }\n\n// Swap implements sort.Interface.\nfunc (n NodeInfos) Swap(i, j int) { n[i], n[j] = n[j], n[i] }\n\n// Less implements sort.Interface.\nfunc (n NodeInfos) Less(i, j int) bool { return n[i].ID < n[j].ID }\n\n// DatabaseInfo represents information about a database in the system.\ntype DatabaseInfo struct {\n\tName                   string\n\tDefaultRetentionPolicy string\n\tRetentionPolicies      
[]RetentionPolicyInfo\n\tContinuousQueries      []ContinuousQueryInfo\n}\n\n// RetentionPolicy returns a retention policy by name.\nfunc (di DatabaseInfo) RetentionPolicy(name string) *RetentionPolicyInfo {\n\tif name == \"\" {\n\t\tif di.DefaultRetentionPolicy == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tname = di.DefaultRetentionPolicy\n\t}\n\n\tfor i := range di.RetentionPolicies {\n\t\tif di.RetentionPolicies[i].Name == name {\n\t\t\treturn &di.RetentionPolicies[i]\n\t\t}\n\t}\n\treturn nil\n}\n\n// ShardInfos returns a list of all shards' info for the database.\nfunc (di DatabaseInfo) ShardInfos() []ShardInfo {\n\tshards := map[uint64]*ShardInfo{}\n\tfor i := range di.RetentionPolicies {\n\t\tfor j := range di.RetentionPolicies[i].ShardGroups {\n\t\t\tsg := di.RetentionPolicies[i].ShardGroups[j]\n\t\t\t// Skip deleted shard groups\n\t\t\tif sg.Deleted() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor k := range sg.Shards {\n\t\t\t\tsi := &di.RetentionPolicies[i].ShardGroups[j].Shards[k]\n\t\t\t\tshards[si.ID] = si\n\t\t\t}\n\t\t}\n\t}\n\n\tinfos := make([]ShardInfo, 0, len(shards))\n\tfor _, info := range shards {\n\t\tinfos = append(infos, *info)\n\t}\n\n\treturn infos\n}\n\n// clone returns a deep copy of di.\nfunc (di DatabaseInfo) clone() DatabaseInfo {\n\tother := di\n\n\tif di.RetentionPolicies != nil {\n\t\tother.RetentionPolicies = make([]RetentionPolicyInfo, len(di.RetentionPolicies))\n\t\tfor i := range di.RetentionPolicies {\n\t\t\tother.RetentionPolicies[i] = di.RetentionPolicies[i].clone()\n\t\t}\n\t}\n\n\t// Copy continuous queries.\n\tif di.ContinuousQueries != nil {\n\t\tother.ContinuousQueries = make([]ContinuousQueryInfo, len(di.ContinuousQueries))\n\t\tfor i := range di.ContinuousQueries {\n\t\t\tother.ContinuousQueries[i] = di.ContinuousQueries[i].clone()\n\t\t}\n\t}\n\n\treturn other\n}\n\n// marshal serializes to a protobuf representation.\nfunc (di DatabaseInfo) marshal() *internal.DatabaseInfo {\n\tpb := &internal.DatabaseInfo{}\n\tpb.Name = 
proto.String(di.Name)\n\tpb.DefaultRetentionPolicy = proto.String(di.DefaultRetentionPolicy)\n\n\tpb.RetentionPolicies = make([]*internal.RetentionPolicyInfo, len(di.RetentionPolicies))\n\tfor i := range di.RetentionPolicies {\n\t\tpb.RetentionPolicies[i] = di.RetentionPolicies[i].marshal()\n\t}\n\n\tpb.ContinuousQueries = make([]*internal.ContinuousQueryInfo, len(di.ContinuousQueries))\n\tfor i := range di.ContinuousQueries {\n\t\tpb.ContinuousQueries[i] = di.ContinuousQueries[i].marshal()\n\t}\n\treturn pb\n}\n\n// unmarshal deserializes from a protobuf representation.\nfunc (di *DatabaseInfo) unmarshal(pb *internal.DatabaseInfo) {\n\tdi.Name = pb.GetName()\n\tdi.DefaultRetentionPolicy = pb.GetDefaultRetentionPolicy()\n\n\tif len(pb.GetRetentionPolicies()) > 0 {\n\t\tdi.RetentionPolicies = make([]RetentionPolicyInfo, len(pb.GetRetentionPolicies()))\n\t\tfor i, x := range pb.GetRetentionPolicies() {\n\t\t\tdi.RetentionPolicies[i].unmarshal(x)\n\t\t}\n\t}\n\n\tif len(pb.GetContinuousQueries()) > 0 {\n\t\tdi.ContinuousQueries = make([]ContinuousQueryInfo, len(pb.GetContinuousQueries()))\n\t\tfor i, x := range pb.GetContinuousQueries() {\n\t\t\tdi.ContinuousQueries[i].unmarshal(x)\n\t\t}\n\t}\n}\n\n// RetentionPolicySpec represents the specification for a new retention policy.\ntype RetentionPolicySpec struct {\n\tName               string\n\tReplicaN           *int\n\tDuration           *time.Duration\n\tShardGroupDuration time.Duration\n}\n\n// NewRetentionPolicyInfo creates a new retention policy info from the specification.\nfunc (s *RetentionPolicySpec) NewRetentionPolicyInfo() *RetentionPolicyInfo {\n\treturn DefaultRetentionPolicyInfo().Apply(s)\n}\n\n// Matches checks if this retention policy specification matches\n// an existing retention policy.\nfunc (s *RetentionPolicySpec) Matches(rpi *RetentionPolicyInfo) bool {\n\tif rpi == nil {\n\t\treturn false\n\t} else if s.Name != \"\" && s.Name != rpi.Name {\n\t\treturn false\n\t} else if s.Duration != nil && 
*s.Duration != rpi.Duration {\n\t\treturn false\n\t} else if s.ReplicaN != nil && *s.ReplicaN != rpi.ReplicaN {\n\t\treturn false\n\t}\n\n\t// Normalise ShardDuration before comparing to any existing retention policies.\n\t// Normalize with the retention policy info's duration instead of the spec\n\t// since they should be the same and we're performing a comparison.\n\tsgDuration := normalisedShardDuration(s.ShardGroupDuration, rpi.Duration)\n\treturn sgDuration == rpi.ShardGroupDuration\n}\n\n// marshal serializes to a protobuf representation.\nfunc (s *RetentionPolicySpec) marshal() *internal.RetentionPolicySpec {\n\tpb := &internal.RetentionPolicySpec{}\n\tif s.Name != \"\" {\n\t\tpb.Name = proto.String(s.Name)\n\t}\n\tif s.Duration != nil {\n\t\tpb.Duration = proto.Int64(int64(*s.Duration))\n\t}\n\tif s.ShardGroupDuration > 0 {\n\t\tpb.ShardGroupDuration = proto.Int64(int64(s.ShardGroupDuration))\n\t}\n\tif s.ReplicaN != nil {\n\t\tpb.ReplicaN = proto.Uint32(uint32(*s.ReplicaN))\n\t}\n\treturn pb\n}\n\n// unmarshal deserializes from a protobuf representation.\nfunc (s *RetentionPolicySpec) unmarshal(pb *internal.RetentionPolicySpec) {\n\tif pb.Name != nil {\n\t\ts.Name = pb.GetName()\n\t}\n\tif pb.Duration != nil {\n\t\tduration := time.Duration(pb.GetDuration())\n\t\ts.Duration = &duration\n\t}\n\tif pb.ShardGroupDuration != nil {\n\t\ts.ShardGroupDuration = time.Duration(pb.GetShardGroupDuration())\n\t}\n\tif pb.ReplicaN != nil {\n\t\treplicaN := int(pb.GetReplicaN())\n\t\ts.ReplicaN = &replicaN\n\t}\n}\n\n// MarshalBinary encodes RetentionPolicySpec to a binary format.\nfunc (s *RetentionPolicySpec) MarshalBinary() ([]byte, error) {\n\treturn proto.Marshal(s.marshal())\n}\n\n// UnmarshalBinary decodes RetentionPolicySpec from a binary format.\nfunc (s *RetentionPolicySpec) UnmarshalBinary(data []byte) error {\n\tvar pb internal.RetentionPolicySpec\n\tif err := proto.Unmarshal(data, &pb); err != nil {\n\t\treturn err\n\t}\n\ts.unmarshal(&pb)\n\treturn 
nil\n}\n\n// RetentionPolicyInfo represents metadata about a retention policy.\ntype RetentionPolicyInfo struct {\n\tName               string\n\tReplicaN           int\n\tDuration           time.Duration\n\tShardGroupDuration time.Duration\n\tShardGroups        []ShardGroupInfo\n\tSubscriptions      []SubscriptionInfo\n}\n\n// NewRetentionPolicyInfo returns a new instance of RetentionPolicyInfo\n// with default replication and duration.\nfunc NewRetentionPolicyInfo(name string) *RetentionPolicyInfo {\n\treturn &RetentionPolicyInfo{\n\t\tName:     name,\n\t\tReplicaN: DefaultRetentionPolicyReplicaN,\n\t\tDuration: DefaultRetentionPolicyDuration,\n\t}\n}\n\n// DefaultRetentionPolicyInfo returns a new instance of RetentionPolicyInfo\n// with default name, replication, and duration.\nfunc DefaultRetentionPolicyInfo() *RetentionPolicyInfo {\n\treturn NewRetentionPolicyInfo(DefaultRetentionPolicyName)\n}\n\n// Apply applies a specification to the retention policy info.\nfunc (rpi *RetentionPolicyInfo) Apply(spec *RetentionPolicySpec) *RetentionPolicyInfo {\n\trp := &RetentionPolicyInfo{\n\t\tName:               rpi.Name,\n\t\tReplicaN:           rpi.ReplicaN,\n\t\tDuration:           rpi.Duration,\n\t\tShardGroupDuration: rpi.ShardGroupDuration,\n\t}\n\tif spec.Name != \"\" {\n\t\trp.Name = spec.Name\n\t}\n\tif spec.ReplicaN != nil {\n\t\trp.ReplicaN = *spec.ReplicaN\n\t}\n\tif spec.Duration != nil {\n\t\trp.Duration = *spec.Duration\n\t}\n\trp.ShardGroupDuration = normalisedShardDuration(spec.ShardGroupDuration, rp.Duration)\n\treturn rp\n}\n\n// ShardGroupByTimestamp returns the shard group in the policy that contains the timestamp,\n// or nil if no shard group matches.\nfunc (rpi *RetentionPolicyInfo) ShardGroupByTimestamp(timestamp time.Time) *ShardGroupInfo {\n\tfor i := range rpi.ShardGroups {\n\t\tsgi := &rpi.ShardGroups[i]\n\t\tif sgi.Contains(timestamp) && !sgi.Deleted() && (!sgi.Truncated() || timestamp.Before(sgi.TruncatedAt)) {\n\t\t\treturn 
&rpi.ShardGroups[i]\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ExpiredShardGroups returns the Shard Groups which are considered expired, for the given time.\nfunc (rpi *RetentionPolicyInfo) ExpiredShardGroups(t time.Time) []*ShardGroupInfo {\n\tvar groups = make([]*ShardGroupInfo, 0)\n\tfor i := range rpi.ShardGroups {\n\t\tif rpi.ShardGroups[i].Deleted() {\n\t\t\tcontinue\n\t\t}\n\t\tif rpi.Duration != 0 && rpi.ShardGroups[i].EndTime.Add(rpi.Duration).Before(t) {\n\t\t\tgroups = append(groups, &rpi.ShardGroups[i])\n\t\t}\n\t}\n\treturn groups\n}\n\n// DeletedShardGroups returns the Shard Groups which are marked as deleted.\nfunc (rpi *RetentionPolicyInfo) DeletedShardGroups() []*ShardGroupInfo {\n\tvar groups = make([]*ShardGroupInfo, 0)\n\tfor i := range rpi.ShardGroups {\n\t\tif rpi.ShardGroups[i].Deleted() {\n\t\t\tgroups = append(groups, &rpi.ShardGroups[i])\n\t\t}\n\t}\n\treturn groups\n}\n\n// marshal serializes to a protobuf representation.\nfunc (rpi *RetentionPolicyInfo) marshal() *internal.RetentionPolicyInfo {\n\tpb := &internal.RetentionPolicyInfo{\n\t\tName:               proto.String(rpi.Name),\n\t\tReplicaN:           proto.Uint32(uint32(rpi.ReplicaN)),\n\t\tDuration:           proto.Int64(int64(rpi.Duration)),\n\t\tShardGroupDuration: proto.Int64(int64(rpi.ShardGroupDuration)),\n\t}\n\n\tpb.ShardGroups = make([]*internal.ShardGroupInfo, len(rpi.ShardGroups))\n\tfor i, sgi := range rpi.ShardGroups {\n\t\tpb.ShardGroups[i] = sgi.marshal()\n\t}\n\n\tpb.Subscriptions = make([]*internal.SubscriptionInfo, len(rpi.Subscriptions))\n\tfor i, sub := range rpi.Subscriptions {\n\t\tpb.Subscriptions[i] = sub.marshal()\n\t}\n\n\treturn pb\n}\n\n// unmarshal deserializes from a protobuf representation.\nfunc (rpi *RetentionPolicyInfo) unmarshal(pb *internal.RetentionPolicyInfo) {\n\trpi.Name = pb.GetName()\n\trpi.ReplicaN = int(pb.GetReplicaN())\n\trpi.Duration = time.Duration(pb.GetDuration())\n\trpi.ShardGroupDuration = 
time.Duration(pb.GetShardGroupDuration())\n\n\tif len(pb.GetShardGroups()) > 0 {\n\t\trpi.ShardGroups = make([]ShardGroupInfo, len(pb.GetShardGroups()))\n\t\tfor i, x := range pb.GetShardGroups() {\n\t\t\trpi.ShardGroups[i].unmarshal(x)\n\t\t}\n\t}\n\tif len(pb.GetSubscriptions()) > 0 {\n\t\trpi.Subscriptions = make([]SubscriptionInfo, len(pb.GetSubscriptions()))\n\t\tfor i, x := range pb.GetSubscriptions() {\n\t\t\trpi.Subscriptions[i].unmarshal(x)\n\t\t}\n\t}\n}\n\n// clone returns a deep copy of rpi.\nfunc (rpi RetentionPolicyInfo) clone() RetentionPolicyInfo {\n\tother := rpi\n\n\tif rpi.ShardGroups != nil {\n\t\tother.ShardGroups = make([]ShardGroupInfo, len(rpi.ShardGroups))\n\t\tfor i := range rpi.ShardGroups {\n\t\t\tother.ShardGroups[i] = rpi.ShardGroups[i].clone()\n\t\t}\n\t}\n\n\treturn other\n}\n\n// MarshalBinary encodes rpi to a binary format.\nfunc (rpi *RetentionPolicyInfo) MarshalBinary() ([]byte, error) {\n\treturn proto.Marshal(rpi.marshal())\n}\n\n// UnmarshalBinary decodes rpi from a binary format.\nfunc (rpi *RetentionPolicyInfo) UnmarshalBinary(data []byte) error {\n\tvar pb internal.RetentionPolicyInfo\n\tif err := proto.Unmarshal(data, &pb); err != nil {\n\t\treturn err\n\t}\n\trpi.unmarshal(&pb)\n\treturn nil\n}\n\n// shardGroupDuration returns the default duration for a shard group based on a policy duration.\nfunc shardGroupDuration(d time.Duration) time.Duration {\n\tif d >= 180*24*time.Hour || d == 0 { // 6 months or 0\n\t\treturn 7 * 24 * time.Hour\n\t} else if d >= 2*24*time.Hour { // 2 days\n\t\treturn 1 * 24 * time.Hour\n\t}\n\treturn 1 * time.Hour\n}\n\n// normalisedShardDuration returns normalised shard duration based on a policy duration.\nfunc normalisedShardDuration(sgd, d time.Duration) time.Duration {\n\t// If it is zero, it likely wasn't specified, so we default to the shard group duration\n\tif sgd == 0 {\n\t\treturn shardGroupDuration(d)\n\t}\n\t// If it was specified, but it's less than the MinRetentionPolicyDuration, 
then normalize\n\t// to the MinRetentionPolicyDuration\n\tif sgd < MinRetentionPolicyDuration {\n\t\treturn shardGroupDuration(MinRetentionPolicyDuration)\n\t}\n\treturn sgd\n}\n\n// ShardGroupInfo represents metadata about a shard group. The DeletedAt field is important\n// because it makes it clear that a ShardGroup has been marked as deleted, and allow the system\n// to be sure that a ShardGroup is not simply missing. If the DeletedAt is set, the system can\n// safely delete any associated shards.\ntype ShardGroupInfo struct {\n\tID          uint64\n\tStartTime   time.Time\n\tEndTime     time.Time\n\tDeletedAt   time.Time\n\tShards      []ShardInfo\n\tTruncatedAt time.Time\n}\n\n// ShardGroupInfos implements sort.Interface on []ShardGroupInfo, based\n// on the StartTime field.\ntype ShardGroupInfos []ShardGroupInfo\n\n// Len implements sort.Interface.\nfunc (a ShardGroupInfos) Len() int { return len(a) }\n\n// Swap implements sort.Interface.\nfunc (a ShardGroupInfos) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\n// Less implements sort.Interface.\nfunc (a ShardGroupInfos) Less(i, j int) bool {\n\tiEnd := a[i].EndTime\n\tif a[i].Truncated() {\n\t\tiEnd = a[i].TruncatedAt\n\t}\n\n\tjEnd := a[j].EndTime\n\tif a[j].Truncated() {\n\t\tjEnd = a[j].TruncatedAt\n\t}\n\n\tif iEnd.Equal(jEnd) {\n\t\treturn a[i].StartTime.Before(a[j].StartTime)\n\t}\n\n\treturn iEnd.Before(jEnd)\n}\n\n// Contains returns true if the shard group contains data for the timestamp.\nfunc (sgi *ShardGroupInfo) Contains(timestamp time.Time) bool {\n\treturn !sgi.StartTime.After(timestamp) && sgi.EndTime.After(timestamp)\n}\n\n// Overlaps returns whether the shard group contains data for the time range between min and max\nfunc (sgi *ShardGroupInfo) Overlaps(min, max time.Time) bool {\n\treturn !sgi.StartTime.After(max) && sgi.EndTime.After(min)\n}\n\n// Deleted returns whether this ShardGroup has been deleted.\nfunc (sgi *ShardGroupInfo) Deleted() bool {\n\treturn !sgi.DeletedAt.IsZero()\n}\n\n// 
Truncated returns true if this ShardGroup has been truncated (no new writes).\nfunc (sgi *ShardGroupInfo) Truncated() bool {\n\treturn !sgi.TruncatedAt.IsZero()\n}\n\n// clone returns a deep copy of sgi.\nfunc (sgi ShardGroupInfo) clone() ShardGroupInfo {\n\tother := sgi\n\n\tif sgi.Shards != nil {\n\t\tother.Shards = make([]ShardInfo, len(sgi.Shards))\n\t\tfor i := range sgi.Shards {\n\t\t\tother.Shards[i] = sgi.Shards[i].clone()\n\t\t}\n\t}\n\n\treturn other\n}\n\n// ShardFor returns the ShardInfo for a Point hash.\nfunc (sgi *ShardGroupInfo) ShardFor(hash uint64) ShardInfo {\n\treturn sgi.Shards[hash%uint64(len(sgi.Shards))]\n}\n\n// marshal serializes to a protobuf representation.\nfunc (sgi *ShardGroupInfo) marshal() *internal.ShardGroupInfo {\n\tpb := &internal.ShardGroupInfo{\n\t\tID:        proto.Uint64(sgi.ID),\n\t\tStartTime: proto.Int64(MarshalTime(sgi.StartTime)),\n\t\tEndTime:   proto.Int64(MarshalTime(sgi.EndTime)),\n\t\tDeletedAt: proto.Int64(MarshalTime(sgi.DeletedAt)),\n\t}\n\n\tif !sgi.TruncatedAt.IsZero() {\n\t\tpb.TruncatedAt = proto.Int64(MarshalTime(sgi.TruncatedAt))\n\t}\n\n\tpb.Shards = make([]*internal.ShardInfo, len(sgi.Shards))\n\tfor i := range sgi.Shards {\n\t\tpb.Shards[i] = sgi.Shards[i].marshal()\n\t}\n\n\treturn pb\n}\n\n// unmarshal deserializes from a protobuf representation.\nfunc (sgi *ShardGroupInfo) unmarshal(pb *internal.ShardGroupInfo) {\n\tsgi.ID = pb.GetID()\n\tif i := pb.GetStartTime(); i == 0 {\n\t\tsgi.StartTime = time.Unix(0, 0).UTC()\n\t} else {\n\t\tsgi.StartTime = UnmarshalTime(i)\n\t}\n\tif i := pb.GetEndTime(); i == 0 {\n\t\tsgi.EndTime = time.Unix(0, 0).UTC()\n\t} else {\n\t\tsgi.EndTime = UnmarshalTime(i)\n\t}\n\tsgi.DeletedAt = UnmarshalTime(pb.GetDeletedAt())\n\n\tif pb != nil && pb.TruncatedAt != nil {\n\t\tsgi.TruncatedAt = UnmarshalTime(pb.GetTruncatedAt())\n\t}\n\n\tif len(pb.GetShards()) > 0 {\n\t\tsgi.Shards = make([]ShardInfo, len(pb.GetShards()))\n\t\tfor i, x := range pb.GetShards() 
{\n\t\t\tsgi.Shards[i].unmarshal(x)\n\t\t}\n\t}\n}\n\n// ShardInfo represents metadata about a shard.\ntype ShardInfo struct {\n\tID     uint64\n\tOwners []ShardOwner\n}\n\n// OwnedBy determines whether the shard's owner IDs includes nodeID.\nfunc (si ShardInfo) OwnedBy(nodeID uint64) bool {\n\tfor _, so := range si.Owners {\n\t\tif so.NodeID == nodeID {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// clone returns a deep copy of si.\nfunc (si ShardInfo) clone() ShardInfo {\n\tother := si\n\n\tif si.Owners != nil {\n\t\tother.Owners = make([]ShardOwner, len(si.Owners))\n\t\tfor i := range si.Owners {\n\t\t\tother.Owners[i] = si.Owners[i].clone()\n\t\t}\n\t}\n\n\treturn other\n}\n\n// marshal serializes to a protobuf representation.\nfunc (si ShardInfo) marshal() *internal.ShardInfo {\n\tpb := &internal.ShardInfo{\n\t\tID: proto.Uint64(si.ID),\n\t}\n\n\tpb.Owners = make([]*internal.ShardOwner, len(si.Owners))\n\tfor i := range si.Owners {\n\t\tpb.Owners[i] = si.Owners[i].marshal()\n\t}\n\n\treturn pb\n}\n\n// UnmarshalBinary decodes the object from a binary format.\nfunc (si *ShardInfo) UnmarshalBinary(buf []byte) error {\n\tvar pb internal.ShardInfo\n\tif err := proto.Unmarshal(buf, &pb); err != nil {\n\t\treturn err\n\t}\n\tsi.unmarshal(&pb)\n\treturn nil\n}\n\n// unmarshal deserializes from a protobuf representation.\nfunc (si *ShardInfo) unmarshal(pb *internal.ShardInfo) {\n\tsi.ID = pb.GetID()\n\n\t// If deprecated \"OwnerIDs\" exists then convert it to \"Owners\" format.\n\tif len(pb.GetOwnerIDs()) > 0 {\n\t\tsi.Owners = make([]ShardOwner, len(pb.GetOwnerIDs()))\n\t\tfor i, x := range pb.GetOwnerIDs() {\n\t\t\tsi.Owners[i].unmarshal(&internal.ShardOwner{\n\t\t\t\tNodeID: proto.Uint64(x),\n\t\t\t})\n\t\t}\n\t} else if len(pb.GetOwners()) > 0 {\n\t\tsi.Owners = make([]ShardOwner, len(pb.GetOwners()))\n\t\tfor i, x := range pb.GetOwners() {\n\t\t\tsi.Owners[i].unmarshal(x)\n\t\t}\n\t}\n}\n\n// SubscriptionInfo holds the subscription information.\ntype 
SubscriptionInfo struct {\n\tName         string\n\tMode         string\n\tDestinations []string\n}\n\n// marshal serializes to a protobuf representation.\nfunc (si SubscriptionInfo) marshal() *internal.SubscriptionInfo {\n\tpb := &internal.SubscriptionInfo{\n\t\tName: proto.String(si.Name),\n\t\tMode: proto.String(si.Mode),\n\t}\n\n\tpb.Destinations = make([]string, len(si.Destinations))\n\tfor i := range si.Destinations {\n\t\tpb.Destinations[i] = si.Destinations[i]\n\t}\n\treturn pb\n}\n\n// unmarshal deserializes from a protobuf representation.\nfunc (si *SubscriptionInfo) unmarshal(pb *internal.SubscriptionInfo) {\n\tsi.Name = pb.GetName()\n\tsi.Mode = pb.GetMode()\n\n\tif len(pb.GetDestinations()) > 0 {\n\t\tsi.Destinations = make([]string, len(pb.GetDestinations()))\n\t\tfor i, h := range pb.GetDestinations() {\n\t\t\tsi.Destinations[i] = h\n\t\t}\n\t}\n}\n\n// ShardOwner represents a node that owns a shard.\ntype ShardOwner struct {\n\tNodeID uint64\n}\n\n// clone returns a deep copy of so.\nfunc (so ShardOwner) clone() ShardOwner {\n\treturn so\n}\n\n// marshal serializes to a protobuf representation.\nfunc (so ShardOwner) marshal() *internal.ShardOwner {\n\treturn &internal.ShardOwner{\n\t\tNodeID: proto.Uint64(so.NodeID),\n\t}\n}\n\n// unmarshal deserializes from a protobuf representation.\nfunc (so *ShardOwner) unmarshal(pb *internal.ShardOwner) {\n\tso.NodeID = pb.GetNodeID()\n}\n\n// ContinuousQueryInfo represents metadata about a continuous query.\ntype ContinuousQueryInfo struct {\n\tName  string\n\tQuery string\n}\n\n// clone returns a deep copy of cqi.\nfunc (cqi ContinuousQueryInfo) clone() ContinuousQueryInfo { return cqi }\n\n// marshal serializes to a protobuf representation.\nfunc (cqi ContinuousQueryInfo) marshal() *internal.ContinuousQueryInfo {\n\treturn &internal.ContinuousQueryInfo{\n\t\tName:  proto.String(cqi.Name),\n\t\tQuery: proto.String(cqi.Query),\n\t}\n}\n\n// unmarshal deserializes from a protobuf representation.\nfunc (cqi 
*ContinuousQueryInfo) unmarshal(pb *internal.ContinuousQueryInfo) {\n\tcqi.Name = pb.GetName()\n\tcqi.Query = pb.GetQuery()\n}\n\nvar _ influxql.Authorizer = (*UserInfo)(nil)\n\n// UserInfo represents metadata about a user in the system.\ntype UserInfo struct {\n\t// User's name.\n\tName string\n\n\t// Hashed password.\n\tHash string\n\n\t// Whether the user is an admin, i.e. allowed to do everything.\n\tAdmin bool\n\n\t// Map of database name to granted privilege.\n\tPrivileges map[string]influxql.Privilege\n}\n\ntype User interface {\n\tinfluxql.Authorizer\n\tID() string\n\tIsAdmin() bool\n}\n\nfunc (u *UserInfo) ID() string {\n\treturn u.Name\n}\n\nfunc (u *UserInfo) IsAdmin() bool {\n\treturn u.Admin\n}\n\n// AuthorizeDatabase returns true if the user is authorized for the given privilege on the given database.\nfunc (ui *UserInfo) AuthorizeDatabase(privilege influxql.Privilege, database string) bool {\n\tif ui.Admin || privilege == influxql.NoPrivileges {\n\t\treturn true\n\t}\n\tp, ok := ui.Privileges[database]\n\treturn ok && (p == privilege || p == influxql.AllPrivileges)\n}\n\n// AuthorizeSeriesRead is used to limit access per-series (enterprise only)\nfunc (u *UserInfo) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool {\n\treturn true\n}\n\n// AuthorizeSeriesWrite is used to limit access per-series (enterprise only)\nfunc (u *UserInfo) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool {\n\treturn true\n}\n\n// clone returns a deep copy of si.\nfunc (ui UserInfo) clone() UserInfo {\n\tother := ui\n\n\tif ui.Privileges != nil {\n\t\tother.Privileges = make(map[string]influxql.Privilege)\n\t\tfor k, v := range ui.Privileges {\n\t\t\tother.Privileges[k] = v\n\t\t}\n\t}\n\n\treturn other\n}\n\n// marshal serializes to a protobuf representation.\nfunc (ui UserInfo) marshal() *internal.UserInfo {\n\tpb := &internal.UserInfo{\n\t\tName:  proto.String(ui.Name),\n\t\tHash:  
proto.String(ui.Hash),\n\t\tAdmin: proto.Bool(ui.Admin),\n\t}\n\n\tfor database, privilege := range ui.Privileges {\n\t\tpb.Privileges = append(pb.Privileges, &internal.UserPrivilege{\n\t\t\tDatabase:  proto.String(database),\n\t\t\tPrivilege: proto.Int32(int32(privilege)),\n\t\t})\n\t}\n\n\treturn pb\n}\n\n// unmarshal deserializes from a protobuf representation.\nfunc (ui *UserInfo) unmarshal(pb *internal.UserInfo) {\n\tui.Name = pb.GetName()\n\tui.Hash = pb.GetHash()\n\tui.Admin = pb.GetAdmin()\n\n\tui.Privileges = make(map[string]influxql.Privilege)\n\tfor _, p := range pb.GetPrivileges() {\n\t\tui.Privileges[p.GetDatabase()] = influxql.Privilege(p.GetPrivilege())\n\t}\n}\n\n// Lease represents a lease held on a resource.\ntype Lease struct {\n\tName       string    `json:\"name\"`\n\tExpiration time.Time `json:\"expiration\"`\n\tOwner      uint64    `json:\"owner\"`\n}\n\n// Leases is a concurrency-safe collection of leases keyed by name.\ntype Leases struct {\n\tmu sync.Mutex\n\tm  map[string]*Lease\n\td  time.Duration\n}\n\n// NewLeases returns a new instance of Leases.\nfunc NewLeases(d time.Duration) *Leases {\n\treturn &Leases{\n\t\tm: make(map[string]*Lease),\n\t\td: d,\n\t}\n}\n\n// Acquire acquires a lease with the given name for the given nodeID.\n// If the lease doesn't exist or exists but is expired, a valid lease is returned.\n// If nodeID already owns the named and unexpired lease, the lease expiration is extended.\n// If a different node owns the lease, an error is returned.\nfunc (leases *Leases) Acquire(name string, nodeID uint64) (*Lease, error) {\n\tleases.mu.Lock()\n\tdefer leases.mu.Unlock()\n\n\tl, ok := leases.m[name]\n\tif ok {\n\t\tif time.Now().After(l.Expiration) || l.Owner == nodeID {\n\t\t\tl.Expiration = time.Now().Add(leases.d)\n\t\t\tl.Owner = nodeID\n\t\t\treturn l, nil\n\t\t}\n\t\treturn l, errors.New(\"another node has the lease\")\n\t}\n\n\tl = &Lease{\n\t\tName:       name,\n\t\tExpiration: 
time.Now().Add(leases.d),\n\t\tOwner:      nodeID,\n\t}\n\n\tleases.m[name] = l\n\n\treturn l, nil\n}\n\n// MarshalTime converts t to nanoseconds since epoch. A zero time returns 0.\nfunc MarshalTime(t time.Time) int64 {\n\tif t.IsZero() {\n\t\treturn 0\n\t}\n\treturn t.UnixNano()\n}\n\n// UnmarshalTime converts nanoseconds since epoch to time.\n// A zero value returns a zero time.\nfunc UnmarshalTime(v int64) time.Time {\n\tif v == 0 {\n\t\treturn time.Time{}\n\t}\n\treturn time.Unix(0, v).UTC()\n}\n\n// ValidName checks to see if the given name can would be valid for DB/RP name\nfunc ValidName(name string) bool {\n\tfor _, r := range name {\n\t\tif !unicode.IsPrint(r) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn name != \"\" &&\n\t\tname != \".\" &&\n\t\tname != \"..\" &&\n\t\t!strings.ContainsAny(name, `/\\`)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/meta/data_internal_test.go",
    "content": "package meta\n\nimport (\n\t\"sort\"\n\t\"time\"\n\n\t\"testing\"\n)\n\nfunc TestShardGroupSort(t *testing.T) {\n\tsg1 := ShardGroupInfo{\n\t\tID:          1,\n\t\tStartTime:   time.Unix(1000, 0),\n\t\tEndTime:     time.Unix(1100, 0),\n\t\tTruncatedAt: time.Unix(1050, 0),\n\t}\n\n\tsg2 := ShardGroupInfo{\n\t\tID:        2,\n\t\tStartTime: time.Unix(1000, 0),\n\t\tEndTime:   time.Unix(1100, 0),\n\t}\n\n\tsgs := ShardGroupInfos{sg2, sg1}\n\n\tsort.Sort(sgs)\n\n\tif sgs[len(sgs)-1].ID != 2 {\n\t\tt.Fatal(\"unstable sort for ShardGroupInfos\")\n\t}\n}\n\nfunc Test_Data_RetentionPolicy_MarshalBinary(t *testing.T) {\n\tzeroTime := time.Time{}\n\tepoch := time.Unix(0, 0).UTC()\n\n\tstartTime := zeroTime\n\tsgi := &ShardGroupInfo{\n\t\tStartTime: startTime,\n\t}\n\tisgi := sgi.marshal()\n\tsgi.unmarshal(isgi)\n\tif got, exp := sgi.StartTime.UTC(), epoch.UTC(); got != exp {\n\t\tt.Errorf(\"unexpected start time.  got: %s, exp: %s\", got, exp)\n\t}\n\n\tstartTime = time.Unix(0, 0)\n\tendTime := startTime.Add(time.Hour * 24)\n\tsgi = &ShardGroupInfo{\n\t\tStartTime: startTime,\n\t\tEndTime:   endTime,\n\t}\n\tisgi = sgi.marshal()\n\tsgi.unmarshal(isgi)\n\tif got, exp := sgi.StartTime.UTC(), startTime.UTC(); got != exp {\n\t\tt.Errorf(\"unexpected start time.  got: %s, exp: %s\", got, exp)\n\t}\n\tif got, exp := sgi.EndTime.UTC(), endTime.UTC(); got != exp {\n\t\tt.Errorf(\"unexpected end time.  got: %s, exp: %s\", got, exp)\n\t}\n\tif got, exp := sgi.DeletedAt.UTC(), zeroTime.UTC(); got != exp {\n\t\tt.Errorf(\"unexpected DeletedAt time.  got: %s, exp: %s\", got, exp)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/meta/data_test.go",
    "content": "package meta_test\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\n\t\"github.com/influxdata/influxdb/services/meta\"\n)\n\nfunc Test_Data_DropDatabase(t *testing.T) {\n\tdata := &meta.Data{\n\t\tDatabases: []meta.DatabaseInfo{\n\t\t\t{Name: \"db0\"},\n\t\t\t{Name: \"db1\"},\n\t\t\t{Name: \"db2\"},\n\t\t\t{Name: \"db4\"},\n\t\t\t{Name: \"db5\"},\n\t\t},\n\t\tUsers: []meta.UserInfo{\n\t\t\t{Name: \"user1\", Privileges: map[string]influxql.Privilege{\"db1\": influxql.ReadPrivilege, \"db2\": influxql.ReadPrivilege}},\n\t\t\t{Name: \"user2\", Privileges: map[string]influxql.Privilege{\"db2\": influxql.ReadPrivilege}},\n\t\t},\n\t}\n\n\t// Dropping the first database removes it from the Data object.\n\texpDbs := make([]meta.DatabaseInfo, 4)\n\tcopy(expDbs, data.Databases[1:])\n\tif err := data.DropDatabase(\"db0\"); err != nil {\n\t\tt.Fatal(err)\n\t} else if got, exp := data.Databases, expDbs; !reflect.DeepEqual(got, exp) {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// Dropping a middle database removes it from the data object.\n\texpDbs = []meta.DatabaseInfo{{Name: \"db1\"}, {Name: \"db2\"}, {Name: \"db5\"}}\n\tif err := data.DropDatabase(\"db4\"); err != nil {\n\t\tt.Fatal(err)\n\t} else if got, exp := data.Databases, expDbs; !reflect.DeepEqual(got, exp) {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// Dropping the last database removes it from the data object.\n\texpDbs = []meta.DatabaseInfo{{Name: \"db1\"}, {Name: \"db2\"}}\n\tif err := data.DropDatabase(\"db5\"); err != nil {\n\t\tt.Fatal(err)\n\t} else if got, exp := data.Databases, expDbs; !reflect.DeepEqual(got, exp) {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// Dropping a database also drops all the user privileges associated with\n\t// it.\n\texpUsers := []meta.UserInfo{\n\t\t{Name: \"user1\", Privileges: map[string]influxql.Privilege{\"db1\": influxql.ReadPrivilege}},\n\t\t{Name: 
\"user2\", Privileges: map[string]influxql.Privilege{}},\n\t}\n\tif err := data.DropDatabase(\"db2\"); err != nil {\n\t\tt.Fatal(err)\n\t} else if got, exp := data.Users, expUsers; !reflect.DeepEqual(got, exp) {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n}\n\nfunc Test_Data_CreateRetentionPolicy(t *testing.T) {\n\tdata := meta.Data{}\n\n\terr := data.CreateDatabase(\"foo\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = data.CreateRetentionPolicy(\"foo\", &meta.RetentionPolicyInfo{\n\t\tName:     \"bar\",\n\t\tReplicaN: 1,\n\t\tDuration: 24 * time.Hour,\n\t}, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trp, err := data.RetentionPolicy(\"foo\", \"bar\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif rp == nil {\n\t\tt.Fatal(\"creation of retention policy failed\")\n\t}\n\n\t// Try to recreate the same RP with default set to true, should fail\n\terr = data.CreateRetentionPolicy(\"foo\", &meta.RetentionPolicyInfo{\n\t\tName:     \"bar\",\n\t\tReplicaN: 1,\n\t\tDuration: 24 * time.Hour,\n\t}, true)\n\tif err == nil || err != meta.ErrRetentionPolicyConflict {\n\t\tt.Fatalf(\"unexpected error.  
got: %v, exp: %s\", err, meta.ErrRetentionPolicyConflict)\n\t}\n\n\t// Creating the same RP with the same specifications should succeed\n\terr = data.CreateRetentionPolicy(\"foo\", &meta.RetentionPolicyInfo{\n\t\tName:     \"bar\",\n\t\tReplicaN: 1,\n\t\tDuration: 24 * time.Hour,\n\t}, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestData_AdminUserExists(t *testing.T) {\n\tdata := meta.Data{}\n\n\t// No users means no admin.\n\tif data.AdminUserExists() {\n\t\tt.Fatal(\"no admin user should exist\")\n\t}\n\n\t// Add a non-admin user.\n\tif err := data.CreateUser(\"user1\", \"a\", false); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, exp := data.AdminUserExists(), false; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// Add an admin user.\n\tif err := data.CreateUser(\"admin1\", \"a\", true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, exp := data.AdminUserExists(), true; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// Remove the original user\n\tif err := data.DropUser(\"user1\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, exp := data.AdminUserExists(), true; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// Add another admin\n\tif err := data.CreateUser(\"admin2\", \"a\", true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, exp := data.AdminUserExists(), true; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// Revoke privileges of the first admin\n\tif err := data.SetAdminPrivilege(\"admin1\", false); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, exp := data.AdminUserExists(), true; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// Add user1 back.\n\tif err := data.CreateUser(\"user1\", \"a\", false); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Revoke remaining admin.\n\tif err := data.SetAdminPrivilege(\"admin2\", false); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// No longer any admins\n\tif got, exp := 
data.AdminUserExists(), false; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// Make user1 an admin\n\tif err := data.SetAdminPrivilege(\"user1\", true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, exp := data.AdminUserExists(), true; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// Drop user1...\n\tif err := data.DropUser(\"user1\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, exp := data.AdminUserExists(), false; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n}\n\nfunc TestUserInfo_AuthorizeDatabase(t *testing.T) {\n\temptyUser := &meta.UserInfo{}\n\tif !emptyUser.AuthorizeDatabase(influxql.NoPrivileges, \"anydb\") {\n\t\tt.Fatal(\"expected NoPrivileges to be authorized but it wasn't\")\n\t}\n\tif emptyUser.AuthorizeDatabase(influxql.ReadPrivilege, \"anydb\") {\n\t\tt.Fatal(\"expected ReadPrivilege to prevent authorization, but it was authorized\")\n\t}\n\n\tadminUser := &meta.UserInfo{Admin: true}\n\tif !adminUser.AuthorizeDatabase(influxql.AllPrivileges, \"anydb\") {\n\t\tt.Fatalf(\"expected admin to be authorized but it wasn't\")\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/meta/errors.go",
    "content": "package meta\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\nvar (\n\t// ErrStoreOpen is returned when opening an already open store.\n\tErrStoreOpen = errors.New(\"store already open\")\n\n\t// ErrStoreClosed is returned when closing an already closed store.\n\tErrStoreClosed = errors.New(\"raft store already closed\")\n)\n\nvar (\n\t// ErrDatabaseExists is returned when creating an already existing database.\n\tErrDatabaseExists = errors.New(\"database already exists\")\n\n\t// ErrDatabaseNotExists is returned when operating on a not existing database.\n\tErrDatabaseNotExists = errors.New(\"database does not exist\")\n\n\t// ErrDatabaseNameRequired is returned when creating a database without a name.\n\tErrDatabaseNameRequired = errors.New(\"database name required\")\n\n\t// ErrInvalidName is returned when attempting to create a database or retention policy with an invalid name\n\tErrInvalidName = errors.New(\"invalid name\")\n)\n\nvar (\n\t// ErrRetentionPolicyExists is returned when creating an already existing policy.\n\tErrRetentionPolicyExists = errors.New(\"retention policy already exists\")\n\n\t// ErrRetentionPolicyNotFound is returned when an expected policy wasn't found.\n\tErrRetentionPolicyNotFound = errors.New(\"retention policy not found\")\n\n\t// ErrRetentionPolicyDefault is returned when attempting a prohibited operation\n\t// on a default retention policy.\n\tErrRetentionPolicyDefault = errors.New(\"retention policy is default\")\n\n\t// ErrRetentionPolicyRequired is returned when a retention policy is required\n\t// by an operation, but a nil policy was passed.\n\tErrRetentionPolicyRequired = errors.New(\"retention policy required\")\n\n\t// ErrRetentionPolicyNameRequired is returned when creating a policy without a name.\n\tErrRetentionPolicyNameRequired = errors.New(\"retention policy name required\")\n\n\t// ErrRetentionPolicyNameExists is returned when renaming a policy to\n\t// the same name as another existing 
policy.\n\tErrRetentionPolicyNameExists = errors.New(\"retention policy name already exists\")\n\n\t// ErrRetentionPolicyDurationTooLow is returned when updating a retention\n\t// policy that has a duration lower than the allowed minimum.\n\tErrRetentionPolicyDurationTooLow = fmt.Errorf(\"retention policy duration must be at least %s\", MinRetentionPolicyDuration)\n\n\t// ErrRetentionPolicyConflict is returned when creating a retention policy conflicts\n\t// with an existing policy.\n\tErrRetentionPolicyConflict = errors.New(\"retention policy conflicts with an existing policy\")\n\n\t// ErrIncompatibleDurations is returned when creating or updating a\n\t// retention policy that has a duration lower than the current shard\n\t// duration.\n\tErrIncompatibleDurations = errors.New(\"retention policy duration must be greater than the shard duration\")\n\n\t// ErrReplicationFactorTooLow is returned when the replication factor is not in an\n\t// acceptable range.\n\tErrReplicationFactorTooLow = errors.New(\"replication factor must be greater than 0\")\n)\n\nvar (\n\t// ErrShardGroupExists is returned when creating an already existing shard group.\n\tErrShardGroupExists = errors.New(\"shard group already exists\")\n\n\t// ErrShardGroupNotFound is returned when mutating a shard group that doesn't exist.\n\tErrShardGroupNotFound = errors.New(\"shard group not found\")\n\n\t// ErrShardNotReplicated is returned if the node requested to be dropped has\n\t// the last copy of a shard present and the force keyword was not used\n\tErrShardNotReplicated = errors.New(\"shard not replicated\")\n)\n\nvar (\n\t// ErrContinuousQueryExists is returned when creating an already existing continuous query.\n\tErrContinuousQueryExists = errors.New(\"continuous query already exists\")\n\n\t// ErrContinuousQueryNotFound is returned when removing a continuous query that doesn't exist.\n\tErrContinuousQueryNotFound = errors.New(\"continuous query not found\")\n)\n\nvar (\n\t// 
ErrSubscriptionExists is returned when creating an already existing subscription.\n\tErrSubscriptionExists = errors.New(\"subscription already exists\")\n\n\t// ErrSubscriptionNotFound is returned when removing a subscription that doesn't exist.\n\tErrSubscriptionNotFound = errors.New(\"subscription not found\")\n)\n\n// ErrInvalidSubscriptionURL is returned when the subscription's destination URL is invalid.\nfunc ErrInvalidSubscriptionURL(url string) error {\n\treturn fmt.Errorf(\"invalid subscription URL: %s\", url)\n}\n\nvar (\n\t// ErrUserExists is returned when creating an already existing user.\n\tErrUserExists = errors.New(\"user already exists\")\n\n\t// ErrUserNotFound is returned when mutating a user that doesn't exist.\n\tErrUserNotFound = errors.New(\"user not found\")\n\n\t// ErrUsernameRequired is returned when creating a user without a username.\n\tErrUsernameRequired = errors.New(\"username required\")\n\n\t// ErrAuthenticate is returned when authentication fails.\n\tErrAuthenticate = errors.New(\"authentication failed\")\n)\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/meta/file_unix.go",
    "content": "// +build !windows\n\npackage meta\n\nimport \"os\"\n\n// renameFile will rename the source to target using os function.\nfunc renameFile(oldpath, newpath string) error {\n\treturn os.Rename(oldpath, newpath)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/meta/file_windows.go",
    "content": "package meta\n\nimport \"os\"\n\n// renameFile will rename the source to target using os function. If target exists it will be removed before renaming.\nfunc renameFile(oldpath, newpath string) error {\n\tif _, err := os.Stat(newpath); err == nil {\n\t\tif err = os.Remove(newpath); nil != err {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn os.Rename(oldpath, newpath)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/meta/internal/meta.pb.go",
    "content": "// Code generated by protoc-gen-gogo.\n// source: internal/meta.proto\n// DO NOT EDIT!\n\n/*\nPackage meta is a generated protocol buffer package.\n\nIt is generated from these files:\n\tinternal/meta.proto\n\nIt has these top-level messages:\n\tData\n\tNodeInfo\n\tDatabaseInfo\n\tRetentionPolicySpec\n\tRetentionPolicyInfo\n\tShardGroupInfo\n\tShardInfo\n\tSubscriptionInfo\n\tShardOwner\n\tContinuousQueryInfo\n\tUserInfo\n\tUserPrivilege\n\tCommand\n\tCreateNodeCommand\n\tDeleteNodeCommand\n\tCreateDatabaseCommand\n\tDropDatabaseCommand\n\tCreateRetentionPolicyCommand\n\tDropRetentionPolicyCommand\n\tSetDefaultRetentionPolicyCommand\n\tUpdateRetentionPolicyCommand\n\tCreateShardGroupCommand\n\tDeleteShardGroupCommand\n\tCreateContinuousQueryCommand\n\tDropContinuousQueryCommand\n\tCreateUserCommand\n\tDropUserCommand\n\tUpdateUserCommand\n\tSetPrivilegeCommand\n\tSetDataCommand\n\tSetAdminPrivilegeCommand\n\tUpdateNodeCommand\n\tCreateSubscriptionCommand\n\tDropSubscriptionCommand\n\tRemovePeerCommand\n\tCreateMetaNodeCommand\n\tCreateDataNodeCommand\n\tUpdateDataNodeCommand\n\tDeleteMetaNodeCommand\n\tDeleteDataNodeCommand\n\tResponse\n\tSetMetaNodeCommand\n\tDropShardCommand\n*/\npackage meta\n\nimport proto \"github.com/gogo/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\n// This is a compile-time assertion to ensure that this generated file\n// is compatible with the proto package it is being compiled against.\n// A compilation error at this line likely means your copy of the\n// proto package needs to be updated.\nconst _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package\n\ntype Command_Type int32\n\nconst (\n\tCommand_CreateNodeCommand                Command_Type = 1\n\tCommand_DeleteNodeCommand                Command_Type = 2\n\tCommand_CreateDatabaseCommand            
Command_Type = 3\n\tCommand_DropDatabaseCommand              Command_Type = 4\n\tCommand_CreateRetentionPolicyCommand     Command_Type = 5\n\tCommand_DropRetentionPolicyCommand       Command_Type = 6\n\tCommand_SetDefaultRetentionPolicyCommand Command_Type = 7\n\tCommand_UpdateRetentionPolicyCommand     Command_Type = 8\n\tCommand_CreateShardGroupCommand          Command_Type = 9\n\tCommand_DeleteShardGroupCommand          Command_Type = 10\n\tCommand_CreateContinuousQueryCommand     Command_Type = 11\n\tCommand_DropContinuousQueryCommand       Command_Type = 12\n\tCommand_CreateUserCommand                Command_Type = 13\n\tCommand_DropUserCommand                  Command_Type = 14\n\tCommand_UpdateUserCommand                Command_Type = 15\n\tCommand_SetPrivilegeCommand              Command_Type = 16\n\tCommand_SetDataCommand                   Command_Type = 17\n\tCommand_SetAdminPrivilegeCommand         Command_Type = 18\n\tCommand_UpdateNodeCommand                Command_Type = 19\n\tCommand_CreateSubscriptionCommand        Command_Type = 21\n\tCommand_DropSubscriptionCommand          Command_Type = 22\n\tCommand_RemovePeerCommand                Command_Type = 23\n\tCommand_CreateMetaNodeCommand            Command_Type = 24\n\tCommand_CreateDataNodeCommand            Command_Type = 25\n\tCommand_UpdateDataNodeCommand            Command_Type = 26\n\tCommand_DeleteMetaNodeCommand            Command_Type = 27\n\tCommand_DeleteDataNodeCommand            Command_Type = 28\n\tCommand_SetMetaNodeCommand               Command_Type = 29\n\tCommand_DropShardCommand                 Command_Type = 30\n)\n\nvar Command_Type_name = map[int32]string{\n\t1:  \"CreateNodeCommand\",\n\t2:  \"DeleteNodeCommand\",\n\t3:  \"CreateDatabaseCommand\",\n\t4:  \"DropDatabaseCommand\",\n\t5:  \"CreateRetentionPolicyCommand\",\n\t6:  \"DropRetentionPolicyCommand\",\n\t7:  \"SetDefaultRetentionPolicyCommand\",\n\t8:  \"UpdateRetentionPolicyCommand\",\n\t9:  
\"CreateShardGroupCommand\",\n\t10: \"DeleteShardGroupCommand\",\n\t11: \"CreateContinuousQueryCommand\",\n\t12: \"DropContinuousQueryCommand\",\n\t13: \"CreateUserCommand\",\n\t14: \"DropUserCommand\",\n\t15: \"UpdateUserCommand\",\n\t16: \"SetPrivilegeCommand\",\n\t17: \"SetDataCommand\",\n\t18: \"SetAdminPrivilegeCommand\",\n\t19: \"UpdateNodeCommand\",\n\t21: \"CreateSubscriptionCommand\",\n\t22: \"DropSubscriptionCommand\",\n\t23: \"RemovePeerCommand\",\n\t24: \"CreateMetaNodeCommand\",\n\t25: \"CreateDataNodeCommand\",\n\t26: \"UpdateDataNodeCommand\",\n\t27: \"DeleteMetaNodeCommand\",\n\t28: \"DeleteDataNodeCommand\",\n\t29: \"SetMetaNodeCommand\",\n\t30: \"DropShardCommand\",\n}\nvar Command_Type_value = map[string]int32{\n\t\"CreateNodeCommand\":                1,\n\t\"DeleteNodeCommand\":                2,\n\t\"CreateDatabaseCommand\":            3,\n\t\"DropDatabaseCommand\":              4,\n\t\"CreateRetentionPolicyCommand\":     5,\n\t\"DropRetentionPolicyCommand\":       6,\n\t\"SetDefaultRetentionPolicyCommand\": 7,\n\t\"UpdateRetentionPolicyCommand\":     8,\n\t\"CreateShardGroupCommand\":          9,\n\t\"DeleteShardGroupCommand\":          10,\n\t\"CreateContinuousQueryCommand\":     11,\n\t\"DropContinuousQueryCommand\":       12,\n\t\"CreateUserCommand\":                13,\n\t\"DropUserCommand\":                  14,\n\t\"UpdateUserCommand\":                15,\n\t\"SetPrivilegeCommand\":              16,\n\t\"SetDataCommand\":                   17,\n\t\"SetAdminPrivilegeCommand\":         18,\n\t\"UpdateNodeCommand\":                19,\n\t\"CreateSubscriptionCommand\":        21,\n\t\"DropSubscriptionCommand\":          22,\n\t\"RemovePeerCommand\":                23,\n\t\"CreateMetaNodeCommand\":            24,\n\t\"CreateDataNodeCommand\":            25,\n\t\"UpdateDataNodeCommand\":            26,\n\t\"DeleteMetaNodeCommand\":            27,\n\t\"DeleteDataNodeCommand\":            28,\n\t\"SetMetaNodeCommand\":               
29,\n\t\"DropShardCommand\":                 30,\n}\n\nfunc (x Command_Type) Enum() *Command_Type {\n\tp := new(Command_Type)\n\t*p = x\n\treturn p\n}\nfunc (x Command_Type) String() string {\n\treturn proto.EnumName(Command_Type_name, int32(x))\n}\nfunc (x *Command_Type) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(Command_Type_value, data, \"Command_Type\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = Command_Type(value)\n\treturn nil\n}\nfunc (Command_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorMeta, []int{12, 0} }\n\ntype Data struct {\n\tTerm            *uint64         `protobuf:\"varint,1,req,name=Term\" json:\"Term,omitempty\"`\n\tIndex           *uint64         `protobuf:\"varint,2,req,name=Index\" json:\"Index,omitempty\"`\n\tClusterID       *uint64         `protobuf:\"varint,3,req,name=ClusterID\" json:\"ClusterID,omitempty\"`\n\tNodes           []*NodeInfo     `protobuf:\"bytes,4,rep,name=Nodes\" json:\"Nodes,omitempty\"`\n\tDatabases       []*DatabaseInfo `protobuf:\"bytes,5,rep,name=Databases\" json:\"Databases,omitempty\"`\n\tUsers           []*UserInfo     `protobuf:\"bytes,6,rep,name=Users\" json:\"Users,omitempty\"`\n\tMaxNodeID       *uint64         `protobuf:\"varint,7,req,name=MaxNodeID\" json:\"MaxNodeID,omitempty\"`\n\tMaxShardGroupID *uint64         `protobuf:\"varint,8,req,name=MaxShardGroupID\" json:\"MaxShardGroupID,omitempty\"`\n\tMaxShardID      *uint64         `protobuf:\"varint,9,req,name=MaxShardID\" json:\"MaxShardID,omitempty\"`\n\t// added for 0.10.0\n\tDataNodes        []*NodeInfo `protobuf:\"bytes,10,rep,name=DataNodes\" json:\"DataNodes,omitempty\"`\n\tMetaNodes        []*NodeInfo `protobuf:\"bytes,11,rep,name=MetaNodes\" json:\"MetaNodes,omitempty\"`\n\tXXX_unrecognized []byte      `json:\"-\"`\n}\n\nfunc (m *Data) Reset()                    { *m = Data{} }\nfunc (m *Data) String() string            { return proto.CompactTextString(m) }\nfunc (*Data) ProtoMessage()             
  {}\nfunc (*Data) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{0} }\n\nfunc (m *Data) GetTerm() uint64 {\n\tif m != nil && m.Term != nil {\n\t\treturn *m.Term\n\t}\n\treturn 0\n}\n\nfunc (m *Data) GetIndex() uint64 {\n\tif m != nil && m.Index != nil {\n\t\treturn *m.Index\n\t}\n\treturn 0\n}\n\nfunc (m *Data) GetClusterID() uint64 {\n\tif m != nil && m.ClusterID != nil {\n\t\treturn *m.ClusterID\n\t}\n\treturn 0\n}\n\nfunc (m *Data) GetNodes() []*NodeInfo {\n\tif m != nil {\n\t\treturn m.Nodes\n\t}\n\treturn nil\n}\n\nfunc (m *Data) GetDatabases() []*DatabaseInfo {\n\tif m != nil {\n\t\treturn m.Databases\n\t}\n\treturn nil\n}\n\nfunc (m *Data) GetUsers() []*UserInfo {\n\tif m != nil {\n\t\treturn m.Users\n\t}\n\treturn nil\n}\n\nfunc (m *Data) GetMaxNodeID() uint64 {\n\tif m != nil && m.MaxNodeID != nil {\n\t\treturn *m.MaxNodeID\n\t}\n\treturn 0\n}\n\nfunc (m *Data) GetMaxShardGroupID() uint64 {\n\tif m != nil && m.MaxShardGroupID != nil {\n\t\treturn *m.MaxShardGroupID\n\t}\n\treturn 0\n}\n\nfunc (m *Data) GetMaxShardID() uint64 {\n\tif m != nil && m.MaxShardID != nil {\n\t\treturn *m.MaxShardID\n\t}\n\treturn 0\n}\n\nfunc (m *Data) GetDataNodes() []*NodeInfo {\n\tif m != nil {\n\t\treturn m.DataNodes\n\t}\n\treturn nil\n}\n\nfunc (m *Data) GetMetaNodes() []*NodeInfo {\n\tif m != nil {\n\t\treturn m.MetaNodes\n\t}\n\treturn nil\n}\n\ntype NodeInfo struct {\n\tID               *uint64 `protobuf:\"varint,1,req,name=ID\" json:\"ID,omitempty\"`\n\tHost             *string `protobuf:\"bytes,2,req,name=Host\" json:\"Host,omitempty\"`\n\tTCPHost          *string `protobuf:\"bytes,3,opt,name=TCPHost\" json:\"TCPHost,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *NodeInfo) Reset()                    { *m = NodeInfo{} }\nfunc (m *NodeInfo) String() string            { return proto.CompactTextString(m) }\nfunc (*NodeInfo) ProtoMessage()               {}\nfunc (*NodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorMeta, 
[]int{1} }\n\nfunc (m *NodeInfo) GetID() uint64 {\n\tif m != nil && m.ID != nil {\n\t\treturn *m.ID\n\t}\n\treturn 0\n}\n\nfunc (m *NodeInfo) GetHost() string {\n\tif m != nil && m.Host != nil {\n\t\treturn *m.Host\n\t}\n\treturn \"\"\n}\n\nfunc (m *NodeInfo) GetTCPHost() string {\n\tif m != nil && m.TCPHost != nil {\n\t\treturn *m.TCPHost\n\t}\n\treturn \"\"\n}\n\ntype DatabaseInfo struct {\n\tName                   *string                `protobuf:\"bytes,1,req,name=Name\" json:\"Name,omitempty\"`\n\tDefaultRetentionPolicy *string                `protobuf:\"bytes,2,req,name=DefaultRetentionPolicy\" json:\"DefaultRetentionPolicy,omitempty\"`\n\tRetentionPolicies      []*RetentionPolicyInfo `protobuf:\"bytes,3,rep,name=RetentionPolicies\" json:\"RetentionPolicies,omitempty\"`\n\tContinuousQueries      []*ContinuousQueryInfo `protobuf:\"bytes,4,rep,name=ContinuousQueries\" json:\"ContinuousQueries,omitempty\"`\n\tXXX_unrecognized       []byte                 `json:\"-\"`\n}\n\nfunc (m *DatabaseInfo) Reset()                    { *m = DatabaseInfo{} }\nfunc (m *DatabaseInfo) String() string            { return proto.CompactTextString(m) }\nfunc (*DatabaseInfo) ProtoMessage()               {}\nfunc (*DatabaseInfo) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{2} }\n\nfunc (m *DatabaseInfo) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *DatabaseInfo) GetDefaultRetentionPolicy() string {\n\tif m != nil && m.DefaultRetentionPolicy != nil {\n\t\treturn *m.DefaultRetentionPolicy\n\t}\n\treturn \"\"\n}\n\nfunc (m *DatabaseInfo) GetRetentionPolicies() []*RetentionPolicyInfo {\n\tif m != nil {\n\t\treturn m.RetentionPolicies\n\t}\n\treturn nil\n}\n\nfunc (m *DatabaseInfo) GetContinuousQueries() []*ContinuousQueryInfo {\n\tif m != nil {\n\t\treturn m.ContinuousQueries\n\t}\n\treturn nil\n}\n\ntype RetentionPolicySpec struct {\n\tName               *string `protobuf:\"bytes,1,opt,name=Name\" 
json:\"Name,omitempty\"`\n\tDuration           *int64  `protobuf:\"varint,2,opt,name=Duration\" json:\"Duration,omitempty\"`\n\tShardGroupDuration *int64  `protobuf:\"varint,3,opt,name=ShardGroupDuration\" json:\"ShardGroupDuration,omitempty\"`\n\tReplicaN           *uint32 `protobuf:\"varint,4,opt,name=ReplicaN\" json:\"ReplicaN,omitempty\"`\n\tXXX_unrecognized   []byte  `json:\"-\"`\n}\n\nfunc (m *RetentionPolicySpec) Reset()                    { *m = RetentionPolicySpec{} }\nfunc (m *RetentionPolicySpec) String() string            { return proto.CompactTextString(m) }\nfunc (*RetentionPolicySpec) ProtoMessage()               {}\nfunc (*RetentionPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{3} }\n\nfunc (m *RetentionPolicySpec) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *RetentionPolicySpec) GetDuration() int64 {\n\tif m != nil && m.Duration != nil {\n\t\treturn *m.Duration\n\t}\n\treturn 0\n}\n\nfunc (m *RetentionPolicySpec) GetShardGroupDuration() int64 {\n\tif m != nil && m.ShardGroupDuration != nil {\n\t\treturn *m.ShardGroupDuration\n\t}\n\treturn 0\n}\n\nfunc (m *RetentionPolicySpec) GetReplicaN() uint32 {\n\tif m != nil && m.ReplicaN != nil {\n\t\treturn *m.ReplicaN\n\t}\n\treturn 0\n}\n\ntype RetentionPolicyInfo struct {\n\tName               *string             `protobuf:\"bytes,1,req,name=Name\" json:\"Name,omitempty\"`\n\tDuration           *int64              `protobuf:\"varint,2,req,name=Duration\" json:\"Duration,omitempty\"`\n\tShardGroupDuration *int64              `protobuf:\"varint,3,req,name=ShardGroupDuration\" json:\"ShardGroupDuration,omitempty\"`\n\tReplicaN           *uint32             `protobuf:\"varint,4,req,name=ReplicaN\" json:\"ReplicaN,omitempty\"`\n\tShardGroups        []*ShardGroupInfo   `protobuf:\"bytes,5,rep,name=ShardGroups\" json:\"ShardGroups,omitempty\"`\n\tSubscriptions      []*SubscriptionInfo 
`protobuf:\"bytes,6,rep,name=Subscriptions\" json:\"Subscriptions,omitempty\"`\n\tXXX_unrecognized   []byte              `json:\"-\"`\n}\n\nfunc (m *RetentionPolicyInfo) Reset()                    { *m = RetentionPolicyInfo{} }\nfunc (m *RetentionPolicyInfo) String() string            { return proto.CompactTextString(m) }\nfunc (*RetentionPolicyInfo) ProtoMessage()               {}\nfunc (*RetentionPolicyInfo) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{4} }\n\nfunc (m *RetentionPolicyInfo) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *RetentionPolicyInfo) GetDuration() int64 {\n\tif m != nil && m.Duration != nil {\n\t\treturn *m.Duration\n\t}\n\treturn 0\n}\n\nfunc (m *RetentionPolicyInfo) GetShardGroupDuration() int64 {\n\tif m != nil && m.ShardGroupDuration != nil {\n\t\treturn *m.ShardGroupDuration\n\t}\n\treturn 0\n}\n\nfunc (m *RetentionPolicyInfo) GetReplicaN() uint32 {\n\tif m != nil && m.ReplicaN != nil {\n\t\treturn *m.ReplicaN\n\t}\n\treturn 0\n}\n\nfunc (m *RetentionPolicyInfo) GetShardGroups() []*ShardGroupInfo {\n\tif m != nil {\n\t\treturn m.ShardGroups\n\t}\n\treturn nil\n}\n\nfunc (m *RetentionPolicyInfo) GetSubscriptions() []*SubscriptionInfo {\n\tif m != nil {\n\t\treturn m.Subscriptions\n\t}\n\treturn nil\n}\n\ntype ShardGroupInfo struct {\n\tID               *uint64      `protobuf:\"varint,1,req,name=ID\" json:\"ID,omitempty\"`\n\tStartTime        *int64       `protobuf:\"varint,2,req,name=StartTime\" json:\"StartTime,omitempty\"`\n\tEndTime          *int64       `protobuf:\"varint,3,req,name=EndTime\" json:\"EndTime,omitempty\"`\n\tDeletedAt        *int64       `protobuf:\"varint,4,req,name=DeletedAt\" json:\"DeletedAt,omitempty\"`\n\tShards           []*ShardInfo `protobuf:\"bytes,5,rep,name=Shards\" json:\"Shards,omitempty\"`\n\tTruncatedAt      *int64       `protobuf:\"varint,6,opt,name=TruncatedAt\" json:\"TruncatedAt,omitempty\"`\n\tXXX_unrecognized []byte   
    `json:\"-\"`\n}\n\nfunc (m *ShardGroupInfo) Reset()                    { *m = ShardGroupInfo{} }\nfunc (m *ShardGroupInfo) String() string            { return proto.CompactTextString(m) }\nfunc (*ShardGroupInfo) ProtoMessage()               {}\nfunc (*ShardGroupInfo) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{5} }\n\nfunc (m *ShardGroupInfo) GetID() uint64 {\n\tif m != nil && m.ID != nil {\n\t\treturn *m.ID\n\t}\n\treturn 0\n}\n\nfunc (m *ShardGroupInfo) GetStartTime() int64 {\n\tif m != nil && m.StartTime != nil {\n\t\treturn *m.StartTime\n\t}\n\treturn 0\n}\n\nfunc (m *ShardGroupInfo) GetEndTime() int64 {\n\tif m != nil && m.EndTime != nil {\n\t\treturn *m.EndTime\n\t}\n\treturn 0\n}\n\nfunc (m *ShardGroupInfo) GetDeletedAt() int64 {\n\tif m != nil && m.DeletedAt != nil {\n\t\treturn *m.DeletedAt\n\t}\n\treturn 0\n}\n\nfunc (m *ShardGroupInfo) GetShards() []*ShardInfo {\n\tif m != nil {\n\t\treturn m.Shards\n\t}\n\treturn nil\n}\n\nfunc (m *ShardGroupInfo) GetTruncatedAt() int64 {\n\tif m != nil && m.TruncatedAt != nil {\n\t\treturn *m.TruncatedAt\n\t}\n\treturn 0\n}\n\ntype ShardInfo struct {\n\tID               *uint64       `protobuf:\"varint,1,req,name=ID\" json:\"ID,omitempty\"`\n\tOwnerIDs         []uint64      `protobuf:\"varint,2,rep,name=OwnerIDs\" json:\"OwnerIDs,omitempty\"`\n\tOwners           []*ShardOwner `protobuf:\"bytes,3,rep,name=Owners\" json:\"Owners,omitempty\"`\n\tXXX_unrecognized []byte        `json:\"-\"`\n}\n\nfunc (m *ShardInfo) Reset()                    { *m = ShardInfo{} }\nfunc (m *ShardInfo) String() string            { return proto.CompactTextString(m) }\nfunc (*ShardInfo) ProtoMessage()               {}\nfunc (*ShardInfo) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{6} }\n\nfunc (m *ShardInfo) GetID() uint64 {\n\tif m != nil && m.ID != nil {\n\t\treturn *m.ID\n\t}\n\treturn 0\n}\n\nfunc (m *ShardInfo) GetOwnerIDs() []uint64 {\n\tif m != nil {\n\t\treturn m.OwnerIDs\n\t}\n\treturn 
nil\n}\n\nfunc (m *ShardInfo) GetOwners() []*ShardOwner {\n\tif m != nil {\n\t\treturn m.Owners\n\t}\n\treturn nil\n}\n\ntype SubscriptionInfo struct {\n\tName             *string  `protobuf:\"bytes,1,req,name=Name\" json:\"Name,omitempty\"`\n\tMode             *string  `protobuf:\"bytes,2,req,name=Mode\" json:\"Mode,omitempty\"`\n\tDestinations     []string `protobuf:\"bytes,3,rep,name=Destinations\" json:\"Destinations,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *SubscriptionInfo) Reset()                    { *m = SubscriptionInfo{} }\nfunc (m *SubscriptionInfo) String() string            { return proto.CompactTextString(m) }\nfunc (*SubscriptionInfo) ProtoMessage()               {}\nfunc (*SubscriptionInfo) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{7} }\n\nfunc (m *SubscriptionInfo) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *SubscriptionInfo) GetMode() string {\n\tif m != nil && m.Mode != nil {\n\t\treturn *m.Mode\n\t}\n\treturn \"\"\n}\n\nfunc (m *SubscriptionInfo) GetDestinations() []string {\n\tif m != nil {\n\t\treturn m.Destinations\n\t}\n\treturn nil\n}\n\ntype ShardOwner struct {\n\tNodeID           *uint64 `protobuf:\"varint,1,req,name=NodeID\" json:\"NodeID,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *ShardOwner) Reset()                    { *m = ShardOwner{} }\nfunc (m *ShardOwner) String() string            { return proto.CompactTextString(m) }\nfunc (*ShardOwner) ProtoMessage()               {}\nfunc (*ShardOwner) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{8} }\n\nfunc (m *ShardOwner) GetNodeID() uint64 {\n\tif m != nil && m.NodeID != nil {\n\t\treturn *m.NodeID\n\t}\n\treturn 0\n}\n\ntype ContinuousQueryInfo struct {\n\tName             *string `protobuf:\"bytes,1,req,name=Name\" json:\"Name,omitempty\"`\n\tQuery            *string `protobuf:\"bytes,2,req,name=Query\" 
json:\"Query,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *ContinuousQueryInfo) Reset()                    { *m = ContinuousQueryInfo{} }\nfunc (m *ContinuousQueryInfo) String() string            { return proto.CompactTextString(m) }\nfunc (*ContinuousQueryInfo) ProtoMessage()               {}\nfunc (*ContinuousQueryInfo) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{9} }\n\nfunc (m *ContinuousQueryInfo) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *ContinuousQueryInfo) GetQuery() string {\n\tif m != nil && m.Query != nil {\n\t\treturn *m.Query\n\t}\n\treturn \"\"\n}\n\ntype UserInfo struct {\n\tName             *string          `protobuf:\"bytes,1,req,name=Name\" json:\"Name,omitempty\"`\n\tHash             *string          `protobuf:\"bytes,2,req,name=Hash\" json:\"Hash,omitempty\"`\n\tAdmin            *bool            `protobuf:\"varint,3,req,name=Admin\" json:\"Admin,omitempty\"`\n\tPrivileges       []*UserPrivilege `protobuf:\"bytes,4,rep,name=Privileges\" json:\"Privileges,omitempty\"`\n\tXXX_unrecognized []byte           `json:\"-\"`\n}\n\nfunc (m *UserInfo) Reset()                    { *m = UserInfo{} }\nfunc (m *UserInfo) String() string            { return proto.CompactTextString(m) }\nfunc (*UserInfo) ProtoMessage()               {}\nfunc (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{10} }\n\nfunc (m *UserInfo) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *UserInfo) GetHash() string {\n\tif m != nil && m.Hash != nil {\n\t\treturn *m.Hash\n\t}\n\treturn \"\"\n}\n\nfunc (m *UserInfo) GetAdmin() bool {\n\tif m != nil && m.Admin != nil {\n\t\treturn *m.Admin\n\t}\n\treturn false\n}\n\nfunc (m *UserInfo) GetPrivileges() []*UserPrivilege {\n\tif m != nil {\n\t\treturn m.Privileges\n\t}\n\treturn nil\n}\n\ntype UserPrivilege struct {\n\tDatabase         *string 
`protobuf:\"bytes,1,req,name=Database\" json:\"Database,omitempty\"`\n\tPrivilege        *int32  `protobuf:\"varint,2,req,name=Privilege\" json:\"Privilege,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *UserPrivilege) Reset()                    { *m = UserPrivilege{} }\nfunc (m *UserPrivilege) String() string            { return proto.CompactTextString(m) }\nfunc (*UserPrivilege) ProtoMessage()               {}\nfunc (*UserPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{11} }\n\nfunc (m *UserPrivilege) GetDatabase() string {\n\tif m != nil && m.Database != nil {\n\t\treturn *m.Database\n\t}\n\treturn \"\"\n}\n\nfunc (m *UserPrivilege) GetPrivilege() int32 {\n\tif m != nil && m.Privilege != nil {\n\t\treturn *m.Privilege\n\t}\n\treturn 0\n}\n\ntype Command struct {\n\tType                         *Command_Type `protobuf:\"varint,1,req,name=type,enum=meta.Command_Type\" json:\"type,omitempty\"`\n\tproto.XXX_InternalExtensions `json:\"-\"`\n\tXXX_unrecognized             []byte `json:\"-\"`\n}\n\nfunc (m *Command) Reset()                    { *m = Command{} }\nfunc (m *Command) String() string            { return proto.CompactTextString(m) }\nfunc (*Command) ProtoMessage()               {}\nfunc (*Command) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{12} }\n\nvar extRange_Command = []proto.ExtensionRange{\n\t{Start: 100, End: 536870911},\n}\n\nfunc (*Command) ExtensionRangeArray() []proto.ExtensionRange {\n\treturn extRange_Command\n}\n\nfunc (m *Command) GetType() Command_Type {\n\tif m != nil && m.Type != nil {\n\t\treturn *m.Type\n\t}\n\treturn Command_CreateNodeCommand\n}\n\n// This isn't used in >= 0.10.0. Kept around for upgrade purposes. 
Instead\n// look at CreateDataNodeCommand and CreateMetaNodeCommand\ntype CreateNodeCommand struct {\n\tHost             *string `protobuf:\"bytes,1,req,name=Host\" json:\"Host,omitempty\"`\n\tRand             *uint64 `protobuf:\"varint,2,req,name=Rand\" json:\"Rand,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *CreateNodeCommand) Reset()                    { *m = CreateNodeCommand{} }\nfunc (m *CreateNodeCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*CreateNodeCommand) ProtoMessage()               {}\nfunc (*CreateNodeCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{13} }\n\nfunc (m *CreateNodeCommand) GetHost() string {\n\tif m != nil && m.Host != nil {\n\t\treturn *m.Host\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateNodeCommand) GetRand() uint64 {\n\tif m != nil && m.Rand != nil {\n\t\treturn *m.Rand\n\t}\n\treturn 0\n}\n\nvar E_CreateNodeCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*CreateNodeCommand)(nil),\n\tField:         101,\n\tName:          \"meta.CreateNodeCommand.command\",\n\tTag:           \"bytes,101,opt,name=command\",\n}\n\ntype DeleteNodeCommand struct {\n\tID               *uint64 `protobuf:\"varint,1,req,name=ID\" json:\"ID,omitempty\"`\n\tForce            *bool   `protobuf:\"varint,2,req,name=Force\" json:\"Force,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *DeleteNodeCommand) Reset()                    { *m = DeleteNodeCommand{} }\nfunc (m *DeleteNodeCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*DeleteNodeCommand) ProtoMessage()               {}\nfunc (*DeleteNodeCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{14} }\n\nfunc (m *DeleteNodeCommand) GetID() uint64 {\n\tif m != nil && m.ID != nil {\n\t\treturn *m.ID\n\t}\n\treturn 0\n}\n\nfunc (m *DeleteNodeCommand) GetForce() bool {\n\tif m != nil && m.Force != nil {\n\t\treturn 
*m.Force\n\t}\n\treturn false\n}\n\nvar E_DeleteNodeCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*DeleteNodeCommand)(nil),\n\tField:         102,\n\tName:          \"meta.DeleteNodeCommand.command\",\n\tTag:           \"bytes,102,opt,name=command\",\n}\n\ntype CreateDatabaseCommand struct {\n\tName             *string              `protobuf:\"bytes,1,req,name=Name\" json:\"Name,omitempty\"`\n\tRetentionPolicy  *RetentionPolicyInfo `protobuf:\"bytes,2,opt,name=RetentionPolicy\" json:\"RetentionPolicy,omitempty\"`\n\tXXX_unrecognized []byte               `json:\"-\"`\n}\n\nfunc (m *CreateDatabaseCommand) Reset()                    { *m = CreateDatabaseCommand{} }\nfunc (m *CreateDatabaseCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*CreateDatabaseCommand) ProtoMessage()               {}\nfunc (*CreateDatabaseCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{15} }\n\nfunc (m *CreateDatabaseCommand) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateDatabaseCommand) GetRetentionPolicy() *RetentionPolicyInfo {\n\tif m != nil {\n\t\treturn m.RetentionPolicy\n\t}\n\treturn nil\n}\n\nvar E_CreateDatabaseCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*CreateDatabaseCommand)(nil),\n\tField:         103,\n\tName:          \"meta.CreateDatabaseCommand.command\",\n\tTag:           \"bytes,103,opt,name=command\",\n}\n\ntype DropDatabaseCommand struct {\n\tName             *string `protobuf:\"bytes,1,req,name=Name\" json:\"Name,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *DropDatabaseCommand) Reset()                    { *m = DropDatabaseCommand{} }\nfunc (m *DropDatabaseCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*DropDatabaseCommand) ProtoMessage()               {}\nfunc (*DropDatabaseCommand) 
Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{16} }\n\nfunc (m *DropDatabaseCommand) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nvar E_DropDatabaseCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*DropDatabaseCommand)(nil),\n\tField:         104,\n\tName:          \"meta.DropDatabaseCommand.command\",\n\tTag:           \"bytes,104,opt,name=command\",\n}\n\ntype CreateRetentionPolicyCommand struct {\n\tDatabase         *string              `protobuf:\"bytes,1,req,name=Database\" json:\"Database,omitempty\"`\n\tRetentionPolicy  *RetentionPolicyInfo `protobuf:\"bytes,2,req,name=RetentionPolicy\" json:\"RetentionPolicy,omitempty\"`\n\tXXX_unrecognized []byte               `json:\"-\"`\n}\n\nfunc (m *CreateRetentionPolicyCommand) Reset()         { *m = CreateRetentionPolicyCommand{} }\nfunc (m *CreateRetentionPolicyCommand) String() string { return proto.CompactTextString(m) }\nfunc (*CreateRetentionPolicyCommand) ProtoMessage()    {}\nfunc (*CreateRetentionPolicyCommand) Descriptor() ([]byte, []int) {\n\treturn fileDescriptorMeta, []int{17}\n}\n\nfunc (m *CreateRetentionPolicyCommand) GetDatabase() string {\n\tif m != nil && m.Database != nil {\n\t\treturn *m.Database\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateRetentionPolicyCommand) GetRetentionPolicy() *RetentionPolicyInfo {\n\tif m != nil {\n\t\treturn m.RetentionPolicy\n\t}\n\treturn nil\n}\n\nvar E_CreateRetentionPolicyCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*CreateRetentionPolicyCommand)(nil),\n\tField:         105,\n\tName:          \"meta.CreateRetentionPolicyCommand.command\",\n\tTag:           \"bytes,105,opt,name=command\",\n}\n\ntype DropRetentionPolicyCommand struct {\n\tDatabase         *string `protobuf:\"bytes,1,req,name=Database\" json:\"Database,omitempty\"`\n\tName             *string `protobuf:\"bytes,2,req,name=Name\" 
json:\"Name,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *DropRetentionPolicyCommand) Reset()                    { *m = DropRetentionPolicyCommand{} }\nfunc (m *DropRetentionPolicyCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*DropRetentionPolicyCommand) ProtoMessage()               {}\nfunc (*DropRetentionPolicyCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{18} }\n\nfunc (m *DropRetentionPolicyCommand) GetDatabase() string {\n\tif m != nil && m.Database != nil {\n\t\treturn *m.Database\n\t}\n\treturn \"\"\n}\n\nfunc (m *DropRetentionPolicyCommand) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nvar E_DropRetentionPolicyCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*DropRetentionPolicyCommand)(nil),\n\tField:         106,\n\tName:          \"meta.DropRetentionPolicyCommand.command\",\n\tTag:           \"bytes,106,opt,name=command\",\n}\n\ntype SetDefaultRetentionPolicyCommand struct {\n\tDatabase         *string `protobuf:\"bytes,1,req,name=Database\" json:\"Database,omitempty\"`\n\tName             *string `protobuf:\"bytes,2,req,name=Name\" json:\"Name,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *SetDefaultRetentionPolicyCommand) Reset()         { *m = SetDefaultRetentionPolicyCommand{} }\nfunc (m *SetDefaultRetentionPolicyCommand) String() string { return proto.CompactTextString(m) }\nfunc (*SetDefaultRetentionPolicyCommand) ProtoMessage()    {}\nfunc (*SetDefaultRetentionPolicyCommand) Descriptor() ([]byte, []int) {\n\treturn fileDescriptorMeta, []int{19}\n}\n\nfunc (m *SetDefaultRetentionPolicyCommand) GetDatabase() string {\n\tif m != nil && m.Database != nil {\n\t\treturn *m.Database\n\t}\n\treturn \"\"\n}\n\nfunc (m *SetDefaultRetentionPolicyCommand) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nvar 
E_SetDefaultRetentionPolicyCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*SetDefaultRetentionPolicyCommand)(nil),\n\tField:         107,\n\tName:          \"meta.SetDefaultRetentionPolicyCommand.command\",\n\tTag:           \"bytes,107,opt,name=command\",\n}\n\ntype UpdateRetentionPolicyCommand struct {\n\tDatabase         *string `protobuf:\"bytes,1,req,name=Database\" json:\"Database,omitempty\"`\n\tName             *string `protobuf:\"bytes,2,req,name=Name\" json:\"Name,omitempty\"`\n\tNewName          *string `protobuf:\"bytes,3,opt,name=NewName\" json:\"NewName,omitempty\"`\n\tDuration         *int64  `protobuf:\"varint,4,opt,name=Duration\" json:\"Duration,omitempty\"`\n\tReplicaN         *uint32 `protobuf:\"varint,5,opt,name=ReplicaN\" json:\"ReplicaN,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *UpdateRetentionPolicyCommand) Reset()         { *m = UpdateRetentionPolicyCommand{} }\nfunc (m *UpdateRetentionPolicyCommand) String() string { return proto.CompactTextString(m) }\nfunc (*UpdateRetentionPolicyCommand) ProtoMessage()    {}\nfunc (*UpdateRetentionPolicyCommand) Descriptor() ([]byte, []int) {\n\treturn fileDescriptorMeta, []int{20}\n}\n\nfunc (m *UpdateRetentionPolicyCommand) GetDatabase() string {\n\tif m != nil && m.Database != nil {\n\t\treturn *m.Database\n\t}\n\treturn \"\"\n}\n\nfunc (m *UpdateRetentionPolicyCommand) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *UpdateRetentionPolicyCommand) GetNewName() string {\n\tif m != nil && m.NewName != nil {\n\t\treturn *m.NewName\n\t}\n\treturn \"\"\n}\n\nfunc (m *UpdateRetentionPolicyCommand) GetDuration() int64 {\n\tif m != nil && m.Duration != nil {\n\t\treturn *m.Duration\n\t}\n\treturn 0\n}\n\nfunc (m *UpdateRetentionPolicyCommand) GetReplicaN() uint32 {\n\tif m != nil && m.ReplicaN != nil {\n\t\treturn *m.ReplicaN\n\t}\n\treturn 0\n}\n\nvar 
E_UpdateRetentionPolicyCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*UpdateRetentionPolicyCommand)(nil),\n\tField:         108,\n\tName:          \"meta.UpdateRetentionPolicyCommand.command\",\n\tTag:           \"bytes,108,opt,name=command\",\n}\n\ntype CreateShardGroupCommand struct {\n\tDatabase         *string `protobuf:\"bytes,1,req,name=Database\" json:\"Database,omitempty\"`\n\tPolicy           *string `protobuf:\"bytes,2,req,name=Policy\" json:\"Policy,omitempty\"`\n\tTimestamp        *int64  `protobuf:\"varint,3,req,name=Timestamp\" json:\"Timestamp,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *CreateShardGroupCommand) Reset()                    { *m = CreateShardGroupCommand{} }\nfunc (m *CreateShardGroupCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*CreateShardGroupCommand) ProtoMessage()               {}\nfunc (*CreateShardGroupCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{21} }\n\nfunc (m *CreateShardGroupCommand) GetDatabase() string {\n\tif m != nil && m.Database != nil {\n\t\treturn *m.Database\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateShardGroupCommand) GetPolicy() string {\n\tif m != nil && m.Policy != nil {\n\t\treturn *m.Policy\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateShardGroupCommand) GetTimestamp() int64 {\n\tif m != nil && m.Timestamp != nil {\n\t\treturn *m.Timestamp\n\t}\n\treturn 0\n}\n\nvar E_CreateShardGroupCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*CreateShardGroupCommand)(nil),\n\tField:         109,\n\tName:          \"meta.CreateShardGroupCommand.command\",\n\tTag:           \"bytes,109,opt,name=command\",\n}\n\ntype DeleteShardGroupCommand struct {\n\tDatabase         *string `protobuf:\"bytes,1,req,name=Database\" json:\"Database,omitempty\"`\n\tPolicy           *string `protobuf:\"bytes,2,req,name=Policy\" json:\"Policy,omitempty\"`\n\tShardGroupID  
   *uint64 `protobuf:\"varint,3,req,name=ShardGroupID\" json:\"ShardGroupID,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *DeleteShardGroupCommand) Reset()                    { *m = DeleteShardGroupCommand{} }\nfunc (m *DeleteShardGroupCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*DeleteShardGroupCommand) ProtoMessage()               {}\nfunc (*DeleteShardGroupCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{22} }\n\nfunc (m *DeleteShardGroupCommand) GetDatabase() string {\n\tif m != nil && m.Database != nil {\n\t\treturn *m.Database\n\t}\n\treturn \"\"\n}\n\nfunc (m *DeleteShardGroupCommand) GetPolicy() string {\n\tif m != nil && m.Policy != nil {\n\t\treturn *m.Policy\n\t}\n\treturn \"\"\n}\n\nfunc (m *DeleteShardGroupCommand) GetShardGroupID() uint64 {\n\tif m != nil && m.ShardGroupID != nil {\n\t\treturn *m.ShardGroupID\n\t}\n\treturn 0\n}\n\nvar E_DeleteShardGroupCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*DeleteShardGroupCommand)(nil),\n\tField:         110,\n\tName:          \"meta.DeleteShardGroupCommand.command\",\n\tTag:           \"bytes,110,opt,name=command\",\n}\n\ntype CreateContinuousQueryCommand struct {\n\tDatabase         *string `protobuf:\"bytes,1,req,name=Database\" json:\"Database,omitempty\"`\n\tName             *string `protobuf:\"bytes,2,req,name=Name\" json:\"Name,omitempty\"`\n\tQuery            *string `protobuf:\"bytes,3,req,name=Query\" json:\"Query,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *CreateContinuousQueryCommand) Reset()         { *m = CreateContinuousQueryCommand{} }\nfunc (m *CreateContinuousQueryCommand) String() string { return proto.CompactTextString(m) }\nfunc (*CreateContinuousQueryCommand) ProtoMessage()    {}\nfunc (*CreateContinuousQueryCommand) Descriptor() ([]byte, []int) {\n\treturn fileDescriptorMeta, []int{23}\n}\n\nfunc (m *CreateContinuousQueryCommand) 
GetDatabase() string {\n\tif m != nil && m.Database != nil {\n\t\treturn *m.Database\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateContinuousQueryCommand) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateContinuousQueryCommand) GetQuery() string {\n\tif m != nil && m.Query != nil {\n\t\treturn *m.Query\n\t}\n\treturn \"\"\n}\n\nvar E_CreateContinuousQueryCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*CreateContinuousQueryCommand)(nil),\n\tField:         111,\n\tName:          \"meta.CreateContinuousQueryCommand.command\",\n\tTag:           \"bytes,111,opt,name=command\",\n}\n\ntype DropContinuousQueryCommand struct {\n\tDatabase         *string `protobuf:\"bytes,1,req,name=Database\" json:\"Database,omitempty\"`\n\tName             *string `protobuf:\"bytes,2,req,name=Name\" json:\"Name,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *DropContinuousQueryCommand) Reset()                    { *m = DropContinuousQueryCommand{} }\nfunc (m *DropContinuousQueryCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*DropContinuousQueryCommand) ProtoMessage()               {}\nfunc (*DropContinuousQueryCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{24} }\n\nfunc (m *DropContinuousQueryCommand) GetDatabase() string {\n\tif m != nil && m.Database != nil {\n\t\treturn *m.Database\n\t}\n\treturn \"\"\n}\n\nfunc (m *DropContinuousQueryCommand) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nvar E_DropContinuousQueryCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*DropContinuousQueryCommand)(nil),\n\tField:         112,\n\tName:          \"meta.DropContinuousQueryCommand.command\",\n\tTag:           \"bytes,112,opt,name=command\",\n}\n\ntype CreateUserCommand struct {\n\tName             *string 
`protobuf:\"bytes,1,req,name=Name\" json:\"Name,omitempty\"`\n\tHash             *string `protobuf:\"bytes,2,req,name=Hash\" json:\"Hash,omitempty\"`\n\tAdmin            *bool   `protobuf:\"varint,3,req,name=Admin\" json:\"Admin,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *CreateUserCommand) Reset()                    { *m = CreateUserCommand{} }\nfunc (m *CreateUserCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*CreateUserCommand) ProtoMessage()               {}\nfunc (*CreateUserCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{25} }\n\nfunc (m *CreateUserCommand) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateUserCommand) GetHash() string {\n\tif m != nil && m.Hash != nil {\n\t\treturn *m.Hash\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateUserCommand) GetAdmin() bool {\n\tif m != nil && m.Admin != nil {\n\t\treturn *m.Admin\n\t}\n\treturn false\n}\n\nvar E_CreateUserCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*CreateUserCommand)(nil),\n\tField:         113,\n\tName:          \"meta.CreateUserCommand.command\",\n\tTag:           \"bytes,113,opt,name=command\",\n}\n\ntype DropUserCommand struct {\n\tName             *string `protobuf:\"bytes,1,req,name=Name\" json:\"Name,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *DropUserCommand) Reset()                    { *m = DropUserCommand{} }\nfunc (m *DropUserCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*DropUserCommand) ProtoMessage()               {}\nfunc (*DropUserCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{26} }\n\nfunc (m *DropUserCommand) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nvar E_DropUserCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  
(*Command)(nil),\n\tExtensionType: (*DropUserCommand)(nil),\n\tField:         114,\n\tName:          \"meta.DropUserCommand.command\",\n\tTag:           \"bytes,114,opt,name=command\",\n}\n\ntype UpdateUserCommand struct {\n\tName             *string `protobuf:\"bytes,1,req,name=Name\" json:\"Name,omitempty\"`\n\tHash             *string `protobuf:\"bytes,2,req,name=Hash\" json:\"Hash,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *UpdateUserCommand) Reset()                    { *m = UpdateUserCommand{} }\nfunc (m *UpdateUserCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*UpdateUserCommand) ProtoMessage()               {}\nfunc (*UpdateUserCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{27} }\n\nfunc (m *UpdateUserCommand) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *UpdateUserCommand) GetHash() string {\n\tif m != nil && m.Hash != nil {\n\t\treturn *m.Hash\n\t}\n\treturn \"\"\n}\n\nvar E_UpdateUserCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*UpdateUserCommand)(nil),\n\tField:         115,\n\tName:          \"meta.UpdateUserCommand.command\",\n\tTag:           \"bytes,115,opt,name=command\",\n}\n\ntype SetPrivilegeCommand struct {\n\tUsername         *string `protobuf:\"bytes,1,req,name=Username\" json:\"Username,omitempty\"`\n\tDatabase         *string `protobuf:\"bytes,2,req,name=Database\" json:\"Database,omitempty\"`\n\tPrivilege        *int32  `protobuf:\"varint,3,req,name=Privilege\" json:\"Privilege,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *SetPrivilegeCommand) Reset()                    { *m = SetPrivilegeCommand{} }\nfunc (m *SetPrivilegeCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*SetPrivilegeCommand) ProtoMessage()               {}\nfunc (*SetPrivilegeCommand) Descriptor() ([]byte, []int) { return 
fileDescriptorMeta, []int{28} }\n\nfunc (m *SetPrivilegeCommand) GetUsername() string {\n\tif m != nil && m.Username != nil {\n\t\treturn *m.Username\n\t}\n\treturn \"\"\n}\n\nfunc (m *SetPrivilegeCommand) GetDatabase() string {\n\tif m != nil && m.Database != nil {\n\t\treturn *m.Database\n\t}\n\treturn \"\"\n}\n\nfunc (m *SetPrivilegeCommand) GetPrivilege() int32 {\n\tif m != nil && m.Privilege != nil {\n\t\treturn *m.Privilege\n\t}\n\treturn 0\n}\n\nvar E_SetPrivilegeCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*SetPrivilegeCommand)(nil),\n\tField:         116,\n\tName:          \"meta.SetPrivilegeCommand.command\",\n\tTag:           \"bytes,116,opt,name=command\",\n}\n\ntype SetDataCommand struct {\n\tData             *Data  `protobuf:\"bytes,1,req,name=Data\" json:\"Data,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *SetDataCommand) Reset()                    { *m = SetDataCommand{} }\nfunc (m *SetDataCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*SetDataCommand) ProtoMessage()               {}\nfunc (*SetDataCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{29} }\n\nfunc (m *SetDataCommand) GetData() *Data {\n\tif m != nil {\n\t\treturn m.Data\n\t}\n\treturn nil\n}\n\nvar E_SetDataCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*SetDataCommand)(nil),\n\tField:         117,\n\tName:          \"meta.SetDataCommand.command\",\n\tTag:           \"bytes,117,opt,name=command\",\n}\n\ntype SetAdminPrivilegeCommand struct {\n\tUsername         *string `protobuf:\"bytes,1,req,name=Username\" json:\"Username,omitempty\"`\n\tAdmin            *bool   `protobuf:\"varint,2,req,name=Admin\" json:\"Admin,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *SetAdminPrivilegeCommand) Reset()                    { *m = SetAdminPrivilegeCommand{} }\nfunc (m *SetAdminPrivilegeCommand) String() 
string            { return proto.CompactTextString(m) }\nfunc (*SetAdminPrivilegeCommand) ProtoMessage()               {}\nfunc (*SetAdminPrivilegeCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{30} }\n\nfunc (m *SetAdminPrivilegeCommand) GetUsername() string {\n\tif m != nil && m.Username != nil {\n\t\treturn *m.Username\n\t}\n\treturn \"\"\n}\n\nfunc (m *SetAdminPrivilegeCommand) GetAdmin() bool {\n\tif m != nil && m.Admin != nil {\n\t\treturn *m.Admin\n\t}\n\treturn false\n}\n\nvar E_SetAdminPrivilegeCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*SetAdminPrivilegeCommand)(nil),\n\tField:         118,\n\tName:          \"meta.SetAdminPrivilegeCommand.command\",\n\tTag:           \"bytes,118,opt,name=command\",\n}\n\ntype UpdateNodeCommand struct {\n\tID               *uint64 `protobuf:\"varint,1,req,name=ID\" json:\"ID,omitempty\"`\n\tHost             *string `protobuf:\"bytes,2,req,name=Host\" json:\"Host,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *UpdateNodeCommand) Reset()                    { *m = UpdateNodeCommand{} }\nfunc (m *UpdateNodeCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*UpdateNodeCommand) ProtoMessage()               {}\nfunc (*UpdateNodeCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{31} }\n\nfunc (m *UpdateNodeCommand) GetID() uint64 {\n\tif m != nil && m.ID != nil {\n\t\treturn *m.ID\n\t}\n\treturn 0\n}\n\nfunc (m *UpdateNodeCommand) GetHost() string {\n\tif m != nil && m.Host != nil {\n\t\treturn *m.Host\n\t}\n\treturn \"\"\n}\n\nvar E_UpdateNodeCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*UpdateNodeCommand)(nil),\n\tField:         119,\n\tName:          \"meta.UpdateNodeCommand.command\",\n\tTag:           \"bytes,119,opt,name=command\",\n}\n\ntype CreateSubscriptionCommand struct {\n\tName             *string  
`protobuf:\"bytes,1,req,name=Name\" json:\"Name,omitempty\"`\n\tDatabase         *string  `protobuf:\"bytes,2,req,name=Database\" json:\"Database,omitempty\"`\n\tRetentionPolicy  *string  `protobuf:\"bytes,3,req,name=RetentionPolicy\" json:\"RetentionPolicy,omitempty\"`\n\tMode             *string  `protobuf:\"bytes,4,req,name=Mode\" json:\"Mode,omitempty\"`\n\tDestinations     []string `protobuf:\"bytes,5,rep,name=Destinations\" json:\"Destinations,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *CreateSubscriptionCommand) Reset()                    { *m = CreateSubscriptionCommand{} }\nfunc (m *CreateSubscriptionCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*CreateSubscriptionCommand) ProtoMessage()               {}\nfunc (*CreateSubscriptionCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{32} }\n\nfunc (m *CreateSubscriptionCommand) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateSubscriptionCommand) GetDatabase() string {\n\tif m != nil && m.Database != nil {\n\t\treturn *m.Database\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateSubscriptionCommand) GetRetentionPolicy() string {\n\tif m != nil && m.RetentionPolicy != nil {\n\t\treturn *m.RetentionPolicy\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateSubscriptionCommand) GetMode() string {\n\tif m != nil && m.Mode != nil {\n\t\treturn *m.Mode\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateSubscriptionCommand) GetDestinations() []string {\n\tif m != nil {\n\t\treturn m.Destinations\n\t}\n\treturn nil\n}\n\nvar E_CreateSubscriptionCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*CreateSubscriptionCommand)(nil),\n\tField:         121,\n\tName:          \"meta.CreateSubscriptionCommand.command\",\n\tTag:           \"bytes,121,opt,name=command\",\n}\n\ntype DropSubscriptionCommand struct {\n\tName             *string 
`protobuf:\"bytes,1,req,name=Name\" json:\"Name,omitempty\"`\n\tDatabase         *string `protobuf:\"bytes,2,req,name=Database\" json:\"Database,omitempty\"`\n\tRetentionPolicy  *string `protobuf:\"bytes,3,req,name=RetentionPolicy\" json:\"RetentionPolicy,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *DropSubscriptionCommand) Reset()                    { *m = DropSubscriptionCommand{} }\nfunc (m *DropSubscriptionCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*DropSubscriptionCommand) ProtoMessage()               {}\nfunc (*DropSubscriptionCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{33} }\n\nfunc (m *DropSubscriptionCommand) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *DropSubscriptionCommand) GetDatabase() string {\n\tif m != nil && m.Database != nil {\n\t\treturn *m.Database\n\t}\n\treturn \"\"\n}\n\nfunc (m *DropSubscriptionCommand) GetRetentionPolicy() string {\n\tif m != nil && m.RetentionPolicy != nil {\n\t\treturn *m.RetentionPolicy\n\t}\n\treturn \"\"\n}\n\nvar E_DropSubscriptionCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*DropSubscriptionCommand)(nil),\n\tField:         122,\n\tName:          \"meta.DropSubscriptionCommand.command\",\n\tTag:           \"bytes,122,opt,name=command\",\n}\n\ntype RemovePeerCommand struct {\n\tID               *uint64 `protobuf:\"varint,1,opt,name=ID\" json:\"ID,omitempty\"`\n\tAddr             *string `protobuf:\"bytes,2,req,name=Addr\" json:\"Addr,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *RemovePeerCommand) Reset()                    { *m = RemovePeerCommand{} }\nfunc (m *RemovePeerCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*RemovePeerCommand) ProtoMessage()               {}\nfunc (*RemovePeerCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{34} 
}\n\nfunc (m *RemovePeerCommand) GetID() uint64 {\n\tif m != nil && m.ID != nil {\n\t\treturn *m.ID\n\t}\n\treturn 0\n}\n\nfunc (m *RemovePeerCommand) GetAddr() string {\n\tif m != nil && m.Addr != nil {\n\t\treturn *m.Addr\n\t}\n\treturn \"\"\n}\n\nvar E_RemovePeerCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*RemovePeerCommand)(nil),\n\tField:         123,\n\tName:          \"meta.RemovePeerCommand.command\",\n\tTag:           \"bytes,123,opt,name=command\",\n}\n\ntype CreateMetaNodeCommand struct {\n\tHTTPAddr         *string `protobuf:\"bytes,1,req,name=HTTPAddr\" json:\"HTTPAddr,omitempty\"`\n\tTCPAddr          *string `protobuf:\"bytes,2,req,name=TCPAddr\" json:\"TCPAddr,omitempty\"`\n\tRand             *uint64 `protobuf:\"varint,3,req,name=Rand\" json:\"Rand,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *CreateMetaNodeCommand) Reset()                    { *m = CreateMetaNodeCommand{} }\nfunc (m *CreateMetaNodeCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*CreateMetaNodeCommand) ProtoMessage()               {}\nfunc (*CreateMetaNodeCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{35} }\n\nfunc (m *CreateMetaNodeCommand) GetHTTPAddr() string {\n\tif m != nil && m.HTTPAddr != nil {\n\t\treturn *m.HTTPAddr\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateMetaNodeCommand) GetTCPAddr() string {\n\tif m != nil && m.TCPAddr != nil {\n\t\treturn *m.TCPAddr\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateMetaNodeCommand) GetRand() uint64 {\n\tif m != nil && m.Rand != nil {\n\t\treturn *m.Rand\n\t}\n\treturn 0\n}\n\nvar E_CreateMetaNodeCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*CreateMetaNodeCommand)(nil),\n\tField:         124,\n\tName:          \"meta.CreateMetaNodeCommand.command\",\n\tTag:           \"bytes,124,opt,name=command\",\n}\n\ntype CreateDataNodeCommand struct {\n\tHTTPAddr         *string 
`protobuf:\"bytes,1,req,name=HTTPAddr\" json:\"HTTPAddr,omitempty\"`\n\tTCPAddr          *string `protobuf:\"bytes,2,req,name=TCPAddr\" json:\"TCPAddr,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *CreateDataNodeCommand) Reset()                    { *m = CreateDataNodeCommand{} }\nfunc (m *CreateDataNodeCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*CreateDataNodeCommand) ProtoMessage()               {}\nfunc (*CreateDataNodeCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{36} }\n\nfunc (m *CreateDataNodeCommand) GetHTTPAddr() string {\n\tif m != nil && m.HTTPAddr != nil {\n\t\treturn *m.HTTPAddr\n\t}\n\treturn \"\"\n}\n\nfunc (m *CreateDataNodeCommand) GetTCPAddr() string {\n\tif m != nil && m.TCPAddr != nil {\n\t\treturn *m.TCPAddr\n\t}\n\treturn \"\"\n}\n\nvar E_CreateDataNodeCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*CreateDataNodeCommand)(nil),\n\tField:         125,\n\tName:          \"meta.CreateDataNodeCommand.command\",\n\tTag:           \"bytes,125,opt,name=command\",\n}\n\ntype UpdateDataNodeCommand struct {\n\tID               *uint64 `protobuf:\"varint,1,req,name=ID\" json:\"ID,omitempty\"`\n\tHost             *string `protobuf:\"bytes,2,req,name=Host\" json:\"Host,omitempty\"`\n\tTCPHost          *string `protobuf:\"bytes,3,req,name=TCPHost\" json:\"TCPHost,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *UpdateDataNodeCommand) Reset()                    { *m = UpdateDataNodeCommand{} }\nfunc (m *UpdateDataNodeCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*UpdateDataNodeCommand) ProtoMessage()               {}\nfunc (*UpdateDataNodeCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{37} }\n\nfunc (m *UpdateDataNodeCommand) GetID() uint64 {\n\tif m != nil && m.ID != nil {\n\t\treturn *m.ID\n\t}\n\treturn 0\n}\n\nfunc (m *UpdateDataNodeCommand) 
GetHost() string {\n\tif m != nil && m.Host != nil {\n\t\treturn *m.Host\n\t}\n\treturn \"\"\n}\n\nfunc (m *UpdateDataNodeCommand) GetTCPHost() string {\n\tif m != nil && m.TCPHost != nil {\n\t\treturn *m.TCPHost\n\t}\n\treturn \"\"\n}\n\nvar E_UpdateDataNodeCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*UpdateDataNodeCommand)(nil),\n\tField:         126,\n\tName:          \"meta.UpdateDataNodeCommand.command\",\n\tTag:           \"bytes,126,opt,name=command\",\n}\n\ntype DeleteMetaNodeCommand struct {\n\tID               *uint64 `protobuf:\"varint,1,req,name=ID\" json:\"ID,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *DeleteMetaNodeCommand) Reset()                    { *m = DeleteMetaNodeCommand{} }\nfunc (m *DeleteMetaNodeCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*DeleteMetaNodeCommand) ProtoMessage()               {}\nfunc (*DeleteMetaNodeCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{38} }\n\nfunc (m *DeleteMetaNodeCommand) GetID() uint64 {\n\tif m != nil && m.ID != nil {\n\t\treturn *m.ID\n\t}\n\treturn 0\n}\n\nvar E_DeleteMetaNodeCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*DeleteMetaNodeCommand)(nil),\n\tField:         127,\n\tName:          \"meta.DeleteMetaNodeCommand.command\",\n\tTag:           \"bytes,127,opt,name=command\",\n}\n\ntype DeleteDataNodeCommand struct {\n\tID               *uint64 `protobuf:\"varint,1,req,name=ID\" json:\"ID,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *DeleteDataNodeCommand) Reset()                    { *m = DeleteDataNodeCommand{} }\nfunc (m *DeleteDataNodeCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*DeleteDataNodeCommand) ProtoMessage()               {}\nfunc (*DeleteDataNodeCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{39} }\n\nfunc (m 
*DeleteDataNodeCommand) GetID() uint64 {\n\tif m != nil && m.ID != nil {\n\t\treturn *m.ID\n\t}\n\treturn 0\n}\n\nvar E_DeleteDataNodeCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*DeleteDataNodeCommand)(nil),\n\tField:         128,\n\tName:          \"meta.DeleteDataNodeCommand.command\",\n\tTag:           \"bytes,128,opt,name=command\",\n}\n\ntype Response struct {\n\tOK               *bool   `protobuf:\"varint,1,req,name=OK\" json:\"OK,omitempty\"`\n\tError            *string `protobuf:\"bytes,2,opt,name=Error\" json:\"Error,omitempty\"`\n\tIndex            *uint64 `protobuf:\"varint,3,opt,name=Index\" json:\"Index,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *Response) Reset()                    { *m = Response{} }\nfunc (m *Response) String() string            { return proto.CompactTextString(m) }\nfunc (*Response) ProtoMessage()               {}\nfunc (*Response) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{40} }\n\nfunc (m *Response) GetOK() bool {\n\tif m != nil && m.OK != nil {\n\t\treturn *m.OK\n\t}\n\treturn false\n}\n\nfunc (m *Response) GetError() string {\n\tif m != nil && m.Error != nil {\n\t\treturn *m.Error\n\t}\n\treturn \"\"\n}\n\nfunc (m *Response) GetIndex() uint64 {\n\tif m != nil && m.Index != nil {\n\t\treturn *m.Index\n\t}\n\treturn 0\n}\n\n// SetMetaNodeCommand is for the initial metanode in a cluster or\n// if the single host restarts and its hostname changes, this will update it\ntype SetMetaNodeCommand struct {\n\tHTTPAddr         *string `protobuf:\"bytes,1,req,name=HTTPAddr\" json:\"HTTPAddr,omitempty\"`\n\tTCPAddr          *string `protobuf:\"bytes,2,req,name=TCPAddr\" json:\"TCPAddr,omitempty\"`\n\tRand             *uint64 `protobuf:\"varint,3,req,name=Rand\" json:\"Rand,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *SetMetaNodeCommand) Reset()                    { *m = SetMetaNodeCommand{} }\nfunc (m *SetMetaNodeCommand) 
String() string            { return proto.CompactTextString(m) }\nfunc (*SetMetaNodeCommand) ProtoMessage()               {}\nfunc (*SetMetaNodeCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{41} }\n\nfunc (m *SetMetaNodeCommand) GetHTTPAddr() string {\n\tif m != nil && m.HTTPAddr != nil {\n\t\treturn *m.HTTPAddr\n\t}\n\treturn \"\"\n}\n\nfunc (m *SetMetaNodeCommand) GetTCPAddr() string {\n\tif m != nil && m.TCPAddr != nil {\n\t\treturn *m.TCPAddr\n\t}\n\treturn \"\"\n}\n\nfunc (m *SetMetaNodeCommand) GetRand() uint64 {\n\tif m != nil && m.Rand != nil {\n\t\treturn *m.Rand\n\t}\n\treturn 0\n}\n\nvar E_SetMetaNodeCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*SetMetaNodeCommand)(nil),\n\tField:         129,\n\tName:          \"meta.SetMetaNodeCommand.command\",\n\tTag:           \"bytes,129,opt,name=command\",\n}\n\ntype DropShardCommand struct {\n\tID               *uint64 `protobuf:\"varint,1,req,name=ID\" json:\"ID,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *DropShardCommand) Reset()                    { *m = DropShardCommand{} }\nfunc (m *DropShardCommand) String() string            { return proto.CompactTextString(m) }\nfunc (*DropShardCommand) ProtoMessage()               {}\nfunc (*DropShardCommand) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{42} }\n\nfunc (m *DropShardCommand) GetID() uint64 {\n\tif m != nil && m.ID != nil {\n\t\treturn *m.ID\n\t}\n\treturn 0\n}\n\nvar E_DropShardCommand_Command = &proto.ExtensionDesc{\n\tExtendedType:  (*Command)(nil),\n\tExtensionType: (*DropShardCommand)(nil),\n\tField:         130,\n\tName:          \"meta.DropShardCommand.command\",\n\tTag:           \"bytes,130,opt,name=command\",\n}\n\nfunc init() {\n\tproto.RegisterType((*Data)(nil), \"meta.Data\")\n\tproto.RegisterType((*NodeInfo)(nil), \"meta.NodeInfo\")\n\tproto.RegisterType((*DatabaseInfo)(nil), 
\"meta.DatabaseInfo\")\n\tproto.RegisterType((*RetentionPolicySpec)(nil), \"meta.RetentionPolicySpec\")\n\tproto.RegisterType((*RetentionPolicyInfo)(nil), \"meta.RetentionPolicyInfo\")\n\tproto.RegisterType((*ShardGroupInfo)(nil), \"meta.ShardGroupInfo\")\n\tproto.RegisterType((*ShardInfo)(nil), \"meta.ShardInfo\")\n\tproto.RegisterType((*SubscriptionInfo)(nil), \"meta.SubscriptionInfo\")\n\tproto.RegisterType((*ShardOwner)(nil), \"meta.ShardOwner\")\n\tproto.RegisterType((*ContinuousQueryInfo)(nil), \"meta.ContinuousQueryInfo\")\n\tproto.RegisterType((*UserInfo)(nil), \"meta.UserInfo\")\n\tproto.RegisterType((*UserPrivilege)(nil), \"meta.UserPrivilege\")\n\tproto.RegisterType((*Command)(nil), \"meta.Command\")\n\tproto.RegisterType((*CreateNodeCommand)(nil), \"meta.CreateNodeCommand\")\n\tproto.RegisterType((*DeleteNodeCommand)(nil), \"meta.DeleteNodeCommand\")\n\tproto.RegisterType((*CreateDatabaseCommand)(nil), \"meta.CreateDatabaseCommand\")\n\tproto.RegisterType((*DropDatabaseCommand)(nil), \"meta.DropDatabaseCommand\")\n\tproto.RegisterType((*CreateRetentionPolicyCommand)(nil), \"meta.CreateRetentionPolicyCommand\")\n\tproto.RegisterType((*DropRetentionPolicyCommand)(nil), \"meta.DropRetentionPolicyCommand\")\n\tproto.RegisterType((*SetDefaultRetentionPolicyCommand)(nil), \"meta.SetDefaultRetentionPolicyCommand\")\n\tproto.RegisterType((*UpdateRetentionPolicyCommand)(nil), \"meta.UpdateRetentionPolicyCommand\")\n\tproto.RegisterType((*CreateShardGroupCommand)(nil), \"meta.CreateShardGroupCommand\")\n\tproto.RegisterType((*DeleteShardGroupCommand)(nil), \"meta.DeleteShardGroupCommand\")\n\tproto.RegisterType((*CreateContinuousQueryCommand)(nil), \"meta.CreateContinuousQueryCommand\")\n\tproto.RegisterType((*DropContinuousQueryCommand)(nil), \"meta.DropContinuousQueryCommand\")\n\tproto.RegisterType((*CreateUserCommand)(nil), \"meta.CreateUserCommand\")\n\tproto.RegisterType((*DropUserCommand)(nil), 
\"meta.DropUserCommand\")\n\tproto.RegisterType((*UpdateUserCommand)(nil), \"meta.UpdateUserCommand\")\n\tproto.RegisterType((*SetPrivilegeCommand)(nil), \"meta.SetPrivilegeCommand\")\n\tproto.RegisterType((*SetDataCommand)(nil), \"meta.SetDataCommand\")\n\tproto.RegisterType((*SetAdminPrivilegeCommand)(nil), \"meta.SetAdminPrivilegeCommand\")\n\tproto.RegisterType((*UpdateNodeCommand)(nil), \"meta.UpdateNodeCommand\")\n\tproto.RegisterType((*CreateSubscriptionCommand)(nil), \"meta.CreateSubscriptionCommand\")\n\tproto.RegisterType((*DropSubscriptionCommand)(nil), \"meta.DropSubscriptionCommand\")\n\tproto.RegisterType((*RemovePeerCommand)(nil), \"meta.RemovePeerCommand\")\n\tproto.RegisterType((*CreateMetaNodeCommand)(nil), \"meta.CreateMetaNodeCommand\")\n\tproto.RegisterType((*CreateDataNodeCommand)(nil), \"meta.CreateDataNodeCommand\")\n\tproto.RegisterType((*UpdateDataNodeCommand)(nil), \"meta.UpdateDataNodeCommand\")\n\tproto.RegisterType((*DeleteMetaNodeCommand)(nil), \"meta.DeleteMetaNodeCommand\")\n\tproto.RegisterType((*DeleteDataNodeCommand)(nil), \"meta.DeleteDataNodeCommand\")\n\tproto.RegisterType((*Response)(nil), \"meta.Response\")\n\tproto.RegisterType((*SetMetaNodeCommand)(nil), \"meta.SetMetaNodeCommand\")\n\tproto.RegisterType((*DropShardCommand)(nil), \"meta.DropShardCommand\")\n\tproto.RegisterEnum(\"meta.Command_Type\", Command_Type_name, 
Command_Type_value)\n\tproto.RegisterExtension(E_CreateNodeCommand_Command)\n\tproto.RegisterExtension(E_DeleteNodeCommand_Command)\n\tproto.RegisterExtension(E_CreateDatabaseCommand_Command)\n\tproto.RegisterExtension(E_DropDatabaseCommand_Command)\n\tproto.RegisterExtension(E_CreateRetentionPolicyCommand_Command)\n\tproto.RegisterExtension(E_DropRetentionPolicyCommand_Command)\n\tproto.RegisterExtension(E_SetDefaultRetentionPolicyCommand_Command)\n\tproto.RegisterExtension(E_UpdateRetentionPolicyCommand_Command)\n\tproto.RegisterExtension(E_CreateShardGroupCommand_Command)\n\tproto.RegisterExtension(E_DeleteShardGroupCommand_Command)\n\tproto.RegisterExtension(E_CreateContinuousQueryCommand_Command)\n\tproto.RegisterExtension(E_DropContinuousQueryCommand_Command)\n\tproto.RegisterExtension(E_CreateUserCommand_Command)\n\tproto.RegisterExtension(E_DropUserCommand_Command)\n\tproto.RegisterExtension(E_UpdateUserCommand_Command)\n\tproto.RegisterExtension(E_SetPrivilegeCommand_Command)\n\tproto.RegisterExtension(E_SetDataCommand_Command)\n\tproto.RegisterExtension(E_SetAdminPrivilegeCommand_Command)\n\tproto.RegisterExtension(E_UpdateNodeCommand_Command)\n\tproto.RegisterExtension(E_CreateSubscriptionCommand_Command)\n\tproto.RegisterExtension(E_DropSubscriptionCommand_Command)\n\tproto.RegisterExtension(E_RemovePeerCommand_Command)\n\tproto.RegisterExtension(E_CreateMetaNodeCommand_Command)\n\tproto.RegisterExtension(E_CreateDataNodeCommand_Command)\n\tproto.RegisterExtension(E_UpdateDataNodeCommand_Command)\n\tproto.RegisterExtension(E_DeleteMetaNodeCommand_Command)\n\tproto.RegisterExtension(E_DeleteDataNodeCommand_Command)\n\tproto.RegisterExtension(E_SetMetaNodeCommand_Command)\n\tproto.RegisterExtension(E_DropShardCommand_Command)\n}\n\nfunc init() { proto.RegisterFile(\"internal/meta.proto\", fileDescriptorMeta) }\n\nvar fileDescriptorMeta = []byte{\n\t// 1617 bytes of a gzipped FileDescriptorProto\n\t0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 
0xff, 0x9c, 0x58, 0x5b, 0x6f, 0x1b, 0xc5,\n\t0x17, 0xd7, 0xda, 0x6b, 0xc7, 0x7b, 0x62, 0x27, 0xf6, 0x38, 0x97, 0x4d, 0x9b, 0xa4, 0xee, 0xe8,\n\t0x7f, 0xf1, 0xff, 0x2f, 0x51, 0x24, 0x2b, 0x15, 0x42, 0x5c, 0xdb, 0xb8, 0xa5, 0x11, 0x4a, 0x1a,\n\t0x62, 0x17, 0xde, 0xaa, 0x6e, 0xed, 0x49, 0xb3, 0x60, 0xef, 0x9a, 0xdd, 0x75, 0xd3, 0x50, 0x68,\n\t0x03, 0x12, 0x42, 0x20, 0x21, 0xc1, 0x0b, 0x2f, 0x3c, 0xf1, 0xc6, 0x37, 0x40, 0x3c, 0xf0, 0x29,\n\t0xf8, 0x42, 0x68, 0x66, 0xf6, 0x32, 0xbb, 0x3b, 0xb3, 0x69, 0xfb, 0x66, 0xcf, 0x39, 0x73, 0x7e,\n\t0xbf, 0x39, 0xb7, 0x39, 0xb3, 0xd0, 0xb6, 0x9d, 0x80, 0x78, 0x8e, 0x35, 0x79, 0x7d, 0x4a, 0x02,\n\t0xeb, 0xda, 0xcc, 0x73, 0x03, 0x17, 0xe9, 0xf4, 0x37, 0xfe, 0xad, 0x04, 0x7a, 0xdf, 0x0a, 0x2c,\n\t0x54, 0x07, 0x7d, 0x48, 0xbc, 0xa9, 0xa9, 0x75, 0x4a, 0x5d, 0x1d, 0x35, 0xa0, 0xb2, 0xe7, 0x8c,\n\t0xc9, 0x13, 0xb3, 0xc4, 0xfe, 0xb6, 0xc0, 0xd8, 0x9d, 0xcc, 0xfd, 0x80, 0x78, 0x7b, 0x7d, 0xb3,\n\t0xcc, 0x96, 0xb6, 0xa0, 0x72, 0xe0, 0x8e, 0x89, 0x6f, 0xea, 0x9d, 0x72, 0x77, 0xb1, 0xb7, 0x74,\n\t0x8d, 0x99, 0xa6, 0x4b, 0x7b, 0xce, 0xb1, 0x8b, 0xfe, 0x0d, 0x06, 0x35, 0xfb, 0xd0, 0xf2, 0x89,\n\t0x6f, 0x56, 0x98, 0x0a, 0xe2, 0x2a, 0xd1, 0x32, 0x53, 0xdb, 0x82, 0xca, 0x3d, 0x9f, 0x78, 0xbe,\n\t0x59, 0x15, 0xad, 0xd0, 0x25, 0x26, 0x6e, 0x81, 0xb1, 0x6f, 0x3d, 0x61, 0x46, 0xfb, 0xe6, 0x02,\n\t0xc3, 0x5d, 0x87, 0xe5, 0x7d, 0xeb, 0xc9, 0xe0, 0xc4, 0xf2, 0xc6, 0x1f, 0x78, 0xee, 0x7c, 0xb6,\n\t0xd7, 0x37, 0x6b, 0x4c, 0x80, 0x00, 0x22, 0xc1, 0x5e, 0xdf, 0x34, 0xd8, 0xda, 0x55, 0xce, 0x82,\n\t0x13, 0x05, 0x29, 0xd1, 0xab, 0x60, 0xec, 0x93, 0x48, 0x65, 0x51, 0xa6, 0x82, 0xaf, 0x43, 0x2d,\n\t0x56, 0x07, 0x28, 0xed, 0xf5, 0x43, 0x27, 0xd5, 0x41, 0xbf, 0xe3, 0xfa, 0x01, 0xf3, 0x91, 0x81,\n\t0x96, 0x61, 0x61, 0xb8, 0x7b, 0xc8, 0x16, 0xca, 0x1d, 0xad, 0x6b, 0xe0, 0xdf, 0x35, 0xa8, 0xa7,\n\t0x0e, 0x5b, 0x07, 0xfd, 0xc0, 0x9a, 0x12, 0xb6, 0xdb, 0x40, 0xdb, 0xb0, 0xd6, 0x27, 0xc7, 0xd6,\n\t0x7c, 0x12, 0x1c, 0x91, 0x80, 0x38, 0x81, 0xed, 0x3a, 0x87, 0xee, 0xc4, 
0x1e, 0x9d, 0x85, 0xf6,\n\t0x76, 0xa0, 0x95, 0x16, 0xd8, 0xc4, 0x37, 0xcb, 0x8c, 0xe0, 0x06, 0x27, 0x98, 0xd9, 0xc7, 0x30,\n\t0x76, 0xa0, 0xb5, 0xeb, 0x3a, 0x81, 0xed, 0xcc, 0xdd, 0xb9, 0xff, 0xd1, 0x9c, 0x78, 0x76, 0x1c,\n\t0xa2, 0x70, 0x57, 0x5a, 0xcc, 0x76, 0xe1, 0x11, 0xb4, 0x33, 0xc6, 0x06, 0x33, 0x32, 0x12, 0x08,\n\t0x6b, 0x5d, 0x03, 0x35, 0xa1, 0xd6, 0x9f, 0x7b, 0x16, 0xd5, 0x31, 0x4b, 0x1d, 0xad, 0x5b, 0x46,\n\t0x97, 0x00, 0x25, 0x81, 0x88, 0x65, 0x65, 0x26, 0x6b, 0x42, 0xed, 0x88, 0xcc, 0x26, 0xf6, 0xc8,\n\t0x3a, 0x30, 0xf5, 0x8e, 0xd6, 0x6d, 0xe0, 0xbf, 0xb4, 0x1c, 0x8a, 0xc4, 0x2d, 0x69, 0x94, 0x52,\n\t0x01, 0x4a, 0x29, 0x87, 0x52, 0xea, 0x36, 0xd0, 0xff, 0x60, 0x31, 0xd1, 0x8e, 0x52, 0x6f, 0x85,\n\t0x1f, 0x5d, 0xc8, 0x1a, 0x0a, 0xfc, 0x1a, 0x34, 0x06, 0xf3, 0x87, 0xfe, 0xc8, 0xb3, 0x67, 0xd4,\n\t0x64, 0x94, 0x84, 0x6b, 0xa1, 0xb2, 0x20, 0x62, 0x4e, 0xfa, 0x5e, 0x83, 0xa5, 0x8c, 0x05, 0x31,\n\t0x1b, 0x5a, 0x60, 0x0c, 0x02, 0xcb, 0x0b, 0x86, 0xf6, 0x94, 0x84, 0xcc, 0x97, 0x61, 0xe1, 0x96,\n\t0x33, 0x66, 0x0b, 0x9c, 0x6e, 0x0b, 0x8c, 0x3e, 0x99, 0x90, 0x80, 0x8c, 0x6f, 0x04, 0x8c, 0x6f,\n\t0x19, 0x5d, 0x81, 0x2a, 0x33, 0x1a, 0x51, 0x5d, 0x16, 0xa8, 0x32, 0x8c, 0x36, 0x2c, 0x0e, 0xbd,\n\t0xb9, 0x33, 0xb2, 0xf8, 0xae, 0x2a, 0xf5, 0x2e, 0xbe, 0x0b, 0x46, 0xa2, 0x21, 0xb2, 0x58, 0x81,\n\t0xda, 0xdd, 0x53, 0x87, 0xd6, 0xa9, 0x6f, 0x96, 0x3a, 0xe5, 0xae, 0x7e, 0xb3, 0x64, 0x6a, 0xa8,\n\t0x03, 0x55, 0xb6, 0x1a, 0x25, 0x50, 0x53, 0x00, 0x61, 0x02, 0xdc, 0x87, 0x66, 0xf6, 0xc0, 0x99,\n\t0xc0, 0xd4, 0x41, 0xdf, 0x77, 0xc7, 0x24, 0xcc, 0xce, 0x15, 0xa8, 0xf7, 0x89, 0x1f, 0xd8, 0x8e,\n\t0xc5, 0x5d, 0x47, 0xed, 0x1a, 0x78, 0x13, 0x20, 0xb1, 0x89, 0x96, 0xa0, 0x1a, 0x96, 0x2e, 0xe3,\n\t0x86, 0x7b, 0xd0, 0x96, 0x24, 0x5f, 0x06, 0xa6, 0x01, 0x15, 0x26, 0xe2, 0x38, 0xf8, 0x3e, 0xd4,\n\t0xe2, 0x6e, 0x90, 0xe3, 0x73, 0xc7, 0xf2, 0x4f, 0x42, 0x3e, 0x0d, 0xa8, 0xdc, 0x18, 0x4f, 0x6d,\n\t0x9e, 0x17, 0x35, 0xf4, 0x5f, 0x80, 0x43, 0xcf, 0x7e, 0x6c, 0x4f, 0xc8, 0xa3, 0x38, 0xff, 
0xdb,\n\t0x49, 0x73, 0x89, 0x65, 0x78, 0x07, 0x1a, 0xa9, 0x05, 0x96, 0x7f, 0x61, 0xd1, 0x86, 0x40, 0x2d,\n\t0x30, 0x62, 0x31, 0x43, 0xab, 0xe0, 0xbf, 0xab, 0xb0, 0xb0, 0xeb, 0x4e, 0xa7, 0x96, 0x33, 0x46,\n\t0x1d, 0xd0, 0x83, 0xb3, 0x19, 0x57, 0x5e, 0x8a, 0x9a, 0x5c, 0x28, 0xbc, 0x36, 0x3c, 0x9b, 0x11,\n\t0xfc, 0x6b, 0x15, 0x74, 0xfa, 0x03, 0xad, 0x42, 0x6b, 0xd7, 0x23, 0x56, 0x40, 0xa8, 0x5b, 0x42,\n\t0x95, 0xa6, 0x46, 0x97, 0x79, 0x56, 0x88, 0xcb, 0x25, 0xb4, 0x01, 0xab, 0x5c, 0x3b, 0xe2, 0x13,\n\t0x89, 0xca, 0x68, 0x1d, 0xda, 0x7d, 0xcf, 0x9d, 0x65, 0x05, 0x3a, 0xea, 0xc0, 0x26, 0xdf, 0x93,\n\t0x29, 0xb4, 0x48, 0xa3, 0x82, 0xb6, 0xe1, 0x12, 0xdd, 0xaa, 0x90, 0x57, 0xd1, 0xbf, 0xa0, 0x33,\n\t0x20, 0x81, 0xbc, 0x33, 0x45, 0x5a, 0x0b, 0x14, 0xe7, 0xde, 0x6c, 0xac, 0xc6, 0xa9, 0xa1, 0xcb,\n\t0xb0, 0xce, 0x99, 0x24, 0x25, 0x13, 0x09, 0x0d, 0x2a, 0xe4, 0x27, 0xce, 0x0b, 0x21, 0x39, 0x43,\n\t0x26, 0x59, 0x22, 0x8d, 0xc5, 0xe8, 0x0c, 0x0a, 0x79, 0x3d, 0xf1, 0x33, 0x0d, 0x6d, 0xb4, 0xdc,\n\t0x40, 0x6d, 0x58, 0xa6, 0xdb, 0xc4, 0xc5, 0x25, 0xaa, 0xcb, 0x4f, 0x22, 0x2e, 0x2f, 0x53, 0x0f,\n\t0x0f, 0x48, 0x10, 0xc7, 0x3d, 0x12, 0x34, 0x11, 0x82, 0x25, 0xea, 0x1f, 0x2b, 0xb0, 0xa2, 0xb5,\n\t0x16, 0xda, 0x04, 0x73, 0x40, 0x02, 0x96, 0x7f, 0xb9, 0x1d, 0x28, 0x41, 0x10, 0xc3, 0xdb, 0x46,\n\t0x5b, 0xb0, 0x11, 0x3a, 0x48, 0xa8, 0xbb, 0x48, 0xbc, 0xca, 0x5c, 0xe4, 0xb9, 0x33, 0x99, 0x70,\n\t0x8d, 0x9a, 0x3c, 0x22, 0x53, 0xf7, 0x31, 0x39, 0x24, 0x09, 0xe9, 0xf5, 0x24, 0x63, 0xa2, 0x1b,\n\t0x2d, 0x12, 0x99, 0xe9, 0x64, 0x12, 0x45, 0x1b, 0x54, 0xc4, 0xf9, 0x65, 0x45, 0x97, 0xa8, 0x88,\n\t0xc7, 0x29, 0x6b, 0xf0, 0x72, 0x22, 0xca, 0xee, 0xda, 0x44, 0x6b, 0x80, 0x06, 0x24, 0xc8, 0x6e,\n\t0xd9, 0x42, 0x2b, 0xd0, 0x64, 0x47, 0xa2, 0x31, 0x8f, 0x56, 0xb7, 0xff, 0x5f, 0xab, 0x8d, 0x9b,\n\t0xe7, 0xe7, 0xe7, 0xe7, 0x25, 0x7c, 0x22, 0x29, 0x8f, 0xf8, 0x92, 0x8d, 0x8b, 0xfe, 0xc8, 0x72,\n\t0xc6, 0x7c, 0x2c, 0xe9, 0xbd, 0x01, 0x0b, 0xa3, 0x50, 0xad, 0x91, 0xaa, 0x3b, 0x93, 0x74, 0xb4,\n\t0xee, 
0x62, 0x6f, 0x3d, 0x5c, 0xcc, 0x1a, 0xc5, 0x8f, 0x24, 0x15, 0x97, 0x6a, 0xa3, 0x0d, 0xa8,\n\t0xdc, 0x76, 0xbd, 0x11, 0xaf, 0xf7, 0x5a, 0x01, 0xd0, 0xb1, 0x08, 0x94, 0xb3, 0x89, 0x7f, 0xd1,\n\t0x14, 0x45, 0x9c, 0x69, 0x66, 0x3d, 0x58, 0xce, 0x4f, 0x01, 0x5a, 0xe1, 0x55, 0xdf, 0x7b, 0x4b,\n\t0x49, 0xea, 0x11, 0xdb, 0x7a, 0x59, 0x3c, 0x7d, 0x06, 0x1e, 0xdf, 0x97, 0x76, 0x90, 0x34, 0xab,\n\t0xde, 0x9b, 0x4a, 0x84, 0x13, 0x91, 0x9c, 0xc4, 0x10, 0x1d, 0x7e, 0x0a, 0x3b, 0x91, 0xa4, 0xcf,\n\t0x4a, 0x7d, 0x50, 0x2a, 0xf6, 0xc1, 0x4d, 0x25, 0x43, 0x9b, 0x31, 0xc4, 0xa2, 0x0f, 0xe4, 0x4c,\n\t0xf0, 0xb3, 0xa2, 0x8e, 0x28, 0xe1, 0x19, 0xf9, 0x88, 0x5d, 0x3c, 0xbd, 0xf7, 0x95, 0x0c, 0x3e,\n\t0x65, 0x0c, 0x3a, 0x89, 0x8f, 0x14, 0xf8, 0x3f, 0x68, 0x17, 0xb7, 0xdc, 0x0b, 0x69, 0xdc, 0x56,\n\t0xd2, 0xf8, 0x8c, 0xd1, 0xf8, 0x4f, 0x78, 0xe3, 0x5f, 0x80, 0x83, 0xff, 0xd0, 0x8a, 0x3b, 0xfb,\n\t0x45, 0x44, 0xe8, 0xcc, 0x73, 0x40, 0x4e, 0xd9, 0x42, 0x39, 0x37, 0x36, 0xea, 0xb9, 0xd1, 0xb0,\n\t0x42, 0x47, 0xc3, 0x82, 0x30, 0x4e, 0xc4, 0x30, 0x16, 0x11, 0xc3, 0x3f, 0x6a, 0xca, 0x1b, 0x47,\n\t0x42, 0x7a, 0x09, 0xaa, 0xa9, 0x69, 0xbb, 0x05, 0x06, 0x9d, 0xd3, 0xfc, 0xc0, 0x9a, 0xce, 0xf8,\n\t0xb0, 0xd6, 0x7b, 0x47, 0x49, 0x6a, 0xca, 0x48, 0x6d, 0x89, 0xb9, 0x95, 0xc3, 0xc4, 0x3f, 0x69,\n\t0xca, 0x4b, 0xee, 0x05, 0xf8, 0xac, 0x40, 0x3d, 0xf5, 0xc6, 0x61, 0x8f, 0xae, 0x02, 0x4a, 0x8e,\n\t0x48, 0x49, 0x01, 0x8b, 0x7f, 0xd6, 0x8a, 0xaf, 0xd6, 0x0b, 0x83, 0x1b, 0x0f, 0x67, 0x65, 0x96,\n\t0x74, 0xea, 0xb0, 0xb9, 0xf9, 0xea, 0x93, 0x43, 0x46, 0xd5, 0xf7, 0x6a, 0x84, 0x0a, 0xaa, 0x6f,\n\t0x96, 0xad, 0x3e, 0x05, 0xfe, 0xa9, 0x64, 0x56, 0x78, 0x89, 0x49, 0xb3, 0xe0, 0x6a, 0xf8, 0x3c,\n\t0x7f, 0x07, 0x09, 0x18, 0xf8, 0xe3, 0xdc, 0x34, 0x92, 0xe9, 0xbe, 0xd7, 0x95, 0x96, 0x3d, 0x66,\n\t0x79, 0x35, 0x39, 0x9b, 0x68, 0xf7, 0x44, 0x32, 0xd0, 0x14, 0x1d, 0xa8, 0xe0, 0x04, 0xbe, 0x78,\n\t0x82, 0x9c, 0x51, 0xfc, 0x9d, 0x26, 0x1d, 0x92, 0x68, 0xd0, 0xa8, 0x9a, 0x93, 0x7e, 0xd4, 0x45,\n\t0x61, 0x2c, 0xe5, 0x87, 
0x6a, 0xea, 0xc9, 0x4a, 0xc1, 0x6d, 0x13, 0x88, 0xb7, 0x8d, 0x04, 0x11,\n\t0x3f, 0xc8, 0x0e, 0x65, 0xc8, 0xe4, 0x9f, 0x35, 0x18, 0xfe, 0x62, 0x0f, 0x92, 0x4f, 0x0f, 0xbd,\n\t0x1d, 0x25, 0xcc, 0x9c, 0xc1, 0xac, 0x24, 0x9d, 0x32, 0xb1, 0x87, 0x9f, 0xaa, 0x47, 0x3c, 0xc9,\n\t0x79, 0xe3, 0x1c, 0xe1, 0xe3, 0xc3, 0xbb, 0x4a, 0xc8, 0xc7, 0x0c, 0x72, 0x3b, 0x86, 0x94, 0x02,\n\t0xe0, 0x63, 0xc9, 0x04, 0xa9, 0xfe, 0x12, 0x51, 0x10, 0xd0, 0xd3, 0x7c, 0x40, 0xc5, 0x69, 0xe5,\n\t0x4f, 0xad, 0x60, 0x26, 0x95, 0xbc, 0xd3, 0xd3, 0x21, 0x5d, 0xcf, 0xdf, 0xdf, 0xe5, 0xd4, 0xcb,\n\t0x51, 0x97, 0xbe, 0x1c, 0xe9, 0xb3, 0xd7, 0xe8, 0xbd, 0xa7, 0xe4, 0x7c, 0xc6, 0x38, 0x5f, 0x49,\n\t0x35, 0xdb, 0x3c, 0x3b, 0xda, 0xdb, 0x54, 0x03, 0xf3, 0x2b, 0x33, 0x2f, 0xe8, 0xb7, 0x5f, 0xa4,\n\t0xfa, 0xad, 0x1c, 0x97, 0xc6, 0x2d, 0x37, 0xa6, 0xc7, 0x71, 0xd3, 0x78, 0xdc, 0x6e, 0x8c, 0xc7,\n\t0xde, 0x85, 0x71, 0x7b, 0x2a, 0xc6, 0x2d, 0x67, 0x12, 0x7f, 0xab, 0x29, 0x06, 0x7f, 0x7a, 0xd6,\n\t0x3b, 0xc3, 0xe1, 0x21, 0x03, 0xd1, 0x84, 0xcf, 0x54, 0x09, 0x6a, 0x3c, 0x52, 0xf3, 0x1b, 0x46,\n\t0x3d, 0x54, 0x7e, 0x99, 0x1f, 0x2a, 0x33, 0x68, 0xf8, 0x54, 0xf1, 0xc8, 0x78, 0x01, 0x1a, 0x05,\n\t0xc0, 0x5f, 0xc9, 0xa7, 0x59, 0x11, 0xf8, 0xb9, 0xe2, 0x09, 0xf3, 0xa2, 0x9f, 0xeb, 0x8a, 0x09,\n\t0x3c, 0x13, 0x09, 0x48, 0x71, 0xf0, 0x03, 0xc5, 0x43, 0x49, 0x24, 0x50, 0x80, 0xf0, 0x5c, 0x44,\n\t0x90, 0x1a, 0xc2, 0x96, 0xe2, 0xbd, 0x95, 0x42, 0x78, 0x5b, 0x89, 0x70, 0xae, 0xe5, 0x21, 0xb2,\n\t0x87, 0xd8, 0xa1, 0x73, 0x99, 0x3f, 0x73, 0x1d, 0x9f, 0x50, 0xab, 0x77, 0x3f, 0x64, 0x56, 0x6b,\n\t0xb4, 0x9b, 0xdd, 0xf2, 0x3c, 0xd7, 0x63, 0x4f, 0x12, 0x23, 0xf9, 0x36, 0x4c, 0xe7, 0x3b, 0x1d,\n\t0x9f, 0x6b, 0xb2, 0xe7, 0xde, 0xcb, 0x67, 0x9e, 0xba, 0xfd, 0x7f, 0xcd, 0xb9, 0x9b, 0x71, 0x97,\n\t0xcc, 0xfa, 0xe6, 0x93, 0xfc, 0xc3, 0x32, 0xe5, 0x16, 0x75, 0x61, 0x7d, 0xc3, 0x4d, 0xaf, 0x09,\n\t0x75, 0x2c, 0x18, 0xf9, 0x27, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xc6, 0xc9, 0x45, 0x39, 0x17, 0x00,\n\t0x00,\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/meta/internal/meta.proto",
    "content": "package meta;\n\n//========================================================================\n//\n// Metadata\n//\n//========================================================================\n\nmessage Data {\n\trequired uint64 Term = 1;\n\trequired uint64 Index = 2;\n\trequired uint64 ClusterID = 3;\n\n\trepeated NodeInfo Nodes = 4;\n\trepeated DatabaseInfo Databases = 5;\n\trepeated UserInfo Users = 6;\n\n\trequired uint64 MaxNodeID = 7;\n\trequired uint64 MaxShardGroupID = 8;\n\trequired uint64 MaxShardID = 9;\n\n\t// added for 0.10.0\n\trepeated NodeInfo DataNodes = 10;\n\trepeated NodeInfo MetaNodes = 11;\n}\n\nmessage NodeInfo {\n\trequired uint64 ID = 1;\n\trequired string Host = 2;\n\toptional string TCPHost = 3;\n}\n\nmessage DatabaseInfo {\n\trequired string Name = 1;\n\trequired string DefaultRetentionPolicy = 2;\n\trepeated RetentionPolicyInfo RetentionPolicies = 3;\n\trepeated ContinuousQueryInfo ContinuousQueries = 4;\n}\n\nmessage RetentionPolicySpec {\n\toptional string Name               = 1;\n\toptional int64  Duration           = 2;\n\toptional int64  ShardGroupDuration = 3;\n\toptional uint32 ReplicaN           = 4;\n}\n\nmessage RetentionPolicyInfo {\n\trequired string Name = 1;\n\trequired int64 Duration = 2;\n\trequired int64 ShardGroupDuration = 3;\n\trequired uint32 ReplicaN = 4;\n\trepeated ShardGroupInfo ShardGroups = 5;\n\trepeated SubscriptionInfo Subscriptions = 6;\n}\n\nmessage ShardGroupInfo {\n\trequired uint64 ID = 1;\n\trequired int64 StartTime = 2;\n\trequired int64 EndTime = 3;\n\trequired int64 DeletedAt = 4;\n\trepeated ShardInfo Shards = 5;\n\toptional int64 TruncatedAt = 6;\n}\n\nmessage ShardInfo {\n\trequired uint64 ID = 1;\n\trepeated uint64 OwnerIDs = 2 [deprecated=true];\n\trepeated ShardOwner Owners = 3;\n}\n\nmessage SubscriptionInfo{\n\trequired string Name = 1;\n\trequired string Mode = 2;\n\trepeated string Destinations = 3;\n}\n\nmessage ShardOwner {\n\trequired uint64 NodeID = 1;\n}\n\nmessage 
ContinuousQueryInfo {\n\trequired string Name = 1;\n\trequired string Query = 2;\n}\n\nmessage UserInfo {\n\trequired string Name = 1;\n\trequired string Hash = 2;\n\trequired bool Admin = 3;\n\trepeated UserPrivilege Privileges = 4;\n}\n\nmessage UserPrivilege {\n\trequired string Database = 1;\n\trequired int32 Privilege = 2;\n}\n\n\n//========================================================================\n//\n// COMMANDS\n//\n//========================================================================\n\nmessage Command {\n\textensions 100 to max;\n\n\tenum Type {\n\t\tCreateNodeCommand                = 1;\n\t\tDeleteNodeCommand                = 2;\n\t\tCreateDatabaseCommand            = 3;\n\t\tDropDatabaseCommand              = 4;\n\t\tCreateRetentionPolicyCommand     = 5;\n\t\tDropRetentionPolicyCommand       = 6;\n\t\tSetDefaultRetentionPolicyCommand = 7;\n\t\tUpdateRetentionPolicyCommand     = 8;\n\t\tCreateShardGroupCommand          = 9;\n\t\tDeleteShardGroupCommand          = 10;\n\t\tCreateContinuousQueryCommand     = 11;\n\t\tDropContinuousQueryCommand       = 12;\n\t\tCreateUserCommand                = 13;\n\t\tDropUserCommand                  = 14;\n\t\tUpdateUserCommand                = 15;\n\t\tSetPrivilegeCommand              = 16;\n\t\tSetDataCommand                   = 17;\n\t\tSetAdminPrivilegeCommand         = 18;\n\t\tUpdateNodeCommand                = 19;\n\t\tCreateSubscriptionCommand        = 21;\n\t\tDropSubscriptionCommand          = 22;\n\t\tRemovePeerCommand                = 23;\n\t\tCreateMetaNodeCommand            = 24;\n\t\tCreateDataNodeCommand            = 25;\n\t\tUpdateDataNodeCommand            = 26;\n\t\tDeleteMetaNodeCommand            = 27;\n\t\tDeleteDataNodeCommand            = 28;\n\t\tSetMetaNodeCommand               = 29;\n\t\tDropShardCommand                 = 30;\n\t}\n\n\trequired Type type = 1;\n}\n\n// This isn't used in >= 0.10.0. Kept around for upgrade purposes. 
Instead\n// look at CreateDataNodeCommand and CreateMetaNodeCommand\nmessage CreateNodeCommand {\n\textend Command {\n\t\toptional CreateNodeCommand command = 101;\n\t}\n\trequired string Host = 1;\n\trequired uint64 Rand = 2;\n}\n\nmessage DeleteNodeCommand {\n\textend Command {\n\t\toptional DeleteNodeCommand command = 102;\n\t}\n\trequired uint64 ID = 1;\n\trequired bool Force = 2;\n}\n\nmessage CreateDatabaseCommand {\n\textend Command {\n\t\toptional CreateDatabaseCommand command = 103;\n\t}\n\trequired string Name = 1;\n\toptional RetentionPolicyInfo RetentionPolicy = 2;\n}\n\nmessage DropDatabaseCommand {\n\textend Command {\n\t\toptional DropDatabaseCommand command = 104;\n\t}\n\trequired string Name = 1;\n}\n\nmessage CreateRetentionPolicyCommand {\n\textend Command {\n\t\toptional CreateRetentionPolicyCommand command = 105;\n\t}\n\trequired string Database = 1;\n\trequired RetentionPolicyInfo RetentionPolicy = 2;\n}\n\nmessage DropRetentionPolicyCommand {\n\textend Command {\n\t\toptional DropRetentionPolicyCommand command = 106;\n\t}\n\trequired string Database = 1;\n\trequired string Name = 2;\n}\n\nmessage SetDefaultRetentionPolicyCommand {\n\textend Command {\n\t\toptional SetDefaultRetentionPolicyCommand command = 107;\n\t}\n\trequired string Database = 1;\n\trequired string Name = 2;\n}\n\nmessage UpdateRetentionPolicyCommand {\n\textend Command {\n\t\toptional UpdateRetentionPolicyCommand command = 108;\n\t}\n\trequired string Database = 1;\n\trequired string Name = 2;\n\toptional string NewName = 3;\n\toptional int64 Duration = 4;\n\toptional uint32 ReplicaN = 5;\n}\n\nmessage CreateShardGroupCommand {\n\textend Command {\n\t\toptional CreateShardGroupCommand command = 109;\n\t}\n\trequired string Database = 1;\n\trequired string Policy = 2;\n\trequired int64 Timestamp = 3;\n}\n\nmessage DeleteShardGroupCommand {\n\textend Command {\n\t\toptional DeleteShardGroupCommand command = 110;\n\t}\n\trequired string Database = 1;\n\trequired string Policy 
= 2;\n\trequired uint64 ShardGroupID = 3;\n}\n\nmessage CreateContinuousQueryCommand {\n\textend Command {\n\t\toptional CreateContinuousQueryCommand command = 111;\n\t}\n\trequired string Database = 1;\n\trequired string Name = 2;\n\trequired string Query = 3;\n}\n\nmessage DropContinuousQueryCommand {\n\textend Command {\n\t\toptional DropContinuousQueryCommand command = 112;\n\t}\n\trequired string Database = 1;\n\trequired string Name = 2;\n}\n\nmessage CreateUserCommand {\n\textend Command {\n\t\toptional CreateUserCommand command = 113;\n\t}\n\trequired string Name = 1;\n\trequired string Hash = 2;\n\trequired bool Admin = 3;\n}\n\nmessage DropUserCommand {\n\textend Command {\n\t\toptional DropUserCommand command = 114;\n\t}\n\trequired string Name = 1;\n}\n\nmessage UpdateUserCommand {\n\textend Command {\n\t\toptional UpdateUserCommand command = 115;\n\t}\n\trequired string Name = 1;\n\trequired string Hash = 2;\n}\n\nmessage SetPrivilegeCommand {\n\textend Command {\n\t\toptional SetPrivilegeCommand command = 116;\n\t}\n\trequired string Username = 1;\n\trequired string Database = 2;\n\trequired int32 Privilege = 3;\n}\n\nmessage SetDataCommand {\n\textend Command {\n\t\toptional SetDataCommand command = 117;\n\t}\n\trequired Data Data = 1;\n}\n\nmessage SetAdminPrivilegeCommand {\n\textend Command {\n\t\toptional SetAdminPrivilegeCommand command = 118;\n\t}\n\trequired string Username = 1;\n\trequired bool Admin = 2;\n}\n\nmessage UpdateNodeCommand {\n\textend Command {\n\t\toptional UpdateNodeCommand command = 119;\n\t}\n\trequired uint64 ID = 1;\n\trequired string Host = 2;\n}\n\nmessage CreateSubscriptionCommand {\n\textend Command {\n\t\toptional CreateSubscriptionCommand command = 121;\n\t}\n\trequired string Name = 1;\n\trequired string Database = 2;\n\trequired string RetentionPolicy = 3;\n\trequired string Mode = 4;\n\trepeated string Destinations = 5;\n\n}\n\nmessage DropSubscriptionCommand {\n\textend Command {\n\t\toptional 
DropSubscriptionCommand command = 122;\n\t}\n\trequired string Name = 1;\n\trequired string Database = 2;\n\trequired string RetentionPolicy = 3;\n}\n\nmessage RemovePeerCommand {\n\textend Command {\n\t\toptional RemovePeerCommand command = 123;\n\t}\n\toptional uint64 ID = 1;\n\trequired string Addr = 2;\n}\n\nmessage CreateMetaNodeCommand {\n\textend Command {\n\t\toptional CreateMetaNodeCommand command = 124;\n\t}\n\trequired string HTTPAddr = 1;\n\trequired string TCPAddr = 2;\n\trequired uint64 Rand = 3;\n}\n\nmessage CreateDataNodeCommand {\n\textend Command {\n\t\toptional CreateDataNodeCommand command = 125;\n\t}\n\trequired string HTTPAddr = 1;\n\trequired string TCPAddr = 2;\n}\n\nmessage UpdateDataNodeCommand {\n\textend Command {\n\t\toptional UpdateDataNodeCommand command = 126;\n\t}\n\trequired uint64 ID = 1;\n\trequired string Host = 2;\n\trequired string TCPHost = 3;\n}\n\nmessage DeleteMetaNodeCommand {\n\textend Command {\n\t\toptional DeleteMetaNodeCommand command = 127;\n\t}\n\trequired uint64 ID = 1;\n}\n\nmessage DeleteDataNodeCommand {\n\textend Command {\n\t\toptional DeleteDataNodeCommand command = 128;\n\t}\n\trequired uint64 ID = 1;\n}\n\nmessage Response {\n\trequired bool OK = 1;\n\toptional string Error = 2;\n\toptional uint64 Index = 3;\n}\n\n// SetMetaNodeCommand is for the initial metanode in a cluster or\n// if the single host restarts and its hostname changes, this will update it\nmessage SetMetaNodeCommand {\n\textend Command {\n\t\toptional SetMetaNodeCommand command = 129;\n\t}\n\trequired string HTTPAddr = 1;\n\trequired string TCPAddr = 2;\n\trequired uint64 Rand = 3;\n}\n\nmessage DropShardCommand {\n\textend Command {\n\t\toptional DropShardCommand command = 130;\n\t}\n\trequired uint64 ID = 1;\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/meta/meta_test.go",
    "content": "package meta\n\nimport \"golang.org/x/crypto/bcrypt\"\n\nfunc init() {\n\tbcryptCost = bcrypt.MinCost\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/meta/query_authorizer.go",
    "content": "package meta\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n)\n\n// QueryAuthorizer determines whether a user is authorized to execute a given query.\ntype QueryAuthorizer struct {\n\tClient *Client\n}\n\n// NewQueryAuthorizer returns a new instance of QueryAuthorizer.\nfunc NewQueryAuthorizer(c *Client) *QueryAuthorizer {\n\treturn &QueryAuthorizer{\n\t\tClient: c,\n\t}\n}\n\n// AuthorizeQuery authorizes u to execute q on database.\n// Database can be \"\" for queries that do not require a database.\n// If no user is provided it will return an error unless the query's first statement is to create\n// a root user.\nfunc (a *QueryAuthorizer) AuthorizeQuery(u User, query *influxql.Query, database string) error {\n\t// Special case if no users exist.\n\tif n := a.Client.UserCount(); n == 0 {\n\t\t// Ensure there is at least one statement.\n\t\tif len(query.Statements) > 0 {\n\t\t\t// First statement in the query must create a user with admin privilege.\n\t\t\tcu, ok := query.Statements[0].(*influxql.CreateUserStatement)\n\t\t\tif ok && cu.Admin == true {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn &ErrAuthorize{\n\t\t\tQuery:    query,\n\t\t\tDatabase: database,\n\t\t\tMessage:  \"create admin user first or disable authentication\",\n\t\t}\n\t}\n\n\tif u == nil {\n\t\treturn &ErrAuthorize{\n\t\t\tQuery:    query,\n\t\t\tDatabase: database,\n\t\t\tMessage:  \"no user provided\",\n\t\t}\n\t}\n\n\treturn u.AuthorizeQuery(database, query)\n}\n\nfunc (u *UserInfo) AuthorizeQuery(database string, query *influxql.Query) error {\n\n\t// Admin privilege allows the user to execute all statements.\n\tif u.Admin {\n\t\treturn nil\n\t}\n\n\t// Check each statement in the query.\n\tfor _, stmt := range query.Statements {\n\t\t// Get the privileges required to execute the statement.\n\t\tprivs, err := stmt.RequiredPrivileges()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Make sure the user has the privileges required to 
execute\n\t\t// each statement.\n\t\tfor _, p := range privs {\n\t\t\tif p.Admin {\n\t\t\t\t// Admin privilege already checked so statement requiring admin\n\t\t\t\t// privilege cannot be run.\n\t\t\t\treturn &ErrAuthorize{\n\t\t\t\t\tQuery:    query,\n\t\t\t\t\tUser:     u.Name,\n\t\t\t\t\tDatabase: database,\n\t\t\t\t\tMessage:  fmt.Sprintf(\"statement '%s', requires admin privilege\", stmt),\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Use the db name specified by the statement or the db\n\t\t\t// name passed by the caller if one wasn't specified by\n\t\t\t// the statement.\n\t\t\tdb := p.Name\n\t\t\tif db == \"\" {\n\t\t\t\tdb = database\n\t\t\t}\n\t\t\tif !u.AuthorizeDatabase(p.Privilege, db) {\n\t\t\t\treturn &ErrAuthorize{\n\t\t\t\t\tQuery:    query,\n\t\t\t\t\tUser:     u.Name,\n\t\t\t\t\tDatabase: database,\n\t\t\t\t\tMessage:  fmt.Sprintf(\"statement '%s', requires %s on %s\", stmt, p.Privilege.String(), db),\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// ErrAuthorize represents an authorization error.\ntype ErrAuthorize struct {\n\tQuery    *influxql.Query\n\tUser     string\n\tDatabase string\n\tMessage  string\n}\n\n// Error returns the text of the error.\nfunc (e ErrAuthorize) Error() string {\n\tif e.User == \"\" {\n\t\treturn fmt.Sprint(e.Message)\n\t}\n\treturn fmt.Sprintf(\"%s not authorized to execute %s\", e.User, e.Message)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/meta/write_authorizer.go",
    "content": "package meta\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n)\n\n// WriteAuthorizer determines whether a user is authorized to write to a given database.\ntype WriteAuthorizer struct {\n\tClient *Client\n}\n\n// NewWriteAuthorizer returns a new instance of WriteAuthorizer.\nfunc NewWriteAuthorizer(c *Client) *WriteAuthorizer {\n\treturn &WriteAuthorizer{Client: c}\n}\n\n// AuthorizeWrite returns nil if the user has permission to write to the database.\nfunc (a WriteAuthorizer) AuthorizeWrite(username, database string) error {\n\tu, err := a.Client.User(username)\n\tif err != nil || u == nil || !u.AuthorizeDatabase(influxql.WritePrivilege, database) {\n\t\treturn &ErrAuthorize{\n\t\t\tDatabase: database,\n\t\t\tMessage:  fmt.Sprintf(\"%s not authorized to write to %s\", username, database),\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/opentsdb/README.md",
    "content": "OpenTSDB Input\n============\nInfluxDB supports both the telnet and HTTP OpenTSDB protocol. This means that InfluxDB can act as a drop-in replacement for your OpenTSDB system.\n\n## Configuration\nThe OpenTSDB inputs allow the binding address, target database, and target retention policy within that database, to be set. If the database does not exist, it will be created automatically when the input is initialized. If you also decide to configure retention policy (without configuration the input will use the auto-created default retention policy), both the database and retention policy must already exist.\n\nThe write-consistency-level can also be set. If any write operations do not meet the configured consistency guarantees, an error will occur and the data will not be indexed. The default consistency-level is `ONE`.\n\nThe OpenTSDB input also performs internal batching of the points it receives, as batched writes to the database are more efficient. The default _batch size_ is 1000, _pending batch_ factor is 5, with a _batch timeout_ of 1 second. This means the input will write batches of maximum size 1000, but if a batch has not reached 1000 points within 1 second of the first point being added to a batch, it will emit that batch regardless of size. The pending batch factor controls how many batches can be in memory at once, allowing the input to transmit a batch, while still building other batches.\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/opentsdb/config.go",
    "content": "package opentsdb\n\nimport (\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n\t\"github.com/influxdata/influxdb/toml\"\n)\n\nconst (\n\t// DefaultBindAddress is the default address that the service binds to.\n\tDefaultBindAddress = \":4242\"\n\n\t// DefaultDatabase is the default database used for writes.\n\tDefaultDatabase = \"opentsdb\"\n\n\t// DefaultRetentionPolicy is the default retention policy used for writes.\n\tDefaultRetentionPolicy = \"\"\n\n\t// DefaultConsistencyLevel is the default write consistency level.\n\tDefaultConsistencyLevel = \"one\"\n\n\t// DefaultBatchSize is the default OpenTSDB batch size.\n\tDefaultBatchSize = 1000\n\n\t// DefaultBatchTimeout is the default OpenTSDB batch timeout.\n\tDefaultBatchTimeout = time.Second\n\n\t// DefaultBatchPending is the default number of batches that can be in the queue.\n\tDefaultBatchPending = 5\n\n\t// DefaultCertificate is the default location of the certificate used when TLS is enabled.\n\tDefaultCertificate = \"/etc/ssl/influxdb.pem\"\n)\n\n// Config represents the configuration of the OpenTSDB service.\ntype Config struct {\n\tEnabled          bool          `toml:\"enabled\"`\n\tBindAddress      string        `toml:\"bind-address\"`\n\tDatabase         string        `toml:\"database\"`\n\tRetentionPolicy  string        `toml:\"retention-policy\"`\n\tConsistencyLevel string        `toml:\"consistency-level\"`\n\tTLSEnabled       bool          `toml:\"tls-enabled\"`\n\tCertificate      string        `toml:\"certificate\"`\n\tBatchSize        int           `toml:\"batch-size\"`\n\tBatchPending     int           `toml:\"batch-pending\"`\n\tBatchTimeout     toml.Duration `toml:\"batch-timeout\"`\n\tLogPointErrors   bool          `toml:\"log-point-errors\"`\n}\n\n// NewConfig returns a new config for the service.\nfunc NewConfig() Config {\n\treturn Config{\n\t\tBindAddress:      DefaultBindAddress,\n\t\tDatabase:         DefaultDatabase,\n\t\tRetentionPolicy:  
DefaultRetentionPolicy,\n\t\tConsistencyLevel: DefaultConsistencyLevel,\n\t\tTLSEnabled:       false,\n\t\tCertificate:      DefaultCertificate,\n\t\tBatchSize:        DefaultBatchSize,\n\t\tBatchPending:     DefaultBatchPending,\n\t\tBatchTimeout:     toml.Duration(DefaultBatchTimeout),\n\t\tLogPointErrors:   true,\n\t}\n}\n\n// WithDefaults takes the given config and returns a new config with any required\n// default values set.\nfunc (c *Config) WithDefaults() *Config {\n\td := *c\n\tif d.BindAddress == \"\" {\n\t\td.BindAddress = DefaultBindAddress\n\t}\n\tif d.Database == \"\" {\n\t\td.Database = DefaultDatabase\n\t}\n\tif d.RetentionPolicy == \"\" {\n\t\td.RetentionPolicy = DefaultRetentionPolicy\n\t}\n\tif d.ConsistencyLevel == \"\" {\n\t\td.ConsistencyLevel = DefaultConsistencyLevel\n\t}\n\tif d.Certificate == \"\" {\n\t\td.Certificate = DefaultCertificate\n\t}\n\tif d.BatchSize == 0 {\n\t\td.BatchSize = DefaultBatchSize\n\t}\n\tif d.BatchPending == 0 {\n\t\td.BatchPending = DefaultBatchPending\n\t}\n\tif d.BatchTimeout == 0 {\n\t\td.BatchTimeout = toml.Duration(DefaultBatchTimeout)\n\t}\n\n\treturn &d\n}\n\n// Configs wraps a slice of Config to aggregate diagnostics.\ntype Configs []Config\n\n// Diagnostics returns one set of diagnostics for all of the Configs.\nfunc (c Configs) Diagnostics() (*diagnostics.Diagnostics, error) {\n\td := &diagnostics.Diagnostics{\n\t\tColumns: []string{\"enabled\", \"bind-address\", \"database\", \"retention-policy\", \"batch-size\", \"batch-pending\", \"batch-timeout\"},\n\t}\n\n\tfor _, cc := range c {\n\t\tif !cc.Enabled {\n\t\t\td.AddRow([]interface{}{false})\n\t\t\tcontinue\n\t\t}\n\n\t\tr := []interface{}{true, cc.BindAddress, cc.Database, cc.RetentionPolicy, cc.BatchSize, cc.BatchPending, cc.BatchTimeout}\n\t\td.AddRow(r)\n\t}\n\n\treturn d, nil\n}\n\n// Enabled returns true if any underlying Config is Enabled.\nfunc (c Configs) Enabled() bool {\n\tfor _, cc := range c {\n\t\tif cc.Enabled {\n\t\t\treturn 
true\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/opentsdb/config_test.go",
    "content": "package opentsdb_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/influxdata/influxdb/services/opentsdb\"\n)\n\nfunc TestConfig_Parse(t *testing.T) {\n\t// Parse configuration.\n\tvar c opentsdb.Config\n\tif _, err := toml.Decode(`\nenabled = true\nbind-address = \":9000\"\ndatabase = \"xxx\"\nconsistency-level =\"all\"\ntls-enabled = true\ncertificate = \"/etc/ssl/cert.pem\"\nlog-point-errors = true\n`, &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Validate configuration.\n\tif c.Enabled != true {\n\t\tt.Fatalf(\"unexpected enabled: %v\", c.Enabled)\n\t} else if c.BindAddress != \":9000\" {\n\t\tt.Fatalf(\"unexpected bind address: %s\", c.BindAddress)\n\t} else if c.Database != \"xxx\" {\n\t\tt.Fatalf(\"unexpected database: %s\", c.Database)\n\t} else if c.ConsistencyLevel != \"all\" {\n\t\tt.Fatalf(\"unexpected consistency-level: %s\", c.ConsistencyLevel)\n\t} else if c.TLSEnabled != true {\n\t\tt.Fatalf(\"unexpected tls-enabled: %v\", c.TLSEnabled)\n\t} else if c.Certificate != \"/etc/ssl/cert.pem\" {\n\t\tt.Fatalf(\"unexpected certificate: %s\", c.Certificate)\n\t} else if !c.LogPointErrors {\n\t\tt.Fatalf(\"unexpected log-point-errors: %v\", c.LogPointErrors)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/opentsdb/handler.go",
    "content": "package opentsdb\n\nimport (\n\t\"bufio\"\n\t\"compress/gzip\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/uber-go/zap\"\n)\n\n// Handler is an http.Handler for the OpenTSDB service.\ntype Handler struct {\n\tDatabase        string\n\tRetentionPolicy string\n\n\tPointsWriter interface {\n\t\tWritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error\n\t}\n\n\tLogger zap.Logger\n\n\tstats *Statistics\n}\n\n// ServeHTTP handles an HTTP request of the OpenTSDB REST API.\nfunc (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tswitch r.URL.Path {\n\tcase \"/api/metadata/put\":\n\t\tw.WriteHeader(http.StatusNoContent)\n\tcase \"/api/put\":\n\t\th.servePut(w, r)\n\tdefault:\n\t\thttp.NotFound(w, r)\n\t}\n}\n\n// servePut implements OpenTSDB's HTTP /api/put endpoint.\nfunc (h *Handler) servePut(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\t// Require POST method.\n\tif r.Method != \"POST\" {\n\t\thttp.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\t// Wrap reader if it's gzip encoded.\n\tvar br *bufio.Reader\n\tif r.Header.Get(\"Content-Encoding\") == \"gzip\" {\n\t\tzr, err := gzip.NewReader(r.Body)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"could not read gzip, \"+err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tbr = bufio.NewReader(zr)\n\t} else {\n\t\tbr = bufio.NewReader(r.Body)\n\t}\n\n\t// Lookahead at the first byte.\n\tf, err := br.Peek(1)\n\tif err != nil || len(f) != 1 {\n\t\thttp.Error(w, \"peek error: \"+err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Peek to see if this is a JSON array.\n\tvar multi bool\n\tswitch f[0] {\n\tcase '{':\n\tcase '[':\n\t\tmulti = 
true\n\tdefault:\n\t\thttp.Error(w, \"expected JSON array or hash\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Decode JSON data into slice of points.\n\tdps := make([]point, 1)\n\tif dec := json.NewDecoder(br); multi {\n\t\tif err = dec.Decode(&dps); err != nil {\n\t\t\thttp.Error(w, \"json array decode error\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif err = dec.Decode(&dps[0]); err != nil {\n\t\t\thttp.Error(w, \"json object decode error\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Convert points into TSDB points.\n\tpoints := make([]models.Point, 0, len(dps))\n\tfor i := range dps {\n\t\tp := dps[i]\n\n\t\t// Convert timestamp to Go time.\n\t\t// If time value is over a billion then it's microseconds.\n\t\tvar ts time.Time\n\t\tif p.Time < 10000000000 {\n\t\t\tts = time.Unix(p.Time, 0)\n\t\t} else {\n\t\t\tts = time.Unix(p.Time/1000, (p.Time%1000)*1000)\n\t\t}\n\n\t\tpt, err := models.NewPoint(p.Metric, models.NewTags(p.Tags), map[string]interface{}{\"value\": p.Value}, ts)\n\t\tif err != nil {\n\t\t\th.Logger.Info(fmt.Sprintf(\"Dropping point %v: %v\", p.Metric, err))\n\t\t\tif h.stats != nil {\n\t\t\t\tatomic.AddInt64(&h.stats.InvalidDroppedPoints, 1)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tpoints = append(points, pt)\n\t}\n\n\t// Write points.\n\tif err := h.PointsWriter.WritePointsPrivileged(h.Database, h.RetentionPolicy, models.ConsistencyLevelAny, points); influxdb.IsClientError(err) {\n\t\th.Logger.Info(fmt.Sprint(\"write series error: \", err))\n\t\thttp.Error(w, \"write series error: \"+err.Error(), http.StatusBadRequest)\n\t\treturn\n\t} else if err != nil {\n\t\th.Logger.Info(fmt.Sprint(\"write series error: \", err))\n\t\thttp.Error(w, \"write series error: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusNoContent)\n}\n\n// chanListener represents a listener that receives connections through a channel.\ntype chanListener struct {\n\taddr   net.Addr\n\tch     chan 
net.Conn\n\tdone   chan struct{}\n\tcloser sync.Once // closer ensures that Close is idempotent.\n}\n\n// newChanListener returns a new instance of chanListener.\nfunc newChanListener(addr net.Addr) *chanListener {\n\treturn &chanListener{\n\t\taddr: addr,\n\t\tch:   make(chan net.Conn),\n\t\tdone: make(chan struct{}),\n\t}\n}\n\nfunc (ln *chanListener) Accept() (net.Conn, error) {\n\terrClosed := errors.New(\"network connection closed\")\n\tselect {\n\tcase <-ln.done:\n\t\treturn nil, errClosed\n\tcase conn, ok := <-ln.ch:\n\t\tif !ok {\n\t\t\treturn nil, errClosed\n\t\t}\n\t\treturn conn, nil\n\t}\n}\n\n// Close closes the connection channel.\nfunc (ln *chanListener) Close() error {\n\tln.closer.Do(func() {\n\t\tclose(ln.done)\n\t})\n\treturn nil\n}\n\n// Addr returns the network address of the listener.\nfunc (ln *chanListener) Addr() net.Addr { return ln.addr }\n\n// readerConn represents a net.Conn with an assignable reader.\ntype readerConn struct {\n\tnet.Conn\n\tr io.Reader\n}\n\n// Read implements the io.Reader interface.\nfunc (conn *readerConn) Read(b []byte) (n int, err error) { return conn.r.Read(b) }\n\n// point represents an incoming JSON data point.\ntype point struct {\n\tMetric string            `json:\"metric\"`\n\tTime   int64             `json:\"timestamp\"`\n\tValue  float64           `json:\"value\"`\n\tTags   map[string]string `json:\"tags,omitempty\"`\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/opentsdb/service.go",
    "content": "// Package opentsdb provides a service for InfluxDB to ingest data via the opentsdb protocol.\npackage opentsdb // import \"github.com/influxdata/influxdb/services/opentsdb\"\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/textproto\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t\"github.com/uber-go/zap\"\n)\n\n// statistics gathered by the openTSDB package.\nconst (\n\tstatHTTPConnectionsHandled   = \"httpConnsHandled\"\n\tstatTelnetConnectionsActive  = \"tlConnsActive\"\n\tstatTelnetConnectionsHandled = \"tlConnsHandled\"\n\tstatTelnetPointsReceived     = \"tlPointsRx\"\n\tstatTelnetBytesReceived      = \"tlBytesRx\"\n\tstatTelnetReadError          = \"tlReadErr\"\n\tstatTelnetBadLine            = \"tlBadLine\"\n\tstatTelnetBadTime            = \"tlBadTime\"\n\tstatTelnetBadTag             = \"tlBadTag\"\n\tstatTelnetBadFloat           = \"tlBadFloat\"\n\tstatBatchesTransmitted       = \"batchesTx\"\n\tstatPointsTransmitted        = \"pointsTx\"\n\tstatBatchesTransmitFail      = \"batchesTxFail\"\n\tstatConnectionsActive        = \"connsActive\"\n\tstatConnectionsHandled       = \"connsHandled\"\n\tstatDroppedPointsInvalid     = \"droppedPointsInvalid\"\n)\n\n// Service manages the listener and handler for an HTTP endpoint.\ntype Service struct {\n\tln     net.Listener  // main listener\n\thttpln *chanListener // http channel-based listener\n\n\twg   sync.WaitGroup\n\ttls  bool\n\tcert string\n\n\tmu    sync.RWMutex\n\tready bool          // Has the required database been created?\n\tdone  chan struct{} // Is the service closing or closed?\n\n\tBindAddress     string\n\tDatabase        string\n\tRetentionPolicy string\n\n\tPointsWriter interface {\n\t\tWritePointsPrivileged(database, retentionPolicy string, 
consistencyLevel models.ConsistencyLevel, points []models.Point) error\n\t}\n\tMetaClient interface {\n\t\tCreateDatabase(name string) (*meta.DatabaseInfo, error)\n\t}\n\n\t// Points received over the telnet protocol are batched.\n\tbatchSize    int\n\tbatchPending int\n\tbatchTimeout time.Duration\n\tbatcher      *tsdb.PointBatcher\n\n\tLogPointErrors bool\n\tLogger         zap.Logger\n\n\tstats       *Statistics\n\tdefaultTags models.StatisticTags\n}\n\n// NewService returns a new instance of Service.\nfunc NewService(c Config) (*Service, error) {\n\t// Use defaults where necessary.\n\td := c.WithDefaults()\n\n\ts := &Service{\n\t\ttls:             d.TLSEnabled,\n\t\tcert:            d.Certificate,\n\t\tBindAddress:     d.BindAddress,\n\t\tDatabase:        d.Database,\n\t\tRetentionPolicy: d.RetentionPolicy,\n\t\tbatchSize:       d.BatchSize,\n\t\tbatchPending:    d.BatchPending,\n\t\tbatchTimeout:    time.Duration(d.BatchTimeout),\n\t\tLogger:          zap.New(zap.NullEncoder()),\n\t\tLogPointErrors:  d.LogPointErrors,\n\t\tstats:           &Statistics{},\n\t\tdefaultTags:     models.StatisticTags{\"bind\": d.BindAddress},\n\t}\n\treturn s, nil\n}\n\n// Open starts the service.\nfunc (s *Service) Open() error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif !s.closed() {\n\t\treturn nil // Already open.\n\t}\n\ts.done = make(chan struct{})\n\n\ts.Logger.Info(\"Starting OpenTSDB service\")\n\n\ts.batcher = tsdb.NewPointBatcher(s.batchSize, s.batchPending, s.batchTimeout)\n\ts.batcher.Start()\n\n\t// Start processing batches.\n\ts.wg.Add(1)\n\tgo func() { defer s.wg.Done(); s.processBatches(s.batcher) }()\n\n\t// Open listener.\n\tif s.tls {\n\t\tcert, err := tls.LoadX509KeyPair(s.cert, s.cert)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlistener, err := tls.Listen(\"tcp\", s.BindAddress, &tls.Config{\n\t\t\tCertificates: []tls.Certificate{cert},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ts.Logger.Info(fmt.Sprint(\"Listening on TLS: \", 
listener.Addr().String()))\n\t\ts.ln = listener\n\t} else {\n\t\tlistener, err := net.Listen(\"tcp\", s.BindAddress)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ts.Logger.Info(fmt.Sprint(\"Listening on: \", listener.Addr().String()))\n\t\ts.ln = listener\n\t}\n\ts.httpln = newChanListener(s.ln.Addr())\n\n\t// Begin listening for connections.\n\ts.wg.Add(2)\n\tgo func() { defer s.wg.Done(); s.serve() }()\n\tgo func() { defer s.wg.Done(); s.serveHTTP() }()\n\n\treturn nil\n}\n\n// Close closes the openTSDB service.\nfunc (s *Service) Close() error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif s.closed() {\n\t\treturn nil // Already closed.\n\t}\n\tclose(s.done)\n\n\t// Close the listeners.\n\tif err := s.ln.Close(); err != nil {\n\t\treturn err\n\t}\n\tif err := s.httpln.Close(); err != nil {\n\t\treturn err\n\t}\n\n\ts.wg.Wait()\n\ts.done = nil\n\n\tif s.batcher != nil {\n\t\ts.batcher.Stop()\n\t}\n\n\treturn nil\n}\n\n// Closed returns true if the service is currently closed.\nfunc (s *Service) Closed() bool {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\treturn s.closed()\n}\n\nfunc (s *Service) closed() bool {\n\tselect {\n\tcase <-s.done:\n\t\t// Service is closing.\n\t\treturn true\n\tdefault:\n\t\treturn s.done == nil\n\t}\n}\n\n// createInternalStorage ensures that the required database has been created.\nfunc (s *Service) createInternalStorage() error {\n\ts.mu.RLock()\n\tready := s.ready\n\ts.mu.RUnlock()\n\tif ready {\n\t\treturn nil\n\t}\n\n\tif _, err := s.MetaClient.CreateDatabase(s.Database); err != nil {\n\t\treturn err\n\t}\n\n\t// The service is now ready.\n\ts.mu.Lock()\n\ts.ready = true\n\ts.mu.Unlock()\n\treturn nil\n}\n\n// WithLogger sets the logger for the service.\nfunc (s *Service) WithLogger(log zap.Logger) {\n\ts.Logger = log.With(zap.String(\"service\", \"opentsdb\"))\n}\n\n// Statistics maintains statistics for the subscriber service.\ntype Statistics struct {\n\tHTTPConnectionsHandled   int64\n\tActiveTelnetConnections  
int64\n\tHandledTelnetConnections int64\n\tTelnetPointsReceived     int64\n\tTelnetBytesReceived      int64\n\tTelnetReadError          int64\n\tTelnetBadLine            int64\n\tTelnetBadTime            int64\n\tTelnetBadTag             int64\n\tTelnetBadFloat           int64\n\tBatchesTransmitted       int64\n\tPointsTransmitted        int64\n\tBatchesTransmitFail      int64\n\tActiveConnections        int64\n\tHandledConnections       int64\n\tInvalidDroppedPoints     int64\n}\n\n// Statistics returns statistics for periodic monitoring.\nfunc (s *Service) Statistics(tags map[string]string) []models.Statistic {\n\treturn []models.Statistic{{\n\t\tName: \"opentsdb\",\n\t\tTags: s.defaultTags.Merge(tags),\n\t\tValues: map[string]interface{}{\n\t\t\tstatHTTPConnectionsHandled:   atomic.LoadInt64(&s.stats.HTTPConnectionsHandled),\n\t\t\tstatTelnetConnectionsActive:  atomic.LoadInt64(&s.stats.ActiveTelnetConnections),\n\t\t\tstatTelnetConnectionsHandled: atomic.LoadInt64(&s.stats.HandledTelnetConnections),\n\t\t\tstatTelnetPointsReceived:     atomic.LoadInt64(&s.stats.TelnetPointsReceived),\n\t\t\tstatTelnetBytesReceived:      atomic.LoadInt64(&s.stats.TelnetBytesReceived),\n\t\t\tstatTelnetReadError:          atomic.LoadInt64(&s.stats.TelnetReadError),\n\t\t\tstatTelnetBadLine:            atomic.LoadInt64(&s.stats.TelnetBadLine),\n\t\t\tstatTelnetBadTime:            atomic.LoadInt64(&s.stats.TelnetBadTime),\n\t\t\tstatTelnetBadTag:             atomic.LoadInt64(&s.stats.TelnetBadTag),\n\t\t\tstatTelnetBadFloat:           atomic.LoadInt64(&s.stats.TelnetBadFloat),\n\t\t\tstatBatchesTransmitted:       atomic.LoadInt64(&s.stats.BatchesTransmitted),\n\t\t\tstatPointsTransmitted:        atomic.LoadInt64(&s.stats.PointsTransmitted),\n\t\t\tstatBatchesTransmitFail:      atomic.LoadInt64(&s.stats.BatchesTransmitFail),\n\t\t\tstatConnectionsActive:        atomic.LoadInt64(&s.stats.ActiveConnections),\n\t\t\tstatConnectionsHandled:       
atomic.LoadInt64(&s.stats.HandledConnections),\n\t\t\tstatDroppedPointsInvalid:     atomic.LoadInt64(&s.stats.InvalidDroppedPoints),\n\t\t},\n\t}}\n}\n\n// Addr returns the listener's address. Returns nil if listener is closed.\nfunc (s *Service) Addr() net.Addr {\n\tif s.ln == nil {\n\t\treturn nil\n\t}\n\treturn s.ln.Addr()\n}\n\n// serve serves the handler from the listener.\nfunc (s *Service) serve() {\n\tfor {\n\t\t// Wait for next connection.\n\t\tconn, err := s.ln.Accept()\n\t\tif opErr, ok := err.(*net.OpError); ok && !opErr.Temporary() {\n\t\t\ts.Logger.Info(\"openTSDB TCP listener closed\")\n\t\t\treturn\n\t\t} else if err != nil {\n\t\t\ts.Logger.Info(fmt.Sprint(\"error accepting openTSDB: \", err.Error()))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Handle connection in separate goroutine.\n\t\tgo s.handleConn(conn)\n\t}\n}\n\n// handleConn processes conn. This is run in a separate goroutine.\nfunc (s *Service) handleConn(conn net.Conn) {\n\tdefer atomic.AddInt64(&s.stats.ActiveConnections, -1)\n\tatomic.AddInt64(&s.stats.ActiveConnections, 1)\n\tatomic.AddInt64(&s.stats.HandledConnections, 1)\n\n\t// Read header into buffer to check if it's HTTP.\n\tvar buf bytes.Buffer\n\tr := bufio.NewReader(io.TeeReader(conn, &buf))\n\n\t// Attempt to parse connection as HTTP.\n\t_, err := http.ReadRequest(r)\n\n\t// Rebuild connection from buffer and remaining connection data.\n\tbufr := bufio.NewReader(io.MultiReader(&buf, conn))\n\tconn = &readerConn{Conn: conn, r: bufr}\n\n\t// If no HTTP parsing error occurred then process as HTTP.\n\tif err == nil {\n\t\tatomic.AddInt64(&s.stats.HTTPConnectionsHandled, 1)\n\t\ts.httpln.ch <- conn\n\t\treturn\n\t}\n\n\t// Otherwise handle in telnet format.\n\ts.wg.Add(1)\n\ts.handleTelnetConn(conn)\n\ts.wg.Done()\n}\n\n// handleTelnetConn accepts OpenTSDB's telnet protocol.\n// Each telnet command consists of a line of the form:\n//   put sys.cpu.user 1356998400 42.5 host=webserver01 cpu=0\nfunc (s *Service) handleTelnetConn(conn 
net.Conn) {\n\tdefer conn.Close()\n\tdefer atomic.AddInt64(&s.stats.ActiveTelnetConnections, -1)\n\tatomic.AddInt64(&s.stats.ActiveTelnetConnections, 1)\n\tatomic.AddInt64(&s.stats.HandledTelnetConnections, 1)\n\n\t// Get connection details.\n\tremoteAddr := conn.RemoteAddr().String()\n\n\t// Wrap connection in a text protocol reader.\n\tr := textproto.NewReader(bufio.NewReader(conn))\n\tfor {\n\t\tline, err := r.ReadLine()\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tatomic.AddInt64(&s.stats.TelnetReadError, 1)\n\t\t\t\ts.Logger.Info(fmt.Sprint(\"error reading from openTSDB connection \", err.Error()))\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tatomic.AddInt64(&s.stats.TelnetPointsReceived, 1)\n\t\tatomic.AddInt64(&s.stats.TelnetBytesReceived, int64(len(line)))\n\n\t\tinputStrs := strings.Fields(line)\n\n\t\tif len(inputStrs) == 1 && inputStrs[0] == \"version\" {\n\t\t\tconn.Write([]byte(\"InfluxDB TSDB proxy\"))\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(inputStrs) < 4 || inputStrs[0] != \"put\" {\n\t\t\tatomic.AddInt64(&s.stats.TelnetBadLine, 1)\n\t\t\tif s.LogPointErrors {\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"malformed line '%s' from %s\", line, remoteAddr))\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tmeasurement := inputStrs[1]\n\t\ttsStr := inputStrs[2]\n\t\tvalueStr := inputStrs[3]\n\t\ttagStrs := inputStrs[4:]\n\n\t\tvar t time.Time\n\t\tts, err := strconv.ParseInt(tsStr, 10, 64)\n\t\tif err != nil {\n\t\t\tatomic.AddInt64(&s.stats.TelnetBadTime, 1)\n\t\t\tif s.LogPointErrors {\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"malformed time '%s' from %s\", tsStr, remoteAddr))\n\t\t\t}\n\t\t}\n\n\t\tswitch len(tsStr) {\n\t\tcase 10:\n\t\t\tt = time.Unix(ts, 0)\n\t\tcase 13:\n\t\t\tt = time.Unix(ts/1000, (ts%1000)*1000)\n\t\tdefault:\n\t\t\tatomic.AddInt64(&s.stats.TelnetBadTime, 1)\n\t\t\tif s.LogPointErrors {\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"bad time '%s' must be 10 or 13 chars, from %s \", tsStr, remoteAddr))\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\ttags := 
make(map[string]string)\n\t\tfor t := range tagStrs {\n\t\t\tparts := strings.SplitN(tagStrs[t], \"=\", 2)\n\t\t\tif len(parts) != 2 || parts[0] == \"\" || parts[1] == \"\" {\n\t\t\t\tatomic.AddInt64(&s.stats.TelnetBadTag, 1)\n\t\t\t\tif s.LogPointErrors {\n\t\t\t\t\ts.Logger.Info(fmt.Sprintf(\"malformed tag data '%v' from %s\", tagStrs[t], remoteAddr))\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tk := parts[0]\n\n\t\t\ttags[k] = parts[1]\n\t\t}\n\n\t\tfields := make(map[string]interface{})\n\t\tfv, err := strconv.ParseFloat(valueStr, 64)\n\t\tif err != nil {\n\t\t\tatomic.AddInt64(&s.stats.TelnetBadFloat, 1)\n\t\t\tif s.LogPointErrors {\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"bad float '%s' from %s\", valueStr, remoteAddr))\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tfields[\"value\"] = fv\n\n\t\tpt, err := models.NewPoint(measurement, models.NewTags(tags), fields, t)\n\t\tif err != nil {\n\t\t\tatomic.AddInt64(&s.stats.TelnetBadFloat, 1)\n\t\t\tif s.LogPointErrors {\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"bad float '%s' from %s\", valueStr, remoteAddr))\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\ts.batcher.In() <- pt\n\t}\n}\n\n// serveHTTP handles connections in HTTP format.\nfunc (s *Service) serveHTTP() {\n\thandler := &Handler{\n\t\tDatabase:        s.Database,\n\t\tRetentionPolicy: s.RetentionPolicy,\n\t\tPointsWriter:    s.PointsWriter,\n\t\tLogger:          s.Logger,\n\t\tstats:           s.stats,\n\t}\n\tsrv := &http.Server{Handler: handler}\n\tsrv.Serve(s.httpln)\n}\n\n// processBatches continually drains the given batcher and writes the batches to the database.\nfunc (s *Service) processBatches(batcher *tsdb.PointBatcher) {\n\tfor {\n\t\tselect {\n\t\tcase <-s.done:\n\t\t\treturn\n\t\tcase batch := <-batcher.Out():\n\t\t\t// Will attempt to create database if not yet created.\n\t\t\tif err := s.createInternalStorage(); err != nil {\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"Required database %s does not yet exist: %s\", s.Database, 
err.Error()))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err := s.PointsWriter.WritePointsPrivileged(s.Database, s.RetentionPolicy, models.ConsistencyLevelAny, batch); err == nil {\n\t\t\t\tatomic.AddInt64(&s.stats.BatchesTransmitted, 1)\n\t\t\t\tatomic.AddInt64(&s.stats.PointsTransmitted, int64(len(batch)))\n\t\t\t} else {\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"failed to write point batch to database %q: %s\", s.Database, err))\n\t\t\t\tatomic.AddInt64(&s.stats.BatchesTransmitFail, 1)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/opentsdb/service_test.go",
    "content": "package opentsdb\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n\t\"github.com/influxdata/influxdb/internal\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/uber-go/zap\"\n)\n\nfunc Test_Service_OpenClose(t *testing.T) {\n\t// Let the OS assign a random port since we are only opening and closing the service,\n\t// not actually connecting to it.\n\tservice := NewTestService(\"db0\", \"127.0.0.1:0\")\n\n\t// Closing a closed service is fine.\n\tif err := service.Service.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Closing a closed service again is fine.\n\tif err := service.Service.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := service.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Opening an already open service is fine.\n\tif err := service.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Reopening a previously opened service is fine.\n\tif err := service.Service.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := service.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Tidy up.\n\tif err := service.Service.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\n// Ensure a point can be written via the telnet protocol.\nfunc TestService_CreatesDatabase(t *testing.T) {\n\tt.Parallel()\n\n\tdatabase := \"db0\"\n\ts := NewTestService(database, \"127.0.0.1:0\")\n\ts.WritePointsFn = func(string, string, models.ConsistencyLevel, []models.Point) error {\n\t\treturn nil\n\t}\n\n\tcalled := make(chan struct{})\n\ts.MetaClient.CreateDatabaseFn = func(name string) (*meta.DatabaseInfo, error) {\n\t\tif name != database {\n\t\t\tt.Errorf(\"\\n\\texp = %s\\n\\tgot = %s\\n\", database, name)\n\t\t}\n\t\t// Allow some time for the caller to return and the ready status to\n\t\t// be 
set.\n\t\ttime.AfterFunc(10*time.Millisecond, func() { called <- struct{}{} })\n\t\treturn nil, errors.New(\"an error\")\n\t}\n\n\tif err := s.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpoints, err := models.ParsePointsString(`cpu value=1`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ts.Service.batcher.In() <- points[0] // Send a point.\n\ts.Service.batcher.Flush()\n\tselect {\n\tcase <-called:\n\t\t// OK\n\tcase <-time.NewTimer(5 * time.Second).C:\n\t\tt.Fatal(\"Service should have attempted to create database\")\n\t}\n\n\t// ready status should not have been switched due to meta client error.\n\ts.Service.mu.RLock()\n\tready := s.Service.ready\n\ts.Service.mu.RUnlock()\n\n\tif got, exp := ready, false; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// This time MC won't cause an error.\n\ts.MetaClient.CreateDatabaseFn = func(name string) (*meta.DatabaseInfo, error) {\n\t\t// Allow some time for the caller to return and the ready status to\n\t\t// be set.\n\t\ttime.AfterFunc(10*time.Millisecond, func() { called <- struct{}{} })\n\t\treturn nil, nil\n\t}\n\n\ts.Service.batcher.In() <- points[0] // Send a point.\n\ts.Service.batcher.Flush()\n\tselect {\n\tcase <-called:\n\t\t// OK\n\tcase <-time.NewTimer(5 * time.Second).C:\n\t\tt.Fatal(\"Service should have attempted to create database\")\n\t}\n\n\t// ready status should not have been switched due to meta client error.\n\ts.Service.mu.RLock()\n\tready = s.Service.ready\n\ts.Service.mu.RUnlock()\n\n\tif got, exp := ready, true; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\ts.Service.Close()\n}\n\n// Ensure a point can be written via the telnet protocol.\nfunc TestService_Telnet(t *testing.T) {\n\tt.Parallel()\n\n\ts := NewTestService(\"db0\", \"127.0.0.1:0\")\n\tif err := s.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer s.Service.Close()\n\n\t// Mock points writer.\n\tvar called int32\n\ts.WritePointsFn = func(database, retentionPolicy 
string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {\n\t\tatomic.StoreInt32(&called, 1)\n\n\t\tif database != \"db0\" {\n\t\t\tt.Fatalf(\"unexpected database: %s\", database)\n\t\t} else if retentionPolicy != \"\" {\n\t\t\tt.Fatalf(\"unexpected retention policy: %s\", retentionPolicy)\n\t\t} else if !reflect.DeepEqual(points, []models.Point{\n\t\t\tmodels.MustNewPoint(\n\t\t\t\t\"sys.cpu.user\",\n\t\t\t\tmodels.NewTags(map[string]string{\"host\": \"webserver01\", \"cpu\": \"0\"}),\n\t\t\t\tmap[string]interface{}{\"value\": 42.5},\n\t\t\t\ttime.Unix(1356998400, 0),\n\t\t\t),\n\t\t}) {\n\t\t\tt.Fatalf(\"unexpected points: %#v\", points)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Open connection to the service.\n\tconn, err := net.Dial(\"tcp\", s.Service.Addr().String())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer conn.Close()\n\n\t// Write telnet data and close.\n\tif _, err := conn.Write([]byte(\"put sys.cpu.user 1356998400 42.5 host=webserver01 cpu=0\")); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := conn.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttick := time.Tick(10 * time.Millisecond)\n\ttimeout := time.After(10 * time.Second)\n\n\tfor {\n\t\tselect {\n\t\tcase <-tick:\n\t\t\t// Verify that the writer was called.\n\t\t\tif atomic.LoadInt32(&called) > 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-timeout:\n\t\t\tt.Fatal(\"points writer not called\")\n\t\t}\n\t}\n}\n\n// Ensure a point can be written via the HTTP protocol.\nfunc TestService_HTTP(t *testing.T) {\n\tt.Parallel()\n\n\ts := NewTestService(\"db0\", \"127.0.0.1:0\")\n\tif err := s.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer s.Service.Close()\n\n\t// Mock points writer.\n\tvar called bool\n\ts.WritePointsFn = func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {\n\t\tcalled = true\n\t\tif database != \"db0\" {\n\t\t\tt.Fatalf(\"unexpected database: %s\", database)\n\t\t} else if retentionPolicy != 
\"\" {\n\t\t\tt.Fatalf(\"unexpected retention policy: %s\", retentionPolicy)\n\t\t} else if !reflect.DeepEqual(points, []models.Point{\n\t\t\tmodels.MustNewPoint(\n\t\t\t\t\"sys.cpu.nice\",\n\t\t\t\tmodels.NewTags(map[string]string{\"dc\": \"lga\", \"host\": \"web01\"}),\n\t\t\t\tmap[string]interface{}{\"value\": 18.0},\n\t\t\t\ttime.Unix(1346846400, 0),\n\t\t\t),\n\t\t}) {\n\t\t\tspew.Dump(points)\n\t\t\tt.Fatalf(\"unexpected points: %#v\", points)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Write HTTP request to server.\n\tresp, err := http.Post(\"http://\"+s.Service.Addr().String()+\"/api/put\", \"application/json\", strings.NewReader(`{\"metric\":\"sys.cpu.nice\", \"timestamp\":1346846400, \"value\":18, \"tags\":{\"host\":\"web01\", \"dc\":\"lga\"}}`))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\n\t// Verify status and body.\n\tif resp.StatusCode != http.StatusNoContent {\n\t\tt.Fatalf(\"unexpected status code: %d\", resp.StatusCode)\n\t}\n\n\t// Verify that the writer was called.\n\tif !called {\n\t\tt.Fatal(\"points writer not called\")\n\t}\n}\n\ntype TestService struct {\n\tService       *Service\n\tMetaClient    *internal.MetaClientMock\n\tWritePointsFn func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error\n}\n\n// NewTestService returns a new instance of Service.\nfunc NewTestService(database string, bind string) *TestService {\n\ts, err := NewService(Config{\n\t\tBindAddress:      bind,\n\t\tDatabase:         database,\n\t\tConsistencyLevel: \"one\",\n\t})\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tservice := &TestService{\n\t\tService:    s,\n\t\tMetaClient: &internal.MetaClientMock{},\n\t}\n\n\tservice.MetaClient.CreateDatabaseFn = func(db string) (*meta.DatabaseInfo, error) {\n\t\tif got, exp := db, database; got != exp {\n\t\t\treturn nil, fmt.Errorf(\"got %v, expected %v\", got, exp)\n\t\t}\n\t\treturn nil, nil\n\t}\n\n\tif testing.Verbose() 
{\n\t\tservice.Service.WithLogger(zap.New(\n\t\t\tzap.NewTextEncoder(),\n\t\t\tzap.Output(os.Stderr),\n\t\t))\n\t}\n\n\tservice.Service.MetaClient = service.MetaClient\n\tservice.Service.PointsWriter = service\n\treturn service\n}\n\nfunc (s *TestService) WritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {\n\treturn s.WritePointsFn(database, retentionPolicy, consistencyLevel, points)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/precreator/README.md",
    "content": "Shard Precreation\n============\n\nDuring normal operation when InfluxDB receives time-series data, it writes the data to files known as _shards_. Each shard only contains data for a specific range of time. Therefore, before data can be accepted by the system, the shards must exist and InfluxDB always checks that the required shards exist for every incoming data point. If the required shards do not exist, InfluxDB will create those shards. Because this requires a cluster to reach consensus, the process is not instantaneous and can temporarily impact write-throughput.\n\nSince almost all time-series data is written sequentially in time, the system has an excellent idea of the timestamps of future data. Shard precreation takes advantage of this fact by creating required shards ahead of time, thereby ensuring the required shards exist by the time new time-series data actually arrives. Write-throughput is therefore not affected when data is first received for a range of time that would normally trigger shard creation.\n\nNote that the shard-existence check must remain in place in the code, even with shard precreation. This is because while most data is written sequentially in time, this is not always the case. Data may be written with timestamps in the past, or farther in the future than shard precreation handles.\n\n## Configuration\nShard precreation can be disabled if necessary, though this is not recommended. If it is disabled, then shards will be only be created when explicitly needed.\n\nThe interval between runs of the shard precreation service, as well as the time-in-advance the shards are created, are also configurable. The defaults should work for most deployments.\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/precreator/config.go",
    "content": "package precreator\n\nimport (\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n\t\"github.com/influxdata/influxdb/toml\"\n)\n\nconst (\n\t// DefaultCheckInterval is the shard precreation check time if none is specified.\n\tDefaultCheckInterval = 10 * time.Minute\n\n\t// DefaultAdvancePeriod is the default period ahead of the endtime of a shard group\n\t// that its successor group is created.\n\tDefaultAdvancePeriod = 30 * time.Minute\n)\n\n// Config represents the configuration for shard precreation.\ntype Config struct {\n\tEnabled       bool          `toml:\"enabled\"`\n\tCheckInterval toml.Duration `toml:\"check-interval\"`\n\tAdvancePeriod toml.Duration `toml:\"advance-period\"`\n}\n\n// NewConfig returns a new Config with defaults.\nfunc NewConfig() Config {\n\treturn Config{\n\t\tEnabled:       true,\n\t\tCheckInterval: toml.Duration(DefaultCheckInterval),\n\t\tAdvancePeriod: toml.Duration(DefaultAdvancePeriod),\n\t}\n}\n\n// Validate returns an error if the Config is invalid.\nfunc (c Config) Validate() error {\n\tif !c.Enabled {\n\t\treturn nil\n\t}\n\n\t// TODO: Should we enforce a minimum interval?\n\t// Polling every nanosecond, for instance, will greatly impact performance.\n\tif c.CheckInterval <= 0 {\n\t\treturn errors.New(\"check-interval must be positive\")\n\t}\n\tif c.AdvancePeriod <= 0 {\n\t\treturn errors.New(\"advance-period must be positive\")\n\t}\n\n\treturn nil\n}\n\n// Diagnostics returns a diagnostics representation of a subset of the Config.\nfunc (c Config) Diagnostics() (*diagnostics.Diagnostics, error) {\n\tif !c.Enabled {\n\t\treturn diagnostics.RowFromMap(map[string]interface{}{\n\t\t\t\"enabled\": false,\n\t\t}), nil\n\t}\n\n\treturn diagnostics.RowFromMap(map[string]interface{}{\n\t\t\"enabled\":        true,\n\t\t\"check-interval\": c.CheckInterval,\n\t\t\"advance-period\": c.AdvancePeriod,\n\t}), nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/precreator/config_test.go",
    "content": "package precreator_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/influxdata/influxdb/services/precreator\"\n)\n\nfunc TestConfig_Parse(t *testing.T) {\n\t// Parse configuration.\n\tvar c precreator.Config\n\tif _, err := toml.Decode(`\nenabled = true\ncheck-interval = \"2m\"\nadvance-period = \"10m\"\n`, &c); err != nil {\n\n\t\tt.Fatal(err)\n\t}\n\n\t// Validate configuration.\n\tif !c.Enabled {\n\t\tt.Fatalf(\"unexpected enabled state: %v\", c.Enabled)\n\t} else if time.Duration(c.CheckInterval) != 2*time.Minute {\n\t\tt.Fatalf(\"unexpected check interval: %s\", c.CheckInterval)\n\t} else if time.Duration(c.AdvancePeriod) != 10*time.Minute {\n\t\tt.Fatalf(\"unexpected advance period: %s\", c.AdvancePeriod)\n\t}\n}\n\nfunc TestConfig_Validate(t *testing.T) {\n\tc := precreator.NewConfig()\n\tif err := c.Validate(); err != nil {\n\t\tt.Fatalf(\"unexpected validation fail from NewConfig: %s\", err)\n\t}\n\n\tc = precreator.NewConfig()\n\tc.CheckInterval = 0\n\tif err := c.Validate(); err == nil {\n\t\tt.Fatal(\"expected error for check-interval = 0, got nil\")\n\t}\n\n\tc = precreator.NewConfig()\n\tc.CheckInterval *= -1\n\tif err := c.Validate(); err == nil {\n\t\tt.Fatal(\"expected error for negative check-interval, got nil\")\n\t}\n\n\tc = precreator.NewConfig()\n\tc.AdvancePeriod = 0\n\tif err := c.Validate(); err == nil {\n\t\tt.Fatal(\"expected error for advance-period = 0, got nil\")\n\t}\n\n\tc = precreator.NewConfig()\n\tc.AdvancePeriod *= -1\n\tif err := c.Validate(); err == nil {\n\t\tt.Fatal(\"expected error for negative advance-period, got nil\")\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/precreator/service.go",
    "content": "// Package precreator provides the shard precreation service.\npackage precreator // import \"github.com/influxdata/influxdb/services/precreator\"\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/uber-go/zap\"\n)\n\n// Service manages the shard precreation service.\ntype Service struct {\n\tcheckInterval time.Duration\n\tadvancePeriod time.Duration\n\n\tLogger zap.Logger\n\n\tdone chan struct{}\n\twg   sync.WaitGroup\n\n\tMetaClient interface {\n\t\tPrecreateShardGroups(now, cutoff time.Time) error\n\t}\n}\n\n// NewService returns an instance of the precreation service.\nfunc NewService(c Config) (*Service, error) {\n\ts := Service{\n\t\tcheckInterval: time.Duration(c.CheckInterval),\n\t\tadvancePeriod: time.Duration(c.AdvancePeriod),\n\t\tLogger:        zap.New(zap.NullEncoder()),\n\t}\n\n\treturn &s, nil\n}\n\n// WithLogger sets the logger for the service.\nfunc (s *Service) WithLogger(log zap.Logger) {\n\ts.Logger = log.With(zap.String(\"service\", \"shard-precreation\"))\n}\n\n// Open starts the precreation service.\nfunc (s *Service) Open() error {\n\tif s.done != nil {\n\t\treturn nil\n\t}\n\n\ts.Logger.Info(fmt.Sprintf(\"Starting precreation service with check interval of %s, advance period of %s\",\n\t\ts.checkInterval, s.advancePeriod))\n\n\ts.done = make(chan struct{})\n\n\ts.wg.Add(1)\n\tgo s.runPrecreation()\n\treturn nil\n}\n\n// Close stops the precreation service.\nfunc (s *Service) Close() error {\n\tif s.done == nil {\n\t\treturn nil\n\t}\n\n\tclose(s.done)\n\ts.wg.Wait()\n\ts.done = nil\n\n\treturn nil\n}\n\n// runPrecreation continually checks if resources need precreation.\nfunc (s *Service) runPrecreation() {\n\tdefer s.wg.Done()\n\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(s.checkInterval):\n\t\t\tif err := s.precreate(time.Now().UTC()); err != nil {\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"failed to precreate shards: %s\", err.Error()))\n\t\t\t}\n\t\tcase <-s.done:\n\t\t\ts.Logger.Info(\"Precreation service 
terminating\")\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// precreate performs actual resource precreation.\nfunc (s *Service) precreate(now time.Time) error {\n\tcutoff := now.Add(s.advancePeriod).UTC()\n\tif err := s.MetaClient.PrecreateShardGroups(now, cutoff); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/precreator/service_test.go",
    "content": "package precreator\n\nimport (\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/toml\"\n)\n\nfunc Test_ShardPrecreation(t *testing.T) {\n\tt.Parallel()\n\n\tnow := time.Now().UTC()\n\tadvancePeriod := 5 * time.Minute\n\n\t// A test metastaore which returns 2 shard groups, only 1 of which requires a successor.\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tms := metaClient{\n\t\tPrecreateShardGroupsFn: func(v, u time.Time) error {\n\t\t\twg.Done()\n\t\t\tif u != now.Add(advancePeriod) {\n\t\t\t\tt.Fatalf(\"precreation called with wrong time, got %s, exp %s\", u, now)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tsrv, err := NewService(Config{\n\t\tCheckInterval: toml.Duration(time.Minute),\n\t\tAdvancePeriod: toml.Duration(advancePeriod),\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create shard precreation service: %s\", err.Error())\n\t}\n\tsrv.MetaClient = ms\n\n\terr = srv.precreate(now)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to precreate shards: %s\", err.Error())\n\t}\n\n\twg.Wait() // Ensure metaClient test function is called.\n\treturn\n}\n\n// PointsWriter represents a mock impl of PointsWriter.\ntype metaClient struct {\n\tPrecreateShardGroupsFn func(now, cutoff time.Time) error\n}\n\nfunc (m metaClient) PrecreateShardGroups(now, cutoff time.Time) error {\n\treturn m.PrecreateShardGroupsFn(now, cutoff)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/retention/config.go",
    "content": "package retention\n\nimport (\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n\t\"github.com/influxdata/influxdb/toml\"\n)\n\n// Config represents the configuration for the retention service.\ntype Config struct {\n\tEnabled       bool          `toml:\"enabled\"`\n\tCheckInterval toml.Duration `toml:\"check-interval\"`\n}\n\n// NewConfig returns an instance of Config with defaults.\nfunc NewConfig() Config {\n\treturn Config{Enabled: true, CheckInterval: toml.Duration(30 * time.Minute)}\n}\n\n// Validate returns an error if the Config is invalid.\nfunc (c Config) Validate() error {\n\tif !c.Enabled {\n\t\treturn nil\n\t}\n\n\t// TODO: Should we enforce a minimum interval?\n\t// Polling every nanosecond, for instance, will greatly impact performance.\n\tif c.CheckInterval <= 0 {\n\t\treturn errors.New(\"check-interval must be positive\")\n\t}\n\n\treturn nil\n}\n\n// Diagnostics returns a diagnostics representation of a subset of the Config.\nfunc (c Config) Diagnostics() (*diagnostics.Diagnostics, error) {\n\tif !c.Enabled {\n\t\treturn diagnostics.RowFromMap(map[string]interface{}{\n\t\t\t\"enabled\": false,\n\t\t}), nil\n\t}\n\n\treturn diagnostics.RowFromMap(map[string]interface{}{\n\t\t\"enabled\":        true,\n\t\t\"check-interval\": c.CheckInterval,\n\t}), nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/retention/config_test.go",
    "content": "package retention_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/influxdata/influxdb/services/retention\"\n)\n\nfunc TestConfig_Parse(t *testing.T) {\n\t// Parse configuration.\n\tvar c retention.Config\n\tif _, err := toml.Decode(`\nenabled = true\ncheck-interval = \"1s\"\n`, &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Validate configuration.\n\tif c.Enabled != true {\n\t\tt.Fatalf(\"unexpected enabled state: %v\", c.Enabled)\n\t} else if time.Duration(c.CheckInterval) != time.Second {\n\t\tt.Fatalf(\"unexpected check interval: %v\", c.CheckInterval)\n\t}\n}\n\nfunc TestConfig_Validate(t *testing.T) {\n\tc := retention.NewConfig()\n\tif err := c.Validate(); err != nil {\n\t\tt.Fatalf(\"unexpected validation fail from NewConfig: %s\", err)\n\t}\n\n\tc = retention.NewConfig()\n\tc.CheckInterval = 0\n\tif err := c.Validate(); err == nil {\n\t\tt.Fatal(\"expected error for check-interval = 0, got nil\")\n\t}\n\n\tc = retention.NewConfig()\n\tc.CheckInterval *= -1\n\tif err := c.Validate(); err == nil {\n\t\tt.Fatal(\"expected error for negative check-interval, got nil\")\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/retention/service.go",
    "content": "// Package retention provides the retention policy enforcement service.\npackage retention // import \"github.com/influxdata/influxdb/services/retention\"\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/uber-go/zap\"\n)\n\n// Service represents the retention policy enforcement service.\ntype Service struct {\n\tMetaClient interface {\n\t\tDatabases() []meta.DatabaseInfo\n\t\tDeleteShardGroup(database, policy string, id uint64) error\n\t\tPruneShardGroups() error\n\t}\n\tTSDBStore interface {\n\t\tShardIDs() []uint64\n\t\tDeleteShard(shardID uint64) error\n\t}\n\n\tcheckInterval time.Duration\n\twg            sync.WaitGroup\n\tdone          chan struct{}\n\n\tlogger zap.Logger\n}\n\n// NewService returns a configured retention policy enforcement service.\nfunc NewService(c Config) *Service {\n\treturn &Service{\n\t\tcheckInterval: time.Duration(c.CheckInterval),\n\t\tdone:          make(chan struct{}),\n\t\tlogger:        zap.New(zap.NullEncoder()),\n\t}\n}\n\n// Open starts retention policy enforcement.\nfunc (s *Service) Open() error {\n\ts.logger.Info(fmt.Sprint(\"Starting retention policy enforcement service with check interval of \", s.checkInterval))\n\ts.wg.Add(2)\n\tgo s.deleteShardGroups()\n\tgo s.deleteShards()\n\treturn nil\n}\n\n// Close stops retention policy enforcement.\nfunc (s *Service) Close() error {\n\ts.logger.Info(\"retention policy enforcement terminating\")\n\tclose(s.done)\n\ts.wg.Wait()\n\treturn nil\n}\n\n// WithLogger sets the logger on the service.\nfunc (s *Service) WithLogger(log zap.Logger) {\n\ts.logger = log.With(zap.String(\"service\", \"retention\"))\n}\n\nfunc (s *Service) deleteShardGroups() {\n\tdefer s.wg.Done()\n\n\tticker := time.NewTicker(s.checkInterval)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-s.done:\n\t\t\treturn\n\n\t\tcase <-ticker.C:\n\t\t\tdbs := s.MetaClient.Databases()\n\t\t\tfor _, d := range dbs {\n\t\t\t\tfor 
_, r := range d.RetentionPolicies {\n\t\t\t\t\tfor _, g := range r.ExpiredShardGroups(time.Now().UTC()) {\n\t\t\t\t\t\tif err := s.MetaClient.DeleteShardGroup(d.Name, r.Name, g.ID); err != nil {\n\t\t\t\t\t\t\ts.logger.Info(fmt.Sprintf(\"failed to delete shard group %d from database %s, retention policy %s: %s\",\n\t\t\t\t\t\t\t\tg.ID, d.Name, r.Name, err.Error()))\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\ts.logger.Info(fmt.Sprintf(\"deleted shard group %d from database %s, retention policy %s\",\n\t\t\t\t\t\t\t\tg.ID, d.Name, r.Name))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *Service) deleteShards() {\n\tdefer s.wg.Done()\n\n\tticker := time.NewTicker(s.checkInterval)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-s.done:\n\t\t\treturn\n\n\t\tcase <-ticker.C:\n\t\t\ts.logger.Info(\"retention policy shard deletion check commencing\")\n\n\t\t\ttype deletionInfo struct {\n\t\t\t\tdb string\n\t\t\t\trp string\n\t\t\t}\n\t\t\tdeletedShardIDs := make(map[uint64]deletionInfo, 0)\n\t\t\tdbs := s.MetaClient.Databases()\n\t\t\tfor _, d := range dbs {\n\t\t\t\tfor _, r := range d.RetentionPolicies {\n\t\t\t\t\tfor _, g := range r.DeletedShardGroups() {\n\t\t\t\t\t\tfor _, sh := range g.Shards {\n\t\t\t\t\t\t\tdeletedShardIDs[sh.ID] = deletionInfo{db: d.Name, rp: r.Name}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, id := range s.TSDBStore.ShardIDs() {\n\t\t\t\tif info, ok := deletedShardIDs[id]; ok {\n\t\t\t\t\tif err := s.TSDBStore.DeleteShard(id); err != nil {\n\t\t\t\t\t\ts.logger.Error(fmt.Sprintf(\"failed to delete shard ID %d from database %s, retention policy %s: %s\",\n\t\t\t\t\t\t\tid, info.db, info.rp, err.Error()))\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\ts.logger.Info(fmt.Sprintf(\"shard ID %d from database %s, retention policy %s, deleted\",\n\t\t\t\t\t\tid, info.db, info.rp))\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err := s.MetaClient.PruneShardGroups(); err != nil {\n\t\t\t\ts.logger.Info(fmt.Sprintf(\"error 
pruning shard groups: %s\", err))\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/snapshotter/client.go",
    "content": "package snapshotter\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/tcp\"\n)\n\n// Client provides an API for the snapshotter service.\ntype Client struct {\n\thost string\n}\n\n// NewClient returns a new *Client.\nfunc NewClient(host string) *Client {\n\treturn &Client{host: host}\n}\n\n// MetastoreBackup returns a snapshot of the meta store.\nfunc (c *Client) MetastoreBackup() (*meta.Data, error) {\n\treq := &Request{\n\t\tType: RequestMetastoreBackup,\n\t}\n\n\tb, err := c.doRequest(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check the magic.\n\tmagic := binary.BigEndian.Uint64(b[:8])\n\tif magic != BackupMagicHeader {\n\t\treturn nil, errors.New(\"invalid metadata received\")\n\t}\n\ti := 8\n\n\t// Size of the meta store bytes.\n\tlength := int(binary.BigEndian.Uint64(b[i : i+8]))\n\ti += 8\n\tmetaBytes := b[i : i+length]\n\n\t// Unpack meta data.\n\tvar data meta.Data\n\tif err := data.UnmarshalBinary(metaBytes); err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshal: %s\", err)\n\t}\n\n\treturn &data, nil\n}\n\n// doRequest sends a request to the snapshotter service and returns the result.\nfunc (c *Client) doRequest(req *Request) ([]byte, error) {\n\t// Connect to snapshotter service.\n\tconn, err := tcp.Dial(\"tcp\", c.host, MuxHeader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer conn.Close()\n\n\t// Write the request\n\tif err := json.NewEncoder(conn).Encode(req); err != nil {\n\t\treturn nil, fmt.Errorf(\"encode snapshot request: %s\", err)\n\t}\n\n\t// Read snapshot from the connection\n\tvar buf bytes.Buffer\n\t_, err = io.Copy(&buf, conn)\n\n\treturn buf.Bytes(), err\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/snapshotter/service.go",
    "content": "// Package snapshotter provides the meta snapshot service.\npackage snapshotter // import \"github.com/influxdata/influxdb/services/snapshotter\"\n\nimport (\n\t\"bytes\"\n\t\"encoding\"\n\t\"encoding/binary\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t\"github.com/uber-go/zap\"\n)\n\nconst (\n\t// MuxHeader is the header byte used for the TCP muxer.\n\tMuxHeader = 3\n\n\t// BackupMagicHeader is the first 8 bytes used to identify and validate\n\t// a metastore backup file\n\tBackupMagicHeader = 0x59590101\n)\n\n// Service manages the listener for the snapshot endpoint.\ntype Service struct {\n\twg  sync.WaitGroup\n\terr chan error\n\n\tNode *influxdb.Node\n\n\tMetaClient interface {\n\t\tencoding.BinaryMarshaler\n\t\tDatabase(name string) *meta.DatabaseInfo\n\t}\n\n\tTSDBStore *tsdb.Store\n\n\tListener net.Listener\n\tLogger   zap.Logger\n}\n\n// NewService returns a new instance of Service.\nfunc NewService() *Service {\n\treturn &Service{\n\t\terr:    make(chan error),\n\t\tLogger: zap.New(zap.NullEncoder()),\n\t}\n}\n\n// Open starts the service.\nfunc (s *Service) Open() error {\n\ts.Logger.Info(\"Starting snapshot service\")\n\n\ts.wg.Add(1)\n\tgo s.serve()\n\treturn nil\n}\n\n// Close implements the Service interface.\nfunc (s *Service) Close() error {\n\tif s.Listener != nil {\n\t\ts.Listener.Close()\n\t}\n\ts.wg.Wait()\n\treturn nil\n}\n\n// WithLogger sets the logger on the service.\nfunc (s *Service) WithLogger(log zap.Logger) {\n\ts.Logger = log.With(zap.String(\"service\", \"snapshot\"))\n}\n\n// Err returns a channel for fatal out-of-band errors.\nfunc (s *Service) Err() <-chan error { return s.err }\n\n// serve serves snapshot requests from the listener.\nfunc (s *Service) serve() {\n\tdefer s.wg.Done()\n\n\tfor {\n\t\t// Wait for next connection.\n\t\tconn, err 
:= s.Listener.Accept()\n\t\tif err != nil && strings.Contains(err.Error(), \"connection closed\") {\n\t\t\ts.Logger.Info(\"snapshot listener closed\")\n\t\t\treturn\n\t\t} else if err != nil {\n\t\t\ts.Logger.Info(fmt.Sprint(\"error accepting snapshot request: \", err.Error()))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Handle connection in separate goroutine.\n\t\ts.wg.Add(1)\n\t\tgo func(conn net.Conn) {\n\t\t\tdefer s.wg.Done()\n\t\t\tdefer conn.Close()\n\t\t\tif err := s.handleConn(conn); err != nil {\n\t\t\t\ts.Logger.Info(err.Error())\n\t\t\t}\n\t\t}(conn)\n\t}\n}\n\n// handleConn processes conn. This is run in a separate goroutine.\nfunc (s *Service) handleConn(conn net.Conn) error {\n\tr, err := s.readRequest(conn)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"read request: %s\", err)\n\t}\n\n\tswitch r.Type {\n\tcase RequestShardBackup:\n\t\tif err := s.TSDBStore.BackupShard(r.ShardID, r.Since, conn); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase RequestMetastoreBackup:\n\t\tif err := s.writeMetaStore(conn); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase RequestDatabaseInfo:\n\t\treturn s.writeDatabaseInfo(conn, r.Database)\n\tcase RequestRetentionPolicyInfo:\n\t\treturn s.writeRetentionPolicyInfo(conn, r.Database, r.RetentionPolicy)\n\tdefault:\n\t\treturn fmt.Errorf(\"request type unknown: %v\", r.Type)\n\t}\n\n\treturn nil\n}\n\nfunc (s *Service) writeMetaStore(conn net.Conn) error {\n\t// Retrieve and serialize the current meta data.\n\tmetaBlob, err := s.MetaClient.MarshalBinary()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"marshal meta: %s\", err)\n\t}\n\n\tvar nodeBytes bytes.Buffer\n\tif err := json.NewEncoder(&nodeBytes).Encode(s.Node); err != nil {\n\t\treturn err\n\t}\n\n\tvar numBytes [24]byte\n\n\tbinary.BigEndian.PutUint64(numBytes[:8], BackupMagicHeader)\n\tbinary.BigEndian.PutUint64(numBytes[8:16], uint64(len(metaBlob)))\n\tbinary.BigEndian.PutUint64(numBytes[16:24], uint64(nodeBytes.Len()))\n\n\t// backup header followed by meta blob length\n\tif _, 
err := conn.Write(numBytes[:16]); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := conn.Write(metaBlob); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := conn.Write(numBytes[16:24]); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := nodeBytes.WriteTo(conn); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// writeDatabaseInfo will write the relative paths of all shards in the database on\n// this server into the connection.\nfunc (s *Service) writeDatabaseInfo(conn net.Conn, database string) error {\n\tres := Response{}\n\tdb := s.MetaClient.Database(database)\n\tif db == nil {\n\t\treturn influxdb.ErrDatabaseNotFound(database)\n\t}\n\n\tfor _, rp := range db.RetentionPolicies {\n\t\tfor _, sg := range rp.ShardGroups {\n\t\t\tfor _, sh := range sg.Shards {\n\t\t\t\t// ignore if the shard isn't on the server\n\t\t\t\tif s.TSDBStore.Shard(sh.ID) == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tpath, err := s.TSDBStore.ShardRelativePath(sh.ID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tres.Paths = append(res.Paths, path)\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := json.NewEncoder(conn).Encode(res); err != nil {\n\t\treturn fmt.Errorf(\"encode resonse: %s\", err.Error())\n\t}\n\n\treturn nil\n}\n\n// writeDatabaseInfo will write the relative paths of all shards in the retention policy on\n// this server into the connection\nfunc (s *Service) writeRetentionPolicyInfo(conn net.Conn, database, retentionPolicy string) error {\n\tres := Response{}\n\tdb := s.MetaClient.Database(database)\n\tif db == nil {\n\t\treturn influxdb.ErrDatabaseNotFound(database)\n\t}\n\n\tvar ret *meta.RetentionPolicyInfo\n\n\tfor _, rp := range db.RetentionPolicies {\n\t\tif rp.Name == retentionPolicy {\n\t\t\tret = &rp\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif ret == nil {\n\t\treturn influxdb.ErrRetentionPolicyNotFound(retentionPolicy)\n\t}\n\n\tfor _, sg := range ret.ShardGroups {\n\t\tfor _, sh := range sg.Shards {\n\t\t\t// ignore if the shard isn't on the 
server\n\t\t\tif s.TSDBStore.Shard(sh.ID) == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tpath, err := s.TSDBStore.ShardRelativePath(sh.ID)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tres.Paths = append(res.Paths, path)\n\t\t}\n\t}\n\n\tif err := json.NewEncoder(conn).Encode(res); err != nil {\n\t\treturn fmt.Errorf(\"encode resonse: %s\", err.Error())\n\t}\n\n\treturn nil\n}\n\n// readRequest unmarshals a request object from the conn.\nfunc (s *Service) readRequest(conn net.Conn) (Request, error) {\n\tvar r Request\n\tif err := json.NewDecoder(conn).Decode(&r); err != nil {\n\t\treturn r, err\n\t}\n\treturn r, nil\n}\n\n// RequestType indicates the typeof snapshot request.\ntype RequestType uint8\n\nconst (\n\t// RequestShardBackup represents a request for a shard backup.\n\tRequestShardBackup RequestType = iota\n\n\t// RequestMetastoreBackup represents a request to back up the metastore.\n\tRequestMetastoreBackup\n\n\t// RequestDatabaseInfo represents a request for database info.\n\tRequestDatabaseInfo\n\n\t// RequestRetentionPolicyInfo represents a request for retention policy info.\n\tRequestRetentionPolicyInfo\n)\n\n// Request represents a request for a specific backup or for information\n// about the shards on this server for a database or retention policy.\ntype Request struct {\n\tType            RequestType\n\tDatabase        string\n\tRetentionPolicy string\n\tShardID         uint64\n\tSince           time.Time\n}\n\n// Response contains the relative paths for all the shards on this server\n// that are in the requested database or retention policy.\ntype Response struct {\n\tPaths []string\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/snapshotter/service_test.go",
    "content": "package snapshotter_test\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/subscriber/config.go",
    "content": "package subscriber\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n\t\"github.com/influxdata/influxdb/toml\"\n)\n\nconst (\n\t// DefaultHTTPTimeout is the default HTTP timeout for a Config.\n\tDefaultHTTPTimeout = 30 * time.Second\n\n\t// DefaultWriteConcurrency is the default write concurrency for a Config.\n\tDefaultWriteConcurrency = 40\n\n\t// DefaultWriteBufferSize is the default write buffer size for a Config.\n\tDefaultWriteBufferSize = 1000\n)\n\n// Config represents a configuration of the subscriber service.\ntype Config struct {\n\t// Whether to enable to Subscriber service\n\tEnabled bool `toml:\"enabled\"`\n\n\tHTTPTimeout toml.Duration `toml:\"http-timeout\"`\n\n\t// InsecureSkipVerify gets passed to the http client, if true, it will\n\t// skip https certificate verification. Defaults to false\n\tInsecureSkipVerify bool `toml:\"insecure-skip-verify\"`\n\n\t// configure the path to the PEM encoded CA certs file. 
If the\n\t// empty string, the default system certs will be used\n\tCaCerts string `toml:\"ca-certs\"`\n\n\t// The number of writer goroutines processing the write channel.\n\tWriteConcurrency int `toml:\"write-concurrency\"`\n\n\t// The number of in-flight writes buffered in the write channel.\n\tWriteBufferSize int `toml:\"write-buffer-size\"`\n}\n\n// NewConfig returns a new instance of a subscriber config.\nfunc NewConfig() Config {\n\treturn Config{\n\t\tEnabled:            true,\n\t\tHTTPTimeout:        toml.Duration(DefaultHTTPTimeout),\n\t\tInsecureSkipVerify: false,\n\t\tCaCerts:            \"\",\n\t\tWriteConcurrency:   DefaultWriteConcurrency,\n\t\tWriteBufferSize:    DefaultWriteBufferSize,\n\t}\n}\n\n// Validate returns an error if the config is invalid.\nfunc (c Config) Validate() error {\n\tif c.HTTPTimeout <= 0 {\n\t\treturn errors.New(\"http-timeout must be greater than 0\")\n\t}\n\n\tif c.CaCerts != \"\" && !fileExists(c.CaCerts) {\n\t\tabspath, err := filepath.Abs(c.CaCerts)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"ca-certs file %s does not exist. 
Wrapped Error: %v\", c.CaCerts, err)\n\t\t}\n\t\treturn fmt.Errorf(\"ca-certs file %s does not exist\", abspath)\n\t}\n\n\tif c.WriteBufferSize <= 0 {\n\t\treturn errors.New(\"write-buffer-size must be greater than 0\")\n\t}\n\n\tif c.WriteConcurrency <= 0 {\n\t\treturn errors.New(\"write-concurrency must be greater than 0\")\n\t}\n\n\treturn nil\n}\n\nfunc fileExists(fileName string) bool {\n\tinfo, err := os.Stat(fileName)\n\treturn err == nil && !info.IsDir()\n}\n\n// Diagnostics returns a diagnostics representation of a subset of the Config.\nfunc (c Config) Diagnostics() (*diagnostics.Diagnostics, error) {\n\tif !c.Enabled {\n\t\treturn diagnostics.RowFromMap(map[string]interface{}{\n\t\t\t\"enabled\": false,\n\t\t}), nil\n\t}\n\n\treturn diagnostics.RowFromMap(map[string]interface{}{\n\t\t\"enabled\":           true,\n\t\t\"http-timeout\":      c.HTTPTimeout,\n\t\t\"write-concurrency\": c.WriteConcurrency,\n\t\t\"write-buffer-size\": c.WriteBufferSize,\n\t}), nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/subscriber/config_test.go",
    "content": "package subscriber_test\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/influxdata/influxdb/services/subscriber\"\n)\n\nfunc TestConfig_Parse(t *testing.T) {\n\t// Parse configuration.\n\tvar c subscriber.Config\n\tif _, err := toml.Decode(`\nenabled = false\n`, &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Validate configuration.\n\tif c.Enabled != false {\n\t\tt.Errorf(\"unexpected enabled state: %v\", c.Enabled)\n\t}\n\tif c.InsecureSkipVerify == true {\n\t\tt.Errorf(\"InsecureSkipVerify: expected %v. got %v\", false, c.InsecureSkipVerify)\n\t}\n}\n\nfunc TestConfig_ParseTLSConfig(t *testing.T) {\n\tabspath, err := filepath.Abs(\"/path/to/ca-certs.pem\")\n\tif err != nil {\n\t\tt.Fatalf(\"Could not construct absolute path. %v\", err)\n\t}\n\n\t// Parse configuration.\n\tvar c subscriber.Config\n\tif _, err := toml.Decode(fmt.Sprintf(`\nhttp-timeout = \"60s\"\nenabled = true\nca-certs = '%s'\ninsecure-skip-verify = true\nwrite-buffer-size = 1000\nwrite-concurrency = 10\n`, abspath), &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Validate configuration.\n\tif c.Enabled != true {\n\t\tt.Errorf(\"unexpected enabled state: %v\", c.Enabled)\n\t}\n\tif c.CaCerts != abspath {\n\t\tt.Errorf(\"CaCerts: expected %s. got %s\", abspath, c.CaCerts)\n\t}\n\tif c.InsecureSkipVerify != true {\n\t\tt.Errorf(\"InsecureSkipVerify: expected %v. got %v\", true, c.InsecureSkipVerify)\n\t}\n\terr = c.Validate()\n\tif err == nil {\n\t\tt.Errorf(\"Expected Validation to fail (%s doesn't exist)\", abspath)\n\t}\n\n\tif err.Error() != fmt.Sprintf(\"ca-certs file %s does not exist\", abspath) {\n\t\tt.Errorf(\"Expected descriptive validation error. Instead got %v\", err)\n\t}\n}\n\nfunc TestConfig_ParseTLSConfigValidCerts(t *testing.T) {\n\ttmpfile, err := ioutil.TempFile(\"\", \"ca-certs.crt\")\n\tif err != nil {\n\t\tt.Fatalf(\"could not create temp file. 
error was: %v\", err)\n\t}\n\tdefer os.Remove(tmpfile.Name())\n\n\tif _, err := tmpfile.Write([]byte(\"=== BEGIN CERTIFICATE ===\\n=== END CERTIFICATE ===\")); err != nil {\n\t\tt.Fatalf(\"could not write temp file. error was: %v\", err)\n\t}\n\tif err := tmpfile.Close(); err != nil {\n\t\tt.Fatalf(\"could not close temp file. error was %v\", err)\n\t}\n\n\t// Parse configuration.\n\tvar c subscriber.Config\n\tif _, err := toml.Decode(fmt.Sprintf(`\nhttp-timeout = \"60s\"\nenabled = true\nca-certs = '%s'\ninsecure-skip-verify = false\nwrite-buffer-size = 1000\nwrite-concurrency = 10\n`, tmpfile.Name()), &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Validate configuration.\n\tif c.Enabled != true {\n\t\tt.Errorf(\"unexpected enabled state: %v\", c.Enabled)\n\t}\n\tif c.CaCerts != tmpfile.Name() {\n\t\tt.Errorf(\"CaCerts: expected %v. got %v\", tmpfile.Name(), c.CaCerts)\n\t}\n\tif c.InsecureSkipVerify != false {\n\t\tt.Errorf(\"InsecureSkipVerify: expected %v. got %v\", false, c.InsecureSkipVerify)\n\t}\n\tif err := c.Validate(); err != nil {\n\t\tt.Errorf(\"Expected Validation to succeed. Instead was: %v\", err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/subscriber/http.go",
    "content": "package subscriber\n\nimport (\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"io/ioutil\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/client/v2\"\n\t\"github.com/influxdata/influxdb/coordinator\"\n)\n\n// HTTP supports writing points over HTTP using the line protocol.\ntype HTTP struct {\n\tc client.Client\n}\n\n// NewHTTP returns a new HTTP points writer with default options.\nfunc NewHTTP(addr string, timeout time.Duration) (*HTTP, error) {\n\treturn NewHTTPS(addr, timeout, false, \"\")\n}\n\n// NewHTTPS returns a new HTTPS points writer with default options and HTTPS configured.\nfunc NewHTTPS(addr string, timeout time.Duration, unsafeSsl bool, caCerts string) (*HTTP, error) {\n\ttlsConfig, err := createTLSConfig(caCerts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconf := client.HTTPConfig{\n\t\tAddr:               addr,\n\t\tTimeout:            timeout,\n\t\tInsecureSkipVerify: unsafeSsl,\n\t\tTLSConfig:          tlsConfig,\n\t}\n\n\tc, err := client.NewHTTPClient(conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &HTTP{c: c}, nil\n}\n\n// WritePoints writes points over HTTP transport.\nfunc (h *HTTP) WritePoints(p *coordinator.WritePointsRequest) (err error) {\n\tbp, _ := client.NewBatchPoints(client.BatchPointsConfig{\n\t\tDatabase:        p.Database,\n\t\tRetentionPolicy: p.RetentionPolicy,\n\t})\n\tfor _, pt := range p.Points {\n\t\tbp.AddPoint(client.NewPointFrom(pt))\n\t}\n\terr = h.c.Write(bp)\n\treturn\n}\n\nfunc createTLSConfig(caCerts string) (*tls.Config, error) {\n\tif caCerts == \"\" {\n\t\treturn nil, nil\n\t}\n\treturn loadCaCerts(caCerts)\n}\n\nfunc loadCaCerts(caCerts string) (*tls.Config, error) {\n\tcaCert, err := ioutil.ReadFile(caCerts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcaCertPool := x509.NewCertPool()\n\tcaCertPool.AppendCertsFromPEM(caCert)\n\n\treturn &tls.Config{\n\t\tRootCAs: caCertPool,\n\t}, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/subscriber/service.go",
    "content": "// Package subscriber implements the subscriber service\n// to forward incoming data to remote services.\npackage subscriber // import \"github.com/influxdata/influxdb/services/subscriber\"\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/coordinator\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/monitor\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/uber-go/zap\"\n)\n\n// Statistics for the Subscriber service.\nconst (\n\tstatCreateFailures = \"createFailures\"\n\tstatPointsWritten  = \"pointsWritten\"\n\tstatWriteFailures  = \"writeFailures\"\n)\n\n// PointsWriter is an interface for writing points to a subscription destination.\n// Only WritePoints() needs to be satisfied.  PointsWriter implementations\n// must be goroutine safe.\ntype PointsWriter interface {\n\tWritePoints(p *coordinator.WritePointsRequest) error\n}\n\n// subEntry is a unique set that identifies a given subscription.\ntype subEntry struct {\n\tdb   string\n\trp   string\n\tname string\n}\n\n// Service manages forking the incoming data from InfluxDB\n// to defined third party destinations.\n// Subscriptions are defined per database and retention policy.\ntype Service struct {\n\tMetaClient interface {\n\t\tDatabases() []meta.DatabaseInfo\n\t\tWaitForDataChanged() chan struct{}\n\t}\n\tNewPointsWriter func(u url.URL) (PointsWriter, error)\n\tLogger          zap.Logger\n\tupdate          chan struct{}\n\tstats           *Statistics\n\tpoints          chan *coordinator.WritePointsRequest\n\twg              sync.WaitGroup\n\tclosed          bool\n\tclosing         chan struct{}\n\tmu              sync.Mutex\n\tconf            Config\n\n\tsubs  map[subEntry]chanWriter\n\tsubMu sync.RWMutex\n}\n\n// NewService returns a subscriber service with given settings\nfunc NewService(c Config) *Service {\n\ts := &Service{\n\t\tLogger: 
zap.New(zap.NullEncoder()),\n\t\tclosed: true,\n\t\tstats:  &Statistics{},\n\t\tconf:   c,\n\t}\n\ts.NewPointsWriter = s.newPointsWriter\n\treturn s\n}\n\n// Open starts the subscription service.\nfunc (s *Service) Open() error {\n\tif !s.conf.Enabled {\n\t\treturn nil // Service disabled.\n\t}\n\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif s.MetaClient == nil {\n\t\treturn errors.New(\"no meta store\")\n\t}\n\n\ts.closed = false\n\n\ts.closing = make(chan struct{})\n\ts.update = make(chan struct{})\n\ts.points = make(chan *coordinator.WritePointsRequest, 100)\n\n\ts.wg.Add(2)\n\tgo func() {\n\t\tdefer s.wg.Done()\n\t\ts.run()\n\t}()\n\tgo func() {\n\t\tdefer s.wg.Done()\n\t\ts.waitForMetaUpdates()\n\t}()\n\n\ts.Logger.Info(\"opened service\")\n\treturn nil\n}\n\n// Close terminates the subscription service.\n// It will panic if called multiple times or without first opening the service.\nfunc (s *Service) Close() error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif s.closed {\n\t\treturn nil // Already closed.\n\t}\n\n\ts.closed = true\n\n\tclose(s.points)\n\tclose(s.closing)\n\n\ts.wg.Wait()\n\ts.Logger.Info(\"closed service\")\n\treturn nil\n}\n\n// WithLogger sets the logger on the service.\nfunc (s *Service) WithLogger(log zap.Logger) {\n\ts.Logger = log.With(zap.String(\"service\", \"subscriber\"))\n}\n\n// Statistics maintains the statistics for the subscriber service.\ntype Statistics struct {\n\tCreateFailures int64\n\tPointsWritten  int64\n\tWriteFailures  int64\n}\n\n// Statistics returns statistics for periodic monitoring.\nfunc (s *Service) Statistics(tags map[string]string) []models.Statistic {\n\tstatistics := []models.Statistic{{\n\t\tName: \"subscriber\",\n\t\tTags: tags,\n\t\tValues: map[string]interface{}{\n\t\t\tstatCreateFailures: atomic.LoadInt64(&s.stats.CreateFailures),\n\t\t\tstatPointsWritten:  atomic.LoadInt64(&s.stats.PointsWritten),\n\t\t\tstatWriteFailures:  
atomic.LoadInt64(&s.stats.WriteFailures),\n\t\t},\n\t}}\n\n\ts.subMu.RLock()\n\tdefer s.subMu.RUnlock()\n\n\tfor _, sub := range s.subs {\n\t\tstatistics = append(statistics, sub.Statistics(tags)...)\n\t}\n\treturn statistics\n}\n\nfunc (s *Service) waitForMetaUpdates() {\n\tfor {\n\t\tch := s.MetaClient.WaitForDataChanged()\n\t\tselect {\n\t\tcase <-ch:\n\t\t\terr := s.Update()\n\t\t\tif err != nil {\n\t\t\t\ts.Logger.Info(fmt.Sprint(\"error updating subscriptions: \", err))\n\t\t\t}\n\t\tcase <-s.closing:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// Update will start new and stop deleted subscriptions.\nfunc (s *Service) Update() error {\n\t// signal update\n\tselect {\n\tcase s.update <- struct{}{}:\n\t\treturn nil\n\tcase <-s.closing:\n\t\treturn errors.New(\"service closed cannot update\")\n\t}\n}\n\nfunc (s *Service) createSubscription(se subEntry, mode string, destinations []string) (PointsWriter, error) {\n\tvar bm BalanceMode\n\tswitch mode {\n\tcase \"ALL\":\n\t\tbm = ALL\n\tcase \"ANY\":\n\t\tbm = ANY\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown balance mode %q\", mode)\n\t}\n\twriters := make([]PointsWriter, 0, len(destinations))\n\tstats := make([]writerStats, 0, len(destinations))\n\t// add only valid destinations\n\tfor _, dest := range destinations {\n\t\tu, err := url.Parse(dest)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse destination: %s\", dest)\n\t\t}\n\t\tw, err := s.NewPointsWriter(*u)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create writer for destination: %s\", dest)\n\t\t}\n\t\twriters = append(writers, w)\n\t\tstats = append(stats, writerStats{dest: dest})\n\t}\n\n\treturn &balancewriter{\n\t\tbm:      bm,\n\t\twriters: writers,\n\t\tstats:   stats,\n\t\tdefaultTags: models.StatisticTags{\n\t\t\t\"database\":         se.db,\n\t\t\t\"retention_policy\": se.rp,\n\t\t\t\"name\":             se.name,\n\t\t\t\"mode\":             mode,\n\t\t},\n\t}, nil\n}\n\n// Points returns a channel into which write 
point requests can be sent.\nfunc (s *Service) Points() chan<- *coordinator.WritePointsRequest {\n\treturn s.points\n}\n\n// run read points from the points channel and writes them to the subscriptions.\nfunc (s *Service) run() {\n\tvar wg sync.WaitGroup\n\ts.subs = make(map[subEntry]chanWriter)\n\t// Perform initial update\n\ts.updateSubs(&wg)\n\tfor {\n\t\tselect {\n\t\tcase <-s.update:\n\t\t\ts.updateSubs(&wg)\n\t\tcase p, ok := <-s.points:\n\t\t\tif !ok {\n\t\t\t\t// Close out all chanWriters\n\t\t\t\ts.close(&wg)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor se, cw := range s.subs {\n\t\t\t\tif p.Database == se.db && p.RetentionPolicy == se.rp {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase cw.writeRequests <- p:\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tatomic.AddInt64(&s.stats.WriteFailures, 1)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n// close closes the existing channel writers.\nfunc (s *Service) close(wg *sync.WaitGroup) {\n\ts.subMu.Lock()\n\tdefer s.subMu.Unlock()\n\n\tfor _, cw := range s.subs {\n\t\tcw.Close()\n\t}\n\t// Wait for them to finish\n\twg.Wait()\n\ts.subs = nil\n}\n\nfunc (s *Service) updateSubs(wg *sync.WaitGroup) {\n\ts.subMu.Lock()\n\tdefer s.subMu.Unlock()\n\n\tif s.subs == nil {\n\t\ts.subs = make(map[subEntry]chanWriter)\n\t}\n\n\tdbis := s.MetaClient.Databases()\n\tallEntries := make(map[subEntry]bool, 0)\n\t// Add in new subscriptions\n\tfor _, dbi := range dbis {\n\t\tfor _, rpi := range dbi.RetentionPolicies {\n\t\t\tfor _, si := range rpi.Subscriptions {\n\t\t\t\tse := subEntry{\n\t\t\t\t\tdb:   dbi.Name,\n\t\t\t\t\trp:   rpi.Name,\n\t\t\t\t\tname: si.Name,\n\t\t\t\t}\n\t\t\t\tallEntries[se] = true\n\t\t\t\tif _, ok := s.subs[se]; ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tsub, err := s.createSubscription(se, si.Mode, si.Destinations)\n\t\t\t\tif err != nil {\n\t\t\t\t\tatomic.AddInt64(&s.stats.CreateFailures, 1)\n\t\t\t\t\ts.Logger.Info(fmt.Sprintf(\"Subscription creation failed for '%s' with error: %s\", si.Name, 
err))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcw := chanWriter{\n\t\t\t\t\twriteRequests: make(chan *coordinator.WritePointsRequest, s.conf.WriteBufferSize),\n\t\t\t\t\tpw:            sub,\n\t\t\t\t\tpointsWritten: &s.stats.PointsWritten,\n\t\t\t\t\tfailures:      &s.stats.WriteFailures,\n\t\t\t\t\tlogger:        s.Logger,\n\t\t\t\t}\n\t\t\t\tfor i := 0; i < s.conf.WriteConcurrency; i++ {\n\t\t\t\t\twg.Add(1)\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\t\tcw.Run()\n\t\t\t\t\t}()\n\t\t\t\t}\n\t\t\t\ts.subs[se] = cw\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"added new subscription for %s %s\", se.db, se.rp))\n\t\t\t}\n\t\t}\n\t}\n\n\t// Remove deleted subs\n\tfor se := range s.subs {\n\t\tif !allEntries[se] {\n\t\t\t// Close the chanWriter\n\t\t\ts.subs[se].Close()\n\n\t\t\t// Remove it from the set\n\t\t\tdelete(s.subs, se)\n\t\t\ts.Logger.Info(fmt.Sprintf(\"deleted old subscription for %s %s\", se.db, se.rp))\n\t\t}\n\t}\n}\n\n// newPointsWriter returns a new PointsWriter from the given URL.\nfunc (s *Service) newPointsWriter(u url.URL) (PointsWriter, error) {\n\tswitch u.Scheme {\n\tcase \"udp\":\n\t\treturn NewUDP(u.Host), nil\n\tcase \"http\":\n\t\treturn NewHTTP(u.String(), time.Duration(s.conf.HTTPTimeout))\n\tcase \"https\":\n\t\tif s.conf.InsecureSkipVerify {\n\t\t\ts.Logger.Info(\"WARNING: 'insecure-skip-verify' is true. 
This will skip all certificate verifications.\")\n\t\t}\n\t\treturn NewHTTPS(u.String(), time.Duration(s.conf.HTTPTimeout), s.conf.InsecureSkipVerify, s.conf.CaCerts)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown destination scheme %s\", u.Scheme)\n\t}\n}\n\n// chanWriter sends WritePointsRequest to a PointsWriter received over a channel.\ntype chanWriter struct {\n\twriteRequests chan *coordinator.WritePointsRequest\n\tpw            PointsWriter\n\tpointsWritten *int64\n\tfailures      *int64\n\tlogger        zap.Logger\n}\n\n// Close closes the chanWriter.\nfunc (c chanWriter) Close() {\n\tclose(c.writeRequests)\n}\n\nfunc (c chanWriter) Run() {\n\tfor wr := range c.writeRequests {\n\t\terr := c.pw.WritePoints(wr)\n\t\tif err != nil {\n\t\t\tc.logger.Info(err.Error())\n\t\t\tatomic.AddInt64(c.failures, 1)\n\t\t} else {\n\t\t\tatomic.AddInt64(c.pointsWritten, int64(len(wr.Points)))\n\t\t}\n\t}\n}\n\n// Statistics returns statistics for periodic monitoring.\nfunc (c chanWriter) Statistics(tags map[string]string) []models.Statistic {\n\tif m, ok := c.pw.(monitor.Reporter); ok {\n\t\treturn m.Statistics(tags)\n\t}\n\treturn []models.Statistic{}\n}\n\n// BalanceMode specifies what balance mode to use on a subscription.\ntype BalanceMode int\n\nconst (\n\t// ALL indicates to send writes to all subscriber destinations.\n\tALL BalanceMode = iota\n\n\t// ANY indicates to send writes to a single subscriber destination, round robin.\n\tANY\n)\n\ntype writerStats struct {\n\tdest          string\n\tfailures      int64\n\tpointsWritten int64\n}\n\n// balances writes across PointsWriters according to BalanceMode\ntype balancewriter struct {\n\tbm          BalanceMode\n\twriters     []PointsWriter\n\tstats       []writerStats\n\tdefaultTags models.StatisticTags\n\ti           int\n}\n\nfunc (b *balancewriter) WritePoints(p *coordinator.WritePointsRequest) error {\n\tvar lastErr error\n\tfor range b.writers {\n\t\t// round robin through destinations.\n\t\ti := b.i\n\t\tw := 
b.writers[i]\n\t\tb.i = (b.i + 1) % len(b.writers)\n\n\t\t// write points to destination.\n\t\terr := w.WritePoints(p)\n\t\tif err != nil {\n\t\t\tlastErr = err\n\t\t\tatomic.AddInt64(&b.stats[i].failures, 1)\n\t\t} else {\n\t\t\tatomic.AddInt64(&b.stats[i].pointsWritten, int64(len(p.Points)))\n\t\t\tif b.bm == ANY {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn lastErr\n}\n\n// Statistics returns statistics for periodic monitoring.\nfunc (b *balancewriter) Statistics(tags map[string]string) []models.Statistic {\n\tstatistics := make([]models.Statistic, len(b.stats))\n\tfor i := range b.stats {\n\t\tsubTags := b.defaultTags.Merge(tags)\n\t\tsubTags[\"destination\"] = b.stats[i].dest\n\t\tstatistics[i] = models.Statistic{\n\t\t\tName: \"subscriber\",\n\t\t\tTags: subTags,\n\t\t\tValues: map[string]interface{}{\n\t\t\t\tstatPointsWritten: atomic.LoadInt64(&b.stats[i].pointsWritten),\n\t\t\t\tstatWriteFailures: atomic.LoadInt64(&b.stats[i].failures),\n\t\t\t},\n\t\t}\n\t}\n\treturn statistics\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/subscriber/service_test.go",
    "content": "package subscriber_test\n\nimport (\n\t\"net/url\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/coordinator\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/services/subscriber\"\n)\n\ntype MetaClient struct {\n\tDatabasesFn          func() []meta.DatabaseInfo\n\tWaitForDataChangedFn func() chan struct{}\n}\n\nfunc (m MetaClient) Databases() []meta.DatabaseInfo {\n\treturn m.DatabasesFn()\n}\n\nfunc (m MetaClient) WaitForDataChanged() chan struct{} {\n\treturn m.WaitForDataChangedFn()\n}\n\ntype Subscription struct {\n\tWritePointsFn func(*coordinator.WritePointsRequest) error\n}\n\nfunc (s Subscription) WritePoints(p *coordinator.WritePointsRequest) error {\n\treturn s.WritePointsFn(p)\n}\n\nfunc TestService_IgnoreNonMatch(t *testing.T) {\n\tdataChanged := make(chan struct{})\n\tms := MetaClient{}\n\tms.WaitForDataChangedFn = func() chan struct{} {\n\t\treturn dataChanged\n\t}\n\tms.DatabasesFn = func() []meta.DatabaseInfo {\n\t\treturn []meta.DatabaseInfo{\n\t\t\t{\n\t\t\t\tName: \"db0\",\n\t\t\t\tRetentionPolicies: []meta.RetentionPolicyInfo{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"rp0\",\n\t\t\t\t\t\tSubscriptions: []meta.SubscriptionInfo{\n\t\t\t\t\t\t\t{Name: \"s0\", Mode: \"ANY\", Destinations: []string{\"udp://h0:9093\", \"udp://h1:9093\"}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\tprs := make(chan *coordinator.WritePointsRequest, 2)\n\turls := make(chan url.URL, 2)\n\tnewPointsWriter := func(u url.URL) (subscriber.PointsWriter, error) {\n\t\tsub := Subscription{}\n\t\tsub.WritePointsFn = func(p *coordinator.WritePointsRequest) error {\n\t\t\tprs <- p\n\t\t\treturn nil\n\t\t}\n\t\turls <- u\n\t\treturn sub, nil\n\t}\n\n\ts := subscriber.NewService(subscriber.NewConfig())\n\ts.MetaClient = ms\n\ts.NewPointsWriter = newPointsWriter\n\ts.Open()\n\tdefer s.Close()\n\n\t// Signal that data has changed\n\tdataChanged <- struct{}{}\n\n\tfor _, expURLStr := range 
[]string{\"udp://h0:9093\", \"udp://h1:9093\"} {\n\t\tvar u url.URL\n\t\texpURL, _ := url.Parse(expURLStr)\n\t\tselect {\n\t\tcase u = <-urls:\n\t\tcase <-time.After(10 * time.Millisecond):\n\t\t\tt.Fatal(\"expected urls\")\n\t\t}\n\t\tif expURL.String() != u.String() {\n\t\t\tt.Fatalf(\"unexpected url: got %s exp %s\", u.String(), expURL.String())\n\t\t}\n\t}\n\n\t// Write points that don't match any subscription.\n\ts.Points() <- &coordinator.WritePointsRequest{\n\t\tDatabase:        \"db1\",\n\t\tRetentionPolicy: \"rp0\",\n\t}\n\ts.Points() <- &coordinator.WritePointsRequest{\n\t\tDatabase:        \"db0\",\n\t\tRetentionPolicy: \"rp2\",\n\t}\n\n\t// Shouldn't get any prs back\n\tselect {\n\tcase pr := <-prs:\n\t\tt.Fatalf(\"unexpected points request %v\", pr)\n\tdefault:\n\t}\n\tclose(dataChanged)\n}\n\nfunc TestService_ModeALL(t *testing.T) {\n\tdataChanged := make(chan struct{})\n\tms := MetaClient{}\n\tms.WaitForDataChangedFn = func() chan struct{} {\n\t\treturn dataChanged\n\t}\n\tms.DatabasesFn = func() []meta.DatabaseInfo {\n\t\treturn []meta.DatabaseInfo{\n\t\t\t{\n\t\t\t\tName: \"db0\",\n\t\t\t\tRetentionPolicies: []meta.RetentionPolicyInfo{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"rp0\",\n\t\t\t\t\t\tSubscriptions: []meta.SubscriptionInfo{\n\t\t\t\t\t\t\t{Name: \"s0\", Mode: \"ALL\", Destinations: []string{\"udp://h0:9093\", \"udp://h1:9093\"}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\tprs := make(chan *coordinator.WritePointsRequest, 2)\n\turls := make(chan url.URL, 2)\n\tnewPointsWriter := func(u url.URL) (subscriber.PointsWriter, error) {\n\t\tsub := Subscription{}\n\t\tsub.WritePointsFn = func(p *coordinator.WritePointsRequest) error {\n\t\t\tprs <- p\n\t\t\treturn nil\n\t\t}\n\t\turls <- u\n\t\treturn sub, nil\n\t}\n\n\ts := subscriber.NewService(subscriber.NewConfig())\n\ts.MetaClient = ms\n\ts.NewPointsWriter = newPointsWriter\n\ts.Open()\n\tdefer s.Close()\n\n\t// Signal that data has changed\n\tdataChanged <- 
struct{}{}\n\n\tfor _, expURLStr := range []string{\"udp://h0:9093\", \"udp://h1:9093\"} {\n\t\tvar u url.URL\n\t\texpURL, _ := url.Parse(expURLStr)\n\t\tselect {\n\t\tcase u = <-urls:\n\t\tcase <-time.After(10 * time.Millisecond):\n\t\t\tt.Fatal(\"expected urls\")\n\t\t}\n\t\tif expURL.String() != u.String() {\n\t\t\tt.Fatalf(\"unexpected url: got %s exp %s\", u.String(), expURL.String())\n\t\t}\n\t}\n\n\t// Write points that match subscription with mode ALL\n\texpPR := &coordinator.WritePointsRequest{\n\t\tDatabase:        \"db0\",\n\t\tRetentionPolicy: \"rp0\",\n\t}\n\ts.Points() <- expPR\n\n\t// Should get pr back twice\n\tfor i := 0; i < 2; i++ {\n\t\tvar pr *coordinator.WritePointsRequest\n\t\tselect {\n\t\tcase pr = <-prs:\n\t\tcase <-time.After(10 * time.Millisecond):\n\t\t\tt.Fatalf(\"expected points request: got %d exp 2\", i)\n\t\t}\n\t\tif pr != expPR {\n\t\t\tt.Errorf(\"unexpected points request: got %v, exp %v\", pr, expPR)\n\t\t}\n\t}\n\tclose(dataChanged)\n}\n\nfunc TestService_ModeANY(t *testing.T) {\n\tdataChanged := make(chan struct{})\n\tms := MetaClient{}\n\tms.WaitForDataChangedFn = func() chan struct{} {\n\t\treturn dataChanged\n\t}\n\tms.DatabasesFn = func() []meta.DatabaseInfo {\n\t\treturn []meta.DatabaseInfo{\n\t\t\t{\n\t\t\t\tName: \"db0\",\n\t\t\t\tRetentionPolicies: []meta.RetentionPolicyInfo{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"rp0\",\n\t\t\t\t\t\tSubscriptions: []meta.SubscriptionInfo{\n\t\t\t\t\t\t\t{Name: \"s0\", Mode: \"ANY\", Destinations: []string{\"udp://h0:9093\", \"udp://h1:9093\"}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\tprs := make(chan *coordinator.WritePointsRequest, 2)\n\turls := make(chan url.URL, 2)\n\tnewPointsWriter := func(u url.URL) (subscriber.PointsWriter, error) {\n\t\tsub := Subscription{}\n\t\tsub.WritePointsFn = func(p *coordinator.WritePointsRequest) error {\n\t\t\tprs <- p\n\t\t\treturn nil\n\t\t}\n\t\turls <- u\n\t\treturn sub, nil\n\t}\n\n\ts := 
subscriber.NewService(subscriber.NewConfig())\n\ts.MetaClient = ms\n\ts.NewPointsWriter = newPointsWriter\n\ts.Open()\n\tdefer s.Close()\n\n\t// Signal that data has changed\n\tdataChanged <- struct{}{}\n\n\tfor _, expURLStr := range []string{\"udp://h0:9093\", \"udp://h1:9093\"} {\n\t\tvar u url.URL\n\t\texpURL, _ := url.Parse(expURLStr)\n\t\tselect {\n\t\tcase u = <-urls:\n\t\tcase <-time.After(10 * time.Millisecond):\n\t\t\tt.Fatal(\"expected urls\")\n\t\t}\n\t\tif expURL.String() != u.String() {\n\t\t\tt.Fatalf(\"unexpected url: got %s exp %s\", u.String(), expURL.String())\n\t\t}\n\t}\n\t// Write points that match subscription with mode ANY\n\texpPR := &coordinator.WritePointsRequest{\n\t\tDatabase:        \"db0\",\n\t\tRetentionPolicy: \"rp0\",\n\t}\n\ts.Points() <- expPR\n\n\t// Validate we get the pr back just once\n\tvar pr *coordinator.WritePointsRequest\n\tselect {\n\tcase pr = <-prs:\n\tcase <-time.After(10 * time.Millisecond):\n\t\tt.Fatal(\"expected points request\")\n\t}\n\tif pr != expPR {\n\t\tt.Errorf(\"unexpected points request: got %v, exp %v\", pr, expPR)\n\t}\n\n\t// shouldn't get it a second time\n\tselect {\n\tcase pr = <-prs:\n\t\tt.Fatalf(\"unexpected points request %v\", pr)\n\tdefault:\n\t}\n\tclose(dataChanged)\n}\n\nfunc TestService_Multiple(t *testing.T) {\n\tdataChanged := make(chan struct{})\n\tms := MetaClient{}\n\tms.WaitForDataChangedFn = func() chan struct{} {\n\t\treturn dataChanged\n\t}\n\tms.DatabasesFn = func() []meta.DatabaseInfo {\n\t\treturn []meta.DatabaseInfo{\n\t\t\t{\n\t\t\t\tName: \"db0\",\n\t\t\t\tRetentionPolicies: []meta.RetentionPolicyInfo{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"rp0\",\n\t\t\t\t\t\tSubscriptions: []meta.SubscriptionInfo{\n\t\t\t\t\t\t\t{Name: \"s0\", Mode: \"ANY\", Destinations: []string{\"udp://h0:9093\", \"udp://h1:9093\"}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"rp1\",\n\t\t\t\t\t\tSubscriptions: []meta.SubscriptionInfo{\n\t\t\t\t\t\t\t{Name: \"s1\", Mode: \"ALL\", 
Destinations: []string{\"udp://h2:9093\", \"udp://h3:9093\"}},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\tprs := make(chan *coordinator.WritePointsRequest, 4)\n\turls := make(chan url.URL, 4)\n\tnewPointsWriter := func(u url.URL) (subscriber.PointsWriter, error) {\n\t\tsub := Subscription{}\n\t\tsub.WritePointsFn = func(p *coordinator.WritePointsRequest) error {\n\t\t\tprs <- p\n\t\t\treturn nil\n\t\t}\n\t\turls <- u\n\t\treturn sub, nil\n\t}\n\n\ts := subscriber.NewService(subscriber.NewConfig())\n\ts.MetaClient = ms\n\ts.NewPointsWriter = newPointsWriter\n\ts.Open()\n\tdefer s.Close()\n\n\t// Signal that data has changed\n\tdataChanged <- struct{}{}\n\n\tfor _, expURLStr := range []string{\"udp://h0:9093\", \"udp://h1:9093\", \"udp://h2:9093\", \"udp://h3:9093\"} {\n\t\tvar u url.URL\n\t\texpURL, _ := url.Parse(expURLStr)\n\t\tselect {\n\t\tcase u = <-urls:\n\t\tcase <-time.After(100 * time.Millisecond):\n\t\t\tt.Fatal(\"expected urls\")\n\t\t}\n\t\tif expURL.String() != u.String() {\n\t\t\tt.Fatalf(\"unexpected url: got %s exp %s\", u.String(), expURL.String())\n\t\t}\n\t}\n\n\t// Write points that don't match any subscription.\n\ts.Points() <- &coordinator.WritePointsRequest{\n\t\tDatabase:        \"db1\",\n\t\tRetentionPolicy: \"rp0\",\n\t}\n\ts.Points() <- &coordinator.WritePointsRequest{\n\t\tDatabase:        \"db0\",\n\t\tRetentionPolicy: \"rp2\",\n\t}\n\n\t// Write points that match subscription with mode ANY\n\texpPR := &coordinator.WritePointsRequest{\n\t\tDatabase:        \"db0\",\n\t\tRetentionPolicy: \"rp0\",\n\t}\n\ts.Points() <- expPR\n\n\t// Validate we get the pr back just once\n\tvar pr *coordinator.WritePointsRequest\n\tselect {\n\tcase pr = <-prs:\n\tcase <-time.After(100 * time.Millisecond):\n\t\tt.Fatal(\"expected points request\")\n\t}\n\tif pr != expPR {\n\t\tt.Errorf(\"unexpected points request: got %v, exp %v\", pr, expPR)\n\t}\n\n\t// shouldn't get it a second time\n\tselect {\n\tcase pr = 
<-prs:\n\t\tt.Fatalf(\"unexpected points request %v\", pr)\n\tdefault:\n\t}\n\n\t// Write points that match subscription with mode ALL\n\texpPR = &coordinator.WritePointsRequest{\n\t\tDatabase:        \"db0\",\n\t\tRetentionPolicy: \"rp1\",\n\t}\n\ts.Points() <- expPR\n\n\t// Should get pr back twice\n\tfor i := 0; i < 2; i++ {\n\t\tselect {\n\t\tcase pr = <-prs:\n\t\tcase <-time.After(100 * time.Millisecond):\n\t\t\tt.Fatalf(\"expected points request: got %d exp 2\", i)\n\t\t}\n\t\tif pr != expPR {\n\t\t\tt.Errorf(\"unexpected points request: got %v, exp %v\", pr, expPR)\n\t\t}\n\t}\n\tclose(dataChanged)\n}\n\nfunc TestService_WaitForDataChanged(t *testing.T) {\n\tdataChanged := make(chan struct{}, 1)\n\tms := MetaClient{}\n\tms.WaitForDataChangedFn = func() chan struct{} {\n\t\treturn dataChanged\n\t}\n\tcalls := make(chan bool, 2)\n\tms.DatabasesFn = func() []meta.DatabaseInfo {\n\t\tcalls <- true\n\t\treturn nil\n\t}\n\n\ts := subscriber.NewService(subscriber.NewConfig())\n\ts.MetaClient = ms\n\t// Explicitly closed below for testing\n\ts.Open()\n\n\t// Should be called once during open\n\tselect {\n\tcase <-calls:\n\tcase <-time.After(10 * time.Millisecond):\n\t\tt.Fatal(\"expected call\")\n\t}\n\n\tselect {\n\tcase <-calls:\n\t\tt.Fatal(\"unexpected call\")\n\tcase <-time.After(time.Millisecond):\n\t}\n\n\t// Signal that data has changed\n\tdataChanged <- struct{}{}\n\n\t// Should be called once more after data changed\n\tselect {\n\tcase <-calls:\n\tcase <-time.After(10 * time.Millisecond):\n\t\tt.Fatal(\"expected call\")\n\t}\n\n\tselect {\n\tcase <-calls:\n\t\tt.Fatal(\"unexpected call\")\n\tcase <-time.After(time.Millisecond):\n\t}\n\n\t//Close service ensure not called\n\ts.Close()\n\tdataChanged <- struct{}{}\n\tselect {\n\tcase <-calls:\n\t\tt.Fatal(\"unexpected call\")\n\tcase <-time.After(time.Millisecond):\n\t}\n\n\tclose(dataChanged)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/subscriber/udp.go",
    "content": "package subscriber\n\nimport (\n\t\"net\"\n\n\t\"github.com/influxdata/influxdb/coordinator\"\n)\n\n// UDP supports writing points over UDP using the line protocol.\ntype UDP struct {\n\taddr string\n}\n\n// NewUDP returns a new UDP listener with default options.\nfunc NewUDP(addr string) *UDP {\n\treturn &UDP{addr: addr}\n}\n\n// WritePoints writes points over UDP transport.\nfunc (u *UDP) WritePoints(p *coordinator.WritePointsRequest) (err error) {\n\tvar addr *net.UDPAddr\n\tvar con *net.UDPConn\n\taddr, err = net.ResolveUDPAddr(\"udp\", u.addr)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tcon, err = net.DialUDP(\"udp\", nil, addr)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer con.Close()\n\n\tfor _, p := range p.Points {\n\t\t_, err = con.Write([]byte(p.String()))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t}\n\treturn\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/udp/README.md",
    "content": "# The UDP Input\n\n## A note on UDP/IP OS Buffer sizes\n\nSome OSes (most notably, Linux) place very restricive limits on the performance\nof UDP protocols. It is _highly_ recommended that you increase these OS limits to\nat least 25MB before trying to run UDP traffic to your instance.\n25MB is just a recommendation, and should be adjusted to be inline with your\n`read-buffer` plugin setting.\n\n### Linux\nCheck the current UDP/IP receive buffer default and limit by typing the following commands:\n\n```\nsysctl net.core.rmem_max\nsysctl net.core.rmem_default\n```\n\nIf the values are less than 26214400 bytes (25MB) you should add the following lines to the /etc/sysctl.conf file:\n\n```\nnet.core.rmem_max=26214400\nnet.core.rmem_default=26214400\n```\n\nChanges to /etc/sysctl.conf do not take effect until reboot.  To update the values immediately, type the following commands as root:\n\n```\nsysctl -w net.core.rmem_max=26214400\nsysctl -w net.core.rmem_default=26214400\n```\n\n### BSD/Darwin\n\nOn BSD/Darwin systems you need to add about a 15% padding to the kernel limit\nsocket buffer. Meaning if you want an 25MB buffer (8388608 bytes) you need to set\nthe kernel limit to `26214400*1.15 = 30146560`. This is not documented anywhere but\nhappens\n[in the kernel here.](https://github.com/freebsd/freebsd/blob/master/sys/kern/uipc_sockbuf.c#L63-L64)\n\nCheck the current UDP/IP buffer limit by typing the following command:\n\n```\nsysctl kern.ipc.maxsockbuf\n```\n\nIf the value is less than 30146560 bytes you should add the following lines to the /etc/sysctl.conf file (create it if necessary):\n\n```\nkern.ipc.maxsockbuf=30146560\n```\n\nChanges to /etc/sysctl.conf do not take effect until reboot.  
To update the values immediately, type the following commands as root:\n\n```\nsysctl -w kern.ipc.maxsockbuf=30146560\n```\n\n### Using the read-buffer option for the UDP listener\n\nThe `read-buffer` option allows users to set the buffer size for the UDP listener.\nIt Sets the size of the operating system's receive buffer associated with\nthe UDP traffic. Keep in mind that the OS must be able\nto handle the number set here or the UDP listener will error and exit.\n\n`read-buffer = 0` means to use the OS default, which is usually too\nsmall for high UDP performance.\n\n## Configuration\n\nEach UDP input allows the binding address, target database, and target retention policy to be set. If the database does not exist, it will be created automatically when the input is initialized. If the retention policy is not configured, then the default retention policy for the database is used. However if the retention policy is set, the retention policy must be explicitly created. The input will not automatically create it.\n\nEach UDP input also performs internal batching of the points it receives, as batched writes to the database are more efficient. The default _batch size_ is 1000, _pending batch_ factor is 5, with a _batch timeout_ of 1 second. This means the input will write batches of maximum size 1000, but if a batch has not reached 1000 points within 1 second of the first point being added to a batch, it will emit that batch regardless of size. The pending batch factor controls how many batches can be in memory at once, allowing the input to transmit a batch, while still building other batches.\n\n## Processing\n\nThe UDP input can receive up to 64KB per read, and splits the received data by newline. Each part is then interpreted as line-protocol encoded points, and parsed accordingly.\n\n## UDP is connectionless\n\nSince UDP is a connectionless protocol there is no way to signal to the data source if any error occurs, and if data has even been successfully indexed. 
This should be kept in mind when deciding if and when to use the UDP input. The built-in UDP statistics are useful for monitoring the UDP inputs.\n\n## Config Examples\n\nOne UDP listener\n\n```\n# influxd.conf\n...\n[[udp]]\n  enabled = true\n  bind-address = \":8089\" # the bind address\n  database = \"telegraf\" # Name of the database that will be written to\n  batch-size = 5000 # will flush if this many points get buffered\n  batch-timeout = \"1s\" # will flush at least this often even if the batch-size is not reached\n  batch-pending = 10 # number of batches that may be pending in memory\n  read-buffer = 0 # UDP read buffer, 0 means to use OS default\n...\n```\n\nMultiple UDP listeners\n\n```\n# influxd.conf\n...\n[[udp]]\n  # Default UDP for Telegraf\n  enabled = true\n  bind-address = \":8089\" # the bind address\n  database = \"telegraf\" # Name of the database that will be written to\n  batch-size = 5000 # will flush if this many points get buffered\n  batch-timeout = \"1s\" # will flush at least this often even if the batch-size is not reached\n  batch-pending = 10 # number of batches that may be pending in memory\n  read-buffer = 0 # UDP read buffer size, 0 means to use OS default\n\n[[udp]]\n  # High-traffic UDP\n  enabled = true\n  bind-address = \":80891\" # the bind address\n  database = \"mymetrics\" # Name of the database that will be written to\n  batch-size = 5000 # will flush if this many points get buffered\n  batch-timeout = \"1s\" # will flush at least this often even if the batch-size is not reached\n  batch-pending = 100 # number of batches that may be pending in memory\n  read-buffer = 8388608 # (8*1024*1024) UDP read buffer size\n...\n```\n\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/udp/config.go",
    "content": "package udp\n\nimport (\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n\t\"github.com/influxdata/influxdb/toml\"\n)\n\nconst (\n\t// DefaultBindAddress is the default binding interface if none is specified.\n\tDefaultBindAddress = \":8089\"\n\n\t// DefaultDatabase is the default database for UDP traffic.\n\tDefaultDatabase = \"udp\"\n\n\t// DefaultRetentionPolicy is the default retention policy used for writes.\n\tDefaultRetentionPolicy = \"\"\n\n\t// DefaultBatchSize is the default UDP batch size.\n\tDefaultBatchSize = 5000\n\n\t// DefaultBatchPending is the default number of pending UDP batches.\n\tDefaultBatchPending = 10\n\n\t// DefaultBatchTimeout is the default UDP batch timeout.\n\tDefaultBatchTimeout = time.Second\n\n\t// DefaultPrecision is the default time precision used for UDP services.\n\tDefaultPrecision = \"n\"\n\n\t// DefaultReadBuffer is the default buffer size for the UDP listener.\n\t// Sets the size of the operating system's receive buffer associated with\n\t// the UDP traffic. 
Keep in mind that the OS must be able\n\t// to handle the number set here or the UDP listener will error and exit.\n\t//\n\t// DefaultReadBuffer = 0 means to use the OS default, which is usually too\n\t// small for high UDP performance.\n\t//\n\t// Increasing OS buffer limits:\n\t//     Linux:      sudo sysctl -w net.core.rmem_max=<read-buffer>\n\t//     BSD/Darwin: sudo sysctl -w kern.ipc.maxsockbuf=<read-buffer>\n\tDefaultReadBuffer = 0\n)\n\n// Config holds various configuration settings for the UDP listener.\ntype Config struct {\n\tEnabled     bool   `toml:\"enabled\"`\n\tBindAddress string `toml:\"bind-address\"`\n\n\tDatabase        string        `toml:\"database\"`\n\tRetentionPolicy string        `toml:\"retention-policy\"`\n\tBatchSize       int           `toml:\"batch-size\"`\n\tBatchPending    int           `toml:\"batch-pending\"`\n\tReadBuffer      int           `toml:\"read-buffer\"`\n\tBatchTimeout    toml.Duration `toml:\"batch-timeout\"`\n\tPrecision       string        `toml:\"precision\"`\n}\n\n// NewConfig returns a new instance of Config with defaults.\nfunc NewConfig() Config {\n\treturn Config{\n\t\tBindAddress:     DefaultBindAddress,\n\t\tDatabase:        DefaultDatabase,\n\t\tRetentionPolicy: DefaultRetentionPolicy,\n\t\tBatchSize:       DefaultBatchSize,\n\t\tBatchPending:    DefaultBatchPending,\n\t\tBatchTimeout:    toml.Duration(DefaultBatchTimeout),\n\t}\n}\n\n// WithDefaults takes the given config and returns a new config with any required\n// default values set.\nfunc (c *Config) WithDefaults() *Config {\n\td := *c\n\tif d.Database == \"\" {\n\t\td.Database = DefaultDatabase\n\t}\n\tif d.BatchSize == 0 {\n\t\td.BatchSize = DefaultBatchSize\n\t}\n\tif d.BatchPending == 0 {\n\t\td.BatchPending = DefaultBatchPending\n\t}\n\tif d.BatchTimeout == 0 {\n\t\td.BatchTimeout = toml.Duration(DefaultBatchTimeout)\n\t}\n\tif d.Precision == \"\" {\n\t\td.Precision = DefaultPrecision\n\t}\n\tif d.ReadBuffer == 0 {\n\t\td.ReadBuffer = 
DefaultReadBuffer\n\t}\n\treturn &d\n}\n\n// Configs wraps a slice of Config to aggregate diagnostics.\ntype Configs []Config\n\n// Diagnostics returns one set of diagnostics for all of the Configs.\nfunc (c Configs) Diagnostics() (*diagnostics.Diagnostics, error) {\n\td := &diagnostics.Diagnostics{\n\t\tColumns: []string{\"enabled\", \"bind-address\", \"database\", \"retention-policy\", \"batch-size\", \"batch-pending\", \"batch-timeout\"},\n\t}\n\n\tfor _, cc := range c {\n\t\tif !cc.Enabled {\n\t\t\td.AddRow([]interface{}{false})\n\t\t\tcontinue\n\t\t}\n\n\t\tr := []interface{}{true, cc.BindAddress, cc.Database, cc.RetentionPolicy, cc.BatchSize, cc.BatchPending, cc.BatchTimeout}\n\t\td.AddRow(r)\n\t}\n\n\treturn d, nil\n}\n\n// Enabled returns true if any underlying Config is Enabled.\nfunc (c Configs) Enabled() bool {\n\tfor _, cc := range c {\n\t\tif cc.Enabled {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/udp/config_test.go",
    "content": "package udp_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/influxdata/influxdb/services/udp\"\n)\n\nfunc TestConfig_Parse(t *testing.T) {\n\t// Parse configuration.\n\tvar c udp.Config\n\tif _, err := toml.Decode(`\nenabled = true\nbind-address = \":4444\"\ndatabase = \"awesomedb\"\nretention-policy = \"awesomerp\"\nbatch-size = 100\nbatch-pending = 9\nbatch-timeout = \"10ms\"\nudp-payload-size = 1500\n`, &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Validate configuration.\n\tif c.Enabled != true {\n\t\tt.Fatalf(\"unexpected enabled: %v\", c.Enabled)\n\t} else if c.BindAddress != \":4444\" {\n\t\tt.Fatalf(\"unexpected bind address: %s\", c.BindAddress)\n\t} else if c.Database != \"awesomedb\" {\n\t\tt.Fatalf(\"unexpected database: %s\", c.Database)\n\t} else if c.RetentionPolicy != \"awesomerp\" {\n\t\tt.Fatalf(\"unexpected retention policy: %s\", c.RetentionPolicy)\n\t} else if c.BatchSize != 100 {\n\t\tt.Fatalf(\"unexpected batch size: %d\", c.BatchSize)\n\t} else if c.BatchPending != 9 {\n\t\tt.Fatalf(\"unexpected batch pending: %d\", c.BatchPending)\n\t} else if time.Duration(c.BatchTimeout) != (10 * time.Millisecond) {\n\t\tt.Fatalf(\"unexpected batch timeout: %v\", c.BatchTimeout)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/udp/service.go",
    "content": "// Package udp provides the UDP input service for InfluxDB.\npackage udp // import \"github.com/influxdata/influxdb/services/udp\"\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t\"github.com/uber-go/zap\"\n)\n\nconst (\n\t// Arbitrary, testing indicated that this doesn't typically get over 10\n\tparserChanLen = 1000\n\n\t// MaxUDPPayload is largest payload size the UDP service will accept.\n\tMaxUDPPayload = 64 * 1024\n)\n\n// statistics gathered by the UDP package.\nconst (\n\tstatPointsReceived      = \"pointsRx\"\n\tstatBytesReceived       = \"bytesRx\"\n\tstatPointsParseFail     = \"pointsParseFail\"\n\tstatReadFail            = \"readFail\"\n\tstatBatchesTransmitted  = \"batchesTx\"\n\tstatPointsTransmitted   = \"pointsTx\"\n\tstatBatchesTransmitFail = \"batchesTxFail\"\n)\n\n// Service is a UDP service that will listen for incoming packets of line protocol.\ntype Service struct {\n\tconn *net.UDPConn\n\taddr *net.UDPAddr\n\twg   sync.WaitGroup\n\n\tmu    sync.RWMutex\n\tready bool          // Has the required database been created?\n\tdone  chan struct{} // Is the service closing or closed?\n\n\tparserChan chan []byte\n\tbatcher    *tsdb.PointBatcher\n\tconfig     Config\n\n\tPointsWriter interface {\n\t\tWritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error\n\t}\n\n\tMetaClient interface {\n\t\tCreateDatabase(name string) (*meta.DatabaseInfo, error)\n\t}\n\n\tLogger      zap.Logger\n\tstats       *Statistics\n\tdefaultTags models.StatisticTags\n}\n\n// NewService returns a new instance of Service.\nfunc NewService(c Config) *Service {\n\td := *c.WithDefaults()\n\treturn &Service{\n\t\tconfig:      d,\n\t\tparserChan:  make(chan []byte, parserChanLen),\n\t\tbatcher:     
tsdb.NewPointBatcher(d.BatchSize, d.BatchPending, time.Duration(d.BatchTimeout)),\n\t\tLogger:      zap.New(zap.NullEncoder()),\n\t\tstats:       &Statistics{},\n\t\tdefaultTags: models.StatisticTags{\"bind\": d.BindAddress},\n\t}\n}\n\n// Open starts the service.\nfunc (s *Service) Open() (err error) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif !s.closed() {\n\t\treturn nil // Already open.\n\t}\n\ts.done = make(chan struct{})\n\n\tif s.config.BindAddress == \"\" {\n\t\treturn errors.New(\"bind address has to be specified in config\")\n\t}\n\tif s.config.Database == \"\" {\n\t\treturn errors.New(\"database has to be specified in config\")\n\t}\n\n\ts.addr, err = net.ResolveUDPAddr(\"udp\", s.config.BindAddress)\n\tif err != nil {\n\t\ts.Logger.Info(fmt.Sprintf(\"Failed to resolve UDP address %s: %s\", s.config.BindAddress, err))\n\t\treturn err\n\t}\n\n\ts.conn, err = net.ListenUDP(\"udp\", s.addr)\n\tif err != nil {\n\t\ts.Logger.Info(fmt.Sprintf(\"Failed to set up UDP listener at address %s: %s\", s.addr, err))\n\t\treturn err\n\t}\n\n\tif s.config.ReadBuffer != 0 {\n\t\terr = s.conn.SetReadBuffer(s.config.ReadBuffer)\n\t\tif err != nil {\n\t\t\ts.Logger.Info(fmt.Sprintf(\"Failed to set UDP read buffer to %d: %s\",\n\t\t\t\ts.config.ReadBuffer, err))\n\t\t\treturn err\n\t\t}\n\t}\n\n\ts.Logger.Info(fmt.Sprintf(\"Started listening on UDP: %s\", s.config.BindAddress))\n\n\ts.wg.Add(3)\n\tgo s.serve()\n\tgo s.parser()\n\tgo s.writer()\n\n\treturn nil\n}\n\n// Statistics maintains statistics for the UDP service.\ntype Statistics struct {\n\tPointsReceived      int64\n\tBytesReceived       int64\n\tPointsParseFail     int64\n\tReadFail            int64\n\tBatchesTransmitted  int64\n\tPointsTransmitted   int64\n\tBatchesTransmitFail int64\n}\n\n// Statistics returns statistics for periodic monitoring.\nfunc (s *Service) Statistics(tags map[string]string) []models.Statistic {\n\treturn []models.Statistic{{\n\t\tName: \"udp\",\n\t\tTags: 
s.defaultTags.Merge(tags),\n\t\tValues: map[string]interface{}{\n\t\t\tstatPointsReceived:      atomic.LoadInt64(&s.stats.PointsReceived),\n\t\t\tstatBytesReceived:       atomic.LoadInt64(&s.stats.BytesReceived),\n\t\t\tstatPointsParseFail:     atomic.LoadInt64(&s.stats.PointsParseFail),\n\t\t\tstatReadFail:            atomic.LoadInt64(&s.stats.ReadFail),\n\t\t\tstatBatchesTransmitted:  atomic.LoadInt64(&s.stats.BatchesTransmitted),\n\t\t\tstatPointsTransmitted:   atomic.LoadInt64(&s.stats.PointsTransmitted),\n\t\t\tstatBatchesTransmitFail: atomic.LoadInt64(&s.stats.BatchesTransmitFail),\n\t\t},\n\t}}\n}\n\nfunc (s *Service) writer() {\n\tdefer s.wg.Done()\n\n\tfor {\n\t\tselect {\n\t\tcase batch := <-s.batcher.Out():\n\t\t\t// Will attempt to create database if not yet created.\n\t\t\tif err := s.createInternalStorage(); err != nil {\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"Required database %s does not yet exist: %s\", s.config.Database, err.Error()))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err := s.PointsWriter.WritePointsPrivileged(s.config.Database, s.config.RetentionPolicy, models.ConsistencyLevelAny, batch); err == nil {\n\t\t\t\tatomic.AddInt64(&s.stats.BatchesTransmitted, 1)\n\t\t\t\tatomic.AddInt64(&s.stats.PointsTransmitted, int64(len(batch)))\n\t\t\t} else {\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"failed to write point batch to database %q: %s\", s.config.Database, err))\n\t\t\t\tatomic.AddInt64(&s.stats.BatchesTransmitFail, 1)\n\t\t\t}\n\n\t\tcase <-s.done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (s *Service) serve() {\n\tdefer s.wg.Done()\n\n\tbuf := make([]byte, MaxUDPPayload)\n\ts.batcher.Start()\n\tfor {\n\t\tselect {\n\t\tcase <-s.done:\n\t\t\t// We closed the connection, time to go.\n\t\t\treturn\n\t\tdefault:\n\t\t\t// Keep processing.\n\t\t\tn, _, err := s.conn.ReadFromUDP(buf)\n\t\t\tif err != nil {\n\t\t\t\tatomic.AddInt64(&s.stats.ReadFail, 1)\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"Failed to read UDP message: %s\", 
err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tatomic.AddInt64(&s.stats.BytesReceived, int64(n))\n\n\t\t\tbufCopy := make([]byte, n)\n\t\t\tcopy(bufCopy, buf[:n])\n\t\t\ts.parserChan <- bufCopy\n\t\t}\n\t}\n}\n\nfunc (s *Service) parser() {\n\tdefer s.wg.Done()\n\n\tfor {\n\t\tselect {\n\t\tcase <-s.done:\n\t\t\treturn\n\t\tcase buf := <-s.parserChan:\n\t\t\tpoints, err := models.ParsePointsWithPrecision(buf, time.Now().UTC(), s.config.Precision)\n\t\t\tif err != nil {\n\t\t\t\tatomic.AddInt64(&s.stats.PointsParseFail, 1)\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"Failed to parse points: %s\", err))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, point := range points {\n\t\t\t\ts.batcher.In() <- point\n\t\t\t}\n\t\t\tatomic.AddInt64(&s.stats.PointsReceived, int64(len(points)))\n\t\t}\n\t}\n}\n\n// Close closes the service and the underlying listener.\nfunc (s *Service) Close() error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif s.closed() {\n\t\treturn nil // Already closed.\n\t}\n\tclose(s.done)\n\n\tif s.conn != nil {\n\t\ts.conn.Close()\n\t}\n\n\ts.batcher.Flush()\n\ts.wg.Wait()\n\n\t// Release all remaining resources.\n\ts.done = nil\n\ts.conn = nil\n\n\ts.Logger.Info(\"Service closed\")\n\n\treturn nil\n}\n\n// Closed returns true if the service is currently closed.\nfunc (s *Service) Closed() bool {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\treturn s.closed()\n}\n\nfunc (s *Service) closed() bool {\n\tselect {\n\tcase <-s.done:\n\t\t// Service is closing.\n\t\treturn true\n\tdefault:\n\t}\n\treturn s.done == nil\n}\n\n// createInternalStorage ensures that the required database has been created.\nfunc (s *Service) createInternalStorage() error {\n\ts.mu.RLock()\n\tready := s.ready\n\ts.mu.RUnlock()\n\tif ready {\n\t\treturn nil\n\t}\n\n\tif _, err := s.MetaClient.CreateDatabase(s.config.Database); err != nil {\n\t\treturn err\n\t}\n\n\t// The service is now ready.\n\ts.mu.Lock()\n\ts.ready = true\n\ts.mu.Unlock()\n\treturn nil\n}\n\n// WithLogger sets the logger on the 
service.\nfunc (s *Service) WithLogger(log zap.Logger) {\n\ts.Logger = log.With(zap.String(\"service\", \"udp\"))\n}\n\n// Addr returns the listener's address.\nfunc (s *Service) Addr() net.Addr {\n\treturn s.addr\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/services/udp/service_test.go",
    "content": "package udp\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/internal\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/uber-go/zap\"\n)\n\nfunc TestService_OpenClose(t *testing.T) {\n\tservice := NewTestService(nil)\n\n\t// Closing a closed service is fine.\n\tif err := service.Service.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Closing a closed service again is fine.\n\tif err := service.Service.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := service.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Opening an already open service is fine.\n\tif err := service.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Reopening a previously opened service is fine.\n\tif err := service.Service.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := service.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Tidy up.\n\tif err := service.Service.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestService_CreatesDatabase(t *testing.T) {\n\tt.Parallel()\n\n\ts := NewTestService(nil)\n\ts.WritePointsFn = func(string, string, models.ConsistencyLevel, []models.Point) error {\n\t\treturn nil\n\t}\n\n\tcalled := make(chan struct{})\n\ts.MetaClient.CreateDatabaseFn = func(name string) (*meta.DatabaseInfo, error) {\n\t\tif name != s.Config.Database {\n\t\t\tt.Errorf(\"\\n\\texp = %s\\n\\tgot = %s\\n\", s.Config.Database, name)\n\t\t}\n\t\t// Allow some time for the caller to return and the ready status to\n\t\t// be set.\n\t\ttime.AfterFunc(10*time.Millisecond, func() { called <- struct{}{} })\n\t\treturn nil, errors.New(\"an error\")\n\t}\n\n\tif err := s.Service.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpoints, err := models.ParsePointsString(`cpu value=1`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ts.Service.batcher.In() <- points[0] // Send a 
point.\n\ts.Service.batcher.Flush()\n\tselect {\n\tcase <-called:\n\t\t// OK\n\tcase <-time.NewTimer(5 * time.Second).C:\n\t\tt.Fatal(\"Service should have attempted to create database\")\n\t}\n\n\t// ready status should not have been switched due to meta client error.\n\ts.Service.mu.RLock()\n\tready := s.Service.ready\n\ts.Service.mu.RUnlock()\n\n\tif got, exp := ready, false; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// This time MC won't cause an error.\n\ts.MetaClient.CreateDatabaseFn = func(name string) (*meta.DatabaseInfo, error) {\n\t\t// Allow some time for the caller to return and the ready status to\n\t\t// be set.\n\t\ttime.AfterFunc(10*time.Millisecond, func() { called <- struct{}{} })\n\t\treturn nil, nil\n\t}\n\n\ts.Service.batcher.In() <- points[0] // Send a point.\n\ts.Service.batcher.Flush()\n\tselect {\n\tcase <-called:\n\t\t// OK\n\tcase <-time.NewTimer(5 * time.Second).C:\n\t\tt.Fatal(\"Service should have attempted to create database\")\n\t}\n\n\t// ready status should now be true.\n\ts.Service.mu.RLock()\n\tready = s.Service.ready\n\ts.Service.mu.RUnlock()\n\n\tif got, exp := ready, true; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\ts.Service.Close()\n}\n\ntype TestService struct {\n\tService       *Service\n\tConfig        Config\n\tMetaClient    *internal.MetaClientMock\n\tWritePointsFn func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error\n}\n\nfunc NewTestService(c *Config) *TestService {\n\tif c == nil {\n\t\tdefaultC := NewConfig()\n\t\tc = &defaultC\n\t}\n\n\tservice := &TestService{\n\t\tService:    NewService(*c),\n\t\tConfig:     *c,\n\t\tMetaClient: &internal.MetaClientMock{},\n\t}\n\n\tif testing.Verbose() {\n\t\tservice.Service.WithLogger(zap.New(\n\t\t\tzap.NewTextEncoder(),\n\t\t\tzap.Output(os.Stderr),\n\t\t))\n\t}\n\n\tservice.Service.MetaClient = service.MetaClient\n\tservice.Service.PointsWriter = 
service\n\treturn service\n}\n\nfunc (s *TestService) WritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {\n\treturn s.WritePointsFn(database, retentionPolicy, consistencyLevel, points)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/DESIGN.md",
    "content": "## Stress Test\nThe logic for `StressTest` can be found in `stress/run.go`.\n\nA new `StressTest` type was added and is composed of four different parts. The `StressTest` type has one method `Start(wHandle responseHandler, rHandle responseHandler)`. This method starts the stress test.\n\nA `responseHandler` is a function with type signature `func(r <-chan response, t *Timer)`. Response Handlers handle the read and write responses respectively.\n\n### Provisioner\nProvisions the InfluxDB instance that the stress test is going to be run against.\n\nThink things like, creating the database, setting up retention policies, continuous queries, etc.\n\n### Writer\nThe `Writer` is responsible for writing data into an InfluxDB instance. It has two components: `PointGenerator` and `InfluxClient`.\n\n##### PointGenerator\nThe `PointGenerator` is responsible for generating points that will be written into InfluxDB. Additionally, it is responsible for keeping track of the latest timestamp of the points it is writing (just in case it's needed by the `Reader`).\n\nAny type that implements the methods `Generate()` and `Time()` is a `PointGenerator`.\n\n##### InfluxClient\nThe `InfluxClient` is responsible for writing the data that is generated by the `PointGenerator`.\n\nAny type that implements `Batch(ps <-chan Point, r chan<- response)`, and `send(b []byte) response` is an `InfluxClient`.\n\n### Reader\nThe `Reader` is responsible for querying the database. 
It has two components: `QueryGenerator` and `QueryClient`.\n\n##### QueryGenerator\nThe `QueryGenerator` is responsible for generating queries.\n\n##### QueryClient\nThe `QueryClient` is responsible for executing queries against an InfluxDB instance.\n\n## Basic\n`basic.go` implements each of the components of a stress test.\n\n## Util\n`util.go` contains utility methods used throughout the package.\n\n## Config\n`config.go` contains the logic for managing the configuration of the stress test.\n\nA sample configuration file can be found in `stress/stress.toml`. This still needs work, but what's there now is good enough IMO.\n\n## Template\n`template.go` contains the logic for a basic stress test.\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/README.md",
    "content": "# `influx_stress` usage and configuration\n\nThe binary for `influx_stress` comes bundled with all influx installations. \nTo run it against an `influxd` instance located at  `localhost:8086` with the default configuration options:\n\nSee more about the [default configuration options](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml)\n\n```bash\n$ influx_stress\n```\n\nTo run `influx_stress` with a configuration file:\n```bash\n$ influx_stress -config my_awesome_test.toml\n```\n\nTo daemonize `influx_stress` and save the output to a results file:\n```bash\n$ influx_stress -config my_awesome_test.toml > my_awesome_test_out.txt 2>&1 &\n```\n\nTo run multiple instances of `influx_stress` just change the `measurement` each test writes to, details below\n```bash\n$ influx_stress -config my_awesome_test1.toml > my_awesome_test_out1.txt 2>&1 &\n$ influx_stress -config my_awesome_test2.toml > my_awesome_test_out2.txt 2>&1 &\n```\n\nBelow is a sample configuration file with comments explaining the different options\n```toml\n# The [provision] section creates a new database on the target instance for the stress test to write points to and perform queries against\n# This section can be deleted if the instance is manually configured. 
In that case make sure that the database referenced in [write] exists\n# The provisioner will try to delete the database before trying to recreate it.\n\n[provision]\n  [provision.basic]\n    # If set to false you can delete this section from the config\n    enabled = true\n    # address of the node to be provisioned\n    address = \"<node1_ip>:8086\"\n    # name of the database to create\n    database = \"stress\"\n    # This must be set to true\n    reset_database = true\n\n# The [write] section defines the shape of the generated data and configures the InfluxDB client\n[write]\n  # The [write.point_generator] defines the shape of the generated data\n  [write.point_generator]\n    [write.point_generator.basic]\n      # This needs to be set to true\n      enabled = true\n      # The total number of points a stress_test will write is determined by multiplying the following two numbers:\n      # point_count * series_count = total_points\n      # Number of points to write to the database for each series\n      point_count = 100\n      # Number of series to write to the database?\n      series_count = 100000\n      # This simulates collection interval in the timestamps of generated points\n      tick = \"10s\"\n      # This must be set to true\n      jitter = true\n      # The measurement name for the generated points\n      measurement = \"cpu\"\n      # The generated timestamps follow the pattern of { start_date + (n * tick) }\n      # This sequence is preserved for each series and is always increasing\n      start_date = \"2006-Jan-02\"\n      # Precision for generated points\n      # This setting MUST be the same as [write.influx_client.basic]precision\n      precision = \"s\"\n      # The '[[]]' in toml format indicates that the element is an array of items. 
\n      # [[write.point_generator.basic.tag]] defines a tag on the generated points\n      # key is the tag key\n      # value is the tag value\n      # The first tag defined will have '-0' through '-{series_count}' added to the end of the string\n      [[write.point_generator.basic.tag]]\n        key = \"host\"\n        value = \"server\"\n      [[write.point_generator.basic.tag]]\n        key = \"location\"\n        value = \"us-west\"\n      # [[write.point_generator.basic.field]] defines a field on the generated points\n      # key is the field key\n      # value is the type of the field\n      [[write.point_generator.basic.field]]\n        key = \"value\"\n        # Can be either \"float64\", \"int\", \"bool\"\n        value = \"float64\"\n\n  # The [write.influx_client] defines what influx instances the stress_test targets\n  [write.influx_client]\n    [write.influx_client.basic]\n      # This must be set to true\n      enabled = true\n      # This is an array of addresses\n      # addresses = [\"<node1_ip>:8086\",\"<node2_ip>:8086\",\"<node3_ip>:8086\"] to target a cluster\n      addresses = [\"<node1_ip>:8086\"] # to target an individual node \n      # This database in the in the target influx instance to write to\n      # This database MUST be created in the target instance or the test will fail\n      database = \"stress\"\n      # Write precision for points\n      # This setting MUST be the same as [write.point_generator.basic]precision\n      precision = \"s\"\n      # The number of point to write to the database with each POST /write sent\n      batch_size = 5000\n      # An optional amount of time for a worker to wait between POST requests\n      batch_interval = \"0s\"\n      # The number of workers to use to write to the database\n      # More workers == more load with diminishing returns starting at ~5 workers\n      # 10 workers provides a medium-high level of load to the database\n      concurrency = 10\n      # This must be set to false\n      
ssl = false\n      # This must be set to \"line_http\"\n      format = \"line_http\"\n```"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/basic.go",
    "content": "package stress\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"math/rand\"\n\t\"net/http\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/client/v2\"\n)\n\nconst backoffInterval = time.Duration(500 * time.Millisecond)\n\n// AbstractTag is a struct that abstractly\n// defines a tag\ntype AbstractTag struct {\n\tKey   string `toml:\"key\"`\n\tValue string `toml:\"value\"`\n}\n\n// AbstractTags is a slice of abstract tags\ntype AbstractTags []AbstractTag\n\n// Template returns a templated string of tags\nfunc (t AbstractTags) Template() string {\n\tvar buf bytes.Buffer\n\tfor i, tag := range t {\n\t\tif i == 0 {\n\t\t\tbuf.Write([]byte(fmt.Sprintf(\"%v=%v-%%v,\", tag.Key, tag.Value)))\n\t\t} else {\n\t\t\tbuf.Write([]byte(fmt.Sprintf(\"%v=%v,\", tag.Key, tag.Value)))\n\t\t}\n\t}\n\n\tb := buf.Bytes()\n\tb = b[0 : len(b)-1]\n\n\treturn string(b)\n}\n\n// AbstractField is a struct that abstractly\n// defines a field\ntype AbstractField struct {\n\tKey  string `toml:\"key\"`\n\tType string `toml:\"type\"`\n}\n\n// AbstractFields is a slice of abstract fields\ntype AbstractFields []AbstractField\n\n// Template returns a templated string of fields\nfunc (f AbstractFields) Template() (string, []string) {\n\tvar buf bytes.Buffer\n\ta := make([]string, len(f))\n\tfor i, field := range f {\n\t\tbuf.Write([]byte(fmt.Sprintf(\"%v=%%v,\", field.Key)))\n\t\ta[i] = field.Type\n\t}\n\n\tb := buf.Bytes()\n\tb = b[0 : len(b)-1]\n\n\treturn string(b), a\n}\n\n// BasicPointGenerator implements the PointGenerator interface\ntype BasicPointGenerator struct {\n\tPointCount  int            `toml:\"point_count\"`\n\tTick        string         `toml:\"tick\"`\n\tJitter      bool           `toml:\"jitter\"`\n\tMeasurement string         `toml:\"measurement\"`\n\tSeriesCount int            `toml:\"series_count\"`\n\tTags        AbstractTags   `toml:\"tag\"`\n\tFields      AbstractFields `toml:\"field\"`\n\tStartDate   string       
  `toml:\"start_date\"`\n\tPrecision   string         `toml:\"precision\"`\n\ttime        time.Time\n\tmu          sync.Mutex\n}\n\n// typeArr accepts a string array of types and\n// returns an array of equal length where each\n// element of the array is an instance of the type\n// expressed in the string array.\nfunc typeArr(a []string) []interface{} {\n\ti := make([]interface{}, len(a))\n\tfor j, ty := range a {\n\t\tvar t string\n\t\tswitch ty {\n\t\tcase \"float64\":\n\t\t\tt = fmt.Sprintf(\"%v\", rand.Intn(1000))\n\t\tcase \"int\":\n\t\t\tt = fmt.Sprintf(\"%vi\", rand.Intn(1000))\n\t\tcase \"bool\":\n\t\t\tb := rand.Intn(2) == 1\n\t\t\tt = fmt.Sprintf(\"%t\", b)\n\t\tdefault:\n\t\t\tt = fmt.Sprintf(\"%v\", rand.Intn(1000))\n\t\t}\n\t\ti[j] = t\n\t}\n\n\treturn i\n}\n\nfunc (b *BasicPointGenerator) timestamp(t time.Time) int64 {\n\tvar n int64\n\n\tif b.Precision == \"s\" {\n\t\tn = t.Unix()\n\t} else {\n\t\tn = t.UnixNano()\n\t}\n\n\treturn n\n}\n\n// Template returns a function that returns a pointer to a Pnt.\nfunc (b *BasicPointGenerator) Template() func(i int, t time.Time) *Pnt {\n\tts := b.Tags.Template()\n\tfs, fa := b.Fields.Template()\n\ttmplt := fmt.Sprintf(\"%v,%v %v %%v\", b.Measurement, ts, fs)\n\n\treturn func(i int, t time.Time) *Pnt {\n\t\tp := &Pnt{}\n\t\tarr := []interface{}{i}\n\t\tarr = append(arr, typeArr(fa)...)\n\t\tarr = append(arr, b.timestamp(t))\n\n\t\tstr := fmt.Sprintf(tmplt, arr...)\n\t\tp.Set([]byte(str))\n\t\treturn p\n\t}\n}\n\n// Pnt is a struct that implements the Point interface.\ntype Pnt struct {\n\tline []byte\n}\n\n// Set sets the internal state for a Pnt.\nfunc (p *Pnt) Set(b []byte) {\n\tp.line = b\n}\n\n// Next generates very simple points very\n// efficiently.\n// TODO: Take this out\nfunc (p *Pnt) Next(i int, t time.Time) {\n\tp.line = []byte(fmt.Sprintf(\"a,b=c-%v v=%v\", i, i))\n}\n\n// Line returns a byte array for a point\n// in line protocol format.\nfunc (p Pnt) Line() []byte {\n\treturn p.line\n}\n\n// 
Graphite returns a byte array for a point\n// in graphite format.\nfunc (p Pnt) Graphite() []byte {\n\t// TODO: Implement\n\treturn []byte(\"\")\n}\n\n// OpenJSON returns a byte array for a point\n// in opentsdb json format\nfunc (p Pnt) OpenJSON() []byte {\n\t// TODO: Implement\n\treturn []byte(\"\")\n}\n\n// OpenTelnet returns a byte array for a point\n// in opentsdb-telnet format\nfunc (p Pnt) OpenTelnet() []byte {\n\t// TODO: Implement\n\treturn []byte(\"\")\n}\n\n// Generate returns a point channel. Implements the\n// Generate method for the PointGenerator interface\nfunc (b *BasicPointGenerator) Generate() (<-chan Point, error) {\n\t// TODO: should be 1.5x batch size\n\tc := make(chan Point, 15000)\n\n\ttmplt := b.Template()\n\n\tgo func(c chan Point) {\n\t\tdefer close(c)\n\n\t\tvar start time.Time\n\t\tvar err error\n\t\tif b.StartDate == \"now\" {\n\t\t\tstart = time.Now()\n\t\t} else {\n\t\t\tstart, err = time.Parse(\"2006-Jan-02\", b.StartDate)\n\t\t}\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tb.mu.Lock()\n\t\tb.time = start\n\t\tb.mu.Unlock()\n\n\t\ttick, err := time.ParseDuration(b.Tick)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tfor i := 0; i < b.PointCount; i++ {\n\t\t\tb.mu.Lock()\n\t\t\tb.time = b.time.Add(tick)\n\t\t\tb.mu.Unlock()\n\n\t\t\tfor j := 0; j < b.SeriesCount; j++ {\n\t\t\t\tp := tmplt(j, b.time)\n\n\t\t\t\tc <- *p\n\t\t\t}\n\t\t}\n\t}(c)\n\n\treturn c, nil\n}\n\n// Time returns the timestamp for the latest points\n// that are being generated. 
Implements the Time method\n// for the PointGenerator interface.\nfunc (b *BasicPointGenerator) Time() time.Time {\n\tdefer b.mu.Unlock()\n\tb.mu.Lock()\n\tt := b.time\n\treturn t\n}\n\n// BasicClient implements the InfluxClient\n// interface.\ntype BasicClient struct {\n\tEnabled         bool     `toml:\"enabled\"`\n\tAddresses       []string `toml:\"addresses\"`\n\tDatabase        string   `toml:\"database\"`\n\tRetentionPolicy string   `toml:\"retention-policy\"`\n\tPrecision       string   `toml:\"precision\"`\n\tBatchSize       int      `toml:\"batch_size\"`\n\tBatchInterval   string   `toml:\"batch_interval\"`\n\tConcurrency     int      `toml:\"concurrency\"`\n\tSSL             bool     `toml:\"ssl\"`\n\tFormat          string   `toml:\"format\"`\n\n\taddrId   int\n\tr        chan<- response\n\tinterval time.Duration\n}\n\nfunc (c *BasicClient) retry(b []byte, backoff time.Duration) {\n\tbo := backoff + backoffInterval\n\trs, err := c.send(b)\n\ttime.Sleep(c.interval)\n\n\tc.r <- rs\n\tif !rs.Success() || err != nil {\n\t\ttime.Sleep(bo)\n\t\tc.retry(b, bo)\n\t}\n}\n\n// Batch groups together points\nfunc (c *BasicClient) Batch(ps <-chan Point, r chan<- response) error {\n\tif !c.Enabled {\n\t\treturn nil\n\t}\n\tinstanceURLs := make([]string, len(c.Addresses))\n\tfor i := 0; i < len(c.Addresses); i++ {\n\t\tinstanceURLs[i] = fmt.Sprintf(\"http://%v/write?db=%v&rp=%v&precision=%v\", c.Addresses[i], c.Database, c.RetentionPolicy, c.Precision)\n\t}\n\n\tc.Addresses = instanceURLs\n\n\tc.r = r\n\tvar buf bytes.Buffer\n\tvar wg sync.WaitGroup\n\tcounter := NewConcurrencyLimiter(c.Concurrency)\n\n\tinterval, err := time.ParseDuration(c.BatchInterval)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.interval = interval\n\n\tctr := 0\n\n\twriteBatch := func(b []byte) {\n\t\twg.Add(1)\n\t\tcounter.Increment()\n\t\tgo func(byt []byte) {\n\t\t\tc.retry(byt, time.Duration(1))\n\t\t\tcounter.Decrement()\n\t\t\twg.Done()\n\t\t}(b)\n\n\t}\n\n\tfor p := range ps {\n\t\tb := 
p.Line()\n\t\tc.addrId = ctr % len(c.Addresses)\n\t\tctr++\n\n\t\tbuf.Write(b)\n\t\tbuf.Write([]byte(\"\\n\"))\n\n\t\tif ctr%c.BatchSize == 0 && ctr != 0 {\n\t\t\tb := buf.Bytes()\n\t\t\tif len(b) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Trimming the trailing newline character\n\t\t\tb = b[0 : len(b)-1]\n\n\t\t\twriteBatch(b)\n\t\t\tvar temp bytes.Buffer\n\t\t\tbuf = temp\n\t\t}\n\t}\n\t// Write out any remaining points\n\tb := buf.Bytes()\n\tif len(b) > 0 {\n\t\twriteBatch(b)\n\t}\n\n\twg.Wait()\n\n\treturn nil\n}\n\n// post sends a post request with a payload of points\nfunc post(url string, datatype string, data io.Reader) (*http.Response, error) {\n\tresp, err := http.Post(url, datatype, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {\n\t\terr := errors.New(string(body))\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}\n\n// Send calls post and returns a response\nfunc (c *BasicClient) send(b []byte) (response, error) {\n\n\tt := NewTimer()\n\tresp, err := post(c.Addresses[c.addrId], \"\", bytes.NewBuffer(b))\n\tt.StopTimer()\n\tif err != nil {\n\t\treturn response{Timer: t}, err\n\t}\n\n\tr := response{\n\t\tResp:  resp,\n\t\tTime:  time.Now(),\n\t\tTimer: t,\n\t}\n\n\treturn r, nil\n}\n\n// BasicQuery implements the QueryGenerator interface\ntype BasicQuery struct {\n\tTemplate   Query `toml:\"template\"`\n\tQueryCount int   `toml:\"query_count\"`\n\ttime       time.Time\n}\n\n// QueryGenerate returns a Query channel\nfunc (q *BasicQuery) QueryGenerate(now func() time.Time) (<-chan Query, error) {\n\tc := make(chan Query, 0)\n\n\tgo func(chan Query) {\n\t\tdefer close(c)\n\n\t\tfor i := 0; i < q.QueryCount; i++ {\n\t\t\tc <- Query(fmt.Sprintf(string(q.Template), i))\n\t\t}\n\n\t}(c)\n\n\treturn c, nil\n}\n\n// SetTime sets the internal state of 
time\nfunc (q *BasicQuery) SetTime(t time.Time) {\n\tq.time = t\n\treturn\n}\n\n// BasicQueryClient implements the QueryClient interface\ntype BasicQueryClient struct {\n\tEnabled       bool     `toml:\"enabled\"`\n\tAddresses     []string `toml:\"addresses\"`\n\tDatabase      string   `toml:\"database\"`\n\tQueryInterval string   `toml:\"query_interval\"`\n\tConcurrency   int      `toml:\"concurrency\"`\n\tclients       []client.Client\n\taddrId        int\n}\n\n// Init initializes the InfluxDB client\nfunc (b *BasicQueryClient) Init() error {\n\n\tfor _, a := range b.Addresses {\n\t\tcl, err := client.NewHTTPClient(client.HTTPConfig{\n\t\t\tAddr: fmt.Sprintf(\"http://%v\", a),\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tb.clients = append(b.clients, cl)\n\t}\n\n\treturn nil\n}\n\n// Query runs the query\nfunc (b *BasicQueryClient) Query(cmd Query) (response, error) {\n\tq := client.Query{\n\t\tCommand:  string(cmd),\n\t\tDatabase: b.Database,\n\t}\n\n\tt := NewTimer()\n\t_, err := b.clients[b.addrId].Query(q)\n\tt.StopTimer()\n\n\tif err != nil {\n\t\treturn response{Timer: t}, err\n\t}\n\n\t// Needs actual response type\n\tr := response{\n\t\tTime:  time.Now(),\n\t\tTimer: t,\n\t}\n\n\treturn r, nil\n\n}\n\n// Exec listens to the query channel an executes queries as they come in\nfunc (b *BasicQueryClient) Exec(qs <-chan Query, r chan<- response) error {\n\tif !b.Enabled {\n\t\treturn nil\n\t}\n\tvar wg sync.WaitGroup\n\tcounter := NewConcurrencyLimiter(b.Concurrency)\n\n\tb.Init()\n\n\tinterval, err := time.ParseDuration(b.QueryInterval)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctr := 0\n\n\tfor q := range qs {\n\t\tb.addrId = ctr % len(b.Addresses)\n\t\tctr++\n\n\t\twg.Add(1)\n\t\tcounter.Increment()\n\t\tfunc(q Query) {\n\t\t\tdefer wg.Done()\n\t\t\tqr, _ := b.Query(q)\n\t\t\tr <- qr\n\t\t\ttime.Sleep(interval)\n\t\t\tcounter.Decrement()\n\t\t}(q)\n\t}\n\n\twg.Wait()\n\n\treturn nil\n}\n\n// resetDB will drop an create a new database on 
an existing\n// InfluxDB instance.\nfunc resetDB(c client.Client, database string) error {\n\t_, err := c.Query(client.Query{\n\t\tCommand: fmt.Sprintf(\"DROP DATABASE %s\", database),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = c.Query(client.Query{\n\t\tCommand: fmt.Sprintf(\"CREATE DATABASE %s\", database),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// BasicProvisioner implements the Provisioner\n// interface.\ntype BasicProvisioner struct {\n\tEnabled       bool   `toml:\"enabled\"`\n\tAddress       string `toml:\"address\"`\n\tDatabase      string `toml:\"database\"`\n\tResetDatabase bool   `toml:\"reset_database\"`\n}\n\n// Provision runs the resetDB function.\nfunc (b *BasicProvisioner) Provision() error {\n\tif !b.Enabled {\n\t\treturn nil\n\t}\n\n\tcl, err := client.NewHTTPClient(client.HTTPConfig{\n\t\tAddr: fmt.Sprintf(\"http://%v\", b.Address),\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif b.ResetDatabase {\n\t\tresetDB(cl, b.Database)\n\t}\n\n\treturn nil\n}\n\ntype BroadcastChannel struct {\n\tchs []chan response\n\twg  sync.WaitGroup\n\tfns []func(t *Timer)\n}\n\nfunc NewBroadcastChannel() *BroadcastChannel {\n\tchs := make([]chan response, 0)\n\treturn &BroadcastChannel{chs: chs}\n}\n\nfunc (b *BroadcastChannel) Register(fn responseHandler) {\n\tch := make(chan response, 0)\n\n\tb.chs = append(b.chs, ch)\n\n\tf := func(t *Timer) {\n\t\tgo fn(ch, t)\n\t}\n\n\tb.fns = append(b.fns, f)\n}\n\nfunc (b *BroadcastChannel) Broadcast(r response) {\n\n\tb.wg.Add(1)\n\tfor _, ch := range b.chs {\n\t\tb.wg.Add(1)\n\t\tgo func(ch chan response) {\n\t\t\tch <- r\n\t\t\tb.wg.Done()\n\t\t}(ch)\n\t}\n\tb.wg.Done()\n}\n\nfunc (b *BroadcastChannel) Close() {\n\tb.wg.Wait()\n\tfor _, ch := range b.chs {\n\t\tclose(ch)\n\t\t// Workaround\n\t\ttime.Sleep(1 * time.Second)\n\t}\n}\n\nfunc (b *BroadcastChannel) Handle(rs <-chan response, t *Timer) {\n\n\t// Start all of the handlers\n\tfor _, fn := range b.fns 
{\n\t\tfn(t)\n\t}\n\n\tfor i := range rs {\n\t\tb.Broadcast(i)\n\t}\n\tb.Close()\n}\n\n// BasicWriteHandler handles write responses.\nfunc (b *BasicClient) BasicWriteHandler(rs <-chan response, wt *Timer) {\n\tn := 0\n\tsuccess := 0\n\tfail := 0\n\n\ts := time.Duration(0)\n\n\tfor t := range rs {\n\n\t\tn++\n\n\t\tif t.Success() {\n\t\t\tsuccess++\n\t\t} else {\n\t\t\tfail++\n\t\t}\n\n\t\ts += t.Timer.Elapsed()\n\n\t}\n\n\tif n == 0 {\n\t\treturn\n\t}\n\n\tfmt.Printf(\"Total Requests: %v\\n\", n)\n\tfmt.Printf(\"\tSuccess: %v\\n\", success)\n\tfmt.Printf(\"\tFail: %v\\n\", fail)\n\tfmt.Printf(\"Average Response Time: %v\\n\", s/time.Duration(n))\n\tfmt.Printf(\"Points Per Second: %v\\n\\n\", int(float64(n)*float64(b.BatchSize)/float64(wt.Elapsed().Seconds())))\n}\n\n// BasicReadHandler handles read responses.\nfunc (b *BasicQueryClient) BasicReadHandler(r <-chan response, rt *Timer) {\n\tn := 0\n\ts := time.Duration(0)\n\tfor t := range r {\n\t\tn++\n\t\ts += t.Timer.Elapsed()\n\t}\n\n\tif n == 0 {\n\t\treturn\n\t}\n\n\tfmt.Printf(\"Total Queries: %v\\n\", n)\n\tfmt.Printf(\"Average Query Response Time: %v\\n\\n\", s/time.Duration(n))\n}\n\nfunc (o *outputConfig) HTTPHandler(method string) func(r <-chan response, rt *Timer) {\n\treturn func(r <-chan response, rt *Timer) {\n\t\tc, _ := client.NewHTTPClient(client.HTTPConfig{\n\t\t\tAddr: o.addr,\n\t\t})\n\t\tbp, _ := client.NewBatchPoints(client.BatchPointsConfig{\n\t\t\tDatabase:        o.database,\n\t\t\tRetentionPolicy: o.retentionPolicy,\n\t\t\tPrecision:       \"ns\",\n\t\t})\n\t\tfor p := range r {\n\t\t\ttags := make(map[string]string, len(o.tags))\n\t\t\tfor k, v := range o.tags {\n\t\t\t\ttags[k] = v\n\t\t\t}\n\t\t\ttags[\"method\"] = method\n\t\t\tfields := map[string]interface{}{\n\t\t\t\t\"response_time\": float64(p.Timer.Elapsed()),\n\t\t\t}\n\t\t\tpt, _ := client.NewPoint(\"performance\", tags, fields, p.Time)\n\t\t\tbp.AddPoint(pt)\n\t\t\tif len(bp.Points())%1000 == 0 && len(bp.Points()) != 0 
{\n\t\t\t\tc.Write(bp)\n\t\t\t\tbp, _ = client.NewBatchPoints(client.BatchPointsConfig{\n\t\t\t\t\tDatabase:        o.database,\n\t\t\t\t\tRetentionPolicy: o.retentionPolicy,\n\t\t\t\t\tPrecision:       \"ns\",\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tif len(bp.Points()) != 0 {\n\t\t\tc.Write(bp)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/config.go",
    "content": "package stress\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/BurntSushi/toml\"\n)\n\n// Config is a struct for the Stress test configuration\ntype Config struct {\n\tProvision Provision `toml:\"provision\"`\n\tWrite     Write     `toml:\"write\"`\n\tRead      Read      `toml:\"read\"`\n}\n\n// Provision is a struct that contains the configuration\n// parameters for all implemented Provisioner's.\ntype Provision struct {\n\tBasic BasicProvisioner `toml:\"basic\"`\n}\n\n// Write is a struct that contains the configuration\n// parameters for the stress test Writer.\ntype Write struct {\n\tPointGenerators PointGenerators `toml:\"point_generator\"`\n\tInfluxClients   InfluxClients   `toml:\"influx_client\"`\n}\n\n// PointGenerators is a struct that contains the configuration\n// parameters for all implemented PointGenerator's.\ntype PointGenerators struct {\n\tBasic *BasicPointGenerator `toml:\"basic\"`\n}\n\n// InfluxClients is a struct that contains the configuration\n// parameters for all implemented InfluxClient's.\ntype InfluxClients struct {\n\tBasic BasicClient `toml:\"basic\"`\n}\n\n// Read is a struct that contains the configuration\n// parameters for the stress test Reader.\ntype Read struct {\n\tQueryGenerators QueryGenerators `toml:\"query_generator\"`\n\tQueryClients    QueryClients    `toml:\"query_client\"`\n}\n\n// QueryGenerators is a struct that contains the configuration\n// parameters for all implemented QueryGenerator's.\ntype QueryGenerators struct {\n\tBasic BasicQuery `toml:\"basic\"`\n}\n\n// QueryClients is a struct that contains the configuration\n// parameters for all implemented QueryClient's.\ntype QueryClients struct {\n\tBasic BasicQueryClient `toml:\"basic\"`\n}\n\n// NewConfig returns a pointer to a Config\nfunc NewConfig(s string) (*Config, error) {\n\tvar c *Config\n\tvar err error\n\n\tif s == \"\" {\n\t\tc, err = BasicStress()\n\t} else {\n\t\tc, err = DecodeFile(s)\n\t}\n\n\treturn c, 
err\n}\n\n// DecodeFile takes a file path for a toml config file\n// and returns a pointer to a Config Struct.\nfunc DecodeFile(s string) (*Config, error) {\n\tt := &Config{}\n\n\t// Decode the toml file\n\tif _, err := toml.DecodeFile(s, t); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn t, nil\n}\n\n// DecodeConfig takes a file path for a toml config file\n// and returns a pointer to a Config Struct.\nfunc DecodeConfig(s string) (*Config, error) {\n\tt := &Config{}\n\n\t// Decode the toml file\n\tif _, err := toml.Decode(s, t); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn t, nil\n}\n\ntype outputConfig struct {\n\ttags            map[string]string\n\taddr            string\n\tdatabase        string\n\tretentionPolicy string\n}\n\nfunc (t *outputConfig) SetParams(addr, db, rp string) {\n\tt.addr = addr\n\tt.database = db\n\tt.retentionPolicy = rp\n}\n\nfunc NewOutputConfig() *outputConfig {\n\tvar o outputConfig\n\ttags := make(map[string]string)\n\to.tags = tags\n\tdatabase := flag.String(\"database\", \"stress\", \"name of database where the response times will persist\")\n\tretentionPolicy := flag.String(\"retention-policy\", \"\", \"name of the retention policy where the response times will persist\")\n\taddress := flag.String(\"addr\", \"http://localhost:8086\", \"IP address and port of database where response times will persist (e.g., localhost:8086)\")\n\tflag.Var(&o, \"tags\", \"A comma seperated list of tags\")\n\tflag.Parse()\n\n\to.SetParams(*address, *database, *retentionPolicy)\n\n\treturn &o\n\n}\n\nfunc (t *outputConfig) String() string {\n\tvar s string\n\tfor k, v := range t.tags {\n\t\ts += fmt.Sprintf(\"%v=%v \", k, v)\n\t}\n\treturn fmt.Sprintf(\"%v %v %v %v\", s, t.database, t.retentionPolicy, t.addr)\n}\n\nfunc (t *outputConfig) Set(value string) error {\n\tfor _, s := range strings.Split(value, \",\") {\n\t\ttags := strings.Split(s, \"=\")\n\t\tt.tags[tags[0]] = tags[1]\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/run.go",
    "content": "package stress // import \"github.com/influxdata/influxdb/stress\"\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"sync\"\n\t\"time\"\n)\n\n// Point is an interface that is used to represent\n// the abstract idea of a point in InfluxDB.\ntype Point interface {\n\tLine() []byte\n\tGraphite() []byte\n\tOpenJSON() []byte\n\tOpenTelnet() []byte\n}\n\n///////////////////////////////////////////////////\n// Example Implementation of the Point Interface //\n///////////////////////////////////////////////////\n\n// KeyValue is an intermediate type that is used\n// to express Tag and Field similarly.\ntype KeyValue struct {\n\tKey   string\n\tValue string\n}\n\n// Tag is a struct for a tag in influxdb.\ntype Tag KeyValue\n\n// Field is a struct for a field in influxdb.\ntype Field KeyValue\n\n// Tags is an slice of all the tags for a point.\ntype Tags []Tag\n\n// Fields is an slice of all the fields for a point.\ntype Fields []Field\n\n// tagset returns a byte array for a points tagset.\nfunc (t Tags) tagset() []byte {\n\tvar buf bytes.Buffer\n\tfor _, tag := range t {\n\t\tbuf.Write([]byte(fmt.Sprintf(\"%v=%v,\", tag.Key, tag.Value)))\n\t}\n\n\tb := buf.Bytes()\n\tb = b[0 : len(b)-1]\n\n\treturn b\n}\n\n// fieldset returns a byte array for a points fieldset.\nfunc (f Fields) fieldset() []byte {\n\tvar buf bytes.Buffer\n\tfor _, field := range f {\n\t\tbuf.Write([]byte(fmt.Sprintf(\"%v=%v,\", field.Key, field.Value)))\n\t}\n\n\tb := buf.Bytes()\n\tb = b[0 : len(b)-1]\n\n\treturn b\n}\n\n// StdPoint represents a point in InfluxDB\ntype StdPoint struct {\n\tMeasurement string\n\tTags        Tags\n\tFields      Fields\n\tTimestamp   int64\n}\n\n// Line returns a byte array for a point in\n// line-protocol format\nfunc (p StdPoint) Line() []byte {\n\tvar buf bytes.Buffer\n\n\tbuf.Write([]byte(fmt.Sprintf(\"%v,\", p.Measurement)))\n\tbuf.Write(p.Tags.tagset())\n\tbuf.Write([]byte(\" \"))\n\tbuf.Write(p.Fields.fieldset())\n\tbuf.Write([]byte(\" 
\"))\n\tbuf.Write([]byte(fmt.Sprintf(\"%v\", p.Timestamp)))\n\n\tbyt := buf.Bytes()\n\n\treturn byt\n}\n\n// Graphite returns a byte array for a point\n// in graphite-protocol format\nfunc (p StdPoint) Graphite() []byte {\n\t// TODO: implement\n\t// timestamp is at second level resolution\n\t// but can be specified as a float to get nanosecond\n\t// level precision\n\tt := \"tag_1.tag_2.measurement[.field] acutal_value timestamp\"\n\treturn []byte(t)\n}\n\n// OpenJSON returns a byte array for a point\n// in JSON format\nfunc (p StdPoint) OpenJSON() []byte {\n\t// TODO: implement\n\t//[\n\t//    {\n\t//        \"metric\": \"sys.cpu.nice\",\n\t//        \"timestamp\": 1346846400,\n\t//        \"value\": 18,\n\t//        \"tags\": {\n\t//           \"host\": \"web01\",\n\t//           \"dc\": \"lga\"\n\t//        }\n\t//    },\n\t//    {\n\t//        \"metric\": \"sys.cpu.nice\",\n\t//        \"timestamp\": 1346846400,\n\t//        \"value\": 9,\n\t//        \"tags\": {\n\t//           \"host\": \"web02\",\n\t//           \"dc\": \"lga\"\n\t//        }\n\t//    }\n\t//]\n\treturn []byte(\"hello\")\n}\n\n// OpenTelnet returns a byte array for a point\n// in OpenTSDB-telnet format\nfunc (p StdPoint) OpenTelnet() []byte {\n\t// TODO: implement\n\t// timestamp can be 13 digits at most\n\t// sys.cpu.nice timestamp value tag_key_1=tag_value_1 tag_key_2=tag_value_2\n\treturn []byte(\"hello\")\n}\n\n////////////////////////////////////////\n\n// response is the results making\n// a request to influxdb.\ntype response struct {\n\tResp  *http.Response\n\tTime  time.Time\n\tTimer *Timer\n}\n\n// Success returns true if the request\n// was successful and false otherwise.\nfunc (r response) Success() bool {\n\t// ADD success for tcp, udp, etc\n\treturn !(r.Resp == nil || r.Resp.StatusCode != 204)\n}\n\n// WriteResponse is a response for a Writer\ntype WriteResponse response\n\n// QueryResponse is a response for a Querier\ntype QueryResponse struct {\n\tresponse\n\tBody 
string\n}\n\n///////////////////////////////\n// Definition of the Writer ///\n///////////////////////////////\n\n// PointGenerator is an interface for generating points.\ntype PointGenerator interface {\n\tGenerate() (<-chan Point, error)\n\tTime() time.Time\n}\n\n// InfluxClient is an interface for writing data to the database.\ntype InfluxClient interface {\n\tBatch(ps <-chan Point, r chan<- response) error\n\tsend(b []byte) (response, error)\n\t//ResponseHandler\n}\n\n// Writer is a PointGenerator and an InfluxClient.\ntype Writer struct {\n\tPointGenerator\n\tInfluxClient\n}\n\n// NewWriter returns a Writer.\nfunc NewWriter(p PointGenerator, i InfluxClient) Writer {\n\tw := Writer{\n\t\tPointGenerator: p,\n\t\tInfluxClient:   i,\n\t}\n\n\treturn w\n}\n\n////////////////////////////////\n// Definition of the Querier ///\n////////////////////////////////\n\n// Query is query\ntype Query string\n\n// QueryGenerator is an interface that is used\n// to define queries that will be ran on the DB.\ntype QueryGenerator interface {\n\tQueryGenerate(f func() time.Time) (<-chan Query, error)\n\tSetTime(t time.Time)\n}\n\n// QueryClient is an interface that can write a query\n// to an InfluxDB instance.\ntype QueryClient interface {\n\tQuery(q Query) (response, error)\n\tExec(qs <-chan Query, r chan<- response) error\n}\n\n// Querier queries the database.\ntype Querier struct {\n\tQueryGenerator\n\tQueryClient\n}\n\n// NewQuerier returns a Querier.\nfunc NewQuerier(q QueryGenerator, c QueryClient) Querier {\n\tr := Querier{\n\t\tQueryGenerator: q,\n\t\tQueryClient:    c,\n\t}\n\n\treturn r\n}\n\n///////////////////////////////////\n// Definition of the Provisioner //\n///////////////////////////////////\n\n// Provisioner is an interface that provisions an\n// InfluxDB instance\ntype Provisioner interface {\n\tProvision() error\n}\n\n/////////////////////////////////\n// Definition of StressTest /////\n/////////////////////////////////\n\n// StressTest is a struct that 
contains all of\n// the logic required to execute a Stress Test\ntype StressTest struct {\n\tProvisioner\n\tWriter\n\tQuerier\n}\n\n// responseHandler\ntype responseHandler func(r <-chan response, t *Timer)\n\n// Start executes the Stress Test\nfunc (s *StressTest) Start(wHandle responseHandler, rHandle responseHandler) {\n\tvar wg sync.WaitGroup\n\n\t// Provision the Instance\n\ts.Provision()\n\n\twg.Add(1)\n\t// Starts Writing\n\tgo func() {\n\t\tr := make(chan response, 0)\n\t\twt := NewTimer()\n\n\t\tgo func() {\n\t\t\tdefer wt.StopTimer()\n\t\t\tdefer close(r)\n\t\t\tp, err := s.Generate()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr = s.Batch(p, r)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}()\n\n\t\t// Write Results Handler\n\t\twHandle(r, wt)\n\t\twg.Done()\n\t}()\n\n\twg.Add(1)\n\t// Starts Querying\n\tgo func() {\n\t\tr := make(chan response, 0)\n\t\trt := NewTimer()\n\n\t\tgo func() {\n\t\t\tdefer rt.StopTimer()\n\t\t\tdefer close(r)\n\t\t\tq, err := s.QueryGenerate(s.Time)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr = s.Exec(q, r)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}()\n\n\t\t// Read Results Handler\n\t\trHandle(r, rt)\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n}\n\n// NewStressTest returns an instance of a StressTest\nfunc NewStressTest(p Provisioner, w Writer, r Querier) StressTest {\n\ts := StressTest{\n\t\tProvisioner: p,\n\t\tWriter:      w,\n\t\tQuerier:     r,\n\t}\n\n\treturn s\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/stress.toml",
    "content": "[provision]\n  [provision.basic]\n    enabled = true\n    address = \"localhost:8086\"\n    database = \"stress\"\n    reset_database = true\n\n[write]\n  [write.point_generator]\n    [write.point_generator.basic]\n      enabled = true\n      point_count = 100\n      series_count = 100000\n      tick = \"10s\"\n      jitter = true\n      measurement = \"cpu\"\n      start_date = \"2006-Jan-02\"\n      [[write.point_generator.basic.tag]]\n        key = \"host\"\n        value = \"server\"\n      [[write.point_generator.basic.tag]]\n        key = \"location\"\n        value = \"us-west\"\n      [[write.point_generator.basic.field]]\n        key = \"value\"\n        value = \"float64\"\n\n\n  [write.influx_client]\n    [write.influx_client.basic]\n      enabled = true\n      addresses = [\"localhost:8086\",\"localhost:1234\",\"localhost:5678\"] # stress_test_server runs on port 1234\n      database = \"stress\"\n      precision = \"n\"\n      batch_size = 10000\n      batch_interval = \"0s\"\n      concurrency = 10\n      ssl = false\n      format = \"line_http\" # line_udp, graphite_tcp, graphite_udp\n\n[read]\n  [read.query_generator]\n    [read.query_generator.basic]\n      template = \"SELECT count(value) FROM cpu where host='server-%v'\"\n      query_count = 250\n\n  [read.query_client]\n    [read.query_client.basic]\n      enabled = true\n      addresses = [\"localhost:8086\"]\n      database = \"stress\"\n      query_interval = \"100ms\"\n      concurrency = 1\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/stress_test.go",
    "content": "package stress\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/client/v2\"\n\t\"github.com/influxdata/influxdb/models\"\n)\n\nfunc TestTimer_StartTimer(t *testing.T) {\n\tvar epoch time.Time\n\ttmr := &Timer{}\n\ttmr.StartTimer()\n\ts := tmr.Start()\n\tif s == epoch {\n\t\tt.Errorf(\"expected tmr.start to not be %v\", s)\n\t}\n}\n\nfunc TestNewTimer(t *testing.T) {\n\tvar epoch time.Time\n\ttmr := NewTimer()\n\ts := tmr.Start()\n\tif s == epoch {\n\t\tt.Errorf(\"expected tmr.start to not be %v\", s)\n\t}\n\te := tmr.End()\n\tif e != epoch {\n\t\tt.Errorf(\"expected tmr.stop to be %v, got %v\", epoch, e)\n\t}\n}\n\nfunc TestTimer_StopTimer(t *testing.T) {\n\tvar epoch time.Time\n\ttmr := NewTimer()\n\ttmr.StopTimer()\n\te := tmr.End()\n\tif e == epoch {\n\t\tt.Errorf(\"expected tmr.stop to not be %v\", e)\n\t}\n}\n\nfunc TestTimer_Elapsed(t *testing.T) {\n\ttmr := NewTimer()\n\ttime.Sleep(2 * time.Second)\n\ttmr.StopTimer()\n\te := tmr.Elapsed()\n\n\tif time.Duration(1990*time.Millisecond) > e || e > time.Duration(3*time.Second) {\n\t\tt.Errorf(\"expected around %s got %s\", time.Duration(2*time.Second), e)\n\t}\n}\n\n/// basic.go\n\n// Types are off\nfunc Test_typeArr(t *testing.T) {\n\tvar re *regexp.Regexp\n\tvar b bool\n\tarr := []string{\n\t\t\"float64\",\n\t\t\"int\",\n\t\t\"bool\",\n\t}\n\n\tts := typeArr(arr)\n\n\tre = regexp.MustCompile(`[1-9]\\d*`)\n\tb = re.MatchString(ts[0].(string))\n\tif !b {\n\t\tt.Errorf(\"Expected line protocol float64 got %v\", ts[0])\n\t}\n\n\tre = regexp.MustCompile(`[1-9]\\d*i`)\n\tb = re.MatchString(ts[1].(string))\n\tif !b {\n\t\tt.Errorf(\"Expected line protocol int got %v\", ts[1])\n\t}\n\n\tre = regexp.MustCompile(`true|false`)\n\tb = re.MatchString(ts[2].(string))\n\tif !b {\n\t\tt.Errorf(\"Expected line protocol bool got %v\", 
ts[2])\n\t}\n\n}\n\nfunc Test_typeArrBadTypes(t *testing.T) {\n\tarr := []string{\n\t\t\"default\",\n\t\t\"rand\",\n\t\t\"\",\n\t}\n\n\tts := typeArr(arr)\n\n\tfor _, x := range ts {\n\t\tre := regexp.MustCompile(`[1-9]\\d*`)\n\t\tb := re.MatchString(x.(string))\n\t\tif !b {\n\t\t\tt.Errorf(\"Expected line protocol float64 got %v\", x)\n\t\t}\n\t}\n}\n\nfunc TestPnt_Line(t *testing.T) {\n\tp := &Pnt{}\n\tb := []byte(\"a,b=1,c=1 v=1\")\n\n\tp.Set(b)\n\n\tif string(p.Line()) != string(b) {\n\t\tt.Errorf(\"Expected `%v` to `%v`\", string(b), string(p.Line()))\n\t}\n}\n\nfunc TestAbstractTags_Template(t *testing.T) {\n\ttags := AbstractTags{\n\t\tAbstractTag{\n\t\t\tKey:   \"host\",\n\t\t\tValue: \"server\",\n\t\t},\n\t\tAbstractTag{\n\t\t\tKey:   \"location\",\n\t\t\tValue: \"us-west\",\n\t\t},\n\t}\n\n\ts := tags.Template()\n\ttm := \"host=server-%v,location=us-west\"\n\n\tif s != tm {\n\t\tt.Errorf(\"Expected %v got %v\", tm, s)\n\t}\n}\n\nfunc TestAbstractFields_TemplateOneField(t *testing.T) {\n\tfields := AbstractFields{\n\t\tAbstractField{\n\t\t\tKey:  \"fValue\",\n\t\t\tType: \"float64\",\n\t\t},\n\t}\n\n\ttm, _ := fields.Template()\n\n\ts := \"fValue=%v\"\n\tif s != tm {\n\t\tt.Errorf(\"Expected `%v` got `%v`\", s, tm)\n\t}\n\n}\n\nfunc TestAbstractFields_TemplateManyFields(t *testing.T) {\n\tfields := AbstractFields{\n\t\tAbstractField{\n\t\t\tKey:  \"fValue\",\n\t\t\tType: \"float64\",\n\t\t},\n\t\tAbstractField{\n\t\t\tKey:  \"iValue\",\n\t\t\tType: \"int\",\n\t\t},\n\t\tAbstractField{\n\t\t\tKey:  \"bValue\",\n\t\t\tType: \"bool\",\n\t\t},\n\t\tAbstractField{\n\t\t\tKey:  \"rValue\",\n\t\t\tType: \"rnd\",\n\t\t},\n\t}\n\n\ttm, ty := fields.Template()\n\n\ts := \"fValue=%v,iValue=%v,bValue=%v,rValue=%v\"\n\tif s != tm {\n\t\tt.Errorf(\"Expected `%v` got `%v`\", s, tm)\n\t}\n\n\tfor i, f := range fields {\n\t\tif f.Type != ty[i] {\n\t\t\tt.Errorf(\"Expected %v got %v\", f.Type, ty[i])\n\t\t}\n\t}\n\n}\n\nvar basicPG = &BasicPointGenerator{\n\tPointCount:  
100,\n\tTick:        \"10s\",\n\tMeasurement: \"cpu\",\n\tSeriesCount: 100,\n\tTags: AbstractTags{\n\t\tAbstractTag{\n\t\t\tKey:   \"host\",\n\t\t\tValue: \"server\",\n\t\t},\n\t\tAbstractTag{\n\t\t\tKey:   \"location\",\n\t\t\tValue: \"us-west\",\n\t\t},\n\t},\n\tFields: AbstractFields{\n\t\tAbstractField{\n\t\t\tKey:  \"value\",\n\t\t\tType: \"float64\",\n\t\t},\n\t},\n\tStartDate: \"2006-Jan-01\",\n}\n\nfunc TestBasicPointGenerator_Template(t *testing.T) {\n\tfn := basicPG.Template()\n\tnow := time.Now()\n\tm := \"cpu,host=server-1,location=us-west\"\n\tts := fmt.Sprintf(\"%v\", now.UnixNano())\n\n\ttm := strings.Split(string(fn(1, now).Line()), \" \")\n\n\tif m != tm[0] {\n\t\tt.Errorf(\"Expected %s got %s\", m, tm[0])\n\t}\n\n\tif !strings.HasPrefix(tm[1], \"value=\") {\n\t\tt.Errorf(\"Expected %v to start with `value=`\", tm[1])\n\t}\n\n\tif ts != string(tm[2]) {\n\t\tt.Errorf(\"Expected %s got %s\", ts, tm[2])\n\t}\n}\n\nfunc TestBasicPointGenerator_Generate(t *testing.T) {\n\tps, err := basicPG.Generate()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tvar buf bytes.Buffer\n\n\tfor p := range ps {\n\t\tb := p.Line()\n\n\t\tbuf.Write(b)\n\t\tbuf.Write([]byte(\"\\n\"))\n\t}\n\n\tbs := buf.Bytes()\n\tbs = bs[0 : len(bs)-1]\n\n\t_, err = models.ParsePoints(bs)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc Test_post(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tcontent, _ := ioutil.ReadAll(r.Body)\n\t\tlines := strings.Split(string(content), \"\\n\")\n\t\tif len(lines) != 3 {\n\t\t\tt.Errorf(\"Expected 3 lines got %v\", len(lines))\n\t\t}\n\t\tw.WriteHeader(http.StatusOK)\n\t}))\n\tdefer ts.Close()\n\n\tb := []byte(\n\t\t`cpu,host=server-1,location=us-west value=100 12932\n\t\tcpu,host=server-2,location=us-west value=10 12932\n\t\tcpu,host=server-3,location=us-west value=120 12932`,\n\t)\n\n\t_, err := post(ts.URL, \"application/x-www-form-urlencoded\", bytes.NewBuffer(b))\n\tif err != nil 
{\n\t\tt.Error(err)\n\t}\n}\n\nvar basicIC = &BasicClient{\n\tAddresses:     []string{\"localhost:8086\"},\n\tDatabase:      \"stress\",\n\tPrecision:     \"n\",\n\tBatchSize:     1000,\n\tBatchInterval: \"0s\",\n\tConcurrency:   10,\n\tFormat:        \"line_http\",\n}\n\nfunc TestBasicClient_send(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tcontent, _ := ioutil.ReadAll(r.Body)\n\t\tlines := strings.Split(string(content), \"\\n\")\n\t\tif len(lines) != 3 {\n\t\t\tt.Errorf(\"Expected 3 lines got %v\", len(lines))\n\t\t}\n\t\tw.WriteHeader(http.StatusOK)\n\t}))\n\tdefer ts.Close()\n\n\tbasicIC.Addresses[0] = ts.URL\n\tb := []byte(\n\t\t`cpu,host=server-1,location=us-west value=100 12932\n\t\tcpu,host=server-2,location=us-west value=10 12932\n\t\tcpu,host=server-3,location=us-west value=120 12932`,\n\t)\n\t_, err := basicIC.send(b)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n}\n\nfunc TestBasicClient_Batch(t *testing.T) {\n\tc := make(chan Point, 0)\n\tr := make(chan response, 0)\n\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tcontent, _ := ioutil.ReadAll(r.Body)\n\t\tlines := strings.Split(string(content), \"\\n\")\n\t\tif len(lines) != 1000 {\n\t\t\tt.Errorf(\"Expected 1000 lines got %v\", len(lines))\n\t\t}\n\t\tw.WriteHeader(http.StatusOK)\n\t}))\n\tdefer ts.Close()\n\n\tbasicIC.Addresses[0] = ts.URL[7:]\n\n\tgo func(c chan Point) {\n\t\tdefer close(c)\n\n\t\tfor i := 0; i < 1000; i++ {\n\t\t\tp := &Pnt{}\n\t\t\tp.Next(i, time.Now())\n\t\t\tc <- *p\n\t\t}\n\n\t}(c)\n\n\tgo func(r chan response) {\n\t\tfor _ = range r {\n\t\t}\n\t}(r)\n\n\terr := basicIC.Batch(c, r)\n\tclose(r)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n}\n\nvar basicQ = &BasicQuery{\n\tTemplate:   Query(\"SELECT count(value) from cpu WHERE host='server-%v'\"),\n\tQueryCount: 100,\n}\n\nfunc TestBasicQuery_QueryGenerate(t *testing.T) {\n\tqs, _ := 
basicQ.QueryGenerate(time.Now)\n\n\ti := 0\n\tfor q := range qs {\n\t\ttm := fmt.Sprintf(string(basicQ.Template), i)\n\t\tif Query(tm) != q {\n\t\t\tt.Errorf(\"Expected %v to be %v\", q, tm)\n\t\t}\n\t\ti++\n\t}\n}\n\nvar basicQC = &BasicQueryClient{\n\tAddresses:     []string{\"localhost:8086\"},\n\tDatabase:      \"stress\",\n\tQueryInterval: \"10s\",\n\tConcurrency:   1,\n}\n\nfunc TestBasicQueryClient_Query(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttime.Sleep(50 * time.Millisecond)\n\t\tw.Header().Set(\"X-Influxdb-Version\", \"x.x\")\n\t\tvar data client.Response\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_ = json.NewEncoder(w).Encode(data)\n\n\t\treturn\n\t}))\n\tdefer ts.Close()\n\n\tbasicQC.Addresses[0] = ts.URL[7:]\n\tbasicQC.Init()\n\n\tq := \"SELECT count(value) FROM cpu\"\n\tr, err := basicQC.Query(Query(q))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tvar epoch time.Time\n\n\tif r.Time == epoch {\n\t\tt.Errorf(\"Expected %v to not be epoch\", r.Time)\n\t}\n\n\telapsed := r.Timer.Elapsed()\n\tif elapsed.Nanoseconds() == 0 {\n\t\tt.Errorf(\"Expected %v to not be 0\", elapsed.Nanoseconds())\n\t}\n\n}\n\n/// config.go\nfunc Test_NewConfigWithFile(t *testing.T) {\n\tc, err := NewConfig(\"stress.toml\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tp := c.Provision\n\tw := c.Write\n\tr := c.Read\n\n\tif p.Basic.Address != \"localhost:8086\" {\n\t\tt.Errorf(\"Expected `localhost:8086` got %s\", p.Basic.Address)\n\t}\n\tif p.Basic.Database != \"stress\" {\n\t\tt.Errorf(\"Expected `stress` got %s\", p.Basic.Database)\n\t}\n\tif p.Basic.ResetDatabase != true {\n\t\tt.Errorf(\"Expected true got %v\", p.Basic.ResetDatabase)\n\t}\n\n\tpg := w.PointGenerators.Basic\n\tif pg.PointCount != 100 {\n\t\tt.Errorf(\"Expected 100 got %v\", pg.PointCount)\n\t}\n\tif pg.SeriesCount != 100000 {\n\t\tt.Errorf(\"Expected 100000 got %v\", pg.SeriesCount)\n\t}\n\tif pg.Tick != \"10s\" {\n\t\tt.Errorf(\"Expected 10s 
got %s\", pg.Tick)\n\t}\n\tif pg.Measurement != \"cpu\" {\n\t\tt.Errorf(\"Expected cpu got %s\", pg.Measurement)\n\t}\n\tif pg.StartDate != \"2006-Jan-02\" {\n\t\tt.Errorf(\"Expected `2006-Jan-02` got `%s`\", pg.StartDate)\n\t}\n\t// TODO: Check tags\n\t// TODO: Check fields\n\n\twc := w.InfluxClients.Basic\n\tif wc.Addresses[0] != \"localhost:8086\" {\n\t\tt.Errorf(\"Expected `localhost:8086` got %s\", wc.Addresses[0])\n\t}\n\tif wc.Database != \"stress\" {\n\t\tt.Errorf(\"Expected stress got %s\", wc.Database)\n\t}\n\tif wc.Precision != \"n\" {\n\t\tt.Errorf(\"Expected n got %s\", wc.Precision)\n\t}\n\tif wc.BatchSize != 10000 {\n\t\tt.Errorf(\"Expected 10000 got %v\", wc.BatchSize)\n\t}\n\tif wc.BatchInterval != \"0s\" {\n\t\tt.Errorf(\"Expected 0s got %v\", wc.BatchInterval)\n\t}\n\tif wc.Concurrency != 10 {\n\t\tt.Errorf(\"Expected 10 got %v\", wc.Concurrency)\n\t}\n\tif wc.SSL != false {\n\t\tt.Errorf(\"Expected 10 got %v\", wc.SSL)\n\t}\n\tif wc.Format != \"line_http\" {\n\t\tt.Errorf(\"Expected `line_http` got %s\", wc.Format)\n\t}\n\n\tqg := r.QueryGenerators.Basic\n\tif qg.Template != \"SELECT count(value) FROM cpu where host='server-%v'\" {\n\t\tt.Errorf(\"Expected `SELECT count(value) FROM cpu where host='server-%%v'` got %s\", qg.Template)\n\t}\n\tif qg.QueryCount != 250 {\n\t\tt.Errorf(\"Expected 250 got %v\", qg.QueryCount)\n\t}\n\n\tqc := r.QueryClients.Basic\n\tif qc.Addresses[0] != \"localhost:8086\" {\n\t\tt.Errorf(\"Expected `localhost:8086` got %s\", qc.Addresses[0])\n\t}\n\tif qc.Database != \"stress\" {\n\t\tt.Errorf(\"Expected stress got %s\", qc.Database)\n\t}\n\tif qc.QueryInterval != \"100ms\" {\n\t\tt.Errorf(\"Expected 100ms got %s\", qc.QueryInterval)\n\t}\n\tif qc.Concurrency != 1 {\n\t\tt.Errorf(\"Expected 1 got %v\", qc.Concurrency)\n\t}\n}\n\nfunc Test_NewConfigWithoutFile(t *testing.T) {\n\tc, err := NewConfig(\"\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tp := c.Provision\n\tw := c.Write\n\tr := c.Read\n\n\tif p.Basic.Address 
!= \"localhost:8086\" {\n\t\tt.Errorf(\"Expected `localhost:8086` got %s\", p.Basic.Address)\n\t}\n\tif p.Basic.Database != \"stress\" {\n\t\tt.Errorf(\"Expected `stress` got %s\", p.Basic.Database)\n\t}\n\tif p.Basic.ResetDatabase != true {\n\t\tt.Errorf(\"Expected true got %v\", p.Basic.ResetDatabase)\n\t}\n\n\tpg := w.PointGenerators.Basic\n\tif pg.PointCount != 100 {\n\t\tt.Errorf(\"Expected 100 got %v\", pg.PointCount)\n\t}\n\tif pg.SeriesCount != 100000 {\n\t\tt.Errorf(\"Expected 100000 got %v\", pg.SeriesCount)\n\t}\n\tif pg.Tick != \"10s\" {\n\t\tt.Errorf(\"Expected 10s got %s\", pg.Tick)\n\t}\n\tif pg.Measurement != \"cpu\" {\n\t\tt.Errorf(\"Expected cpu got %s\", pg.Measurement)\n\t}\n\tif pg.StartDate != \"2006-Jan-02\" {\n\t\tt.Errorf(\"Expected `2006-Jan-02` got `%s`\", pg.StartDate)\n\t}\n\t// TODO: Check tags\n\t// TODO: Check fields\n\n\twc := w.InfluxClients.Basic\n\tif wc.Addresses[0] != \"localhost:8086\" {\n\t\tt.Errorf(\"Expected `localhost:8086` got %s\", wc.Addresses[0])\n\t}\n\tif wc.Database != \"stress\" {\n\t\tt.Errorf(\"Expected stress got %s\", wc.Database)\n\t}\n\tif wc.Precision != \"n\" {\n\t\tt.Errorf(\"Expected n got %s\", wc.Precision)\n\t}\n\tif wc.BatchSize != 5000 {\n\t\tt.Errorf(\"Expected 5000 got %v\", wc.BatchSize)\n\t}\n\tif wc.BatchInterval != \"0s\" {\n\t\tt.Errorf(\"Expected 0s got %v\", wc.BatchInterval)\n\t}\n\tif wc.Concurrency != 10 {\n\t\tt.Errorf(\"Expected 10 got %v\", wc.Concurrency)\n\t}\n\tif wc.SSL != false {\n\t\tt.Errorf(\"Expected 10 got %v\", wc.SSL)\n\t}\n\tif wc.Format != \"line_http\" {\n\t\tt.Errorf(\"Expected `line_http` got %s\", wc.Format)\n\t}\n\n\tqg := r.QueryGenerators.Basic\n\tif qg.Template != \"SELECT count(value) FROM cpu where host='server-%v'\" {\n\t\tt.Errorf(\"Expected `SELECT count(value) FROM cpu where host='server-%%v'` got %s\", qg.Template)\n\t}\n\tif qg.QueryCount != 250 {\n\t\tt.Errorf(\"Expected 250 got %v\", qg.QueryCount)\n\t}\n\n\tqc := r.QueryClients.Basic\n\tif 
qc.Addresses[0] != \"localhost:8086\" {\n\t\tt.Errorf(\"Expected `localhost:8086` got %s\", qc.Addresses[0])\n\t}\n\tif qc.Database != \"stress\" {\n\t\tt.Errorf(\"Expected stress got %s\", qc.Database)\n\t}\n\tif qc.QueryInterval != \"100ms\" {\n\t\tt.Errorf(\"Expected 100ms got %s\", qc.QueryInterval)\n\t}\n\tif qc.Concurrency != 1 {\n\t\tt.Errorf(\"Expected 1 got %v\", qc.Concurrency)\n\t}\n}\n\n/// run.go\n// TODO\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/stress_test_server/server.go",
    "content": "package main\n\nimport (\n\t\"expvar\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/paulbellamy/ratecounter\"\n)\n\nvar (\n\tcounter       *ratecounter.RateCounter\n\thitspersecond = expvar.NewInt(\"hits_per_second\")\n\tmu            sync.Mutex\n\tm             sync.Mutex\n)\n\n// Query handles /query endpoint\nfunc Query(w http.ResponseWriter, req *http.Request) {\n\tio.WriteString(w, \"du\")\n}\n\n// Count handles /count endpoint\nfunc Count(w http.ResponseWriter, req *http.Request) {\n\tio.WriteString(w, fmt.Sprintf(\"%v\", linecount))\n}\n\nvar n int\nvar linecount int\n\n// Write handles /write endpoints\nfunc Write(w http.ResponseWriter, req *http.Request) {\n\tmu.Lock()\n\tn++\n\tmu.Unlock()\n\n\tcounter.Incr(1)\n\thitspersecond.Set(counter.Rate())\n\tw.WriteHeader(http.StatusNoContent)\n\tfmt.Printf(\"Reqests Per Second: %v\\n\", hitspersecond)\n\tfmt.Printf(\"Count: %v\\n\", n)\n\n\tcontent, _ := ioutil.ReadAll(req.Body)\n\tm.Lock()\n\tarr := strings.Split(string(content), \"\\n\")\n\tlinecount += len(arr)\n\tm.Unlock()\n\n\tfmt.Printf(\"Line Count: %v\\n\\n\", linecount)\n}\n\nfunc init() {\n\tn = 0\n\tlinecount = 0\n\tcounter = ratecounter.NewRateCounter(1 * time.Second)\n}\n\nfunc main() {\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/query\", Query)\n\tmux.HandleFunc(\"/write\", Write)\n\tmux.HandleFunc(\"/count\", Count)\n\n\terr := http.ListenAndServe(\":1234\", mux)\n\tif err != nil {\n\t\tfmt.Println(\"Fatal\")\n\t}\n\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/template.go",
    "content": "package stress\n\nvar s = `\n[provision]\n  [provision.basic]\n    enabled = true\n    address = \"localhost:8086\"\n    database = \"stress\"\n    reset_database = true\n\n[write]\n  [write.point_generator]\n    [write.point_generator.basic]\n      enabled = true\n      point_count = 100\n      series_count = 100000\n      tick = \"10s\"\n      jitter = true\n      measurement = \"cpu\"\n      start_date = \"2006-Jan-02\"\n      precision = \"n\"\n      [[write.point_generator.basic.tag]]\n        key = \"host\"\n        value = \"server\"\n      [[write.point_generator.basic.tag]]\n        key = \"location\"\n        value = \"us-west\"\n      [[write.point_generator.basic.field]]\n        key = \"value\"\n        value = \"float64\"\n\n\n  [write.influx_client]\n    [write.influx_client.basic]\n      enabled = true\n      addresses = [\"localhost:8086\"]\n      database = \"stress\"\n      precision = \"n\"\n      batch_size = 5000\n      batch_interval = \"0s\"\n      concurrency = 10\n      ssl = false\n      format = \"line_http\" # line_udp, graphite_tcp, graphite_udp\n\n[read]\n  [read.query_generator]\n    [read.query_generator.basic]\n      template = \"SELECT count(value) FROM cpu where host='server-%v'\"\n      query_count = 250\n\n  [read.query_client]\n    [read.query_client.basic]\n      enabled = true\n      addresses = [\"localhost:8086\"]\n      database = \"stress\"\n      query_interval = \"100ms\"\n      concurrency = 1\n`\n\n// BasicStress returns a config for a basic\n// stress test.\nfunc BasicStress() (*Config, error) {\n\treturn DecodeConfig(s)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/util.go",
    "content": "package stress\n\nimport (\n\t\"time\"\n)\n\n// Timer is struct that can be used to track elaspsed time\ntype Timer struct {\n\tstart time.Time\n\tend   time.Time\n}\n\n// Start returns a Timers start field\nfunc (t *Timer) Start() time.Time {\n\treturn t.start\n}\n\n// End returns a Timers end field\nfunc (t *Timer) End() time.Time {\n\treturn t.end\n}\n\n// StartTimer sets a timers `start` field to the current time\nfunc (t *Timer) StartTimer() {\n\tt.start = time.Now()\n}\n\n// StopTimer sets a timers `end` field to the current time\nfunc (t *Timer) StopTimer() {\n\tt.end = time.Now()\n}\n\n// Elapsed returns the total elapsed time between the `start`\n// and `end` fields on a timer.\nfunc (t *Timer) Elapsed() time.Duration {\n\treturn t.end.Sub(t.start)\n}\n\n// NewTimer returns a pointer to a `Timer` struct where the\n// timers `start` field has been set to `time.Now()`\nfunc NewTimer() *Timer {\n\tt := &Timer{}\n\tt.StartTimer()\n\treturn t\n}\n\n// ResponseTime is a struct that contains `Value`\n// `Time` pairing.\ntype ResponseTime struct {\n\tValue int\n\tTime  time.Time\n}\n\n// NewResponseTime returns a new response time\n// with value `v` and time `time.Now()`.\nfunc NewResponseTime(v int) ResponseTime {\n\tr := ResponseTime{Value: v, Time: time.Now()}\n\treturn r\n}\n\n// ResponseTimes is a slice of response times\ntype ResponseTimes []ResponseTime\n\n// Implements the `Len` method for the\n// sort.Interface type\nfunc (rs ResponseTimes) Len() int {\n\treturn len(rs)\n}\n\n// Implements the `Less` method for the\n// sort.Interface type\nfunc (rs ResponseTimes) Less(i, j int) bool {\n\treturn rs[i].Value < rs[j].Value\n}\n\n// Implements the `Swap` method for the\n// sort.Interface type\nfunc (rs ResponseTimes) Swap(i, j int) {\n\trs[i], rs[j] = rs[j], rs[i]\n}\n\n//////////////////////////////////\n\n// ConcurrencyLimiter is a go routine safe struct that can be used to\n// ensure that no more than a specifid max number of goroutines 
are\n// executing.\ntype ConcurrencyLimiter struct {\n\tinc   chan chan struct{}\n\tdec   chan struct{}\n\tmax   int\n\tcount int\n}\n\n// NewConcurrencyLimiter returns a configured limiter that will\n// ensure that calls to Increment will block if the max is hit.\nfunc NewConcurrencyLimiter(max int) *ConcurrencyLimiter {\n\tc := &ConcurrencyLimiter{\n\t\tinc: make(chan chan struct{}),\n\t\tdec: make(chan struct{}, max),\n\t\tmax: max,\n\t}\n\tgo c.handleLimits()\n\treturn c\n}\n\n// Increment will increase the count of running goroutines by 1.\n// if the number is currently at the max, the call to Increment\n// will block until another goroutine decrements.\nfunc (c *ConcurrencyLimiter) Increment() {\n\tr := make(chan struct{})\n\tc.inc <- r\n\t<-r\n}\n\n// Decrement will reduce the count of running goroutines by 1\nfunc (c *ConcurrencyLimiter) Decrement() {\n\tc.dec <- struct{}{}\n}\n\n// handleLimits runs in a goroutine to manage the count of\n// running goroutines.\nfunc (c *ConcurrencyLimiter) handleLimits() {\n\tfor {\n\t\tr := <-c.inc\n\t\tif c.count >= c.max {\n\t\t\t<-c.dec\n\t\t\tc.count--\n\t\t}\n\t\tc.count++\n\t\tr <- struct{}{}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/DESIGN.md",
    "content": "# Influx Stress tool -> `v2`\n\nThe design of the new stress tool was designed to:\n* have higher potential write throughput than previous version\n* have more schema expressibility for testing different load profiles and professional services\n* have more granular reporting to be better able to draw conclusions from tests \n\nIn service of these requirements we designed a language that looks a lot like `influxql` to give the new test commands.  Instead of a configuration file, the new stress test takes a list of these `Statements`.  \n\nThe tool has the following components:\n* Parser - parses the configuration file and turns it into an `[]Statement`. All code related to the parser is in `v2/stressql/`. The parser was designed as per @benbjohnson's great article on [parsers in go](https://blog.gopheracademy.com/advent-2014/parsers-lexers/).\n* Statements - perform operations on target instance or change test environment. All code related to statements is in `v2/statement/`. The following are the available statements:\n  - `EXEC` - Still a TODO, planned to run outside scripts from the config file.\n  - `GO` - Prepend to an `INSERT` or `QUERY` statement to run concurrently.\n  - `INFLUXQL` - All valid `influxql` will be passed directly to the targeted instance. Useful for setting up complex downsampling environments or just your testing environment.\n  - `INSERT` - Generates points following a template\n  - `QUERY` - Runs a given query or generates sample queries given a companion `INSERT` statement\n  - `SET` - Changes the test parameters. Defaults are listed in the `README.md`\n  - `WAIT` - Required after a `GO` statement. Blocks till all proceeding statements finish.\n* Clients - The statement, results and InfluxDB clients. This code lives in `v2/stress_client`\n  - `StressTest` - The `Statement` client. Also contains the results client.\n  - `stressClient` - A performant InfluxDB client. Makes `GET /query` and `POST /write` requests. 
Forwards the results to the results client.\n  \n![Influx Stress Design](./influx_stress_v2.png)\n\n### Statements\n\n`Statement` is an interface defined in `v2/statement/statement.go`:\n```go\ntype Statement interface {\n\tRun(s *stressClient.StressTest)\n\tReport(s *stressClient.StressTest) string\n\tSetID(s string)\n}\n```\n* `Run` prompts the statement to carry out it's instructions. See the run functions of the various statements listed above for more information. \n* `Report` retrieves and collates all recorded test data from the reporting InfluxDB instance.\n* `SetID` gives the statement an ID. Used in the parser. Each `statementID` is an 8 character random string used for reporting.\n\n### `Statement` -> `StressTest`\n\n`Statement`s send `Package`s (queries or writes to the target database) or `Directives` (for changing test state) through the `StressTest` to the `stressClient` where they are processed.\n```go\n// v2/stress_client/package.go\n\n// T is Query or Write\n// StatementID is for reporting\ntype Package struct {\n\tT           Type\n\tBody        []byte\n\tStatementID string\n\tTracer      *Tracer\n}\n\n// v2/stress_client/directive.go\n\n// Property is test state variable to change\n// Value is the new value\ntype Directive struct {\n\tProperty string\n\tValue    string\n\tTracer   *Tracer\n}\n```\n\nThe `Tracer` on both of these packages contains a `sync.WaitGroup` that prevents `Statement`s from returning before all their operations are finished. This `WaitGroup` is incremented in the `Run()` of the statement and decremented in `*StressTest.resultsListen()` after results are recorded in the database. This is well documented with inline comments. 
`Tracer`s also carry optional tags for reporting purposes.\n\n```go\n//  v2/stress_client/tracer.go\ntype Tracer struct {\n\tTags map[string]string\n\n\tsync.WaitGroup\n}\n```\n\n### `StressTest`\n\nThe `StressTest` is the client for the statements through the `*StressTest.SendPackage()` and `*StressTest.SendDirective()` functions. It also contains some test state and the `ResultsClient`. \n\n```go\ntype StressTest struct {\n\tTestID   string\n\tTestName string\n\n\tPrecision string\n\tStartDate string\n\tBatchSize int\n\n\tsync.WaitGroup\n\tsync.Mutex\n\n\tpackageChan   chan<- Package\n\tdirectiveChan chan<- Directive\n\n\tResultsChan   chan Response\n\tcommunes      map[string]*commune\n\tResultsClient influx.Client\n}\n```\n\n### Reporting Client\n\nThe `ResultsClient` turns raw responses from InfluxDB into properly tagged points containing any relevant information for storage in another InfluxDB instance. The code for creating those points lives in `v2/stress_client/reporting.go`\n\n### InfluxDB Instance (reporting)\n\nThis is `localhost:8086` by default. The results are currently stored in the `_stressTest` database.\n\n### `stressClient`\n\nAn InfluxDB client designed for speed. `stressClient` also holds most test state. 
\n\n```go\n// v2/stress_client/stress_client.go\ntype stressClient struct {\n\ttestID string\n\n\t// State for the Stress Test\n\taddresses []string\n\tprecision string\n\tstartDate string\n\tdatabase  string\n\twdelay    string\n\tqdelay    string\n\n\t// Channels from statements\n\tpackageChan   <-chan Package\n\tdirectiveChan <-chan Directive\n\n\t// Response channel\n\tresponseChan chan<- Response\n\n\t// Concurrency utilities\n\tsync.WaitGroup\n\tsync.Mutex\n\n\t// Concurrency Limit for Writes and Reads\n\twconc int\n\tqconc int\n\n\t// Manage Read and Write concurrency separately\n\twc *ConcurrencyLimiter\n\trc *ConcurrencyLimiter\n}\n```\nCode for handling the write path is in `v2/stress_client/stress_client_write.go` while the query path is in `v2/stress_client/stress_client_query.go`.\n\n### InfluxDB Instance (stress test target)\n\nThe InfluxDB which is being put under stress.\n\n### response data\n\n`Response`s carry points from `stressClient` to the `ResultsClient`.\n\n```go\n// v2/stress_client/response.go\ntype Response struct {\n\tPoint  *influx.Point\n\tTracer *Tracer\n}\n```\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/README.md",
    "content": "# Influx Stress Tool V2\n\n```\n$ influx_stress -v2 -config iql/file.iql\n```\n\nThis stress tool works from list of InfluxQL-esque statements. The language has been extended to allow for some basic templating of fields, tags and measurements in both line protocol and query statements.\n\nBy default the test outputs a human readable report to `STDOUT` and records test statistics in an active installation of InfluxDB at `localhost:8086`.\n\nTo set state variables for the test such as the address of the Influx node use the following syntax:\n\n```\n# The values listed below are the default values for each of the parameters\n \n# Pipe delineated list of addresses. For cluster: [192.168.0.10:8086|192.168.0.2:8086|192.168.0.3:8086]\n# Queries and writes are round-robin to the configured addresses.\nSET Addresses [localhost:8086]\n\n# False (default) uses http, true uses https\nSET SSL [false]\n\n# Username for targeted influx server or cluster\nSET Username []\n\n# Password for targeted influx server or cluster\nSET Password []\n\n# Database to target for queries and writes. 
Works like the InfluxCLI USE\nSET Database [stress]\n\n# Precision for the data being written\n# Only s and ns supported\nSET Precision [s]\n\n# Date the first written point will be timestamped\nSET StartDate [2016-01-01]\n\n# Size of batches to send to InfluxDB\nSET BatchSize [5000]\n\n# Time to wait between sending batches\nSET WriteInterval [0s]\n\n# Time to wait between sending queries\nSET QueryInterval [0s]\n\n# Number of concurrent writers\nSET WriteConcurrency [15]\n\n# Number of concurrent readers\nSET QueryConcurrency [5]\n```\n\nThe values in the example are also the defaults.\n\nValid line protocol will be forwarded right to the server making setting up your testing environment easy:\n\n```\nCREATE DATABASE thing\n\nALTER RETENTION POLICY default ON thing DURATION 1h REPLICATION 1\n\nSET database [thing]\n```\n\nYou can write points like this:\n```\nINSERT mockCpu\ncpu,\nhost=server-[int inc(0) 10000],location=[string rand(8) 1000]\nvalue=[float rand(1000) 0]\n100000 10s\n\nExplained:\n\n# INSERT keyword kicks off the statement, next to it is the name of the statement for reporting and templated query generation\nINSERT mockCpu\n# Measurement\ncpu,\n# Tags - separated by commas. Tag values can be templates, mixed template and fixed values\nhost=server-[float rand(100) 10000],location=[int inc(0) 1000],fixed=[fix|fid|dor|pom|another_tag_value]\n# Fields - separated by commas either templates, mixed template and fixed values\nvalue=[float inc(0) 0]\n# 'Timestamp' - Number of points to insert into this measurement and the amount of time between points\n100000 10s\n```\n\nEach template contains 3 parts: a datatype (`str`, `float`, or `int`) a function which describes how the value changes between points: `inc(0)` is increasing and `rand(n)` is a random number between `0` and `n`. The last number is the number of unique values in the tag or field. `0` is unbounded. 
To make a tag\n\nTo run multiple insert statements at once:\n```\nGO INSERT devices\ndevices,\ncity=[str rand(8) 10],country=[str rand(8) 25],device_id=[str rand(10) 1000]\nlat=[float rand(90) 0],lng=[float rand(120) 0],temp=[float rand(40) 0]\n10000000 10s\n\nGO INSERT devices2\ndevices2,\ncity=[str rand(8) 10],country=[str rand(8) 25],device_id=[str rand(10) 1000]\nlat=[float rand(90) 0],lng=[float rand(120) 0],temp=[float rand(40) 0]\n10000000 10s\n\nWAIT\n```\n\nFastest point generation and write load requires 3-4 running `GO INSERT` statements at a time.\n\nYou can run queries like this:\n\n```\nQUERY cpu\nSELECT mean(value) FROM cpu WHERE host='server-1'\nDO 1000\n```\n\n### Output:\nOutput for config file in this repo:\n```\n[√] \"CREATE DATABASE thing\" -> 1.806785ms\n[√] \"CREATE DATABASE thing2\" -> 1.492504ms\nSET Database = 'thing'\nSET Precision = 's'\nGo Write Statement:                    mockCpu\n  Points/Sec:                          245997\n  Resp Time Average:                   173.354445ms\n  Resp Time Standard Deviation:        123.80344ms\n  95th Percentile Write Response:      381.363503ms\n  Average Request Bytes:               276110\n  Successful Write Reqs:               20\n  Retries:                             0\nGo Query Statement:                    mockCpu\n  Resp Time Average:                   3.140803ms\n  Resp Time Standard Deviation:        2.292328ms\n  95th Percentile Read Response:       5.915437ms\n  Query Resp Bytes Average:            16 bytes\n  Successful Queries:                  10\nWAIT -> 406.400059ms\nSET DATABASE = 'thing2'\nGo Write Statement:                    devices\n  Points/Sec:                          163348\n  Resp Time Average:                   132.553789ms\n  Resp Time Standard Deviation:        149.397972ms\n  95th Percentile Write Response:      567.987467ms\n  Average Request Bytes:               459999\n  Successful Write Reqs:               20\n  Retries:                             0\nGo Write 
Statement:                    devices2\n  Points/Sec:                          160078\n  Resp Time Average:                   133.303097ms\n  Resp Time Standard Deviation:        144.352404ms\n  95th Percentile Write Response:      560.565066ms\n  Average Request Bytes:               464999\n  Successful Write Reqs:               20\n  Retries:                             0\nGo Query Statement:                    fooName\n  Resp Time Average:                   1.3307ms\n  Resp Time Standard Deviation:        640.249µs\n  95th Percentile Read Response:       2.668ms\n  Query Resp Bytes Average:            16 bytes\n  Successful Queries:                  10\nWAIT -> 624.585319ms\n[√] \"DROP DATABASE thing\" -> 991.088464ms\n[√] \"DROP DATABASE thing2\" -> 421.362831ms\n```\n\n### Next Steps:\n\n##### Documentation\n- Parser behavior and proper `.iql` syntax\n- How the templated query generation works\n- Collection of tested `.iql` files to simulate different loads\n  \n##### Performance\n- `Commune`, a stuct to enable templated Query generation, is blocking writes when used, look into performance.\n- Templated query generation is currently in a quazi-working state. See the above point.\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/iql/default.iql",
    "content": "CREATE DATABASE stress\n\nGO INSERT cpu\ncpu,\nhost=server-[int inc(0) 100000],location=us-west\nvalue=[int rand(100) 0]\n10000000 10s \n\nGO QUERY cpu\nSELECT count(value) FROM cpu WHERE %t\nDO 250\n\nWAIT\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/iql/file.iql",
    "content": "CREATE DATABASE thing\n\nCREATE DATABASE thing2\n\nSET Database [thing]\n\nSET Precision [s]\n\nGO INSERT mockCpu\ncpu,\nhost=server-[float inc(0) 10000],loc=[us-west|us-east|eu-north]\nvalue=[int inc(100) 0]\n100000 10s\n\nGO QUERY mockCpu\nSELECT mean(value) FROM cpu WHERE host='server-1'\nDO 10\n\nWAIT\n\nSET DATABASE [thing2]\n\nGO INSERT devices\ndevices,\ncity=[str rand(8) 100],country=[str rand(8) 25],device_id=[str rand(10) 100]\nlat=[float rand(90) 0],lng=[float rand(120) 0],temp=[float rand(40) 0]\n100000 10s\n\nGO INSERT devices2\ndevices2,\ncity=[str rand(8) 100],country=[str rand(8) 25],device_id=[str rand(10) 100]\nlat=[float rand(90) 0],lng=[float rand(120) 0],temp=[float rand(40) 0]\n100000 10s\n\nGO QUERY fooName\nSELECT count(temp) FROM devices WHERE temp > 30\nDO 10\n\nWAIT\n\nDROP DATABASE thing\n\nDROP DATABASE thing2\n\nWAIT\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/main.go",
    "content": "package stress\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\tinflux \"github.com/influxdata/influxdb/client/v2\"\n\t\"github.com/influxdata/influxdb/stress/v2/stress_client\"\n\t\"github.com/influxdata/influxdb/stress/v2/stressql\"\n)\n\n// RunStress takes a configFile and kicks off the stress test\nfunc RunStress(file string) {\n\n\t// Spin up the Client\n\ts := stressClient.NewStressTest()\n\n\t// Parse the file into Statements\n\tstmts, err := stressql.ParseStatements(file)\n\n\t// Log parse errors and quit if found\n\tif err != nil {\n\t\tlog.Fatalf(\"Parsing Error\\n  error: %v\\n\", err)\n\t}\n\n\t// Run all statements\n\tfor _, stmt := range stmts {\n\t\tstmt.Run(s)\n\t}\n\n\t// Clear out the batch of unsent response points\n\tresp := blankResponse()\n\ts.ResultsChan <- resp\n\tresp.Tracer.Wait()\n\n\t// Compile all Reports\n\tfor _, stmt := range stmts {\n\t\tfmt.Println(stmt.Report(s))\n\t}\n}\n\nfunc blankResponse() stressClient.Response {\n\t// Points must have at least one field\n\tfields := map[string]interface{}{\"done\": true}\n\t// Make a 'blank' point\n\tp, err := influx.NewPoint(\"done\", make(map[string]string), fields, time.Now())\n\t// Panic on error\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating blank response point\\n  error: %v\\n\", err)\n\t}\n\t// Add a tracer to prevent program from returning too early\n\ttracer := stressClient.NewTracer(make(map[string]string))\n\t// Add to the WaitGroup\n\ttracer.Add(1)\n\t// Make a new response with the point and the tracer\n\tresp := stressClient.NewResponse(p, tracer)\n\treturn resp\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/exec.go",
    "content": "package statement\n\nimport (\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/stress/v2/stress_client\"\n)\n\n// ExecStatement run outside scripts. This functionality is not built out\n// TODO: Wire up!\ntype ExecStatement struct {\n\tStatementID string\n\tScript      string\n\n\truntime time.Duration\n}\n\n// SetID statisfies the Statement Interface\nfunc (i *ExecStatement) SetID(s string) {\n\ti.StatementID = s\n}\n\n// Run statisfies the Statement Interface\nfunc (i *ExecStatement) Run(s *stressClient.StressTest) {\n\truntime := time.Now()\n\ti.runtime = time.Since(runtime)\n}\n\n// Report statisfies the Statement Interface\nfunc (i *ExecStatement) Report(s *stressClient.StressTest) string {\n\treturn \"\"\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/exec_test.go",
    "content": "package statement\n\nimport (\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/stress/v2/stress_client\"\n)\n\nfunc TestExecSetID(t *testing.T) {\n\te := newTestExec()\n\tnewID := \"oaijnifo\"\n\te.SetID(newID)\n\tif e.StatementID != newID {\n\t\tt.Errorf(\"Expected: %v\\nGot: %v\\n\", newID, e.StatementID)\n\t}\n}\n\nfunc TestExecRun(t *testing.T) {\n\te := newTestExec()\n\ts, _, _ := stressClient.NewTestStressTest()\n\te.Run(s)\n\tif e == nil {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestExecReport(t *testing.T) {\n\te := newTestExec()\n\ts, _, _ := stressClient.NewTestStressTest()\n\trep := e.Report(s)\n\tif rep != \"\" {\n\t\tt.Fail()\n\t}\n}\n\nfunc newTestExec() *ExecStatement {\n\treturn &ExecStatement{\n\t\tStatementID: \"fooID\",\n\t\tScript:      \"fooscript.txt\",\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/function.go",
    "content": "package statement\n\nimport (\n\tcrypto \"crypto/rand\"\n\t\"fmt\"\n\t\"math/rand\"\n)\n\n// ################\n// # Function     #\n// ################\n\n// Function is a struct that holds information for generating values in templated points\ntype Function struct {\n\tType     string\n\tFn       string\n\tArgument int\n\tCount    int\n}\n\n// NewStringer creates a new Stringer\nfunc (f *Function) NewStringer(series int) Stringer {\n\tvar fn Stringer\n\tswitch f.Type {\n\tcase \"int\":\n\t\tfn = NewIntFunc(f.Fn, f.Argument)\n\tcase \"float\":\n\t\tfn = NewFloatFunc(f.Fn, f.Argument)\n\tcase \"str\":\n\t\tfn = NewStrFunc(f.Fn, f.Argument)\n\tdefault:\n\t\tfn = func() string { return \"STRINGER ERROR\" }\n\t}\n\n\tif int(f.Count) != 0 {\n\t\treturn cycle(f.Count, fn)\n\t}\n\n\treturn nTimes(series, fn)\n\n}\n\n// ################\n// # Stringers    #\n// ################\n\n// Stringers is a collection of Stringer\ntype Stringers []Stringer\n\n// Eval returns an array of all the Stringer functions evaluated once\nfunc (s Stringers) Eval(time func() int64) []interface{} {\n\tarr := make([]interface{}, len(s)+1)\n\n\tfor i, st := range s {\n\t\tarr[i] = st()\n\t}\n\n\tarr[len(s)] = time()\n\n\treturn arr\n}\n\n// Stringer is a function that returns a string\ntype Stringer func() string\n\nfunc randStr(n int) func() string {\n\treturn func() string {\n\t\tb := make([]byte, n/2)\n\n\t\t_, _ = crypto.Read(b)\n\n\t\treturn fmt.Sprintf(\"%x\", b)\n\t}\n}\n\n// NewStrFunc reates a new striger to create strings for templated writes\nfunc NewStrFunc(fn string, arg int) Stringer {\n\tswitch fn {\n\tcase \"rand\":\n\t\treturn randStr(arg)\n\tdefault:\n\t\treturn func() string { return \"STR ERROR\" }\n\t}\n}\n\nfunc randFloat(n int) func() string {\n\treturn func() string {\n\t\treturn fmt.Sprintf(\"%v\", rand.Intn(n))\n\t}\n}\n\nfunc incFloat(n int) func() string {\n\ti := n\n\treturn func() string {\n\t\ts := fmt.Sprintf(\"%v\", i)\n\t\ti++\n\t\treturn 
s\n\t}\n}\n\n// NewFloatFunc reates a new striger to create float values for templated writes\nfunc NewFloatFunc(fn string, arg int) Stringer {\n\tswitch fn {\n\tcase \"rand\":\n\t\treturn randFloat(arg)\n\tcase \"inc\":\n\t\treturn incFloat(arg)\n\tdefault:\n\t\treturn func() string { return \"FLOAT ERROR\" }\n\t}\n}\n\nfunc randInt(n int) Stringer {\n\treturn func() string {\n\t\treturn fmt.Sprintf(\"%vi\", rand.Intn(n))\n\t}\n}\n\nfunc incInt(n int) Stringer {\n\ti := n\n\treturn func() string {\n\t\ts := fmt.Sprintf(\"%vi\", i)\n\t\ti++\n\t\treturn s\n\t}\n}\n\n// NewIntFunc reates a new striger to create int values for templated writes\nfunc NewIntFunc(fn string, arg int) Stringer {\n\tswitch fn {\n\tcase \"rand\":\n\t\treturn randInt(arg)\n\tcase \"inc\":\n\t\treturn incInt(arg)\n\tdefault:\n\t\treturn func() string { return \"INT ERROR\" }\n\t}\n}\n\n// nTimes will return the previous return value of a function\n// n-many times before calling the function again\nfunc nTimes(n int, fn Stringer) Stringer {\n\ti := 0\n\tt := fn()\n\treturn func() string {\n\t\ti++\n\t\tif i > n {\n\t\t\tt = fn()\n\t\t\ti = 1\n\t\t}\n\t\treturn t\n\t}\n}\n\n// cycle will cycle through a list of values before repeating them\n\nfunc cycle(n int, fn Stringer) Stringer {\n\tif n == 0 {\n\t\treturn fn\n\t}\n\ti := 0\n\tcache := make([]string, n)\n\tt := fn()\n\tcache[i] = t\n\n\treturn func() string {\n\t\ti++\n\n\t\tif i < n {\n\t\t\tcache[i] = fn()\n\t\t}\n\n\t\tt = cache[(i-1)%n]\n\t\treturn t\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/function_test.go",
    "content": "package statement\n\nimport (\n\t\"testing\"\n)\n\nfunc TestNewStrRandStringer(t *testing.T) {\n\tfunction := newStrRandFunction()\n\tstrRandStringer := function.NewStringer(10)\n\ts := strRandStringer()\n\tif len(s) != function.Argument {\n\t\tt.Errorf(\"Expected: %v\\nGot: %v\\n\", function.Argument, len(s))\n\t}\n}\n\nfunc TestNewIntIncStringer(t *testing.T) {\n\tfunction := newIntIncFunction()\n\tintIncStringer := function.NewStringer(10)\n\ts := intIncStringer()\n\tif s != \"0i\" {\n\t\tt.Errorf(\"Expected: 0i\\nGot: %v\\n\", s)\n\t}\n}\n\nfunc TestNewIntRandStringer(t *testing.T) {\n\tfunction := newIntRandFunction()\n\tintRandStringer := function.NewStringer(10)\n\ts := intRandStringer()\n\tif parseInt(s[:len(s)-1]) > function.Argument {\n\t\tt.Errorf(\"Expected value below: %v\\nGot value: %v\\n\", function.Argument, s)\n\t}\n}\n\nfunc TestNewFloatIncStringer(t *testing.T) {\n\tfunction := newFloatIncFunction()\n\tfloatIncStringer := function.NewStringer(10)\n\ts := floatIncStringer()\n\tif parseFloat(s) != function.Argument {\n\t\tt.Errorf(\"Expected value: %v\\nGot: %v\\n\", function.Argument, s)\n\t}\n}\nfunc TestNewFloatRandStringer(t *testing.T) {\n\tfunction := newFloatRandFunction()\n\tfloatRandStringer := function.NewStringer(10)\n\ts := floatRandStringer()\n\tif parseFloat(s) > function.Argument {\n\t\tt.Errorf(\"Expected value below: %v\\nGot value: %v\\n\", function.Argument, s)\n\t}\n}\n\nfunc TestStringersEval(t *testing.T) {\n\t// Make the *Function(s)\n\tstrRandFunction := newStrRandFunction()\n\tintIncFunction := newIntIncFunction()\n\tintRandFunction := newIntRandFunction()\n\tfloatIncFunction := newFloatIncFunction()\n\tfloatRandFunction := newFloatRandFunction()\n\t// Make the *Stringer(s)\n\tstrRandStringer := strRandFunction.NewStringer(10)\n\tintIncStringer := intIncFunction.NewStringer(10)\n\tintRandStringer := intRandFunction.NewStringer(10)\n\tfloatIncStringer := floatIncFunction.NewStringer(10)\n\tfloatRandStringer 
:= floatRandFunction.NewStringer(10)\n\t// Make the *Stringers\n\tstringers := Stringers([]Stringer{strRandStringer, intIncStringer, intRandStringer, floatIncStringer, floatRandStringer})\n\t// Spoff the Time function\n\t// Call *Stringers.Eval\n\tvalues := stringers.Eval(spoofTime)\n\t// Check the strRandFunction\n\tif len(values[0].(string)) != strRandFunction.Argument {\n\t\tt.Errorf(\"Expected: %v\\nGot: %v\\n\", strRandFunction.Argument, len(values[0].(string)))\n\t}\n\t// Check the intIncFunction\n\tif values[1].(string) != \"0i\" {\n\t\tt.Errorf(\"Expected: 0i\\nGot: %v\\n\", values[1].(string))\n\t}\n\t// Check the intRandFunction\n\ts := values[2].(string)\n\tif parseInt(s[:len(s)-1]) > intRandFunction.Argument {\n\t\tt.Errorf(\"Expected value below: %v\\nGot value: %v\\n\", intRandFunction.Argument, s)\n\t}\n\t// Check the floatIncFunction\n\tif parseFloat(values[3].(string)) != floatIncFunction.Argument {\n\t\tt.Errorf(\"Expected value: %v\\nGot: %v\\n\", floatIncFunction.Argument, values[3])\n\t}\n\t// Check the floatRandFunction\n\tif parseFloat(values[4].(string)) > floatRandFunction.Argument {\n\t\tt.Errorf(\"Expected value below: %v\\nGot value: %v\\n\", floatRandFunction.Argument, values[4])\n\t}\n\t// Check the spoofTime func\n\tif values[5] != 8 {\n\n\t}\n}\n\nfunc spoofTime() int64 {\n\treturn int64(8)\n}\n\nfunc newStrRandFunction() *Function {\n\treturn &Function{\n\t\tType:     \"str\",\n\t\tFn:       \"rand\",\n\t\tArgument: 8,\n\t\tCount:    1000,\n\t}\n}\n\nfunc newIntIncFunction() *Function {\n\treturn &Function{\n\t\tType:     \"int\",\n\t\tFn:       \"inc\",\n\t\tArgument: 0,\n\t\tCount:    0,\n\t}\n}\n\nfunc newIntRandFunction() *Function {\n\treturn &Function{\n\t\tType:     \"int\",\n\t\tFn:       \"rand\",\n\t\tArgument: 100,\n\t\tCount:    1000,\n\t}\n}\n\nfunc newFloatIncFunction() *Function {\n\treturn &Function{\n\t\tType:     \"float\",\n\t\tFn:       \"inc\",\n\t\tArgument: 0,\n\t\tCount:    1000,\n\t}\n}\n\nfunc 
newFloatRandFunction() *Function {\n\treturn &Function{\n\t\tType:     \"float\",\n\t\tFn:       \"rand\",\n\t\tArgument: 100,\n\t\tCount:    1000,\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/go.go",
    "content": "package statement\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/stress/v2/stress_client\"\n)\n\n// GoStatement is a Statement Implementation to allow other statements to be run concurrently\ntype GoStatement struct {\n\tStatement\n\n\tStatementID string\n}\n\n// SetID statisfies the Statement Interface\nfunc (i *GoStatement) SetID(s string) {\n\ti.StatementID = s\n}\n\n// Run statisfies the Statement Interface\nfunc (i *GoStatement) Run(s *stressClient.StressTest) {\n\t// TODO: remove\n\tswitch i.Statement.(type) {\n\tcase *QueryStatement:\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\n\ts.Add(1)\n\tgo func() {\n\t\ti.Statement.Run(s)\n\t\ts.Done()\n\t}()\n}\n\n// Report statisfies the Statement Interface\nfunc (i *GoStatement) Report(s *stressClient.StressTest) string {\n\treturn fmt.Sprintf(\"Go %v\", i.Statement.Report(s))\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/go_test.go",
    "content": "package statement\n\nimport (\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/stress/v2/stress_client\"\n)\n\nfunc TestGoSetID(t *testing.T) {\n\te := newTestGo()\n\tnewID := \"oaijnifo\"\n\te.SetID(newID)\n\tif e.StatementID != newID {\n\t\tt.Errorf(\"Expected: %v\\nGot: %v\\n\", newID, e.StatementID)\n\t}\n}\n\nfunc TestGoRun(t *testing.T) {\n\te := newTestGo()\n\ts, _, _ := stressClient.NewTestStressTest()\n\te.Run(s)\n\tif e == nil {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestGoReport(t *testing.T) {\n\te := newTestGo()\n\ts, _, _ := stressClient.NewTestStressTest()\n\treport := e.Report(s)\n\tif report != \"Go \" {\n\t\tt.Errorf(\"Expected: %v\\nGot: %v\\n\", \"Go \", report)\n\t}\n}\n\nfunc newTestGo() *GoStatement {\n\treturn &GoStatement{\n\t\tStatement:   newTestExec(),\n\t\tStatementID: \"fooID\",\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/influxql.go",
    "content": "package statement\n\nimport (\n\t\"log\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/stress/v2/stress_client\"\n)\n\n// InfluxqlStatement is a Statement Implementation that allows statements that parse in InfluxQL to be passed directly to the target instance\ntype InfluxqlStatement struct {\n\tStatementID string\n\tQuery       string\n\tTracer      *stressClient.Tracer\n}\n\nfunc (i *InfluxqlStatement) tags() map[string]string {\n\ttags := make(map[string]string)\n\treturn tags\n}\n\n// SetID statisfies the Statement Interface\nfunc (i *InfluxqlStatement) SetID(s string) {\n\ti.StatementID = s\n}\n\n// Run statisfies the Statement Interface\nfunc (i *InfluxqlStatement) Run(s *stressClient.StressTest) {\n\n\t// Set the tracer\n\ti.Tracer = stressClient.NewTracer(i.tags())\n\n\t// Make the Package\n\tp := stressClient.NewPackage(stressClient.Query, []byte(i.Query), i.StatementID, i.Tracer)\n\n\t// Increment the tracer\n\ti.Tracer.Add(1)\n\n\t// Send the Package\n\ts.SendPackage(p)\n\n\t// Wait for all operations to finish\n\ti.Tracer.Wait()\n}\n\n// Report statisfies the Statement Interface\n// No test coverage, fix\nfunc (i *InfluxqlStatement) Report(s *stressClient.StressTest) (out string) {\n\tallData := s.GetStatementResults(i.StatementID, \"query\")\n\n\tiqlr := &influxQlReport{\n\t\tstatement: i.Query,\n\t\tcolumns:   allData[0].Series[0].Columns,\n\t\tvalues:    allData[0].Series[0].Values,\n\t}\n\n\tiqlr.responseTime = time.Duration(responseTimes(iqlr.columns, iqlr.values)[0].Value)\n\n\tswitch countSuccesses(iqlr.columns, iqlr.values) {\n\tcase 0:\n\t\tiqlr.success = false\n\tcase 1:\n\t\tiqlr.success = true\n\tdefault:\n\t\tlog.Fatal(\"Error fetching response for InfluxQL statement\")\n\t}\n\n\treturn iqlr.String()\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/influxql_test.go",
    "content": "package statement\n\nimport (\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/stress/v2/stress_client\"\n)\n\nfunc TestInfluxQlSetID(t *testing.T) {\n\te := newTestInfluxQl()\n\tnewID := \"oaijnifo\"\n\te.SetID(newID)\n\tif e.StatementID != newID {\n\t\tt.Errorf(\"Expected: %v\\nGot: %v\\n\", newID, e.StatementID)\n\t}\n}\n\nfunc TestInfluxQlRun(t *testing.T) {\n\te := newTestInfluxQl()\n\ts, packageCh, _ := stressClient.NewTestStressTest()\n\tgo func() {\n\t\tfor pkg := range packageCh {\n\t\t\tif pkg.T != stressClient.Query {\n\t\t\t\tt.Errorf(\"Expected package to be Query\\nGot: %v\", pkg.T)\n\t\t\t}\n\t\t\tif string(pkg.Body) != e.Query {\n\t\t\t\tt.Errorf(\"Expected query: %v\\nGot: %v\", e.Query, string(pkg.Body))\n\t\t\t}\n\t\t\tif pkg.StatementID != e.StatementID {\n\t\t\t\tt.Errorf(\"Expected statementID: %v\\nGot: %v\", e.StatementID, pkg.StatementID)\n\t\t\t}\n\t\t\tpkg.Tracer.Done()\n\t\t}\n\t}()\n\te.Run(s)\n}\n\nfunc newTestInfluxQl() *InfluxqlStatement {\n\treturn &InfluxqlStatement{\n\t\tQuery:       \"CREATE DATABASE foo\",\n\t\tTracer:      stressClient.NewTracer(make(map[string]string)),\n\t\tStatementID: \"fooID\",\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/insert.go",
    "content": "package statement\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/stress/v2/stress_client\"\n)\n\n// InsertStatement is a Statement Implementation that creates points to be written to the target InfluxDB instance\ntype InsertStatement struct {\n\tTestID      string\n\tStatementID string\n\n\t// Statement Name\n\tName string\n\n\t// Template string for points. Filled by the output of stringers\n\tTemplateString string\n\n\t// TagCount is used to find the number of series in the dataset\n\tTagCount int\n\n\t// The Tracer prevents InsertStatement.Run() from returning early\n\tTracer *stressClient.Tracer\n\n\t// Timestamp is #points to write and percision\n\tTimestamp *Timestamp\n\n\t// Templates turn into stringers\n\tTemplates Templates\n\tstringers Stringers\n\n\t// Number of series in this insert Statement\n\tseries int\n\n\t// Returns the proper time for the next point\n\ttime func() int64\n\n\t// Concurrency utiliities\n\tsync.WaitGroup\n\tsync.Mutex\n\n\t// Timer for runtime and pps calculation\n\truntime time.Duration\n}\n\nfunc (i *InsertStatement) tags() map[string]string {\n\ttags := map[string]string{\n\t\t\"number_fields\":       i.numFields(),\n\t\t\"number_series\":       fmtInt(i.series),\n\t\t\"number_points_write\": fmtInt(i.Timestamp.Count),\n\t}\n\treturn tags\n}\n\n// SetID statisfies the Statement Interface\nfunc (i *InsertStatement) SetID(s string) {\n\ti.StatementID = s\n}\n\n// SetVars sets up the environment for InsertStatement to call it's Run function\nfunc (i *InsertStatement) SetVars(s *stressClient.StressTest) chan<- string {\n\t// Set the #series at 1 to start\n\ti.series = 1\n\n\t// Num series is the product of the cardinality of the tags\n\tfor _, tmpl := range i.Templates[0:i.TagCount] {\n\t\ti.series *= tmpl.numSeries()\n\t}\n\n\t// make stringers from the templates\n\ti.stringers = i.Templates.Init(i.series)\n\n\t// Set the 
time function, keeps track of 'time' of the points being created\n\ti.time = i.Timestamp.Time(s.StartDate, i.series, s.Precision)\n\n\t// Set a commune on the StressTest\n\ts.Lock()\n\tcomCh := s.SetCommune(i.Name)\n\ts.Unlock()\n\n\t// Set the tracer\n\ti.Tracer = stressClient.NewTracer(i.tags())\n\n\treturn comCh\n}\n\n// Run statisfies the Statement Interface\nfunc (i *InsertStatement) Run(s *stressClient.StressTest) {\n\n\t// Set variables on the InsertStatement and make the comCh\n\tcomCh := i.SetVars(s)\n\n\t// TODO: Refactor to eleminate the ctr\n\t// Start the counter\n\tctr := 0\n\n\t// Create the first bytes buffer\n\tbuf := bytes.NewBuffer([]byte{})\n\n\truntime := time.Now()\n\n\tfor k := 0; k < i.Timestamp.Count; k++ {\n\n\t\t// Increment the counter. ctr == k + 1?\n\t\tctr++\n\n\t\t// Make the point from the template string and the stringers\n\t\tpoint := fmt.Sprintf(i.TemplateString, i.stringers.Eval(i.time)...)\n\n\t\t// Add the string to the buffer\n\t\tbuf.WriteString(point)\n\t\t// Add a newline char to seperate the points\n\t\tbuf.WriteString(\"\\n\")\n\n\t\t// If len(batch) == batchSize then send it\n\t\tif ctr%s.BatchSize == 0 && ctr != 0 {\n\t\t\tb := buf.Bytes()\n\t\t\t// Trimming the trailing newline character\n\t\t\tb = b[0 : len(b)-1]\n\n\t\t\t// Create the package\n\t\t\tp := stressClient.NewPackage(stressClient.Write, b, i.StatementID, i.Tracer)\n\n\t\t\t// Use Tracer to wait for all operations to finish\n\t\t\ti.Tracer.Add(1)\n\n\t\t\t// Send the package\n\t\t\ts.SendPackage(p)\n\n\t\t\t// Reset the bytes Buffer\n\t\t\ttemp := bytes.NewBuffer([]byte{})\n\t\t\tbuf = temp\n\t\t}\n\n\t\t// TODO: Racy\n\t\t// Has to do with InsertStatement and QueryStatement communication\n\t\tif len(comCh) < cap(comCh) {\n\t\t\tselect {\n\t\t\tcase comCh <- point:\n\t\t\t\tbreak\n\t\t\tdefault:\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// If There are additional points remaining in the buffer send them before exiting\n\tif buf.Len() != 0 {\n\t\tb := 
buf.Bytes()\n\t\t// Trimming the trailing newline character\n\t\tb = b[0 : len(b)-1]\n\n\t\t// Create the package\n\t\tp := stressClient.NewPackage(stressClient.Write, b, i.StatementID, i.Tracer)\n\n\t\t// Use Tracer to wait for all operations to finish\n\t\ti.Tracer.Add(1)\n\n\t\t// Send the package\n\t\ts.SendPackage(p)\n\t}\n\n\t// Wait for all tracers to decrement\n\ti.Tracer.Wait()\n\n\t// Stop the timer\n\ti.runtime = time.Since(runtime)\n}\n\n// Report statisfies the Statement Interface\nfunc (i *InsertStatement) Report(s *stressClient.StressTest) string {\n\t// Pull data via StressTest client\n\tallData := s.GetStatementResults(i.StatementID, \"write\")\n\n\tif allData == nil || allData[0].Series == nil {\n\t\tlog.Fatalf(\"No data returned for write report\\n  Statement Name: %v\\n  Statement ID: %v\\n\", i.Name, i.StatementID)\n\t}\n\n\tir := &insertReport{\n\t\tname:    i.Name,\n\t\tcolumns: allData[0].Series[0].Columns,\n\t\tvalues:  allData[0].Series[0].Values,\n\t}\n\n\tresponseTimes := responseTimes(ir.columns, ir.values)\n\n\tir.percentile = percentile(responseTimes)\n\tir.avgResponseTime = avgDuration(responseTimes)\n\tir.stdDevResponseTime = stddevDuration(responseTimes)\n\tir.pointsPerSecond = int(float64(i.Timestamp.Count) / i.runtime.Seconds())\n\tir.numRetries = countRetries(ir.columns, ir.values)\n\tir.successfulWrites = countSuccesses(ir.columns, ir.values)\n\tir.avgRequestBytes = numberBytes(ir.columns, ir.values)\n\n\treturn ir.String()\n}\n\nfunc (i *InsertStatement) numFields() string {\n\tpt := strings.Split(i.TemplateString, \" \")\n\tfields := strings.Split(pt[1], \",\")\n\treturn fmtInt(len(fields))\n}\n\nfunc fmtInt(i int) string {\n\treturn strconv.FormatInt(int64(i), 10)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/insert_test.go",
    "content": "package statement\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/stress/v2/stress_client\"\n)\n\nfunc TestInsertSetID(t *testing.T) {\n\te := newTestInsert()\n\tnewID := \"oaijnifo\"\n\te.SetID(newID)\n\tif e.StatementID != newID {\n\t\tt.Errorf(\"Expected: %v\\nGot: %v\\n\", newID, e.StatementID)\n\t}\n}\n\nfunc TestInsertRun(t *testing.T) {\n\ti := newTestInsert()\n\ts, packageCh, _ := stressClient.NewTestStressTest()\n\t// Listen to the other side of the directiveCh\n\tgo func() {\n\t\tfor pkg := range packageCh {\n\t\t\tcountPoints := i.Timestamp.Count\n\t\t\tbatchSize := s.BatchSize\n\t\t\tgot := len(strings.Split(string(pkg.Body), \"\\n\"))\n\t\t\tswitch got {\n\t\t\tcase countPoints % batchSize:\n\t\t\tcase batchSize:\n\t\t\tdefault:\n\t\t\t\tt.Errorf(\"countPoints: %v\\nbatchSize: %v\\ngot: %v\\n\", countPoints, batchSize, got)\n\t\t\t}\n\t\t\tpkg.Tracer.Done()\n\t\t}\n\t}()\n\ti.Run(s)\n}\n\nfunc newTestInsert() *InsertStatement {\n\treturn &InsertStatement{\n\t\tTestID:         \"foo_test\",\n\t\tStatementID:    \"foo_ID\",\n\t\tName:           \"foo_name\",\n\t\tTemplateString: \"cpu,%v %v %v\",\n\t\tTimestamp:      newTestTimestamp(),\n\t\tTemplates:      newTestTemplates(),\n\t\tTagCount:       1,\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/query.go",
    "content": "package statement\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/stress/v2/stress_client\"\n)\n\n// QueryStatement is a Statement Implementation to run queries on the target InfluxDB instance\ntype QueryStatement struct {\n\tStatementID string\n\tName        string\n\n\t// TemplateString is a query template that can be filled in by Args\n\tTemplateString string\n\tArgs           []string\n\n\t// Number of queries to run\n\tCount int\n\n\t// Tracer for tracking returns\n\tTracer *stressClient.Tracer\n\n\t// track time for all queries\n\truntime time.Duration\n}\n\n// This function adds tags to the recording points\nfunc (i *QueryStatement) tags() map[string]string {\n\ttags := make(map[string]string)\n\treturn tags\n}\n\n// SetID statisfies the Statement Interface\nfunc (i *QueryStatement) SetID(s string) {\n\ti.StatementID = s\n}\n\n// Run statisfies the Statement Interface\nfunc (i *QueryStatement) Run(s *stressClient.StressTest) {\n\n\ti.Tracer = stressClient.NewTracer(i.tags())\n\n\tvals := make(map[string]interface{})\n\n\tvar point models.Point\n\n\truntime := time.Now()\n\n\tfor j := 0; j < i.Count; j++ {\n\n\t\t// If the query is a simple query, send it.\n\t\tif len(i.Args) == 0 {\n\t\t\tb := []byte(i.TemplateString)\n\n\t\t\t// Make the package\n\t\t\tp := stressClient.NewPackage(stressClient.Query, b, i.StatementID, i.Tracer)\n\n\t\t\t// Increment the tracer\n\t\t\ti.Tracer.Add(1)\n\n\t\t\t// Send the package\n\t\t\ts.SendPackage(p)\n\n\t\t} else {\n\t\t\t// Otherwise cherry pick field values from the commune?\n\n\t\t\t// TODO: Currently the program lock up here if s.GetPoint\n\t\t\t//       cannot return a value, which can happen.\n\t\t\t// See insert.go\n\t\t\ts.Lock()\n\t\t\tpoint = s.GetPoint(i.Name, s.Precision)\n\t\t\ts.Unlock()\n\n\t\t\tsetMapValues(vals, point)\n\n\t\t\t// Set the template string with args from the commune\n\t\t\tb := 
[]byte(fmt.Sprintf(i.TemplateString, setArgs(vals, i.Args)...))\n\n\t\t\t// Make the package\n\t\t\tp := stressClient.NewPackage(stressClient.Query, b, i.StatementID, i.Tracer)\n\n\t\t\t// Increment the tracer\n\t\t\ti.Tracer.Add(1)\n\n\t\t\t// Send the package\n\t\t\ts.SendPackage(p)\n\n\t\t}\n\t}\n\n\t// Wait for all operations to finish\n\ti.Tracer.Wait()\n\n\t// Stop time timer\n\ti.runtime = time.Since(runtime)\n}\n\n// Report statisfies the Statement Interface\nfunc (i *QueryStatement) Report(s *stressClient.StressTest) string {\n\t// Pull data via StressTest client\n\tallData := s.GetStatementResults(i.StatementID, \"query\")\n\n\tif len(allData) == 0 || allData[0].Series == nil {\n\t\tlog.Fatalf(\"No data returned for query report\\n  Statement Name: %v\\n  Statement ID: %v\\n\", i.Name, i.StatementID)\n\t}\n\n\tqr := &queryReport{\n\t\tname:    i.Name,\n\t\tcolumns: allData[0].Series[0].Columns,\n\t\tvalues:  allData[0].Series[0].Values,\n\t}\n\n\tresponseTimes := responseTimes(qr.columns, qr.values)\n\n\tqr.percentile = percentile(responseTimes)\n\tqr.avgResponseTime = avgDuration(responseTimes)\n\tqr.stdDevResponseTime = stddevDuration(responseTimes)\n\tqr.successfulReads = countSuccesses(qr.columns, qr.values)\n\tqr.responseBytes = numberBytes(qr.columns, qr.values)\n\n\treturn qr.String()\n}\n\nfunc getRandomTagPair(m models.Tags) string {\n\tfor k, v := range m {\n\t\treturn fmt.Sprintf(\"%v='%v'\", k, v)\n\t}\n\n\treturn \"\"\n}\n\nfunc getRandomFieldKey(m map[string]interface{}) string {\n\tfor k := range m {\n\t\treturn fmt.Sprintf(\"%v\", k)\n\t}\n\n\treturn \"\"\n}\n\nfunc setMapValues(m map[string]interface{}, p models.Point) {\n\tfields, err := p.Fields()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tm[\"%f\"] = getRandomFieldKey(fields)\n\tm[\"%m\"] = string(p.Name())\n\tm[\"%t\"] = getRandomTagPair(p.Tags())\n\tm[\"%a\"] = p.UnixNano()\n}\n\nfunc setArgs(m map[string]interface{}, args []string) []interface{} {\n\tvalues := make([]interface{}, 
len(args))\n\tfor i, arg := range args {\n\t\tvalues[i] = m[arg]\n\t}\n\treturn values\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/query_test.go",
    "content": "package statement\n\nimport (\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/stress/v2/stress_client\"\n)\n\nfunc TestQuerySetID(t *testing.T) {\n\te := newTestQuery()\n\tnewID := \"oaijnifo\"\n\te.SetID(newID)\n\tif e.StatementID != newID {\n\t\tt.Errorf(\"Expected: %v\\nGot: %v\\n\", newID, e.StatementID)\n\t}\n}\n\nfunc TestQueryRun(t *testing.T) {\n\ti := newTestQuery()\n\ts, packageCh, _ := stressClient.NewTestStressTest()\n\t// Listen to the other side of the directiveCh\n\tgo func() {\n\t\tfor pkg := range packageCh {\n\t\t\tif i.TemplateString != string(pkg.Body) {\n\t\t\t\tt.Fail()\n\t\t\t}\n\t\t\tpkg.Tracer.Done()\n\t\t}\n\t}()\n\ti.Run(s)\n}\n\nfunc newTestQuery() *QueryStatement {\n\treturn &QueryStatement{\n\t\tStatementID:    \"foo_ID\",\n\t\tName:           \"foo_name\",\n\t\tTemplateString: \"SELECT count(value) FROM cpu\",\n\t\tArgs:           []string{},\n\t\tCount:          5,\n\t\tTracer:         stressClient.NewTracer(map[string]string{}),\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/report.go",
    "content": "package statement\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"sort\"\n\t\"time\"\n\n\tinflux \"github.com/influxdata/influxdb/client/v2\"\n)\n\n// TODO: Refactor this file to utilize a common interface\n// This will make adding new reports easier in the future\n\n// Runs performance numbers for insert statements\ntype insertReport struct {\n\tname               string\n\tnumRetries         int\n\tpointsPerSecond    int\n\tsuccessfulWrites   int\n\tavgRequestBytes    int\n\tavgResponseTime    time.Duration\n\tstdDevResponseTime time.Duration\n\tpercentile         time.Duration\n\n\tcolumns []string\n\tvalues  [][]interface{}\n}\n\n// Returns the version of the report that is output to STDOUT\nfunc (ir *insertReport) String() string {\n\ttmplString := `Write Statement:                    %v\n  Points/Sec:                          %v\n  Resp Time Average:                   %v\n  Resp Time Standard Deviation:        %v\n  95th Percentile Write Response:      %v\n  Average Request Bytes:               %v\n  Successful Write Reqs:               %v\n  Retries:                             %v`\n\n\treturn fmt.Sprintf(tmplString,\n\t\tir.name,\n\t\tir.pointsPerSecond,\n\t\tir.avgResponseTime,\n\t\tir.stdDevResponseTime,\n\t\tir.percentile,\n\t\tir.avgRequestBytes,\n\t\tir.successfulWrites,\n\t\tir.numRetries)\n}\n\n// Returns a point representation of the report to be written to the ResultsDB\nfunc (ir *insertReport) Point() *influx.Point {\n\tmeasurement := \"testDefault\"\n\ttags := map[string]string{}\n\tfields := map[string]interface{}{\"field\": \"blank\"}\n\tpoint, err := influx.NewPoint(measurement, tags, fields, time.Now())\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating insertReport point\\n  measurement: %v\\n  tags: %v\\n  fields: %v\\n  error: %v\\n\", measurement, tags, fields, err)\n\t}\n\treturn point\n}\n\n// Runs performance numbers for query statements\ntype queryReport struct {\n\tname                
string\n\tsuccessfulReads     int\n\tresponseBytes       int\n\tstddevResponseBytes int\n\tavgResponseTime     time.Duration\n\tstdDevResponseTime  time.Duration\n\tpercentile          time.Duration\n\n\tcolumns []string\n\tvalues  [][]interface{}\n}\n\n// Returns the version of the report that is output to STDOUT\nfunc (qr *queryReport) String() string {\n\ttmplString := `Query Statement:                    %v\n  Resp Time Average:                   %v\n  Resp Time Standard Deviation:        %v\n  95th Percentile Read Response:       %v\n  Query Resp Bytes Average:            %v bytes\n  Successful Queries:                  %v`\n\n\treturn fmt.Sprintf(tmplString,\n\t\tqr.name,\n\t\tqr.avgResponseTime,\n\t\tqr.stdDevResponseTime,\n\t\tqr.percentile,\n\t\tqr.responseBytes,\n\t\tqr.successfulReads)\n}\n\n// Returns a point representation of the report to be written to the ResultsDB\nfunc (qr *queryReport) Point() *influx.Point {\n\tmeasurement := \"testDefault\"\n\ttags := map[string]string{}\n\tfields := map[string]interface{}{\"field\": \"blank\"}\n\tpoint, err := influx.NewPoint(measurement, tags, fields, time.Now())\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating queryReport point\\n  measurement: %v\\n  tags: %v\\n  fields: %v\\n  error: %v\\n\", measurement, tags, fields, err)\n\t}\n\treturn point\n}\n\n// Runs performance numbers for InfluxQL statements\ntype influxQlReport struct {\n\tstatement    string\n\tresponseTime time.Duration\n\tsuccess      bool\n\n\tcolumns []string\n\tvalues  [][]interface{}\n}\n\n// Returns the version of the report that is output to STDOUT\nfunc (iqlr *influxQlReport) String() string {\n\t// Fancy format success\n\tvar success string\n\tswitch iqlr.success {\n\tcase true:\n\t\tsuccess = \"[√]\"\n\tcase false:\n\t\tsuccess = \"[X]\"\n\t}\n\treturn fmt.Sprintf(\"%v '%v' -> %v\", success, iqlr.statement, iqlr.responseTime)\n}\n\n// Returns a point representation of the report to be written to the ResultsDB\nfunc (iqlr 
*influxQlReport) Point() *influx.Point {\n\tmeasurement := \"testDefault\"\n\ttags := map[string]string{}\n\tfields := map[string]interface{}{\"field\": \"blank\"}\n\tpoint, err := influx.NewPoint(measurement, tags, fields, time.Now())\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating influxQL point\\n  measurement: %v\\n  tags: %v\\n  fields: %v\\n  error: %v\\n\", measurement, tags, fields, err)\n\t}\n\treturn point\n}\n\n// Given a field or tag name this function returns the index where the values are found\nfunc getColumnIndex(col string, columns []string) int {\n\tindex := -1\n\tfor i, column := range columns {\n\t\tif column == col {\n\t\t\tindex = i\n\t\t}\n\t}\n\treturn index\n}\n\n// Given a full set of results pulls the average num_bytes\nfunc numberBytes(columns []string, values [][]interface{}) int {\n\tout := 0\n\tindex := getColumnIndex(\"num_bytes\", columns)\n\tfor _, val := range values {\n\t\treqBytes, err := val[index].(json.Number).Int64()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error coercing json.Number to Int64\\n  json.Number:%v\\n  error: %v\\n\", val[index], err)\n\t\t}\n\t\tout += int(reqBytes)\n\t}\n\treturn out / len(values)\n}\n\n// Counts the number of 200(query) or 204(write) responses and returns them\nfunc countSuccesses(columns []string, values [][]interface{}) (out int) {\n\tindex := getColumnIndex(\"status_code\", columns)\n\tfor _, val := range values {\n\t\tstatus, err := val[index].(json.Number).Int64()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error coercing json.Number to Int64\\n  json.Number:%v\\n  error: %v\\n\", val[index], err)\n\t\t}\n\t\tif status == 204 || status == 200 {\n\t\t\tout++\n\t\t}\n\t}\n\treturn out\n}\n\n// Counts number of 500 status codes\nfunc countRetries(columns []string, values [][]interface{}) (out int) {\n\tindex := getColumnIndex(\"status_code\", columns)\n\tfor _, val := range values {\n\t\tstatus, err := val[index].(json.Number).Int64()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error coercing 
json.Number to Int64\\n  json.Number:%v\\n  error: %v\\n\", val[index], err)\n\t\t}\n\t\tif status == 500 {\n\t\t\tout++\n\t\t}\n\t}\n\treturn out\n}\n\n// Pulls out the response_time_ns values and formats them into ResponseTimes for reporting\nfunc responseTimes(columns []string, values [][]interface{}) (rs ResponseTimes) {\n\trs = make([]ResponseTime, 0)\n\tindex := getColumnIndex(\"response_time_ns\", columns)\n\tfor _, val := range values {\n\t\trespTime, err := val[index].(json.Number).Int64()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error coercing json.Number to Int64\\n  json.Number:%v\\n  error: %v\\n\", val[index], err)\n\t\t}\n\t\trs = append(rs, NewResponseTime(int(respTime)))\n\t}\n\treturn rs\n}\n\n// Returns the 95th perecntile response time\nfunc percentile(rs ResponseTimes) time.Duration {\n\tsort.Sort(rs)\n\treturn time.Duration(rs[(len(rs) * 19 / 20)].Value)\n}\n\n// Returns the average response time\nfunc avgDuration(rs ResponseTimes) (out time.Duration) {\n\tfor _, t := range rs {\n\t\tout += time.Duration(t.Value)\n\t}\n\treturn out / time.Duration(len(rs))\n}\n\n// Returns the standard deviation of a sample of response times\nfunc stddevDuration(rs ResponseTimes) (out time.Duration) {\n\tavg := avgDuration(rs)\n\n\tfor _, t := range rs {\n\t\tout += (avg - time.Duration(t.Value)) * (avg - time.Duration(t.Value))\n\t}\n\n\treturn time.Duration(int64(math.Sqrt(float64(out) / float64(len(rs)))))\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/report_test.go",
    "content": "package statement\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestInsertReportString(t *testing.T) {\n\tir := newTestInsertReport()\n\ttmplString := `Write Statement:                    %v\n  Points/Sec:                          %v\n  Resp Time Average:                   %v\n  Resp Time Standard Deviation:        %v\n  95th Percentile Write Response:      %v\n  Average Request Bytes:               %v\n  Successful Write Reqs:               %v\n  Retries:                             %v`\n\texpected := fmt.Sprintf(tmplString,\n\t\tir.name,\n\t\tir.pointsPerSecond,\n\t\tir.avgResponseTime,\n\t\tir.stdDevResponseTime,\n\t\tir.percentile,\n\t\tir.avgRequestBytes,\n\t\tir.successfulWrites,\n\t\tir.numRetries)\n\tgot := ir.String()\n\tif expected != got {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestInsertReportPoint(t *testing.T) {\n\tir := newTestInsertReport()\n\texpected := \"testDefault\"\n\tgot := strings.Split(ir.Point().String(), \" \")[0]\n\tif expected != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n}\n\nfunc TestQueryReportString(t *testing.T) {\n\tqr := newTestQueryReport()\n\ttmplString := `Query Statement:                    %v\n  Resp Time Average:                   %v\n  Resp Time Standard Deviation:        %v\n  95th Percentile Read Response:       %v\n  Query Resp Bytes Average:            %v bytes\n  Successful Queries:                  %v`\n\texpected := fmt.Sprintf(tmplString,\n\t\tqr.name,\n\t\tqr.avgResponseTime,\n\t\tqr.stdDevResponseTime,\n\t\tqr.percentile,\n\t\tqr.responseBytes,\n\t\tqr.successfulReads)\n\tgot := qr.String()\n\tif expected != got {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestQueryReportPoint(t *testing.T) {\n\tqr := newTestQueryReport()\n\texpected := \"testDefault\"\n\tgot := strings.Split(qr.Point().String(), \" \")[0]\n\tif expected != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n}\n\nfunc TestInfluxQLReportString(t 
*testing.T) {\n\tiqlr := newTestInfluxQLReport()\n\texpected := fmt.Sprintf(\"[X] '%v' -> %v\", iqlr.statement, iqlr.responseTime)\n\tgot := iqlr.String()\n\tif expected != got {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestInfluxQLReportPoint(t *testing.T) {\n\tiqlr := newTestInfluxQLReport()\n\texpected := \"testDefault\"\n\tgot := strings.Split(iqlr.Point().String(), \" \")[0]\n\tif expected != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n}\n\nfunc newTestInsertReport() *insertReport {\n\treturn &insertReport{\n\t\tname:               \"foo_name\",\n\t\tnumRetries:         0,\n\t\tpointsPerSecond:    500000,\n\t\tsuccessfulWrites:   20000,\n\t\tavgRequestBytes:    18932,\n\t\tavgResponseTime:    time.Duration(int64(20000)),\n\t\tstdDevResponseTime: time.Duration(int64(20000)),\n\t\tpercentile:         time.Duration(int64(20000)),\n\t}\n}\n\nfunc newTestQueryReport() *queryReport {\n\treturn &queryReport{\n\t\tname:                \"foo_name\",\n\t\tsuccessfulReads:     2000,\n\t\tresponseBytes:       39049,\n\t\tstddevResponseBytes: 9091284,\n\t\tavgResponseTime:     139082,\n\t\tstdDevResponseTime:  29487,\n\t\tpercentile:          8273491,\n\t}\n}\n\nfunc newTestInfluxQLReport() *influxQlReport {\n\treturn &influxQlReport{\n\t\tstatement:    \"foo_name\",\n\t\tresponseTime: time.Duration(int64(20000)),\n\t\tsuccess:      false,\n\t}\n}\n\nfunc TestGetColumnIndex(t *testing.T) {\n\tcol := \"thing\"\n\tcolumns := []string{\"thing\"}\n\texpected := 0\n\tgot := getColumnIndex(col, columns)\n\tif expected != got {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestNumberBytes(t *testing.T) {\n\tcolumns := []string{\"num_bytes\"}\n\tvalues := [][]interface{}{[]interface{}{json.Number(\"1\")}}\n\texpected := 1\n\tgot := numberBytes(columns, values)\n\tif expected != got {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestCountSuccesses(t *testing.T) {\n\tcolumns := []string{\"status_code\"}\n\tvalues := [][]interface{}{[]interface{}{json.Number(\"200\")}}\n\texpected := 1\n\tgot := 
countSuccesses(columns, values)\n\tif expected != got {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestCountRetries(t *testing.T) {\n\tcolumns := []string{\"status_code\"}\n\tvalues := [][]interface{}{[]interface{}{json.Number(\"500\")}}\n\texpected := 1\n\tgot := countRetries(columns, values)\n\tif expected != got {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestResponseTimes(t *testing.T) {\n\tcolumns := []string{\"response_time_ns\"}\n\tvalues := [][]interface{}{[]interface{}{json.Number(\"380\")}}\n\texpected := ResponseTimes([]ResponseTime{NewResponseTime(380)})\n\tgot := responseTimes(columns, values)\n\tif expected[0].Value != got[0].Value {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestPercentile(t *testing.T) {\n\trs := createTestResponseTimes()\n\texpected := time.Duration(21)\n\tgot := percentile(rs)\n\tif expected != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n}\n\nfunc TestAvgDuration(t *testing.T) {\n\trs := createTestResponseTimes()\n\texpected := time.Duration(11)\n\tgot := avgDuration(rs)\n\tif expected != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n}\n\nfunc TestStddevDuration(t *testing.T) {\n\trs := createTestResponseTimes()\n\texpected := time.Duration(6)\n\tgot := stddevDuration(rs)\n\tif expected != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n}\n\nfunc createTestResponseTimes() ResponseTimes {\n\trstms := []int{1, 2, 3, 4, 5, 6, 7, 13, 14, 15, 16, 17, 18, 19, 8, 9, 10, 11, 12, 20, 21, 22}\n\trs := []ResponseTime{}\n\tfor _, rst := range rstms {\n\t\trs = append(rs, NewResponseTime(rst))\n\t}\n\treturn rs\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/response_time.go",
    "content": "package statement\n\nimport (\n\t\"time\"\n)\n\n// ResponseTime is a struct that contains `Value`\n// `Time` pairing.\ntype ResponseTime struct {\n\tValue int\n\tTime  time.Time\n}\n\n// NewResponseTime returns a new response time\n// with value `v` and time `time.Now()`.\nfunc NewResponseTime(v int) ResponseTime {\n\tr := ResponseTime{Value: v, Time: time.Now()}\n\treturn r\n}\n\n// ResponseTimes is a slice of response times\ntype ResponseTimes []ResponseTime\n\n// Implements the `Len` method for the\n// sort.Interface type\nfunc (rs ResponseTimes) Len() int {\n\treturn len(rs)\n}\n\n// Implements the `Less` method for the\n// sort.Interface type\nfunc (rs ResponseTimes) Less(i, j int) bool {\n\treturn rs[i].Value < rs[j].Value\n}\n\n// Implements the `Swap` method for the\n// sort.Interface type\nfunc (rs ResponseTimes) Swap(i, j int) {\n\trs[i], rs[j] = rs[j], rs[i]\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/response_time_test.go",
    "content": "package statement\n\nimport (\n\t\"testing\"\n)\n\nfunc TestNewResponseTime(t *testing.T) {\n\tvalue := 100000\n\trs := NewResponseTime(value)\n\tif rs.Value != value {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", value, rs.Value)\n\t}\n}\n\nfunc newResponseTimes() ResponseTimes {\n\treturn []ResponseTime{\n\t\tNewResponseTime(100),\n\t\tNewResponseTime(10),\n\t}\n}\n\nfunc TestResponseTimeLen(t *testing.T) {\n\trs := newResponseTimes()\n\tif rs.Len() != 2 {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestResponseTimeLess(t *testing.T) {\n\trs := newResponseTimes()\n\tless := rs.Less(1, 0)\n\tif !less {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestResponseTimeSwap(t *testing.T) {\n\trs := newResponseTimes()\n\trs0 := rs[0]\n\trs1 := rs[1]\n\trs.Swap(0, 1)\n\tif rs0 != rs[1] || rs1 != rs[0] {\n\t\tt.Fail()\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/set.go",
    "content": "package statement\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/influxdata/influxdb/stress/v2/stress_client\"\n)\n\n// SetStatement set state variables for the test\ntype SetStatement struct {\n\tVar   string\n\tValue string\n\n\tStatementID string\n\n\tTracer *stressClient.Tracer\n}\n\n// SetID statisfies the Statement Interface\nfunc (i *SetStatement) SetID(s string) {\n\ti.StatementID = s\n}\n\n// Run statisfies the Statement Interface\nfunc (i *SetStatement) Run(s *stressClient.StressTest) {\n\ti.Tracer = stressClient.NewTracer(make(map[string]string))\n\td := stressClient.NewDirective(strings.ToLower(i.Var), strings.ToLower(i.Value), i.Tracer)\n\tswitch d.Property {\n\t// Needs to be set on both StressTest and stressClient\n\t// Set the write percison for points generated\n\tcase \"precision\":\n\t\ts.Precision = d.Value\n\t\ti.Tracer.Add(1)\n\t\ts.SendDirective(d)\n\t// Lives on StressTest\n\t// Set the date for the first point entered into the database\n\tcase \"startdate\":\n\t\ts.Lock()\n\t\ts.StartDate = d.Value\n\t\ts.Unlock()\n\t// Lives on StressTest\n\t// Set the BatchSize for writes\n\tcase \"batchsize\":\n\t\ts.Lock()\n\t\ts.BatchSize = parseInt(d.Value)\n\t\ts.Unlock()\n\t// All other variables live on stressClient\n\tdefault:\n\t\ti.Tracer.Add(1)\n\t\ts.SendDirective(d)\n\t}\n\ti.Tracer.Wait()\n}\n\n// Report statisfies the Statement Interface\nfunc (i *SetStatement) Report(s *stressClient.StressTest) string {\n\treturn fmt.Sprintf(\"SET %v = '%v'\", i.Var, i.Value)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/set_test.go",
    "content": "package statement\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/stress/v2/stress_client\"\n)\n\nfunc TestSetSetID(t *testing.T) {\n\te := newTestSet(\"database\", \"foo\")\n\tnewID := \"oaijnifo\"\n\te.SetID(newID)\n\tif e.StatementID != newID {\n\t\tt.Errorf(\"Expected: %v\\nGot: %v\\n\", newID, e.StatementID)\n\t}\n}\n\nfunc TestSetRun(t *testing.T) {\n\tproperties := []string{\n\t\t\"precision\",\n\t\t\"startdate\",\n\t\t\"batchsize\",\n\t\t\"resultsaddress\",\n\t\t\"testname\",\n\t\t\"addresses\",\n\t\t\"writeinterval\",\n\t\t\"queryinterval\",\n\t\t\"database\",\n\t\t\"writeconcurrency\",\n\t\t\"queryconcurrency\",\n\t}\n\tfor _, prop := range properties {\n\t\ttestSetRunUtl(t, prop, \"1\")\n\t}\n}\n\nfunc testSetRunUtl(t *testing.T, property string, value string) {\n\ti := newTestSet(property, value)\n\ts, _, directiveCh := stressClient.NewTestStressTest()\n\t// Listen to the other side of the directiveCh\n\tgo func() {\n\t\tfor d := range directiveCh {\n\t\t\tif i.Var != d.Property {\n\t\t\t\tt.Errorf(\"wrong property sent to stressClient\\n  expected: %v\\n got: %v\\n\", i.Var, d.Property)\n\t\t\t}\n\t\t\tif i.Value != d.Value {\n\t\t\t\tt.Errorf(\"wrong value sent to stressClient\\n  expected: %v\\n  got: %v\\n\", i.Value, d.Value)\n\t\t\t}\n\t\t\td.Tracer.Done()\n\t\t}\n\t}()\n\t// Run the statement\n\ti.Run(s)\n\t// Check the result\n\tswitch i.Var {\n\tcase \"precision\":\n\t\tif i.Value != s.Precision {\n\t\t\tt.Errorf(\"Failed to set %v\\n\", i.Var)\n\t\t}\n\tcase \"startdate\":\n\t\tif i.Value != s.StartDate {\n\t\t\tt.Errorf(\"Failed to set %v\\n\", i.Var)\n\t\t}\n\tcase \"batchsize\":\n\t\tif parseInt(i.Value) != s.BatchSize {\n\t\t\tt.Errorf(\"Failed to set %v\\n\", i.Var)\n\t\t}\n\t// TODO: Actually test this\n\tcase \"resultsaddress\":\n\tdefault:\n\t}\n}\n\nfunc TestSetReport(t *testing.T) {\n\tset := newTestSet(\"this\", \"that\")\n\ts, _, _ := stressClient.NewTestStressTest()\n\trpt := set.Report(s)\n\texpected := fmt.Sprintf(\"SET %v = '%v'\", set.Var, set.Value)\n\tif rpt != expected {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, rpt)\n\t}\n}\n\nfunc newTestSet(toSet, value string) *SetStatement {\n\treturn &SetStatement{\n\t\tVar:         toSet,\n\t\tValue:       value,\n\t\tTracer:      stressClient.NewTracer(make(map[string]string)),\n\t\tStatementID: \"fooID\",\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/statement.go",
    "content": "package statement\n\nimport (\n\t\"log\"\n\t\"strconv\"\n\n\t\"github.com/influxdata/influxdb/stress/v2/stress_client\"\n)\n\n// Statement is the common interface to shape the testing environment and prepare database requests\n// The parser turns the 'statements' in the config file into Statements\ntype Statement interface {\n\tRun(s *stressClient.StressTest)\n\tReport(s *stressClient.StressTest) string\n\tSetID(s string)\n}\n\nfunc parseInt(s string) int {\n\ti, err := strconv.ParseInt(s, 10, 64)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error parsing integer:\\n  String: %v\\n  Error: %v\\n\", s, err)\n\t}\n\treturn int(i)\n}\n\nfunc parseFloat(s string) int {\n\ti, err := strconv.ParseFloat(s, 64)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error parsing integer:\\n  String: %v\\n  Error: %v\\n\", s, err)\n\t}\n\treturn int(i)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/template.go",
    "content": "package statement\n\n// A Template contains all information to fill in templated variables in inset and query statements\ntype Template struct {\n\tTags     []string\n\tFunction *Function\n}\n\n// Templates are a collection of Template\ntype Templates []*Template\n\n// Init makes Stringers out of the Templates for quick point creation\nfunc (t Templates) Init(seriesCount int) Stringers {\n\tarr := make([]Stringer, len(t))\n\tfor i, tmp := range t {\n\t\tif len(tmp.Tags) == 0 {\n\t\t\tarr[i] = tmp.Function.NewStringer(seriesCount)\n\t\t\tcontinue\n\t\t}\n\t\tarr[i] = tmp.NewTagFunc()\n\t}\n\treturn arr\n}\n\n// Calculates the number of series implied by a template\nfunc (t *Template) numSeries() int {\n\t// If !t.Tags then tag cardinality is t.Function.Count\n\tif len(t.Tags) == 0 {\n\t\treturn t.Function.Count\n\t}\n\t// Else tag cardinality is len(t.Tags)\n\treturn len(t.Tags)\n}\n\n// NewTagFunc returns a Stringer that loops through the given tags\nfunc (t *Template) NewTagFunc() Stringer {\n\tif len(t.Tags) == 0 {\n\t\treturn func() string { return \"EMPTY TAGS\" }\n\t}\n\n\ti := 0\n\treturn func() string {\n\t\ts := t.Tags[i]\n\t\ti = (i + 1) % len(t.Tags)\n\t\treturn s\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/template_test.go",
    "content": "package statement\n\nimport (\n\t\"testing\"\n)\n\nfunc TestNewTagFunc(t *testing.T) {\n\twtags := newTestTagsTemplate()\n\twfunc := newTestFunctionTemplate()\n\n\texpected := wtags.Tags[0]\n\tgot := wtags.NewTagFunc()()\n\tif got != expected {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n\texpected = \"EMPTY TAGS\"\n\tgot = wfunc.NewTagFunc()()\n\tif got != expected {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n}\n\nfunc TestNumSeries(t *testing.T) {\n\twtags := newTestTagsTemplate()\n\twfunc := newTestFunctionTemplate()\n\n\texpected := len(wtags.Tags)\n\tgot := wtags.numSeries()\n\tif got != expected {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n\texpected = wfunc.Function.Count\n\tgot = wfunc.numSeries()\n\tif got != expected {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n}\n\nfunc TestTemplatesInit(t *testing.T) {\n\ttmpls := newTestTemplates()\n\ts := tmpls.Init(5)\n\tvals := s.Eval(spoofTime)\n\texpected := tmpls[0].Tags[0]\n\tgot := vals[0]\n\tif got != expected {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n\texpected = \"0i\"\n\tgot = vals[1]\n\tif got != expected {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n}\n\nfunc newTestTemplates() Templates {\n\treturn []*Template{\n\t\tnewTestTagsTemplate(),\n\t\tnewTestFunctionTemplate(),\n\t}\n}\n\nfunc newTestTagsTemplate() *Template {\n\treturn &Template{\n\t\tTags: []string{\"thing\", \"other_thing\"},\n\t}\n}\n\nfunc newTestFunctionTemplate() *Template {\n\treturn &Template{\n\t\tFunction: newIntIncFunction(),\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/timestamp.go",
    "content": "package statement\n\nimport (\n\t\"log\"\n\t\"time\"\n)\n\n// A Timestamp contains all informaiton needed to generate timestamps for points created by InsertStatements\ntype Timestamp struct {\n\tCount    int\n\tDuration time.Duration\n\tJitter   bool\n}\n\n// Time returns the next timestamp needed by the InsertStatement\nfunc (t *Timestamp) Time(startDate string, series int, precision string) func() int64 {\n\tvar start time.Time\n\tvar err error\n\n\tif startDate == \"now\" {\n\t\tstart = time.Now()\n\t} else {\n\t\tstart, err = time.Parse(\"2006-01-02\", startDate)\n\t}\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error parsing start time from StartDate\\n  string: %v\\n  error: %v\\n\", startDate, err)\n\t}\n\n\treturn nextTime(start, t.Duration, series, precision)\n}\n\nfunc nextTime(ti time.Time, step time.Duration, series int, precision string) func() int64 {\n\tt := ti\n\tcount := 0\n\treturn func() int64 {\n\t\tcount++\n\t\tif count > series {\n\t\t\tt = t.Add(step)\n\t\t\tcount = 1\n\t\t}\n\n\t\tvar timestamp int64\n\t\tif precision == \"s\" {\n\t\t\ttimestamp = t.Unix()\n\t\t} else {\n\t\t\ttimestamp = t.UnixNano()\n\t\t}\n\t\treturn timestamp\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/timestamp_test.go",
    "content": "package statement\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestTimestampTime(t *testing.T) {\n\ttstp := newTestTimestamp()\n\tfunction := tstp.Time(\"2016-01-01\", 100, \"s\")\n\texpected := int64(1451606400)\n\tgot := function()\n\tif expected != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n\tfunction = tstp.Time(\"now\", 100, \"ns\")\n\texpected = time.Now().UnixNano()\n\tgot = function()\n\tif expected < got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n}\n\nfunc newTestTimestamp() *Timestamp {\n\tduration, _ := time.ParseDuration(\"10s\")\n\treturn &Timestamp{\n\t\tCount:    5001,\n\t\tDuration: duration,\n\t\tJitter:   false,\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/wait.go",
    "content": "package statement\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/stress/v2/stress_client\"\n)\n\n// WaitStatement is a Statement Implementation to prevent the test from returning to early when running GoStatements\ntype WaitStatement struct {\n\tStatementID string\n\n\truntime time.Duration\n}\n\n// SetID statisfies the Statement Interface\nfunc (w *WaitStatement) SetID(s string) {\n\tw.StatementID = s\n}\n\n// Run statisfies the Statement Interface\nfunc (w *WaitStatement) Run(s *stressClient.StressTest) {\n\truntime := time.Now()\n\ts.Wait()\n\tw.runtime = time.Since(runtime)\n}\n\n// Report statisfies the Statement Interface\nfunc (w *WaitStatement) Report(s *stressClient.StressTest) string {\n\treturn fmt.Sprintf(\"WAIT -> %v\", w.runtime)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/statement/wait_test.go",
    "content": "package statement\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/stress/v2/stress_client\"\n)\n\nfunc TestWaitSetID(t *testing.T) {\n\te := newTestWait()\n\tnewID := \"oaijnifo\"\n\te.SetID(newID)\n\tif e.StatementID != newID {\n\t\tt.Errorf(\"Expected: %v\\ngott: %v\\n\", newID, e.StatementID)\n\t}\n}\n\nfunc TestWaitRun(t *testing.T) {\n\te := newTestWait()\n\ts, _, _ := stressClient.NewTestStressTest()\n\te.Run(s)\n\tif e == nil {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestWaitReport(t *testing.T) {\n\te := newTestWait()\n\ts, _, _ := stressClient.NewTestStressTest()\n\trpt := e.Report(s)\n\tif !strings.Contains(rpt, \"WAIT\") {\n\t\tt.Fail()\n\t}\n}\n\nfunc newTestWait() *WaitStatement {\n\treturn &WaitStatement{\n\t\tStatementID: \"fooID\",\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/commune.go",
    "content": "package stressClient\n\nimport (\n\t\"log\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n)\n\n// Communes are a method for passing points between InsertStatements and QueryStatements.\n\ntype commune struct {\n\tch          chan string\n\tstoredPoint models.Point\n}\n\n// NewCommune creates a new commune with a buffered chan of length n\nfunc newCommune(n int) *commune {\n\treturn &commune{ch: make(chan string, n)}\n}\n\nfunc (c *commune) point(precision string) models.Point {\n\n\tpt := []byte(<-c.ch)\n\n\tp, err := models.ParsePointsWithPrecision(pt, time.Now().UTC(), precision)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error parsing point for commune\\n  point: %v\\n  error: %v\\n\", pt, err)\n\t}\n\n\tif len(p) == 0 {\n\t\treturn c.storedPoint\n\t}\n\n\tc.storedPoint = p[0]\n\treturn p[0]\n}\n\n// SetCommune creates a new commune on the StressTest\nfunc (st *StressTest) SetCommune(name string) chan<- string {\n\tcom := newCommune(10)\n\tst.communes[name] = com\n\n\treturn com.ch\n}\n\n// GetPoint is called by a QueryStatement and retrieves a point sent by the associated InsertStatement\nfunc (st *StressTest) GetPoint(name, precision string) models.Point {\n\tp := st.communes[name].point(precision)\n\n\t// Function needs to return a point. Panic if it doesn't\n\tif p == nil {\n\t\tlog.Fatal(\"Commune not returning point\")\n\t}\n\n\treturn p\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/commune_test.go",
    "content": "package stressClient\n\nimport (\n\t\"testing\"\n)\n\nfunc TestCommunePoint(t *testing.T) {\n\tcomm := newCommune(5)\n\tpt := \"write,tag=tagVal fooField=5 1460912595\"\n\tcomm.ch <- pt\n\tpoint := comm.point(\"s\")\n\tif string(point.Name()) != \"write\" {\n\t\tt.Errorf(\"expected: write\\ngot: %v\", string(point.Name()))\n\t}\n\tif point.Tags().GetString(\"tag\") != \"tagVal\" {\n\t\tt.Errorf(\"expected: tagVal\\ngot: %v\", point.Tags().GetString(\"tag\"))\n\t}\n\tfields, err := point.Fields()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif int(fields[\"fooField\"].(float64)) != 5 {\n\t\tt.Errorf(\"expected: 5\\ngot: %v\\n\", fields[\"fooField\"])\n\t}\n\t// Make sure commune returns the prev point\n\tcomm.ch <- \"\"\n\tpoint = comm.point(\"s\")\n\tif string(point.Name()) != \"write\" {\n\t\tt.Errorf(\"expected: write\\ngot: %v\", string(point.Name()))\n\t}\n\tif point.Tags().GetString(\"tag\") != \"tagVal\" {\n\t\tt.Errorf(\"expected: tagVal\\ngot: %v\", point.Tags().GetString(\"tag\"))\n\t}\n\tif int(fields[\"fooField\"].(float64)) != 5 {\n\t\tt.Errorf(\"expected: 5\\ngot: %v\\n\", fields[\"fooField\"])\n\t}\n}\n\nfunc TestSetCommune(t *testing.T) {\n\tsf, _, _ := NewTestStressTest()\n\tch := sf.SetCommune(\"foo_name\")\n\tch <- \"write,tag=tagVal fooField=5 1460912595\"\n\tpt := sf.GetPoint(\"foo_name\", \"s\")\n\tif string(pt.Name()) != \"write\" {\n\t\tt.Errorf(\"expected: write\\ngot: %v\", string(pt.Name()))\n\t}\n\tif pt.Tags().GetString(\"tag\") != \"tagVal\" {\n\t\tt.Errorf(\"expected: tagVal\\ngot: %v\", pt.Tags().GetString(\"tag\"))\n\t}\n\tfields, err := pt.Fields()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif int(fields[\"fooField\"].(float64)) != 5 {\n\t\tt.Errorf(\"expected: 5\\ngot: %v\\n\", fields[\"fooField\"])\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/directive.go",
    "content": "package stressClient\n\n// Directive is a struct to enable communication between SetStatements and the stressClient backend\n// Directives change state for the stress test\ntype Directive struct {\n\tProperty string\n\tValue    string\n\tTracer   *Tracer\n}\n\n// NewDirective creates a new instance of a Directive with the appropriate state variable to change\nfunc NewDirective(property string, value string, tracer *Tracer) Directive {\n\td := Directive{\n\t\tProperty: property,\n\t\tValue:    value,\n\t\tTracer:   tracer,\n\t}\n\treturn d\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/directive_test.go",
    "content": "package stressClient\n\nimport (\n\t\"testing\"\n)\n\nfunc TestNewDirective(t *testing.T) {\n\ttr := NewTracer(map[string]string{})\n\tprop := \"foo_prop\"\n\tval := \"foo_value\"\n\tdir := NewDirective(prop, val, tr)\n\tgot := dir.Property\n\tif prop != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", prop, got)\n\t}\n\tgot = dir.Value\n\tif val != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", val, got)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/package.go",
    "content": "package stressClient\n\n// Package is a struct to enable communication between InsertStatements, QueryStatements and InfluxQLStatements and the stressClient backend\n// Packages carry either writes or queries in the []byte that makes up the Body\ntype Package struct {\n\tT           Type\n\tBody        []byte\n\tStatementID string\n\tTracer      *Tracer\n}\n\n// NewPackage creates a new package with the appropriate payload\nfunc NewPackage(t Type, body []byte, statementID string, tracer *Tracer) Package {\n\tp := Package{\n\t\tT:           t,\n\t\tBody:        body,\n\t\tStatementID: statementID,\n\t\tTracer:      tracer,\n\t}\n\n\treturn p\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/package_test.go",
    "content": "package stressClient\n\nimport (\n\t\"testing\"\n)\n\nfunc TestNewPackage(t *testing.T) {\n\tqry := []byte(\"SELECT * FROM foo\")\n\tstatementID := \"foo_id\"\n\ttr := NewTracer(map[string]string{})\n\tpkg := NewPackage(Query, qry, statementID, tr)\n\tgot := string(pkg.Body)\n\tif string(qry) != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", qry, got)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/reporting.go",
    "content": "package stressClient\n\nimport (\n\t\"log\"\n\t\"strconv\"\n\t\"time\"\n\n\tinflux \"github.com/influxdata/influxdb/client/v2\"\n)\n\n// reporting.go contains functions to emit tags and points from various parts of stressClient\n// These points are then written to the (\"_%v\", sf.TestName) database\n\n// These are the tags that stressClient adds to any response points\nfunc (sc *stressClient) tags(statementID string) map[string]string {\n\ttags := map[string]string{\n\t\t\"number_targets\": fmtInt(len(sc.addresses)),\n\t\t\"precision\":      sc.precision,\n\t\t\"writers\":        fmtInt(sc.wconc),\n\t\t\"readers\":        fmtInt(sc.qconc),\n\t\t\"test_id\":        sc.testID,\n\t\t\"statement_id\":   statementID,\n\t\t\"write_interval\": sc.wdelay,\n\t\t\"query_interval\": sc.qdelay,\n\t}\n\treturn tags\n}\n\n// These are the tags that the StressTest adds to any response points\nfunc (st *StressTest) tags() map[string]string {\n\ttags := map[string]string{\n\t\t\"precision\":  st.Precision,\n\t\t\"batch_size\": fmtInt(st.BatchSize),\n\t}\n\treturn tags\n}\n\n// This function makes a *client.Point for reporting on writes\nfunc (sc *stressClient) writePoint(retries int, statementID string, statusCode int, responseTime time.Duration, addedTags map[string]string, writeBytes int) *influx.Point {\n\n\ttags := sumTags(sc.tags(statementID), addedTags)\n\n\tfields := map[string]interface{}{\n\t\t\"status_code\":      statusCode,\n\t\t\"response_time_ns\": responseTime.Nanoseconds(),\n\t\t\"num_bytes\":        writeBytes,\n\t}\n\n\tpoint, err := influx.NewPoint(\"write\", tags, fields, time.Now())\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating write results point\\n  error: %v\\n\", err)\n\t}\n\n\treturn point\n}\n\n// This function makes a *client.Point for reporting on queries\nfunc (sc *stressClient) queryPoint(statementID string, body []byte, statusCode int, responseTime time.Duration, addedTags map[string]string) *influx.Point {\n\n\ttags := sumTags(sc.tags(statementID), addedTags)\n\n\tfields := map[string]interface{}{\n\t\t\"status_code\":      statusCode,\n\t\t\"num_bytes\":        len(body),\n\t\t\"response_time_ns\": responseTime.Nanoseconds(),\n\t}\n\n\tpoint, err := influx.NewPoint(\"query\", tags, fields, time.Now())\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error creating query results point\\n  error: %v\\n\", err)\n\t}\n\n\treturn point\n}\n\n// Adds two map[string]string together\nfunc sumTags(tags1, tags2 map[string]string) map[string]string {\n\ttags := make(map[string]string)\n\t// Add all tags from first map to return map\n\tfor k, v := range tags1 {\n\t\ttags[k] = v\n\t}\n\t// Add all tags from second map to return map\n\tfor k, v := range tags2 {\n\t\ttags[k] = v\n\t}\n\treturn tags\n}\n\n// Turns an int into a string\nfunc fmtInt(i int) string {\n\treturn strconv.FormatInt(int64(i), 10)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/reporting_test.go",
    "content": "package stressClient\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestNewStressClientTags(t *testing.T) {\n\tpe, _, _ := newTestStressClient(\"localhost:8086\")\n\ttags := pe.tags(\"foo_id\")\n\texpected := fmtInt(len(pe.addresses))\n\tgot := tags[\"number_targets\"]\n\tif expected != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n\texpected = pe.precision\n\tgot = tags[\"precision\"]\n\tif expected != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n\texpected = pe.wdelay\n\tgot = tags[\"write_interval\"]\n\tif expected != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n\texpected = \"foo_id\"\n\tgot = tags[\"statement_id\"]\n\tif expected != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n}\n\nfunc TestNewStressTestTags(t *testing.T) {\n\tsf, _, _ := NewTestStressTest()\n\ttags := sf.tags()\n\texpected := sf.Precision\n\tgot := tags[\"precision\"]\n\tif expected != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n\texpected = fmtInt(sf.BatchSize)\n\tgot = tags[\"batch_size\"]\n\tif expected != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n}\n\nfunc TestWritePoint(t *testing.T) {\n\tpe, _, _ := newTestStressClient(\"localhost:8086\")\n\tstatementID := \"foo_id\"\n\tresponseCode := 200\n\tresponseTime := time.Duration(10 * time.Millisecond)\n\taddedTags := map[string]string{\"foo_tag\": \"foo_tag_value\"}\n\twriteBytes := 28051\n\tpt := pe.writePoint(1, statementID, responseCode, responseTime, addedTags, writeBytes)\n\tgot := pt.Tags()[\"statement_id\"]\n\tif statementID != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", statementID, got)\n\t}\n\tfields, err := pt.Fields()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgot2 := int(fields[\"status_code\"].(int64))\n\tif responseCode != got2 {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", responseCode, got2)\n\t}\n\texpected := \"write\"\n\tgot = pt.Name()\n\tif expected != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n}\n\nfunc TestQueryPoint(t *testing.T) {\n\tpe, _, _ := newTestStressClient(\"localhost:8086\")\n\tstatementID := \"foo_id\"\n\tresponseCode := 200\n\tbody := []byte{12}\n\tresponseTime := time.Duration(10 * time.Millisecond)\n\taddedTags := map[string]string{\"foo_tag\": \"foo_tag_value\"}\n\tpt := pe.queryPoint(statementID, body, responseCode, responseTime, addedTags)\n\tgot := pt.Tags()[\"statement_id\"]\n\tif statementID != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", statementID, got)\n\t}\n\tfields, err := pt.Fields()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgot2 := int(fields[\"status_code\"].(int64))\n\tif responseCode != got2 {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", responseCode, got2)\n\t}\n\texpected := \"query\"\n\tgot = pt.Name()\n\tif expected != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/response.go",
    "content": "package stressClient\n\nimport (\n\t\"log\"\n\n\tinflux \"github.com/influxdata/influxdb/client/v2\"\n)\n\n// Response holds data scraped from InfluxDB HTTP responses turned into a *influx.Point for reporting\n// See reporting.go for more information\n// The Tracer contains a wait group sent from the statement. It needs to be decremented when the Response is consumed\ntype Response struct {\n\tPoint  *influx.Point\n\tTracer *Tracer\n}\n\n// NewResponse creates a new instance of Response\nfunc NewResponse(pt *influx.Point, tr *Tracer) Response {\n\treturn Response{\n\t\tPoint:  pt,\n\t\tTracer: tr,\n\t}\n}\n\n// AddTags adds additional tags to the point held in Response and returns the point\nfunc (resp Response) AddTags(newTags map[string]string) (*influx.Point, error) {\n\n\t// Pull off the current tags\n\ttags := resp.Point.Tags()\n\n\t// Add the new tags to the current tags\n\tfor tag, tagValue := range newTags {\n\t\ttags[tag] = tagValue\n\t}\n\n\t// Make a new point\n\tfields, err := resp.Point.Fields()\n\tif err != nil {\n\t\treturn nil, err\n\n\t}\n\tpt, err := influx.NewPoint(resp.Point.Name(), tags, fields, resp.Point.Time())\n\n\t// panic on error\n\tif err != nil {\n\t\tlog.Fatalf(\"Error adding tags to response point\\n  point: %v\\n  tags:%v\\n  error: %v\\n\", resp.Point, newTags, err)\n\t}\n\n\treturn pt, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/response_test.go",
    "content": "package stressClient\n\nimport (\n\t\"testing\"\n)\n\nfunc TestNewResponse(t *testing.T) {\n\tpt := NewBlankTestPoint()\n\ttr := NewTracer(map[string]string{})\n\tr := NewResponse(pt, tr)\n\texpected := \"another_tag_value\"\n\ttest, err := r.AddTags(map[string]string{\"another_tag\": \"another_tag_value\"})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgot := test.Tags()[\"another_tag\"]\n\tif expected != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stressTest.go",
    "content": "package stressClient\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\n\tinflux \"github.com/influxdata/influxdb/client/v2\"\n)\n\n// NewStressTest creates the backend for the stress test\nfunc NewStressTest() *StressTest {\n\n\tpackageCh := make(chan Package, 0)\n\tdirectiveCh := make(chan Directive, 0)\n\tresponseCh := make(chan Response, 0)\n\n\tclnt, _ := influx.NewHTTPClient(influx.HTTPConfig{\n\t\tAddr: fmt.Sprintf(\"http://%v/\", \"localhost:8086\"),\n\t})\n\n\ts := &StressTest{\n\t\tTestDB:    \"_stressTest\",\n\t\tPrecision: \"s\",\n\t\tStartDate: \"2016-01-02\",\n\t\tBatchSize: 5000,\n\n\t\tpackageChan:   packageCh,\n\t\tdirectiveChan: directiveCh,\n\n\t\tResultsClient: clnt,\n\t\tResultsChan:   responseCh,\n\t\tcommunes:      make(map[string]*commune),\n\t\tTestID:        randStr(10),\n\t}\n\n\t// Start the client service\n\tstartStressClient(packageCh, directiveCh, responseCh, s.TestID)\n\n\t// Listen for Results coming in\n\ts.resultsListen()\n\n\treturn s\n}\n\n// NewTestStressTest returns a StressTest to be used for testing Statements\nfunc NewTestStressTest() (*StressTest, chan Package, chan Directive) {\n\n\tpackageCh := make(chan Package, 0)\n\tdirectiveCh := make(chan Directive, 0)\n\n\ts := &StressTest{\n\t\tTestDB:    \"_stressTest\",\n\t\tPrecision: \"s\",\n\t\tStartDate: \"2016-01-02\",\n\t\tBatchSize: 5000,\n\n\t\tdirectiveChan: directiveCh,\n\t\tpackageChan:   packageCh,\n\n\t\tcommunes: make(map[string]*commune),\n\t\tTestID:   randStr(10),\n\t}\n\n\treturn s, packageCh, directiveCh\n}\n\n// The StressTest is the Statement facing API that consumes Statement output and coordinates the test results\ntype StressTest struct {\n\tTestID string\n\tTestDB string\n\n\tPrecision string\n\tStartDate string\n\tBatchSize int\n\n\tsync.WaitGroup\n\tsync.Mutex\n\n\tpackageChan   chan<- Package\n\tdirectiveChan chan<- Directive\n\n\tResultsChan   chan Response\n\tcommunes      map[string]*commune\n\tResultsClient influx.Client\n}\n\n// SendPackage is the public facing API for to send Queries and Points\nfunc (st *StressTest) SendPackage(p Package) {\n\tst.packageChan <- p\n}\n\n// SendDirective is the public facing API to set state variables in the test\nfunc (st *StressTest) SendDirective(d Directive) {\n\tst.directiveChan <- d\n}\n\n// Starts a go routine that listens for Results\nfunc (st *StressTest) resultsListen() {\n\tst.createDatabase(st.TestDB)\n\tgo func() {\n\t\tbp := st.NewResultsPointBatch()\n\t\tfor resp := range st.ResultsChan {\n\t\t\tswitch resp.Point.Name() {\n\t\t\tcase \"done\":\n\t\t\t\tst.ResultsClient.Write(bp)\n\t\t\t\tresp.Tracer.Done()\n\t\t\tdefault:\n\t\t\t\t// Add the StressTest tags\n\t\t\t\tpt, err := resp.AddTags(st.tags())\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\t// Add the point to the batch\n\t\t\t\tbp = st.batcher(pt, bp)\n\t\t\t\tresp.Tracer.Done()\n\t\t\t}\n\t\t}\n\t}()\n}\n\n// NewResultsPointBatch creates a new batch of points for the results\nfunc (st *StressTest) NewResultsPointBatch() influx.BatchPoints {\n\tbp, _ := influx.NewBatchPoints(influx.BatchPointsConfig{\n\t\tDatabase:  st.TestDB,\n\t\tPrecision: \"ns\",\n\t})\n\treturn bp\n}\n\n// Batches incoming Result.Point and sends them if the batch reaches 5k in size\nfunc (st *StressTest) batcher(pt *influx.Point, bp influx.BatchPoints) influx.BatchPoints {\n\tif len(bp.Points()) <= 5000 {\n\t\tbp.AddPoint(pt)\n\t} else {\n\t\terr := st.ResultsClient.Write(bp)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error writing performance stats\\n  error: %v\\n\", err)\n\t\t}\n\t\tbp = st.NewResultsPointBatch()\n\t}\n\treturn bp\n}\n\n// Convinence database creation function\nfunc (st *StressTest) createDatabase(db string) {\n\tquery := fmt.Sprintf(\"CREATE DATABASE %v\", db)\n\tres, err := st.ResultsClient.Query(influx.Query{Command: query})\n\tif err != nil {\n\t\tlog.Fatalf(\"error: no running influx server at localhost:8086\")\n\t\tif res.Error() != nil {\n\t\t\tlog.Fatalf(\"error: no running influx server at localhost:8086\")\n\t\t}\n\t}\n}\n\n// GetStatementResults is a convinence function for fetching all results given a StatementID\nfunc (st *StressTest) GetStatementResults(sID, t string) (res []influx.Result) {\n\tqryStr := fmt.Sprintf(`SELECT * FROM \"%v\" WHERE statement_id = '%v'`, t, sID)\n\treturn st.queryTestResults(qryStr)\n}\n\n//  Runs given qry on the test results database and returns the results or nil in case of error\nfunc (st *StressTest) queryTestResults(qry string) (res []influx.Result) {\n\tresponse, err := st.ResultsClient.Query(influx.Query{Command: qry, Database: st.TestDB})\n\tif err == nil {\n\t\tif response.Error() != nil {\n\t\t\tlog.Fatalf(\"Error sending results query\\n  error: %v\\n\", response.Error())\n\t\t}\n\t}\n\tif response.Results[0].Series == nil {\n\t\treturn nil\n\t}\n\treturn response.Results\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stressTest_test.go",
    "content": "package stressClient\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\tinflux \"github.com/influxdata/influxdb/client/v2\"\n)\n\nfunc NewBlankTestPoint() *influx.Point {\n\tmeas := \"measurement\"\n\ttags := map[string]string{\"fooTag\": \"fooTagValue\"}\n\tfields := map[string]interface{}{\"value\": 5920}\n\tutc, _ := time.LoadLocation(\"UTC\")\n\ttimestamp := time.Date(2016, time.Month(4), 20, 0, 0, 0, 0, utc)\n\tpt, _ := influx.NewPoint(meas, tags, fields, timestamp)\n\treturn pt\n}\n\nfunc TestStressTestBatcher(t *testing.T) {\n\tsf, _, _ := NewTestStressTest()\n\tbpconf := influx.BatchPointsConfig{\n\t\tDatabase:  sf.TestDB,\n\t\tPrecision: \"ns\",\n\t}\n\tbp, _ := influx.NewBatchPoints(bpconf)\n\tpt := NewBlankTestPoint()\n\tbp = sf.batcher(pt, bp)\n\tif len(bp.Points()) != 1 {\n\t\tt.Fail()\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stress_client.go",
    "content": "package stressClient\n\nimport (\n\t\"strings\"\n\t\"sync\"\n)\n\n// Type refers to the different Package types\ntype Type int\n\n// There are two package types, Write and Query\nconst (\n\tWrite Type = iota\n\tQuery\n)\n\nfunc startStressClient(packageCh <-chan Package, directiveCh <-chan Directive, responseCh chan<- Response, testID string) {\n\n\tc := &stressClient{\n\t\ttestID: testID,\n\n\t\taddresses: []string{\"localhost:8086\"},\n\t\tssl:       false,\n\t\tusername:  \"\",\n\t\tpassword:  \"\",\n\t\tprecision: \"ns\",\n\t\tdatabase:  \"stress\",\n\t\tstartDate: \"2016-01-01\",\n\t\tqdelay:    \"0s\",\n\t\twdelay:    \"0s\",\n\n\t\twconc: 10,\n\t\tqconc: 5,\n\n\t\tpackageChan:   packageCh,\n\t\tdirectiveChan: directiveCh,\n\n\t\tresponseChan: responseCh,\n\t}\n\t// start listening for writes and queries\n\tgo c.listen()\n\t// start listening for state changes\n\tgo c.directiveListen()\n}\n\ntype stressClient struct {\n\ttestID string\n\n\t// State for the Stress Test\n\taddresses []string\n\tprecision string\n\tstartDate string\n\tdatabase  string\n\twdelay    string\n\tqdelay    string\n\tusername  string\n\tpassword  string\n\tssl       bool\n\n\t// Channels from statements\n\tpackageChan   <-chan Package\n\tdirectiveChan <-chan Directive\n\n\t// Response channel\n\tresponseChan chan<- Response\n\n\t// Concurrency utilities\n\tsync.WaitGroup\n\tsync.Mutex\n\n\t// Concurrency Limit for Writes and Reads\n\twconc int\n\tqconc int\n\n\t// Manage Read and Write concurrency seperately\n\twc *ConcurrencyLimiter\n\trc *ConcurrencyLimiter\n}\n\n// NewTestStressClient returns a blank stressClient for testing\nfunc newTestStressClient(url string) (*stressClient, chan Directive, chan Package) {\n\tpkgChan := make(chan Package)\n\tdirChan := make(chan Directive)\n\tpe := &stressClient{\n\t\ttestID:        \"foo_id\",\n\t\taddresses:     []string{url},\n\t\tprecision:     \"s\",\n\t\tstartDate:     \"2016-01-01\",\n\t\tdatabase:      
\"fooDatabase\",\n\t\twdelay:        \"50ms\",\n\t\tqdelay:        \"50ms\",\n\t\tssl:           false,\n\t\tusername:      \"\",\n\t\tpassword:      \"\",\n\t\twconc:         5,\n\t\tqconc:         5,\n\t\tpackageChan:   pkgChan,\n\t\tdirectiveChan: dirChan,\n\t\twc:            NewConcurrencyLimiter(1),\n\t\trc:            NewConcurrencyLimiter(1),\n\t}\n\treturn pe, dirChan, pkgChan\n}\n\n// stressClient starts listening for Packages on the main channel\nfunc (sc *stressClient) listen() {\n\tdefer sc.Wait()\n\tsc.wc = NewConcurrencyLimiter(sc.wconc)\n\tsc.rc = NewConcurrencyLimiter(sc.qconc)\n\tl := NewConcurrencyLimiter((sc.wconc + sc.qconc) * 2)\n\tcounter := 0\n\tfor p := range sc.packageChan {\n\t\tl.Increment()\n\t\tgo func(p Package) {\n\t\t\tdefer l.Decrement()\n\t\t\tswitch p.T {\n\t\t\tcase Write:\n\t\t\t\tsc.spinOffWritePackage(p, (counter % len(sc.addresses)))\n\t\t\tcase Query:\n\t\t\t\tsc.spinOffQueryPackage(p, (counter % len(sc.addresses)))\n\t\t\t}\n\t\t}(p)\n\t\tcounter++\n\t}\n\n}\n\n// Set handles all SET requests for test state\nfunc (sc *stressClient) directiveListen() {\n\tfor d := range sc.directiveChan {\n\t\tsc.Lock()\n\t\tswitch d.Property {\n\t\t// addresses is a []string of target InfluxDB instance(s) for the test\n\t\t// comes in as a \"|\" seperated array of addresses\n\t\tcase \"addresses\":\n\t\t\taddr := strings.Split(d.Value, \"|\")\n\t\t\tsc.addresses = addr\n\t\t// percison is the write precision for InfluxDB\n\t\tcase \"precision\":\n\t\t\tsc.precision = d.Value\n\t\t// writeinterval is an optional delay between batches\n\t\tcase \"writeinterval\":\n\t\t\tsc.wdelay = d.Value\n\t\t// queryinterval is an optional delay between the batches\n\t\tcase \"queryinterval\":\n\t\t\tsc.qdelay = d.Value\n\t\t// database is the InfluxDB database to target for both writes and queries\n\t\tcase \"database\":\n\t\t\tsc.database = d.Value\n\t\t// username for the target database\n\t\tcase \"username\":\n\t\t\tsc.username = d.Value\n\t\t// 
username for the target database\n\t\tcase \"password\":\n\t\t\tsc.password = d.Value\n\t\t// use https if sent true\n\t\tcase \"ssl\":\n\t\t\tif d.Value == \"true\" {\n\t\t\t\tsc.ssl = true\n\t\t\t}\n\t\t// concurrency is the number concurrent writers to the database\n\t\tcase \"writeconcurrency\":\n\t\t\tconc := parseInt(d.Value)\n\t\t\tsc.wconc = conc\n\t\t\tsc.wc.NewMax(conc)\n\t\t// concurrentqueries is the number of concurrent queriers database\n\t\tcase \"queryconcurrency\":\n\t\t\tconc := parseInt(d.Value)\n\t\t\tsc.qconc = conc\n\t\t\tsc.rc.NewMax(conc)\n\t\t}\n\t\td.Tracer.Done()\n\t\tsc.Unlock()\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stress_client_query.go",
    "content": "package stressClient\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"time\"\n)\n\nfunc (sc *stressClient) spinOffQueryPackage(p Package, serv int) {\n\tsc.Add(1)\n\tsc.rc.Increment()\n\tgo func() {\n\t\t// Send the query\n\t\tsc.prepareQuerySend(p, serv)\n\t\tsc.Done()\n\t\tsc.rc.Decrement()\n\t}()\n}\n\n// Prepares to send the GET request\nfunc (sc *stressClient) prepareQuerySend(p Package, serv int) {\n\n\tvar queryTemplate string\n\tif sc.ssl {\n\t\tqueryTemplate = \"https://%v/query?db=%v&q=%v&u=%v&p=%v\"\n\t} else {\n\t\tqueryTemplate = \"http://%v/query?db=%v&q=%v&u=%v&p=%v\"\n\t}\n\tqueryURL := fmt.Sprintf(queryTemplate, sc.addresses[serv], sc.database, url.QueryEscape(string(p.Body)), sc.username, sc.password)\n\n\t// Send the query\n\tsc.makeGet(queryURL, p.StatementID, p.Tracer)\n\n\t// Query Interval enforcement\n\tqi, _ := time.ParseDuration(sc.qdelay)\n\ttime.Sleep(qi)\n}\n\n// Sends the GET request, reads it, and handles errors\nfunc (sc *stressClient) makeGet(addr, statementID string, tr *Tracer) {\n\n\t// Make GET request\n\tt := time.Now()\n\tresp, err := http.Get(addr)\n\telapsed := time.Since(t)\n\n\tif err != nil {\n\t\tlog.Printf(\"Error making Query HTTP request\\n  error: %v\\n\", err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\t// Read body and return it for Reporting\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error reading Query response body\\n  error: %v\\n\", err)\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\tlog.Printf(\"Query returned non 200 status\\n  status: %v\\n  error: %v\\n\", resp.StatusCode, string(body))\n\t}\n\n\t// Send the response\n\tsc.responseChan <- NewResponse(sc.queryPoint(statementID, body, resp.StatusCode, elapsed, tr.Tags), tr)\n}\n\nfunc success(r *http.Response) bool {\n\t// ADD success for tcp, udp, etc\n\treturn r != nil && (r.StatusCode == 204 || r.StatusCode == 200)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stress_client_write.go",
    "content": "package stressClient\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"time\"\n)\n\n// ###############################################\n// A selection of methods to manage the write path\n// ###############################################\n\n// Packages up Package from channel in goroutine\nfunc (sc *stressClient) spinOffWritePackage(p Package, serv int) {\n\tsc.Add(1)\n\tsc.wc.Increment()\n\tgo func() {\n\t\tsc.retry(p, time.Duration(time.Nanosecond), serv)\n\t\tsc.Done()\n\t\tsc.wc.Decrement()\n\t}()\n}\n\n// Implements backoff and retry logic for 500 responses\nfunc (sc *stressClient) retry(p Package, backoff time.Duration, serv int) {\n\n\t// Set Backoff Interval to 500ms\n\tbackoffInterval := time.Duration(500 * time.Millisecond)\n\n\t// Arithmetic backoff for kicks\n\tbo := backoff + backoffInterval\n\n\t// Make the write request\n\tresp, elapsed, err := sc.prepareWrite(p.Body, serv)\n\n\t// Find number of times request has been retried\n\tnumBackoffs := int(bo/backoffInterval) - 1\n\n\t// On 500 responses, resp == nil. 
This logic keeps program for panicing\n\tvar statusCode int\n\n\tif resp == nil {\n\t\tstatusCode = 500\n\t} else {\n\t\tstatusCode = resp.StatusCode\n\t}\n\n\t// Make a point for reporting\n\tpoint := sc.writePoint(numBackoffs, p.StatementID, statusCode, elapsed, p.Tracer.Tags, len(p.Body))\n\n\t// Send the Response(point, tracer)\n\tsc.responseChan <- NewResponse(point, p.Tracer)\n\n\t// BatchInterval enforcement\n\tbi, _ := time.ParseDuration(sc.wdelay)\n\ttime.Sleep(bi)\n\n\t// Retry if the statusCode was not 204 or the err != nil\n\tif !(statusCode == 204) || err != nil {\n\t\t// Increment the *Tracer waitgroup if we are going to retry the request\n\t\tp.Tracer.Add(1)\n\t\t// Log the error if there is one\n\t\tfmt.Println(err)\n\t\t// Backoff enforcement\n\t\ttime.Sleep(bo)\n\t\tsc.retry(p, bo, serv)\n\t}\n\n}\n\n// Prepares to send the POST request\nfunc (sc *stressClient) prepareWrite(points []byte, serv int) (*http.Response, time.Duration, error) {\n\n\t// Construct address string\n\tvar writeTemplate string\n\tif sc.ssl {\n\t\twriteTemplate = \"https://%v/write?db=%v&precision=%v&u=%v&p=%v\"\n\t} else {\n\t\twriteTemplate = \"http://%v/write?db=%v&precision=%v&u=%v&p=%v\"\n\t}\n\taddress := fmt.Sprintf(writeTemplate, sc.addresses[serv], sc.database, sc.precision, sc.username, sc.password)\n\n\t// Start timer\n\tt := time.Now()\n\tresp, err := makePost(address, bytes.NewBuffer(points))\n\telapsed := time.Since(t)\n\n\treturn resp, elapsed, err\n}\n\n// Send POST request, read it, and handle errors\nfunc makePost(url string, points io.Reader) (*http.Response, error) {\n\n\tresp, err := http.Post(url, \"text/plain\", points)\n\n\tif err != nil {\n\t\treturn resp, fmt.Errorf(\"Error making write POST request\\n  error: %v\\n  url: %v\\n\", err, url)\n\t}\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\n\tif resp.StatusCode != 204 {\n\t\treturn resp, fmt.Errorf(\"Write returned non-204 status code\\n  StatusCode: %v\\n  InfluxDB Error: %v\\n\", resp.StatusCode, 
string(body))\n\t}\n\n\tresp.Body.Close()\n\n\treturn resp, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/tracer.go",
    "content": "package stressClient\n\nimport (\n\t\"sync\"\n)\n\n// The Tracer carrys tags and a waitgroup from the statements through the package life cycle\ntype Tracer struct {\n\tTags map[string]string\n\n\tsync.WaitGroup\n}\n\n// NewTracer returns a Tracer with tags attached\nfunc NewTracer(tags map[string]string) *Tracer {\n\treturn &Tracer{\n\t\tTags: tags,\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/tracer_test.go",
    "content": "package stressClient\n\nimport (\n\t\"testing\"\n)\n\nfunc TestNewTracer(t *testing.T) {\n\ttagValue := \"foo_tag_value\"\n\ttracer := NewTracer(map[string]string{\"foo_tag_key\": tagValue})\n\tgot := tracer.Tags[\"foo_tag_key\"]\n\tif got != tagValue {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\", tagValue, got)\n\t}\n\ttracer.Add(1)\n\ttracer.Done()\n\ttracer.Wait()\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stress_client/util.go",
    "content": "package stressClient\n\nimport (\n\t\"crypto/rand\"\n\t\"fmt\"\n\t\"log\"\n\t\"strconv\"\n\t\"sync\"\n)\n\n// ###########################################\n// ConcurrencyLimiter and associated methods #\n// ###########################################\n\n// ConcurrencyLimiter ensures that no more than a specified\n// max number of goroutines are running.\ntype ConcurrencyLimiter struct {\n\tinc   chan chan struct{}\n\tdec   chan struct{}\n\tmax   int\n\tcount int\n\n\tsync.Mutex\n}\n\n// NewConcurrencyLimiter returns a configured limiter that will\n// ensure that calls to Increment will block if the max is hit.\nfunc NewConcurrencyLimiter(max int) *ConcurrencyLimiter {\n\tc := &ConcurrencyLimiter{\n\t\tinc: make(chan chan struct{}),\n\t\tdec: make(chan struct{}, max),\n\t\tmax: max,\n\t}\n\tgo c.handleLimits()\n\treturn c\n}\n\n// Increment will increase the count of running goroutines by 1.\n// if the number is currently at the max, the call to Increment\n// will block until another goroutine decrements.\nfunc (c *ConcurrencyLimiter) Increment() {\n\tr := make(chan struct{})\n\tc.inc <- r\n\t<-r\n}\n\n// Decrement will reduce the count of running goroutines by 1\nfunc (c *ConcurrencyLimiter) Decrement() {\n\tc.dec <- struct{}{}\n}\n\n// NewMax resets the max of a ConcurrencyLimiter.\nfunc (c *ConcurrencyLimiter) NewMax(i int) {\n\tc.Lock()\n\tdefer c.Unlock()\n\tc.max = i\n}\n\n// handleLimits runs in a goroutine to manage the count of\n// running goroutines.\nfunc (c *ConcurrencyLimiter) handleLimits() {\n\tfor {\n\t\tr := <-c.inc\n\t\tc.Lock()\n\t\tif c.count >= c.max {\n\t\t\t<-c.dec\n\t\t\tc.count--\n\t\t}\n\t\tc.Unlock()\n\t\tc.count++\n\t\tr <- struct{}{}\n\t}\n}\n\n// Utility interger parsing function\nfunc parseInt(s string) int {\n\ti, err := strconv.ParseInt(s, 10, 64)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error parsing integer:\\n  String: %v\\n  Error: %v\\n\", s, err)\n\t}\n\treturn int(i)\n}\n\n// Utility for making random strings of 
length n\nfunc randStr(n int) string {\n\tb := make([]byte, n/2)\n\t_, _ = rand.Read(b)\n\treturn fmt.Sprintf(\"%x\", b)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stressql/parser.go",
    "content": "package stressql\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/stress/v2/statement\"\n\tstressql \"github.com/influxdata/influxdb/stress/v2/stressql/statement\"\n)\n\n// Token represents a lexical token.\ntype Token int\n\n// These are the lexical tokens used by the file parser\nconst (\n\tILLEGAL Token = iota\n\tEOF\n\tSTATEMENT\n\tBREAK\n)\n\nvar eof = rune(0)\n\nfunc check(e error) {\n\tif e != nil {\n\t\tlog.Fatal(e)\n\t}\n}\n\nfunc isNewline(r rune) bool {\n\treturn r == '\\n'\n}\n\n// Scanner scans the file and tokenizes the raw text\ntype Scanner struct {\n\tr *bufio.Reader\n}\n\n// NewScanner returns a Scanner\nfunc NewScanner(r io.Reader) *Scanner {\n\treturn &Scanner{r: bufio.NewReader(r)}\n}\n\nfunc (s *Scanner) read() rune {\n\tch, _, err := s.r.ReadRune()\n\tif err != nil {\n\t\treturn eof\n\t}\n\treturn ch\n}\n\nfunc (s *Scanner) unread() { _ = s.r.UnreadRune() }\n\nfunc (s *Scanner) peek() rune {\n\tch := s.read()\n\ts.unread()\n\treturn ch\n}\n\n// Scan moves the Scanner forward one character\nfunc (s *Scanner) Scan() (tok Token, lit string) {\n\tch := s.read()\n\n\tif isNewline(ch) {\n\t\ts.unread()\n\t\treturn s.scanNewlines()\n\t} else if ch == eof {\n\t\treturn EOF, \"\"\n\t} else {\n\t\ts.unread()\n\t\treturn s.scanStatements()\n\t}\n\t// golint marks as unreachable code\n\t// return ILLEGAL, string(ch)\n}\n\nfunc (s *Scanner) scanNewlines() (tok Token, lit string) {\n\tvar buf bytes.Buffer\n\tbuf.WriteRune(s.read())\n\n\tfor {\n\t\tif ch := s.read(); ch == eof {\n\t\t\tbreak\n\t\t} else if !isNewline(ch) {\n\t\t\ts.unread()\n\t\t\tbreak\n\t\t} else {\n\t\t\tbuf.WriteRune(ch)\n\t\t}\n\t}\n\n\treturn BREAK, buf.String()\n}\n\nfunc (s *Scanner) scanStatements() (tok Token, lit string) {\n\tvar buf bytes.Buffer\n\tbuf.WriteRune(s.read())\n\n\tfor {\n\t\tif ch := s.read(); ch == eof {\n\t\t\tbreak\n\t\t} else if 
isNewline(ch) && isNewline(s.peek()) {\n\t\t\ts.unread()\n\t\t\tbreak\n\t\t} else if isNewline(ch) {\n\t\t\ts.unread()\n\t\t\tbuf.WriteRune(ch)\n\t\t} else {\n\t\t\tbuf.WriteRune(ch)\n\t\t}\n\t}\n\n\treturn STATEMENT, buf.String()\n}\n\n// ParseStatements takes a configFile and returns a slice of Statements\nfunc ParseStatements(file string) ([]statement.Statement, error) {\n\tseq := []statement.Statement{}\n\n\tf, err := os.Open(file)\n\tcheck(err)\n\n\ts := NewScanner(f)\n\n\tfor {\n\t\tt, l := s.Scan()\n\n\t\tif t == EOF {\n\t\t\tbreak\n\t\t}\n\t\t_, err := influxql.ParseStatement(l)\n\t\tif err == nil {\n\n\t\t\tseq = append(seq, &statement.InfluxqlStatement{Query: l, StatementID: stressql.RandStr(10)})\n\t\t} else if t == BREAK {\n\t\t\tcontinue\n\t\t} else {\n\t\t\tf := strings.NewReader(l)\n\t\t\tp := stressql.NewParser(f)\n\t\t\ts, err := p.Parse()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tseq = append(seq, s)\n\n\t\t}\n\t}\n\n\tf.Close()\n\n\treturn seq, nil\n\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stressql/parser_test.go",
    "content": "package stressql\n\nimport \"testing\"\n\n// Pulls the default configFile and makes sure it parses\nfunc TestParseStatements(t *testing.T) {\n\tstmts, err := ParseStatements(\"../iql/file.iql\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\texpected := 15\n\tgot := len(stmts)\n\tif expected != got {\n\t\tt.Errorf(\"expected: %v\\ngot: %v\\n\", expected, got)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stressql/statement/parser.go",
    "content": "package statement\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto/rand\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/stress/v2/statement\"\n)\n\n// Token represents a lexical token.\ntype Token int\n\n// The following tokens represent the different values in the AST that make up stressql\nconst (\n\tILLEGAL Token = iota\n\tEOF\n\n\tWS\n\n\tliteralBeg\n\t// IDENT and the following are InfluxQL literal tokens.\n\tIDENT       // main\n\tNUMBER      // 12345.67\n\tDURATIONVAL // 13h\n\tSTRING      // \"abc\"\n\tBADSTRING   // \"abc\n\tTEMPLATEVAR // %f\n\tliteralEnd\n\n\tCOMMA    // ,\n\tLPAREN   // (\n\tRPAREN   // )\n\tLBRACKET // [\n\tRBRACKET // ]\n\tPIPE     // |\n\tPERIOD   // .\n\n\tkeywordBeg\n\tSET\n\tUSE\n\tQUERY\n\tINSERT\n\tGO\n\tDO\n\tWAIT\n\tSTR\n\tINT\n\tFLOAT\n\tEXEC\n\tkeywordEnd\n)\n\nvar eof = rune(1)\n\nfunc isWhitespace(ch rune) bool { return ch == ' ' || ch == '\\t' || ch == '\\n' }\n\nfunc isDigit(r rune) bool {\n\treturn r >= '0' && r <= '9'\n}\n\nfunc isLetter(ch rune) bool {\n\treturn (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch == '@')\n}\n\n// Scanner scans over the file and converts the raw text into tokens\ntype Scanner struct {\n\tr *bufio.Reader\n}\n\n// NewScanner returns a Scanner\nfunc NewScanner(r io.Reader) *Scanner {\n\treturn &Scanner{r: bufio.NewReader(r)}\n}\n\nfunc (s *Scanner) read() rune {\n\tch, _, err := s.r.ReadRune()\n\tif err != nil {\n\t\treturn eof\n\t}\n\treturn ch\n}\n\nfunc (s *Scanner) unread() { _ = s.r.UnreadRune() }\n\n// Scan moves to the next character in the file and returns a tokenized version as well as the literal\nfunc (s *Scanner) Scan() (tok Token, lit string) {\n\tch := s.read()\n\n\tif isWhitespace(ch) {\n\t\ts.unread()\n\t\treturn s.scanWhitespace()\n\t} else if isLetter(ch) {\n\t\ts.unread()\n\t\treturn s.scanIdent()\n\t} else if isDigit(ch) {\n\t\ts.unread()\n\t\treturn 
s.scanNumber()\n\t}\n\n\tswitch ch {\n\tcase eof:\n\t\treturn EOF, \"\"\n\tcase '\"':\n\t\ts.unread()\n\t\treturn s.scanIdent()\n\tcase '%':\n\t\ts.unread()\n\t\treturn s.scanTemplateVar()\n\tcase ',':\n\t\treturn COMMA, \",\"\n\tcase '.':\n\t\treturn PERIOD, \".\"\n\tcase '(':\n\t\treturn LPAREN, \"(\"\n\tcase ')':\n\t\treturn RPAREN, \")\"\n\tcase '[':\n\t\treturn LBRACKET, \"[\"\n\tcase ']':\n\t\treturn RBRACKET, \"]\"\n\tcase '|':\n\t\treturn PIPE, \"|\"\n\t}\n\n\treturn ILLEGAL, string(ch)\n}\n\nfunc (s *Scanner) scanWhitespace() (tok Token, lit string) {\n\tvar buf bytes.Buffer\n\tbuf.WriteRune(s.read())\n\n\tfor {\n\t\tif ch := s.read(); ch == eof {\n\t\t\tbreak\n\t\t} else if !isWhitespace(ch) {\n\t\t\ts.unread()\n\t\t\tbreak\n\t\t} else {\n\t\t\tbuf.WriteRune(ch)\n\t\t}\n\t}\n\n\treturn WS, buf.String()\n}\n\nfunc (s *Scanner) scanIdent() (tok Token, lit string) {\n\tvar buf bytes.Buffer\n\tbuf.WriteRune(s.read())\n\n\tfor {\n\t\tif ch := s.read(); ch == eof {\n\t\t\tbreak\n\t\t} else if !isLetter(ch) && !isDigit(ch) && ch != '_' && ch != ':' && ch != '=' && ch != '-' {\n\t\t\ts.unread()\n\t\t\tbreak\n\t\t} else {\n\t\t\t_, _ = buf.WriteRune(ch)\n\t\t}\n\t}\n\n\tswitch strings.ToUpper(buf.String()) {\n\tcase \"SET\":\n\t\treturn SET, buf.String()\n\tcase \"USE\":\n\t\treturn USE, buf.String()\n\tcase \"QUERY\":\n\t\treturn QUERY, buf.String()\n\tcase \"INSERT\":\n\t\treturn INSERT, buf.String()\n\tcase \"EXEC\":\n\t\treturn EXEC, buf.String()\n\tcase \"WAIT\":\n\t\treturn WAIT, buf.String()\n\tcase \"GO\":\n\t\treturn GO, buf.String()\n\tcase \"DO\":\n\t\treturn DO, buf.String()\n\tcase \"STR\":\n\t\treturn STR, buf.String()\n\tcase \"FLOAT\":\n\t\treturn FLOAT, buf.String()\n\tcase \"INT\":\n\t\treturn INT, buf.String()\n\t}\n\n\treturn IDENT, buf.String()\n}\n\nfunc (s *Scanner) scanTemplateVar() (tok Token, lit string) {\n\tvar buf bytes.Buffer\n\tbuf.WriteRune(s.read())\n\tbuf.WriteRune(s.read())\n\n\treturn TEMPLATEVAR, buf.String()\n}\n\nfunc (s 
*Scanner) scanNumber() (tok Token, lit string) {\n\tvar buf bytes.Buffer\n\tbuf.WriteRune(s.read())\n\n\tfor {\n\t\tif ch := s.read(); ch == eof {\n\t\t\tbreak\n\t\t} else if ch == 'n' || ch == 's' || ch == 'm' {\n\t\t\t_, _ = buf.WriteRune(ch)\n\t\t\treturn DURATIONVAL, buf.String()\n\t\t} else if !isDigit(ch) {\n\t\t\ts.unread()\n\t\t\tbreak\n\t\t} else {\n\t\t\t_, _ = buf.WriteRune(ch)\n\t\t}\n\t}\n\n\treturn NUMBER, buf.String()\n}\n\n/////////////////////////////////\n// PARSER ///////////////////////\n/////////////////////////////////\n\n// Parser turns the file from raw text into an AST\ntype Parser struct {\n\ts   *Scanner\n\tbuf struct {\n\t\ttok Token\n\t\tlit string\n\t\tn   int\n\t}\n}\n\n// NewParser creates a new Parser\nfunc NewParser(r io.Reader) *Parser {\n\treturn &Parser{s: NewScanner(r)}\n}\n\n// Parse returns a Statement\nfunc (p *Parser) Parse() (statement.Statement, error) {\n\ttok, lit := p.scanIgnoreWhitespace()\n\n\tswitch tok {\n\tcase QUERY:\n\t\tp.unscan()\n\t\treturn p.ParseQueryStatement()\n\tcase INSERT:\n\t\tp.unscan()\n\t\treturn p.ParseInsertStatement()\n\tcase EXEC:\n\t\tp.unscan()\n\t\treturn p.ParseExecStatement()\n\tcase SET:\n\t\tp.unscan()\n\t\treturn p.ParseSetStatement()\n\tcase GO:\n\t\tp.unscan()\n\t\treturn p.ParseGoStatement()\n\tcase WAIT:\n\t\tp.unscan()\n\t\treturn p.ParseWaitStatement()\n\t}\n\n\treturn nil, fmt.Errorf(\"Improper syntax\\n  unknown token found between statements, token: %v\\n\", lit)\n}\n\n// ParseQueryStatement returns a QueryStatement\nfunc (p *Parser) ParseQueryStatement() (*statement.QueryStatement, error) {\n\tstmt := &statement.QueryStatement{\n\t\tStatementID: RandStr(10),\n\t}\n\tif tok, lit := p.scanIgnoreWhitespace(); tok != QUERY {\n\t\treturn nil, fmt.Errorf(\"Error parsing Query Statement\\n  Expected: QUERY\\n  Found: %v\\n\", lit)\n\t}\n\n\ttok, lit := p.scanIgnoreWhitespace()\n\tif tok != IDENT {\n\t\treturn nil, fmt.Errorf(\"Error parsing Query Statement\\n  Expected: IDENT\\n  
Found: %v\\n\", lit)\n\t}\n\n\tstmt.Name = lit\n\n\tfor {\n\t\ttok, lit := p.scan()\n\t\tif tok == TEMPLATEVAR {\n\t\t\tstmt.TemplateString += \"%v\"\n\t\t\tstmt.Args = append(stmt.Args, lit)\n\t\t} else if tok == DO {\n\t\t\ttok, lit := p.scanIgnoreWhitespace()\n\t\t\tif tok != NUMBER {\n\t\t\t\treturn nil, fmt.Errorf(\"Error parsing Query Statement\\n  Expected: NUMBER\\n  Found: %v\\n\", lit)\n\t\t\t}\n\t\t\t// Parse out the integer\n\t\t\ti, err := strconv.ParseInt(lit, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Error parsing integer in Query Statement:\\n  string: %v\\n  error: %v\\n\", lit, err)\n\t\t\t}\n\t\t\tstmt.Count = int(i)\n\t\t\tbreak\n\t\t} else if tok == WS && lit == \"\\n\" {\n\t\t\tcontinue\n\t\t} else {\n\t\t\tstmt.TemplateString += lit\n\t\t}\n\t}\n\n\treturn stmt, nil\n\n}\n\n// ParseInsertStatement returns a InsertStatement\nfunc (p *Parser) ParseInsertStatement() (*statement.InsertStatement, error) {\n\n\t// Initialize the InsertStatement with a statementId\n\tstmt := &statement.InsertStatement{\n\t\tStatementID: RandStr(10),\n\t}\n\n\t// If the first word is INSERT\n\tif tok, lit := p.scanIgnoreWhitespace(); tok != INSERT {\n\t\treturn nil, fmt.Errorf(\"Error parsing Insert Statement\\n  Expected: INSERT\\n  Found: %v\\n\", lit)\n\t}\n\n\t// Next should come the NAME of the statement. 
It is IDENT type\n\ttok, lit := p.scanIgnoreWhitespace()\n\tif tok != IDENT {\n\t\treturn nil, fmt.Errorf(\"Error parsing Insert Statement\\n  Expected: IDENT\\n  Found: %v\\n\", lit)\n\t}\n\n\t// Set the Name\n\tstmt.Name = lit\n\n\t// Next char should be a newline\n\ttok, lit = p.scan()\n\tif tok != WS {\n\t\treturn nil, fmt.Errorf(\"Error parsing Insert Statement\\n  Expected: WS\\n  Found: %v\\n\", lit)\n\t}\n\n\t// We are now scanning the tags line\n\tvar prev Token\n\tinTags := true\n\n\tfor {\n\t\t// Start for loop by scanning\n\t\ttok, lit = p.scan()\n\n\t\t// If scaned is WS then we are just entering tags or leaving tags or fields\n\t\tif tok == WS {\n\n\t\t\t// If previous is COMMA then we are leaving measurement, continue\n\t\t\tif prev == COMMA {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Otherwise we need to add a space to the template string and we are out of tags\n\t\t\tstmt.TemplateString += \" \"\n\t\t\tinTags = false\n\t\t} else if tok == LBRACKET {\n\t\t\t// If we are still inTags and there is a LBRACKET we are adding another template\n\t\t\tif inTags {\n\t\t\t\tstmt.TagCount++\n\t\t\t}\n\n\t\t\t// Add a space to fill template string with template result\n\t\t\tstmt.TemplateString += \"%v\"\n\n\t\t\t// parse template should return a template type\n\t\t\texpr, err := p.ParseTemplate()\n\n\t\t\t// If there is a Template parsing error return it\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// Add template to parsed select statement\n\t\t\tstmt.Templates = append(stmt.Templates, expr)\n\n\t\t\t// A number signifies that we are in the Timestamp section\n\t\t} else if tok == NUMBER {\n\t\t\t// Add a space to fill template string with timestamp\n\t\t\tstmt.TemplateString += \"%v\"\n\t\t\tp.unscan()\n\n\t\t\t// Parse out the Timestamp\n\t\t\tts, err := p.ParseTimestamp()\n\n\t\t\t// If there is a Timestamp parsing error return it\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// Set the Timestamp\n\t\t\tstmt.Timestamp = 
ts\n\n\t\t\t// Break loop as InsertStatement ends\n\t\t\tbreak\n\t\t} else if tok != IDENT && tok != COMMA {\n\t\t\treturn nil, fmt.Errorf(\"Error parsing Insert Statement\\n  Expected: IDENT or COMMA\\n  Found: %v\\n\", lit)\n\t\t} else {\n\t\t\tprev = tok\n\t\t\tstmt.TemplateString += lit\n\t\t}\n\n\t}\n\n\treturn stmt, nil\n}\n\n// ParseTemplate returns a Template\nfunc (p *Parser) ParseTemplate() (*statement.Template, error) {\n\n\t// Blank template\n\ttmplt := &statement.Template{}\n\n\tfor {\n\t\t// Scan to start loop\n\t\ttok, lit := p.scanIgnoreWhitespace()\n\n\t\t// If the tok == IDENT explicit tags are passed. Add them to the list of tags\n\t\tif tok == IDENT {\n\t\t\ttmplt.Tags = append(tmplt.Tags, lit)\n\n\t\t\t// Different flavors of functions\n\t\t} else if tok == INT || tok == FLOAT || tok == STR {\n\t\t\tp.unscan()\n\n\t\t\t// Parse out the function\n\t\t\tfn, err := p.ParseFunction()\n\n\t\t\t// If there is a Function parsing error return it\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// Set the Function on the Template\n\t\t\ttmplt.Function = fn\n\n\t\t\t// End of Function\n\t\t} else if tok == RBRACKET {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn tmplt, nil\n}\n\n// ParseExecStatement returns a ExecStatement\nfunc (p *Parser) ParseExecStatement() (*statement.ExecStatement, error) {\n\t// NEEDS TO PARSE ACTUAL PATH TO SCRIPT CURRENTLY ONLY DOES\n\t// IDENT SCRIPT NAMES\n\n\tstmt := &statement.ExecStatement{\n\t\tStatementID: RandStr(10),\n\t}\n\n\tif tok, lit := p.scanIgnoreWhitespace(); tok != EXEC {\n\t\treturn nil, fmt.Errorf(\"Error parsing Exec Statement\\n  Expected: EXEC\\n  Found: %v\\n\", lit)\n\t}\n\n\ttok, lit := p.scanIgnoreWhitespace()\n\tif tok != IDENT {\n\t\treturn nil, fmt.Errorf(\"Error parsing Exec Statement\\n  Expected: IDENT\\n  Found: %v\\n\", lit)\n\t}\n\n\tstmt.Script = lit\n\n\treturn stmt, nil\n}\n\n// ParseSetStatement returns a SetStatement\nfunc (p *Parser) ParseSetStatement() (*statement.SetStatement, 
error) {\n\n\tstmt := &statement.SetStatement{\n\t\tStatementID: RandStr(10),\n\t}\n\n\tif tok, lit := p.scanIgnoreWhitespace(); tok != SET {\n\t\treturn nil, fmt.Errorf(\"Error parsing Set Statement\\n  Expected: SET\\n  Found: %v\\n\", lit)\n\t}\n\n\ttok, lit := p.scanIgnoreWhitespace()\n\tif tok != IDENT {\n\t\treturn nil, fmt.Errorf(\"Error parsing Set Statement\\n  Expected: IDENT\\n  Found: %v\\n\", lit)\n\t}\n\n\tstmt.Var = lit\n\n\ttok, lit = p.scanIgnoreWhitespace()\n\n\tif tok != LBRACKET {\n\t\treturn nil, fmt.Errorf(\"Error parsing Set Statement\\n  Expected: RBRACKET\\n  Found: %v\\n\", lit)\n\t}\n\n\tfor {\n\t\ttok, lit = p.scanIgnoreWhitespace()\n\t\tif tok == RBRACKET {\n\t\t\tbreak\n\t\t} else if lit != \"-\" && lit != \":\" && tok != IDENT && tok != NUMBER && tok != DURATIONVAL && tok != PERIOD && tok != PIPE {\n\t\t\treturn nil, fmt.Errorf(\"Error parsing Set Statement\\n  Expected: IDENT || NUMBER || DURATION\\n  Found: %v\\n\", lit)\n\t\t}\n\t\tstmt.Value += lit\n\t}\n\n\treturn stmt, nil\n}\n\n// ParseWaitStatement returns a WaitStatement\nfunc (p *Parser) ParseWaitStatement() (*statement.WaitStatement, error) {\n\n\tstmt := &statement.WaitStatement{\n\t\tStatementID: RandStr(10),\n\t}\n\n\tif tok, lit := p.scanIgnoreWhitespace(); tok != WAIT {\n\t\treturn nil, fmt.Errorf(\"Error parsing Wait Statement\\n  Expected: WAIT\\n  Found: %v\\n\", lit)\n\t}\n\n\treturn stmt, nil\n}\n\n// ParseGoStatement returns a GoStatement\nfunc (p *Parser) ParseGoStatement() (*statement.GoStatement, error) {\n\n\tstmt := &statement.GoStatement{}\n\tstmt.StatementID = RandStr(10)\n\n\tif tok, lit := p.scanIgnoreWhitespace(); tok != GO {\n\t\treturn nil, fmt.Errorf(\"Error parsing Go Statement\\n  Expected: GO\\n  Found: %v\\n\", lit)\n\t}\n\n\tvar body statement.Statement\n\tvar err error\n\n\ttok, _ := p.scanIgnoreWhitespace()\n\tswitch tok {\n\tcase QUERY:\n\t\tp.unscan()\n\t\tbody, err = p.ParseQueryStatement()\n\tcase INSERT:\n\t\tp.unscan()\n\t\tbody, err = 
p.ParseInsertStatement()\n\tcase EXEC:\n\t\tp.unscan()\n\t\tbody, err = p.ParseExecStatement()\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstmt.Statement = body\n\n\treturn stmt, nil\n\n}\n\n// ParseFunction returns a Function\nfunc (p *Parser) ParseFunction() (*statement.Function, error) {\n\n\tfn := &statement.Function{}\n\n\t_, lit := p.scanIgnoreWhitespace()\n\tfn.Type = lit\n\n\t_, lit = p.scanIgnoreWhitespace()\n\tfn.Fn = lit\n\n\ttok, lit := p.scanIgnoreWhitespace()\n\tif tok != LPAREN {\n\t\treturn nil, fmt.Errorf(\"Error parsing Insert template function\\n  Expected: LPAREN\\n  Found: %v\\n\", lit)\n\t}\n\n\ttok, lit = p.scanIgnoreWhitespace()\n\tif tok != NUMBER {\n\t\treturn nil, fmt.Errorf(\"Error parsing Insert template function\\n  Expected: NUMBER\\n  Found: %v\\n\", lit)\n\t}\n\n\t// Parse out the integer\n\ti, err := strconv.ParseInt(lit, 10, 64)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error parsing integer in Insert template function:\\n  string: %v\\n  error: %v\\n\", lit, err)\n\t}\n\n\tfn.Argument = int(i)\n\n\ttok, _ = p.scanIgnoreWhitespace()\n\tif tok != RPAREN {\n\t\treturn nil, fmt.Errorf(\"Error parsing Insert template function\\n  Expected: RPAREN\\n  Found: %v\\n\", lit)\n\t}\n\n\ttok, lit = p.scanIgnoreWhitespace()\n\tif tok != NUMBER {\n\t\treturn nil, fmt.Errorf(\"Error parsing Insert template function\\n  Expected: NUMBER\\n  Found: %v\\n\", lit)\n\t}\n\n\t// Parse out the integer\n\ti, err = strconv.ParseInt(lit, 10, 64)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error parsing integer in Insert template function:\\n  string: %v\\n  error: %v\\n\", lit, err)\n\t}\n\n\tfn.Count = int(i)\n\n\treturn fn, nil\n}\n\n// ParseTimestamp returns a Timestamp\nfunc (p *Parser) ParseTimestamp() (*statement.Timestamp, error) {\n\n\tts := &statement.Timestamp{}\n\n\ttok, lit := p.scanIgnoreWhitespace()\n\tif tok != NUMBER {\n\t\treturn nil, fmt.Errorf(\"Error parsing Insert timestamp\\n  Expected: NUMBER\\n  Found: %v\\n\", 
lit)\n\t}\n\n\t// Parse out the integer\n\ti, err := strconv.ParseInt(lit, 10, 64)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error parsing integer in Insert timestamp:\\n  string: %v\\n  error: %v\\n\", lit, err)\n\t}\n\n\tts.Count = int(i)\n\n\ttok, lit = p.scanIgnoreWhitespace()\n\tif tok != DURATIONVAL {\n\t\treturn nil, fmt.Errorf(\"Error parsing Insert timestamp\\n  Expected: DURATION\\n  Found: %v\\n\", lit)\n\t}\n\n\t// Parse out the duration\n\tdur, err := time.ParseDuration(lit)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"Error parsing duration in Insert timestamp:\\n  string: %v\\n  error: %v\\n\", lit, err)\n\t}\n\n\tts.Duration = dur\n\n\treturn ts, nil\n}\n\nfunc (p *Parser) scan() (tok Token, lit string) {\n\t// If we have a token on the buffer, then return it.\n\tif p.buf.n != 0 {\n\t\tp.buf.n = 0\n\t\treturn p.buf.tok, p.buf.lit\n\t}\n\n\t// Otherwise read the next token from the scanner.\n\ttok, lit = p.s.Scan()\n\n\t// Save it to the buffer in case we unscan later.\n\tp.buf.tok, p.buf.lit = tok, lit\n\n\treturn\n}\n\n// scanIgnoreWhitespace scans the next non-whitespace token.\nfunc (p *Parser) scanIgnoreWhitespace() (tok Token, lit string) {\n\ttok, lit = p.scan()\n\tif tok == WS {\n\t\ttok, lit = p.scan()\n\t}\n\treturn\n}\n\n// unscan pushes the previously read token back onto the buffer.\nfunc (p *Parser) unscan() { p.buf.n = 1 }\n\n// RandStr returns a string of random characters with length n\nfunc RandStr(n int) string {\n\tb := make([]byte, n/2)\n\t_, _ = rand.Read(b)\n\treturn fmt.Sprintf(\"%x\", b)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/stress/v2/stressql/statement/parser_test.go",
    "content": "package statement\n\nimport (\n\t//\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/stress/v2/statement\"\n)\n\nfunc newParserFromString(s string) *Parser {\n\tf := strings.NewReader(s)\n\tp := NewParser(f)\n\n\treturn p\n}\n\nfunc TestParser_ParseStatement(t *testing.T) {\n\tvar tests = []struct {\n\t\tskip bool\n\t\ts    string\n\t\tstmt statement.Statement\n\t\terr  string\n\t}{\n\n\t\t// QUERY\n\n\t\t{\n\t\t\ts:    \"QUERY basicCount\\nSELECT count(%f) FROM cpu\\nDO 100\",\n\t\t\tstmt: &statement.QueryStatement{Name: \"basicCount\", TemplateString: \"SELECT count(%v) FROM cpu\", Args: []string{\"%f\"}, Count: 100},\n\t\t},\n\n\t\t{\n\t\t\ts:    \"QUERY basicCount\\nSELECT count(%f) FROM %m\\nDO 100\",\n\t\t\tstmt: &statement.QueryStatement{Name: \"basicCount\", TemplateString: \"SELECT count(%v) FROM %v\", Args: []string{\"%f\", \"%m\"}, Count: 100},\n\t\t},\n\n\t\t{\n\t\t\tskip: true, // SHOULD CAUSE AN ERROR\n\t\t\ts:    \"QUERY\\nSELECT count(%f) FROM %m\\nDO 100\",\n\t\t\terr:  \"Missing Name\",\n\t\t},\n\n\t\t// INSERT\n\n\t\t{\n\t\t\ts: \"INSERT mockCpu\\ncpu,\\nhost=[us-west|us-east|eu-north],server_id=[str rand(7) 1000]\\nbusy=[int rand(1000) 100],free=[float rand(10) 0]\\n100000 10s\",\n\t\t\tstmt: &statement.InsertStatement{\n\t\t\t\tName:           \"mockCpu\",\n\t\t\t\tTemplateString: \"cpu,host=%v,server_id=%v busy=%v,free=%v %v\",\n\t\t\t\tTagCount:       2,\n\t\t\t\tTemplates: []*statement.Template{\n\t\t\t\t\t&statement.Template{\n\t\t\t\t\t\tTags: []string{\"us-west\", \"us-east\", \"eu-north\"},\n\t\t\t\t\t},\n\t\t\t\t\t&statement.Template{\n\t\t\t\t\t\tFunction: &statement.Function{Type: \"str\", Fn: \"rand\", Argument: 7, Count: 1000},\n\t\t\t\t\t},\n\t\t\t\t\t&statement.Template{\n\t\t\t\t\t\tFunction: &statement.Function{Type: \"int\", Fn: \"rand\", Argument: 1000, Count: 100},\n\t\t\t\t\t},\n\t\t\t\t\t&statement.Template{\n\t\t\t\t\t\tFunction: 
&statement.Function{Type: \"float\", Fn: \"rand\", Argument: 10, Count: 0},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tTimestamp: &statement.Timestamp{\n\t\t\t\t\tCount:    100000,\n\t\t\t\t\tDuration: time.Duration(10 * time.Second),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: \"INSERT mockCpu\\ncpu,host=[us-west|us-east|eu-north],server_id=[str rand(7) 1000]\\nbusy=[int rand(1000) 100],free=[float rand(10) 0]\\n100000 10s\",\n\t\t\tstmt: &statement.InsertStatement{\n\t\t\t\tName:           \"mockCpu\",\n\t\t\t\tTemplateString: \"cpu,host=%v,server_id=%v busy=%v,free=%v %v\",\n\t\t\t\tTagCount:       2,\n\t\t\t\tTemplates: []*statement.Template{\n\t\t\t\t\t&statement.Template{\n\t\t\t\t\t\tTags: []string{\"us-west\", \"us-east\", \"eu-north\"},\n\t\t\t\t\t},\n\t\t\t\t\t&statement.Template{\n\t\t\t\t\t\tFunction: &statement.Function{Type: \"str\", Fn: \"rand\", Argument: 7, Count: 1000},\n\t\t\t\t\t},\n\t\t\t\t\t&statement.Template{\n\t\t\t\t\t\tFunction: &statement.Function{Type: \"int\", Fn: \"rand\", Argument: 1000, Count: 100},\n\t\t\t\t\t},\n\t\t\t\t\t&statement.Template{\n\t\t\t\t\t\tFunction: &statement.Function{Type: \"float\", Fn: \"rand\", Argument: 10, Count: 0},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tTimestamp: &statement.Timestamp{\n\t\t\t\t\tCount:    100000,\n\t\t\t\t\tDuration: time.Duration(10 * time.Second),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ts: \"INSERT mockCpu\\n[str rand(1000) 10],\\nhost=[us-west|us-east|eu-north],server_id=[str rand(7) 1000],other=x\\nbusy=[int rand(1000) 100],free=[float rand(10) 0]\\n100000 10s\",\n\t\t\tstmt: &statement.InsertStatement{\n\t\t\t\tName:           \"mockCpu\",\n\t\t\t\tTemplateString: \"%v,host=%v,server_id=%v,other=x busy=%v,free=%v %v\",\n\t\t\t\tTagCount:       3,\n\t\t\t\tTemplates: []*statement.Template{\n\t\t\t\t\t&statement.Template{\n\t\t\t\t\t\tFunction: &statement.Function{Type: \"str\", Fn: \"rand\", Argument: 1000, Count: 10},\n\t\t\t\t\t},\n\t\t\t\t\t&statement.Template{\n\t\t\t\t\t\tTags: 
[]string{\"us-west\", \"us-east\", \"eu-north\"},\n\t\t\t\t\t},\n\t\t\t\t\t&statement.Template{\n\t\t\t\t\t\tFunction: &statement.Function{Type: \"str\", Fn: \"rand\", Argument: 7, Count: 1000},\n\t\t\t\t\t},\n\t\t\t\t\t&statement.Template{\n\t\t\t\t\t\tFunction: &statement.Function{Type: \"int\", Fn: \"rand\", Argument: 1000, Count: 100},\n\t\t\t\t\t},\n\t\t\t\t\t&statement.Template{\n\t\t\t\t\t\tFunction: &statement.Function{Type: \"float\", Fn: \"rand\", Argument: 10, Count: 0},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tTimestamp: &statement.Timestamp{\n\t\t\t\t\tCount:    100000,\n\t\t\t\t\tDuration: time.Duration(10 * time.Second),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tskip: true, // Expected error not working\n\t\t\ts:    \"INSERT\\ncpu,\\nhost=[us-west|us-east|eu-north],server_id=[str rand(7) 1000]\\nbusy=[int rand(1000) 100],free=[float rand(10) 0]\\n100000 10s\",\n\t\t\terr:  `found \",\", expected WS`,\n\t\t},\n\n\t\t// EXEC\n\n\t\t{\n\t\t\ts:    `EXEC other_script`,\n\t\t\tstmt: &statement.ExecStatement{Script: \"other_script\"},\n\t\t},\n\n\t\t{\n\t\t\tskip: true, // Implement\n\t\t\ts:    `EXEC other_script.sh`,\n\t\t\tstmt: &statement.ExecStatement{Script: \"other_script.sh\"},\n\t\t},\n\n\t\t{\n\t\t\tskip: true, // Implement\n\t\t\ts:    `EXEC ../other_script.sh`,\n\t\t\tstmt: &statement.ExecStatement{Script: \"../other_script.sh\"},\n\t\t},\n\n\t\t{\n\t\t\tskip: true, // Implement\n\t\t\ts:    `EXEC /path/to/some/other_script.sh`,\n\t\t\tstmt: &statement.ExecStatement{Script: \"/path/to/some/other_script.sh\"},\n\t\t},\n\n\t\t// GO\n\n\t\t{\n\t\t\tskip: true,\n\t\t\ts:    \"GO INSERT mockCpu\\ncpu,\\nhost=[us-west|us-east|eu-north],server_id=[str rand(7) 1000]\\nbusy=[int rand(1000) 100],free=[float rand(10) 0]\\n100000 10s\",\n\t\t\tstmt: &statement.GoStatement{\n\t\t\t\tStatement: &statement.InsertStatement{\n\t\t\t\t\tName:           \"mockCpu\",\n\t\t\t\t\tTemplateString: \"cpu,host=%v,server_id=%v busy=%v,free=%v %v\",\n\t\t\t\t\tTemplates: 
[]*statement.Template{\n\t\t\t\t\t\t&statement.Template{\n\t\t\t\t\t\t\tTags: []string{\"us-west\", \"us-east\", \"eu-north\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&statement.Template{\n\t\t\t\t\t\t\tFunction: &statement.Function{Type: \"str\", Fn: \"rand\", Argument: 7, Count: 1000},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&statement.Template{\n\t\t\t\t\t\t\tFunction: &statement.Function{Type: \"int\", Fn: \"rand\", Argument: 1000, Count: 100},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&statement.Template{\n\t\t\t\t\t\t\tFunction: &statement.Function{Type: \"float\", Fn: \"rand\", Argument: 10, Count: 0},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tTimestamp: &statement.Timestamp{\n\t\t\t\t\t\tCount:    100000,\n\t\t\t\t\t\tDuration: time.Duration(10 * time.Second),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tskip: true,\n\t\t\ts:    \"GO QUERY basicCount\\nSELECT count(free) FROM cpu\\nDO 100\",\n\t\t\tstmt: &statement.GoStatement{\n\t\t\t\tStatement: &statement.QueryStatement{Name: \"basicCount\", TemplateString: \"SELECT count(free) FROM cpu\", Count: 100},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tskip: true,\n\t\t\ts:    `GO EXEC other_script`,\n\t\t\tstmt: &statement.GoStatement{\n\t\t\t\tStatement: &statement.ExecStatement{Script: \"other_script\"},\n\t\t\t},\n\t\t},\n\n\t\t// SET\n\n\t\t{\n\t\t\ts:    `SET database [stress]`,\n\t\t\tstmt: &statement.SetStatement{Var: \"database\", Value: \"stress\"},\n\t\t},\n\n\t\t// WAIT\n\n\t\t{\n\t\t\ts:    `Wait`,\n\t\t\tstmt: &statement.WaitStatement{},\n\t\t},\n\t}\n\n\tfor _, tst := range tests {\n\n\t\tif tst.skip {\n\t\t\tcontinue\n\t\t}\n\n\t\tstmt, err := newParserFromString(tst.s).Parse()\n\t\ttst.stmt.SetID(\"x\")\n\n\t\tif err != nil && err.Error() != tst.err {\n\t\t\tt.Errorf(\"REAL ERROR: %v\\nExpected ERROR: %v\\n\", err, tst.err)\n\t\t} else if err != nil && tst.err == err.Error() {\n\t\t\tt.Errorf(\"REAL ERROR: %v\\nExpected ERROR: %v\\n\", err, tst.err)\n\t\t} else if stmt.SetID(\"x\"); !reflect.DeepEqual(stmt, tst.stmt) 
{\n\t\t\tt.Errorf(\"Expected\\n%#v\\n%#v\", tst.stmt, stmt)\n\t\t}\n\t}\n\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tcp/mux.go",
    "content": "// Package tcp provides a simple multiplexer over TCP.\npackage tcp // import \"github.com/influxdata/influxdb/tcp\"\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\t// DefaultTimeout is the default length of time to wait for first byte.\n\tDefaultTimeout = 30 * time.Second\n)\n\n// Mux multiplexes a network connection.\ntype Mux struct {\n\tmu sync.RWMutex\n\tln net.Listener\n\tm  map[byte]*listener\n\n\tdefaultListener *listener\n\n\twg sync.WaitGroup\n\n\t// The amount of time to wait for the first header byte.\n\tTimeout time.Duration\n\n\t// Out-of-band error logger\n\tLogger *log.Logger\n}\n\ntype replayConn struct {\n\tnet.Conn\n\tfirstByte     byte\n\treadFirstbyte bool\n}\n\nfunc (rc *replayConn) Read(b []byte) (int, error) {\n\tif rc.readFirstbyte {\n\t\treturn rc.Conn.Read(b)\n\t}\n\n\tif len(b) == 0 {\n\t\treturn 0, nil\n\t}\n\n\tb[0] = rc.firstByte\n\trc.readFirstbyte = true\n\treturn 1, nil\n}\n\n// NewMux returns a new instance of Mux.\nfunc NewMux() *Mux {\n\treturn &Mux{\n\t\tm:       make(map[byte]*listener),\n\t\tTimeout: DefaultTimeout,\n\t\tLogger:  log.New(os.Stderr, \"[tcp] \", log.LstdFlags),\n\t}\n}\n\n// Serve handles connections from ln and multiplexes then across registered listeners.\nfunc (mux *Mux) Serve(ln net.Listener) error {\n\tmux.mu.Lock()\n\tmux.ln = ln\n\tmux.mu.Unlock()\n\tfor {\n\t\t// Wait for the next connection.\n\t\t// If it returns a temporary error then simply retry.\n\t\t// If it returns any other error then exit immediately.\n\t\tconn, err := ln.Accept()\n\t\tif err, ok := err.(interface {\n\t\t\tTemporary() bool\n\t\t}); ok && err.Temporary() {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\t// Wait for all connections to be demux\n\t\t\tmux.wg.Wait()\n\t\t\tfor _, ln := range mux.m {\n\t\t\t\tclose(ln.c)\n\t\t\t}\n\n\t\t\tif mux.defaultListener != nil {\n\t\t\t\tclose(mux.defaultListener.c)\n\t\t\t}\n\n\t\t\treturn 
err\n\t\t}\n\n\t\t// Demux in a goroutine to\n\t\tmux.wg.Add(1)\n\t\tgo mux.handleConn(conn)\n\t}\n}\n\nfunc (mux *Mux) handleConn(conn net.Conn) {\n\tdefer mux.wg.Done()\n\t// Set a read deadline so connections with no data don't timeout.\n\tif err := conn.SetReadDeadline(time.Now().Add(mux.Timeout)); err != nil {\n\t\tconn.Close()\n\t\tmux.Logger.Printf(\"tcp.Mux: cannot set read deadline: %s\", err)\n\t\treturn\n\t}\n\n\t// Read first byte from connection to determine handler.\n\tvar typ [1]byte\n\tif _, err := io.ReadFull(conn, typ[:]); err != nil {\n\t\tconn.Close()\n\t\tmux.Logger.Printf(\"tcp.Mux: cannot read header byte: %s\", err)\n\t\treturn\n\t}\n\n\t// Reset read deadline and let the listener handle that.\n\tif err := conn.SetReadDeadline(time.Time{}); err != nil {\n\t\tconn.Close()\n\t\tmux.Logger.Printf(\"tcp.Mux: cannot reset set read deadline: %s\", err)\n\t\treturn\n\t}\n\n\t// Retrieve handler based on first byte.\n\thandler := mux.m[typ[0]]\n\tif handler == nil {\n\t\tif mux.defaultListener == nil {\n\t\t\tconn.Close()\n\t\t\tmux.Logger.Printf(\"tcp.Mux: handler not registered: %d. Connection from %s closed\", typ[0], conn.RemoteAddr())\n\t\t\treturn\n\t\t}\n\n\t\tconn = &replayConn{\n\t\t\tConn:      conn,\n\t\t\tfirstByte: typ[0],\n\t\t}\n\t\thandler = mux.defaultListener\n\t}\n\n\t// Send connection to handler.  The handler is responsible for closing the connection.\n\ttimer := time.NewTimer(mux.Timeout)\n\tdefer timer.Stop()\n\n\tselect {\n\tcase handler.c <- conn:\n\tcase <-timer.C:\n\t\tconn.Close()\n\t\tmux.Logger.Printf(\"tcp.Mux: handler not ready: %d. 
Connection from %s closed\", typ[0], conn.RemoteAddr())\n\t\treturn\n\t}\n}\n\n// Listen returns a listener identified by header.\n// Any connection accepted by mux is multiplexed based on the initial header byte.\nfunc (mux *Mux) Listen(header byte) net.Listener {\n\t// Ensure two listeners are not created for the same header byte.\n\tif _, ok := mux.m[header]; ok {\n\t\tpanic(fmt.Sprintf(\"listener already registered under header byte: %d\", header))\n\t}\n\n\t// Create a new listener and assign it.\n\tln := &listener{\n\t\tc:   make(chan net.Conn),\n\t\tmux: mux,\n\t}\n\tmux.m[header] = ln\n\n\treturn ln\n}\n\n// DefaultListener will return a net.Listener that will pass-through any\n// connections with non-registered values for the first byte of the connection.\n// The connections returned from this listener's Accept() method will replay the\n// first byte of the connection as a short first Read().\n//\n// This can be used to pass to an HTTP server, so long as there are no conflicts\n// with registered listener bytes and the first character of the HTTP request:\n// 71 ('G') for GET, etc.\nfunc (mux *Mux) DefaultListener() net.Listener {\n\tif mux.defaultListener == nil {\n\t\tmux.defaultListener = &listener{\n\t\t\tc:   make(chan net.Conn),\n\t\t\tmux: mux,\n\t\t}\n\t}\n\n\treturn mux.defaultListener\n}\n\n// listener is a receiver for connections received by Mux.\ntype listener struct {\n\tc   chan net.Conn\n\tmux *Mux\n}\n\n// Accept waits for and returns the next connection to the listener.\nfunc (ln *listener) Accept() (c net.Conn, err error) {\n\tconn, ok := <-ln.c\n\tif !ok {\n\t\treturn nil, errors.New(\"network connection closed\")\n\t}\n\treturn conn, nil\n}\n\n// Close is a no-op. 
The mux's listener should be closed instead.\nfunc (ln *listener) Close() error { return nil }\n\n// Addr returns the Addr of the listener\nfunc (ln *listener) Addr() net.Addr {\n\tif ln.mux == nil {\n\t\treturn nil\n\t}\n\n\tln.mux.mu.RLock()\n\tdefer ln.mux.mu.RUnlock()\n\n\tif ln.mux.ln == nil {\n\t\treturn nil\n\t}\n\n\treturn ln.mux.ln.Addr()\n}\n\n// Dial connects to a remote mux listener with a given header byte.\nfunc Dial(network, address string, header byte) (net.Conn, error) {\n\tconn, err := net.Dial(network, address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif _, err := conn.Write([]byte{header}); err != nil {\n\t\treturn nil, fmt.Errorf(\"write mux header: %s\", err)\n\t}\n\n\treturn conn, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tcp/mux_test.go",
    "content": "package tcp_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"testing/quick\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/tcp\"\n)\n\n// Ensure the muxer can split a listener's connections across multiple listeners.\nfunc TestMux(t *testing.T) {\n\tif err := quick.Check(func(n uint8, msg []byte) bool {\n\t\tif testing.Verbose() {\n\t\t\tif len(msg) == 0 {\n\t\t\t\tlog.Printf(\"n=%d, <no message>\", n)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"n=%d, hdr=%d, len=%d\", n, msg[0], len(msg))\n\t\t\t}\n\t\t}\n\n\t\tvar wg sync.WaitGroup\n\n\t\t// Open single listener on random port.\n\t\ttcpListener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer tcpListener.Close()\n\n\t\t// Setup muxer & listeners.\n\t\tmux := tcp.NewMux()\n\t\tmux.Timeout = 200 * time.Millisecond\n\t\tif !testing.Verbose() {\n\t\t\tmux.Logger = log.New(ioutil.Discard, \"\", 0)\n\t\t}\n\n\t\terrC := make(chan error)\n\t\tfor i := uint8(0); i < n; i++ {\n\t\t\tln := mux.Listen(byte(i))\n\n\t\t\twg.Add(1)\n\t\t\tgo func(i uint8, ln net.Listener) {\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\t// Wait for a connection for this listener.\n\t\t\t\tconn, err := ln.Accept()\n\t\t\t\tif conn != nil {\n\t\t\t\t\tdefer conn.Close()\n\t\t\t\t}\n\n\t\t\t\t// If there is no message or the header byte\n\t\t\t\t// doesn't match then expect close.\n\t\t\t\tif len(msg) == 0 || msg[0] != byte(i) {\n\t\t\t\t\tif err == nil || err.Error() != \"network connection closed\" {\n\t\t\t\t\t\terrC <- fmt.Errorf(\"unexpected error: %s\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// If the header byte matches this listener\n\t\t\t\t// then expect a connection and read the message.\n\t\t\t\tvar buf bytes.Buffer\n\t\t\t\tif _, err := io.CopyN(&buf, conn, int64(len(msg)-1)); err != nil {\n\t\t\t\t\terrC <- err\n\t\t\t\t\treturn\n\t\t\t\t} else if 
!bytes.Equal(msg[1:], buf.Bytes()) {\n\t\t\t\t\terrC <- fmt.Errorf(\"message mismatch:\\n\\nexp=%x\\n\\ngot=%x\\n\\n\", msg[1:], buf.Bytes())\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Write response.\n\t\t\t\tif _, err := conn.Write([]byte(\"OK\")); err != nil {\n\t\t\t\t\terrC <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}(i, ln)\n\t\t}\n\n\t\t// Begin serving from the listener.\n\t\tgo mux.Serve(tcpListener)\n\n\t\t// Write message to TCP listener and read OK response.\n\t\tconn, err := net.Dial(\"tcp\", tcpListener.Addr().String())\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if _, err = conn.Write(msg); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\t// Read the response into the buffer.\n\t\tvar resp [2]byte\n\t\t_, err = io.ReadFull(conn, resp[:])\n\n\t\t// If the message header is less than n then expect a response.\n\t\t// Otherwise we should get an EOF because the mux closed.\n\t\tif len(msg) > 0 && uint8(msg[0]) < n {\n\t\t\tif string(resp[:]) != `OK` {\n\t\t\t\tt.Fatalf(\"unexpected response: %s\", resp[:])\n\t\t\t}\n\t\t} else {\n\t\t\tif err == nil || (err != io.EOF && !(strings.Contains(err.Error(), \"connection reset by peer\") ||\n\t\t\t\tstrings.Contains(err.Error(), \"closed by the remote host\"))) {\n\t\t\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t\t\t}\n\t\t}\n\n\t\t// Close connection.\n\t\tif err := conn.Close(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\t// Close original TCP listener and wait for all goroutines to close.\n\t\ttcpListener.Close()\n\n\t\tgo func() {\n\t\t\twg.Wait()\n\t\t\tclose(errC)\n\t\t}()\n\n\t\tok := true\n\t\tfor err := range errC {\n\t\t\tif err != nil {\n\t\t\t\tok = false\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t}\n\n\t\treturn ok\n\t}, nil); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\n// Ensure two handlers cannot be registered for the same header byte.\nfunc TestMux_Listen_ErrAlreadyRegistered(t *testing.T) {\n\tdefer func() {\n\t\tif r := recover(); r != `listener already registered under header byte: 5` 
{\n\t\t\tt.Fatalf(\"unexpected recover: %#v\", r)\n\t\t}\n\t}()\n\n\t// Register two listeners with the same header byte.\n\tmux := tcp.NewMux()\n\tmux.Listen(5)\n\tmux.Listen(5)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/test.sh",
    "content": "#!/bin/bash\n#\n# This is the InfluxDB test script.\n# This script can run tests in different environments.\n#\n# Usage: ./test.sh <environment_index>\n# Corresponding environments for environment_index:\n#      0: normal 64bit tests\n#      1: race enabled 64bit tests\n#      3: normal 32bit tests\n#      save: build the docker images and save them to DOCKER_SAVE_DIR. Do not run tests.\n#      count: print the number of test environments\n#      *: to run all tests in parallel containers\n#\n# Logs from the test runs will be saved in OUTPUT_DIR, which defaults to ./test-logs\n#\n\n# Get dir of script and make it is our working directory.\nDIR=$(cd $(dirname \"${BASH_SOURCE[0]}\") && pwd)\ncd $DIR\n\nENVIRONMENT_INDEX=$1\n# Set the default OUTPUT_DIR\nOUTPUT_DIR=${OUTPUT_DIR-./test-logs}\n# Set the default DOCKER_SAVE_DIR\nDOCKER_SAVE_DIR=${DOCKER_SAVE_DIR-$HOME/docker}\n# Set default parallelism\nPARALLELISM=${PARALLELISM-1}\n# Set default timeout\nTIMEOUT=${TIMEOUT-960s}\n\n# Default to deleteing the container\nDOCKER_RM=${DOCKER_RM-true}\n\n# Update this value if you add a new test environment.\nENV_COUNT=3\n\n# Default return code 0\nrc=0\n\n# Executes the given statement, and exits if the command returns a non-zero code.\nfunction exit_if_fail {\n    command=$@\n    echo \"Executing '$command'\"\n    $command\n    rc=$?\n    if [ $rc -ne 0 ]; then\n        echo \"'$command' returned $rc.\"\n        exit $rc\n    fi\n}\n\n# Convert dockerfile name to valid docker image tag name.\nfunction filename2imagename {\n    echo ${1/Dockerfile/influxdb}\n}\n\n# Run a test in a docker container\n# Usage: run_test_docker <Dockerfile> <env_name>\nfunction run_test_docker {\n    local dockerfile=$1\n    local imagename=$(filename2imagename \"$dockerfile\")\n    shift\n    local name=$1\n    shift\n    local logfile=\"$OUTPUT_DIR/${name}.log\"\n\n    build_docker_image \"$dockerfile\" \"$imagename\"\n    echo \"Running test in docker $name with args $@\"\n\n   
 docker run \\\n         --rm=$DOCKER_RM \\\n         -v \"$DIR:/root/go/src/github.com/influxdata/influxdb\" \\\n         -e \"INFLUXDB_DATA_ENGINE=$INFLUXDB_DATA_ENGINE\" \\\n         -e \"GORACE=$GORACE\" \\\n         -e \"GO_CHECKOUT=$GO_CHECKOUT\" \\\n         -e \"AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID\" \\\n         -e \"AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY\" \\\n         \"$imagename\" \\\n         \"--parallel=$PARALLELISM\" \\\n         \"--timeout=$TIMEOUT\" \\\n         \"$@\" \\\n         2>&1 | tee \"$logfile\"\n    return \"${PIPESTATUS[0]}\"\n\n}\n\n# Build the docker image defined by given dockerfile.\nfunction build_docker_image {\n    local dockerfile=$1\n    local imagename=$2\n\n    echo \"Building docker image $imagename\"\n    exit_if_fail docker build -f \"$dockerfile\" -t \"$imagename\" .\n}\n\n\n# Saves a docker image to $DOCKER_SAVE_DIR\nfunction save_docker_image {\n    local dockerfile=$1\n    local imagename=$(filename2imagename \"$dockerfile\")\n    local imagefile=\"$DOCKER_SAVE_DIR/${imagename}.tar.gz\"\n\n    if [ ! -d  \"$DOCKER_SAVE_DIR\" ]\n    then\n        mkdir -p \"$DOCKER_SAVE_DIR\"\n    fi\n\n    if [[ -e \"$imagefile\" ]]\n    then\n        zcat $imagefile | docker load\n    fi\n    imageid=$(docker images -q --no-trunc \"$imagename\")\n    build_docker_image \"$dockerfile\" \"$imagename\"\n    newimageid=$(docker images -q --no-trunc \"$imagename\")\n    rc=0\n    if [ \"$imageid\" != \"$newimageid\" ]\n    then\n        docker save \"$imagename\" | gzip > \"$imagefile\"\n        rc=\"${PIPESTATUS[0]}\"\n    fi\n    return \"$rc\"\n}\n\nif [ ! 
-d \"$OUTPUT_DIR\" ]\nthen\n    mkdir -p \"$OUTPUT_DIR\"\nfi\n\n# Run the tests.\ncase $ENVIRONMENT_INDEX in\n    0)\n        # 64 bit tests\n        run_test_docker Dockerfile_build_ubuntu64 test_64bit --test --junit-report\n        rc=$?\n        ;;\n    1)\n        # 64 bit race tests\n        GORACE=\"halt_on_error=1\"\n        run_test_docker Dockerfile_build_ubuntu64 test_64bit_race --test --junit-report --race\n        rc=$?\n        ;;\n    2)\n        # 32 bit tests\n        run_test_docker Dockerfile_build_ubuntu32 test_32bit --test --junit-report --arch=i386\n        rc=$?\n        ;;\n    \"save\")\n        # Save docker images for every Dockerfile_build* file.\n        # Useful for creating an external cache.\n        pids=()\n        for d in Dockerfile_build*\n        do\n            echo \"Building and saving $d ...\"\n            save_docker_image \"$d\" > $OUTPUT_DIR/${d}.log 2>&1 &\n            pids+=($!)\n        done\n        echo \"Waiting...\"\n        # Wait for all saves to finish\n        for pid in \"${pids[@]}\"\n        do\n            wait $pid\n            rc=$(($? + $rc))\n        done\n        # Check if all saves passed\n        if [ $rc -eq 0 ]\n        then\n            echo \"All saves succeeded\"\n        else\n            echo \"Some saves failed, check logs in $OUTPUT_DIR\"\n        fi\n        ;;\n    \"count\")\n        echo $ENV_COUNT\n        ;;\n    *)\n        echo \"No individual test environment specified running tests for all $ENV_COUNT environments.\"\n        # Run all test environments\n        pids=()\n        for t in $(seq 0 \"$(($ENV_COUNT - 1))\")\n        do\n            $0 $t 2>&1 > /dev/null &\n            # add PID to list\n            pids+=($!)\n        done\n\n        echo \"Started all tests. Follow logs in ${OUTPUT_DIR}. Waiting...\"\n\n        # Wait for all tests to finish\n        for pid in \"${pids[@]}\"\n        do\n            wait $pid\n            rc=$(($? 
+ $rc))\n        done\n\n        # Check if all tests passed\n        if [ $rc -eq 0 ]\n        then\n            echo \"All test have passed\"\n        else\n            echo \"Some tests failed check logs in $OUTPUT_DIR for results\"\n        fi\n        ;;\nesac\n\nexit $rc\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tests/README.md",
    "content": "# Server Integration Tests\n\nThis directory contains integration tests for the database.\n\nTo run them using an in-process local server:\n\n```sh\ngo test ./tests\n```\n\nThey can also be run against a remote server running in a separate process\nor machine\n\n```sh\nURL=http://127.0.0.1:8086 go test -parallel 1 ./tests\n```\n\nWhen running tests against a remote server, `-parallel 1` is currently needed\nas many of the tests use the same DB and RP names which causes tests to fail\nwhen run concurrently.\n\nWhen adding tests, try to add tests that will always work for remote server usage.\n\n## Structure\n\nCurrently, the file `server_test.go` has integration tests for single node scenarios.\nAt some point we'll need to add cluster tests, and may add them in a different file, or\nrename `server_test.go` to `server_single_node_test.go` or something like that.\n\n## What is in a test?\n\nEach test is broken apart effectively into the following areas:\n\n- Write sample data\n- Use cases for table driven test, that include a command (typically a query) and an expected result.\n\nWhen each test runs it does the following:\n\n- init: determines if there are any writes and if so, writes them to the in-memory database\n- queries: iterate through each query, executing the command, and comparing the results to the expected result.\n\n## Idempotent - Allows for parallel tests\n\nEach test should be `idempotent`, meaning that its data will not be affected by other tests, or use cases within the table tests themselves.\nThis allows for parallel testing, keeping the test suite total execution time very low.\n\n### Basic sample test\n\n```go\n// Ensure the server can have a database with multiple measurements.\nfunc TestServer_Query_Multiple_Measurements(t *testing.T) {\n    t.Parallel()\n    s := OpenServer(NewConfig(), \"\")\n    defer s.Close()\n\n    if err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicyInfo(\"rp0\", 1, 1*time.Hour)); err 
!= nil {\n        t.Fatal(err)\n    }\n\n    // Make sure we do writes for measurements that will span across shards\n    writes := []string{\n        fmt.Sprintf(\"cpu,host=server01 value=100,core=4 %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n        fmt.Sprintf(\"cpu1,host=server02 value=50,core=2 %d\", mustParseTime(time.RFC3339Nano, \"2015-01-01T00:00:00Z\").UnixNano()),\n    }\n    test := NewTest(\"db0\", \"rp0\")\n    test.write = strings.Join(writes, \"\\n\")\n\n    test.addQueries([]*Query{\n        &Query{\n            name:    \"measurement in one shard but not another shouldn't panic server\",\n            command: `SELECT host,value  FROM db0.rp0.cpu`,\n            exp:     `{\"results\":[{\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",100]]}]}]}`,\n        },\n    }...)\n\n    if err := test.init(s); err != nil {\n        t.Fatalf(\"test init failed: %s\", err)\n    }\n\n    for _, query := range test.queries {\n        if query.skip {\n            t.Logf(\"SKIP:: %s\", query.name)\n            continue\n        }\n        if err := query.Execute(s); err != nil {\n            t.Error(query.Error(err))\n        } else if !query.success() {\n            t.Error(query.failureMessage())\n        }\n    }\n}\n```\n\nLet's break this down:\n\nIn this test, we first tell it to run in parallel with the `t.Parallel()` call.\n\nWe then open a new server with:\n\n```go\ns := OpenServer(NewConfig(), \"\")\ndefer s.Close()\n```\n\nIf needed, we create a database and default retention policy.  This is usually needed\nwhen inserting and querying data.  
This is not needed if you are testing commands like `CREATE DATABASE`, `SHOW DIAGNOSTICS`, etc.\n\n```go\nif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicyInfo(\"rp0\", 1, 1*time.Hour)); err != nil {\n    t.Fatal(err)\n}\n```\n\nNext, set up the write data you need:\n\n```go\nwrites := []string{\n    fmt.Sprintf(\"cpu,host=server01 value=100,core=4 %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n    fmt.Sprintf(\"cpu1,host=server02 value=50,core=2 %d\", mustParseTime(time.RFC3339Nano, \"2015-01-01T00:00:00Z\").UnixNano()),\n}\n```\nCreate a new test with the database and retention policy:\n\n```go\ntest := NewTest(\"db0\", \"rp0\")\n```\n\nSend in the writes:\n```go\ntest.write = strings.Join(writes, \"\\n\")\n```\n\nAdd some queries (the second one is mocked out to show how to add more than one):\n\n```go\ntest.addQueries([]*Query{\n    &Query{\n        name:    \"measurement in one shard but not another shouldn't panic server\",\n        command: `SELECT host,value  FROM db0.rp0.cpu`,\n        exp:     `{\"results\":[{\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",100]]}]}]}`,\n    },\n    &Query{\n        name:    \"another test here...\",\n        command: `Some query command`,\n        exp:     `the expected results`,\n    },\n}...)\n```\n\nThe rest of the code is boilerplate execution code.  It is purposefully not refactored out to a helper\nto make sure the test failure reports the proper lines for debugging purposes.\n\n#### Running the tests\n\nTo run the tests:\n\n```sh\ngo test ./cmd/influxd/run -parallel 500 -timeout 10s\n```\n\n#### Running a specific test\n\n```sh\ngo test ./cmd/influxd/run -parallel 500 -timeout 10s -run TestServer_Query_Fill\n```\n\n#### Verbose feedback\n\nBy default, all logs are silenced when testing.  
If you pass in the `-v` flag, the test suite becomes verbose, and enables all logging in the system\n\n```sh\ngo test ./cmd/influxd/run -parallel 500 -timeout 10s -run TestServer_Query_Fill -v\n```\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tests/backup_restore_test.go",
    "content": "package tests\n\nimport (\n\t\"io/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/cmd/influxd/backup\"\n\t\"github.com/influxdata/influxdb/cmd/influxd/restore\"\n)\n\nfunc TestServer_BackupAndRestore(t *testing.T) {\n\tconfig := NewConfig()\n\tconfig.Data.Engine = \"tsm1\"\n\tconfig.Data.Dir, _ = ioutil.TempDir(\"\", \"data_backup\")\n\tconfig.Meta.Dir, _ = ioutil.TempDir(\"\", \"meta_backup\")\n\tconfig.BindAddress = freePort()\n\n\tbackupDir, _ := ioutil.TempDir(\"\", \"backup\")\n\tdefer os.RemoveAll(backupDir)\n\n\tdb := \"mydb\"\n\trp := \"forever\"\n\texpected := `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"myseries\",\"columns\":[\"time\",\"host\",\"value\"],\"values\":[[\"1970-01-01T00:00:00.001Z\",\"A\",23]]}]}]}`\n\n\t// set the cache snapshot size low so that a single point will cause TSM file creation\n\tconfig.Data.CacheSnapshotMemorySize = 1\n\n\tfunc() {\n\t\ts := OpenServer(config)\n\t\tdefer s.Close()\n\n\t\tif _, ok := s.(*RemoteServer); ok {\n\t\t\tt.Skip(\"Skipping.  
Cannot modify remote server config\")\n\t\t}\n\n\t\tif err := s.CreateDatabaseAndRetentionPolicy(db, newRetentionPolicySpec(rp, 1, 0), true); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif _, err := s.Write(db, rp, \"myseries,host=A value=23 1000000\", nil); err != nil {\n\t\t\tt.Fatalf(\"failed to write: %s\", err)\n\t\t}\n\n\t\t// wait for the snapshot to write\n\t\ttime.Sleep(time.Second)\n\n\t\tres, err := s.Query(`select * from \"mydb\".\"forever\".\"myseries\"`)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"error querying: %s\", err.Error())\n\t\t}\n\t\tif res != expected {\n\t\t\tt.Fatalf(\"query results wrong:\\n\\texp: %s\\n\\tgot: %s\", expected, res)\n\t\t}\n\n\t\t// now backup\n\t\tcmd := backup.NewCommand()\n\t\t_, port, err := net.SplitHostPort(config.BindAddress)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\thostAddress := net.JoinHostPort(\"localhost\", port)\n\t\tif err := cmd.Run(\"-host\", hostAddress, \"-database\", \"mydb\", backupDir); err != nil {\n\t\t\tt.Fatalf(\"error backing up: %s, hostAddress: %s\", err.Error(), hostAddress)\n\t\t}\n\t}()\n\n\tif _, err := os.Stat(config.Meta.Dir); err == nil || !os.IsNotExist(err) {\n\t\tt.Fatalf(\"meta dir should be deleted\")\n\t}\n\n\tif _, err := os.Stat(config.Data.Dir); err == nil || !os.IsNotExist(err) {\n\t\tt.Fatalf(\"meta dir should be deleted\")\n\t}\n\n\t// restore\n\tcmd := restore.NewCommand()\n\n\tif err := cmd.Run(\"-metadir\", config.Meta.Dir, \"-datadir\", config.Data.Dir, \"-database\", \"mydb\", backupDir); err != nil {\n\t\tt.Fatalf(\"error restoring: %s\", err.Error())\n\t}\n\n\t// Make sure node.json was restored\n\tnodePath := filepath.Join(config.Meta.Dir, \"node.json\")\n\tif _, err := os.Stat(nodePath); err != nil || os.IsNotExist(err) {\n\t\tt.Fatalf(\"node.json should exist\")\n\t}\n\n\t// now open it up and verify we're good\n\ts := OpenServer(config)\n\tdefer s.Close()\n\n\tres, err := s.Query(`select * from \"mydb\".\"forever\".\"myseries\"`)\n\tif err != nil 
{\n\t\tt.Fatalf(\"error querying: %s\", err.Error())\n\t}\n\tif res != expected {\n\t\tt.Fatalf(\"query results wrong:\\n\\texp: %s\\n\\tgot: %s\", expected, res)\n\t}\n}\n\nfunc freePort() string {\n\tl, _ := net.Listen(\"tcp\", \"\")\n\tdefer l.Close()\n\treturn l.Addr().String()\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tests/server_bench_test.go",
    "content": "package tests\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"testing\"\n)\n\nvar strResult string\n\nfunc BenchmarkServer_Query_Count_1(b *testing.B)    { benchmarkServerQueryCount(b, 1) }\nfunc BenchmarkServer_Query_Count_1K(b *testing.B)   { benchmarkServerQueryCount(b, 1000) }\nfunc BenchmarkServer_Query_Count_100K(b *testing.B) { benchmarkServerQueryCount(b, 100000) }\nfunc BenchmarkServer_Query_Count_1M(b *testing.B)   { benchmarkServerQueryCount(b, 1000000) }\n\nfunc benchmarkServerQueryCount(b *testing.B, pointN int) {\n\tif _, err := benchServer.Query(`DROP MEASUREMENT cpu`); err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\t// Write data into server.\n\tvar buf bytes.Buffer\n\tfor i := 0; i < pointN; i++ {\n\t\tfmt.Fprintf(&buf, `cpu value=100 %d`, i+1)\n\t\tif i != pointN-1 {\n\t\t\tfmt.Fprint(&buf, \"\\n\")\n\t\t}\n\t}\n\tbenchServer.MustWrite(\"db0\", \"rp0\", buf.String(), nil)\n\n\t// Query simple count from server.\n\tb.ResetTimer()\n\tb.ReportAllocs()\n\tvar err error\n\tfor i := 0; i < b.N; i++ {\n\t\tif strResult, err = benchServer.Query(`SELECT count(value) FROM db0.rp0.cpu`); err != nil {\n\t\t\tb.Fatal(err)\n\t\t} else if strResult != fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:00Z\",%d]]}]}]}`, pointN) {\n\t\t\tb.Fatalf(\"unexpected result: %s\", strResult)\n\t\t}\n\t}\n}\n\nfunc BenchmarkServer_Query_Count_Where_500(b *testing.B) {\n\tbenchmarkServerQueryCountWhere(b, false, 500)\n}\nfunc BenchmarkServer_Query_Count_Where_1K(b *testing.B) {\n\tbenchmarkServerQueryCountWhere(b, false, 1000)\n}\nfunc BenchmarkServer_Query_Count_Where_10K(b *testing.B) {\n\tbenchmarkServerQueryCountWhere(b, false, 10000)\n}\nfunc BenchmarkServer_Query_Count_Where_100K(b *testing.B) {\n\tbenchmarkServerQueryCountWhere(b, false, 100000)\n}\n\nfunc BenchmarkServer_Query_Count_Where_Regex_500(b *testing.B) {\n\tbenchmarkServerQueryCountWhere(b, true, 
500)\n}\nfunc BenchmarkServer_Query_Count_Where_Regex_1K(b *testing.B) {\n\tbenchmarkServerQueryCountWhere(b, true, 1000)\n}\nfunc BenchmarkServer_Query_Count_Where_Regex_10K(b *testing.B) {\n\tbenchmarkServerQueryCountWhere(b, true, 10000)\n}\nfunc BenchmarkServer_Query_Count_Where_Regex_100K(b *testing.B) {\n\tbenchmarkServerQueryCountWhere(b, true, 100000)\n}\n\nfunc benchmarkServerQueryCountWhere(b *testing.B, useRegex bool, pointN int) {\n\tif _, err := benchServer.Query(`DROP MEASUREMENT cpu`); err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\t// Write data into server.\n\tvar buf bytes.Buffer\n\tfor i := 0; i < pointN; i++ {\n\t\tfmt.Fprintf(&buf, `cpu,host=server-%d value=100 %d`, i, i)\n\t\tif i != pointN-1 {\n\t\t\tfmt.Fprint(&buf, \"\\n\")\n\t\t}\n\t}\n\tbenchServer.MustWrite(\"db0\", \"rp0\", buf.String(), nil)\n\n\t// Query count from server with WHERE\n\tvar (\n\t\terr       error\n\t\tcondition = `host = 'server-487'`\n\t)\n\n\tif useRegex {\n\t\tcondition = `host =~ /^server-487$/`\n\t}\n\n\tb.ResetTimer()\n\tb.ReportAllocs()\n\tfor i := 0; i < b.N; i++ {\n\t\tif strResult, err = benchServer.Query(fmt.Sprintf(`SELECT count(value) FROM db0.rp0.cpu WHERE %s`, condition)); err != nil {\n\t\t\tb.Fatal(err)\n\t\t} else if strResult == `{\"results\":[{}]}` {\n\t\t\tb.Fatal(\"no results\")\n\t\t}\n\t}\n}\n\nfunc BenchmarkServer_ShowSeries_1(b *testing.B)    { benchmarkServerShowSeries(b, 1) }\nfunc BenchmarkServer_ShowSeries_1K(b *testing.B)   { benchmarkServerShowSeries(b, 1000) }\nfunc BenchmarkServer_ShowSeries_100K(b *testing.B) { benchmarkServerShowSeries(b, 100000) }\nfunc BenchmarkServer_ShowSeries_1M(b *testing.B)   { benchmarkServerShowSeries(b, 1000000) }\n\nfunc benchmarkServerShowSeries(b *testing.B, pointN int) {\n\tif _, err := benchServer.Query(`DROP MEASUREMENT cpu`); err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\t// Write data into server.\n\tvar buf bytes.Buffer\n\tfor i := 0; i < pointN; i++ {\n\t\tfmt.Fprintf(&buf, `cpu,host=server%d value=100 %d`, i, 
i+1)\n\t\tif i != pointN-1 {\n\t\t\tfmt.Fprint(&buf, \"\\n\")\n\t\t}\n\t}\n\tbenchServer.MustWrite(\"db0\", \"rp0\", buf.String(), nil)\n\n\t// Query simple count from server.\n\tb.ResetTimer()\n\tb.ReportAllocs()\n\tvar err error\n\tfor i := 0; i < b.N; i++ {\n\t\tif strResult, err = benchServer.QueryWithParams(`SHOW SERIES`, url.Values{\"db\": {\"db0\"}}); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tests/server_helpers.go",
    "content": "// This package is a set of convenience helpers and structs to make integration testing easier\npackage tests\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/cmd/influxd/run\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/services/httpd\"\n\t\"github.com/influxdata/influxdb/services/meta\"\n\t\"github.com/influxdata/influxdb/toml\"\n)\n\n// Server represents a test wrapper for run.Server.\ntype Server interface {\n\tURL() string\n\tOpen() error\n\tSetLogOutput(w io.Writer)\n\tClose()\n\tClosed() bool\n\n\tCreateDatabase(db string) (*meta.DatabaseInfo, error)\n\tCreateDatabaseAndRetentionPolicy(db string, rp *meta.RetentionPolicySpec, makeDefault bool) error\n\tCreateSubscription(database, rp, name, mode string, destinations []string) error\n\tReset() error\n\n\tQuery(query string) (results string, err error)\n\tQueryWithParams(query string, values url.Values) (results string, err error)\n\n\tWrite(db, rp, body string, params url.Values) (results string, err error)\n\tMustWrite(db, rp, body string, params url.Values) string\n\tWritePoints(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, user meta.User, points []models.Point) error\n}\n\n// RemoteServer is a Server that is accessed remotely via the HTTP API\ntype RemoteServer struct {\n\t*client\n\turl string\n}\n\nfunc (s *RemoteServer) URL() string {\n\treturn s.url\n}\n\nfunc (s *RemoteServer) Open() error {\n\tresp, err := http.Get(s.URL() + \"/ping\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tbody := strings.TrimSpace(string(MustReadAll(resp.Body)))\n\tif resp.StatusCode != http.StatusNoContent {\n\t\treturn fmt.Errorf(\"unexpected status code: code=%d, body=%s\", resp.StatusCode, body)\n\t}\n\treturn nil\n}\n\nfunc (s *RemoteServer) Close() {\n\t// 
ignore, we can't shutdown a remote server\n}\n\nfunc (s *RemoteServer) SetLogOutput(w io.Writer) {\n\t// ignore, we can't change the logging of a remote server\n}\n\nfunc (s *RemoteServer) Closed() bool {\n\treturn true\n}\n\nfunc (s *RemoteServer) CreateDatabase(db string) (*meta.DatabaseInfo, error) {\n\tstmt := fmt.Sprintf(\"CREATE+DATABASE+%s\", db)\n\n\t_, err := s.HTTPPost(s.URL()+\"/query?q=\"+stmt, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &meta.DatabaseInfo{}, nil\n}\n\nfunc (s *RemoteServer) CreateDatabaseAndRetentionPolicy(db string, rp *meta.RetentionPolicySpec, makeDefault bool) error {\n\tif _, err := s.CreateDatabase(db); err != nil {\n\t\treturn err\n\t}\n\n\tstmt := fmt.Sprintf(\"CREATE+RETENTION+POLICY+%s+ON+\\\"%s\\\"+DURATION+%s+REPLICATION+%v+SHARD+DURATION+%s\",\n\t\trp.Name, db, rp.Duration, *rp.ReplicaN, rp.ShardGroupDuration)\n\tif makeDefault {\n\t\tstmt += \"+DEFAULT\"\n\t}\n\n\t_, err := s.HTTPPost(s.URL()+\"/query?q=\"+stmt, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (s *RemoteServer) CreateSubscription(database, rp, name, mode string, destinations []string) error {\n\tdests := make([]string, 0, len(destinations))\n\tfor _, d := range destinations {\n\t\tdests = append(dests, \"'\"+d+\"'\")\n\t}\n\n\tstmt := fmt.Sprintf(\"CREATE+SUBSCRIPTION+%s+ON+\\\"%s\\\".\\\"%s\\\"+DESTINATIONS+%v+%s\",\n\t\tname, database, rp, mode, strings.Join(dests, \",\"))\n\n\t_, err := s.HTTPPost(s.URL()+\"/query?q=\"+stmt, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (s *RemoteServer) DropDatabase(db string) error {\n\tstmt := fmt.Sprintf(\"DROP+DATABASE+%s\", db)\n\n\t_, err := s.HTTPPost(s.URL()+\"/query?q=\"+stmt, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// Reset attempts to remove all database state by dropping everything\nfunc (s *RemoteServer) Reset() error {\n\tstmt := fmt.Sprintf(\"SHOW+DATABASES\")\n\tresults, err := s.HTTPPost(s.URL()+\"/query?q=\"+stmt, 
nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp := &httpd.Response{}\n\tif resp.UnmarshalJSON([]byte(results)); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, db := range resp.Results[0].Series[0].Values {\n\t\tif err := s.DropDatabase(fmt.Sprintf(\"%s\", db[0])); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n\n}\n\nfunc (s *RemoteServer) WritePoints(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, user meta.User, points []models.Point) error {\n\tpanic(\"WritePoints not implemented\")\n}\n\n// NewServer returns a new instance of Server.\nfunc NewServer(c *run.Config) Server {\n\tbuildInfo := &run.BuildInfo{\n\t\tVersion: \"testServer\",\n\t\tCommit:  \"testCommit\",\n\t\tBranch:  \"testBranch\",\n\t}\n\n\t// If URL exists, create a server that will run against a remote endpoint\n\tif url := os.Getenv(\"URL\"); url != \"\" {\n\t\ts := &RemoteServer{\n\t\t\turl: url,\n\t\t\tclient: &client{\n\t\t\t\tURLFn: func() string {\n\t\t\t\t\treturn url\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tif err := s.Reset(); err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\treturn s\n\t}\n\n\t// Otherwise create a local server\n\tsrv, _ := run.NewServer(c, buildInfo)\n\ts := LocalServer{\n\t\tclient: &client{},\n\t\tServer: srv,\n\t\tConfig: c,\n\t}\n\ts.client.URLFn = s.URL\n\treturn &s\n}\n\n// OpenServer opens a test server.\nfunc OpenServer(c *run.Config) Server {\n\ts := NewServer(c)\n\tconfigureLogging(s)\n\tif err := s.Open(); err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn s\n}\n\n// OpenServerWithVersion opens a test server with a specific version.\nfunc OpenServerWithVersion(c *run.Config, version string) Server {\n\t// We can't change the versino of a remote server.  
The test needs to\n\t// be skipped if using this func.\n\tif RemoteEnabled() {\n\t\tpanic(\"OpenServerWithVersion not support with remote server\")\n\t}\n\n\tbuildInfo := &run.BuildInfo{\n\t\tVersion: version,\n\t\tCommit:  \"\",\n\t\tBranch:  \"\",\n\t}\n\tsrv, _ := run.NewServer(c, buildInfo)\n\ts := LocalServer{\n\t\tclient: &client{},\n\t\tServer: srv,\n\t\tConfig: c,\n\t}\n\ts.client.URLFn = s.URL\n\n\tif err := s.Open(); err != nil {\n\t\tpanic(err.Error())\n\t}\n\tconfigureLogging(&s)\n\n\treturn &s\n}\n\n// OpenDefaultServer opens a test server with a default database & retention policy.\nfunc OpenDefaultServer(c *run.Config) Server {\n\ts := OpenServer(c)\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tpanic(err)\n\t}\n\treturn s\n}\n\n// LocalServer is a Server that is running in-process and can be accessed directly\ntype LocalServer struct {\n\tmu sync.RWMutex\n\t*run.Server\n\n\t*client\n\tConfig *run.Config\n}\n\n// Close shuts down the server and removes all temporary paths.\nfunc (s *LocalServer) Close() {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif err := s.Server.Close(); err != nil {\n\t\tpanic(err.Error())\n\t}\n\tif err := os.RemoveAll(s.Config.Meta.Dir); err != nil {\n\t\tpanic(err.Error())\n\t}\n\tif err := os.RemoveAll(s.Config.Data.Dir); err != nil {\n\t\tpanic(err.Error())\n\t}\n\t// Nil the server so our deadlock detector goroutine can determine if we completed writes\n\t// without timing out\n\ts.Server = nil\n}\n\nfunc (s *LocalServer) Closed() bool {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\treturn s.Server == nil\n}\n\n// URL returns the base URL for the httpd endpoint.\nfunc (s *LocalServer) URL() string {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\tfor _, service := range s.Services {\n\t\tif service, ok := service.(*httpd.Service); ok {\n\t\t\treturn \"http://\" + service.Addr().String()\n\t\t}\n\t}\n\tpanic(\"httpd server not found in services\")\n}\n\nfunc 
(s *LocalServer) CreateDatabase(db string) (*meta.DatabaseInfo, error) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\treturn s.MetaClient.CreateDatabase(db)\n}\n\n// CreateDatabaseAndRetentionPolicy will create the database and retention policy.\nfunc (s *LocalServer) CreateDatabaseAndRetentionPolicy(db string, rp *meta.RetentionPolicySpec, makeDefault bool) error {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\tif _, err := s.MetaClient.CreateDatabase(db); err != nil {\n\t\treturn err\n\t} else if _, err := s.MetaClient.CreateRetentionPolicy(db, rp, makeDefault); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (s *LocalServer) CreateSubscription(database, rp, name, mode string, destinations []string) error {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\treturn s.MetaClient.CreateSubscription(database, rp, name, mode, destinations)\n}\n\nfunc (s *LocalServer) DropDatabase(db string) error {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\treturn s.MetaClient.DropDatabase(db)\n}\n\nfunc (s *LocalServer) Reset() error {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\tfor _, db := range s.MetaClient.Databases() {\n\t\tif err := s.DropDatabase(db.Name); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *LocalServer) WritePoints(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, user meta.User, points []models.Point) error {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\treturn s.PointsWriter.WritePoints(database, retentionPolicy, consistencyLevel, user, points)\n}\n\n// client abstract querying and writing to a Server using HTTP\ntype client struct {\n\tURLFn func() string\n}\n\nfunc (c *client) URL() string {\n\treturn c.URLFn()\n}\n\n// Query executes a query against the server and returns the results.\nfunc (s *client) Query(query string) (results string, err error) {\n\treturn s.QueryWithParams(query, nil)\n}\n\n// MustQuery executes a query against the server and returns the results.\nfunc (s *client) MustQuery(query string) string 
{\n\tresults, err := s.Query(query)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn results\n}\n\n// Query executes a query against the server and returns the results.\nfunc (s *client) QueryWithParams(query string, values url.Values) (results string, err error) {\n\tvar v url.Values\n\tif values == nil {\n\t\tv = url.Values{}\n\t} else {\n\t\tv, _ = url.ParseQuery(values.Encode())\n\t}\n\tv.Set(\"q\", query)\n\treturn s.HTTPPost(s.URL()+\"/query?\"+v.Encode(), nil)\n}\n\n// MustQueryWithParams executes a query against the server and returns the results.\nfunc (s *client) MustQueryWithParams(query string, values url.Values) string {\n\tresults, err := s.QueryWithParams(query, values)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn results\n}\n\n// HTTPGet makes an HTTP GET request to the server and returns the response.\nfunc (s *client) HTTPGet(url string) (results string, err error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tbody := strings.TrimSpace(string(MustReadAll(resp.Body)))\n\tswitch resp.StatusCode {\n\tcase http.StatusBadRequest:\n\t\tif !expectPattern(\".*error parsing query*.\", body) {\n\t\t\treturn \"\", fmt.Errorf(\"unexpected status code: code=%d, body=%s\", resp.StatusCode, body)\n\t\t}\n\t\treturn body, nil\n\tcase http.StatusOK:\n\t\treturn body, nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"unexpected status code: code=%d, body=%s\", resp.StatusCode, body)\n\t}\n}\n\n// HTTPPost makes an HTTP POST request to the server and returns the response.\nfunc (s *client) HTTPPost(url string, content []byte) (results string, err error) {\n\tbuf := bytes.NewBuffer(content)\n\tresp, err := http.Post(url, \"application/json\", buf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tbody := strings.TrimSpace(string(MustReadAll(resp.Body)))\n\tswitch resp.StatusCode {\n\tcase http.StatusBadRequest:\n\t\tif !expectPattern(\".*error parsing query*.\", body) {\n\t\t\treturn \"\", fmt.Errorf(\"unexpected status code: 
code=%d, body=%s\", resp.StatusCode, body)\n\t\t}\n\t\treturn body, nil\n\tcase http.StatusOK, http.StatusNoContent:\n\t\treturn body, nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"unexpected status code: code=%d, body=%s\", resp.StatusCode, body)\n\t}\n}\n\ntype WriteError struct {\n\tbody       string\n\tstatusCode int\n}\n\nfunc (wr WriteError) StatusCode() int {\n\treturn wr.statusCode\n}\n\nfunc (wr WriteError) Body() string {\n\treturn wr.body\n}\n\nfunc (wr WriteError) Error() string {\n\treturn fmt.Sprintf(\"invalid status code: code=%d, body=%s\", wr.statusCode, wr.body)\n}\n\n// Write executes a write against the server and returns the results.\nfunc (s *client) Write(db, rp, body string, params url.Values) (results string, err error) {\n\tif params == nil {\n\t\tparams = url.Values{}\n\t}\n\tif params.Get(\"db\") == \"\" {\n\t\tparams.Set(\"db\", db)\n\t}\n\tif params.Get(\"rp\") == \"\" {\n\t\tparams.Set(\"rp\", rp)\n\t}\n\tresp, err := http.Post(s.URL()+\"/write?\"+params.Encode(), \"\", strings.NewReader(body))\n\tif err != nil {\n\t\treturn \"\", err\n\t} else if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {\n\t\treturn \"\", WriteError{statusCode: resp.StatusCode, body: string(MustReadAll(resp.Body))}\n\t}\n\treturn string(MustReadAll(resp.Body)), nil\n}\n\n// MustWrite executes a write to the server. 
Panic on error.\nfunc (s *client) MustWrite(db, rp, body string, params url.Values) string {\n\tresults, err := s.Write(db, rp, body, params)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn results\n}\n\n// NewConfig returns the default config with temporary paths.\nfunc NewConfig() *run.Config {\n\tc := run.NewConfig()\n\tc.BindAddress = \"127.0.0.1:0\"\n\tc.ReportingDisabled = true\n\tc.Coordinator.WriteTimeout = toml.Duration(30 * time.Second)\n\tc.Meta.Dir = MustTempFile()\n\n\tif !testing.Verbose() {\n\t\tc.Meta.LoggingEnabled = false\n\t}\n\n\tc.Data.Dir = MustTempFile()\n\tc.Data.WALDir = MustTempFile()\n\n\tindexVersion := os.Getenv(\"INFLUXDB_DATA_INDEX_VERSION\")\n\tif indexVersion != \"\" {\n\t\tc.Data.Index = indexVersion\n\t}\n\n\tc.HTTPD.Enabled = true\n\tc.HTTPD.BindAddress = \"127.0.0.1:0\"\n\tc.HTTPD.LogEnabled = testing.Verbose()\n\n\tc.Monitor.StoreEnabled = false\n\n\treturn c\n}\n\nfunc newRetentionPolicySpec(name string, rf int, duration time.Duration) *meta.RetentionPolicySpec {\n\treturn &meta.RetentionPolicySpec{Name: name, ReplicaN: &rf, Duration: &duration}\n}\n\nfunc maxInt64() string {\n\tmaxInt64, _ := json.Marshal(^int64(0))\n\treturn string(maxInt64)\n}\n\nfunc now() time.Time {\n\treturn time.Now().UTC()\n}\n\nfunc yesterday() time.Time {\n\treturn now().Add(-1 * time.Hour * 24)\n}\n\nfunc mustParseTime(layout, value string) time.Time {\n\ttm, err := time.Parse(layout, value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn tm\n}\n\nfunc mustParseLocation(tzname string) *time.Location {\n\tloc, err := time.LoadLocation(tzname)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn loc\n}\n\nvar LosAngeles = mustParseLocation(\"America/Los_Angeles\")\n\n// MustReadAll reads r. 
Panic on error.\nfunc MustReadAll(r io.Reader) []byte {\n\tb, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn b\n}\n\n// MustTempFile returns a path to a temporary file.\nfunc MustTempFile() string {\n\tf, err := ioutil.TempFile(\"\", \"influxd-\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tf.Close()\n\tos.Remove(f.Name())\n\treturn f.Name()\n}\n\nfunc RemoteEnabled() bool {\n\treturn os.Getenv(\"URL\") != \"\"\n}\n\nfunc expectPattern(exp, act string) bool {\n\tre := regexp.MustCompile(exp)\n\tif !re.MatchString(act) {\n\t\treturn false\n\t}\n\treturn true\n}\n\ntype Query struct {\n\tname     string\n\tcommand  string\n\tparams   url.Values\n\texp, act string\n\tpattern  bool\n\tskip     bool\n\trepeat   int\n\tonce     bool\n}\n\n// Execute runs the command and returns an err if it fails\nfunc (q *Query) Execute(s Server) (err error) {\n\tif q.params == nil {\n\t\tq.act, err = s.Query(q.command)\n\t\treturn\n\t}\n\tq.act, err = s.QueryWithParams(q.command, q.params)\n\treturn\n}\n\nfunc (q *Query) success() bool {\n\tif q.pattern {\n\t\treturn expectPattern(q.exp, q.act)\n\t}\n\treturn q.exp == q.act\n}\n\nfunc (q *Query) Error(err error) string {\n\treturn fmt.Sprintf(\"%s: %v\", q.name, err)\n}\n\nfunc (q *Query) failureMessage() string {\n\treturn fmt.Sprintf(\"%s: unexpected results\\nquery:  %s\\nparams:  %v\\nexp:    %s\\nactual: %s\\n\", q.name, q.command, q.params, q.exp, q.act)\n}\n\ntype Write struct {\n\tdb   string\n\trp   string\n\tdata string\n}\n\nfunc (w *Write) duplicate() *Write {\n\treturn &Write{\n\t\tdb:   w.db,\n\t\trp:   w.rp,\n\t\tdata: w.data,\n\t}\n}\n\ntype Writes []*Write\n\nfunc (a Writes) duplicate() Writes {\n\twrites := make(Writes, 0, len(a))\n\tfor _, w := range a {\n\t\twrites = append(writes, w.duplicate())\n\t}\n\treturn writes\n}\n\ntype Tests map[string]Test\n\ntype Test struct {\n\tinitialized bool\n\twrites      Writes\n\tparams      url.Values\n\tdb          string\n\trp          string\n\texp  
       string\n\tqueries     []*Query\n}\n\nfunc NewTest(db, rp string) Test {\n\treturn Test{\n\t\tdb: db,\n\t\trp: rp,\n\t}\n}\n\nfunc (t Test) duplicate() Test {\n\ttest := Test{\n\t\tinitialized: t.initialized,\n\t\twrites:      t.writes.duplicate(),\n\t\tdb:          t.db,\n\t\trp:          t.rp,\n\t\texp:         t.exp,\n\t\tqueries:     make([]*Query, len(t.queries)),\n\t}\n\n\tif t.params != nil {\n\t\tt.params = url.Values{}\n\t\tfor k, a := range t.params {\n\t\t\tvals := make([]string, len(a))\n\t\t\tcopy(vals, a)\n\t\t\ttest.params[k] = vals\n\t\t}\n\t}\n\tcopy(test.queries, t.queries)\n\treturn test\n}\n\nfunc (t *Test) addQueries(q ...*Query) {\n\tt.queries = append(t.queries, q...)\n}\n\nfunc (t *Test) database() string {\n\tif t.db != \"\" {\n\t\treturn t.db\n\t}\n\treturn \"db0\"\n}\n\nfunc (t *Test) retentionPolicy() string {\n\tif t.rp != \"\" {\n\t\treturn t.rp\n\t}\n\treturn \"default\"\n}\n\nfunc (t *Test) init(s Server) error {\n\tif len(t.writes) == 0 || t.initialized {\n\t\treturn nil\n\t}\n\tif t.db == \"\" {\n\t\tt.db = \"db0\"\n\t}\n\tif t.rp == \"\" {\n\t\tt.rp = \"rp0\"\n\t}\n\n\tif err := writeTestData(s, t); err != nil {\n\t\treturn err\n\t}\n\n\tt.initialized = true\n\n\treturn nil\n}\n\nfunc writeTestData(s Server, t *Test) error {\n\tfor i, w := range t.writes {\n\t\tif w.db == \"\" {\n\t\t\tw.db = t.database()\n\t\t}\n\t\tif w.rp == \"\" {\n\t\t\tw.rp = t.retentionPolicy()\n\t\t}\n\n\t\tif err := s.CreateDatabaseAndRetentionPolicy(w.db, newRetentionPolicySpec(w.rp, 1, 0), true); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif res, err := s.Write(w.db, w.rp, w.data, t.params); err != nil {\n\t\t\treturn fmt.Errorf(\"write #%d: %s\", i, err)\n\t\t} else if t.exp != res {\n\t\t\treturn fmt.Errorf(\"unexpected results\\nexp: %s\\ngot: %s\\n\", t.exp, res)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc configureLogging(s Server) {\n\t// Set the logger to discard unless verbose is on\n\tif !testing.Verbose() 
{\n\t\ts.SetLogOutput(ioutil.Discard)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tests/server_suite.go",
    "content": "package tests\n\nimport (\n\t\"fmt\"\n\t\"net/url\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar tests Tests\n\n// Load all shared tests\nfunc init() {\n\ttests = make(map[string]Test)\n\n\ttests[\"database_commands\"] = Test{\n\t\tqueries: []*Query{\n\t\t\t&Query{\n\t\t\t\tname:    \"create database should succeed\",\n\t\t\t\tcommand: `CREATE DATABASE db0`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create database with retention duration should succeed\",\n\t\t\t\tcommand: `CREATE DATABASE db0_r WITH DURATION 24h REPLICATION 2 NAME db0_r_policy`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create database with retention policy should fail with invalid name\",\n\t\t\t\tcommand: `CREATE DATABASE db1 WITH NAME \".\"`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"invalid name\"}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create database should error with some unquoted names\",\n\t\t\t\tcommand: `CREATE DATABASE 0xdb0`,\n\t\t\t\texp:     `{\"error\":\"error parsing query: found 0xdb0, expected identifier at line 1, char 17\"}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create database should error with invalid characters\",\n\t\t\t\tcommand: `CREATE DATABASE \".\"`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"invalid name\"}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create database with retention duration should error with bad retention duration\",\n\t\t\t\tcommand: `CREATE DATABASE db0 WITH DURATION xyz`,\n\t\t\t\texp:     `{\"error\":\"error parsing query: found xyz, expected duration at line 1, char 35\"}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create database with retention replication should error with bad retention replication number\",\n\t\t\t\tcommand: `CREATE DATABASE db0 WITH 
REPLICATION xyz`,\n\t\t\t\texp:     `{\"error\":\"error parsing query: found xyz, expected integer at line 1, char 38\"}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create database with retention name should error with missing retention name\",\n\t\t\t\tcommand: `CREATE DATABASE db0 WITH NAME`,\n\t\t\t\texp:     `{\"error\":\"error parsing query: found EOF, expected identifier at line 1, char 31\"}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show database should succeed\",\n\t\t\t\tcommand: `SHOW DATABASES`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"databases\",\"columns\":[\"name\"],\"values\":[[\"db0\"],[\"db0_r\"]]}]}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create database should not error with existing database\",\n\t\t\t\tcommand: `CREATE DATABASE db0`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create database should create non-existing database\",\n\t\t\t\tcommand: `CREATE DATABASE db1`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create database with retention duration should error if retention policy is different\",\n\t\t\t\tcommand: `CREATE DATABASE db1 WITH DURATION 24h`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"retention policy conflicts with an existing policy\"}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create database should error with bad retention duration\",\n\t\t\t\tcommand: `CREATE DATABASE db1 WITH DURATION xyz`,\n\t\t\t\texp:     `{\"error\":\"error parsing query: found xyz, expected duration at line 1, char 35\"}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show database should succeed\",\n\t\t\t\tcommand: `SHOW DATABASES`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"databases\",\"columns\":[\"name\"],\"values\":[[\"db0\"],[\"db0_r\"],[\"db1\"]]}]}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"drop database 
db0 should succeed\",\n\t\t\t\tcommand: `DROP DATABASE db0`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"drop database db0_r should succeed\",\n\t\t\t\tcommand: `DROP DATABASE db0_r`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"drop database db1 should succeed\",\n\t\t\t\tcommand: `DROP DATABASE db1`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"drop database should not error if it does not exists\",\n\t\t\t\tcommand: `DROP DATABASE db1`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"drop database should not error with non-existing database db1\",\n\t\t\t\tcommand: `DROP DATABASE db1`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show database should have no results\",\n\t\t\t\tcommand: `SHOW DATABASES`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"databases\",\"columns\":[\"name\"]}]}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create database with shard group duration should succeed\",\n\t\t\t\tcommand: `CREATE DATABASE db0 WITH SHARD DURATION 61m`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create database with shard group duration and duration should succeed\",\n\t\t\t\tcommand: `CREATE DATABASE db1 WITH DURATION 60m SHARD DURATION 30m`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t},\n\t\t},\n\t}\n\n\ttests[\"drop_and_recreate_database\"] = Test{\n\t\tdb: \"db0\",\n\t\trp: \"rp0\",\n\t\twrites: Writes{\n\t\t\t&Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano())},\n\t\t},\n\t\tqueries: 
[]*Query{\n\t\t\t&Query{\n\t\t\t\tname:    \"Drop database after data write\",\n\t\t\t\tcommand: `DROP DATABASE db0`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Recreate database\",\n\t\t\t\tcommand: `CREATE DATABASE db0`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Recreate retention policy\",\n\t\t\t\tcommand: `CREATE RETENTION POLICY rp0 ON db0 DURATION 365d REPLICATION 1 DEFAULT`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Show measurements after recreate\",\n\t\t\t\tcommand: `SHOW MEASUREMENTS`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Query data after recreate\",\n\t\t\t\tcommand: `SELECT * FROM cpu`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t},\n\t}\n\n\ttests[\"drop_database_isolated\"] = Test{\n\t\tdb: \"db0\",\n\t\trp: \"rp0\",\n\t\twrites: Writes{\n\t\t\t&Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano())},\n\t\t},\n\t\tqueries: []*Query{\n\t\t\t&Query{\n\t\t\t\tname:    \"Query data from 1st database\",\n\t\t\t\tcommand: `SELECT * FROM cpu`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"host\",\"region\",\"val\"],\"values\":[[\"2000-01-01T00:00:00Z\",\"serverA\",\"uswest\",23.2]]}]}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Query data from 1st database with GROUP BY *\",\n\t\t\t\tcommand: `SELECT * FROM cpu GROUP BY *`,\n\t\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"serverA\",\"region\":\"uswest\"},\"columns\":[\"time\",\"val\"],\"values\":[[\"2000-01-01T00:00:00Z\",23.2]]}]}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Drop other database\",\n\t\t\t\tcommand: `DROP DATABASE db1`,\n\t\t\t\tonce:    true,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Query data from 1st database and ensure it's still there\",\n\t\t\t\tcommand: `SELECT * FROM cpu`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"host\",\"region\",\"val\"],\"values\":[[\"2000-01-01T00:00:00Z\",\"serverA\",\"uswest\",23.2]]}]}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Query data from 1st database and ensure it's still there with GROUP BY *\",\n\t\t\t\tcommand: `SELECT * FROM cpu GROUP BY *`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"serverA\",\"region\":\"uswest\"},\"columns\":[\"time\",\"val\"],\"values\":[[\"2000-01-01T00:00:00Z\",23.2]]}]}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t},\n\t}\n\n\ttests[\"delete_series\"] = Test{\n\t\tdb: \"db0\",\n\t\trp: \"rp0\",\n\t\twrites: Writes{\n\t\t\t&Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano())},\n\t\t\t&Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=100 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-02T00:00:00Z\").UnixNano())},\n\t\t\t&Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=200 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-03T00:00:00Z\").UnixNano())},\n\t\t\t&Write{db: \"db1\", data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, 
\"2000-01-01T00:00:00Z\").UnixNano())},\n\t\t},\n\t\tqueries: []*Query{\n\t\t\t&Query{\n\t\t\t\tname:    \"Show series is present\",\n\t\t\t\tcommand: `SHOW SERIES`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"cpu,host=serverA,region=uswest\"]]}]}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Delete series\",\n\t\t\t\tcommand: `DELETE FROM cpu WHERE time < '2000-01-03T00:00:00Z'`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Show series still exists\",\n\t\t\t\tcommand: `SHOW SERIES`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"cpu,host=serverA,region=uswest\"]]}]}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Make sure last point still exists\",\n\t\t\t\tcommand: `SELECT * FROM cpu`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"host\",\"region\",\"val\"],\"values\":[[\"2000-01-03T00:00:00Z\",\"serverA\",\"uswest\",200]]}]}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Make sure data wasn't deleted from other database.\",\n\t\t\t\tcommand: `SELECT * FROM cpu`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"host\",\"region\",\"val\"],\"values\":[[\"2000-01-01T00:00:00Z\",\"serverA\",\"uswest\",23.2]]}]}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db1\"}},\n\t\t\t},\n\t\t},\n\t}\n\n\ttests[\"drop_and_recreate_series\"] = Test{\n\t\tdb: \"db0\",\n\t\trp: \"rp0\",\n\t\twrites: Writes{\n\t\t\t&Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, 
\"2000-01-01T00:00:00Z\").UnixNano())},\n\t\t\t&Write{db: \"db1\", data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano())},\n\t\t},\n\t\tqueries: []*Query{\n\t\t\t&Query{\n\t\t\t\tname:    \"Show series is present\",\n\t\t\t\tcommand: `SHOW SERIES`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"cpu,host=serverA,region=uswest\"]]}]}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Drop series after data write\",\n\t\t\t\tcommand: `DROP SERIES FROM cpu`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Show series is gone\",\n\t\t\t\tcommand: `SHOW SERIES`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Make sure data wasn't deleted from other database.\",\n\t\t\t\tcommand: `SELECT * FROM cpu`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"host\",\"region\",\"val\"],\"values\":[[\"2000-01-01T00:00:00Z\",\"serverA\",\"uswest\",23.2]]}]}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db1\"}},\n\t\t\t},\n\t\t},\n\t}\n\ttests[\"drop_and_recreate_series_retest\"] = Test{\n\t\tdb: \"db0\",\n\t\trp: \"rp0\",\n\t\twrites: Writes{\n\t\t\t&Write{data: fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano())},\n\t\t},\n\t\tqueries: []*Query{\n\t\t\t&Query{\n\t\t\t\tname:    \"Show series is present again after re-write\",\n\t\t\t\tcommand: `SHOW SERIES`,\n\t\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"cpu,host=serverA,region=uswest\"]]}]}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t},\n\t}\n\n\ttests[\"drop_series_from_regex\"] = Test{\n\t\tdb: \"db0\",\n\t\trp: \"rp0\",\n\t\twrites: Writes{\n\t\t\t&Write{data: strings.Join([]string{\n\t\t\t\tfmt.Sprintf(`a,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t\t\tfmt.Sprintf(`aa,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t\t\tfmt.Sprintf(`b,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t\t\tfmt.Sprintf(`c,host=serverA,region=uswest val=30.2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t\t}, \"\\n\")},\n\t\t},\n\t\tqueries: []*Query{\n\t\t\t&Query{\n\t\t\t\tname:    \"Show series is present\",\n\t\t\t\tcommand: `SHOW SERIES`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"a,host=serverA,region=uswest\"],[\"aa,host=serverA,region=uswest\"],[\"b,host=serverA,region=uswest\"],[\"c,host=serverA,region=uswest\"]]}]}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Drop series after data write\",\n\t\t\t\tcommand: `DROP SERIES FROM /a.*/`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Show series is gone\",\n\t\t\t\tcommand: `SHOW SERIES`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"b,host=serverA,region=uswest\"],[\"c,host=serverA,region=uswest\"]]}]}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Drop 
series from regex that matches no measurements\",\n\t\t\t\tcommand: `DROP SERIES FROM /a.*/`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"make sure DROP SERIES doesn't delete anything when regex doesn't match\",\n\t\t\t\tcommand: `SHOW SERIES`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"b,host=serverA,region=uswest\"],[\"c,host=serverA,region=uswest\"]]}]}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Drop series with WHERE field should error\",\n\t\t\t\tcommand: `DROP SERIES FROM c WHERE val > 50.0`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"shard 1: fields not supported in WHERE clause during deletion\"}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"make sure DROP SERIES with field in WHERE didn't delete data\",\n\t\t\t\tcommand: `SHOW SERIES`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"b,host=serverA,region=uswest\"],[\"c,host=serverA,region=uswest\"]]}]}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Drop series with WHERE time should error\",\n\t\t\t\tcommand: `DROP SERIES FROM c WHERE time > now() - 1d`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"DROP SERIES doesn't support time in WHERE clause\"}]}`,\n\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t},\n\t\t},\n\t}\n\n\ttests[\"retention_policy_commands\"] = Test{\n\t\tdb: \"db0\",\n\t\tqueries: []*Query{\n\t\t\t&Query{\n\t\t\t\tname:    \"create retention policy with invalid name should return an error\",\n\t\t\t\tcommand: `CREATE RETENTION POLICY \".\" ON db0 DURATION 1d REPLICATION 1`,\n\t\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"error\":\"invalid name\"}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create retention policy should succeed\",\n\t\t\t\tcommand: `CREATE RETENTION POLICY rp0 ON db0 DURATION 1h REPLICATION 1`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show retention policy should succeed\",\n\t\t\t\tcommand: `SHOW RETENTION POLICIES ON db0`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"name\",\"duration\",\"shardGroupDuration\",\"replicaN\",\"default\"],\"values\":[[\"rp0\",\"1h0m0s\",\"1h0m0s\",1,false]]}]}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"alter retention policy should succeed\",\n\t\t\t\tcommand: `ALTER RETENTION POLICY rp0 ON db0 DURATION 2h REPLICATION 3 DEFAULT`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show retention policy should have new altered information\",\n\t\t\t\tcommand: `SHOW RETENTION POLICIES ON db0`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"name\",\"duration\",\"shardGroupDuration\",\"replicaN\",\"default\"],\"values\":[[\"rp0\",\"2h0m0s\",\"1h0m0s\",3,true]]}]}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show retention policy should still show policy\",\n\t\t\t\tcommand: `SHOW RETENTION POLICIES ON db0`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"name\",\"duration\",\"shardGroupDuration\",\"replicaN\",\"default\"],\"values\":[[\"rp0\",\"2h0m0s\",\"1h0m0s\",3,true]]}]}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create a second non-default retention policy\",\n\t\t\t\tcommand: `CREATE RETENTION POLICY rp2 ON db0 DURATION 1h REPLICATION 1`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show retention policy should 
show both\",\n\t\t\t\tcommand: `SHOW RETENTION POLICIES ON db0`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"name\",\"duration\",\"shardGroupDuration\",\"replicaN\",\"default\"],\"values\":[[\"rp0\",\"2h0m0s\",\"1h0m0s\",3,true],[\"rp2\",\"1h0m0s\",\"1h0m0s\",1,false]]}]}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"dropping non-default retention policy succeed\",\n\t\t\t\tcommand: `DROP RETENTION POLICY rp2 ON db0`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create a third non-default retention policy\",\n\t\t\t\tcommand: `CREATE RETENTION POLICY rp3 ON db0 DURATION 1h REPLICATION 1 SHARD DURATION 30m`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create retention policy with default on\",\n\t\t\t\tcommand: `CREATE RETENTION POLICY rp3 ON db0 DURATION 1h REPLICATION 1 SHARD DURATION 30m DEFAULT`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"retention policy conflicts with an existing policy\"}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show retention policy should show both with custom shard\",\n\t\t\t\tcommand: `SHOW RETENTION POLICIES ON db0`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"name\",\"duration\",\"shardGroupDuration\",\"replicaN\",\"default\"],\"values\":[[\"rp0\",\"2h0m0s\",\"1h0m0s\",3,true],[\"rp3\",\"1h0m0s\",\"1h0m0s\",1,false]]}]}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"dropping non-default custom shard retention policy succeed\",\n\t\t\t\tcommand: `DROP RETENTION POLICY rp3 ON db0`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show retention policy should show just default\",\n\t\t\t\tcommand: `SHOW RETENTION POLICIES ON db0`,\n\t\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"name\",\"duration\",\"shardGroupDuration\",\"replicaN\",\"default\"],\"values\":[[\"rp0\",\"2h0m0s\",\"1h0m0s\",3,true]]}]}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Ensure retention policy with unacceptable retention cannot be created\",\n\t\t\t\tcommand: `CREATE RETENTION POLICY rp4 ON db0 DURATION 1s REPLICATION 1`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"retention policy duration must be at least 1h0m0s\"}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Check error when deleting retention policy on non-existent database\",\n\t\t\t\tcommand: `DROP RETENTION POLICY rp1 ON mydatabase`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"Ensure retention policy for non existing db is not created\",\n\t\t\t\tcommand: `CREATE RETENTION POLICY rp0 ON nodb DURATION 1h REPLICATION 1`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"database not found: nodb\"}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"drop rp0\",\n\t\t\t\tcommand: `DROP RETENTION POLICY rp0 ON db0`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t},\n\t\t\t// INF Shard Group Duration will normalize to the Retention Policy Duration Default\n\t\t\t&Query{\n\t\t\t\tname:    \"create retention policy with inf shard group duration\",\n\t\t\t\tcommand: `CREATE RETENTION POLICY rpinf ON db0 DURATION INF REPLICATION 1 SHARD DURATION 0s`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t// 0s Shard Group Duration will normalize to the Replication Policy Duration\n\t\t\t&Query{\n\t\t\t\tname:    \"create retention policy with 0s shard group duration\",\n\t\t\t\tcommand: `CREATE RETENTION POLICY rpzero ON db0 DURATION 1h REPLICATION 1 SHARD DURATION 0s`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    
true,\n\t\t\t},\n\t\t\t// 1s Shard Group Duration will normalize to the MinDefaultRetentionPolicyDuration\n\t\t\t&Query{\n\t\t\t\tname:    \"create retention policy with 1s shard group duration\",\n\t\t\t\tcommand: `CREATE RETENTION POLICY rponesecond ON db0 DURATION 2h REPLICATION 1 SHARD DURATION 1s`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show retention policy: validate normalized shard group durations are working\",\n\t\t\t\tcommand: `SHOW RETENTION POLICIES ON db0`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"name\",\"duration\",\"shardGroupDuration\",\"replicaN\",\"default\"],\"values\":[[\"rpinf\",\"0s\",\"168h0m0s\",1,false],[\"rpzero\",\"1h0m0s\",\"1h0m0s\",1,false],[\"rponesecond\",\"2h0m0s\",\"1h0m0s\",1,false]]}]}]}`,\n\t\t\t},\n\t\t},\n\t}\n\n\ttests[\"retention_policy_auto_create\"] = Test{\n\t\tqueries: []*Query{\n\t\t\t&Query{\n\t\t\t\tname:    \"create database should succeed\",\n\t\t\t\tcommand: `CREATE DATABASE db0`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t\tonce:    true,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show retention policies should return auto-created policy\",\n\t\t\t\tcommand: `SHOW RETENTION POLICIES ON db0`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"name\",\"duration\",\"shardGroupDuration\",\"replicaN\",\"default\"],\"values\":[[\"autogen\",\"0s\",\"168h0m0s\",1,true]]}]}]}`,\n\t\t\t},\n\t\t},\n\t}\n\n}\n\nfunc (tests Tests) load(t *testing.T, key string) Test {\n\ttest, ok := tests[key]\n\tif !ok {\n\t\tt.Fatalf(\"no test %q\", key)\n\t}\n\n\treturn test.duplicate()\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tests/server_test.go",
    "content": "package tests\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/coordinator\"\n\t\"github.com/influxdata/influxdb/models\"\n)\n\n// Global server used by benchmarks\nvar benchServer Server\n\nfunc TestMain(m *testing.M) {\n\tflag.Parse()\n\n\t// Setup\n\tc := NewConfig()\n\tc.Retention.Enabled = false\n\tc.Monitor.StoreEnabled = false\n\tc.Meta.LoggingEnabled = false\n\tc.Subscriber.Enabled = false\n\tc.ContinuousQuery.Enabled = false\n\tc.Data.MaxValuesPerTag = 1000000 // 1M\n\tbenchServer = OpenDefaultServer(c)\n\n\t// Run suite.\n\tr := m.Run()\n\n\t// Cleanup\n\tbenchServer.Close()\n\n\tos.Exit(r)\n}\n\n// Ensure that HTTP responses include the InfluxDB version.\nfunc TestServer_HTTPResponseVersion(t *testing.T) {\n\tif RemoteEnabled() {\n\t\tt.Skip(\"Skipping.  Cannot change version of remote server\")\n\t}\n\n\tversion := \"v1234\"\n\ts := OpenServerWithVersion(NewConfig(), version)\n\tdefer s.Close()\n\n\tresp, _ := http.Get(s.URL() + \"/query\")\n\tgot := resp.Header.Get(\"X-Influxdb-Version\")\n\tif got != version {\n\t\tt.Errorf(\"Server responded with incorrect version, exp %s, got %s\", version, got)\n\t}\n}\n\n// Ensure the database commands work.\nfunc TestServer_DatabaseCommands(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := tests.load(t, \"database_commands\")\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_DropAndRecreateDatabase(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := tests.load(t, \"drop_and_recreate_database\")\n\n\tif err := 
s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_DropDatabaseIsolated(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := tests.load(t, \"drop_database_isolated\")\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db1\", newRetentionPolicySpec(\"rp1\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_DeleteSeries(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := tests.load(t, \"delete_series\")\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip 
{\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_DropAndRecreateSeries(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := tests.load(t, \"drop_and_recreate_series\")\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n\n\t// Re-write data and test again.\n\tretest := tests.load(t, \"drop_and_recreate_series_retest\")\n\n\tfor i, query := range retest.queries {\n\t\tif i == 0 {\n\t\t\tif err := retest.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_DropSeriesFromRegex(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := tests.load(t, \"drop_series_from_regex\")\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil 
{\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure retention policy commands work.\nfunc TestServer_RetentionPolicyCommands(t *testing.T) {\n\tt.Parallel()\n\tc := NewConfig()\n\tc.Meta.RetentionAutoCreate = false\n\ts := OpenServer(c)\n\tdefer s.Close()\n\n\tif _, ok := s.(*RemoteServer); ok {\n\t\tt.Skip(\"Skipping. Cannot alter auto create rp remotely\")\n\t}\n\n\ttest := tests.load(t, \"retention_policy_commands\")\n\n\t// Create a database.\n\tif _, err := s.CreateDatabase(test.database()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the autocreation of retention policy works.\nfunc TestServer_DatabaseRetentionPolicyAutoCreate(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := tests.load(t, \"retention_policy_auto_create\")\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_ShowDatabases_NoAuth(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := Test{\n\t\tqueries: []*Query{\n\t\t\t&Query{\n\t\t\t\tname:    \"create db1\",\n\t\t\t\tcommand: \"CREATE DATABASE db1\",\n\t\t\t\texp:     
`{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create db2\",\n\t\t\t\tcommand: \"CREATE DATABASE db2\",\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show dbs\",\n\t\t\t\tcommand: \"SHOW DATABASES\",\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"databases\",\"columns\":[\"name\"],\"values\":[[\"db1\"],[\"db2\"]]}]}]}`,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(fmt.Sprintf(\"command: %s - err: %s\", query.command, query.Error(err)))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_ShowDatabases_WithAuth(t *testing.T) {\n\tt.Parallel()\n\tc := NewConfig()\n\tc.HTTPD.AuthEnabled = true\n\ts := OpenServer(c)\n\tdefer s.Close()\n\n\tif _, ok := s.(*RemoteServer); ok {\n\t\tt.Skip(\"Skipping.  
Cannot enable auth on remote server\")\n\t}\n\n\tadminParams := map[string][]string{\"u\": []string{\"admin\"}, \"p\": []string{\"admin\"}}\n\treaderParams := map[string][]string{\"u\": []string{\"reader\"}, \"p\": []string{\"r\"}}\n\twriterParams := map[string][]string{\"u\": []string{\"writer\"}, \"p\": []string{\"w\"}}\n\tnobodyParams := map[string][]string{\"u\": []string{\"nobody\"}, \"p\": []string{\"n\"}}\n\n\ttest := Test{\n\t\tqueries: []*Query{\n\t\t\t&Query{\n\t\t\t\tname:    \"create admin\",\n\t\t\t\tcommand: `CREATE USER admin WITH PASSWORD 'admin' WITH ALL PRIVILEGES`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create databases\",\n\t\t\t\tcommand: \"CREATE DATABASE dbR; CREATE DATABASE dbW\",\n\t\t\t\tparams:  adminParams,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0},{\"statement_id\":1}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show dbs as admin\",\n\t\t\t\tcommand: \"SHOW DATABASES\",\n\t\t\t\tparams:  adminParams,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"databases\",\"columns\":[\"name\"],\"values\":[[\"dbR\"],[\"dbW\"]]}]}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"create users\",\n\t\t\t\tcommand: `CREATE USER reader WITH PASSWORD 'r'; GRANT READ ON \"dbR\" TO \"reader\"; CREATE USER writer WITH PASSWORD 'w'; GRANT WRITE ON \"dbW\" TO \"writer\"; CREATE USER nobody WITH PASSWORD 'n'`,\n\t\t\t\tparams:  adminParams,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0},{\"statement_id\":1},{\"statement_id\":2},{\"statement_id\":3},{\"statement_id\":4}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show dbs as reader\",\n\t\t\t\tcommand: \"SHOW DATABASES\",\n\t\t\t\tparams:  readerParams,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"databases\",\"columns\":[\"name\"],\"values\":[[\"dbR\"]]}]}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show dbs as writer\",\n\t\t\t\tcommand: \"SHOW 
DATABASES\",\n\t\t\t\tparams:  writerParams,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"databases\",\"columns\":[\"name\"],\"values\":[[\"dbW\"]]}]}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show dbs as nobody\",\n\t\t\t\tcommand: \"SHOW DATABASES\",\n\t\t\t\tparams:  nobodyParams,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"databases\",\"columns\":[\"name\"]}]}]}`,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(fmt.Sprintf(\"command: %s - err: %s\", query.command, query.Error(err)))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure user commands work.\nfunc TestServer_UserCommands(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\t// Create a database.\n\tif _, err := s.CreateDatabase(\"db0\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttest := Test{\n\t\tqueries: []*Query{\n\t\t\t&Query{\n\t\t\t\tname:    \"show users, no actual users\",\n\t\t\t\tcommand: `SHOW USERS`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"user\",\"admin\"]}]}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    `create user`,\n\t\t\t\tcommand: \"CREATE USER jdoe WITH PASSWORD '1337'\",\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show users, 1 existing user\",\n\t\t\t\tcommand: `SHOW USERS`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"user\",\"admin\"],\"values\":[[\"jdoe\",false]]}]}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"grant all priviledges to jdoe\",\n\t\t\t\tcommand: `GRANT ALL PRIVILEGES TO jdoe`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"show users, existing user as admin\",\n\t\t\t\tcommand: `SHOW USERS`,\n\t\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"user\",\"admin\"],\"values\":[[\"jdoe\",true]]}]}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"grant DB privileges to user\",\n\t\t\t\tcommand: `GRANT READ ON db0 TO jdoe`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"revoke all privileges\",\n\t\t\t\tcommand: `REVOKE ALL PRIVILEGES FROM jdoe`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"bad create user request\",\n\t\t\t\tcommand: `CREATE USER 0xBAD WITH PASSWORD pwd1337`,\n\t\t\t\texp:     `{\"error\":\"error parsing query: found 0xBAD, expected identifier at line 1, char 13\"}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"bad create user request, no name\",\n\t\t\t\tcommand: `CREATE USER WITH PASSWORD pwd1337`,\n\t\t\t\texp:     `{\"error\":\"error parsing query: found WITH, expected identifier at line 1, char 13\"}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"bad create user request, no password\",\n\t\t\t\tcommand: `CREATE USER jdoe`,\n\t\t\t\texp:     `{\"error\":\"error parsing query: found EOF, expected WITH at line 1, char 18\"}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"drop user\",\n\t\t\t\tcommand: `DROP USER jdoe`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"make sure user was dropped\",\n\t\t\t\tcommand: `SHOW USERS`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"user\",\"admin\"]}]}]}`,\n\t\t\t},\n\t\t\t&Query{\n\t\t\t\tname:    \"delete non existing user\",\n\t\t\t\tcommand: `DROP USER noone`,\n\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"user not found\"}]}`,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(fmt.Sprintf(\"command: %s - err: 
%s\", query.command, query.Error(err)))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server will write all points possible with exception to the field type conflict.\n// This should return a partial write and a status of 400\nfunc TestServer_Write_FieldTypeConflict(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif res, err := s.Write(\"db0\", \"rp0\", fmt.Sprintf(\"cpu value=1i %d\", mustParseTime(time.RFC3339Nano, \"2015-01-01T00:00:01Z\").UnixNano()), nil); err != nil {\n\t\tt.Fatal(err)\n\t} else if exp := ``; exp != res {\n\t\tt.Fatalf(\"unexpected results\\nexp: %s\\ngot: %s\\n\", exp, res)\n\t}\n\n\t// Verify the data was written.\n\tif res, err := s.Query(`SELECT * FROM db0.rp0.cpu`); err != nil {\n\t\tt.Fatal(err)\n\t} else if exp := `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-01-01T00:00:01Z\",1]]}]}]}`; exp != res {\n\t\tt.Fatalf(\"unexpected results\\nexp: %s\\ngot: %s\\n\", exp, res)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(\"cpu value=2i %d\", mustParseTime(time.RFC3339Nano, \"2015-01-01T00:00:02Z\").UnixNano()),\n\t\tfmt.Sprintf(\"cpu value=3  %d\", mustParseTime(time.RFC3339Nano, \"2015-01-01T00:00:03Z\").UnixNano()),\n\t\tfmt.Sprintf(\"cpu value=4i %d\", mustParseTime(time.RFC3339Nano, \"2015-01-01T00:00:04Z\").UnixNano()),\n\t}\n\tres, err := s.Write(\"db0\", \"rp0\", strings.Join(writes, \"\\n\"), nil)\n\tif err == nil {\n\t\tt.Fatal(\"expected error, got nil\")\n\t}\n\twr, ok := err.(WriteError)\n\tif !ok {\n\t\tt.Fatalf(\"wrong error type %v\", err)\n\t}\n\tif exp, got := http.StatusBadRequest, wr.StatusCode(); exp != got {\n\t\tt.Fatalf(\"unexpected status code\\nexp: %d\\ngot: %d\\n\", exp, got)\n\t}\n\tif exp := ``; exp != res 
{\n\t\tt.Fatalf(\"unexpected results\\nexp: %s\\ngot: %s\\n\", exp, res)\n\t}\n\n\t// Verify the data was written.\n\tif res, err := s.Query(`SELECT * FROM db0.rp0.cpu`); err != nil {\n\t\tt.Fatal(err)\n\t} else if exp := `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-01-01T00:00:01Z\",1],[\"2015-01-01T00:00:02Z\",2],[\"2015-01-01T00:00:04Z\",4]]}]}]}`; exp != res {\n\t\tt.Fatalf(\"unexpected results\\nexp: %s\\ngot: %s\\n\", exp, res)\n\t}\n}\n\n// Ensure the server can create a single point via line protocol with float type and read it back.\nfunc TestServer_Write_LineProtocol_Float(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 1*time.Hour), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tnow := now()\n\tif res, err := s.Write(\"db0\", \"rp0\", `cpu,host=server01 value=1.0 `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil {\n\t\tt.Fatal(err)\n\t} else if exp := ``; exp != res {\n\t\tt.Fatalf(\"unexpected results\\nexp: %s\\ngot: %s\\n\", exp, res)\n\t}\n\n\t// Verify the data was written.\n\tif res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil {\n\t\tt.Fatal(err)\n\t} else if exp := fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",1]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res {\n\t\tt.Fatalf(\"unexpected results\\nexp: %s\\ngot: %s\\n\", exp, res)\n\t}\n}\n\n// Ensure the server can create a single point via line protocol with bool type and read it back.\nfunc TestServer_Write_LineProtocol_Bool(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 1*time.Hour), true); err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\n\tnow := now()\n\tif res, err := s.Write(\"db0\", \"rp0\", `cpu,host=server01 value=true `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil {\n\t\tt.Fatal(err)\n\t} else if exp := ``; exp != res {\n\t\tt.Fatalf(\"unexpected results\\nexp: %s\\ngot: %s\\n\", exp, res)\n\t}\n\n\t// Verify the data was written.\n\tif res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil {\n\t\tt.Fatal(err)\n\t} else if exp := fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",true]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res {\n\t\tt.Fatalf(\"unexpected results\\nexp: %s\\ngot: %s\\n\", exp, res)\n\t}\n}\n\n// Ensure the server can create a single point via line protocol with string type and read it back.\nfunc TestServer_Write_LineProtocol_String(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 1*time.Hour), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tnow := now()\n\tif res, err := s.Write(\"db0\", \"rp0\", `cpu,host=server01 value=\"disk full\" `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil {\n\t\tt.Fatal(err)\n\t} else if exp := ``; exp != res {\n\t\tt.Fatalf(\"unexpected results\\nexp: %s\\ngot: %s\\n\", exp, res)\n\t}\n\n\t// Verify the data was written.\n\tif res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil {\n\t\tt.Fatal(err)\n\t} else if exp := fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",\"disk full\"]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res {\n\t\tt.Fatalf(\"unexpected results\\nexp: %s\\ngot: %s\\n\", exp, res)\n\t}\n}\n\n// Ensure the server can create a single point via line protocol with integer type and read it 
back.\nfunc TestServer_Write_LineProtocol_Integer(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 1*time.Hour), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tnow := now()\n\tif res, err := s.Write(\"db0\", \"rp0\", `cpu,host=server01 value=100 `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil {\n\t\tt.Fatal(err)\n\t} else if exp := ``; exp != res {\n\t\tt.Fatalf(\"unexpected results\\nexp: %s\\ngot: %s\\n\", exp, res)\n\t}\n\n\t// Verify the data was written.\n\tif res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil {\n\t\tt.Fatal(err)\n\t} else if exp := fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",100]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res {\n\t\tt.Fatalf(\"unexpected results\\nexp: %s\\ngot: %s\\n\", exp, res)\n\t}\n}\n\n// Ensure the server returns a partial write response when some points fail to parse. Also validate that\n// the successfully parsed points can be queried.\nfunc TestServer_Write_LineProtocol_Partial(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 1*time.Hour), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tnow := now()\n\tpoints := []string{\n\t\t\"cpu,host=server01 value=100 \" + strconv.FormatInt(now.UnixNano(), 10),\n\t\t\"cpu,host=server01 value=NaN \" + strconv.FormatInt(now.UnixNano(), 20),\n\t\t\"cpu,host=server01 value=NaN \" + strconv.FormatInt(now.UnixNano(), 30),\n\t}\n\tif res, err := s.Write(\"db0\", \"rp0\", strings.Join(points, \"\\n\"), nil); err == nil {\n\t\tt.Fatal(\"expected error. 
got nil\", err)\n\t} else if exp := ``; exp != res {\n\t\tt.Fatalf(\"unexpected results\\nexp: %s\\ngot: %s\\n\", exp, res)\n\t} else if exp := \"partial write\"; !strings.Contains(err.Error(), exp) {\n\t\tt.Fatalf(\"unexpected error: exp\\nexp: %v\\ngot: %v\", exp, err)\n\t}\n\n\t// Verify the data was written.\n\tif res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil {\n\t\tt.Fatal(err)\n\t} else if exp := fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",100]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res {\n\t\tt.Fatalf(\"unexpected results\\nexp: %s\\ngot: %s\\n\", exp, res)\n\t}\n}\n\n// Ensure the server can query with default databases (via param) and default retention policy\nfunc TestServer_Query_DefaultDBAndRP(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(`cpu value=1.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T01:00:00Z\").UnixNano())},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"default db and rp\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM cpu GROUP BY *`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T01:00:00Z\",1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"default rp exists\",\n\t\t\tcommand: `show retention policies ON db0`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"name\",\"duration\",\"shardGroupDuration\",\"replicaN\",\"default\"],\"values\":[[\"autogen\",\"0s\",\"168h0m0s\",1,false],[\"rp0\",\"0s\",\"168h0m0s\",1,true]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"default rp\",\n\t\t\tcommand: `SELECT * FROM db0..cpu GROUP BY *`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T01:00:00Z\",1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"default dp\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM rp0.cpu GROUP BY *`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T01:00:00Z\",1]]}]}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can have a database with multiple measurements.\nfunc TestServer_Query_Multiple_Measurements(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\t// Make sure we do writes for measurements that will span across shards\n\twrites := []string{\n\t\tfmt.Sprintf(\"cpu,host=server01 value=100,core=4 %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(\"cpu1,host=server02 value=50,core=2 %d\", mustParseTime(time.RFC3339Nano, \"2015-01-01T00:00:00Z\").UnixNano()),\n\t}\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"measurement in one shard but not another shouldn't panic server\",\n\t\t\tcommand: `SELECT host,value  FROM db0.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"host\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",\"server01\",100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"measurement in one shard but not 
another shouldn't panic server\",\n\t\t\tcommand: `SELECT host,value  FROM db0.rp0.cpu GROUP BY host`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"host\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",\"server01\",100]]}]}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server correctly supports data with identical tag values.\nfunc TestServer_Query_IdenticalTagValues(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\twrites := []string{\n\t\tfmt.Sprintf(\"cpu,t1=val1 value=1 %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(\"cpu,t2=val2 value=2 %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:00Z\").UnixNano()),\n\t\tfmt.Sprintf(\"cpu,t1=val2 value=3 %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T00:02:00Z\").UnixNano()),\n\t}\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"measurements with identical tag values - SELECT *, no GROUP BY\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu GROUP BY *`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"t1\":\"\",\"t2\":\"val2\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:01:00Z\",2]]},{\"name\":\"cpu\",\"tags\":{\"t1\":\"val1\",\"t2\":\"\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",1]]},{\"name\":\"cpu\",\"tags\":{\"t1\":\"val2\",\"t2\":\"\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:02:00Z\",3]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"measurements with identical tag values - SELECT *, with GROUP BY\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu GROUP BY t1,t2`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"t1\":\"\",\"t2\":\"val2\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:01:00Z\",2]]},{\"name\":\"cpu\",\"tags\":{\"t1\":\"val1\",\"t2\":\"\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",1]]},{\"name\":\"cpu\",\"tags\":{\"t1\":\"val2\",\"t2\":\"\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:02:00Z\",3]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"measurements with identical tag values - SELECT value no GROUP BY\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",1],[\"2000-01-01T00:01:00Z\",2],[\"2000-01-01T00:02:00Z\",3]]}]}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can handle a query that involves accessing no shards.\nfunc TestServer_Query_NoShards(t *testing.T) 
{\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tnow := now()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: `cpu,host=server01 value=1 ` + strconv.FormatInt(now.UnixNano(), 10)},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"selecting value should succeed\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu WHERE time < now() - 1d`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can query a non-existent field\nfunc TestServer_Query_NonExistent(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tnow := now()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: `cpu,host=server01 value=1 ` + strconv.FormatInt(now.UnixNano(), 10)},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"selecting value should succeed\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",1]]}]}]}`, now.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"selecting non-existent should succeed\",\n\t\t\tcommand: `SELECT foo FROM db0.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := 
query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can perform basic math\nfunc TestServer_Query_Math(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tnow := now()\n\twrites := []string{\n\t\t\"float value=42 \" + strconv.FormatInt(now.UnixNano(), 10),\n\t\t\"integer value=42i \" + strconv.FormatInt(now.UnixNano(), 10),\n\t}\n\n\ttest := NewTest(\"db\", \"rp\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"SELECT multiple of float value\",\n\t\t\tcommand: `SELECT value * 2 from db.rp.float`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"float\",\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",84]]}]}]}`, now.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"SELECT multiple of float value\",\n\t\t\tcommand: `SELECT 2 * value from db.rp.float`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"float\",\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",84]]}]}]}`, now.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"SELECT multiple of integer value\",\n\t\t\tcommand: `SELECT value * 2 from db.rp.integer`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"integer\",\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",84]]}]}]}`, now.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"SELECT float multiple of integer value\",\n\t\t\tcommand: `SELECT value * 2.0 from db.rp.integer`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"integer\",\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",84]]}]}]}`, now.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"SELECT square of float 
value\",\n\t\t\tcommand: `SELECT value * value from db.rp.float`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"float\",\"columns\":[\"time\",\"value_value\"],\"values\":[[\"%s\",1764]]}]}]}`, now.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"SELECT square of integer value\",\n\t\t\tcommand: `SELECT value * value from db.rp.integer`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"integer\",\"columns\":[\"time\",\"value_value\"],\"values\":[[\"%s\",1764]]}]}]}`, now.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"SELECT square of integer, float value\",\n\t\t\tcommand: `SELECT value * value,float from db.rp.integer`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"integer\",\"columns\":[\"time\",\"value_value\",\"float\"],\"values\":[[\"%s\",1764,null]]}]}]}`, now.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"SELECT square of integer value with alias\",\n\t\t\tcommand: `SELECT value * value as square from db.rp.integer`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"integer\",\"columns\":[\"time\",\"square\"],\"values\":[[\"%s\",1764]]}]}]}`, now.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"SELECT sum of aggregates\",\n\t\t\tcommand: `SELECT max(value) + min(value) from db.rp.integer`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"integer\",\"columns\":[\"time\",\"max_min\"],\"values\":[[\"1970-01-01T00:00:00Z\",84]]}]}]}`),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"SELECT square of enclosed integer value\",\n\t\t\tcommand: `SELECT ((value) * (value)) from db.rp.integer`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"integer\",\"columns\":[\"time\",\"value_value\"],\"values\":[[\"%s\",1764]]}]}]}`, 
now.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"SELECT square of enclosed integer value\",\n\t\t\tcommand: `SELECT (value * value) from db.rp.integer`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"integer\",\"columns\":[\"time\",\"value_value\"],\"values\":[[\"%s\",1764]]}]}]}`, now.Format(time.RFC3339Nano)),\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can query with the count aggregate function\nfunc TestServer_Query_Count(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tnow := now()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\twrites := []string{\n\t\t`cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10),\n\t\t`ram value1=1.0,value2=2.0 ` + strconv.FormatInt(now.UnixNano(), 10),\n\t}\n\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\thour_ago := now.Add(-time.Hour).UTC()\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"selecting count(value) should succeed\",\n\t\t\tcommand: `SELECT count(value) FROM db0.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:00Z\",1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"selecting count(value) with where time should return result\",\n\t\t\tcommand: fmt.Sprintf(`SELECT count(value) FROM db0.rp0.cpu WHERE time >= '%s'`, hour_ago.Format(time.RFC3339Nano)),\n\t\t\texp:     
fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"count\"],\"values\":[[\"%s\",1]]}]}]}`, hour_ago.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"selecting count(value) with filter that excludes all results should return 0\",\n\t\t\tcommand: fmt.Sprintf(`SELECT count(value) FROM db0.rp0.cpu WHERE value=100 AND time >= '%s'`, hour_ago.Format(time.RFC3339Nano)),\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"selecting count(value1) with matching filter against value2 should return correct result\",\n\t\t\tcommand: fmt.Sprintf(`SELECT count(value1) FROM db0.rp0.ram WHERE value2=2 AND time >= '%s'`, hour_ago.Format(time.RFC3339Nano)),\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"ram\",\"columns\":[\"time\",\"count\"],\"values\":[[\"%s\",1]]}]}]}`, hour_ago.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"selecting count(value1) with non-matching filter against value2 should return correct result\",\n\t\t\tcommand: fmt.Sprintf(`SELECT count(value1) FROM db0.rp0.ram WHERE value2=3 AND time >= '%s'`, hour_ago.Format(time.RFC3339Nano)),\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"selecting count(*) should expand the wildcard\",\n\t\t\tcommand: `SELECT count(*) FROM db0.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"count_value\"],\"values\":[[\"1970-01-01T00:00:00Z\",1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"selecting count(2) should error\",\n\t\t\tcommand: `SELECT count(2) FROM db0.rp0.cpu`,\n\t\t\texp:     `{\"error\":\"error parsing query: expected field argument in count()\"}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: 
%s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can limit concurrent series.\nfunc TestServer_Query_MaxSelectSeriesN(t *testing.T) {\n\tt.Parallel()\n\tconfig := NewConfig()\n\tconfig.Coordinator.MaxSelectSeriesN = 3\n\ts := OpenServer(config)\n\tdefer s.Close()\n\n\tif _, ok := s.(*RemoteServer); ok {\n\t\tt.Skip(\"Skipping.  Cannot modify MaxSelectSeriesN remotely\")\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: `cpu,host=server01 value=1.0 0`},\n\t\t&Write{data: `cpu,host=server02 value=1.0 0`},\n\t\t&Write{data: `cpu,host=server03 value=1.0 0`},\n\t\t&Write{data: `cpu,host=server04 value=1.0 0`},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"exceeed max series\",\n\t\t\tcommand: `SELECT COUNT(value) FROM db0.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"max-select-series limit exceeded: (4/3)\"}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can query with Now().\nfunc TestServer_Query_Now(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tnow := now()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10)},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"where with time < now() should work\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu where time < 
now()`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"host\",\"value\"],\"values\":[[\"%s\",\"server01\",1]]}]}]}`, now.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"where with time < now() and GROUP BY * should work\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu where time < now() GROUP BY *`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",1]]}]}]}`, now.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"where with time > now() should return an empty result\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu where time > now()`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"where with time > now() with GROUP BY * should return an empty result\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu where time > now() GROUP BY *`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can query with epoch precisions.\nfunc TestServer_Query_EpochPrecision(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tnow := now()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10)},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"nanosecond precision\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu GROUP BY *`,\n\t\t\tparams:  
url.Values{\"epoch\": []string{\"n\"}},\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[%d,1]]}]}]}`, now.UnixNano()),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"microsecond precision\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu GROUP BY *`,\n\t\t\tparams:  url.Values{\"epoch\": []string{\"u\"}},\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Microsecond)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"millisecond precision\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu GROUP BY *`,\n\t\t\tparams:  url.Values{\"epoch\": []string{\"ms\"}},\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Millisecond)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"second precision\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu GROUP BY *`,\n\t\t\tparams:  url.Values{\"epoch\": []string{\"s\"}},\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Second)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"minute precision\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu GROUP BY *`,\n\t\t\tparams:  url.Values{\"epoch\": []string{\"m\"}},\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Minute)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"hour precision\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu GROUP BY *`,\n\t\t\tparams:  url.Values{\"epoch\": 
[]string{\"h\"}},\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Hour)),\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server works with tag queries.\nfunc TestServer_Query_Tags(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tnow := now()\n\n\twrites := []string{\n\t\tfmt.Sprintf(\"cpu,host=server01 value=100,core=4 %d\", now.UnixNano()),\n\t\tfmt.Sprintf(\"cpu,host=server02 value=50,core=2 %d\", now.Add(1).UnixNano()),\n\n\t\tfmt.Sprintf(\"cpu1,host=server01,region=us-west value=100 %d\", mustParseTime(time.RFC3339Nano, \"2015-02-28T01:03:36.703820946Z\").UnixNano()),\n\t\tfmt.Sprintf(\"cpu1,host=server02 value=200 %d\", mustParseTime(time.RFC3339Nano, \"2010-02-28T01:03:37.703820946Z\").UnixNano()),\n\t\tfmt.Sprintf(\"cpu1,host=server03 value=300 %d\", mustParseTime(time.RFC3339Nano, \"2012-02-28T01:03:38.703820946Z\").UnixNano()),\n\n\t\tfmt.Sprintf(\"cpu2,host=server01 value=100 %d\", mustParseTime(time.RFC3339Nano, \"2015-02-28T01:03:36.703820946Z\").UnixNano()),\n\t\tfmt.Sprintf(\"cpu2 value=200 %d\", mustParseTime(time.RFC3339Nano, \"2012-02-28T01:03:38.703820946Z\").UnixNano()),\n\n\t\tfmt.Sprintf(\"cpu3,company=acme01 value=100 %d\", mustParseTime(time.RFC3339Nano, \"2015-02-28T01:03:36.703820946Z\").UnixNano()),\n\t\tfmt.Sprintf(\"cpu3 value=200 %d\", mustParseTime(time.RFC3339Nano, 
\"2012-02-28T01:03:38.703820946Z\").UnixNano()),\n\n\t\tfmt.Sprintf(\"status_code,url=http://www.example.com value=404 %d\", mustParseTime(time.RFC3339Nano, \"2015-07-22T08:13:54.929026672Z\").UnixNano()),\n\t\tfmt.Sprintf(\"status_code,url=https://influxdb.com value=418 %d\", mustParseTime(time.RFC3339Nano, \"2015-07-22T09:52:24.914395083Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"tag without field should return error\",\n\t\t\tcommand: `SELECT host FROM db0.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"statement must have at least one field in select clause\"}]}`,\n\t\t\tskip:    true, // FIXME(benbjohnson): tags should stream as values\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"field with tag should succeed\",\n\t\t\tcommand: `SELECT host, value FROM db0.rp0.cpu`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"host\",\"value\"],\"values\":[[\"%s\",\"server01\",100],[\"%s\",\"server02\",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"field with tag and GROUP BY should succeed\",\n\t\t\tcommand: `SELECT host, value FROM db0.rp0.cpu GROUP BY host`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"host\",\"value\"],\"values\":[[\"%s\",\"server01\",100]]},{\"name\":\"cpu\",\"tags\":{\"host\":\"server02\"},\"columns\":[\"time\",\"host\",\"value\"],\"values\":[[\"%s\",\"server02\",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"field with two tags should succeed\",\n\t\t\tcommand: `SELECT host, value, core FROM db0.rp0.cpu`,\n\t\t\texp:     
fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"host\",\"value\",\"core\"],\"values\":[[\"%s\",\"server01\",100,4],[\"%s\",\"server02\",50,2]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"field with two tags and GROUP BY should succeed\",\n\t\t\tcommand: `SELECT host, value, core FROM db0.rp0.cpu GROUP BY host`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"host\",\"value\",\"core\"],\"values\":[[\"%s\",\"server01\",100,4]]},{\"name\":\"cpu\",\"tags\":{\"host\":\"server02\"},\"columns\":[\"time\",\"host\",\"value\",\"core\"],\"values\":[[\"%s\",\"server02\",50,2]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"select * with tags should succeed\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"core\",\"host\",\"value\"],\"values\":[[\"%s\",4,\"server01\",100],[\"%s\",2,\"server02\",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"select * with tags with GROUP BY * should succeed\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu GROUP BY *`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"core\",\"value\"],\"values\":[[\"%s\",4,100]]},{\"name\":\"cpu\",\"tags\":{\"host\":\"server02\"},\"columns\":[\"time\",\"core\",\"value\"],\"values\":[[\"%s\",2,50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"group by tag\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu GROUP by host`,\n\t\t\texp:     
fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",100]]},{\"name\":\"cpu\",\"tags\":{\"host\":\"server02\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single field (EQ tag value1)\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu1\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single field (2 EQ tags)\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' AND region = 'us-west'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu1\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single field (OR different tags)\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server03' OR region = 'us-west'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu1\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2012-02-28T01:03:38.703820946Z\",300],[\"2015-02-28T01:03:36.703820946Z\",100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single field (OR with non-existent tag value)\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' OR host = 'server66'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu1\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single field (OR with all tag values)\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' OR host = 'server02' OR host = 'server03'`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu1\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2010-02-28T01:03:37.703820946Z\",200],[\"2012-02-28T01:03:38.703820946Z\",300],[\"2015-02-28T01:03:36.703820946Z\",100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single field (1 EQ and 1 NEQ tag)\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' AND region != 'us-west'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single field (EQ tag value2)\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server02'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu1\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2010-02-28T01:03:37.703820946Z\",200]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single field (NEQ tag value1)\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu1\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2010-02-28T01:03:37.703820946Z\",200],[\"2012-02-28T01:03:38.703820946Z\",300]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single field (NEQ tag value1 AND NEQ tag value2)\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' AND host != 'server02'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu1\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2012-02-28T01:03:38.703820946Z\",300]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single field (NEQ tag value1 OR NEQ tag value2)\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' OR host != 'server02'`, // Yes, this is always true, but that's the point.\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu1\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2010-02-28T01:03:37.703820946Z\",200],[\"2012-02-28T01:03:38.703820946Z\",300],[\"2015-02-28T01:03:36.703820946Z\",100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single field (NEQ tag value1 AND NEQ tag value2 AND NEQ tag value3)\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' AND host != 'server02' AND host != 'server03'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single field (NEQ tag value1, point without any tags)\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu2 WHERE host != 'server01'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu2\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2012-02-28T01:03:38.703820946Z\",200]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single field (NEQ tag value1, point without any tags)\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu3 WHERE company !~ /acme01/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu3\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2012-02-28T01:03:38.703820946Z\",200]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single field (regex tag match)\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu3 WHERE company =~ /acme01/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu3\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single field (regex tag match)\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu3 WHERE company !~ /acme[23]/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu3\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2012-02-28T01:03:38.703820946Z\",200],[\"2015-02-28T01:03:36.703820946Z\",100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single field (regex tag match with 
escaping)\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.status_code WHERE url !~ /https\\:\\/\\/influxdb\\.com/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"status_code\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-07-22T08:13:54.929026672Z\",404]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single field (regex tag match with escaping)\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.status_code WHERE url =~ /https\\:\\/\\/influxdb\\.com/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"status_code\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-07-22T09:52:24.914395083Z\",418]]}]}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server correctly queries with an alias.\nfunc TestServer_Query_Alias(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\twrites := []string{\n\t\tfmt.Sprintf(\"cpu value=1i,steps=3i %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(\"cpu value=2i,steps=4i %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:00Z\").UnixNano()),\n\t}\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"baseline query - SELECT * FROM db0.rp0.cpu\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"steps\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",3,1],[\"2000-01-01T00:01:00Z\",4,2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"basic query with alias - SELECT steps, value as v FROM db0.rp0.cpu\",\n\t\t\tcommand: `SELECT steps, value as v FROM db0.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"steps\",\"v\"],\"values\":[[\"2000-01-01T00:00:00Z\",3,1],[\"2000-01-01T00:01:00Z\",4,2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"double aggregate sum - SELECT sum(value), sum(steps) FROM db0.rp0.cpu\",\n\t\t\tcommand: `SELECT sum(value), sum(steps) FROM db0.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sum\",\"sum_1\"],\"values\":[[\"1970-01-01T00:00:00Z\",3,7]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"double aggregate sum reverse order - SELECT sum(steps), sum(value) FROM db0.rp0.cpu\",\n\t\t\tcommand: `SELECT sum(steps), sum(value) FROM db0.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sum\",\"sum_1\"],\"values\":[[\"1970-01-01T00:00:00Z\",7,3]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"double aggregate sum with alias - SELECT sum(value) as sumv, sum(steps) as sums FROM db0.rp0.cpu\",\n\t\t\tcommand: `SELECT sum(value) as sumv, sum(steps) as sums FROM db0.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sumv\",\"sums\"],\"values\":[[\"1970-01-01T00:00:00Z\",3,7]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"double aggregate with same value - SELECT sum(value), mean(value) FROM db0.rp0.cpu\",\n\t\t\tcommand: `SELECT sum(value), mean(value) FROM db0.rp0.cpu`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sum\",\"mean\"],\"values\":[[\"1970-01-01T00:00:00Z\",3,1.5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"double aggregate with same value and same alias - SELECT mean(value) as mv, max(value) as mv FROM db0.rp0.cpu\",\n\t\t\tcommand: `SELECT mean(value) as mv, max(value) as mv FROM db0.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"mv\",\"mv\"],\"values\":[[\"1970-01-01T00:00:00Z\",1.5,2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"double aggregate with non-existent field - SELECT mean(value), max(foo) FROM db0.rp0.cpu\",\n\t\t\tcommand: `SELECT mean(value), max(foo) FROM db0.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"mean\",\"max\"],\"values\":[[\"1970-01-01T00:00:00Z\",1.5,null]]}]}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server will succeed and error for common scenarios.\nfunc TestServer_Query_Common(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tnow := now()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(\"cpu,host=server01 value=1 %s\", strconv.FormatInt(now.UnixNano(), 10))},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"selecting a from a non-existent database should error\",\n\t\t\tcommand: `SELECT value FROM db1.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"database not found: 
db1\"}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"selecting a from a non-existent retention policy should error\",\n\t\t\tcommand: `SELECT value FROM db0.rp1.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"retention policy not found: rp1\"}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"selecting a valid  measurement and field should succeed\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",1]]}]}]}`, now.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"explicitly selecting time and a valid measurement and field should succeed\",\n\t\t\tcommand: `SELECT time,value FROM db0.rp0.cpu`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",1]]}]}]}`, now.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"selecting a measurement that doesn't exist should result in empty set\",\n\t\t\tcommand: `SELECT value FROM db0.rp0.idontexist`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"selecting a field that doesn't exist should result in empty set\",\n\t\t\tcommand: `SELECT idontexist FROM db0.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"selecting wildcard without specifying a database should error\",\n\t\t\tcommand: `SELECT * FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"database name required\"}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"selecting explicit field without specifying a database should error\",\n\t\t\tcommand: `SELECT value FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"database name required\"}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, 
query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can query two points.\nfunc TestServer_Query_SelectTwoPoints(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tnow := now()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(\"cpu value=100 %s\\ncpu value=200 %s\", strconv.FormatInt(now.UnixNano(), 10), strconv.FormatInt(now.Add(1).UnixNano(), 10))},\n\t}\n\n\ttest.addQueries(\n\t\t&Query{\n\t\t\tname:    \"selecting two points should result in two points\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",100],[\"%s\",200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"selecting two points with GROUP BY * should result in two points\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu GROUP BY *`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",100],[\"%s\",200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)),\n\t\t},\n\t)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can query two negative points.\nfunc 
TestServer_Query_SelectTwoNegativePoints(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tnow := now()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(\"cpu value=-100 %s\\ncpu value=-200 %s\", strconv.FormatInt(now.UnixNano(), 10), strconv.FormatInt(now.Add(1).UnixNano(), 10))},\n\t}\n\n\ttest.addQueries(&Query{\n\t\tname:    \"selecting two negative points should succeed\",\n\t\tcommand: `SELECT * FROM db0.rp0.cpu`,\n\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",-100],[\"%s\",-200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)),\n\t})\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can query with relative time.\nfunc TestServer_Query_SelectRelativeTime(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tnow := now()\n\tyesterday := yesterday()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(\"cpu,host=server01 value=100 %s\\ncpu,host=server01 value=200 %s\", strconv.FormatInt(yesterday.UnixNano(), 10), strconv.FormatInt(now.UnixNano(), 10))},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"single point with time pre-calculated for past time queries yesterday\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu where time >= '` + yesterday.Add(-1*time.Minute).Format(time.RFC3339Nano) + `' GROUP BY *`,\n\t\t\texp:     
fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",100],[\"%s\",200]]}]}]}`, yesterday.Format(time.RFC3339Nano), now.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"single point with time pre-calculated for relative time queries now\",\n\t\t\tcommand: `SELECT * FROM db0.rp0.cpu where time >= now() - 1m GROUP BY *`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",200]]}]}]}`, now.Format(time.RFC3339Nano)),\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can handle various simple derivative queries.\nfunc TestServer_Query_SelectRawDerivative(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(\"cpu value=210 1278010021000000000\\ncpu value=10 1278010022000000000\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"calculate single derivate\",\n\t\t\tcommand: `SELECT derivative(value) from db0.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",-200]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivate with unit\",\n\t\t\tcommand: `SELECT derivative(value, 10s) from db0.rp0.cpu`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",-2000]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can handle various simple non_negative_derivative queries.\nfunc TestServer_Query_SelectRawNonNegativeDerivative(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(`cpu value=10 1278010021000000000\ncpu value=15 1278010022000000000\ncpu value=10 1278010023000000000\ncpu value=20 1278010024000000000\n`)},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"calculate single non_negative_derivative\",\n\t\t\tcommand: `SELECT non_negative_derivative(value) from db0.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"non_negative_derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",5],[\"2010-07-01T18:47:04Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate single non_negative_derivative\",\n\t\t\tcommand: `SELECT non_negative_derivative(value, 10s) from db0.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"non_negative_derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",50],[\"2010-07-01T18:47:04Z\",100]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", 
err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can handle various group by time derivative queries.\nfunc TestServer_Query_SelectGroupByTimeDerivative(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000\ncpu value=15 1278010021000000000\ncpu value=20 1278010022000000000\ncpu value=25 1278010023000000000\n\ncpu0,host=server01 ticks=10,total=100 1278010020000000000\ncpu0,host=server01 ticks=30,total=100 1278010021000000000\ncpu0,host=server01 ticks=32,total=100 1278010022000000000\ncpu0,host=server01 ticks=47,total=100 1278010023000000000\ncpu0,host=server02 ticks=40,total=100 1278010020000000000\ncpu0,host=server02 ticks=45,total=100 1278010021000000000\ncpu0,host=server02 ticks=84,total=100 1278010022000000000\ncpu0,host=server02 ticks=101,total=100 1278010023000000000\n`)},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of count with unit default (2s) group by time\",\n\t\t\tcommand: `SELECT derivative(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",2],[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of count with unit 4s group by time\",\n\t\t\tcommand: `SELECT derivative(count(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",4],[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of mean with unit default (2s) group by time\",\n\t\t\tcommand: `SELECT derivative(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of mean with unit 4s group by time\",\n\t\t\tcommand: `SELECT derivative(mean(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of median with unit default (2s) group by time\",\n\t\t\tcommand: `SELECT derivative(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of median with unit 4s group by time\",\n\t\t\tcommand: `SELECT derivative(median(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of mode with unit default (2s) 
group by time\",\n\t\t\tcommand: `SELECT derivative(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of mode with unit 4s group by time\",\n\t\t\tcommand: `SELECT derivative(mode(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",20]]}]}]}`,\n\t\t},\n\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of sum with unit default (2s) group by time\",\n\t\t\tcommand: `SELECT derivative(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of sum with unit 4s group by time\",\n\t\t\tcommand: `SELECT derivative(sum(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of first with unit default (2s) group by time\",\n\t\t\tcommand: `SELECT derivative(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of first with unit 4s group by time\",\n\t\t\tcommand: `SELECT derivative(first(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of last with unit default (2s) group by time\",\n\t\t\tcommand: `SELECT derivative(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of last with unit 4s group by time\",\n\t\t\tcommand: `SELECT derivative(last(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of min with unit default (2s) group by time\",\n\t\t\tcommand: `SELECT derivative(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of min with unit 4s group by time\",\n\t\t\tcommand: `SELECT 
derivative(min(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of max with unit default (2s) group by time\",\n\t\t\tcommand: `SELECT derivative(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of max with unit 4s group by time\",\n\t\t\tcommand: `SELECT derivative(max(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of percentile with unit default (2s) group by time\",\n\t\t\tcommand: `SELECT derivative(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of percentile with unit 4s group by time\",\n\t\t\tcommand: `SELECT derivative(percentile(value, 50), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of ticks divided by aggregate\",\n\t\t\tcommand: `SELECT non_negative_derivative(mean(ticks), 1s) / last(total) * 100 AS usage FROM db0.rp0.cpu0 WHERE time >= '2010-07-01 18:47:00' AND time <= '2010-07-01 18:47:03' GROUP BY host, time(1s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu0\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"usage\"],\"values\":[[\"2010-07-01T18:47:00Z\",null],[\"2010-07-01T18:47:01Z\",20],[\"2010-07-01T18:47:02Z\",2],[\"2010-07-01T18:47:03Z\",15]]},{\"name\":\"cpu0\",\"tags\":{\"host\":\"server02\"},\"columns\":[\"time\",\"usage\"],\"values\":[[\"2010-07-01T18:47:00Z\",null],[\"2010-07-01T18:47:01Z\",5],[\"2010-07-01T18:47:02Z\",39],[\"2010-07-01T18:47:03Z\",17]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can handle various group by time derivative queries.\nfunc TestServer_Query_SelectGroupByTimeDerivativeWithFill(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000\ncpu value=20 1278010021000000000\n`)},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of count with unit default (2s) group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(count(value)) from 
db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",2],[\"2010-07-01T18:47:02Z\",-2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of count with unit 4s group by time with fill  0\",\n\t\t\tcommand: `SELECT derivative(count(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",4],[\"2010-07-01T18:47:02Z\",-4]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of count with unit default (2s) group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of count with unit 4s group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(count(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of mean with unit default (2s) group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) 
fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",15],[\"2010-07-01T18:47:02Z\",-15]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of mean with unit 4s group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(mean(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",30],[\"2010-07-01T18:47:02Z\",-30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of mean with unit default (2s) group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of mean with unit 4s group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(mean(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of median with unit default (2s) group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",15],[\"2010-07-01T18:47:02Z\",-15]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of median with unit 4s group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(median(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",30],[\"2010-07-01T18:47:02Z\",-30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of median with unit default (2s) group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of median with unit 4s group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(median(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of mode with unit default (2s) group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",-10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of mode with unit 4s group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(mode(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",20],[\"2010-07-01T18:47:02Z\",-20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of mode with unit default (2s) group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of mode with unit 4s group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(mode(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of sum with unit default (2s) group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",30],[\"2010-07-01T18:47:02Z\",-30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of sum with unit 4s group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(sum(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",60],[\"2010-07-01T18:47:02Z\",-60]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of sum with unit default (2s) group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of sum with unit 4s group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(sum(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of first with unit default (2s) group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",-10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of first with unit 4s group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(first(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",20],[\"2010-07-01T18:47:02Z\",-20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of first with unit default (2s) group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of first with unit 4s group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(first(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of last with unit default (2s) group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",20],[\"2010-07-01T18:47:02Z\",-20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of last with unit 4s group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(last(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",40],[\"2010-07-01T18:47:02Z\",-40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of last with unit default (2s) group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of last with unit 4s group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(last(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of min with unit default (2s) group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",-10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of min with unit 4s group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(min(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",20],[\"2010-07-01T18:47:02Z\",-20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of min with unit default (2s) group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of min with unit 4s group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(min(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of max with unit default (2s) group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",20],[\"2010-07-01T18:47:02Z\",-20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of max with unit 4s group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(max(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",40],[\"2010-07-01T18:47:02Z\",-40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of max with unit default (2s) group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of max with unit 4s group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(max(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of percentile with unit default (2s) group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",-10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of percentile with unit 4s group by time with fill 0\",\n\t\t\tcommand: `SELECT derivative(percentile(value, 50), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:00Z\",20],[\"2010-07-01T18:47:02Z\",-20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of percentile with unit default (2s) group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate derivative of percentile with unit 4s group by time with fill previous\",\n\t\t\tcommand: `SELECT derivative(percentile(value, 50), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if 
!query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can handle various group by time difference queries.\nfunc TestServer_Query_SelectGroupByTimeDifference(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000\ncpu value=15 1278010021000000000\ncpu value=20 1278010022000000000\ncpu value=25 1278010023000000000\n`)},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"calculate difference of count\",\n\t\t\tcommand: `SELECT difference(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:00Z\",2],[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of mean\",\n\t\t\tcommand: `SELECT difference(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of median\",\n\t\t\tcommand: `SELECT difference(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of mode\",\n\t\t\tcommand: `SELECT difference(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by 
time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of sum\",\n\t\t\tcommand: `SELECT difference(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of first\",\n\t\t\tcommand: `SELECT difference(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of last\",\n\t\t\tcommand: `SELECT difference(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of min\",\n\t\t\tcommand: `SELECT difference(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of max\",\n\t\t\tcommand: `SELECT difference(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of percentile\",\n\t\t\tcommand: `SELECT difference(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can handle various group by time difference queries with fill.\nfunc TestServer_Query_SelectGroupByTimeDifferenceWithFill(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000\ncpu value=20 1278010021000000000\n`)},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"calculate difference of count with fill 0\",\n\t\t\tcommand: `SELECT difference(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:00Z\",2],[\"2010-07-01T18:47:02Z\",-2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of count with fill previous\",\n\t\t\tcommand: `SELECT 
difference(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of mean with fill 0\",\n\t\t\tcommand: `SELECT difference(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:00Z\",15],[\"2010-07-01T18:47:02Z\",-15]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of mean with fill previous\",\n\t\t\tcommand: `SELECT difference(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of median with fill 0\",\n\t\t\tcommand: `SELECT difference(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:00Z\",15],[\"2010-07-01T18:47:02Z\",-15]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of median with fill previous\",\n\t\t\tcommand: `SELECT difference(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of mode with fill 0\",\n\t\t\tcommand: `SELECT difference(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",-10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of mode with fill previous\",\n\t\t\tcommand: `SELECT difference(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of sum with fill 0\",\n\t\t\tcommand: `SELECT difference(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:00Z\",30],[\"2010-07-01T18:47:02Z\",-30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of sum with fill previous\",\n\t\t\tcommand: `SELECT difference(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of first with fill 0\",\n\t\t\tcommand: 
`SELECT difference(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",-10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of first with fill previous\",\n\t\t\tcommand: `SELECT difference(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of last with fill 0\",\n\t\t\tcommand: `SELECT difference(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:00Z\",20],[\"2010-07-01T18:47:02Z\",-20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of last with fill previous\",\n\t\t\tcommand: `SELECT difference(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of min with fill 0\",\n\t\t\tcommand: `SELECT difference(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",-10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of min with fill previous\",\n\t\t\tcommand: `SELECT difference(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of max with fill 0\",\n\t\t\tcommand: `SELECT difference(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:00Z\",20],[\"2010-07-01T18:47:02Z\",-20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of max with fill previous\",\n\t\t\tcommand: `SELECT difference(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate difference of percentile with fill 0\",\n\t\t\tcommand: `SELECT difference(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",-10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate 
difference of percentile with fill previous\",\n\t\t\tcommand: `SELECT difference(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-07-01T18:47:02Z\",0]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can handle various group by time moving average queries.\nfunc TestServer_Query_SelectGroupByTimeMovingAverage(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000\ncpu value=15 1278010021000000000\ncpu value=20 1278010022000000000\ncpu value=25 1278010023000000000\ncpu value=30 1278010024000000000\ncpu value=35 1278010025000000000\n`)},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of count\",\n\t\t\tcommand: `SELECT moving_average(count(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:00Z\",1],[\"2010-07-01T18:47:02Z\",2],[\"2010-07-01T18:47:04Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of mean\",\n\t\t\tcommand: `SELECT moving_average(mean(value), 2) from db0.rp0.cpu 
where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",17.5],[\"2010-07-01T18:47:04Z\",27.5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of median\",\n\t\t\tcommand: `SELECT moving_average(median(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",17.5],[\"2010-07-01T18:47:04Z\",27.5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of mode\",\n\t\t\tcommand: `SELECT moving_average(mode(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",15],[\"2010-07-01T18:47:04Z\",25]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of sum\",\n\t\t\tcommand: `SELECT moving_average(sum(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",35],[\"2010-07-01T18:47:04Z\",55]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of first\",\n\t\t\tcommand: `SELECT moving_average(first(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",15],[\"2010-07-01T18:47:04Z\",25]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of last\",\n\t\t\tcommand: `SELECT moving_average(last(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",20],[\"2010-07-01T18:47:04Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of min\",\n\t\t\tcommand: `SELECT moving_average(min(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",15],[\"2010-07-01T18:47:04Z\",25]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of max\",\n\t\t\tcommand: `SELECT moving_average(max(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",20],[\"2010-07-01T18:47:04Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of percentile\",\n\t\t\tcommand: `SELECT moving_average(percentile(value, 50), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",15],[\"2010-07-01T18:47:04Z\",25]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := 
range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can handle various group by time moving average queries.\nfunc TestServer_Query_SelectGroupByTimeMovingAverageWithFill(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000\ncpu value=15 1278010021000000000\ncpu value=30 1278010024000000000\ncpu value=35 1278010025000000000\n`)},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of count with fill 0\",\n\t\t\tcommand: `SELECT moving_average(count(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:00Z\",1],[\"2010-07-01T18:47:02Z\",1],[\"2010-07-01T18:47:04Z\",1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of count with fill previous\",\n\t\t\tcommand: `SELECT moving_average(count(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",2],[\"2010-07-01T18:47:04Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of mean with fill 0\",\n\t\t\tcommand: `SELECT moving_average(mean(value), 2) from 
db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:00Z\",6.25],[\"2010-07-01T18:47:02Z\",6.25],[\"2010-07-01T18:47:04Z\",16.25]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of mean with fill previous\",\n\t\t\tcommand: `SELECT moving_average(mean(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",12.5],[\"2010-07-01T18:47:04Z\",22.5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of median with fill 0\",\n\t\t\tcommand: `SELECT moving_average(median(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:00Z\",6.25],[\"2010-07-01T18:47:02Z\",6.25],[\"2010-07-01T18:47:04Z\",16.25]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of median with fill previous\",\n\t\t\tcommand: `SELECT moving_average(median(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",12.5],[\"2010-07-01T18:47:04Z\",22.5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of mode with fill 0\",\n\t\t\tcommand: `SELECT moving_average(mode(value), 2) from db0.rp0.cpu where time >= '2010-07-01 
18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:00Z\",5],[\"2010-07-01T18:47:02Z\",5],[\"2010-07-01T18:47:04Z\",15]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of mode with fill previous\",\n\t\t\tcommand: `SELECT moving_average(mode(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",10],[\"2010-07-01T18:47:04Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of sum with fill 0\",\n\t\t\tcommand: `SELECT moving_average(sum(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:00Z\",12.5],[\"2010-07-01T18:47:02Z\",12.5],[\"2010-07-01T18:47:04Z\",32.5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of sum with fill previous\",\n\t\t\tcommand: `SELECT moving_average(sum(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",25],[\"2010-07-01T18:47:04Z\",45]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of first with fill 0\",\n\t\t\tcommand: `SELECT moving_average(first(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) 
fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:00Z\",5],[\"2010-07-01T18:47:02Z\",5],[\"2010-07-01T18:47:04Z\",15]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of first with fill previous\",\n\t\t\tcommand: `SELECT moving_average(first(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",10],[\"2010-07-01T18:47:04Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of last with fill 0\",\n\t\t\tcommand: `SELECT moving_average(last(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:00Z\",7.5],[\"2010-07-01T18:47:02Z\",7.5],[\"2010-07-01T18:47:04Z\",17.5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of last with fill previous\",\n\t\t\tcommand: `SELECT moving_average(last(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",15],[\"2010-07-01T18:47:04Z\",25]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of min with fill 0\",\n\t\t\tcommand: `SELECT moving_average(min(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:00Z\",5],[\"2010-07-01T18:47:02Z\",5],[\"2010-07-01T18:47:04Z\",15]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of min with fill previous\",\n\t\t\tcommand: `SELECT moving_average(min(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",10],[\"2010-07-01T18:47:04Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of max with fill 0\",\n\t\t\tcommand: `SELECT moving_average(max(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:00Z\",7.5],[\"2010-07-01T18:47:02Z\",7.5],[\"2010-07-01T18:47:04Z\",17.5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of max with fill previous\",\n\t\t\tcommand: `SELECT moving_average(max(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",15],[\"2010-07-01T18:47:04Z\",25]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of percentile with fill 0\",\n\t\t\tcommand: `SELECT moving_average(percentile(value, 50), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:00Z\",5],[\"2010-07-01T18:47:02Z\",5],[\"2010-07-01T18:47:04Z\",15]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate moving average of percentile with fill previous\",\n\t\t\tcommand: `SELECT moving_average(percentile(value, 50), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"moving_average\"],\"values\":[[\"2010-07-01T18:47:02Z\",10],[\"2010-07-01T18:47:04Z\",20]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can handle various group by time cumulative sum queries.\nfunc TestServer_Query_SelectGroupByTimeCumulativeSum(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000\ncpu value=15 1278010021000000000\ncpu value=20 1278010022000000000\ncpu value=25 1278010023000000000\n`)},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of count\",\n\t\t\tcommand: `SELECT cumulative_sum(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",2],[\"2010-07-01T18:47:02Z\",4]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of mean\",\n\t\t\tcommand: `SELECT cumulative_sum(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",12.5],[\"2010-07-01T18:47:02Z\",35]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of median\",\n\t\t\tcommand: `SELECT cumulative_sum(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",12.5],[\"2010-07-01T18:47:02Z\",35]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of mode\",\n\t\t\tcommand: `SELECT cumulative_sum(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of sum\",\n\t\t\tcommand: `SELECT cumulative_sum(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",25],[\"2010-07-01T18:47:02Z\",70]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative 
sum of first\",\n\t\t\tcommand: `SELECT cumulative_sum(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of last\",\n\t\t\tcommand: `SELECT cumulative_sum(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",15],[\"2010-07-01T18:47:02Z\",40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of min\",\n\t\t\tcommand: `SELECT cumulative_sum(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of max\",\n\t\t\tcommand: `SELECT cumulative_sum(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",15],[\"2010-07-01T18:47:02Z\",40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of percentile\",\n\t\t\tcommand: `SELECT cumulative_sum(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",30]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure the server can handle various group by time cumulative sum queries with fill.\nfunc TestServer_Query_SelectGroupByTimeCumulativeSumWithFill(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(`cpu value=10 1278010020000000000\ncpu value=20 1278010021000000000\n`)},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of count with fill 0\",\n\t\t\tcommand: `SELECT cumulative_sum(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",2],[\"2010-07-01T18:47:02Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of count with fill previous\",\n\t\t\tcommand: `SELECT cumulative_sum(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",2],[\"2010-07-01T18:47:02Z\",4]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of mean with fill 0\",\n\t\t\tcommand: `SELECT cumulative_sum(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",15],[\"2010-07-01T18:47:02Z\",15]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of mean with fill previous\",\n\t\t\tcommand: `SELECT cumulative_sum(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",15],[\"2010-07-01T18:47:02Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of median with fill 0\",\n\t\t\tcommand: `SELECT cumulative_sum(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",15],[\"2010-07-01T18:47:02Z\",15]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of median with fill previous\",\n\t\t\tcommand: `SELECT cumulative_sum(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",15],[\"2010-07-01T18:47:02Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of mode with fill 0\",\n\t\t\tcommand: `SELECT cumulative_sum(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of mode with fill previous\",\n\t\t\tcommand: `SELECT cumulative_sum(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of sum with fill 0\",\n\t\t\tcommand: `SELECT cumulative_sum(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",30],[\"2010-07-01T18:47:02Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of sum with fill previous\",\n\t\t\tcommand: `SELECT cumulative_sum(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",30],[\"2010-07-01T18:47:02Z\",60]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of first with fill 0\",\n\t\t\tcommand: `SELECT cumulative_sum(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of first with fill previous\",\n\t\t\tcommand: `SELECT cumulative_sum(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of last with fill 0\",\n\t\t\tcommand: `SELECT cumulative_sum(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",20],[\"2010-07-01T18:47:02Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of last with fill previous\",\n\t\t\tcommand: `SELECT cumulative_sum(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",20],[\"2010-07-01T18:47:02Z\",40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of min with fill 0\",\n\t\t\tcommand: `SELECT cumulative_sum(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of min with fill previous\",\n\t\t\tcommand: `SELECT cumulative_sum(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of max with fill 0\",\n\t\t\tcommand: `SELECT cumulative_sum(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",20],[\"2010-07-01T18:47:02Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of max with fill previous\",\n\t\t\tcommand: `SELECT cumulative_sum(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",20],[\"2010-07-01T18:47:02Z\",40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of percentile with fill 0\",\n\t\t\tcommand: `SELECT cumulative_sum(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",10]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"calculate cumulative sum of percentile with fill previous\",\n\t\t\tcommand: `SELECT cumulative_sum(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-07-01T18:47:00Z\",10],[\"2010-07-01T18:47:02Z\",20]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_CumulativeCount(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(`events signup=t 1005832000\nevents signup=t 1048283000\nevents signup=t 1784832000\nevents signup=t 2000000000\nevents signup=t 3084890000\nevents signup=t 
3838400000\n`)},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"cumulative count\",\n\t\t\tcommand: `SELECT cumulative_sum(count(signup)) from db0.rp0.events where time >= 1s and time < 4s group by time(1s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"events\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"1970-01-01T00:00:01Z\",3],[\"1970-01-01T00:00:02Z\",4],[\"1970-01-01T00:00:03Z\",6]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_SelectGroupByTime_MultipleAggregates(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(`test,t=a x=1i 1000000000\ntest,t=b y=1i 1000000000\ntest,t=a x=2i 2000000000\ntest,t=b y=2i 2000000000\ntest,t=a x=3i 3000000000\ntest,t=b y=3i 3000000000\n`)},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"two aggregates with a group by host\",\n\t\t\tcommand: `SELECT mean(x) as x, mean(y) as y from db0.rp0.test where time >= 1s and time < 4s group by t, time(1s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"test\",\"tags\":{\"t\":\"a\"},\"columns\":[\"time\",\"x\",\"y\"],\"values\":[[\"1970-01-01T00:00:01Z\",1,null],[\"1970-01-01T00:00:02Z\",2,null],[\"1970-01-01T00:00:03Z\",3,null]]},{\"name\":\"test\",\"tags\":{\"t\":\"b\"},\"columns\":[\"time\",\"x\",\"y\"],\"values\":[[\"1970-01-01T00:00:01Z\",null,1],[\"1970-01-01T00:00:02Z\",null,2],[\"1970-01-01T00:00:03Z\",null,3]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_MathWithFill(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(`cpu value=15 1278010020000000000\n`)},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"multiplication with fill previous\",\n\t\t\tcommand: `SELECT 4*mean(value) FROM db0.rp0.cpu WHERE time >= '2010-07-01 18:47:00' AND time < '2010-07-01 18:48:30' GROUP BY time(30s) FILL(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2010-07-01T18:47:00Z\",60],[\"2010-07-01T18:47:30Z\",60],[\"2010-07-01T18:48:00Z\",60]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"multiplication of mode value with fill previous\",\n\t\t\tcommand: `SELECT 4*mode(value) FROM db0.rp0.cpu WHERE time >= '2010-07-01 18:47:00' AND time < '2010-07-01 18:48:30' GROUP BY time(30s) FILL(previous)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"mode\"],\"values\":[[\"2010-07-01T18:47:00Z\",60],[\"2010-07-01T18:47:30Z\",60],[\"2010-07-01T18:48:00Z\",60]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// mergeMany ensures that when merging many series together and some of them have a different number\n// of points than others in a group by interval the results are correct\nfunc TestServer_Query_MergeMany(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\t// set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\n\twrites := []string{}\n\tfor i := 1; i < 11; i++ {\n\t\tfor j := 1; j < 5+i%3; j++ {\n\t\t\tdata := fmt.Sprintf(`cpu,host=server_%d value=22 %d`, i, time.Unix(int64(j), int64(0)).UTC().UnixNano())\n\t\t\twrites = append(writes, data)\n\t\t}\n\t}\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"GROUP by time\",\n\t\t\tcommand: `SELECT count(value) FROM db0.rp0.cpu WHERE time >= '1970-01-01T00:00:01Z' AND time <= '1970-01-01T00:00:06Z' GROUP BY time(1s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:01Z\",10],[\"1970-01-01T00:00:02Z\",10],[\"1970-01-01T00:00:03Z\",10],[\"1970-01-01T00:00:04Z\",10],[\"1970-01-01T00:00:05Z\",7],[\"1970-01-01T00:00:06Z\",3]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tskip:    true,\n\t\t\tname:    \"GROUP by tag - FIXME issue #2875\",\n\t\t\tcommand: `SELECT count(value) FROM db0.rp0.cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:00Z' group by host`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"count\"],\"values\":[[\"2000-01-01T00:00:00Z\",1]]},{\"name\":\"cpu\",\"tags\":{\"host\":\"server02\"},\"columns\":[\"time\",\"count\"],\"values\":[[\"2000-01-01T00:00:00Z\",1]]},{\"name\":\"cpu\",\"tags\":{\"host\":\"server03\"},\"columns\":[\"time\",\"count\"],\"values\":[[\"2000-01-01T00:00:00Z\",1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"GROUP by field\",\n\t\t\tcommand: `SELECT count(value) FROM db0.rp0.cpu group by value`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"value\":\"\"},\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:00Z\",50]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_SLimitAndSOffset(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\t// set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this 
test racy\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\n\twrites := []string{}\n\tfor i := 1; i < 10; i++ {\n\t\tdata := fmt.Sprintf(`cpu,region=us-east,host=server-%d value=%d %d`, i, i, time.Unix(int64(i), int64(0)).UnixNano())\n\t\twrites = append(writes, data)\n\t}\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"SLIMIT 2 SOFFSET 1\",\n\t\t\tcommand: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 1`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server-2\",\"region\":\"us-east\"},\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:00Z\",1]]},{\"name\":\"cpu\",\"tags\":{\"host\":\"server-3\",\"region\":\"us-east\"},\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:00Z\",1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"SLIMIT 2 SOFFSET 3\",\n\t\t\tcommand: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 3`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server-4\",\"region\":\"us-east\"},\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:00Z\",1]]},{\"name\":\"cpu\",\"tags\":{\"host\":\"server-5\",\"region\":\"us-east\"},\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:00Z\",1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"SLIMIT 3 SOFFSET 8\",\n\t\t\tcommand: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 3 SOFFSET 8`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server-9\",\"region\":\"us-east\"},\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:00Z\",1]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := 
test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Regex(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu1,host=server01 value=10 %d`, mustParseTime(time.RFC3339Nano, \"2015-02-28T01:03:36.703820946Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu2,host=server01 value=20 %d`, mustParseTime(time.RFC3339Nano, \"2015-02-28T01:03:36.703820946Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu3,host=server01 value=30 %d`, mustParseTime(time.RFC3339Nano, \"2015-02-28T01:03:36.703820946Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"default db and rp\",\n\t\t\tcommand: `SELECT * FROM /cpu[13]/`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu1\",\"columns\":[\"time\",\"host\",\"value\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",\"server01\",10]]},{\"name\":\"cpu3\",\"columns\":[\"time\",\"host\",\"value\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",\"server01\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"default db and rp with GROUP BY *\",\n\t\t\tcommand: `SELECT * FROM /cpu[13]/ GROUP BY *`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu1\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",10]]},{\"name\":\"cpu3\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"specifying db and rp\",\n\t\t\tcommand: `SELECT * FROM db0.rp0./cpu[13]/ GROUP BY *`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu1\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",10]]},{\"name\":\"cpu3\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"default db and specified rp\",\n\t\t\tcommand: `SELECT * FROM rp0./cpu[13]/ GROUP BY *`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu1\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",10]]},{\"name\":\"cpu3\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"specified db and default rp\",\n\t\t\tcommand: `SELECT * FROM db0../cpu[13]/ GROUP BY *`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu1\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",10]]},{\"name\":\"cpu3\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"map field type with a regex source\",\n\t\t\tcommand: `SELECT value FROM /cpu[13]/`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\texp: 
    `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu1\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",10]]},{\"name\":\"cpu3\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",30]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Aggregates_Int(t *testing.T) {\n\tt.Parallel()\n\ts := OpenDefaultServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join([]string{\n\t\t\tfmt.Sprintf(`int value=45 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t}, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t// int64\n\t\t&Query{\n\t\t\tname:    \"stddev with just one point - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT STDDEV(value) FROM int`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"int\",\"columns\":[\"time\",\"stddev\"],\"values\":[[\"1970-01-01T00:00:00Z\",null]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Aggregates_IntMax(t *testing.T) {\n\tt.Parallel()\n\ts := 
OpenDefaultServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join([]string{\n\t\t\tfmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, \"2000-01-01T01:00:00Z\").UnixNano()),\n\t\t}, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"large mean and stddev - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT MEAN(value), STDDEV(value) FROM intmax`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmax\",\"columns\":[\"time\",\"mean\",\"stddev\"],\"values\":[[\"1970-01-01T00:00:00Z\",` + maxInt64() + `,0]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Aggregates_IntMany(t *testing.T) {\n\tt.Parallel()\n\ts := OpenDefaultServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join([]string{\n\t\t\tfmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:20Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, 
\"2000-01-01T00:00:30Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:40Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:50Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:00Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:10Z\").UnixNano()),\n\t\t}, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"mean and stddev - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT MEAN(value), STDDEV(value) FROM intmany WHERE time >= '2000-01-01' AND time < '2000-01-01T00:02:00Z' GROUP BY time(10m)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"mean\",\"stddev\"],\"values\":[[\"2000-01-01T00:00:00Z\",5,2.138089935299395]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"first - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT FIRST(value) FROM intmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"first\"],\"values\":[[\"2000-01-01T00:00:00Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"first - int - epoch ms\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}, \"epoch\": []string{\"ms\"}},\n\t\t\tcommand: `SELECT FIRST(value) FROM intmany`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"first\"],\"values\":[[%d,2]]}]}]}`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()/int64(time.Millisecond)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"last - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT LAST(value) FROM 
intmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"last\"],\"values\":[[\"2000-01-01T00:01:10Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"spread - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT SPREAD(value) FROM intmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"spread\"],\"values\":[[\"1970-01-01T00:00:00Z\",7]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"median - even count - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT MEDIAN(value) FROM intmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"median\"],\"values\":[[\"1970-01-01T00:00:00Z\",4.5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"median - odd count - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT MEDIAN(value) FROM intmany where time < '2000-01-01T00:01:10Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"median\"],\"values\":[[\"1970-01-01T00:00:00Z\",4]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"mode - single - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT MODE(value) FROM intmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"mode\"],\"values\":[[\"1970-01-01T00:00:00Z\",4]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"mode - multiple - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT MODE(value) FROM intmany where time < '2000-01-01T00:01:10Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"mode\"],\"values\":[[\"1970-01-01T00:00:00Z\",4]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"distinct as call - 
int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT DISTINCT(value) FROM intmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"distinct\"],\"values\":[[\"1970-01-01T00:00:00Z\",2],[\"1970-01-01T00:00:00Z\",4],[\"1970-01-01T00:00:00Z\",5],[\"1970-01-01T00:00:00Z\",7],[\"1970-01-01T00:00:00Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"distinct alt syntax - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT DISTINCT value FROM intmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"distinct\"],\"values\":[[\"1970-01-01T00:00:00Z\",2],[\"1970-01-01T00:00:00Z\",4],[\"1970-01-01T00:00:00Z\",5],[\"1970-01-01T00:00:00Z\",7],[\"1970-01-01T00:00:00Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"distinct select tag - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT DISTINCT(host) FROM intmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"statement must have at least one field in select clause\"}]}`,\n\t\t\tskip:    true, // FIXME(benbjohnson): should be allowed, need to stream tag values\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"distinct alt select tag - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT DISTINCT host FROM intmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"statement must have at least one field in select clause\"}]}`,\n\t\t\tskip:    true, // FIXME(benbjohnson): should be allowed, need to stream tag values\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"count distinct - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT COUNT(DISTINCT value) FROM intmany`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:00Z\",5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"count distinct as call - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT COUNT(DISTINCT(value)) FROM intmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:00Z\",5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"count distinct select tag - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT COUNT(DISTINCT host) FROM intmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:00Z\",0]]}]}]}`,\n\t\t\tskip:    true, // FIXME(benbjohnson): stream tag values\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"count distinct as call select tag - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT COUNT(DISTINCT host) FROM intmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:00Z\",0]]}]}]}`,\n\t\t\tskip:    true, // FIXME(benbjohnson): stream tag values\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Aggregates_IntMany_GroupBy(t *testing.T) {\n\tt.Parallel()\n\ts := OpenDefaultServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = 
Writes{\n\t\t&Write{data: strings.Join([]string{\n\t\t\tfmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:20Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:30Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:40Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:50Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:00Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:10Z\").UnixNano()),\n\t\t}, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"max order by time with time specified group by 10s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(10s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"max\"],\"values\":[[\"2000-01-01T00:00:00Z\",2],[\"2000-01-01T00:00:10Z\",4],[\"2000-01-01T00:00:20Z\",4],[\"2000-01-01T00:00:30Z\",4],[\"2000-01-01T00:00:40Z\",5],[\"2000-01-01T00:00:50Z\",5],[\"2000-01-01T00:01:00Z\",7],[\"2000-01-01T00:01:10Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"max order by time without time specified group by 30s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT max(value) FROM intmany where time >= 
'2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"max\"],\"values\":[[\"2000-01-01T00:00:00Z\",4],[\"2000-01-01T00:00:30Z\",5],[\"2000-01-01T00:01:00Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"max order by time with time specified group by 30s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"max\"],\"values\":[[\"2000-01-01T00:00:00Z\",4],[\"2000-01-01T00:00:30Z\",5],[\"2000-01-01T00:01:00Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"min order by time without time specified group by 15s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT min(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"min\"],\"values\":[[\"2000-01-01T00:00:00Z\",2],[\"2000-01-01T00:00:15Z\",4],[\"2000-01-01T00:00:30Z\",4],[\"2000-01-01T00:00:45Z\",5],[\"2000-01-01T00:01:00Z\",7]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"min order by time with time specified group by 15s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, min(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"min\"],\"values\":[[\"2000-01-01T00:00:00Z\",2],[\"2000-01-01T00:00:15Z\",4],[\"2000-01-01T00:00:30Z\",4],[\"2000-01-01T00:00:45Z\",5],[\"2000-01-01T00:01:00Z\",7]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"first order by time without time specified group by 15s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT first(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"first\"],\"values\":[[\"2000-01-01T00:00:00Z\",2],[\"2000-01-01T00:00:15Z\",4],[\"2000-01-01T00:00:30Z\",4],[\"2000-01-01T00:00:45Z\",5],[\"2000-01-01T00:01:00Z\",7]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"first order by time with time specified group by 15s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, first(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"first\"],\"values\":[[\"2000-01-01T00:00:00Z\",2],[\"2000-01-01T00:00:15Z\",4],[\"2000-01-01T00:00:30Z\",4],[\"2000-01-01T00:00:45Z\",5],[\"2000-01-01T00:01:00Z\",7]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"last order by time without time specified group by 15s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT last(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"last\"],\"values\":[[\"2000-01-01T00:00:00Z\",4],[\"2000-01-01T00:00:15Z\",4],[\"2000-01-01T00:00:30Z\",5],[\"2000-01-01T00:00:45Z\",5],[\"2000-01-01T00:01:00Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"last order by time with time specified group by 15s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, last(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"last\"],\"values\":[[\"2000-01-01T00:00:00Z\",4],[\"2000-01-01T00:00:15Z\",4],[\"2000-01-01T00:00:30Z\",5],[\"2000-01-01T00:00:45Z\",5],[\"2000-01-01T00:01:00Z\",9]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Aggregates_IntMany_OrderByDesc(t *testing.T) {\n\tt.Parallel()\n\ts := OpenDefaultServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join([]string{\n\t\t\tfmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:20Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server04 value=4.0 %d`, 
mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:30Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:40Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:50Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:00Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:10Z\").UnixNano()),\n\t\t}, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"aggregate order by time desc\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:00Z' group by time(10s) order by time desc`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intmany\",\"columns\":[\"time\",\"max\"],\"values\":[[\"2000-01-01T00:01:00Z\",7],[\"2000-01-01T00:00:50Z\",5],[\"2000-01-01T00:00:40Z\",5],[\"2000-01-01T00:00:30Z\",4],[\"2000-01-01T00:00:20Z\",4],[\"2000-01-01T00:00:10Z\",4],[\"2000-01-01T00:00:00Z\",2]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Aggregates_IntOverlap(t *testing.T) {\n\tt.Parallel()\n\ts := OpenDefaultServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join([]string{\n\t\t\tfmt.Sprintf(`intoverlap,region=us-east value=20 %d`, 
mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intoverlap,region=us-east value=30 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intoverlap,region=us-west value=100 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`intoverlap,region=us-east otherVal=20 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:03Z\").UnixNano()),\n\t\t}, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t/*\t\t&Query{\n\t\t\t\t\tname:    \"aggregation with no interval - int\",\n\t\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t\t\tcommand: `SELECT count(value) FROM intoverlap WHERE time = '2000-01-01 00:00:00'`,\n\t\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intoverlap\",\"columns\":[\"time\",\"count\"],\"values\":[[\"2000-01-01T00:00:00Z\",2]]}]}]}`,\n\t\t\t\t},\n\t\t\t\t&Query{\n\t\t\t\t\tname:    \"sum - int\",\n\t\t\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\t\t\tcommand: `SELECT SUM(value) FROM intoverlap WHERE time >= '2000-01-01 00:00:05' AND time <= '2000-01-01T00:00:10Z' GROUP BY time(10s), region`,\n\t\t\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intoverlap\",\"tags\":{\"region\":\"us-east\"},\"columns\":[\"time\",\"sum\"],\"values\":[[\"2000-01-01T00:00:10Z\",30]]}]}]}`,\n\t\t\t\t},\n\t\t*/&Query{\n\t\t\tname:    \"aggregation with a null field value - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT SUM(value) FROM intoverlap GROUP BY region`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intoverlap\",\"tags\":{\"region\":\"us-east\"},\"columns\":[\"time\",\"sum\"],\"values\":[[\"1970-01-01T00:00:00Z\",50]]},{\"name\":\"intoverlap\",\"tags\":{\"region\":\"us-west\"},\"columns\":[\"time\",\"sum\"],\"values\":[[\"1970-01-01T00:00:00Z\",100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname: 
   \"multiple aggregations - int\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT SUM(value), MEAN(value) FROM intoverlap GROUP BY region`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intoverlap\",\"tags\":{\"region\":\"us-east\"},\"columns\":[\"time\",\"sum\",\"mean\"],\"values\":[[\"1970-01-01T00:00:00Z\",50,25]]},{\"name\":\"intoverlap\",\"tags\":{\"region\":\"us-west\"},\"columns\":[\"time\",\"sum\",\"mean\"],\"values\":[[\"1970-01-01T00:00:00Z\",100,100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tskip:    true,\n\t\t\tname:    \"multiple aggregations with division - int FIXME issue #2879\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sum(value), mean(value), sum(value) / mean(value) as div FROM intoverlap GROUP BY region`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"intoverlap\",\"tags\":{\"region\":\"us-east\"},\"columns\":[\"time\",\"sum\",\"mean\",\"div\"],\"values\":[[\"1970-01-01T00:00:00Z\",50,25,2]]},{\"name\":\"intoverlap\",\"tags\":{\"region\":\"us-west\"},\"columns\":[\"time\",\"div\"],\"values\":[[\"1970-01-01T00:00:00Z\",100,100,1]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Aggregates_FloatSingle(t *testing.T) {\n\tt.Parallel()\n\ts := OpenDefaultServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join([]string{\n\t\t\tfmt.Sprintf(`floatsingle value=45.0 %d`, mustParseTime(time.RFC3339Nano, 
\"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t}, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"stddev with just one point - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT STDDEV(value) FROM floatsingle`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatsingle\",\"columns\":[\"time\",\"stddev\"],\"values\":[[\"1970-01-01T00:00:00Z\",null]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Aggregates_FloatMany(t *testing.T) {\n\tt.Parallel()\n\ts := OpenDefaultServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join([]string{\n\t\t\tfmt.Sprintf(`floatmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`floatmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`floatmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:20Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`floatmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:30Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`floatmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:40Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`floatmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:50Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`floatmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, 
\"2000-01-01T00:01:00Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`floatmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:10Z\").UnixNano()),\n\t\t}, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"mean and stddev - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT MEAN(value), STDDEV(value) FROM floatmany WHERE time >= '2000-01-01' AND time < '2000-01-01T00:02:00Z' GROUP BY time(10m)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatmany\",\"columns\":[\"time\",\"mean\",\"stddev\"],\"values\":[[\"2000-01-01T00:00:00Z\",5,2.138089935299395]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"first - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT FIRST(value) FROM floatmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatmany\",\"columns\":[\"time\",\"first\"],\"values\":[[\"2000-01-01T00:00:00Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"last - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT LAST(value) FROM floatmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatmany\",\"columns\":[\"time\",\"last\"],\"values\":[[\"2000-01-01T00:01:10Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"spread - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT SPREAD(value) FROM floatmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatmany\",\"columns\":[\"time\",\"spread\"],\"values\":[[\"1970-01-01T00:00:00Z\",7]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"median - even count - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT MEDIAN(value) FROM floatmany`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatmany\",\"columns\":[\"time\",\"median\"],\"values\":[[\"1970-01-01T00:00:00Z\",4.5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"median - odd count - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT MEDIAN(value) FROM floatmany where time < '2000-01-01T00:01:10Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatmany\",\"columns\":[\"time\",\"median\"],\"values\":[[\"1970-01-01T00:00:00Z\",4]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"mode - single - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT MODE(value) FROM floatmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatmany\",\"columns\":[\"time\",\"mode\"],\"values\":[[\"1970-01-01T00:00:00Z\",4]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"mode - multiple - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT MODE(value) FROM floatmany where time < '2000-01-01T00:00:10Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatmany\",\"columns\":[\"time\",\"mode\"],\"values\":[[\"1970-01-01T00:00:00Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"distinct as call - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT DISTINCT(value) FROM floatmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatmany\",\"columns\":[\"time\",\"distinct\"],\"values\":[[\"1970-01-01T00:00:00Z\",2],[\"1970-01-01T00:00:00Z\",4],[\"1970-01-01T00:00:00Z\",5],[\"1970-01-01T00:00:00Z\",7],[\"1970-01-01T00:00:00Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"distinct alt syntax - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT DISTINCT value FROM floatmany`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatmany\",\"columns\":[\"time\",\"distinct\"],\"values\":[[\"1970-01-01T00:00:00Z\",2],[\"1970-01-01T00:00:00Z\",4],[\"1970-01-01T00:00:00Z\",5],[\"1970-01-01T00:00:00Z\",7],[\"1970-01-01T00:00:00Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"distinct select tag - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT DISTINCT(host) FROM floatmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"statement must have at least one field in select clause\"}]}`,\n\t\t\tskip:    true, // FIXME(benbjohnson): show be allowed, stream tag values\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"distinct alt select tag - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT DISTINCT host FROM floatmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"statement must have at least one field in select clause\"}]}`,\n\t\t\tskip:    true, // FIXME(benbjohnson): show be allowed, stream tag values\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"count distinct - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT COUNT(DISTINCT value) FROM floatmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatmany\",\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:00Z\",5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"count distinct as call - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT COUNT(DISTINCT(value)) FROM floatmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatmany\",\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:00Z\",5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"count distinct select tag - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT COUNT(DISTINCT host) FROM floatmany`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatmany\",\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:00Z\",0]]}]}]}`,\n\t\t\tskip:    true, // FIXME(benbjohnson): stream tag values\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"count distinct as call select tag - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT COUNT(DISTINCT host) FROM floatmany`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatmany\",\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:00Z\",0]]}]}]}`,\n\t\t\tskip:    true, // FIXME(benbjohnson): stream tag values\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Aggregates_FloatOverlap(t *testing.T) {\n\tt.Parallel()\n\ts := OpenDefaultServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join([]string{\n\t\t\tfmt.Sprintf(`floatoverlap,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`floatoverlap,region=us-east value=30.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`floatoverlap,region=us-west value=100.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`floatoverlap,region=us-east otherVal=20.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:03Z\").UnixNano()),\n\t\t}, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"aggregation with no interval - 
float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT count(value) FROM floatoverlap WHERE time = '2000-01-01 00:00:00'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatoverlap\",\"columns\":[\"time\",\"count\"],\"values\":[[\"2000-01-01T00:00:00Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"sum - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT SUM(value) FROM floatoverlap WHERE time >= '2000-01-01 00:00:05' AND time <= '2000-01-01T00:00:10Z' GROUP BY time(10s), region`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatoverlap\",\"tags\":{\"region\":\"us-east\"},\"columns\":[\"time\",\"sum\"],\"values\":[[\"2000-01-01T00:00:00Z\",null],[\"2000-01-01T00:00:10Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"aggregation with a null field value - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT SUM(value) FROM floatoverlap GROUP BY region`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatoverlap\",\"tags\":{\"region\":\"us-east\"},\"columns\":[\"time\",\"sum\"],\"values\":[[\"1970-01-01T00:00:00Z\",50]]},{\"name\":\"floatoverlap\",\"tags\":{\"region\":\"us-west\"},\"columns\":[\"time\",\"sum\"],\"values\":[[\"1970-01-01T00:00:00Z\",100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"multiple aggregations - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT SUM(value), MEAN(value) FROM floatoverlap GROUP BY region`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatoverlap\",\"tags\":{\"region\":\"us-east\"},\"columns\":[\"time\",\"sum\",\"mean\"],\"values\":[[\"1970-01-01T00:00:00Z\",50,25]]},{\"name\":\"floatoverlap\",\"tags\":{\"region\":\"us-west\"},\"columns\":[\"time\",\"sum\",\"mean\"],\"values\":[[\"1970-01-01T00:00:00Z\",100,100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    
\"multiple aggregations with division - float\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sum(value) / mean(value) as div FROM floatoverlap GROUP BY region`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"floatoverlap\",\"tags\":{\"region\":\"us-east\"},\"columns\":[\"time\",\"div\"],\"values\":[[\"1970-01-01T00:00:00Z\",2]]},{\"name\":\"floatoverlap\",\"tags\":{\"region\":\"us-west\"},\"columns\":[\"time\",\"div\"],\"values\":[[\"1970-01-01T00:00:00Z\",1]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Aggregates_GroupByOffset(t *testing.T) {\n\tt.Parallel()\n\ts := OpenDefaultServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join([]string{\n\t\t\tfmt.Sprintf(`offset,region=us-east,host=serverA value=20.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`offset,region=us-east,host=serverB value=30.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`offset,region=us-west,host=serverC value=100.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t}, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"group by offset - standard\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sum(value) FROM \"offset\" WHERE time >= '1999-12-31T23:59:55Z' AND time < '2000-01-01T00:00:15Z' GROUP BY time(10s, 5s) FILL(0)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"offset\",\"columns\":[\"time\",\"sum\"],\"values\":[[\"1999-12-31T23:59:55Z\",120],[\"2000-01-01T00:00:05Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"group by offset - misaligned time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sum(value) FROM \"offset\" WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:20Z' GROUP BY time(10s, 5s) FILL(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"offset\",\"columns\":[\"time\",\"sum\"],\"values\":[[\"1999-12-31T23:59:55Z\",120],[\"2000-01-01T00:00:05Z\",30],[\"2000-01-01T00:00:15Z\",0]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"group by offset - negative time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sum(value) FROM \"offset\" WHERE time >= '1999-12-31T23:59:55Z' AND time < '2000-01-01T00:00:15Z' GROUP BY time(10s, -5s) FILL(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"offset\",\"columns\":[\"time\",\"sum\"],\"values\":[[\"1999-12-31T23:59:55Z\",120],[\"2000-01-01T00:00:05Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"group by offset - modulo\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sum(value) FROM \"offset\" WHERE time >= '1999-12-31T23:59:55Z' AND time < '2000-01-01T00:00:15Z' GROUP BY time(10s, 35s) FILL(0)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"offset\",\"columns\":[\"time\",\"sum\"],\"values\":[[\"1999-12-31T23:59:55Z\",120],[\"2000-01-01T00:00:05Z\",30]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil 
{\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Aggregates_Load(t *testing.T) {\n\tt.Parallel()\n\ts := OpenDefaultServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join([]string{\n\t\t\tfmt.Sprintf(`load,region=us-east,host=serverA value=20.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`load,region=us-east,host=serverB value=30.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`load,region=us-west,host=serverC value=100.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t}, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"group by multiple dimensions\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sum(value) FROM load GROUP BY region, host`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"load\",\"tags\":{\"host\":\"serverA\",\"region\":\"us-east\"},\"columns\":[\"time\",\"sum\"],\"values\":[[\"1970-01-01T00:00:00Z\",20]]},{\"name\":\"load\",\"tags\":{\"host\":\"serverB\",\"region\":\"us-east\"},\"columns\":[\"time\",\"sum\"],\"values\":[[\"1970-01-01T00:00:00Z\",30]]},{\"name\":\"load\",\"tags\":{\"host\":\"serverC\",\"region\":\"us-west\"},\"columns\":[\"time\",\"sum\"],\"values\":[[\"1970-01-01T00:00:00Z\",100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"group by multiple dimensions\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sum(value)*2 FROM load`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"load\",\"columns\":[\"time\",\"sum\"],\"values\":[[\"1970-01-01T00:00:00Z\",300]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"group by multiple dimensions\",\n\t\t\tparams:  url.Values{\"db\": 
[]string{\"db0\"}},\n\t\t\tcommand: `SELECT sum(value)/2 FROM load`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"load\",\"columns\":[\"time\",\"sum\"],\"values\":[[\"1970-01-01T00:00:00Z\",75]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Aggregates_CPU(t *testing.T) {\n\tt.Parallel()\n\ts := OpenDefaultServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join([]string{\n\t\t\tfmt.Sprintf(`cpu,region=uk,host=serverZ,service=redis value=20.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:03Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`cpu,region=uk,host=serverZ,service=mysql value=30.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:03Z\").UnixNano()),\n\t\t}, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"aggregation with WHERE and AND\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sum(value) FROM cpu WHERE region='uk' AND host='serverZ'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sum\"],\"values\":[[\"1970-01-01T00:00:00Z\",50]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if 
!query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Aggregates_String(t *testing.T) {\n\tt.Parallel()\n\ts := OpenDefaultServer(NewConfig())\n\tdefer s.Close()\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join([]string{\n\t\t\tfmt.Sprintf(`stringdata value=\"first\" %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:03Z\").UnixNano()),\n\t\t\tfmt.Sprintf(`stringdata value=\"last\" %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:04Z\").UnixNano()),\n\t\t}, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t// strings\n\t\t&Query{\n\t\t\tname:    \"STDDEV on string data - string\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT STDDEV(value) FROM stringdata`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"stringdata\",\"columns\":[\"time\",\"stddev\"],\"values\":[[\"1970-01-01T00:00:00Z\",null]]}]}]}`,\n\t\t\tskip:    true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"MEAN on string data - string\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT MEAN(value) FROM stringdata`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"stringdata\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"1970-01-01T00:00:00Z\",0]]}]}]}`,\n\t\t\tskip:    true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"MEDIAN on string data - string\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT MEDIAN(value) FROM stringdata`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"stringdata\",\"columns\":[\"time\",\"median\"],\"values\":[[\"1970-01-01T00:00:00Z\",null]]}]}]}`,\n\t\t\tskip:    true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator\n\t\t},\n\t\t&Query{\n\t\t\tname:    
\"COUNT on string data - string\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT COUNT(value) FROM stringdata`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"stringdata\",\"columns\":[\"time\",\"count\"],\"values\":[[\"1970-01-01T00:00:00Z\",2]]}]}]}`,\n\t\t\tskip:    true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"FIRST on string data - string\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT FIRST(value) FROM stringdata`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"stringdata\",\"columns\":[\"time\",\"first\"],\"values\":[[\"2000-01-01T00:00:03Z\",\"first\"]]}]}]}`,\n\t\t\tskip:    true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"LAST on string data - string\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT LAST(value) FROM stringdata`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"stringdata\",\"columns\":[\"time\",\"last\"],\"values\":[[\"2000-01-01T00:00:04Z\",\"last\"]]}]}]}`,\n\t\t\tskip:    true, // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Aggregates_Math(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`network,host=server01,region=west,core=1 rx=10i,tx=20i,core=2i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server02,region=west,core=2 rx=40i,tx=50i,core=3i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server03,region=east,core=3 rx=40i,tx=55i,core=4i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:20Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server04,region=east,core=4 rx=40i,tx=60i,core=1i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:30Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server05,region=west,core=1 rx=50i,tx=70i,core=2i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:40Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server06,region=east,core=2 rx=50i,tx=40i,core=3i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:50Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server07,region=west,core=3 rx=70i,tx=30i,core=4i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server08,region=east,core=4 rx=90i,tx=10i,core=1i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server09,region=east,core=1 rx=5i,tx=4i,core=2i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:20Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"add two selectors\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT max(rx) + min(rx) FROM network WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:01:30Z'`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"max_min\"],\"values\":[[\"2000-01-01T00:00:00Z\",95]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"use math one two selectors separately\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT max(rx) * 1, min(rx) * 1 FROM network WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:01:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"max\",\"min\"],\"values\":[[\"2000-01-01T00:00:00Z\",90,5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"math with a single selector\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT max(rx) * 1 FROM network WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:01:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"max\"],\"values\":[[\"2000-01-01T00:01:10Z\",90]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_AggregateSelectors(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`network,host=server01,region=west,core=1 rx=10i,tx=20i,core=2i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server02,region=west,core=2 rx=40i,tx=50i,core=3i %d`, 
mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server03,region=east,core=3 rx=40i,tx=55i,core=4i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:20Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server04,region=east,core=4 rx=40i,tx=60i,core=1i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:30Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server05,region=west,core=1 rx=50i,tx=70i,core=2i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:40Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server06,region=east,core=2 rx=50i,tx=40i,core=3i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:50Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server07,region=west,core=3 rx=70i,tx=30i,core=4i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server08,region=east,core=4 rx=90i,tx=10i,core=1i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server09,region=east,core=1 rx=5i,tx=4i,core=2i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:20Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"baseline\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM network`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"core\",\"core_1\",\"host\",\"region\",\"rx\",\"tx\"],\"values\":[[\"2000-01-01T00:00:00Z\",2,\"1\",\"server01\",\"west\",10,20],[\"2000-01-01T00:00:10Z\",3,\"2\",\"server02\",\"west\",40,50],[\"2000-01-01T00:00:20Z\",4,\"3\",\"server03\",\"east\",40,55],[\"2000-01-01T00:00:30Z\",1,\"4\",\"server04\",\"east\",40,60],[\"2000-01-01T00:00:40Z\",2,\"1\",\"server05\",\"west\",50,70],[\"2000-01-01T00:00:50Z\",3,\"2\",\"server06\",\"east\",50,40],[\"2000-01-01T00:01:00Z\",4,\"3\",\"server07\",\"west\",70,30],[\"2000-01-01T00:01:10Z\",1,\"4\",\"server08\",\"east\",90,10],[\"2000-01-01T00:01:20Z\",2,\"1\",\"server09\",\"east\",5,4]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"max - baseline 30s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"max\"],\"values\":[[\"2000-01-01T00:00:00Z\",40],[\"2000-01-01T00:00:30Z\",50],[\"2000-01-01T00:01:00Z\",90]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"max - baseline 30s - epoch ms\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}, \"epoch\": []string{\"ms\"}},\n\t\t\tcommand: `SELECT max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp: fmt.Sprintf(\n\t\t\t\t`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"max\"],\"values\":[[%d,40],[%d,50],[%d,90]]}]}]}`,\n\t\t\t\tmustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()/int64(time.Millisecond),\n\t\t\t\tmustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:30Z\").UnixNano()/int64(time.Millisecond),\n\t\t\t\tmustParseTime(time.RFC3339Nano, 
\"2000-01-01T00:01:00Z\").UnixNano()/int64(time.Millisecond),\n\t\t\t),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"max - tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT tx, max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"tx\",\"max\"],\"values\":[[\"2000-01-01T00:00:00Z\",50,40],[\"2000-01-01T00:00:30Z\",70,50],[\"2000-01-01T00:01:00Z\",10,90]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"max - time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"max\"],\"values\":[[\"2000-01-01T00:00:00Z\",40],[\"2000-01-01T00:00:30Z\",50],[\"2000-01-01T00:01:00Z\",90]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"max - time and tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, tx, max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"tx\",\"max\"],\"values\":[[\"2000-01-01T00:00:00Z\",50,40],[\"2000-01-01T00:00:30Z\",70,50],[\"2000-01-01T00:01:00Z\",10,90]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"min - baseline 30s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"min\"],\"values\":[[\"2000-01-01T00:00:00Z\",10],[\"2000-01-01T00:00:30Z\",40],[\"2000-01-01T00:01:00Z\",5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"min - tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT tx, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"tx\",\"min\"],\"values\":[[\"2000-01-01T00:00:00Z\",20,10],[\"2000-01-01T00:00:30Z\",60,40],[\"2000-01-01T00:01:00Z\",4,5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"min - time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"min\"],\"values\":[[\"2000-01-01T00:00:00Z\",10],[\"2000-01-01T00:00:30Z\",40],[\"2000-01-01T00:01:00Z\",5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"min - time and tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, tx, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"tx\",\"min\"],\"values\":[[\"2000-01-01T00:00:00Z\",20,10],[\"2000-01-01T00:00:30Z\",60,40],[\"2000-01-01T00:01:00Z\",4,5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"max,min - baseline 30s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT max(rx), min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"max\",\"min\"],\"values\":[[\"2000-01-01T00:00:00Z\",40,10],[\"2000-01-01T00:00:30Z\",50,40],[\"2000-01-01T00:01:00Z\",90,5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"first - baseline 30s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"first\"],\"values\":[[\"2000-01-01T00:00:00Z\",10],[\"2000-01-01T00:00:30Z\",40],[\"2000-01-01T00:01:00Z\",70]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"first - tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, tx, first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"tx\",\"first\"],\"values\":[[\"2000-01-01T00:00:00Z\",20,10],[\"2000-01-01T00:00:30Z\",60,40],[\"2000-01-01T00:01:00Z\",30,70]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"first - time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"first\"],\"values\":[[\"2000-01-01T00:00:00Z\",10],[\"2000-01-01T00:00:30Z\",40],[\"2000-01-01T00:01:00Z\",70]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"first - time and tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, tx, first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"tx\",\"first\"],\"values\":[[\"2000-01-01T00:00:00Z\",20,10],[\"2000-01-01T00:00:30Z\",60,40],[\"2000-01-01T00:01:00Z\",30,70]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"last - baseline 30s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"last\"],\"values\":[[\"2000-01-01T00:00:00Z\",40],[\"2000-01-01T00:00:30Z\",50],[\"2000-01-01T00:01:00Z\",5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"last - tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT tx, last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"tx\",\"last\"],\"values\":[[\"2000-01-01T00:00:00Z\",55,40],[\"2000-01-01T00:00:30Z\",40,50],[\"2000-01-01T00:01:00Z\",4,5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"last - time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"last\"],\"values\":[[\"2000-01-01T00:00:00Z\",40],[\"2000-01-01T00:00:30Z\",50],[\"2000-01-01T00:01:00Z\",5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"last - time and tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, tx, last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"tx\",\"last\"],\"values\":[[\"2000-01-01T00:00:00Z\",55,40],[\"2000-01-01T00:00:30Z\",40,50],[\"2000-01-01T00:01:00Z\",4,5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"count - baseline 30s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT count(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"count\"],\"values\":[[\"2000-01-01T00:00:00Z\",3],[\"2000-01-01T00:00:30Z\",3],[\"2000-01-01T00:01:00Z\",3]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"count - time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, count(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"error\":\"error parsing query: mixing aggregate and non-aggregate queries is not supported\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"count - tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT tx, count(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"error\":\"error parsing query: mixing aggregate and non-aggregate queries is not supported\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"distinct - baseline 30s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"distinct\"],\"values\":[[\"2000-01-01T00:00:00Z\",10],[\"2000-01-01T00:00:00Z\",40],[\"2000-01-01T00:00:30Z\",40],[\"2000-01-01T00:00:30Z\",50],[\"2000-01-01T00:01:00Z\",70],[\"2000-01-01T00:01:00Z\",90],[\"2000-01-01T00:01:00Z\",5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"distinct - time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"error\":\"error parsing query: aggregate function distinct() cannot be combined with other functions or fields\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"distinct - tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT tx, distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"error\":\"error parsing query: aggregate function distinct() cannot be combined with other functions or fields\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"mean - baseline 30s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT mean(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2000-01-01T00:00:00Z\",30],[\"2000-01-01T00:00:30Z\",46.666666666666664],[\"2000-01-01T00:01:00Z\",55]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"mean - time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, mean(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"error\":\"error parsing query: mixing aggregate and non-aggregate queries is not 
supported\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"mean - tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT tx, mean(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"error\":\"error parsing query: mixing aggregate and non-aggregate queries is not supported\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"median - baseline 30s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT median(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"median\"],\"values\":[[\"2000-01-01T00:00:00Z\",40],[\"2000-01-01T00:00:30Z\",50],[\"2000-01-01T00:01:00Z\",70]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"median - time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, median(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"error\":\"error parsing query: mixing aggregate and non-aggregate queries is not supported\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"median - tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT tx, median(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"error\":\"error parsing query: mixing aggregate and non-aggregate queries is not supported\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"mode - baseline 30s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT mode(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"mode\"],\"values\":[[\"2000-01-01T00:00:00Z\",40],[\"2000-01-01T00:00:30Z\",50],[\"2000-01-01T00:01:00Z\",5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"mode - time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, mode(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"error\":\"error parsing query: mixing aggregate and non-aggregate queries is not supported\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"mode - tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT tx, mode(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"error\":\"error parsing query: mixing aggregate and non-aggregate queries is not supported\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"mode - baseline 30s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT mode(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"mode\"],\"values\":[[\"2000-01-01T00:00:00Z\",40],[\"2000-01-01T00:00:30Z\",50],[\"2000-01-01T00:01:00Z\",5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"mode - time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, mode(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"error\":\"error parsing query: mixing aggregate and non-aggregate queries is not supported\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"mode - tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT tx, mode(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND 
time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"error\":\"error parsing query: mixing aggregate and non-aggregate queries is not supported\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"spread - baseline 30s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT spread(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"spread\"],\"values\":[[\"2000-01-01T00:00:00Z\",30],[\"2000-01-01T00:00:30Z\",10],[\"2000-01-01T00:01:00Z\",85]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"spread - time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, spread(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"error\":\"error parsing query: mixing aggregate and non-aggregate queries is not supported\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"spread - tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT tx, spread(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"error\":\"error parsing query: mixing aggregate and non-aggregate queries is not supported\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"stddev - baseline 30s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT stddev(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"stddev\"],\"values\":[[\"2000-01-01T00:00:00Z\",17.320508075688775],[\"2000-01-01T00:00:30Z\",5.773502691896258],[\"2000-01-01T00:01:00Z\",44.44097208657794]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"stddev - 
time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, stddev(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"error\":\"error parsing query: mixing aggregate and non-aggregate queries is not supported\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"stddev - tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT tx, stddev(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"error\":\"error parsing query: mixing aggregate and non-aggregate queries is not supported\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"percentile - baseline 30s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT percentile(rx, 75) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"percentile\"],\"values\":[[\"2000-01-01T00:00:00Z\",40],[\"2000-01-01T00:00:30Z\",50],[\"2000-01-01T00:01:00Z\",70]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"percentile - time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT time, percentile(rx, 75) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"percentile\"],\"values\":[[\"2000-01-01T00:00:00Z\",40],[\"2000-01-01T00:00:30Z\",50],[\"2000-01-01T00:01:00Z\",70]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"percentile - tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT tx, percentile(rx, 75) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"tx\",\"percentile\"],\"values\":[[\"2000-01-01T00:00:00Z\",50,40],[\"2000-01-01T00:00:30Z\",70,50],[\"2000-01-01T00:01:00Z\",30,70]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_ExactTimeRange(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu value=1 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00.000000000Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu value=2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00.000000001Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu value=3 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00.000000002Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"query point at exactly one time - rfc3339nano\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM cpu WHERE time = '2000-01-01T00:00:00.000000001Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:00.000000001Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"query point at exactly one time - timestamp\",\n\t\t\tparams:  url.Values{\"db\": 
[]string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM cpu WHERE time = 946684800000000001`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:00.000000001Z\",2]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Selectors(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`network,host=server01,region=west,core=1 rx=10i,tx=20i,core=2i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server02,region=west,core=2 rx=40i,tx=50i,core=3i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server03,region=east,core=3 rx=40i,tx=55i,core=4i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:20Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server04,region=east,core=4 rx=40i,tx=60i,core=1i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:30Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server05,region=west,core=1 rx=50i,tx=70i,core=2i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:40Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server06,region=east,core=2 rx=50i,tx=40i,core=3i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:50Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server07,region=west,core=3 
rx=70i,tx=30i,core=4i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server08,region=east,core=4 rx=90i,tx=10i,core=1i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`network,host=server09,region=east,core=1 rx=5i,tx=4i,core=2i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:20Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"max - tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT max(tx) FROM network`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"max\"],\"values\":[[\"2000-01-01T00:00:40Z\",70]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"min - tx\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT min(tx) FROM network`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"min\"],\"values\":[[\"2000-01-01T00:01:20Z\",4]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"first\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT first(tx) FROM network`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"first\"],\"values\":[[\"2000-01-01T00:00:00Z\",20]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"last\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT last(tx) FROM network`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"last\"],\"values\":[[\"2000-01-01T00:01:20Z\",4]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"percentile\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT percentile(tx, 50) FROM 
network`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"network\",\"columns\":[\"time\",\"percentile\"],\"values\":[[\"2000-01-01T00:00:50Z\",40]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_TopBottomInt(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\t// cpu data with overlapping duplicate values\n\t\t// hour 0\n\t\tfmt.Sprintf(`cpu,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server02 value=3.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:20Z\").UnixNano()),\n\t\t// hour 1\n\t\tfmt.Sprintf(`cpu,host=server04 value=3.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T01:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server05 value=7.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T01:00:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server06 value=6.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T01:00:20Z\").UnixNano()),\n\t\t// hour 2\n\t\tfmt.Sprintf(`cpu,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T02:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T02:00:10Z\").UnixNano()),\n\n\t\t// memory 
data\n\t\t// hour 0\n\t\tfmt.Sprintf(`memory,host=a,service=redis value=1000i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`memory,host=b,service=mysql value=2000i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`memory,host=b,service=redis value=1500i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\t// hour 1\n\t\tfmt.Sprintf(`memory,host=a,service=redis value=1001i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T01:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`memory,host=b,service=mysql value=2001i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T01:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`memory,host=b,service=redis value=1501i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T01:00:00Z\").UnixNano()),\n\t\t// hour 2\n\t\tfmt.Sprintf(`memory,host=a,service=redis value=1002i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T02:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`memory,host=b,service=mysql value=2002i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T02:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`memory,host=b,service=redis value=1502i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T02:00:00Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"top - cpu\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT TOP(value, 1) FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"top\"],\"values\":[[\"2000-01-01T02:00:10Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"bottom - cpu\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT BOTTOM(value, 1) FROM cpu`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"bottom\"],\"values\":[[\"2000-01-01T00:00:00Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"top - cpu - 2 values\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT TOP(value, 2) FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"top\"],\"values\":[[\"2000-01-01T01:00:10Z\",7],[\"2000-01-01T02:00:10Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"bottom - cpu - 2 values\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT BOTTOM(value, 2) FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"bottom\"],\"values\":[[\"2000-01-01T00:00:00Z\",2],[\"2000-01-01T00:00:10Z\",3]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"top - cpu - 3 values - sorts on tie properly\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT TOP(value, 3) FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"top\"],\"values\":[[\"2000-01-01T01:00:10Z\",7],[\"2000-01-01T02:00:00Z\",7],[\"2000-01-01T02:00:10Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"bottom - cpu - 3 values - sorts on tie properly\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT BOTTOM(value, 3) FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"bottom\"],\"values\":[[\"2000-01-01T00:00:00Z\",2],[\"2000-01-01T00:00:10Z\",3],[\"2000-01-01T01:00:00Z\",3]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"top - cpu - with tag\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT TOP(value, host, 2) FROM cpu`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"top\",\"host\"],\"values\":[[\"2000-01-01T01:00:10Z\",7,\"server05\"],[\"2000-01-01T02:00:10Z\",9,\"server08\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"bottom - cpu - with tag\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT BOTTOM(value, host, 2) FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"bottom\",\"host\"],\"values\":[[\"2000-01-01T00:00:00Z\",2,\"server01\"],[\"2000-01-01T00:00:10Z\",3,\"server02\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"top - cpu - 3 values with limit 2\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT TOP(value, 3) FROM cpu limit 2`,\n\t\t\texp:     `{\"error\":\"error parsing query: limit (3) in top function can not be larger than the LIMIT (2) in the select statement\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"bottom - cpu - 3 values with limit 2\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT BOTTOM(value, 3) FROM cpu limit 2`,\n\t\t\texp:     `{\"error\":\"error parsing query: limit (3) in bottom function can not be larger than the LIMIT (2) in the select statement\"}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"top - cpu - hourly\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"top\"],\"values\":[[\"2000-01-01T00:00:20Z\",4],[\"2000-01-01T01:00:10Z\",7],[\"2000-01-01T02:00:10Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"bottom - cpu - hourly\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT BOTTOM(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= 
'2000-01-01T02:00:10Z' group by time(1h)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"bottom\"],\"values\":[[\"2000-01-01T00:00:00Z\",2],[\"2000-01-01T01:00:00Z\",3],[\"2000-01-01T02:00:00Z\",7]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"top - cpu - 2 values hourly\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT TOP(value, 2) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"top\"],\"values\":[[\"2000-01-01T00:00:10Z\",3],[\"2000-01-01T00:00:20Z\",4],[\"2000-01-01T01:00:10Z\",7],[\"2000-01-01T01:00:20Z\",6],[\"2000-01-01T02:00:00Z\",7],[\"2000-01-01T02:00:10Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"bottom - cpu - 2 values hourly\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT BOTTOM(value, 2) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"bottom\"],\"values\":[[\"2000-01-01T00:00:00Z\",2],[\"2000-01-01T00:00:10Z\",3],[\"2000-01-01T01:00:00Z\",3],[\"2000-01-01T01:00:20Z\",6],[\"2000-01-01T02:00:00Z\",7],[\"2000-01-01T02:00:10Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"top - cpu - 3 values hourly - validates that a bucket can have less than limit if no values exist in that time bucket\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT TOP(value, 3) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"top\"],\"values\":[[\"2000-01-01T00:00:00Z\",2],[\"2000-01-01T00:00:10Z\",3],[\"2000-01-01T00:00:20Z\",4],[\"2000-01-01T01:00:00Z\",3],[\"2000-01-01T01:00:10Z\",7],[\"2000-01-01T01:00:20Z\",6],[\"2000-01-01T02:00:00Z\",7],[\"2000-01-01T02:00:10Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"bottom - cpu - 3 values hourly - validates that a bucket can have less than limit if no values exist in that time bucket\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT BOTTOM(value, 3) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"bottom\"],\"values\":[[\"2000-01-01T00:00:00Z\",2],[\"2000-01-01T00:00:10Z\",3],[\"2000-01-01T00:00:20Z\",4],[\"2000-01-01T01:00:00Z\",3],[\"2000-01-01T01:00:10Z\",7],[\"2000-01-01T01:00:20Z\",6],[\"2000-01-01T02:00:00Z\",7],[\"2000-01-01T02:00:10Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"top - memory - 2 values, two tags\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT TOP(value, 2), host, service FROM memory`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"columns\":[\"time\",\"top\",\"host\",\"service\"],\"values\":[[\"2000-01-01T01:00:00Z\",2001,\"b\",\"mysql\"],[\"2000-01-01T02:00:00Z\",2002,\"b\",\"mysql\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"bottom - memory - 2 values, two tags\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT BOTTOM(value, 2), host, service FROM memory`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"columns\":[\"time\",\"bottom\",\"host\",\"service\"],\"values\":[[\"2000-01-01T00:00:00Z\",1000,\"a\",\"redis\"],[\"2000-01-01T01:00:00Z\",1001,\"a\",\"redis\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"top - memory - host tag with limit 2\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT TOP(value, host, 2) FROM memory`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"columns\":[\"time\",\"top\",\"host\"],\"values\":[[\"2000-01-01T02:00:00Z\",2002,\"b\"],[\"2000-01-01T02:00:00Z\",1002,\"a\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"bottom - memory - host tag with limit 2\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT BOTTOM(value, host, 2) FROM memory`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"columns\":[\"time\",\"bottom\",\"host\"],\"values\":[[\"2000-01-01T00:00:00Z\",1000,\"a\"],[\"2000-01-01T00:00:00Z\",1500,\"b\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"top - memory - host tag with limit 2, service tag in select\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT TOP(value, host, 2), service FROM memory`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"columns\":[\"time\",\"top\",\"host\",\"service\"],\"values\":[[\"2000-01-01T02:00:00Z\",2002,\"b\",\"mysql\"],[\"2000-01-01T02:00:00Z\",1002,\"a\",\"redis\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"bottom - memory - host tag with limit 2, service tag in select\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT BOTTOM(value, host, 2), service FROM memory`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"columns\":[\"time\",\"bottom\",\"host\",\"service\"],\"values\":[[\"2000-01-01T00:00:00Z\",1000,\"a\",\"redis\"],[\"2000-01-01T00:00:00Z\",1500,\"b\",\"redis\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"top - memory - service tag with limit 2, host tag in select\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT TOP(value, service, 2), host FROM memory`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"columns\":[\"time\",\"top\",\"service\",\"host\"],\"values\":[[\"2000-01-01T02:00:00Z\",2002,\"mysql\",\"b\"],[\"2000-01-01T02:00:00Z\",1502,\"redis\",\"b\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"bottom - memory - service tag with limit 2, host tag in select\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT BOTTOM(value, service, 2), host FROM memory`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"columns\":[\"time\",\"bottom\",\"service\",\"host\"],\"values\":[[\"2000-01-01T00:00:00Z\",1000,\"redis\",\"a\"],[\"2000-01-01T00:00:00Z\",2000,\"mysql\",\"b\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"top - memory - host and service tag with limit 2\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT TOP(value, host, service, 2) FROM memory`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"columns\":[\"time\",\"top\",\"host\",\"service\"],\"values\":[[\"2000-01-01T02:00:00Z\",2002,\"b\",\"mysql\"],[\"2000-01-01T02:00:00Z\",1502,\"b\",\"redis\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"bottom - memory - host and service tag with limit 2\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT BOTTOM(value, host, service, 2) FROM memory`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"columns\":[\"time\",\"bottom\",\"host\",\"service\"],\"values\":[[\"2000-01-01T00:00:00Z\",1000,\"a\",\"redis\"],[\"2000-01-01T00:00:00Z\",1500,\"b\",\"redis\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"top - memory - host tag with limit 2 with service tag in select\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT TOP(value, host, 2), service FROM memory`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"columns\":[\"time\",\"top\",\"host\",\"service\"],\"values\":[[\"2000-01-01T02:00:00Z\",2002,\"b\",\"mysql\"],[\"2000-01-01T02:00:00Z\",1002,\"a\",\"redis\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"bottom - memory - host tag with limit 2 with service tag in select\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT BOTTOM(value, host, 2), service FROM memory`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"columns\":[\"time\",\"bottom\",\"host\",\"service\"],\"values\":[[\"2000-01-01T00:00:00Z\",1000,\"a\",\"redis\"],[\"2000-01-01T00:00:00Z\",1500,\"b\",\"redis\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"top - memory - host and service tag with limit 3\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT TOP(value, host, service, 3) FROM memory`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"columns\":[\"time\",\"top\",\"host\",\"service\"],\"values\":[[\"2000-01-01T02:00:00Z\",2002,\"b\",\"mysql\"],[\"2000-01-01T02:00:00Z\",1502,\"b\",\"redis\"],[\"2000-01-01T02:00:00Z\",1002,\"a\",\"redis\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"bottom - memory - host and service tag with limit 3\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT BOTTOM(value, host, service, 3) FROM memory`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"columns\":[\"time\",\"bottom\",\"host\",\"service\"],\"values\":[[\"2000-01-01T00:00:00Z\",1000,\"a\",\"redis\"],[\"2000-01-01T00:00:00Z\",1500,\"b\",\"redis\"],[\"2000-01-01T00:00:00Z\",2000,\"b\",\"mysql\"]]}]}]}`,\n\t\t},\n\n\t\t// TODO\n\t\t// - Test that specifiying fields or tags in the function will rewrite the query to expand them to the fields\n\t\t// - Test that a field can be used in the top function\n\t\t// - Test that asking for a field will come back before a tag if they have the same name for a tag and a field\n\t\t// - Test that `select top(value, host, 2)` when there is only one value for `host` it will only bring back one value\n\t\t// - Test that `select top(value, host, 4) from foo where time > now() - 1d and time < now() group by time(1h)` and host is unique in some time buckets that it returns only the unique ones, and not always 4 values\n\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_TopBottomWriteTags(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server02 value=3.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, 
\"2000-01-01T00:00:20Z\").UnixNano()),\n\t\t// hour 1\n\t\tfmt.Sprintf(`cpu,host=server04 value=5.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T01:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server05 value=7.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T01:00:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server06 value=6.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T01:00:20Z\").UnixNano()),\n\t\t// hour 2\n\t\tfmt.Sprintf(`cpu,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T02:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T02:00:10Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"top - write - with tag\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT top(value, host, 2) INTO cpu_top FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"result\",\"columns\":[\"time\",\"written\"],\"values\":[[\"1970-01-01T00:00:00Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"top - read results with tags\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM cpu_top GROUP BY *`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu_top\",\"tags\":{\"host\":\"server05\"},\"columns\":[\"time\",\"top\"],\"values\":[[\"2000-01-01T01:00:10Z\",7]]},{\"name\":\"cpu_top\",\"tags\":{\"host\":\"server08\"},\"columns\":[\"time\",\"top\"],\"values\":[[\"2000-01-01T02:00:10Z\",9]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"top - read results as fields\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM cpu_top`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu_top\",\"columns\":[\"time\",\"host\",\"top\"],\"values\":[[\"2000-01-01T01:00:10Z\",\"server05\",7],[\"2000-01-01T02:00:10Z\",\"server08\",9]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Test various aggregates when different series only have data for the same timestamp.\nfunc TestServer_Query_Aggregates_IdenticalTime(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`series,host=a value=1 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`series,host=b value=2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`series,host=c value=3 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`series,host=d value=4 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`series,host=e value=5 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`series,host=f value=5 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`series,host=g value=5 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`series,host=h value=5 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`series,host=i 
value=5 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"last from multiple series with identical timestamp\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT last(value) FROM \"series\"`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"series\",\"columns\":[\"time\",\"last\"],\"values\":[[\"2000-01-01T00:00:00Z\",5]]}]}]}`,\n\t\t\trepeat:  100,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"first from multiple series with identical timestamp\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT first(value) FROM \"series\"`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"series\",\"columns\":[\"time\",\"first\"],\"values\":[[\"2000-01-01T00:00:00Z\",5]]}]}]}`,\n\t\t\trepeat:  100,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tfor n := 0; n <= query.repeat; n++ {\n\t\t\tif err := query.Execute(s); err != nil {\n\t\t\t\tt.Error(query.Error(err))\n\t\t\t} else if !query.success() {\n\t\t\t\tt.Error(query.failureMessage())\n\t\t\t}\n\t\t}\n\t}\n}\n\n// This will test that when using a group by, that it observes the time you asked for\n// but will only put the values in the bucket that match the time range\nfunc TestServer_Query_GroupByTimeCutoffs(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu value=1i %d`, 
mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu value=2i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:01Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu value=3i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:05Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu value=4i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:08Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu value=5i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:09Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu value=6i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t}\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"sum all time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT SUM(value) FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sum\"],\"values\":[[\"1970-01-01T00:00:00Z\",21]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"sum all time grouped by time 5s\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sum\"],\"values\":[[\"2000-01-01T00:00:00Z\",3],[\"2000-01-01T00:00:05Z\",12],[\"2000-01-01T00:00:10Z\",6]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"sum all time grouped by time 5s missing first point\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:01Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sum\"],\"values\":[[\"2000-01-01T00:00:00Z\",2],[\"2000-01-01T00:00:05Z\",12],[\"2000-01-01T00:00:10Z\",6]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"sum all time grouped by time 5s missing first points (null for bucket)\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:02Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sum\"],\"values\":[[\"2000-01-01T00:00:00Z\",null],[\"2000-01-01T00:00:05Z\",12],[\"2000-01-01T00:00:10Z\",6]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"sum all time grouped by time 5s missing last point - 2 time intervals\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:09Z' group by time(5s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sum\"],\"values\":[[\"2000-01-01T00:00:00Z\",3],[\"2000-01-01T00:00:05Z\",12]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"sum all time grouped by time 5s missing last 2 points - 2 time intervals\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:08Z' group by time(5s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sum\"],\"values\":[[\"2000-01-01T00:00:00Z\",3],[\"2000-01-01T00:00:05Z\",7]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif 
err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_MapType(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu value=2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`gpu speed=25 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t}\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"query value with a single measurement\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT value FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"query wildcard with a single measurement\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"query value with multiple measurements\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT value FROM cpu, gpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"query wildcard with multiple measurements\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM cpu, 
gpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"speed\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",null,2]]},{\"name\":\"gpu\",\"columns\":[\"time\",\"speed\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",25,null]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"query value with a regex measurement\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT value FROM /[cg]pu/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"query wildcard with a regex measurement\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM /[cg]pu/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"speed\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",null,2]]},{\"name\":\"gpu\",\"columns\":[\"time\",\"speed\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",25,null]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Subqueries(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu,host=server01 usage_user=70i,usage_system=30i %d`, mustParseTime(time.RFC3339Nano, 
\"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01 usage_user=45i,usage_system=55i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01 usage_user=23i,usage_system=77i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:20Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server02 usage_user=11i,usage_system=89i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server02 usage_user=28i,usage_system=72i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server02 usage_user=12i,usage_system=53i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:20Z\").UnixNano()),\n\t}\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT mean FROM (SELECT mean(usage_user) FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2000-01-01T00:00:00Z\",31.5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT value FROM (SELECT mean(usage_user) AS value FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",31.5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT mean(usage) FROM (SELECT 100 - usage_user AS usage FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2000-01-01T00:00:00Z\",68.5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT host FROM (SELECT min(usage_user), host FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"host\"],\"values\":[[\"2000-01-01T00:00:00Z\",\"server02\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT host FROM (SELECT min(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"host\"],\"values\":[[\"2000-01-01T00:00:00Z\",\"server02\"],[\"2000-01-01T00:00:20Z\",\"server01\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT host FROM (SELECT min(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' GROUP BY host`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"host\"],\"values\":[[\"2000-01-01T00:00:20Z\",\"server01\"]]},{\"name\":\"cpu\",\"tags\":{\"host\":\"server02\"},\"columns\":[\"time\",\"host\"],\"values\":[[\"2000-01-01T00:00:00Z\",\"server02\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT mean(min) FROM (SELECT min(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2000-01-01T00:00:00Z\",17]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT mean(min) FROM (SELECT (min(usage_user)) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2000-01-01T00:00:00Z\",17]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT max(min), host FROM (SELECT min(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"max\",\"host\"],\"values\":[[\"2000-01-01T00:00:20Z\",23,\"server01\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT mean, host FROM (SELECT mean(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"mean\",\"host\"],\"values\":[[\"2000-01-01T00:00:00Z\",46,\"server01\"],[\"2000-01-01T00:00:00Z\",17,\"server02\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT host FROM (SELECT mean(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"host\"],\"values\":[[\"2000-01-01T00:00:00Z\",\"server01\"],[\"2000-01-01T00:00:00Z\",\"server02\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT 
max(usage_system) FROM (SELECT min(usage_user), usage_system FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"max\"],\"values\":[[\"2000-01-01T00:00:00Z\",89]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT min(top), host FROM (SELECT top(usage_user, host, 2) FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"min\",\"host\"],\"values\":[[\"2000-01-01T00:00:10Z\",28,\"server02\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT min(top), host FROM (SELECT top(usage_user, 2), host FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"min\",\"host\"],\"values\":[[\"2000-01-01T00:00:10Z\",45,\"server01\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT count(host) FROM (SELECT top(usage_user, host, 2) FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"count\"],\"values\":[[\"2000-01-01T00:00:00Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sum(derivative) FROM (SELECT derivative(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sum\"],\"values\":[[\"2000-01-01T00:00:00Z\",-4.6]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT min(max) FROM (SELECT 100 - max(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"min\"],\"values\":[[\"2000-01-01T00:00:00Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT min(usage_system) FROM (SELECT max(usage_user), 100 - usage_system FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"min\"],\"values\":[[\"2000-01-01T00:00:10Z\",28]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT min(value) FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"min\"],\"values\":[[\"2000-01-01T00:00:10Z\",-44]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT min(value) FROM (SELECT top(usage_user, 2), usage_user - usage_system AS value FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' GROUP BY host`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"min\"],\"values\":[[\"2000-01-01T00:00:10Z\",-10]]},{\"name\":\"cpu\",\"tags\":{\"host\":\"server02\"},\"columns\":[\"time\",\"min\"],\"values\":[[\"2000-01-01T00:00:10Z\",-44]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT min(value) FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND host = 'server01'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"min\"],\"values\":[[\"2000-01-01T00:00:00Z\",40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT value FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND value > 0`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT max FROM (SELECT max(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND host = 'server01'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"max\"],\"values\":[[\"2000-01-01T00:00:00Z\",70]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT mean(value) FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND value > 0`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2000-01-01T00:00:00Z\",40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT mean(value) FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND host =~ /server/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2000-01-01T00:00:00Z\",-2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT top(usage_system, host, 2) FROM (SELECT min(usage_user), usage_system FROM cpu GROUP BY time(20s), host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"top\",\"host\"],\"values\":[[\"2000-01-01T00:00:00Z\",89,\"server02\"],[\"2000-01-01T00:00:20Z\",77,\"server01\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT bottom(usage_system, host, 2) FROM (SELECT max(usage_user), usage_system FROM cpu GROUP BY time(20s), host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"bottom\",\"host\"],\"values\":[[\"2000-01-01T00:00:00Z\",30,\"server01\"],[\"2000-01-01T00:00:20Z\",53,\"server02\"]]}]}]}`,\n\t\t},\n\t}...)\n}\n\nfunc TestServer_Query_SubqueryWithGroupBy(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu,host=server01,region=uswest value=1i %d`, 
mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01,region=uswest value=2i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:01Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01,region=uswest value=3i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:02Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01,region=uswest value=4i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:03Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server02,region=uswest value=5i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server02,region=uswest value=6i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:01Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server02,region=uswest value=7i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:02Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server02,region=uswest value=8i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:03Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01,region=useast value=9i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01,region=useast value=10i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:01Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01,region=useast value=11i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:02Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01,region=useast value=12i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:03Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server02,region=useast value=13i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server02,region=useast value=14i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:01Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server02,region=useast value=15i %d`, mustParseTime(time.RFC3339Nano, 
\"2000-01-01T00:00:02Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server02,region=useast value=16i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:03Z\").UnixNano()),\n\t}\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"group by time(2s) - time(2s), host\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT mean(mean) FROM (SELECT mean(value) FROM cpu GROUP BY time(2s), host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:04Z' GROUP BY time(2s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2000-01-01T00:00:00Z\",7.5],[\"2000-01-01T00:00:02Z\",9.5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"group by time(4s), host - time(2s), host\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT mean(mean) FROM (SELECT mean(value) FROM cpu GROUP BY time(2s), host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:04Z' GROUP BY time(4s), host`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"mean\"],\"values\":[[\"2000-01-01T00:00:00Z\",6.5]]},{\"name\":\"cpu\",\"tags\":{\"host\":\"server02\"},\"columns\":[\"time\",\"mean\"],\"values\":[[\"2000-01-01T00:00:00Z\",10.5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"group by time(2s), host - time(2s), host, region\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT mean(mean) FROM (SELECT mean(value) FROM cpu GROUP BY time(2s), host, region) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:04Z' GROUP BY time(2s), host`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"mean\"],\"values\":[[\"2000-01-01T00:00:00Z\",5.5],[\"2000-01-01T00:00:02Z\",7.5]]},{\"name\":\"cpu\",\"tags\":{\"host\":\"server02\"},\"columns\":[\"time\",\"mean\"],\"values\":[[\"2000-01-01T00:00:00Z\",9.5],[\"2000-01-01T00:00:02Z\",11.5]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_SubqueryMath(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(\"m0 f2=4,f3=2 %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(\"m0 f1=5,f3=8 %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\tfmt.Sprintf(\"m0 f1=5,f2=3,f3=6 %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:20Z\").UnixNano()),\n\t}\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"SumThreeValues\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sum FROM (SELECT f1 + f2 + f3 AS sum FROM m0)`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"m0\",\"columns\":[\"time\",\"sum\"],\"values\":[[\"2000-01-01T00:00:00Z\",null],[\"2000-01-01T00:00:10Z\",null],[\"2000-01-01T00:00:20Z\",14]]}]}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tt.Run(query.name, func(t *testing.T) {\n\t\t\tif query.skip {\n\t\t\t\tt.Skipf(\"SKIP:: %s\", query.name)\n\t\t\t}\n\t\t\tif err := query.Execute(s); err != nil {\n\t\t\t\tt.Error(query.Error(err))\n\t\t\t} else if !query.success() {\n\t\t\t\tt.Error(query.failureMessage())\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestServer_Query_PercentileDerivative(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`counter value=12 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`counter value=34 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`counter value=78 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:20Z\").UnixNano()),\n\t\tfmt.Sprintf(`counter value=89 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:30Z\").UnixNano()),\n\t\tfmt.Sprintf(`counter value=101 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:40Z\").UnixNano()),\n\t}\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"nth percentile of derivative\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT percentile(derivative, 95) FROM (SELECT derivative(value, 1s) FROM counter) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:50Z'`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"counter\",\"columns\":[\"time\",\"percentile\"],\"values\":[[\"2000-01-01T00:00:20Z\",4.4]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_UnderscoreMeasurement(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`_cpu value=1i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t}\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"select underscore with underscore prefix\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM _cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"_cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",1]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Write_Precision(t *testing.T) {\n\tt.Parallel()\n\ts := 
OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []struct {\n\t\twrite  string\n\t\tparams url.Values\n\t}{\n\t\t{\n\t\t\twrite: fmt.Sprintf(\"cpu_n0_precision value=1 %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T12:34:56.789012345Z\").UnixNano()),\n\t\t},\n\t\t{\n\t\t\twrite:  fmt.Sprintf(\"cpu_n1_precision value=1.1 %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T12:34:56.789012345Z\").UnixNano()),\n\t\t\tparams: url.Values{\"precision\": []string{\"n\"}},\n\t\t},\n\t\t{\n\t\t\twrite:  fmt.Sprintf(\"cpu_u_precision value=100 %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T12:34:56.789012345Z\").Truncate(time.Microsecond).UnixNano()/int64(time.Microsecond)),\n\t\t\tparams: url.Values{\"precision\": []string{\"u\"}},\n\t\t},\n\t\t{\n\t\t\twrite:  fmt.Sprintf(\"cpu_ms_precision value=200 %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T12:34:56.789012345Z\").Truncate(time.Millisecond).UnixNano()/int64(time.Millisecond)),\n\t\t\tparams: url.Values{\"precision\": []string{\"ms\"}},\n\t\t},\n\t\t{\n\t\t\twrite:  fmt.Sprintf(\"cpu_s_precision value=300 %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T12:34:56.789012345Z\").Truncate(time.Second).UnixNano()/int64(time.Second)),\n\t\t\tparams: url.Values{\"precision\": []string{\"s\"}},\n\t\t},\n\t\t{\n\t\t\twrite:  fmt.Sprintf(\"cpu_m_precision value=400 %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T12:34:56.789012345Z\").Truncate(time.Minute).UnixNano()/int64(time.Minute)),\n\t\t\tparams: url.Values{\"precision\": []string{\"m\"}},\n\t\t},\n\t\t{\n\t\t\twrite:  fmt.Sprintf(\"cpu_h_precision value=500 %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T12:34:56.789012345Z\").Truncate(time.Hour).UnixNano()/int64(time.Hour)),\n\t\t\tparams: url.Values{\"precision\": []string{\"h\"}},\n\t\t},\n\t}\n\n\ttest := NewTest(\"db0\", 
\"rp0\")\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"point with nanosecond precision time - no precision specified on write\",\n\t\t\tcommand: `SELECT * FROM cpu_n0_precision`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu_n0_precision\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T12:34:56.789012345Z\",1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"point with nanosecond precision time\",\n\t\t\tcommand: `SELECT * FROM cpu_n1_precision`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu_n1_precision\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T12:34:56.789012345Z\",1.1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"point with microsecond precision time\",\n\t\t\tcommand: `SELECT * FROM cpu_u_precision`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu_u_precision\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T12:34:56.789012Z\",100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"point with millisecond precision time\",\n\t\t\tcommand: `SELECT * FROM cpu_ms_precision`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu_ms_precision\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T12:34:56.789Z\",200]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"point with second precision time\",\n\t\t\tcommand: `SELECT * FROM cpu_s_precision`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu_s_precision\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T12:34:56Z\",300]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"point with minute precision time\",\n\t\t\tcommand: `SELECT * 
FROM cpu_m_precision`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu_m_precision\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T12:34:00Z\",400]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"point with hour precision time\",\n\t\t\tcommand: `SELECT * FROM cpu_h_precision`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu_h_precision\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T12:00:00Z\",500]]}]}]}`,\n\t\t},\n\t}...)\n\n\t// we are doing writes that require parameter changes, so we are fighting the test harness a little to make this happen properly\n\tfor _, w := range writes {\n\t\ttest.writes = Writes{\n\t\t\t&Write{data: w.write},\n\t\t}\n\t\ttest.params = w.params\n\t\ttest.initialized = false\n\t\tif err := test.init(s); err != nil {\n\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t}\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Wildcards(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`wildcard,region=us-east value=10 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`wildcard,region=us-east valx=20 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`wildcard,region=us-east value=30,valx=40 %d`, mustParseTime(time.RFC3339Nano, 
\"2000-01-01T00:00:20Z\").UnixNano()),\n\n\t\tfmt.Sprintf(`wgroup,region=us-east value=10.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`wgroup,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`wgroup,region=us-west value=30.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:20Z\").UnixNano()),\n\n\t\tfmt.Sprintf(`m1,region=us-east value=10.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`m2,host=server01 field=20.0 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:01Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"wildcard\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM wildcard`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"wildcard\",\"columns\":[\"time\",\"region\",\"value\",\"valx\"],\"values\":[[\"2000-01-01T00:00:00Z\",\"us-east\",10,null],[\"2000-01-01T00:00:10Z\",\"us-east\",null,20],[\"2000-01-01T00:00:20Z\",\"us-east\",30,40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"wildcard with group by\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM wildcard GROUP BY *`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"wildcard\",\"tags\":{\"region\":\"us-east\"},\"columns\":[\"time\",\"value\",\"valx\"],\"values\":[[\"2000-01-01T00:00:00Z\",10,null],[\"2000-01-01T00:00:10Z\",null,20],[\"2000-01-01T00:00:20Z\",30,40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"GROUP BY queries\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT mean(value) FROM wgroup GROUP BY *`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"wgroup\",\"tags\":{\"region\":\"us-east\"},\"columns\":[\"time\",\"mean\"],\"values\":[[\"1970-01-01T00:00:00Z\",15]]},{\"name\":\"wgroup\",\"tags\":{\"region\":\"us-west\"},\"columns\":[\"time\",\"mean\"],\"values\":[[\"1970-01-01T00:00:00Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"GROUP BY queries with time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT mean(value) FROM wgroup WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:01:00Z' GROUP BY *,TIME(1m)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"wgroup\",\"tags\":{\"region\":\"us-east\"},\"columns\":[\"time\",\"mean\"],\"values\":[[\"2000-01-01T00:00:00Z\",15]]},{\"name\":\"wgroup\",\"tags\":{\"region\":\"us-west\"},\"columns\":[\"time\",\"mean\"],\"values\":[[\"2000-01-01T00:00:00Z\",30]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"wildcard and field in select\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT value, * FROM wildcard`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"wildcard\",\"columns\":[\"time\",\"value\",\"region\",\"value_1\",\"valx\"],\"values\":[[\"2000-01-01T00:00:00Z\",10,\"us-east\",10,null],[\"2000-01-01T00:00:10Z\",null,\"us-east\",null,20],[\"2000-01-01T00:00:20Z\",30,\"us-east\",30,40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"field and wildcard in select\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT value, * FROM wildcard`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"wildcard\",\"columns\":[\"time\",\"value\",\"region\",\"value_1\",\"valx\"],\"values\":[[\"2000-01-01T00:00:00Z\",10,\"us-east\",10,null],[\"2000-01-01T00:00:10Z\",null,\"us-east\",null,20],[\"2000-01-01T00:00:20Z\",30,\"us-east\",30,40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"field and wildcard in group by\",\n\t\t\tparams:  
url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM wildcard GROUP BY region, *`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"wildcard\",\"tags\":{\"region\":\"us-east\"},\"columns\":[\"time\",\"value\",\"valx\"],\"values\":[[\"2000-01-01T00:00:00Z\",10,null],[\"2000-01-01T00:00:10Z\",null,20],[\"2000-01-01T00:00:20Z\",30,40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"wildcard and field in group by\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM wildcard GROUP BY *, region`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"wildcard\",\"tags\":{\"region\":\"us-east\"},\"columns\":[\"time\",\"value\",\"valx\"],\"values\":[[\"2000-01-01T00:00:00Z\",10,null],[\"2000-01-01T00:00:10Z\",null,20],[\"2000-01-01T00:00:20Z\",30,40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"wildcard with multiple measurements\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM m1, m2`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"m1\",\"columns\":[\"time\",\"field\",\"host\",\"region\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",null,null,\"us-east\",10]]},{\"name\":\"m2\",\"columns\":[\"time\",\"field\",\"host\",\"region\",\"value\"],\"values\":[[\"2000-01-01T00:00:01Z\",20,\"server01\",null,null]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"wildcard with multiple measurements via regex\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM /^m.*/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"m1\",\"columns\":[\"time\",\"field\",\"host\",\"region\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",null,null,\"us-east\",10]]},{\"name\":\"m2\",\"columns\":[\"time\",\"field\",\"host\",\"region\",\"value\"],\"values\":[[\"2000-01-01T00:00:01Z\",20,\"server01\",null,null]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"wildcard with 
multiple measurements via regex and limit\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM db0../^m.*/ LIMIT 2`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"m1\",\"columns\":[\"time\",\"field\",\"host\",\"region\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",null,null,\"us-east\",10]]},{\"name\":\"m2\",\"columns\":[\"time\",\"field\",\"host\",\"region\",\"value\"],\"values\":[[\"2000-01-01T00:00:01Z\",20,\"server01\",null,null]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_WildcardExpansion(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`wildcard,region=us-east,host=A value=10,cpu=80 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`wildcard,region=us-east,host=B value=20,cpu=90 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`wildcard,region=us-west,host=B value=30,cpu=70 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:20Z\").UnixNano()),\n\t\tfmt.Sprintf(`wildcard,region=us-east,host=A value=40,cpu=60 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:30Z\").UnixNano()),\n\n\t\tfmt.Sprintf(`dupnames,region=us-east,day=1 value=10,day=3i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`dupnames,region=us-east,day=2 
value=20,day=2i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`dupnames,region=us-west,day=3 value=30,day=1i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:20Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"wildcard\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM wildcard`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"wildcard\",\"columns\":[\"time\",\"cpu\",\"host\",\"region\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",80,\"A\",\"us-east\",10],[\"2000-01-01T00:00:10Z\",90,\"B\",\"us-east\",20],[\"2000-01-01T00:00:20Z\",70,\"B\",\"us-west\",30],[\"2000-01-01T00:00:30Z\",60,\"A\",\"us-east\",40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"no wildcard in select\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT cpu, host, region, value  FROM wildcard`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"wildcard\",\"columns\":[\"time\",\"cpu\",\"host\",\"region\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",80,\"A\",\"us-east\",10],[\"2000-01-01T00:00:10Z\",90,\"B\",\"us-east\",20],[\"2000-01-01T00:00:20Z\",70,\"B\",\"us-west\",30],[\"2000-01-01T00:00:30Z\",60,\"A\",\"us-east\",40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"no wildcard in select, preserve column order\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT host, cpu, region, value  FROM wildcard`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"wildcard\",\"columns\":[\"time\",\"host\",\"cpu\",\"region\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",\"A\",80,\"us-east\",10],[\"2000-01-01T00:00:10Z\",\"B\",90,\"us-east\",20],[\"2000-01-01T00:00:20Z\",\"B\",70,\"us-west\",30],[\"2000-01-01T00:00:30Z\",\"A\",60,\"us-east\",40]]}]}]}`,\n\t\t},\n\n\t\t&Query{\n\t\t\tname:    \"no wildcard with alias\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT cpu as c, host as h, region, value  FROM wildcard`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"wildcard\",\"columns\":[\"time\",\"c\",\"h\",\"region\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",80,\"A\",\"us-east\",10],[\"2000-01-01T00:00:10Z\",90,\"B\",\"us-east\",20],[\"2000-01-01T00:00:20Z\",70,\"B\",\"us-west\",30],[\"2000-01-01T00:00:30Z\",60,\"A\",\"us-east\",40]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"duplicate tag and field key\",\n\t\t\tcommand: `SELECT * FROM dupnames`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"dupnames\",\"columns\":[\"time\",\"day\",\"day_1\",\"region\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",3,\"1\",\"us-east\",10],[\"2000-01-01T00:00:10Z\",2,\"2\",\"us-east\",20],[\"2000-01-01T00:00:20Z\",1,\"3\",\"us-west\",30]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_AcrossShardsAndFields(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := 
s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu load=100 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu load=200 %d`, mustParseTime(time.RFC3339Nano, \"2010-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu core=4 %d`, mustParseTime(time.RFC3339Nano, \"2015-01-01T00:00:00Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"two results for cpu\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT load FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"load\"],\"values\":[[\"2000-01-01T00:00:00Z\",100],[\"2010-01-01T00:00:00Z\",200]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"two results for cpu, multi-select\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT core,load FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"core\",\"load\"],\"values\":[[\"2000-01-01T00:00:00Z\",null,100],[\"2010-01-01T00:00:00Z\",null,200],[\"2015-01-01T00:00:00Z\",4,null]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"two results for cpu, wildcard select\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"core\",\"load\"],\"values\":[[\"2000-01-01T00:00:00Z\",null,100],[\"2010-01-01T00:00:00Z\",null,200],[\"2015-01-01T00:00:00Z\",4,null]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"one result for core\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT core FROM cpu`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"core\"],\"values\":[[\"2015-01-01T00:00:00Z\",4]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"empty result set from non-existent field\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT foo FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_OrderedAcrossShards(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu value=7 %d`, mustParseTime(time.RFC3339Nano, \"2010-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu value=14 %d`, mustParseTime(time.RFC3339Nano, \"2010-01-08T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu value=28 %d`, mustParseTime(time.RFC3339Nano, \"2010-01-15T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu value=56 %d`, mustParseTime(time.RFC3339Nano, \"2010-01-22T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu value=112 %d`, mustParseTime(time.RFC3339Nano, \"2010-01-29T00:00:00Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"derivative\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT derivative(value, 24h) FROM cpu`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"derivative\"],\"values\":[[\"2010-01-08T00:00:00Z\",1],[\"2010-01-15T00:00:00Z\",2],[\"2010-01-22T00:00:00Z\",4],[\"2010-01-29T00:00:00Z\",8]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"non_negative_derivative\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT non_negative_derivative(value, 24h) FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"non_negative_derivative\"],\"values\":[[\"2010-01-08T00:00:00Z\",1],[\"2010-01-15T00:00:00Z\",2],[\"2010-01-22T00:00:00Z\",4],[\"2010-01-29T00:00:00Z\",8]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"difference\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT difference(value) FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"difference\"],\"values\":[[\"2010-01-08T00:00:00Z\",7],[\"2010-01-15T00:00:00Z\",14],[\"2010-01-22T00:00:00Z\",28],[\"2010-01-29T00:00:00Z\",56]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"cumulative_sum\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT cumulative_sum(value) FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"cumulative_sum\"],\"values\":[[\"2010-01-01T00:00:00Z\",7],[\"2010-01-08T00:00:00Z\",21],[\"2010-01-15T00:00:00Z\",49],[\"2010-01-22T00:00:00Z\",105],[\"2010-01-29T00:00:00Z\",217]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() 
{\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Where_Fields(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu alert_id=\"alert\",tenant_id=\"tenant\",_cust=\"johnson brothers\" %d`, mustParseTime(time.RFC3339Nano, \"2015-02-28T01:03:36.703820946Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu alert_id=\"alert\",tenant_id=\"tenant\",_cust=\"johnson brothers\" %d`, mustParseTime(time.RFC3339Nano, \"2015-02-28T01:03:36.703820946Z\").UnixNano()),\n\n\t\tfmt.Sprintf(`cpu load=100.0,core=4 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:02Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu load=80.0,core=2 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:01:02Z\").UnixNano()),\n\n\t\tfmt.Sprintf(`clicks local=true %d`, mustParseTime(time.RFC3339Nano, \"2014-11-10T23:00:01Z\").UnixNano()),\n\t\tfmt.Sprintf(`clicks local=false %d`, mustParseTime(time.RFC3339Nano, \"2014-11-10T23:00:02Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t// non type specific\n\t\t&Query{\n\t\t\tname:    \"missing measurement with group by\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT load from missing group by *`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\n\t\t// string\n\t\t&Query{\n\t\t\tname:    \"single string field\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT alert_id FROM cpu WHERE alert_id='alert'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"alert_id\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",\"alert\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"string 
AND query, all fields in SELECT\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT alert_id,tenant_id,_cust FROM cpu WHERE alert_id='alert' AND tenant_id='tenant'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"alert_id\",\"tenant_id\",\"_cust\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",\"alert\",\"tenant\",\"johnson brothers\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"string AND query, all fields in SELECT, one in parenthesis\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT alert_id,tenant_id FROM cpu WHERE alert_id='alert' AND (tenant_id='tenant')`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"alert_id\",\"tenant_id\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",\"alert\",\"tenant\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"string underscored field\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT alert_id FROM cpu WHERE _cust='johnson brothers'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"alert_id\"],\"values\":[[\"2015-02-28T01:03:36.703820946Z\",\"alert\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"string no match\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT alert_id FROM cpu WHERE _cust='acme'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\n\t\t// float64\n\t\t&Query{\n\t\t\tname:    \"float64 GT no match\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select load from cpu where load > 100`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"float64 GTE match one\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select load from cpu where load >= 100`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"load\"],\"values\":[[\"2009-11-10T23:00:02Z\",100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"float64 EQ match upper bound\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select load from cpu where load = 100`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"load\"],\"values\":[[\"2009-11-10T23:00:02Z\",100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"float64 LTE match two\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select load from cpu where load <= 100`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"load\"],\"values\":[[\"2009-11-10T23:00:02Z\",100],[\"2009-11-10T23:01:02Z\",80]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"float64 GT match one\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select load from cpu where load > 99`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"load\"],\"values\":[[\"2009-11-10T23:00:02Z\",100]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"float64 EQ no match\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select load from cpu where load = 99`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"float64 LT match one\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select load from cpu where load < 99`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"load\"],\"values\":[[\"2009-11-10T23:01:02Z\",80]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"float64 LT no match\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select load from cpu where load < 80`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"float64 NE match one\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select load from cpu where load != 100`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"load\"],\"values\":[[\"2009-11-10T23:01:02Z\",80]]}]}]}`,\n\t\t},\n\n\t\t// int64\n\t\t&Query{\n\t\t\tname:    \"int64 GT no match\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select core from cpu where core > 4`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"int64 GTE match one\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select core from cpu where core >= 4`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"core\"],\"values\":[[\"2009-11-10T23:00:02Z\",4]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"int64 EQ match upper bound\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select core from cpu where core = 4`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"core\"],\"values\":[[\"2009-11-10T23:00:02Z\",4]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"int64 LTE match two \",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select core from cpu where core <= 4`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"core\"],\"values\":[[\"2009-11-10T23:00:02Z\",4],[\"2009-11-10T23:01:02Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"int64 GT match one\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select core from cpu where core > 3`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"core\"],\"values\":[[\"2009-11-10T23:00:02Z\",4]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"int64 EQ no match\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select core from cpu where core = 3`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"int64 LT match one\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select core from cpu where core < 3`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"core\"],\"values\":[[\"2009-11-10T23:01:02Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"int64 LT no match\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select core from cpu where core < 2`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"int64 NE match one\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select core from cpu where core != 4`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"core\"],\"values\":[[\"2009-11-10T23:01:02Z\",2]]}]}]}`,\n\t\t},\n\n\t\t// bool\n\t\t&Query{\n\t\t\tname:    \"bool EQ match true\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select local from clicks where local = true`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"clicks\",\"columns\":[\"time\",\"local\"],\"values\":[[\"2014-11-10T23:00:01Z\",true]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"bool EQ match false\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select local from clicks where local = false`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"clicks\",\"columns\":[\"time\",\"local\"],\"values\":[[\"2014-11-10T23:00:02Z\",false]]}]}]}`,\n\t\t},\n\n\t\t&Query{\n\t\t\tname:    \"bool NE match one\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select local from clicks where local != true`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"clicks\",\"columns\":[\"time\",\"local\"],\"values\":[[\"2014-11-10T23:00:02Z\",false]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Where_With_Tags(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`where_events,tennant=paul foo=\"bar\" %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:02Z\").UnixNano()),\n\t\tfmt.Sprintf(`where_events,tennant=paul foo=\"baz\" %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:03Z\").UnixNano()),\n\t\tfmt.Sprintf(`where_events,tennant=paul foo=\"bat\" %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:04Z\").UnixNano()),\n\t\tfmt.Sprintf(`where_events,tennant=todd foo=\"bar\" %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:05Z\").UnixNano()),\n\t\tfmt.Sprintf(`where_events,tennant=david foo=\"bap\" %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:06Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: 
strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"tag field and time\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select foo from where_events where (tennant = 'paul' OR tennant = 'david') AND time > 1s AND (foo = 'bar' OR foo = 'baz' OR foo = 'bap')`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"where_events\",\"columns\":[\"time\",\"foo\"],\"values\":[[\"2009-11-10T23:00:02Z\",\"bar\"],[\"2009-11-10T23:00:03Z\",\"baz\"],[\"2009-11-10T23:00:06Z\",\"bap\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"tag or field\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select foo from where_events where tennant = 'paul' OR foo = 'bar'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"where_events\",\"columns\":[\"time\",\"foo\"],\"values\":[[\"2009-11-10T23:00:02Z\",\"bar\"],[\"2009-11-10T23:00:03Z\",\"baz\"],[\"2009-11-10T23:00:04Z\",\"bat\"],[\"2009-11-10T23:00:05Z\",\"bar\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"non-existant tag and field\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select foo from where_events where tenant != 'paul' AND foo = 'bar'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"where_events\",\"columns\":[\"time\",\"foo\"],\"values\":[[\"2009-11-10T23:00:02Z\",\"bar\"],[\"2009-11-10T23:00:05Z\",\"bar\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"non-existant tag or field\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select foo from where_events where tenant != 'paul' OR foo = 'bar'`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"where_events\",\"columns\":[\"time\",\"foo\"],\"values\":[[\"2009-11-10T23:00:02Z\",\"bar\"],[\"2009-11-10T23:00:03Z\",\"baz\"],[\"2009-11-10T23:00:04Z\",\"bat\"],[\"2009-11-10T23:00:05Z\",\"bar\"],[\"2009-11-10T23:00:06Z\",\"bap\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"where on tag that should be double quoted but isn't\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `show series where data-center = 'foo'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"invalid tag comparison operator\"}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"where comparing tag and field\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select foo from where_events where tennant != foo`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"where_events\",\"columns\":[\"time\",\"foo\"],\"values\":[[\"2009-11-10T23:00:02Z\",\"bar\"],[\"2009-11-10T23:00:03Z\",\"baz\"],[\"2009-11-10T23:00:04Z\",\"bat\"],[\"2009-11-10T23:00:05Z\",\"bar\"],[\"2009-11-10T23:00:06Z\",\"bap\"]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"where comparing tag and tag\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select foo from where_events where tennant = tennant`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"where_events\",\"columns\":[\"time\",\"foo\"],\"values\":[[\"2009-11-10T23:00:02Z\",\"bar\"],[\"2009-11-10T23:00:03Z\",\"baz\"],[\"2009-11-10T23:00:04Z\",\"bat\"],[\"2009-11-10T23:00:05Z\",\"bar\"],[\"2009-11-10T23:00:06Z\",\"bap\"]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if 
!query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_With_EmptyTags(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu value=1 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:02Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01 value=2 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:03Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"where empty tag\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select value from cpu where host = ''`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2009-11-10T23:00:02Z\",1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"where not empty tag\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select value from cpu where host != ''`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2009-11-10T23:00:03Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"where regex none\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select value from cpu where host !~ /.*/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"where regex exact\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select value from cpu where host =~ /^server01$/`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2009-11-10T23:00:03Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"where regex exact (case insensitive)\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select value from cpu where host =~ /(?i)^SeRvEr01$/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2009-11-10T23:00:03Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"where regex exact (not)\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select value from cpu where host !~ /^server01$/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2009-11-10T23:00:02Z\",1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"where regex at least one char\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select value from cpu where host =~ /.+/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2009-11-10T23:00:03Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"where regex not at least one char\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select value from cpu where host !~ /.+/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2009-11-10T23:00:02Z\",1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"group by empty tag\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select value from cpu group by host`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2009-11-10T23:00:02Z\",1]]},{\"name\":\"cpu\",\"tags\":{\"host\":\"server01\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2009-11-10T23:00:03Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"group by missing tag\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select value from cpu group by region`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"region\":\"\"},\"columns\":[\"time\",\"value\"],\"values\":[[\"2009-11-10T23:00:02Z\",1],[\"2009-11-10T23:00:03Z\",2]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_LimitAndOffset(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`limited,tennant=paul foo=2 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:02Z\").UnixNano()),\n\t\tfmt.Sprintf(`limited,tennant=paul foo=3 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:03Z\").UnixNano()),\n\t\tfmt.Sprintf(`limited,tennant=paul foo=4 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:04Z\").UnixNano()),\n\t\tfmt.Sprintf(`limited,tennant=todd foo=5 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:05Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: 
strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"limit on points\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select foo from \"limited\" LIMIT 2`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"limited\",\"columns\":[\"time\",\"foo\"],\"values\":[[\"2009-11-10T23:00:02Z\",2],[\"2009-11-10T23:00:03Z\",3]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"limit higher than the number of data points\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select foo from \"limited\" LIMIT 20`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"limited\",\"columns\":[\"time\",\"foo\"],\"values\":[[\"2009-11-10T23:00:02Z\",2],[\"2009-11-10T23:00:03Z\",3],[\"2009-11-10T23:00:04Z\",4],[\"2009-11-10T23:00:05Z\",5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"limit and offset\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select foo from \"limited\" LIMIT 2 OFFSET 1`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"limited\",\"columns\":[\"time\",\"foo\"],\"values\":[[\"2009-11-10T23:00:03Z\",3],[\"2009-11-10T23:00:04Z\",4]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"limit + offset equal to total number of points\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select foo from \"limited\" LIMIT 3 OFFSET 3`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"limited\",\"columns\":[\"time\",\"foo\"],\"values\":[[\"2009-11-10T23:00:05Z\",5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"limit - offset higher than number of points\",\n\t\t\tcommand: `select foo from \"limited\" LIMIT 2 OFFSET 20`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"limit on points with group by time\",\n\t\t\tcommand: `select mean(foo) from 
\"limited\" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"limited\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2009-11-10T23:00:02Z\",2],[\"2009-11-10T23:00:03Z\",3]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"limit higher than the number of data points with group by time\",\n\t\t\tcommand: `select mean(foo) from \"limited\" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 20`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"limited\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2009-11-10T23:00:02Z\",2],[\"2009-11-10T23:00:03Z\",3],[\"2009-11-10T23:00:04Z\",4],[\"2009-11-10T23:00:05Z\",5]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"limit and offset with group by time\",\n\t\t\tcommand: `select mean(foo) from \"limited\" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2 OFFSET 1`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"limited\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2009-11-10T23:00:03Z\",3],[\"2009-11-10T23:00:04Z\",4]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"limit + offset equal to the number of points with group by time\",\n\t\t\tcommand: `select mean(foo) from \"limited\" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 3 OFFSET 3`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"limited\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2009-11-10T23:00:05Z\",5]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"limit - offset higher than number of points with group by 
time\",\n\t\t\tcommand: `select mean(foo) from \"limited\" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2 OFFSET 20`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"limit - group by tennant\",\n\t\t\tcommand: `select foo from \"limited\" group by tennant limit 1`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"limited\",\"tags\":{\"tennant\":\"paul\"},\"columns\":[\"time\",\"foo\"],\"values\":[[\"2009-11-10T23:00:02Z\",2]]},{\"name\":\"limited\",\"tags\":{\"tennant\":\"todd\"},\"columns\":[\"time\",\"foo\"],\"values\":[[\"2009-11-10T23:00:05Z\",5]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"limit and offset - group by tennant\",\n\t\t\tcommand: `select foo from \"limited\" group by tennant limit 1 offset 1`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"limited\",\"tags\":{\"tennant\":\"paul\"},\"columns\":[\"time\",\"foo\"],\"values\":[[\"2009-11-10T23:00:03Z\",3]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Fill(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`fills val=3 %d`, mustParseTime(time.RFC3339Nano, 
\"2009-11-10T23:00:02Z\").UnixNano()),\n\t\tfmt.Sprintf(`fills val=5 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:03Z\").UnixNano()),\n\t\tfmt.Sprintf(`fills val=4 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:06Z\").UnixNano()),\n\t\tfmt.Sprintf(`fills val=10 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:16Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"fill with value\",\n\t\t\tcommand: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(1)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"fills\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2009-11-10T23:00:00Z\",4],[\"2009-11-10T23:00:05Z\",4],[\"2009-11-10T23:00:10Z\",1],[\"2009-11-10T23:00:15Z\",10]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"fill with value, WHERE all values match condition\",\n\t\t\tcommand: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val < 50 group by time(5s) FILL(1)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"fills\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2009-11-10T23:00:00Z\",4],[\"2009-11-10T23:00:05Z\",4],[\"2009-11-10T23:00:10Z\",1],[\"2009-11-10T23:00:15Z\",10]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"fill with value, WHERE no values match condition\",\n\t\t\tcommand: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val > 50 group by time(5s) FILL(1)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"fill with 
previous\",\n\t\t\tcommand: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"fills\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2009-11-10T23:00:00Z\",4],[\"2009-11-10T23:00:05Z\",4],[\"2009-11-10T23:00:10Z\",4],[\"2009-11-10T23:00:15Z\",10]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"fill with none, i.e. clear out nulls\",\n\t\t\tcommand: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(none)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"fills\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2009-11-10T23:00:00Z\",4],[\"2009-11-10T23:00:05Z\",4],[\"2009-11-10T23:00:15Z\",10]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"fill defaults to null\",\n\t\t\tcommand: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"fills\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"2009-11-10T23:00:00Z\",4],[\"2009-11-10T23:00:05Z\",4],[\"2009-11-10T23:00:10Z\",null],[\"2009-11-10T23:00:15Z\",10]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"fill defaults to 0 for count\",\n\t\t\tcommand: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"fills\",\"columns\":[\"time\",\"count\"],\"values\":[[\"2009-11-10T23:00:00Z\",2],[\"2009-11-10T23:00:05Z\",1],[\"2009-11-10T23:00:10Z\",0],[\"2009-11-10T23:00:15Z\",1]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": 
[]string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"fill none drops 0s for count\",\n\t\t\tcommand: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) fill(none)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"fills\",\"columns\":[\"time\",\"count\"],\"values\":[[\"2009-11-10T23:00:00Z\",2],[\"2009-11-10T23:00:05Z\",1],[\"2009-11-10T23:00:15Z\",1]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"fill previous overwrites 0s for count\",\n\t\t\tcommand: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) fill(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"fills\",\"columns\":[\"time\",\"count\"],\"values\":[[\"2009-11-10T23:00:00Z\",2],[\"2009-11-10T23:00:05Z\",1],[\"2009-11-10T23:00:10Z\",1],[\"2009-11-10T23:00:15Z\",1]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_TimeZone(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar writes []string\n\tfor _, start := range []time.Time{\n\t\t// One day before DST starts.\n\t\ttime.Date(2000, 4, 1, 0, 0, 0, 0, LosAngeles),\n\t\t// Middle of DST. 
No change.\n\t\ttime.Date(2000, 6, 1, 0, 0, 0, 0, LosAngeles),\n\t\t// One day before DST ends.\n\t\ttime.Date(2000, 10, 28, 0, 0, 0, 0, LosAngeles),\n\t} {\n\t\tts := start\n\t\t// Write every hour for 4 days.\n\t\tfor i := 0; i < 24*4; i++ {\n\t\t\twrites = append(writes, fmt.Sprintf(`cpu,interval=daily value=0 %d`, ts.UnixNano()))\n\t\t\tts = ts.Add(time.Hour)\n\t\t}\n\n\t\t// Write every 5 minutes for 3 hours. Start at 1 on the day with DST.\n\t\tts = start.Add(25 * time.Hour)\n\t\tfor i := 0; i < 12*3; i++ {\n\t\t\twrites = append(writes, fmt.Sprintf(`cpu,interval=hourly value=0 %d`, ts.UnixNano()))\n\t\t\tts = ts.Add(5 * time.Minute)\n\t\t}\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"timezone offset - dst start - daily\",\n\t\t\tcommand: `SELECT count(value) FROM cpu WHERE time >= '2000-04-02T00:00:00-08:00' AND time < '2000-04-04T00:00:00-07:00' AND interval = 'daily' GROUP BY time(1d) TZ('America/Los_Angeles')`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"count\"],\"values\":[[\"2000-04-02T00:00:00-08:00\",23],[\"2000-04-03T00:00:00-07:00\",24]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"timezone offset - no change - daily\",\n\t\t\tcommand: `SELECT count(value) FROM cpu WHERE time >= '2000-06-01T00:00:00-07:00' AND time < '2000-06-03T00:00:00-07:00' AND interval = 'daily' GROUP BY time(1d) TZ('America/Los_Angeles')`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"count\"],\"values\":[[\"2000-06-01T00:00:00-07:00\",24],[\"2000-06-02T00:00:00-07:00\",24]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"timezone offset - dst end - daily\",\n\t\t\tcommand: `SELECT count(value) FROM cpu WHERE 
time >= '2000-10-29T00:00:00-07:00' AND time < '2000-10-31T00:00:00-08:00' AND interval = 'daily' GROUP BY time(1d) TZ('America/Los_Angeles')`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"count\"],\"values\":[[\"2000-10-29T00:00:00-07:00\",25],[\"2000-10-30T00:00:00-08:00\",24]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"timezone offset - dst start - hourly\",\n\t\t\tcommand: `SELECT count(value) FROM cpu WHERE time >= '2000-04-02T01:00:00-08:00' AND time < '2000-04-02T04:00:00-07:00' AND interval = 'hourly' GROUP BY time(1h) TZ('America/Los_Angeles')`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"count\"],\"values\":[[\"2000-04-02T01:00:00-08:00\",12],[\"2000-04-02T03:00:00-07:00\",12]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"timezone offset - no change - hourly\",\n\t\t\tcommand: `SELECT count(value) FROM cpu WHERE time >= '2000-06-02T01:00:00-07:00' AND time < '2000-06-02T03:00:00-07:00' AND interval = 'hourly' GROUP BY time(1h) TZ('America/Los_Angeles')`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"count\"],\"values\":[[\"2000-06-02T01:00:00-07:00\",12],[\"2000-06-02T02:00:00-07:00\",12]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"timezone offset - dst end - hourly\",\n\t\t\tcommand: `SELECT count(value) FROM cpu WHERE time >= '2000-10-29T01:00:00-07:00' AND time < '2000-10-29T02:00:00-08:00' AND interval = 'hourly' GROUP BY time(1h) TZ('America/Los_Angeles')`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"count\"],\"values\":[[\"2000-10-29T01:00:00-07:00\",12],[\"2000-10-29T01:00:00-08:00\",12]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": 
[]string{\"db0\"}},\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Chunk(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := make([]string, 10001) // 10,000 is the default chunking size, even when no chunking requested.\n\texpectedValues := make([]string, len(writes))\n\tfor i := 0; i < len(writes); i++ {\n\t\twrites[i] = fmt.Sprintf(`cpu value=%d %d`, i, time.Unix(0, int64(i)).UnixNano())\n\t\tif i < len(expectedValues) {\n\t\t\texpectedValues[i] = fmt.Sprintf(`[\"%s\",%d]`, time.Unix(0, int64(i)).UTC().Format(time.RFC3339Nano), i)\n\t\t}\n\t}\n\texpected := fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[%s],\"partial\":true}]}]}`, strings.Join(expectedValues, \",\"))\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"SELECT all values, no chunking\",\n\t\t\tcommand: `SELECT value FROM cpu`,\n\t\t\texp:     expected,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err 
!= nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_DropAndRecreateMeasurement(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db1\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := strings.Join([]string{\n\t\tfmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`memory,host=serverB,region=uswest val=33.2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:01Z\").UnixNano()),\n\t}, \"\\n\")\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: writes},\n\t\t&Write{db: \"db1\", data: writes},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"verify cpu measurement exists in db1\",\n\t\t\tcommand: `SELECT * FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"host\",\"region\",\"val\"],\"values\":[[\"2000-01-01T00:00:00Z\",\"serverA\",\"uswest\",23.2]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db1\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"Drop Measurement, series tags preserved tests\",\n\t\t\tcommand: `SHOW MEASUREMENTS`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"measurements\",\"columns\":[\"name\"],\"values\":[[\"cpu\"],[\"memory\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"show series\",\n\t\t\tcommand: `SHOW SERIES`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"cpu,host=serverA,region=uswest\"],[\"memory,host=serverB,region=uswest\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"ensure we can query for memory with both tags\",\n\t\t\tcommand: `SELECT * FROM memory where region='uswest' and host='serverB' GROUP BY *`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"tags\":{\"host\":\"serverB\",\"region\":\"uswest\"},\"columns\":[\"time\",\"val\"],\"values\":[[\"2000-01-01T00:00:01Z\",33.2]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"drop measurement cpu\",\n\t\t\tcommand: `DROP MEASUREMENT cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"verify measurements in DB that we deleted a measurement from\",\n\t\t\tcommand: `SHOW MEASUREMENTS`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"measurements\",\"columns\":[\"name\"],\"values\":[[\"memory\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"verify series\",\n\t\t\tcommand: `SHOW SERIES`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"memory,host=serverB,region=uswest\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"verify cpu measurement is gone\",\n\t\t\tcommand: `SELECT * FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"verify cpu measurement is NOT gone from other DB\",\n\t\t\tcommand: `SELECT * FROM cpu`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"host\",\"region\",\"val\"],\"values\":[[\"2000-01-01T00:00:00Z\",\"serverA\",\"uswest\",23.2]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db1\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"verify selecting from a tag 'host' still works\",\n\t\t\tcommand: `SELECT * FROM memory where host='serverB' GROUP BY *`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"tags\":{\"host\":\"serverB\",\"region\":\"uswest\"},\"columns\":[\"time\",\"val\"],\"values\":[[\"2000-01-01T00:00:01Z\",33.2]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"verify selecting from a tag 'region' still works\",\n\t\t\tcommand: `SELECT * FROM memory where region='uswest' GROUP BY *`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"tags\":{\"host\":\"serverB\",\"region\":\"uswest\"},\"columns\":[\"time\",\"val\"],\"values\":[[\"2000-01-01T00:00:01Z\",33.2]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"verify selecting from a tag 'host' and 'region' still works\",\n\t\t\tcommand: `SELECT * FROM memory where region='uswest' and host='serverB' GROUP BY *`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"memory\",\"tags\":{\"host\":\"serverB\",\"region\":\"uswest\"},\"columns\":[\"time\",\"val\"],\"values\":[[\"2000-01-01T00:00:01Z\",33.2]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"Drop non-existant measurement\",\n\t\t\tcommand: `DROP MEASUREMENT doesntexist`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t}...)\n\n\t// Test that re-inserting the measurement works fine.\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil 
{\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n\n\ttest = NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: writes},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"verify measurements after recreation\",\n\t\t\tcommand: `SHOW MEASUREMENTS`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"measurements\",\"columns\":[\"name\"],\"values\":[[\"cpu\"],[\"memory\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"verify cpu measurement has been re-inserted\",\n\t\t\tcommand: `SELECT * FROM cpu GROUP BY *`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"tags\":{\"host\":\"serverA\",\"region\":\"uswest\"},\"columns\":[\"time\",\"val\"],\"values\":[[\"2000-01-01T00:00:00Z\",23.2]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_ShowQueries_Future(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu,host=server01 value=100 %d`, models.MaxNanoTime),\n\t}\n\n\ttest := 
NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    `show measurements`,\n\t\t\tcommand: \"SHOW MEASUREMENTS\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"measurements\",\"columns\":[\"name\"],\"values\":[[\"cpu\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show series`,\n\t\t\tcommand: \"SHOW SERIES\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"cpu,host=server01\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show tag keys`,\n\t\t\tcommand: \"SHOW TAG KEYS FROM cpu\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"tagKey\"],\"values\":[[\"host\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show tag values`,\n\t\t\tcommand: \"SHOW TAG VALUES WITH KEY = \\\"host\\\"\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server01\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show field keys`,\n\t\t\tcommand: \"SHOW FIELD KEYS\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"fieldKey\",\"fieldType\"],\"values\":[[\"value\",\"float\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if 
!query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_ShowSeries(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:01Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:02Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:03Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:04Z\").UnixNano()),\n\t\tfmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:05Z\").UnixNano()),\n\t\tfmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:06Z\").UnixNano()),\n\t\tfmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:07Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    `show series`,\n\t\t\tcommand: \"SHOW SERIES\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"cpu,host=server01\"],[\"cpu,host=server01,region=useast\"],[\"cpu,host=server01,region=uswest\"],[\"cpu,host=server02,region=useast\"],[\"disk,host=server03,region=caeast\"],[\"gpu,host=server02,region=useast\"],[\"gpu,host=server03,region=caeast\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show series from 
measurement`,\n\t\t\tcommand: \"SHOW SERIES FROM cpu\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"cpu,host=server01\"],[\"cpu,host=server01,region=useast\"],[\"cpu,host=server01,region=uswest\"],[\"cpu,host=server02,region=useast\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show series from regular expression`,\n\t\t\tcommand: \"SHOW SERIES FROM /[cg]pu/\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"cpu,host=server01\"],[\"cpu,host=server01,region=useast\"],[\"cpu,host=server01,region=uswest\"],[\"cpu,host=server02,region=useast\"],[\"gpu,host=server02,region=useast\"],[\"gpu,host=server03,region=caeast\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show series with where tag`,\n\t\t\tcommand: \"SHOW SERIES WHERE region = 'uswest'\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"cpu,host=server01,region=uswest\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show series where tag matches regular expression`,\n\t\t\tcommand: \"SHOW SERIES WHERE region =~ /ca.*/\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"disk,host=server03,region=caeast\"],[\"gpu,host=server03,region=caeast\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show series`,\n\t\t\tcommand: \"SHOW SERIES WHERE host !~ /server0[12]/\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"disk,host=server03,region=caeast\"],[\"gpu,host=server03,region=caeast\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show series with from and where`,\n\t\t\tcommand: 
\"SHOW SERIES FROM cpu WHERE region = 'useast'\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"columns\":[\"key\"],\"values\":[[\"cpu,host=server01,region=useast\"],[\"cpu,host=server02,region=useast\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show series with WHERE time should fail`,\n\t\t\tcommand: \"SHOW SERIES WHERE time > now() - 1h\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"SHOW SERIES doesn't support time in WHERE clause\"}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show series with WHERE field should fail`,\n\t\t\tcommand: \"SHOW SERIES WHERE value > 10.0\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"invalid tag comparison operator\"}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_ShowStats(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := s.CreateSubscription(\"db0\", \"rp0\", \"foo\", \"ALL\", []string{\"udp://localhost:9000\"}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    `show shots`,\n\t\t\tcommand: \"SHOW STATS\",\n\t\t\texp:     \"subscriber\", // Should see a subscriber stat in the json\n\t\t\tpattern: true,\n\t\t},\n\t}...)\n\n\tfor i, 
query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_ShowMeasurements(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`gpu,host=server02,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`other,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    `show measurements with limit 2`,\n\t\t\tcommand: \"SHOW MEASUREMENTS LIMIT 2\",\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"measurements\",\"columns\":[\"name\"],\"values\":[[\"cpu\"],[\"gpu\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show measurements using WITH`,\n\t\t\tcommand: \"SHOW MEASUREMENTS WITH MEASUREMENT = cpu\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"measurements\",\"columns\":[\"name\"],\"values\":[[\"cpu\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show measurements using WITH and regex`,\n\t\t\tcommand: \"SHOW MEASUREMENTS WITH MEASUREMENT =~ /[cg]pu/\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"measurements\",\"columns\":[\"name\"],\"values\":[[\"cpu\"],[\"gpu\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show measurements using WITH and regex - no matches`,\n\t\t\tcommand: \"SHOW MEASUREMENTS WITH MEASUREMENT =~ /.*zzzzz.*/\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show measurements where tag matches regular expression`,\n\t\t\tcommand: \"SHOW MEASUREMENTS WHERE region =~ /ca.*/\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"measurements\",\"columns\":[\"name\"],\"values\":[[\"gpu\"],[\"other\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show measurements where tag does not match a regular expression`,\n\t\t\tcommand: \"SHOW MEASUREMENTS WHERE region !~ /ca.*/\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"measurements\",\"columns\":[\"name\"],\"values\":[[\"cpu\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show measurements with time in WHERE clauses errors`,\n\t\t\tcommand: `SHOW 
MEASUREMENTS WHERE time > now() - 1h`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"SHOW MEASUREMENTS doesn't support time in WHERE clause\"}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_ShowTagKeys(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, 
\"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    `show tag keys`,\n\t\t\tcommand: \"SHOW TAG KEYS\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"tagKey\"],\"values\":[[\"host\"],[\"region\"]]},{\"name\":\"disk\",\"columns\":[\"tagKey\"],\"values\":[[\"host\"],[\"region\"]]},{\"name\":\"gpu\",\"columns\":[\"tagKey\"],\"values\":[[\"host\"],[\"region\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"show tag keys from\",\n\t\t\tcommand: \"SHOW TAG KEYS FROM cpu\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"tagKey\"],\"values\":[[\"host\"],[\"region\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"show tag keys from regex\",\n\t\t\tcommand: \"SHOW TAG KEYS FROM /[cg]pu/\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"tagKey\"],\"values\":[[\"host\"],[\"region\"]]},{\"name\":\"gpu\",\"columns\":[\"tagKey\"],\"values\":[[\"host\"],[\"region\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"show tag keys measurement not found\",\n\t\t\tcommand: \"SHOW TAG KEYS FROM doesntexist\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"show tag keys with time in WHERE clause errors\",\n\t\t\tcommand: \"SHOW TAG KEYS FROM cpu WHERE time > now() - 1h\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"SHOW TAG KEYS doesn't support time in WHERE clause\"}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"show tag values with key\",\n\t\t\tcommand: \"SHOW TAG VALUES WITH KEY = host\",\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server01\"],[\"host\",\"server02\"]]},{\"name\":\"disk\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server03\"]]},{\"name\":\"gpu\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server02\"],[\"host\",\"server03\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"show tag values with key regex\",\n\t\t\tcommand: \"SHOW TAG VALUES WITH KEY =~ /ho/\",\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server01\"],[\"host\",\"server02\"]]},{\"name\":\"disk\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server03\"]]},{\"name\":\"gpu\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server02\"],[\"host\",\"server03\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show tag values with key and where`,\n\t\t\tcommand: `SHOW TAG VALUES FROM cpu WITH KEY = host WHERE region = 'uswest'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server01\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show tag values with key regex and where`,\n\t\t\tcommand: `SHOW TAG VALUES FROM cpu WITH KEY =~ /ho/ WHERE region = 'uswest'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server01\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show tag values with key and where matches the regular expression`,\n\t\t\tcommand: `SHOW TAG VALUES WITH KEY = host WHERE region =~ /ca.*/`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"disk\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server03\"]]},{\"name\":\"gpu\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server03\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show tag values with key and where does not match the regular expression`,\n\t\t\tcommand: `SHOW TAG VALUES WITH KEY = region WHERE host !~ /server0[12]/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"disk\",\"columns\":[\"key\",\"value\"],\"values\":[[\"region\",\"caeast\"]]},{\"name\":\"gpu\",\"columns\":[\"key\",\"value\"],\"values\":[[\"region\",\"caeast\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show tag values with key and where partially matches the regular expression`,\n\t\t\tcommand: `SHOW TAG VALUES WITH KEY = host WHERE region =~ /us/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server01\"],[\"host\",\"server02\"]]},{\"name\":\"gpu\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server02\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show tag values with key and where partially does not match the regular expression`,\n\t\t\tcommand: `SHOW TAG VALUES WITH KEY = host WHERE region !~ /us/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server01\"]]},{\"name\":\"disk\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server03\"]]},{\"name\":\"gpu\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server03\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show tag values with key in and where does not match the regular 
expression`,\n\t\t\tcommand: `SHOW TAG VALUES FROM cpu WITH KEY IN (host, region) WHERE region = 'uswest'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server01\"],[\"region\",\"uswest\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show tag values with key regex and where does not match the regular expression`,\n\t\t\tcommand: `SHOW TAG VALUES FROM cpu WITH KEY =~ /(host|region)/ WHERE region = 'uswest'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server01\"],[\"region\",\"uswest\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show tag values with key and measurement matches regular expression`,\n\t\t\tcommand: `SHOW TAG VALUES FROM /[cg]pu/ WITH KEY = host`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server01\"],[\"host\",\"server02\"]]},{\"name\":\"gpu\",\"columns\":[\"key\",\"value\"],\"values\":[[\"host\",\"server02\"],[\"host\",\"server03\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show tag values with key and time in WHERE clause should error`,\n\t\t\tcommand: `SHOW TAG VALUES WITH KEY = host WHERE time > now() - 1h`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"error\":\"SHOW TAG VALUES doesn't support time in WHERE clause\"}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil 
{\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_ShowFieldKeys(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu,host=server01 field1=100 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01,region=uswest field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01,region=useast field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server02,region=useast field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`gpu,host=server01,region=useast field4=200,field5=300 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`gpu,host=server03,region=caeast field6=200,field7=300 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`disk,host=server03,region=caeast field8=200,field9=300 %d`, mustParseTime(time.RFC3339Nano, \"2009-11-10T23:00:00Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    `show field keys`,\n\t\t\tcommand: `SHOW FIELD KEYS`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"fieldKey\",\"fieldType\"],\"values\":[[\"field1\",\"float\"],[\"field2\",\"float\"],[\"field3\",\"float\"]]},{\"name\":\"disk\",\"columns\":[\"fieldKey\",\"fieldType\"],\"values\":[[\"field8\",\"float\"],[\"field9\",\"float\"]]},{\"name\":\"gpu\",\"columns\":[\"fieldKey\",\"fieldType\"],\"values\":[[\"field4\",\"float\"],[\"field5\",\"float\"],[\"field6\",\"float\"],[\"field7\",\"float\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show field keys from measurement`,\n\t\t\tcommand: `SHOW FIELD KEYS FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"fieldKey\",\"fieldType\"],\"values\":[[\"field1\",\"float\"],[\"field2\",\"float\"],[\"field3\",\"float\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show field keys measurement with regex`,\n\t\t\tcommand: `SHOW FIELD KEYS FROM /[cg]pu/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"fieldKey\",\"fieldType\"],\"values\":[[\"field1\",\"float\"],[\"field2\",\"float\"],[\"field3\",\"float\"]]},{\"name\":\"gpu\",\"columns\":[\"fieldKey\",\"fieldType\"],\"values\":[[\"field4\",\"float\"],[\"field5\",\"float\"],[\"field6\",\"float\"],[\"field7\",\"float\"]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_ContinuousQuery(t *testing.T) {\n\tt.Parallel()\n\ts := 
OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trunTest := func(test *Test, t *testing.T) {\n\t\tfor i, query := range test.queries {\n\t\t\tif i == 0 {\n\t\t\t\tif err := test.init(s); err != nil {\n\t\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif query.skip {\n\t\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := query.Execute(s); err != nil {\n\t\t\t\tt.Error(query.Error(err))\n\t\t\t} else if !query.success() {\n\t\t\t\tt.Error(query.failureMessage())\n\t\t\t}\n\t\t}\n\t}\n\n\t// Start times of CQ intervals.\n\tinterval0 := time.Now().Add(-time.Second).Round(time.Second * 5)\n\tinterval1 := interval0.Add(-time.Second * 5)\n\tinterval2 := interval0.Add(-time.Second * 10)\n\tinterval3 := interval0.Add(-time.Second * 15)\n\n\twrites := []string{\n\t\t// Point too far in the past for CQ to pick up.\n\t\tfmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, interval3.Add(time.Second).UnixNano()),\n\n\t\t// Points two intervals ago.\n\t\tfmt.Sprintf(`cpu,host=server01 value=100 %d`, interval2.Add(time.Second).UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, interval2.Add(time.Second*2).UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, interval2.Add(time.Second*3).UnixNano()),\n\n\t\t// Points one interval ago.\n\t\tfmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, interval1.Add(time.Second).UnixNano()),\n\t\tfmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, interval1.Add(time.Second*2).UnixNano()),\n\n\t\t// Points in the current interval.\n\t\tfmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, interval0.Add(time.Second).UnixNano()),\n\t\tfmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, interval0.Add(time.Second*2).UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", 
\"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    `create another retention policy for CQ to write into`,\n\t\t\tcommand: `CREATE RETENTION POLICY rp1 ON db0 DURATION 1h REPLICATION 1`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"create continuous query with backreference\",\n\t\t\tcommand: `CREATE CONTINUOUS QUERY \"cq1\" ON db0 BEGIN SELECT count(value) INTO \"rp1\".:MEASUREMENT FROM /[cg]pu/ GROUP BY time(5s) END`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    `create another retention policy for CQ to write into`,\n\t\t\tcommand: `CREATE RETENTION POLICY rp2 ON db0 DURATION 1h REPLICATION 1`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"create continuous query with backreference and group by time\",\n\t\t\tcommand: `CREATE CONTINUOUS QUERY \"cq2\" ON db0 BEGIN SELECT count(value) INTO \"rp2\".:MEASUREMENT FROM /[cg]pu/ GROUP BY time(5s), * END`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    `show continuous queries`,\n\t\t\tcommand: `SHOW CONTINUOUS QUERIES`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"db0\",\"columns\":[\"name\",\"query\"],\"values\":[[\"cq1\",\"CREATE CONTINUOUS QUERY cq1 ON db0 BEGIN SELECT count(value) INTO db0.rp1.:MEASUREMENT FROM db0.rp0./[cg]pu/ GROUP BY time(5s) END\"],[\"cq2\",\"CREATE CONTINUOUS QUERY cq2 ON db0 BEGIN SELECT count(value) INTO db0.rp2.:MEASUREMENT FROM db0.rp0./[cg]pu/ GROUP BY time(5s), * END\"]]}]}]}`,\n\t\t},\n\t}...)\n\n\t// Run first test to create CQs.\n\trunTest(&test, t)\n\n\t// Setup tests to check the CQ results.\n\ttest2 := NewTest(\"db0\", \"rp1\")\n\ttest2.addQueries([]*Query{\n\t\t&Query{\n\t\t\tskip:    true,\n\t\t\tname:    \"check results of cq1\",\n\t\t\tcommand: `SELECT * FROM 
\"rp1\"./[cg]pu/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"count\",\"host\",\"region\",\"value\"],\"values\":[[\"` + interval2.UTC().Format(time.RFC3339Nano) + `\",3,null,null,null]]},{\"name\":\"gpu\",\"columns\":[\"time\",\"count\",\"host\",\"region\",\"value\"],\"values\":[[\"` + interval1.UTC().Format(time.RFC3339Nano) + `\",2,null,null,null],[\"` + interval0.UTC().Format(time.RFC3339Nano) + `\",1,null,null,null]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t\t// TODO: restore this test once this is fixed: https://github.com/influxdata/influxdb/issues/3968\n\t\t&Query{\n\t\t\tskip:    true,\n\t\t\tname:    \"check results of cq2\",\n\t\t\tcommand: `SELECT * FROM \"rp2\"./[cg]pu/`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"count\",\"host\",\"region\",\"value\"],\"values\":[[\"` + interval2.UTC().Format(time.RFC3339Nano) + `\",1,\"server01\",\"uswest\",null],[\"` + interval2.UTC().Format(time.RFC3339Nano) + `\",1,\"server01\",\"\",null],[\"` + interval2.UTC().Format(time.RFC3339Nano) + `\",1,\"server01\",\"useast\",null]]},{\"name\":\"gpu\",\"columns\":[\"time\",\"count\",\"host\",\"region\",\"value\"],\"values\":[[\"` + interval1.UTC().Format(time.RFC3339Nano) + `\",1,\"server02\",\"useast\",null],[\"` + interval1.UTC().Format(time.RFC3339Nano) + `\",1,\"server03\",\"caeast\",null],[\"` + interval0.UTC().Format(time.RFC3339Nano) + `\",1,\"server03\",\"caeast\",null]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t}...)\n\n\t// Run second test to check CQ results.\n\trunTest(&test2, t)\n}\n\n// Tests that a known CQ query with concurrent writes does not deadlock the server\nfunc TestServer_ContinuousQuery_Deadlock(t *testing.T) {\n\n\t// Skip until #3517 & #3522 are merged\n\tt.Skip(\"Skipping CQ deadlock test\")\n\tif testing.Short() {\n\t\tt.Skip(\"skipping CQ deadlock 
test\")\n\t}\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer func() {\n\t\ts.Close()\n\t}()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"create continuous query\",\n\t\t\tcommand: `CREATE CONTINUOUS QUERY \"my.query\" ON db0 BEGIN SELECT sum(visits) as visits INTO test_1m FROM myseries GROUP BY time(1m), host END`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n\n\t// Deadlock detector.  If the deadlock is fixed, this test should complete all the writes in ~2.5s seconds (with artifical delays\n\t// added).  After 10 seconds, if the server has not been closed then we hit the deadlock bug.\n\titerations := 0\n\tgo func(s Server) {\n\t\t<-time.After(10 * time.Second)\n\n\t\t// If the server is not nil then the test is still running and stuck.  We panic to avoid\n\t\t// having the whole test suite hang indefinitely.\n\t\tif !s.Closed() {\n\t\t\tpanic(\"possible deadlock. 
writes did not complete in time\")\n\t\t}\n\t}(s)\n\n\tfor {\n\n\t\t// After the second write, if the deadlock exists, we'll get a write timeout and\n\t\t// all subsequent writes will timeout\n\t\tif iterations > 5 {\n\t\t\tbreak\n\t\t}\n\t\twrites := []string{}\n\t\tfor i := 0; i < 1000; i++ {\n\t\t\twrites = append(writes, fmt.Sprintf(`myseries,host=host-%d visits=1i`, i))\n\t\t}\n\t\twrite := strings.Join(writes, \"\\n\")\n\n\t\tif _, err := s.Write(test.db, test.rp, write, test.params); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\titerations += 1\n\t\ttime.Sleep(500 * time.Millisecond)\n\t}\n}\n\nfunc TestServer_Query_EvilIdentifiers(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(\"cpu select=1,in-bytes=2 %d\", mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano())},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    `query evil identifiers`,\n\t\t\tcommand: `SELECT \"select\", \"in-bytes\" FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"select\",\"in-bytes\"],\"values\":[[\"2000-01-01T00:00:00Z\",1,2]]}]}]}`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_OrderByTime(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer 
s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu,host=server1 value=1 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:01Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server1 value=2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:02Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu,host=server1 value=3 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:03Z\").UnixNano()),\n\n\t\tfmt.Sprintf(`power,presence=true value=1 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:01Z\").UnixNano()),\n\t\tfmt.Sprintf(`power,presence=true value=2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:02Z\").UnixNano()),\n\t\tfmt.Sprintf(`power,presence=true value=3 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:03Z\").UnixNano()),\n\t\tfmt.Sprintf(`power,presence=false value=4 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:04Z\").UnixNano()),\n\n\t\tfmt.Sprintf(`mem,host=server1 free=1 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:01Z\").UnixNano()),\n\t\tfmt.Sprintf(`mem,host=server1 free=2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:02Z\").UnixNano()),\n\t\tfmt.Sprintf(`mem,host=server2 used=3 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:01Z\").UnixNano()),\n\t\tfmt.Sprintf(`mem,host=server2 used=4 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:02Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"order on points\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select value from \"cpu\" ORDER BY time DESC`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:03Z\",3],[\"2000-01-01T00:00:02Z\",2],[\"2000-01-01T00:00:01Z\",1]]}]}]}`,\n\t\t},\n\n\t\t&Query{\n\t\t\tname:    \"order desc with tags\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select value from \"power\" ORDER BY time DESC`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"power\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:04Z\",4],[\"2000-01-01T00:00:03Z\",3],[\"2000-01-01T00:00:02Z\",2],[\"2000-01-01T00:00:01Z\",1]]}]}]}`,\n\t\t},\n\n\t\t&Query{\n\t\t\tname:    \"order desc with sparse data\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select used, free from \"mem\" ORDER BY time DESC`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"mem\",\"columns\":[\"time\",\"used\",\"free\"],\"values\":[[\"2000-01-01T00:00:02Z\",4,null],[\"2000-01-01T00:00:02Z\",null,2],[\"2000-01-01T00:00:01Z\",3,null],[\"2000-01-01T00:00:01Z\",null,1]]}]}]}`,\n\t\t},\n\n\t\t&Query{\n\t\t\tname:    \"order desc with an aggregate and sparse data\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select first(\"used\") AS \"used\", first(\"free\") AS \"free\" from \"mem\" WHERE time >= '2000-01-01T00:00:01Z' AND time <= '2000-01-01T00:00:02Z' GROUP BY host, time(1s) FILL(none) ORDER BY time DESC`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"mem\",\"tags\":{\"host\":\"server2\"},\"columns\":[\"time\",\"used\",\"free\"],\"values\":[[\"2000-01-01T00:00:02Z\",4,null],[\"2000-01-01T00:00:01Z\",3,null]]},{\"name\":\"mem\",\"tags\":{\"host\":\"server1\"},\"columns\":[\"time\",\"used\",\"free\"],\"values\":[[\"2000-01-01T00:00:02Z\",null,2],[\"2000-01-01T00:00:01Z\",null,1]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := 
test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_FieldWithMultiplePeriods(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu foo.bar.baz=1 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"baseline\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select * from cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"foo.bar.baz\"],\"values\":[[\"2000-01-01T00:00:00Z\",1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"select field with periods\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select \"foo.bar.baz\" from cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"foo.bar.baz\"],\"values\":[[\"2000-01-01T00:00:00Z\",1]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() 
{\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_FieldWithMultiplePeriodsMeasurementPrefixMatch(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`foo foo.bar.baz=1 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"baseline\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select * from foo`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"foo\",\"columns\":[\"time\",\"foo.bar.baz\"],\"values\":[[\"2000-01-01T00:00:00Z\",1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"select field with periods\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `select \"foo.bar.baz\" from foo`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"foo\",\"columns\":[\"time\",\"foo.bar.baz\"],\"values\":[[\"2000-01-01T00:00:00Z\",1]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor i, query := range test.queries {\n\t\tif i == 0 {\n\t\t\tif err := test.init(s); err != nil {\n\t\t\t\tt.Fatalf(\"test init failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_IntoTarget(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites 
:= []string{\n\t\tfmt.Sprintf(`foo value=1 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`foo value=2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`foo value=3 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:20Z\").UnixNano()),\n\t\tfmt.Sprintf(`foo value=4 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:30Z\").UnixNano()),\n\t\tfmt.Sprintf(`foo value=4,foobar=3 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:40Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"into\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * INTO baz FROM foo`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"result\",\"columns\":[\"time\",\"written\"],\"values\":[[\"1970-01-01T00:00:00Z\",5]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"confirm results\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM baz`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"baz\",\"columns\":[\"time\",\"foobar\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",null,1],[\"2000-01-01T00:00:10Z\",null,2],[\"2000-01-01T00:00:20Z\",null,3],[\"2000-01-01T00:00:30Z\",null,4],[\"2000-01-01T00:00:40Z\",3,4]]}]}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Ensure that binary operators of aggregates of separate fields, when a field is sometimes missing and sometimes 
present,\n// result in values that are still properly time-aligned.\nfunc TestServer_Query_IntoTarget_Sparse(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\t// All points have fields n and a. Field b is not present in all intervals.\n\t\t// First 10s interval is missing field b. Result a_n should be (2+5)*(3+7) = 70, b_n is null.\n\t\tfmt.Sprintf(`foo a=2,n=3 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:01Z\").UnixNano()),\n\t\tfmt.Sprintf(`foo a=5,n=7 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:02Z\").UnixNano()),\n\t\t// Second 10s interval has field b. Result a_n = 11*17 = 187, b_n = 13*17 = 221.\n\t\tfmt.Sprintf(`foo a=11,b=13,n=17 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:11Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"into\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sum(a) * sum(n) as a_n, sum(b) * sum(n) as b_n INTO baz FROM foo WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:01:00Z' GROUP BY time(10s)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"result\",\"columns\":[\"time\",\"written\"],\"values\":[[\"1970-01-01T00:00:00Z\",2]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"confirm results\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM baz`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"baz\",\"columns\":[\"time\",\"a_n\",\"b_n\"],\"values\":[[\"2000-01-01T00:00:00Z\",70,null],[\"2000-01-01T00:00:10Z\",187,221]]}]}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: 
%s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// This test ensures that data is not duplicated with measurements\n// of the same name.\nfunc TestServer_Query_DuplicateMeasurements(t *testing.T) {\n\tt.Parallel()\n\ts := OpenDefaultServer(NewConfig())\n\tdefer s.Close()\n\n\t// Create a second database.\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db1\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(`cpu value=1 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano())},\n\t}\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\ttest = NewTest(\"db1\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: fmt.Sprintf(`cpu value=2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:10Z\").UnixNano())},\n\t}\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"select from both databases\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT value FROM db0.rp0.cpu, db1.rp0.cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:00Z\",1],[\"2000-01-01T00:00:10Z\",2]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc 
TestServer_Query_LargeTimestamp(t *testing.T) {\n\tt.Parallel()\n\ts := OpenDefaultServer(NewConfig())\n\tdefer s.Close()\n\n\tif _, ok := s.(*RemoteServer); ok {\n\t\tt.Skip(\"Skipping.  Cannot restart remote server\")\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu value=100 %d`, models.MaxNanoTime),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    `select value at max nano time`,\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: fmt.Sprintf(`SELECT value FROM cpu WHERE time <= %d`, models.MaxNanoTime),\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"` + time.Unix(0, models.MaxNanoTime).UTC().Format(time.RFC3339Nano) + `\",100]]}]}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\t// Open a new server with the same configuration file.\n\t// This is to ensure the meta data was marshaled correctly.\n\ts2 := OpenServer((s.(*LocalServer)).Config)\n\tdefer s2.Close()\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_DotProduct(t *testing.T) {\n\tt.Parallel()\n\ts := OpenDefaultServer(NewConfig())\n\tdefer s.Close()\n\n\t// Create a second database.\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu a=2,b=3 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu a=-5,b=8 %d`, mustParseTime(time.RFC3339Nano, 
\"2000-01-01T00:00:10Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu a=9,b=3 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:20Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"select dot product\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sum(a_b) FROM (SELECT a * b FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sum\"],\"values\":[[\"2000-01-01T00:00:00Z\",-7]]}]}]}`,\n\t\t},\n\t}...)\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// This test reproduced a data race with closing the\n// Subscriber points channel while writes were in-flight in the PointsWriter.\nfunc TestServer_ConcurrentPointsWriter_Subscriber(t *testing.T) {\n\tt.Parallel()\n\ts := OpenDefaultServer(NewConfig())\n\tdefer s.Close()\n\n\tif _, ok := s.(*RemoteServer); ok {\n\t\tt.Skip(\"Skipping.  
Cannot access PointsWriter remotely\")\n\t}\n\t// goroutine to write points\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\twpr := &coordinator.WritePointsRequest{\n\t\t\t\t\tDatabase:        \"db0\",\n\t\t\t\t\tRetentionPolicy: \"rp0\",\n\t\t\t\t}\n\t\t\t\ts.WritePoints(wpr.Database, wpr.RetentionPolicy, models.ConsistencyLevelAny, nil, wpr.Points)\n\t\t\t}\n\t\t}\n\t}()\n\n\ttime.Sleep(10 * time.Millisecond)\n\n\tclose(done)\n}\n\n// Ensure time in where clause is inclusive\nfunc TestServer_WhereTimeInclusive(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu value=1 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:01Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu value=2 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:02Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu value=3 %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:03Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"all GTE/LTE\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * from cpu where time >= '2000-01-01T00:00:01Z' and time <= '2000-01-01T00:00:03Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:01Z\",1],[\"2000-01-01T00:00:02Z\",2],[\"2000-01-01T00:00:03Z\",3]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"all GTE\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * from cpu where time >= '2000-01-01T00:00:01Z'`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:01Z\",1],[\"2000-01-01T00:00:02Z\",2],[\"2000-01-01T00:00:03Z\",3]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"all LTE\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * from cpu where time <= '2000-01-01T00:00:03Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:01Z\",1],[\"2000-01-01T00:00:02Z\",2],[\"2000-01-01T00:00:03Z\",3]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"first GTE/LTE\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * from cpu where time >= '2000-01-01T00:00:01Z' and time <= '2000-01-01T00:00:01Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:01Z\",1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"last GTE/LTE\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * from cpu where time >= '2000-01-01T00:00:03Z' and time <= '2000-01-01T00:00:03Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:03Z\",3]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"before GTE/LTE\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * from cpu where time <= '2000-01-01T00:00:00Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"all GT/LT\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * from cpu where time > '2000-01-01T00:00:00Z' and time < '2000-01-01T00:00:04Z'`,\n\t\t\texp:     
`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:01Z\",1],[\"2000-01-01T00:00:02Z\",2],[\"2000-01-01T00:00:03Z\",3]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"first GT/LT\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * from cpu where time > '2000-01-01T00:00:00Z' and time < '2000-01-01T00:00:02Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:01Z\",1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"last GT/LT\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * from cpu where time > '2000-01-01T00:00:02Z' and time < '2000-01-01T00:00:04Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:03Z\",3]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"all GT\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * from cpu where time > '2000-01-01T00:00:00Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:01Z\",1],[\"2000-01-01T00:00:02Z\",2],[\"2000-01-01T00:00:03Z\",3]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"all LT\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * from cpu where time < '2000-01-01T00:00:04Z'`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"2000-01-01T00:00:01Z\",1],[\"2000-01-01T00:00:02Z\",2],[\"2000-01-01T00:00:03Z\",3]]}]}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", 
query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_ImplicitEndTime(t *testing.T) {\n\tt.Skip(\"flaky test\")\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tnow := time.Now().UTC().Truncate(time.Second)\n\tpast := now.Add(-10 * time.Second)\n\tfuture := now.Add(10 * time.Minute)\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu value=1 %d`, past.UnixNano()),\n\t\tfmt.Sprintf(`cpu value=2 %d`, future.UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"raw query\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT * FROM cpu`,\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"value\"],\"values\":[[\"%s\",1],[\"%s\",2]]}]}]}`, past.Format(time.RFC3339Nano), future.Format(time.RFC3339Nano)),\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"aggregate query\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: fmt.Sprintf(`SELECT mean(value) FROM cpu WHERE time > '%s' - 1m GROUP BY time(1m) FILL(none)`, now.Truncate(time.Minute).Format(time.RFC3339Nano)),\n\t\t\texp:     fmt.Sprintf(`{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"mean\"],\"values\":[[\"%s\",1]]}]}]}`, now.Truncate(time.Minute).Format(time.RFC3339Nano)),\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", 
query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Sample_Wildcard(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu float=1,int=1i,string=\"hello, world\",bool=true %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:00:00Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"sample() with wildcard\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sample(*, 1) FROM cpu`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sample_bool\",\"sample_float\",\"sample_int\",\"sample_string\"],\"values\":[[\"2000-01-01T00:00:00Z\",true,1,1,\"hello, world\"]]}]}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\nfunc TestServer_Query_Sample_LimitOffset(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\tfmt.Sprintf(`cpu float=1,int=1i %d`, mustParseTime(time.RFC3339Nano, 
\"2000-01-01T00:00:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu float=2,int=2i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:01:00Z\").UnixNano()),\n\t\tfmt.Sprintf(`cpu float=3,int=3i %d`, mustParseTime(time.RFC3339Nano, \"2000-01-01T00:02:00Z\").UnixNano()),\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"sample() with limit 1\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sample(float, 3), int FROM cpu LIMIT 1`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sample\",\"int\"],\"values\":[[\"2000-01-01T00:00:00Z\",1,1]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"sample() with offset 1\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sample(float, 3), int FROM cpu OFFSET 1`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sample\",\"int\"],\"values\":[[\"2000-01-01T00:01:00Z\",2,2],[\"2000-01-01T00:02:00Z\",3,3]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"sample() with limit 1 offset 1\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sample(float, 3), int FROM cpu LIMIT 1 OFFSET 1`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sample\",\"int\"],\"values\":[[\"2000-01-01T00:01:00Z\",2,2]]}]}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil {\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n\n// Validate that nested aggregates don't panic\nfunc 
TestServer_NestedAggregateWithMathPanics(t *testing.T) {\n\tt.Parallel()\n\ts := OpenServer(NewConfig())\n\tdefer s.Close()\n\n\tif err := s.CreateDatabaseAndRetentionPolicy(\"db0\", newRetentionPolicySpec(\"rp0\", 1, 0), true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twrites := []string{\n\t\t`cpu value=2i 120000000000`,\n\t}\n\n\ttest := NewTest(\"db0\", \"rp0\")\n\ttest.writes = Writes{\n\t\t&Write{data: strings.Join(writes, \"\\n\")},\n\t}\n\n\ttest.addQueries([]*Query{\n\t\t&Query{\n\t\t\tname:    \"dividing by elapsed count should not panic\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sum(value) / elapsed(sum(value), 1m) FROM cpu WHERE time > 0 AND time < 10m GROUP BY time(1m)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sum_elapsed\"],\"values\":[[\"1970-01-01T00:00:00Z\",null],[\"1970-01-01T00:01:00Z\",null],[\"1970-01-01T00:02:00Z\",null],[\"1970-01-01T00:03:00Z\",null],[\"1970-01-01T00:04:00Z\",null],[\"1970-01-01T00:05:00Z\",null],[\"1970-01-01T00:06:00Z\",null],[\"1970-01-01T00:07:00Z\",null],[\"1970-01-01T00:08:00Z\",null],[\"1970-01-01T00:09:00Z\",null]]}]}]}`,\n\t\t},\n\t\t&Query{\n\t\t\tname:    \"dividing by elapsed count with fill previous should not panic\",\n\t\t\tparams:  url.Values{\"db\": []string{\"db0\"}},\n\t\t\tcommand: `SELECT sum(value) / elapsed(sum(value), 1m) FROM cpu WHERE time > 0 AND time < 10m GROUP BY time(1m) FILL(previous)`,\n\t\t\texp:     `{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"sum_elapsed\"],\"values\":[[\"1970-01-01T00:00:00Z\",null],[\"1970-01-01T00:01:00Z\",null],[\"1970-01-01T00:02:00Z\",null],[\"1970-01-01T00:03:00Z\",2],[\"1970-01-01T00:04:00Z\",2],[\"1970-01-01T00:05:00Z\",2],[\"1970-01-01T00:06:00Z\",2],[\"1970-01-01T00:07:00Z\",2],[\"1970-01-01T00:08:00Z\",2],[\"1970-01-01T00:09:00Z\",2]]}]}]}`,\n\t\t},\n\t}...)\n\n\tif err := test.init(s); err != nil 
{\n\t\tt.Fatalf(\"test init failed: %s\", err)\n\t}\n\n\tfor _, query := range test.queries {\n\t\tif query.skip {\n\t\t\tt.Logf(\"SKIP:: %s\", query.name)\n\t\t\tcontinue\n\t\t}\n\t\tif err := query.Execute(s); err != nil {\n\t\t\tt.Error(query.Error(err))\n\t\t} else if !query.success() {\n\t\t\tt.Error(query.failureMessage())\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/toml/toml.go",
    "content": "// Package toml adds support to marshal and unmarshal types not in the official TOML spec.\npackage toml // import \"github.com/influxdata/influxdb/toml\"\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"time\"\n)\n\n// maxInt is the largest integer representable by a word (architecture dependent).\nconst maxInt = int64(^uint(0) >> 1)\n\n// Duration is a TOML wrapper type for time.Duration.\ntype Duration time.Duration\n\n// String returns the string representation of the duration.\nfunc (d Duration) String() string {\n\treturn time.Duration(d).String()\n}\n\n// UnmarshalText parses a TOML value into a duration value.\nfunc (d *Duration) UnmarshalText(text []byte) error {\n\t// Ignore if there is no value set.\n\tif len(text) == 0 {\n\t\treturn nil\n\t}\n\n\t// Otherwise parse as a duration formatted string.\n\tduration, err := time.ParseDuration(string(text))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Set duration and return.\n\t*d = Duration(duration)\n\treturn nil\n}\n\n// MarshalText converts a duration to a string for decoding toml\nfunc (d Duration) MarshalText() (text []byte, err error) {\n\treturn []byte(d.String()), nil\n}\n\n// Size represents a TOML parseable file size.\n// Users can specify size using \"m\" for megabytes and \"g\" for gigabytes.\ntype Size int\n\n// UnmarshalText parses a byte size from text.\nfunc (s *Size) UnmarshalText(text []byte) error {\n\t// Parse numeric portion of value.\n\tlength := len(string(text))\n\tsize, err := strconv.ParseInt(string(text[:length-1]), 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Parse unit of measure (\"m\", \"g\", etc).\n\tswitch suffix := text[len(text)-1]; suffix {\n\tcase 'm':\n\t\tsize *= 1 << 20 // MB\n\tcase 'g':\n\t\tsize *= 1 << 30 // GB\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown size suffix: %c\", suffix)\n\t}\n\n\t// Check for overflow.\n\tif size > maxInt {\n\t\treturn fmt.Errorf(\"size %d cannot be represented by an int\", size)\n\t}\n\n\t*s = Size(size)\n\treturn 
nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/toml/toml_test.go",
    "content": "package toml_test\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/influxdata/influxdb/cmd/influxd/run\"\n\titoml \"github.com/influxdata/influxdb/toml\"\n)\n\n// Ensure that megabyte sizes can be parsed.\nfunc TestSize_UnmarshalText_MB(t *testing.T) {\n\tvar s itoml.Size\n\tif err := s.UnmarshalText([]byte(\"200m\")); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if s != 200*(1<<20) {\n\t\tt.Fatalf(\"unexpected size: %d\", s)\n\t}\n}\n\n// Ensure that gigabyte sizes can be parsed.\nfunc TestSize_UnmarshalText_GB(t *testing.T) {\n\tvar s itoml.Size\n\tif err := s.UnmarshalText([]byte(\"1g\")); err != nil {\n\t\tt.Fatalf(\"unexpected error: %s\", err)\n\t} else if s != 1073741824 {\n\t\tt.Fatalf(\"unexpected size: %d\", s)\n\t}\n}\n\nfunc TestConfig_Encode(t *testing.T) {\n\tvar c run.Config\n\tc.Coordinator.WriteTimeout = itoml.Duration(time.Minute)\n\tbuf := new(bytes.Buffer)\n\tif err := toml.NewEncoder(buf).Encode(&c); err != nil {\n\t\tt.Fatal(\"Failed to encode: \", err)\n\t}\n\tgot, search := buf.String(), `write-timeout = \"1m0s\"`\n\tif !strings.Contains(got, search) {\n\t\tt.Fatalf(\"Encoding config failed.\\nfailed to find %s in:\\n%s\\n\", search, got)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/README.md",
    "content": "# Line Protocol\n\nThe line protocol is a text based format for writing points to InfluxDB.  Each line defines a single point. \nMultiple lines must be separated by the newline character `\\n`. The format of the line consists of three parts:\n\n```\n[key] [fields] [timestamp]\n```\n\nEach section is separated by spaces.  The minimum required point consists of a measurement name and at least one field. Points without a specified timestamp will be written using the server's local timestamp. Timestamps are assumed to be in nanoseconds unless a `precision` value is passed in the query string.\n\n## Key\n\nThe key is the measurement name and any optional tags separated by commas.  Measurement names, tag keys, and tag values must escape any spaces or commas using a backslash (`\\`). For example: `\\ ` and `\\,`.  All tag values are stored as strings and should not be surrounded in quotes. \n\nTags should be sorted by key before being sent for best performance. The sort should match that from the Go `bytes.Compare` function (http://golang.org/pkg/bytes/#Compare).\n\n### Examples\n\n```\n# measurement only\ncpu\n\n# measurement and tags\ncpu,host=serverA,region=us-west\n\n# measurement with commas\ncpu\\,01,host=serverA,region=us-west\n\n# tag value with spaces\ncpu,host=server\\ A,region=us\\ west\n```\n\n## Fields\n\nFields are key-value metrics associated with the measurement.  Every line must have at least one field.  Multiple fields must be separated with commas and not spaces.\n\nField keys are always strings and follow the same syntactical rules as described above for tag keys and values. Field values can be one of four types.  The first value written for a given field on a given measurement defines the type of that field for all series under that measurement.\n\n* _integer_ - Numeric values that do not include a decimal and are followed by a trailing i when inserted (e.g. 1i, 345i, 2015i, -10i). Note that all values must have a trailing i. 
If they do not they will be written as floats.\n* _float_ - Numeric values that are not followed by a trailing i. (e.g. 1, 1.0, -3.14, 6.0e+5, 10).\n* _boolean_ - A value indicating true or false.  Valid boolean strings are (t, T, true, TRUE, f, F, false, and FALSE).\n* _string_ - A text value.  All string values _must_ be surrounded in double-quotes `\"`.  If the string contains\na double-quote or backslashes, it must be escaped with a backslash, e.g. `\\\"`, `\\\\`.\n\n\n```\n# integer value\ncpu value=1i\n\ncpu value=1.1i # will result in a parse error\n\n# float value\ncpu_load value=1\n\ncpu_load value=1.0\n\ncpu_load value=1.2\n\n# boolean value\nerror fatal=true\n\n# string value\nevent msg=\"logged out\"\n\n# multiple values\ncpu load=10,alert=true,reason=\"value above maximum threshold\"\n```\n\n## Timestamp\n\nThe timestamp section is optional but should be specified if possible.  The value is an integer representing nanoseconds since the epoch. If the timestamp is not provided the point will inherit the server's local timestamp.\n\nSome write APIs allow passing a lower precision.  If the API supports a lower precision, the timestamp may also be\nan integer epoch in microseconds, milliseconds, seconds, minutes or hours.\n\n## Full Example\nA full example is shown below.\n```\ncpu,host=server01,region=uswest value=1 1434055562000000000\ncpu,host=server02,region=uswest value=3 1434055562000010000\n```\nIn this example the first line shows a `measurement` of \"cpu\", there are two tags \"host\" and \"region\", the `value` is 1.0, and the `timestamp` is 1434055562000000000. Following this is a second line, also a point in the `measurement` \"cpu\" but belonging to a different \"host\".\n```\ncpu,host=server\\ 01,region=uswest value=1,msg=\"all systems nominal\"\ncpu,host=server\\ 01,region=us\\,west value_int=1i\n```\nIn these examples, the \"host\" is set to `server 01`. The field value associated with field key `msg` is double-quoted, as it is a string. 
The second example shows a region of `us,west` with the comma properly escaped. In the first example `value` is written as a floating point number. In the second, `value_int` is an integer. \n\n# Distributed Queries\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/batcher.go",
    "content": "package tsdb\n\nimport (\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n)\n\n// PointBatcher accepts Points and will emit a batch of those points when either\n// a) the batch reaches a certain size, or b) a certain time passes.\ntype PointBatcher struct {\n\tstats PointBatcherStats\n\n\tsize     int\n\tduration time.Duration\n\n\tstop  chan struct{}\n\tin    chan models.Point\n\tout   chan []models.Point\n\tflush chan struct{}\n\n\twg *sync.WaitGroup\n}\n\n// NewPointBatcher returns a new PointBatcher. sz is the batching size,\n// bp is the maximum number of batches that may be pending. d is the time\n// after which a batch will be emitted after the first point is received\n// for the batch, regardless of its size.\nfunc NewPointBatcher(sz int, bp int, d time.Duration) *PointBatcher {\n\treturn &PointBatcher{\n\t\tsize:     sz,\n\t\tduration: d,\n\t\tstop:     make(chan struct{}),\n\t\tin:       make(chan models.Point, bp*sz),\n\t\tout:      make(chan []models.Point),\n\t\tflush:    make(chan struct{}),\n\t}\n}\n\n// PointBatcherStats are the statistics each batcher tracks.\ntype PointBatcherStats struct {\n\tBatchTotal   uint64 // Total count of batches transmitted.\n\tPointTotal   uint64 // Total count of points processed.\n\tSizeTotal    uint64 // Number of batches that reached size threshold.\n\tTimeoutTotal uint64 // Number of timeouts that occurred.\n}\n\n// Start starts the batching process. 
Returns the in and out channels for points\n// and point-batches respectively.\nfunc (b *PointBatcher) Start() {\n\t// Already running?\n\tif b.wg != nil {\n\t\treturn\n\t}\n\n\tvar timer *time.Timer\n\tvar batch []models.Point\n\tvar timerCh <-chan time.Time\n\n\temit := func() {\n\t\tb.out <- batch\n\t\tatomic.AddUint64(&b.stats.BatchTotal, 1)\n\t\tbatch = nil\n\t}\n\n\tb.wg = &sync.WaitGroup{}\n\tb.wg.Add(1)\n\n\tgo func() {\n\t\tdefer b.wg.Done()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-b.stop:\n\t\t\t\tif len(batch) > 0 {\n\t\t\t\t\temit()\n\t\t\t\t\ttimerCh = nil\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\tcase p := <-b.in:\n\t\t\t\tatomic.AddUint64(&b.stats.PointTotal, 1)\n\t\t\t\tif batch == nil {\n\t\t\t\t\tbatch = make([]models.Point, 0, b.size)\n\t\t\t\t\tif b.duration > 0 {\n\t\t\t\t\t\ttimer = time.NewTimer(b.duration)\n\t\t\t\t\t\ttimerCh = timer.C\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tbatch = append(batch, p)\n\t\t\t\tif len(batch) >= b.size { // 0 means send immediately.\n\t\t\t\t\tatomic.AddUint64(&b.stats.SizeTotal, 1)\n\t\t\t\t\temit()\n\t\t\t\t\ttimerCh = nil\n\t\t\t\t}\n\n\t\t\tcase <-b.flush:\n\t\t\t\tif len(batch) > 0 {\n\t\t\t\t\temit()\n\t\t\t\t\ttimerCh = nil\n\t\t\t\t}\n\n\t\t\tcase <-timerCh:\n\t\t\t\tatomic.AddUint64(&b.stats.TimeoutTotal, 1)\n\t\t\t\temit()\n\t\t\t}\n\t\t}\n\t}()\n}\n\n// Stop stops the batching process. 
Stop waits for the batching routine\n// to stop before returning.\nfunc (b *PointBatcher) Stop() {\n\t// If not running, nothing to stop.\n\tif b.wg == nil {\n\t\treturn\n\t}\n\n\tclose(b.stop)\n\tb.wg.Wait()\n}\n\n// In returns the channel to which points should be written.\nfunc (b *PointBatcher) In() chan<- models.Point {\n\treturn b.in\n}\n\n// Out returns the channel from which batches should be read.\nfunc (b *PointBatcher) Out() <-chan []models.Point {\n\treturn b.out\n}\n\n// Flush instructs the batcher to emit any pending points in a batch, regardless of batch size.\n// If there are no pending points, no batch is emitted.\nfunc (b *PointBatcher) Flush() {\n\tb.flush <- struct{}{}\n}\n\n// Stats returns a PointBatcherStats object for the PointBatcher. While the each statistic should be\n// closely correlated with each other statistic, it is not guaranteed.\nfunc (b *PointBatcher) Stats() *PointBatcherStats {\n\tstats := PointBatcherStats{}\n\tstats.BatchTotal = atomic.LoadUint64(&b.stats.BatchTotal)\n\tstats.PointTotal = atomic.LoadUint64(&b.stats.PointTotal)\n\tstats.SizeTotal = atomic.LoadUint64(&b.stats.SizeTotal)\n\tstats.TimeoutTotal = atomic.LoadUint64(&b.stats.TimeoutTotal)\n\treturn &stats\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/batcher_test.go",
    "content": "package tsdb_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n)\n\n// TestBatch_Size ensures that a batcher generates a batch when the size threshold is reached.\nfunc TestBatch_Size(t *testing.T) {\n\tbatchSize := 5\n\tbatcher := tsdb.NewPointBatcher(batchSize, 0, time.Hour)\n\tif batcher == nil {\n\t\tt.Fatal(\"failed to create batcher for size test\")\n\t}\n\n\tbatcher.Start()\n\n\tvar p models.Point\n\tgo func() {\n\t\tfor i := 0; i < batchSize; i++ {\n\t\t\tbatcher.In() <- p\n\t\t}\n\t}()\n\tbatch := <-batcher.Out()\n\tif len(batch) != batchSize {\n\t\tt.Errorf(\"received batch has incorrect length exp %d, got %d\", batchSize, len(batch))\n\t}\n\tcheckPointBatcherStats(t, batcher, -1, batchSize, 1, 0)\n}\n\n// TestBatch_SizeBuffered ensures that a buffered batcher generates a batch when the size threshold is reached.\nfunc TestBatch_SizeBuffered(t *testing.T) {\n\tbatchSize := 5\n\tbatcher := tsdb.NewPointBatcher(batchSize, 5, time.Hour)\n\tif batcher == nil {\n\t\tt.Fatal(\"failed to create batcher for size test\")\n\t}\n\n\tbatcher.Start()\n\n\tvar p models.Point\n\tgo func() {\n\t\tfor i := 0; i < batchSize; i++ {\n\t\t\tbatcher.In() <- p\n\t\t}\n\t}()\n\tbatch := <-batcher.Out()\n\tif len(batch) != batchSize {\n\t\tt.Errorf(\"received batch has incorrect length exp %d, got %d\", batchSize, len(batch))\n\t}\n\tcheckPointBatcherStats(t, batcher, -1, batchSize, 1, 0)\n}\n\n// TestBatch_Timeout ensures that a batcher generates a batch when the timeout triggers.\nfunc TestBatch_Timeout(t *testing.T) {\n\tbatchSize := 5\n\tbatcher := tsdb.NewPointBatcher(batchSize+1, 0, 100*time.Millisecond)\n\tif batcher == nil {\n\t\tt.Fatal(\"failed to create batcher for timeout test\")\n\t}\n\n\tbatcher.Start()\n\n\tvar p models.Point\n\tgo func() {\n\t\tfor i := 0; i < batchSize; i++ {\n\t\t\tbatcher.In() <- p\n\t\t}\n\t}()\n\tbatch := <-batcher.Out()\n\tif len(batch) != batchSize 
{\n\t\tt.Errorf(\"received batch has incorrect length exp %d, got %d\", batchSize, len(batch))\n\t}\n\tcheckPointBatcherStats(t, batcher, -1, batchSize, 0, 1)\n}\n\n// TestBatch_Flush ensures that a batcher generates a batch when flushed\nfunc TestBatch_Flush(t *testing.T) {\n\tbatchSize := 2\n\tbatcher := tsdb.NewPointBatcher(batchSize, 0, time.Hour)\n\tif batcher == nil {\n\t\tt.Fatal(\"failed to create batcher for flush test\")\n\t}\n\n\tbatcher.Start()\n\n\tvar p models.Point\n\tgo func() {\n\t\tbatcher.In() <- p\n\t\tbatcher.Flush()\n\t}()\n\tbatch := <-batcher.Out()\n\tif len(batch) != 1 {\n\t\tt.Errorf(\"received batch has incorrect length exp %d, got %d\", 1, len(batch))\n\t}\n\tcheckPointBatcherStats(t, batcher, -1, 1, 0, 0)\n}\n\n// TestBatch_MultipleBatches ensures that a batcher correctly processes multiple batches.\nfunc TestBatch_MultipleBatches(t *testing.T) {\n\tbatchSize := 2\n\tbatcher := tsdb.NewPointBatcher(batchSize, 0, 100*time.Millisecond)\n\tif batcher == nil {\n\t\tt.Fatal(\"failed to create batcher for size test\")\n\t}\n\n\tbatcher.Start()\n\n\tvar p models.Point\n\tvar b []models.Point\n\n\tbatcher.In() <- p\n\tbatcher.In() <- p\n\tb = <-batcher.Out() // Batch threshold reached.\n\tif len(b) != batchSize {\n\t\tt.Errorf(\"received batch (size) has incorrect length exp %d, got %d\", batchSize, len(b))\n\t}\n\n\tbatcher.In() <- p\n\tb = <-batcher.Out() // Timeout triggered.\n\tif len(b) != 1 {\n\t\tt.Errorf(\"received batch (timeout) has incorrect length exp %d, got %d\", 1, len(b))\n\t}\n\n\tcheckPointBatcherStats(t, batcher, -1, 3, 1, 1)\n}\n\nfunc checkPointBatcherStats(t *testing.T, b *tsdb.PointBatcher, batchTotal, pointTotal, sizeTotal, timeoutTotal int) {\n\tstats := b.Stats()\n\n\tif batchTotal != -1 && stats.BatchTotal != uint64(batchTotal) {\n\t\tt.Errorf(\"batch total stat is incorrect: %d\", stats.BatchTotal)\n\t}\n\tif pointTotal != -1 && stats.PointTotal != uint64(pointTotal) {\n\t\tt.Errorf(\"point total stat is incorrect: 
%d\", stats.PointTotal)\n\t}\n\tif sizeTotal != -1 && stats.SizeTotal != uint64(sizeTotal) {\n\t\tt.Errorf(\"size total stat is incorrect: %d\", stats.SizeTotal)\n\t}\n\tif timeoutTotal != -1 && stats.TimeoutTotal != uint64(timeoutTotal) {\n\t\tt.Errorf(\"timeout total stat is incorrect: %d\", stats.TimeoutTotal)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/config.go",
    "content": "package tsdb\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/monitor/diagnostics\"\n\t\"github.com/influxdata/influxdb/toml\"\n)\n\nconst (\n\t// DefaultEngine is the default engine for new shards\n\tDefaultEngine = \"tsm1\"\n\n\t// DefaultIndex is the default index for new shards\n\tDefaultIndex = \"inmem\"\n\n\t// tsdb/engine/wal configuration options\n\n\t// Default settings for TSM\n\n\t// DefaultCacheMaxMemorySize is the maximum size a shard's cache can\n\t// reach before it starts rejecting writes.\n\tDefaultCacheMaxMemorySize = 1024 * 1024 * 1024 // 1GB\n\n\t// DefaultCacheSnapshotMemorySize is the size at which the engine will\n\t// snapshot the cache and write it to a TSM file, freeing up memory\n\tDefaultCacheSnapshotMemorySize = 25 * 1024 * 1024 // 25MB\n\n\t// DefaultCacheSnapshotWriteColdDuration is the length of time at which\n\t// the engine will snapshot the cache and write it to a new TSM file if\n\t// the shard hasn't received writes or deletes\n\tDefaultCacheSnapshotWriteColdDuration = time.Duration(10 * time.Minute)\n\n\t// DefaultCompactFullWriteColdDuration is the duration at which the engine\n\t// will compact all TSM files in a shard if it hasn't received a write or delete\n\tDefaultCompactFullWriteColdDuration = time.Duration(4 * time.Hour)\n\n\t// DefaultMaxPointsPerBlock is the maximum number of points in an encoded\n\t// block in a TSM file\n\tDefaultMaxPointsPerBlock = 1000\n\n\t// DefaultMaxSeriesPerDatabase is the maximum number of series a node can hold per database.\n\t// This limit only applies to the \"inmem\" index.\n\tDefaultMaxSeriesPerDatabase = 1000000\n\n\t// DefaultMaxValuesPerTag is the maximum number of values a tag can have within a measurement.\n\tDefaultMaxValuesPerTag = 100000\n\n\t// DefaultMaxConcurrentCompactions is the maximum number of concurrent full and level compactions\n\t// that can run at one time.  
A value of 0 results in runtime.GOMAXPROCS(0) used at runtime.\n\tDefaultMaxConcurrentCompactions = 0\n)\n\n// Config holds the configuration for the tsdb package.\ntype Config struct {\n\tDir    string `toml:\"dir\"`\n\tEngine string `toml:\"-\"`\n\tIndex  string `toml:\"index-version\"`\n\n\t// General WAL configuration options\n\tWALDir string `toml:\"wal-dir\"`\n\n\t// WALFsyncDelay is the amount of time that a write will wait before fsyncing.  A duration\n\t// greater than 0 can be used to batch up multiple fsync calls.  This is useful for slower\n\t// disks or when WAL write contention is seen.  A value of 0 fsyncs every write to the WAL.\n\tWALFsyncDelay toml.Duration `toml:\"wal-fsync-delay\"`\n\n\t// Query logging\n\tQueryLogEnabled bool `toml:\"query-log-enabled\"`\n\n\t// Compaction options for tsm1 (descriptions above with defaults)\n\tCacheMaxMemorySize             uint64        `toml:\"cache-max-memory-size\"`\n\tCacheSnapshotMemorySize        uint64        `toml:\"cache-snapshot-memory-size\"`\n\tCacheSnapshotWriteColdDuration toml.Duration `toml:\"cache-snapshot-write-cold-duration\"`\n\tCompactFullWriteColdDuration   toml.Duration `toml:\"compact-full-write-cold-duration\"`\n\n\t// Limits\n\n\t// MaxSeriesPerDatabase is the maximum number of series a node can hold per database.\n\t// When this limit is exceeded, writes return a 'max series per database exceeded' error.\n\t// A value of 0 disables the limit. This limit only applies when using the \"inmem\" index.\n\tMaxSeriesPerDatabase int `toml:\"max-series-per-database\"`\n\n\t// MaxValuesPerTag is the maximum number of tag values a single tag key can have within\n\t// a measurement.  When the limit is exceeded, writes return an error.\n\t// A value of 0 disables the limit.\n\tMaxValuesPerTag int `toml:\"max-values-per-tag\"`\n\n\t// MaxConcurrentCompactions is the maximum number of concurrent level and full compactions\n\t// that can be running at one time across all shards.  
Compactions scheduled to run when the\n\t// limit is reached are blocked until a running compaction completes.  Snapshot compactions are\n\t// not affected by this limit.  A value of 0 limits compactions to runtime.GOMAXPROCS(0).\n\tMaxConcurrentCompactions int `toml:\"max-concurrent-compactions\"`\n\n\tTraceLoggingEnabled bool `toml:\"trace-logging-enabled\"`\n}\n\n// NewConfig returns the default configuration for tsdb.\nfunc NewConfig() Config {\n\treturn Config{\n\t\tEngine: DefaultEngine,\n\t\tIndex:  DefaultIndex,\n\n\t\tQueryLogEnabled: true,\n\n\t\tCacheMaxMemorySize:             DefaultCacheMaxMemorySize,\n\t\tCacheSnapshotMemorySize:        DefaultCacheSnapshotMemorySize,\n\t\tCacheSnapshotWriteColdDuration: toml.Duration(DefaultCacheSnapshotWriteColdDuration),\n\t\tCompactFullWriteColdDuration:   toml.Duration(DefaultCompactFullWriteColdDuration),\n\n\t\tMaxSeriesPerDatabase:     DefaultMaxSeriesPerDatabase,\n\t\tMaxValuesPerTag:          DefaultMaxValuesPerTag,\n\t\tMaxConcurrentCompactions: DefaultMaxConcurrentCompactions,\n\n\t\tTraceLoggingEnabled: false,\n\t}\n}\n\n// Validate validates the configuration hold by c.\nfunc (c *Config) Validate() error {\n\tif c.Dir == \"\" {\n\t\treturn errors.New(\"Data.Dir must be specified\")\n\t} else if c.WALDir == \"\" {\n\t\treturn errors.New(\"Data.WALDir must be specified\")\n\t}\n\n\tif c.MaxConcurrentCompactions < 0 {\n\t\treturn errors.New(\"max-concurrent-compactions must be greater than 0\")\n\t}\n\n\tvalid := false\n\tfor _, e := range RegisteredEngines() {\n\t\tif e == c.Engine {\n\t\t\tvalid = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !valid {\n\t\treturn fmt.Errorf(\"unrecognized engine %s\", c.Engine)\n\t}\n\n\tvalid = false\n\tfor _, e := range RegisteredIndexes() {\n\t\tif e == c.Index {\n\t\t\tvalid = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !valid {\n\t\treturn fmt.Errorf(\"unrecognized index %s\", c.Index)\n\t}\n\n\treturn nil\n}\n\n// Diagnostics returns a diagnostics representation of a subset of the 
Config.\nfunc (c Config) Diagnostics() (*diagnostics.Diagnostics, error) {\n\treturn diagnostics.RowFromMap(map[string]interface{}{\n\t\t\"dir\":                                c.Dir,\n\t\t\"wal-dir\":                            c.WALDir,\n\t\t\"wal-fsync-delay\":                    c.WALFsyncDelay,\n\t\t\"cache-max-memory-size\":              c.CacheMaxMemorySize,\n\t\t\"cache-snapshot-memory-size\":         c.CacheSnapshotMemorySize,\n\t\t\"cache-snapshot-write-cold-duration\": c.CacheSnapshotWriteColdDuration,\n\t\t\"compact-full-write-cold-duration\":   c.CompactFullWriteColdDuration,\n\t\t\"max-series-per-database\":            c.MaxSeriesPerDatabase,\n\t\t\"max-values-per-tag\":                 c.MaxValuesPerTag,\n\t\t\"max-concurrent-compactions\":         c.MaxConcurrentCompactions,\n\t}), nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/config_test.go",
    "content": "package tsdb_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n)\n\nfunc TestConfig_Parse(t *testing.T) {\n\t// Parse configuration.\n\tc := tsdb.NewConfig()\n\tif _, err := toml.Decode(`\ndir = \"/var/lib/influxdb/data\"\nwal-dir = \"/var/lib/influxdb/wal\"\nwal-fsync-delay = \"10s\"\n`, &c); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := c.Validate(); err != nil {\n\t\tt.Errorf(\"unexpected validate error: %s\", err)\n\t}\n\n\tif got, exp := c.Dir, \"/var/lib/influxdb/data\"; got != exp {\n\t\tt.Errorf(\"unexpected dir:\\n\\nexp=%v\\n\\ngot=%v\\n\\n\", exp, got)\n\t}\n\tif got, exp := c.WALDir, \"/var/lib/influxdb/wal\"; got != exp {\n\t\tt.Errorf(\"unexpected wal-dir:\\n\\nexp=%v\\n\\ngot=%v\\n\\n\", exp, got)\n\t}\n\tif got, exp := c.WALFsyncDelay, time.Duration(10*time.Second); time.Duration(got).Nanoseconds() != exp.Nanoseconds() {\n\t\tt.Errorf(\"unexpected wal-fsync-delay:\\n\\nexp=%v\\n\\ngot=%v\\n\\n\", exp, got)\n\t}\n\n}\n\nfunc TestConfig_Validate_Error(t *testing.T) {\n\tc := tsdb.NewConfig()\n\tif err := c.Validate(); err == nil || err.Error() != \"Data.Dir must be specified\" {\n\t\tt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\tc.Dir = \"/var/lib/influxdb/data\"\n\tif err := c.Validate(); err == nil || err.Error() != \"Data.WALDir must be specified\" {\n\t\tt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\tc.WALDir = \"/var/lib/influxdb/wal\"\n\tc.Engine = \"fake1\"\n\tif err := c.Validate(); err == nil || err.Error() != \"unrecognized engine fake1\" {\n\t\tt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\tc.Engine = \"tsm1\"\n\tc.Index = \"foo\"\n\tif err := c.Validate(); err == nil || err.Error() != \"unrecognized index foo\" {\n\t\tt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\tc.Index = \"inmem\"\n\tif err := c.Validate(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tc.Index = \"tsi1\"\n\tif err := c.Validate(); err != nil 
{\n\t\tt.Error(err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/cursor.go",
    "content": "package tsdb\n\nimport \"github.com/influxdata/influxdb/influxql\"\n\n// EOF represents a \"not found\" key returned by a Cursor.\nconst EOF = influxql.ZeroTime\n\n// Cursor represents an iterator over a series.\ntype Cursor interface {\n\tSeekTo(seek int64) (key int64, value interface{})\n\tNext() (key int64, value interface{})\n\tAscending() bool\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/doc.go",
    "content": "/*\nPackage tsdb implements a durable time series database.\n\n*/\npackage tsdb\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/engine.go",
    "content": "// Package engine can be imported to initialize and register all available TSDB engines.\n//\n// Alternatively, you can import any individual subpackage underneath engine.\npackage engine // import \"github.com/influxdata/influxdb/tsdb/engine\"\n\nimport (\n\t// Initialize and register tsm1 engine\n\t_ \"github.com/influxdata/influxdb/tsdb/engine/tsm1\"\n)\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/DESIGN.md",
    "content": "# File Structure\n\nA TSM file is composed for four sections: header, blocks, index and the footer.\n\n```\n┌────────┬────────────────────────────────────┬─────────────┬──────────────┐\n│ Header │               Blocks               │    Index    │    Footer    │\n│5 bytes │              N bytes               │   N bytes   │   4 bytes    │\n└────────┴────────────────────────────────────┴─────────────┴──────────────┘\n```\nHeader is composed of a magic number to identify the file type and a version number.\n\n```\n┌───────────────────┐\n│      Header       │\n├─────────┬─────────┤\n│  Magic  │ Version │\n│ 4 bytes │ 1 byte  │\n└─────────┴─────────┘\n```\n\nBlocks are sequences of block CRC32 and data.  The block data is opaque to the file.  The CRC32 is used for recovery to ensure blocks have not been corrupted due to bugs outside of our control.  The length of the blocks is stored in the index.\n\n```\n┌───────────────────────────────────────────────────────────┐\n│                          Blocks                           │\n├───────────────────┬───────────────────┬───────────────────┤\n│      Block 1      │      Block 2      │      Block N      │\n├─────────┬─────────┼─────────┬─────────┼─────────┬─────────┤\n│  CRC    │  Data   │  CRC    │  Data   │  CRC    │  Data   │\n│ 4 bytes │ N bytes │ 4 bytes │ N bytes │ 4 bytes │ N bytes │\n└─────────┴─────────┴─────────┴─────────┴─────────┴─────────┘\n```\n\nFollowing the blocks is the index for the blocks in the file.  The index is composed of a sequence of index entries ordered lexicographically by key and then by time.  Each index entry starts with a key length and key followed by a count of the number of blocks in the file.  
Each block entry is composed of the min and max time for the block, the offset into the file where the block is located and the size of the block.\n\nThe index structure can provide efficient access to all blocks as well as the ability to determine the cost associated with accessing a given key.  Given a key and timestamp, we know exactly which file contains the block for that timestamp as well as where that block resides and how much data to read to retrieve the block.  If we know we need to read all or multiple blocks in a file, we can use the size to determine how much to read in a given IO.\n\n_TBD: The block length stored in the block data could probably be dropped since we store it in the index._\n\n```\n┌────────────────────────────────────────────────────────────────────────────┐\n│                                   Index                                    │\n├─────────┬─────────┬──────┬───────┬─────────┬─────────┬────────┬────────┬───┤\n│ Key Len │   Key   │ Type │ Count │Min Time │Max Time │ Offset │  Size  │...│\n│ 2 bytes │ N bytes │1 byte│2 bytes│ 8 bytes │ 8 bytes │8 bytes │4 bytes │   │\n└─────────┴─────────┴──────┴───────┴─────────┴─────────┴────────┴────────┴───┘\n```\n\nThe last section is the footer that stores the offset of the start of the index.\n\n```\n┌─────────┐\n│ Footer  │\n├─────────┤\n│Index Ofs│\n│ 8 bytes │\n└─────────┘\n```\n\n# File System Layout\n\nThe file system is organized a directory per shard where each shard is an integer number. Associated with each shard directory, there is a set of other directories and files:\n\n* a wal directory - contains a set numerically increasing files WAL segment files named #####.wal.  The wal directory is separate from the directory containing the TSM files so that different types can be used if necessary.\n* .tsm files - a set of numerically increasing TSM files containing compressed series data.\n* .tombstone files - files named after the corresponding TSM file as #####.tombstone.  
These contain measurement and series keys that have been deleted.  These files are removed during compactions.\n\n# Data Flow\n\nWrites are appended to the current WAL segment and are also added to the Cache.  Each WAL segment is size bounded and rolls-over to a new file after it fills up.  The cache is also size bounded; snapshots are taken and WAL compactions are initiated when the cache becomes too full. If the inbound write rate exceeds the WAL compaction rate for a sustained period, the cache may become too full in which case new writes will fail until the compaction process catches up. The WAL and Cache are separate entities and do not interact with each other.  The Engine coordinates the writes to both.\n\nWhen WAL segments fill up and have been closed, the Compactor reads the WAL entries and combines them with one or more existing TSM files.  This process runs continuously until all WAL files are compacted and there is a minimum number of TSM files.  As each TSM file is completed, it is loaded and referenced by the FileStore.\n\nQueries are executed by constructing Cursors for keys.  The Cursors iterate over slices of Values.  When the current Values are exhausted, a Cursor requests the next set of Values from the Engine.  The Engine returns a slice of Values by querying the FileStore and Cache.  The Values in the Cache are overlaid on top of the values returned from the FileStore.  The FileStore reads and decodes blocks of Values according to the index for the file.\n\nUpdates (writing a newer value for a point that already exists) occur as normal writes.  Since cached values overwrite existing values, newer writes take precedence.\n\nDeletes occur by writing a delete entry for the measurement or series to the WAL and then updating the Cache and FileStore.  The Cache evicts all relevant entries.  The FileStore writes a tombstone file for each TSM file that contains relevant data.  
These tombstone files are used at startup time to ignore blocks as well as during compactions to remove deleted entries.\n\n# Compactions\n\nCompactions are a serial and continuously running process that iteratively optimizes the storage for queries.  Specifically, it does the following:\n\n* Converts closed WAL files into TSM files and removes the closed WAL files\n* Combines smaller TSM files into larger ones to improve compression ratios\n* Rewrites existing files that contain series data that has been deleted\n* Rewrites existing files that contain writes with more recent data to ensure a point exists in only one TSM file.\n\nThe compaction algorithm is continuously running and always selects files to compact based on a priority.\n\n1. If there are closed WAL files, the 5 oldest WAL segments are added to the set of compaction files.\n2. If any TSM files contain points with older timestamps that also exist in the WAL files, those TSM files are added to the compaction set.\n3. If any TSM files have a tombstone marker, those TSM files are added to the compaction set.\n\nThe compaction algorithm generates a set of SeriesIterators that return a sequence of `key`, `Values` where each `key` returned is lexicographically greater than the previous one.  The iterators are ordered such that WAL iterators will override any values returned by the TSM file iterators.  WAL iterators read and cache the WAL segment so that deletes later in the log can be processed correctly.  TSM file iterators use the tombstone files to ensure that deleted series are not returned during iteration.  As each key is processed, the Values slice is grown, sorted, and then written to a new block in the new TSM file.  The blocks can be split based on number of points or size of the block.  If the total size of the current TSM file would exceed the maximum file size, a new file is created.\n\nDeletions can occur while a new file is being written.  
Since the new TSM file is not complete a tombstone would not be written for it. This could result in deleted values getting written into a new file.  To prevent this, if a compaction is running and a delete occurs, the current compaction is aborted and new compaction is started.\n\nWhen all WAL files in the current compaction have been processed and the new TSM files have been successfully written, the new TSM files are renamed to their final names, the WAL segments are truncated and the associated snapshots are released from the cache.\n\nThe compaction process then runs again until there are no more WAL files and the minimum number of TSM files exist that are also under the maximum file size.\n\n# WAL\n\nCurrently, there is a WAL per shard.  This means all the writes in a WAL segment are for the given shard.  It also means that writes across a lot of shards append to many files which might result in more disk IO due to seeking to the end of multiple files.\n\nTwo options are being considered:\n\n## WAL per Shard\n\nThis is the current behavior of the WAL.  This option is conceptually easier to reason about.  For example, compactions that read in multiple WAL segments are assured that all the WAL entries pertain to the current shard.  If it completes a compaction, it is safe to remove the WAL segment.  It is also easier to deal with shard deletions as all the WAL segments can be dropped along with the other shard files.\n\nThe drawback of this option is the potential for turning sequential write IO into random IO in the presence of multiple shards and writes to many different shards.\n\n## Single WAL\n\nUsing a single WAL adds some complexity to compactions and deletions.  
Compactions will need to either sort all the WAL entries in a segment by shard first and then run compactions on each shard or the compactor needs to be able to compact multiple shards concurrently while ensuring points in existing TSM files in different shards remain separate.\n\nDeletions would not be able to reclaim WAL segments immediately as in the case where there is a WAL per shard.  Similarly, a compaction of a WAL segment that contains writes for a deleted shard would need to be dropped.\n\nCurrently, we are moving towards a Single WAL implementation.\n\n# Cache\n\nThe purpose of the cache is so that data in the WAL is queryable. Every time a point is written to a WAL segment, it is also written to an in-memory cache. The cache is split into two parts: a \"hot\" part, representing the most recent writes and a \"cold\" part containing snapshots for which an active WAL compaction\nprocess is underway.\n\nQueries are satisfied with values read from the cache and finalized TSM files. Points in the cache always take precedence over points in TSM files with the same timestamp. Queries are never read directly from WAL segment files which are designed to optimize write rather than read performance.\n\nThe cache tracks its size on a \"point-calculated\" basis. \"point-calculated\" means that the RAM storage footprint for a point is the determined by calling its `Size()` method. While this does not correspond directly to the actual RAM footprint in the cache, the two values are sufficiently well correlated for the purpose of controlling RAM usage.\n\nIf the cache becomes too full, or the cache has been idle for too long, a snapshot of the cache is taken and a compaction process is initiated for the related WAL segments. 
When the compaction of these segments is complete, the related snapshots are released from the cache.\n\nIn cases where IO performance of the compaction process falls behind the incoming write rate, it is possible that writes might arrive at the cache while the cache is both too full and the compaction of the previous snapshot is still in progress. In this case, the cache will reject the write, causing the write to fail.\nWell behaved clients should interpret write failures as back pressure and should either discard the write or back off and retry the write after a delay.\n\n# TSM File Index\n\nEach TSM file contains a full index of the blocks contained within the file.  The existing index structure is designed to allow for a binary search across the index to find the starting block for a key.  We would then seek to that start key and sequentially scan each block to find the location of a timestamp.\n\nOne issue with the existing structure is that seeking to a given timestamp for a key has an unknown cost.  This can cause variability in read performance that would be very difficult to fix.  Another issue is that startup times for loading a TSM file would grow in proportion to number and size of TSM files on disk since we would need to scan the entire file to find all keys contained in the file.  This could be addressed by using a separate index like file or changing the index structure.\n\nWe've chosen to update the block index structure to ensure a TSM file is fully self-contained, supports consistent IO characteristics for sequential and random accesses as well as provides an efficient load time regardless of file size.  The implications of these changes are that the index is slightly larger and we need to be able to search the index despite each entry being variably sized.\n\nThe following are some alternative design options to handle the cases where the index is too large to fit in memory.  
We are currently planning to use an indirect MMAP indexing approach for loaded TSM files.\n\n### Indirect MMAP Indexing\n\nOne option is to MMAP the index into memory and record the pointers to the start of each index entry in a slice.  When searching for a given key, the pointers are used to perform a binary search on the underlying mmap data.  When the matching key is found, the block entries can be loaded and search or a subsequent binary search on the blocks can be performed.\n\nA variation of this can also be done without MMAPs by seeking and reading in the file.  The underlying file cache will still be utilized in this approach as well.\n\nAs an example, if we have an index structure in memory such as:\n\n ```\n┌────────────────────────────────────────────────────────────────────┐\n│                               Index                                │\n├─┬──────────────────────┬──┬───────────────────────┬───┬────────────┘\n│0│                      │62│                       │145│\n├─┴───────┬─────────┬────┼──┴──────┬─────────┬──────┼───┴─────┬──────┐\n│Key 1 Len│   Key   │... │Key 2 Len│  Key 2  │ ...  │  Key 3  │ ...  │\n│ 2 bytes │ N bytes │    │ 2 bytes │ N bytes │      │ 2 bytes │      │\n└─────────┴─────────┴────┴─────────┴─────────┴──────┴─────────┴──────┘\n```\n\nWe would build an `offsets` slices where each element pointers to the byte location for the first key in then index slice.\n\n```\n┌────────────────────────────────────────────────────────────────────┐\n│                              Offsets                               │\n├────┬────┬────┬─────────────────────────────────────────────────────┘\n│ 0  │ 62 │145 │\n└────┴────┴────┘\n ```\n\n\nUsing this offset slice we can find `Key 2` by doing a binary search over the offsets slice.  Instead of comparing the value in the offsets (e.g. 
`62`), we use that as an index into the underlying index to retrieve the key at position `62` and perform our comparisons with that.\n\nWhen we have identified the correct position in the index for a given key, we could perform another binary search or a linear scan.  This should be fast as well since each index entry is 28 bytes and all contiguous in memory.\n\nThe size of the offsets slice would be proportional to the number of unique series.  If we limit file sizes to 4GB, we would use 4 bytes for each pointer.\n\n### LRU/Lazy Load\n\nA second option could be to have the index work as a memory bounded, lazy-load style cache.  When a cache miss occurs, the index structure is scanned to find the key and the entries are loaded and added to the cache which causes the least-recently used entries to be evicted.\n\n### Key Compression\n\nAnother option is to compress keys using a key specific dictionary encoding.   For example,\n\n```\ncpu,host=server1 value=1\ncpu,host=server2 value=2\nmemory,host=server1 value=3\n```\n\nCould be compressed by expanding the key into its respective parts: measurement, tag keys, tag values and fields.  For each part a unique number is assigned.  e.g.\n\nMeasurements\n```\ncpu = 1\nmemory = 2\n```\n\nTag Keys\n```\nhost = 1\n```\n\nTag Values\n```\nserver1 = 1\nserver2 = 2\n```\n\nFields\n```\nvalue = 1\n```\n\nUsing this encoding dictionary, the string keys could be converted to a sequence of integers:\n\n```\ncpu,host=server1 value=1 -->    1,1,1,1\ncpu,host=server2 value=2 -->    1,1,2,1\nmemory,host=server1 value=3 --> 2,1,1,1\n```\n\nThese sequences of small integers can then be compressed further using a bit packed format such as Simple9 or Simple8b.  
The resulting byte slices would be a multiple of 4 or 8 bytes (using Simple9/Simple8b respectively) which could used as the (string).\n\n### Separate Index\n\nAnother option might be to have a separate index file (BoltDB) that serves as the storage for the `FileIndex` and is transient.   This index would be recreated at startup and updated at compaction time.\n\n# Components\n\nThese are some of the high-level components and their responsibilities.  These are ideas preliminary.\n\n## WAL\n\n* Append-only log composed of fixed size segment files.\n* Writes are appended to the current segment\n* Roll-over to new segment after filling the current segment\n* Closed segments are never modified and used for startup and recovery as well as compactions.\n* There is a single WAL for the store as opposed to a WAL per shard.\n\n## Compactor\n\n* Continuously running, iterative file storage optimizer\n* Takes closed WAL files, existing TSM files and combines into one or more new TSM files\n\n## Cache\n\n* Hold recently written series data\n* Has max size and a flushing limit\n* When the flushing limit is crossed, a snapshot is taken and a compaction process for the related WAL segments is commenced.\n* If a write comes in, the cache is too full, and the previous snapshot is still being compacted, the write will fail.\n\n# Engine\n\n* Maintains references to Cache, FileStore, WAL, etc..\n* Creates a cursor\n* Receives writes, coordinates queries\n* Hides underlying files and types from clients\n\n## Cursor\n\n* Iterates forward or reverse for given key\n* Requests values from Engine for key and timestamp\n* Has no knowledge of TSM files or WAL - delegates to Engine to request next set of Values\n\n## FileStore\n\n* Manages TSM files\n* Maintains the file indexes and references to active files\n* A TSM file that is opened entails reading in and adding the index section to the `FileIndex`.  
The block data is then MMAPed up to the index offset to avoid having the index in memory twice.\n\n## FileIndex\n* Provides location information to a file and block for a given key and timestamp.\n\n## Interfaces\n\n```\nSeriesIterator returns the key and []Value such that a key is only returned\nonce and subsequent calls to Next() do not return the same key twice.\ntype SeriesIterator interface {\n   func Next() (key, []Value, error)\n}\n```\n\n## Types\n\n_NOTE: the actual func names are to illustrate the type of functionality the type is responsible._\n\n```\nTSMWriter writes a sets of key and Values to a TSM file.\ntype TSMWriter struct {}\nfunc (t *TSMWriter) Write(key string, values []Value) error {}\nfunc (t *TSMWriter) Close() error\n```\n\n\n```\n// WALIterator returns the key and []Values for a set of WAL segment files.\ntype WALIterator struct{\n    Files *os.File\n}\nfunc (r *WALReader) Next() (key, []Value, error)\n```\n\n\n```\nTSMIterator returns the key and values from a TSM file.\ntype TSMIterator struct {}\nfunc (r *TSMIterator) Next() (key, []Value, error)\n```\n\n```\ntype Compactor struct {}\nfunc (c *Compactor) Compact(iters ...SeriesIterators) error\n```\n\n```\ntype Engine struct {\n    wal *WAL\n    cache *Cache\n    fileStore *FileStore\n    compactor *Compactor\n}\n\nfunc (e *Engine) ValuesBefore(key string, timestamp time.Time) ([]Value, error)\nfunc (e *Engine) ValuesAfter(key string, timestamp time.Time) ([]Value, error)\n```\n\n```\ntype Cursor struct{\n    engine *Engine\n}\n...\n```\n\n```\n// FileStore maintains references\ntype FileStore struct {}\nfunc (f *FileStore) ValuesBefore(key string, timestamp time.Time) ([]Value, error)\nfunc (f *FileStore) ValuesAfter(key string, timestamp time.Time) ([]Value, error)\n\n```\n\n```\ntype FileIndex struct {}\n\n// Returns a file and offset for a block located in the return file that contains the requested key and timestamp.\nfunc (f *FileIndex) Location(key, timestamp) (*os.File, uint64, 
error)\n```\n\n```\ntype Cache struct {}\nfunc (c *Cache) Write(key string, values []Value, checkpoint uint64) error\nfunc (c *Cache) SetCheckpoint(checkpoint uint64) error\nfunc (c *Cache) Cursor(key string) tsdb.Cursor\n```\n\n```\ntype WAL struct {}\nfunc (w *WAL) Write(key string, values []Value)\nfunc (w *WAL) ClosedSegments() ([]*os.File, error)\n```\n\n\n# Concerns\n\n## Performance\n\nThere are five categories of performance this design is concerned with:\n\n* Write Throughput/Latency\n* Query Throughput/Latency\n* Startup time\n* Compaction Throughput/Latency\n* Memory Usage\n\n### Writes\n\nWrite throughput is bounded by the time to process the write on the CPU (parsing, sorting, etc..), adding and evicting to the Cache and appending the write to the WAL.  The first two items are CPU bound and can be tuned and optimized if they become a bottleneck.  The WAL write can be tuned such that in the worst case every write requires at least 2 IOPS (write + fsync) or batched so that multiple writes are queued and fsync'd in sizes matching one or more disk blocks.  Performing more work with each IO will improve throughput.\n\nWrite latency is minimal for the WAL write since there are no seeks.  The latency is bounded by the time to complete any write and fsync calls.\n\n### Queries\n\nQuery throughput is directly related to how many blocks can be read in a period of time.  The index structure contains enough information to determine if one or multiple blocks can be read in a single IO.\n\nQuery latency is determined by how long it takes to find and read the relevant blocks.  The in-memory index structure contains the offsets and sizes of all blocks for a key.  This allows every block to be read in 2 IOPS (seek + read) regardless of position, structure or size of file.\n\n### Startup\n\nStartup time is proportional to the number of WAL files, TSM files and tombstone files.  WAL files can be read and processed in large batches using the WALIterators.  
TSM files require reading the index block into memory (5 IOPS/file).  Tombstone files are expected to be small and infrequent and would require approximately 2 IOPS/file.\n\n### Compactions\n\nCompactions are IO intensive in that they may need to read multiple, large TSM files to rewrite them.  The throughput of a compaction (MB/s) as well as the latency for each compaction is important to keep consistent even as data sizes grow.\n\nTo address these concerns, compactions prioritize old WAL files over optimizing storage/compression to avoid data being hidden during overload situations.  This also accounts for the fact that shards will eventually become cold for writes so that existing data will be able to be optimized.  To maintain consistent performance, the number of each type of file processed as well as the size of each file processed is bounded.\n\n### Memory Footprint\n\nThe memory footprint should not grow unbounded due to additional files or series keys of large sizes or numbers.  Some options for addressing this concern are covered in the [Design Options] section.\n\n## Concurrency\n\nThe main concern with concurrency is that reads and writes should not block each other.  Writes add entries to the Cache and append entries to the WAL.  During queries, the contention points will be the Cache and existing TSM files.  Since the Cache and TSM file data is only accessed through the engine by the cursors, several strategies can be used to improve concurrency.\n\n1. cached series data is returned to cursors as a copy.  Since cache snapshots are released following compaction, cursor iteration and writes to the same series could block each other.  Iterating over copies of the values can relieve some of this contention.\n2. TSM data values returned by the engine are new references to Values and not access to the actual TSM files.  This means that the `Engine`, through the `FileStore` can limit contention.\n3. 
Compactions are the only place where new TSM files are added and removed.  Since this is a serial, continuously running process, file contention is minimized.\n\n## Robustness\n\nThe two robustness concerns considered by this design are writes filling the cache and crash recovery.\n\n### Cache Exhaustion\n\nThe cache is used to hold the contents of uncompacted WAL segments in memory until such time that the compaction process has had a chance to convert the write-optimised WAL segments into read-optimised TSM files.\n\nThe question arises about what to do in the case that the inbound write rate temporarily exceeds the compaction rate. There are four alternatives:\n\n* block the write until the compaction process catches up\n* cache the write and hope that the compaction process catches up before memory exhaustion occurs\n* evict older cache entries to make room for new writes\n* fail the write and propagate the error back to the database client as a form of back pressure\n\nThe current design chooses the last option - failing the writes. While this option reduces the apparent robustness of the database API from the perspective of the clients, it does provide a means by which the database can communicate, via back pressure, the need for clients to temporarily backoff. Well behaved clients should respond to write errors either by discarding the write or by retrying the write after a delay in the hope that the compaction process will eventually catch up. The problem with the first two options is that they may exhaust server resources. 
The problem with the third option is that queries (which don't touch WAL segments) might silently return incomplete results during compaction periods; with the selected option the possibility of incomplete queries is at least flagged by the presence of write errors during periods of degraded compaction performance.\n\n### Crash Recovery\n\nCrash recovery is facilitated with the following two properties: the append-only nature of WAL segments and the write-once nature of TSM files. If the server crashes, incomplete compactions are discarded and the cache is rebuilt from the discovered WAL segments. Compactions will then resume in the normal way. Similarly, TSM files are immutable once they have been created and registered with the file store. A compaction may replace an existing TSM file, but the replaced file is not removed from the file system until the replacement file has been created and synced to disk.\n\n# Errata\n\nThis section is reserved for errata. In cases where the document is incorrect or inconsistent, such errata will be noted here with the contents of this section taking precedence over text elsewhere in the document in the case of discrepancies. Future full revisions of this document will fold the errata text back into the body of the document.\n\n# Revisions\n\n## 14 February, 2016\n\n* refined description of cache behaviour and robustness to reflect current design based on snapshots. Most references to checkpoints and evictions have been removed. See discussion here - https://goo.gl/L7AzVu\n\n## 11 November, 2015\n\n* initial design published"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/MANIFEST",
    "content": "{\n  \"files\": [\n    \"00000001.tsl\"\n  ]\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/bit_reader.go",
    "content": "package tsm1\n\nimport \"io\"\n\n// BitReader reads bits from an io.Reader.\ntype BitReader struct {\n\tdata []byte\n\n\tbuf struct {\n\t\tv uint64 // bit buffer\n\t\tn uint   // available bits\n\t}\n}\n\n// NewBitReader returns a new instance of BitReader that reads from data.\nfunc NewBitReader(data []byte) *BitReader {\n\tb := new(BitReader)\n\tb.Reset(data)\n\treturn b\n}\n\n// Reset sets the underlying reader on b and reinitializes.\nfunc (r *BitReader) Reset(data []byte) {\n\tr.data = data\n\tr.buf.v, r.buf.n = 0, 0\n\tr.readBuf()\n}\n\n// CanReadBitFast returns true if calling ReadBitFast() is allowed.\n// Fast bit reads are allowed when at least 2 values are in the buffer.\n// This is because it is not required to refilled the buffer and the caller\n// can inline the calls.\nfunc (r *BitReader) CanReadBitFast() bool { return r.buf.n > 1 }\n\n// ReadBitFast is an optimized bit read.\n// IMPORTANT: Only allowed if CanReadFastBit() is true!\nfunc (r *BitReader) ReadBitFast() bool {\n\tv := (r.buf.v&(1<<63) != 0)\n\tr.buf.v <<= 1\n\tr.buf.n -= 1\n\treturn v\n}\n\n// ReadBit returns the next bit from the underlying data.\nfunc (r *BitReader) ReadBit() (bool, error) {\n\tv, err := r.ReadBits(1)\n\treturn v != 0, err\n}\n\n// ReadBits reads nbits from the underlying data into a uint64.\n// nbits must be from 1 to 64, inclusive.\nfunc (r *BitReader) ReadBits(nbits uint) (uint64, error) {\n\t// Return EOF if there is no more data.\n\tif r.buf.n == 0 {\n\t\treturn 0, io.EOF\n\t}\n\n\t// Return bits from buffer if less than available bits.\n\tif nbits <= r.buf.n {\n\t\t// Return all bits, if requested.\n\t\tif nbits == 64 {\n\t\t\tv := r.buf.v\n\t\t\tr.buf.v, r.buf.n = 0, 0\n\t\t\tr.readBuf()\n\t\t\treturn v, nil\n\t\t}\n\n\t\t// Otherwise mask returned bits.\n\t\tv := (r.buf.v >> (64 - nbits))\n\t\tr.buf.v <<= nbits\n\t\tr.buf.n -= nbits\n\n\t\tif r.buf.n == 0 {\n\t\t\tr.readBuf()\n\t\t}\n\t\treturn v, nil\n\t}\n\n\t// Otherwise read all available 
bits in current buffer.\n\tv, n := r.buf.v, r.buf.n\n\n\t// Read new buffer.\n\tr.buf.v, r.buf.n = 0, 0\n\tr.readBuf()\n\n\t// Append new buffer to previous buffer and shift to remove unnecessary bits.\n\tv |= (r.buf.v >> n)\n\tv >>= 64 - nbits\n\n\t// Remove used bits from new buffer.\n\tbufN := nbits - n\n\tif bufN > r.buf.n {\n\t\tbufN = r.buf.n\n\t}\n\tr.buf.v <<= bufN\n\tr.buf.n -= bufN\n\n\tif r.buf.n == 0 {\n\t\tr.readBuf()\n\t}\n\n\treturn v, nil\n}\n\nfunc (r *BitReader) readBuf() {\n\t// Determine number of bytes to read to fill buffer.\n\tbyteN := 8 - (r.buf.n / 8)\n\n\t// Limit to the length of our data.\n\tif n := uint(len(r.data)); byteN > n {\n\t\tbyteN = n\n\t}\n\n\t// Optimized 8-byte read.\n\tif byteN == 8 {\n\t\tr.buf.v = uint64(r.data[7]) | uint64(r.data[6])<<8 |\n\t\t\tuint64(r.data[5])<<16 | uint64(r.data[4])<<24 |\n\t\t\tuint64(r.data[3])<<32 | uint64(r.data[2])<<40 |\n\t\t\tuint64(r.data[1])<<48 | uint64(r.data[0])<<56\n\t\tr.buf.n = 64\n\t\tr.data = r.data[8:]\n\t\treturn\n\t}\n\n\t// Otherwise append bytes to buffer.\n\tfor i := uint(0); i < byteN; i++ {\n\t\tr.buf.n += 8\n\t\tr.buf.v |= uint64(r.data[i]) << (64 - r.buf.n)\n\t}\n\n\t// Move data forward.\n\tr.data = r.data[byteN:]\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/bit_reader_test.go",
    "content": "package tsm1_test\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"math\"\n\t\"math/rand\"\n\t\"reflect\"\n\t\"testing\"\n\t\"testing/quick\"\n\n\t\"github.com/dgryski/go-bitstream\"\n\t\"github.com/influxdata/influxdb/tsdb/engine/tsm1\"\n)\n\nfunc TestBitStreamEOF(t *testing.T) {\n\tbr := tsm1.NewBitReader([]byte(\"0\"))\n\n\tb, err := br.ReadBits(8)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif b != '0' {\n\t\tt.Error(\"ReadBits(8) didn't return first byte\")\n\t}\n\n\tif _, err := br.ReadBits(8); err != io.EOF {\n\t\tt.Error(\"ReadBits(8) on empty string didn't return EOF\")\n\t}\n\n\t// 0 = 0b00110000\n\tbr = tsm1.NewBitReader([]byte(\"0\"))\n\n\tbuf := bytes.NewBuffer(nil)\n\tbw := bitstream.NewWriter(buf)\n\n\tfor i := 0; i < 4; i++ {\n\t\tbit, err := br.ReadBit()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Error(\"GetBit returned error err=\", err.Error())\n\t\t\treturn\n\t\t}\n\t\tbw.WriteBit(bitstream.Bit(bit))\n\t}\n\n\tbw.Flush(bitstream.One)\n\n\terr = bw.WriteByte(0xAA)\n\tif err != nil {\n\t\tt.Error(\"unable to WriteByte\")\n\t}\n\n\tc := buf.Bytes()\n\n\tif len(c) != 2 || c[1] != 0xAA || c[0] != 0x3f {\n\t\tt.Error(\"bad return from 4 read bytes\")\n\t}\n\n\t_, err = tsm1.NewBitReader([]byte(\"\")).ReadBit()\n\tif err != io.EOF {\n\t\tt.Error(\"ReadBit on empty string didn't return EOF\")\n\t}\n}\n\nfunc TestBitStream(t *testing.T) {\n\tbuf := bytes.NewBuffer(nil)\n\tbr := tsm1.NewBitReader([]byte(\"hello\"))\n\tbw := bitstream.NewWriter(buf)\n\n\tfor {\n\t\tbit, err := br.ReadBit()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Error(\"GetBit returned error err=\", err.Error())\n\t\t\treturn\n\t\t}\n\t\tbw.WriteBit(bitstream.Bit(bit))\n\t}\n\n\ts := buf.String()\n\n\tif s != \"hello\" {\n\t\tt.Error(\"expected 'hello', got=\", []byte(s))\n\t}\n}\n\nfunc TestByteStream(t *testing.T) {\n\tbuf := bytes.NewBuffer(nil)\n\tbr := tsm1.NewBitReader([]byte(\"hello\"))\n\tbw := 
bitstream.NewWriter(buf)\n\n\tfor i := 0; i < 3; i++ {\n\t\tbit, err := br.ReadBit()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Error(\"GetBit returned error err=\", err.Error())\n\t\t\treturn\n\t\t}\n\t\tbw.WriteBit(bitstream.Bit(bit))\n\t}\n\n\tfor i := 0; i < 3; i++ {\n\t\tbyt, err := br.ReadBits(8)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Error(\"ReadBits(8) returned error err=\", err.Error())\n\t\t\treturn\n\t\t}\n\t\tbw.WriteByte(byte(byt))\n\t}\n\n\tu, err := br.ReadBits(13)\n\n\tif err != nil {\n\t\tt.Error(\"ReadBits returned error err=\", err.Error())\n\t\treturn\n\t}\n\n\tbw.WriteBits(u, 13)\n\n\tbw.WriteBits(('!'<<12)|('.'<<4)|0x02, 20)\n\t// 0x2f == '/'\n\tbw.Flush(bitstream.One)\n\n\ts := buf.String()\n\n\tif s != \"hello!./\" {\n\t\tt.Errorf(\"expected 'hello!./', got=%x\", []byte(s))\n\t}\n}\n\n// Ensure bit reader can read random bits written to a stream.\nfunc TestBitReader_Quick(t *testing.T) {\n\tif err := quick.Check(func(values []uint64, nbits []uint) bool {\n\t\t// Limit nbits to 64.\n\t\tfor i := 0; i < len(values) && i < len(nbits); i++ {\n\t\t\tnbits[i] = (nbits[i] % 64) + 1\n\t\t\tvalues[i] = values[i] & (math.MaxUint64 >> (64 - nbits[i]))\n\t\t}\n\n\t\t// Write bits to a buffer.\n\t\tvar buf bytes.Buffer\n\t\tw := bitstream.NewWriter(&buf)\n\t\tfor i := 0; i < len(values) && i < len(nbits); i++ {\n\t\t\tw.WriteBits(values[i], int(nbits[i]))\n\t\t}\n\t\tw.Flush(bitstream.Zero)\n\n\t\t// Read bits from the buffer.\n\t\tr := tsm1.NewBitReader(buf.Bytes())\n\t\tfor i := 0; i < len(values) && i < len(nbits); i++ {\n\t\t\tv, err := r.ReadBits(nbits[i])\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"unexpected error(%d): %s\", i, err)\n\t\t\t\treturn false\n\t\t\t} else if v != values[i] {\n\t\t\t\tt.Errorf(\"value mismatch(%d): got=%d, exp=%d (nbits=%d)\", i, v, values[i], nbits[i])\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}, &quick.Config{\n\t\tValues: func(a 
[]reflect.Value, rand *rand.Rand) {\n\t\t\ta[0], _ = quick.Value(reflect.TypeOf([]uint64{}), rand)\n\t\t\ta[1], _ = quick.Value(reflect.TypeOf([]uint{}), rand)\n\t\t},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/bool.go",
    "content": "package tsm1\n\n// boolean encoding uses 1 bit per value.  Each compressed byte slice contains a 1 byte header\n// indicating the compression type, followed by a variable byte encoded length indicating\n// how many booleans are packed in the slice.  The remaining bytes contains 1 byte for every\n// 8 boolean values encoded.\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n)\n\nconst (\n\t// booleanUncompressed is an uncompressed boolean format.\n\t// Not yet implemented.\n\tbooleanUncompressed = 0\n\n\t// booleanCompressedBitPacked is an bit packed format using 1 bit per boolean\n\tbooleanCompressedBitPacked = 1\n)\n\n// BooleanEncoder encodes a series of booleans to an in-memory buffer.\ntype BooleanEncoder struct {\n\t// The encoded bytes\n\tbytes []byte\n\n\t// The current byte being encoded\n\tb byte\n\n\t// The number of bools packed into b\n\ti int\n\n\t// The total number of bools written\n\tn int\n}\n\n// NewBooleanEncoder returns a new instance of BooleanEncoder.\nfunc NewBooleanEncoder(sz int) BooleanEncoder {\n\treturn BooleanEncoder{\n\t\tbytes: make([]byte, 0, (sz+7)/8),\n\t}\n}\n\n// Reset sets the encoder to its initial state.\nfunc (e *BooleanEncoder) Reset() {\n\te.bytes = e.bytes[:0]\n\te.b = 0\n\te.i = 0\n\te.n = 0\n}\n\n// Write encodes b to the underlying buffer.\nfunc (e *BooleanEncoder) Write(b bool) {\n\t// If we have filled the current byte, flush it\n\tif e.i >= 8 {\n\t\te.flush()\n\t}\n\n\t// Use 1 bit for each boolean value, shift the current byte\n\t// by 1 and set the least signficant bit acordingly\n\te.b = e.b << 1\n\tif b {\n\t\te.b |= 1\n\t}\n\n\t// Increment the current boolean count\n\te.i++\n\t// Increment the total boolean count\n\te.n++\n}\n\nfunc (e *BooleanEncoder) flush() {\n\t// Pad remaining byte w/ 0s\n\tfor e.i < 8 {\n\t\te.b = e.b << 1\n\t\te.i++\n\t}\n\n\t// If we have bits set, append them to the byte slice\n\tif e.i > 0 {\n\t\te.bytes = append(e.bytes, e.b)\n\t\te.b = 0\n\t\te.i = 0\n\t}\n}\n\n// Flush 
is no-op\nfunc (e *BooleanEncoder) Flush() {}\n\n// Bytes returns a new byte slice containing the encoded booleans from previous calls to Write.\nfunc (e *BooleanEncoder) Bytes() ([]byte, error) {\n\t// Ensure the current byte is flushed\n\te.flush()\n\tb := make([]byte, 10+1)\n\n\t// Store the encoding type in the 4 high bits of the first byte\n\tb[0] = byte(booleanCompressedBitPacked) << 4\n\n\ti := 1\n\t// Encode the number of booleans written\n\ti += binary.PutUvarint(b[i:], uint64(e.n))\n\n\t// Append the packed booleans\n\treturn append(b[:i], e.bytes...), nil\n}\n\n// BooleanDecoder decodes a series of booleans from an in-memory buffer.\ntype BooleanDecoder struct {\n\tb   []byte\n\ti   int\n\tn   int\n\terr error\n}\n\n// SetBytes initializes the decoder with a new set of bytes to read from.\n// This must be called before calling any other methods.\nfunc (e *BooleanDecoder) SetBytes(b []byte) {\n\tif len(b) == 0 {\n\t\treturn\n\t}\n\n\t// First byte stores the encoding type, only have 1 bit-packet format\n\t// currently ignore for now.\n\tb = b[1:]\n\tcount, n := binary.Uvarint(b)\n\tif n <= 0 {\n\t\te.err = fmt.Errorf(\"BooleanDecoder: invalid count\")\n\t\treturn\n\t}\n\n\te.b = b[n:]\n\te.i = -1\n\te.n = int(count)\n\n\tif min := len(e.b) * 8; min < e.n {\n\t\t// Shouldn't happen - TSM file was truncated/corrupted\n\t\te.n = min\n\t}\n}\n\n// Next returns whether there are any bits remaining in the decoder.\n// It returns false if there was an error decoding.\n// The error is available on the Error method.\nfunc (e *BooleanDecoder) Next() bool {\n\tif e.err != nil {\n\t\treturn false\n\t}\n\n\te.i++\n\treturn e.i < e.n\n}\n\n// Read returns the next bit from the decoder.\nfunc (e *BooleanDecoder) Read() bool {\n\t// Index into the byte slice\n\tidx := e.i >> 3 // integer division by 8\n\n\t// Bit position\n\tpos := 7 - (e.i & 0x7)\n\n\t// The mask to select the bit\n\tmask := byte(1 << uint(pos))\n\n\t// The packed byte\n\tv := e.b[idx]\n\n\t// Returns 
true if the bit is set\n\treturn v&mask == mask\n}\n\n// Error returns the error encountered during decoding, if one occurred.\nfunc (e *BooleanDecoder) Error() error {\n\treturn e.err\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/bool_test.go",
    "content": "package tsm1_test\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\t\"testing/quick\"\n\n\t\"github.com/influxdata/influxdb/tsdb/engine/tsm1\"\n)\n\nfunc Test_BooleanEncoder_NoValues(t *testing.T) {\n\tenc := tsm1.NewBooleanEncoder(0)\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar dec tsm1.BooleanDecoder\n\tdec.SetBytes(b)\n\tif dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n}\n\nfunc Test_BooleanEncoder_Single(t *testing.T) {\n\tenc := tsm1.NewBooleanEncoder(1)\n\tv1 := true\n\tenc.Write(v1)\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar dec tsm1.BooleanDecoder\n\tdec.SetBytes(b)\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got false, exp true\")\n\t}\n\n\tif v1 != dec.Read() {\n\t\tt.Fatalf(\"unexpected value: got %v, exp %v\", dec.Read(), v1)\n\t}\n}\n\nfunc Test_BooleanEncoder_Multi_Compressed(t *testing.T) {\n\tenc := tsm1.NewBooleanEncoder(10)\n\n\tvalues := make([]bool, 10)\n\tfor i := range values {\n\t\tvalues[i] = i%2 == 0\n\t\tenc.Write(values[i])\n\t}\n\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif exp := 4; len(b) != exp {\n\t\tt.Fatalf(\"unexpected length: got %v, exp %v\", len(b), exp)\n\t}\n\n\tvar dec tsm1.BooleanDecoder\n\tdec.SetBytes(b)\n\n\tfor i, v := range values {\n\t\tif !dec.Next() {\n\t\t\tt.Fatalf(\"unexpected next value: got false, exp true\")\n\t\t}\n\t\tif v != dec.Read() {\n\t\t\tt.Fatalf(\"unexpected value at pos %d: got %v, exp %v\", i, dec.Read(), v)\n\t\t}\n\t}\n\n\tif dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n}\n\nfunc Test_BooleanEncoder_Quick(t *testing.T) {\n\tif err := quick.Check(func(values []bool) bool {\n\t\texpected := values\n\t\tif values == nil {\n\t\t\texpected = []bool{}\n\t\t}\n\t\t// Write values to encoder.\n\t\tenc := 
tsm1.NewBooleanEncoder(1024)\n\t\tfor _, v := range values {\n\t\t\tenc.Write(v)\n\t\t}\n\n\t\t// Retrieve compressed bytes.\n\t\tbuf, err := enc.Bytes()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\t// Read values out of decoder.\n\t\tgot := make([]bool, 0, len(values))\n\t\tvar dec tsm1.BooleanDecoder\n\t\tdec.SetBytes(buf)\n\t\tfor dec.Next() {\n\t\t\tgot = append(got, dec.Read())\n\t\t}\n\n\t\t// Verify that input and output values match.\n\t\tif !reflect.DeepEqual(expected, got) {\n\t\t\tt.Fatalf(\"mismatch:\\n\\nexp=%#v\\n\\ngot=%#v\\n\\n\", expected, got)\n\t\t}\n\n\t\treturn true\n\t}, nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc Test_BooleanDecoder_Corrupt(t *testing.T) {\n\tcases := []string{\n\t\t\"\",         // Empty\n\t\t\"\\x10\\x90\", // Packed: invalid count\n\t\t\"\\x10\\x7f\", // Packed: count greater than remaining bits, multiple bytes expected\n\t\t\"\\x10\\x01\", // Packed: count greater than remaining bits, one byte expected\n\t}\n\n\tfor _, c := range cases {\n\t\tvar dec tsm1.BooleanDecoder\n\t\tdec.SetBytes([]byte(c))\n\t\tif dec.Next() {\n\t\t\tt.Fatalf(\"exp next == false, got true for case %q\", c)\n\t\t}\n\t}\n}\n\nfunc BenchmarkBooleanDecoder_2048(b *testing.B) { benchmarkBooleanDecoder(b, 2048) }\n\nfunc benchmarkBooleanDecoder(b *testing.B, size int) {\n\te := tsm1.NewBooleanEncoder(size)\n\tfor i := 0; i < size; i++ {\n\t\te.Write(i&1 == 1)\n\t}\n\tbytes, err := e.Bytes()\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tvar d tsm1.BooleanDecoder\n\t\td.SetBytes(bytes)\n\n\t\tvar n int\n\t\tfor d.Next() {\n\t\t\t_ = d.Read()\n\t\t\tn++\n\t\t}\n\t\tif n != size {\n\t\t\tb.Fatalf(\"expected to read %d booleans, but read %d\", size, n)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache.go",
    "content": "package tsm1\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t\"github.com/uber-go/zap\"\n)\n\n// ringShards specifies the number of partitions that the hash ring used to\n// store the entry mappings contains. It must be a power of 2. From empirical\n// testing, a value above the number of cores on the machine does not provide\n// any additional benefit. For now we'll set it to the number of cores on the\n// largest box we could imagine running influx.\nconst ringShards = 4096\n\nvar (\n\t// ErrSnapshotInProgress is returned if a snapshot is attempted while one is already running.\n\tErrSnapshotInProgress = fmt.Errorf(\"snapshot in progress\")\n)\n\n// ErrCacheMemorySizeLimitExceeded returns an error indicating an operation\n// could not be completed due to exceeding the cache-max-memory-size setting.\nfunc ErrCacheMemorySizeLimitExceeded(n, limit uint64) error {\n\treturn fmt.Errorf(\"cache-max-memory-size exceeded: (%d/%d)\", n, limit)\n}\n\n// entry is a set of values and some metadata.\ntype entry struct {\n\tmu     sync.RWMutex\n\tvalues Values // All stored values.\n\n\t// The type of values stored. Read only so doesn't need to be protected by\n\t// mu.\n\tvtype int\n}\n\n// newEntryValues returns a new instance of entry with the given values.  
If the\n// values are not valid, an error is returned.\n//\n// newEntryValues takes an optional hint to indicate the initial buffer size.\n// The hint is only respected if it's positive.\nfunc newEntryValues(values []Value, hint int) (*entry, error) {\n\t// Ensure we start off with a reasonably sized values slice.\n\tif hint < 32 {\n\t\thint = 32\n\t}\n\n\te := &entry{}\n\tif len(values) > hint {\n\t\te.values = make(Values, 0, len(values))\n\t} else {\n\t\te.values = make(Values, 0, hint)\n\t}\n\te.values = append(e.values, values...)\n\n\t// No values, don't check types and ordering\n\tif len(values) == 0 {\n\t\treturn e, nil\n\t}\n\n\tet := valueType(values[0])\n\tfor _, v := range values {\n\t\t// Make sure all the values are the same type\n\t\tif et != valueType(v) {\n\t\t\treturn nil, tsdb.ErrFieldTypeConflict\n\t\t}\n\t}\n\n\t// Set the type of values stored.\n\te.vtype = et\n\n\treturn e, nil\n}\n\n// add adds the given values to the entry.\nfunc (e *entry) add(values []Value) error {\n\tif len(values) == 0 {\n\t\treturn nil // Nothing to do.\n\t}\n\n\t// Are any of the new values the wrong type?\n\tfor _, v := range values {\n\t\tif e.vtype != valueType(v) {\n\t\t\treturn tsdb.ErrFieldTypeConflict\n\t\t}\n\t}\n\n\t// entry currently has no values, so add the new ones and we're done.\n\te.mu.Lock()\n\tif len(e.values) == 0 {\n\t\t// Ensure we start off with a reasonably sized values slice.\n\t\tif len(values) < 32 {\n\t\t\te.values = make(Values, 0, 32)\n\t\t\te.values = append(e.values, values...)\n\t\t} else {\n\t\t\te.values = values\n\t\t}\n\t\te.mu.Unlock()\n\t\treturn nil\n\t}\n\n\t// Append the new values to the existing ones...\n\te.values = append(e.values, values...)\n\te.mu.Unlock()\n\treturn nil\n}\n\n// deduplicate sorts and orders the entry's values. 
If values are already deduped and sorted,\n// the function does no work and simply returns.\nfunc (e *entry) deduplicate() {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\n\tif len(e.values) == 0 {\n\t\treturn\n\t}\n\te.values = e.values.Deduplicate()\n}\n\n// count returns the number of values in this entry.\nfunc (e *entry) count() int {\n\te.mu.RLock()\n\tn := len(e.values)\n\te.mu.RUnlock()\n\treturn n\n}\n\n// filter removes all values with timestamps between min and max inclusive.\nfunc (e *entry) filter(min, max int64) {\n\te.mu.Lock()\n\te.values = e.values.Exclude(min, max)\n\te.mu.Unlock()\n}\n\n// size returns the size of this entry in bytes.\nfunc (e *entry) size() int {\n\te.mu.RLock()\n\tsz := e.values.Size()\n\te.mu.RUnlock()\n\treturn sz\n}\n\n// InfluxQLType returns for the entry the data type of its values.\nfunc (e *entry) InfluxQLType() (influxql.DataType, error) {\n\te.mu.RLock()\n\tdefer e.mu.RUnlock()\n\treturn e.values.InfluxQLType()\n}\n\n// Statistics gathered by the Cache.\nconst (\n\t// levels - point in time measures\n\n\tstatCacheMemoryBytes = \"memBytes\"      // level: Size of in-memory cache in bytes\n\tstatCacheDiskBytes   = \"diskBytes\"     // level: Size of on-disk snapshots in bytes\n\tstatSnapshots        = \"snapshotCount\" // level: Number of active snapshots.\n\tstatCacheAgeMs       = \"cacheAgeMs\"    // level: Number of milliseconds since cache was last snapshoted at sample time\n\n\t// counters - accumulative measures\n\n\tstatCachedBytes         = \"cachedBytes\"         // counter: Total number of bytes written into snapshots.\n\tstatWALCompactionTimeMs = \"WALCompactionTimeMs\" // counter: Total number of milliseconds spent compacting snapshots\n\n\tstatCacheWriteOK      = \"writeOk\"\n\tstatCacheWriteErr     = \"writeErr\"\n\tstatCacheWriteDropped = \"writeDropped\"\n)\n\n// storer is the interface that descibes a cache's store.\ntype storer interface {\n\tentry(key string) (*entry, bool)                // Get an entry by 
its key.\n\twrite(key string, values Values) error          // Write an entry to the store.\n\tadd(key string, entry *entry)                   // Add a new entry to the store.\n\tremove(key string)                              // Remove an entry from the store.\n\tkeys(sorted bool) []string                      // Return an optionally sorted slice of entry keys.\n\tapply(f func(string, *entry) error) error       // Apply f to all entries in the store in parallel.\n\tapplySerial(f func(string, *entry) error) error // Apply f to all entries in serial.\n\treset()                                         // Reset the store to an initial unused state.\n}\n\n// Cache maintains an in-memory store of Values for a set of keys.\ntype Cache struct {\n\t// Due to a bug in atomic  size needs to be the first word in the struct, as\n\t// that's the only place where you're guaranteed to be 64-bit aligned on a\n\t// 32 bit system. See: https://golang.org/pkg/sync/atomic/#pkg-note-BUG\n\tsize         uint64\n\tsnapshotSize uint64\n\n\tmu      sync.RWMutex\n\tstore   storer\n\tmaxSize uint64\n\n\t// snapshots are the cache objects that are currently being written to tsm files\n\t// they're kept in memory while flushing so they can be queried along with the cache.\n\t// they are read only and should never be modified\n\tsnapshot     *Cache\n\tsnapshotting bool\n\n\t// This number is the number of pending or failed WriteSnaphot attempts since the last successful one.\n\tsnapshotAttempts int\n\n\tstats        *CacheStatistics\n\tlastSnapshot time.Time\n\n\t// A one time synchronization used to initial the cache with a store.  
Since the store can allocate a\n\t// a large amount memory across shards, we lazily create it.\n\tinitialize       atomic.Value\n\tinitializedCount uint32\n}\n\n// NewCache returns an instance of a cache which will use a maximum of maxSize bytes of memory.\n// Only used for engine caches, never for snapshots.\nfunc NewCache(maxSize uint64, path string) *Cache {\n\tc := &Cache{\n\t\tmaxSize:      maxSize,\n\t\tstore:        emptyStore{},\n\t\tstats:        &CacheStatistics{},\n\t\tlastSnapshot: time.Now(),\n\t}\n\tc.initialize.Store(&sync.Once{})\n\tc.UpdateAge()\n\tc.UpdateCompactTime(0)\n\tc.updateCachedBytes(0)\n\tc.updateMemSize(0)\n\tc.updateSnapshots()\n\treturn c\n}\n\n// CacheStatistics hold statistics related to the cache.\ntype CacheStatistics struct {\n\tMemSizeBytes        int64\n\tDiskSizeBytes       int64\n\tSnapshotCount       int64\n\tCacheAgeMs          int64\n\tCachedBytes         int64\n\tWALCompactionTimeMs int64\n\tWriteOK             int64\n\tWriteErr            int64\n\tWriteDropped        int64\n}\n\n// Statistics returns statistics for periodic monitoring.\nfunc (c *Cache) Statistics(tags map[string]string) []models.Statistic {\n\treturn []models.Statistic{{\n\t\tName: \"tsm1_cache\",\n\t\tTags: tags,\n\t\tValues: map[string]interface{}{\n\t\t\tstatCacheMemoryBytes:    atomic.LoadInt64(&c.stats.MemSizeBytes),\n\t\t\tstatCacheDiskBytes:      atomic.LoadInt64(&c.stats.DiskSizeBytes),\n\t\t\tstatSnapshots:           atomic.LoadInt64(&c.stats.SnapshotCount),\n\t\t\tstatCacheAgeMs:          atomic.LoadInt64(&c.stats.CacheAgeMs),\n\t\t\tstatCachedBytes:         atomic.LoadInt64(&c.stats.CachedBytes),\n\t\t\tstatWALCompactionTimeMs: atomic.LoadInt64(&c.stats.WALCompactionTimeMs),\n\t\t\tstatCacheWriteOK:        atomic.LoadInt64(&c.stats.WriteOK),\n\t\t\tstatCacheWriteErr:       atomic.LoadInt64(&c.stats.WriteErr),\n\t\t\tstatCacheWriteDropped:   atomic.LoadInt64(&c.stats.WriteDropped),\n\t\t},\n\t}}\n}\n\n// init initializes the cache and allocates 
the underlying store.  Once initialized,\n// the store re-used until Freed.\nfunc (c *Cache) init() {\n\tif !atomic.CompareAndSwapUint32(&c.initializedCount, 0, 1) {\n\t\treturn\n\t}\n\n\tc.mu.Lock()\n\tc.store, _ = newring(ringShards)\n\tc.mu.Unlock()\n}\n\n// Free releases the underlying store and memory held by the Cache.\nfunc (c *Cache) Free() {\n\tif !atomic.CompareAndSwapUint32(&c.initializedCount, 1, 0) {\n\t\treturn\n\t}\n\n\tc.mu.Lock()\n\tc.store = emptyStore{}\n\tc.mu.Unlock()\n}\n\n// Write writes the set of values for the key to the cache. This function is goroutine-safe.\n// It returns an error if the cache will exceed its max size by adding the new values.\nfunc (c *Cache) Write(key string, values []Value) error {\n\tc.init()\n\taddedSize := uint64(Values(values).Size())\n\n\t// Enough room in the cache?\n\tlimit := c.maxSize\n\tn := c.Size() + addedSize\n\n\tif limit > 0 && n > limit {\n\t\tatomic.AddInt64(&c.stats.WriteErr, 1)\n\t\treturn ErrCacheMemorySizeLimitExceeded(n, limit)\n\t}\n\n\tif err := c.store.write(key, values); err != nil {\n\t\tatomic.AddInt64(&c.stats.WriteErr, 1)\n\t\treturn err\n\t}\n\n\t// Update the cache size and the memory size stat.\n\tc.increaseSize(addedSize)\n\tc.updateMemSize(int64(addedSize))\n\tatomic.AddInt64(&c.stats.WriteOK, 1)\n\n\treturn nil\n}\n\n// WriteMulti writes the map of keys and associated values to the cache. This\n// function is goroutine-safe. It returns an error if the cache will exceeded\n// its max size by adding the new values.  The write attempts to write as many\n// values as possible.  
If one key fails, the others can still succeed and an\n// error will be returned.\nfunc (c *Cache) WriteMulti(values map[string][]Value) error {\n\tc.init()\n\tvar addedSize uint64\n\tfor _, v := range values {\n\t\taddedSize += uint64(Values(v).Size())\n\t}\n\n\t// Enough room in the cache?\n\tlimit := c.maxSize // maxSize is safe for reading without a lock.\n\tn := c.Size() + addedSize\n\tif limit > 0 && n > limit {\n\t\tatomic.AddInt64(&c.stats.WriteErr, 1)\n\t\treturn ErrCacheMemorySizeLimitExceeded(n, limit)\n\t}\n\n\tvar werr error\n\tc.mu.RLock()\n\tstore := c.store\n\tc.mu.RUnlock()\n\n\t// We'll optimistially set size here, and then decrement it for write errors.\n\tc.increaseSize(addedSize)\n\tfor k, v := range values {\n\t\tif err := store.write(k, v); err != nil {\n\t\t\t// The write failed, hold onto the error and adjust the size delta.\n\t\t\twerr = err\n\t\t\taddedSize -= uint64(Values(v).Size())\n\t\t\tc.decreaseSize(uint64(Values(v).Size()))\n\t\t}\n\t}\n\n\t// Some points in the batch were dropped.  
An error is returned so\n\t// error stat is incremented as well.\n\tif werr != nil {\n\t\tatomic.AddInt64(&c.stats.WriteDropped, 1)\n\t\tatomic.AddInt64(&c.stats.WriteErr, 1)\n\t}\n\n\t// Update the memory size stat\n\tc.updateMemSize(int64(addedSize))\n\tatomic.AddInt64(&c.stats.WriteOK, 1)\n\n\treturn werr\n}\n\n// Snapshot takes a snapshot of the current cache, adds it to the slice of caches that\n// are being flushed, and resets the current cache with new values.\nfunc (c *Cache) Snapshot() (*Cache, error) {\n\tc.init()\n\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif c.snapshotting {\n\t\treturn nil, ErrSnapshotInProgress\n\t}\n\n\tc.snapshotting = true\n\tc.snapshotAttempts++ // increment the number of times we tried to do this\n\n\t// If no snapshot exists, create a new one, otherwise update the existing snapshot\n\tif c.snapshot == nil {\n\t\tstore, err := newring(ringShards)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tc.snapshot = &Cache{\n\t\t\tstore: store,\n\t\t}\n\t}\n\n\t// Did a prior snapshot exist that failed?  If so, return the existing\n\t// snapshot to retry.\n\tif c.snapshot.Size() > 0 {\n\t\treturn c.snapshot, nil\n\t}\n\n\tc.snapshot.store, c.store = c.store, c.snapshot.store\n\tsnapshotSize := c.Size()\n\n\t// Save the size of the snapshot on the snapshot cache\n\tatomic.StoreUint64(&c.snapshot.size, snapshotSize)\n\t// Save the size of the snapshot on the live cache\n\tatomic.StoreUint64(&c.snapshotSize, snapshotSize)\n\n\t// Reset the cache's store.\n\tc.store.reset()\n\tatomic.StoreUint64(&c.size, 0)\n\tc.lastSnapshot = time.Now()\n\n\tc.updateCachedBytes(snapshotSize) // increment the number of bytes added to the snapshot\n\tc.updateSnapshots()\n\n\treturn c.snapshot, nil\n}\n\n// Deduplicate sorts the snapshot before returning it. 
The compactor and any queries\n// coming in while it writes will need the values sorted.\nfunc (c *Cache) Deduplicate() {\n\tc.mu.RLock()\n\tstore := c.store\n\tc.mu.RUnlock()\n\n\t// Apply a function that simply calls deduplicate on each entry in the ring.\n\t// apply cannot return an error in this invocation.\n\t_ = store.apply(func(_ string, e *entry) error { e.deduplicate(); return nil })\n}\n\n// ClearSnapshot removes the snapshot cache from the list of flushing caches and\n// adjusts the size.\nfunc (c *Cache) ClearSnapshot(success bool) {\n\tc.init()\n\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tc.snapshotting = false\n\n\tif success {\n\t\tc.snapshotAttempts = 0\n\t\tc.updateMemSize(-int64(atomic.LoadUint64(&c.snapshotSize))) // decrement the number of bytes in cache\n\n\t\t// Reset the snapshot's store, and reset the snapshot to a fresh Cache.\n\t\tc.snapshot.store.reset()\n\t\tc.snapshot = &Cache{\n\t\t\tstore: c.snapshot.store,\n\t\t}\n\n\t\tatomic.StoreUint64(&c.snapshotSize, 0)\n\t\tc.updateSnapshots()\n\t}\n}\n\n// Size returns the number of point-calcuated bytes the cache currently uses.\nfunc (c *Cache) Size() uint64 {\n\treturn atomic.LoadUint64(&c.size) + atomic.LoadUint64(&c.snapshotSize)\n}\n\n// increaseSize increases size by delta.\nfunc (c *Cache) increaseSize(delta uint64) {\n\tatomic.AddUint64(&c.size, delta)\n}\n\n// decreaseSize decreases size by delta.\nfunc (c *Cache) decreaseSize(delta uint64) {\n\t// Per sync/atomic docs, bit-flip delta minus one to perform subtraction within AddUint64.\n\tatomic.AddUint64(&c.size, ^(delta - 1))\n}\n\n// MaxSize returns the maximum number of bytes the cache may consume.\nfunc (c *Cache) MaxSize() uint64 {\n\treturn c.maxSize\n}\n\n// Keys returns a sorted slice of all keys under management by the cache.\nfunc (c *Cache) Keys() []string {\n\tc.mu.RLock()\n\tstore := c.store\n\tc.mu.RUnlock()\n\treturn store.keys(true)\n}\n\n// unsortedKeys returns a slice of all keys under management by the cache. 
The\n// keys are not sorted.\nfunc (c *Cache) unsortedKeys() []string {\n\tc.mu.RLock()\n\tstore := c.store\n\tc.mu.RUnlock()\n\treturn store.keys(false)\n}\n\n// Values returns a copy of all values, deduped and sorted, for the given key.\nfunc (c *Cache) Values(key string) Values {\n\tvar snapshotEntries *entry\n\n\tc.mu.RLock()\n\te, ok := c.store.entry(key)\n\tif c.snapshot != nil {\n\t\tsnapshotEntries, _ = c.snapshot.store.entry(key)\n\t}\n\tc.mu.RUnlock()\n\n\tif !ok {\n\t\tif snapshotEntries == nil {\n\t\t\t// No values in hot cache or snapshots.\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\te.deduplicate()\n\t}\n\n\t// Build the sequence of entries that will be returned, in the correct order.\n\t// Calculate the required size of the destination buffer.\n\tvar entries []*entry\n\tsz := 0\n\n\tif snapshotEntries != nil {\n\t\tsnapshotEntries.deduplicate() // guarantee we are deduplicated\n\t\tentries = append(entries, snapshotEntries)\n\t\tsz += snapshotEntries.count()\n\t}\n\n\tif e != nil {\n\t\tentries = append(entries, e)\n\t\tsz += e.count()\n\t}\n\n\t// Any entries? If not, return.\n\tif sz == 0 {\n\t\treturn nil\n\t}\n\n\t// Create the buffer, and copy all hot values and snapshots. 
Individual\n\t// entries are sorted at this point, so now the code has to check if the\n\t// resultant buffer will be sorted from start to finish.\n\tvalues := make(Values, sz)\n\tn := 0\n\tfor _, e := range entries {\n\t\te.mu.RLock()\n\t\tn += copy(values[n:], e.values)\n\t\te.mu.RUnlock()\n\t}\n\tvalues = values[:n]\n\tvalues = values.Deduplicate()\n\n\treturn values\n}\n\n// Delete removes all values for the given keys from the cache.\nfunc (c *Cache) Delete(keys []string) {\n\tc.DeleteRange(keys, math.MinInt64, math.MaxInt64)\n}\n\n// DeleteRange removes the values for all keys containing points\n// with timestamps between between min and max from the cache.\n//\n// TODO(edd): Lock usage could possibly be optimised if necessary.\nfunc (c *Cache) DeleteRange(keys []string, min, max int64) {\n\tc.init()\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tfor _, k := range keys {\n\t\t// Make sure key exist in the cache, skip if it does not\n\t\te, ok := c.store.entry(k)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\torigSize := uint64(e.size())\n\t\tif min == math.MinInt64 && max == math.MaxInt64 {\n\t\t\tc.decreaseSize(origSize)\n\t\t\tc.store.remove(k)\n\t\t\tcontinue\n\t\t}\n\n\t\te.filter(min, max)\n\t\tif e.count() == 0 {\n\t\t\tc.store.remove(k)\n\t\t\tc.decreaseSize(origSize)\n\t\t\tcontinue\n\t\t}\n\n\t\tc.decreaseSize(origSize - uint64(e.size()))\n\t}\n\tatomic.StoreInt64(&c.stats.MemSizeBytes, int64(c.Size()))\n}\n\n// SetMaxSize updates the memory limit of the cache.\nfunc (c *Cache) SetMaxSize(size uint64) {\n\tc.mu.Lock()\n\tc.maxSize = size\n\tc.mu.Unlock()\n}\n\n// values returns the values for the key. 
It assumes the data is already sorted.\n// It doesn't lock the cache but it does read-lock the entry if there is one for the key.\n// values should only be used in compact.go in the CacheKeyIterator.\nfunc (c *Cache) values(key string) Values {\n\te, _ := c.store.entry(key)\n\tif e == nil {\n\t\treturn nil\n\t}\n\te.mu.RLock()\n\tv := e.values\n\te.mu.RUnlock()\n\treturn v\n}\n\n// ApplyEntryFn applies the function f to each entry in the Cache.\n// ApplyEntryFn calls f on each entry in turn, within the same goroutine.\n// It is safe for use by multiple goroutines.\nfunc (c *Cache) ApplyEntryFn(f func(key string, entry *entry) error) error {\n\tc.mu.RLock()\n\tstore := c.store\n\tc.mu.RUnlock()\n\treturn store.applySerial(f)\n}\n\n// CacheLoader processes a set of WAL segment files, and loads a cache with the data\n// contained within those files.  Processing of the supplied files take place in the\n// order they exist in the files slice.\ntype CacheLoader struct {\n\tfiles []string\n\n\tLogger zap.Logger\n}\n\n// NewCacheLoader returns a new instance of a CacheLoader.\nfunc NewCacheLoader(files []string) *CacheLoader {\n\treturn &CacheLoader{\n\t\tfiles:  files,\n\t\tLogger: zap.New(zap.NullEncoder()),\n\t}\n}\n\n// Load returns a cache loaded with the data contained within the segment files.\n// If, during reading of a segment file, corruption is encountered, that segment\n// file is truncated up to and including the last valid byte, and processing\n// continues with the next segment file.\nfunc (cl *CacheLoader) Load(cache *Cache) error {\n\n\tvar r *WALSegmentReader\n\tfor _, fn := range cl.files {\n\t\tif err := func() error {\n\t\t\tf, err := os.OpenFile(fn, os.O_CREATE|os.O_RDWR, 0666)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer f.Close()\n\n\t\t\t// Log some information about the segments.\n\t\t\tstat, err := os.Stat(f.Name())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcl.Logger.Info(fmt.Sprintf(\"reading file %s, size 
%d\", f.Name(), stat.Size()))\n\n\t\t\t// Nothing to read, skip it\n\t\t\tif stat.Size() == 0 {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif r == nil {\n\t\t\t\tr = NewWALSegmentReader(f)\n\t\t\t\tdefer r.Close()\n\t\t\t} else {\n\t\t\t\tr.Reset(f)\n\t\t\t}\n\n\t\t\tfor r.Next() {\n\t\t\t\tentry, err := r.Read()\n\t\t\t\tif err != nil {\n\t\t\t\t\tn := r.Count()\n\t\t\t\t\tcl.Logger.Info(fmt.Sprintf(\"file %s corrupt at position %d, truncating\", f.Name(), n))\n\t\t\t\t\tif err := f.Truncate(n); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tswitch t := entry.(type) {\n\t\t\t\tcase *WriteWALEntry:\n\t\t\t\t\tif err := cache.WriteMulti(t.Values); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\tcase *DeleteRangeWALEntry:\n\t\t\t\t\tcache.DeleteRange(t.Keys, t.Min, t.Max)\n\t\t\t\tcase *DeleteWALEntry:\n\t\t\t\t\tcache.Delete(t.Keys)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn r.Close()\n\t\t}(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// WithLogger sets the logger on the CacheLoader.\nfunc (cl *CacheLoader) WithLogger(log zap.Logger) {\n\tcl.Logger = log.With(zap.String(\"service\", \"cacheloader\"))\n}\n\n// UpdateAge updates the age statistic based on the current time.\nfunc (c *Cache) UpdateAge() {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\tageStat := int64(time.Since(c.lastSnapshot) / time.Millisecond)\n\tatomic.StoreInt64(&c.stats.CacheAgeMs, ageStat)\n}\n\n// UpdateCompactTime updates WAL compaction time statistic based on d.\nfunc (c *Cache) UpdateCompactTime(d time.Duration) {\n\tatomic.AddInt64(&c.stats.WALCompactionTimeMs, int64(d/time.Millisecond))\n}\n\n// updateCachedBytes increases the cachedBytes counter by b.\nfunc (c *Cache) updateCachedBytes(b uint64) {\n\tatomic.AddInt64(&c.stats.CachedBytes, int64(b))\n}\n\n// updateMemSize updates the memSize level by b.\nfunc (c *Cache) updateMemSize(b int64) {\n\tatomic.AddInt64(&c.stats.MemSizeBytes, b)\n}\n\nfunc valueType(v Value) int 
{\n\tswitch v.(type) {\n\tcase FloatValue:\n\t\treturn 1\n\tcase IntegerValue:\n\t\treturn 2\n\tcase StringValue:\n\t\treturn 3\n\tcase BooleanValue:\n\t\treturn 4\n\tdefault:\n\t\treturn 0\n\t}\n}\n\n// updateSnapshots updates the snapshotsCount and the diskSize levels.\nfunc (c *Cache) updateSnapshots() {\n\t// Update disk stats\n\tatomic.StoreInt64(&c.stats.DiskSizeBytes, int64(atomic.LoadUint64(&c.snapshotSize)))\n\tatomic.StoreInt64(&c.stats.SnapshotCount, int64(c.snapshotAttempts))\n}\n\ntype emptyStore struct{}\n\nfunc (e emptyStore) entry(key string) (*entry, bool)                { return nil, false }\nfunc (e emptyStore) write(key string, values Values) error          { return nil }\nfunc (e emptyStore) add(key string, entry *entry)                   {}\nfunc (e emptyStore) remove(key string)                              {}\nfunc (e emptyStore) keys(sorted bool) []string                      { return nil }\nfunc (e emptyStore) apply(f func(string, *entry) error) error       { return nil }\nfunc (e emptyStore) applySerial(f func(string, *entry) error) error { return nil }\nfunc (e emptyStore) reset()                                         {}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache_race_test.go",
    "content": "package tsm1_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/tsdb/engine/tsm1\"\n)\n\nfunc TestCacheCheckConcurrentReadsAreSafe(t *testing.T) {\n\tvalues := make(tsm1.Values, 1000)\n\ttimestamps := make([]int64, len(values))\n\tseries := make([]string, 100)\n\tfor i := range timestamps {\n\t\ttimestamps[i] = int64(rand.Int63n(int64(len(values))))\n\t}\n\n\tfor i := range values {\n\t\tvalues[i] = tsm1.NewValue(timestamps[i*len(timestamps)/len(values)], float64(i))\n\t}\n\n\tfor i := range series {\n\t\tseries[i] = fmt.Sprintf(\"series%d\", i)\n\t}\n\n\twg := sync.WaitGroup{}\n\tc := tsm1.NewCache(1000000, \"\")\n\n\tch := make(chan struct{})\n\tfor _, s := range series {\n\t\tfor _, v := range values {\n\t\t\tc.Write(s, tsm1.Values{v})\n\t\t}\n\t\twg.Add(3)\n\t\tgo func(s string) {\n\t\t\tdefer wg.Done()\n\t\t\t<-ch\n\t\t\tc.Values(s)\n\t\t}(s)\n\t\tgo func(s string) {\n\t\t\tdefer wg.Done()\n\t\t\t<-ch\n\t\t\tc.Values(s)\n\t\t}(s)\n\t\tgo func(s string) {\n\t\t\tdefer wg.Done()\n\t\t\t<-ch\n\t\t\tc.Values(s)\n\t\t}(s)\n\t}\n\tclose(ch)\n\twg.Wait()\n}\n\nfunc TestCacheRace(t *testing.T) {\n\tvalues := make(tsm1.Values, 1000)\n\ttimestamps := make([]int64, len(values))\n\tseries := make([]string, 100)\n\tfor i := range timestamps {\n\t\ttimestamps[i] = int64(rand.Int63n(int64(len(values))))\n\t}\n\n\tfor i := range values {\n\t\tvalues[i] = tsm1.NewValue(timestamps[i*len(timestamps)/len(values)], float64(i))\n\t}\n\n\tfor i := range series {\n\t\tseries[i] = fmt.Sprintf(\"series%d\", i)\n\t}\n\n\twg := sync.WaitGroup{}\n\tc := tsm1.NewCache(1000000, \"\")\n\n\tch := make(chan struct{})\n\tfor _, s := range series {\n\t\tfor _, v := range values {\n\t\t\tc.Write(s, tsm1.Values{v})\n\t\t}\n\t\twg.Add(1)\n\t\tgo func(s string) {\n\t\t\tdefer wg.Done()\n\t\t\t<-ch\n\t\t\tc.Values(s)\n\t\t}(s)\n\t}\n\n\terrC := make(chan error)\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\t<-ch\n\t\ts, 
err := c.Snapshot()\n\t\tif err == tsm1.ErrSnapshotInProgress {\n\t\t\treturn\n\t\t}\n\n\t\tif err != nil {\n\t\t\terrC <- fmt.Errorf(\"failed to snapshot cache: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\ts.Deduplicate()\n\t\tc.ClearSnapshot(true)\n\t}()\n\n\tclose(ch)\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(errC)\n\t}()\n\n\tfor err := range errC {\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n}\n\nfunc TestCacheRace2Compacters(t *testing.T) {\n\tvalues := make(tsm1.Values, 1000)\n\ttimestamps := make([]int64, len(values))\n\tseries := make([]string, 100)\n\tfor i := range timestamps {\n\t\ttimestamps[i] = int64(rand.Int63n(int64(len(values))))\n\t}\n\n\tfor i := range values {\n\t\tvalues[i] = tsm1.NewValue(timestamps[i*len(timestamps)/len(values)], float64(i))\n\t}\n\n\tfor i := range series {\n\t\tseries[i] = fmt.Sprintf(\"series%d\", i)\n\t}\n\n\twg := sync.WaitGroup{}\n\tc := tsm1.NewCache(1000000, \"\")\n\n\tch := make(chan struct{})\n\tfor _, s := range series {\n\t\tfor _, v := range values {\n\t\t\tc.Write(s, tsm1.Values{v})\n\t\t}\n\t\twg.Add(1)\n\t\tgo func(s string) {\n\t\t\tdefer wg.Done()\n\t\t\t<-ch\n\t\t\tc.Values(s)\n\t\t}(s)\n\t}\n\tfileCounter := 0\n\tmapFiles := map[int]bool{}\n\tmu := sync.Mutex{}\n\terrC := make(chan error)\n\tfor i := 0; i < 2; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t<-ch\n\t\t\ts, err := c.Snapshot()\n\t\t\tif err == tsm1.ErrSnapshotInProgress {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\terrC <- fmt.Errorf(\"failed to snapshot cache: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmu.Lock()\n\t\t\tmapFiles[fileCounter] = true\n\t\t\tfileCounter++\n\t\t\tmyFiles := map[int]bool{}\n\t\t\tfor k, e := range mapFiles {\n\t\t\t\tmyFiles[k] = e\n\t\t\t}\n\t\t\tmu.Unlock()\n\t\t\ts.Deduplicate()\n\t\t\tc.ClearSnapshot(true)\n\t\t\tmu.Lock()\n\t\t\tdefer mu.Unlock()\n\t\t\tfor k, _ := range myFiles {\n\t\t\t\tif _, ok := mapFiles[k]; !ok {\n\t\t\t\t\terrC <- fmt.Errorf(\"something else 
deleted one of my files\")\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\tdelete(mapFiles, k)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\tclose(ch)\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(errC)\n\t}()\n\n\tfor err := range errC {\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache_test.go",
    "content": "package tsm1\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"math\"\n\t\"math/rand\"\n\t\"os\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\n\t\"github.com/golang/snappy\"\n)\n\nfunc TestCache_NewCache(t *testing.T) {\n\tc := NewCache(100, \"\")\n\tif c == nil {\n\t\tt.Fatalf(\"failed to create new cache\")\n\t}\n\n\tif c.MaxSize() != 100 {\n\t\tt.Fatalf(\"new cache max size not correct\")\n\t}\n\tif c.Size() != 0 {\n\t\tt.Fatalf(\"new cache size not correct\")\n\t}\n\tif len(c.Keys()) != 0 {\n\t\tt.Fatalf(\"new cache keys not correct: %v\", c.Keys())\n\t}\n}\n\nfunc TestCache_CacheWrite(t *testing.T) {\n\tv0 := NewValue(1, 1.0)\n\tv1 := NewValue(2, 2.0)\n\tv2 := NewValue(3, 3.0)\n\tvalues := Values{v0, v1, v2}\n\tvaluesSize := uint64(v0.Size() + v1.Size() + v2.Size())\n\n\tc := NewCache(3*valuesSize, \"\")\n\n\tif err := c.Write(\"foo\", values); err != nil {\n\t\tt.Fatalf(\"failed to write key foo to cache: %s\", err.Error())\n\t}\n\tif err := c.Write(\"bar\", values); err != nil {\n\t\tt.Fatalf(\"failed to write key foo to cache: %s\", err.Error())\n\t}\n\tif n := c.Size(); n != 2*valuesSize {\n\t\tt.Fatalf(\"cache size incorrect after 2 writes, exp %d, got %d\", 2*valuesSize, n)\n\t}\n\n\tif exp, keys := []string{\"bar\", \"foo\"}, c.Keys(); !reflect.DeepEqual(keys, exp) {\n\t\tt.Fatalf(\"cache keys incorrect after 2 writes, exp %v, got %v\", exp, keys)\n\t}\n}\n\nfunc TestCache_CacheWrite_TypeConflict(t *testing.T) {\n\tv0 := NewValue(1, 1.0)\n\tv1 := NewValue(2, int(64))\n\tvalues := Values{v0, v1}\n\tvaluesSize := v0.Size() + v1.Size()\n\n\tc := NewCache(uint64(2*valuesSize), \"\")\n\n\tif err := c.Write(\"foo\", values[:1]); err != nil {\n\t\tt.Fatalf(\"failed to write key foo to cache: %s\", err.Error())\n\t}\n\n\tif err := c.Write(\"foo\", values[1:]); err == nil {\n\t\tt.Fatalf(\"expected field type conflict\")\n\t}\n\n\tif exp, got := uint64(v0.Size()), c.Size(); exp != got 
{\n\t\tt.Fatalf(\"cache size incorrect after 2 writes, exp %d, got %d\", exp, got)\n\t}\n}\n\nfunc TestCache_CacheWriteMulti(t *testing.T) {\n\tv0 := NewValue(1, 1.0)\n\tv1 := NewValue(2, 2.0)\n\tv2 := NewValue(3, 3.0)\n\tvalues := Values{v0, v1, v2}\n\tvaluesSize := uint64(v0.Size() + v1.Size() + v2.Size())\n\n\tc := NewCache(30*valuesSize, \"\")\n\n\tif err := c.WriteMulti(map[string][]Value{\"foo\": values, \"bar\": values}); err != nil {\n\t\tt.Fatalf(\"failed to write key foo to cache: %s\", err.Error())\n\t}\n\tif n := c.Size(); n != 2*valuesSize {\n\t\tt.Fatalf(\"cache size incorrect after 2 writes, exp %d, got %d\", 2*valuesSize, n)\n\t}\n\n\tif exp, keys := []string{\"bar\", \"foo\"}, c.Keys(); !reflect.DeepEqual(keys, exp) {\n\t\tt.Fatalf(\"cache keys incorrect after 2 writes, exp %v, got %v\", exp, keys)\n\t}\n}\n\n// Tests that the cache stats and size are correctly maintained during writes.\nfunc TestCache_WriteMulti_Stats(t *testing.T) {\n\tlimit := uint64(1)\n\tc := NewCache(limit, \"\")\n\tms := NewTestStore()\n\tc.store = ms\n\n\t// Not enough room in the cache.\n\tv := NewValue(1, 1.0)\n\tvalues := map[string][]Value{\"foo\": []Value{v, v}}\n\tif got, exp := c.WriteMulti(values), ErrCacheMemorySizeLimitExceeded(uint64(v.Size()*2), limit); !reflect.DeepEqual(got, exp) {\n\t\tt.Fatalf(\"got %q, expected %q\", got, exp)\n\t}\n\n\t// Fail one of the values in the write.\n\tc = NewCache(50, \"\")\n\tc.init()\n\tc.store = ms\n\n\tms.writef = func(key string, v Values) error {\n\t\tif key == \"foo\" {\n\t\t\treturn errors.New(\"write failed\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tvalues = map[string][]Value{\"foo\": []Value{v, v}, \"bar\": []Value{v}}\n\tif got, exp := c.WriteMulti(values), errors.New(\"write failed\"); !reflect.DeepEqual(got, exp) {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// Cache size decreased correctly.\n\tif got, exp := c.Size(), uint64(16); got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// 
Write stats updated\n\tif got, exp := c.stats.WriteDropped, int64(1); got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t} else if got, exp := c.stats.WriteErr, int64(1); got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n}\n\nfunc TestCache_CacheWriteMulti_TypeConflict(t *testing.T) {\n\tv0 := NewValue(1, 1.0)\n\tv1 := NewValue(2, 2.0)\n\tv2 := NewValue(3, int64(3))\n\tvalues := Values{v0, v1, v2}\n\tvaluesSize := uint64(v0.Size() + v1.Size() + v2.Size())\n\n\tc := NewCache(3*valuesSize, \"\")\n\n\tif err := c.WriteMulti(map[string][]Value{\"foo\": values[:1], \"bar\": values[1:]}); err == nil {\n\t\tt.Fatalf(\" expected field type conflict\")\n\t}\n\n\tif exp, got := uint64(v0.Size()), c.Size(); exp != got {\n\t\tt.Fatalf(\"cache size incorrect after 2 writes, exp %d, got %d\", exp, got)\n\t}\n\n\tif exp, keys := []string{\"foo\"}, c.Keys(); !reflect.DeepEqual(keys, exp) {\n\t\tt.Fatalf(\"cache keys incorrect after 2 writes, exp %v, got %v\", exp, keys)\n\t}\n}\n\nfunc TestCache_Cache_DeleteRange(t *testing.T) {\n\tv0 := NewValue(1, 1.0)\n\tv1 := NewValue(2, 2.0)\n\tv2 := NewValue(3, 3.0)\n\tvalues := Values{v0, v1, v2}\n\tvaluesSize := uint64(v0.Size() + v1.Size() + v2.Size())\n\n\tc := NewCache(30*valuesSize, \"\")\n\n\tif err := c.WriteMulti(map[string][]Value{\"foo\": values, \"bar\": values}); err != nil {\n\t\tt.Fatalf(\"failed to write key foo to cache: %s\", err.Error())\n\t}\n\tif n := c.Size(); n != 2*valuesSize {\n\t\tt.Fatalf(\"cache size incorrect after 2 writes, exp %d, got %d\", 2*valuesSize, n)\n\t}\n\n\tif exp, keys := []string{\"bar\", \"foo\"}, c.Keys(); !reflect.DeepEqual(keys, exp) {\n\t\tt.Fatalf(\"cache keys incorrect after 2 writes, exp %v, got %v\", exp, keys)\n\t}\n\n\tc.DeleteRange([]string{\"bar\"}, 2, math.MaxInt64)\n\n\tif exp, keys := []string{\"bar\", \"foo\"}, c.Keys(); !reflect.DeepEqual(keys, exp) {\n\t\tt.Fatalf(\"cache keys incorrect after 2 writes, exp %v, got %v\", exp, keys)\n\t}\n\n\tif got, 
exp := c.Size(), valuesSize+uint64(v0.Size()); exp != got {\n\t\tt.Fatalf(\"cache size incorrect after 2 writes, exp %d, got %d\", exp, got)\n\t}\n\n\tif got, exp := len(c.Values(\"bar\")), 1; got != exp {\n\t\tt.Fatalf(\"cache values mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := len(c.Values(\"foo\")), 3; got != exp {\n\t\tt.Fatalf(\"cache values mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestCache_DeleteRange_NoValues(t *testing.T) {\n\tv0 := NewValue(1, 1.0)\n\tv1 := NewValue(2, 2.0)\n\tv2 := NewValue(3, 3.0)\n\tvalues := Values{v0, v1, v2}\n\tvaluesSize := uint64(v0.Size() + v1.Size() + v2.Size())\n\n\tc := NewCache(3*valuesSize, \"\")\n\n\tif err := c.WriteMulti(map[string][]Value{\"foo\": values}); err != nil {\n\t\tt.Fatalf(\"failed to write key foo to cache: %s\", err.Error())\n\t}\n\tif n := c.Size(); n != valuesSize {\n\t\tt.Fatalf(\"cache size incorrect after 2 writes, exp %d, got %d\", 2*valuesSize, n)\n\t}\n\n\tif exp, keys := []string{\"foo\"}, c.Keys(); !reflect.DeepEqual(keys, exp) {\n\t\tt.Fatalf(\"cache keys incorrect after 2 writes, exp %v, got %v\", exp, keys)\n\t}\n\n\tc.DeleteRange([]string{\"foo\"}, math.MinInt64, math.MaxInt64)\n\n\tif exp, keys := 0, len(c.Keys()); !reflect.DeepEqual(keys, exp) {\n\t\tt.Fatalf(\"cache keys incorrect after 2 writes, exp %v, got %v\", exp, keys)\n\t}\n\n\tif got, exp := c.Size(), uint64(0); exp != got {\n\t\tt.Fatalf(\"cache size incorrect after 2 writes, exp %d, got %d\", exp, got)\n\t}\n\n\tif got, exp := len(c.Values(\"foo\")), 0; got != exp {\n\t\tt.Fatalf(\"cache values mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestCache_Cache_Delete(t *testing.T) {\n\tv0 := NewValue(1, 1.0)\n\tv1 := NewValue(2, 2.0)\n\tv2 := NewValue(3, 3.0)\n\tvalues := Values{v0, v1, v2}\n\tvaluesSize := uint64(v0.Size() + v1.Size() + v2.Size())\n\n\tc := NewCache(30*valuesSize, \"\")\n\n\tif err := c.WriteMulti(map[string][]Value{\"foo\": values, \"bar\": values}); err != nil 
{\n\t\tt.Fatalf(\"failed to write key foo to cache: %s\", err.Error())\n\t}\n\tif n := c.Size(); n != 2*valuesSize {\n\t\tt.Fatalf(\"cache size incorrect after 2 writes, exp %d, got %d\", 2*valuesSize, n)\n\t}\n\n\tif exp, keys := []string{\"bar\", \"foo\"}, c.Keys(); !reflect.DeepEqual(keys, exp) {\n\t\tt.Fatalf(\"cache keys incorrect after 2 writes, exp %v, got %v\", exp, keys)\n\t}\n\n\tc.Delete([]string{\"bar\"})\n\n\tif exp, keys := []string{\"foo\"}, c.Keys(); !reflect.DeepEqual(keys, exp) {\n\t\tt.Fatalf(\"cache keys incorrect after 2 writes, exp %v, got %v\", exp, keys)\n\t}\n\n\tif got, exp := c.Size(), valuesSize; exp != got {\n\t\tt.Fatalf(\"cache size incorrect after 2 writes, exp %d, got %d\", exp, got)\n\t}\n\n\tif got, exp := len(c.Values(\"bar\")), 0; got != exp {\n\t\tt.Fatalf(\"cache values mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := len(c.Values(\"foo\")), 3; got != exp {\n\t\tt.Fatalf(\"cache values mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestCache_Cache_Delete_NonExistent(t *testing.T) {\n\tc := NewCache(1024, \"\")\n\n\tc.Delete([]string{\"bar\"})\n\n\tif got, exp := c.Size(), uint64(0); exp != got {\n\t\tt.Fatalf(\"cache size incorrect exp %d, got %d\", exp, got)\n\t}\n}\n\n// This tests writing two batches to the same series.  The first batch\n// is sorted.  
The second batch is also sorted but contains duplicates.\nfunc TestCache_CacheWriteMulti_Duplicates(t *testing.T) {\n\tv0 := NewValue(2, 1.0)\n\tv1 := NewValue(3, 1.0)\n\tvalues0 := Values{v0, v1}\n\n\tv3 := NewValue(4, 2.0)\n\tv4 := NewValue(5, 3.0)\n\tv5 := NewValue(5, 3.0)\n\tvalues1 := Values{v3, v4, v5}\n\n\tc := NewCache(0, \"\")\n\n\tif err := c.WriteMulti(map[string][]Value{\"foo\": values0}); err != nil {\n\t\tt.Fatalf(\"failed to write key foo to cache: %s\", err.Error())\n\t}\n\n\tif err := c.WriteMulti(map[string][]Value{\"foo\": values1}); err != nil {\n\t\tt.Fatalf(\"failed to write key foo to cache: %s\", err.Error())\n\t}\n\n\tif exp, keys := []string{\"foo\"}, c.Keys(); !reflect.DeepEqual(keys, exp) {\n\t\tt.Fatalf(\"cache keys incorrect after 2 writes, exp %v, got %v\", exp, keys)\n\t}\n\n\texpAscValues := Values{v0, v1, v3, v5}\n\tif exp, got := len(expAscValues), len(c.Values(\"foo\")); exp != got {\n\t\tt.Fatalf(\"value count mismatch: exp: %v, got %v\", exp, got)\n\t}\n\tif deduped := c.Values(\"foo\"); !reflect.DeepEqual(expAscValues, deduped) {\n\t\tt.Fatalf(\"deduped ascending values for foo incorrect, exp: %v, got %v\", expAscValues, deduped)\n\t}\n}\n\nfunc TestCache_CacheValues(t *testing.T) {\n\tv0 := NewValue(1, 0.0)\n\tv1 := NewValue(2, 2.0)\n\tv2 := NewValue(3, 3.0)\n\tv3 := NewValue(1, 1.0)\n\tv4 := NewValue(4, 4.0)\n\n\tc := NewCache(512, \"\")\n\tif deduped := c.Values(\"no such key\"); deduped != nil {\n\t\tt.Fatalf(\"Values returned for no such key\")\n\t}\n\n\tif err := c.Write(\"foo\", Values{v0, v1, v2, v3}); err != nil {\n\t\tt.Fatalf(\"failed to write 3 values, key foo to cache: %s\", err.Error())\n\t}\n\tif err := c.Write(\"foo\", Values{v4}); err != nil {\n\t\tt.Fatalf(\"failed to write 1 value, key foo to cache: %s\", err.Error())\n\t}\n\n\texpAscValues := Values{v3, v1, v2, v4}\n\tif deduped := c.Values(\"foo\"); !reflect.DeepEqual(expAscValues, deduped) {\n\t\tt.Fatalf(\"deduped ascending values for foo incorrect, exp: 
%v, got %v\", expAscValues, deduped)\n\t}\n}\n\nfunc TestCache_CacheSnapshot(t *testing.T) {\n\tv0 := NewValue(2, 0.0)\n\tv1 := NewValue(3, 2.0)\n\tv2 := NewValue(4, 3.0)\n\tv3 := NewValue(5, 4.0)\n\tv4 := NewValue(6, 5.0)\n\tv5 := NewValue(1, 5.0)\n\tv6 := NewValue(7, 5.0)\n\tv7 := NewValue(2, 5.0)\n\n\tc := NewCache(512, \"\")\n\tif err := c.Write(\"foo\", Values{v0, v1, v2, v3}); err != nil {\n\t\tt.Fatalf(\"failed to write 3 values, key foo to cache: %s\", err.Error())\n\t}\n\n\t// Grab snapshot, and ensure it's as expected.\n\tsnapshot, err := c.Snapshot()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to snapshot cache: %v\", err)\n\t}\n\n\texpValues := Values{v0, v1, v2, v3}\n\tif deduped := snapshot.values(\"foo\"); !reflect.DeepEqual(expValues, deduped) {\n\t\tt.Fatalf(\"snapshotted values for foo incorrect, exp: %v, got %v\", expValues, deduped)\n\t}\n\n\t// Ensure cache is still as expected.\n\tif deduped := c.Values(\"foo\"); !reflect.DeepEqual(expValues, deduped) {\n\t\tt.Fatalf(\"post-snapshot values for foo incorrect, exp: %v, got %v\", expValues, deduped)\n\t}\n\n\t// Write a new value to the cache.\n\tif err := c.Write(\"foo\", Values{v4}); err != nil {\n\t\tt.Fatalf(\"failed to write post-snap value, key foo to cache: %s\", err.Error())\n\t}\n\texpValues = Values{v0, v1, v2, v3, v4}\n\tif deduped := c.Values(\"foo\"); !reflect.DeepEqual(expValues, deduped) {\n\t\tt.Fatalf(\"post-snapshot write values for foo incorrect, exp: %v, got %v\", expValues, deduped)\n\t}\n\n\t// Write a new, out-of-order, value to the cache.\n\tif err := c.Write(\"foo\", Values{v5}); err != nil {\n\t\tt.Fatalf(\"failed to write post-snap value, key foo to cache: %s\", err.Error())\n\t}\n\texpValues = Values{v5, v0, v1, v2, v3, v4}\n\tif deduped := c.Values(\"foo\"); !reflect.DeepEqual(expValues, deduped) {\n\t\tt.Fatalf(\"post-snapshot out-of-order write values for foo incorrect, exp: %v, got %v\", expValues, deduped)\n\t}\n\n\t// Clear snapshot, ensuring non-snapshot data 
untouched.\n\tc.ClearSnapshot(true)\n\n\texpValues = Values{v5, v4}\n\tif deduped := c.Values(\"foo\"); !reflect.DeepEqual(expValues, deduped) {\n\t\tt.Fatalf(\"post-clear values for foo incorrect, exp: %v, got %v\", expValues, deduped)\n\t}\n\n\t// Create another snapshot\n\tsnapshot, err = c.Snapshot()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to snapshot cache: %v\", err)\n\t}\n\n\tif err := c.Write(\"foo\", Values{v4, v5}); err != nil {\n\t\tt.Fatalf(\"failed to write post-snap value, key foo to cache: %s\", err.Error())\n\t}\n\n\tc.ClearSnapshot(true)\n\n\tsnapshot, err = c.Snapshot()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to snapshot cache: %v\", err)\n\t}\n\n\tif err := c.Write(\"foo\", Values{v6, v7}); err != nil {\n\t\tt.Fatalf(\"failed to write post-snap value, key foo to cache: %s\", err.Error())\n\t}\n\n\texpValues = Values{v5, v7, v4, v6}\n\tif deduped := c.Values(\"foo\"); !reflect.DeepEqual(expValues, deduped) {\n\t\tt.Fatalf(\"post-snapshot out-of-order write values for foo incorrect, exp: %v, got %v\", expValues, deduped)\n\t}\n}\n\n// Tests that Snapshot updates statistics correctly.\nfunc TestCache_Snapshot_Stats(t *testing.T) {\n\tlimit := uint64(16)\n\tc := NewCache(limit, \"\")\n\n\tvalues := map[string][]Value{\"foo\": []Value{NewValue(1, 1.0)}}\n\tif err := c.WriteMulti(values); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err := c.Snapshot()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Store size should have been reset.\n\tif got, exp := c.Size(), uint64(16); got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\t// Cached bytes should have been increased.\n\tif got, exp := c.stats.CachedBytes, int64(16); got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n}\n\nfunc TestCache_CacheEmptySnapshot(t *testing.T) {\n\tc := NewCache(512, \"\")\n\n\t// Grab snapshot, and ensure it's as expected.\n\tsnapshot, err := c.Snapshot()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to snapshot cache: %v\", 
err)\n\t}\n\tif deduped := snapshot.values(\"foo\"); !reflect.DeepEqual(Values(nil), deduped) {\n\t\tt.Fatalf(\"snapshotted values for foo incorrect, exp: %v, got %v\", nil, deduped)\n\t}\n\n\t// Ensure cache is still as expected.\n\tif deduped := c.Values(\"foo\"); !reflect.DeepEqual(Values(nil), deduped) {\n\t\tt.Fatalf(\"post-snapshotted values for foo incorrect, exp: %v, got %v\", Values(nil), deduped)\n\t}\n\n\t// Clear snapshot.\n\tc.ClearSnapshot(true)\n\tif deduped := c.Values(\"foo\"); !reflect.DeepEqual(Values(nil), deduped) {\n\t\tt.Fatalf(\"post-snapshot-clear values for foo incorrect, exp: %v, got %v\", Values(nil), deduped)\n\t}\n}\n\nfunc TestCache_CacheWriteMemoryExceeded(t *testing.T) {\n\tv0 := NewValue(1, 1.0)\n\tv1 := NewValue(2, 2.0)\n\n\tc := NewCache(uint64(v1.Size()), \"\")\n\n\tif err := c.Write(\"foo\", Values{v0}); err != nil {\n\t\tt.Fatalf(\"failed to write key foo to cache: %s\", err.Error())\n\t}\n\tif exp, keys := []string{\"foo\"}, c.Keys(); !reflect.DeepEqual(keys, exp) {\n\t\tt.Fatalf(\"cache keys incorrect after writes, exp %v, got %v\", exp, keys)\n\t}\n\tif err := c.Write(\"bar\", Values{v1}); err == nil || !strings.Contains(err.Error(), \"cache-max-memory-size\") {\n\t\tt.Fatalf(\"wrong error writing key bar to cache: %v\", err)\n\t}\n\n\t// Grab snapshot, write should still fail since we're still using the memory.\n\t_, err := c.Snapshot()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to snapshot cache: %v\", err)\n\t}\n\tif err := c.Write(\"bar\", Values{v1}); err == nil || !strings.Contains(err.Error(), \"cache-max-memory-size\") {\n\t\tt.Fatalf(\"wrong error writing key bar to cache: %v\", err)\n\t}\n\n\t// Clear the snapshot and the write should now succeed.\n\tc.ClearSnapshot(true)\n\tif err := c.Write(\"bar\", Values{v1}); err != nil {\n\t\tt.Fatalf(\"failed to write key foo to cache: %s\", err.Error())\n\t}\n\texpAscValues := Values{v1}\n\tif deduped := c.Values(\"bar\"); !reflect.DeepEqual(expAscValues, deduped) 
{\n\t\tt.Fatalf(\"deduped ascending values for bar incorrect, exp: %v, got %v\", expAscValues, deduped)\n\t}\n}\n\nfunc TestCache_Deduplicate_Concurrent(t *testing.T) {\n\tif testing.Short() || os.Getenv(\"GORACE\") != \"\" || os.Getenv(\"APPVEYOR\") != \"\" {\n\t\tt.Skip(\"Skipping test in short, race, appveyor mode.\")\n\t}\n\n\tvalues := make(map[string][]Value)\n\n\tfor i := 0; i < 1000; i++ {\n\t\tfor j := 0; j < 100; j++ {\n\t\t\tvalues[fmt.Sprintf(\"cpu%d\", i)] = []Value{NewValue(int64(i+j)+int64(rand.Intn(10)), float64(i))}\n\t\t}\n\t}\n\n\twg := sync.WaitGroup{}\n\tc := NewCache(1000000, \"\")\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor i := 0; i < 1000; i++ {\n\t\t\tc.WriteMulti(values)\n\t\t}\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor i := 0; i < 1000; i++ {\n\t\t\tc.Deduplicate()\n\t\t}\n\t}()\n\n\twg.Wait()\n}\n\n// Ensure the CacheLoader can correctly load from a single segment, even if it's corrupted.\nfunc TestCacheLoader_LoadSingle(t *testing.T) {\n\t// Create a WAL segment.\n\tdir := mustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := mustTempFile(dir)\n\tw := NewWALSegmentWriter(f)\n\n\tp1 := NewValue(1, 1.1)\n\tp2 := NewValue(1, int64(1))\n\tp3 := NewValue(1, true)\n\n\tvalues := map[string][]Value{\n\t\t\"foo\": []Value{p1},\n\t\t\"bar\": []Value{p2},\n\t\t\"baz\": []Value{p3},\n\t}\n\n\tentry := &WriteWALEntry{\n\t\tValues: values,\n\t}\n\n\tif err := w.Write(mustMarshalEntry(entry)); err != nil {\n\t\tt.Fatal(\"write points\", err)\n\t}\n\n\tif err := w.Flush(); err != nil {\n\t\tt.Fatalf(\"flush error: %v\", err)\n\t}\n\n\t// Load the cache using the segment.\n\tcache := NewCache(1024, \"\")\n\tloader := NewCacheLoader([]string{f.Name()})\n\tif err := loader.Load(cache); err != nil {\n\t\tt.Fatalf(\"failed to load cache: %s\", err.Error())\n\t}\n\n\t// Check the cache.\n\tif values := cache.Values(\"foo\"); !reflect.DeepEqual(values, Values{p1}) {\n\t\tt.Fatalf(\"cache key foo not as expected, got %v, 
exp %v\", values, Values{p1})\n\t}\n\tif values := cache.Values(\"bar\"); !reflect.DeepEqual(values, Values{p2}) {\n\t\tt.Fatalf(\"cache key foo not as expected, got %v, exp %v\", values, Values{p2})\n\t}\n\tif values := cache.Values(\"baz\"); !reflect.DeepEqual(values, Values{p3}) {\n\t\tt.Fatalf(\"cache key foo not as expected, got %v, exp %v\", values, Values{p3})\n\t}\n\n\t// Corrupt the WAL segment.\n\tif _, err := f.Write([]byte{1, 4, 0, 0, 0}); err != nil {\n\t\tt.Fatalf(\"corrupt WAL segment: %s\", err.Error())\n\t}\n\n\t// Reload the cache using the segment.\n\tcache = NewCache(1024, \"\")\n\tloader = NewCacheLoader([]string{f.Name()})\n\tif err := loader.Load(cache); err != nil {\n\t\tt.Fatalf(\"failed to load cache: %s\", err.Error())\n\t}\n\n\t// Check the cache.\n\tif values := cache.Values(\"foo\"); !reflect.DeepEqual(values, Values{p1}) {\n\t\tt.Fatalf(\"cache key foo not as expected, got %v, exp %v\", values, Values{p1})\n\t}\n\tif values := cache.Values(\"bar\"); !reflect.DeepEqual(values, Values{p2}) {\n\t\tt.Fatalf(\"cache key bar not as expected, got %v, exp %v\", values, Values{p2})\n\t}\n\tif values := cache.Values(\"baz\"); !reflect.DeepEqual(values, Values{p3}) {\n\t\tt.Fatalf(\"cache key baz not as expected, got %v, exp %v\", values, Values{p3})\n\t}\n}\n\n// Ensure the CacheLoader can correctly load from two segments, even if one is corrupted.\nfunc TestCacheLoader_LoadDouble(t *testing.T) {\n\t// Create a WAL segment.\n\tdir := mustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf1, f2 := mustTempFile(dir), mustTempFile(dir)\n\tw1, w2 := NewWALSegmentWriter(f1), NewWALSegmentWriter(f2)\n\n\tp1 := NewValue(1, 1.1)\n\tp2 := NewValue(1, int64(1))\n\tp3 := NewValue(1, true)\n\tp4 := NewValue(1, \"string\")\n\n\t// Write first and second segment.\n\n\tsegmentWrite := func(w *WALSegmentWriter, values map[string][]Value) {\n\t\tentry := &WriteWALEntry{\n\t\t\tValues: values,\n\t\t}\n\t\tif err := w1.Write(mustMarshalEntry(entry)); err != nil 
{\n\t\t\tt.Fatal(\"write points\", err)\n\t\t}\n\t\tif err := w1.Flush(); err != nil {\n\t\t\tt.Fatalf(\"flush error: %v\", err)\n\t\t}\n\t}\n\n\tvalues := map[string][]Value{\n\t\t\"foo\": []Value{p1},\n\t\t\"bar\": []Value{p2},\n\t}\n\tsegmentWrite(w1, values)\n\tvalues = map[string][]Value{\n\t\t\"baz\": []Value{p3},\n\t\t\"qux\": []Value{p4},\n\t}\n\tsegmentWrite(w2, values)\n\n\t// Corrupt the first WAL segment.\n\tif _, err := f1.Write([]byte{1, 4, 0, 0, 0}); err != nil {\n\t\tt.Fatalf(\"corrupt WAL segment: %s\", err.Error())\n\t}\n\n\t// Load the cache using the segments.\n\tcache := NewCache(1024, \"\")\n\tloader := NewCacheLoader([]string{f1.Name(), f2.Name()})\n\tif err := loader.Load(cache); err != nil {\n\t\tt.Fatalf(\"failed to load cache: %s\", err.Error())\n\t}\n\n\t// Check the cache.\n\tif values := cache.Values(\"foo\"); !reflect.DeepEqual(values, Values{p1}) {\n\t\tt.Fatalf(\"cache key foo not as expected, got %v, exp %v\", values, Values{p1})\n\t}\n\tif values := cache.Values(\"bar\"); !reflect.DeepEqual(values, Values{p2}) {\n\t\tt.Fatalf(\"cache key bar not as expected, got %v, exp %v\", values, Values{p2})\n\t}\n\tif values := cache.Values(\"baz\"); !reflect.DeepEqual(values, Values{p3}) {\n\t\tt.Fatalf(\"cache key baz not as expected, got %v, exp %v\", values, Values{p3})\n\t}\n\tif values := cache.Values(\"qux\"); !reflect.DeepEqual(values, Values{p4}) {\n\t\tt.Fatalf(\"cache key qux not as expected, got %v, exp %v\", values, Values{p4})\n\t}\n}\n\n// Ensure the CacheLoader can load deleted series\nfunc TestCacheLoader_LoadDeleted(t *testing.T) {\n\t// Create a WAL segment.\n\tdir := mustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := mustTempFile(dir)\n\tw := NewWALSegmentWriter(f)\n\n\tp1 := NewValue(1, 1.0)\n\tp2 := NewValue(2, 2.0)\n\tp3 := NewValue(3, 3.0)\n\n\tvalues := map[string][]Value{\n\t\t\"foo\": []Value{p1, p2, p3},\n\t}\n\n\tentry := &WriteWALEntry{\n\t\tValues: values,\n\t}\n\n\tif err := w.Write(mustMarshalEntry(entry)); err 
!= nil {\n\t\tt.Fatal(\"write points\", err)\n\t}\n\n\tif err := w.Flush(); err != nil {\n\t\tt.Fatalf(\"flush error: %v\", err)\n\t}\n\n\tdentry := &DeleteRangeWALEntry{\n\t\tKeys: []string{\"foo\"},\n\t\tMin:  2,\n\t\tMax:  3,\n\t}\n\n\tif err := w.Write(mustMarshalEntry(dentry)); err != nil {\n\t\tt.Fatal(\"write points\", err)\n\t}\n\n\tif err := w.Flush(); err != nil {\n\t\tt.Fatalf(\"flush error: %v\", err)\n\t}\n\n\t// Load the cache using the segment.\n\tcache := NewCache(1024, \"\")\n\tloader := NewCacheLoader([]string{f.Name()})\n\tif err := loader.Load(cache); err != nil {\n\t\tt.Fatalf(\"failed to load cache: %s\", err.Error())\n\t}\n\n\t// Check the cache.\n\tif values := cache.Values(\"foo\"); !reflect.DeepEqual(values, Values{p1}) {\n\t\tt.Fatalf(\"cache key foo not as expected, got %v, exp %v\", values, Values{p1})\n\t}\n\n\t// Reload the cache using the segment.\n\tcache = NewCache(1024, \"\")\n\tloader = NewCacheLoader([]string{f.Name()})\n\tif err := loader.Load(cache); err != nil {\n\t\tt.Fatalf(\"failed to load cache: %s\", err.Error())\n\t}\n\n\t// Check the cache.\n\tif values := cache.Values(\"foo\"); !reflect.DeepEqual(values, Values{p1}) {\n\t\tt.Fatalf(\"cache key foo not as expected, got %v, exp %v\", values, Values{p1})\n\t}\n}\n\nfunc mustTempDir() string {\n\tdir, err := ioutil.TempDir(\"\", \"tsm1-test\")\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to create temp dir: %v\", err))\n\t}\n\treturn dir\n}\n\nfunc mustTempFile(dir string) *os.File {\n\tf, err := ioutil.TempFile(dir, \"tsm1test\")\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to create temp file: %v\", err))\n\t}\n\treturn f\n}\n\nfunc mustMarshalEntry(entry WALEntry) (WalEntryType, []byte) {\n\tbytes := make([]byte, 1024<<2)\n\n\tb, err := entry.Encode(bytes)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"error encoding: %v\", err))\n\t}\n\n\treturn entry.Type(), snappy.Encode(b, b)\n}\n\n// TestStore implements the storer interface and can be used to mock out 
a\n// Cache's storer implememation.\ntype TestStore struct {\n\tentryf       func(key string) (*entry, bool)\n\twritef       func(key string, values Values) error\n\taddf         func(key string, entry *entry)\n\tremovef      func(key string)\n\tkeysf        func(sorted bool) []string\n\tapplyf       func(f func(string, *entry) error) error\n\tapplySerialf func(f func(string, *entry) error) error\n\tresetf       func()\n}\n\nfunc NewTestStore() *TestStore                                      { return &TestStore{} }\nfunc (s *TestStore) entry(key string) (*entry, bool)                { return s.entryf(key) }\nfunc (s *TestStore) write(key string, values Values) error          { return s.writef(key, values) }\nfunc (s *TestStore) add(key string, entry *entry)                   { s.addf(key, entry) }\nfunc (s *TestStore) remove(key string)                              { s.removef(key) }\nfunc (s *TestStore) keys(sorted bool) []string                      { return s.keysf(sorted) }\nfunc (s *TestStore) apply(f func(string, *entry) error) error       { return s.applyf(f) }\nfunc (s *TestStore) applySerial(f func(string, *entry) error) error { return s.applySerialf(f) }\nfunc (s *TestStore) reset()                                         { s.resetf() }\n\nvar fvSize = uint64(NewValue(1, float64(1)).Size())\n\nfunc BenchmarkCacheFloatEntries(b *testing.B) {\n\tcache := NewCache(uint64(b.N)*fvSize, \"\")\n\tvals := make([][]Value, b.N)\n\tfor i := 0; i < b.N; i++ {\n\t\tvals[i] = []Value{NewValue(1, float64(i))}\n\t}\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tif err := cache.Write(\"test\", vals[i]); err != nil {\n\t\t\tb.Fatal(\"err:\", err, \"i:\", i, \"N:\", b.N)\n\t\t}\n\t}\n}\n\ntype points struct {\n\tkey  string\n\tvals []Value\n}\n\nfunc BenchmarkCacheParallelFloatEntries(b *testing.B) {\n\tc := b.N * runtime.GOMAXPROCS(0)\n\tcache := NewCache(uint64(c)*fvSize*10, \"\")\n\tvals := make([]points, c)\n\tfor i := 0; i < c; i++ {\n\t\tv := make([]Value, 
10)\n\t\tfor j := 0; j < 10; j++ {\n\t\t\tv[j] = NewValue(1, float64(i+j))\n\t\t}\n\t\tvals[i] = points{key: fmt.Sprintf(\"cpu%v\", rand.Intn(20)), vals: v}\n\t}\n\ti := int32(-1)\n\tb.ResetTimer()\n\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tj := atomic.AddInt32(&i, 1)\n\t\t\tv := vals[j]\n\t\t\tif err := cache.Write(v.key, v.vals); err != nil {\n\t\t\t\tb.Fatal(\"err:\", err, \"j:\", j, \"N:\", b.N)\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc BenchmarkEntry_add(b *testing.B) {\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tb.StopTimer()\n\t\t\tvalues := make([]Value, 10)\n\t\t\tfor i := 0; i < 10; i++ {\n\t\t\t\tvalues[i] = NewValue(int64(i+1), float64(i))\n\t\t\t}\n\n\t\t\totherValues := make([]Value, 10)\n\t\t\tfor i := 0; i < 10; i++ {\n\t\t\t\totherValues[i] = NewValue(1, float64(i))\n\t\t\t}\n\n\t\t\tentry, err := newEntryValues(values, 0) // Will use default allocation size.\n\t\t\tif err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\n\t\t\tb.StartTimer()\n\t\t\tif err := entry.add(otherValues); err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go",
    "content": "// Generated by tmpl\n// https://github.com/benbjohnson/tmpl\n//\n// DO NOT EDIT!\n// Source: compact.gen.go.tmpl\n\npackage tsm1\n\nimport (\n\t\"runtime\"\n)\n\n// merge combines the next set of blocks into merged blocks.\nfunc (k *tsmKeyIterator) mergeFloat() {\n\t// No blocks left, or pending merged values, we're done\n\tif len(k.blocks) == 0 && len(k.merged) == 0 && len(k.mergedFloatValues) == 0 {\n\t\treturn\n\t}\n\n\tdedup := len(k.mergedFloatValues) != 0\n\tif len(k.blocks) > 0 && !dedup {\n\t\t// If we have more than one block or any partially tombstoned blocks, we many need to dedup\n\t\tdedup = len(k.blocks[0].tombstones) > 0 || k.blocks[0].partiallyRead()\n\n\t\t// Quickly scan each block to see if any overlap with the prior block, if they overlap then\n\t\t// we need to dedup as there may be duplicate points now\n\t\tfor i := 1; !dedup && i < len(k.blocks); i++ {\n\t\t\tif k.blocks[i].partiallyRead() {\n\t\t\t\tdedup = true\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif k.blocks[i].minTime <= k.blocks[i-1].maxTime || len(k.blocks[i].tombstones) > 0 {\n\t\t\t\tdedup = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t}\n\n\tk.merged = k.combineFloat(dedup)\n}\n\n// combine returns a new set of blocks using the current blocks in the buffers.  If dedup\n// is true, all the blocks will be decoded, dedup and sorted in in order.  
If dedup is false,\n// only blocks that are smaller than the chunk size will be decoded and combined.\nfunc (k *tsmKeyIterator) combineFloat(dedup bool) blocks {\n\tif dedup {\n\t\tfor len(k.mergedFloatValues) < k.size && len(k.blocks) > 0 {\n\t\t\tfor len(k.blocks) > 0 && k.blocks[0].read() {\n\t\t\t\tk.blocks = k.blocks[1:]\n\t\t\t}\n\n\t\t\tif len(k.blocks) == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfirst := k.blocks[0]\n\t\t\tminTime := first.minTime\n\t\t\tmaxTime := first.maxTime\n\n\t\t\t// Adjust the min time to the start of any overlapping blocks.\n\t\t\tfor i := 0; i < len(k.blocks); i++ {\n\t\t\t\tif k.blocks[i].overlapsTimeRange(minTime, maxTime) && !k.blocks[i].read() {\n\t\t\t\t\tif k.blocks[i].minTime < minTime {\n\t\t\t\t\t\tminTime = k.blocks[i].minTime\n\t\t\t\t\t}\n\t\t\t\t\tif k.blocks[i].maxTime > minTime && k.blocks[i].maxTime < maxTime {\n\t\t\t\t\t\tmaxTime = k.blocks[i].maxTime\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// We have some overlapping blocks so decode all, append in order and then dedup\n\t\t\tfor i := 0; i < len(k.blocks); i++ {\n\t\t\t\tif !k.blocks[i].overlapsTimeRange(minTime, maxTime) || k.blocks[i].read() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tv, err := DecodeFloatBlock(k.blocks[i].b, &[]FloatValue{})\n\t\t\t\tif err != nil {\n\t\t\t\t\tk.err = err\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\t// Remove values we already read\n\t\t\t\tv = FloatValues(v).Exclude(k.blocks[i].readMin, k.blocks[i].readMax)\n\n\t\t\t\t// Filter out only the values for overlapping block\n\t\t\t\tv = FloatValues(v).Include(minTime, maxTime)\n\t\t\t\tif len(v) > 0 {\n\t\t\t\t\t// Record that we read a subset of the block\n\t\t\t\t\tk.blocks[i].markRead(v[0].UnixNano(), v[len(v)-1].UnixNano())\n\t\t\t\t}\n\n\t\t\t\t// Apply each tombstone to the block\n\t\t\t\tfor _, ts := range k.blocks[i].tombstones {\n\t\t\t\t\tv = FloatValues(v).Exclude(ts.Min, ts.Max)\n\t\t\t\t}\n\n\t\t\t\tk.mergedFloatValues = k.mergedFloatValues.Merge(v)\n\n\t\t\t\t// Allow 
other goroutines to run\n\t\t\t\truntime.Gosched()\n\n\t\t\t}\n\t\t}\n\n\t\t// Since we combined multiple blocks, we could have more values than we should put into\n\t\t// a single block.  We need to chunk them up into groups and re-encode them.\n\t\treturn k.chunkFloat(nil)\n\t} else {\n\t\tvar chunked blocks\n\t\tvar i int\n\n\t\tfor i < len(k.blocks) {\n\n\t\t\t// skip this block if it's values were already read\n\t\t\tif k.blocks[i].read() {\n\t\t\t\ti++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// If we this block is already full, just add it as is\n\t\t\tif BlockCount(k.blocks[i].b) >= k.size {\n\t\t\t\tchunked = append(chunked, k.blocks[i])\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ti++\n\t\t\t// Allow other goroutines to run\n\t\t\truntime.Gosched()\n\t\t}\n\n\t\tif k.fast {\n\t\t\tfor i < len(k.blocks) {\n\t\t\t\t// skip this block if it's values were already read\n\t\t\t\tif k.blocks[i].read() {\n\t\t\t\t\ti++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tchunked = append(chunked, k.blocks[i])\n\t\t\t\ti++\n\t\t\t\t// Allow other goroutines to run\n\t\t\t\truntime.Gosched()\n\t\t\t}\n\t\t}\n\n\t\t// If we only have 1 blocks left, just append it as is and avoid decoding/recoding\n\t\tif i == len(k.blocks)-1 {\n\t\t\tif !k.blocks[i].read() {\n\t\t\t\tchunked = append(chunked, k.blocks[i])\n\t\t\t}\n\t\t\ti++\n\t\t}\n\n\t\t// The remaining blocks can be combined and we know that they do not overlap and\n\t\t// so we can just append each, sort and re-encode.\n\t\tfor i < len(k.blocks) && len(k.mergedFloatValues) < k.size {\n\t\t\tif k.blocks[i].read() {\n\t\t\t\ti++\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tv, err := DecodeFloatBlock(k.blocks[i].b, &[]FloatValue{})\n\t\t\tif err != nil {\n\t\t\t\tk.err = err\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// Apply each tombstone to the block\n\t\t\tfor _, ts := range k.blocks[i].tombstones {\n\t\t\t\tv = FloatValues(v).Exclude(ts.Min, ts.Max)\n\t\t\t}\n\n\t\t\tk.blocks[i].markRead(k.blocks[i].minTime, 
k.blocks[i].maxTime)\n\n\t\t\tk.mergedFloatValues = k.mergedFloatValues.Merge(v)\n\t\t\ti++\n\t\t\t// Allow other goroutines to run\n\t\t\truntime.Gosched()\n\t\t}\n\n\t\tk.blocks = k.blocks[i:]\n\n\t\treturn k.chunkFloat(chunked)\n\t}\n}\n\nfunc (k *tsmKeyIterator) chunkFloat(dst blocks) blocks {\n\tif len(k.mergedFloatValues) > k.size {\n\t\tvalues := k.mergedFloatValues[:k.size]\n\t\tcb, err := FloatValues(values).Encode(nil)\n\t\tif err != nil {\n\t\t\tk.err = err\n\t\t\treturn nil\n\t\t}\n\n\t\tdst = append(dst, &block{\n\t\t\tminTime: values[0].UnixNano(),\n\t\t\tmaxTime: values[len(values)-1].UnixNano(),\n\t\t\tkey:     k.key,\n\t\t\tb:       cb,\n\t\t})\n\t\tk.mergedFloatValues = k.mergedFloatValues[k.size:]\n\t\treturn dst\n\t}\n\n\t// Re-encode the remaining values into the last block\n\tif len(k.mergedFloatValues) > 0 {\n\t\tcb, err := FloatValues(k.mergedFloatValues).Encode(nil)\n\t\tif err != nil {\n\t\t\tk.err = err\n\t\t\treturn nil\n\t\t}\n\n\t\tdst = append(dst, &block{\n\t\t\tminTime: k.mergedFloatValues[0].UnixNano(),\n\t\t\tmaxTime: k.mergedFloatValues[len(k.mergedFloatValues)-1].UnixNano(),\n\t\t\tkey:     k.key,\n\t\t\tb:       cb,\n\t\t})\n\t\tk.mergedFloatValues = k.mergedFloatValues[:0]\n\t}\n\treturn dst\n}\n\n// merge combines the next set of blocks into merged blocks.\nfunc (k *tsmKeyIterator) mergeInteger() {\n\t// No blocks left, or pending merged values, we're done\n\tif len(k.blocks) == 0 && len(k.merged) == 0 && len(k.mergedIntegerValues) == 0 {\n\t\treturn\n\t}\n\n\tdedup := len(k.mergedIntegerValues) != 0\n\tif len(k.blocks) > 0 && !dedup {\n\t\t// If we have more than one block or any partially tombstoned blocks, we many need to dedup\n\t\tdedup = len(k.blocks[0].tombstones) > 0 || k.blocks[0].partiallyRead()\n\n\t\t// Quickly scan each block to see if any overlap with the prior block, if they overlap then\n\t\t// we need to dedup as there may be duplicate points now\n\t\tfor i := 1; !dedup && i < len(k.blocks); i++ {\n\t\t\tif 
k.blocks[i].partiallyRead() {\n\t\t\t\tdedup = true\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif k.blocks[i].minTime <= k.blocks[i-1].maxTime || len(k.blocks[i].tombstones) > 0 {\n\t\t\t\tdedup = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t}\n\n\tk.merged = k.combineInteger(dedup)\n}\n\n// combine returns a new set of blocks using the current blocks in the buffers.  If dedup\n// is true, all the blocks will be decoded, dedup and sorted in in order.  If dedup is false,\n// only blocks that are smaller than the chunk size will be decoded and combined.\nfunc (k *tsmKeyIterator) combineInteger(dedup bool) blocks {\n\tif dedup {\n\t\tfor len(k.mergedIntegerValues) < k.size && len(k.blocks) > 0 {\n\t\t\tfor len(k.blocks) > 0 && k.blocks[0].read() {\n\t\t\t\tk.blocks = k.blocks[1:]\n\t\t\t}\n\n\t\t\tif len(k.blocks) == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfirst := k.blocks[0]\n\t\t\tminTime := first.minTime\n\t\t\tmaxTime := first.maxTime\n\n\t\t\t// Adjust the min time to the start of any overlapping blocks.\n\t\t\tfor i := 0; i < len(k.blocks); i++ {\n\t\t\t\tif k.blocks[i].overlapsTimeRange(minTime, maxTime) && !k.blocks[i].read() {\n\t\t\t\t\tif k.blocks[i].minTime < minTime {\n\t\t\t\t\t\tminTime = k.blocks[i].minTime\n\t\t\t\t\t}\n\t\t\t\t\tif k.blocks[i].maxTime > minTime && k.blocks[i].maxTime < maxTime {\n\t\t\t\t\t\tmaxTime = k.blocks[i].maxTime\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// We have some overlapping blocks so decode all, append in order and then dedup\n\t\t\tfor i := 0; i < len(k.blocks); i++ {\n\t\t\t\tif !k.blocks[i].overlapsTimeRange(minTime, maxTime) || k.blocks[i].read() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tv, err := DecodeIntegerBlock(k.blocks[i].b, &[]IntegerValue{})\n\t\t\t\tif err != nil {\n\t\t\t\t\tk.err = err\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\t// Remove values we already read\n\t\t\t\tv = IntegerValues(v).Exclude(k.blocks[i].readMin, k.blocks[i].readMax)\n\n\t\t\t\t// Filter out only the values for overlapping block\n\t\t\t\tv = 
IntegerValues(v).Include(minTime, maxTime)\n\t\t\t\tif len(v) > 0 {\n\t\t\t\t\t// Record that we read a subset of the block\n\t\t\t\t\tk.blocks[i].markRead(v[0].UnixNano(), v[len(v)-1].UnixNano())\n\t\t\t\t}\n\n\t\t\t\t// Apply each tombstone to the block\n\t\t\t\tfor _, ts := range k.blocks[i].tombstones {\n\t\t\t\t\tv = IntegerValues(v).Exclude(ts.Min, ts.Max)\n\t\t\t\t}\n\n\t\t\t\tk.mergedIntegerValues = k.mergedIntegerValues.Merge(v)\n\n\t\t\t\t// Allow other goroutines to run\n\t\t\t\truntime.Gosched()\n\n\t\t\t}\n\t\t}\n\n\t\t// Since we combined multiple blocks, we could have more values than we should put into\n\t\t// a single block.  We need to chunk them up into groups and re-encode them.\n\t\treturn k.chunkInteger(nil)\n\t} else {\n\t\tvar chunked blocks\n\t\tvar i int\n\n\t\tfor i < len(k.blocks) {\n\n\t\t\t// skip this block if it's values were already read\n\t\t\tif k.blocks[i].read() {\n\t\t\t\ti++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// If we this block is already full, just add it as is\n\t\t\tif BlockCount(k.blocks[i].b) >= k.size {\n\t\t\t\tchunked = append(chunked, k.blocks[i])\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ti++\n\t\t\t// Allow other goroutines to run\n\t\t\truntime.Gosched()\n\t\t}\n\n\t\tif k.fast {\n\t\t\tfor i < len(k.blocks) {\n\t\t\t\t// skip this block if it's values were already read\n\t\t\t\tif k.blocks[i].read() {\n\t\t\t\t\ti++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tchunked = append(chunked, k.blocks[i])\n\t\t\t\ti++\n\t\t\t\t// Allow other goroutines to run\n\t\t\t\truntime.Gosched()\n\t\t\t}\n\t\t}\n\n\t\t// If we only have 1 blocks left, just append it as is and avoid decoding/recoding\n\t\tif i == len(k.blocks)-1 {\n\t\t\tif !k.blocks[i].read() {\n\t\t\t\tchunked = append(chunked, k.blocks[i])\n\t\t\t}\n\t\t\ti++\n\t\t}\n\n\t\t// The remaining blocks can be combined and we know that they do not overlap and\n\t\t// so we can just append each, sort and re-encode.\n\t\tfor i < len(k.blocks) && 
len(k.mergedIntegerValues) < k.size {\n\t\t\tif k.blocks[i].read() {\n\t\t\t\ti++\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tv, err := DecodeIntegerBlock(k.blocks[i].b, &[]IntegerValue{})\n\t\t\tif err != nil {\n\t\t\t\tk.err = err\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// Apply each tombstone to the block\n\t\t\tfor _, ts := range k.blocks[i].tombstones {\n\t\t\t\tv = IntegerValues(v).Exclude(ts.Min, ts.Max)\n\t\t\t}\n\n\t\t\tk.blocks[i].markRead(k.blocks[i].minTime, k.blocks[i].maxTime)\n\n\t\t\tk.mergedIntegerValues = k.mergedIntegerValues.Merge(v)\n\t\t\ti++\n\t\t\t// Allow other goroutines to run\n\t\t\truntime.Gosched()\n\t\t}\n\n\t\tk.blocks = k.blocks[i:]\n\n\t\treturn k.chunkInteger(chunked)\n\t}\n}\n\nfunc (k *tsmKeyIterator) chunkInteger(dst blocks) blocks {\n\tif len(k.mergedIntegerValues) > k.size {\n\t\tvalues := k.mergedIntegerValues[:k.size]\n\t\tcb, err := IntegerValues(values).Encode(nil)\n\t\tif err != nil {\n\t\t\tk.err = err\n\t\t\treturn nil\n\t\t}\n\n\t\tdst = append(dst, &block{\n\t\t\tminTime: values[0].UnixNano(),\n\t\t\tmaxTime: values[len(values)-1].UnixNano(),\n\t\t\tkey:     k.key,\n\t\t\tb:       cb,\n\t\t})\n\t\tk.mergedIntegerValues = k.mergedIntegerValues[k.size:]\n\t\treturn dst\n\t}\n\n\t// Re-encode the remaining values into the last block\n\tif len(k.mergedIntegerValues) > 0 {\n\t\tcb, err := IntegerValues(k.mergedIntegerValues).Encode(nil)\n\t\tif err != nil {\n\t\t\tk.err = err\n\t\t\treturn nil\n\t\t}\n\n\t\tdst = append(dst, &block{\n\t\t\tminTime: k.mergedIntegerValues[0].UnixNano(),\n\t\t\tmaxTime: k.mergedIntegerValues[len(k.mergedIntegerValues)-1].UnixNano(),\n\t\t\tkey:     k.key,\n\t\t\tb:       cb,\n\t\t})\n\t\tk.mergedIntegerValues = k.mergedIntegerValues[:0]\n\t}\n\treturn dst\n}\n\n// merge combines the next set of blocks into merged blocks.\nfunc (k *tsmKeyIterator) mergeString() {\n\t// No blocks left, or pending merged values, we're done\n\tif len(k.blocks) == 0 && len(k.merged) == 0 && len(k.mergedStringValues) == 0 
{\n\t\treturn\n\t}\n\n\tdedup := len(k.mergedStringValues) != 0\n\tif len(k.blocks) > 0 && !dedup {\n\t\t// If we have more than one block or any partially tombstoned blocks, we many need to dedup\n\t\tdedup = len(k.blocks[0].tombstones) > 0 || k.blocks[0].partiallyRead()\n\n\t\t// Quickly scan each block to see if any overlap with the prior block, if they overlap then\n\t\t// we need to dedup as there may be duplicate points now\n\t\tfor i := 1; !dedup && i < len(k.blocks); i++ {\n\t\t\tif k.blocks[i].partiallyRead() {\n\t\t\t\tdedup = true\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif k.blocks[i].minTime <= k.blocks[i-1].maxTime || len(k.blocks[i].tombstones) > 0 {\n\t\t\t\tdedup = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t}\n\n\tk.merged = k.combineString(dedup)\n}\n\n// combine returns a new set of blocks using the current blocks in the buffers.  If dedup\n// is true, all the blocks will be decoded, dedup and sorted in in order.  If dedup is false,\n// only blocks that are smaller than the chunk size will be decoded and combined.\nfunc (k *tsmKeyIterator) combineString(dedup bool) blocks {\n\tif dedup {\n\t\tfor len(k.mergedStringValues) < k.size && len(k.blocks) > 0 {\n\t\t\tfor len(k.blocks) > 0 && k.blocks[0].read() {\n\t\t\t\tk.blocks = k.blocks[1:]\n\t\t\t}\n\n\t\t\tif len(k.blocks) == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfirst := k.blocks[0]\n\t\t\tminTime := first.minTime\n\t\t\tmaxTime := first.maxTime\n\n\t\t\t// Adjust the min time to the start of any overlapping blocks.\n\t\t\tfor i := 0; i < len(k.blocks); i++ {\n\t\t\t\tif k.blocks[i].overlapsTimeRange(minTime, maxTime) && !k.blocks[i].read() {\n\t\t\t\t\tif k.blocks[i].minTime < minTime {\n\t\t\t\t\t\tminTime = k.blocks[i].minTime\n\t\t\t\t\t}\n\t\t\t\t\tif k.blocks[i].maxTime > minTime && k.blocks[i].maxTime < maxTime {\n\t\t\t\t\t\tmaxTime = k.blocks[i].maxTime\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// We have some overlapping blocks so decode all, append in order and then dedup\n\t\t\tfor i := 0; i < 
len(k.blocks); i++ {\n\t\t\t\tif !k.blocks[i].overlapsTimeRange(minTime, maxTime) || k.blocks[i].read() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tv, err := DecodeStringBlock(k.blocks[i].b, &[]StringValue{})\n\t\t\t\tif err != nil {\n\t\t\t\t\tk.err = err\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\t// Remove values we already read\n\t\t\t\tv = StringValues(v).Exclude(k.blocks[i].readMin, k.blocks[i].readMax)\n\n\t\t\t\t// Filter out only the values for overlapping block\n\t\t\t\tv = StringValues(v).Include(minTime, maxTime)\n\t\t\t\tif len(v) > 0 {\n\t\t\t\t\t// Record that we read a subset of the block\n\t\t\t\t\tk.blocks[i].markRead(v[0].UnixNano(), v[len(v)-1].UnixNano())\n\t\t\t\t}\n\n\t\t\t\t// Apply each tombstone to the block\n\t\t\t\tfor _, ts := range k.blocks[i].tombstones {\n\t\t\t\t\tv = StringValues(v).Exclude(ts.Min, ts.Max)\n\t\t\t\t}\n\n\t\t\t\tk.mergedStringValues = k.mergedStringValues.Merge(v)\n\n\t\t\t\t// Allow other goroutines to run\n\t\t\t\truntime.Gosched()\n\n\t\t\t}\n\t\t}\n\n\t\t// Since we combined multiple blocks, we could have more values than we should put into\n\t\t// a single block.  
We need to chunk them up into groups and re-encode them.\n\t\treturn k.chunkString(nil)\n\t} else {\n\t\tvar chunked blocks\n\t\tvar i int\n\n\t\tfor i < len(k.blocks) {\n\n\t\t\t// skip this block if it's values were already read\n\t\t\tif k.blocks[i].read() {\n\t\t\t\ti++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// If we this block is already full, just add it as is\n\t\t\tif BlockCount(k.blocks[i].b) >= k.size {\n\t\t\t\tchunked = append(chunked, k.blocks[i])\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ti++\n\t\t\t// Allow other goroutines to run\n\t\t\truntime.Gosched()\n\t\t}\n\n\t\tif k.fast {\n\t\t\tfor i < len(k.blocks) {\n\t\t\t\t// skip this block if it's values were already read\n\t\t\t\tif k.blocks[i].read() {\n\t\t\t\t\ti++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tchunked = append(chunked, k.blocks[i])\n\t\t\t\ti++\n\t\t\t\t// Allow other goroutines to run\n\t\t\t\truntime.Gosched()\n\t\t\t}\n\t\t}\n\n\t\t// If we only have 1 blocks left, just append it as is and avoid decoding/recoding\n\t\tif i == len(k.blocks)-1 {\n\t\t\tif !k.blocks[i].read() {\n\t\t\t\tchunked = append(chunked, k.blocks[i])\n\t\t\t}\n\t\t\ti++\n\t\t}\n\n\t\t// The remaining blocks can be combined and we know that they do not overlap and\n\t\t// so we can just append each, sort and re-encode.\n\t\tfor i < len(k.blocks) && len(k.mergedStringValues) < k.size {\n\t\t\tif k.blocks[i].read() {\n\t\t\t\ti++\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tv, err := DecodeStringBlock(k.blocks[i].b, &[]StringValue{})\n\t\t\tif err != nil {\n\t\t\t\tk.err = err\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// Apply each tombstone to the block\n\t\t\tfor _, ts := range k.blocks[i].tombstones {\n\t\t\t\tv = StringValues(v).Exclude(ts.Min, ts.Max)\n\t\t\t}\n\n\t\t\tk.blocks[i].markRead(k.blocks[i].minTime, k.blocks[i].maxTime)\n\n\t\t\tk.mergedStringValues = k.mergedStringValues.Merge(v)\n\t\t\ti++\n\t\t\t// Allow other goroutines to run\n\t\t\truntime.Gosched()\n\t\t}\n\n\t\tk.blocks = k.blocks[i:]\n\n\t\treturn 
k.chunkString(chunked)\n\t}\n}\n\nfunc (k *tsmKeyIterator) chunkString(dst blocks) blocks {\n\tif len(k.mergedStringValues) > k.size {\n\t\tvalues := k.mergedStringValues[:k.size]\n\t\tcb, err := StringValues(values).Encode(nil)\n\t\tif err != nil {\n\t\t\tk.err = err\n\t\t\treturn nil\n\t\t}\n\n\t\tdst = append(dst, &block{\n\t\t\tminTime: values[0].UnixNano(),\n\t\t\tmaxTime: values[len(values)-1].UnixNano(),\n\t\t\tkey:     k.key,\n\t\t\tb:       cb,\n\t\t})\n\t\tk.mergedStringValues = k.mergedStringValues[k.size:]\n\t\treturn dst\n\t}\n\n\t// Re-encode the remaining values into the last block\n\tif len(k.mergedStringValues) > 0 {\n\t\tcb, err := StringValues(k.mergedStringValues).Encode(nil)\n\t\tif err != nil {\n\t\t\tk.err = err\n\t\t\treturn nil\n\t\t}\n\n\t\tdst = append(dst, &block{\n\t\t\tminTime: k.mergedStringValues[0].UnixNano(),\n\t\t\tmaxTime: k.mergedStringValues[len(k.mergedStringValues)-1].UnixNano(),\n\t\t\tkey:     k.key,\n\t\t\tb:       cb,\n\t\t})\n\t\tk.mergedStringValues = k.mergedStringValues[:0]\n\t}\n\treturn dst\n}\n\n// merge combines the next set of blocks into merged blocks.\nfunc (k *tsmKeyIterator) mergeBoolean() {\n\t// No blocks left, or pending merged values, we're done\n\tif len(k.blocks) == 0 && len(k.merged) == 0 && len(k.mergedBooleanValues) == 0 {\n\t\treturn\n\t}\n\n\tdedup := len(k.mergedBooleanValues) != 0\n\tif len(k.blocks) > 0 && !dedup {\n\t\t// If we have more than one block or any partially tombstoned blocks, we many need to dedup\n\t\tdedup = len(k.blocks[0].tombstones) > 0 || k.blocks[0].partiallyRead()\n\n\t\t// Quickly scan each block to see if any overlap with the prior block, if they overlap then\n\t\t// we need to dedup as there may be duplicate points now\n\t\tfor i := 1; !dedup && i < len(k.blocks); i++ {\n\t\t\tif k.blocks[i].partiallyRead() {\n\t\t\t\tdedup = true\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif k.blocks[i].minTime <= k.blocks[i-1].maxTime || len(k.blocks[i].tombstones) > 0 {\n\t\t\t\tdedup = 
true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t}\n\n\tk.merged = k.combineBoolean(dedup)\n}\n\n// combine returns a new set of blocks using the current blocks in the buffers.  If dedup\n// is true, all the blocks will be decoded, dedup and sorted in in order.  If dedup is false,\n// only blocks that are smaller than the chunk size will be decoded and combined.\nfunc (k *tsmKeyIterator) combineBoolean(dedup bool) blocks {\n\tif dedup {\n\t\tfor len(k.mergedBooleanValues) < k.size && len(k.blocks) > 0 {\n\t\t\tfor len(k.blocks) > 0 && k.blocks[0].read() {\n\t\t\t\tk.blocks = k.blocks[1:]\n\t\t\t}\n\n\t\t\tif len(k.blocks) == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfirst := k.blocks[0]\n\t\t\tminTime := first.minTime\n\t\t\tmaxTime := first.maxTime\n\n\t\t\t// Adjust the min time to the start of any overlapping blocks.\n\t\t\tfor i := 0; i < len(k.blocks); i++ {\n\t\t\t\tif k.blocks[i].overlapsTimeRange(minTime, maxTime) && !k.blocks[i].read() {\n\t\t\t\t\tif k.blocks[i].minTime < minTime {\n\t\t\t\t\t\tminTime = k.blocks[i].minTime\n\t\t\t\t\t}\n\t\t\t\t\tif k.blocks[i].maxTime > minTime && k.blocks[i].maxTime < maxTime {\n\t\t\t\t\t\tmaxTime = k.blocks[i].maxTime\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// We have some overlapping blocks so decode all, append in order and then dedup\n\t\t\tfor i := 0; i < len(k.blocks); i++ {\n\t\t\t\tif !k.blocks[i].overlapsTimeRange(minTime, maxTime) || k.blocks[i].read() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tv, err := DecodeBooleanBlock(k.blocks[i].b, &[]BooleanValue{})\n\t\t\t\tif err != nil {\n\t\t\t\t\tk.err = err\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\t// Remove values we already read\n\t\t\t\tv = BooleanValues(v).Exclude(k.blocks[i].readMin, k.blocks[i].readMax)\n\n\t\t\t\t// Filter out only the values for overlapping block\n\t\t\t\tv = BooleanValues(v).Include(minTime, maxTime)\n\t\t\t\tif len(v) > 0 {\n\t\t\t\t\t// Record that we read a subset of the block\n\t\t\t\t\tk.blocks[i].markRead(v[0].UnixNano(), 
v[len(v)-1].UnixNano())\n\t\t\t\t}\n\n\t\t\t\t// Apply each tombstone to the block\n\t\t\t\tfor _, ts := range k.blocks[i].tombstones {\n\t\t\t\t\tv = BooleanValues(v).Exclude(ts.Min, ts.Max)\n\t\t\t\t}\n\n\t\t\t\tk.mergedBooleanValues = k.mergedBooleanValues.Merge(v)\n\n\t\t\t\t// Allow other goroutines to run\n\t\t\t\truntime.Gosched()\n\n\t\t\t}\n\t\t}\n\n\t\t// Since we combined multiple blocks, we could have more values than we should put into\n\t\t// a single block.  We need to chunk them up into groups and re-encode them.\n\t\treturn k.chunkBoolean(nil)\n\t} else {\n\t\tvar chunked blocks\n\t\tvar i int\n\n\t\tfor i < len(k.blocks) {\n\n\t\t\t// skip this block if it's values were already read\n\t\t\tif k.blocks[i].read() {\n\t\t\t\ti++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// If we this block is already full, just add it as is\n\t\t\tif BlockCount(k.blocks[i].b) >= k.size {\n\t\t\t\tchunked = append(chunked, k.blocks[i])\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ti++\n\t\t\t// Allow other goroutines to run\n\t\t\truntime.Gosched()\n\t\t}\n\n\t\tif k.fast {\n\t\t\tfor i < len(k.blocks) {\n\t\t\t\t// skip this block if it's values were already read\n\t\t\t\tif k.blocks[i].read() {\n\t\t\t\t\ti++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tchunked = append(chunked, k.blocks[i])\n\t\t\t\ti++\n\t\t\t\t// Allow other goroutines to run\n\t\t\t\truntime.Gosched()\n\t\t\t}\n\t\t}\n\n\t\t// If we only have 1 blocks left, just append it as is and avoid decoding/recoding\n\t\tif i == len(k.blocks)-1 {\n\t\t\tif !k.blocks[i].read() {\n\t\t\t\tchunked = append(chunked, k.blocks[i])\n\t\t\t}\n\t\t\ti++\n\t\t}\n\n\t\t// The remaining blocks can be combined and we know that they do not overlap and\n\t\t// so we can just append each, sort and re-encode.\n\t\tfor i < len(k.blocks) && len(k.mergedBooleanValues) < k.size {\n\t\t\tif k.blocks[i].read() {\n\t\t\t\ti++\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tv, err := DecodeBooleanBlock(k.blocks[i].b, &[]BooleanValue{})\n\t\t\tif err != 
nil {\n\t\t\t\tk.err = err\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// Apply each tombstone to the block\n\t\t\tfor _, ts := range k.blocks[i].tombstones {\n\t\t\t\tv = BooleanValues(v).Exclude(ts.Min, ts.Max)\n\t\t\t}\n\n\t\t\tk.blocks[i].markRead(k.blocks[i].minTime, k.blocks[i].maxTime)\n\n\t\t\tk.mergedBooleanValues = k.mergedBooleanValues.Merge(v)\n\t\t\ti++\n\t\t\t// Allow other goroutines to run\n\t\t\truntime.Gosched()\n\t\t}\n\n\t\tk.blocks = k.blocks[i:]\n\n\t\treturn k.chunkBoolean(chunked)\n\t}\n}\n\nfunc (k *tsmKeyIterator) chunkBoolean(dst blocks) blocks {\n\tif len(k.mergedBooleanValues) > k.size {\n\t\tvalues := k.mergedBooleanValues[:k.size]\n\t\tcb, err := BooleanValues(values).Encode(nil)\n\t\tif err != nil {\n\t\t\tk.err = err\n\t\t\treturn nil\n\t\t}\n\n\t\tdst = append(dst, &block{\n\t\t\tminTime: values[0].UnixNano(),\n\t\t\tmaxTime: values[len(values)-1].UnixNano(),\n\t\t\tkey:     k.key,\n\t\t\tb:       cb,\n\t\t})\n\t\tk.mergedBooleanValues = k.mergedBooleanValues[k.size:]\n\t\treturn dst\n\t}\n\n\t// Re-encode the remaining values into the last block\n\tif len(k.mergedBooleanValues) > 0 {\n\t\tcb, err := BooleanValues(k.mergedBooleanValues).Encode(nil)\n\t\tif err != nil {\n\t\t\tk.err = err\n\t\t\treturn nil\n\t\t}\n\n\t\tdst = append(dst, &block{\n\t\t\tminTime: k.mergedBooleanValues[0].UnixNano(),\n\t\t\tmaxTime: k.mergedBooleanValues[len(k.mergedBooleanValues)-1].UnixNano(),\n\t\t\tkey:     k.key,\n\t\t\tb:       cb,\n\t\t})\n\t\tk.mergedBooleanValues = k.mergedBooleanValues[:0]\n\t}\n\treturn dst\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go.tmpl",
    "content": "package tsm1\n\nimport (\n\t\"runtime\"\n)\n\n{{range .}}\n\n// merge combines the next set of blocks into merged blocks.\nfunc (k *tsmKeyIterator) merge{{.Name}}() {\n\t// No blocks left, or pending merged values, we're done\n\tif len(k.blocks) == 0 && len(k.merged) == 0 && len(k.merged{{.Name}}Values) == 0 {\n\t\treturn\n\t}\n\n\tdedup := len(k.merged{{.Name}}Values) != 0\n\tif len(k.blocks) > 0 && !dedup {\n\t\t// If we have more than one block or any partially tombstoned blocks, we many need to dedup\n\t\tdedup = len(k.blocks[0].tombstones) > 0 || k.blocks[0].partiallyRead()\n\n\t\t// Quickly scan each block to see if any overlap with the prior block, if they overlap then\n\t\t// we need to dedup as there may be duplicate points now\n\t\tfor i := 1; !dedup && i < len(k.blocks); i++ {\n\t\t\tif k.blocks[i].partiallyRead() {\n\t\t\t\tdedup = true\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif k.blocks[i].minTime <= k.blocks[i-1].maxTime || len(k.blocks[i].tombstones) > 0 {\n\t\t\t\tdedup = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t}\n\n\tk.merged = k.combine{{.Name}}(dedup)\n}\n\n// combine returns a new set of blocks using the current blocks in the buffers.  If dedup\n// is true, all the blocks will be decoded, dedup and sorted in in order.  
If dedup is false,\n// only blocks that are smaller than the chunk size will be decoded and combined.\nfunc (k *tsmKeyIterator) combine{{.Name}}(dedup bool) blocks {\n\tif dedup {\n\t\tfor len(k.merged{{.Name}}Values) < k.size && len(k.blocks) > 0 {\n\t\t\tfor len(k.blocks) > 0 && k.blocks[0].read() {\n\t\t\t\tk.blocks = k.blocks[1:]\n\t\t\t}\n\n\t\t\tif len(k.blocks) == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfirst := k.blocks[0]\n\t\t\tminTime := first.minTime\n\t\t\tmaxTime := first.maxTime\n\n\t\t\t// Adjust the min time to the start of any overlapping blocks.\n\t\t\tfor i := 0; i < len(k.blocks); i++ {\n\t\t\t\tif k.blocks[i].overlapsTimeRange(minTime, maxTime) && !k.blocks[i].read() {\n\t\t\t\t\tif k.blocks[i].minTime < minTime {\n\t\t\t\t\t\tminTime = k.blocks[i].minTime\n\t\t\t\t\t}\n\t\t\t\t\tif k.blocks[i].maxTime > minTime && k.blocks[i].maxTime < maxTime {\n                        maxTime = k.blocks[i].maxTime\n                    }\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// We have some overlapping blocks so decode all, append in order and then dedup\n\t\t\tfor i := 0; i < len(k.blocks); i++ {\n\t\t\t\tif !k.blocks[i].overlapsTimeRange(minTime, maxTime) || k.blocks[i].read() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tv, err := Decode{{.Name}}Block(k.blocks[i].b, &[]{{.Name}}Value{})\n\t\t\t\tif err != nil {\n\t\t\t\t\tk.err = err\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\t// Remove values we already read\n\t\t\t\tv = {{.Name}}Values(v).Exclude(k.blocks[i].readMin, k.blocks[i].readMax)\n\n\t\t\t\t// Filter out only the values for overlapping block\n\t\t\t\tv = {{.Name}}Values(v).Include(minTime, maxTime)\n\t\t\t\tif len(v) > 0 {\n\t\t\t\t\t// Record that we read a subset of the block\n\t\t\t\t\tk.blocks[i].markRead(v[0].UnixNano(), v[len(v)-1].UnixNano())\n\t\t\t\t}\n\n\t\t\t\t// Apply each tombstone to the block\n\t\t\t\tfor _, ts := range k.blocks[i].tombstones {\n\t\t\t\t\tv = {{.Name}}Values(v).Exclude(ts.Min, ts.Max)\n\t\t\t\t}\n\n\t\t\t\tk.merged{{.Name}}Values 
= k.merged{{.Name}}Values.Merge(v)\n\n\t\t\t\t// Allow other goroutines to run\n\t\t\t\truntime.Gosched()\n\n\t\t\t}\n\t\t}\n\n\t\t// Since we combined multiple blocks, we could have more values than we should put into\n\t\t// a single block.  We need to chunk them up into groups and re-encode them.\n\t\treturn k.chunk{{.Name}}(nil)\n\t} else {\n\t\tvar chunked blocks\n\t\tvar i int\n\n\t\tfor i < len(k.blocks) {\n\n\t\t\t// skip this block if it's values were already read\n\t\t\tif k.blocks[i].read() {\n\t\t\t\ti++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// If we this block is already full, just add it as is\n\t\t\tif BlockCount(k.blocks[i].b) >= k.size {\n\t\t\t\tchunked = append(chunked, k.blocks[i])\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ti++\n\t\t\t// Allow other goroutines to run\n\t\t\truntime.Gosched()\n\t\t}\n\n\t\tif k.fast {\n\t\t\tfor i < len(k.blocks) {\n\t\t\t\t// skip this block if it's values were already read\n\t\t\t\tif k.blocks[i].read() {\n\t\t\t\t\ti++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tchunked = append(chunked, k.blocks[i])\n\t\t\t\ti++\n\t\t\t\t// Allow other goroutines to run\n\t\t\t\truntime.Gosched()\n\t\t\t}\n\t\t}\n\n\t\t// If we only have 1 blocks left, just append it as is and avoid decoding/recoding\n\t\tif i == len(k.blocks)-1 {\n\t\t\tif !k.blocks[i].read() {\n\t\t\t\tchunked = append(chunked, k.blocks[i])\n\t\t\t}\n\t\t\ti++\n\t\t}\n\n\t\t// The remaining blocks can be combined and we know that they do not overlap and\n\t\t// so we can just append each, sort and re-encode.\n\t\tfor i < len(k.blocks) && len(k.merged{{.Name}}Values) < k.size {\n\t\t\tif k.blocks[i].read() {\n\t\t\t\ti++\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tv, err := Decode{{.Name}}Block(k.blocks[i].b, &[]{{.Name}}Value{})\n\t\t\tif err != nil {\n\t\t\t\tk.err = err\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// Apply each tombstone to the block\n\t\t\tfor _, ts := range k.blocks[i].tombstones {\n\t\t\t\tv = {{.Name}}Values(v).Exclude(ts.Min, 
ts.Max)\n\t\t\t}\n\n\t\t\tk.blocks[i].markRead(k.blocks[i].minTime, k.blocks[i].maxTime)\n\n\t\t\tk.merged{{.Name}}Values = k.merged{{.Name}}Values.Merge(v)\n\t\t\ti++\n\t\t\t// Allow other goroutines to run\n\t\t\truntime.Gosched()\n\t\t}\n\n\t\tk.blocks = k.blocks[i:]\n\n\t\treturn k.chunk{{.Name}}(chunked)\n\t}\n}\n\nfunc (k *tsmKeyIterator) chunk{{.Name}}(dst blocks) blocks {\n\tif len(k.merged{{.Name}}Values) > k.size {\n\t\tvalues := k.merged{{.Name}}Values[:k.size]\n\t\tcb, err := {{.Name}}Values(values).Encode(nil)\n\t\tif err != nil {\n\t\t\tk.err = err\n\t\t\treturn nil\n\t\t}\n\n\t\tdst = append(dst, &block{\n\t\t\tminTime: values[0].UnixNano(),\n\t\t\tmaxTime: values[len(values)-1].UnixNano(),\n\t\t\tkey:     k.key,\n\t\t\tb:       cb,\n\t\t})\n\t\tk.merged{{.Name}}Values = k.merged{{.Name}}Values[k.size:]\n\t\treturn dst\n\t}\n\n\t// Re-encode the remaining values into the last block\n\tif len(k.merged{{.Name}}Values) > 0 {\n\t\tcb, err := {{.Name}}Values(k.merged{{.Name}}Values).Encode(nil)\n\t\tif err != nil {\n\t\t\tk.err = err\n\t\t\treturn nil\n\t\t}\n\n\t\tdst = append(dst, &block{\n\t\t\tminTime: k.merged{{.Name}}Values[0].UnixNano(),\n\t\t\tmaxTime: k.merged{{.Name}}Values[len(k.merged{{.Name}}Values)-1].UnixNano(),\n\t\t\tkey:     k.key,\n\t\t\tb:       cb,\n\t\t})\n\t\tk.merged{{.Name}}Values = k.merged{{.Name}}Values[:0]\n\t}\n\treturn dst\n}\n\n{{ end }}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go.tmpldata",
    "content": "[\n\t{\n\t\t\"Name\":\"Float\",\n\t\t\"name\":\"float\"\n\t},\n\t{\n\t\t\"Name\":\"Integer\",\n\t\t\"name\":\"integer\"\n\t},\n\t{\n\t\t\"Name\":\"String\",\n\t\t\"name\":\"string\"\n\t},\n\t{\n\t\t\"Name\":\"Boolean\",\n\t\t\"name\":\"boolean\"\n\t}\n]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.go",
    "content": "package tsm1\n\n// Compactions are the process of creating read-optimized TSM files.\n// The files are created by converting write-optimized WAL entries\n// to read-optimized TSM format.  They can also be created from existing\n// TSM files when there are tombstone records that neeed to be removed, points\n// that were overwritten by later writes and need to updated, or multiple\n// smaller TSM files need to be merged to reduce file counts and improve\n// compression ratios.\n//\n// The compaction process is stream-oriented using multiple readers and\n// iterators.  The resulting stream is written sorted and chunked to allow for\n// one-pass writing of a new TSM file.\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/tsdb\"\n)\n\nconst maxTSMFileSize = uint32(2048 * 1024 * 1024) // 2GB\n\nconst (\n\t// CompactionTempExtension is the extension used for temporary files created during compaction.\n\tCompactionTempExtension = \"tmp\"\n\n\t// TSMFileExtension is the extension used for TSM files.\n\tTSMFileExtension = \"tsm\"\n)\n\nvar (\n\terrMaxFileExceeded     = fmt.Errorf(\"max file exceeded\")\n\terrSnapshotsDisabled   = fmt.Errorf(\"snapshots disabled\")\n\terrCompactionsDisabled = fmt.Errorf(\"compactions disabled\")\n\terrCompactionAborted   = fmt.Errorf(\"compaction aborted\")\n)\n\ntype errCompactionInProgress struct {\n\terr error\n}\n\n// Error returns the string representation of the error, to satisfy the error interface.\nfunc (e errCompactionInProgress) Error() string {\n\tif e.err != nil {\n\t\treturn fmt.Sprintf(\"compaction in progress: %s\", e.err)\n\t}\n\treturn \"compaction in progress\"\n}\n\n// CompactionGroup represents a list of files eligible to be compacted together.\ntype CompactionGroup []string\n\n// CompactionPlanner determines what TSM files and WAL segments to include in a\n// given compaction 
run.\ntype CompactionPlanner interface {\n\tPlan(lastWrite time.Time) []CompactionGroup\n\tPlanLevel(level int) []CompactionGroup\n\tPlanOptimize() []CompactionGroup\n\tRelease(group []CompactionGroup)\n\tFullyCompacted() bool\n}\n\n// DefaultPlanner implements CompactionPlanner using a strategy to roll up\n// multiple generations of TSM files into larger files in stages.  It attempts\n// to minimize the number of TSM files on disk while rolling up a bounder number\n// of files.\ntype DefaultPlanner struct {\n\tFileStore fileStore\n\n\t// compactFullWriteColdDuration specifies the length of time after\n\t// which if no writes have been committed to the WAL, the engine will\n\t// do a full compaction of the TSM files in this shard. This duration\n\t// should always be greater than the CacheFlushWriteColdDuraion\n\tcompactFullWriteColdDuration time.Duration\n\n\t// lastPlanCheck is the last time Plan was called\n\tlastPlanCheck time.Time\n\n\tmu sync.RWMutex\n\t// lastFindGenerations is the last time findGenerations was run\n\tlastFindGenerations time.Time\n\n\t// lastGenerations is the last set of generations found by findGenerations\n\tlastGenerations tsmGenerations\n\n\t// filesInUse is the set of files that have been returned as part of a plan and might\n\t// be being compacted.  
Two plans should not return the same file at any given time.\n\tfilesInUse map[string]struct{}\n}\n\ntype fileStore interface {\n\tStats() []FileStat\n\tLastModified() time.Time\n\tBlockCount(path string, idx int) int\n}\n\nfunc NewDefaultPlanner(fs fileStore, writeColdDuration time.Duration) *DefaultPlanner {\n\treturn &DefaultPlanner{\n\t\tFileStore:                    fs,\n\t\tcompactFullWriteColdDuration: writeColdDuration,\n\t\tfilesInUse:                   make(map[string]struct{}),\n\t}\n}\n\n// tsmGeneration represents the TSM files within a generation.\n// 000001-01.tsm, 000001-02.tsm would be in the same generation\n// 000001 each with different sequence numbers.\ntype tsmGeneration struct {\n\tid    int\n\tfiles []FileStat\n}\n\n// size returns the total size of the files in the generation.\nfunc (t *tsmGeneration) size() uint64 {\n\tvar n uint64\n\tfor _, f := range t.files {\n\t\tn += uint64(f.Size)\n\t}\n\treturn n\n}\n\n// compactionLevel returns the level of the files in this generation.\nfunc (t *tsmGeneration) level() int {\n\t// Level 0 is always created from the result of a cache compaction.  It generates\n\t// 1 file with a sequence num of 1.  Level 2 is generated by compacting multiple\n\t// level 1 files.  Level 3 is generate by compacting multiple level 2 files.  
Level\n\t// 4 is for anything else.\n\t_, seq, _ := ParseTSMFileName(t.files[len(t.files)-1].Path)\n\tif seq < 4 {\n\t\treturn seq\n\t}\n\n\treturn 4\n}\n\n// count returns the number of files in the generation.\nfunc (t *tsmGeneration) count() int {\n\treturn len(t.files)\n}\n\n// hasTombstones returns true if there are keys removed for any of the files.\nfunc (t *tsmGeneration) hasTombstones() bool {\n\tfor _, f := range t.files {\n\t\tif f.HasTombstone {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// FullyCompacted returns true if the shard is fully compacted.\nfunc (c *DefaultPlanner) FullyCompacted() bool {\n\tgens := c.findGenerations()\n\treturn len(gens) <= 1 && !gens.hasTombstones()\n}\n\n// PlanLevel returns a set of TSM files to rewrite for a specific level.\nfunc (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup {\n\t// Determine the generations from all files on disk.  We need to treat\n\t// a generation conceptually as a single file even though it may be\n\t// split across several files in sequence.\n\tgenerations := c.findGenerations()\n\n\t// If there is only one generation and no tombstones, then there's nothing to\n\t// do.\n\tif len(generations) <= 1 && !generations.hasTombstones() {\n\t\treturn nil\n\t}\n\n\t// Group each generation by level such that two adjacent generations in the same\n\t// level become part of the same group.\n\tvar currentGen tsmGenerations\n\tvar groups []tsmGenerations\n\tfor i := 0; i < len(generations); i++ {\n\t\tcur := generations[i]\n\n\t\tif len(currentGen) == 0 || currentGen.level() == cur.level() {\n\t\t\tcurrentGen = append(currentGen, cur)\n\t\t\tcontinue\n\t\t}\n\t\tgroups = append(groups, currentGen)\n\n\t\tcurrentGen = tsmGenerations{}\n\t\tcurrentGen = append(currentGen, cur)\n\t}\n\n\tif len(currentGen) > 0 {\n\t\tgroups = append(groups, currentGen)\n\t}\n\n\t// Remove any groups in the wrong level\n\tvar levelGroups []tsmGenerations\n\tfor _, cur := range groups {\n\t\tif cur.level() == 
level {\n\t\t\tlevelGroups = append(levelGroups, cur)\n\t\t}\n\t}\n\n\t// Determine the minimum number of files required for the level.  Higher levels are more\n\t// CPU intensive so we only want to include them when we have enough data to make them\n\t// worthwhile.\n\t// minGenerations 1 -> 2\n\t// minGenerations 2 -> 2\n\t// minGenerations 3 -> 4\n\t// minGenerations 4 -> 4\n\tminGenerations := level\n\tif minGenerations%2 != 0 {\n\t\tminGenerations = level + 1\n\t}\n\n\tvar cGroups []CompactionGroup\n\tfor _, group := range levelGroups {\n\t\tfor _, chunk := range group.chunk(4) {\n\t\t\tvar cGroup CompactionGroup\n\t\t\tvar hasTombstones bool\n\t\t\tfor _, gen := range chunk {\n\t\t\t\tif gen.hasTombstones() {\n\t\t\t\t\thasTombstones = true\n\t\t\t\t}\n\t\t\t\tfor _, file := range gen.files {\n\t\t\t\t\tcGroup = append(cGroup, file.Path)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(chunk) < minGenerations && !hasTombstones {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcGroups = append(cGroups, cGroup)\n\t\t}\n\t}\n\n\tif !c.acquire(cGroups) {\n\t\treturn nil\n\t}\n\n\treturn cGroups\n}\n\n// PlanOptimize returns all TSM files if they are in different generations in order\n// to optimize the index across TSM files.  Each returned compaction group can be\n// compacted concurrently.\nfunc (c *DefaultPlanner) PlanOptimize() []CompactionGroup {\n\t// Determine the generations from all files on disk.  
We need to treat\n\t// a generation conceptually as a single file even though it may be\n\t// split across several files in sequence.\n\tgenerations := c.findGenerations()\n\n\t// If there is only one generation and no tombstones, then there's nothing to\n\t// do.\n\tif len(generations) <= 1 && !generations.hasTombstones() {\n\t\treturn nil\n\t}\n\n\t// Group each generation by level such that two adjacent generations in the same\n\t// level become part of the same group.\n\tvar currentGen tsmGenerations\n\tvar groups []tsmGenerations\n\tfor i := 0; i < len(generations); i++ {\n\t\tcur := generations[i]\n\n\t\tif len(currentGen) == 0 || currentGen.level() == cur.level() {\n\t\t\tcurrentGen = append(currentGen, cur)\n\t\t\tcontinue\n\t\t}\n\t\tgroups = append(groups, currentGen)\n\n\t\tcurrentGen = tsmGenerations{}\n\t\tcurrentGen = append(currentGen, cur)\n\t}\n\n\tif len(currentGen) > 0 {\n\t\tgroups = append(groups, currentGen)\n\t}\n\n\t// Only optimize level 4 files since using lower-levels will collide\n\t// with the level planners\n\tvar levelGroups []tsmGenerations\n\tfor _, cur := range groups {\n\t\tif cur.level() == 4 {\n\t\t\tlevelGroups = append(levelGroups, cur)\n\t\t}\n\t}\n\n\tvar cGroups []CompactionGroup\n\tfor _, group := range levelGroups {\n\t\t// Skip the group if it's not worthwhile to optimize it\n\t\tif len(group) < 4 && !group.hasTombstones() {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar cGroup CompactionGroup\n\t\tfor _, gen := range group {\n\t\t\tfor _, file := range gen.files {\n\t\t\t\tcGroup = append(cGroup, file.Path)\n\t\t\t}\n\t\t}\n\n\t\tcGroups = append(cGroups, cGroup)\n\t}\n\n\tif !c.acquire(cGroups) {\n\t\treturn nil\n\t}\n\n\treturn cGroups\n}\n\n// Plan returns a set of TSM files to rewrite for level 4 or higher.  
The planning returns\n// multiple groups if possible to allow compactions to run concurrently.\nfunc (c *DefaultPlanner) Plan(lastWrite time.Time) []CompactionGroup {\n\tgenerations := c.findGenerations()\n\n\t// first check if we should be doing a full compaction because nothing has been written in a long time\n\tif c.compactFullWriteColdDuration > 0 && time.Since(lastWrite) > c.compactFullWriteColdDuration && len(generations) > 1 {\n\t\tvar tsmFiles []string\n\t\tvar genCount int\n\t\tfor i, group := range generations {\n\t\t\tvar skip bool\n\n\t\t\t// Skip the file if it's over the max size and contains a full block and it does not have any tombstones\n\t\t\tif len(generations) > 2 && group.size() > uint64(maxTSMFileSize) && c.FileStore.BlockCount(group.files[0].Path, 1) == tsdb.DefaultMaxPointsPerBlock && !group.hasTombstones() {\n\t\t\t\tskip = true\n\t\t\t}\n\n\t\t\t// We need to look at the level of the next file because it may need to be combined with this generation\n\t\t\t// but won't get picked up on it's own if this generation is skipped.  
This allows the most recently\n\t\t\t// created files to get picked up by the full compaction planner and avoids having a few less optimally\n\t\t\t// compressed files.\n\t\t\tif i < len(generations)-1 {\n\t\t\t\tif generations[i+1].level() <= 3 {\n\t\t\t\t\tskip = false\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif skip {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, f := range group.files {\n\t\t\t\ttsmFiles = append(tsmFiles, f.Path)\n\t\t\t}\n\t\t\tgenCount += 1\n\t\t}\n\t\tsort.Strings(tsmFiles)\n\n\t\t// Make sure we have more than 1 file and more than 1 generation\n\t\tif len(tsmFiles) <= 1 || genCount <= 1 {\n\t\t\treturn nil\n\t\t}\n\n\t\tgroup := []CompactionGroup{tsmFiles}\n\t\tif !c.acquire(group) {\n\t\t\treturn nil\n\t\t}\n\t\treturn group\n\t}\n\n\t// don't plan if nothing has changed in the filestore\n\tif c.lastPlanCheck.After(c.FileStore.LastModified()) && !generations.hasTombstones() {\n\t\treturn nil\n\t}\n\n\tc.lastPlanCheck = time.Now()\n\n\t// If there is only one generation, return early to avoid re-compacting the same file\n\t// over and over again.\n\tif len(generations) <= 1 && !generations.hasTombstones() {\n\t\treturn nil\n\t}\n\n\t// Need to find the ending point for level 4 files.  They will be the oldest files. We scan\n\t// each generation in descending break once we see a file less than 4.\n\tend := 0\n\tstart := 0\n\tfor i, g := range generations {\n\t\tif g.level() <= 3 {\n\t\t\tbreak\n\t\t}\n\t\tend = i + 1\n\t}\n\n\t// As compactions run, the oldest files get bigger.  We don't want to re-compact them during\n\t// this planning if they are maxed out so skip over any we see.\n\tvar hasTombstones bool\n\tfor i, g := range generations[:end] {\n\t\tif g.hasTombstones() {\n\t\t\thasTombstones = true\n\t\t}\n\n\t\tif hasTombstones {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Skip the file if it's over the max size and contains a full block or the generation is split\n\t\t// over multiple files.  
In the latter case, that would mean the data in the file spilled over\n\t\t// the 2GB limit.\n\t\tif g.size() > uint64(maxTSMFileSize) && c.FileStore.BlockCount(g.files[0].Path, 1) == tsdb.DefaultMaxPointsPerBlock || g.count() > 1 {\n\t\t\tstart = i + 1\n\t\t}\n\n\t\t// This is an edge case that can happen after multiple compactions run.  The files at the beginning\n\t\t// can become larger faster than ones after them.  We want to skip those really big ones and just\n\t\t// compact the smaller ones until they are closer in size.\n\t\tif i > 0 {\n\t\t\tif g.size()*2 < generations[i-1].size() {\n\t\t\t\tstart = i\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// step is how may files to compact in a group.  We want to clamp it at 4 but also stil\n\t// return groups smaller than 4.\n\tstep := 4\n\tif step > end {\n\t\tstep = end\n\t}\n\n\t// slice off the generations that we'll examine\n\tgenerations = generations[start:end]\n\n\t// Loop through the generations in groups of size step and see if we can compact all (or\n\t// some of them as group)\n\tgroups := []tsmGenerations{}\n\tfor i := 0; i < len(generations); i += step {\n\t\tvar skipGroup bool\n\t\tstartIndex := i\n\n\t\tfor j := i; j < i+step && j < len(generations); j++ {\n\t\t\tgen := generations[j]\n\t\t\tlvl := gen.level()\n\n\t\t\t// Skip compacting this group if there happens to be any lower level files in the\n\t\t\t// middle.  
These will get picked up by the level compactors.\n\t\t\tif lvl <= 3 {\n\t\t\t\tskipGroup = true\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// Skip the file if it's over the max size and it contains a full block\n\t\t\tif gen.size() >= uint64(maxTSMFileSize) && c.FileStore.BlockCount(gen.files[0].Path, 1) == tsdb.DefaultMaxPointsPerBlock && !gen.hasTombstones() {\n\t\t\t\tstartIndex++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif skipGroup {\n\t\t\tcontinue\n\t\t}\n\n\t\tendIndex := i + step\n\t\tif endIndex > len(generations) {\n\t\t\tendIndex = len(generations)\n\t\t}\n\t\tif endIndex-startIndex > 0 {\n\t\t\tgroups = append(groups, generations[startIndex:endIndex])\n\t\t}\n\t}\n\n\tif len(groups) == 0 {\n\t\treturn nil\n\t}\n\n\t// With the groups, we need to evaluate whether the group as a whole can be compacted\n\tcompactable := []tsmGenerations{}\n\tfor _, group := range groups {\n\t\t//if we don't have enough generations to compact, skip it\n\t\tif len(group) < 2 && !group.hasTombstones() {\n\t\t\tcontinue\n\t\t}\n\t\tcompactable = append(compactable, group)\n\t}\n\n\t// All the files to be compacted must be compacted in order.  
We need to convert each\n\t// group to the actual set of files in that group to be compacted.\n\tvar tsmFiles []CompactionGroup\n\tfor _, c := range compactable {\n\t\tvar cGroup CompactionGroup\n\t\tfor _, group := range c {\n\t\t\tfor _, f := range group.files {\n\t\t\t\tcGroup = append(cGroup, f.Path)\n\t\t\t}\n\t\t}\n\t\tsort.Strings(cGroup)\n\t\ttsmFiles = append(tsmFiles, cGroup)\n\t}\n\n\tif !c.acquire(tsmFiles) {\n\t\treturn nil\n\t}\n\treturn tsmFiles\n}\n\n// findGenerations groups all the TSM files by generation based\n// on their filename, then returns the generations in descending order (newest first).\nfunc (c *DefaultPlanner) findGenerations() tsmGenerations {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tlast := c.lastFindGenerations\n\tlastGen := c.lastGenerations\n\n\tif !last.IsZero() && c.FileStore.LastModified().Equal(last) {\n\t\treturn lastGen\n\t}\n\n\tgenTime := c.FileStore.LastModified()\n\ttsmStats := c.FileStore.Stats()\n\tgenerations := make(map[int]*tsmGeneration, len(tsmStats))\n\tfor _, f := range tsmStats {\n\t\tgen, _, _ := ParseTSMFileName(f.Path)\n\n\t\tgroup := generations[gen]\n\t\tif group == nil {\n\t\t\tgroup = &tsmGeneration{\n\t\t\t\tid: gen,\n\t\t\t}\n\t\t\tgenerations[gen] = group\n\t\t}\n\t\tgroup.files = append(group.files, f)\n\t}\n\n\torderedGenerations := make(tsmGenerations, 0, len(generations))\n\tfor _, g := range generations {\n\t\torderedGenerations = append(orderedGenerations, g)\n\t}\n\tif !orderedGenerations.IsSorted() {\n\t\tsort.Sort(orderedGenerations)\n\t}\n\n\tc.lastFindGenerations = genTime\n\tc.lastGenerations = orderedGenerations\n\n\treturn orderedGenerations\n}\n\nfunc (c *DefaultPlanner) acquire(groups []CompactionGroup) bool {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\t// See if the new files are already in use\n\tfor _, g := range groups {\n\t\tfor _, f := range g {\n\t\t\tif _, ok := c.filesInUse[f]; ok {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\t// Mark all the new files in use\n\tfor _, g 
:= range groups {\n\t\tfor _, f := range g {\n\t\t\tc.filesInUse[f] = struct{}{}\n\t\t}\n\t}\n\treturn true\n}\n\n// Release removes the files reference in each compaction group allowing new plans\n// to be able to use them.\nfunc (c *DefaultPlanner) Release(groups []CompactionGroup) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tfor _, g := range groups {\n\t\tfor _, f := range g {\n\t\t\tdelete(c.filesInUse, f)\n\t\t}\n\t}\n}\n\n// Compactor merges multiple TSM files into new files or\n// writes a Cache into 1 or more TSM files.\ntype Compactor struct {\n\tDir  string\n\tSize int\n\n\tFileStore interface {\n\t\tNextGeneration() int\n\t}\n\n\tmu                 sync.RWMutex\n\tsnapshotsEnabled   bool\n\tcompactionsEnabled bool\n\n\t// The channel to signal that any in progress snapshots should be aborted.\n\tsnapshotsInterrupt chan struct{}\n\t// The channel to signal that any in progress level compactions should be aborted.\n\tcompactionsInterrupt chan struct{}\n\n\tfiles map[string]struct{}\n}\n\n// Open initializes the Compactor.\nfunc (c *Compactor) Open() {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tif c.snapshotsEnabled || c.compactionsEnabled {\n\t\treturn\n\t}\n\n\tc.snapshotsEnabled = true\n\tc.compactionsEnabled = true\n\tc.snapshotsInterrupt = make(chan struct{})\n\tc.compactionsInterrupt = make(chan struct{})\n\n\tc.files = make(map[string]struct{})\n}\n\n// Close disables the Compactor.\nfunc (c *Compactor) Close() {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tif !(c.snapshotsEnabled || c.compactionsEnabled) {\n\t\treturn\n\t}\n\tc.snapshotsEnabled = false\n\tc.compactionsEnabled = false\n\tif c.compactionsInterrupt != nil {\n\t\tclose(c.compactionsInterrupt)\n\t}\n\tif c.snapshotsInterrupt != nil {\n\t\tclose(c.snapshotsInterrupt)\n\t}\n}\n\n// DisableSnapshots disables the compactor from performing snapshots.\nfunc (c *Compactor) DisableSnapshots() {\n\tc.mu.Lock()\n\tc.snapshotsEnabled = false\n\tif c.snapshotsInterrupt != nil 
{\n\t\tclose(c.snapshotsInterrupt)\n\t\tc.snapshotsInterrupt = nil\n\t}\n\tc.mu.Unlock()\n}\n\n// EnableSnapshots allows the compactor to perform snapshots.\nfunc (c *Compactor) EnableSnapshots() {\n\tc.mu.Lock()\n\tc.snapshotsEnabled = true\n\tif c.snapshotsInterrupt == nil {\n\t\tc.snapshotsInterrupt = make(chan struct{})\n\t}\n\tc.mu.Unlock()\n}\n\n// DisableSnapshots disables the compactor from performing compactions.\nfunc (c *Compactor) DisableCompactions() {\n\tc.mu.Lock()\n\tc.compactionsEnabled = false\n\tif c.compactionsInterrupt != nil {\n\t\tclose(c.compactionsInterrupt)\n\t\tc.compactionsInterrupt = nil\n\t}\n\tc.mu.Unlock()\n}\n\n// EnableCompactions allows the compactor to perform compactions.\nfunc (c *Compactor) EnableCompactions() {\n\tc.mu.Lock()\n\tc.compactionsEnabled = true\n\tif c.compactionsInterrupt == nil {\n\t\tc.compactionsInterrupt = make(chan struct{})\n\t}\n\tc.mu.Unlock()\n}\n\n// WriteSnapshot writes a Cache snapshot to one or more new TSM files.\nfunc (c *Compactor) WriteSnapshot(cache *Cache) ([]string, error) {\n\tc.mu.RLock()\n\tenabled := c.snapshotsEnabled\n\tintC := c.snapshotsInterrupt\n\tc.mu.RUnlock()\n\n\tif !enabled {\n\t\treturn nil, errSnapshotsDisabled\n\t}\n\n\titer := NewCacheKeyIterator(cache, tsdb.DefaultMaxPointsPerBlock, intC)\n\tfiles, err := c.writeNewFiles(c.FileStore.NextGeneration(), 0, iter)\n\n\t// See if we were disabled while writing a snapshot\n\tc.mu.RLock()\n\tenabled = c.snapshotsEnabled\n\tc.mu.RUnlock()\n\n\tif !enabled {\n\t\treturn nil, errSnapshotsDisabled\n\t}\n\n\treturn files, err\n}\n\n// compact writes multiple smaller TSM files into 1 or more larger files.\nfunc (c *Compactor) compact(fast bool, tsmFiles []string) ([]string, error) {\n\tsize := c.Size\n\tif size <= 0 {\n\t\tsize = tsdb.DefaultMaxPointsPerBlock\n\t}\n\t// The new compacted files need to added to the max generation in the\n\t// set.  
We need to find that max generation as well as the max sequence\n\t// number to ensure we write to the next unique location.\n\tvar maxGeneration, maxSequence int\n\tfor _, f := range tsmFiles {\n\t\tgen, seq, err := ParseTSMFileName(f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif gen > maxGeneration {\n\t\t\tmaxGeneration = gen\n\t\t\tmaxSequence = seq\n\t\t}\n\n\t\tif gen == maxGeneration && seq > maxSequence {\n\t\t\tmaxSequence = seq\n\t\t}\n\t}\n\n\t// For each TSM file, create a TSM reader\n\tvar trs []*TSMReader\n\tfor _, file := range tsmFiles {\n\t\tf, err := os.Open(file)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttr, err := NewTSMReader(f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer tr.Close()\n\t\ttrs = append(trs, tr)\n\t}\n\n\tif len(trs) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tc.mu.RLock()\n\tintC := c.compactionsInterrupt\n\tc.mu.RUnlock()\n\n\ttsm, err := NewTSMKeyIterator(size, fast, intC, trs...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c.writeNewFiles(maxGeneration, maxSequence, tsm)\n}\n\n// CompactFull writes multiple smaller TSM files into 1 or more larger files.\nfunc (c *Compactor) CompactFull(tsmFiles []string) ([]string, error) {\n\tc.mu.RLock()\n\tenabled := c.compactionsEnabled\n\tc.mu.RUnlock()\n\n\tif !enabled {\n\t\treturn nil, errCompactionsDisabled\n\t}\n\n\tif !c.add(tsmFiles) {\n\t\treturn nil, errCompactionInProgress{}\n\t}\n\tdefer c.remove(tsmFiles)\n\n\tfiles, err := c.compact(false, tsmFiles)\n\n\t// See if we were disabled while writing a snapshot\n\tc.mu.RLock()\n\tenabled = c.compactionsEnabled\n\tc.mu.RUnlock()\n\n\tif !enabled {\n\t\tif err := c.removeTmpFiles(files); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errCompactionsDisabled\n\t}\n\n\treturn files, err\n}\n\n// CompactFast writes multiple smaller TSM files into 1 or more larger files.\nfunc (c *Compactor) CompactFast(tsmFiles []string) ([]string, error) {\n\tc.mu.RLock()\n\tenabled 
:= c.compactionsEnabled\n\tc.mu.RUnlock()\n\n\tif !enabled {\n\t\treturn nil, errCompactionsDisabled\n\t}\n\n\tif !c.add(tsmFiles) {\n\t\treturn nil, errCompactionInProgress{}\n\t}\n\tdefer c.remove(tsmFiles)\n\n\tfiles, err := c.compact(true, tsmFiles)\n\n\t// See if we were disabled while writing a snapshot\n\tc.mu.RLock()\n\tenabled = c.compactionsEnabled\n\tc.mu.RUnlock()\n\n\tif !enabled {\n\t\tif err := c.removeTmpFiles(files); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errCompactionsDisabled\n\t}\n\n\treturn files, err\n\n}\n\n// removeTmpFiles is responsible for cleaning up a compaction that\n// was started, but then abandoned before the temporary files were dealt with.\nfunc (c *Compactor) removeTmpFiles(files []string) error {\n\tfor _, f := range files {\n\t\tif err := os.Remove(f); err != nil {\n\t\t\treturn fmt.Errorf(\"error removing temp compaction file: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\n// writeNewFiles writes from the iterator into new TSM files, rotating\n// to a new file once it has reached the max TSM file size.\nfunc (c *Compactor) writeNewFiles(generation, sequence int, iter KeyIterator) ([]string, error) {\n\t// These are the new TSM files written\n\tvar files []string\n\n\tfor {\n\t\tsequence++\n\t\t// New TSM files are written to a temp file and renamed when fully completed.\n\t\tfileName := filepath.Join(c.Dir, fmt.Sprintf(\"%09d-%09d.%s.tmp\", generation, sequence, TSMFileExtension))\n\n\t\t// Write as much as possible to this file\n\t\terr := c.write(fileName, iter)\n\n\t\t// We've hit the max file limit and there is more to write.  
Create a new file\n\t\t// and continue.\n\t\tif err == errMaxFileExceeded || err == ErrMaxBlocksExceeded {\n\t\t\tfiles = append(files, fileName)\n\t\t\tcontinue\n\t\t} else if err == ErrNoValues {\n\t\t\t// If the file only contained tombstoned entries, then it would be a 0 length\n\t\t\t// file that we can drop.\n\t\t\tif err := os.RemoveAll(fileName); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbreak\n\t\t} else if _, ok := err.(errCompactionInProgress); ok {\n\t\t\t// Don't clean up the file as another compaction is using it.  This should not happen as the\n\t\t\t// planner keeps track of which files are assigned to compaction plans now.\n\t\t\treturn nil, err\n\t\t} else if err != nil {\n\t\t\t// Remove any tmp files we already completed\n\t\t\tfor _, f := range files {\n\t\t\t\tif err := os.RemoveAll(f); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\t// We hit an error and didn't finish the compaction.  Remove the temp file and abort.\n\t\t\tif err := os.RemoveAll(fileName); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfiles = append(files, fileName)\n\t\tbreak\n\t}\n\n\treturn files, nil\n}\n\nfunc (c *Compactor) write(path string, iter KeyIterator) (err error) {\n\tfd, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_EXCL, 0666)\n\tif err != nil {\n\t\treturn errCompactionInProgress{err: err}\n\t}\n\n\t// Create the write for the new TSM file.\n\tw, err := NewTSMWriter(fd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tcloseErr := w.Close()\n\t\tif err == nil {\n\t\t\terr = closeErr\n\t\t}\n\t}()\n\n\tfor iter.Next() {\n\t\tc.mu.RLock()\n\t\tenabled := c.snapshotsEnabled || c.compactionsEnabled\n\t\tc.mu.RUnlock()\n\n\t\tif !enabled {\n\t\t\treturn errCompactionAborted\n\t\t}\n\t\t// Each call to read returns the next sorted key (or the prior one if there are\n\t\t// more values to write).  
The size of values will be less than or equal to our\n\t\t// chunk size (1000)\n\t\tkey, minTime, maxTime, block, err := iter.Read()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Write the key and value\n\t\tif err := w.WriteBlock(key, minTime, maxTime, block); err == ErrMaxBlocksExceeded {\n\t\t\tif err := w.WriteIndex(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn err\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// If we have a max file size configured and we're over it, close out the file\n\t\t// and return the error.\n\t\tif w.Size() > maxTSMFileSize {\n\t\t\tif err := w.WriteIndex(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn errMaxFileExceeded\n\t\t}\n\t}\n\n\t// We're all done.  Close out the file.\n\tif err := w.WriteIndex(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *Compactor) add(files []string) bool {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\t// See if the new files are already in use\n\tfor _, f := range files {\n\t\tif _, ok := c.files[f]; ok {\n\t\t\treturn false\n\t\t}\n\t}\n\n\t// Mark all the new files in use\n\tfor _, f := range files {\n\t\tc.files[f] = struct{}{}\n\t}\n\treturn true\n}\n\nfunc (c *Compactor) remove(files []string) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tfor _, f := range files {\n\t\tdelete(c.files, f)\n\t}\n}\n\n// KeyIterator allows iteration over set of keys and values in sorted order.\ntype KeyIterator interface {\n\t// Next returns true if there are any values remaining in the iterator.\n\tNext() bool\n\n\t// Read returns the key, time range, and raw data for the next block,\n\t// or any error that occurred.\n\tRead() (key string, minTime int64, maxTime int64, data []byte, err error)\n\n\t// Close closes the iterator.\n\tClose() error\n}\n\n// tsmKeyIterator implements the KeyIterator for set of TSMReaders.  Iteration produces\n// keys in sorted order and the values between the keys sorted and deduped.  
If any of\n// the readers have associated tombstone entries, they are returned as part of iteration.\ntype tsmKeyIterator struct {\n\t// readers is the set of readers it produce a sorted key run with\n\treaders []*TSMReader\n\n\t// values is the temporary buffers for each key that is returned by a reader\n\tvalues map[string][]Value\n\n\t// pos is the current key postion within the corresponding readers slice.  A value of\n\t// pos[0] = 1, means the reader[0] is currently at key 1 in its ordered index.\n\tpos []int\n\n\t// err is any error we received while iterating values.\n\terr error\n\n\t// indicates whether the iterator should choose a faster merging strategy over a more\n\t// optimally compressed one.  If fast is true, multiple blocks will just be added as is\n\t// and not combined.  In some cases, a slower path will need to be utilized even when\n\t// fast is true to prevent overlapping blocks of time for the same key.\n\t// If false, the blocks will be decoded and duplicated (if needed) and\n\t// then chunked into the maximally sized blocks.\n\tfast bool\n\n\t// size is the maximum number of values to encode in a single block\n\tsize int\n\n\t// key is the current key lowest key across all readers that has not be fully exhausted\n\t// of values.\n\tkey string\n\ttyp byte\n\n\titerators []*BlockIterator\n\tblocks    blocks\n\n\tbuf []blocks\n\n\t// mergeValues are decoded blocks that have been combined\n\tmergedFloatValues   FloatValues\n\tmergedIntegerValues IntegerValues\n\tmergedBooleanValues BooleanValues\n\tmergedStringValues  StringValues\n\n\t// merged are encoded blocks that have been combined or used as is\n\t// without decode\n\tmerged    blocks\n\tinterrupt chan struct{}\n}\n\ntype block struct {\n\tkey              string\n\tminTime, maxTime int64\n\ttyp              byte\n\tb                []byte\n\ttombstones       []TimeRange\n\n\t// readMin, readMax are the timestamps range of values have been\n\t// read and encoded from this 
block.\n\treadMin, readMax int64\n}\n\nfunc (b *block) overlapsTimeRange(min, max int64) bool {\n\treturn b.minTime <= max && b.maxTime >= min\n}\n\nfunc (b *block) read() bool {\n\treturn b.readMin <= b.minTime && b.readMax >= b.maxTime\n}\n\nfunc (b *block) markRead(min, max int64) {\n\tif min < b.readMin {\n\t\tb.readMin = min\n\t}\n\n\tif max > b.readMax {\n\t\tb.readMax = max\n\t}\n}\n\nfunc (b *block) partiallyRead() bool {\n\treturn b.readMin != b.minTime || b.readMax != b.maxTime\n}\n\ntype blocks []*block\n\nfunc (a blocks) Len() int { return len(a) }\n\nfunc (a blocks) Less(i, j int) bool {\n\tif a[i].key == a[j].key {\n\t\treturn a[i].minTime < a[j].minTime\n\t}\n\treturn a[i].key < a[j].key\n}\n\nfunc (a blocks) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\n// NewTSMKeyIterator returns a new TSM key iterator from readers.\n// size indicates the maximum number of values to encode in a single block.\nfunc NewTSMKeyIterator(size int, fast bool, interrupt chan struct{}, readers ...*TSMReader) (KeyIterator, error) {\n\tvar iter []*BlockIterator\n\tfor _, r := range readers {\n\t\titer = append(iter, r.BlockIterator())\n\t}\n\n\treturn &tsmKeyIterator{\n\t\treaders:   readers,\n\t\tvalues:    map[string][]Value{},\n\t\tpos:       make([]int, len(readers)),\n\t\tsize:      size,\n\t\titerators: iter,\n\t\tfast:      fast,\n\t\tbuf:       make([]blocks, len(iter)),\n\t\tinterrupt: interrupt,\n\t}, nil\n}\n\nfunc (k *tsmKeyIterator) hasMergedValues() bool {\n\treturn len(k.mergedFloatValues) > 0 ||\n\t\tlen(k.mergedIntegerValues) > 0 ||\n\t\tlen(k.mergedStringValues) > 0 ||\n\t\tlen(k.mergedBooleanValues) > 0\n}\n\n// Next returns true if there are any values remaining in the iterator.\nfunc (k *tsmKeyIterator) Next() bool {\n\t// Any merged blocks pending?\n\tif len(k.merged) > 0 {\n\t\tk.merged = k.merged[1:]\n\t\tif len(k.merged) > 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\n\t// Any merged values pending?\n\tif k.hasMergedValues() {\n\t\tk.merge()\n\t\tif 
len(k.merged) > 0 || k.hasMergedValues() {\n\t\t\treturn true\n\t\t}\n\t}\n\n\t// If we still have blocks from the last read, merge them\n\tif len(k.blocks) > 0 {\n\t\tk.merge()\n\t\tif len(k.merged) > 0 || k.hasMergedValues() {\n\t\t\treturn true\n\t\t}\n\t}\n\n\t// Read the next block from each TSM iterator\n\tfor i, v := range k.buf {\n\t\tif v == nil {\n\t\t\titer := k.iterators[i]\n\t\t\tif iter.Next() {\n\t\t\t\tkey, minTime, maxTime, typ, _, b, err := iter.Read()\n\t\t\t\tif err != nil {\n\t\t\t\t\tk.err = err\n\t\t\t\t}\n\n\t\t\t\t// This block may have ranges of time removed from it that would\n\t\t\t\t// reduce the block min and max time.\n\t\t\t\ttombstones := iter.r.TombstoneRange(key)\n\t\t\t\tk.buf[i] = append(k.buf[i], &block{\n\t\t\t\t\tminTime:    minTime,\n\t\t\t\t\tmaxTime:    maxTime,\n\t\t\t\t\tkey:        key,\n\t\t\t\t\ttyp:        typ,\n\t\t\t\t\tb:          b,\n\t\t\t\t\ttombstones: tombstones,\n\t\t\t\t\treadMin:    math.MaxInt64,\n\t\t\t\t\treadMax:    math.MinInt64,\n\t\t\t\t})\n\n\t\t\t\tblockKey := key\n\t\t\t\tfor iter.PeekNext() == blockKey {\n\t\t\t\t\titer.Next()\n\t\t\t\t\tkey, minTime, maxTime, typ, _, b, err := iter.Read()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tk.err = err\n\t\t\t\t\t}\n\n\t\t\t\t\ttombstones := iter.r.TombstoneRange(key)\n\n\t\t\t\t\tk.buf[i] = append(k.buf[i], &block{\n\t\t\t\t\t\tminTime:    minTime,\n\t\t\t\t\t\tmaxTime:    maxTime,\n\t\t\t\t\t\tkey:        key,\n\t\t\t\t\t\ttyp:        typ,\n\t\t\t\t\t\tb:          b,\n\t\t\t\t\t\ttombstones: tombstones,\n\t\t\t\t\t\treadMin:    math.MaxInt64,\n\t\t\t\t\t\treadMax:    math.MinInt64,\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Each reader could have a different key that it's currently at, need to find\n\t// the next smallest one to keep the sort ordering.\n\tvar minKey string\n\tvar minType byte\n\tfor _, b := range k.buf {\n\t\t// block could be nil if the iterator has been exhausted for that file\n\t\tif len(b) == 0 
{\n\t\t\tcontinue\n\t\t}\n\t\tif minKey == \"\" || b[0].key < minKey {\n\t\t\tminKey = b[0].key\n\t\t\tminType = b[0].typ\n\t\t}\n\t}\n\tk.key = minKey\n\tk.typ = minType\n\n\t// Now we need to find all blocks that match the min key so we can combine and dedupe\n\t// the blocks if necessary\n\tfor i, b := range k.buf {\n\t\tif len(b) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif b[0].key == k.key {\n\t\t\tk.blocks = append(k.blocks, b...)\n\t\t\tk.buf[i] = nil\n\t\t}\n\t}\n\n\tif len(k.blocks) == 0 {\n\t\treturn false\n\t}\n\n\tk.merge()\n\n\treturn len(k.merged) > 0\n}\n\n// merge combines the next set of blocks into merged blocks.\nfunc (k *tsmKeyIterator) merge() {\n\tswitch k.typ {\n\tcase BlockFloat64:\n\t\tk.mergeFloat()\n\tcase BlockInteger:\n\t\tk.mergeInteger()\n\tcase BlockBoolean:\n\t\tk.mergeBoolean()\n\tcase BlockString:\n\t\tk.mergeString()\n\tdefault:\n\t\tk.err = fmt.Errorf(\"unknown block type: %v\", k.typ)\n\t}\n}\n\nfunc (k *tsmKeyIterator) Read() (string, int64, int64, []byte, error) {\n\t// See if compactions were disabled while we were running.\n\tselect {\n\tcase <-k.interrupt:\n\t\treturn \"\", 0, 0, nil, errCompactionAborted\n\tdefault:\n\t}\n\n\tif len(k.merged) == 0 {\n\t\treturn \"\", 0, 0, nil, k.err\n\t}\n\n\tblock := k.merged[0]\n\treturn block.key, block.minTime, block.maxTime, block.b, k.err\n}\n\nfunc (k *tsmKeyIterator) Close() error {\n\tk.values = nil\n\tk.pos = nil\n\tk.iterators = nil\n\tfor _, r := range k.readers {\n\t\tif err := r.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\ntype cacheKeyIterator struct {\n\tcache *Cache\n\tsize  int\n\torder []string\n\n\ti         int\n\tblocks    [][]cacheBlock\n\tready     []chan struct{}\n\tinterrupt chan struct{}\n}\n\ntype cacheBlock struct {\n\tk                string\n\tminTime, maxTime int64\n\tb                []byte\n\terr              error\n}\n\n// NewCacheKeyIterator returns a new KeyIterator from a Cache.\nfunc NewCacheKeyIterator(cache *Cache, size int, 
interrupt chan struct{}) KeyIterator {\n\tkeys := cache.Keys()\n\n\tchans := make([]chan struct{}, len(keys))\n\tfor i := 0; i < len(keys); i++ {\n\t\tchans[i] = make(chan struct{}, 1)\n\t}\n\n\tcki := &cacheKeyIterator{\n\t\ti:         -1,\n\t\tsize:      size,\n\t\tcache:     cache,\n\t\torder:     keys,\n\t\tready:     chans,\n\t\tblocks:    make([][]cacheBlock, len(keys)),\n\t\tinterrupt: interrupt,\n\t}\n\tgo cki.encode()\n\treturn cki\n}\n\nfunc (c *cacheKeyIterator) encode() {\n\tconcurrency := runtime.GOMAXPROCS(0)\n\tn := len(c.ready)\n\n\t// Divide the keyset across each CPU\n\tchunkSize := 128\n\tidx := uint64(0)\n\tfor i := 0; i < concurrency; i++ {\n\t\t// Run one goroutine per CPU and encode a section of the key space concurrently\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tstart := int(atomic.AddUint64(&idx, uint64(chunkSize))) - chunkSize\n\t\t\t\tif start >= n {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tend := start + chunkSize\n\t\t\t\tif end > n {\n\t\t\t\t\tend = n\n\t\t\t\t}\n\t\t\t\tc.encodeRange(start, end)\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc (c *cacheKeyIterator) encodeRange(start, stop int) {\n\tfor i := start; i < stop; i++ {\n\t\tkey := c.order[i]\n\t\tvalues := c.cache.values(key)\n\n\t\tfor len(values) > 0 {\n\t\t\tminTime, maxTime := values[0].UnixNano(), values[len(values)-1].UnixNano()\n\t\t\tvar b []byte\n\t\t\tvar err error\n\t\t\tif len(values) > c.size {\n\t\t\t\tmaxTime = values[c.size-1].UnixNano()\n\t\t\t\tb, err = Values(values[:c.size]).Encode(nil)\n\t\t\t\tvalues = values[c.size:]\n\t\t\t} else {\n\t\t\t\tb, err = Values(values).Encode(nil)\n\t\t\t\tvalues = values[:0]\n\t\t\t}\n\t\t\tc.blocks[i] = append(c.blocks[i], cacheBlock{\n\t\t\t\tk:       key,\n\t\t\t\tminTime: minTime,\n\t\t\t\tmaxTime: maxTime,\n\t\t\t\tb:       b,\n\t\t\t\terr:     err,\n\t\t\t})\n\t\t}\n\t\t// Notify this key is fully encoded\n\t\tc.ready[i] <- struct{}{}\n\t}\n}\n\nfunc (c *cacheKeyIterator) Next() bool {\n\tif c.i >= 0 && c.i < len(c.ready) && 
len(c.blocks[c.i]) > 0 {\n\t\tc.blocks[c.i] = c.blocks[c.i][1:]\n\t\tif len(c.blocks[c.i]) > 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\tc.i++\n\n\tif c.i >= len(c.ready) {\n\t\treturn false\n\t}\n\n\t<-c.ready[c.i]\n\treturn true\n}\n\nfunc (c *cacheKeyIterator) Read() (string, int64, int64, []byte, error) {\n\t// See if snapshot compactions were disabled while we were running.\n\tselect {\n\tcase <-c.interrupt:\n\t\treturn \"\", 0, 0, nil, errCompactionAborted\n\tdefault:\n\t}\n\n\tblk := c.blocks[c.i][0]\n\treturn blk.k, blk.minTime, blk.maxTime, blk.b, blk.err\n}\n\nfunc (c *cacheKeyIterator) Close() error {\n\treturn nil\n}\n\ntype tsmGenerations []*tsmGeneration\n\nfunc (a tsmGenerations) Len() int           { return len(a) }\nfunc (a tsmGenerations) Less(i, j int) bool { return a[i].id < a[j].id }\nfunc (a tsmGenerations) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a tsmGenerations) hasTombstones() bool {\n\tfor _, g := range a {\n\t\tif g.hasTombstones() {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (a tsmGenerations) level() int {\n\tvar level int\n\tfor _, g := range a {\n\t\tlev := g.level()\n\t\tif lev > level {\n\t\t\tlevel = lev\n\t\t}\n\t}\n\treturn level\n}\n\nfunc (a tsmGenerations) chunk(size int) []tsmGenerations {\n\tvar chunks []tsmGenerations\n\tfor len(a) > 0 {\n\t\tif len(a) >= size {\n\t\t\tchunks = append(chunks, a[:size])\n\t\t\ta = a[size:]\n\t\t} else {\n\t\t\tchunks = append(chunks, a)\n\t\t\ta = a[len(a):]\n\t\t}\n\t}\n\treturn chunks\n}\n\nfunc (a tsmGenerations) IsSorted() bool {\n\tif len(a) == 1 {\n\t\treturn true\n\t}\n\n\tfor i := 1; i < len(a); i++ {\n\t\tif a.Less(i, i-1) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact_test.go",
    "content": "package tsm1_test\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t\"github.com/influxdata/influxdb/tsdb/engine/tsm1\"\n)\n\n//  Tests compacting a Cache snapshot into a single TSM file\nfunc TestCompactor_Snapshot(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\tv1 := tsm1.NewValue(1, float64(1))\n\tv2 := tsm1.NewValue(1, float64(1))\n\tv3 := tsm1.NewValue(2, float64(2))\n\n\tpoints1 := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{v1},\n\t\t\"cpu,host=B#!~#value\": []tsm1.Value{v2, v3},\n\t}\n\n\tc := tsm1.NewCache(0, \"\")\n\tfor k, v := range points1 {\n\t\tif err := c.Write(k, v); err != nil {\n\t\t\tt.Fatalf(\"failed to write key foo to cache: %s\", err.Error())\n\t\t}\n\t}\n\n\tcompactor := &tsm1.Compactor{\n\t\tDir:       dir,\n\t\tFileStore: &fakeFileStore{},\n\t}\n\n\tfiles, err := compactor.WriteSnapshot(c)\n\tif err == nil {\n\t\tt.Fatalf(\"expected error writing snapshot: %v\", err)\n\t}\n\tif len(files) > 0 {\n\t\tt.Fatalf(\"no files should be compacted: got %v\", len(files))\n\n\t}\n\n\tcompactor.Open()\n\n\tfiles, err = compactor.WriteSnapshot(c)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error writing snapshot: %v\", err)\n\t}\n\n\tif got, exp := len(files), 1; got != exp {\n\t\tt.Fatalf(\"files length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tr := MustOpenTSMReader(files[0])\n\n\tif got, exp := r.KeyCount(), 2; got != exp {\n\t\tt.Fatalf(\"keys length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tpoints []tsm1.Value\n\t}{\n\t\t{\"cpu,host=A#!~#value\", []tsm1.Value{v1}},\n\t\t{\"cpu,host=B#!~#value\", []tsm1.Value{v2, v3}},\n\t}\n\n\tfor _, p := range data {\n\t\tvalues, err := r.ReadAll(p.key)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error reading: %v\", err)\n\t\t}\n\n\t\tif got, exp := len(values), len(p.points); got != exp 
{\n\t\t\tt.Fatalf(\"values length mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tfor i, point := range p.points {\n\t\t\tassertValueEqual(t, values[i], point)\n\t\t}\n\t}\n}\n\n// Ensures that a compaction will properly merge multiple TSM files\nfunc TestCompactor_CompactFull(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\t// write 3 TSM files with different data and one new point\n\ta1 := tsm1.NewValue(1, 1.1)\n\twrites := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{a1},\n\t}\n\tf1 := MustWriteTSM(dir, 1, writes)\n\n\ta2 := tsm1.NewValue(2, 1.2)\n\tb1 := tsm1.NewValue(1, 2.1)\n\twrites = map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{a2},\n\t\t\"cpu,host=B#!~#value\": []tsm1.Value{b1},\n\t}\n\tf2 := MustWriteTSM(dir, 2, writes)\n\n\ta3 := tsm1.NewValue(1, 1.3)\n\tc1 := tsm1.NewValue(1, 3.1)\n\twrites = map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{a3},\n\t\t\"cpu,host=C#!~#value\": []tsm1.Value{c1},\n\t}\n\tf3 := MustWriteTSM(dir, 3, writes)\n\n\tcompactor := &tsm1.Compactor{\n\t\tDir:       dir,\n\t\tFileStore: &fakeFileStore{},\n\t}\n\n\tfiles, err := compactor.CompactFull([]string{f1, f2, f3})\n\tif err == nil {\n\t\tt.Fatalf(\"expected error writing snapshot: %v\", err)\n\t}\n\tif len(files) > 0 {\n\t\tt.Fatalf(\"no files should be compacted: got %v\", len(files))\n\n\t}\n\n\tcompactor.Open()\n\n\tfiles, err = compactor.CompactFull([]string{f1, f2, f3})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error writing snapshot: %v\", err)\n\t}\n\n\tif got, exp := len(files), 1; got != exp {\n\t\tt.Fatalf(\"files length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\texpGen, expSeq, err := tsm1.ParseTSMFileName(f3)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error parsing file name: %v\", err)\n\t}\n\texpSeq = expSeq + 1\n\n\tgotGen, gotSeq, err := tsm1.ParseTSMFileName(files[0])\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error parsing file name: %v\", 
err)\n\t}\n\n\tif gotGen != expGen {\n\t\tt.Fatalf(\"wrong generation for new file: got %v, exp %v\", gotGen, expGen)\n\t}\n\n\tif gotSeq != expSeq {\n\t\tt.Fatalf(\"wrong sequence for new file: got %v, exp %v\", gotSeq, expSeq)\n\t}\n\n\tr := MustOpenTSMReader(files[0])\n\n\tif got, exp := r.KeyCount(), 3; got != exp {\n\t\tt.Fatalf(\"keys length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tpoints []tsm1.Value\n\t}{\n\t\t{\"cpu,host=A#!~#value\", []tsm1.Value{a3, a2}},\n\t\t{\"cpu,host=B#!~#value\", []tsm1.Value{b1}},\n\t\t{\"cpu,host=C#!~#value\", []tsm1.Value{c1}},\n\t}\n\n\tfor _, p := range data {\n\t\tvalues, err := r.ReadAll(p.key)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error reading: %v\", err)\n\t\t}\n\n\t\tif got, exp := len(values), len(p.points); got != exp {\n\t\t\tt.Fatalf(\"values length mismatch %s: got %v, exp %v\", p.key, got, exp)\n\t\t}\n\n\t\tfor i, point := range p.points {\n\t\t\tassertValueEqual(t, values[i], point)\n\t\t}\n\t}\n}\n\n// Ensures that a compaction will properly merge multiple TSM files\nfunc TestCompactor_Compact_OverlappingBlocks(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\t// write 3 TSM files with different data and one new point\n\ta1 := tsm1.NewValue(4, 1.1)\n\ta2 := tsm1.NewValue(5, 1.1)\n\ta3 := tsm1.NewValue(7, 1.1)\n\n\twrites := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{a1, a2, a3},\n\t}\n\tf1 := MustWriteTSM(dir, 1, writes)\n\n\tc1 := tsm1.NewValue(3, 1.2)\n\tc2 := tsm1.NewValue(8, 1.2)\n\tc3 := tsm1.NewValue(9, 1.2)\n\n\twrites = map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{c1, c2, c3},\n\t}\n\tf3 := MustWriteTSM(dir, 3, writes)\n\n\tcompactor := &tsm1.Compactor{\n\t\tDir:       dir,\n\t\tFileStore: &fakeFileStore{},\n\t\tSize:      2,\n\t}\n\n\tcompactor.Open()\n\n\tfiles, err := compactor.CompactFast([]string{f1, f3})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error writing 
snapshot: %v\", err)\n\t}\n\n\tif got, exp := len(files), 1; got != exp {\n\t\tt.Fatalf(\"files length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tr := MustOpenTSMReader(files[0])\n\n\tif got, exp := r.KeyCount(), 1; got != exp {\n\t\tt.Fatalf(\"keys length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tpoints []tsm1.Value\n\t}{\n\t\t{\"cpu,host=A#!~#value\", []tsm1.Value{c1, a1, a2, a3, c2, c3}},\n\t}\n\n\tfor _, p := range data {\n\t\tvalues, err := r.ReadAll(p.key)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error reading: %v\", err)\n\t\t}\n\n\t\tif got, exp := len(values), len(p.points); got != exp {\n\t\t\tt.Fatalf(\"values length mismatch %s: got %v, exp %v\", p.key, got, exp)\n\t\t}\n\n\t\tfor i, point := range p.points {\n\t\t\tassertValueEqual(t, values[i], point)\n\t\t}\n\t}\n}\n\n// Ensures that a compaction will properly merge multiple TSM files\nfunc TestCompactor_Compact_OverlappingBlocksMultiple(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\t// write 3 TSM files with different data and one new point\n\ta1 := tsm1.NewValue(4, 1.1)\n\ta2 := tsm1.NewValue(5, 1.1)\n\ta3 := tsm1.NewValue(7, 1.1)\n\n\twrites := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{a1, a2, a3},\n\t}\n\tf1 := MustWriteTSM(dir, 1, writes)\n\n\tb1 := tsm1.NewValue(1, 1.2)\n\tb2 := tsm1.NewValue(2, 1.2)\n\tb3 := tsm1.NewValue(6, 1.2)\n\n\twrites = map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{b1, b2, b3},\n\t}\n\tf2 := MustWriteTSM(dir, 2, writes)\n\n\tc1 := tsm1.NewValue(3, 1.2)\n\tc2 := tsm1.NewValue(8, 1.2)\n\tc3 := tsm1.NewValue(9, 1.2)\n\n\twrites = map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{c1, c2, c3},\n\t}\n\tf3 := MustWriteTSM(dir, 3, writes)\n\n\tcompactor := &tsm1.Compactor{\n\t\tDir:       dir,\n\t\tFileStore: &fakeFileStore{},\n\t\tSize:      2,\n\t}\n\n\tcompactor.Open()\n\n\tfiles, err := 
compactor.CompactFast([]string{f1, f2, f3})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error writing snapshot: %v\", err)\n\t}\n\n\tif got, exp := len(files), 1; got != exp {\n\t\tt.Fatalf(\"files length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tr := MustOpenTSMReader(files[0])\n\n\tif got, exp := r.KeyCount(), 1; got != exp {\n\t\tt.Fatalf(\"keys length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tpoints []tsm1.Value\n\t}{\n\t\t{\"cpu,host=A#!~#value\", []tsm1.Value{b1, b2, c1, a1, a2, b3, a3, c2, c3}},\n\t}\n\n\tfor _, p := range data {\n\t\tvalues, err := r.ReadAll(p.key)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error reading: %v\", err)\n\t\t}\n\n\t\tif got, exp := len(values), len(p.points); got != exp {\n\t\t\tt.Fatalf(\"values length mismatch %s: got %v, exp %v\", p.key, got, exp)\n\t\t}\n\n\t\tfor i, point := range p.points {\n\t\t\tassertValueEqual(t, values[i], point)\n\t\t}\n\t}\n}\n\n// Ensures that a compaction will properly merge multiple TSM files\nfunc TestCompactor_CompactFull_SkipFullBlocks(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\t// write 3 TSM files with different data and one new point\n\ta1 := tsm1.NewValue(1, 1.1)\n\ta2 := tsm1.NewValue(2, 1.2)\n\twrites := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{a1, a2},\n\t}\n\tf1 := MustWriteTSM(dir, 1, writes)\n\n\ta3 := tsm1.NewValue(3, 1.3)\n\twrites = map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{a3},\n\t}\n\tf2 := MustWriteTSM(dir, 2, writes)\n\n\ta4 := tsm1.NewValue(4, 1.4)\n\twrites = map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{a4},\n\t}\n\tf3 := MustWriteTSM(dir, 3, writes)\n\n\tcompactor := &tsm1.Compactor{\n\t\tDir:       dir,\n\t\tFileStore: &fakeFileStore{},\n\t\tSize:      2,\n\t}\n\tcompactor.Open()\n\n\tfiles, err := compactor.CompactFull([]string{f1, f2, f3})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error writing 
snapshot: %v\", err)\n\t}\n\n\tif got, exp := len(files), 1; got != exp {\n\t\tt.Fatalf(\"files length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\texpGen, expSeq, err := tsm1.ParseTSMFileName(f3)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error parsing file name: %v\", err)\n\t}\n\texpSeq = expSeq + 1\n\n\tgotGen, gotSeq, err := tsm1.ParseTSMFileName(files[0])\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error parsing file name: %v\", err)\n\t}\n\n\tif gotGen != expGen {\n\t\tt.Fatalf(\"wrong generation for new file: got %v, exp %v\", gotGen, expGen)\n\t}\n\n\tif gotSeq != expSeq {\n\t\tt.Fatalf(\"wrong sequence for new file: got %v, exp %v\", gotSeq, expSeq)\n\t}\n\n\tr := MustOpenTSMReader(files[0])\n\n\tif got, exp := r.KeyCount(), 1; got != exp {\n\t\tt.Fatalf(\"keys length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tpoints []tsm1.Value\n\t}{\n\t\t{\"cpu,host=A#!~#value\", []tsm1.Value{a1, a2, a3, a4}},\n\t}\n\n\tfor _, p := range data {\n\t\tvalues, err := r.ReadAll(p.key)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error reading: %v\", err)\n\t\t}\n\n\t\tif got, exp := len(values), len(p.points); got != exp {\n\t\t\tt.Fatalf(\"values length mismatch %s: got %v, exp %v\", p.key, got, exp)\n\t\t}\n\n\t\tfor i, point := range p.points {\n\t\t\tassertValueEqual(t, values[i], point)\n\t\t}\n\t}\n\n\tif got, exp := len(r.Entries(\"cpu,host=A#!~#value\")), 2; got != exp {\n\t\tt.Fatalf(\"block count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\n// Ensures that a full compaction will skip over blocks that have the full\n// range of time contained in the block tombstoned\nfunc TestCompactor_CompactFull_TombstonedSkipBlock(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\t// write 3 TSM files with different data and one new point\n\ta1 := tsm1.NewValue(1, 1.1)\n\ta2 := tsm1.NewValue(2, 1.2)\n\twrites := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{a1, 
a2},\n\t}\n\tf1 := MustWriteTSM(dir, 1, writes)\n\n\tts := tsm1.Tombstoner{\n\t\tPath: f1,\n\t}\n\tts.AddRange([]string{\"cpu,host=A#!~#value\"}, math.MinInt64, math.MaxInt64)\n\n\ta3 := tsm1.NewValue(3, 1.3)\n\twrites = map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{a3},\n\t}\n\tf2 := MustWriteTSM(dir, 2, writes)\n\n\ta4 := tsm1.NewValue(4, 1.4)\n\twrites = map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{a4},\n\t}\n\tf3 := MustWriteTSM(dir, 3, writes)\n\n\tcompactor := &tsm1.Compactor{\n\t\tDir:       dir,\n\t\tFileStore: &fakeFileStore{},\n\t\tSize:      2,\n\t}\n\tcompactor.Open()\n\n\tfiles, err := compactor.CompactFull([]string{f1, f2, f3})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error writing snapshot: %v\", err)\n\t}\n\n\tif got, exp := len(files), 1; got != exp {\n\t\tt.Fatalf(\"files length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\texpGen, expSeq, err := tsm1.ParseTSMFileName(f3)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error parsing file name: %v\", err)\n\t}\n\texpSeq = expSeq + 1\n\n\tgotGen, gotSeq, err := tsm1.ParseTSMFileName(files[0])\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error parsing file name: %v\", err)\n\t}\n\n\tif gotGen != expGen {\n\t\tt.Fatalf(\"wrong generation for new file: got %v, exp %v\", gotGen, expGen)\n\t}\n\n\tif gotSeq != expSeq {\n\t\tt.Fatalf(\"wrong sequence for new file: got %v, exp %v\", gotSeq, expSeq)\n\t}\n\n\tr := MustOpenTSMReader(files[0])\n\n\tif got, exp := r.KeyCount(), 1; got != exp {\n\t\tt.Fatalf(\"keys length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tpoints []tsm1.Value\n\t}{\n\t\t{\"cpu,host=A#!~#value\", []tsm1.Value{a3, a4}},\n\t}\n\n\tfor _, p := range data {\n\t\tvalues, err := r.ReadAll(p.key)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error reading: %v\", err)\n\t\t}\n\n\t\tif got, exp := len(values), len(p.points); got != exp {\n\t\t\tt.Fatalf(\"values length mismatch %s: got %v, 
exp %v\", p.key, got, exp)\n\t\t}\n\n\t\tfor i, point := range p.points {\n\t\t\tassertValueEqual(t, values[i], point)\n\t\t}\n\t}\n\n\tif got, exp := len(r.Entries(\"cpu,host=A#!~#value\")), 1; got != exp {\n\t\tt.Fatalf(\"block count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\n// Ensures that a full compaction will decode and combine blocks with\n// partial tombstoned values\nfunc TestCompactor_CompactFull_TombstonedPartialBlock(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\t// write 3 TSM files with different data and one new point\n\ta1 := tsm1.NewValue(1, 1.1)\n\ta2 := tsm1.NewValue(2, 1.2)\n\twrites := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{a1, a2},\n\t}\n\tf1 := MustWriteTSM(dir, 1, writes)\n\n\tts := tsm1.Tombstoner{\n\t\tPath: f1,\n\t}\n\t// a1 should remain after compaction\n\tts.AddRange([]string{\"cpu,host=A#!~#value\"}, 2, math.MaxInt64)\n\n\ta3 := tsm1.NewValue(3, 1.3)\n\twrites = map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{a3},\n\t}\n\tf2 := MustWriteTSM(dir, 2, writes)\n\n\ta4 := tsm1.NewValue(4, 1.4)\n\twrites = map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{a4},\n\t}\n\tf3 := MustWriteTSM(dir, 3, writes)\n\n\tcompactor := &tsm1.Compactor{\n\t\tDir:       dir,\n\t\tFileStore: &fakeFileStore{},\n\t\tSize:      2,\n\t}\n\tcompactor.Open()\n\n\tfiles, err := compactor.CompactFull([]string{f1, f2, f3})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error writing snapshot: %v\", err)\n\t}\n\n\tif got, exp := len(files), 1; got != exp {\n\t\tt.Fatalf(\"files length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\texpGen, expSeq, err := tsm1.ParseTSMFileName(f3)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error parsing file name: %v\", err)\n\t}\n\texpSeq = expSeq + 1\n\n\tgotGen, gotSeq, err := tsm1.ParseTSMFileName(files[0])\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error parsing file name: %v\", err)\n\t}\n\n\tif gotGen != expGen 
{\n\t\tt.Fatalf(\"wrong generation for new file: got %v, exp %v\", gotGen, expGen)\n\t}\n\n\tif gotSeq != expSeq {\n\t\tt.Fatalf(\"wrong sequence for new file: got %v, exp %v\", gotSeq, expSeq)\n\t}\n\n\tr := MustOpenTSMReader(files[0])\n\n\tif got, exp := r.KeyCount(), 1; got != exp {\n\t\tt.Fatalf(\"keys length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tpoints []tsm1.Value\n\t}{\n\t\t{\"cpu,host=A#!~#value\", []tsm1.Value{a1, a3, a4}},\n\t}\n\n\tfor _, p := range data {\n\t\tvalues, err := r.ReadAll(p.key)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error reading: %v\", err)\n\t\t}\n\n\t\tif got, exp := len(values), len(p.points); got != exp {\n\t\t\tt.Fatalf(\"values length mismatch %s: got %v, exp %v\", p.key, got, exp)\n\t\t}\n\n\t\tfor i, point := range p.points {\n\t\t\tassertValueEqual(t, values[i], point)\n\t\t}\n\t}\n\n\tif got, exp := len(r.Entries(\"cpu,host=A#!~#value\")), 2; got != exp {\n\t\tt.Fatalf(\"block count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\n// Ensures that a full compaction will decode and combine blocks with\n// multiple tombstoned ranges within the block e.g. 
(t1, t2, t3, t4)\n// having t2 and t3 removed\nfunc TestCompactor_CompactFull_TombstonedMultipleRanges(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\t// write 3 TSM files with different data and one new point\n\ta1 := tsm1.NewValue(1, 1.1)\n\ta2 := tsm1.NewValue(2, 1.2)\n\ta3 := tsm1.NewValue(3, 1.3)\n\ta4 := tsm1.NewValue(4, 1.4)\n\n\twrites := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{a1, a2, a3, a4},\n\t}\n\tf1 := MustWriteTSM(dir, 1, writes)\n\n\tts := tsm1.Tombstoner{\n\t\tPath: f1,\n\t}\n\t// a1, a3 should remain after compaction\n\tts.AddRange([]string{\"cpu,host=A#!~#value\"}, 2, 2)\n\tts.AddRange([]string{\"cpu,host=A#!~#value\"}, 4, 4)\n\n\ta5 := tsm1.NewValue(5, 1.5)\n\twrites = map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{a5},\n\t}\n\tf2 := MustWriteTSM(dir, 2, writes)\n\n\ta6 := tsm1.NewValue(6, 1.6)\n\twrites = map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{a6},\n\t}\n\tf3 := MustWriteTSM(dir, 3, writes)\n\n\tcompactor := &tsm1.Compactor{\n\t\tDir:       dir,\n\t\tFileStore: &fakeFileStore{},\n\t\tSize:      2,\n\t}\n\tcompactor.Open()\n\n\tfiles, err := compactor.CompactFull([]string{f1, f2, f3})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error writing snapshot: %v\", err)\n\t}\n\n\tif got, exp := len(files), 1; got != exp {\n\t\tt.Fatalf(\"files length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\texpGen, expSeq, err := tsm1.ParseTSMFileName(f3)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error parsing file name: %v\", err)\n\t}\n\texpSeq = expSeq + 1\n\n\tgotGen, gotSeq, err := tsm1.ParseTSMFileName(files[0])\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error parsing file name: %v\", err)\n\t}\n\n\tif gotGen != expGen {\n\t\tt.Fatalf(\"wrong generation for new file: got %v, exp %v\", gotGen, expGen)\n\t}\n\n\tif gotSeq != expSeq {\n\t\tt.Fatalf(\"wrong sequence for new file: got %v, exp %v\", gotSeq, expSeq)\n\t}\n\n\tr := 
MustOpenTSMReader(files[0])\n\n\tif got, exp := r.KeyCount(), 1; got != exp {\n\t\tt.Fatalf(\"keys length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tpoints []tsm1.Value\n\t}{\n\t\t{\"cpu,host=A#!~#value\", []tsm1.Value{a1, a3, a5, a6}},\n\t}\n\n\tfor _, p := range data {\n\t\tvalues, err := r.ReadAll(p.key)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error reading: %v\", err)\n\t\t}\n\n\t\tif got, exp := len(values), len(p.points); got != exp {\n\t\t\tt.Fatalf(\"values length mismatch %s: got %v, exp %v\", p.key, got, exp)\n\t\t}\n\n\t\tfor i, point := range p.points {\n\t\t\tassertValueEqual(t, values[i], point)\n\t\t}\n\t}\n\n\tif got, exp := len(r.Entries(\"cpu,host=A#!~#value\")), 2; got != exp {\n\t\tt.Fatalf(\"block count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\n// Ensures that a compaction will properly rollover to a new file when the\n// max keys per blocks is exceeded\nfunc TestCompactor_CompactFull_MaxKeys(t *testing.T) {\n\t// This test creates a lot of data and causes timeout failures for these envs\n\tif testing.Short() || os.Getenv(\"CI\") != \"\" || os.Getenv(\"GORACE\") != \"\" {\n\t\tt.Skip(\"Skipping max keys compaction test\")\n\t}\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\t// write two files where the first contains a single key with the maximum\n\t// number of full blocks that can fit in a TSM file\n\tf1, f1Name := MustTSMWriter(dir, 1)\n\tvalues := make([]tsm1.Value, 1000)\n\tfor i := 0; i < 65535; i++ {\n\t\tvalues = values[:0]\n\t\tfor j := 0; j < 1000; j++ {\n\t\t\tvalues = append(values, tsm1.NewValue(int64(i*1000+j), int64(1)))\n\t\t}\n\t\tif err := f1.Write(\"cpu,host=A#!~#value\", values); err != nil {\n\t\t\tt.Fatalf(\"write tsm f1: %v\", err)\n\t\t}\n\t}\n\tif err := f1.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"write index f1: %v\", err)\n\t}\n\tf1.Close()\n\n\t// Write a new file with 1 block that when compacted would exceed the max\n\t// 
blocks\n\tlastTimeStamp := values[len(values)-1].UnixNano()\n\tvalues = values[:0]\n\tf2, f2Name := MustTSMWriter(dir, 2)\n\tfor j := lastTimeStamp; j < lastTimeStamp+1000; j++ {\n\t\tvalues = append(values, tsm1.NewValue(int64(j), int64(1)))\n\t}\n\tif err := f2.Write(\"cpu,host=A#!~#value\", values); err != nil {\n\t\tt.Fatalf(\"write tsm f1: %v\", err)\n\t}\n\n\tif err := f2.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"write index f2: %v\", err)\n\t}\n\tf2.Close()\n\n\tcompactor := &tsm1.Compactor{\n\t\tDir:       dir,\n\t\tFileStore: &fakeFileStore{},\n\t}\n\tcompactor.Open()\n\n\t// Compact both files, should get 2 files back\n\tfiles, err := compactor.CompactFull([]string{f1Name, f2Name})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error writing snapshot: %v\", err)\n\t}\n\n\tif got, exp := len(files), 2; got != exp {\n\t\tt.Fatalf(\"files length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\texpGen, expSeq, err := tsm1.ParseTSMFileName(f2Name)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error parsing file name: %v\", err)\n\t}\n\texpSeq = expSeq + 1\n\n\tgotGen, gotSeq, err := tsm1.ParseTSMFileName(files[0])\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error parsing file name: %v\", err)\n\t}\n\n\tif gotGen != expGen {\n\t\tt.Fatalf(\"wrong generation for new file: got %v, exp %v\", gotGen, expGen)\n\t}\n\n\tif gotSeq != expSeq {\n\t\tt.Fatalf(\"wrong sequence for new file: got %v, exp %v\", gotSeq, expSeq)\n\t}\n}\n\n// Tests that a single TSM file can be read and iterated over\nfunc TestTSMKeyIterator_Single(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\tv1 := tsm1.NewValue(1, 1.1)\n\twrites := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{v1},\n\t}\n\n\tr := MustTSMReader(dir, 1, writes)\n\n\titer, err := tsm1.NewTSMKeyIterator(1, false, nil, r)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating WALKeyIterator: %v\", err)\n\t}\n\n\tvar readValues bool\n\tfor iter.Next() {\n\t\tkey, _, _, block, 
err := iter.Read()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error read: %v\", err)\n\t\t}\n\n\t\tvalues, err := tsm1.DecodeBlock(block, nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error decode: %v\", err)\n\t\t}\n\n\t\tif got, exp := key, \"cpu,host=A#!~#value\"; got != exp {\n\t\t\tt.Fatalf(\"key mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif got, exp := len(values), len(writes); got != exp {\n\t\t\tt.Fatalf(\"values length mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tfor _, v := range values {\n\t\t\treadValues = true\n\t\t\tassertValueEqual(t, v, v1)\n\t\t}\n\t}\n\n\tif !readValues {\n\t\tt.Fatalf(\"failed to read any values\")\n\t}\n}\n\n// Tests that duplicate point values are merged.  There is only one case\n// where this could happen and that is when a compaction completed and we replace\n// the old TSM file with a new one and we crash just before deleting the old file.\n// No data is lost but the same point time/value would exist in two files until\n// compaction corrects it.\nfunc TestTSMKeyIterator_Duplicate(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\tv1 := tsm1.NewValue(1, int64(1))\n\tv2 := tsm1.NewValue(1, int64(2))\n\n\twrites1 := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{v1},\n\t}\n\n\tr1 := MustTSMReader(dir, 1, writes1)\n\n\twrites2 := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{v2},\n\t}\n\n\tr2 := MustTSMReader(dir, 2, writes2)\n\n\titer, err := tsm1.NewTSMKeyIterator(1, false, nil, r1, r2)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating WALKeyIterator: %v\", err)\n\t}\n\n\tvar readValues bool\n\tfor iter.Next() {\n\t\tkey, _, _, block, err := iter.Read()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error read: %v\", err)\n\t\t}\n\n\t\tvalues, err := tsm1.DecodeBlock(block, nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error decode: %v\", err)\n\t\t}\n\n\t\tif got, exp := key, \"cpu,host=A#!~#value\"; got != 
exp {\n\t\t\tt.Fatalf(\"key mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif got, exp := len(values), 1; got != exp {\n\t\t\tt.Fatalf(\"values length mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\treadValues = true\n\t\tassertValueEqual(t, values[0], v2)\n\t}\n\n\tif !readValues {\n\t\tt.Fatalf(\"failed to read any values\")\n\t}\n}\n\n// Tests that deleted keys are not seen during iteration with\n// TSM files.\nfunc TestTSMKeyIterator_MultipleKeysDeleted(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\tv1 := tsm1.NewValue(2, int64(1))\n\tpoints1 := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{v1},\n\t}\n\n\tr1 := MustTSMReader(dir, 1, points1)\n\tif e := r1.Delete([]string{\"cpu,host=A#!~#value\"}); nil != e {\n\t\tt.Fatal(e)\n\t}\n\n\tv2 := tsm1.NewValue(1, float64(1))\n\tv3 := tsm1.NewValue(1, float64(1))\n\n\tpoints2 := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#count\": []tsm1.Value{v2},\n\t\t\"cpu,host=B#!~#value\": []tsm1.Value{v3},\n\t}\n\n\tr2 := MustTSMReader(dir, 2, points2)\n\tr2.Delete([]string{\"cpu,host=A#!~#count\"})\n\n\titer, err := tsm1.NewTSMKeyIterator(1, false, nil, r1, r2)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating WALKeyIterator: %v\", err)\n\t}\n\n\tvar readValues bool\n\tvar data = []struct {\n\t\tkey   string\n\t\tvalue tsm1.Value\n\t}{\n\t\t{\"cpu,host=B#!~#value\", v3},\n\t}\n\n\tfor iter.Next() {\n\t\tkey, _, _, block, err := iter.Read()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error read: %v\", err)\n\t\t}\n\n\t\tvalues, err := tsm1.DecodeBlock(block, nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error decode: %v\", err)\n\t\t}\n\n\t\tif got, exp := key, data[0].key; got != exp {\n\t\t\tt.Fatalf(\"key mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif got, exp := len(values), 1; got != exp {\n\t\t\tt.Fatalf(\"values length mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t\treadValues = true\n\n\t\tassertValueEqual(t, values[0], 
data[0].value)\n\t\tdata = data[1:]\n\t}\n\n\tif !readValues {\n\t\tt.Fatalf(\"failed to read any values\")\n\t}\n}\n\n// Tests that the TSMKeyIterator will abort if the interrupt channel is closed\nfunc TestTSMKeyIterator_Abort(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\tv1 := tsm1.NewValue(1, 1.1)\n\twrites := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{v1},\n\t}\n\n\tr := MustTSMReader(dir, 1, writes)\n\n\tintC := make(chan struct{})\n\titer, err := tsm1.NewTSMKeyIterator(1, false, intC, r)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating WALKeyIterator: %v\", err)\n\t}\n\n\tvar aborted bool\n\tfor iter.Next() {\n\t\t// Abort\n\t\tclose(intC)\n\n\t\t_, _, _, _, err := iter.Read()\n\t\tif err == nil {\n\t\t\tt.Fatalf(\"unexpected error read: %v\", err)\n\t\t}\n\t\taborted = err != nil\n\t}\n\n\tif !aborted {\n\t\tt.Fatalf(\"iteration not aborted\")\n\t}\n}\n\nfunc TestCacheKeyIterator_Single(t *testing.T) {\n\tv0 := tsm1.NewValue(1, 1.0)\n\n\twrites := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{v0},\n\t}\n\n\tc := tsm1.NewCache(0, \"\")\n\n\tfor k, v := range writes {\n\t\tif err := c.Write(k, v); err != nil {\n\t\t\tt.Fatalf(\"failed to write key foo to cache: %s\", err.Error())\n\t\t}\n\t}\n\n\titer := tsm1.NewCacheKeyIterator(c, 1, nil)\n\tvar readValues bool\n\tfor iter.Next() {\n\t\tkey, _, _, block, err := iter.Read()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error read: %v\", err)\n\t\t}\n\n\t\tvalues, err := tsm1.DecodeBlock(block, nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error decode: %v\", err)\n\t\t}\n\n\t\tif got, exp := key, \"cpu,host=A#!~#value\"; got != exp {\n\t\t\tt.Fatalf(\"key mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif got, exp := len(values), len(writes); got != exp {\n\t\t\tt.Fatalf(\"values length mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tfor _, v := range values {\n\t\t\treadValues = 
true\n\t\t\tassertValueEqual(t, v, v0)\n\t\t}\n\t}\n\n\tif !readValues {\n\t\tt.Fatalf(\"failed to read any values\")\n\t}\n}\n\nfunc TestCacheKeyIterator_Chunked(t *testing.T) {\n\tv0 := tsm1.NewValue(1, 1.0)\n\tv1 := tsm1.NewValue(2, 2.0)\n\n\twrites := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{v0, v1},\n\t}\n\n\tc := tsm1.NewCache(0, \"\")\n\n\tfor k, v := range writes {\n\t\tif err := c.Write(k, v); err != nil {\n\t\t\tt.Fatalf(\"failed to write key foo to cache: %s\", err.Error())\n\t\t}\n\t}\n\n\titer := tsm1.NewCacheKeyIterator(c, 1, nil)\n\tvar readValues bool\n\tvar chunk int\n\tfor iter.Next() {\n\t\tkey, _, _, block, err := iter.Read()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error read: %v\", err)\n\t\t}\n\n\t\tvalues, err := tsm1.DecodeBlock(block, nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error decode: %v\", err)\n\t\t}\n\n\t\tif got, exp := key, \"cpu,host=A#!~#value\"; got != exp {\n\t\t\tt.Fatalf(\"key mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif got, exp := len(values), 1; got != exp {\n\t\t\tt.Fatalf(\"values length mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tfor _, v := range values {\n\t\t\treadValues = true\n\t\t\tassertValueEqual(t, v, writes[\"cpu,host=A#!~#value\"][chunk])\n\t\t}\n\t\tchunk++\n\t}\n\n\tif !readValues {\n\t\tt.Fatalf(\"failed to read any values\")\n\t}\n}\n\n// Tests that the CacheKeyIterator will abort if the interrupt channel is closed\nfunc TestCacheKeyIterator_Abort(t *testing.T) {\n\tv0 := tsm1.NewValue(1, 1.0)\n\n\twrites := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{v0},\n\t}\n\n\tc := tsm1.NewCache(0, \"\")\n\n\tfor k, v := range writes {\n\t\tif err := c.Write(k, v); err != nil {\n\t\t\tt.Fatalf(\"failed to write key foo to cache: %s\", err.Error())\n\t\t}\n\t}\n\n\tintC := make(chan struct{})\n\n\titer := tsm1.NewCacheKeyIterator(c, 1, intC)\n\n\tvar aborted bool\n\tfor iter.Next() {\n\t\t//Abort\n\t\tclose(intC)\n\n\t\t_, _, 
_, _, err := iter.Read()\n\t\tif err == nil {\n\t\t\tt.Fatalf(\"unexpected error read: %v\", err)\n\t\t}\n\t\taborted = err != nil\n\t}\n\n\tif !aborted {\n\t\tt.Fatalf(\"iteration not aborted\")\n\t}\n}\n\nfunc TestDefaultPlanner_Plan_Min(t *testing.T) {\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn []tsm1.FileStat{\n\t\t\t\t\ttsm1.FileStat{\n\t\t\t\t\t\tPath: \"01-01.tsm1\",\n\t\t\t\t\t\tSize: 1 * 1024 * 1024,\n\t\t\t\t\t},\n\t\t\t\t\ttsm1.FileStat{\n\t\t\t\t\t\tPath: \"02-01.tsm1\",\n\t\t\t\t\t\tSize: 1 * 1024 * 1024,\n\t\t\t\t\t},\n\t\t\t\t\ttsm1.FileStat{\n\t\t\t\t\t\tPath: \"03-1.tsm1\",\n\t\t\t\t\t\tSize: 251 * 1024 * 1024,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\ttsm := cp.Plan(time.Now())\n\tif exp, got := 0, len(tsm); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\n// Ensure that if there are older files that can be compacted together but a newer\n// file that is in a larger step, the older ones will get compacted.\nfunc TestDefaultPlanner_Plan_CombineSequence(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-04.tsm1\",\n\t\t\tSize: 128 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-04.tsm1\",\n\t\t\tSize: 128 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-04.tsm1\",\n\t\t\tSize: 128 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"04-04.tsm1\",\n\t\t\tSize: 128 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"06-02.tsm1\",\n\t\t\tSize: 67 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"07-02.tsm1\",\n\t\t\tSize: 128 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"08-01.tsm1\",\n\t\t\tSize: 251 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t}, 
tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\texpFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3]}\n\ttsm := cp.Plan(time.Now())\n\tif exp, got := len(expFiles), len(tsm[0]); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, p := range expFiles {\n\t\tif got, exp := tsm[0][i], p.Path; got != exp {\n\t\t\tt.Fatalf(\"tsm file mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t}\n}\n\n// Ensure that the planner grabs the smallest compaction step\nfunc TestDefaultPlanner_Plan_MultipleGroups(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-04.tsm1\",\n\t\t\tSize: 64 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-04.tsm1\",\n\t\t\tSize: 64 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-04.tsm1\",\n\t\t\tSize: 64 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"04-04.tsm1\",\n\t\t\tSize: 129 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"05-04.tsm1\",\n\t\t\tSize: 129 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"06-04.tsm1\",\n\t\t\tSize: 129 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"07-04.tsm1\",\n\t\t\tSize: 129 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"08-04.tsm1\",\n\t\t\tSize: 129 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"09-04.tsm1\", // should be skipped\n\t\t\tSize: 129 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(&fakeFileStore{\n\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\treturn data\n\t\t},\n\t}, tsdb.DefaultCompactFullWriteColdDuration)\n\n\texpFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3],\n\t\tdata[4], data[5], data[6], data[7]}\n\ttsm := cp.Plan(time.Now())\n\n\tif got, exp := len(tsm), 2; got != exp {\n\t\tt.Fatalf(\"compaction group length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif exp, got := len(expFiles[:4]), len(tsm[0]); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, 
exp)\n\t}\n\n\tif exp, got := len(expFiles[4:]), len(tsm[1]); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, p := range expFiles[:4] {\n\t\tif got, exp := tsm[0][i], p.Path; got != exp {\n\t\t\tt.Fatalf(\"tsm file mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t}\n\n\tfor i, p := range expFiles[4:] {\n\t\tif got, exp := tsm[1][i], p.Path; got != exp {\n\t\t\tt.Fatalf(\"tsm file mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t}\n\n}\n\n// Ensure that the planner grabs the smallest compaction step\nfunc TestDefaultPlanner_PlanLevel_SmallestCompactionStep(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-03.tsm1\",\n\t\t\tSize: 251 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-03.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-03.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"04-03.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"05-01.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"06-01.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\texpFiles := []tsm1.FileStat{data[4], data[5]}\n\ttsm := cp.PlanLevel(1)\n\tif exp, got := len(expFiles), len(tsm[0]); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, p := range expFiles {\n\t\tif got, exp := tsm[0][i], p.Path; got != exp {\n\t\t\tt.Fatalf(\"tsm file mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t}\n}\n\nfunc TestDefaultPlanner_PlanLevel_SplitFile(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-03.tsm1\",\n\t\t\tSize: 251 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-03.tsm1\",\n\t\t\tSize: 1 * 1024 * 
1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-02.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-03.tsm1\",\n\t\t\tSize: 10 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"04-03.tsm1\",\n\t\t\tSize: 10 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"05-01.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"06-01.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\texpFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3], data[4]}\n\ttsm := cp.PlanLevel(3)\n\tif exp, got := len(expFiles), len(tsm[0]); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, p := range expFiles {\n\t\tif got, exp := tsm[0][i], p.Path; got != exp {\n\t\t\tt.Fatalf(\"tsm file mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t}\n}\n\nfunc TestDefaultPlanner_PlanLevel_IsolatedLowLevel(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-03.tsm1\",\n\t\t\tSize: 251 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-03.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-01.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"04-01.tsm1\",\n\t\t\tSize: 10 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"05-02.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"06-01.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\texpFiles := []tsm1.FileStat{data[2], data[3]}\n\ttsm := cp.PlanLevel(1)\n\tif exp, got := len(expFiles), len(tsm[0]); got != 
exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, p := range expFiles {\n\t\tif got, exp := tsm[0][i], p.Path; got != exp {\n\t\t\tt.Fatalf(\"tsm file mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t}\n}\n\nfunc TestDefaultPlanner_PlanLevel_IsolatedHighLevel(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-02.tsm1\",\n\t\t\tSize: 251 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-02.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-03.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-04.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"04-02.tsm1\",\n\t\t\tSize: 10 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"05-02.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"06-02.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\texpFiles := []tsm1.FileStat{}\n\ttsm := cp.PlanLevel(3)\n\tif exp, got := len(expFiles), len(tsm); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestDefaultPlanner_PlanLevel3_MinFiles(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-03.tsm1\",\n\t\t\tSize: 251 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-03.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-01.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"04-01.tsm1\",\n\t\t\tSize: 10 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"05-02.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"06-01.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := 
tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\texpFiles := []tsm1.FileStat{}\n\ttsm := cp.PlanLevel(3)\n\tif exp, got := len(expFiles), len(tsm); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestDefaultPlanner_PlanLevel2_MinFiles(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-04.tsm1\",\n\t\t\tSize: 251 * 1024 * 1024,\n\t\t},\n\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-02.tsm1\",\n\t\t\tSize: 251 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-03.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\texpFiles := []tsm1.FileStat{}\n\ttsm := cp.PlanLevel(2)\n\tif exp, got := len(expFiles), len(tsm); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestDefaultPlanner_PlanLevel_Tombstone(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath:         \"01-03.tsm1\",\n\t\t\tSize:         251 * 1024 * 1024,\n\t\t\tHasTombstone: true,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-03.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-01.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"04-01.tsm1\",\n\t\t\tSize: 10 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"05-02.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"06-01.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\texpFiles := 
[]tsm1.FileStat{data[0], data[1]}\n\ttsm := cp.PlanLevel(3)\n\tif exp, got := len(expFiles), len(tsm[0]); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, p := range expFiles {\n\t\tif got, exp := tsm[0][i], p.Path; got != exp {\n\t\t\tt.Fatalf(\"tsm file mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t}\n}\n\nfunc TestDefaultPlanner_PlanLevel_Multiple(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-01.tsm1\",\n\t\t\tSize: 251 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-01.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-01.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"04-01.tsm1\",\n\t\t\tSize: 10 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"05-01.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"06-01.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\texpFiles1 := []tsm1.FileStat{data[0], data[1], data[2], data[3]}\n\texpFiles2 := []tsm1.FileStat{data[4], data[5]}\n\n\ttsm := cp.PlanLevel(1)\n\tif exp, got := len(expFiles1), len(tsm[0]); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, p := range expFiles1 {\n\t\tif got, exp := tsm[0][i], p.Path; got != exp {\n\t\t\tt.Fatalf(\"tsm file mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t}\n\n\tif exp, got := len(expFiles2), len(tsm[1]); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, p := range expFiles2 {\n\t\tif got, exp := tsm[1][i], p.Path; got != exp {\n\t\t\tt.Fatalf(\"tsm file mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t}\n}\n\nfunc TestDefaultPlanner_PlanOptimize_NoLevel4(t *testing.T) {\n\tdata := 
[]tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-03.tsm1\",\n\t\t\tSize: 251 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-03.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-03.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\texpFiles := []tsm1.FileStat{}\n\ttsm := cp.PlanOptimize()\n\tif exp, got := len(expFiles), len(tsm); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestDefaultPlanner_PlanOptimize_Level4(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-04.tsm1\",\n\t\t\tSize: 251 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-04.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-04.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"04-04.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"05-03.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"06-04.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\texpFiles1 := []tsm1.FileStat{data[0], data[1], data[2], data[3]}\n\ttsm := cp.PlanOptimize()\n\tif exp, got := 1, len(tsm); exp != got {\n\t\tt.Fatalf(\"group length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif exp, got := len(expFiles1), len(tsm[0]); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, p := range expFiles1 {\n\t\tif got, exp := tsm[0][i], p.Path; got != exp {\n\t\t\tt.Fatalf(\"tsm file mismatch: got %v, exp %v\", got, 
exp)\n\t\t}\n\t}\n}\n\nfunc TestDefaultPlanner_PlanOptimize_Multiple(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-04.tsm1\",\n\t\t\tSize: 251 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-04.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-04.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"04-04.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"05-03.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"06-04.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"07-04.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"08-04.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"09-04.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\texpFiles1 := []tsm1.FileStat{data[0], data[1], data[2], data[3]}\n\texpFiles2 := []tsm1.FileStat{data[5], data[6], data[7], data[8]}\n\n\ttsm := cp.PlanOptimize()\n\tif exp, got := 2, len(tsm); exp != got {\n\t\tt.Fatalf(\"group length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif exp, got := len(expFiles1), len(tsm[0]); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, p := range expFiles1 {\n\t\tif got, exp := tsm[0][i], p.Path; got != exp {\n\t\t\tt.Fatalf(\"tsm file mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t}\n\n\tif exp, got := len(expFiles2), len(tsm[1]); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, p := range expFiles2 {\n\t\tif got, exp := tsm[1][i], p.Path; got != exp {\n\t\t\tt.Fatalf(\"tsm file mismatch: got %v, exp %v\", got, 
exp)\n\t\t}\n\t}\n}\n\nfunc TestDefaultPlanner_PlanOptimize_Optimized(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-03.tsm1\",\n\t\t\tSize: 251 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-04.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-05.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\texpFiles := []tsm1.FileStat{}\n\ttsm := cp.PlanOptimize()\n\tif exp, got := len(expFiles), len(tsm); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestDefaultPlanner_PlanOptimize_Tombstones(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-04.tsm1\",\n\t\t\tSize: 251 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath:         \"01-05.tsm1\",\n\t\t\tSize:         1 * 1024 * 1024,\n\t\t\tHasTombstone: true,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-06.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\texpFiles := []tsm1.FileStat{data[0], data[1], data[2]}\n\ttsm := cp.PlanOptimize()\n\tif exp, got := len(expFiles), len(tsm[0]); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, p := range expFiles {\n\t\tif got, exp := tsm[0][i], p.Path; got != exp {\n\t\t\tt.Fatalf(\"tsm file mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t}\n\n}\n\n// Ensure that the planner will compact all files if no writes\n// have happened in some interval\nfunc TestDefaultPlanner_Plan_FullOnCold(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: 
\"01-01.tsm1\",\n\t\t\tSize: 513 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-02.tsm1\",\n\t\t\tSize: 129 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-02.tsm1\",\n\t\t\tSize: 33 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"04-02.tsm1\",\n\t\t\tSize: 1 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"05-02.tsm1\",\n\t\t\tSize: 10 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"06-01.tsm1\",\n\t\t\tSize: 2 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t},\n\t\ttime.Nanosecond,\n\t)\n\n\ttsm := cp.Plan(time.Now().Add(-time.Second))\n\tif exp, got := len(data), len(tsm[0]); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, p := range data {\n\t\tif got, exp := tsm[0][i], p.Path; got != exp {\n\t\t\tt.Fatalf(\"tsm file mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t}\n}\n\n// Ensure that the planner will not return files that are over the max\n// allowable size\nfunc TestDefaultPlanner_Plan_SkipMaxSizeFiles(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-01.tsm1\",\n\t\t\tSize: 2049 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-02.tsm1\",\n\t\t\tSize: 2049 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\ttsm := cp.Plan(time.Now())\n\tif exp, got := 0, len(tsm); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\n// Ensure that the planner will not return files that are over the max\n// allowable size\nfunc TestDefaultPlanner_Plan_SkipPlanningAfterFull(t *testing.T) {\n\ttestSet := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-05.tsm1\",\n\t\t\tSize: 256 * 1024 * 
1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-05.tsm1\",\n\t\t\tSize: 256 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-05.tsm1\",\n\t\t\tSize: 256 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"04-04.tsm1\",\n\t\t\tSize: 256 * 1024 * 1024,\n\t\t},\n\t}\n\n\tfs := &fakeFileStore{\n\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\treturn testSet\n\t\t},\n\t\tblockCount: 1000,\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(fs, time.Nanosecond)\n\tplan := cp.Plan(time.Now().Add(-time.Second))\n\t// first verify that our test set would return files\n\tif exp, got := 4, len(plan[0]); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\tcp.Release(plan)\n\n\t// skip planning if all files are over the limit\n\tover := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-05.tsm1\",\n\t\t\tSize: 2049 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-05.tsm1\",\n\t\t\tSize: 2049 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-05.tsm1\",\n\t\t\tSize: 2049 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"04-05.tsm1\",\n\t\t\tSize: 2049 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"05-05.tsm1\",\n\t\t\tSize: 2049 * 1024 * 1024,\n\t\t},\n\t}\n\n\toverFs := &fakeFileStore{\n\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\treturn over\n\t\t},\n\t\tblockCount: 1000,\n\t}\n\n\tcp.FileStore = overFs\n\tplan = cp.Plan(time.Now().Add(-time.Second))\n\tif exp, got := 0, len(plan); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\tcp.Release(plan)\n\n\tplan = cp.PlanOptimize()\n\t// ensure the optimize planner would pick this up\n\tif exp, got := 1, len(plan); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\tcp.Release(plan)\n\n\tcp.FileStore = fs\n\t// ensure that it will plan if last modified has changed\n\tfs.lastModified = time.Now()\n\n\tif exp, got := 4, len(cp.Plan(time.Now())[0]); got != exp 
{\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\n// Tests that 2 generations, each over 2 GB and the second in level 2 does\n// not return just the first generation.  This was a case where full planning\n// would get repeatedly plan the same files and never stop.\nfunc TestDefaultPlanner_Plan_TwoGenLevel3(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"000002245-000001666.tsm\",\n\t\t\tSize: 2049 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"000002245-000001667.tsm\",\n\t\t\tSize: 2049 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"000002245-000001668.tsm\",\n\t\t\tSize: 2049 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"000002245-000001669.tsm\",\n\t\t\tSize: 2049 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"000002245-000001670.tsm\",\n\t\t\tSize: 2049 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"000002245-000001671.tsm\",\n\t\t\tSize: 2049 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"000002245-000001672.tsm\",\n\t\t\tSize: 2049 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"000002245-000001673.tsm\",\n\t\t\tSize: 192631258,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"000002246-000000002.tsm\",\n\t\t\tSize: 2049 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"000002246-000000003.tsm\",\n\t\t\tSize: 192631258,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tblockCount: 1000,\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t},\n\t\ttime.Hour)\n\n\ttsm := cp.Plan(time.Now().Add(-24 * time.Hour))\n\tif exp, got := 1, len(tsm); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\n// Ensure that the planner will return files over the max file\n// size, but do not contain full blocks\nfunc TestDefaultPlanner_Plan_NotFullOverMaxsize(t *testing.T) {\n\ttestSet := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: 
\"01-05.tsm1\",\n\t\t\tSize: 256 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-05.tsm1\",\n\t\t\tSize: 256 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-05.tsm1\",\n\t\t\tSize: 256 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"04-04.tsm1\",\n\t\t\tSize: 256 * 1024 * 1024,\n\t\t},\n\t}\n\n\tfs := &fakeFileStore{\n\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\treturn testSet\n\t\t},\n\t\tblockCount: 100,\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\tfs,\n\t\ttime.Nanosecond,\n\t)\n\n\tplan := cp.Plan(time.Now().Add(-time.Second))\n\t// first verify that our test set would return files\n\tif exp, got := 4, len(plan[0]); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\tcp.Release(plan)\n\n\t// skip planning if all files are over the limit\n\tover := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-05.tsm1\",\n\t\t\tSize: 2049 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-05.tsm1\",\n\t\t\tSize: 2049 * 1024 * 1024,\n\t\t},\n\t}\n\n\toverFs := &fakeFileStore{\n\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\treturn over\n\t\t},\n\t\tblockCount: 100,\n\t}\n\n\tcp.FileStore = overFs\n\tif exp, got := 1, len(cp.Plan(time.Now().Add(-time.Second))); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\n// Ensure that the planner will compact files that are past the smallest step\n// size even if there is a single file in the smaller step size\nfunc TestDefaultPlanner_Plan_CompactsMiddleSteps(t *testing.T) {\n\tdata := []tsm1.FileStat{\n\t\ttsm1.FileStat{\n\t\t\tPath: \"01-04.tsm1\",\n\t\t\tSize: 64 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"02-04.tsm1\",\n\t\t\tSize: 64 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"03-04.tsm1\",\n\t\t\tSize: 64 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"04-04.tsm1\",\n\t\t\tSize: 64 * 1024 * 1024,\n\t\t},\n\t\ttsm1.FileStat{\n\t\t\tPath: \"05-02.tsm1\",\n\t\t\tSize: 
2 * 1024 * 1024,\n\t\t},\n\t}\n\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn data\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\texpFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3]}\n\ttsm := cp.Plan(time.Now())\n\tif exp, got := len(expFiles), len(tsm[0]); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, p := range expFiles {\n\t\tif got, exp := tsm[0][i], p.Path; got != exp {\n\t\t\tt.Fatalf(\"tsm file mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t}\n}\n\nfunc TestDefaultPlanner_Plan_LargeSets(t *testing.T) {\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn []tsm1.FileStat{\n\t\t\t\t\ttsm1.FileStat{\n\t\t\t\t\t\tPath: \"000000278-000000006.tsm\",\n\t\t\t\t\t\tSize: 2148340232,\n\t\t\t\t\t},\n\t\t\t\t\ttsm1.FileStat{\n\t\t\t\t\t\tPath: \"000000278-000000007.tsm\",\n\t\t\t\t\t\tSize: 2148356556,\n\t\t\t\t\t},\n\t\t\t\t\ttsm1.FileStat{\n\t\t\t\t\t\tPath: \"000000278-000000008.tsm\",\n\t\t\t\t\t\tSize: 167780181,\n\t\t\t\t\t},\n\t\t\t\t\ttsm1.FileStat{\n\t\t\t\t\t\tPath: \"000000446-000047040.tsm\",\n\t\t\t\t\t\tSize: 2148728539,\n\t\t\t\t\t},\n\t\t\t\t\ttsm1.FileStat{\n\t\t\t\t\t\tPath: \"000000446-000047041.tsm\",\n\t\t\t\t\t\tSize: 701863692,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\ttsm := cp.Plan(time.Now())\n\tif exp, got := 0, len(tsm); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestDefaultPlanner_Plan_LargeGeneration(t *testing.T) {\n\tcp := tsm1.NewDefaultPlanner(\n\t\t&fakeFileStore{\n\t\t\tPathsFn: func() []tsm1.FileStat {\n\t\t\t\treturn []tsm1.FileStat{\n\t\t\t\t\ttsm1.FileStat{\n\t\t\t\t\t\tPath: \"000000278-000000006.tsm\",\n\t\t\t\t\t\tSize: 2148340232,\n\t\t\t\t\t},\n\t\t\t\t\ttsm1.FileStat{\n\t\t\t\t\t\tPath: 
\"000000278-000000007.tsm\",\n\t\t\t\t\t\tSize: 2148356556,\n\t\t\t\t\t},\n\t\t\t\t\ttsm1.FileStat{\n\t\t\t\t\t\tPath: \"000000278-000000008.tsm\",\n\t\t\t\t\t\tSize: 167780181,\n\t\t\t\t\t},\n\t\t\t\t\ttsm1.FileStat{\n\t\t\t\t\t\tPath: \"000000278-000047040.tsm\",\n\t\t\t\t\t\tSize: 2148728539,\n\t\t\t\t\t},\n\t\t\t\t\ttsm1.FileStat{\n\t\t\t\t\t\tPath: \"000000278-000047041.tsm\",\n\t\t\t\t\t\tSize: 701863692,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t}, tsdb.DefaultCompactFullWriteColdDuration,\n\t)\n\n\ttsm := cp.Plan(time.Now())\n\tif exp, got := 0, len(tsm); got != exp {\n\t\tt.Fatalf(\"tsm file length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc assertValueEqual(t *testing.T, a, b tsm1.Value) {\n\tif got, exp := a.UnixNano(), b.UnixNano(); got != exp {\n\t\tt.Fatalf(\"time mismatch: got %v, exp %v\", got, exp)\n\t}\n\tif got, exp := a.Value(), b.Value(); got != exp {\n\t\tt.Fatalf(\"value mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc MustTSMWriter(dir string, gen int) (tsm1.TSMWriter, string) {\n\tf := MustTempFile(dir)\n\toldName := f.Name()\n\n\t// Windows can't rename a file while it's open.  
Close first, rename and\n\t// then re-open\n\tif err := f.Close(); err != nil {\n\t\tpanic(fmt.Sprintf(\"close temp file: %v\", err))\n\t}\n\n\tnewName := filepath.Join(filepath.Dir(oldName), tsmFileName(gen))\n\tif err := os.Rename(oldName, newName); err != nil {\n\t\tpanic(fmt.Sprintf(\"create tsm file: %v\", err))\n\t}\n\n\tvar err error\n\tf, err = os.OpenFile(newName, os.O_RDWR, 0666)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"open tsm files: %v\", err))\n\t}\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"create TSM writer: %v\", err))\n\t}\n\n\treturn w, newName\n}\n\nfunc MustWriteTSM(dir string, gen int, values map[string][]tsm1.Value) string {\n\tw, name := MustTSMWriter(dir, gen)\n\n\tfor k, v := range values {\n\t\tif err := w.Write(k, v); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"write TSM value: %v\", err))\n\t\t}\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tpanic(fmt.Sprintf(\"write TSM index: %v\", err))\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tpanic(fmt.Sprintf(\"write TSM close: %v\", err))\n\t}\n\n\treturn name\n}\n\nfunc MustTSMReader(dir string, gen int, values map[string][]tsm1.Value) *tsm1.TSMReader {\n\treturn MustOpenTSMReader(MustWriteTSM(dir, gen, values))\n}\n\nfunc MustOpenTSMReader(name string) *tsm1.TSMReader {\n\tf, err := os.Open(name)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"open file: %v\", err))\n\t}\n\n\tr, err := tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"new reader: %v\", err))\n\t}\n\treturn r\n}\n\ntype fakeFileStore struct {\n\tPathsFn      func() []tsm1.FileStat\n\tlastModified time.Time\n\tblockCount   int\n}\n\nfunc (w *fakeFileStore) Stats() []tsm1.FileStat {\n\treturn w.PathsFn()\n}\n\nfunc (w *fakeFileStore) NextGeneration() int {\n\treturn 1\n}\n\nfunc (w *fakeFileStore) LastModified() time.Time {\n\treturn w.lastModified\n}\n\nfunc (w *fakeFileStore) BlockCount(path string, idx int) int {\n\treturn w.blockCount\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cursor.go",
    "content": "package tsm1\n\nimport (\n\t\"math\"\n\n\t\"github.com/influxdata/influxdb/tsdb\"\n)\n\n// multieFieldCursor wraps cursors for multiple fields on the same series\n// key. Instead of returning a plain interface value in the call for Next(),\n// it returns a map[string]interface{} for the field values\ntype multiFieldCursor struct {\n\tfields      []string\n\tcursors     []tsdb.Cursor\n\tascending   bool\n\tkeyBuffer   []int64\n\tvalueBuffer []interface{}\n}\n\n// NewMultiFieldCursor returns an instance of Cursor that joins the results of cursors.\nfunc NewMultiFieldCursor(fields []string, cursors []tsdb.Cursor, ascending bool) tsdb.Cursor {\n\treturn &multiFieldCursor{\n\t\tfields:      fields,\n\t\tcursors:     cursors,\n\t\tascending:   ascending,\n\t\tkeyBuffer:   make([]int64, len(cursors)),\n\t\tvalueBuffer: make([]interface{}, len(cursors)),\n\t}\n}\n\nfunc (m *multiFieldCursor) SeekTo(seek int64) (key int64, value interface{}) {\n\tfor i, c := range m.cursors {\n\t\tm.keyBuffer[i], m.valueBuffer[i] = c.SeekTo(seek)\n\t}\n\treturn m.read()\n}\n\nfunc (m *multiFieldCursor) Next() (int64, interface{}) {\n\treturn m.read()\n}\n\nfunc (m *multiFieldCursor) Ascending() bool {\n\treturn m.ascending\n}\n\nfunc (m *multiFieldCursor) read() (int64, interface{}) {\n\tt := int64(math.MaxInt64)\n\tif !m.ascending {\n\t\tt = int64(math.MinInt64)\n\t}\n\n\t// find the time we need to combine all fields\n\tfor _, k := range m.keyBuffer {\n\t\tif k == tsdb.EOF {\n\t\t\tcontinue\n\t\t}\n\t\tif m.ascending && t > k {\n\t\t\tt = k\n\t\t} else if !m.ascending && t < k {\n\t\t\tt = k\n\t\t}\n\t}\n\n\t// get the value and advance each of the cursors that have the matching time\n\tif t == math.MinInt64 || t == math.MaxInt64 {\n\t\treturn tsdb.EOF, nil\n\t}\n\n\tmm := make(map[string]interface{})\n\tfor i, k := range m.keyBuffer {\n\t\tif k == t {\n\t\t\tmm[m.fields[i]] = m.valueBuffer[i]\n\t\t\tm.keyBuffer[i], m.valueBuffer[i] = 
m.cursors[i].Next()\n\t\t}\n\t}\n\treturn t, mm\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/encoding.gen.go",
    "content": "// Generated by tmpl\n// https://github.com/benbjohnson/tmpl\n//\n// DO NOT EDIT!\n// Source: encoding.gen.go.tmpl\n\npackage tsm1\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n)\n\n// Values represents a slice of  values.\ntype Values []Value\n\nfunc (a Values) MinTime() int64 {\n\treturn a[0].UnixNano()\n}\n\nfunc (a Values) MaxTime() int64 {\n\treturn a[len(a)-1].UnixNano()\n}\n\nfunc (a Values) Size() int {\n\tsz := 0\n\tfor _, v := range a {\n\t\tsz += v.Size()\n\t}\n\treturn sz\n}\n\nfunc (a Values) ordered() bool {\n\tif len(a) <= 1 {\n\t\treturn true\n\t}\n\tfor i := 1; i < len(a); i++ {\n\t\tif av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (a Values) assertOrdered() {\n\tif len(a) <= 1 {\n\t\treturn\n\t}\n\tfor i := 1; i < len(a); i++ {\n\t\tif av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab {\n\t\t\tpanic(fmt.Sprintf(\"not ordered: %d %d >= %d\", i, av, ab))\n\t\t}\n\t}\n}\n\n// Deduplicate returns a new slice with any values that have the same timestamp removed.\n// The Value that appears last in the slice is the one that is kept.\nfunc (a Values) Deduplicate() Values {\n\tif len(a) == 0 {\n\t\treturn a\n\t}\n\n\t// See if we're already sorted and deduped\n\tvar needSort bool\n\tfor i := 1; i < len(a); i++ {\n\t\tif a[i-1].UnixNano() >= a[i].UnixNano() {\n\t\t\tneedSort = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !needSort {\n\t\treturn a\n\t}\n\n\tsort.Stable(a)\n\tvar i int\n\tfor j := 1; j < len(a); j++ {\n\t\tv := a[j]\n\t\tif v.UnixNano() != a[i].UnixNano() {\n\t\t\ti++\n\t\t}\n\t\ta[i] = v\n\n\t}\n\treturn a[:i+1]\n}\n\n//  Exclude returns the subset of values not in [min, max]\nfunc (a Values) Exclude(min, max int64) Values {\n\tvar i int\n\tfor j := 0; j < len(a); j++ {\n\t\tif a[j].UnixNano() >= min && a[j].UnixNano() <= max {\n\t\t\tcontinue\n\t\t}\n\n\t\ta[i] = a[j]\n\t\ti++\n\t}\n\treturn a[:i]\n}\n\n// Include returns the subset values between min and max 
inclusive.\nfunc (a Values) Include(min, max int64) Values {\n\tvar i int\n\tfor j := 0; j < len(a); j++ {\n\t\tif a[j].UnixNano() < min || a[j].UnixNano() > max {\n\t\t\tcontinue\n\t\t}\n\n\t\ta[i] = a[j]\n\t\ti++\n\t}\n\treturn a[:i]\n}\n\n// Merge overlays b to top of a.  If two values conflict with\n// the same timestamp, b is used.  Both a and b must be sorted\n// in ascending order.\nfunc (a Values) Merge(b Values) Values {\n\tif len(a) == 0 {\n\t\treturn b\n\t}\n\n\tif len(b) == 0 {\n\t\treturn a\n\t}\n\n\t// Normally, both a and b should not contain duplicates.  Due to a bug in older versions, it's\n\t// possible stored blocks might contain duplicate values.  Remove them if they exists before\n\t// merging.\n\ta = a.Deduplicate()\n\tb = b.Deduplicate()\n\n\tif a[len(a)-1].UnixNano() < b[0].UnixNano() {\n\t\treturn append(a, b...)\n\t}\n\n\tif b[len(b)-1].UnixNano() < a[0].UnixNano() {\n\t\treturn append(b, a...)\n\t}\n\n\tout := make(Values, 0, len(a)+len(b))\n\tfor len(a) > 0 && len(b) > 0 {\n\t\tif a[0].UnixNano() < b[0].UnixNano() {\n\t\t\tout, a = append(out, a[0]), a[1:]\n\t\t} else if len(b) > 0 && a[0].UnixNano() == b[0].UnixNano() {\n\t\t\ta = a[1:]\n\t\t} else {\n\t\t\tout, b = append(out, b[0]), b[1:]\n\t\t}\n\t}\n\tif len(a) > 0 {\n\t\treturn append(out, a...)\n\t}\n\treturn append(out, b...)\n}\n\n// Sort methods\nfunc (a Values) Len() int           { return len(a) }\nfunc (a Values) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a Values) Less(i, j int) bool { return a[i].UnixNano() < a[j].UnixNano() }\n\n// FloatValues represents a slice of Float values.\ntype FloatValues []FloatValue\n\nfunc (a FloatValues) MinTime() int64 {\n\treturn a[0].UnixNano()\n}\n\nfunc (a FloatValues) MaxTime() int64 {\n\treturn a[len(a)-1].UnixNano()\n}\n\nfunc (a FloatValues) Size() int {\n\tsz := 0\n\tfor _, v := range a {\n\t\tsz += v.Size()\n\t}\n\treturn sz\n}\n\nfunc (a FloatValues) ordered() bool {\n\tif len(a) <= 1 {\n\t\treturn true\n\t}\n\tfor i := 
1; i < len(a); i++ {\n\t\tif av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (a FloatValues) assertOrdered() {\n\tif len(a) <= 1 {\n\t\treturn\n\t}\n\tfor i := 1; i < len(a); i++ {\n\t\tif av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab {\n\t\t\tpanic(fmt.Sprintf(\"not ordered: %d %d >= %d\", i, av, ab))\n\t\t}\n\t}\n}\n\n// Deduplicate returns a new slice with any values that have the same timestamp removed.\n// The Value that appears last in the slice is the one that is kept.\nfunc (a FloatValues) Deduplicate() FloatValues {\n\tif len(a) == 0 {\n\t\treturn a\n\t}\n\n\t// See if we're already sorted and deduped\n\tvar needSort bool\n\tfor i := 1; i < len(a); i++ {\n\t\tif a[i-1].UnixNano() >= a[i].UnixNano() {\n\t\t\tneedSort = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !needSort {\n\t\treturn a\n\t}\n\n\tsort.Stable(a)\n\tvar i int\n\tfor j := 1; j < len(a); j++ {\n\t\tv := a[j]\n\t\tif v.UnixNano() != a[i].UnixNano() {\n\t\t\ti++\n\t\t}\n\t\ta[i] = v\n\n\t}\n\treturn a[:i+1]\n}\n\n//  Exclude returns the subset of values not in [min, max]\nfunc (a FloatValues) Exclude(min, max int64) FloatValues {\n\tvar i int\n\tfor j := 0; j < len(a); j++ {\n\t\tif a[j].UnixNano() >= min && a[j].UnixNano() <= max {\n\t\t\tcontinue\n\t\t}\n\n\t\ta[i] = a[j]\n\t\ti++\n\t}\n\treturn a[:i]\n}\n\n// Include returns the subset values between min and max inclusive.\nfunc (a FloatValues) Include(min, max int64) FloatValues {\n\tvar i int\n\tfor j := 0; j < len(a); j++ {\n\t\tif a[j].UnixNano() < min || a[j].UnixNano() > max {\n\t\t\tcontinue\n\t\t}\n\n\t\ta[i] = a[j]\n\t\ti++\n\t}\n\treturn a[:i]\n}\n\n// Merge overlays b to top of a.  If two values conflict with\n// the same timestamp, b is used.  
Both a and b must be sorted\n// in ascending order.\nfunc (a FloatValues) Merge(b FloatValues) FloatValues {\n\tif len(a) == 0 {\n\t\treturn b\n\t}\n\n\tif len(b) == 0 {\n\t\treturn a\n\t}\n\n\t// Normally, both a and b should not contain duplicates.  Due to a bug in older versions, it's\n\t// possible stored blocks might contain duplicate values.  Remove them if they exists before\n\t// merging.\n\ta = a.Deduplicate()\n\tb = b.Deduplicate()\n\n\tif a[len(a)-1].UnixNano() < b[0].UnixNano() {\n\t\treturn append(a, b...)\n\t}\n\n\tif b[len(b)-1].UnixNano() < a[0].UnixNano() {\n\t\treturn append(b, a...)\n\t}\n\n\tout := make(FloatValues, 0, len(a)+len(b))\n\tfor len(a) > 0 && len(b) > 0 {\n\t\tif a[0].UnixNano() < b[0].UnixNano() {\n\t\t\tout, a = append(out, a[0]), a[1:]\n\t\t} else if len(b) > 0 && a[0].UnixNano() == b[0].UnixNano() {\n\t\t\ta = a[1:]\n\t\t} else {\n\t\t\tout, b = append(out, b[0]), b[1:]\n\t\t}\n\t}\n\tif len(a) > 0 {\n\t\treturn append(out, a...)\n\t}\n\treturn append(out, b...)\n}\n\nfunc (a FloatValues) Encode(buf []byte) ([]byte, error) {\n\treturn encodeFloatValuesBlock(buf, a)\n}\n\nfunc encodeFloatValuesBlock(buf []byte, values []FloatValue) ([]byte, error) {\n\tif len(values) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tvenc := getFloatEncoder(len(values))\n\ttsenc := getTimeEncoder(len(values))\n\n\tvar b []byte\n\terr := func() error {\n\t\tfor _, v := range values {\n\t\t\ttsenc.Write(v.unixnano)\n\t\t\tvenc.Write(v.value)\n\t\t}\n\t\tvenc.Flush()\n\n\t\t// Encoded timestamp values\n\t\ttb, err := tsenc.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Encoded values\n\t\tvb, err := venc.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Prepend the first timestamp of the block in the first 8 bytes and the block\n\t\t// in the next byte, followed by the block\n\t\tb = packBlock(buf, BlockFloat64, tb, vb)\n\n\t\treturn nil\n\t}()\n\n\tputTimeEncoder(tsenc)\n\tputFloatEncoder(venc)\n\n\treturn b, err\n}\n\n// Sort 
methods\nfunc (a FloatValues) Len() int           { return len(a) }\nfunc (a FloatValues) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a FloatValues) Less(i, j int) bool { return a[i].UnixNano() < a[j].UnixNano() }\n\n// IntegerValues represents a slice of Integer values.\ntype IntegerValues []IntegerValue\n\nfunc (a IntegerValues) MinTime() int64 {\n\treturn a[0].UnixNano()\n}\n\nfunc (a IntegerValues) MaxTime() int64 {\n\treturn a[len(a)-1].UnixNano()\n}\n\nfunc (a IntegerValues) Size() int {\n\tsz := 0\n\tfor _, v := range a {\n\t\tsz += v.Size()\n\t}\n\treturn sz\n}\n\nfunc (a IntegerValues) ordered() bool {\n\tif len(a) <= 1 {\n\t\treturn true\n\t}\n\tfor i := 1; i < len(a); i++ {\n\t\tif av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (a IntegerValues) assertOrdered() {\n\tif len(a) <= 1 {\n\t\treturn\n\t}\n\tfor i := 1; i < len(a); i++ {\n\t\tif av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab {\n\t\t\tpanic(fmt.Sprintf(\"not ordered: %d %d >= %d\", i, av, ab))\n\t\t}\n\t}\n}\n\n// Deduplicate returns a new slice with any values that have the same timestamp removed.\n// The Value that appears last in the slice is the one that is kept.\nfunc (a IntegerValues) Deduplicate() IntegerValues {\n\tif len(a) == 0 {\n\t\treturn a\n\t}\n\n\t// See if we're already sorted and deduped\n\tvar needSort bool\n\tfor i := 1; i < len(a); i++ {\n\t\tif a[i-1].UnixNano() >= a[i].UnixNano() {\n\t\t\tneedSort = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !needSort {\n\t\treturn a\n\t}\n\n\tsort.Stable(a)\n\tvar i int\n\tfor j := 1; j < len(a); j++ {\n\t\tv := a[j]\n\t\tif v.UnixNano() != a[i].UnixNano() {\n\t\t\ti++\n\t\t}\n\t\ta[i] = v\n\n\t}\n\treturn a[:i+1]\n}\n\n//  Exclude returns the subset of values not in [min, max]\nfunc (a IntegerValues) Exclude(min, max int64) IntegerValues {\n\tvar i int\n\tfor j := 0; j < len(a); j++ {\n\t\tif a[j].UnixNano() >= min && a[j].UnixNano() <= max 
{\n\t\t\tcontinue\n\t\t}\n\n\t\ta[i] = a[j]\n\t\ti++\n\t}\n\treturn a[:i]\n}\n\n// Include returns the subset values between min and max inclusive.\nfunc (a IntegerValues) Include(min, max int64) IntegerValues {\n\tvar i int\n\tfor j := 0; j < len(a); j++ {\n\t\tif a[j].UnixNano() < min || a[j].UnixNano() > max {\n\t\t\tcontinue\n\t\t}\n\n\t\ta[i] = a[j]\n\t\ti++\n\t}\n\treturn a[:i]\n}\n\n// Merge overlays b to top of a.  If two values conflict with\n// the same timestamp, b is used.  Both a and b must be sorted\n// in ascending order.\nfunc (a IntegerValues) Merge(b IntegerValues) IntegerValues {\n\tif len(a) == 0 {\n\t\treturn b\n\t}\n\n\tif len(b) == 0 {\n\t\treturn a\n\t}\n\n\t// Normally, both a and b should not contain duplicates.  Due to a bug in older versions, it's\n\t// possible stored blocks might contain duplicate values.  Remove them if they exists before\n\t// merging.\n\ta = a.Deduplicate()\n\tb = b.Deduplicate()\n\n\tif a[len(a)-1].UnixNano() < b[0].UnixNano() {\n\t\treturn append(a, b...)\n\t}\n\n\tif b[len(b)-1].UnixNano() < a[0].UnixNano() {\n\t\treturn append(b, a...)\n\t}\n\n\tout := make(IntegerValues, 0, len(a)+len(b))\n\tfor len(a) > 0 && len(b) > 0 {\n\t\tif a[0].UnixNano() < b[0].UnixNano() {\n\t\t\tout, a = append(out, a[0]), a[1:]\n\t\t} else if len(b) > 0 && a[0].UnixNano() == b[0].UnixNano() {\n\t\t\ta = a[1:]\n\t\t} else {\n\t\t\tout, b = append(out, b[0]), b[1:]\n\t\t}\n\t}\n\tif len(a) > 0 {\n\t\treturn append(out, a...)\n\t}\n\treturn append(out, b...)\n}\n\nfunc (a IntegerValues) Encode(buf []byte) ([]byte, error) {\n\treturn encodeIntegerValuesBlock(buf, a)\n}\n\nfunc encodeIntegerValuesBlock(buf []byte, values []IntegerValue) ([]byte, error) {\n\tif len(values) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tvenc := getIntegerEncoder(len(values))\n\ttsenc := getTimeEncoder(len(values))\n\n\tvar b []byte\n\terr := func() error {\n\t\tfor _, v := range values 
{\n\t\t\ttsenc.Write(v.unixnano)\n\t\t\tvenc.Write(v.value)\n\t\t}\n\t\tvenc.Flush()\n\n\t\t// Encoded timestamp values\n\t\ttb, err := tsenc.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Encoded values\n\t\tvb, err := venc.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Prepend the first timestamp of the block in the first 8 bytes and the block\n\t\t// in the next byte, followed by the block\n\t\tb = packBlock(buf, BlockInteger, tb, vb)\n\n\t\treturn nil\n\t}()\n\n\tputTimeEncoder(tsenc)\n\tputIntegerEncoder(venc)\n\n\treturn b, err\n}\n\n// Sort methods\nfunc (a IntegerValues) Len() int           { return len(a) }\nfunc (a IntegerValues) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a IntegerValues) Less(i, j int) bool { return a[i].UnixNano() < a[j].UnixNano() }\n\n// StringValues represents a slice of String values.\ntype StringValues []StringValue\n\nfunc (a StringValues) MinTime() int64 {\n\treturn a[0].UnixNano()\n}\n\nfunc (a StringValues) MaxTime() int64 {\n\treturn a[len(a)-1].UnixNano()\n}\n\nfunc (a StringValues) Size() int {\n\tsz := 0\n\tfor _, v := range a {\n\t\tsz += v.Size()\n\t}\n\treturn sz\n}\n\nfunc (a StringValues) ordered() bool {\n\tif len(a) <= 1 {\n\t\treturn true\n\t}\n\tfor i := 1; i < len(a); i++ {\n\t\tif av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (a StringValues) assertOrdered() {\n\tif len(a) <= 1 {\n\t\treturn\n\t}\n\tfor i := 1; i < len(a); i++ {\n\t\tif av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab {\n\t\t\tpanic(fmt.Sprintf(\"not ordered: %d %d >= %d\", i, av, ab))\n\t\t}\n\t}\n}\n\n// Deduplicate returns a new slice with any values that have the same timestamp removed.\n// The Value that appears last in the slice is the one that is kept.\nfunc (a StringValues) Deduplicate() StringValues {\n\tif len(a) == 0 {\n\t\treturn a\n\t}\n\n\t// See if we're already sorted and deduped\n\tvar needSort bool\n\tfor i := 
1; i < len(a); i++ {\n\t\tif a[i-1].UnixNano() >= a[i].UnixNano() {\n\t\t\tneedSort = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !needSort {\n\t\treturn a\n\t}\n\n\tsort.Stable(a)\n\tvar i int\n\tfor j := 1; j < len(a); j++ {\n\t\tv := a[j]\n\t\tif v.UnixNano() != a[i].UnixNano() {\n\t\t\ti++\n\t\t}\n\t\ta[i] = v\n\n\t}\n\treturn a[:i+1]\n}\n\n//  Exclude returns the subset of values not in [min, max]\nfunc (a StringValues) Exclude(min, max int64) StringValues {\n\tvar i int\n\tfor j := 0; j < len(a); j++ {\n\t\tif a[j].UnixNano() >= min && a[j].UnixNano() <= max {\n\t\t\tcontinue\n\t\t}\n\n\t\ta[i] = a[j]\n\t\ti++\n\t}\n\treturn a[:i]\n}\n\n// Include returns the subset values between min and max inclusive.\nfunc (a StringValues) Include(min, max int64) StringValues {\n\tvar i int\n\tfor j := 0; j < len(a); j++ {\n\t\tif a[j].UnixNano() < min || a[j].UnixNano() > max {\n\t\t\tcontinue\n\t\t}\n\n\t\ta[i] = a[j]\n\t\ti++\n\t}\n\treturn a[:i]\n}\n\n// Merge overlays b to top of a.  If two values conflict with\n// the same timestamp, b is used.  Both a and b must be sorted\n// in ascending order.\nfunc (a StringValues) Merge(b StringValues) StringValues {\n\tif len(a) == 0 {\n\t\treturn b\n\t}\n\n\tif len(b) == 0 {\n\t\treturn a\n\t}\n\n\t// Normally, both a and b should not contain duplicates.  Due to a bug in older versions, it's\n\t// possible stored blocks might contain duplicate values.  
Remove them if they exists before\n\t// merging.\n\ta = a.Deduplicate()\n\tb = b.Deduplicate()\n\n\tif a[len(a)-1].UnixNano() < b[0].UnixNano() {\n\t\treturn append(a, b...)\n\t}\n\n\tif b[len(b)-1].UnixNano() < a[0].UnixNano() {\n\t\treturn append(b, a...)\n\t}\n\n\tout := make(StringValues, 0, len(a)+len(b))\n\tfor len(a) > 0 && len(b) > 0 {\n\t\tif a[0].UnixNano() < b[0].UnixNano() {\n\t\t\tout, a = append(out, a[0]), a[1:]\n\t\t} else if len(b) > 0 && a[0].UnixNano() == b[0].UnixNano() {\n\t\t\ta = a[1:]\n\t\t} else {\n\t\t\tout, b = append(out, b[0]), b[1:]\n\t\t}\n\t}\n\tif len(a) > 0 {\n\t\treturn append(out, a...)\n\t}\n\treturn append(out, b...)\n}\n\nfunc (a StringValues) Encode(buf []byte) ([]byte, error) {\n\treturn encodeStringValuesBlock(buf, a)\n}\n\nfunc encodeStringValuesBlock(buf []byte, values []StringValue) ([]byte, error) {\n\tif len(values) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tvenc := getStringEncoder(len(values))\n\ttsenc := getTimeEncoder(len(values))\n\n\tvar b []byte\n\terr := func() error {\n\t\tfor _, v := range values {\n\t\t\ttsenc.Write(v.unixnano)\n\t\t\tvenc.Write(v.value)\n\t\t}\n\t\tvenc.Flush()\n\n\t\t// Encoded timestamp values\n\t\ttb, err := tsenc.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Encoded values\n\t\tvb, err := venc.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Prepend the first timestamp of the block in the first 8 bytes and the block\n\t\t// in the next byte, followed by the block\n\t\tb = packBlock(buf, BlockString, tb, vb)\n\n\t\treturn nil\n\t}()\n\n\tputTimeEncoder(tsenc)\n\tputStringEncoder(venc)\n\n\treturn b, err\n}\n\n// Sort methods\nfunc (a StringValues) Len() int           { return len(a) }\nfunc (a StringValues) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a StringValues) Less(i, j int) bool { return a[i].UnixNano() < a[j].UnixNano() }\n\n// BooleanValues represents a slice of Boolean values.\ntype BooleanValues []BooleanValue\n\nfunc (a BooleanValues) 
MinTime() int64 {\n\treturn a[0].UnixNano()\n}\n\nfunc (a BooleanValues) MaxTime() int64 {\n\treturn a[len(a)-1].UnixNano()\n}\n\nfunc (a BooleanValues) Size() int {\n\tsz := 0\n\tfor _, v := range a {\n\t\tsz += v.Size()\n\t}\n\treturn sz\n}\n\nfunc (a BooleanValues) ordered() bool {\n\tif len(a) <= 1 {\n\t\treturn true\n\t}\n\tfor i := 1; i < len(a); i++ {\n\t\tif av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (a BooleanValues) assertOrdered() {\n\tif len(a) <= 1 {\n\t\treturn\n\t}\n\tfor i := 1; i < len(a); i++ {\n\t\tif av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab {\n\t\t\tpanic(fmt.Sprintf(\"not ordered: %d %d >= %d\", i, av, ab))\n\t\t}\n\t}\n}\n\n// Deduplicate returns a new slice with any values that have the same timestamp removed.\n// The Value that appears last in the slice is the one that is kept.\nfunc (a BooleanValues) Deduplicate() BooleanValues {\n\tif len(a) == 0 {\n\t\treturn a\n\t}\n\n\t// See if we're already sorted and deduped\n\tvar needSort bool\n\tfor i := 1; i < len(a); i++ {\n\t\tif a[i-1].UnixNano() >= a[i].UnixNano() {\n\t\t\tneedSort = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !needSort {\n\t\treturn a\n\t}\n\n\tsort.Stable(a)\n\tvar i int\n\tfor j := 1; j < len(a); j++ {\n\t\tv := a[j]\n\t\tif v.UnixNano() != a[i].UnixNano() {\n\t\t\ti++\n\t\t}\n\t\ta[i] = v\n\n\t}\n\treturn a[:i+1]\n}\n\n//  Exclude returns the subset of values not in [min, max]\nfunc (a BooleanValues) Exclude(min, max int64) BooleanValues {\n\tvar i int\n\tfor j := 0; j < len(a); j++ {\n\t\tif a[j].UnixNano() >= min && a[j].UnixNano() <= max {\n\t\t\tcontinue\n\t\t}\n\n\t\ta[i] = a[j]\n\t\ti++\n\t}\n\treturn a[:i]\n}\n\n// Include returns the subset values between min and max inclusive.\nfunc (a BooleanValues) Include(min, max int64) BooleanValues {\n\tvar i int\n\tfor j := 0; j < len(a); j++ {\n\t\tif a[j].UnixNano() < min || a[j].UnixNano() > max {\n\t\t\tcontinue\n\t\t}\n\n\t\ta[i] = 
a[j]\n\t\ti++\n\t}\n\treturn a[:i]\n}\n\n// Merge overlays b to top of a.  If two values conflict with\n// the same timestamp, b is used.  Both a and b must be sorted\n// in ascending order.\nfunc (a BooleanValues) Merge(b BooleanValues) BooleanValues {\n\tif len(a) == 0 {\n\t\treturn b\n\t}\n\n\tif len(b) == 0 {\n\t\treturn a\n\t}\n\n\t// Normally, both a and b should not contain duplicates.  Due to a bug in older versions, it's\n\t// possible stored blocks might contain duplicate values.  Remove them if they exists before\n\t// merging.\n\ta = a.Deduplicate()\n\tb = b.Deduplicate()\n\n\tif a[len(a)-1].UnixNano() < b[0].UnixNano() {\n\t\treturn append(a, b...)\n\t}\n\n\tif b[len(b)-1].UnixNano() < a[0].UnixNano() {\n\t\treturn append(b, a...)\n\t}\n\n\tout := make(BooleanValues, 0, len(a)+len(b))\n\tfor len(a) > 0 && len(b) > 0 {\n\t\tif a[0].UnixNano() < b[0].UnixNano() {\n\t\t\tout, a = append(out, a[0]), a[1:]\n\t\t} else if len(b) > 0 && a[0].UnixNano() == b[0].UnixNano() {\n\t\t\ta = a[1:]\n\t\t} else {\n\t\t\tout, b = append(out, b[0]), b[1:]\n\t\t}\n\t}\n\tif len(a) > 0 {\n\t\treturn append(out, a...)\n\t}\n\treturn append(out, b...)\n}\n\nfunc (a BooleanValues) Encode(buf []byte) ([]byte, error) {\n\treturn encodeBooleanValuesBlock(buf, a)\n}\n\nfunc encodeBooleanValuesBlock(buf []byte, values []BooleanValue) ([]byte, error) {\n\tif len(values) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tvenc := getBooleanEncoder(len(values))\n\ttsenc := getTimeEncoder(len(values))\n\n\tvar b []byte\n\terr := func() error {\n\t\tfor _, v := range values {\n\t\t\ttsenc.Write(v.unixnano)\n\t\t\tvenc.Write(v.value)\n\t\t}\n\t\tvenc.Flush()\n\n\t\t// Encoded timestamp values\n\t\ttb, err := tsenc.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Encoded values\n\t\tvb, err := venc.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Prepend the first timestamp of the block in the first 8 bytes and the block\n\t\t// in the next byte, followed by the 
block\n\t\tb = packBlock(buf, BlockBoolean, tb, vb)\n\n\t\treturn nil\n\t}()\n\n\tputTimeEncoder(tsenc)\n\tputBooleanEncoder(venc)\n\n\treturn b, err\n}\n\n// Sort methods\nfunc (a BooleanValues) Len() int           { return len(a) }\nfunc (a BooleanValues) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a BooleanValues) Less(i, j int) bool { return a[i].UnixNano() < a[j].UnixNano() }\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/encoding.gen.go.tmpl",
    "content": "package tsm1\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n)\n\n{{range .}}\n\n// {{.Name}}Values represents a slice of {{.Name}} values.\ntype {{.Name}}Values []{{.Name}}Value\n\nfunc (a {{.Name}}Values) MinTime() int64 {\n\treturn a[0].UnixNano()\n}\n\nfunc (a {{.Name}}Values) MaxTime() int64 {\n\treturn a[len(a)-1].UnixNano()\n}\n\nfunc (a {{.Name}}Values) Size() int {\n\tsz := 0\n\tfor _, v := range a {\n\t\tsz += v.Size()\n\t}\n\treturn sz\n}\n\nfunc (a {{.Name}}Values) ordered() bool {\n\tif len(a) <= 1 {\n\t\treturn true\n\t}\n\tfor i := 1; i < len(a); i++ {\n\t\tif av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (a {{.Name}}Values) assertOrdered() {\n\tif len(a) <= 1 {\n\t\treturn\n\t}\n\tfor i := 1; i < len(a); i++ {\n\t\tif av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab {\n\t\t\tpanic(fmt.Sprintf(\"not ordered: %d %d >= %d\", i, av, ab))\n\t\t}\n\t}\n}\n\n\n// Deduplicate returns a new slice with any values that have the same timestamp removed.\n// The Value that appears last in the slice is the one that is kept.\nfunc (a {{.Name}}Values) Deduplicate() {{.Name}}Values {\n\tif len(a) == 0 {\n\t\treturn a\n\t}\n\n\t// See if we're already sorted and deduped\n\tvar needSort bool\n\tfor i := 1; i < len(a); i++ {\n\t\tif a[i-1].UnixNano() >= a[i].UnixNano() {\n\t\t\tneedSort = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !needSort {\n\t\treturn a\n\t}\n\n\tsort.Stable(a)\n\tvar i int\n\tfor j := 1; j < len(a); j++ {\n\t\tv := a[j]\n\t\tif v.UnixNano() != a[i].UnixNano() {\n\t\t\ti++\n\t\t}\n\t\ta[i] = v\n\n\t}\n\treturn a[:i+1]\n}\n\n//  Exclude returns the subset of values not in [min, max]\nfunc (a {{.Name}}Values) Exclude(min, max int64) {{.Name}}Values {\n\tvar i int\n\tfor j := 0; j < len(a); j++ {\n\t\tif a[j].UnixNano() >= min && a[j].UnixNano() <= max {\n\t\t\tcontinue\n\t\t}\n\n\t\ta[i] = a[j]\n\t\ti++\n\t}\n\treturn a[:i]\n}\n\n// Include returns the subset values between min 
and max inclusive.\nfunc (a {{.Name}}Values) Include(min, max int64) {{.Name}}Values {\n\tvar i int\n\tfor j := 0; j < len(a); j++ {\n\t\tif a[j].UnixNano() < min || a[j].UnixNano() > max {\n\t\t\tcontinue\n\t\t}\n\n\t\ta[i] = a[j]\n\t\ti++\n\t}\n\treturn a[:i]\n}\n\n// Merge overlays b to top of a.  If two values conflict with\n// the same timestamp, b is used.  Both a and b must be sorted\n// in ascending order.\nfunc (a {{.Name}}Values) Merge(b {{.Name}}Values) {{.Name}}Values {\n\tif len(a) == 0 {\n\t\treturn b\n\t}\n\n\tif len(b) == 0 {\n\t\treturn a\n\t}\n\n\t// Normally, both a and b should not contain duplicates.  Due to a bug in older versions, it's\n\t// possible stored blocks might contain duplicate values.  Remove them if they exists before\n\t// merging.\n\ta = a.Deduplicate()\n\tb = b.Deduplicate()\n\n\tif a[len(a)-1].UnixNano() < b[0].UnixNano() {\n\t\treturn append(a, b...)\n\t}\n\n\tif b[len(b)-1].UnixNano() < a[0].UnixNano() {\n\t\treturn append(b, a...)\n\t}\n\n\tout := make({{.Name}}Values, 0, len(a)+len(b))\n\tfor len(a) > 0 && len(b) > 0 {\n\t\tif a[0].UnixNano() < b[0].UnixNano() {\n\t\t\tout, a = append(out, a[0]), a[1:]\n\t\t} else if len(b) > 0 && a[0].UnixNano() == b[0].UnixNano() {\n\t\t\ta = a[1:]\n\t\t} else {\n\t\t\tout, b = append(out, b[0]), b[1:]\n\t\t}\n\t}\n\tif len(a) > 0 {\n\t\treturn append(out, a...)\n\t}\n\treturn append(out, b...)\n}\n\n{{ if ne .Name \"\" }}\nfunc (a {{.Name}}Values) Encode(buf []byte) ([]byte, error) {\n\treturn encode{{.Name}}ValuesBlock(buf, a)\n}\n\nfunc encode{{ .Name }}ValuesBlock(buf []byte, values []{{.Name}}Value) ([]byte, error) {\n\tif len(values) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tvenc := get{{ .Name }}Encoder(len(values))\n\ttsenc := getTimeEncoder(len(values))\n\n\tvar b []byte\n\terr := func() error {\n\t\tfor _, v := range values {\n\t\t\ttsenc.Write(v.unixnano)\n\t\t\tvenc.Write(v.value)\n\t\t}\n\t\tvenc.Flush()\n\n\t\t// Encoded timestamp values\n\t\ttb, err := tsenc.Bytes()\n\t\tif 
err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Encoded values\n\t\tvb, err := venc.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Prepend the first timestamp of the block in the first 8 bytes and the block\n\t\t// in the next byte, followed by the block\n\t\tb = packBlock(buf, {{ .Type }}, tb, vb)\n\n\t\treturn nil\n\t}()\n\n\tputTimeEncoder(tsenc)\n\tput{{.Name}}Encoder(venc)\n\n\treturn b, err\n}\n\n{{ end }}\n\n// Sort methods\nfunc (a {{.Name}}Values) Len() int           { return len(a) }\nfunc (a {{.Name}}Values) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a {{.Name}}Values) Less(i, j int) bool { return a[i].UnixNano() < a[j].UnixNano() }\n\n\n{{ end }}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/encoding.gen.go.tmpldata",
    "content": "[\n\t{\n\t\t\"Name\":\"\",\n\t\t\"name\":\"\",\n\t\t\"Type\":\"\"\n\t},\n\t{\n\t\t\"Name\":\"Float\",\n\t\t\"name\":\"float\",\n\t\t\"Type\":\"BlockFloat64\"\n\t},\n\t{\n\t\t\"Name\":\"Integer\",\n\t\t\"name\":\"integer\",\n\t\t\"Type\":\"BlockInteger\"\n\t},\n\t{\n\t\t\"Name\":\"String\",\n\t\t\"name\":\"string\",\n\t\t\"Type\":\"BlockString\"\n\t},\n\t{\n\t\t\"Name\":\"Boolean\",\n\t\t\"name\":\"boolean\",\n\t\t\"Type\":\"BlockBoolean\"\n\t}\n]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/encoding.go",
    "content": "package tsm1\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/pkg/pool\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n)\n\nconst (\n\t// BlockFloat64 designates a block encodes float64 values.\n\tBlockFloat64 = byte(0)\n\n\t// BlockInteger designates a block encodes int64 values.\n\tBlockInteger = byte(1)\n\n\t// BlockBoolean designates a block encodes boolean values.\n\tBlockBoolean = byte(2)\n\n\t// BlockString designates a block encodes string values.\n\tBlockString = byte(3)\n\n\t// encodedBlockHeaderSize is the size of the header for an encoded block.  There is one\n\t// byte encoding the type of the block.\n\tencodedBlockHeaderSize = 1\n)\n\nfunc init() {\n\t// Prime the pools with one encoder/decoder for each available CPU.\n\tvals := make([]interface{}, 0, runtime.NumCPU())\n\tfor _, p := range []*pool.Generic{\n\t\ttimeEncoderPool, timeDecoderPool,\n\t\tintegerEncoderPool, integerDecoderPool,\n\t\tfloatDecoderPool, floatDecoderPool,\n\t\tstringEncoderPool, stringEncoderPool,\n\t\tbooleanEncoderPool, booleanDecoderPool,\n\t} {\n\t\tvals = vals[:0]\n\t\t// Check one out to force the allocation now and hold onto it\n\t\tfor i := 0; i < runtime.NumCPU(); i++ {\n\t\t\tv := p.Get(tsdb.DefaultMaxPointsPerBlock)\n\t\t\tvals = append(vals, v)\n\t\t}\n\t\t// Add them all back\n\t\tfor _, v := range vals {\n\t\t\tp.Put(v)\n\t\t}\n\t}\n}\n\nvar (\n\t// encoder pools\n\n\ttimeEncoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} {\n\t\treturn NewTimeEncoder(sz)\n\t})\n\tintegerEncoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} {\n\t\treturn NewIntegerEncoder(sz)\n\t})\n\tfloatEncoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} {\n\t\treturn NewFloatEncoder()\n\t})\n\tstringEncoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} {\n\t\treturn 
NewStringEncoder(sz)\n\t})\n\tbooleanEncoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} {\n\t\treturn NewBooleanEncoder(sz)\n\t})\n\n\t// decoder pools\n\n\ttimeDecoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} {\n\t\treturn &TimeDecoder{}\n\t})\n\tintegerDecoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} {\n\t\treturn &IntegerDecoder{}\n\t})\n\tfloatDecoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} {\n\t\treturn &FloatDecoder{}\n\t})\n\tstringDecoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} {\n\t\treturn &StringDecoder{}\n\t})\n\tbooleanDecoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} {\n\t\treturn &BooleanDecoder{}\n\t})\n)\n\n// Value represents a TSM-encoded value.\ntype Value interface {\n\t// UnixNano returns the timestamp of the value in nanoseconds since unix epoch.\n\tUnixNano() int64\n\n\t// Value returns the underlying value.\n\tValue() interface{}\n\n\t// Size returns the number of bytes necessary to represent the value and its timestamp.\n\tSize() int\n\n\t// String returns the string representation of the value and its timestamp.\n\tString() string\n\n\t// internalOnly is unexported to ensure implementations of Value\n\t// can only originate in this package.\n\tinternalOnly()\n}\n\n// NewValue returns a new Value with the underlying type dependent on value.\nfunc NewValue(t int64, value interface{}) Value {\n\tswitch v := value.(type) {\n\tcase int64:\n\t\treturn IntegerValue{unixnano: t, value: v}\n\tcase float64:\n\t\treturn FloatValue{unixnano: t, value: v}\n\tcase bool:\n\t\treturn BooleanValue{unixnano: t, value: v}\n\tcase string:\n\t\treturn StringValue{unixnano: t, value: v}\n\t}\n\treturn EmptyValue{}\n}\n\n// NewIntegerValue returns a new integer value.\nfunc NewIntegerValue(t int64, v int64) Value {\n\treturn IntegerValue{unixnano: t, value: v}\n}\n\n// NewFloatValue returns a new float value.\nfunc 
NewFloatValue(t int64, v float64) Value {\n\treturn FloatValue{unixnano: t, value: v}\n}\n\n// NewBooleanValue returns a new boolean value.\nfunc NewBooleanValue(t int64, v bool) Value {\n\treturn BooleanValue{unixnano: t, value: v}\n}\n\n// NewStringValue returns a new string value.\nfunc NewStringValue(t int64, v string) Value {\n\treturn StringValue{unixnano: t, value: v}\n}\n\n// EmptyValue is used when there is no appropriate other value.\ntype EmptyValue struct{}\n\n// UnixNano returns tsdb.EOF.\nfunc (e EmptyValue) UnixNano() int64 { return tsdb.EOF }\n\n// Value returns nil.\nfunc (e EmptyValue) Value() interface{} { return nil }\n\n// Size returns 0.\nfunc (e EmptyValue) Size() int { return 0 }\n\n// String returns the empty string.\nfunc (e EmptyValue) String() string { return \"\" }\n\nfunc (_ EmptyValue) internalOnly()   {}\nfunc (_ StringValue) internalOnly()  {}\nfunc (_ IntegerValue) internalOnly() {}\nfunc (_ BooleanValue) internalOnly() {}\nfunc (_ FloatValue) internalOnly()   {}\n\n// Encode converts the values to a byte slice.  
If there are no values,\n// this function panics.\nfunc (a Values) Encode(buf []byte) ([]byte, error) {\n\tif len(a) == 0 {\n\t\tpanic(\"unable to encode block type\")\n\t}\n\n\tswitch a[0].(type) {\n\tcase FloatValue:\n\t\treturn encodeFloatBlock(buf, a)\n\tcase IntegerValue:\n\t\treturn encodeIntegerBlock(buf, a)\n\tcase BooleanValue:\n\t\treturn encodeBooleanBlock(buf, a)\n\tcase StringValue:\n\t\treturn encodeStringBlock(buf, a)\n\t}\n\n\treturn nil, fmt.Errorf(\"unsupported value type %T\", a[0])\n}\n\n// InfluxQLType returns the influxql.DataType the values map to.\nfunc (a Values) InfluxQLType() (influxql.DataType, error) {\n\tif len(a) == 0 {\n\t\treturn influxql.Unknown, fmt.Errorf(\"no values to infer type\")\n\t}\n\n\tswitch a[0].(type) {\n\tcase FloatValue:\n\t\treturn influxql.Float, nil\n\tcase IntegerValue:\n\t\treturn influxql.Integer, nil\n\tcase BooleanValue:\n\t\treturn influxql.Boolean, nil\n\tcase StringValue:\n\t\treturn influxql.String, nil\n\t}\n\n\treturn influxql.Unknown, fmt.Errorf(\"unsupported value type %T\", a[0])\n}\n\n// BlockType returns the type of value encoded in a block or an error\n// if the block type is unknown.\nfunc BlockType(block []byte) (byte, error) {\n\tblockType := block[0]\n\tswitch blockType {\n\tcase BlockFloat64, BlockInteger, BlockBoolean, BlockString:\n\t\treturn blockType, nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"unknown block type: %d\", blockType)\n\t}\n}\n\n// BlockCount returns the number of timestamps encoded in block.\nfunc BlockCount(block []byte) int {\n\tif len(block) <= encodedBlockHeaderSize {\n\t\tpanic(fmt.Sprintf(\"count of short block: got %v, exp %v\", len(block), encodedBlockHeaderSize))\n\t}\n\t// first byte is the block type\n\ttb, _, err := unpackBlock(block[1:])\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"BlockCount: error unpacking block: %s\", err.Error()))\n\t}\n\treturn CountTimestamps(tb)\n}\n\n// DecodeBlock takes a byte slice and decodes it into values of the appropriate type\n// 
based on the block.\nfunc DecodeBlock(block []byte, vals []Value) ([]Value, error) {\n\tif len(block) <= encodedBlockHeaderSize {\n\t\tpanic(fmt.Sprintf(\"decode of short block: got %v, exp %v\", len(block), encodedBlockHeaderSize))\n\t}\n\n\tblockType, err := BlockType(block)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch blockType {\n\tcase BlockFloat64:\n\t\tvar buf []FloatValue\n\t\tdecoded, err := DecodeFloatBlock(block, &buf)\n\t\tif len(vals) < len(decoded) {\n\t\t\tvals = make([]Value, len(decoded))\n\t\t}\n\t\tfor i := range decoded {\n\t\t\tvals[i] = decoded[i]\n\t\t}\n\t\treturn vals[:len(decoded)], err\n\tcase BlockInteger:\n\t\tvar buf []IntegerValue\n\t\tdecoded, err := DecodeIntegerBlock(block, &buf)\n\t\tif len(vals) < len(decoded) {\n\t\t\tvals = make([]Value, len(decoded))\n\t\t}\n\t\tfor i := range decoded {\n\t\t\tvals[i] = decoded[i]\n\t\t}\n\t\treturn vals[:len(decoded)], err\n\n\tcase BlockBoolean:\n\t\tvar buf []BooleanValue\n\t\tdecoded, err := DecodeBooleanBlock(block, &buf)\n\t\tif len(vals) < len(decoded) {\n\t\t\tvals = make([]Value, len(decoded))\n\t\t}\n\t\tfor i := range decoded {\n\t\t\tvals[i] = decoded[i]\n\t\t}\n\t\treturn vals[:len(decoded)], err\n\n\tcase BlockString:\n\t\tvar buf []StringValue\n\t\tdecoded, err := DecodeStringBlock(block, &buf)\n\t\tif len(vals) < len(decoded) {\n\t\t\tvals = make([]Value, len(decoded))\n\t\t}\n\t\tfor i := range decoded {\n\t\t\tvals[i] = decoded[i]\n\t\t}\n\t\treturn vals[:len(decoded)], err\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown block type: %d\", blockType))\n\t}\n}\n\n// FloatValue represents a float64 value.\ntype FloatValue struct {\n\tunixnano int64\n\tvalue    float64\n}\n\n// UnixNano returns the timestamp of the value.\nfunc (v FloatValue) UnixNano() int64 {\n\treturn v.unixnano\n}\n\n// Value returns the underlying float64 value.\nfunc (v FloatValue) Value() interface{} {\n\treturn v.value\n}\n\n// Size returns the number of bytes necessary to represent the value 
and its timestamp.\nfunc (v FloatValue) Size() int {\n\treturn 16\n}\n\n// String returns the string representation of the value and its timestamp.\nfunc (v FloatValue) String() string {\n\treturn fmt.Sprintf(\"%v %v\", time.Unix(0, v.unixnano), v.value)\n}\n\nfunc encodeFloatBlock(buf []byte, values []Value) ([]byte, error) {\n\tif len(values) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// A float block is encoded using different compression strategies\n\t// for timestamps and values.\n\n\t// Encode values using Gorilla float compression\n\tvenc := getFloatEncoder(len(values))\n\n\t// Encode timestamps using an adaptive encoder that uses delta-encoding,\n\t// frame-or-reference and run length encoding.\n\ttsenc := getTimeEncoder(len(values))\n\n\tvar b []byte\n\terr := func() error {\n\t\tfor _, v := range values {\n\t\t\tvv := v.(FloatValue)\n\t\t\ttsenc.Write(vv.unixnano)\n\t\t\tvenc.Write(vv.value)\n\t\t}\n\t\tvenc.Flush()\n\n\t\t// Encoded timestamp values\n\t\ttb, err := tsenc.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Encoded float values\n\t\tvb, err := venc.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Prepend the first timestamp of the block in the first 8 bytes and the block\n\t\t// in the next byte, followed by the block\n\t\tb = packBlock(buf, BlockFloat64, tb, vb)\n\n\t\treturn nil\n\t}()\n\n\tputTimeEncoder(tsenc)\n\tputFloatEncoder(venc)\n\n\treturn b, err\n}\n\n// DecodeFloatBlock decodes the float block from the byte slice\n// and appends the float values to a.\nfunc DecodeFloatBlock(block []byte, a *[]FloatValue) ([]FloatValue, error) {\n\t// Block type is the next block, make sure we actually have a float block\n\tblockType := block[0]\n\tif blockType != BlockFloat64 {\n\t\treturn nil, fmt.Errorf(\"invalid block type: exp %d, got %d\", BlockFloat64, blockType)\n\t}\n\tblock = block[1:]\n\n\ttb, vb, err := unpackBlock(block)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttdec := 
timeDecoderPool.Get(0).(*TimeDecoder)\n\tvdec := floatDecoderPool.Get(0).(*FloatDecoder)\n\n\tvar i int\n\terr = func() error {\n\t\t// Setup our timestamp and value decoders\n\t\ttdec.Init(tb)\n\t\terr = vdec.SetBytes(vb)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Decode both a timestamp and value\n\t\tfor tdec.Next() && vdec.Next() {\n\t\t\tts := tdec.Read()\n\t\t\tv := vdec.Values()\n\t\t\tif i < len(*a) {\n\t\t\t\telem := &(*a)[i]\n\t\t\t\telem.unixnano = ts\n\t\t\t\telem.value = v\n\t\t\t} else {\n\t\t\t\t*a = append(*a, FloatValue{ts, v})\n\t\t\t}\n\t\t\ti++\n\t\t}\n\n\t\t// Did timestamp decoding have an error?\n\t\terr = tdec.Error()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Did float decoding have an error?\n\t\terr = vdec.Error()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}()\n\n\ttimeDecoderPool.Put(tdec)\n\tfloatDecoderPool.Put(vdec)\n\n\treturn (*a)[:i], err\n}\n\n// BooleanValue represents a boolean value.\ntype BooleanValue struct {\n\tunixnano int64\n\tvalue    bool\n}\n\n// Size returns the number of bytes necessary to represent the value and its timestamp.\nfunc (v BooleanValue) Size() int {\n\treturn 9\n}\n\n// UnixNano returns the timestamp of the value in nanoseconds since unix epoch.\nfunc (v BooleanValue) UnixNano() int64 {\n\treturn v.unixnano\n}\n\n// Value returns the underlying boolean value.\nfunc (v BooleanValue) Value() interface{} {\n\treturn v.value\n}\n\n// String returns the string representation of the value and its timestamp.\nfunc (v BooleanValue) String() string {\n\treturn fmt.Sprintf(\"%v %v\", time.Unix(0, v.unixnano), v.Value())\n}\n\nfunc encodeBooleanBlock(buf []byte, values []Value) ([]byte, error) {\n\tif len(values) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// A boolean block is encoded using different compression strategies\n\t// for timestamps and values.\n\tvenc := getBooleanEncoder(len(values))\n\n\t// Encode timestamps using an adaptive encoder\n\ttsenc := 
getTimeEncoder(len(values))\n\n\tvar b []byte\n\terr := func() error {\n\t\tfor _, v := range values {\n\t\t\tvv := v.(BooleanValue)\n\t\t\ttsenc.Write(vv.unixnano)\n\t\t\tvenc.Write(vv.value)\n\t\t}\n\n\t\t// Encoded timestamp values\n\t\ttb, err := tsenc.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Encoded float values\n\t\tvb, err := venc.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Prepend the first timestamp of the block in the first 8 bytes and the block\n\t\t// in the next byte, followed by the block\n\t\tb = packBlock(buf, BlockBoolean, tb, vb)\n\t\treturn nil\n\t}()\n\n\tputTimeEncoder(tsenc)\n\tputBooleanEncoder(venc)\n\n\treturn b, err\n}\n\n// DecodeBooleanBlock decodes the boolean block from the byte slice\n// and appends the boolean values to a.\nfunc DecodeBooleanBlock(block []byte, a *[]BooleanValue) ([]BooleanValue, error) {\n\t// Block type is the next block, make sure we actually have a float block\n\tblockType := block[0]\n\tif blockType != BlockBoolean {\n\t\treturn nil, fmt.Errorf(\"invalid block type: exp %d, got %d\", BlockBoolean, blockType)\n\t}\n\tblock = block[1:]\n\n\ttb, vb, err := unpackBlock(block)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttdec := timeDecoderPool.Get(0).(*TimeDecoder)\n\tvdec := booleanDecoderPool.Get(0).(*BooleanDecoder)\n\n\tvar i int\n\terr = func() error {\n\t\t// Setup our timestamp and value decoders\n\t\ttdec.Init(tb)\n\t\tvdec.SetBytes(vb)\n\n\t\t// Decode both a timestamp and value\n\t\tfor tdec.Next() && vdec.Next() {\n\t\t\tts := tdec.Read()\n\t\t\tv := vdec.Read()\n\t\t\tif i < len(*a) {\n\t\t\t\telem := &(*a)[i]\n\t\t\t\telem.unixnano = ts\n\t\t\t\telem.value = v\n\t\t\t} else {\n\t\t\t\t*a = append(*a, BooleanValue{ts, v})\n\t\t\t}\n\t\t\ti++\n\t\t}\n\n\t\t// Did timestamp decoding have an error?\n\t\terr = tdec.Error()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Did boolean decoding have an error?\n\t\terr = vdec.Error()\n\t\tif err != nil 
{\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}()\n\n\ttimeDecoderPool.Put(tdec)\n\tbooleanDecoderPool.Put(vdec)\n\n\treturn (*a)[:i], err\n}\n\n// IntegerValue represents an int64 value.\ntype IntegerValue struct {\n\tunixnano int64\n\tvalue    int64\n}\n\n// Value returns the underlying int64 value.\nfunc (v IntegerValue) Value() interface{} {\n\treturn v.value\n}\n\n// UnixNano returns the timestamp of the value.\nfunc (v IntegerValue) UnixNano() int64 {\n\treturn v.unixnano\n}\n\n// Size returns the number of bytes necessary to represent the value and its timestamp.\nfunc (v IntegerValue) Size() int {\n\treturn 16\n}\n\n// String returns the string representation of the value and its timestamp.\nfunc (v IntegerValue) String() string {\n\treturn fmt.Sprintf(\"%v %v\", time.Unix(0, v.unixnano), v.Value())\n}\n\nfunc encodeIntegerBlock(buf []byte, values []Value) ([]byte, error) {\n\ttsEnc := getTimeEncoder(len(values))\n\tvEnc := getIntegerEncoder(len(values))\n\n\tvar b []byte\n\terr := func() error {\n\t\tfor _, v := range values {\n\t\t\tvv := v.(IntegerValue)\n\t\t\ttsEnc.Write(vv.unixnano)\n\t\t\tvEnc.Write(vv.value)\n\t\t}\n\n\t\t// Encoded timestamp values\n\t\ttb, err := tsEnc.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Encoded int64 values\n\t\tvb, err := vEnc.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Prepend the first timestamp of the block in the first 8 bytes\n\t\tb = packBlock(buf, BlockInteger, tb, vb)\n\t\treturn nil\n\t}()\n\n\tputTimeEncoder(tsEnc)\n\tputIntegerEncoder(vEnc)\n\n\treturn b, err\n}\n\n// DecodeIntegerBlock decodes the integer block from the byte slice\n// and appends the integer values to a.\nfunc DecodeIntegerBlock(block []byte, a *[]IntegerValue) ([]IntegerValue, error) {\n\tblockType := block[0]\n\tif blockType != BlockInteger {\n\t\treturn nil, fmt.Errorf(\"invalid block type: exp %d, got %d\", BlockInteger, blockType)\n\t}\n\n\tblock = block[1:]\n\n\t// The first 8 bytes is the minimum 
timestamp of the block\n\ttb, vb, err := unpackBlock(block)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttdec := timeDecoderPool.Get(0).(*TimeDecoder)\n\tvdec := integerDecoderPool.Get(0).(*IntegerDecoder)\n\n\tvar i int\n\terr = func() error {\n\t\t// Setup our timestamp and value decoders\n\t\ttdec.Init(tb)\n\t\tvdec.SetBytes(vb)\n\n\t\t// Decode both a timestamp and value\n\t\tfor tdec.Next() && vdec.Next() {\n\t\t\tts := tdec.Read()\n\t\t\tv := vdec.Read()\n\t\t\tif i < len(*a) {\n\t\t\t\telem := &(*a)[i]\n\t\t\t\telem.unixnano = ts\n\t\t\t\telem.value = v\n\t\t\t} else {\n\t\t\t\t*a = append(*a, IntegerValue{ts, v})\n\t\t\t}\n\t\t\ti++\n\t\t}\n\n\t\t// Did timestamp decoding have an error?\n\t\terr = tdec.Error()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Did int64 decoding have an error?\n\t\terr = vdec.Error()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}()\n\n\ttimeDecoderPool.Put(tdec)\n\tintegerDecoderPool.Put(vdec)\n\n\treturn (*a)[:i], err\n}\n\n// StringValue represents a string value.\ntype StringValue struct {\n\tunixnano int64\n\tvalue    string\n}\n\n// Value returns the underlying string value.\nfunc (v StringValue) Value() interface{} {\n\treturn v.value\n}\n\n// UnixNano returns the timestamp of the value.\nfunc (v StringValue) UnixNano() int64 {\n\treturn v.unixnano\n}\n\n// Size returns the number of bytes necessary to represent the value and its timestamp.\nfunc (v StringValue) Size() int {\n\treturn 8 + len(v.value)\n}\n\n// String returns the string representation of the value and its timestamp.\nfunc (v StringValue) String() string {\n\treturn fmt.Sprintf(\"%v %v\", time.Unix(0, v.unixnano), v.Value())\n}\n\nfunc encodeStringBlock(buf []byte, values []Value) ([]byte, error) {\n\ttsEnc := getTimeEncoder(len(values))\n\tvEnc := getStringEncoder(len(values) * len(values[0].(StringValue).value))\n\n\tvar b []byte\n\terr := func() error {\n\t\tfor _, v := range values {\n\t\t\tvv := 
v.(StringValue)\n\t\t\ttsEnc.Write(vv.unixnano)\n\t\t\tvEnc.Write(vv.value)\n\t\t}\n\n\t\t// Encoded timestamp values\n\t\ttb, err := tsEnc.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Encoded string values\n\t\tvb, err := vEnc.Bytes()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Prepend the first timestamp of the block in the first 8 bytes\n\t\tb = packBlock(buf, BlockString, tb, vb)\n\n\t\treturn nil\n\t}()\n\n\tputTimeEncoder(tsEnc)\n\tputStringEncoder(vEnc)\n\n\treturn b, err\n}\n\n// DecodeStringBlock decodes the string block from the byte slice\n// and appends the string values to a.\nfunc DecodeStringBlock(block []byte, a *[]StringValue) ([]StringValue, error) {\n\tblockType := block[0]\n\tif blockType != BlockString {\n\t\treturn nil, fmt.Errorf(\"invalid block type: exp %d, got %d\", BlockString, blockType)\n\t}\n\n\tblock = block[1:]\n\n\t// The first 8 bytes is the minimum timestamp of the block\n\ttb, vb, err := unpackBlock(block)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttdec := timeDecoderPool.Get(0).(*TimeDecoder)\n\tvdec := stringDecoderPool.Get(0).(*StringDecoder)\n\n\tvar i int\n\terr = func() error {\n\t\t// Setup our timestamp and value decoders\n\t\ttdec.Init(tb)\n\t\terr = vdec.SetBytes(vb)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Decode both a timestamp and value\n\t\tfor tdec.Next() && vdec.Next() {\n\t\t\tts := tdec.Read()\n\t\t\tv := vdec.Read()\n\t\t\tif i < len(*a) {\n\t\t\t\telem := &(*a)[i]\n\t\t\t\telem.unixnano = ts\n\t\t\t\telem.value = v\n\t\t\t} else {\n\t\t\t\t*a = append(*a, StringValue{ts, v})\n\t\t\t}\n\t\t\ti++\n\t\t}\n\n\t\t// Did timestamp decoding have an error?\n\t\terr = tdec.Error()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Did string decoding have an error?\n\t\terr = vdec.Error()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}()\n\n\ttimeDecoderPool.Put(tdec)\n\tstringDecoderPool.Put(vdec)\n\n\treturn (*a)[:i], err\n}\n\nfunc 
packBlock(buf []byte, typ byte, ts []byte, values []byte) []byte {\n\t// We encode the length of the timestamp block using a variable byte encoding.\n\t// This allows small byte slices to take up 1 byte while larger ones use 2 or more.\n\tsz := 1 + binary.MaxVarintLen64 + len(ts) + len(values)\n\tif cap(buf) < sz {\n\t\tbuf = make([]byte, sz)\n\t}\n\tb := buf[:sz]\n\tb[0] = typ\n\ti := binary.PutUvarint(b[1:1+binary.MaxVarintLen64], uint64(len(ts)))\n\ti += 1\n\n\t// block is <len timestamp bytes>, <ts bytes>, <value bytes>\n\tcopy(b[i:], ts)\n\t// We don't encode the value length because we know it's the rest of the block after\n\t// the timestamp block.\n\tcopy(b[i+len(ts):], values)\n\treturn b[:i+len(ts)+len(values)]\n}\n\nfunc unpackBlock(buf []byte) (ts, values []byte, err error) {\n\t// Unpack the timestamp block length\n\ttsLen, i := binary.Uvarint(buf)\n\tif i <= 0 {\n\t\terr = fmt.Errorf(\"unpackBlock: unable to read timestamp block length\")\n\t\treturn\n\t}\n\n\t// Unpack the timestamp bytes\n\ttsIdx := int(i) + int(tsLen)\n\tif tsIdx > len(buf) {\n\t\terr = fmt.Errorf(\"unpackBlock: not enough data for timestamp\")\n\t\treturn\n\t}\n\tts = buf[int(i):tsIdx]\n\n\t// Unpack the value bytes\n\tvalues = buf[tsIdx:]\n\treturn\n}\n\n// ZigZagEncode converts a int64 to a uint64 by zig zagging negative and positive values\n// across even and odd numbers.  Eg. 
[0,-1,1,-2] becomes [0, 1, 2, 3].\nfunc ZigZagEncode(x int64) uint64 {\n\treturn uint64(uint64(x<<1) ^ uint64((int64(x) >> 63)))\n}\n\n// ZigZagDecode converts a previously zigzag encoded uint64 back to a int64.\nfunc ZigZagDecode(v uint64) int64 {\n\treturn int64((v >> 1) ^ uint64((int64(v&1)<<63)>>63))\n}\nfunc getTimeEncoder(sz int) TimeEncoder {\n\tx := timeEncoderPool.Get(sz).(TimeEncoder)\n\tx.Reset()\n\treturn x\n}\nfunc putTimeEncoder(enc TimeEncoder) { timeEncoderPool.Put(enc) }\n\nfunc getIntegerEncoder(sz int) IntegerEncoder {\n\tx := integerEncoderPool.Get(sz).(IntegerEncoder)\n\tx.Reset()\n\treturn x\n}\nfunc putIntegerEncoder(enc IntegerEncoder) { integerEncoderPool.Put(enc) }\n\nfunc getFloatEncoder(sz int) *FloatEncoder {\n\tx := floatEncoderPool.Get(sz).(*FloatEncoder)\n\tx.Reset()\n\treturn x\n}\nfunc putFloatEncoder(enc *FloatEncoder) { floatEncoderPool.Put(enc) }\n\nfunc getStringEncoder(sz int) StringEncoder {\n\tx := stringEncoderPool.Get(sz).(StringEncoder)\n\tx.Reset()\n\treturn x\n}\nfunc putStringEncoder(enc StringEncoder) { stringEncoderPool.Put(enc) }\n\nfunc getBooleanEncoder(sz int) BooleanEncoder {\n\tx := booleanEncoderPool.Get(sz).(BooleanEncoder)\n\tx.Reset()\n\treturn x\n}\nfunc putBooleanEncoder(enc BooleanEncoder) { booleanEncoderPool.Put(enc) }\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/encoding_test.go",
    "content": "package tsm1_test\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n\t\"github.com/influxdata/influxdb/tsdb/engine/tsm1\"\n)\n\nfunc TestEncoding_FloatBlock(t *testing.T) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tvalues[i] = tsm1.NewValue(t, float64(i))\n\t}\n\n\tb, err := tsm1.Values(values).Encode(nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar decodedValues []tsm1.Value\n\tdecodedValues, err = tsm1.DecodeBlock(b, decodedValues)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error decoding block: %v\", err)\n\t}\n\n\tif !reflect.DeepEqual(decodedValues, values) {\n\t\tt.Fatalf(\"unexpected results:\\n\\tgot: %s\\n\\texp: %s\\n\", spew.Sdump(decodedValues), spew.Sdump(values))\n\t}\n}\n\nfunc TestEncoding_FloatBlock_ZeroTime(t *testing.T) {\n\tvalues := make([]tsm1.Value, 3)\n\tfor i := 0; i < 3; i++ {\n\t\tvalues[i] = tsm1.NewValue(0, float64(i))\n\t}\n\n\tb, err := tsm1.Values(values).Encode(nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar decodedValues []tsm1.Value\n\tdecodedValues, err = tsm1.DecodeBlock(b, decodedValues)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error decoding block: %v\", err)\n\t}\n\n\tif !reflect.DeepEqual(decodedValues, values) {\n\t\tt.Fatalf(\"unexpected results:\\n\\tgot: %v\\n\\texp: %v\\n\", decodedValues, values)\n\t}\n}\n\nfunc TestEncoding_FloatBlock_SimilarFloats(t *testing.T) {\n\tvalues := make([]tsm1.Value, 5)\n\tvalues[0] = tsm1.NewValue(1444238178437870000, 6.00065e+06)\n\tvalues[1] = tsm1.NewValue(1444238185286830000, 6.000656e+06)\n\tvalues[2] = tsm1.NewValue(1444238188441501000, 6.000657e+06)\n\tvalues[3] = tsm1.NewValue(1444238195286811000, 6.000659e+06)\n\tvalues[4] = tsm1.NewValue(1444238198439917000, 6.000661e+06)\n\n\tb, err := tsm1.Values(values).Encode(nil)\n\tif 
err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar decodedValues []tsm1.Value\n\tdecodedValues, err = tsm1.DecodeBlock(b, decodedValues)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error decoding block: %v\", err)\n\t}\n\n\tif !reflect.DeepEqual(decodedValues, values) {\n\t\tt.Fatalf(\"unexpected results:\\n\\tgot: %v\\n\\texp: %v\\n\", decodedValues, values)\n\t}\n}\n\nfunc TestEncoding_IntBlock_Basic(t *testing.T) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tvalues[i] = tsm1.NewValue(t, int64(i))\n\t}\n\n\tb, err := tsm1.Values(values).Encode(nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar decodedValues []tsm1.Value\n\tdecodedValues, err = tsm1.DecodeBlock(b, decodedValues)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error decoding block: %v\", err)\n\t}\n\n\tif len(decodedValues) != len(values) {\n\t\tt.Fatalf(\"unexpected results length:\\n\\tgot: %v\\n\\texp: %v\\n\", len(decodedValues), len(values))\n\t}\n\n\tfor i := 0; i < len(decodedValues); i++ {\n\t\tif decodedValues[i].UnixNano() != values[i].UnixNano() {\n\t\t\tt.Fatalf(\"unexpected results:\\n\\tgot: %v\\n\\texp: %v\\n\", decodedValues[i].UnixNano(), values[i].UnixNano())\n\t\t}\n\n\t\tif decodedValues[i].Value() != values[i].Value() {\n\t\t\tt.Fatalf(\"unexpected results:\\n\\tgot: %v\\n\\texp: %v\\n\", decodedValues[i].Value(), values[i].Value())\n\t\t}\n\t}\n}\n\nfunc TestEncoding_IntBlock_Negatives(t *testing.T) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tv := int64(i)\n\t\tif i%2 == 0 {\n\t\t\tv = -v\n\t\t}\n\t\tvalues[i] = tsm1.NewValue(t, int64(v))\n\t}\n\n\tb, err := tsm1.Values(values).Encode(nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar decodedValues []tsm1.Value\n\tdecodedValues, err 
= tsm1.DecodeBlock(b, decodedValues)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error decoding block: %v\", err)\n\t}\n\n\tif !reflect.DeepEqual(decodedValues, values) {\n\t\tt.Fatalf(\"unexpected results:\\n\\tgot: %v\\n\\texp: %v\\n\", decodedValues, values)\n\t}\n}\n\nfunc TestEncoding_BooleanBlock_Basic(t *testing.T) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tv := true\n\t\tif i%2 == 0 {\n\t\t\tv = false\n\t\t}\n\t\tvalues[i] = tsm1.NewValue(t, v)\n\t}\n\n\tb, err := tsm1.Values(values).Encode(nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar decodedValues []tsm1.Value\n\tdecodedValues, err = tsm1.DecodeBlock(b, decodedValues)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error decoding block: %v\", err)\n\t}\n\n\tif !reflect.DeepEqual(decodedValues, values) {\n\t\tt.Fatalf(\"unexpected results:\\n\\tgot: %v\\n\\texp: %v\\n\", decodedValues, values)\n\t}\n}\n\nfunc TestEncoding_StringBlock_Basic(t *testing.T) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tvalues[i] = tsm1.NewValue(t, fmt.Sprintf(\"value %d\", i))\n\t}\n\n\tb, err := tsm1.Values(values).Encode(nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar decodedValues []tsm1.Value\n\tdecodedValues, err = tsm1.DecodeBlock(b, decodedValues)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error decoding block: %v\", err)\n\t}\n\n\tif !reflect.DeepEqual(decodedValues, values) {\n\t\tt.Fatalf(\"unexpected results:\\n\\tgot: %v\\n\\texp: %v\\n\", decodedValues, values)\n\t}\n}\n\nfunc TestEncoding_BlockType(t *testing.T) {\n\ttests := []struct {\n\t\tvalue     interface{}\n\t\tblockType byte\n\t}{\n\t\t{value: float64(1.0), blockType: tsm1.BlockFloat64},\n\t\t{value: int64(1), blockType: tsm1.BlockInteger},\n\t\t{value: true, blockType: 
tsm1.BlockBoolean},\n\t\t{value: \"string\", blockType: tsm1.BlockString},\n\t}\n\n\tfor _, test := range tests {\n\t\tvar values []tsm1.Value\n\t\tvalues = append(values, tsm1.NewValue(0, test.value))\n\n\t\tb, err := tsm1.Values(values).Encode(nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\n\t\tbt, err := tsm1.BlockType(b)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error decoding block type: %v\", err)\n\t\t}\n\n\t\tif got, exp := bt, test.blockType; got != exp {\n\t\t\tt.Fatalf(\"block type mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t}\n\n\t_, err := tsm1.BlockType([]byte{10})\n\tif err == nil {\n\t\tt.Fatalf(\"expected error decoding block type, got nil\")\n\t}\n}\n\nfunc TestEncoding_Count(t *testing.T) {\n\ttests := []struct {\n\t\tvalue     interface{}\n\t\tblockType byte\n\t}{\n\t\t{value: float64(1.0), blockType: tsm1.BlockFloat64},\n\t\t{value: int64(1), blockType: tsm1.BlockInteger},\n\t\t{value: true, blockType: tsm1.BlockBoolean},\n\t\t{value: \"string\", blockType: tsm1.BlockString},\n\t}\n\n\tfor _, test := range tests {\n\t\tvar values []tsm1.Value\n\t\tvalues = append(values, tsm1.NewValue(0, test.value))\n\n\t\tb, err := tsm1.Values(values).Encode(nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\n\t\tif got, exp := tsm1.BlockCount(b), 1; got != exp {\n\t\t\tt.Fatalf(\"block count mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t}\n}\n\nfunc TestValues_MergeFloat(t *testing.T) {\n\ttests := []struct {\n\t\ta, b, exp []tsm1.Value\n\t}{\n\n\t\t{ // empty a\n\t\t\ta: []tsm1.Value{},\n\n\t\t\tb: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1, 1.2),\n\t\t\t\ttsm1.NewValue(2, 2.2),\n\t\t\t},\n\t\t\texp: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1, 1.2),\n\t\t\t\ttsm1.NewValue(2, 2.2),\n\t\t\t},\n\t\t},\n\t\t{ // empty b\n\t\t\ta: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1, 1.1),\n\t\t\t\ttsm1.NewValue(2, 2.1),\n\t\t\t},\n\n\t\t\tb: []tsm1.Value{},\n\t\t\texp: 
[]tsm1.Value{\n\t\t\t\ttsm1.NewValue(1, 1.1),\n\t\t\t\ttsm1.NewValue(2, 2.1),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(0, 0.0),\n\t\t\t\ttsm1.NewValue(1, 1.1),\n\t\t\t\ttsm1.NewValue(2, 2.1),\n\t\t\t},\n\t\t\tb: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(2, 2.2),\n\t\t\t\ttsm1.NewValue(2, 2.2), // duplicate data\n\t\t\t},\n\t\t\texp: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(0, 0.0),\n\t\t\t\ttsm1.NewValue(1, 1.1),\n\t\t\t\ttsm1.NewValue(2, 2.2),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(0, 0.0),\n\t\t\t\ttsm1.NewValue(1, 1.1),\n\t\t\t\ttsm1.NewValue(1, 1.1), // duplicate data\n\t\t\t\ttsm1.NewValue(2, 2.1),\n\t\t\t},\n\t\t\tb: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(2, 2.2),\n\t\t\t\ttsm1.NewValue(2, 2.2), // duplicate data\n\t\t\t},\n\t\t\texp: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(0, 0.0),\n\t\t\t\ttsm1.NewValue(1, 1.1),\n\t\t\t\ttsm1.NewValue(2, 2.2),\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ta: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1, 1.1),\n\t\t\t},\n\t\t\tb: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(0, 0.0),\n\t\t\t\ttsm1.NewValue(1, 1.2), // overwrites a\n\t\t\t\ttsm1.NewValue(2, 2.2),\n\t\t\t\ttsm1.NewValue(3, 3.2),\n\t\t\t\ttsm1.NewValue(4, 4.2),\n\t\t\t},\n\t\t\texp: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(0, 0.0),\n\t\t\t\ttsm1.NewValue(1, 1.2),\n\t\t\t\ttsm1.NewValue(2, 2.2),\n\t\t\t\ttsm1.NewValue(3, 3.2),\n\t\t\t\ttsm1.NewValue(4, 4.2),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1, 1.1),\n\t\t\t\ttsm1.NewValue(2, 2.1),\n\t\t\t\ttsm1.NewValue(3, 3.1),\n\t\t\t\ttsm1.NewValue(4, 4.1),\n\t\t\t},\n\n\t\t\tb: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1, 1.2), // overwrites a\n\t\t\t\ttsm1.NewValue(2, 2.2), // overwrites a\n\t\t\t},\n\t\t\texp: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1, 1.2),\n\t\t\t\ttsm1.NewValue(2, 2.2),\n\t\t\t\ttsm1.NewValue(3, 3.1),\n\t\t\t\ttsm1.NewValue(4, 4.1),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1, 1.1),\n\t\t\t\ttsm1.NewValue(2, 
2.1),\n\t\t\t\ttsm1.NewValue(3, 3.1),\n\t\t\t\ttsm1.NewValue(4, 4.1),\n\t\t\t},\n\n\t\t\tb: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1, 1.2), // overwrites a\n\t\t\t\ttsm1.NewValue(2, 2.2), // overwrites a\n\t\t\t\ttsm1.NewValue(3, 3.2),\n\t\t\t\ttsm1.NewValue(4, 4.2),\n\t\t\t},\n\t\t\texp: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1, 1.2),\n\t\t\t\ttsm1.NewValue(2, 2.2),\n\t\t\t\ttsm1.NewValue(3, 3.2),\n\t\t\t\ttsm1.NewValue(4, 4.2),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(0, 0.0),\n\t\t\t\ttsm1.NewValue(1, 1.1),\n\t\t\t\ttsm1.NewValue(2, 2.1),\n\t\t\t\ttsm1.NewValue(3, 3.1),\n\t\t\t\ttsm1.NewValue(4, 4.1),\n\t\t\t},\n\t\t\tb: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(0, 0.0),\n\t\t\t\ttsm1.NewValue(2, 2.2),\n\t\t\t\ttsm1.NewValue(4, 4.2),\n\t\t\t},\n\t\t\texp: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(0, 0.0),\n\t\t\t\ttsm1.NewValue(1, 1.1),\n\t\t\t\ttsm1.NewValue(2, 2.2),\n\t\t\t\ttsm1.NewValue(3, 3.1),\n\t\t\t\ttsm1.NewValue(4, 4.2),\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\ta: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1462498658242869207, 0.0),\n\t\t\t\ttsm1.NewValue(1462498658288956853, 1.1),\n\t\t\t},\n\t\t\tb: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1462498658242870810, 0.0),\n\t\t\t\ttsm1.NewValue(1462498658262911238, 2.2),\n\t\t\t\ttsm1.NewValue(1462498658282415038, 4.2),\n\t\t\t\ttsm1.NewValue(1462498658282417760, 4.2),\n\t\t\t},\n\t\t\texp: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1462498658242869207, 0.0),\n\t\t\t\ttsm1.NewValue(1462498658242870810, 0.0),\n\t\t\t\ttsm1.NewValue(1462498658262911238, 2.2),\n\t\t\t\ttsm1.NewValue(1462498658282415038, 4.2),\n\t\t\t\ttsm1.NewValue(1462498658282417760, 4.2),\n\t\t\t\ttsm1.NewValue(1462498658288956853, 1.1),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(4, 4.0),\n\t\t\t\ttsm1.NewValue(5, 5.0),\n\t\t\t\ttsm1.NewValue(6, 6.0),\n\t\t\t},\n\t\t\tb: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1, 1.0),\n\t\t\t\ttsm1.NewValue(2, 2.0),\n\t\t\t\ttsm1.NewValue(3, 3.0),\n\t\t\t},\n\t\t\texp: 
[]tsm1.Value{\n\t\t\t\ttsm1.NewValue(1, 1.0),\n\t\t\t\ttsm1.NewValue(2, 2.0),\n\t\t\t\ttsm1.NewValue(3, 3.0),\n\t\t\t\ttsm1.NewValue(4, 4.0),\n\t\t\t\ttsm1.NewValue(5, 5.0),\n\t\t\t\ttsm1.NewValue(6, 6.0),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(5, 5.0),\n\t\t\t\ttsm1.NewValue(6, 6.0),\n\t\t\t},\n\t\t\tb: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1, 1.0),\n\t\t\t\ttsm1.NewValue(2, 2.0),\n\t\t\t\ttsm1.NewValue(3, 3.0),\n\t\t\t\ttsm1.NewValue(4, 4.0),\n\t\t\t\ttsm1.NewValue(7, 7.0),\n\t\t\t\ttsm1.NewValue(8, 8.0),\n\t\t\t},\n\t\t\texp: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1, 1.0),\n\t\t\t\ttsm1.NewValue(2, 2.0),\n\t\t\t\ttsm1.NewValue(3, 3.0),\n\t\t\t\ttsm1.NewValue(4, 4.0),\n\t\t\t\ttsm1.NewValue(5, 5.0),\n\t\t\t\ttsm1.NewValue(6, 6.0),\n\t\t\t\ttsm1.NewValue(7, 7.0),\n\t\t\t\ttsm1.NewValue(8, 8.0),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1, 1.0),\n\t\t\t\ttsm1.NewValue(2, 2.0),\n\t\t\t\ttsm1.NewValue(3, 3.0),\n\t\t\t},\n\t\t\tb: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(4, 4.0),\n\t\t\t\ttsm1.NewValue(5, 5.0),\n\t\t\t\ttsm1.NewValue(6, 6.0),\n\t\t\t},\n\t\t\texp: []tsm1.Value{\n\t\t\t\ttsm1.NewValue(1, 1.0),\n\t\t\t\ttsm1.NewValue(2, 2.0),\n\t\t\t\ttsm1.NewValue(3, 3.0),\n\t\t\t\ttsm1.NewValue(4, 4.0),\n\t\t\t\ttsm1.NewValue(5, 5.0),\n\t\t\t\ttsm1.NewValue(6, 6.0),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\tgot := tsm1.Values(test.a).Merge(test.b)\n\t\tif exp, got := len(test.exp), len(got); exp != got {\n\t\t\tt.Fatalf(\"test(%d): value length mismatch: exp %v, got %v\", i, exp, got)\n\t\t}\n\n\t\tdedup := tsm1.Values(append(test.a, test.b...)).Deduplicate()\n\n\t\tfor i := range test.exp {\n\t\t\tif exp, got := test.exp[i].String(), got[i].String(); exp != got {\n\t\t\t\tt.Fatalf(\"value mismatch:\\n exp %v\\n got %v\", exp, got)\n\t\t\t}\n\n\t\t\tif exp, got := test.exp[i].String(), dedup[i].String(); exp != got {\n\t\t\t\tt.Fatalf(\"value mismatch:\\n exp %v\\n got %v\", exp, 
got)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestIntegerValues_Merge(t *testing.T) {\n\tintegerValue := func(t int64, f int64) tsm1.IntegerValue {\n\t\treturn tsm1.NewValue(t, f).(tsm1.IntegerValue)\n\t}\n\n\ttests := []struct {\n\t\ta, b, exp []tsm1.IntegerValue\n\t}{\n\n\t\t{ // empty a\n\t\t\ta: []tsm1.IntegerValue{},\n\n\t\t\tb: []tsm1.IntegerValue{\n\t\t\t\tintegerValue(1, 10),\n\t\t\t\tintegerValue(2, 20),\n\t\t\t},\n\t\t\texp: []tsm1.IntegerValue{\n\t\t\t\tintegerValue(1, 10),\n\t\t\t\tintegerValue(2, 20),\n\t\t\t},\n\t\t},\n\t\t{ // empty b\n\t\t\ta: []tsm1.IntegerValue{\n\t\t\t\tintegerValue(1, 1),\n\t\t\t\tintegerValue(2, 2),\n\t\t\t},\n\n\t\t\tb: []tsm1.IntegerValue{},\n\t\t\texp: []tsm1.IntegerValue{\n\t\t\t\tintegerValue(1, 1),\n\t\t\t\tintegerValue(2, 2),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.IntegerValue{\n\t\t\t\tintegerValue(1, 1),\n\t\t\t},\n\t\t\tb: []tsm1.IntegerValue{\n\t\t\t\tintegerValue(0, 0),\n\t\t\t\tintegerValue(1, 10), // overwrites a\n\t\t\t\tintegerValue(2, 20),\n\t\t\t\tintegerValue(3, 30),\n\t\t\t\tintegerValue(4, 40),\n\t\t\t},\n\t\t\texp: []tsm1.IntegerValue{\n\t\t\t\tintegerValue(0, 0),\n\t\t\t\tintegerValue(1, 10),\n\t\t\t\tintegerValue(2, 20),\n\t\t\t\tintegerValue(3, 30),\n\t\t\t\tintegerValue(4, 40),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.IntegerValue{\n\t\t\t\tintegerValue(1, 1),\n\t\t\t\tintegerValue(2, 2),\n\t\t\t\tintegerValue(3, 3),\n\t\t\t\tintegerValue(4, 4),\n\t\t\t},\n\n\t\t\tb: []tsm1.IntegerValue{\n\t\t\t\tintegerValue(1, 10), // overwrites a\n\t\t\t\tintegerValue(2, 20), // overwrites a\n\t\t\t},\n\t\t\texp: []tsm1.IntegerValue{\n\t\t\t\tintegerValue(1, 10),\n\t\t\t\tintegerValue(2, 20),\n\t\t\t\tintegerValue(3, 3),\n\t\t\t\tintegerValue(4, 4),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.IntegerValue{\n\t\t\t\tintegerValue(1, 1),\n\t\t\t\tintegerValue(2, 2),\n\t\t\t\tintegerValue(3, 3),\n\t\t\t\tintegerValue(4, 4),\n\t\t\t},\n\n\t\t\tb: []tsm1.IntegerValue{\n\t\t\t\tintegerValue(1, 10), // overwrites 
a\n\t\t\t\tintegerValue(2, 20), // overwrites a\n\t\t\t\tintegerValue(3, 30),\n\t\t\t\tintegerValue(4, 40),\n\t\t\t},\n\t\t\texp: []tsm1.IntegerValue{\n\t\t\t\tintegerValue(1, 10),\n\t\t\t\tintegerValue(2, 20),\n\t\t\t\tintegerValue(3, 30),\n\t\t\t\tintegerValue(4, 40),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.IntegerValue{\n\t\t\t\tintegerValue(0, 0),\n\t\t\t\tintegerValue(1, 1),\n\t\t\t\tintegerValue(2, 2),\n\t\t\t\tintegerValue(3, 3),\n\t\t\t\tintegerValue(4, 4),\n\t\t\t},\n\t\t\tb: []tsm1.IntegerValue{\n\t\t\t\tintegerValue(0, 0),\n\t\t\t\tintegerValue(2, 20),\n\t\t\t\tintegerValue(4, 40),\n\t\t\t},\n\t\t\texp: []tsm1.IntegerValue{\n\t\t\t\tintegerValue(0, 0.0),\n\t\t\t\tintegerValue(1, 1),\n\t\t\t\tintegerValue(2, 20),\n\t\t\t\tintegerValue(3, 3),\n\t\t\t\tintegerValue(4, 40),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\tif i != 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tgot := tsm1.IntegerValues(test.a).Merge(test.b)\n\t\tif exp, got := len(test.exp), len(got); exp != got {\n\t\t\tt.Fatalf(\"test(%d): value length mismatch: exp %v, got %v\", i, exp, got)\n\t\t}\n\n\t\tdedup := tsm1.IntegerValues(append(test.a, test.b...)).Deduplicate()\n\n\t\tfor i := range test.exp {\n\t\t\tif exp, got := test.exp[i].String(), got[i].String(); exp != got {\n\t\t\t\tt.Fatalf(\"value mismatch:\\n exp %v\\n got %v\", exp, got)\n\t\t\t}\n\n\t\t\tif exp, got := test.exp[i].String(), dedup[i].String(); exp != got {\n\t\t\t\tt.Fatalf(\"value mismatch:\\n exp %v\\n got %v\", exp, got)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestFloatValues_Merge(t *testing.T) {\n\tfloatValue := func(t int64, f float64) tsm1.FloatValue {\n\t\treturn tsm1.NewValue(t, f).(tsm1.FloatValue)\n\t}\n\n\ttests := []struct {\n\t\ta, b, exp []tsm1.FloatValue\n\t}{\n\n\t\t{ // empty a\n\t\t\ta: []tsm1.FloatValue{},\n\n\t\t\tb: []tsm1.FloatValue{\n\t\t\t\tfloatValue(1, 1.2),\n\t\t\t\tfloatValue(2, 2.2),\n\t\t\t},\n\t\t\texp: []tsm1.FloatValue{\n\t\t\t\tfloatValue(1, 1.2),\n\t\t\t\tfloatValue(2, 
2.2),\n\t\t\t},\n\t\t},\n\t\t{ // empty b\n\t\t\ta: []tsm1.FloatValue{\n\t\t\t\tfloatValue(1, 1.1),\n\t\t\t\tfloatValue(2, 2.1),\n\t\t\t},\n\n\t\t\tb: []tsm1.FloatValue{},\n\t\t\texp: []tsm1.FloatValue{\n\t\t\t\tfloatValue(1, 1.1),\n\t\t\t\tfloatValue(2, 2.1),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.FloatValue{\n\t\t\t\tfloatValue(1, 1.1),\n\t\t\t},\n\t\t\tb: []tsm1.FloatValue{\n\t\t\t\tfloatValue(0, 0.0),\n\t\t\t\tfloatValue(1, 1.2), // overwrites a\n\t\t\t\tfloatValue(2, 2.2),\n\t\t\t\tfloatValue(3, 3.2),\n\t\t\t\tfloatValue(4, 4.2),\n\t\t\t},\n\t\t\texp: []tsm1.FloatValue{\n\t\t\t\tfloatValue(0, 0.0),\n\t\t\t\tfloatValue(1, 1.2),\n\t\t\t\tfloatValue(2, 2.2),\n\t\t\t\tfloatValue(3, 3.2),\n\t\t\t\tfloatValue(4, 4.2),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.FloatValue{\n\t\t\t\tfloatValue(1, 1.1),\n\t\t\t\tfloatValue(2, 2.1),\n\t\t\t\tfloatValue(3, 3.1),\n\t\t\t\tfloatValue(4, 4.1),\n\t\t\t},\n\n\t\t\tb: []tsm1.FloatValue{\n\t\t\t\tfloatValue(1, 1.2), // overwrites a\n\t\t\t\tfloatValue(2, 2.2), // overwrites a\n\t\t\t},\n\t\t\texp: []tsm1.FloatValue{\n\t\t\t\tfloatValue(1, 1.2),\n\t\t\t\tfloatValue(2, 2.2),\n\t\t\t\tfloatValue(3, 3.1),\n\t\t\t\tfloatValue(4, 4.1),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.FloatValue{\n\t\t\t\tfloatValue(1, 1.1),\n\t\t\t\tfloatValue(2, 2.1),\n\t\t\t\tfloatValue(3, 3.1),\n\t\t\t\tfloatValue(4, 4.1),\n\t\t\t},\n\n\t\t\tb: []tsm1.FloatValue{\n\t\t\t\tfloatValue(1, 1.2), // overwrites a\n\t\t\t\tfloatValue(2, 2.2), // overwrites a\n\t\t\t\tfloatValue(3, 3.2),\n\t\t\t\tfloatValue(4, 4.2),\n\t\t\t},\n\t\t\texp: []tsm1.FloatValue{\n\t\t\t\tfloatValue(1, 1.2),\n\t\t\t\tfloatValue(2, 2.2),\n\t\t\t\tfloatValue(3, 3.2),\n\t\t\t\tfloatValue(4, 4.2),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.FloatValue{\n\t\t\t\tfloatValue(0, 0.0),\n\t\t\t\tfloatValue(1, 1.1),\n\t\t\t\tfloatValue(2, 2.1),\n\t\t\t\tfloatValue(3, 3.1),\n\t\t\t\tfloatValue(4, 4.1),\n\t\t\t},\n\t\t\tb: []tsm1.FloatValue{\n\t\t\t\tfloatValue(0, 0.0),\n\t\t\t\tfloatValue(2, 
2.2),\n\t\t\t\tfloatValue(4, 4.2),\n\t\t\t},\n\t\t\texp: []tsm1.FloatValue{\n\t\t\t\tfloatValue(0, 0.0),\n\t\t\t\tfloatValue(1, 1.1),\n\t\t\t\tfloatValue(2, 2.2),\n\t\t\t\tfloatValue(3, 3.1),\n\t\t\t\tfloatValue(4, 4.2),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\tgot := tsm1.FloatValues(test.a).Merge(test.b)\n\t\tif exp, got := len(test.exp), len(got); exp != got {\n\t\t\tt.Fatalf(\"test(%d): value length mismatch: exp %v, got %v\", i, exp, got)\n\t\t}\n\n\t\tdedup := tsm1.FloatValues(append(test.a, test.b...)).Deduplicate()\n\n\t\tfor i := range test.exp {\n\t\t\tif exp, got := test.exp[i].String(), got[i].String(); exp != got {\n\t\t\t\tt.Fatalf(\"value mismatch:\\n exp %v\\n got %v\", exp, got)\n\t\t\t}\n\n\t\t\tif exp, got := test.exp[i].String(), dedup[i].String(); exp != got {\n\t\t\t\tt.Fatalf(\"value mismatch:\\n exp %v\\n got %v\", exp, got)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestBooleanValues_Merge(t *testing.T) {\n\tbooleanValue := func(t int64, f bool) tsm1.BooleanValue {\n\t\treturn tsm1.NewValue(t, f).(tsm1.BooleanValue)\n\t}\n\n\ttests := []struct {\n\t\ta, b, exp []tsm1.BooleanValue\n\t}{\n\n\t\t{ // empty a\n\t\t\ta: []tsm1.BooleanValue{},\n\n\t\t\tb: []tsm1.BooleanValue{\n\t\t\t\tbooleanValue(1, true),\n\t\t\t\tbooleanValue(2, true),\n\t\t\t},\n\t\t\texp: []tsm1.BooleanValue{\n\t\t\t\tbooleanValue(1, true),\n\t\t\t\tbooleanValue(2, true),\n\t\t\t},\n\t\t},\n\t\t{ // empty b\n\t\t\ta: []tsm1.BooleanValue{\n\t\t\t\tbooleanValue(1, true),\n\t\t\t\tbooleanValue(2, true),\n\t\t\t},\n\n\t\t\tb: []tsm1.BooleanValue{},\n\t\t\texp: []tsm1.BooleanValue{\n\t\t\t\tbooleanValue(1, true),\n\t\t\t\tbooleanValue(2, true),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.BooleanValue{\n\t\t\t\tbooleanValue(1, true),\n\t\t\t},\n\t\t\tb: []tsm1.BooleanValue{\n\t\t\t\tbooleanValue(0, false),\n\t\t\t\tbooleanValue(1, false), // overwrites a\n\t\t\t\tbooleanValue(2, false),\n\t\t\t\tbooleanValue(3, false),\n\t\t\t\tbooleanValue(4, false),\n\t\t\t},\n\t\t\texp: 
[]tsm1.BooleanValue{\n\t\t\t\tbooleanValue(0, false),\n\t\t\t\tbooleanValue(1, false),\n\t\t\t\tbooleanValue(2, false),\n\t\t\t\tbooleanValue(3, false),\n\t\t\t\tbooleanValue(4, false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.BooleanValue{\n\t\t\t\tbooleanValue(1, true),\n\t\t\t\tbooleanValue(2, true),\n\t\t\t\tbooleanValue(3, true),\n\t\t\t\tbooleanValue(4, true),\n\t\t\t},\n\n\t\t\tb: []tsm1.BooleanValue{\n\t\t\t\tbooleanValue(1, false), // overwrites a\n\t\t\t\tbooleanValue(2, false), // overwrites a\n\t\t\t},\n\t\t\texp: []tsm1.BooleanValue{\n\t\t\t\tbooleanValue(1, false), // overwrites a\n\t\t\t\tbooleanValue(2, false), // overwrites a\n\t\t\t\tbooleanValue(3, true),\n\t\t\t\tbooleanValue(4, true),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.BooleanValue{\n\t\t\t\tbooleanValue(1, true),\n\t\t\t\tbooleanValue(2, true),\n\t\t\t\tbooleanValue(3, true),\n\t\t\t\tbooleanValue(4, true),\n\t\t\t},\n\n\t\t\tb: []tsm1.BooleanValue{\n\t\t\t\tbooleanValue(1, false), // overwrites a\n\t\t\t\tbooleanValue(2, false), // overwrites a\n\t\t\t\tbooleanValue(3, false),\n\t\t\t\tbooleanValue(4, false),\n\t\t\t},\n\t\t\texp: []tsm1.BooleanValue{\n\t\t\t\tbooleanValue(1, false),\n\t\t\t\tbooleanValue(2, false),\n\t\t\t\tbooleanValue(3, false),\n\t\t\t\tbooleanValue(4, false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.BooleanValue{\n\t\t\t\tbooleanValue(0, true),\n\t\t\t\tbooleanValue(1, true),\n\t\t\t\tbooleanValue(2, true),\n\t\t\t\tbooleanValue(3, true),\n\t\t\t\tbooleanValue(4, true),\n\t\t\t},\n\t\t\tb: []tsm1.BooleanValue{\n\t\t\t\tbooleanValue(0, false),\n\t\t\t\tbooleanValue(2, false),\n\t\t\t\tbooleanValue(4, false),\n\t\t\t},\n\t\t\texp: []tsm1.BooleanValue{\n\t\t\t\tbooleanValue(0, false),\n\t\t\t\tbooleanValue(1, true),\n\t\t\t\tbooleanValue(2, false),\n\t\t\t\tbooleanValue(3, true),\n\t\t\t\tbooleanValue(4, false),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\tgot := tsm1.BooleanValues(test.a).Merge(test.b)\n\t\tif exp, got := len(test.exp), len(got); 
exp != got {\n\t\t\tt.Fatalf(\"test(%d): value length mismatch: exp %v, got %v\", i, exp, got)\n\t\t}\n\n\t\tdedup := tsm1.BooleanValues(append(test.a, test.b...)).Deduplicate()\n\n\t\tfor i := range test.exp {\n\t\t\tif exp, got := test.exp[i].String(), got[i].String(); exp != got {\n\t\t\t\tt.Fatalf(\"value mismatch:\\n exp %v\\n got %v\", exp, got)\n\t\t\t}\n\n\t\t\tif exp, got := test.exp[i].String(), dedup[i].String(); exp != got {\n\t\t\t\tt.Fatalf(\"value mismatch:\\n exp %v\\n got %v\", exp, got)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestStringValues_Merge(t *testing.T) {\n\tstringValue := func(t int64, f string) tsm1.StringValue {\n\t\treturn tsm1.NewValue(t, f).(tsm1.StringValue)\n\t}\n\n\ttests := []struct {\n\t\ta, b, exp []tsm1.StringValue\n\t}{\n\n\t\t{ // empty a\n\t\t\ta: []tsm1.StringValue{},\n\n\t\t\tb: []tsm1.StringValue{\n\t\t\t\tstringValue(1, \"10\"),\n\t\t\t\tstringValue(2, \"20\"),\n\t\t\t},\n\t\t\texp: []tsm1.StringValue{\n\t\t\t\tstringValue(1, \"10\"),\n\t\t\t\tstringValue(2, \"20\"),\n\t\t\t},\n\t\t},\n\t\t{ // empty b\n\t\t\ta: []tsm1.StringValue{\n\t\t\t\tstringValue(1, \"1\"),\n\t\t\t\tstringValue(2, \"2\"),\n\t\t\t},\n\n\t\t\tb: []tsm1.StringValue{},\n\t\t\texp: []tsm1.StringValue{\n\t\t\t\tstringValue(1, \"1\"),\n\t\t\t\tstringValue(2, \"2\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.StringValue{\n\t\t\t\tstringValue(1, \"1\"),\n\t\t\t},\n\t\t\tb: []tsm1.StringValue{\n\t\t\t\tstringValue(0, \"0\"),\n\t\t\t\tstringValue(1, \"10\"), // overwrites a\n\t\t\t\tstringValue(2, \"20\"),\n\t\t\t\tstringValue(3, \"30\"),\n\t\t\t\tstringValue(4, \"40\"),\n\t\t\t},\n\t\t\texp: []tsm1.StringValue{\n\t\t\t\tstringValue(0, \"0\"),\n\t\t\t\tstringValue(1, \"10\"),\n\t\t\t\tstringValue(2, \"20\"),\n\t\t\t\tstringValue(3, \"30\"),\n\t\t\t\tstringValue(4, \"40\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.StringValue{\n\t\t\t\tstringValue(1, \"1\"),\n\t\t\t\tstringValue(2, \"2\"),\n\t\t\t\tstringValue(3, \"3\"),\n\t\t\t\tstringValue(4, 
\"4\"),\n\t\t\t},\n\n\t\t\tb: []tsm1.StringValue{\n\t\t\t\tstringValue(1, \"10\"), // overwrites a\n\t\t\t\tstringValue(2, \"20\"), // overwrites a\n\t\t\t},\n\t\t\texp: []tsm1.StringValue{\n\t\t\t\tstringValue(1, \"10\"),\n\t\t\t\tstringValue(2, \"20\"),\n\t\t\t\tstringValue(3, \"3\"),\n\t\t\t\tstringValue(4, \"4\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.StringValue{\n\t\t\t\tstringValue(1, \"1\"),\n\t\t\t\tstringValue(2, \"2\"),\n\t\t\t\tstringValue(3, \"3\"),\n\t\t\t\tstringValue(4, \"4\"),\n\t\t\t},\n\n\t\t\tb: []tsm1.StringValue{\n\t\t\t\tstringValue(1, \"10\"), // overwrites a\n\t\t\t\tstringValue(2, \"20\"), // overwrites a\n\t\t\t\tstringValue(3, \"30\"),\n\t\t\t\tstringValue(4, \"40\"),\n\t\t\t},\n\t\t\texp: []tsm1.StringValue{\n\t\t\t\tstringValue(1, \"10\"),\n\t\t\t\tstringValue(2, \"20\"),\n\t\t\t\tstringValue(3, \"30\"),\n\t\t\t\tstringValue(4, \"40\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ta: []tsm1.StringValue{\n\t\t\t\tstringValue(0, \"0\"),\n\t\t\t\tstringValue(1, \"1\"),\n\t\t\t\tstringValue(2, \"2\"),\n\t\t\t\tstringValue(3, \"3\"),\n\t\t\t\tstringValue(4, \"4\"),\n\t\t\t},\n\t\t\tb: []tsm1.StringValue{\n\t\t\t\tstringValue(0, \"0\"),\n\t\t\t\tstringValue(2, \"20\"),\n\t\t\t\tstringValue(4, \"40\"),\n\t\t\t},\n\t\t\texp: []tsm1.StringValue{\n\t\t\t\tstringValue(0, \"0.0\"),\n\t\t\t\tstringValue(1, \"1\"),\n\t\t\t\tstringValue(2, \"20\"),\n\t\t\t\tstringValue(3, \"3\"),\n\t\t\t\tstringValue(4, \"40\"),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\tif i != 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tgot := tsm1.StringValues(test.a).Merge(test.b)\n\t\tif exp, got := len(test.exp), len(got); exp != got {\n\t\t\tt.Fatalf(\"test(%d): value length mismatch: exp %v, got %v\", i, exp, got)\n\t\t}\n\n\t\tdedup := tsm1.StringValues(append(test.a, test.b...)).Deduplicate()\n\n\t\tfor i := range test.exp {\n\t\t\tif exp, got := test.exp[i].String(), got[i].String(); exp != got {\n\t\t\t\tt.Fatalf(\"value mismatch:\\n exp %v\\n got %v\", exp, 
got)\n\t\t\t}\n\n\t\t\tif exp, got := test.exp[i].String(), dedup[i].String(); exp != got {\n\t\t\t\tt.Fatalf(\"value mismatch:\\n exp %v\\n got %v\", exp, got)\n\t\t\t}\n\t\t}\n\t}\n}\nfunc getTimes(n, step int, precision time.Duration) []int64 {\n\tt := time.Now().Round(precision).UnixNano()\n\ta := make([]int64, n)\n\tfor i := 0; i < n; i++ {\n\t\ta[i] = t + (time.Duration(i*60) * precision).Nanoseconds()\n\t}\n\treturn a\n}\n\nfunc BenchmarkDecodeBlock_Float_Empty(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tvalues[i] = tsm1.NewValue(t, float64(i))\n\t}\n\n\tbytes, err := tsm1.Values(values).Encode(nil)\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar decodedValues []tsm1.Value\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err = tsm1.DecodeBlock(bytes, decodedValues)\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"unexpected error decoding block: %v\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkDecodeBlock_Float_EqualSize(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tvalues[i] = tsm1.NewValue(t, float64(i))\n\t}\n\n\tbytes, err := tsm1.Values(values).Encode(nil)\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tdecodedValues := make([]tsm1.Value, len(values))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err = tsm1.DecodeBlock(bytes, decodedValues)\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"unexpected error decoding block: %v\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkDecodeBlock_Float_TypeSpecific(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tvalues[i] = tsm1.NewValue(t, float64(i))\n\t}\n\n\tbytes, err := tsm1.Values(values).Encode(nil)\n\tif err != nil 
{\n\t\tb.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tdecodedValues := make([]tsm1.FloatValue, len(values))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err = tsm1.DecodeFloatBlock(bytes, &decodedValues)\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"unexpected error decoding block: %v\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkDecodeBlock_Integer_Empty(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tvalues[i] = tsm1.NewValue(t, int64(i))\n\t}\n\n\tbytes, err := tsm1.Values(values).Encode(nil)\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar decodedValues []tsm1.Value\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err = tsm1.DecodeBlock(bytes, decodedValues)\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"unexpected error decoding block: %v\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkDecodeBlock_Integer_EqualSize(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tvalues[i] = tsm1.NewValue(t, int64(i))\n\t}\n\n\tbytes, err := tsm1.Values(values).Encode(nil)\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tdecodedValues := make([]tsm1.Value, len(values))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err = tsm1.DecodeBlock(bytes, decodedValues)\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"unexpected error decoding block: %v\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkDecodeBlock_Integer_TypeSpecific(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tvalues[i] = tsm1.NewValue(t, int64(i))\n\t}\n\n\tbytes, err := tsm1.Values(values).Encode(nil)\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tdecodedValues := make([]tsm1.IntegerValue, 
len(values))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err = tsm1.DecodeIntegerBlock(bytes, &decodedValues)\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"unexpected error decoding block: %v\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkDecodeBlock_Boolean_Empty(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tvalues[i] = tsm1.NewValue(t, true)\n\t}\n\n\tbytes, err := tsm1.Values(values).Encode(nil)\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar decodedValues []tsm1.Value\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err = tsm1.DecodeBlock(bytes, decodedValues)\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"unexpected error decoding block: %v\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkDecodeBlock_Boolean_EqualSize(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tvalues[i] = tsm1.NewValue(t, true)\n\t}\n\n\tbytes, err := tsm1.Values(values).Encode(nil)\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tdecodedValues := make([]tsm1.Value, len(values))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err = tsm1.DecodeBlock(bytes, decodedValues)\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"unexpected error decoding block: %v\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkDecodeBlock_Boolean_TypeSpecific(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tvalues[i] = tsm1.NewValue(t, true)\n\t}\n\n\tbytes, err := tsm1.Values(values).Encode(nil)\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tdecodedValues := make([]tsm1.BooleanValue, len(values))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err = tsm1.DecodeBooleanBlock(bytes, 
&decodedValues)\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"unexpected error decoding block: %v\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkDecodeBlock_String_Empty(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tvalues[i] = tsm1.NewValue(t, fmt.Sprintf(\"value %d\", i))\n\t}\n\n\tbytes, err := tsm1.Values(values).Encode(nil)\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar decodedValues []tsm1.Value\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err = tsm1.DecodeBlock(bytes, decodedValues)\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"unexpected error decoding block: %v\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkDecodeBlock_String_EqualSize(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tvalues[i] = tsm1.NewValue(t, fmt.Sprintf(\"value %d\", i))\n\t}\n\n\tbytes, err := tsm1.Values(values).Encode(nil)\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tdecodedValues := make([]tsm1.Value, len(values))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err = tsm1.DecodeBlock(bytes, decodedValues)\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"unexpected error decoding block: %v\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkDecodeBlock_String_TypeSpecific(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tvalues[i] = tsm1.NewValue(t, fmt.Sprintf(\"value %d\", i))\n\t}\n\n\tbytes, err := tsm1.Values(values).Encode(nil)\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tdecodedValues := make([]tsm1.StringValue, len(values))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err = tsm1.DecodeStringBlock(bytes, &decodedValues)\n\t\tif err != nil 
{\n\t\t\tb.Fatalf(\"unexpected error decoding block: %v\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkValues_Deduplicate(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\tvalues := make([]tsm1.Value, len(times))\n\tfor i, t := range times {\n\t\tvalues[i] = tsm1.NewValue(t, fmt.Sprintf(\"value %d\", i))\n\t}\n\tvalues = append(values, values...)\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\ttsm1.Values(values).Deduplicate()\n\t}\n}\n\nfunc BenchmarkValues_Merge(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\ta := make([]tsm1.Value, len(times))\n\tc := make([]tsm1.Value, len(times))\n\n\tfor i, t := range times {\n\t\ta[i] = tsm1.NewValue(t, float64(i))\n\t\tc[i] = tsm1.NewValue(t+1, float64(i))\n\t}\n\n\tb.ResetTimer()\n\tbenchmarkMerge(a, c, b)\n}\n\nfunc BenchmarkValues_MergeDisjoint(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\ta := make([]tsm1.Value, len(times))\n\tc := make([]tsm1.Value, len(times))\n\n\tfor i, t := range times {\n\t\ta[i] = tsm1.NewValue(t, float64(i))\n\t\tc[i] = tsm1.NewValue(times[len(times)-1]+int64((i+1)*1e9), float64(i))\n\t}\n\n\tb.ResetTimer()\n\tbenchmarkMerge(a, c, b)\n}\n\nfunc BenchmarkValues_MergeSame(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\ta := make([]tsm1.Value, len(times))\n\tc := make([]tsm1.Value, len(times))\n\n\tfor i, t := range times {\n\t\ta[i] = tsm1.NewValue(t, float64(i))\n\t\tc[i] = tsm1.NewValue(t, float64(i))\n\t}\n\n\tb.ResetTimer()\n\tbenchmarkMerge(a, c, b)\n}\n\nfunc BenchmarkValues_MergeSimilar(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\ta := make([]tsm1.Value, len(times))\n\tc := make([]tsm1.Value, len(times))\n\n\tfor i, t := range times {\n\t\ta[i] = tsm1.NewValue(t, float64(i))\n\t\tif i == 0 {\n\t\t\tt++\n\t\t}\n\t\tc[i] = tsm1.NewValue(t, 
float64(i))\n\t}\n\n\tb.ResetTimer()\n\tbenchmarkMerge(a, c, b)\n}\n\nfunc BenchmarkValues_MergeUnevenA(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\ta := make([]tsm1.Value, len(times))\n\tc := make([]tsm1.Value, len(times))\n\n\tfor i, t := range times {\n\t\ta[i] = tsm1.NewValue(t, float64(i))\n\t\tc[i] = tsm1.NewValue(t, float64(i))\n\t}\n\n\tb.ResetTimer()\n\tbenchmarkMerge(a[:700], c[:10], b)\n}\n\nfunc BenchmarkValues_MergeUnevenB(b *testing.B) {\n\tvalueCount := 1000\n\ttimes := getTimes(valueCount, 60, time.Second)\n\ta := make([]tsm1.Value, len(times))\n\tc := make([]tsm1.Value, len(times))\n\n\tfor i, t := range times {\n\t\ta[i] = tsm1.NewValue(t, float64(i))\n\t\tc[i] = tsm1.NewValue(t, float64(i))\n\t}\n\n\tb.ResetTimer()\n\tbenchmarkMerge(a[:10], c[:700], b)\n}\n\nfunc benchmarkMerge(a, c tsm1.Values, b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tb.StopTimer()\n\t\taa := make(tsm1.Values, len(a))\n\t\tcopy(aa, a)\n\t\tcc := make(tsm1.Values, len(c))\n\t\tcopy(cc, c)\n\t\tb.StartTimer()\n\t\ttsm1.Values(aa).Merge(tsm1.Values(cc))\n\t}\n}\n\nfunc BenchmarkValues_EncodeInteger(b *testing.B) {\n\tvalueCount := 1024\n\ttimes := getTimes(valueCount, 60, time.Second)\n\ta := make([]tsm1.Value, len(times))\n\n\tfor i, t := range times {\n\t\ta[i] = tsm1.NewValue(t, int64(i))\n\t}\n\n\tbuf := make([]byte, 1024*8)\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\ttsm1.Values(a).Encode(buf)\n\t}\n}\n\nfunc BenchmarkValues_EncodeFloat(b *testing.B) {\n\tvalueCount := 1024\n\ttimes := getTimes(valueCount, 60, time.Second)\n\ta := make([]tsm1.Value, len(times))\n\n\tfor i, t := range times {\n\t\ta[i] = tsm1.NewValue(t, float64(i))\n\t}\n\n\tbuf := make([]byte, 1024*8)\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\ttsm1.Values(a).Encode(buf)\n\t}\n}\nfunc BenchmarkValues_EncodeString(b *testing.B) {\n\tvalueCount := 1024\n\ttimes := getTimes(valueCount, 60, time.Second)\n\ta := make([]tsm1.Value, 
len(times))\n\n\tfor i, t := range times {\n\t\ta[i] = tsm1.NewValue(t, fmt.Sprintf(\"%d\", i))\n\t}\n\n\tbuf := make([]byte, 1024*8)\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\ttsm1.Values(a).Encode(buf)\n\t}\n}\nfunc BenchmarkValues_EncodeBool(b *testing.B) {\n\tvalueCount := 1024\n\ttimes := getTimes(valueCount, 60, time.Second)\n\ta := make([]tsm1.Value, len(times))\n\n\tfor i, t := range times {\n\t\tif i%2 == 0 {\n\t\t\ta[i] = tsm1.NewValue(t, true)\n\t\t} else {\n\t\t\ta[i] = tsm1.NewValue(t, false)\n\t\t}\n\t}\n\n\tbuf := make([]byte, 1024*8)\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\ttsm1.Values(a).Encode(buf)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine.go",
    "content": "// Package tsm1 provides a TSDB in the Time Structured Merge tree format.\npackage tsm1 // import \"github.com/influxdata/influxdb/tsdb/engine/tsm1\"\n\nimport (\n\t\"archive/tar\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"math\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/tsdb/index/inmem\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/bytesutil\"\n\t\"github.com/influxdata/influxdb/pkg/estimator\"\n\t\"github.com/influxdata/influxdb/pkg/limiter\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t_ \"github.com/influxdata/influxdb/tsdb/index\"\n\t\"github.com/uber-go/zap\"\n)\n\n//go:generate tmpl -data=@iterator.gen.go.tmpldata iterator.gen.go.tmpl\n//go:generate tmpl -data=@file_store.gen.go.tmpldata file_store.gen.go.tmpl\n//go:generate tmpl -data=@encoding.gen.go.tmpldata encoding.gen.go.tmpl\n//go:generate tmpl -data=@compact.gen.go.tmpldata compact.gen.go.tmpl\n\nfunc init() {\n\ttsdb.RegisterEngine(\"tsm1\", NewEngine)\n}\n\nvar (\n\t// Ensure Engine implements the interface.\n\t_ tsdb.Engine = &Engine{}\n\t// Static objects to prevent small allocs.\n\ttimeBytes              = []byte(\"time\")\n\tkeyFieldSeparatorBytes = []byte(keyFieldSeparator)\n)\n\nconst (\n\t// keyFieldSeparator separates the series key from the field name in the composite key\n\t// that identifies a specific field in series\n\tkeyFieldSeparator = \"#!~#\"\n)\n\n// Statistics gathered by the engine.\nconst (\n\tstatCacheCompactions        = \"cacheCompactions\"\n\tstatCacheCompactionsActive  = \"cacheCompactionsActive\"\n\tstatCacheCompactionError    = \"cacheCompactionErr\"\n\tstatCacheCompactionDuration = \"cacheCompactionDuration\"\n\n\tstatTSMLevel1Compactions        = \"tsmLevel1Compactions\"\n\tstatTSMLevel1CompactionsActive  = 
\"tsmLevel1CompactionsActive\"\n\tstatTSMLevel1CompactionError    = \"tsmLevel1CompactionErr\"\n\tstatTSMLevel1CompactionDuration = \"tsmLevel1CompactionDuration\"\n\n\tstatTSMLevel2Compactions        = \"tsmLevel2Compactions\"\n\tstatTSMLevel2CompactionsActive  = \"tsmLevel2CompactionsActive\"\n\tstatTSMLevel2CompactionError    = \"tsmLevel2CompactionErr\"\n\tstatTSMLevel2CompactionDuration = \"tsmLevel2CompactionDuration\"\n\n\tstatTSMLevel3Compactions        = \"tsmLevel3Compactions\"\n\tstatTSMLevel3CompactionsActive  = \"tsmLevel3CompactionsActive\"\n\tstatTSMLevel3CompactionError    = \"tsmLevel3CompactionErr\"\n\tstatTSMLevel3CompactionDuration = \"tsmLevel3CompactionDuration\"\n\n\tstatTSMOptimizeCompactions        = \"tsmOptimizeCompactions\"\n\tstatTSMOptimizeCompactionsActive  = \"tsmOptimizeCompactionsActive\"\n\tstatTSMOptimizeCompactionError    = \"tsmOptimizeCompactionErr\"\n\tstatTSMOptimizeCompactionDuration = \"tsmOptimizeCompactionDuration\"\n\n\tstatTSMFullCompactions        = \"tsmFullCompactions\"\n\tstatTSMFullCompactionsActive  = \"tsmFullCompactionsActive\"\n\tstatTSMFullCompactionError    = \"tsmFullCompactionErr\"\n\tstatTSMFullCompactionDuration = \"tsmFullCompactionDuration\"\n)\n\n// Engine represents a storage engine with compressed blocks.\ntype Engine struct {\n\tmu sync.RWMutex\n\n\t// The following group of fields is used to track the state of level compactions within the\n\t// Engine. The WaitGroup is used to monitor the compaction goroutines, the 'done' channel is\n\t// used to signal those goroutines to shutdown. Every request to disable level compactions will\n\t// call 'Wait' on 'wg', with the first goroutine to arrive (levelWorkers == 0 while holding the\n\t// lock) will close the done channel and re-assign 'nil' to the variable. 
Re-enabling will\n\t// decrease 'levelWorkers', and when it decreases to zero, level compactions will be started\n\t// back up again.\n\n\twg           sync.WaitGroup // waitgroup for active level compaction goroutines\n\tdone         chan struct{}  // channel to signal level compactions to stop\n\tlevelWorkers int            // Number of \"workers\" that expect compactions to be in a disabled state\n\n\tsnapDone chan struct{}  // channel to signal snapshot compactions to stop\n\tsnapWG   sync.WaitGroup // waitgroup for running snapshot compactions\n\n\tid           uint64\n\tdatabase     string\n\tpath         string\n\tlogger       zap.Logger // Logger to be used for important messages\n\ttraceLogger  zap.Logger // Logger to be used when trace-logging is on.\n\ttraceLogging bool\n\n\tindex    tsdb.Index\n\tfieldset *tsdb.MeasurementFieldSet\n\n\tWAL            *WAL\n\tCache          *Cache\n\tCompactor      *Compactor\n\tCompactionPlan CompactionPlanner\n\tFileStore      *FileStore\n\n\tMaxPointsPerBlock int\n\n\t// CacheFlushMemorySizeThreshold specifies the minimum size threshodl for\n\t// the cache when the engine should write a snapshot to a TSM file\n\tCacheFlushMemorySizeThreshold uint64\n\n\t// CacheFlushWriteColdDuration specifies the length of time after which if\n\t// no writes have been committed to the WAL, the engine will write\n\t// a snapshot of the cache to a TSM file\n\tCacheFlushWriteColdDuration time.Duration\n\n\t// Controls whether to enabled compactions when the engine is open\n\tenableCompactionsOnOpen bool\n\n\tstats *EngineStatistics\n\n\t// The limiter for concurrent compactions\n\tcompactionLimiter limiter.Fixed\n}\n\n// NewEngine returns a new instance of Engine.\nfunc NewEngine(id uint64, idx tsdb.Index, database, path string, walPath string, opt tsdb.EngineOptions) tsdb.Engine {\n\tw := NewWAL(walPath)\n\tw.syncDelay = time.Duration(opt.Config.WALFsyncDelay)\n\n\tfs := NewFileStore(path)\n\tcache := 
NewCache(uint64(opt.Config.CacheMaxMemorySize), path)\n\n\tc := &Compactor{\n\t\tDir:       path,\n\t\tFileStore: fs,\n\t}\n\n\tlogger := zap.New(zap.NullEncoder())\n\te := &Engine{\n\t\tid:           id,\n\t\tdatabase:     database,\n\t\tpath:         path,\n\t\tindex:        idx,\n\t\tlogger:       logger,\n\t\ttraceLogger:  logger,\n\t\ttraceLogging: opt.Config.TraceLoggingEnabled,\n\n\t\tfieldset: tsdb.NewMeasurementFieldSet(),\n\n\t\tWAL:   w,\n\t\tCache: cache,\n\n\t\tFileStore:      fs,\n\t\tCompactor:      c,\n\t\tCompactionPlan: NewDefaultPlanner(fs, time.Duration(opt.Config.CompactFullWriteColdDuration)),\n\n\t\tCacheFlushMemorySizeThreshold: opt.Config.CacheSnapshotMemorySize,\n\t\tCacheFlushWriteColdDuration:   time.Duration(opt.Config.CacheSnapshotWriteColdDuration),\n\t\tenableCompactionsOnOpen:       true,\n\t\tstats:             &EngineStatistics{},\n\t\tcompactionLimiter: opt.CompactionLimiter,\n\t}\n\n\t// Attach fieldset to index.\n\te.index.SetFieldSet(e.fieldset)\n\n\tif e.traceLogging {\n\t\tfs.enableTraceLogging(true)\n\t\tw.enableTraceLogging(true)\n\t}\n\n\treturn e\n}\n\n// SetEnabled sets whether the engine is enabled.\nfunc (e *Engine) SetEnabled(enabled bool) {\n\te.enableCompactionsOnOpen = enabled\n\te.SetCompactionsEnabled(enabled)\n}\n\n// SetCompactionsEnabled enables compactions on the engine.  
When disabled\n// all running compactions are aborted and new compactions stop running.\nfunc (e *Engine) SetCompactionsEnabled(enabled bool) {\n\tif enabled {\n\t\te.enableSnapshotCompactions()\n\t\te.enableLevelCompactions(false)\n\t} else {\n\t\te.disableSnapshotCompactions()\n\t\te.disableLevelCompactions(false)\n\t}\n}\n\n// enableLevelCompactions will request that level compactions start back up again\n//\n// 'wait' signifies that a corresponding call to disableLevelCompactions(true) was made at some\n// point, and the associated task that required disabled compactions is now complete\nfunc (e *Engine) enableLevelCompactions(wait bool) {\n\t// If we don't need to wait, see if we're already enabled\n\tif !wait {\n\t\te.mu.RLock()\n\t\tif e.done != nil {\n\t\t\te.mu.RUnlock()\n\t\t\treturn\n\t\t}\n\t\te.mu.RUnlock()\n\t}\n\n\te.mu.Lock()\n\tif wait {\n\t\te.levelWorkers -= 1\n\t}\n\tif e.levelWorkers != 0 || e.done != nil {\n\t\t// still waiting on more workers or already enabled\n\t\te.mu.Unlock()\n\t\treturn\n\t}\n\n\t// last one to enable, start things back up\n\te.Compactor.EnableCompactions()\n\tquit := make(chan struct{})\n\te.done = quit\n\n\te.wg.Add(4)\n\te.mu.Unlock()\n\n\tgo func() { defer e.wg.Done(); e.compactTSMFull(quit) }()\n\tgo func() { defer e.wg.Done(); e.compactTSMLevel(true, 1, quit) }()\n\tgo func() { defer e.wg.Done(); e.compactTSMLevel(true, 2, quit) }()\n\tgo func() { defer e.wg.Done(); e.compactTSMLevel(false, 3, quit) }()\n}\n\n// disableLevelCompactions will stop level compactions before returning.\n//\n// If 'wait' is set to true, then a corresponding call to enableLevelCompactions(true) will be\n// required before level compactions will start back up again.\nfunc (e *Engine) disableLevelCompactions(wait bool) {\n\te.mu.Lock()\n\told := e.levelWorkers\n\tif wait {\n\t\te.levelWorkers += 1\n\t}\n\n\tif old == 0 && e.done != nil {\n\t\t// Prevent new compactions from starting\n\t\te.Compactor.DisableCompactions()\n\n\t\t// Stop all 
background compaction goroutines\n\t\tclose(e.done)\n\t\te.done = nil\n\n\t}\n\n\te.mu.Unlock()\n\te.wg.Wait()\n}\n\nfunc (e *Engine) enableSnapshotCompactions() {\n\t// Check if already enabled under read lock\n\te.mu.RLock()\n\tif e.snapDone != nil {\n\t\te.mu.RUnlock()\n\t\treturn\n\t}\n\te.mu.RUnlock()\n\n\t// Check again under write lock\n\te.mu.Lock()\n\tif e.snapDone != nil {\n\t\te.mu.Unlock()\n\t\treturn\n\t}\n\n\te.Compactor.EnableSnapshots()\n\tquit := make(chan struct{})\n\te.snapDone = quit\n\te.snapWG.Add(1)\n\te.mu.Unlock()\n\n\tgo func() { defer e.snapWG.Done(); e.compactCache(quit) }()\n}\n\nfunc (e *Engine) disableSnapshotCompactions() {\n\te.mu.Lock()\n\n\tif e.snapDone != nil {\n\t\tclose(e.snapDone)\n\t\te.snapDone = nil\n\t\te.Compactor.DisableSnapshots()\n\t}\n\n\te.mu.Unlock()\n\te.snapWG.Wait()\n\n\t// If the cache is empty, free up its resources as well.\n\tif e.Cache.Size() == 0 {\n\t\te.Cache.Free()\n\t}\n}\n\n// Path returns the path the engine was opened with.\nfunc (e *Engine) Path() string { return e.path }\n\nfunc (e *Engine) SetFieldName(measurement []byte, name string) {\n\te.index.SetFieldName(measurement, name)\n}\n\nfunc (e *Engine) MeasurementExists(name []byte) (bool, error) {\n\treturn e.index.MeasurementExists(name)\n}\n\nfunc (e *Engine) MeasurementNamesByExpr(expr influxql.Expr) ([][]byte, error) {\n\treturn e.index.MeasurementNamesByExpr(expr)\n}\n\nfunc (e *Engine) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {\n\treturn e.index.MeasurementNamesByRegex(re)\n}\n\n// MeasurementFields returns the measurement fields for a measurement.\nfunc (e *Engine) MeasurementFields(measurement []byte) *tsdb.MeasurementFields {\n\treturn e.fieldset.CreateFieldsIfNotExists(measurement)\n}\n\nfunc (e *Engine) ForEachMeasurementSeriesByExpr(name []byte, condition influxql.Expr, fn func(tags models.Tags) error) error {\n\treturn e.index.ForEachMeasurementSeriesByExpr(name, condition, fn)\n}\n\nfunc (e *Engine) 
HasTagKey(name, key []byte) (bool, error) {\n\treturn e.index.HasTagKey(name, key)\n}\n\nfunc (e *Engine) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) {\n\treturn e.index.MeasurementTagKeysByExpr(name, expr)\n}\n\n// MeasurementTagKeyValuesByExpr returns a set of tag values filtered by an expression.\n//\n// MeasurementTagKeyValuesByExpr relies on the provided tag keys being sorted.\n// The caller can indicate the tag keys have been sorted by setting the\n// keysSorted argument appropriately. Tag values are returned in a slice that\n// is indexible according to the sorted order of the tag keys, e.g., the values\n// for the earliest tag k will be available in index 0 of the returned values\n// slice.\n//\nfunc (e *Engine) MeasurementTagKeyValuesByExpr(name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error) {\n\treturn e.index.MeasurementTagKeyValuesByExpr(name, keys, expr, keysSorted)\n}\n\nfunc (e *Engine) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error {\n\treturn e.index.ForEachMeasurementTagKey(name, fn)\n}\n\nfunc (e *Engine) TagKeyCardinality(name, key []byte) int {\n\treturn e.index.TagKeyCardinality(name, key)\n}\n\n// SeriesN returns the unique number of series in the index.\nfunc (e *Engine) SeriesN() int64 {\n\treturn e.index.SeriesN()\n}\n\nfunc (e *Engine) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {\n\treturn e.index.SeriesSketches()\n}\n\nfunc (e *Engine) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {\n\treturn e.index.MeasurementsSketches()\n}\n\n// LastModified returns the time when this shard was last modified.\nfunc (e *Engine) LastModified() time.Time {\n\twalTime := e.WAL.LastWriteTime()\n\tfsTime := e.FileStore.LastModified()\n\n\tif walTime.After(fsTime) {\n\t\treturn walTime\n\t}\n\n\treturn fsTime\n}\n\n// EngineStatistics maintains statistics for the engine.\ntype EngineStatistics struct {\n\tCacheCompactions 
       int64 // Counter of cache compactions that have ever run.\n\tCacheCompactionsActive  int64 // Gauge of cache compactions currently running.\n\tCacheCompactionErrors   int64 // Counter of cache compactions that have failed due to error.\n\tCacheCompactionDuration int64 // Counter of number of wall nanoseconds spent in cache compactions.\n\n\tTSMCompactions        [3]int64 // Counter of TSM compactions (by level) that have ever run.\n\tTSMCompactionsActive  [3]int64 // Gauge of TSM compactions (by level) currently running.\n\tTSMCompactionErrors   [3]int64 // Counter of TSM compcations (by level) that have failed due to error.\n\tTSMCompactionDuration [3]int64 // Counter of number of wall nanoseconds spent in TSM compactions (by level).\n\n\tTSMOptimizeCompactions        int64 // Counter of optimize compactions that have ever run.\n\tTSMOptimizeCompactionsActive  int64 // Gauge of optimize compactions currently running.\n\tTSMOptimizeCompactionErrors   int64 // Counter of optimize compactions that have failed due to error.\n\tTSMOptimizeCompactionDuration int64 // Counter of number of wall nanoseconds spent in optimize compactions.\n\n\tTSMFullCompactions        int64 // Counter of full compactions that have ever run.\n\tTSMFullCompactionsActive  int64 // Gauge of full compactions currently running.\n\tTSMFullCompactionErrors   int64 // Counter of full compactions that have failed due to error.\n\tTSMFullCompactionDuration int64 // Counter of number of wall nanoseconds spent in full compactions.\n}\n\n// Statistics returns statistics for periodic monitoring.\nfunc (e *Engine) Statistics(tags map[string]string) []models.Statistic {\n\tstatistics := make([]models.Statistic, 0, 4)\n\tstatistics = append(statistics, models.Statistic{\n\t\tName: \"tsm1_engine\",\n\t\tTags: tags,\n\t\tValues: map[string]interface{}{\n\t\t\tstatCacheCompactions:        atomic.LoadInt64(&e.stats.CacheCompactions),\n\t\t\tstatCacheCompactionsActive:  
atomic.LoadInt64(&e.stats.CacheCompactionsActive),\n\t\t\tstatCacheCompactionError:    atomic.LoadInt64(&e.stats.CacheCompactionErrors),\n\t\t\tstatCacheCompactionDuration: atomic.LoadInt64(&e.stats.CacheCompactionDuration),\n\n\t\t\tstatTSMLevel1Compactions:        atomic.LoadInt64(&e.stats.TSMCompactions[0]),\n\t\t\tstatTSMLevel1CompactionsActive:  atomic.LoadInt64(&e.stats.TSMCompactionsActive[0]),\n\t\t\tstatTSMLevel1CompactionError:    atomic.LoadInt64(&e.stats.TSMCompactionErrors[0]),\n\t\t\tstatTSMLevel1CompactionDuration: atomic.LoadInt64(&e.stats.TSMCompactionDuration[0]),\n\n\t\t\tstatTSMLevel2Compactions:        atomic.LoadInt64(&e.stats.TSMCompactions[1]),\n\t\t\tstatTSMLevel2CompactionsActive:  atomic.LoadInt64(&e.stats.TSMCompactionsActive[1]),\n\t\t\tstatTSMLevel2CompactionError:    atomic.LoadInt64(&e.stats.TSMCompactionErrors[1]),\n\t\t\tstatTSMLevel2CompactionDuration: atomic.LoadInt64(&e.stats.TSMCompactionDuration[1]),\n\n\t\t\tstatTSMLevel3Compactions:        atomic.LoadInt64(&e.stats.TSMCompactions[2]),\n\t\t\tstatTSMLevel3CompactionsActive:  atomic.LoadInt64(&e.stats.TSMCompactionsActive[2]),\n\t\t\tstatTSMLevel3CompactionError:    atomic.LoadInt64(&e.stats.TSMCompactionErrors[2]),\n\t\t\tstatTSMLevel3CompactionDuration: atomic.LoadInt64(&e.stats.TSMCompactionDuration[2]),\n\n\t\t\tstatTSMOptimizeCompactions:        atomic.LoadInt64(&e.stats.TSMOptimizeCompactions),\n\t\t\tstatTSMOptimizeCompactionsActive:  atomic.LoadInt64(&e.stats.TSMOptimizeCompactionsActive),\n\t\t\tstatTSMOptimizeCompactionError:    atomic.LoadInt64(&e.stats.TSMOptimizeCompactionErrors),\n\t\t\tstatTSMOptimizeCompactionDuration: atomic.LoadInt64(&e.stats.TSMOptimizeCompactionDuration),\n\n\t\t\tstatTSMFullCompactions:        atomic.LoadInt64(&e.stats.TSMFullCompactions),\n\t\t\tstatTSMFullCompactionsActive:  atomic.LoadInt64(&e.stats.TSMFullCompactionsActive),\n\t\t\tstatTSMFullCompactionError:    
atomic.LoadInt64(&e.stats.TSMFullCompactionErrors),\n\t\t\tstatTSMFullCompactionDuration: atomic.LoadInt64(&e.stats.TSMFullCompactionDuration),\n\t\t},\n\t})\n\n\tstatistics = append(statistics, e.Cache.Statistics(tags)...)\n\tstatistics = append(statistics, e.FileStore.Statistics(tags)...)\n\tstatistics = append(statistics, e.WAL.Statistics(tags)...)\n\treturn statistics\n}\n\n// DiskSize returns the total size in bytes of all TSM and WAL segments on disk.\nfunc (e *Engine) DiskSize() int64 {\n\treturn e.FileStore.DiskSizeBytes() + e.WAL.DiskSizeBytes()\n}\n\n// Open opens and initializes the engine.\nfunc (e *Engine) Open() error {\n\tif err := os.MkdirAll(e.path, 0777); err != nil {\n\t\treturn err\n\t}\n\n\tif err := e.cleanup(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := e.WAL.Open(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := e.FileStore.Open(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := e.reloadCache(); err != nil {\n\t\treturn err\n\t}\n\n\te.Compactor.Open()\n\n\tif e.enableCompactionsOnOpen {\n\t\te.SetCompactionsEnabled(true)\n\t}\n\n\treturn nil\n}\n\n// Close closes the engine. 
Subsequent calls to Close are a nop.\nfunc (e *Engine) Close() error {\n\te.SetCompactionsEnabled(false)\n\n\t// Lock now and close everything else down.\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\te.done = nil // Ensures that the channel will not be closed again.\n\n\tif err := e.FileStore.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn e.WAL.Close()\n}\n\n// WithLogger sets the logger for the engine.\nfunc (e *Engine) WithLogger(log zap.Logger) {\n\te.logger = log.With(zap.String(\"engine\", \"tsm1\"))\n\n\tif e.traceLogging {\n\t\te.traceLogger = e.logger\n\t}\n\n\te.WAL.WithLogger(e.logger)\n\te.FileStore.WithLogger(e.logger)\n}\n\n// LoadMetadataIndex loads the shard metadata into memory.\nfunc (e *Engine) LoadMetadataIndex(shardID uint64, index tsdb.Index) error {\n\tnow := time.Now()\n\n\t// Save reference to index for iterator creation.\n\te.index = index\n\n\tif err := e.FileStore.WalkKeys(func(key []byte, typ byte) error {\n\t\tfieldType, err := tsmFieldTypeToInfluxQLDataType(typ)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := e.addToIndexFromKey(key, fieldType); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\t// load metadata from the Cache\n\tif err := e.Cache.ApplyEntryFn(func(key string, entry *entry) error {\n\t\tfieldType, err := entry.values.InfluxQLType()\n\t\tif err != nil {\n\t\t\te.logger.Info(fmt.Sprintf(\"error getting the data type of values for key %s: %s\", key, err.Error()))\n\t\t}\n\n\t\tif err := e.addToIndexFromKey([]byte(key), fieldType); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\te.traceLogger.Info(fmt.Sprintf(\"Meta data index for shard %d loaded in %v\", shardID, time.Since(now)))\n\treturn nil\n}\n\n// IsIdle returns true if the cache is empty, there are no running compactions and the\n// shard is fully compacted.\nfunc (e *Engine) IsIdle() bool {\n\tcacheEmpty := e.Cache.Size() == 
0\n\n\trunningCompactions := atomic.LoadInt64(&e.stats.CacheCompactionsActive)\n\trunningCompactions += atomic.LoadInt64(&e.stats.TSMCompactionsActive[0])\n\trunningCompactions += atomic.LoadInt64(&e.stats.TSMCompactionsActive[1])\n\trunningCompactions += atomic.LoadInt64(&e.stats.TSMCompactionsActive[2])\n\trunningCompactions += atomic.LoadInt64(&e.stats.TSMFullCompactionsActive)\n\trunningCompactions += atomic.LoadInt64(&e.stats.TSMOptimizeCompactionsActive)\n\n\treturn cacheEmpty && runningCompactions == 0 && e.CompactionPlan.FullyCompacted()\n}\n\n// Backup writes a tar archive of any TSM files modified since the passed\n// in time to the passed in writer. The basePath will be prepended to the names\n// of the files in the archive. It will force a snapshot of the WAL first\n// then perform the backup with a read lock against the file store. This means\n// that new TSM files will not be able to be created in this shard while the\n// backup is running. For shards that are still acively getting writes, this\n// could cause the WAL to backup, increasing memory usage and evenutally rejecting writes.\nfunc (e *Engine) Backup(w io.Writer, basePath string, since time.Time) error {\n\tpath, err := e.CreateSnapshot()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := e.index.SnapshotTo(path); err != nil {\n\t\treturn err\n\t}\n\n\ttw := tar.NewWriter(w)\n\tdefer tw.Close()\n\n\t// Remove the temporary snapshot dir\n\tdefer os.RemoveAll(path)\n\n\t// Recursively read all files from path.\n\tfiles, err := readDir(path, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Filter paths to only changed files.\n\tvar filtered []string\n\tfor _, file := range files {\n\t\tfi, err := os.Stat(filepath.Join(path, file))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t} else if !fi.ModTime().After(since) {\n\t\t\tcontinue\n\t\t}\n\t\tfiltered = append(filtered, file)\n\t}\n\tif len(filtered) == 0 {\n\t\treturn nil\n\t}\n\n\tfor _, f := range filtered {\n\t\tif err := 
e.writeFileToBackup(f, basePath, filepath.Join(path, f), tw); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// writeFileToBackup copies the file into the tar archive. Files will use the shardRelativePath\n// in their names. This should be the <db>/<retention policy>/<id> part of the path.\nfunc (e *Engine) writeFileToBackup(name string, shardRelativePath, fullPath string, tw *tar.Writer) error {\n\tf, err := os.Stat(fullPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\th := &tar.Header{\n\t\tName:    filepath.ToSlash(filepath.Join(shardRelativePath, name)),\n\t\tModTime: f.ModTime(),\n\t\tSize:    f.Size(),\n\t\tMode:    int64(f.Mode()),\n\t}\n\tif err := tw.WriteHeader(h); err != nil {\n\t\treturn err\n\t}\n\tfr, err := os.Open(fullPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer fr.Close()\n\n\t_, err = io.CopyN(tw, fr, h.Size)\n\n\treturn err\n}\n\n// Restore reads a tar archive generated by Backup().\n// Only files that match basePath will be copied into the directory. This obtains\n// a write lock so no operations can be performed while restoring.\nfunc (e *Engine) Restore(r io.Reader, basePath string) error {\n\treturn e.overlay(r, basePath, false)\n}\n\n// Import reads a tar archive generated by Backup() and adds each\n// file matching basePath as a new TSM file.  
This obtains\n// a write lock so no operations can be performed while Importing.\nfunc (e *Engine) Import(r io.Reader, basePath string) error {\n\treturn e.overlay(r, basePath, true)\n}\n\n// overlay reads a tar archive generated by Backup() and adds each file\n// from the archive matching basePath to the shard.\n// If asNew is true, each file will be installed as a new TSM file even if an\n// existing file with the same name in the backup exists.\nfunc (e *Engine) overlay(r io.Reader, basePath string, asNew bool) error {\n\t// Copy files from archive while under lock to prevent reopening.\n\tnewFiles, err := func() ([]string, error) {\n\t\te.mu.Lock()\n\t\tdefer e.mu.Unlock()\n\n\t\tvar newFiles []string\n\t\ttr := tar.NewReader(r)\n\t\tfor {\n\t\t\tif fileName, err := e.readFileFromBackup(tr, basePath, asNew); err == io.EOF {\n\t\t\t\tbreak\n\t\t\t} else if err != nil {\n\t\t\t\treturn nil, err\n\t\t\t} else if fileName != \"\" {\n\t\t\t\tnewFiles = append(newFiles, fileName)\n\t\t\t}\n\t\t}\n\n\t\tif err := syncDir(e.path); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := e.FileStore.Replace(nil, newFiles); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn newFiles, nil\n\t}()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Load any new series keys to the index\n\treaders := make([]chan seriesKey, 0, len(newFiles))\n\tfor _, f := range newFiles {\n\t\tch := make(chan seriesKey, 1)\n\t\treaders = append(readers, ch)\n\n\t\t// If asNew is true, the files created from readFileFromBackup will be new ones\n\t\t// having a temp extension.\n\t\tf = strings.TrimSuffix(f, \".tmp\")\n\n\t\tfd, err := os.Open(f)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tr, err := NewTSMReader(fd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer r.Close()\n\n\t\tgo func(c chan seriesKey, r *TSMReader) {\n\t\t\tn := r.KeyCount()\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\tkey, typ := r.KeyAt(i)\n\t\t\t\tc <- seriesKey{key, 
typ}\n\t\t\t}\n\t\t\tclose(c)\n\t\t}(ch, r)\n\t}\n\n\t// Merge and dedup all the series keys across each reader to reduce\n\t// lock contention on the index.\n\tmerged := merge(readers...)\n\tfor v := range merged {\n\t\tfieldType, err := tsmFieldTypeToInfluxQLDataType(v.typ)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := e.addToIndexFromKey(v.key, fieldType); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// readFileFromBackup copies the next file from the archive into the shard.\n// The file is skipped if it does not have a matching shardRelativePath prefix.\n// If asNew is true, each file will be installed as a new TSM file even if an\n// existing file with the same name in the backup exists.\nfunc (e *Engine) readFileFromBackup(tr *tar.Reader, shardRelativePath string, asNew bool) (string, error) {\n\t// Read next archive file.\n\thdr, err := tr.Next()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tnativeFileName := filepath.FromSlash(hdr.Name)\n\n\t// Skip file if it does not have a matching prefix.\n\tif !filepath.HasPrefix(nativeFileName, shardRelativePath) {\n\t\treturn \"\", nil\n\t}\n\tfilename, err := filepath.Rel(shardRelativePath, nativeFileName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif asNew {\n\t\tfilename = fmt.Sprintf(\"%09d-%09d.%s\", e.FileStore.NextGeneration(), 1, TSMFileExtension)\n\t}\n\n\tdestPath := filepath.Join(e.path, filename)\n\ttmp := destPath + \".tmp\"\n\n\t// Create new file on disk.\n\tf, err := os.OpenFile(tmp, os.O_CREATE|os.O_RDWR, 0666)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer f.Close()\n\n\t// Copy from archive to the file.\n\tif _, err := io.CopyN(f, tr, hdr.Size); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Sync to disk & close.\n\tif err := f.Sync(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn tmp, nil\n}\n\n// addToIndexFromKey will pull the measurement name, series key, and field name from a composite key and add it to the\n// database index and 
measurement fields\nfunc (e *Engine) addToIndexFromKey(key []byte, fieldType influxql.DataType) error {\n\tseriesKey, field := SeriesAndFieldFromCompositeKey(key)\n\tname := tsdb.MeasurementFromSeriesKey(seriesKey)\n\n\tmf := e.fieldset.CreateFieldsIfNotExists(name)\n\tif err := mf.CreateFieldIfNotExists(field, fieldType, false); err != nil {\n\t\treturn err\n\t}\n\n\t// Build in-memory index, if necessary.\n\tif e.index.Type() == inmem.IndexName {\n\t\ttags, _ := models.ParseTags(seriesKey)\n\t\tif err := e.index.InitializeSeries(seriesKey, name, tags); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// WritePoints writes metadata and point data into the engine.\n// It returns an error if new points are added to an existing key.\nfunc (e *Engine) WritePoints(points []models.Point) error {\n\tvalues := make(map[string][]Value, len(points))\n\tvar keyBuf []byte\n\tvar baseLen int\n\tfor _, p := range points {\n\t\tkeyBuf = append(keyBuf[:0], p.Key()...)\n\t\tkeyBuf = append(keyBuf, keyFieldSeparator...)\n\t\tbaseLen = len(keyBuf)\n\t\titer := p.FieldIterator()\n\t\tt := p.Time().UnixNano()\n\t\tfor iter.Next() {\n\t\t\t// Skip fields name \"time\", they are illegal\n\t\t\tif bytes.Equal(iter.FieldKey(), timeBytes) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tkeyBuf = append(keyBuf[:baseLen], iter.FieldKey()...)\n\t\t\tvar v Value\n\t\t\tswitch iter.Type() {\n\t\t\tcase models.Float:\n\t\t\t\tfv, err := iter.FloatValue()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tv = NewFloatValue(t, fv)\n\t\t\tcase models.Integer:\n\t\t\t\tiv, err := iter.IntegerValue()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tv = NewIntegerValue(t, iv)\n\t\t\tcase models.String:\n\t\t\t\tv = NewStringValue(t, iter.StringValue())\n\t\t\tcase models.Boolean:\n\t\t\t\tbv, err := iter.BooleanValue()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tv = NewBooleanValue(t, bv)\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"unknown 
field type for %s: %s\", string(iter.FieldKey()), p.String())\n\t\t\t}\n\t\t\tvalues[string(keyBuf)] = append(values[string(keyBuf)], v)\n\t\t}\n\t}\n\n\te.mu.RLock()\n\tdefer e.mu.RUnlock()\n\n\t// first try to write to the cache\n\terr := e.Cache.WriteMulti(values)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = e.WAL.WriteMulti(values)\n\treturn err\n}\n\n// containsSeries returns a map of keys indicating whether the key exists and\n// has values or not.\nfunc (e *Engine) containsSeries(keys [][]byte) (map[string]bool, error) {\n\t// keyMap is used to see if a given key exists.  keys\n\t// are the measurement + tagset (minus separate & field)\n\tkeyMap := map[string]bool{}\n\tfor _, k := range keys {\n\t\tkeyMap[string(k)] = false\n\t}\n\n\tfor _, k := range e.Cache.unsortedKeys() {\n\t\tseriesKey, _ := SeriesAndFieldFromCompositeKey([]byte(k))\n\t\tkeyMap[string(seriesKey)] = true\n\t}\n\n\tif err := e.FileStore.WalkKeys(func(k []byte, _ byte) error {\n\t\tseriesKey, _ := SeriesAndFieldFromCompositeKey(k)\n\t\tif _, ok := keyMap[string(seriesKey)]; ok {\n\t\t\tkeyMap[string(seriesKey)] = true\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn keyMap, nil\n}\n\n// deleteSeries removes all series keys from the engine.\nfunc (e *Engine) deleteSeries(seriesKeys [][]byte) error {\n\treturn e.DeleteSeriesRange(seriesKeys, math.MinInt64, math.MaxInt64)\n}\n\n// DeleteSeriesRange removes the values between min and max (inclusive) from all series.\nfunc (e *Engine) DeleteSeriesRange(seriesKeys [][]byte, min, max int64) error {\n\tif len(seriesKeys) == 0 {\n\t\treturn nil\n\t}\n\n\t// Ensure keys are sorted since lower layers require them to be.\n\tif !bytesutil.IsSorted(seriesKeys) {\n\t\tbytesutil.Sort(seriesKeys)\n\t}\n\n\t// Disable and abort running compactions so that tombstones added existing tsm\n\t// files don't get removed.  This would cause deleted measurements/series to\n\t// re-appear once the compaction completed.  
We only disable the level compactions\n\t// so that snapshotting does not stop while writing out tombstones.  If it is stopped,\n\t// and writing tombstones takes a long time, writes can get rejected due to the cache\n\t// filling up.\n\te.disableLevelCompactions(true)\n\tdefer e.enableLevelCompactions(true)\n\n\ttempKeys := seriesKeys[:]\n\tdeleteKeys := make([]string, 0, len(seriesKeys))\n\t// go through the keys in the file store\n\tif err := e.FileStore.WalkKeys(func(k []byte, _ byte) error {\n\t\tseriesKey, _ := SeriesAndFieldFromCompositeKey(k)\n\n\t\t// Both tempKeys and keys walked are sorted, skip any passed in keys\n\t\t// that don't exist in our key set.\n\t\tfor len(tempKeys) > 0 && bytes.Compare(tempKeys[0], seriesKey) < 0 {\n\t\t\ttempKeys = tempKeys[1:]\n\t\t}\n\n\t\t// Keys match, add the full series key to delete.\n\t\tif len(tempKeys) > 0 && bytes.Equal(tempKeys[0], seriesKey) {\n\t\t\tdeleteKeys = append(deleteKeys, string(k))\n\t\t}\n\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tif err := e.FileStore.DeleteRange(deleteKeys, min, max); err != nil {\n\t\treturn err\n\t}\n\n\t// find the keys in the cache and remove them\n\twalKeys := deleteKeys[:0]\n\n\t// ApplySerialEntryFn cannot return an error in this invocation.\n\t_ = e.Cache.ApplyEntryFn(func(k string, _ *entry) error {\n\t\tseriesKey, _ := SeriesAndFieldFromCompositeKey([]byte(k))\n\n\t\t// Cache does not walk keys in sorted order, so search the sorted\n\t\t// series we need to delete to see if any of the cache keys match.\n\t\ti := bytesutil.SearchBytes(seriesKeys, seriesKey)\n\t\tif i < len(seriesKeys) && bytes.Equal(seriesKey, seriesKeys[i]) {\n\t\t\t// k is the measurement + tags + sep + field\n\t\t\twalKeys = append(walKeys, k)\n\t\t}\n\t\treturn nil\n\t})\n\n\te.Cache.DeleteRange(walKeys, min, max)\n\n\t// delete from the WAL\n\tif _, err := e.WAL.DeleteRange(walKeys, min, max); err != nil {\n\t\treturn err\n\t}\n\n\t// Have we deleted all points for the series? 
If so, we need to remove\n\t// the series from the index.\n\texisting, err := e.containsSeries(seriesKeys)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor k, exists := range existing {\n\t\tif !exists {\n\t\t\tif err := e.index.UnassignShard(k, e.id); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// DeleteMeasurement deletes a measurement and all related series.\nfunc (e *Engine) DeleteMeasurement(name []byte) error {\n\t// Delete the bulk of data outside of the fields lock.\n\tif err := e.deleteMeasurement(name); err != nil {\n\t\treturn err\n\t}\n\n\t// Under lock, delete any series created deletion.\n\tif err := e.fieldset.DeleteWithLock(string(name), func() error {\n\t\treturn e.deleteMeasurement(name)\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// DeleteMeasurement deletes a measurement and all related series.\nfunc (e *Engine) deleteMeasurement(name []byte) error {\n\t// Attempt to find the series keys.\n\tkeys, err := e.index.MeasurementSeriesKeysByExpr(name, nil)\n\tif err != nil {\n\t\treturn err\n\t} else if len(keys) > 0 {\n\t\tif err := e.deleteSeries(keys); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// ForEachMeasurementName iterates over each measurement name in the engine.\nfunc (e *Engine) ForEachMeasurementName(fn func(name []byte) error) error {\n\treturn e.index.ForEachMeasurementName(fn)\n}\n\n// MeasurementSeriesKeysByExpr returns a list of series keys matching expr.\nfunc (e *Engine) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr) ([][]byte, error) {\n\treturn e.index.MeasurementSeriesKeysByExpr(name, expr)\n}\n\nfunc (e *Engine) CreateSeriesListIfNotExists(keys, names [][]byte, tagsSlice []models.Tags) error {\n\treturn e.index.CreateSeriesListIfNotExists(keys, names, tagsSlice)\n}\n\nfunc (e *Engine) CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error {\n\treturn e.index.CreateSeriesIfNotExists(key, name, tags)\n}\n\n// WriteTo is not 
implemented.\nfunc (e *Engine) WriteTo(w io.Writer) (n int64, err error) { panic(\"not implemented\") }\n\n// WriteSnapshot will snapshot the cache and write a new TSM file with its contents, releasing the snapshot when done.\nfunc (e *Engine) WriteSnapshot() error {\n\t// Lock and grab the cache snapshot along with all the closed WAL\n\t// filenames associated with the snapshot\n\n\tvar started *time.Time\n\n\tdefer func() {\n\t\tif started != nil {\n\t\t\te.Cache.UpdateCompactTime(time.Since(*started))\n\t\t\te.logger.Info(fmt.Sprintf(\"Snapshot for path %s written in %v\", e.path, time.Since(*started)))\n\t\t}\n\t}()\n\n\tclosedFiles, snapshot, err := func() ([]string, *Cache, error) {\n\t\te.mu.Lock()\n\t\tdefer e.mu.Unlock()\n\n\t\tnow := time.Now()\n\t\tstarted = &now\n\n\t\tif err := e.WAL.CloseSegment(); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tsegments, err := e.WAL.ClosedSegments()\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tsnapshot, err := e.Cache.Snapshot()\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\treturn segments, snapshot, nil\n\t}()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif snapshot.Size() == 0 {\n\t\te.Cache.ClearSnapshot(true)\n\t\treturn nil\n\t}\n\n\t// The snapshotted cache may have duplicate points and unsorted data.  We need to deduplicate\n\t// it before writing the snapshot.  
This can be very expensive so it's done while we are not\n\t// holding the engine write lock.\n\tdedup := time.Now()\n\tsnapshot.Deduplicate()\n\te.traceLogger.Info(fmt.Sprintf(\"Snapshot for path %s deduplicated in %v\", e.path, time.Since(dedup)))\n\n\treturn e.writeSnapshotAndCommit(closedFiles, snapshot)\n}\n\n// CreateSnapshot will create a temp directory that holds\n// temporary hardlinks to the underylyng shard files.\nfunc (e *Engine) CreateSnapshot() (string, error) {\n\tif err := e.WriteSnapshot(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\te.mu.RLock()\n\tdefer e.mu.RUnlock()\n\n\treturn e.FileStore.CreateSnapshot()\n}\n\n// writeSnapshotAndCommit will write the passed cache to a new TSM file and remove the closed WAL segments.\nfunc (e *Engine) writeSnapshotAndCommit(closedFiles []string, snapshot *Cache) (err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\te.Cache.ClearSnapshot(false)\n\t\t}\n\t}()\n\n\t// write the new snapshot files\n\tnewFiles, err := e.Compactor.WriteSnapshot(snapshot)\n\tif err != nil {\n\t\te.logger.Info(fmt.Sprintf(\"error writing snapshot from compactor: %v\", err))\n\t\treturn err\n\t}\n\n\te.mu.RLock()\n\tdefer e.mu.RUnlock()\n\n\t// update the file store with these new files\n\tif err := e.FileStore.Replace(nil, newFiles); err != nil {\n\t\te.logger.Info(fmt.Sprintf(\"error adding new TSM files from snapshot: %v\", err))\n\t\treturn err\n\t}\n\n\t// clear the snapshot from the in-memory cache, then the old WAL files\n\te.Cache.ClearSnapshot(true)\n\n\tif err := e.WAL.Remove(closedFiles); err != nil {\n\t\te.logger.Info(fmt.Sprintf(\"error removing closed wal segments: %v\", err))\n\t}\n\n\treturn nil\n}\n\n// compactCache continually checks if the WAL cache should be written to disk.\nfunc (e *Engine) compactCache(quit <-chan struct{}) {\n\tt := time.NewTicker(time.Second)\n\tdefer t.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-quit:\n\t\t\treturn\n\n\t\tcase <-t.C:\n\t\t\te.Cache.UpdateAge()\n\t\t\tif 
e.ShouldCompactCache(e.WAL.LastWriteTime()) {\n\t\t\t\tstart := time.Now()\n\t\t\t\te.traceLogger.Info(fmt.Sprintf(\"Compacting cache for %s\", e.path))\n\t\t\t\terr := e.WriteSnapshot()\n\t\t\t\tif err != nil && err != errCompactionsDisabled {\n\t\t\t\t\te.logger.Info(fmt.Sprintf(\"error writing snapshot: %v\", err))\n\t\t\t\t\tatomic.AddInt64(&e.stats.CacheCompactionErrors, 1)\n\t\t\t\t} else {\n\t\t\t\t\tatomic.AddInt64(&e.stats.CacheCompactions, 1)\n\t\t\t\t}\n\t\t\t\tatomic.AddInt64(&e.stats.CacheCompactionDuration, time.Since(start).Nanoseconds())\n\t\t\t}\n\t\t}\n\t}\n}\n\n// ShouldCompactCache returns true if the Cache is over its flush threshold\n// or if the passed in lastWriteTime is older than the write cold threshold.\nfunc (e *Engine) ShouldCompactCache(lastWriteTime time.Time) bool {\n\tsz := e.Cache.Size()\n\n\tif sz == 0 {\n\t\treturn false\n\t}\n\n\treturn sz > e.CacheFlushMemorySizeThreshold ||\n\t\ttime.Since(lastWriteTime) > e.CacheFlushWriteColdDuration\n}\n\nfunc (e *Engine) compactTSMLevel(fast bool, level int, quit <-chan struct{}) {\n\tt := time.NewTicker(time.Second)\n\tdefer t.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-quit:\n\t\t\treturn\n\n\t\tcase <-t.C:\n\t\t\ts := e.levelCompactionStrategy(fast, level)\n\t\t\tif s != nil {\n\t\t\t\ts.Apply()\n\t\t\t\t// Release the files in the compaction plan\n\t\t\t\te.CompactionPlan.Release(s.compactionGroups)\n\t\t\t}\n\n\t\t}\n\t}\n}\n\nfunc (e *Engine) compactTSMFull(quit <-chan struct{}) {\n\tt := time.NewTicker(time.Second)\n\tdefer t.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-quit:\n\t\t\treturn\n\n\t\tcase <-t.C:\n\t\t\ts := e.fullCompactionStrategy()\n\t\t\tif s != nil {\n\t\t\t\ts.Apply()\n\t\t\t\t// Release the files in the compaction plan\n\t\t\t\te.CompactionPlan.Release(s.compactionGroups)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// onFileStoreReplace is callback handler invoked when the FileStore\n// has replaced one set of TSM files with a new set.\nfunc (e *Engine) onFileStoreReplace(newFiles 
[]TSMFile) {\n\t// Load any new series keys to the index\n\treaders := make([]chan seriesKey, 0, len(newFiles))\n\tfor _, r := range newFiles {\n\t\tch := make(chan seriesKey, 1)\n\t\treaders = append(readers, ch)\n\n\t\tgo func(c chan seriesKey, r TSMFile) {\n\t\t\tn := r.KeyCount()\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\tkey, typ := r.KeyAt(i)\n\t\t\t\tc <- seriesKey{key, typ}\n\t\t\t}\n\t\t\tclose(c)\n\t\t}(ch, r)\n\t}\n\n\t// Merge and dedup all the series keys across each reader to reduce\n\t// lock contention on the index.\n\tmerged := merge(readers...)\n\tfor v := range merged {\n\t\tfieldType, err := tsmFieldTypeToInfluxQLDataType(v.typ)\n\t\tif err != nil {\n\t\t\te.logger.Error(fmt.Sprintf(\"refresh index (1): %v\", err))\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := e.addToIndexFromKey(v.key, fieldType); err != nil {\n\t\t\te.logger.Error(fmt.Sprintf(\"refresh index (2): %v\", err))\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t// load metadata from the Cache\n\te.Cache.ApplyEntryFn(func(key string, entry *entry) error {\n\t\tfieldType, err := entry.InfluxQLType()\n\t\tif err != nil {\n\t\t\te.logger.Error(fmt.Sprintf(\"refresh index (3): %v\", err))\n\t\t\treturn nil\n\t\t}\n\n\t\tif err := e.addToIndexFromKey([]byte(key), fieldType); err != nil {\n\t\t\te.logger.Error(fmt.Sprintf(\"refresh index (4): %v\", err))\n\t\t\treturn nil\n\t\t}\n\t\treturn nil\n\t})\n}\n\n// compactionStrategy holds the details of what to do in a compaction.\ntype compactionStrategy struct {\n\tcompactionGroups []CompactionGroup\n\n\t// concurrency determines how many compactions groups will be started\n\t// concurrently.  
These groups may be limited by the global limiter if\n\t// enabled.\n\tconcurrency int\n\tfast        bool\n\tdescription string\n\n\tdurationStat *int64\n\tactiveStat   *int64\n\tsuccessStat  *int64\n\terrorStat    *int64\n\n\tlogger    zap.Logger\n\tcompactor *Compactor\n\tfileStore *FileStore\n\tlimiter   limiter.Fixed\n\tengine    *Engine\n}\n\n// Apply concurrently compacts all the groups in a compaction strategy.\nfunc (s *compactionStrategy) Apply() {\n\tstart := time.Now()\n\n\t// cap concurrent compaction groups to no more than 4 at a time.\n\tconcurrency := s.concurrency\n\tif concurrency == 0 {\n\t\tconcurrency = 4\n\t}\n\n\tthrottle := limiter.NewFixed(concurrency)\n\tvar wg sync.WaitGroup\n\tfor i := range s.compactionGroups {\n\t\twg.Add(1)\n\t\tgo func(groupNum int) {\n\t\t\tdefer wg.Done()\n\n\t\t\t// limit concurrent compaction groups\n\t\t\tthrottle.Take()\n\t\t\tdefer throttle.Release()\n\n\t\t\ts.compactGroup(groupNum)\n\t\t}(i)\n\t}\n\twg.Wait()\n\n\tatomic.AddInt64(s.durationStat, time.Since(start).Nanoseconds())\n}\n\n// compactGroup executes the compaction strategy against a single CompactionGroup.\nfunc (s *compactionStrategy) compactGroup(groupNum int) {\n\t// Limit concurrent compactions if we have a limiter\n\tif cap(s.limiter) > 0 {\n\t\ts.limiter.Take()\n\t\tdefer s.limiter.Release()\n\t}\n\n\tgroup := s.compactionGroups[groupNum]\n\tstart := time.Now()\n\ts.logger.Info(fmt.Sprintf(\"beginning %s compaction of group %d, %d TSM files\", s.description, groupNum, len(group)))\n\tfor i, f := range group {\n\t\ts.logger.Info(fmt.Sprintf(\"compacting %s group (%d) %s (#%d)\", s.description, groupNum, f, i))\n\t}\n\n\tfiles, err := func() ([]string, error) {\n\t\t// Count the compaction as active only while the compaction is actually running.\n\t\tatomic.AddInt64(s.activeStat, 1)\n\t\tdefer atomic.AddInt64(s.activeStat, -1)\n\n\t\tif s.fast {\n\t\t\treturn s.compactor.CompactFast(group)\n\t\t} else {\n\t\t\treturn 
s.compactor.CompactFull(group)\n\t\t}\n\t}()\n\n\tif err != nil {\n\t\t_, inProgress := err.(errCompactionInProgress)\n\t\tif err == errCompactionsDisabled || inProgress {\n\t\t\ts.logger.Info(fmt.Sprintf(\"aborted %s compaction group (%d). %v\", s.description, groupNum, err))\n\n\t\t\tif _, ok := err.(errCompactionInProgress); ok {\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\ts.logger.Info(fmt.Sprintf(\"error compacting TSM files: %v\", err))\n\t\tatomic.AddInt64(s.errorStat, 1)\n\t\ttime.Sleep(time.Second)\n\t\treturn\n\t}\n\n\tif err := s.fileStore.ReplaceWithCallback(group, files, s.engine.onFileStoreReplace); err != nil {\n\t\ts.logger.Info(fmt.Sprintf(\"error replacing new TSM files: %v\", err))\n\t\tatomic.AddInt64(s.errorStat, 1)\n\t\ttime.Sleep(time.Second)\n\t\treturn\n\t}\n\n\tfor i, f := range files {\n\t\ts.logger.Info(fmt.Sprintf(\"compacted %s group (%d) into %s (#%d)\", s.description, groupNum, f, i))\n\t}\n\ts.logger.Info(fmt.Sprintf(\"compacted %s %d files into %d files in %s\", s.description, len(group), len(files), time.Since(start)))\n\tatomic.AddInt64(s.successStat, 1)\n}\n\n// levelCompactionStrategy returns a compactionStrategy for the given level.\n// It returns nil if there are no TSM files to compact.\nfunc (e *Engine) levelCompactionStrategy(fast bool, level int) *compactionStrategy {\n\tcompactionGroups := e.CompactionPlan.PlanLevel(level)\n\n\tif len(compactionGroups) == 0 {\n\t\treturn nil\n\t}\n\n\treturn &compactionStrategy{\n\t\tconcurrency:      4,\n\t\tcompactionGroups: compactionGroups,\n\t\tlogger:           e.logger,\n\t\tfileStore:        e.FileStore,\n\t\tcompactor:        e.Compactor,\n\t\tfast:             fast,\n\t\tlimiter:          e.compactionLimiter,\n\t\tengine:           e,\n\n\t\tdescription:  fmt.Sprintf(\"level %d\", level),\n\t\tactiveStat:   &e.stats.TSMCompactionsActive[level-1],\n\t\tsuccessStat:  &e.stats.TSMCompactions[level-1],\n\t\terrorStat:    
&e.stats.TSMCompactionErrors[level-1],\n\t\tdurationStat: &e.stats.TSMCompactionDuration[level-1],\n\t}\n}\n\n// fullCompactionStrategy returns a compactionStrategy for higher level generations of TSM files.\n// It returns nil if there are no TSM files to compact.\nfunc (e *Engine) fullCompactionStrategy() *compactionStrategy {\n\toptimize := false\n\tcompactionGroups := e.CompactionPlan.Plan(e.WAL.LastWriteTime())\n\n\tif len(compactionGroups) == 0 {\n\t\toptimize = true\n\t\tcompactionGroups = e.CompactionPlan.PlanOptimize()\n\t}\n\n\tif len(compactionGroups) == 0 {\n\t\treturn nil\n\t}\n\n\ts := &compactionStrategy{\n\t\tconcurrency:      1,\n\t\tcompactionGroups: compactionGroups,\n\t\tlogger:           e.logger,\n\t\tfileStore:        e.FileStore,\n\t\tcompactor:        e.Compactor,\n\t\tfast:             optimize,\n\t\tlimiter:          e.compactionLimiter,\n\t\tengine:           e,\n\t}\n\n\tif optimize {\n\t\ts.description = \"optimize\"\n\t\ts.activeStat = &e.stats.TSMOptimizeCompactionsActive\n\t\ts.successStat = &e.stats.TSMOptimizeCompactions\n\t\ts.errorStat = &e.stats.TSMOptimizeCompactionErrors\n\t\ts.durationStat = &e.stats.TSMOptimizeCompactionDuration\n\t} else {\n\t\ts.description = \"full\"\n\t\ts.activeStat = &e.stats.TSMFullCompactionsActive\n\t\ts.successStat = &e.stats.TSMFullCompactions\n\t\ts.errorStat = &e.stats.TSMFullCompactionErrors\n\t\ts.durationStat = &e.stats.TSMFullCompactionDuration\n\t}\n\n\treturn s\n}\n\n// reloadCache reads the WAL segment files and loads them into the cache.\nfunc (e *Engine) reloadCache() error {\n\tnow := time.Now()\n\tfiles, err := segmentFileNames(e.WAL.Path())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlimit := e.Cache.MaxSize()\n\tdefer func() {\n\t\te.Cache.SetMaxSize(limit)\n\t}()\n\n\t// Disable the max size during loading\n\te.Cache.SetMaxSize(0)\n\n\tloader := NewCacheLoader(files)\n\tloader.WithLogger(e.logger)\n\tif err := loader.Load(e.Cache); err != nil {\n\t\treturn 
err\n\t}\n\n\te.traceLogger.Info(fmt.Sprintf(\"Reloaded WAL cache %s in %v\", e.WAL.Path(), time.Since(now)))\n\treturn nil\n}\n\n// cleanup removes all temp files and dirs that exist on disk.  This is should only be run at startup to avoid\n// removing tmp files that are still in use.\nfunc (e *Engine) cleanup() error {\n\tallfiles, err := ioutil.ReadDir(e.path)\n\tif os.IsNotExist(err) {\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn err\n\t}\n\n\tfor _, f := range allfiles {\n\t\t// Check to see if there are any `.tmp` directories that were left over from failed shard snapshots\n\t\tif f.IsDir() && strings.HasSuffix(f.Name(), \".tmp\") {\n\t\t\tif err := os.RemoveAll(filepath.Join(e.path, f.Name())); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error removing tmp snapshot directory %q: %s\", f.Name(), err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn e.cleanupTempTSMFiles()\n}\n\nfunc (e *Engine) cleanupTempTSMFiles() error {\n\tfiles, err := filepath.Glob(filepath.Join(e.path, fmt.Sprintf(\"*.%s\", CompactionTempExtension)))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting compaction temp files: %s\", err.Error())\n\t}\n\n\tfor _, f := range files {\n\t\tif err := os.Remove(f); err != nil {\n\t\t\treturn fmt.Errorf(\"error removing temp compaction files: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\n// KeyCursor returns a KeyCursor for the given key starting at time t.\nfunc (e *Engine) KeyCursor(key string, t int64, ascending bool) *KeyCursor {\n\treturn e.FileStore.KeyCursor(key, t, ascending)\n}\n\n// CreateIterator returns an iterator for the measurement based on opt.\nfunc (e *Engine) CreateIterator(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\tif call, ok := opt.Expr.(*influxql.Call); ok {\n\t\tif opt.Interval.IsZero() {\n\t\t\tif call.Name == \"first\" || call.Name == \"last\" {\n\t\t\t\trefOpt := opt\n\t\t\t\trefOpt.Limit = 1\n\t\t\t\trefOpt.Ascending = call.Name == \"first\"\n\t\t\t\trefOpt.Ordered = 
true\n\t\t\t\trefOpt.Expr = call.Args[0]\n\n\t\t\t\titrs, err := e.createVarRefIterator(measurement, refOpt)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\treturn newMergeFinalizerIterator(itrs, opt, e.logger)\n\t\t\t}\n\t\t}\n\n\t\tinputs, err := e.createCallIterator(measurement, call, opt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if len(inputs) == 0 {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn newMergeFinalizerIterator(inputs, opt, e.logger)\n\t}\n\n\titrs, err := e.createVarRefIterator(measurement, opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newMergeFinalizerIterator(itrs, opt, e.logger)\n}\n\nfunc (e *Engine) createCallIterator(measurement string, call *influxql.Call, opt influxql.IteratorOptions) ([]influxql.Iterator, error) {\n\tref, _ := call.Args[0].(*influxql.VarRef)\n\n\tif exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil {\n\t\treturn nil, err\n\t} else if !exists {\n\t\treturn nil, nil\n\t}\n\n\t// Determine tagsets for this measurement based on dimensions and filters.\n\ttagSets, err := e.index.TagSets([]byte(measurement), opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Reverse the tag sets if we are ordering by descending.\n\tif !opt.Ascending {\n\t\tfor _, t := range tagSets {\n\t\t\tt.Reverse()\n\t\t}\n\t}\n\n\t// Calculate tag sets and apply SLIMIT/SOFFSET.\n\ttagSets = influxql.LimitTagSets(tagSets, opt.SLimit, opt.SOffset)\n\n\titrs := make([]influxql.Iterator, 0, len(tagSets))\n\tif err := func() error {\n\t\tfor _, t := range tagSets {\n\t\t\t// Abort if the query was killed\n\t\t\tselect {\n\t\t\tcase <-opt.InterruptCh:\n\t\t\t\tinfluxql.Iterators(itrs).Close()\n\t\t\t\treturn err\n\t\t\tdefault:\n\t\t\t}\n\n\t\t\tinputs, err := e.createTagSetIterators(ref, measurement, t, opt)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t} else if len(inputs) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Wrap each series in a call iterator.\n\t\t\tfor i, input := 
range inputs {\n\t\t\t\tif opt.InterruptCh != nil {\n\t\t\t\t\tinput = influxql.NewInterruptIterator(input, opt.InterruptCh)\n\t\t\t\t}\n\n\t\t\t\titr, err := influxql.NewCallIterator(input, opt)\n\t\t\t\tif err != nil {\n\t\t\t\t\tinfluxql.Iterators(inputs).Close()\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tinputs[i] = itr\n\t\t\t}\n\n\t\t\titr := influxql.NewParallelMergeIterator(inputs, opt, runtime.GOMAXPROCS(0))\n\t\t\titrs = append(itrs, itr)\n\t\t}\n\t\treturn nil\n\t}(); err != nil {\n\t\tinfluxql.Iterators(itrs).Close()\n\t\treturn nil, err\n\t}\n\n\treturn itrs, nil\n}\n\n// createVarRefIterator creates an iterator for a variable reference.\nfunc (e *Engine) createVarRefIterator(measurement string, opt influxql.IteratorOptions) ([]influxql.Iterator, error) {\n\tref, _ := opt.Expr.(*influxql.VarRef)\n\n\tif exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil {\n\t\treturn nil, err\n\t} else if !exists {\n\t\treturn nil, nil\n\t}\n\n\t// Determine tagsets for this measurement based on dimensions and filters.\n\ttagSets, err := e.index.TagSets([]byte(measurement), opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Reverse the tag sets if we are ordering by descending.\n\tif !opt.Ascending {\n\t\tfor _, t := range tagSets {\n\t\t\tt.Reverse()\n\t\t}\n\t}\n\n\t// Calculate tag sets and apply SLIMIT/SOFFSET.\n\ttagSets = influxql.LimitTagSets(tagSets, opt.SLimit, opt.SOffset)\n\n\titrs := make([]influxql.Iterator, 0, len(tagSets))\n\tif err := func() error {\n\t\tfor _, t := range tagSets {\n\t\t\tinputs, err := e.createTagSetIterators(ref, measurement, t, opt)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t} else if len(inputs) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// If we have a LIMIT or OFFSET and the grouping of the outer query\n\t\t\t// is different than the current grouping, we need to perform the\n\t\t\t// limit on each of the individual series keys instead to improve\n\t\t\t// performance.\n\t\t\tif (opt.Limit > 0 || 
opt.Offset > 0) && len(opt.Dimensions) != len(opt.GroupBy) {\n\t\t\t\tfor i, input := range inputs {\n\t\t\t\t\tinputs[i] = newLimitIterator(input, opt)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\titr, err := influxql.Iterators(inputs).Merge(opt)\n\t\t\tif err != nil {\n\t\t\t\tinfluxql.Iterators(inputs).Close()\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Apply a limit on the merged iterator.\n\t\t\tif opt.Limit > 0 || opt.Offset > 0 {\n\t\t\t\tif len(opt.Dimensions) == len(opt.GroupBy) {\n\t\t\t\t\t// When the final dimensions and the current grouping are\n\t\t\t\t\t// the same, we will only produce one series so we can use\n\t\t\t\t\t// the faster limit iterator.\n\t\t\t\t\titr = newLimitIterator(itr, opt)\n\t\t\t\t} else {\n\t\t\t\t\t// When the dimensions are different than the current\n\t\t\t\t\t// grouping, we need to account for the possibility there\n\t\t\t\t\t// will be multiple series. The limit iterator in the\n\t\t\t\t\t// influxql package handles that scenario.\n\t\t\t\t\titr = influxql.NewLimitIterator(itr, opt)\n\t\t\t\t}\n\t\t\t}\n\t\t\titrs = append(itrs, itr)\n\t\t}\n\t\treturn nil\n\t}(); err != nil {\n\t\tinfluxql.Iterators(itrs).Close()\n\t\treturn nil, err\n\t}\n\n\treturn itrs, nil\n}\n\n// createTagSetIterators creates a set of iterators for a tagset.\nfunc (e *Engine) createTagSetIterators(ref *influxql.VarRef, name string, t *influxql.TagSet, opt influxql.IteratorOptions) ([]influxql.Iterator, error) {\n\t// Set parallelism by number of logical cpus.\n\tparallelism := runtime.GOMAXPROCS(0)\n\tif parallelism > len(t.SeriesKeys) {\n\t\tparallelism = len(t.SeriesKeys)\n\t}\n\n\t// Create series key groupings w/ return error.\n\tgroups := make([]struct {\n\t\tkeys    []string\n\t\tfilters []influxql.Expr\n\t\titrs    []influxql.Iterator\n\t\terr     error\n\t}, parallelism)\n\n\t// Group series keys.\n\tn := len(t.SeriesKeys) / parallelism\n\tfor i := 0; i < parallelism; i++ {\n\t\tgroup := &groups[i]\n\n\t\tif i < parallelism-1 {\n\t\t\tgroup.keys = 
t.SeriesKeys[i*n : (i+1)*n]\n\t\t\tgroup.filters = t.Filters[i*n : (i+1)*n]\n\t\t} else {\n\t\t\tgroup.keys = t.SeriesKeys[i*n:]\n\t\t\tgroup.filters = t.Filters[i*n:]\n\t\t}\n\t}\n\n\t// Read series groups in parallel.\n\tvar wg sync.WaitGroup\n\tfor i := range groups {\n\t\twg.Add(1)\n\t\tgo func(i int) {\n\t\t\tdefer wg.Done()\n\t\t\tgroups[i].itrs, groups[i].err = e.createTagSetGroupIterators(ref, name, groups[i].keys, t, groups[i].filters, opt)\n\t\t}(i)\n\t}\n\twg.Wait()\n\n\t// Determine total number of iterators so we can allocate only once.\n\tvar itrN int\n\tfor _, group := range groups {\n\t\titrN += len(group.itrs)\n\t}\n\n\t// Combine all iterators together and check for errors.\n\tvar err error\n\titrs := make([]influxql.Iterator, 0, itrN)\n\tfor _, group := range groups {\n\t\tif group.err != nil {\n\t\t\terr = group.err\n\t\t}\n\t\titrs = append(itrs, group.itrs...)\n\t}\n\n\t// If an error occurred, make sure we close all created iterators.\n\tif err != nil {\n\t\tinfluxql.Iterators(itrs).Close()\n\t\treturn nil, err\n\t}\n\n\treturn itrs, nil\n}\n\n// createTagSetGroupIterators creates a set of iterators for a subset of a tagset's series.\nfunc (e *Engine) createTagSetGroupIterators(ref *influxql.VarRef, name string, seriesKeys []string, t *influxql.TagSet, filters []influxql.Expr, opt influxql.IteratorOptions) ([]influxql.Iterator, error) {\n\titrs := make([]influxql.Iterator, 0, len(seriesKeys))\n\tfor i, seriesKey := range seriesKeys {\n\t\tvar conditionFields []influxql.VarRef\n\t\tif filters[i] != nil {\n\t\t\t// Retrieve non-time fields from this series filter and filter out tags.\n\t\t\tconditionFields = influxql.ExprNames(filters[i])\n\t\t}\n\n\t\titr, err := e.createVarRefSeriesIterator(ref, name, seriesKey, t, filters[i], conditionFields, opt)\n\t\tif err != nil {\n\t\t\treturn itrs, err\n\t\t} else if itr == nil {\n\t\t\tcontinue\n\t\t}\n\t\titrs = append(itrs, itr)\n\n\t\t// Abort if the query was killed\n\t\tselect {\n\t\tcase 
<-opt.InterruptCh:\n\t\t\tinfluxql.Iterators(itrs).Close()\n\t\t\treturn nil, err\n\t\tdefault:\n\t\t}\n\n\t\t// Enforce series limit at creation time.\n\t\tif opt.MaxSeriesN > 0 && len(itrs) > opt.MaxSeriesN {\n\t\t\tinfluxql.Iterators(itrs).Close()\n\t\t\treturn nil, fmt.Errorf(\"max-select-series limit exceeded: (%d/%d)\", len(itrs), opt.MaxSeriesN)\n\t\t}\n\n\t}\n\treturn itrs, nil\n}\n\n// createVarRefSeriesIterator creates an iterator for a variable reference for a series.\nfunc (e *Engine) createVarRefSeriesIterator(ref *influxql.VarRef, name string, seriesKey string, t *influxql.TagSet, filter influxql.Expr, conditionFields []influxql.VarRef, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t_, tfs := models.ParseKey([]byte(seriesKey))\n\ttags := influxql.NewTags(tfs.Map())\n\n\t// Create options specific for this series.\n\titrOpt := opt\n\titrOpt.Condition = filter\n\n\t// Build auxilary cursors.\n\t// Tag values should be returned if the field doesn't exist.\n\tvar aux []cursorAt\n\tif len(opt.Aux) > 0 {\n\t\taux = make([]cursorAt, len(opt.Aux))\n\t\tfor i, ref := range opt.Aux {\n\t\t\t// Create cursor from field if a tag wasn't requested.\n\t\t\tif ref.Type != influxql.Tag {\n\t\t\t\tcur := e.buildCursor(name, seriesKey, &ref, opt)\n\t\t\t\tif cur != nil {\n\t\t\t\t\taux[i] = newBufCursor(cur, opt.Ascending)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// If a field was requested, use a nil cursor of the requested type.\n\t\t\t\tswitch ref.Type {\n\t\t\t\tcase influxql.Float, influxql.AnyField:\n\t\t\t\t\taux[i] = &floatNilLiteralCursor{}\n\t\t\t\t\tcontinue\n\t\t\t\tcase influxql.Integer:\n\t\t\t\t\taux[i] = &integerNilLiteralCursor{}\n\t\t\t\t\tcontinue\n\t\t\t\tcase influxql.String:\n\t\t\t\t\taux[i] = &stringNilLiteralCursor{}\n\t\t\t\t\tcontinue\n\t\t\t\tcase influxql.Boolean:\n\t\t\t\t\taux[i] = &booleanNilLiteralCursor{}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// If field doesn't exist, use the tag value.\n\t\t\tif v := 
tags.Value(ref.Val); v == \"\" {\n\t\t\t\t// However, if the tag value is blank then return a null.\n\t\t\t\taux[i] = &stringNilLiteralCursor{}\n\t\t\t} else {\n\t\t\t\taux[i] = &stringLiteralCursor{value: v}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Build conditional field cursors.\n\t// If a conditional field doesn't exist then ignore the series.\n\tvar conds []cursorAt\n\tif len(conditionFields) > 0 {\n\t\tconds = make([]cursorAt, len(conditionFields))\n\t\tfor i, ref := range conditionFields {\n\t\t\t// Create cursor from field if a tag wasn't requested.\n\t\t\tif ref.Type != influxql.Tag {\n\t\t\t\tcur := e.buildCursor(name, seriesKey, &ref, opt)\n\t\t\t\tif cur != nil {\n\t\t\t\t\tconds[i] = newBufCursor(cur, opt.Ascending)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// If a field was requested, use a nil cursor of the requested type.\n\t\t\t\tswitch ref.Type {\n\t\t\t\tcase influxql.Float, influxql.AnyField:\n\t\t\t\t\tconds[i] = &floatNilLiteralCursor{}\n\t\t\t\t\tcontinue\n\t\t\t\tcase influxql.Integer:\n\t\t\t\t\tconds[i] = &integerNilLiteralCursor{}\n\t\t\t\t\tcontinue\n\t\t\t\tcase influxql.String:\n\t\t\t\t\tconds[i] = &stringNilLiteralCursor{}\n\t\t\t\t\tcontinue\n\t\t\t\tcase influxql.Boolean:\n\t\t\t\t\tconds[i] = &booleanNilLiteralCursor{}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// If field doesn't exist, use the tag value.\n\t\t\tif v := tags.Value(ref.Val); v == \"\" {\n\t\t\t\t// However, if the tag value is blank then return a null.\n\t\t\t\tconds[i] = &stringNilLiteralCursor{}\n\t\t\t} else {\n\t\t\t\tconds[i] = &stringLiteralCursor{value: v}\n\t\t\t}\n\t\t}\n\t}\n\tcondNames := influxql.VarRefs(conditionFields).Strings()\n\n\t// Limit tags to only the dimensions selected.\n\tdimensions := opt.GetDimensions()\n\ttags = tags.Subset(dimensions)\n\n\t// If it's only auxiliary fields then it doesn't matter what type of iterator we use.\n\tif ref == nil {\n\t\treturn newFloatIterator(name, tags, itrOpt, nil, aux, conds, condNames), nil\n\t}\n\n\t// Build 
main cursor.\n\tcur := e.buildCursor(name, seriesKey, ref, opt)\n\n\t// If the field doesn't exist then don't build an iterator.\n\tif cur == nil {\n\t\tcursorsAt(aux).close()\n\t\tcursorsAt(conds).close()\n\t\treturn nil, nil\n\t}\n\n\tswitch cur := cur.(type) {\n\tcase floatCursor:\n\t\treturn newFloatIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil\n\tcase integerCursor:\n\t\treturn newIntegerIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil\n\tcase stringCursor:\n\t\treturn newStringIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil\n\tcase booleanCursor:\n\t\treturn newBooleanIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil\n\tdefault:\n\t\tpanic(\"unreachable\")\n\t}\n}\n\n// buildCursor creates an untyped cursor for a field.\nfunc (e *Engine) buildCursor(measurement, seriesKey string, ref *influxql.VarRef, opt influxql.IteratorOptions) cursor {\n\t// Look up fields for measurement.\n\tmf := e.fieldset.Fields(measurement)\n\tif mf == nil {\n\t\treturn nil\n\t}\n\n\t// Find individual field.\n\tf := mf.Field(ref.Val)\n\tif f == nil {\n\t\treturn nil\n\t}\n\n\t// Check if we need to perform a cast. 
Performing a cast in the\n\t// engine (if it is possible) is much more efficient than an automatic cast.\n\tif ref.Type != influxql.Unknown && ref.Type != influxql.AnyField && ref.Type != f.Type {\n\t\tswitch ref.Type {\n\t\tcase influxql.Float:\n\t\t\tswitch f.Type {\n\t\t\tcase influxql.Integer:\n\t\t\t\tcur := e.buildIntegerCursor(measurement, seriesKey, ref.Val, opt)\n\t\t\t\treturn &floatCastIntegerCursor{cursor: cur}\n\t\t\t}\n\t\tcase influxql.Integer:\n\t\t\tswitch f.Type {\n\t\t\tcase influxql.Float:\n\t\t\t\tcur := e.buildFloatCursor(measurement, seriesKey, ref.Val, opt)\n\t\t\t\treturn &integerCastFloatCursor{cursor: cur}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Return appropriate cursor based on type.\n\tswitch f.Type {\n\tcase influxql.Float:\n\t\treturn e.buildFloatCursor(measurement, seriesKey, ref.Val, opt)\n\tcase influxql.Integer:\n\t\treturn e.buildIntegerCursor(measurement, seriesKey, ref.Val, opt)\n\tcase influxql.String:\n\t\treturn e.buildStringCursor(measurement, seriesKey, ref.Val, opt)\n\tcase influxql.Boolean:\n\t\treturn e.buildBooleanCursor(measurement, seriesKey, ref.Val, opt)\n\tdefault:\n\t\tpanic(\"unreachable\")\n\t}\n}\n\n// buildFloatCursor creates a cursor for a float field.\nfunc (e *Engine) buildFloatCursor(measurement, seriesKey, field string, opt influxql.IteratorOptions) floatCursor {\n\tcacheValues := e.Cache.Values(SeriesFieldKey(seriesKey, field))\n\tkeyCursor := e.KeyCursor(SeriesFieldKey(seriesKey, field), opt.SeekTime(), opt.Ascending)\n\treturn newFloatCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)\n}\n\n// buildIntegerCursor creates a cursor for an integer field.\nfunc (e *Engine) buildIntegerCursor(measurement, seriesKey, field string, opt influxql.IteratorOptions) integerCursor {\n\tcacheValues := e.Cache.Values(SeriesFieldKey(seriesKey, field))\n\tkeyCursor := e.KeyCursor(SeriesFieldKey(seriesKey, field), opt.SeekTime(), opt.Ascending)\n\treturn newIntegerCursor(opt.SeekTime(), opt.Ascending, 
cacheValues, keyCursor)\n}\n\n// buildStringCursor creates a cursor for a string field.\nfunc (e *Engine) buildStringCursor(measurement, seriesKey, field string, opt influxql.IteratorOptions) stringCursor {\n\tcacheValues := e.Cache.Values(SeriesFieldKey(seriesKey, field))\n\tkeyCursor := e.KeyCursor(SeriesFieldKey(seriesKey, field), opt.SeekTime(), opt.Ascending)\n\treturn newStringCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)\n}\n\n// buildBooleanCursor creates a cursor for a boolean field.\nfunc (e *Engine) buildBooleanCursor(measurement, seriesKey, field string, opt influxql.IteratorOptions) booleanCursor {\n\tcacheValues := e.Cache.Values(SeriesFieldKey(seriesKey, field))\n\tkeyCursor := e.KeyCursor(SeriesFieldKey(seriesKey, field), opt.SeekTime(), opt.Ascending)\n\treturn newBooleanCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor)\n}\n\nfunc (e *Engine) SeriesPointIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\treturn e.index.SeriesPointIterator(opt)\n}\n\n// SeriesFieldKey combine a series key and field name for a unique string to be hashed to a numeric ID.\nfunc SeriesFieldKey(seriesKey, field string) string {\n\treturn seriesKey + keyFieldSeparator + field\n}\n\nfunc tsmFieldTypeToInfluxQLDataType(typ byte) (influxql.DataType, error) {\n\tswitch typ {\n\tcase BlockFloat64:\n\t\treturn influxql.Float, nil\n\tcase BlockInteger:\n\t\treturn influxql.Integer, nil\n\tcase BlockBoolean:\n\t\treturn influxql.Boolean, nil\n\tcase BlockString:\n\t\treturn influxql.String, nil\n\tdefault:\n\t\treturn influxql.Unknown, fmt.Errorf(\"unknown block type: %v\", typ)\n\t}\n}\n\n// SeriesAndFieldFromCompositeKey returns the series key and the field key extracted from the composite key.\nfunc SeriesAndFieldFromCompositeKey(key []byte) ([]byte, []byte) {\n\tsep := bytes.Index(key, keyFieldSeparatorBytes)\n\tif sep == -1 {\n\t\t// No field???\n\t\treturn key, nil\n\t}\n\treturn key[:sep], 
key[sep+len(keyFieldSeparator):]\n}\n\n// readDir recursively reads all files from a path.\nfunc readDir(root, rel string) ([]string, error) {\n\t// Open root.\n\tf, err := os.Open(filepath.Join(root, rel))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\t// Read all files.\n\tfis, err := f.Readdir(-1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Read all subdirectories and append to the end.\n\tvar paths []string\n\tfor _, fi := range fis {\n\t\t// Simply append if it's a file.\n\t\tif !fi.IsDir() {\n\t\t\tpaths = append(paths, filepath.Join(rel, fi.Name()))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Read and append nested file paths.\n\t\tchildren, err := readDir(root, filepath.Join(rel, fi.Name()))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpaths = append(paths, children...)\n\t}\n\treturn paths, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine_test.go",
    "content": "package tsm1_test\n\nimport (\n\t\"archive/tar\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"math\"\n\t\"math/rand\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"path\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/deep\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t\"github.com/influxdata/influxdb/tsdb/engine/tsm1\"\n\t\"github.com/influxdata/influxdb/tsdb/index/inmem\"\n)\n\n/*\n// Ensure engine can load the metadata index after reopening.\nfunc TestEngine_LoadMetadataIndex(t *testing.T) {\n\te := MustOpenEngine()\n\tdefer e.Close()\n\n\tif err := e.WritePointsString(`cpu,host=A value=1.1 1000000000`); err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\n\t// Ensure we can close and load index from the WAL\n\tif err := e.Reopen(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Load metadata index.\n\tindex := MustNewDatabaseIndex(\"db\")\n\tif err := e.LoadMetadataIndex(1, index); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify index is correct.\n\tm, err := index.Measurement([]byte(\"cpu\"))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t} else if m == nil {\n\t\tt.Fatal(\"measurement not found\")\n\t} else if s := m.SeriesByID(1); s.Key != \"cpu,host=A\" || !reflect.DeepEqual(s.Tags(), models.NewTags(map[string]string{\"host\": \"A\"})) {\n\t\tt.Fatalf(\"unexpected series: %q / %#v\", s.Key, s.Tags())\n\t}\n\n\t// write the snapshot, ensure we can close and load index from TSM\n\tif err := e.WriteSnapshot(); err != nil {\n\t\tt.Fatalf(\"error writing snapshot: %s\", err.Error())\n\t}\n\n\t// Ensure we can close and load index from the WAL\n\tif err := e.Reopen(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Load metadata index.\n\tindex = MustNewDatabaseIndex(\"db\")\n\tif err := e.LoadMetadataIndex(1, index); err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify index is correct.\n\tif m, err = index.Measurement([]byte(\"cpu\")); err != nil {\n\t\tt.Fatal(err)\n\t} else if m == nil {\n\t\tt.Fatal(\"measurement not found\")\n\t} else if s := m.SeriesByID(1); s.Key != \"cpu,host=A\" || !reflect.DeepEqual(s.Tags(), models.NewTags(map[string]string{\"host\": \"A\"})) {\n\t\tt.Fatalf(\"unexpected series: %q / %#v\", s.Key, s.Tags())\n\t}\n\n\t// Write a new point and ensure we can close and load index from TSM and WAL\n\tif err := e.WritePoints([]models.Point{\n\t\tMustParsePointString(\"cpu,host=B value=1.2 2000000000\"),\n\t}); err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\n\t// Ensure we can close and load index from the TSM & WAL\n\tif err := e.Reopen(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Load metadata index.\n\tindex = MustNewDatabaseIndex(\"db\")\n\tif err := e.LoadMetadataIndex(1, index); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify index is correct.\n\tif m, err = index.Measurement([]byte(\"cpu\")); err != nil {\n\t\tt.Fatal(err)\n\t} else if m == nil {\n\t\tt.Fatal(\"measurement not found\")\n\t} else if s := m.SeriesByID(1); s.Key != \"cpu,host=A\" || !reflect.DeepEqual(s.Tags(), models.NewTags(map[string]string{\"host\": \"A\"})) {\n\t\tt.Fatalf(\"unexpected series: %q / %#v\", s.Key, s.Tags())\n\t} else if s := m.SeriesByID(2); s.Key != \"cpu,host=B\" || !reflect.DeepEqual(s.Tags(), models.NewTags(map[string]string{\"host\": \"B\"})) {\n\t\tt.Fatalf(\"unexpected series: %q / %#v\", s.Key, s.Tags())\n\t}\n}\n*/\n\n// Ensure that deletes only sent to the WAL will clear out the data from the cache on restart\nfunc TestEngine_DeleteWALLoadMetadata(t *testing.T) {\n\te := MustOpenEngine()\n\tdefer e.Close()\n\n\tif err := e.WritePointsString(\n\t\t`cpu,host=A value=1.1 1000000000`,\n\t\t`cpu,host=B value=1.2 2000000000`,\n\t); err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\n\t// Remove series.\n\tif err 
:= e.DeleteSeriesRange([][]byte{[]byte(\"cpu,host=A\")}, math.MinInt64, math.MaxInt64); err != nil {\n\t\tt.Fatalf(\"failed to delete series: %s\", err.Error())\n\t}\n\n\t// Ensure we can close and load index from the WAL\n\tif err := e.Reopen(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif exp, got := 0, len(e.Cache.Values(tsm1.SeriesFieldKey(\"cpu,host=A\", \"value\"))); exp != got {\n\t\tt.Fatalf(\"unexpected number of values: got: %d. exp: %d\", got, exp)\n\t}\n\n\tif exp, got := 1, len(e.Cache.Values(tsm1.SeriesFieldKey(\"cpu,host=B\", \"value\"))); exp != got {\n\t\tt.Fatalf(\"unexpected number of values: got: %d. exp: %d\", got, exp)\n\t}\n}\n\n// Ensure that the engine will backup any TSM files created since the passed in time\nfunc TestEngine_Backup(t *testing.T) {\n\t// Generate temporary file.\n\tf, _ := ioutil.TempFile(\"\", \"tsm\")\n\tf.Close()\n\tos.Remove(f.Name())\n\twalPath := filepath.Join(f.Name(), \"wal\")\n\tos.MkdirAll(walPath, 0777)\n\tdefer os.RemoveAll(f.Name())\n\n\t// Create a few points.\n\tp1 := MustParsePointString(\"cpu,host=A value=1.1 1000000000\")\n\tp2 := MustParsePointString(\"cpu,host=B value=1.2 2000000000\")\n\tp3 := MustParsePointString(\"cpu,host=C value=1.3 3000000000\")\n\n\t// Write those points to the engine.\n\tdb := path.Base(f.Name())\n\topt := tsdb.NewEngineOptions()\n\topt.InmemIndex = inmem.NewIndex(db)\n\tidx := tsdb.MustOpenIndex(1, db, filepath.Join(f.Name(), \"index\"), opt)\n\tdefer idx.Close()\n\n\te := tsm1.NewEngine(1, idx, db, f.Name(), walPath, opt).(*tsm1.Engine)\n\n\t// mock the planner so compactions don't run during the test\n\te.CompactionPlan = &mockPlanner{}\n\n\tif err := e.Open(); err != nil {\n\t\tt.Fatalf(\"failed to open tsm1 engine: %s\", err.Error())\n\t}\n\n\tif err := e.WritePoints([]models.Point{p1}); err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\tif err := e.WriteSnapshot(); err != nil {\n\t\tt.Fatalf(\"failed to snapshot: %s\", err.Error())\n\t}\n\n\tif 
err := e.WritePoints([]models.Point{p2}); err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\n\tb := bytes.NewBuffer(nil)\n\tif err := e.Backup(b, \"\", time.Unix(0, 0)); err != nil {\n\t\tt.Fatalf(\"failed to backup: %s\", err.Error())\n\t}\n\n\ttr := tar.NewReader(b)\n\tif len(e.FileStore.Files()) != 2 {\n\t\tt.Fatalf(\"file count wrong: exp: %d, got: %d\", 2, len(e.FileStore.Files()))\n\t}\n\n\tfileNames := map[string]bool{}\n\tfor _, f := range e.FileStore.Files() {\n\t\tfileNames[filepath.Base(f.Path())] = true\n\t}\n\n\tth, err := tr.Next()\n\tfor err == nil {\n\t\tif !fileNames[th.Name] {\n\t\t\tt.Errorf(\"Extra file in backup: %q\", th.Name)\n\t\t}\n\t\tdelete(fileNames, th.Name)\n\t\tth, err = tr.Next()\n\t}\n\n\tif err != nil && err != io.EOF {\n\t\tt.Fatalf(\"Problem reading tar header: %s\", err)\n\t}\n\n\tfor f := range fileNames {\n\t\tt.Errorf(\"File missing from backup: %s\", f)\n\t}\n\n\tif t.Failed() {\n\t\tt.FailNow()\n\t}\n\n\tlastBackup := time.Now()\n\n\t// we have to sleep for a second because last modified times only have second level precision.\n\t// so this test won't work properly unless the file is at least a second past the last one\n\ttime.Sleep(time.Second)\n\n\tif err := e.WritePoints([]models.Point{p3}); err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\n\tb = bytes.NewBuffer(nil)\n\tif err := e.Backup(b, \"\", lastBackup); err != nil {\n\t\tt.Fatalf(\"failed to backup: %s\", err.Error())\n\t}\n\n\ttr = tar.NewReader(b)\n\tth, err = tr.Next()\n\tif err != nil {\n\t\tt.Fatalf(\"error getting next tar header: %s\", err.Error())\n\t}\n\n\tmostRecentFile := e.FileStore.Files()[e.FileStore.Count()-1].Path()\n\tif !strings.Contains(mostRecentFile, th.Name) || th.Name == \"\" {\n\t\tt.Fatalf(\"file name doesn't match:\\n\\tgot: %s\\n\\texp: %s\", th.Name, mostRecentFile)\n\t}\n}\n\n// Ensure engine can create an ascending iterator for cached values.\nfunc 
TestEngine_CreateIterator_Cache_Ascending(t *testing.T) {\n\tt.Parallel()\n\n\te := MustOpenEngine()\n\tdefer e.Close()\n\n\t// e.CreateMeasurement(\"cpu\")\n\te.MeasurementFields([]byte(\"cpu\")).CreateFieldIfNotExists([]byte(\"value\"), influxql.Float, false)\n\te.CreateSeriesIfNotExists([]byte(\"cpu,host=A\"), []byte(\"cpu\"), models.NewTags(map[string]string{\"host\": \"A\"}))\n\n\tif err := e.WritePointsString(\n\t\t`cpu,host=A value=1.1 1000000000`,\n\t\t`cpu,host=A value=1.2 2000000000`,\n\t\t`cpu,host=A value=1.3 3000000000`,\n\t); err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\n\titr, err := e.CreateIterator(\"cpu\", influxql.IteratorOptions{\n\t\tExpr:       influxql.MustParseExpr(`value`),\n\t\tDimensions: []string{\"host\"},\n\t\tStartTime:  influxql.MinTime,\n\t\tEndTime:    influxql.MaxTime,\n\t\tAscending:  true,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfitr := itr.(influxql.FloatIterator)\n\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(0): %v\", err)\n\t} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 1000000000, Value: 1.1}) {\n\t\tt.Fatalf(\"unexpected point(0): %v\", p)\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(1): %v\", err)\n\t} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 2000000000, Value: 1.2}) {\n\t\tt.Fatalf(\"unexpected point(1): %v\", p)\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(2): %v\", err)\n\t} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 3000000000, Value: 1.3}) {\n\t\tt.Fatalf(\"unexpected point(2): %v\", p)\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"expected eof, got error: %v\", err)\n\t} else if p != nil {\n\t\tt.Fatalf(\"expected eof: %v\", p)\n\t}\n}\n\n// Ensure engine can create an descending 
iterator for cached values.\nfunc TestEngine_CreateIterator_Cache_Descending(t *testing.T) {\n\tt.Parallel()\n\n\te := MustOpenEngine()\n\tdefer e.Close()\n\n\te.MeasurementFields([]byte(\"cpu\")).CreateFieldIfNotExists([]byte(\"value\"), influxql.Float, false)\n\te.CreateSeriesIfNotExists([]byte(\"cpu,host=A\"), []byte(\"cpu\"), models.NewTags(map[string]string{\"host\": \"A\"}))\n\n\tif err := e.WritePointsString(\n\t\t`cpu,host=A value=1.1 1000000000`,\n\t\t`cpu,host=A value=1.2 2000000000`,\n\t\t`cpu,host=A value=1.3 3000000000`,\n\t); err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\n\titr, err := e.CreateIterator(\"cpu\", influxql.IteratorOptions{\n\t\tExpr:       influxql.MustParseExpr(`value`),\n\t\tDimensions: []string{\"host\"},\n\t\tStartTime:  influxql.MinTime,\n\t\tEndTime:    influxql.MaxTime,\n\t\tAscending:  false,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfitr := itr.(influxql.FloatIterator)\n\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(0): %v\", err)\n\t} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 3000000000, Value: 1.3}) {\n\t\tt.Fatalf(\"unexpected point(0): %v\", p)\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unepxected error(1): %v\", err)\n\t} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 2000000000, Value: 1.2}) {\n\t\tt.Fatalf(\"unexpected point(1): %v\", p)\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(2): %v\", err)\n\t} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 1000000000, Value: 1.1}) {\n\t\tt.Fatalf(\"unexpected point(2): %v\", p)\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"expected eof, got error: %v\", err)\n\t} else if p != nil {\n\t\tt.Fatalf(\"expected eof: %v\", p)\n\t}\n}\n\n// Ensure engine can create an ascending 
iterator for tsm values.\nfunc TestEngine_CreateIterator_TSM_Ascending(t *testing.T) {\n\tt.Parallel()\n\n\te := MustOpenEngine()\n\tdefer e.Close()\n\n\te.MeasurementFields([]byte(\"cpu\")).CreateFieldIfNotExists([]byte(\"value\"), influxql.Float, false)\n\te.CreateSeriesIfNotExists([]byte(\"cpu,host=A\"), []byte(\"cpu\"), models.NewTags(map[string]string{\"host\": \"A\"}))\n\n\tif err := e.WritePointsString(\n\t\t`cpu,host=A value=1.1 1000000000`,\n\t\t`cpu,host=A value=1.2 2000000000`,\n\t\t`cpu,host=A value=1.3 3000000000`,\n\t); err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\te.MustWriteSnapshot()\n\n\titr, err := e.CreateIterator(\"cpu\", influxql.IteratorOptions{\n\t\tExpr:       influxql.MustParseExpr(`value`),\n\t\tDimensions: []string{\"host\"},\n\t\tStartTime:  influxql.MinTime,\n\t\tEndTime:    influxql.MaxTime,\n\t\tAscending:  true,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfitr := itr.(influxql.FloatIterator)\n\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(0): %v\", err)\n\t} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 1000000000, Value: 1.1}) {\n\t\tt.Fatalf(\"unexpected point(0): %v\", p)\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(1): %v\", err)\n\t} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 2000000000, Value: 1.2}) {\n\t\tt.Fatalf(\"unexpected point(1): %v\", p)\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(2): %v\", err)\n\t} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 3000000000, Value: 1.3}) {\n\t\tt.Fatalf(\"unexpected point(2): %v\", p)\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"expected eof, got error: %v\", err)\n\t} else if p != nil {\n\t\tt.Fatalf(\"expected eof: %v\", p)\n\t}\n}\n\n// Ensure engine can create 
an descending iterator for cached values.\nfunc TestEngine_CreateIterator_TSM_Descending(t *testing.T) {\n\tt.Parallel()\n\n\te := MustOpenEngine()\n\tdefer e.Close()\n\n\te.MeasurementFields([]byte(\"cpu\")).CreateFieldIfNotExists([]byte(\"value\"), influxql.Float, false)\n\te.CreateSeriesIfNotExists([]byte(\"cpu,host=A\"), []byte(\"cpu\"), models.NewTags(map[string]string{\"host\": \"A\"}))\n\n\tif err := e.WritePointsString(\n\t\t`cpu,host=A value=1.1 1000000000`,\n\t\t`cpu,host=A value=1.2 2000000000`,\n\t\t`cpu,host=A value=1.3 3000000000`,\n\t); err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\te.MustWriteSnapshot()\n\n\titr, err := e.CreateIterator(\"cpu\", influxql.IteratorOptions{\n\t\tExpr:       influxql.MustParseExpr(`value`),\n\t\tDimensions: []string{\"host\"},\n\t\tStartTime:  influxql.MinTime,\n\t\tEndTime:    influxql.MaxTime,\n\t\tAscending:  false,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfitr := itr.(influxql.FloatIterator)\n\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(0): %v\", err)\n\t} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 3000000000, Value: 1.3}) {\n\t\tt.Fatalf(\"unexpected point(0): %v\", p)\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(1): %v\", err)\n\t} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 2000000000, Value: 1.2}) {\n\t\tt.Fatalf(\"unexpected point(1): %v\", p)\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(2): %v\", err)\n\t} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 1000000000, Value: 1.1}) {\n\t\tt.Fatalf(\"unexpected point(2): %v\", p)\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"expected eof, got error: %v\", err)\n\t} else if p != nil {\n\t\tt.Fatalf(\"expected eof: %v\", p)\n\t}\n}\n\n// Ensure 
engine can create an iterator with auxilary fields.\nfunc TestEngine_CreateIterator_Aux(t *testing.T) {\n\tt.Parallel()\n\n\te := MustOpenEngine()\n\tdefer e.Close()\n\n\te.MeasurementFields([]byte(\"cpu\")).CreateFieldIfNotExists([]byte(\"value\"), influxql.Float, false)\n\te.MeasurementFields([]byte(\"cpu\")).CreateFieldIfNotExists([]byte(\"F\"), influxql.Float, false)\n\te.CreateSeriesIfNotExists([]byte(\"cpu,host=A\"), []byte(\"cpu\"), models.NewTags(map[string]string{\"host\": \"A\"}))\n\n\tif err := e.WritePointsString(\n\t\t`cpu,host=A value=1.1 1000000000`,\n\t\t`cpu,host=A F=100 1000000000`,\n\t\t`cpu,host=A value=1.2 2000000000`,\n\t\t`cpu,host=A value=1.3 3000000000`,\n\t\t`cpu,host=A F=200 3000000000`,\n\t); err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\n\titr, err := e.CreateIterator(\"cpu\", influxql.IteratorOptions{\n\t\tExpr:       influxql.MustParseExpr(`value`),\n\t\tAux:        []influxql.VarRef{{Val: \"F\"}},\n\t\tDimensions: []string{\"host\"},\n\t\tStartTime:  influxql.MinTime,\n\t\tEndTime:    influxql.MaxTime,\n\t\tAscending:  true,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfitr := itr.(influxql.FloatIterator)\n\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(0): %v\", err)\n\t} else if !deep.Equal(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 1000000000, Value: 1.1, Aux: []interface{}{float64(100)}}) {\n\t\tt.Fatalf(\"unexpected point(0): %v\", p)\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(1): %v\", err)\n\t} else if !deep.Equal(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 2000000000, Value: 1.2, Aux: []interface{}{(*float64)(nil)}}) {\n\t\tt.Fatalf(\"unexpected point(1): %v\", p)\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(2): %v\", err)\n\t} else if !deep.Equal(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 
3000000000, Value: 1.3, Aux: []interface{}{float64(200)}}) {\n\t\tt.Fatalf(\"unexpected point(2): %v\", p)\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"expected eof, got error: %v\", err)\n\t} else if p != nil {\n\t\tt.Fatalf(\"expected eof: %v\", p)\n\t}\n}\n\n// Ensure engine can create an iterator with a condition.\nfunc TestEngine_CreateIterator_Condition(t *testing.T) {\n\tt.Parallel()\n\n\te := MustOpenEngine()\n\tdefer e.Close()\n\n\te.MeasurementFields([]byte(\"cpu\")).CreateFieldIfNotExists([]byte(\"value\"), influxql.Float, false)\n\te.MeasurementFields([]byte(\"cpu\")).CreateFieldIfNotExists([]byte(\"X\"), influxql.Float, false)\n\te.MeasurementFields([]byte(\"cpu\")).CreateFieldIfNotExists([]byte(\"Y\"), influxql.Float, false)\n\te.CreateSeriesIfNotExists([]byte(\"cpu,host=A\"), []byte(\"cpu\"), models.NewTags(map[string]string{\"host\": \"A\"}))\n\te.SetFieldName([]byte(\"cpu\"), \"X\")\n\te.SetFieldName([]byte(\"cpu\"), \"Y\")\n\n\tif err := e.WritePointsString(\n\t\t`cpu,host=A value=1.1 1000000000`,\n\t\t`cpu,host=A X=10 1000000000`,\n\t\t`cpu,host=A Y=100 1000000000`,\n\n\t\t`cpu,host=A value=1.2 2000000000`,\n\n\t\t`cpu,host=A value=1.3 3000000000`,\n\t\t`cpu,host=A X=20 3000000000`,\n\t\t`cpu,host=A Y=200 3000000000`,\n\t); err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\n\titr, err := e.CreateIterator(\"cpu\", influxql.IteratorOptions{\n\t\tExpr:       influxql.MustParseExpr(`value`),\n\t\tDimensions: []string{\"host\"},\n\t\tCondition:  influxql.MustParseExpr(`X = 10 OR Y > 150`),\n\t\tStartTime:  influxql.MinTime,\n\t\tEndTime:    influxql.MaxTime,\n\t\tAscending:  true,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfitr := itr.(influxql.FloatIterator)\n\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(0): %v\", err)\n\t} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 1000000000, Value: 1.1}) 
{\n\t\tt.Fatalf(\"unexpected point(0): %v\", p)\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected point(1): %v\", err)\n\t} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=A\"), Time: 3000000000, Value: 1.3}) {\n\t\tt.Fatalf(\"unexpected point(1): %v\", p)\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"expected eof, got error: %v\", err)\n\t} else if p != nil {\n\t\tt.Fatalf(\"expected eof: %v\", p)\n\t}\n}\n\n// Ensures that deleting series from TSM files with multiple fields removes all the\n/// series\nfunc TestEngine_DeleteSeries(t *testing.T) {\n\t// Generate temporary file.\n\tf, _ := ioutil.TempFile(\"\", \"tsm\")\n\tf.Close()\n\tos.Remove(f.Name())\n\twalPath := filepath.Join(f.Name(), \"wal\")\n\tos.MkdirAll(walPath, 0777)\n\tdefer os.RemoveAll(f.Name())\n\n\t// Create a few points.\n\tp1 := MustParsePointString(\"cpu,host=A value=1.1 1000000000\")\n\tp2 := MustParsePointString(\"cpu,host=B value=1.2 2000000000\")\n\tp3 := MustParsePointString(\"cpu,host=A sum=1.3 3000000000\")\n\n\t// Write those points to the engine.\n\tdb := path.Base(f.Name())\n\topt := tsdb.NewEngineOptions()\n\topt.InmemIndex = inmem.NewIndex(db)\n\tidx := tsdb.MustOpenIndex(1, db, filepath.Join(f.Name(), \"index\"), opt)\n\tdefer idx.Close()\n\te := tsm1.NewEngine(1, idx, db, f.Name(), walPath, opt).(*tsm1.Engine)\n\t// e.LoadMetadataIndex(1, MustNewDatabaseIndex(\"db0\")) // Initialise an index\n\n\t// mock the planner so compactions don't run during the test\n\te.CompactionPlan = &mockPlanner{}\n\n\tif err := e.Open(); err != nil {\n\t\tt.Fatalf(\"failed to open tsm1 engine: %s\", err.Error())\n\t}\n\n\tif err := e.WritePoints([]models.Point{p1, p2, p3}); err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\tif err := e.WriteSnapshot(); err != nil {\n\t\tt.Fatalf(\"failed to snapshot: %s\", err.Error())\n\t}\n\n\tkeys := e.FileStore.Keys()\n\tif exp, got := 3, len(keys); exp 
!= got {\n\t\tt.Fatalf(\"series count mismatch: exp %v, got %v\", exp, got)\n\t}\n\n\tif err := e.DeleteSeriesRange([][]byte{[]byte(\"cpu,host=A\")}, math.MinInt64, math.MaxInt64); err != nil {\n\t\tt.Fatalf(\"failed to delete series: %v\", err)\n\t}\n\n\tkeys = e.FileStore.Keys()\n\tif exp, got := 1, len(keys); exp != got {\n\t\tt.Fatalf(\"series count mismatch: exp %v, got %v\", exp, got)\n\t}\n\n\texp := \"cpu,host=B#!~#value\"\n\tif _, ok := keys[exp]; !ok {\n\t\tt.Fatalf(\"wrong series deleted: exp %v, got %v\", exp, keys)\n\t}\n\n}\n\nfunc TestEngine_LastModified(t *testing.T) {\n\t// Generate temporary file.\n\tdir, _ := ioutil.TempDir(\"\", \"tsm\")\n\twalPath := filepath.Join(dir, \"wal\")\n\tos.MkdirAll(walPath, 0777)\n\tdefer os.RemoveAll(dir)\n\n\t// Create a few points.\n\tp1 := MustParsePointString(\"cpu,host=A value=1.1 1000000000\")\n\tp2 := MustParsePointString(\"cpu,host=B value=1.2 2000000000\")\n\tp3 := MustParsePointString(\"cpu,host=A sum=1.3 3000000000\")\n\n\t// Write those points to the engine.\n\tdb := path.Base(dir)\n\topt := tsdb.NewEngineOptions()\n\topt.InmemIndex = inmem.NewIndex(db)\n\tidx := tsdb.MustOpenIndex(1, db, filepath.Join(dir, \"index\"), opt)\n\tdefer idx.Close()\n\n\te := tsm1.NewEngine(1, idx, db, dir, walPath, opt).(*tsm1.Engine)\n\n\t// mock the planner so compactions don't run during the test\n\te.CompactionPlan = &mockPlanner{}\n\n\tif lm := e.LastModified(); !lm.IsZero() {\n\t\tt.Fatalf(\"expected zero time, got %v\", lm.UTC())\n\t}\n\n\te.SetEnabled(false)\n\tif err := e.Open(); err != nil {\n\t\tt.Fatalf(\"failed to open tsm1 engine: %s\", err.Error())\n\t}\n\n\tif err := e.WritePoints([]models.Point{p1, p2, p3}); err != nil {\n\t\tt.Fatalf(\"failed to write points: %s\", err.Error())\n\t}\n\n\tlm := e.LastModified()\n\tif lm.IsZero() {\n\t\tt.Fatalf(\"expected non-zero time, got %v\", lm.UTC())\n\t}\n\te.SetEnabled(true)\n\n\tif err := e.WriteSnapshot(); err != nil {\n\t\tt.Fatalf(\"failed to snapshot: %s\", 
err.Error())\n\t}\n\n\tlm2 := e.LastModified()\n\n\tif got, exp := lm.Equal(lm2), false; exp != got {\n\t\tt.Fatalf(\"expected time change, got %v, exp %v\", got, exp)\n\t}\n\n\tif err := e.DeleteSeriesRange([][]byte{[]byte(\"cpu,host=A\")}, math.MinInt64, math.MaxInt64); err != nil {\n\t\tt.Fatalf(\"failed to delete series: %v\", err)\n\t}\n\n\tlm3 := e.LastModified()\n\tif got, exp := lm2.Equal(lm3), false; exp != got {\n\t\tt.Fatalf(\"expected time change, got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestEngine_SnapshotsDisabled(t *testing.T) {\n\t// Generate temporary file.\n\tdir, _ := ioutil.TempDir(\"\", \"tsm\")\n\twalPath := filepath.Join(dir, \"wal\")\n\tos.MkdirAll(walPath, 0777)\n\tdefer os.RemoveAll(dir)\n\n\t// Create a tsm1 engine.\n\tdb := path.Base(dir)\n\topt := tsdb.NewEngineOptions()\n\topt.InmemIndex = inmem.NewIndex(db)\n\tidx := tsdb.MustOpenIndex(1, db, filepath.Join(dir, \"index\"), opt)\n\tdefer idx.Close()\n\n\te := tsm1.NewEngine(1, idx, db, dir, walPath, opt).(*tsm1.Engine)\n\n\t// mock the planner so compactions don't run during the test\n\te.CompactionPlan = &mockPlanner{}\n\n\te.SetEnabled(false)\n\tif err := e.Open(); err != nil {\n\t\tt.Fatalf(\"failed to open tsm1 engine: %s\", err.Error())\n\t}\n\n\t// Make sure Snapshots are disabled.\n\te.SetCompactionsEnabled(false)\n\te.Compactor.DisableSnapshots()\n\n\t// Writing a snapshot should not fail when the snapshot is empty\n\t// even if snapshots are disabled.\n\tif err := e.WriteSnapshot(); err != nil {\n\t\tt.Fatalf(\"failed to snapshot: %s\", err.Error())\n\t}\n}\n\nfunc BenchmarkEngine_CreateIterator_Count_1K(b *testing.B) {\n\tbenchmarkEngineCreateIteratorCount(b, 1000)\n}\nfunc BenchmarkEngine_CreateIterator_Count_100K(b *testing.B) {\n\tbenchmarkEngineCreateIteratorCount(b, 100000)\n}\nfunc BenchmarkEngine_CreateIterator_Count_1M(b *testing.B) {\n\tbenchmarkEngineCreateIteratorCount(b, 1000000)\n}\n\nfunc benchmarkEngineCreateIteratorCount(b *testing.B, pointN int) 
{\n\tbenchmarkIterator(b, influxql.IteratorOptions{\n\t\tExpr:      influxql.MustParseExpr(\"count(value)\"),\n\t\tAscending: true,\n\t\tStartTime: influxql.MinTime,\n\t\tEndTime:   influxql.MaxTime,\n\t}, pointN)\n}\n\nfunc BenchmarkEngine_CreateIterator_First_1K(b *testing.B) {\n\tbenchmarkEngineCreateIteratorFirst(b, 1000)\n}\nfunc BenchmarkEngine_CreateIterator_First_100K(b *testing.B) {\n\tbenchmarkEngineCreateIteratorFirst(b, 100000)\n}\nfunc BenchmarkEngine_CreateIterator_First_1M(b *testing.B) {\n\tbenchmarkEngineCreateIteratorFirst(b, 1000000)\n}\n\nfunc benchmarkEngineCreateIteratorFirst(b *testing.B, pointN int) {\n\tbenchmarkIterator(b, influxql.IteratorOptions{\n\t\tExpr:       influxql.MustParseExpr(\"first(value)\"),\n\t\tDimensions: []string{\"host\"},\n\t\tAscending:  true,\n\t\tStartTime:  influxql.MinTime,\n\t\tEndTime:    influxql.MaxTime,\n\t}, pointN)\n}\n\nfunc BenchmarkEngine_CreateIterator_Last_1K(b *testing.B) {\n\tbenchmarkEngineCreateIteratorLast(b, 1000)\n}\nfunc BenchmarkEngine_CreateIterator_Last_100K(b *testing.B) {\n\tbenchmarkEngineCreateIteratorLast(b, 100000)\n}\nfunc BenchmarkEngine_CreateIterator_Last_1M(b *testing.B) {\n\tbenchmarkEngineCreateIteratorLast(b, 1000000)\n}\n\nfunc benchmarkEngineCreateIteratorLast(b *testing.B, pointN int) {\n\tbenchmarkIterator(b, influxql.IteratorOptions{\n\t\tExpr:       influxql.MustParseExpr(\"last(value)\"),\n\t\tDimensions: []string{\"host\"},\n\t\tAscending:  true,\n\t\tStartTime:  influxql.MinTime,\n\t\tEndTime:    influxql.MaxTime,\n\t}, pointN)\n}\n\nfunc BenchmarkEngine_CreateIterator_Limit_1K(b *testing.B) {\n\tbenchmarkEngineCreateIteratorLimit(b, 1000)\n}\nfunc BenchmarkEngine_CreateIterator_Limit_100K(b *testing.B) {\n\tbenchmarkEngineCreateIteratorLimit(b, 100000)\n}\nfunc BenchmarkEngine_CreateIterator_Limit_1M(b *testing.B) {\n\tbenchmarkEngineCreateIteratorLimit(b, 1000000)\n}\n\nfunc BenchmarkEngine_WritePoints_10(b *testing.B) {\n\tbenchmarkEngine_WritePoints(b, 
10)\n}\n\nfunc BenchmarkEngine_WritePoints_100(b *testing.B) {\n\tbenchmarkEngine_WritePoints(b, 100)\n}\n\nfunc BenchmarkEngine_WritePoints_1000(b *testing.B) {\n\tbenchmarkEngine_WritePoints(b, 1000)\n}\n\nfunc BenchmarkEngine_WritePoints_5000(b *testing.B) {\n\tbenchmarkEngine_WritePoints(b, 5000)\n}\n\nfunc benchmarkEngine_WritePoints(b *testing.B, batchSize int) {\n\te := MustOpenEngine()\n\tdefer e.Close()\n\n\te.MeasurementFields([]byte(\"cpu\")).CreateFieldIfNotExists([]byte(\"value\"), influxql.Float, false)\n\n\tpp := make([]models.Point, 0, batchSize)\n\tfor i := 0; i < batchSize; i++ {\n\t\tp := MustParsePointString(fmt.Sprintf(\"cpu,host=%d value=1.2\", i))\n\t\tpp = append(pp, p)\n\t}\n\n\tb.ResetTimer()\n\tb.ReportAllocs()\n\tfor i := 0; i < b.N; i++ {\n\t\terr := e.WritePoints(pp)\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkEngine_WritePoints_Parallel_1000(b *testing.B) {\n\tbenchmarkEngine_WritePoints(b, 1000)\n}\n\nfunc BenchmarkEngine_WritePoints_Parallel_5000(b *testing.B) {\n\tbenchmarkEngine_WritePoints_Parallel(b, 5000)\n}\n\nfunc BenchmarkEngine_WritePoints_Parallel_10000(b *testing.B) {\n\tbenchmarkEngine_WritePoints_Parallel(b, 10000)\n}\n\nfunc BenchmarkEngine_WritePoints_Parallel_25000(b *testing.B) {\n\tbenchmarkEngine_WritePoints_Parallel(b, 25000)\n}\n\nfunc BenchmarkEngine_WritePoints_Parallel_50000(b *testing.B) {\n\tbenchmarkEngine_WritePoints_Parallel(b, 50000)\n}\n\nfunc BenchmarkEngine_WritePoints_Parallel_75000(b *testing.B) {\n\tbenchmarkEngine_WritePoints_Parallel(b, 75000)\n}\n\nfunc BenchmarkEngine_WritePoints_Parallel_100000(b *testing.B) {\n\tbenchmarkEngine_WritePoints_Parallel(b, 100000)\n}\n\nfunc BenchmarkEngine_WritePoints_Parallel_200000(b *testing.B) {\n\tbenchmarkEngine_WritePoints_Parallel(b, 200000)\n}\n\nfunc benchmarkEngine_WritePoints_Parallel(b *testing.B, batchSize int) {\n\te := MustOpenEngine()\n\tdefer e.Close()\n\n\t// 
e.Index().CreateMeasurementIndexIfNotExists(\"cpu\")\n\te.MeasurementFields([]byte(\"cpu\")).CreateFieldIfNotExists([]byte(\"value\"), influxql.Float, false)\n\n\tb.ResetTimer()\n\tb.ReportAllocs()\n\tfor i := 0; i < b.N; i++ {\n\n\t\tb.StopTimer()\n\t\tcpus := runtime.GOMAXPROCS(0)\n\t\tpp := make([]models.Point, 0, batchSize*cpus)\n\t\tfor i := 0; i < batchSize*cpus; i++ {\n\t\t\tp := MustParsePointString(fmt.Sprintf(\"cpu,host=%d value=1.2,other=%di\", i, i))\n\t\t\tpp = append(pp, p)\n\t\t}\n\t\tb.StartTimer()\n\n\t\tvar wg sync.WaitGroup\n\t\terrC := make(chan error)\n\t\tfor i := 0; i < cpus; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func(i int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tfrom, to := i*batchSize, (i+1)*batchSize\n\t\t\t\terr := e.WritePoints(pp[from:to])\n\t\t\t\tif err != nil {\n\t\t\t\t\terrC <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}(i)\n\t\t}\n\n\t\tgo func() {\n\t\t\twg.Wait()\n\t\t\tclose(errC)\n\t\t}()\n\n\t\tfor err := range errC {\n\t\t\tif err != nil {\n\t\t\t\tb.Error(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc benchmarkEngineCreateIteratorLimit(b *testing.B, pointN int) {\n\tbenchmarkIterator(b, influxql.IteratorOptions{\n\t\tExpr:       influxql.MustParseExpr(\"value\"),\n\t\tDimensions: []string{\"host\"},\n\t\tAscending:  true,\n\t\tStartTime:  influxql.MinTime,\n\t\tEndTime:    influxql.MaxTime,\n\t\tLimit:      10,\n\t}, pointN)\n}\n\nfunc benchmarkIterator(b *testing.B, opt influxql.IteratorOptions, pointN int) {\n\te := MustInitBenchmarkEngine(pointN)\n\tb.ResetTimer()\n\tb.ReportAllocs()\n\n\tfor i := 0; i < b.N; i++ {\n\t\titr, err := e.CreateIterator(\"cpu\", opt)\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tinfluxql.DrainIterator(itr)\n\t}\n}\n\nvar benchmark struct {\n\tEngine *Engine\n\tPointN int\n}\n\nvar hostNames = []string{\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\"}\n\n// MustInitBenchmarkEngine creates a new engine and fills it with points.\n// Reuses previous engine if the same parameters were 
used.\nfunc MustInitBenchmarkEngine(pointN int) *Engine {\n\t// Reuse engine, if available.\n\tif benchmark.Engine != nil {\n\t\tif benchmark.PointN == pointN {\n\t\t\treturn benchmark.Engine\n\t\t}\n\n\t\t// Otherwise close and remove it.\n\t\tbenchmark.Engine.Close()\n\t\tbenchmark.Engine = nil\n\t}\n\n\tconst batchSize = 1000\n\tif pointN%batchSize != 0 {\n\t\tpanic(fmt.Sprintf(\"point count (%d) must be a multiple of batch size (%d)\", pointN, batchSize))\n\t}\n\n\te := MustOpenEngine()\n\n\t// Initialize metadata.\n\te.MeasurementFields([]byte(\"cpu\")).CreateFieldIfNotExists([]byte(\"value\"), influxql.Float, false)\n\te.CreateSeriesIfNotExists([]byte(\"cpu,host=A\"), []byte(\"cpu\"), models.NewTags(map[string]string{\"host\": \"A\"}))\n\n\t// Generate time ascending points with jitterred time & value.\n\trand := rand.New(rand.NewSource(0))\n\tfor i := 0; i < pointN; i += batchSize {\n\t\tvar buf bytes.Buffer\n\t\tfor j := 0; j < batchSize; j++ {\n\t\t\tfmt.Fprintf(&buf, \"cpu,host=%s value=%d %d\",\n\t\t\t\thostNames[j%len(hostNames)],\n\t\t\t\t100+rand.Intn(50)-25,\n\t\t\t\t(time.Duration(i+j)*time.Second)+(time.Duration(rand.Intn(500)-250)*time.Millisecond),\n\t\t\t)\n\t\t\tif j != pointN-1 {\n\t\t\t\tfmt.Fprint(&buf, \"\\n\")\n\t\t\t}\n\t\t}\n\n\t\tif err := e.WritePointsString(buf.String()); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tif err := e.WriteSnapshot(); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Force garbage collection.\n\truntime.GC()\n\n\t// Save engine reference for reuse.\n\tbenchmark.Engine = e\n\tbenchmark.PointN = pointN\n\n\treturn e\n}\n\n// Engine is a test wrapper for tsm1.Engine.\ntype Engine struct {\n\t*tsm1.Engine\n\troot  string\n\tindex tsdb.Index\n}\n\n// NewEngine returns a new instance of Engine at a temporary location.\nfunc NewEngine() *Engine {\n\troot, err := ioutil.TempDir(\"\", \"tsm1-\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdb := path.Base(root)\n\topt := tsdb.NewEngineOptions()\n\topt.InmemIndex = 
inmem.NewIndex(db)\n\n\tidx := tsdb.MustOpenIndex(1, db, filepath.Join(root, \"data\", \"index\"), opt)\n\n\treturn &Engine{\n\t\tEngine: tsm1.NewEngine(1,\n\t\t\tidx,\n\t\t\tdb,\n\t\t\tfilepath.Join(root, \"data\"),\n\t\t\tfilepath.Join(root, \"wal\"),\n\t\t\topt).(*tsm1.Engine),\n\t\troot:  root,\n\t\tindex: idx,\n\t}\n}\n\n// MustOpenEngine returns a new, open instance of Engine.\nfunc MustOpenEngine() *Engine {\n\te := NewEngine()\n\tif err := e.Open(); err != nil {\n\t\tpanic(err)\n\t}\n\t// if err := e.LoadMetadataIndex(1, MustNewDatabaseIndex(\"db\")); err != nil {\n\t// \tpanic(err)\n\t// }\n\treturn e\n}\n\n// Close closes the engine and removes all underlying data.\nfunc (e *Engine) Close() error {\n\tif e.index != nil {\n\t\te.index.Close()\n\t}\n\tdefer os.RemoveAll(e.root)\n\treturn e.Engine.Close()\n}\n\n// Reopen closes and reopens the engine.\nfunc (e *Engine) Reopen() error {\n\tif err := e.Engine.Close(); err != nil {\n\t\treturn err\n\t} else if e.index.Close(); err != nil {\n\t\treturn err\n\t}\n\n\tdb := path.Base(e.root)\n\topt := tsdb.NewEngineOptions()\n\topt.InmemIndex = inmem.NewIndex(db)\n\n\te.index = tsdb.MustOpenIndex(1, db, filepath.Join(e.root, \"data\", \"index\"), opt)\n\n\te.Engine = tsm1.NewEngine(1,\n\t\te.index,\n\t\tdb,\n\t\tfilepath.Join(e.root, \"data\"),\n\t\tfilepath.Join(e.root, \"wal\"),\n\t\topt).(*tsm1.Engine)\n\n\tif err := e.Engine.Open(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// MustWriteSnapshot forces a snapshot of the engine. Panic on error.\nfunc (e *Engine) MustWriteSnapshot() {\n\tif err := e.WriteSnapshot(); err != nil {\n\t\tpanic(err)\n\t}\n}\n\n// WritePointsString parses a string buffer and writes the points.\nfunc (e *Engine) WritePointsString(buf ...string) error {\n\treturn e.WritePoints(MustParsePointsString(strings.Join(buf, \"\\n\")))\n}\n\n// MustParsePointsString parses points from a string. 
Panic on error.\nfunc MustParsePointsString(buf string) []models.Point {\n\ta, err := models.ParsePointsString(buf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn a\n}\n\n// MustParsePointString parses the first point from a string. Panic on error.\nfunc MustParsePointString(buf string) models.Point { return MustParsePointsString(buf)[0] }\n\ntype mockPlanner struct{}\n\nfunc (m *mockPlanner) Plan(lastWrite time.Time) []tsm1.CompactionGroup { return nil }\nfunc (m *mockPlanner) PlanLevel(level int) []tsm1.CompactionGroup      { return nil }\nfunc (m *mockPlanner) PlanOptimize() []tsm1.CompactionGroup            { return nil }\nfunc (m *mockPlanner) Release(groups []tsm1.CompactionGroup)           {}\nfunc (m *mockPlanner) FullyCompacted() bool                            { return false }\n\n// ParseTags returns an instance of Tags for a comma-delimited list of key/values.\nfunc ParseTags(s string) influxql.Tags {\n\tm := make(map[string]string)\n\tfor _, kv := range strings.Split(s, \",\") {\n\t\ta := strings.Split(kv, \"=\")\n\t\tm[a[0]] = a[1]\n\t}\n\treturn influxql.NewTags(m)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store.gen.go",
    "content": "// Generated by tmpl\n// https://github.com/benbjohnson/tmpl\n//\n// DO NOT EDIT!\n// Source: file_store.gen.go.tmpl\n\npackage tsm1\n\n// ReadFloatBlock reads the next block as a set of float values.\nfunc (c *KeyCursor) ReadFloatBlock(buf *[]FloatValue) ([]FloatValue, error) {\n\t// No matching blocks to decode\n\tif len(c.current) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// First block is the oldest block containing the points we're searching for.\n\tfirst := c.current[0]\n\t*buf = (*buf)[:0]\n\tvalues, err := first.r.ReadFloatBlockAt(&first.entry, buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Remove values we already read\n\tvalues = FloatValues(values).Exclude(first.readMin, first.readMax)\n\n\t// Remove any tombstones\n\ttombstones := first.r.TombstoneRange(c.key)\n\tvalues = c.filterFloatValues(tombstones, values)\n\n\t// Check we have remaining values.\n\tif len(values) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// Only one block with this key and time range so return it\n\tif len(c.current) == 1 {\n\t\tif len(values) > 0 {\n\t\t\tfirst.markRead(values[0].UnixNano(), values[len(values)-1].UnixNano())\n\t\t}\n\t\treturn values, nil\n\t}\n\n\t// Use the current block time range as our overlapping window\n\tminT, maxT := first.readMin, first.readMax\n\tif len(values) > 0 {\n\t\tminT, maxT = values[0].UnixNano(), values[len(values)-1].UnixNano()\n\t}\n\tif c.ascending {\n\t\t// Blocks are ordered by generation, we may have values in the past in later blocks, if so,\n\t\t// expand the window to include the min time range to ensure values are returned in ascending\n\t\t// order\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.MinTime < minT && !cur.read() {\n\t\t\t\tminT = cur.entry.MinTime\n\t\t\t}\n\t\t}\n\n\t\t// Find first block that overlaps our window\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {\n\t\t\t\t// 
Shrink our window so it's the intersection of the first overlapping block and the\n\t\t\t\t// first block.  We do this to minimize the region that overlaps and needs to\n\t\t\t\t// be merged.\n\t\t\t\tif cur.entry.MaxTime > maxT {\n\t\t\t\t\tmaxT = cur.entry.MaxTime\n\t\t\t\t}\n\t\t\t\tvalues = FloatValues(values).Include(minT, maxT)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// Search the remaining blocks that overlap our window and append their values so we can\n\t\t// merge them.\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\t// Skip this block if it doesn't contain points we looking for or they have already been read\n\t\t\tif !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {\n\t\t\t\tcur.markRead(minT, maxT)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttombstones := cur.r.TombstoneRange(c.key)\n\t\t\tvar a []FloatValue\n\t\t\tv, err := cur.r.ReadFloatBlockAt(&cur.entry, &a)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// Remove any tombstoned values\n\t\t\tv = c.filterFloatValues(tombstones, v)\n\n\t\t\t// Remove values we already read\n\t\t\tv = FloatValues(v).Exclude(cur.readMin, cur.readMax)\n\n\t\t\tif len(v) > 0 {\n\t\t\t\t// Only use values in the overlapping window\n\t\t\t\tv = FloatValues(v).Include(minT, maxT)\n\n\t\t\t\t// Merge the remaing values with the existing\n\t\t\t\tvalues = FloatValues(values).Merge(v)\n\t\t\t}\n\t\t\tcur.markRead(minT, maxT)\n\t\t}\n\n\t} else {\n\t\t// Blocks are ordered by generation, we may have values in the past in later blocks, if so,\n\t\t// expand the window to include the max time range to ensure values are returned in descending\n\t\t// order\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.MaxTime > maxT && !cur.read() {\n\t\t\t\tmaxT = cur.entry.MaxTime\n\t\t\t}\n\t\t}\n\n\t\t// Find first block that overlaps our window\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif 
cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {\n\t\t\t\t// Shrink our window so it's the intersection of the first overlapping block and the\n\t\t\t\t// first block.  We do this to minimize the region that overlaps and needs to\n\t\t\t\t// be merged.\n\t\t\t\tif cur.entry.MinTime < minT {\n\t\t\t\t\tminT = cur.entry.MinTime\n\t\t\t\t}\n\t\t\t\tvalues = FloatValues(values).Include(minT, maxT)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// Search the remaining blocks that overlap our window and append their values so we can\n\t\t// merge them.\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\t// Skip this block if it doesn't contain points we looking for or they have already been read\n\t\t\tif !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {\n\t\t\t\tcur.markRead(minT, maxT)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttombstones := cur.r.TombstoneRange(c.key)\n\n\t\t\tvar a []FloatValue\n\t\t\tv, err := cur.r.ReadFloatBlockAt(&cur.entry, &a)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// Remove any tombstoned values\n\t\t\tv = c.filterFloatValues(tombstones, v)\n\n\t\t\t// Remove values we already read\n\t\t\tv = FloatValues(v).Exclude(cur.readMin, cur.readMax)\n\n\t\t\t// If the block we decoded should have all of it's values included, mark it as read so we\n\t\t\t// don't use it again.\n\t\t\tif len(v) > 0 {\n\t\t\t\tv = FloatValues(v).Include(minT, maxT)\n\t\t\t\t// Merge the remaing values with the existing\n\t\t\t\tvalues = FloatValues(v).Merge(values)\n\t\t\t}\n\t\t\tcur.markRead(minT, maxT)\n\t\t}\n\t}\n\n\tfirst.markRead(minT, maxT)\n\n\treturn values, err\n}\n\n// ReadIntegerBlock reads the next block as a set of integer values.\nfunc (c *KeyCursor) ReadIntegerBlock(buf *[]IntegerValue) ([]IntegerValue, error) {\n\t// No matching blocks to decode\n\tif len(c.current) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// First block is the oldest block containing the points we're searching for.\n\tfirst := 
c.current[0]\n\t*buf = (*buf)[:0]\n\tvalues, err := first.r.ReadIntegerBlockAt(&first.entry, buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Remove values we already read\n\tvalues = IntegerValues(values).Exclude(first.readMin, first.readMax)\n\n\t// Remove any tombstones\n\ttombstones := first.r.TombstoneRange(c.key)\n\tvalues = c.filterIntegerValues(tombstones, values)\n\n\t// Check we have remaining values.\n\tif len(values) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// Only one block with this key and time range so return it\n\tif len(c.current) == 1 {\n\t\tif len(values) > 0 {\n\t\t\tfirst.markRead(values[0].UnixNano(), values[len(values)-1].UnixNano())\n\t\t}\n\t\treturn values, nil\n\t}\n\n\t// Use the current block time range as our overlapping window\n\tminT, maxT := first.readMin, first.readMax\n\tif len(values) > 0 {\n\t\tminT, maxT = values[0].UnixNano(), values[len(values)-1].UnixNano()\n\t}\n\tif c.ascending {\n\t\t// Blocks are ordered by generation, we may have values in the past in later blocks, if so,\n\t\t// expand the window to include the min time range to ensure values are returned in ascending\n\t\t// order\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.MinTime < minT && !cur.read() {\n\t\t\t\tminT = cur.entry.MinTime\n\t\t\t}\n\t\t}\n\n\t\t// Find first block that overlaps our window\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {\n\t\t\t\t// Shrink our window so it's the intersection of the first overlapping block and the\n\t\t\t\t// first block.  
We do this to minimize the region that overlaps and needs to\n\t\t\t\t// be merged.\n\t\t\t\tif cur.entry.MaxTime > maxT {\n\t\t\t\t\tmaxT = cur.entry.MaxTime\n\t\t\t\t}\n\t\t\t\tvalues = IntegerValues(values).Include(minT, maxT)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// Search the remaining blocks that overlap our window and append their values so we can\n\t\t// merge them.\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\t// Skip this block if it doesn't contain points we looking for or they have already been read\n\t\t\tif !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {\n\t\t\t\tcur.markRead(minT, maxT)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttombstones := cur.r.TombstoneRange(c.key)\n\t\t\tvar a []IntegerValue\n\t\t\tv, err := cur.r.ReadIntegerBlockAt(&cur.entry, &a)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// Remove any tombstoned values\n\t\t\tv = c.filterIntegerValues(tombstones, v)\n\n\t\t\t// Remove values we already read\n\t\t\tv = IntegerValues(v).Exclude(cur.readMin, cur.readMax)\n\n\t\t\tif len(v) > 0 {\n\t\t\t\t// Only use values in the overlapping window\n\t\t\t\tv = IntegerValues(v).Include(minT, maxT)\n\n\t\t\t\t// Merge the remaing values with the existing\n\t\t\t\tvalues = IntegerValues(values).Merge(v)\n\t\t\t}\n\t\t\tcur.markRead(minT, maxT)\n\t\t}\n\n\t} else {\n\t\t// Blocks are ordered by generation, we may have values in the past in later blocks, if so,\n\t\t// expand the window to include the max time range to ensure values are returned in descending\n\t\t// order\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.MaxTime > maxT && !cur.read() {\n\t\t\t\tmaxT = cur.entry.MaxTime\n\t\t\t}\n\t\t}\n\n\t\t// Find first block that overlaps our window\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {\n\t\t\t\t// Shrink our window so it's the intersection of the first 
overlapping block and the\n\t\t\t\t// first block.  We do this to minimize the region that overlaps and needs to\n\t\t\t\t// be merged.\n\t\t\t\tif cur.entry.MinTime < minT {\n\t\t\t\t\tminT = cur.entry.MinTime\n\t\t\t\t}\n\t\t\t\tvalues = IntegerValues(values).Include(minT, maxT)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// Search the remaining blocks that overlap our window and append their values so we can\n\t\t// merge them.\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\t// Skip this block if it doesn't contain points we looking for or they have already been read\n\t\t\tif !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {\n\t\t\t\tcur.markRead(minT, maxT)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttombstones := cur.r.TombstoneRange(c.key)\n\n\t\t\tvar a []IntegerValue\n\t\t\tv, err := cur.r.ReadIntegerBlockAt(&cur.entry, &a)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// Remove any tombstoned values\n\t\t\tv = c.filterIntegerValues(tombstones, v)\n\n\t\t\t// Remove values we already read\n\t\t\tv = IntegerValues(v).Exclude(cur.readMin, cur.readMax)\n\n\t\t\t// If the block we decoded should have all of it's values included, mark it as read so we\n\t\t\t// don't use it again.\n\t\t\tif len(v) > 0 {\n\t\t\t\tv = IntegerValues(v).Include(minT, maxT)\n\t\t\t\t// Merge the remaing values with the existing\n\t\t\t\tvalues = IntegerValues(v).Merge(values)\n\t\t\t}\n\t\t\tcur.markRead(minT, maxT)\n\t\t}\n\t}\n\n\tfirst.markRead(minT, maxT)\n\n\treturn values, err\n}\n\n// ReadStringBlock reads the next block as a set of string values.\nfunc (c *KeyCursor) ReadStringBlock(buf *[]StringValue) ([]StringValue, error) {\n\t// No matching blocks to decode\n\tif len(c.current) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// First block is the oldest block containing the points we're searching for.\n\tfirst := c.current[0]\n\t*buf = (*buf)[:0]\n\tvalues, err := first.r.ReadStringBlockAt(&first.entry, buf)\n\tif err != nil {\n\t\treturn 
nil, err\n\t}\n\n\t// Remove values we already read\n\tvalues = StringValues(values).Exclude(first.readMin, first.readMax)\n\n\t// Remove any tombstones\n\ttombstones := first.r.TombstoneRange(c.key)\n\tvalues = c.filterStringValues(tombstones, values)\n\n\t// Check we have remaining values.\n\tif len(values) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// Only one block with this key and time range so return it\n\tif len(c.current) == 1 {\n\t\tif len(values) > 0 {\n\t\t\tfirst.markRead(values[0].UnixNano(), values[len(values)-1].UnixNano())\n\t\t}\n\t\treturn values, nil\n\t}\n\n\t// Use the current block time range as our overlapping window\n\tminT, maxT := first.readMin, first.readMax\n\tif len(values) > 0 {\n\t\tminT, maxT = values[0].UnixNano(), values[len(values)-1].UnixNano()\n\t}\n\tif c.ascending {\n\t\t// Blocks are ordered by generation, we may have values in the past in later blocks, if so,\n\t\t// expand the window to include the min time range to ensure values are returned in ascending\n\t\t// order\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.MinTime < minT && !cur.read() {\n\t\t\t\tminT = cur.entry.MinTime\n\t\t\t}\n\t\t}\n\n\t\t// Find first block that overlaps our window\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {\n\t\t\t\t// Shrink our window so it's the intersection of the first overlapping block and the\n\t\t\t\t// first block.  
We do this to minimize the region that overlaps and needs to\n\t\t\t\t// be merged.\n\t\t\t\tif cur.entry.MaxTime > maxT {\n\t\t\t\t\tmaxT = cur.entry.MaxTime\n\t\t\t\t}\n\t\t\t\tvalues = StringValues(values).Include(minT, maxT)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// Search the remaining blocks that overlap our window and append their values so we can\n\t\t// merge them.\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\t// Skip this block if it doesn't contain points we looking for or they have already been read\n\t\t\tif !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {\n\t\t\t\tcur.markRead(minT, maxT)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttombstones := cur.r.TombstoneRange(c.key)\n\t\t\tvar a []StringValue\n\t\t\tv, err := cur.r.ReadStringBlockAt(&cur.entry, &a)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// Remove any tombstoned values\n\t\t\tv = c.filterStringValues(tombstones, v)\n\n\t\t\t// Remove values we already read\n\t\t\tv = StringValues(v).Exclude(cur.readMin, cur.readMax)\n\n\t\t\tif len(v) > 0 {\n\t\t\t\t// Only use values in the overlapping window\n\t\t\t\tv = StringValues(v).Include(minT, maxT)\n\n\t\t\t\t// Merge the remaing values with the existing\n\t\t\t\tvalues = StringValues(values).Merge(v)\n\t\t\t}\n\t\t\tcur.markRead(minT, maxT)\n\t\t}\n\n\t} else {\n\t\t// Blocks are ordered by generation, we may have values in the past in later blocks, if so,\n\t\t// expand the window to include the max time range to ensure values are returned in descending\n\t\t// order\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.MaxTime > maxT && !cur.read() {\n\t\t\t\tmaxT = cur.entry.MaxTime\n\t\t\t}\n\t\t}\n\n\t\t// Find first block that overlaps our window\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {\n\t\t\t\t// Shrink our window so it's the intersection of the first 
overlapping block and the\n\t\t\t\t// first block.  We do this to minimize the region that overlaps and needs to\n\t\t\t\t// be merged.\n\t\t\t\tif cur.entry.MinTime < minT {\n\t\t\t\t\tminT = cur.entry.MinTime\n\t\t\t\t}\n\t\t\t\tvalues = StringValues(values).Include(minT, maxT)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// Search the remaining blocks that overlap our window and append their values so we can\n\t\t// merge them.\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\t// Skip this block if it doesn't contain points we looking for or they have already been read\n\t\t\tif !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {\n\t\t\t\tcur.markRead(minT, maxT)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttombstones := cur.r.TombstoneRange(c.key)\n\n\t\t\tvar a []StringValue\n\t\t\tv, err := cur.r.ReadStringBlockAt(&cur.entry, &a)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// Remove any tombstoned values\n\t\t\tv = c.filterStringValues(tombstones, v)\n\n\t\t\t// Remove values we already read\n\t\t\tv = StringValues(v).Exclude(cur.readMin, cur.readMax)\n\n\t\t\t// If the block we decoded should have all of it's values included, mark it as read so we\n\t\t\t// don't use it again.\n\t\t\tif len(v) > 0 {\n\t\t\t\tv = StringValues(v).Include(minT, maxT)\n\t\t\t\t// Merge the remaing values with the existing\n\t\t\t\tvalues = StringValues(v).Merge(values)\n\t\t\t}\n\t\t\tcur.markRead(minT, maxT)\n\t\t}\n\t}\n\n\tfirst.markRead(minT, maxT)\n\n\treturn values, err\n}\n\n// ReadBooleanBlock reads the next block as a set of boolean values.\nfunc (c *KeyCursor) ReadBooleanBlock(buf *[]BooleanValue) ([]BooleanValue, error) {\n\t// No matching blocks to decode\n\tif len(c.current) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// First block is the oldest block containing the points we're searching for.\n\tfirst := c.current[0]\n\t*buf = (*buf)[:0]\n\tvalues, err := first.r.ReadBooleanBlockAt(&first.entry, buf)\n\tif err != nil {\n\t\treturn 
nil, err\n\t}\n\n\t// Remove values we already read\n\tvalues = BooleanValues(values).Exclude(first.readMin, first.readMax)\n\n\t// Remove any tombstones\n\ttombstones := first.r.TombstoneRange(c.key)\n\tvalues = c.filterBooleanValues(tombstones, values)\n\n\t// Check we have remaining values.\n\tif len(values) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// Only one block with this key and time range so return it\n\tif len(c.current) == 1 {\n\t\tif len(values) > 0 {\n\t\t\tfirst.markRead(values[0].UnixNano(), values[len(values)-1].UnixNano())\n\t\t}\n\t\treturn values, nil\n\t}\n\n\t// Use the current block time range as our overlapping window\n\tminT, maxT := first.readMin, first.readMax\n\tif len(values) > 0 {\n\t\tminT, maxT = values[0].UnixNano(), values[len(values)-1].UnixNano()\n\t}\n\tif c.ascending {\n\t\t// Blocks are ordered by generation, we may have values in the past in later blocks, if so,\n\t\t// expand the window to include the min time range to ensure values are returned in ascending\n\t\t// order\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.MinTime < minT && !cur.read() {\n\t\t\t\tminT = cur.entry.MinTime\n\t\t\t}\n\t\t}\n\n\t\t// Find first block that overlaps our window\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {\n\t\t\t\t// Shrink our window so it's the intersection of the first overlapping block and the\n\t\t\t\t// first block.  
We do this to minimize the region that overlaps and needs to\n\t\t\t\t// be merged.\n\t\t\t\tif cur.entry.MaxTime > maxT {\n\t\t\t\t\tmaxT = cur.entry.MaxTime\n\t\t\t\t}\n\t\t\t\tvalues = BooleanValues(values).Include(minT, maxT)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// Search the remaining blocks that overlap our window and append their values so we can\n\t\t// merge them.\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\t// Skip this block if it doesn't contain points we looking for or they have already been read\n\t\t\tif !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {\n\t\t\t\tcur.markRead(minT, maxT)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttombstones := cur.r.TombstoneRange(c.key)\n\t\t\tvar a []BooleanValue\n\t\t\tv, err := cur.r.ReadBooleanBlockAt(&cur.entry, &a)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// Remove any tombstoned values\n\t\t\tv = c.filterBooleanValues(tombstones, v)\n\n\t\t\t// Remove values we already read\n\t\t\tv = BooleanValues(v).Exclude(cur.readMin, cur.readMax)\n\n\t\t\tif len(v) > 0 {\n\t\t\t\t// Only use values in the overlapping window\n\t\t\t\tv = BooleanValues(v).Include(minT, maxT)\n\n\t\t\t\t// Merge the remaing values with the existing\n\t\t\t\tvalues = BooleanValues(values).Merge(v)\n\t\t\t}\n\t\t\tcur.markRead(minT, maxT)\n\t\t}\n\n\t} else {\n\t\t// Blocks are ordered by generation, we may have values in the past in later blocks, if so,\n\t\t// expand the window to include the max time range to ensure values are returned in descending\n\t\t// order\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.MaxTime > maxT && !cur.read() {\n\t\t\t\tmaxT = cur.entry.MaxTime\n\t\t\t}\n\t\t}\n\n\t\t// Find first block that overlaps our window\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {\n\t\t\t\t// Shrink our window so it's the intersection of the first 
overlapping block and the\n\t\t\t\t// first block.  We do this to minimize the region that overlaps and needs to\n\t\t\t\t// be merged.\n\t\t\t\tif cur.entry.MinTime < minT {\n\t\t\t\t\tminT = cur.entry.MinTime\n\t\t\t\t}\n\t\t\t\tvalues = BooleanValues(values).Include(minT, maxT)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// Search the remaining blocks that overlap our window and append their values so we can\n\t\t// merge them.\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\t// Skip this block if it doesn't contain points we looking for or they have already been read\n\t\t\tif !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {\n\t\t\t\tcur.markRead(minT, maxT)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttombstones := cur.r.TombstoneRange(c.key)\n\n\t\t\tvar a []BooleanValue\n\t\t\tv, err := cur.r.ReadBooleanBlockAt(&cur.entry, &a)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// Remove any tombstoned values\n\t\t\tv = c.filterBooleanValues(tombstones, v)\n\n\t\t\t// Remove values we already read\n\t\t\tv = BooleanValues(v).Exclude(cur.readMin, cur.readMax)\n\n\t\t\t// If the block we decoded should have all of it's values included, mark it as read so we\n\t\t\t// don't use it again.\n\t\t\tif len(v) > 0 {\n\t\t\t\tv = BooleanValues(v).Include(minT, maxT)\n\t\t\t\t// Merge the remaing values with the existing\n\t\t\t\tvalues = BooleanValues(v).Merge(values)\n\t\t\t}\n\t\t\tcur.markRead(minT, maxT)\n\t\t}\n\t}\n\n\tfirst.markRead(minT, maxT)\n\n\treturn values, err\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store.gen.go.tmpl",
    "content": "package tsm1\n\n\n{{range .}}\n// Read{{.Name}}Block reads the next block as a set of {{.name}} values.\nfunc (c *KeyCursor) Read{{.Name}}Block(buf *[]{{.Name}}Value) ([]{{.Name}}Value, error) {\n\t// No matching blocks to decode\n\tif len(c.current) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// First block is the oldest block containing the points we're searching for.\n\tfirst := c.current[0]\n\t*buf = (*buf)[:0]\n\tvalues, err := first.r.Read{{.Name}}BlockAt(&first.entry, buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Remove values we already read\n\tvalues = {{.Name}}Values(values).Exclude(first.readMin, first.readMax)\n\n\t// Remove any tombstones\n\ttombstones := first.r.TombstoneRange(c.key)\n\tvalues = c.filter{{.Name}}Values(tombstones, values)\n\n\t// Check we have remaining values.\n\tif len(values) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// Only one block with this key and time range so return it\n\tif len(c.current) == 1 {\n\t\tif len(values) > 0 {\n\t\t\tfirst.markRead(values[0].UnixNano(), values[len(values)-1].UnixNano())\n\t\t}\n\t\treturn values, nil\n\t}\n\n\t// Use the current block time range as our overlapping window\n\tminT, maxT := first.readMin, first.readMax\n\tif len(values) > 0 {\n\t\tminT, maxT = values[0].UnixNano(), values[len(values)-1].UnixNano()\n\t}\n\tif c.ascending {\n\t\t// Blocks are ordered by generation, we may have values in the past in later blocks, if so,\n\t\t// expand the window to include the min time range to ensure values are returned in ascending\n\t\t// order\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.MinTime < minT && !cur.read() {\n\t\t\t\tminT = cur.entry.MinTime\n\t\t\t}\n\t\t}\n\n\t\t// Find first block that overlaps our window\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {\n\t\t\t\t// Shrink our window so it's the intersection of the first overlapping 
block and the\n\t\t\t\t// first block.  We do this to minimize the region that overlaps and needs to\n\t\t\t\t// be merged.\n\t\t\t\tif cur.entry.MaxTime > maxT {\n\t\t\t\t\tmaxT = cur.entry.MaxTime\n\t\t\t\t}\n\t\t\t\tvalues = {{.Name}}Values(values).Include(minT, maxT)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// Search the remaining blocks that overlap our window and append their values so we can\n\t\t// merge them.\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\t// Skip this block if it doesn't contain points we looking for or they have already been read\n\t\t\tif !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {\n\t\t\t\tcur.markRead(minT, maxT)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttombstones := cur.r.TombstoneRange(c.key)\n\t\t\tvar a []{{.Name}}Value\n\t\t\tv, err := cur.r.Read{{.Name}}BlockAt(&cur.entry, &a)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// Remove any tombstoned values\n\t\t\tv = c.filter{{.Name}}Values(tombstones, v)\n\n\t\t\t// Remove values we already read\n\t\t\tv = {{.Name}}Values(v).Exclude(cur.readMin, cur.readMax)\n\n\t\t\tif len(v) > 0 {\n\t\t\t\t// Only use values in the overlapping window\n\t\t\t\tv = {{.Name}}Values(v).Include(minT, maxT)\n\n\t\t\t\t// Merge the remaing values with the existing\n\t\t\t\tvalues = {{.Name}}Values(values).Merge(v)\n\t\t\t}\n\t\t\tcur.markRead(minT, maxT)\n\t\t}\n\n\t} else {\n\t\t// Blocks are ordered by generation, we may have values in the past in later blocks, if so,\n\t\t// expand the window to include the max time range to ensure values are returned in descending\n\t\t// order\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.MaxTime > maxT && !cur.read() {\n\t\t\t\tmaxT = cur.entry.MaxTime\n\t\t\t}\n\t\t}\n\n\t\t// Find first block that overlaps our window\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\tif cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {\n\t\t\t\t// 
Shrink our window so it's the intersection of the first overlapping block and the\n\t\t\t\t// first block.  We do this to minimize the region that overlaps and needs to\n\t\t\t\t// be merged.\n\t\t\t\tif cur.entry.MinTime < minT {\n\t\t\t\t\tminT = cur.entry.MinTime\n\t\t\t\t}\n\t\t\t\tvalues = {{.Name}}Values(values).Include(minT, maxT)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// Search the remaining blocks that overlap our window and append their values so we can\n\t\t// merge them.\n\t\tfor i := 1; i < len(c.current); i++ {\n\t\t\tcur := c.current[i]\n\t\t\t// Skip this block if it doesn't contain points we looking for or they have already been read\n\t\t\tif !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {\n\t\t\t\tcur.markRead(minT, maxT)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttombstones := cur.r.TombstoneRange(c.key)\n\n\t\t\tvar a []{{.Name}}Value\n\t\t\tv, err := cur.r.Read{{.Name}}BlockAt(&cur.entry, &a)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// Remove any tombstoned values\n\t\t\tv = c.filter{{.Name}}Values(tombstones, v)\n\n\t\t\t// Remove values we already read\n\t\t\tv = {{.Name}}Values(v).Exclude(cur.readMin, cur.readMax)\n\n\t\t\t// If the block we decoded should have all of it's values included, mark it as read so we\n\t\t\t// don't use it again.\n\t\t\tif len(v) > 0 {\n\t\t\t\tv = {{.Name}}Values(v).Include(minT, maxT)\n\t\t\t\t// Merge the remaing values with the existing\n\t\t\t\tvalues = {{.Name}}Values(v).Merge(values)\n\t\t\t}\n\t\t\tcur.markRead(minT, maxT)\n\t\t}\n\t}\n\n\tfirst.markRead(minT, maxT)\n\n\treturn values, err\n}\n\n{{ end }}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store.gen.go.tmpldata",
    "content": "[\n\t{\n\t\t\"Name\":\"Float\",\n\t\t\"name\":\"float\"\n\t},\n\t{\n\t\t\"Name\":\"Integer\",\n\t\t\"name\":\"integer\"\n\t},\n\t{\n\t\t\"Name\":\"String\",\n\t\t\"name\":\"string\"\n\t},\n\t{\n\t\t\"Name\":\"Boolean\",\n\t\t\"name\":\"boolean\"\n\t}\n]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store.go",
    "content": "package tsm1\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"math\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/uber-go/zap\"\n)\n\n// TSMFile represents an on-disk TSM file.\ntype TSMFile interface {\n\t// Path returns the underlying file path for the TSMFile.  If the file\n\t// has not be written or loaded from disk, the zero value is returned.\n\tPath() string\n\n\t// Read returns all the values in the block where time t resides.\n\tRead(key string, t int64) ([]Value, error)\n\n\t// ReadAt returns all the values in the block identified by entry.\n\tReadAt(entry *IndexEntry, values []Value) ([]Value, error)\n\tReadFloatBlockAt(entry *IndexEntry, values *[]FloatValue) ([]FloatValue, error)\n\tReadIntegerBlockAt(entry *IndexEntry, values *[]IntegerValue) ([]IntegerValue, error)\n\tReadStringBlockAt(entry *IndexEntry, values *[]StringValue) ([]StringValue, error)\n\tReadBooleanBlockAt(entry *IndexEntry, values *[]BooleanValue) ([]BooleanValue, error)\n\n\t// Entries returns the index entries for all blocks for the given key.\n\tEntries(key string) []IndexEntry\n\tReadEntries(key string, entries *[]IndexEntry)\n\n\t// Returns true if the TSMFile may contain a value with the specified\n\t// key and time.\n\tContainsValue(key string, t int64) bool\n\n\t// Contains returns true if the file contains any values for the given\n\t// key.\n\tContains(key string) bool\n\n\t// TimeRange returns the min and max time across all keys in the file.\n\tTimeRange() (int64, int64)\n\n\t// TombstoneRange returns ranges of time that are deleted for the given key.\n\tTombstoneRange(key string) []TimeRange\n\n\t// KeyRange returns the min and max keys in the file.\n\tKeyRange() (string, string)\n\n\t// KeyCount returns the number of distinct keys in the file.\n\tKeyCount() int\n\n\t// KeyAt returns the key located at index 
position idx.\n\tKeyAt(idx int) ([]byte, byte)\n\n\t// Type returns the block type of the values stored for the key.  Returns one of\n\t// BlockFloat64, BlockInt64, BlockBoolean, BlockString.  If key does not exist,\n\t// an error is returned.\n\tType(key string) (byte, error)\n\n\t// Delete removes the keys from the set of keys available in this file.\n\tDelete(keys []string) error\n\n\t// DeleteRange removes the values for keys between timestamps min and max.\n\tDeleteRange(keys []string, min, max int64) error\n\n\t// HasTombstones returns true if file contains values that have been deleted.\n\tHasTombstones() bool\n\n\t// TombstoneFiles returns the tombstone filestats if there are any tombstones\n\t// written for this file.\n\tTombstoneFiles() []FileStat\n\n\t// Close closes the underlying file resources.\n\tClose() error\n\n\t// Size returns the size of the file on disk in bytes.\n\tSize() uint32\n\n\t// Rename renames the existing TSM file to a new name and replaces the mmap backing slice using the new\n\t// file name.  Index and Reader state are not re-initialized.\n\tRename(path string) error\n\n\t// Remove deletes the file from the filesystem.\n\tRemove() error\n\n\t// InUse returns true if the file is currently in use by queries.\n\tInUse() bool\n\n\t// Ref records that this file is actively in use.\n\tRef()\n\n\t// Unref records that this file is no longer in use.\n\tUnref()\n\n\t// Stats returns summary information about the TSM file.\n\tStats() FileStat\n\n\t// BlockIterator returns an iterator pointing to the first block in the file and\n\t// allows sequential iteration to each and every block.\n\tBlockIterator() *BlockIterator\n}\n\n// Statistics gathered by the FileStore.\nconst (\n\tstatFileStoreBytes = \"diskBytes\"\n\tstatFileStoreCount = \"numFiles\"\n)\n\n// FileStore is an abstraction around multiple TSM files.\ntype FileStore struct {\n\tmu           sync.RWMutex\n\tlastModified time.Time\n\t// Most recently known file stats. 
If nil then stats will need to be\n\t// recalculated\n\tlastFileStats []FileStat\n\n\tcurrentGeneration int\n\tdir               string\n\n\tfiles []TSMFile\n\n\tlogger       zap.Logger // Logger to be used for important messages\n\ttraceLogger  zap.Logger // Logger to be used when trace-logging is on.\n\ttraceLogging bool\n\n\tstats  *FileStoreStatistics\n\tpurger *purger\n\n\tcurrentTempDirID int\n}\n\n// FileStat holds information about a TSM file on disk.\ntype FileStat struct {\n\tPath             string\n\tHasTombstone     bool\n\tSize             uint32\n\tLastModified     int64\n\tMinTime, MaxTime int64\n\tMinKey, MaxKey   string\n}\n\n// OverlapsTimeRange returns true if the time range of the file intersect min and max.\nfunc (f FileStat) OverlapsTimeRange(min, max int64) bool {\n\treturn f.MinTime <= max && f.MaxTime >= min\n}\n\n// OverlapsKeyRange returns true if the min and max keys of the file overlap the arguments min and max.\nfunc (f FileStat) OverlapsKeyRange(min, max string) bool {\n\treturn min != \"\" && max != \"\" && f.MinKey <= max && f.MaxKey >= min\n}\n\n// ContainsKey returns true if the min and max keys of the file overlap the arguments min and max.\nfunc (f FileStat) ContainsKey(key string) bool {\n\treturn f.MinKey >= key || key <= f.MaxKey\n}\n\n// NewFileStore returns a new instance of FileStore based on the given directory.\nfunc NewFileStore(dir string) *FileStore {\n\tlogger := zap.New(zap.NullEncoder())\n\tfs := &FileStore{\n\t\tdir:          dir,\n\t\tlastModified: time.Time{},\n\t\tlogger:       logger,\n\t\ttraceLogger:  logger,\n\t\tstats:        &FileStoreStatistics{},\n\t\tpurger: &purger{\n\t\t\tfiles:  map[string]TSMFile{},\n\t\t\tlogger: logger,\n\t\t},\n\t}\n\tfs.purger.fileStore = fs\n\treturn fs\n}\n\n// enableTraceLogging must be called before the FileStore is opened.\nfunc (f *FileStore) enableTraceLogging(enabled bool) {\n\tf.traceLogging = enabled\n\tif enabled {\n\t\tf.traceLogger = f.logger\n\t}\n}\n\n// 
WithLogger sets the logger on the file store.\nfunc (f *FileStore) WithLogger(log zap.Logger) {\n\tf.logger = log.With(zap.String(\"service\", \"filestore\"))\n\tf.purger.logger = f.logger\n\n\tif f.traceLogging {\n\t\tf.traceLogger = f.logger\n\t}\n}\n\n// FileStoreStatistics keeps statistics about the file store.\ntype FileStoreStatistics struct {\n\tDiskBytes int64\n\tFileCount int64\n}\n\n// Statistics returns statistics for periodic monitoring.\nfunc (f *FileStore) Statistics(tags map[string]string) []models.Statistic {\n\treturn []models.Statistic{{\n\t\tName: \"tsm1_filestore\",\n\t\tTags: tags,\n\t\tValues: map[string]interface{}{\n\t\t\tstatFileStoreBytes: atomic.LoadInt64(&f.stats.DiskBytes),\n\t\t\tstatFileStoreCount: atomic.LoadInt64(&f.stats.FileCount),\n\t\t},\n\t}}\n}\n\n// Count returns the number of TSM files currently loaded.\nfunc (f *FileStore) Count() int {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\treturn len(f.files)\n}\n\n// Files returns the slice of TSM files currently loaded.\nfunc (f *FileStore) Files() []TSMFile {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\treturn f.files\n}\n\n// CurrentGeneration returns the current generation of the TSM files.\nfunc (f *FileStore) CurrentGeneration() int {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\treturn f.currentGeneration\n}\n\n// NextGeneration increments the max file ID and returns the new value.\nfunc (f *FileStore) NextGeneration() int {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\tf.currentGeneration++\n\treturn f.currentGeneration\n}\n\n// WalkKeys calls fn for every key in every TSM file known to the FileStore.  
If the key\n// exists in multiple files, it will be invoked for each file.\nfunc (f *FileStore) WalkKeys(fn func(key []byte, typ byte) error) error {\n\tf.mu.RLock()\n\tif len(f.files) == 0 {\n\t\tf.mu.RUnlock()\n\t\treturn nil\n\t}\n\n\treaders := make([]chan seriesKey, 0, len(f.files))\n\tfor _, f := range f.files {\n\t\tch := make(chan seriesKey, 1)\n\t\treaders = append(readers, ch)\n\n\t\tgo func(c chan seriesKey, r TSMFile) {\n\t\t\tn := r.KeyCount()\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\tkey, typ := r.KeyAt(i)\n\t\t\t\tc <- seriesKey{key, typ}\n\t\t\t}\n\t\t\tclose(ch)\n\t\t}(ch, f)\n\t}\n\tf.mu.RUnlock()\n\n\tmerged := merge(readers...)\n\tfor v := range merged {\n\t\tif err := fn(v.key, v.typ); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// Keys returns all keys and types for all files in the file store.\nfunc (f *FileStore) Keys() map[string]byte {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tuniqueKeys := map[string]byte{}\n\tif err := f.WalkKeys(func(key []byte, typ byte) error {\n\t\tuniqueKeys[string(key)] = typ\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil\n\t}\n\n\treturn uniqueKeys\n}\n\n// Type returns the type of values store at the block for key.\nfunc (f *FileStore) Type(key string) (byte, error) {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tfor _, f := range f.files {\n\t\tif f.Contains(key) {\n\t\t\treturn f.Type(key)\n\t\t}\n\t}\n\treturn 0, fmt.Errorf(\"unknown type for %v\", key)\n}\n\n// Delete removes the keys from the set of keys available in this file.\nfunc (f *FileStore) Delete(keys []string) error {\n\treturn f.DeleteRange(keys, math.MinInt64, math.MaxInt64)\n}\n\n// DeleteRange removes the values for keys between timestamps min and max.\nfunc (f *FileStore) DeleteRange(keys []string, min, max int64) error {\n\tif err := f.walkFiles(func(tsm TSMFile) error {\n\t\treturn tsm.DeleteRange(keys, min, max)\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tf.mu.Lock()\n\tf.lastModified = 
time.Now().UTC()\n\tf.lastFileStats = nil\n\tf.mu.Unlock()\n\treturn nil\n}\n\n// Open loads all the TSM files in the configured directory.\nfunc (f *FileStore) Open() error {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\t// Not loading files from disk so nothing to do\n\tif f.dir == \"\" {\n\t\treturn nil\n\t}\n\n\t// find the current max ID for temp directories\n\ttmpfiles, err := ioutil.ReadDir(f.dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, fi := range tmpfiles {\n\t\tif fi.IsDir() && strings.HasSuffix(fi.Name(), \".tmp\") {\n\t\t\tss := strings.Split(filepath.Base(fi.Name()), \".\")\n\t\t\tif len(ss) == 2 {\n\t\t\t\tif i, err := strconv.Atoi(ss[0]); err != nil {\n\t\t\t\t\tif i > f.currentTempDirID {\n\t\t\t\t\t\tf.currentTempDirID = i\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfiles, err := filepath.Glob(filepath.Join(f.dir, fmt.Sprintf(\"*.%s\", TSMFileExtension)))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// struct to hold the result of opening each reader in a goroutine\n\ttype res struct {\n\t\tr   *TSMReader\n\t\terr error\n\t}\n\n\treaderC := make(chan *res)\n\tfor i, fn := range files {\n\t\t// Keep track of the latest ID\n\t\tgeneration, _, err := ParseTSMFileName(fn)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif generation >= f.currentGeneration {\n\t\t\tf.currentGeneration = generation + 1\n\t\t}\n\n\t\tfile, err := os.OpenFile(fn, os.O_RDONLY, 0666)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error opening file %s: %v\", fn, err)\n\t\t}\n\n\t\tgo func(idx int, file *os.File) {\n\t\t\tstart := time.Now()\n\t\t\tdf, err := NewTSMReader(file)\n\t\t\tf.logger.Info(fmt.Sprintf(\"%s (#%d) opened in %v\", file.Name(), idx, time.Since(start)))\n\n\t\t\tif err != nil {\n\t\t\t\treaderC <- &res{r: df, err: fmt.Errorf(\"error opening memory map for file %s: %v\", file.Name(), err)}\n\t\t\t\treturn\n\t\t\t}\n\t\t\treaderC <- &res{r: df}\n\t\t}(i, file)\n\t}\n\n\tvar lm int64\n\tfor range files {\n\t\tres := <-readerC\n\t\tif 
res.err != nil {\n\n\t\t\treturn res.err\n\t\t}\n\t\tf.files = append(f.files, res.r)\n\t\t// Accumulate file store size stats\n\t\tatomic.AddInt64(&f.stats.DiskBytes, int64(res.r.Size()))\n\t\tfor _, ts := range res.r.TombstoneFiles() {\n\t\t\tatomic.AddInt64(&f.stats.DiskBytes, int64(ts.Size))\n\t\t}\n\n\t\t// Re-initialize the lastModified time for the file store\n\t\tif res.r.LastModified() > lm {\n\t\t\tlm = res.r.LastModified()\n\t\t}\n\n\t}\n\tf.lastModified = time.Unix(0, lm)\n\tclose(readerC)\n\n\tsort.Sort(tsmReaders(f.files))\n\tatomic.StoreInt64(&f.stats.FileCount, int64(len(f.files)))\n\treturn nil\n}\n\n// Close closes the file store.\nfunc (f *FileStore) Close() error {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\tfor _, file := range f.files {\n\t\tfile.Close()\n\t}\n\n\tf.lastFileStats = nil\n\tf.files = nil\n\tatomic.StoreInt64(&f.stats.FileCount, 0)\n\treturn nil\n}\n\nfunc (f *FileStore) DiskSizeBytes() int64 {\n\treturn atomic.LoadInt64(&f.stats.DiskBytes)\n}\n\n// Read returns the slice of values for the given key and the given timestamp,\n// if any file matches those constraints.\nfunc (f *FileStore) Read(key string, t int64) ([]Value, error) {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tfor _, f := range f.files {\n\t\t// Can this file possibly contain this key and timestamp?\n\t\tif !f.Contains(key) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// May have the key and time we are looking for so try to find\n\t\tv, err := f.Read(key, t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif len(v) > 0 {\n\t\t\treturn v, nil\n\t\t}\n\t}\n\treturn nil, nil\n}\n\n// KeyCursor returns a KeyCursor for key and t across the files in the FileStore.\nfunc (f *FileStore) KeyCursor(key string, t int64, ascending bool) *KeyCursor {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\treturn newKeyCursor(f, key, t, ascending)\n}\n\n// Stats returns the stats of the underlying files, preferring the cached version if it is still valid.\nfunc (f *FileStore) Stats() []FileStat 
{\n\tf.mu.RLock()\n\tif len(f.lastFileStats) > 0 {\n\t\tdefer f.mu.RUnlock()\n\t\treturn f.lastFileStats\n\t}\n\tf.mu.RUnlock()\n\n\t// The file stats cache is invalid due to changes to files. Need to\n\t// recalculate.\n\tf.mu.Lock()\n\n\t// If lastFileStats's capacity is far away from the number of entries\n\t// we need to add, then we'll reallocate.\n\tif cap(f.lastFileStats) < len(f.files)/2 {\n\t\tf.lastFileStats = make([]FileStat, 0, len(f.files))\n\t}\n\n\tfor _, fd := range f.files {\n\t\tf.lastFileStats = append(f.lastFileStats, fd.Stats())\n\t}\n\tdefer f.mu.Unlock()\n\treturn f.lastFileStats\n}\n\n// ReplaceWithCallback replaces oldFiles with newFiles and calls updatedFn with the files to be added the FileStore.\nfunc (f *FileStore) ReplaceWithCallback(oldFiles, newFiles []string, updatedFn func(r []TSMFile)) error {\n\treturn f.replace(oldFiles, newFiles, updatedFn)\n}\n\n// Replace replaces oldFiles with newFiles.\nfunc (f *FileStore) Replace(oldFiles, newFiles []string) error {\n\treturn f.replace(oldFiles, newFiles, nil)\n}\n\nfunc (f *FileStore) replace(oldFiles, newFiles []string, updatedFn func(r []TSMFile)) error {\n\tif len(oldFiles) == 0 && len(newFiles) == 0 {\n\t\treturn nil\n\t}\n\n\tf.mu.RLock()\n\tmaxTime := f.lastModified\n\tf.mu.RUnlock()\n\n\tupdated := make([]TSMFile, 0, len(newFiles))\n\n\t// Rename all the new files to make them live on restart\n\tfor _, file := range newFiles {\n\t\tvar newName = file\n\t\tif strings.HasSuffix(file, \".tmp\") {\n\t\t\t// The new TSM files have a tmp extension.  
First rename them.\n\t\t\tnewName = file[:len(file)-4]\n\t\t\tif err := os.Rename(file, newName); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tfd, err := os.Open(newName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Keep track of the new mod time\n\t\tif stat, err := fd.Stat(); err == nil {\n\t\t\tif stat.ModTime().UTC().After(maxTime) {\n\t\t\t\tmaxTime = stat.ModTime().UTC()\n\t\t\t}\n\t\t}\n\n\t\ttsm, err := NewTSMReader(fd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tupdated = append(updated, tsm)\n\t}\n\n\tif updatedFn != nil {\n\t\tupdatedFn(updated)\n\t}\n\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\t// Copy the current set of active files while we rename\n\t// and load the new files.  We copy the pointers here to minimize\n\t// the time that locks are held as well as to ensure that the replacement\n\t// is atomic.©\n\n\tupdated = append(updated, f.files...)\n\n\t// We need to prune our set of active files now\n\tvar active, inuse []TSMFile\n\tfor _, file := range updated {\n\t\tkeep := true\n\t\tfor _, remove := range oldFiles {\n\t\t\tif remove == file.Path() {\n\t\t\t\tkeep = false\n\n\t\t\t\t// If queries are running against this file, then we need to move it out of the\n\t\t\t\t// way and let them complete.  We'll then delete the original file to avoid\n\t\t\t\t// blocking callers upstream.  If the process crashes, the temp file is\n\t\t\t\t// cleaned up at startup automatically.\n\t\t\t\tif file.InUse() {\n\t\t\t\t\t// Copy all the tombstones related to this TSM file\n\t\t\t\t\tvar deletes []string\n\t\t\t\t\tfor _, t := range file.TombstoneFiles() {\n\t\t\t\t\t\tdeletes = append(deletes, t.Path)\n\t\t\t\t\t}\n\t\t\t\t\tdeletes = append(deletes, file.Path())\n\n\t\t\t\t\t// Rename the TSM file used by this reader\n\t\t\t\t\ttempPath := file.Path() + \".tmp\"\n\t\t\t\t\tif err := file.Rename(tempPath); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\n\t\t\t\t\t// Remove the old file and tombstones.  
We can't use the normal TSMReader.Remove()\n\t\t\t\t\t// because it now refers to our temp file which we can't remove.\n\t\t\t\t\tfor _, f := range deletes {\n\t\t\t\t\t\tif err := os.RemoveAll(f); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tinuse = append(inuse, file)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif err := file.Close(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tif err := file.Remove(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif keep {\n\t\t\tactive = append(active, file)\n\t\t}\n\t}\n\n\tif err := syncDir(f.dir); err != nil {\n\t\treturn err\n\t}\n\n\t// Tell the purger about our in-use files we need to remove\n\tf.purger.add(inuse)\n\n\t// If times didn't change (which can happen since file mod times are second level),\n\t// then add a ns to the time to ensure that lastModified changes since files on disk\n\t// actually did change\n\tif maxTime.Equal(f.lastModified) {\n\t\tmaxTime = maxTime.UTC().Add(1)\n\t}\n\n\tf.lastModified = maxTime.UTC()\n\n\tf.lastFileStats = nil\n\tf.files = active\n\tsort.Sort(tsmReaders(f.files))\n\tatomic.StoreInt64(&f.stats.FileCount, int64(len(f.files)))\n\n\t// Recalculate the disk size stat\n\tvar totalSize int64\n\tfor _, file := range f.files {\n\t\ttotalSize += int64(file.Size())\n\t\tfor _, ts := range file.TombstoneFiles() {\n\t\t\ttotalSize += int64(ts.Size)\n\t\t}\n\n\t}\n\tatomic.StoreInt64(&f.stats.DiskBytes, totalSize)\n\n\treturn nil\n}\n\n// LastModified returns the last time the file store was updated with new\n// TSM files or a delete.\nfunc (f *FileStore) LastModified() time.Time {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\treturn f.lastModified\n}\n\n// BlockCount returns number of values stored in the block at location idx\n// in the file at path.  If path does not match any file in the store, 0 is\n// returned.  
If idx is out of range for the number of blocks in the file,\n// 0 is returned.\nfunc (f *FileStore) BlockCount(path string, idx int) int {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tif idx < 0 {\n\t\treturn 0\n\t}\n\n\tfor _, fd := range f.files {\n\t\tif fd.Path() == path {\n\t\t\titer := fd.BlockIterator()\n\t\t\tfor i := 0; i < idx; i++ {\n\t\t\t\tif !iter.Next() {\n\t\t\t\t\treturn 0\n\t\t\t\t}\n\t\t\t}\n\t\t\t_, _, _, _, _, block, _ := iter.Read()\n\t\t\treturn BlockCount(block)\n\t\t}\n\t}\n\treturn 0\n}\n\n// walkFiles calls fn for each file in filestore in parallel.\nfunc (f *FileStore) walkFiles(fn func(f TSMFile) error) error {\n\t// Copy the current TSM files to prevent a slow walker from\n\t// blocking other operations.\n\tf.mu.RLock()\n\tfiles := make([]TSMFile, len(f.files))\n\tcopy(files, f.files)\n\tf.mu.RUnlock()\n\n\t// struct to hold the result of opening each reader in a goroutine\n\terrC := make(chan error, len(files))\n\tfor _, f := range files {\n\t\tgo func(tsm TSMFile) {\n\t\t\tif err := fn(tsm); err != nil {\n\t\t\t\terrC <- fmt.Errorf(\"file %s: %s\", tsm.Path(), err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terrC <- nil\n\t\t}(f)\n\t}\n\n\tfor i := 0; i < cap(errC); i++ {\n\t\tres := <-errC\n\t\tif res != nil {\n\t\t\treturn res\n\t\t}\n\t}\n\treturn nil\n}\n\n// locations returns the files and index blocks for a key and time.  
ascending indicates\n// whether the key will be scan in ascending time order or descenging time order.\n// This function assumes the read-lock has been taken.\nfunc (f *FileStore) locations(key string, t int64, ascending bool) []*location {\n\tfilesSnapshot := make([]TSMFile, len(f.files))\n\tfor i := range f.files {\n\t\tfilesSnapshot[i] = f.files[i]\n\t}\n\n\tvar entries []IndexEntry\n\tlocations := make([]*location, 0, len(filesSnapshot))\n\tfor _, fd := range filesSnapshot {\n\t\tminTime, maxTime := fd.TimeRange()\n\n\t\ttombstones := fd.TombstoneRange(key)\n\t\t// If we ascending and the max time of the file is before where we want to start\n\t\t// skip it.\n\t\tif ascending && maxTime < t {\n\t\t\tcontinue\n\t\t\t// If we are descending and the min time of the file is after where we want to start,\n\t\t\t// then skip it.\n\t\t} else if !ascending && minTime > t {\n\t\t\tcontinue\n\t\t}\n\n\t\t// This file could potential contain points we are looking for so find the blocks for\n\t\t// the given key.\n\t\tfd.ReadEntries(key, &entries)\n\t\tfor _, ie := range entries {\n\n\t\t\t// Skip any blocks only contain values that are tombstoned.\n\t\t\tvar skip bool\n\t\t\tfor _, t := range tombstones {\n\t\t\t\tif t.Min <= ie.MinTime && t.Max >= ie.MaxTime {\n\t\t\t\t\tskip = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif skip {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// If we ascending and the max time of a block is before where we are looking, skip\n\t\t\t// it since the data is out of our range\n\t\t\tif ascending && ie.MaxTime < t {\n\t\t\t\tcontinue\n\t\t\t\t// If we descending and the min time of a block is after where we are looking, skip\n\t\t\t\t// it since the data is out of our range\n\t\t\t} else if !ascending && ie.MinTime > t {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlocation := &location{\n\t\t\t\tr:     fd,\n\t\t\t\tentry: ie,\n\t\t\t}\n\n\t\t\tif ascending {\n\t\t\t\t// For an ascending cursor, mark everything before the seek time as read\n\t\t\t\t// so 
we can filter it out at query time\n\t\t\t\tlocation.readMin = math.MinInt64\n\t\t\t\tlocation.readMax = t - 1\n\t\t\t} else {\n\t\t\t\t// For an ascending cursort, mark everything after the seek time as read\n\t\t\t\t// so we can filter it out at query time\n\t\t\t\tlocation.readMin = t + 1\n\t\t\t\tlocation.readMax = math.MaxInt64\n\t\t\t}\n\t\t\t// Otherwise, add this file and block location\n\t\t\tlocations = append(locations, location)\n\t\t}\n\t}\n\treturn locations\n}\n\n// CreateSnapshot creates hardlinks for all tsm and tombstone files\n// in the path provided.\nfunc (f *FileStore) CreateSnapshot() (string, error) {\n\tf.traceLogger.Info(fmt.Sprintf(\"Creating snapshot in %s\", f.dir))\n\tfiles := f.Files()\n\n\tf.mu.Lock()\n\tf.currentTempDirID += 1\n\tf.mu.Unlock()\n\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\t// get a tmp directory name\n\ttmpPath := fmt.Sprintf(\"%s/%d.tmp\", f.dir, f.currentTempDirID)\n\terr := os.Mkdir(tmpPath, 0777)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, tsmf := range files {\n\t\tnewpath := filepath.Join(tmpPath, filepath.Base(tsmf.Path()))\n\t\tif err := os.Link(tsmf.Path(), newpath); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"error creating tsm hard link: %q\", err)\n\t\t}\n\t\t// Check for tombstones and link those as well\n\t\tfor _, tf := range tsmf.TombstoneFiles() {\n\t\t\tnewpath := filepath.Join(tmpPath, filepath.Base(tf.Path))\n\t\t\tif err := os.Link(tf.Path, newpath); err != nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"error creating tombstone hard link: %q\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn tmpPath, nil\n}\n\n// ParseTSMFileName parses the generation and sequence from a TSM file name.\nfunc ParseTSMFileName(name string) (int, int, error) {\n\tbase := filepath.Base(name)\n\tidx := strings.Index(base, \".\")\n\tif idx == -1 {\n\t\treturn 0, 0, fmt.Errorf(\"file %s is named incorrectly\", name)\n\t}\n\n\tid := base[:idx]\n\n\tidx = strings.Index(id, \"-\")\n\tif idx == -1 {\n\t\treturn 0, 0, 
fmt.Errorf(\"file %s is named incorrectly\", name)\n\t}\n\n\tgeneration, err := strconv.ParseUint(id[:idx], 10, 32)\n\tif err != nil {\n\t\treturn 0, 0, fmt.Errorf(\"file %s is named incorrectly\", name)\n\t}\n\n\tsequence, err := strconv.ParseUint(id[idx+1:], 10, 32)\n\tif err != nil {\n\t\treturn 0, 0, fmt.Errorf(\"file %s is named incorrectly\", name)\n\t}\n\n\treturn int(generation), int(sequence), nil\n}\n\n// KeyCursor allows iteration through keys in a set of files within a FileStore.\ntype KeyCursor struct {\n\tkey string\n\tfs  *FileStore\n\n\t// seeks is all the file locations that we need to return during iteration.\n\tseeks []*location\n\n\t// current is the set of blocks possibly containing the next set of points.\n\t// Normally this is just one entry, but there may be multiple if points have\n\t// been overwritten.\n\tcurrent []*location\n\tbuf     []Value\n\n\t// pos is the index within seeks.  Based on ascending, it will increment or\n\t// decrement through the size of seeks slice.\n\tpos       int\n\tascending bool\n\n\t// duplicates is a hint that there are overlapping blocks for this key in\n\t// multiple files (e.g. 
points have been overwritten but not fully compacted)\n\t// If this is true, we need to scan the duplicate blocks and dedup the points\n\t// as query time until they are compacted.\n\tduplicates bool\n\n\t// The distinct set of TSM files references by the cursor\n\trefs map[string]TSMFile\n}\n\ntype location struct {\n\tr     TSMFile\n\tentry IndexEntry\n\n\treadMin, readMax int64\n}\n\nfunc (l *location) read() bool {\n\treturn l.readMin <= l.entry.MinTime && l.readMax >= l.entry.MaxTime\n}\n\nfunc (l *location) markRead(min, max int64) {\n\tif min < l.readMin {\n\t\tl.readMin = min\n\t}\n\n\tif max > l.readMax {\n\t\tl.readMax = max\n\t}\n}\n\ntype descLocations []*location\n\n// Sort methods\nfunc (a descLocations) Len() int      { return len(a) }\nfunc (a descLocations) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a descLocations) Less(i, j int) bool {\n\tif a[i].entry.OverlapsTimeRange(a[j].entry.MinTime, a[j].entry.MaxTime) {\n\t\treturn a[i].r.Path() < a[j].r.Path()\n\t}\n\treturn a[i].entry.MaxTime < a[j].entry.MaxTime\n}\n\ntype ascLocations []*location\n\n// Sort methods\nfunc (a ascLocations) Len() int      { return len(a) }\nfunc (a ascLocations) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a ascLocations) Less(i, j int) bool {\n\tif a[i].entry.OverlapsTimeRange(a[j].entry.MinTime, a[j].entry.MaxTime) {\n\t\treturn a[i].r.Path() < a[j].r.Path()\n\t}\n\treturn a[i].entry.MinTime < a[j].entry.MinTime\n}\n\n// newKeyCursor returns a new instance of KeyCursor.\n// This function assumes the read-lock has been taken.\nfunc newKeyCursor(fs *FileStore, key string, t int64, ascending bool) *KeyCursor {\n\tc := &KeyCursor{\n\t\tkey:       key,\n\t\tfs:        fs,\n\t\tseeks:     fs.locations(key, t, ascending),\n\t\tascending: ascending,\n\t}\n\tc.refs = make(map[string]TSMFile, len(c.seeks))\n\n\tc.duplicates = c.hasOverlappingBlocks()\n\n\tif ascending {\n\t\tsort.Sort(ascLocations(c.seeks))\n\t} else 
{\n\t\tsort.Sort(descLocations(c.seeks))\n\t}\n\n\t// Determine the distinct set of TSM files in use and mark then as in-use\n\tfor _, f := range c.seeks {\n\t\tif _, ok := c.refs[f.r.Path()]; !ok {\n\t\t\tf.r.Ref()\n\t\t\tc.refs[f.r.Path()] = f.r\n\t\t}\n\t}\n\n\tc.seek(t)\n\treturn c\n}\n\n// Close removes all references on the cursor.\nfunc (c *KeyCursor) Close() {\n\t// Remove all of our in-use references since we're done\n\tfor _, f := range c.refs {\n\t\tf.Unref()\n\t}\n\n\tc.buf = nil\n\tc.seeks = nil\n\tc.fs = nil\n\tc.current = nil\n}\n\n// hasOverlappingBlocks returns true if blocks have overlapping time ranges.\n// This result is computed once and stored as the \"duplicates\" field.\nfunc (c *KeyCursor) hasOverlappingBlocks() bool {\n\tif len(c.seeks) == 0 {\n\t\treturn false\n\t}\n\n\tfor i := 1; i < len(c.seeks); i++ {\n\t\tprev := c.seeks[i-1]\n\t\tcur := c.seeks[i]\n\t\tif prev.entry.MaxTime >= cur.entry.MinTime {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// seek positions the cursor at the given time.\nfunc (c *KeyCursor) seek(t int64) {\n\tif len(c.seeks) == 0 {\n\t\treturn\n\t}\n\tc.current = nil\n\n\tif c.ascending {\n\t\tc.seekAscending(t)\n\t} else {\n\t\tc.seekDescending(t)\n\t}\n}\n\nfunc (c *KeyCursor) seekAscending(t int64) {\n\tfor i, e := range c.seeks {\n\t\tif t < e.entry.MinTime || e.entry.Contains(t) {\n\t\t\t// Record the position of the first block matching our seek time\n\t\t\tif len(c.current) == 0 {\n\t\t\t\tc.pos = i\n\t\t\t}\n\n\t\t\tc.current = append(c.current, e)\n\n\t\t\t// Exit if we don't have duplicates.\n\t\t\t// Otherwise, keep looking for additional blocks containing this point.\n\t\t\tif !c.duplicates {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *KeyCursor) seekDescending(t int64) {\n\tfor i := len(c.seeks) - 1; i >= 0; i-- {\n\t\te := c.seeks[i]\n\t\tif t > e.entry.MaxTime || e.entry.Contains(t) {\n\t\t\t// Record the position of the first block matching our seek time\n\t\t\tif len(c.current) == 0 
{\n\t\t\t\tc.pos = i\n\t\t\t}\n\t\t\tc.current = append(c.current, e)\n\n\t\t\t// Exit if we don't have duplicates.\n\t\t\t// Otherwise, keep looking for additional blocks containing this point.\n\t\t\tif !c.duplicates {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Next moves the cursor to the next position.\n// Data should be read by the ReadBlock functions.\nfunc (c *KeyCursor) Next() {\n\tif len(c.current) == 0 {\n\t\treturn\n\t}\n\t// Do we still have unread values in the current block\n\tif !c.current[0].read() {\n\t\treturn\n\t}\n\tc.current = c.current[:0]\n\tif c.ascending {\n\t\tc.nextAscending()\n\t} else {\n\t\tc.nextDescending()\n\t}\n}\n\nfunc (c *KeyCursor) nextAscending() {\n\tfor {\n\t\tc.pos++\n\t\tif c.pos >= len(c.seeks) {\n\t\t\treturn\n\t\t} else if !c.seeks[c.pos].read() {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Append the first matching block\n\tif len(c.current) == 0 {\n\t\tc.current = append(c.current, nil)\n\t} else {\n\t\tc.current = c.current[:1]\n\t}\n\tc.current[0] = c.seeks[c.pos]\n\n\t// We're done if there are no overlapping blocks.\n\tif !c.duplicates {\n\t\treturn\n\t}\n\n\t// If we have ovelapping blocks, append all their values so we can dedup\n\tfor i := c.pos + 1; i < len(c.seeks); i++ {\n\t\tif c.seeks[i].read() {\n\t\t\tcontinue\n\t\t}\n\n\t\tc.current = append(c.current, c.seeks[i])\n\t}\n}\n\nfunc (c *KeyCursor) nextDescending() {\n\tfor {\n\t\tc.pos--\n\t\tif c.pos < 0 {\n\t\t\treturn\n\t\t} else if !c.seeks[c.pos].read() {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Append the first matching block\n\tif len(c.current) == 0 {\n\t\tc.current = make([]*location, 1)\n\t} else {\n\t\tc.current = c.current[:1]\n\t}\n\tc.current[0] = c.seeks[c.pos]\n\n\t// We're done if there are no overlapping blocks.\n\tif !c.duplicates {\n\t\treturn\n\t}\n\n\t// If we have ovelapping blocks, append all their values so we can dedup\n\tfor i := c.pos; i >= 0; i-- {\n\t\tif c.seeks[i].read() {\n\t\t\tcontinue\n\t\t}\n\t\tc.current = append(c.current, 
c.seeks[i])\n\t}\n}\n\nfunc (c *KeyCursor) filterFloatValues(tombstones []TimeRange, values FloatValues) FloatValues {\n\tfor _, t := range tombstones {\n\t\tvalues = values.Exclude(t.Min, t.Max)\n\t}\n\treturn values\n}\n\nfunc (c *KeyCursor) filterIntegerValues(tombstones []TimeRange, values IntegerValues) IntegerValues {\n\tfor _, t := range tombstones {\n\t\tvalues = values.Exclude(t.Min, t.Max)\n\t}\n\treturn values\n}\n\nfunc (c *KeyCursor) filterStringValues(tombstones []TimeRange, values StringValues) StringValues {\n\tfor _, t := range tombstones {\n\t\tvalues = values.Exclude(t.Min, t.Max)\n\t}\n\treturn values\n}\n\nfunc (c *KeyCursor) filterBooleanValues(tombstones []TimeRange, values BooleanValues) BooleanValues {\n\tfor _, t := range tombstones {\n\t\tvalues = values.Exclude(t.Min, t.Max)\n\t}\n\treturn values\n}\n\ntype purger struct {\n\tmu        sync.RWMutex\n\tfileStore *FileStore\n\tfiles     map[string]TSMFile\n\trunning   bool\n\n\tlogger zap.Logger\n}\n\nfunc (p *purger) add(files []TSMFile) {\n\tp.mu.Lock()\n\tfor _, f := range files {\n\t\tp.files[f.Path()] = f\n\t}\n\tp.mu.Unlock()\n\tp.purge()\n}\n\nfunc (p *purger) purge() {\n\tp.mu.Lock()\n\tif p.running {\n\t\tp.mu.Unlock()\n\t\treturn\n\t}\n\tp.running = true\n\tp.mu.Unlock()\n\n\tgo func() {\n\t\tfor {\n\t\t\tp.mu.Lock()\n\t\t\tfor k, v := range p.files {\n\t\t\t\tif !v.InUse() {\n\t\t\t\t\tif err := v.Close(); err != nil {\n\t\t\t\t\t\tp.logger.Info(fmt.Sprintf(\"purge: close file: %v\", err))\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif err := v.Remove(); err != nil {\n\t\t\t\t\t\tp.logger.Info(fmt.Sprintf(\"purge: remove file: %v\", err))\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tdelete(p.files, k)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(p.files) == 0 {\n\t\t\t\tp.running = false\n\t\t\t\tp.mu.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tp.mu.Unlock()\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}()\n}\n\ntype tsmReaders []TSMFile\n\nfunc (a tsmReaders) Len() int           { 
return len(a) }\nfunc (a tsmReaders) Less(i, j int) bool { return a[i].Path() < a[j].Path() }\nfunc (a tsmReaders) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\n\ntype stream struct {\n\tc chan seriesKey\n\tv seriesKey\n}\n\ntype seriesKey struct {\n\tkey []byte\n\ttyp byte\n}\n\n// merge merges multiple channels in parallel by recursively splitting the channels\n// until a simple two-way merge can be performed.\nfunc merge(c ...chan seriesKey) chan seriesKey {\n\tif len(c) == 0 {\n\t\tm := make(chan seriesKey)\n\t\tclose(m)\n\t\treturn m\n\t}\n\n\t// Just one, drain it\n\tif len(c) == 1 {\n\t\tm := make(chan seriesKey)\n\t\tgo func() {\n\t\t\tif c[0] != nil {\n\t\t\t\tfor v := range c[0] {\n\t\t\t\t\tm <- v\n\t\t\t\t}\n\t\t\t}\n\t\t\tclose(m)\n\t\t}()\n\t\treturn m\n\t}\n\n\t// More than two, split them up recursively\n\tif len(c) > 2 {\n\t\ta := merge(c[:len(c)/2]...)\n\t\tb := merge(c[len(c)/2:]...)\n\t\treturn merge(a, b)\n\t}\n\n\t// Merge the two streams and drop duplicates between then\n\tm := make(chan seriesKey, 1)\n\ta, b := c[0], c[1]\n\tgo func() {\n\t\t// buffer a and b values\n\t\tvar av, bv seriesKey\n\t\tif a != nil {\n\t\t\tav = <-a\n\t\t}\n\t\tif b != nil {\n\t\t\tbv = <-b\n\t\t}\n\t\tfor {\n\t\t\tif len(av.key) == 0 && len(bv.key) == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif len(av.key) == 0 {\n\t\t\t\tm <- bv\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif len(bv.key) == 0 {\n\t\t\t\tm <- av\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tcmp := bytes.Compare(av.key, bv.key)\n\t\t\tif cmp < 0 {\n\t\t\t\t// Send a's value, and re-prime a buffer\n\t\t\t\tm <- av\n\t\t\t\tav = <-a\n\t\t\t} else if cmp == 0 {\n\t\t\t\t// Send a's value, and re-prime a and b buffers\n\t\t\t\tm <- av\n\t\t\t\tav = <-a\n\t\t\t\tbv = <-b\n\t\t\t} else {\n\t\t\t\t// Send b's value, and re-prime b buffer\n\t\t\t\tm <- bv\n\t\t\t\tbv = <-b\n\t\t\t}\n\t\t}\n\n\t\tif a != nil {\n\t\t\tfor av := range a {\n\t\t\t\tm <- av\n\t\t\t}\n\t\t}\n\n\t\tif b != nil {\n\t\t\tfor bv := range b {\n\t\t\t\tm <- 
bv\n\t\t\t}\n\t\t}\n\t\tclose(m)\n\t}()\n\treturn m\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_internal_test.go",
    "content": "package tsm1\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc TestMergeSeriesKey_Single(t *testing.T) {\n\ta := make(chan seriesKey, 5)\n\tfor i := 0; i < cap(a); i++ {\n\t\ta <- seriesKey{key: []byte(fmt.Sprintf(\"%d\", i))}\n\t}\n\n\tmerged := merge(a)\n\tclose(a)\n\n\texp := []string{\"0\", \"1\", \"2\", \"3\", \"4\"}\n\tfor v := range merged {\n\t\tif got, exp := v, exp[0]; !bytes.Equal(got.key, []byte(exp)) {\n\t\t\tt.Fatalf(\"value mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t\texp = exp[1:]\n\t}\n\n\tif len(exp) > 0 {\n\t\tt.Fatalf(\"missed values: %v\", exp)\n\t}\n}\n\nfunc TestMergeSeriesKey_Nil(t *testing.T) {\n\tmerged := merge(nil)\n\n\tfor v := range merged {\n\t\tt.Fatalf(\"value mismatch: got %v, exp nil\", v)\n\t}\n\n\tmerged = merge(nil, nil)\n\tfor v := range merged {\n\t\tt.Fatalf(\"value mismatch: got %v, exp nil\", v)\n\t}\n\n}\n\nfunc TestMergeSeriesKey_Duplicates(t *testing.T) {\n\ta := make(chan seriesKey, 5)\n\tb := make(chan seriesKey, 5)\n\n\tfor i := 0; i < cap(a); i++ {\n\t\ta <- seriesKey{key: []byte(fmt.Sprintf(\"%d\", i))}\n\t\tb <- seriesKey{key: []byte(fmt.Sprintf(\"%d\", i))}\n\t}\n\n\tmerged := merge(a, b)\n\tclose(a)\n\tclose(b)\n\n\texp := []string{\"0\", \"1\", \"2\", \"3\", \"4\"}\n\tfor v := range merged {\n\t\tif len(exp) == 0 {\n\t\t\tt.Fatalf(\"more values than expected: got %v\", v)\n\t\t}\n\n\t\tif got, exp := v, exp[0]; !bytes.Equal(got.key, []byte(exp)) {\n\t\t\tt.Fatalf(\"value mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\texp = exp[1:]\n\t}\n\n\tif len(exp) > 0 {\n\t\tt.Fatalf(\"missed values: %v\", exp)\n\t}\n}\n\nfunc TestMergeSeriesKey_Alternating(t *testing.T) {\n\ta := make(chan seriesKey, 2)\n\tb := make(chan seriesKey, 2)\n\n\tfor i := 0; i < cap(a); i++ {\n\t\ta <- seriesKey{key: []byte(fmt.Sprintf(\"%d\", i*2))}\n\t\tb <- seriesKey{key: []byte(fmt.Sprintf(\"%d\", i*2+1))}\n\t}\n\n\tmerged := merge(a, b)\n\tclose(a)\n\tclose(b)\n\n\texp := []string{\"0\", \"1\", \"2\", 
\"3\"}\n\tfor v := range merged {\n\t\tif len(exp) == 0 {\n\t\t\tt.Fatalf(\"more values than expected: got %v\", v)\n\t\t}\n\n\t\tif got, exp := v, exp[0]; !bytes.Equal(got.key, []byte(exp)) {\n\t\t\tt.Fatalf(\"value mismatch: got %v, exp %v\", string(got.key), exp)\n\t\t}\n\t\texp = exp[1:]\n\t}\n\n\tif len(exp) > 0 {\n\t\tt.Fatalf(\"missed values: %v\", exp)\n\t}\n}\n\nfunc TestMergeSeriesKey_AlternatingDuplicates(t *testing.T) {\n\ta := make(chan seriesKey, 2)\n\tb := make(chan seriesKey, 2)\n\tc := make(chan seriesKey, 2)\n\n\tfor i := 0; i < cap(a); i++ {\n\t\ta <- seriesKey{key: []byte(fmt.Sprintf(\"%d\", i*2))}\n\t\tb <- seriesKey{key: []byte(fmt.Sprintf(\"%d\", i*2+1))}\n\t\tc <- seriesKey{key: []byte(fmt.Sprintf(\"%d\", i*2))}\n\t}\n\n\tmerged := merge(a, b, c)\n\tclose(a)\n\tclose(b)\n\tclose(c)\n\n\texp := []string{\"0\", \"1\", \"2\", \"3\"}\n\tfor v := range merged {\n\t\tif len(exp) == 0 {\n\t\t\tt.Fatalf(\"more values than expected: got %v\", v)\n\t\t}\n\n\t\tif got, exp := v, exp[0]; !bytes.Equal(got.key, []byte(exp)) {\n\t\t\tt.Fatalf(\"value mismatch: got %v, exp %v\", string(got.key), exp)\n\t\t}\n\t\texp = exp[1:]\n\t}\n\n\tif len(exp) > 0 {\n\t\tt.Fatalf(\"missed values: %v\", exp)\n\t}\n}\n\nfunc TestMergeSeriesKey_Unbuffered(t *testing.T) {\n\ta := make(chan seriesKey)\n\tb := make(chan seriesKey)\n\n\tgo func() {\n\t\tfor i := 0; i < 2; i++ {\n\t\t\ta <- seriesKey{key: []byte(fmt.Sprintf(\"%d\", i*2))}\n\t\t}\n\t\tclose(a)\n\t}()\n\n\tgo func() {\n\t\tfor i := 0; i < 2; i++ {\n\t\t\tb <- seriesKey{key: []byte(fmt.Sprintf(\"%d\", i*2+1))}\n\t\t}\n\t\tclose(b)\n\t}()\n\n\tmerged := merge(a, b)\n\n\texp := []string{\"0\", \"1\", \"2\", \"3\"}\n\tfor v := range merged {\n\t\tif len(exp) == 0 {\n\t\t\tt.Fatalf(\"more values than expected: got %v\", v)\n\t\t}\n\n\t\tif got, exp := v, exp[0]; !bytes.Equal(got.key, []byte(exp)) {\n\t\t\tt.Fatalf(\"value mismatch: got %v, exp %v\", string(got.key), exp)\n\t\t}\n\t\texp = exp[1:]\n\t}\n\n\tif len(exp) 
> 0 {\n\t\tt.Fatalf(\"missed values: %v\", exp)\n\t}\n}\n\nfunc TestMergeSeriesKey_OneEmpty(t *testing.T) {\n\ta := make(chan seriesKey)\n\tb := make(chan seriesKey)\n\n\tgo func() {\n\t\tfor i := 0; i < 2; i++ {\n\t\t\ta <- seriesKey{key: []byte(fmt.Sprintf(\"%d\", i*2))}\n\t\t}\n\t\tclose(a)\n\t}()\n\n\tclose(b)\n\tmerged := merge(a, b)\n\n\texp := []string{\"0\", \"2\"}\n\tfor v := range merged {\n\t\tif len(exp) == 0 {\n\t\t\tt.Fatalf(\"more values than expected: got %v\", v)\n\t\t}\n\n\t\tif got, exp := v, exp[0]; !bytes.Equal(got.key, []byte(exp)) {\n\t\t\tt.Fatalf(\"value mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\t\texp = exp[1:]\n\t}\n\n\tif len(exp) > 0 {\n\t\tt.Fatalf(\"missed values: %v\", exp)\n\t}\n}\n\nfunc TestMergeSeriesKey_Overlapping(t *testing.T) {\n\ta := make(chan seriesKey)\n\tb := make(chan seriesKey)\n\tc := make(chan seriesKey)\n\n\tgo func() {\n\t\tfor i := 0; i < 3; i++ {\n\t\t\ta <- seriesKey{key: []byte(fmt.Sprintf(\"%d\", i))}\n\t\t}\n\t\tclose(a)\n\t}()\n\n\tgo func() {\n\t\tfor i := 4; i < 7; i++ {\n\t\t\tb <- seriesKey{key: []byte(fmt.Sprintf(\"%d\", i))}\n\t\t}\n\t\tclose(b)\n\t}()\n\n\tgo func() {\n\t\tfor i := 0; i < 9; i++ {\n\t\t\tc <- seriesKey{key: []byte(fmt.Sprintf(\"%d\", i))}\n\t\t}\n\t\tclose(c)\n\t}()\n\tmerged := merge(a, b, c)\n\n\texp := []string{\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\"}\n\tfor v := range merged {\n\t\tif len(exp) == 0 {\n\t\t\tt.Fatalf(\"more values than expected: got %v\", v)\n\t\t}\n\n\t\tif got, exp := v, exp[0]; !bytes.Equal(got.key, []byte(exp)) {\n\t\t\tt.Fatalf(\"value mismatch: got %v, exp %v\", string(got.key), exp)\n\t\t}\n\t\texp = exp[1:]\n\t}\n\n\tif len(exp) > 0 {\n\t\tt.Fatalf(\"missed values: %v\", exp)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_test.go",
    "content": "package tsm1_test\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/tsdb/engine/tsm1\"\n\t\"github.com/uber-go/zap\"\n)\n\nfunc TestFileStore_Read(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, 2.0)}},\n\t\tkeyValues{\"mem\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\t// Search for an entry that exists in the second file\n\tvalues, err := fs.Read(\"cpu\", 1)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := data[1]\n\tif got, exp := len(values), len(exp.values); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp.values {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestFileStore_SeekToAsc_FromStart(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, 2.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 3.0)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tbuf := make([]tsm1.FloatValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 0, true)\n\t// Search for an entry that exists in the second file\n\tvalues, err := 
c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := data[0]\n\tif got, exp := len(values), len(exp.values); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp.values {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestFileStore_SeekToAsc_Duplicate(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 2.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 3.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 4.0)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tbuf := make([]tsm1.FloatValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 0, true)\n\t// Search for an entry that exists in the second file\n\tvalues, err := c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[1].values[0],\n\t}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\t// Check that calling Next will dedupe points\n\tc.Next()\n\tvalues, err = c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = []tsm1.Value{\n\t\tdata[3].values[0],\n\t}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: 
got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texp = nil\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestFileStore_SeekToAsc_BeforeStart(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, 1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 2.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(3, 3.0)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\t// Search for an entry that exists in the second file\n\tbuf := make([]tsm1.FloatValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 0, true)\n\tvalues, err := c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := data[0]\n\tif got, exp := len(values), len(exp.values); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp.values {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\n// Tests that seeking and reading all blocks that contain overlapping points does\n// not skip any blocks.\nfunc TestFileStore_SeekToAsc_BeforeStart_OverlapFloat(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 0.0), tsm1.NewValue(1, 
1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 2.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(3, 3.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 4.0), tsm1.NewValue(2, 7.0)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\t// Search for an entry that exists in the second file\n\tbuf := make([]tsm1.FloatValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 0, true)\n\tvalues, err := c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[3].values[0],\n\t\tdata[0].values[1],\n\t\tdata[3].values[1],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = []tsm1.Value{\n\t\tdata[2].values[0],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\n// Tests that seeking and reading all blocks that contain overlapping points does\n// not skip any blocks.\nfunc TestFileStore_SeekToAsc_BeforeStart_OverlapInteger(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, int64(0)), tsm1.NewValue(1, int64(1))}},\n\t\tkeyValues{\"cpu\", 
[]tsm1.Value{tsm1.NewValue(2, int64(2))}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(3, int64(3))}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, int64(4)), tsm1.NewValue(2, int64(7))}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\t// Search for an entry that exists in the second file\n\tbuf := make([]tsm1.IntegerValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 0, true)\n\tvalues, err := c.ReadIntegerBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[3].values[0],\n\t\tdata[0].values[1],\n\t\tdata[3].values[1],\n\t}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadIntegerBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = []tsm1.Value{\n\t\tdata[2].values[0],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\n// Tests that seeking and reading all blocks that contain overlapping points does\n// not skip any blocks.\nfunc TestFileStore_SeekToAsc_BeforeStart_OverlapBoolean(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, true), tsm1.NewValue(1, false)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 
true)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(3, true)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, false), tsm1.NewValue(2, true)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\t// Search for an entry that exists in the second file\n\tbuf := make([]tsm1.BooleanValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 0, true)\n\tvalues, err := c.ReadBooleanBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[3].values[0],\n\t\tdata[0].values[1],\n\t\tdata[3].values[1],\n\t}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadBooleanBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = []tsm1.Value{\n\t\tdata[2].values[0],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\n// Tests that seeking and reading all blocks that contain overlapping points does\n// not skip any blocks.\nfunc TestFileStore_SeekToAsc_BeforeStart_OverlapString(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, \"zero\"), tsm1.NewValue(1, \"one\")}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, \"two\")}},\n\t\tkeyValues{\"cpu\", 
[]tsm1.Value{tsm1.NewValue(3, \"three\")}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, \"four\"), tsm1.NewValue(2, \"seven\")}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\t// Search for an entry that exists in the second file\n\tbuf := make([]tsm1.StringValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 0, true)\n\tvalues, err := c.ReadStringBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[3].values[0],\n\t\tdata[0].values[1],\n\t\tdata[3].values[1],\n\t}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadStringBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = []tsm1.Value{\n\t\tdata[2].values[0],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\n// Tests that blocks with a lower min time in later files are not returned\n// more than once causing unsorted results\nfunc TestFileStore_SeekToAsc_OverlapMinFloat(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, 1.0), tsm1.NewValue(3, 3.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 2.0), tsm1.NewValue(4, 4.0)}},\n\t\tkeyValues{\"cpu\", 
[]tsm1.Value{tsm1.NewValue(0, 0.0), tsm1.NewValue(1, 1.1)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 2.2)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tbuf := make([]tsm1.FloatValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 0, true)\n\t// Search for an entry that exists in the second file\n\tvalues, err := c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[2].values[0],\n\t\tdata[2].values[1],\n\t\tdata[3].values[0],\n\t\tdata[0].values[1],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\t// Check that calling Next will dedupe points\n\tc.Next()\n\tvalues, err = c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = []tsm1.Value{\n\t\tdata[1].values[1],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texp = nil\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\n// Tests that blocks with a lower min time in later files are not returned\n// more than once causing unsorted results\nfunc TestFileStore_SeekToAsc_OverlapMinInteger(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer 
os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, int64(1)), tsm1.NewValue(3, int64(3))}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, int64(2)), tsm1.NewValue(4, int64(4))}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, int64(0)), tsm1.NewValue(1, int64(10))}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, int64(5))}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tbuf := make([]tsm1.IntegerValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 0, true)\n\t// Search for an entry that exists in the second file\n\tvalues, err := c.ReadIntegerBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[2].values[0],\n\t\tdata[2].values[1],\n\t\tdata[3].values[0],\n\t\tdata[0].values[1],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\t// Check that calling Next will dedupe points\n\tc.Next()\n\tvalues, err = c.ReadIntegerBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = []tsm1.Value{\n\t\tdata[1].values[1],\n\t}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadIntegerBlock(&buf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texp = nil\n\tif got, exp := len(values), 
len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\n// Tests that blocks with a lower min time in later files are not returned\n// more than once causing unsorted results\nfunc TestFileStore_SeekToAsc_OverlapMinBoolean(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, true), tsm1.NewValue(3, true)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, true), tsm1.NewValue(4, true)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, true), tsm1.NewValue(1, false)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, false)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tbuf := make([]tsm1.BooleanValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 0, true)\n\t// Search for an entry that exists in the second file\n\tvalues, err := c.ReadBooleanBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[2].values[0],\n\t\tdata[2].values[1],\n\t\tdata[3].values[0],\n\t\tdata[0].values[1],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\t// Check that calling Next will dedupe points\n\tc.Next()\n\tvalues, err = c.ReadBooleanBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = []tsm1.Value{\n\t\tdata[1].values[1],\n\t}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp 
{\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadBooleanBlock(&buf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texp = nil\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\n// Tests that blocks with a lower min time in later files are not returned\n// more than once causing unsorted results\nfunc TestFileStore_SeekToAsc_OverlapMinString(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, \"1.0\"), tsm1.NewValue(3, \"3.0\")}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, \"2.0\"), tsm1.NewValue(4, \"4.0\")}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, \"0.0\"), tsm1.NewValue(1, \"1.1\")}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, \"2.2\")}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tbuf := make([]tsm1.StringValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 0, true)\n\t// Search for an entry that exists in the second file\n\tvalues, err := c.ReadStringBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[2].values[0],\n\t\tdata[2].values[1],\n\t\tdata[3].values[0],\n\t\tdata[0].values[1],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\t// Check that calling Next will dedupe points\n\tc.Next()\n\tvalues, err = 
c.ReadStringBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = []tsm1.Value{\n\t\tdata[1].values[1],\n\t}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadStringBlock(&buf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texp = nil\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestFileStore_SeekToAsc_Middle(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, 1.0),\n\t\t\ttsm1.NewValue(2, 2.0),\n\t\t\ttsm1.NewValue(3, 3.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(4, 4.0)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\t// Search for an entry that exists in the second file\n\tbuf := make([]tsm1.FloatValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 3, true)\n\tvalues, err := c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{data[0].values[2]}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = 
[]tsm1.Value{data[1].values[0]}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n}\n\nfunc TestFileStore_SeekToAsc_End(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, 2.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 3.0)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tbuf := make([]tsm1.FloatValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 2, true)\n\tvalues, err := c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := data[2]\n\tif got, exp := len(values), len(exp.values); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp.values {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestFileStore_SeekToDesc_FromStart(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, 2.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 3.0)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\t// Search for an entry that exists in the 
second file\n\tbuf := make([]tsm1.FloatValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 0, false)\n\tvalues, err := c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\texp := data[0]\n\tif got, exp := len(values), len(exp.values); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp.values {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestFileStore_SeekToDesc_Duplicate(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 4.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 2.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 3.0)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\t// Search for an entry that exists in the second file\n\tbuf := make([]tsm1.FloatValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 2, false)\n\tvalues, err := c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\texp := []tsm1.Value{\n\t\tdata[3].values[0],\n\t}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\texp = []tsm1.Value{\n\t\tdata[1].values[0],\n\t}\n\tif got, exp := len(values), 
len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestFileStore_SeekToDesc_OverlapMaxFloat(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, 1.0), tsm1.NewValue(3, 3.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 2.0), tsm1.NewValue(4, 4.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 0.0), tsm1.NewValue(1, 1.1)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 2.2)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\t// Search for an entry that exists in the second file\n\tbuf := make([]tsm1.FloatValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 5, false)\n\tvalues, err := c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[3].values[0],\n\t\tdata[0].values[1],\n\t\tdata[1].values[1],\n\t}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\texp = []tsm1.Value{\n\n\t\tdata[2].values[0],\n\t\tdata[2].values[1],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif 
got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestFileStore_SeekToDesc_OverlapMaxInteger(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, int64(1)), tsm1.NewValue(3, int64(3))}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, int64(2)), tsm1.NewValue(4, int64(4))}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, int64(0)), tsm1.NewValue(1, int64(10))}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, int64(5))}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\t// Search for an entry that exists in the second file\n\tbuf := make([]tsm1.IntegerValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 5, false)\n\tvalues, err := c.ReadIntegerBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[3].values[0],\n\t\tdata[0].values[1],\n\t\tdata[1].values[1],\n\t}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadIntegerBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\texp = []tsm1.Value{\n\t\tdata[2].values[0],\n\t\tdata[2].values[1],\n\t}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value 
mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestFileStore_SeekToDesc_OverlapMaxBoolean(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, true), tsm1.NewValue(3, true)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, true), tsm1.NewValue(4, true)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, true), tsm1.NewValue(1, false)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, false)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\t// Search for an entry that exists in the second file\n\tbuf := make([]tsm1.BooleanValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 5, false)\n\tvalues, err := c.ReadBooleanBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[3].values[0],\n\t\tdata[0].values[1],\n\t\tdata[1].values[1],\n\t}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadBooleanBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\texp = []tsm1.Value{\n\t\tdata[2].values[0],\n\t\tdata[2].values[1],\n\t}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestFileStore_SeekToDesc_OverlapMaxString(t 
*testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, \"1.0\"), tsm1.NewValue(3, \"3.0\")}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, \"2.0\"), tsm1.NewValue(4, \"4.0\")}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, \"0.0\"), tsm1.NewValue(1, \"1.1\")}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, \"2.2\")}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\t// Search for an entry that exists in the second file\n\tbuf := make([]tsm1.StringValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 5, false)\n\tvalues, err := c.ReadStringBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[3].values[0],\n\t\tdata[0].values[1],\n\t\tdata[1].values[1],\n\t}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadStringBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\texp = []tsm1.Value{\n\t\tdata[2].values[0],\n\t\tdata[2].values[1],\n\t}\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestFileStore_SeekToDesc_AfterEnd(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 
files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, 1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 2.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(3, 3.0)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tbuf := make([]tsm1.FloatValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 4, false)\n\tvalues, err := c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := data[2]\n\tif got, exp := len(values), len(exp.values); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp.values {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestFileStore_SeekToDesc_AfterEnd_OverlapFloat(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 4 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(8, 0.0), tsm1.NewValue(9, 1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 2.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(3, 3.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(3, 4.0), tsm1.NewValue(7, 7.0)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tbuf := make([]tsm1.FloatValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 10, false)\n\tvalues, err := c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[0].values[0],\n\t\tdata[0].values[1],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor 
i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadFloatBlock(&buf)\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = []tsm1.Value{\n\t\tdata[3].values[0],\n\t\tdata[3].values[1],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadFloatBlock(&buf)\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = []tsm1.Value{\n\t\tdata[1].values[0],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadFloatBlock(&buf)\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\tif got, exp := len(values), 0; got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestFileStore_SeekToDesc_AfterEnd_OverlapInteger(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(8, int64(0)), tsm1.NewValue(9, int64(1))}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, int64(2))}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(3, int64(3))}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(3, int64(4)), tsm1.NewValue(10, 
int64(7))}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tbuf := make([]tsm1.IntegerValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 11, false)\n\tvalues, err := c.ReadIntegerBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[3].values[0],\n\t\tdata[0].values[0],\n\t\tdata[0].values[1],\n\t\tdata[3].values[1],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadIntegerBlock(&buf)\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = []tsm1.Value{\n\t\tdata[1].values[0],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadIntegerBlock(&buf)\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\tif got, exp := len(values), 0; got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestFileStore_SeekToDesc_AfterEnd_OverlapBoolean(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(8, true), tsm1.NewValue(9, true)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, true)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(3, 
false)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(3, true), tsm1.NewValue(7, false)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tbuf := make([]tsm1.BooleanValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 11, false)\n\tvalues, err := c.ReadBooleanBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[0].values[0],\n\t\tdata[0].values[1],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadBooleanBlock(&buf)\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = []tsm1.Value{\n\t\tdata[3].values[0],\n\t\tdata[3].values[1],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadBooleanBlock(&buf)\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = []tsm1.Value{\n\t\tdata[1].values[0],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadBooleanBlock(&buf)\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading 
values: %v\", err)\n\t}\n\n\tif got, exp := len(values), 0; got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestFileStore_SeekToDesc_AfterEnd_OverlapString(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(8, \"eight\"), tsm1.NewValue(9, \"nine\")}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, \"two\")}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(3, \"three\")}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(3, \"four\"), tsm1.NewValue(7, \"seven\")}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tbuf := make([]tsm1.StringValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 11, false)\n\tvalues, err := c.ReadStringBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[0].values[0],\n\t\tdata[0].values[1],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadStringBlock(&buf)\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = []tsm1.Value{\n\t\tdata[3].values[0],\n\t\tdata[3].values[1],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = 
c.ReadStringBlock(&buf)\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = []tsm1.Value{\n\t\tdata[1].values[0],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadStringBlock(&buf)\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\tif got, exp := len(values), 0; got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestFileStore_SeekToDesc_Middle(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, 1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{\n\t\t\ttsm1.NewValue(2, 2.0),\n\t\t\ttsm1.NewValue(3, 3.0),\n\t\t\ttsm1.NewValue(4, 4.0)},\n\t\t},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\t// Search for an entry that exists in the second file\n\tbuf := make([]tsm1.FloatValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 3, false)\n\tvalues, err := c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := []tsm1.Value{\n\t\tdata[1].values[0],\n\t\tdata[1].values[1],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\tc.Next()\n\tvalues, err = c.ReadFloatBlock(&buf)\n\n\tif err != nil 
{\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp = []tsm1.Value{\n\t\tdata[0].values[0],\n\t}\n\n\tif got, exp := len(values), len(exp); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n\n\tc.Next()\n\tvalues, err = c.ReadFloatBlock(&buf)\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\tif got, exp := len(values), 0; got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestFileStore_SeekToDesc_End(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, 2.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 3.0)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tbuf := make([]tsm1.FloatValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 2, false)\n\tvalues, err := c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texp := data[2]\n\tif got, exp := len(values), len(exp.values); got != exp {\n\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfor i, v := range exp.values {\n\t\tif got, exp := values[i].Value(), v.Value(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", i, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestKeyCursor_TombstoneRange(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", 
[]tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, 2.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 3.0)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tif err := fs.DeleteRange([]string{\"cpu\"}, 1, 1); err != nil {\n\t\tt.Fatalf(\"unexpected error delete range: %v\", err)\n\t}\n\n\tbuf := make([]tsm1.FloatValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 0, true)\n\texpValues := []int{0, 2}\n\tfor _, v := range expValues {\n\t\tvalues, err := c.ReadFloatBlock(&buf)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t\t}\n\n\t\texp := data[v]\n\t\tif got, exp := len(values), 1; got != exp {\n\t\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif got, exp := values[0].String(), exp.values[0].String(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", 0, got, exp)\n\t\t}\n\t\tc.Next()\n\t}\n}\n\nfunc TestKeyCursor_TombstoneRange_PartialFloat(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{\n\t\t\ttsm1.NewValue(0, 1.0),\n\t\t\ttsm1.NewValue(1, 2.0),\n\t\t\ttsm1.NewValue(2, 3.0)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tif err := fs.DeleteRange([]string{\"cpu\"}, 1, 1); err != nil {\n\t\tt.Fatalf(\"unexpected error delete range: %v\", err)\n\t}\n\n\tbuf := make([]tsm1.FloatValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 0, true)\n\tvalues, err := c.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texpValues := []tsm1.Value{data[0].values[0], data[0].values[2]}\n\tfor i, v := range expValues {\n\t\texp := 
v\n\t\tif got, exp := len(values), 2; got != exp {\n\t\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif got, exp := values[i].String(), exp.String(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", 0, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestKeyCursor_TombstoneRange_PartialInteger(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{\n\t\t\ttsm1.NewValue(0, int64(1)),\n\t\t\ttsm1.NewValue(1, int64(2)),\n\t\t\ttsm1.NewValue(2, int64(3))}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tif err := fs.DeleteRange([]string{\"cpu\"}, 1, 1); err != nil {\n\t\tt.Fatalf(\"unexpected error delete range: %v\", err)\n\t}\n\n\tbuf := make([]tsm1.IntegerValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 0, true)\n\tvalues, err := c.ReadIntegerBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texpValues := []tsm1.Value{data[0].values[0], data[0].values[2]}\n\tfor i, v := range expValues {\n\t\texp := v\n\t\tif got, exp := len(values), 2; got != exp {\n\t\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif got, exp := values[i].String(), exp.String(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", 0, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestKeyCursor_TombstoneRange_PartialString(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{\n\t\t\ttsm1.NewValue(0, \"1\"),\n\t\t\ttsm1.NewValue(1, \"2\"),\n\t\t\ttsm1.NewValue(2, \"3\")}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", 
err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tif err := fs.DeleteRange([]string{\"cpu\"}, 1, 1); err != nil {\n\t\tt.Fatalf(\"unexpected error delete range: %v\", err)\n\t}\n\n\tbuf := make([]tsm1.StringValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 0, true)\n\tvalues, err := c.ReadStringBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texpValues := []tsm1.Value{data[0].values[0], data[0].values[2]}\n\tfor i, v := range expValues {\n\t\texp := v\n\t\tif got, exp := len(values), 2; got != exp {\n\t\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif got, exp := values[i].String(), exp.String(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", 0, got, exp)\n\t\t}\n\t}\n}\n\nfunc TestKeyCursor_TombstoneRange_PartialBoolean(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{\n\t\t\ttsm1.NewValue(0, true),\n\t\t\ttsm1.NewValue(1, false),\n\t\t\ttsm1.NewValue(2, true)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tif err := fs.DeleteRange([]string{\"cpu\"}, 1, 1); err != nil {\n\t\tt.Fatalf(\"unexpected error delete range: %v\", err)\n\t}\n\n\tbuf := make([]tsm1.BooleanValue, 1000)\n\tc := fs.KeyCursor(\"cpu\", 0, true)\n\tvalues, err := c.ReadBooleanBlock(&buf)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading values: %v\", err)\n\t}\n\n\texpValues := []tsm1.Value{data[0].values[0], data[0].values[2]}\n\tfor i, v := range expValues {\n\t\texp := v\n\t\tif got, exp := len(values), 2; got != exp {\n\t\t\tt.Fatalf(\"value length mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif got, exp := values[i].String(), exp.String(); got != exp {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %v\", 0, got, 
exp)\n\t\t}\n\t}\n}\n\nfunc TestFileStore_Open(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\t// Create 3 TSM files...\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, 2.0)}},\n\t\tkeyValues{\"mem\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t}\n\n\t_, err := newFileDir(dir, data...)\n\tif err != nil {\n\t\tfatal(t, \"creating test files\", err)\n\t}\n\n\tfs := tsm1.NewFileStore(dir)\n\tif err := fs.Open(); err != nil {\n\t\tfatal(t, \"opening file store\", err)\n\t}\n\tdefer fs.Close()\n\n\tif got, exp := fs.Count(), 3; got != exp {\n\t\tt.Fatalf(\"file count mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := fs.CurrentGeneration(), 4; got != exp {\n\t\tt.Fatalf(\"current ID mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestFileStore_Remove(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\t// Create 3 TSM files...\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, 2.0)}},\n\t\tkeyValues{\"mem\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t}\n\n\tfiles, err := newFileDir(dir, data...)\n\tif err != nil {\n\t\tfatal(t, \"creating test files\", err)\n\t}\n\n\tfs := tsm1.NewFileStore(dir)\n\tif err := fs.Open(); err != nil {\n\t\tfatal(t, \"opening file store\", err)\n\t}\n\tdefer fs.Close()\n\n\tif got, exp := fs.Count(), 3; got != exp {\n\t\tt.Fatalf(\"file count mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := fs.CurrentGeneration(), 4; got != exp {\n\t\tt.Fatalf(\"current ID mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tfs.Replace(files[2:3], nil)\n\n\tif got, exp := fs.Count(), 2; got != exp {\n\t\tt.Fatalf(\"file count mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := fs.CurrentGeneration(), 4; got != exp {\n\t\tt.Fatalf(\"current ID mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc 
TestFileStore_Replace(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\t// Create 3 TSM files...\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, 2.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 3.0)}},\n\t}\n\n\tfiles, err := newFileDir(dir, data...)\n\tif err != nil {\n\t\tfatal(t, \"creating test files\", err)\n\t}\n\n\t// Replace requires assumes new files have a .tmp extension\n\treplacement := files[2] + \".tmp\"\n\tos.Rename(files[2], replacement)\n\n\tfs := tsm1.NewFileStore(dir)\n\tif err := fs.Open(); err != nil {\n\t\tfatal(t, \"opening file store\", err)\n\t}\n\tdefer fs.Close()\n\n\tif got, exp := fs.Count(), 2; got != exp {\n\t\tt.Fatalf(\"file count mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\t// Should record references to the two existing TSM files\n\tcur := fs.KeyCursor(\"cpu\", 0, true)\n\n\t// Should move the existing files out of the way, but allow query to complete\n\tif err := fs.Replace(files[:2], []string{replacement}); err != nil {\n\t\tt.Fatalf(\"replace: %v\", err)\n\t}\n\n\tif got, exp := fs.Count(), 1; got != exp {\n\t\tt.Fatalf(\"file count mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\t// There should be two blocks (1 in each file)\n\tcur.Next()\n\tbuf := make([]tsm1.FloatValue, 10)\n\tvalues, err := cur.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, exp := len(values), 1; got != exp {\n\t\tt.Fatalf(\"value len mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tcur.Next()\n\tvalues, err = cur.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, exp := len(values), 1; got != exp {\n\t\tt.Fatalf(\"value len mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\t// No more blocks for this cursor\n\tcur.Next()\n\tvalues, err = cur.ReadFloatBlock(&buf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, exp := len(values), 0; got != exp {\n\t\tt.Fatalf(\"value len 
mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\t// Release the references (files should get evicted by purger shortly)\n\tcur.Close()\n\n\ttime.Sleep(time.Second)\n\t// Make sure the two TSM files used by the cursor are gone\n\tif _, err := os.Stat(files[0]); !os.IsNotExist(err) {\n\t\tt.Fatalf(\"stat file: %v\", err)\n\t}\n\tif _, err := os.Stat(files[1]); !os.IsNotExist(err) {\n\t\tt.Fatalf(\"stat file: %v\", err)\n\t}\n\n\t// Make sure the new file exists\n\tif _, err := os.Stat(files[2]); err != nil {\n\t\tt.Fatalf(\"stat file: %v\", err)\n\t}\n\n}\n\nfunc TestFileStore_Open_Deleted(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\t// Create 3 TSM files...\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu,host=server2!~#!value\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t\tkeyValues{\"cpu,host=server1!~#!value\", []tsm1.Value{tsm1.NewValue(1, 2.0)}},\n\t\tkeyValues{\"mem,host=server1!~#!value\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t}\n\n\t_, err := newFileDir(dir, data...)\n\tif err != nil {\n\t\tfatal(t, \"creating test files\", err)\n\t}\n\n\tfs := tsm1.NewFileStore(dir)\n\tif err := fs.Open(); err != nil {\n\t\tfatal(t, \"opening file store\", err)\n\t}\n\tdefer fs.Close()\n\n\tif got, exp := len(fs.Keys()), 3; got != exp {\n\t\tt.Fatalf(\"file count mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif err := fs.Delete([]string{\"cpu,host=server2!~#!value\"}); err != nil {\n\t\tfatal(t, \"deleting\", err)\n\t}\n\n\tfs2 := tsm1.NewFileStore(dir)\n\tif err := fs2.Open(); err != nil {\n\t\tfatal(t, \"opening file store\", err)\n\t}\n\tdefer fs2.Close()\n\n\tif got, exp := len(fs2.Keys()), 2; got != exp {\n\t\tt.Fatalf(\"file count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestFileStore_Delete(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu,host=server2!~#!value\", []tsm1.Value{tsm1.NewValue(0, 
1.0)}},\n\t\tkeyValues{\"cpu,host=server1!~#!value\", []tsm1.Value{tsm1.NewValue(1, 2.0)}},\n\t\tkeyValues{\"mem,host=server1!~#!value\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\tkeys := fs.Keys()\n\tif got, exp := len(keys), 3; got != exp {\n\t\tt.Fatalf(\"key length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif err := fs.Delete([]string{\"cpu,host=server2!~#!value\"}); err != nil {\n\t\tfatal(t, \"deleting\", err)\n\t}\n\n\tkeys = fs.Keys()\n\tif got, exp := len(keys), 2; got != exp {\n\t\tt.Fatalf(\"key length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestFileStore_Stats(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\t// Create 3 TSM files...\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, 2.0)}},\n\t\tkeyValues{\"mem\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t}\n\n\tfiles, err := newFileDir(dir, data...)\n\tif err != nil {\n\t\tfatal(t, \"creating test files\", err)\n\t}\n\n\tfs := tsm1.NewFileStore(dir)\n\tif err := fs.Open(); err != nil {\n\t\tfatal(t, \"opening file store\", err)\n\t}\n\tdefer fs.Close()\n\n\tstats := fs.Stats()\n\tif got, exp := len(stats), 3; got != exp {\n\t\tt.Fatalf(\"file count mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\t// Another call should result in the same stats being returned.\n\tif got, exp := fs.Stats(), stats; !reflect.DeepEqual(got, exp) {\n\t\tt.Fatalf(\"got %v, exp %v\", got, exp)\n\t}\n\n\t// Removing one of the files should invalidate the cache.\n\tfs.Replace(files[0:1], nil)\n\tif got, exp := len(fs.Stats()), 2; got != exp {\n\t\tt.Fatalf(\"file count mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\t// Write a new TSM file that that is not open\n\tnewFile := MustWriteTSM(dir, 4, map[string][]tsm1.Value{\n\t\t\"mem\": 
[]tsm1.Value{tsm1.NewValue(0, 1.0)},\n\t})\n\n\treplacement := files[2] + \"-foo\" + \".tmp\" // Assumes new files have a .tmp extension\n\tif err := os.Rename(newFile, replacement); err != nil {\n\n\t}\n\t// Replace 3 w/ 1\n\tif err := fs.Replace(files, []string{replacement}); err != nil {\n\t\tt.Fatalf(\"replace: %v\", err)\n\t}\n\n\tvar found bool\n\tstats = fs.Stats()\n\tfor _, stat := range stats {\n\t\tif strings.HasSuffix(stat.Path, \"-foo\") {\n\t\t\tfound = true\n\t\t}\n\t}\n\n\tif !found {\n\t\tt.Fatalf(\"Didn't find %s in stats: %v\", \"foo\", stats)\n\t}\n\n\tnewFile = MustWriteTSM(dir, 5, map[string][]tsm1.Value{\n\t\t\"mem\": []tsm1.Value{tsm1.NewValue(0, 1.0)},\n\t})\n\n\t// Adding some files should invalidate the cache.\n\tfs.Replace(nil, []string{newFile})\n\tif got, exp := len(fs.Stats()), 2; got != exp {\n\t\tt.Fatalf(\"file count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestFileStore_CreateSnapshot(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tfs := tsm1.NewFileStore(dir)\n\n\t// Setup 3 files\n\tdata := []keyValues{\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(1, 2.0)}},\n\t\tkeyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(2, 3.0)}},\n\t}\n\n\tfiles, err := newFiles(dir, data...)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating files: %v\", err)\n\t}\n\n\tfs.Replace(nil, files)\n\n\t// Create a tombstone\n\tif err := fs.DeleteRange([]string{\"cpu\"}, 1, 1); err != nil {\n\t\tt.Fatalf(\"unexpected error delete range: %v\", err)\n\t}\n\n\ts, e := fs.CreateSnapshot()\n\tif e != nil {\n\t\tt.Fatal(e)\n\t}\n\tt.Logf(\"temp file for hard links: %q\", s)\n\n\ttfs, e := ioutil.ReadDir(s)\n\tif e != nil {\n\t\tt.Fatal(e)\n\t}\n\tif len(tfs) == 0 {\n\t\tt.Fatal(\"no files found\")\n\t}\n\n\tfor _, f := range fs.Files() {\n\t\tp := filepath.Join(s, filepath.Base(f.Path()))\n\t\tt.Logf(\"checking for existence of hard link %q\", p)\n\t\tif _, 
err := os.Stat(p); os.IsNotExist(err) {\n\t\t\tt.Fatalf(\"unable to find file %q\", p)\n\t\t}\n\t\tfor _, tf := range f.TombstoneFiles() {\n\t\t\tp := filepath.Join(s, filepath.Base(tf.Path))\n\t\t\tt.Logf(\"checking for existence of hard link %q\", p)\n\t\t\tif _, err := os.Stat(p); os.IsNotExist(err) {\n\t\t\t\tt.Fatalf(\"unable to find file %q\", p)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc newFileDir(dir string, values ...keyValues) ([]string, error) {\n\tvar files []string\n\n\tid := 1\n\tfor _, v := range values {\n\t\tf := MustTempFile(dir)\n\t\tw, err := tsm1.NewTSMWriter(f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := w.Write(v.key, v.values); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := w.WriteIndex(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := w.Close(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnewName := filepath.Join(filepath.Dir(f.Name()), tsmFileName(id))\n\t\tif err := os.Rename(f.Name(), newName); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tid++\n\n\t\tfiles = append(files, newName)\n\t}\n\treturn files, nil\n\n}\n\nfunc newFiles(dir string, values ...keyValues) ([]string, error) {\n\tvar files []string\n\n\tid := 1\n\tfor _, v := range values {\n\t\tf := MustTempFile(dir)\n\t\tw, err := tsm1.NewTSMWriter(f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := w.Write(v.key, v.values); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := w.WriteIndex(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := w.Close(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tnewName := filepath.Join(filepath.Dir(f.Name()), tsmFileName(id))\n\t\tif err := os.Rename(f.Name(), newName); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tid++\n\n\t\tfiles = append(files, newName)\n\t}\n\treturn files, nil\n}\n\ntype keyValues struct {\n\tkey    string\n\tvalues []tsm1.Value\n}\n\nfunc MustTempDir() string {\n\tdir, err := ioutil.TempDir(\"\", \"tsm1-test\")\n\tif err != nil 
{\n\t\tpanic(fmt.Sprintf(\"failed to create temp dir: %v\", err))\n\t}\n\treturn dir\n}\n\nfunc MustTempFile(dir string) *os.File {\n\tf, err := ioutil.TempFile(dir, \"tsm1test\")\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to create temp file: %v\", err))\n\t}\n\treturn f\n}\n\nfunc fatal(t *testing.T, msg string, err error) {\n\tt.Fatalf(\"unexpected error %v: %v\", msg, err)\n}\n\nfunc tsmFileName(id int) string {\n\treturn fmt.Sprintf(\"%09d-%09d.tsm\", id, 1)\n}\n\nvar fsResult []tsm1.FileStat\n\nfunc BenchmarkFileStore_Stats(b *testing.B) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\t// Create some TSM files...\n\tdata := make([]keyValues, 0, 1000)\n\tfor i := 0; i < 1000; i++ {\n\t\tdata = append(data, keyValues{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 1.0)}})\n\t}\n\n\t_, err := newFileDir(dir, data...)\n\tif err != nil {\n\t\tb.Fatalf(\"creating benchmark files %v\", err)\n\t}\n\n\tfs := tsm1.NewFileStore(dir)\n\tif testing.Verbose() {\n\t\tfs.WithLogger(zap.New(\n\t\t\tzap.NewTextEncoder(),\n\t\t\tzap.Output(os.Stderr),\n\t\t))\n\t}\n\n\tif err := fs.Open(); err != nil {\n\t\tb.Fatalf(\"opening file store %v\", err)\n\t}\n\tdefer fs.Close()\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tfsResult = fs.Stats()\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_unix.go",
    "content": "// +build !windows\n\npackage tsm1\n\nimport \"os\"\n\nfunc syncDir(dirName string) error {\n\t// fsync the dir to flush the rename\n\tdir, err := os.OpenFile(dirName, os.O_RDONLY, os.ModeDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer dir.Close()\n\treturn dir.Sync()\n}\n\n// renameFile will rename the source to target using os function.\nfunc renameFile(oldpath, newpath string) error {\n\treturn os.Rename(oldpath, newpath)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_windows.go",
    "content": "package tsm1\n\nimport \"os\"\n\nfunc syncDir(dirName string) error {\n\treturn nil\n}\n\n// renameFile will rename the source to target using os function. If target exists it will be removed before renaming.\nfunc renameFile(oldpath, newpath string) error {\n\tif _, err := os.Stat(newpath); err == nil {\n\t\tif err = os.Remove(newpath); nil != err {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn os.Rename(oldpath, newpath)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/float.go",
    "content": "package tsm1\n\n/*\nThis code is originally from: https://github.com/dgryski/go-tsz and has been modified to remove\nthe timestamp compression fuctionality.\n\nIt implements the float compression as presented in: http://www.vldb.org/pvldb/vol8/p1816-teller.pdf.\nThis implementation uses a sentinel value of NaN which means that float64 NaN cannot be stored using\nthis version.\n*/\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math\"\n\n\t\"github.com/dgryski/go-bits\"\n\t\"github.com/dgryski/go-bitstream\"\n)\n\nconst (\n\t// floatUncompressed is an uncompressed format using 8 bytes per value.\n\t// Not yet implemented.\n\tfloatUncompressed = 0\n\n\t// floatCompressedGorilla is a compressed format using the gorilla paper encoding\n\tfloatCompressedGorilla = 1\n)\n\n// uvnan is the constant returned from math.NaN().\nconst uvnan = 0x7FF8000000000001\n\n// FloatEncoder encodes multiple float64s into a byte slice.\ntype FloatEncoder struct {\n\tval float64\n\terr error\n\n\tleading  uint64\n\ttrailing uint64\n\n\tbuf bytes.Buffer\n\tbw  *bitstream.BitWriter\n\n\tfirst    bool\n\tfinished bool\n}\n\n// NewFloatEncoder returns a new FloatEncoder.\nfunc NewFloatEncoder() *FloatEncoder {\n\ts := FloatEncoder{\n\t\tfirst:   true,\n\t\tleading: ^uint64(0),\n\t}\n\n\ts.bw = bitstream.NewWriter(&s.buf)\n\ts.buf.WriteByte(floatCompressedGorilla << 4)\n\n\treturn &s\n}\n\n// Reset sets the encoder back to its initial state.\nfunc (s *FloatEncoder) Reset() {\n\ts.val = 0\n\ts.err = nil\n\ts.leading = ^uint64(0)\n\ts.trailing = 0\n\ts.buf.Reset()\n\ts.buf.WriteByte(floatCompressedGorilla << 4)\n\n\ts.bw.Resume(0x0, 8)\n\n\ts.finished = false\n\ts.first = true\n}\n\n// Bytes returns a copy of the underlying byte buffer used in the encoder.\nfunc (s *FloatEncoder) Bytes() ([]byte, error) {\n\treturn s.buf.Bytes(), s.err\n}\n\n// Flush indicates there are no more values to encode.\nfunc (s *FloatEncoder) Flush() {\n\tif !s.finished {\n\t\t// write an end-of-stream 
record\n\t\ts.finished = true\n\t\ts.Write(math.NaN())\n\t\ts.bw.Flush(bitstream.Zero)\n\t}\n}\n\n// Write encodes v to the underlying buffer.\nfunc (s *FloatEncoder) Write(v float64) {\n\t// Only allow NaN as a sentinel value\n\tif math.IsNaN(v) && !s.finished {\n\t\ts.err = fmt.Errorf(\"unsupported value: NaN\")\n\t\treturn\n\t}\n\tif s.first {\n\t\t// first point\n\t\ts.val = v\n\t\ts.first = false\n\t\ts.bw.WriteBits(math.Float64bits(v), 64)\n\t\treturn\n\t}\n\n\tvDelta := math.Float64bits(v) ^ math.Float64bits(s.val)\n\n\tif vDelta == 0 {\n\t\ts.bw.WriteBit(bitstream.Zero)\n\t} else {\n\t\ts.bw.WriteBit(bitstream.One)\n\n\t\tleading := bits.Clz(vDelta)\n\t\ttrailing := bits.Ctz(vDelta)\n\n\t\t// Clamp number of leading zeros to avoid overflow when encoding\n\t\tleading &= 0x1F\n\t\tif leading >= 32 {\n\t\t\tleading = 31\n\t\t}\n\n\t\t// TODO(dgryski): check if it's 'cheaper' to reset the leading/trailing bits instead\n\t\tif s.leading != ^uint64(0) && leading >= s.leading && trailing >= s.trailing {\n\t\t\ts.bw.WriteBit(bitstream.Zero)\n\t\t\ts.bw.WriteBits(vDelta>>s.trailing, 64-int(s.leading)-int(s.trailing))\n\t\t} else {\n\t\t\ts.leading, s.trailing = leading, trailing\n\n\t\t\ts.bw.WriteBit(bitstream.One)\n\t\t\ts.bw.WriteBits(leading, 5)\n\n\t\t\t// Note that if leading == trailing == 0, then sigbits == 64.  But that\n\t\t\t// value doesn't actually fit into the 6 bits we have.\n\t\t\t// Luckily, we never need to encode 0 significant bits, since that would\n\t\t\t// put us in the other case (vdelta == 0).  
So instead we write out a 0 and\n\t\t\t// adjust it back to 64 on unpacking.\n\t\t\tsigbits := 64 - leading - trailing\n\t\t\ts.bw.WriteBits(sigbits, 6)\n\t\t\ts.bw.WriteBits(vDelta>>trailing, int(sigbits))\n\t\t}\n\t}\n\n\ts.val = v\n}\n\n// FloatDecoder decodes a byte slice into multiple float64 values.\ntype FloatDecoder struct {\n\tval uint64\n\n\tleading  uint64\n\ttrailing uint64\n\n\tbr BitReader\n\tb  []byte\n\n\tfirst    bool\n\tfinished bool\n\n\terr error\n}\n\n// SetBytes initializes the decoder with b. Must call before calling Next().\nfunc (it *FloatDecoder) SetBytes(b []byte) error {\n\tvar v uint64\n\tif len(b) == 0 {\n\t\tv = uvnan\n\t} else {\n\t\t// first byte is the compression type.\n\t\t// we currently just have gorilla compression.\n\t\tit.br.Reset(b[1:])\n\n\t\tvar err error\n\t\tv, err = it.br.ReadBits(64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Reset all fields.\n\tit.val = v\n\tit.leading = 0\n\tit.trailing = 0\n\tit.b = b\n\tit.first = true\n\tit.finished = false\n\tit.err = nil\n\n\treturn nil\n}\n\n// Next returns true if there are remaining values to read.\nfunc (it *FloatDecoder) Next() bool {\n\tif it.err != nil || it.finished {\n\t\treturn false\n\t}\n\n\tif it.first {\n\t\tit.first = false\n\n\t\t// mark as finished if there were no values.\n\t\tif it.val == uvnan { // IsNaN\n\t\t\tit.finished = true\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}\n\n\t// read compressed value\n\tvar bit bool\n\tif it.br.CanReadBitFast() {\n\t\tbit = it.br.ReadBitFast()\n\t} else if v, err := it.br.ReadBit(); err != nil {\n\t\tit.err = err\n\t\treturn false\n\t} else {\n\t\tbit = v\n\t}\n\n\tif !bit {\n\t\t// it.val = it.val\n\t} else {\n\t\tvar bit bool\n\t\tif it.br.CanReadBitFast() {\n\t\t\tbit = it.br.ReadBitFast()\n\t\t} else if v, err := it.br.ReadBit(); err != nil {\n\t\t\tit.err = err\n\t\t\treturn false\n\t\t} else {\n\t\t\tbit = v\n\t\t}\n\n\t\tif !bit {\n\t\t\t// reuse leading/trailing zero bits\n\t\t\t// 
it.leading, it.trailing = it.leading, it.trailing\n\t\t} else {\n\t\t\tbits, err := it.br.ReadBits(5)\n\t\t\tif err != nil {\n\t\t\t\tit.err = err\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tit.leading = bits\n\n\t\t\tbits, err = it.br.ReadBits(6)\n\t\t\tif err != nil {\n\t\t\t\tit.err = err\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tmbits := bits\n\t\t\t// 0 significant bits here means we overflowed and we actually need 64; see comment in encoder\n\t\t\tif mbits == 0 {\n\t\t\t\tmbits = 64\n\t\t\t}\n\t\t\tit.trailing = 64 - it.leading - mbits\n\t\t}\n\n\t\tmbits := uint(64 - it.leading - it.trailing)\n\t\tbits, err := it.br.ReadBits(mbits)\n\t\tif err != nil {\n\t\t\tit.err = err\n\t\t\treturn false\n\t\t}\n\n\t\tvbits := it.val\n\t\tvbits ^= (bits << it.trailing)\n\n\t\tif vbits == uvnan { // IsNaN\n\t\t\tit.finished = true\n\t\t\treturn false\n\t\t}\n\t\tit.val = vbits\n\t}\n\n\treturn true\n}\n\n// Values returns the current float64 value.\nfunc (it *FloatDecoder) Values() float64 {\n\treturn math.Float64frombits(it.val)\n}\n\n// Error returns the current decoding error.\nfunc (it *FloatDecoder) Error() error {\n\treturn it.err\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/float_test.go",
    "content": "package tsm1_test\n\nimport (\n\t\"math\"\n\t\"reflect\"\n\t\"testing\"\n\t\"testing/quick\"\n\n\t\"github.com/influxdata/influxdb/tsdb/engine/tsm1\"\n)\n\nfunc TestFloatEncoder_Simple(t *testing.T) {\n\t// Example from the paper\n\ts := tsm1.NewFloatEncoder()\n\n\ts.Write(12)\n\ts.Write(12)\n\ts.Write(24)\n\n\t// extra tests\n\n\t// floating point masking/shifting bug\n\ts.Write(13)\n\ts.Write(24)\n\n\t// delta-of-delta sizes\n\ts.Write(24)\n\ts.Write(24)\n\ts.Write(24)\n\n\ts.Flush()\n\n\tb, err := s.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar it tsm1.FloatDecoder\n\tif err := it.SetBytes(b); err != nil {\n\t\tt.Fatalf(\"unexpected error creating float decoder: %v\", err)\n\t}\n\n\twant := []float64{\n\t\t12,\n\t\t12,\n\t\t24,\n\n\t\t13,\n\t\t24,\n\n\t\t24,\n\t\t24,\n\t\t24,\n\t}\n\n\tfor _, w := range want {\n\t\tif !it.Next() {\n\t\t\tt.Fatalf(\"Next()=false, want true\")\n\t\t}\n\t\tvv := it.Values()\n\t\tif w != vv {\n\t\t\tt.Errorf(\"Values()=(%v), want (%v)\\n\", vv, w)\n\t\t}\n\t}\n\n\tif it.Next() {\n\t\tt.Fatalf(\"Next()=true, want false\")\n\t}\n\n\tif err := it.Error(); err != nil {\n\t\tt.Errorf(\"it.Error()=%v, want nil\", err)\n\t}\n}\n\nfunc TestFloatEncoder_SimilarFloats(t *testing.T) {\n\ts := tsm1.NewFloatEncoder()\n\twant := []float64{\n\t\t6.00065e+06,\n\t\t6.000656e+06,\n\t\t6.000657e+06,\n\n\t\t6.000659e+06,\n\t\t6.000661e+06,\n\t}\n\n\tfor _, v := range want {\n\t\ts.Write(v)\n\t}\n\n\ts.Flush()\n\n\tb, err := s.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar it tsm1.FloatDecoder\n\tif err := it.SetBytes(b); err != nil {\n\t\tt.Fatalf(\"unexpected error creating float decoder: %v\", err)\n\t}\n\n\tfor _, w := range want {\n\t\tif !it.Next() {\n\t\t\tt.Fatalf(\"Next()=false, want true\")\n\t\t}\n\t\tvv := it.Values()\n\t\tif w != vv {\n\t\t\tt.Errorf(\"Values()=(%v), want (%v)\\n\", vv, w)\n\t\t}\n\t}\n\n\tif it.Next() 
{\n\t\tt.Fatalf(\"Next()=true, want false\")\n\t}\n\n\tif err := it.Error(); err != nil {\n\t\tt.Errorf(\"it.Error()=%v, want nil\", err)\n\t}\n}\n\nvar TwoHoursData = []struct {\n\tv float64\n}{\n\t// 2h of data\n\t{761}, {727}, {763}, {706}, {700},\n\t{679}, {757}, {708}, {739}, {707},\n\t{699}, {740}, {729}, {766}, {730},\n\t{715}, {705}, {693}, {765}, {724},\n\t{799}, {761}, {737}, {766}, {756},\n\t{719}, {722}, {801}, {747}, {731},\n\t{742}, {744}, {791}, {750}, {759},\n\t{809}, {751}, {705}, {770}, {792},\n\t{727}, {762}, {772}, {721}, {748},\n\t{753}, {744}, {716}, {776}, {659},\n\t{789}, {766}, {758}, {690}, {795},\n\t{770}, {758}, {723}, {767}, {765},\n\t{693}, {706}, {681}, {727}, {724},\n\t{780}, {678}, {696}, {758}, {740},\n\t{735}, {700}, {742}, {747}, {752},\n\t{734}, {743}, {732}, {746}, {770},\n\t{780}, {710}, {731}, {712}, {712},\n\t{741}, {770}, {770}, {754}, {718},\n\t{670}, {775}, {749}, {795}, {756},\n\t{741}, {787}, {721}, {745}, {782},\n\t{765}, {780}, {811}, {790}, {836},\n\t{743}, {858}, {739}, {762}, {770},\n\t{752}, {763}, {795}, {792}, {746},\n\t{786}, {785}, {774}, {786}, {718},\n}\n\nfunc TestFloatEncoder_Roundtrip(t *testing.T) {\n\ts := tsm1.NewFloatEncoder()\n\tfor _, p := range TwoHoursData {\n\t\ts.Write(p.v)\n\t}\n\ts.Flush()\n\n\tb, err := s.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar it tsm1.FloatDecoder\n\tif err := it.SetBytes(b); err != nil {\n\t\tt.Fatalf(\"unexpected error creating float decoder: %v\", err)\n\t}\n\n\tfor _, w := range TwoHoursData {\n\t\tif !it.Next() {\n\t\t\tt.Fatalf(\"Next()=false, want true\")\n\t\t}\n\t\tvv := it.Values()\n\t\t// t.Logf(\"it.Values()=(%+v, %+v)\\n\", time.Unix(int64(tt), 0), vv)\n\t\tif w.v != vv {\n\t\t\tt.Errorf(\"Values()=(%v), want (%v)\\n\", vv, w.v)\n\t\t}\n\t}\n\n\tif it.Next() {\n\t\tt.Fatalf(\"Next()=true, want false\")\n\t}\n\n\tif err := it.Error(); err != nil {\n\t\tt.Errorf(\"it.Error()=%v, want nil\", err)\n\t}\n}\n\nfunc 
TestFloatEncoder_Roundtrip_NaN(t *testing.T) {\n\n\ts := tsm1.NewFloatEncoder()\n\ts.Write(1.0)\n\ts.Write(math.NaN())\n\ts.Write(2.0)\n\ts.Flush()\n\n\t_, err := s.Bytes()\n\n\tif err == nil {\n\t\tt.Fatalf(\"expected error. got nil\")\n\t}\n}\n\nfunc Test_FloatEncoder_Quick(t *testing.T) {\n\tquick.Check(func(values []float64) bool {\n\n\t\texpected := values\n\t\tif values == nil {\n\t\t\texpected = []float64{}\n\t\t}\n\n\t\t// Write values to encoder.\n\t\tenc := tsm1.NewFloatEncoder()\n\t\tfor _, v := range values {\n\t\t\tenc.Write(v)\n\t\t}\n\t\tenc.Flush()\n\n\t\t// Read values out of decoder.\n\t\tgot := make([]float64, 0, len(values))\n\t\tb, err := enc.Bytes()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\n\t\tvar dec tsm1.FloatDecoder\n\t\tif err := dec.SetBytes(b); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfor dec.Next() {\n\t\t\tgot = append(got, dec.Values())\n\t\t}\n\n\t\t// Verify that input and output values match.\n\t\tif !reflect.DeepEqual(expected, got) {\n\t\t\tt.Fatalf(\"mismatch:\\n\\nexp=%#v\\n\\ngot=%#v\\n\\n\", expected, got)\n\t\t}\n\n\t\treturn true\n\t}, nil)\n}\n\nfunc TestFloatDecoder_Empty(t *testing.T) {\n\tvar dec tsm1.FloatDecoder\n\tif err := dec.SetBytes([]byte{}); err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif dec.Next() {\n\t\tt.Fatalf(\"exp next == false, got true\")\n\t}\n}\n\nfunc BenchmarkFloatEncoder(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\ts := tsm1.NewFloatEncoder()\n\t\tfor _, tt := range TwoHoursData {\n\t\t\ts.Write(tt.v)\n\t\t}\n\t\ts.Flush()\n\t}\n}\n\nfunc BenchmarkFloatDecoder(b *testing.B) {\n\ts := tsm1.NewFloatEncoder()\n\tfor _, tt := range TwoHoursData {\n\t\ts.Write(tt.v)\n\t}\n\ts.Flush()\n\tbytes, err := s.Bytes()\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tvar it tsm1.FloatDecoder\n\t\tif err := it.SetBytes(bytes); err != nil {\n\t\t\tb.Fatalf(\"unexpected 
error creating float decoder: %v\", err)\n\t\t}\n\n\t\tfor j := 0; j < len(TwoHoursData); it.Next() {\n\t\t\tj++\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/int.go",
    "content": "package tsm1\n\n// Integer encoding uses two different strategies depending on the range of values in\n// the uncompressed data.  Encoded values are first encoding used zig zag encoding.\n// This interleaves positive and negative integers across a range of positive integers.\n//\n// For example, [-2,-1,0,1] becomes [3,1,0,2]. See\n// https://developers.google.com/protocol-buffers/docs/encoding?hl=en#signed-integers\n// for more information.\n//\n// If all the zig zag encoded values are less than 1 << 60 - 1, they are compressed using\n// simple8b encoding.  If any value is larger than 1 << 60 - 1, the values are stored uncompressed.\n//\n// Each encoded byte slice contains a 1 byte header followed by multiple 8 byte packed integers\n// or 8 byte uncompressed integers.  The 4 high bits of the first byte indicate the encoding type\n// for the remaining bytes.\n//\n// There are currently two encoding types that can be used with room for 16 total.  These additional\n// encoding slots are reserved for future use.  One improvement to be made is to use a patched\n// encoding such as PFOR if only a small number of values exceed the max compressed value range.  
This\n// should improve compression ratios with very large integers near the ends of the int64 range.\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n\n\t\"github.com/jwilder/encoding/simple8b\"\n)\n\nconst (\n\t// intUncompressed is an uncompressed format using 8 bytes per point\n\tintUncompressed = 0\n\t// intCompressedSimple is a bit-packed format using simple8b encoding\n\tintCompressedSimple = 1\n\t// intCompressedRLE is a run-length encoding format\n\tintCompressedRLE = 2\n)\n\n// IntegerEncoder encodes int64s into byte slices.\ntype IntegerEncoder struct {\n\tprev   int64\n\trle    bool\n\tvalues []uint64\n}\n\n// NewIntegerEncoder returns a new integer encoder with an initial buffer of values sized at sz.\nfunc NewIntegerEncoder(sz int) IntegerEncoder {\n\treturn IntegerEncoder{\n\t\trle:    true,\n\t\tvalues: make([]uint64, 0, sz),\n\t}\n}\n\n// Flush is no-op\nfunc (e *IntegerEncoder) Flush() {}\n\n// Reset sets the encoder back to its initial state.\nfunc (e *IntegerEncoder) Reset() {\n\te.prev = 0\n\te.rle = true\n\te.values = e.values[:0]\n}\n\n// Write encodes v to the underlying buffers.\nfunc (e *IntegerEncoder) Write(v int64) {\n\t// Delta-encode each value as it's written.  
This happens before\n\t// ZigZagEncoding because the deltas could be negative.\n\tdelta := v - e.prev\n\te.prev = v\n\tenc := ZigZagEncode(delta)\n\tif len(e.values) > 1 {\n\t\te.rle = e.rle && e.values[len(e.values)-1] == enc\n\t}\n\n\te.values = append(e.values, enc)\n}\n\n// Bytes returns a copy of the underlying buffer.\nfunc (e *IntegerEncoder) Bytes() ([]byte, error) {\n\t// Only run-length encode if it could reduce storage size.\n\tif e.rle && len(e.values) > 2 {\n\t\treturn e.encodeRLE()\n\t}\n\n\tfor _, v := range e.values {\n\t\t// Value is too large to encode using packed format\n\t\tif v > simple8b.MaxValue {\n\t\t\treturn e.encodeUncompressed()\n\t\t}\n\t}\n\n\treturn e.encodePacked()\n}\n\nfunc (e *IntegerEncoder) encodeRLE() ([]byte, error) {\n\t// Large varints can take up to 10 bytes.  We're storing 3 + 1\n\t// type byte.\n\tvar b [31]byte\n\n\t// 4 high bits used for the encoding type\n\tb[0] = byte(intCompressedRLE) << 4\n\n\ti := 1\n\t// The first value\n\tbinary.BigEndian.PutUint64(b[i:], e.values[0])\n\ti += 8\n\t// The first delta\n\ti += binary.PutUvarint(b[i:], e.values[1])\n\t// The number of times the delta is repeated\n\ti += binary.PutUvarint(b[i:], uint64(len(e.values)-1))\n\n\treturn b[:i], nil\n}\n\nfunc (e *IntegerEncoder) encodePacked() ([]byte, error) {\n\tif len(e.values) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// Encode all but the first value.  
Fist value is written unencoded\n\t// using 8 bytes.\n\tencoded, err := simple8b.EncodeAll(e.values[1:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb := make([]byte, 1+(len(encoded)+1)*8)\n\t// 4 high bits of first byte store the encoding type for the block\n\tb[0] = byte(intCompressedSimple) << 4\n\n\t// Write the first value since it's not part of the encoded values\n\tbinary.BigEndian.PutUint64(b[1:9], e.values[0])\n\n\t// Write the encoded values\n\tfor i, v := range encoded {\n\t\tbinary.BigEndian.PutUint64(b[9+i*8:9+i*8+8], v)\n\t}\n\treturn b, nil\n}\n\nfunc (e *IntegerEncoder) encodeUncompressed() ([]byte, error) {\n\tif len(e.values) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tb := make([]byte, 1+len(e.values)*8)\n\t// 4 high bits of first byte store the encoding type for the block\n\tb[0] = byte(intUncompressed) << 4\n\n\tfor i, v := range e.values {\n\t\tbinary.BigEndian.PutUint64(b[1+i*8:1+i*8+8], v)\n\t}\n\treturn b, nil\n}\n\n// IntegerDecoder decodes a byte slice into int64s.\ntype IntegerDecoder struct {\n\t// 240 is the maximum number of values that can be encoded into a single uint64 using simple8b\n\tvalues [240]uint64\n\tbytes  []byte\n\ti      int\n\tn      int\n\tprev   int64\n\tfirst  bool\n\n\t// The first value for a run-length encoded byte slice\n\trleFirst uint64\n\n\t// The delta value for a run-length encoded byte slice\n\trleDelta uint64\n\tencoding byte\n\terr      error\n}\n\n// SetBytes sets the underlying byte slice of the decoder.\nfunc (d *IntegerDecoder) SetBytes(b []byte) {\n\tif len(b) > 0 {\n\t\td.encoding = b[0] >> 4\n\t\td.bytes = b[1:]\n\t} else {\n\t\td.encoding = 0\n\t\td.bytes = nil\n\t}\n\n\td.i = 0\n\td.n = 0\n\td.prev = 0\n\td.first = true\n\n\td.rleFirst = 0\n\td.rleDelta = 0\n\td.err = nil\n}\n\n// Next returns true if there are any values remaining to be decoded.\nfunc (d *IntegerDecoder) Next() bool {\n\tif d.i >= d.n && len(d.bytes) == 0 {\n\t\treturn false\n\t}\n\n\td.i++\n\n\tif d.i >= d.n {\n\t\tswitch 
d.encoding {\n\t\tcase intUncompressed:\n\t\t\td.decodeUncompressed()\n\t\tcase intCompressedSimple:\n\t\t\td.decodePacked()\n\t\tcase intCompressedRLE:\n\t\t\td.decodeRLE()\n\t\tdefault:\n\t\t\td.err = fmt.Errorf(\"unknown encoding %v\", d.encoding)\n\t\t}\n\t}\n\treturn d.err == nil && d.i < d.n\n}\n\n// Error returns the last error encountered by the decoder.\nfunc (d *IntegerDecoder) Error() error {\n\treturn d.err\n}\n\n// Read returns the next value from the decoder.\nfunc (d *IntegerDecoder) Read() int64 {\n\tswitch d.encoding {\n\tcase intCompressedRLE:\n\t\treturn ZigZagDecode(d.rleFirst) + int64(d.i)*ZigZagDecode(d.rleDelta)\n\tdefault:\n\t\tv := ZigZagDecode(d.values[d.i])\n\t\t// v is the delta encoded value, we need to add the prior value to get the original\n\t\tv = v + d.prev\n\t\td.prev = v\n\t\treturn v\n\t}\n}\n\nfunc (d *IntegerDecoder) decodeRLE() {\n\tif len(d.bytes) == 0 {\n\t\treturn\n\t}\n\n\tif len(d.bytes) < 8 {\n\t\td.err = fmt.Errorf(\"IntegerDecoder: not enough data to decode RLE starting value\")\n\t\treturn\n\t}\n\n\tvar i, n int\n\n\t// Next 8 bytes is the starting value\n\tfirst := binary.BigEndian.Uint64(d.bytes[i : i+8])\n\ti += 8\n\n\t// Next 1-10 bytes is the delta value\n\tvalue, n := binary.Uvarint(d.bytes[i:])\n\tif n <= 0 {\n\t\td.err = fmt.Errorf(\"IntegerDecoder: invalid RLE delta value\")\n\t\treturn\n\t}\n\ti += n\n\n\t// Last 1-10 bytes is how many times the value repeats\n\tcount, n := binary.Uvarint(d.bytes[i:])\n\tif n <= 0 {\n\t\td.err = fmt.Errorf(\"IntegerDecoder: invalid RLE repeat value\")\n\t\treturn\n\t}\n\n\t// Store the first value and delta value so we do not need to allocate\n\t// a large values slice.  
We can compute the value at position d.i on\n\t// demand.\n\td.rleFirst = first\n\td.rleDelta = value\n\td.n = int(count) + 1\n\td.i = 0\n\n\t// We've process all the bytes\n\td.bytes = nil\n}\n\nfunc (d *IntegerDecoder) decodePacked() {\n\tif len(d.bytes) == 0 {\n\t\treturn\n\t}\n\n\tif len(d.bytes) < 8 {\n\t\td.err = fmt.Errorf(\"IntegerDecoder: not enough data to decode packed value\")\n\t\treturn\n\t}\n\n\tv := binary.BigEndian.Uint64(d.bytes[0:8])\n\t// The first value is always unencoded\n\tif d.first {\n\t\td.first = false\n\t\td.n = 1\n\t\td.values[0] = v\n\t} else {\n\t\tn, err := simple8b.Decode(&d.values, v)\n\t\tif err != nil {\n\t\t\t// Should never happen, only error that could be returned is if the the value to be decoded was not\n\t\t\t// actually encoded by simple8b encoder.\n\t\t\td.err = fmt.Errorf(\"failed to decode value %v: %v\", v, err)\n\t\t}\n\n\t\td.n = n\n\t}\n\td.i = 0\n\td.bytes = d.bytes[8:]\n}\n\nfunc (d *IntegerDecoder) decodeUncompressed() {\n\tif len(d.bytes) == 0 {\n\t\treturn\n\t}\n\n\tif len(d.bytes) < 8 {\n\t\td.err = fmt.Errorf(\"IntegerDecoder: not enough data to decode uncompressed value\")\n\t\treturn\n\t}\n\n\td.values[0] = binary.BigEndian.Uint64(d.bytes[0:8])\n\td.i = 0\n\td.n = 1\n\td.bytes = d.bytes[8:]\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/int_test.go",
    "content": "package tsm1\n\nimport (\n\t\"math\"\n\t\"math/rand\"\n\t\"reflect\"\n\t\"testing\"\n\t\"testing/quick\"\n)\n\nfunc Test_IntegerEncoder_NoValues(t *testing.T) {\n\tenc := NewIntegerEncoder(0)\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif len(b) > 0 {\n\t\tt.Fatalf(\"unexpected lenght: exp 0, got %v\", len(b))\n\t}\n\n\tvar dec IntegerDecoder\n\tdec.SetBytes(b)\n\tif dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n}\n\nfunc Test_IntegerEncoder_One(t *testing.T) {\n\tenc := NewIntegerEncoder(1)\n\tv1 := int64(1)\n\n\tenc.Write(1)\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif got := b[0] >> 4; intCompressedSimple != got {\n\t\tt.Fatalf(\"encoding type mismatch: exp uncompressed, got %v\", got)\n\t}\n\n\tvar dec IntegerDecoder\n\tdec.SetBytes(b)\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif v1 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), v1)\n\t}\n}\n\nfunc Test_IntegerEncoder_Two(t *testing.T) {\n\tenc := NewIntegerEncoder(2)\n\tvar v1, v2 int64 = 1, 2\n\n\tenc.Write(v1)\n\tenc.Write(v2)\n\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif got := b[0] >> 4; intCompressedSimple != got {\n\t\tt.Fatalf(\"encoding type mismatch: exp uncompressed, got %v\", got)\n\t}\n\n\tvar dec IntegerDecoder\n\tdec.SetBytes(b)\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif v1 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), v1)\n\t}\n\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif v2 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), v2)\n\t}\n}\n\nfunc Test_IntegerEncoder_Negative(t *testing.T) {\n\tenc := NewIntegerEncoder(3)\n\tvar 
v1, v2, v3 int64 = -2, 0, 1\n\n\tenc.Write(v1)\n\tenc.Write(v2)\n\tenc.Write(v3)\n\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif got := b[0] >> 4; intCompressedSimple != got {\n\t\tt.Fatalf(\"encoding type mismatch: exp uncompressed, got %v\", got)\n\t}\n\n\tvar dec IntegerDecoder\n\tdec.SetBytes(b)\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif v1 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), v1)\n\t}\n\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif v2 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), v2)\n\t}\n\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif v3 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), v3)\n\t}\n}\n\nfunc Test_IntegerEncoder_Large_Range(t *testing.T) {\n\tenc := NewIntegerEncoder(2)\n\tvar v1, v2 int64 = math.MinInt64, math.MaxInt64\n\tenc.Write(v1)\n\tenc.Write(v2)\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif got := b[0] >> 4; intUncompressed != got {\n\t\tt.Fatalf(\"encoding type mismatch: exp uncompressed, got %v\", got)\n\t}\n\n\tvar dec IntegerDecoder\n\tdec.SetBytes(b)\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif v1 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), v1)\n\t}\n\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif v2 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), v2)\n\t}\n}\n\nfunc Test_IntegerEncoder_Uncompressed(t *testing.T) {\n\tenc := NewIntegerEncoder(3)\n\tvar v1, v2, v3 int64 = 0, 1, 1 << 60\n\n\tenc.Write(v1)\n\tenc.Write(v2)\n\tenc.Write(v3)\n\n\tb, err := enc.Bytes()\n\tif err != nil 
{\n\t\tt.Fatalf(\"expected error: %v\", err)\n\t}\n\n\t// 1 byte header + 3 * 8 byte values\n\tif exp := 25; len(b) != exp {\n\t\tt.Fatalf(\"length mismatch: got %v, exp %v\", len(b), exp)\n\t}\n\n\tif got := b[0] >> 4; intUncompressed != got {\n\t\tt.Fatalf(\"encoding type mismatch: exp uncompressed, got %v\", got)\n\t}\n\n\tvar dec IntegerDecoder\n\tdec.SetBytes(b)\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif v1 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), v1)\n\t}\n\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif v2 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), v2)\n\t}\n\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif v3 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), v3)\n\t}\n}\n\nfunc Test_IntegerEncoder_NegativeUncompressed(t *testing.T) {\n\tvalues := []int64{\n\t\t-2352281900722994752, 1438442655375607923, -4110452567888190110,\n\t\t-1221292455668011702, -1941700286034261841, -2836753127140407751,\n\t\t1432686216250034552, 3663244026151507025, -3068113732684750258,\n\t\t-1949953187327444488, 3713374280993588804, 3226153669854871355,\n\t\t-2093273755080502606, 1006087192578600616, -2272122301622271655,\n\t\t2533238229511593671, -4450454445568858273, 2647789901083530435,\n\t\t2761419461769776844, -1324397441074946198, -680758138988210958,\n\t\t94468846694902125, -2394093124890745254, -2682139311758778198,\n\t}\n\tenc := NewIntegerEncoder(256)\n\tfor _, v := range values {\n\t\tenc.Write(v)\n\t}\n\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"expected error: %v\", err)\n\t}\n\n\tif got := b[0] >> 4; intUncompressed != got {\n\t\tt.Fatalf(\"encoding type mismatch: exp uncompressed, got %v\", got)\n\t}\n\n\tvar dec IntegerDecoder\n\tdec.SetBytes(b)\n\n\ti := 0\n\tfor dec.Next() {\n\t\tif i > 
len(values) {\n\t\t\tt.Fatalf(\"read too many values: got %v, exp %v\", i, len(values))\n\t\t}\n\n\t\tif values[i] != dec.Read() {\n\t\t\tt.Fatalf(\"read value %d mismatch: got %v, exp %v\", i, dec.Read(), values[i])\n\t\t}\n\t\ti += 1\n\t}\n\n\tif i != len(values) {\n\t\tt.Fatalf(\"failed to read enough values: got %v, exp %v\", i, len(values))\n\t}\n}\n\nfunc Test_IntegerEncoder_AllNegative(t *testing.T) {\n\tenc := NewIntegerEncoder(3)\n\tvalues := []int64{\n\t\t-10, -5, -1,\n\t}\n\n\tfor _, v := range values {\n\t\tenc.Write(v)\n\t}\n\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif got := b[0] >> 4; intCompressedSimple != got {\n\t\tt.Fatalf(\"encoding type mismatch: exp uncompressed, got %v\", got)\n\t}\n\n\tvar dec IntegerDecoder\n\tdec.SetBytes(b)\n\ti := 0\n\tfor dec.Next() {\n\t\tif i > len(values) {\n\t\t\tt.Fatalf(\"read too many values: got %v, exp %v\", i, len(values))\n\t\t}\n\n\t\tif values[i] != dec.Read() {\n\t\t\tt.Fatalf(\"read value %d mismatch: got %v, exp %v\", i, dec.Read(), values[i])\n\t\t}\n\t\ti += 1\n\t}\n\n\tif i != len(values) {\n\t\tt.Fatalf(\"failed to read enough values: got %v, exp %v\", i, len(values))\n\t}\n}\n\nfunc Test_IntegerEncoder_CounterPacked(t *testing.T) {\n\tenc := NewIntegerEncoder(16)\n\tvalues := []int64{\n\t\t1e15, 1e15 + 1, 1e15 + 2, 1e15 + 3, 1e15 + 4, 1e15 + 6,\n\t}\n\n\tfor _, v := range values {\n\t\tenc.Write(v)\n\t}\n\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif b[0]>>4 != intCompressedSimple {\n\t\tt.Fatalf(\"unexpected encoding format: expected simple, got %v\", b[0]>>4)\n\t}\n\n\t// Should use 1 header byte + 2, 8 byte words if delta-encoding is used based on\n\t// values sizes.  
Without delta-encoding, we'd get 49 bytes.\n\tif exp := 17; len(b) != exp {\n\t\tt.Fatalf(\"encoded length mismatch: got %v, exp %v\", len(b), exp)\n\t}\n\n\tvar dec IntegerDecoder\n\tdec.SetBytes(b)\n\ti := 0\n\tfor dec.Next() {\n\t\tif i > len(values) {\n\t\t\tt.Fatalf(\"read too many values: got %v, exp %v\", i, len(values))\n\t\t}\n\n\t\tif values[i] != dec.Read() {\n\t\t\tt.Fatalf(\"read value %d mismatch: got %v, exp %v\", i, dec.Read(), values[i])\n\t\t}\n\t\ti += 1\n\t}\n\n\tif i != len(values) {\n\t\tt.Fatalf(\"failed to read enough values: got %v, exp %v\", i, len(values))\n\t}\n}\n\nfunc Test_IntegerEncoder_CounterRLE(t *testing.T) {\n\tenc := NewIntegerEncoder(16)\n\tvalues := []int64{\n\t\t1e15, 1e15 + 1, 1e15 + 2, 1e15 + 3, 1e15 + 4, 1e15 + 5,\n\t}\n\n\tfor _, v := range values {\n\t\tenc.Write(v)\n\t}\n\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif b[0]>>4 != intCompressedRLE {\n\t\tt.Fatalf(\"unexpected encoding format: expected RLE, got %v\", b[0]>>4)\n\t}\n\n\t// Should use 1 header byte, 8 byte first value, 1 var-byte for delta and 1 var-byte for\n\t// count of deltas in this particular RLE.\n\tif exp := 11; len(b) != exp {\n\t\tt.Fatalf(\"encoded length mismatch: got %v, exp %v\", len(b), exp)\n\t}\n\n\tvar dec IntegerDecoder\n\tdec.SetBytes(b)\n\ti := 0\n\tfor dec.Next() {\n\t\tif i > len(values) {\n\t\t\tt.Fatalf(\"read too many values: got %v, exp %v\", i, len(values))\n\t\t}\n\n\t\tif values[i] != dec.Read() {\n\t\t\tt.Fatalf(\"read value %d mismatch: got %v, exp %v\", i, dec.Read(), values[i])\n\t\t}\n\t\ti += 1\n\t}\n\n\tif i != len(values) {\n\t\tt.Fatalf(\"failed to read enough values: got %v, exp %v\", i, len(values))\n\t}\n}\n\nfunc Test_IntegerEncoder_Descending(t *testing.T) {\n\tenc := NewIntegerEncoder(16)\n\tvalues := []int64{\n\t\t7094, 4472, 1850,\n\t}\n\n\tfor _, v := range values {\n\t\tenc.Write(v)\n\t}\n\n\tb, err := enc.Bytes()\n\tif err != nil 
{\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif b[0]>>4 != intCompressedRLE {\n\t\tt.Fatalf(\"unexpected encoding format: expected simple, got %v\", b[0]>>4)\n\t}\n\n\t// Should use 1 header byte, 8 byte first value, 1 var-byte for delta and 1 var-byte for\n\t// count of deltas in this particular RLE.\n\tif exp := 12; len(b) != exp {\n\t\tt.Fatalf(\"encoded length mismatch: got %v, exp %v\", len(b), exp)\n\t}\n\n\tvar dec IntegerDecoder\n\tdec.SetBytes(b)\n\ti := 0\n\tfor dec.Next() {\n\t\tif i > len(values) {\n\t\t\tt.Fatalf(\"read too many values: got %v, exp %v\", i, len(values))\n\t\t}\n\n\t\tif values[i] != dec.Read() {\n\t\t\tt.Fatalf(\"read value %d mismatch: got %v, exp %v\", i, dec.Read(), values[i])\n\t\t}\n\t\ti += 1\n\t}\n\n\tif i != len(values) {\n\t\tt.Fatalf(\"failed to read enough values: got %v, exp %v\", i, len(values))\n\t}\n}\n\nfunc Test_IntegerEncoder_Flat(t *testing.T) {\n\tenc := NewIntegerEncoder(16)\n\tvalues := []int64{\n\t\t1, 1, 1, 1,\n\t}\n\n\tfor _, v := range values {\n\t\tenc.Write(v)\n\t}\n\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif b[0]>>4 != intCompressedRLE {\n\t\tt.Fatalf(\"unexpected encoding format: expected simple, got %v\", b[0]>>4)\n\t}\n\n\t// Should use 1 header byte, 8 byte first value, 1 var-byte for delta and 1 var-byte for\n\t// count of deltas in this particular RLE.\n\tif exp := 11; len(b) != exp {\n\t\tt.Fatalf(\"encoded length mismatch: got %v, exp %v\", len(b), exp)\n\t}\n\n\tvar dec IntegerDecoder\n\tdec.SetBytes(b)\n\ti := 0\n\tfor dec.Next() {\n\t\tif i > len(values) {\n\t\t\tt.Fatalf(\"read too many values: got %v, exp %v\", i, len(values))\n\t\t}\n\n\t\tif values[i] != dec.Read() {\n\t\t\tt.Fatalf(\"read value %d mismatch: got %v, exp %v\", i, dec.Read(), values[i])\n\t\t}\n\t\ti += 1\n\t}\n\n\tif i != len(values) {\n\t\tt.Fatalf(\"failed to read enough values: got %v, exp %v\", i, len(values))\n\t}\n}\n\nfunc 
Test_IntegerEncoder_MinMax(t *testing.T) {\n\tenc := NewIntegerEncoder(2)\n\tvalues := []int64{\n\t\tmath.MinInt64, math.MaxInt64,\n\t}\n\n\tfor _, v := range values {\n\t\tenc.Write(v)\n\t}\n\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif b[0]>>4 != intUncompressed {\n\t\tt.Fatalf(\"unexpected encoding format: expected simple, got %v\", b[0]>>4)\n\t}\n\n\tif exp := 17; len(b) != exp {\n\t\tt.Fatalf(\"encoded length mismatch: got %v, exp %v\", len(b), exp)\n\t}\n\n\tvar dec IntegerDecoder\n\tdec.SetBytes(b)\n\ti := 0\n\tfor dec.Next() {\n\t\tif i > len(values) {\n\t\t\tt.Fatalf(\"read too many values: got %v, exp %v\", i, len(values))\n\t\t}\n\n\t\tif values[i] != dec.Read() {\n\t\t\tt.Fatalf(\"read value %d mismatch: got %v, exp %v\", i, dec.Read(), values[i])\n\t\t}\n\t\ti += 1\n\t}\n\n\tif i != len(values) {\n\t\tt.Fatalf(\"failed to read enough values: got %v, exp %v\", i, len(values))\n\t}\n}\n\nfunc Test_IntegerEncoder_Quick(t *testing.T) {\n\tquick.Check(func(values []int64) bool {\n\t\texpected := values\n\t\tif values == nil {\n\t\t\texpected = []int64{} // is this really expected?\n\t\t}\n\n\t\t// Write values to encoder.\n\t\tenc := NewIntegerEncoder(1024)\n\t\tfor _, v := range values {\n\t\t\tenc.Write(v)\n\t\t}\n\n\t\t// Retrieve encoded bytes from encoder.\n\t\tbuf, err := enc.Bytes()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\t// Read values out of decoder.\n\t\tgot := make([]int64, 0, len(values))\n\t\tvar dec IntegerDecoder\n\t\tdec.SetBytes(buf)\n\t\tfor dec.Next() {\n\t\t\tif err := dec.Error(); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tgot = append(got, dec.Read())\n\t\t}\n\n\t\t// Verify that input and output values match.\n\t\tif !reflect.DeepEqual(expected, got) {\n\t\t\tt.Fatalf(\"mismatch:\\n\\nexp=%#v\\n\\ngot=%#v\\n\\n\", expected, got)\n\t\t}\n\n\t\treturn true\n\t}, nil)\n}\n\nfunc Test_IntegerDecoder_Corrupt(t *testing.T) {\n\tcases := []string{\n\t\t\"\",      
               // Empty\n\t\t\"\\x00abc\",              // Uncompressed: less than 8 bytes\n\t\t\"\\x10abc\",              // Packed: less than 8 bytes\n\t\t\"\\x20abc\",              // RLE: less than 8 bytes\n\t\t\"\\x2012345678\\x90\",     // RLE: valid starting value but invalid delta value\n\t\t\"\\x2012345678\\x01\\x90\", // RLE: valid starting, valid delta value, invalid repeat value\n\t}\n\n\tfor _, c := range cases {\n\t\tvar dec IntegerDecoder\n\t\tdec.SetBytes([]byte(c))\n\t\tif dec.Next() {\n\t\t\tt.Fatalf(\"exp next == false, got true\")\n\t\t}\n\t}\n}\n\nfunc BenchmarkIntegerEncoderRLE(b *testing.B) {\n\tenc := NewIntegerEncoder(1024)\n\tx := make([]int64, 1024)\n\tfor i := 0; i < len(x); i++ {\n\t\tx[i] = int64(i)\n\t\tenc.Write(x[i])\n\t}\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tenc.Bytes()\n\t}\n}\n\nfunc BenchmarkIntegerEncoderPackedSimple(b *testing.B) {\n\tenc := NewIntegerEncoder(1024)\n\tx := make([]int64, 1024)\n\tfor i := 0; i < len(x); i++ {\n\t\t// Small amount of randomness prevents RLE from being used\n\t\tx[i] = int64(i) + int64(rand.Intn(10))\n\t\tenc.Write(x[i])\n\t}\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tenc.Bytes()\n\t\tenc.Reset()\n\t\tfor i := 0; i < len(x); i++ {\n\t\t\tenc.Write(x[i])\n\t\t}\n\t}\n}\n\nfunc BenchmarkIntegerDecoderPackedSimple(b *testing.B) {\n\tx := make([]int64, 1024)\n\tenc := NewIntegerEncoder(1024)\n\tfor i := 0; i < len(x); i++ {\n\t\t// Small amount of randomness prevents RLE from being used\n\t\tx[i] = int64(i) + int64(rand.Intn(10))\n\t\tenc.Write(x[i])\n\t}\n\tbytes, _ := enc.Bytes()\n\n\tb.ResetTimer()\n\n\tvar dec IntegerDecoder\n\tfor i := 0; i < b.N; i++ {\n\t\tdec.SetBytes(bytes)\n\t\tfor dec.Next() {\n\t\t}\n\t}\n}\n\nfunc BenchmarkIntegerDecoderRLE(b *testing.B) {\n\tx := make([]int64, 1024)\n\tenc := NewIntegerEncoder(1024)\n\tfor i := 0; i < len(x); i++ {\n\t\tx[i] = int64(i)\n\t\tenc.Write(x[i])\n\t}\n\tbytes, _ := enc.Bytes()\n\n\tb.ResetTimer()\n\n\tvar dec 
IntegerDecoder\n\tdec.SetBytes(bytes)\n\n\tfor i := 0; i < b.N; i++ {\n\t\tdec.SetBytes(bytes)\n\t\tfor dec.Next() {\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.gen.go",
    "content": "// Generated by tmpl\n// https://github.com/benbjohnson/tmpl\n//\n// DO NOT EDIT!\n// Source: iterator.gen.go.tmpl\n\npackage tsm1\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"sort\"\n\t\"sync\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t\"github.com/uber-go/zap\"\n)\n\ntype cursor interface {\n\tclose() error\n\tnext() (t int64, v interface{})\n}\n\n// cursorAt provides a bufferred cursor interface.\n// This required for literal value cursors which don't have a time value.\ntype cursorAt interface {\n\tclose() error\n\tpeek() (k int64, v interface{})\n\tnextAt(seek int64) interface{}\n}\n\ntype nilCursor struct{}\n\nfunc (nilCursor) next() (int64, interface{}) { return tsdb.EOF, nil }\n\n// bufCursor implements a bufferred cursor.\ntype bufCursor struct {\n\tcur cursor\n\tbuf struct {\n\t\tkey    int64\n\t\tvalue  interface{}\n\t\tfilled bool\n\t}\n\tascending bool\n}\n\n// newBufCursor returns a bufferred wrapper for cur.\nfunc newBufCursor(cur cursor, ascending bool) *bufCursor {\n\treturn &bufCursor{cur: cur, ascending: ascending}\n}\n\nfunc (c *bufCursor) close() error {\n\terr := c.cur.close()\n\tc.cur = nil\n\treturn err\n}\n\n// next returns the buffer, if filled. 
Otherwise returns the next key/value from the cursor.\nfunc (c *bufCursor) next() (int64, interface{}) {\n\tif c.buf.filled {\n\t\tk, v := c.buf.key, c.buf.value\n\t\tc.buf.filled = false\n\t\treturn k, v\n\t}\n\treturn c.cur.next()\n}\n\n// unread pushes k and v onto the buffer.\nfunc (c *bufCursor) unread(k int64, v interface{}) {\n\tc.buf.key, c.buf.value = k, v\n\tc.buf.filled = true\n}\n\n// peek reads next next key/value without removing them from the cursor.\nfunc (c *bufCursor) peek() (k int64, v interface{}) {\n\tk, v = c.next()\n\tc.unread(k, v)\n\treturn\n}\n\n// nextAt returns the next value where key is equal to seek.\n// Skips over any keys that are less than seek.\n// If the key doesn't exist then a nil value is returned instead.\nfunc (c *bufCursor) nextAt(seek int64) interface{} {\n\tfor {\n\t\tk, v := c.next()\n\t\tif k != tsdb.EOF {\n\t\t\tif k == seek {\n\t\t\t\treturn v\n\t\t\t} else if c.ascending && k < seek {\n\t\t\t\tcontinue\n\t\t\t} else if !c.ascending && k > seek {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.unread(k, v)\n\t\t}\n\n\t\t// Return \"nil\" value for type.\n\t\tswitch c.cur.(type) {\n\t\tcase floatCursor:\n\t\t\treturn (*float64)(nil)\n\t\tcase integerCursor:\n\t\t\treturn (*int64)(nil)\n\t\tcase stringCursor:\n\t\t\treturn (*string)(nil)\n\t\tcase booleanCursor:\n\t\t\treturn (*bool)(nil)\n\t\tdefault:\n\t\t\tpanic(\"unreachable\")\n\t\t}\n\t}\n}\n\n// statsBufferCopyIntervalN is the number of points that are read before\n// copying the stats buffer to the iterator's stats field. 
This is used to\n// amortize the cost of using a mutex when updating stats.\nconst statsBufferCopyIntervalN = 100\n\ntype floatFinalizerIterator struct {\n\tinfluxql.FloatIterator\n\tlogger zap.Logger\n}\n\nfunc newFloatFinalizerIterator(inner influxql.FloatIterator, logger zap.Logger) *floatFinalizerIterator {\n\titr := &floatFinalizerIterator{FloatIterator: inner, logger: logger}\n\truntime.SetFinalizer(itr, (*floatFinalizerIterator).closeGC)\n\treturn itr\n}\n\nfunc (itr *floatFinalizerIterator) closeGC() {\n\truntime.SetFinalizer(itr, nil)\n\titr.logger.Error(\"FloatIterator finalized by GC\")\n\titr.Close()\n}\n\nfunc (itr *floatFinalizerIterator) Close() error {\n\truntime.SetFinalizer(itr, nil)\n\treturn itr.FloatIterator.Close()\n}\n\ntype floatIterator struct {\n\tcur   floatCursor\n\taux   []cursorAt\n\tconds struct {\n\t\tnames []string\n\t\tcurs  []cursorAt\n\t}\n\topt influxql.IteratorOptions\n\n\tm     map[string]interface{} // map used for condition evaluation\n\tpoint influxql.FloatPoint    // reusable buffer\n\n\tstatsLock sync.Mutex\n\tstats     influxql.IteratorStats\n\tstatsBuf  influxql.IteratorStats\n}\n\nfunc newFloatIterator(name string, tags influxql.Tags, opt influxql.IteratorOptions, cur floatCursor, aux []cursorAt, conds []cursorAt, condNames []string) *floatIterator {\n\titr := &floatIterator{\n\t\tcur: cur,\n\t\taux: aux,\n\t\topt: opt,\n\t\tpoint: influxql.FloatPoint{\n\t\t\tName: name,\n\t\t\tTags: tags,\n\t\t},\n\t\tstatsBuf: influxql.IteratorStats{\n\t\t\tSeriesN: 1,\n\t\t},\n\t}\n\titr.stats = itr.statsBuf\n\n\tif len(aux) > 0 {\n\t\titr.point.Aux = make([]interface{}, len(aux))\n\t}\n\n\tif opt.Condition != nil {\n\t\titr.m = make(map[string]interface{}, len(aux)+len(conds))\n\t}\n\titr.conds.names = condNames\n\titr.conds.curs = conds\n\n\treturn itr\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *floatIterator) Next() (*influxql.FloatPoint, error) {\n\tfor {\n\t\tseek := tsdb.EOF\n\n\t\tif itr.cur != nil 
{\n\t\t\t// Read from the main cursor if we have one.\n\t\t\titr.point.Time, itr.point.Value = itr.cur.nextFloat()\n\t\t\tseek = itr.point.Time\n\t\t} else {\n\t\t\t// Otherwise find lowest aux timestamp.\n\t\t\tfor i := range itr.aux {\n\t\t\t\tif k, _ := itr.aux[i].peek(); k != tsdb.EOF {\n\t\t\t\t\tif seek == tsdb.EOF || (itr.opt.Ascending && k < seek) || (!itr.opt.Ascending && k > seek) {\n\t\t\t\t\t\tseek = k\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\titr.point.Time = seek\n\t\t}\n\n\t\t// Exit if we have no more points or we are outside our time range.\n\t\tif itr.point.Time == tsdb.EOF {\n\t\t\titr.copyStats()\n\t\t\treturn nil, nil\n\t\t} else if itr.opt.Ascending && itr.point.Time > itr.opt.EndTime {\n\t\t\titr.copyStats()\n\t\t\treturn nil, nil\n\t\t} else if !itr.opt.Ascending && itr.point.Time < itr.opt.StartTime {\n\t\t\titr.copyStats()\n\t\t\treturn nil, nil\n\t\t}\n\n\t\t// Read from each auxiliary cursor.\n\t\tfor i := range itr.opt.Aux {\n\t\t\titr.point.Aux[i] = itr.aux[i].nextAt(seek)\n\t\t}\n\n\t\t// Read from condition field cursors.\n\t\tfor i := range itr.conds.curs {\n\t\t\titr.m[itr.conds.names[i]] = itr.conds.curs[i].nextAt(seek)\n\t\t}\n\n\t\t// Evaluate condition, if one exists. 
Retry if it fails.\n\t\tif itr.opt.Condition != nil && !influxql.EvalBool(itr.opt.Condition, itr.m) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Track points returned.\n\t\titr.statsBuf.PointN++\n\n\t\t// Copy buffer to stats periodically.\n\t\tif itr.statsBuf.PointN%statsBufferCopyIntervalN == 0 {\n\t\t\titr.copyStats()\n\t\t}\n\n\t\treturn &itr.point, nil\n\t}\n}\n\n// copyStats copies from the itr stats buffer to the stats under lock.\nfunc (itr *floatIterator) copyStats() {\n\titr.statsLock.Lock()\n\titr.stats = itr.statsBuf\n\titr.statsLock.Unlock()\n}\n\n// Stats returns stats on the points processed.\nfunc (itr *floatIterator) Stats() influxql.IteratorStats {\n\titr.statsLock.Lock()\n\tstats := itr.stats\n\titr.statsLock.Unlock()\n\treturn stats\n}\n\n// Close closes the iterator.\nfunc (itr *floatIterator) Close() error {\n\tcursorsAt(itr.aux).close()\n\titr.aux = nil\n\tcursorsAt(itr.conds.curs).close()\n\titr.conds.curs = nil\n\tif itr.cur != nil {\n\t\terr := itr.cur.close()\n\t\titr.cur = nil\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// floatLimitIterator\ntype floatLimitIterator struct {\n\tinput influxql.FloatIterator\n\topt   influxql.IteratorOptions\n\tn     int\n}\n\nfunc newFloatLimitIterator(input influxql.FloatIterator, opt influxql.IteratorOptions) *floatLimitIterator {\n\treturn &floatLimitIterator{\n\t\tinput: input,\n\t\topt:   opt,\n\t}\n}\n\nfunc (itr *floatLimitIterator) Stats() influxql.IteratorStats { return itr.input.Stats() }\nfunc (itr *floatLimitIterator) Close() error                  { return itr.input.Close() }\n\nfunc (itr *floatLimitIterator) Next() (*influxql.FloatPoint, error) {\n\t// Check if we are beyond the limit.\n\tif (itr.n - itr.opt.Offset) > itr.opt.Limit {\n\t\treturn nil, nil\n\t}\n\n\t// Read the next point.\n\tp, err := itr.input.Next()\n\tif p == nil || err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Increment counter.\n\titr.n++\n\n\t// Offsets are handled by a higher level iterator so return all points.\n\treturn p, 
nil\n}\n\n// floatCursor represents an object for iterating over a single float field.\ntype floatCursor interface {\n\tcursor\n\tnextFloat() (t int64, v float64)\n}\n\nfunc newFloatCursor(seek int64, ascending bool, cacheValues Values, tsmKeyCursor *KeyCursor) floatCursor {\n\tif ascending {\n\t\treturn newFloatAscendingCursor(seek, cacheValues, tsmKeyCursor)\n\t}\n\treturn newFloatDescendingCursor(seek, cacheValues, tsmKeyCursor)\n}\n\ntype floatAscendingCursor struct {\n\tcache struct {\n\t\tvalues Values\n\t\tpos    int\n\t}\n\n\ttsm struct {\n\t\tbuf       []FloatValue\n\t\tvalues    []FloatValue\n\t\tpos       int\n\t\tkeyCursor *KeyCursor\n\t}\n}\n\nfunc newFloatAscendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *floatAscendingCursor {\n\tc := &floatAscendingCursor{}\n\n\tc.cache.values = cacheValues\n\tc.cache.pos = sort.Search(len(c.cache.values), func(i int) bool {\n\t\treturn c.cache.values[i].UnixNano() >= seek\n\t})\n\n\tc.tsm.keyCursor = tsmKeyCursor\n\tc.tsm.buf = make([]FloatValue, 10)\n\tc.tsm.values, _ = c.tsm.keyCursor.ReadFloatBlock(&c.tsm.buf)\n\tc.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool {\n\t\treturn c.tsm.values[i].UnixNano() >= seek\n\t})\n\n\treturn c\n}\n\n// peekCache returns the current time/value from the cache.\nfunc (c *floatAscendingCursor) peekCache() (t int64, v float64) {\n\tif c.cache.pos >= len(c.cache.values) {\n\t\treturn tsdb.EOF, 0\n\t}\n\n\titem := c.cache.values[c.cache.pos]\n\treturn item.UnixNano(), item.(FloatValue).value\n}\n\n// peekTSM returns the current time/value from tsm.\nfunc (c *floatAscendingCursor) peekTSM() (t int64, v float64) {\n\tif c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) {\n\t\treturn tsdb.EOF, 0\n\t}\n\n\titem := c.tsm.values[c.tsm.pos]\n\treturn item.UnixNano(), item.value\n}\n\n// close closes the cursor and any dependent cursors.\nfunc (c *floatAscendingCursor) close() error {\n\tc.tsm.keyCursor.Close()\n\tc.tsm.keyCursor = nil\n\tc.tsm.buf = 
nil\n\tc.cache.values = nil\n\tc.tsm.values = nil\n\treturn nil\n}\n\n// next returns the next key/value for the cursor.\nfunc (c *floatAscendingCursor) next() (int64, interface{}) { return c.nextFloat() }\n\n// nextFloat returns the next key/value for the cursor.\nfunc (c *floatAscendingCursor) nextFloat() (int64, float64) {\n\tckey, cvalue := c.peekCache()\n\ttkey, tvalue := c.peekTSM()\n\n\t// No more data in cache or in TSM files.\n\tif ckey == tsdb.EOF && tkey == tsdb.EOF {\n\t\treturn tsdb.EOF, 0\n\t}\n\n\t// Both cache and tsm files have the same key, cache takes precedence.\n\tif ckey == tkey {\n\t\tc.nextCache()\n\t\tc.nextTSM()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered cache key precedes that in TSM file.\n\tif ckey != tsdb.EOF && (ckey < tkey || tkey == tsdb.EOF) {\n\t\tc.nextCache()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered TSM key precedes that in cache.\n\tc.nextTSM()\n\treturn tkey, tvalue\n}\n\n// nextCache returns the next value from the cache.\nfunc (c *floatAscendingCursor) nextCache() {\n\tif c.cache.pos >= len(c.cache.values) {\n\t\treturn\n\t}\n\tc.cache.pos++\n}\n\n// nextTSM returns the next value from the TSM files.\nfunc (c *floatAscendingCursor) nextTSM() {\n\tc.tsm.pos++\n\tif c.tsm.pos >= len(c.tsm.values) {\n\t\tc.tsm.keyCursor.Next()\n\t\tc.tsm.values, _ = c.tsm.keyCursor.ReadFloatBlock(&c.tsm.buf)\n\t\tif len(c.tsm.values) == 0 {\n\t\t\treturn\n\t\t}\n\t\tc.tsm.pos = 0\n\t}\n}\n\ntype floatDescendingCursor struct {\n\tcache struct {\n\t\tvalues Values\n\t\tpos    int\n\t}\n\n\ttsm struct {\n\t\tbuf       []FloatValue\n\t\tvalues    []FloatValue\n\t\tpos       int\n\t\tkeyCursor *KeyCursor\n\t}\n}\n\nfunc newFloatDescendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *floatDescendingCursor {\n\tc := &floatDescendingCursor{}\n\n\tc.cache.values = cacheValues\n\tc.cache.pos = sort.Search(len(c.cache.values), func(i int) bool {\n\t\treturn c.cache.values[i].UnixNano() >= seek\n\t})\n\tif t, _ := 
c.peekCache(); t != seek {\n\t\tc.cache.pos--\n\t}\n\n\tc.tsm.keyCursor = tsmKeyCursor\n\tc.tsm.buf = make([]FloatValue, 10)\n\tc.tsm.values, _ = c.tsm.keyCursor.ReadFloatBlock(&c.tsm.buf)\n\tc.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool {\n\t\treturn c.tsm.values[i].UnixNano() >= seek\n\t})\n\tif t, _ := c.peekTSM(); t != seek {\n\t\tc.tsm.pos--\n\t}\n\n\treturn c\n}\n\n// peekCache returns the current time/value from the cache.\nfunc (c *floatDescendingCursor) peekCache() (t int64, v float64) {\n\tif c.cache.pos < 0 || c.cache.pos >= len(c.cache.values) {\n\t\treturn tsdb.EOF, 0\n\t}\n\n\titem := c.cache.values[c.cache.pos]\n\treturn item.UnixNano(), item.(FloatValue).value\n}\n\n// peekTSM returns the current time/value from tsm.\nfunc (c *floatDescendingCursor) peekTSM() (t int64, v float64) {\n\tif c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) {\n\t\treturn tsdb.EOF, 0\n\t}\n\n\titem := c.tsm.values[c.tsm.pos]\n\treturn item.UnixNano(), item.value\n}\n\n// close closes the cursor and any dependent cursors.\nfunc (c *floatDescendingCursor) close() error {\n\tc.tsm.keyCursor.Close()\n\tc.tsm.keyCursor = nil\n\tc.tsm.buf = nil\n\tc.cache.values = nil\n\tc.tsm.values = nil\n\treturn nil\n}\n\n// next returns the next key/value for the cursor.\nfunc (c *floatDescendingCursor) next() (int64, interface{}) { return c.nextFloat() }\n\n// nextFloat returns the next key/value for the cursor.\nfunc (c *floatDescendingCursor) nextFloat() (int64, float64) {\n\tckey, cvalue := c.peekCache()\n\ttkey, tvalue := c.peekTSM()\n\n\t// No more data in cache or in TSM files.\n\tif ckey == tsdb.EOF && tkey == tsdb.EOF {\n\t\treturn tsdb.EOF, 0\n\t}\n\n\t// Both cache and tsm files have the same key, cache takes precedence.\n\tif ckey == tkey {\n\t\tc.nextCache()\n\t\tc.nextTSM()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered cache key precedes that in TSM file.\n\tif ckey != tsdb.EOF && (ckey > tkey || tkey == tsdb.EOF) {\n\t\tc.nextCache()\n\t\treturn ckey, 
cvalue\n\t}\n\n\t// Buffered TSM key precedes that in cache.\n\tc.nextTSM()\n\treturn tkey, tvalue\n}\n\n// nextCache returns the next value from the cache.\nfunc (c *floatDescendingCursor) nextCache() {\n\tif c.cache.pos < 0 {\n\t\treturn\n\t}\n\tc.cache.pos--\n}\n\n// nextTSM returns the next value from the TSM files.\nfunc (c *floatDescendingCursor) nextTSM() {\n\tc.tsm.pos--\n\tif c.tsm.pos < 0 {\n\t\tc.tsm.keyCursor.Next()\n\t\tc.tsm.values, _ = c.tsm.keyCursor.ReadFloatBlock(&c.tsm.buf)\n\t\tif len(c.tsm.values) == 0 {\n\t\t\treturn\n\t\t}\n\t\tc.tsm.pos = len(c.tsm.values) - 1\n\t}\n}\n\n// floatLiteralCursor represents a cursor that always returns a single value.\n// It doesn't not have a time value so it can only be used with nextAt().\ntype floatLiteralCursor struct {\n\tvalue float64\n}\n\nfunc (c *floatLiteralCursor) close() error                   { return nil }\nfunc (c *floatLiteralCursor) peek() (t int64, v interface{}) { return tsdb.EOF, c.value }\nfunc (c *floatLiteralCursor) next() (t int64, v interface{}) { return tsdb.EOF, c.value }\nfunc (c *floatLiteralCursor) nextAt(seek int64) interface{}  { return c.value }\n\n// floatNilLiteralCursor represents a cursor that always returns a typed nil value.\n// It doesn't not have a time value so it can only be used with nextAt().\ntype floatNilLiteralCursor struct{}\n\nfunc (c *floatNilLiteralCursor) close() error                   { return nil }\nfunc (c *floatNilLiteralCursor) peek() (t int64, v interface{}) { return tsdb.EOF, (*float64)(nil) }\nfunc (c *floatNilLiteralCursor) next() (t int64, v interface{}) { return tsdb.EOF, (*float64)(nil) }\nfunc (c *floatNilLiteralCursor) nextAt(seek int64) interface{}  { return (*float64)(nil) }\n\ntype integerFinalizerIterator struct {\n\tinfluxql.IntegerIterator\n\tlogger zap.Logger\n}\n\nfunc newIntegerFinalizerIterator(inner influxql.IntegerIterator, logger zap.Logger) *integerFinalizerIterator {\n\titr := &integerFinalizerIterator{IntegerIterator: inner, 
logger: logger}\n\truntime.SetFinalizer(itr, (*integerFinalizerIterator).closeGC)\n\treturn itr\n}\n\nfunc (itr *integerFinalizerIterator) closeGC() {\n\truntime.SetFinalizer(itr, nil)\n\titr.logger.Error(\"IntegerIterator finalized by GC\")\n\titr.Close()\n}\n\nfunc (itr *integerFinalizerIterator) Close() error {\n\truntime.SetFinalizer(itr, nil)\n\treturn itr.IntegerIterator.Close()\n}\n\ntype integerIterator struct {\n\tcur   integerCursor\n\taux   []cursorAt\n\tconds struct {\n\t\tnames []string\n\t\tcurs  []cursorAt\n\t}\n\topt influxql.IteratorOptions\n\n\tm     map[string]interface{} // map used for condition evaluation\n\tpoint influxql.IntegerPoint  // reusable buffer\n\n\tstatsLock sync.Mutex\n\tstats     influxql.IteratorStats\n\tstatsBuf  influxql.IteratorStats\n}\n\nfunc newIntegerIterator(name string, tags influxql.Tags, opt influxql.IteratorOptions, cur integerCursor, aux []cursorAt, conds []cursorAt, condNames []string) *integerIterator {\n\titr := &integerIterator{\n\t\tcur: cur,\n\t\taux: aux,\n\t\topt: opt,\n\t\tpoint: influxql.IntegerPoint{\n\t\t\tName: name,\n\t\t\tTags: tags,\n\t\t},\n\t\tstatsBuf: influxql.IteratorStats{\n\t\t\tSeriesN: 1,\n\t\t},\n\t}\n\titr.stats = itr.statsBuf\n\n\tif len(aux) > 0 {\n\t\titr.point.Aux = make([]interface{}, len(aux))\n\t}\n\n\tif opt.Condition != nil {\n\t\titr.m = make(map[string]interface{}, len(aux)+len(conds))\n\t}\n\titr.conds.names = condNames\n\titr.conds.curs = conds\n\n\treturn itr\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *integerIterator) Next() (*influxql.IntegerPoint, error) {\n\tfor {\n\t\tseek := tsdb.EOF\n\n\t\tif itr.cur != nil {\n\t\t\t// Read from the main cursor if we have one.\n\t\t\titr.point.Time, itr.point.Value = itr.cur.nextInteger()\n\t\t\tseek = itr.point.Time\n\t\t} else {\n\t\t\t// Otherwise find lowest aux timestamp.\n\t\t\tfor i := range itr.aux {\n\t\t\t\tif k, _ := itr.aux[i].peek(); k != tsdb.EOF {\n\t\t\t\t\tif seek == tsdb.EOF || 
(itr.opt.Ascending && k < seek) || (!itr.opt.Ascending && k > seek) {\n\t\t\t\t\t\tseek = k\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\titr.point.Time = seek\n\t\t}\n\n\t\t// Exit if we have no more points or we are outside our time range.\n\t\tif itr.point.Time == tsdb.EOF {\n\t\t\titr.copyStats()\n\t\t\treturn nil, nil\n\t\t} else if itr.opt.Ascending && itr.point.Time > itr.opt.EndTime {\n\t\t\titr.copyStats()\n\t\t\treturn nil, nil\n\t\t} else if !itr.opt.Ascending && itr.point.Time < itr.opt.StartTime {\n\t\t\titr.copyStats()\n\t\t\treturn nil, nil\n\t\t}\n\n\t\t// Read from each auxiliary cursor.\n\t\tfor i := range itr.opt.Aux {\n\t\t\titr.point.Aux[i] = itr.aux[i].nextAt(seek)\n\t\t}\n\n\t\t// Read from condition field cursors.\n\t\tfor i := range itr.conds.curs {\n\t\t\titr.m[itr.conds.names[i]] = itr.conds.curs[i].nextAt(seek)\n\t\t}\n\n\t\t// Evaluate condition, if one exists. Retry if it fails.\n\t\tif itr.opt.Condition != nil && !influxql.EvalBool(itr.opt.Condition, itr.m) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Track points returned.\n\t\titr.statsBuf.PointN++\n\n\t\t// Copy buffer to stats periodically.\n\t\tif itr.statsBuf.PointN%statsBufferCopyIntervalN == 0 {\n\t\t\titr.copyStats()\n\t\t}\n\n\t\treturn &itr.point, nil\n\t}\n}\n\n// copyStats copies from the itr stats buffer to the stats under lock.\nfunc (itr *integerIterator) copyStats() {\n\titr.statsLock.Lock()\n\titr.stats = itr.statsBuf\n\titr.statsLock.Unlock()\n}\n\n// Stats returns stats on the points processed.\nfunc (itr *integerIterator) Stats() influxql.IteratorStats {\n\titr.statsLock.Lock()\n\tstats := itr.stats\n\titr.statsLock.Unlock()\n\treturn stats\n}\n\n// Close closes the iterator.\nfunc (itr *integerIterator) Close() error {\n\tcursorsAt(itr.aux).close()\n\titr.aux = nil\n\tcursorsAt(itr.conds.curs).close()\n\titr.conds.curs = nil\n\tif itr.cur != nil {\n\t\terr := itr.cur.close()\n\t\titr.cur = nil\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// integerLimitIterator\ntype 
integerLimitIterator struct {\n\tinput influxql.IntegerIterator\n\topt   influxql.IteratorOptions\n\tn     int\n}\n\nfunc newIntegerLimitIterator(input influxql.IntegerIterator, opt influxql.IteratorOptions) *integerLimitIterator {\n\treturn &integerLimitIterator{\n\t\tinput: input,\n\t\topt:   opt,\n\t}\n}\n\nfunc (itr *integerLimitIterator) Stats() influxql.IteratorStats { return itr.input.Stats() }\nfunc (itr *integerLimitIterator) Close() error                  { return itr.input.Close() }\n\nfunc (itr *integerLimitIterator) Next() (*influxql.IntegerPoint, error) {\n\t// Check if we are beyond the limit.\n\tif (itr.n - itr.opt.Offset) > itr.opt.Limit {\n\t\treturn nil, nil\n\t}\n\n\t// Read the next point.\n\tp, err := itr.input.Next()\n\tif p == nil || err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Increment counter.\n\titr.n++\n\n\t// Offsets are handled by a higher level iterator so return all points.\n\treturn p, nil\n}\n\n// integerCursor represents an object for iterating over a single integer field.\ntype integerCursor interface {\n\tcursor\n\tnextInteger() (t int64, v int64)\n}\n\nfunc newIntegerCursor(seek int64, ascending bool, cacheValues Values, tsmKeyCursor *KeyCursor) integerCursor {\n\tif ascending {\n\t\treturn newIntegerAscendingCursor(seek, cacheValues, tsmKeyCursor)\n\t}\n\treturn newIntegerDescendingCursor(seek, cacheValues, tsmKeyCursor)\n}\n\ntype integerAscendingCursor struct {\n\tcache struct {\n\t\tvalues Values\n\t\tpos    int\n\t}\n\n\ttsm struct {\n\t\tbuf       []IntegerValue\n\t\tvalues    []IntegerValue\n\t\tpos       int\n\t\tkeyCursor *KeyCursor\n\t}\n}\n\nfunc newIntegerAscendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *integerAscendingCursor {\n\tc := &integerAscendingCursor{}\n\n\tc.cache.values = cacheValues\n\tc.cache.pos = sort.Search(len(c.cache.values), func(i int) bool {\n\t\treturn c.cache.values[i].UnixNano() >= seek\n\t})\n\n\tc.tsm.keyCursor = tsmKeyCursor\n\tc.tsm.buf = make([]IntegerValue, 
10)\n\tc.tsm.values, _ = c.tsm.keyCursor.ReadIntegerBlock(&c.tsm.buf)\n\tc.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool {\n\t\treturn c.tsm.values[i].UnixNano() >= seek\n\t})\n\n\treturn c\n}\n\n// peekCache returns the current time/value from the cache.\nfunc (c *integerAscendingCursor) peekCache() (t int64, v int64) {\n\tif c.cache.pos >= len(c.cache.values) {\n\t\treturn tsdb.EOF, 0\n\t}\n\n\titem := c.cache.values[c.cache.pos]\n\treturn item.UnixNano(), item.(IntegerValue).value\n}\n\n// peekTSM returns the current time/value from tsm.\nfunc (c *integerAscendingCursor) peekTSM() (t int64, v int64) {\n\tif c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) {\n\t\treturn tsdb.EOF, 0\n\t}\n\n\titem := c.tsm.values[c.tsm.pos]\n\treturn item.UnixNano(), item.value\n}\n\n// close closes the cursor and any dependent cursors.\nfunc (c *integerAscendingCursor) close() error {\n\tc.tsm.keyCursor.Close()\n\tc.tsm.keyCursor = nil\n\tc.tsm.buf = nil\n\tc.cache.values = nil\n\tc.tsm.values = nil\n\treturn nil\n}\n\n// next returns the next key/value for the cursor.\nfunc (c *integerAscendingCursor) next() (int64, interface{}) { return c.nextInteger() }\n\n// nextInteger returns the next key/value for the cursor.\nfunc (c *integerAscendingCursor) nextInteger() (int64, int64) {\n\tckey, cvalue := c.peekCache()\n\ttkey, tvalue := c.peekTSM()\n\n\t// No more data in cache or in TSM files.\n\tif ckey == tsdb.EOF && tkey == tsdb.EOF {\n\t\treturn tsdb.EOF, 0\n\t}\n\n\t// Both cache and tsm files have the same key, cache takes precedence.\n\tif ckey == tkey {\n\t\tc.nextCache()\n\t\tc.nextTSM()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered cache key precedes that in TSM file.\n\tif ckey != tsdb.EOF && (ckey < tkey || tkey == tsdb.EOF) {\n\t\tc.nextCache()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered TSM key precedes that in cache.\n\tc.nextTSM()\n\treturn tkey, tvalue\n}\n\n// nextCache returns the next value from the cache.\nfunc (c *integerAscendingCursor) 
nextCache() {\n\tif c.cache.pos >= len(c.cache.values) {\n\t\treturn\n\t}\n\tc.cache.pos++\n}\n\n// nextTSM returns the next value from the TSM files.\nfunc (c *integerAscendingCursor) nextTSM() {\n\tc.tsm.pos++\n\tif c.tsm.pos >= len(c.tsm.values) {\n\t\tc.tsm.keyCursor.Next()\n\t\tc.tsm.values, _ = c.tsm.keyCursor.ReadIntegerBlock(&c.tsm.buf)\n\t\tif len(c.tsm.values) == 0 {\n\t\t\treturn\n\t\t}\n\t\tc.tsm.pos = 0\n\t}\n}\n\ntype integerDescendingCursor struct {\n\tcache struct {\n\t\tvalues Values\n\t\tpos    int\n\t}\n\n\ttsm struct {\n\t\tbuf       []IntegerValue\n\t\tvalues    []IntegerValue\n\t\tpos       int\n\t\tkeyCursor *KeyCursor\n\t}\n}\n\nfunc newIntegerDescendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *integerDescendingCursor {\n\tc := &integerDescendingCursor{}\n\n\tc.cache.values = cacheValues\n\tc.cache.pos = sort.Search(len(c.cache.values), func(i int) bool {\n\t\treturn c.cache.values[i].UnixNano() >= seek\n\t})\n\tif t, _ := c.peekCache(); t != seek {\n\t\tc.cache.pos--\n\t}\n\n\tc.tsm.keyCursor = tsmKeyCursor\n\tc.tsm.buf = make([]IntegerValue, 10)\n\tc.tsm.values, _ = c.tsm.keyCursor.ReadIntegerBlock(&c.tsm.buf)\n\tc.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool {\n\t\treturn c.tsm.values[i].UnixNano() >= seek\n\t})\n\tif t, _ := c.peekTSM(); t != seek {\n\t\tc.tsm.pos--\n\t}\n\n\treturn c\n}\n\n// peekCache returns the current time/value from the cache.\nfunc (c *integerDescendingCursor) peekCache() (t int64, v int64) {\n\tif c.cache.pos < 0 || c.cache.pos >= len(c.cache.values) {\n\t\treturn tsdb.EOF, 0\n\t}\n\n\titem := c.cache.values[c.cache.pos]\n\treturn item.UnixNano(), item.(IntegerValue).value\n}\n\n// peekTSM returns the current time/value from tsm.\nfunc (c *integerDescendingCursor) peekTSM() (t int64, v int64) {\n\tif c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) {\n\t\treturn tsdb.EOF, 0\n\t}\n\n\titem := c.tsm.values[c.tsm.pos]\n\treturn item.UnixNano(), item.value\n}\n\n// close closes the 
cursor and any dependent cursors.\nfunc (c *integerDescendingCursor) close() error {\n\tc.tsm.keyCursor.Close()\n\tc.tsm.keyCursor = nil\n\tc.tsm.buf = nil\n\tc.cache.values = nil\n\tc.tsm.values = nil\n\treturn nil\n}\n\n// next returns the next key/value for the cursor.\nfunc (c *integerDescendingCursor) next() (int64, interface{}) { return c.nextInteger() }\n\n// nextInteger returns the next key/value for the cursor.\nfunc (c *integerDescendingCursor) nextInteger() (int64, int64) {\n\tckey, cvalue := c.peekCache()\n\ttkey, tvalue := c.peekTSM()\n\n\t// No more data in cache or in TSM files.\n\tif ckey == tsdb.EOF && tkey == tsdb.EOF {\n\t\treturn tsdb.EOF, 0\n\t}\n\n\t// Both cache and tsm files have the same key, cache takes precedence.\n\tif ckey == tkey {\n\t\tc.nextCache()\n\t\tc.nextTSM()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered cache key precedes that in TSM file.\n\tif ckey != tsdb.EOF && (ckey > tkey || tkey == tsdb.EOF) {\n\t\tc.nextCache()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered TSM key precedes that in cache.\n\tc.nextTSM()\n\treturn tkey, tvalue\n}\n\n// nextCache returns the next value from the cache.\nfunc (c *integerDescendingCursor) nextCache() {\n\tif c.cache.pos < 0 {\n\t\treturn\n\t}\n\tc.cache.pos--\n}\n\n// nextTSM returns the next value from the TSM files.\nfunc (c *integerDescendingCursor) nextTSM() {\n\tc.tsm.pos--\n\tif c.tsm.pos < 0 {\n\t\tc.tsm.keyCursor.Next()\n\t\tc.tsm.values, _ = c.tsm.keyCursor.ReadIntegerBlock(&c.tsm.buf)\n\t\tif len(c.tsm.values) == 0 {\n\t\t\treturn\n\t\t}\n\t\tc.tsm.pos = len(c.tsm.values) - 1\n\t}\n}\n\n// integerLiteralCursor represents a cursor that always returns a single value.\n// It doesn't not have a time value so it can only be used with nextAt().\ntype integerLiteralCursor struct {\n\tvalue int64\n}\n\nfunc (c *integerLiteralCursor) close() error                   { return nil }\nfunc (c *integerLiteralCursor) peek() (t int64, v interface{}) { return tsdb.EOF, c.value }\nfunc (c 
*integerLiteralCursor) next() (t int64, v interface{}) { return tsdb.EOF, c.value }\nfunc (c *integerLiteralCursor) nextAt(seek int64) interface{}  { return c.value }\n\n// integerNilLiteralCursor represents a cursor that always returns a typed nil value.\n// It doesn't not have a time value so it can only be used with nextAt().\ntype integerNilLiteralCursor struct{}\n\nfunc (c *integerNilLiteralCursor) close() error                   { return nil }\nfunc (c *integerNilLiteralCursor) peek() (t int64, v interface{}) { return tsdb.EOF, (*int64)(nil) }\nfunc (c *integerNilLiteralCursor) next() (t int64, v interface{}) { return tsdb.EOF, (*int64)(nil) }\nfunc (c *integerNilLiteralCursor) nextAt(seek int64) interface{}  { return (*int64)(nil) }\n\ntype stringFinalizerIterator struct {\n\tinfluxql.StringIterator\n\tlogger zap.Logger\n}\n\nfunc newStringFinalizerIterator(inner influxql.StringIterator, logger zap.Logger) *stringFinalizerIterator {\n\titr := &stringFinalizerIterator{StringIterator: inner, logger: logger}\n\truntime.SetFinalizer(itr, (*stringFinalizerIterator).closeGC)\n\treturn itr\n}\n\nfunc (itr *stringFinalizerIterator) closeGC() {\n\truntime.SetFinalizer(itr, nil)\n\titr.logger.Error(\"StringIterator finalized by GC\")\n\titr.Close()\n}\n\nfunc (itr *stringFinalizerIterator) Close() error {\n\truntime.SetFinalizer(itr, nil)\n\treturn itr.StringIterator.Close()\n}\n\ntype stringIterator struct {\n\tcur   stringCursor\n\taux   []cursorAt\n\tconds struct {\n\t\tnames []string\n\t\tcurs  []cursorAt\n\t}\n\topt influxql.IteratorOptions\n\n\tm     map[string]interface{} // map used for condition evaluation\n\tpoint influxql.StringPoint   // reusable buffer\n\n\tstatsLock sync.Mutex\n\tstats     influxql.IteratorStats\n\tstatsBuf  influxql.IteratorStats\n}\n\nfunc newStringIterator(name string, tags influxql.Tags, opt influxql.IteratorOptions, cur stringCursor, aux []cursorAt, conds []cursorAt, condNames []string) *stringIterator {\n\titr := 
&stringIterator{\n\t\tcur: cur,\n\t\taux: aux,\n\t\topt: opt,\n\t\tpoint: influxql.StringPoint{\n\t\t\tName: name,\n\t\t\tTags: tags,\n\t\t},\n\t\tstatsBuf: influxql.IteratorStats{\n\t\t\tSeriesN: 1,\n\t\t},\n\t}\n\titr.stats = itr.statsBuf\n\n\tif len(aux) > 0 {\n\t\titr.point.Aux = make([]interface{}, len(aux))\n\t}\n\n\tif opt.Condition != nil {\n\t\titr.m = make(map[string]interface{}, len(aux)+len(conds))\n\t}\n\titr.conds.names = condNames\n\titr.conds.curs = conds\n\n\treturn itr\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *stringIterator) Next() (*influxql.StringPoint, error) {\n\tfor {\n\t\tseek := tsdb.EOF\n\n\t\tif itr.cur != nil {\n\t\t\t// Read from the main cursor if we have one.\n\t\t\titr.point.Time, itr.point.Value = itr.cur.nextString()\n\t\t\tseek = itr.point.Time\n\t\t} else {\n\t\t\t// Otherwise find lowest aux timestamp.\n\t\t\tfor i := range itr.aux {\n\t\t\t\tif k, _ := itr.aux[i].peek(); k != tsdb.EOF {\n\t\t\t\t\tif seek == tsdb.EOF || (itr.opt.Ascending && k < seek) || (!itr.opt.Ascending && k > seek) {\n\t\t\t\t\t\tseek = k\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\titr.point.Time = seek\n\t\t}\n\n\t\t// Exit if we have no more points or we are outside our time range.\n\t\tif itr.point.Time == tsdb.EOF {\n\t\t\titr.copyStats()\n\t\t\treturn nil, nil\n\t\t} else if itr.opt.Ascending && itr.point.Time > itr.opt.EndTime {\n\t\t\titr.copyStats()\n\t\t\treturn nil, nil\n\t\t} else if !itr.opt.Ascending && itr.point.Time < itr.opt.StartTime {\n\t\t\titr.copyStats()\n\t\t\treturn nil, nil\n\t\t}\n\n\t\t// Read from each auxiliary cursor.\n\t\tfor i := range itr.opt.Aux {\n\t\t\titr.point.Aux[i] = itr.aux[i].nextAt(seek)\n\t\t}\n\n\t\t// Read from condition field cursors.\n\t\tfor i := range itr.conds.curs {\n\t\t\titr.m[itr.conds.names[i]] = itr.conds.curs[i].nextAt(seek)\n\t\t}\n\n\t\t// Evaluate condition, if one exists. 
Retry if it fails.\n\t\tif itr.opt.Condition != nil && !influxql.EvalBool(itr.opt.Condition, itr.m) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Track points returned.\n\t\titr.statsBuf.PointN++\n\n\t\t// Copy buffer to stats periodically.\n\t\tif itr.statsBuf.PointN%statsBufferCopyIntervalN == 0 {\n\t\t\titr.copyStats()\n\t\t}\n\n\t\treturn &itr.point, nil\n\t}\n}\n\n// copyStats copies from the itr stats buffer to the stats under lock.\nfunc (itr *stringIterator) copyStats() {\n\titr.statsLock.Lock()\n\titr.stats = itr.statsBuf\n\titr.statsLock.Unlock()\n}\n\n// Stats returns stats on the points processed.\nfunc (itr *stringIterator) Stats() influxql.IteratorStats {\n\titr.statsLock.Lock()\n\tstats := itr.stats\n\titr.statsLock.Unlock()\n\treturn stats\n}\n\n// Close closes the iterator.\nfunc (itr *stringIterator) Close() error {\n\tcursorsAt(itr.aux).close()\n\titr.aux = nil\n\tcursorsAt(itr.conds.curs).close()\n\titr.conds.curs = nil\n\tif itr.cur != nil {\n\t\terr := itr.cur.close()\n\t\titr.cur = nil\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// stringLimitIterator\ntype stringLimitIterator struct {\n\tinput influxql.StringIterator\n\topt   influxql.IteratorOptions\n\tn     int\n}\n\nfunc newStringLimitIterator(input influxql.StringIterator, opt influxql.IteratorOptions) *stringLimitIterator {\n\treturn &stringLimitIterator{\n\t\tinput: input,\n\t\topt:   opt,\n\t}\n}\n\nfunc (itr *stringLimitIterator) Stats() influxql.IteratorStats { return itr.input.Stats() }\nfunc (itr *stringLimitIterator) Close() error                  { return itr.input.Close() }\n\nfunc (itr *stringLimitIterator) Next() (*influxql.StringPoint, error) {\n\t// Check if we are beyond the limit.\n\tif (itr.n - itr.opt.Offset) > itr.opt.Limit {\n\t\treturn nil, nil\n\t}\n\n\t// Read the next point.\n\tp, err := itr.input.Next()\n\tif p == nil || err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Increment counter.\n\titr.n++\n\n\t// Offsets are handled by a higher level iterator so return all 
points.\n\treturn p, nil\n}\n\n// stringCursor represents an object for iterating over a single string field.\ntype stringCursor interface {\n\tcursor\n\tnextString() (t int64, v string)\n}\n\nfunc newStringCursor(seek int64, ascending bool, cacheValues Values, tsmKeyCursor *KeyCursor) stringCursor {\n\tif ascending {\n\t\treturn newStringAscendingCursor(seek, cacheValues, tsmKeyCursor)\n\t}\n\treturn newStringDescendingCursor(seek, cacheValues, tsmKeyCursor)\n}\n\ntype stringAscendingCursor struct {\n\tcache struct {\n\t\tvalues Values\n\t\tpos    int\n\t}\n\n\ttsm struct {\n\t\tbuf       []StringValue\n\t\tvalues    []StringValue\n\t\tpos       int\n\t\tkeyCursor *KeyCursor\n\t}\n}\n\nfunc newStringAscendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *stringAscendingCursor {\n\tc := &stringAscendingCursor{}\n\n\tc.cache.values = cacheValues\n\tc.cache.pos = sort.Search(len(c.cache.values), func(i int) bool {\n\t\treturn c.cache.values[i].UnixNano() >= seek\n\t})\n\n\tc.tsm.keyCursor = tsmKeyCursor\n\tc.tsm.buf = make([]StringValue, 10)\n\tc.tsm.values, _ = c.tsm.keyCursor.ReadStringBlock(&c.tsm.buf)\n\tc.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool {\n\t\treturn c.tsm.values[i].UnixNano() >= seek\n\t})\n\n\treturn c\n}\n\n// peekCache returns the current time/value from the cache.\nfunc (c *stringAscendingCursor) peekCache() (t int64, v string) {\n\tif c.cache.pos >= len(c.cache.values) {\n\t\treturn tsdb.EOF, \"\"\n\t}\n\n\titem := c.cache.values[c.cache.pos]\n\treturn item.UnixNano(), item.(StringValue).value\n}\n\n// peekTSM returns the current time/value from tsm.\nfunc (c *stringAscendingCursor) peekTSM() (t int64, v string) {\n\tif c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) {\n\t\treturn tsdb.EOF, \"\"\n\t}\n\n\titem := c.tsm.values[c.tsm.pos]\n\treturn item.UnixNano(), item.value\n}\n\n// close closes the cursor and any dependent cursors.\nfunc (c *stringAscendingCursor) close() error 
{\n\tc.tsm.keyCursor.Close()\n\tc.tsm.keyCursor = nil\n\tc.tsm.buf = nil\n\tc.cache.values = nil\n\tc.tsm.values = nil\n\treturn nil\n}\n\n// next returns the next key/value for the cursor.\nfunc (c *stringAscendingCursor) next() (int64, interface{}) { return c.nextString() }\n\n// nextString returns the next key/value for the cursor.\nfunc (c *stringAscendingCursor) nextString() (int64, string) {\n\tckey, cvalue := c.peekCache()\n\ttkey, tvalue := c.peekTSM()\n\n\t// No more data in cache or in TSM files.\n\tif ckey == tsdb.EOF && tkey == tsdb.EOF {\n\t\treturn tsdb.EOF, \"\"\n\t}\n\n\t// Both cache and tsm files have the same key, cache takes precedence.\n\tif ckey == tkey {\n\t\tc.nextCache()\n\t\tc.nextTSM()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered cache key precedes that in TSM file.\n\tif ckey != tsdb.EOF && (ckey < tkey || tkey == tsdb.EOF) {\n\t\tc.nextCache()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered TSM key precedes that in cache.\n\tc.nextTSM()\n\treturn tkey, tvalue\n}\n\n// nextCache returns the next value from the cache.\nfunc (c *stringAscendingCursor) nextCache() {\n\tif c.cache.pos >= len(c.cache.values) {\n\t\treturn\n\t}\n\tc.cache.pos++\n}\n\n// nextTSM returns the next value from the TSM files.\nfunc (c *stringAscendingCursor) nextTSM() {\n\tc.tsm.pos++\n\tif c.tsm.pos >= len(c.tsm.values) {\n\t\tc.tsm.keyCursor.Next()\n\t\tc.tsm.values, _ = c.tsm.keyCursor.ReadStringBlock(&c.tsm.buf)\n\t\tif len(c.tsm.values) == 0 {\n\t\t\treturn\n\t\t}\n\t\tc.tsm.pos = 0\n\t}\n}\n\ntype stringDescendingCursor struct {\n\tcache struct {\n\t\tvalues Values\n\t\tpos    int\n\t}\n\n\ttsm struct {\n\t\tbuf       []StringValue\n\t\tvalues    []StringValue\n\t\tpos       int\n\t\tkeyCursor *KeyCursor\n\t}\n}\n\nfunc newStringDescendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *stringDescendingCursor {\n\tc := &stringDescendingCursor{}\n\n\tc.cache.values = cacheValues\n\tc.cache.pos = sort.Search(len(c.cache.values), func(i int) bool 
{\n\t\treturn c.cache.values[i].UnixNano() >= seek\n\t})\n\tif t, _ := c.peekCache(); t != seek {\n\t\tc.cache.pos--\n\t}\n\n\tc.tsm.keyCursor = tsmKeyCursor\n\tc.tsm.buf = make([]StringValue, 10)\n\tc.tsm.values, _ = c.tsm.keyCursor.ReadStringBlock(&c.tsm.buf)\n\tc.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool {\n\t\treturn c.tsm.values[i].UnixNano() >= seek\n\t})\n\tif t, _ := c.peekTSM(); t != seek {\n\t\tc.tsm.pos--\n\t}\n\n\treturn c\n}\n\n// peekCache returns the current time/value from the cache.\nfunc (c *stringDescendingCursor) peekCache() (t int64, v string) {\n\tif c.cache.pos < 0 || c.cache.pos >= len(c.cache.values) {\n\t\treturn tsdb.EOF, \"\"\n\t}\n\n\titem := c.cache.values[c.cache.pos]\n\treturn item.UnixNano(), item.(StringValue).value\n}\n\n// peekTSM returns the current time/value from tsm.\nfunc (c *stringDescendingCursor) peekTSM() (t int64, v string) {\n\tif c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) {\n\t\treturn tsdb.EOF, \"\"\n\t}\n\n\titem := c.tsm.values[c.tsm.pos]\n\treturn item.UnixNano(), item.value\n}\n\n// close closes the cursor and any dependent cursors.\nfunc (c *stringDescendingCursor) close() error {\n\tc.tsm.keyCursor.Close()\n\tc.tsm.keyCursor = nil\n\tc.tsm.buf = nil\n\tc.cache.values = nil\n\tc.tsm.values = nil\n\treturn nil\n}\n\n// next returns the next key/value for the cursor.\nfunc (c *stringDescendingCursor) next() (int64, interface{}) { return c.nextString() }\n\n// nextString returns the next key/value for the cursor.\nfunc (c *stringDescendingCursor) nextString() (int64, string) {\n\tckey, cvalue := c.peekCache()\n\ttkey, tvalue := c.peekTSM()\n\n\t// No more data in cache or in TSM files.\n\tif ckey == tsdb.EOF && tkey == tsdb.EOF {\n\t\treturn tsdb.EOF, \"\"\n\t}\n\n\t// Both cache and tsm files have the same key, cache takes precedence.\n\tif ckey == tkey {\n\t\tc.nextCache()\n\t\tc.nextTSM()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered cache key precedes that in TSM file.\n\tif ckey != 
tsdb.EOF && (ckey > tkey || tkey == tsdb.EOF) {\n\t\tc.nextCache()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered TSM key precedes that in cache.\n\tc.nextTSM()\n\treturn tkey, tvalue\n}\n\n// nextCache returns the next value from the cache.\nfunc (c *stringDescendingCursor) nextCache() {\n\tif c.cache.pos < 0 {\n\t\treturn\n\t}\n\tc.cache.pos--\n}\n\n// nextTSM returns the next value from the TSM files.\nfunc (c *stringDescendingCursor) nextTSM() {\n\tc.tsm.pos--\n\tif c.tsm.pos < 0 {\n\t\tc.tsm.keyCursor.Next()\n\t\tc.tsm.values, _ = c.tsm.keyCursor.ReadStringBlock(&c.tsm.buf)\n\t\tif len(c.tsm.values) == 0 {\n\t\t\treturn\n\t\t}\n\t\tc.tsm.pos = len(c.tsm.values) - 1\n\t}\n}\n\n// stringLiteralCursor represents a cursor that always returns a single value.\n// It doesn't not have a time value so it can only be used with nextAt().\ntype stringLiteralCursor struct {\n\tvalue string\n}\n\nfunc (c *stringLiteralCursor) close() error                   { return nil }\nfunc (c *stringLiteralCursor) peek() (t int64, v interface{}) { return tsdb.EOF, c.value }\nfunc (c *stringLiteralCursor) next() (t int64, v interface{}) { return tsdb.EOF, c.value }\nfunc (c *stringLiteralCursor) nextAt(seek int64) interface{}  { return c.value }\n\n// stringNilLiteralCursor represents a cursor that always returns a typed nil value.\n// It doesn't not have a time value so it can only be used with nextAt().\ntype stringNilLiteralCursor struct{}\n\nfunc (c *stringNilLiteralCursor) close() error                   { return nil }\nfunc (c *stringNilLiteralCursor) peek() (t int64, v interface{}) { return tsdb.EOF, (*string)(nil) }\nfunc (c *stringNilLiteralCursor) next() (t int64, v interface{}) { return tsdb.EOF, (*string)(nil) }\nfunc (c *stringNilLiteralCursor) nextAt(seek int64) interface{}  { return (*string)(nil) }\n\ntype booleanFinalizerIterator struct {\n\tinfluxql.BooleanIterator\n\tlogger zap.Logger\n}\n\nfunc newBooleanFinalizerIterator(inner influxql.BooleanIterator, logger 
zap.Logger) *booleanFinalizerIterator {\n\titr := &booleanFinalizerIterator{BooleanIterator: inner, logger: logger}\n\truntime.SetFinalizer(itr, (*booleanFinalizerIterator).closeGC)\n\treturn itr\n}\n\nfunc (itr *booleanFinalizerIterator) closeGC() {\n\truntime.SetFinalizer(itr, nil)\n\titr.logger.Error(\"BooleanIterator finalized by GC\")\n\titr.Close()\n}\n\nfunc (itr *booleanFinalizerIterator) Close() error {\n\truntime.SetFinalizer(itr, nil)\n\treturn itr.BooleanIterator.Close()\n}\n\ntype booleanIterator struct {\n\tcur   booleanCursor\n\taux   []cursorAt\n\tconds struct {\n\t\tnames []string\n\t\tcurs  []cursorAt\n\t}\n\topt influxql.IteratorOptions\n\n\tm     map[string]interface{} // map used for condition evaluation\n\tpoint influxql.BooleanPoint  // reusable buffer\n\n\tstatsLock sync.Mutex\n\tstats     influxql.IteratorStats\n\tstatsBuf  influxql.IteratorStats\n}\n\nfunc newBooleanIterator(name string, tags influxql.Tags, opt influxql.IteratorOptions, cur booleanCursor, aux []cursorAt, conds []cursorAt, condNames []string) *booleanIterator {\n\titr := &booleanIterator{\n\t\tcur: cur,\n\t\taux: aux,\n\t\topt: opt,\n\t\tpoint: influxql.BooleanPoint{\n\t\t\tName: name,\n\t\t\tTags: tags,\n\t\t},\n\t\tstatsBuf: influxql.IteratorStats{\n\t\t\tSeriesN: 1,\n\t\t},\n\t}\n\titr.stats = itr.statsBuf\n\n\tif len(aux) > 0 {\n\t\titr.point.Aux = make([]interface{}, len(aux))\n\t}\n\n\tif opt.Condition != nil {\n\t\titr.m = make(map[string]interface{}, len(aux)+len(conds))\n\t}\n\titr.conds.names = condNames\n\titr.conds.curs = conds\n\n\treturn itr\n}\n\n// Next returns the next point from the iterator.\nfunc (itr *booleanIterator) Next() (*influxql.BooleanPoint, error) {\n\tfor {\n\t\tseek := tsdb.EOF\n\n\t\tif itr.cur != nil {\n\t\t\t// Read from the main cursor if we have one.\n\t\t\titr.point.Time, itr.point.Value = itr.cur.nextBoolean()\n\t\t\tseek = itr.point.Time\n\t\t} else {\n\t\t\t// Otherwise find lowest aux timestamp.\n\t\t\tfor i := range itr.aux 
{\n\t\t\t\tif k, _ := itr.aux[i].peek(); k != tsdb.EOF {\n\t\t\t\t\tif seek == tsdb.EOF || (itr.opt.Ascending && k < seek) || (!itr.opt.Ascending && k > seek) {\n\t\t\t\t\t\tseek = k\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\titr.point.Time = seek\n\t\t}\n\n\t\t// Exit if we have no more points or we are outside our time range.\n\t\tif itr.point.Time == tsdb.EOF {\n\t\t\titr.copyStats()\n\t\t\treturn nil, nil\n\t\t} else if itr.opt.Ascending && itr.point.Time > itr.opt.EndTime {\n\t\t\titr.copyStats()\n\t\t\treturn nil, nil\n\t\t} else if !itr.opt.Ascending && itr.point.Time < itr.opt.StartTime {\n\t\t\titr.copyStats()\n\t\t\treturn nil, nil\n\t\t}\n\n\t\t// Read from each auxiliary cursor.\n\t\tfor i := range itr.opt.Aux {\n\t\t\titr.point.Aux[i] = itr.aux[i].nextAt(seek)\n\t\t}\n\n\t\t// Read from condition field cursors.\n\t\tfor i := range itr.conds.curs {\n\t\t\titr.m[itr.conds.names[i]] = itr.conds.curs[i].nextAt(seek)\n\t\t}\n\n\t\t// Evaluate condition, if one exists. Retry if it fails.\n\t\tif itr.opt.Condition != nil && !influxql.EvalBool(itr.opt.Condition, itr.m) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Track points returned.\n\t\titr.statsBuf.PointN++\n\n\t\t// Copy buffer to stats periodically.\n\t\tif itr.statsBuf.PointN%statsBufferCopyIntervalN == 0 {\n\t\t\titr.copyStats()\n\t\t}\n\n\t\treturn &itr.point, nil\n\t}\n}\n\n// copyStats copies from the itr stats buffer to the stats under lock.\nfunc (itr *booleanIterator) copyStats() {\n\titr.statsLock.Lock()\n\titr.stats = itr.statsBuf\n\titr.statsLock.Unlock()\n}\n\n// Stats returns stats on the points processed.\nfunc (itr *booleanIterator) Stats() influxql.IteratorStats {\n\titr.statsLock.Lock()\n\tstats := itr.stats\n\titr.statsLock.Unlock()\n\treturn stats\n}\n\n// Close closes the iterator.\nfunc (itr *booleanIterator) Close() error {\n\tcursorsAt(itr.aux).close()\n\titr.aux = nil\n\tcursorsAt(itr.conds.curs).close()\n\titr.conds.curs = nil\n\tif itr.cur != nil {\n\t\terr := itr.cur.close()\n\t\titr.cur = 
nil\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// booleanLimitIterator\ntype booleanLimitIterator struct {\n\tinput influxql.BooleanIterator\n\topt   influxql.IteratorOptions\n\tn     int\n}\n\nfunc newBooleanLimitIterator(input influxql.BooleanIterator, opt influxql.IteratorOptions) *booleanLimitIterator {\n\treturn &booleanLimitIterator{\n\t\tinput: input,\n\t\topt:   opt,\n\t}\n}\n\nfunc (itr *booleanLimitIterator) Stats() influxql.IteratorStats { return itr.input.Stats() }\nfunc (itr *booleanLimitIterator) Close() error                  { return itr.input.Close() }\n\nfunc (itr *booleanLimitIterator) Next() (*influxql.BooleanPoint, error) {\n\t// Check if we are beyond the limit.\n\tif (itr.n - itr.opt.Offset) > itr.opt.Limit {\n\t\treturn nil, nil\n\t}\n\n\t// Read the next point.\n\tp, err := itr.input.Next()\n\tif p == nil || err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Increment counter.\n\titr.n++\n\n\t// Offsets are handled by a higher level iterator so return all points.\n\treturn p, nil\n}\n\n// booleanCursor represents an object for iterating over a single boolean field.\ntype booleanCursor interface {\n\tcursor\n\tnextBoolean() (t int64, v bool)\n}\n\nfunc newBooleanCursor(seek int64, ascending bool, cacheValues Values, tsmKeyCursor *KeyCursor) booleanCursor {\n\tif ascending {\n\t\treturn newBooleanAscendingCursor(seek, cacheValues, tsmKeyCursor)\n\t}\n\treturn newBooleanDescendingCursor(seek, cacheValues, tsmKeyCursor)\n}\n\ntype booleanAscendingCursor struct {\n\tcache struct {\n\t\tvalues Values\n\t\tpos    int\n\t}\n\n\ttsm struct {\n\t\tbuf       []BooleanValue\n\t\tvalues    []BooleanValue\n\t\tpos       int\n\t\tkeyCursor *KeyCursor\n\t}\n}\n\nfunc newBooleanAscendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *booleanAscendingCursor {\n\tc := &booleanAscendingCursor{}\n\n\tc.cache.values = cacheValues\n\tc.cache.pos = sort.Search(len(c.cache.values), func(i int) bool {\n\t\treturn c.cache.values[i].UnixNano() >= 
seek\n\t})\n\n\tc.tsm.keyCursor = tsmKeyCursor\n\tc.tsm.buf = make([]BooleanValue, 10)\n\tc.tsm.values, _ = c.tsm.keyCursor.ReadBooleanBlock(&c.tsm.buf)\n\tc.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool {\n\t\treturn c.tsm.values[i].UnixNano() >= seek\n\t})\n\n\treturn c\n}\n\n// peekCache returns the current time/value from the cache.\nfunc (c *booleanAscendingCursor) peekCache() (t int64, v bool) {\n\tif c.cache.pos >= len(c.cache.values) {\n\t\treturn tsdb.EOF, false\n\t}\n\n\titem := c.cache.values[c.cache.pos]\n\treturn item.UnixNano(), item.(BooleanValue).value\n}\n\n// peekTSM returns the current time/value from tsm.\nfunc (c *booleanAscendingCursor) peekTSM() (t int64, v bool) {\n\tif c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) {\n\t\treturn tsdb.EOF, false\n\t}\n\n\titem := c.tsm.values[c.tsm.pos]\n\treturn item.UnixNano(), item.value\n}\n\n// close closes the cursor and any dependent cursors.\nfunc (c *booleanAscendingCursor) close() error {\n\tc.tsm.keyCursor.Close()\n\tc.tsm.keyCursor = nil\n\tc.tsm.buf = nil\n\tc.cache.values = nil\n\tc.tsm.values = nil\n\treturn nil\n}\n\n// next returns the next key/value for the cursor.\nfunc (c *booleanAscendingCursor) next() (int64, interface{}) { return c.nextBoolean() }\n\n// nextBoolean returns the next key/value for the cursor.\nfunc (c *booleanAscendingCursor) nextBoolean() (int64, bool) {\n\tckey, cvalue := c.peekCache()\n\ttkey, tvalue := c.peekTSM()\n\n\t// No more data in cache or in TSM files.\n\tif ckey == tsdb.EOF && tkey == tsdb.EOF {\n\t\treturn tsdb.EOF, false\n\t}\n\n\t// Both cache and tsm files have the same key, cache takes precedence.\n\tif ckey == tkey {\n\t\tc.nextCache()\n\t\tc.nextTSM()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered cache key precedes that in TSM file.\n\tif ckey != tsdb.EOF && (ckey < tkey || tkey == tsdb.EOF) {\n\t\tc.nextCache()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered TSM key precedes that in cache.\n\tc.nextTSM()\n\treturn tkey, tvalue\n}\n\n// 
nextCache returns the next value from the cache.\nfunc (c *booleanAscendingCursor) nextCache() {\n\tif c.cache.pos >= len(c.cache.values) {\n\t\treturn\n\t}\n\tc.cache.pos++\n}\n\n// nextTSM returns the next value from the TSM files.\nfunc (c *booleanAscendingCursor) nextTSM() {\n\tc.tsm.pos++\n\tif c.tsm.pos >= len(c.tsm.values) {\n\t\tc.tsm.keyCursor.Next()\n\t\tc.tsm.values, _ = c.tsm.keyCursor.ReadBooleanBlock(&c.tsm.buf)\n\t\tif len(c.tsm.values) == 0 {\n\t\t\treturn\n\t\t}\n\t\tc.tsm.pos = 0\n\t}\n}\n\ntype booleanDescendingCursor struct {\n\tcache struct {\n\t\tvalues Values\n\t\tpos    int\n\t}\n\n\ttsm struct {\n\t\tbuf       []BooleanValue\n\t\tvalues    []BooleanValue\n\t\tpos       int\n\t\tkeyCursor *KeyCursor\n\t}\n}\n\nfunc newBooleanDescendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *booleanDescendingCursor {\n\tc := &booleanDescendingCursor{}\n\n\tc.cache.values = cacheValues\n\tc.cache.pos = sort.Search(len(c.cache.values), func(i int) bool {\n\t\treturn c.cache.values[i].UnixNano() >= seek\n\t})\n\tif t, _ := c.peekCache(); t != seek {\n\t\tc.cache.pos--\n\t}\n\n\tc.tsm.keyCursor = tsmKeyCursor\n\tc.tsm.buf = make([]BooleanValue, 10)\n\tc.tsm.values, _ = c.tsm.keyCursor.ReadBooleanBlock(&c.tsm.buf)\n\tc.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool {\n\t\treturn c.tsm.values[i].UnixNano() >= seek\n\t})\n\tif t, _ := c.peekTSM(); t != seek {\n\t\tc.tsm.pos--\n\t}\n\n\treturn c\n}\n\n// peekCache returns the current time/value from the cache.\nfunc (c *booleanDescendingCursor) peekCache() (t int64, v bool) {\n\tif c.cache.pos < 0 || c.cache.pos >= len(c.cache.values) {\n\t\treturn tsdb.EOF, false\n\t}\n\n\titem := c.cache.values[c.cache.pos]\n\treturn item.UnixNano(), item.(BooleanValue).value\n}\n\n// peekTSM returns the current time/value from tsm.\nfunc (c *booleanDescendingCursor) peekTSM() (t int64, v bool) {\n\tif c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) {\n\t\treturn tsdb.EOF, false\n\t}\n\n\titem := 
c.tsm.values[c.tsm.pos]\n\treturn item.UnixNano(), item.value\n}\n\n// close closes the cursor and any dependent cursors.\nfunc (c *booleanDescendingCursor) close() error {\n\tc.tsm.keyCursor.Close()\n\tc.tsm.keyCursor = nil\n\tc.tsm.buf = nil\n\tc.cache.values = nil\n\tc.tsm.values = nil\n\treturn nil\n}\n\n// next returns the next key/value for the cursor.\nfunc (c *booleanDescendingCursor) next() (int64, interface{}) { return c.nextBoolean() }\n\n// nextBoolean returns the next key/value for the cursor.\nfunc (c *booleanDescendingCursor) nextBoolean() (int64, bool) {\n\tckey, cvalue := c.peekCache()\n\ttkey, tvalue := c.peekTSM()\n\n\t// No more data in cache or in TSM files.\n\tif ckey == tsdb.EOF && tkey == tsdb.EOF {\n\t\treturn tsdb.EOF, false\n\t}\n\n\t// Both cache and tsm files have the same key, cache takes precedence.\n\tif ckey == tkey {\n\t\tc.nextCache()\n\t\tc.nextTSM()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered cache key precedes that in TSM file.\n\tif ckey != tsdb.EOF && (ckey > tkey || tkey == tsdb.EOF) {\n\t\tc.nextCache()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered TSM key precedes that in cache.\n\tc.nextTSM()\n\treturn tkey, tvalue\n}\n\n// nextCache returns the next value from the cache.\nfunc (c *booleanDescendingCursor) nextCache() {\n\tif c.cache.pos < 0 {\n\t\treturn\n\t}\n\tc.cache.pos--\n}\n\n// nextTSM returns the next value from the TSM files.\nfunc (c *booleanDescendingCursor) nextTSM() {\n\tc.tsm.pos--\n\tif c.tsm.pos < 0 {\n\t\tc.tsm.keyCursor.Next()\n\t\tc.tsm.values, _ = c.tsm.keyCursor.ReadBooleanBlock(&c.tsm.buf)\n\t\tif len(c.tsm.values) == 0 {\n\t\t\treturn\n\t\t}\n\t\tc.tsm.pos = len(c.tsm.values) - 1\n\t}\n}\n\n// booleanLiteralCursor represents a cursor that always returns a single value.\n// It doesn't not have a time value so it can only be used with nextAt().\ntype booleanLiteralCursor struct {\n\tvalue bool\n}\n\nfunc (c *booleanLiteralCursor) close() error                   { return nil }\nfunc (c 
*booleanLiteralCursor) peek() (t int64, v interface{}) { return tsdb.EOF, c.value }\nfunc (c *booleanLiteralCursor) next() (t int64, v interface{}) { return tsdb.EOF, c.value }\nfunc (c *booleanLiteralCursor) nextAt(seek int64) interface{}  { return c.value }\n\n// booleanNilLiteralCursor represents a cursor that always returns a typed nil value.\n// It doesn't not have a time value so it can only be used with nextAt().\ntype booleanNilLiteralCursor struct{}\n\nfunc (c *booleanNilLiteralCursor) close() error                   { return nil }\nfunc (c *booleanNilLiteralCursor) peek() (t int64, v interface{}) { return tsdb.EOF, (*bool)(nil) }\nfunc (c *booleanNilLiteralCursor) next() (t int64, v interface{}) { return tsdb.EOF, (*bool)(nil) }\nfunc (c *booleanNilLiteralCursor) nextAt(seek int64) interface{}  { return (*bool)(nil) }\n\nvar _ = fmt.Print\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.gen.go.tmpl",
    "content": "package tsm1\n\nimport (\n\t\"sort\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"sync\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t\"github.com/uber-go/zap\"\n)\n\ntype cursor interface {\n\tclose() error\n\tnext() (t int64, v interface{})\n}\n\n// cursorAt provides a bufferred cursor interface.\n// This required for literal value cursors which don't have a time value.\ntype cursorAt interface {\n\tclose() error\n\tpeek() (k int64, v interface{})\n\tnextAt(seek int64) interface{}\n}\n\ntype nilCursor struct {}\nfunc (nilCursor) next() (int64, interface{}) { return tsdb.EOF, nil }\n\n// bufCursor implements a bufferred cursor.\ntype bufCursor struct {\n\tcur cursor\n\tbuf struct {\n\t\tkey    int64\n\t\tvalue  interface{}\n\t\tfilled bool\n\t}\n\tascending bool\n}\n\n// newBufCursor returns a bufferred wrapper for cur.\nfunc newBufCursor(cur cursor, ascending bool) *bufCursor {\n\treturn &bufCursor{cur: cur, ascending: ascending}\n}\n\nfunc (c *bufCursor) close() error {\n\terr := c.cur.close()\n\tc.cur = nil\n\treturn err\n}\n\n// next returns the buffer, if filled. 
Otherwise returns the next key/value from the cursor.\nfunc (c *bufCursor) next() (int64, interface{}) {\n\tif c.buf.filled {\n\t\tk, v := c.buf.key, c.buf.value\n\t\tc.buf.filled = false\n\t\treturn k, v\n\t}\n\treturn c.cur.next()\n}\n\n// unread pushes k and v onto the buffer.\nfunc (c *bufCursor) unread(k int64, v interface{}) {\n\tc.buf.key, c.buf.value = k, v\n\tc.buf.filled = true\n}\n\n// peek reads next next key/value without removing them from the cursor.\nfunc (c *bufCursor) peek() (k int64, v interface{}) {\n\tk, v = c.next()\n\tc.unread(k, v)\n\treturn\n}\n\n// nextAt returns the next value where key is equal to seek.\n// Skips over any keys that are less than seek.\n// If the key doesn't exist then a nil value is returned instead.\nfunc (c *bufCursor) nextAt(seek int64) interface{} {\n\tfor {\n\t\tk, v := c.next()\n\t\tif k != tsdb.EOF {\n\t\t\tif k == seek {\n\t\t\t\treturn v\n\t\t\t} else if c.ascending && k < seek {\n\t\t\t\tcontinue\n\t\t\t} else if !c.ascending && k > seek {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.unread(k, v)\n\t\t}\n\n\t\t// Return \"nil\" value for type.\n\t\tswitch c.cur.(type) {\n\t\t\tcase floatCursor:\n\t\t\t\treturn (*float64)(nil)\n\t\t\tcase integerCursor:\n\t\t\t\treturn (*int64)(nil)\n\t\t\tcase stringCursor:\n\t\t\t\treturn (*string)(nil)\n\t\t\tcase booleanCursor:\n\t\t\t\treturn (*bool)(nil)\n\t\t\tdefault:\n\t\t\t\tpanic(\"unreachable\")\n\t\t}\n\t}\n}\n\n\n// statsBufferCopyIntervalN is the number of points that are read before\n// copying the stats buffer to the iterator's stats field. 
This is used to\n// amortize the cost of using a mutex when updating stats.\nconst statsBufferCopyIntervalN = 100\n\n{{range .}}\n\ntype {{.name}}FinalizerIterator struct {\n\tinfluxql.{{.Name}}Iterator\n\tlogger zap.Logger\n}\n\nfunc new{{.Name}}FinalizerIterator(inner influxql.{{.Name}}Iterator, logger zap.Logger) *{{.name}}FinalizerIterator {\n\titr := &{{.name}}FinalizerIterator{ {{.Name}}Iterator: inner, logger: logger}\n\truntime.SetFinalizer(itr, (*{{.name}}FinalizerIterator).closeGC)\n\treturn itr\n}\n\nfunc (itr *{{.name}}FinalizerIterator) closeGC() {\n\truntime.SetFinalizer(itr, nil)\n\titr.logger.Error(\"{{.Name}}Iterator finalized by GC\")\n\titr.Close()\n}\n\nfunc (itr *{{.name}}FinalizerIterator) Close() error {\n\truntime.SetFinalizer(itr, nil)\n\treturn itr.{{.Name}}Iterator.Close()\n}\n\ntype {{.name}}Iterator struct {\n\tcur   {{.name}}Cursor\n\taux   []cursorAt\n\tconds struct {\n\t\tnames []string\n\t\tcurs  []cursorAt\n\t}\n\topt   influxql.IteratorOptions\n\n\tm map[string]interface{}      // map used for condition evaluation\n\tpoint influxql.{{.Name}}Point // reusable buffer\n\n\tstatsLock sync.Mutex\n\tstats     influxql.IteratorStats\n\tstatsBuf  influxql.IteratorStats\n}\n\nfunc new{{.Name}}Iterator(name string, tags influxql.Tags, opt influxql.IteratorOptions, cur {{.name}}Cursor, aux []cursorAt, conds []cursorAt, condNames []string) *{{.name}}Iterator {\n\titr := &{{.name}}Iterator{\n\t\tcur:   cur,\n\t\taux:   aux,\n\t\topt:   opt,\n\t\tpoint: influxql.{{.Name}}Point{\n\t\t\tName: name,\n\t\t\tTags: tags,\n\t\t},\n\t\tstatsBuf: influxql.IteratorStats{\n\t\t\tSeriesN: 1,\n\t\t},\n\t}\n\titr.stats = itr.statsBuf\n\n\tif len(aux) > 0 {\n\t\titr.point.Aux = make([]interface{}, len(aux))\n\t}\n\n\tif opt.Condition != nil {\n\t\titr.m = make(map[string]interface{}, len(aux)+len(conds))\n\t}\n\titr.conds.names = condNames\n\titr.conds.curs = conds\n\n\treturn itr\n}\n\n// Next returns the next point from the iterator.\nfunc (itr 
*{{.name}}Iterator) Next() (*influxql.{{.Name}}Point, error) {\n\tfor {\n\t\tseek := tsdb.EOF\n\n\t\tif itr.cur != nil {\n\t\t\t// Read from the main cursor if we have one.\n\t\t\titr.point.Time, itr.point.Value = itr.cur.next{{.Name}}()\n\t\t\tseek = itr.point.Time\n\t\t} else {\n\t\t\t// Otherwise find lowest aux timestamp.\n\t\t\tfor i := range itr.aux {\n\t\t\t\tif k, _ := itr.aux[i].peek(); k != tsdb.EOF {\n\t\t\t\t\tif seek == tsdb.EOF || (itr.opt.Ascending && k < seek) || (!itr.opt.Ascending && k > seek) {\n\t\t\t\t\t\tseek = k\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\titr.point.Time = seek\n\t\t}\n\n\t\t// Exit if we have no more points or we are outside our time range.\n\t\tif itr.point.Time == tsdb.EOF {\n\t\t\titr.copyStats()\n\t\t\treturn nil, nil\n\t\t} else if itr.opt.Ascending && itr.point.Time > itr.opt.EndTime {\n\t\t\titr.copyStats()\n\t\t\treturn nil, nil\n\t\t} else if !itr.opt.Ascending && itr.point.Time < itr.opt.StartTime {\n\t\t\titr.copyStats()\n\t\t\treturn nil, nil\n\t\t}\n\n\t\t// Read from each auxiliary cursor.\n\t\tfor i := range itr.opt.Aux {\n\t\t\titr.point.Aux[i] = itr.aux[i].nextAt(seek)\n\t\t}\n\n\t\t// Read from condition field cursors.\n\t\tfor i := range itr.conds.curs {\n\t\t\titr.m[itr.conds.names[i]] = itr.conds.curs[i].nextAt(seek)\n\t\t}\n\n\t\t// Evaluate condition, if one exists. 
Retry if it fails.\n\t\tif itr.opt.Condition != nil && !influxql.EvalBool(itr.opt.Condition, itr.m) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Track points returned.\n\t\titr.statsBuf.PointN++\n\n\t\t// Copy buffer to stats periodically.\n\t\tif itr.statsBuf.PointN % statsBufferCopyIntervalN == 0 {\n\t\t\titr.copyStats()\n\t\t}\n\n\t\treturn &itr.point, nil\n\t}\n}\n\n// copyStats copies from the itr stats buffer to the stats under lock.\nfunc (itr *{{.name}}Iterator) copyStats() {\n\titr.statsLock.Lock()\n\titr.stats = itr.statsBuf\n\titr.statsLock.Unlock()\n}\n\n// Stats returns stats on the points processed.\nfunc (itr *{{.name}}Iterator) Stats() influxql.IteratorStats {\n\titr.statsLock.Lock()\n\tstats := itr.stats\n\titr.statsLock.Unlock()\n\treturn stats\n}\n\n// Close closes the iterator.\nfunc (itr *{{.name}}Iterator) Close() error {\n\tcursorsAt(itr.aux).close()\n\titr.aux = nil\n\tcursorsAt(itr.conds.curs).close()\n\titr.conds.curs = nil\n\tif itr.cur != nil {\n\t\terr := itr.cur.close()\n\t\titr.cur = nil\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// {{.name}}LimitIterator\ntype {{.name}}LimitIterator struct {\n\tinput influxql.{{.Name}}Iterator\n\topt   influxql.IteratorOptions\n\tn     int\n}\n\nfunc new{{.Name}}LimitIterator(input influxql.{{.Name}}Iterator, opt influxql.IteratorOptions) *{{.name}}LimitIterator {\n\treturn &{{.name}}LimitIterator{\n\t\tinput: input,\n\t\topt:   opt,\n\t}\n}\n\nfunc (itr *{{.name}}LimitIterator) Stats() influxql.IteratorStats { return itr.input.Stats() }\nfunc (itr *{{.name}}LimitIterator) Close() error                  { return itr.input.Close() }\n\nfunc (itr *{{.name}}LimitIterator) Next() (*influxql.{{.Name}}Point, error) {\n\t// Check if we are beyond the limit.\n\tif (itr.n-itr.opt.Offset) > itr.opt.Limit {\n\t\treturn nil, nil\n\t}\n\n\t// Read the next point.\n\tp, err := itr.input.Next()\n\tif p == nil || err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Increment counter.\n\titr.n++\n\n\t// Offsets are handled by a higher 
level iterator so return all points.\n\treturn p, nil\n}\n\n// {{.name}}Cursor represents an object for iterating over a single {{.name}} field.\ntype {{.name}}Cursor interface {\n\tcursor\n\tnext{{.Name}}() (t int64, v {{.Type}})\n}\n\nfunc new{{.Name}}Cursor(seek int64, ascending bool, cacheValues Values, tsmKeyCursor *KeyCursor) {{.name}}Cursor {\n\tif ascending {\n\t\treturn new{{.Name}}AscendingCursor(seek, cacheValues, tsmKeyCursor)\n\t}\n\treturn new{{.Name}}DescendingCursor(seek, cacheValues, tsmKeyCursor)\n}\n\ntype {{.name}}AscendingCursor struct {\n\tcache struct {\n\t\tvalues Values\n\t\tpos    int\n\t}\n\n\ttsm struct {\n\t\tbuf       []{{.Name}}Value\n\t\tvalues    []{{.Name}}Value\n\t\tpos       int\n\t\tkeyCursor *KeyCursor\n\t}\n}\n\nfunc new{{.Name}}AscendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *{{.name}}AscendingCursor {\n\tc := &{{.name}}AscendingCursor{}\n\n\tc.cache.values = cacheValues\n\tc.cache.pos = sort.Search(len(c.cache.values), func(i int) bool {\n\t\treturn c.cache.values[i].UnixNano() >= seek\n\t})\n\n\tc.tsm.keyCursor = tsmKeyCursor\n\tc.tsm.buf = make([]{{.Name}}Value, 10)\n\tc.tsm.values, _ = c.tsm.keyCursor.Read{{.Name}}Block(&c.tsm.buf)\n\tc.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool {\n\t\treturn c.tsm.values[i].UnixNano() >= seek\n\t})\n\n\treturn c\n}\n\n// peekCache returns the current time/value from the cache.\nfunc (c *{{.name}}AscendingCursor) peekCache() (t int64, v {{.Type}}) {\n\tif c.cache.pos >= len(c.cache.values) {\n\t\treturn tsdb.EOF, {{.Nil}}\n\t}\n\n\titem := c.cache.values[c.cache.pos]\n\treturn item.UnixNano(), item.({{.ValueType}}).value\n}\n\n// peekTSM returns the current time/value from tsm.\nfunc (c *{{.name}}AscendingCursor) peekTSM() (t int64, v {{.Type}}) {\n\tif c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) {\n\t\treturn tsdb.EOF, {{.Nil}}\n\t}\n\n\titem := c.tsm.values[c.tsm.pos]\n\treturn item.UnixNano(), item.value\n}\n\n// close closes the cursor and 
any dependent cursors.\nfunc (c *{{.name}}AscendingCursor) close() (error) {\n\tc.tsm.keyCursor.Close()\n\tc.tsm.keyCursor = nil\n\tc.tsm.buf = nil\n\tc.cache.values = nil\n\tc.tsm.values = nil\n\treturn nil\n}\n\n// next returns the next key/value for the cursor.\nfunc (c *{{.name}}AscendingCursor) next() (int64, interface{}) { return c.next{{.Name}}() }\n\n// next{{.Name}} returns the next key/value for the cursor.\nfunc (c *{{.name}}AscendingCursor) next{{.Name}}() (int64, {{.Type}}) {\n\tckey, cvalue := c.peekCache()\n\ttkey, tvalue := c.peekTSM()\n\n\t// No more data in cache or in TSM files.\n\tif ckey == tsdb.EOF && tkey == tsdb.EOF {\n\t\treturn tsdb.EOF, {{.Nil}}\n\t}\n\n\t// Both cache and tsm files have the same key, cache takes precedence.\n\tif ckey == tkey {\n\t\tc.nextCache()\n\t\tc.nextTSM()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered cache key precedes that in TSM file.\n\tif ckey != tsdb.EOF && (ckey < tkey || tkey == tsdb.EOF) {\n\t\tc.nextCache()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered TSM key precedes that in cache.\n\tc.nextTSM()\n\treturn tkey, tvalue\n}\n\n// nextCache returns the next value from the cache.\nfunc (c *{{.name}}AscendingCursor) nextCache() {\n\tif c.cache.pos >= len(c.cache.values) {\n\t\treturn\n\t}\n\tc.cache.pos++\n}\n\n// nextTSM returns the next value from the TSM files.\nfunc (c *{{.name}}AscendingCursor) nextTSM() {\n\tc.tsm.pos++\n\tif c.tsm.pos >= len(c.tsm.values) {\n\t\tc.tsm.keyCursor.Next()\n\t\tc.tsm.values, _ = c.tsm.keyCursor.Read{{.Name}}Block(&c.tsm.buf)\n\t\tif len(c.tsm.values) == 0 {\n\t\t\treturn\n\t\t}\n\t\tc.tsm.pos = 0\n\t}\n}\n\ntype {{.name}}DescendingCursor struct {\n\tcache struct {\n\t\tvalues Values\n\t\tpos    int\n\t}\n\n\ttsm struct {\n\t\tbuf       []{{.Name}}Value\n\t\tvalues    []{{.Name}}Value\n\t\tpos       int\n\t\tkeyCursor *KeyCursor\n\t}\n}\n\nfunc new{{.Name}}DescendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *{{.name}}DescendingCursor {\n\tc := 
&{{.name}}DescendingCursor{}\n\n\tc.cache.values = cacheValues\n\tc.cache.pos = sort.Search(len(c.cache.values), func(i int) bool {\n\t\treturn c.cache.values[i].UnixNano() >= seek\n\t})\n\tif t, _ := c.peekCache(); t != seek {\n\t\tc.cache.pos--\n\t}\n\n\tc.tsm.keyCursor = tsmKeyCursor\n\tc.tsm.buf = make([]{{.Name}}Value, 10)\n\tc.tsm.values, _ = c.tsm.keyCursor.Read{{.Name}}Block(&c.tsm.buf)\n\tc.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool {\n\t\treturn c.tsm.values[i].UnixNano() >= seek\n\t})\n\tif t, _ := c.peekTSM(); t != seek {\n\t\tc.tsm.pos--\n\t}\n\n\treturn c\n}\n\n// peekCache returns the current time/value from the cache.\nfunc (c *{{.name}}DescendingCursor) peekCache() (t int64, v {{.Type}}) {\n\tif c.cache.pos < 0 || c.cache.pos >= len(c.cache.values) {\n\t\treturn tsdb.EOF, {{.Nil}}\n\t}\n\n\titem := c.cache.values[c.cache.pos]\n\treturn item.UnixNano(), item.({{.ValueType}}).value\n}\n\n// peekTSM returns the current time/value from tsm.\nfunc (c *{{.name}}DescendingCursor) peekTSM() (t int64, v {{.Type}}) {\n\tif c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) {\n\t\treturn tsdb.EOF, {{.Nil}}\n\t}\n\n\titem := c.tsm.values[c.tsm.pos]\n\treturn item.UnixNano(), item.value\n}\n\n// close closes the cursor and any dependent cursors.\nfunc (c *{{.name}}DescendingCursor) close() (error) {\n\tc.tsm.keyCursor.Close()\n\tc.tsm.keyCursor = nil\n\tc.tsm.buf = nil\n\tc.cache.values = nil\n\tc.tsm.values = nil\n\treturn nil\n}\n\n// next returns the next key/value for the cursor.\nfunc (c *{{.name}}DescendingCursor) next() (int64, interface{}) { return c.next{{.Name}}() }\n\n// next{{.Name}} returns the next key/value for the cursor.\nfunc (c *{{.name}}DescendingCursor) next{{.Name}}() (int64, {{.Type}}) {\n\tckey, cvalue := c.peekCache()\n\ttkey, tvalue := c.peekTSM()\n\n\t// No more data in cache or in TSM files.\n\tif ckey == tsdb.EOF && tkey == tsdb.EOF {\n\t\treturn tsdb.EOF, {{.Nil}}\n\t}\n\n\t// Both cache and tsm files have the same key, 
cache takes precedence.\n\tif ckey == tkey {\n\t\tc.nextCache()\n\t\tc.nextTSM()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered cache key precedes that in TSM file.\n\tif ckey != tsdb.EOF && (ckey > tkey || tkey == tsdb.EOF) {\n\t\tc.nextCache()\n\t\treturn ckey, cvalue\n\t}\n\n\t// Buffered TSM key precedes that in cache.\n\tc.nextTSM()\n\treturn tkey, tvalue\n}\n\n// nextCache returns the next value from the cache.\nfunc (c *{{.name}}DescendingCursor) nextCache() {\n\tif c.cache.pos < 0 {\n\t\treturn\n\t}\n\tc.cache.pos--\n}\n\n// nextTSM returns the next value from the TSM files.\nfunc (c *{{.name}}DescendingCursor) nextTSM() {\n\tc.tsm.pos--\n\tif c.tsm.pos < 0 {\n\t\tc.tsm.keyCursor.Next()\n\t\tc.tsm.values, _ = c.tsm.keyCursor.Read{{.Name}}Block(&c.tsm.buf)\n\t\tif len(c.tsm.values) == 0 {\n\t\t\treturn\n\t\t}\n\t\tc.tsm.pos = len(c.tsm.values) - 1\n\t}\n}\n\n// {{.name}}LiteralCursor represents a cursor that always returns a single value.\n// It doesn't not have a time value so it can only be used with nextAt().\ntype {{.name}}LiteralCursor struct {\n\tvalue {{.Type}}\n}\n\nfunc (c *{{.name}}LiteralCursor) close() error { return nil }\nfunc (c *{{.name}}LiteralCursor) peek() (t int64, v interface{}) { return tsdb.EOF, c.value }\nfunc (c *{{.name}}LiteralCursor) next() (t int64, v interface{}) { return tsdb.EOF, c.value }\nfunc (c *{{.name}}LiteralCursor) nextAt(seek int64) interface{} { return c.value }\n\n\n// {{.name}}NilLiteralCursor represents a cursor that always returns a typed nil value.\n// It doesn't not have a time value so it can only be used with nextAt().\ntype {{.name}}NilLiteralCursor struct {}\n\nfunc (c *{{.name}}NilLiteralCursor) close() error { return nil }\nfunc (c *{{.name}}NilLiteralCursor) peek() (t int64, v interface{}) { return tsdb.EOF, (*{{.Type}})(nil) }\nfunc (c *{{.name}}NilLiteralCursor) next() (t int64, v interface{}) { return tsdb.EOF, (*{{.Type}})(nil) }\nfunc (c *{{.name}}NilLiteralCursor) nextAt(seek int64) interface{} { 
return (*{{.Type}})(nil) }\n\n{{end}}\n\nvar _ = fmt.Print\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.gen.go.tmpldata",
    "content": "[\n\t{\n\t\t\"Name\":\"Float\",\n\t\t\"name\":\"float\",\n\t\t\"Type\":\"float64\",\n\t\t\"ValueType\":\"FloatValue\",\n\t\t\"Nil\":\"0\"\n\t},\n\t{\n\t\t\"Name\":\"Integer\",\n\t\t\"name\":\"integer\",\n\t\t\"Type\":\"int64\",\n\t\t\"ValueType\":\"IntegerValue\",\n\t\t\"Nil\":\"0\"\n\t},\n\t{\n\t\t\"Name\":\"String\",\n\t\t\"name\":\"string\",\n\t\t\"Type\":\"string\",\n\t\t\"ValueType\":\"StringValue\",\n\t\t\"Nil\":\"\\\"\\\"\"\n\t},\n\t{\n\t\t\"Name\":\"Boolean\",\n\t\t\"name\":\"boolean\",\n\t\t\"Type\":\"bool\",\n\t\t\"ValueType\":\"BooleanValue\",\n\t\t\"Nil\":\"false\"\n\t}\n]\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.go",
    "content": "package tsm1\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/uber-go/zap\"\n)\n\nfunc newLimitIterator(input influxql.Iterator, opt influxql.IteratorOptions) influxql.Iterator {\n\tswitch input := input.(type) {\n\tcase influxql.FloatIterator:\n\t\treturn newFloatLimitIterator(input, opt)\n\tcase influxql.IntegerIterator:\n\t\treturn newIntegerLimitIterator(input, opt)\n\tcase influxql.StringIterator:\n\t\treturn newStringLimitIterator(input, opt)\n\tcase influxql.BooleanIterator:\n\t\treturn newBooleanLimitIterator(input, opt)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported limit iterator type: %T\", input))\n\t}\n}\n\ntype floatCastIntegerCursor struct {\n\tcursor integerCursor\n}\n\nfunc (c *floatCastIntegerCursor) close() error { return c.cursor.close() }\n\nfunc (c *floatCastIntegerCursor) next() (t int64, v interface{}) { return c.nextFloat() }\n\nfunc (c *floatCastIntegerCursor) nextFloat() (int64, float64) {\n\tt, v := c.cursor.nextInteger()\n\treturn t, float64(v)\n}\n\ntype integerCastFloatCursor struct {\n\tcursor floatCursor\n}\n\nfunc (c *integerCastFloatCursor) close() error { return c.cursor.close() }\n\nfunc (c *integerCastFloatCursor) next() (t int64, v interface{}) { return c.nextInteger() }\n\nfunc (c *integerCastFloatCursor) nextInteger() (int64, int64) {\n\tt, v := c.cursor.nextFloat()\n\treturn t, int64(v)\n}\n\ntype cursorsAt []cursorAt\n\nfunc (c cursorsAt) close() {\n\tfor _, cur := range c {\n\t\tcur.close()\n\t}\n}\n\n// newMergeFinalizerIterator creates a new Merge iterator from the inputs. 
If the call to Merge succeeds,\n// the resulting Iterator will be wrapped in a finalizer iterator.\n// If Merge returns an error, the inputs will be closed.\nfunc newMergeFinalizerIterator(inputs []influxql.Iterator, opt influxql.IteratorOptions, log zap.Logger) (influxql.Iterator, error) {\n\titr, err := influxql.Iterators(inputs).Merge(opt)\n\tif err != nil {\n\t\tinfluxql.Iterators(inputs).Close()\n\t\treturn nil, err\n\t}\n\treturn newFinalizerIterator(itr, log), nil\n}\n\n// newFinalizerIterator creates a new iterator that installs a runtime finalizer\n// to ensure close is eventually called if the iterator is garbage collected.\n// This additional guard attempts to protect against clients of CreateIterator not\n// correctly closing them and leaking cursors.\nfunc newFinalizerIterator(itr influxql.Iterator, log zap.Logger) influxql.Iterator {\n\tif itr == nil {\n\t\treturn nil\n\t}\n\n\tswitch inner := itr.(type) {\n\tcase influxql.FloatIterator:\n\t\treturn newFloatFinalizerIterator(inner, log)\n\tcase influxql.IntegerIterator:\n\t\treturn newIntegerFinalizerIterator(inner, log)\n\tcase influxql.StringIterator:\n\t\treturn newStringFinalizerIterator(inner, log)\n\tcase influxql.BooleanIterator:\n\t\treturn newBooleanFinalizerIterator(inner, log)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported finalizer iterator type: %T\", itr))\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/mmap_solaris.go",
    "content": "// +build solaris\n\npackage tsm1\n\nimport (\n\t\"os\"\n\t\"syscall\"\n\n\t\"golang.org/x/sys/unix\"\n)\n\nfunc mmap(f *os.File, offset int64, length int) ([]byte, error) {\n\tmmap, err := unix.Mmap(int(f.Fd()), 0, length, syscall.PROT_READ, syscall.MAP_SHARED)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := unix.Madvise(mmap, syscall.MADV_RANDOM); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn mmap, nil\n}\n\nfunc munmap(b []byte) (err error) {\n\treturn unix.Munmap(b)\n}\n\n// From: github.com/boltdb/bolt/bolt_unix.go\nfunc madvise(b []byte, advice int) (err error) {\n\treturn unix.Madvise(b, advice)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/mmap_unix.go",
    "content": "// +build !windows,!plan9,!solaris\n\npackage tsm1\n\nimport (\n\t\"os\"\n\t\"syscall\"\n)\n\nfunc mmap(f *os.File, offset int64, length int) ([]byte, error) {\n\tmmap, err := syscall.Mmap(int(f.Fd()), 0, length, syscall.PROT_READ, syscall.MAP_SHARED)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn mmap, nil\n}\n\nfunc munmap(b []byte) (err error) {\n\treturn syscall.Munmap(b)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/mmap_windows.go",
    "content": "package tsm1\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"reflect\"\n\t\"sync\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\n// mmap implementation for Windows\n// Based on: https://github.com/edsrzf/mmap-go\n// Based on: https://github.com/boltdb/bolt/bolt_windows.go\n// Ref: https://groups.google.com/forum/#!topic/golang-nuts/g0nLwQI9www\n\n// We keep this map so that we can get back the original handle from the memory address.\nvar handleLock sync.Mutex\nvar handleMap = map[uintptr]syscall.Handle{}\nvar fileMap = map[uintptr]*os.File{}\n\nfunc openSharedFile(f *os.File) (file *os.File, err error) {\n\n\tvar access, createmode, sharemode uint32\n\tvar sa *syscall.SecurityAttributes\n\n\taccess = syscall.GENERIC_READ\n\tsharemode = uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)\n\tcreatemode = syscall.OPEN_EXISTING\n\tfileName := f.Name()\n\n\tpathp, err := syscall.UTF16PtrFromString(fileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\th, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0)\n\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\t//NewFile does not add finalizer, need to close this manually\n\treturn os.NewFile(uintptr(h), fileName), nil\n}\n\nfunc mmap(f *os.File, offset int64, length int) (out []byte, err error) {\n\t// Open a file mapping handle.\n\tsizelo := uint32(length >> 32)\n\tsizehi := uint32(length) & 0xffffffff\n\n\tsharedHandle, errno := openSharedFile(f)\n\tif errno != nil {\n\t\treturn nil, os.NewSyscallError(\"CreateFile\", errno)\n\t}\n\n\th, errno := syscall.CreateFileMapping(syscall.Handle(sharedHandle.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil)\n\tif h == 0 {\n\t\treturn nil, os.NewSyscallError(\"CreateFileMapping\", errno)\n\t}\n\n\t// Create the memory map.\n\taddr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(length))\n\tif addr == 0 {\n\t\treturn nil, os.NewSyscallError(\"MapViewOfFile\", 
errno)\n\t}\n\n\thandleLock.Lock()\n\thandleMap[addr] = h\n\tfileMap[addr] = sharedHandle\n\thandleLock.Unlock()\n\n\t// Convert to a byte array.\n\thdr := (*reflect.SliceHeader)(unsafe.Pointer(&out))\n\thdr.Data = uintptr(unsafe.Pointer(addr))\n\thdr.Len = length\n\thdr.Cap = length\n\n\treturn\n}\n\n// munmap Windows implementation\n// Based on: https://github.com/edsrzf/mmap-go\n// Based on: https://github.com/boltdb/bolt/bolt_windows.go\nfunc munmap(b []byte) (err error) {\n\thandleLock.Lock()\n\tdefer handleLock.Unlock()\n\n\taddr := (uintptr)(unsafe.Pointer(&b[0]))\n\tif err := syscall.UnmapViewOfFile(addr); err != nil {\n\t\treturn os.NewSyscallError(\"UnmapViewOfFile\", err)\n\t}\n\n\thandle, ok := handleMap[addr]\n\tif !ok {\n\t\t// should be impossible; we would've seen the error above\n\t\treturn errors.New(\"unknown base address\")\n\t}\n\tdelete(handleMap, addr)\n\n\te := syscall.CloseHandle(syscall.Handle(handle))\n\tif e != nil {\n\t\treturn os.NewSyscallError(\"CloseHandle\", e)\n\t}\n\n\tfile, ok := fileMap[addr]\n\tif !ok {\n\t\t// should be impossible; we would've seen the error above\n\t\treturn errors.New(\"unknown base address\")\n\t}\n\tdelete(fileMap, addr)\n\n\te = file.Close()\n\tif e != nil {\n\t\treturn errors.New(\"close file\" + e.Error())\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/pools.go",
    "content": "package tsm1\n\nimport \"sync\"\n\nvar bufPool sync.Pool\n\n// getBuf returns a buffer with length size from the buffer pool.\nfunc getBuf(size int) *[]byte {\n\tx := bufPool.Get()\n\tif x == nil {\n\t\tb := make([]byte, size)\n\t\treturn &b\n\t}\n\tbuf := x.(*[]byte)\n\tif cap(*buf) < size {\n\t\tb := make([]byte, size)\n\t\treturn &b\n\t}\n\t*buf = (*buf)[:size]\n\treturn buf\n}\n\n// putBuf returns a buffer to the pool.\nfunc putBuf(buf *[]byte) {\n\tbufPool.Put(buf)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/reader.go",
    "content": "package tsm1\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"os\"\n\t\"sort\"\n\t\"sync\"\n\t\"sync/atomic\"\n)\n\n// ErrFileInUse is returned when attempting to remove or close a TSM file that is still being used.\nvar ErrFileInUse = fmt.Errorf(\"file still in use\")\n\n// TSMReader is a reader for a TSM file.\ntype TSMReader struct {\n\t// refs is the count of active references to this reader.\n\trefs int64\n\n\tmu sync.RWMutex\n\n\t// accessor provides access and decoding of blocks for the reader.\n\taccessor blockAccessor\n\n\t// index is the index of all blocks.\n\tindex TSMIndex\n\n\t// tombstoner ensures tombstoned keys are not available by the index.\n\ttombstoner *Tombstoner\n\n\t// size is the size of the file on disk.\n\tsize int64\n\n\t// lastModified is the last time this file was modified on disk\n\tlastModified int64\n}\n\n// TSMIndex represent the index section of a TSM file.  The index records all\n// blocks, their locations, sizes, min and max times.\ntype TSMIndex interface {\n\t// Delete removes the given keys from the index.\n\tDelete(keys []string)\n\n\t// DeleteRange removes the given keys with data between minTime and maxTime from the index.\n\tDeleteRange(keys []string, minTime, maxTime int64)\n\n\t// Contains return true if the given key exists in the index.\n\tContains(key string) bool\n\n\t// ContainsValue returns true if key and time might exist in this file.  This function could\n\t// return true even though the actual point does not exists.  For example, the key may\n\t// exist in this file, but not have a point exactly at time t.\n\tContainsValue(key string, timestamp int64) bool\n\n\t// Entries returns all index entries for a key.\n\tEntries(key string) []IndexEntry\n\n\t// ReadEntries reads the index entries for key into entries.\n\tReadEntries(key string, entries *[]IndexEntry)\n\n\t// Entry returns the index entry for the specified key and timestamp.  
If no entry\n\t// matches the key and timestamp, nil is returned.\n\tEntry(key string, timestamp int64) *IndexEntry\n\n\t// Key returns the key in the index at the given position.\n\tKey(index int) (string, byte, []IndexEntry)\n\n\t// KeyAt returns the key in the index at the given position.\n\tKeyAt(index int) ([]byte, byte)\n\n\t// KeyCount returns the count of unique keys in the index.\n\tKeyCount() int\n\n\t// OverlapsTimeRange returns true if the time range of the file intersect min and max.\n\tOverlapsTimeRange(min, max int64) bool\n\n\t// OverlapsKeyRange returns true if the min and max keys of the file overlap the arguments min and max.\n\tOverlapsKeyRange(min, max string) bool\n\n\t// Size returns the size of the current index in bytes.\n\tSize() uint32\n\n\t// TimeRange returns the min and max time across all keys in the file.\n\tTimeRange() (int64, int64)\n\n\t// TombstoneRange returns ranges of time that are deleted for the given key.\n\tTombstoneRange(key string) []TimeRange\n\n\t// KeyRange returns the min and max keys in the file.\n\tKeyRange() (string, string)\n\n\t// Type returns the block type of the values stored for the key.  Returns one of\n\t// BlockFloat64, BlockInt64, BlockBool, BlockString.  If key does not exist,\n\t// an error is returned.\n\tType(key string) (byte, error)\n\n\t// UnmarshalBinary populates an index from an encoded byte slice\n\t// representation of an index.\n\tUnmarshalBinary(b []byte) error\n}\n\n// BlockIterator allows iterating over each block in a TSM file in order.  
It provides\n// raw access to the block bytes without decoding them.\ntype BlockIterator struct {\n\tr *TSMReader\n\n\t// i is the current key index\n\ti int\n\n\t// n is the total number of keys\n\tn int\n\n\tkey     string\n\tentries []IndexEntry\n\terr     error\n\ttyp     byte\n}\n\n// PeekNext returns the next key to be iterated or an empty string.\nfunc (b *BlockIterator) PeekNext() string {\n\tif len(b.entries) > 1 {\n\t\treturn b.key\n\t} else if b.n-b.i > 1 {\n\t\tkey, _ := b.r.KeyAt(b.i + 1)\n\t\treturn string(key)\n\t}\n\treturn \"\"\n}\n\n// Next returns true if there are more blocks to iterate through.\nfunc (b *BlockIterator) Next() bool {\n\tif b.n-b.i == 0 && len(b.entries) == 0 {\n\t\treturn false\n\t}\n\n\tif len(b.entries) > 0 {\n\t\tb.entries = b.entries[1:]\n\t\tif len(b.entries) > 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tif b.n-b.i > 0 {\n\t\tb.key, b.typ, b.entries = b.r.Key(b.i)\n\t\tb.i++\n\n\t\tif len(b.entries) > 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n// Read reads information about the next block to be iterated.\nfunc (b *BlockIterator) Read() (key string, minTime int64, maxTime int64, typ byte, checksum uint32, buf []byte, err error) {\n\tif b.err != nil {\n\t\treturn \"\", 0, 0, 0, 0, nil, b.err\n\t}\n\tchecksum, buf, err = b.r.ReadBytes(&b.entries[0], nil)\n\tif err != nil {\n\t\treturn \"\", 0, 0, 0, 0, nil, err\n\t}\n\treturn b.key, b.entries[0].MinTime, b.entries[0].MaxTime, b.typ, checksum, buf, err\n}\n\n// blockAccessor abstracts a method of accessing blocks from a\n// TSM file.\ntype blockAccessor interface {\n\tinit() (*indirectIndex, error)\n\tread(key string, timestamp int64) ([]Value, error)\n\treadAll(key string) ([]Value, error)\n\treadBlock(entry *IndexEntry, values []Value) ([]Value, error)\n\treadFloatBlock(entry *IndexEntry, values *[]FloatValue) ([]FloatValue, error)\n\treadIntegerBlock(entry *IndexEntry, values *[]IntegerValue) ([]IntegerValue, error)\n\treadStringBlock(entry *IndexEntry, values 
*[]StringValue) ([]StringValue, error)\n\treadBooleanBlock(entry *IndexEntry, values *[]BooleanValue) ([]BooleanValue, error)\n\treadBytes(entry *IndexEntry, buf []byte) (uint32, []byte, error)\n\trename(path string) error\n\tpath() string\n\tclose() error\n}\n\n// NewTSMReader returns a new TSMReader from the given file.\nfunc NewTSMReader(f *os.File) (*TSMReader, error) {\n\tt := &TSMReader{}\n\n\tstat, err := f.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt.size = stat.Size()\n\tt.lastModified = stat.ModTime().UnixNano()\n\tt.accessor = &mmapAccessor{\n\t\tf: f,\n\t}\n\n\tindex, err := t.accessor.init()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tt.index = index\n\tt.tombstoner = &Tombstoner{Path: t.Path()}\n\n\tif err := t.applyTombstones(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn t, nil\n}\n\nfunc (t *TSMReader) applyTombstones() error {\n\tvar cur, prev Tombstone\n\tbatch := make([]string, 0, 4096)\n\n\tif err := t.tombstoner.Walk(func(ts Tombstone) error {\n\t\tcur = ts\n\t\tif len(batch) > 0 {\n\t\t\tif prev.Min != cur.Min || prev.Max != cur.Max {\n\t\t\t\tt.index.DeleteRange(batch, prev.Min, prev.Max)\n\t\t\t\tbatch = batch[:0]\n\t\t\t}\n\t\t}\n\t\tbatch = append(batch, ts.Key)\n\n\t\tif len(batch) >= 4096 {\n\t\t\tt.index.DeleteRange(batch, prev.Min, prev.Max)\n\t\t\tbatch = batch[:0]\n\t\t}\n\t\tprev = ts\n\t\treturn nil\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"init: read tombstones: %v\", err)\n\t}\n\n\tif len(batch) > 0 {\n\t\tt.index.DeleteRange(batch, cur.Min, cur.Max)\n\t}\n\treturn nil\n}\n\n// Path returns the path of the file the TSMReader was initialized with.\nfunc (t *TSMReader) Path() string {\n\tt.mu.RLock()\n\tp := t.accessor.path()\n\tt.mu.RUnlock()\n\treturn p\n}\n\n// Key returns the key and the underlying entry at the numeric index.\nfunc (t *TSMReader) Key(index int) (string, byte, []IndexEntry) {\n\treturn t.index.Key(index)\n}\n\n// KeyAt returns the key and key type at position idx in the index.\nfunc 
(t *TSMReader) KeyAt(idx int) ([]byte, byte) {\n\treturn t.index.KeyAt(idx)\n}\n\n// ReadAt returns the values corresponding to the given index entry.\nfunc (t *TSMReader) ReadAt(entry *IndexEntry, vals []Value) ([]Value, error) {\n\tt.mu.RLock()\n\tv, err := t.accessor.readBlock(entry, vals)\n\tt.mu.RUnlock()\n\treturn v, err\n}\n\n// ReadFloatBlockAt returns the float values corresponding to the given index entry.\nfunc (t *TSMReader) ReadFloatBlockAt(entry *IndexEntry, vals *[]FloatValue) ([]FloatValue, error) {\n\tt.mu.RLock()\n\tv, err := t.accessor.readFloatBlock(entry, vals)\n\tt.mu.RUnlock()\n\treturn v, err\n}\n\n// ReadIntegerBlockAt returns the integer values corresponding to the given index entry.\nfunc (t *TSMReader) ReadIntegerBlockAt(entry *IndexEntry, vals *[]IntegerValue) ([]IntegerValue, error) {\n\tt.mu.RLock()\n\tv, err := t.accessor.readIntegerBlock(entry, vals)\n\tt.mu.RUnlock()\n\treturn v, err\n}\n\n// ReadStringBlockAt returns the string values corresponding to the given index entry.\nfunc (t *TSMReader) ReadStringBlockAt(entry *IndexEntry, vals *[]StringValue) ([]StringValue, error) {\n\tt.mu.RLock()\n\tv, err := t.accessor.readStringBlock(entry, vals)\n\tt.mu.RUnlock()\n\treturn v, err\n}\n\n// ReadBooleanBlockAt returns the boolean values corresponding to the given index entry.\nfunc (t *TSMReader) ReadBooleanBlockAt(entry *IndexEntry, vals *[]BooleanValue) ([]BooleanValue, error) {\n\tt.mu.RLock()\n\tv, err := t.accessor.readBooleanBlock(entry, vals)\n\tt.mu.RUnlock()\n\treturn v, err\n}\n\n// Read returns the values corresponding to the block at the given key and timestamp.\nfunc (t *TSMReader) Read(key string, timestamp int64) ([]Value, error) {\n\tt.mu.RLock()\n\tv, err := t.accessor.read(key, timestamp)\n\tt.mu.RUnlock()\n\treturn v, err\n}\n\n// ReadAll returns all values for a key in all blocks.\nfunc (t *TSMReader) ReadAll(key string) ([]Value, error) {\n\tt.mu.RLock()\n\tv, err := 
t.accessor.readAll(key)\n\tt.mu.RUnlock()\n\treturn v, err\n}\n\nfunc (t *TSMReader) ReadBytes(e *IndexEntry, b []byte) (uint32, []byte, error) {\n\tt.mu.RLock()\n\tn, v, err := t.accessor.readBytes(e, b)\n\tt.mu.RUnlock()\n\treturn n, v, err\n}\n\n// Type returns the type of values stored at the given key.\nfunc (t *TSMReader) Type(key string) (byte, error) {\n\treturn t.index.Type(key)\n}\n\n// Close closes the TSMReader.\nfunc (t *TSMReader) Close() error {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\tif t.InUse() {\n\t\treturn ErrFileInUse\n\t}\n\n\tif err := t.accessor.close(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// Ref records a usage of this TSMReader.  If there are active references\n// when the reader is closed or removed, the reader will remain open until\n// there are no more references.\nfunc (t *TSMReader) Ref() {\n\tatomic.AddInt64(&t.refs, 1)\n}\n\n// Unref removes a usage record of this TSMReader.  If the Reader was closed\n// by another goroutine while there were active references, the file will\n// be closed and remove\nfunc (t *TSMReader) Unref() {\n\tatomic.AddInt64(&t.refs, -1)\n}\n\n// InUse returns whether the TSMReader currently has any active references.\nfunc (t *TSMReader) InUse() bool {\n\trefs := atomic.LoadInt64(&t.refs)\n\treturn refs > 0\n}\n\n// Remove removes any underlying files stored on disk for this reader.\nfunc (t *TSMReader) Remove() error {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\treturn t.remove()\n}\n\n// Rename renames the underlying file to the new path.\nfunc (t *TSMReader) Rename(path string) error {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\treturn t.accessor.rename(path)\n}\n\n// Remove removes any underlying files stored on disk for this reader.\nfunc (t *TSMReader) remove() error {\n\tpath := t.accessor.path()\n\n\tif t.InUse() {\n\t\treturn ErrFileInUse\n\t}\n\n\tif path != \"\" {\n\t\tos.RemoveAll(path)\n\t}\n\n\tif err := t.tombstoner.Delete(); err != nil {\n\t\treturn err\n\t}\n\treturn 
nil\n}\n\n// Contains returns whether the given key is present in the index.\nfunc (t *TSMReader) Contains(key string) bool {\n\treturn t.index.Contains(key)\n}\n\n// ContainsValue returns true if key and time might exists in this file.  This function could\n// return true even though the actual point does not exist.  For example, the key may\n// exist in this file, but not have a point exactly at time t.\nfunc (t *TSMReader) ContainsValue(key string, ts int64) bool {\n\treturn t.index.ContainsValue(key, ts)\n}\n\n// DeleteRange removes the given points for keys between minTime and maxTime.   The series\n// keys passed in must be sorted.\nfunc (t *TSMReader) DeleteRange(keys []string, minTime, maxTime int64) error {\n\tif len(keys) == 0 {\n\t\treturn nil\n\t}\n\n\t// If the keys can't exist in this TSM file, skip it.\n\tminKey, maxKey := keys[0], keys[len(keys)-1]\n\tif !t.index.OverlapsKeyRange(minKey, maxKey) {\n\t\treturn nil\n\t}\n\n\t// If the timerange can't exist in this TSM file, skip it.\n\tif !t.index.OverlapsTimeRange(minTime, maxTime) {\n\t\treturn nil\n\t}\n\n\tif err := t.tombstoner.AddRange(keys, minTime, maxTime); err != nil {\n\t\treturn err\n\t}\n\n\tt.index.DeleteRange(keys, minTime, maxTime)\n\treturn nil\n}\n\n// Delete deletes blocks indicated by keys.\nfunc (t *TSMReader) Delete(keys []string) error {\n\tif err := t.tombstoner.Add(keys); err != nil {\n\t\treturn err\n\t}\n\n\tt.index.Delete(keys)\n\treturn nil\n}\n\n// TimeRange returns the min and max time across all keys in the file.\nfunc (t *TSMReader) TimeRange() (int64, int64) {\n\treturn t.index.TimeRange()\n}\n\n// KeyRange returns the min and max key across all keys in the file.\nfunc (t *TSMReader) KeyRange() (string, string) {\n\treturn t.index.KeyRange()\n}\n\n// KeyCount returns the count of unique keys in the TSMReader.\nfunc (t *TSMReader) KeyCount() int {\n\treturn t.index.KeyCount()\n}\n\n// Entries returns all index entries for key.\nfunc (t *TSMReader) Entries(key string) 
[]IndexEntry {\n\treturn t.index.Entries(key)\n}\n\n// ReadEntries reads the index entries for key into entries.\nfunc (t *TSMReader) ReadEntries(key string, entries *[]IndexEntry) {\n\tt.index.ReadEntries(key, entries)\n}\n\n// IndexSize returns the size of the index in bytes.\nfunc (t *TSMReader) IndexSize() uint32 {\n\treturn t.index.Size()\n}\n\n// Size returns the size of the underlying file in bytes.\nfunc (t *TSMReader) Size() uint32 {\n\tt.mu.RLock()\n\tsize := t.size\n\tt.mu.RUnlock()\n\treturn uint32(size)\n}\n\n// LastModified returns the last time the underlying file was modified.\nfunc (t *TSMReader) LastModified() int64 {\n\tt.mu.RLock()\n\tlm := t.lastModified\n\tfor _, ts := range t.tombstoner.TombstoneFiles() {\n\t\tif ts.LastModified > lm {\n\t\t\tlm = ts.LastModified\n\t\t}\n\t}\n\tt.mu.RUnlock()\n\treturn lm\n}\n\n// HasTombstones return true if there are any tombstone entries recorded.\nfunc (t *TSMReader) HasTombstones() bool {\n\tt.mu.RLock()\n\tb := t.tombstoner.HasTombstones()\n\tt.mu.RUnlock()\n\treturn b\n}\n\n// TombstoneFiles returns any tombstone files associated with this TSM file.\nfunc (t *TSMReader) TombstoneFiles() []FileStat {\n\tt.mu.RLock()\n\tfs := t.tombstoner.TombstoneFiles()\n\tt.mu.RUnlock()\n\treturn fs\n}\n\n// TombstoneRange returns ranges of time that are deleted for the given key.\nfunc (t *TSMReader) TombstoneRange(key string) []TimeRange {\n\tt.mu.RLock()\n\ttr := t.index.TombstoneRange(key)\n\tt.mu.RUnlock()\n\treturn tr\n}\n\n// Stats returns the FileStat for the TSMReader's underlying file.\nfunc (t *TSMReader) Stats() FileStat {\n\tminTime, maxTime := t.index.TimeRange()\n\tminKey, maxKey := t.index.KeyRange()\n\treturn FileStat{\n\t\tPath:         t.Path(),\n\t\tSize:         t.Size(),\n\t\tLastModified: t.LastModified(),\n\t\tMinTime:      minTime,\n\t\tMaxTime:      maxTime,\n\t\tMinKey:       minKey,\n\t\tMaxKey:       maxKey,\n\t\tHasTombstone: t.tombstoner.HasTombstones(),\n\t}\n}\n\n// BlockIterator 
returns a BlockIterator for the underlying TSM file.\nfunc (t *TSMReader) BlockIterator() *BlockIterator {\n\treturn &BlockIterator{\n\t\tr: t,\n\t\tn: t.index.KeyCount(),\n\t}\n}\n\n// indirectIndex is a TSMIndex that uses a raw byte slice representation of an index.  This\n// implementation can be used for indexes that may be MMAPed into memory.\ntype indirectIndex struct {\n\tmu sync.RWMutex\n\n\t// indirectIndex works a follows.  Assuming we have an index structure in memory as\n\t// the diagram below:\n\t//\n\t// ┌────────────────────────────────────────────────────────────────────┐\n\t// │                               Index                                │\n\t// ├─┬──────────────────────┬──┬───────────────────────┬───┬────────────┘\n\t// │0│                      │62│                       │145│\n\t// ├─┴───────┬─────────┬────┼──┴──────┬─────────┬──────┼───┴─────┬──────┐\n\t// │Key 1 Len│   Key   │... │Key 2 Len│  Key 2  │ ...  │  Key 3  │ ...  │\n\t// │ 2 bytes │ N bytes │    │ 2 bytes │ N bytes │      │ 2 bytes │      │\n\t// └─────────┴─────────┴────┴─────────┴─────────┴──────┴─────────┴──────┘\n\n\t// We would build an `offsets` slices where each element pointers to the byte location\n\t// for the first key in the index slice.\n\n\t// ┌────────────────────────────────────────────────────────────────────┐\n\t// │                              Offsets                               │\n\t// ├────┬────┬────┬─────────────────────────────────────────────────────┘\n\t// │ 0  │ 62 │145 │\n\t// └────┴────┴────┘\n\n\t// Using this offset slice we can find `Key 2` by doing a binary search\n\t// over the offsets slice.  Instead of comparing the value in the offsets\n\t// (e.g. `62`), we use that as an index into the underlying index to\n\t// retrieve the key at postion `62` and perform our comparisons with that.\n\n\t// When we have identified the correct position in the index for a given\n\t// key, we could perform another binary search or a linear scan.  
This\n\t// should be fast as well since each index entry is 28 bytes and all\n\t// contiguous in memory.  The current implementation uses a linear scan since the\n\t// number of block entries is expected to be < 100 per key.\n\n\t// b is the underlying index byte slice.  This could be a copy on the heap or an MMAP\n\t// slice reference\n\tb []byte\n\n\t// offsets contains the positions in b for each key.  It points to the 2 byte length of\n\t// key.\n\toffsets []int32\n\n\t// minKey, maxKey are the minium and maximum (lexicographically sorted) contained in the\n\t// file\n\tminKey, maxKey string\n\n\t// minTime, maxTime are the minimum and maximum times contained in the file across all\n\t// series.\n\tminTime, maxTime int64\n\n\t// tombstones contains only the tombstoned keys with subset of time values deleted.  An\n\t// entry would exist here if a subset of the points for a key were deleted and the file\n\t// had not be re-compacted to remove the points on disk.\n\ttombstones map[string][]TimeRange\n}\n\n// TimeRange holds a min and max timestamp.\ntype TimeRange struct {\n\tMin, Max int64\n}\n\n// NewIndirectIndex returns a new indirect index.\nfunc NewIndirectIndex() *indirectIndex {\n\treturn &indirectIndex{\n\t\ttombstones: make(map[string][]TimeRange),\n\t}\n}\n\n// search returns the index of i in offsets for where key is located.  
If key is not\n// in the index, len(index) is returned.\nfunc (d *indirectIndex) search(key []byte) int {\n\t// We use a binary search across our indirect offsets (pointers to all the keys\n\t// in the index slice).\n\ti := sort.Search(len(d.offsets), func(i int) bool {\n\t\t// i is the position in offsets we are at so get offset it points to\n\t\toffset := d.offsets[i]\n\n\t\t// It's pointing to the start of the key which is a 2 byte length\n\t\tkeyLen := int32(binary.BigEndian.Uint16(d.b[offset : offset+2]))\n\n\t\t// See if it matches\n\t\treturn bytes.Compare(d.b[offset+2:offset+2+keyLen], key) >= 0\n\t})\n\n\t// See if we might have found the right index\n\tif i < len(d.offsets) {\n\t\tofs := d.offsets[i]\n\t\t_, k, err := readKey(d.b[ofs:])\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"error reading key: %v\", err))\n\t\t}\n\n\t\t// The search may have returned an i == 0 which could indicated that the value\n\t\t// searched should be inserted at postion 0.  Make sure the key in the index\n\t\t// matches the search value.\n\t\tif !bytes.Equal(key, k) {\n\t\t\treturn len(d.b)\n\t\t}\n\n\t\treturn int(ofs)\n\t}\n\n\t// The key is not in the index.  i is the index where it would be inserted so return\n\t// a value outside our offset range.\n\treturn len(d.b)\n}\n\n// Entries returns all index entries for a key.\nfunc (d *indirectIndex) Entries(key string) []IndexEntry {\n\td.mu.RLock()\n\tdefer d.mu.RUnlock()\n\n\tkb := []byte(key)\n\n\tofs := d.search(kb)\n\tif ofs < len(d.b) {\n\t\tn, k, err := readKey(d.b[ofs:])\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"error reading key: %v\", err))\n\t\t}\n\n\t\t// The search may have returned an i == 0 which could indicated that the value\n\t\t// searched should be inserted at position 0.  
Make sure the key in the index\n\t\t// matches the search value.\n\t\tif !bytes.Equal(kb, k) {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Read and return all the entries\n\t\tofs += n\n\t\tvar entries indexEntries\n\t\tif _, err := readEntries(d.b[ofs:], &entries); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"error reading entries: %v\", err))\n\t\t}\n\t\treturn entries.entries\n\t}\n\n\t// The key is not in the index.  i is the index where it would be inserted.\n\treturn nil\n}\n\n// ReadEntries returns all index entries for a key.\nfunc (d *indirectIndex) ReadEntries(key string, entries *[]IndexEntry) {\n\t*entries = d.Entries(key)\n}\n\n// Entry returns the index entry for the specified key and timestamp.  If no entry\n// matches the key an timestamp, nil is returned.\nfunc (d *indirectIndex) Entry(key string, timestamp int64) *IndexEntry {\n\tentries := d.Entries(key)\n\tfor _, entry := range entries {\n\t\tif entry.Contains(timestamp) {\n\t\t\treturn &entry\n\t\t}\n\t}\n\treturn nil\n}\n\n// Key returns the key in the index at the given position.\nfunc (d *indirectIndex) Key(idx int) (string, byte, []IndexEntry) {\n\td.mu.RLock()\n\tdefer d.mu.RUnlock()\n\n\tif idx < 0 || idx >= len(d.offsets) {\n\t\treturn \"\", 0, nil\n\t}\n\tn, key, err := readKey(d.b[d.offsets[idx]:])\n\tif err != nil {\n\t\treturn \"\", 0, nil\n\t}\n\n\ttyp := d.b[int(d.offsets[idx])+n]\n\n\tvar entries indexEntries\n\tif _, err := readEntries(d.b[int(d.offsets[idx])+n:], &entries); err != nil {\n\t\treturn \"\", 0, nil\n\t}\n\treturn string(key), typ, entries.entries\n}\n\n// KeyAt returns the key in the index at the given position.\nfunc (d *indirectIndex) KeyAt(idx int) ([]byte, byte) {\n\td.mu.RLock()\n\n\tif idx < 0 || idx >= len(d.offsets) {\n\t\td.mu.RUnlock()\n\t\treturn nil, 0\n\t}\n\tn, key, _ := readKey(d.b[d.offsets[idx]:])\n\ttyp := d.b[d.offsets[idx]+int32(n)]\n\td.mu.RUnlock()\n\treturn key, typ\n}\n\n// KeyCount returns the count of unique keys in the index.\nfunc (d *indirectIndex) 
KeyCount() int {\n\td.mu.RLock()\n\tn := len(d.offsets)\n\td.mu.RUnlock()\n\treturn n\n}\n\n// Delete removes the given keys from the index.\nfunc (d *indirectIndex) Delete(keys []string) {\n\tif len(keys) == 0 {\n\t\treturn\n\t}\n\n\tif !sort.StringsAreSorted(keys) {\n\t\tsort.Strings(keys)\n\t}\n\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\n\t// Both keys and offsets are sorted.  Walk both in order and skip\n\t// any keys that exist in both.\n\toffsets := make([]int32, 0, len(d.offsets))\n\tfor _, offset := range d.offsets {\n\t\t_, indexKey, _ := readKey(d.b[offset:])\n\n\t\tfor len(keys) > 0 && keys[0] < string(indexKey) {\n\t\t\tkeys = keys[1:]\n\t\t}\n\n\t\tif len(keys) > 0 && keys[0] == string(indexKey) {\n\t\t\tkeys = keys[1:]\n\t\t\tcontinue\n\t\t}\n\n\t\toffsets = append(offsets, int32(offset))\n\t}\n\td.offsets = offsets\n}\n\n// DeleteRange removes the given keys with data between minTime and maxTime from the index.\nfunc (d *indirectIndex) DeleteRange(keys []string, minTime, maxTime int64) {\n\t// No keys, nothing to do\n\tif len(keys) == 0 {\n\t\treturn\n\t}\n\n\t// If we're deleting the max time range, just use tombstoning to remove the\n\t// key from the offsets slice\n\tif minTime == math.MinInt64 && maxTime == math.MaxInt64 {\n\t\td.Delete(keys)\n\t\treturn\n\t}\n\n\t// Is the range passed in outside of the time range for the file?\n\tmin, max := d.TimeRange()\n\tif minTime > max || maxTime < min {\n\t\treturn\n\t}\n\n\ttombstones := map[string][]TimeRange{}\n\tfor _, k := range keys {\n\t\t// Is the range passed in outside the time range for this key?\n\t\tentries := d.Entries(k)\n\n\t\t// If multiple tombstones are saved for the same key\n\t\tif len(entries) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tmin, max := entries[0].MinTime, entries[len(entries)-1].MaxTime\n\t\tif minTime > max || maxTime < min {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Is the range passed in cover every value for the key?\n\t\tif minTime <= min && maxTime >= max 
{\n\t\t\td.Delete(keys)\n\t\t\tcontinue\n\t\t}\n\n\t\ttombstones[k] = append(tombstones[k], TimeRange{minTime, maxTime})\n\t}\n\n\tif len(tombstones) == 0 {\n\t\treturn\n\t}\n\n\td.mu.Lock()\n\tfor k, v := range tombstones {\n\t\td.tombstones[k] = append(d.tombstones[k], v...)\n\t}\n\td.mu.Unlock()\n}\n\n// TombstoneRange returns ranges of time that are deleted for the given key.\nfunc (d *indirectIndex) TombstoneRange(key string) []TimeRange {\n\td.mu.RLock()\n\tr := d.tombstones[key]\n\td.mu.RUnlock()\n\treturn r\n}\n\n// Contains return true if the given key exists in the index.\nfunc (d *indirectIndex) Contains(key string) bool {\n\treturn len(d.Entries(key)) > 0\n}\n\n// ContainsValue returns true if key and time might exist in this file.\nfunc (d *indirectIndex) ContainsValue(key string, timestamp int64) bool {\n\tentry := d.Entry(key, timestamp)\n\tif entry == nil {\n\t\treturn false\n\t}\n\n\td.mu.RLock()\n\ttombstones := d.tombstones[key]\n\td.mu.RUnlock()\n\n\tfor _, t := range tombstones {\n\t\tif t.Min <= timestamp && t.Max >= timestamp {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// Type returns the block type of the values stored for the key.\nfunc (d *indirectIndex) Type(key string) (byte, error) {\n\td.mu.RLock()\n\tdefer d.mu.RUnlock()\n\n\tkb := []byte(key)\n\tofs := d.search(kb)\n\tif ofs < len(d.b) {\n\t\tn, _, err := readKey(d.b[ofs:])\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"error reading key: %v\", err))\n\t\t}\n\n\t\tofs += n\n\t\treturn d.b[ofs], nil\n\t}\n\treturn 0, fmt.Errorf(\"key does not exist: %v\", key)\n}\n\n// OverlapsTimeRange returns true if the time range of the file intersect min and max.\nfunc (d *indirectIndex) OverlapsTimeRange(min, max int64) bool {\n\treturn d.minTime <= max && d.maxTime >= min\n}\n\n// OverlapsKeyRange returns true if the min and max keys of the file overlap the arguments min and max.\nfunc (d *indirectIndex) OverlapsKeyRange(min, max string) bool {\n\treturn d.minKey <= max && d.maxKey 
>= min\n}\n\n// KeyRange returns the min and max keys in the index.\nfunc (d *indirectIndex) KeyRange() (string, string) {\n\treturn d.minKey, d.maxKey\n}\n\n// TimeRange returns the min and max time across all keys in the index.\nfunc (d *indirectIndex) TimeRange() (int64, int64) {\n\treturn d.minTime, d.maxTime\n}\n\n// MarshalBinary returns a byte slice encoded version of the index.\nfunc (d *indirectIndex) MarshalBinary() ([]byte, error) {\n\td.mu.RLock()\n\tdefer d.mu.RUnlock()\n\n\treturn d.b, nil\n}\n\n// UnmarshalBinary populates an index from an encoded byte slice\n// representation of an index.\nfunc (d *indirectIndex) UnmarshalBinary(b []byte) error {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\n\t// Keep a reference to the actual index bytes\n\td.b = b\n\tif len(b) == 0 {\n\t\treturn nil\n\t}\n\n\t//var minKey, maxKey []byte\n\tvar minTime, maxTime int64 = math.MaxInt64, 0\n\n\t// To create our \"indirect\" index, we need to find the location of all the keys in\n\t// the raw byte slice.  The keys are listed once each (in sorted order).  Following\n\t// each key is a time ordered list of index entry blocks for that key.  
The loop below\n\t// basically skips across the slice keeping track of the counter when we are at a key\n\t// field.\n\tvar i int32\n\tiMax := int32(len(b))\n\tfor i < iMax {\n\t\td.offsets = append(d.offsets, i)\n\n\t\t// Skip to the start of the values\n\t\t// key length value (2) + type (1) + length of key\n\t\tif i+2 >= iMax {\n\t\t\treturn fmt.Errorf(\"indirectIndex: not enough data for key length value\")\n\t\t}\n\t\ti += 3 + int32(binary.BigEndian.Uint16(b[i:i+2]))\n\n\t\t// count of index entries\n\t\tif i+indexCountSize >= iMax {\n\t\t\treturn fmt.Errorf(\"indirectIndex: not enough data for index entries count\")\n\t\t}\n\t\tcount := int32(binary.BigEndian.Uint16(b[i : i+indexCountSize]))\n\t\ti += indexCountSize\n\n\t\t// Find the min time for the block\n\t\tif i+8 >= iMax {\n\t\t\treturn fmt.Errorf(\"indirectIndex: not enough data for min time\")\n\t\t}\n\t\tminT := int64(binary.BigEndian.Uint64(b[i : i+8]))\n\t\tif minT < minTime {\n\t\t\tminTime = minT\n\t\t}\n\n\t\ti += (count - 1) * indexEntrySize\n\n\t\t// Find the max time for the block\n\t\tif i+16 >= iMax {\n\t\t\treturn fmt.Errorf(\"indirectIndex: not enough data for max time\")\n\t\t}\n\t\tmaxT := int64(binary.BigEndian.Uint64(b[i+8 : i+16]))\n\t\tif maxT > maxTime {\n\t\t\tmaxTime = maxT\n\t\t}\n\n\t\ti += indexEntrySize\n\t}\n\n\tfirstOfs := d.offsets[0]\n\t_, key, err := readKey(b[firstOfs:])\n\tif err != nil {\n\t\treturn err\n\t}\n\td.minKey = string(key)\n\n\tlastOfs := d.offsets[len(d.offsets)-1]\n\t_, key, err = readKey(b[lastOfs:])\n\tif err != nil {\n\t\treturn err\n\t}\n\td.maxKey = string(key)\n\n\td.minTime = minTime\n\td.maxTime = maxTime\n\n\treturn nil\n}\n\n// Size returns the size of the current index in bytes.\nfunc (d *indirectIndex) Size() uint32 {\n\td.mu.RLock()\n\tdefer d.mu.RUnlock()\n\n\treturn uint32(len(d.b))\n}\n\n// mmapAccess is mmap based block accessor.  
It access blocks through an\n// MMAP file interface.\ntype mmapAccessor struct {\n\tmu sync.RWMutex\n\n\tf     *os.File\n\tb     []byte\n\tindex *indirectIndex\n}\n\nfunc (m *mmapAccessor) init() (*indirectIndex, error) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tif err := verifyVersion(m.f); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar err error\n\n\tif _, err := m.f.Seek(0, 0); err != nil {\n\t\treturn nil, err\n\t}\n\n\tstat, err := m.f.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm.b, err = mmap(m.f, 0, int(stat.Size()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(m.b) < 8 {\n\t\treturn nil, fmt.Errorf(\"mmapAccessor: byte slice too small for indirectIndex\")\n\t}\n\n\tindexOfsPos := len(m.b) - 8\n\tindexStart := binary.BigEndian.Uint64(m.b[indexOfsPos : indexOfsPos+8])\n\tif indexStart >= uint64(indexOfsPos) {\n\t\treturn nil, fmt.Errorf(\"mmapAccessor: invalid indexStart\")\n\t}\n\n\tm.index = NewIndirectIndex()\n\tif err := m.index.UnmarshalBinary(m.b[indexStart:indexOfsPos]); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m.index, nil\n}\n\nfunc (m *mmapAccessor) rename(path string) error {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\terr := munmap(m.b)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := m.f.Close(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := renameFile(m.f.Name(), path); err != nil {\n\t\treturn err\n\t}\n\n\tm.f, err = os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := m.f.Seek(0, 0); err != nil {\n\t\treturn err\n\t}\n\n\tstat, err := m.f.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.b, err = mmap(m.f, 0, int(stat.Size()))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (m *mmapAccessor) read(key string, timestamp int64) ([]Value, error) {\n\tentry := m.index.Entry(key, timestamp)\n\tif entry == nil {\n\t\treturn nil, nil\n\t}\n\n\treturn m.readBlock(entry, nil)\n}\n\nfunc (m *mmapAccessor) readBlock(entry *IndexEntry, values []Value) ([]Value, error) 
{\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\n\tif int64(len(m.b)) < entry.Offset+int64(entry.Size) {\n\t\treturn nil, ErrTSMClosed\n\t}\n\t//TODO: Validate checksum\n\tvar err error\n\tvalues, err = DecodeBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn values, nil\n}\n\nfunc (m *mmapAccessor) readFloatBlock(entry *IndexEntry, values *[]FloatValue) ([]FloatValue, error) {\n\tm.mu.RLock()\n\n\tif int64(len(m.b)) < entry.Offset+int64(entry.Size) {\n\t\tm.mu.RUnlock()\n\t\treturn nil, ErrTSMClosed\n\t}\n\n\ta, err := DecodeFloatBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values)\n\tm.mu.RUnlock()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn a, nil\n}\n\nfunc (m *mmapAccessor) readIntegerBlock(entry *IndexEntry, values *[]IntegerValue) ([]IntegerValue, error) {\n\tm.mu.RLock()\n\n\tif int64(len(m.b)) < entry.Offset+int64(entry.Size) {\n\t\tm.mu.RUnlock()\n\t\treturn nil, ErrTSMClosed\n\t}\n\n\ta, err := DecodeIntegerBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values)\n\tm.mu.RUnlock()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn a, nil\n}\n\nfunc (m *mmapAccessor) readStringBlock(entry *IndexEntry, values *[]StringValue) ([]StringValue, error) {\n\tm.mu.RLock()\n\n\tif int64(len(m.b)) < entry.Offset+int64(entry.Size) {\n\t\tm.mu.RUnlock()\n\t\treturn nil, ErrTSMClosed\n\t}\n\n\ta, err := DecodeStringBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values)\n\tm.mu.RUnlock()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn a, nil\n}\n\nfunc (m *mmapAccessor) readBooleanBlock(entry *IndexEntry, values *[]BooleanValue) ([]BooleanValue, error) {\n\tm.mu.RLock()\n\n\tif int64(len(m.b)) < entry.Offset+int64(entry.Size) {\n\t\tm.mu.RUnlock()\n\t\treturn nil, ErrTSMClosed\n\t}\n\n\ta, err := DecodeBooleanBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values)\n\tm.mu.RUnlock()\n\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\treturn a, nil\n}\n\nfunc (m *mmapAccessor) readBytes(entry *IndexEntry, b []byte) (uint32, []byte, error) {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\n\tif int64(len(m.b)) < entry.Offset+int64(entry.Size) {\n\t\treturn 0, nil, ErrTSMClosed\n\t}\n\n\t// return the bytes after the 4 byte checksum\n\treturn binary.BigEndian.Uint32(m.b[entry.Offset : entry.Offset+4]), m.b[entry.Offset+4 : entry.Offset+int64(entry.Size)], nil\n}\n\n// readAll returns all values for a key in all blocks.\nfunc (m *mmapAccessor) readAll(key string) ([]Value, error) {\n\tblocks := m.index.Entries(key)\n\tif len(blocks) == 0 {\n\t\treturn nil, nil\n\t}\n\n\ttombstones := m.index.TombstoneRange(key)\n\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\n\tvar temp []Value\n\tvar err error\n\tvar values []Value\n\tfor _, block := range blocks {\n\t\tvar skip bool\n\t\tfor _, t := range tombstones {\n\t\t\t// Should we skip this block because it contains points that have been deleted\n\t\t\tif t.Min <= block.MinTime && t.Max >= block.MaxTime {\n\t\t\t\tskip = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif skip {\n\t\t\tcontinue\n\t\t}\n\t\t//TODO: Validate checksum\n\t\ttemp = temp[:0]\n\t\t// The +4 is the 4 byte checksum length\n\t\ttemp, err = DecodeBlock(m.b[block.Offset+4:block.Offset+int64(block.Size)], temp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Filter out any values that were deleted\n\t\tfor _, t := range tombstones {\n\t\t\ttemp = Values(temp).Exclude(t.Min, t.Max)\n\t\t}\n\n\t\tvalues = append(values, temp...)\n\t}\n\n\treturn values, nil\n}\n\nfunc (m *mmapAccessor) path() string {\n\tm.mu.RLock()\n\tpath := m.f.Name()\n\tm.mu.RUnlock()\n\treturn path\n}\n\nfunc (m *mmapAccessor) close() error {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tif m.b == nil {\n\t\treturn nil\n\t}\n\n\terr := munmap(m.b)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.b = nil\n\treturn m.f.Close()\n}\n\ntype indexEntries struct {\n\tType    byte\n\tentries []IndexEntry\n}\n\nfunc 
(a *indexEntries) Len() int      { return len(a.entries) }\nfunc (a *indexEntries) Swap(i, j int) { a.entries[i], a.entries[j] = a.entries[j], a.entries[i] }\nfunc (a *indexEntries) Less(i, j int) bool {\n\treturn a.entries[i].MinTime < a.entries[j].MinTime\n}\n\nfunc (a *indexEntries) MarshalBinary() ([]byte, error) {\n\tbuf := make([]byte, len(a.entries)*indexEntrySize)\n\n\tfor i, entry := range a.entries {\n\t\tentry.AppendTo(buf[indexEntrySize*i:])\n\t}\n\n\treturn buf, nil\n}\n\nfunc (a *indexEntries) WriteTo(w io.Writer) (total int64, err error) {\n\tvar buf [indexEntrySize]byte\n\tvar n int\n\n\tfor _, entry := range a.entries {\n\t\tentry.AppendTo(buf[:])\n\t\tn, err = w.Write(buf[:])\n\t\ttotal += int64(n)\n\t\tif err != nil {\n\t\t\treturn total, err\n\t\t}\n\t}\n\n\treturn total, nil\n}\n\nfunc readKey(b []byte) (n int, key []byte, err error) {\n\t// 2 byte size of key\n\tn, size := 2, int(binary.BigEndian.Uint16(b[:2]))\n\n\t// N byte key\n\tkey = b[n : n+size]\n\n\tn += len(key)\n\treturn\n}\n\nfunc readEntries(b []byte, entries *indexEntries) (n int, err error) {\n\tif len(b) < 1+indexCountSize {\n\t\treturn 0, fmt.Errorf(\"readEntries: data too short for headers\")\n\t}\n\n\t// 1 byte block type\n\tentries.Type = b[n]\n\tn++\n\n\t// 2 byte count of index entries\n\tcount := int(binary.BigEndian.Uint16(b[n : n+indexCountSize]))\n\tn += indexCountSize\n\n\tentries.entries = make([]IndexEntry, count)\n\tfor i := 0; i < count; i++ {\n\t\tvar ie IndexEntry\n\t\tstart := i*indexEntrySize + indexCountSize + indexTypeSize\n\t\tend := start + indexEntrySize\n\t\tif end > len(b) {\n\t\t\treturn 0, fmt.Errorf(\"readEntries: data too short for indexEntry %d\", i)\n\t\t}\n\t\tif err := ie.UnmarshalBinary(b[start:end]); err != nil {\n\t\t\treturn 0, fmt.Errorf(\"readEntries: unmarshal error: %v\", err)\n\t\t}\n\t\tentries.entries[i] = ie\n\t\tn += indexEntrySize\n\t}\n\treturn\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/reader_test.go",
    "content": "package tsm1_test\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"math\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/tsdb/engine/tsm1\"\n)\n\nfunc TestTSMReader_Type(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvalues := []tsm1.Value{tsm1.NewValue(0, int64(1))}\n\tif err := w.Write(\"cpu\", values); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\n\t}\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tf, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error opening: %v\", err)\n\t}\n\tr, err := tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\n\ttyp, err := r.Type(\"cpu\")\n\tif err != nil {\n\t\tfatal(t, \"reading type\", err)\n\t}\n\n\tif got, exp := typ, tsm1.BlockInteger; got != exp {\n\t\tt.Fatalf(\"type mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTSMReader_MMAP_ReadAll(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer f.Close()\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tvalues []tsm1.Value\n\t}{\n\t\t{\"float\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, 1.0)},\n\t\t},\n\t\t{\"int\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, int64(1))},\n\t\t},\n\t\t{\"bool\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, true)},\n\t\t},\n\t\t{\"string\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, \"foo\")},\n\t\t},\n\t}\n\n\tfor _, d := range data {\n\t\tif err := w.Write(d.key, d.values); err != nil {\n\t\t\tt.Fatalf(\"unexpected error 
writing: %v\", err)\n\t\t}\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error writing index: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tf, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tvar count int\n\tfor _, d := range data {\n\t\treadValues, err := r.ReadAll(d.key)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error readin: %v\", err)\n\t\t}\n\n\t\tif exp := len(d.values); exp != len(readValues) {\n\t\t\tt.Fatalf(\"read values length mismatch: got %v, exp %v\", len(readValues), exp)\n\t\t}\n\n\t\tfor i, v := range d.values {\n\t\t\tif v.Value() != readValues[i].Value() {\n\t\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %d\", i, readValues[i].Value(), v.Value())\n\t\t\t}\n\t\t}\n\t\tcount++\n\t}\n\n\tif got, exp := count, len(data); got != exp {\n\t\tt.Fatalf(\"read values count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTSMReader_MMAP_Read(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer f.Close()\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tvalues []tsm1.Value\n\t}{\n\t\t{\"float\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, 1.0)},\n\t\t},\n\t\t{\"int\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, int64(1))},\n\t\t},\n\t\t{\"bool\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, true)},\n\t\t},\n\t\t{\"string\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, \"foo\")},\n\t\t},\n\t}\n\tfor _, d := range data {\n\t\tif err := w.Write(d.key, d.values); err != nil {\n\t\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t\t}\n\t}\n\n\tif err := w.WriteIndex(); err != nil 
{\n\t\tt.Fatalf(\"unexpected error writing index: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tf, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tvar count int\n\tfor _, d := range data {\n\t\treadValues, err := r.Read(d.key, d.values[0].UnixNano())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error readin: %v\", err)\n\t\t}\n\n\t\tif exp := len(d.values); exp != len(readValues) {\n\t\t\tt.Fatalf(\"read values length mismatch: got %v, exp %v\", len(readValues), exp)\n\t\t}\n\n\t\tfor i, v := range d.values {\n\t\t\tif v.Value() != readValues[i].Value() {\n\t\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %d\", i, readValues[i].Value(), v.Value())\n\t\t\t}\n\t\t}\n\t\tcount++\n\t}\n\n\tif got, exp := count, len(data); got != exp {\n\t\tt.Fatalf(\"read values count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTSMReader_MMAP_Keys(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer f.Close()\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tvalues []tsm1.Value\n\t}{\n\t\t{\"float\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, 1.0)},\n\t\t},\n\t\t{\"int\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, int64(1))},\n\t\t},\n\t\t{\"bool\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, true)},\n\t\t},\n\t\t{\"string\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, \"foo\")},\n\t\t},\n\t}\n\n\tfor _, d := range data {\n\t\tif err := w.Write(d.key, d.values); err != nil {\n\t\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t\t}\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error writing index: %v\", 
err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tf, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tvar count int\n\tfor _, d := range data {\n\t\treadValues, err := r.Read(d.key, d.values[0].UnixNano())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error readin: %v\", err)\n\t\t}\n\n\t\tif exp := len(d.values); exp != len(readValues) {\n\t\t\tt.Fatalf(\"read values length mismatch: got %v, exp %v\", len(readValues), exp)\n\t\t}\n\n\t\tfor i, v := range d.values {\n\t\t\tif v.Value() != readValues[i].Value() {\n\t\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %d\", i, readValues[i].Value(), v.Value())\n\t\t\t}\n\t\t}\n\t\tcount++\n\t}\n\n\tif got, exp := count, len(data); got != exp {\n\t\tt.Fatalf(\"read values count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTSMReader_MMAP_Tombstone(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer f.Close()\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvalues := []tsm1.Value{tsm1.NewValue(0, 1.0)}\n\tif err := w.Write(\"cpu\", values); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t}\n\n\tif err := w.Write(\"mem\", values); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error writing index: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tf, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error 
created reader: %v\", err)\n\t}\n\n\tif err := r.Delete([]string{\"mem\"}); err != nil {\n\t\tt.Fatalf(\"unexpected error deleting: %v\", err)\n\t}\n\n\tr, err = tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tif got, exp := r.KeyCount(), 1; got != exp {\n\t\tt.Fatalf(\"key length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTSMReader_MMAP_TombstoneRange(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer f.Close()\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\texpValues := []tsm1.Value{\n\t\ttsm1.NewValue(1, 1.0),\n\t\ttsm1.NewValue(2, 2.0),\n\t\ttsm1.NewValue(3, 3.0),\n\t}\n\tif err := w.Write(\"cpu\", expValues); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error writing index: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tf, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\n\tif err := r.DeleteRange([]string{\"cpu\"}, 2, math.MaxInt64); err != nil {\n\t\tt.Fatalf(\"unexpected error deleting: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tif got, exp := r.ContainsValue(\"cpu\", 1), true; got != exp {\n\t\tt.Fatalf(\"ContainsValue mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := r.ContainsValue(\"cpu\", 3), false; got != exp {\n\t\tt.Fatalf(\"ContainsValue mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tvalues, err := r.ReadAll(\"cpu\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading all: %v\", err)\n\t}\n\n\tif got, exp := len(values), 1; got != exp {\n\t\tt.Fatalf(\"values length 
mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := values[0].String(), expValues[0].String(); got != exp {\n\t\tt.Fatalf(\"value mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTSMReader_MMAP_TombstoneOutsideTimeRange(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer f.Close()\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\texpValues := []tsm1.Value{\n\t\ttsm1.NewValue(1, 1.0),\n\t\ttsm1.NewValue(2, 2.0),\n\t\ttsm1.NewValue(3, 3.0),\n\t}\n\tif err := w.Write(\"cpu\", expValues); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error writing index: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tf, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\n\tif err := r.DeleteRange([]string{\"cpu\"}, 0, 0); err != nil {\n\t\tt.Fatalf(\"unexpected error deleting: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tif got, exp := r.ContainsValue(\"cpu\", 1), true; got != exp {\n\t\tt.Fatalf(\"ContainsValue mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := r.ContainsValue(\"cpu\", 2), true; got != exp {\n\t\tt.Fatalf(\"ContainsValue mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := r.ContainsValue(\"cpu\", 3), true; got != exp {\n\t\tt.Fatalf(\"ContainsValue mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := r.HasTombstones(), false; got != exp {\n\t\tt.Fatalf(\"HasTombstones mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := len(r.TombstoneFiles()), 0; got != exp {\n\t\tt.Fatalf(\"TombstoneFiles len mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc 
TestTSMReader_MMAP_TombstoneOutsideKeyRange(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer f.Close()\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\texpValues := []tsm1.Value{\n\t\ttsm1.NewValue(1, 1.0),\n\t\ttsm1.NewValue(2, 2.0),\n\t\ttsm1.NewValue(3, 3.0),\n\t}\n\tif err := w.Write(\"cpu\", expValues); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error writing index: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tf, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\n\tif err := r.DeleteRange([]string{\"mem\"}, 0, 3); err != nil {\n\t\tt.Fatalf(\"unexpected error deleting: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tif got, exp := r.ContainsValue(\"cpu\", 1), true; got != exp {\n\t\tt.Fatalf(\"ContainsValue mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := r.ContainsValue(\"cpu\", 2), true; got != exp {\n\t\tt.Fatalf(\"ContainsValue mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := r.ContainsValue(\"cpu\", 3), true; got != exp {\n\t\tt.Fatalf(\"ContainsValue mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := r.HasTombstones(), false; got != exp {\n\t\tt.Fatalf(\"HasTombstones mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := len(r.TombstoneFiles()), 0; got != exp {\n\t\tt.Fatalf(\"TombstoneFiles len mismatch: got %v, exp %v\", got, exp)\n\n\t}\n}\n\nfunc TestTSMReader_MMAP_TombstoneOverlapKeyRange(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer f.Close()\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif 
err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\texpValues := []tsm1.Value{\n\t\ttsm1.NewValue(1, 1.0),\n\t\ttsm1.NewValue(2, 2.0),\n\t\ttsm1.NewValue(3, 3.0),\n\t}\n\tif err := w.Write(\"cpu,app=foo,host=server-0#!~#value\", expValues); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t}\n\n\tif err := w.Write(\"cpu,app=foo,host=server-73379#!~#value\", expValues); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error writing index: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tf, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\n\tif err := r.DeleteRange([]string{\n\t\t\"cpu,app=foo,host=server-0#!~#value\",\n\t\t\"cpu,app=foo,host=server-73379#!~#value\",\n\t\t\"cpu,app=foo,host=server-99999#!~#value\"},\n\t\tmath.MinInt64, math.MaxInt64); err != nil {\n\t\tt.Fatalf(\"unexpected error deleting: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tif got, exp := r.Contains(\"cpu,app=foo,host=server-0#!~#value\"), false; got != exp {\n\t\tt.Fatalf(\"ContainsValue mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := r.Contains(\"cpu,app=foo,host=server-73379#!~#value\"), false; got != exp {\n\t\tt.Fatalf(\"ContainsValue mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := r.HasTombstones(), true; got != exp {\n\t\tt.Fatalf(\"HasTombstones mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := len(r.TombstoneFiles()), 1; got != exp {\n\t\tt.Fatalf(\"TombstoneFiles len mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTSMReader_MMAP_TombstoneFullRange(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer 
f.Close()\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\texpValues := []tsm1.Value{\n\t\ttsm1.NewValue(1, 1.0),\n\t\ttsm1.NewValue(2, 2.0),\n\t\ttsm1.NewValue(3, 3.0),\n\t}\n\tif err := w.Write(\"cpu\", expValues); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error writing index: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tf, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\n\tif err := r.DeleteRange([]string{\"cpu\"}, math.MinInt64, math.MaxInt64); err != nil {\n\t\tt.Fatalf(\"unexpected error deleting: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tvalues, err := r.ReadAll(\"cpu\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading all: %v\", err)\n\t}\n\n\tif got, exp := len(values), 0; got != exp {\n\t\tt.Fatalf(\"values length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTSMReader_MMAP_TombstoneMultipleRanges(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer f.Close()\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\texpValues := []tsm1.Value{\n\t\ttsm1.NewValue(1, 1.0),\n\t\ttsm1.NewValue(2, 2.0),\n\t\ttsm1.NewValue(3, 3.0),\n\t\ttsm1.NewValue(4, 4.0),\n\t\ttsm1.NewValue(5, 5.0),\n\t}\n\tif err := w.Write(\"cpu\", expValues); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error writing index: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", 
err)\n\t}\n\n\tf, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tif err := r.DeleteRange([]string{\"cpu\"}, 2, 2); err != nil {\n\t\tt.Fatalf(\"unexpected error deleting: %v\", err)\n\t}\n\n\tif err := r.DeleteRange([]string{\"cpu\"}, 4, 4); err != nil {\n\t\tt.Fatalf(\"unexpected error deleting: %v\", err)\n\t}\n\n\tvalues, err := r.ReadAll(\"cpu\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading all: %v\", err)\n\t}\n\n\tif got, exp := len(values), 3; got != exp {\n\t\tt.Fatalf(\"values length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTSMReader_MMAP_TombstoneOutsideRange(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer f.Close()\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tcpuValues := []tsm1.Value{\n\t\ttsm1.NewValue(1, 1.0),\n\t\ttsm1.NewValue(2, 2.0),\n\t\ttsm1.NewValue(3, 3.0),\n\t}\n\tif err := w.Write(\"cpu\", cpuValues); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t}\n\n\tmemValues := []tsm1.Value{\n\t\ttsm1.NewValue(1, 1.0),\n\t\ttsm1.NewValue(2, 2.0),\n\t\ttsm1.NewValue(30, 3.0),\n\t}\n\tif err := w.Write(\"mem\", memValues); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error writing index: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tf, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\n\tif err := r.DeleteRange([]string{\"cpu\", 
\"mem\"}, 5, math.MaxInt64); err != nil {\n\t\tt.Fatalf(\"unexpected error deleting: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tif got, exp := r.KeyCount(), 2; got != exp {\n\t\tt.Fatalf(\"key count mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := len(r.TombstoneRange(\"cpu\")), 0; got != exp {\n\t\tt.Fatalf(\"tombstone range mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tvalues, err := r.ReadAll(\"cpu\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading all: %v\", err)\n\t}\n\n\tif got, exp := len(values), len(cpuValues); got != exp {\n\t\tt.Fatalf(\"values length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := len(r.TombstoneRange(\"mem\")), 1; got != exp {\n\t\tt.Fatalf(\"tombstone range mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tvalues, err = r.ReadAll(\"mem\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading all: %v\", err)\n\t}\n\n\tif got, exp := len(values), len(memValues[:2]); got != exp {\n\t\tt.Fatalf(\"values length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n}\n\nfunc TestTSMReader_MMAP_Stats(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer f.Close()\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvalues1 := []tsm1.Value{tsm1.NewValue(0, 1.0)}\n\tif err := w.Write(\"cpu\", values1); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t}\n\n\tvalues2 := []tsm1.Value{tsm1.NewValue(1, 1.0)}\n\tif err := w.Write(\"mem\", values2); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error writing index: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tf, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(f)\n\tif err != 
nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tstats := r.Stats()\n\tif got, exp := stats.MinKey, \"cpu\"; got != exp {\n\t\tt.Fatalf(\"min key mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := stats.MaxKey, \"mem\"; got != exp {\n\t\tt.Fatalf(\"max key mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := stats.MinTime, values1[0].UnixNano(); got != exp {\n\t\tt.Fatalf(\"min time mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := stats.MaxTime, values2[0].UnixNano(); got != exp {\n\t\tt.Fatalf(\"max time mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := r.KeyCount(), 2; got != exp {\n\t\tt.Fatalf(\"key length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\n// Ensure that we return an error if we try to open a non-tsm file\nfunc TestTSMReader_VerifiesFileType(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer f.Close()\n\n\t// write some garbage\n\tf.Write([]byte{0x23, 0xac, 0x99, 0x22, 0x77, 0x23, 0xac, 0x99, 0x22, 0x77, 0x23, 0xac, 0x99, 0x22, 0x77, 0x23, 0xac, 0x99, 0x22, 0x77})\n\n\t_, err := tsm1.NewTSMReader(f)\n\tif err == nil {\n\t\tt.Fatal(\"expected error trying to open non-tsm file\")\n\t}\n}\n\nfunc TestIndirectIndex_Entries(t *testing.T) {\n\tindex := tsm1.NewIndexWriter()\n\tindex.Add(\"cpu\", tsm1.BlockFloat64, 0, 1, 10, 100)\n\tindex.Add(\"cpu\", tsm1.BlockFloat64, 2, 3, 20, 200)\n\tindex.Add(\"mem\", tsm1.BlockFloat64, 0, 1, 10, 100)\n\n\tb, err := index.MarshalBinary()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error marshaling index: %v\", err)\n\t}\n\n\tindirect := tsm1.NewIndirectIndex()\n\tif err := indirect.UnmarshalBinary(b); err != nil {\n\t\tt.Fatalf(\"unexpected error unmarshaling index: %v\", err)\n\t}\n\n\texp := index.Entries(\"cpu\")\n\tentries := indirect.Entries(\"cpu\")\n\n\tif got, exp := len(entries), len(exp); got != exp {\n\t\tt.Fatalf(\"entries length mismatch: got %v, exp %v\", 
got, exp)\n\t}\n\n\tfor i, exp := range exp {\n\t\tgot := entries[i]\n\t\tif exp.MinTime != got.MinTime {\n\t\t\tt.Fatalf(\"minTime mismatch: got %v, exp %v\", got.MinTime, exp.MinTime)\n\t\t}\n\n\t\tif exp.MaxTime != got.MaxTime {\n\t\t\tt.Fatalf(\"minTime mismatch: got %v, exp %v\", got.MaxTime, exp.MaxTime)\n\t\t}\n\n\t\tif exp.Size != got.Size {\n\t\t\tt.Fatalf(\"size mismatch: got %v, exp %v\", got.Size, exp.Size)\n\t\t}\n\t\tif exp.Offset != got.Offset {\n\t\t\tt.Fatalf(\"size mismatch: got %v, exp %v\", got.Offset, exp.Offset)\n\t\t}\n\t}\n}\n\nfunc TestIndirectIndex_Entries_NonExistent(t *testing.T) {\n\tindex := tsm1.NewIndexWriter()\n\tindex.Add(\"cpu\", tsm1.BlockFloat64, 0, 1, 10, 100)\n\tindex.Add(\"cpu\", tsm1.BlockFloat64, 2, 3, 20, 200)\n\n\tb, err := index.MarshalBinary()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error marshaling index: %v\", err)\n\t}\n\n\tindirect := tsm1.NewIndirectIndex()\n\tif err := indirect.UnmarshalBinary(b); err != nil {\n\t\tt.Fatalf(\"unexpected error unmarshaling index: %v\", err)\n\t}\n\n\t// mem has not been added to the index so we should get no entries back\n\t// for both\n\texp := index.Entries(\"mem\")\n\tentries := indirect.Entries(\"mem\")\n\n\tif got, exp := len(entries), len(exp); got != exp && exp != 0 {\n\t\tt.Fatalf(\"entries length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestIndirectIndex_MaxBlocks(t *testing.T) {\n\tindex := tsm1.NewIndexWriter()\n\tfor i := 0; i < 1<<16; i++ {\n\t\tindex.Add(\"cpu\", tsm1.BlockFloat64, 0, 1, 10, 20)\n\t}\n\n\tif _, err := index.MarshalBinary(); err == nil {\n\t\tt.Fatalf(\"expected max block count error. 
got nil\")\n\t} else {\n\t\tprintln(err.Error())\n\t}\n}\n\nfunc TestIndirectIndex_Type(t *testing.T) {\n\tindex := tsm1.NewIndexWriter()\n\tindex.Add(\"cpu\", tsm1.BlockInteger, 0, 1, 10, 20)\n\n\tb, err := index.MarshalBinary()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tind := tsm1.NewIndirectIndex()\n\tif err := ind.UnmarshalBinary(b); err != nil {\n\t\tfatal(t, \"unmarshal binary\", err)\n\t}\n\n\ttyp, err := ind.Type(\"cpu\")\n\tif err != nil {\n\t\tfatal(t, \"reading type\", err)\n\t}\n\n\tif got, exp := typ, tsm1.BlockInteger; got != exp {\n\t\tt.Fatalf(\"type mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestIndirectIndex_Keys(t *testing.T) {\n\tindex := tsm1.NewIndexWriter()\n\tindex.Add(\"cpu\", tsm1.BlockFloat64, 0, 1, 10, 20)\n\tindex.Add(\"mem\", tsm1.BlockFloat64, 0, 1, 10, 20)\n\tindex.Add(\"cpu\", tsm1.BlockFloat64, 1, 2, 20, 30)\n\n\tkeys := index.Keys()\n\n\t// 2 distinct keys\n\tif got, exp := len(keys), 2; got != exp {\n\t\tt.Fatalf(\"length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\t// Keys should be sorted\n\tif got, exp := keys[0], \"cpu\"; got != exp {\n\t\tt.Fatalf(\"key mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := keys[1], \"mem\"; got != exp {\n\t\tt.Fatalf(\"key mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestBlockIterator_Single(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvalues := []tsm1.Value{tsm1.NewValue(0, int64(1))}\n\tif err := w.Write(\"cpu\", values); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\n\t}\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tfd, err := os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error opening: 
%v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(fd)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\n\tvar count int\n\titer := r.BlockIterator()\n\tfor iter.Next() {\n\t\tkey, minTime, maxTime, typ, _, buf, err := iter.Read()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error creating iterator: %v\", err)\n\t\t}\n\n\t\tif got, exp := key, \"cpu\"; got != exp {\n\t\t\tt.Fatalf(\"key mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif got, exp := minTime, int64(0); got != exp {\n\t\t\tt.Fatalf(\"min time mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif got, exp := maxTime, int64(0); got != exp {\n\t\t\tt.Fatalf(\"max time mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif got, exp := typ, tsm1.BlockInteger; got != exp {\n\t\t\tt.Fatalf(\"block type mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif len(buf) == 0 {\n\t\t\tt.Fatalf(\"buf length = 0\")\n\t\t}\n\n\t\tcount++\n\t}\n\n\tif got, exp := count, len(values); got != exp {\n\t\tt.Fatalf(\"value count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestBlockIterator_MultipleBlocks(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvalues1 := []tsm1.Value{tsm1.NewValue(0, int64(1))}\n\tif err := w.Write(\"cpu\", values1); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t}\n\n\tvalues2 := []tsm1.Value{tsm1.NewValue(1, int64(2))}\n\tif err := w.Write(\"cpu\", values2); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tfd, err := os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error opening: %v\", err)\n\t}\n\n\tr, err := 
tsm1.NewTSMReader(fd)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\n\tvar count int\n\texpData := []tsm1.Values{values1, values2}\n\titer := r.BlockIterator()\n\tvar i int\n\tfor iter.Next() {\n\t\tkey, minTime, maxTime, typ, _, buf, err := iter.Read()\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error creating iterator: %v\", err)\n\t\t}\n\n\t\tif got, exp := key, \"cpu\"; got != exp {\n\t\t\tt.Fatalf(\"key mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif got, exp := minTime, expData[i][0].UnixNano(); got != exp {\n\t\t\tt.Fatalf(\"min time mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif got, exp := maxTime, expData[i][0].UnixNano(); got != exp {\n\t\t\tt.Fatalf(\"max time mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif got, exp := typ, tsm1.BlockInteger; got != exp {\n\t\t\tt.Fatalf(\"block type mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tif len(buf) == 0 {\n\t\t\tt.Fatalf(\"buf length = 0\")\n\t\t}\n\n\t\tcount++\n\t\ti++\n\t}\n\n\tif got, exp := count, 2; got != exp {\n\t\tt.Fatalf(\"value count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestBlockIterator_Sorted(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvalues := map[string][]tsm1.Value{\n\t\t\"mem\":  []tsm1.Value{tsm1.NewValue(0, int64(1))},\n\t\t\"cpu\":  []tsm1.Value{tsm1.NewValue(1, float64(2))},\n\t\t\"disk\": []tsm1.Value{tsm1.NewValue(1, true)},\n\t\t\"load\": []tsm1.Value{tsm1.NewValue(1, \"string\")},\n\t}\n\n\tfor k, v := range values {\n\t\tif err := w.Write(k, v); err != nil {\n\t\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\n\t\t}\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", 
err)\n\t}\n\n\tfd, err := os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error opening: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(fd)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\n\tvar count int\n\titer := r.BlockIterator()\n\tvar lastKey string\n\tfor iter.Next() {\n\t\tkey, _, _, _, _, buf, err := iter.Read()\n\n\t\tif key < lastKey {\n\t\t\tt.Fatalf(\"keys not sorted: got %v, last %v\", key, lastKey)\n\t\t}\n\n\t\tlastKey = key\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error creating iterator: %v\", err)\n\t\t}\n\n\t\tif len(buf) == 0 {\n\t\t\tt.Fatalf(\"buf length = 0\")\n\t\t}\n\n\t\tcount++\n\t}\n\n\tif got, exp := count, len(values); got != exp {\n\t\tt.Fatalf(\"value count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestIndirectIndex_UnmarshalBinary_BlockCountOverflow(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer f.Close()\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tfor i := 0; i < 3280; i++ {\n\t\tw.Write(\"cpu\", []tsm1.Value{tsm1.NewValue(int64(i), float64(i))})\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tf, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\tdefer r.Close()\n}\n\nfunc TestCompacted_NotFull(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvalues := []tsm1.Value{tsm1.NewValue(0, 1.0)}\n\tif err := w.Write(\"cpu\", 
values); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\n\t}\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error writing index: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tfd, err := os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(fd)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\n\titer := r.BlockIterator()\n\tif !iter.Next() {\n\t\tt.Fatalf(\"expected next, got false\")\n\t}\n\n\t_, _, _, _, _, block, err := iter.Read()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading block: %v\", err)\n\t}\n\n\tif got, exp := tsm1.BlockCount(block), 1; got != exp {\n\t\tt.Fatalf(\"block count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTSMReader_File_ReadAll(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer f.Close()\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tvalues []tsm1.Value\n\t}{\n\t\t{\"float\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, 1.0)},\n\t\t},\n\t\t{\"int\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, int64(1))},\n\t\t},\n\t\t{\"bool\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, true)},\n\t\t},\n\t\t{\"string\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, \"foo\")},\n\t\t},\n\t}\n\n\tfor _, d := range data {\n\t\tif err := w.Write(d.key, d.values); err != nil {\n\t\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t\t}\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error writing index: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tf, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err 
:= tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tvar count int\n\tfor _, d := range data {\n\t\treadValues, err := r.ReadAll(d.key)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error reading: %v\", err)\n\t\t}\n\n\t\tif exp := len(d.values); exp != len(readValues) {\n\t\t\tt.Fatalf(\"read values length mismatch: exp %v, got %v\", exp, len(readValues))\n\t\t}\n\n\t\tfor i, v := range d.values {\n\t\t\tif exp, got := v.Value(), readValues[i].Value(); exp != got {\n\t\t\t\tt.Fatalf(\"read value mismatch(%d): exp %v, got %d\", i, v.Value(), readValues[i].Value())\n\t\t\t}\n\t\t}\n\t\tcount++\n\t}\n\n\tif exp, got := len(data), count; exp != got {\n\t\tt.Fatalf(\"read values count mismatch: exp %v, got %v\", exp, got)\n\t}\n}\n\nfunc TestTSMReader_FuzzCrashes(t *testing.T) {\n\tcases := []string{\n\t\t\"\",\n\t\t\"\\x16\\xd1\\x16\\xd1\\x01\\x10\\x14X\\xfb\\x03\\xac~\\x80\\xf0\\x00\\x00\\x00I^K\" +\n\t\t\t\"_\\xf0\\x00\\x00\\x00D424259389w\\xf0\\x00\\x00\\x00\" +\n\t\t\t\"o\\x93\\bO\\x10?\\xf0\\x00\\x00\\x00\\x00\\b\\x00\\xc2_\\xff\\xd8\\x0fX^\" +\n\t\t\t\"/\\xbf\\xe8\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\bctr#!~#n\\x00\" +\n\t\t\t\"\\x00\\x01\\x14X\\xfb\\xb0\\x03\\xac~\\x80\\x14X\\xfb\\xb1\\x00\\xd4ܥ\\x00\\x00\" +\n\t\t\t\"\\x00\\x00\\x00\\x00\\x00\\x05\\x00\\x00\\x00@\\x00\\x00\\x00\\x00\\x00\\x00\\x00E\",\n\t\t\"\\x16\\xd1\\x16\\xd1\\x01\\x80'Z\\\\\\x00\\v)\\x00\\x00\\x00\\x00;\\x9a\\xca\\x00\" +\n\t\t\t\"\\x01\\x05\\x10?\\xf0\\x00\\x00\\x00\\x00\\x00\\x00\\xc2_\\xff\\xd6\\x1d\\xd4&\\xed\\v\" +\n\t\t\t\"\\xc5\\xf7\\xfb\\xc0\\x00\\x00\\x00\\x00\\x00 \\x00\\x06a#!~#v\\x00\\x00\" +\n\t\t\t\"\\x01\\x00\\x00\\x00\\x00;\\x9a\\xca\\x00\\x00\\x00\\x00\\x01*\\x05\\xf2\\x00\\x00\\x00\\x00\" +\n\t\t\t\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x002\",\n\t\t\"\\x16\\xd1\\x16\\xd1\\x01\\x80\\xf0\\x00\\x00\\x00I^K_\\xf0\\x00\\x00\\x00D7\" 
+\n\t\t\t\"\\nw\\xf0\\x00\\x00\\x00o\\x93\\bO\\x10?\\xf0\\x00\\x00\\x00\\x00\\x00\\x00\\xc2\" +\n\t\t\t\"_\\xff\\x14X\\xfb\\xb0\\x03\\xac~\\x80\\x14X\\xfb\\xb1\\x00\\xd4ܥ\\x00\\x00\" +\n\t\t\t\"\\x00\\x00\\x00\\x00\\x00\\x05\\x00\\x00\\x00@\\x00\\x00\\x00\\x00\\x00\\x00\\x00E\",\n\t\t\"\\x16\\xd1\\x16\\xd1\\x01000000000000000\" +\n\t\t\t\"00000000000000000000\" +\n\t\t\t\"0000000000\\x00\\x000\\x00\\x0100000\" +\n\t\t\t\"000\\x00\\x00\\x00\\x00\\x00\\x00\\x002\",\n\t\t\"\\x16\\xd1\\x16\\xd1\\x01\",\n\t\t\"\\x16\\xd1\\x16\\xd1\\x01\\x00\\x00o\\x93\\bO\\x10?\\xf0\\x00\\x00\\x00\\x00X^\" +\n\t\t\t\"/\\xbf\\xe8\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\bctr#!~#n\\x00\" +\n\t\t\t\"\\x00\\x01\\x14X\\xfb\\xb0\\x03\\xac~\\x80\\x14X\\xfb\\xb1\\x00\\xd4ܥ\\x00\\x00\" +\n\t\t\t\"\\x00\\x00\\x00\\x00\\x00\\x05\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00E\",\n\t}\n\n\tfor _, c := range cases {\n\t\tfunc() {\n\t\t\tdir := MustTempDir()\n\t\t\tdefer os.RemoveAll(dir)\n\n\t\t\tfilename := filepath.Join(dir, \"x.tsm\")\n\t\t\tif err := ioutil.WriteFile(filename, []byte(c), 0600); err != nil {\n\t\t\t\tt.Fatalf(\"exp no error, got %s\", err)\n\t\t\t}\n\t\t\tdefer os.RemoveAll(dir)\n\n\t\t\tf, err := os.Open(filename)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"exp no error, got %s\", err)\n\t\t\t}\n\t\t\tdefer f.Close()\n\n\t\t\tr, err := tsm1.NewTSMReader(f)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer r.Close()\n\n\t\t\titer := r.BlockIterator()\n\t\t\tfor iter.Next() {\n\t\t\t\tkey, _, _, _, _, _, err := iter.Read()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t_, _ = r.Type(key)\n\n\t\t\t\tif _, err = r.ReadAll(key); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc TestTSMReader_File_Read(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer f.Close()\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", 
err)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tvalues []tsm1.Value\n\t}{\n\t\t{\"float\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, 1.0)},\n\t\t},\n\t\t{\"int\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, int64(1))},\n\t\t},\n\t\t{\"bool\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, true)},\n\t\t},\n\t\t{\"string\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, \"foo\")},\n\t\t},\n\t}\n\tfor _, d := range data {\n\t\tif err := w.Write(d.key, d.values); err != nil {\n\t\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t\t}\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error writing index: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tf, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tvar count int\n\tfor _, d := range data {\n\t\treadValues, err := r.Read(d.key, d.values[0].UnixNano())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error readin: %v\", err)\n\t\t}\n\n\t\tif exp, got := len(d.values), len(readValues); exp != got {\n\t\t\tt.Fatalf(\"read values length mismatch: exp %v, got %v\", exp, len(readValues))\n\t\t}\n\n\t\tfor i, v := range d.values {\n\t\t\tif v.Value() != readValues[i].Value() {\n\t\t\t\tt.Fatalf(\"read value mismatch(%d): exp %v, got %d\", i, v.Value(), readValues[i].Value())\n\t\t\t}\n\t\t}\n\t\tcount++\n\t}\n\n\tif exp, got := count, len(data); exp != got {\n\t\tt.Fatalf(\"read values count mismatch: exp %v, got %v\", exp, got)\n\t}\n}\n\nfunc TestTSMReader_References(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer f.Close()\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvar data = []struct {\n\t\tkey    
string\n\t\tvalues []tsm1.Value\n\t}{\n\t\t{\"float\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, 1.0)},\n\t\t},\n\t\t{\"int\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, int64(1))},\n\t\t},\n\t\t{\"bool\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, true)},\n\t\t},\n\t\t{\"string\", []tsm1.Value{\n\t\t\ttsm1.NewValue(1, \"foo\")},\n\t\t},\n\t}\n\tfor _, d := range data {\n\t\tif err := w.Write(d.key, d.values); err != nil {\n\t\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t\t}\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error writing index: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tf, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tr.Ref()\n\n\tif err := r.Close(); err != tsm1.ErrFileInUse {\n\t\tt.Fatalf(\"expected error closing reader: %v\", err)\n\t}\n\n\tif err := r.Remove(); err != tsm1.ErrFileInUse {\n\t\tt.Fatalf(\"expected error removing reader: %v\", err)\n\t}\n\n\tvar count int\n\tfor _, d := range data {\n\t\treadValues, err := r.Read(d.key, d.values[0].UnixNano())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error readin: %v\", err)\n\t\t}\n\n\t\tif exp, got := len(d.values), len(readValues); exp != got {\n\t\t\tt.Fatalf(\"read values length mismatch: exp %v, got %v\", exp, len(readValues))\n\t\t}\n\n\t\tfor i, v := range d.values {\n\t\t\tif v.Value() != readValues[i].Value() {\n\t\t\t\tt.Fatalf(\"read value mismatch(%d): exp %v, got %d\", i, v.Value(), readValues[i].Value())\n\t\t\t}\n\t\t}\n\t\tcount++\n\t}\n\n\tif exp, got := count, len(data); exp != got {\n\t\tt.Fatalf(\"read values count mismatch: exp %v, got %v\", exp, got)\n\t}\n\tr.Unref()\n\n\tif err := r.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing reader: %v\", 
err)\n\t}\n\n\tif err := r.Remove(); err != nil {\n\t\tt.Fatalf(\"unexpected error removing reader: %v\", err)\n\t}\n}\n\nfunc BenchmarkIndirectIndex_UnmarshalBinary(b *testing.B) {\n\tindex := tsm1.NewIndexWriter()\n\tfor i := 0; i < 100000; i++ {\n\t\tindex.Add(fmt.Sprintf(\"cpu-%d\", i), tsm1.BlockFloat64, int64(i*2), int64(i*2+1), 10, 100)\n\t}\n\n\tbytes, err := index.MarshalBinary()\n\tif err != nil {\n\t\tb.Fatalf(\"unexpected error marshaling index: %v\", err)\n\t}\n\n\tindirect := tsm1.NewIndirectIndex()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tif err := indirect.UnmarshalBinary(bytes); err != nil {\n\t\t\tb.Fatalf(\"unexpected error unmarshaling index: %v\", err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/ring.go",
    "content": "package tsm1\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"sync\"\n\t\"sync/atomic\"\n\n\t\"github.com/cespare/xxhash\"\n)\n\n// partitions is the number of partitions we used in the ring's continuum. It\n// basically defines the maximum number of partitions you can have in the ring.\n// If a smaller number of partitions are chosen when creating a ring, then\n// they're evenly spread across this many partitions in the ring.\nconst partitions = 4096\n\n// ring is a structure that maps series keys to entries.\n//\n// ring is implemented as a crude hash ring, in so much that you can have\n// variable numbers of members in the ring, and the appropriate member for a\n// given series key can always consistently be found. Unlike a true hash ring\n// though, this ring is not resizeable—there must be at most 256 members in the\n// ring, and the number of members must always be a power of 2.\n//\n// ring works as follows: Each member of the ring contains a single store, which\n// contains a map of series keys to entries. A ring always has 256 partitions,\n// and a member takes up one or more of these partitions (depending on how many\n// members are specified to be in the ring)\n//\n// To determine the partition that a series key should be added to, the series\n// key is hashed and the first 8 bits are used as an index to the ring.\n//\ntype ring struct {\n\t// The unique set of partitions in the ring.\n\t// len(partitions) <= len(continuum)\n\tpartitions []*partition\n\n\t// A mapping of partition to location on the ring continuum. This is used\n\t// to lookup a partition.\n\tcontinuum []*partition\n\n\t// Number of keys within the ring. This is used to provide a hint for\n\t// allocating the return values in keys(). It will not be perfectly accurate\n\t// since it doesn't consider adding duplicate keys, or trying to remove non-\n\t// existent keys.\n\tkeysHint int64\n}\n\n// newring returns a new ring initialised with n partitions. 
n must always be a\n// power of 2, and for performance reasons should be larger than the number of\n// cores on the host. The supported set of values for n is:\n//\n//     {1, 2, 4, 8, 16, 32, 64, 128, 256}.\n//\nfunc newring(n int) (*ring, error) {\n\tif n <= 0 || n > partitions {\n\t\treturn nil, fmt.Errorf(\"invalid number of paritions: %d\", n)\n\t}\n\n\tr := ring{\n\t\tcontinuum: make([]*partition, partitions), // maximum number of partitions.\n\t}\n\n\t// The trick here is to map N partitions to all points on the continuum,\n\t// such that the first eight bits of a given hash will map directly to one\n\t// of the N partitions.\n\tfor i := 0; i < len(r.continuum); i++ {\n\t\tif (i == 0 || i%(partitions/n) == 0) && len(r.partitions) < n {\n\t\t\tr.partitions = append(r.partitions, &partition{\n\t\t\t\tstore:          make(map[string]*entry),\n\t\t\t\tentrySizeHints: make(map[uint64]int),\n\t\t\t})\n\t\t}\n\t\tr.continuum[i] = r.partitions[len(r.partitions)-1]\n\t}\n\treturn &r, nil\n}\n\n// reset resets the ring so it can be reused. 
Before removing references to entries\n// within each partition it gathers sizing information to provide hints when\n// reallocating entries in partition maps.\n//\n// reset is not safe for use by multiple goroutines.\nfunc (r *ring) reset() {\n\tfor _, partition := range r.partitions {\n\t\tpartition.reset()\n\t}\n\tr.keysHint = 0\n}\n\n// getPartition retrieves the hash ring partition associated with the provided\n// key.\nfunc (r *ring) getPartition(key string) *partition {\n\treturn r.continuum[int(xxhash.Sum64([]byte(key))%partitions)]\n}\n\n// entry returns the entry for the given key.\n// entry is safe for use by multiple goroutines.\nfunc (r *ring) entry(key string) (*entry, bool) {\n\treturn r.getPartition(key).entry(key)\n}\n\n// write writes values to the entry in the ring's partition associated with key.\n// If no entry exists for the key then one will be created.\n// write is safe for use by multiple goroutines.\nfunc (r *ring) write(key string, values Values) error {\n\treturn r.getPartition(key).write(key, values)\n}\n\n// add adds an entry to the ring.\nfunc (r *ring) add(key string, entry *entry) {\n\tr.getPartition(key).add(key, entry)\n\tatomic.AddInt64(&r.keysHint, 1)\n}\n\n// remove deletes the entry for the given key.\n// remove is safe for use by multiple goroutines.\nfunc (r *ring) remove(key string) {\n\tr.getPartition(key).remove(key)\n\tif r.keysHint > 0 {\n\t\tatomic.AddInt64(&r.keysHint, -1)\n\t}\n}\n\n// keys returns all the keys from all partitions in the hash ring. The returned\n// keys will be in order if sorted is true.\nfunc (r *ring) keys(sorted bool) []string {\n\tkeys := make([]string, 0, atomic.LoadInt64(&r.keysHint))\n\tfor _, p := range r.partitions {\n\t\tkeys = append(keys, p.keys()...)\n\t}\n\n\tif sorted {\n\t\tsort.Strings(keys)\n\t}\n\treturn keys\n}\n\n// apply applies the provided function to every entry in the ring under a read\n// lock using a separate goroutine for each partition. 
The provided function\n// will be called with each key and the corresponding entry. The first error\n// encountered will be returned, if any. apply is safe for use by multiple\n// goroutines.\nfunc (r *ring) apply(f func(string, *entry) error) error {\n\n\tvar (\n\t\twg  sync.WaitGroup\n\t\tres = make(chan error, len(r.partitions))\n\t)\n\n\tfor _, p := range r.partitions {\n\t\twg.Add(1)\n\n\t\tgo func(p *partition) {\n\t\t\tdefer wg.Done()\n\n\t\t\tp.mu.RLock()\n\t\t\tfor k, e := range p.store {\n\t\t\t\tif err := f(k, e); err != nil {\n\t\t\t\t\tres <- err\n\t\t\t\t\tp.mu.RUnlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tp.mu.RUnlock()\n\t\t}(p)\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(res)\n\t}()\n\n\t// Collect results.\n\tfor err := range res {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// applySerial is similar to apply, but invokes f on each partition in the same\n// goroutine.\n// apply is safe for use by multiple goroutines.\nfunc (r *ring) applySerial(f func(string, *entry) error) error {\n\tfor _, p := range r.partitions {\n\t\tp.mu.RLock()\n\t\tfor k, e := range p.store {\n\t\t\tif err := f(k, e); err != nil {\n\t\t\t\tp.mu.RUnlock()\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tp.mu.RUnlock()\n\t}\n\treturn nil\n}\n\n// partition provides safe access to a map of series keys to entries.\ntype partition struct {\n\tmu    sync.RWMutex\n\tstore map[string]*entry\n\n\t// entrySizeHints stores hints for appropriate sizes to pre-allocate the\n\t// []Values in an entry. 
entrySizeHints will only contain hints for entries\n\t// that were present prior to the most recent snapshot, preventing unbounded\n\t// growth over time.\n\tentrySizeHints map[uint64]int\n}\n\n// entry returns the partition's entry for the provided key.\n// It's safe for use by multiple goroutines.\nfunc (p *partition) entry(key string) (*entry, bool) {\n\tp.mu.RLock()\n\te, ok := p.store[key]\n\tp.mu.RUnlock()\n\treturn e, ok\n}\n\n// write writes the values to the entry in the partition, creating the entry\n// if it does not exist.\n// write is safe for use by multiple goroutines.\nfunc (p *partition) write(key string, values Values) error {\n\tp.mu.RLock()\n\te, ok := p.store[key]\n\tp.mu.RUnlock()\n\tif ok {\n\t\t// Hot path.\n\t\treturn e.add(values)\n\t}\n\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\t// Check again.\n\tif e, ok = p.store[key]; ok {\n\t\treturn e.add(values)\n\t}\n\n\t// Create a new entry using a preallocated size if we have a hint available.\n\thint, _ := p.entrySizeHints[xxhash.Sum64([]byte(key))]\n\te, err := newEntryValues(values, hint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.store[key] = e\n\treturn nil\n}\n\n// add adds a new entry for key to the partition.\nfunc (p *partition) add(key string, entry *entry) {\n\tp.mu.Lock()\n\tp.store[key] = entry\n\tp.mu.Unlock()\n}\n\n// remove deletes the entry associated with the provided key.\n// remove is safe for use by multiple goroutines.\nfunc (p *partition) remove(key string) {\n\tp.mu.Lock()\n\tdelete(p.store, key)\n\tp.mu.Unlock()\n}\n\n// keys returns an unsorted slice of the keys in the partition.\nfunc (p *partition) keys() []string {\n\tp.mu.RLock()\n\tkeys := make([]string, 0, len(p.store))\n\tfor k := range p.store {\n\t\tkeys = append(keys, k)\n\t}\n\tp.mu.RUnlock()\n\treturn keys\n}\n\n// reset resets the partition by reinitialising the store. 
reset returns hints\n// about sizes that the entries within the store could be reallocated with.\nfunc (p *partition) reset() {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\t// Collect the allocated sizes of values for each entry in the store.\n\tp.entrySizeHints = make(map[uint64]int)\n\tfor k, entry := range p.store {\n\t\t// If the capacity is large then there are many values in the entry.\n\t\t// Store a hint to pre-allocate the next time we see the same entry.\n\t\tentry.mu.RLock()\n\t\tif cap(entry.values) > 128 { // 4 x the default entry capacity size.\n\t\t\tp.entrySizeHints[xxhash.Sum64([]byte(k))] = cap(entry.values)\n\t\t}\n\t\tentry.mu.RUnlock()\n\t}\n\n\t// Reset the store.\n\tp.store = make(map[string]*entry, len(p.store))\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/ring_test.go",
    "content": "package tsm1\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"sync\"\n\t\"testing\"\n)\n\nfunc TestRing_newRing(t *testing.T) {\n\texamples := []struct {\n\t\tn         int\n\t\treturnErr bool\n\t}{\n\t\t{n: 1}, {n: 2}, {n: 4}, {n: 8}, {n: 16}, {n: 32}, {n: 64}, {n: 128}, {n: 256},\n\t\t{n: 0, returnErr: true}, {n: 3, returnErr: true}, {n: 512, returnErr: true},\n\t}\n\n\tfor i, example := range examples {\n\t\tr, err := newring(example.n)\n\t\tif err != nil {\n\t\t\tif example.returnErr {\n\t\t\t\tcontinue // expecting an error.\n\t\t\t}\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif got, exp := len(r.partitions), example.n; got != exp {\n\t\t\tt.Fatalf(\"[Example %d] got %v, expected %v\", i, got, exp)\n\t\t}\n\n\t\t// Check partitions distributed correctly\n\t\tpartitions := make([]*partition, 0)\n\t\tfor i, partition := range r.continuum {\n\t\t\tif i == 0 || partition != partitions[len(partitions)-1] {\n\t\t\t\tpartitions = append(partitions, partition)\n\t\t\t}\n\t\t}\n\n\t\tif got, exp := len(partitions), example.n; got != exp {\n\t\t\tt.Fatalf(\"[Example %d] got %v, expected %v\", i, got, exp)\n\t\t}\n\t}\n}\n\nvar strSliceRes []string\n\nfunc benchmarkRingkeys(b *testing.B, r *ring, keys int) {\n\t// Add some keys\n\tfor i := 0; i < keys; i++ {\n\t\tr.add(fmt.Sprintf(\"cpu,host=server-%d value=1\", i), nil)\n\t}\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tstrSliceRes = r.keys(false)\n\t}\n}\n\nfunc BenchmarkRing_keys_100(b *testing.B)    { benchmarkRingkeys(b, MustNewRing(256), 100) }\nfunc BenchmarkRing_keys_1000(b *testing.B)   { benchmarkRingkeys(b, MustNewRing(256), 1000) }\nfunc BenchmarkRing_keys_10000(b *testing.B)  { benchmarkRingkeys(b, MustNewRing(256), 10000) }\nfunc BenchmarkRing_keys_100000(b *testing.B) { benchmarkRingkeys(b, MustNewRing(256), 100000) }\n\nfunc benchmarkRingWrite(b *testing.B, r *ring, n int) {\n\tfor i := 0; i < b.N; i++ {\n\t\tvar wg sync.WaitGroup\n\t\tfor i := 0; i < runtime.GOMAXPROCS(0); 
i++ {\n\t\t\terrC := make(chan error)\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tfor j := 0; j < n; j++ {\n\t\t\t\t\tif err := r.write(fmt.Sprintf(\"cpu,host=server-%d value=1\", j), Values{}); err != nil {\n\t\t\t\t\t\terrC <- err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tgo func() {\n\t\t\t\twg.Wait()\n\t\t\t\tclose(errC)\n\t\t\t}()\n\n\t\t\tfor err := range errC {\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Error(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc BenchmarkRing_write_1_100(b *testing.B)      { benchmarkRingWrite(b, MustNewRing(1), 100) }\nfunc BenchmarkRing_write_1_1000(b *testing.B)     { benchmarkRingWrite(b, MustNewRing(1), 1000) }\nfunc BenchmarkRing_write_1_10000(b *testing.B)    { benchmarkRingWrite(b, MustNewRing(1), 10000) }\nfunc BenchmarkRing_write_1_100000(b *testing.B)   { benchmarkRingWrite(b, MustNewRing(1), 100000) }\nfunc BenchmarkRing_write_4_100(b *testing.B)      { benchmarkRingWrite(b, MustNewRing(4), 100) }\nfunc BenchmarkRing_write_4_1000(b *testing.B)     { benchmarkRingWrite(b, MustNewRing(4), 1000) }\nfunc BenchmarkRing_write_4_10000(b *testing.B)    { benchmarkRingWrite(b, MustNewRing(4), 10000) }\nfunc BenchmarkRing_write_4_100000(b *testing.B)   { benchmarkRingWrite(b, MustNewRing(4), 100000) }\nfunc BenchmarkRing_write_32_100(b *testing.B)     { benchmarkRingWrite(b, MustNewRing(32), 100) }\nfunc BenchmarkRing_write_32_1000(b *testing.B)    { benchmarkRingWrite(b, MustNewRing(32), 1000) }\nfunc BenchmarkRing_write_32_10000(b *testing.B)   { benchmarkRingWrite(b, MustNewRing(32), 10000) }\nfunc BenchmarkRing_write_32_100000(b *testing.B)  { benchmarkRingWrite(b, MustNewRing(32), 100000) }\nfunc BenchmarkRing_write_128_100(b *testing.B)    { benchmarkRingWrite(b, MustNewRing(128), 100) }\nfunc BenchmarkRing_write_128_1000(b *testing.B)   { benchmarkRingWrite(b, MustNewRing(128), 1000) }\nfunc BenchmarkRing_write_128_10000(b *testing.B)  { benchmarkRingWrite(b, MustNewRing(128), 10000) }\nfunc 
BenchmarkRing_write_128_100000(b *testing.B) { benchmarkRingWrite(b, MustNewRing(256), 100000) }\nfunc BenchmarkRing_write_256_100(b *testing.B)    { benchmarkRingWrite(b, MustNewRing(256), 100) }\nfunc BenchmarkRing_write_256_1000(b *testing.B)   { benchmarkRingWrite(b, MustNewRing(256), 1000) }\nfunc BenchmarkRing_write_256_10000(b *testing.B)  { benchmarkRingWrite(b, MustNewRing(256), 10000) }\nfunc BenchmarkRing_write_256_100000(b *testing.B) { benchmarkRingWrite(b, MustNewRing(256), 100000) }\n\nfunc MustNewRing(n int) *ring {\n\tr, err := newring(n)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn r\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/string.go",
    "content": "package tsm1\n\n// String encoding uses snappy compression to compress each string.  Each string is\n// appended to byte slice prefixed with a variable byte length followed by the string\n// bytes.  The bytes are compressed using snappy compressor and a 1 byte header is used\n// to indicate the type of encoding.\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n\n\t\"github.com/golang/snappy\"\n)\n\nconst (\n\t// stringUncompressed is a an uncompressed format encoding strings as raw bytes.\n\t// Not yet implemented.\n\tstringUncompressed = 0\n\n\t// stringCompressedSnappy is a compressed encoding using Snappy compression\n\tstringCompressedSnappy = 1\n)\n\n// StringEncoder encodes multiple strings into a byte slice.\ntype StringEncoder struct {\n\t// The encoded bytes\n\tbytes []byte\n}\n\n// NewStringEncoder returns a new StringEncoder with an initial buffer ready to hold sz bytes.\nfunc NewStringEncoder(sz int) StringEncoder {\n\treturn StringEncoder{\n\t\tbytes: make([]byte, 0, sz),\n\t}\n}\n\n// Flush is no-op\nfunc (e *StringEncoder) Flush() {}\n\n// Reset sets the encoder back to its initial state.\nfunc (e *StringEncoder) Reset() {\n\te.bytes = e.bytes[:0]\n}\n\n// Write encodes s to the underlying buffer.\nfunc (e *StringEncoder) Write(s string) {\n\tb := make([]byte, 10)\n\t// Append the length of the string using variable byte encoding\n\ti := binary.PutUvarint(b, uint64(len(s)))\n\te.bytes = append(e.bytes, b[:i]...)\n\n\t// Append the string bytes\n\te.bytes = append(e.bytes, s...)\n}\n\n// Bytes returns a copy of the underlying buffer.\nfunc (e *StringEncoder) Bytes() ([]byte, error) {\n\t// Compress the currently appended bytes using snappy and prefix with\n\t// a 1 byte header for future extension\n\tdata := snappy.Encode(nil, e.bytes)\n\treturn append([]byte{stringCompressedSnappy << 4}, data...), nil\n}\n\n// StringDecoder decodes a byte slice into strings.\ntype StringDecoder struct {\n\tb   []byte\n\tl   int\n\ti   int\n\terr 
error\n}\n\n// SetBytes initializes the decoder with bytes to read from.\n// This must be called before calling any other method.\nfunc (e *StringDecoder) SetBytes(b []byte) error {\n\t// First byte stores the encoding type, only have snappy format\n\t// currently so ignore for now.\n\tvar data []byte\n\tif len(b) > 0 {\n\t\tvar err error\n\t\tdata, err = snappy.Decode(nil, b[1:])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to decode string block: %v\", err.Error())\n\t\t}\n\t}\n\n\te.b = data\n\te.l = 0\n\te.i = 0\n\te.err = nil\n\n\treturn nil\n}\n\n// Next returns true if there are any values remaining to be decoded.\nfunc (e *StringDecoder) Next() bool {\n\tif e.err != nil {\n\t\treturn false\n\t}\n\n\te.i += e.l\n\treturn e.i < len(e.b)\n}\n\n// Read returns the next value from the decoder.\nfunc (e *StringDecoder) Read() string {\n\t// Read the length of the string\n\tlength, n := binary.Uvarint(e.b[e.i:])\n\tif n <= 0 {\n\t\te.err = fmt.Errorf(\"StringDecoder: invalid encoded string length\")\n\t\treturn \"\"\n\t}\n\n\t// The length of this string plus the length of the variable byte encoded length\n\te.l = int(length) + n\n\n\tlower := e.i + n\n\tupper := lower + int(length)\n\tif upper < lower {\n\t\te.err = fmt.Errorf(\"StringDecoder: length overflow\")\n\t\treturn \"\"\n\t}\n\tif upper > len(e.b) {\n\t\te.err = fmt.Errorf(\"StringDecoder: not enough data to represent encoded string\")\n\t\treturn \"\"\n\t}\n\n\treturn string(e.b[lower:upper])\n}\n\n// Error returns the last error encountered by the decoder.\nfunc (e *StringDecoder) Error() error {\n\treturn e.err\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/string_test.go",
    "content": "package tsm1\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\t\"testing/quick\"\n)\n\nfunc Test_StringEncoder_NoValues(t *testing.T) {\n\tenc := NewStringEncoder(1024)\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar dec StringDecoder\n\tif err := dec.SetBytes(b); err != nil {\n\t\tt.Fatalf(\"unexpected error creating string decoder: %v\", err)\n\t}\n\tif dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n}\n\nfunc Test_StringEncoder_Single(t *testing.T) {\n\tenc := NewStringEncoder(1024)\n\tv1 := \"v1\"\n\tenc.Write(v1)\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar dec StringDecoder\n\tif dec.SetBytes(b); err != nil {\n\t\tt.Fatalf(\"unexpected error creating string decoder: %v\", err)\n\t}\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got false, exp true\")\n\t}\n\n\tif v1 != dec.Read() {\n\t\tt.Fatalf(\"unexpected value: got %v, exp %v\", dec.Read(), v1)\n\t}\n}\n\nfunc Test_StringEncoder_Multi_Compressed(t *testing.T) {\n\tenc := NewStringEncoder(1024)\n\n\tvalues := make([]string, 10)\n\tfor i := range values {\n\t\tvalues[i] = fmt.Sprintf(\"value %d\", i)\n\t\tenc.Write(values[i])\n\t}\n\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif b[0]>>4 != stringCompressedSnappy {\n\t\tt.Fatalf(\"unexpected encoding: got %v, exp %v\", b[0], stringCompressedSnappy)\n\t}\n\n\tif exp := 51; len(b) != exp {\n\t\tt.Fatalf(\"unexpected length: got %v, exp %v\", len(b), exp)\n\t}\n\n\tvar dec StringDecoder\n\tif err := dec.SetBytes(b); err != nil {\n\t\tt.Fatalf(\"unexpected erorr creating string decoder: %v\", err)\n\t}\n\n\tfor i, v := range values {\n\t\tif !dec.Next() {\n\t\t\tt.Fatalf(\"unexpected next value: got false, exp true\")\n\t\t}\n\t\tif v != dec.Read() {\n\t\t\tt.Fatalf(\"unexpected value at pos %d: got %v, exp %v\", i, dec.Read(), 
v)\n\t\t}\n\t}\n\n\tif dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n}\n\nfunc Test_StringEncoder_Quick(t *testing.T) {\n\tquick.Check(func(values []string) bool {\n\t\texpected := values\n\t\tif values == nil {\n\t\t\texpected = []string{}\n\t\t}\n\t\t// Write values to encoder.\n\t\tenc := NewStringEncoder(1024)\n\t\tfor _, v := range values {\n\t\t\tenc.Write(v)\n\t\t}\n\n\t\t// Retrieve encoded bytes from encoder.\n\t\tbuf, err := enc.Bytes()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\t// Read values out of decoder.\n\t\tgot := make([]string, 0, len(values))\n\t\tvar dec StringDecoder\n\t\tif err := dec.SetBytes(buf); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfor dec.Next() {\n\t\t\tif err := dec.Error(); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tgot = append(got, dec.Read())\n\t\t}\n\n\t\t// Verify that input and output values match.\n\t\tif !reflect.DeepEqual(expected, got) {\n\t\t\tt.Fatalf(\"mismatch:\\n\\nexp=%#v\\n\\ngot=%#v\\n\\n\", expected, got)\n\t\t}\n\n\t\treturn true\n\t}, nil)\n}\n\nfunc Test_StringDecoder_Empty(t *testing.T) {\n\tvar dec StringDecoder\n\tif err := dec.SetBytes([]byte{}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif dec.Next() {\n\t\tt.Fatalf(\"exp Next() == false, got true\")\n\t}\n}\n\nfunc Test_StringDecoder_CorruptRead(t *testing.T) {\n\tcases := []string{\n\t\t\"\\x10\\x03\\b\\x03Hi\", // Higher length than actual data\n\t\t\"\\x10\\x1dp\\x9c\\x90\\x90\\x90\\x90\\x90\\x90\\x90\\x90\\x90length overflow----\",\n\t}\n\n\tfor _, c := range cases {\n\t\tvar dec StringDecoder\n\t\tif err := dec.SetBytes([]byte(c)); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif !dec.Next() {\n\t\t\tt.Fatalf(\"exp Next() to return true, got false\")\n\t\t}\n\n\t\t_ = dec.Read()\n\t\tif dec.Error() == nil {\n\t\t\tt.Fatalf(\"exp an err, got nil: %q\", c)\n\t\t}\n\t}\n}\n\nfunc Test_StringDecoder_CorruptSetBytes(t *testing.T) {\n\tcases := 
[]string{\n\t\t\"0t\\x00\\x01\\x000\\x00\\x01\\x000\\x00\\x01\\x000\\x00\\x01\\x000\\x00\\x01\" +\n\t\t\t\"\\x000\\x00\\x01\\x000\\x00\\x01\\x000\\x00\\x00\\x00\\xff:\\x01\\x00\\x01\\x00\\x01\" +\n\t\t\t\"\\x00\\x01\\x00\\x01\\x00\\x01\\x00\\x010\\x010\\x000\\x010\\x010\\x010\\x01\" +\n\t\t\t\"0\\x010\\x010\\x010\\x010\\x010\\x010\\x010\\x010\\x010\\x010\", // Upper slice bounds overflows negative\n\t}\n\n\tfor _, c := range cases {\n\t\tvar dec StringDecoder\n\t\tif err := dec.SetBytes([]byte(c)); err == nil {\n\t\t\tt.Fatalf(\"exp an err, got nil: %q\", c)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/timestamp.go",
    "content": "package tsm1\n\n// Timestamp encoding is adaptive and based on structure of the timestamps that are encoded.  It\n// uses a combination of delta encoding, scaling and compression using simple8b, run length encoding\n// as well as falling back to no compression if needed.\n//\n// Timestamp values to be encoded should be sorted before encoding.  When encoded, the values are\n// first delta-encoded.  The first value is the starting timestamp, subsequent values are the difference\n// from the prior value.\n//\n// Timestamp resolution can also be in the nanosecond.  Many timestamps are monotonically increasing\n// and fall on even boundaries of time such as every 10s.  When the timestamps have this structure,\n// they are scaled by the largest common divisor that is also a factor of 10.  This has the effect\n// of converting very large integer deltas into very small one that can be reversed by multiplying them\n// by the scaling factor.\n//\n// Using these adjusted values, if all the deltas are the same, the time range is stored using run\n// length encoding.  If run length encoding is not possible and all values are less than 1 << 60 - 1\n// (~36.5 yrs in nanosecond resolution), then the timestamps are encoded using simple8b encoding.  If\n// any value exceeds the maximum values, the deltas are stored uncompressed using 8b each.\n//\n// Each compressed byte slice has a 1 byte header indicating the compression type.  The 4 high bits\n// indicate the encoding type.  The 4 low bits are used by the encoding type.\n//\n// For run-length encoding, the 4 low bits store the log10 of the scaling factor.  The next 8 bytes are\n// the starting timestamp, next 1-10 bytes is the delta value using variable-length encoding, finally the\n// next 1-10 bytes is the count of values.\n//\n// For simple8b encoding, the 4 low bits store the log10 of the scaling factor.  
The next 8 bytes is the\n// first delta value stored uncompressed, the remaining bytes are 64bit words containg compressed delta\n// values.\n//\n// For uncompressed encoding, the delta values are stored using 8 bytes each.\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"math\"\n\n\t\"github.com/jwilder/encoding/simple8b\"\n)\n\nconst (\n\t// timeUncompressed is a an uncompressed format using 8 bytes per timestamp\n\ttimeUncompressed = 0\n\t// timeCompressedPackedSimple is a bit-packed format using simple8b encoding\n\ttimeCompressedPackedSimple = 1\n\t// timeCompressedRLE is a run-length encoding format\n\ttimeCompressedRLE = 2\n)\n\n// TimeEncoder encodes time.Time to byte slices.\ntype TimeEncoder interface {\n\tWrite(t int64)\n\tBytes() ([]byte, error)\n\tReset()\n}\n\ntype encoder struct {\n\tts    []uint64\n\tbytes []byte\n\tenc   *simple8b.Encoder\n}\n\n// NewTimeEncoder returns a TimeEncoder with an initial buffer ready to hold sz bytes.\nfunc NewTimeEncoder(sz int) TimeEncoder {\n\treturn &encoder{\n\t\tts:  make([]uint64, 0, sz),\n\t\tenc: simple8b.NewEncoder(),\n\t}\n}\n\n// Reset sets the encoder back to its initial state.\nfunc (e *encoder) Reset() {\n\te.ts = e.ts[:0]\n\te.bytes = e.bytes[:0]\n\te.enc.Reset()\n}\n\n// Write adds a timestamp to the compressed stream.\nfunc (e *encoder) Write(t int64) {\n\te.ts = append(e.ts, uint64(t))\n}\n\nfunc (e *encoder) reduce() (max, divisor uint64, rle bool, deltas []uint64) {\n\t// Compute the deltas in place to avoid allocating another slice\n\tdeltas = e.ts\n\t// Starting values for a max and divisor\n\tmax, divisor = 0, 1e12\n\n\t// Indicates whether the the deltas can be run-length encoded\n\trle = true\n\n\t// Iterate in reverse so we can apply deltas in place\n\tfor i := len(deltas) - 1; i > 0; i-- {\n\n\t\t// First differential encode the values\n\t\tdeltas[i] = deltas[i] - deltas[i-1]\n\n\t\t// We also need to keep track of the max value and largest common divisor\n\t\tv := deltas[i]\n\n\t\tif v > 
max {\n\t\t\tmax = v\n\t\t}\n\n\t\t// If our value is divisible by 10, break.  Otherwise, try the next smallest divisor.\n\t\tfor divisor > 1 && v%divisor != 0 {\n\t\t\tdivisor /= 10\n\t\t}\n\n\t\t// Skip the first value || see if prev = curr.  The deltas can be RLE if the are all equal.\n\t\trle = i == len(deltas)-1 || rle && (deltas[i+1] == deltas[i])\n\t}\n\treturn\n}\n\n// Bytes returns the encoded bytes of all written times.\nfunc (e *encoder) Bytes() ([]byte, error) {\n\tif len(e.ts) == 0 {\n\t\treturn e.bytes[:0], nil\n\t}\n\n\t// Maximum and largest common divisor.  rle is true if dts (the delta timestamps),\n\t// are all the same.\n\tmax, div, rle, dts := e.reduce()\n\n\t// The deltas are all the same, so we can run-length encode them\n\tif rle && len(e.ts) > 1 {\n\t\treturn e.encodeRLE(e.ts[0], e.ts[1], div, len(e.ts))\n\t}\n\n\t// We can't compress this time-range, the deltas exceed 1 << 60\n\tif max > simple8b.MaxValue {\n\t\treturn e.encodeRaw()\n\t}\n\n\treturn e.encodePacked(div, dts)\n}\n\nfunc (e *encoder) encodePacked(div uint64, dts []uint64) ([]byte, error) {\n\t// Only apply the divisor if it's greater than 1 since division is expensive.\n\tif div > 1 {\n\t\tfor _, v := range dts[1:] {\n\t\t\tif err := e.enc.Write(v / div); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfor _, v := range dts[1:] {\n\t\t\tif err := e.enc.Write(v); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\t// The compressed deltas\n\tdeltas, err := e.enc.Bytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsz := 8 + 1 + len(deltas)\n\tif cap(e.bytes) < sz {\n\t\te.bytes = make([]byte, sz)\n\t}\n\tb := e.bytes[:sz]\n\n\t// 4 high bits used for the encoding type\n\tb[0] = byte(timeCompressedPackedSimple) << 4\n\t// 4 low bits are the log10 divisor\n\tb[0] |= byte(math.Log10(float64(div)))\n\n\t// The first delta value\n\tbinary.BigEndian.PutUint64(b[1:9], uint64(dts[0]))\n\n\tcopy(b[9:], deltas)\n\treturn b[:9+len(deltas)], 
nil\n}\n\nfunc (e *encoder) encodeRaw() ([]byte, error) {\n\tsz := 1 + len(e.ts)*8\n\tif cap(e.bytes) < sz {\n\t\te.bytes = make([]byte, sz)\n\t}\n\tb := e.bytes[:sz]\n\tb[0] = byte(timeUncompressed) << 4\n\tfor i, v := range e.ts {\n\t\tbinary.BigEndian.PutUint64(b[1+i*8:1+i*8+8], uint64(v))\n\t}\n\treturn b, nil\n}\n\nfunc (e *encoder) encodeRLE(first, delta, div uint64, n int) ([]byte, error) {\n\t// Large varints can take up to 10 bytes, we're encoding 3 + 1 byte type\n\tsz := 31\n\tif cap(e.bytes) < sz {\n\t\te.bytes = make([]byte, sz)\n\t}\n\tb := e.bytes[:sz]\n\t// 4 high bits used for the encoding type\n\tb[0] = byte(timeCompressedRLE) << 4\n\t// 4 low bits are the log10 divisor\n\tb[0] |= byte(math.Log10(float64(div)))\n\n\ti := 1\n\t// The first timestamp\n\tbinary.BigEndian.PutUint64(b[i:], uint64(first))\n\ti += 8\n\t// The first delta\n\ti += binary.PutUvarint(b[i:], uint64(delta/div))\n\t// The number of times the delta is repeated\n\ti += binary.PutUvarint(b[i:], uint64(n))\n\n\treturn b[:i], nil\n}\n\n// TimeDecoder decodes a byte slice into timestamps.\ntype TimeDecoder struct {\n\tv    int64\n\ti, n int\n\tts   []uint64\n\tdec  simple8b.Decoder\n\terr  error\n\n\t// The delta value for a run-length encoded byte slice\n\trleDelta int64\n\n\tencoding byte\n}\n\n// Init initializes the decoder with bytes to read from.\nfunc (d *TimeDecoder) Init(b []byte) {\n\td.v = 0\n\td.i = 0\n\td.ts = d.ts[:0]\n\td.err = nil\n\tif len(b) > 0 {\n\t\t// Encoding type is stored in the 4 high bits of the first byte\n\t\td.encoding = b[0] >> 4\n\t}\n\td.decode(b)\n}\n\n// Next returns true if there are any timestamps remaining to be decoded.\nfunc (d *TimeDecoder) Next() bool {\n\tif d.err != nil {\n\t\treturn false\n\t}\n\n\tif d.encoding == timeCompressedRLE {\n\t\tif d.i >= d.n {\n\t\t\treturn false\n\t\t}\n\t\td.i++\n\t\td.v += d.rleDelta\n\t\treturn d.i < d.n\n\t}\n\n\tif d.i >= len(d.ts) {\n\t\treturn false\n\t}\n\td.v = int64(d.ts[d.i])\n\td.i++\n\treturn 
true\n}\n\n// Read returns the next timestamp from the decoder.\nfunc (d *TimeDecoder) Read() int64 {\n\treturn d.v\n}\n\n// Error returns the last error encountered by the decoder.\nfunc (d *TimeDecoder) Error() error {\n\treturn d.err\n}\n\nfunc (d *TimeDecoder) decode(b []byte) {\n\tif len(b) == 0 {\n\t\treturn\n\t}\n\n\tswitch d.encoding {\n\tcase timeUncompressed:\n\t\td.decodeRaw(b[1:])\n\tcase timeCompressedRLE:\n\t\td.decodeRLE(b)\n\tcase timeCompressedPackedSimple:\n\t\td.decodePacked(b)\n\tdefault:\n\t\td.err = fmt.Errorf(\"unknown encoding: %v\", d.encoding)\n\t}\n}\n\nfunc (d *TimeDecoder) decodePacked(b []byte) {\n\tif len(b) < 9 {\n\t\td.err = fmt.Errorf(\"TimeDecoder: not enough data to decode packed timestamps\")\n\t\treturn\n\t}\n\tdiv := uint64(math.Pow10(int(b[0] & 0xF)))\n\tfirst := uint64(binary.BigEndian.Uint64(b[1:9]))\n\n\td.dec.SetBytes(b[9:])\n\n\td.i = 0\n\tdeltas := d.ts[:0]\n\tdeltas = append(deltas, first)\n\n\tfor d.dec.Next() {\n\t\tdeltas = append(deltas, d.dec.Read())\n\t}\n\n\t// Compute the prefix sum and scale the deltas back up\n\tlast := deltas[0]\n\tif div > 1 {\n\t\tfor i := 1; i < len(deltas); i++ {\n\t\t\tdgap := deltas[i] * div\n\t\t\tdeltas[i] = last + dgap\n\t\t\tlast = deltas[i]\n\t\t}\n\t} else {\n\t\tfor i := 1; i < len(deltas); i++ {\n\t\t\tdeltas[i] += last\n\t\t\tlast = deltas[i]\n\t\t}\n\t}\n\n\td.i = 0\n\td.ts = deltas\n}\n\nfunc (d *TimeDecoder) decodeRLE(b []byte) {\n\tif len(b) < 9 {\n\t\td.err = fmt.Errorf(\"TimeDecoder: not enough data for initial RLE timestamp\")\n\t\treturn\n\t}\n\n\tvar i, n int\n\n\t// Lower 4 bits hold the 10 based exponent so we can scale the values back up\n\tmod := int64(math.Pow10(int(b[i] & 0xF)))\n\ti++\n\n\t// Next 8 bytes is the starting timestamp\n\tfirst := binary.BigEndian.Uint64(b[i : i+8])\n\ti += 8\n\n\t// Next 1-10 bytes is our (scaled down by factor of 10) run length values\n\tvalue, n := binary.Uvarint(b[i:])\n\tif n <= 0 {\n\t\td.err = fmt.Errorf(\"TimeDecoder: 
invalid run length in decodeRLE\")\n\t\treturn\n\t}\n\n\t// Scale the value back up\n\tvalue *= uint64(mod)\n\ti += n\n\n\t// Last 1-10 bytes is how many times the value repeats\n\tcount, n := binary.Uvarint(b[i:])\n\tif n <= 0 {\n\t\td.err = fmt.Errorf(\"TimeDecoder: invalid repeat value in decodeRLE\")\n\t\treturn\n\t}\n\n\td.v = int64(first - value)\n\td.rleDelta = int64(value)\n\n\td.i = -1\n\td.n = int(count)\n}\n\nfunc (d *TimeDecoder) decodeRaw(b []byte) {\n\td.i = 0\n\td.ts = make([]uint64, len(b)/8)\n\tfor i := range d.ts {\n\t\td.ts[i] = binary.BigEndian.Uint64(b[i*8 : i*8+8])\n\n\t\tdelta := d.ts[i]\n\t\t// Compute the prefix sum and scale the deltas back up\n\t\tif i > 0 {\n\t\t\td.ts[i] = d.ts[i-1] + delta\n\t\t}\n\t}\n}\n\nfunc CountTimestamps(b []byte) int {\n\tif len(b) == 0 {\n\t\treturn 0\n\t}\n\n\t// Encoding type is stored in the 4 high bits of the first byte\n\tencoding := b[0] >> 4\n\tswitch encoding {\n\tcase timeUncompressed:\n\t\t// Uncompressed timestamps are just 8 bytes each\n\t\treturn len(b[1:]) / 8\n\tcase timeCompressedRLE:\n\t\t// First 9 bytes are the starting timestamp and scaling factor, skip over them\n\t\ti := 9\n\t\t// Next 1-10 bytes is our (scaled down by factor of 10) run length values\n\t\t_, n := binary.Uvarint(b[9:])\n\t\ti += n\n\t\t// Last 1-10 bytes is how many times the value repeats\n\t\tcount, _ := binary.Uvarint(b[i:])\n\t\treturn int(count)\n\tcase timeCompressedPackedSimple:\n\t\t// First 9 bytes are the starting timestamp and scaling factor, skip over them\n\t\tcount, _ := simple8b.CountBytes(b[9:])\n\t\treturn count + 1 // +1 is for the first uncompressed timestamp, starting timestamep in b[1:9]\n\tdefault:\n\t\treturn 0\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/timestamp_test.go",
    "content": "package tsm1\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\t\"testing/quick\"\n\t\"time\"\n)\n\nfunc Test_TimeEncoder(t *testing.T) {\n\tenc := NewTimeEncoder(1)\n\n\tx := []int64{}\n\tnow := time.Unix(0, 0)\n\tx = append(x, now.UnixNano())\n\tenc.Write(now.UnixNano())\n\tfor i := 1; i < 4; i++ {\n\t\tx = append(x, now.Add(time.Duration(i)*time.Second).UnixNano())\n\t\tenc.Write(x[i])\n\t}\n\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif got := b[0] >> 4; got != timeCompressedRLE {\n\t\tt.Fatalf(\"Wrong encoding used: expected rle, got %v\", got)\n\t}\n\n\tvar dec TimeDecoder\n\tdec.Init(b)\n\tfor i, v := range x {\n\t\tif !dec.Next() {\n\t\t\tt.Fatalf(\"Next == false, expected true\")\n\t\t}\n\n\t\tif v != dec.Read() {\n\t\t\tt.Fatalf(\"Item %d mismatch, got %v, exp %v\", i, dec.Read(), v)\n\t\t}\n\t}\n}\n\nfunc Test_TimeEncoder_NoValues(t *testing.T) {\n\tenc := NewTimeEncoder(0)\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar dec TimeDecoder\n\tdec.Init(b)\n\tif dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n}\n\nfunc Test_TimeEncoder_One(t *testing.T) {\n\tenc := NewTimeEncoder(1)\n\tvar tm int64\n\n\tenc.Write(tm)\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif got := b[0] >> 4; got != timeCompressedPackedSimple {\n\t\tt.Fatalf(\"Wrong encoding used: expected uncompressed, got %v\", got)\n\t}\n\n\tvar dec TimeDecoder\n\tdec.Init(b)\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif tm != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), tm)\n\t}\n}\n\nfunc Test_TimeEncoder_Two(t *testing.T) {\n\tenc := NewTimeEncoder(2)\n\tt1 := int64(0)\n\tt2 := int64(1)\n\tenc.Write(t1)\n\tenc.Write(t2)\n\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", 
err)\n\t}\n\n\tif got := b[0] >> 4; got != timeCompressedRLE {\n\t\tt.Fatalf(\"Wrong encoding used: expected rle, got %v\", got)\n\t}\n\n\tvar dec TimeDecoder\n\tdec.Init(b)\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif t1 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), t1)\n\t}\n\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif t2 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), t2)\n\t}\n}\n\nfunc Test_TimeEncoder_Three(t *testing.T) {\n\tenc := NewTimeEncoder(3)\n\tt1 := int64(0)\n\tt2 := int64(1)\n\tt3 := int64(3)\n\n\tenc.Write(t1)\n\tenc.Write(t2)\n\tenc.Write(t3)\n\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif got := b[0] >> 4; got != timeCompressedPackedSimple {\n\t\tt.Fatalf(\"Wrong encoding used: expected rle, got %v\", got)\n\t}\n\n\tvar dec TimeDecoder\n\tdec.Init(b)\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif t1 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), t1)\n\t}\n\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif t2 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), t2)\n\t}\n\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif t3 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), t3)\n\t}\n}\n\nfunc Test_TimeEncoder_Large_Range(t *testing.T) {\n\tenc := NewTimeEncoder(2)\n\tt1 := int64(1442369134000000000)\n\tt2 := int64(1442369135000000000)\n\tenc.Write(t1)\n\tenc.Write(t2)\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif got := b[0] >> 4; got != timeCompressedRLE {\n\t\tt.Fatalf(\"Wrong encoding used: expected rle, got %v\", 
got)\n\t}\n\n\tvar dec TimeDecoder\n\tdec.Init(b)\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif t1 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), t1)\n\t}\n\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif t2 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), t2)\n\t}\n}\n\nfunc Test_TimeEncoder_Uncompressed(t *testing.T) {\n\tenc := NewTimeEncoder(3)\n\tt1 := time.Unix(0, 0).UnixNano()\n\tt2 := time.Unix(1, 0).UnixNano()\n\n\t// about 36.5yrs in NS resolution is max range for compressed format\n\t// This should cause the encoding to fallback to raw points\n\tt3 := time.Unix(2, (2 << 59)).UnixNano()\n\tenc.Write(t1)\n\tenc.Write(t2)\n\tenc.Write(t3)\n\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"expected error: %v\", err)\n\t}\n\n\tif exp := 25; len(b) != exp {\n\t\tt.Fatalf(\"length mismatch: got %v, exp %v\", len(b), exp)\n\t}\n\n\tif got := b[0] >> 4; got != timeUncompressed {\n\t\tt.Fatalf(\"Wrong encoding used: expected uncompressed, got %v\", got)\n\t}\n\n\tvar dec TimeDecoder\n\tdec.Init(b)\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif t1 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), t1)\n\t}\n\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif t2 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), t2)\n\t}\n\n\tif !dec.Next() {\n\t\tt.Fatalf(\"unexpected next value: got true, exp false\")\n\t}\n\n\tif t3 != dec.Read() {\n\t\tt.Fatalf(\"read value mismatch: got %v, exp %v\", dec.Read(), t3)\n\t}\n}\n\nfunc Test_TimeEncoder_RLE(t *testing.T) {\n\tenc := NewTimeEncoder(512)\n\tvar ts []int64\n\tfor i := 0; i < 500; i++ {\n\t\tts = append(ts, int64(i))\n\t}\n\n\tfor _, v := range ts {\n\t\tenc.Write(v)\n\t}\n\n\tb, err := 
enc.Bytes()\n\tif exp := 12; len(b) != exp {\n\t\tt.Fatalf(\"length mismatch: got %v, exp %v\", len(b), exp)\n\t}\n\n\tif got := b[0] >> 4; got != timeCompressedRLE {\n\t\tt.Fatalf(\"Wrong encoding used: expected uncompressed, got %v\", got)\n\t}\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar dec TimeDecoder\n\tdec.Init(b)\n\tfor i, v := range ts {\n\t\tif !dec.Next() {\n\t\t\tt.Fatalf(\"Next == false, expected true\")\n\t\t}\n\n\t\tif v != dec.Read() {\n\t\t\tt.Fatalf(\"Item %d mismatch, got %v, exp %v\", i, dec.Read(), v)\n\t\t}\n\t}\n\n\tif dec.Next() {\n\t\tt.Fatalf(\"unexpected extra values\")\n\t}\n}\n\nfunc Test_TimeEncoder_Reverse(t *testing.T) {\n\tenc := NewTimeEncoder(3)\n\tts := []int64{\n\t\tint64(3),\n\t\tint64(2),\n\t\tint64(0),\n\t}\n\n\tfor _, v := range ts {\n\t\tenc.Write(v)\n\t}\n\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif got := b[0] >> 4; got != timeUncompressed {\n\t\tt.Fatalf(\"Wrong encoding used: expected uncompressed, got %v\", got)\n\t}\n\n\tvar dec TimeDecoder\n\tdec.Init(b)\n\ti := 0\n\tfor dec.Next() {\n\t\tif ts[i] != dec.Read() {\n\t\t\tt.Fatalf(\"read value %d mismatch: got %v, exp %v\", i, dec.Read(), ts[i])\n\t\t}\n\t\ti++\n\t}\n}\n\nfunc Test_TimeEncoder_220SecondDelta(t *testing.T) {\n\tenc := NewTimeEncoder(256)\n\tvar ts []int64\n\tnow := time.Now()\n\tfor i := 0; i < 220; i++ {\n\t\tts = append(ts, now.Add(time.Duration(i*60)*time.Second).UnixNano())\n\t}\n\n\tfor _, v := range ts {\n\t\tenc.Write(v)\n\t}\n\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\t// Using RLE, should get 12 bytes\n\tif exp := 12; len(b) != exp {\n\t\tt.Fatalf(\"unexpected length: got %v, exp %v\", len(b), exp)\n\t}\n\n\tif got := b[0] >> 4; got != timeCompressedRLE {\n\t\tt.Fatalf(\"Wrong encoding used: expected uncompressed, got %v\", got)\n\t}\n\n\tvar dec TimeDecoder\n\tdec.Init(b)\n\ti := 0\n\tfor 
dec.Next() {\n\t\tif ts[i] != dec.Read() {\n\t\t\tt.Fatalf(\"read value %d mismatch: got %v, exp %v\", i, dec.Read(), ts[i])\n\t\t}\n\t\ti++\n\t}\n\n\tif i != len(ts) {\n\t\tt.Fatalf(\"Read too few values: exp %d, got %d\", len(ts), i)\n\t}\n\n\tif dec.Next() {\n\t\tt.Fatalf(\"expecte Next() = false, got true\")\n\t}\n}\n\nfunc Test_TimeEncoder_Quick(t *testing.T) {\n\tquick.Check(func(values []int64) bool {\n\t\t// Write values to encoder.\n\t\tenc := NewTimeEncoder(1024)\n\t\texp := make([]int64, len(values))\n\t\tfor i, v := range values {\n\t\t\texp[i] = int64(v)\n\t\t\tenc.Write(exp[i])\n\t\t}\n\n\t\t// Retrieve encoded bytes from encoder.\n\t\tbuf, err := enc.Bytes()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\t// Read values out of decoder.\n\t\tgot := make([]int64, 0, len(values))\n\t\tvar dec TimeDecoder\n\t\tdec.Init(buf)\n\t\tfor dec.Next() {\n\t\t\tif err := dec.Error(); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tgot = append(got, dec.Read())\n\t\t}\n\n\t\t// Verify that input and output values match.\n\t\tif !reflect.DeepEqual(exp, got) {\n\t\t\tt.Fatalf(\"mismatch:\\n\\nexp=%+v\\n\\ngot=%+v\\n\\n\", exp, got)\n\t\t}\n\n\t\treturn true\n\t}, nil)\n}\n\nfunc Test_TimeEncoder_RLESeconds(t *testing.T) {\n\tenc := NewTimeEncoder(6)\n\tts := make([]int64, 6)\n\n\tts[0] = int64(1444448158000000000)\n\tts[1] = int64(1444448168000000000)\n\tts[2] = int64(1444448178000000000)\n\tts[3] = int64(1444448188000000000)\n\tts[4] = int64(1444448198000000000)\n\tts[5] = int64(1444448208000000000)\n\n\tfor _, v := range ts {\n\t\tenc.Write(v)\n\t}\n\n\tb, err := enc.Bytes()\n\tif got := b[0] >> 4; got != timeCompressedRLE {\n\t\tt.Fatalf(\"Wrong encoding used: expected rle, got %v\", got)\n\t}\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tvar dec TimeDecoder\n\tdec.Init(b)\n\tfor i, v := range ts {\n\t\tif !dec.Next() {\n\t\t\tt.Fatalf(\"Next == false, expected true\")\n\t\t}\n\n\t\tif v != dec.Read() 
{\n\t\t\tt.Fatalf(\"Item %d mismatch, got %v, exp %v\", i, dec.Read(), v)\n\t\t}\n\t}\n\n\tif dec.Next() {\n\t\tt.Fatalf(\"unexpected extra values\")\n\t}\n}\n\nfunc TestTimeEncoder_Count_Uncompressed(t *testing.T) {\n\tenc := NewTimeEncoder(2)\n\tt1 := time.Unix(0, 0).UnixNano()\n\tt2 := time.Unix(1, 0).UnixNano()\n\n\t// about 36.5yrs in NS resolution is max range for compressed format\n\t// This should cause the encoding to fallback to raw points\n\tt3 := time.Unix(2, (2 << 59)).UnixNano()\n\tenc.Write(t1)\n\tenc.Write(t2)\n\tenc.Write(t3)\n\n\tb, err := enc.Bytes()\n\tif got := b[0] >> 4; got != timeUncompressed {\n\t\tt.Fatalf(\"Wrong encoding used: expected rle, got %v\", got)\n\t}\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif got, exp := CountTimestamps(b), 3; got != exp {\n\t\tt.Fatalf(\"count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTimeEncoder_Count_RLE(t *testing.T) {\n\tenc := NewTimeEncoder(5)\n\tts := make([]int64, 6)\n\n\tts[0] = int64(1444448158000000000)\n\tts[1] = int64(1444448168000000000)\n\tts[2] = int64(1444448178000000000)\n\tts[3] = int64(1444448188000000000)\n\tts[4] = int64(1444448198000000000)\n\tts[5] = int64(1444448208000000000)\n\n\tfor _, v := range ts {\n\t\tenc.Write(v)\n\t}\n\n\tb, err := enc.Bytes()\n\tif got := b[0] >> 4; got != timeCompressedRLE {\n\t\tt.Fatalf(\"Wrong encoding used: expected rle, got %v\", got)\n\t}\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif got, exp := CountTimestamps(b), len(ts); got != exp {\n\t\tt.Fatalf(\"count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTimeEncoder_Count_Simple8(t *testing.T) {\n\tenc := NewTimeEncoder(3)\n\tt1 := int64(0)\n\tt2 := int64(1)\n\tt3 := int64(3)\n\n\tenc.Write(t1)\n\tenc.Write(t2)\n\tenc.Write(t3)\n\n\tb, err := enc.Bytes()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif got := b[0] >> 4; got != timeCompressedPackedSimple {\n\t\tt.Fatalf(\"Wrong 
encoding used: expected rle, got %v\", got)\n\t}\n\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif got, exp := CountTimestamps(b), 3; got != exp {\n\t\tt.Fatalf(\"count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTimeDecoder_Corrupt(t *testing.T) {\n\tcases := []string{\n\t\t\"\",                 // Empty\n\t\t\"\\x10\\x14\",         // Packed: not enough data\n\t\t\"\\x20\\x00\",         // RLE: not enough data for starting timestamp\n\t\t\"\\x2012345678\\x90\", // RLE: initial timestamp but invalid uvarint encoding\n\t\t\"\\x2012345678\\x7f\", // RLE: timestamp, RLE but invalid repeat\n\t\t\"\\x00123\",          // Raw: data length not multiple of 8\n\t}\n\n\tfor _, c := range cases {\n\t\tvar dec TimeDecoder\n\t\tdec.Init([]byte(c))\n\t\tif dec.Next() {\n\t\t\tt.Fatalf(\"exp next == false, got true\")\n\t\t}\n\t}\n}\n\nfunc BenchmarkTimeEncoder(b *testing.B) {\n\tenc := NewTimeEncoder(1024)\n\tx := make([]int64, 1024)\n\tfor i := 0; i < len(x); i++ {\n\t\tx[i] = time.Now().UnixNano()\n\t\tenc.Write(x[i])\n\t}\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tenc.Bytes()\n\t\tenc.Reset()\n\t\tfor i := 0; i < len(x); i++ {\n\t\t\tenc.Write(x[i])\n\t\t}\n\t}\n}\n\nfunc BenchmarkTimeDecoder_Packed(b *testing.B) {\n\tx := make([]int64, 1024)\n\tenc := NewTimeEncoder(1024)\n\tfor i := 0; i < len(x); i++ {\n\t\tx[i] = time.Now().UnixNano()\n\t\tenc.Write(x[i])\n\t}\n\tbytes, _ := enc.Bytes()\n\n\tb.ResetTimer()\n\n\tvar dec TimeDecoder\n\tfor i := 0; i < b.N; i++ {\n\t\tdec.Init(bytes)\n\t\tfor dec.Next() {\n\t\t}\n\t}\n}\n\nfunc BenchmarkTimeDecoder_RLE(b *testing.B) {\n\tx := make([]int64, 1024)\n\tenc := NewTimeEncoder(1024)\n\tfor i := 0; i < len(x); i++ {\n\t\tx[i] = int64(i * 10)\n\t\tenc.Write(x[i])\n\t}\n\tbytes, _ := enc.Bytes()\n\n\tb.ResetTimer()\n\n\tb.StopTimer()\n\tvar dec TimeDecoder\n\tb.StartTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tdec.Init(bytes)\n\t\tfor dec.Next() {\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/tombstone.go",
    "content": "package tsm1\n\nimport (\n\t\"bufio\"\n\t\"encoding/binary\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"math\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"sync\"\n)\n\nconst (\n\tv2header     = 0x1502\n\tv2headerSize = 4\n)\n\n// Tombstoner records tombstones when entries are deleted.\ntype Tombstoner struct {\n\tmu sync.RWMutex\n\n\t// Path is the location of the file to record tombstone. This should be the\n\t// full path to a TSM file.\n\tPath string\n\n\t// cache of the stats for this tombstone\n\tfileStats []FileStat\n\t// indicates that the stats may be out of sync with what is on disk and they\n\t// should be refreshed.\n\tstatsLoaded bool\n}\n\n// Tombstone represents an individual deletion.\ntype Tombstone struct {\n\t// Key is the tombstoned series key.\n\tKey string\n\n\t// Min and Max are the min and max unix nanosecond time ranges of Key that are deleted.  If\n\t// the full range is deleted, both values are -1.\n\tMin, Max int64\n}\n\n// Add adds the all keys, across all timestamps, to the tombstone.\nfunc (t *Tombstoner) Add(keys []string) error {\n\treturn t.AddRange(keys, math.MinInt64, math.MaxInt64)\n}\n\n// AddRange adds all keys to the tombstone specifying only the data between min and max to be removed.\nfunc (t *Tombstoner) AddRange(keys []string, min, max int64) error {\n\tif len(keys) == 0 {\n\t\treturn nil\n\t}\n\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\n\t// If this TSMFile has not been written (mainly in tests), don't write a\n\t// tombstone because the keys will not be written when it's actually saved.\n\tif t.Path == \"\" {\n\t\treturn nil\n\t}\n\n\tt.statsLoaded = false\n\n\ttombstones, err := t.readTombstone()\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tif cap(tombstones) < len(tombstones)+len(keys) {\n\t\tts := make([]Tombstone, len(tombstones), len(tombstones)+len(keys))\n\t\tcopy(ts, tombstones)\n\t\ttombstones = ts\n\t}\n\n\tfor _, k := range keys {\n\t\ttombstones = append(tombstones, Tombstone{\n\t\t\tKey: 
k,\n\t\t\tMin: min,\n\t\t\tMax: max,\n\t\t})\n\t}\n\n\treturn t.writeTombstone(tombstones)\n}\n\n// ReadAll returns all the tombstones in the Tombstoner's directory.\nfunc (t *Tombstoner) ReadAll() ([]Tombstone, error) {\n\treturn t.readTombstone()\n}\n\n// Delete removes all the tombstone files from disk.\nfunc (t *Tombstoner) Delete() error {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tif err := os.RemoveAll(t.tombstonePath()); err != nil {\n\t\treturn err\n\t}\n\tt.statsLoaded = false\n\treturn nil\n}\n\n// HasTombstones return true if there are any tombstone entries recorded.\nfunc (t *Tombstoner) HasTombstones() bool {\n\tfiles := t.TombstoneFiles()\n\treturn len(files) > 0 && files[0].Size > 0\n}\n\n// TombstoneFiles returns any tombstone files associated with Tombstoner's TSM file.\nfunc (t *Tombstoner) TombstoneFiles() []FileStat {\n\tt.mu.RLock()\n\tif t.statsLoaded {\n\t\tstats := t.fileStats\n\t\tt.mu.RUnlock()\n\t\treturn stats\n\t}\n\tt.mu.RUnlock()\n\n\tstat, err := os.Stat(t.tombstonePath())\n\tif os.IsNotExist(err) || err != nil {\n\t\tt.mu.Lock()\n\t\t// The file doesn't exist so record that we tried to load it so\n\t\t// we don't continue to keep trying.  
This is the common case.\n\t\tt.statsLoaded = os.IsNotExist(err)\n\t\tt.fileStats = t.fileStats[:0]\n\t\tt.mu.Unlock()\n\t\treturn nil\n\t}\n\n\tt.mu.Lock()\n\tt.fileStats = append(t.fileStats[:0], FileStat{\n\t\tPath:         t.tombstonePath(),\n\t\tLastModified: stat.ModTime().UnixNano(),\n\t\tSize:         uint32(stat.Size()),\n\t})\n\tt.statsLoaded = true\n\tstats := t.fileStats\n\tt.mu.Unlock()\n\n\treturn stats\n}\n\n// Walk calls fn for every Tombstone under the Tombstoner.\nfunc (t *Tombstoner) Walk(fn func(t Tombstone) error) error {\n\tf, err := os.Open(t.tombstonePath())\n\tif os.IsNotExist(err) {\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tvar b [4]byte\n\tif _, err := f.Read(b[:]); err != nil {\n\t\t// Might be a zero length file which should not exist, but\n\t\t// an old bug allowed them to occur.  Treat it as an empty\n\t\t// v1 tombstone file so we don't abort loading the TSM file.\n\t\treturn t.readTombstoneV1(f, fn)\n\t}\n\n\tif _, err := f.Seek(0, io.SeekStart); err != nil {\n\t\treturn err\n\t}\n\n\tif binary.BigEndian.Uint32(b[:]) == v2header {\n\t\treturn t.readTombstoneV2(f, fn)\n\t}\n\treturn t.readTombstoneV1(f, fn)\n}\n\nfunc (t *Tombstoner) writeTombstone(tombstones []Tombstone) error {\n\ttmp, err := ioutil.TempFile(filepath.Dir(t.Path), \"tombstone\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tmp.Close()\n\n\tvar b [8]byte\n\n\tbw := bufio.NewWriterSize(tmp, 1024*1024)\n\n\tbinary.BigEndian.PutUint32(b[:4], v2header)\n\tif _, err := bw.Write(b[:4]); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, t := range tombstones {\n\t\tbinary.BigEndian.PutUint32(b[:4], uint32(len(t.Key)))\n\t\tif _, err := bw.Write(b[:4]); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := bw.WriteString(t.Key); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbinary.BigEndian.PutUint64(b[:], uint64(t.Min))\n\t\tif _, err := bw.Write(b[:]); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbinary.BigEndian.PutUint64(b[:], 
uint64(t.Max))\n\t\tif _, err := bw.Write(b[:]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := bw.Flush(); err != nil {\n\t\treturn err\n\t}\n\n\t// fsync the file to flush the write\n\tif err := tmp.Sync(); err != nil {\n\t\treturn err\n\t}\n\n\ttmpFilename := tmp.Name()\n\ttmp.Close()\n\n\tif err := renameFile(tmpFilename, t.tombstonePath()); err != nil {\n\t\treturn err\n\t}\n\n\treturn syncDir(filepath.Dir(t.tombstonePath()))\n}\n\nfunc (t *Tombstoner) readTombstone() ([]Tombstone, error) {\n\tvar tombstones []Tombstone\n\n\tif err := t.Walk(func(t Tombstone) error {\n\t\ttombstones = append(tombstones, t)\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\treturn tombstones, nil\n}\n\n// readTombstoneV1 reads the first version of tombstone files that were not\n// capable of storing a min and max time for a key.  This is used for backwards\n// compatibility with versions prior to 0.13.  This format is a simple newline\n// separated text file.\nfunc (t *Tombstoner) readTombstoneV1(f *os.File, fn func(t Tombstone) error) error {\n\tr := bufio.NewScanner(f)\n\tfor r.Scan() {\n\t\tline := r.Text()\n\t\tif line == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif err := fn(Tombstone{\n\t\t\tKey: line,\n\t\t\tMin: math.MinInt64,\n\t\t\tMax: math.MaxInt64,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn r.Err()\n}\n\n// readTombstoneV2 reads the second version of tombstone files that are capable\n// of storing keys and the range of time for the key that points were deleted. 
This\n// format is binary.\nfunc (t *Tombstoner) readTombstoneV2(f *os.File, fn func(t Tombstone) error) error {\n\t// Skip header, already checked earlier\n\tif _, err := f.Seek(v2headerSize, io.SeekStart); err != nil {\n\t\treturn err\n\t}\n\tn := int64(4)\n\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsize := fi.Size()\n\n\tvar (\n\t\tmin, max int64\n\t\tkey      string\n\t)\n\tb := make([]byte, 4096)\n\tfor {\n\t\tif n >= size {\n\t\t\treturn nil\n\t\t}\n\n\t\tif _, err = f.Read(b[:4]); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tn += 4\n\n\t\tkeyLen := int(binary.BigEndian.Uint32(b[:4]))\n\t\tif keyLen > len(b) {\n\t\t\tb = make([]byte, keyLen)\n\t\t}\n\n\t\tif _, err := f.Read(b[:keyLen]); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tkey = string(b[:keyLen])\n\t\tn += int64(keyLen)\n\n\t\tif _, err := f.Read(b[:8]); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tn += 8\n\n\t\tmin = int64(binary.BigEndian.Uint64(b[:8]))\n\n\t\tif _, err := f.Read(b[:8]); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tn += 8\n\t\tmax = int64(binary.BigEndian.Uint64(b[:8]))\n\n\t\tif err := fn(Tombstone{\n\t\t\tKey: key,\n\t\t\tMin: min,\n\t\t\tMax: max,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\nfunc (t *Tombstoner) tombstonePath() string {\n\tif strings.HasSuffix(t.Path, \"tombstone\") {\n\t\treturn t.Path\n\t}\n\n\t// Filename is 0000001.tsm1\n\tfilename := filepath.Base(t.Path)\n\n\t// Strip off the tsm1\n\text := filepath.Ext(filename)\n\tif ext != \"\" {\n\t\tfilename = strings.TrimSuffix(filename, ext)\n\t}\n\n\t// Append the \"tombstone\" suffix to create a 0000001.tombstone file\n\treturn filepath.Join(filepath.Dir(t.Path), filename+\".tombstone\")\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/tombstone_test.go",
    "content": "package tsm1_test\n\nimport (\n\t\"io/ioutil\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/tsdb/engine/tsm1\"\n)\n\nfunc TestTombstoner_Add(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer func() { os.RemoveAll(dir) }()\n\n\tf := MustTempFile(dir)\n\tts := &tsm1.Tombstoner{Path: f.Name()}\n\n\tentries, err := ts.ReadAll()\n\tif err != nil {\n\t\tfatal(t, \"ReadAll\", err)\n\t}\n\n\tif got, exp := len(entries), 0; got != exp {\n\t\tt.Fatalf(\"length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tstats := ts.TombstoneFiles()\n\tif got, exp := len(stats), 0; got != exp {\n\t\tt.Fatalf(\"stat length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tts.Add([]string{\"foo\"})\n\n\tentries, err = ts.ReadAll()\n\tif err != nil {\n\t\tfatal(t, \"ReadAll\", err)\n\t}\n\n\tstats = ts.TombstoneFiles()\n\tif got, exp := len(stats), 1; got != exp {\n\t\tt.Fatalf(\"stat length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif stats[0].Size == 0 {\n\t\tt.Fatalf(\"got size %v, exp > 0\", stats[0].Size)\n\t}\n\n\tif stats[0].LastModified == 0 {\n\t\tt.Fatalf(\"got lastModified %v, exp > 0\", stats[0].LastModified)\n\t}\n\n\tif stats[0].Path == \"\" {\n\t\tt.Fatalf(\"got path %v, exp != ''\", stats[0].Path)\n\t}\n\n\tif got, exp := len(entries), 1; got != exp {\n\t\tt.Fatalf(\"length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := entries[0].Key, \"foo\"; got != exp {\n\t\tt.Fatalf(\"value mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\t// Use a new Tombstoner to verify values are persisted\n\tts = &tsm1.Tombstoner{Path: f.Name()}\n\tentries, err = ts.ReadAll()\n\tif err != nil {\n\t\tfatal(t, \"ReadAll\", err)\n\t}\n\n\tif got, exp := len(entries), 1; got != exp {\n\t\tt.Fatalf(\"length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := entries[0].Key, \"foo\"; got != exp {\n\t\tt.Fatalf(\"value mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTombstoner_Add_Empty(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer 
func() { os.RemoveAll(dir) }()\n\n\tf := MustTempFile(dir)\n\tts := &tsm1.Tombstoner{Path: f.Name()}\n\n\tentries, err := ts.ReadAll()\n\tif err != nil {\n\t\tfatal(t, \"ReadAll\", err)\n\t}\n\n\tif got, exp := len(entries), 0; got != exp {\n\t\tt.Fatalf(\"length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tts.Add([]string{})\n\n\t// Use a new Tombstoner to verify values are persisted\n\tts = &tsm1.Tombstoner{Path: f.Name()}\n\tentries, err = ts.ReadAll()\n\tif err != nil {\n\t\tfatal(t, \"ReadAll\", err)\n\t}\n\n\tif got, exp := len(entries), 0; got != exp {\n\t\tt.Fatalf(\"length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tstats := ts.TombstoneFiles()\n\tif got, exp := len(stats), 0; got != exp {\n\t\tt.Fatalf(\"stat length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n}\n\nfunc TestTombstoner_Delete(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer func() { os.RemoveAll(dir) }()\n\n\tf := MustTempFile(dir)\n\tts := &tsm1.Tombstoner{Path: f.Name()}\n\n\tts.Add([]string{\"foo\"})\n\n\t// Use a new Tombstoner to verify values are persisted\n\tts = &tsm1.Tombstoner{Path: f.Name()}\n\tentries, err := ts.ReadAll()\n\tif err != nil {\n\t\tfatal(t, \"ReadAll\", err)\n\t}\n\n\tif got, exp := len(entries), 1; got != exp {\n\t\tt.Fatalf(\"length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := entries[0].Key, \"foo\"; got != exp {\n\t\tt.Fatalf(\"value mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif err := ts.Delete(); err != nil {\n\t\tfatal(t, \"delete tombstone\", err)\n\t}\n\n\tstats := ts.TombstoneFiles()\n\tif got, exp := len(stats), 0; got != exp {\n\t\tt.Fatalf(\"stat length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tts = &tsm1.Tombstoner{Path: f.Name()}\n\tentries, err = ts.ReadAll()\n\tif err != nil {\n\t\tfatal(t, \"ReadAll\", err)\n\t}\n\n\tif got, exp := len(entries), 0; got != exp {\n\t\tt.Fatalf(\"length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTombstoner_ReadV1(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer 
func() { os.RemoveAll(dir) }()\n\n\tf := MustTempFile(dir)\n\tif err := ioutil.WriteFile(f.Name(), []byte(\"foo\\n\"), 0x0600); err != nil {\n\t\tt.Fatalf(\"write v1 file: %v\", err)\n\t}\n\tf.Close()\n\n\tif err := os.Rename(f.Name(), f.Name()+\".tombstone\"); err != nil {\n\t\tt.Fatalf(\"rename tombstone failed: %v\", err)\n\t}\n\n\tts := &tsm1.Tombstoner{Path: f.Name()}\n\n\t_, err := ts.ReadAll()\n\tif err != nil {\n\t\tfatal(t, \"ReadAll\", err)\n\t}\n\n\tentries, err := ts.ReadAll()\n\tif err != nil {\n\t\tfatal(t, \"ReadAll\", err)\n\t}\n\n\tif got, exp := len(entries), 1; got != exp {\n\t\tt.Fatalf(\"length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := entries[0].Key, \"foo\"; got != exp {\n\t\tt.Fatalf(\"value mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\t// Use a new Tombstoner to verify values are persisted\n\tts = &tsm1.Tombstoner{Path: f.Name()}\n\tentries, err = ts.ReadAll()\n\tif err != nil {\n\t\tfatal(t, \"ReadAll\", err)\n\t}\n\n\tif got, exp := len(entries), 1; got != exp {\n\t\tt.Fatalf(\"length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := entries[0].Key, \"foo\"; got != exp {\n\t\tt.Fatalf(\"value mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTombstoner_ReadEmptyV1(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer func() { os.RemoveAll(dir) }()\n\n\tf := MustTempFile(dir)\n\tf.Close()\n\n\tif err := os.Rename(f.Name(), f.Name()+\".tombstone\"); err != nil {\n\t\tt.Fatalf(\"rename tombstone failed: %v\", err)\n\t}\n\n\tts := &tsm1.Tombstoner{Path: f.Name()}\n\n\t_, err := ts.ReadAll()\n\tif err != nil {\n\t\tfatal(t, \"ReadAll\", err)\n\t}\n\n\tentries, err := ts.ReadAll()\n\tif err != nil {\n\t\tfatal(t, \"ReadAll\", err)\n\t}\n\n\tif got, exp := len(entries), 0; got != exp {\n\t\tt.Fatalf(\"length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/wal.go",
    "content": "package tsm1\n\nimport (\n\t\"bufio\"\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/golang/snappy\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/limiter\"\n\t\"github.com/influxdata/influxdb/pkg/pool\"\n\t\"github.com/uber-go/zap\"\n)\n\nconst (\n\t// DefaultSegmentSize of 10MB is the size at which segment files will be rolled over.\n\tDefaultSegmentSize = 10 * 1024 * 1024\n\n\t// WALFileExtension is the file extension we expect for wal segments.\n\tWALFileExtension = \"wal\"\n\n\t// WALFilePrefix is the prefix on all wal segment files.\n\tWALFilePrefix = \"_\"\n\n\t// walEncodeBufSize is the size of the wal entry encoding buffer\n\twalEncodeBufSize = 4 * 1024 * 1024\n\n\tfloat64EntryType = 1\n\tintegerEntryType = 2\n\tbooleanEntryType = 3\n\tstringEntryType  = 4\n)\n\n// WalEntryType is a byte written to a wal segment file that indicates what the following compressed block contains.\ntype WalEntryType byte\n\nconst (\n\t// WriteWALEntryType indicates a write entry.\n\tWriteWALEntryType WalEntryType = 0x01\n\n\t// DeleteWALEntryType indicates a delete entry.\n\tDeleteWALEntryType WalEntryType = 0x02\n\n\t// DeleteRangeWALEntryType indicates a delete range entry.\n\tDeleteRangeWALEntryType WalEntryType = 0x03\n)\n\nvar (\n\t// ErrWALClosed is returned when attempting to write to a closed WAL file.\n\tErrWALClosed = fmt.Errorf(\"WAL closed\")\n\n\t// ErrWALCorrupt is returned when reading a corrupt WAL entry.\n\tErrWALCorrupt = fmt.Errorf(\"corrupted WAL entry\")\n\n\tdefaultWaitingWALWrites = runtime.GOMAXPROCS(0) * 2\n\n\t// bytePool is a shared bytes pool buffer re-cycle []byte slices to reduce allocations.\n\tbytesPool = pool.NewLimitedBytes(256, walEncodeBufSize*2)\n)\n\n// Statistics gathered by the WAL.\nconst (\n\tstatWALOldBytes     = 
\"oldSegmentsDiskBytes\"\n\tstatWALCurrentBytes = \"currentSegmentDiskBytes\"\n\tstatWriteOk         = \"writeOk\"\n\tstatWriteErr        = \"writeErr\"\n)\n\n// WAL represents the write-ahead log used for writing TSM files.\ntype WAL struct {\n\t// goroutines waiting for the next fsync\n\tsyncCount   uint64\n\tsyncWaiters chan chan error\n\n\tmu            sync.RWMutex\n\tlastWriteTime time.Time\n\n\tpath string\n\n\t// write variables\n\tcurrentSegmentID     int\n\tcurrentSegmentWriter *WALSegmentWriter\n\n\t// cache and flush variables\n\tonce    sync.Once\n\tclosing chan struct{}\n\n\t// syncDelay sets the duration to wait before fsyncing writes.  A value of 0 (default)\n\t// will cause every write to be fsync'd.  This must be set before the WAL\n\t// is opened if a non-default value is required.\n\tsyncDelay time.Duration\n\n\t// WALOutput is the writer used by the logger.\n\tlogger       zap.Logger // Logger to be used for important messages\n\ttraceLogger  zap.Logger // Logger to be used when trace-logging is on.\n\ttraceLogging bool\n\n\t// SegmentSize is the file size at which a segment file will be rotated\n\tSegmentSize int\n\n\t// statistics for the WAL\n\tstats   *WALStatistics\n\tlimiter limiter.Fixed\n}\n\n// NewWAL initializes a new WAL at the given directory.\nfunc NewWAL(path string) *WAL {\n\tlogger := zap.New(zap.NullEncoder())\n\treturn &WAL{\n\t\tpath: path,\n\n\t\t// these options should be overriden by any options in the config\n\t\tSegmentSize: DefaultSegmentSize,\n\t\tclosing:     make(chan struct{}),\n\t\tsyncWaiters: make(chan chan error, 1024),\n\t\tstats:       &WALStatistics{},\n\t\tlimiter:     limiter.NewFixed(defaultWaitingWALWrites),\n\t\tlogger:      logger,\n\t\ttraceLogger: logger,\n\t}\n}\n\n// enableTraceLogging must be called before the WAL is opened.\nfunc (l *WAL) enableTraceLogging(enabled bool) {\n\tl.traceLogging = enabled\n\tif enabled {\n\t\tl.traceLogger = l.logger\n\t}\n}\n\n// WithLogger sets the WAL's 
logger.\nfunc (l *WAL) WithLogger(log zap.Logger) {\n\tl.logger = log.With(zap.String(\"service\", \"wal\"))\n\n\tif l.traceLogging {\n\t\tl.traceLogger = l.logger\n\t}\n}\n\n// WALStatistics maintains statistics about the WAL.\ntype WALStatistics struct {\n\tOldBytes     int64\n\tCurrentBytes int64\n\tWriteOK      int64\n\tWriteErr     int64\n}\n\n// Statistics returns statistics for periodic monitoring.\nfunc (l *WAL) Statistics(tags map[string]string) []models.Statistic {\n\treturn []models.Statistic{{\n\t\tName: \"tsm1_wal\",\n\t\tTags: tags,\n\t\tValues: map[string]interface{}{\n\t\t\tstatWALOldBytes:     atomic.LoadInt64(&l.stats.OldBytes),\n\t\t\tstatWALCurrentBytes: atomic.LoadInt64(&l.stats.CurrentBytes),\n\t\t\tstatWriteOk:         atomic.LoadInt64(&l.stats.WriteOK),\n\t\t\tstatWriteErr:        atomic.LoadInt64(&l.stats.WriteErr),\n\t\t},\n\t}}\n}\n\n// Path returns the directory the log was initialized with.\nfunc (l *WAL) Path() string {\n\tl.mu.RLock()\n\tdefer l.mu.RUnlock()\n\treturn l.path\n}\n\n// Open opens and initializes the Log. 
Open can recover from previous unclosed shutdowns.\nfunc (l *WAL) Open() error {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\n\tl.traceLogger.Info(fmt.Sprintf(\"tsm1 WAL starting with %d segment size\", l.SegmentSize))\n\tl.traceLogger.Info(fmt.Sprintf(\"tsm1 WAL writing to %s\", l.path))\n\n\tif err := os.MkdirAll(l.path, 0777); err != nil {\n\t\treturn err\n\t}\n\n\tsegments, err := segmentFileNames(l.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(segments) > 0 {\n\t\tlastSegment := segments[len(segments)-1]\n\t\tid, err := idFromFileName(lastSegment)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tl.currentSegmentID = id\n\t\tstat, err := os.Stat(lastSegment)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif stat.Size() == 0 {\n\t\t\tos.Remove(lastSegment)\n\t\t\tsegments = segments[:len(segments)-1]\n\t\t}\n\t\tif err := l.newSegmentFile(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar totalOldDiskSize int64\n\tfor _, seg := range segments {\n\t\tstat, err := os.Stat(seg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ttotalOldDiskSize += stat.Size()\n\t\tif stat.ModTime().After(l.lastWriteTime) {\n\t\t\tl.lastWriteTime = stat.ModTime().UTC()\n\t\t}\n\t}\n\tatomic.StoreInt64(&l.stats.OldBytes, totalOldDiskSize)\n\n\tl.closing = make(chan struct{})\n\n\treturn nil\n}\n\n// scheduleSync will schedule an fsync to the current wal segment and notify any\n// waiting gorutines.  
If an fsync is already scheduled, subsequent calls will\n// not schedule a new fsync and will be handle by the existing scheduled fsync.\nfunc (l *WAL) scheduleSync() {\n\t// If we're not the first to sync, then another goroutine is fsyncing the wal for us.\n\tif !atomic.CompareAndSwapUint64(&l.syncCount, 0, 1) {\n\t\treturn\n\t}\n\n\t// Fsync the wal and notify all pending waiters\n\tgo func() {\n\t\tvar timerCh <-chan time.Time\n\n\t\t// time.NewTicker requires a > 0 delay, since 0 indicates no delay, use a closed\n\t\t// channel which will always be ready to read from.\n\t\tif l.syncDelay == 0 {\n\t\t\t// Create a RW chan and close it\n\t\t\ttimerChrw := make(chan time.Time)\n\t\t\tclose(timerChrw)\n\t\t\t// Convert it to a read-only\n\t\t\ttimerCh = timerChrw\n\t\t} else {\n\t\t\tt := time.NewTicker(l.syncDelay)\n\t\t\tdefer t.Stop()\n\t\t\ttimerCh = t.C\n\t\t}\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-timerCh:\n\t\t\t\tl.mu.Lock()\n\t\t\t\tif len(l.syncWaiters) == 0 {\n\t\t\t\t\tatomic.StoreUint64(&l.syncCount, 0)\n\t\t\t\t\tl.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tl.sync()\n\t\t\t\tl.mu.Unlock()\n\t\t\tcase <-l.closing:\n\t\t\t\tatomic.StoreUint64(&l.syncCount, 0)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\n// sync fsyncs the current wal segments and notifies any waiters.  Callers must ensure\n// a write lock on the WAL is obtained before calling sync.\nfunc (l *WAL) sync() {\n\terr := l.currentSegmentWriter.sync()\n\tfor len(l.syncWaiters) > 0 {\n\t\terrC := <-l.syncWaiters\n\t\terrC <- err\n\t}\n}\n\n// WriteMulti writes the given values to the WAL. It returns the WAL segment ID to\n// which the points were written. 
If an error is returned the segment ID should\n// be ignored.\nfunc (l *WAL) WriteMulti(values map[string][]Value) (int, error) {\n\tentry := &WriteWALEntry{\n\t\tValues: values,\n\t}\n\n\tid, err := l.writeToLog(entry)\n\tif err != nil {\n\t\tatomic.AddInt64(&l.stats.WriteErr, 1)\n\t\treturn -1, err\n\t}\n\tatomic.AddInt64(&l.stats.WriteOK, 1)\n\n\treturn id, nil\n}\n\n// ClosedSegments returns a slice of the names of the closed segment files.\nfunc (l *WAL) ClosedSegments() ([]string, error) {\n\tl.mu.RLock()\n\tdefer l.mu.RUnlock()\n\t// Not loading files from disk so nothing to do\n\tif l.path == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tvar currentFile string\n\tif l.currentSegmentWriter != nil {\n\t\tcurrentFile = l.currentSegmentWriter.path()\n\t}\n\n\tfiles, err := segmentFileNames(l.path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar closedFiles []string\n\tfor _, fn := range files {\n\t\t// Skip the current path\n\t\tif fn == currentFile {\n\t\t\tcontinue\n\t\t}\n\n\t\tclosedFiles = append(closedFiles, fn)\n\t}\n\n\treturn closedFiles, nil\n}\n\n// Remove deletes the given segment file paths from disk and cleans up any associated objects.\nfunc (l *WAL) Remove(files []string) error {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\tfor _, fn := range files {\n\t\tl.traceLogger.Info(fmt.Sprintf(\"Removing %s\", fn))\n\t\tos.RemoveAll(fn)\n\t}\n\n\t// Refresh the on-disk size stats\n\tsegments, err := segmentFileNames(l.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar totalOldDiskSize int64\n\tfor _, seg := range segments {\n\t\tstat, err := os.Stat(seg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ttotalOldDiskSize += stat.Size()\n\t}\n\tatomic.StoreInt64(&l.stats.OldBytes, totalOldDiskSize)\n\n\treturn nil\n}\n\n// LastWriteTime is the last time anything was written to the WAL.\nfunc (l *WAL) LastWriteTime() time.Time {\n\tl.mu.RLock()\n\tdefer l.mu.RUnlock()\n\treturn l.lastWriteTime\n}\n\nfunc (l *WAL) DiskSizeBytes() int64 {\n\treturn 
atomic.LoadInt64(&l.stats.OldBytes) + atomic.LoadInt64(&l.stats.CurrentBytes)\n}\n\nfunc (l *WAL) writeToLog(entry WALEntry) (int, error) {\n\t// limit how many concurrent encodings can be in flight.  Since we can only\n\t// write one at a time to disk, a slow disk can cause the allocations below\n\t// to increase quickly.  If we're backed up, wait until others have completed.\n\tbytes := bytesPool.Get(entry.MarshalSize())\n\n\tb, err := entry.Encode(bytes)\n\tif err != nil {\n\t\tbytesPool.Put(bytes)\n\t\treturn -1, err\n\t}\n\n\tencBuf := bytesPool.Get(snappy.MaxEncodedLen(len(b)))\n\n\tcompressed := snappy.Encode(encBuf, b)\n\tbytesPool.Put(bytes)\n\n\tsyncErr := make(chan error)\n\n\tsegID, err := func() (int, error) {\n\t\tl.mu.Lock()\n\t\tdefer l.mu.Unlock()\n\n\t\t// Make sure the log has not been closed\n\t\tselect {\n\t\tcase <-l.closing:\n\t\t\treturn -1, ErrWALClosed\n\t\tdefault:\n\t\t}\n\n\t\t// roll the segment file if needed\n\t\tif err := l.rollSegment(); err != nil {\n\t\t\treturn -1, fmt.Errorf(\"error rolling WAL segment: %v\", err)\n\t\t}\n\n\t\t// write and sync\n\t\tif err := l.currentSegmentWriter.Write(entry.Type(), compressed); err != nil {\n\t\t\treturn -1, fmt.Errorf(\"error writing WAL entry: %v\", err)\n\t\t}\n\n\t\tselect {\n\t\tcase l.syncWaiters <- syncErr:\n\t\tdefault:\n\t\t\treturn -1, fmt.Errorf(\"error syncing wal\")\n\t\t}\n\t\tl.scheduleSync()\n\n\t\t// Update stats for current segment size\n\t\tatomic.StoreInt64(&l.stats.CurrentBytes, int64(l.currentSegmentWriter.size))\n\n\t\tl.lastWriteTime = time.Now()\n\n\t\treturn l.currentSegmentID, nil\n\n\t}()\n\n\tbytesPool.Put(encBuf)\n\n\tif err != nil {\n\t\treturn segID, err\n\t}\n\n\t// schedule an fsync and wait for it to complete\n\treturn segID, <-syncErr\n}\n\n// rollSegment checks if the current segment is due to roll over to a new segment;\n// and if so, opens a new segment file for future writes.\nfunc (l *WAL) rollSegment() error {\n\tif l.currentSegmentWriter == nil || 
l.currentSegmentWriter.size > DefaultSegmentSize {\n\t\tif err := l.newSegmentFile(); err != nil {\n\t\t\t// A drop database or RP call could trigger this error if writes were in-flight\n\t\t\t// when the drop statement executes.\n\t\t\treturn fmt.Errorf(\"error opening new segment file for wal (2): %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn nil\n}\n\n// CloseSegment closes the current segment if it is non-empty and opens a new one.\nfunc (l *WAL) CloseSegment() error {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\tif l.currentSegmentWriter == nil || l.currentSegmentWriter.size > 0 {\n\t\tif err := l.newSegmentFile(); err != nil {\n\t\t\t// A drop database or RP call could trigger this error if writes were in-flight\n\t\t\t// when the drop statement executes.\n\t\t\treturn fmt.Errorf(\"error opening new segment file for wal (1): %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\treturn nil\n}\n\n// Delete deletes the given keys, returning the segment ID for the operation.\nfunc (l *WAL) Delete(keys []string) (int, error) {\n\tif len(keys) == 0 {\n\t\treturn 0, nil\n\t}\n\tentry := &DeleteWALEntry{\n\t\tKeys: keys,\n\t}\n\n\tid, err := l.writeToLog(entry)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\treturn id, nil\n}\n\n// DeleteRange deletes the given keys within the given time range,\n// returning the segment ID for the operation.\nfunc (l *WAL) DeleteRange(keys []string, min, max int64) (int, error) {\n\tif len(keys) == 0 {\n\t\treturn 0, nil\n\t}\n\tentry := &DeleteRangeWALEntry{\n\t\tKeys: keys,\n\t\tMin:  min,\n\t\tMax:  max,\n\t}\n\n\tid, err := l.writeToLog(entry)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\treturn id, nil\n}\n\n// Close will finish any flush that is currently in progress and close file handles.\nfunc (l *WAL) Close() error {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\n\tl.once.Do(func() {\n\t\t// Close, but don't set to nil so future goroutines can still be signaled\n\t\tl.traceLogger.Info(fmt.Sprintf(\"Closing %s\", 
l.path))\n\t\tclose(l.closing)\n\n\t\tif l.currentSegmentWriter != nil {\n\t\t\tl.sync()\n\t\t\tl.currentSegmentWriter.close()\n\t\t\tl.currentSegmentWriter = nil\n\t\t}\n\t})\n\n\treturn nil\n}\n\n// segmentFileNames will return all files that are WAL segment files in sorted order by ascending ID.\nfunc segmentFileNames(dir string) ([]string, error) {\n\tnames, err := filepath.Glob(filepath.Join(dir, fmt.Sprintf(\"%s*.%s\", WALFilePrefix, WALFileExtension)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsort.Strings(names)\n\treturn names, nil\n}\n\n// newSegmentFile will close the current segment file and open a new one, updating bookkeeping info on the log.\nfunc (l *WAL) newSegmentFile() error {\n\tl.currentSegmentID++\n\tif l.currentSegmentWriter != nil {\n\t\tl.sync()\n\n\t\tif err := l.currentSegmentWriter.close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tatomic.StoreInt64(&l.stats.OldBytes, int64(l.currentSegmentWriter.size))\n\t}\n\n\tfileName := filepath.Join(l.path, fmt.Sprintf(\"%s%05d.%s\", WALFilePrefix, l.currentSegmentID, WALFileExtension))\n\tfd, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tl.currentSegmentWriter = NewWALSegmentWriter(fd)\n\n\tif stat, err := fd.Stat(); err == nil {\n\t\tl.lastWriteTime = stat.ModTime()\n\t}\n\n\t// Reset the current segment size stat\n\tatomic.StoreInt64(&l.stats.CurrentBytes, 0)\n\n\treturn nil\n}\n\n// WALEntry is record stored in each WAL segment.  
Each entry has a type\n// and an opaque, type dependent byte slice data attribute.\ntype WALEntry interface {\n\tType() WalEntryType\n\tEncode(dst []byte) ([]byte, error)\n\tMarshalBinary() ([]byte, error)\n\tUnmarshalBinary(b []byte) error\n\tMarshalSize() int\n}\n\n// WriteWALEntry represents a write of points.\ntype WriteWALEntry struct {\n\tValues map[string][]Value\n\tsz     int\n}\n\nfunc (w *WriteWALEntry) MarshalSize() int {\n\tif w.sz > 0 || len(w.Values) == 0 {\n\t\treturn w.sz\n\t}\n\n\tencLen := 7 * len(w.Values) // Type (1), Key Length (2), and Count (4) for each key\n\n\t// determine required length\n\tfor k, v := range w.Values {\n\t\tencLen += len(k)\n\t\tif len(v) == 0 {\n\t\t\treturn 0\n\t\t}\n\n\t\tencLen += 8 * len(v) // timestamps (8)\n\n\t\tswitch v[0].(type) {\n\t\tcase FloatValue, IntegerValue:\n\t\t\tencLen += 8 * len(v)\n\t\tcase BooleanValue:\n\t\t\tencLen += 1 * len(v)\n\t\tcase StringValue:\n\t\t\tfor _, vv := range v {\n\t\t\t\tstr, ok := vv.(StringValue)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn 0\n\t\t\t\t}\n\t\t\t\tencLen += 4 + len(str.value)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn 0\n\t\t}\n\t}\n\n\tw.sz = encLen\n\n\treturn w.sz\n}\n\n// Encode converts the WriteWALEntry into a byte stream using dst if it\n// is large enough.  If dst is too small, the slice will be grown to fit the\n// encoded entry.\nfunc (w *WriteWALEntry) Encode(dst []byte) ([]byte, error) {\n\t// The entries values are encode as follows:\n\t//\n\t// For each key and slice of values, first a 1 byte type for the []Values\n\t// slice is written.  Following the type, the length and key bytes are written.\n\t// Following the key, a 4 byte count followed by each value as a 8 byte time\n\t// and N byte value.  The value is dependent on the type being encoded.  
float64,\n\t// int64, use 8 bytes, boolean uses 1 byte, and string is similar to the key encoding,\n\t// except that string values have a 4-byte length, and keys only use 2 bytes.\n\t//\n\t// This structure is then repeated for each key an value slices.\n\t//\n\t// ┌────────────────────────────────────────────────────────────────────┐\n\t// │                           WriteWALEntry                            │\n\t// ├──────┬─────────┬────────┬───────┬─────────┬─────────┬───┬──────┬───┤\n\t// │ Type │ Key Len │   Key  │ Count │  Time   │  Value  │...│ Type │...│\n\t// │1 byte│ 2 bytes │ N bytes│4 bytes│ 8 bytes │ N bytes │   │1 byte│   │\n\t// └──────┴─────────┴────────┴───────┴─────────┴─────────┴───┴──────┴───┘\n\n\tencLen := w.MarshalSize() // Type (1), Key Length (2), and Count (4) for each key\n\n\t// allocate or re-slice to correct size\n\tif len(dst) < encLen {\n\t\tdst = make([]byte, encLen)\n\t} else {\n\t\tdst = dst[:encLen]\n\t}\n\n\t// Finally, encode the entry\n\tvar n int\n\tvar curType byte\n\n\tfor k, v := range w.Values {\n\t\tswitch v[0].(type) {\n\t\tcase FloatValue:\n\t\t\tcurType = float64EntryType\n\t\tcase IntegerValue:\n\t\t\tcurType = integerEntryType\n\t\tcase BooleanValue:\n\t\t\tcurType = booleanEntryType\n\t\tcase StringValue:\n\t\t\tcurType = stringEntryType\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unsupported value type: %T\", v[0])\n\t\t}\n\t\tdst[n] = curType\n\t\tn++\n\n\t\tbinary.BigEndian.PutUint16(dst[n:n+2], uint16(len(k)))\n\t\tn += 2\n\t\tn += copy(dst[n:], k)\n\n\t\tbinary.BigEndian.PutUint32(dst[n:n+4], uint32(len(v)))\n\t\tn += 4\n\n\t\tfor _, vv := range v {\n\t\t\tbinary.BigEndian.PutUint64(dst[n:n+8], uint64(vv.UnixNano()))\n\t\t\tn += 8\n\n\t\t\tswitch vv := vv.(type) {\n\t\t\tcase FloatValue:\n\t\t\t\tif curType != float64EntryType {\n\t\t\t\t\treturn nil, fmt.Errorf(\"incorrect value found in %T slice: %T\", v[0].Value(), vv)\n\t\t\t\t}\n\t\t\t\tbinary.BigEndian.PutUint64(dst[n:n+8], 
math.Float64bits(vv.value))\n\t\t\t\tn += 8\n\t\t\tcase IntegerValue:\n\t\t\t\tif curType != integerEntryType {\n\t\t\t\t\treturn nil, fmt.Errorf(\"incorrect value found in %T slice: %T\", v[0].Value(), vv)\n\t\t\t\t}\n\t\t\t\tbinary.BigEndian.PutUint64(dst[n:n+8], uint64(vv.value))\n\t\t\t\tn += 8\n\t\t\tcase BooleanValue:\n\t\t\t\tif curType != booleanEntryType {\n\t\t\t\t\treturn nil, fmt.Errorf(\"incorrect value found in %T slice: %T\", v[0].Value(), vv)\n\t\t\t\t}\n\t\t\t\tif vv.value {\n\t\t\t\t\tdst[n] = 1\n\t\t\t\t} else {\n\t\t\t\t\tdst[n] = 0\n\t\t\t\t}\n\t\t\t\tn++\n\t\t\tcase StringValue:\n\t\t\t\tif curType != stringEntryType {\n\t\t\t\t\treturn nil, fmt.Errorf(\"incorrect value found in %T slice: %T\", v[0].Value(), vv)\n\t\t\t\t}\n\t\t\t\tbinary.BigEndian.PutUint32(dst[n:n+4], uint32(len(vv.value)))\n\t\t\t\tn += 4\n\t\t\t\tn += copy(dst[n:], vv.value)\n\t\t\tdefault:\n\t\t\t\treturn nil, fmt.Errorf(\"unsupported value found in %T slice: %T\", v[0].Value(), vv)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn dst[:n], nil\n}\n\n// MarshalBinary returns a binary representation of the entry in a new byte slice.\nfunc (w *WriteWALEntry) MarshalBinary() ([]byte, error) {\n\t// Temp buffer to write marshaled points into\n\tb := make([]byte, w.MarshalSize())\n\treturn w.Encode(b)\n}\n\n// UnmarshalBinary deserializes the byte slice into w.\nfunc (w *WriteWALEntry) UnmarshalBinary(b []byte) error {\n\tvar i int\n\tfor i < len(b) {\n\t\ttyp := b[i]\n\t\ti++\n\n\t\tif i+2 > len(b) {\n\t\t\treturn ErrWALCorrupt\n\t\t}\n\n\t\tlength := int(binary.BigEndian.Uint16(b[i : i+2]))\n\t\ti += 2\n\n\t\tif i+length > len(b) {\n\t\t\treturn ErrWALCorrupt\n\t\t}\n\n\t\tk := string(b[i : i+length])\n\t\ti += length\n\n\t\tif i+4 > len(b) {\n\t\t\treturn ErrWALCorrupt\n\t\t}\n\n\t\tnvals := int(binary.BigEndian.Uint32(b[i : i+4]))\n\t\ti += 4\n\n\t\tswitch typ {\n\t\tcase float64EntryType:\n\t\t\tif i+16*nvals > len(b) {\n\t\t\t\treturn ErrWALCorrupt\n\t\t\t}\n\n\t\t\tvalues := 
make([]Value, 0, nvals)\n\t\t\tfor j := 0; j < nvals; j++ {\n\t\t\t\tun := int64(binary.BigEndian.Uint64(b[i : i+8]))\n\t\t\t\ti += 8\n\t\t\t\tv := math.Float64frombits((binary.BigEndian.Uint64(b[i : i+8])))\n\t\t\t\ti += 8\n\t\t\t\tvalues = append(values, NewFloatValue(un, v))\n\t\t\t}\n\t\t\tw.Values[k] = values\n\t\tcase integerEntryType:\n\t\t\tif i+16*nvals > len(b) {\n\t\t\t\treturn ErrWALCorrupt\n\t\t\t}\n\n\t\t\tvalues := make([]Value, 0, nvals)\n\t\t\tfor j := 0; j < nvals; j++ {\n\t\t\t\tun := int64(binary.BigEndian.Uint64(b[i : i+8]))\n\t\t\t\ti += 8\n\t\t\t\tv := int64(binary.BigEndian.Uint64(b[i : i+8]))\n\t\t\t\ti += 8\n\t\t\t\tvalues = append(values, NewIntegerValue(un, v))\n\t\t\t}\n\t\t\tw.Values[k] = values\n\n\t\tcase booleanEntryType:\n\t\t\tif i+9*nvals > len(b) {\n\t\t\t\treturn ErrWALCorrupt\n\t\t\t}\n\n\t\t\tvalues := make([]Value, 0, nvals)\n\t\t\tfor j := 0; j < nvals; j++ {\n\t\t\t\tun := int64(binary.BigEndian.Uint64(b[i : i+8]))\n\t\t\t\ti += 8\n\n\t\t\t\tv := b[i]\n\t\t\t\ti += 1\n\t\t\t\tif v == 1 {\n\t\t\t\t\tvalues = append(values, NewBooleanValue(un, true))\n\t\t\t\t} else {\n\t\t\t\t\tvalues = append(values, NewBooleanValue(un, false))\n\t\t\t\t}\n\t\t\t}\n\t\t\tw.Values[k] = values\n\n\t\tcase stringEntryType:\n\t\t\tvalues := make([]Value, 0, nvals)\n\t\t\tfor j := 0; j < nvals; j++ {\n\t\t\t\tif i+12 > len(b) {\n\t\t\t\t\treturn ErrWALCorrupt\n\t\t\t\t}\n\n\t\t\t\tun := int64(binary.BigEndian.Uint64(b[i : i+8]))\n\t\t\t\ti += 8\n\n\t\t\t\tlength := int(binary.BigEndian.Uint32(b[i : i+4]))\n\t\t\t\tif i+length > len(b) {\n\t\t\t\t\treturn ErrWALCorrupt\n\t\t\t\t}\n\n\t\t\t\ti += 4\n\n\t\t\t\tif i+length > len(b) {\n\t\t\t\t\treturn ErrWALCorrupt\n\t\t\t\t}\n\n\t\t\t\tv := string(b[i : i+length])\n\t\t\t\ti += length\n\t\t\t\tvalues = append(values, NewStringValue(un, v))\n\t\t\t}\n\t\t\tw.Values[k] = values\n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unsupported value type: %#v\", typ)\n\t\t}\n\t}\n\treturn nil\n}\n\n// Type 
returns WriteWALEntryType.\nfunc (w *WriteWALEntry) Type() WalEntryType {\n\treturn WriteWALEntryType\n}\n\n// DeleteWALEntry represents the deletion of multiple series.\ntype DeleteWALEntry struct {\n\tKeys []string\n\tsz   int\n}\n\n// MarshalBinary returns a binary representation of the entry in a new byte slice.\nfunc (w *DeleteWALEntry) MarshalBinary() ([]byte, error) {\n\tb := make([]byte, w.MarshalSize())\n\treturn w.Encode(b)\n}\n\n// UnmarshalBinary deserializes the byte slice into w.\nfunc (w *DeleteWALEntry) UnmarshalBinary(b []byte) error {\n\tw.Keys = strings.Split(string(b), \"\\n\")\n\treturn nil\n}\n\nfunc (w *DeleteWALEntry) MarshalSize() int {\n\tif w.sz > 0 || len(w.Keys) == 0 {\n\t\treturn w.sz\n\t}\n\n\tencLen := len(w.Keys) // newlines\n\tfor _, k := range w.Keys {\n\t\tencLen += len(k)\n\t}\n\n\tw.sz = encLen\n\n\treturn encLen\n}\n\n// Encode converts the DeleteWALEntry into a byte slice, appending to dst.\nfunc (w *DeleteWALEntry) Encode(dst []byte) ([]byte, error) {\n\tsz := w.MarshalSize()\n\n\tif len(dst) < sz {\n\t\tdst = make([]byte, sz)\n\t}\n\n\tvar n int\n\tfor _, k := range w.Keys {\n\t\tn += copy(dst[n:], k)\n\t\tn += copy(dst[n:], \"\\n\")\n\t}\n\n\t// We return n-1 to strip off the last newline so that unmarshalling the value\n\t// does not produce an empty string\n\treturn []byte(dst[:n-1]), nil\n}\n\n// Type returns DeleteWALEntryType.\nfunc (w *DeleteWALEntry) Type() WalEntryType {\n\treturn DeleteWALEntryType\n}\n\n// DeleteRangeWALEntry represents the deletion of multiple series.\ntype DeleteRangeWALEntry struct {\n\tKeys     []string\n\tMin, Max int64\n\tsz       int\n}\n\n// MarshalBinary returns a binary representation of the entry in a new byte slice.\nfunc (w *DeleteRangeWALEntry) MarshalBinary() ([]byte, error) {\n\tb := make([]byte, w.MarshalSize())\n\treturn w.Encode(b)\n}\n\n// UnmarshalBinary deserializes the byte slice into w.\nfunc (w *DeleteRangeWALEntry) UnmarshalBinary(b []byte) error {\n\tif len(b) < 16 
{\n\t\treturn ErrWALCorrupt\n\t}\n\n\tw.Min = int64(binary.BigEndian.Uint64(b[:8]))\n\tw.Max = int64(binary.BigEndian.Uint64(b[8:16]))\n\n\ti := 16\n\tfor i < len(b) {\n\t\tif i+4 > len(b) {\n\t\t\treturn ErrWALCorrupt\n\t\t}\n\t\tsz := int(binary.BigEndian.Uint32(b[i : i+4]))\n\t\ti += 4\n\n\t\tif i+sz > len(b) {\n\t\t\treturn ErrWALCorrupt\n\t\t}\n\t\tw.Keys = append(w.Keys, string(b[i:i+sz]))\n\t\ti += sz\n\t}\n\treturn nil\n}\n\nfunc (w *DeleteRangeWALEntry) MarshalSize() int {\n\tif w.sz > 0 {\n\t\treturn w.sz\n\t}\n\n\tsz := 16 + len(w.Keys)*4\n\tfor _, k := range w.Keys {\n\t\tsz += len(k)\n\t}\n\n\tw.sz = sz\n\n\treturn sz\n}\n\n// Encode converts the DeleteRangeWALEntry into a byte slice, appending to b.\nfunc (w *DeleteRangeWALEntry) Encode(b []byte) ([]byte, error) {\n\tsz := w.MarshalSize()\n\n\tif len(b) < sz {\n\t\tb = make([]byte, sz)\n\t}\n\n\tbinary.BigEndian.PutUint64(b[:8], uint64(w.Min))\n\tbinary.BigEndian.PutUint64(b[8:16], uint64(w.Max))\n\n\ti := 16\n\tfor _, k := range w.Keys {\n\t\tbinary.BigEndian.PutUint32(b[i:i+4], uint32(len(k)))\n\t\ti += 4\n\t\ti += copy(b[i:], k)\n\t}\n\n\treturn b[:i], nil\n}\n\n// Type returns DeleteRangeWALEntryType.\nfunc (w *DeleteRangeWALEntry) Type() WalEntryType {\n\treturn DeleteRangeWALEntryType\n}\n\n// WALSegmentWriter writes WAL segments.\ntype WALSegmentWriter struct {\n\tbw   *bufio.Writer\n\tw    io.WriteCloser\n\tsize int\n}\n\n// NewWALSegmentWriter returns a new WALSegmentWriter writing to w.\nfunc NewWALSegmentWriter(w io.WriteCloser) *WALSegmentWriter {\n\treturn &WALSegmentWriter{\n\t\tbw: bufio.NewWriter(w),\n\t\tw:  w,\n\t}\n}\n\nfunc (w *WALSegmentWriter) path() string {\n\tif f, ok := w.w.(*os.File); ok {\n\t\treturn f.Name()\n\t}\n\treturn \"\"\n}\n\n// Write writes entryType and the buffer containing compressed entry data.\nfunc (w *WALSegmentWriter) Write(entryType WalEntryType, compressed []byte) error {\n\tvar buf [5]byte\n\tbuf[0] = 
byte(entryType)\n\tbinary.BigEndian.PutUint32(buf[1:5], uint32(len(compressed)))\n\n\tif _, err := w.bw.Write(buf[:]); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := w.bw.Write(compressed); err != nil {\n\t\treturn err\n\t}\n\n\tw.size += len(buf) + len(compressed)\n\n\treturn nil\n}\n\n// Sync flushes the file systems in-memory copy of recently written data to disk,\n// if w is writing to an os.File.\nfunc (w *WALSegmentWriter) sync() error {\n\tif err := w.bw.Flush(); err != nil {\n\t\treturn err\n\t}\n\n\tif f, ok := w.w.(*os.File); ok {\n\t\treturn f.Sync()\n\t}\n\treturn nil\n}\n\nfunc (w *WALSegmentWriter) Flush() error {\n\treturn w.bw.Flush()\n}\n\nfunc (w *WALSegmentWriter) close() error {\n\tif err := w.Flush(); err != nil {\n\t\treturn err\n\t}\n\treturn w.w.Close()\n}\n\n// WALSegmentReader reads WAL segments.\ntype WALSegmentReader struct {\n\trc    io.ReadCloser\n\tr     *bufio.Reader\n\tentry WALEntry\n\tn     int64\n\terr   error\n}\n\n// NewWALSegmentReader returns a new WALSegmentReader reading from r.\nfunc NewWALSegmentReader(r io.ReadCloser) *WALSegmentReader {\n\treturn &WALSegmentReader{\n\t\trc: r,\n\t\tr:  bufio.NewReader(r),\n\t}\n}\n\nfunc (r *WALSegmentReader) Reset(rc io.ReadCloser) {\n\tr.rc = rc\n\tr.r.Reset(rc)\n\tr.entry = nil\n\tr.n = 0\n\tr.err = nil\n}\n\n// Next indicates if there is a value to read.\nfunc (r *WALSegmentReader) Next() bool {\n\tvar nReadOK int\n\n\t// read the type and the length of the entry\n\tvar lv [5]byte\n\tn, err := io.ReadFull(r.r, lv[:])\n\tif err == io.EOF {\n\t\treturn false\n\t}\n\n\tif err != nil {\n\t\tr.err = err\n\t\t// We return true here because we want the client code to call read which\n\t\t// will return the this error to be handled.\n\t\treturn true\n\t}\n\tnReadOK += n\n\n\tentryType := lv[0]\n\tlength := binary.BigEndian.Uint32(lv[1:5])\n\n\tb := *(getBuf(int(length)))\n\tdefer putBuf(&b)\n\n\t// read the compressed block and decompress it\n\tn, err = io.ReadFull(r.r, 
b[:length])\n\tif err != nil {\n\t\tr.err = err\n\t\treturn true\n\t}\n\tnReadOK += n\n\n\tdecLen, err := snappy.DecodedLen(b[:length])\n\tif err != nil {\n\t\tr.err = err\n\t\treturn true\n\t}\n\tdecBuf := *(getBuf(decLen))\n\tdefer putBuf(&decBuf)\n\n\tdata, err := snappy.Decode(decBuf, b[:length])\n\tif err != nil {\n\t\tr.err = err\n\t\treturn true\n\t}\n\n\t// and marshal it and send it to the cache\n\tswitch WalEntryType(entryType) {\n\tcase WriteWALEntryType:\n\t\tr.entry = &WriteWALEntry{\n\t\t\tValues: make(map[string][]Value),\n\t\t}\n\tcase DeleteWALEntryType:\n\t\tr.entry = &DeleteWALEntry{}\n\tcase DeleteRangeWALEntryType:\n\t\tr.entry = &DeleteRangeWALEntry{}\n\tdefault:\n\t\tr.err = fmt.Errorf(\"unknown wal entry type: %v\", entryType)\n\t\treturn true\n\t}\n\tr.err = r.entry.UnmarshalBinary(data)\n\tif r.err == nil {\n\t\t// Read and decode of this entry was successful.\n\t\tr.n += int64(nReadOK)\n\t}\n\n\treturn true\n}\n\n// Read returns the next entry in the reader.\nfunc (r *WALSegmentReader) Read() (WALEntry, error) {\n\tif r.err != nil {\n\t\treturn nil, r.err\n\t}\n\treturn r.entry, nil\n}\n\n// Count returns the total number of bytes read successfully from the segment, as\n// of the last call to Read(). 
The segment is guaranteed to be valid up to and\n// including this number of bytes.\nfunc (r *WALSegmentReader) Count() int64 {\n\treturn r.n\n}\n\n// Error returns the last error encountered by the reader.\nfunc (r *WALSegmentReader) Error() error {\n\treturn r.err\n}\n\n// Close closes the underlying io.Reader.\nfunc (r *WALSegmentReader) Close() error {\n\tif r.rc == nil {\n\t\treturn nil\n\t}\n\terr := r.rc.Close()\n\tr.rc = nil\n\treturn err\n}\n\n// idFromFileName parses the segment file ID from its name.\nfunc idFromFileName(name string) (int, error) {\n\tparts := strings.Split(filepath.Base(name), \".\")\n\tif len(parts) != 2 {\n\t\treturn 0, fmt.Errorf(\"file %s has wrong name format to have an id\", name)\n\t}\n\n\tid, err := strconv.ParseUint(parts[0][1:], 10, 32)\n\n\treturn int(id), err\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/wal_test.go",
    "content": "package tsm1_test\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/tsdb/engine/tsm1\"\n\n\t\"github.com/golang/snappy\"\n)\n\nfunc TestWALWriter_WriteMulti_Single(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tw := tsm1.NewWALSegmentWriter(f)\n\n\tp1 := tsm1.NewValue(1, 1.1)\n\tp2 := tsm1.NewValue(1, int64(1))\n\tp3 := tsm1.NewValue(1, true)\n\tp4 := tsm1.NewValue(1, \"string\")\n\n\tvalues := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#float\":  []tsm1.Value{p1},\n\t\t\"cpu,host=A#!~#int\":    []tsm1.Value{p2},\n\t\t\"cpu,host=A#!~#bool\":   []tsm1.Value{p3},\n\t\t\"cpu,host=A#!~#string\": []tsm1.Value{p4},\n\t}\n\n\tentry := &tsm1.WriteWALEntry{\n\t\tValues: values,\n\t}\n\n\tif err := w.Write(mustMarshalEntry(entry)); err != nil {\n\t\tfatal(t, \"write points\", err)\n\t}\n\n\tif err := w.Flush(); err != nil {\n\t\tfatal(t, \"flush\", err)\n\t}\n\n\tif _, err := f.Seek(0, io.SeekStart); err != nil {\n\t\tfatal(t, \"seek\", err)\n\t}\n\n\tr := tsm1.NewWALSegmentReader(f)\n\n\tif !r.Next() {\n\t\tt.Fatalf(\"expected next, got false\")\n\t}\n\n\twe, err := r.Read()\n\tif err != nil {\n\t\tfatal(t, \"read entry\", err)\n\t}\n\n\te, ok := we.(*tsm1.WriteWALEntry)\n\tif !ok {\n\t\tt.Fatalf(\"expected WriteWALEntry: got %#v\", e)\n\t}\n\n\tfor k, v := range e.Values {\n\t\tfor i, vv := range v {\n\t\t\tif got, exp := vv.String(), values[k][i].String(); got != exp {\n\t\t\t\tt.Fatalf(\"points mismatch: got %v, exp %v\", got, exp)\n\t\t\t}\n\t\t}\n\t}\n\n\tif n := r.Count(); n != MustReadFileSize(f) {\n\t\tt.Fatalf(\"wrong count of bytes read, got %d, exp %d\", n, MustReadFileSize(f))\n\t}\n}\n\nfunc TestWALWriter_WriteMulti_LargeBatch(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tw := tsm1.NewWALSegmentWriter(f)\n\n\tvar points []tsm1.Value\n\tfor i := 0; i < 100000; i++ {\n\t\tpoints = append(points, 
tsm1.NewValue(int64(i), int64(1)))\n\t}\n\n\tvalues := map[string][]tsm1.Value{\n\t\t\"cpu,host=A,server=01,foo=bar,tag=really-long#!~#float\": points,\n\t\t\"mem,host=A,server=01,foo=bar,tag=really-long#!~#float\": points,\n\t}\n\n\tentry := &tsm1.WriteWALEntry{\n\t\tValues: values,\n\t}\n\n\tif err := w.Write(mustMarshalEntry(entry)); err != nil {\n\t\tfatal(t, \"write points\", err)\n\t}\n\n\tif err := w.Flush(); err != nil {\n\t\tfatal(t, \"flush\", err)\n\t}\n\n\tif _, err := f.Seek(0, io.SeekStart); err != nil {\n\t\tfatal(t, \"seek\", err)\n\t}\n\n\tr := tsm1.NewWALSegmentReader(f)\n\n\tif !r.Next() {\n\t\tt.Fatalf(\"expected next, got false\")\n\t}\n\n\twe, err := r.Read()\n\tif err != nil {\n\t\tfatal(t, \"read entry\", err)\n\t}\n\n\te, ok := we.(*tsm1.WriteWALEntry)\n\tif !ok {\n\t\tt.Fatalf(\"expected WriteWALEntry: got %#v\", e)\n\t}\n\n\tfor k, v := range e.Values {\n\t\tfor i, vv := range v {\n\t\t\tif got, exp := vv.String(), values[k][i].String(); got != exp {\n\t\t\t\tt.Fatalf(\"points mismatch: got %v, exp %v\", got, exp)\n\t\t\t}\n\t\t}\n\t}\n\n\tif n := r.Count(); n != MustReadFileSize(f) {\n\t\tt.Fatalf(\"wrong count of bytes read, got %d, exp %d\", n, MustReadFileSize(f))\n\t}\n}\nfunc TestWALWriter_WriteMulti_Multiple(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tw := tsm1.NewWALSegmentWriter(f)\n\n\tp1 := tsm1.NewValue(1, int64(1))\n\tp2 := tsm1.NewValue(1, int64(2))\n\n\texp := []struct {\n\t\tkey    string\n\t\tvalues []tsm1.Value\n\t}{\n\t\t{\"cpu,host=A#!~#value\", []tsm1.Value{p1}},\n\t\t{\"cpu,host=B#!~#value\", []tsm1.Value{p2}},\n\t}\n\n\tfor _, v := range exp {\n\t\tentry := &tsm1.WriteWALEntry{\n\t\t\tValues: map[string][]tsm1.Value{v.key: v.values},\n\t\t}\n\n\t\tif err := w.Write(mustMarshalEntry(entry)); err != nil {\n\t\t\tfatal(t, \"write points\", err)\n\t\t}\n\t\tif err := w.Flush(); err != nil {\n\t\t\tfatal(t, \"flush\", err)\n\t\t}\n\t}\n\n\t// Seek back to the beinning 
of the file for reading\n\tif _, err := f.Seek(0, io.SeekStart); err != nil {\n\t\tfatal(t, \"seek\", err)\n\t}\n\n\tr := tsm1.NewWALSegmentReader(f)\n\n\tfor _, ep := range exp {\n\t\tif !r.Next() {\n\t\t\tt.Fatalf(\"expected next, got false\")\n\t\t}\n\n\t\twe, err := r.Read()\n\t\tif err != nil {\n\t\t\tfatal(t, \"read entry\", err)\n\t\t}\n\n\t\te, ok := we.(*tsm1.WriteWALEntry)\n\t\tif !ok {\n\t\t\tt.Fatalf(\"expected WriteWALEntry: got %#v\", e)\n\t\t}\n\n\t\tfor k, v := range e.Values {\n\t\t\tif got, exp := k, ep.key; got != exp {\n\t\t\t\tt.Fatalf(\"key mismatch. got %v, exp %v\", got, exp)\n\t\t\t}\n\n\t\t\tif got, exp := len(v), len(ep.values); got != exp {\n\t\t\t\tt.Fatalf(\"values length mismatch: got %v, exp %v\", got, exp)\n\t\t\t}\n\n\t\t\tfor i, vv := range v {\n\t\t\t\tif got, exp := vv.String(), ep.values[i].String(); got != exp {\n\t\t\t\t\tt.Fatalf(\"points mismatch: got %v, exp %v\", got, exp)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif n := r.Count(); n != MustReadFileSize(f) {\n\t\tt.Fatalf(\"wrong count of bytes read, got %d, exp %d\", n, MustReadFileSize(f))\n\t}\n}\n\nfunc TestWALWriter_WriteDelete_Single(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tw := tsm1.NewWALSegmentWriter(f)\n\n\tentry := &tsm1.DeleteWALEntry{\n\t\tKeys: []string{\"cpu\"},\n\t}\n\n\tif err := w.Write(mustMarshalEntry(entry)); err != nil {\n\t\tfatal(t, \"write points\", err)\n\t}\n\n\tif err := w.Flush(); err != nil {\n\t\tfatal(t, \"flush\", err)\n\t}\n\n\tif _, err := f.Seek(0, io.SeekStart); err != nil {\n\t\tfatal(t, \"seek\", err)\n\t}\n\n\tr := tsm1.NewWALSegmentReader(f)\n\n\tif !r.Next() {\n\t\tt.Fatalf(\"expected next, got false\")\n\t}\n\n\twe, err := r.Read()\n\tif err != nil {\n\t\tfatal(t, \"read entry\", err)\n\t}\n\n\te, ok := we.(*tsm1.DeleteWALEntry)\n\tif !ok {\n\t\tt.Fatalf(\"expected WriteWALEntry: got %#v\", e)\n\t}\n\n\tif got, exp := len(e.Keys), len(entry.Keys); got != exp {\n\t\tt.Fatalf(\"key 
length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := e.Keys[0], entry.Keys[0]; got != exp {\n\t\tt.Fatalf(\"key mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestWALWriter_WriteMultiDelete_Multiple(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tw := tsm1.NewWALSegmentWriter(f)\n\n\tp1 := tsm1.NewValue(1, true)\n\tvalues := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{p1},\n\t}\n\n\twriteEntry := &tsm1.WriteWALEntry{\n\t\tValues: values,\n\t}\n\n\tif err := w.Write(mustMarshalEntry(writeEntry)); err != nil {\n\t\tfatal(t, \"write points\", err)\n\t}\n\n\tif err := w.Flush(); err != nil {\n\t\tfatal(t, \"flush\", err)\n\t}\n\n\t// Write the delete entry\n\tdeleteEntry := &tsm1.DeleteWALEntry{\n\t\tKeys: []string{\"cpu,host=A#!~value\"},\n\t}\n\n\tif err := w.Write(mustMarshalEntry(deleteEntry)); err != nil {\n\t\tfatal(t, \"write points\", err)\n\t}\n\n\tif err := w.Flush(); err != nil {\n\t\tfatal(t, \"flush\", err)\n\t}\n\n\t// Seek back to the beinning of the file for reading\n\tif _, err := f.Seek(0, io.SeekStart); err != nil {\n\t\tfatal(t, \"seek\", err)\n\t}\n\n\tr := tsm1.NewWALSegmentReader(f)\n\n\t// Read the write points first\n\tif !r.Next() {\n\t\tt.Fatalf(\"expected next, got false\")\n\t}\n\n\twe, err := r.Read()\n\tif err != nil {\n\t\tfatal(t, \"read entry\", err)\n\t}\n\n\te, ok := we.(*tsm1.WriteWALEntry)\n\tif !ok {\n\t\tt.Fatalf(\"expected WriteWALEntry: got %#v\", e)\n\t}\n\n\tfor k, v := range e.Values {\n\t\tif got, exp := len(v), len(values[k]); got != exp {\n\t\t\tt.Fatalf(\"values length mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tfor i, vv := range v {\n\t\t\tif got, exp := vv.String(), values[k][i].String(); got != exp {\n\t\t\t\tt.Fatalf(\"points mismatch: got %v, exp %v\", got, exp)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Read the delete second\n\tif !r.Next() {\n\t\tt.Fatalf(\"expected next, got false\")\n\t}\n\n\twe, err = r.Read()\n\tif err 
!= nil {\n\t\tfatal(t, \"read entry\", err)\n\t}\n\n\tde, ok := we.(*tsm1.DeleteWALEntry)\n\tif !ok {\n\t\tt.Fatalf(\"expected DeleteWALEntry: got %#v\", e)\n\t}\n\n\tif got, exp := len(de.Keys), len(deleteEntry.Keys); got != exp {\n\t\tt.Fatalf(\"key length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := de.Keys[0], deleteEntry.Keys[0]; got != exp {\n\t\tt.Fatalf(\"key mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestWALWriter_WriteMultiDeleteRange_Multiple(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tw := tsm1.NewWALSegmentWriter(f)\n\n\tp1 := tsm1.NewValue(1, 1.0)\n\tp2 := tsm1.NewValue(2, 2.0)\n\tp3 := tsm1.NewValue(3, 3.0)\n\n\tvalues := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{p1, p2, p3},\n\t}\n\n\twriteEntry := &tsm1.WriteWALEntry{\n\t\tValues: values,\n\t}\n\n\tif err := w.Write(mustMarshalEntry(writeEntry)); err != nil {\n\t\tfatal(t, \"write points\", err)\n\t}\n\n\tif err := w.Flush(); err != nil {\n\t\tfatal(t, \"flush\", err)\n\t}\n\n\t// Write the delete entry\n\tdeleteEntry := &tsm1.DeleteRangeWALEntry{\n\t\tKeys: []string{\"cpu,host=A#!~value\"},\n\t\tMin:  2,\n\t\tMax:  3,\n\t}\n\n\tif err := w.Write(mustMarshalEntry(deleteEntry)); err != nil {\n\t\tfatal(t, \"write points\", err)\n\t}\n\n\tif err := w.Flush(); err != nil {\n\t\tfatal(t, \"flush\", err)\n\t}\n\n\t// Seek back to the beinning of the file for reading\n\tif _, err := f.Seek(0, io.SeekStart); err != nil {\n\t\tfatal(t, \"seek\", err)\n\t}\n\n\tr := tsm1.NewWALSegmentReader(f)\n\n\t// Read the write points first\n\tif !r.Next() {\n\t\tt.Fatalf(\"expected next, got false\")\n\t}\n\n\twe, err := r.Read()\n\tif err != nil {\n\t\tfatal(t, \"read entry\", err)\n\t}\n\n\te, ok := we.(*tsm1.WriteWALEntry)\n\tif !ok {\n\t\tt.Fatalf(\"expected WriteWALEntry: got %#v\", e)\n\t}\n\n\tfor k, v := range e.Values {\n\t\tif got, exp := len(v), len(values[k]); got != exp {\n\t\t\tt.Fatalf(\"values 
length mismatch: got %v, exp %v\", got, exp)\n\t\t}\n\n\t\tfor i, vv := range v {\n\t\t\tif got, exp := vv.String(), values[k][i].String(); got != exp {\n\t\t\t\tt.Fatalf(\"points mismatch: got %v, exp %v\", got, exp)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Read the delete second\n\tif !r.Next() {\n\t\tt.Fatalf(\"expected next, got false\")\n\t}\n\n\twe, err = r.Read()\n\tif err != nil {\n\t\tfatal(t, \"read entry\", err)\n\t}\n\n\tde, ok := we.(*tsm1.DeleteRangeWALEntry)\n\tif !ok {\n\t\tt.Fatalf(\"expected DeleteWALEntry: got %#v\", e)\n\t}\n\n\tif got, exp := len(de.Keys), len(deleteEntry.Keys); got != exp {\n\t\tt.Fatalf(\"key length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := de.Keys[0], deleteEntry.Keys[0]; got != exp {\n\t\tt.Fatalf(\"key mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := de.Min, int64(2); got != exp {\n\t\tt.Fatalf(\"min time mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif got, exp := de.Max, int64(3); got != exp {\n\t\tt.Fatalf(\"min time mismatch: got %v, exp %v\", got, exp)\n\t}\n\n}\n\nfunc TestWAL_ClosedSegments(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\tw := tsm1.NewWAL(dir)\n\tif err := w.Open(); err != nil {\n\t\tt.Fatalf(\"error opening WAL: %v\", err)\n\t}\n\n\tfiles, err := w.ClosedSegments()\n\tif err != nil {\n\t\tt.Fatalf(\"error getting closed segments: %v\", err)\n\t}\n\n\tif got, exp := len(files), 0; got != exp {\n\t\tt.Fatalf(\"close segment length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif _, err := w.WriteMulti(map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#value\": []tsm1.Value{\n\t\t\ttsm1.NewValue(1, 1.1),\n\t\t},\n\t}); err != nil {\n\t\tt.Fatalf(\"error writing points: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"error closing wal: %v\", err)\n\t}\n\n\t// Re-open the WAL\n\tw = tsm1.NewWAL(dir)\n\tdefer w.Close()\n\tif err := w.Open(); err != nil {\n\t\tt.Fatalf(\"error opening WAL: %v\", err)\n\t}\n\n\tfiles, err = 
w.ClosedSegments()\n\tif err != nil {\n\t\tt.Fatalf(\"error getting closed segments: %v\", err)\n\t}\n\tif got, exp := len(files), 1; got != exp {\n\t\tt.Fatalf(\"close segment length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestWAL_Delete(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\tw := tsm1.NewWAL(dir)\n\tif err := w.Open(); err != nil {\n\t\tt.Fatalf(\"error opening WAL: %v\", err)\n\t}\n\n\tfiles, err := w.ClosedSegments()\n\tif err != nil {\n\t\tt.Fatalf(\"error getting closed segments: %v\", err)\n\t}\n\n\tif got, exp := len(files), 0; got != exp {\n\t\tt.Fatalf(\"close segment length mismatch: got %v, exp %v\", got, exp)\n\t}\n\n\tif _, err := w.Delete([]string{\"cpu\"}); err != nil {\n\t\tt.Fatalf(\"error writing points: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"error closing wal: %v\", err)\n\t}\n\n\t// Re-open the WAL\n\tw = tsm1.NewWAL(dir)\n\tdefer w.Close()\n\tif err := w.Open(); err != nil {\n\t\tt.Fatalf(\"error opening WAL: %v\", err)\n\t}\n\n\tfiles, err = w.ClosedSegments()\n\tif err != nil {\n\t\tt.Fatalf(\"error getting closed segments: %v\", err)\n\t}\n\tif got, exp := len(files), 1; got != exp {\n\t\tt.Fatalf(\"close segment length mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestWALWriter_Corrupt(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tw := tsm1.NewWALSegmentWriter(f)\n\tcorruption := []byte{1, 4, 0, 0, 0}\n\n\tp1 := tsm1.NewValue(1, 1.1)\n\tvalues := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#float\": []tsm1.Value{p1},\n\t}\n\n\tentry := &tsm1.WriteWALEntry{\n\t\tValues: values,\n\t}\n\tif err := w.Write(mustMarshalEntry(entry)); err != nil {\n\t\tfatal(t, \"write points\", err)\n\t}\n\n\tif err := w.Flush(); err != nil {\n\t\tfatal(t, \"flush\", err)\n\t}\n\n\t// Write some random bytes to the file to simulate corruption.\n\tif _, err := f.Write(corruption); err != nil {\n\t\tfatal(t, \"corrupt WAL 
segment\", err)\n\t}\n\n\t// Create the WAL segment reader.\n\tif _, err := f.Seek(0, io.SeekStart); err != nil {\n\t\tfatal(t, \"seek\", err)\n\t}\n\tr := tsm1.NewWALSegmentReader(f)\n\n\t// Try to decode two entries.\n\n\tif !r.Next() {\n\t\tt.Fatalf(\"expected next, got false\")\n\t}\n\tif _, err := r.Read(); err != nil {\n\t\tfatal(t, \"read entry\", err)\n\t}\n\n\tif !r.Next() {\n\t\tt.Fatalf(\"expected next, got false\")\n\t}\n\tif _, err := r.Read(); err == nil {\n\t\tfatal(t, \"read entry did not return err\", nil)\n\t}\n\n\t// Count should only return size of valid data.\n\texpCount := MustReadFileSize(f) - int64(len(corruption))\n\tif n := r.Count(); n != expCount {\n\t\tt.Fatalf(\"wrong count of bytes read, got %d, exp %d\", n, expCount)\n\t}\n}\n\nfunc TestWriteWALSegment_UnmarshalBinary_WriteWALCorrupt(t *testing.T) {\n\tp1 := tsm1.NewValue(1, 1.1)\n\tp2 := tsm1.NewValue(1, int64(1))\n\tp3 := tsm1.NewValue(1, true)\n\tp4 := tsm1.NewValue(1, \"string\")\n\n\tvalues := map[string][]tsm1.Value{\n\t\t\"cpu,host=A#!~#float\":  []tsm1.Value{p1, p1},\n\t\t\"cpu,host=A#!~#int\":    []tsm1.Value{p2, p2},\n\t\t\"cpu,host=A#!~#bool\":   []tsm1.Value{p3, p3},\n\t\t\"cpu,host=A#!~#string\": []tsm1.Value{p4, p4},\n\t}\n\n\tw := &tsm1.WriteWALEntry{\n\t\tValues: values,\n\t}\n\n\tb, err := w.MarshalBinary()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error, got %v\", err)\n\t}\n\n\t// Test every possible truncation of a write WAL entry\n\tfor i := 0; i < len(b); i++ {\n\t\t// re-allocated to ensure capacity would be exceed if slicing\n\t\ttruncated := make([]byte, i)\n\t\tcopy(truncated, b[:i])\n\t\terr := w.UnmarshalBinary(truncated)\n\t\tif err != nil && err != tsm1.ErrWALCorrupt {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\t}\n}\n\nfunc TestWriteWALSegment_UnmarshalBinary_DeleteWALCorrupt(t *testing.T) {\n\tw := &tsm1.DeleteWALEntry{\n\t\tKeys: []string{\"foo\", \"bar\"},\n\t}\n\n\tb, err := w.MarshalBinary()\n\tif err != nil 
{\n\t\tt.Fatalf(\"unexpected error, got %v\", err)\n\t}\n\n\t// Test every possible truncation of a write WAL entry\n\tfor i := 0; i < len(b); i++ {\n\t\t// re-allocated to ensure capacity would be exceed if slicing\n\t\ttruncated := make([]byte, i)\n\t\tcopy(truncated, b[:i])\n\t\terr := w.UnmarshalBinary(truncated)\n\t\tif err != nil && err != tsm1.ErrWALCorrupt {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\t}\n}\n\nfunc TestWriteWALSegment_UnmarshalBinary_DeleteRangeWALCorrupt(t *testing.T) {\n\tw := &tsm1.DeleteRangeWALEntry{\n\t\tKeys: []string{\"foo\", \"bar\"},\n\t\tMin:  1,\n\t\tMax:  2,\n\t}\n\n\tb, err := w.MarshalBinary()\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error, got %v\", err)\n\t}\n\n\t// Test every possible truncation of a write WAL entry\n\tfor i := 0; i < len(b); i++ {\n\t\t// re-allocated to ensure capacity would be exceed if slicing\n\t\ttruncated := make([]byte, i)\n\t\tcopy(truncated, b[:i])\n\t\terr := w.UnmarshalBinary(truncated)\n\t\tif err != nil && err != tsm1.ErrWALCorrupt {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkWALSegmentWriter(b *testing.B) {\n\tpoints := map[string][]tsm1.Value{}\n\tfor i := 0; i < 5000; i++ {\n\t\tk := \"cpu,host=A#!~#value\"\n\t\tpoints[k] = append(points[k], tsm1.NewValue(int64(i), 1.1))\n\t}\n\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\tf := MustTempFile(dir)\n\tw := tsm1.NewWALSegmentWriter(f)\n\n\twrite := &tsm1.WriteWALEntry{\n\t\tValues: points,\n\t}\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tif err := w.Write(mustMarshalEntry(write)); err != nil {\n\t\t\tb.Fatalf(\"unexpected error writing entry: %v\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkWALSegmentReader(b *testing.B) {\n\tpoints := map[string][]tsm1.Value{}\n\tfor i := 0; i < 5000; i++ {\n\t\tk := \"cpu,host=A#!~#value\"\n\t\tpoints[k] = append(points[k], tsm1.NewValue(int64(i), 1.1))\n\t}\n\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\n\tf := 
MustTempFile(dir)\n\tw := tsm1.NewWALSegmentWriter(f)\n\n\twrite := &tsm1.WriteWALEntry{\n\t\tValues: points,\n\t}\n\n\tfor i := 0; i < 100; i++ {\n\t\tif err := w.Write(mustMarshalEntry(write)); err != nil {\n\t\t\tb.Fatalf(\"unexpected error writing entry: %v\", err)\n\t\t}\n\t}\n\n\tr := tsm1.NewWALSegmentReader(f)\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tb.StopTimer()\n\t\tf.Seek(0, io.SeekStart)\n\t\tb.StartTimer()\n\n\t\tfor r.Next() {\n\t\t\t_, err := r.Read()\n\t\t\tif err != nil {\n\t\t\t\tb.Fatalf(\"unexpected error reading entry: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// MustReadFileSize returns the size of the file, or panics.\nfunc MustReadFileSize(f *os.File) int64 {\n\tstat, err := os.Stat(f.Name())\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to get size of file at %s: %s\", f.Name(), err.Error()))\n\t}\n\treturn stat.Size()\n}\n\nfunc mustMarshalEntry(entry tsm1.WALEntry) (tsm1.WalEntryType, []byte) {\n\tbytes := make([]byte, 1024<<2)\n\n\tb, err := entry.Encode(bytes)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"error encoding: %v\", err))\n\t}\n\n\treturn entry.Type(), snappy.Encode(b, b)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/writer.go",
    "content": "package tsm1\n\n/*\nA TSM file is composed for four sections: header, blocks, index and the footer.\n\n┌────────┬────────────────────────────────────┬─────────────┬──────────────┐\n│ Header │               Blocks               │    Index    │    Footer    │\n│5 bytes │              N bytes               │   N bytes   │   4 bytes    │\n└────────┴────────────────────────────────────┴─────────────┴──────────────┘\n\nHeader is composed of a magic number to identify the file type and a version\nnumber.\n\n┌───────────────────┐\n│      Header       │\n├─────────┬─────────┤\n│  Magic  │ Version │\n│ 4 bytes │ 1 byte  │\n└─────────┴─────────┘\n\nBlocks are sequences of pairs of CRC32 and data.  The block data is opaque to the\nfile.  The CRC32 is used for block level error detection.  The length of the blocks\nis stored in the index.\n\n┌───────────────────────────────────────────────────────────┐\n│                          Blocks                           │\n├───────────────────┬───────────────────┬───────────────────┤\n│      Block 1      │      Block 2      │      Block N      │\n├─────────┬─────────┼─────────┬─────────┼─────────┬─────────┤\n│  CRC    │  Data   │  CRC    │  Data   │  CRC    │  Data   │\n│ 4 bytes │ N bytes │ 4 bytes │ N bytes │ 4 bytes │ N bytes │\n└─────────┴─────────┴─────────┴─────────┴─────────┴─────────┘\n\nFollowing the blocks is the index for the blocks in the file.  The index is\ncomposed of a sequence of index entries ordered lexicographically by key and\nthen by time.  Each index entry starts with a key length and key followed by a\ncount of the number of blocks in the file.  Each block entry is composed of\nthe min and max time for the block, the offset into the file where the block\nis located and the the size of the block.\n\nThe index structure can provide efficient access to all blocks as well as the\nability to determine the cost associated with acessing a given key.  
Given a key\nand timestamp, we can determine whether a file contains the block for that\ntimestamp as well as where that block resides and how much data to read to\nretrieve the block.  If we know we need to read all or multiple blocks in a\nfile, we can use the size to determine how much to read in a given IO.\n\n┌────────────────────────────────────────────────────────────────────────────┐\n│                                   Index                                    │\n├─────────┬─────────┬──────┬───────┬─────────┬─────────┬────────┬────────┬───┤\n│ Key Len │   Key   │ Type │ Count │Min Time │Max Time │ Offset │  Size  │...│\n│ 2 bytes │ N bytes │1 byte│2 bytes│ 8 bytes │ 8 bytes │8 bytes │4 bytes │   │\n└─────────┴─────────┴──────┴───────┴─────────┴─────────┴────────┴────────┴───┘\n\nThe last section is the footer that stores the offset of the start of the index.\n\n┌─────────┐\n│ Footer  │\n├─────────┤\n│Index Ofs│\n│ 8 bytes │\n└─────────┘\n*/\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"hash/crc32\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\t// MagicNumber is written as the first 4 bytes of a data file to\n\t// identify the file as a tsm1 formatted file\n\tMagicNumber uint32 = 0x16D116D1\n\n\t// Version indicates the version of the TSM file format.\n\tVersion byte = 1\n\n\t// Size in bytes of an index entry\n\tindexEntrySize = 28\n\n\t// Size in bytes used to store the count of index entries for a key\n\tindexCountSize = 2\n\n\t// Size in bytes used to store the type of block encoded\n\tindexTypeSize = 1\n\n\t// Max number of blocks for a given key that can exist in a single file\n\tmaxIndexEntries = (1 << (indexCountSize * 8)) - 1\n\n\t// max length of a key in an index entry (measurement + tags)\n\tmaxKeyLength = (1 << (2 * 8)) - 1\n)\n\nvar (\n\t//ErrNoValues is returned when TSMWriter.WriteIndex is called and there are no values to write.\n\tErrNoValues = fmt.Errorf(\"no values 
written\")\n\n\t// ErrTSMClosed is returned when performing an operation against a closed TSM file.\n\tErrTSMClosed = fmt.Errorf(\"tsm file closed\")\n\n\t// ErrMaxKeyLengthExceeded is returned when attempting to write a key that is too long.\n\tErrMaxKeyLengthExceeded = fmt.Errorf(\"max key length exceeded\")\n\n\t// ErrMaxBlocksExceeded is returned when attempting to write a block past the allowed number.\n\tErrMaxBlocksExceeded = fmt.Errorf(\"max blocks exceeded\")\n)\n\n// TSMWriter writes TSM formatted key and values.\ntype TSMWriter interface {\n\t// Write writes a new block for key containing and values.  Writes append\n\t// blocks in the order that the Write function is called.  The caller is\n\t// responsible for ensuring keys and blocks are sorted appropriately.\n\t// Values are encoded as a full block.  The caller is responsible for\n\t// ensuring a fixed number of values are encoded in each block as well as\n\t// ensuring the Values are sorted. The first and last timestamp values are\n\t// used as the minimum and maximum values for the index entry.\n\tWrite(key string, values Values) error\n\n\t// WriteBlock writes a new block for key containing the bytes in block.  WriteBlock appends\n\t// blocks in the order that the WriteBlock function is called.  The caller is\n\t// responsible for ensuring keys and blocks are sorted appropriately, and that the\n\t// block and index information is correct for the block.  
The minTime and maxTime\n\t// timestamp values are used as the minimum and maximum values for the index entry.\n\tWriteBlock(key string, minTime, maxTime int64, block []byte) error\n\n\t// WriteIndex finishes the TSM write streams and writes the index.\n\tWriteIndex() error\n\n\t// Flushes flushes all pending changes to the underlying file resources.\n\tFlush() error\n\n\t// Close closes any underlying file resources.\n\tClose() error\n\n\t// Size returns the current size in bytes of the file.\n\tSize() uint32\n}\n\n// IndexWriter writes a TSMIndex.\ntype IndexWriter interface {\n\t// Add records a new block entry for a key in the index.\n\tAdd(key string, blockType byte, minTime, maxTime int64, offset int64, size uint32)\n\n\t// Entries returns all index entries for a key.\n\tEntries(key string) []IndexEntry\n\n\t// Keys returns the unique set of keys in the index.\n\tKeys() []string\n\n\t// KeyCount returns the count of unique keys in the index.\n\tKeyCount() int\n\n\t// Size returns the size of a the current index in bytes.\n\tSize() uint32\n\n\t// MarshalBinary returns a byte slice encoded version of the index.\n\tMarshalBinary() ([]byte, error)\n\n\t// WriteTo writes the index contents to a writer.\n\tWriteTo(w io.Writer) (int64, error)\n}\n\n// IndexEntry is the index information for a given block in a TSM file.\ntype IndexEntry struct {\n\t// The min and max time of all points stored in the block.\n\tMinTime, MaxTime int64\n\n\t// The absolute position in the file where this block is located.\n\tOffset int64\n\n\t// The size in bytes of the block in the file.\n\tSize uint32\n}\n\n// UnmarshalBinary decodes an IndexEntry from a byte slice.\nfunc (e *IndexEntry) UnmarshalBinary(b []byte) error {\n\tif len(b) != indexEntrySize {\n\t\treturn fmt.Errorf(\"unmarshalBinary: short buf: %v != %v\", indexEntrySize, len(b))\n\t}\n\te.MinTime = int64(binary.BigEndian.Uint64(b[:8]))\n\te.MaxTime = int64(binary.BigEndian.Uint64(b[8:16]))\n\te.Offset = 
int64(binary.BigEndian.Uint64(b[16:24]))\n\te.Size = binary.BigEndian.Uint32(b[24:28])\n\treturn nil\n}\n\n// AppendTo writes a binary-encoded version of IndexEntry to b, allocating\n// and returning a new slice, if necessary.\nfunc (e *IndexEntry) AppendTo(b []byte) []byte {\n\tif len(b) < indexEntrySize {\n\t\tif cap(b) < indexEntrySize {\n\t\t\tb = make([]byte, indexEntrySize)\n\t\t} else {\n\t\t\tb = b[:indexEntrySize]\n\t\t}\n\t}\n\n\tbinary.BigEndian.PutUint64(b[:8], uint64(e.MinTime))\n\tbinary.BigEndian.PutUint64(b[8:16], uint64(e.MaxTime))\n\tbinary.BigEndian.PutUint64(b[16:24], uint64(e.Offset))\n\tbinary.BigEndian.PutUint32(b[24:28], uint32(e.Size))\n\n\treturn b\n}\n\n// Contains returns true if this IndexEntry may contain values for the given time.\n// The min and max times are inclusive.\nfunc (e *IndexEntry) Contains(t int64) bool {\n\treturn e.MinTime <= t && e.MaxTime >= t\n}\n\n// OverlapsTimeRange returns true if the given time ranges are completely within the entry's time bounds.\nfunc (e *IndexEntry) OverlapsTimeRange(min, max int64) bool {\n\treturn e.MinTime <= max && e.MaxTime >= min\n}\n\n// String returns a string representation of the entry.\nfunc (e *IndexEntry) String() string {\n\treturn fmt.Sprintf(\"min=%s max=%s ofs=%d siz=%d\",\n\t\ttime.Unix(0, e.MinTime).UTC(), time.Unix(0, e.MaxTime).UTC(), e.Offset, e.Size)\n}\n\n// NewIndexWriter returns a new IndexWriter.\nfunc NewIndexWriter() IndexWriter {\n\treturn &directIndex{\n\t\tblocks: map[string]*indexEntries{},\n\t}\n}\n\n// directIndex is a simple in-memory index implementation for a TSM file.  
The full index\n// must fit in memory.\ntype directIndex struct {\n\tmu     sync.RWMutex\n\tsize   uint32\n\tblocks map[string]*indexEntries\n}\n\nfunc (d *directIndex) Add(key string, blockType byte, minTime, maxTime int64, offset int64, size uint32) {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\n\tentries := d.blocks[key]\n\tif entries == nil {\n\t\tentries = &indexEntries{\n\t\t\tType: blockType,\n\t\t}\n\t\td.blocks[key] = entries\n\t\t// size of the key stored in the index\n\t\td.size += uint32(2 + len(key))\n\n\t\t// size of the count of entries stored in the index\n\t\td.size += indexCountSize\n\t}\n\tentries.entries = append(entries.entries, IndexEntry{\n\t\tMinTime: minTime,\n\t\tMaxTime: maxTime,\n\t\tOffset:  offset,\n\t\tSize:    size,\n\t})\n\n\t// size of the encoded index entry\n\td.size += indexEntrySize\n}\n\nfunc (d *directIndex) entries(key string) []IndexEntry {\n\tentries := d.blocks[key]\n\tif entries == nil {\n\t\treturn nil\n\t}\n\treturn entries.entries\n}\n\nfunc (d *directIndex) Entries(key string) []IndexEntry {\n\td.mu.RLock()\n\tdefer d.mu.RUnlock()\n\n\treturn d.entries(key)\n}\n\nfunc (d *directIndex) Entry(key string, t int64) *IndexEntry {\n\td.mu.RLock()\n\tdefer d.mu.RUnlock()\n\n\tentries := d.entries(key)\n\tfor _, entry := range entries {\n\t\tif entry.Contains(t) {\n\t\t\treturn &entry\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *directIndex) Keys() []string {\n\td.mu.RLock()\n\tdefer d.mu.RUnlock()\n\n\tvar keys []string\n\tfor k := range d.blocks {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}\n\nfunc (d *directIndex) KeyCount() int {\n\td.mu.RLock()\n\tn := len(d.blocks)\n\td.mu.RUnlock()\n\treturn n\n}\n\nfunc (d *directIndex) addEntries(key string, entries *indexEntries) {\n\texisting := d.blocks[key]\n\tif existing == nil {\n\t\td.blocks[key] = entries\n\t\treturn\n\t}\n\texisting.entries = append(existing.entries, entries.entries...)\n}\n\nfunc (d *directIndex) WriteTo(w io.Writer) (int64, error) 
{\n\td.mu.RLock()\n\tdefer d.mu.RUnlock()\n\n\t// Index blocks are writtens sorted by key\n\tkeys := make([]string, 0, len(d.blocks))\n\tfor k := range d.blocks {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tvar (\n\t\tn   int\n\t\terr error\n\t\tbuf [5]byte\n\t\tN   int64\n\t)\n\n\t// For each key, individual entries are sorted by time\n\tfor _, key := range keys {\n\t\tentries := d.blocks[key]\n\n\t\tif entries.Len() > maxIndexEntries {\n\t\t\treturn N, fmt.Errorf(\"key '%s' exceeds max index entries: %d > %d\", key, entries.Len(), maxIndexEntries)\n\t\t}\n\t\tsort.Sort(entries)\n\n\t\tbinary.BigEndian.PutUint16(buf[0:2], uint16(len(key)))\n\t\tbuf[2] = entries.Type\n\t\tbinary.BigEndian.PutUint16(buf[3:5], uint16(entries.Len()))\n\n\t\t// Append the key length and key\n\t\tif n, err = w.Write(buf[0:2]); err != nil {\n\t\t\treturn int64(n) + N, fmt.Errorf(\"write: writer key length error: %v\", err)\n\t\t}\n\t\tN += int64(n)\n\n\t\tif n, err = io.WriteString(w, key); err != nil {\n\t\t\treturn int64(n) + N, fmt.Errorf(\"write: writer key error: %v\", err)\n\t\t}\n\t\tN += int64(n)\n\n\t\t// Append the block type and count\n\t\tif n, err = w.Write(buf[2:5]); err != nil {\n\t\t\treturn int64(n) + N, fmt.Errorf(\"write: writer block type and count error: %v\", err)\n\t\t}\n\t\tN += int64(n)\n\n\t\t// Append each index entry for all blocks for this key\n\t\tvar n64 int64\n\t\tif n64, err = entries.WriteTo(w); err != nil {\n\t\t\treturn n64 + N, fmt.Errorf(\"write: writer entries error: %v\", err)\n\t\t}\n\t\tN += n64\n\n\t}\n\treturn N, nil\n}\n\nfunc (d *directIndex) MarshalBinary() ([]byte, error) {\n\tvar b bytes.Buffer\n\tif _, err := d.WriteTo(&b); err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.Bytes(), nil\n}\n\nfunc (d *directIndex) UnmarshalBinary(b []byte) error {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\n\td.size = uint32(len(b))\n\n\tvar pos int\n\tfor pos < len(b) {\n\t\tn, key, err := readKey(b[pos:])\n\t\tif err != nil {\n\t\t\treturn 
fmt.Errorf(\"readIndex: read key error: %v\", err)\n\t\t}\n\t\tpos += n\n\n\t\tvar entries indexEntries\n\t\tn, err = readEntries(b[pos:], &entries)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"readIndex: read entries error: %v\", err)\n\t\t}\n\n\t\tpos += n\n\t\td.addEntries(string(key), &entries)\n\t}\n\treturn nil\n}\n\nfunc (d *directIndex) Size() uint32 {\n\treturn d.size\n}\n\n// tsmWriter writes keys and values in the TSM format\ntype tsmWriter struct {\n\twrapped io.Writer\n\tw       *bufio.Writer\n\tindex   IndexWriter\n\tn       int64\n}\n\n// NewTSMWriter returns a new TSMWriter writing to w.\nfunc NewTSMWriter(w io.Writer) (TSMWriter, error) {\n\tindex := &directIndex{\n\t\tblocks: map[string]*indexEntries{},\n\t}\n\n\treturn &tsmWriter{wrapped: w, w: bufio.NewWriterSize(w, 1024*1024), index: index}, nil\n}\n\nfunc (t *tsmWriter) writeHeader() error {\n\tvar buf [5]byte\n\tbinary.BigEndian.PutUint32(buf[0:4], MagicNumber)\n\tbuf[4] = Version\n\n\tn, err := t.w.Write(buf[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\tt.n = int64(n)\n\treturn nil\n}\n\n// Write writes a new block containing key and values.\nfunc (t *tsmWriter) Write(key string, values Values) error {\n\tif len(key) > maxKeyLength {\n\t\treturn ErrMaxKeyLengthExceeded\n\t}\n\n\t// Nothing to write\n\tif len(values) == 0 {\n\t\treturn nil\n\t}\n\n\t// Write header only after we have some data to write.\n\tif t.n == 0 {\n\t\tif err := t.writeHeader(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tblock, err := values.Encode(nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tblockType, err := BlockType(block)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar checksum [crc32.Size]byte\n\tbinary.BigEndian.PutUint32(checksum[:], crc32.ChecksumIEEE(block))\n\n\t_, err = t.w.Write(checksum[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tn, err := t.w.Write(block)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn += len(checksum)\n\n\t// Record this block in index\n\tt.index.Add(key, blockType, 
values[0].UnixNano(), values[len(values)-1].UnixNano(), t.n, uint32(n))\n\n\t// Increment file position pointer\n\tt.n += int64(n)\n\treturn nil\n}\n\n// WriteBlock writes block for the given key and time range to the TSM file.  If the write\n// exceeds max entries for a given key, ErrMaxBlocksExceeded is returned.  This indicates\n// that the index is now full for this key and no future writes to this key will succeed.\nfunc (t *tsmWriter) WriteBlock(key string, minTime, maxTime int64, block []byte) error {\n\tif len(key) > maxKeyLength {\n\t\treturn ErrMaxKeyLengthExceeded\n\t}\n\n\t// Nothing to write\n\tif len(block) == 0 {\n\t\treturn nil\n\t}\n\n\tblockType, err := BlockType(block)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Write header only after we have some data to write.\n\tif t.n == 0 {\n\t\tif err := t.writeHeader(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar checksum [crc32.Size]byte\n\tbinary.BigEndian.PutUint32(checksum[:], crc32.ChecksumIEEE(block))\n\n\t_, err = t.w.Write(checksum[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tn, err := t.w.Write(block)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn += len(checksum)\n\n\t// Record this block in index\n\tt.index.Add(key, blockType, minTime, maxTime, t.n, uint32(n))\n\n\t// Increment file position pointer (checksum + block len)\n\tt.n += int64(n)\n\n\tif len(t.index.Entries(key)) >= maxIndexEntries {\n\t\treturn ErrMaxBlocksExceeded\n\t}\n\n\treturn nil\n}\n\n// WriteIndex writes the index section of the file.  
If there are no index entries to write,\n// this returns ErrNoValues.\nfunc (t *tsmWriter) WriteIndex() error {\n\tindexPos := t.n\n\n\tif t.index.KeyCount() == 0 {\n\t\treturn ErrNoValues\n\t}\n\n\t// Write the index\n\tif _, err := t.index.WriteTo(t.w); err != nil {\n\t\treturn err\n\t}\n\n\tvar buf [8]byte\n\tbinary.BigEndian.PutUint64(buf[:], uint64(indexPos))\n\n\t// Write the index index position\n\t_, err := t.w.Write(buf[:])\n\treturn err\n}\n\nfunc (t *tsmWriter) Flush() error {\n\tif err := t.w.Flush(); err != nil {\n\t\treturn err\n\t}\n\n\tif f, ok := t.wrapped.(*os.File); ok {\n\t\tif err := f.Sync(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (t *tsmWriter) Close() error {\n\tif err := t.Flush(); err != nil {\n\t\treturn err\n\t}\n\n\tif c, ok := t.wrapped.(io.Closer); ok {\n\t\treturn c.Close()\n\t}\n\treturn nil\n}\n\nfunc (t *tsmWriter) Size() uint32 {\n\treturn uint32(t.n) + t.index.Size()\n}\n\n// verifyVersion verifies that the reader's bytes are a TSM byte\n// stream of the correct version (1)\nfunc verifyVersion(r io.ReadSeeker) error {\n\t_, err := r.Seek(0, 0)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"init: failed to seek: %v\", err)\n\t}\n\tvar b [4]byte\n\t_, err = io.ReadFull(r, b[:])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"init: error reading magic number of file: %v\", err)\n\t}\n\tif binary.BigEndian.Uint32(b[:]) != MagicNumber {\n\t\treturn fmt.Errorf(\"can only read from tsm file\")\n\t}\n\t_, err = io.ReadFull(r, b[:1])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"init: error reading version: %v\", err)\n\t}\n\tif b[0] != Version {\n\t\treturn fmt.Errorf(\"init: file is version %b. expected %b\", b[0], Version)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/writer_test.go",
    "content": "package tsm1_test\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/tsdb/engine/tsm1\"\n)\n\nfunc TestTSMWriter_Write_Empty(t *testing.T) {\n\tvar b bytes.Buffer\n\tw, err := tsm1.NewTSMWriter(&b)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created writer: %v\", err)\n\t}\n\n\tif err := w.WriteIndex(); err != tsm1.ErrNoValues {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tif got, exp := len(b.Bytes()), 0; got < exp {\n\t\tt.Fatalf(\"file size mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTSMWriter_Write_NoValues(t *testing.T) {\n\tvar b bytes.Buffer\n\tw, err := tsm1.NewTSMWriter(&b)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created writer: %v\", err)\n\t}\n\n\tif err := w.Write(\"foo\", []tsm1.Value{}); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t}\n\n\tif err := w.WriteIndex(); err != tsm1.ErrNoValues {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tif got, exp := len(b.Bytes()), 0; got < exp {\n\t\tt.Fatalf(\"file size mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTSMWriter_Write_Single(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvalues := []tsm1.Value{tsm1.NewValue(0, 1.0)}\n\tif err := w.Write(\"cpu\", values); err != nil {\n\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\n\t}\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error writing index: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tfd, err := os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tb, err := ioutil.ReadAll(fd)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error reading: 
%v\", err)\n\t}\n\n\tif got, exp := len(b), 5; got < exp {\n\t\tt.Fatalf(\"file size mismatch: got %v, exp %v\", got, exp)\n\t}\n\tif got := binary.BigEndian.Uint32(b[0:4]); got != tsm1.MagicNumber {\n\t\tt.Fatalf(\"magic number mismatch: got %v, exp %v\", got, tsm1.MagicNumber)\n\t}\n\n\tif _, err := fd.Seek(0, io.SeekStart); err != nil {\n\t\tt.Fatalf(\"unexpected error seeking: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(fd)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\tdefer r.Close()\n\n\treadValues, err := r.ReadAll(\"cpu\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error readin: %v\", err)\n\t}\n\n\tif len(readValues) != len(values) {\n\t\tt.Fatalf(\"read values length mismatch: got %v, exp %v\", len(readValues), len(values))\n\t}\n\n\tfor i, v := range values {\n\t\tif v.Value() != readValues[i].Value() {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %d\", i, readValues[i].Value(), v.Value())\n\t\t}\n\t}\n}\n\nfunc TestTSMWriter_Write_Multiple(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tvalues []tsm1.Value\n\t}{\n\t\t{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t\t{\"mem\", []tsm1.Value{tsm1.NewValue(1, 2.0)}},\n\t}\n\n\tfor _, d := range data {\n\t\tif err := w.Write(d.key, d.values); err != nil {\n\t\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t\t}\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tfd, err := os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(fd)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: 
%v\", err)\n\t}\n\tdefer r.Close()\n\n\tfor _, d := range data {\n\t\treadValues, err := r.ReadAll(d.key)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error readin: %v\", err)\n\t\t}\n\n\t\tif exp := len(d.values); exp != len(readValues) {\n\t\t\tt.Fatalf(\"read values length mismatch: got %v, exp %v\", len(readValues), exp)\n\t\t}\n\n\t\tfor i, v := range d.values {\n\t\t\tif v.Value() != readValues[i].Value() {\n\t\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %d\", i, readValues[i].Value(), v.Value())\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestTSMWriter_Write_MultipleKeyValues(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tvalues []tsm1.Value\n\t}{\n\t\t{\"cpu\", []tsm1.Value{\n\t\t\ttsm1.NewValue(0, 1.0),\n\t\t\ttsm1.NewValue(1, 2.0)},\n\t\t},\n\t\t{\"mem\", []tsm1.Value{\n\t\t\ttsm1.NewValue(0, 1.5),\n\t\t\ttsm1.NewValue(1, 2.5)},\n\t\t},\n\t}\n\n\tfor _, d := range data {\n\t\tif err := w.Write(d.key, d.values); err != nil {\n\t\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t\t}\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tfd, err := os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(fd)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tfor _, d := range data {\n\t\treadValues, err := r.ReadAll(d.key)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error readin: %v\", err)\n\t\t}\n\n\t\tif exp := len(d.values); exp != len(readValues) {\n\t\t\tt.Fatalf(\"read values length mismatch: got %v, exp %v\", len(readValues), 
exp)\n\t\t}\n\n\t\tfor i, v := range d.values {\n\t\t\tif v.Value() != readValues[i].Value() {\n\t\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %d\", i, readValues[i].Value(), v.Value())\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Tests that writing keys in reverse is able to read them back.\nfunc TestTSMWriter_Write_ReverseKeys(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tvalues []tsm1.Value\n\t}{\n\t\t{\"mem\", []tsm1.Value{\n\t\t\ttsm1.NewValue(0, 1.5),\n\t\t\ttsm1.NewValue(1, 2.5)},\n\t\t},\n\t\t{\"cpu\", []tsm1.Value{\n\t\t\ttsm1.NewValue(0, 1.0),\n\t\t\ttsm1.NewValue(1, 2.0)},\n\t\t},\n\t}\n\n\tfor _, d := range data {\n\t\tif err := w.Write(d.key, d.values); err != nil {\n\t\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t\t}\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tfd, err := os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(fd)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tfor _, d := range data {\n\t\treadValues, err := r.ReadAll(d.key)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error readin: %v\", err)\n\t\t}\n\n\t\tif exp := len(d.values); exp != len(readValues) {\n\t\t\tt.Fatalf(\"read values length mismatch: got %v, exp %v\", len(readValues), exp)\n\t\t}\n\n\t\tfor i, v := range d.values {\n\t\t\tif v.Value() != readValues[i].Value() {\n\t\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %d\", i, readValues[i].Value(), v.Value())\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Tests that writing keys in reverse is able to read them 
back.\nfunc TestTSMWriter_Write_SameKey(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tvalues []tsm1.Value\n\t}{\n\t\t{\"cpu\", []tsm1.Value{\n\t\t\ttsm1.NewValue(0, 1.0),\n\t\t\ttsm1.NewValue(1, 2.0)},\n\t\t},\n\t\t{\"cpu\", []tsm1.Value{\n\t\t\ttsm1.NewValue(2, 3.0),\n\t\t\ttsm1.NewValue(3, 4.0)},\n\t\t},\n\t}\n\n\tfor _, d := range data {\n\t\tif err := w.Write(d.key, d.values); err != nil {\n\t\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t\t}\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tfd, err := os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(fd)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tvalues := append(data[0].values, data[1].values...)\n\n\treadValues, err := r.ReadAll(\"cpu\")\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error readin: %v\", err)\n\t}\n\n\tif exp := len(values); exp != len(readValues) {\n\t\tt.Fatalf(\"read values length mismatch: got %v, exp %v\", len(readValues), exp)\n\t}\n\n\tfor i, v := range values {\n\t\tif v.Value() != readValues[i].Value() {\n\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %d\", i, readValues[i].Value(), v.Value())\n\t\t}\n\t}\n}\n\n// Tests that calling Read returns all the values for block matching the key\n// and timestamp\nfunc TestTSMWriter_Read_Multiple(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvar 
data = []struct {\n\t\tkey    string\n\t\tvalues []tsm1.Value\n\t}{\n\t\t{\"cpu\", []tsm1.Value{\n\t\t\ttsm1.NewValue(0, 1.0),\n\t\t\ttsm1.NewValue(1, 2.0)},\n\t\t},\n\t\t{\"cpu\", []tsm1.Value{\n\t\t\ttsm1.NewValue(2, 3.0),\n\t\t\ttsm1.NewValue(3, 4.0)},\n\t\t},\n\t}\n\n\tfor _, d := range data {\n\t\tif err := w.Write(d.key, d.values); err != nil {\n\t\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t\t}\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tfd, err := os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\tr, err := tsm1.NewTSMReader(fd)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tfor _, values := range data {\n\t\t// Try the first timestamp\n\t\treadValues, err := r.Read(\"cpu\", values.values[0].UnixNano())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error readin: %v\", err)\n\t\t}\n\n\t\tif exp := len(values.values); exp != len(readValues) {\n\t\t\tt.Fatalf(\"read values length mismatch: got %v, exp %v\", len(readValues), exp)\n\t\t}\n\n\t\tfor i, v := range values.values {\n\t\t\tif v.Value() != readValues[i].Value() {\n\t\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %d\", i, readValues[i].Value(), v.Value())\n\t\t\t}\n\t\t}\n\n\t\t// Try the last timestamp too\n\t\treadValues, err = r.Read(\"cpu\", values.values[1].UnixNano())\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error readin: %v\", err)\n\t\t}\n\n\t\tif exp := len(values.values); exp != len(readValues) {\n\t\t\tt.Fatalf(\"read values length mismatch: got %v, exp %v\", len(readValues), exp)\n\t\t}\n\n\t\tfor i, v := range values.values {\n\t\t\tif v.Value() != readValues[i].Value() {\n\t\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %d\", i, readValues[i].Value(), 
v.Value())\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestTSMWriter_WriteBlock_Empty(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tif err := w.WriteBlock(\"cpu\", 0, 0, nil); err != nil {\n\t\tt.Fatalf(\"unexpected error writing block: %v\", err)\n\t}\n\n\tif err := w.WriteIndex(); err != tsm1.ErrNoValues {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tfd, err := os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\tdefer fd.Close()\n\n\tb, err := ioutil.ReadAll(fd)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error read all: %v\", err)\n\t}\n\n\tif got, exp := len(b), 0; got < exp {\n\t\tt.Fatalf(\"file size mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestTSMWriter_WriteBlock_Multiple(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvar data = []struct {\n\t\tkey    string\n\t\tvalues []tsm1.Value\n\t}{\n\t\t{\"cpu\", []tsm1.Value{tsm1.NewValue(0, 1.0)}},\n\t\t{\"mem\", []tsm1.Value{tsm1.NewValue(1, 2.0)}},\n\t}\n\n\tfor _, d := range data {\n\t\tif err := w.Write(d.key, d.values); err != nil {\n\t\t\tt.Fatalf(\"unexpected error writing: %v\", err)\n\t\t}\n\t}\n\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tfd, err := os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\tdefer fd.Close()\n\n\tb, err := ioutil.ReadAll(fd)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error read all: %v\", err)\n\t}\n\n\tif got, exp := len(b), 5; got < exp {\n\t\tt.Fatalf(\"file size 
mismatch: got %v, exp %v\", got, exp)\n\t}\n\tif got := binary.BigEndian.Uint32(b[0:4]); got != tsm1.MagicNumber {\n\t\tt.Fatalf(\"magic number mismatch: got %v, exp %v\", got, tsm1.MagicNumber)\n\t}\n\n\tif _, err := fd.Seek(0, io.SeekStart); err != nil {\n\t\tt.Fatalf(\"error seeking: %v\", err)\n\t}\n\n\t// Create reader for that file\n\tr, err := tsm1.NewTSMReader(fd)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\n\tf = MustTempFile(dir)\n\tw, err = tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\titer := r.BlockIterator()\n\tfor iter.Next() {\n\t\tkey, minTime, maxTime, _, _, b, err := iter.Read()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error reading block: %v\", err)\n\t\t}\n\t\tif err := w.WriteBlock(key, minTime, maxTime, b); err != nil {\n\t\t\tt.Fatalf(\"unexpected error writing block: %v\", err)\n\t\t}\n\t}\n\tif err := w.WriteIndex(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tif err := w.Close(); err != nil {\n\t\tt.Fatalf(\"unexpected error closing: %v\", err)\n\t}\n\n\tfd, err = os.Open(f.Name())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error open file: %v\", err)\n\t}\n\n\t// Now create a reader to verify the written blocks matches the originally\n\t// written file using Write\n\tr, err = tsm1.NewTSMReader(fd)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created reader: %v\", err)\n\t}\n\tdefer r.Close()\n\n\tfor _, d := range data {\n\t\treadValues, err := r.ReadAll(d.key)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error readin: %v\", err)\n\t\t}\n\n\t\tif exp := len(d.values); exp != len(readValues) {\n\t\t\tt.Fatalf(\"read values length mismatch: got %v, exp %v\", len(readValues), exp)\n\t\t}\n\n\t\tfor i, v := range d.values {\n\t\t\tif v.Value() != readValues[i].Value() {\n\t\t\t\tt.Fatalf(\"read value mismatch(%d): got %v, exp %d\", i, readValues[i].Value(), 
v.Value())\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestTSMWriter_WriteBlock_MaxKey(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error creating writer: %v\", err)\n\t}\n\n\tvar key string\n\tfor i := 0; i < 100000; i++ {\n\t\tkey += \"a\"\n\t}\n\n\tif err := w.WriteBlock(key, 0, 0, nil); err != tsm1.ErrMaxKeyLengthExceeded {\n\t\tt.Fatalf(\"expected max key length error writing key: %v\", err)\n\t}\n}\n\nfunc TestTSMWriter_Write_MaxKey(t *testing.T) {\n\tdir := MustTempDir()\n\tdefer os.RemoveAll(dir)\n\tf := MustTempFile(dir)\n\tdefer f.Close()\n\n\tw, err := tsm1.NewTSMWriter(f)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error created writer: %v\", err)\n\t}\n\n\tvar key string\n\tfor i := 0; i < 100000; i++ {\n\t\tkey += \"a\"\n\t}\n\tif err := w.Write(key, []tsm1.Value{tsm1.NewValue(0, 1.0)}); err != tsm1.ErrMaxKeyLengthExceeded {\n\t\tt.Fatalf(\"expected max key length error writing key: %v\", err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/engine.go",
    "content": "package tsdb\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"regexp\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/estimator\"\n\t\"github.com/influxdata/influxdb/pkg/limiter\"\n\t\"github.com/uber-go/zap\"\n)\n\nvar (\n\t// ErrFormatNotFound is returned when no format can be determined from a path.\n\tErrFormatNotFound = errors.New(\"format not found\")\n\n\t// ErrUnknownEngineFormat is returned when the engine format is\n\t// unknown. ErrUnknownEngineFormat is currently returned if a format\n\t// other than tsm1 is encountered.\n\tErrUnknownEngineFormat = errors.New(\"unknown engine format\")\n)\n\n// Engine represents a swappable storage engine for the shard.\ntype Engine interface {\n\tOpen() error\n\tClose() error\n\tSetEnabled(enabled bool)\n\tSetCompactionsEnabled(enabled bool)\n\n\tWithLogger(zap.Logger)\n\n\tLoadMetadataIndex(shardID uint64, index Index) error\n\n\tCreateSnapshot() (string, error)\n\tBackup(w io.Writer, basePath string, since time.Time) error\n\tRestore(r io.Reader, basePath string) error\n\tImport(r io.Reader, basePath string) error\n\n\tCreateIterator(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error)\n\tWritePoints(points []models.Point) error\n\n\tCreateSeriesIfNotExists(key, name []byte, tags models.Tags) error\n\tCreateSeriesListIfNotExists(keys, names [][]byte, tags []models.Tags) error\n\tDeleteSeriesRange(keys [][]byte, min, max int64) error\n\n\tSeriesSketches() (estimator.Sketch, estimator.Sketch, error)\n\tMeasurementsSketches() (estimator.Sketch, estimator.Sketch, error)\n\tSeriesN() int64\n\n\tMeasurementExists(name []byte) (bool, error)\n\tMeasurementNamesByExpr(expr influxql.Expr) ([][]byte, error)\n\tMeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error)\n\tMeasurementFields(measurement []byte) *MeasurementFields\n\tForEachMeasurementName(fn func(name 
[]byte) error) error\n\tDeleteMeasurement(name []byte) error\n\n\t// TagKeys(name []byte) ([][]byte, error)\n\tHasTagKey(name, key []byte) (bool, error)\n\tMeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error)\n\tMeasurementTagKeyValuesByExpr(name []byte, key []string, expr influxql.Expr, keysSorted bool) ([][]string, error)\n\tForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error\n\tTagKeyCardinality(name, key []byte) int\n\n\t// InfluxQL iterators\n\tMeasurementSeriesKeysByExpr(name []byte, condition influxql.Expr) ([][]byte, error)\n\tForEachMeasurementSeriesByExpr(name []byte, expr influxql.Expr, fn func(tags models.Tags) error) error\n\tSeriesPointIterator(opt influxql.IteratorOptions) (influxql.Iterator, error)\n\n\t// Statistics will return statistics relevant to this engine.\n\tStatistics(tags map[string]string) []models.Statistic\n\tLastModified() time.Time\n\tDiskSize() int64\n\tIsIdle() bool\n\n\tio.WriterTo\n}\n\n// EngineFormat represents the format for an engine.\ntype EngineFormat int\n\nconst (\n\t// TSM1Format is the format used by the tsm1 engine.\n\tTSM1Format EngineFormat = 2\n)\n\n// NewEngineFunc creates a new engine.\ntype NewEngineFunc func(id uint64, i Index, database, path string, walPath string, options EngineOptions) Engine\n\n// newEngineFuncs is a lookup of engine constructors by name.\nvar newEngineFuncs = make(map[string]NewEngineFunc)\n\n// RegisterEngine registers a storage engine initializer by name.\nfunc RegisterEngine(name string, fn NewEngineFunc) {\n\tif _, ok := newEngineFuncs[name]; ok {\n\t\tpanic(\"engine already registered: \" + name)\n\t}\n\tnewEngineFuncs[name] = fn\n}\n\n// RegisteredEngines returns the slice of currently registered engines.\nfunc RegisteredEngines() []string {\n\ta := make([]string, 0, len(newEngineFuncs))\n\tfor k := range newEngineFuncs {\n\t\ta = append(a, k)\n\t}\n\tsort.Strings(a)\n\treturn a\n}\n\n// NewEngine returns an instance of an engine 
based on its format.\n// If the path does not exist then the DefaultFormat is used.\nfunc NewEngine(id uint64, i Index, database, path string, walPath string, options EngineOptions) (Engine, error) {\n\t// Create a new engine\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\treturn newEngineFuncs[options.EngineVersion](id, i, database, path, walPath, options), nil\n\t}\n\n\t// If it's a dir then it's a tsm1 engine\n\tformat := DefaultEngine\n\tif fi, err := os.Stat(path); err != nil {\n\t\treturn nil, err\n\t} else if !fi.Mode().IsDir() {\n\t\treturn nil, ErrUnknownEngineFormat\n\t} else {\n\t\tformat = \"tsm1\"\n\t}\n\n\t// Lookup engine by format.\n\tfn := newEngineFuncs[format]\n\tif fn == nil {\n\t\treturn nil, fmt.Errorf(\"invalid engine format: %q\", format)\n\t}\n\n\treturn fn(id, i, database, path, walPath, options), nil\n}\n\n// EngineOptions represents the options used to initialize the engine.\ntype EngineOptions struct {\n\tEngineVersion     string\n\tIndexVersion      string\n\tShardID           uint64\n\tInmemIndex        interface{} // shared in-memory index\n\tCompactionLimiter limiter.Fixed\n\n\tConfig Config\n}\n\n// NewEngineOptions returns the default options.\nfunc NewEngineOptions() EngineOptions {\n\treturn EngineOptions{\n\t\tEngineVersion: DefaultEngine,\n\t\tIndexVersion:  DefaultIndex,\n\t\tConfig:        NewConfig(),\n\t}\n}\n\n// NewInmemIndex returns a new \"inmem\" index type.\nvar NewInmemIndex func(name string) (interface{}, error)\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/index.go",
    "content": "package index // import \"github.com/influxdata/influxdb/tsdb/index\"\n\nimport (\n\t_ \"github.com/influxdata/influxdb/tsdb/index/inmem\"\n\t_ \"github.com/influxdata/influxdb/tsdb/index/tsi1\"\n)\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem.go",
    "content": "/*\nPackage inmem implements a shared, in-memory index for each database.\n\nThe in-memory index is the original index implementation and provides fast\naccess to index data. However, it also forces high memory usage for large\ndatasets and can cause OOM errors.\n\nIndex is the shared index structure that provides most of the functionality.\nHowever, ShardIndex is a light per-shard wrapper that adapts this original\nshared index format to the new per-shard format.\n*/\npackage inmem\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"sort\"\n\t\"sync\"\n\t// \"sync/atomic\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/bytesutil\"\n\t\"github.com/influxdata/influxdb/pkg/escape\"\n\t\"github.com/influxdata/influxdb/pkg/estimator\"\n\t\"github.com/influxdata/influxdb/pkg/estimator/hll\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t\"github.com/uber-go/zap\"\n)\n\n// IndexName is the name of this index.\nconst IndexName = \"inmem\"\n\nfunc init() {\n\ttsdb.NewInmemIndex = func(name string) (interface{}, error) { return NewIndex(name), nil }\n\n\ttsdb.RegisterIndex(IndexName, func(id uint64, database, path string, opt tsdb.EngineOptions) tsdb.Index {\n\t\treturn NewShardIndex(id, database, path, opt)\n\t})\n}\n\n// Index is the in memory index of a collection of measurements, time\n// series, and their tags. Exported functions are goroutine safe while\n// un-exported functions assume the caller will use the appropriate locks.\ntype Index struct {\n\tmu sync.RWMutex\n\n\tdatabase string\n\n\t// In-memory metadata index, built on load and updated when new series come in\n\tmeasurements map[string]*Measurement // measurement name to object and index\n\tseries       map[string]*Series      // map series key to the Series object\n\tlastID       uint64                  // last used series ID. 
They're in memory only for this shard\n\n\tseriesSketch, seriesTSSketch             *hll.Plus\n\tmeasurementsSketch, measurementsTSSketch *hll.Plus\n}\n\n// NewIndex returns a new initialized Index.\nfunc NewIndex(database string) *Index {\n\tindex := &Index{\n\t\tdatabase:     database,\n\t\tmeasurements: make(map[string]*Measurement),\n\t\tseries:       make(map[string]*Series),\n\t}\n\n\tindex.seriesSketch = hll.NewDefaultPlus()\n\tindex.seriesTSSketch = hll.NewDefaultPlus()\n\tindex.measurementsSketch = hll.NewDefaultPlus()\n\tindex.measurementsTSSketch = hll.NewDefaultPlus()\n\n\treturn index\n}\n\nfunc (i *Index) Type() string      { return IndexName }\nfunc (i *Index) Open() (err error) { return nil }\nfunc (i *Index) Close() error      { return nil }\n\nfunc (i *Index) WithLogger(zap.Logger) {}\n\n// Series returns a series by key.\nfunc (i *Index) Series(key []byte) (*Series, error) {\n\ti.mu.RLock()\n\ts := i.series[string(key)]\n\ti.mu.RUnlock()\n\treturn s, nil\n}\n\n// SeriesSketches returns the sketches for the series.\nfunc (i *Index) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {\n\ti.mu.RLock()\n\tdefer i.mu.RUnlock()\n\treturn i.seriesSketch.Clone(), i.seriesTSSketch.Clone(), nil\n}\n\n// SeriesN returns the number of unique non-tombstoned series in the index.\n// Since indexes are not shared across shards, the count returned by SeriesN\n// cannot be combined with other shards' counts.\nfunc (i *Index) SeriesN() int64 {\n\ti.mu.RLock()\n\tn := int64(len(i.series))\n\ti.mu.RUnlock()\n\treturn n\n}\n\n// Measurement returns the measurement object from the index by the name\nfunc (i *Index) Measurement(name []byte) (*Measurement, error) {\n\ti.mu.RLock()\n\tdefer i.mu.RUnlock()\n\treturn i.measurements[string(name)], nil\n}\n\n// MeasurementExists returns true if the measurement exists.\nfunc (i *Index) MeasurementExists(name []byte) (bool, error) {\n\ti.mu.RLock()\n\tdefer i.mu.RUnlock()\n\treturn i.measurements[string(name)] != nil, 
nil\n}\n\n// MeasurementsSketches returns the sketches for the measurements.\nfunc (i *Index) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {\n\ti.mu.RLock()\n\tdefer i.mu.RUnlock()\n\treturn i.measurementsSketch.Clone(), i.measurementsTSSketch.Clone(), nil\n}\n\n// MeasurementsByName returns a list of measurements.\nfunc (i *Index) MeasurementsByName(names [][]byte) ([]*Measurement, error) {\n\ti.mu.RLock()\n\tdefer i.mu.RUnlock()\n\n\ta := make([]*Measurement, 0, len(names))\n\tfor _, name := range names {\n\t\tif m := i.measurements[string(name)]; m != nil {\n\t\t\ta = append(a, m)\n\t\t}\n\t}\n\treturn a, nil\n}\n\n// CreateSeriesIfNotExists adds the series for the given measurement to the\n// index and sets its ID or returns the existing series object\nfunc (i *Index) CreateSeriesIfNotExists(shardID uint64, key, name []byte, tags models.Tags, opt *tsdb.EngineOptions, ignoreLimits bool) error {\n\ti.mu.RLock()\n\t// if there is a series for this id, it's already been added\n\tss := i.series[string(key)]\n\ti.mu.RUnlock()\n\n\tif ss != nil {\n\t\tss.AssignShard(shardID)\n\t\treturn nil\n\t}\n\n\t// get or create the measurement index\n\tm := i.CreateMeasurementIndexIfNotExists(name)\n\n\ti.mu.Lock()\n\t// Check for the series again under a write lock\n\tss = i.series[string(key)]\n\tif ss != nil {\n\t\ti.mu.Unlock()\n\t\tss.AssignShard(shardID)\n\t\treturn nil\n\t}\n\n\t// Verify that the series will not exceed limit.\n\tif !ignoreLimits {\n\t\tif max := opt.Config.MaxSeriesPerDatabase; max > 0 && len(i.series)+1 > max {\n\t\t\ti.mu.Unlock()\n\t\t\treturn errMaxSeriesPerDatabaseExceeded\n\t\t}\n\t}\n\n\t// set the in memory ID for query processing on this shard\n\t// The series key and tags are clone to prevent a memory leak\n\tseries := NewSeries([]byte(string(key)), tags.Clone())\n\tseries.ID = i.lastID + 1\n\ti.lastID++\n\n\tseries.SetMeasurement(m)\n\ti.series[string(key)] = 
series\n\n\tm.AddSeries(series)\n\tseries.AssignShard(shardID)\n\n\t// Add the series to the series sketch.\n\ti.seriesSketch.Add(key)\n\ti.mu.Unlock()\n\n\treturn nil\n}\n\n// CreateMeasurementIndexIfNotExists creates or retrieves an in memory index\n// object for the measurement\nfunc (i *Index) CreateMeasurementIndexIfNotExists(name []byte) *Measurement {\n\tname = escape.Unescape(name)\n\n\t// See if the measurement exists using a read-lock\n\ti.mu.RLock()\n\tm := i.measurements[string(name)]\n\tif m != nil {\n\t\ti.mu.RUnlock()\n\t\treturn m\n\t}\n\ti.mu.RUnlock()\n\n\t// Doesn't exist, so lock the index to create it\n\ti.mu.Lock()\n\tdefer i.mu.Unlock()\n\n\t// Make sure it was created in between the time we released our read-lock\n\t// and acquire the write lock\n\tm = i.measurements[string(name)]\n\tif m == nil {\n\t\tm = NewMeasurement(i.database, string(name))\n\t\ti.measurements[string(name)] = m\n\n\t\t// Add the measurement to the measurements sketch.\n\t\ti.measurementsSketch.Add([]byte(name))\n\t}\n\treturn m\n}\n\n// HasTagKey returns true if tag key exists.\nfunc (i *Index) HasTagKey(name, key []byte) (bool, error) {\n\ti.mu.RLock()\n\tmm := i.measurements[string(name)]\n\ti.mu.RUnlock()\n\n\tif mm == nil {\n\t\treturn false, nil\n\t}\n\treturn mm.HasTagKey(string(key)), nil\n}\n\n// HasTagValue returns true if tag value exists.\nfunc (i *Index) HasTagValue(name, key, value []byte) bool {\n\ti.mu.RLock()\n\tmm := i.measurements[string(name)]\n\ti.mu.RUnlock()\n\n\tif mm == nil {\n\t\treturn false\n\t}\n\treturn mm.HasTagKeyValue(key, value)\n}\n\n// TagValueN returns the cardinality of a tag value.\nfunc (i *Index) TagValueN(name, key []byte) int {\n\ti.mu.RLock()\n\tmm := i.measurements[string(name)]\n\ti.mu.RUnlock()\n\n\tif mm == nil {\n\t\treturn 0\n\t}\n\treturn mm.CardinalityBytes(key)\n}\n\n// MeasurementTagKeysByExpr returns an ordered set of tag keys filtered by an expression.\nfunc (i *Index) MeasurementTagKeysByExpr(name []byte, expr 
influxql.Expr) (map[string]struct{}, error) {\n\ti.mu.RLock()\n\tmm := i.measurements[string(name)]\n\ti.mu.RUnlock()\n\n\tif mm == nil {\n\t\treturn nil, nil\n\t}\n\treturn mm.TagKeysByExpr(expr)\n}\n\n// MeasurementTagKeyValuesByExpr returns a set of tag values filtered by an expression.\n//\n// See tsm1.Engine.MeasurementTagKeyValuesByExpr for a fuller description of this\n// method.\nfunc (i *Index) MeasurementTagKeyValuesByExpr(name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error) {\n\ti.mu.RLock()\n\tmm := i.measurements[string(name)]\n\ti.mu.RUnlock()\n\n\tif mm == nil || len(keys) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tresults := make([][]string, len(keys))\n\n\t// If we haven't been provided sorted keys, then we need to sort them.\n\tif !keysSorted {\n\t\tsort.Sort(sort.StringSlice(keys))\n\t}\n\n\tids, _, _ := mm.WalkWhereForSeriesIds(expr)\n\tif ids.Len() == 0 && expr == nil {\n\t\tfor ki, key := range keys {\n\t\t\tvalues := mm.TagValues(key)\n\t\t\tsort.Sort(sort.StringSlice(values))\n\t\t\tresults[ki] = values\n\t\t}\n\t\treturn results, nil\n\t}\n\n\t// This is the case where we have filtered series by some WHERE condition.\n\t// We only care about the tag values for the keys given the\n\t// filtered set of series ids.\n\n\tkeyIdxs := make(map[string]int, len(keys))\n\tfor ki, key := range keys {\n\t\tkeyIdxs[key] = ki\n\t}\n\n\tresultSet := make([]stringSet, len(keys))\n\tfor i := 0; i < len(resultSet); i++ {\n\t\tresultSet[i] = newStringSet()\n\t}\n\n\t// Iterate all series to collect tag values.\n\tfor _, id := range ids {\n\t\ts := mm.SeriesByID(id)\n\t\tif s == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Iterate the tag keys we're interested in and collect values\n\t\t// from this series, if they exist.\n\t\tfor _, t := range s.Tags() {\n\t\t\tif idx, ok := keyIdxs[string(t.Key)]; ok {\n\t\t\t\tresultSet[idx].add(string(t.Value))\n\t\t\t} else if string(t.Key) > keys[len(keys)-1] {\n\t\t\t\t// The tag key is > the 
largest key we're interested in.\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tfor i, s := range resultSet {\n\t\tresults[i] = s.list()\n\t}\n\treturn results, nil\n}\n\n// ForEachMeasurementTagKey iterates over all tag keys for a measurement.\nfunc (i *Index) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error {\n\t// Ensure we do not hold a lock on the index while fn executes in case fn tries\n\t// to acquire a lock on the index again.  If another goroutine has Lock, this will\n\t// deadlock.\n\ti.mu.RLock()\n\tmm := i.measurements[string(name)]\n\ti.mu.RUnlock()\n\n\tif mm == nil {\n\t\treturn nil\n\t}\n\n\tfor _, key := range mm.TagKeys() {\n\t\tif err := fn([]byte(key)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// TagKeyCardinality returns the number of values for a measurement/tag key.\nfunc (i *Index) TagKeyCardinality(name, key []byte) int {\n\ti.mu.RLock()\n\tmm := i.measurements[string(name)]\n\ti.mu.RUnlock()\n\n\tif mm == nil {\n\t\treturn 0\n\t}\n\treturn mm.CardinalityBytes(key)\n}\n\n// TagsForSeries returns the tag map for the passed in series\nfunc (i *Index) TagsForSeries(key string) (models.Tags, error) {\n\ti.mu.RLock()\n\tss := i.series[key]\n\ti.mu.RUnlock()\n\n\tif ss == nil {\n\t\treturn nil, nil\n\t}\n\treturn ss.Tags(), nil\n}\n\n// MeasurementNamesByExpr takes an expression containing only tags and returns a\n// list of matching meaurement names.\nfunc (i *Index) MeasurementNamesByExpr(expr influxql.Expr) ([][]byte, error) {\n\ti.mu.RLock()\n\tdefer i.mu.RUnlock()\n\n\t// Return all measurement names if no expression is provided.\n\tif expr == nil {\n\t\ta := make([][]byte, 0, len(i.measurements))\n\t\tfor name := range i.measurements {\n\t\t\ta = append(a, []byte(name))\n\t\t}\n\t\tbytesutil.Sort(a)\n\t\treturn a, nil\n\t}\n\n\treturn i.measurementNamesByExpr(expr)\n}\n\nfunc (i *Index) measurementNamesByExpr(expr influxql.Expr) ([][]byte, error) {\n\tif expr == nil {\n\t\treturn nil, 
nil\n\t}\n\n\tswitch e := expr.(type) {\n\tcase *influxql.BinaryExpr:\n\t\tswitch e.Op {\n\t\tcase influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX:\n\t\t\ttag, ok := e.LHS.(*influxql.VarRef)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"left side of '%s' must be a tag key\", e.Op.String())\n\t\t\t}\n\n\t\t\ttf := &TagFilter{\n\t\t\t\tOp:  e.Op,\n\t\t\t\tKey: tag.Val,\n\t\t\t}\n\n\t\t\tif influxql.IsRegexOp(e.Op) {\n\t\t\t\tre, ok := e.RHS.(*influxql.RegexLiteral)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, fmt.Errorf(\"right side of '%s' must be a regular expression\", e.Op.String())\n\t\t\t\t}\n\t\t\t\ttf.Regex = re.Val\n\t\t\t} else {\n\t\t\t\ts, ok := e.RHS.(*influxql.StringLiteral)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, fmt.Errorf(\"right side of '%s' must be a tag value string\", e.Op.String())\n\t\t\t\t}\n\t\t\t\ttf.Value = s.Val\n\t\t\t}\n\n\t\t\t// Match on name, if specified.\n\t\t\tif tag.Val == \"_name\" {\n\t\t\t\treturn i.measurementNamesByNameFilter(tf.Op, tf.Value, tf.Regex), nil\n\t\t\t} else if influxql.IsSystemName(tag.Val) {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\n\t\t\treturn i.measurementNamesByTagFilters(tf), nil\n\t\tcase influxql.OR, influxql.AND:\n\t\t\tlhs, err := i.measurementNamesByExpr(e.LHS)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\trhs, err := i.measurementNamesByExpr(e.RHS)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif e.Op == influxql.OR {\n\t\t\t\treturn bytesutil.Union(lhs, rhs), nil\n\t\t\t}\n\t\t\treturn bytesutil.Intersect(lhs, rhs), nil\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid tag comparison operator\")\n\t\t}\n\tcase *influxql.ParenExpr:\n\t\treturn i.measurementNamesByExpr(e.Expr)\n\t}\n\treturn nil, fmt.Errorf(\"%#v\", expr)\n}\n\n// measurementNamesByNameFilter returns the sorted measurements matching a name.\nfunc (i *Index) measurementNamesByNameFilter(op influxql.Token, val string, regex *regexp.Regexp) [][]byte {\n\tvar names [][]byte\n\tfor _, 
m := range i.measurements {\n\t\tvar matched bool\n\t\tswitch op {\n\t\tcase influxql.EQ:\n\t\t\tmatched = m.Name == val\n\t\tcase influxql.NEQ:\n\t\t\tmatched = m.Name != val\n\t\tcase influxql.EQREGEX:\n\t\t\tmatched = regex.MatchString(m.Name)\n\t\tcase influxql.NEQREGEX:\n\t\t\tmatched = !regex.MatchString(m.Name)\n\t\t}\n\n\t\tif !matched {\n\t\t\tcontinue\n\t\t}\n\t\tnames = append(names, []byte(m.Name))\n\t}\n\tbytesutil.Sort(names)\n\treturn names\n}\n\n// measurementNamesByTagFilters returns the sorted measurements matching the filters on tag values.\nfunc (i *Index) measurementNamesByTagFilters(filter *TagFilter) [][]byte {\n\t// Build a list of measurements matching the filters.\n\tvar names [][]byte\n\tvar tagMatch bool\n\n\t// Iterate through all measurements in the database.\n\tfor _, m := range i.measurements {\n\t\ttagVals := m.SeriesByTagKeyValue(filter.Key)\n\t\tif tagVals == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\ttagMatch = false\n\n\t\t// If the operator is non-regex, only check the specified value.\n\t\tif filter.Op == influxql.EQ || filter.Op == influxql.NEQ {\n\t\t\tif _, ok := tagVals[filter.Value]; ok {\n\t\t\t\ttagMatch = true\n\t\t\t}\n\t\t} else {\n\t\t\t// Else, the operator is a regex and we have to check all tag\n\t\t\t// values against the regular expression.\n\t\t\tfor tagVal := range tagVals {\n\t\t\t\tif filter.Regex.MatchString(tagVal) {\n\t\t\t\t\ttagMatch = true\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t//\n\t\t// XNOR gate\n\t\t//\n\t\t// tags match | operation is EQ | measurement matches\n\t\t// --------------------------------------------------\n\t\t//     True   |       True      |      True\n\t\t//     True   |       False     |      False\n\t\t//     False  |       True      |      False\n\t\t//     False  |       False     |      True\n\t\tif tagMatch == (filter.Op == influxql.EQ || filter.Op == influxql.EQREGEX) {\n\t\t\tnames = append(names, 
[]byte(m.Name))\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tbytesutil.Sort(names)\n\treturn names\n}\n\n// MeasurementNamesByRegex returns the measurements that match the regex.\nfunc (i *Index) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {\n\ti.mu.RLock()\n\tdefer i.mu.RUnlock()\n\n\tvar matches [][]byte\n\tfor _, m := range i.measurements {\n\t\tif re.MatchString(m.Name) {\n\t\t\tmatches = append(matches, []byte(m.Name))\n\t\t}\n\t}\n\treturn matches, nil\n}\n\n// DropMeasurement removes the measurement and all of its underlying\n// series from the database index\nfunc (i *Index) DropMeasurement(name []byte) error {\n\ti.mu.Lock()\n\tdefer i.mu.Unlock()\n\treturn i.dropMeasurement(string(name))\n}\n\nfunc (i *Index) dropMeasurement(name string) error {\n\t// Update the tombstone sketch.\n\ti.measurementsTSSketch.Add([]byte(name))\n\n\tm := i.measurements[name]\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tdelete(i.measurements, name)\n\tfor _, s := range m.SeriesByIDMap() {\n\t\tdelete(i.series, s.Key)\n\t\ti.seriesTSSketch.Add([]byte(s.Key))\n\t}\n\treturn nil\n}\n\n// DropSeries removes the series key and its tags from the index.\nfunc (i *Index) DropSeries(key []byte) error {\n\tif key == nil {\n\t\treturn nil\n\t}\n\n\ti.mu.Lock()\n\tk := string(key)\n\tseries := i.series[k]\n\tif series == nil {\n\t\ti.mu.Unlock()\n\t\treturn nil\n\t}\n\n\t// Update the tombstone sketch.\n\ti.seriesTSSketch.Add([]byte(k))\n\n\t// Remove from the index.\n\tdelete(i.series, k)\n\n\t// Remove the measurement's reference.\n\tseries.Measurement().DropSeries(series)\n\n\t// If the measurement no longer has any series, remove it as well.\n\tif !series.Measurement().HasSeries() {\n\t\ti.dropMeasurement(series.Measurement().Name)\n\t}\n\ti.mu.Unlock()\n\n\treturn nil\n}\n\n// ForEachMeasurementSeriesByExpr iterates over all series in a measurement filtered by an expression.\nfunc (i *Index) ForEachMeasurementSeriesByExpr(name []byte, expr influxql.Expr, fn func(tags models.Tags) 
error) error {\n\ti.mu.RLock()\n\tmm := i.measurements[string(name)]\n\ti.mu.RUnlock()\n\n\tif mm == nil {\n\t\treturn nil\n\t}\n\n\tif err := mm.ForEachSeriesByExpr(expr, fn); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// TagSets returns a list of tag sets.\nfunc (i *Index) TagSets(shardID uint64, name []byte, opt influxql.IteratorOptions) ([]*influxql.TagSet, error) {\n\ti.mu.RLock()\n\tdefer i.mu.RUnlock()\n\n\tmm := i.measurements[string(name)]\n\tif mm == nil {\n\t\treturn nil, nil\n\t}\n\n\ttagSets, err := mm.TagSets(shardID, opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn tagSets, nil\n}\n\nfunc (i *Index) SeriesKeys() []string {\n\ti.mu.RLock()\n\ts := make([]string, 0, len(i.series))\n\tfor k := range i.series {\n\t\ts = append(s, k)\n\t}\n\ti.mu.RUnlock()\n\treturn s\n}\n\n// SetFieldSet sets a shared field set from the engine.\nfunc (i *Index) SetFieldSet(*tsdb.MeasurementFieldSet) {}\n\n// SetFieldName adds a field name to a measurement.\nfunc (i *Index) SetFieldName(measurement []byte, name string) {\n\tm := i.CreateMeasurementIndexIfNotExists(measurement)\n\tm.SetFieldName(name)\n}\n\n// ForEachMeasurementName iterates over each measurement name.\nfunc (i *Index) ForEachMeasurementName(fn func(name []byte) error) error {\n\ti.mu.RLock()\n\tdefer i.mu.RUnlock()\n\n\tmms := make(Measurements, 0, len(i.measurements))\n\tfor _, m := range i.measurements {\n\t\tmms = append(mms, m)\n\t}\n\tsort.Sort(mms)\n\n\tfor _, m := range mms {\n\t\tif err := fn([]byte(m.Name)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (i *Index) MeasurementSeriesKeysByExpr(name []byte, condition influxql.Expr) ([][]byte, error) {\n\ti.mu.RLock()\n\tdefer i.mu.RUnlock()\n\n\tm := i.measurements[string(name)]\n\tif m == nil {\n\t\treturn nil, nil\n\t}\n\n\t// Return all series if no condition specified.\n\tif condition == nil {\n\t\treturn m.SeriesKeys(), nil\n\t}\n\n\t// Get series IDs that match the WHERE clause.\n\tids, filters, 
err := m.WalkWhereForSeriesIds(condition)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Delete boolean literal true filter expressions.\n\t// These are returned for `WHERE tagKey = 'tagVal'` type expressions and are okay.\n\tfilters.DeleteBoolLiteralTrues()\n\n\t// Check for unsupported field filters.\n\t// Any remaining filters means there were fields (e.g., `WHERE value = 1.2`).\n\tif filters.Len() > 0 {\n\t\treturn nil, errors.New(\"fields not supported in WHERE clause during deletion\")\n\t}\n\n\treturn m.SeriesKeysByID(ids), nil\n}\n\n// SeriesPointIterator returns an influxql iterator over all series.\nfunc (i *Index) SeriesPointIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t// Read and sort all measurements.\n\tmms := make(Measurements, 0, len(i.measurements))\n\tfor _, mm := range i.measurements {\n\t\tmms = append(mms, mm)\n\t}\n\tsort.Sort(mms)\n\n\treturn &seriesPointIterator{\n\t\tmms: mms,\n\t\tpoint: influxql.FloatPoint{\n\t\t\tAux: make([]interface{}, len(opt.Aux)),\n\t\t},\n\t\topt: opt,\n\t}, nil\n}\n\n// SnapshotTo is a no-op since this is an in-memory index.\nfunc (i *Index) SnapshotTo(path string) error { return nil }\n\n// AssignShard update the index to indicate that series k exists in the given shardID.\nfunc (i *Index) AssignShard(k string, shardID uint64) {\n\tss, _ := i.Series([]byte(k))\n\tif ss != nil {\n\t\tss.AssignShard(shardID)\n\t}\n}\n\n// UnassignShard updates the index to indicate that series k does not exist in\n// the given shardID.\nfunc (i *Index) UnassignShard(k string, shardID uint64) error {\n\tss, _ := i.Series([]byte(k))\n\tif ss != nil {\n\t\tif ss.Assigned(shardID) {\n\t\t\t// Remove the shard from any series\n\t\t\tss.UnassignShard(shardID)\n\n\t\t\t// If this series no longer has shards assigned, remove the series\n\t\t\tif ss.ShardN() == 0 {\n\t\t\t\t// Remove the series key from the index.\n\t\t\t\treturn i.DropSeries([]byte(k))\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// RemoveShard 
removes all references to shardID from any series or measurements\n// in the index.  If the shard was the only owner of data for the series, the series\n// is removed from the index.\nfunc (i *Index) RemoveShard(shardID uint64) {\n\tfor _, k := range i.SeriesKeys() {\n\t\ti.UnassignShard(k, shardID)\n\t}\n}\n\n// assignExistingSeries assigns the existings series to shardID and returns the series, names and tags that\n// do not exists yet.\nfunc (i *Index) assignExistingSeries(shardID uint64, keys, names [][]byte, tagsSlice []models.Tags) ([][]byte, [][]byte, []models.Tags) {\n\ti.mu.RLock()\n\tvar n int\n\tfor j, key := range keys {\n\t\tif ss, ok := i.series[string(key)]; !ok {\n\t\t\tkeys[n] = keys[j]\n\t\t\tnames[n] = names[j]\n\t\t\ttagsSlice[n] = tagsSlice[j]\n\t\t\tn++\n\t\t} else {\n\t\t\tss.AssignShard(shardID)\n\t\t}\n\t}\n\ti.mu.RUnlock()\n\treturn keys[:n], names[:n], tagsSlice[:n]\n}\n\n// Ensure index implements interface.\nvar _ tsdb.Index = &ShardIndex{}\n\n// ShardIndex represents a shim between the TSDB index interface and the shared\n// in-memory index. 
This is required because per-shard in-memory indexes will\n// grow the heap size too large.\ntype ShardIndex struct {\n\t*Index\n\n\tid  uint64 // shard id\n\topt tsdb.EngineOptions\n}\n\n// CreateSeriesListIfNotExists creates a list of series if they doesn't exist in bulk.\nfunc (idx *ShardIndex) CreateSeriesListIfNotExists(keys, names [][]byte, tagsSlice []models.Tags) error {\n\tkeys, names, tagsSlice = idx.assignExistingSeries(idx.id, keys, names, tagsSlice)\n\tif len(keys) == 0 {\n\t\treturn nil\n\t}\n\n\tvar reason string\n\tvar dropped int\n\tvar droppedKeys map[string]struct{}\n\n\t// Ensure that no tags go over the maximum cardinality.\n\tif maxValuesPerTag := idx.opt.Config.MaxValuesPerTag; maxValuesPerTag > 0 {\n\t\tvar n int\n\n\touter:\n\t\tfor i, name := range names {\n\t\t\ttags := tagsSlice[i]\n\t\t\tfor _, tag := range tags {\n\t\t\t\t// Skip if the tag value already exists.\n\t\t\t\tif idx.HasTagValue(name, tag.Key, tag.Value) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Read cardinality. 
Skip if we're below the threshold.\n\t\t\t\tn := idx.TagValueN(name, tag.Key)\n\t\t\t\tif n < maxValuesPerTag {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tdropped++\n\t\t\t\treason = fmt.Sprintf(\"max-values-per-tag limit exceeded (%d/%d): measurement=%q tag=%q value=%q\",\n\t\t\t\t\tn, maxValuesPerTag, name, string(tag.Key), string(tag.Value))\n\n\t\t\t\tif droppedKeys == nil {\n\t\t\t\t\tdroppedKeys = make(map[string]struct{})\n\t\t\t\t}\n\t\t\t\tdroppedKeys[string(keys[i])] = struct{}{}\n\t\t\t\tcontinue outer\n\t\t\t}\n\n\t\t\t// Increment success count if all checks complete.\n\t\t\tkeys[n], names[n], tagsSlice[n] = keys[i], names[i], tagsSlice[i]\n\t\t\tn++\n\t\t}\n\n\t\t// Slice to only include successful points.\n\t\tkeys, names, tagsSlice = keys[:n], names[:n], tagsSlice[:n]\n\t}\n\n\t// Write\n\tfor i := range keys {\n\t\tif err := idx.CreateSeriesIfNotExists(keys[i], names[i], tagsSlice[i]); err == errMaxSeriesPerDatabaseExceeded {\n\t\t\tdropped++\n\t\t\treason = fmt.Sprintf(\"max-series-per-database limit exceeded: (%d)\", idx.opt.Config.MaxSeriesPerDatabase)\n\t\t\tif droppedKeys == nil {\n\t\t\t\tdroppedKeys = make(map[string]struct{})\n\t\t\t}\n\t\t\tdroppedKeys[string(keys[i])] = struct{}{}\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Report partial writes back to shard.\n\tif dropped > 0 {\n\t\treturn &tsdb.PartialWriteError{\n\t\t\tReason:      reason,\n\t\t\tDropped:     dropped,\n\t\t\tDroppedKeys: droppedKeys,\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// InitializeSeries is called during startup.\n// This works the same as CreateSeriesIfNotExists except it ignore limit errors.\nfunc (i *ShardIndex) InitializeSeries(key, name []byte, tags models.Tags) error {\n\treturn i.Index.CreateSeriesIfNotExists(i.id, key, name, tags, &i.opt, true)\n}\n\nfunc (i *ShardIndex) CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error {\n\treturn i.Index.CreateSeriesIfNotExists(i.id, key, name, tags, &i.opt, false)\n}\n\n// 
TagSets returns a list of tag sets based on series filtering.\nfunc (i *ShardIndex) TagSets(name []byte, opt influxql.IteratorOptions) ([]*influxql.TagSet, error) {\n\treturn i.Index.TagSets(i.id, name, opt)\n}\n\n// NewShardIndex returns a new index for a shard.\nfunc NewShardIndex(id uint64, database, path string, opt tsdb.EngineOptions) tsdb.Index {\n\treturn &ShardIndex{\n\t\tIndex: opt.InmemIndex.(*Index),\n\t\tid:    id,\n\t\topt:   opt,\n\t}\n}\n\n// seriesPointIterator emits series as influxql points.\ntype seriesPointIterator struct {\n\tmms  Measurements\n\tkeys struct {\n\t\tbuf []string\n\t\ti   int\n\t}\n\n\tpoint influxql.FloatPoint // reusable point\n\topt   influxql.IteratorOptions\n}\n\n// Stats returns stats about the points processed.\nfunc (itr *seriesPointIterator) Stats() influxql.IteratorStats { return influxql.IteratorStats{} }\n\n// Close closes the iterator.\nfunc (itr *seriesPointIterator) Close() error { return nil }\n\n// Next emits the next point in the iterator.\nfunc (itr *seriesPointIterator) Next() (*influxql.FloatPoint, error) {\n\tfor {\n\t\t// Load next measurement's keys if there are no more remaining.\n\t\tif itr.keys.i >= len(itr.keys.buf) {\n\t\t\tif err := itr.nextKeys(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif len(itr.keys.buf) == 0 {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\n\t\t// Read the next key.\n\t\tkey := itr.keys.buf[itr.keys.i]\n\t\titr.keys.i++\n\n\t\t// Write auxiliary fields.\n\t\tfor i, f := range itr.opt.Aux {\n\t\t\tswitch f.Val {\n\t\t\tcase \"key\":\n\t\t\t\titr.point.Aux[i] = key\n\t\t\t}\n\t\t}\n\t\treturn &itr.point, nil\n\t}\n}\n\n// nextKeys reads all keys for the next measurement.\nfunc (itr *seriesPointIterator) nextKeys() error {\n\tfor {\n\t\t// Ensure previous keys are cleared out.\n\t\titr.keys.i, itr.keys.buf = 0, itr.keys.buf[:0]\n\n\t\t// Read next measurement.\n\t\tif len(itr.mms) == 0 {\n\t\t\treturn nil\n\t\t}\n\t\tmm := itr.mms[0]\n\t\titr.mms = itr.mms[1:]\n\n\t\t// 
Read all series keys.\n\t\tids, err := mm.SeriesIDsAllOrByExpr(itr.opt.Condition)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t} else if len(ids) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\titr.keys.buf = mm.AppendSeriesKeysByID(itr.keys.buf, ids)\n\t\tsort.Strings(itr.keys.buf)\n\n\t\treturn nil\n\t}\n}\n\n// errMaxSeriesPerDatabaseExceeded is a marker error returned during series creation\n// to indicate that a new series would exceed the limits of the database.\nvar errMaxSeriesPerDatabaseExceeded = errors.New(\"max series per database exceeded\")\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta.go",
    "content": "package inmem\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"sort\"\n\t\"sync\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n)\n\n// Measurement represents a collection of time series in a database. It also\n// contains in memory structures for indexing tags. Exported functions are\n// goroutine safe while un-exported functions assume the caller will use the\n// appropriate locks.\ntype Measurement struct {\n\tdatabase string\n\tName     string `json:\"name,omitempty\"`\n\tname     []byte // cached version as []byte\n\n\tmu         sync.RWMutex\n\tfieldNames map[string]struct{}\n\n\t// in-memory index fields\n\tseriesByID          map[uint64]*Series              // lookup table for series by their id\n\tseriesByTagKeyValue map[string]map[string]SeriesIDs // map from tag key to value to sorted set of series ids\n\n\t// lazyily created sorted series IDs\n\tsortedSeriesIDs SeriesIDs // sorted list of series IDs in this measurement\n}\n\n// NewMeasurement allocates and initializes a new Measurement.\nfunc NewMeasurement(database, name string) *Measurement {\n\treturn &Measurement{\n\t\tdatabase:   database,\n\t\tName:       name,\n\t\tname:       []byte(name),\n\t\tfieldNames: make(map[string]struct{}),\n\n\t\tseriesByID:          make(map[uint64]*Series),\n\t\tseriesByTagKeyValue: make(map[string]map[string]SeriesIDs),\n\t}\n}\n\nfunc (m *Measurement) HasField(name string) bool {\n\tm.mu.RLock()\n\t_, hasField := m.fieldNames[name]\n\tm.mu.RUnlock()\n\treturn hasField\n}\n\n// SeriesByID returns a series by identifier.\nfunc (m *Measurement) SeriesByID(id uint64) *Series {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\treturn m.seriesByID[id]\n}\n\n// SeriesByIDMap returns the internal seriesByID map.\nfunc (m *Measurement) SeriesByIDMap() map[uint64]*Series {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\treturn m.seriesByID\n}\n\n// SeriesByIDSlice returns a 
list of series by identifiers.\nfunc (m *Measurement) SeriesByIDSlice(ids []uint64) []*Series {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\ta := make([]*Series, len(ids))\n\tfor i, id := range ids {\n\t\ta[i] = m.seriesByID[id]\n\t}\n\treturn a\n}\n\n// AppendSeriesKeysByID appends keys for a list of series ids to a buffer.\nfunc (m *Measurement) AppendSeriesKeysByID(dst []string, ids []uint64) []string {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\tfor _, id := range ids {\n\t\tif s := m.seriesByID[id]; s != nil {\n\t\t\tdst = append(dst, s.Key)\n\t\t}\n\t}\n\treturn dst\n}\n\n// SeriesKeysByID returns the a list of keys for a set of ids.\nfunc (m *Measurement) SeriesKeysByID(ids SeriesIDs) [][]byte {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\tkeys := make([][]byte, 0, len(ids))\n\tfor _, id := range ids {\n\t\ts := m.seriesByID[id]\n\t\tif s == nil {\n\t\t\tcontinue\n\t\t}\n\t\tkeys = append(keys, []byte(s.Key))\n\t}\n\treturn keys\n}\n\n// SeriesKeys returns the keys of every series in this measurement\nfunc (m *Measurement) SeriesKeys() [][]byte {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\tkeys := make([][]byte, 0, len(m.seriesByID))\n\tfor _, s := range m.seriesByID {\n\t\tkeys = append(keys, []byte(s.Key))\n\t}\n\treturn keys\n}\n\nfunc (m *Measurement) SeriesIDs() SeriesIDs {\n\tm.mu.RLock()\n\tif len(m.sortedSeriesIDs) == len(m.seriesByID) {\n\t\ts := m.sortedSeriesIDs\n\t\tm.mu.RUnlock()\n\t\treturn s\n\t}\n\tm.mu.RUnlock()\n\n\tm.mu.Lock()\n\tif len(m.sortedSeriesIDs) == len(m.seriesByID) {\n\t\ts := m.sortedSeriesIDs\n\t\tm.mu.Unlock()\n\t\treturn s\n\t}\n\n\tm.sortedSeriesIDs = m.sortedSeriesIDs[:0]\n\tif cap(m.sortedSeriesIDs) < len(m.seriesByID) {\n\t\tm.sortedSeriesIDs = make(SeriesIDs, 0, len(m.seriesByID))\n\t}\n\n\tfor k := range m.seriesByID {\n\t\tm.sortedSeriesIDs = append(m.sortedSeriesIDs, k)\n\t}\n\tsort.Sort(m.sortedSeriesIDs)\n\ts := m.sortedSeriesIDs\n\tm.mu.Unlock()\n\treturn s\n}\n\n// HasTagKey returns true if at least one series in this 
measurement has written a value for the passed in tag key\nfunc (m *Measurement) HasTagKey(k string) bool {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\t_, hasTag := m.seriesByTagKeyValue[k]\n\treturn hasTag\n}\n\nfunc (m *Measurement) HasTagKeyValue(k, v []byte) bool {\n\tm.mu.RLock()\n\tif vals, ok := m.seriesByTagKeyValue[string(k)]; ok {\n\t\t_, ok := vals[string(v)]\n\t\tm.mu.RUnlock()\n\t\treturn ok\n\t}\n\tm.mu.RUnlock()\n\treturn false\n}\n\n// HasSeries returns true if there is at least 1 series under this measurement.\nfunc (m *Measurement) HasSeries() bool {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\treturn len(m.seriesByID) > 0\n}\n\n// Cardinality returns the number of values associated with the given tag key.\nfunc (m *Measurement) Cardinality(key string) int {\n\tvar n int\n\tm.mu.RLock()\n\tn = m.cardinality(key)\n\tm.mu.RUnlock()\n\treturn n\n}\n\nfunc (m *Measurement) cardinality(key string) int {\n\treturn len(m.seriesByTagKeyValue[key])\n}\n\n// CardinalityBytes returns the number of values associated with the given tag key.\nfunc (m *Measurement) CardinalityBytes(key []byte) int {\n\tvar n int\n\tm.mu.RLock()\n\tn = len(m.seriesByTagKeyValue[string(key)])\n\tm.mu.RUnlock()\n\treturn n\n}\n\n// AddSeries adds a series to the measurement's index.\n// It returns true if the series was added successfully or false if the series was already present.\nfunc (m *Measurement) AddSeries(s *Series) bool {\n\tm.mu.RLock()\n\tif _, ok := m.seriesByID[s.ID]; ok {\n\t\tm.mu.RUnlock()\n\t\treturn false\n\t}\n\tm.mu.RUnlock()\n\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tif _, ok := m.seriesByID[s.ID]; ok {\n\t\treturn false\n\t}\n\n\tm.seriesByID[s.ID] = s\n\n\tif len(m.seriesByID) == 1 || (len(m.sortedSeriesIDs) == len(m.seriesByID)-1 && s.ID > m.sortedSeriesIDs[len(m.sortedSeriesIDs)-1]) {\n\t\tm.sortedSeriesIDs = append(m.sortedSeriesIDs, s.ID)\n\t}\n\n\t// add this series id to the tag index on the measurement\n\ts.ForEachTag(func(t models.Tag) {\n\t\tvalueMap 
:= m.seriesByTagKeyValue[string(t.Key)]\n\t\tif valueMap == nil {\n\t\t\tvalueMap = make(map[string]SeriesIDs)\n\t\t\tm.seriesByTagKeyValue[string(t.Key)] = valueMap\n\t\t}\n\t\tids := valueMap[string(t.Value)]\n\t\tids = append(ids, s.ID)\n\n\t\t// most of the time the series ID will be higher than all others because it's a new\n\t\t// series. So don't do the sort if we don't have to.\n\t\tif len(ids) > 1 && ids[len(ids)-1] < ids[len(ids)-2] {\n\t\t\tsort.Sort(ids)\n\t\t}\n\t\tvalueMap[string(t.Value)] = ids\n\t})\n\n\treturn true\n}\n\n// DropSeries removes a series from the measurement's index.\nfunc (m *Measurement) DropSeries(series *Series) {\n\tseriesID := series.ID\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tif _, ok := m.seriesByID[seriesID]; !ok {\n\t\treturn\n\t}\n\tdelete(m.seriesByID, seriesID)\n\n\t// clear our lazily sorted set of ids\n\tm.sortedSeriesIDs = m.sortedSeriesIDs[:0]\n\n\t// remove this series id from the tag index on the measurement\n\t// s.seriesByTagKeyValue is defined as map[string]map[string]SeriesIDs\n\tseries.ForEachTag(func(t models.Tag) {\n\t\tvalues := m.seriesByTagKeyValue[string(t.Key)][string(t.Value)]\n\t\tids := filter(values, seriesID)\n\t\t// Check to see if we have any ids, if not, remove the key\n\t\tif len(ids) == 0 {\n\t\t\tdelete(m.seriesByTagKeyValue[string(t.Key)], string(t.Value))\n\t\t} else {\n\t\t\tm.seriesByTagKeyValue[string(t.Key)][string(t.Value)] = ids\n\t\t}\n\n\t\t// If we have no values, then we delete the key\n\t\tif len(m.seriesByTagKeyValue[string(t.Key)]) == 0 {\n\t\t\tdelete(m.seriesByTagKeyValue, string(t.Key))\n\t\t}\n\t})\n\n\treturn\n}\n\n// filters walks the where clause of a select statement and returns a map with all series ids\n// matching the where clause and any filter expression that should be applied to each\nfunc (m *Measurement) filters(condition influxql.Expr) ([]uint64, map[uint64]influxql.Expr, error) {\n\tif condition == nil || influxql.OnlyTimeExpr(condition) {\n\t\treturn 
m.SeriesIDs(), nil, nil\n\t}\n\treturn m.WalkWhereForSeriesIds(condition)\n}\n\n// ForEachSeriesByExpr iterates over all series filtered by condition.\nfunc (m *Measurement) ForEachSeriesByExpr(condition influxql.Expr, fn func(tags models.Tags) error) error {\n\t// Retrieve matching series ids.\n\tids, _, err := m.filters(condition)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Iterate over each series.\n\tfor _, id := range ids {\n\t\ts := m.SeriesByID(id)\n\t\tif err := fn(s.Tags()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// TagSets returns the unique tag sets that exist for the given tag keys. This is used to determine\n// what composite series will be created by a group by. i.e. \"group by region\" should return:\n// {\"region\":\"uswest\"}, {\"region\":\"useast\"}\n// or region, service returns\n// {\"region\": \"uswest\", \"service\": \"redis\"}, {\"region\": \"uswest\", \"service\": \"mysql\"}, etc...\n// This will also populate the TagSet objects with the series IDs that match each tagset and any\n// influx filter expression that goes with the series\n// TODO: this shouldn't be exported. However, until tx.go and the engine get refactored into tsdb, we need it.\nfunc (m *Measurement) TagSets(shardID uint64, opt influxql.IteratorOptions) ([]*influxql.TagSet, error) {\n\t// get the unique set of series ids and the filters that should be applied to each\n\tids, filters, err := m.filters(opt.Condition)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar dims []string\n\tif len(opt.Dimensions) > 0 {\n\t\tdims = make([]string, len(opt.Dimensions))\n\t\tcopy(dims, opt.Dimensions)\n\t\tsort.Strings(dims)\n\t}\n\n\tm.mu.RLock()\n\t// For every series, get the tag values for the requested tag keys i.e. dimensions. This is the\n\t// TagSet for that series. 
Series with the same TagSet are then grouped together, because for the\n\t// purpose of GROUP BY they are part of the same composite series.\n\ttagSets := make(map[string]*influxql.TagSet, 64)\n\tvar seriesN int\n\tfor _, id := range ids {\n\t\t// Abort if the query was killed\n\t\tselect {\n\t\tcase <-opt.InterruptCh:\n\t\t\tm.mu.RUnlock()\n\t\t\treturn nil, influxql.ErrQueryInterrupted\n\t\tdefault:\n\t\t}\n\n\t\tif opt.MaxSeriesN > 0 && seriesN > opt.MaxSeriesN {\n\t\t\tm.mu.RUnlock()\n\t\t\treturn nil, fmt.Errorf(\"max-select-series limit exceeded: (%d/%d)\", seriesN, opt.MaxSeriesN)\n\t\t}\n\n\t\ts := m.seriesByID[id]\n\t\tif !s.Assigned(shardID) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif opt.Authorizer != nil && !opt.Authorizer.AuthorizeSeriesRead(m.database, m.name, s.Tags()) {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar tagsAsKey []byte\n\t\tif len(dims) > 0 {\n\t\t\ttagsAsKey = tsdb.MakeTagsKey(dims, s.Tags())\n\t\t}\n\n\t\ttagSet, ok := tagSets[string(tagsAsKey)]\n\t\tif !ok {\n\t\t\t// This TagSet is new, create a new entry for it.\n\t\t\ttagSet = &influxql.TagSet{\n\t\t\t\tTags: nil,\n\t\t\t\tKey:  tagsAsKey,\n\t\t\t}\n\t\t\ttagSets[string(tagsAsKey)] = tagSet\n\t\t}\n\t\t// Associate the series and filter with the Tagset.\n\t\ttagSet.AddFilter(s.Key, filters[id])\n\t\tseriesN++\n\t}\n\t// Release the lock while we sort all the tags\n\tm.mu.RUnlock()\n\n\t// Sort the series in each tag set.\n\tfor _, t := range tagSets {\n\t\t// Abort if the query was killed\n\t\tselect {\n\t\tcase <-opt.InterruptCh:\n\t\t\treturn nil, influxql.ErrQueryInterrupted\n\t\tdefault:\n\t\t}\n\n\t\tsort.Sort(t)\n\t}\n\n\t// The TagSets have been created, as a map of TagSets. 
Just send\n\t// the values back as a slice, sorting for consistency.\n\tsortedTagsSets := make([]*influxql.TagSet, 0, len(tagSets))\n\tfor _, v := range tagSets {\n\t\tsortedTagsSets = append(sortedTagsSets, v)\n\t}\n\tsort.Sort(byTagKey(sortedTagsSets))\n\n\treturn sortedTagsSets, nil\n}\n\n// intersectSeriesFilters performs an intersection for two sets of ids and filter expressions.\nfunc intersectSeriesFilters(lids, rids SeriesIDs, lfilters, rfilters FilterExprs) (SeriesIDs, FilterExprs) {\n\t// We only want to allocate a slice and map of the smaller size.\n\tvar ids []uint64\n\tif len(lids) > len(rids) {\n\t\tids = make([]uint64, 0, len(rids))\n\t} else {\n\t\tids = make([]uint64, 0, len(lids))\n\t}\n\n\tvar filters FilterExprs\n\tif len(lfilters) > len(rfilters) {\n\t\tfilters = make(FilterExprs, len(rfilters))\n\t} else {\n\t\tfilters = make(FilterExprs, len(lfilters))\n\t}\n\n\t// They're in sorted order so advance the counter as needed.\n\t// This is, don't run comparisons against lower values that we've already passed.\n\tfor len(lids) > 0 && len(rids) > 0 {\n\t\tlid, rid := lids[0], rids[0]\n\t\tif lid == rid {\n\t\t\tids = append(ids, lid)\n\n\t\t\tvar expr influxql.Expr\n\t\t\tlfilter := lfilters[lid]\n\t\t\trfilter := rfilters[rid]\n\n\t\t\tif lfilter != nil && rfilter != nil {\n\t\t\t\tbe := &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.AND,\n\t\t\t\t\tLHS: lfilter,\n\t\t\t\t\tRHS: rfilter,\n\t\t\t\t}\n\t\t\t\texpr = influxql.Reduce(be, nil)\n\t\t\t} else if lfilter != nil {\n\t\t\t\texpr = lfilter\n\t\t\t} else if rfilter != nil {\n\t\t\t\texpr = rfilter\n\t\t\t}\n\n\t\t\tif expr != nil {\n\t\t\t\tfilters[lid] = expr\n\t\t\t}\n\t\t\tlids, rids = lids[1:], rids[1:]\n\t\t} else if lid < rid {\n\t\t\tlids = lids[1:]\n\t\t} else {\n\t\t\trids = rids[1:]\n\t\t}\n\t}\n\treturn ids, filters\n}\n\n// unionSeriesFilters performs a union for two sets of ids and filter expressions.\nfunc unionSeriesFilters(lids, rids SeriesIDs, lfilters, rfilters FilterExprs) 
(SeriesIDs, FilterExprs) {\n\tids := make([]uint64, 0, len(lids)+len(rids))\n\n\t// Setup the filters with the smallest size since we will discard filters\n\t// that do not have a match on the other side.\n\tvar filters FilterExprs\n\tif len(lfilters) < len(rfilters) {\n\t\tfilters = make(FilterExprs, len(lfilters))\n\t} else {\n\t\tfilters = make(FilterExprs, len(rfilters))\n\t}\n\n\tfor len(lids) > 0 && len(rids) > 0 {\n\t\tlid, rid := lids[0], rids[0]\n\t\tif lid == rid {\n\t\t\tids = append(ids, lid)\n\n\t\t\t// If one side does not have a filter, then the series has been\n\t\t\t// included on one side of the OR with no condition. Eliminate the\n\t\t\t// filter in this case.\n\t\t\tvar expr influxql.Expr\n\t\t\tlfilter := lfilters[lid]\n\t\t\trfilter := rfilters[rid]\n\t\t\tif lfilter != nil && rfilter != nil {\n\t\t\t\tbe := &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.OR,\n\t\t\t\t\tLHS: lfilter,\n\t\t\t\t\tRHS: rfilter,\n\t\t\t\t}\n\t\t\t\texpr = influxql.Reduce(be, nil)\n\t\t\t}\n\n\t\t\tif expr != nil {\n\t\t\t\tfilters[lid] = expr\n\t\t\t}\n\t\t\tlids, rids = lids[1:], rids[1:]\n\t\t} else if lid < rid {\n\t\t\tids = append(ids, lid)\n\n\t\t\tfilter := lfilters[lid]\n\t\t\tif filter != nil {\n\t\t\t\tfilters[lid] = filter\n\t\t\t}\n\t\t\tlids = lids[1:]\n\t\t} else {\n\t\t\tids = append(ids, rid)\n\n\t\t\tfilter := rfilters[rid]\n\t\t\tif filter != nil {\n\t\t\t\tfilters[rid] = filter\n\t\t\t}\n\t\t\trids = rids[1:]\n\t\t}\n\t}\n\n\t// Now append the remainder.\n\tif len(lids) > 0 {\n\t\tfor i := 0; i < len(lids); i++ {\n\t\t\tids = append(ids, lids[i])\n\n\t\t\tfilter := lfilters[lids[i]]\n\t\t\tif filter != nil {\n\t\t\t\tfilters[lids[i]] = filter\n\t\t\t}\n\t\t}\n\t} else if len(rids) > 0 {\n\t\tfor i := 0; i < len(rids); i++ {\n\t\t\tids = append(ids, rids[i])\n\n\t\t\tfilter := rfilters[rids[i]]\n\t\t\tif filter != nil {\n\t\t\t\tfilters[rids[i]] = filter\n\t\t\t}\n\t\t}\n\t}\n\treturn ids, filters\n}\n\n// IDsForExpr returns the series IDs that 
are candidates to match the given expression.\nfunc (m *Measurement) IDsForExpr(n *influxql.BinaryExpr) SeriesIDs {\n\tids, _, _ := m.idsForExpr(n)\n\treturn ids\n}\n\n// idsForExpr returns a collection of series ids and a filter expression that should\n// be used to filter points from those series.\nfunc (m *Measurement) idsForExpr(n *influxql.BinaryExpr) (SeriesIDs, influxql.Expr, error) {\n\t// If this binary expression has another binary expression, then this\n\t// is some expression math and we should just pass it to the underlying query.\n\tif _, ok := n.LHS.(*influxql.BinaryExpr); ok {\n\t\treturn m.SeriesIDs(), n, nil\n\t} else if _, ok := n.RHS.(*influxql.BinaryExpr); ok {\n\t\treturn m.SeriesIDs(), n, nil\n\t}\n\n\t// Retrieve the variable reference from the correct side of the expression.\n\tname, ok := n.LHS.(*influxql.VarRef)\n\tvalue := n.RHS\n\tif !ok {\n\t\tname, ok = n.RHS.(*influxql.VarRef)\n\t\tif !ok {\n\t\t\treturn nil, nil, fmt.Errorf(\"invalid expression: %s\", n.String())\n\t\t}\n\t\tvalue = n.LHS\n\t}\n\n\t// For time literals, return all series IDs and \"true\" as the filter.\n\tif _, ok := value.(*influxql.TimeLiteral); ok || name.Val == \"time\" {\n\t\treturn m.SeriesIDs(), &influxql.BooleanLiteral{Val: true}, nil\n\t}\n\n\t// For fields, return all series IDs from this measurement and return\n\t// the expression passed in, as the filter.\n\tif name.Val != \"_name\" && ((name.Type == influxql.Unknown && m.HasField(name.Val)) || name.Type == influxql.AnyField || (name.Type != influxql.Tag && name.Type != influxql.Unknown)) {\n\t\treturn m.SeriesIDs(), n, nil\n\t} else if value, ok := value.(*influxql.VarRef); ok {\n\t\t// Check if the RHS is a variable and if it is a field.\n\t\tif value.Val != \"_name\" && ((value.Type == influxql.Unknown && m.HasField(value.Val)) || name.Type == influxql.AnyField || (value.Type != influxql.Tag && value.Type != influxql.Unknown)) {\n\t\t\treturn m.SeriesIDs(), n, nil\n\t\t}\n\t}\n\n\t// Retrieve list of 
series with this tag key.\n\ttagVals := m.seriesByTagKeyValue[name.Val]\n\n\t// if we're looking for series with a specific tag value\n\tif str, ok := value.(*influxql.StringLiteral); ok {\n\t\tvar ids SeriesIDs\n\n\t\t// Special handling for \"_name\" to match measurement name.\n\t\tif name.Val == \"_name\" {\n\t\t\tif (n.Op == influxql.EQ && str.Val == m.Name) || (n.Op == influxql.NEQ && str.Val != m.Name) {\n\t\t\t\treturn m.SeriesIDs(), nil, nil\n\t\t\t}\n\t\t\treturn nil, nil, nil\n\t\t}\n\n\t\tif n.Op == influxql.EQ {\n\t\t\tif str.Val != \"\" {\n\t\t\t\t// return series that have a tag of specific value.\n\t\t\t\tids = tagVals[str.Val]\n\t\t\t} else {\n\t\t\t\t// Make a copy of all series ids and mark the ones we need to evict.\n\t\t\t\tseriesIDs := newEvictSeriesIDs(m.SeriesIDs())\n\n\t\t\t\t// Go through each slice and mark the values we find as zero so\n\t\t\t\t// they can be removed later.\n\t\t\t\tfor _, a := range tagVals {\n\t\t\t\t\tseriesIDs.mark(a)\n\t\t\t\t}\n\n\t\t\t\t// Make a new slice with only the remaining ids.\n\t\t\t\tids = seriesIDs.evict()\n\t\t\t}\n\t\t} else if n.Op == influxql.NEQ {\n\t\t\tif str.Val != \"\" {\n\t\t\t\tids = m.SeriesIDs().Reject(tagVals[str.Val])\n\t\t\t} else {\n\t\t\t\tfor k := range tagVals {\n\t\t\t\t\tids = append(ids, tagVals[k]...)\n\t\t\t\t}\n\t\t\t\tsort.Sort(ids)\n\t\t\t}\n\t\t}\n\t\treturn ids, nil, nil\n\t}\n\n\t// if we're looking for series with a tag value that matches a regex\n\tif re, ok := value.(*influxql.RegexLiteral); ok {\n\t\tvar ids SeriesIDs\n\n\t\t// Special handling for \"_name\" to match measurement name.\n\t\tif name.Val == \"_name\" {\n\t\t\tmatch := re.Val.MatchString(m.Name)\n\t\t\tif (n.Op == influxql.EQREGEX && match) || (n.Op == influxql.NEQREGEX && !match) {\n\t\t\t\treturn m.SeriesIDs(), &influxql.BooleanLiteral{Val: true}, nil\n\t\t\t}\n\t\t\treturn nil, nil, nil\n\t\t}\n\n\t\t// Check if we match the empty string to see if we should include series\n\t\t// that are missing the 
tag.\n\t\tempty := re.Val.MatchString(\"\")\n\n\t\t// Gather the series that match the regex. If we should include the empty string,\n\t\t// start with the list of all series and reject series that don't match our condition.\n\t\t// If we should not include the empty string, include series that match our condition.\n\t\tif empty && n.Op == influxql.EQREGEX {\n\t\t\t// See comments above for EQ with a StringLiteral.\n\t\t\tseriesIDs := newEvictSeriesIDs(m.SeriesIDs())\n\t\t\tfor k := range tagVals {\n\t\t\t\tif !re.Val.MatchString(k) {\n\t\t\t\t\tseriesIDs.mark(tagVals[k])\n\t\t\t\t}\n\t\t\t}\n\t\t\tids = seriesIDs.evict()\n\t\t} else if empty && n.Op == influxql.NEQREGEX {\n\t\t\tids = make(SeriesIDs, 0, len(m.SeriesIDs()))\n\t\t\tfor k := range tagVals {\n\t\t\t\tif !re.Val.MatchString(k) {\n\t\t\t\t\tids = append(ids, tagVals[k]...)\n\t\t\t\t}\n\t\t\t}\n\t\t\tsort.Sort(ids)\n\t\t} else if !empty && n.Op == influxql.EQREGEX {\n\t\t\tids = make(SeriesIDs, 0, len(m.SeriesIDs()))\n\t\t\tfor k := range tagVals {\n\t\t\t\tif re.Val.MatchString(k) {\n\t\t\t\t\tids = append(ids, tagVals[k]...)\n\t\t\t\t}\n\t\t\t}\n\t\t\tsort.Sort(ids)\n\t\t} else if !empty && n.Op == influxql.NEQREGEX {\n\t\t\t// See comments above for EQ with a StringLiteral.\n\t\t\tseriesIDs := newEvictSeriesIDs(m.SeriesIDs())\n\t\t\tfor k := range tagVals {\n\t\t\t\tif re.Val.MatchString(k) {\n\t\t\t\t\tseriesIDs.mark(tagVals[k])\n\t\t\t\t}\n\t\t\t}\n\t\t\tids = seriesIDs.evict()\n\t\t}\n\t\treturn ids, nil, nil\n\t}\n\n\t// compare tag values\n\tif ref, ok := value.(*influxql.VarRef); ok {\n\t\tvar ids SeriesIDs\n\n\t\tif n.Op == influxql.NEQ {\n\t\t\tids = m.SeriesIDs()\n\t\t}\n\n\t\trhsTagVals := m.seriesByTagKeyValue[ref.Val]\n\t\tfor k := range tagVals {\n\t\t\ttags := tagVals[k].Intersect(rhsTagVals[k])\n\t\t\tif n.Op == influxql.EQ {\n\t\t\t\tids = ids.Union(tags)\n\t\t\t} else if n.Op == influxql.NEQ {\n\t\t\t\tids = ids.Reject(tags)\n\t\t\t}\n\t\t}\n\t\treturn ids, nil, nil\n\t}\n\n\tif n.Op 
== influxql.NEQ || n.Op == influxql.NEQREGEX {\n\t\treturn m.SeriesIDs(), nil, nil\n\t}\n\treturn nil, nil, nil\n}\n\n// FilterExprs represents a map of series IDs to filter expressions.\ntype FilterExprs map[uint64]influxql.Expr\n\n// DeleteBoolLiteralTrues deletes all elements whose filter expression is a boolean literal true.\nfunc (fe FilterExprs) DeleteBoolLiteralTrues() {\n\tfor id, expr := range fe {\n\t\tif e, ok := expr.(*influxql.BooleanLiteral); ok && e.Val {\n\t\t\tdelete(fe, id)\n\t\t}\n\t}\n}\n\n// Len returns the number of elements.\nfunc (fe FilterExprs) Len() int {\n\tif fe == nil {\n\t\treturn 0\n\t}\n\treturn len(fe)\n}\n\n// WalkWhereForSeriesIds recursively walks the WHERE clause and returns an ordered set of series IDs and\n// a map from those series IDs to filter expressions that should be used to limit points returned in\n// the final query result.\nfunc (m *Measurement) WalkWhereForSeriesIds(expr influxql.Expr) (SeriesIDs, FilterExprs, error) {\n\tswitch n := expr.(type) {\n\tcase *influxql.BinaryExpr:\n\t\tswitch n.Op {\n\t\tcase influxql.EQ, influxql.NEQ, influxql.LT, influxql.LTE, influxql.GT, influxql.GTE, influxql.EQREGEX, influxql.NEQREGEX:\n\t\t\t// Get the series IDs and filter expression for the tag or field comparison.\n\t\t\tids, expr, err := m.idsForExpr(n)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tif len(ids) == 0 {\n\t\t\t\treturn ids, nil, nil\n\t\t\t}\n\n\t\t\t// If the expression is a boolean literal that is true, ignore it.\n\t\t\tif b, ok := expr.(*influxql.BooleanLiteral); ok && b.Val {\n\t\t\t\texpr = nil\n\t\t\t}\n\n\t\t\tvar filters FilterExprs\n\t\t\tif expr != nil {\n\t\t\t\tfilters = make(FilterExprs, len(ids))\n\t\t\t\tfor _, id := range ids {\n\t\t\t\t\tfilters[id] = expr\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn ids, filters, nil\n\t\tcase influxql.AND, influxql.OR:\n\t\t\t// Get the series IDs and filter expressions for the LHS.\n\t\t\tlids, lfilters, err := 
m.WalkWhereForSeriesIds(n.LHS)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\t// Get the series IDs and filter expressions for the RHS.\n\t\t\trids, rfilters, err := m.WalkWhereForSeriesIds(n.RHS)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\t// Combine the series IDs from the LHS and RHS.\n\t\t\tif n.Op == influxql.AND {\n\t\t\t\tids, filters := intersectSeriesFilters(lids, rids, lfilters, rfilters)\n\t\t\t\treturn ids, filters, nil\n\t\t\t} else {\n\t\t\t\tids, filters := unionSeriesFilters(lids, rids, lfilters, rfilters)\n\t\t\t\treturn ids, filters, nil\n\t\t\t}\n\t\t}\n\n\t\tids, _, err := m.idsForExpr(n)\n\t\treturn ids, nil, err\n\tcase *influxql.ParenExpr:\n\t\t// walk down the tree\n\t\treturn m.WalkWhereForSeriesIds(n.Expr)\n\tdefault:\n\t\treturn nil, nil, nil\n\t}\n}\n\n// expandExpr returns a list of expressions expanded by all possible tag\n// combinations.\nfunc (m *Measurement) expandExpr(expr influxql.Expr) []tagSetExpr {\n\t// Retrieve list of unique values for each tag.\n\tvaluesByTagKey := m.uniqueTagValues(expr)\n\n\t// Convert keys to slices.\n\tkeys := make([]string, 0, len(valuesByTagKey))\n\tfor key := range valuesByTagKey {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Strings(keys)\n\n\t// Order uniques by key.\n\tuniques := make([][]string, len(keys))\n\tfor i, key := range keys {\n\t\tuniques[i] = valuesByTagKey[key]\n\t}\n\n\t// Reduce a condition for each combination of tag values.\n\treturn expandExprWithValues(expr, keys, []tagExpr{}, uniques, 0)\n}\n\nfunc expandExprWithValues(expr influxql.Expr, keys []string, tagExprs []tagExpr, uniques [][]string, index int) []tagSetExpr {\n\t// If we have no more keys left then execute the reduction and return.\n\tif index == len(keys) {\n\t\t// Create a map of tag key/values.\n\t\tm := make(map[string]*string, len(keys))\n\t\tfor i, key := range keys {\n\t\t\tif tagExprs[i].op == influxql.EQ {\n\t\t\t\tm[key] = &tagExprs[i].values[0]\n\t\t\t} else 
{\n\t\t\t\tm[key] = nil\n\t\t\t}\n\t\t}\n\n\t\t// TODO: Rewrite full expressions instead of VarRef replacement.\n\n\t\t// Reduce using the current tag key/value set.\n\t\t// Ignore it if reduces down to \"false\".\n\t\te := influxql.Reduce(expr, &tagValuer{tags: m})\n\t\tif e, ok := e.(*influxql.BooleanLiteral); ok && !e.Val {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn []tagSetExpr{{values: copyTagExprs(tagExprs), expr: e}}\n\t}\n\n\t// Otherwise expand for each possible equality value of the key.\n\tvar exprs []tagSetExpr\n\tfor _, v := range uniques[index] {\n\t\texprs = append(exprs, expandExprWithValues(expr, keys, append(tagExprs, tagExpr{keys[index], []string{v}, influxql.EQ}), uniques, index+1)...)\n\t}\n\texprs = append(exprs, expandExprWithValues(expr, keys, append(tagExprs, tagExpr{keys[index], uniques[index], influxql.NEQ}), uniques, index+1)...)\n\n\treturn exprs\n}\n\n// SeriesIDsAllOrByExpr walks an expressions for matching series IDs\n// or, if no expressions is given, returns all series IDs for the measurement.\nfunc (m *Measurement) SeriesIDsAllOrByExpr(expr influxql.Expr) (SeriesIDs, error) {\n\t// If no expression given or the measurement has no series,\n\t// we can take just return the ids or nil accordingly.\n\tif expr == nil {\n\t\treturn m.SeriesIDs(), nil\n\t}\n\n\tm.mu.RLock()\n\tl := len(m.seriesByID)\n\tm.mu.RUnlock()\n\tif l == 0 {\n\t\treturn nil, nil\n\t}\n\n\t// Get series IDs that match the WHERE clause.\n\tids, _, err := m.WalkWhereForSeriesIds(expr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ids, nil\n}\n\n// tagKeysByExpr extracts the tag keys wanted by the expression.\nfunc (m *Measurement) TagKeysByExpr(expr influxql.Expr) (map[string]struct{}, error) {\n\tswitch e := expr.(type) {\n\tcase *influxql.BinaryExpr:\n\t\tswitch e.Op {\n\t\tcase influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX:\n\t\t\ttag, ok := e.LHS.(*influxql.VarRef)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"left side of '%s' must be 
a tag key\", e.Op.String())\n\t\t\t} else if tag.Val != \"_tagKey\" {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\n\t\t\tif influxql.IsRegexOp(e.Op) {\n\t\t\t\tre, ok := e.RHS.(*influxql.RegexLiteral)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, fmt.Errorf(\"right side of '%s' must be a regular expression\", e.Op.String())\n\t\t\t\t}\n\t\t\t\treturn m.tagKeysByFilter(e.Op, \"\", re.Val), nil\n\t\t\t}\n\n\t\t\ts, ok := e.RHS.(*influxql.StringLiteral)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"right side of '%s' must be a tag value string\", e.Op.String())\n\t\t\t}\n\t\t\treturn m.tagKeysByFilter(e.Op, s.Val, nil), nil\n\n\t\tcase influxql.AND, influxql.OR:\n\t\t\tlhs, err := m.TagKeysByExpr(e.LHS)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\trhs, err := m.TagKeysByExpr(e.RHS)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif lhs != nil && rhs != nil {\n\t\t\t\tif e.Op == influxql.OR {\n\t\t\t\t\treturn stringSet(lhs).union(rhs), nil\n\t\t\t\t}\n\t\t\t\treturn stringSet(lhs).intersect(rhs), nil\n\t\t\t} else if lhs != nil {\n\t\t\t\treturn lhs, nil\n\t\t\t} else if rhs != nil {\n\t\t\t\treturn rhs, nil\n\t\t\t}\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid operator\")\n\t\t}\n\n\tcase *influxql.ParenExpr:\n\t\treturn m.TagKeysByExpr(e.Expr)\n\t}\n\n\treturn nil, fmt.Errorf(\"%#v\", expr)\n}\n\n// tagKeysByFilter will filter the tag keys for the measurement.\nfunc (m *Measurement) tagKeysByFilter(op influxql.Token, val string, regex *regexp.Regexp) stringSet {\n\tss := newStringSet()\n\tfor _, key := range m.TagKeys() {\n\t\tvar matched bool\n\t\tswitch op {\n\t\tcase influxql.EQ:\n\t\t\tmatched = key == val\n\t\tcase influxql.NEQ:\n\t\t\tmatched = key != val\n\t\tcase influxql.EQREGEX:\n\t\t\tmatched = regex.MatchString(key)\n\t\tcase influxql.NEQREGEX:\n\t\t\tmatched = !regex.MatchString(key)\n\t\t}\n\n\t\tif !matched {\n\t\t\tcontinue\n\t\t}\n\t\tss.add(key)\n\t}\n\treturn ss\n}\n\n// tagValuer is 
used during expression expansion to evaluate all sets of tag values.\ntype tagValuer struct {\n\ttags map[string]*string\n}\n\n// Value returns the string value of a tag and true if it's listed in the tagset.\nfunc (v *tagValuer) Value(name string) (interface{}, bool) {\n\tif value, ok := v.tags[name]; ok {\n\t\tif value == nil {\n\t\t\treturn nil, true\n\t\t}\n\t\treturn *value, true\n\t}\n\treturn nil, false\n}\n\n// tagSetExpr represents a set of tag keys/values and associated expression.\ntype tagSetExpr struct {\n\tvalues []tagExpr\n\texpr   influxql.Expr\n}\n\n// tagExpr represents one or more values assigned to a given tag.\ntype tagExpr struct {\n\tkey    string\n\tvalues []string\n\top     influxql.Token // EQ or NEQ\n}\n\nfunc copyTagExprs(a []tagExpr) []tagExpr {\n\tother := make([]tagExpr, len(a))\n\tcopy(other, a)\n\treturn other\n}\n\n// uniqueTagValues returns a list of unique tag values used in an expression.\nfunc (m *Measurement) uniqueTagValues(expr influxql.Expr) map[string][]string {\n\t// Track unique value per tag.\n\ttags := make(map[string]map[string]struct{})\n\n\t// Find all tag values referenced in the expression.\n\tinfluxql.WalkFunc(expr, func(n influxql.Node) {\n\t\tswitch n := n.(type) {\n\t\tcase *influxql.BinaryExpr:\n\t\t\t// Ignore operators that are not equality.\n\t\t\tif n.Op != influxql.EQ {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Extract ref and string literal.\n\t\t\tvar key, value string\n\t\t\tswitch lhs := n.LHS.(type) {\n\t\t\tcase *influxql.VarRef:\n\t\t\t\tif rhs, ok := n.RHS.(*influxql.StringLiteral); ok {\n\t\t\t\t\tkey, value = lhs.Val, rhs.Val\n\t\t\t\t}\n\t\t\tcase *influxql.StringLiteral:\n\t\t\t\tif rhs, ok := n.RHS.(*influxql.VarRef); ok {\n\t\t\t\t\tkey, value = rhs.Val, lhs.Val\n\t\t\t\t}\n\t\t\t}\n\t\t\tif key == \"\" {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Add value to set.\n\t\t\tif tags[key] == nil {\n\t\t\t\ttags[key] = make(map[string]struct{})\n\t\t\t}\n\t\t\ttags[key][value] = 
struct{}{}\n\t\t}\n\t})\n\n\t// Convert to map of slices.\n\tout := make(map[string][]string)\n\tfor k, values := range tags {\n\t\tout[k] = make([]string, 0, len(values))\n\t\tfor v := range values {\n\t\t\tout[k] = append(out[k], v)\n\t\t}\n\t\tsort.Strings(out[k])\n\t}\n\treturn out\n}\n\n// Measurements represents a list of *Measurement.\ntype Measurements []*Measurement\n\n// Len implements sort.Interface.\nfunc (a Measurements) Len() int { return len(a) }\n\n// Less implements sort.Interface.\nfunc (a Measurements) Less(i, j int) bool { return a[i].Name < a[j].Name }\n\n// Swap implements sort.Interface.\nfunc (a Measurements) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\nfunc (a Measurements) Intersect(other Measurements) Measurements {\n\tl := a\n\tr := other\n\n\t// we want to iterate through the shortest one and stop\n\tif len(other) < len(a) {\n\t\tl = other\n\t\tr = a\n\t}\n\n\t// they're in sorted order so advance the counter as needed.\n\t// That is, don't run comparisons against lower values that we've already passed\n\tvar i, j int\n\n\tresult := make(Measurements, 0, len(l))\n\tfor i < len(l) && j < len(r) {\n\t\tif l[i].Name == r[j].Name {\n\t\t\tresult = append(result, l[i])\n\t\t\ti++\n\t\t\tj++\n\t\t} else if l[i].Name < r[j].Name {\n\t\t\ti++\n\t\t} else {\n\t\t\tj++\n\t\t}\n\t}\n\n\treturn result\n}\n\nfunc (a Measurements) Union(other Measurements) Measurements {\n\tresult := make(Measurements, 0, len(a)+len(other))\n\tvar i, j int\n\tfor i < len(a) && j < len(other) {\n\t\tif a[i].Name == other[j].Name {\n\t\t\tresult = append(result, a[i])\n\t\t\ti++\n\t\t\tj++\n\t\t} else if a[i].Name < other[j].Name {\n\t\t\tresult = append(result, a[i])\n\t\t\ti++\n\t\t} else {\n\t\t\tresult = append(result, other[j])\n\t\t\tj++\n\t\t}\n\t}\n\n\t// now append the remainder\n\tif i < len(a) {\n\t\tresult = append(result, a[i:]...)\n\t} else if j < len(other) {\n\t\tresult = append(result, other[j:]...)\n\t}\n\n\treturn result\n}\n\n// Series belong to a 
Measurement and represent unique time series in a database.\ntype Series struct {\n\tmu          sync.RWMutex\n\tKey         string\n\ttags        models.Tags\n\tID          uint64\n\tmeasurement *Measurement\n\tshardIDs    map[uint64]struct{} // shards that have this series defined\n}\n\n// NewSeries returns an initialized series struct\nfunc NewSeries(key []byte, tags models.Tags) *Series {\n\treturn &Series{\n\t\tKey:      string(key),\n\t\ttags:     tags,\n\t\tshardIDs: make(map[uint64]struct{}),\n\t}\n}\n\nfunc (s *Series) AssignShard(shardID uint64) {\n\ts.mu.RLock()\n\t_, ok := s.shardIDs[shardID]\n\ts.mu.RUnlock()\n\n\tif ok {\n\t\treturn\n\t}\n\n\ts.mu.Lock()\n\t// Skip the existence check under the write lock because we're just storing\n\t// and empty struct.\n\ts.shardIDs[shardID] = struct{}{}\n\ts.mu.Unlock()\n}\n\nfunc (s *Series) UnassignShard(shardID uint64) {\n\ts.mu.Lock()\n\tdelete(s.shardIDs, shardID)\n\ts.mu.Unlock()\n}\n\nfunc (s *Series) Assigned(shardID uint64) bool {\n\ts.mu.RLock()\n\t_, ok := s.shardIDs[shardID]\n\ts.mu.RUnlock()\n\treturn ok\n}\n\nfunc (s *Series) ShardN() int {\n\ts.mu.RLock()\n\tn := len(s.shardIDs)\n\ts.mu.RUnlock()\n\treturn n\n}\n\n// Measurement returns the measurement on the series.\nfunc (s *Series) Measurement() *Measurement {\n\treturn s.measurement\n}\n\n// SetMeasurement sets the measurement on the series.\nfunc (s *Series) SetMeasurement(m *Measurement) {\n\ts.measurement = m\n}\n\n// ForEachTag executes fn for every tag. 
Iteration occurs under lock.\nfunc (s *Series) ForEachTag(fn func(models.Tag)) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\tfor _, t := range s.tags {\n\t\tfn(t)\n\t}\n}\n\n// Tags returns a copy of the tags under lock.\nfunc (s *Series) Tags() models.Tags {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\treturn s.tags\n}\n\n// CopyTags clones the tags on the series in-place,\nfunc (s *Series) CopyTags() {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\ts.tags = s.tags.Clone()\n}\n\n// GetTagString returns a tag value under lock.\nfunc (s *Series) GetTagString(key string) string {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\treturn s.tags.GetString(key)\n}\n\n// SeriesIDs is a convenience type for sorting, checking equality, and doing\n// union and intersection of collections of series ids.\ntype SeriesIDs []uint64\n\n// Len implements sort.Interface.\nfunc (a SeriesIDs) Len() int { return len(a) }\n\n// Less implements sort.Interface.\nfunc (a SeriesIDs) Less(i, j int) bool { return a[i] < a[j] }\n\n// Swap implements sort.Interface.\nfunc (a SeriesIDs) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\n// Equals assumes that both are sorted.\nfunc (a SeriesIDs) Equals(other SeriesIDs) bool {\n\tif len(a) != len(other) {\n\t\treturn false\n\t}\n\tfor i, s := range other {\n\t\tif a[i] != s {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// Intersect returns a new collection of series ids in sorted order that is the intersection of the two.\n// The two collections must already be sorted.\nfunc (a SeriesIDs) Intersect(other SeriesIDs) SeriesIDs {\n\tl := a\n\tr := other\n\n\t// we want to iterate through the shortest one and stop\n\tif len(other) < len(a) {\n\t\tl = other\n\t\tr = a\n\t}\n\n\t// they're in sorted order so advance the counter as needed.\n\t// That is, don't run comparisons against lower values that we've already passed\n\tvar i, j int\n\n\tids := make([]uint64, 0, len(l))\n\tfor i < len(l) && j < len(r) {\n\t\tif l[i] == r[j] {\n\t\t\tids = append(ids, 
l[i])\n\t\t\ti++\n\t\t\tj++\n\t\t} else if l[i] < r[j] {\n\t\t\ti++\n\t\t} else {\n\t\t\tj++\n\t\t}\n\t}\n\n\treturn SeriesIDs(ids)\n}\n\n// Union returns a new collection of series ids in sorted order that is the union of the two.\n// The two collections must already be sorted.\nfunc (a SeriesIDs) Union(other SeriesIDs) SeriesIDs {\n\tl := a\n\tr := other\n\tids := make([]uint64, 0, len(l)+len(r))\n\tvar i, j int\n\tfor i < len(l) && j < len(r) {\n\t\tif l[i] == r[j] {\n\t\t\tids = append(ids, l[i])\n\t\t\ti++\n\t\t\tj++\n\t\t} else if l[i] < r[j] {\n\t\t\tids = append(ids, l[i])\n\t\t\ti++\n\t\t} else {\n\t\t\tids = append(ids, r[j])\n\t\t\tj++\n\t\t}\n\t}\n\n\t// now append the remainder\n\tif i < len(l) {\n\t\tids = append(ids, l[i:]...)\n\t} else if j < len(r) {\n\t\tids = append(ids, r[j:]...)\n\t}\n\n\treturn ids\n}\n\n// Reject returns a new collection of series ids in sorted order with the passed in set removed from the original.\n// This is useful for the NOT operator. The two collections must already be sorted.\nfunc (a SeriesIDs) Reject(other SeriesIDs) SeriesIDs {\n\tl := a\n\tr := other\n\tvar i, j int\n\n\tids := make([]uint64, 0, len(l))\n\tfor i < len(l) && j < len(r) {\n\t\tif l[i] == r[j] {\n\t\t\ti++\n\t\t\tj++\n\t\t} else if l[i] < r[j] {\n\t\t\tids = append(ids, l[i])\n\t\t\ti++\n\t\t} else {\n\t\t\tj++\n\t\t}\n\t}\n\n\t// Append the remainder\n\tif i < len(l) {\n\t\tids = append(ids, l[i:]...)\n\t}\n\n\treturn SeriesIDs(ids)\n}\n\n// seriesID is a series id that may or may not have been evicted from the\n// current id list.\ntype seriesID struct {\n\tval   uint64\n\tevict bool\n}\n\n// evictSeriesIDs is a slice of SeriesIDs with an extra field to mark if the\n// field should be evicted or not.\ntype evictSeriesIDs struct {\n\tids []seriesID\n\tsz  int\n}\n\n// newEvictSeriesIDs copies the ids into a new slice that can be used for\n// evicting series from the slice.\nfunc newEvictSeriesIDs(ids []uint64) evictSeriesIDs {\n\ta := 
make([]seriesID, len(ids))\n\tfor i, id := range ids {\n\t\ta[i].val = id\n\t}\n\treturn evictSeriesIDs{\n\t\tids: a,\n\t\tsz:  len(a),\n\t}\n}\n\n// mark marks all of the ids in the sorted slice to be evicted from the list of\n// series ids. If an id to be evicted does not exist, it just gets ignored.\nfunc (a *evictSeriesIDs) mark(ids []uint64) {\n\tseriesIDs := a.ids\n\tfor _, id := range ids {\n\t\tif len(seriesIDs) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\t// Perform a binary search of the remaining slice if\n\t\t// the first element does not match the value we're\n\t\t// looking for.\n\t\ti := 0\n\t\tif seriesIDs[0].val < id {\n\t\t\ti = sort.Search(len(seriesIDs), func(i int) bool {\n\t\t\t\treturn seriesIDs[i].val >= id\n\t\t\t})\n\t\t}\n\n\t\tif i >= len(seriesIDs) {\n\t\t\tbreak\n\t\t} else if seriesIDs[i].val == id {\n\t\t\tif !seriesIDs[i].evict {\n\t\t\t\tseriesIDs[i].evict = true\n\t\t\t\ta.sz--\n\t\t\t}\n\t\t\t// Skip over this series since it has been evicted and won't be\n\t\t\t// encountered again.\n\t\t\ti++\n\t\t}\n\t\tseriesIDs = seriesIDs[i:]\n\t}\n}\n\n// evict creates a new slice with only the series that have not been evicted.\nfunc (a *evictSeriesIDs) evict() (ids SeriesIDs) {\n\tif a.sz == 0 {\n\t\treturn ids\n\t}\n\n\t// Make a new slice with only the remaining ids.\n\tids = make([]uint64, 0, a.sz)\n\tfor _, id := range a.ids {\n\t\tif id.evict {\n\t\t\tcontinue\n\t\t}\n\t\tids = append(ids, id.val)\n\t}\n\treturn ids\n}\n\n// TagFilter represents a tag filter when looking up other tags or measurements.\ntype TagFilter struct {\n\tOp    influxql.Token\n\tKey   string\n\tValue string\n\tRegex *regexp.Regexp\n}\n\n// WalkTagKeys calls fn for each tag key associated with m.  
The order of the\n// keys is undefined.\nfunc (m *Measurement) WalkTagKeys(fn func(k string)) {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\n\tfor k := range m.seriesByTagKeyValue {\n\t\tfn(k)\n\t}\n}\n\n// TagKeys returns a list of the measurement's tag names, in sorted order.\nfunc (m *Measurement) TagKeys() []string {\n\tm.mu.RLock()\n\tkeys := make([]string, 0, len(m.seriesByTagKeyValue))\n\tfor k := range m.seriesByTagKeyValue {\n\t\tkeys = append(keys, k)\n\t}\n\tm.mu.RUnlock()\n\tsort.Strings(keys)\n\treturn keys\n}\n\n// TagValues returns all the values for the given tag key, in an arbitrary order.\nfunc (m *Measurement) TagValues(key string) []string {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\tvalues := make([]string, 0, len(m.seriesByTagKeyValue[key]))\n\tfor v := range m.seriesByTagKeyValue[key] {\n\t\tvalues = append(values, v)\n\t}\n\treturn values\n}\n\n// SetFieldName adds the field name to the measurement.\nfunc (m *Measurement) SetFieldName(name string) {\n\tm.mu.RLock()\n\tif _, ok := m.fieldNames[name]; ok {\n\t\tm.mu.RUnlock()\n\t\treturn\n\t}\n\tm.mu.RUnlock()\n\n\tm.mu.Lock()\n\tm.fieldNames[name] = struct{}{}\n\tm.mu.Unlock()\n}\n\n// FieldNames returns a list of the measurement's field names, in an arbitrary order.\nfunc (m *Measurement) FieldNames() []string {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\n\ta := make([]string, 0, len(m.fieldNames))\n\tfor n := range m.fieldNames {\n\t\ta = append(a, n)\n\t}\n\treturn a\n}\n\nfunc (m *Measurement) SeriesByTagKeyValue(key string) map[string]SeriesIDs {\n\tm.mu.RLock()\n\tret := m.seriesByTagKeyValue[key]\n\tm.mu.RUnlock()\n\treturn ret\n}\n\n// stringSet represents a set of strings.\ntype stringSet map[string]struct{}\n\n// newStringSet returns an empty stringSet.\nfunc newStringSet() stringSet {\n\treturn make(map[string]struct{})\n}\n\n// add adds strings to the set.\nfunc (s stringSet) add(ss ...string) {\n\tfor _, n := range ss {\n\t\ts[n] = struct{}{}\n\t}\n}\n\n// list returns the current 
elements in the set, in sorted order.\nfunc (s stringSet) list() []string {\n\tl := make([]string, 0, len(s))\n\tfor k := range s {\n\t\tl = append(l, k)\n\t}\n\tsort.Strings(l)\n\treturn l\n}\n\n// union returns the union of this set and another.\nfunc (s stringSet) union(o stringSet) stringSet {\n\tns := newStringSet()\n\tfor k := range s {\n\t\tns[k] = struct{}{}\n\t}\n\tfor k := range o {\n\t\tns[k] = struct{}{}\n\t}\n\treturn ns\n}\n\n// intersect returns the intersection of this set and another.\nfunc (s stringSet) intersect(o stringSet) stringSet {\n\tshorter, longer := s, o\n\tif len(longer) < len(shorter) {\n\t\tshorter, longer = longer, shorter\n\t}\n\n\tns := newStringSet()\n\tfor k := range shorter {\n\t\tif _, ok := longer[k]; ok {\n\t\t\tns[k] = struct{}{}\n\t\t}\n\t}\n\treturn ns\n}\n\n// filter removes v from a if it exists.  a must be sorted in ascending\n// order.\nfunc filter(a []uint64, v uint64) []uint64 {\n\t// binary search for v\n\ti := sort.Search(len(a), func(i int) bool { return a[i] >= v })\n\tif i >= len(a) || a[i] != v {\n\t\treturn a\n\t}\n\n\t// we found it, so shift the right half down one, overwriting v's position.\n\tcopy(a[i:], a[i+1:])\n\treturn a[:len(a)-1]\n}\n\ntype byTagKey []*influxql.TagSet\n\nfunc (t byTagKey) Len() int           { return len(t) }\nfunc (t byTagKey) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) < 0 }\nfunc (t byTagKey) Swap(i, j int)      { t[i], t[j] = t[j], t[i] }\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta_test.go",
    "content": "package inmem_test\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/tsdb/index/inmem\"\n)\n\n// Test comparing SeriesIDs for equality.\nfunc TestSeriesIDs_Equals(t *testing.T) {\n\tids1 := inmem.SeriesIDs([]uint64{1, 2, 3})\n\tids2 := inmem.SeriesIDs([]uint64{1, 2, 3})\n\tids3 := inmem.SeriesIDs([]uint64{4, 5, 6})\n\n\tif !ids1.Equals(ids2) {\n\t\tt.Fatal(\"expected ids1 == ids2\")\n\t} else if ids1.Equals(ids3) {\n\t\tt.Fatal(\"expected ids1 != ids3\")\n\t}\n}\n\n// Test intersecting sets of SeriesIDs.\nfunc TestSeriesIDs_Intersect(t *testing.T) {\n\t// Test swaping l & r, all branches of if-else, and exit loop when 'j < len(r)'\n\tids1 := inmem.SeriesIDs([]uint64{1, 3, 4, 5, 6})\n\tids2 := inmem.SeriesIDs([]uint64{1, 2, 3, 7})\n\texp := inmem.SeriesIDs([]uint64{1, 3})\n\tgot := ids1.Intersect(ids2)\n\n\tif !exp.Equals(got) {\n\t\tt.Fatalf(\"exp=%v, got=%v\", exp, got)\n\t}\n\n\t// Test exit for loop when 'i < len(l)'\n\tids1 = inmem.SeriesIDs([]uint64{1})\n\tids2 = inmem.SeriesIDs([]uint64{1, 2})\n\texp = inmem.SeriesIDs([]uint64{1})\n\tgot = ids1.Intersect(ids2)\n\n\tif !exp.Equals(got) {\n\t\tt.Fatalf(\"exp=%v, got=%v\", exp, got)\n\t}\n}\n\n// Test union sets of SeriesIDs.\nfunc TestSeriesIDs_Union(t *testing.T) {\n\t// Test all branches of if-else, exit loop because of 'j < len(r)', and append remainder from left.\n\tids1 := inmem.SeriesIDs([]uint64{1, 2, 3, 7})\n\tids2 := inmem.SeriesIDs([]uint64{1, 3, 4, 5, 6})\n\texp := inmem.SeriesIDs([]uint64{1, 2, 3, 4, 5, 6, 7})\n\tgot := ids1.Union(ids2)\n\n\tif !exp.Equals(got) {\n\t\tt.Fatalf(\"exp=%v, got=%v\", exp, got)\n\t}\n\n\t// Test exit because of 'i < len(l)' and append remainder from right.\n\tids1 = inmem.SeriesIDs([]uint64{1})\n\tids2 = inmem.SeriesIDs([]uint64{1, 2})\n\texp = inmem.SeriesIDs([]uint64{1, 2})\n\tgot = ids1.Union(ids2)\n\n\tif 
!exp.Equals(got) {\n\t\tt.Fatalf(\"exp=%v, got=%v\", exp, got)\n\t}\n}\n\n// Test removing one set of SeriesIDs from another.\nfunc TestSeriesIDs_Reject(t *testing.T) {\n\t// Test all branches of if-else, exit loop because of 'j < len(r)', and append remainder from left.\n\tids1 := inmem.SeriesIDs([]uint64{1, 2, 3, 7})\n\tids2 := inmem.SeriesIDs([]uint64{1, 3, 4, 5, 6})\n\texp := inmem.SeriesIDs([]uint64{2, 7})\n\tgot := ids1.Reject(ids2)\n\n\tif !exp.Equals(got) {\n\t\tt.Fatalf(\"exp=%v, got=%v\", exp, got)\n\t}\n\n\t// Test exit because of 'i < len(l)'.\n\tids1 = inmem.SeriesIDs([]uint64{1})\n\tids2 = inmem.SeriesIDs([]uint64{1, 2})\n\texp = inmem.SeriesIDs{}\n\tgot = ids1.Reject(ids2)\n\n\tif !exp.Equals(got) {\n\t\tt.Fatalf(\"exp=%v, got=%v\", exp, got)\n\t}\n}\n\nfunc TestMeasurement_AppendSeriesKeysByID_Missing(t *testing.T) {\n\tm := inmem.NewMeasurement(\"foo\", \"cpu\")\n\tvar dst []string\n\tdst = m.AppendSeriesKeysByID(dst, []uint64{1})\n\tif exp, got := 0, len(dst); exp != got {\n\t\tt.Fatalf(\"series len mismatch: exp %v, got %v\", exp, got)\n\t}\n}\n\nfunc TestMeasurement_AppendSeriesKeysByID_Exists(t *testing.T) {\n\tm := inmem.NewMeasurement(\"foo\", \"cpu\")\n\ts := inmem.NewSeries([]byte(\"cpu,host=foo\"), models.Tags{models.NewTag([]byte(\"host\"), []byte(\"foo\"))})\n\ts.ID = 1\n\tm.AddSeries(s)\n\n\tvar dst []string\n\tdst = m.AppendSeriesKeysByID(dst, []uint64{1})\n\tif exp, got := 1, len(dst); exp != got {\n\t\tt.Fatalf(\"series len mismatch: exp %v, got %v\", exp, got)\n\t}\n\n\tif exp, got := \"cpu,host=foo\", dst[0]; exp != got {\n\t\tt.Fatalf(\"series mismatch: exp %v, got %v\", exp, got)\n\t}\n}\n\nfunc TestMeasurement_TagsSet_Deadlock(t *testing.T) {\n\tm := inmem.NewMeasurement(\"foo\", \"cpu\")\n\ts1 := inmem.NewSeries([]byte(\"cpu,host=foo\"), models.Tags{models.NewTag([]byte(\"host\"), []byte(\"foo\"))})\n\ts1.ID = 1\n\tm.AddSeries(s1)\n\n\ts2 := inmem.NewSeries([]byte(\"cpu,host=bar\"), models.Tags{models.NewTag([]byte(\"host\"), 
[]byte(\"bar\"))})\n\ts2.ID = 2\n\tm.AddSeries(s2)\n\n\tm.DropSeries(s1)\n\n\t// This was deadlocking\n\tm.TagSets(1, influxql.IteratorOptions{})\n\tif got, exp := len(m.SeriesIDs()), 1; got != exp {\n\t\tt.Fatalf(\"series count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc TestMeasurement_ForEachSeriesByExpr_Deadlock(t *testing.T) {\n\tm := inmem.NewMeasurement(\"foo\", \"cpu\")\n\ts1 := inmem.NewSeries([]byte(\"cpu,host=foo\"), models.Tags{models.NewTag([]byte(\"host\"), []byte(\"foo\"))})\n\ts1.ID = 1\n\tm.AddSeries(s1)\n\n\ts2 := inmem.NewSeries([]byte(\"cpu,host=bar\"), models.Tags{models.NewTag([]byte(\"host\"), []byte(\"bar\"))})\n\ts2.ID = 2\n\tm.AddSeries(s2)\n\n\tm.DropSeries(s1)\n\n\t// This was deadlocking\n\tm.ForEachSeriesByExpr(nil, func(tags models.Tags) error {\n\t\treturn nil\n\t})\n\tif got, exp := len(m.SeriesIDs()), 1; got != exp {\n\t\tt.Fatalf(\"series count mismatch: got %v, exp %v\", got, exp)\n\t}\n}\n\nfunc BenchmarkMeasurement_SeriesIDForExp_EQRegex(b *testing.B) {\n\tm := inmem.NewMeasurement(\"foo\", \"cpu\")\n\tfor i := 0; i < 100000; i++ {\n\t\ts := inmem.NewSeries([]byte(\"cpu\"), models.Tags{models.NewTag(\n\t\t\t[]byte(\"host\"),\n\t\t\t[]byte(fmt.Sprintf(\"host%d\", i)))})\n\t\ts.ID = uint64(i)\n\t\tm.AddSeries(s)\n\t}\n\n\tif exp, got := 100000, len(m.SeriesKeys()); exp != got {\n\t\tb.Fatalf(\"series count mismatch: exp %v got %v\", exp, got)\n\t}\n\n\tstmt, err := influxql.NewParser(strings.NewReader(`SELECT * FROM cpu WHERE host =~ /host\\d+/`)).ParseStatement()\n\tif err != nil {\n\t\tb.Fatalf(\"invalid statement: %s\", err)\n\t}\n\n\tselectStmt := stmt.(*influxql.SelectStatement)\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tids := m.IDsForExpr(selectStmt.Condition.(*influxql.BinaryExpr))\n\t\tif exp, got := 100000, len(ids); exp != got {\n\t\t\tb.Fatalf(\"series count mismatch: exp %v got %v\", exp, got)\n\t\t}\n\n\t}\n}\n\nfunc BenchmarkMeasurement_SeriesIDForExp_NERegex(b *testing.B) {\n\tm := 
inmem.NewMeasurement(\"foo\", \"cpu\")\n\tfor i := 0; i < 100000; i++ {\n\t\ts := inmem.NewSeries([]byte(\"cpu\"), models.Tags{models.Tag{\n\t\t\tKey:   []byte(\"host\"),\n\t\t\tValue: []byte(fmt.Sprintf(\"host%d\", i))}})\n\t\ts.ID = uint64(i)\n\t\tm.AddSeries(s)\n\t}\n\n\tif exp, got := 100000, len(m.SeriesKeys()); exp != got {\n\t\tb.Fatalf(\"series count mismatch: exp %v got %v\", exp, got)\n\t}\n\n\tstmt, err := influxql.NewParser(strings.NewReader(`SELECT * FROM cpu WHERE host !~ /foo\\d+/`)).ParseStatement()\n\tif err != nil {\n\t\tb.Fatalf(\"invalid statement: %s\", err)\n\t}\n\n\tselectStmt := stmt.(*influxql.SelectStatement)\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tids := m.IDsForExpr(selectStmt.Condition.(*influxql.BinaryExpr))\n\t\tif exp, got := 100000, len(ids); exp != got {\n\t\t\tb.Fatalf(\"series count mismatch: exp %v got %v\", exp, got)\n\t\t}\n\n\t}\n\n}\n\nfunc benchmarkTagSets(b *testing.B, n int, opt influxql.IteratorOptions) {\n\tm := inmem.NewMeasurement(\"foo\", \"m\")\n\tfor i := 0; i < n; i++ {\n\t\ttags := map[string]string{\"tag1\": \"value1\", \"tag2\": \"value2\"}\n\t\ts := inmem.NewSeries([]byte(fmt.Sprintf(\"m,tag1=value1,tag2=value2\")), models.NewTags(tags))\n\t\ts.ID = uint64(i)\n\t\ts.AssignShard(0)\n\t\tm.AddSeries(s)\n\t}\n\n\t// warm caches\n\tm.TagSets(0, opt)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tm.TagSets(0, opt)\n\t}\n}\n\nfunc BenchmarkMeasurement_TagSetsNoDimensions_1000(b *testing.B) {\n\tbenchmarkTagSets(b, 1000, influxql.IteratorOptions{})\n}\n\nfunc BenchmarkMeasurement_TagSetsDimensions_1000(b *testing.B) {\n\tbenchmarkTagSets(b, 1000, influxql.IteratorOptions{Dimensions: []string{\"tag1\", \"tag2\"}})\n}\n\nfunc BenchmarkMeasurement_TagSetsNoDimensions_100000(b *testing.B) {\n\tbenchmarkTagSets(b, 100000, influxql.IteratorOptions{})\n}\n\nfunc BenchmarkMeasurement_TagSetsDimensions_100000(b *testing.B) {\n\tbenchmarkTagSets(b, 100000, 
influxql.IteratorOptions{Dimensions: []string{\"tag1\", \"tag2\"}})\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/internal/file_set.go",
    "content": "package internal\n\nimport (\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/bloom\"\n\t\"github.com/influxdata/influxdb/pkg/estimator\"\n\t\"github.com/influxdata/influxdb/tsdb/index/tsi1\"\n)\n\n// File is a mock implementation of a tsi1.File.\ntype File struct {\n\tClosef                     func() error\n\tPathf                      func() string\n\tIDf                        func() int\n\tLevelf                     func() int\n\tMeasurementf               func(name []byte) tsi1.MeasurementElem\n\tMeasurementIteratorf       func() tsi1.MeasurementIterator\n\tHasSeriesf                 func(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool)\n\tSeriesf                    func(name []byte, tags models.Tags) tsi1.SeriesElem\n\tSeriesNf                   func() uint64\n\tTagKeyf                    func(name, key []byte) tsi1.TagKeyElem\n\tTagKeyIteratorf            func(name []byte) tsi1.TagKeyIterator\n\tTagValuef                  func(name, key, value []byte) tsi1.TagValueElem\n\tTagValueIteratorf          func(name, key []byte) tsi1.TagValueIterator\n\tSeriesIteratorf            func() tsi1.SeriesIterator\n\tMeasurementSeriesIteratorf func(name []byte) tsi1.SeriesIterator\n\tTagKeySeriesIteratorf      func(name, key []byte) tsi1.SeriesIterator\n\tTagValueSeriesIteratorf    func(name, key, value []byte) tsi1.SeriesIterator\n\tMergeSeriesSketchesf       func(s, t estimator.Sketch) error\n\tMergeMeasurementsSketchesf func(s, t estimator.Sketch) error\n\tRetainf                    func()\n\tReleasef                   func()\n\tFilterf                    func() *bloom.Filter\n}\n\nfunc (f *File) Close() error                                  { return f.Closef() }\nfunc (f *File) Path() string                                  { return f.Pathf() }\nfunc (f *File) ID() int                                       { return f.IDf() }\nfunc (f *File) Level() int                                    { return 
f.Levelf() }\nfunc (f *File) Measurement(name []byte) tsi1.MeasurementElem  { return f.Measurementf(name) }\nfunc (f *File) MeasurementIterator() tsi1.MeasurementIterator { return f.MeasurementIteratorf() }\nfunc (f *File) HasSeries(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool) {\n\treturn f.HasSeriesf(name, tags, buf)\n}\nfunc (f *File) Series(name []byte, tags models.Tags) tsi1.SeriesElem { return f.Seriesf(name, tags) }\nfunc (f *File) SeriesN() uint64                                      { return f.SeriesNf() }\nfunc (f *File) TagKey(name, key []byte) tsi1.TagKeyElem              { return f.TagKeyf(name, key) }\nfunc (f *File) TagKeyIterator(name []byte) tsi1.TagKeyIterator       { return f.TagKeyIteratorf(name) }\nfunc (f *File) TagValue(name, key, value []byte) tsi1.TagValueElem {\n\treturn f.TagValuef(name, key, value)\n}\nfunc (f *File) TagValueIterator(name, key []byte) tsi1.TagValueIterator {\n\treturn f.TagValueIteratorf(name, key)\n}\nfunc (f *File) SeriesIterator() tsi1.SeriesIterator { return f.SeriesIteratorf() }\nfunc (f *File) MeasurementSeriesIterator(name []byte) tsi1.SeriesIterator {\n\treturn f.MeasurementSeriesIteratorf(name)\n}\nfunc (f *File) TagKeySeriesIterator(name, key []byte) tsi1.SeriesIterator {\n\treturn f.TagKeySeriesIteratorf(name, key)\n}\nfunc (f *File) TagValueSeriesIterator(name, key, value []byte) tsi1.SeriesIterator {\n\treturn f.TagValueSeriesIteratorf(name, key, value)\n}\nfunc (f *File) MergeSeriesSketches(s, t estimator.Sketch) error { return f.MergeSeriesSketchesf(s, t) }\nfunc (f *File) MergeMeasurementsSketches(s, t estimator.Sketch) error {\n\treturn f.MergeMeasurementsSketchesf(s, t)\n}\nfunc (f *File) Retain()               { f.Retainf() }\nfunc (f *File) Release()              { f.Releasef() }\nfunc (f *File) Filter() *bloom.Filter { return f.Filterf() }\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/doc.go",
    "content": "/*\n\nPackage tsi1 provides a memory-mapped index implementation that supports\nhigh cardinality series.\n\nOverview\n\nThe top-level object in tsi1 is the Index. It is the primary access point from\nthe rest of the system. The Index is composed of LogFile and IndexFile objects.\n\nLog files are small write-ahead log files that record new series immediately\nin the order that they are received. The data within the file is indexed\nin-memory so it can be quickly accessed. When the system is restarted, this log\nfile is replayed and the in-memory representation is rebuilt.\n\nIndex files also contain series information, however, they are highly indexed\nso that reads can be performed quickly. Index files are built through a process\ncalled compaction where a log file or multiple index files are merged together.\n\n\nOperations\n\nThe index can perform many tasks related to series, measurement, & tag data.\nAll data is inserted by adding a series to the index. When adding a series,\nthe measurement, tag keys, and tag values are all extracted and indexed\nseparately.\n\nOnce a series has been added, it can be removed in several ways. First, the\nindividual series can be removed. Second, it can be removed as part of a bulk\noperation by deleting the entire measurement.\n\nThe query engine needs to be able to look up series in a variety of ways such\nas by measurement name, by tag value, or by using regular expressions. The\nindex provides an API to iterate over subsets of series and perform set\noperations such as unions and intersections.\n\n\nLog File Layout\n\nThe write-ahead file that series initially are inserted into simply appends\nall new operations sequentially. It is simply composed of a series of log\nentries. 
An entry contains a flag to specify the operation type, the measurement\nname, the tag set, and a checksum.\n\n\t┏━━━━━━━━━LogEntry━━━━━━━━━┓\n\t┃ ┌──────────────────────┐ ┃\n\t┃ │         Flag         │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │     Measurement      │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │      Key/Value       │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │      Key/Value       │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │      Key/Value       │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │       Checksum       │ ┃\n\t┃ └──────────────────────┘ ┃\n\t┗━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n\nWhen the log file is replayed, if the checksum is incorrect or the entry is\nincomplete (because of a partially failed write) then the log is truncated.\n\n\nIndex File Layout\n\nThe index file is composed of 3 main block types: one series block, one or more\ntag blocks, and one measurement block. At the end of the index file is a\ntrailer that records metadata such as the offsets to these blocks.\n\n\nSeries Block Layout\n\nThe series block stores raw series keys in sorted order. It also provides hash\nindexes so that series can be looked up quickly. Hash indexes are inserted\nperiodically so that memory size is limited at write time. Once all the series\nand hash indexes have been written then a list of index entries are written\nso that hash indexes can be looked up via binary search.\n\nThe end of the block contains two HyperLogLog++ sketches which track the\nestimated number of created series and deleted series. 
After the sketches is\na trailer which contains metadata about the block.\n\n\t┏━━━━━━━SeriesBlock━━━━━━━━┓\n\t┃ ┌──────────────────────┐ ┃\n\t┃ │      Series Key      │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │      Series Key      │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │      Series Key      │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │                      │ ┃\n\t┃ │      Hash Index      │ ┃\n\t┃ │                      │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │      Series Key      │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │      Series Key      │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │      Series Key      │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │                      │ ┃\n\t┃ │      Hash Index      │ ┃\n\t┃ │                      │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │    Index Entries     │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │     HLL Sketches     │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │       Trailer        │ ┃\n\t┃ └──────────────────────┘ ┃\n\t┗━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n\n\nTag Block Layout\n\nAfter the series block is one or more tag blocks. One of these blocks exists\nfor every measurement in the index file. The block is structured as a sorted\nlist of values for each key and then a sorted list of keys. 
Each of these lists\nhas their own hash index for fast direct lookups.\n\n\t┏━━━━━━━━Tag Block━━━━━━━━━┓\n\t┃ ┌──────────────────────┐ ┃\n\t┃ │        Value         │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │        Value         │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │        Value         │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │                      │ ┃\n\t┃ │      Hash Index      │ ┃\n\t┃ │                      │ ┃\n\t┃ └──────────────────────┘ ┃\n\t┃ ┌──────────────────────┐ ┃\n\t┃ │        Value         │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │        Value         │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │                      │ ┃\n\t┃ │      Hash Index      │ ┃\n\t┃ │                      │ ┃\n\t┃ └──────────────────────┘ ┃\n\t┃ ┌──────────────────────┐ ┃\n\t┃ │         Key          │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │         Key          │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │                      │ ┃\n\t┃ │      Hash Index      │ ┃\n\t┃ │                      │ ┃\n\t┃ └──────────────────────┘ ┃\n\t┃ ┌──────────────────────┐ ┃\n\t┃ │       Trailer        │ ┃\n\t┃ └──────────────────────┘ ┃\n\t┗━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n\nEach entry for values contains a sorted list of offsets for series keys that use\nthat value. Series iterators can be built around a single tag key value or\nmultiple iterators can be merged with set operators such as union or\nintersection.\n\n\nMeasurement block\n\nThe measurement block stores a sorted list of measurements, their associated\nseries offsets, and the offset to their tag block. 
This allows all series for\na measurement to be traversed quickly and it allows fast direct lookups of\nmeasurements and their tags.\n\nThis block also contains HyperLogLog++ sketches for new and deleted\nmeasurements.\n\n\t┏━━━━Measurement Block━━━━━┓\n\t┃ ┌──────────────────────┐ ┃\n\t┃ │     Measurement      │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │     Measurement      │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │     Measurement      │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │                      │ ┃\n\t┃ │      Hash Index      │ ┃\n\t┃ │                      │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │     HLL Sketches     │ ┃\n\t┃ ├──────────────────────┤ ┃\n\t┃ │       Trailer        │ ┃\n\t┃ └──────────────────────┘ ┃\n\t┗━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n\n\nManifest file\n\nThe index is simply an ordered set of log and index files. These files can be\nmerged together or rewritten but their order must always be the same. This is\nbecause series, measurements, & tags can be marked as deleted (aka tombstoned)\nand this action needs to be tracked in time order.\n\nWhenever the set of active files is changed, a manifest file is written to\ntrack the set. The manifest specifies the ordering of files and, on startup,\nall files not in the manifest are removed from the index directory.\n\n\nCompacting index files\n\nCompaction is the process of taking files and merging them together into a\nsingle file. There are two stages of compaction within TSI.\n\nFirst, once log files exceed a size threshold then they are compacted into an\nindex file. This threshold is relatively small because log files must maintain\ntheir index in the heap which TSI tries to avoid. Small log files are also very\nquick to convert into an index file so this is done aggressively.\n\nSecond, once a contiguous set of index files exceed a factor (e.g. 10x) then\nthey are all merged together into a single index file and the old files are\ndiscarded. 
Because all blocks are written in sorted order, the new index file\ncan be streamed and minimize memory use.\n\n\nConcurrency\n\nIndex files are immutable so they do not require fine grained locks, however,\ncompactions require that we track which files are in use so they are not\ndiscarded too soon. This is done by using reference counting with file sets.\n\nA file set is simply an ordered list of index files. When the current file set\nis obtained from the index, a counter is incremented to track its usage. Once\nthe user is done with the file set, it is released and the counter is\ndecremented. A file cannot be removed from the file system until this counter\nreturns to zero.\n\nBesides the reference counting, there are no other locking mechanisms when\nreading or writing index files. Log files, however, do require a lock whenever\nthey are accessed. This is another reason to minimize log file size.\n\n\n*/\npackage tsi1\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set.go",
    "content": "package tsi1\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/bloom\"\n\t\"github.com/influxdata/influxdb/pkg/bytesutil\"\n\t\"github.com/influxdata/influxdb/pkg/estimator\"\n\t\"github.com/influxdata/influxdb/pkg/estimator/hll\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n)\n\n// FileSet represents a collection of files.\ntype FileSet struct {\n\tlevels  []CompactionLevel\n\tfiles   []File\n\tfilters []*bloom.Filter // per-level filters\n}\n\n// NewFileSet returns a new instance of FileSet.\nfunc NewFileSet(levels []CompactionLevel, files []File) (*FileSet, error) {\n\tfs := &FileSet{levels: levels, files: files}\n\tif err := fs.buildFilters(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn fs, nil\n}\n\n// Close closes all the files in the file set.\nfunc (p FileSet) Close() error {\n\tvar err error\n\tfor _, f := range p.files {\n\t\tif e := f.Close(); e != nil && err == nil {\n\t\t\terr = e\n\t\t}\n\t}\n\treturn err\n}\n\n// Retain adds a reference count to all files.\nfunc (fs *FileSet) Retain() {\n\tfor _, f := range fs.files {\n\t\tf.Retain()\n\t}\n}\n\n// Release removes a reference count from all files.\nfunc (fs *FileSet) Release() {\n\tfor _, f := range fs.files {\n\t\tf.Release()\n\t}\n}\n\n// Prepend returns a new file set with f added at the beginning.\nfunc (fs *FileSet) Prepend(f File) (*FileSet, error) {\n\treturn NewFileSet(fs.levels, append([]File{f}, fs.files...))\n}\n\n// MustReplace swaps a list of files for a single file and returns a new file set.\n// The caller should always guarentee that the files exist and are contiguous.\nfunc (fs *FileSet) MustReplace(oldFiles []File, newFile File) *FileSet {\n\tassert(len(oldFiles) > 0, \"cannot replace empty files\")\n\n\t// Find index of first old file.\n\tvar i int\n\tfor ; i < len(fs.files); i++ {\n\t\tif fs.files[i] == oldFiles[0] 
{\n\t\t\tbreak\n\t\t} else if i == len(fs.files)-1 {\n\t\t\tpanic(\"first replacement file not found\")\n\t\t}\n\t}\n\n\t// Ensure all old files are contiguous.\n\tfor j := range oldFiles {\n\t\tif fs.files[i+j] != oldFiles[j] {\n\t\t\tpanic(fmt.Sprintf(\"cannot replace non-contiguous files: subset=%+v, fileset=%+v\", Files(oldFiles).IDs(), Files(fs.files).IDs()))\n\t\t}\n\t}\n\n\t// Copy to new fileset.\n\tother := make([]File, len(fs.files)-len(oldFiles)+1)\n\tcopy(other[:i], fs.files[:i])\n\tother[i] = newFile\n\tcopy(other[i+1:], fs.files[i+len(oldFiles):])\n\n\tfs, err := NewFileSet(fs.levels, other)\n\tif err != nil {\n\t\tpanic(\"cannot build file set: \" + err.Error())\n\t}\n\treturn fs\n}\n\n// MaxID returns the highest file identifier.\nfunc (fs *FileSet) MaxID() int {\n\tvar max int\n\tfor _, f := range fs.files {\n\t\tif i := f.ID(); i > max {\n\t\t\tmax = i\n\t\t}\n\t}\n\treturn max\n}\n\n// Files returns all files in the set.\nfunc (fs *FileSet) Files() []File {\n\treturn fs.files\n}\n\n// LogFiles returns all log files from the file set.\nfunc (fs *FileSet) LogFiles() []*LogFile {\n\tvar a []*LogFile\n\tfor _, f := range fs.files {\n\t\tif f, ok := f.(*LogFile); ok {\n\t\t\ta = append(a, f)\n\t\t}\n\t}\n\treturn a\n}\n\n// IndexFiles returns all index files from the file set.\nfunc (fs *FileSet) IndexFiles() []*IndexFile {\n\tvar a []*IndexFile\n\tfor _, f := range fs.files {\n\t\tif f, ok := f.(*IndexFile); ok {\n\t\t\ta = append(a, f)\n\t\t}\n\t}\n\treturn a\n}\n\n// LastContiguousIndexFilesByLevel returns the last contiguous files by level.\n// These can be used by the compaction scheduler.\nfunc (fs *FileSet) LastContiguousIndexFilesByLevel(level int) []*IndexFile {\n\tif level == 0 {\n\t\treturn nil\n\t}\n\n\tvar a []*IndexFile\n\tfor i := len(fs.files) - 1; i >= 0; i-- {\n\t\tf := fs.files[i]\n\n\t\t// Ignore files above level, stop on files below level.\n\t\tif level < f.Level() {\n\t\t\tcontinue\n\t\t} else if level > f.Level() 
{\n\t\t\tbreak\n\t\t}\n\n\t\ta = append([]*IndexFile{f.(*IndexFile)}, a...)\n\t}\n\treturn a\n}\n\n// SeriesIterator returns an iterator over all series in the index.\nfunc (fs *FileSet) SeriesIterator() SeriesIterator {\n\ta := make([]SeriesIterator, 0, len(fs.files))\n\tfor _, f := range fs.files {\n\t\titr := f.SeriesIterator()\n\t\tif itr == nil {\n\t\t\tcontinue\n\t\t}\n\t\ta = append(a, itr)\n\t}\n\treturn FilterUndeletedSeriesIterator(MergeSeriesIterators(a...))\n}\n\n// Measurement returns a measurement by name.\nfunc (fs *FileSet) Measurement(name []byte) MeasurementElem {\n\tfor _, f := range fs.files {\n\t\tif e := f.Measurement(name); e == nil {\n\t\t\tcontinue\n\t\t} else if e.Deleted() {\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn e\n\t\t}\n\t}\n\treturn nil\n}\n\n// MeasurementIterator returns an iterator over all measurements in the index.\nfunc (fs *FileSet) MeasurementIterator() MeasurementIterator {\n\ta := make([]MeasurementIterator, 0, len(fs.files))\n\tfor _, f := range fs.files {\n\t\titr := f.MeasurementIterator()\n\t\tif itr != nil {\n\t\t\ta = append(a, itr)\n\t\t}\n\t}\n\treturn FilterUndeletedMeasurementIterator(MergeMeasurementIterators(a...))\n}\n\n// MeasurementSeriesIterator returns an iterator over all non-tombstoned series\n// in the index for the provided measurement.\nfunc (fs *FileSet) MeasurementSeriesIterator(name []byte) SeriesIterator {\n\ta := make([]SeriesIterator, 0, len(fs.files))\n\tfor _, f := range fs.files {\n\t\titr := f.MeasurementSeriesIterator(name)\n\t\tif itr != nil {\n\t\t\ta = append(a, itr)\n\t\t}\n\t}\n\treturn FilterUndeletedSeriesIterator(MergeSeriesIterators(a...))\n}\n\n// TagKeyIterator returns an iterator over all tag keys for a measurement.\nfunc (fs *FileSet) TagKeyIterator(name []byte) TagKeyIterator {\n\ta := make([]TagKeyIterator, 0, len(fs.files))\n\tfor _, f := range fs.files {\n\t\titr := f.TagKeyIterator(name)\n\t\tif itr != nil {\n\t\t\ta = append(a, itr)\n\t\t}\n\t}\n\treturn 
MergeTagKeyIterators(a...)\n}\n\n// MeasurementTagKeysByExpr extracts the tag keys wanted by the expression.\nfunc (fs *FileSet) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) {\n\tswitch e := expr.(type) {\n\tcase *influxql.BinaryExpr:\n\t\tswitch e.Op {\n\t\tcase influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX:\n\t\t\ttag, ok := e.LHS.(*influxql.VarRef)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"left side of '%s' must be a tag key\", e.Op.String())\n\t\t\t} else if tag.Val != \"_tagKey\" {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\n\t\t\tif influxql.IsRegexOp(e.Op) {\n\t\t\t\tre, ok := e.RHS.(*influxql.RegexLiteral)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, fmt.Errorf(\"right side of '%s' must be a regular expression\", e.Op.String())\n\t\t\t\t}\n\t\t\t\treturn fs.tagKeysByFilter(name, e.Op, nil, re.Val), nil\n\t\t\t}\n\n\t\t\ts, ok := e.RHS.(*influxql.StringLiteral)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"right side of '%s' must be a tag value string\", e.Op.String())\n\t\t\t}\n\t\t\treturn fs.tagKeysByFilter(name, e.Op, []byte(s.Val), nil), nil\n\n\t\tcase influxql.AND, influxql.OR:\n\t\t\tlhs, err := fs.MeasurementTagKeysByExpr(name, e.LHS)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\trhs, err := fs.MeasurementTagKeysByExpr(name, e.RHS)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif lhs != nil && rhs != nil {\n\t\t\t\tif e.Op == influxql.OR {\n\t\t\t\t\treturn unionStringSets(lhs, rhs), nil\n\t\t\t\t}\n\t\t\t\treturn intersectStringSets(lhs, rhs), nil\n\t\t\t} else if lhs != nil {\n\t\t\t\treturn lhs, nil\n\t\t\t} else if rhs != nil {\n\t\t\t\treturn rhs, nil\n\t\t\t}\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid operator\")\n\t\t}\n\n\tcase *influxql.ParenExpr:\n\t\treturn fs.MeasurementTagKeysByExpr(name, e.Expr)\n\t}\n\n\treturn nil, fmt.Errorf(\"%#v\", expr)\n}\n\n// tagValuesByKeyAndExpr retrieves tag values for the 
provided tag keys.\n//\n// tagValuesByKeyAndExpr returns sets of values for each key, indexable by the\n// position of the tag key in the keys argument.\n//\n// N.B tagValuesByKeyAndExpr relies on keys being sorted in ascending\n// lexicographic order.\nfunc (fs *FileSet) tagValuesByKeyAndExpr(name []byte, keys []string, expr influxql.Expr, fieldset *tsdb.MeasurementFieldSet) ([]map[string]struct{}, error) {\n\titr, err := fs.seriesByExprIterator(name, expr, fieldset.Fields(string(name)))\n\tif err != nil {\n\t\treturn nil, err\n\t} else if itr == nil {\n\t\treturn nil, nil\n\t}\n\n\tkeyIdxs := make(map[string]int, len(keys))\n\tfor ki, key := range keys {\n\t\tkeyIdxs[key] = ki\n\n\t\t// Check that keys are in order.\n\t\tif ki > 0 && key < keys[ki-1] {\n\t\t\treturn nil, fmt.Errorf(\"keys %v are not in ascending order\", keys)\n\t\t}\n\t}\n\n\tresultSet := make([]map[string]struct{}, len(keys))\n\tfor i := 0; i < len(resultSet); i++ {\n\t\tresultSet[i] = make(map[string]struct{})\n\t}\n\n\t// Iterate all series to collect tag values.\n\tfor e := itr.Next(); e != nil; e = itr.Next() {\n\t\tfor _, t := range e.Tags() {\n\t\t\tif idx, ok := keyIdxs[string(t.Key)]; ok {\n\t\t\t\tresultSet[idx][string(t.Value)] = struct{}{}\n\t\t\t} else if string(t.Key) > keys[len(keys)-1] {\n\t\t\t\t// The tag key is > the largest key we're interested in.\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn resultSet, nil\n}\n\n// tagKeysByFilter will filter the tag keys for the measurement.\nfunc (fs *FileSet) tagKeysByFilter(name []byte, op influxql.Token, val []byte, regex *regexp.Regexp) map[string]struct{} {\n\tss := make(map[string]struct{})\n\titr := fs.TagKeyIterator(name)\n\tfor e := itr.Next(); e != nil; e = itr.Next() {\n\t\tvar matched bool\n\t\tswitch op {\n\t\tcase influxql.EQ:\n\t\t\tmatched = bytes.Equal(e.Key(), val)\n\t\tcase influxql.NEQ:\n\t\t\tmatched = !bytes.Equal(e.Key(), val)\n\t\tcase influxql.EQREGEX:\n\t\t\tmatched = regex.Match(e.Key())\n\t\tcase 
influxql.NEQREGEX:\n\t\t\tmatched = !regex.Match(e.Key())\n\t\t}\n\n\t\tif !matched {\n\t\t\tcontinue\n\t\t}\n\t\tss[string(e.Key())] = struct{}{}\n\t}\n\treturn ss\n}\n\n// TagKeySeriesIterator returns a series iterator for all values across a single key.\nfunc (fs *FileSet) TagKeySeriesIterator(name, key []byte) SeriesIterator {\n\ta := make([]SeriesIterator, 0, len(fs.files))\n\tfor _, f := range fs.files {\n\t\titr := f.TagKeySeriesIterator(name, key)\n\t\tif itr != nil {\n\t\t\ta = append(a, itr)\n\t\t}\n\t}\n\treturn FilterUndeletedSeriesIterator(MergeSeriesIterators(a...))\n}\n\n// HasTagKey returns true if the tag key exists.\nfunc (fs *FileSet) HasTagKey(name, key []byte) bool {\n\tfor _, f := range fs.files {\n\t\tif e := f.TagKey(name, key); e != nil {\n\t\t\treturn !e.Deleted()\n\t\t}\n\t}\n\treturn false\n}\n\n// HasTagValue returns true if the tag value exists.\nfunc (fs *FileSet) HasTagValue(name, key, value []byte) bool {\n\tfor _, f := range fs.files {\n\t\tif e := f.TagValue(name, key, value); e != nil {\n\t\t\treturn !e.Deleted()\n\t\t}\n\t}\n\treturn false\n}\n\n// TagValueIterator returns a value iterator for a tag key.\nfunc (fs *FileSet) TagValueIterator(name, key []byte) TagValueIterator {\n\ta := make([]TagValueIterator, 0, len(fs.files))\n\tfor _, f := range fs.files {\n\t\titr := f.TagValueIterator(name, key)\n\t\tif itr != nil {\n\t\t\ta = append(a, itr)\n\t\t}\n\t}\n\treturn MergeTagValueIterators(a...)\n}\n\n// TagValueSeriesIterator returns a series iterator for a single tag value.\nfunc (fs *FileSet) TagValueSeriesIterator(name, key, value []byte) SeriesIterator {\n\ta := make([]SeriesIterator, 0, len(fs.files))\n\tfor _, f := range fs.files {\n\t\titr := f.TagValueSeriesIterator(name, key, value)\n\t\tif itr != nil {\n\t\t\ta = append(a, itr)\n\t\t}\n\t}\n\treturn FilterUndeletedSeriesIterator(MergeSeriesIterators(a...))\n}\n\n// MatchTagValueSeriesIterator returns a series iterator for tags which match value.\n// If matches is 
false, returns iterators which do not match value.\nfunc (fs *FileSet) MatchTagValueSeriesIterator(name, key []byte, value *regexp.Regexp, matches bool) SeriesIterator {\n\tmatchEmpty := value.MatchString(\"\")\n\n\tif matches {\n\t\tif matchEmpty {\n\t\t\treturn FilterUndeletedSeriesIterator(fs.matchTagValueEqualEmptySeriesIterator(name, key, value))\n\t\t}\n\t\treturn FilterUndeletedSeriesIterator(fs.matchTagValueEqualNotEmptySeriesIterator(name, key, value))\n\t}\n\n\tif matchEmpty {\n\t\treturn FilterUndeletedSeriesIterator(fs.matchTagValueNotEqualEmptySeriesIterator(name, key, value))\n\t}\n\treturn FilterUndeletedSeriesIterator(fs.matchTagValueNotEqualNotEmptySeriesIterator(name, key, value))\n}\n\nfunc (fs *FileSet) matchTagValueEqualEmptySeriesIterator(name, key []byte, value *regexp.Regexp) SeriesIterator {\n\tvitr := fs.TagValueIterator(name, key)\n\tif vitr == nil {\n\t\treturn fs.MeasurementSeriesIterator(name)\n\t}\n\n\tvar itrs []SeriesIterator\n\tfor e := vitr.Next(); e != nil; e = vitr.Next() {\n\t\tif !value.Match(e.Value()) {\n\t\t\titrs = append(itrs, fs.TagValueSeriesIterator(name, key, e.Value()))\n\t\t}\n\t}\n\n\treturn DifferenceSeriesIterators(\n\t\tfs.MeasurementSeriesIterator(name),\n\t\tMergeSeriesIterators(itrs...),\n\t)\n}\n\nfunc (fs *FileSet) matchTagValueEqualNotEmptySeriesIterator(name, key []byte, value *regexp.Regexp) SeriesIterator {\n\tvitr := fs.TagValueIterator(name, key)\n\tif vitr == nil {\n\t\treturn nil\n\t}\n\n\tvar itrs []SeriesIterator\n\tfor e := vitr.Next(); e != nil; e = vitr.Next() {\n\t\tif value.Match(e.Value()) {\n\t\t\titrs = append(itrs, fs.TagValueSeriesIterator(name, key, e.Value()))\n\t\t}\n\t}\n\treturn MergeSeriesIterators(itrs...)\n}\n\nfunc (fs *FileSet) matchTagValueNotEqualEmptySeriesIterator(name, key []byte, value *regexp.Regexp) SeriesIterator {\n\tvitr := fs.TagValueIterator(name, key)\n\tif vitr == nil {\n\t\treturn nil\n\t}\n\n\tvar itrs []SeriesIterator\n\tfor e := vitr.Next(); e != nil; e = 
vitr.Next() {\n\t\tif !value.Match(e.Value()) {\n\t\t\titrs = append(itrs, fs.TagValueSeriesIterator(name, key, e.Value()))\n\t\t}\n\t}\n\treturn MergeSeriesIterators(itrs...)\n}\n\nfunc (fs *FileSet) matchTagValueNotEqualNotEmptySeriesIterator(name, key []byte, value *regexp.Regexp) SeriesIterator {\n\tvitr := fs.TagValueIterator(name, key)\n\tif vitr == nil {\n\t\treturn fs.MeasurementSeriesIterator(name)\n\t}\n\n\tvar itrs []SeriesIterator\n\tfor e := vitr.Next(); e != nil; e = vitr.Next() {\n\t\tif value.Match(e.Value()) {\n\t\t\titrs = append(itrs, fs.TagValueSeriesIterator(name, key, e.Value()))\n\t\t}\n\t}\n\n\treturn DifferenceSeriesIterators(\n\t\tfs.MeasurementSeriesIterator(name),\n\t\tMergeSeriesIterators(itrs...),\n\t)\n}\n\nfunc (fs *FileSet) MeasurementNamesByExpr(expr influxql.Expr) ([][]byte, error) {\n\t// Return filtered list if expression exists.\n\tif expr != nil {\n\t\treturn fs.measurementNamesByExpr(expr)\n\t}\n\n\t// Iterate over all measurements if no condition exists.\n\tvar names [][]byte\n\titr := fs.MeasurementIterator()\n\tfor e := itr.Next(); e != nil; e = itr.Next() {\n\t\tnames = append(names, e.Name())\n\t}\n\treturn names, nil\n}\n\nfunc (fs *FileSet) measurementNamesByExpr(expr influxql.Expr) ([][]byte, error) {\n\tif expr == nil {\n\t\treturn nil, nil\n\t}\n\n\tswitch e := expr.(type) {\n\tcase *influxql.BinaryExpr:\n\t\tswitch e.Op {\n\t\tcase influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX:\n\t\t\ttag, ok := e.LHS.(*influxql.VarRef)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"left side of '%s' must be a tag key\", e.Op.String())\n\t\t\t}\n\n\t\t\t// Retrieve value or regex expression from RHS.\n\t\t\tvar value string\n\t\t\tvar regex *regexp.Regexp\n\t\t\tif influxql.IsRegexOp(e.Op) {\n\t\t\t\tre, ok := e.RHS.(*influxql.RegexLiteral)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, fmt.Errorf(\"right side of '%s' must be a regular expression\", e.Op.String())\n\t\t\t\t}\n\t\t\t\tregex = re.Val\n\t\t\t} else 
{\n\t\t\t\ts, ok := e.RHS.(*influxql.StringLiteral)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, fmt.Errorf(\"right side of '%s' must be a tag value string\", e.Op.String())\n\t\t\t\t}\n\t\t\t\tvalue = s.Val\n\t\t\t}\n\n\t\t\t// Match on name, if specified.\n\t\t\tif tag.Val == \"_name\" {\n\t\t\t\treturn fs.measurementNamesByNameFilter(e.Op, value, regex), nil\n\t\t\t} else if influxql.IsSystemName(tag.Val) {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t\treturn fs.measurementNamesByTagFilter(e.Op, tag.Val, value, regex), nil\n\n\t\tcase influxql.OR, influxql.AND:\n\t\t\tlhs, err := fs.measurementNamesByExpr(e.LHS)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\trhs, err := fs.measurementNamesByExpr(e.RHS)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif e.Op == influxql.OR {\n\t\t\t\treturn bytesutil.Union(lhs, rhs), nil\n\t\t\t}\n\t\t\treturn bytesutil.Intersect(lhs, rhs), nil\n\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid tag comparison operator\")\n\t\t}\n\n\tcase *influxql.ParenExpr:\n\t\treturn fs.measurementNamesByExpr(e.Expr)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"%#v\", expr)\n\t}\n}\n\n// measurementNamesByNameFilter returns matching measurement names in sorted order.\nfunc (fs *FileSet) measurementNamesByNameFilter(op influxql.Token, val string, regex *regexp.Regexp) [][]byte {\n\tvar names [][]byte\n\titr := fs.MeasurementIterator()\n\tfor e := itr.Next(); e != nil; e = itr.Next() {\n\t\tvar matched bool\n\t\tswitch op {\n\t\tcase influxql.EQ:\n\t\t\tmatched = string(e.Name()) == val\n\t\tcase influxql.NEQ:\n\t\t\tmatched = string(e.Name()) != val\n\t\tcase influxql.EQREGEX:\n\t\t\tmatched = regex.Match(e.Name())\n\t\tcase influxql.NEQREGEX:\n\t\t\tmatched = !regex.Match(e.Name())\n\t\t}\n\n\t\tif matched {\n\t\t\tnames = append(names, e.Name())\n\t\t}\n\t}\n\tbytesutil.Sort(names)\n\treturn names\n}\n\nfunc (fs *FileSet) measurementNamesByTagFilter(op influxql.Token, key, val string, regex *regexp.Regexp) 
[][]byte {\n\tvar names [][]byte\n\n\tmitr := fs.MeasurementIterator()\n\tfor me := mitr.Next(); me != nil; me = mitr.Next() {\n\t\t// If the operator is non-regex, only check the specified value.\n\t\tvar tagMatch bool\n\t\tif op == influxql.EQ || op == influxql.NEQ {\n\t\t\tif fs.HasTagValue(me.Name(), []byte(key), []byte(val)) {\n\t\t\t\ttagMatch = true\n\t\t\t}\n\t\t} else {\n\t\t\t// Else, the operator is a regex and we have to check all tag\n\t\t\t// values against the regular expression.\n\t\t\tvitr := fs.TagValueIterator(me.Name(), []byte(key))\n\t\t\tif vitr != nil {\n\t\t\t\tfor ve := vitr.Next(); ve != nil; ve = vitr.Next() {\n\t\t\t\t\tif regex.Match(ve.Value()) {\n\t\t\t\t\t\ttagMatch = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t//\n\t\t// XNOR gate\n\t\t//\n\t\t// tags match | operation is EQ | measurement matches\n\t\t// --------------------------------------------------\n\t\t//     True   |       True      |      True\n\t\t//     True   |       False     |      False\n\t\t//     False  |       True      |      False\n\t\t//     False  |       False     |      True\n\t\tif tagMatch == (op == influxql.EQ || op == influxql.EQREGEX) {\n\t\t\tnames = append(names, me.Name())\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tbytesutil.Sort(names)\n\treturn names\n}\n\n// HasSeries returns true if the series exists and is not tombstoned.\nfunc (fs *FileSet) HasSeries(name []byte, tags models.Tags, buf []byte) bool {\n\tfor _, f := range fs.files {\n\t\tif exists, tombstoned := f.HasSeries(name, tags, buf); exists {\n\t\t\treturn !tombstoned\n\t\t}\n\t}\n\treturn false\n}\n\n// FilterNamesTags filters out any series which already exist. 
It modifies the\n// provided slices of names and tags.\nfunc (fs *FileSet) FilterNamesTags(names [][]byte, tagsSlice []models.Tags) ([][]byte, []models.Tags) {\n\tbuf := make([]byte, 4096)\n\n\t// Filter across all log files.\n\t// Log files obtain a read lock and should be done in bulk for performance.\n\tfor _, f := range fs.LogFiles() {\n\t\tnames, tagsSlice = f.FilterNamesTags(names, tagsSlice)\n\t}\n\n\t// Filter across remaining index files.\n\tindexFiles := fs.IndexFiles()\n\tnewNames, newTagsSlice := names[:0], tagsSlice[:0]\n\tfor i := range names {\n\t\tname, tags := names[i], tagsSlice[i]\n\t\tcurrentLevel, skipLevel := -1, false\n\n\t\tvar exists, tombstoned bool\n\t\tfor j := 0; j < len(indexFiles); j++ {\n\t\t\tf := indexFiles[j]\n\n\t\t\t// Check for existence on the level when it changes.\n\t\t\tif level := f.Level(); currentLevel != level {\n\t\t\t\tcurrentLevel, skipLevel = level, false\n\n\t\t\t\tif filter := fs.filters[level]; filter != nil {\n\t\t\t\t\tif !filter.Contains(AppendSeriesKey(buf[:0], name, tags)) {\n\t\t\t\t\t\tskipLevel = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Skip file if in level where it doesn't exist.\n\t\t\tif skipLevel {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Stop once we find the series in a file.\n\t\t\tif exists, tombstoned = f.HasSeries(name, tags, buf); exists {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// If the series doesn't exist or it has been tombstoned then add it.\n\t\tif !exists || tombstoned {\n\t\t\tnewNames = append(newNames, name)\n\t\t\tnewTagsSlice = append(newTagsSlice, tags)\n\t\t}\n\t}\n\treturn newNames, newTagsSlice\n}\n\n// SeriesSketches returns the merged series sketches for the FileSet.\nfunc (fs *FileSet) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {\n\tsketch, tsketch := hll.NewDefaultPlus(), hll.NewDefaultPlus()\n\n\t// Iterate over all the files and merge the sketches into the result.\n\tfor _, f := range fs.files {\n\t\tif err := f.MergeSeriesSketches(sketch, 
tsketch); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\treturn sketch, tsketch, nil\n}\n\n// MeasurementsSketches returns the merged measurement sketches for the FileSet.\nfunc (fs *FileSet) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {\n\tsketch, tsketch := hll.NewDefaultPlus(), hll.NewDefaultPlus()\n\n\t// Iterate over all the files and merge the sketches into the result.\n\tfor _, f := range fs.files {\n\t\tif err := f.MergeMeasurementsSketches(sketch, tsketch); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\treturn sketch, tsketch, nil\n}\n\n// MeasurementSeriesByExprIterator returns a series iterator for a measurement\n// that is filtered by expr. If expr only contains time expressions then this\n// call is equivalent to MeasurementSeriesIterator().\nfunc (fs *FileSet) MeasurementSeriesByExprIterator(name []byte, expr influxql.Expr, fieldset *tsdb.MeasurementFieldSet) (SeriesIterator, error) {\n\t// Return all series for the measurement if there are no tag expressions.\n\tif expr == nil || influxql.OnlyTimeExpr(expr) {\n\t\treturn fs.MeasurementSeriesIterator(name), nil\n\t}\n\treturn fs.seriesByExprIterator(name, expr, fieldset.CreateFieldsIfNotExists(name))\n}\n\n// MeasurementSeriesKeysByExpr returns a list of series keys matching expr.\nfunc (fs *FileSet) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr, fieldset *tsdb.MeasurementFieldSet) ([][]byte, error) {\n\t// Create iterator for all matching series.\n\titr, err := fs.MeasurementSeriesByExprIterator(name, expr, fieldset)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if itr == nil {\n\t\treturn nil, nil\n\t}\n\n\t// Iterate over all series and generate keys.\n\tvar keys [][]byte\n\tfor e := itr.Next(); e != nil; e = itr.Next() {\n\t\t// Check for unsupported field filters.\n\t\t// Any remaining filters means there were fields (e.g., `WHERE value = 1.2`).\n\t\tif e.Expr() != nil {\n\t\t\treturn nil, errors.New(\"fields not supported in WHERE clause 
during deletion\")\n\t\t}\n\n\t\tkeys = append(keys, models.MakeKey(e.Name(), e.Tags()))\n\t}\n\treturn keys, nil\n}\n\nfunc (fs *FileSet) seriesByExprIterator(name []byte, expr influxql.Expr, mf *tsdb.MeasurementFields) (SeriesIterator, error) {\n\tswitch expr := expr.(type) {\n\tcase *influxql.BinaryExpr:\n\t\tswitch expr.Op {\n\t\tcase influxql.AND, influxql.OR:\n\t\t\t// Get the series IDs and filter expressions for the LHS.\n\t\t\tlitr, err := fs.seriesByExprIterator(name, expr.LHS, mf)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// Get the series IDs and filter expressions for the RHS.\n\t\t\tritr, err := fs.seriesByExprIterator(name, expr.RHS, mf)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// Intersect iterators if expression is \"AND\".\n\t\t\tif expr.Op == influxql.AND {\n\t\t\t\treturn IntersectSeriesIterators(litr, ritr), nil\n\t\t\t}\n\n\t\t\t// Union iterators if expression is \"OR\".\n\t\t\treturn UnionSeriesIterators(litr, ritr), nil\n\n\t\tdefault:\n\t\t\treturn fs.seriesByBinaryExprIterator(name, expr, mf)\n\t\t}\n\n\tcase *influxql.ParenExpr:\n\t\treturn fs.seriesByExprIterator(name, expr.Expr, mf)\n\n\tdefault:\n\t\treturn nil, nil\n\t}\n}\n\n// seriesByBinaryExprIterator returns a series iterator and a filtering expression.\nfunc (fs *FileSet) seriesByBinaryExprIterator(name []byte, n *influxql.BinaryExpr, mf *tsdb.MeasurementFields) (SeriesIterator, error) {\n\t// If this binary expression has another binary expression, then this\n\t// is some expression math and we should just pass it to the underlying query.\n\tif _, ok := n.LHS.(*influxql.BinaryExpr); ok {\n\t\treturn newSeriesExprIterator(fs.MeasurementSeriesIterator(name), n), nil\n\t} else if _, ok := n.RHS.(*influxql.BinaryExpr); ok {\n\t\treturn newSeriesExprIterator(fs.MeasurementSeriesIterator(name), n), nil\n\t}\n\n\t// Retrieve the variable reference from the correct side of the expression.\n\tkey, ok := n.LHS.(*influxql.VarRef)\n\tvalue 
:= n.RHS\n\tif !ok {\n\t\tkey, ok = n.RHS.(*influxql.VarRef)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"invalid expression: %s\", n.String())\n\t\t}\n\t\tvalue = n.LHS\n\t}\n\n\t// For time literals, return all series and \"true\" as the filter.\n\tif _, ok := value.(*influxql.TimeLiteral); ok || key.Val == \"time\" {\n\t\treturn newSeriesExprIterator(fs.MeasurementSeriesIterator(name), &influxql.BooleanLiteral{Val: true}), nil\n\t}\n\n\t// For fields, return all series from this measurement.\n\tif key.Val != \"_name\" && ((key.Type == influxql.Unknown && mf.HasField(key.Val)) || key.Type == influxql.AnyField || (key.Type != influxql.Tag && key.Type != influxql.Unknown)) {\n\t\treturn newSeriesExprIterator(fs.MeasurementSeriesIterator(name), n), nil\n\t} else if value, ok := value.(*influxql.VarRef); ok {\n\t\t// Check if the RHS is a variable and if it is a field.\n\t\tif value.Val != \"_name\" && ((value.Type == influxql.Unknown && mf.HasField(value.Val)) || key.Type == influxql.AnyField || (value.Type != influxql.Tag && value.Type != influxql.Unknown)) {\n\t\t\treturn newSeriesExprIterator(fs.MeasurementSeriesIterator(name), n), nil\n\t\t}\n\t}\n\n\t// Create iterator based on value type.\n\tswitch value := value.(type) {\n\tcase *influxql.StringLiteral:\n\t\treturn fs.seriesByBinaryExprStringIterator(name, []byte(key.Val), []byte(value.Val), n.Op)\n\tcase *influxql.RegexLiteral:\n\t\treturn fs.seriesByBinaryExprRegexIterator(name, []byte(key.Val), value.Val, n.Op)\n\tcase *influxql.VarRef:\n\t\treturn fs.seriesByBinaryExprVarRefIterator(name, []byte(key.Val), value, n.Op)\n\tdefault:\n\t\tif n.Op == influxql.NEQ || n.Op == influxql.NEQREGEX {\n\t\t\treturn fs.MeasurementSeriesIterator(name), nil\n\t\t}\n\t\treturn nil, nil\n\t}\n}\n\nfunc (fs *FileSet) seriesByBinaryExprStringIterator(name, key, value []byte, op influxql.Token) (SeriesIterator, error) {\n\t// Special handling for \"_name\" to match measurement name.\n\tif bytes.Equal(key, []byte(\"_name\")) 
{\n\t\tif (op == influxql.EQ && bytes.Equal(value, name)) || (op == influxql.NEQ && !bytes.Equal(value, name)) {\n\t\t\treturn fs.MeasurementSeriesIterator(name), nil\n\t\t}\n\t\treturn nil, nil\n\t}\n\n\tif op == influxql.EQ {\n\t\t// Match a specific value.\n\t\tif len(value) != 0 {\n\t\t\treturn fs.TagValueSeriesIterator(name, key, value), nil\n\t\t}\n\n\t\t// Return all measurement series that have no values from this tag key.\n\t\treturn DifferenceSeriesIterators(\n\t\t\tfs.MeasurementSeriesIterator(name),\n\t\t\tfs.TagKeySeriesIterator(name, key),\n\t\t), nil\n\t}\n\n\t// Return all measurement series without this tag value.\n\tif len(value) != 0 {\n\t\treturn DifferenceSeriesIterators(\n\t\t\tfs.MeasurementSeriesIterator(name),\n\t\t\tfs.TagValueSeriesIterator(name, key, value),\n\t\t), nil\n\t}\n\n\t// Return all series across all values of this tag key.\n\treturn fs.TagKeySeriesIterator(name, key), nil\n}\n\nfunc (fs *FileSet) seriesByBinaryExprRegexIterator(name, key []byte, value *regexp.Regexp, op influxql.Token) (SeriesIterator, error) {\n\t// Special handling for \"_name\" to match measurement name.\n\tif bytes.Equal(key, []byte(\"_name\")) {\n\t\tmatch := value.Match(name)\n\t\tif (op == influxql.EQREGEX && match) || (op == influxql.NEQREGEX && !match) {\n\t\t\treturn newSeriesExprIterator(fs.MeasurementSeriesIterator(name), &influxql.BooleanLiteral{Val: true}), nil\n\t\t}\n\t\treturn nil, nil\n\t}\n\treturn fs.MatchTagValueSeriesIterator(name, key, value, op == influxql.EQREGEX), nil\n}\n\nfunc (fs *FileSet) seriesByBinaryExprVarRefIterator(name, key []byte, value *influxql.VarRef, op influxql.Token) (SeriesIterator, error) {\n\tif op == influxql.EQ {\n\t\treturn IntersectSeriesIterators(\n\t\t\tfs.TagKeySeriesIterator(name, key),\n\t\t\tfs.TagKeySeriesIterator(name, []byte(value.Val)),\n\t\t), nil\n\t}\n\n\treturn DifferenceSeriesIterators(\n\t\tfs.TagKeySeriesIterator(name, key),\n\t\tfs.TagKeySeriesIterator(name, []byte(value.Val)),\n\t), 
nil\n}\n\n// buildFilters builds a series existence filter for each compaction level.\nfunc (fs *FileSet) buildFilters() error {\n\tif len(fs.levels) == 0 {\n\t\tfs.filters = nil\n\t\treturn nil\n\t}\n\n\t// Generate filters for each level.\n\tfs.filters = make([]*bloom.Filter, len(fs.levels))\n\n\t// Merge filters at each level.\n\tfor _, f := range fs.files {\n\t\tlevel := f.Level()\n\n\t\t// Skip if file has no bloom filter.\n\t\tif f.Filter() == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Initialize a filter if it doesn't exist.\n\t\tif fs.filters[level] == nil {\n\t\t\tlvl := fs.levels[level]\n\t\t\tfs.filters[level] = bloom.NewFilter(lvl.M, lvl.K)\n\t\t}\n\n\t\t// Merge filter.\n\t\tif err := fs.filters[level].Merge(f.Filter()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// File represents a log or index file.\ntype File interface {\n\tClose() error\n\tPath() string\n\n\tID() int\n\tLevel() int\n\n\tMeasurement(name []byte) MeasurementElem\n\tMeasurementIterator() MeasurementIterator\n\tHasSeries(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool)\n\tSeries(name []byte, tags models.Tags) SeriesElem\n\tSeriesN() uint64\n\n\tTagKey(name, key []byte) TagKeyElem\n\tTagKeyIterator(name []byte) TagKeyIterator\n\n\tTagValue(name, key, value []byte) TagValueElem\n\tTagValueIterator(name, key []byte) TagValueIterator\n\n\t// Series iteration.\n\tSeriesIterator() SeriesIterator\n\tMeasurementSeriesIterator(name []byte) SeriesIterator\n\tTagKeySeriesIterator(name, key []byte) SeriesIterator\n\tTagValueSeriesIterator(name, key, value []byte) SeriesIterator\n\n\t// Sketches for cardinality estimation\n\tMergeSeriesSketches(s, t estimator.Sketch) error\n\tMergeMeasurementsSketches(s, t estimator.Sketch) error\n\n\t// Series existence bloom filter.\n\tFilter() *bloom.Filter\n\n\t// Reference counting.\n\tRetain()\n\tRelease()\n}\n\ntype Files []File\n\nfunc (a Files) IDs() []int {\n\tids := make([]int, len(a))\n\tfor i := range a 
{\n\t\tids[i] = a[i].ID()\n\t}\n\treturn ids\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set_test.go",
    "content": "package tsi1_test\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/models\"\n)\n\n// Ensure fileset can return an iterator over all series in the index.\nfunc TestFileSet_SeriesIterator(t *testing.T) {\n\tidx := MustOpenIndex()\n\tdefer idx.Close()\n\n\t// Create initial set of series.\n\tif err := idx.CreateSeriesSliceIfNotExists([]Series{\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"west\"})},\n\t\t{Name: []byte(\"mem\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify initial set of series.\n\tidx.Run(t, func(t *testing.T) {\n\t\tfs := idx.RetainFileSet()\n\t\tdefer fs.Release()\n\n\t\titr := fs.SeriesIterator()\n\t\tif itr == nil {\n\t\t\tt.Fatal(\"expected iterator\")\n\t\t}\n\n\t\tif e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region east}]` {\n\t\t\tt.Fatalf(\"unexpected series: %s/%s\", e.Name(), e.Tags().String())\n\t\t} else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region west}]` {\n\t\t\tt.Fatalf(\"unexpected series: %s/%s\", e.Name(), e.Tags().String())\n\t\t} else if e := itr.Next(); string(e.Name()) != `mem` || e.Tags().String() != `[{region east}]` {\n\t\t\tt.Fatalf(\"unexpected series: %s/%s\", e.Name(), e.Tags().String())\n\t\t} else if e := itr.Next(); e != nil {\n\t\t\tt.Fatalf(\"expected nil series: %s/%s\", e.Name(), e.Tags().String())\n\t\t}\n\t})\n\n\t// Add more series.\n\tif err := idx.CreateSeriesSliceIfNotExists([]Series{\n\t\t{Name: []byte(\"disk\")},\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"north\"})},\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify additional series.\n\tidx.Run(t, func(t 
*testing.T) {\n\t\tfs := idx.RetainFileSet()\n\t\tdefer fs.Release()\n\n\t\titr := fs.SeriesIterator()\n\t\tif itr == nil {\n\t\t\tt.Fatal(\"expected iterator\")\n\t\t}\n\n\t\tif e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region east}]` {\n\t\t\tt.Fatalf(\"unexpected series: %s/%s\", e.Name(), e.Tags().String())\n\t\t} else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region north}]` {\n\t\t\tt.Fatalf(\"unexpected series: %s/%s\", e.Name(), e.Tags().String())\n\t\t} else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region west}]` {\n\t\t\tt.Fatalf(\"unexpected series: %s/%s\", e.Name(), e.Tags().String())\n\t\t} else if e := itr.Next(); string(e.Name()) != `disk` || len(e.Tags()) != 0 {\n\t\t\tt.Fatalf(\"unexpected series: %s/%s\", e.Name(), e.Tags().String())\n\t\t} else if e := itr.Next(); string(e.Name()) != `mem` || e.Tags().String() != `[{region east}]` {\n\t\t\tt.Fatalf(\"unexpected series: %s/%s\", e.Name(), e.Tags().String())\n\t\t} else if e := itr.Next(); e != nil {\n\t\t\tt.Fatalf(\"expected nil series: %s/%s\", e.Name(), e.Tags().String())\n\t\t}\n\t})\n}\n\n// Ensure fileset can return an iterator over all series for one measurement.\nfunc TestFileSet_MeasurementSeriesIterator(t *testing.T) {\n\tidx := MustOpenIndex()\n\tdefer idx.Close()\n\n\t// Create initial set of series.\n\tif err := idx.CreateSeriesSliceIfNotExists([]Series{\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"west\"})},\n\t\t{Name: []byte(\"mem\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify initial set of series.\n\tidx.Run(t, func(t *testing.T) {\n\t\tfs := idx.RetainFileSet()\n\t\tdefer fs.Release()\n\n\t\titr := fs.MeasurementSeriesIterator([]byte(\"cpu\"))\n\t\tif itr == nil 
{\n\t\t\tt.Fatal(\"expected iterator\")\n\t\t}\n\n\t\tif e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region east}]` {\n\t\t\tt.Fatalf(\"unexpected series: %s/%s\", e.Name(), e.Tags().String())\n\t\t} else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region west}]` {\n\t\t\tt.Fatalf(\"unexpected series: %s/%s\", e.Name(), e.Tags().String())\n\t\t} else if e := itr.Next(); e != nil {\n\t\t\tt.Fatalf(\"expected nil series: %s/%s\", e.Name(), e.Tags().String())\n\t\t}\n\t})\n\n\t// Add more series.\n\tif err := idx.CreateSeriesSliceIfNotExists([]Series{\n\t\t{Name: []byte(\"disk\")},\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"north\"})},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify additional series.\n\tidx.Run(t, func(t *testing.T) {\n\t\tfs := idx.RetainFileSet()\n\t\tdefer fs.Release()\n\n\t\titr := fs.MeasurementSeriesIterator([]byte(\"cpu\"))\n\t\tif itr == nil {\n\t\t\tt.Fatalf(\"expected iterator\")\n\t\t}\n\n\t\tif e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region east}]` {\n\t\t\tt.Fatalf(\"unexpected series: %s/%s\", e.Name(), e.Tags().String())\n\t\t} else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region north}]` {\n\t\t\tt.Fatalf(\"unexpected series: %s/%s\", e.Name(), e.Tags().String())\n\t\t} else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region west}]` {\n\t\t\tt.Fatalf(\"unexpected series: %s/%s\", e.Name(), e.Tags().String())\n\t\t} else if e := itr.Next(); e != nil {\n\t\t\tt.Fatalf(\"expected nil series: %s/%s\", e.Name(), e.Tags().String())\n\t\t}\n\t})\n}\n\n// Ensure fileset can return an iterator over all measurements for the index.\nfunc TestFileSet_MeasurementIterator(t *testing.T) {\n\tidx := MustOpenIndex()\n\tdefer idx.Close()\n\n\t// Create initial set of series.\n\tif err := idx.CreateSeriesSliceIfNotExists([]Series{\n\t\t{Name: 
[]byte(\"cpu\")},\n\t\t{Name: []byte(\"mem\")},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify initial set of series.\n\tidx.Run(t, func(t *testing.T) {\n\t\tfs := idx.RetainFileSet()\n\t\tdefer fs.Release()\n\n\t\titr := fs.MeasurementIterator()\n\t\tif itr == nil {\n\t\t\tt.Fatal(\"expected iterator\")\n\t\t}\n\n\t\tif e := itr.Next(); string(e.Name()) != `cpu` {\n\t\t\tt.Fatalf(\"unexpected measurement: %s\", e.Name())\n\t\t} else if e := itr.Next(); string(e.Name()) != `mem` {\n\t\t\tt.Fatalf(\"unexpected measurement: %s\", e.Name())\n\t\t} else if e := itr.Next(); e != nil {\n\t\t\tt.Fatalf(\"expected nil measurement: %s\", e.Name())\n\t\t}\n\t})\n\n\t// Add more series.\n\tif err := idx.CreateSeriesSliceIfNotExists([]Series{\n\t\t{Name: []byte(\"disk\"), Tags: models.NewTags(map[string]string{\"foo\": \"bar\"})},\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"north\", \"x\": \"y\"})},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify additional series.\n\tidx.Run(t, func(t *testing.T) {\n\t\tfs := idx.RetainFileSet()\n\t\tdefer fs.Release()\n\n\t\titr := fs.MeasurementIterator()\n\t\tif itr == nil {\n\t\t\tt.Fatal(\"expected iterator\")\n\t\t}\n\n\t\tif e := itr.Next(); string(e.Name()) != `cpu` {\n\t\t\tt.Fatalf(\"unexpected measurement: %s\", e.Name())\n\t\t} else if e := itr.Next(); string(e.Name()) != `disk` {\n\t\t\tt.Fatalf(\"unexpected measurement: %s\", e.Name())\n\t\t} else if e := itr.Next(); string(e.Name()) != `mem` {\n\t\t\tt.Fatalf(\"unexpected measurement: %s\", e.Name())\n\t\t} else if e := itr.Next(); e != nil {\n\t\t\tt.Fatalf(\"expected nil measurement: %s\", e.Name())\n\t\t}\n\t})\n}\n\n// Ensure fileset can return an iterator over all keys for one measurement.\nfunc TestFileSet_TagKeyIterator(t *testing.T) {\n\tidx := MustOpenIndex()\n\tdefer idx.Close()\n\n\t// Create initial set of series.\n\tif err := idx.CreateSeriesSliceIfNotExists([]Series{\n\t\t{Name: []byte(\"cpu\"), Tags: 
models.NewTags(map[string]string{\"region\": \"east\"})},\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"west\", \"type\": \"gpu\"})},\n\t\t{Name: []byte(\"mem\"), Tags: models.NewTags(map[string]string{\"region\": \"east\", \"misc\": \"other\"})},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify initial set of series.\n\tidx.Run(t, func(t *testing.T) {\n\t\tfs := idx.RetainFileSet()\n\t\tdefer fs.Release()\n\n\t\titr := fs.TagKeyIterator([]byte(\"cpu\"))\n\t\tif itr == nil {\n\t\t\tt.Fatalf(\"expected iterator\")\n\t\t}\n\n\t\tif e := itr.Next(); string(e.Key()) != `region` {\n\t\t\tt.Fatalf(\"unexpected key: %s\", e.Key())\n\t\t} else if e := itr.Next(); string(e.Key()) != `type` {\n\t\t\tt.Fatalf(\"unexpected key: %s\", e.Key())\n\t\t} else if e := itr.Next(); e != nil {\n\t\t\tt.Fatalf(\"expected nil key: %s\", e.Key())\n\t\t}\n\t})\n\n\t// Add more series.\n\tif err := idx.CreateSeriesSliceIfNotExists([]Series{\n\t\t{Name: []byte(\"disk\"), Tags: models.NewTags(map[string]string{\"foo\": \"bar\"})},\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"north\", \"x\": \"y\"})},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify additional series.\n\tidx.Run(t, func(t *testing.T) {\n\t\tfs := idx.RetainFileSet()\n\t\tdefer fs.Release()\n\n\t\titr := fs.TagKeyIterator([]byte(\"cpu\"))\n\t\tif itr == nil {\n\t\t\tt.Fatal(\"expected iterator\")\n\t\t}\n\n\t\tif e := itr.Next(); string(e.Key()) != `region` {\n\t\t\tt.Fatalf(\"unexpected key: %s\", e.Key())\n\t\t} else if e := itr.Next(); string(e.Key()) != `type` {\n\t\t\tt.Fatalf(\"unexpected key: %s\", e.Key())\n\t\t} else if e := itr.Next(); string(e.Key()) != `x` {\n\t\t\tt.Fatalf(\"unexpected key: %s\", e.Key())\n\t\t} else if e := itr.Next(); e != nil {\n\t\t\tt.Fatalf(\"expected nil key: %s\", e.Key())\n\t\t}\n\t})\n}\n\nvar (\n\tbyteSliceResult [][]byte\n\ttagsSliceResult []models.Tags\n)\n\nfunc 
BenchmarkFileset_FilterNamesTags(b *testing.B) {\n\tidx := MustOpenIndex()\n\tdefer idx.Close()\n\n\tallNames := make([][]byte, 0, 2000*1000)\n\tallTags := make([]models.Tags, 0, 2000*1000)\n\n\tfor i := 0; i < 2000; i++ {\n\t\tfor j := 0; j < 1000; j++ {\n\t\t\tname := []byte(fmt.Sprintf(\"measurement-%d\", i))\n\t\t\ttags := models.NewTags(map[string]string{\"host\": fmt.Sprintf(\"server-%d\", j)})\n\t\t\tallNames = append(allNames, name)\n\t\t\tallTags = append(allTags, tags)\n\t\t}\n\t}\n\n\tif err := idx.CreateSeriesListIfNotExists(nil, allNames, allTags); err != nil {\n\t\tb.Fatal(err)\n\t}\n\t// idx.CheckFastCompaction()\n\n\tfs := idx.RetainFileSet()\n\tdefer fs.Release()\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tb.StopTimer()\n\t\tnames := [][]byte{\n\t\t\t[]byte(\"foo\"),\n\t\t\t[]byte(\"measurement-222\"), // filtered\n\t\t\t[]byte(\"measurement-222\"), // kept (tags won't match)\n\t\t\t[]byte(\"measurements-1\"),\n\t\t\t[]byte(\"measurement-900\"), // filtered\n\t\t\t[]byte(\"measurement-44444\"),\n\t\t\t[]byte(\"bar\"),\n\t\t}\n\n\t\ttags := []models.Tags{\n\t\t\tnil,\n\t\t\tmodels.NewTags(map[string]string{\"host\": \"server-297\"}), // filtered\n\t\t\tmodels.NewTags(map[string]string{\"host\": \"wrong\"}),\n\t\t\tnil,\n\t\t\tmodels.NewTags(map[string]string{\"host\": \"server-1026\"}), // filtered\n\t\t\tmodels.NewTags(map[string]string{\"host\": \"server-23\"}),   // kept (measurement won't match)\n\t\t\tmodels.NewTags(map[string]string{\"host\": \"zoo\"}),\n\t\t}\n\t\tb.StartTimer()\n\t\tbyteSliceResult, tagsSliceResult = fs.FilterNamesTags(names, tags)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index.go",
    "content": "package tsi1\n\nimport (\n\t\"crypto/rand\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/estimator\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t\"github.com/uber-go/zap\"\n)\n\n// IndexName is the name of the index.\nconst IndexName = \"tsi1\"\n\n// Default compaction thresholds.\nconst (\n\tDefaultMaxLogFileSize = 5 * 1024 * 1024\n)\n\nfunc init() {\n\ttsdb.RegisterIndex(IndexName, func(id uint64, database, path string, opt tsdb.EngineOptions) tsdb.Index {\n\t\tidx := NewIndex()\n\t\tidx.ShardID = id\n\t\tidx.Database = database\n\t\tidx.Path = path\n\t\tidx.options = opt\n\t\treturn idx\n\t})\n}\n\n// File extensions.\nconst (\n\tLogFileExt   = \".tsl\"\n\tIndexFileExt = \".tsi\"\n\n\tCompactingExt = \".compacting\"\n)\n\n// ManifestFileName is the name of the index manifest file.\nconst ManifestFileName = \"MANIFEST\"\n\n// Ensure index implements the interface.\nvar _ tsdb.Index = &Index{}\n\n// Index represents a collection of layered index files and WAL.\ntype Index struct {\n\tmu      sync.RWMutex\n\topened  bool\n\toptions tsdb.EngineOptions\n\n\tactiveLogFile *LogFile // current log file\n\tfileSet       *FileSet // current file set\n\tseq           int      // file id sequence\n\n\t// Compaction management\n\tlevels          []CompactionLevel // compaction levels\n\tlevelCompacting []bool            // level compaction status\n\n\t// Close management.\n\tonce    sync.Once\n\tclosing chan struct{}\n\twg      sync.WaitGroup\n\n\t// Fieldset shared with engine.\n\tfieldset *tsdb.MeasurementFieldSet\n\n\t// Associated shard info.\n\tShardID uint64\n\n\t// Name of database.\n\tDatabase string\n\n\t// Root directory of the index files.\n\tPath string\n\n\t// Log file compaction 
thresholds.\n\tMaxLogFileSize int64\n\n\t// Frequency of compaction checks.\n\tCompactionEnabled         bool\n\tCompactionMonitorInterval time.Duration\n\n\tlogger zap.Logger\n}\n\n// NewIndex returns a new instance of Index.\nfunc NewIndex() *Index {\n\treturn &Index{\n\t\tclosing: make(chan struct{}),\n\n\t\t// Default compaction thresholds.\n\t\tMaxLogFileSize:    DefaultMaxLogFileSize,\n\t\tCompactionEnabled: true,\n\n\t\tlogger: zap.New(zap.NullEncoder()),\n\t}\n}\n\nfunc (i *Index) Type() string { return IndexName }\n\n// Open opens the index.\nfunc (i *Index) Open() error {\n\ti.mu.Lock()\n\tdefer i.mu.Unlock()\n\n\tif i.opened {\n\t\treturn errors.New(\"index already open\")\n\t}\n\n\t// Create directory if it doesn't exist.\n\tif err := os.MkdirAll(i.Path, 0777); err != nil {\n\t\treturn err\n\t}\n\n\t// Read manifest file.\n\tm, err := ReadManifestFile(filepath.Join(i.Path, ManifestFileName))\n\tif os.IsNotExist(err) {\n\t\tm = NewManifest()\n\t} else if err != nil {\n\t\treturn err\n\t}\n\n\t// Copy compaction levels to the index.\n\ti.levels = make([]CompactionLevel, len(m.Levels))\n\tcopy(i.levels, m.Levels)\n\n\t// Set up flags to track whether a level is compacting.\n\ti.levelCompacting = make([]bool, len(i.levels))\n\n\t// Open each file in the manifest.\n\tvar files []File\n\tfor _, filename := range m.Files {\n\t\tswitch filepath.Ext(filename) {\n\t\tcase LogFileExt:\n\t\t\tf, err := i.openLogFile(filepath.Join(i.Path, filename))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfiles = append(files, f)\n\n\t\t\t// Make first log file active, if within threshold.\n\t\t\tsz, _ := f.Stat()\n\t\t\tif i.activeLogFile == nil && sz < i.MaxLogFileSize {\n\t\t\t\ti.activeLogFile = f\n\t\t\t}\n\n\t\tcase IndexFileExt:\n\t\t\tf, err := i.openIndexFile(filepath.Join(i.Path, filename))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfiles = append(files, f)\n\t\t}\n\t}\n\tfs, err := NewFileSet(i.levels, files)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\ti.fileSet = fs\n\n\t// Set initial sequnce number.\n\ti.seq = i.fileSet.MaxID()\n\n\t// Delete any files not in the manifest.\n\tif err := i.deleteNonManifestFiles(m); err != nil {\n\t\treturn err\n\t}\n\n\t// Ensure a log file exists.\n\tif i.activeLogFile == nil {\n\t\tif err := i.prependActiveLogFile(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Mark opened.\n\ti.opened = true\n\n\t// Send a compaction request on start up.\n\ti.compact()\n\n\treturn nil\n}\n\n// openLogFile opens a log file and appends it to the index.\nfunc (i *Index) openLogFile(path string) (*LogFile, error) {\n\tf := NewLogFile(path)\n\tif err := f.Open(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn f, nil\n}\n\n// openIndexFile opens a log file and appends it to the index.\nfunc (i *Index) openIndexFile(path string) (*IndexFile, error) {\n\tf := NewIndexFile()\n\tf.SetPath(path)\n\tif err := f.Open(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn f, nil\n}\n\n// deleteNonManifestFiles removes all files not in the manifest.\nfunc (i *Index) deleteNonManifestFiles(m *Manifest) error {\n\tdir, err := os.Open(i.Path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer dir.Close()\n\n\tfis, err := dir.Readdir(-1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Loop over all files and remove any not in the manifest.\n\tfor _, fi := range fis {\n\t\tfilename := filepath.Base(fi.Name())\n\t\tif filename == ManifestFileName || m.HasFile(filename) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := os.RemoveAll(filename); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// Close closes the index.\nfunc (i *Index) Close() error {\n\t// Wait for goroutines to finish.\n\ti.once.Do(func() { close(i.closing) })\n\ti.wg.Wait()\n\n\t// Lock index and close remaining\n\ti.mu.Lock()\n\tdefer i.mu.Unlock()\n\n\t// Close log files.\n\tfor _, f := range i.fileSet.files {\n\t\tf.Close()\n\t}\n\ti.fileSet.files = nil\n\n\treturn nil\n}\n\n// NextSequence returns the 
next file identifier.\nfunc (i *Index) NextSequence() int {\n\ti.mu.Lock()\n\tdefer i.mu.Unlock()\n\treturn i.nextSequence()\n}\n\nfunc (i *Index) nextSequence() int {\n\ti.seq++\n\treturn i.seq\n}\n\n// ManifestPath returns the path to the index's manifest file.\nfunc (i *Index) ManifestPath() string {\n\treturn filepath.Join(i.Path, ManifestFileName)\n}\n\n// Manifest returns a manifest for the index.\nfunc (i *Index) Manifest() *Manifest {\n\tm := &Manifest{\n\t\tLevels: i.levels,\n\t\tFiles:  make([]string, len(i.fileSet.files)),\n\t}\n\n\tfor j, f := range i.fileSet.files {\n\t\tm.Files[j] = filepath.Base(f.Path())\n\t}\n\n\treturn m\n}\n\n// writeManifestFile writes the manifest to the appropriate file path.\nfunc (i *Index) writeManifestFile() error {\n\treturn WriteManifestFile(i.ManifestPath(), i.Manifest())\n}\n\n// WithLogger sets the logger for the index.\nfunc (i *Index) WithLogger(logger zap.Logger) {\n\ti.logger = logger.With(zap.String(\"index\", \"tsi\"))\n}\n\n// SetFieldSet sets a shared field set from the engine.\nfunc (i *Index) SetFieldSet(fs *tsdb.MeasurementFieldSet) {\n\ti.mu.Lock()\n\ti.fieldset = fs\n\ti.mu.Unlock()\n}\n\n// RetainFileSet returns the current fileset and adds a reference count.\nfunc (i *Index) RetainFileSet() *FileSet {\n\ti.mu.RLock()\n\tfs := i.retainFileSet()\n\ti.mu.RUnlock()\n\treturn fs\n}\n\nfunc (i *Index) retainFileSet() *FileSet {\n\tfs := i.fileSet\n\tfs.Retain()\n\treturn fs\n}\n\n// FileN returns the active files in the file set.\nfunc (i *Index) FileN() int { return len(i.fileSet.files) }\n\n// prependActiveLogFile adds a new log file so that the current log file can be compacted.\nfunc (i *Index) prependActiveLogFile() error {\n\t// Open file and insert it into the first position.\n\tf, err := i.openLogFile(filepath.Join(i.Path, FormatLogFileName(i.nextSequence())))\n\tif err != nil {\n\t\treturn err\n\t}\n\ti.activeLogFile = f\n\n\t// Prepend and generate new fileset.\n\tfs, err := 
i.fileSet.Prepend(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\ti.fileSet = fs\n\n\t// Write new manifest.\n\tif err := i.writeManifestFile(); err != nil {\n\t\t// TODO: Close index if write fails.\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// ForEachMeasurementName iterates over all measurement names in the index.\nfunc (i *Index) ForEachMeasurementName(fn func(name []byte) error) error {\n\tfs := i.RetainFileSet()\n\tdefer fs.Release()\n\n\titr := fs.MeasurementIterator()\n\tif itr == nil {\n\t\treturn nil\n\t}\n\n\tfor e := itr.Next(); e != nil; e = itr.Next() {\n\t\tif err := fn(e.Name()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// MeasurementExists returns true if a measurement exists.\nfunc (i *Index) MeasurementExists(name []byte) (bool, error) {\n\tfs := i.RetainFileSet()\n\tdefer fs.Release()\n\tm := fs.Measurement(name)\n\treturn m != nil && !m.Deleted(), nil\n}\n\nfunc (i *Index) MeasurementNamesByExpr(expr influxql.Expr) ([][]byte, error) {\n\tfs := i.RetainFileSet()\n\tdefer fs.Release()\n\treturn fs.MeasurementNamesByExpr(expr)\n}\n\nfunc (i *Index) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {\n\tfs := i.RetainFileSet()\n\tdefer fs.Release()\n\n\titr := fs.MeasurementIterator()\n\tvar a [][]byte\n\tfor e := itr.Next(); e != nil; e = itr.Next() {\n\t\tif re.Match(e.Name()) {\n\t\t\ta = append(a, e.Name())\n\t\t}\n\t}\n\treturn a, nil\n}\n\n// DropMeasurement deletes a measurement from the index.\nfunc (i *Index) DropMeasurement(name []byte) error {\n\tfs := i.RetainFileSet()\n\tdefer fs.Release()\n\n\t// Delete all keys and values.\n\tif kitr := fs.TagKeyIterator(name); kitr != nil {\n\t\tfor k := kitr.Next(); k != nil; k = kitr.Next() {\n\t\t\t// Delete key if not already deleted.\n\t\t\tif !k.Deleted() {\n\t\t\t\tif err := func() error {\n\t\t\t\t\ti.mu.RLock()\n\t\t\t\t\tdefer i.mu.RUnlock()\n\t\t\t\t\treturn i.activeLogFile.DeleteTagKey(name, k.Key())\n\t\t\t\t}(); err != nil {\n\t\t\t\t\treturn 
err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Delete each value in key.\n\t\t\tif vitr := k.TagValueIterator(); vitr != nil {\n\t\t\t\tfor v := vitr.Next(); v != nil; v = vitr.Next() {\n\t\t\t\t\tif !v.Deleted() {\n\t\t\t\t\t\tif err := func() error {\n\t\t\t\t\t\t\ti.mu.RLock()\n\t\t\t\t\t\t\tdefer i.mu.RUnlock()\n\t\t\t\t\t\t\treturn i.activeLogFile.DeleteTagValue(name, k.Key(), v.Value())\n\t\t\t\t\t\t}(); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Delete all series in measurement.\n\tif sitr := fs.MeasurementSeriesIterator(name); sitr != nil {\n\t\tfor s := sitr.Next(); s != nil; s = sitr.Next() {\n\t\t\tif !s.Deleted() {\n\t\t\t\tif err := func() error {\n\t\t\t\t\ti.mu.RLock()\n\t\t\t\t\tdefer i.mu.RUnlock()\n\t\t\t\t\treturn i.activeLogFile.DeleteSeries(s.Name(), s.Tags())\n\t\t\t\t}(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Mark measurement as deleted.\n\tif err := func() error {\n\t\ti.mu.RLock()\n\t\tdefer i.mu.RUnlock()\n\t\treturn i.activeLogFile.DeleteMeasurement(name)\n\t}(); err != nil {\n\t\treturn err\n\t}\n\n\t// Check if the log file needs to be swapped.\n\tif err := i.CheckLogFile(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// CreateSeriesListIfNotExists creates a list of series if they don't exist in bulk.\nfunc (i *Index) CreateSeriesListIfNotExists(_, names [][]byte, tagsSlice []models.Tags) error {\n\t// All slices must be of equal length.\n\tif len(names) != len(tagsSlice) {\n\t\treturn errors.New(\"names/tags length mismatch\")\n\t}\n\n\t// Maintain reference count on files in file set.\n\tfs := i.RetainFileSet()\n\tdefer fs.Release()\n\n\t// Filter out existing series. 
Exit if no new series exist.\n\tnames, tagsSlice = fs.FilterNamesTags(names, tagsSlice)\n\tif len(names) == 0 {\n\t\treturn nil\n\t}\n\n\t// Ensure fileset cannot change during insert.\n\ti.mu.RLock()\n\t// Insert series into log file.\n\tif err := i.activeLogFile.AddSeriesList(names, tagsSlice); err != nil {\n\t\ti.mu.RUnlock()\n\t\treturn err\n\t}\n\ti.mu.RUnlock()\n\n\treturn i.CheckLogFile()\n}\n\n// InitializeSeries is a no-op. This only applies to the in-memory index.\nfunc (i *Index) InitializeSeries(key, name []byte, tags models.Tags) error {\n\treturn nil\n}\n\n// CreateSeriesIfNotExists creates a series if it doesn't exist or is deleted.\nfunc (i *Index) CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error {\n\tif err := func() error {\n\t\ti.mu.RLock()\n\t\tdefer i.mu.RUnlock()\n\n\t\tfs := i.retainFileSet()\n\t\tdefer fs.Release()\n\n\t\tif fs.HasSeries(name, tags, nil) {\n\t\t\treturn nil\n\t\t}\n\n\t\tif err := i.activeLogFile.AddSeries(name, tags); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}(); err != nil {\n\t\treturn err\n\t}\n\n\t// Swap log file, if necessary.\n\tif err := i.CheckLogFile(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (i *Index) DropSeries(key []byte) error {\n\tif err := func() error {\n\t\ti.mu.RLock()\n\t\tdefer i.mu.RUnlock()\n\n\t\tname, tags := models.ParseKey(key)\n\n\t\tmname := []byte(name)\n\t\tif err := i.activeLogFile.DeleteSeries(mname, tags); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Obtain file set after deletion because that may add a new log file.\n\t\tfs := i.retainFileSet()\n\t\tdefer fs.Release()\n\n\t\t// Check if that was the last series for the measurement in the entire index.\n\t\titr := fs.MeasurementSeriesIterator(mname)\n\t\tif itr == nil {\n\t\t\treturn nil\n\t\t} else if e := itr.Next(); e != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\t// If no more series exist in the measurement then delete the measurement.\n\t\tif err := 
i.activeLogFile.DeleteMeasurement(mname); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}(); err != nil {\n\t\treturn err\n\t}\n\n\t// Swap log file, if necessary.\n\tif err := i.CheckLogFile(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// SeriesSketches returns the two sketches for the index by merging all\n// instance sketches from TSI files and the WAL.\nfunc (i *Index) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {\n\tfs := i.RetainFileSet()\n\tdefer fs.Release()\n\treturn fs.SeriesSketches()\n}\n\n// MeasurementsSketches returns the two sketches for the index by merging all\n// instances of the sketch types in all the index files.\nfunc (i *Index) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {\n\tfs := i.RetainFileSet()\n\tdefer fs.Release()\n\treturn fs.MeasurementsSketches()\n}\n\n// SeriesN returns the number of unique non-tombstoned series in the index.\n// Since indexes are not shared across shards, the count returned by SeriesN\n// cannot be combined with other shard's results. 
If you need to count series\n// across indexes then use SeriesSketches and merge the results from other\n// indexes.\nfunc (i *Index) SeriesN() int64 {\n\tfs := i.RetainFileSet()\n\tdefer fs.Release()\n\n\tvar total int64\n\tfor _, f := range fs.files {\n\t\ttotal += int64(f.SeriesN())\n\t}\n\treturn total\n}\n\n// HasTagKey returns true if tag key exists.\nfunc (i *Index) HasTagKey(name, key []byte) (bool, error) {\n\tfs := i.RetainFileSet()\n\tdefer fs.Release()\n\treturn fs.HasTagKey(name, key), nil\n}\n\n// MeasurementTagKeysByExpr extracts the tag keys wanted by the expression.\nfunc (i *Index) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) {\n\tfs := i.RetainFileSet()\n\tdefer fs.Release()\n\treturn fs.MeasurementTagKeysByExpr(name, expr)\n}\n\n// MeasurementTagKeyValuesByExpr returns a set of tag values filtered by an expression.\n//\n// See tsm1.Engine.MeasurementTagKeyValuesByExpr for a fuller description of this\n// method.\nfunc (i *Index) MeasurementTagKeyValuesByExpr(name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error) {\n\tfs := i.RetainFileSet()\n\tdefer fs.Release()\n\n\tif len(keys) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tresults := make([][]string, len(keys))\n\t// If we haven't been provided sorted keys, then we need to sort them.\n\tif !keysSorted {\n\t\tsort.Sort(sort.StringSlice(keys))\n\t}\n\n\t// No expression means that the values shouldn't be filtered, so we can\n\t// fetch them all.\n\tif expr == nil {\n\t\tfor ki, key := range keys {\n\t\t\titr := fs.TagValueIterator(name, []byte(key))\n\t\t\tfor val := itr.Next(); val != nil; val = itr.Next() {\n\t\t\t\tresults[ki] = append(results[ki], string(val.Value()))\n\t\t\t}\n\t\t}\n\t\treturn results, nil\n\t}\n\n\t// This is the case where we have filtered series by some WHERE condition.\n\t// We only care about the tag values for the keys given the\n\t// filtered set of series ids.\n\tresultSet, err := 
fs.tagValuesByKeyAndExpr(name, keys, expr, i.fieldset)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Convert result sets into []string\n\tfor i, s := range resultSet {\n\t\tvalues := make([]string, 0, len(s))\n\t\tfor v := range s {\n\t\t\tvalues = append(values, v)\n\t\t}\n\t\tsort.Sort(sort.StringSlice(values))\n\t\tresults[i] = values\n\t}\n\treturn results, nil\n}\n\n// ForEachMeasurementSeriesByExpr iterates over all series in a measurement filtered by an expression.\nfunc (i *Index) ForEachMeasurementSeriesByExpr(name []byte, condition influxql.Expr, fn func(tags models.Tags) error) error {\n\tfs := i.RetainFileSet()\n\tdefer fs.Release()\n\n\titr, err := fs.MeasurementSeriesByExprIterator(name, condition, i.fieldset)\n\tif err != nil {\n\t\treturn err\n\t} else if itr == nil {\n\t\treturn nil\n\t}\n\n\tfor e := itr.Next(); e != nil; e = itr.Next() {\n\t\tif err := fn(e.Tags()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ForEachMeasurementTagKey iterates over all tag keys in a measurement.\nfunc (i *Index) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error {\n\tfs := i.RetainFileSet()\n\tdefer fs.Release()\n\n\titr := fs.TagKeyIterator(name)\n\tif itr == nil {\n\t\treturn nil\n\t}\n\n\tfor e := itr.Next(); e != nil; e = itr.Next() {\n\t\tif err := fn(e.Key()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// TagKeyCardinality always returns zero.\n// It is not possible to determine cardinality of tags across index files.\nfunc (i *Index) TagKeyCardinality(name, key []byte) int {\n\treturn 0\n}\n\n// MeasurementSeriesKeysByExpr returns a list of series keys matching expr.\nfunc (i *Index) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr) ([][]byte, error) {\n\tfs := i.RetainFileSet()\n\tdefer fs.Release()\n\treturn fs.MeasurementSeriesKeysByExpr(name, expr, i.fieldset)\n}\n\n// TagSets returns an ordered list of tag sets for a measurement by dimension\n// and filtered by an 
optional conditional expression.\nfunc (i *Index) TagSets(name []byte, opt influxql.IteratorOptions) ([]*influxql.TagSet, error) {\n\tfs := i.RetainFileSet()\n\tdefer fs.Release()\n\n\titr, err := fs.MeasurementSeriesByExprIterator(name, opt.Condition, i.fieldset)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if itr == nil {\n\t\treturn nil, nil\n\t}\n\n\t// For every series, get the tag values for the requested tag keys i.e.\n\t// dimensions. This is the TagSet for that series. Series with the same\n\t// TagSet are then grouped together, because for the purpose of GROUP BY\n\t// they are part of the same composite series.\n\ttagSets := make(map[string]*influxql.TagSet, 64)\n\n\tif itr != nil {\n\t\tfor e := itr.Next(); e != nil; e = itr.Next() {\n\t\t\ttags := make(map[string]string, len(opt.Dimensions))\n\n\t\t\t// Build the TagSet for this series.\n\t\t\tfor _, dim := range opt.Dimensions {\n\t\t\t\ttags[dim] = e.Tags().GetString(dim)\n\t\t\t}\n\n\t\t\t// Convert the TagSet to a string, so it can be added to a map\n\t\t\t// allowing TagSets to be handled as a set.\n\t\t\ttagsAsKey := tsdb.MarshalTags(tags)\n\t\t\ttagSet, ok := tagSets[string(tagsAsKey)]\n\t\t\tif !ok {\n\t\t\t\t// This TagSet is new, create a new entry for it.\n\t\t\t\ttagSet = &influxql.TagSet{\n\t\t\t\t\tTags: tags,\n\t\t\t\t\tKey:  tagsAsKey,\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Associate the series and filter with the Tagset.\n\t\t\ttagSet.AddFilter(string(models.MakeKey(e.Name(), e.Tags())), e.Expr())\n\n\t\t\t// Ensure it's back in the map.\n\t\t\ttagSets[string(tagsAsKey)] = tagSet\n\t\t}\n\t}\n\n\t// Sort the series in each tag set.\n\tfor _, t := range tagSets {\n\t\tsort.Sort(t)\n\t}\n\n\t// The TagSets have been created, as a map of TagSets. 
Just send\n\t// the values back as a slice, sorting for consistency.\n\tsortedTagsSets := make([]*influxql.TagSet, 0, len(tagSets))\n\tfor _, v := range tagSets {\n\t\tsortedTagsSets = append(sortedTagsSets, v)\n\t}\n\tsort.Sort(byTagKey(sortedTagsSets))\n\n\treturn sortedTagsSets, nil\n}\n\n// SnapshotTo creates hard links to the file set into path.\nfunc (i *Index) SnapshotTo(path string) error {\n\ti.mu.Lock()\n\tdefer i.mu.Unlock()\n\n\tfs := i.retainFileSet()\n\tdefer fs.Release()\n\n\t// Flush active log file, if any.\n\tif err := i.activeLogFile.Flush(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := os.Mkdir(filepath.Join(path, \"index\"), 0777); err != nil {\n\t\treturn err\n\t}\n\n\t// Link manifest.\n\tif err := os.Link(i.ManifestPath(), filepath.Join(path, \"index\", filepath.Base(i.ManifestPath()))); err != nil {\n\t\treturn fmt.Errorf(\"error creating tsi manifest hard link: %q\", err)\n\t}\n\n\t// Link files in directory.\n\tfor _, f := range fs.files {\n\t\tif err := os.Link(f.Path(), filepath.Join(path, \"index\", filepath.Base(f.Path()))); err != nil {\n\t\t\treturn fmt.Errorf(\"error creating tsi hard link: %q\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (i *Index) SetFieldName(measurement []byte, name string) {}\nfunc (i *Index) RemoveShard(shardID uint64)                   {}\nfunc (i *Index) AssignShard(k string, shardID uint64)         {}\n\nfunc (i *Index) UnassignShard(k string, shardID uint64) error {\n\t// This can be called directly once inmem is gone.\n\treturn i.DropSeries([]byte(k))\n}\n\n// SeriesPointIterator returns an influxql iterator over all series.\nfunc (i *Index) SeriesPointIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\t// NOTE: The iterator handles releasing the file set.\n\tfs := i.RetainFileSet()\n\treturn newSeriesPointIterator(fs, i.fieldset, opt), nil\n}\n\n// Compact requests a compaction of log files.\nfunc (i *Index) Compact() {\n\ti.mu.Lock()\n\tdefer 
i.mu.Unlock()\n\ti.compact()\n}\n\n// compact compacts contiguous groups of files that are not currently compacting.\nfunc (i *Index) compact() {\n\tif !i.CompactionEnabled {\n\t\treturn\n\t}\n\n\tfs := i.retainFileSet()\n\tdefer fs.Release()\n\n\t// Iterate over each level we are going to compact.\n\t// We skip the first level (0) because it is log files and they are compacted separately.\n\t// We skip the last level because the files have no higher level to compact into.\n\tminLevel, maxLevel := 1, len(i.levels)-2\n\tfor level := minLevel; level <= maxLevel; level++ {\n\t\t// Skip level if it is currently compacting.\n\t\tif i.levelCompacting[level] {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Collect contiguous files from the end of the level.\n\t\tfiles := fs.LastContiguousIndexFilesByLevel(level)\n\t\tif len(files) < 2 {\n\t\t\tcontinue\n\t\t} else if len(files) > MaxIndexMergeCount {\n\t\t\tfiles = files[len(files)-MaxIndexMergeCount:]\n\t\t}\n\n\t\t// Retain files during compaction.\n\t\tIndexFiles(files).Retain()\n\n\t\t// Mark the level as compacting.\n\t\ti.levelCompacting[level] = true\n\n\t\t// Execute in closure to save reference to the group within the loop.\n\t\tfunc(files []*IndexFile, level int) {\n\t\t\t// Start compacting in a separate goroutine.\n\t\t\ti.wg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer i.wg.Done()\n\n\t\t\t\t// Compact to a new level.\n\t\t\t\ti.compactToLevel(files, level+1)\n\n\t\t\t\t// Ensure compaction lock for the level is released.\n\t\t\t\ti.mu.Lock()\n\t\t\t\ti.levelCompacting[level] = false\n\t\t\t\ti.mu.Unlock()\n\n\t\t\t\t// Check for new compactions\n\t\t\t\ti.Compact()\n\t\t\t}()\n\t\t}(files, level)\n\t}\n}\n\n// compactToLevel compacts a set of files into a new file. Replaces old files with\n// compacted file on successful completion. 
This runs in a separate goroutine.\nfunc (i *Index) compactToLevel(files []*IndexFile, level int) {\n\tassert(len(files) >= 2, \"at least two index files are required for compaction\")\n\tassert(level > 0, \"cannot compact level zero\")\n\n\t// Build a logger for this compaction.\n\tlogger := i.logger.With(zap.String(\"token\", generateCompactionToken()))\n\n\t// Files have already been retained by caller.\n\t// Ensure files are released only once.\n\tvar once sync.Once\n\tdefer once.Do(func() { IndexFiles(files).Release() })\n\n\t// Track time to compact.\n\tstart := time.Now()\n\n\t// Create new index file.\n\tpath := filepath.Join(i.Path, FormatIndexFileName(i.NextSequence(), level))\n\tf, err := os.Create(path)\n\tif err != nil {\n\t\tlogger.Error(\"cannot create compation files\", zap.Error(err))\n\t\treturn\n\t}\n\tdefer f.Close()\n\n\tlogger.Info(\"performing full compaction\",\n\t\tzap.String(\"src\", joinIntSlice(IndexFiles(files).IDs(), \",\")),\n\t\tzap.String(\"dst\", path),\n\t)\n\n\t// Compact all index files to new index file.\n\tlvl := i.levels[level]\n\tn, err := IndexFiles(files).CompactTo(f, lvl.M, lvl.K)\n\tif err != nil {\n\t\tlogger.Error(\"cannot compact index files\", zap.Error(err))\n\t\treturn\n\t}\n\n\t// Close file.\n\tif err := f.Close(); err != nil {\n\t\tlogger.Error(\"error closing index file\", zap.Error(err))\n\t\treturn\n\t}\n\n\t// Reopen as an index file.\n\tfile := NewIndexFile()\n\tfile.SetPath(path)\n\tif err := file.Open(); err != nil {\n\t\tlogger.Error(\"cannot open new index file\", zap.Error(err))\n\t\treturn\n\t}\n\n\t// Obtain lock to swap in index file and write manifest.\n\tif err := func() error {\n\t\ti.mu.Lock()\n\t\tdefer i.mu.Unlock()\n\n\t\t// Replace previous files with new index file.\n\t\ti.fileSet = i.fileSet.MustReplace(IndexFiles(files).Files(), file)\n\n\t\t// Write new manifest.\n\t\tif err := i.writeManifestFile(); err != nil {\n\t\t\t// TODO: Close index if write fails.\n\t\t\treturn 
err\n\t\t}\n\t\treturn nil\n\t}(); err != nil {\n\t\tlogger.Error(\"cannot write manifest\", zap.Error(err))\n\t\treturn\n\t}\n\n\telapsed := time.Since(start)\n\tlogger.Info(\"full compaction complete\",\n\t\tzap.String(\"path\", path),\n\t\tzap.String(\"elapsed\", elapsed.String()),\n\t\tzap.Int64(\"bytes\", n),\n\t\tzap.Int(\"kb_per_sec\", int(float64(n)/elapsed.Seconds())/1024),\n\t)\n\n\t// Release old files.\n\tonce.Do(func() { IndexFiles(files).Release() })\n\n\t// Close and delete all old index files.\n\tfor _, f := range files {\n\t\tlogger.Info(\"removing index file\", zap.String(\"path\", f.Path()))\n\n\t\tif err := f.Close(); err != nil {\n\t\t\tlogger.Error(\"cannot close index file\", zap.Error(err))\n\t\t\treturn\n\t\t} else if err := os.Remove(f.Path()); err != nil {\n\t\t\tlogger.Error(\"cannot remove index file\", zap.Error(err))\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (i *Index) CheckLogFile() error {\n\t// Check log file size under read lock.\n\tif size := func() int64 {\n\t\ti.mu.RLock()\n\t\tdefer i.mu.RUnlock()\n\t\treturn i.activeLogFile.Size()\n\t}(); size < i.MaxLogFileSize {\n\t\treturn nil\n\t}\n\n\t// If file size exceeded then recheck under write lock and swap files.\n\ti.mu.Lock()\n\tdefer i.mu.Unlock()\n\treturn i.checkLogFile()\n}\n\nfunc (i *Index) checkLogFile() error {\n\tif i.activeLogFile.Size() < i.MaxLogFileSize {\n\t\treturn nil\n\t}\n\n\t// Swap current log file.\n\tlogFile := i.activeLogFile\n\n\t// Open new log file and insert it into the first position.\n\tif err := i.prependActiveLogFile(); err != nil {\n\t\treturn err\n\t}\n\n\t// Begin compacting in a background goroutine.\n\ti.wg.Add(1)\n\tgo func() {\n\t\tdefer i.wg.Done()\n\t\ti.compactLogFile(logFile)\n\t\ti.Compact() // check for new compactions\n\t}()\n\n\treturn nil\n}\n\n// compactLogFile compacts f into a tsi file. The new file will share the\n// same identifier but will have a \".tsi\" extension. 
Once the log file is\n// compacted then the manifest is updated and the log file is discarded.\nfunc (i *Index) compactLogFile(logFile *LogFile) {\n\tstart := time.Now()\n\n\t// Retrieve identifier from current path.\n\tid := logFile.ID()\n\tassert(id != 0, \"cannot parse log file id: %s\", logFile.Path())\n\n\t// Build a logger for this compaction.\n\tlogger := i.logger.With(\n\t\tzap.String(\"token\", generateCompactionToken()),\n\t\tzap.Int(\"id\", id),\n\t)\n\n\t// Create new index file.\n\tpath := filepath.Join(i.Path, FormatIndexFileName(id, 1))\n\tf, err := os.Create(path)\n\tif err != nil {\n\t\tlogger.Error(\"cannot create index file\", zap.Error(err))\n\t\treturn\n\t}\n\tdefer f.Close()\n\n\t// Compact log file to new index file.\n\tlvl := i.levels[1]\n\tn, err := logFile.CompactTo(f, lvl.M, lvl.K)\n\tif err != nil {\n\t\tlogger.Error(\"cannot compact log file\", zap.Error(err), zap.String(\"path\", logFile.Path()))\n\t\treturn\n\t}\n\n\t// Close file.\n\tif err := f.Close(); err != nil {\n\t\tlogger.Error(\"cannot close log file\", zap.Error(err))\n\t\treturn\n\t}\n\n\t// Reopen as an index file.\n\tfile := NewIndexFile()\n\tfile.SetPath(path)\n\tif err := file.Open(); err != nil {\n\t\tlogger.Error(\"cannot open compacted index file\", zap.Error(err), zap.String(\"path\", file.Path()))\n\t\treturn\n\t}\n\n\t// Obtain lock to swap in index file and write manifest.\n\tif err := func() error {\n\t\ti.mu.Lock()\n\t\tdefer i.mu.Unlock()\n\n\t\t// Replace previous log file with index file.\n\t\ti.fileSet = i.fileSet.MustReplace([]File{logFile}, file)\n\n\t\t// Write new manifest.\n\t\tif err := i.writeManifestFile(); err != nil {\n\t\t\t// TODO: Close index if write fails.\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}(); err != nil {\n\t\tlogger.Error(\"cannot update manifest\", zap.Error(err))\n\t\treturn\n\t}\n\n\telapsed := time.Since(start)\n\tlogger.Error(\"log file compacted\",\n\t\tzap.String(\"elapsed\", elapsed.String()),\n\t\tzap.Int64(\"bytes\", 
n),\n\t\tzap.Int(\"kb_per_sec\", int(float64(n)/elapsed.Seconds())/1024),\n\t)\n\n\t// Closing the log file will automatically wait until the ref count is zero.\n\tif err := logFile.Close(); err != nil {\n\t\tlogger.Error(\"cannot close log file\", zap.Error(err))\n\t\treturn\n\t} else if err := os.Remove(logFile.Path()); err != nil {\n\t\tlogger.Error(\"cannot remove log file\", zap.Error(err))\n\t\treturn\n\t}\n\n\treturn\n}\n\n// seriesPointIterator adapts SeriesIterator to an influxql.Iterator.\ntype seriesPointIterator struct {\n\tonce     sync.Once\n\tfs       *FileSet\n\tfieldset *tsdb.MeasurementFieldSet\n\tmitr     MeasurementIterator\n\tsitr     SeriesIterator\n\topt      influxql.IteratorOptions\n\n\tpoint influxql.FloatPoint // reusable point\n}\n\n// newSeriesPointIterator returns a new instance of seriesPointIterator.\nfunc newSeriesPointIterator(fs *FileSet, fieldset *tsdb.MeasurementFieldSet, opt influxql.IteratorOptions) *seriesPointIterator {\n\treturn &seriesPointIterator{\n\t\tfs:       fs,\n\t\tfieldset: fieldset,\n\t\tmitr:     fs.MeasurementIterator(),\n\t\tpoint: influxql.FloatPoint{\n\t\t\tAux: make([]interface{}, len(opt.Aux)),\n\t\t},\n\t\topt: opt,\n\t}\n}\n\n// Stats returns stats about the points processed.\nfunc (itr *seriesPointIterator) Stats() influxql.IteratorStats { return influxql.IteratorStats{} }\n\n// Close closes the iterator.\nfunc (itr *seriesPointIterator) Close() error {\n\titr.once.Do(func() { itr.fs.Release() })\n\treturn nil\n}\n\n// Next emits the next point in the iterator.\nfunc (itr *seriesPointIterator) Next() (*influxql.FloatPoint, error) {\n\tfor {\n\t\t// Create new series iterator, if necessary.\n\t\t// Exit if there are no measurements remaining.\n\t\tif itr.sitr == nil {\n\t\t\tm := itr.mitr.Next()\n\t\t\tif m == nil {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\n\t\t\tsitr, err := itr.fs.MeasurementSeriesByExprIterator(m.Name(), itr.opt.Condition, itr.fieldset)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, 
err\n\t\t\t} else if sitr == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\titr.sitr = sitr\n\t\t}\n\n\t\t// Read next series element.\n\t\te := itr.sitr.Next()\n\t\tif e == nil {\n\t\t\titr.sitr = nil\n\t\t\tcontinue\n\t\t}\n\n\t\t// Convert to a key.\n\t\tkey := string(models.MakeKey(e.Name(), e.Tags()))\n\n\t\t// Write auxiliary fields.\n\t\tfor i, f := range itr.opt.Aux {\n\t\t\tswitch f.Val {\n\t\t\tcase \"key\":\n\t\t\t\titr.point.Aux[i] = key\n\t\t\t}\n\t\t}\n\t\treturn &itr.point, nil\n\t}\n}\n\n// unionStringSets returns the union of two sets\nfunc unionStringSets(a, b map[string]struct{}) map[string]struct{} {\n\tother := make(map[string]struct{})\n\tfor k := range a {\n\t\tother[k] = struct{}{}\n\t}\n\tfor k := range b {\n\t\tother[k] = struct{}{}\n\t}\n\treturn other\n}\n\n// intersectStringSets returns the intersection of two sets.\nfunc intersectStringSets(a, b map[string]struct{}) map[string]struct{} {\n\tif len(a) < len(b) {\n\t\ta, b = b, a\n\t}\n\n\tother := make(map[string]struct{})\n\tfor k := range a {\n\t\tif _, ok := b[k]; ok {\n\t\t\tother[k] = struct{}{}\n\t\t}\n\t}\n\treturn other\n}\n\nvar fileIDRegex = regexp.MustCompile(`^L(\\d+)-(\\d+)\\..+$`)\n\n// ParseFilename extracts the numeric id from a log or index file path.\n// Returns 0 if it cannot be parsed.\nfunc ParseFilename(name string) (level, id int) {\n\ta := fileIDRegex.FindStringSubmatch(filepath.Base(name))\n\tif a == nil {\n\t\treturn 0, 0\n\t}\n\n\tlevel, _ = strconv.Atoi(a[1])\n\tid, _ = strconv.Atoi(a[2])\n\treturn id, level\n}\n\n// Manifest represents the list of log & index files that make up the index.\n// The files are listed in time order, not necessarily ID order.\ntype Manifest struct {\n\tLevels []CompactionLevel `json:\"levels,omitempty\"`\n\tFiles  []string          `json:\"files,omitempty\"`\n}\n\n// NewManifest returns a new instance of Manifest with default compaction levels.\nfunc NewManifest() *Manifest {\n\tm := &Manifest{\n\t\tLevels: make([]CompactionLevel, 
len(DefaultCompactionLevels)),\n\t}\n\tcopy(m.Levels, DefaultCompactionLevels[:])\n\treturn m\n}\n\n// HasFile returns true if name is listed in the log files or index files.\nfunc (m *Manifest) HasFile(name string) bool {\n\tfor _, filename := range m.Files {\n\t\tif filename == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// ReadManifestFile reads a manifest from a file path.\nfunc ReadManifestFile(path string) (*Manifest, error) {\n\tbuf, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Decode manifest.\n\tvar m Manifest\n\tif err := json.Unmarshal(buf, &m); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &m, nil\n}\n\n// WriteManifestFile writes a manifest to a file path.\nfunc WriteManifestFile(path string, m *Manifest) error {\n\tbuf, err := json.MarshalIndent(m, \"\", \"  \")\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuf = append(buf, '\\n')\n\n\tif err := ioutil.WriteFile(path, buf, 0666); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc joinIntSlice(a []int, sep string) string {\n\tother := make([]string, len(a))\n\tfor i := range a {\n\t\tother[i] = strconv.Itoa(a[i])\n\t}\n\treturn strings.Join(other, sep)\n}\n\n// CompactionLevel represents a grouping of index files based on bloom filter\n// settings. 
By having the same bloom filter settings, the filters\n// can be merged and evaluated at a higher level.\ntype CompactionLevel struct {\n\t// Bloom filter bit size & hash count\n\tM uint64 `json:\"m,omitempty\"`\n\tK uint64 `json:\"k,omitempty\"`\n}\n\n// DefaultCompactionLevels is the default settings used by the index.\nvar DefaultCompactionLevels = []CompactionLevel{\n\t{M: 0, K: 0},       // L0: Log files, no filter.\n\t{M: 1 << 25, K: 6}, // L1: Initial compaction\n\t{M: 1 << 25, K: 6}, // L2\n\t{M: 1 << 26, K: 6}, // L3\n\t{M: 1 << 27, K: 6}, // L4\n\t{M: 1 << 28, K: 6}, // L5\n\t{M: 1 << 29, K: 6}, // L6\n\t{M: 1 << 30, K: 6}, // L7\n}\n\n// MaxIndexMergeCount is the maximum number of files that can be merged together at once.\nconst MaxIndexMergeCount = 2\n\n// MaxIndexFileSize is the maximum expected size of an index file.\nconst MaxIndexFileSize = 4 * (1 << 30)\n\n// generateCompactionToken returns a short token to track an individual compaction.\n// It is only used for logging so it doesn't need strong uniqueness guarantees.\nfunc generateCompactionToken() string {\n\ttoken := make([]byte, 3)\n\trand.Read(token)\n\treturn fmt.Sprintf(\"%x\", token)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file.go",
    "content": "package tsi1\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/bloom\"\n\t\"github.com/influxdata/influxdb/pkg/estimator\"\n\t\"github.com/influxdata/influxdb/pkg/mmap\"\n)\n\n// IndexFileVersion is the current TSI1 index file version.\nconst IndexFileVersion = 1\n\n// FileSignature represents a magic number at the header of the index file.\nconst FileSignature = \"TSI1\"\n\n// IndexFile field size constants.\nconst (\n\t// IndexFile trailer fields\n\tIndexFileVersionSize       = 2\n\tSeriesBlockOffsetSize      = 8\n\tSeriesBlockSizeSize        = 8\n\tMeasurementBlockOffsetSize = 8\n\tMeasurementBlockSizeSize   = 8\n\n\tIndexFileTrailerSize = IndexFileVersionSize +\n\t\tSeriesBlockOffsetSize +\n\t\tSeriesBlockSizeSize +\n\t\tMeasurementBlockOffsetSize +\n\t\tMeasurementBlockSizeSize\n)\n\n// IndexFile errors.\nvar (\n\tErrInvalidIndexFile            = errors.New(\"invalid index file\")\n\tErrUnsupportedIndexFileVersion = errors.New(\"unsupported index file version\")\n)\n\n// IndexFile represents a collection of measurement, tag, and series data.\ntype IndexFile struct {\n\twg   sync.WaitGroup // ref count\n\tdata []byte\n\n\t// Components\n\tsblk  SeriesBlock\n\ttblks map[string]*TagBlock // tag blocks by measurement name\n\tmblk  MeasurementBlock\n\n\t// Sortable identifier & filepath to the log file.\n\tlevel int\n\tid    int\n\n\t// Counters\n\tseriesN int64 // Number of unique series in this indexFile.\n\n\t// Compaction tracking.\n\tmu         sync.RWMutex\n\tcompacting bool\n\n\t// Path to data file.\n\tpath string\n}\n\n// NewIndexFile returns a new instance of IndexFile.\nfunc NewIndexFile() *IndexFile {\n\treturn &IndexFile{}\n}\n\n// Open memory maps the data file at the file's path.\nfunc (f *IndexFile) Open() error {\n\t// Extract identifier from path name.\n\tf.id, f.level = 
ParseFilename(f.Path())\n\n\tdata, err := mmap.Map(f.Path())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn f.UnmarshalBinary(data)\n}\n\n// Close unmaps the data file.\nfunc (f *IndexFile) Close() error {\n\t// Wait until all references are released.\n\tf.wg.Wait()\n\n\tf.sblk = SeriesBlock{}\n\tf.tblks = nil\n\tf.mblk = MeasurementBlock{}\n\tf.seriesN = 0\n\treturn mmap.Unmap(f.data)\n}\n\n// ID returns the file sequence identifier.\nfunc (f *IndexFile) ID() int { return f.id }\n\n// Path returns the file path.\nfunc (f *IndexFile) Path() string { return f.path }\n\n// SetPath sets the file's path.\nfunc (f *IndexFile) SetPath(path string) { f.path = path }\n\n// Level returns the compaction level for the file.\nfunc (f *IndexFile) Level() int { return f.level }\n\n// Filter returns the series existence filter for the file.\nfunc (f *IndexFile) Filter() *bloom.Filter { return f.sblk.filter }\n\n// Retain adds a reference count to the file.\nfunc (f *IndexFile) Retain() { f.wg.Add(1) }\n\n// Release removes a reference count from the file.\nfunc (f *IndexFile) Release() { f.wg.Done() }\n\n// Size returns the size of the index file, in bytes.\nfunc (f *IndexFile) Size() int64 { return int64(len(f.data)) }\n\n// Compacting returns true if the file is being compacted.\nfunc (f *IndexFile) Compacting() bool {\n\tf.mu.RLock()\n\tv := f.compacting\n\tf.mu.RUnlock()\n\treturn v\n}\n\n// setCompacting sets whether the index file is being compacted.\nfunc (f *IndexFile) setCompacting(v bool) {\n\tf.mu.Lock()\n\tf.compacting = v\n\tf.mu.Unlock()\n}\n\n// UnmarshalBinary opens an index from data.\n// The byte slice is retained so it must be kept open.\nfunc (f *IndexFile) UnmarshalBinary(data []byte) error {\n\t// Ensure magic number exists at the beginning.\n\tif len(data) < len(FileSignature) {\n\t\treturn io.ErrShortBuffer\n\t} else if !bytes.Equal(data[:len(FileSignature)], []byte(FileSignature)) {\n\t\treturn ErrInvalidIndexFile\n\t}\n\n\t// Read index file 
trailer.\n\tt, err := ReadIndexFileTrailer(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Slice measurement block data.\n\tbuf := data[t.MeasurementBlock.Offset:]\n\tbuf = buf[:t.MeasurementBlock.Size]\n\n\t// Unmarshal measurement block.\n\tif err := f.mblk.UnmarshalBinary(buf); err != nil {\n\t\treturn err\n\t}\n\n\t// Unmarshal each tag block.\n\tf.tblks = make(map[string]*TagBlock)\n\titr := f.mblk.Iterator()\n\n\tfor m := itr.Next(); m != nil; m = itr.Next() {\n\t\te := m.(*MeasurementBlockElem)\n\n\t\t// Slice measurement block data.\n\t\tbuf := data[e.tagBlock.offset:]\n\t\tbuf = buf[:e.tagBlock.size]\n\n\t\t// Unmarshal measurement block.\n\t\tvar tblk TagBlock\n\t\tif err := tblk.UnmarshalBinary(buf); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tf.tblks[string(e.name)] = &tblk\n\t}\n\n\t// Slice series list data.\n\tbuf = data[t.SeriesBlock.Offset:]\n\tbuf = buf[:t.SeriesBlock.Size]\n\n\t// Unmarshal series list.\n\tif err := f.sblk.UnmarshalBinary(buf); err != nil {\n\t\treturn err\n\t}\n\n\t// Save reference to entire data block.\n\tf.data = data\n\n\treturn nil\n}\n\n// Measurement returns a measurement element.\nfunc (f *IndexFile) Measurement(name []byte) MeasurementElem {\n\te, ok := f.mblk.Elem(name)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn &e\n}\n\n// MeasurementN returns the number of measurements in the file.\nfunc (f *IndexFile) MeasurementN() (n uint64) {\n\tmitr := f.mblk.Iterator()\n\tfor me := mitr.Next(); me != nil; me = mitr.Next() {\n\t\tn++\n\t}\n\treturn n\n}\n\n// TagValueIterator returns a value iterator for a tag key and a flag\n// indicating if a tombstone exists on the measurement or key.\nfunc (f *IndexFile) TagValueIterator(name, key []byte) TagValueIterator {\n\ttblk := f.tblks[string(name)]\n\tif tblk == nil {\n\t\treturn nil\n\t}\n\n\t// Find key element.\n\tke := tblk.TagKeyElem(key)\n\tif ke == nil {\n\t\treturn nil\n\t}\n\n\t// Merge all value series iterators together.\n\treturn ke.TagValueIterator()\n}\n\n// 
TagKeySeriesIterator returns a series iterator for a tag key and a flag\n// indicating if a tombstone exists on the measurement or key.\nfunc (f *IndexFile) TagKeySeriesIterator(name, key []byte) SeriesIterator {\n\ttblk := f.tblks[string(name)]\n\tif tblk == nil {\n\t\treturn nil\n\t}\n\n\t// Find key element.\n\tke := tblk.TagKeyElem(key)\n\tif ke == nil {\n\t\treturn nil\n\t}\n\n\t// Merge all value series iterators together.\n\tvitr := ke.TagValueIterator()\n\tvar itrs []SeriesIterator\n\tfor ve := vitr.Next(); ve != nil; ve = vitr.Next() {\n\t\tsitr := &rawSeriesIDIterator{data: ve.(*TagBlockValueElem).series.data}\n\t\titrs = append(itrs, newSeriesDecodeIterator(&f.sblk, sitr))\n\t}\n\n\treturn MergeSeriesIterators(itrs...)\n}\n\n// TagValueSeriesIterator returns a series iterator for a tag value and a flag\n// indicating if a tombstone exists on the measurement, key, or value.\nfunc (f *IndexFile) TagValueSeriesIterator(name, key, value []byte) SeriesIterator {\n\ttblk := f.tblks[string(name)]\n\tif tblk == nil {\n\t\treturn nil\n\t}\n\n\t// Find value element.\n\tve := tblk.TagValueElem(key, value)\n\tif ve == nil {\n\t\treturn nil\n\t}\n\n\t// Create an iterator over value's series.\n\treturn newSeriesDecodeIterator(\n\t\t&f.sblk,\n\t\t&rawSeriesIDIterator{\n\t\t\tn:    ve.(*TagBlockValueElem).series.n,\n\t\t\tdata: ve.(*TagBlockValueElem).series.data,\n\t\t},\n\t)\n}\n\n// TagKey returns a tag key.\nfunc (f *IndexFile) TagKey(name, key []byte) TagKeyElem {\n\ttblk := f.tblks[string(name)]\n\tif tblk == nil {\n\t\treturn nil\n\t}\n\treturn tblk.TagKeyElem(key)\n}\n\n// TagValue returns a tag value.\nfunc (f *IndexFile) TagValue(name, key, value []byte) TagValueElem {\n\ttblk := f.tblks[string(name)]\n\tif tblk == nil {\n\t\treturn nil\n\t}\n\treturn tblk.TagValueElem(key, value)\n}\n\n// HasSeries returns flags indicating if the series exists and if it is tombstoned.\nfunc (f *IndexFile) HasSeries(name []byte, tags models.Tags, buf []byte) (exists, 
tombstoned bool) {\n\treturn f.sblk.HasSeries(name, tags, buf)\n}\n\n// Series returns the series and a flag indicating if the series has been\n// tombstoned by the measurement.\nfunc (f *IndexFile) Series(name []byte, tags models.Tags) SeriesElem {\n\treturn f.sblk.Series(name, tags)\n}\n\n// TagValueElem returns an element for a measurement/tag/value.\nfunc (f *IndexFile) TagValueElem(name, key, value []byte) TagValueElem {\n\ttblk, ok := f.tblks[string(name)]\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn tblk.TagValueElem(key, value)\n}\n\n// MeasurementIterator returns an iterator over all measurements.\nfunc (f *IndexFile) MeasurementIterator() MeasurementIterator {\n\treturn f.mblk.Iterator()\n}\n\n// TagKeyIterator returns an iterator over all tag keys for a measurement.\nfunc (f *IndexFile) TagKeyIterator(name []byte) TagKeyIterator {\n\tblk := f.tblks[string(name)]\n\tif blk == nil {\n\t\treturn nil\n\t}\n\treturn blk.TagKeyIterator()\n}\n\n// MeasurementSeriesIterator returns an iterator over a measurement's series.\nfunc (f *IndexFile) MeasurementSeriesIterator(name []byte) SeriesIterator {\n\treturn &seriesDecodeIterator{\n\t\titr:  f.mblk.seriesIDIterator(name),\n\t\tsblk: &f.sblk,\n\t}\n}\n\n// MergeMeasurementsSketches merges the index file's series sketches into the provided\n// sketches.\nfunc (f *IndexFile) MergeMeasurementsSketches(s, t estimator.Sketch) error {\n\tif err := s.Merge(f.mblk.sketch); err != nil {\n\t\treturn err\n\t}\n\treturn t.Merge(f.mblk.tSketch)\n}\n\n// SeriesN returns the total number of non-tombstoned series for the index file.\nfunc (f *IndexFile) SeriesN() uint64 {\n\treturn uint64(f.sblk.seriesN - f.sblk.tombstoneN)\n}\n\n// SeriesIterator returns an iterator over all series.\nfunc (f *IndexFile) SeriesIterator() SeriesIterator {\n\treturn f.sblk.SeriesIterator()\n}\n\n// MergeSeriesSketches merges the index file's series sketches into the provided\n// sketches.\nfunc (f *IndexFile) MergeSeriesSketches(s, t 
estimator.Sketch) error {\n\tif err := s.Merge(f.sblk.sketch); err != nil {\n\t\treturn err\n\t}\n\treturn t.Merge(f.sblk.tsketch)\n}\n\n// ReadIndexFileTrailer returns the index file trailer from data.\nfunc ReadIndexFileTrailer(data []byte) (IndexFileTrailer, error) {\n\tvar t IndexFileTrailer\n\n\t// Read version.\n\tt.Version = int(binary.BigEndian.Uint16(data[len(data)-IndexFileVersionSize:]))\n\tif t.Version != IndexFileVersion {\n\t\treturn t, ErrUnsupportedIndexFileVersion\n\t}\n\n\t// Slice trailer data.\n\tbuf := data[len(data)-IndexFileTrailerSize:]\n\n\t// Read series list info.\n\tt.SeriesBlock.Offset = int64(binary.BigEndian.Uint64(buf[0:SeriesBlockOffsetSize]))\n\tbuf = buf[SeriesBlockOffsetSize:]\n\tt.SeriesBlock.Size = int64(binary.BigEndian.Uint64(buf[0:SeriesBlockSizeSize]))\n\tbuf = buf[SeriesBlockSizeSize:]\n\n\t// Read measurement block info.\n\tt.MeasurementBlock.Offset = int64(binary.BigEndian.Uint64(buf[0:MeasurementBlockOffsetSize]))\n\tbuf = buf[MeasurementBlockOffsetSize:]\n\tt.MeasurementBlock.Size = int64(binary.BigEndian.Uint64(buf[0:MeasurementBlockSizeSize]))\n\tbuf = buf[MeasurementBlockSizeSize:]\n\n\treturn t, nil\n}\n\n// IndexFileTrailer represents meta data written to the end of the index file.\ntype IndexFileTrailer struct {\n\tVersion     int\n\tSeriesBlock struct {\n\t\tOffset int64\n\t\tSize   int64\n\t}\n\tMeasurementBlock struct {\n\t\tOffset int64\n\t\tSize   int64\n\t}\n}\n\n// WriteTo writes the trailer to w.\nfunc (t *IndexFileTrailer) WriteTo(w io.Writer) (n int64, err error) {\n\t// Write series list info.\n\tif err := writeUint64To(w, uint64(t.SeriesBlock.Offset), &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint64To(w, uint64(t.SeriesBlock.Size), &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Write measurement block info.\n\tif err := writeUint64To(w, uint64(t.MeasurementBlock.Offset), &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint64To(w, uint64(t.MeasurementBlock.Size), 
&n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Write index file encoding version.\n\tif err := writeUint16To(w, IndexFileVersion, &n); err != nil {\n\t\treturn n, err\n\t}\n\n\treturn n, nil\n}\n\n// FormatIndexFileName generates an index filename for the given index.\nfunc FormatIndexFileName(id, level int) string {\n\treturn fmt.Sprintf(\"L%d-%08d%s\", level, id, IndexFileExt)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file_test.go",
    "content": "package tsi1_test\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/tsdb/index/tsi1\"\n)\n\n// Ensure a simple index file can be built and opened.\nfunc TestCreateIndexFile(t *testing.T) {\n\tf, err := CreateIndexFile([]Series{\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"west\"})},\n\t\t{Name: []byte(\"mem\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif e := f.TagValueElem([]byte(\"cpu\"), []byte(\"region\"), []byte(\"west\")); e == nil {\n\t\tt.Fatal(\"expected element\")\n\t} else if n := e.(*tsi1.TagBlockValueElem).SeriesN(); n != 1 {\n\t\tt.Fatalf(\"unexpected series count: %d\", n)\n\t}\n}\n\n// Ensure index file generation can be successfully built.\nfunc TestGenerateIndexFile(t *testing.T) {\n\t// Build generated index file.\n\tf, err := GenerateIndexFile(10, 3, 4)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify that tag/value series can be fetched.\n\tif e := f.TagValueElem([]byte(\"measurement0\"), []byte(\"key0\"), []byte(\"value0\")); e == nil {\n\t\tt.Fatal(\"expected element\")\n\t} else if n := e.(*tsi1.TagBlockValueElem).SeriesN(); n == 0 {\n\t\tt.Fatal(\"expected series\")\n\t}\n}\n\nfunc BenchmarkIndexFile_TagValueSeries(b *testing.B) {\n\tb.Run(\"M=1,K=2,V=3\", func(b *testing.B) {\n\t\tbenchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(1, 2, 3))\n\t})\n\tb.Run(\"M=10,K=5,V=5\", func(b *testing.B) {\n\t\tbenchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(10, 5, 5))\n\t})\n\tb.Run(\"M=10,K=7,V=5\", func(b *testing.B) {\n\t\tbenchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(10, 7, 7))\n\t})\n}\n\nfunc benchmarkIndexFile_TagValueSeries(b *testing.B, idx *tsi1.IndexFile) 
{\n\tb.ResetTimer()\n\tb.ReportAllocs()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tif e := idx.TagValueElem([]byte(\"measurement0\"), []byte(\"key0\"), []byte(\"value0\")); e == nil {\n\t\t\tb.Fatal(\"expected element\")\n\t\t} else if e.(*tsi1.TagBlockValueElem).SeriesN() == 0 {\n\t\t\tb.Fatal(\"expected series\")\n\t\t}\n\t}\n}\n\n// CreateIndexFile creates an index file with a given set of series.\nfunc CreateIndexFile(series []Series) (*tsi1.IndexFile, error) {\n\tlf, err := CreateLogFile(series)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Write index file to buffer.\n\tvar buf bytes.Buffer\n\tif _, err := lf.CompactTo(&buf, M, K); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Load index file from buffer.\n\tvar f tsi1.IndexFile\n\tif err := f.UnmarshalBinary(buf.Bytes()); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &f, nil\n}\n\n// GenerateIndexFile generates an index file from a set of series based on the count arguments.\n// Total series returned will equal measurementN * tagN * valueN.\nfunc GenerateIndexFile(measurementN, tagN, valueN int) (*tsi1.IndexFile, error) {\n\t// Generate a new log file first.\n\tlf, err := GenerateLogFile(measurementN, tagN, valueN)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Compact log file to buffer.\n\tvar buf bytes.Buffer\n\tif _, err := lf.CompactTo(&buf, M, K); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Load index file from buffer.\n\tvar f tsi1.IndexFile\n\tif err := f.UnmarshalBinary(buf.Bytes()); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &f, nil\n}\n\nfunc MustGenerateIndexFile(measurementN, tagN, valueN int) *tsi1.IndexFile {\n\tf, err := GenerateIndexFile(measurementN, tagN, valueN)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn f\n}\n\nvar indexFileCache struct {\n\tMeasurementN int\n\tTagN         int\n\tValueN       int\n\n\tIndexFile *tsi1.IndexFile\n}\n\n// MustFindOrGenerateIndexFile returns a cached index file or generates one if it doesn't exist.\nfunc 
MustFindOrGenerateIndexFile(measurementN, tagN, valueN int) *tsi1.IndexFile {\n\t// Use cache if fields match and the index file has been generated.\n\tif indexFileCache.MeasurementN == measurementN &&\n\t\tindexFileCache.TagN == tagN &&\n\t\tindexFileCache.ValueN == valueN &&\n\t\tindexFileCache.IndexFile != nil {\n\t\treturn indexFileCache.IndexFile\n\t}\n\n\t// Generate and cache.\n\tindexFileCache.MeasurementN = measurementN\n\tindexFileCache.TagN = tagN\n\tindexFileCache.ValueN = valueN\n\tindexFileCache.IndexFile = MustGenerateIndexFile(measurementN, tagN, valueN)\n\treturn indexFileCache.IndexFile\n}\n\nfunc pow(x, y int) int {\n\tr := 1\n\tfor i := 0; i < y; i++ {\n\t\tr *= x\n\t}\n\treturn r\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files.go",
    "content": "package tsi1\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/pkg/estimator/hll\"\n\t\"github.com/influxdata/influxdb/pkg/mmap\"\n)\n\n// IndexFiles represents a layered set of index files.\ntype IndexFiles []*IndexFile\n\n// IDs returns the ids for all index files.\nfunc (p IndexFiles) IDs() []int {\n\ta := make([]int, len(p))\n\tfor i, f := range p {\n\t\ta[i] = f.ID()\n\t}\n\treturn a\n}\n\n// Retain adds a reference count to all files.\nfunc (p IndexFiles) Retain() {\n\tfor _, f := range p {\n\t\tf.Retain()\n\t}\n}\n\n// Release removes a reference count from all files.\nfunc (p IndexFiles) Release() {\n\tfor _, f := range p {\n\t\tf.Release()\n\t}\n}\n\n// Files returns p as a list of File objects.\nfunc (p IndexFiles) Files() []File {\n\tother := make([]File, len(p))\n\tfor i, f := range p {\n\t\tother[i] = f\n\t}\n\treturn other\n}\n\n// MeasurementNames returns a sorted list of all measurement names for all files.\nfunc (p *IndexFiles) MeasurementNames() [][]byte {\n\titr := p.MeasurementIterator()\n\tvar names [][]byte\n\tfor e := itr.Next(); e != nil; e = itr.Next() {\n\t\tnames = append(names, copyBytes(e.Name()))\n\t}\n\tsort.Sort(byteSlices(names))\n\treturn names\n}\n\n// MeasurementIterator returns an iterator that merges measurements across all files.\nfunc (p IndexFiles) MeasurementIterator() MeasurementIterator {\n\ta := make([]MeasurementIterator, 0, len(p))\n\tfor i := range p {\n\t\titr := p[i].MeasurementIterator()\n\t\tif itr == nil {\n\t\t\tcontinue\n\t\t}\n\t\ta = append(a, itr)\n\t}\n\treturn MergeMeasurementIterators(a...)\n}\n\n// TagKeyIterator returns an iterator that merges tag keys across all files.\nfunc (p *IndexFiles) TagKeyIterator(name []byte) (TagKeyIterator, error) {\n\ta := make([]TagKeyIterator, 0, len(*p))\n\tfor _, f := range *p {\n\t\titr := f.TagKeyIterator(name)\n\t\tif itr == nil {\n\t\t\tcontinue\n\t\t}\n\t\ta = append(a, 
itr)\n\t}\n\treturn MergeTagKeyIterators(a...), nil\n}\n\n// SeriesIterator returns an iterator that merges series across all files.\nfunc (p IndexFiles) SeriesIterator() SeriesIterator {\n\ta := make([]SeriesIterator, 0, len(p))\n\tfor _, f := range p {\n\t\titr := f.SeriesIterator()\n\t\tif itr == nil {\n\t\t\tcontinue\n\t\t}\n\t\ta = append(a, itr)\n\t}\n\treturn MergeSeriesIterators(a...)\n}\n\n// MeasurementSeriesIterator returns an iterator that merges series across all files.\nfunc (p IndexFiles) MeasurementSeriesIterator(name []byte) SeriesIterator {\n\ta := make([]SeriesIterator, 0, len(p))\n\tfor _, f := range p {\n\t\titr := f.MeasurementSeriesIterator(name)\n\t\tif itr == nil {\n\t\t\tcontinue\n\t\t}\n\t\ta = append(a, itr)\n\t}\n\treturn MergeSeriesIterators(a...)\n}\n\n// TagValueSeriesIterator returns an iterator that merges series across all files.\nfunc (p IndexFiles) TagValueSeriesIterator(name, key, value []byte) SeriesIterator {\n\ta := make([]SeriesIterator, 0, len(p))\n\tfor i := range p {\n\t\titr := p[i].TagValueSeriesIterator(name, key, value)\n\t\tif itr != nil {\n\t\t\ta = append(a, itr)\n\t\t}\n\t}\n\treturn MergeSeriesIterators(a...)\n}\n\n// CompactTo merges all index files and writes them to w.\nfunc (p IndexFiles) CompactTo(w io.Writer, m, k uint64) (n int64, err error) {\n\tvar t IndexFileTrailer\n\n\t// Wrap writer in buffered I/O.\n\tbw := bufio.NewWriter(w)\n\n\t// Setup context object to track shared data for this compaction.\n\tvar info indexCompactInfo\n\tinfo.tagSets = make(map[string]indexTagSetPos)\n\n\t// Write magic number.\n\tif err := writeTo(bw, []byte(FileSignature), &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Write combined series list.\n\tt.SeriesBlock.Offset = n\n\tif err := p.writeSeriesBlockTo(bw, m, k, &info, &n); err != nil {\n\t\treturn n, err\n\t}\n\tt.SeriesBlock.Size = n - t.SeriesBlock.Offset\n\n\t// Flush buffer before re-mapping.\n\tif err := bw.Flush(); err != nil {\n\t\treturn n, err\n\t}\n\n\t// 
Open series block as memory-mapped data.\n\tsblk, data, err := mapIndexFileSeriesBlock(w)\n\tif data != nil {\n\t\tdefer mmap.Unmap(data)\n\t}\n\tif err != nil {\n\t\treturn n, err\n\t}\n\tinfo.sblk = sblk\n\n\t// Write tagset blocks in measurement order.\n\tif err := p.writeTagsetsTo(bw, &info, &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Write measurement block.\n\tt.MeasurementBlock.Offset = n\n\tif err := p.writeMeasurementBlockTo(bw, &info, &n); err != nil {\n\t\treturn n, err\n\t}\n\tt.MeasurementBlock.Size = n - t.MeasurementBlock.Offset\n\n\t// Write trailer.\n\tnn, err := t.WriteTo(bw)\n\tn += nn\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\t// Flush file.\n\tif err := bw.Flush(); err != nil {\n\t\treturn n, err\n\t}\n\n\treturn n, nil\n}\n\nfunc (p IndexFiles) writeSeriesBlockTo(w io.Writer, m, k uint64, info *indexCompactInfo, n *int64) error {\n\t// Estimate series cardinality.\n\tsketch := hll.NewDefaultPlus()\n\tfor _, f := range p {\n\t\tif err := f.MergeSeriesSketches(sketch, sketch); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\titr := p.SeriesIterator()\n\tenc := NewSeriesBlockEncoder(w, uint32(sketch.Count()), m, k)\n\n\t// Write all series.\n\tfor e := itr.Next(); e != nil; e = itr.Next() {\n\t\tif err := enc.Encode(e.Name(), e.Tags(), e.Deleted()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Close and flush block.\n\terr := enc.Close()\n\t*n += int64(enc.N())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (p IndexFiles) writeTagsetsTo(w io.Writer, info *indexCompactInfo, n *int64) error {\n\tmitr := p.MeasurementIterator()\n\tfor m := mitr.Next(); m != nil; m = mitr.Next() {\n\t\tif err := p.writeTagsetTo(w, m.Name(), info, n); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// writeTagsetTo writes a single tagset to w and saves the tagset offset.\nfunc (p IndexFiles) writeTagsetTo(w io.Writer, name []byte, info *indexCompactInfo, n *int64) error {\n\tvar seriesKey []byte\n\n\tkitr, err := 
p.TagKeyIterator(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tenc := NewTagBlockEncoder(w)\n\tfor ke := kitr.Next(); ke != nil; ke = kitr.Next() {\n\t\t// Encode key.\n\t\tif err := enc.EncodeKey(ke.Key(), ke.Deleted()); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Iterate over tag values.\n\t\tvitr := ke.TagValueIterator()\n\t\tfor ve := vitr.Next(); ve != nil; ve = vitr.Next() {\n\t\t\t// Merge all series together.\n\t\t\tsitr := p.TagValueSeriesIterator(name, ke.Key(), ve.Value())\n\t\t\tvar seriesIDs []uint32\n\t\t\tfor se := sitr.Next(); se != nil; se = sitr.Next() {\n\t\t\t\tseriesID, _ := info.sblk.Offset(se.Name(), se.Tags(), seriesKey[:0])\n\t\t\t\tif seriesID == 0 {\n\t\t\t\t\treturn fmt.Errorf(\"expected series id: %s/%s\", se.Name(), se.Tags().String())\n\t\t\t\t}\n\t\t\t\tseriesIDs = append(seriesIDs, seriesID)\n\t\t\t}\n\t\t\tsort.Sort(uint32Slice(seriesIDs))\n\n\t\t\t// Encode value.\n\t\t\tif err := enc.EncodeValue(ve.Value(), ve.Deleted(), seriesIDs); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Save tagset offset to measurement.\n\tpos := info.tagSets[string(name)]\n\tpos.offset = *n\n\n\t// Flush data to writer.\n\terr = enc.Close()\n\t*n += enc.N()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Save tagset size to measurement.\n\tpos.size = *n - pos.offset\n\n\tinfo.tagSets[string(name)] = pos\n\n\treturn nil\n}\n\nfunc (p IndexFiles) writeMeasurementBlockTo(w io.Writer, info *indexCompactInfo, n *int64) error {\n\tvar seriesKey []byte\n\tmw := NewMeasurementBlockWriter()\n\n\t// Add measurement data & compute sketches.\n\tmitr := p.MeasurementIterator()\n\tfor m := mitr.Next(); m != nil; m = mitr.Next() {\n\t\tname := m.Name()\n\n\t\t// Look-up series ids.\n\t\titr := p.MeasurementSeriesIterator(name)\n\t\tvar seriesIDs []uint32\n\t\tfor e := itr.Next(); e != nil; e = itr.Next() {\n\t\t\tseriesID, _ := info.sblk.Offset(e.Name(), e.Tags(), seriesKey[:0])\n\t\t\tif seriesID == 0 {\n\t\t\t\tpanic(fmt.Sprintf(\"expected 
series id: %s %s\", e.Name(), e.Tags().String()))\n\t\t\t}\n\t\t\tseriesIDs = append(seriesIDs, seriesID)\n\t\t}\n\t\tsort.Sort(uint32Slice(seriesIDs))\n\n\t\t// Add measurement to writer.\n\t\tpos := info.tagSets[string(name)]\n\t\tmw.Add(name, m.Deleted(), pos.offset, pos.size, seriesIDs)\n\t}\n\n\t// Flush data to writer.\n\tnn, err := mw.WriteTo(w)\n\t*n += nn\n\treturn err\n}\n\n// Stat returns the max index file size and the total file size for all index files.\nfunc (p IndexFiles) Stat() (*IndexFilesInfo, error) {\n\tvar info IndexFilesInfo\n\tfor _, f := range p {\n\t\tfi, err := os.Stat(f.Path())\n\t\tif os.IsNotExist(err) {\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif fi.Size() > info.MaxSize {\n\t\t\tinfo.MaxSize = fi.Size()\n\t\t}\n\t\tif fi.ModTime().After(info.ModTime) {\n\t\t\tinfo.ModTime = fi.ModTime()\n\t\t}\n\n\t\tinfo.Size += fi.Size()\n\t}\n\treturn &info, nil\n}\n\ntype IndexFilesInfo struct {\n\tMaxSize int64     // largest file size\n\tSize    int64     // total file size\n\tModTime time.Time // last modified\n}\n\n// indexCompactInfo is a context object used for tracking position information\n// during the compaction of index files.\ntype indexCompactInfo struct {\n\t// Memory-mapped series block.\n\t// Available after the series block has been written.\n\tsblk *SeriesBlock\n\n\t// Tracks offset/size for each measurement's tagset.\n\ttagSets map[string]indexTagSetPos\n}\n\n// indexTagSetPos stores the offset/size of tagsets.\ntype indexTagSetPos struct {\n\toffset int64\n\tsize   int64\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files_test.go",
    "content": "package tsi1_test\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/tsdb/index/tsi1\"\n)\n\n// Ensure multiple index files can be compacted together.\nfunc TestIndexFiles_WriteTo(t *testing.T) {\n\t// Write first file.\n\tf0, err := CreateIndexFile([]Series{\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"west\"})},\n\t\t{Name: []byte(\"mem\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Write second file.\n\tf1, err := CreateIndexFile([]Series{\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"west\"})},\n\t\t{Name: []byte(\"disk\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Compact the two together and write out to a buffer.\n\tvar buf bytes.Buffer\n\ta := tsi1.IndexFiles{f0, f1}\n\tif n, err := a.CompactTo(&buf, M, K); err != nil {\n\t\tt.Fatal(err)\n\t} else if n == 0 {\n\t\tt.Fatal(\"expected data written\")\n\t}\n\n\t// Unmarshal buffer into a new index file.\n\tvar f tsi1.IndexFile\n\tif err := f.UnmarshalBinary(buf.Bytes()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify data in compacted file.\n\tif e := f.TagValueElem([]byte(\"cpu\"), []byte(\"region\"), []byte(\"west\")); e == nil {\n\t\tt.Fatal(\"expected element\")\n\t} else if n := e.(*tsi1.TagBlockValueElem).SeriesN(); n != 1 {\n\t\tt.Fatalf(\"unexpected series count: %d\", n)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_test.go",
    "content": "package tsi1_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/tsdb/index/tsi1\"\n)\n\n// Bloom filter settings used in tests.\nconst M, K = 4096, 6\n\n// Ensure index can iterate over all measurement names.\nfunc TestIndex_ForEachMeasurementName(t *testing.T) {\n\tidx := MustOpenIndex()\n\tdefer idx.Close()\n\n\t// Add series to index.\n\tif err := idx.CreateSeriesSliceIfNotExists([]Series{\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"west\"})},\n\t\t{Name: []byte(\"mem\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify measurements are returned.\n\tidx.Run(t, func(t *testing.T) {\n\t\tvar names []string\n\t\tif err := idx.ForEachMeasurementName(func(name []byte) error {\n\t\t\tnames = append(names, string(name))\n\t\t\treturn nil\n\t\t}); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif !reflect.DeepEqual(names, []string{\"cpu\", \"mem\"}) {\n\t\t\tt.Fatalf(\"unexpected names: %#v\", names)\n\t\t}\n\t})\n\n\t// Add more series.\n\tif err := idx.CreateSeriesSliceIfNotExists([]Series{\n\t\t{Name: []byte(\"disk\")},\n\t\t{Name: []byte(\"mem\")},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify new measurements.\n\tidx.Run(t, func(t *testing.T) {\n\t\tvar names []string\n\t\tif err := idx.ForEachMeasurementName(func(name []byte) error {\n\t\t\tnames = append(names, string(name))\n\t\t\treturn nil\n\t\t}); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif !reflect.DeepEqual(names, []string{\"cpu\", \"disk\", \"mem\"}) {\n\t\t\tt.Fatalf(\"unexpected names: %#v\", names)\n\t\t}\n\t})\n}\n\n// Ensure index can return whether a measurement exists.\nfunc TestIndex_MeasurementExists(t 
*testing.T) {\n\tidx := MustOpenIndex()\n\tdefer idx.Close()\n\n\t// Add series to index.\n\tif err := idx.CreateSeriesSliceIfNotExists([]Series{\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"west\"})},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify measurement exists.\n\tidx.Run(t, func(t *testing.T) {\n\t\tif v, err := idx.MeasurementExists([]byte(\"cpu\")); err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if !v {\n\t\t\tt.Fatal(\"expected measurement to exist\")\n\t\t}\n\t})\n\n\t// Delete one series.\n\tif err := idx.DropSeries(models.MakeKey([]byte(\"cpu\"), models.NewTags(map[string]string{\"region\": \"east\"}))); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify measurement still exists.\n\tidx.Run(t, func(t *testing.T) {\n\t\tif v, err := idx.MeasurementExists([]byte(\"cpu\")); err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if !v {\n\t\t\tt.Fatal(\"expected measurement to still exist\")\n\t\t}\n\t})\n\n\t// Delete second series.\n\tif err := idx.DropSeries(models.MakeKey([]byte(\"cpu\"), models.NewTags(map[string]string{\"region\": \"west\"}))); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify measurement is now deleted.\n\tidx.Run(t, func(t *testing.T) {\n\t\tif v, err := idx.MeasurementExists([]byte(\"cpu\")); err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if v {\n\t\t\tt.Fatal(\"expected measurement to be deleted\")\n\t\t}\n\t})\n}\n\n// Ensure index can return a list of matching measurements.\nfunc TestIndex_MeasurementNamesByExpr(t *testing.T) {\n\tidx := MustOpenIndex()\n\tdefer idx.Close()\n\n\t// Add series to index.\n\tif err := idx.CreateSeriesSliceIfNotExists([]Series{\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"west\"})},\n\t\t{Name: []byte(\"disk\"), Tags: 
models.NewTags(map[string]string{\"region\": \"north\"})},\n\t\t{Name: []byte(\"mem\"), Tags: models.NewTags(map[string]string{\"region\": \"west\", \"country\": \"us\"})},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Retrieve measurements by expression\n\tidx.Run(t, func(t *testing.T) {\n\t\tt.Run(\"EQ\", func(t *testing.T) {\n\t\t\tnames, err := idx.MeasurementNamesByExpr(influxql.MustParseExpr(`region = 'west'`))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else if !reflect.DeepEqual(names, [][]byte{[]byte(\"cpu\"), []byte(\"mem\")}) {\n\t\t\t\tt.Fatalf(\"unexpected names: %v\", names)\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"NEQ\", func(t *testing.T) {\n\t\t\tnames, err := idx.MeasurementNamesByExpr(influxql.MustParseExpr(`region != 'east'`))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else if !reflect.DeepEqual(names, [][]byte{[]byte(\"disk\"), []byte(\"mem\")}) {\n\t\t\t\tt.Fatalf(\"unexpected names: %v\", names)\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"EQREGEX\", func(t *testing.T) {\n\t\t\tnames, err := idx.MeasurementNamesByExpr(influxql.MustParseExpr(`region =~ /east|west/`))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else if !reflect.DeepEqual(names, [][]byte{[]byte(\"cpu\"), []byte(\"mem\")}) {\n\t\t\t\tt.Fatalf(\"unexpected names: %v\", names)\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"NEQREGEX\", func(t *testing.T) {\n\t\t\tnames, err := idx.MeasurementNamesByExpr(influxql.MustParseExpr(`country !~ /^u/`))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else if !reflect.DeepEqual(names, [][]byte{[]byte(\"cpu\"), []byte(\"disk\")}) {\n\t\t\t\tt.Fatalf(\"unexpected names: %v\", names)\n\t\t\t}\n\t\t})\n\t})\n}\n\n// Ensure index can return a list of matching measurements.\nfunc TestIndex_MeasurementNamesByRegex(t *testing.T) {\n\tidx := MustOpenIndex()\n\tdefer idx.Close()\n\n\t// Add series to index.\n\tif err := idx.CreateSeriesSliceIfNotExists([]Series{\n\t\t{Name: []byte(\"cpu\")},\n\t\t{Name: []byte(\"disk\")},\n\t\t{Name: 
[]byte(\"mem\")},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Retrieve measurements by regex.\n\tidx.Run(t, func(t *testing.T) {\n\t\tnames, err := idx.MeasurementNamesByRegex(regexp.MustCompile(`cpu|mem`))\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if !reflect.DeepEqual(names, [][]byte{[]byte(\"cpu\"), []byte(\"mem\")}) {\n\t\t\tt.Fatalf(\"unexpected names: %v\", names)\n\t\t}\n\t})\n}\n\n// Ensure index can delete a measurement and all related keys, values, & series.\nfunc TestIndex_DropMeasurement(t *testing.T) {\n\tidx := MustOpenIndex()\n\tdefer idx.Close()\n\n\t// Add series to index.\n\tif err := idx.CreateSeriesSliceIfNotExists([]Series{\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"west\"})},\n\t\t{Name: []byte(\"disk\"), Tags: models.NewTags(map[string]string{\"region\": \"north\"})},\n\t\t{Name: []byte(\"mem\"), Tags: models.NewTags(map[string]string{\"region\": \"west\", \"country\": \"us\"})},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Drop measurement.\n\tif err := idx.DropMeasurement([]byte(\"cpu\")); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify data is gone in each stage.\n\tidx.Run(t, func(t *testing.T) {\n\t\t// Verify measurement is gone.\n\t\tif v, err := idx.MeasurementExists([]byte(\"cpu\")); err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if v {\n\t\t\tt.Fatal(\"expected no measurement\")\n\t\t}\n\n\t\t// Obtain file set to perform lower level checks.\n\t\tfs := idx.RetainFileSet()\n\t\tdefer fs.Release()\n\n\t\t// Verify tags & values are gone.\n\t\tif e := fs.TagKeyIterator([]byte(\"cpu\")).Next(); e != nil && !e.Deleted() {\n\t\t\tt.Fatal(\"expected deleted tag key\")\n\t\t}\n\t\tif itr := fs.TagValueIterator([]byte(\"cpu\"), []byte(\"region\")); itr != nil {\n\t\t\tt.Fatal(\"expected nil tag value iterator\")\n\t\t}\n\n\t})\n}\n\n// Index is a test wrapper for tsi1.Index.\ntype Index 
struct {\n\t*tsi1.Index\n}\n\n// NewIndex returns a new instance of Index at a temporary path.\nfunc NewIndex() *Index {\n\tidx := &Index{Index: tsi1.NewIndex()}\n\tidx.Path = MustTempDir()\n\treturn idx\n}\n\n// MustOpenIndex returns a new, open index. Panic on error.\nfunc MustOpenIndex() *Index {\n\tidx := NewIndex()\n\tif err := idx.Open(); err != nil {\n\t\tpanic(err)\n\t}\n\treturn idx\n}\n\n// Close closes and removes the index directory.\nfunc (idx *Index) Close() error {\n\tdefer os.RemoveAll(idx.Path)\n\treturn idx.Index.Close()\n}\n\n// Reopen closes and opens the index.\nfunc (idx *Index) Reopen() error {\n\tif err := idx.Index.Close(); err != nil {\n\t\treturn err\n\t}\n\n\tpath := idx.Path\n\tidx.Index = tsi1.NewIndex()\n\tidx.Path = path\n\tif err := idx.Open(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// Run executes a subtest for each of several different states:\n//\n// - Immediately\n// - After reopen\n// - After compaction\n// - After reopen again\n//\n// The index should always respond in the same fashion regardless of\n// how data is stored. 
This helper allows the index to be easily tested\n// in all major states.\nfunc (idx *Index) Run(t *testing.T, fn func(t *testing.T)) {\n\t// Invoke immediately.\n\tt.Run(\"state=initial\", fn)\n\n\t// Reopen and invoke again.\n\tif err := idx.Reopen(); err != nil {\n\t\tt.Fatalf(\"reopen error: %s\", err)\n\t}\n\tt.Run(\"state=reopen\", fn)\n\n\t// TODO: Request a compaction.\n\t// if err := idx.Compact(); err != nil {\n\t// \tt.Fatalf(\"compact error: %s\", err)\n\t// }\n\t// t.Run(\"state=post-compaction\", fn)\n\n\t// Reopen and invoke again.\n\tif err := idx.Reopen(); err != nil {\n\t\tt.Fatalf(\"post-compaction reopen error: %s\", err)\n\t}\n\tt.Run(\"state=post-compaction-reopen\", fn)\n}\n\n// CreateSeriesSliceIfNotExists creates multiple series at a time.\nfunc (idx *Index) CreateSeriesSliceIfNotExists(a []Series) error {\n\tfor i, s := range a {\n\t\tif err := idx.CreateSeriesIfNotExists(nil, s.Name, s.Tags); err != nil {\n\t\t\treturn fmt.Errorf(\"i=%d, name=%s, tags=%v, err=%s\", i, s.Name, s.Tags, err)\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file.go",
    "content": "package tsi1\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"hash/crc32\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/pkg/estimator/hll\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/bloom\"\n\t\"github.com/influxdata/influxdb/pkg/estimator\"\n\t\"github.com/influxdata/influxdb/pkg/mmap\"\n)\n\n// Log errors.\nvar (\n\tErrLogEntryChecksumMismatch = errors.New(\"log entry checksum mismatch\")\n)\n\n// Log entry flag constants.\nconst (\n\tLogEntrySeriesTombstoneFlag      = 0x01\n\tLogEntryMeasurementTombstoneFlag = 0x02\n\tLogEntryTagKeyTombstoneFlag      = 0x04\n\tLogEntryTagValueTombstoneFlag    = 0x08\n)\n\n// LogFile represents an on-disk write-ahead log file.\ntype LogFile struct {\n\tmu   sync.RWMutex\n\twg   sync.WaitGroup // ref count\n\tid   int            // file sequence identifier\n\tdata []byte         // mmap\n\tfile *os.File       // writer\n\tw    *bufio.Writer  // buffered writer\n\tbuf  []byte         // marshaling buffer\n\n\tsize    int64     // tracks current file size\n\tmodTime time.Time // tracks last time write occurred\n\n\tmSketch, mTSketch estimator.Sketch // Measurement sketches\n\tsSketch, sTSketch estimator.Sketch // Series sketche\n\n\t// In-memory index.\n\tmms logMeasurements\n\n\t// Filepath to the log file.\n\tpath string\n}\n\n// NewLogFile returns a new instance of LogFile.\nfunc NewLogFile(path string) *LogFile {\n\treturn &LogFile{\n\t\tpath:     path,\n\t\tmms:      make(logMeasurements),\n\t\tmSketch:  hll.NewDefaultPlus(),\n\t\tmTSketch: hll.NewDefaultPlus(),\n\t\tsSketch:  hll.NewDefaultPlus(),\n\t\tsTSketch: hll.NewDefaultPlus(),\n\t}\n}\n\n// Open reads the log from a file and validates all the checksums.\nfunc (f *LogFile) Open() error {\n\tif err := f.open(); err != nil {\n\t\tf.Close()\n\t\treturn err\n\t}\n\treturn 
nil\n}\n\nfunc (f *LogFile) open() error {\n\tf.id, _ = ParseFilename(f.path)\n\n\t// Open file for appending.\n\tfile, err := os.OpenFile(f.Path(), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.file = file\n\tf.w = bufio.NewWriter(f.file)\n\n\t// Finish opening if file is empty.\n\tfi, err := file.Stat()\n\tif err != nil {\n\t\treturn err\n\t} else if fi.Size() == 0 {\n\t\treturn nil\n\t}\n\tf.size = fi.Size()\n\tf.modTime = fi.ModTime()\n\n\t// Open a read-only memory map of the existing data.\n\tdata, err := mmap.Map(f.Path())\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.data = data\n\n\t// Read log entries from mmap.\n\tvar n int64\n\tfor buf := f.data; len(buf) > 0; {\n\t\t// Read next entry. Truncate partial writes.\n\t\tvar e LogEntry\n\t\tif err := e.UnmarshalBinary(buf); err == io.ErrShortBuffer {\n\t\t\tif err := file.Truncate(n); err != nil {\n\t\t\t\treturn err\n\t\t\t} else if _, err := file.Seek(0, io.SeekEnd); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Execute entry against in-memory index.\n\t\tf.execEntry(&e)\n\n\t\t// Move buffer forward.\n\t\tn += int64(e.Size)\n\t\tbuf = buf[e.Size:]\n\t}\n\n\treturn nil\n}\n\n// Close shuts down the file handle and mmap.\nfunc (f *LogFile) Close() error {\n\t// Wait until the file has no more references.\n\tf.wg.Wait()\n\n\tif f.w != nil {\n\t\tf.w.Flush()\n\t\tf.w = nil\n\t}\n\n\tif f.file != nil {\n\t\tf.file.Close()\n\t\tf.file = nil\n\t}\n\n\tif f.data != nil {\n\t\tmmap.Unmap(f.data)\n\t}\n\n\tf.mms = make(logMeasurements)\n\n\treturn nil\n}\n\n// Flush flushes buffered data to disk.\nfunc (f *LogFile) Flush() error {\n\tif f.w != nil {\n\t\treturn f.w.Flush()\n\t}\n\treturn nil\n}\n\n// ID returns the file sequence identifier.\nfunc (f *LogFile) ID() int { return f.id }\n\n// Path returns the file path.\nfunc (f *LogFile) Path() string { return f.path }\n\n// SetPath sets the log file's 
path.\nfunc (f *LogFile) SetPath(path string) { f.path = path }\n\n// Level returns the log level of the file.\nfunc (f *LogFile) Level() int { return 0 }\n\n// Filter returns the bloom filter for the file.\nfunc (f *LogFile) Filter() *bloom.Filter { return nil }\n\n// Retain adds a reference count to the file.\nfunc (f *LogFile) Retain() { f.wg.Add(1) }\n\n// Release removes a reference count from the file.\nfunc (f *LogFile) Release() { f.wg.Done() }\n\n// Stat returns size and last modification time of the file.\nfunc (f *LogFile) Stat() (int64, time.Time) {\n\tf.mu.Lock()\n\tsize, modTime := f.size, f.modTime\n\tf.mu.Unlock()\n\treturn size, modTime\n}\n\n// Size returns the size of the file, in bytes.\nfunc (f *LogFile) Size() int64 {\n\tf.mu.Lock()\n\tv := f.size\n\tf.mu.Unlock()\n\treturn v\n}\n\n// Measurement returns a measurement element.\nfunc (f *LogFile) Measurement(name []byte) MeasurementElem {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tmm, ok := f.mms[string(name)]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\treturn mm\n}\n\n// MeasurementNames returns an ordered list of measurement names.\nfunc (f *LogFile) MeasurementNames() []string {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\treturn f.measurementNames()\n}\n\nfunc (f *LogFile) measurementNames() []string {\n\ta := make([]string, 0, len(f.mms))\n\tfor name := range f.mms {\n\t\ta = append(a, name)\n\t}\n\tsort.Strings(a)\n\treturn a\n}\n\n// DeleteMeasurement adds a tombstone for a measurement to the log file.\nfunc (f *LogFile) DeleteMeasurement(name []byte) error {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\te := LogEntry{Flag: LogEntryMeasurementTombstoneFlag, Name: name}\n\tif err := f.appendEntry(&e); err != nil {\n\t\treturn err\n\t}\n\tf.execEntry(&e)\n\treturn nil\n}\n\n// TagKeySeriesIterator returns a series iterator for a tag key.\nfunc (f *LogFile) TagKeySeriesIterator(name, key []byte) SeriesIterator {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tmm, ok := f.mms[string(name)]\n\tif !ok 
{\n\t\treturn nil\n\t}\n\n\ttk, ok := mm.tagSet[string(key)]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\t// Combine iterators across all tag keys.\n\titrs := make([]SeriesIterator, 0, len(tk.tagValues))\n\tfor _, tv := range tk.tagValues {\n\t\tif len(tv.series) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\titrs = append(itrs, newLogSeriesIterator(tv.series))\n\t}\n\n\treturn MergeSeriesIterators(itrs...)\n}\n\n// TagKeyIterator returns a value iterator for a measurement.\nfunc (f *LogFile) TagKeyIterator(name []byte) TagKeyIterator {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tmm, ok := f.mms[string(name)]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\ta := make([]logTagKey, 0, len(mm.tagSet))\n\tfor _, k := range mm.tagSet {\n\t\ta = append(a, k)\n\t}\n\treturn newLogTagKeyIterator(a)\n}\n\n// TagKey returns a tag key element.\nfunc (f *LogFile) TagKey(name, key []byte) TagKeyElem {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tmm, ok := f.mms[string(name)]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\ttk, ok := mm.tagSet[string(key)]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\treturn &tk\n}\n\n// TagValue returns a tag value element.\nfunc (f *LogFile) TagValue(name, key, value []byte) TagValueElem {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tmm, ok := f.mms[string(name)]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\ttk, ok := mm.tagSet[string(key)]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\ttv, ok := tk.tagValues[string(value)]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\treturn &tv\n}\n\n// TagValueIterator returns a value iterator for a tag key.\nfunc (f *LogFile) TagValueIterator(name, key []byte) TagValueIterator {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tmm, ok := f.mms[string(name)]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\ttk, ok := mm.tagSet[string(key)]\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn tk.TagValueIterator()\n}\n\n// DeleteTagKey adds a tombstone for a tag key to the log file.\nfunc (f *LogFile) DeleteTagKey(name, key []byte) error {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\te := LogEntry{Flag: 
LogEntryTagKeyTombstoneFlag, Name: name, Tags: models.Tags{{Key: key}}}\n\tif err := f.appendEntry(&e); err != nil {\n\t\treturn err\n\t}\n\tf.execEntry(&e)\n\treturn nil\n}\n\n// TagValueSeriesIterator returns a series iterator for a tag value.\nfunc (f *LogFile) TagValueSeriesIterator(name, key, value []byte) SeriesIterator {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tmm, ok := f.mms[string(name)]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\ttk, ok := mm.tagSet[string(key)]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\ttv, ok := tk.tagValues[string(value)]\n\tif !ok {\n\t\treturn nil\n\t} else if len(tv.series) == 0 {\n\t\treturn nil\n\t}\n\n\treturn newLogSeriesIterator(tv.series)\n}\n\n// MeasurementN returns the total number of measurements.\nfunc (f *LogFile) MeasurementN() (n uint64) {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\treturn uint64(len(f.mms))\n}\n\n// TagKeyN returns the total number of keys.\nfunc (f *LogFile) TagKeyN() (n uint64) {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\tfor _, mm := range f.mms {\n\t\tn += uint64(len(mm.tagSet))\n\t}\n\treturn n\n}\n\n// TagValueN returns the total number of values.\nfunc (f *LogFile) TagValueN() (n uint64) {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\tfor _, mm := range f.mms {\n\t\tfor _, k := range mm.tagSet {\n\t\t\tn += uint64(len(k.tagValues))\n\t\t}\n\t}\n\treturn n\n}\n\n// DeleteTagValue adds a tombstone for a tag value to the log file.\nfunc (f *LogFile) DeleteTagValue(name, key, value []byte) error {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\te := LogEntry{Flag: LogEntryTagValueTombstoneFlag, Name: name, Tags: models.Tags{{Key: key, Value: value}}}\n\tif err := f.appendEntry(&e); err != nil {\n\t\treturn err\n\t}\n\tf.execEntry(&e)\n\treturn nil\n}\n\n// AddSeriesList adds a list of series to the log file in bulk.\nfunc (f *LogFile) AddSeriesList(names [][]byte, tagsSlice []models.Tags) error {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\tfor i := range names {\n\t\t// The name and tags are clone to prevent a 
memory leak\n\t\te := LogEntry{Name: []byte(string(names[i])), Tags: tagsSlice[i].Clone()}\n\t\tif err := f.appendEntry(&e); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tf.execEntry(&e)\n\t}\n\treturn nil\n}\n\n// AddSeries adds a series to the log file.\nfunc (f *LogFile) AddSeries(name []byte, tags models.Tags) error {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\t// The name and tags are clone to prevent a memory leak\n\tnewName := make([]byte, len(name))\n\tcopy(newName, name)\n\n\te := LogEntry{Name: newName, Tags: tags.Clone()}\n\tif err := f.appendEntry(&e); err != nil {\n\t\treturn err\n\t}\n\tf.execEntry(&e)\n\treturn nil\n}\n\n// DeleteSeries adds a tombstone for a series to the log file.\nfunc (f *LogFile) DeleteSeries(name []byte, tags models.Tags) error {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\n\te := LogEntry{Flag: LogEntrySeriesTombstoneFlag, Name: name, Tags: tags}\n\tif err := f.appendEntry(&e); err != nil {\n\t\treturn err\n\t}\n\tf.execEntry(&e)\n\treturn nil\n}\n\n// SeriesN returns the total number of series in the file.\nfunc (f *LogFile) SeriesN() (n uint64) {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tfor _, mm := range f.mms {\n\t\tn += uint64(len(mm.series))\n\t}\n\treturn n\n}\n\n// HasSeries returns flags indicating if the series exists and if it is tombstoned.\nfunc (f *LogFile) HasSeries(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool) {\n\te := f.SeriesWithBuffer(name, tags, buf)\n\tif e == nil {\n\t\treturn false, false\n\t}\n\treturn true, e.Deleted()\n}\n\n// FilterNamesTags filters out any series which already exist. 
It modifies the\n// provided slices of names and tags.\nfunc (f *LogFile) FilterNamesTags(names [][]byte, tagsSlice []models.Tags) ([][]byte, []models.Tags) {\n\tbuf := make([]byte, 1024)\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tnewNames, newTagsSlice := names[:0], tagsSlice[:0]\n\tfor i := 0; i < len(names); i++ {\n\t\tname := names[i]\n\t\ttags := tagsSlice[i]\n\n\t\tmm := f.mms[string(name)]\n\t\tif mm == nil {\n\t\t\tnewNames = append(newNames, name)\n\t\t\tnewTagsSlice = append(newTagsSlice, tags)\n\t\t\tcontinue\n\t\t}\n\n\t\tkey := AppendSeriesKey(buf[:0], name, tags)\n\t\ts := mm.series[string(key)]\n\t\tif s == nil || s.Deleted() {\n\t\t\tnewNames = append(newNames, name)\n\t\t\tnewTagsSlice = append(newTagsSlice, tags)\n\t\t}\n\t}\n\treturn newNames, newTagsSlice\n}\n\n// Series returns a series by name/tags.\nfunc (f *LogFile) Series(name []byte, tags models.Tags) SeriesElem {\n\treturn f.SeriesWithBuffer(name, tags, nil)\n}\n\n// SeriesWithBuffer returns a series by name/tags.\nfunc (f *LogFile) SeriesWithBuffer(name []byte, tags models.Tags, buf []byte) SeriesElem {\n\tkey := AppendSeriesKey(buf[:0], name, tags)\n\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tmm, ok := f.mms[string(name)]\n\tif !ok {\n\t\treturn nil\n\t}\n\n\ts := mm.series[string(key)]\n\tif s == nil {\n\t\treturn nil\n\t}\n\treturn s\n}\n\n// appendEntry adds a log entry to the end of the file.\nfunc (f *LogFile) appendEntry(e *LogEntry) error {\n\t// Marshal entry to the local buffer.\n\tf.buf = appendLogEntry(f.buf[:0], e)\n\n\t// Save the size of the record.\n\te.Size = len(f.buf)\n\n\t// Write record to file.\n\tn, err := f.w.Write(f.buf)\n\tif err != nil {\n\t\t// Move position backwards over partial entry.\n\t\t// Log should be reopened if seeking cannot be completed.\n\t\tif n > 0 {\n\t\t\tf.w.Reset(f.file)\n\t\t\tif _, err := f.file.Seek(int64(-n), os.SEEK_CUR); err != nil {\n\t\t\t\tf.Close()\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n\n\t// Update in-memory file size & 
modification time.\n\tf.size += int64(n)\n\tf.modTime = time.Now()\n\n\treturn nil\n}\n\n// execEntry executes a log entry against the in-memory index.\n// This is done after appending and on replay of the log.\nfunc (f *LogFile) execEntry(e *LogEntry) {\n\tswitch e.Flag {\n\tcase LogEntryMeasurementTombstoneFlag:\n\t\tf.execDeleteMeasurementEntry(e)\n\tcase LogEntryTagKeyTombstoneFlag:\n\t\tf.execDeleteTagKeyEntry(e)\n\tcase LogEntryTagValueTombstoneFlag:\n\t\tf.execDeleteTagValueEntry(e)\n\tdefault:\n\t\tf.execSeriesEntry(e)\n\t}\n}\n\nfunc (f *LogFile) execDeleteMeasurementEntry(e *LogEntry) {\n\tmm := f.createMeasurementIfNotExists(e.Name)\n\tmm.deleted = true\n\tmm.tagSet = make(map[string]logTagKey)\n\tmm.series = make(map[string]*logSerie)\n\n\t// Update measurement tombstone sketch.\n\tf.mTSketch.Add(e.Name)\n}\n\nfunc (f *LogFile) execDeleteTagKeyEntry(e *LogEntry) {\n\tkey := e.Tags[0].Key\n\n\tmm := f.createMeasurementIfNotExists(e.Name)\n\tts := mm.createTagSetIfNotExists(key)\n\n\tts.deleted = true\n\n\tmm.tagSet[string(key)] = ts\n}\n\nfunc (f *LogFile) execDeleteTagValueEntry(e *LogEntry) {\n\tkey, value := e.Tags[0].Key, e.Tags[0].Value\n\n\tmm := f.createMeasurementIfNotExists(e.Name)\n\tts := mm.createTagSetIfNotExists(key)\n\ttv := ts.createTagValueIfNotExists(value)\n\n\ttv.deleted = true\n\n\tts.tagValues[string(value)] = tv\n\tmm.tagSet[string(key)] = ts\n}\n\nfunc (f *LogFile) execSeriesEntry(e *LogEntry) {\n\t// Check if series is deleted.\n\tdeleted := (e.Flag & LogEntrySeriesTombstoneFlag) != 0\n\n\t// Fetch measurement.\n\tmm := f.createMeasurementIfNotExists(e.Name)\n\n\t// Undelete measurement if it's been tombstoned previously.\n\tif !deleted && mm.deleted {\n\t\tmm.deleted = false\n\t}\n\n\t// Generate key & series, if not exists.\n\tkey := AppendSeriesKey(nil, e.Name, e.Tags)\n\tserie := mm.createSeriesIfNotExists(key, e.Name, e.Tags, deleted)\n\n\t// Save tags.\n\tfor _, t := range e.Tags {\n\t\tts := 
mm.createTagSetIfNotExists(t.Key)\n\t\ttv := ts.createTagValueIfNotExists(t.Value)\n\n\t\t// Add a reference to the series on the tag value.\n\t\ttv.series[string(key)] = serie\n\n\t\tts.tagValues[string(t.Value)] = tv\n\t\tmm.tagSet[string(t.Key)] = ts\n\t}\n\n\t// Update the sketches.\n\tif deleted {\n\t\t// TODO(edd) decrement series count...\n\t\tf.sTSketch.Add(key) // Deleting series so update tombstone sketch.\n\t\treturn\n\t}\n\n\t// TODO(edd) increment series count....\n\tf.sSketch.Add(key)    // Add series to sketch.\n\tf.mSketch.Add(e.Name) // Add measurement to sketch as this may be the fist series for the measurement.\n}\n\n// SeriesIterator returns an iterator over all series in the log file.\nfunc (f *LogFile) SeriesIterator() SeriesIterator {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\t// Determine total series count across all measurements.\n\tvar n int\n\tmSeriesIdx := make([]int, len(f.mms))\n\tmSeries := make([][]logSerie, 0, len(f.mms))\n\tfor _, mm := range f.mms {\n\t\tn += len(mm.series)\n\t\ta := make([]logSerie, 0, len(mm.series))\n\t\tfor _, s := range mm.series {\n\t\t\ta = append(a, *s)\n\t\t}\n\t\tsort.Sort(logSeries(a))\n\t\tmSeries = append(mSeries, a)\n\t}\n\n\t// Combine series across all measurements by merging the already sorted\n\t// series lists.\n\tsBuffer := make([]*logSerie, len(f.mms))\n\tseries := make(logSeries, 0, n)\n\tvar (\n\t\tminSerie    *logSerie\n\t\tminSerieIdx int\n\t)\n\n\tfor s := 0; s < cap(series); s++ {\n\t\tfor i := 0; i < len(sBuffer); i++ {\n\t\t\t// Are there still serie to pull from this measurement?\n\t\t\tif mSeriesIdx[i] < len(mSeries[i]) && sBuffer[i] == nil {\n\t\t\t\t// Fill the buffer slot for this measurement.\n\t\t\t\tsBuffer[i] = &mSeries[i][mSeriesIdx[i]]\n\t\t\t\tmSeriesIdx[i]++\n\t\t\t}\n\n\t\t\t// Does this measurement have the smallest current serie out of\n\t\t\t// all those in the buffer?\n\t\t\tif minSerie == nil || (sBuffer[i] != nil && sBuffer[i].Compare(minSerie.name, 
minSerie.tags) < 0) {\n\t\t\t\tminSerie, minSerieIdx = sBuffer[i], i\n\t\t\t}\n\t\t}\n\t\tseries, minSerie, sBuffer[minSerieIdx] = append(series, *minSerie), nil, nil\n\t}\n\n\tif len(series) == 0 {\n\t\treturn nil\n\t}\n\treturn &logSeriesIterator{series: series}\n}\n\n// createMeasurementIfNotExists returns a measurement by name.\nfunc (f *LogFile) createMeasurementIfNotExists(name []byte) *logMeasurement {\n\tmm := f.mms[string(name)]\n\tif mm == nil {\n\t\tmm = &logMeasurement{\n\t\t\tname:   name,\n\t\t\ttagSet: make(map[string]logTagKey),\n\t\t\tseries: make(map[string]*logSerie),\n\t\t}\n\t\tf.mms[string(name)] = mm\n\t}\n\treturn mm\n}\n\n// MeasurementIterator returns an iterator over all the measurements in the file.\nfunc (f *LogFile) MeasurementIterator() MeasurementIterator {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tvar itr logMeasurementIterator\n\tfor _, mm := range f.mms {\n\t\titr.mms = append(itr.mms, *mm)\n\t}\n\tsort.Sort(logMeasurementSlice(itr.mms))\n\treturn &itr\n}\n\n// MeasurementSeriesIterator returns an iterator over all series for a measurement.\nfunc (f *LogFile) MeasurementSeriesIterator(name []byte) SeriesIterator {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tmm := f.mms[string(name)]\n\tif mm == nil || len(mm.series) == 0 {\n\t\treturn nil\n\t}\n\treturn newLogSeriesIterator(mm.series)\n}\n\n// CompactTo compacts the log file and writes it to w.\nfunc (f *LogFile) CompactTo(w io.Writer, m, k uint64) (n int64, err error) {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\t// Wrap in bufferred writer.\n\tbw := bufio.NewWriter(w)\n\n\t// Setup compaction offset tracking data.\n\tvar t IndexFileTrailer\n\tinfo := newLogFileCompactInfo()\n\n\t// Write magic number.\n\tif err := writeTo(bw, []byte(FileSignature), &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Retreve measurement names in order.\n\tnames := f.measurementNames()\n\n\t// Write series list.\n\tt.SeriesBlock.Offset = n\n\tif err := f.writeSeriesBlockTo(bw, names, m, k, 
info, &n); err != nil {\n\t\treturn n, err\n\t}\n\tt.SeriesBlock.Size = n - t.SeriesBlock.Offset\n\n\t// Flush buffer & mmap series block.\n\tif err := bw.Flush(); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Update series offsets.\n\t// NOTE: Pass the raw writer so we can mmap.\n\tif err := f.updateSeriesOffsets(w, names, info); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Write tagset blocks in measurement order.\n\tif err := f.writeTagsetsTo(bw, names, info, &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Write measurement block.\n\tt.MeasurementBlock.Offset = n\n\tif err := f.writeMeasurementBlockTo(bw, names, info, &n); err != nil {\n\t\treturn n, err\n\t}\n\tt.MeasurementBlock.Size = n - t.MeasurementBlock.Offset\n\n\t// Write trailer.\n\tnn, err := t.WriteTo(bw)\n\tn += nn\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\t// Flush buffer.\n\tif err := bw.Flush(); err != nil {\n\t\treturn n, err\n\t}\n\n\treturn n, nil\n}\n\nfunc (f *LogFile) writeSeriesBlockTo(w io.Writer, names []string, m, k uint64, info *logFileCompactInfo, n *int64) error {\n\t// Determine series count.\n\tvar seriesN uint32\n\tfor _, mm := range f.mms {\n\t\tseriesN += uint32(len(mm.series))\n\t}\n\n\t// Write all series.\n\tenc := NewSeriesBlockEncoder(w, seriesN, m, k)\n\n\t// Add series from measurements.\n\tfor _, name := range names {\n\t\tmm := f.mms[name]\n\n\t\t// Sort series.\n\t\tkeys := make([][]byte, 0, len(mm.series))\n\t\tfor k := range mm.series {\n\t\t\tkeys = append(keys, []byte(k))\n\t\t}\n\t\tsort.Sort(seriesKeys(keys))\n\n\t\tfor _, key := range keys {\n\t\t\tserie := mm.series[string(key)]\n\t\t\tif err := enc.Encode(serie.name, serie.tags, serie.deleted); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Close and flush series block.\n\terr := enc.Close()\n\t*n += int64(enc.N())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (f *LogFile) updateSeriesOffsets(w io.Writer, names []string, info *logFileCompactInfo) error {\n\t// Open 
series block.\n\tsblk, data, err := mapIndexFileSeriesBlock(w)\n\tif data != nil {\n\t\tdefer mmap.Unmap(data)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Add series to each measurement and key/value.\n\tvar seriesKey []byte\n\tfor _, name := range names {\n\t\tmm := f.mms[name]\n\t\tmmInfo := info.createMeasurementInfoIfNotExists(name)\n\t\tmmInfo.seriesIDs = make([]uint32, 0, len(mm.series))\n\n\t\tfor _, serie := range mm.series {\n\t\t\t// Lookup series offset.\n\t\t\toffset, _ := sblk.Offset(serie.name, serie.tags, seriesKey[:0])\n\t\t\tif offset == 0 {\n\t\t\t\tpanic(\"series not found: \" + string(serie.name) + \" \" + serie.tags.String())\n\t\t\t}\n\n\t\t\t// Add series id to measurement, tag key, and tag value.\n\t\t\tmmInfo.seriesIDs = append(mmInfo.seriesIDs, offset)\n\n\t\t\t// Add series id to each tag value.\n\t\t\tfor _, tag := range serie.tags {\n\t\t\t\ttagSetInfo := mmInfo.createTagSetInfoIfNotExists(tag.Key)\n\t\t\t\ttagValueInfo := tagSetInfo.createTagValueInfoIfNotExists(tag.Value)\n\t\t\t\ttagValueInfo.seriesIDs = append(tagValueInfo.seriesIDs, offset)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (f *LogFile) writeTagsetsTo(w io.Writer, names []string, info *logFileCompactInfo, n *int64) error {\n\tfor _, name := range names {\n\t\tif err := f.writeTagsetTo(w, name, info, n); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// writeTagsetTo writes a single tagset to w and saves the tagset offset.\nfunc (f *LogFile) writeTagsetTo(w io.Writer, name string, info *logFileCompactInfo, n *int64) error {\n\tmm := f.mms[name]\n\tmmInfo := info.mms[name]\n\n\tenc := NewTagBlockEncoder(w)\n\tfor _, k := range mm.keys() {\n\t\ttag := mm.tagSet[k]\n\n\t\t// Encode tag. 
Skip values if tag is deleted.\n\t\tif err := enc.EncodeKey(tag.name, tag.deleted); err != nil {\n\t\t\treturn err\n\t\t} else if tag.deleted {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Lookup compaction info.\n\t\ttagSetInfo := mmInfo.tagSet[k]\n\t\tassert(tagSetInfo != nil, \"tag set info not found\")\n\n\t\t// Add each value.\n\t\tfor v, value := range tag.tagValues {\n\t\t\ttagValueInfo := tagSetInfo.tagValues[v]\n\t\t\tsort.Sort(uint32Slice(tagValueInfo.seriesIDs))\n\n\t\t\tif err := enc.EncodeValue(value.name, value.deleted, tagValueInfo.seriesIDs); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Save tagset offset to measurement.\n\tmmInfo.offset = *n\n\n\t// Flush tag block.\n\terr := enc.Close()\n\t*n += enc.N()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Save tagset offset to measurement.\n\tmmInfo.size = *n - mmInfo.offset\n\n\treturn nil\n}\n\nfunc (f *LogFile) writeMeasurementBlockTo(w io.Writer, names []string, info *logFileCompactInfo, n *int64) error {\n\tmw := NewMeasurementBlockWriter()\n\n\t// Add measurement data.\n\tfor _, name := range names {\n\t\tmm := f.mms[name]\n\t\tmmInfo := info.mms[name]\n\t\tassert(mmInfo != nil, \"measurement info not found\")\n\n\t\tsort.Sort(uint32Slice(mmInfo.seriesIDs))\n\t\tmw.Add(mm.name, mm.deleted, mmInfo.offset, mmInfo.size, mmInfo.seriesIDs)\n\t}\n\n\t// Flush data to writer.\n\tnn, err := mw.WriteTo(w)\n\t*n += nn\n\treturn err\n}\n\n// logFileCompactInfo is a context object to track compaction position info.\ntype logFileCompactInfo struct {\n\tmms map[string]*logFileMeasurementCompactInfo\n}\n\n// newLogFileCompactInfo returns a new instance of logFileCompactInfo.\nfunc newLogFileCompactInfo() *logFileCompactInfo {\n\treturn &logFileCompactInfo{\n\t\tmms: make(map[string]*logFileMeasurementCompactInfo),\n\t}\n}\n\nfunc (info *logFileCompactInfo) createMeasurementInfoIfNotExists(name string) *logFileMeasurementCompactInfo {\n\tmmInfo := info.mms[name]\n\tif mmInfo == nil {\n\t\tmmInfo = 
&logFileMeasurementCompactInfo{\n\t\t\ttagSet: make(map[string]*logFileTagSetCompactInfo),\n\t\t}\n\t\tinfo.mms[name] = mmInfo\n\t}\n\treturn mmInfo\n}\n\ntype logFileMeasurementCompactInfo struct {\n\toffset    int64\n\tsize      int64\n\tseriesIDs []uint32\n\n\ttagSet map[string]*logFileTagSetCompactInfo\n}\n\nfunc (info *logFileMeasurementCompactInfo) createTagSetInfoIfNotExists(key []byte) *logFileTagSetCompactInfo {\n\ttagSetInfo := info.tagSet[string(key)]\n\tif tagSetInfo == nil {\n\t\ttagSetInfo = &logFileTagSetCompactInfo{tagValues: make(map[string]*logFileTagValueCompactInfo)}\n\t\tinfo.tagSet[string(key)] = tagSetInfo\n\t}\n\treturn tagSetInfo\n}\n\ntype logFileTagSetCompactInfo struct {\n\ttagValues map[string]*logFileTagValueCompactInfo\n}\n\nfunc (info *logFileTagSetCompactInfo) createTagValueInfoIfNotExists(value []byte) *logFileTagValueCompactInfo {\n\ttagValueInfo := info.tagValues[string(value)]\n\tif tagValueInfo == nil {\n\t\ttagValueInfo = &logFileTagValueCompactInfo{}\n\t\tinfo.tagValues[string(value)] = tagValueInfo\n\t}\n\treturn tagValueInfo\n}\n\ntype logFileTagValueCompactInfo struct {\n\tseriesIDs []uint32\n}\n\n// MergeSeriesSketches merges the series sketches belonging to this LogFile\n// into the provided sketches.\n//\n// MergeSeriesSketches is safe for concurrent use by multiple goroutines.\nfunc (f *LogFile) MergeSeriesSketches(sketch, tsketch estimator.Sketch) error {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tif err := sketch.Merge(f.sSketch); err != nil {\n\t\treturn err\n\t}\n\treturn tsketch.Merge(f.sTSketch)\n}\n\n// MergeMeasurementsSketches merges the measurement sketches belonging to this\n// LogFile into the provided sketches.\n//\n// MergeMeasurementsSketches is safe for concurrent use by multiple goroutines.\nfunc (f *LogFile) MergeMeasurementsSketches(sketch, tsketch estimator.Sketch) error {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\n\tif err := sketch.Merge(f.mSketch); err != nil {\n\t\treturn err\n\t}\n\treturn 
tsketch.Merge(f.mTSketch)\n}\n\n// LogEntry represents a single log entry in the write-ahead log.\ntype LogEntry struct {\n\tFlag     byte        // flag\n\tName     []byte      // measurement name\n\tTags     models.Tags // tagset\n\tChecksum uint32      // checksum of flag/name/tags.\n\tSize     int         // total size of record, in bytes.\n}\n\n// UnmarshalBinary unmarshals data into e.\nfunc (e *LogEntry) UnmarshalBinary(data []byte) error {\n\torig := data\n\tstart := len(data)\n\n\t// Parse flag data.\n\tif len(data) < 1 {\n\t\treturn io.ErrShortBuffer\n\t}\n\te.Flag, data = data[0], data[1:]\n\n\t// Parse name length.\n\tif len(data) < 1 {\n\t\treturn io.ErrShortBuffer\n\t}\n\tsz, n := binary.Uvarint(data)\n\n\t// Read name data.\n\tif len(data) < n+int(sz) {\n\t\treturn io.ErrShortBuffer\n\t}\n\te.Name, data = data[n:n+int(sz)], data[n+int(sz):]\n\n\t// Parse tag count.\n\tif len(data) < 1 {\n\t\treturn io.ErrShortBuffer\n\t}\n\ttagN, n := binary.Uvarint(data)\n\tdata = data[n:]\n\n\t// Parse tags.\n\ttags := make(models.Tags, tagN)\n\tfor i := range tags {\n\t\ttag := &tags[i]\n\n\t\t// Parse key length.\n\t\tif len(data) < 1 {\n\t\t\treturn io.ErrShortBuffer\n\t\t}\n\t\tsz, n := binary.Uvarint(data)\n\n\t\t// Read key data.\n\t\tif len(data) < n+int(sz) {\n\t\t\treturn io.ErrShortBuffer\n\t\t}\n\t\ttag.Key, data = data[n:n+int(sz)], data[n+int(sz):]\n\n\t\t// Parse value.\n\t\tif len(data) < 1 {\n\t\t\treturn io.ErrShortBuffer\n\t\t}\n\t\tsz, n = binary.Uvarint(data)\n\n\t\t// Read value data.\n\t\tif len(data) < n+int(sz) {\n\t\t\treturn io.ErrShortBuffer\n\t\t}\n\t\ttag.Value, data = data[n:n+int(sz)], data[n+int(sz):]\n\t}\n\te.Tags = tags\n\n\t// Compute checksum.\n\tchk := crc32.ChecksumIEEE(orig[:start-len(data)])\n\n\t// Parse checksum.\n\tif len(data) < 4 {\n\t\treturn io.ErrShortBuffer\n\t}\n\te.Checksum, data = binary.BigEndian.Uint32(data[:4]), data[4:]\n\n\t// Verify checksum.\n\tif chk != e.Checksum {\n\t\treturn 
ErrLogEntryChecksumMismatch\n\t}\n\n\t// Save length of elem.\n\te.Size = start - len(data)\n\n\treturn nil\n}\n\n// appendLogEntry appends to dst and returns the new buffer.\n// This updates the checksum on the entry.\nfunc appendLogEntry(dst []byte, e *LogEntry) []byte {\n\tvar buf [binary.MaxVarintLen64]byte\n\tstart := len(dst)\n\n\t// Append flag.\n\tdst = append(dst, e.Flag)\n\n\t// Append name.\n\tn := binary.PutUvarint(buf[:], uint64(len(e.Name)))\n\tdst = append(dst, buf[:n]...)\n\tdst = append(dst, e.Name...)\n\n\t// Append tag count.\n\tn = binary.PutUvarint(buf[:], uint64(len(e.Tags)))\n\tdst = append(dst, buf[:n]...)\n\n\t// Append key/value pairs.\n\tfor i := range e.Tags {\n\t\tt := &e.Tags[i]\n\n\t\t// Append key.\n\t\tn := binary.PutUvarint(buf[:], uint64(len(t.Key)))\n\t\tdst = append(dst, buf[:n]...)\n\t\tdst = append(dst, t.Key...)\n\n\t\t// Append value.\n\t\tn = binary.PutUvarint(buf[:], uint64(len(t.Value)))\n\t\tdst = append(dst, buf[:n]...)\n\t\tdst = append(dst, t.Value...)\n\t}\n\n\t// Calculate checksum.\n\te.Checksum = crc32.ChecksumIEEE(dst[start:])\n\n\t// Append checksum.\n\tbinary.BigEndian.PutUint32(buf[:4], e.Checksum)\n\tdst = append(dst, buf[:4]...)\n\n\treturn dst\n}\n\ntype logSerie struct {\n\tname    []byte\n\ttags    models.Tags\n\tdeleted bool\n}\n\nfunc (s *logSerie) String() string {\n\treturn fmt.Sprintf(\"key: %s tags: %v\", s.name, s.tags)\n}\n\nfunc (s *logSerie) Name() []byte        { return s.name }\nfunc (s *logSerie) Tags() models.Tags   { return s.tags }\nfunc (s *logSerie) Deleted() bool       { return s.deleted }\nfunc (s *logSerie) Expr() influxql.Expr { return nil }\nfunc (s *logSerie) Compare(name []byte, tags models.Tags) int {\n\tif cmp := bytes.Compare(s.name, name); cmp != 0 {\n\t\treturn cmp\n\t}\n\treturn models.CompareTags(s.tags, tags)\n}\n\ntype logSeries []logSerie\n\nfunc (a logSeries) Len() int      { return len(a) }\nfunc (a logSeries) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a 
logSeries) Less(i, j int) bool {\n\treturn a[i].Compare(a[j].name, a[j].tags) == -1\n}\n\n// logMeasurements represents a map of measurement names to measurements.\ntype logMeasurements map[string]*logMeasurement\n\n// names returns a sorted list of measurement names.\nfunc (m logMeasurements) names() []string {\n\ta := make([]string, 0, len(m))\n\tfor name := range m {\n\t\ta = append(a, name)\n\t}\n\tsort.Strings(a)\n\treturn a\n}\n\ntype logMeasurement struct {\n\tname    []byte\n\ttagSet  map[string]logTagKey\n\tdeleted bool\n\tseries  map[string]*logSerie\n}\n\nfunc (m *logMeasurement) Name() []byte  { return m.name }\nfunc (m *logMeasurement) Deleted() bool { return m.deleted }\n\nfunc (m *logMeasurement) createTagSetIfNotExists(key []byte) logTagKey {\n\tts, ok := m.tagSet[string(key)]\n\tif !ok {\n\t\tts = logTagKey{name: key, tagValues: make(map[string]logTagValue)}\n\t}\n\treturn ts\n}\n\n// createSeriesIfNotExists creates or returns an existing series on the measurement.\nfunc (m *logMeasurement) createSeriesIfNotExists(key []byte, name []byte, tags models.Tags, deleted bool) *logSerie {\n\ts := m.series[string(key)]\n\tif s == nil {\n\t\ts = &logSerie{name: name, tags: tags, deleted: deleted}\n\t\tm.series[string(key)] = s\n\t} else {\n\t\ts.deleted = deleted\n\t}\n\treturn s\n}\n\n// keys returns a sorted list of tag keys.\nfunc (m *logMeasurement) keys() []string {\n\ta := make([]string, 0, len(m.tagSet))\n\tfor k := range m.tagSet {\n\t\ta = append(a, k)\n\t}\n\tsort.Strings(a)\n\treturn a\n}\n\n// logMeasurementSlice is a sortable list of log measurements.\ntype logMeasurementSlice []logMeasurement\n\nfunc (a logMeasurementSlice) Len() int           { return len(a) }\nfunc (a logMeasurementSlice) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a logMeasurementSlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[j].name) == -1 }\n\n// logMeasurementIterator represents an iterator over a slice of measurements.\ntype 
logMeasurementIterator struct {\n\tmms []logMeasurement\n}\n\n// Next returns the next element in the iterator.\nfunc (itr *logMeasurementIterator) Next() (e MeasurementElem) {\n\tif len(itr.mms) == 0 {\n\t\treturn nil\n\t}\n\te, itr.mms = &itr.mms[0], itr.mms[1:]\n\treturn e\n}\n\ntype logTagKey struct {\n\tname      []byte\n\tdeleted   bool\n\ttagValues map[string]logTagValue\n}\n\nfunc (tk *logTagKey) Key() []byte   { return tk.name }\nfunc (tk *logTagKey) Deleted() bool { return tk.deleted }\n\nfunc (tk *logTagKey) TagValueIterator() TagValueIterator {\n\ta := make([]logTagValue, 0, len(tk.tagValues))\n\tfor _, v := range tk.tagValues {\n\t\ta = append(a, v)\n\t}\n\treturn newLogTagValueIterator(a)\n}\n\nfunc (tk *logTagKey) createTagValueIfNotExists(value []byte) logTagValue {\n\ttv, ok := tk.tagValues[string(value)]\n\tif !ok {\n\t\ttv = logTagValue{name: value, series: make(map[string]*logSerie)}\n\t}\n\treturn tv\n}\n\n// logTagKey is a sortable list of log tag keys.\ntype logTagKeySlice []logTagKey\n\nfunc (a logTagKeySlice) Len() int           { return len(a) }\nfunc (a logTagKeySlice) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a logTagKeySlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[j].name) == -1 }\n\ntype logTagValue struct {\n\tname    []byte\n\tdeleted bool\n\tseries  map[string]*logSerie\n}\n\nfunc (tv *logTagValue) Value() []byte { return tv.name }\nfunc (tv *logTagValue) Deleted() bool { return tv.deleted }\n\n// logTagValue is a sortable list of log tag values.\ntype logTagValueSlice []logTagValue\n\nfunc (a logTagValueSlice) Len() int           { return len(a) }\nfunc (a logTagValueSlice) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a logTagValueSlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[j].name) == -1 }\n\n// logTagKeyIterator represents an iterator over a slice of tag keys.\ntype logTagKeyIterator struct {\n\ta []logTagKey\n}\n\n// newLogTagKeyIterator returns a new instance of 
logTagKeyIterator.\nfunc newLogTagKeyIterator(a []logTagKey) *logTagKeyIterator {\n\tsort.Sort(logTagKeySlice(a))\n\treturn &logTagKeyIterator{a: a}\n}\n\n// Next returns the next element in the iterator.\nfunc (itr *logTagKeyIterator) Next() (e TagKeyElem) {\n\tif len(itr.a) == 0 {\n\t\treturn nil\n\t}\n\te, itr.a = &itr.a[0], itr.a[1:]\n\treturn e\n}\n\n// logTagValueIterator represents an iterator over a slice of tag values.\ntype logTagValueIterator struct {\n\ta []logTagValue\n}\n\n// newLogTagValueIterator returns a new instance of logTagValueIterator.\nfunc newLogTagValueIterator(a []logTagValue) *logTagValueIterator {\n\tsort.Sort(logTagValueSlice(a))\n\treturn &logTagValueIterator{a: a}\n}\n\n// Next returns the next element in the iterator.\nfunc (itr *logTagValueIterator) Next() (e TagValueElem) {\n\tif len(itr.a) == 0 {\n\t\treturn nil\n\t}\n\te, itr.a = &itr.a[0], itr.a[1:]\n\treturn e\n}\n\n// logSeriesIterator represents an iterator over a slice of series.\ntype logSeriesIterator struct {\n\tseries logSeries\n}\n\n// newLogSeriesIterator returns a new instance of logSeriesIterator.\n// All series are copied to the iterator.\nfunc newLogSeriesIterator(m map[string]*logSerie) *logSeriesIterator {\n\tif len(m) == 0 {\n\t\treturn nil\n\t}\n\n\titr := logSeriesIterator{series: make(logSeries, 0, len(m))}\n\tfor _, s := range m {\n\t\titr.series = append(itr.series, *s)\n\t}\n\tsort.Sort(itr.series)\n\n\treturn &itr\n}\n\n// Next returns the next element in the iterator.\nfunc (itr *logSeriesIterator) Next() (e SeriesElem) {\n\tif len(itr.series) == 0 {\n\t\treturn nil\n\t}\n\te, itr.series = &itr.series[0], itr.series[1:]\n\treturn e\n}\n\n// FormatLogFileName generates a log filename for the given index.\nfunc FormatLogFileName(id int) string {\n\treturn fmt.Sprintf(\"L0-%08d%s\", id, LogFileExt)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file_test.go",
    "content": "package tsi1_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"math/rand\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"runtime/pprof\"\n\t\"sort\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/bloom\"\n\t\"github.com/influxdata/influxdb/tsdb/index/tsi1\"\n)\n\n// Ensure log file can append series.\nfunc TestLogFile_AddSeries(t *testing.T) {\n\tf := MustOpenLogFile()\n\tdefer f.Close()\n\n\t// Add test data.\n\tif err := f.AddSeries([]byte(\"mem\"), models.Tags{{Key: []byte(\"host\"), Value: []byte(\"serverA\")}}); err != nil {\n\t\tt.Fatal(err)\n\t} else if err := f.AddSeries([]byte(\"cpu\"), models.Tags{{Key: []byte(\"region\"), Value: []byte(\"us-east\")}}); err != nil {\n\t\tt.Fatal(err)\n\t} else if err := f.AddSeries([]byte(\"cpu\"), models.Tags{{Key: []byte(\"region\"), Value: []byte(\"us-west\")}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify data.\n\titr := f.MeasurementIterator()\n\tif e := itr.Next(); e == nil || string(e.Name()) != \"cpu\" {\n\t\tt.Fatalf(\"unexpected measurement: %#v\", e)\n\t} else if e := itr.Next(); e == nil || string(e.Name()) != \"mem\" {\n\t\tt.Fatalf(\"unexpected measurement: %#v\", e)\n\t} else if e := itr.Next(); e != nil {\n\t\tt.Fatalf(\"expected eof, got: %#v\", e)\n\t}\n\n\t// Reopen file and re-verify.\n\tif err := f.Reopen(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify data.\n\titr = f.MeasurementIterator()\n\tif e := itr.Next(); e == nil || string(e.Name()) != \"cpu\" {\n\t\tt.Fatalf(\"unexpected measurement: %#v\", e)\n\t} else if e := itr.Next(); e == nil || string(e.Name()) != \"mem\" {\n\t\tt.Fatalf(\"unexpected measurement: %#v\", e)\n\t} else if e := itr.Next(); e != nil {\n\t\tt.Fatalf(\"expected eof, got: %#v\", e)\n\t}\n}\n\nfunc TestLogFile_SeriesStoredInOrder(t *testing.T) {\n\tf := MustOpenLogFile()\n\tdefer f.Close()\n\n\t// Generate and add test data\n\ttvm := 
make(map[string]struct{})\n\trand.Seed(time.Now().Unix())\n\tfor i := 0; i < 100; i++ {\n\t\ttv := fmt.Sprintf(\"server-%d\", rand.Intn(50)) // Encourage adding duplicate series.\n\t\ttvm[tv] = struct{}{}\n\n\t\tif err := f.AddSeries([]byte(\"mem\"), models.Tags{models.NewTag([]byte(\"host\"), []byte(tv))}); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif err := f.AddSeries([]byte(\"cpu\"), models.Tags{models.NewTag([]byte(\"host\"), []byte(tv))}); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\t// Sort the tag values so we know what order to expect.\n\ttvs := make([]string, 0, len(tvm))\n\tfor tv := range tvm {\n\t\ttvs = append(tvs, tv)\n\t}\n\tsort.Strings(tvs)\n\n\t// Double the series values since we're adding them twice (two measurements)\n\ttvs = append(tvs, tvs...)\n\n\t// When we pull the series out via an iterator they should be in order.\n\titr := f.SeriesIterator()\n\tif itr == nil {\n\t\tt.Fatal(\"nil iterator\")\n\t}\n\n\tmname := []string{\"cpu\", \"mem\"}\n\tvar j int\n\tfor i := 0; i < len(tvs); i++ {\n\t\tserie := itr.Next()\n\t\tif serie == nil {\n\t\t\tt.Fatal(\"got nil series\")\n\t\t}\n\n\t\tif got, exp := string(serie.Name()), mname[j]; got != exp {\n\t\t\tt.Fatalf(\"[series %d] got %s, expected %s\", i, got, exp)\n\t\t}\n\n\t\tif got, exp := string(serie.Tags()[0].Value), tvs[i]; got != exp {\n\t\t\tt.Fatalf(\"[series %d] got %s, expected %s\", i, got, exp)\n\t\t}\n\n\t\tif i == (len(tvs)/2)-1 {\n\t\t\t// Next measurement\n\t\t\tj++\n\t\t}\n\t}\n}\n\n// Ensure log file can delete an existing measurement.\nfunc TestLogFile_DeleteMeasurement(t *testing.T) {\n\tf := MustOpenLogFile()\n\tdefer f.Close()\n\n\t// Add test data.\n\tif err := f.AddSeries([]byte(\"mem\"), models.Tags{{Key: []byte(\"host\"), Value: []byte(\"serverA\")}}); err != nil {\n\t\tt.Fatal(err)\n\t} else if err := f.AddSeries([]byte(\"cpu\"), models.Tags{{Key: []byte(\"region\"), Value: []byte(\"us-east\")}}); err != nil {\n\t\tt.Fatal(err)\n\t} else if err := 
f.AddSeries([]byte(\"cpu\"), models.Tags{{Key: []byte(\"region\"), Value: []byte(\"us-west\")}}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Remove measurement.\n\tif err := f.DeleteMeasurement([]byte(\"cpu\")); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify data.\n\titr := f.MeasurementIterator()\n\tif e := itr.Next(); string(e.Name()) != \"cpu\" || !e.Deleted() {\n\t\tt.Fatalf(\"unexpected measurement: %s/%v\", e.Name(), e.Deleted())\n\t} else if e := itr.Next(); string(e.Name()) != \"mem\" || e.Deleted() {\n\t\tt.Fatalf(\"unexpected measurement: %s/%v\", e.Name(), e.Deleted())\n\t} else if e := itr.Next(); e != nil {\n\t\tt.Fatalf(\"expected eof, got: %#v\", e)\n\t}\n}\n\n// LogFile is a test wrapper for tsi1.LogFile.\ntype LogFile struct {\n\t*tsi1.LogFile\n}\n\n// NewLogFile returns a new instance of LogFile with a temporary file path.\nfunc NewLogFile() *LogFile {\n\tfile, err := ioutil.TempFile(\"\", \"tsi1-log-file-\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfile.Close()\n\n\treturn &LogFile{LogFile: tsi1.NewLogFile(file.Name())}\n}\n\n// MustOpenLogFile returns a new, open instance of LogFile. 
Panic on error.\nfunc MustOpenLogFile() *LogFile {\n\tf := NewLogFile()\n\tif err := f.Open(); err != nil {\n\t\tpanic(err)\n\t}\n\treturn f\n}\n\n// Close closes the log file and removes it from disk.\nfunc (f *LogFile) Close() error {\n\tdefer os.Remove(f.Path())\n\treturn f.LogFile.Close()\n}\n\n// Reopen closes and reopens the file.\nfunc (f *LogFile) Reopen() error {\n\tif err := f.LogFile.Close(); err != nil {\n\t\treturn err\n\t}\n\tif err := f.LogFile.Open(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// CreateLogFile creates a new temporary log file and adds a list of series.\nfunc CreateLogFile(series []Series) (*LogFile, error) {\n\tf := MustOpenLogFile()\n\tfor _, serie := range series {\n\t\tif err := f.AddSeries(serie.Name, serie.Tags); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn f, nil\n}\n\n// GenerateLogFile generates a log file from a set of series based on the count arguments.\n// Total series returned will equal measurementN * tagN * valueN.\nfunc GenerateLogFile(measurementN, tagN, valueN int) (*LogFile, error) {\n\ttagValueN := pow(valueN, tagN)\n\n\tf := MustOpenLogFile()\n\tfor i := 0; i < measurementN; i++ {\n\t\tname := []byte(fmt.Sprintf(\"measurement%d\", i))\n\n\t\t// Generate tag sets.\n\t\tfor j := 0; j < tagValueN; j++ {\n\t\t\tvar tags models.Tags\n\t\t\tfor k := 0; k < tagN; k++ {\n\t\t\t\tkey := []byte(fmt.Sprintf(\"key%d\", k))\n\t\t\t\tvalue := []byte(fmt.Sprintf(\"value%d\", (j / pow(valueN, k) % valueN)))\n\t\t\t\ttags = append(tags, models.NewTag(key, value))\n\t\t\t}\n\t\t\tif err := f.AddSeries(name, tags); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\treturn f, nil\n}\n\nfunc MustGenerateLogFile(measurementN, tagN, valueN int) *LogFile {\n\tf, err := GenerateLogFile(measurementN, tagN, valueN)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn f\n}\n\nfunc benchmarkLogFile_AddSeries(b *testing.B, measurementN, seriesKeyN, seriesValueN int) {\n\tb.StopTimer()\n\tf := 
MustOpenLogFile()\n\n\ttype Datum struct {\n\t\tName []byte\n\t\tTags models.Tags\n\t}\n\n\t// Pre-generate everything.\n\tvar (\n\t\tdata   []Datum\n\t\tseries int\n\t)\n\n\ttagValueN := pow(seriesValueN, seriesKeyN)\n\n\tfor i := 0; i < measurementN; i++ {\n\t\tname := []byte(fmt.Sprintf(\"measurement%d\", i))\n\t\tfor j := 0; j < tagValueN; j++ {\n\t\t\tvar tags models.Tags\n\t\t\tfor k := 0; k < seriesKeyN; k++ {\n\t\t\t\tkey := []byte(fmt.Sprintf(\"key%d\", k))\n\t\t\t\tvalue := []byte(fmt.Sprintf(\"value%d\", (j / pow(seriesValueN, k) % seriesValueN)))\n\t\t\t\ttags = append(tags, models.NewTag(key, value))\n\t\t\t}\n\t\t\tdata = append(data, Datum{Name: name, Tags: tags})\n\t\t\tseries += len(tags)\n\t\t}\n\t}\n\tb.StartTimer()\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tfor _, d := range data {\n\t\t\tif err := f.AddSeries(d.Name, d.Tags); err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc BenchmarkLogFile_AddSeries_100_1_1(b *testing.B)    { benchmarkLogFile_AddSeries(b, 100, 1, 1) }    // 100 series\nfunc BenchmarkLogFile_AddSeries_1000_1_1(b *testing.B)   { benchmarkLogFile_AddSeries(b, 1000, 1, 1) }   // 1000 series\nfunc BenchmarkLogFile_AddSeries_10000_1_1(b *testing.B)  { benchmarkLogFile_AddSeries(b, 10000, 1, 1) }  // 10000 series\nfunc BenchmarkLogFile_AddSeries_100_2_10(b *testing.B)   { benchmarkLogFile_AddSeries(b, 100, 2, 10) }   // ~20K series\nfunc BenchmarkLogFile_AddSeries_100000_1_1(b *testing.B) { benchmarkLogFile_AddSeries(b, 100000, 1, 1) } // ~100K series\nfunc BenchmarkLogFile_AddSeries_100_3_7(b *testing.B)    { benchmarkLogFile_AddSeries(b, 100, 3, 7) }    // ~100K series\nfunc BenchmarkLogFile_AddSeries_200_3_7(b *testing.B)    { benchmarkLogFile_AddSeries(b, 200, 3, 7) }    // ~200K series\nfunc BenchmarkLogFile_AddSeries_200_4_7(b *testing.B)    { benchmarkLogFile_AddSeries(b, 200, 4, 7) }    // ~1.9M series\n\nfunc BenchmarkLogFile_WriteTo(b *testing.B) {\n\tfor _, seriesN := range []int{1000, 10000, 
100000, 1000000} {\n\t\tname := fmt.Sprintf(\"series=%d\", seriesN)\n\t\tb.Run(name, func(b *testing.B) {\n\t\t\tf := MustOpenLogFile()\n\t\t\tdefer f.Close()\n\n\t\t\t// Estimate bloom filter size.\n\t\t\tm, k := bloom.Estimate(uint64(seriesN), 0.02)\n\n\t\t\t// Initialize log file with series data.\n\t\t\tfor i := 0; i < seriesN; i++ {\n\t\t\t\tif err := f.AddSeries(\n\t\t\t\t\t[]byte(\"cpu\"),\n\t\t\t\t\tmodels.Tags{\n\t\t\t\t\t\t{Key: []byte(\"host\"), Value: []byte(fmt.Sprintf(\"server-%d\", i))},\n\t\t\t\t\t\t{Key: []byte(\"location\"), Value: []byte(\"us-west\")},\n\t\t\t\t\t},\n\t\t\t\t); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tb.ResetTimer()\n\n\t\t\t// Create cpu profile for each subtest.\n\t\t\tMustStartCPUProfile(name)\n\t\t\tdefer pprof.StopCPUProfile()\n\n\t\t\t// Compact log file.\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tbuf := bytes.NewBuffer(make([]byte, 0, 150*seriesN))\n\t\t\t\tif _, err := f.CompactTo(buf, m, k); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tb.Logf(\"sz=%db\", buf.Len())\n\t\t\t}\n\t\t})\n\t}\n}\n\n// MustStartCPUProfile starts a cpu profile in a temporary path based on name.\nfunc MustStartCPUProfile(name string) {\n\tname = regexp.MustCompile(`\\W+`).ReplaceAllString(name, \"-\")\n\n\t// Open file and start pprof.\n\tf, err := os.Create(filepath.Join(\"/tmp\", fmt.Sprintf(\"cpu-%s.pprof\", name)))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif err := pprof.StartCPUProfile(f); err != nil {\n\t\tpanic(err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block.go",
    "content": "package tsi1\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"io\"\n\t\"sort\"\n\n\t\"github.com/influxdata/influxdb/pkg/estimator\"\n\t\"github.com/influxdata/influxdb/pkg/estimator/hll\"\n\t\"github.com/influxdata/influxdb/pkg/rhh\"\n)\n\n// MeasurementBlockVersion is the version of the measurement block.\nconst MeasurementBlockVersion = 1\n\n// Measurement flag constants.\nconst (\n\tMeasurementTombstoneFlag = 0x01\n)\n\n// Measurement field size constants.\nconst (\n\t// 1 byte offset for the block to ensure non-zero offsets.\n\tMeasurementFillSize = 1\n\n\t// Measurement trailer fields\n\tMeasurementTrailerSize = 0 +\n\t\t2 + // version\n\t\t8 + 8 + // data offset/size\n\t\t8 + 8 + // hash index offset/size\n\t\t8 + 8 + // measurement sketch offset/size\n\t\t8 + 8 // tombstone measurement sketch offset/size\n\n\t// Measurement key block fields.\n\tMeasurementNSize      = 8\n\tMeasurementOffsetSize = 8\n)\n\n// Measurement errors.\nvar (\n\tErrUnsupportedMeasurementBlockVersion = errors.New(\"unsupported measurement block version\")\n\tErrMeasurementBlockSizeMismatch       = errors.New(\"measurement block size mismatch\")\n)\n\n// MeasurementBlock represents a collection of all measurements in an index.\ntype MeasurementBlock struct {\n\tdata     []byte\n\thashData []byte\n\n\t// Series block sketch and tombstone sketch for cardinality estimation.\n\t// While we have exact counts for the block, these sketches allow us to\n\t// estimate cardinality across multiple blocks (which might contain\n\t// duplicate series).\n\tsketch, tSketch estimator.Sketch\n\n\tversion int // block version\n}\n\n// Version returns the encoding version parsed from the data.\n// Only valid after UnmarshalBinary() has been successfully invoked.\nfunc (blk *MeasurementBlock) Version() int { return blk.version }\n\n// Elem returns an element for a measurement.\nfunc (blk *MeasurementBlock) Elem(name []byte) (e MeasurementBlockElem, ok bool) {\n\tn := 
int64(binary.BigEndian.Uint64(blk.hashData[:MeasurementNSize]))\n\thash := rhh.HashKey(name)\n\tpos := hash % n\n\n\t// Track current distance\n\tvar d int64\n\tfor {\n\t\t// Find offset of measurement.\n\t\toffset := binary.BigEndian.Uint64(blk.hashData[MeasurementNSize+(pos*MeasurementOffsetSize):])\n\t\tif offset == 0 {\n\t\t\treturn MeasurementBlockElem{}, false\n\t\t}\n\n\t\t// Evaluate name if offset is not empty.\n\t\tif offset > 0 {\n\t\t\t// Parse into element.\n\t\t\tvar e MeasurementBlockElem\n\t\t\te.UnmarshalBinary(blk.data[offset:])\n\n\t\t\t// Return if name match.\n\t\t\tif bytes.Equal(e.name, name) {\n\t\t\t\treturn e, true\n\t\t\t}\n\n\t\t\t// Check if we've exceeded the probe distance.\n\t\t\tif d > rhh.Dist(rhh.HashKey(e.name), pos, n) {\n\t\t\t\treturn MeasurementBlockElem{}, false\n\t\t\t}\n\t\t}\n\n\t\t// Move position forward.\n\t\tpos = (pos + 1) % n\n\t\td++\n\n\t\tif d > n {\n\t\t\treturn MeasurementBlockElem{}, false\n\t\t}\n\t}\n}\n\n// UnmarshalBinary unpacks data into the block. Block is not copied so data\n// should be retained and unchanged after being passed into this function.\nfunc (blk *MeasurementBlock) UnmarshalBinary(data []byte) error {\n\t// Read trailer.\n\tt, err := ReadMeasurementBlockTrailer(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Save data section.\n\tblk.data = data[t.Data.Offset:]\n\tblk.data = blk.data[:t.Data.Size]\n\n\t// Save hash index block.\n\tblk.hashData = data[t.HashIndex.Offset:]\n\tblk.hashData = blk.hashData[:t.HashIndex.Size]\n\n\t// Initialise sketches. 
We're currently using HLL+.\n\tvar s, ts = hll.NewDefaultPlus(), hll.NewDefaultPlus()\n\tif err := s.UnmarshalBinary(data[t.Sketch.Offset:][:t.Sketch.Size]); err != nil {\n\t\treturn err\n\t}\n\tblk.sketch = s\n\n\tif err := ts.UnmarshalBinary(data[t.TSketch.Offset:][:t.TSketch.Size]); err != nil {\n\t\treturn err\n\t}\n\tblk.tSketch = ts\n\n\treturn nil\n}\n\n// Iterator returns an iterator over all measurements.\nfunc (blk *MeasurementBlock) Iterator() MeasurementIterator {\n\treturn &blockMeasurementIterator{data: blk.data[MeasurementFillSize:]}\n}\n\n// seriesIDIterator returns an iterator for all series ids in a measurement.\nfunc (blk *MeasurementBlock) seriesIDIterator(name []byte) seriesIDIterator {\n\t// Find measurement element.\n\te, ok := blk.Elem(name)\n\tif !ok {\n\t\treturn &rawSeriesIDIterator{}\n\t}\n\treturn &rawSeriesIDIterator{n: e.series.n, data: e.series.data}\n}\n\n// blockMeasurementIterator iterates over a list measurements in a block.\ntype blockMeasurementIterator struct {\n\telem MeasurementBlockElem\n\tdata []byte\n}\n\n// Next returns the next measurement. 
Returns nil when iterator is complete.\nfunc (itr *blockMeasurementIterator) Next() MeasurementElem {\n\t// Return nil when we run out of data.\n\tif len(itr.data) == 0 {\n\t\treturn nil\n\t}\n\n\t// Unmarshal the element at the current position.\n\titr.elem.UnmarshalBinary(itr.data)\n\n\t// Move the data forward past the record.\n\titr.data = itr.data[itr.elem.size:]\n\n\treturn &itr.elem\n}\n\n// rawSeriesIterator iterates over a list of raw series data.\ntype rawSeriesIDIterator struct {\n\tprev uint32\n\tn    uint32\n\tdata []byte\n}\n\n// next returns the next decoded series.\nfunc (itr *rawSeriesIDIterator) next() uint32 {\n\tif len(itr.data) == 0 {\n\t\treturn 0\n\t}\n\n\tdelta, n := binary.Uvarint(itr.data)\n\titr.data = itr.data[n:]\n\n\tseriesID := itr.prev + uint32(delta)\n\titr.prev = seriesID\n\treturn seriesID\n}\n\n// MeasurementBlockTrailer represents meta data at the end of a MeasurementBlock.\ntype MeasurementBlockTrailer struct {\n\tVersion int // Encoding version\n\n\t// Offset & size of data section.\n\tData struct {\n\t\tOffset int64\n\t\tSize   int64\n\t}\n\n\t// Offset & size of hash map section.\n\tHashIndex struct {\n\t\tOffset int64\n\t\tSize   int64\n\t}\n\n\t// Offset and size of cardinality sketch for measurements.\n\tSketch struct {\n\t\tOffset int64\n\t\tSize   int64\n\t}\n\n\t// Offset and size of cardinality sketch for tombstoned measurements.\n\tTSketch struct {\n\t\tOffset int64\n\t\tSize   int64\n\t}\n}\n\n// ReadMeasurementBlockTrailer returns the block trailer from data.\nfunc ReadMeasurementBlockTrailer(data []byte) (MeasurementBlockTrailer, error) {\n\tvar t MeasurementBlockTrailer\n\n\t// Read version (which is located in the last two bytes of the trailer).\n\tt.Version = int(binary.BigEndian.Uint16(data[len(data)-2:]))\n\tif t.Version != MeasurementBlockVersion {\n\t\treturn t, ErrUnsupportedIndexFileVersion\n\t}\n\n\t// Slice trailer data.\n\tbuf := data[len(data)-MeasurementTrailerSize:]\n\n\t// Read data section 
info.\n\tt.Data.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]\n\tt.Data.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]\n\n\t// Read measurement block info.\n\tt.HashIndex.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]\n\tt.HashIndex.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]\n\n\t// Read measurment sketch info.\n\tt.Sketch.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]\n\tt.Sketch.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]\n\n\t// Read tombstone measurment sketch info.\n\tt.TSketch.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]\n\tt.TSketch.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]\n\n\treturn t, nil\n}\n\n// WriteTo writes the trailer to w.\nfunc (t *MeasurementBlockTrailer) WriteTo(w io.Writer) (n int64, err error) {\n\t// Write data section info.\n\tif err := writeUint64To(w, uint64(t.Data.Offset), &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint64To(w, uint64(t.Data.Size), &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Write hash index section info.\n\tif err := writeUint64To(w, uint64(t.HashIndex.Offset), &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint64To(w, uint64(t.HashIndex.Size), &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Write measurement sketch info.\n\tif err := writeUint64To(w, uint64(t.Sketch.Offset), &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint64To(w, uint64(t.Sketch.Size), &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Write tombstone measurement sketch info.\n\tif err := writeUint64To(w, uint64(t.TSketch.Offset), &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint64To(w, uint64(t.TSketch.Size), &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Write measurement block version.\n\tif err := writeUint16To(w, MeasurementBlockVersion, &n); err != nil {\n\t\treturn n, err\n\t}\n\n\treturn n, nil\n}\n\n// MeasurementBlockElem 
represents an internal measurement element.\ntype MeasurementBlockElem struct {\n\tflag byte   // flag\n\tname []byte // measurement name\n\n\ttagBlock struct {\n\t\toffset int64\n\t\tsize   int64\n\t}\n\n\tseries struct {\n\t\tn    uint32 // series count\n\t\tdata []byte // serialized series data\n\t}\n\n\t// size in bytes, set after unmarshaling.\n\tsize int\n}\n\n// Name returns the measurement name.\nfunc (e *MeasurementBlockElem) Name() []byte { return e.name }\n\n// Deleted returns true if the tombstone flag is set.\nfunc (e *MeasurementBlockElem) Deleted() bool {\n\treturn (e.flag & MeasurementTombstoneFlag) != 0\n}\n\n// TagBlockOffset returns the offset of the measurement's tag block.\nfunc (e *MeasurementBlockElem) TagBlockOffset() int64 { return e.tagBlock.offset }\n\n// TagBlockSize returns the size of the measurement's tag block.\nfunc (e *MeasurementBlockElem) TagBlockSize() int64 { return e.tagBlock.size }\n\n// SeriesData returns the raw series data.\nfunc (e *MeasurementBlockElem) SeriesData() []byte { return e.series.data }\n\n// SeriesN returns the number of series associated with the measurement.\nfunc (e *MeasurementBlockElem) SeriesN() uint32 { return e.series.n }\n\n// SeriesID returns series ID at an index.\nfunc (e *MeasurementBlockElem) SeriesID(i int) uint32 {\n\treturn binary.BigEndian.Uint32(e.series.data[i*SeriesIDSize:])\n}\n\n// SeriesIDs returns a list of decoded series ids.\n//\n// NOTE: This should be used for testing and diagnostics purposes only.\n// It requires loading the entire list of series in-memory.\nfunc (e *MeasurementBlockElem) SeriesIDs() []uint32 {\n\ta := make([]uint32, 0, e.series.n)\n\tvar prev uint32\n\tfor data := e.series.data; len(data) > 0; {\n\t\tdelta, n := binary.Uvarint(data)\n\t\tdata = data[n:]\n\n\t\tseriesID := prev + uint32(delta)\n\t\ta = append(a, seriesID)\n\t\tprev = seriesID\n\t}\n\treturn a\n}\n\n// Size returns the size of the element.\nfunc (e *MeasurementBlockElem) Size() int { return e.size 
}\n\n// UnmarshalBinary unmarshals data into e.\nfunc (e *MeasurementBlockElem) UnmarshalBinary(data []byte) error {\n\tstart := len(data)\n\n\t// Parse flag data.\n\te.flag, data = data[0], data[1:]\n\n\t// Parse tag block offset.\n\te.tagBlock.offset, data = int64(binary.BigEndian.Uint64(data)), data[8:]\n\te.tagBlock.size, data = int64(binary.BigEndian.Uint64(data)), data[8:]\n\n\t// Parse name.\n\tsz, n := binary.Uvarint(data)\n\te.name, data = data[n:n+int(sz)], data[n+int(sz):]\n\n\t// Parse series data.\n\tv, n := binary.Uvarint(data)\n\te.series.n, data = uint32(v), data[n:]\n\tsz, n = binary.Uvarint(data)\n\tdata = data[n:]\n\te.series.data, data = data[:sz], data[sz:]\n\n\t// Save length of elem.\n\te.size = start - len(data)\n\n\treturn nil\n}\n\n// MeasurementBlockWriter writes a measurement block.\ntype MeasurementBlockWriter struct {\n\tbuf bytes.Buffer\n\tmms map[string]measurement\n\n\t// Measurement sketch and tombstoned measurement sketch.\n\tsketch, tSketch estimator.Sketch\n}\n\n// NewMeasurementBlockWriter returns a new MeasurementBlockWriter.\nfunc NewMeasurementBlockWriter() *MeasurementBlockWriter {\n\treturn &MeasurementBlockWriter{\n\t\tmms:     make(map[string]measurement),\n\t\tsketch:  hll.NewDefaultPlus(),\n\t\ttSketch: hll.NewDefaultPlus(),\n\t}\n}\n\n// Add adds a measurement with series and tag set offset/size.\nfunc (mw *MeasurementBlockWriter) Add(name []byte, deleted bool, offset, size int64, seriesIDs []uint32) {\n\tmm := mw.mms[string(name)]\n\tmm.deleted = deleted\n\tmm.tagBlock.offset = offset\n\tmm.tagBlock.size = size\n\tmm.seriesIDs = seriesIDs\n\tmw.mms[string(name)] = mm\n\n\tif deleted {\n\t\tmw.tSketch.Add(name)\n\t} else {\n\t\tmw.sketch.Add(name)\n\t}\n}\n\n// WriteTo encodes the measurements to w.\nfunc (mw *MeasurementBlockWriter) WriteTo(w io.Writer) (n int64, err error) {\n\tvar t MeasurementBlockTrailer\n\n\t// The sketches must be set before calling WriteTo.\n\tif mw.sketch == nil {\n\t\treturn 0, 
errors.New(\"measurement sketch not set\")\n\t} else if mw.tSketch == nil {\n\t\treturn 0, errors.New(\"measurement tombstone sketch not set\")\n\t}\n\n\t// Sort names.\n\tnames := make([]string, 0, len(mw.mms))\n\tfor name := range mw.mms {\n\t\tnames = append(names, name)\n\t}\n\tsort.Strings(names)\n\n\t// Begin data section.\n\tt.Data.Offset = n\n\n\t// Write padding byte so no offsets are zero.\n\tif err := writeUint8To(w, 0, &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Encode key list.\n\tfor _, name := range names {\n\t\t// Retrieve measurement and save offset.\n\t\tmm := mw.mms[name]\n\t\tmm.offset = n\n\t\tmw.mms[name] = mm\n\n\t\t// Write measurement\n\t\tif err := mw.writeMeasurementTo(w, []byte(name), &mm, &n); err != nil {\n\t\t\treturn n, err\n\t\t}\n\t}\n\tt.Data.Size = n - t.Data.Offset\n\n\t// Build key hash map\n\tm := rhh.NewHashMap(rhh.Options{\n\t\tCapacity:   int64(len(names)),\n\t\tLoadFactor: LoadFactor,\n\t})\n\tfor name := range mw.mms {\n\t\tmm := mw.mms[name]\n\t\tm.Put([]byte(name), &mm)\n\t}\n\n\tt.HashIndex.Offset = n\n\n\t// Encode hash map length.\n\tif err := writeUint64To(w, uint64(m.Cap()), &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Encode hash map offset entries.\n\tfor i := int64(0); i < m.Cap(); i++ {\n\t\t_, v := m.Elem(i)\n\n\t\tvar offset int64\n\t\tif mm, ok := v.(*measurement); ok {\n\t\t\toffset = mm.offset\n\t\t}\n\n\t\tif err := writeUint64To(w, uint64(offset), &n); err != nil {\n\t\t\treturn n, err\n\t\t}\n\t}\n\tt.HashIndex.Size = n - t.HashIndex.Offset\n\n\t// Write the sketches out.\n\tt.Sketch.Offset = n\n\tif err := writeSketchTo(w, mw.sketch, &n); err != nil {\n\t\treturn n, err\n\t}\n\tt.Sketch.Size = n - t.Sketch.Offset\n\n\tt.TSketch.Offset = n\n\tif err := writeSketchTo(w, mw.tSketch, &n); err != nil {\n\t\treturn n, err\n\t}\n\tt.TSketch.Size = n - t.TSketch.Offset\n\n\t// Write trailer.\n\tnn, err := t.WriteTo(w)\n\tn += nn\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\treturn n, nil\n}\n\n// 
writeMeasurementTo encodes a single measurement entry into w.\nfunc (mw *MeasurementBlockWriter) writeMeasurementTo(w io.Writer, name []byte, mm *measurement, n *int64) error {\n\t// Write flag & tag block offset.\n\tif err := writeUint8To(w, mm.flag(), n); err != nil {\n\t\treturn err\n\t}\n\tif err := writeUint64To(w, uint64(mm.tagBlock.offset), n); err != nil {\n\t\treturn err\n\t} else if err := writeUint64To(w, uint64(mm.tagBlock.size), n); err != nil {\n\t\treturn err\n\t}\n\n\t// Write measurement name.\n\tif err := writeUvarintTo(w, uint64(len(name)), n); err != nil {\n\t\treturn err\n\t}\n\tif err := writeTo(w, name, n); err != nil {\n\t\treturn err\n\t}\n\n\t// Write series data to buffer.\n\tmw.buf.Reset()\n\tvar prev uint32\n\tfor _, seriesID := range mm.seriesIDs {\n\t\tdelta := seriesID - prev\n\n\t\tvar buf [binary.MaxVarintLen32]byte\n\t\ti := binary.PutUvarint(buf[:], uint64(delta))\n\t\tif _, err := mw.buf.Write(buf[:i]); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tprev = seriesID\n\t}\n\n\t// Write series count.\n\tif err := writeUvarintTo(w, uint64(len(mm.seriesIDs)), n); err != nil {\n\t\treturn err\n\t}\n\n\t// Write data size & buffer.\n\tif err := writeUvarintTo(w, uint64(mw.buf.Len()), n); err != nil {\n\t\treturn err\n\t}\n\tnn, err := mw.buf.WriteTo(w)\n\tif *n += nn; err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// writeSketchTo writes an estimator.Sketch into w, updating the number of bytes\n// written via n.\nfunc writeSketchTo(w io.Writer, s estimator.Sketch, n *int64) error {\n\t// TODO(edd): implement io.WriterTo on sketches.\n\tdata, err := s.MarshalBinary()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnn, err := w.Write(data)\n\t*n += int64(nn)\n\treturn err\n}\n\ntype measurement struct {\n\tdeleted  bool\n\ttagBlock struct {\n\t\toffset int64\n\t\tsize   int64\n\t}\n\tseriesIDs []uint32\n\toffset    int64\n}\n\nfunc (mm measurement) flag() byte {\n\tvar flag byte\n\tif mm.deleted {\n\t\tflag |= 
MeasurementTombstoneFlag\n\t}\n\treturn flag\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block_test.go",
    "content": "package tsi1_test\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/tsdb/index/tsi1\"\n)\n\nfunc TestReadMeasurementBlockTrailer(t *testing.T) {\n\t// Build a trailer\n\tvar (\n\t\tdata                       = make([]byte, tsi1.MeasurementTrailerSize)\n\t\tblockversion               = uint16(1)\n\t\tblockOffset, blockSize     = uint64(1), uint64(2500)\n\t\thashIdxOffset, hashIdxSize = uint64(2501), uint64(1000)\n\t\tsketchOffset, sketchSize   = uint64(3501), uint64(250)\n\t\ttsketchOffset, tsketchSize = uint64(3751), uint64(250)\n\t)\n\n\tbinary.BigEndian.PutUint64(data[0:], blockOffset)\n\tbinary.BigEndian.PutUint64(data[8:], blockSize)\n\tbinary.BigEndian.PutUint64(data[16:], hashIdxOffset)\n\tbinary.BigEndian.PutUint64(data[24:], hashIdxSize)\n\tbinary.BigEndian.PutUint64(data[32:], sketchOffset)\n\tbinary.BigEndian.PutUint64(data[40:], sketchSize)\n\tbinary.BigEndian.PutUint64(data[48:], tsketchOffset)\n\tbinary.BigEndian.PutUint64(data[56:], tsketchSize)\n\tbinary.BigEndian.PutUint16(data[64:], blockversion)\n\n\ttrailer, err := tsi1.ReadMeasurementBlockTrailer(data)\n\tif err != nil {\n\t\tt.Logf(\"trailer is: %#v\\n\", trailer)\n\t\tt.Fatal(err)\n\t}\n\n\tok := true &&\n\t\ttrailer.Version == int(blockversion) &&\n\t\ttrailer.Data.Offset == int64(blockOffset) &&\n\t\ttrailer.Data.Size == int64(blockSize) &&\n\t\ttrailer.HashIndex.Offset == int64(hashIdxOffset) &&\n\t\ttrailer.HashIndex.Size == int64(hashIdxSize) &&\n\t\ttrailer.Sketch.Offset == int64(sketchOffset) &&\n\t\ttrailer.Sketch.Size == int64(sketchSize) &&\n\t\ttrailer.TSketch.Offset == int64(tsketchOffset) &&\n\t\ttrailer.TSketch.Size == int64(tsketchSize)\n\n\tif !ok {\n\t\tt.Fatalf(\"got %v\\nwhich does not match expected\", trailer)\n\t}\n}\n\nfunc TestMeasurementBlockTrailer_WriteTo(t *testing.T) {\n\tvar trailer = tsi1.MeasurementBlockTrailer{\n\t\tVersion: 1,\n\t\tData: struct {\n\t\t\tOffset 
int64\n\t\t\tSize   int64\n\t\t}{Offset: 1, Size: 2},\n\t\tHashIndex: struct {\n\t\t\tOffset int64\n\t\t\tSize   int64\n\t\t}{Offset: 3, Size: 4},\n\t\tSketch: struct {\n\t\t\tOffset int64\n\t\t\tSize   int64\n\t\t}{Offset: 5, Size: 6},\n\t\tTSketch: struct {\n\t\t\tOffset int64\n\t\t\tSize   int64\n\t\t}{Offset: 7, Size: 8},\n\t}\n\n\tvar buf bytes.Buffer\n\tn, err := trailer.WriteTo(&buf)\n\tif got, exp := n, int64(tsi1.MeasurementTrailerSize); got != exp {\n\t\tt.Fatalf(\"got %v, exp %v\", got, exp)\n\t}\n\n\tif got := err; got != nil {\n\t\tt.Fatalf(\"got %v, exp %v\", got, nil)\n\t}\n\n\t// Verify trailer written correctly.\n\texp := \"\" +\n\t\t\"0000000000000001\" + // data offset\n\t\t\"0000000000000002\" + // data size\n\t\t\"0000000000000003\" + // hash index offset\n\t\t\"0000000000000004\" + // hash index size\n\t\t\"0000000000000005\" + // sketch offset\n\t\t\"0000000000000006\" + // sketch size\n\t\t\"0000000000000007\" + // tsketch offset\n\t\t\"0000000000000008\" + // tsketch size\n\t\t\"0001\" // version\n\n\tif got, exp := fmt.Sprintf(\"%x\", buf.String()), exp; got != exp {\n\t\tt.Fatalf(\"got %v, exp %v\", got, exp)\n\t}\n}\n\n// Ensure measurement blocks can be written and opened.\nfunc TestMeasurementBlockWriter(t *testing.T) {\n\tms := Measurements{\n\t\tNewMeasurement([]byte(\"foo\"), false, 100, 10, []uint32{1, 3, 4}),\n\t\tNewMeasurement([]byte(\"bar\"), false, 200, 20, []uint32{2}),\n\t\tNewMeasurement([]byte(\"baz\"), false, 300, 30, []uint32{5, 6}),\n\t}\n\n\t// Write the measurements to writer.\n\tmw := tsi1.NewMeasurementBlockWriter()\n\tfor _, m := range ms {\n\t\tmw.Add(m.Name, m.Deleted, m.Offset, m.Size, m.ids)\n\t}\n\n\t// Encode into buffer.\n\tvar buf bytes.Buffer\n\tif n, err := mw.WriteTo(&buf); err != nil {\n\t\tt.Fatal(err)\n\t} else if n == 0 {\n\t\tt.Fatal(\"expected bytes written\")\n\t}\n\n\t// Unmarshal into a block.\n\tvar blk tsi1.MeasurementBlock\n\tif err := blk.UnmarshalBinary(buf.Bytes()); err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify data in block.\n\tif e, ok := blk.Elem([]byte(\"foo\")); !ok {\n\t\tt.Fatal(\"expected element\")\n\t} else if e.TagBlockOffset() != 100 || e.TagBlockSize() != 10 {\n\t\tt.Fatalf(\"unexpected offset/size: %v/%v\", e.TagBlockOffset(), e.TagBlockSize())\n\t} else if !reflect.DeepEqual(e.SeriesIDs(), []uint32{1, 3, 4}) {\n\t\tt.Fatalf(\"unexpected series data: %#v\", e.SeriesIDs())\n\t}\n\n\tif e, ok := blk.Elem([]byte(\"bar\")); !ok {\n\t\tt.Fatal(\"expected element\")\n\t} else if e.TagBlockOffset() != 200 || e.TagBlockSize() != 20 {\n\t\tt.Fatalf(\"unexpected offset/size: %v/%v\", e.TagBlockOffset(), e.TagBlockSize())\n\t} else if !reflect.DeepEqual(e.SeriesIDs(), []uint32{2}) {\n\t\tt.Fatalf(\"unexpected series data: %#v\", e.SeriesIDs())\n\t}\n\n\tif e, ok := blk.Elem([]byte(\"baz\")); !ok {\n\t\tt.Fatal(\"expected element\")\n\t} else if e.TagBlockOffset() != 300 || e.TagBlockSize() != 30 {\n\t\tt.Fatalf(\"unexpected offset/size: %v/%v\", e.TagBlockOffset(), e.TagBlockSize())\n\t} else if !reflect.DeepEqual(e.SeriesIDs(), []uint32{5, 6}) {\n\t\tt.Fatalf(\"unexpected series data: %#v\", e.SeriesIDs())\n\t}\n\n\t// Verify non-existent measurement doesn't exist.\n\tif _, ok := blk.Elem([]byte(\"BAD_MEASUREMENT\")); ok {\n\t\tt.Fatal(\"expected no element\")\n\t}\n}\n\ntype Measurements []Measurement\n\ntype Measurement struct {\n\tName    []byte\n\tDeleted bool\n\tOffset  int64\n\tSize    int64\n\tids     []uint32\n}\n\nfunc NewMeasurement(name []byte, deleted bool, offset, size int64, ids []uint32) Measurement {\n\treturn Measurement{\n\t\tName:    name,\n\t\tDeleted: deleted,\n\t\tOffset:  offset,\n\t\tSize:    size,\n\t\tids:     ids,\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/series_block.go",
    "content": "package tsi1\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/bloom\"\n\t\"github.com/influxdata/influxdb/pkg/estimator\"\n\t\"github.com/influxdata/influxdb/pkg/estimator/hll\"\n\t\"github.com/influxdata/influxdb/pkg/mmap\"\n\t\"github.com/influxdata/influxdb/pkg/rhh\"\n)\n\n// ErrSeriesOverflow is returned when too many series are added to a series writer.\nvar ErrSeriesOverflow = errors.New(\"series overflow\")\n\n// Series list field size constants.\nconst (\n\t// Series list trailer field sizes.\n\tSeriesBlockTrailerSize = 0 +\n\t\t4 + 4 + // series data offset/size\n\t\t4 + 4 + 4 + // series index offset/size/capacity\n\t\t8 + 4 + 4 + // bloom filter false positive rate, offset/size\n\t\t4 + 4 + // series sketch offset/size\n\t\t4 + 4 + // tombstone series sketch offset/size\n\t\t4 + 4 + // series count and tombstone count\n\t\t0\n\n\t// Other field sizes\n\tSeriesCountSize = 4\n\tSeriesIDSize    = 4\n)\n\n// Series flag constants.\nconst (\n\t// Marks the series as having been deleted.\n\tSeriesTombstoneFlag = 0x01\n\n\t// Marks the following bytes as a hash index.\n\t// These bytes should be skipped by an iterator.\n\tSeriesHashIndexFlag = 0x02\n)\n\n// MaxSeriesBlockHashSize is the maximum number of series in a single hash.\nconst MaxSeriesBlockHashSize = (65536 * LoadFactor) / 100\n\n// SeriesBlock represents the section of the index that holds series data.\ntype SeriesBlock struct {\n\tdata []byte\n\n\t// Series data & index/capacity.\n\tseriesData    []byte\n\tseriesIndexes []seriesBlockIndex\n\n\t// Exact series counts for this block.\n\tseriesN    int32\n\ttombstoneN int32\n\n\t// Bloom filter used for fast series existence check.\n\tfilter *bloom.Filter\n\n\t// Series block sketch and tombstone sketch for cardinality estimation.\n\t// While we have 
exact counts for the block, these sketches allow us to\n\t// estimate cardinality across multiple blocks (which might contain\n\t// duplicate series).\n\tsketch, tsketch estimator.Sketch\n}\n\n// HasSeries returns flags indicating if the series exists and if it is tombstoned.\nfunc (blk *SeriesBlock) HasSeries(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool) {\n\toffset, tombstoned := blk.Offset(name, tags, buf)\n\treturn offset != 0, tombstoned\n}\n\n// Series returns a series element.\nfunc (blk *SeriesBlock) Series(name []byte, tags models.Tags) SeriesElem {\n\toffset, _ := blk.Offset(name, tags, nil)\n\tif offset == 0 {\n\t\treturn nil\n\t}\n\n\tvar e SeriesBlockElem\n\te.UnmarshalBinary(blk.data[offset:])\n\treturn &e\n}\n\n// Offset returns the byte offset of the series within the block.\nfunc (blk *SeriesBlock) Offset(name []byte, tags models.Tags, buf []byte) (offset uint32, tombstoned bool) {\n\t// Exit if no series indexes exist.\n\tif len(blk.seriesIndexes) == 0 {\n\t\treturn 0, false\n\t}\n\n\t// Compute series key.\n\tbuf = AppendSeriesKey(buf[:0], name, tags)\n\tbufN := uint32(len(buf))\n\n\t// Quickly check the bloom filter.\n\t// If the key doesn't exist then we know for sure that it doesn't exist.\n\t// If it does exist then we need to do a hash index check to verify. 
False\n\t// positives are possible with a bloom filter.\n\tif !blk.filter.Contains(buf) {\n\t\treturn 0, false\n\t}\n\n\t// Find the correct partition.\n\t// Use previous index unless an exact match on the min value.\n\ti := sort.Search(len(blk.seriesIndexes), func(i int) bool {\n\t\treturn CompareSeriesKeys(blk.seriesIndexes[i].min, buf) != -1\n\t})\n\tif i >= len(blk.seriesIndexes) || !bytes.Equal(blk.seriesIndexes[i].min, buf) {\n\t\ti--\n\t}\n\tseriesIndex := blk.seriesIndexes[i]\n\n\t// Search within partition.\n\tn := int64(seriesIndex.capacity)\n\thash := rhh.HashKey(buf)\n\tpos := hash % n\n\n\t// Track current distance\n\tvar d int64\n\tfor {\n\t\t// Find offset of series.\n\t\toffset := binary.BigEndian.Uint32(seriesIndex.data[pos*SeriesIDSize:])\n\t\tif offset == 0 {\n\t\t\treturn 0, false\n\t\t}\n\n\t\t// Evaluate encoded value matches expected.\n\t\tkey := ReadSeriesKey(blk.data[offset+1 : offset+1+bufN])\n\t\tif bytes.Equal(buf, key) {\n\t\t\treturn offset, (blk.data[offset] & SeriesTombstoneFlag) != 0\n\t\t}\n\n\t\t// Check if we've exceeded the probe distance.\n\t\tmax := rhh.Dist(rhh.HashKey(key), pos, n)\n\t\tif d > max {\n\t\t\treturn 0, false\n\t\t}\n\n\t\t// Move position forward.\n\t\tpos = (pos + 1) % n\n\t\td++\n\n\t\tif d > n {\n\t\t\treturn 0, false\n\t\t}\n\t}\n}\n\n// SeriesCount returns the number of series.\nfunc (blk *SeriesBlock) SeriesCount() uint32 {\n\treturn uint32(blk.seriesN + blk.tombstoneN)\n}\n\n// SeriesIterator returns an iterator over all the series.\nfunc (blk *SeriesBlock) SeriesIterator() SeriesIterator {\n\treturn &seriesBlockIterator{\n\t\tn:      blk.SeriesCount(),\n\t\toffset: 1,\n\t\tsblk:   blk,\n\t}\n}\n\n// UnmarshalBinary unpacks data into the series list.\n//\n// If data is an mmap then it should stay open until the series list is no\n// longer used because data access is performed directly from the byte slice.\nfunc (blk *SeriesBlock) UnmarshalBinary(data []byte) error {\n\tt := 
ReadSeriesBlockTrailer(data)\n\n\t// Save entire block.\n\tblk.data = data\n\n\t// Slice series data.\n\tblk.seriesData = data[t.Series.Data.Offset:]\n\tblk.seriesData = blk.seriesData[:t.Series.Data.Size]\n\n\t// Read in all index partitions.\n\tbuf := data[t.Series.Index.Offset:]\n\tbuf = buf[:t.Series.Index.Size]\n\tblk.seriesIndexes = make([]seriesBlockIndex, t.Series.Index.N)\n\tfor i := range blk.seriesIndexes {\n\t\tidx := &blk.seriesIndexes[i]\n\n\t\t// Read data block.\n\t\tvar offset, size uint32\n\t\toffset, buf = binary.BigEndian.Uint32(buf[:4]), buf[4:]\n\t\tsize, buf = binary.BigEndian.Uint32(buf[:4]), buf[4:]\n\t\tidx.data = blk.data[offset : offset+size]\n\n\t\t// Read block capacity.\n\t\tidx.capacity, buf = int32(binary.BigEndian.Uint32(buf[:4])), buf[4:]\n\n\t\t// Read min key.\n\t\tvar n uint32\n\t\tn, buf = binary.BigEndian.Uint32(buf[:4]), buf[4:]\n\t\tidx.min, buf = buf[:n], buf[n:]\n\t}\n\tif len(buf) != 0 {\n\t\treturn fmt.Errorf(\"data remaining in index list buffer: %d\", len(buf))\n\t}\n\n\t// Initialize bloom filter.\n\tfilter, err := bloom.NewFilterBuffer(data[t.Bloom.Offset:][:t.Bloom.Size], t.Bloom.K)\n\tif err != nil {\n\t\treturn err\n\t}\n\tblk.filter = filter\n\n\t// Initialise sketches. 
We're currently using HLL+.\n\tvar s, ts = hll.NewDefaultPlus(), hll.NewDefaultPlus()\n\tif err := s.UnmarshalBinary(data[t.Sketch.Offset:][:t.Sketch.Size]); err != nil {\n\t\treturn err\n\t}\n\tblk.sketch = s\n\n\tif err := ts.UnmarshalBinary(data[t.TSketch.Offset:][:t.TSketch.Size]); err != nil {\n\t\treturn err\n\t}\n\tblk.tsketch = ts\n\n\t// Set the series and tombstone counts\n\tblk.seriesN, blk.tombstoneN = t.SeriesN, t.TombstoneN\n\n\treturn nil\n}\n\n// seriesBlockIndex represents a partitioned series block index.\ntype seriesBlockIndex struct {\n\tdata     []byte\n\tmin      []byte\n\tcapacity int32\n}\n\n// seriesBlockIterator is an iterator over a series ids in a series list.\ntype seriesBlockIterator struct {\n\ti, n   uint32\n\toffset uint32\n\tsblk   *SeriesBlock\n\te      SeriesBlockElem // buffer\n}\n\n// Next returns the next series element.\nfunc (itr *seriesBlockIterator) Next() SeriesElem {\n\tfor {\n\t\t// Exit if at the end.\n\t\tif itr.i == itr.n {\n\t\t\treturn nil\n\t\t}\n\n\t\t// If the current element is a hash index partition then skip it.\n\t\tif flag := itr.sblk.data[itr.offset]; flag&SeriesHashIndexFlag != 0 {\n\t\t\t// Skip flag\n\t\t\titr.offset++\n\n\t\t\t// Read index capacity.\n\t\t\tn := binary.BigEndian.Uint32(itr.sblk.data[itr.offset:])\n\t\t\titr.offset += 4\n\n\t\t\t// Skip over index.\n\t\t\titr.offset += n * SeriesIDSize\n\t\t\tcontinue\n\t\t}\n\n\t\t// Read next element.\n\t\titr.e.UnmarshalBinary(itr.sblk.data[itr.offset:])\n\n\t\t// Move iterator and offset forward.\n\t\titr.i++\n\t\titr.offset += uint32(itr.e.size)\n\n\t\treturn &itr.e\n\t}\n}\n\n// seriesDecodeIterator decodes a series id iterator into unmarshaled elements.\ntype seriesDecodeIterator struct {\n\titr  seriesIDIterator\n\tsblk *SeriesBlock\n\te    SeriesBlockElem // buffer\n}\n\n// newSeriesDecodeIterator returns a new instance of seriesDecodeIterator.\nfunc newSeriesDecodeIterator(sblk *SeriesBlock, itr seriesIDIterator) *seriesDecodeIterator 
{\n\treturn &seriesDecodeIterator{sblk: sblk, itr: itr}\n}\n\n// Next returns the next series element.\nfunc (itr *seriesDecodeIterator) Next() SeriesElem {\n\t// Read next series id.\n\tid := itr.itr.next()\n\tif id == 0 {\n\t\treturn nil\n\t}\n\n\t// Read next element.\n\titr.e.UnmarshalBinary(itr.sblk.data[id:])\n\treturn &itr.e\n}\n\n// SeriesBlockElem represents a series element in the series list.\ntype SeriesBlockElem struct {\n\tflag byte\n\tname []byte\n\ttags models.Tags\n\tsize int\n}\n\n// Deleted returns true if the tombstone flag is set.\nfunc (e *SeriesBlockElem) Deleted() bool { return (e.flag & SeriesTombstoneFlag) != 0 }\n\n// Name returns the measurement name.\nfunc (e *SeriesBlockElem) Name() []byte { return e.name }\n\n// Tags returns the tag set.\nfunc (e *SeriesBlockElem) Tags() models.Tags { return e.tags }\n\n// Expr always returns a nil expression.\n// This is only used by higher level query planning.\nfunc (e *SeriesBlockElem) Expr() influxql.Expr { return nil }\n\n// UnmarshalBinary unmarshals data into e.\nfunc (e *SeriesBlockElem) UnmarshalBinary(data []byte) error {\n\tstart := len(data)\n\n\t// Parse flag data.\n\te.flag, data = data[0], data[1:]\n\n\t// Parse total size.\n\t_, szN := binary.Uvarint(data)\n\tdata = data[szN:]\n\n\t// Parse name.\n\tn, data := binary.BigEndian.Uint16(data[:2]), data[2:]\n\te.name, data = data[:n], data[n:]\n\n\t// Parse tags.\n\te.tags = e.tags[:0]\n\ttagN, szN := binary.Uvarint(data)\n\tdata = data[szN:]\n\n\tfor i := uint64(0); i < tagN; i++ {\n\t\tvar tag models.Tag\n\n\t\tn, data = binary.BigEndian.Uint16(data[:2]), data[2:]\n\t\ttag.Key, data = data[:n], data[n:]\n\n\t\tn, data = binary.BigEndian.Uint16(data[:2]), data[2:]\n\t\ttag.Value, data = data[:n], data[n:]\n\n\t\te.tags = append(e.tags, tag)\n\t}\n\n\t// Save length of elem.\n\te.size = start - len(data)\n\n\treturn nil\n}\n\n// AppendSeriesElem serializes flag/name/tags to dst and returns the new buffer.\nfunc AppendSeriesElem(dst 
[]byte, flag byte, name []byte, tags models.Tags) []byte {\n\tdst = append(dst, flag)\n\treturn AppendSeriesKey(dst, name, tags)\n}\n\n// AppendSeriesKey serializes name and tags to a byte slice.\n// The total length is prepended as a uvarint.\nfunc AppendSeriesKey(dst []byte, name []byte, tags models.Tags) []byte {\n\tbuf := make([]byte, binary.MaxVarintLen32)\n\torigLen := len(dst)\n\n\t// The tag count is variable encoded, so we need to know ahead of time what\n\t// the size of the tag count value will be.\n\ttcBuf := make([]byte, binary.MaxVarintLen32)\n\ttcSz := binary.PutUvarint(tcBuf, uint64(len(tags)))\n\n\t// Size of name/tags. Does not include total length.\n\tsize := 0 + //\n\t\t2 + // size of measurement\n\t\tlen(name) + // measurement\n\t\ttcSz + // size of number of tags\n\t\t(4 * len(tags)) + // length of each tag key and value\n\t\ttags.Size() // size of tag keys/values\n\n\t// Variable encode length.\n\ttotalSz := binary.PutUvarint(buf, uint64(size))\n\n\t// If caller doesn't provide a buffer then pre-allocate an exact one.\n\tif dst == nil {\n\t\tdst = make([]byte, 0, size+totalSz)\n\t}\n\n\t// Append total length.\n\tdst = append(dst, buf[:totalSz]...)\n\n\t// Append name.\n\tbinary.BigEndian.PutUint16(buf, uint16(len(name)))\n\tdst = append(dst, buf[:2]...)\n\tdst = append(dst, name...)\n\n\t// Append tag count.\n\tdst = append(dst, tcBuf[:tcSz]...)\n\n\t// Append tags.\n\tfor _, tag := range tags {\n\t\tbinary.BigEndian.PutUint16(buf, uint16(len(tag.Key)))\n\t\tdst = append(dst, buf[:2]...)\n\t\tdst = append(dst, tag.Key...)\n\n\t\tbinary.BigEndian.PutUint16(buf, uint16(len(tag.Value)))\n\t\tdst = append(dst, buf[:2]...)\n\t\tdst = append(dst, tag.Value...)\n\t}\n\n\t// Verify that the total length equals the encoded byte count.\n\tif got, exp := len(dst)-origLen, size+totalSz; got != exp {\n\t\tpanic(fmt.Sprintf(\"series key encoding does not match calculated total length: actual=%d, exp=%d, key=%x\", got, exp, dst))\n\t}\n\n\treturn 
dst\n}\n\n// ReadSeriesKey returns the series key from the beginning of the buffer.\nfunc ReadSeriesKey(data []byte) []byte {\n\tsz, n := binary.Uvarint(data)\n\treturn data[:int(sz)+n]\n}\n\nfunc CompareSeriesKeys(a, b []byte) int {\n\t// Handle 'nil' keys.\n\tif len(a) == 0 && len(b) == 0 {\n\t\treturn 0\n\t} else if len(a) == 0 {\n\t\treturn -1\n\t} else if len(b) == 0 {\n\t\treturn 1\n\t}\n\n\t// Read total size.\n\t_, i := binary.Uvarint(a)\n\ta = a[i:]\n\t_, i = binary.Uvarint(b)\n\tb = b[i:]\n\n\t// Read names.\n\tvar n uint16\n\tn, a = binary.BigEndian.Uint16(a), a[2:]\n\tname0, a := a[:n], a[n:]\n\tn, b = binary.BigEndian.Uint16(b), b[2:]\n\tname1, b := b[:n], b[n:]\n\n\t// Compare names, return if not equal.\n\tif cmp := bytes.Compare(name0, name1); cmp != 0 {\n\t\treturn cmp\n\t}\n\n\t// Read tag counts.\n\ttagN0, i := binary.Uvarint(a)\n\ta = a[i:]\n\n\ttagN1, i := binary.Uvarint(b)\n\tb = b[i:]\n\n\t// Compare each tag in order.\n\tfor i := uint64(0); ; i++ {\n\t\t// Check for EOF.\n\t\tif i == tagN0 && i == tagN1 {\n\t\t\treturn 0\n\t\t} else if i == tagN0 {\n\t\t\treturn -1\n\t\t} else if i == tagN1 {\n\t\t\treturn 1\n\t\t}\n\n\t\t// Read keys.\n\t\tvar key0, key1 []byte\n\t\tn, a = binary.BigEndian.Uint16(a), a[2:]\n\t\tkey0, a = a[:n], a[n:]\n\t\tn, b = binary.BigEndian.Uint16(b), b[2:]\n\t\tkey1, b = b[:n], b[n:]\n\n\t\t// Compare keys.\n\t\tif cmp := bytes.Compare(key0, key1); cmp != 0 {\n\t\t\treturn cmp\n\t\t}\n\n\t\t// Read values.\n\t\tvar value0, value1 []byte\n\t\tn, a = binary.BigEndian.Uint16(a), a[2:]\n\t\tvalue0, a = a[:n], a[n:]\n\t\tn, b = binary.BigEndian.Uint16(b), b[2:]\n\t\tvalue1, b = b[:n], b[n:]\n\n\t\t// Compare values.\n\t\tif cmp := bytes.Compare(value0, value1); cmp != 0 {\n\t\t\treturn cmp\n\t\t}\n\t}\n}\n\ntype seriesKeys [][]byte\n\nfunc (a seriesKeys) Len() int      { return len(a) }\nfunc (a seriesKeys) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a seriesKeys) Less(i, j int) bool {\n\treturn 
CompareSeriesKeys(a[i], a[j]) == -1\n}\n\n// SeriesBlockEncoder encodes series to a SeriesBlock in an underlying writer.\ntype SeriesBlockEncoder struct {\n\tw io.Writer\n\n\t// Double buffer for writing series.\n\t// First elem is current buffer, second is previous buffer.\n\tbuf [2][]byte\n\n\t// Track bytes written, sections, & offsets.\n\tn        int64\n\ttrailer  SeriesBlockTrailer\n\toffsets  *rhh.HashMap\n\tindexMin []byte\n\tindexes  []seriesBlockIndexEncodeInfo\n\n\t// Bloom filter to check for series existance.\n\tfilter *bloom.Filter\n\n\t// Series sketch and tombstoned series sketch. These must be\n\t// set before calling WriteTo.\n\tsketch, tSketch estimator.Sketch\n}\n\n// NewSeriesBlockEncoder returns a new instance of SeriesBlockEncoder.\nfunc NewSeriesBlockEncoder(w io.Writer, n uint32, m, k uint64) *SeriesBlockEncoder {\n\treturn &SeriesBlockEncoder{\n\t\tw: w,\n\n\t\toffsets: rhh.NewHashMap(rhh.Options{\n\t\t\tCapacity:   MaxSeriesBlockHashSize,\n\t\t\tLoadFactor: LoadFactor,\n\t\t}),\n\n\t\tfilter: bloom.NewFilter(m, k),\n\n\t\tsketch:  hll.NewDefaultPlus(),\n\t\ttSketch: hll.NewDefaultPlus(),\n\t}\n}\n\n// N returns the number of bytes written.\nfunc (enc *SeriesBlockEncoder) N() int64 { return enc.n }\n\n// Encode writes a series to the underlying writer.\n// The series must be lexicographical sorted after the previous encoded series.\nfunc (enc *SeriesBlockEncoder) Encode(name []byte, tags models.Tags, deleted bool) error {\n\t// An initial empty byte must be written.\n\tif err := enc.ensureHeaderWritten(); err != nil {\n\t\treturn err\n\t}\n\n\t// Generate the series element.\n\tbuf := AppendSeriesElem(enc.buf[0][:0], encodeSerieFlag(deleted), name, tags)\n\n\t// Verify series is after previous series.\n\tif enc.buf[1] != nil {\n\t\t// Skip the first byte since it is the flag. 
Remaining bytes are key.\n\t\tkey0, key1 := buf[1:], enc.buf[1][1:]\n\n\t\tif cmp := CompareSeriesKeys(key0, key1); cmp == -1 {\n\t\t\treturn fmt.Errorf(\"series out of order: prev=%q, new=%q\", enc.buf[1], buf)\n\t\t} else if cmp == 0 {\n\t\t\treturn fmt.Errorf(\"series already encoded: %s\", buf)\n\t\t}\n\t}\n\n\t// Flush a hash index, if necessary.\n\tif err := enc.checkFlushIndex(buf[1:]); err != nil {\n\t\treturn err\n\t}\n\n\t// Swap double buffer.\n\tenc.buf[0], enc.buf[1] = enc.buf[1], buf\n\n\t// Write encoded series to writer.\n\toffset := enc.n\n\tif err := writeTo(enc.w, buf, &enc.n); err != nil {\n\t\treturn err\n\t}\n\n\t// Save offset to generate index later.\n\t// Key is copied by the RHH map.\n\tenc.offsets.Put(buf[1:], uint32(offset))\n\n\t// Update bloom filter.\n\tenc.filter.Insert(buf[1:])\n\n\t// Update sketches & trailer.\n\tif deleted {\n\t\tenc.trailer.TombstoneN++\n\t\tenc.tSketch.Add(buf)\n\t} else {\n\t\tenc.trailer.SeriesN++\n\t\tenc.sketch.Add(buf)\n\t}\n\n\treturn nil\n}\n\n// Close writes the index and trailer.\n// This should be called at the end once all series have been encoded.\nfunc (enc *SeriesBlockEncoder) Close() error {\n\tif err := enc.ensureHeaderWritten(); err != nil {\n\t\treturn err\n\t}\n\n\t// Flush outstanding hash index.\n\tif err := enc.flushIndex(); err != nil {\n\t\treturn err\n\t}\n\n\t// Write dictionary-encoded series list.\n\tenc.trailer.Series.Data.Offset = 1\n\tenc.trailer.Series.Data.Size = int32(enc.n) - enc.trailer.Series.Data.Offset\n\n\t// Write dictionary-encoded series hash index.\n\tenc.trailer.Series.Index.Offset = int32(enc.n)\n\tif err := enc.writeIndexEntries(); err != nil {\n\t\treturn err\n\t}\n\tenc.trailer.Series.Index.Size = int32(enc.n) - enc.trailer.Series.Index.Offset\n\n\t// Flush bloom filter.\n\tenc.trailer.Bloom.K = enc.filter.K()\n\tenc.trailer.Bloom.Offset = int32(enc.n)\n\tif err := writeTo(enc.w, enc.filter.Bytes(), &enc.n); err != nil {\n\t\treturn 
err\n\t}\n\tenc.trailer.Bloom.Size = int32(enc.n) - enc.trailer.Bloom.Offset\n\n\t// Write the sketches out.\n\tenc.trailer.Sketch.Offset = int32(enc.n)\n\tif err := writeSketchTo(enc.w, enc.sketch, &enc.n); err != nil {\n\t\treturn err\n\t}\n\tenc.trailer.Sketch.Size = int32(enc.n) - enc.trailer.Sketch.Offset\n\n\tenc.trailer.TSketch.Offset = int32(enc.n)\n\tif err := writeSketchTo(enc.w, enc.tSketch, &enc.n); err != nil {\n\t\treturn err\n\t}\n\tenc.trailer.TSketch.Size = int32(enc.n) - enc.trailer.TSketch.Offset\n\n\t// Write trailer.\n\tnn, err := enc.trailer.WriteTo(enc.w)\n\tenc.n += nn\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// writeIndexEntries writes a list of series hash index entries.\nfunc (enc *SeriesBlockEncoder) writeIndexEntries() error {\n\tenc.trailer.Series.Index.N = int32(len(enc.indexes))\n\n\tfor _, idx := range enc.indexes {\n\t\t// Write offset/size.\n\t\tif err := writeUint32To(enc.w, uint32(idx.offset), &enc.n); err != nil {\n\t\t\treturn err\n\t\t} else if err := writeUint32To(enc.w, uint32(idx.size), &enc.n); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Write capacity.\n\t\tif err := writeUint32To(enc.w, uint32(idx.capacity), &enc.n); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Write min key.\n\t\tif err := writeUint32To(enc.w, uint32(len(idx.min)), &enc.n); err != nil {\n\t\t\treturn err\n\t\t} else if err := writeTo(enc.w, idx.min, &enc.n); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ensureHeaderWritten writes a single empty byte at the front of the file\n// so that series offsets will always be non-zero.\nfunc (enc *SeriesBlockEncoder) ensureHeaderWritten() error {\n\tif enc.n > 0 {\n\t\treturn nil\n\t}\n\n\tif _, err := enc.w.Write([]byte{0}); err != nil {\n\t\treturn err\n\t}\n\tenc.n++\n\n\treturn nil\n}\n\n// checkFlushIndex flushes a hash index segment if the index is too large.\n// The min argument specifies the lowest series key in the next index, if one is created.\nfunc 
(enc *SeriesBlockEncoder) checkFlushIndex(min []byte) error {\n\t// Ignore if there is still room in the index.\n\tif enc.offsets.Len() < MaxSeriesBlockHashSize {\n\t\treturn nil\n\t}\n\n\t// Flush index values.\n\tif err := enc.flushIndex(); err != nil {\n\t\treturn nil\n\t}\n\n\t// Reset index and save minimum series key.\n\tenc.offsets.Reset()\n\tenc.indexMin = make([]byte, len(min))\n\tcopy(enc.indexMin, min)\n\n\treturn nil\n}\n\n// flushIndex flushes the hash index segment.\nfunc (enc *SeriesBlockEncoder) flushIndex() error {\n\tif enc.offsets.Len() == 0 {\n\t\treturn nil\n\t}\n\n\t// Write index segment flag.\n\tif err := writeUint8To(enc.w, SeriesHashIndexFlag, &enc.n); err != nil {\n\t\treturn err\n\t}\n\t// Write index capacity.\n\t// This is used for skipping over when iterating sequentially.\n\tif err := writeUint32To(enc.w, uint32(enc.offsets.Cap()), &enc.n); err != nil {\n\t\treturn err\n\t}\n\n\t// Determine size.\n\tvar sz int64 = enc.offsets.Cap() * 4\n\n\t// Save current position to ensure size is correct by the end.\n\toffset := enc.n\n\n\t// Encode hash map offset entries.\n\tfor i := int64(0); i < enc.offsets.Cap(); i++ {\n\t\t_, v := enc.offsets.Elem(i)\n\t\tseriesOffset, _ := v.(uint32)\n\n\t\tif err := writeUint32To(enc.w, uint32(seriesOffset), &enc.n); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Determine total size.\n\tsize := enc.n - offset\n\n\t// Verify actual size equals calculated size.\n\tif size != sz {\n\t\treturn fmt.Errorf(\"series hash index size mismatch: %d <> %d\", size, sz)\n\t}\n\n\t// Add to index entries.\n\tenc.indexes = append(enc.indexes, seriesBlockIndexEncodeInfo{\n\t\toffset:   uint32(offset),\n\t\tsize:     uint32(size),\n\t\tcapacity: uint32(enc.offsets.Cap()),\n\t\tmin:      enc.indexMin,\n\t})\n\n\t// Clear next min.\n\tenc.indexMin = nil\n\n\treturn nil\n}\n\n// seriesBlockIndexEncodeInfo stores offset information for seriesBlockIndex structures.\ntype seriesBlockIndexEncodeInfo struct {\n\toffset   
uint32\n\tsize     uint32\n\tcapacity uint32\n\tmin      []byte\n}\n\n// ReadSeriesBlockTrailer returns the series list trailer from data.\nfunc ReadSeriesBlockTrailer(data []byte) SeriesBlockTrailer {\n\tvar t SeriesBlockTrailer\n\n\t// Slice trailer data.\n\tbuf := data[len(data)-SeriesBlockTrailerSize:]\n\n\t// Read series data info.\n\tt.Series.Data.Offset, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]\n\tt.Series.Data.Size, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]\n\n\t// Read series hash index info.\n\tt.Series.Index.Offset, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]\n\tt.Series.Index.Size, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]\n\tt.Series.Index.N, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]\n\n\t// Read bloom filter info.\n\tt.Bloom.K, buf = binary.BigEndian.Uint64(buf[0:8]), buf[8:]\n\tt.Bloom.Offset, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]\n\tt.Bloom.Size, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]\n\n\t// Read series sketch info.\n\tt.Sketch.Offset, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]\n\tt.Sketch.Size, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]\n\n\t// Read tombstone series sketch info.\n\tt.TSketch.Offset, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]\n\tt.TSketch.Size, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]\n\n\t// Read series & tombstone count.\n\tt.SeriesN, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]\n\tt.TombstoneN, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:]\n\n\treturn t\n}\n\n// SeriesBlockTrailer represents meta data written to the end of the series list.\ntype SeriesBlockTrailer struct {\n\tSeries struct {\n\t\tData struct {\n\t\t\tOffset int32\n\t\t\tSize   int32\n\t\t}\n\t\tIndex struct {\n\t\t\tOffset int32\n\t\t\tSize   int32\n\t\t\tN      int32\n\t\t}\n\t}\n\n\t// Bloom filter info.\n\tBloom struct {\n\t\tK      uint64\n\t\tOffset int32\n\t\tSize   
int32\n\t}\n\n\t// Offset and size of cardinality sketch for measurements.\n\tSketch struct {\n\t\tOffset int32\n\t\tSize   int32\n\t}\n\n\t// Offset and size of cardinality sketch for tombstoned measurements.\n\tTSketch struct {\n\t\tOffset int32\n\t\tSize   int32\n\t}\n\n\tSeriesN    int32\n\tTombstoneN int32\n}\n\nfunc (t SeriesBlockTrailer) WriteTo(w io.Writer) (n int64, err error) {\n\tif err := writeUint32To(w, uint32(t.Series.Data.Offset), &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint32To(w, uint32(t.Series.Data.Size), &n); err != nil {\n\t\treturn n, err\n\t}\n\n\tif err := writeUint32To(w, uint32(t.Series.Index.Offset), &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint32To(w, uint32(t.Series.Index.Size), &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint32To(w, uint32(t.Series.Index.N), &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Write bloom filter info.\n\tif err := writeUint64To(w, t.Bloom.K, &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint32To(w, uint32(t.Bloom.Offset), &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint32To(w, uint32(t.Bloom.Size), &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Write measurement sketch info.\n\tif err := writeUint32To(w, uint32(t.Sketch.Offset), &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint32To(w, uint32(t.Sketch.Size), &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Write tombstone measurement sketch info.\n\tif err := writeUint32To(w, uint32(t.TSketch.Offset), &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint32To(w, uint32(t.TSketch.Size), &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Write series and tombstone count.\n\tif err := writeUint32To(w, uint32(t.SeriesN), &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint32To(w, uint32(t.TombstoneN), &n); err != nil {\n\t\treturn n, err\n\t}\n\n\treturn n, nil\n}\n\ntype serie struct {\n\tname    []byte\n\ttags    
models.Tags\n\tdeleted bool\n\toffset  uint32\n}\n\nfunc (s *serie) flag() uint8 { return encodeSerieFlag(s.deleted) }\n\nfunc encodeSerieFlag(deleted bool) byte {\n\tvar flag byte\n\tif deleted {\n\t\tflag |= SeriesTombstoneFlag\n\t}\n\treturn flag\n}\n\ntype series []serie\n\nfunc (a series) Len() int      { return len(a) }\nfunc (a series) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a series) Less(i, j int) bool {\n\tif cmp := bytes.Compare(a[i].name, a[j].name); cmp != 0 {\n\t\treturn cmp == -1\n\t}\n\treturn models.CompareTags(a[i].tags, a[j].tags) == -1\n}\n\n// mapIndexFileSeriesBlock maps a writer to a series block.\n// Returns the series block and the mmap byte slice (if mmap is used).\n// The memory-mapped slice MUST be unmapped by the caller.\nfunc mapIndexFileSeriesBlock(w io.Writer) (*SeriesBlock, []byte, error) {\n\tswitch w := w.(type) {\n\tcase *bytes.Buffer:\n\t\treturn mapIndexFileSeriesBlockBuffer(w)\n\tcase *os.File:\n\t\treturn mapIndexFileSeriesBlockFile(w)\n\tdefault:\n\t\treturn nil, nil, fmt.Errorf(\"invalid tsi1 writer type: %T\", w)\n\t}\n}\n\n// mapIndexFileSeriesBlockBuffer maps a buffer to a series block.\nfunc mapIndexFileSeriesBlockBuffer(buf *bytes.Buffer) (*SeriesBlock, []byte, error) {\n\tdata := buf.Bytes()\n\tdata = data[len(FileSignature):] // Skip file signature.\n\n\tvar sblk SeriesBlock\n\tif err := sblk.UnmarshalBinary(data); err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn &sblk, nil, nil\n}\n\n// mapIndexFileSeriesBlockFile memory-maps a file to a series block.\nfunc mapIndexFileSeriesBlockFile(f *os.File) (*SeriesBlock, []byte, error) {\n\t// Open a read-only memory map of the existing data.\n\tdata, err := mmap.Map(f.Name())\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tsblk_data := data[len(FileSignature):] // Skip file signature.\n\n\t// Unmarshal block on top of mmap.\n\tvar sblk SeriesBlock\n\tif err := sblk.UnmarshalBinary(sblk_data); err != nil {\n\t\tmmap.Unmap(data)\n\t\treturn nil, nil, 
err\n\t}\n\n\treturn &sblk, data, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/series_block_test.go",
    "content": "package tsi1_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/tsdb/index/tsi1\"\n)\n\n// Ensure series block can be unmarshaled.\nfunc TestSeriesBlock_UnmarshalBinary(t *testing.T) {\n\tif _, err := CreateSeriesBlock([]Series{\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"west\"})},\n\t\t{Name: []byte(\"mem\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\n// Ensure series block contains the correct set of series.\nfunc TestSeriesBlock_Series(t *testing.T) {\n\tseries := []Series{\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t\t{Name: []byte(\"cpu\"), Tags: models.NewTags(map[string]string{\"region\": \"west\"})},\n\t\t{Name: []byte(\"mem\"), Tags: models.NewTags(map[string]string{\"region\": \"east\"})},\n\t}\n\tl := MustCreateSeriesBlock(series)\n\n\t// Verify total number of series is correct.\n\tif n := l.SeriesCount(); n != 3 {\n\t\tt.Fatalf(\"unexpected series count: %d\", n)\n\t}\n\n\t// Verify all series exist.\n\tfor i, s := range series {\n\t\tif e := l.Series(s.Name, s.Tags); e == nil {\n\t\t\tt.Fatalf(\"series does not exist: i=%d\", i)\n\t\t} else if !bytes.Equal(e.Name(), s.Name) || models.CompareTags(e.Tags(), s.Tags) != 0 {\n\t\t\tt.Fatalf(\"series element does not match: i=%d, %s (%s) != %s (%s)\", i, e.Name(), e.Tags().String(), s.Name, s.Tags.String())\n\t\t} else if e.Deleted() {\n\t\t\tt.Fatalf(\"series deleted: i=%d\", i)\n\t\t}\n\t}\n\n\t// Verify non-existent series doesn't exist.\n\tif e := l.Series([]byte(\"foo\"), models.NewTags(map[string]string{\"region\": \"north\"})); e != nil {\n\t\tt.Fatalf(\"series should not exist: %#v\", e)\n\t}\n}\n\n// CreateSeriesBlock returns an in-memory 
SeriesBlock with a list of series.\nfunc CreateSeriesBlock(a []Series) (*tsi1.SeriesBlock, error) {\n\tvar buf bytes.Buffer\n\n\t// Create writer and sketches. Add series.\n\tenc := tsi1.NewSeriesBlockEncoder(&buf, uint32(len(a)), M, K)\n\tfor i, s := range a {\n\t\tif err := enc.Encode(s.Name, s.Tags, s.Deleted); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"SeriesBlockWriter.Add(): i=%d, err=%s\", i, err)\n\t\t}\n\t}\n\n\t// Close and flush.\n\tif err := enc.Close(); err != nil {\n\t\treturn nil, fmt.Errorf(\"SeriesBlockWriter.WriteTo(): %s\", err)\n\t}\n\n\t// Unpack bytes into series block.\n\tvar blk tsi1.SeriesBlock\n\tif err := blk.UnmarshalBinary(buf.Bytes()); err != nil {\n\t\treturn nil, fmt.Errorf(\"SeriesBlock.UnmarshalBinary(): %s\", err)\n\t}\n\n\treturn &blk, nil\n}\n\n// MustCreateSeriesBlock calls CreateSeriesBlock(). Panic on error.\nfunc MustCreateSeriesBlock(a []Series) *tsi1.SeriesBlock {\n\tl, err := CreateSeriesBlock(a)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn l\n}\n\n// Series represents name/tagset pairs that are used in testing.\ntype Series struct {\n\tName    []byte\n\tTags    models.Tags\n\tDeleted bool\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block.go",
    "content": "package tsi1\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/influxdata/influxdb/pkg/rhh\"\n)\n\n// TagBlockVersion is the version of the tag block.\nconst TagBlockVersion = 1\n\n// Tag key flag constants.\nconst (\n\tTagKeyTombstoneFlag = 0x01\n)\n\n// Tag value flag constants.\nconst (\n\tTagValueTombstoneFlag = 0x01\n)\n\n// TagBlock variable size constants.\nconst (\n\t// TagBlock key block fields.\n\tTagKeyNSize      = 8\n\tTagKeyOffsetSize = 8\n\n\t// TagBlock value block fields.\n\tTagValueNSize      = 8\n\tTagValueOffsetSize = 8\n)\n\n// TagBlock errors.\nvar (\n\tErrUnsupportedTagBlockVersion = errors.New(\"unsupported tag block version\")\n\tErrTagBlockSizeMismatch       = errors.New(\"tag block size mismatch\")\n)\n\n// TagBlock represents tag key/value block for a single measurement.\ntype TagBlock struct {\n\tdata []byte\n\n\tvalueData []byte\n\tkeyData   []byte\n\thashData  []byte\n\n\tversion int // tag block version\n}\n\n// Version returns the encoding version parsed from the data.\n// Only valid after UnmarshalBinary() has been successfully invoked.\nfunc (blk *TagBlock) Version() int { return blk.version }\n\n// UnmarshalBinary unpacks data into the tag block. 
Tag block is not copied so data\n// should be retained and unchanged after being passed into this function.\nfunc (blk *TagBlock) UnmarshalBinary(data []byte) error {\n\t// Read trailer.\n\tt, err := ReadTagBlockTrailer(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Verify data size is correct.\n\tif int64(len(data)) != t.Size {\n\t\treturn ErrTagBlockSizeMismatch\n\t}\n\n\t// Save data section.\n\tblk.valueData = data[t.ValueData.Offset:]\n\tblk.valueData = blk.valueData[:t.ValueData.Size]\n\n\t// Save key data section.\n\tblk.keyData = data[t.KeyData.Offset:]\n\tblk.keyData = blk.keyData[:t.KeyData.Size]\n\n\t// Save hash index block.\n\tblk.hashData = data[t.HashIndex.Offset:]\n\tblk.hashData = blk.hashData[:t.HashIndex.Size]\n\n\t// Save entire block.\n\tblk.data = data\n\n\treturn nil\n}\n\n// TagKeyElem returns an element for a tag key.\n// Returns an element with a nil key if not found.\nfunc (blk *TagBlock) TagKeyElem(key []byte) TagKeyElem {\n\tkeyN := int64(binary.BigEndian.Uint64(blk.hashData[:TagKeyNSize]))\n\thash := rhh.HashKey(key)\n\tpos := hash % keyN\n\n\t// Track current distance\n\tvar d int64\n\tfor {\n\t\t// Find offset of tag key.\n\t\toffset := binary.BigEndian.Uint64(blk.hashData[TagKeyNSize+(pos*TagKeyOffsetSize):])\n\t\tif offset == 0 {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Parse into element.\n\t\tvar e TagBlockKeyElem\n\t\te.unmarshal(blk.data[offset:], blk.data)\n\n\t\t// Return if keys match.\n\t\tif bytes.Equal(e.key, key) {\n\t\t\treturn &e\n\t\t}\n\n\t\t// Check if we've exceeded the probe distance.\n\t\tif d > rhh.Dist(rhh.HashKey(e.key), pos, keyN) {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Move position forward.\n\t\tpos = (pos + 1) % keyN\n\t\td++\n\n\t\tif d > keyN {\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\n// TagValueElem returns an element for a tag value.\nfunc (blk *TagBlock) TagValueElem(key, value []byte) TagValueElem {\n\t// Find key element, exit if not found.\n\tkelem, _ := blk.TagKeyElem(key).(*TagBlockKeyElem)\n\tif kelem 
== nil {\n\t\treturn nil\n\t}\n\n\t// Slice hash index data.\n\thashData := kelem.hashIndex.buf\n\n\tvalueN := int64(binary.BigEndian.Uint64(hashData[:TagValueNSize]))\n\thash := rhh.HashKey(value)\n\tpos := hash % valueN\n\n\t// Track current distance\n\tvar d int64\n\tfor {\n\t\t// Find offset of tag value.\n\t\toffset := binary.BigEndian.Uint64(hashData[TagValueNSize+(pos*TagValueOffsetSize):])\n\t\tif offset == 0 {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Parse into element.\n\t\tvar e TagBlockValueElem\n\t\te.unmarshal(blk.data[offset:])\n\n\t\t// Return if values match.\n\t\tif bytes.Equal(e.value, value) {\n\t\t\treturn &e\n\t\t}\n\n\t\t// Check if we've exceeded the probe distance.\n\t\tmax := rhh.Dist(rhh.HashKey(e.value), pos, valueN)\n\t\tif d > max {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Move position forward.\n\t\tpos = (pos + 1) % valueN\n\t\td++\n\n\t\tif d > valueN {\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\n// TagKeyIterator returns an iterator over all the keys in the block.\nfunc (blk *TagBlock) TagKeyIterator() TagKeyIterator {\n\treturn &tagBlockKeyIterator{\n\t\tblk:     blk,\n\t\tkeyData: blk.keyData,\n\t}\n}\n\n// tagBlockKeyIterator represents an iterator over all keys in a TagBlock.\ntype tagBlockKeyIterator struct {\n\tblk     *TagBlock\n\tkeyData []byte\n\te       TagBlockKeyElem\n}\n\n// Next returns the next element in the iterator.\nfunc (itr *tagBlockKeyIterator) Next() TagKeyElem {\n\t// Exit when there is no data left.\n\tif len(itr.keyData) == 0 {\n\t\treturn nil\n\t}\n\n\t// Unmarshal next element & move data forward.\n\titr.e.unmarshal(itr.keyData, itr.blk.data)\n\titr.keyData = itr.keyData[itr.e.size:]\n\n\tassert(len(itr.e.Key()) > 0, \"invalid zero-length tag key\")\n\treturn &itr.e\n}\n\n// tagBlockValueIterator represents an iterator over all values for a tag key.\ntype tagBlockValueIterator struct {\n\tdata []byte\n\te    TagBlockValueElem\n}\n\n// Next returns the next element in the iterator.\nfunc (itr *tagBlockValueIterator) Next() 
TagValueElem {\n\t// Exit when there is no data left.\n\tif len(itr.data) == 0 {\n\t\treturn nil\n\t}\n\n\t// Unmarshal next element & move data forward.\n\titr.e.unmarshal(itr.data)\n\titr.data = itr.data[itr.e.size:]\n\n\tassert(len(itr.e.Value()) > 0, \"invalid zero-length tag value\")\n\treturn &itr.e\n}\n\n// TagBlockKeyElem represents a tag key element in a TagBlock.\ntype TagBlockKeyElem struct {\n\tflag byte\n\tkey  []byte\n\n\t// Value data\n\tdata struct {\n\t\toffset uint64\n\t\tsize   uint64\n\t\tbuf    []byte\n\t}\n\n\t// Value hash index data\n\thashIndex struct {\n\t\toffset uint64\n\t\tsize   uint64\n\t\tbuf    []byte\n\t}\n\n\tsize int\n\n\t// Reusable iterator.\n\titr tagBlockValueIterator\n}\n\n// Deleted returns true if the key has been tombstoned.\nfunc (e *TagBlockKeyElem) Deleted() bool { return (e.flag & TagKeyTombstoneFlag) != 0 }\n\n// Key returns the key name of the element.\nfunc (e *TagBlockKeyElem) Key() []byte { return e.key }\n\n// TagValueIterator returns an iterator over the key's values.\nfunc (e *TagBlockKeyElem) TagValueIterator() TagValueIterator {\n\treturn &tagBlockValueIterator{data: e.data.buf}\n}\n\n// unmarshal unmarshals buf into e.\n// The data argument represents the entire block data.\nfunc (e *TagBlockKeyElem) unmarshal(buf, data []byte) {\n\tstart := len(buf)\n\n\t// Parse flag data.\n\te.flag, buf = buf[0], buf[1:]\n\n\t// Parse data offset/size.\n\te.data.offset, buf = binary.BigEndian.Uint64(buf), buf[8:]\n\te.data.size, buf = binary.BigEndian.Uint64(buf), buf[8:]\n\n\t// Slice data.\n\te.data.buf = data[e.data.offset:]\n\te.data.buf = e.data.buf[:e.data.size]\n\n\t// Parse hash index offset/size.\n\te.hashIndex.offset, buf = binary.BigEndian.Uint64(buf), buf[8:]\n\te.hashIndex.size, buf = binary.BigEndian.Uint64(buf), buf[8:]\n\n\t// Slice hash index data.\n\te.hashIndex.buf = data[e.hashIndex.offset:]\n\te.hashIndex.buf = e.hashIndex.buf[:e.hashIndex.size]\n\n\t// Parse key.\n\tn, sz := 
binary.Uvarint(buf)\n\te.key, buf = buf[sz:sz+int(n)], buf[int(n)+sz:]\n\n\t// Save length of elem.\n\te.size = start - len(buf)\n}\n\n// TagBlockValueElem represents a tag value element.\ntype TagBlockValueElem struct {\n\tflag   byte\n\tvalue  []byte\n\tseries struct {\n\t\tn    uint32 // Series count\n\t\tdata []byte // Raw series data\n\t}\n\n\tsize int\n}\n\n// Deleted returns true if the element has been tombstoned.\nfunc (e *TagBlockValueElem) Deleted() bool { return (e.flag & TagValueTombstoneFlag) != 0 }\n\n// Value returns the value for the element.\nfunc (e *TagBlockValueElem) Value() []byte { return e.value }\n\n// SeriesN returns the series count.\nfunc (e *TagBlockValueElem) SeriesN() uint32 { return e.series.n }\n\n// SeriesData returns the raw series data.\nfunc (e *TagBlockValueElem) SeriesData() []byte { return e.series.data }\n\n// SeriesID returns series ID at an index.\nfunc (e *TagBlockValueElem) SeriesID(i int) uint32 {\n\treturn binary.BigEndian.Uint32(e.series.data[i*SeriesIDSize:])\n}\n\n// SeriesIDs returns a list decoded series ids.\nfunc (e *TagBlockValueElem) SeriesIDs() []uint32 {\n\ta := make([]uint32, 0, e.series.n)\n\tvar prev uint32\n\tfor data := e.series.data; len(data) > 0; {\n\t\tdelta, n := binary.Uvarint(data)\n\t\tdata = data[n:]\n\n\t\tseriesID := prev + uint32(delta)\n\t\ta = append(a, seriesID)\n\t\tprev = seriesID\n\t}\n\treturn a\n}\n\n// Size returns the size of the element.\nfunc (e *TagBlockValueElem) Size() int { return e.size }\n\n// unmarshal unmarshals buf into e.\nfunc (e *TagBlockValueElem) unmarshal(buf []byte) {\n\tstart := len(buf)\n\n\t// Parse flag data.\n\te.flag, buf = buf[0], buf[1:]\n\n\t// Parse value.\n\tsz, n := binary.Uvarint(buf)\n\te.value, buf = buf[n:n+int(sz)], buf[n+int(sz):]\n\n\t// Parse series count.\n\tv, n := binary.Uvarint(buf)\n\te.series.n = uint32(v)\n\tbuf = buf[n:]\n\n\t// Parse data block size.\n\tsz, n = binary.Uvarint(buf)\n\tbuf = buf[n:]\n\n\t// Save reference to series 
data.\n\te.series.data = buf[:sz]\n\tbuf = buf[sz:]\n\n\t// Save length of elem.\n\te.size = start - len(buf)\n}\n\n// TagBlockTrailerSize is the total size of the on-disk trailer.\nconst TagBlockTrailerSize = 0 +\n\t8 + 8 + // value data offset/size\n\t8 + 8 + // key data offset/size\n\t8 + 8 + // hash index offset/size\n\t8 + // size\n\t2 // version\n\n// TagBlockTrailer represents meta data at the end of a TagBlock.\ntype TagBlockTrailer struct {\n\tVersion int   // Encoding version\n\tSize    int64 // Total size w/ trailer\n\n\t// Offset & size of value data section.\n\tValueData struct {\n\t\tOffset int64\n\t\tSize   int64\n\t}\n\n\t// Offset & size of key data section.\n\tKeyData struct {\n\t\tOffset int64\n\t\tSize   int64\n\t}\n\n\t// Offset & size of hash map section.\n\tHashIndex struct {\n\t\tOffset int64\n\t\tSize   int64\n\t}\n}\n\n// WriteTo writes the trailer to w.\nfunc (t *TagBlockTrailer) WriteTo(w io.Writer) (n int64, err error) {\n\t// Write data info.\n\tif err := writeUint64To(w, uint64(t.ValueData.Offset), &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint64To(w, uint64(t.ValueData.Size), &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Write key data info.\n\tif err := writeUint64To(w, uint64(t.KeyData.Offset), &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint64To(w, uint64(t.KeyData.Size), &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Write hash index info.\n\tif err := writeUint64To(w, uint64(t.HashIndex.Offset), &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint64To(w, uint64(t.HashIndex.Size), &n); err != nil {\n\t\treturn n, err\n\t}\n\n\t// Write total size & encoding version.\n\tif err := writeUint64To(w, uint64(t.Size), &n); err != nil {\n\t\treturn n, err\n\t} else if err := writeUint16To(w, IndexFileVersion, &n); err != nil {\n\t\treturn n, err\n\t}\n\n\treturn n, nil\n}\n\n// ReadTagBlockTrailer returns the tag block trailer from data.\nfunc ReadTagBlockTrailer(data []byte) 
(TagBlockTrailer, error) {\n\tvar t TagBlockTrailer\n\n\t// Read version.\n\tt.Version = int(binary.BigEndian.Uint16(data[len(data)-2:]))\n\tif t.Version != TagBlockVersion {\n\t\treturn t, ErrUnsupportedTagBlockVersion\n\t}\n\n\t// Slice trailer data.\n\tbuf := data[len(data)-TagBlockTrailerSize:]\n\n\t// Read data section info.\n\tt.ValueData.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]\n\tt.ValueData.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]\n\n\t// Read key section info.\n\tt.KeyData.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]\n\tt.KeyData.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]\n\n\t// Read hash section info.\n\tt.HashIndex.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]\n\tt.HashIndex.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]\n\n\t// Read total size.\n\tt.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]\n\n\treturn t, nil\n}\n\n// TagBlockEncoder encodes a tags to a TagBlock section.\ntype TagBlockEncoder struct {\n\tw   io.Writer\n\tbuf bytes.Buffer\n\n\t// Track value offsets.\n\toffsets *rhh.HashMap\n\n\t// Track bytes written, sections.\n\tn       int64\n\ttrailer TagBlockTrailer\n\n\t// Track tag keys.\n\tkeys []tagKeyEncodeEntry\n}\n\n// NewTagBlockEncoder returns a new TagBlockEncoder.\nfunc NewTagBlockEncoder(w io.Writer) *TagBlockEncoder {\n\treturn &TagBlockEncoder{\n\t\tw:       w,\n\t\toffsets: rhh.NewHashMap(rhh.Options{LoadFactor: LoadFactor}),\n\t\ttrailer: TagBlockTrailer{\n\t\t\tVersion: TagBlockVersion,\n\t\t},\n\t}\n}\n\n// N returns the number of bytes written.\nfunc (enc *TagBlockEncoder) N() int64 { return enc.n }\n\n// EncodeKey writes a tag key to the underlying writer.\nfunc (enc *TagBlockEncoder) EncodeKey(key []byte, deleted bool) error {\n\t// An initial empty byte must be written.\n\tif err := enc.ensureHeaderWritten(); err != nil {\n\t\treturn err\n\t}\n\n\t// Verify key is lexicographically after 
previous key.\n\tif len(enc.keys) > 0 {\n\t\tprev := enc.keys[len(enc.keys)-1].key\n\t\tif cmp := bytes.Compare(prev, key); cmp == 1 {\n\t\t\treturn fmt.Errorf(\"tag key out of order: prev=%s, new=%s\", prev, key)\n\t\t} else if cmp == 0 {\n\t\t\treturn fmt.Errorf(\"tag key already encoded: %s\", key)\n\t\t}\n\t}\n\n\t// Flush values section for key.\n\tif err := enc.flushValueHashIndex(); err != nil {\n\t\treturn err\n\t}\n\n\t// Append key on to the end of the key list.\n\tentry := tagKeyEncodeEntry{\n\t\tkey:     key,\n\t\tdeleted: deleted,\n\t}\n\tentry.data.offset = enc.n\n\n\tenc.keys = append(enc.keys, entry)\n\n\treturn nil\n}\n\n// EncodeValue writes a tag value to the underlying writer.\n// The tag key must be lexicographical sorted after the previous encoded tag key.\nfunc (enc *TagBlockEncoder) EncodeValue(value []byte, deleted bool, seriesIDs []uint32) error {\n\tif len(enc.keys) == 0 {\n\t\treturn fmt.Errorf(\"tag key must be encoded before encoding values\")\n\t} else if len(value) == 0 {\n\t\treturn fmt.Errorf(\"zero length tag value not allowed\")\n\t}\n\n\t// Save offset to hash map.\n\tenc.offsets.Put(value, enc.n)\n\n\t// Write flag.\n\tif err := writeUint8To(enc.w, encodeTagValueFlag(deleted), &enc.n); err != nil {\n\t\treturn err\n\t}\n\n\t// Write value.\n\tif err := writeUvarintTo(enc.w, uint64(len(value)), &enc.n); err != nil {\n\t\treturn err\n\t} else if err := writeTo(enc.w, value, &enc.n); err != nil {\n\t\treturn err\n\t}\n\n\t// Build series data in buffer.\n\tenc.buf.Reset()\n\tvar prev uint32\n\tfor _, seriesID := range seriesIDs {\n\t\tdelta := seriesID - prev\n\n\t\tvar buf [binary.MaxVarintLen32]byte\n\t\ti := binary.PutUvarint(buf[:], uint64(delta))\n\t\tif _, err := enc.buf.Write(buf[:i]); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tprev = seriesID\n\t}\n\n\t// Write series count.\n\tif err := writeUvarintTo(enc.w, uint64(len(seriesIDs)), &enc.n); err != nil {\n\t\treturn err\n\t}\n\n\t// Write data size & buffer.\n\tif err := 
writeUvarintTo(enc.w, uint64(enc.buf.Len()), &enc.n); err != nil {\n\t\treturn err\n\t}\n\tnn, err := enc.buf.WriteTo(enc.w)\n\tif enc.n += nn; err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// Close flushes the trailer of the encoder to the writer.\nfunc (enc *TagBlockEncoder) Close() error {\n\t// Flush last value set.\n\tif err := enc.ensureHeaderWritten(); err != nil {\n\t\treturn err\n\t} else if err := enc.flushValueHashIndex(); err != nil {\n\t\treturn err\n\t}\n\n\t// Save ending position of entire data block.\n\tenc.trailer.ValueData.Size = enc.n - enc.trailer.ValueData.Offset\n\n\t// Write key block to point to value blocks.\n\tif err := enc.encodeTagKeyBlock(); err != nil {\n\t\treturn err\n\t}\n\n\t// Compute total size w/ trailer.\n\tenc.trailer.Size = enc.n + TagBlockTrailerSize\n\n\t// Write trailer.\n\tnn, err := enc.trailer.WriteTo(enc.w)\n\tenc.n += nn\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// ensureHeaderWritten writes a single byte to offset the rest of the block.\nfunc (enc *TagBlockEncoder) ensureHeaderWritten() error {\n\tif enc.n > 0 {\n\t\treturn nil\n\t} else if _, err := enc.w.Write([]byte{0}); err != nil {\n\t\treturn err\n\t}\n\n\tenc.n++\n\tenc.trailer.ValueData.Offset = enc.n\n\n\treturn nil\n}\n\n// flushValueHashIndex builds writes the hash map at the end of a value set.\nfunc (enc *TagBlockEncoder) flushValueHashIndex() error {\n\t// Ignore if no keys have been written.\n\tif len(enc.keys) == 0 {\n\t\treturn nil\n\t}\n\tkey := &enc.keys[len(enc.keys)-1]\n\n\t// Save size of data section.\n\tkey.data.size = enc.n - key.data.offset\n\n\t// Encode hash map length.\n\tkey.hashIndex.offset = enc.n\n\tif err := writeUint64To(enc.w, uint64(enc.offsets.Cap()), &enc.n); err != nil {\n\t\treturn err\n\t}\n\n\t// Encode hash map offset entries.\n\tfor i := int64(0); i < enc.offsets.Cap(); i++ {\n\t\t_, v := enc.offsets.Elem(i)\n\t\toffset, _ := v.(int64)\n\t\tif err := writeUint64To(enc.w, uint64(offset), 
&enc.n); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tkey.hashIndex.size = enc.n - key.hashIndex.offset\n\n\t// Clear offsets.\n\tenc.offsets = rhh.NewHashMap(rhh.Options{LoadFactor: LoadFactor})\n\n\treturn nil\n}\n\n// encodeTagKeyBlock encodes the keys section to the writer.\nfunc (enc *TagBlockEncoder) encodeTagKeyBlock() error {\n\toffsets := rhh.NewHashMap(rhh.Options{Capacity: int64(len(enc.keys)), LoadFactor: LoadFactor})\n\n\t// Encode key list in sorted order.\n\tenc.trailer.KeyData.Offset = enc.n\n\tfor i := range enc.keys {\n\t\tentry := &enc.keys[i]\n\n\t\t// Save current offset so we can use it in the hash index.\n\t\toffsets.Put(entry.key, enc.n)\n\n\t\tif err := writeUint8To(enc.w, encodeTagKeyFlag(entry.deleted), &enc.n); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Write value data offset & size.\n\t\tif err := writeUint64To(enc.w, uint64(entry.data.offset), &enc.n); err != nil {\n\t\t\treturn err\n\t\t} else if err := writeUint64To(enc.w, uint64(entry.data.size), &enc.n); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Write value hash index offset & size.\n\t\tif err := writeUint64To(enc.w, uint64(entry.hashIndex.offset), &enc.n); err != nil {\n\t\t\treturn err\n\t\t} else if err := writeUint64To(enc.w, uint64(entry.hashIndex.size), &enc.n); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Write key length and data.\n\t\tif err := writeUvarintTo(enc.w, uint64(len(entry.key)), &enc.n); err != nil {\n\t\t\treturn err\n\t\t} else if err := writeTo(enc.w, entry.key, &enc.n); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tenc.trailer.KeyData.Size = enc.n - enc.trailer.KeyData.Offset\n\n\t// Encode hash map length.\n\tenc.trailer.HashIndex.Offset = enc.n\n\tif err := writeUint64To(enc.w, uint64(offsets.Cap()), &enc.n); err != nil {\n\t\treturn err\n\t}\n\n\t// Encode hash map offset entries.\n\tfor i := int64(0); i < offsets.Cap(); i++ {\n\t\t_, v := offsets.Elem(i)\n\t\toffset, _ := v.(int64)\n\t\tif err := writeUint64To(enc.w, uint64(offset), 
&enc.n); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tenc.trailer.HashIndex.Size = enc.n - enc.trailer.HashIndex.Offset\n\n\treturn nil\n}\n\ntype tagKeyEncodeEntry struct {\n\tkey     []byte\n\tdeleted bool\n\n\tdata struct {\n\t\toffset int64\n\t\tsize   int64\n\t}\n\thashIndex struct {\n\t\toffset int64\n\t\tsize   int64\n\t}\n}\n\nfunc encodeTagKeyFlag(deleted bool) byte {\n\tvar flag byte\n\tif deleted {\n\t\tflag |= TagKeyTombstoneFlag\n\t}\n\treturn flag\n}\n\nfunc encodeTagValueFlag(deleted bool) byte {\n\tvar flag byte\n\tif deleted {\n\t\tflag |= TagValueTombstoneFlag\n\t}\n\treturn flag\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block_test.go",
    "content": "package tsi1_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/tsdb/index/tsi1\"\n)\n\n// Ensure tag blocks can be written and opened.\nfunc TestTagBlockWriter(t *testing.T) {\n\t// Write 3 series to writer.\n\tvar buf bytes.Buffer\n\tenc := tsi1.NewTagBlockEncoder(&buf)\n\n\tif err := enc.EncodeKey([]byte(\"host\"), false); err != nil {\n\t\tt.Fatal(err)\n\t} else if err := enc.EncodeValue([]byte(\"server0\"), false, []uint32{1}); err != nil {\n\t\tt.Fatal(err)\n\t} else if err := enc.EncodeValue([]byte(\"server1\"), false, []uint32{2}); err != nil {\n\t\tt.Fatal(err)\n\t} else if err := enc.EncodeValue([]byte(\"server2\"), false, []uint32{3}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := enc.EncodeKey([]byte(\"region\"), false); err != nil {\n\t\tt.Fatal(err)\n\t} else if err := enc.EncodeValue([]byte(\"us-east\"), false, []uint32{1, 2}); err != nil {\n\t\tt.Fatal(err)\n\t} else if err := enc.EncodeValue([]byte(\"us-west\"), false, []uint32{3}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Flush encoder.\n\tif err := enc.Close(); err != nil {\n\t\tt.Fatal(err)\n\t} else if int(enc.N()) != buf.Len() {\n\t\tt.Fatalf(\"bytes written mismatch: %d, expected %d\", enc.N(), buf.Len())\n\t}\n\n\t// Unmarshal into a block.\n\tvar blk tsi1.TagBlock\n\tif err := blk.UnmarshalBinary(buf.Bytes()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify data.\n\tif e := blk.TagValueElem([]byte(\"region\"), []byte(\"us-east\")); e == nil {\n\t\tt.Fatal(\"expected element\")\n\t} else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint32{1, 2}) {\n\t\tt.Fatalf(\"unexpected series ids: %#v\", a)\n\t}\n\n\tif e := blk.TagValueElem([]byte(\"region\"), []byte(\"us-west\")); e == nil {\n\t\tt.Fatal(\"expected element\")\n\t} else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint32{3}) {\n\t\tt.Fatalf(\"unexpected series ids: %#v\", a)\n\t}\n\tif e := 
blk.TagValueElem([]byte(\"host\"), []byte(\"server0\")); e == nil {\n\t\tt.Fatal(\"expected element\")\n\t} else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint32{1}) {\n\t\tt.Fatalf(\"unexpected series ids: %#v\", a)\n\t}\n\tif e := blk.TagValueElem([]byte(\"host\"), []byte(\"server1\")); e == nil {\n\t\tt.Fatal(\"expected element\")\n\t} else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint32{2}) {\n\t\tt.Fatalf(\"unexpected series ids: %#v\", a)\n\t}\n\tif e := blk.TagValueElem([]byte(\"host\"), []byte(\"server2\")); e == nil {\n\t\tt.Fatal(\"expected element\")\n\t} else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint32{3}) {\n\t\tt.Fatalf(\"unexpected series ids: %#v\", a)\n\t}\n}\n\nvar benchmarkTagBlock10x1000 *tsi1.TagBlock\nvar benchmarkTagBlock100x1000 *tsi1.TagBlock\nvar benchmarkTagBlock1000x1000 *tsi1.TagBlock\nvar benchmarkTagBlock1x1000000 *tsi1.TagBlock\n\nfunc BenchmarkTagBlock_SeriesN_10_1000(b *testing.B) {\n\tbenchmarkTagBlock_SeriesN(b, 10, 1000, &benchmarkTagBlock10x1000)\n}\nfunc BenchmarkTagBlock_SeriesN_100_1000(b *testing.B) {\n\tbenchmarkTagBlock_SeriesN(b, 100, 1000, &benchmarkTagBlock100x1000)\n}\nfunc BenchmarkTagBlock_SeriesN_1000_1000(b *testing.B) {\n\tbenchmarkTagBlock_SeriesN(b, 1000, 1000, &benchmarkTagBlock1000x1000)\n}\nfunc BenchmarkTagBlock_SeriesN_1_1000000(b *testing.B) {\n\tbenchmarkTagBlock_SeriesN(b, 1, 1000000, &benchmarkTagBlock1x1000000)\n}\n\nfunc benchmarkTagBlock_SeriesN(b *testing.B, tagN, valueN int, blk **tsi1.TagBlock) {\n\tif (*blk) == nil {\n\t\tvar buf bytes.Buffer\n\t\tenc := tsi1.NewTagBlockEncoder(&buf)\n\n\t\t// Write block.\n\t\tfor i := 0; i < tagN; i++ {\n\t\t\tif err := enc.EncodeKey([]byte(fmt.Sprintf(\"%08d\", i)), false); err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\n\t\t\tfor j := 0; j < valueN; j++ {\n\t\t\t\tif err := enc.EncodeValue([]byte(fmt.Sprintf(\"%08d\", j)), false, []uint32{1}); err != nil 
{\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Flush encoder.\n\t\tif err := enc.Close(); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tb.Log(\"size\", buf.Len())\n\n\t\t// Unmarshal into a block.\n\t\t*blk = &tsi1.TagBlock{}\n\t\tif err := (*blk).UnmarshalBinary(buf.Bytes()); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n\n\t// Benchmark lookups.\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tkey, value := []byte(\"0\"), []byte(\"0\")\n\tfor i := 0; i < b.N; i++ {\n\t\tif e := (*blk).TagValueElem(key, value); e == nil {\n\t\t\tb.Fatal(\"expected element\")\n\t\t} else if n := e.(*tsi1.TagBlockValueElem).SeriesN(); n != 1 {\n\t\t\tb.Fatalf(\"unexpected series count: %d\", n)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1.go",
    "content": "package tsi1\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n)\n\n// LoadFactor is the fill percent for RHH indexes.\nconst LoadFactor = 80\n\n// MeasurementElem represents a generic measurement element.\ntype MeasurementElem interface {\n\tName() []byte\n\tDeleted() bool\n}\n\n// MeasurementElems represents a list of MeasurementElem.\ntype MeasurementElems []MeasurementElem\n\nfunc (a MeasurementElems) Len() int           { return len(a) }\nfunc (a MeasurementElems) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a MeasurementElems) Less(i, j int) bool { return bytes.Compare(a[i].Name(), a[j].Name()) == -1 }\n\n// MeasurementIterator represents a iterator over a list of measurements.\ntype MeasurementIterator interface {\n\tNext() MeasurementElem\n}\n\n// MergeMeasurementIterators returns an iterator that merges a set of iterators.\n// Iterators that are first in the list take precendence and a deletion by those\n// early iterators will invalidate elements by later iterators.\nfunc MergeMeasurementIterators(itrs ...MeasurementIterator) MeasurementIterator {\n\tif len(itrs) == 0 {\n\t\treturn nil\n\t}\n\n\treturn &measurementMergeIterator{\n\t\te:    make(measurementMergeElem, 0, len(itrs)),\n\t\tbuf:  make([]MeasurementElem, len(itrs)),\n\t\titrs: itrs,\n\t}\n}\n\ntype measurementMergeIterator struct {\n\te    measurementMergeElem\n\tbuf  []MeasurementElem\n\titrs []MeasurementIterator\n}\n\n// Next returns the element with the next lowest name across the iterators.\n//\n// If multiple iterators contain the same name then the first is returned\n// and the remaining ones are skipped.\nfunc (itr *measurementMergeIterator) Next() MeasurementElem {\n\t// Find next lowest name amongst the buffers.\n\tvar name []byte\n\tfor i, buf := range itr.buf {\n\t\t// Fill buffer if empty.\n\t\tif buf == nil 
{\n\t\t\tif buf = itr.itrs[i].Next(); buf != nil {\n\t\t\t\titr.buf[i] = buf\n\t\t\t} else {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// Find next lowest name.\n\t\tif name == nil || bytes.Compare(itr.buf[i].Name(), name) == -1 {\n\t\t\tname = itr.buf[i].Name()\n\t\t}\n\t}\n\n\t// Return nil if no elements remaining.\n\tif name == nil {\n\t\treturn nil\n\t}\n\n\t// Merge all elements together and clear buffers.\n\titr.e = itr.e[:0]\n\tfor i, buf := range itr.buf {\n\t\tif buf == nil || !bytes.Equal(buf.Name(), name) {\n\t\t\tcontinue\n\t\t}\n\t\titr.e = append(itr.e, buf)\n\t\titr.buf[i] = nil\n\t}\n\treturn itr.e\n}\n\n// measurementMergeElem represents a merged measurement element.\ntype measurementMergeElem []MeasurementElem\n\n// Name returns the name of the first element.\nfunc (p measurementMergeElem) Name() []byte {\n\tif len(p) == 0 {\n\t\treturn nil\n\t}\n\treturn p[0].Name()\n}\n\n// Deleted returns the deleted flag of the first element.\nfunc (p measurementMergeElem) Deleted() bool {\n\tif len(p) == 0 {\n\t\treturn false\n\t}\n\treturn p[0].Deleted()\n}\n\n// filterUndeletedMeasurementIterator returns all measurements which are not deleted.\ntype filterUndeletedMeasurementIterator struct {\n\titr MeasurementIterator\n}\n\n// FilterUndeletedMeasurementIterator returns an iterator which filters all deleted measurement.\nfunc FilterUndeletedMeasurementIterator(itr MeasurementIterator) MeasurementIterator {\n\tif itr == nil {\n\t\treturn nil\n\t}\n\treturn &filterUndeletedMeasurementIterator{itr: itr}\n}\n\nfunc (itr *filterUndeletedMeasurementIterator) Next() MeasurementElem {\n\tfor {\n\t\te := itr.itr.Next()\n\t\tif e == nil {\n\t\t\treturn nil\n\t\t} else if e.Deleted() {\n\t\t\tcontinue\n\t\t}\n\t\treturn e\n\t}\n}\n\n// TagKeyElem represents a generic tag key element.\ntype TagKeyElem interface {\n\tKey() []byte\n\tDeleted() bool\n\tTagValueIterator() TagValueIterator\n}\n\n// TagKeyIterator represents a iterator over a list of tag keys.\ntype 
TagKeyIterator interface {\n\tNext() TagKeyElem\n}\n\n// MergeTagKeyIterators returns an iterator that merges a set of iterators.\n// Iterators that are first in the list take precendence and a deletion by those\n// early iterators will invalidate elements by later iterators.\nfunc MergeTagKeyIterators(itrs ...TagKeyIterator) TagKeyIterator {\n\tif len(itrs) == 0 {\n\t\treturn nil\n\t}\n\n\treturn &tagKeyMergeIterator{\n\t\te:    make(tagKeyMergeElem, 0, len(itrs)),\n\t\tbuf:  make([]TagKeyElem, len(itrs)),\n\t\titrs: itrs,\n\t}\n}\n\ntype tagKeyMergeIterator struct {\n\te    tagKeyMergeElem\n\tbuf  []TagKeyElem\n\titrs []TagKeyIterator\n}\n\n// Next returns the element with the next lowest key across the iterators.\n//\n// If multiple iterators contain the same key then the first is returned\n// and the remaining ones are skipped.\nfunc (itr *tagKeyMergeIterator) Next() TagKeyElem {\n\t// Find next lowest key amongst the buffers.\n\tvar key []byte\n\tfor i, buf := range itr.buf {\n\t\t// Fill buffer.\n\t\tif buf == nil {\n\t\t\tif buf = itr.itrs[i].Next(); buf != nil {\n\t\t\t\titr.buf[i] = buf\n\t\t\t} else {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// Find next lowest key.\n\t\tif key == nil || bytes.Compare(buf.Key(), key) == -1 {\n\t\t\tkey = buf.Key()\n\t\t}\n\t}\n\n\t// Return nil if no elements remaining.\n\tif key == nil {\n\t\treturn nil\n\t}\n\n\t// Merge elements together & clear buffer.\n\titr.e = itr.e[:0]\n\tfor i, buf := range itr.buf {\n\t\tif buf == nil || !bytes.Equal(buf.Key(), key) {\n\t\t\tcontinue\n\t\t}\n\t\titr.e = append(itr.e, buf)\n\t\titr.buf[i] = nil\n\t}\n\n\treturn itr.e\n}\n\n// tagKeyMergeElem represents a merged tag key element.\ntype tagKeyMergeElem []TagKeyElem\n\n// Key returns the key of the first element.\nfunc (p tagKeyMergeElem) Key() []byte {\n\tif len(p) == 0 {\n\t\treturn nil\n\t}\n\treturn p[0].Key()\n}\n\n// Deleted returns the deleted flag of the first element.\nfunc (p tagKeyMergeElem) Deleted() bool {\n\tif len(p) 
== 0 {\n\t\treturn false\n\t}\n\treturn p[0].Deleted()\n}\n\n// TagValueIterator returns a merge iterator for all elements until a tombstone occurs.\nfunc (p tagKeyMergeElem) TagValueIterator() TagValueIterator {\n\tif len(p) == 0 {\n\t\treturn nil\n\t}\n\n\ta := make([]TagValueIterator, 0, len(p))\n\tfor _, e := range p {\n\t\titr := e.TagValueIterator()\n\n\t\ta = append(a, itr)\n\t\tif e.Deleted() {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn MergeTagValueIterators(a...)\n}\n\n// TagValueElem represents a generic tag value element.\ntype TagValueElem interface {\n\tValue() []byte\n\tDeleted() bool\n}\n\n// TagValueIterator represents a iterator over a list of tag values.\ntype TagValueIterator interface {\n\tNext() TagValueElem\n}\n\n// MergeTagValueIterators returns an iterator that merges a set of iterators.\n// Iterators that are first in the list take precendence and a deletion by those\n// early iterators will invalidate elements by later iterators.\nfunc MergeTagValueIterators(itrs ...TagValueIterator) TagValueIterator {\n\tif len(itrs) == 0 {\n\t\treturn nil\n\t}\n\n\treturn &tagValueMergeIterator{\n\t\te:    make(tagValueMergeElem, 0, len(itrs)),\n\t\tbuf:  make([]TagValueElem, len(itrs)),\n\t\titrs: itrs,\n\t}\n}\n\ntype tagValueMergeIterator struct {\n\te    tagValueMergeElem\n\tbuf  []TagValueElem\n\titrs []TagValueIterator\n}\n\n// Next returns the element with the next lowest value across the iterators.\n//\n// If multiple iterators contain the same value then the first is returned\n// and the remaining ones are skipped.\nfunc (itr *tagValueMergeIterator) Next() TagValueElem {\n\t// Find next lowest value amongst the buffers.\n\tvar value []byte\n\tfor i, buf := range itr.buf {\n\t\t// Fill buffer.\n\t\tif buf == nil {\n\t\t\tif buf = itr.itrs[i].Next(); buf != nil {\n\t\t\t\titr.buf[i] = buf\n\t\t\t} else {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// Find next lowest value.\n\t\tif value == nil || bytes.Compare(buf.Value(), value) == -1 {\n\t\t\tvalue = 
buf.Value()\n\t\t}\n\t}\n\n\t// Return nil if no elements remaining.\n\tif value == nil {\n\t\treturn nil\n\t}\n\n\t// Merge elements and clear buffers.\n\titr.e = itr.e[:0]\n\tfor i, buf := range itr.buf {\n\t\tif buf == nil || !bytes.Equal(buf.Value(), value) {\n\t\t\tcontinue\n\t\t}\n\t\titr.e = append(itr.e, buf)\n\t\titr.buf[i] = nil\n\t}\n\treturn itr.e\n}\n\n// tagValueMergeElem represents a merged tag value element.\ntype tagValueMergeElem []TagValueElem\n\n// Name returns the value of the first element.\nfunc (p tagValueMergeElem) Value() []byte {\n\tif len(p) == 0 {\n\t\treturn nil\n\t}\n\treturn p[0].Value()\n}\n\n// Deleted returns the deleted flag of the first element.\nfunc (p tagValueMergeElem) Deleted() bool {\n\tif len(p) == 0 {\n\t\treturn false\n\t}\n\treturn p[0].Deleted()\n}\n\n// SeriesElem represents a generic series element.\ntype SeriesElem interface {\n\tName() []byte\n\tTags() models.Tags\n\tDeleted() bool\n\n\t// InfluxQL expression associated with series during filtering.\n\tExpr() influxql.Expr\n}\n\n// SeriesElemKey encodes e as a series key.\nfunc SeriesElemKey(e SeriesElem) []byte {\n\tname, tags := e.Name(), e.Tags()\n\n\t// TODO: Precompute allocation size.\n\t// FIXME: Handle escaping.\n\n\tvar buf []byte\n\tbuf = append(buf, name...)\n\tfor _, t := range tags {\n\t\tbuf = append(buf, ',')\n\t\tbuf = append(buf, t.Key...)\n\t\tbuf = append(buf, '=')\n\t\tbuf = append(buf, t.Value...)\n\t}\n\treturn buf\n}\n\n// CompareSeriesElem returns -1 if a < b, 1 if a > b, and 0 if equal.\nfunc CompareSeriesElem(a, b SeriesElem) int {\n\tif cmp := bytes.Compare(a.Name(), b.Name()); cmp != 0 {\n\t\treturn cmp\n\t}\n\treturn models.CompareTags(a.Tags(), b.Tags())\n}\n\n// seriesElem represents an in-memory implementation of SeriesElem.\ntype seriesElem struct {\n\tname    []byte\n\ttags    models.Tags\n\tdeleted bool\n}\n\nfunc (e *seriesElem) Name() []byte        { return e.name }\nfunc (e *seriesElem) Tags() models.Tags   { return e.tags 
}\nfunc (e *seriesElem) Deleted() bool       { return e.deleted }\nfunc (e *seriesElem) Expr() influxql.Expr { return nil }\n\n// SeriesIterator represents a iterator over a list of series.\ntype SeriesIterator interface {\n\tNext() SeriesElem\n}\n\n// MergeSeriesIterators returns an iterator that merges a set of iterators.\n// Iterators that are first in the list take precendence and a deletion by those\n// early iterators will invalidate elements by later iterators.\nfunc MergeSeriesIterators(itrs ...SeriesIterator) SeriesIterator {\n\tif n := len(itrs); n == 0 {\n\t\treturn nil\n\t} else if n == 1 {\n\t\treturn itrs[0]\n\t}\n\n\treturn &seriesMergeIterator{\n\t\tbuf:  make([]SeriesElem, len(itrs)),\n\t\titrs: itrs,\n\t}\n}\n\n// seriesMergeIterator is an iterator that merges multiple iterators together.\ntype seriesMergeIterator struct {\n\tbuf  []SeriesElem\n\titrs []SeriesIterator\n}\n\n// Next returns the element with the next lowest name/tags across the iterators.\n//\n// If multiple iterators contain the same name/tags then the first is returned\n// and the remaining ones are skipped.\nfunc (itr *seriesMergeIterator) Next() SeriesElem {\n\t// Find next lowest name/tags amongst the buffers.\n\tvar name []byte\n\tvar tags models.Tags\n\tfor i, buf := range itr.buf {\n\t\t// Fill buffer.\n\t\tif buf == nil {\n\t\t\tif buf = itr.itrs[i].Next(); buf != nil {\n\t\t\t\titr.buf[i] = buf\n\t\t\t} else {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// If the name is not set the pick the first non-empty name.\n\t\tif name == nil {\n\t\t\tname, tags = buf.Name(), buf.Tags()\n\t\t\tcontinue\n\t\t}\n\n\t\t// Set name/tags if they are lower than what has been seen.\n\t\tif cmp := bytes.Compare(buf.Name(), name); cmp == -1 || (cmp == 0 && models.CompareTags(buf.Tags(), tags) == -1) {\n\t\t\tname, tags = buf.Name(), buf.Tags()\n\t\t}\n\t}\n\n\t// Return nil if no elements remaining.\n\tif name == nil {\n\t\treturn nil\n\t}\n\n\t// Refill buffer.\n\tvar e SeriesElem\n\tfor i, 
buf := range itr.buf {\n\t\tif buf == nil || !bytes.Equal(buf.Name(), name) || models.CompareTags(buf.Tags(), tags) != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Copy first matching buffer to the return buffer.\n\t\tif e == nil {\n\t\t\te = buf\n\t\t}\n\n\t\t// Clear buffer.\n\t\titr.buf[i] = nil\n\t}\n\treturn e\n}\n\n// IntersectSeriesIterators returns an iterator that only returns series which\n// occur in both iterators. If both series have associated expressions then\n// they are combined together.\nfunc IntersectSeriesIterators(itr0, itr1 SeriesIterator) SeriesIterator {\n\tif itr0 == nil || itr1 == nil {\n\t\treturn nil\n\t}\n\n\treturn &seriesIntersectIterator{itrs: [2]SeriesIterator{itr0, itr1}}\n}\n\n// seriesIntersectIterator is an iterator that merges two iterators together.\ntype seriesIntersectIterator struct {\n\te    seriesExprElem\n\tbuf  [2]SeriesElem\n\titrs [2]SeriesIterator\n}\n\n// Next returns the next element which occurs in both iterators.\nfunc (itr *seriesIntersectIterator) Next() (e SeriesElem) {\n\tfor {\n\t\t// Fill buffers.\n\t\tif itr.buf[0] == nil {\n\t\t\titr.buf[0] = itr.itrs[0].Next()\n\t\t}\n\t\tif itr.buf[1] == nil {\n\t\t\titr.buf[1] = itr.itrs[1].Next()\n\t\t}\n\n\t\t// Exit if either buffer is still empty.\n\t\tif itr.buf[0] == nil || itr.buf[1] == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Skip if both series are not equal.\n\t\tif cmp := CompareSeriesElem(itr.buf[0], itr.buf[1]); cmp == -1 {\n\t\t\titr.buf[0] = nil\n\t\t\tcontinue\n\t\t} else if cmp == 1 {\n\t\t\titr.buf[1] = nil\n\t\t\tcontinue\n\t\t}\n\n\t\t// Merge series together if equal.\n\t\titr.e.SeriesElem = itr.buf[0]\n\n\t\t// Attach expression.\n\t\texpr0 := itr.buf[0].Expr()\n\t\texpr1 := itr.buf[1].Expr()\n\t\tif expr0 == nil {\n\t\t\titr.e.expr = expr1\n\t\t} else if expr1 == nil {\n\t\t\titr.e.expr = expr0\n\t\t} else {\n\t\t\titr.e.expr = influxql.Reduce(&influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.AND,\n\t\t\t\tLHS: expr0,\n\t\t\t\tRHS: expr1,\n\t\t\t}, 
nil)\n\t\t}\n\n\t\titr.buf[0], itr.buf[1] = nil, nil\n\t\treturn &itr.e\n\t}\n}\n\n// UnionSeriesIterators returns an iterator that returns series from both\n// both iterators. If both series have associated expressions then they are\n// combined together.\nfunc UnionSeriesIterators(itr0, itr1 SeriesIterator) SeriesIterator {\n\t// Return other iterator if either one is nil.\n\tif itr0 == nil {\n\t\treturn itr1\n\t} else if itr1 == nil {\n\t\treturn itr0\n\t}\n\n\treturn &seriesUnionIterator{itrs: [2]SeriesIterator{itr0, itr1}}\n}\n\n// seriesUnionIterator is an iterator that unions two iterators together.\ntype seriesUnionIterator struct {\n\te    seriesExprElem\n\tbuf  [2]SeriesElem\n\titrs [2]SeriesIterator\n}\n\n// Next returns the next element which occurs in both iterators.\nfunc (itr *seriesUnionIterator) Next() (e SeriesElem) {\n\t// Fill buffers.\n\tif itr.buf[0] == nil {\n\t\titr.buf[0] = itr.itrs[0].Next()\n\t}\n\tif itr.buf[1] == nil {\n\t\titr.buf[1] = itr.itrs[1].Next()\n\t}\n\n\t// Return the other iterator if either one is empty.\n\tif itr.buf[0] == nil {\n\t\te, itr.buf[1] = itr.buf[1], nil\n\t\treturn e\n\t} else if itr.buf[1] == nil {\n\t\te, itr.buf[0] = itr.buf[0], nil\n\t\treturn e\n\t}\n\n\t// Return lesser series.\n\tif cmp := CompareSeriesElem(itr.buf[0], itr.buf[1]); cmp == -1 {\n\t\te, itr.buf[0] = itr.buf[0], nil\n\t\treturn e\n\t} else if cmp == 1 {\n\t\te, itr.buf[1] = itr.buf[1], nil\n\t\treturn e\n\t}\n\n\t// Attach element.\n\titr.e.SeriesElem = itr.buf[0]\n\n\t// Attach expression.\n\texpr0 := itr.buf[0].Expr()\n\texpr1 := itr.buf[1].Expr()\n\tif expr0 != nil && expr1 != nil {\n\t\titr.e.expr = influxql.Reduce(&influxql.BinaryExpr{\n\t\t\tOp:  influxql.OR,\n\t\t\tLHS: expr0,\n\t\t\tRHS: expr1,\n\t\t}, nil)\n\t} else {\n\t\titr.e.expr = nil\n\t}\n\n\titr.buf[0], itr.buf[1] = nil, nil\n\treturn &itr.e\n}\n\n// DifferenceSeriesIterators returns an iterator that only returns series which\n// occur the first iterator but not the second 
iterator.\nfunc DifferenceSeriesIterators(itr0, itr1 SeriesIterator) SeriesIterator {\n\tif itr0 != nil && itr1 == nil {\n\t\treturn itr0\n\t} else if itr0 == nil {\n\t\treturn nil\n\t}\n\treturn &seriesDifferenceIterator{itrs: [2]SeriesIterator{itr0, itr1}}\n}\n\n// seriesDifferenceIterator is an iterator that merges two iterators together.\ntype seriesDifferenceIterator struct {\n\tbuf  [2]SeriesElem\n\titrs [2]SeriesIterator\n}\n\n// Next returns the next element which occurs only in the first iterator.\nfunc (itr *seriesDifferenceIterator) Next() (e SeriesElem) {\n\tfor {\n\t\t// Fill buffers.\n\t\tif itr.buf[0] == nil {\n\t\t\titr.buf[0] = itr.itrs[0].Next()\n\t\t}\n\t\tif itr.buf[1] == nil {\n\t\t\titr.buf[1] = itr.itrs[1].Next()\n\t\t}\n\n\t\t// Exit if first buffer is still empty.\n\t\tif itr.buf[0] == nil {\n\t\t\treturn nil\n\t\t} else if itr.buf[1] == nil {\n\t\t\te, itr.buf[0] = itr.buf[0], nil\n\t\t\treturn e\n\t\t}\n\n\t\t// Return first series if it's less.\n\t\t// If second series is less then skip it.\n\t\t// If both series are equal then skip both.\n\t\tif cmp := CompareSeriesElem(itr.buf[0], itr.buf[1]); cmp == -1 {\n\t\t\te, itr.buf[0] = itr.buf[0], nil\n\t\t\treturn e\n\t\t} else if cmp == 1 {\n\t\t\titr.buf[1] = nil\n\t\t\tcontinue\n\t\t} else {\n\t\t\titr.buf[0], itr.buf[1] = nil, nil\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\n// filterUndeletedSeriesIterator returns all series which are not deleted.\ntype filterUndeletedSeriesIterator struct {\n\titr SeriesIterator\n}\n\n// FilterUndeletedSeriesIterator returns an iterator which filters all deleted series.\nfunc FilterUndeletedSeriesIterator(itr SeriesIterator) SeriesIterator {\n\tif itr == nil {\n\t\treturn nil\n\t}\n\treturn &filterUndeletedSeriesIterator{itr: itr}\n}\n\nfunc (itr *filterUndeletedSeriesIterator) Next() SeriesElem {\n\tfor {\n\t\te := itr.itr.Next()\n\t\tif e == nil {\n\t\t\treturn nil\n\t\t} else if e.Deleted() {\n\t\t\tcontinue\n\t\t}\n\t\treturn e\n\t}\n}\n\n// seriesExprElem 
holds a series and its associated filter expression.\ntype seriesExprElem struct {\n\tSeriesElem\n\texpr influxql.Expr\n}\n\n// Expr returns the associated expression.\nfunc (e *seriesExprElem) Expr() influxql.Expr { return e.expr }\n\n// seriesExprIterator is an iterator that attaches an associated expression.\ntype seriesExprIterator struct {\n\titr SeriesIterator\n\te   seriesExprElem\n}\n\n// newSeriesExprIterator returns a new instance of seriesExprIterator.\nfunc newSeriesExprIterator(itr SeriesIterator, expr influxql.Expr) SeriesIterator {\n\tif itr == nil {\n\t\treturn nil\n\t}\n\n\treturn &seriesExprIterator{\n\t\titr: itr,\n\t\te: seriesExprElem{\n\t\t\texpr: expr,\n\t\t},\n\t}\n}\n\n// Next returns the next element in the iterator.\nfunc (itr *seriesExprIterator) Next() SeriesElem {\n\titr.e.SeriesElem = itr.itr.Next()\n\tif itr.e.SeriesElem == nil {\n\t\treturn nil\n\t}\n\treturn &itr.e\n}\n\n// seriesIDIterator represents a iterator over a list of series ids.\ntype seriesIDIterator interface {\n\tnext() uint32\n}\n\n// writeTo writes write v into w. Updates n.\nfunc writeTo(w io.Writer, v []byte, n *int64) error {\n\tnn, err := w.Write(v)\n\t*n += int64(nn)\n\treturn err\n}\n\n// writeUint8To writes write v into w. Updates n.\nfunc writeUint8To(w io.Writer, v uint8, n *int64) error {\n\tnn, err := w.Write([]byte{v})\n\t*n += int64(nn)\n\treturn err\n}\n\n// writeUint16To writes write v into w using big endian encoding. Updates n.\nfunc writeUint16To(w io.Writer, v uint16, n *int64) error {\n\tvar buf [2]byte\n\tbinary.BigEndian.PutUint16(buf[:], v)\n\tnn, err := w.Write(buf[:])\n\t*n += int64(nn)\n\treturn err\n}\n\n// writeUint32To writes write v into w using big endian encoding. Updates n.\nfunc writeUint32To(w io.Writer, v uint32, n *int64) error {\n\tvar buf [4]byte\n\tbinary.BigEndian.PutUint32(buf[:], v)\n\tnn, err := w.Write(buf[:])\n\t*n += int64(nn)\n\treturn err\n}\n\n// writeUint64To writes write v into w using big endian encoding. 
Updates n.\nfunc writeUint64To(w io.Writer, v uint64, n *int64) error {\n\tvar buf [8]byte\n\tbinary.BigEndian.PutUint64(buf[:], v)\n\tnn, err := w.Write(buf[:])\n\t*n += int64(nn)\n\treturn err\n}\n\n// writeUvarintTo writes write v into w using variable length encoding. Updates n.\nfunc writeUvarintTo(w io.Writer, v uint64, n *int64) error {\n\tvar buf [binary.MaxVarintLen64]byte\n\ti := binary.PutUvarint(buf[:], v)\n\tnn, err := w.Write(buf[:i])\n\t*n += int64(nn)\n\treturn err\n}\n\ntype uint32Slice []uint32\n\nfunc (a uint32Slice) Len() int           { return len(a) }\nfunc (a uint32Slice) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a uint32Slice) Less(i, j int) bool { return a[i] < a[j] }\n\ntype uint64Slice []uint64\n\nfunc (a uint64Slice) Len() int           { return len(a) }\nfunc (a uint64Slice) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a uint64Slice) Less(i, j int) bool { return a[i] < a[j] }\n\ntype byteSlices [][]byte\n\nfunc (a byteSlices) Len() int           { return len(a) }\nfunc (a byteSlices) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 }\n\n// copyBytes returns a copy of b.\nfunc copyBytes(b []byte) []byte {\n\tif b == nil {\n\t\treturn nil\n\t}\n\tbuf := make([]byte, len(b))\n\tcopy(buf, b)\n\treturn buf\n}\n\n// assert will panic with a given formatted message if the given condition is false.\nfunc assert(condition bool, msg string, v ...interface{}) {\n\tif !condition {\n\t\tpanic(fmt.Sprintf(\"assert failed: \"+msg, v...))\n\t}\n}\n\ntype byTagKey []*influxql.TagSet\n\nfunc (t byTagKey) Len() int           { return len(t) }\nfunc (t byTagKey) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) < 0 }\nfunc (t byTagKey) Swap(i, j int)      { t[i], t[j] = t[j], t[i] }\n\n// hexdump is a helper for dumping binary data to stderr.\nfunc hexdump(data []byte) { os.Stderr.Write([]byte(hex.Dump(data))) }\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1_test.go",
    "content": "package tsi1_test\n\nimport (\n\t\"bytes\"\n\t\"io/ioutil\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/tsdb/index/tsi1\"\n)\n\n// Ensure iterator can operate over an in-memory list of elements.\nfunc TestMeasurementIterator(t *testing.T) {\n\telems := []MeasurementElem{\n\t\tMeasurementElem{name: []byte(\"cpu\"), deleted: true},\n\t\tMeasurementElem{name: []byte(\"mem\")},\n\t}\n\n\titr := MeasurementIterator{Elems: elems}\n\tif e := itr.Next(); !reflect.DeepEqual(&elems[0], e) {\n\t\tt.Fatalf(\"unexpected elem(0): %#v\", e)\n\t} else if e := itr.Next(); !reflect.DeepEqual(&elems[1], e) {\n\t\tt.Fatalf(\"unexpected elem(1): %#v\", e)\n\t} else if e := itr.Next(); e != nil {\n\t\tt.Fatalf(\"expected nil elem: %#v\", e)\n\t}\n}\n\n// Ensure iterator can merge multiple iterators together.\nfunc TestMergeMeasurementIterators(t *testing.T) {\n\titr := tsi1.MergeMeasurementIterators(\n\t\t&MeasurementIterator{Elems: []MeasurementElem{\n\t\t\t{name: []byte(\"aaa\")},\n\t\t\t{name: []byte(\"bbb\"), deleted: true},\n\t\t\t{name: []byte(\"ccc\")},\n\t\t}},\n\t\t&MeasurementIterator{},\n\t\t&MeasurementIterator{Elems: []MeasurementElem{\n\t\t\t{name: []byte(\"bbb\")},\n\t\t\t{name: []byte(\"ccc\"), deleted: true},\n\t\t\t{name: []byte(\"ddd\")},\n\t\t}},\n\t)\n\n\tif e := itr.Next(); !bytes.Equal(e.Name(), []byte(\"aaa\")) || e.Deleted() {\n\t\tt.Fatalf(\"unexpected elem(0): %s/%v\", e.Name(), e.Deleted())\n\t} else if e := itr.Next(); !bytes.Equal(e.Name(), []byte(\"bbb\")) || !e.Deleted() {\n\t\tt.Fatalf(\"unexpected elem(1): %s/%v\", e.Name(), e.Deleted())\n\t} else if e := itr.Next(); !bytes.Equal(e.Name(), []byte(\"ccc\")) || e.Deleted() {\n\t\tt.Fatalf(\"unexpected elem(2): %s/%v\", e.Name(), e.Deleted())\n\t} else if e := itr.Next(); !bytes.Equal(e.Name(), []byte(\"ddd\")) || e.Deleted() {\n\t\tt.Fatalf(\"unexpected elem(3): %s/%v\", 
e.Name(), e.Deleted())\n\t} else if e := itr.Next(); e != nil {\n\t\tt.Fatalf(\"expected nil elem: %#v\", e)\n\t}\n}\n\n// Ensure iterator can operate over an in-memory list of tag key elements.\nfunc TestTagKeyIterator(t *testing.T) {\n\telems := []TagKeyElem{\n\t\t{key: []byte(\"aaa\"), deleted: true},\n\t\t{key: []byte(\"bbb\")},\n\t}\n\n\titr := TagKeyIterator{Elems: elems}\n\tif e := itr.Next(); !reflect.DeepEqual(&elems[0], e) {\n\t\tt.Fatalf(\"unexpected elem(0): %#v\", e)\n\t} else if e := itr.Next(); !reflect.DeepEqual(&elems[1], e) {\n\t\tt.Fatalf(\"unexpected elem(1): %#v\", e)\n\t} else if e := itr.Next(); e != nil {\n\t\tt.Fatalf(\"expected nil elem: %#v\", e)\n\t}\n}\n\n// Ensure iterator can merge multiple iterators together.\nfunc TestMergeTagKeyIterators(t *testing.T) {\n\titr := tsi1.MergeTagKeyIterators(\n\t\t&TagKeyIterator{Elems: []TagKeyElem{\n\t\t\t{key: []byte(\"aaa\")},\n\t\t\t{key: []byte(\"bbb\"), deleted: true},\n\t\t\t{key: []byte(\"ccc\")},\n\t\t}},\n\t\t&TagKeyIterator{},\n\t\t&TagKeyIterator{Elems: []TagKeyElem{\n\t\t\t{key: []byte(\"bbb\")},\n\t\t\t{key: []byte(\"ccc\"), deleted: true},\n\t\t\t{key: []byte(\"ddd\")},\n\t\t}},\n\t)\n\n\tif e := itr.Next(); !bytes.Equal(e.Key(), []byte(\"aaa\")) || e.Deleted() {\n\t\tt.Fatalf(\"unexpected elem(0): %s/%v\", e.Key(), e.Deleted())\n\t} else if e := itr.Next(); !bytes.Equal(e.Key(), []byte(\"bbb\")) || !e.Deleted() {\n\t\tt.Fatalf(\"unexpected elem(1): %s/%v\", e.Key(), e.Deleted())\n\t} else if e := itr.Next(); !bytes.Equal(e.Key(), []byte(\"ccc\")) || e.Deleted() {\n\t\tt.Fatalf(\"unexpected elem(2): %s/%v\", e.Key(), e.Deleted())\n\t} else if e := itr.Next(); !bytes.Equal(e.Key(), []byte(\"ddd\")) || e.Deleted() {\n\t\tt.Fatalf(\"unexpected elem(3): %s/%v\", e.Key(), e.Deleted())\n\t} else if e := itr.Next(); e != nil {\n\t\tt.Fatalf(\"expected nil elem: %#v\", e)\n\t}\n}\n\n// Ensure iterator can operate over an in-memory list of tag value elements.\nfunc TestTagValueIterator(t 
*testing.T) {\n\telems := []TagValueElem{\n\t\t{value: []byte(\"aaa\"), deleted: true},\n\t\t{value: []byte(\"bbb\")},\n\t}\n\n\titr := &TagValueIterator{Elems: elems}\n\tif e := itr.Next(); !reflect.DeepEqual(&elems[0], e) {\n\t\tt.Fatalf(\"unexpected elem(0): %#v\", e)\n\t} else if e := itr.Next(); !reflect.DeepEqual(&elems[1], e) {\n\t\tt.Fatalf(\"unexpected elem(1): %#v\", e)\n\t} else if e := itr.Next(); e != nil {\n\t\tt.Fatalf(\"expected nil elem: %#v\", e)\n\t}\n}\n\n// Ensure iterator can merge multiple iterators together.\nfunc TestMergeTagValueIterators(t *testing.T) {\n\titr := tsi1.MergeTagValueIterators(\n\t\t&TagValueIterator{Elems: []TagValueElem{\n\t\t\t{value: []byte(\"aaa\")},\n\t\t\t{value: []byte(\"bbb\"), deleted: true},\n\t\t\t{value: []byte(\"ccc\")},\n\t\t}},\n\t\t&TagValueIterator{},\n\t\t&TagValueIterator{Elems: []TagValueElem{\n\t\t\t{value: []byte(\"bbb\")},\n\t\t\t{value: []byte(\"ccc\"), deleted: true},\n\t\t\t{value: []byte(\"ddd\")},\n\t\t}},\n\t)\n\n\tif e := itr.Next(); !bytes.Equal(e.Value(), []byte(\"aaa\")) || e.Deleted() {\n\t\tt.Fatalf(\"unexpected elem(0): %s/%v\", e.Value(), e.Deleted())\n\t} else if e := itr.Next(); !bytes.Equal(e.Value(), []byte(\"bbb\")) || !e.Deleted() {\n\t\tt.Fatalf(\"unexpected elem(1): %s/%v\", e.Value(), e.Deleted())\n\t} else if e := itr.Next(); !bytes.Equal(e.Value(), []byte(\"ccc\")) || e.Deleted() {\n\t\tt.Fatalf(\"unexpected elem(2): %s/%v\", e.Value(), e.Deleted())\n\t} else if e := itr.Next(); !bytes.Equal(e.Value(), []byte(\"ddd\")) || e.Deleted() {\n\t\tt.Fatalf(\"unexpected elem(3): %s/%v\", e.Value(), e.Deleted())\n\t} else if e := itr.Next(); e != nil {\n\t\tt.Fatalf(\"expected nil elem: %#v\", e)\n\t}\n}\n\n// Ensure iterator can operate over an in-memory list of series.\nfunc TestSeriesIterator(t *testing.T) {\n\telems := []SeriesElem{\n\t\t{name: []byte(\"cpu\"), tags: models.Tags{{Key: []byte(\"region\"), Value: []byte(\"us-east\")}}, deleted: true},\n\t\t{name: 
[]byte(\"mem\")},\n\t}\n\n\titr := SeriesIterator{Elems: elems}\n\tif e := itr.Next(); !reflect.DeepEqual(&elems[0], e) {\n\t\tt.Fatalf(\"unexpected elem(0): %#v\", e)\n\t} else if e := itr.Next(); !reflect.DeepEqual(&elems[1], e) {\n\t\tt.Fatalf(\"unexpected elem(1): %#v\", e)\n\t} else if e := itr.Next(); e != nil {\n\t\tt.Fatalf(\"expected nil elem: %#v\", e)\n\t}\n}\n\n// Ensure iterator can merge multiple iterators together.\nfunc TestMergeSeriesIterators(t *testing.T) {\n\titr := tsi1.MergeSeriesIterators(\n\t\t&SeriesIterator{Elems: []SeriesElem{\n\t\t\t{name: []byte(\"aaa\"), tags: models.Tags{{Key: []byte(\"region\"), Value: []byte(\"us-east\")}}, deleted: true},\n\t\t\t{name: []byte(\"bbb\"), deleted: true},\n\t\t\t{name: []byte(\"ccc\")},\n\t\t}},\n\t\t&SeriesIterator{},\n\t\t&SeriesIterator{Elems: []SeriesElem{\n\t\t\t{name: []byte(\"aaa\"), tags: models.Tags{{Key: []byte(\"region\"), Value: []byte(\"us-east\")}}},\n\t\t\t{name: []byte(\"aaa\"), tags: models.Tags{{Key: []byte(\"region\"), Value: []byte(\"us-west\")}}},\n\t\t\t{name: []byte(\"bbb\")},\n\t\t\t{name: []byte(\"ccc\"), deleted: true},\n\t\t\t{name: []byte(\"ddd\")},\n\t\t}},\n\t)\n\n\tif e := itr.Next(); !reflect.DeepEqual(e, &SeriesElem{name: []byte(\"aaa\"), tags: models.Tags{{Key: []byte(\"region\"), Value: []byte(\"us-east\")}}, deleted: true}) {\n\t\tt.Fatalf(\"unexpected elem(0): %#v\", e)\n\t} else if e := itr.Next(); !reflect.DeepEqual(e, &SeriesElem{name: []byte(\"aaa\"), tags: models.Tags{{Key: []byte(\"region\"), Value: []byte(\"us-west\")}}}) {\n\t\tt.Fatalf(\"unexpected elem(1): %#v\", e)\n\t} else if e := itr.Next(); !reflect.DeepEqual(e, &SeriesElem{name: []byte(\"bbb\"), deleted: true}) {\n\t\tt.Fatalf(\"unexpected elem(2): %#v\", e)\n\t} else if e := itr.Next(); !reflect.DeepEqual(e, &SeriesElem{name: []byte(\"ccc\")}) {\n\t\tt.Fatalf(\"unexpected elem(3): %#v\", e)\n\t} else if e := itr.Next(); !reflect.DeepEqual(e, &SeriesElem{name: []byte(\"ddd\")}) 
{\n\t\tt.Fatalf(\"unexpected elem(4): %#v\", e)\n\t} else if e := itr.Next(); e != nil {\n\t\tt.Fatalf(\"expected nil elem: %#v\", e)\n\t}\n}\n\n// MeasurementElem represents a test implementation of tsi1.MeasurementElem.\ntype MeasurementElem struct {\n\tname    []byte\n\tdeleted bool\n}\n\nfunc (e *MeasurementElem) Name() []byte                        { return e.name }\nfunc (e *MeasurementElem) Deleted() bool                       { return e.deleted }\nfunc (e *MeasurementElem) TagKeyIterator() tsi1.TagKeyIterator { return nil }\n\n// MeasurementIterator represents an iterator over a slice of measurements.\ntype MeasurementIterator struct {\n\tElems []MeasurementElem\n}\n\n// Next returns the next element in the iterator.\nfunc (itr *MeasurementIterator) Next() (e tsi1.MeasurementElem) {\n\tif len(itr.Elems) == 0 {\n\t\treturn nil\n\t}\n\te, itr.Elems = &itr.Elems[0], itr.Elems[1:]\n\treturn e\n}\n\n// TagKeyElem represents a test implementation of tsi1.TagKeyElem.\ntype TagKeyElem struct {\n\tkey     []byte\n\tdeleted bool\n}\n\nfunc (e *TagKeyElem) Key() []byte                             { return e.key }\nfunc (e *TagKeyElem) Deleted() bool                           { return e.deleted }\nfunc (e *TagKeyElem) TagValueIterator() tsi1.TagValueIterator { return nil }\n\n// TagKeyIterator represents an iterator over a slice of tag keys.\ntype TagKeyIterator struct {\n\tElems []TagKeyElem\n}\n\n// Next returns the next element in the iterator.\nfunc (itr *TagKeyIterator) Next() (e tsi1.TagKeyElem) {\n\tif len(itr.Elems) == 0 {\n\t\treturn nil\n\t}\n\te, itr.Elems = &itr.Elems[0], itr.Elems[1:]\n\treturn e\n}\n\n// TagValueElem represents a test implementation of tsi1.TagValueElem.\ntype TagValueElem struct {\n\tvalue   []byte\n\tdeleted bool\n}\n\nfunc (e *TagValueElem) Value() []byte                       { return e.value }\nfunc (e *TagValueElem) Deleted() bool                       { return e.deleted }\nfunc (e *TagValueElem) SeriesIterator() tsi1.SeriesIterator 
{ return nil }\n\n// TagValueIterator represents an iterator over a slice of tag values.\ntype TagValueIterator struct {\n\tElems []TagValueElem\n}\n\n// Next returns the next element in the iterator.\nfunc (itr *TagValueIterator) Next() (e tsi1.TagValueElem) {\n\tif len(itr.Elems) == 0 {\n\t\treturn nil\n\t}\n\te, itr.Elems = &itr.Elems[0], itr.Elems[1:]\n\treturn e\n}\n\n// SeriesElem represents a test implementation of tsi1.SeriesElem.\ntype SeriesElem struct {\n\tname    []byte\n\ttags    models.Tags\n\tdeleted bool\n\texpr    influxql.Expr\n}\n\nfunc (e *SeriesElem) Name() []byte        { return e.name }\nfunc (e *SeriesElem) Tags() models.Tags   { return e.tags }\nfunc (e *SeriesElem) Deleted() bool       { return e.deleted }\nfunc (e *SeriesElem) Expr() influxql.Expr { return e.expr }\n\n// SeriesIterator represents an iterator over a slice of tag values.\ntype SeriesIterator struct {\n\tElems []SeriesElem\n}\n\n// Next returns the next element in the iterator.\nfunc (itr *SeriesIterator) Next() (e tsi1.SeriesElem) {\n\tif len(itr.Elems) == 0 {\n\t\treturn nil\n\t}\n\te, itr.Elems = &itr.Elems[0], itr.Elems[1:]\n\treturn e\n}\n\n// MustTempDir returns a temporary directory. Panic on error.\nfunc MustTempDir() string {\n\tpath, err := ioutil.TempDir(\"\", \"tsi-\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn path\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/index.go",
    "content": "package tsdb\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"sort\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/estimator\"\n\t\"github.com/uber-go/zap\"\n)\n\ntype Index interface {\n\tOpen() error\n\tClose() error\n\tWithLogger(zap.Logger)\n\n\tMeasurementExists(name []byte) (bool, error)\n\tMeasurementNamesByExpr(expr influxql.Expr) ([][]byte, error)\n\tMeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error)\n\tDropMeasurement(name []byte) error\n\tForEachMeasurementName(fn func(name []byte) error) error\n\n\tInitializeSeries(key, name []byte, tags models.Tags) error\n\tCreateSeriesIfNotExists(key, name []byte, tags models.Tags) error\n\tCreateSeriesListIfNotExists(keys, names [][]byte, tags []models.Tags) error\n\tDropSeries(key []byte) error\n\n\tSeriesSketches() (estimator.Sketch, estimator.Sketch, error)\n\tMeasurementsSketches() (estimator.Sketch, estimator.Sketch, error)\n\tSeriesN() int64\n\n\tHasTagKey(name, key []byte) (bool, error)\n\tTagSets(name []byte, options influxql.IteratorOptions) ([]*influxql.TagSet, error)\n\tMeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error)\n\tMeasurementTagKeyValuesByExpr(name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error)\n\n\tForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error\n\tTagKeyCardinality(name, key []byte) int\n\n\t// InfluxQL system iterators\n\tMeasurementSeriesKeysByExpr(name []byte, condition influxql.Expr) ([][]byte, error)\n\tForEachMeasurementSeriesByExpr(name []byte, expr influxql.Expr, fn func(tags models.Tags) error) error\n\tSeriesPointIterator(opt influxql.IteratorOptions) (influxql.Iterator, error)\n\n\t// Sets a shared fieldset from the engine.\n\tSetFieldSet(fs *MeasurementFieldSet)\n\n\t// Creates hard links inside path for snapshotting.\n\tSnapshotTo(path string) error\n\n\t// To be removed w/ 
tsi1.\n\tSetFieldName(measurement []byte, name string)\n\tAssignShard(k string, shardID uint64)\n\tUnassignShard(k string, shardID uint64) error\n\tRemoveShard(shardID uint64)\n\n\tType() string\n}\n\n// IndexFormat represents the format for an index.\ntype IndexFormat int\n\nconst (\n\t// InMemFormat is the format used by the original in-memory shared index.\n\tInMemFormat IndexFormat = 1\n\n\t// TSI1Format is the format used by the tsi1 index.\n\tTSI1Format IndexFormat = 2\n)\n\n// NewIndexFunc creates a new index.\ntype NewIndexFunc func(id uint64, database, path string, options EngineOptions) Index\n\n// newIndexFuncs is a lookup of index constructors by name.\nvar newIndexFuncs = make(map[string]NewIndexFunc)\n\n// RegisterIndex registers a storage index initializer by name.\nfunc RegisterIndex(name string, fn NewIndexFunc) {\n\tif _, ok := newIndexFuncs[name]; ok {\n\t\tpanic(\"index already registered: \" + name)\n\t}\n\tnewIndexFuncs[name] = fn\n}\n\n// RegisteredIndexs returns the slice of currently registered indexes.\nfunc RegisteredIndexes() []string {\n\ta := make([]string, 0, len(newIndexFuncs))\n\tfor k := range newIndexFuncs {\n\t\ta = append(a, k)\n\t}\n\tsort.Strings(a)\n\treturn a\n}\n\n// NewIndex returns an instance of an index based on its format.\n// If the path does not exist then the DefaultFormat is used.\nfunc NewIndex(id uint64, database, path string, options EngineOptions) (Index, error) {\n\tformat := options.IndexVersion\n\n\t// Use default format unless existing directory exists.\n\t_, err := os.Stat(path)\n\tif os.IsNotExist(err) {\n\t\t// nop, use default\n\t} else if err != nil {\n\t\treturn nil, err\n\t} else if err == nil {\n\t\tformat = \"tsi1\"\n\t}\n\n\t// Lookup index by format.\n\tfn := newIndexFuncs[format]\n\tif fn == nil {\n\t\treturn nil, fmt.Errorf(\"invalid index format: %q\", format)\n\t}\n\treturn fn(id, database, path, options), nil\n}\n\nfunc MustOpenIndex(id uint64, database, path string, options EngineOptions) 
Index {\n\tidx, err := NewIndex(id, database, path, options)\n\tif err != nil {\n\t\tpanic(err)\n\t} else if err := idx.Open(); err != nil {\n\t\tpanic(err)\n\t}\n\treturn idx\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/internal/meta.pb.go",
    "content": "// Code generated by protoc-gen-gogo.\n// source: internal/meta.proto\n// DO NOT EDIT!\n\n/*\nPackage meta is a generated protocol buffer package.\n\nIt is generated from these files:\n\tinternal/meta.proto\n\nIt has these top-level messages:\n\tSeries\n\tTag\n\tMeasurementFields\n\tField\n*/\npackage meta\n\nimport proto \"github.com/gogo/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\n// This is a compile-time assertion to ensure that this generated file\n// is compatible with the proto package it is being compiled against.\n// A compilation error at this line likely means your copy of the\n// proto package needs to be updated.\nconst _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package\n\ntype Series struct {\n\tKey              *string `protobuf:\"bytes,1,req,name=Key\" json:\"Key,omitempty\"`\n\tTags             []*Tag  `protobuf:\"bytes,2,rep,name=Tags\" json:\"Tags,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *Series) Reset()                    { *m = Series{} }\nfunc (m *Series) String() string            { return proto.CompactTextString(m) }\nfunc (*Series) ProtoMessage()               {}\nfunc (*Series) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{0} }\n\nfunc (m *Series) GetKey() string {\n\tif m != nil && m.Key != nil {\n\t\treturn *m.Key\n\t}\n\treturn \"\"\n}\n\nfunc (m *Series) GetTags() []*Tag {\n\tif m != nil {\n\t\treturn m.Tags\n\t}\n\treturn nil\n}\n\ntype Tag struct {\n\tKey              *string `protobuf:\"bytes,1,req,name=Key\" json:\"Key,omitempty\"`\n\tValue            *string `protobuf:\"bytes,2,req,name=Value\" json:\"Value,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *Tag) Reset()                    { *m = Tag{} }\nfunc (m *Tag) String() string            { return proto.CompactTextString(m) 
}\nfunc (*Tag) ProtoMessage()               {}\nfunc (*Tag) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{1} }\n\nfunc (m *Tag) GetKey() string {\n\tif m != nil && m.Key != nil {\n\t\treturn *m.Key\n\t}\n\treturn \"\"\n}\n\nfunc (m *Tag) GetValue() string {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn \"\"\n}\n\ntype MeasurementFields struct {\n\tFields           []*Field `protobuf:\"bytes,1,rep,name=Fields\" json:\"Fields,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *MeasurementFields) Reset()                    { *m = MeasurementFields{} }\nfunc (m *MeasurementFields) String() string            { return proto.CompactTextString(m) }\nfunc (*MeasurementFields) ProtoMessage()               {}\nfunc (*MeasurementFields) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{2} }\n\nfunc (m *MeasurementFields) GetFields() []*Field {\n\tif m != nil {\n\t\treturn m.Fields\n\t}\n\treturn nil\n}\n\ntype Field struct {\n\tID               *int32  `protobuf:\"varint,1,req,name=ID\" json:\"ID,omitempty\"`\n\tName             *string `protobuf:\"bytes,2,req,name=Name\" json:\"Name,omitempty\"`\n\tType             *int32  `protobuf:\"varint,3,req,name=Type\" json:\"Type,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *Field) Reset()                    { *m = Field{} }\nfunc (m *Field) String() string            { return proto.CompactTextString(m) }\nfunc (*Field) ProtoMessage()               {}\nfunc (*Field) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{3} }\n\nfunc (m *Field) GetID() int32 {\n\tif m != nil && m.ID != nil {\n\t\treturn *m.ID\n\t}\n\treturn 0\n}\n\nfunc (m *Field) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *Field) GetType() int32 {\n\tif m != nil && m.Type != nil {\n\t\treturn *m.Type\n\t}\n\treturn 0\n}\n\nfunc init() {\n\tproto.RegisterType((*Series)(nil), 
\"meta.Series\")\n\tproto.RegisterType((*Tag)(nil), \"meta.Tag\")\n\tproto.RegisterType((*MeasurementFields)(nil), \"meta.MeasurementFields\")\n\tproto.RegisterType((*Field)(nil), \"meta.Field\")\n}\n\nfunc init() { proto.RegisterFile(\"internal/meta.proto\", fileDescriptorMeta) }\n\nvar fileDescriptorMeta = []byte{\n\t// 180 bytes of a gzipped FileDescriptorProto\n\t0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x54, 0x8c, 0xbd, 0xca, 0xc2, 0x30,\n\t0x14, 0x40, 0x69, 0xd2, 0x16, 0x7a, 0xfb, 0x7d, 0x83, 0x71, 0x30, 0xe0, 0x52, 0x33, 0x75, 0x6a,\n\t0xc5, 0x67, 0x10, 0x41, 0x44, 0x17, 0x83, 0xfb, 0x05, 0x2f, 0xa5, 0xd0, 0x3f, 0x92, 0x74, 0xe8,\n\t0xdb, 0x4b, 0x52, 0x17, 0xb7, 0x73, 0xee, 0xcf, 0x81, 0x6d, 0x3b, 0x38, 0x32, 0x03, 0x76, 0x75,\n\t0x4f, 0x0e, 0xab, 0xc9, 0x8c, 0x6e, 0x14, 0xb1, 0x67, 0x55, 0x41, 0xfa, 0x24, 0xd3, 0x92, 0x15,\n\t0x39, 0xf0, 0x1b, 0x2d, 0x32, 0x2a, 0x58, 0x99, 0x89, 0x1d, 0xc4, 0x1a, 0x1b, 0x2b, 0x59, 0xc1,\n\t0xcb, 0xfc, 0x94, 0x55, 0xe1, 0x4f, 0x63, 0xa3, 0x0e, 0xc0, 0x35, 0x36, 0xbf, 0xc7, 0xff, 0x90,\n\t0xbc, 0xb0, 0x9b, 0x49, 0x32, 0xaf, 0xea, 0x08, 0x9b, 0x3b, 0xa1, 0x9d, 0x0d, 0xf5, 0x34, 0xb8,\n\t0x4b, 0x4b, 0xdd, 0xdb, 0x8a, 0x3d, 0xa4, 0x2b, 0xc9, 0x28, 0x24, 0xf3, 0x35, 0x19, 0x66, 0xaa,\n\t0x86, 0x24, 0x80, 0x00, 0x60, 0xd7, 0x73, 0xa8, 0x26, 0xe2, 0x0f, 0xe2, 0x07, 0xf6, 0xdf, 0xa8,\n\t0x37, 0xbd, 0x4c, 0x24, 0xb9, 0xdf, 0x7d, 0x02, 0x00, 0x00, 0xff, 0xff, 0x04, 0x3d, 0x58, 0x4a,\n\t0xd1, 0x00, 0x00, 0x00,\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/internal/meta.proto",
    "content": "package meta;\n\n//========================================================================\n//\n// Metadata\n//\n//========================================================================\n\nmessage Series {\n  required string Key = 1;\n  repeated Tag Tags = 2;\n}\n\nmessage Tag {\n  required string Key = 1;\n  required string Value = 2;\n}\n\nmessage MeasurementFields {\n  repeated Field Fields = 1;\n}\n\nmessage Field {\n  required int32 ID = 1;\n  required string Name = 2;\n  required int32 Type = 3;\n}"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/meta.go",
    "content": "package tsdb\n\n//go:generate protoc --gogo_out=. internal/meta.proto\n\nimport (\n\t\"sort\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/escape\"\n)\n\n// MarshalTags converts a tag set to bytes for use as a lookup key.\nfunc MarshalTags(tags map[string]string) []byte {\n\t// Empty maps marshal to empty bytes.\n\tif len(tags) == 0 {\n\t\treturn nil\n\t}\n\n\t// Extract keys and determine final size.\n\tsz := (len(tags) * 2) - 1 // separators\n\tkeys := make([]string, 0, len(tags))\n\tfor k, v := range tags {\n\t\tkeys = append(keys, k)\n\t\tsz += len(k) + len(v)\n\t}\n\tsort.Strings(keys)\n\n\t// Generate marshaled bytes.\n\tb := make([]byte, sz)\n\tbuf := b\n\tfor _, k := range keys {\n\t\tcopy(buf, k)\n\t\tbuf[len(k)] = '|'\n\t\tbuf = buf[len(k)+1:]\n\t}\n\tfor i, k := range keys {\n\t\tv := tags[k]\n\t\tcopy(buf, v)\n\t\tif i < len(keys)-1 {\n\t\t\tbuf[len(v)] = '|'\n\t\t\tbuf = buf[len(v)+1:]\n\t\t}\n\t}\n\treturn b\n}\n\n// MakeTagsKey converts a tag set to bytes for use as a lookup key.\nfunc MakeTagsKey(keys []string, tags models.Tags) []byte {\n\t// precondition: keys is sorted\n\t// precondition: models.Tags is sorted\n\n\t// Empty maps marshal to empty bytes.\n\tif len(keys) == 0 || len(tags) == 0 {\n\t\treturn nil\n\t}\n\n\tsel := make([]int, 0, len(keys))\n\n\tsz := 0\n\ti, j := 0, 0\n\tfor i < len(keys) && j < len(tags) {\n\t\tif keys[i] < string(tags[j].Key) {\n\t\t\ti++\n\t\t} else if keys[i] > string(tags[j].Key) {\n\t\t\tj++\n\t\t} else {\n\t\t\tsel = append(sel, j)\n\t\t\tsz += len(keys[i]) + len(tags[j].Value)\n\t\t\ti++\n\t\t\tj++\n\t\t}\n\t}\n\n\tif len(sel) == 0 {\n\t\t// no tags matched the requested keys\n\t\treturn nil\n\t}\n\n\tsz += (len(sel) * 2) - 1 // selected tags, add separators\n\n\t// Generate marshaled bytes.\n\tb := make([]byte, sz)\n\tbuf := b\n\tfor _, k := range sel {\n\t\tcopy(buf, tags[k].Key)\n\t\tbuf[len(tags[k].Key)] = '|'\n\t\tbuf = 
buf[len(tags[k].Key)+1:]\n\t}\n\n\tfor i, k := range sel {\n\t\tcopy(buf, tags[k].Value)\n\t\tif i < len(sel)-1 {\n\t\t\tbuf[len(tags[k].Value)] = '|'\n\t\t\tbuf = buf[len(tags[k].Value)+1:]\n\t\t}\n\t}\n\n\treturn b\n}\n\n// MeasurementFromSeriesKey returns the name of the measurement from a key that\n// contains a measurement name.\nfunc MeasurementFromSeriesKey(key []byte) []byte {\n\t// Ignoring the error because the func returns \"missing fields\"\n\tk, _ := models.ParseName(key)\n\treturn escape.Unescape(k)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/meta_test.go",
    "content": "package tsdb_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t\"github.com/influxdata/influxdb/tsdb/index/inmem\"\n)\n\n// Ensure tags can be marshaled into a byte slice.\nfunc TestMarshalTags(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\ttags   map[string]string\n\t\tresult []byte\n\t}{\n\t\t{\n\t\t\ttags:   nil,\n\t\t\tresult: nil,\n\t\t},\n\t\t{\n\t\t\ttags:   map[string]string{\"foo\": \"bar\"},\n\t\t\tresult: []byte(`foo|bar`),\n\t\t},\n\t\t{\n\t\t\ttags:   map[string]string{\"foo\": \"bar\", \"baz\": \"battttt\"},\n\t\t\tresult: []byte(`baz|foo|battttt|bar`),\n\t\t},\n\t\t{\n\t\t\ttags:   map[string]string{\"baz\": \"battttt\", \"foo\": \"bar\"},\n\t\t\tresult: []byte(`baz|foo|battttt|bar`),\n\t\t},\n\t} {\n\t\tresult := tsdb.MarshalTags(tt.tags)\n\t\tif !bytes.Equal(result, tt.result) {\n\t\t\tt.Fatalf(\"%d. unexpected result: exp=%s, got=%s\", i, tt.result, result)\n\t\t}\n\t}\n}\n\nfunc BenchmarkMarshalTags_KeyN1(b *testing.B)  { benchmarkMarshalTags(b, 1) }\nfunc BenchmarkMarshalTags_KeyN3(b *testing.B)  { benchmarkMarshalTags(b, 3) }\nfunc BenchmarkMarshalTags_KeyN5(b *testing.B)  { benchmarkMarshalTags(b, 5) }\nfunc BenchmarkMarshalTags_KeyN10(b *testing.B) { benchmarkMarshalTags(b, 10) }\n\nfunc benchmarkMarshalTags(b *testing.B, keyN int) {\n\tconst keySize, valueSize = 8, 15\n\n\t// Generate tag map.\n\ttags := make(map[string]string)\n\tfor i := 0; i < keyN; i++ {\n\t\ttags[fmt.Sprintf(\"%0*d\", keySize, i)] = fmt.Sprintf(\"%0*d\", valueSize, i)\n\t}\n\n\t// Unmarshal map into byte slice.\n\tb.ReportAllocs()\n\tfor i := 0; i < b.N; i++ {\n\t\ttsdb.MarshalTags(tags)\n\t}\n}\n\n// Ensure tags can be marshaled into a byte slice.\nfunc TestMakeTagsKey(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\tkeys   []string\n\t\ttags   models.Tags\n\t\tresult []byte\n\t}{\n\t\t{\n\t\t\tkeys:   nil,\n\t\t\ttags:   nil,\n\t\t\tresult: 
nil,\n\t\t},\n\t\t{\n\t\t\tkeys:   []string{\"foo\"},\n\t\t\ttags:   models.NewTags(map[string]string{\"foo\": \"bar\"}),\n\t\t\tresult: []byte(`foo|bar`),\n\t\t},\n\t\t{\n\t\t\tkeys:   []string{\"foo\"},\n\t\t\ttags:   models.NewTags(map[string]string{\"baz\": \"battttt\"}),\n\t\t\tresult: []byte(``),\n\t\t},\n\t\t{\n\t\t\tkeys:   []string{\"baz\", \"foo\"},\n\t\t\ttags:   models.NewTags(map[string]string{\"baz\": \"battttt\"}),\n\t\t\tresult: []byte(`baz|battttt`),\n\t\t},\n\t\t{\n\t\t\tkeys:   []string{\"baz\", \"foo\", \"zzz\"},\n\t\t\ttags:   models.NewTags(map[string]string{\"foo\": \"bar\"}),\n\t\t\tresult: []byte(`foo|bar`),\n\t\t},\n\t\t{\n\t\t\tkeys:   []string{\"baz\", \"foo\"},\n\t\t\ttags:   models.NewTags(map[string]string{\"foo\": \"bar\", \"baz\": \"battttt\"}),\n\t\t\tresult: []byte(`baz|foo|battttt|bar`),\n\t\t},\n\t\t{\n\t\t\tkeys:   []string{\"baz\"},\n\t\t\ttags:   models.NewTags(map[string]string{\"baz\": \"battttt\", \"foo\": \"bar\"}),\n\t\t\tresult: []byte(`baz|battttt`),\n\t\t},\n\t} {\n\t\tresult := tsdb.MakeTagsKey(tt.keys, tt.tags)\n\t\tif !bytes.Equal(result, tt.result) {\n\t\t\tt.Fatalf(\"%d. 
unexpected result: exp=%s, got=%s\", i, tt.result, result)\n\t\t}\n\t}\n}\n\nfunc BenchmarkMakeTagsKey_KeyN1(b *testing.B)  { benchmarkMakeTagsKey(b, 1) }\nfunc BenchmarkMakeTagsKey_KeyN3(b *testing.B)  { benchmarkMakeTagsKey(b, 3) }\nfunc BenchmarkMakeTagsKey_KeyN5(b *testing.B)  { benchmarkMakeTagsKey(b, 5) }\nfunc BenchmarkMakeTagsKey_KeyN10(b *testing.B) { benchmarkMakeTagsKey(b, 10) }\n\nfunc makeTagsAndKeys(keyN int) ([]string, models.Tags) {\n\tconst keySize, valueSize = 8, 15\n\n\t// Generate tag map.\n\tkeys := make([]string, keyN)\n\ttags := make(map[string]string)\n\tfor i := 0; i < keyN; i++ {\n\t\tkeys[i] = fmt.Sprintf(\"%0*d\", keySize, i)\n\t\ttags[keys[i]] = fmt.Sprintf(\"%0*d\", valueSize, i)\n\t}\n\n\treturn keys, models.NewTags(tags)\n}\n\nfunc benchmarkMakeTagsKey(b *testing.B, keyN int) {\n\tkeys, tags := makeTagsAndKeys(keyN)\n\n\t// Unmarshal map into byte slice.\n\tb.ReportAllocs()\n\tfor i := 0; i < b.N; i++ {\n\t\ttsdb.MakeTagsKey(keys, tags)\n\t}\n}\n\ntype TestSeries struct {\n\tMeasurement string\n\tSeries      *inmem.Series\n}\n\nfunc genTestSeries(mCnt, tCnt, vCnt int) []*TestSeries {\n\tmeasurements := genStrList(\"measurement\", mCnt)\n\ttagSets := NewTagSetGenerator(tCnt, vCnt).AllSets()\n\tseries := []*TestSeries{}\n\tfor _, m := range measurements {\n\t\tfor _, ts := range tagSets {\n\t\t\tseries = append(series, &TestSeries{\n\t\t\t\tMeasurement: m,\n\t\t\t\tSeries:      inmem.NewSeries([]byte(fmt.Sprintf(\"%s:%s\", m, string(tsdb.MarshalTags(ts)))), models.NewTags(ts)),\n\t\t\t})\n\t\t}\n\t}\n\treturn series\n}\n\ntype TagValGenerator struct {\n\tKey  string\n\tVals []string\n\tidx  int\n}\n\nfunc NewTagValGenerator(tagKey string, nVals int) *TagValGenerator {\n\ttvg := &TagValGenerator{Key: tagKey}\n\tfor i := 0; i < nVals; i++ {\n\t\ttvg.Vals = append(tvg.Vals, fmt.Sprintf(\"tagValue%d\", i))\n\t}\n\treturn tvg\n}\n\nfunc (tvg *TagValGenerator) First() string {\n\ttvg.idx = 0\n\treturn tvg.Curr()\n}\n\nfunc (tvg 
*TagValGenerator) Curr() string {\n\treturn tvg.Vals[tvg.idx]\n}\n\nfunc (tvg *TagValGenerator) Next() string {\n\ttvg.idx++\n\tif tvg.idx >= len(tvg.Vals) {\n\t\ttvg.idx--\n\t\treturn \"\"\n\t}\n\treturn tvg.Curr()\n}\n\ntype TagSet map[string]string\n\ntype TagSetGenerator struct {\n\tTagVals []*TagValGenerator\n}\n\nfunc NewTagSetGenerator(nSets int, nTagVals ...int) *TagSetGenerator {\n\ttsg := &TagSetGenerator{}\n\tfor i := 0; i < nSets; i++ {\n\t\tnVals := nTagVals[0]\n\t\tif i < len(nTagVals) {\n\t\t\tnVals = nTagVals[i]\n\t\t}\n\t\ttagKey := fmt.Sprintf(\"tagKey%d\", i)\n\t\ttsg.TagVals = append(tsg.TagVals, NewTagValGenerator(tagKey, nVals))\n\t}\n\treturn tsg\n}\n\nfunc (tsg *TagSetGenerator) First() TagSet {\n\tfor _, tsv := range tsg.TagVals {\n\t\ttsv.First()\n\t}\n\treturn tsg.Curr()\n}\n\nfunc (tsg *TagSetGenerator) Curr() TagSet {\n\tts := TagSet{}\n\tfor _, tvg := range tsg.TagVals {\n\t\tts[tvg.Key] = tvg.Curr()\n\t}\n\treturn ts\n}\n\nfunc (tsg *TagSetGenerator) Next() TagSet {\n\tval := \"\"\n\tfor _, tsv := range tsg.TagVals {\n\t\tif val = tsv.Next(); val != \"\" {\n\t\t\tbreak\n\t\t} else {\n\t\t\ttsv.First()\n\t\t}\n\t}\n\n\tif val == \"\" {\n\t\treturn nil\n\t}\n\n\treturn tsg.Curr()\n}\n\nfunc (tsg *TagSetGenerator) AllSets() []TagSet {\n\tallSets := []TagSet{}\n\tfor ts := tsg.First(); ts != nil; ts = tsg.Next() {\n\t\tallSets = append(allSets, ts)\n\t}\n\treturn allSets\n}\n\nfunc genStrList(prefix string, n int) []string {\n\tlst := make([]string, 0, n)\n\tfor i := 0; i < n; i++ {\n\t\tlst = append(lst, fmt.Sprintf(\"%s%d\", prefix, i))\n\t}\n\treturn lst\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/shard.go",
    "content": "package tsdb\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/gogo/protobuf/proto\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/estimator\"\n\tinternal \"github.com/influxdata/influxdb/tsdb/internal\"\n\t\"github.com/uber-go/zap\"\n)\n\n// monitorStatInterval is the interval at which the shard is inspected\n// for the purpose of determining certain monitoring statistics.\nconst monitorStatInterval = 30 * time.Second\n\nconst (\n\tstatWriteReq           = \"writeReq\"\n\tstatWriteReqOK         = \"writeReqOk\"\n\tstatWriteReqErr        = \"writeReqErr\"\n\tstatSeriesCreate       = \"seriesCreate\"\n\tstatFieldsCreate       = \"fieldsCreate\"\n\tstatWritePointsErr     = \"writePointsErr\"\n\tstatWritePointsDropped = \"writePointsDropped\"\n\tstatWritePointsOK      = \"writePointsOk\"\n\tstatWriteBytes         = \"writeBytes\"\n\tstatDiskBytes          = \"diskBytes\"\n)\n\nvar (\n\t// ErrFieldOverflow is returned when too many fields are created on a measurement.\n\tErrFieldOverflow = errors.New(\"field overflow\")\n\n\t// ErrFieldTypeConflict is returned when a new field already exists with a different type.\n\tErrFieldTypeConflict = errors.New(\"field type conflict\")\n\n\t// ErrFieldNotFound is returned when a field cannot be found.\n\tErrFieldNotFound = errors.New(\"field not found\")\n\n\t// ErrFieldUnmappedID is returned when the system is presented, during decode, with a field ID\n\t// there is no mapping for.\n\tErrFieldUnmappedID = errors.New(\"field ID not mapped\")\n\n\t// ErrEngineClosed is returned when a caller attempts indirectly to\n\t// access the shard's underlying engine.\n\tErrEngineClosed = errors.New(\"engine is closed\")\n\n\t// ErrShardDisabled is returned when a the shard is not available 
for\n\t// queries or writes.\n\tErrShardDisabled = errors.New(\"shard is disabled\")\n)\n\nvar (\n\t// Static objects to prevent small allocs.\n\ttimeBytes = []byte(\"time\")\n)\n\n// A ShardError implements the error interface, and contains extra\n// context about the shard that generated the error.\ntype ShardError struct {\n\tid  uint64\n\tErr error\n}\n\n// NewShardError returns a new ShardError.\nfunc NewShardError(id uint64, err error) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\treturn ShardError{id: id, Err: err}\n}\n\n// Error returns the string representation of the error, to satisfy the error interface.\nfunc (e ShardError) Error() string {\n\treturn fmt.Sprintf(\"[shard %d] %s\", e.id, e.Err)\n}\n\n// PartialWriteError indicates a write request could only write a portion of the\n// requested values.\ntype PartialWriteError struct {\n\tReason  string\n\tDropped int\n\n\t// The set of series keys that were dropped. Can be nil.\n\tDroppedKeys map[string]struct{}\n}\n\nfunc (e PartialWriteError) Error() string {\n\treturn fmt.Sprintf(\"partial write: %s dropped=%d\", e.Reason, e.Dropped)\n}\n\n// Shard represents a self-contained time series database. An inverted index of\n// the measurement and tag data is kept along with the raw time series data.\n// Data can be split across many shards. The query engine in TSDB is responsible\n// for combining the output of many shards into a single query result.\ntype Shard struct {\n\tpath    string\n\twalPath string\n\tid      uint64\n\n\tdatabase        string\n\tretentionPolicy string\n\n\toptions EngineOptions\n\n\tmu      sync.RWMutex\n\t_engine Engine\n\tindex   Index\n\n\tclosing chan struct{}\n\tenabled bool\n\n\t// expvar-based stats.\n\tstats       *ShardStatistics\n\tdefaultTags models.StatisticTags\n\n\tbaseLogger zap.Logger\n\tlogger     zap.Logger\n\n\tEnableOnOpen bool\n}\n\n// NewShard returns a new initialized Shard. 
walPath doesn't apply to the b1 type index\nfunc NewShard(id uint64, path string, walPath string, opt EngineOptions) *Shard {\n\tdb, rp := decodeStorePath(path)\n\tlogger := zap.New(zap.NullEncoder())\n\n\ts := &Shard{\n\t\tid:      id,\n\t\tpath:    path,\n\t\twalPath: walPath,\n\t\toptions: opt,\n\t\tclosing: make(chan struct{}),\n\n\t\tstats: &ShardStatistics{},\n\t\tdefaultTags: models.StatisticTags{\n\t\t\t\"path\":            path,\n\t\t\t\"walPath\":         walPath,\n\t\t\t\"id\":              fmt.Sprintf(\"%d\", id),\n\t\t\t\"database\":        db,\n\t\t\t\"retentionPolicy\": rp,\n\t\t\t\"engine\":          opt.EngineVersion,\n\t\t},\n\n\t\tdatabase:        db,\n\t\tretentionPolicy: rp,\n\n\t\tlogger:       logger,\n\t\tbaseLogger:   logger,\n\t\tEnableOnOpen: true,\n\t}\n\treturn s\n}\n\n// WithLogger sets the logger on the shard.\nfunc (s *Shard) WithLogger(log zap.Logger) {\n\ts.baseLogger = log\n\tengine, err := s.engine()\n\tif err == nil {\n\t\tengine.WithLogger(s.baseLogger)\n\t\ts.index.WithLogger(s.baseLogger)\n\t}\n\ts.logger = s.baseLogger.With(zap.String(\"service\", \"shard\"))\n}\n\n// SetEnabled enables the shard for queries and write.  
When disabled, all\n// writes and queries return an error and compactions are stopped for the shard.\nfunc (s *Shard) SetEnabled(enabled bool) {\n\ts.mu.Lock()\n\t// Prevent writes and queries\n\ts.enabled = enabled\n\tif s._engine != nil {\n\t\t// Disable background compactions and snapshotting\n\t\ts._engine.SetEnabled(enabled)\n\t}\n\ts.mu.Unlock()\n}\n\n// ID returns the shards ID.\nfunc (s *Shard) ID() uint64 {\n\treturn s.id\n}\n\n// Database returns the database of the shard.\nfunc (s *Shard) Database() string {\n\treturn s.database\n}\n\n// RetentionPolicy returns the retention policy of the shard.\nfunc (s *Shard) RetentionPolicy() string {\n\treturn s.retentionPolicy\n}\n\n// ShardStatistics maintains statistics for a shard.\ntype ShardStatistics struct {\n\tWriteReq           int64\n\tWriteReqOK         int64\n\tWriteReqErr        int64\n\tFieldsCreated      int64\n\tWritePointsErr     int64\n\tWritePointsDropped int64\n\tWritePointsOK      int64\n\tBytesWritten       int64\n\tDiskBytes          int64\n}\n\n// Statistics returns statistics for periodic monitoring.\nfunc (s *Shard) Statistics(tags map[string]string) []models.Statistic {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\t// Refresh our disk size stat\n\tif _, err := s.DiskSize(); err != nil {\n\t\treturn nil\n\t}\n\tseriesN := engine.SeriesN()\n\n\ttags = s.defaultTags.Merge(tags)\n\tstatistics := []models.Statistic{{\n\t\tName: \"shard\",\n\t\tTags: tags,\n\t\tValues: map[string]interface{}{\n\t\t\tstatWriteReq:           atomic.LoadInt64(&s.stats.WriteReq),\n\t\t\tstatWriteReqOK:         atomic.LoadInt64(&s.stats.WriteReqOK),\n\t\t\tstatWriteReqErr:        atomic.LoadInt64(&s.stats.WriteReqErr),\n\t\t\tstatSeriesCreate:       seriesN,\n\t\t\tstatFieldsCreate:       atomic.LoadInt64(&s.stats.FieldsCreated),\n\t\t\tstatWritePointsErr:     atomic.LoadInt64(&s.stats.WritePointsErr),\n\t\t\tstatWritePointsDropped: 
atomic.LoadInt64(&s.stats.WritePointsDropped),\n\t\t\tstatWritePointsOK:      atomic.LoadInt64(&s.stats.WritePointsOK),\n\t\t\tstatWriteBytes:         atomic.LoadInt64(&s.stats.BytesWritten),\n\t\t\tstatDiskBytes:          atomic.LoadInt64(&s.stats.DiskBytes),\n\t\t},\n\t}}\n\n\t// Add the index and engine statistics.\n\tstatistics = append(statistics, engine.Statistics(tags)...)\n\treturn statistics\n}\n\n// Path returns the path set on the shard when it was created.\nfunc (s *Shard) Path() string { return s.path }\n\n// Open initializes and opens the shard's store.\nfunc (s *Shard) Open() error {\n\tif err := func() error {\n\t\ts.mu.Lock()\n\t\tdefer s.mu.Unlock()\n\n\t\t// Return if the shard is already open\n\t\tif s._engine != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Initialize underlying index.\n\t\tipath := filepath.Join(s.path, \"index\")\n\t\tidx, err := NewIndex(s.id, s.database, ipath, s.options)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Open index.\n\t\tif err := idx.Open(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.index = idx\n\t\tidx.WithLogger(s.baseLogger)\n\n\t\t// Initialize underlying engine.\n\t\te, err := NewEngine(s.id, idx, s.database, s.path, s.walPath, s.options)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Set log output on the engine.\n\t\te.WithLogger(s.baseLogger)\n\n\t\t// Disable compactions while loading the index\n\t\te.SetEnabled(false)\n\n\t\t// Open engine.\n\t\tif err := e.Open(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Load metadata index for the inmem index only.\n\t\tif err := e.LoadMetadataIndex(s.id, s.index); err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts._engine = e\n\n\t\treturn nil\n\t}(); err != nil {\n\t\ts.close(true)\n\t\treturn NewShardError(s.id, err)\n\t}\n\n\tif s.EnableOnOpen {\n\t\t// enable writes, queries and compactions\n\t\ts.SetEnabled(true)\n\t}\n\n\treturn nil\n}\n\n// Close shuts down the shard's store.\nfunc (s *Shard) Close() error {\n\ts.mu.Lock()\n\tdefer 
s.mu.Unlock()\n\treturn s.close(true)\n}\n\n// CloseFast closes the shard without cleaning up the shard ID or any of the\n// shard's series keys from the index it belongs to.\n//\n// CloseFast can be called when the entire index is being removed, e.g., when\n// the database the shard belongs to is being dropped.\nfunc (s *Shard) CloseFast() error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\treturn s.close(false)\n}\n\n// close closes the shard an removes reference to the shard from associated\n// indexes, unless clean is false.\nfunc (s *Shard) close(clean bool) error {\n\tif s._engine == nil {\n\t\treturn nil\n\t}\n\n\t// Close the closing channel at most once.\n\tselect {\n\tcase <-s.closing:\n\tdefault:\n\t\tclose(s.closing)\n\t}\n\n\tif clean {\n\t\t// Don't leak our shard ID and series keys in the index\n\t\ts.index.RemoveShard(s.id)\n\t}\n\n\terr := s._engine.Close()\n\tif err == nil {\n\t\ts._engine = nil\n\t}\n\n\tif e := s.index.Close(); e == nil {\n\t\ts.index = nil\n\t}\n\treturn err\n}\n\nfunc (s *Shard) IndexType() string {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\tif err := s.ready(); err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn s.index.Type()\n}\n\n// ready determines if the Shard is ready for queries or writes.\n// It returns nil if ready, otherwise ErrShardClosed or ErrShardDiabled\nfunc (s *Shard) ready() error {\n\tvar err error\n\tif s._engine == nil {\n\t\terr = ErrEngineClosed\n\t} else if !s.enabled {\n\t\terr = ErrShardDisabled\n\t}\n\treturn err\n}\n\n// LastModified returns the time when this shard was last modified.\nfunc (s *Shard) LastModified() time.Time {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn time.Time{}\n\t}\n\treturn engine.LastModified()\n}\n\n// UnloadIndex removes all references to this shard from the DatabaseIndex\nfunc (s *Shard) UnloadIndex() {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\tif err := s.ready(); err != nil {\n\t\treturn\n\t}\n\ts.index.RemoveShard(s.id)\n}\n\n// IsIdle return true if the 
shard is not receiving writes and is fully compacted.\nfunc (s *Shard) IsIdle() bool {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn true\n\t}\n\treturn engine.IsIdle()\n}\n\n// SetCompactionsEnabled enables or disable shard background compactions.\nfunc (s *Shard) SetCompactionsEnabled(enabled bool) {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn\n\t}\n\tengine.SetCompactionsEnabled(enabled)\n}\n\n// DiskSize returns the size on disk of this shard.\nfunc (s *Shard) DiskSize() (int64, error) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\t// We don't use engine() becuase we still want to report the shard's disk\n\t// size even if the shard has been disabled.\n\tif s._engine == nil {\n\t\treturn 0, ErrEngineClosed\n\t}\n\tsize := s._engine.DiskSize()\n\tatomic.StoreInt64(&s.stats.DiskBytes, size)\n\treturn size, nil\n}\n\n// FieldCreate holds information for a field to create on a measurement.\ntype FieldCreate struct {\n\tMeasurement []byte\n\tField       *Field\n}\n\n// WritePoints will write the raw data points and any new metadata to the index in the shard.\nfunc (s *Shard) WritePoints(points []models.Point) error {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\tengine, err := s.engineNoLock()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar writeError error\n\tatomic.AddInt64(&s.stats.WriteReq, 1)\n\n\tpoints, fieldsToCreate, err := s.validateSeriesAndFields(points)\n\tif err != nil {\n\t\tif _, ok := err.(PartialWriteError); !ok {\n\t\t\treturn err\n\t\t}\n\t\t// There was a partial write (points dropped), hold onto the error to return\n\t\t// to the caller, but continue on writing the remaining points.\n\t\twriteError = err\n\t}\n\tatomic.AddInt64(&s.stats.FieldsCreated, int64(len(fieldsToCreate)))\n\n\t// add any new fields and keep track of what needs to be saved\n\tif err := s.createFieldsAndMeasurements(fieldsToCreate); err != nil {\n\t\treturn err\n\t}\n\n\t// Write to the engine.\n\tif err := engine.WritePoints(points); err != nil 
{\n\t\tatomic.AddInt64(&s.stats.WritePointsErr, int64(len(points)))\n\t\tatomic.AddInt64(&s.stats.WriteReqErr, 1)\n\t\treturn fmt.Errorf(\"engine: %s\", err)\n\t}\n\tatomic.AddInt64(&s.stats.WritePointsOK, int64(len(points)))\n\tatomic.AddInt64(&s.stats.WriteReqOK, 1)\n\n\treturn writeError\n}\n\n// validateSeriesAndFields checks which series and fields are new and whose metadata should be saved and indexed.\nfunc (s *Shard) validateSeriesAndFields(points []models.Point) ([]models.Point, []*FieldCreate, error) {\n\tvar (\n\t\tfieldsToCreate []*FieldCreate\n\t\terr            error\n\t\tdropped        int\n\t\treason         string // only first error reason is set unless returned from CreateSeriesListIfNotExists\n\t)\n\n\t// Create all series against the index in bulk.\n\tkeys := make([][]byte, len(points))\n\tnames := make([][]byte, len(points))\n\ttagsSlice := make([]models.Tags, len(points))\n\n\t// Drop any series w/ a \"time\" tag, these are illegal\n\tvar j int\n\tfor i, p := range points {\n\t\ttags := p.Tags()\n\t\tif v := tags.Get(timeBytes); v != nil {\n\t\t\tdropped++\n\t\t\tif reason == \"\" {\n\t\t\t\treason = fmt.Sprintf(\"invalid tag key: input tag \\\"%s\\\" on measurement \\\"%s\\\" is invalid\", \"time\", string(p.Name()))\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tkeys[j] = p.Key()\n\t\tnames[j] = p.Name()\n\t\ttagsSlice[j] = tags\n\t\tpoints[j] = points[i]\n\t\tj++\n\t}\n\tpoints, keys, names, tagsSlice = points[:j], keys[:j], names[:j], tagsSlice[:j]\n\n\tengine, err := s.engineNoLock()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// Add new series. 
Check for partial writes.\n\tvar droppedKeys map[string]struct{}\n\tif err := engine.CreateSeriesListIfNotExists(keys, names, tagsSlice); err != nil {\n\t\tswitch err := err.(type) {\n\t\tcase *PartialWriteError:\n\t\t\treason = err.Reason\n\t\t\tdropped += err.Dropped\n\t\t\tdroppedKeys = err.DroppedKeys\n\t\t\tatomic.AddInt64(&s.stats.WritePointsDropped, int64(err.Dropped))\n\t\tdefault:\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\t// get the shard mutex for locally defined fields\n\tn := 0\n\n\t// mfCache is a local cache of MeasurementFields to reduce lock contention when validating\n\t// field types.\n\tmfCache := make(map[string]*MeasurementFields, 16)\n\tfor i, p := range points {\n\t\tvar skip bool\n\t\tvar validField bool\n\t\titer := p.FieldIterator()\n\t\tfor iter.Next() {\n\t\t\tif bytes.Equal(iter.FieldKey(), timeBytes) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvalidField = true\n\t\t\tbreak\n\t\t}\n\n\t\tif !validField {\n\t\t\tdropped++\n\t\t\tif reason == \"\" {\n\t\t\t\treason = fmt.Sprintf(\"invalid field name: input field \\\"%s\\\" on measurement \\\"%s\\\" is invalid\", \"time\", string(p.Name()))\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\titer.Reset()\n\n\t\t// Skip points if keys have been dropped.\n\t\t// The drop count has already been incremented during series creation.\n\t\tif droppedKeys != nil {\n\t\t\tif _, ok := droppedKeys[string(keys[i])]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tname := p.Name()\n\t\t// see if the field definitions need to be saved to the shard\n\t\tmf := mfCache[string(name)]\n\t\tif mf == nil {\n\t\t\tmf = engine.MeasurementFields(name).Clone()\n\t\t\tmfCache[string(name)] = mf\n\t\t}\n\t\titer.Reset()\n\n\t\t// validate field types and encode data\n\t\tfor iter.Next() {\n\n\t\t\t// Skip fields name \"time\", they are illegal\n\t\t\tif bytes.Equal(iter.FieldKey(), timeBytes) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar fieldType influxql.DataType\n\t\t\tswitch iter.Type() {\n\t\t\tcase models.Float:\n\t\t\t\tfieldType = 
influxql.Float\n\t\t\tcase models.Integer:\n\t\t\t\tfieldType = influxql.Integer\n\t\t\tcase models.Boolean:\n\t\t\t\tfieldType = influxql.Boolean\n\t\t\tcase models.String:\n\t\t\t\tfieldType = influxql.String\n\t\t\tdefault:\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif f := mf.FieldBytes(iter.FieldKey()); f != nil {\n\t\t\t\t// Field present in shard metadata, make sure there is no type conflict.\n\t\t\t\tif f.Type != fieldType {\n\t\t\t\t\tatomic.AddInt64(&s.stats.WritePointsDropped, 1)\n\t\t\t\t\tdropped++\n\t\t\t\t\tif reason == \"\" {\n\t\t\t\t\t\treason = fmt.Sprintf(\"%s: input field \\\"%s\\\" on measurement \\\"%s\\\" is type %s, already exists as type %s\", ErrFieldTypeConflict, iter.FieldKey(), name, fieldType, f.Type)\n\t\t\t\t\t}\n\t\t\t\t\tskip = true\n\t\t\t\t} else {\n\t\t\t\t\tcontinue // Field is present, and it's of the same type. Nothing more to do.\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !skip {\n\t\t\t\tfieldsToCreate = append(fieldsToCreate, &FieldCreate{p.Name(), &Field{Name: string(iter.FieldKey()), Type: fieldType}})\n\t\t\t}\n\t\t}\n\n\t\tif !skip {\n\t\t\tpoints[n] = points[i]\n\t\t\tn++\n\t\t}\n\t}\n\tpoints = points[:n]\n\n\tif dropped > 0 {\n\t\terr = PartialWriteError{Reason: reason, Dropped: dropped}\n\t}\n\n\treturn points, fieldsToCreate, err\n}\n\nfunc (s *Shard) createFieldsAndMeasurements(fieldsToCreate []*FieldCreate) error {\n\tif len(fieldsToCreate) == 0 {\n\t\treturn nil\n\t}\n\n\tengine, err := s.engineNoLock()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// add fields\n\tfor _, f := range fieldsToCreate {\n\t\tmf := engine.MeasurementFields(f.Measurement)\n\t\tif err := mf.CreateFieldIfNotExists([]byte(f.Field.Name), f.Field.Type, false); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ts.index.SetFieldName(f.Measurement, f.Field.Name)\n\t}\n\n\treturn nil\n}\n\n// DeleteSeries deletes a list of series.\nfunc (s *Shard) DeleteSeries(seriesKeys [][]byte) error {\n\treturn s.DeleteSeriesRange(seriesKeys, math.MinInt64, 
math.MaxInt64)\n}\n\n// DeleteSeriesRange deletes all values from for seriesKeys between min and max (inclusive)\nfunc (s *Shard) DeleteSeriesRange(seriesKeys [][]byte, min, max int64) error {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn engine.DeleteSeriesRange(seriesKeys, min, max)\n}\n\n// DeleteMeasurement deletes a measurement and all underlying series.\nfunc (s *Shard) DeleteMeasurement(name []byte) error {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn engine.DeleteMeasurement(name)\n}\n\n// SeriesN returns the unique number of series in the shard.\nfunc (s *Shard) SeriesN() int64 {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn engine.SeriesN()\n}\n\n// SeriesSketches returns the series sketches for the shard.\nfunc (s *Shard) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn engine.SeriesSketches()\n}\n\n// MeasurementsSketches returns the measurement sketches for the shard.\nfunc (s *Shard) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn engine.MeasurementsSketches()\n}\n\n// MeasurementNamesByExpr returns names of measurements matching the condition.\n// If cond is nil then all measurement names are returned.\nfunc (s *Shard) MeasurementNamesByExpr(cond influxql.Expr) ([][]byte, error) {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn engine.MeasurementNamesByExpr(cond)\n}\n\n// MeasurementNamesByRegex returns names of measurements matching the regular expression.\nfunc (s *Shard) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn engine.MeasurementNamesByRegex(re)\n}\n\n// MeasurementSeriesKeysByExpr returns a list of 
series keys from the shard\n// matching expr.\nfunc (s *Shard) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr) ([][]byte, error) {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn engine.MeasurementSeriesKeysByExpr(name, expr)\n}\n\n// MeasurementTagKeysByExpr returns all the tag keys for the provided expression.\nfunc (s *Shard) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn engine.MeasurementTagKeysByExpr(name, expr)\n}\n\n// MeasurementTagKeyValuesByExpr returns all the tag keys values for the\n// provided expression.\nfunc (s *Shard) MeasurementTagKeyValuesByExpr(name []byte, key []string, expr influxql.Expr, keysSorted bool) ([][]string, error) {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn engine.MeasurementTagKeyValuesByExpr(name, key, expr, keysSorted)\n}\n\n// MeasurementFields returns fields for a measurement.\n// TODO(edd): This method is currently only being called from tests; do we\n// really need it?\nfunc (s *Shard) MeasurementFields(name []byte) *MeasurementFields {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn engine.MeasurementFields(name)\n}\n\n// MeasurementExists returns true if the shard contains name.\n// TODO(edd): This method is currently only being called from tests; do we\n// really need it?\nfunc (s *Shard) MeasurementExists(name []byte) (bool, error) {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn engine.MeasurementExists(name)\n}\n\n// WriteTo writes the shard's data to w.\nfunc (s *Shard) WriteTo(w io.Writer) (int64, error) {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tn, err := engine.WriteTo(w)\n\tatomic.AddInt64(&s.stats.BytesWritten, int64(n))\n\treturn n, err\n}\n\n// CreateIterator returns an iterator for the data in 
the shard.\nfunc (s *Shard) CreateIterator(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif strings.HasPrefix(measurement, \"_\") {\n\t\tif itr, ok, err := s.createSystemIterator(engine, measurement, opt); ok {\n\t\t\treturn itr, err\n\t\t}\n\t\t// Unknown system source so pass this to the engine.\n\t}\n\treturn engine.CreateIterator(measurement, opt)\n}\n\n// createSystemIterator returns an iterator for a system source.\nfunc (s *Shard) createSystemIterator(engine Engine, measurement string, opt influxql.IteratorOptions) (influxql.Iterator, bool, error) {\n\tswitch measurement {\n\tcase \"_fieldKeys\":\n\t\titr, err := NewFieldKeysIterator(engine, opt)\n\t\treturn itr, true, err\n\tcase \"_series\":\n\t\titr, err := s.createSeriesIterator(opt)\n\t\treturn itr, true, err\n\tcase \"_tagKeys\":\n\t\titr, err := NewTagKeysIterator(engine, opt)\n\t\treturn itr, true, err\n\t}\n\treturn nil, false, nil\n}\n\n// createSeriesIterator returns a new instance of SeriesIterator.\nfunc (s *Shard) createSeriesIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Only equality operators are allowed.\n\tinfluxql.WalkFunc(opt.Condition, func(n influxql.Node) {\n\t\tswitch n := n.(type) {\n\t\tcase *influxql.BinaryExpr:\n\t\t\tswitch n.Op {\n\t\t\tcase influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX,\n\t\t\t\tinfluxql.OR, influxql.AND:\n\t\t\tdefault:\n\t\t\t\terr = errors.New(\"invalid tag comparison operator\")\n\t\t\t}\n\t\t}\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn engine.SeriesPointIterator(opt)\n}\n\n// FieldDimensions returns unique sets of fields and dimensions across a list of sources.\nfunc (s *Shard) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {\n\tengine, 
err := s.engine()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tfields = make(map[string]influxql.DataType)\n\tdimensions = make(map[string]struct{})\n\n\tfor _, name := range measurements {\n\t\t// Handle system sources.\n\t\tif strings.HasPrefix(name, \"_\") {\n\t\t\tvar keys []string\n\t\t\tswitch name {\n\t\t\tcase \"_fieldKeys\":\n\t\t\t\tkeys = []string{\"fieldKey\", \"fieldType\"}\n\t\t\tcase \"_series\":\n\t\t\t\tkeys = []string{\"key\"}\n\t\t\tcase \"_tagKeys\":\n\t\t\t\tkeys = []string{\"tagKey\"}\n\t\t\t}\n\n\t\t\tif len(keys) > 0 {\n\t\t\t\tfor _, k := range keys {\n\t\t\t\t\tif _, ok := fields[k]; !ok || influxql.String < fields[k] {\n\t\t\t\t\t\tfields[k] = influxql.String\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Unknown system source so default to looking for a measurement.\n\t\t}\n\n\t\t// Retrieve measurement.\n\t\tif exists, err := engine.MeasurementExists([]byte(name)); err != nil {\n\t\t\treturn nil, nil, err\n\t\t} else if !exists {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Append fields and dimensions.\n\t\tmf := engine.MeasurementFields([]byte(name))\n\t\tif mf != nil {\n\t\t\tfor k, typ := range mf.FieldSet() {\n\t\t\t\tif _, ok := fields[k]; !ok || typ < fields[k] {\n\t\t\t\t\tfields[k] = typ\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif err := engine.ForEachMeasurementTagKey([]byte(name), func(key []byte) error {\n\t\t\tdimensions[string(key)] = struct{}{}\n\t\t\treturn nil\n\t\t}); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\treturn fields, dimensions, nil\n}\n\n// mapType returns the data type for the field within the measurement.\nfunc (s *Shard) mapType(measurement, field string) (influxql.DataType, error) {\n\tengine, err := s.engineNoLock()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// Process system measurements.\n\tif strings.HasPrefix(measurement, \"_\") {\n\t\tswitch measurement {\n\t\tcase \"_fieldKeys\":\n\t\t\tif field == \"fieldKey\" || field == \"fieldType\" {\n\t\t\t\treturn influxql.String, 
nil\n\t\t\t}\n\t\t\treturn influxql.Unknown, nil\n\t\tcase \"_series\":\n\t\t\tif field == \"key\" {\n\t\t\t\treturn influxql.String, nil\n\t\t\t}\n\t\t\treturn influxql.Unknown, nil\n\t\tcase \"_tagKeys\":\n\t\t\tif field == \"tagKey\" {\n\t\t\t\treturn influxql.String, nil\n\t\t\t}\n\t\t\treturn influxql.Unknown, nil\n\t\t}\n\t\t// Unknown system source so default to looking for a measurement.\n\t}\n\n\tif exists, _ := engine.MeasurementExists([]byte(measurement)); !exists {\n\t\treturn influxql.Unknown, nil\n\t}\n\n\tmf := engine.MeasurementFields([]byte(measurement))\n\tif mf != nil {\n\t\tf := mf.Field(field)\n\t\tif f != nil {\n\t\t\treturn f.Type, nil\n\t\t}\n\t}\n\n\tif exists, _ := engine.HasTagKey([]byte(measurement), []byte(field)); exists {\n\t\treturn influxql.Tag, nil\n\t}\n\n\treturn influxql.Unknown, nil\n}\n\n// expandSources expands regex sources and removes duplicates.\n// NOTE: sources must be normalized (db and rp set) before calling this function.\nfunc (s *Shard) expandSources(sources influxql.Sources) (influxql.Sources, error) {\n\tengine, err := s.engineNoLock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Use a map as a set to prevent duplicates.\n\tset := map[string]influxql.Source{}\n\n\t// Iterate all sources, expanding regexes when they're found.\n\tfor _, source := range sources {\n\t\tswitch src := source.(type) {\n\t\tcase *influxql.Measurement:\n\t\t\t// Add non-regex measurements directly to the set.\n\t\t\tif src.Regex == nil {\n\t\t\t\tset[src.String()] = src\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Loop over matching measurements.\n\t\t\tnames, err := engine.MeasurementNamesByRegex(src.Regex.Val)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tfor _, name := range names {\n\t\t\t\tother := &influxql.Measurement{\n\t\t\t\t\tDatabase:        src.Database,\n\t\t\t\t\tRetentionPolicy: src.RetentionPolicy,\n\t\t\t\t\tName:            string(name),\n\t\t\t\t}\n\t\t\t\tset[other.String()] = 
other\n\t\t\t}\n\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"expandSources: unsupported source type: %T\", source)\n\t\t}\n\t}\n\n\t// Convert set to sorted slice.\n\tnames := make([]string, 0, len(set))\n\tfor name := range set {\n\t\tnames = append(names, name)\n\t}\n\tsort.Strings(names)\n\n\t// Convert set to a list of Sources.\n\texpanded := make(influxql.Sources, 0, len(set))\n\tfor _, name := range names {\n\t\texpanded = append(expanded, set[name])\n\t}\n\n\treturn expanded, nil\n}\n\n// Backup backs up the shard by creating a tar archive of all TSM files that\n// have been modified since the provided time. See Engine.Backup for more details.\nfunc (s *Shard) Backup(w io.Writer, basePath string, since time.Time) error {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn engine.Backup(w, basePath, since)\n}\n\n// Restore restores data to the underlying engine for the shard.\n// The shard is reopened after restore.\nfunc (s *Shard) Restore(r io.Reader, basePath string) error {\n\tif err := func() error {\n\t\ts.mu.Lock()\n\t\tdefer s.mu.Unlock()\n\n\t\t// Special case - we can still restore to a disabled shard, so we should\n\t\t// only check if the engine is closed and not care if the shard is\n\t\t// disabled.\n\t\tif s._engine == nil {\n\t\t\treturn ErrEngineClosed\n\t\t}\n\n\t\t// Restore to engine.\n\t\treturn s._engine.Restore(r, basePath)\n\t}(); err != nil {\n\t\treturn err\n\t}\n\n\t// Close shard.\n\tif err := s.Close(); err != nil {\n\t\treturn err\n\t}\n\n\t// Reopen engine.\n\treturn s.Open()\n}\n\n// Import imports data to the underlying engine for the shard. 
r should\n// be a reader from a backup created by Backup.\nfunc (s *Shard) Import(r io.Reader, basePath string) error {\n\t// Special case - we can still import to a disabled shard, so we should\n\t// only check if the engine is closed and not care if the shard is\n\t// disabled.\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif s._engine == nil {\n\t\treturn ErrEngineClosed\n\t}\n\n\t// Import to engine.\n\treturn s._engine.Import(r, basePath)\n}\n\n// CreateSnapshot will return a path to a temp directory\n// containing hard links to the underlying shard files.\nfunc (s *Shard) CreateSnapshot() (string, error) {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn engine.CreateSnapshot()\n}\n\n// ForEachMeasurementName iterates over each measurement in the shard.\nfunc (s *Shard) ForEachMeasurementName(fn func(name []byte) error) error {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn engine.ForEachMeasurementName(fn)\n}\n\nfunc (s *Shard) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn engine.ForEachMeasurementTagKey(name, fn)\n}\n\nfunc (s *Shard) TagKeyCardinality(name, key []byte) int {\n\tengine, err := s.engine()\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn engine.TagKeyCardinality(name, key)\n}\n\n// engine safely (under an RLock) returns a reference to the shard's Engine, or\n// an error if the Engine is closed, or the shard is currently disabled.\n//\n// The shard's Engine should always be accessed via a call to engine(), rather\n// than directly referencing Shard.engine.\n//\n// If a caller needs an Engine reference but is already under a lock, then they\n// should use engineNoLock().\nfunc (s *Shard) engine() (Engine, error) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\treturn s.engineNoLock()\n}\n\n// engineNoLock is similar to calling engine(), but the caller must guarantee\n// that they 
already hold an appropriate lock.\nfunc (s *Shard) engineNoLock() (Engine, error) {\n\tif err := s.ready(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn s._engine, nil\n}\n\ntype ShardGroup interface {\n\tMeasurementsByRegex(re *regexp.Regexp) []string\n\tFieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error)\n\tMapType(measurement, field string) influxql.DataType\n\tCreateIterator(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error)\n\tExpandSources(sources influxql.Sources) (influxql.Sources, error)\n}\n\n// Shards represents a sortable list of shards.\ntype Shards []*Shard\n\n// Len implements sort.Interface.\nfunc (a Shards) Len() int { return len(a) }\n\n// Less implements sort.Interface.\nfunc (a Shards) Less(i, j int) bool { return a[i].id < a[j].id }\n\n// Swap implements sort.Interface.\nfunc (a Shards) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\n// MeasurementsByRegex returns the unique set of measurements matching the\n// provided regex, for all the shards.\nfunc (a Shards) MeasurementsByRegex(re *regexp.Regexp) []string {\n\tvar m map[string]struct{}\n\tfor _, sh := range a {\n\t\tnames, err := sh.MeasurementNamesByRegex(re)\n\t\tif err != nil {\n\t\t\tcontinue // Skip this shard's results—previous behaviour.\n\t\t}\n\n\t\tif m == nil {\n\t\t\tm = make(map[string]struct{}, len(names))\n\t\t}\n\n\t\tfor _, name := range names {\n\t\t\tm[string(name)] = struct{}{}\n\t\t}\n\t}\n\n\tif len(m) == 0 {\n\t\treturn nil\n\t}\n\n\tnames := make([]string, 0, len(m))\n\tfor key := range m {\n\t\tnames = append(names, key)\n\t}\n\tsort.Strings(names)\n\treturn names\n}\n\nfunc (a Shards) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) {\n\tfields = make(map[string]influxql.DataType)\n\tdimensions = make(map[string]struct{})\n\n\tfor _, sh := range a {\n\t\tf, d, err := 
sh.FieldDimensions(measurements)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tfor k, typ := range f {\n\t\t\tif _, ok := fields[k]; typ != influxql.Unknown && (!ok || typ < fields[k]) {\n\t\t\t\tfields[k] = typ\n\t\t\t}\n\t\t}\n\t\tfor k := range d {\n\t\t\tdimensions[k] = struct{}{}\n\t\t}\n\t}\n\treturn\n}\n\nfunc (a Shards) MapType(measurement, field string) influxql.DataType {\n\tvar typ influxql.DataType\n\tfor _, sh := range a {\n\t\tsh.mu.RLock()\n\t\tif t, err := sh.mapType(measurement, field); err == nil && typ.LessThan(t) {\n\t\t\ttyp = t\n\t\t}\n\t\tsh.mu.RUnlock()\n\t}\n\treturn typ\n}\n\nfunc (a Shards) CreateIterator(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\titrs := make([]influxql.Iterator, 0, len(a))\n\tfor _, sh := range a {\n\t\titr, err := sh.CreateIterator(measurement, opt)\n\t\tif err != nil {\n\t\t\tinfluxql.Iterators(itrs).Close()\n\t\t\treturn nil, err\n\t\t} else if itr == nil {\n\t\t\tcontinue\n\t\t}\n\t\titrs = append(itrs, itr)\n\n\t\tselect {\n\t\tcase <-opt.InterruptCh:\n\t\t\tinfluxql.Iterators(itrs).Close()\n\t\t\treturn nil, err\n\t\tdefault:\n\t\t}\n\n\t\t// Enforce series limit at creation time.\n\t\tif opt.MaxSeriesN > 0 {\n\t\t\tstats := itr.Stats()\n\t\t\tif stats.SeriesN > opt.MaxSeriesN {\n\t\t\t\tinfluxql.Iterators(itrs).Close()\n\t\t\t\treturn nil, fmt.Errorf(\"max-select-series limit exceeded: (%d/%d)\", stats.SeriesN, opt.MaxSeriesN)\n\t\t\t}\n\t\t}\n\t}\n\treturn influxql.Iterators(itrs).Merge(opt)\n}\n\nfunc (a Shards) ExpandSources(sources influxql.Sources) (influxql.Sources, error) {\n\t// Use a map as a set to prevent duplicates.\n\tset := map[string]influxql.Source{}\n\n\t// Iterate through every shard and expand the sources.\n\tfor _, sh := range a {\n\t\tsh.mu.RLock()\n\t\texpanded, err := sh.expandSources(sources)\n\t\tsh.mu.RUnlock()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, src := range expanded {\n\t\t\tswitch src := src.(type) 
{\n\t\t\tcase *influxql.Measurement:\n\t\t\t\tset[src.String()] = src\n\t\t\tdefault:\n\t\t\t\treturn nil, fmt.Errorf(\"Store.ExpandSources: unsupported source type: %T\", src)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Convert set to sorted slice.\n\tnames := make([]string, 0, len(set))\n\tfor name := range set {\n\t\tnames = append(names, name)\n\t}\n\tsort.Strings(names)\n\n\t// Convert set to a list of Sources.\n\tsorted := make([]influxql.Source, 0, len(set))\n\tfor _, name := range names {\n\t\tsorted = append(sorted, set[name])\n\t}\n\treturn sorted, nil\n}\n\n// MeasurementFields holds the fields of a measurement and their codec.\ntype MeasurementFields struct {\n\tmu sync.RWMutex\n\n\tfields map[string]*Field\n}\n\n// NewMeasurementFields returns an initialised *MeasurementFields value.\nfunc NewMeasurementFields() *MeasurementFields {\n\treturn &MeasurementFields{fields: make(map[string]*Field)}\n}\n\n// MarshalBinary encodes the object to a binary format.\nfunc (m *MeasurementFields) MarshalBinary() ([]byte, error) {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\n\tvar pb internal.MeasurementFields\n\tfor _, f := range m.fields {\n\t\tid := int32(f.ID)\n\t\tname := f.Name\n\t\tt := int32(f.Type)\n\t\tpb.Fields = append(pb.Fields, &internal.Field{ID: &id, Name: &name, Type: &t})\n\t}\n\treturn proto.Marshal(&pb)\n}\n\n// UnmarshalBinary decodes the object from a binary format.\nfunc (m *MeasurementFields) UnmarshalBinary(buf []byte) error {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tvar pb internal.MeasurementFields\n\tif err := proto.Unmarshal(buf, &pb); err != nil {\n\t\treturn err\n\t}\n\tm.fields = make(map[string]*Field, len(pb.Fields))\n\tfor _, f := range pb.Fields {\n\t\tm.fields[f.GetName()] = &Field{ID: uint8(f.GetID()), Name: f.GetName(), Type: influxql.DataType(f.GetType())}\n\t}\n\treturn nil\n}\n\n// CreateFieldIfNotExists creates a new field with an autoincrementing ID.\n// Returns an error if 255 fields have already been created on the measurement or\n// 
the fields already exists with a different type.\nfunc (m *MeasurementFields) CreateFieldIfNotExists(name []byte, typ influxql.DataType, limitCount bool) error {\n\tm.mu.RLock()\n\n\t// Ignore if the field already exists.\n\tif f := m.fields[string(name)]; f != nil {\n\t\tif f.Type != typ {\n\t\t\tm.mu.RUnlock()\n\t\t\treturn ErrFieldTypeConflict\n\t\t}\n\t\tm.mu.RUnlock()\n\t\treturn nil\n\t}\n\tm.mu.RUnlock()\n\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\t// Re-check field and type under write lock.\n\tif f := m.fields[string(name)]; f != nil {\n\t\tif f.Type != typ {\n\t\t\treturn ErrFieldTypeConflict\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Create and append a new field.\n\tf := &Field{\n\t\tID:   uint8(len(m.fields) + 1),\n\t\tName: string(name),\n\t\tType: typ,\n\t}\n\tm.fields[string(name)] = f\n\n\treturn nil\n}\n\nfunc (m *MeasurementFields) FieldN() int {\n\tm.mu.RLock()\n\tn := len(m.fields)\n\tm.mu.RUnlock()\n\treturn n\n}\n\n// Field returns the field for name, or nil if there is no field for name.\nfunc (m *MeasurementFields) Field(name string) *Field {\n\tm.mu.RLock()\n\tf := m.fields[name]\n\tm.mu.RUnlock()\n\treturn f\n}\n\nfunc (m *MeasurementFields) HasField(name string) bool {\n\tm.mu.RLock()\n\tf := m.fields[name]\n\tm.mu.RUnlock()\n\treturn f != nil\n}\n\n// FieldBytes returns the field for name, or nil if there is no field for name.\n// FieldBytes should be preferred to Field when the caller has a []byte, because\n// it avoids a string allocation, which can't be avoided if the caller converts\n// the []byte to a string and calls Field.\nfunc (m *MeasurementFields) FieldBytes(name []byte) *Field {\n\tm.mu.RLock()\n\tf := m.fields[string(name)]\n\tm.mu.RUnlock()\n\treturn f\n}\n\n// FieldSet returns the set of fields and their types for the measurement.\nfunc (m *MeasurementFields) FieldSet() map[string]influxql.DataType {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\n\tfields := make(map[string]influxql.DataType)\n\tfor name, f := range m.fields 
{\n\t\tfields[name] = f.Type\n\t}\n\treturn fields\n}\n\n// Clone returns copy of the MeasurementFields\nfunc (m *MeasurementFields) Clone() *MeasurementFields {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\tfields := make(map[string]*Field, len(m.fields))\n\tfor key, field := range m.fields {\n\t\tfields[key] = field\n\t}\n\treturn &MeasurementFields{\n\t\tfields: fields,\n\t}\n}\n\n// MeasurementFieldSet represents a collection of fields by measurement.\n// This safe for concurrent use.\ntype MeasurementFieldSet struct {\n\tmu     sync.RWMutex\n\tfields map[string]*MeasurementFields\n}\n\n// NewMeasurementFieldSet returns a new instance of MeasurementFieldSet.\nfunc NewMeasurementFieldSet() *MeasurementFieldSet {\n\treturn &MeasurementFieldSet{\n\t\tfields: make(map[string]*MeasurementFields),\n\t}\n}\n\n// Fields returns fields for a measurement by name.\nfunc (fs *MeasurementFieldSet) Fields(name string) *MeasurementFields {\n\tfs.mu.RLock()\n\tmf := fs.fields[name]\n\tfs.mu.RUnlock()\n\treturn mf\n}\n\n// CreateFieldsIfNotExists returns fields for a measurement by name.\nfunc (fs *MeasurementFieldSet) CreateFieldsIfNotExists(name []byte) *MeasurementFields {\n\tfs.mu.RLock()\n\tmf := fs.fields[string(name)]\n\tfs.mu.RUnlock()\n\n\tif mf != nil {\n\t\treturn mf\n\t}\n\n\tfs.mu.Lock()\n\tmf = fs.fields[string(name)]\n\tif mf == nil {\n\t\tmf = NewMeasurementFields()\n\t\tfs.fields[string(name)] = mf\n\t}\n\tfs.mu.Unlock()\n\treturn mf\n}\n\n// Delete removes a field set for a measurement.\nfunc (fs *MeasurementFieldSet) Delete(name string) {\n\tfs.mu.Lock()\n\tdelete(fs.fields, name)\n\tfs.mu.Unlock()\n}\n\n// DeleteWithLock executes fn and removes a field set from a measurement under lock.\nfunc (fs *MeasurementFieldSet) DeleteWithLock(name string, fn func() error) error {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\tif err := fn(); err != nil {\n\t\treturn err\n\t}\n\n\tdelete(fs.fields, name)\n\treturn nil\n}\n\n// Field represents a series field.\ntype Field 
struct {\n\tID   uint8             `json:\"id,omitempty\"`\n\tName string            `json:\"name,omitempty\"`\n\tType influxql.DataType `json:\"type,omitempty\"`\n}\n\n// NewFieldKeysIterator returns an iterator that can be iterated over to\n// retrieve field keys.\nfunc NewFieldKeysIterator(engine Engine, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\titr := &fieldKeysIterator{engine: engine}\n\n\t// Retrieve measurements from shard. Filter if condition specified.\n\tnames, err := engine.MeasurementNamesByExpr(opt.Condition)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\titr.names = names\n\n\treturn itr, nil\n}\n\n// fieldKeysIterator iterates over measurements and gets field keys from each measurement.\ntype fieldKeysIterator struct {\n\tengine Engine\n\tnames  [][]byte // remaining measurement names\n\tbuf    struct {\n\t\tname   []byte  // current measurement name\n\t\tfields []Field // current measurement's fields\n\t}\n}\n\n// Stats returns stats about the points processed.\nfunc (itr *fieldKeysIterator) Stats() influxql.IteratorStats { return influxql.IteratorStats{} }\n\n// Close closes the iterator.\nfunc (itr *fieldKeysIterator) Close() error { return nil }\n\n// Next emits the next tag key name.\nfunc (itr *fieldKeysIterator) Next() (*influxql.FloatPoint, error) {\n\tfor {\n\t\t// If there are no more keys then move to the next measurements.\n\t\tif len(itr.buf.fields) == 0 {\n\t\t\tif len(itr.names) == 0 {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\n\t\t\titr.buf.name = itr.names[0]\n\t\t\tmf := itr.engine.MeasurementFields(itr.buf.name)\n\t\t\tif mf != nil {\n\t\t\t\tfset := mf.FieldSet()\n\t\t\t\tif len(fset) == 0 {\n\t\t\t\t\titr.names = itr.names[1:]\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tkeys := make([]string, 0, len(fset))\n\t\t\t\tfor k := range fset {\n\t\t\t\t\tkeys = append(keys, k)\n\t\t\t\t}\n\t\t\t\tsort.Strings(keys)\n\n\t\t\t\titr.buf.fields = make([]Field, len(keys))\n\t\t\t\tfor i, name := range keys 
{\n\t\t\t\t\titr.buf.fields[i] = Field{Name: name, Type: fset[name]}\n\t\t\t\t}\n\t\t\t}\n\t\t\titr.names = itr.names[1:]\n\t\t\tcontinue\n\t\t}\n\n\t\t// Return next key.\n\t\tfield := itr.buf.fields[0]\n\t\tp := &influxql.FloatPoint{\n\t\t\tName: string(itr.buf.name),\n\t\t\tAux:  []interface{}{field.Name, field.Type.String()},\n\t\t}\n\t\titr.buf.fields = itr.buf.fields[1:]\n\n\t\treturn p, nil\n\t}\n}\n\n// NewTagKeysIterator returns a new instance of TagKeysIterator.\nfunc NewTagKeysIterator(engine Engine, opt influxql.IteratorOptions) (influxql.Iterator, error) {\n\tfn := func(name []byte) ([][]byte, error) {\n\t\tvar keys [][]byte\n\t\tif err := engine.ForEachMeasurementTagKey(name, func(key []byte) error {\n\t\t\tkeys = append(keys, key)\n\t\t\treturn nil\n\t\t}); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn keys, nil\n\t}\n\treturn newMeasurementKeysIterator(engine, fn, opt)\n}\n\n// measurementKeyFunc is the function called by measurementKeysIterator.\ntype measurementKeyFunc func(name []byte) ([][]byte, error)\n\nfunc newMeasurementKeysIterator(engine Engine, fn measurementKeyFunc, opt influxql.IteratorOptions) (*measurementKeysIterator, error) {\n\titr := &measurementKeysIterator{fn: fn}\n\n\tnames, err := engine.MeasurementNamesByExpr(opt.Condition)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\titr.names = names\n\n\treturn itr, nil\n}\n\n// measurementKeysIterator iterates over measurements and gets keys from each measurement.\ntype measurementKeysIterator struct {\n\tnames [][]byte // remaining measurement names\n\tbuf   struct {\n\t\tname []byte   // current measurement name\n\t\tkeys [][]byte // current measurement's keys\n\t}\n\tfn measurementKeyFunc\n}\n\n// Stats returns stats about the points processed.\nfunc (itr *measurementKeysIterator) Stats() influxql.IteratorStats { return influxql.IteratorStats{} }\n\n// Close closes the iterator.\nfunc (itr *measurementKeysIterator) Close() error { return nil }\n\n// Next emits the next tag 
key name.\nfunc (itr *measurementKeysIterator) Next() (*influxql.FloatPoint, error) {\n\tfor {\n\t\t// If there are no more keys then move to the next measurements.\n\t\tif len(itr.buf.keys) == 0 {\n\t\t\tif len(itr.names) == 0 {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\n\t\t\titr.buf.name, itr.names = itr.names[0], itr.names[1:]\n\n\t\t\tkeys, err := itr.fn(itr.buf.name)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\titr.buf.keys = keys\n\t\t\tcontinue\n\t\t}\n\n\t\t// Return next key.\n\t\tp := &influxql.FloatPoint{\n\t\t\tName: string(itr.buf.name),\n\t\t\tAux:  []interface{}{string(itr.buf.keys[0])},\n\t\t}\n\t\titr.buf.keys = itr.buf.keys[1:]\n\n\t\treturn p, nil\n\t}\n}\n\n// LimitError represents an error caused by a configurable limit.\ntype LimitError struct {\n\tReason string\n}\n\nfunc (e *LimitError) Error() string { return e.Reason }\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/shard_internal_test.go",
    "content": "package tsdb\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/google/go-cmp/cmp\"\n\t\"github.com/google/go-cmp/cmp/cmpopts\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n)\n\nfunc TestShard_MapType(t *testing.T) {\n\tvar sh *TempShard\n\n\tsetup := func(index string) {\n\t\tsh = NewTempShard(index)\n\n\t\tif err := sh.Open(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tsh.MustWritePointsString(`\ncpu,host=serverA,region=uswest value=100 0\ncpu,host=serverA,region=uswest value=50,val2=5  10\ncpu,host=serverB,region=uswest value=25  0\nmem,host=serverA value=25i 0\nmem,host=serverB value=50i,val3=t 10\n_reserved,region=uswest value=\"foo\" 0\n`)\n\t}\n\n\tfor _, index := range RegisteredIndexes() {\n\t\tsetup(index)\n\t\tfor _, tt := range []struct {\n\t\t\tmeasurement string\n\t\t\tfield       string\n\t\t\ttyp         influxql.DataType\n\t\t}{\n\t\t\t{\n\t\t\t\tmeasurement: \"cpu\",\n\t\t\t\tfield:       \"value\",\n\t\t\t\ttyp:         influxql.Float,\n\t\t\t},\n\t\t\t{\n\t\t\t\tmeasurement: \"cpu\",\n\t\t\t\tfield:       \"host\",\n\t\t\t\ttyp:         influxql.Tag,\n\t\t\t},\n\t\t\t{\n\t\t\t\tmeasurement: \"cpu\",\n\t\t\t\tfield:       \"region\",\n\t\t\t\ttyp:         influxql.Tag,\n\t\t\t},\n\t\t\t{\n\t\t\t\tmeasurement: \"cpu\",\n\t\t\t\tfield:       \"val2\",\n\t\t\t\ttyp:         influxql.Float,\n\t\t\t},\n\t\t\t{\n\t\t\t\tmeasurement: \"cpu\",\n\t\t\t\tfield:       \"unknown\",\n\t\t\t\ttyp:         influxql.Unknown,\n\t\t\t},\n\t\t\t{\n\t\t\t\tmeasurement: \"mem\",\n\t\t\t\tfield:       \"value\",\n\t\t\t\ttyp:         influxql.Integer,\n\t\t\t},\n\t\t\t{\n\t\t\t\tmeasurement: \"mem\",\n\t\t\t\tfield:       \"val3\",\n\t\t\t\ttyp:         influxql.Boolean,\n\t\t\t},\n\t\t\t{\n\t\t\t\tmeasurement: \"mem\",\n\t\t\t\tfield:       \"host\",\n\t\t\t\ttyp:         
influxql.Tag,\n\t\t\t},\n\t\t\t{\n\t\t\t\tmeasurement: \"unknown\",\n\t\t\t\tfield:       \"unknown\",\n\t\t\t\ttyp:         influxql.Unknown,\n\t\t\t},\n\t\t\t{\n\t\t\t\tmeasurement: \"_fieldKeys\",\n\t\t\t\tfield:       \"fieldKey\",\n\t\t\t\ttyp:         influxql.String,\n\t\t\t},\n\t\t\t{\n\t\t\t\tmeasurement: \"_fieldKeys\",\n\t\t\t\tfield:       \"fieldType\",\n\t\t\t\ttyp:         influxql.String,\n\t\t\t},\n\t\t\t{\n\t\t\t\tmeasurement: \"_fieldKeys\",\n\t\t\t\tfield:       \"unknown\",\n\t\t\t\ttyp:         influxql.Unknown,\n\t\t\t},\n\t\t\t{\n\t\t\t\tmeasurement: \"_series\",\n\t\t\t\tfield:       \"key\",\n\t\t\t\ttyp:         influxql.String,\n\t\t\t},\n\t\t\t{\n\t\t\t\tmeasurement: \"_series\",\n\t\t\t\tfield:       \"unknown\",\n\t\t\t\ttyp:         influxql.Unknown,\n\t\t\t},\n\t\t\t{\n\t\t\t\tmeasurement: \"_tagKeys\",\n\t\t\t\tfield:       \"tagKey\",\n\t\t\t\ttyp:         influxql.String,\n\t\t\t},\n\t\t\t{\n\t\t\t\tmeasurement: \"_tagKeys\",\n\t\t\t\tfield:       \"unknown\",\n\t\t\t\ttyp:         influxql.Unknown,\n\t\t\t},\n\t\t\t{\n\t\t\t\tmeasurement: \"_reserved\",\n\t\t\t\tfield:       \"value\",\n\t\t\t\ttyp:         influxql.String,\n\t\t\t},\n\t\t\t{\n\t\t\t\tmeasurement: \"_reserved\",\n\t\t\t\tfield:       \"region\",\n\t\t\t\ttyp:         influxql.Tag,\n\t\t\t},\n\t\t} {\n\t\t\tname := fmt.Sprintf(\"%s_%s_%s\", index, tt.measurement, tt.field)\n\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\ttyp, err := sh.mapType(tt.measurement, tt.field)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tif have, want := typ, tt.typ; have != want {\n\t\t\t\t\tt.Errorf(\"unexpected data type: have=%#v want=%#v\", have, want)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t\tsh.Close()\n\t}\n}\n\nfunc TestShard_MeasurementsByRegex(t *testing.T) {\n\tvar sh *TempShard\n\tsetup := func(index string) {\n\t\tsh = NewTempShard(index)\n\t\tif err := sh.Open(); err != nil 
{\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tsh.MustWritePointsString(`\ncpu,host=serverA,region=uswest value=100 0\ncpu,host=serverA,region=uswest value=50,val2=5  10\ncpu,host=serverB,region=uswest value=25  0\nmem,host=serverA value=25i 0\nmem,host=serverB value=50i,val3=t 10\n`)\n\t}\n\n\tfor _, index := range RegisteredIndexes() {\n\t\tsetup(index)\n\t\tfor _, tt := range []struct {\n\t\t\tregex        string\n\t\t\tmeasurements []string\n\t\t}{\n\t\t\t{regex: `cpu`, measurements: []string{\"cpu\"}},\n\t\t\t{regex: `mem`, measurements: []string{\"mem\"}},\n\t\t\t{regex: `cpu|mem`, measurements: []string{\"cpu\", \"mem\"}},\n\t\t\t{regex: `gpu`, measurements: []string{}},\n\t\t\t{regex: `pu`, measurements: []string{\"cpu\"}},\n\t\t\t{regex: `p|m`, measurements: []string{\"cpu\", \"mem\"}},\n\t\t} {\n\t\t\tt.Run(index+\"_\"+tt.regex, func(t *testing.T) {\n\t\t\t\tre := regexp.MustCompile(tt.regex)\n\t\t\t\tmeasurements, err := sh.MeasurementNamesByRegex(re)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tmstrings := make([]string, 0, len(measurements))\n\t\t\t\tfor _, name := range measurements {\n\t\t\t\t\tmstrings = append(mstrings, string(name))\n\t\t\t\t}\n\t\t\t\tsort.Strings(mstrings)\n\t\t\t\tif diff := cmp.Diff(tt.measurements, mstrings, cmpopts.EquateEmpty()); diff != \"\" {\n\t\t\t\t\tt.Errorf(\"unexpected measurements:\\n%s\", diff)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t\tsh.Close()\n\t}\n}\n\n// TempShard represents a test wrapper for Shard that uses temporary\n// filesystem paths.\ntype TempShard struct {\n\t*Shard\n\tpath string\n}\n\n// NewTempShard returns a new instance of TempShard with temp paths.\nfunc NewTempShard(index string) *TempShard {\n\t// Create temporary path for data and WAL.\n\tdir, err := ioutil.TempDir(\"\", \"influxdb-tsdb-\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Build engine options.\n\topt := NewEngineOptions()\n\topt.IndexVersion = index\n\topt.Config.WALDir = filepath.Join(dir, \"wal\")\n\tif index == 
\"inmem\" {\n\t\topt.InmemIndex, _ = NewInmemIndex(path.Base(dir))\n\t}\n\n\treturn &TempShard{\n\t\tShard: NewShard(0,\n\t\t\tfilepath.Join(dir, \"data\", \"db0\", \"rp0\", \"1\"),\n\t\t\tfilepath.Join(dir, \"wal\", \"db0\", \"rp0\", \"1\"),\n\t\t\topt,\n\t\t),\n\t\tpath: dir,\n\t}\n}\n\n// Close closes the shard and removes all underlying data.\nfunc (sh *TempShard) Close() error {\n\tdefer os.RemoveAll(sh.path)\n\treturn sh.Shard.Close()\n}\n\n// MustWritePointsString parses the line protocol (with second precision) and\n// inserts the resulting points into the shard. Panic on error.\nfunc (sh *TempShard) MustWritePointsString(s string) {\n\ta, err := models.ParsePointsWithPrecision([]byte(strings.TrimSpace(s)), time.Time{}, \"s\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif err := sh.WritePoints(a); err != nil {\n\t\tpanic(err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/shard_test.go",
    "content": "package tsdb_test\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/deep\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t_ \"github.com/influxdata/influxdb/tsdb/engine\"\n\t_ \"github.com/influxdata/influxdb/tsdb/index\"\n\t\"github.com/influxdata/influxdb/tsdb/index/inmem\"\n)\n\nfunc TestShardWriteAndIndex(t *testing.T) {\n\ttmpDir, _ := ioutil.TempDir(\"\", \"shard_test\")\n\tdefer os.RemoveAll(tmpDir)\n\ttmpShard := path.Join(tmpDir, \"shard\")\n\ttmpWal := path.Join(tmpDir, \"wal\")\n\n\topts := tsdb.NewEngineOptions()\n\topts.Config.WALDir = filepath.Join(tmpDir, \"wal\")\n\topts.InmemIndex = inmem.NewIndex(path.Base(tmpDir))\n\n\tsh := tsdb.NewShard(1, tmpShard, tmpWal, opts)\n\n\t// Calling WritePoints when the engine is not open will return\n\t// ErrEngineClosed.\n\tif got, exp := sh.WritePoints(nil), tsdb.ErrEngineClosed; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\tif err := sh.Open(); err != nil {\n\t\tt.Fatalf(\"error opening shard: %s\", err.Error())\n\t}\n\n\tpt := models.MustNewPoint(\n\t\t\"cpu\",\n\t\tmodels.Tags{{Key: []byte(\"host\"), Value: []byte(\"server\")}},\n\t\tmap[string]interface{}{\"value\": 1.0},\n\t\ttime.Unix(1, 2),\n\t)\n\n\terr := sh.WritePoints([]models.Point{pt})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\tpt.SetTime(time.Unix(2, 3))\n\terr = sh.WritePoints([]models.Point{pt})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\tvalidateIndex := func() {\n\t\tcnt := sh.SeriesN()\n\t\tif got, exp := cnt, int64(1); got != exp {\n\t\t\tt.Fatalf(\"got %v series, exp %v series in index\", got, exp)\n\t\t}\n\t}\n\n\tvalidateIndex()\n\n\t// ensure the index gets loaded after closing and opening the 
shard\n\tsh.Close()\n\n\tsh = tsdb.NewShard(1, tmpShard, tmpWal, opts)\n\tif err := sh.Open(); err != nil {\n\t\tt.Fatalf(\"error opening shard: %s\", err.Error())\n\t}\n\n\tvalidateIndex()\n\n\t// and ensure that we can still write data\n\tpt.SetTime(time.Unix(2, 6))\n\terr = sh.WritePoints([]models.Point{pt})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n}\n\nfunc TestMaxSeriesLimit(t *testing.T) {\n\ttmpDir, _ := ioutil.TempDir(\"\", \"shard_test\")\n\tdefer os.RemoveAll(tmpDir)\n\ttmpShard := path.Join(tmpDir, \"db\", \"rp\", \"1\")\n\ttmpWal := path.Join(tmpDir, \"wal\")\n\n\topts := tsdb.NewEngineOptions()\n\topts.Config.WALDir = filepath.Join(tmpDir, \"wal\")\n\topts.Config.MaxSeriesPerDatabase = 1000\n\topts.InmemIndex = inmem.NewIndex(path.Base(tmpDir))\n\n\tsh := tsdb.NewShard(1, tmpShard, tmpWal, opts)\n\n\tif err := sh.Open(); err != nil {\n\t\tt.Fatalf(\"error opening shard: %s\", err.Error())\n\t}\n\n\t// Writing 1K series should succeed.\n\tpoints := []models.Point{}\n\n\tfor i := 0; i < 1000; i++ {\n\t\tpt := models.MustNewPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.Tags{{Key: []byte(\"host\"), Value: []byte(fmt.Sprintf(\"server%d\", i))}},\n\t\t\tmap[string]interface{}{\"value\": 1.0},\n\t\t\ttime.Unix(1, 2),\n\t\t)\n\t\tpoints = append(points, pt)\n\t}\n\n\terr := sh.WritePoints(points)\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\t// Writing one more series should exceed the series limit.\n\tpt := models.MustNewPoint(\n\t\t\"cpu\",\n\t\tmodels.Tags{{Key: []byte(\"host\"), Value: []byte(\"server9999\")}},\n\t\tmap[string]interface{}{\"value\": 1.0},\n\t\ttime.Unix(1, 2),\n\t)\n\n\terr = sh.WritePoints([]models.Point{pt})\n\tif err == nil {\n\t\tt.Fatal(\"expected error\")\n\t} else if exp, got := `partial write: max-series-per-database limit exceeded: (1000) dropped=1`, err.Error(); exp != got {\n\t\tt.Fatalf(\"unexpected error message:\\n\\texp = %s\\n\\tgot = %s\", exp, got)\n\t}\n\n\tsh.Close()\n}\n\nfunc TestShard_MaxTagValuesLimit(t 
*testing.T) {\n\ttmpDir, _ := ioutil.TempDir(\"\", \"shard_test\")\n\tdefer os.RemoveAll(tmpDir)\n\ttmpShard := path.Join(tmpDir, \"db\", \"rp\", \"1\")\n\ttmpWal := path.Join(tmpDir, \"wal\")\n\n\topts := tsdb.NewEngineOptions()\n\topts.Config.WALDir = filepath.Join(tmpDir, \"wal\")\n\topts.Config.MaxValuesPerTag = 1000\n\topts.InmemIndex = inmem.NewIndex(path.Base(tmpDir))\n\n\tsh := tsdb.NewShard(1, tmpShard, tmpWal, opts)\n\n\tif err := sh.Open(); err != nil {\n\t\tt.Fatalf(\"error opening shard: %s\", err.Error())\n\t}\n\n\t// Writing 1K series should succeed.\n\tpoints := []models.Point{}\n\n\tfor i := 0; i < 1000; i++ {\n\t\tpt := models.MustNewPoint(\n\t\t\t\"cpu\",\n\t\t\tmodels.Tags{{Key: []byte(\"host\"), Value: []byte(fmt.Sprintf(\"server%d\", i))}},\n\t\t\tmap[string]interface{}{\"value\": 1.0},\n\t\t\ttime.Unix(1, 2),\n\t\t)\n\t\tpoints = append(points, pt)\n\t}\n\n\terr := sh.WritePoints(points)\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\t// Writing one more series should exceed the series limit.\n\tpt := models.MustNewPoint(\n\t\t\"cpu\",\n\t\tmodels.Tags{{Key: []byte(\"host\"), Value: []byte(\"server9999\")}},\n\t\tmap[string]interface{}{\"value\": 1.0},\n\t\ttime.Unix(1, 2),\n\t)\n\n\terr = sh.WritePoints([]models.Point{pt})\n\tif err == nil {\n\t\tt.Fatal(\"expected error\")\n\t} else if exp, got := `partial write: max-values-per-tag limit exceeded (1000/1000): measurement=\"cpu\" tag=\"host\" value=\"server9999\" dropped=1`, err.Error(); exp != got {\n\t\tt.Fatalf(\"unexpected error message:\\n\\texp = %s\\n\\tgot = %s\", exp, got)\n\t}\n\n\tsh.Close()\n}\n\nfunc TestWriteTimeTag(t *testing.T) {\n\ttmpDir, _ := ioutil.TempDir(\"\", \"shard_test\")\n\tdefer os.RemoveAll(tmpDir)\n\ttmpShard := path.Join(tmpDir, \"shard\")\n\ttmpWal := path.Join(tmpDir, \"wal\")\n\n\topts := tsdb.NewEngineOptions()\n\topts.Config.WALDir = filepath.Join(tmpDir, \"wal\")\n\topts.InmemIndex = inmem.NewIndex(path.Base(tmpDir))\n\n\tsh := tsdb.NewShard(1, 
tmpShard, tmpWal, opts)\n\tif err := sh.Open(); err != nil {\n\t\tt.Fatalf(\"error opening shard: %s\", err.Error())\n\t}\n\tdefer sh.Close()\n\n\tpt := models.MustNewPoint(\n\t\t\"cpu\",\n\t\tmodels.NewTags(map[string]string{}),\n\t\tmap[string]interface{}{\"time\": 1.0},\n\t\ttime.Unix(1, 2),\n\t)\n\n\tif err := sh.WritePoints([]models.Point{pt}); err == nil {\n\t\tt.Fatal(\"expected error: got nil\")\n\t}\n\n\tpt = models.MustNewPoint(\n\t\t\"cpu\",\n\t\tmodels.NewTags(map[string]string{}),\n\t\tmap[string]interface{}{\"value\": 1.0, \"time\": 1.0},\n\t\ttime.Unix(1, 2),\n\t)\n\n\tif err := sh.WritePoints([]models.Point{pt}); err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tmf := sh.MeasurementFields([]byte(\"cpu\"))\n\tif mf == nil {\n\t\tt.Fatal(\"expected cpu measurement fields\")\n\t}\n\n\tif got, exp := mf.FieldN(), 1; got != exp {\n\t\tt.Fatalf(\"invalid number of field names: got=%v exp=%v\", got, exp)\n\t}\n}\n\nfunc TestWriteTimeField(t *testing.T) {\n\ttmpDir, _ := ioutil.TempDir(\"\", \"shard_test\")\n\tdefer os.RemoveAll(tmpDir)\n\ttmpShard := path.Join(tmpDir, \"shard\")\n\ttmpWal := path.Join(tmpDir, \"wal\")\n\n\topts := tsdb.NewEngineOptions()\n\topts.Config.WALDir = filepath.Join(tmpDir, \"wal\")\n\topts.InmemIndex = inmem.NewIndex(path.Base(tmpDir))\n\n\tsh := tsdb.NewShard(1, tmpShard, tmpWal, opts)\n\tif err := sh.Open(); err != nil {\n\t\tt.Fatalf(\"error opening shard: %s\", err.Error())\n\t}\n\tdefer sh.Close()\n\n\tpt := models.MustNewPoint(\n\t\t\"cpu\",\n\t\tmodels.NewTags(map[string]string{\"time\": \"now\"}),\n\t\tmap[string]interface{}{\"value\": 1.0},\n\t\ttime.Unix(1, 2),\n\t)\n\n\tif err := sh.WritePoints([]models.Point{pt}); err == nil {\n\t\tt.Fatal(\"expected error: got nil\")\n\t}\n\n\tkey := models.MakeKey([]byte(\"cpu\"), nil)\n\tif ok, err := sh.MeasurementExists(key); ok && err == nil {\n\t\tt.Fatal(\"unexpected series\")\n\t}\n}\n\nfunc TestShardWriteAddNewField(t *testing.T) {\n\ttmpDir, _ := 
ioutil.TempDir(\"\", \"shard_test\")\n\tdefer os.RemoveAll(tmpDir)\n\ttmpShard := path.Join(tmpDir, \"shard\")\n\ttmpWal := path.Join(tmpDir, \"wal\")\n\n\topts := tsdb.NewEngineOptions()\n\topts.Config.WALDir = filepath.Join(tmpDir, \"wal\")\n\topts.InmemIndex = inmem.NewIndex(path.Base(tmpDir))\n\n\tsh := tsdb.NewShard(1, tmpShard, tmpWal, opts)\n\tif err := sh.Open(); err != nil {\n\t\tt.Fatalf(\"error opening shard: %s\", err.Error())\n\t}\n\tdefer sh.Close()\n\n\tpt := models.MustNewPoint(\n\t\t\"cpu\",\n\t\tmodels.NewTags(map[string]string{\"host\": \"server\"}),\n\t\tmap[string]interface{}{\"value\": 1.0},\n\t\ttime.Unix(1, 2),\n\t)\n\n\terr := sh.WritePoints([]models.Point{pt})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\tpt = models.MustNewPoint(\n\t\t\"cpu\",\n\t\tmodels.NewTags(map[string]string{\"host\": \"server\"}),\n\t\tmap[string]interface{}{\"value\": 1.0, \"value2\": 2.0},\n\t\ttime.Unix(1, 2),\n\t)\n\n\terr = sh.WritePoints([]models.Point{pt})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\tif got, exp := sh.SeriesN(), int64(1); got != exp {\n\t\tt.Fatalf(\"got %d series, exp %d series in index\", got, exp)\n\t}\n}\n\n// Tests concurrently writing to the same shard with different field types which\n// can trigger a panic when the shard is snapshotted to TSM files.\nfunc TestShard_WritePoints_FieldConflictConcurrent(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip()\n\t}\n\ttmpDir, _ := ioutil.TempDir(\"\", \"shard_test\")\n\tdefer os.RemoveAll(tmpDir)\n\ttmpShard := path.Join(tmpDir, \"shard\")\n\ttmpWal := path.Join(tmpDir, \"wal\")\n\n\topts := tsdb.NewEngineOptions()\n\topts.Config.WALDir = filepath.Join(tmpDir, \"wal\")\n\topts.InmemIndex = inmem.NewIndex(path.Base(tmpDir))\n\n\tsh := tsdb.NewShard(1, tmpShard, tmpWal, opts)\n\tif err := sh.Open(); err != nil {\n\t\tt.Fatalf(\"error opening shard: %s\", err.Error())\n\t}\n\tdefer sh.Close()\n\n\tpoints := make([]models.Point, 0, 1000)\n\tfor i := 0; i < cap(points); i++ 
{\n\t\tif i < 500 {\n\t\t\tpoints = append(points, models.MustNewPoint(\n\t\t\t\t\"cpu\",\n\t\t\t\tmodels.NewTags(map[string]string{\"host\": \"server\"}),\n\t\t\t\tmap[string]interface{}{\"value\": 1.0},\n\t\t\t\ttime.Unix(int64(i), 0),\n\t\t\t))\n\t\t} else {\n\t\t\tpoints = append(points, models.MustNewPoint(\n\t\t\t\t\"cpu\",\n\t\t\t\tmodels.NewTags(map[string]string{\"host\": \"server\"}),\n\t\t\t\tmap[string]interface{}{\"value\": int64(1)},\n\t\t\t\ttime.Unix(int64(i), 0),\n\t\t\t))\n\t\t}\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\terrC := make(chan error)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor i := 0; i < 50; i++ {\n\t\t\tif err := sh.DeleteMeasurement([]byte(\"cpu\")); err != nil {\n\t\t\t\terrC <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t_ = sh.WritePoints(points[:500])\n\t\t\tif f, err := sh.CreateSnapshot(); err == nil {\n\t\t\t\tos.RemoveAll(f)\n\t\t\t}\n\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor i := 0; i < 50; i++ {\n\t\t\tif err := sh.DeleteMeasurement([]byte(\"cpu\")); err != nil {\n\t\t\t\terrC <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t_ = sh.WritePoints(points[500:])\n\t\t\tif f, err := sh.CreateSnapshot(); err == nil {\n\t\t\t\tos.RemoveAll(f)\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(errC)\n\t}()\n\n\tfor err := range errC {\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n}\n\nfunc TestShard_WritePoints_FieldConflictConcurrentQuery(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip()\n\t}\n\ttmpDir, _ := ioutil.TempDir(\"\", \"shard_test\")\n\tdefer os.RemoveAll(tmpDir)\n\ttmpShard := path.Join(tmpDir, \"shard\")\n\ttmpWal := path.Join(tmpDir, \"wal\")\n\n\topts := tsdb.NewEngineOptions()\n\topts.Config.WALDir = filepath.Join(tmpDir, \"wal\")\n\topts.InmemIndex = inmem.NewIndex(path.Base(tmpDir))\n\n\tsh := tsdb.NewShard(1, tmpShard, tmpWal, opts)\n\tif err := sh.Open(); err != nil {\n\t\tt.Fatalf(\"error opening shard: %s\", err.Error())\n\t}\n\tdefer sh.Close()\n\n\t// Spin up two 
goroutines that write points with different field types in reverse\n\t// order concurrently.  After writing them, query them back.\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\t// Write 250 floats and then ints to the same field\n\t\tpoints := make([]models.Point, 0, 500)\n\t\tfor i := 0; i < cap(points); i++ {\n\t\t\tif i < 250 {\n\t\t\t\tpoints = append(points, models.MustNewPoint(\n\t\t\t\t\t\"cpu\",\n\t\t\t\t\tmodels.NewTags(map[string]string{\"host\": \"server\"}),\n\t\t\t\t\tmap[string]interface{}{\"value\": 1.0},\n\t\t\t\t\ttime.Unix(int64(i), 0),\n\t\t\t\t))\n\t\t\t} else {\n\t\t\t\tpoints = append(points, models.MustNewPoint(\n\t\t\t\t\t\"cpu\",\n\t\t\t\t\tmodels.NewTags(map[string]string{\"host\": \"server\"}),\n\t\t\t\t\tmap[string]interface{}{\"value\": int64(1)},\n\t\t\t\t\ttime.Unix(int64(i), 0),\n\t\t\t\t))\n\t\t\t}\n\t\t}\n\n\t\tfor i := 0; i < 500; i++ {\n\t\t\tif err := sh.DeleteMeasurement([]byte(\"cpu\")); err != nil {\n\t\t\t\tt.Fatalf(err.Error())\n\t\t\t}\n\n\t\t\tsh.WritePoints(points)\n\n\t\t\titer, err := sh.CreateIterator(\"cpu\", influxql.IteratorOptions{\n\t\t\t\tExpr:       influxql.MustParseExpr(`value`),\n\t\t\t\tAux:        []influxql.VarRef{{Val: \"value\"}},\n\t\t\t\tDimensions: []string{},\n\t\t\t\tAscending:  true,\n\t\t\t\tStartTime:  influxql.MinTime,\n\t\t\t\tEndTime:    influxql.MaxTime,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(err.Error())\n\t\t\t}\n\n\t\t\tswitch itr := iter.(type) {\n\t\t\tcase influxql.IntegerIterator:\n\t\t\t\tp, err := itr.Next()\n\t\t\t\tfor p != nil && err == nil {\n\t\t\t\t\tp, err = itr.Next()\n\t\t\t\t}\n\t\t\t\titer.Close()\n\n\t\t\tcase influxql.FloatIterator:\n\t\t\t\tp, err := itr.Next()\n\t\t\t\tfor p != nil && err == nil {\n\t\t\t\t\tp, err = itr.Next()\n\t\t\t\t}\n\t\t\t\titer.Close()\n\n\t\t\t}\n\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\t// Write 250 ints and then floats to the same field\n\t\tpoints := make([]models.Point, 0, 
500)\n\t\tfor i := 0; i < cap(points); i++ {\n\t\t\tif i < 250 {\n\t\t\t\tpoints = append(points, models.MustNewPoint(\n\t\t\t\t\t\"cpu\",\n\t\t\t\t\tmodels.NewTags(map[string]string{\"host\": \"server\"}),\n\t\t\t\t\tmap[string]interface{}{\"value\": int64(1)},\n\t\t\t\t\ttime.Unix(int64(i), 0),\n\t\t\t\t))\n\t\t\t} else {\n\t\t\t\tpoints = append(points, models.MustNewPoint(\n\t\t\t\t\t\"cpu\",\n\t\t\t\t\tmodels.NewTags(map[string]string{\"host\": \"server\"}),\n\t\t\t\t\tmap[string]interface{}{\"value\": 1.0},\n\t\t\t\t\ttime.Unix(int64(i), 0),\n\t\t\t\t))\n\t\t\t}\n\t\t}\n\t\tfor i := 0; i < 500; i++ {\n\t\t\tif err := sh.DeleteMeasurement([]byte(\"cpu\")); err != nil {\n\t\t\t\tt.Fatalf(err.Error())\n\t\t\t}\n\n\t\t\tsh.WritePoints(points)\n\n\t\t\titer, err := sh.CreateIterator(\"cpu\", influxql.IteratorOptions{\n\t\t\t\tExpr:       influxql.MustParseExpr(`value`),\n\t\t\t\tAux:        []influxql.VarRef{{Val: \"value\"}},\n\t\t\t\tDimensions: []string{},\n\t\t\t\tAscending:  true,\n\t\t\t\tStartTime:  influxql.MinTime,\n\t\t\t\tEndTime:    influxql.MaxTime,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(err.Error())\n\t\t\t}\n\n\t\t\tswitch itr := iter.(type) {\n\t\t\tcase influxql.IntegerIterator:\n\t\t\t\tp, err := itr.Next()\n\t\t\t\tfor p != nil && err == nil {\n\t\t\t\t\tp, err = itr.Next()\n\t\t\t\t}\n\t\t\t\titer.Close()\n\t\t\tcase influxql.FloatIterator:\n\t\t\t\tp, err := itr.Next()\n\t\t\t\tfor p != nil && err == nil {\n\t\t\t\t\tp, err = itr.Next()\n\t\t\t\t}\n\t\t\t\titer.Close()\n\t\t\t}\n\t\t}\n\t}()\n\n\twg.Wait()\n}\n\n// Ensures that when a shard is closed, it removes any series meta-data\n// from the index.\nfunc TestShard_Close_RemoveIndex(t *testing.T) {\n\ttmpDir, _ := ioutil.TempDir(\"\", \"shard_test\")\n\tdefer os.RemoveAll(tmpDir)\n\ttmpShard := path.Join(tmpDir, \"shard\")\n\ttmpWal := path.Join(tmpDir, \"wal\")\n\n\topts := tsdb.NewEngineOptions()\n\topts.Config.WALDir = filepath.Join(tmpDir, \"wal\")\n\topts.InmemIndex = 
inmem.NewIndex(path.Base(tmpDir))\n\n\tsh := tsdb.NewShard(1, tmpShard, tmpWal, opts)\n\tif err := sh.Open(); err != nil {\n\t\tt.Fatalf(\"error opening shard: %s\", err.Error())\n\t}\n\n\tpt := models.MustNewPoint(\n\t\t\"cpu\",\n\t\tmodels.NewTags(map[string]string{\"host\": \"server\"}),\n\t\tmap[string]interface{}{\"value\": 1.0},\n\t\ttime.Unix(1, 2),\n\t)\n\n\terr := sh.WritePoints([]models.Point{pt})\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\tif got, exp := sh.SeriesN(), int64(1); got != exp {\n\t\tt.Fatalf(\"got %d series, exp %d series in index\", got, exp)\n\t}\n\n\t// ensure the index gets loaded after closing and opening the shard\n\tsh.Close()\n\tsh.Open()\n\n\tif got, exp := sh.SeriesN(), int64(1); got != exp {\n\t\tt.Fatalf(\"got %d series, exp %d series in index\", got, exp)\n\t}\n}\n\n// Ensure a shard can create iterators for its underlying data.\nfunc TestShard_CreateIterator_Ascending(t *testing.T) {\n\tsh := NewShard()\n\n\t// Calling CreateIterator when the engine is not open will return\n\t// ErrEngineClosed.\n\t_, got := sh.CreateIterator(\"cpu\", influxql.IteratorOptions{})\n\tif exp := tsdb.ErrEngineClosed; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\tif err := sh.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer sh.Close()\n\n\tsh.MustWritePointsString(`\ncpu,host=serverA,region=uswest value=100 0\ncpu,host=serverA,region=uswest value=50,val2=5  10\ncpu,host=serverB,region=uswest value=25  0\n`)\n\n\t// Create iterator.\n\titr, err := sh.CreateIterator(\"cpu\", influxql.IteratorOptions{\n\t\tExpr:       influxql.MustParseExpr(`value`),\n\t\tAux:        []influxql.VarRef{{Val: \"val2\"}},\n\t\tDimensions: []string{\"host\"},\n\t\tAscending:  true,\n\t\tStartTime:  influxql.MinTime,\n\t\tEndTime:    influxql.MaxTime,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfitr := itr.(influxql.FloatIterator)\n\tdefer itr.Close()\n\n\t// Read values from iterator.\n\tif p, err := fitr.Next(); err != nil 
{\n\t\tt.Fatalf(\"unexpected error(0): %s\", err)\n\t} else if !deep.Equal(p, &influxql.FloatPoint{\n\t\tName:  \"cpu\",\n\t\tTags:  influxql.NewTags(map[string]string{\"host\": \"serverA\"}),\n\t\tTime:  time.Unix(0, 0).UnixNano(),\n\t\tValue: 100,\n\t\tAux:   []interface{}{(*float64)(nil)},\n\t}) {\n\t\tt.Fatalf(\"unexpected point(0): %s\", spew.Sdump(p))\n\t}\n\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(1): %s\", err)\n\t} else if !deep.Equal(p, &influxql.FloatPoint{\n\t\tName:  \"cpu\",\n\t\tTags:  influxql.NewTags(map[string]string{\"host\": \"serverA\"}),\n\t\tTime:  time.Unix(10, 0).UnixNano(),\n\t\tValue: 50,\n\t\tAux:   []interface{}{float64(5)},\n\t}) {\n\t\tt.Fatalf(\"unexpected point(1): %s\", spew.Sdump(p))\n\t}\n\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(2): %s\", err)\n\t} else if !deep.Equal(p, &influxql.FloatPoint{\n\t\tName:  \"cpu\",\n\t\tTags:  influxql.NewTags(map[string]string{\"host\": \"serverB\"}),\n\t\tTime:  time.Unix(0, 0).UnixNano(),\n\t\tValue: 25,\n\t\tAux:   []interface{}{(*float64)(nil)},\n\t}) {\n\t\tt.Fatalf(\"unexpected point(2): %s\", spew.Sdump(p))\n\t}\n}\n\n// Ensure a shard can create iterators for its underlying data.\nfunc TestShard_CreateIterator_Descending(t *testing.T) {\n\tsh := NewShard()\n\n\t// Calling CreateIterator when the engine is not open will return\n\t// ErrEngineClosed.\n\t_, got := sh.CreateIterator(\"cpu\", influxql.IteratorOptions{})\n\tif exp := tsdb.ErrEngineClosed; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\tif err := sh.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer sh.Close()\n\n\tsh.MustWritePointsString(`\ncpu,host=serverA,region=uswest value=100 0\ncpu,host=serverA,region=uswest value=50,val2=5  10\ncpu,host=serverB,region=uswest value=25  0\n`)\n\n\t// Create iterator.\n\titr, err := sh.CreateIterator(\"cpu\", influxql.IteratorOptions{\n\t\tExpr:       influxql.MustParseExpr(`value`),\n\t\tAux:     
   []influxql.VarRef{{Val: \"val2\"}},\n\t\tDimensions: []string{\"host\"},\n\t\tAscending:  false,\n\t\tStartTime:  influxql.MinTime,\n\t\tEndTime:    influxql.MaxTime,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer itr.Close()\n\tfitr := itr.(influxql.FloatIterator)\n\n\t// Read values from iterator.\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(0): %s\", err)\n\t} else if !deep.Equal(p, &influxql.FloatPoint{\n\t\tName:  \"cpu\",\n\t\tTags:  influxql.NewTags(map[string]string{\"host\": \"serverB\"}),\n\t\tTime:  time.Unix(0, 0).UnixNano(),\n\t\tValue: 25,\n\t\tAux:   []interface{}{(*float64)(nil)},\n\t}) {\n\t\tt.Fatalf(\"unexpected point(0): %s\", spew.Sdump(p))\n\t}\n\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(1): %s\", err)\n\t} else if !deep.Equal(p, &influxql.FloatPoint{\n\t\tName:  \"cpu\",\n\t\tTags:  influxql.NewTags(map[string]string{\"host\": \"serverA\"}),\n\t\tTime:  time.Unix(10, 0).UnixNano(),\n\t\tValue: 50,\n\t\tAux:   []interface{}{float64(5)},\n\t}) {\n\t\tt.Fatalf(\"unexpected point(1): %s\", spew.Sdump(p))\n\t}\n\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(2): %s\", err)\n\t} else if !deep.Equal(p, &influxql.FloatPoint{\n\t\tName:  \"cpu\",\n\t\tTags:  influxql.NewTags(map[string]string{\"host\": \"serverA\"}),\n\t\tTime:  time.Unix(0, 0).UnixNano(),\n\t\tValue: 100,\n\t\tAux:   []interface{}{(*float64)(nil)},\n\t}) {\n\t\tt.Fatalf(\"unexpected point(2): %s\", spew.Sdump(p))\n\t}\n}\n\nfunc TestShard_Disabled_WriteQuery(t *testing.T) {\n\tsh := NewShard()\n\tif err := sh.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer sh.Close()\n\n\tsh.SetEnabled(false)\n\n\tpt := models.MustNewPoint(\n\t\t\"cpu\",\n\t\tmodels.NewTags(map[string]string{\"host\": \"server\"}),\n\t\tmap[string]interface{}{\"value\": 1.0},\n\t\ttime.Unix(1, 2),\n\t)\n\n\terr := sh.WritePoints([]models.Point{pt})\n\tif err == nil {\n\t\tt.Fatalf(\"expected shard disabled 
error\")\n\t}\n\tif err != tsdb.ErrShardDisabled {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\t_, got := sh.CreateIterator(\"cpu\", influxql.IteratorOptions{})\n\tif err == nil {\n\t\tt.Fatalf(\"expected shard disabled error\")\n\t}\n\tif exp := tsdb.ErrShardDisabled; got != exp {\n\t\tt.Fatalf(\"got %v, expected %v\", got, exp)\n\t}\n\n\tsh.SetEnabled(true)\n\n\terr = sh.WritePoints([]models.Point{pt})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tif _, err = sh.CreateIterator(\"cpu\", influxql.IteratorOptions{}); err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", got)\n\t}\n}\n\nfunc TestShard_Closed_Functions(t *testing.T) {\n\tsh := NewShard()\n\tif err := sh.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer sh.Close()\n\n\tpt := models.MustNewPoint(\n\t\t\"cpu\",\n\t\tmodels.NewTags(map[string]string{\"host\": \"server\"}),\n\t\tmap[string]interface{}{\"value\": 1.0},\n\t\ttime.Unix(1, 2),\n\t)\n\n\tif err := sh.WritePoints([]models.Point{pt}); err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tsh.Close()\n\n\t// Should not panic, but returns an error when shard is closed\n\tif err := sh.ForEachMeasurementTagKey([]byte(\"cpu\"), func(k []byte) error {\n\t\treturn nil\n\t}); err == nil {\n\t\tt.Fatal(\"expected error: got nil\")\n\t}\n\n\t// Should not panic.\n\tif exp, got := 0, sh.TagKeyCardinality([]byte(\"cpu\"), []byte(\"host\")); exp != got {\n\t\tt.Fatalf(\"got %d, expected %d\", got, exp)\n\t}\n}\n\nfunc TestShard_FieldDimensions(t *testing.T) {\n\tsh := NewShard()\n\n\tif err := sh.Open(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer sh.Close()\n\n\tsh.MustWritePointsString(`\ncpu,host=serverA,region=uswest value=100 0\ncpu,host=serverA,region=uswest value=50,val2=5  10\ncpu,host=serverB,region=uswest value=25  0\nmem,host=serverA value=25i 0\nmem,host=serverB value=50i,val3=t 10\n`)\n\n\tfor i, tt := range []struct {\n\t\tsources []string\n\t\tf       map[string]influxql.DataType\n\t\td       
map[string]struct{}\n\t}{\n\t\t{\n\t\t\tsources: []string{\"cpu\"},\n\t\t\tf: map[string]influxql.DataType{\n\t\t\t\t\"value\": influxql.Float,\n\t\t\t\t\"val2\":  influxql.Float,\n\t\t\t},\n\t\t\td: map[string]struct{}{\n\t\t\t\t\"host\":   struct{}{},\n\t\t\t\t\"region\": struct{}{},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tsources: []string{\"cpu\", \"mem\"},\n\t\t\tf: map[string]influxql.DataType{\n\t\t\t\t\"value\": influxql.Float,\n\t\t\t\t\"val2\":  influxql.Float,\n\t\t\t\t\"val3\":  influxql.Boolean,\n\t\t\t},\n\t\t\td: map[string]struct{}{\n\t\t\t\t\"host\":   struct{}{},\n\t\t\t\t\"region\": struct{}{},\n\t\t\t},\n\t\t},\n\t} {\n\t\tf, d, err := sh.FieldDimensions(tt.sources)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\n\t\tif !reflect.DeepEqual(f, tt.f) {\n\t\t\tt.Errorf(\"%d. unexpected fields:\\n\\nexp=%#v\\n\\ngot=%#v\\n\\n\", i, tt.f, f)\n\t\t}\n\t\tif !reflect.DeepEqual(d, tt.d) {\n\t\t\tt.Errorf(\"%d. unexpected dimensions:\\n\\nexp=%#v\\n\\ngot=%#v\\n\\n\", i, tt.d, d)\n\t\t}\n\t}\n}\n\nfunc BenchmarkWritePoints_NewSeries_1K(b *testing.B)   { benchmarkWritePoints(b, 38, 3, 3, 1) }\nfunc BenchmarkWritePoints_NewSeries_100K(b *testing.B) { benchmarkWritePoints(b, 32, 5, 5, 1) }\nfunc BenchmarkWritePoints_NewSeries_250K(b *testing.B) { benchmarkWritePoints(b, 80, 5, 5, 1) }\nfunc BenchmarkWritePoints_NewSeries_500K(b *testing.B) { benchmarkWritePoints(b, 160, 5, 5, 1) }\nfunc BenchmarkWritePoints_NewSeries_1M(b *testing.B)   { benchmarkWritePoints(b, 320, 5, 5, 1) }\n\n// Fix measurement and tag key cardinalities and vary tag value cardinality\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_100_TagValues(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1, 100, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_500_TagValues(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1, 500, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_1000_TagValues(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 
1, 1000, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_5000_TagValues(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1, 5000, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_10000_TagValues(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1, 10000, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_50000_TagValues(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1, 50000, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_100000_TagValues(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1, 100000, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_500000_TagValues(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1, 500000, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_1000000_TagValues(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1, 1000000, 1)\n}\n\n// Fix tag key and tag values cardinalities and vary measurement cardinality\nfunc BenchmarkWritePoints_NewSeries_100_Measurements_1_TagKey_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 100, 1, 1, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_500_Measurements_1_TagKey_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 500, 1, 1, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1000_Measurement_1_TagKey_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 1000, 1, 1, 1)\n}\n\nfunc BenchmarkWritePoints_NewSeries_5000_Measurement_1_TagKey_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 5000, 1, 1, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_10000_Measurement_1_TagKey_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 10000, 1, 1, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_50000_Measurement_1_TagKey_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 50000, 1, 1, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_100000_Measurement_1_TagKey_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 100000, 1, 1, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_500000_Measurement_1_TagKey_1_TagValue(b *testing.B) 
{\n\tbenchmarkWritePoints(b, 500000, 1, 1, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1000000_Measurement_1_TagKey_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 1000000, 1, 1, 1)\n}\n\n// Fix measurement and tag values cardinalities and vary tag key cardinality\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_2_TagKeys_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1<<1, 1, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurements_4_TagKeys_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1<<2, 1, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurements_8_TagKeys_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1<<3, 1, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_16_TagKeys_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1<<4, 1, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_32_TagKeys_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1<<5, 1, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_64_TagKeys_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1<<6, 1, 1)\n\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_128_TagKeys_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1<<7, 1, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_256_TagKeys_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1<<8, 1, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_512_TagKeys_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1<<9, 1, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_1024_TagKeys_1_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1<<10, 1, 1)\n}\n\n// Fix series cardinality and vary tag keys and value cardinalities\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_65536_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 1, 1<<16, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_2_TagKeys_256_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 2, 1<<8, 1)\n}\nfunc 
BenchmarkWritePoints_NewSeries_1_Measurement_4_TagKeys_16_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 4, 1<<4, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_8_TagKeys_4_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 8, 1<<2, 1)\n}\nfunc BenchmarkWritePoints_NewSeries_1_Measurement_16_TagKeys_2_TagValue(b *testing.B) {\n\tbenchmarkWritePoints(b, 1, 16, 1<<1, 1)\n}\n\nfunc BenchmarkWritePoints_ExistingSeries_1K(b *testing.B) {\n\tbenchmarkWritePointsExistingSeries(b, 38, 3, 3, 1)\n}\nfunc BenchmarkWritePoints_ExistingSeries_100K(b *testing.B) {\n\tbenchmarkWritePointsExistingSeries(b, 32, 5, 5, 1)\n}\nfunc BenchmarkWritePoints_ExistingSeries_250K(b *testing.B) {\n\tbenchmarkWritePointsExistingSeries(b, 80, 5, 5, 1)\n}\nfunc BenchmarkWritePoints_ExistingSeries_500K(b *testing.B) {\n\tbenchmarkWritePointsExistingSeries(b, 160, 5, 5, 1)\n}\nfunc BenchmarkWritePoints_ExistingSeries_1M(b *testing.B) {\n\tbenchmarkWritePointsExistingSeries(b, 320, 5, 5, 1)\n}\n\n// benchmarkWritePoints benchmarks writing new series to a shard.\n// mCnt - measurement count\n// tkCnt - tag key count\n// tvCnt - tag value count (values per tag)\n// pntCnt - points per series.  
# of series = mCnt * (tvCnt ^ tkCnt)\nfunc benchmarkWritePoints(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) {\n\t// Generate test series (measurements + unique tag sets).\n\tseries := genTestSeries(mCnt, tkCnt, tvCnt)\n\t// Generate point data to write to the shard.\n\tpoints := []models.Point{}\n\tfor _, s := range series {\n\t\tfor val := 0.0; val < float64(pntCnt); val++ {\n\t\t\tp := models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{\"value\": val}, time.Now())\n\t\t\tpoints = append(points, p)\n\t\t}\n\t}\n\n\t// Stop & reset timers and mem-stats before the main benchmark loop.\n\tb.StopTimer()\n\tb.ResetTimer()\n\n\t// Run the benchmark loop.\n\tfor n := 0; n < b.N; n++ {\n\t\ttmpDir, _ := ioutil.TempDir(\"\", \"shard_test\")\n\t\ttmpShard := path.Join(tmpDir, \"shard\")\n\t\ttmpWal := path.Join(tmpDir, \"wal\")\n\t\tshard := tsdb.NewShard(1, tmpShard, tmpWal, tsdb.NewEngineOptions())\n\t\tshard.Open()\n\n\t\tb.StartTimer()\n\t\t// Call the function being benchmarked.\n\t\tchunkedWrite(shard, points)\n\n\t\tb.StopTimer()\n\t\tshard.Close()\n\t\tos.RemoveAll(tmpDir)\n\t}\n}\n\n// benchmarkWritePointsExistingSeries benchmarks writing to existing series in a shard.\n// mCnt - measurement count\n// tkCnt - tag key count\n// tvCnt - tag value count (values per tag)\n// pntCnt - points per series.  
# of series = mCnt * (tvCnt ^ tkCnt)\nfunc benchmarkWritePointsExistingSeries(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) {\n\t// Generate test series (measurements + unique tag sets).\n\tseries := genTestSeries(mCnt, tkCnt, tvCnt)\n\t// Generate point data to write to the shard.\n\tpoints := []models.Point{}\n\tfor _, s := range series {\n\t\tfor val := 0.0; val < float64(pntCnt); val++ {\n\t\t\tp := models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{\"value\": val}, time.Now())\n\t\t\tpoints = append(points, p)\n\t\t}\n\t}\n\n\ttmpDir, _ := ioutil.TempDir(\"\", \"\")\n\tdefer os.RemoveAll(tmpDir)\n\ttmpShard := path.Join(tmpDir, \"shard\")\n\ttmpWal := path.Join(tmpDir, \"wal\")\n\tshard := tsdb.NewShard(1, tmpShard, tmpWal, tsdb.NewEngineOptions())\n\tshard.Open()\n\tdefer shard.Close()\n\tchunkedWrite(shard, points)\n\n\t// Reset timers and mem-stats before the main benchmark loop.\n\tb.ResetTimer()\n\n\t// Run the benchmark loop.\n\tfor n := 0; n < b.N; n++ {\n\t\tb.StopTimer()\n\t\tfor _, p := range points {\n\t\t\tp.SetTime(p.Time().Add(time.Second))\n\t\t}\n\n\t\tb.StartTimer()\n\t\t// Call the function being benchmarked.\n\t\tchunkedWrite(shard, points)\n\t}\n}\n\nfunc chunkedWrite(shard *tsdb.Shard, points []models.Point) {\n\tnPts := len(points)\n\tchunkSz := 10000\n\tstart := 0\n\tend := chunkSz\n\n\tfor {\n\t\tif end > nPts {\n\t\t\tend = nPts\n\t\t}\n\t\tif end-start == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tshard.WritePoints(points[start:end])\n\t\tstart = end\n\t\tend += chunkSz\n\t}\n}\n\n// Shard represents a test wrapper for tsdb.Shard.\ntype Shard struct {\n\t*tsdb.Shard\n\tpath string\n}\n\n// NewShard returns a new instance of Shard with temp paths.\nfunc NewShard() *Shard {\n\t// Create temporary path for data and WAL.\n\tdir, err := ioutil.TempDir(\"\", \"influxdb-tsdb-\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Build engine options.\n\topt := tsdb.NewEngineOptions()\n\topt.Config.WALDir = filepath.Join(dir, 
\"wal\")\n\topt.InmemIndex = inmem.NewIndex(path.Base(dir))\n\n\treturn &Shard{\n\t\tShard: tsdb.NewShard(0,\n\t\t\tfilepath.Join(dir, \"data\", \"db0\", \"rp0\", \"1\"),\n\t\t\tfilepath.Join(dir, \"wal\", \"db0\", \"rp0\", \"1\"),\n\t\t\topt,\n\t\t),\n\t\tpath: dir,\n\t}\n}\n\n// Close closes the shard and removes all underlying data.\nfunc (sh *Shard) Close() error {\n\tdefer os.RemoveAll(sh.path)\n\treturn sh.Shard.Close()\n}\n\n// MustWritePointsString parses the line protocol (with second precision) and\n// inserts the resulting points into the shard. Panic on error.\nfunc (sh *Shard) MustWritePointsString(s string) {\n\ta, err := models.ParsePointsWithPrecision([]byte(strings.TrimSpace(s)), time.Time{}, \"s\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif err := sh.WritePoints(a); err != nil {\n\t\tpanic(err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/store.go",
    "content": "package tsdb // import \"github.com/influxdata/influxdb/tsdb\"\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/bytesutil\"\n\t\"github.com/influxdata/influxdb/pkg/estimator\"\n\t\"github.com/influxdata/influxdb/pkg/limiter\"\n\t\"github.com/uber-go/zap\"\n)\n\nvar (\n\t// ErrShardNotFound is returned when trying to get a non existing shard.\n\tErrShardNotFound = fmt.Errorf(\"shard not found\")\n\t// ErrStoreClosed is returned when trying to use a closed Store.\n\tErrStoreClosed = fmt.Errorf(\"store is closed\")\n)\n\n// Statistics gathered by the store.\nconst (\n\tstatDatabaseSeries       = \"numSeries\"       // number of series in a database\n\tstatDatabaseMeasurements = \"numMeasurements\" // number of measurements in a database\n)\n\n// Store manages shards and indexes for databases.\ntype Store struct {\n\tmu sync.RWMutex\n\t// databases keeps track of the number of databases being managed by the store.\n\tdatabases map[string]struct{}\n\n\tpath string\n\n\t// shared per-database indexes, only if using \"inmem\".\n\tindexes map[string]interface{}\n\n\t// shards is a map of shard IDs to the associated Shard.\n\tshards map[uint64]*Shard\n\n\tEngineOptions EngineOptions\n\n\tbaseLogger zap.Logger\n\tLogger     zap.Logger\n\n\tclosing chan struct{}\n\twg      sync.WaitGroup\n\topened  bool\n}\n\n// NewStore returns a new store with the given path and a default configuration.\n// The returned store must be initialized by calling Open before using it.\nfunc NewStore(path string) *Store {\n\tlogger := zap.New(zap.NullEncoder())\n\treturn &Store{\n\t\tdatabases:     make(map[string]struct{}),\n\t\tpath:          path,\n\t\tindexes:       
make(map[string]interface{}),\n\t\tEngineOptions: NewEngineOptions(),\n\t\tLogger:        logger,\n\t\tbaseLogger:    logger,\n\t}\n}\n\n// WithLogger sets the logger for the store.\nfunc (s *Store) WithLogger(log zap.Logger) {\n\ts.baseLogger = log\n\ts.Logger = log.With(zap.String(\"service\", \"store\"))\n\tfor _, sh := range s.shards {\n\t\tsh.WithLogger(s.baseLogger)\n\t}\n}\n\n// Statistics returns statistics for period monitoring.\nfunc (s *Store) Statistics(tags map[string]string) []models.Statistic {\n\ts.mu.RLock()\n\tshards := s.shardsSlice()\n\ts.mu.RUnlock()\n\n\t// Add all the series and measurements cardinality estimations.\n\tdatabases := s.Databases()\n\tstatistics := make([]models.Statistic, 0, len(databases))\n\tfor _, database := range databases {\n\t\tsc, err := s.SeriesCardinality(database)\n\t\tif err != nil {\n\t\t\ts.Logger.Error(\"cannot retrieve series cardinality\", zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\n\t\tmc, err := s.MeasurementsCardinality(database)\n\t\tif err != nil {\n\t\t\ts.Logger.Error(\"cannot retrieve measurement cardinality\", zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\n\t\tstatistics = append(statistics, models.Statistic{\n\t\t\tName: \"database\",\n\t\t\tTags: models.StatisticTags{\"database\": database}.Merge(tags),\n\t\t\tValues: map[string]interface{}{\n\t\t\t\tstatDatabaseSeries:       sc,\n\t\t\t\tstatDatabaseMeasurements: mc,\n\t\t\t},\n\t\t})\n\t}\n\n\t// Gather all statistics for all shards.\n\tfor _, shard := range shards {\n\t\tstatistics = append(statistics, shard.Statistics(tags)...)\n\t}\n\treturn statistics\n}\n\n// Path returns the store's root path.\nfunc (s *Store) Path() string { return s.path }\n\n// Open initializes the store, creating all necessary directories, loading all\n// shards as well as initializing periodic maintenance of them.\nfunc (s *Store) Open() error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\ts.closing = make(chan struct{})\n\ts.shards = 
map[uint64]*Shard{}\n\n\ts.Logger.Info(fmt.Sprintf(\"Using data dir: %v\", s.Path()))\n\n\t// Create directory.\n\tif err := os.MkdirAll(s.path, 0777); err != nil {\n\t\treturn err\n\t}\n\n\tif err := s.loadShards(); err != nil {\n\t\treturn err\n\t}\n\n\ts.opened = true\n\ts.wg.Add(1)\n\tgo s.monitorShards()\n\n\treturn nil\n}\n\nfunc (s *Store) loadShards() error {\n\t// res holds the result from opening each shard in a goroutine\n\ttype res struct {\n\t\ts   *Shard\n\t\terr error\n\t}\n\n\tt := limiter.NewFixed(runtime.GOMAXPROCS(0))\n\n\t// Setup a shared limiter for compactions\n\tlim := s.EngineOptions.Config.MaxConcurrentCompactions\n\tif lim == 0 {\n\t\tlim = runtime.GOMAXPROCS(0)\n\t}\n\ts.EngineOptions.CompactionLimiter = limiter.NewFixed(lim)\n\n\tresC := make(chan *res)\n\tvar n int\n\n\t// Determine how many shards we need to open by checking the store path.\n\tdbDirs, err := ioutil.ReadDir(s.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, db := range dbDirs {\n\t\tif !db.IsDir() {\n\t\t\ts.Logger.Info(\"Not loading. Not a database directory.\", zap.String(\"name\", db.Name()))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Retrieve database index.\n\t\tidx, err := s.createIndexIfNotExists(db.Name())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Load each retention policy within the database directory.\n\t\trpDirs, err := ioutil.ReadDir(filepath.Join(s.path, db.Name()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, rp := range rpDirs {\n\t\t\tif !rp.IsDir() {\n\t\t\t\ts.Logger.Info(fmt.Sprintf(\"Skipping retention policy dir: %s. 
Not a directory\", rp.Name()))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tshardDirs, err := ioutil.ReadDir(filepath.Join(s.path, db.Name(), rp.Name()))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor _, sh := range shardDirs {\n\t\t\t\tn++\n\t\t\t\tgo func(db, rp, sh string) {\n\t\t\t\t\tt.Take()\n\t\t\t\t\tdefer t.Release()\n\n\t\t\t\t\tstart := time.Now()\n\t\t\t\t\tpath := filepath.Join(s.path, db, rp, sh)\n\t\t\t\t\twalPath := filepath.Join(s.EngineOptions.Config.WALDir, db, rp, sh)\n\n\t\t\t\t\t// Shard file names are numeric shardIDs\n\t\t\t\t\tshardID, err := strconv.ParseUint(sh, 10, 64)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tresC <- &res{err: fmt.Errorf(\"%s is not a valid ID. Skipping shard.\", sh)}\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t// Copy options and assign shared index.\n\t\t\t\t\topt := s.EngineOptions\n\t\t\t\t\topt.InmemIndex = idx\n\n\t\t\t\t\t// Existing shards should continue to use inmem index.\n\t\t\t\t\tif _, err := os.Stat(filepath.Join(path, \"index\")); os.IsNotExist(err) {\n\t\t\t\t\t\topt.IndexVersion = \"inmem\"\n\t\t\t\t\t}\n\n\t\t\t\t\t// Open engine.\n\t\t\t\t\tshard := NewShard(shardID, path, walPath, opt)\n\n\t\t\t\t\t// Disable compactions, writes and queries until all shards are loaded\n\t\t\t\t\tshard.EnableOnOpen = false\n\t\t\t\t\tshard.WithLogger(s.baseLogger)\n\n\t\t\t\t\terr = shard.Open()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tresC <- &res{err: fmt.Errorf(\"Failed to open shard: %d: %s\", shardID, err)}\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tresC <- &res{s: shard}\n\t\t\t\t\ts.Logger.Info(fmt.Sprintf(\"%s opened in %s\", path, time.Since(start)))\n\t\t\t\t}(db.Name(), rp.Name(), sh.Name())\n\t\t\t}\n\t\t}\n\t}\n\n\t// Gather results of opening shards concurrently, keeping track of how\n\t// many databases we are managing.\n\tfor i := 0; i < n; i++ {\n\t\tres := <-resC\n\t\tif res.err != nil {\n\t\t\ts.Logger.Info(res.err.Error())\n\t\t\tcontinue\n\t\t}\n\t\ts.shards[res.s.id] = 
res.s\n\t\ts.databases[res.s.database] = struct{}{}\n\t}\n\tclose(resC)\n\n\t// Enable all shards\n\tfor _, sh := range s.shards {\n\t\tsh.SetEnabled(true)\n\t\tif sh.IsIdle() {\n\t\t\tsh.SetCompactionsEnabled(false)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// Close closes the store and all associated shards. After calling Close accessing\n// shards through the Store will result in ErrStoreClosed being returned.\nfunc (s *Store) Close() error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif s.opened {\n\t\tclose(s.closing)\n\t}\n\ts.wg.Wait()\n\n\t// Close all the shards in parallel.\n\tif err := s.walkShards(s.shardsSlice(), func(sh *Shard) error {\n\t\treturn sh.CloseFast()\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\ts.opened = false\n\ts.shards = nil\n\n\treturn nil\n}\n\n// createIndexIfNotExists returns a shared index for a database, if the inmem\n// index is being used. If the TSI index is being used, then this method is\n// basically a no-op.\nfunc (s *Store) createIndexIfNotExists(name string) (interface{}, error) {\n\tif idx := s.indexes[name]; idx != nil {\n\t\treturn idx, nil\n\t}\n\n\tidx, err := NewInmemIndex(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts.indexes[name] = idx\n\treturn idx, nil\n}\n\n// Shard returns a shard by id.\nfunc (s *Store) Shard(id uint64) *Shard {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\tsh, ok := s.shards[id]\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn sh\n}\n\n// Shards returns a list of shards by id.\nfunc (s *Store) Shards(ids []uint64) []*Shard {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\ta := make([]*Shard, 0, len(ids))\n\tfor _, id := range ids {\n\t\tsh, ok := s.shards[id]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\ta = append(a, sh)\n\t}\n\treturn a\n}\n\n// ShardGroup returns a ShardGroup with a list of shards by id.\nfunc (s *Store) ShardGroup(ids []uint64) ShardGroup {\n\treturn Shards(s.Shards(ids))\n}\n\n// ShardN returns the number of shards in the store.\nfunc (s *Store) ShardN() int 
{\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\treturn len(s.shards)\n}\n\n// CreateShard creates a shard with the given id and retention policy on a database.\nfunc (s *Store) CreateShard(database, retentionPolicy string, shardID uint64, enabled bool) error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tselect {\n\tcase <-s.closing:\n\t\treturn ErrStoreClosed\n\tdefault:\n\t}\n\n\t// Shard already exists.\n\tif _, ok := s.shards[shardID]; ok {\n\t\treturn nil\n\t}\n\n\t// Create the db and retention policy directories if they don't exist.\n\tif err := os.MkdirAll(filepath.Join(s.path, database, retentionPolicy), 0700); err != nil {\n\t\treturn err\n\t}\n\n\t// Create the WAL directory.\n\twalPath := filepath.Join(s.EngineOptions.Config.WALDir, database, retentionPolicy, fmt.Sprintf(\"%d\", shardID))\n\tif err := os.MkdirAll(walPath, 0700); err != nil {\n\t\treturn err\n\t}\n\n\t// Retrieve shared index, if needed.\n\tidx, err := s.createIndexIfNotExists(database)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Copy index options and pass in shared index.\n\topt := s.EngineOptions\n\topt.InmemIndex = idx\n\n\tpath := filepath.Join(s.path, database, retentionPolicy, strconv.FormatUint(shardID, 10))\n\tshard := NewShard(shardID, path, walPath, opt)\n\tshard.WithLogger(s.baseLogger)\n\tshard.EnableOnOpen = enabled\n\n\tif err := shard.Open(); err != nil {\n\t\treturn err\n\t}\n\n\ts.shards[shardID] = shard\n\ts.databases[database] = struct{}{} // Ensure we are tracking any new db.\n\n\treturn nil\n}\n\n// CreateShardSnapShot will create a hard link to the underlying shard and return a path.\n// The caller is responsible for cleaning up (removing) the file path returned.\nfunc (s *Store) CreateShardSnapshot(id uint64) (string, error) {\n\tsh := s.Shard(id)\n\tif sh == nil {\n\t\treturn \"\", ErrShardNotFound\n\t}\n\n\treturn sh.CreateSnapshot()\n}\n\n// SetShardEnabled enables or disables a shard for read and writes.\nfunc (s *Store) SetShardEnabled(shardID uint64, enabled 
bool) error {\n\tsh := s.Shard(shardID)\n\tif sh == nil {\n\t\treturn ErrShardNotFound\n\t}\n\tsh.SetEnabled(enabled)\n\treturn nil\n}\n\n// DeleteShard removes a shard from disk.\nfunc (s *Store) DeleteShard(shardID uint64) error {\n\tsh := s.Shard(shardID)\n\tif sh == nil {\n\t\treturn nil\n\t}\n\n\t// Remove the shard from the database indexes before closing the shard.\n\t// Closing the shard will do this as well, but it will unload it while\n\t// the shard is locked which can block stats collection and other calls.\n\tsh.UnloadIndex()\n\n\tif err := sh.Close(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := os.RemoveAll(sh.path); err != nil {\n\t\treturn err\n\t}\n\n\tif err := os.RemoveAll(sh.walPath); err != nil {\n\t\treturn err\n\t}\n\n\ts.mu.Lock()\n\tdelete(s.shards, shardID)\n\ts.mu.Unlock()\n\n\treturn nil\n}\n\n// DeleteDatabase will close all shards associated with a database and remove the directory and files from disk.\nfunc (s *Store) DeleteDatabase(name string) error {\n\ts.mu.RLock()\n\tif _, ok := s.databases[name]; !ok {\n\t\ts.mu.RUnlock()\n\t\t// no files locally, so nothing to do\n\t\treturn nil\n\t}\n\tshards := s.filterShards(func(sh *Shard) bool {\n\t\treturn sh.database == name\n\t})\n\ts.mu.RUnlock()\n\n\tif err := s.walkShards(shards, func(sh *Shard) error {\n\t\tif sh.database != name {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn sh.CloseFast()\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tdbPath := filepath.Clean(filepath.Join(s.path, name))\n\n\t// extra sanity check to make sure that even if someone named their database \"../..\"\n\t// that we don't delete everything because of it, they'll just have extra files forever\n\tif filepath.Clean(s.path) != filepath.Dir(dbPath) {\n\t\treturn fmt.Errorf(\"invalid database directory location for database '%s': %s\", name, dbPath)\n\t}\n\n\tif err := os.RemoveAll(dbPath); err != nil {\n\t\treturn err\n\t}\n\tif err := os.RemoveAll(filepath.Join(s.EngineOptions.Config.WALDir, name)); err != nil 
{\n\t\treturn err\n\t}\n\n\ts.mu.Lock()\n\tfor _, sh := range shards {\n\t\tdelete(s.shards, sh.id)\n\t}\n\n\t// Remove database from store list of databases\n\tdelete(s.databases, name)\n\n\t// Remove shared index for database if using inmem index.\n\tdelete(s.indexes, name)\n\ts.mu.Unlock()\n\n\treturn nil\n}\n\n// DeleteRetentionPolicy will close all shards associated with the\n// provided retention policy, remove the retention policy directories on\n// both the DB and WAL, and remove all shard files from disk.\nfunc (s *Store) DeleteRetentionPolicy(database, name string) error {\n\ts.mu.RLock()\n\tif _, ok := s.databases[database]; !ok {\n\t\ts.mu.RUnlock()\n\t\t// unknown database, nothing to do\n\t\treturn nil\n\t}\n\tshards := s.filterShards(func(sh *Shard) bool {\n\t\treturn sh.database == database && sh.retentionPolicy == name\n\t})\n\ts.mu.RUnlock()\n\n\t// Close and delete all shards under the retention policy on the\n\t// database.\n\tif err := s.walkShards(shards, func(sh *Shard) error {\n\t\tif sh.database != database || sh.retentionPolicy != name {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn sh.Close()\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\t// Remove the retention policy folder.\n\trpPath := filepath.Clean(filepath.Join(s.path, database, name))\n\n\t// ensure Store's path is the grandparent of the retention policy\n\tif filepath.Clean(s.path) != filepath.Dir(filepath.Dir(rpPath)) {\n\t\treturn fmt.Errorf(\"invalid path for database '%s', retention policy '%s': %s\", database, name, rpPath)\n\t}\n\n\t// Remove the retention policy folder.\n\tif err := os.RemoveAll(filepath.Join(s.path, database, name)); err != nil {\n\t\treturn err\n\t}\n\n\t// Remove the retention policy folder from the the WAL.\n\tif err := os.RemoveAll(filepath.Join(s.EngineOptions.Config.WALDir, database, name)); err != nil {\n\t\treturn err\n\t}\n\n\ts.mu.Lock()\n\tfor _, sh := range shards {\n\t\tdelete(s.shards, sh.id)\n\t}\n\ts.mu.Unlock()\n\treturn nil\n}\n\n// 
DeleteMeasurement removes a measurement and all associated series from a database.\nfunc (s *Store) DeleteMeasurement(database, name string) error {\n\ts.mu.RLock()\n\tshards := s.filterShards(byDatabase(database))\n\ts.mu.RUnlock()\n\n\t// Limit to 1 delete for each shard since expanding the measurement into the list\n\t// of series keys can be very memory intensive if run concurrently.\n\tlimit := limiter.NewFixed(1)\n\treturn s.walkShards(shards, func(sh *Shard) error {\n\t\tlimit.Take()\n\t\tdefer limit.Release()\n\n\t\tif err := sh.DeleteMeasurement([]byte(name)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\n// filterShards returns a slice of shards where fn returns true\n// for the shard. If the provided predicate is nil then all shards are returned.\nfunc (s *Store) filterShards(fn func(sh *Shard) bool) []*Shard {\n\tvar shards []*Shard\n\tif fn == nil {\n\t\tshards = make([]*Shard, 0, len(s.shards))\n\t\tfn = func(*Shard) bool { return true }\n\t} else {\n\t\tshards = make([]*Shard, 0)\n\t}\n\n\tfor _, sh := range s.shards {\n\t\tif fn(sh) {\n\t\t\tshards = append(shards, sh)\n\t\t}\n\t}\n\treturn shards\n}\n\n// byDatabase provides a predicate for filterShards that matches on the name of\n// the database passed in.\nfunc byDatabase(name string) func(sh *Shard) bool {\n\treturn func(sh *Shard) bool {\n\t\treturn sh.database == name\n\t}\n}\n\n// walkShards apply a function to each shard in parallel.  
If any of the\n// functions return an error, the first error is returned.\nfunc (s *Store) walkShards(shards []*Shard, fn func(sh *Shard) error) error {\n\t// struct to hold the result of opening each reader in a goroutine\n\ttype res struct {\n\t\terr error\n\t}\n\n\tresC := make(chan res)\n\tvar n int\n\n\tfor _, sh := range shards {\n\t\tn++\n\n\t\tgo func(sh *Shard) {\n\t\t\tif err := fn(sh); err != nil {\n\t\t\t\tresC <- res{err: fmt.Errorf(\"shard %d: %s\", sh.id, err)}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tresC <- res{}\n\t\t}(sh)\n\t}\n\n\tvar err error\n\tfor i := 0; i < n; i++ {\n\t\tres := <-resC\n\t\tif res.err != nil {\n\t\t\terr = res.err\n\t\t}\n\t}\n\tclose(resC)\n\treturn err\n}\n\n// ShardIDs returns a slice of all ShardIDs under management.\nfunc (s *Store) ShardIDs() []uint64 {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\treturn s.shardIDs()\n}\n\nfunc (s *Store) shardIDs() []uint64 {\n\ta := make([]uint64, 0, len(s.shards))\n\tfor shardID := range s.shards {\n\t\ta = append(a, shardID)\n\t}\n\treturn a\n}\n\n// shardsSlice returns an ordered list of shards.\nfunc (s *Store) shardsSlice() []*Shard {\n\ta := make([]*Shard, 0, len(s.shards))\n\tfor _, sh := range s.shards {\n\t\ta = append(a, sh)\n\t}\n\tsort.Sort(Shards(a))\n\treturn a\n}\n\n// Databases returns the names of all databases managed by the store.\nfunc (s *Store) Databases() []string {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\tdatabases := make([]string, 0, len(s.databases))\n\tfor k, _ := range s.databases {\n\t\tdatabases = append(databases, k)\n\t}\n\treturn databases\n}\n\n// DiskSize returns the size of all the shard files in bytes.\n// This size does not include the WAL size.\nfunc (s *Store) DiskSize() (int64, error) {\n\tvar size int64\n\n\ts.mu.RLock()\n\tallShards := s.filterShards(nil)\n\ts.mu.RUnlock()\n\n\tfor _, sh := range allShards {\n\t\tsz, err := sh.DiskSize()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tsize += sz\n\t}\n\treturn size, nil\n}\n\nfunc (s 
*Store) estimateCardinality(dbName string, getSketches func(*Shard) (estimator.Sketch, estimator.Sketch, error)) (int64, error) {\n\tvar (\n\t\tss estimator.Sketch // Sketch estimating number of items.\n\t\tts estimator.Sketch // Sketch estimating number of tombstoned items.\n\t)\n\n\ts.mu.RLock()\n\tshards := s.filterShards(byDatabase(dbName))\n\ts.mu.RUnlock()\n\n\t// Iterate over all shards for the database and combine all of the sketches.\n\tfor _, shard := range shards {\n\t\ts, t, err := getSketches(shard)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\tif ss == nil {\n\t\t\tss, ts = s, t\n\t\t} else if err = ss.Merge(s); err != nil {\n\t\t\treturn 0, err\n\t\t} else if err = ts.Merge(t); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tif ss != nil {\n\t\treturn int64(ss.Count() - ts.Count()), nil\n\t}\n\treturn 0, nil\n}\n\n// SeriesCardinality returns the series cardinality for the provided database.\nfunc (s *Store) SeriesCardinality(database string) (int64, error) {\n\treturn s.estimateCardinality(database, func(sh *Shard) (estimator.Sketch, estimator.Sketch, error) {\n\t\tif sh == nil {\n\t\t\treturn nil, nil, errors.New(\"shard nil, can't get cardinality\")\n\t\t}\n\t\treturn sh.SeriesSketches()\n\t})\n}\n\n// MeasurementsCardinality returns the measurement cardinality for the provided\n// database.\nfunc (s *Store) MeasurementsCardinality(database string) (int64, error) {\n\treturn s.estimateCardinality(database, func(sh *Shard) (estimator.Sketch, estimator.Sketch, error) {\n\t\tif sh == nil {\n\t\t\treturn nil, nil, errors.New(\"shard nil, can't get cardinality\")\n\t\t}\n\t\treturn sh.MeasurementsSketches()\n\t})\n}\n\n// BackupShard will get the shard and have the engine backup since the passed in\n// time to the writer.\nfunc (s *Store) BackupShard(id uint64, since time.Time, w io.Writer) error {\n\tshard := s.Shard(id)\n\tif shard == nil {\n\t\treturn fmt.Errorf(\"shard %d doesn't exist on this server\", id)\n\t}\n\n\tpath, err := 
relativePath(s.path, shard.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn shard.Backup(w, path, since)\n}\n\n// RestoreShard restores a backup from r to a given shard.\n// This will only overwrite files included in the backup.\nfunc (s *Store) RestoreShard(id uint64, r io.Reader) error {\n\tshard := s.Shard(id)\n\tif shard == nil {\n\t\treturn fmt.Errorf(\"shard %d doesn't exist on this server\", id)\n\t}\n\n\tpath, err := relativePath(s.path, shard.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn shard.Restore(r, path)\n}\n\n// ImportShard imports the contents of r to a given shard.\n// All files in the backup are added as new files which may\n// cause duplicated data to occur requiring more expensive\n// compactions.\nfunc (s *Store) ImportShard(id uint64, r io.Reader) error {\n\tshard := s.Shard(id)\n\tif shard == nil {\n\t\treturn fmt.Errorf(\"shard %d doesn't exist on this server\", id)\n\t}\n\n\tpath, err := relativePath(s.path, shard.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn shard.Import(r, path)\n}\n\n// ShardRelativePath will return the relative path to the shard, i.e.,\n// <database>/<retention>/<id>.\nfunc (s *Store) ShardRelativePath(id uint64) (string, error) {\n\tshard := s.Shard(id)\n\tif shard == nil {\n\t\treturn \"\", fmt.Errorf(\"shard %d doesn't exist on this server\", id)\n\t}\n\treturn relativePath(s.path, shard.path)\n}\n\n// DeleteSeries loops through the local shards and deletes the series data for\n// the passed in series keys.\nfunc (s *Store) DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error {\n\t// Expand regex expressions in the FROM clause.\n\ta, err := s.ExpandSources(sources)\n\tif err != nil {\n\t\treturn err\n\t} else if sources != nil && len(sources) != 0 && len(a) == 0 {\n\t\treturn nil\n\t}\n\tsources = a\n\n\t// Determine deletion time range.\n\tmin, max, err := influxql.TimeRangeAsEpochNano(condition)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\ts.mu.RLock()\n\tshards := s.filterShards(byDatabase(database))\n\ts.mu.RUnlock()\n\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\t// Limit to 1 delete for each shard since expanding the measurement into the list\n\t// of series keys can be very memory intensive if run concurrently.\n\tlimit := limiter.NewFixed(1)\n\n\treturn s.walkShards(shards, func(sh *Shard) error {\n\t\t// Determine list of measurements from sources.\n\t\t// Use all measurements if no FROM clause was provided.\n\t\tvar names []string\n\t\tif len(sources) > 0 {\n\t\t\tfor _, source := range sources {\n\t\t\t\tnames = append(names, source.(*influxql.Measurement).Name)\n\t\t\t}\n\t\t} else {\n\t\t\tif err := sh.ForEachMeasurementName(func(name []byte) error {\n\t\t\t\tnames = append(names, string(name))\n\t\t\t\treturn nil\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tsort.Strings(names)\n\n\t\tlimit.Take()\n\t\tdefer limit.Release()\n\n\t\t// Find matching series keys for each measurement.\n\t\tvar keys [][]byte\n\t\tfor _, name := range names {\n\t\t\ta, err := sh.MeasurementSeriesKeysByExpr([]byte(name), condition)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tkeys = append(keys, a...)\n\t\t}\n\n\t\tif !bytesutil.IsSorted(keys) {\n\t\t\tbytesutil.Sort(keys)\n\t\t}\n\n\t\t// Delete all matching keys.\n\t\tif err := sh.DeleteSeriesRange(keys, min, max); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\n// ExpandSources expands sources against all local shards.\nfunc (s *Store) ExpandSources(sources influxql.Sources) (influxql.Sources, error) {\n\tshards := func() Shards {\n\t\ts.mu.RLock()\n\t\tdefer s.mu.RUnlock()\n\t\treturn Shards(s.shardsSlice())\n\t}()\n\treturn shards.ExpandSources(sources)\n}\n\n// WriteToShard writes a list of points to a shard identified by its ID.\nfunc (s *Store) WriteToShard(shardID uint64, points []models.Point) error {\n\ts.mu.RLock()\n\n\tselect {\n\tcase <-s.closing:\n\t\ts.mu.RUnlock()\n\t\treturn 
ErrStoreClosed\n\tdefault:\n\t}\n\n\tsh := s.shards[shardID]\n\tif sh == nil {\n\t\ts.mu.RUnlock()\n\t\treturn ErrShardNotFound\n\t}\n\ts.mu.RUnlock()\n\n\treturn sh.WritePoints(points)\n}\n\n// MeasurementNames returns a slice of all measurements. Measurements accepts an\n// optional condition expression. If cond is nil, then all measurements for the\n// database will be returned.\nfunc (s *Store) MeasurementNames(database string, cond influxql.Expr) ([][]byte, error) {\n\ts.mu.RLock()\n\tshards := s.filterShards(byDatabase(database))\n\ts.mu.RUnlock()\n\n\t// Map to deduplicate measurement names across all shards.  This is kind of naive\n\t// and could be improved using a sorted merge of the already sorted measurements in\n\t// each shard.\n\tset := make(map[string]struct{})\n\tvar names [][]byte\n\tfor _, sh := range shards {\n\t\ta, err := sh.MeasurementNamesByExpr(cond)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, m := range a {\n\t\t\tif _, ok := set[string(m)]; !ok {\n\t\t\t\tset[string(m)] = struct{}{}\n\t\t\t\tnames = append(names, m)\n\t\t\t}\n\t\t}\n\t}\n\tbytesutil.Sort(names)\n\n\treturn names, nil\n}\n\n// MeasurementSeriesCounts returns the number of measurements and series in all\n// the shards' indices.\nfunc (s *Store) MeasurementSeriesCounts(database string) (measuments int, series int) {\n\t// TODO: implement me\n\treturn 0, 0\n}\n\ntype TagValues struct {\n\tMeasurement string\n\tValues      []KeyValue\n}\n\ntype TagValuesSlice []TagValues\n\nfunc (a TagValuesSlice) Len() int           { return len(a) }\nfunc (a TagValuesSlice) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a TagValuesSlice) Less(i, j int) bool { return a[i].Measurement < a[j].Measurement }\n\n// tagValues is a temporary representation of a TagValues. 
Rather than allocating\n// KeyValues as we build up a TagValues object, We hold off allocating KeyValues\n// until we have merged multiple tagValues together.\ntype tagValues struct {\n\tname   []byte\n\tkeys   []string\n\tvalues [][]string\n}\n\n// Is a slice of tagValues that can be sorted by measurement.\ntype tagValuesSlice []tagValues\n\nfunc (a tagValuesSlice) Len() int           { return len(a) }\nfunc (a tagValuesSlice) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a tagValuesSlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[j].name) == -1 }\n\n// TagValues returns the tag keys and values in the given database, matching the condition.\nfunc (s *Store) TagValues(database string, cond influxql.Expr) ([]TagValues, error) {\n\tif cond == nil {\n\t\treturn nil, errors.New(\"a condition is required\")\n\t}\n\n\tmeasurementExpr := influxql.CloneExpr(cond)\n\tmeasurementExpr = influxql.Reduce(influxql.RewriteExpr(measurementExpr, func(e influxql.Expr) influxql.Expr {\n\t\tswitch e := e.(type) {\n\t\tcase *influxql.BinaryExpr:\n\t\t\tswitch e.Op {\n\t\t\tcase influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX:\n\t\t\t\ttag, ok := e.LHS.(*influxql.VarRef)\n\t\t\t\tif !ok || tag.Val != \"_name\" {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn e\n\t}), nil)\n\n\tfilterExpr := influxql.CloneExpr(cond)\n\tfilterExpr = influxql.Reduce(influxql.RewriteExpr(filterExpr, func(e influxql.Expr) influxql.Expr {\n\t\tswitch e := e.(type) {\n\t\tcase *influxql.BinaryExpr:\n\t\t\tswitch e.Op {\n\t\t\tcase influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX:\n\t\t\t\ttag, ok := e.LHS.(*influxql.VarRef)\n\t\t\t\tif !ok || strings.HasPrefix(tag.Val, \"_\") {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn e\n\t}), nil)\n\n\t// Get all measurements for the shards we're interested in.\n\ts.mu.RLock()\n\tshards := s.filterShards(byDatabase(database))\n\ts.mu.RUnlock()\n\n\t// If we're using the inmem index then all 
shards contain a duplicate\n\t// version of the global index. We don't need to iterate over all shards\n\t// since we have everything we need from the first shard.\n\tif s.EngineOptions.IndexVersion == \"inmem\" && len(shards) > 0 {\n\t\tshards = shards[:1]\n\t}\n\n\t// Stores each list of TagValues for each measurement.\n\tvar allResults []tagValues\n\tvar maxMeasurements int // Hint as to lower bound on number of measurements.\n\tfor _, sh := range shards {\n\t\t// names will be sorted by MeasurementNamesByExpr.\n\t\tnames, err := sh.MeasurementNamesByExpr(measurementExpr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif len(names) > maxMeasurements {\n\t\t\tmaxMeasurements = len(names)\n\t\t}\n\n\t\tif allResults == nil {\n\t\t\tallResults = make([]tagValues, 0, len(shards)*len(names)) // Assuming all series in all shards.\n\t\t}\n\n\t\t// Iterate over each matching measurement in the shard. For each\n\t\t// measurement we'll get the matching tag keys (e.g., when a WITH KEYS)\n\t\t// statement is used, and we'll then use those to fetch all the relevant\n\t\t// values from matching series. 
Series may be filtered using a WHERE\n\t\t// filter.\n\t\tfor _, name := range names {\n\t\t\t// Determine a list of keys from condition.\n\t\t\tkeySet, err := sh.MeasurementTagKeysByExpr(name, cond)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif len(keySet) == 0 {\n\t\t\t\t// No matching tag keys for this measurement\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tresult := tagValues{\n\t\t\t\tname: name,\n\t\t\t\tkeys: make([]string, 0, len(keySet)),\n\t\t\t}\n\n\t\t\t// Add the keys to the tagValues and sort them.\n\t\t\tfor k := range keySet {\n\t\t\t\tresult.keys = append(result.keys, k)\n\t\t\t}\n\t\t\tsort.Sort(sort.StringSlice(result.keys))\n\n\t\t\t// get all the tag values for each key in the keyset.\n\t\t\t// Each slice in the results contains the sorted values associated\n\t\t\t// associated with each tag key for the measurement from the key set.\n\t\t\tif result.values, err = sh.MeasurementTagKeyValuesByExpr(name, result.keys, filterExpr, true); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tallResults = append(allResults, result)\n\t\t}\n\t}\n\n\tresult := make([]TagValues, 0, maxMeasurements)\n\n\t// We need to sort all results by measurement name.\n\tif len(shards) > 1 {\n\t\tsort.Sort(tagValuesSlice(allResults))\n\t}\n\n\t// The next stage is to merge the tagValue results for each shard's measurements.\n\tvar i, j int\n\t// Used as a temporary buffer in mergeTagValues. 
There can be at most len(shards)\n\t// instances of tagValues for a given measurement.\n\tidxBuf := make([][2]int, 0, len(shards))\n\tfor i < len(allResults) {\n\t\t// Gather all occurrences of the same measurement for merging.\n\t\tfor j+1 < len(allResults) && bytes.Equal(allResults[j+1].name, allResults[i].name) {\n\t\t\tj++\n\t\t}\n\n\t\t// An invariant is that there can't be more than n instances of tag\n\t\t// key value pairs for a given measurement, where n is the number of\n\t\t// shards.\n\t\tif got, exp := j-i+1, len(shards); got > exp {\n\t\t\treturn nil, fmt.Errorf(\"unexpected results returned engine. Got %d measurement sets for %d shards\", got, exp)\n\t\t}\n\n\t\tnextResult := mergeTagValues(idxBuf, allResults[i:j+1]...)\n\t\ti = j + 1\n\t\tif len(nextResult.Values) > 0 {\n\t\t\tresult = append(result, nextResult)\n\t\t}\n\t}\n\treturn result, nil\n}\n\n// mergeTagValues merges multiple sorted sets of temporary tagValues using a\n// direct k-way merge whilst also removing duplicated entries. The result is a\n// single TagValue type.\n//\n// TODO(edd): a Tournament based merge (see: Knuth's TAOCP 5.4.1) might be more\n// appropriate at some point.\n//\nfunc mergeTagValues(valueIdxs [][2]int, tvs ...tagValues) TagValues {\n\tvar result TagValues\n\tif len(tvs) == 0 {\n\t\treturn TagValues{}\n\t} else if len(tvs) == 1 {\n\t\tresult.Measurement = string(tvs[0].name)\n\t\t// TODO(edd): will be too small likely. 
Find a hint?\n\t\tresult.Values = make([]KeyValue, 0, len(tvs[0].values))\n\n\t\tfor ki, key := range tvs[0].keys {\n\t\t\tfor _, value := range tvs[0].values[ki] {\n\t\t\t\tresult.Values = append(result.Values, KeyValue{Key: key, Value: value})\n\t\t\t}\n\t\t}\n\t\treturn result\n\t}\n\n\tresult.Measurement = string(tvs[0].name)\n\n\tvar maxSize int\n\tfor _, tv := range tvs {\n\t\tif len(tv.values) > maxSize {\n\t\t\tmaxSize = len(tv.values)\n\t\t}\n\t}\n\tresult.Values = make([]KeyValue, 0, maxSize) // This will likely be too small but it's a start.\n\n\t// Resize and reset to the number of TagValues we're merging.\n\tvalueIdxs = valueIdxs[:len(tvs)]\n\tfor i := 0; i < len(valueIdxs); i++ {\n\t\tvalueIdxs[i][0], valueIdxs[i][1] = 0, 0\n\t}\n\n\tvar (\n\t\tj              int\n\t\tkeyCmp, valCmp int\n\t)\n\n\tfor {\n\t\t// Which of the provided TagValue sets currently holds the smallest element.\n\t\t// j is the candidate we're going to next pick for the result set.\n\t\tj = -1\n\n\t\t// Find the smallest element\n\t\tfor i := 0; i < len(tvs); i++ {\n\t\t\tif valueIdxs[i][0] >= len(tvs[i].keys) {\n\t\t\t\tcontinue // We have completely drained all tag keys and values for this shard.\n\t\t\t} else if len(tvs[i].values[valueIdxs[i][0]]) == 0 {\n\t\t\t\t// There are no tag values for these keys.\n\t\t\t\tvalueIdxs[i][0]++\n\t\t\t\tvalueIdxs[i][1] = 0\n\t\t\t\tcontinue\n\t\t\t} else if j == -1 {\n\t\t\t\t// We haven't picked a best TagValues set yet. 
Pick this one.\n\t\t\t\tj = i\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// It this tag key is lower than the candidate's tag key\n\t\t\tkeyCmp = strings.Compare(tvs[i].keys[valueIdxs[i][0]], tvs[j].keys[valueIdxs[j][0]])\n\t\t\tif keyCmp == -1 {\n\t\t\t\tj = i\n\t\t\t} else if keyCmp == 0 {\n\t\t\t\tvalCmp = strings.Compare(tvs[i].values[valueIdxs[i][0]][valueIdxs[i][1]], tvs[j].values[valueIdxs[j][0]][valueIdxs[j][1]])\n\t\t\t\t// Same tag key but this tag value is lower than the candidate.\n\t\t\t\tif valCmp == -1 {\n\t\t\t\t\tj = i\n\t\t\t\t} else if valCmp == 0 {\n\t\t\t\t\t// Duplicate tag key/value pair.... Remove and move onto\n\t\t\t\t\t// the next value for shard i.\n\t\t\t\t\tvalueIdxs[i][1]++\n\t\t\t\t\tif valueIdxs[i][1] >= len(tvs[i].values[valueIdxs[i][0]]) {\n\t\t\t\t\t\t// Drained all these tag values, move onto next key.\n\t\t\t\t\t\tvalueIdxs[i][0]++\n\t\t\t\t\t\tvalueIdxs[i][1] = 0\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// We could have drained all of the TagValue sets and be done...\n\t\tif j == -1 {\n\t\t\tbreak\n\t\t}\n\n\t\t// Append the smallest KeyValue\n\t\tresult.Values = append(result.Values, KeyValue{\n\t\t\tKey:   string(tvs[j].keys[valueIdxs[j][0]]),\n\t\t\tValue: tvs[j].values[valueIdxs[j][0]][valueIdxs[j][1]],\n\t\t})\n\t\t// Increment the indexes for the chosen TagValue.\n\t\tvalueIdxs[j][1]++\n\t\tif valueIdxs[j][1] >= len(tvs[j].values[valueIdxs[j][0]]) {\n\t\t\t// Drained all these tag values, move onto next key.\n\t\t\tvalueIdxs[j][0]++\n\t\t\tvalueIdxs[j][1] = 0\n\t\t}\n\t}\n\treturn result\n}\n\nfunc (s *Store) monitorShards() {\n\tdefer s.wg.Done()\n\tt := time.NewTicker(10 * time.Second)\n\tdefer t.Stop()\n\tt2 := time.NewTicker(time.Minute)\n\tdefer t2.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-s.closing:\n\t\t\treturn\n\t\tcase <-t.C:\n\t\t\ts.mu.RLock()\n\t\t\tfor _, sh := range s.shards {\n\t\t\t\tif sh.IsIdle() {\n\t\t\t\t\tsh.SetCompactionsEnabled(false)\n\t\t\t\t} else 
{\n\t\t\t\t\tsh.SetCompactionsEnabled(true)\n\t\t\t\t}\n\t\t\t}\n\t\t\ts.mu.RUnlock()\n\t\tcase <-t2.C:\n\t\t\tif s.EngineOptions.Config.MaxValuesPerTag == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ts.mu.RLock()\n\t\t\tshards := s.filterShards(func(sh *Shard) bool {\n\t\t\t\treturn sh.IndexType() == \"inmem\"\n\t\t\t})\n\t\t\ts.mu.RUnlock()\n\n\t\t\t// No inmem shards...\n\t\t\tif len(shards) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// inmem shards share the same index instance so just use the first one to avoid\n\t\t\t// allocating the same measurements repeatedly\n\t\t\tfirst := shards[0]\n\t\t\tnames, err := first.MeasurementNamesByExpr(nil)\n\t\t\tif err != nil {\n\t\t\t\ts.Logger.Warn(\"cannot retrieve measurement names\", zap.Error(err))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ts.walkShards(shards, func(sh *Shard) error {\n\t\t\t\tdb := sh.database\n\t\t\t\tid := sh.id\n\n\t\t\t\tfor _, name := range names {\n\t\t\t\t\tsh.ForEachMeasurementTagKey(name, func(k []byte) error {\n\t\t\t\t\t\tn := sh.TagKeyCardinality(name, k)\n\t\t\t\t\t\tperc := int(float64(n) / float64(s.EngineOptions.Config.MaxValuesPerTag) * 100)\n\t\t\t\t\t\tif perc > 100 {\n\t\t\t\t\t\t\tperc = 100\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// Log at 80, 85, 90-100% levels\n\t\t\t\t\t\tif perc == 80 || perc == 85 || perc >= 90 {\n\t\t\t\t\t\t\ts.Logger.Info(fmt.Sprintf(\"WARN: %d%% of max-values-per-tag limit exceeded: (%d/%d), db=%s shard=%d measurement=%s tag=%s\",\n\t\t\t\t\t\t\t\tperc, n, s.EngineOptions.Config.MaxValuesPerTag, db, id, name, k))\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}\n\t}\n}\n\n// KeyValue holds a string key and a string value.\ntype KeyValue struct {\n\tKey, Value string\n}\n\n// KeyValues is a sortable slice of KeyValue.\ntype KeyValues []KeyValue\n\n// Len implements sort.Interface.\nfunc (a KeyValues) Len() int { return len(a) }\n\n// Swap implements sort.Interface.\nfunc (a KeyValues) Swap(i, j int) { a[i], a[j] = a[j], 
a[i] }\n\n// Less implements sort.Interface. Keys are compared before values.\nfunc (a KeyValues) Less(i, j int) bool {\n\tki, kj := a[i].Key, a[j].Key\n\tif ki == kj {\n\t\treturn a[i].Value < a[j].Value\n\t}\n\treturn ki < kj\n}\n\n// filterShowSeriesResult will limit the number of series returned based on the limit and the offset.\n// Unlike limit and offset on SELECT statements, the limit and offset don't apply to the number of Rows, but\n// to the number of total Values returned, since each Value represents a unique series.\nfunc (e *Store) filterShowSeriesResult(limit, offset int, rows models.Rows) models.Rows {\n\tvar filteredSeries models.Rows\n\tseriesCount := 0\n\tfor _, r := range rows {\n\t\tvar currentSeries [][]interface{}\n\n\t\t// filter the values\n\t\tfor _, v := range r.Values {\n\t\t\tif seriesCount >= offset && seriesCount-offset < limit {\n\t\t\t\tcurrentSeries = append(currentSeries, v)\n\t\t\t}\n\t\t\tseriesCount++\n\t\t}\n\n\t\t// only add the row back in if there are some values in it\n\t\tif len(currentSeries) > 0 {\n\t\t\tr.Values = currentSeries\n\t\t\tfilteredSeries = append(filteredSeries, r)\n\t\t\tif seriesCount > limit+offset {\n\t\t\t\treturn filteredSeries\n\t\t\t}\n\t\t}\n\t}\n\treturn filteredSeries\n}\n\n// decodeStorePath extracts the database and retention policy names\n// from a given shard or WAL path.\nfunc decodeStorePath(shardOrWALPath string) (database, retentionPolicy string) {\n\t// shardOrWALPath format: /maybe/absolute/base/then/:database/:retentionPolicy/:nameOfShardOrWAL\n\n\t// Discard the last part of the path (the shard name or the wal name).\n\tpath, _ := filepath.Split(filepath.Clean(shardOrWALPath))\n\n\t// Extract the database and retention policy.\n\tpath, rp := filepath.Split(filepath.Clean(path))\n\t_, db := filepath.Split(filepath.Clean(path))\n\treturn db, rp\n}\n\n// relativePath will expand out the full paths passed in and return\n// the relative shard path from the store\nfunc 
relativePath(storePath, shardPath string) (string, error) {\n\tpath, err := filepath.Abs(storePath)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"store abs path: %s\", err)\n\t}\n\n\tfp, err := filepath.Abs(shardPath)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"file abs path: %s\", err)\n\t}\n\n\tname, err := filepath.Rel(path, fp)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"file rel path: %s\", err)\n\t}\n\n\treturn name, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/store_internal_test.go",
    "content": "package tsdb\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n)\n\nfunc TestStore_mergeTagValues(t *testing.T) {\n\texamples := []struct {\n\t\tin  []tagValues\n\t\tout TagValues\n\t}{\n\t\t{},\n\t\t{in: make([]tagValues, 4), out: TagValues{Values: []KeyValue{}}},\n\t\t{\n\t\t\tin:  []tagValues{createtagValues(\"m0\", map[string][]string{\"host\": {\"server-a\", \"server-b\", \"server-c\"}})},\n\t\t\tout: createTagValues(\"m0\", map[string][]string{\"host\": {\"server-a\", \"server-b\", \"server-c\"}}),\n\t\t},\n\t\t{\n\t\t\tin: []tagValues{\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"host\": {\"server-a\", \"server-b\", \"server-c\"}}),\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"host\": {\"server-a\", \"server-b\", \"server-c\"}}),\n\t\t\t},\n\t\t\tout: createTagValues(\"m0\", map[string][]string{\"host\": {\"server-a\", \"server-b\", \"server-c\"}}),\n\t\t},\n\t\t{\n\t\t\tin: []tagValues{\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"host\": {\"server-a\", \"server-b\", \"server-c\"}}),\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"host\": {\"server-a\", \"server-d\", \"server-e\"}}),\n\t\t\t},\n\t\t\tout: createTagValues(\"m0\", map[string][]string{\"host\": {\"server-a\", \"server-b\", \"server-c\", \"server-d\", \"server-e\"}}),\n\t\t},\n\t\t{\n\t\t\tin: []tagValues{\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"host\": {\"server-a\"}}),\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{}),\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"host\": {\"server-a\"}}),\n\t\t\t},\n\t\t\tout: createTagValues(\"m0\", map[string][]string{\"host\": {\"server-a\"}}),\n\t\t},\n\t\t{\n\t\t\tin: []tagValues{\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"host\": {\"server-q\", \"server-z\"}}),\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"host\": {\"server-a\", \"server-b\", \"server-c\"}}),\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"host\": 
{\"server-a\", \"server-d\", \"server-e\"}}),\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"host\": {\"server-e\", \"server-q\", \"server-z\"}}),\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"host\": {\"server-a\"}}),\n\t\t\t},\n\t\t\tout: createTagValues(\"m0\", map[string][]string{\"host\": {\"server-a\", \"server-b\", \"server-c\", \"server-d\", \"server-e\", \"server-q\", \"server-z\"}}),\n\t\t},\n\t\t{\n\t\t\tin: []tagValues{\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"a\": {\"0\", \"1\"}, \"host1\": {\"server-q\", \"server-z\"}}),\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"a\": {\"0\", \"2\"}, \"host2\": {\"server-a\", \"server-b\", \"server-c\"}}),\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"a\": {\"0\", \"3\"}, \"host3\": {\"server-a\", \"server-d\", \"server-e\"}}),\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"a\": {\"0\", \"4\"}, \"host4\": {\"server-e\", \"server-q\", \"server-z\"}}),\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"a\": {\"0\", \"5\"}, \"host5\": {\"server-a\"}}),\n\t\t\t},\n\t\t\tout: createTagValues(\"m0\", map[string][]string{\n\t\t\t\t\"a\":     {\"0\", \"1\", \"2\", \"3\", \"4\", \"5\"},\n\t\t\t\t\"host1\": {\"server-q\", \"server-z\"},\n\t\t\t\t\"host2\": {\"server-a\", \"server-b\", \"server-c\"},\n\t\t\t\t\"host3\": {\"server-a\", \"server-d\", \"server-e\"},\n\t\t\t\t\"host4\": {\"server-e\", \"server-q\", \"server-z\"},\n\t\t\t\t\"host5\": {\"server-a\"},\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tin: []tagValues{\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"region\": {\"east-1\", \"west-1\"}, \"host\": {\"server-a\", \"server-b\", \"server-c\"}}),\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"region\": {\"north-1\", \"west-1\"}, \"host\": {\"server-a\", \"server-d\", \"server-e\"}}),\n\t\t\t},\n\t\t\tout: createTagValues(\"m0\", map[string][]string{\n\t\t\t\t\"host\":   {\"server-a\", \"server-b\", \"server-c\", \"server-d\", 
\"server-e\"},\n\t\t\t\t\"region\": {\"east-1\", \"north-1\", \"west-1\"},\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tin: []tagValues{\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"region\": {\"east-1\", \"west-1\"}, \"host\": {\"server-a\", \"server-b\", \"server-c\"}}),\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"city\": {\"Baltimore\", \"Las Vegas\"}}),\n\t\t\t},\n\t\t\tout: createTagValues(\"m0\", map[string][]string{\n\t\t\t\t\"city\":   {\"Baltimore\", \"Las Vegas\"},\n\t\t\t\t\"host\":   {\"server-a\", \"server-b\", \"server-c\"},\n\t\t\t\t\"region\": {\"east-1\", \"west-1\"},\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tin: []tagValues{\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"city\": {\"Baltimore\", \"Las Vegas\"}}),\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"region\": {\"east-1\", \"west-1\"}, \"host\": {\"server-a\", \"server-b\", \"server-c\"}}),\n\t\t\t},\n\t\t\tout: createTagValues(\"m0\", map[string][]string{\n\t\t\t\t\"city\":   {\"Baltimore\", \"Las Vegas\"},\n\t\t\t\t\"host\":   {\"server-a\", \"server-b\", \"server-c\"},\n\t\t\t\t\"region\": {\"east-1\", \"west-1\"},\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tin: []tagValues{\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{\"region\": {\"east-1\", \"west-1\"}, \"host\": {\"server-a\", \"server-b\", \"server-c\"}}),\n\t\t\t\tcreatetagValues(\"m0\", map[string][]string{}),\n\t\t\t},\n\t\t\tout: createTagValues(\"m0\", map[string][]string{\n\t\t\t\t\"host\":   {\"server-a\", \"server-b\", \"server-c\"},\n\t\t\t\t\"region\": {\"east-1\", \"west-1\"},\n\t\t\t}),\n\t\t},\n\t}\n\n\tbuf := make([][2]int, 10)\n\tfor i, example := range examples {\n\t\tt.Run(fmt.Sprintf(\"example_%d\", i+1), func(t *testing.T) {\n\t\t\tif got, exp := mergeTagValues(buf, example.in...), example.out; !reflect.DeepEqual(got, exp) {\n\t\t\t\tt.Fatalf(\"\\ngot\\n %#v\\n\\n expected\\n %#v\", got, exp)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// Helper to create some tagValues.\nfunc createtagValues(mname string, kvs 
map[string][]string) tagValues {\n\tout := tagValues{\n\t\tname:   []byte(mname),\n\t\tkeys:   make([]string, 0, len(kvs)),\n\t\tvalues: make([][]string, len(kvs)),\n\t}\n\n\tfor k := range kvs {\n\t\tout.keys = append(out.keys, k)\n\t}\n\tsort.Sort(sort.StringSlice(out.keys))\n\n\tfor i, k := range out.keys {\n\t\tvalues := kvs[k]\n\t\tsort.Sort(sort.StringSlice(values))\n\t\tout.values[i] = values\n\t}\n\treturn out\n}\n\n// Helper to create some TagValues\nfunc createTagValues(mname string, kvs map[string][]string) TagValues {\n\tvar sz int\n\tfor _, v := range kvs {\n\t\tsz += len(v)\n\t}\n\n\tout := TagValues{\n\t\tMeasurement: mname,\n\t\tValues:      make([]KeyValue, 0, sz),\n\t}\n\n\tfor tk, tvs := range kvs {\n\t\tfor _, tv := range tvs {\n\t\t\tout.Values = append(out.Values, KeyValue{Key: tk, Value: tv})\n\t\t}\n\t\t// We have to sort the KeyValues since that's how they're provided from\n\t\t// the Store.\n\t\tsort.Sort(KeyValues(out.Values))\n\t}\n\n\treturn out\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/tsdb/store_test.go",
    "content": "package tsdb_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"math\"\n\t\"math/rand\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/influxdb/pkg/deep\"\n\t\"github.com/influxdata/influxdb/tsdb\"\n\t\"github.com/uber-go/zap\"\n)\n\n// Ensure the store can delete a retention policy and all shards under\n// it.\nfunc TestStore_DeleteRetentionPolicy(t *testing.T) {\n\tt.Parallel()\n\n\ts := MustOpenStore()\n\tdefer s.Close()\n\n\t// Create a new shard and verify that it exists.\n\tif err := s.CreateShard(\"db0\", \"rp0\", 1, true); err != nil {\n\t\tt.Fatal(err)\n\t} else if sh := s.Shard(1); sh == nil {\n\t\tt.Fatalf(\"expected shard\")\n\t}\n\n\t// Create a new shard under the same retention policy,  and verify\n\t// that it exists.\n\tif err := s.CreateShard(\"db0\", \"rp0\", 2, true); err != nil {\n\t\tt.Fatal(err)\n\t} else if sh := s.Shard(2); sh == nil {\n\t\tt.Fatalf(\"expected shard\")\n\t}\n\n\t// Create a new shard under a different retention policy, and\n\t// verify that it exists.\n\tif err := s.CreateShard(\"db0\", \"rp1\", 3, true); err != nil {\n\t\tt.Fatal(err)\n\t} else if sh := s.Shard(3); sh == nil {\n\t\tt.Fatalf(\"expected shard\")\n\t}\n\n\t// Deleting the rp0 retention policy does not return an error.\n\tif err := s.DeleteRetentionPolicy(\"db0\", \"rp0\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// It deletes the shards under that retention policy.\n\tif sh := s.Shard(1); sh != nil {\n\t\tt.Errorf(\"shard 1 was not deleted\")\n\t}\n\n\tif sh := s.Shard(2); sh != nil {\n\t\tt.Errorf(\"shard 2 was not deleted\")\n\t}\n\n\t// It deletes the retention policy directory.\n\tif got, exp := dirExists(filepath.Join(s.Path(), \"db0\", \"rp0\")), false; got != exp {\n\t\tt.Error(\"directory exists, but 
should have been removed\")\n\t}\n\n\t// It deletes the WAL retention policy directory.\n\tif got, exp := dirExists(filepath.Join(s.EngineOptions.Config.WALDir, \"db0\", \"rp0\")), false; got != exp {\n\t\tt.Error(\"directory exists, but should have been removed\")\n\t}\n\n\t// Reopen other shard and check it still exists.\n\tif err := s.Reopen(); err != nil {\n\t\tt.Error(err)\n\t} else if sh := s.Shard(3); sh == nil {\n\t\tt.Errorf(\"shard 3 does not exist\")\n\t}\n\n\t// It does not delete other retention policy directories.\n\tif got, exp := dirExists(filepath.Join(s.Path(), \"db0\", \"rp1\")), true; got != exp {\n\t\tt.Error(\"directory does not exist, but should\")\n\t}\n\tif got, exp := dirExists(filepath.Join(s.EngineOptions.Config.WALDir, \"db0\", \"rp1\")), true; got != exp {\n\t\tt.Error(\"directory does not exist, but should\")\n\t}\n}\n\n// Ensure the store can create a new shard.\nfunc TestStore_CreateShard(t *testing.T) {\n\tt.Parallel()\n\n\ts := MustOpenStore()\n\tdefer s.Close()\n\n\t// Create a new shard and verify that it exists.\n\tif err := s.CreateShard(\"db0\", \"rp0\", 1, true); err != nil {\n\t\tt.Fatal(err)\n\t} else if sh := s.Shard(1); sh == nil {\n\t\tt.Fatalf(\"expected shard\")\n\t}\n\n\t// Create another shard and verify that it exists.\n\tif err := s.CreateShard(\"db0\", \"rp0\", 2, true); err != nil {\n\t\tt.Fatal(err)\n\t} else if sh := s.Shard(2); sh == nil {\n\t\tt.Fatalf(\"expected shard\")\n\t}\n\n\t// Reopen shard and recheck.\n\tif err := s.Reopen(); err != nil {\n\t\tt.Fatal(err)\n\t} else if sh := s.Shard(1); sh == nil {\n\t\tt.Fatalf(\"expected shard(1)\")\n\t} else if sh = s.Shard(2); sh == nil {\n\t\tt.Fatalf(\"expected shard(2)\")\n\t}\n}\n\n// Ensure the store can delete an existing shard.\nfunc TestStore_DeleteShard(t *testing.T) {\n\tt.Parallel()\n\n\ts := MustOpenStore()\n\tdefer s.Close()\n\n\t// Create a new shard and verify that it exists.\n\tif err := s.CreateShard(\"db0\", \"rp0\", 1, true); err != nil 
{\n\t\tt.Fatal(err)\n\t} else if sh := s.Shard(1); sh == nil {\n\t\tt.Fatalf(\"expected shard\")\n\t}\n\n\t// Reopen shard and recheck.\n\tif err := s.Reopen(); err != nil {\n\t\tt.Fatal(err)\n\t} else if sh := s.Shard(1); sh == nil {\n\t\tt.Fatalf(\"shard exists\")\n\t}\n}\n\n// Ensure the store can create a snapshot to a shard.\nfunc TestStore_CreateShardSnapShot(t *testing.T) {\n\tt.Parallel()\n\n\ts := MustOpenStore()\n\tdefer s.Close()\n\n\t// Create a new shard and verify that it exists.\n\tif err := s.CreateShard(\"db0\", \"rp0\", 1, true); err != nil {\n\t\tt.Fatal(err)\n\t} else if sh := s.Shard(1); sh == nil {\n\t\tt.Fatalf(\"expected shard\")\n\t}\n\n\tdir, e := s.CreateShardSnapshot(1)\n\tif e != nil {\n\t\tt.Fatal(e)\n\t}\n\tif dir == \"\" {\n\t\tt.Fatal(\"empty directory name\")\n\t}\n}\n\nfunc TestStore_Open(t *testing.T) {\n\tt.Parallel()\n\n\ts := NewStore()\n\tdefer s.Close()\n\n\tif err := os.MkdirAll(filepath.Join(s.Path(), \"db0\", \"rp0\", \"2\"), 0777); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := os.MkdirAll(filepath.Join(s.Path(), \"db0\", \"rp2\", \"4\"), 0777); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := os.MkdirAll(filepath.Join(s.Path(), \"db1\", \"rp0\", \"1\"), 0777); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Store should ignore shard since it does not have a numeric name.\n\tif err := s.Open(); err != nil {\n\t\tt.Fatal(err)\n\t} else if n := len(s.Databases()); n != 2 {\n\t\tt.Fatalf(\"unexpected database index count: %d\", n)\n\t} else if n := s.ShardN(); n != 3 {\n\t\tt.Fatalf(\"unexpected shard count: %d\", n)\n\t}\n\n\texpDatabases := []string{\"db0\", \"db1\"}\n\tgotDatabases := s.Databases()\n\tsort.Strings(gotDatabases)\n\n\tif got, exp := gotDatabases, expDatabases; !reflect.DeepEqual(got, exp) {\n\t\tt.Fatalf(\"got %#v, expected %#v\", got, exp)\n\t}\n}\n\n// Ensure the store reports an error when it can't open a database directory.\nfunc TestStore_Open_InvalidDatabaseFile(t *testing.T) 
{\n\tt.Parallel()\n\n\ts := NewStore()\n\tdefer s.Close()\n\n\t// Create a file instead of a directory for a database.\n\tif _, err := os.Create(filepath.Join(s.Path(), \"db0\")); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Store should ignore database since it's a file.\n\tif err := s.Open(); err != nil {\n\t\tt.Fatal(err)\n\t} else if n := len(s.Databases()); n != 0 {\n\t\tt.Fatalf(\"unexpected database index count: %d\", n)\n\t}\n}\n\n// Ensure the store reports an error when it can't open a retention policy.\nfunc TestStore_Open_InvalidRetentionPolicy(t *testing.T) {\n\tt.Parallel()\n\n\ts := NewStore()\n\tdefer s.Close()\n\n\t// Create an RP file instead of a directory.\n\tif err := os.MkdirAll(filepath.Join(s.Path(), \"db0\"), 0777); err != nil {\n\t\tt.Fatal(err)\n\t} else if _, err := os.Create(filepath.Join(s.Path(), \"db0\", \"rp0\")); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Store should ignore retention policy since it's a file, and there should\n\t// be no indices created.\n\tif err := s.Open(); err != nil {\n\t\tt.Fatal(err)\n\t} else if n := len(s.Databases()); n != 0 {\n\t\tt.Log(s.Databases())\n\t\tt.Fatalf(\"unexpected database index count: %d\", n)\n\t}\n}\n\n// Ensure the store reports an error when it can't open a retention policy.\nfunc TestStore_Open_InvalidShard(t *testing.T) {\n\tt.Parallel()\n\n\ts := NewStore()\n\tdefer s.Close()\n\n\t// Create a non-numeric shard file.\n\tif err := os.MkdirAll(filepath.Join(s.Path(), \"db0\", \"rp0\"), 0777); err != nil {\n\t\tt.Fatal(err)\n\t} else if _, err := os.Create(filepath.Join(s.Path(), \"db0\", \"rp0\", \"bad_shard\")); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Store should ignore shard since it does not have a numeric name.\n\tif err := s.Open(); err != nil {\n\t\tt.Fatal(err)\n\t} else if n := len(s.Databases()); n != 0 {\n\t\tt.Fatalf(\"unexpected database index count: %d\", n)\n\t} else if n := s.ShardN(); n != 0 {\n\t\tt.Fatalf(\"unexpected shard count: %d\", n)\n\t}\n}\n\n// Ensure 
shards can create iterators.\nfunc TestShards_CreateIterator(t *testing.T) {\n\tt.Parallel()\n\n\ts := MustOpenStore()\n\tdefer s.Close()\n\n\t// Create shard #0 with data.\n\ts.MustCreateShardWithData(\"db0\", \"rp0\", 0,\n\t\t`cpu,host=serverA value=1  0`,\n\t\t`cpu,host=serverA value=2 10`,\n\t\t`cpu,host=serverB value=3 20`,\n\t)\n\n\t// Create shard #1 with data.\n\ts.MustCreateShardWithData(\"db0\", \"rp0\", 1,\n\t\t`cpu,host=serverA value=1 30`,\n\t\t`mem,host=serverA value=2 40`, // skip: wrong source\n\t\t`cpu,host=serverC value=3 60`,\n\t)\n\n\t// Retrieve shard group.\n\tshards := s.ShardGroup([]uint64{0, 1})\n\n\t// Create iterator.\n\titr, err := shards.CreateIterator(\"cpu\", influxql.IteratorOptions{\n\t\tExpr:       influxql.MustParseExpr(`value`),\n\t\tDimensions: []string{\"host\"},\n\t\tAscending:  true,\n\t\tStartTime:  influxql.MinTime,\n\t\tEndTime:    influxql.MaxTime,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer itr.Close()\n\tfitr := itr.(influxql.FloatIterator)\n\n\t// Read values from iterator. 
The host=serverA points should come first.\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(0): %s\", err)\n\t} else if !deep.Equal(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=serverA\"), Time: time.Unix(0, 0).UnixNano(), Value: 1}) {\n\t\tt.Fatalf(\"unexpected point(0): %s\", spew.Sdump(p))\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(1): %s\", err)\n\t} else if !deep.Equal(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=serverA\"), Time: time.Unix(10, 0).UnixNano(), Value: 2}) {\n\t\tt.Fatalf(\"unexpected point(1): %s\", spew.Sdump(p))\n\t}\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(2): %s\", err)\n\t} else if !deep.Equal(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=serverA\"), Time: time.Unix(30, 0).UnixNano(), Value: 1}) {\n\t\tt.Fatalf(\"unexpected point(2): %s\", spew.Sdump(p))\n\t}\n\n\t// Next the host=serverB point.\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(3): %s\", err)\n\t} else if !deep.Equal(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=serverB\"), Time: time.Unix(20, 0).UnixNano(), Value: 3}) {\n\t\tt.Fatalf(\"unexpected point(3): %s\", spew.Sdump(p))\n\t}\n\n\t// And finally the host=serverC point.\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"unexpected error(4): %s\", err)\n\t} else if !deep.Equal(p, &influxql.FloatPoint{Name: \"cpu\", Tags: ParseTags(\"host=serverC\"), Time: time.Unix(60, 0).UnixNano(), Value: 3}) {\n\t\tt.Fatalf(\"unexpected point(4): %s\", spew.Sdump(p))\n\t}\n\n\t// Then an EOF should occur.\n\tif p, err := fitr.Next(); err != nil {\n\t\tt.Fatalf(\"expected eof, got error: %s\", err)\n\t} else if p != nil {\n\t\tt.Fatalf(\"expected eof, got: %s\", spew.Sdump(p))\n\t}\n}\n\n// Ensure the store can backup a shard and another store can restore it.\nfunc TestStore_BackupRestoreShard(t *testing.T) {\n\tt.Parallel()\n\n\ts0, s1 := 
MustOpenStore(), MustOpenStore()\n\tdefer s0.Close()\n\tdefer s1.Close()\n\n\t// Create shard with data.\n\ts0.MustCreateShardWithData(\"db0\", \"rp0\", 100,\n\t\t`cpu value=1 0`,\n\t\t`cpu value=2 10`,\n\t\t`cpu value=3 20`,\n\t)\n\n\tif err := s0.Reopen(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Backup shard to a buffer.\n\tvar buf bytes.Buffer\n\tif err := s0.BackupShard(100, time.Time{}, &buf); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Create the shard on the other store and restore from buffer.\n\tif err := s1.CreateShard(\"db0\", \"rp0\", 100, true); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := s1.RestoreShard(100, &buf); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Read data from\n\titr, err := s0.Shard(100).CreateIterator(\"cpu\", influxql.IteratorOptions{\n\t\tExpr:      influxql.MustParseExpr(`value`),\n\t\tAscending: true,\n\t\tStartTime: influxql.MinTime,\n\t\tEndTime:   influxql.MaxTime,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfitr := itr.(influxql.FloatIterator)\n\n\t// Read values from iterator. 
The host=serverA points should come first.\n\tp, e := fitr.Next()\n\tif e != nil {\n\t\tt.Fatal(e)\n\t}\n\tif !deep.Equal(p, &influxql.FloatPoint{Name: \"cpu\", Time: time.Unix(0, 0).UnixNano(), Value: 1}) {\n\t\tt.Fatalf(\"unexpected point(0): %s\", spew.Sdump(p))\n\t}\n\tp, e = fitr.Next()\n\tif e != nil {\n\t\tt.Fatal(e)\n\t}\n\tif !deep.Equal(p, &influxql.FloatPoint{Name: \"cpu\", Time: time.Unix(10, 0).UnixNano(), Value: 2}) {\n\t\tt.Fatalf(\"unexpected point(1): %s\", spew.Sdump(p))\n\t}\n\tp, e = fitr.Next()\n\tif e != nil {\n\t\tt.Fatal(e)\n\t}\n\tif !deep.Equal(p, &influxql.FloatPoint{Name: \"cpu\", Time: time.Unix(20, 0).UnixNano(), Value: 3}) {\n\t\tt.Fatalf(\"unexpected point(2): %s\", spew.Sdump(p))\n\t}\n}\n\nfunc TestStore_MeasurementNames_Deduplicate(t *testing.T) {\n\tt.Parallel()\n\n\ts := MustOpenStore()\n\tdefer s.Close()\n\n\t// Create shard with data.\n\ts.MustCreateShardWithData(\"db0\", \"rp0\", 1,\n\t\t`cpu value=1 0`,\n\t\t`cpu value=2 10`,\n\t\t`cpu value=3 20`,\n\t)\n\n\t// Create 2nd shard w/ same measurements.\n\ts.MustCreateShardWithData(\"db0\", \"rp0\", 2,\n\t\t`cpu value=1 0`,\n\t\t`cpu value=2 10`,\n\t\t`cpu value=3 20`,\n\t)\n\n\tmeas, err := s.MeasurementNames(\"db0\", nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error with MeasurementNames: %v\", err)\n\t}\n\n\tif exp, got := 1, len(meas); exp != got {\n\t\tt.Fatalf(\"measurement len mismatch: exp %v, got %v\", exp, got)\n\t}\n\n\tif exp, got := \"cpu\", string(meas[0]); exp != got {\n\t\tt.Fatalf(\"measurement name mismatch: exp %v, got %v\", exp, got)\n\t}\n}\n\nfunc testStoreCardinalityTombstoning(t *testing.T, store *Store) {\n\tif testing.Short() || os.Getenv(\"GORACE\") != \"\" || os.Getenv(\"APPVEYOR\") != \"\" {\n\t\tt.Skip(\"Skipping test in short, race and appveyor mode.\")\n\t}\n\n\t// Generate point data to write to the shards.\n\tseries := genTestSeries(10, 2, 4) // 160 series\n\n\tpoints := make([]models.Point, 0, len(series))\n\tfor _, s := range series 
{\n\t\tpoints = append(points, models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{\"value\": 1.0}, time.Now()))\n\t}\n\n\t// Create requested number of shards in the store & write points across\n\t// shards such that we never write the same series to multiple shards.\n\tfor shardID := 0; shardID < 4; shardID++ {\n\t\tif err := store.CreateShard(\"db\", \"rp\", uint64(shardID), true); err != nil {\n\t\t\tt.Errorf(\"create shard: %s\", err)\n\t\t}\n\n\t\tif err := store.BatchWrite(shardID, points[shardID*40:(shardID+1)*40]); err != nil {\n\t\t\tt.Errorf(\"batch write: %s\", err)\n\t\t}\n\t}\n\n\t// Delete all the series for each measurement.\n\tmnames, err := store.MeasurementNames(\"db\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, name := range mnames {\n\t\tif err := store.DeleteSeries(\"db\", []influxql.Source{&influxql.Measurement{Name: string(name)}}, nil); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\t// Estimate the series cardinality...\n\tcardinality, err := store.Store.SeriesCardinality(\"db\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Estimated cardinality should be well within 10 of the actual cardinality.\n\t// TODO(edd): this epsilon is arbitrary. How can I make it better?\n\tif got, exp := cardinality, int64(10); got > exp {\n\t\tt.Errorf(\"series cardinality out by %v (expected within %v), estimation was: %d\", got, exp, cardinality)\n\t}\n\n\t// Since all the series have been deleted, all the measurements should have\n\t// been removed from the index too.\n\tif cardinality, err = store.Store.MeasurementsCardinality(\"db\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Estimated cardinality should be well within 2 of the actual cardinality.\n\t// TODO(edd): this is totally arbitrary. 
How can I make it better?\n\tif got, exp := cardinality, int64(2); got > exp {\n\t\tt.Errorf(\"measurement cardinality out by %v (expected within %v), estimation was: %d\", got, exp, cardinality)\n\t}\n}\n\nfunc TestStore_Cardinality_Tombstoning_Inmem(t *testing.T) {\n\tt.Parallel()\n\n\tstore := NewStore()\n\tstore.EngineOptions.Config.Index = \"inmem\"\n\tif err := store.Open(); err != nil {\n\t\tpanic(err)\n\t}\n\tdefer store.Close()\n\ttestStoreCardinalityTombstoning(t, store)\n}\n\nfunc TestStore_Cardinality_Tombstoning_TSI(t *testing.T) {\n\tt.Parallel()\n\n\tstore := NewStore()\n\tstore.EngineOptions.Config.Index = \"tsi1\"\n\tif err := store.Open(); err != nil {\n\t\tpanic(err)\n\t}\n\tdefer store.Close()\n\ttestStoreCardinalityTombstoning(t, store)\n}\n\nfunc testStoreCardinalityUnique(t *testing.T, store *Store) {\n\tif testing.Short() || os.Getenv(\"GORACE\") != \"\" || os.Getenv(\"APPVEYOR\") != \"\" {\n\t\tt.Skip(\"Skipping test in short, race and appveyor mode.\")\n\t}\n\n\t// Generate point data to write to the shards.\n\tseries := genTestSeries(64, 5, 5) // 200,000 series\n\texpCardinality := len(series)\n\n\tpoints := make([]models.Point, 0, len(series))\n\tfor _, s := range series {\n\t\tpoints = append(points, models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{\"value\": 1.0}, time.Now()))\n\t}\n\n\t// Create requested number of shards in the store & write points across\n\t// shards such that we never write the same series to multiple shards.\n\tfor shardID := 0; shardID < 10; shardID++ {\n\t\tif err := store.CreateShard(\"db\", \"rp\", uint64(shardID), true); err != nil {\n\t\t\tt.Fatalf(\"create shard: %s\", err)\n\t\t}\n\t\tif err := store.BatchWrite(shardID, points[shardID*20000:(shardID+1)*20000]); err != nil {\n\t\t\tt.Fatalf(\"batch write: %s\", err)\n\t\t}\n\t}\n\n\t// Estimate the series cardinality...\n\tcardinality, err := store.Store.SeriesCardinality(\"db\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// 
Estimated cardinality should be well within 1.5% of the actual cardinality.\n\tif got, exp := math.Abs(float64(cardinality)-float64(expCardinality))/float64(expCardinality), 0.015; got > exp {\n\t\tt.Errorf(\"got epsilon of %v for series cardinality %v (expected %v), which is larger than expected %v\", got, cardinality, expCardinality, exp)\n\t}\n\n\t// Estimate the measurement cardinality...\n\tif cardinality, err = store.Store.MeasurementsCardinality(\"db\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Estimated cardinality should be well within 2 of the actual cardinality. (arbitrary...)\n\texpCardinality = 64\n\tif got, exp := math.Abs(float64(cardinality)-float64(expCardinality)), 2.0; got > exp {\n\t\tt.Errorf(\"got measurmement cardinality %v, expected upto %v; difference is larger than expected %v\", cardinality, expCardinality, exp)\n\t}\n}\n\nfunc TestStore_Cardinality_Unique_Inmem(t *testing.T) {\n\tt.Parallel()\n\n\tstore := NewStore()\n\tstore.EngineOptions.Config.Index = \"inmem\"\n\tstore.EngineOptions.Config.MaxSeriesPerDatabase = 0\n\tif err := store.Open(); err != nil {\n\t\tpanic(err)\n\t}\n\tdefer store.Close()\n\ttestStoreCardinalityUnique(t, store)\n}\n\nfunc TestStore_Cardinality_Unique_TSI1(t *testing.T) {\n\tt.Parallel()\n\n\tstore := NewStore()\n\tstore.EngineOptions.Config.Index = \"tsi1\"\n\tstore.EngineOptions.Config.MaxSeriesPerDatabase = 0\n\tif err := store.Open(); err != nil {\n\t\tpanic(err)\n\t}\n\tdefer store.Close()\n\ttestStoreCardinalityUnique(t, store)\n}\n\n// This test tests cardinality estimation when series data is duplicated across\n// multiple shards.\nfunc testStoreCardinalityDuplicates(t *testing.T, store *Store) {\n\tif testing.Short() || os.Getenv(\"GORACE\") != \"\" || os.Getenv(\"APPVEYOR\") != \"\" {\n\t\tt.Skip(\"Skipping test in short, race and appveyor mode.\")\n\t}\n\n\t// Generate point data to write to the shards.\n\tseries := genTestSeries(64, 5, 5) // 200,000 series.\n\texpCardinality := 
len(series)\n\n\tpoints := make([]models.Point, 0, len(series))\n\tfor _, s := range series {\n\t\tpoints = append(points, models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{\"value\": 1.0}, time.Now()))\n\t}\n\n\t// Create requested number of shards in the store & write points.\n\tfor shardID := 0; shardID < 10; shardID++ {\n\t\tif err := store.CreateShard(\"db\", \"rp\", uint64(shardID), true); err != nil {\n\t\t\tt.Fatalf(\"create shard: %s\", err)\n\t\t}\n\n\t\tvar from, to int\n\t\tif shardID == 0 {\n\t\t\t// if it's the first shard then write all of the points.\n\t\t\tfrom, to = 0, len(points)-1\n\t\t} else {\n\t\t\t// For other shards we write a random sub-section of all the points.\n\t\t\t// which will duplicate the series and shouldn't increase the\n\t\t\t// cardinality.\n\t\t\tfrom, to := rand.Intn(len(points)), rand.Intn(len(points))\n\t\t\tif from > to {\n\t\t\t\tfrom, to = to, from\n\t\t\t}\n\t\t}\n\n\t\tif err := store.BatchWrite(shardID, points[from:to]); err != nil {\n\t\t\tt.Fatalf(\"batch write: %s\", err)\n\t\t}\n\t}\n\n\t// Estimate the series cardinality...\n\tcardinality, err := store.Store.SeriesCardinality(\"db\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Estimated cardinality should be well within 1.5% of the actual cardinality.\n\tif got, exp := math.Abs(float64(cardinality)-float64(expCardinality))/float64(expCardinality), 0.015; got > exp {\n\t\tt.Errorf(\"got epsilon of %v for series cardinality %d (expected %d), which is larger than expected %v\", got, cardinality, expCardinality, exp)\n\t}\n\n\t// Estimate the measurement cardinality...\n\tif cardinality, err = store.Store.MeasurementsCardinality(\"db\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Estimated cardinality should be well within 2 of the actual cardinality. 
(Arbitrary...)\n\texpCardinality = 64\n\tif got, exp := math.Abs(float64(cardinality)-float64(expCardinality)), 2.0; got > exp {\n\t\tt.Errorf(\"got measurement cardinality %v, expected upto %v; difference is larger than expected %v\", cardinality, expCardinality, exp)\n\t}\n}\n\nfunc TestStore_Cardinality_Duplicates_Inmem(t *testing.T) {\n\tt.Parallel()\n\n\tstore := NewStore()\n\tstore.EngineOptions.Config.Index = \"inmem\"\n\tstore.EngineOptions.Config.MaxSeriesPerDatabase = 0\n\tif err := store.Open(); err != nil {\n\t\tpanic(err)\n\t}\n\tdefer store.Close()\n\ttestStoreCardinalityDuplicates(t, store)\n}\n\nfunc TestStore_Cardinality_Duplicates_TSI1(t *testing.T) {\n\tt.Parallel()\n\n\tstore := NewStore()\n\tstore.EngineOptions.Config.Index = \"tsi1\"\n\tstore.EngineOptions.Config.MaxSeriesPerDatabase = 0\n\tif err := store.Open(); err != nil {\n\t\tpanic(err)\n\t}\n\tdefer store.Close()\n\ttestStoreCardinalityDuplicates(t, store)\n}\n\n// Creates a large number of series in multiple shards, which will force\n// compactions to occur.\nfunc testStoreCardinalityCompactions(t *testing.T, store *Store) {\n\tif testing.Short() || os.Getenv(\"GORACE\") != \"\" || os.Getenv(\"APPVEYOR\") != \"\" {\n\t\tt.Skip(\"Skipping test in short, race and appveyor mode.\")\n\t}\n\n\t// Generate point data to write to the shards.\n\tseries := genTestSeries(300, 5, 5) // 937,500 series\n\texpCardinality := len(series)\n\n\tpoints := make([]models.Point, 0, len(series))\n\tfor _, s := range series {\n\t\tpoints = append(points, models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{\"value\": 1.0}, time.Now()))\n\t}\n\n\t// Create requested number of shards in the store & write points across\n\t// shards such that we never write the same series to multiple shards.\n\tfor shardID := 0; shardID < 2; shardID++ {\n\t\tif err := store.CreateShard(\"db\", \"rp\", uint64(shardID), true); err != nil {\n\t\t\tt.Fatalf(\"create shard: %s\", err)\n\t\t}\n\t\tif err := 
store.BatchWrite(shardID, points[shardID*468750:(shardID+1)*468750]); err != nil {\n\t\t\tt.Fatalf(\"batch write: %s\", err)\n\t\t}\n\t}\n\n\t// Estimate the series cardinality...\n\tcardinality, err := store.Store.SeriesCardinality(\"db\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Estimated cardinality should be well within 1.5% of the actual cardinality.\n\tif got, exp := math.Abs(float64(cardinality)-float64(expCardinality))/float64(expCardinality), 0.015; got > exp {\n\t\tt.Errorf(\"got epsilon of %v for series cardinality %v (expected %v), which is larger than expected %v\", got, cardinality, expCardinality, exp)\n\t}\n\n\t// Estimate the measurement cardinality...\n\tif cardinality, err = store.Store.MeasurementsCardinality(\"db\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Estimated cardinality should be well within 2 of the actual cardinality. (Arbitrary...)\n\texpCardinality = 300\n\tif got, exp := math.Abs(float64(cardinality)-float64(expCardinality)), 2.0; got > exp {\n\t\tt.Errorf(\"got measurement cardinality %v, expected upto %v; difference is larger than expected %v\", cardinality, expCardinality, exp)\n\t}\n}\n\nfunc TestStore_Cardinality_Compactions_Inmem(t *testing.T) {\n\tt.Parallel()\n\n\tstore := NewStore()\n\tstore.EngineOptions.Config.Index = \"inmem\"\n\tstore.EngineOptions.Config.MaxSeriesPerDatabase = 0\n\tif err := store.Open(); err != nil {\n\t\tpanic(err)\n\t}\n\tdefer store.Close()\n\ttestStoreCardinalityCompactions(t, store)\n}\n\nfunc TestStore_Cardinality_Compactions_TSI1(t *testing.T) {\n\tt.Parallel()\n\n\tstore := NewStore()\n\tstore.EngineOptions.Config.Index = \"tsi1\"\n\tstore.EngineOptions.Config.MaxSeriesPerDatabase = 0\n\tif err := store.Open(); err != nil {\n\t\tpanic(err)\n\t}\n\tdefer store.Close()\n\ttestStoreCardinalityCompactions(t, store)\n}\n\nfunc TestStore_TagValues(t *testing.T) {\n\tt.Parallel()\n\n\t// No WHERE - just get for keys host and shard\n\tRHSAll := &influxql.ParenExpr{\n\t\tExpr: 
&influxql.BinaryExpr{\n\t\t\tOp: influxql.OR,\n\t\t\tLHS: &influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.EQ,\n\t\t\t\tLHS: &influxql.VarRef{Val: \"_tagKey\"},\n\t\t\t\tRHS: &influxql.StringLiteral{Val: \"host\"},\n\t\t\t},\n\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.EQ,\n\t\t\t\tLHS: &influxql.VarRef{Val: \"_tagKey\"},\n\t\t\t\tRHS: &influxql.StringLiteral{Val: \"shard\"},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Get for host and shard, but also WHERE on foo = a\n\tRHSWhere := &influxql.ParenExpr{\n\t\tExpr: &influxql.BinaryExpr{\n\t\t\tOp: influxql.AND,\n\t\t\tLHS: &influxql.ParenExpr{\n\t\t\t\tExpr: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"foo\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"a\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRHS: RHSAll,\n\t\t},\n\t}\n\n\t// SHOW TAG VALUES FROM /cpu\\d/ WITH KEY IN (\"host\", \"shard\")\n\t//\n\t// Switching out RHS for RHSWhere would make the query:\n\t//    SHOW TAG VALUES FROM /cpu\\d/ WITH KEY IN (\"host\", \"shard\") WHERE foo = 'a'\n\tbase := influxql.BinaryExpr{\n\t\tOp: influxql.AND,\n\t\tLHS: &influxql.ParenExpr{\n\t\t\tExpr: &influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.EQREGEX,\n\t\t\t\tLHS: &influxql.VarRef{Val: \"_name\"},\n\t\t\t\tRHS: &influxql.RegexLiteral{Val: regexp.MustCompile(`cpu\\d`)},\n\t\t\t},\n\t\t},\n\t\tRHS: RHSAll,\n\t}\n\n\tvar baseWhere *influxql.BinaryExpr = influxql.CloneExpr(&base).(*influxql.BinaryExpr)\n\tbaseWhere.RHS = RHSWhere\n\n\texamples := []struct {\n\t\tName string\n\t\tExpr influxql.Expr\n\t\tExp  []tsdb.TagValues\n\t}{\n\t\t{\n\t\t\tName: \"No WHERE clause\",\n\t\t\tExpr: &base,\n\t\t\tExp: []tsdb.TagValues{\n\t\t\t\tcreateTagValues(\"cpu0\", map[string][]string{\"host\": {\"nofoo\", \"tv0\", \"tv1\", \"tv2\", \"tv3\"}, \"shard\": {\"s0\", \"s1\", \"s2\"}}),\n\t\t\t\tcreateTagValues(\"cpu1\", map[string][]string{\"host\": {\"nofoo\", \"tv0\", \"tv1\", \"tv2\", \"tv3\"}, \"shard\": {\"s0\", \"s1\", 
\"s2\"}}),\n\t\t\t\tcreateTagValues(\"cpu2\", map[string][]string{\"host\": {\"nofoo\", \"tv0\", \"tv1\", \"tv2\", \"tv3\"}, \"shard\": {\"s0\", \"s1\", \"s2\"}}),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"With WHERE clause\",\n\t\t\tExpr: baseWhere,\n\t\t\tExp: []tsdb.TagValues{\n\t\t\t\tcreateTagValues(\"cpu0\", map[string][]string{\"host\": {\"tv0\", \"tv1\", \"tv2\", \"tv3\"}, \"shard\": {\"s0\", \"s1\", \"s2\"}}),\n\t\t\t\tcreateTagValues(\"cpu1\", map[string][]string{\"host\": {\"tv0\", \"tv1\", \"tv2\", \"tv3\"}, \"shard\": {\"s0\", \"s1\", \"s2\"}}),\n\t\t\t\tcreateTagValues(\"cpu2\", map[string][]string{\"host\": {\"tv0\", \"tv1\", \"tv2\", \"tv3\"}, \"shard\": {\"s0\", \"s1\", \"s2\"}}),\n\t\t\t},\n\t\t},\n\t}\n\n\tvar s *Store\n\tsetup := func(index string) {\n\t\ts = MustOpenStore()\n\t\ts.EngineOptions.IndexVersion = index\n\n\t\tfmtStr := `cpu%[1]d,foo=a,ignoreme=nope,host=tv%[2]d,shard=s%[3]d value=1 %[4]d\n\t\tcpu%[1]d,host=nofoo value=1 %[4]d\n\tmem,host=nothanks value=1 %[4]d\n\t`\n\t\tgenPoints := func(sid int) []string {\n\t\t\tvar ts int\n\t\t\tpoints := make([]string, 0, 3*4)\n\t\t\tfor m := 0; m < 3; m++ {\n\t\t\t\tfor tagvid := 0; tagvid < 4; tagvid++ {\n\t\t\t\t\tpoints = append(points, fmt.Sprintf(fmtStr, m, tagvid, sid, ts))\n\t\t\t\t\tts++\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn points\n\t\t}\n\n\t\t// Create data across 3 shards.\n\t\tfor i := 0; i < 3; i++ {\n\t\t\ts.MustCreateShardWithData(\"db0\", \"rp0\", i, genPoints(i)...)\n\t\t}\n\t}\n\n\tindexes := []string{\"inmem\", \"tsi1\"}\n\tfor _, example := range examples {\n\t\tfor _, index := range indexes {\n\t\t\tsetup(index)\n\t\t\tt.Run(example.Name+\"_\"+index, func(t *testing.T) {\n\t\t\t\tgot, err := s.TagValues(\"db0\", example.Expr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\texp := example.Exp\n\n\t\t\t\tif !reflect.DeepEqual(got, exp) {\n\t\t\t\t\tt.Fatalf(\"got:\\n%#v\\n\\nexp:\\n%#v\", got, exp)\n\t\t\t\t}\n\t\t\t})\n\t\t\ts.Close()\n\t\t}\n\t}\n}\n\n// 
Helper to create some tag values\nfunc createTagValues(mname string, kvs map[string][]string) tsdb.TagValues {\n\tvar sz int\n\tfor _, v := range kvs {\n\t\tsz += len(v)\n\t}\n\n\tout := tsdb.TagValues{\n\t\tMeasurement: mname,\n\t\tValues:      make([]tsdb.KeyValue, 0, sz),\n\t}\n\n\tfor tk, tvs := range kvs {\n\t\tfor _, tv := range tvs {\n\t\t\tout.Values = append(out.Values, tsdb.KeyValue{Key: tk, Value: tv})\n\t\t}\n\t\t// We have to sort the KeyValues since that's how they're provided from\n\t\t// the tsdb.Store.\n\t\tsort.Sort(tsdb.KeyValues(out.Values))\n\t}\n\n\treturn out\n}\n\nfunc benchmarkStoreSeriesCardinality(b *testing.B, store *Store, n int) {\n\t// Write a point to n shards.\n\tfor shardID := 0; shardID < n; shardID++ {\n\t\tif err := store.CreateShard(\"db\", \"rp\", uint64(shardID), true); err != nil {\n\t\t\tb.Fatalf(\"create shard: %s\", err)\n\t\t}\n\n\t\terr := store.WriteToShard(uint64(shardID), []models.Point{models.MustNewPoint(\"cpu\", nil, map[string]interface{}{\"value\": 1.0}, time.Now())})\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"write: %s\", err)\n\t\t}\n\t}\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, _ = store.SeriesCardinality(\"db\")\n\t}\n}\n\nfunc BenchmarkStore_SeriesCardinality_100_Shards_Inmem(b *testing.B) {\n\tstore := NewStore()\n\tstore.EngineOptions.Config.Index = \"inmem\"\n\tif err := store.Open(); err != nil {\n\t\tpanic(err)\n\t}\n\tdefer store.Close()\n\tbenchmarkStoreSeriesCardinality(b, store, 100)\n}\n\nfunc BenchmarkStore_SeriesCardinality_100_Shards_TSI(b *testing.B) {\n\tstore := NewStore()\n\tstore.EngineOptions.Config.Index = \"tsi1\"\n\tif err := store.Open(); err != nil {\n\t\tpanic(err)\n\t}\n\tdefer store.Close()\n\tbenchmarkStoreSeriesCardinality(b, store, 100)\n}\n\nfunc BenchmarkStoreOpen_200KSeries_100Shards(b *testing.B) { benchmarkStoreOpen(b, 64, 5, 5, 1, 100) }\n\nfunc benchmarkStoreOpen(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt, shardCnt int) {\n\tvar path string\n\tif err := func() 
error {\n\t\tstore := MustOpenStore()\n\t\tdefer store.Store.Close()\n\t\tpath = store.Path()\n\n\t\t// Generate test series (measurements + unique tag sets).\n\t\tseries := genTestSeries(mCnt, tkCnt, tvCnt)\n\n\t\t// Generate point data to write to the shards.\n\t\tpoints := []models.Point{}\n\t\tfor _, s := range series {\n\t\t\tfor val := 0.0; val < float64(pntCnt); val++ {\n\t\t\t\tp := models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{\"value\": val}, time.Now())\n\t\t\t\tpoints = append(points, p)\n\t\t\t}\n\t\t}\n\n\t\t// Create requested number of shards in the store & write points.\n\t\tfor shardID := 0; shardID < shardCnt; shardID++ {\n\t\t\tif err := store.CreateShard(\"mydb\", \"myrp\", uint64(shardID), true); err != nil {\n\t\t\t\treturn fmt.Errorf(\"create shard: %s\", err)\n\t\t\t}\n\t\t\tif err := store.BatchWrite(shardID, points); err != nil {\n\t\t\t\treturn fmt.Errorf(\"batch write: %s\", err)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}(); err != nil {\n\t\tb.Fatal(err)\n\t}\n\tdefer os.RemoveAll(path)\n\n\t// Run the benchmark loop.\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tstore := tsdb.NewStore(path)\n\t\tif err := store.Open(); err != nil {\n\t\t\tb.Fatalf(\"open store error: %s\", err)\n\t\t}\n\n\t\tb.StopTimer()\n\t\tstore.Close()\n\t\tb.StartTimer()\n\t}\n}\n\n// To store result of benchmark (ensure allocated on heap).\nvar tvResult []tsdb.TagValues\n\nfunc BenchmarkStore_TagValues(b *testing.B) {\n\tbenchmarks := []struct {\n\t\tname         string\n\t\tshards       int\n\t\tmeasurements int\n\t\ttagValues    int\n\t}{\n\t\t{name: \"s=1_m=1_v=100\", shards: 1, measurements: 1, tagValues: 100},\n\t\t{name: \"s=1_m=1_v=1000\", shards: 1, measurements: 1, tagValues: 1000},\n\t\t{name: \"s=1_m=10_v=100\", shards: 1, measurements: 10, tagValues: 100},\n\t\t{name: \"s=1_m=10_v=1000\", shards: 1, measurements: 10, tagValues: 1000},\n\t\t{name: \"s=1_m=100_v=100\", shards: 1, measurements: 100, tagValues: 
100},\n\t\t{name: \"s=1_m=100_v=1000\", shards: 1, measurements: 100, tagValues: 1000},\n\t\t{name: \"s=10_m=1_v=100\", shards: 10, measurements: 1, tagValues: 100},\n\t\t{name: \"s=10_m=1_v=1000\", shards: 10, measurements: 1, tagValues: 1000},\n\t\t{name: \"s=10_m=10_v=100\", shards: 10, measurements: 10, tagValues: 100},\n\t\t{name: \"s=10_m=10_v=1000\", shards: 10, measurements: 10, tagValues: 1000},\n\t\t{name: \"s=10_m=100_v=100\", shards: 10, measurements: 100, tagValues: 100},\n\t\t{name: \"s=10_m=100_v=1000\", shards: 10, measurements: 100, tagValues: 1000},\n\t}\n\n\tvar s *Store\n\tsetup := func(shards, measurements, tagValues int, index string, useRandom bool) {\n\t\ts = NewStore()\n\t\ts.EngineOptions.IndexVersion = index\n\t\tif err := s.Open(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tfmtStr := `cpu%[1]d,host=tv%[2]d,shard=s%[3]d,z1=s%[1]d%[2]d,z2=%[4]s value=1 %[5]d`\n\t\t// genPoints generates some point data. If ran is true then random tag\n\t\t// key values will be generated, meaning more work sorting and merging.\n\t\t// If ran is false, then the same set of points will be produced for the\n\t\t// same set of parameters, meaning more de-duplication of points will be\n\t\t// needed.\n\t\tgenPoints := func(sid int, ran bool) []string {\n\t\t\tvar v, ts int\n\t\t\tvar half string\n\t\t\tpoints := make([]string, 0, measurements*tagValues)\n\t\t\tfor m := 0; m < measurements; m++ {\n\t\t\t\tfor tagvid := 0; tagvid < tagValues; tagvid++ {\n\t\t\t\t\tv = tagvid\n\t\t\t\t\tif ran {\n\t\t\t\t\t\tv = rand.Intn(100000)\n\t\t\t\t\t}\n\t\t\t\t\thalf = fmt.Sprint(rand.Intn(2) == 0)\n\t\t\t\t\tpoints = append(points, fmt.Sprintf(fmtStr, m, v, sid, half, ts))\n\t\t\t\t\tts++\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn points\n\t\t}\n\n\t\t// Create data across chosen number of shards.\n\t\tfor i := 0; i < shards; i++ {\n\t\t\ts.MustCreateShardWithData(\"db0\", \"rp0\", i, genPoints(i, useRandom)...)\n\t\t}\n\t}\n\n\tteardown := func() {\n\t\tif err := s.Close(); err 
!= nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n\n\t// SHOW TAG VALUES WITH KEY IN (\"host\", \"shard\")\n\tcond1 := &influxql.ParenExpr{\n\t\tExpr: &influxql.BinaryExpr{\n\t\t\tOp: influxql.OR,\n\t\t\tLHS: &influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.EQ,\n\t\t\t\tLHS: &influxql.VarRef{Val: \"_tagKey\"},\n\t\t\t\tRHS: &influxql.StringLiteral{Val: \"host\"},\n\t\t\t},\n\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.EQ,\n\t\t\t\tLHS: &influxql.VarRef{Val: \"_tagKey\"},\n\t\t\t\tRHS: &influxql.StringLiteral{Val: \"shard\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tcond2 := &influxql.ParenExpr{\n\t\tExpr: &influxql.BinaryExpr{\n\t\t\tOp: influxql.AND,\n\t\t\tLHS: &influxql.ParenExpr{\n\t\t\t\tExpr: &influxql.BinaryExpr{\n\t\t\t\t\tOp:  influxql.EQ,\n\t\t\t\t\tLHS: &influxql.VarRef{Val: \"z2\"},\n\t\t\t\t\tRHS: &influxql.StringLiteral{Val: \"true\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRHS: cond1,\n\t\t},\n\t}\n\n\tvar err error\n\tfor _, index := range []string{\"inmem\", \"tsi1\"} {\n\t\tfor useRand := 0; useRand < 2; useRand++ {\n\t\t\tfor c, condition := range []influxql.Expr{cond1, cond2} {\n\t\t\t\tfor _, bm := range benchmarks {\n\t\t\t\t\tsetup(bm.shards, bm.measurements, bm.tagValues, index, useRand == 1)\n\t\t\t\t\tcnd := \"Unfiltered\"\n\t\t\t\t\tif c == 0 {\n\t\t\t\t\t\tcnd = \"Filtered\"\n\t\t\t\t\t}\n\t\t\t\t\tb.Run(\"random_values=\"+fmt.Sprint(useRand == 1)+\"_index=\"+index+\"_\"+cnd+\"_\"+bm.name, func(b *testing.B) {\n\t\t\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\t\t\tif tvResult, err = s.TagValues(\"db0\", condition); err != nil {\n\t\t\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\t\t\t\t\tteardown()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Store is a test wrapper for tsdb.Store.\ntype Store struct {\n\t*tsdb.Store\n}\n\n// NewStore returns a new instance of Store with a temporary path.\nfunc NewStore() *Store {\n\tpath, err := ioutil.TempDir(\"\", \"influxdb-tsdb-\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\ts := &Store{Store: 
tsdb.NewStore(path)}\n\ts.EngineOptions.Config.WALDir = filepath.Join(path, \"wal\")\n\ts.EngineOptions.Config.TraceLoggingEnabled = true\n\n\tif testing.Verbose() {\n\t\ts.WithLogger(zap.New(\n\t\t\tzap.NewTextEncoder(),\n\t\t\tzap.Output(os.Stdout),\n\t\t))\n\t}\n\treturn s\n}\n\n// MustOpenStore returns a new, open Store using the default index,\n// at a temporary path.\nfunc MustOpenStore() *Store {\n\ts := NewStore()\n\tif err := s.Open(); err != nil {\n\t\tpanic(err)\n\t}\n\treturn s\n}\n\n// Reopen closes and reopens the store as a new store.\nfunc (s *Store) Reopen() error {\n\tif err := s.Store.Close(); err != nil {\n\t\treturn err\n\t}\n\ts.Store = tsdb.NewStore(s.Path())\n\ts.EngineOptions.Config.WALDir = filepath.Join(s.Path(), \"wal\")\n\treturn s.Open()\n}\n\n// Close closes the store and removes the underlying data.\nfunc (s *Store) Close() error {\n\tdefer os.RemoveAll(s.Path())\n\treturn s.Store.Close()\n}\n\n// MustCreateShardWithData creates a shard and writes line protocol data to it.\nfunc (s *Store) MustCreateShardWithData(db, rp string, shardID int, data ...string) {\n\tif err := s.CreateShard(db, rp, uint64(shardID), true); err != nil {\n\t\tpanic(err)\n\t}\n\ts.MustWriteToShardString(shardID, data...)\n}\n\n// MustWriteToShardString parses the line protocol (with second precision) and\n// inserts the resulting points into a shard. 
Panic on error.\nfunc (s *Store) MustWriteToShardString(shardID int, data ...string) {\n\tvar points []models.Point\n\tfor i := range data {\n\t\ta, err := models.ParsePointsWithPrecision([]byte(strings.TrimSpace(data[i])), time.Time{}, \"s\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tpoints = append(points, a...)\n\t}\n\n\tif err := s.WriteToShard(uint64(shardID), points); err != nil {\n\t\tpanic(err)\n\t}\n}\n\n// BatchWrite writes points to a shard in chunks.\nfunc (s *Store) BatchWrite(shardID int, points []models.Point) error {\n\tnPts := len(points)\n\tchunkSz := 10000\n\tstart := 0\n\tend := chunkSz\n\n\tfor {\n\t\tif end > nPts {\n\t\t\tend = nPts\n\t\t}\n\t\tif end-start == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tif err := s.WriteToShard(uint64(shardID), points[start:end]); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstart = end\n\t\tend += chunkSz\n\t}\n\treturn nil\n}\n\n// ParseTags returns an instance of Tags for a comma-delimited list of key/values.\nfunc ParseTags(s string) influxql.Tags {\n\tm := make(map[string]string)\n\tfor _, kv := range strings.Split(s, \",\") {\n\t\ta := strings.Split(kv, \"=\")\n\t\tm[a[0]] = a[1]\n\t}\n\treturn influxql.NewTags(m)\n}\n\nfunc dirExists(path string) bool {\n\tvar err error\n\tif _, err = os.Stat(path); err == nil {\n\t\treturn true\n\t}\n\treturn !os.IsNotExist(err)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/influxdb/uuid/uuid.go",
    "content": "// Copyright (c) 2012 The gocql Authors. All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//    * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//    * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//    * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// The uuid package can be used to generate and parse universally unique\n// identifiers, a standardized format in the form of a 128 bit number.\n//\n// http://tools.ietf.org/html/rfc4122\n\n// Package uuid provides functions to create time-based UUIDs.\npackage uuid // import \"github.com/influxdata/influxdb/uuid\"\n\nimport (\n\t\"sync/atomic\"\n\t\"time\"\n)\n\n// UUID - unique identifier type representing a 128 bit number\ntype UUID [16]byte\n\nvar timeBase = time.Date(1582, time.October, 15, 0, 0, 0, 0, time.UTC).Unix()\nvar hardwareAddr []byte\nvar clockSeq uint32\n\n// TimeUUID generates a new time based UUID (version 1) using the current\n// time as the timestamp.\nfunc TimeUUID() UUID {\n\treturn FromTime(time.Now())\n}\n\n// FromTime generates a new time based UUID (version 1) as described in\n// RFC 4122. 
This UUID contains the MAC address of the node that generated\n// the UUID, the given timestamp and a sequence number.\nfunc FromTime(aTime time.Time) UUID {\n\tvar u UUID\n\n\tutcTime := aTime.In(time.UTC)\n\tt := uint64(utcTime.Unix()-timeBase)*10000000 + uint64(utcTime.Nanosecond()/100)\n\tu[0], u[1], u[2], u[3] = byte(t>>24), byte(t>>16), byte(t>>8), byte(t)\n\tu[4], u[5] = byte(t>>40), byte(t>>32)\n\tu[6], u[7] = byte(t>>56)&0x0F, byte(t>>48)\n\n\tclock := atomic.AddUint32(&clockSeq, 1)\n\tu[8] = byte(clock >> 8)\n\tu[9] = byte(clock)\n\n\tcopy(u[10:], hardwareAddr)\n\n\tu[6] |= 0x10 // set version to 1 (time based uuid)\n\tu[8] &= 0x3F // clear variant\n\tu[8] |= 0x80 // set to IETF variant\n\n\treturn u\n}\n\n// String returns the UUID in it's canonical form, a 32 digit hexadecimal\n// number in the form of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.\nfunc (u UUID) String() string {\n\tvar offsets = [...]int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34}\n\tconst hexString = \"0123456789abcdef\"\n\tr := make([]byte, 36)\n\tfor i, b := range u {\n\t\tr[offsets[i]] = hexString[b>>4]\n\t\tr[offsets[i]+1] = hexString[b&0xF]\n\t}\n\tr[8] = '-'\n\tr[13] = '-'\n\tr[18] = '-'\n\tr[23] = '-'\n\treturn string(r)\n\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/.drone.sec",
    "content": "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.kK6pryC8R-O1R0Gj9ydLvQuIZlcYLGze23WdW7xbpiEEKdz6nweJrMm7ysy8lgu1tM47JVo19p2_b26bNKSQshCUOETvd7Hb2UMZOjnyUnqdyAAyoi6UkIquXfUUbHTNS0iMxwSxxW9KMp2GXNq8-o6T8xQZTDirBJFKKd8ZNUasTaoa5j8U9IfdR1aCavTBuOhvk8IVs-jSbY5TVJMJiE0IOPXois7aRJ6uAiANQBk9VKLegEcZD_qAewecXHDsHi-u0jbmg3o3PPaJaK_Qv5dsPlR2M-E2kE3AGUn0-zn5zYRngoAZ8WZr2O4GvLdltJKq9i2z7jOrdOzzRcDRow.96qvwl_E1Hj15u7Q.hWs-jQ8FsqQFD7pE9N-UEP1BWQ9rsJIcCaPvQRIp8Fukm_vvlw9YEaEq0ERLrsUWsJWpd1ca8_h8x7xD6f_d5YppwRqRHIeGIsdBOTMhNs0lG8ikkQXLat-UroCpy8EC17nuUtDE2E2Kdxrk4Cdd6Bk-dKk0Ta4w3Ud0YBKa.P8zrO7xizgv0i98eVWWzEg"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/.drone.yml",
    "content": "clone:\n  tags: true\n  path: github.com/vmware/govmomi\nbuild:\n  image: golang:1.7\n  pull: true\n  environment:\n    - GOVC_TEST_URL=$$GOVC_TEST_URL\n    - GOVC_INSECURE=1\n    - VCA=1\n  commands:\n    - make all install\n    - git clone https://github.com/sstephenson/bats.git /tmp/bats\n    - /tmp/bats/install.sh /usr/local\n    - apt-get -qq update && apt-get install -yqq uuid-runtime bsdmainutils jq\n    - govc/test/images/update.sh\n    - bats govc/test\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/.gitignore",
    "content": "secrets.yml\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/.mailmap",
    "content": "Amit Bathla <abathla@.vmware.com> <abathla@promb-1s-dhcp216.eng.vmware.com>\nBruce Downs <bruceadowns@gmail.com> <bdowns@vmware.com>\nBruce Downs <bruceadowns@gmail.com> <bruce.downs@jivesoftware.com>\nClint Greenwood <cgreenwood@vmware.com> <clint.greenwood@gmail.com>\nCédric Blomart <cblomart@gmail.com> <cedric.blomart@minfin.fed.be>\nCédric Blomart <cblomart@gmail.com> cedric <cblomart@gmail.com>\nDavid Stark <dave@davidstark.name> <david.stark@bskyb.com>\nEric Gray <egray@vmware.com> <ericgray@users.noreply.github.com>\nEric Yutao <eric.yutao@gmail.com> eric <eric.yutao@gmail.com>\nHenrik Hodne <henrik@travis-ci.com> <henrik@hodne.io>\nJeremy Canady <jcanady@jackhenry.com> <jcanady@gmail.com>\nPieter Noordhuis <pnoordhuis@vmware.com> <pcnoordhuis@gmail.com>\nTakaaki Furukawa <takaaki.frkw@gmail.com> takaaki.furukawa <takaaki.furukawa@mail.rakuten.com>\nTakaaki Furukawa <takaaki.frkw@gmail.com> tkak <takaaki.frkw@gmail.com>\nVadim Egorov <vegorov@vmware.com> <egorovv@gmail.com>\nZach Tucker <ztucker@vmware.com> <jzt@users.noreply.github.com>\nZee Yang <zeey@vmware.com> <zee.yang@gmail.com>\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/.travis.yml",
    "content": "sudo: false\n\nlanguage: go\n\ngo:\n   - 1.7\n\nbefore_install:\n  - make vendor\n\nscript:\n  - make check test\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/CHANGELOG.md",
    "content": "# changelog\n\n### 0.15.0 (2017-06-19)\n\n* WaitOptions.MaxWaitSeconds is now optional\n\n* Support removal of ExtraConfig entries\n\n* GuestPosixFileAttributes OwnerId and GroupId fields are now pointers,\n  rather than omitempty ints to allow chown with root uid:gid\n\n* Updated examples/ using view package\n\n* Add DatastoreFile.TailFunc method\n\n* Export VirtualMachine.FindSnapshot method\n\n* Add AuthorizationManager {Enable,Disable}Methods\n\n* Add PBM client\n\n### 0.14.0 (2017-04-08)\n\n* Add view.ContainerView type and methods\n\n* Add Collector.RetrieveWithFilter method\n\n* Add property.Filter type\n\n* Implement EthernetCardBackingInfo for OpaqueNetwork\n\n* Finder: support changing object root in find mode\n\n* Add VirtualDiskManager.QueryVirtualDiskInfo\n\n* Add performance.Manager APIs\n\n### 0.13.0 (2017-03-02)\n\n* Add DatastoreFileManager API wrapper\n\n* Add HostVsanInternalSystem API wrappers\n\n* Add Container support to view package\n\n* Finder supports Folder recursion without specifying a path\n\n* Add VirtualMachine.QueryConfigTarget method\n\n* Add device option to VirtualMachine.WaitForNetIP\n\n* Remove _Task suffix from vapp methods\n\n### 0.12.1 (2016-12-19)\n\n* Add DiagnosticLog helper\n\n* Add DatastorePath helper\n\n### 0.12.0 (2016-12-01)\n\n* Disable use of service ticket for datastore HTTP access by default\n\n* Attach context to HTTP requests for cancellations\n\n* Update to vim25/6.5 API\n\n### 0.11.4 (2016-11-15)\n\n* Add object.AuthorizationManager methods: RetrieveRolePermissions, RetrieveAllPermissions, AddRole, RemoveRole, UpdateRole\n\n### 0.11.3 (2016-11-08)\n\n* Allow DatastoreFile.Follow reader to drain current body after stopping\n\n### 0.11.2 (2016-11-01)\n\n* Avoid possible NPE in VirtualMachine.Device method\n\n* Add support for OpaqueNetwork type to Finder\n\n* Add HostConfigManager.AccountManager support for ESX 5.5\n\n### 0.11.1 (2016-10-27)\n\n* Add Finder.ResourcePoolListAll method\n\n### 
0.11.0 (2016-10-25)\n\n* Add object.DistributedVirtualPortgroup.Reconfigure method\n\n### 0.10.0 (2016-10-20)\n\n* Add option to set soap.Client.UserAgent\n\n* Add service ticket thumbprint validation\n\n* Update use of http.DefaultTransport fields to 1.7\n\n* Set default locale to en_US (override with GOVMOMI_LOCALE env var)\n\n* Add object.HostCertificateInfo (types.HostCertificateManagerCertificateInfo helpers)\n\n* Add object.HostCertificateManager type and HostConfigManager.CertificateManager method\n\n* Add soap.Client SetRootCAs and SetDialTLS methods\n\n### 0.9.0 (2016-09-09)\n\n* Add object.DatastoreFile helpers for streaming and tailing datastore files\n\n* Add object VirtualMachine.Unregister method\n\n* Add object.ListView methods: Add, Remove, Reset\n\n* Update to Go 1.7 - using stdlib's context package\n\n### 0.8.0 (2016-06-30)\n\n* Add session.Manager.AcquireLocalTicket\n\n* Include StoragePod in Finder.FolderList\n\n* Add Finder methods for finding by ManagedObjectReference: Element, ObjectReference\n\n* Add mo.ManagedObjectReference methods: Reference, String, FromString\n\n* Add support using SessionManagerGenericServiceTicket.HostName for Datastore HTTP access\n\n### 0.7.1 (2016-06-03)\n\n* Fix object.ObjectName method\n\n### 0.7.0 (2016-06-02)\n\n* Move InventoryPath field to object.Common\n\n* Add HostDatastoreSystem.CreateLocalDatastore method\n\n* Add DatastoreNamespaceManager methods: CreateDirectory, DeleteDirectory\n\n* Add HostServiceSystem\n\n* Add HostStorageSystem methods: MarkAsSdd, MarkAsNonSdd, MarkAsLocal, MarkAsNonLocal\n\n* Add HostStorageSystem.RescanAllHba method\n\n### 0.6.2 (2016-05-11)\n\n* Get complete file details in Datastore.Stat\n\n* SOAP decoding fixes\n\n* Add VirtualMachine.RemoveAllSnapshot\n\n### 0.6.1 (2016-04-30)\n\n* Fix mo.Entity interface\n\n### 0.6.0 (2016-04-29)\n\n* Add Common.Rename method\n\n* Add mo.Entity interface\n\n* Add OptionManager\n\n* Add Finder.FolderList method\n\n* Add 
VirtualMachine.WaitForNetIP method\n\n* Add VirtualMachine.RevertToSnapshot method\n\n* Add Datastore.Download method\n\n### 0.5.0 (2016-03-30)\n\nGenerated fields using xsd type 'int' change to Go type 'int32'\n\nVirtualDevice.UnitNumber field changed to pointer type\n\n### 0.4.0 (2016-02-26)\n\n* Add method to convert virtual device list to array with virtual device\n  changes that can be used in the VirtualMachineConfigSpec.\n\n* Make datastore cluster traversable in lister\n\n* Add finder.DatastoreCluster methods (also known as storage pods)\n\n* Add Drone CI check\n\n* Add object.Datastore Type and AttachedClusterHosts methods\n\n* Add finder.*OrDefault methods\n\n### 0.3.0 (2016-01-16)\n\n* Add object.VirtualNicManager wrapper\n\n* Add object.HostVsanSystem wrapper\n\n* Add object.HostSystem methods: EnterMaintenanceMode, ExitMaintenanceMode, Disconnect, Reconnect\n\n* Add finder.Folder method\n\n* Add object.Common.Destroy method\n\n* Add object.ComputeResource.Reconfigure method\n\n* Add license.AssignmentManager wrapper\n\n* Add object.HostFirewallSystem wrapper\n\n* Add object.DiagnosticManager wrapper\n\n* Add LoginExtensionByCertificate support\n\n* Add object.ExtensionManager\n\n...\n\n### 0.2.0 (2015-09-15)\n\n* Update to vim25/6.0 API\n\n* Stop returning children from `ManagedObjectList`\n\n    Change the `ManagedObjectList` function in the `find` package to only\n    return the managed objects specified by the path argument and not their\n    children. The original behavior was used by govc's `ls` command and is\n    now available in the newly added function `ManagedObjectListChildren`.\n\n* Add retry functionality to vim25 package\n\n* Change finder functions to no longer take varargs\n\n    The `find` package had functions to return a list of objects, given a\n    variable number of patterns. 
This makes it impossible to distinguish which\n    patterns produced results and which ones didn't.\n\n    In particular for govc, where multiple arguments can be passed from the\n    command line, it is useful to let the user know which ones produce results\n    and which ones don't.\n\n    To evaluate multiple patterns, the user should call the find functions\n    multiple times (either serially or in parallel).\n\n* Make optional boolean fields pointers (`vim25/types`).\n\n    False is the zero value of a boolean field, which means they are not serialized\n    if the field is marked \"omitempty\". If the field is a pointer instead, the zero\n    value will be the nil pointer, and both true and false values are serialized.\n\n### 0.1.0 (2015-03-17)\n\nPrior to this version the API of this library was in flux.\n\nNotable changes w.r.t. the state of this library before March 2015 are:\n\n* All functions that may execute a request take a `context.Context` parameter.\n* The `vim25` package contains a minimal client implementation.\n* The property collector and its convenience functions live in the `property` package.\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/CONTRIBUTING.md",
    "content": "# Contributing to govmomi\n\n## Getting started\n\nFirst, fork the repository on GitHub to your personal account.\n\nNote that _GOPATH_ can be any directory, the example below uses _$HOME/govmomi_.\nChange _$USER_ below to your github username if they are not the same.\n\n``` shell\nexport GOPATH=$HOME/govmomi\ngo get github.com/vmware/govmomi\ncd $GOPATH/src/github.com/vmware/govmomi\ngit config push.default nothing # anything to avoid pushing to vmware/govmomi by default\ngit remote rename origin vmware\ngit remote add $USER git@github.com:$USER/govmomi.git\ngit fetch $USER\n```\n\n## Installing from source\n\nCompile the govmomi libraries and install govc using:\n\n``` shell\ngo install -v github.com/vmware/govmomi/govc\n```\n\nNote that **govc/build.sh** is only used for building release binaries.\n\n## Contribution flow\n\nThis is a rough outline of what a contributor's workflow looks like:\n\n- Create a topic branch from where you want to base your work.\n- Make commits of logical units.\n- Make sure your commit messages are in the proper format (see below).\n- Update CHANGELOG.md and/or govc/CHANGELOG.md when appropriate.\n- Push your changes to a topic branch in your fork of the repository.\n- Submit a pull request to vmware/govmomi.\n\nExample:\n\n``` shell\ngit checkout -b my-new-feature vmware/master\ngit commit -a\ngit push $USER my-new-feature\n```\n\n### Stay in sync with upstream\n\nWhen your branch gets out of sync with the vmware/master branch, use the following to update:\n\n``` shell\ngit checkout my-new-feature\ngit fetch -a\ngit rebase vmware/master\ngit push --force-with-lease $USER my-new-feature\n```\n\n### Updating pull requests\n\nIf your PR fails to pass CI or needs changes based on code review, you'll most likely want to squash these changes into\nexisting commits.\n\nIf your pull request contains a single commit or your changes are related to the most recent commit, you can simply\namend the commit.\n\n``` shell\ngit add 
.\ngit commit --amend\ngit push --force-with-lease $USER my-new-feature\n```\n\nIf you need to squash changes into an earlier commit, you can use:\n\n``` shell\ngit add .\ngit commit --fixup <commit>\ngit rebase -i --autosquash vmware/master\ngit push --force-with-lease $USER my-new-feature\n```\n\nBe sure to add a comment to the PR indicating your new changes are ready to review, as github does not generate a\nnotification when you git push.\n\n### Code style\n\nThe coding style suggested by the Golang community is used in govmomi. See the\n[style doc](https://github.com/golang/go/wiki/CodeReviewComments) for details.\n\nTry to limit column width to 120 characters for both code and markdown documents such as this one.\n\n### Format of the Commit Message\n\nWe follow the conventions on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/).\n\nBe sure to include any related GitHub issue references in the commit message.\n\n## Reporting Bugs and Creating Issues\n\nWhen opening a new issue, try to roughly follow the commit message format conventions above.\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/CONTRIBUTORS",
    "content": "# People who can (and typically have) contributed to this repository.\n#\n# This script is generated by contributors.sh\n#\n\nabrarshivani <abrarshivani@users.noreply.github.com>\nAlvaro Miranda <kikitux@gmail.com>\namandahla <amanda.andrade@serpro.gov.br>\nAmit Bathla <abathla@.vmware.com>\nAndrew Chin <andrew@andrewtchin.com>\naniketGslab <aniket.shinde@gslab.com>\nArran Walker <arran.walker@zopa.com>\nAryeh Weinreb <aryehweinreb@gmail.com>\nAustin Parker <aparker@apprenda.com>\nBalu Dontu <bdontu@vmware.com>\nbastienbc <bastien.barbe.creuly@gmail.com>\nBob Killen <killen.bob@gmail.com>\nBrad Fitzpatrick <bradfitz@golang.org>\nBruce Downs <bruceadowns@gmail.com>\nCédric Blomart <cblomart@gmail.com>\nChristian Höltje <docwhat@gerf.org>\nClint Greenwood <cgreenwood@vmware.com>\nDanny Lockard <danny.lockard@banno.com>\nDave Tucker <dave@dtucker.co.uk>\nDavide Agnello <dagnello@hp.com>\nDavid Stark <dave@davidstark.name>\nDoug MacEachern <dougm@vmware.com>\nEloy Coto <eloy.coto@gmail.com>\nEric Gray <egray@vmware.com>\nEric Yutao <eric.yutao@gmail.com>\nFabio Rapposelli <fabio@vmware.com>\nFaiyaz Ahmed <ahmedf@vmware.com>\nforkbomber <forkbomber@users.noreply.github.com>\nGavin Gray <gavin@infinio.com>\nGavrie Philipson <gavrie.philipson@elastifile.com>\nGeorge Hicken <ghicken@vmware.com>\nGerrit Renker <Gerrit.Renker@ctl.io>\ngthombare <gthombare@vmware.com>\nHasan Mahmood <mahmoodh@vmware.com>\nHenrik Hodne <henrik@travis-ci.com>\nIsaac Rodman <isaac@eyz.us>\nIvan Porto Carrero <icarrero@vmware.com>\nJason Kincl <jkincl@gmail.com>\nJeremy Canady <jcanady@jackhenry.com>\nLouie Jiang <jiangl@vmware.com>\nMarc Carmier <mcarmier@gmail.com>\nMevan Samaratunga <mevansam@gmail.com>\nNicolas Lamirault <nicolas.lamirault@gmail.com>\nPieter Noordhuis <pnoordhuis@vmware.com>\nrunner.mei <runner.mei@gmail.com>\nS.Çağlar Onur <conur@vmware.com>\nSergey Ignatov <sergey.ignatov@jetbrains.com>\nSteve Purcell <steve@sanityinc.com>\nTakaaki Furukawa 
<takaaki.frkw@gmail.com>\nTed Zlatanov <tzz@lifelogs.com>\nThibaut Ackermann <thibaut.ackermann@alcatel-lucent.com>\nVadim Egorov <vegorov@vmware.com>\nYang Yang <yangy@vmware.com>\nYuya Kusakabe <yuya.kusakabe@gmail.com>\nZach Tucker <ztucker@vmware.com>\nZee Yang <zeey@vmware.com>\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/LICENSE.txt",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/Makefile",
    "content": ".PHONY: test\n\nall: check test\n\ncheck: goimports govet\n\ngoimports:\n\t@echo checking go imports...\n\t@go get golang.org/x/tools/cmd/goimports\n\t@! goimports -d . 2>&1 | egrep -v '^$$'\n\ngovet:\n\t@echo checking go vet...\n\t@go tool vet -structtags=false -methods=false .\n\ntest:\n\tgo test -v $(TEST_OPTS) ./...\n\ninstall:\n\tgo install github.com/vmware/govmomi/govc\n\ndoc: install\n\t./govc/usage.sh > ./govc/USAGE.md\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/README.md",
    "content": "[![Build Status](https://travis-ci.org/vmware/govmomi.png?branch=master)](https://travis-ci.org/vmware/govmomi)\n[![Go Report Card](https://goreportcard.com/badge/github.com/vmware/govmomi)](https://goreportcard.com/report/github.com/vmware/govmomi)\n\n# govmomi\n\nA Go library for interacting with VMware vSphere APIs (ESXi and/or vCenter).\n\nFor `govc`, a CLI built on top of govmomi, check out the [govc](./govc) directory and [USAGE](./govc/USAGE.md) document.\n\n## Compatibility\n\nThis library is built for and tested against ESXi and vCenter 5.5, 6.0 and 6.5.\n\nIf you're able to use it against older versions of ESXi and/or vCenter, please\nleave a note and we'll include it in this compatibility list.\n\n## Documentation\n\nThe APIs exposed by this library very closely follow the API described in the [VMware vSphere API Reference Documentation][apiref].\nRefer to this document to become familiar with the upstream API.\n\nThe code in the `govmomi` package is a wrapper for the code that is generated from the vSphere API description.\nIt primarily provides convenience functions for working with the vSphere API.\nSee [godoc.org][godoc] for documentation.\n\n[apiref]:http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.wssdk.apiref.doc/right-pane.html\n[godoc]:http://godoc.org/github.com/vmware/govmomi\n[drone]:https://drone.io\n[dronesrc]:https://github.com/drone/drone\n[dronecli]:http://readme.drone.io/devs/cli/\n\n#### Building with CI\nMerges to this repository will trigger builds in both Travis and [Drone][drone].\n\nTo build locally with Drone:\n- Ensure that you have Docker 1.6 or higher installed.\n- Install the [Drone command line tools][dronecli].\n- Run `drone exec` from within the root directory of the govmomi repository.\n\n## Discussion\n\nContributors and users are encouraged to collaborate using GitHub issues and/or\n[Slack](https://vmwarecode.slack.com/messages/govmomi).\nAccess to Slack requires a [VMware {code} 
membership](https://code.vmware.com/join/).\n\n## Status\n\nChanges to the API are subject to [semantic versioning](http://semver.org).\n\nRefer to the [CHANGELOG](CHANGELOG.md) for version to version changes.\n\n## Projects using govmomi\n\n* [Docker Machine](https://github.com/docker/machine/tree/master/drivers/vmwarevsphere)\n\n* [Kubernetes](https://github.com/kubernetes/kubernetes/tree/master/pkg/cloudprovider/providers/vsphere)\n\n* [Terraform](https://github.com/hashicorp/terraform/tree/master/builtin/providers/vsphere)\n\n* [VMware VIC Engine](https://github.com/vmware/vic)\n\n* [Travis CI](https://github.com/travis-ci/jupiter-brain)\n\n* [collectd-vsphere](https://github.com/travis-ci/collectd-vsphere)\n\n* [Gru](https://github.com/dnaeon/gru)\n\n* [Libretto](https://github.com/apcera/libretto/tree/master/virtualmachine/vsphere)\n\n## Related projects\n\n* [rbvmomi](https://github.com/vmware/rbvmomi)\n\n* [pyvmomi](https://github.com/vmware/pyvmomi)\n\n## License\n\ngovmomi is available under the [Apache 2 license](LICENSE).\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/client.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n/*\nThis package is the root package of the govmomi library.\n\nThe library is structured as follows:\n\nPackage vim25\n\nThe minimal usable functionality is available through the vim25 package.\nIt contains subpackages that contain generated types, managed objects, and all\navailable methods. The vim25 package is entirely independent of the other\npackages in the govmomi tree -- it has no dependencies on its peers.\n\nThe vim25 package itself contains a client structure that is\npassed around throughout the entire library. It abstracts a session and its\nimmutable state. See the vim25 package for more information.\n\nPackage session\n\nThe session package contains an abstraction for the session manager that allows\na user to login and logout. It also provides access to the current session\n(i.e. to determine if the user is in fact logged in)\n\nPackage object\n\nThe object package contains wrappers for a selection of managed objects. The\nconstructors of these objects all take a *vim25.Client, which they pass along\nto derived objects, if applicable.\n\nPackage govc\n\nThe govc package contains the govc CLI. The code in this tree is not intended\nto be used as a library. 
Any functionality that govc contains that _could_ be\nused as a library function but isn't, _should_ live in a root level package.\n\nOther packages\n\nOther packages, such as \"event\", \"guest\", or \"license\", provide wrappers for\nthe respective subsystems. They are typically not needed in normal workflows so\nare kept outside the object package.\n*/\npackage govmomi\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"net/url\"\n\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/session\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype Client struct {\n\t*vim25.Client\n\n\tSessionManager *session.Manager\n}\n\n// NewClient creates a new client from a URL. The client authenticates with the\n// server with username/password before returning if the URL contains user information.\nfunc NewClient(ctx context.Context, u *url.URL, insecure bool) (*Client, error) {\n\tsoapClient := soap.NewClient(u, insecure)\n\tvimClient, err := vim25.NewClient(ctx, soapClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Client{\n\t\tClient:         vimClient,\n\t\tSessionManager: session.NewManager(vimClient),\n\t}\n\n\t// Only login if the URL contains user information.\n\tif u.User != nil {\n\t\terr = c.Login(ctx, u.User)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn c, nil\n}\n\n// NewClientWithCertificate creates a new client from a URL. 
The client authenticates with the\n// server with the certificate before returning if the URL contains user information.\nfunc NewClientWithCertificate(ctx context.Context, u *url.URL, insecure bool, cert tls.Certificate) (*Client, error) {\n\tsoapClient := soap.NewClient(u, insecure)\n\tsoapClient.SetCertificate(cert)\n\tvimClient, err := vim25.NewClient(ctx, soapClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Client{\n\t\tClient:         vimClient,\n\t\tSessionManager: session.NewManager(vimClient),\n\t}\n\n\tif u.User != nil {\n\t\terr = c.LoginExtensionByCertificate(ctx, u.User.Username(), \"\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn c, nil\n}\n\n// Login dispatches to the SessionManager.\nfunc (c *Client) Login(ctx context.Context, u *url.Userinfo) error {\n\treturn c.SessionManager.Login(ctx, u)\n}\n\n// Login dispatches to the SessionManager.\nfunc (c *Client) LoginExtensionByCertificate(ctx context.Context, key string, locale string) error {\n\treturn c.SessionManager.LoginExtensionByCertificate(ctx, key, locale)\n}\n\n// Logout dispatches to the SessionManager.\nfunc (c *Client) Logout(ctx context.Context) error {\n\t// Close any idle connections after logging out.\n\tdefer c.Client.CloseIdleConnections()\n\treturn c.SessionManager.Logout(ctx)\n}\n\n// PropertyCollector returns the session's default property collector.\nfunc (c *Client) PropertyCollector() *property.Collector {\n\treturn property.DefaultCollector(c.Client)\n}\n\n// RetrieveOne dispatches to the Retrieve function on the default property collector.\nfunc (c *Client) RetrieveOne(ctx context.Context, obj types.ManagedObjectReference, p []string, dst interface{}) error {\n\treturn c.PropertyCollector().RetrieveOne(ctx, obj, p, dst)\n}\n\n// Retrieve dispatches to the Retrieve function on the default property collector.\nfunc (c *Client) Retrieve(ctx context.Context, objs []types.ManagedObjectReference, p []string, dst interface{}) error {\n\treturn 
c.PropertyCollector().Retrieve(ctx, objs, p, dst)\n}\n\n// Wait dispatches to property.Wait.\nfunc (c *Client) Wait(ctx context.Context, obj types.ManagedObjectReference, ps []string, f func([]types.PropertyChange) bool) error {\n\treturn property.Wait(ctx, c.PropertyCollector(), obj, ps, f)\n}\n\n// IsVC returns true if we are connected to a vCenter\nfunc (c *Client) IsVC() bool {\n\treturn c.Client.IsVC()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/client_test.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage govmomi\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/test\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nfunc TestNewClient(t *testing.T) {\n\tu := test.URL()\n\tif u == nil {\n\t\tt.SkipNow()\n\t}\n\n\tc, err := NewClient(context.Background(), u, true)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tf := func() error {\n\t\tvar x mo.Folder\n\t\terr = mo.RetrieveProperties(context.Background(), c, c.ServiceContent.PropertyCollector, c.ServiceContent.RootFolder, &x)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(x.Name) == 0 {\n\t\t\treturn errors.New(\"empty response\")\n\t\t}\n\t\treturn nil\n\t}\n\n\t// check cookie is valid with an sdk request\n\tif err := f(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// check cookie is valid with a non-sdk request\n\tu.User = nil // turn off Basic auth\n\tu.Path = \"/folder\"\n\tr, err := c.Client.Get(u.String())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif r.StatusCode != http.StatusOK {\n\t\tt.Fatal(r)\n\t}\n\n\t// sdk request should fail w/o a valid cookie\n\tc.Client.Jar = nil\n\tif err := f(); err == nil {\n\t\tt.Fatal(\"should fail\")\n\t}\n\n\t// invalid login\n\tu.Path = \"/sdk\"\n\tu.User = url.UserPassword(\"ENOENT\", \"EINVAL\")\n\t_, err = 
NewClient(context.Background(), u, true)\n\tif err == nil {\n\t\tt.Fatal(\"should fail\")\n\t}\n}\n\nfunc TestInvalidSdk(t *testing.T) {\n\tu := test.URL()\n\tif u == nil {\n\t\tt.SkipNow()\n\t}\n\n\t// a URL other than a valid /sdk should error, not panic\n\tu.Path = \"/mob\"\n\t_, err := NewClient(context.Background(), u, true)\n\tif err == nil {\n\t\tt.Fatal(\"should fail\")\n\t}\n}\n\nfunc TestPropertiesN(t *testing.T) {\n\tu := test.URL()\n\tif u == nil {\n\t\tt.SkipNow()\n\t}\n\n\tc, err := NewClient(context.Background(), u, true)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar f mo.Folder\n\terr = c.RetrieveOne(context.Background(), c.ServiceContent.RootFolder, nil, &f)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar dc mo.Datacenter\n\terr = c.RetrieveOne(context.Background(), f.ChildEntity[0], nil, &dc)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar folderReferences = []types.ManagedObjectReference{\n\t\tdc.DatastoreFolder,\n\t\tdc.HostFolder,\n\t\tdc.NetworkFolder,\n\t\tdc.VmFolder,\n\t}\n\n\tvar folders []mo.Folder\n\terr = c.Retrieve(context.Background(), folderReferences, []string{\"name\"}, &folders)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif len(folders) != len(folderReferences) {\n\t\tt.Fatalf(\"Expected %d, got %d\", len(folderReferences), len(folders))\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/event/history_collector.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage event\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype HistoryCollector struct {\n\t*object.HistoryCollector\n}\n\nfunc NewHistoryCollector(c *vim25.Client, ref types.ManagedObjectReference) *HistoryCollector {\n\treturn &HistoryCollector{\n\t\tHistoryCollector: object.NewHistoryCollector(c, ref),\n\t}\n}\n\nfunc (h HistoryCollector) LatestPage(ctx context.Context) ([]types.BaseEvent, error) {\n\tvar o mo.EventHistoryCollector\n\n\terr := h.Properties(ctx, h.Reference(), []string{\"latestPage\"}, &o)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn o.LatestPage, nil\n}\n\nfunc (h HistoryCollector) ReadNextEvents(ctx context.Context, maxCount int32) ([]types.BaseEvent, error) {\n\treq := types.ReadNextEvents{\n\t\tThis:     h.Reference(),\n\t\tMaxCount: maxCount,\n\t}\n\n\tres, err := methods.ReadNextEvents(ctx, h.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (h HistoryCollector) ReadPreviousEvents(ctx context.Context, maxCount int32) ([]types.BaseEvent, error) {\n\treq := types.ReadPreviousEvents{\n\t\tThis:     h.Reference(),\n\t\tMaxCount: maxCount,\n\t}\n\n\tres, err := 
methods.ReadPreviousEvents(ctx, h.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/event/manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage event\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"sync\"\n\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype Manager struct {\n\tobject.Common\n\n\teventCategory   map[string]string\n\teventCategoryMu *sync.Mutex\n\tmaxObjects      int\n}\n\nfunc NewManager(c *vim25.Client) *Manager {\n\tm := Manager{\n\t\tCommon: object.NewCommon(c, *c.ServiceContent.EventManager),\n\n\t\teventCategory:   make(map[string]string),\n\t\teventCategoryMu: new(sync.Mutex),\n\t\tmaxObjects:      10,\n\t}\n\n\treturn &m\n}\n\nfunc (m Manager) CreateCollectorForEvents(ctx context.Context, filter types.EventFilterSpec) (*HistoryCollector, error) {\n\treq := types.CreateCollectorForEvents{\n\t\tThis:   m.Common.Reference(),\n\t\tFilter: filter,\n\t}\n\n\tres, err := methods.CreateCollectorForEvents(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewHistoryCollector(m.Client(), res.Returnval), nil\n}\n\nfunc (m Manager) LogUserEvent(ctx context.Context, entity types.ManagedObjectReference, msg string) error {\n\treq := types.LogUserEvent{\n\t\tThis:   m.Common.Reference(),\n\t\tEntity: entity,\n\t\tMsg:    
msg,\n\t}\n\n\t_, err := methods.LogUserEvent(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (m Manager) PostEvent(ctx context.Context, eventToPost types.BaseEvent, taskInfo types.TaskInfo) error {\n\treq := types.PostEvent{\n\t\tThis:        m.Common.Reference(),\n\t\tEventToPost: eventToPost,\n\t\tTaskInfo:    &taskInfo,\n\t}\n\n\t_, err := methods.PostEvent(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (m Manager) QueryEvents(ctx context.Context, filter types.EventFilterSpec) ([]types.BaseEvent, error) {\n\treq := types.QueryEvents{\n\t\tThis:   m.Common.Reference(),\n\t\tFilter: filter,\n\t}\n\n\tres, err := methods.QueryEvents(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m Manager) RetrieveArgumentDescription(ctx context.Context, eventTypeID string) ([]types.EventArgDesc, error) {\n\treq := types.RetrieveArgumentDescription{\n\t\tThis:        m.Common.Reference(),\n\t\tEventTypeId: eventTypeID,\n\t}\n\n\tres, err := methods.RetrieveArgumentDescription(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m Manager) eventCategoryMap(ctx context.Context) (map[string]string, error) {\n\tm.eventCategoryMu.Lock()\n\tdefer m.eventCategoryMu.Unlock()\n\n\tif len(m.eventCategory) != 0 {\n\t\treturn m.eventCategory, nil\n\t}\n\n\tvar o mo.EventManager\n\n\tps := []string{\"description.eventInfo\"}\n\terr := property.DefaultCollector(m.Client()).RetrieveOne(ctx, m.Common.Reference(), ps, &o)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, info := range o.Description.EventInfo {\n\t\tm.eventCategory[info.Key] = info.Category\n\t}\n\n\treturn m.eventCategory, nil\n}\n\n// EventCategory returns the category for an event, such as \"info\" or \"error\" for example.\nfunc (m Manager) EventCategory(ctx context.Context, event types.BaseEvent) (string, error) 
{\n\t// Most of the event details are included in the Event.FullFormattedMessage, but the category\n\t// is only available via the EventManager description.eventInfo property.  The value of this\n\t// property is static, so we fetch and once and cache.\n\teventCategory, err := m.eventCategoryMap(ctx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tclass := reflect.TypeOf(event).Elem().Name()\n\n\treturn eventCategory[class], nil\n}\n\n// Get the events from the specified object(s) and optionanlly tail the event stream\nfunc (m Manager) Events(ctx context.Context, objects []types.ManagedObjectReference, pageSize int32, tail bool, force bool, f func(types.ManagedObjectReference, []types.BaseEvent) error) error {\n\n\tif len(objects) >= m.maxObjects && !force {\n\t\treturn fmt.Errorf(\"Maximum number of objects to monitor (%d) exceeded, refine search\", m.maxObjects)\n\t}\n\n\tproc := newEventProcessor(m, pageSize, f)\n\tfor _, o := range objects {\n\t\tproc.addObject(ctx, o)\n\t}\n\n\treturn proc.run(ctx, tail)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/event/processor.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage event\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/view\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nconst latestPageProp = \"latestPage\"\n\ntype tailInfo struct {\n\tt         *eventTailer\n\tobj       types.ManagedObjectReference\n\tcollector types.ManagedObjectReference\n}\n\ntype eventProcessor struct {\n\tmgr      Manager\n\tpageSize int32\n\ttailers  map[types.ManagedObjectReference]*tailInfo // tailers by collector ref\n\tcallback func(types.ManagedObjectReference, []types.BaseEvent) error\n}\n\nfunc newEventProcessor(mgr Manager, pageSize int32, callback func(types.ManagedObjectReference, []types.BaseEvent) error) *eventProcessor {\n\treturn &eventProcessor{\n\t\tmgr:      mgr,\n\t\ttailers:  make(map[types.ManagedObjectReference]*tailInfo),\n\t\tcallback: callback,\n\t\tpageSize: pageSize,\n\t}\n}\n\nfunc (p *eventProcessor) addObject(ctx context.Context, obj types.ManagedObjectReference) error {\n\tfilter := types.EventFilterSpec{\n\t\tEntity: &types.EventFilterSpecByEntity{\n\t\t\tEntity:    obj,\n\t\t\tRecursion: types.EventFilterSpecRecursionOptionAll,\n\t\t},\n\t}\n\n\tcollector, err := p.mgr.CreateCollectorForEvents(ctx, filter)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"[%#v] %s\", obj, err)\n\t}\n\n\terr = collector.SetPageSize(ctx, p.pageSize)\n\tif err 
!= nil {\n\t\treturn err\n\t}\n\n\tp.tailers[collector.Reference()] = &tailInfo{\n\t\tt:         newEventTailer(),\n\t\tobj:       obj,\n\t\tcollector: collector.Reference(),\n\t}\n\n\treturn nil\n}\n\nfunc (p *eventProcessor) run(ctx context.Context, tail bool) error {\n\tif len(p.tailers) == 0 {\n\t\treturn nil\n\t}\n\n\tvar err error\n\tvar collectors []types.ManagedObjectReference\n\tfor _, t := range p.tailers {\n\t\tcollectors = append(collectors, t.collector)\n\t}\n\n\tif len(p.tailers) > 1 {\n\t\t// create and populate a ListView\n\t\tviewMgr := view.NewManager(p.mgr.Client())\n\t\tvar listView *view.ListView\n\t\tlistView, err = viewMgr.CreateListView(ctx, collectors)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcount := 0\n\t\t// Retrieve the property from the objects in the ListView\n\t\terr = property.WaitForView(ctx, property.DefaultCollector(p.mgr.Client()), listView.Reference(), collectors[0], []string{latestPageProp}, func(c types.ManagedObjectReference, pc []types.PropertyChange) bool {\n\t\t\tif err = p.process(c, pc); err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tcount++\n\t\t\tif count == len(collectors) && !tail {\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\treturn false\n\t\t})\n\n\t\treturn err\n\t}\n\n\t// only one object to follow\n\terr = property.Wait(ctx, property.DefaultCollector(p.mgr.Client()), collectors[0], []string{latestPageProp}, func(pc []types.PropertyChange) bool {\n\t\tif err = p.process(collectors[0], pc); err != nil {\n\t\t\treturn false\n\t\t}\n\n\t\tif !tail {\n\t\t\treturn true\n\t\t}\n\n\t\treturn false\n\t})\n\n\treturn err\n}\n\nfunc (p *eventProcessor) process(c types.ManagedObjectReference, pc []types.PropertyChange) error {\n\tt := p.tailers[c]\n\tif t == nil {\n\t\treturn fmt.Errorf(\"unknown collector %s\", c.String())\n\t}\n\n\tfor _, u := range pc {\n\t\tif u.Name != latestPageProp {\n\t\t\tcontinue\n\t\t}\n\n\t\tevs := t.t.newEvents(u.Val.(types.ArrayOfEvent).Event)\n\t\tif len(evs) == 0 
{\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := p.callback(t.obj, evs); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nconst invalidKey = int32(-1)\n\ntype eventTailer struct {\n\tlastKey int32\n}\n\nfunc newEventTailer() *eventTailer {\n\treturn &eventTailer{\n\t\tlastKey: invalidKey,\n\t}\n}\n\nfunc (t *eventTailer) newEvents(evs []types.BaseEvent) []types.BaseEvent {\n\tvar ret []types.BaseEvent\n\tif t.lastKey == invalidKey {\n\t\tret = evs\n\t} else {\n\t\tfound := false\n\t\tfor i := range evs {\n\t\t\tif evs[i].GetEvent().Key != t.lastKey {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfound = true\n\t\t\tret = evs[:i]\n\t\t\tbreak\n\t\t}\n\n\t\tif !found {\n\t\t\tret = evs\n\t\t}\n\t}\n\n\tif len(ret) > 0 {\n\t\tt.lastKey = ret[0].GetEvent().Key\n\t}\n\n\treturn ret\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/event/sort.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage event\n\nimport (\n\t\"sort\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\n// Sort events in ascending order base on Key\n// From the EventHistoryCollector.latestPage sdk docs:\n//   The \"oldest event\" is the one with the smallest key (event ID).\n//   The events in the returned page are unordered.\nfunc Sort(events []types.BaseEvent) {\n\tsort.Sort(baseEvent(events))\n}\n\ntype baseEvent []types.BaseEvent\n\nfunc (d baseEvent) Len() int {\n\treturn len(d)\n}\n\nfunc (d baseEvent) Less(i, j int) bool {\n\treturn d[i].GetEvent().Key < d[j].GetEvent().Key\n}\n\nfunc (d baseEvent) Swap(i, j int) {\n\td[i], d[j] = d[j], d[i]\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/examples/datastores/main.go",
    "content": "/*\nCopyright (c) 2015-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n/*\nThis example program shows how the `view` and `property` packages can\nbe used to navigate a vSphere inventory structure using govmomi.\n*/\n\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/examples\"\n\t\"github.com/vmware/govmomi/units\"\n\t\"github.com/vmware/govmomi/view\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n)\n\nfunc main() {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\t// Connect and log in to ESX or vCenter\n\tc, err := examples.NewClient(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdefer c.Logout(ctx)\n\n\t// Create a view of Datastore objects\n\tm := view.NewManager(c.Client)\n\n\tv, err := m.CreateContainerView(ctx, c.ServiceContent.RootFolder, []string{\"Datastore\"}, true)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdefer v.Destroy(ctx)\n\n\t// Retrieve summary property for all datastores\n\t// Reference: http://pubs.vmware.com/vsphere-60/topic/com.vmware.wssdk.apiref.doc/vim.Datastore.html\n\tvar dss []mo.Datastore\n\terr = v.Retrieve(ctx, []string{\"Datastore\"}, []string{\"summary\"}, &dss)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Print summary per datastore (see also: govc/datastore/info.go)\n\n\ttw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)\n\tfmt.Fprintf(tw, 
\"Name:\\tType:\\tCapacity:\\tFree:\\n\")\n\n\tfor _, ds := range dss {\n\t\tfmt.Fprintf(tw, \"%s\\t\", ds.Summary.Name)\n\t\tfmt.Fprintf(tw, \"%s\\t\", ds.Summary.Type)\n\t\tfmt.Fprintf(tw, \"%s\\t\", units.ByteSize(ds.Summary.Capacity))\n\t\tfmt.Fprintf(tw, \"%s\\t\", units.ByteSize(ds.Summary.FreeSpace))\n\t\tfmt.Fprintf(tw, \"\\n\")\n\t}\n\n\t_ = tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/examples/examples.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage examples\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n)\n\n// getEnvString returns string from environment variable.\nfunc getEnvString(v string, def string) string {\n\tr := os.Getenv(v)\n\tif r == \"\" {\n\t\treturn def\n\t}\n\n\treturn r\n}\n\n// getEnvBool returns boolean from environment variable.\nfunc getEnvBool(v string, def bool) bool {\n\tr := os.Getenv(v)\n\tif r == \"\" {\n\t\treturn def\n\t}\n\n\tswitch strings.ToLower(r[0:1]) {\n\tcase \"t\", \"y\", \"1\":\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nconst (\n\tenvURL      = \"GOVMOMI_URL\"\n\tenvUserName = \"GOVMOMI_USERNAME\"\n\tenvPassword = \"GOVMOMI_PASSWORD\"\n\tenvInsecure = \"GOVMOMI_INSECURE\"\n)\n\nvar urlDescription = fmt.Sprintf(\"ESX or vCenter URL [%s]\", envURL)\nvar urlFlag = flag.String(\"url\", getEnvString(envURL, \"https://username:password@host/sdk\"), urlDescription)\n\nvar insecureDescription = fmt.Sprintf(\"Don't verify the server's certificate chain [%s]\", envInsecure)\nvar insecureFlag = flag.Bool(\"insecure\", getEnvBool(envInsecure, false), insecureDescription)\n\nfunc processOverride(u *url.URL) {\n\tenvUsername := os.Getenv(envUserName)\n\tenvPassword := os.Getenv(envPassword)\n\n\t// Override username if provided\n\tif 
envUsername != \"\" {\n\t\tvar password string\n\t\tvar ok bool\n\n\t\tif u.User != nil {\n\t\t\tpassword, ok = u.User.Password()\n\t\t}\n\n\t\tif ok {\n\t\t\tu.User = url.UserPassword(envUsername, password)\n\t\t} else {\n\t\t\tu.User = url.User(envUsername)\n\t\t}\n\t}\n\n\t// Override password if provided\n\tif envPassword != \"\" {\n\t\tvar username string\n\n\t\tif u.User != nil {\n\t\t\tusername = u.User.Username()\n\t\t}\n\n\t\tu.User = url.UserPassword(username, envPassword)\n\t}\n}\n\n// NewClient creates a govmomi.Client for use in the examples\nfunc NewClient(ctx context.Context) (*govmomi.Client, error) {\n\tflag.Parse()\n\n\t// Parse URL from string\n\tu, err := soap.ParseURL(*urlFlag)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Override username and/or password as required\n\tprocessOverride(u)\n\n\t// Connect and log in to ESX or vCenter\n\treturn govmomi.NewClient(ctx, u, *insecureFlag)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/examples/hosts/main.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n/*\nThis example program shows how the `view` and `property` packages can\nbe used to navigate a vSphere inventory structure using govmomi.\n*/\n\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/examples\"\n\t\"github.com/vmware/govmomi/units\"\n\t\"github.com/vmware/govmomi/view\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n)\n\nfunc main() {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\t// Connect and login to ESX or vCenter\n\tc, err := examples.NewClient(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdefer c.Logout(ctx)\n\n\t// Create a view of HostSystem objects\n\tm := view.NewManager(c.Client)\n\n\tv, err := m.CreateContainerView(ctx, c.ServiceContent.RootFolder, []string{\"HostSystem\"}, true)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdefer v.Destroy(ctx)\n\n\t// Retrieve summary property for all hosts\n\t// Reference: http://pubs.vmware.com/vsphere-60/topic/com.vmware.wssdk.apiref.doc/vim.HostSystem.html\n\tvar hss []mo.HostSystem\n\terr = v.Retrieve(ctx, []string{\"HostSystem\"}, []string{\"summary\"}, &hss)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Print summary per host (see also: govc/host/info.go)\n\n\ttw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)\n\tfmt.Fprintf(tw, \"Name:\\tUsed CPU:\\tTotal 
CPU:\\tFree CPU:\\tUsed Memory:\\tTotal Memory:\\tFree Memory\\t:\\n\")\n\n\tfor _, hs := range hss {\n\t\ttotalCPU := int64(hs.Summary.Hardware.CpuMhz) * int64(hs.Summary.Hardware.NumCpuCores)\n\t\tfreeCPU := int64(totalCPU) - int64(hs.Summary.QuickStats.OverallCpuUsage)\n\t\tfreeMemory := int64(hs.Summary.Hardware.MemorySize) - (int64(hs.Summary.QuickStats.OverallMemoryUsage) * 1024 * 1024)\n\t\tfmt.Fprintf(tw, \"%s\\t\", hs.Summary.Config.Name)\n\t\tfmt.Fprintf(tw, \"%d\\t\", hs.Summary.QuickStats.OverallCpuUsage)\n\t\tfmt.Fprintf(tw, \"%d\\t\", totalCPU)\n\t\tfmt.Fprintf(tw, \"%d\\t\", freeCPU)\n\t\tfmt.Fprintf(tw, \"%s\\t\", units.ByteSize(hs.Summary.QuickStats.OverallMemoryUsage))\n\t\tfmt.Fprintf(tw, \"%s\\t\", units.ByteSize(hs.Summary.Hardware.MemorySize))\n\t\tfmt.Fprintf(tw, \"%d\\t\", freeMemory)\n\t\tfmt.Fprintf(tw, \"\\n\")\n\t}\n\n\t_ = tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/examples/networks/main.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com/vmware/govmomi/examples\"\n\t\"github.com/vmware/govmomi/view\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n)\n\nfunc main() {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\t// Connect and login to ESX or vCenter\n\tc, err := examples.NewClient(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdefer c.Logout(ctx)\n\n\t// Create a view of Network types\n\tm := view.NewManager(c.Client)\n\n\tv, err := m.CreateContainerView(ctx, c.ServiceContent.RootFolder, []string{\"Network\"}, true)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdefer v.Destroy(ctx)\n\n\t// Reference: http://pubs.vmware.com/vsphere-60/topic/com.vmware.wssdk.apiref.doc/vim.Network.html\n\tvar networks []mo.Network\n\terr = v.Retrieve(ctx, []string{\"Network\"}, nil, &networks)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, net := range networks {\n\t\tfmt.Printf(\"%s: %s\\n\", net.Name, net.Reference())\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/examples/virtualmachines/main.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n/*\nThis example program shows how the `view` package can\nbe used to navigate a vSphere inventory structure using govmomi.\n*/\n\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com/vmware/govmomi/examples\"\n\t\"github.com/vmware/govmomi/view\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n)\n\nfunc main() {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\t// Connect and login to ESX or vCenter\n\tc, err := examples.NewClient(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdefer c.Logout(ctx)\n\n\t// Create view of VirtualMachine objects\n\tm := view.NewManager(c.Client)\n\n\tv, err := m.CreateContainerView(ctx, c.ServiceContent.RootFolder, []string{\"VirtualMachine\"}, true)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdefer v.Destroy(ctx)\n\n\t// Retrieve summary property for all machines\n\t// Reference: http://pubs.vmware.com/vsphere-60/topic/com.vmware.wssdk.apiref.doc/vim.VirtualMachine.html\n\tvar vms []mo.VirtualMachine\n\terr = v.Retrieve(ctx, []string{\"VirtualMachine\"}, []string{\"summary\"}, &vms)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Print summary per vm (see also: govc/vm/info.go)\n\n\tfor _, vm := range vms {\n\t\tfmt.Printf(\"%s: %s\\n\", vm.Summary.Config.Name, vm.Summary.Config.GuestFullName)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/find/doc.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n/*\nPackage find implements inventory listing and searching.\n\nThe Finder is an alternative to the object.SearchIndex FindByInventoryPath() and FindChild() methods.\nSearchIndex.FindByInventoryPath requires an absolute path, whereas the Finder also supports relative paths\nand patterns via path.Match.\nSearchIndex.FindChild requires a parent to find the child, whereas the Finder also supports an ancestor via\nrecursive object traversal.\n\nThe various Finder methods accept a \"path\" argument, which can absolute or relative to the Folder for the object type.\nThe Finder supports two modes, \"list\" and \"find\".  The \"list\" mode behaves like the \"ls\" command, only searching within\nthe immediate path.  The \"find\" mode behaves like the \"find\" command, with the search starting at the immediate path but\nalso recursing into sub Folders relative to the Datacenter.  The default mode is \"list\" if the given path contains a \"/\",\notherwise \"find\" mode is used.\n\nThe exception is to use a \"...\" wildcard with a path to find all objects recursively underneath any root object.\nFor example: VirtualMachineList(\"/DC1/...\")\n\nSee also: https://github.com/vmware/govmomi/blob/master/govc/README.md#usage\n*/\npackage find\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/find/error.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage find\n\nimport \"fmt\"\n\ntype NotFoundError struct {\n\tkind string\n\tpath string\n}\n\nfunc (e *NotFoundError) Error() string {\n\treturn fmt.Sprintf(\"%s '%s' not found\", e.kind, e.path)\n}\n\ntype MultipleFoundError struct {\n\tkind string\n\tpath string\n}\n\nfunc (e *MultipleFoundError) Error() string {\n\treturn fmt.Sprintf(\"path '%s' resolves to multiple %ss\", e.path, e.kind)\n}\n\ntype DefaultNotFoundError struct {\n\tkind string\n}\n\nfunc (e *DefaultNotFoundError) Error() string {\n\treturn fmt.Sprintf(\"no default %s found\", e.kind)\n}\n\ntype DefaultMultipleFoundError struct {\n\tkind string\n}\n\nfunc (e DefaultMultipleFoundError) Error() string {\n\treturn fmt.Sprintf(\"default %s resolves to multiple instances, please specify\", e.kind)\n}\n\nfunc toDefaultError(err error) error {\n\tswitch e := err.(type) {\n\tcase *NotFoundError:\n\t\treturn &DefaultNotFoundError{e.kind}\n\tcase *MultipleFoundError:\n\t\treturn &DefaultMultipleFoundError{e.kind}\n\tdefault:\n\t\treturn err\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/find/finder.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage find\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/list\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype Finder struct {\n\tclient  *vim25.Client\n\tr       recurser\n\tdc      *object.Datacenter\n\tsi      *object.SearchIndex\n\tfolders *object.DatacenterFolders\n}\n\nfunc NewFinder(client *vim25.Client, all bool) *Finder {\n\tf := &Finder{\n\t\tclient: client,\n\t\tsi:     object.NewSearchIndex(client),\n\t\tr: recurser{\n\t\t\tCollector: property.DefaultCollector(client),\n\t\t\tAll:       all,\n\t\t},\n\t}\n\n\treturn f\n}\n\nfunc (f *Finder) SetDatacenter(dc *object.Datacenter) *Finder {\n\tf.dc = dc\n\tf.folders = nil\n\treturn f\n}\n\n// findRoot makes it possible to use \"find\" mode with a different root path.\n// Example: ResourcePoolList(\"/dc1/host/cluster1/...\")\nfunc (f *Finder) findRoot(ctx context.Context, root *list.Element, parts []string) bool {\n\tif len(parts) == 0 {\n\t\treturn false\n\t}\n\n\tix := len(parts) - 1\n\n\tif parts[ix] != \"...\" {\n\t\treturn false\n\t}\n\n\tif ix == 0 {\n\t\treturn true // We already have the Object for root.Path\n\t}\n\n\t// Lookup the Object for the new 
root.Path\n\trootPath := path.Join(root.Path, path.Join(parts[:ix]...))\n\n\tref, err := f.si.FindByInventoryPath(ctx, rootPath)\n\tif err != nil || ref == nil {\n\t\t// If we get an error or fail to match, fall through to find() with the original root and path\n\t\treturn false\n\t}\n\n\troot.Path = rootPath\n\troot.Object = ref\n\n\treturn true\n}\n\nfunc (f *Finder) find(ctx context.Context, arg string, s *spec) ([]list.Element, error) {\n\tisPath := strings.Contains(arg, \"/\")\n\n\troot := list.Element{\n\t\tPath:   \"/\",\n\t\tObject: object.NewRootFolder(f.client),\n\t}\n\n\tparts := list.ToParts(arg)\n\n\tif len(parts) > 0 {\n\t\tswitch parts[0] {\n\t\tcase \"..\": // Not supported; many edge case, little value\n\t\t\treturn nil, errors.New(\"cannot traverse up a tree\")\n\t\tcase \".\": // Relative to whatever\n\t\t\tpivot, err := s.Relative(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tmes, err := mo.Ancestors(ctx, f.client, f.client.ServiceContent.PropertyCollector, pivot.Reference())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tfor _, me := range mes {\n\t\t\t\t// Skip root entity in building inventory path.\n\t\t\t\tif me.Parent == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\troot.Path = path.Join(root.Path, me.Name)\n\t\t\t}\n\n\t\t\troot.Object = pivot\n\t\t\tparts = parts[1:]\n\t\t}\n\t}\n\n\tif s.listMode(isPath) {\n\t\tif f.findRoot(ctx, &root, parts) {\n\t\t\tparts = []string{\"*\"}\n\t\t} else {\n\t\t\treturn f.r.List(ctx, s, root, parts)\n\t\t}\n\t}\n\n\ts.Parents = append(s.Parents, s.Nested...)\n\n\treturn f.r.Find(ctx, s, root, parts)\n}\n\nfunc (f *Finder) datacenter() (*object.Datacenter, error) {\n\tif f.dc == nil {\n\t\treturn nil, errors.New(\"please specify a datacenter\")\n\t}\n\n\treturn f.dc, nil\n}\n\n// datacenterPath returns the absolute path to the Datacenter containing the given ref\nfunc (f *Finder) datacenterPath(ctx context.Context, ref types.ManagedObjectReference) (string, 
error) {\n\tmes, err := mo.Ancestors(ctx, f.client, f.client.ServiceContent.PropertyCollector, ref)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Chop leaves under the Datacenter\n\tfor i := len(mes) - 1; i > 0; i-- {\n\t\tif mes[i].Self.Type == \"Datacenter\" {\n\t\t\tbreak\n\t\t}\n\t\tmes = mes[:i]\n\t}\n\n\tvar p string\n\n\tfor _, me := range mes {\n\t\t// Skip root entity in building inventory path.\n\t\tif me.Parent == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tp = p + \"/\" + me.Name\n\t}\n\n\treturn p, nil\n}\n\nfunc (f *Finder) dcFolders(ctx context.Context) (*object.DatacenterFolders, error) {\n\tif f.folders != nil {\n\t\treturn f.folders, nil\n\t}\n\n\tdc, err := f.datacenter()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfolders, err := dc.Folders(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tf.folders = folders\n\n\treturn f.folders, nil\n}\n\nfunc (f *Finder) dcReference(_ context.Context) (object.Reference, error) {\n\tdc, err := f.datacenter()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn dc, nil\n}\n\nfunc (f *Finder) vmFolder(ctx context.Context) (object.Reference, error) {\n\tfolders, err := f.dcFolders(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn folders.VmFolder, nil\n}\n\nfunc (f *Finder) hostFolder(ctx context.Context) (object.Reference, error) {\n\tfolders, err := f.dcFolders(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn folders.HostFolder, nil\n}\n\nfunc (f *Finder) datastoreFolder(ctx context.Context) (object.Reference, error) {\n\tfolders, err := f.dcFolders(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn folders.DatastoreFolder, nil\n}\n\nfunc (f *Finder) networkFolder(ctx context.Context) (object.Reference, error) {\n\tfolders, err := f.dcFolders(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn folders.NetworkFolder, nil\n}\n\nfunc (f *Finder) rootFolder(_ context.Context) (object.Reference, error) {\n\treturn object.NewRootFolder(f.client), 
nil\n}\n\nfunc (f *Finder) managedObjectList(ctx context.Context, path string, tl bool, include []string) ([]list.Element, error) {\n\tfn := f.rootFolder\n\n\tif f.dc != nil {\n\t\tfn = f.dcReference\n\t}\n\n\tif len(path) == 0 {\n\t\tpath = \".\"\n\t}\n\n\ts := &spec{\n\t\tRelative: fn,\n\t\tParents:  []string{\"ComputeResource\", \"ClusterComputeResource\", \"HostSystem\", \"VirtualApp\", \"StoragePod\"},\n\t\tInclude:  include,\n\t}\n\n\tif tl {\n\t\ts.Contents = true\n\t\ts.ListMode = types.NewBool(true)\n\t}\n\n\treturn f.find(ctx, path, s)\n}\n\n// Element returns an Element for the given ManagedObjectReference\n// This method is only useful for looking up the InventoryPath of a ManagedObjectReference.\nfunc (f *Finder) Element(ctx context.Context, ref types.ManagedObjectReference) (*list.Element, error) {\n\trl := func(_ context.Context) (object.Reference, error) {\n\t\treturn ref, nil\n\t}\n\n\ts := &spec{\n\t\tRelative: rl,\n\t}\n\n\te, err := f.find(ctx, \"./\", s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(e) == 0 {\n\t\treturn nil, &NotFoundError{ref.Type, ref.Value}\n\t}\n\n\tif len(e) > 1 {\n\t\tpanic(\"ManagedObjectReference must be unique\")\n\t}\n\n\treturn &e[0], nil\n}\n\n// ObjectReference converts the given ManagedObjectReference to a type from the object package via object.NewReference\n// with the object.Common.InventoryPath field set.\nfunc (f *Finder) ObjectReference(ctx context.Context, ref types.ManagedObjectReference) (object.Reference, error) {\n\te, err := f.Element(ctx, ref)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := object.NewReference(f.client, ref)\n\n\ttype common interface {\n\t\tSetInventoryPath(string)\n\t}\n\n\tr.(common).SetInventoryPath(e.Path)\n\n\tif f.dc != nil {\n\t\tif ds, ok := r.(*object.Datastore); ok {\n\t\t\tds.DatacenterPath = f.dc.InventoryPath\n\t\t}\n\t}\n\n\treturn r, nil\n}\n\nfunc (f *Finder) ManagedObjectList(ctx context.Context, path string, include ...string) ([]list.Element, 
error) {\n\treturn f.managedObjectList(ctx, path, false, include)\n}\n\nfunc (f *Finder) ManagedObjectListChildren(ctx context.Context, path string, include ...string) ([]list.Element, error) {\n\treturn f.managedObjectList(ctx, path, true, include)\n}\n\nfunc (f *Finder) DatacenterList(ctx context.Context, path string) ([]*object.Datacenter, error) {\n\ts := &spec{\n\t\tRelative: f.rootFolder,\n\t\tInclude:  []string{\"Datacenter\"},\n\t}\n\n\tes, err := f.find(ctx, path, s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar dcs []*object.Datacenter\n\tfor _, e := range es {\n\t\tref := e.Object.Reference()\n\t\tif ref.Type == \"Datacenter\" {\n\t\t\tdc := object.NewDatacenter(f.client, ref)\n\t\t\tdc.InventoryPath = e.Path\n\t\t\tdcs = append(dcs, dc)\n\t\t}\n\t}\n\n\tif len(dcs) == 0 {\n\t\treturn nil, &NotFoundError{\"datacenter\", path}\n\t}\n\n\treturn dcs, nil\n}\n\nfunc (f *Finder) Datacenter(ctx context.Context, path string) (*object.Datacenter, error) {\n\tdcs, err := f.DatacenterList(ctx, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(dcs) > 1 {\n\t\treturn nil, &MultipleFoundError{\"datacenter\", path}\n\t}\n\n\treturn dcs[0], nil\n}\n\nfunc (f *Finder) DefaultDatacenter(ctx context.Context) (*object.Datacenter, error) {\n\tdc, err := f.Datacenter(ctx, \"*\")\n\tif err != nil {\n\t\treturn nil, toDefaultError(err)\n\t}\n\n\treturn dc, nil\n}\n\nfunc (f *Finder) DatacenterOrDefault(ctx context.Context, path string) (*object.Datacenter, error) {\n\tif path != \"\" {\n\t\tdc, err := f.Datacenter(ctx, path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn dc, nil\n\t}\n\n\treturn f.DefaultDatacenter(ctx)\n}\n\nfunc (f *Finder) DatastoreList(ctx context.Context, path string) ([]*object.Datastore, error) {\n\ts := &spec{\n\t\tRelative: f.datastoreFolder,\n\t\tParents:  []string{\"StoragePod\"},\n\t}\n\n\tes, err := f.find(ctx, path, s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar dss []*object.Datastore\n\tfor _, e := 
range es {\n\t\tref := e.Object.Reference()\n\t\tif ref.Type == \"Datastore\" {\n\t\t\tds := object.NewDatastore(f.client, ref)\n\t\t\tds.InventoryPath = e.Path\n\n\t\t\tif f.dc == nil {\n\t\t\t\t// In this case SetDatacenter was not called and path is absolute\n\t\t\t\tds.DatacenterPath, err = f.datacenterPath(ctx, ref)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tds.DatacenterPath = f.dc.InventoryPath\n\t\t\t}\n\n\t\t\tdss = append(dss, ds)\n\t\t}\n\t}\n\n\tif len(dss) == 0 {\n\t\treturn nil, &NotFoundError{\"datastore\", path}\n\t}\n\n\treturn dss, nil\n}\n\nfunc (f *Finder) Datastore(ctx context.Context, path string) (*object.Datastore, error) {\n\tdss, err := f.DatastoreList(ctx, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(dss) > 1 {\n\t\treturn nil, &MultipleFoundError{\"datastore\", path}\n\t}\n\n\treturn dss[0], nil\n}\n\nfunc (f *Finder) DefaultDatastore(ctx context.Context) (*object.Datastore, error) {\n\tds, err := f.Datastore(ctx, \"*\")\n\tif err != nil {\n\t\treturn nil, toDefaultError(err)\n\t}\n\n\treturn ds, nil\n}\n\nfunc (f *Finder) DatastoreOrDefault(ctx context.Context, path string) (*object.Datastore, error) {\n\tif path != \"\" {\n\t\tds, err := f.Datastore(ctx, path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn ds, nil\n\t}\n\n\treturn f.DefaultDatastore(ctx)\n}\n\nfunc (f *Finder) DatastoreClusterList(ctx context.Context, path string) ([]*object.StoragePod, error) {\n\ts := &spec{\n\t\tRelative: f.datastoreFolder,\n\t}\n\n\tes, err := f.find(ctx, path, s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar sps []*object.StoragePod\n\tfor _, e := range es {\n\t\tref := e.Object.Reference()\n\t\tif ref.Type == \"StoragePod\" {\n\t\t\tsp := object.NewStoragePod(f.client, ref)\n\t\t\tsp.InventoryPath = e.Path\n\t\t\tsps = append(sps, sp)\n\t\t}\n\t}\n\n\tif len(sps) == 0 {\n\t\treturn nil, &NotFoundError{\"datastore cluster\", path}\n\t}\n\n\treturn sps, 
nil\n}\n\nfunc (f *Finder) DatastoreCluster(ctx context.Context, path string) (*object.StoragePod, error) {\n\tsps, err := f.DatastoreClusterList(ctx, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(sps) > 1 {\n\t\treturn nil, &MultipleFoundError{\"datastore cluster\", path}\n\t}\n\n\treturn sps[0], nil\n}\n\nfunc (f *Finder) DefaultDatastoreCluster(ctx context.Context) (*object.StoragePod, error) {\n\tsp, err := f.DatastoreCluster(ctx, \"*\")\n\tif err != nil {\n\t\treturn nil, toDefaultError(err)\n\t}\n\n\treturn sp, nil\n}\n\nfunc (f *Finder) DatastoreClusterOrDefault(ctx context.Context, path string) (*object.StoragePod, error) {\n\tif path != \"\" {\n\t\tsp, err := f.DatastoreCluster(ctx, path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn sp, nil\n\t}\n\n\treturn f.DefaultDatastoreCluster(ctx)\n}\n\nfunc (f *Finder) ComputeResourceList(ctx context.Context, path string) ([]*object.ComputeResource, error) {\n\ts := &spec{\n\t\tRelative: f.hostFolder,\n\t}\n\n\tes, err := f.find(ctx, path, s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar crs []*object.ComputeResource\n\tfor _, e := range es {\n\t\tvar cr *object.ComputeResource\n\n\t\tswitch o := e.Object.(type) {\n\t\tcase mo.ComputeResource, mo.ClusterComputeResource:\n\t\t\tcr = object.NewComputeResource(f.client, o.Reference())\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\n\t\tcr.InventoryPath = e.Path\n\t\tcrs = append(crs, cr)\n\t}\n\n\tif len(crs) == 0 {\n\t\treturn nil, &NotFoundError{\"compute resource\", path}\n\t}\n\n\treturn crs, nil\n}\n\nfunc (f *Finder) ComputeResource(ctx context.Context, path string) (*object.ComputeResource, error) {\n\tcrs, err := f.ComputeResourceList(ctx, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(crs) > 1 {\n\t\treturn nil, &MultipleFoundError{\"compute resource\", path}\n\t}\n\n\treturn crs[0], nil\n}\n\nfunc (f *Finder) DefaultComputeResource(ctx context.Context) (*object.ComputeResource, error) {\n\tcr, err := 
f.ComputeResource(ctx, \"*\")\n\tif err != nil {\n\t\treturn nil, toDefaultError(err)\n\t}\n\n\treturn cr, nil\n}\n\nfunc (f *Finder) ComputeResourceOrDefault(ctx context.Context, path string) (*object.ComputeResource, error) {\n\tif path != \"\" {\n\t\tcr, err := f.ComputeResource(ctx, path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn cr, nil\n\t}\n\n\treturn f.DefaultComputeResource(ctx)\n}\n\nfunc (f *Finder) ClusterComputeResourceList(ctx context.Context, path string) ([]*object.ClusterComputeResource, error) {\n\ts := &spec{\n\t\tRelative: f.hostFolder,\n\t}\n\n\tes, err := f.find(ctx, path, s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ccrs []*object.ClusterComputeResource\n\tfor _, e := range es {\n\t\tvar ccr *object.ClusterComputeResource\n\n\t\tswitch o := e.Object.(type) {\n\t\tcase mo.ClusterComputeResource:\n\t\t\tccr = object.NewClusterComputeResource(f.client, o.Reference())\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\n\t\tccr.InventoryPath = e.Path\n\t\tccrs = append(ccrs, ccr)\n\t}\n\n\tif len(ccrs) == 0 {\n\t\treturn nil, &NotFoundError{\"cluster\", path}\n\t}\n\n\treturn ccrs, nil\n}\n\nfunc (f *Finder) ClusterComputeResource(ctx context.Context, path string) (*object.ClusterComputeResource, error) {\n\tccrs, err := f.ClusterComputeResourceList(ctx, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(ccrs) > 1 {\n\t\treturn nil, &MultipleFoundError{\"cluster\", path}\n\t}\n\n\treturn ccrs[0], nil\n}\n\nfunc (f *Finder) HostSystemList(ctx context.Context, path string) ([]*object.HostSystem, error) {\n\ts := &spec{\n\t\tRelative: f.hostFolder,\n\t\tParents:  []string{\"ComputeResource\", \"ClusterComputeResource\"},\n\t\tInclude:  []string{\"HostSystem\"},\n\t}\n\n\tes, err := f.find(ctx, path, s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar hss []*object.HostSystem\n\tfor _, e := range es {\n\t\tvar hs *object.HostSystem\n\n\t\tswitch o := e.Object.(type) {\n\t\tcase mo.HostSystem:\n\t\t\ths = 
object.NewHostSystem(f.client, o.Reference())\n\n\t\t\ths.InventoryPath = e.Path\n\t\t\thss = append(hss, hs)\n\t\tcase mo.ComputeResource, mo.ClusterComputeResource:\n\t\t\tcr := object.NewComputeResource(f.client, o.Reference())\n\n\t\t\tcr.InventoryPath = e.Path\n\n\t\t\thosts, err := cr.Hosts(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\thss = append(hss, hosts...)\n\t\t}\n\t}\n\n\tif len(hss) == 0 {\n\t\treturn nil, &NotFoundError{\"host\", path}\n\t}\n\n\treturn hss, nil\n}\n\nfunc (f *Finder) HostSystem(ctx context.Context, path string) (*object.HostSystem, error) {\n\thss, err := f.HostSystemList(ctx, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(hss) > 1 {\n\t\treturn nil, &MultipleFoundError{\"host\", path}\n\t}\n\n\treturn hss[0], nil\n}\n\nfunc (f *Finder) DefaultHostSystem(ctx context.Context) (*object.HostSystem, error) {\n\ths, err := f.HostSystem(ctx, \"*/*\")\n\tif err != nil {\n\t\treturn nil, toDefaultError(err)\n\t}\n\n\treturn hs, nil\n}\n\nfunc (f *Finder) HostSystemOrDefault(ctx context.Context, path string) (*object.HostSystem, error) {\n\tif path != \"\" {\n\t\ths, err := f.HostSystem(ctx, path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn hs, nil\n\t}\n\n\treturn f.DefaultHostSystem(ctx)\n}\n\nfunc (f *Finder) NetworkList(ctx context.Context, path string) ([]object.NetworkReference, error) {\n\ts := &spec{\n\t\tRelative: f.networkFolder,\n\t}\n\n\tes, err := f.find(ctx, path, s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ns []object.NetworkReference\n\tfor _, e := range es {\n\t\tref := e.Object.Reference()\n\t\tswitch ref.Type {\n\t\tcase \"Network\":\n\t\t\tr := object.NewNetwork(f.client, ref)\n\t\t\tr.InventoryPath = e.Path\n\t\t\tns = append(ns, r)\n\t\tcase \"OpaqueNetwork\":\n\t\t\tr := object.NewOpaqueNetwork(f.client, ref)\n\t\t\tr.InventoryPath = e.Path\n\t\t\tns = append(ns, r)\n\t\tcase \"DistributedVirtualPortgroup\":\n\t\t\tr := 
object.NewDistributedVirtualPortgroup(f.client, ref)\n\t\t\tr.InventoryPath = e.Path\n\t\t\tns = append(ns, r)\n\t\tcase \"DistributedVirtualSwitch\", \"VmwareDistributedVirtualSwitch\":\n\t\t\tr := object.NewDistributedVirtualSwitch(f.client, ref)\n\t\t\tr.InventoryPath = e.Path\n\t\t\tns = append(ns, r)\n\t\t}\n\t}\n\n\tif len(ns) == 0 {\n\t\treturn nil, &NotFoundError{\"network\", path}\n\t}\n\n\treturn ns, nil\n}\n\nfunc (f *Finder) Network(ctx context.Context, path string) (object.NetworkReference, error) {\n\tnetworks, err := f.NetworkList(ctx, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(networks) > 1 {\n\t\treturn nil, &MultipleFoundError{\"network\", path}\n\t}\n\n\treturn networks[0], nil\n}\n\nfunc (f *Finder) DefaultNetwork(ctx context.Context) (object.NetworkReference, error) {\n\tnetwork, err := f.Network(ctx, \"*\")\n\tif err != nil {\n\t\treturn nil, toDefaultError(err)\n\t}\n\n\treturn network, nil\n}\n\nfunc (f *Finder) NetworkOrDefault(ctx context.Context, path string) (object.NetworkReference, error) {\n\tif path != \"\" {\n\t\tnetwork, err := f.Network(ctx, path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn network, nil\n\t}\n\n\treturn f.DefaultNetwork(ctx)\n}\n\nfunc (f *Finder) ResourcePoolList(ctx context.Context, path string) ([]*object.ResourcePool, error) {\n\ts := &spec{\n\t\tRelative: f.hostFolder,\n\t\tParents:  []string{\"ComputeResource\", \"ClusterComputeResource\", \"VirtualApp\"},\n\t\tNested:   []string{\"ResourcePool\"},\n\t\tContents: true,\n\t}\n\n\tes, err := f.find(ctx, path, s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar rps []*object.ResourcePool\n\tfor _, e := range es {\n\t\tvar rp *object.ResourcePool\n\n\t\tswitch o := e.Object.(type) {\n\t\tcase mo.ResourcePool:\n\t\t\trp = object.NewResourcePool(f.client, o.Reference())\n\t\t\trp.InventoryPath = e.Path\n\t\t\trps = append(rps, rp)\n\t\t}\n\t}\n\n\tif len(rps) == 0 {\n\t\treturn nil, &NotFoundError{\"resource pool\", 
path}\n\t}\n\n\treturn rps, nil\n}\n\nfunc (f *Finder) ResourcePool(ctx context.Context, path string) (*object.ResourcePool, error) {\n\trps, err := f.ResourcePoolList(ctx, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(rps) > 1 {\n\t\treturn nil, &MultipleFoundError{\"resource pool\", path}\n\t}\n\n\treturn rps[0], nil\n}\n\nfunc (f *Finder) DefaultResourcePool(ctx context.Context) (*object.ResourcePool, error) {\n\trp, err := f.ResourcePool(ctx, \"*/Resources\")\n\tif err != nil {\n\t\treturn nil, toDefaultError(err)\n\t}\n\n\treturn rp, nil\n}\n\nfunc (f *Finder) ResourcePoolOrDefault(ctx context.Context, path string) (*object.ResourcePool, error) {\n\tif path != \"\" {\n\t\trp, err := f.ResourcePool(ctx, path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn rp, nil\n\t}\n\n\treturn f.DefaultResourcePool(ctx)\n}\n\n// ResourcePoolListAll combines ResourcePoolList and VirtualAppList\n// VirtualAppList is only called if ResourcePoolList does not find any pools with the given path.\nfunc (f *Finder) ResourcePoolListAll(ctx context.Context, path string) ([]*object.ResourcePool, error) {\n\tpools, err := f.ResourcePoolList(ctx, path)\n\tif err != nil {\n\t\tif _, ok := err.(*NotFoundError); !ok {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvapps, _ := f.VirtualAppList(ctx, path)\n\n\t\tif len(vapps) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, vapp := range vapps {\n\t\t\tpools = append(pools, vapp.ResourcePool)\n\t\t}\n\t}\n\n\treturn pools, nil\n}\n\nfunc (f *Finder) DefaultFolder(ctx context.Context) (*object.Folder, error) {\n\tref, err := f.vmFolder(ctx)\n\tif err != nil {\n\t\treturn nil, toDefaultError(err)\n\t}\n\tfolder := object.NewFolder(f.client, ref.Reference())\n\n\treturn folder, nil\n}\n\nfunc (f *Finder) FolderOrDefault(ctx context.Context, path string) (*object.Folder, error) {\n\tif path != \"\" {\n\t\tfolder, err := f.Folder(ctx, path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn folder, 
nil\n\t}\n\treturn f.DefaultFolder(ctx)\n}\n\nfunc (f *Finder) VirtualMachineList(ctx context.Context, path string) ([]*object.VirtualMachine, error) {\n\ts := &spec{\n\t\tRelative: f.vmFolder,\n\t\tParents:  []string{\"VirtualApp\"},\n\t}\n\n\tes, err := f.find(ctx, path, s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar vms []*object.VirtualMachine\n\tfor _, e := range es {\n\t\tswitch o := e.Object.(type) {\n\t\tcase mo.VirtualMachine:\n\t\t\tvm := object.NewVirtualMachine(f.client, o.Reference())\n\t\t\tvm.InventoryPath = e.Path\n\t\t\tvms = append(vms, vm)\n\t\t}\n\t}\n\n\tif len(vms) == 0 {\n\t\treturn nil, &NotFoundError{\"vm\", path}\n\t}\n\n\treturn vms, nil\n}\n\nfunc (f *Finder) VirtualMachine(ctx context.Context, path string) (*object.VirtualMachine, error) {\n\tvms, err := f.VirtualMachineList(ctx, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(vms) > 1 {\n\t\treturn nil, &MultipleFoundError{\"vm\", path}\n\t}\n\n\treturn vms[0], nil\n}\n\nfunc (f *Finder) VirtualAppList(ctx context.Context, path string) ([]*object.VirtualApp, error) {\n\ts := &spec{\n\t\tRelative: f.vmFolder,\n\t}\n\n\tes, err := f.find(ctx, path, s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar apps []*object.VirtualApp\n\tfor _, e := range es {\n\t\tswitch o := e.Object.(type) {\n\t\tcase mo.VirtualApp:\n\t\t\tapp := object.NewVirtualApp(f.client, o.Reference())\n\t\t\tapp.InventoryPath = e.Path\n\t\t\tapps = append(apps, app)\n\t\t}\n\t}\n\n\tif len(apps) == 0 {\n\t\treturn nil, &NotFoundError{\"app\", path}\n\t}\n\n\treturn apps, nil\n}\n\nfunc (f *Finder) VirtualApp(ctx context.Context, path string) (*object.VirtualApp, error) {\n\tapps, err := f.VirtualAppList(ctx, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(apps) > 1 {\n\t\treturn nil, &MultipleFoundError{\"app\", path}\n\t}\n\n\treturn apps[0], nil\n}\n\nfunc (f *Finder) FolderList(ctx context.Context, path string) ([]*object.Folder, error) {\n\tes, err := 
f.ManagedObjectList(ctx, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar folders []*object.Folder\n\n\tfor _, e := range es {\n\t\tswitch o := e.Object.(type) {\n\t\tcase mo.Folder, mo.StoragePod:\n\t\t\tfolder := object.NewFolder(f.client, o.Reference())\n\t\t\tfolder.InventoryPath = e.Path\n\t\t\tfolders = append(folders, folder)\n\t\tcase *object.Folder:\n\t\t\t// RootFolder\n\t\t\tfolders = append(folders, o)\n\t\t}\n\t}\n\n\tif len(folders) == 0 {\n\t\treturn nil, &NotFoundError{\"folder\", path}\n\t}\n\n\treturn folders, nil\n}\n\nfunc (f *Finder) Folder(ctx context.Context, path string) (*object.Folder, error) {\n\tfolders, err := f.FolderList(ctx, path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(folders) > 1 {\n\t\treturn nil, &MultipleFoundError{\"folder\", path}\n\t}\n\n\treturn folders[0], nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/find/recurser.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage find\n\nimport (\n\t\"context\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com/vmware/govmomi/list\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n)\n\n// spec is used to specify per-search configuration, independent of the Finder instance.\ntype spec struct {\n\t// Relative returns the root object to resolve Relative paths (starts with \".\")\n\tRelative func(ctx context.Context) (object.Reference, error)\n\n\t// ListMode can be used to optionally force \"ls\" behavior, rather than \"find\" behavior\n\tListMode *bool\n\n\t// Contents configures the Recurser to list the Contents of traversable leaf nodes.\n\t// This is typically set to true when used from the ls command, where listing\n\t// a folder means listing its Contents. 
This is typically set to false for\n\t// commands that take managed entities that are not folders as input.\n\tContents bool\n\n\t// Parents specifies the types which can contain the child types being searched for.\n\t// for example, when searching for a HostSystem, parent types can be\n\t// \"ComputeResource\" or \"ClusterComputeResource\".\n\tParents []string\n\n\t// Include specifies which types to be included in the results, used only in \"find\" mode.\n\tInclude []string\n\n\t// Nested should be set to types that can be Nested, used only in \"find\" mode.\n\tNested []string\n\n\t// ChildType avoids traversing into folders that can't contain the Include types, used only in \"find\" mode.\n\tChildType []string\n}\n\nfunc (s *spec) traversable(o mo.Reference) bool {\n\tref := o.Reference()\n\n\tswitch ref.Type {\n\tcase \"Datacenter\":\n\t\tif len(s.Include) == 1 && s.Include[0] == \"Datacenter\" {\n\t\t\t// No point in traversing deeper as Datacenters cannot be nested\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\tcase \"Folder\":\n\t\tif f, ok := o.(mo.Folder); ok {\n\t\t\t// TODO: Not making use of this yet, but here we can optimize when searching the entire\n\t\t\t// inventory across Datacenters for specific types, for example: 'govc ls -t VirtualMachine /**'\n\t\t\t// should not traverse into a Datacenter's host, network or datatore folders.\n\t\t\tif !s.traversableChildType(f.ChildType) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\tfor _, kind := range s.Parents {\n\t\tif kind == ref.Type {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (s *spec) traversableChildType(ctypes []string) bool {\n\tif len(s.ChildType) == 0 {\n\t\treturn true\n\t}\n\n\tfor _, t := range ctypes {\n\t\tfor _, c := range s.ChildType {\n\t\t\tif t == c {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (s *spec) wanted(e list.Element) bool {\n\tif len(s.Include) == 0 {\n\t\treturn true\n\t}\n\n\tw := 
e.Object.Reference().Type\n\n\tfor _, kind := range s.Include {\n\t\tif w == kind {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n// listMode is a global option to revert to the original Finder behavior,\n// disabling the newer \"find\" mode.\nvar listMode = os.Getenv(\"GOVMOMI_FINDER_LIST_MODE\") == \"true\"\n\nfunc (s *spec) listMode(isPath bool) bool {\n\tif listMode {\n\t\treturn true\n\t}\n\n\tif s.ListMode != nil {\n\t\treturn *s.ListMode\n\t}\n\n\treturn isPath\n}\n\ntype recurser struct {\n\tCollector *property.Collector\n\n\t// All configures the recurses to fetch complete objects for leaf nodes.\n\tAll bool\n}\n\nfunc (r recurser) List(ctx context.Context, s *spec, root list.Element, parts []string) ([]list.Element, error) {\n\tif len(parts) == 0 {\n\t\t// Include non-traversable leaf elements in result. For example, consider\n\t\t// the pattern \"./vm/my-vm-*\", where the pattern should match the VMs and\n\t\t// not try to traverse them.\n\t\t//\n\t\t// Include traversable leaf elements in result, if the contents\n\t\t// field is set to false.\n\t\t//\n\t\tif !s.Contents || !s.traversable(root.Object.Reference()) {\n\t\t\treturn []list.Element{root}, nil\n\t\t}\n\t}\n\n\tk := list.Lister{\n\t\tCollector: r.Collector,\n\t\tReference: root.Object.Reference(),\n\t\tPrefix:    root.Path,\n\t}\n\n\tif r.All && len(parts) < 2 {\n\t\tk.All = true\n\t}\n\n\tin, err := k.List(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// This folder is a leaf as far as the glob goes.\n\tif len(parts) == 0 {\n\t\treturn in, nil\n\t}\n\n\tpattern := parts[0]\n\tparts = parts[1:]\n\n\tvar out []list.Element\n\tfor _, e := range in {\n\t\tmatched, err := path.Match(pattern, path.Base(e.Path))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif !matched {\n\t\t\tcontinue\n\t\t}\n\n\t\tnres, err := r.List(ctx, s, e, parts)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tout = append(out, nres...)\n\t}\n\n\treturn out, nil\n}\n\nfunc (r recurser) 
Find(ctx context.Context, s *spec, root list.Element, parts []string) ([]list.Element, error) {\n\tvar out []list.Element\n\n\tif len(parts) > 0 {\n\t\tpattern := parts[0]\n\t\tmatched, err := path.Match(pattern, path.Base(root.Path))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif matched && s.wanted(root) {\n\t\t\tout = append(out, root)\n\t\t}\n\t}\n\n\tif !s.traversable(root.Object) {\n\t\treturn out, nil\n\t}\n\n\tk := list.Lister{\n\t\tCollector: r.Collector,\n\t\tReference: root.Object.Reference(),\n\t\tPrefix:    root.Path,\n\t}\n\n\tin, err := k.List(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, e := range in {\n\t\tnres, err := r.Find(ctx, s, e, parts)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tout = append(out, nres...)\n\t}\n\n\treturn out, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/gen/Gemfile",
    "content": "source \"https://rubygems.org\"\n\ngem \"nokogiri\"\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/gen/gen.sh",
    "content": "#!/bin/bash\n\n# Copyright (c) 2014 VMware, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -e\n\ngenerate() {\n  dst=\"$1\"\n  wsdl=\"$2\"\n  modl=\"$3\"\n\n  pkgs=(types methods)\n  if [ -n \"$modl\" ] ; then\n    pkgs+=(mo)\n  fi\n\n  for p in \"${pkgs[@]}\"\n  do\n    mkdir -p \"$dst/$p\"\n  done\n\n  echo \"generating $dst/...\"\n\n  bundle exec ruby gen_from_wsdl.rb \"$dst\" \"$wsdl\"\n  if [ -n \"$modl\" ] ; then\n    bundle exec ruby gen_from_vmodl.rb \"$dst\" \"$wsdl\" \"$modl\"\n  fi\n\n  for p in \"${pkgs[@]}\"\n  do\n    pushd \"$dst/$p\" >/dev/null\n    goimports -w ./*.go\n    go install\n    popd >/dev/null\n  done\n}\n\n# ./sdk/ contains the contents of wsdl.zip from vimbase build 5037323\n\ngenerate \"../vim25\" \"vim\" \"./rbvmomi/vmodl.db\" # from github.com/vmware/rbvmomi@f6907e6\ngenerate \"../pbm\" \"pbm\"\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/gen/gen_from_vmodl.rb",
    "content": "# Copyright (c) 2014 VMware, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n$:.unshift(File.expand_path(File.dirname(__FILE__)))\n\nrequire \"vim_wsdl\"\n\nrequire \"test/unit\"\n\ndef read(file)\n  File.open(file)\nend\n\nclass Prop\n  def initialize(vmodl, data)\n    @vmodl = vmodl\n    @data = data\n  end\n\n  def slice?\n    @data[\"is-array\"]\n  end\n\n  def optional?\n    @data[\"is-optional\"]\n  end\n\n  def name\n    @data[\"name\"]\n  end\n\n  def var_field\n    n = name\n    n[0].capitalize + n[1..-1]\n  end\n\n  def var_type_prefix(base=false)\n    if slice?\n      \"[]\"\n    else\n      if optional? && !base\n        \"*\"\n      else\n        \"\"\n      end\n    end\n  end\n\n  def var_type\n    type = @data[\"wsdl_type\"]\n    if @vmodl.managed_hash.has_key?(type)\n      type = \"ManagedObjectReference\"\n    end\n\n    # Fix up type from vmodl\n    case type\n    when \"TypeName\", \"MethodName\"\n      type = \"xsd:string\"\n    when \"ManagedObject\"\n      type = \"ManagedObjectReference\"\n    when \"xsd:anyType\"\n      type = \"AnyType\"\n    end\n\n    if type =~ /^xsd:(.*)$/\n      type = $1\n      case type\n      when \"string\"\n      when \"int\"\n        type = \"int32\"\n      when \"boolean\"\n        type =\"bool\"\n      when \"long\"\n        type =\"int64\"\n      when \"dateTime\"\n        type =\"time.Time\"\n        prefix += \"*\" if !slice? 
&& optional?\n      when \"byte\"\n      when \"double\"\n        type =\"float64\"\n      when \"float\"\n        type =\"float32\"\n      when \"short\"\n        type =\"int16\"\n      when \"base64Binary\"\n        type =\"[]byte\"\n      else\n        raise \"unknown type: %s\" % type\n      end\n    else\n      if Peek.base?(type)\n        type = \"Base\" + type\n        base = true\n      end\n      type = \"types.\" + type\n    end\n\n    var_type_prefix(base) + type\n  end\n\n  def var_tag\n    \"mo:\\\"%s\\\"\" % name\n  end\n\n  def dump(io)\n    io.print \"%s %s `%s`\\n\" % [var_field, var_type, var_tag]\n  end\nend\n\nclass Managed\n  def initialize(vmodl, name, data)\n    @vmodl = vmodl\n    @name = name\n    @data = data\n  end\n\n  def name\n    @name\n  end\n\n  def props\n    @data[\"props\"].map do |p|\n      Prop.new(@vmodl, p)\n    end\n  end\n\n  def dump(io)\n    include_ref_getter = false\n    include_ent_getter = false\n\n    io.print \"type %s struct {\\n\" % name\n\n    case @data[\"wsdl_base\"]\n    when nil, \"ManagedObject\", \"View\"\n      include_ref_getter = true\n      io.print \"Self types.ManagedObjectReference\\n\\n\"\n    else\n      io.print \"%s\\n\\n\" % @data[\"wsdl_base\"]\n      if @data[\"wsdl_base\"] == \"ManagedEntity\"\n        include_ent_getter = true\n      end\n    end\n\n    props.each do |p|\n      p.dump(io)\n    end\n    io.print \"}\\n\\n\"\n\n    if include_ref_getter\n      io.print \"func (m %s) Reference() types.ManagedObjectReference {\\n\" % [name]\n      io.print \"return m.Self\\n\"\n      io.print \"}\\n\\n\"\n    end\n\n    if include_ent_getter\n      io.print \"func (m *%s) Entity() *ManagedEntity {\\n\" % [name]\n      io.print \"return &m.ManagedEntity\\n\"\n      io.print \"}\\n\\n\"\n    end\n  end\n\n  def dump_init(io)\n    io.print \"func init() {\\n\"\n    io.print \"t[\\\"%s\\\"] = reflect.TypeOf((*%s)(nil)).Elem()\\n\" % [name, name]\n    io.print \"}\\n\\n\"\n  end\nend\n\nclass Vmodl\n 
 def initialize(data)\n    @data = Marshal.load(data)\n  end\n\n  def managed_hash\n    @managed_hash ||= begin\n                        h = {}\n                        managed.each do |m|\n                          h[m.name] = m\n                        end\n                        h\n                      end\n  end\n\n  def managed\n    @data.map do |k,v|\n      next if !v.is_a?(Hash)\n      next if v[\"kind\"] != \"managed\"\n\n      Managed.new(self, k, v)\n    end.compact\n  end\nend\n\nif !File.directory?(ARGV.first)\n  raise \"first argument not a directory\"\nend\n\nwsdl = WSDL.new(WSDL.read ARGV[1]+\".wsdl\")\nwsdl.validate_assumptions!\nwsdl.peek()\n\nvmodl = Vmodl.new(read ARGV[2] || \"./rbvmomi/vmodl.db\")\n\nFile.open(File.join(ARGV.first, \"mo/mo.go\"), \"w\") do |io|\n  io.print WSDL.header(\"mo\")\n\n  vmodl.\n    managed.\n    sort_by { |m| m.name }.\n    each { |m| m.dump(io); m.dump_init(io); }\nend\n\nexit(0)\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/gen/gen_from_wsdl.rb",
    "content": "# Copyright (c) 2014 VMware, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n$:.unshift(File.expand_path(File.dirname(__FILE__)))\n\nrequire \"vim_wsdl\"\n\nif !File.directory?(ARGV.first)\n  raise \"first argument not a directory\"\nend\n\ntarget = ARGV[1]\nwsdl = WSDL.new(WSDL.read target+\".wsdl\")\nwsdl.validate_assumptions!\nwsdl.peek()\n\nifs = Peek.types.keys.select { |name| Peek.base?(name) }.size()\nputs \"%d classes, %d interfaces\" % [Peek.types.size(), ifs]\n\nFile.open(File.join(ARGV.first, \"types/enum.go\"), \"w\") do |io|\n  io.print WSDL.header(\"types\")\n\n  wsdl.\n    types.\n    sort_by { |x| x.name }.\n    uniq { |x| x.name }.\n    select { |x| x.name[0] == x.name[0].upcase }. # Only capitalized methods for now...\n    select { |t| t.is_enum? }.\n    each { |e| e.dump(io); e.dump_init(io) }\nend\n\nFile.open(File.join(ARGV.first, \"types/types.go\"), \"w\") do |io|\n  io.print WSDL.header(\"types\")\n  if target != \"vim\"\n    io.print <<EOF\nimport (\n        \"context\"\n        \"github.com/vmware/govmomi/vim25/types\"\n)\nEOF\n  end\n\n  wsdl.\n    types.\n    sort_by { |x| x.name }.\n    uniq { |x| x.name }.\n    select { |x| x.name[0] == x.name[0].upcase }. # Only capitalized methods for now...\n    select { |t| !t.is_enum? 
}.\n    each { |e| e.dump(io); e.dump_init(io) }\nend\n\nFile.open(File.join(ARGV.first, \"types/if.go\"), \"w\") do |io|\n  io.print WSDL.header(\"types\")\n\n  Peek.dump_interfaces(io)\nend\n\nFile.open(File.join(ARGV.first, \"methods/methods.go\"), \"w\") do |io|\n  io.print WSDL.header(\"methods\")\n  if target == \"vim\"\n    target += \"25\"\n  end\n\n  io.print <<EOF\nimport (\n        \"context\"\n        \"github.com/vmware/govmomi/#{target}/types\"\n        \"github.com/vmware/govmomi/vim25/soap\"\n)\nEOF\n\n  wsdl.\n    operations.\n    sort_by { |x| x.name }.\n    select { |x| x.name[0] == x.name[0].upcase }. # Only capitalized methods for now...\n    each { |e| e.dump(io) }\nend\n\nexit(0)\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/gen/vim_wsdl.rb",
    "content": "# Copyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire \"nokogiri\"\nrequire \"test/unit\"\n\n$namespaces = %w(vim25)\n\ndef valid_ns?(t)\n  $namespaces.include?(t)\nend\n\ndef init_type(io, name, kind)\n  t = \"reflect.TypeOf((*#{kind})(nil)).Elem()\"\n\n  io.print \"func init() {\\n\"\n\n  if $target == \"vim25\"\n    io.print \"t[\\\"#{name}\\\"] = #{t}\\n\"\n  else\n    unless name.start_with? \"Base\"\n      name = \"#{$target}:#{name}\"\n    end\n    io.print \"types.Add(\\\"#{name}\\\", #{t})\\n\"\n  end\n\n  io.print \"}\\n\\n\"\nend\n\nclass Peek\n  class Type\n    attr_accessor :parent, :children, :klass\n\n    def initialize(name)\n      @name = name\n      @children = []\n    end\n\n    def base?\n      return !children.empty?\n    end\n  end\n\n  @@types = {}\n  @@refs = {}\n  @@enums = {}\n\n  def self.types\n    return @@types\n  end\n\n  def self.refs\n    return @@refs\n  end\n\n  def self.enums\n    return @@enums\n  end\n\n  def self.ref(type)\n    refs[type] = true\n  end\n\n  def self.enum(type)\n    enums[type] = true\n  end\n\n  def self.enum?(type)\n    enums[type]\n  end\n\n  def self.register(name)\n    raise unless name\n    types[name] ||= Type.new(name)\n  end\n\n  def self.base?(name)\n    return unless c = types[name]\n    c.base?\n  end\n\n  def self.dump_interfaces(io)\n    types.keys.sort.each do |name|\n      next unless base?(name)\n      klass = 
types[name].klass\n      klass.dump_interface(io, name) if klass\n    end\n  end\nend\n\nclass EnumValue\n  def initialize(type, value)\n    @type = type\n    @value = value\n  end\n\n  def type_name\n    @type.name\n  end\n\n  def var_name\n    n = @type.name\n    v = var_value\n    if v == \"\"\n      n += \"Null\"\n    else\n      n += (v[0].capitalize + v[1..-1])\n    end\n\n    return n\n  end\n\n  def var_value\n    @value\n  end\n\n  def dump(io)\n    io.print \"%s = %s(\\\"%s\\\")\\n\" % [var_name, type_name, var_value]\n  end\nend\n\nclass Simple\n  include Test::Unit::Assertions\n\n  attr_accessor :name, :type\n\n  def initialize(node)\n    @node = node\n  end\n\n  def name\n    @name || @node[\"name\"]\n  end\n\n  def type\n    @type || @node[\"type\"]\n  end\n\n  def is_enum?\n    false\n  end\n\n  def dump_init(io)\n    # noop\n  end\n\n  def var_name\n    n = self.name\n    n = n[1..-1] if n[0] == \"_\" # Strip leading _\n    n = n[0].capitalize + n[1..-1] # Capitalize\n    return n\n  end\n\n  def ns(t = self.type)\n    t.split(\":\", 2)[0]\n  end\n\n  def vim_type?\n    valid_ns? ns\n  end\n\n  def vim_type(t = self.type)\n    ns, kind = t.split(\":\", 2)\n    if ! valid_ns? ns\n        raise\n    end\n    kind\n  end\n\n  def base_type?\n    vim_type? && Peek.base?(vim_type)\n  end\n\n  def enum_type?\n    vim_type? && Peek.enum?(vim_type)\n  end\n\n  def any_type?\n    self.type == \"xsd:anyType\"\n  end\n\n  def pointer_type?\n    [\"UnitNumber\", \"OwnerId\", \"GroupId\", \"MaxWaitSeconds\"].include?(var_name)\n  end\n\n  def var_type\n    t = self.type\n    prefix = \"\"\n\n    prefix += \"[]\" if slice?\n\n    if t =~ /^xsd:(.*)$/\n      t = $1\n      case t\n      when \"string\"\n      when \"int\"\n        if pointer_type?\n          prefix += \"*\"\n          self.need_omitempty = false\n        end\n        t = \"int32\"\n      when \"boolean\"\n        t = \"bool\"\n        if !slice? 
&& optional?\n          prefix += \"*\"\n          self.need_omitempty = false\n        end\n      when \"long\"\n        t = \"int64\"\n      when \"dateTime\"\n        t = \"time.Time\"\n        if !slice? && optional?\n          prefix += \"*\"\n          self.need_omitempty = false\n        end\n      when \"anyType\"\n        pkg = \"\"\n        if $target != \"vim25\"\n          pkg = \"types.\"\n        end\n        t = \"#{pkg}AnyType\"\n        if [\"Value\"].include?(var_name)\n          self.need_omitempty = false\n        end\n      when \"byte\"\n      when \"double\"\n        t = \"float64\"\n      when \"float\"\n        t = \"float32\"\n      when \"short\"\n        t = \"int16\"\n      when \"base64Binary\"\n        t = \"[]byte\"\n      when \"anyURI\"\n        t = \"url.URL\"\n      else\n        raise \"unknown type: %s\" % t\n      end\n    else\n      pkg = \"\"\n      if $target != self.ns\n        pkg = \"types.\"\n      end\n\n      t = vim_type\n\n      if base_type?\n        prefix += \"#{pkg}Base\"\n      else\n        t = pkg + t\n        prefix += \"*\" if !slice? && !enum_type? && optional?\n      end\n    end\n\n    prefix + t\n  end\n\n  def slice?\n    test_attr(\"maxOccurs\", \"unbounded\")\n  end\n\n  def optional?\n    test_attr(\"minOccurs\", \"0\")\n  end\n\n  def need_omitempty=(v)\n    @need_omitempty = v\n  end\n\n  def need_omitempty?\n    var_type # HACK: trigger setting need_omitempty if necessary\n    if @need_omitempty.nil?\n      @need_omitempty = optional?\n    else\n      @need_omitempty\n    end\n  end\n\n  def need_typeattr?\n    base_type? 
|| any_type?\n  end\n\n  protected\n\n  def test_attr(attr, expected)\n    actual = @node.attr(attr)\n    if actual != nil\n      case actual\n      when expected\n        true\n      else\n        raise \"%s=%s\" % [value, type.attr(value)]\n      end\n    else\n      false\n    end\n  end\nend\n\nclass Element < Simple\n  def initialize(node)\n    super(node)\n  end\n\n  def has_type?\n    !@node[\"type\"].nil?\n  end\n\n  def child\n    cs = @node.element_children\n    assert_equal 1, cs.length\n    assert_equal \"complexType\", cs.first.name\n\n    t = ComplexType.new(cs.first)\n    t.name = self.name\n    t\n  end\n\n  def dump(io)\n    if has_type?\n      io.print \"type %s %s\\n\\n\" % [name, var_type]\n    else\n      child.dump(io)\n    end\n  end\n\n  def dump_init(io)\n    if has_type?\n      init_type io, name, name\n    end\n  end\n\n  def dump_field(io)\n    tag = name\n    tag += \",omitempty\" if need_omitempty?\n    tag += \",typeattr\" if need_typeattr?\n    io.print \"%s %s `xml:\\\"%s\\\"`\\n\" % [var_name, var_type, tag]\n  end\n\n  def peek(type=nil)\n    if has_type?\n      return if self.type =~ /^xsd:/\n\n      Peek.ref(vim_type)\n    else\n      child.peek()\n    end\n  end\nend\n\nclass Attribute < Simple\n  def dump_field(io)\n    tag = name\n    tag += \",omitempty\" if need_omitempty?\n    tag += \",attr\"\n    io.print \"%s %s `xml:\\\"%s\\\"`\\n\" % [var_name, var_type, tag]\n  end\nend\n\nclass SimpleType < Simple\n  def is_enum?\n    true\n  end\n\n  def dump(io)\n    enums = @node.xpath(\".//xsd:enumeration\").map do |n|\n      EnumValue.new(self, n[\"value\"])\n    end\n\n    io.print \"type %s string\\n\\n\" % name\n    io.print \"const (\\n\"\n    enums.each { |e| e.dump(io) }\n    io.print \")\\n\\n\"\n  end\n\n  def dump_init(io)\n    init_type io, name, name\n  end\n\n  def peek\n    Peek.enum(name)\n  end\nend\n\nclass ComplexType < Simple\n  class SimpleContent < Simple\n    def dump(io)\n      attr = 
Attribute.new(@node.at_xpath(\".//xsd:attribute\"))\n      attr.dump_field(io)\n\n      # HACK DELUXE(PN)\n      extension = @node.at_xpath(\".//xsd:extension\")\n      type = extension[\"base\"].split(\":\", 2)[1]\n      io.print \"Value %s `xml:\\\",chardata\\\"`\\n\" % type\n    end\n\n    def peek\n    end\n  end\n\n  class ComplexContent < Simple\n    def base\n      extension = @node.at_xpath(\".//xsd:extension\")\n      assert_not_nil extension\n\n      base = extension[\"base\"]\n      assert_not_nil base\n\n      base\n    end\n\n    def dump(io)\n      Sequence.new(@node).dump(io, base)\n    end\n\n    def dump_interface(io, name)\n      Sequence.new(@node).dump_interface(io, name)\n    end\n\n    def peek\n      Sequence.new(@node).peek(vim_type(base))\n    end\n  end\n\n  class Sequence < Simple\n    def sequence\n      sequence = @node.at_xpath(\".//xsd:sequence\")\n      if sequence != nil\n        sequence.element_children.map do |n|\n          Element.new(n)\n        end\n      else\n        nil\n      end\n    end\n\n    def dump(io, base = nil)\n      return unless elements = sequence\n      if base != nil\n        kind = vim_type(base)\n\n        pkg = \"\"\n        if $target != ns(base)\n          pkg = \"types.\"\n        end\n        io.print \"#{pkg}#{kind}\\n\\n\"\n      end\n\n      elements.each do |e|\n        e.dump_field(io)\n      end\n    end\n\n    def dump_interface(io, name)\n      method = \"Get%s() *%s\" % [name, name]\n      io.print \"func (b *%s) %s { return b }\\n\" % [name, method]\n      io.print \"type Base%s interface {\\n\" % name\n      io.print \"%s\\n\" % method\n      io.print \"}\\n\\n\"\n      init_type io, \"Base#{name}\", name\n    end\n\n    def peek(base = nil)\n      return unless elements = sequence\n      name = @node.attr(\"name\")\n      return unless name\n\n      elements.each do |e|\n        e.peek(name)\n      end\n\n      c = Peek.register(name)\n      if base\n        c.parent = base\n        
Peek.register(c.parent).children << name\n      end\n    end\n  end\n\n  def klass\n    @klass ||= begin\n                 cs = @node.element_children\n                 if !cs.empty?\n                   assert_equal 1, cs.length\n\n                   case cs.first.name\n                   when \"simpleContent\"\n                     SimpleContent.new(@node)\n                   when \"complexContent\"\n                     ComplexContent.new(@node)\n                   when \"sequence\"\n                     Sequence.new(@node)\n                   else\n                     raise \"don't know what to do for element: %s...\" % cs.first.name\n                   end\n                 end\n               end\n  end\n\n  def dump_init(io)\n    init_type io, name, name\n  end\n\n  def dump(io)\n    io.print \"type %s struct {\\n\" % name\n    klass.dump(io) if klass\n    io.print \"}\\n\\n\"\n  end\n\n  def peek\n    Peek.register(name).klass = klass\n    klass.peek if klass\n  end\nend\n\nclass Schema\n  include Test::Unit::Assertions\n\n  attr_accessor :namespace\n\n  def initialize(xml)\n    @xml = Nokogiri::XML.parse(xml)\n    @namespace = @xml.root.attr(\"targetNamespace\").split(\":\", 2)[1]\n    @xml\n  end\n\n  # We have some assumptions about structure, make sure they hold.\n  def validate_assumptions!\n    # Every enumeration is part of a restriction\n    @xml.xpath(\".//xsd:enumeration\").each do |n|\n      assert_equal \"restriction\", n.parent.name\n    end\n\n    # See type == enum\n    @xml.xpath(\".//xsd:restriction\").each do |n|\n      # Every restriction has type xsd:string (it's an enum)\n      assert_equal \"xsd:string\", n[\"base\"]\n\n      # Every restriction is part of a simpleType\n      assert_equal \"simpleType\", n.parent.name\n\n      # Every restriction is alone\n      assert_equal 1, n.parent.element_children.size\n    end\n\n    # See type == complex_content\n    @xml.xpath(\".//xsd:complexContent\").each do |n|\n      # complexContent is 
child of complexType\n      assert_equal \"complexType\", n.parent.name\n\n    end\n\n    # See type == complex_type\n    @xml.xpath(\".//xsd:complexType\").each do |n|\n      cc = n.element_children\n\n      # OK to have an empty complexType\n      next if cc.size == 0\n\n      # Require 1 element otherwise\n      assert_equal 1, cc.size\n\n      case cc.first.name\n      when \"complexContent\"\n        # complexContent has 1 \"extension\" element\n        cc = cc.first.element_children\n        assert_equal 1, cc.size\n        assert_equal \"extension\", cc.first.name\n\n        # extension has 1 \"sequence\" element\n        ec = cc.first.element_children\n        assert_equal 1, ec.size\n        assert_equal \"sequence\", ec.first.name\n\n        # sequence has N \"element\" elements\n        sc = ec.first.element_children\n        assert sc.all? { |e| e.name == \"element\" }\n      when \"simpleContent\"\n        # simpleContent has 1 \"extension\" element\n        cc = cc.first.element_children\n        assert_equal 1, cc.size\n        assert_equal \"extension\", cc.first.name\n\n        # extension has 1 or more \"attribute\" elements\n        ec = cc.first.element_children\n        assert_not_equal 0, ec.size\n        assert_equal \"attribute\", ec.first.name\n      when \"sequence\"\n        # sequence has N \"element\" elements\n        sc = cc.first.element_children\n        assert sc.all? 
{ |e| e.name == \"element\" }\n      else\n        raise \"unknown element: %s\" % cc.first.name\n      end\n    end\n\n    imports.each do |i|\n      i.validate_assumptions!\n    end\n\n    includes.each do |i|\n      i.validate_assumptions!\n    end\n  end\n\n  def types\n    return to_enum(:types) unless block_given?\n\n    if $target != self.namespace\n      return\n    end\n\n    imports.each do |i|\n      i.types do |t|\n        yield t\n      end\n    end\n\n    includes.each do |i|\n      i.types do |t|\n        yield t\n      end\n    end\n\n    @xml.root.children.each do |n|\n      case n.class.to_s\n      when \"Nokogiri::XML::Text\"\n        next\n      when \"Nokogiri::XML::Element\"\n        case n.name\n        when \"include\", \"import\"\n          next\n        when \"element\"\n          e = Element.new(n)\n          if e.has_type? && e.vim_type?\n            if e.ns == $target\n              yield e\n            end\n          else\n            yield e\n          end\n        when \"simpleType\"\n          yield SimpleType.new(n)\n        when \"complexType\"\n          yield ComplexType.new(n)\n        else\n          raise \"unknown child: %s\" % n.name\n        end\n      else\n        raise \"unknown type: %s\" % n.class\n      end\n    end\n  end\n\n  def imports\n    @imports ||= @xml.root.xpath(\".//xmlns:import\").map do |n|\n      Schema.new(WSDL.read n[\"schemaLocation\"])\n    end\n  end\n\n  def includes\n    @includes ||= @xml.root.xpath(\".//xmlns:include\").map do |n|\n      Schema.new(WSDL.read n[\"schemaLocation\"])\n    end\n  end\nend\n\n\nclass Operation\n  include Test::Unit::Assertions\n\n  def initialize(wsdl, operation_node)\n    @wsdl = wsdl\n    @operation_node = operation_node\n  end\n\n  def name\n    @operation_node[\"name\"]\n  end\n\n  def namespace\n    type = @operation_node.at_xpath(\"./xmlns:input\").attr(\"message\")\n    keep_ns(type)\n  end\n\n  def remove_ns(x)\n    ns, x = x.split(\":\", 2)\n    if ! 
valid_ns? ns\n        raise\n    end\n    x\n  end\n\n  def keep_ns(x)\n    ns, x = x.split(\":\", 2)\n    if ! valid_ns? ns\n        raise\n    end\n    ns\n  end\n\n  def find_type_for(type)\n    type = remove_ns(type)\n\n    message = @wsdl.message(type)\n    assert_not_nil message\n\n    part = message.at_xpath(\"./xmlns:part\")\n    assert_not_nil message\n\n    remove_ns(part[\"element\"])\n  end\n\n  def input\n    type = @operation_node.at_xpath(\"./xmlns:input\").attr(\"message\")\n    find_type_for(type)\n  end\n\n  def go_input\n    \"types.\" + input\n  end\n\n  def output\n    type = @operation_node.at_xpath(\"./xmlns:output\").attr(\"message\")\n    find_type_for(type)\n  end\n\n  def go_output\n    \"types.\" + output\n  end\n\n  def dump(io)\n    io.print <<EOS\n  type #{name}Body struct{\n    Req *#{go_input} `xml:\"urn:#{namespace} #{input},omitempty\"`\n    Res *#{go_output} `xml:\"urn:#{namespace} #{output},omitempty\"`\n    Fault_ *soap.Fault `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n  }\n\n  func (b *#{name}Body) Fault() *soap.Fault { return b.Fault_ }\n\nEOS\n\n    io.print \"func %s(ctx context.Context, r soap.RoundTripper, req *%s) (*%s, error) {\\n\" % [name, go_input, go_output]\n    io.print <<EOS\n  var reqBody, resBody #{name}Body\n\n  reqBody.Req = req\n\n  if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n    return nil, err\n  }\n\n  return resBody.Res, nil\nEOS\n\n    io.print \"}\\n\\n\"\n  end\nend\n\nclass WSDL\n  attr_reader :xml\n\n  PATH = File.expand_path(\"../sdk\", __FILE__)\n\n  def self.read(file)\n    File.open(File.join(PATH, file))\n  end\n\n  def initialize(xml)\n    @xml = Nokogiri::XML.parse(xml)\n    $target = @xml.root[\"targetNamespace\"].split(\":\", 2)[1]\n\n    unless $namespaces.include? 
$target\n      $namespaces.push $target\n    end\n  end\n\n  def validate_assumptions!\n    schemas.each do |s|\n      s.validate_assumptions!\n    end\n  end\n\n  def types(&blk)\n    return to_enum(:types) unless block_given?\n\n    schemas.each do |s|\n      s.types(&blk)\n    end\n  end\n\n  def schemas\n    @schemas ||= @xml.xpath('.//xmlns:types/xsd:schema').map do |n|\n      Schema.new(n.to_xml)\n    end\n  end\n\n  def operations\n    @operations ||= @xml.xpath('.//xmlns:portType/xmlns:operation').map do |o|\n      Operation.new(self, o)\n    end\n  end\n\n  def message(type)\n    @messages ||= begin\n                    h = {}\n                    @xml.xpath('.//xmlns:message').each do |n|\n                      h[n.attr(\"name\")] = n\n                    end\n                    h\n                  end\n\n    @messages[type]\n  end\n\n  def peek\n    types.\n      sort_by { |x| x.name }.\n      uniq { |x| x.name }.\n      select { |x| x.name[0] == x.name[0].upcase }. # Only capitalized methods for now...\n      each { |e| e.peek() }\n  end\n\n  def self.header(name)\n    return <<EOF\n/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage #{name}\n\nEOF\n  end\nend\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/.gitignore",
    "content": "/govc*\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/CHANGELOG.md",
    "content": "# changelog\n\n### 0.15.0 (2017-06-19)\n\n* Remove FileAttr flags from guest.chmod command,\n  chown functionality moved to its own command\n\n* Add optional path to guest mktemp file methods\n\n* Add guest.touch and guest.mv commands\n\n* Add host.shutdown command\n\n* Add option flags to host.autostart.add\n\n* Add object.method command\n\n* Add options.{ls,set} commands\n\n### 0.14.0 (2017-04-08)\n\n* Add find command\n\n* Add '-wait' option to vm.ip to allow a non-waiting query\n\n* Add datastore.disk.info command\n\n* Add bash completion script\n\n* Add metric commands: change, ls, info, sample, reset, interval.change, interval.info\n\n### 0.13.0 (2017-03-02)\n\n* Add vm.guest.tools command\n\n* Add datastore.disk.create command\n\n* Add datastore.vsan.dom.ls and datastore.vsan.dom.rm commands\n\n* Add vm.disk.change command\n\n* Add vm.rdm attach and ls commands\n\n* Add '-n' option to vm.ip to wait for a specific NIC\n\n* Add '-annotation' option to vm.create and vm.clone commands\n\n* Add '-sync-time-with-host-' flag to vm.change command\n\n* Add object.collect command (MOB for cli + Emacs)\n\n### 0.12.1 (2016-12-19)\n\n* Add '-f' flag to logs command\n\n* Add storage support to vm.migrate\n\n* Add support for file backed serialport devices\n\n### 0.12.0 (2016-12-01)\n\n* Add optional '-host' flag to datastore download/tail commands\n\n* Support InjectOvfEnv without PowerOn when importing\n\n* Support stdin as import options source\n\n* Add basic NVME controller support\n\n### 0.11.4 (2016-11-15)\n\n* Add role create, remove, update, ls and usage commands\n\n### 0.11.3 (2016-11-08)\n\n* Add `-product-version` flag to dvs.create\n\n* datastore.tail -f will exit without error if the file no longer exists\n\n### 0.11.2 (2016-11-01)\n\n* Add object.reload command\n\n* Add ESX 5.5 support to host.account commands\n\n### 0.11.1 (2016-10-27)\n\n* Add support for VirtualApp in pool.change command\n\n### 0.11.0 (2016-10-25)\n\n* Add object.destroy 
and object.rename commands\n\n* Remove datacenter.destroy command (use object.destroy instead)\n\n* Remove folder.destroy command (use object.destroy instead)\n\n* Rename folder.move_into -> object.mv\n\n* Add dvs.portgroup.change command\n\n* Add vlan flag to dvs.portgroup.add command\n\n### 0.10.0 (2016-10-20)\n\n* Add generated govc/USAGE.md\n\n* Add host.date info and change commands\n\n* Add session ls and rm commands\n\n* Add `-tls-known-hosts` and `-tls-ca-certs` flags\n\n* Add host.cert commands : info, csr, import\n\n* Add about.cert command (similar to the Chrome Certificate Viewer)\n\n* Add `-vspc-proxy` flag to device.serial.connect command\n\n* Rename license.list -> license.ls, license.assigned.list -> license.assigned.ls\n\n### 0.9.0 (2016-09-09)\n\n* Add `-R` option to datastore.ls\n\n* Add datastore.tail command\n\n* Add vm.migrate command\n\n* Add govc vm.register and vm.unregister commands\n\n* Add govc vm snapshot commands: create, remove, revert, tree\n\n* Add device.usb.add command\n\n* Support stdin/stdout in datastore upload/download\n\n* Add host.portgroup.change command\n\n* Add host.portgroup.info command\n\n* Add HostNetworkPolicy to host.vswitch.info\n\n* Add `-json` support to host.vswitch.info command\n\n* Support instance uuid in SearchFlag\n\n* Add `-json` support to esxcli command\n\n* Add `-unclaimed` flag to host.storage.info command\n\n* Support Network mapping in import.{ova,ovf} commands\n\n### 0.8.0 (2016-06-30)\n\n* If username (`-u` / GOVC_USERNAME) is empty, attempt login via local ticket (Workstation)\n\n* Add StoragePod support to govc folder.create\n\n* Add `-folder` flag to datacenter.create command\n\n* Logout when session persistence is disabled\n\n* Add `-L` flag to ls command for resolving by managed object reference\n\n* Add `-i` flag to ls command for listing the managed object reference\n\n* Add vm.markasvm command\n\n* Add vm.markastemplate command\n\n### 0.7.1 (2016-06-03)\n\n* Fix datastore.{upload,download} 
against VirtualCenter\n\n### 0.7.0 (2016-06-02)\n\n* Add `-require` flag to version command\n\n* Add support for local type in the datastore.create command\n\n* Add `-namespace` option to datastore.mkdir and datastore.rm to create/remove namespaces on VSANs\n\n* Add host.service command\n\n* Add host.storage.mark command\n\n* Add `-rescan` option to host.storage.info command\n\n### 0.6.0 (2016-04-29)\n\n* Add folder commands: info, create, destroy, rename, moveinto\n\n* Add datastore.info command\n\n* Add `-a` and `-v4` flags to vm.ip command\n\n* Add host.account.{create,update,remove} commands\n\n* Add env command\n\n* Add vm.clone command\n\n### 0.5.0 (2016-03-30)\n\n* Add dvs.portgroup.info command\n\n* Add `-folder` flag to vm.create command\n\n* Add `-dump` flag to OutputFlag\n\n* Add `-f` flag to events command\n\n* Add `-mode` flag to vm.disk.create command\n\n* Add `-net` flag to device.info command\n\n* Add `-eager` and `-thick` options to vm.create command\n\n### 0.4.0 (2016-02-26)\n\n* Add support for placement in datastore cluster to vm.create command\n\n* Add support for creating new disks in vm.create command\n\n* Add `-p` and `-a` options to govc datastore.ls command\n\n### 0.3.0 (2016-01-16)\n\n* Add permissions.{ls,set,remove} commands\n\n* Add datastore.{create,remove} commands.\n  The new create command supports both creating NAS and VMFS datastores.\n\n* Add dvs.{create,add} and dvs.portgroup.add commands\n\n* Add host.vnic.{service,info} commands\n\n* Add cluster.{create,change,add} commands\n\n* Add host.{disconnect,reconnect,remove,maintenance.enter,maintenance.exit} commands\n\n* Add license.decode, license.assigned.list and license.assign commands\n\n* Add firewall.ruleset.find command\n\n* Add logs, logs.ls and logs.download commands\n\n* Add support for LoginExtensionByCertificate with new `-cert` and `-key` flags\n\n* Add govc extension.{info,register,unregister,setcert} commands\n\n* Add govc vapp.{info,destroy,power} commands\n\n### 
0.2.0 (2015-09-15)\n\n* The `vm.power` guest `-s` and `-r` options will fallback to hard `-off` / `-reset` if tools is unavailable and `-force` flag is given\n\n* Add `PowerOn, InjectOvfEnv, WaitForIP` options to `import.ovf` and `import.ova` option spec file\n\n* Add `import.spec` to produce an example json document\n\n* Add `-options` to `import.ovf` and `import.ova`\n\n* Add `-folder` to `import.ovf` and `import.ova`\n\n* Add `fields` command to manage custom fields\n\n* Add `datastore.info` command\n\n* Add `events` command\n\n* Add `-net.address` (Hardware Address) option to `vm.change` and `vm.create`\n\n* Add `host.add` command to add host to datacenter.\n\n* Add `GOVC_USERNAME` and `GOVC_PASSWORD` to allow overriding username and/or\n  password (used when they contain special characters that prevent them from\n  being embedded in the URL).\n\n* Add `-e' (ExtraConfig) option to `vm.change` and `vm.info`\n\n* Retry twice on temporary network errors.\n\n* Add `host.autostart` commands to manage VM autostart.\n\n* Add `-persist-session` flag to control whether or not the session is\n  persisted to disk (defaults to true).\n\n### 0.1.0 (2015-03-17)\n\nPrior to this version the changes to govc's command set were not documented.\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/README.md",
    "content": "# govc\n\ngovc is a vSphere CLI built on top of govmomi.\n\n## Installation\n\nYou can find prebuilt govc binaries on the [releases page](https://github.com/vmware/govmomi/releases).\n\nDownload and install a binary locally like this:\n\n```sh\ncurl -L $URL_TO_BINARY | gunzip > /usr/local/bin/govc\nchmod +x /usr/local/bin/govc\n```\n\n### Source\n\nYou can install the latest govc version from source if you have the Go toolchain installed.\n\n```sh\ngo get -u github.com/vmware/govmomi/govc\n```\n\n(make sure `$GOPATH/bin` is in your `PATH`)\n\n## Usage\n\nFor the complete list of commands and flags, refer to the [USAGE](USAGE.md) document.\n\nCommon flags include:\n\n* `-u`: ESXi or vCenter URL (ex: `user:pass@host`)\n* `-debug`: Trace requests and responses (to `~/.govmomi/debug`)\n\nManaged entities can be referred to by their absolute path or by their relative\npath. For example, when specifying a datastore to use for a subcommand, you can\neither specify it as `/mydatacenter/datastore/mydatastore`, or as\n`mydatastore`. If you're not sure about the name of the datastore, or even the\nfull path to the datastore, you can specify a pattern to match. Both\n`/*center/*/my*` (absolute) and `my*store` (relative) will resolve to the same\ndatastore, given there are no other datastores that match those globs.\n\nThe relative path in this example can only be used if the command can\numambigously resolve a datacenter to use as origin for the query. If no\ndatacenter is specified, govc defaults to the only datacenter, if there is only\none. The datacenter itself can be specified as a pattern as well, enabling the\nfollowing arguments: `-dc='my*' -ds='*store'`. The datastore pattern is looked\nup and matched relative to the datacenter which itself is specified as a\npattern.\n\nBesides specifying managed entities as arguments, they can also be specified\nusing environment variables. 
The following environment variables are used by govc\nto set defaults:\n\n* `GOVC_URL`: URL of ESXi or vCenter instance to connect to.\n\n  > The URL scheme defaults to `https` and the URL path defaults to `/sdk`.\n  > This means that specifying `user:pass@host` is equivalent to\n  > `https://user:pass@host/sdk`.\n\n  > If the password includes special characters like `#` or `:` you can use\n  > `GOVC_USERNAME` and `GOVC_PASSWORD` to have a simple `GOVC_URL`\n\n  > When using govc against VMware Workstation, GOVC_URL can be set to \"localhost\"\n  > without a user or pass, in which case local ticket based authentication is used.\n\n* `GOVC_USERNAME`: USERNAME to use if not specified in GOVC_URL.\n\n* `GOVC_PASSWORD`: PASSWORD to use if not specified in GOVC_URL.\n\n* `GOVC_TLS_CA_CERTS`: Override system root certificate authorities.\n\n  > export GOVC_TLS_CA_CERTS=~/.govc_ca.crt\n  > Use path separator to specify multiple files:\n  > export GOVC_TLS_CA_CERTS=~/ca-certificates/bar.crt:~/ca-certificates/foo.crt\n\n* `GOVC_TLS_KNOWN_HOSTS`: File(s) for thumbprint based certificate verification.\n\n  > Thumbprint based verification can be used in addition to or as an alternative to\n  > GOVC_TLS_CA_CERTS for self-signed certificates.  
Example:\n  > export GOVC_TLS_KNOWN_HOSTS=~/.govc_known_hosts\n  > govc about.cert -u host -k -thumbprint | tee -a $GOVC_TLS_KNOWN_HOSTS\n  > govc about -u user:pass@host\n\n* `GOVC_INSECURE`: Disable certificate verification.\n\n  > This option sets Go's tls.Config.InsecureSkipVerify flag and is false by default.\n  > Quoting https://golang.org/pkg/crypto/tls/#Config:\n  > > InsecureSkipVerify controls whether a client verifies the\n  > > server's certificate chain and host name.\n  > > If InsecureSkipVerify is true, TLS accepts any certificate\n  > > presented by the server and any host name in that certificate.\n  > > In this mode, TLS is susceptible to man-in-the-middle attacks.\n  > > This should be used only for testing.\n\n* `GOVC_DATACENTER`\n\n* `GOVC_DATASTORE`\n\n* `GOVC_NETWORK`\n\n* `GOVC_RESOURCE_POOL`\n\n* `GOVC_HOST`\n\n* `GOVC_GUEST_LOGIN`: Guest credentials for guest operations\n\n* `GOVC_VIM_NAMESPACE`: Vim namespace defaults to `urn:vim25`\n\n* `GOVC_VIM_VERSION`: Vim version defaults to `6.0`\n\n## Platform specific notes\n\n### MSYS2 (Windows)\n\nInventory path arguments with a leading '/' are subject\nto [Posix path conversion](http://www.mingw.org/wiki/Posix_path_conversion).\n\n## Examples\n\nSeveral examples are embedded in the govc command [help](USAGE.md)\n\n* [Upload ssh public key to a VM](examples/lib/ssh.sh)\n\n* [Create and configure a vCenter VM](examples/vcsa.sh)\n\n* [Create a CoreOS VM](https://github.com/vmware/vic/blob/master/pkg/vsphere/toolbox/toolbox-test.sh)\n\n* [Create a Debian VM](https://github.com/kubernetes/kubernetes/tree/master/cluster/vsphere)\n\n* [Create a Windows VM](https://github.com/dougm/govc-windows-box/blob/master/provision-esx.sh)\n\n* [Create an ESX VM](https://github.com/vmware/vic/blob/master/infra/machines/vcsa/create-esxi-vm.sh)\n\n* [Create a vCenter VM](https://github.com/vmware/vic/blob/master/infra/machines/vcsa/create-vcsa-vm.sh)\n\n* [Create a 
Cluster](https://github.com/vmware/vic/blob/master/infra/machines/vcsa/create-cluster.sh)\n\n## Status\n\nChanges to the cli are subject to [semantic versioning](http://semver.org).\n\nRefer to the [CHANGELOG](CHANGELOG.md) for version to version changes.\n\nWhen new govc commands or flags are added, the PATCH version will be incremented.  This enables you to require a minimum\nversion from within a script, for example:\n\n```\ngovc version -require 0.14\n```\n\n## Projects using govc\n\n* [Emacs govc package](./emacs)\n\n* [Kubernetes vSphere Provider](https://github.com/kubernetes/kubernetes/tree/master/cluster/vsphere)\n\n* [VMware VIC Engine](https://github.com/vmware/vic)\n\n* [vSphere Docker Volume Service](https://github.com/vmware/docker-volume-vsphere)\n\n* [golang/build](https://github.com/golang/build)\n\n## Related projects\n\n* [rvc](https://github.com/vmware/rvc)\n\n## License\n\ngovc is available under the [Apache 2 license](../LICENSE).\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/USAGE.md",
    "content": "# govc usage\n\nThis document is generated from `govc -h` and `govc $cmd -h` commands.\n\nThe following common options are filtered out in this document,\nbut appear via `govc $cmd -h`:\n\n```\n  -cert=                    Certificate [GOVC_CERTIFICATE]\n  -debug=false              Store debug logs [GOVC_DEBUG]\n  -dump=false               Enable output dump\n  -json=false               Enable JSON output\n  -k=false                  Skip verification of server certificate [GOVC_INSECURE]\n  -key=                     Private key [GOVC_PRIVATE_KEY]\n  -persist-session=true     Persist session to disk [GOVC_PERSIST_SESSION]\n  -tls-ca-certs=            TLS CA certificates file [GOVC_TLS_CA_CERTS]\n  -tls-known-hosts=         TLS known hosts file [GOVC_TLS_KNOWN_HOSTS]\n  -u=                       ESX or vCenter URL [GOVC_URL]\n  -vim-namespace=urn:vim25  Vim namespace [GOVC_VIM_NAMESPACE]\n  -vim-version=6.0          Vim version [GOVC_VIM_VERSION]\n  -dc=                      Datacenter [GOVC_DATACENTER]\n  -host.dns=                Find host by FQDN\n  -host.ip=                 Find host by IP address\n  -host.ipath=              Find host by inventory path\n  -host.uuid=               Find host by UUID\n  -vm.dns=                  Find VM by FQDN\n  -vm.ip=                   Find VM by IP address\n  -vm.ipath=                Find VM by inventory path\n  -vm.path=                 Find VM by path to .vmx file\n  -vm.uuid=                 Find VM by UUID\n```\n\n## about\n\n```\nUsage: govc about [OPTIONS]\n\nDisplay About info for HOST.\n\nSystem information including the name, type, version, and build number.\n\nExamples:\n  govc about\n  govc about -json | jq -r .About.ProductLineId\n\nOptions:\n  -l=false                  Include service content\n```\n\n## about.cert\n\n```\nUsage: govc about.cert [OPTIONS]\n\nDisplay TLS certificate info for HOST.\n\nIf the HOST certificate cannot be verified, about.cert will return with exit code 60 (as curl 
does).\nIf the '-k' flag is provided, about.cert will return with exit code 0 in this case.\nThe SHA1 thumbprint can also be used as '-thumbprint' for the 'host.add' and 'cluster.add' commands.\n\nExamples:\n  govc about.cert -k -json | jq -r .ThumbprintSHA1\n  govc about.cert -k -show | sudo tee /usr/local/share/ca-certificates/host.crt\n  govc about.cert -k -thumbprint | tee -a ~/.govmomi/known_hosts\n\nOptions:\n  -show=false               Show PEM encoded server certificate only\n  -thumbprint=false         Output host hash and thumbprint only\n```\n\n## cluster.add\n\n```\nUsage: govc cluster.add [OPTIONS]\n\nAdd HOST to CLUSTER.\n\nThe host is added to the cluster specified by the 'cluster' flag.\n\nExamples:\n  thumbprint=$(govc about.cert -k -u host.example.com -thumbprint | awk '{print $2}')\n  govc cluster.add -cluster ClusterA -hostname host.example.com -username root -password pass -thumbprint $thumbprint\n  govc cluster.add -cluster ClusterB -hostname 10.0.6.1 -username root -password pass -noverify\n\nOptions:\n  -cluster=*                Path to cluster\n  -connect=true             Immediately connect to host\n  -force=false              Force when host is managed by another VC\n  -hostname=                Hostname or IP address of the host\n  -license=                 Assign license key\n  -noverify=false           Accept host thumbprint without verification\n  -password=                Password of administration account on the host\n  -thumbprint=              SHA-1 thumbprint of the host's SSL certificate\n  -username=                Username of administration account on the host\n```\n\n## cluster.change\n\n```\nUsage: govc cluster.change [OPTIONS] CLUSTER...\n\nChange configuration of the given clusters.\n\nExamples:\n  govc cluster.change -drs-enabled -vsan-enabled -vsan-autoclaim ClusterA\n  govc cluster.change -drs-enabled=false ClusterB\n\nOptions:\n  -drs-enabled=<nil>        Enable DRS\n  -drs-mode=                DRS behavior for virtual 
machines: manual, partiallyAutomated, fullyAutomated\n  -ha-enabled=<nil>         Enable HA\n  -vsan-autoclaim=<nil>     Autoclaim storage on cluster hosts\n  -vsan-enabled=<nil>       Enable vSAN\n```\n\n## cluster.create\n\n```\nUsage: govc cluster.create [OPTIONS] CLUSTER\n\nCreate CLUSTER in datacenter.\n\nThe cluster is added to the folder specified by the 'folder' flag. If not given,\nthis defaults to the host folder in the specified or default datacenter.\n\nExamples:\n  govc cluster.create ClusterA\n  govc cluster.create -folder /dc2/test-folder ClusterB\n\nOptions:\n  -folder=                  Inventory folder [GOVC_FOLDER]\n```\n\n## datacenter.create\n\n```\nUsage: govc datacenter.create [OPTIONS] NAME...\n\nOptions:\n  -folder=                  Inventory folder [GOVC_FOLDER]\n```\n\n## datacenter.info\n\n```\nUsage: govc datacenter.info [OPTIONS] [PATH]...\n\nOptions:\n```\n\n## datastore.cp\n\n```\nUsage: govc datastore.cp [OPTIONS] SRC DST\n\nCopy SRC to DST on DATASTORE.\n\nExamples:\n  govc datastore.cp foo/foo.vmx foo/foo.vmx.old\n  govc datastore.cp -f my.vmx foo/foo.vmx\n\nOptions:\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -f=false                  If true, overwrite any identically named file at the destination\n```\n\n## datastore.create\n\n```\nUsage: govc datastore.create [OPTIONS] HOST...\n\nCreate datastore on HOST.\n\nExamples:\n  govc datastore.create -type nfs -name nfsDatastore -remote-host 10.143.2.232 -remote-path /share cluster1\n  govc datastore.create -type vmfs -name vmfsDatastore -disk=mpx.vmhba0:C0:T0:L0 cluster1\n  govc datastore.create -type local -name localDatastore -path /var/datastore host1\n\nOptions:\n  -disk=                    Canonical name of disk (VMFS only)\n  -force=false              Ignore DuplicateName error if datastore is already mounted on a host\n  -host=                    Host system [GOVC_HOST]\n  -mode=readOnly            Access mode for the mount point (readOnly|readWrite)\n  -name=    
                Datastore name\n  -password=                Password to use when connecting (CIFS only)\n  -path=                    Local directory path for the datastore (local only)\n  -remote-host=             Remote hostname of the NAS datastore\n  -remote-path=             Remote path of the NFS mount point\n  -type=                    Datastore type (NFS|NFS41|CIFS|VMFS|local)\n  -username=                Username to use when connecting (CIFS only)\n```\n\n## datastore.disk.create\n\n```\nUsage: govc datastore.disk.create [OPTIONS] VMDK\n\nCreate VMDK on DS.\n\nExamples:\n  govc datastore.mkdir disks\n  govc datastore.disk.create -size 24G disks/disk1.vmdk\n\nOptions:\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -size=10.0GB              Size of new disk\n```\n\n## datastore.disk.info\n\n```\nUsage: govc datastore.disk.info [OPTIONS] VMDK\n\nQuery VMDK info on DS.\n\nExamples:\n  govc datastore.disk.info disks/disk1.vmdk\n\nOptions:\n  -c=false                  Chain format\n  -d=false                  Include datastore in output\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -p=true                   Include parents\n```\n\n## datastore.download\n\n```\nUsage: govc datastore.download [OPTIONS] SOURCE DEST\n\nCopy SOURCE from DS to DEST on the local system.\n\nIf DEST name is \"-\", source is written to stdout.\n\nExamples:\n  govc datastore.download vm-name/vmware.log ./local.log\n  govc datastore.download vm-name/vmware.log - | grep -i error\n\nOptions:\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -host=                    Host system [GOVC_HOST]\n```\n\n## datastore.info\n\n```\nUsage: govc datastore.info [OPTIONS] [PATH]...\n\nOptions:\n```\n\n## datastore.ls\n\n```\nUsage: govc datastore.ls [OPTIONS] [FILE]...\n\nOptions:\n  -R=false                  List subdirectories recursively\n  -a=false                  Do not ignore entries starting with .\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -l=false 
                 Long listing format\n  -p=false                  Append / indicator to directories\n```\n\n## datastore.mkdir\n\n```\nUsage: govc datastore.mkdir [OPTIONS] DIRECTORY\n\nOptions:\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -namespace=false          Return uuid of namespace created on vsan datastore\n  -p=false                  Create intermediate directories as needed\n```\n\n## datastore.mv\n\n```\nUsage: govc datastore.mv [OPTIONS] SRC DST\n\nMove SRC to DST on DATASTORE.\n\nExamples:\n  govc datastore.mv foo/foo.vmx foo/foo.vmx.old\n  govc datastore.mv -f my.vmx foo/foo.vmx\n\nOptions:\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -f=false                  If true, overwrite any identically named file at the destination\n```\n\n## datastore.remove\n\n```\nUsage: govc datastore.remove [OPTIONS] HOST...\n\nRemove datastore from HOST.\n\nExamples:\n  govc datastore.remove -ds nfsDatastore cluster1\n  govc datastore.remove -ds nasDatastore host1 host2 host3\n\nOptions:\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -host=                    Host system [GOVC_HOST]\n```\n\n## datastore.rm\n\n```\nUsage: govc datastore.rm [OPTIONS] FILE\n\nRemove FILE from DATASTORE.\n\nExamples:\n  govc datastore.rm vm/vmware.log\n  govc datastore.rm vm\n  govc datastore.rm -f images/base.vmdk\n\nOptions:\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -f=false                  Force; ignore nonexistent files and arguments\n  -namespace=false          Path is uuid of namespace on vsan datastore\n  -t=true                   Use file type to choose disk or file manager\n```\n\n## datastore.tail\n\n```\nUsage: govc datastore.tail [OPTIONS] PATH\n\nOutput the last part of datastore files.\n\nExamples:\n  govc datastore.tail -n 100 vm-name/vmware.log\n  govc datastore.tail -n 0 -f vm-name/vmware.log\n\nOptions:\n  -c=-1                     Output the last NUM bytes\n  -ds=                      Datastore [GOVC_DATASTORE]\n  
-f=false                  Output appended data as the file grows\n  -host=                    Host system [GOVC_HOST]\n  -n=10                     Output the last NUM lines\n```\n\n## datastore.upload\n\n```\nUsage: govc datastore.upload [OPTIONS] SOURCE DEST\n\nCopy SOURCE from the local system to DEST on DS.\n\nIf SOURCE name is \"-\", read source from stdin.\n\nExamples:\n  govc datastore.upload -ds datastore1 ./config.iso vm-name/config.iso\n  genisoimage ... | govc datastore.upload -ds datastore1 - vm-name/config.iso\n\nOptions:\n  -ds=                      Datastore [GOVC_DATASTORE]\n```\n\n## datastore.vsan.dom.ls\n\n```\nUsage: govc datastore.vsan.dom.ls [OPTIONS] [UUID]...\n\nList vSAN DOM objects in DS.\n\nExamples:\n  govc datastore.vsan.dom.ls\n  govc datastore.vsan.dom.ls -ds vsanDatastore -l\n  govc datastore.vsan.dom.ls -l d85aa758-63f5-500a-3150-0200308e589c\n\nOptions:\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -l=false                  Long listing\n  -o=false                  List orphan objects\n```\n\n## datastore.vsan.dom.rm\n\n```\nUsage: govc datastore.vsan.dom.rm [OPTIONS] UUID...\n\nRemove vSAN DOM objects in DS.\n\nExamples:\n  govc datastore.vsan.dom.rm d85aa758-63f5-500a-3150-0200308e589c\n  govc datastore.vsan.dom.rm -f d85aa758-63f5-500a-3150-0200308e589c\n  govc datastore.vsan.dom.ls -o | xargs govc datastore.vsan.dom.rm\n\nOptions:\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -f=false                  Force delete\n  -v=false                  Print deleted UUIDs to stdout, failed to stderr\n```\n\n## device.boot\n\n```\nUsage: govc device.boot [OPTIONS]\n\nConfigure VM boot settings.\n\nExamples:\n  govc device.boot -vm $vm -delay 1000 -order floppy,cdrom,ethernet,disk\n\nOptions:\n  -delay=0                  Delay in ms before starting the boot sequence\n  -order=                   Boot device order\n  -retry=false              If true, retry boot after retry-delay\n  -retry-delay=0            Delay in 
ms before a boot retry\n  -setup=false              If true, enter BIOS setup on next boot\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## device.cdrom.add\n\n```\nUsage: govc device.cdrom.add [OPTIONS]\n\nAdd CD-ROM device to VM.\n\nExamples:\n  govc device.cdrom.add -vm $vm\n  govc device.ls -vm $vm | grep ide-\n  govc device.cdrom.add -vm $vm -controller ide-200\n  govc device.info cdrom-*\n\nOptions:\n  -controller=              IDE controller name\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## device.cdrom.eject\n\n```\nUsage: govc device.cdrom.eject [OPTIONS]\n\nEject media from CD-ROM device.\n\nIf device is not specified, the first CD-ROM device is used.\n\nExamples:\n  govc device.cdrom.eject -vm vm-1\n  govc device.cdrom.eject -vm vm-1 -device floppy-1\n\nOptions:\n  -device=                  CD-ROM device name\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## device.cdrom.insert\n\n```\nUsage: govc device.cdrom.insert [OPTIONS] ISO\n\nInsert media on datastore into CD-ROM device.\n\nIf device is not specified, the first CD-ROM device is used.\n\nExamples:\n  govc device.cdrom.insert -vm vm-1 -device cdrom-3000 images/boot.iso\n\nOptions:\n  -device=                  CD-ROM device name\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## device.connect\n\n```\nUsage: govc device.connect [OPTIONS] DEVICE...\n\nConnect DEVICE on VM.\n\nExamples:\n  govc device.connect -vm $name cdrom-3000\n\nOptions:\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## device.disconnect\n\n```\nUsage: govc device.disconnect [OPTIONS] DEVICE...\n\nDisconnect DEVICE on VM.\n\nExamples:\n  govc device.disconnect -vm $name cdrom-3000\n\nOptions:\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## device.floppy.add\n\n```\nUsage: govc device.floppy.add [OPTIONS]\n\nAdd floppy device to VM.\n\nExamples:\n  govc device.floppy.add -vm 
$vm\n  govc device.info floppy-*\n\nOptions:\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## device.floppy.eject\n\n```\nUsage: govc device.floppy.eject [OPTIONS]\n\nEject image from floppy device.\n\nIf device is not specified, the first floppy device is used.\n\nExamples:\n  govc device.floppy.eject -vm vm-1\n\nOptions:\n  -device=                  Floppy device name\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## device.floppy.insert\n\n```\nUsage: govc device.floppy.insert [OPTIONS] IMG\n\nInsert IMG on datastore into floppy device.\n\nIf device is not specified, the first floppy device is used.\n\nExamples:\n  govc device.floppy.insert -vm vm-1 vm-1/config.img\n\nOptions:\n  -device=                  Floppy device name\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## device.info\n\n```\nUsage: govc device.info [OPTIONS] [DEVICE]...\n\nDevice info for VM.\n\nExamples:\n  govc device.info -vm $name\n  govc device.info -vm $name disk-*\n  govc device.info -vm $name -json ethernet-0 | jq -r .Devices[].MacAddress\n\nOptions:\n  -net=                     Network [GOVC_NETWORK]\n  -net.adapter=e1000        Network adapter type\n  -net.address=             Network hardware address\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## device.ls\n\n```\nUsage: govc device.ls [OPTIONS]\n\nList devices for VM.\n\nExamples:\n  govc device.ls -vm $name\n\nOptions:\n  -boot=false               List devices configured in the VM's boot options\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## device.remove\n\n```\nUsage: govc device.remove [OPTIONS] DEVICE...\n\nRemove DEVICE from VM.\n\nExamples:\n  govc device.remove -vm $name cdrom-3000\n  govc device.remove -vm $name -keep disk-1000\n\nOptions:\n  -keep=false               Keep files in datastore\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## 
device.scsi.add\n\n```\nUsage: govc device.scsi.add [OPTIONS]\n\nAdd SCSI controller to VM.\n\nExamples:\n  govc device.scsi.add -vm $vm\n  govc device.scsi.add -vm $vm -type pvscsi\n  govc device.info -vm $vm {lsi,pv}*\n\nOptions:\n  -hot=false                Enable hot-add/remove\n  -sharing=noSharing        SCSI sharing\n  -type=lsilogic            SCSI controller type (lsilogic|buslogic|pvscsi|lsilogic-sas)\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## device.serial.add\n\n```\nUsage: govc device.serial.add [OPTIONS]\n\nAdd serial port to VM.\n\nExamples:\n  govc device.serial.add -vm $vm\n  govc device.info -vm $vm serialport-*\n\nOptions:\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## device.serial.connect\n\n```\nUsage: govc device.serial.connect [OPTIONS] URI\n\nConnect service URI to serial port.\n\nIf \"-\" is given as URI, connects file backed device with file name of\ndevice name + .log suffix in the VM Config.Files.LogDirectory.\n\nDefaults to the first serial port if no DEVICE is given.\n\nExamples:\n  govc device.ls | grep serialport-\n  govc device.serial.connect -vm $vm -device serialport-8000 telnet://:33233\n  govc device.info -vm $vm serialport-*\n  govc device.serial.connect -vm $vm \"[datastore1] $vm/console.log\"\n  govc device.serial.connect -vm $vm -\n  govc datastore.tail -f $vm/serialport-8000.log\n\nOptions:\n  -client=false             Use client direction\n  -device=                  serial port device name\n  -vm=                      Virtual machine [GOVC_VM]\n  -vspc-proxy=              vSPC proxy URI\n```\n\n## device.serial.disconnect\n\n```\nUsage: govc device.serial.disconnect [OPTIONS]\n\nDisconnect service URI from serial port.\n\nExamples:\n  govc device.ls | grep serialport-\n  govc device.serial.disconnect -vm $vm -device serialport-8000\n  govc device.info -vm $vm serialport-*\n\nOptions:\n  -device=                  serial port device name\n  -vm=                      Virtual machine 
[GOVC_VM]\n```\n\n## device.usb.add\n\n```\nUsage: govc device.usb.add [OPTIONS]\n\nAdd USB device to VM.\n\nExamples:\n  govc device.usb.add -vm $vm\n  govc device.usb.add -type xhci -vm $vm\n  govc device.info usb*\n\nOptions:\n  -auto=true                Enable ability to hot plug devices\n  -ehci=true                Enable enhanced host controller interface (USB 2.0)\n  -type=usb                 USB controller type (usb|xhci)\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## dvs.add\n\n```\nUsage: govc dvs.add [OPTIONS] HOST...\n\nAdd hosts to DVS.\n\nExamples:\n  govc dvs.add -dvs dvsName -pnic vmnic1 hostA hostB hostC\n\nOptions:\n  -dvs=                     DVS path\n  -host=                    Host system [GOVC_HOST]\n  -pnic=vmnic0              Name of the host physical NIC\n```\n\n## dvs.create\n\n```\nUsage: govc dvs.create [OPTIONS] DVS\n\nCreate DVS (DistributedVirtualSwitch) in datacenter.\n\nThe dvs is added to the folder specified by the 'folder' flag. If not given,\nthis defaults to the network folder in the specified or default datacenter.\n\nExamples:\n  govc dvs.create DSwitch\n  govc dvs.create -product-version 5.5.0 DSwitch\n\nOptions:\n  -folder=                  Inventory folder [GOVC_FOLDER]\n  -product-version=         DVS product version\n```\n\n## dvs.portgroup.add\n\n```\nUsage: govc dvs.portgroup.add [OPTIONS] NAME\n\nAdd portgroup to DVS.\n\nExamples:\n  govc dvs.create DSwitch\n  govc dvs.portgroup.add -dvs DSwitch -type earlyBinding -nports 16 ExternalNetwork\n  govc dvs.portgroup.add -dvs DSwitch -type ephemeral InternalNetwork\n\nOptions:\n  -dvs=                     DVS path\n  -nports=128               Number of ports\n  -type=earlyBinding        Portgroup type (earlyBinding|lateBinding|ephemeral)\n  -vlan=0                   VLAN ID\n```\n\n## dvs.portgroup.change\n\n```\nUsage: govc dvs.portgroup.change [OPTIONS] PATH\n\nChange DVS portgroup configuration.\n\nExamples:\n  govc dvs.portgroup.change -nports 26 
ExternalNetwork\n  govc dvs.portgroup.change -vlan 3214 ExternalNetwork\n\nOptions:\n  -nports=0                 Number of ports\n  -type=earlyBinding        Portgroup type (earlyBinding|lateBinding|ephemeral)\n  -vlan=0                   VLAN ID\n```\n\n## dvs.portgroup.info\n\n```\nUsage: govc dvs.portgroup.info [OPTIONS] DVS\n\nPortgroup info for DVS.\n\nExamples:\n  govc dvs.portgroup.info DSwitch\n  govc find / -type DistributedVirtualSwitch | xargs -n1 govc dvs.portgroup.info\n\nOptions:\n  -active=false             Filter by port active or inactive status\n  -connected=false          Filter by port connected or disconnected status\n  -count=0                  Number of matches to return (0 = unlimited)\n  -inside=true              Filter by port inside or outside status\n  -pg=                      Distributed Virtual Portgroup\n  -uplinkPort=false         Filter for uplink ports\n  -vlan=0                   Filter by VLAN ID (0 = unfiltered)\n```\n\n## env\n\n```\nUsage: govc env [OPTIONS]\n\nOutput the environment variables for this client.\n\nIf credentials are included in the url, they are split into separate variables.\nUseful as bash scripting helper to parse GOVC_URL.\n\nOptions:\n  -x=false                  Output variables for each GOVC_URL component\n```\n\n## events\n\n```\nUsage: govc events [OPTIONS] [PATH]...\n\nDisplay events.\n\nExamples:\n  govc events vm/my-vm1 vm/my-vm2\n  govc events /dc1/vm/* /dc2/vm/*\n  govc ls -t HostSystem host/* | xargs govc events | grep -i vsan\n\nOptions:\n  -f=false                  Follow event stream\n  -force=false              Disable number objects to monitor limit\n  -n=25                     Output the last N events\n```\n\n## extension.info\n\n```\nUsage: govc extension.info [OPTIONS] [KEY]...\n\nOptions:\n```\n\n## extension.register\n\n```\nUsage: govc extension.register [OPTIONS]\n\nOptions:\n  -update=false             Update extension\n```\n\n## extension.setcert\n\n```\nUsage: govc 
extension.setcert [OPTIONS] ID\n\nSet certificate for the extension ID.\n\nThe '-cert-pem' option can be one of the following:\n'-' : Read the certificate from stdin\n'+' : Generate a new key pair and save locally to ID.crt and ID.key\n... : Any other value is passed as-is to ExtensionManager.SetCertificate\n\nOptions:\n  -cert-pem=-               PEM encoded certificate\n  -org=VMware               Organization for generated certificate\n```\n\n## extension.unregister\n\n```\nUsage: govc extension.unregister [OPTIONS]\n\nOptions:\n```\n\n## fields.add\n\n```\nUsage: govc fields.add [OPTIONS] NAME\n\nOptions:\n```\n\n## fields.ls\n\n```\nUsage: govc fields.ls [OPTIONS]\n\nOptions:\n```\n\n## fields.rename\n\n```\nUsage: govc fields.rename [OPTIONS] KEY NAME\n\nOptions:\n```\n\n## fields.rm\n\n```\nUsage: govc fields.rm [OPTIONS] KEY...\n\nOptions:\n```\n\n## fields.set\n\n```\nUsage: govc fields.set [OPTIONS] KEY VALUE PATH...\n\nOptions:\n```\n\n## find\n\n```\nUsage: govc find [OPTIONS] [ROOT] [KEY VAL]...\n\nFind managed objects.\n\nROOT can be an inventory path or ManagedObjectReference.\nROOT defaults to '.', an alias for the root folder or DC if set.\n\nOptional KEY VAL pairs can be used to filter results against object instance properties.\n\nThe '-type' flag value can be a managed entity type or one of the following aliases:\n\n  a    VirtualApp\n  c    ClusterComputeResource\n  d    Datacenter\n  f    Folder\n  g    DistributedVirtualPortgroup\n  h    HostSystem\n  m    VirtualMachine\n  n    Network\n  o    OpaqueNetwork\n  p    ResourcePool\n  r    ComputeResource\n  s    Datastore\n  w    DistributedVirtualSwitch\n\nExamples:\n  govc find\n  govc find /dc1 -type c\n  govc find vm -name my-vm-*\n  govc find . -type n\n  govc find . -type m -runtime.powerState poweredOn\n  govc find . -type m -datastore $(govc find -i datastore -name vsanDatastore)\n  govc find . -type s -summary.type vsan\n  govc find . 
-type h -hardware.cpuInfo.numCpuCores 16\n\nOptions:\n  -i=false                  Print the managed object reference\n  -maxdepth=-1              Max depth\n  -name=*                   Resource name\n  -type=[]                  Resource type\n```\n\n## firewall.ruleset.find\n\n```\nUsage: govc firewall.ruleset.find [OPTIONS]\n\nFind firewall rulesets matching the given rule.\n\nFor a complete list of rulesets: govc host.esxcli network firewall ruleset list\nFor a complete list of rules:    govc host.esxcli network firewall ruleset rule list\n\nExamples:\n  govc firewall.ruleset.find -direction inbound -port 22\n  govc firewall.ruleset.find -direction outbound -port 2377\n\nOptions:\n  -c=true                   Check if esx firewall is enabled\n  -direction=outbound       Direction\n  -enabled=true             Find enabled rule sets if true, disabled if false\n  -host=                    Host system [GOVC_HOST]\n  -port=0                   Port\n  -proto=tcp                Protocol\n  -type=dst                 Port type\n```\n\n## folder.create\n\n```\nUsage: govc folder.create [OPTIONS] PATH...\n\nCreate folder with PATH.\n\nExamples:\n  govc folder.create /dc1/vm/folder-foo\n  govc object.mv /dc1/vm/vm-foo-* /dc1/vm/folder-foo\n  govc folder.create -pod /dc1/datastore/sdrs\n  govc object.mv /dc1/datastore/iscsi-* /dc1/datastore/sdrs\n\nOptions:\n  -pod=false                Create folder(s) of type StoragePod (DatastoreCluster)\n```\n\n## folder.info\n\n```\nUsage: govc folder.info [OPTIONS] [PATH]...\n\nOptions:\n```\n\n## guest.chmod\n\n```\nUsage: govc guest.chmod [OPTIONS] MODE FILE\n\nChange FILE MODE on VM.\n\nExamples:\n  govc guest.chmod -vm $name 0644 /var/log/foo.log\n\nOptions:\n  -l=:                      Guest VM credentials [GOVC_GUEST_LOGIN]\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## guest.chown\n\n```\nUsage: govc guest.chown [OPTIONS] UID[:GID] FILE\n\nChange FILE UID and GID on VM.\n\nExamples:\n  govc guest.chown -vm $name 
UID[:GID] /var/log/foo.log\n\nOptions:\n  -l=:                      Guest VM credentials [GOVC_GUEST_LOGIN]\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## guest.download\n\n```\nUsage: govc guest.download [OPTIONS] SOURCE DEST\n\nCopy SOURCE from the guest VM to DEST on the local system.\n\nIf DEST name is \"-\", source is written to stdout.\n\nExamples:\n  govc guest.download -l user:pass -vm=my-vm /var/log/my.log ./local.log\n  govc guest.download -l user:pass -vm=my-vm /etc/motd -\n\nOptions:\n  -f=false                  If set, the local destination file is clobbered\n  -l=:                      Guest VM credentials [GOVC_GUEST_LOGIN]\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## guest.getenv\n\n```\nUsage: govc guest.getenv [OPTIONS] [NAME]...\n\nRead NAME environment variables from VM.\n\nExamples:\n  govc guest.getenv -vm $name\n  govc guest.getenv -vm $name HOME\n\nOptions:\n  -l=:                      Guest VM credentials [GOVC_GUEST_LOGIN]\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## guest.kill\n\n```\nUsage: govc guest.kill [OPTIONS]\n\nKill process ID on VM.\n\nExamples:\n  govc guest.kill -vm $name -p 12345\n\nOptions:\n  -l=:                      Guest VM credentials [GOVC_GUEST_LOGIN]\n  -p=[]                     Process ID\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## guest.ls\n\n```\nUsage: govc guest.ls [OPTIONS] PATH\n\nList PATH files in VM.\n\nExamples:\n  govc guest.ls -vm $name /tmp\n\nOptions:\n  -l=:                      Guest VM credentials [GOVC_GUEST_LOGIN]\n  -s=false                  Simple path only listing\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## guest.mkdir\n\n```\nUsage: govc guest.mkdir [OPTIONS] PATH\n\nCreate directory PATH in VM.\n\nExamples:\n  govc guest.mkdir -vm $name /tmp/logs\n  govc guest.mkdir -vm $name -p /tmp/logs/foo/bar\n\nOptions:\n  -l=:                      Guest VM credentials [GOVC_GUEST_LOGIN]\n  -p=false   
               Create intermediate directories as needed\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## guest.mktemp\n\n```\nUsage: govc guest.mktemp [OPTIONS]\n\nCreate a temporary file or directory in VM.\n\nExamples:\n  govc guest.mktemp -vm $name\n  govc guest.mktemp -vm $name -d\n  govc guest.mktemp -vm $name -t myprefix\n  govc guest.mktemp -vm $name -p /var/tmp/$USER\n\nOptions:\n  -d=false                  Make a directory instead of a file\n  -l=:                      Guest VM credentials [GOVC_GUEST_LOGIN]\n  -p=                       If specified, create relative to this directory\n  -s=                       Suffix\n  -t=                       Prefix\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## guest.mv\n\n```\nUsage: govc guest.mv [OPTIONS] SOURCE DEST\n\nMove (rename) files in VM.\n\nExamples:\n  govc guest.mv -vm $name /tmp/foo.sh /tmp/bar.sh\n  govc guest.mv -vm $name -n /tmp/baz.sh /tmp/bar.sh\n\nOptions:\n  -l=:                      Guest VM credentials [GOVC_GUEST_LOGIN]\n  -n=false                  Do not overwrite an existing file\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## guest.ps\n\n```\nUsage: govc guest.ps [OPTIONS]\n\nList processes in VM.\n\nBy default, unless the '-e', '-p' or '-U' flag is specified, only processes owned\nby the '-l' flag user are displayed.\n\nThe '-x' and '-X' flags only apply to processes started by vmware-tools,\nsuch as those started with the govc guest.start command.\n\nExamples:\n  govc guest.ps -vm $name\n  govc guest.ps -vm $name -e\n  govc guest.ps -vm $name -p 12345\n  govc guest.ps -vm $name -U root\n\nOptions:\n  -U=                       Select by process UID\n  -X=false                  Wait for process to exit\n  -e=false                  Select all processes\n  -l=:                      Guest VM credentials [GOVC_GUEST_LOGIN]\n  -p=[]                     Select by process ID\n  -vm=                      Virtual machine [GOVC_VM]\n  -x=false   
               Output exit time and code\n```\n\n## guest.rm\n\n```\nUsage: govc guest.rm [OPTIONS] PATH\n\nRemove file PATH in VM.\n\nExamples:\n  govc guest.rm -vm $name /tmp/foo.log\n\nOptions:\n  -l=:                      Guest VM credentials [GOVC_GUEST_LOGIN]\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## guest.rmdir\n\n```\nUsage: govc guest.rmdir [OPTIONS] PATH\n\nRemove directory PATH in VM.\n\nExamples:\n  govc guest.rmdir -vm $name /tmp/empty-dir\n  govc guest.rmdir -vm $name -r /tmp/non-empty-dir\n\nOptions:\n  -l=:                      Guest VM credentials [GOVC_GUEST_LOGIN]\n  -r=false                  Recursive removal\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## guest.start\n\n```\nUsage: govc guest.start [OPTIONS] PATH [ARG]...\n\nStart program in VM.\n\nThe process can have its status queried with govc guest.ps.\nWhen the process completes, its exit code and end time will be available for 5 minutes after completion.\n\nExamples:\n  govc guest.start -vm $name /bin/mount /dev/hdb1 /data\n  pid=$(govc guest.start -vm $name /bin/long-running-thing)\n  govc guest.ps -vm $name -p $pid -X\n\nOptions:\n  -C=                       The absolute path of the working directory for the program to start\n  -e=[]                     Set environment variable (key=val)\n  -l=:                      Guest VM credentials [GOVC_GUEST_LOGIN]\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## guest.touch\n\n```\nUsage: govc guest.touch [OPTIONS] FILE\n\nChange FILE times on VM.\n\nExamples:\n  govc guest.touch -vm $name /var/log/foo.log\n  govc guest.touch -vm $name -d \"$(date -d '1 day ago')\" /var/log/foo.log\n\nOptions:\n  -a=false                  Change only the access time\n  -c=false                  Do not create any files\n  -d=                       Use DATE instead of current time\n  -l=:                      Guest VM credentials [GOVC_GUEST_LOGIN]\n  -vm=                      Virtual machine 
[GOVC_VM]\n```\n\n## guest.upload\n\n```\nUsage: govc guest.upload [OPTIONS] SOURCE DEST\n\nCopy SOURCE from the local system to DEST in the guest VM.\n\nIf SOURCE name is \"-\", read source from stdin.\n\nExamples:\n  govc guest.upload -l user:pass -vm=my-vm ~/.ssh/id_rsa.pub /home/$USER/.ssh/authorized_keys\n  cowsay \"have a great day\" | govc guest.upload -l user:pass -vm=my-vm - /etc/motd\n\nOptions:\n  -f=false                  If set, the guest destination file is clobbered\n  -gid=<nil>                Group ID\n  -l=:                      Guest VM credentials [GOVC_GUEST_LOGIN]\n  -perm=0                   File permissions\n  -uid=<nil>                User ID\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## host.account.create\n\n```\nUsage: govc host.account.create [OPTIONS]\n\nCreate local account on HOST.\n\nExamples:\n  govc host.account.create -id $USER -password password-for-esx60\n\nOptions:\n  -description=             The description of the specified account\n  -host=                    Host system [GOVC_HOST]\n  -id=                      The ID of the specified account\n  -password=                The password for the specified account id\n```\n\n## host.account.remove\n\n```\nUsage: govc host.account.remove [OPTIONS]\n\nRemove local account on HOST.\n\nExamples:\n  govc host.account.remove -id $USER\n\nOptions:\n  -description=             The description of the specified account\n  -host=                    Host system [GOVC_HOST]\n  -id=                      The ID of the specified account\n  -password=                The password for the specified account id\n```\n\n## host.account.update\n\n```\nUsage: govc host.account.update [OPTIONS]\n\nUpdate local account on HOST.\n\nExamples:\n  govc host.account.update -id root -password password-for-esx60\n\nOptions:\n  -description=             The description of the specified account\n  -host=                    Host system [GOVC_HOST]\n  -id=                      The ID of the 
specified account\n  -password=                The password for the specified account id\n```\n\n## host.add\n\n```\nUsage: govc host.add [OPTIONS]\n\nAdd host to datacenter.\n\nThe host is added to the folder specified by the 'folder' flag. If not given,\nthis defaults to the host folder in the specified or default datacenter.\n\nExamples:\n  thumbprint=$(govc about.cert -k -u host.example.com -thumbprint | awk '{print $2}')\n  govc host.add -hostname host.example.com -username root -password pass -thumbprint $thumbprint\n  govc host.add -hostname 10.0.6.1 -username root -password pass -noverify\n\nOptions:\n  -connect=true             Immediately connect to host\n  -folder=                  Inventory folder [GOVC_FOLDER]\n  -force=false              Force when host is managed by another VC\n  -hostname=                Hostname or IP address of the host\n  -noverify=false           Accept host thumbprint without verification\n  -password=                Password of administration account on the host\n  -thumbprint=              SHA-1 thumbprint of the host's SSL certificate\n  -username=                Username of administration account on the host\n```\n\n## host.autostart.add\n\n```\nUsage: govc host.autostart.add [OPTIONS] VM...\n\nOptions:\n  -host=                      Host system [GOVC_HOST]\n  -start-action=powerOn       Start Action\n  -start-delay=-1             Start Delay\n  -start-order=-1             Start Order\n  -stop-action=systemDefault  Stop Action\n  -stop-delay=-1              Stop Delay\n  -wait=systemDefault         Wait for Heartbeat Setting (systemDefault|yes|no)\n```\n\n## host.autostart.configure\n\n```\nUsage: govc host.autostart.configure [OPTIONS] \n\nOptions:\n  -enabled=<nil>             \n  -host=                     Host system [GOVC_HOST]\n  -start-delay=0             \n  -stop-action=              \n  -stop-delay=0              \n  -wait-for-heartbeat=<nil>  \n```\n\n## host.autostart.info\n\n```\nUsage: govc host.autostart.info 
[OPTIONS] \n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.autostart.remove\n\n```\nUsage: govc host.autostart.remove [OPTIONS] VM...\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.cert.csr\n\n```\nUsage: govc host.cert.csr [OPTIONS]\n\nGenerate a certificate-signing request (CSR) for HOST.\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n  -ip=false                 Use IP address as CN\n```\n\n## host.cert.import\n\n```\nUsage: govc host.cert.import [OPTIONS] FILE\n\nInstall SSL certificate FILE on HOST.\n\nIf FILE name is \"-\", read certificate from stdin.\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.cert.info\n\n```\nUsage: govc host.cert.info [OPTIONS]\n\nDisplay SSL certificate info for HOST.\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.date.change\n\n```\nUsage: govc host.date.change [OPTIONS]\n\nChange date and time for HOST.\n\nExamples:\n  govc host.date.change -date \"$(date -u)\"\n  govc host.date.change -server time.vmware.com\n  govc host.service enable ntpd\n  govc host.service start ntpd\n\nOptions:\n  -date=                    Update the date/time on the host\n  -host=                    Host system [GOVC_HOST]\n  -server=                  IP or FQDN for NTP server(s)\n  -tz=                      Change timezone of the host\n```\n\n## host.date.info\n\n```\nUsage: govc host.date.info [OPTIONS]\n\nDisplay date and time info for HOST.\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.disconnect\n\n```\nUsage: govc host.disconnect [OPTIONS]\n\nDisconnect HOST from vCenter.\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.esxcli\n\n```\nUsage: govc host.esxcli [OPTIONS] COMMAND [ARG]...\n\nInvoke esxcli command on HOST.\n\nOutput is rendered in table form when possible, unless disabled with '-hints=false'.\n\nExamples:\n  govc 
host.esxcli network ip connection list\n  govc host.esxcli system settings advanced set -o /Net/GuestIPHack -i 1\n  govc host.esxcli network firewall ruleset set -r remoteSerialPort -e true\n  govc host.esxcli network firewall set -e false\n\nOptions:\n  -hints=true               Use command info hints when formatting output\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.info\n\n```\nUsage: govc host.info [OPTIONS]\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.maintenance.enter\n\n```\nUsage: govc host.maintenance.enter [OPTIONS] HOST...\n\nPut HOST in maintenance mode.\n\nWhile this task is running and when the host is in maintenance mode,\nno VMs can be powered on and no provisioning operations can be performed on the host.\n\nOptions:\n  -evacuate=false           Evacuate powered off VMs\n  -host=                    Host system [GOVC_HOST]\n  -timeout=0                Timeout\n```\n\n## host.maintenance.exit\n\n```\nUsage: govc host.maintenance.exit [OPTIONS] HOST...\n\nTake HOST out of maintenance mode.\n\nThis blocks if any concurrent running maintenance-only host configurations operations are being performed.\nFor example, if VMFS volumes are being upgraded.\n\nThe 'timeout' flag is the number of seconds to wait for the exit maintenance mode to succeed.\nIf the timeout is less than or equal to zero, there is no timeout.\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n  -timeout=0                Timeout\n```\n\n## host.option.ls\n\n```\nUsage: govc host.option.ls [OPTIONS] [NAME]\n\nList option with the given NAME.\n\nIf NAME ends with a dot, all options for that subtree are listed.\n\nExamples:\n  govc host.option.ls\n  govc host.option.ls Config.HostAgent.\n  govc host.option.ls Config.HostAgent.plugins.solo.enableMob\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.option.set\n\n```\nUsage: govc host.option.set [OPTIONS] NAME VALUE\n\nSet option NAME 
to VALUE.\n\nExamples:\n  govc host.option.set Config.HostAgent.plugins.solo.enableMob true\n  govc host.option.set Config.HostAgent.log.level verbose\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.portgroup.add\n\n```\nUsage: govc host.portgroup.add [OPTIONS] NAME\n\nAdd portgroup to HOST.\n\nExamples:\n  govc host.portgroup.add -vswitch vSwitch0 -vlan 3201 bridge\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n  -vlan=0                   VLAN ID\n  -vswitch=                 vSwitch Name\n```\n\n## host.portgroup.change\n\n```\nUsage: govc host.portgroup.change [OPTIONS] NAME\n\nChange configuration of HOST portgroup NAME.\n\nExamples:\n  govc host.portgroup.change -allow-promiscuous -forged-transmits -mac-changes \"VM Network\"\n  govc host.portgroup.change -vswitch-name vSwitch1 \"Management Network\"\n\nOptions:\n  -allow-promiscuous=<nil>  Allow promiscuous mode\n  -forged-transmits=<nil>   Allow forged transmits\n  -host=                    Host system [GOVC_HOST]\n  -mac-changes=<nil>        Allow MAC changes\n  -name=                    Portgroup name\n  -vlan-id=-1               VLAN ID\n  -vswitch-name=            vSwitch name\n```\n\n## host.portgroup.info\n\n```\nUsage: govc host.portgroup.info [OPTIONS]\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.portgroup.remove\n\n```\nUsage: govc host.portgroup.remove [OPTIONS] NAME\n\nRemove portgroup from HOST.\n\nExamples:\n  govc host.portgroup.remove bridge\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.reconnect\n\n```\nUsage: govc host.reconnect [OPTIONS]\n\nReconnect HOST to vCenter.\n\nThis command can also be used to change connection properties (hostname, fingerprint, username, password),\nwithout disconnecting the host.\n\nOptions:\n  -force=false              Force when host is managed by another VC\n  -host=                    Host system [GOVC_HOST]\n  -hostname=                
Hostname or IP address of the host\n  -noverify=false           Accept host thumbprint without verification\n  -password=                Password of administration account on the host\n  -sync-state=false         Sync state\n  -thumbprint=              SHA-1 thumbprint of the host's SSL certificate\n  -username=                Username of administration account on the host\n```\n\n## host.remove\n\n```\nUsage: govc host.remove [OPTIONS] HOST...\n\nRemove HOST from vCenter.\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.service\n\n```\nUsage: govc host.service [OPTIONS] ACTION ID\n\nApply host service ACTION to service ID.\n\nWhere ACTION is one of: start, stop, restart, status, enable, disable\n\nExamples:\n  govc host.service enable TSM-SSH\n  govc host.service start TSM-SSH\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.service.ls\n\n```\nUsage: govc host.service.ls [OPTIONS]\n\nList HOST services.\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.shutdown\n\n```\nUsage: govc host.shutdown [OPTIONS]\n\nShutdown HOST.\n\nOptions:\n  -f=false                  Force shutdown when host is not in maintenance mode\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.storage.info\n\n```\nUsage: govc host.storage.info [OPTIONS]\n\nShow HOST storage system information.\n\nExamples:\n  govc ls -t HostSystem host/* | xargs -n1 govc host.storage.info -unclaimed -host\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n  -rescan=false             Rescan for new storage devices\n  -t=lun                    Type (hba,lun)\n  -unclaimed=false          Only show disks that can be used as new VMFS datastores\n```\n\n## host.storage.mark\n\n```\nUsage: govc host.storage.mark [OPTIONS] DEVICE_PATH\n\nMark device at DEVICE_PATH.\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n  -local=<nil>              Mark as local\n  -ssd=<nil>          
      Mark as SSD\n```\n\n## host.storage.partition\n\n```\nUsage: govc host.storage.partition [OPTIONS] DEVICE_PATH\n\nShow partition table for device at DEVICE_PATH.\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.vnic.info\n\n```\nUsage: govc host.vnic.info [OPTIONS]\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.vnic.service\n\n```\nUsage: govc host.vnic.service [OPTIONS] SERVICE DEVICE\n\n\nEnable or disable service on a virtual nic device.\n\nWhere SERVICE is one of: vmotion|faultToleranceLogging|vSphereReplication|vSphereReplicationNFC|management|vsan|vSphereProvisioning\nWhere DEVICE is one of: vmk0|vmk1|...\n\nExamples:\n  govc host.vnic.service -host hostname -enable vsan vmk0\n  govc host.vnic.service -host hostname -enable=false vmotion vmk1\n\nOptions:\n  -enable=true              Enable service\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.vswitch.add\n\n```\nUsage: govc host.vswitch.add [OPTIONS] NAME\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n  -mtu=0                    MTU\n  -nic=                     Bridge nic device\n  -ports=128                Number of ports\n```\n\n## host.vswitch.info\n\n```\nUsage: govc host.vswitch.info [OPTIONS]\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## host.vswitch.remove\n\n```\nUsage: govc host.vswitch.remove [OPTIONS] NAME\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## import.ova\n\n```\nUsage: govc import.ova [OPTIONS] PATH_TO_OVA\n\nOptions:\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -folder=                  Path to folder to add the VM to\n  -host=                    Host system [GOVC_HOST]\n  -name=                    Name to use for new entity\n  -options=                 Options spec file path for VM deployment\n  -pool=                    Resource pool [GOVC_RESOURCE_POOL]\n```\n\n## import.ovf\n\n```\nUsage: govc 
import.ovf [OPTIONS] PATH_TO_OVF\n\nOptions:\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -folder=                  Path to folder to add the VM to\n  -host=                    Host system [GOVC_HOST]\n  -name=                    Name to use for new entity\n  -options=                 Options spec file path for VM deployment\n  -pool=                    Resource pool [GOVC_RESOURCE_POOL]\n```\n\n## import.spec\n\n```\nUsage: govc import.spec [OPTIONS] PATH_TO_OVF_OR_OVA\n\nOptions:\n  -verbose=false  Verbose spec output\n```\n\n## import.vmdk\n\n```\nUsage: govc import.vmdk [OPTIONS] PATH_TO_VMDK [REMOTE_DIRECTORY]\n\nOptions:\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -force=false              Overwrite existing disk\n  -keep=false               Keep uploaded disk after import\n  -pool=                    Resource pool [GOVC_RESOURCE_POOL]\n  -upload=true              Upload specified disk\n```\n\n## license.add\n\n```\nUsage: govc license.add [OPTIONS] KEY...\n\nOptions:\n```\n\n## license.assign\n\n```\nUsage: govc license.assign [OPTIONS] KEY\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n  -name=                    Display name\n  -remove=false             Remove assignment\n```\n\n## license.assigned.ls\n\n```\nUsage: govc license.assigned.ls [OPTIONS]\n\nOptions:\n  -id=                      Entity ID\n```\n\n## license.decode\n\n```\nUsage: govc license.decode [OPTIONS] KEY...\n\nOptions:\n  -feature=                 List licenses with given feature\n```\n\n## license.ls\n\n```\nUsage: govc license.ls [OPTIONS]\n\nOptions:\n  -feature=                 List licenses with given feature\n```\n\n## license.remove\n\n```\nUsage: govc license.remove [OPTIONS] KEY...\n\nOptions:\n```\n\n## logs\n\n```\nUsage: govc logs [OPTIONS]\n\nView VPX and ESX logs.\n\nThe '-log' option defaults to \"hostd\" when connected directly to a host or\nwhen connected to VirtualCenter and a '-host' option is given.  
Otherwise,\nthe '-log' option defaults to \"vpxd:vpxd.log\".  The '-host' option is ignored\nwhen connected directly to a host.  See 'govc logs.ls' for other '-log' options.\n\nExamples:\n  govc logs -n 1000 -f\n  govc logs -host esx1\n  govc logs -host esx1 -log vmkernel\n\nOptions:\n  -f=false                  Follow log file changes\n  -host=                    Host system [GOVC_HOST]\n  -log=                     Log file key\n  -n=25                     Output the last N log lines\n```\n\n## logs.download\n\n```\nUsage: govc logs.download [OPTIONS] [PATH]...\n\nGenerate diagnostic bundles.\n\nA diagnostic bundle includes log files and other configuration information.\n\nUse PATH to include a specific set of hosts to include.\n\nExamples:\n  govc logs.download\n  govc logs.download host-a host-b\n\nOptions:\n  -default=true             Specifies if the bundle should include the default server\n```\n\n## logs.ls\n\n```\nUsage: govc logs.ls [OPTIONS]\n\nList diagnostic log keys.\n\nExamples:\n  govc logs.ls\n  govc logs.ls -host host-a\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n```\n\n## ls\n\n```\nUsage: govc ls [OPTIONS] [PATH]...\n\nList inventory items.\n\nExamples:\n  govc ls -l '*'\n  govc ls -t ClusterComputeResource host\n  govc ls -t Datastore host/ClusterA/* | grep -v local | xargs -n1 basename | sort | uniq\n\nOptions:\n  -L=false                  Follow managed object references\n  -i=false                  Print the managed object reference\n  -l=false                  Long listing format\n  -t=                       Object type\n```\n\n## metric.change\n\n```\nUsage: govc metric.change [OPTIONS] NAME...\n\nChange counter NAME levels.\n\nExamples:\n  govc metric.change -level 1 net.bytesRx.average net.bytesTx.average\n\nOptions:\n  -device-level=0           Level for the per device counter\n  -i=0                      Interval ID\n  -level=0                  Level for the aggregate counter\n```\n\n## metric.info\n\n```\nUsage: 
govc metric.info [OPTIONS] PATH [NAME]...\n\nMetric info for NAME.\n\nIf PATH is a value other than '-', provider summary and instance list are included\nfor the given object type.\n\nIf NAME is not specified, all available metrics for the given INTERVAL are listed.\nAn object PATH must be provided in this case.\n\nExamples:\n  govc metric.info vm/my-vm\n  govc metric.info -i 300 vm/my-vm\n  govc metric.info - cpu.usage.average\n  govc metric.info /dc1/host/cluster cpu.usage.average\n\nOptions:\n  -i=0                      Interval ID\n```\n\n## metric.interval.change\n\n```\nUsage: govc metric.interval.change [OPTIONS]\n\nChange historical metric intervals.\n\nExamples:\n  govc metric.interval.change -i 300 -level 2\n  govc metric.interval.change -i 86400 -enabled=false\n\nOptions:\n  -enabled=<nil>            Enable or disable\n  -i=0                      Interval ID\n  -level=0                  Level\n```\n\n## metric.interval.info\n\n```\nUsage: govc metric.interval.info [OPTIONS]\n\nList historical metric intervals.\n\nExamples:\n  govc metric.interval.info\n  govc metric.interval.info -i 300\n\nOptions:\n  -i=0                      Interval ID\n```\n\n## metric.ls\n\n```\nUsage: govc metric.ls [OPTIONS] PATH\n\nList available metrics for PATH.\n\nExamples:\n  govc metric.ls /dc1/host/cluster1\n  govc metric.ls datastore/*\n  govc metric.ls vm/* | grep mem. | xargs govc metric.sample vm/*\n\nOptions:\n  -i=0                      Interval ID\n  -l=false                  Long listing format\n```\n\n## metric.reset\n\n```\nUsage: govc metric.reset [OPTIONS] NAME...\n\nReset counter NAME to the default level of data collection.\n\nExamples:\n  govc metric.reset net.bytesRx.average net.bytesTx.average\n\nOptions:\n  -i=0                      Interval ID\n```\n\n## metric.sample\n\n```\nUsage: govc metric.sample [OPTIONS] PATH... 
NAME...\n\nSample for object PATH of metric NAME.\n\nInterval ID defaults to 20 (realtime) if supported, otherwise 300 (5m interval).\n\nBy default, INSTANCE '*' samples all instances and the aggregate counter.\nAn INSTANCE value of '-' will only sample the aggregate counter.\nAn INSTANCE value other than '*' or '-' will only sample the given instance counter.\n\nIf PLOT value is set to '-', output a gnuplot script.  If non-empty with another\nvalue, PLOT will pipe the script to gnuplot for you.  The value is also used to set\nthe gnuplot 'terminal' variable, unless the value is that of the DISPLAY env var.\nOnly 1 metric NAME can be specified when the PLOT flag is set.\n\nExamples:\n  govc metric.sample host/cluster1/* cpu.usage.average\n  govc metric.sample -plot .png host/cluster1/* cpu.usage.average | xargs open\n  govc metric.sample vm/* net.bytesTx.average net.bytesTx.average\n  govc metric.sample -instance vmnic0 vm/* net.bytesTx.average\n  govc metric.sample -instance - vm/* net.bytesTx.average\n\nOptions:\n  -d=30                     Limit object display name to D chars\n  -i=0                      Interval ID\n  -instance=*               Instance\n  -n=6                      Max number of samples\n  -plot=                    Plot data using gnuplot\n  -t=false                  Include sample times\n```\n\n## object.collect\n\n```\nUsage: govc object.collect [OPTIONS] [MOID] [PROPERTY]...\n\nCollect managed object properties.\n\nMOID can be an inventory path or ManagedObjectReference.\nMOID defaults to '-', an alias for 'ServiceInstance:ServiceInstance'.\n\nBy default only the current property value(s) are collected.  
Use the '-n' flag to wait for updates.\n\nExamples:\n  govc object.collect - content\n  govc object.collect -s HostSystem:ha-host hardware.systemInfo.uuid\n  govc object.collect -s /ha-datacenter/vm/foo overallStatus\n  govc object.collect -json -n=-1 EventManager:ha-eventmgr latestEvent | jq .\n  govc object.collect -json -s $(govc object.collect -s - content.perfManager) description.counterType | jq .\n\nOptions:\n  -n=0                      Wait for N property updates\n  -s=false                  Output property value only\n```\n\n## object.destroy\n\n```\nUsage: govc object.destroy [OPTIONS] PATH...\n\nDestroy managed objects.\n\nExamples:\n  govc object.destroy /dc1/network/dvs /dc1/host/cluster\n\nOptions:\n```\n\n## object.method\n\n```\nUsage: govc object.method [OPTIONS] PATH...\n\nEnable or disable methods for managed objects.\n\nExamples:\n  govc object.method -name Destroy_Task -enable=false /dc1/vm/foo\n  govc object.collect /dc1/vm/foo disabledMethod | grep --color Destroy_Task\n  govc object.method -name Destroy_Task -enable /dc1/vm/foo\n\nOptions:\n  -enable=true              Enable method\n  -name=                    Method name\n  -reason=                  Reason for disabling method\n  -source=govc              Source ID\n```\n\n## object.mv\n\n```\nUsage: govc object.mv [OPTIONS] PATH... 
FOLDER\n\nMove managed entities to FOLDER.\n\nExamples:\n  govc folder.create /dc1/host/example\n  govc object.mv /dc2/host/*.example.com /dc1/host/example\n\nOptions:\n```\n\n## object.reload\n\n```\nUsage: govc object.reload [OPTIONS] PATH...\n\nReload managed object state.\n\nExamples:\n  govc datastore.upload $vm.vmx $vm/$vm.vmx\n  govc object.reload /dc1/vm/$vm\n\nOptions:\n```\n\n## object.rename\n\n```\nUsage: govc object.rename [OPTIONS] PATH NAME\n\nRename managed objects.\n\nExamples:\n  govc object.rename /dc1/network/dvs1 Switch1\n\nOptions:\n```\n\n## option.ls\n\n```\nUsage: govc option.ls [OPTIONS] [NAME]\n\nList option with the given NAME.\n\nIf NAME ends with a dot, all options for that subtree are listed.\n\nExamples:\n  govc option.ls\n  govc option.ls config.vpxd.sso.\n  govc option.ls config.vpxd.sso.sts.uri\n\nOptions:\n```\n\n## option.set\n\n```\nUsage: govc option.set [OPTIONS] NAME VALUE\n\nSet option NAME to VALUE.\n\nExamples:\n  govc option.set log.level info\n  govc option.set logger.Vsan verbose\n\nOptions:\n```\n\n## permissions.ls\n\n```\nUsage: govc permissions.ls [OPTIONS] [PATH]...\n\nList the permissions defined on or effective on managed entities.\n\nExamples:\n  govc permissions.ls\n  govc permissions.ls /dc1/host/cluster1\n\nOptions:\n  -a=true                   Include inherited permissions defined by parent entities\n  -i=false                  Use moref instead of inventory path\n```\n\n## permissions.remove\n\n```\nUsage: govc permissions.remove [OPTIONS] [PATH]...\n\nRemoves a permission rule from managed entities.\n\nExamples:\n  govc permissions.remove -principal root\n  govc permissions.remove -principal $USER@vsphere.local -role Admin /dc1/host/cluster1\n\nOptions:\n  -group=false              True, if principal refers to a group name; false, for a user name\n  -i=false                  Use moref instead of inventory path\n  -principal=               User or group for which the permission is defined\n```\n\n## 
permissions.set\n\n```\nUsage: govc permissions.set [OPTIONS] [PATH]...\n\nSet the permissions managed entities.\n\nExamples:\n  govc permissions.set -principal root -role Admin\n  govc permissions.set -principal $USER@vsphere.local -role Admin /dc1/host/cluster1\n\nOptions:\n  -group=false              True, if principal refers to a group name; false, for a user name\n  -i=false                  Use moref instead of inventory path\n  -principal=               User or group for which the permission is defined\n  -propagate=true           Whether or not this permission propagates down the hierarchy to sub-entities\n  -role=Admin               Permission role name\n```\n\n## pool.change\n\n```\nUsage: govc pool.change [OPTIONS] POOL...\n\nChange the configuration of one or more resource POOLs.\n\nPOOL may be an absolute or relative path to a resource pool or a (clustered)\ncompute host. If it resolves to a compute host, the associated root resource\npool is returned. If a relative path is specified, it is resolved with respect\nto the current datacenter's \"host\" folder (i.e. /ha-datacenter/host).\n\nPaths to nested resource pools must traverse through the root resource pool of\nthe selected compute host, i.e. \"compute-host/Resources/nested-pool\".\n\nThe same globbing rules that apply to the \"ls\" command apply here. 
For example,\nPOOL may be specified as \"*/Resources/*\" to expand to all resource pools that\nare nested one level under the root resource pool, on all (clustered) compute\nhosts in the current datacenter.\n\nOptions:\n  -cpu.expandable=<nil>     CPU expandable reservation\n  -cpu.limit=0              CPU limit in MHz\n  -cpu.reservation=0        CPU reservation in MHz\n  -cpu.shares=              CPU shares level or number\n  -mem.expandable=<nil>     Memory expandable reservation\n  -mem.limit=0              Memory limit in MB\n  -mem.reservation=0        Memory reservation in MB\n  -mem.shares=              Memory shares level or number\n  -name=                    Resource pool name\n```\n\n## pool.create\n\n```\nUsage: govc pool.create [OPTIONS] POOL...\n\nCreate one or more resource POOLs.\n\nPOOL may be an absolute or relative path to a resource pool. The parent of the\nspecified POOL must be an existing resource pool. If a relative path is\nspecified, it is resolved with respect to the current datacenter's \"host\"\nfolder (i.e. /ha-datacenter/host). The basename of the specified POOL is used\nas the name for the new resource pool.\n\nThe same globbing rules that apply to the \"ls\" command apply here. 
For example,\nthe path to the parent resource pool in POOL may be specified as \"*/Resources\"\nto expand to the root resource pools on all (clustered) compute hosts in the\ncurrent datacenter.\n\nFor example:\n  */Resources/test             Create resource pool \"test\" on all (clustered)\n                               compute hosts in the current datacenter.\n  somehost/Resources/*/nested  Create resource pool \"nested\" in every\n                               resource pool that is a direct descendant of\n                               the root resource pool on \"somehost\".\n\nOptions:\n  -cpu.expandable=true      CPU expandable reservation\n  -cpu.limit=0              CPU limit in MHz\n  -cpu.reservation=0        CPU reservation in MHz\n  -cpu.shares=normal        CPU shares level or number\n  -mem.expandable=true      Memory expandable reservation\n  -mem.limit=0              Memory limit in MB\n  -mem.reservation=0        Memory reservation in MB\n  -mem.shares=normal        Memory shares level or number\n```\n\n## pool.destroy\n\n```\nUsage: govc pool.destroy [OPTIONS] POOL...\n\nDestroy one or more resource POOLs.\n\nPOOL may be an absolute or relative path to a resource pool or a (clustered)\ncompute host. If it resolves to a compute host, the associated root resource\npool is returned. If a relative path is specified, it is resolved with respect\nto the current datacenter's \"host\" folder (i.e. /ha-datacenter/host).\n\nPaths to nested resource pools must traverse through the root resource pool of\nthe selected compute host, i.e. \"compute-host/Resources/nested-pool\".\n\nThe same globbing rules that apply to the \"ls\" command apply here. 
For example,\nPOOL may be specified as \"*/Resources/*\" to expand to all resource pools that\nare nested one level under the root resource pool, on all (clustered) compute\nhosts in the current datacenter.\n\nOptions:\n  -children=false           Remove all children pools\n```\n\n## pool.info\n\n```\nUsage: govc pool.info [OPTIONS] POOL...\n\nRetrieve information about one or more resource POOLs.\n\nPOOL may be an absolute or relative path to a resource pool or a (clustered)\ncompute host. If it resolves to a compute host, the associated root resource\npool is returned. If a relative path is specified, it is resolved with respect\nto the current datacenter's \"host\" folder (i.e. /ha-datacenter/host).\n\nPaths to nested resource pools must traverse through the root resource pool of\nthe selected compute host, i.e. \"compute-host/Resources/nested-pool\".\n\nThe same globbing rules that apply to the \"ls\" command apply here. For example,\nPOOL may be specified as \"*/Resources/*\" to expand to all resource pools that\nare nested one level under the root resource pool, on all (clustered) compute\nhosts in the current datacenter.\n\nOptions:\n  -a=false                  List virtual app resource pools\n  -p=true                   List resource pools\n```\n\n## role.create\n\n```\nUsage: govc role.create [OPTIONS] NAME [PRIVILEGE]...\n\nCreate authorization role.\n\nOptionally populate the role with the given PRIVILEGE(s).\n\nExamples:\n  govc role.create MyRole\n  govc role.create NoDC $(govc role.ls Admin | grep -v Datacenter.)\n\nOptions:\n  -i=false                  Use moref instead of inventory path\n```\n\n## role.ls\n\n```\nUsage: govc role.ls [OPTIONS] [NAME]\n\nList authorization roles.\n\nIf NAME is provided, list privileges for the role.\n\nExamples:\n  govc role.ls\n  govc role.ls Admin\n\nOptions:\n  -i=false                  Use moref instead of inventory path\n```\n\n## role.remove\n\n```\nUsage: govc role.remove [OPTIONS] NAME\n\nRemove authorization 
role.\n\nExamples:\n  govc role.remove MyRole\n  govc role.remove MyRole -force\n\nOptions:\n  -force=false              Force removal if role is in use\n  -i=false                  Use moref instead of inventory path\n```\n\n## role.update\n\n```\nUsage: govc role.update [OPTIONS] NAME [PRIVILEGE]...\n\nUpdate authorization role.\n\nSet, Add or Remove role PRIVILEGE(s).\n\nExamples:\n  govc role.update MyRole $(govc role.ls Admin | grep VirtualMachine.)\n  govc role.update -r MyRole $(govc role.ls Admin | grep VirtualMachine.GuestOperations.)\n  govc role.update -a MyRole $(govc role.ls Admin | grep Datastore.)\n  govc role.update -name RockNRole MyRole\n\nOptions:\n  -a=false                  Add given PRIVILEGE(s)\n  -i=false                  Use moref instead of inventory path\n  -name=                    Change role name\n  -r=false                  Remove given PRIVILEGE(s)\n```\n\n## role.usage\n\n```\nUsage: govc role.usage [OPTIONS] NAME...\n\nList usage for role NAME.\n\nExamples:\n  govc role.usage\n  govc role.usage Admin\n\nOptions:\n  -i=false                  Use moref instead of inventory path\n```\n\n## session.ls\n\n```\nUsage: govc session.ls [OPTIONS]\n\nList active sessions.\n\nExamples:\n  govc session.ls\n  govc session.ls -json | jq -r .CurrentSession.Key\n\nOptions:\n```\n\n## session.rm\n\n```\nUsage: govc session.rm [OPTIONS] KEY...\n\nRemove active sessions.\n\nExamples:\n  govc session.ls | grep root\n  govc session.rm 5279e245-e6f1-4533-4455-eb94353b213a\n\nOptions:\n```\n\n## snapshot.create\n\n```\nUsage: govc snapshot.create [OPTIONS] NAME\n\nCreate snapshot of VM with NAME.\n\nExamples:\n  govc snapshot.create -vm my-vm happy-vm-state\n\nOptions:\n  -d=                       Snapshot description\n  -m=true                   Include memory state\n  -q=false                  Quiesce guest file system\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## snapshot.remove\n\n```\nUsage: govc snapshot.remove [OPTIONS] 
NAME\n\nRemove snapshot of VM with given NAME.\n\nNAME can be the snapshot name, tree path, moid or '*' to remove all snapshots.\n\nExamples:\n  govc snapshot.remove -vm my-vm happy-vm-state\n\nOptions:\n  -c=true                   Consolidate disks\n  -r=false                  Remove snapshot children\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## snapshot.revert\n\n```\nUsage: govc snapshot.revert [OPTIONS] [NAME]\n\nRevert to snapshot of VM with given NAME.\n\nIf NAME is not provided, revert to the current snapshot.\nOtherwise, NAME can be the snapshot name, tree path or moid.\n\nExamples:\n  govc snapshot.revert -vm my-vm happy-vm-state\n\nOptions:\n  -s=false                  Suppress power on\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## snapshot.tree\n\n```\nUsage: govc snapshot.tree [OPTIONS]\n\nList VM snapshots in a tree-like format.\n\nThe command will exit 0 with no output if VM does not have any snapshots.\n\nExamples:\n  govc snapshot.tree -vm my-vm\n  govc snapshot.tree -vm my-vm -D -i\n\nOptions:\n  -D=false                  Print the snapshot creation date\n  -c=true                   Print the current snapshot\n  -f=false                  Print the full path prefix for snapshot\n  -i=false                  Print the snapshot id\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## vapp.destroy\n\n```\nUsage: govc vapp.destroy [OPTIONS] VAPP...\n\nOptions:\n```\n\n## vapp.power\n\n```\nUsage: govc vapp.power [OPTIONS]\n\nOptions:\n  -force=false              Force (If force is false, the shutdown order in the vApp is executed. 
If force is true, all virtual machines are powered-off (regardless of shutdown order))\n  -off=false                Power off\n  -on=false                 Power on\n  -suspend=false            Power suspend\n  -vapp.ipath=              Find vapp by inventory path\n```\n\n## version\n\n```\nUsage: govc version [OPTIONS]\n\nOptions:\n  -require=  Require govc version >= this value\n```\n\n## vm.change\n\n```\nUsage: govc vm.change [OPTIONS]\n\nChange VM configuration.\n\nTo add ExtraConfig variables that can read within the guest, use the 'guestinfo.' prefix.\n\nExamples:\n  govc vm.change -vm $vm -e smc.present=TRUE -e ich7m.present=TRUE\n  govc vm.change -vm $vm -e guestinfo.vmname $vm\n  # Read the variable set above inside the guest:\n  vmware-rpctool \"info-get guestinfo.vmname\"\n\nOptions:\n  -c=0                        Number of CPUs\n  -e=[]                       ExtraConfig. <key>=<value>\n  -g=                         Guest OS\n  -m=0                        Size in MB of memory\n  -name=                      Display name\n  -nested-hv-enabled=<nil>    Enable nested hardware-assisted virtualization\n  -sync-time-with-host=<nil>  Enable SyncTimeWithHost\n  -vm=                        Virtual machine [GOVC_VM]\n```\n\n## vm.clone\n\n```\nUsage: govc vm.clone [OPTIONS] NAME\n\nClone VM to NAME.\n\nExamples:\n  govc vm.clone -vm template-vm new-vm\n\nOptions:\n  -annotation=              VM description\n  -c=0                      Number of CPUs\n  -customization=           Customization Specification Name\n  -datastore-cluster=       Datastore cluster [GOVC_DATASTORE_CLUSTER]\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -folder=                  Inventory folder [GOVC_FOLDER]\n  -force=false              Create VM if vmx already exists\n  -host=                    Host system [GOVC_HOST]\n  -m=0                      Size in MB of memory\n  -net=                     Network [GOVC_NETWORK]\n  -net.adapter=e1000        Network adapter type\n  
-net.address=             Network hardware address\n  -on=true                  Power on VM\n  -pool=                    Resource pool [GOVC_RESOURCE_POOL]\n  -template=false           Create a Template\n  -vm=                      Virtual machine [GOVC_VM]\n  -waitip=false             Wait for VM to acquire IP address\n```\n\n## vm.create\n\n```\nUsage: govc vm.create [OPTIONS]\n\nOptions:\n  -annotation=              VM description\n  -c=1                      Number of CPUs\n  -datastore-cluster=       Datastore cluster [GOVC_DATASTORE_CLUSTER]\n  -disk=                    Disk path (to use existing) OR size (to create new, e.g. 20GB)\n  -disk-datastore=          Datastore for disk file\n  -disk.controller=scsi     Disk controller type\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -folder=                  Inventory folder [GOVC_FOLDER]\n  -force=false              Create VM if vmx already exists\n  -g=otherGuest             Guest OS\n  -host=                    Host system [GOVC_HOST]\n  -iso=                     ISO path\n  -iso-datastore=           Datastore for ISO file\n  -link=true                Link specified disk\n  -m=1024                   Size in MB of memory\n  -net=                     Network [GOVC_NETWORK]\n  -net.adapter=e1000        Network adapter type\n  -net.address=             Network hardware address\n  -on=true                  Power on VM. 
Default is true if -disk argument is given.\n  -pool=                    Resource pool [GOVC_RESOURCE_POOL]\n```\n\n## vm.destroy\n\n```\nUsage: govc vm.destroy [OPTIONS]\n\nOptions:\n```\n\n## vm.disk.attach\n\n```\nUsage: govc vm.disk.attach [OPTIONS]\n\nOptions:\n  -controller=              Disk controller\n  -disk=                    Disk path name\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -link=true                Link specified disk\n  -persist=true             Persist attached disk\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## vm.disk.change\n\n```\nUsage: govc vm.disk.change [OPTIONS]\n\nChange some properties of a VM's DISK\n\nIn particular, you can change the DISK mode, and the size (as long as it is bigger)\n\nExamples:\n  govc vm.disk.change -vm VM -disk.key 2001 -size 10G\n  govc vm.disk.change -vm VM -disk.label \"BDD disk\" -size 10G\n  govc vm.disk.change -vm VM -disk.name \"hard-1000-0\" -size 12G\n  govc vm.disk.change -vm VM -disk.filePath \"[DS] VM/VM-1.vmdk\" -mode nonpersistent\n\nOptions:\n  -disk.filePath=           Disk file name\n  -disk.key=0               Disk unique key\n  -disk.label=              Disk label\n  -disk.name=               Disk name\n  -mode=                    Disk mode (persistent|nonpersistent|undoable|independent_persistent|independent_nonpersistent|append)\n  -size=0B                  New disk size\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## vm.disk.create\n\n```\nUsage: govc vm.disk.create [OPTIONS]\n\nCreate disk and attach to VM.\n\nExamples:\n  govc vm.disk.create -vm $name -name $name/disk1 -size 10G\n\nOptions:\n  -controller=              Disk controller\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -eager=false              Eagerly scrub new disk\n  -mode=persistent          Disk mode (persistent|nonpersistent|undoable|independent_persistent|independent_nonpersistent|append)\n  -name=                    Name for new disk\n  -size=10.0GB   
           Size of new disk\n  -thick=false              Thick provision new disk\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## vm.guest.tools\n\n```\nUsage: govc vm.guest.tools [OPTIONS] VM...\n\nManage guest tools in VM.\n\nExamples:\n  govc vm.guest.tools -mount VM\n  govc vm.guest.tools -unmount VM\n  govc vm.guest.tools -upgrade -options \"opt1 opt2\" VM\n\nOptions:\n  -mount=false              Mount tools CD installer in the guest\n  -options=                 Installer options\n  -unmount=false            Unmount tools CD installer in the guest\n  -upgrade=false            Upgrade tools in the guest\n```\n\n## vm.info\n\n```\nUsage: govc vm.info [OPTIONS]\n\nOptions:\n  -e=false                  Show ExtraConfig\n  -g=true                   Show general summary\n  -r=false                  Show resource summary\n  -t=false                  Show ToolsConfigInfo\n  -waitip=false             Wait for VM to acquire IP address\n```\n\n## vm.ip\n\n```\nUsage: govc vm.ip [OPTIONS] VM...\n\nList IPs for VM.\n\nBy default the vm.ip command depends on vmware-tools to report the 'guest.ipAddress' field and will\nwait until it has done so.  This value can also be obtained using:\n\n  govc vm.info -json $vm | jq -r .VirtualMachines[].Guest.IpAddress\n\nWhen given the '-a' flag, only IP addresses for which there is a corresponding virtual nic are listed.\nIf there are multiple nics, the listed addresses will be comma delimited.  The '-a' flag depends on\nvmware-tools to report the 'guest.net' field and will wait until it has done so for all nics.\nNote that this list includes IPv6 addresses if any, use '-v4' to filter them out.  
IP addresses reported\nby tools for which there is no virtual nic are not included, for example that of the 'docker0' interface.\n\nThese values can also be obtained using:\n\n  govc vm.info -json $vm | jq -r .VirtualMachines[].Guest.Net[].IpConfig.IpAddress[].IpAddress\n\nWhen given the '-n' flag, filters '-a' behavior to the nic specified by MAC address or device name.\n\nThe 'esxcli' flag does not require vmware-tools to be installed, but does require the ESX host to\nhave the /Net/GuestIPHack setting enabled.\n\nThe 'wait' flag default to 1hr (original default was infinite).  If a VM does not obtain an IP within\nthe wait time, the command will still exit with status 0.\n\nExamples:\n  govc vm.ip $vm\n  govc vm.ip -wait 5m $vm\n  govc vm.ip -a -v4 $vm\n  govc vm.ip -n 00:0c:29:57:7b:c3 $vm\n  govc vm.ip -n ethernet-0 $vm\n  govc host.esxcli system settings advanced set -o /Net/GuestIPHack -i 1\n  govc vm.ip -esxcli $vm\n\nOptions:\n  -a=false                  Wait for an IP address on all NICs\n  -esxcli=false             Use esxcli instead of guest tools\n  -n=                       Wait for IP address on NIC, specified by device name or MAC\n  -v4=false                 Only report IPv4 addresses\n  -wait=1h0m0s              Wait time for the VM obtain an IP address\n```\n\n## vm.markastemplate\n\n```\nUsage: govc vm.markastemplate [OPTIONS] VM...\n\nMark VM as a virtual machine template.\n\nExamples:\n  govc vm.markastemplate $name\n\nOptions:\n```\n\n## vm.markasvm\n\n```\nUsage: govc vm.markasvm [OPTIONS] VM...\n\nMark VM template as a virtual machine.\n\nExamples:\n  govc vm.markasvm $name -host host1\n  govc vm.markasvm $name -pool cluster1/Resources\n\nOptions:\n  -host=                    Host system [GOVC_HOST]\n  -pool=                    Resource pool [GOVC_RESOURCE_POOL]\n```\n\n## vm.migrate\n\n```\nUsage: govc vm.migrate [OPTIONS] VM...\n\nMigrates VM to a specific resource pool, host or datastore.\n\nExamples:\n  govc vm.migrate -host 
another-host vm-1 vm-2 vm-3\n  govc vm.migrate -ds another-ds vm-1 vm-2 vm-3\n\nOptions:\n  -ds=                       Datastore [GOVC_DATASTORE]\n  -host=                     Host system [GOVC_HOST]\n  -pool=                     Resource pool [GOVC_RESOURCE_POOL]\n  -priority=defaultPriority  The task priority\n```\n\n## vm.network.add\n\n```\nUsage: govc vm.network.add [OPTIONS]\n\nAdd network adapter to VM.\n\nExamples:\n  govc vm.network.add -vm $vm -net \"VM Network\" -net.adapter e1000e\n  govc device.info -vm $vm ethernet-*\n\nOptions:\n  -net=                     Network [GOVC_NETWORK]\n  -net.adapter=e1000        Network adapter type\n  -net.address=             Network hardware address\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## vm.network.change\n\n```\nUsage: govc vm.network.change [OPTIONS] DEVICE\n\nChange network DEVICE configuration.\n\nExamples:\n  govc vm.network.change -vm $vm -net PG2 ethernet-0\n  govc vm.network.change -vm $vm -net.address 00:00:0f:2e:5d:69 ethernet-0\n  govc device.info -vm $vm ethernet-*\n\nOptions:\n  -net=                     Network [GOVC_NETWORK]\n  -net.adapter=e1000        Network adapter type\n  -net.address=             Network hardware address\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## vm.power\n\n```\nUsage: govc vm.power [OPTIONS]\n\nOptions:\n  -force=false              Force (ignore state error and hard shutdown/reboot if tools unavailable)\n  -off=false                Power off\n  -on=false                 Power on\n  -r=false                  Reboot guest\n  -reset=false              Power reset\n  -s=false                  Shutdown guest\n  -suspend=false            Power suspend\n```\n\n## vm.question\n\n```\nUsage: govc vm.question [OPTIONS]\n\nOptions:\n  -answer=                  Answer to question\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## vm.rdm.attach\n\n```\nUsage: govc vm.rdm.attach [OPTIONS]\n\nAttach DEVICE to VM with 
RDM.\n\nExamples:\n  govc vm.rdm.attach -vm VM -device /vmfs/devices/disks/naa.000000000000000000000000000000000\n\nOptions:\n  -device=                  Device Name\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## vm.rdm.ls\n\n```\nUsage: govc vm.rdm.ls [OPTIONS]\n\nList available devices that could be attach to VM with RDM.\n\nExamples:\n  govc vm.rdm.ls -vm VM\n\nOptions:\n  -vm=                      Virtual machine [GOVC_VM]\n```\n\n## vm.register\n\n```\nUsage: govc vm.register [OPTIONS] VMX\n\nAdd an existing VM to the inventory.\n\nVMX is a path to the vm config file, relative to DATASTORE.\n\nExamples:\n  govc vm.register path/name.vmx\n\nOptions:\n  -as-template=false        Mark VM as template\n  -ds=                      Datastore [GOVC_DATASTORE]\n  -folder=                  Inventory folder [GOVC_FOLDER]\n  -host=                    Host system [GOVC_HOST]\n  -name=                    Name of the VM\n  -pool=                    Resource pool [GOVC_RESOURCE_POOL]\n```\n\n## vm.unregister\n\n```\nUsage: govc vm.unregister [OPTIONS] VM...\n\nRemove VM from inventory without removing any of the VM files on disk.\n\nOptions:\n```\n\n## vm.vnc\n\n```\nUsage: govc vm.vnc [OPTIONS] VM...\n\nEnable or disable VNC for VM.\n\nPort numbers are automatically chosen if not specified.\n\nIf neither -enable or -disable is specified, the current state is returned.\n\nExamples:\n  govc vm.vnc -enable -password 1234 $vm | awk '{print $2}' | xargs open\n\nOptions:\n  -disable=false            Disable VNC\n  -enable=false             Enable VNC\n  -password=                VNC password\n  -port=-1                  VNC port (-1 for auto-select)\n  -port-range=5900-5999     VNC port auto-select range\n```\n\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/about/cert.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage about\n\nimport (\n\t\"context\"\n\t\"encoding/pem\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n)\n\ntype cert struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n\n\tshow       bool\n\tthumbprint bool\n}\n\nfunc init() {\n\tcli.Register(\"about.cert\", &cert{})\n}\n\nfunc (cmd *cert) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.show, \"show\", false, \"Show PEM encoded server certificate only\")\n\tf.BoolVar(&cmd.thumbprint, \"thumbprint\", false, \"Output host hash and thumbprint only\")\n}\n\nfunc (cmd *cert) Description() string {\n\treturn `Display TLS certificate info for HOST.\n\nIf the HOST certificate cannot be verified, about.cert will return with exit code 60 (as curl does).\nIf the '-k' flag is provided, about.cert will return with exit code 0 in this case.\nThe SHA1 thumbprint can also be used as '-thumbprint' for the 'host.add' and 'cluster.add' commands.\n\nExamples:\n  govc about.cert -k -json | jq -r .ThumbprintSHA1\n  govc about.cert -k -show | sudo tee 
/usr/local/share/ca-certificates/host.crt\n  govc about.cert -k -thumbprint | tee -a ~/.govmomi/known_hosts`\n}\n\nfunc (cmd *cert) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\ntype certResult struct {\n\tcmd  *cert\n\tinfo object.HostCertificateInfo\n}\n\nfunc (r *certResult) Write(w io.Writer) error {\n\tif r.cmd.show {\n\t\treturn pem.Encode(w, &pem.Block{Type: \"CERTIFICATE\", Bytes: r.info.Certificate.Raw})\n\t}\n\n\tif r.cmd.thumbprint {\n\t\tu := r.cmd.URLWithoutPassword()\n\t\t_, err := fmt.Fprintf(w, \"%s %s\\n\", u.Host, r.info.ThumbprintSHA1)\n\t\treturn err\n\t}\n\n\treturn r.cmd.WriteResult(&r.info)\n}\n\nfunc (cmd *cert) Run(ctx context.Context, f *flag.FlagSet) error {\n\tu := cmd.URLWithoutPassword()\n\tc := soap.NewClient(u, false)\n\tt := c.Client.Transport.(*http.Transport)\n\tr := certResult{cmd: cmd}\n\n\tif err := cmd.SetRootCAs(c); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.info.FromURL(u, t.TLSClientConfig); err != nil {\n\t\treturn err\n\t}\n\n\tif r.info.Err != nil && r.cmd.IsSecure() {\n\t\tcmd.Out = os.Stderr\n\t\t// using same exit code as curl:\n\t\tdefer os.Exit(60)\n\t}\n\n\treturn r.Write(cmd.Out)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/about/command.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage about\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype about struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n\n\tLong bool\n}\n\nfunc init() {\n\tcli.Register(\"about\", &about{})\n}\n\nfunc (cmd *about) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.Long, \"l\", false, \"Include service content\")\n}\n\nfunc (cmd *about) Description() string {\n\treturn `Display About info for HOST.\n\nSystem information including the name, type, version, and build number.\n\nExamples:\n  govc about\n  govc about -json | jq -r .About.ProductLineId`\n}\n\nfunc (cmd *about) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *about) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tres := infoResult{\n\t\ta: &c.ServiceContent.About,\n\t}\n\n\tif cmd.Long 
{\n\t\tres.Content = &c.ServiceContent\n\t} else {\n\t\tres.About = res.a\n\t}\n\n\treturn cmd.WriteResult(&res)\n}\n\ntype infoResult struct {\n\tContent *types.ServiceContent `json:\",omitempty\"`\n\tAbout   *types.AboutInfo      `json:\",omitempty\"`\n\ta       *types.AboutInfo\n}\n\nfunc (r *infoResult) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\tfmt.Fprintf(tw, \"Name:\\t%s\\n\", r.a.Name)\n\tfmt.Fprintf(tw, \"Vendor:\\t%s\\n\", r.a.Vendor)\n\tfmt.Fprintf(tw, \"Version:\\t%s\\n\", r.a.Version)\n\tfmt.Fprintf(tw, \"Build:\\t%s\\n\", r.a.Build)\n\tfmt.Fprintf(tw, \"OS type:\\t%s\\n\", r.a.OsType)\n\tfmt.Fprintf(tw, \"API type:\\t%s\\n\", r.a.ApiType)\n\tfmt.Fprintf(tw, \"API version:\\t%s\\n\", r.a.ApiVersion)\n\tfmt.Fprintf(tw, \"Product ID:\\t%s\\n\", r.a.ProductLineId)\n\tfmt.Fprintf(tw, \"UUID:\\t%s\\n\", r.a.InstanceUuid)\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/build.sh",
    "content": "#!/bin/bash -e\n\ngit_version=$(git describe)\nif git_status=$(git status --porcelain 2>/dev/null) && [ -n \"${git_status}\" ]; then\n  git_version=\"${git_version}-dirty\"\nfi\n\nldflags=\"-X github.com/vmware/govmomi/govc/version.gitVersion=${git_version}\"\n\nBUILD_OS=${BUILD_OS:-darwin linux windows freebsd}\nBUILD_ARCH=${BUILD_ARCH:-386 amd64}\n\nfor os in ${BUILD_OS}; do\n  export GOOS=\"${os}\"\n  for arch in ${BUILD_ARCH}; do\n    export GOARCH=\"${arch}\"\n\n    out=\"govc_${os}_${arch}\"\n    if [ \"${os}\" == \"windows\" ]; then\n      out=\"${out}.exe\"\n    fi\n\n    set -x\n    go build \\\n      -o=\"${out}\" \\\n      -pkgdir=\"./_pkg\" \\\n      -compiler='gc' \\\n      -ldflags=\"${ldflags}\" \\\n      github.com/vmware/govmomi/govc &\n    set +x\n  done\ndone\n\nwait\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/cli/command.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage cli\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"sort\"\n\t\"text/tabwriter\"\n)\n\ntype HasFlags interface {\n\t// Register may be called more than once and should be idempotent.\n\tRegister(ctx context.Context, f *flag.FlagSet)\n\n\t// Process may be called more than once and should be idempotent.\n\tProcess(ctx context.Context) error\n}\n\ntype Command interface {\n\tHasFlags\n\n\tRun(ctx context.Context, f *flag.FlagSet) error\n}\n\nfunc generalHelp(w io.Writer) {\n\tfmt.Fprintf(w, \"Usage of %s:\\n\", os.Args[0])\n\n\tcmds := []string{}\n\tfor name := range commands {\n\t\tcmds = append(cmds, name)\n\t}\n\n\tsort.Strings(cmds)\n\n\tfor _, name := range cmds {\n\t\tfmt.Fprintf(w, \"  %s\\n\", name)\n\t}\n}\n\nfunc commandHelp(w io.Writer, name string, cmd Command, f *flag.FlagSet) {\n\ttype HasUsage interface {\n\t\tUsage() string\n\t}\n\n\tfmt.Fprintf(w, \"Usage: %s %s [OPTIONS]\", os.Args[0], name)\n\tif u, ok := cmd.(HasUsage); ok {\n\t\tfmt.Fprintf(w, \" %s\", u.Usage())\n\t}\n\tfmt.Fprintf(w, \"\\n\")\n\n\ttype HasDescription interface {\n\t\tDescription() string\n\t}\n\n\tif u, ok := cmd.(HasDescription); ok {\n\t\tfmt.Fprintf(w, \"\\n%s\\n\", u.Description())\n\t}\n\n\tn := 0\n\tf.VisitAll(func(_ *flag.Flag) {\n\t\tn += 1\n\t})\n\n\tif n > 0 {\n\t\tfmt.Fprintf(w, 
\"\\nOptions:\\n\")\n\t\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\t\tf.VisitAll(func(f *flag.Flag) {\n\t\t\tfmt.Fprintf(tw, \"\\t-%s=%s\\t%s\\n\", f.Name, f.DefValue, f.Usage)\n\t\t})\n\t\ttw.Flush()\n\t}\n}\n\nfunc clientLogout(ctx context.Context, cmd Command) error {\n\ttype logout interface {\n\t\tLogout(context.Context) error\n\t}\n\n\tif l, ok := cmd.(logout); ok {\n\t\treturn l.Logout(ctx)\n\t}\n\n\treturn nil\n}\n\nfunc Run(args []string) int {\n\thw := os.Stderr\n\trc := 1\n\thwrc := func(arg string) {\n\t\tif arg == \"-h\" {\n\t\t\thw = os.Stdout\n\t\t\trc = 0\n\t\t}\n\t}\n\n\tvar err error\n\n\tif len(args) == 0 {\n\t\tgeneralHelp(hw)\n\t\treturn rc\n\t}\n\n\t// Look up real command name in aliases table.\n\tname, ok := aliases[args[0]]\n\tif !ok {\n\t\tname = args[0]\n\t}\n\n\tcmd, ok := commands[name]\n\tif !ok {\n\t\thwrc(name)\n\t\tgeneralHelp(hw)\n\t\treturn rc\n\t}\n\n\tfs := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tfs.SetOutput(ioutil.Discard)\n\n\tctx := context.Background()\n\tcmd.Register(ctx, fs)\n\n\tif err = fs.Parse(args[1:]); err != nil {\n\t\tgoto error\n\t}\n\n\tif err = cmd.Process(ctx); err != nil {\n\t\tgoto error\n\t}\n\n\tif err = cmd.Run(ctx, fs); err != nil {\n\t\tgoto error\n\t}\n\n\tif err = clientLogout(ctx, cmd); err != nil {\n\t\tgoto error\n\t}\n\n\treturn 0\n\nerror:\n\tif err == flag.ErrHelp {\n\t\tif len(args) == 2 {\n\t\t\thwrc(args[1])\n\t\t}\n\t\tcommandHelp(hw, args[0], cmd, fs)\n\t} else {\n\t\tfmt.Fprintf(os.Stderr, \"%s: %s\\n\", os.Args[0], err)\n\t}\n\n\t_ = clientLogout(ctx, cmd)\n\n\treturn rc\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/cli/register.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage cli\n\nvar commands = map[string]Command{}\n\nvar aliases = map[string]string{}\n\nfunc Register(name string, c Command) {\n\tcommands[name] = c\n}\n\nfunc Alias(name string, alias string) {\n\taliases[alias] = name\n}\n\nfunc Commands() map[string]Command {\n\treturn commands\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/cluster/add.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage cluster\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype add struct {\n\t*flags.DatacenterFlag\n\t*flags.HostConnectFlag\n\n\tcluster string\n\tconnect bool\n\tlicense string\n}\n\nfunc init() {\n\tcli.Register(\"cluster.add\", &add{})\n}\n\nfunc (cmd *add) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\n\tcmd.HostConnectFlag, ctx = flags.NewHostConnectFlag(ctx)\n\tcmd.HostConnectFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.cluster, \"cluster\", \"*\", \"Path to cluster\")\n\n\tf.StringVar(&cmd.license, \"license\", \"\", \"Assign license key\")\n\n\tf.BoolVar(&cmd.connect, \"connect\", true, \"Immediately connect to host\")\n}\n\nfunc (cmd *add) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostConnectFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif cmd.HostName == \"\" {\n\t\treturn flag.ErrHelp\n\t}\n\tif cmd.UserName == \"\" {\n\t\treturn flag.ErrHelp\n\t}\n\tif cmd.Password == \"\" {\n\t\treturn flag.ErrHelp\n\t}\n\treturn nil\n}\n\nfunc (cmd *add) Description() string {\n\treturn `Add HOST to CLUSTER.\n\nThe 
host is added to the cluster specified by the 'cluster' flag.\n\nExamples:\n  thumbprint=$(govc about.cert -k -u host.example.com -thumbprint | awk '{print $2}')\n  govc cluster.add -cluster ClusterA -hostname host.example.com -username root -password pass -thumbprint $thumbprint\n  govc cluster.add -cluster ClusterB -hostname 10.0.6.1 -username root -password pass -noverify`\n}\n\nfunc (cmd *add) Add(ctx context.Context, cluster *object.ClusterComputeResource) error {\n\tspec := cmd.HostConnectSpec\n\n\tvar license *string\n\tif cmd.license != \"\" {\n\t\tlicense = &cmd.license\n\t}\n\n\ttask, err := cluster.AddHost(ctx, cmd.Spec(cluster.Client()), cmd.connect, license, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger := cmd.ProgressLogger(fmt.Sprintf(\"adding %s to cluster %s... \", spec.HostName, cluster.InventoryPath))\n\tdefer logger.Wait()\n\n\t_, err = task.WaitForResult(ctx, logger)\n\treturn err\n}\n\nfunc (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcluster, err := finder.ClusterComputeResource(ctx, cmd.cluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn cmd.Fault(cmd.Add(ctx, cluster))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/cluster/change.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage cluster\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype change struct {\n\t*flags.DatacenterFlag\n\n\ttypes.ClusterConfigSpecEx\n}\n\nfunc init() {\n\tcli.Register(\"cluster.change\", &change{})\n}\n\nfunc (cmd *change) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\n\tcmd.DrsConfig = new(types.ClusterDrsConfigInfo)\n\tcmd.DasConfig = new(types.ClusterDasConfigInfo)\n\tcmd.VsanConfig = new(types.VsanClusterConfigInfo)\n\tcmd.VsanConfig.DefaultConfig = new(types.VsanClusterConfigInfoHostDefaultInfo)\n\n\t// DRS\n\tf.Var(flags.NewOptionalBool(&cmd.DrsConfig.Enabled), \"drs-enabled\", \"Enable DRS\")\n\n\tdrsModes := []string{\n\t\tstring(types.DrsBehaviorManual),\n\t\tstring(types.DrsBehaviorPartiallyAutomated),\n\t\tstring(types.DrsBehaviorFullyAutomated),\n\t}\n\tf.StringVar((*string)(&cmd.DrsConfig.DefaultVmBehavior), \"drs-mode\", \"\",\n\t\t\"DRS behavior for virtual machines: \"+strings.Join(drsModes, \", \"))\n\n\t// HA\n\tf.Var(flags.NewOptionalBool(&cmd.DasConfig.Enabled), \"ha-enabled\", \"Enable HA\")\n\n\t// vSAN\n\tf.Var(flags.NewOptionalBool(&cmd.VsanConfig.Enabled), \"vsan-enabled\", 
\"Enable vSAN\")\n\tf.Var(flags.NewOptionalBool(&cmd.VsanConfig.DefaultConfig.AutoClaimStorage), \"vsan-autoclaim\", \"Autoclaim storage on cluster hosts\")\n}\n\nfunc (cmd *change) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *change) Usage() string {\n\treturn \"CLUSTER...\"\n}\n\nfunc (cmd *change) Description() string {\n\treturn `Change configuration of the given clusters.\n\nExamples:\n  govc cluster.change -drs-enabled -vsan-enabled -vsan-autoclaim ClusterA\n  govc cluster.change -drs-enabled=false ClusterB`\n}\n\nfunc (cmd *change) Run(ctx context.Context, f *flag.FlagSet) error {\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, path := range f.Args() {\n\t\tclusters, err := finder.ClusterComputeResourceList(ctx, path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, cluster := range clusters {\n\t\t\ttask, err := cluster.Reconfigure(ctx, &cmd.ClusterConfigSpecEx, true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t_, err = task.WaitForResult(ctx, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/cluster/create.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage cluster\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype create struct {\n\t*flags.FolderFlag\n\n\ttypes.ClusterConfigSpecEx\n}\n\nfunc init() {\n\tcli.Register(\"cluster.create\", &create{})\n}\n\nfunc (cmd *create) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.FolderFlag, ctx = flags.NewFolderFlag(ctx)\n\tcmd.FolderFlag.Register(ctx, f)\n}\n\nfunc (cmd *create) Usage() string {\n\treturn \"CLUSTER\"\n}\n\nfunc (cmd *create) Description() string {\n\treturn `Create CLUSTER in datacenter.\n\nThe cluster is added to the folder specified by the 'folder' flag. If not given,\nthis defaults to the host folder in the specified or default datacenter.\n\nExamples:\n  govc cluster.create ClusterA\n  govc cluster.create -folder /dc2/test-folder ClusterB`\n}\n\nfunc (cmd *create) Process(ctx context.Context) error {\n\tif err := cmd.FolderFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tfolder, err := cmd.FolderOrDefault(\"host\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = folder.CreateCluster(ctx, f.Arg(0), cmd.ClusterConfigSpecEx)\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/datacenter/create.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage datacenter\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype create struct {\n\t*flags.FolderFlag\n}\n\nfunc init() {\n\tcli.Register(\"datacenter.create\", &create{})\n}\n\nfunc (cmd *create) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.FolderFlag, ctx = flags.NewFolderFlag(ctx)\n\tcmd.FolderFlag.Register(ctx, f)\n}\n\nfunc (cmd *create) Usage() string {\n\treturn \"NAME...\"\n}\n\nfunc (cmd *create) Process(ctx context.Context) error {\n\tif err := cmd.FolderFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error {\n\tfolder, err := cmd.FolderOrDefault(\"/\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif f.NArg() == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tfor _, name := range f.Args() {\n\t\t_, err := folder.CreateDatacenter(ctx, name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/datacenter/info.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage datacenter\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"path\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/find\"\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype info struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n\t*flags.DatacenterFlag\n}\n\nfunc init() {\n\tcli.Register(\"datacenter.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *info) Usage() string {\n\treturn \"[PATH]...\"\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := 
cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targs := f.Args()\n\tif len(args) == 0 {\n\t\targs = []string{\"*\"}\n\t}\n\n\tvar props []string\n\tres := infoResult{\n\t\tfinder: finder,\n\t\tctx:    ctx,\n\t}\n\n\tif !cmd.OutputFlag.JSON {\n\t\tprops = []string{\n\t\t\t\"name\",\n\t\t\t\"vmFolder\",\n\t\t\t\"hostFolder\",\n\t\t\t\"datastoreFolder\",\n\t\t\t\"networkFolder\",\n\t\t\t\"datastore\",\n\t\t\t\"network\",\n\t\t}\n\t}\n\n\tfor _, arg := range args {\n\t\tobjects, err := finder.DatacenterList(ctx, arg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres.objects = append(res.objects, objects...)\n\t}\n\n\tif len(res.objects) != 0 {\n\t\trefs := make([]types.ManagedObjectReference, 0, len(res.objects))\n\t\tfor _, o := range res.objects {\n\t\t\trefs = append(refs, o.Reference())\n\t\t}\n\n\t\tpc := property.DefaultCollector(c)\n\t\terr = pc.Retrieve(ctx, refs, props, &res.Datacenters)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn cmd.WriteResult(&res)\n}\n\ntype infoResult struct {\n\tDatacenters []mo.Datacenter\n\tobjects     []*object.Datacenter\n\tfinder      *find.Finder\n\tctx         context.Context\n}\n\nfunc (r *infoResult) Write(w io.Writer) error {\n\t// Maintain order via r.objects as Property collector does not always return results in order.\n\tobjects := make(map[types.ManagedObjectReference]mo.Datacenter, len(r.Datacenters))\n\tfor _, o := range r.Datacenters {\n\t\tobjects[o.Reference()] = o\n\t}\n\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfor _, o := range r.objects {\n\t\tdc := objects[o.Reference()]\n\t\tfmt.Fprintf(tw, \"Name:\\t%s\\n\", dc.Name)\n\t\tfmt.Fprintf(tw, \"  Path:\\t%s\\n\", o.InventoryPath)\n\n\t\tfolders, err := o.Folders(r.ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tr.finder.SetDatacenter(o)\n\n\t\thosts, _ := r.finder.HostSystemList(r.ctx, path.Join(folders.HostFolder.InventoryPath, 
\"*\"))\n\t\tfmt.Fprintf(tw, \"  Hosts:\\t%d\\n\", len(hosts))\n\n\t\tclusters, _ := r.finder.ClusterComputeResourceList(r.ctx, path.Join(folders.HostFolder.InventoryPath, \"*\"))\n\t\tfmt.Fprintf(tw, \"  Clusters:\\t%d\\n\", len(clusters))\n\n\t\tvms, _ := r.finder.VirtualMachineList(r.ctx, path.Join(folders.VmFolder.InventoryPath, \"*\"))\n\t\tfmt.Fprintf(tw, \"  Virtual Machines:\\t%d\\n\", len(vms))\n\n\t\tfmt.Fprintf(tw, \"  Networks:\\t%d\\n\", len(dc.Network))\n\t\tfmt.Fprintf(tw, \"  Datastores:\\t%d\\n\", len(dc.Datastore))\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/datastore/cp.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage datastore\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype cp struct {\n\t*flags.OutputFlag\n\t*flags.DatastoreFlag\n\n\tforce bool\n}\n\nfunc init() {\n\tcli.Register(\"datastore.cp\", &cp{})\n}\n\nfunc (cmd *cp) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.force, \"f\", false, \"If true, overwrite any identically named file at the destination\")\n}\n\nfunc (cmd *cp) Process(ctx context.Context) error {\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *cp) Usage() string {\n\treturn \"SRC DST\"\n}\n\nfunc (cmd *cp) Description() string {\n\treturn `Copy SRC to DST on DATASTORE.\n\nExamples:\n  govc datastore.cp foo/foo.vmx foo/foo.vmx.old\n  govc datastore.cp -f my.vmx foo/foo.vmx`\n}\n\nfunc (cmd *cp) Run(ctx context.Context, f *flag.FlagSet) error {\n\targs := f.Args()\n\tif len(args) != 2 {\n\t\treturn errors.New(\"SRC and DST arguments are required\")\n\t}\n\n\tc, 
err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdc, err := cmd.Datacenter()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: support cross-datacenter copy\n\n\tsrc, err := cmd.DatastorePath(args[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdst, err := cmd.DatastorePath(args[1])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm := object.NewFileManager(c)\n\ttask, err := m.CopyDatastoreFile(ctx, src, dc, dst, dc, cmd.force)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn task.Wait(ctx)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/datastore/create.go",
    "content": "/*\nCopyright (c) 2015-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage datastore\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype create struct {\n\t*flags.HostSystemFlag\n\n\t// Generic options\n\tType  typeFlag\n\tName  string\n\tForce bool\n\n\t// Options for NAS\n\tRemoteHost string\n\tRemotePath string\n\tAccessMode string\n\tUserName   string\n\tPassword   string\n\n\t// Options for VMFS\n\tDiskCanonicalName string\n\n\t// Options for local\n\tPath string\n}\n\nfunc init() {\n\tcli.Register(\"datastore.create\", &create{})\n}\n\nvar nasTypes = []string{\n\tstring(types.HostFileSystemVolumeFileSystemTypeNFS),\n\tstring(types.HostFileSystemVolumeFileSystemTypeNFS41),\n\tstring(types.HostFileSystemVolumeFileSystemTypeCIFS),\n}\n\nvar vmfsTypes = []string{\n\tstring(types.HostFileSystemVolumeFileSystemTypeVMFS),\n}\n\nvar localTypes = []string{\n\t\"local\",\n}\n\nvar allTypes = []string{}\n\nfunc init() {\n\tallTypes = append(allTypes, nasTypes...)\n\tallTypes = append(allTypes, vmfsTypes...)\n\tallTypes = append(allTypes, localTypes...)\n}\n\ntype typeFlag string\n\nfunc (t *typeFlag) Set(s string) error {\n\ts = 
strings.ToLower(s)\n\tfor _, e := range allTypes {\n\t\tif s == strings.ToLower(e) {\n\t\t\t*t = typeFlag(e)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn fmt.Errorf(\"unknown type\")\n}\n\nfunc (t *typeFlag) String() string {\n\treturn string(*t)\n}\n\nfunc (t *typeFlag) partOf(m []string) bool {\n\tfor _, e := range m {\n\t\tif t.String() == e {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (t *typeFlag) IsNasType() bool {\n\treturn t.partOf(nasTypes)\n}\n\nfunc (t *typeFlag) IsVmfsType() bool {\n\treturn t.partOf(vmfsTypes)\n}\n\nfunc (t *typeFlag) IsLocalType() bool {\n\treturn t.partOf(localTypes)\n}\n\nfunc (cmd *create) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tmodes := []string{\n\t\tstring(types.HostMountModeReadOnly),\n\t\tstring(types.HostMountModeReadWrite),\n\t}\n\n\tf.StringVar(&cmd.Name, \"name\", \"\", \"Datastore name\")\n\tf.Var(&cmd.Type, \"type\", fmt.Sprintf(\"Datastore type (%s)\", strings.Join(allTypes, \"|\")))\n\tf.BoolVar(&cmd.Force, \"force\", false, \"Ignore DuplicateName error if datastore is already mounted on a host\")\n\n\t// Options for NAS\n\tf.StringVar(&cmd.RemoteHost, \"remote-host\", \"\", \"Remote hostname of the NAS datastore\")\n\tf.StringVar(&cmd.RemotePath, \"remote-path\", \"\", \"Remote path of the NFS mount point\")\n\tf.StringVar(&cmd.AccessMode, \"mode\", modes[0],\n\t\tfmt.Sprintf(\"Access mode for the mount point (%s)\", strings.Join(modes, \"|\")))\n\tf.StringVar(&cmd.UserName, \"username\", \"\", \"Username to use when connecting (CIFS only)\")\n\tf.StringVar(&cmd.Password, \"password\", \"\", \"Password to use when connecting (CIFS only)\")\n\n\t// Options for VMFS\n\tf.StringVar(&cmd.DiskCanonicalName, \"disk\", \"\", \"Canonical name of disk (VMFS only)\")\n\n\t// Options for Local\n\tf.StringVar(&cmd.Path, \"path\", \"\", \"Local directory path for the datastore (local only)\")\n}\n\nfunc (cmd 
*create) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *create) Usage() string {\n\treturn \"HOST...\"\n}\n\nfunc (cmd *create) Description() string {\n\treturn `Create datastore on HOST.\n\nExamples:\n  govc datastore.create -type nfs -name nfsDatastore -remote-host 10.143.2.232 -remote-path /share cluster1\n  govc datastore.create -type vmfs -name vmfsDatastore -disk=mpx.vmhba0:C0:T0:L0 cluster1\n  govc datastore.create -type local -name localDatastore -path /var/datastore host1`\n}\n\nfunc (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error {\n\thosts, err := cmd.HostSystems(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch {\n\tcase cmd.Type.IsNasType():\n\t\treturn cmd.CreateNasDatastore(ctx, hosts)\n\tcase cmd.Type.IsVmfsType():\n\t\treturn cmd.CreateVmfsDatastore(ctx, hosts)\n\tcase cmd.Type.IsLocalType():\n\t\treturn cmd.CreateLocalDatastore(ctx, hosts)\n\tdefault:\n\t\treturn fmt.Errorf(\"unhandled type %#v\", cmd.Type)\n\t}\n}\n\nfunc (cmd *create) GetHostNasVolumeSpec() types.HostNasVolumeSpec {\n\tlocalPath := cmd.Path\n\tif localPath == \"\" {\n\t\tlocalPath = cmd.Name\n\t}\n\n\ts := types.HostNasVolumeSpec{\n\t\tLocalPath:  localPath,\n\t\tType:       cmd.Type.String(),\n\t\tRemoteHost: cmd.RemoteHost,\n\t\tRemotePath: cmd.RemotePath,\n\t\tAccessMode: cmd.AccessMode,\n\t\tUserName:   cmd.UserName,\n\t\tPassword:   cmd.Password,\n\t}\n\n\treturn s\n}\n\nfunc (cmd *create) CreateNasDatastore(ctx context.Context, hosts []*object.HostSystem) error {\n\tobject := types.ManagedObjectReference{\n\t\tType:  \"Datastore\",\n\t\tValue: fmt.Sprintf(\"%s:%s\", cmd.RemoteHost, cmd.RemotePath),\n\t}\n\n\tspec := cmd.GetHostNasVolumeSpec()\n\n\tfor _, host := range hosts {\n\t\tds, err := host.ConfigManager().DatastoreSystem(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = ds.CreateNasDatastore(ctx, spec)\n\t\tif err != nil 
{\n\t\t\tif soap.IsSoapFault(err) {\n\t\t\t\tswitch fault := soap.ToSoapFault(err).VimFault().(type) {\n\t\t\t\tcase types.PlatformConfigFault:\n\t\t\t\t\tif len(fault.FaultMessage) != 0 {\n\t\t\t\t\t\treturn errors.New(fault.FaultMessage[0].Message)\n\t\t\t\t\t}\n\t\t\t\tcase types.DuplicateName:\n\t\t\t\t\tif cmd.Force && fault.Object == object {\n\t\t\t\t\t\tfmt.Fprintf(os.Stderr, \"%s: '%s' already mounted\\n\",\n\t\t\t\t\t\t\thost.InventoryPath, cmd.Name)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn fmt.Errorf(\"%s: %s\", host.InventoryPath, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *create) CreateVmfsDatastore(ctx context.Context, hosts []*object.HostSystem) error {\n\tfor _, host := range hosts {\n\t\tds, err := host.ConfigManager().DatastoreSystem(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Find the specified disk\n\t\tdisks, err := ds.QueryAvailableDisksForVmfs(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar disk *types.HostScsiDisk\n\t\tfor _, e := range disks {\n\t\t\tif e.CanonicalName == cmd.DiskCanonicalName {\n\t\t\t\tdisk = &e\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif disk == nil {\n\t\t\treturn fmt.Errorf(\"no eligible disk found for name %#v\", cmd.DiskCanonicalName)\n\t\t}\n\n\t\t// Query for creation options and pick the right one\n\t\toptions, err := ds.QueryVmfsDatastoreCreateOptions(ctx, disk.DevicePath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar option *types.VmfsDatastoreOption\n\t\tfor _, e := range options {\n\t\t\tif _, ok := e.Info.(*types.VmfsDatastoreAllExtentOption); ok {\n\t\t\t\toption = &e\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif option == nil {\n\t\t\treturn fmt.Errorf(\"cannot use entire disk for datastore for name %#v\", cmd.DiskCanonicalName)\n\t\t}\n\n\t\tspec := *option.Spec.(*types.VmfsDatastoreCreateSpec)\n\t\tspec.Vmfs.VolumeName = cmd.Name\n\t\t_, err = ds.CreateVmfsDatastore(ctx, spec)\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *create) CreateLocalDatastore(ctx context.Context, hosts []*object.HostSystem) error {\n\tfor _, host := range hosts {\n\t\tds, err := host.ConfigManager().DatastoreSystem(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif cmd.Path == \"\" {\n\t\t\tcmd.Path = cmd.Name\n\t\t}\n\n\t\tif cmd.Name == \"\" {\n\t\t\tcmd.Name = filepath.Base(cmd.Path)\n\t\t}\n\n\t\t_, err = ds.CreateLocalDatastore(ctx, cmd.Name, cmd.Path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/datastore/disk/create.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage disk\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/units\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype create struct {\n\t*flags.DatastoreFlag\n\n\tBytes units.ByteSize\n}\n\nfunc init() {\n\tcli.Register(\"datastore.disk.create\", &create{})\n}\n\nfunc (cmd *create) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\n\t_ = cmd.Bytes.Set(\"10G\")\n\tf.Var(&cmd.Bytes, \"size\", \"Size of new disk\")\n}\n\nfunc (cmd *create) Process(ctx context.Context) error {\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *create) Usage() string {\n\treturn \"VMDK\"\n}\n\nfunc (cmd *create) Description() string {\n\treturn `Create VMDK on DS.\n\nExamples:\n  govc datastore.mkdir disks\n  govc datastore.disk.create -size 24G disks/disk1.vmdk`\n}\n\nfunc (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdc, err := cmd.Datacenter()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tds, err := cmd.Datastore()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm := 
object.NewVirtualDiskManager(ds.Client())\n\n\tspec := &types.FileBackedVirtualDiskSpec{\n\t\tVirtualDiskSpec: types.VirtualDiskSpec{\n\t\t\tAdapterType: string(types.VirtualDiskAdapterTypeLsiLogic),\n\t\t\tDiskType:    string(types.VirtualDiskTypeThin),\n\t\t},\n\t\tCapacityKb: int64(cmd.Bytes) / 1024,\n\t}\n\n\ttask, err := m.CreateVirtualDisk(ctx, ds.Path(f.Arg(0)), dc, spec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn task.Wait(ctx)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/datastore/disk/info.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage disk\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype info struct {\n\t*flags.DatastoreFlag\n\n\tc bool\n\td bool\n\tp bool\n}\n\nfunc init() {\n\tcli.Register(\"datastore.disk.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.c, \"c\", false, \"Chain format\")\n\tf.BoolVar(&cmd.d, \"d\", false, \"Include datastore in output\")\n\tf.BoolVar(&cmd.p, \"p\", true, \"Include parents\")\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *info) Usage() string {\n\treturn \"VMDK\"\n}\n\nfunc (cmd *info) Description() string {\n\treturn `Query VMDK info on DS.\n\nExamples:\n  govc datastore.disk.info disks/disk1.vmdk`\n}\n\nfunc fullPath(s string) string {\n\treturn s\n}\n\nfunc dsPath(s string) string {\n\tvar p object.DatastorePath\n\n\tif p.FromString(s) {\n\t\treturn p.Path\n\t}\n\n\treturn s\n}\n\nvar infoPath = dsPath\n\ntype infoResult []object.VirtualDiskInfo\n\nfunc (r infoResult) Write(w 
io.Writer) error {\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfor _, info := range r {\n\t\tfmt.Fprintf(tw, \"Name:\\t%s\\n\", infoPath(info.Name))\n\t\tfmt.Fprintf(tw, \"  Type:\\t%s\\n\", info.DiskType)\n\t\tfmt.Fprintf(tw, \"  Parent:\\t%s\\n\", infoPath(info.Parent))\n\t}\n\n\treturn tw.Flush()\n}\n\ntype chainResult []object.VirtualDiskInfo\n\nfunc (r chainResult) Write(w io.Writer) error {\n\tfor i, info := range r {\n\t\tfmt.Fprint(w, strings.Repeat(\" \", i*2))\n\t\tfmt.Fprintln(w, infoPath(info.Name))\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdc, err := cmd.Datacenter()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tds, err := cmd.Datastore()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm := object.NewVirtualDiskManager(ds.Client())\n\n\tinfo, err := m.QueryVirtualDiskInfo(ctx, ds.Path(f.Arg(0)), dc, cmd.p)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.d {\n\t\tinfoPath = fullPath\n\t}\n\n\tvar r flags.OutputWriter = infoResult(info)\n\n\tif cmd.c {\n\t\tr = chainResult(info)\n\t}\n\n\treturn cmd.WriteResult(r)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/datastore/download.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage datastore\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n)\n\ntype download struct {\n\t*flags.DatastoreFlag\n\t*flags.HostSystemFlag\n}\n\nfunc init() {\n\tcli.Register(\"datastore.download\", &download{})\n}\n\nfunc (cmd *download) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n}\n\nfunc (cmd *download) Process(ctx context.Context) error {\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *download) Usage() string {\n\treturn \"SOURCE DEST\"\n}\n\nfunc (cmd *download) Description() string {\n\treturn `Copy SOURCE from DS to DEST on the local system.\n\nIf DEST name is \"-\", source is written to stdout.\n\nExamples:\n  govc datastore.download vm-name/vmware.log ./local.log\n  govc datastore.download vm-name/vmware.log - | grep -i error`\n}\n\nfunc (cmd *download) Run(ctx context.Context, f *flag.FlagSet) error {\n\targs := f.Args()\n\tif 
len(args) != 2 {\n\t\treturn errors.New(\"invalid arguments\")\n\t}\n\n\tds, err := cmd.Datastore()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\th, err := cmd.HostSystemIfSpecified()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar via string\n\n\tif h != nil {\n\t\tvia = fmt.Sprintf(\" via %s\", h.InventoryPath)\n\t\tctx = ds.HostContext(ctx, h)\n\t}\n\n\tp := soap.DefaultDownload\n\n\tsrc := args[0]\n\tdst := args[1]\n\n\tif dst == \"-\" {\n\t\tf, _, err := ds.Download(ctx, src, &p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = io.Copy(os.Stdout, f)\n\t\treturn err\n\t}\n\n\tif cmd.DatastoreFlag.OutputFlag.TTY {\n\t\tlogger := cmd.DatastoreFlag.ProgressLogger(fmt.Sprintf(\"Downloading%s... \", via))\n\t\tp.Progress = logger\n\t\tdefer logger.Wait()\n\t}\n\n\treturn ds.DownloadFile(ctx, src, dst, &p)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/datastore/info.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage datastore\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype info struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n\t*flags.DatacenterFlag\n}\n\nfunc init() {\n\tcli.Register(\"datastore.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *info) Usage() string {\n\treturn \"[PATH]...\"\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targs := f.Args()\n\tif len(args) == 0 {\n\t\targs = []string{\"*\"}\n\t}\n\n\tvar res infoResult\n\tvar props []string\n\n\tif cmd.OutputFlag.JSON {\n\t\tprops = nil // Load everything\n\t} else {\n\t\tprops = []string{\"info\", \"summary\"} // Load summary\n\t}\n\n\tfor _, arg := range args {\n\t\tobjects, err := finder.DatastoreList(ctx, arg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres.objects = append(res.objects, objects...)\n\t}\n\n\tif len(res.objects) != 0 {\n\t\trefs := make([]types.ManagedObjectReference, 0, len(res.objects))\n\t\tfor _, o := range res.objects {\n\t\t\trefs = append(refs, o.Reference())\n\t\t}\n\n\t\tpc := property.DefaultCollector(c)\n\t\terr = pc.Retrieve(ctx, refs, props, &res.Datastores)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn cmd.WriteResult(&res)\n}\n\ntype infoResult struct {\n\tDatastores []mo.Datastore\n\tobjects    []*object.Datastore\n}\n\nfunc (r *infoResult) Write(w io.Writer) error {\n\t// Maintain order via r.objects as Property collector does not always return results in order.\n\tobjects := make(map[types.ManagedObjectReference]mo.Datastore, len(r.Datastores))\n\tfor _, o := range r.Datastores {\n\t\tobjects[o.Reference()] = o\n\t}\n\n\ttw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)\n\n\tfor _, o := range r.objects {\n\t\tds := objects[o.Reference()]\n\t\ts := ds.Summary\n\t\tfmt.Fprintf(tw, \"Name:\\t%s\\n\", s.Name)\n\t\tfmt.Fprintf(tw, \"  Path:\\t%s\\n\", o.InventoryPath)\n\t\tfmt.Fprintf(tw, \"  Type:\\t%s\\n\", s.Type)\n\t\tfmt.Fprintf(tw, \"  URL:\\t%s\\n\", s.Url)\n\t\tfmt.Fprintf(tw, \"  Capacity:\\t%.1f GB\\n\", float64(s.Capacity)/(1<<30))\n\t\tfmt.Fprintf(tw, \"  Free:\\t%.1f GB\\n\", float64(s.FreeSpace)/(1<<30))\n\n\t\tswitch info := ds.Info.(type) {\n\t\tcase *types.NasDatastoreInfo:\n\t\t\tfmt.Fprintf(tw, \"  Remote:\\t%s:%s\\n\", info.Nas.RemoteHost, 
info.Nas.RemotePath)\n\t\t}\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/datastore/ls.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage datastore\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"path\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/units\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ls struct {\n\t*flags.DatastoreFlag\n\t*flags.OutputFlag\n\n\tlong    bool\n\tslash   bool\n\tall     bool\n\trecurse bool\n}\n\nfunc init() {\n\tcli.Register(\"datastore.ls\", &ls{})\n}\n\nfunc (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.long, \"l\", false, \"Long listing format\")\n\tf.BoolVar(&cmd.slash, \"p\", false, \"Append / indicator to directories\")\n\tf.BoolVar(&cmd.all, \"a\", false, \"Do not ignore entries starting with .\")\n\tf.BoolVar(&cmd.recurse, \"R\", false, \"List subdirectories recursively\")\n}\n\nfunc (cmd *ls) Process(ctx context.Context) error {\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *ls) Usage() string 
{\n\treturn \"[FILE]...\"\n}\n\nfunc isInvalid(err error) bool {\n\tif f, ok := err.(types.HasFault); ok {\n\t\tswitch f.Fault().(type) {\n\t\tcase *types.InvalidArgument:\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error {\n\tds, err := cmd.Datastore()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb, err := ds.Browser(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targs := f.Args()\n\tif len(args) == 0 {\n\t\targs = []string{\"\"}\n\t}\n\n\tresult := &listOutput{\n\t\trs:  make([]types.HostDatastoreBrowserSearchResults, 0),\n\t\tcmd: cmd,\n\t}\n\n\tfor _, arg := range args {\n\t\tspec := types.HostDatastoreBrowserSearchSpec{\n\t\t\tMatchPattern: []string{\"*\"},\n\t\t}\n\n\t\tif cmd.long {\n\t\t\tspec.Details = &types.FileQueryFlags{\n\t\t\t\tFileType:     true,\n\t\t\t\tFileSize:     true,\n\t\t\t\tFileOwner:    types.NewBool(true), // TODO: omitempty is generated, but seems to be required\n\t\t\t\tModification: true,\n\t\t\t}\n\t\t}\n\n\t\tfor i := 0; ; i++ {\n\t\t\tr, err := cmd.ListPath(b, arg, spec)\n\t\t\tif err != nil {\n\t\t\t\t// Treat the argument as a match pattern if not found as directory\n\t\t\t\tif i == 0 && types.IsFileNotFound(err) || isInvalid(err) {\n\t\t\t\t\tspec.MatchPattern[0] = path.Base(arg)\n\t\t\t\t\targ = path.Dir(arg)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Treat an empty result against match pattern as file not found\n\t\t\tif i == 1 && len(r) == 1 && len(r[0].File) == 0 {\n\t\t\t\treturn fmt.Errorf(\"File %s/%s was not found\", r[0].FolderPath, spec.MatchPattern[0])\n\t\t\t}\n\n\t\t\tfor n := range r {\n\t\t\t\tresult.add(r[n])\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn cmd.WriteResult(result)\n}\n\nfunc (cmd *ls) ListPath(b *object.HostDatastoreBrowser, path string, spec types.HostDatastoreBrowserSearchSpec) ([]types.HostDatastoreBrowserSearchResults, error) {\n\tctx := context.TODO()\n\n\tpath, err := 
cmd.DatastorePath(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsearch := b.SearchDatastore\n\tif cmd.recurse {\n\t\tsearch = b.SearchDatastoreSubFolders\n\t}\n\n\ttask, err := search(ctx, path, &spec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo, err := task.WaitForResult(ctx, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch r := info.Result.(type) {\n\tcase types.HostDatastoreBrowserSearchResults:\n\t\treturn []types.HostDatastoreBrowserSearchResults{r}, nil\n\tcase types.ArrayOfHostDatastoreBrowserSearchResults:\n\t\treturn r.HostDatastoreBrowserSearchResults, nil\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown result type: %T\", r))\n\t}\n}\n\ntype listOutput struct {\n\trs  []types.HostDatastoreBrowserSearchResults\n\tcmd *ls\n}\n\nfunc (o *listOutput) add(r types.HostDatastoreBrowserSearchResults) {\n\tif o.cmd.recurse && !o.cmd.all {\n\t\t// filter out \".hidden\" directories\n\t\tpath := strings.SplitN(r.FolderPath, \" \", 2)\n\t\tif len(path) == 2 {\n\t\t\tpath = strings.Split(path[1], \"/\")\n\t\t\tif path[0] == \".\" {\n\t\t\t\tpath = path[1:]\n\t\t\t}\n\n\t\t\tfor _, p := range path {\n\t\t\t\tif len(p) != 0 && p[0] == '.' {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tres := r\n\tres.File = nil\n\n\tfor _, f := range r.File {\n\t\tif f.GetFileInfo().Path[0] == '.' 
&& !o.cmd.all {\n\t\t\tcontinue\n\t\t}\n\n\t\tif o.cmd.slash {\n\t\t\tif d, ok := f.(*types.FolderFileInfo); ok {\n\t\t\t\td.Path += \"/\"\n\t\t\t}\n\t\t}\n\n\t\tres.File = append(res.File, f)\n\t}\n\n\to.rs = append(o.rs, res)\n}\n\n// hasMultiplePaths returns whether or not the slice of search results contains\n// results from more than one folder path.\nfunc (o *listOutput) hasMultiplePaths() bool {\n\tif len(o.rs) == 0 {\n\t\treturn false\n\t}\n\n\tp := o.rs[0].FolderPath\n\n\t// Multiple paths if any entry is not equal to the first one.\n\tfor _, e := range o.rs {\n\t\tif e.FolderPath != p {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (o *listOutput) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(o.rs)\n}\n\nfunc (o *listOutput) Write(w io.Writer) error {\n\t// Only include path header if we're dealing with more than one path.\n\tincludeHeader := false\n\tif o.hasMultiplePaths() {\n\t\tincludeHeader = true\n\t}\n\n\ttw := tabwriter.NewWriter(w, 3, 0, 2, ' ', 0)\n\tfor i, r := range o.rs {\n\t\tif includeHeader {\n\t\t\tif i > 0 {\n\t\t\t\tfmt.Fprintf(tw, \"\\n\")\n\t\t\t}\n\t\t\tfmt.Fprintf(tw, \"%s:\\n\", r.FolderPath)\n\t\t}\n\t\tfor _, file := range r.File {\n\t\t\tinfo := file.GetFileInfo()\n\t\t\tif o.cmd.long {\n\t\t\t\tfmt.Fprintf(tw, \"%s\\t%s\\t%s\\n\", units.ByteSize(info.FileSize), info.Modification.Format(\"Mon Jan 2 15:04:05 2006\"), info.Path)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(tw, \"%s\\n\", info.Path)\n\t\t\t}\n\t\t}\n\t}\n\ttw.Flush()\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/datastore/mkdir.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage datastore\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype mkdir struct {\n\t*flags.DatastoreFlag\n\n\tcreateParents bool\n\tisNamespace   bool\n}\n\nfunc init() {\n\tcli.Register(\"datastore.mkdir\", &mkdir{})\n}\n\nfunc (cmd *mkdir) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.createParents, \"p\", false, \"Create intermediate directories as needed\")\n\tf.BoolVar(&cmd.isNamespace, \"namespace\", false, \"Return uuid of namespace created on vsan datastore\")\n}\n\nfunc (cmd *mkdir) Process(ctx context.Context) error {\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *mkdir) Usage() string {\n\treturn \"DIRECTORY\"\n}\n\nfunc (cmd *mkdir) Run(ctx context.Context, f *flag.FlagSet) error {\n\targs := f.Args()\n\tif len(args) == 0 {\n\t\treturn errors.New(\"missing operand\")\n\t}\n\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.isNamespace {\n\t\tvar uuid string\n\t\tvar ds *object.Datastore\n\n\t\tif 
ds, err = cmd.Datastore(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpath := args[0]\n\n\t\tnm := object.NewDatastoreNamespaceManager(c)\n\t\tif uuid, err = nm.CreateDirectory(ctx, ds, path, \"\"); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Println(uuid)\n\t} else {\n\t\tvar dc *object.Datacenter\n\t\tvar path string\n\n\t\tdc, err = cmd.Datacenter()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpath, err = cmd.DatastorePath(args[0])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tm := object.NewFileManager(c)\n\t\terr = m.MakeDirectory(ctx, path, dc, cmd.createParents)\n\n\t\t// ignore EEXIST if -p flag is given\n\t\tif err != nil && cmd.createParents {\n\t\t\tif soap.IsSoapFault(err) {\n\t\t\t\tsoapFault := soap.ToSoapFault(err)\n\t\t\t\tif _, ok := soapFault.VimFault().(types.FileAlreadyExists); ok {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/datastore/mv.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage datastore\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype mv struct {\n\t*flags.DatastoreFlag\n\n\tforce bool\n}\n\nfunc init() {\n\tcli.Register(\"datastore.mv\", &mv{})\n}\n\nfunc (cmd *mv) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.force, \"f\", false, \"If true, overwrite any identically named file at the destination\")\n}\n\nfunc (cmd *mv) Process(ctx context.Context) error {\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *mv) Usage() string {\n\treturn \"SRC DST\"\n}\n\nfunc (cmd *mv) Description() string {\n\treturn `Move SRC to DST on DATASTORE.\n\nExamples:\n  govc datastore.mv foo/foo.vmx foo/foo.vmx.old\n  govc datastore.mv -f my.vmx foo/foo.vmx`\n}\n\nfunc (cmd *mv) Run(ctx context.Context, f *flag.FlagSet) error {\n\targs := f.Args()\n\tif len(args) != 2 {\n\t\treturn errors.New(\"SRC and DST arguments are required\")\n\t}\n\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdc, err := cmd.Datacenter()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: support cross-datacenter 
move\n\n\tsrc, err := cmd.DatastorePath(args[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdst, err := cmd.DatastorePath(args[1])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm := object.NewFileManager(c)\n\ttask, err := m.MoveDatastoreFile(ctx, src, dc, dst, dc, cmd.force)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn task.Wait(ctx)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/datastore/remove.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage datastore\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype remove struct {\n\t*flags.HostSystemFlag\n\t*flags.DatastoreFlag\n}\n\nfunc init() {\n\tcli.Register(\"datastore.remove\", &remove{})\n}\n\nfunc (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n}\n\nfunc (cmd *remove) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *remove) Usage() string {\n\treturn \"HOST...\"\n}\n\nfunc (cmd *remove) Description() string {\n\treturn `Remove datastore from HOST.\n\nExamples:\n  govc datastore.remove -ds nfsDatastore cluster1\n  govc datastore.remove -ds nasDatastore host1 host2 host3`\n}\n\nfunc (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error {\n\tds, err := cmd.Datastore()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thosts, err := cmd.HostSystems(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, host := range hosts {\n\t\thds, err := 
host.ConfigManager().DatastoreSystem(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = hds.Remove(ctx, ds)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/datastore/rm.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage datastore\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype rm struct {\n\t*flags.DatastoreFlag\n\n\tkind        bool\n\tforce       bool\n\tisNamespace bool\n}\n\nfunc init() {\n\tcli.Register(\"datastore.rm\", &rm{})\n\tcli.Alias(\"datastore.rm\", \"datastore.delete\")\n}\n\nfunc (cmd *rm) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.kind, \"t\", true, \"Use file type to choose disk or file manager\")\n\tf.BoolVar(&cmd.force, \"f\", false, \"Force; ignore nonexistent files and arguments\")\n\tf.BoolVar(&cmd.isNamespace, \"namespace\", false, \"Path is uuid of namespace on vsan datastore\")\n}\n\nfunc (cmd *rm) Process(ctx context.Context) error {\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *rm) Usage() string {\n\treturn \"FILE\"\n}\n\nfunc (cmd *rm) Description() string {\n\treturn `Remove FILE from DATASTORE.\n\nExamples:\n  govc datastore.rm vm/vmware.log\n  govc datastore.rm vm\n  govc datastore.rm -f images/base.vmdk`\n}\n\nfunc (cmd *rm) Run(ctx context.Context, f *flag.FlagSet) error 
{\n\targs := f.Args()\n\tif len(args) == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar dc *object.Datacenter\n\tdc, err = cmd.Datacenter()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tds, err := cmd.Datastore()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.isNamespace {\n\t\tpath := args[0]\n\n\t\tnm := object.NewDatastoreNamespaceManager(c)\n\t\terr = nm.DeleteDirectory(ctx, dc, path)\n\t} else {\n\t\tfm := ds.NewFileManager(dc, cmd.force)\n\n\t\tremove := fm.DeleteFile // File delete\n\t\tif cmd.kind {\n\t\t\tremove = fm.Delete // VirtualDisk or File delete\n\t\t}\n\n\t\terr = remove(ctx, args[0])\n\t}\n\n\tif err != nil {\n\t\tif types.IsFileNotFound(err) && cmd.force {\n\t\t\t// Ignore error\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/datastore/tail.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage datastore\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"io\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype tail struct {\n\t*flags.DatastoreFlag\n\t*flags.HostSystemFlag\n\n\tcount  int64\n\tlines  int\n\tfollow bool\n}\n\nfunc init() {\n\tcli.Register(\"datastore.tail\", &tail{})\n}\n\nfunc (cmd *tail) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tf.Int64Var(&cmd.count, \"c\", -1, \"Output the last NUM bytes\")\n\tf.IntVar(&cmd.lines, \"n\", 10, \"Output the last NUM lines\")\n\tf.BoolVar(&cmd.follow, \"f\", false, \"Output appended data as the file grows\")\n}\n\nfunc (cmd *tail) Description() string {\n\treturn `Output the last part of datastore files.\n\nExamples:\n  govc datastore.tail -n 100 vm-name/vmware.log\n  govc datastore.tail -n 0 -f vm-name/vmware.log`\n}\n\nfunc (cmd *tail) Process(ctx context.Context) error {\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *tail) Usage() string {\n\treturn \"PATH\"\n}\n\nfunc (cmd *tail) 
Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tds, err := cmd.Datastore()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\th, err := cmd.HostSystemIfSpecified()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif h != nil {\n\t\tctx = ds.HostContext(ctx, h)\n\t}\n\n\tfile, err := ds.Open(ctx, f.Arg(0))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar reader io.ReadCloser = file\n\n\tvar offset int64\n\n\tif cmd.count >= 0 {\n\t\tinfo, serr := file.Stat()\n\t\tif serr != nil {\n\t\t\treturn serr\n\t\t}\n\n\t\tif info.Size() > cmd.count {\n\t\t\toffset = info.Size() - cmd.count\n\n\t\t\t_, err = file.Seek(offset, io.SeekStart)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else if cmd.lines >= 0 {\n\t\terr = file.Tail(cmd.lines)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif cmd.follow {\n\t\treader = file.Follow(time.Second)\n\t}\n\n\t_, err = io.Copy(os.Stdout, reader)\n\n\t_ = reader.Close()\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/datastore/upload.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage datastore\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n)\n\ntype upload struct {\n\t*flags.OutputFlag\n\t*flags.DatastoreFlag\n}\n\nfunc init() {\n\tcli.Register(\"datastore.upload\", &upload{})\n}\n\nfunc (cmd *upload) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n}\n\nfunc (cmd *upload) Process(ctx context.Context) error {\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *upload) Usage() string {\n\treturn \"SOURCE DEST\"\n}\n\nfunc (cmd *upload) Description() string {\n\treturn `Copy SOURCE from the local system to DEST on DS.\n\nIf SOURCE name is \"-\", read source from stdin.\n\nExamples:\n  govc datastore.upload -ds datastore1 ./config.iso vm-name/config.iso\n  genisoimage ... 
| govc datastore.upload -ds datastore1 - vm-name/config.iso`\n}\n\nfunc (cmd *upload) Run(ctx context.Context, f *flag.FlagSet) error {\n\targs := f.Args()\n\tif len(args) != 2 {\n\t\treturn errors.New(\"invalid arguments\")\n\t}\n\n\tds, err := cmd.Datastore()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp := soap.DefaultUpload\n\n\tsrc := args[0]\n\tdst := args[1]\n\n\tif src == \"-\" {\n\t\treturn ds.Upload(ctx, os.Stdin, dst, &p)\n\t}\n\n\tif cmd.OutputFlag.TTY {\n\t\tlogger := cmd.ProgressLogger(\"Uploading... \")\n\t\tp.Progress = logger\n\t\tdefer logger.Wait()\n\t}\n\n\treturn ds.UploadFile(ctx, src, dst, &p)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/datastore/vsan/ls.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vsan\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n)\n\ntype ls struct {\n\t*flags.DatastoreFlag\n\n\tlong   bool\n\torphan bool\n}\n\nfunc init() {\n\tcli.Register(\"datastore.vsan.dom.ls\", &ls{})\n}\n\nfunc (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.long, \"l\", false, \"Long listing\")\n\tf.BoolVar(&cmd.orphan, \"o\", false, \"List orphan objects\")\n}\n\nfunc (cmd *ls) Process(ctx context.Context) error {\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *ls) Usage() string {\n\treturn \"[UUID]...\"\n}\n\nfunc (cmd *ls) Description() string {\n\treturn `List vSAN DOM objects in DS.\n\nExamples:\n  govc datastore.vsan.dom.ls\n  govc datastore.vsan.dom.ls -ds vsanDatastore -l\n  govc datastore.vsan.dom.ls -l d85aa758-63f5-500a-3150-0200308e589c`\n}\n\nfunc (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error {\n\tds, err := cmd.Datastore()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar mds mo.Datastore\n\terr = ds.Properties(ctx, ds.Reference(), 
[]string{\"summary\"}, &mds)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif mds.Summary.Type != \"vsan\" {\n\t\treturn flag.ErrHelp\n\t}\n\n\thosts, err := ds.AttachedHosts(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(hosts) == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tm, err := hosts[0].ConfigManager().VsanInternalSystem(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tids, err := m.QueryVsanObjectUuidsByFilter(ctx, f.Args(), 0, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(ids) == 0 {\n\t\treturn nil\n\t}\n\n\tif !cmd.long && !cmd.orphan {\n\t\tfor _, id := range ids {\n\t\t\tfmt.Fprintln(cmd.Out, id)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tobjs, err := m.GetVsanObjExtAttrs(ctx, ids)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tu, err := url.Parse(mds.Summary.Url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttw := tabwriter.NewWriter(cmd.Out, 2, 0, 2, ' ', 0)\n\tcmd.Out = tw\n\n\tfor id, obj := range objs {\n\t\tpath := obj.DatastorePath(u.Path)\n\n\t\tif cmd.orphan {\n\t\t\t_, err = ds.Stat(ctx, path)\n\t\t\tif err == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tswitch err.(type) {\n\t\t\tcase object.DatastoreNoSuchDirectoryError, object.DatastoreNoSuchFileError:\n\t\t\tdefault:\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif !cmd.long {\n\t\t\t\tfmt.Fprintln(cmd.Out, id)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprintf(cmd.Out, \"%s\\t%s\\t%s\\n\", id, obj.Class, path)\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/datastore/vsan/rm.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vsan\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype rm struct {\n\t*flags.DatastoreFlag\n\n\tforce   bool\n\tverbose bool\n}\n\nfunc init() {\n\tcli.Register(\"datastore.vsan.dom.rm\", &rm{})\n}\n\nfunc (cmd *rm) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.force, \"f\", false, \"Force delete\")\n\tf.BoolVar(&cmd.verbose, \"v\", false, \"Print deleted UUIDs to stdout, failed to stderr\")\n}\n\nfunc (cmd *rm) Process(ctx context.Context) error {\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *rm) Usage() string {\n\treturn \"UUID...\"\n}\n\nfunc (cmd *rm) Description() string {\n\treturn `Remove vSAN DOM objects in DS.\n\nExamples:\n  govc datastore.vsan.dom.rm d85aa758-63f5-500a-3150-0200308e589c\n  govc datastore.vsan.dom.rm -f d85aa758-63f5-500a-3150-0200308e589c\n  govc datastore.vsan.dom.ls -o | xargs govc datastore.vsan.dom.rm`\n}\n\nfunc (cmd *rm) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tds, err := cmd.Datastore()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thosts, err := 
ds.AttachedHosts(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(hosts) == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tm, err := hosts[0].ConfigManager().VsanInternalSystem(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tres, err := m.DeleteVsanObjects(ctx, f.Args(), &cmd.force)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.verbose {\n\t\tfor _, r := range res {\n\t\t\tif r.Success {\n\t\t\t\tfmt.Fprintln(cmd.Out, r.Uuid)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%s %s\\n\", r.Uuid, r.FailureReason[0].Message)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/device/boot.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage device\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype boot struct {\n\t*flags.VirtualMachineFlag\n\n\torder string\n\ttypes.VirtualMachineBootOptions\n}\n\nfunc init() {\n\tcli.Register(\"device.boot\", &boot{})\n}\n\nfunc (cmd *boot) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.Int64Var(&cmd.BootDelay, \"delay\", 0, \"Delay in ms before starting the boot sequence\")\n\tf.StringVar(&cmd.order, \"order\", \"\", \"Boot device order\")\n\tf.Int64Var(&cmd.BootRetryDelay, \"retry-delay\", 0, \"Delay in ms before a boot retry\")\n\n\tcmd.BootRetryEnabled = types.NewBool(false)\n\tf.BoolVar(cmd.BootRetryEnabled, \"retry\", false, \"If true, retry boot after retry-delay\")\n\n\tcmd.EnterBIOSSetup = types.NewBool(false)\n\tf.BoolVar(cmd.EnterBIOSSetup, \"setup\", false, \"If true, enter BIOS setup on next boot\")\n}\n\nfunc (cmd *boot) Description() string {\n\treturn `Configure VM boot settings.\n\nExamples:\n  govc device.boot -vm $vm -delay 1000 -order floppy,cdrom,ethernet,disk`\n}\n\nfunc (cmd *boot) Process(ctx context.Context) error {\n\tif err := 
cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *boot) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.order != \"\" {\n\t\to := strings.Split(cmd.order, \",\")\n\t\tcmd.BootOrder = devices.BootOrder(o)\n\t}\n\n\treturn vm.SetBootOptions(ctx, &cmd.VirtualMachineBootOptions)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/device/cdrom/add.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage cdrom\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype add struct {\n\t*flags.VirtualMachineFlag\n\n\tcontroller string\n}\n\nfunc init() {\n\tcli.Register(\"device.cdrom.add\", &add{})\n}\n\nfunc (cmd *add) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.controller, \"controller\", \"\", \"IDE controller name\")\n}\n\nfunc (cmd *add) Description() string {\n\treturn `Add CD-ROM device to VM.\n\nExamples:\n  govc device.cdrom.add -vm $vm\n  govc device.ls -vm $vm | grep ide-\n  govc device.cdrom.add -vm $vm -controller ide-200\n  govc device.info cdrom-*`\n}\n\nfunc (cmd *add) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc, err := devices.FindIDEController(cmd.controller)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td, err := devices.CreateCdrom(c)\n\tif 
err != nil {\n\t\treturn err\n\t}\n\n\terr = vm.AddDevice(ctx, d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// output name of device we just created\n\tdevices, err = vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdevices = devices.SelectByType(d)\n\n\tname := devices.Name(devices[len(devices)-1])\n\n\tfmt.Println(name)\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/device/cdrom/eject.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage cdrom\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype eject struct {\n\t*flags.VirtualMachineFlag\n\n\tdevice string\n}\n\nfunc init() {\n\tcli.Register(\"device.cdrom.eject\", &eject{})\n}\n\nfunc (cmd *eject) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.device, \"device\", \"\", \"CD-ROM device name\")\n}\n\nfunc (cmd *eject) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *eject) Description() string {\n\treturn `Eject media from CD-ROM device.\n\nIf device is not specified, the first CD-ROM device is used.\n\nExamples:\n  govc device.cdrom.eject -vm vm-1\n  govc device.cdrom.eject -vm vm-1 -device floppy-1`\n}\n\nfunc (cmd *eject) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc, err := devices.FindCdrom(cmd.device)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn vm.EditDevice(ctx, devices.EjectIso(c))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/device/cdrom/insert.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage cdrom\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype insert struct {\n\t*flags.DatastoreFlag\n\t*flags.VirtualMachineFlag\n\n\tdevice string\n}\n\nfunc init() {\n\tcli.Register(\"device.cdrom.insert\", &insert{})\n}\n\nfunc (cmd *insert) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.device, \"device\", \"\", \"CD-ROM device name\")\n}\n\nfunc (cmd *insert) Process(ctx context.Context) error {\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *insert) Usage() string {\n\treturn \"ISO\"\n}\n\nfunc (cmd *insert) Description() string {\n\treturn `Insert media on datastore into CD-ROM device.\n\nIf device is not specified, the first CD-ROM device is used.\n\nExamples:\n  govc device.cdrom.insert -vm vm-1 -device cdrom-3000 images/boot.iso`\n}\n\nfunc (cmd *insert) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif 
vm == nil || f.NArg() != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc, err := devices.FindCdrom(cmd.device)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tiso, err := cmd.DatastorePath(f.Arg(0))\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn vm.EditDevice(ctx, devices.InsertIso(c, iso))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/device/connect.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage device\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype connect struct {\n\t*flags.VirtualMachineFlag\n}\n\nfunc init() {\n\tcli.Register(\"device.connect\", &connect{})\n}\n\nfunc (cmd *connect) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n}\n\nfunc (cmd *connect) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *connect) Usage() string {\n\treturn \"DEVICE...\"\n}\n\nfunc (cmd *connect) Description() string {\n\treturn `Connect DEVICE on VM.\n\nExamples:\n  govc device.connect -vm $name cdrom-3000`\n}\n\nfunc (cmd *connect) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, name := range f.Args() {\n\t\tdevice := devices.Find(name)\n\t\tif device == nil {\n\t\t\treturn fmt.Errorf(\"device '%s' not found\", name)\n\t\t}\n\n\t\tif err = devices.Connect(device); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = 
vm.EditDevice(ctx, device); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/device/disconnect.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage device\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype disconnect struct {\n\t*flags.VirtualMachineFlag\n}\n\nfunc init() {\n\tcli.Register(\"device.disconnect\", &disconnect{})\n}\n\nfunc (cmd *disconnect) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n}\n\nfunc (cmd *disconnect) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *disconnect) Usage() string {\n\treturn \"DEVICE...\"\n}\n\nfunc (cmd *disconnect) Description() string {\n\treturn `Disconnect DEVICE on VM.\n\nExamples:\n  govc device.disconnect -vm $name cdrom-3000`\n}\n\nfunc (cmd *disconnect) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, name := range f.Args() {\n\t\tdevice := devices.Find(name)\n\t\tif device == nil {\n\t\t\treturn fmt.Errorf(\"device '%s' not found\", name)\n\t\t}\n\n\t\tif err = devices.Disconnect(device); err != nil {\n\t\t\treturn 
err\n\t\t}\n\n\t\tif err = vm.EditDevice(ctx, device); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/device/floppy/add.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage floppy\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype add struct {\n\t*flags.VirtualMachineFlag\n}\n\nfunc init() {\n\tcli.Register(\"device.floppy.add\", &add{})\n}\n\nfunc (cmd *add) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n}\n\nfunc (cmd *add) Description() string {\n\treturn `Add floppy device to VM.\n\nExamples:\n  govc device.floppy.add -vm $vm\n  govc device.info floppy-*`\n}\n\nfunc (cmd *add) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td, err := devices.CreateFloppy()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = vm.AddDevice(ctx, d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// output name of device we just created\n\tdevices, err = vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdevices = devices.SelectByType(d)\n\n\tname := 
devices.Name(devices[len(devices)-1])\n\n\tfmt.Println(name)\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/device/floppy/eject.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage floppy\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype eject struct {\n\t*flags.VirtualMachineFlag\n\n\tdevice string\n}\n\nfunc init() {\n\tcli.Register(\"device.floppy.eject\", &eject{})\n}\n\nfunc (cmd *eject) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.device, \"device\", \"\", \"Floppy device name\")\n}\n\nfunc (cmd *eject) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *eject) Description() string {\n\treturn `Eject image from floppy device.\n\nIf device is not specified, the first floppy device is used.\n\nExamples:\n  govc device.floppy.eject -vm vm-1`\n}\n\nfunc (cmd *eject) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc, err := devices.FindFloppy(cmd.device)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn vm.EditDevice(ctx, devices.EjectImg(c))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/device/floppy/insert.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage floppy\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype insert struct {\n\t*flags.DatastoreFlag\n\t*flags.VirtualMachineFlag\n\n\tdevice string\n}\n\nfunc init() {\n\tcli.Register(\"device.floppy.insert\", &insert{})\n}\n\nfunc (cmd *insert) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.device, \"device\", \"\", \"Floppy device name\")\n}\n\nfunc (cmd *insert) Process(ctx context.Context) error {\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *insert) Usage() string {\n\treturn \"IMG\"\n}\n\nfunc (cmd *insert) Description() string {\n\treturn `Insert IMG on datastore into floppy device.\n\nIf device is not specified, the first floppy device is used.\n\nExamples:\n  govc device.floppy.insert -vm vm-1 vm-1/config.img`\n}\n\nfunc (cmd *insert) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil || 
f.NArg() != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc, err := devices.FindFloppy(cmd.device)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\timg, err := cmd.DatastorePath(f.Arg(0))\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn vm.EditDevice(ctx, devices.InsertImg(c, img))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/device/info.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage device\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype info struct {\n\t*flags.VirtualMachineFlag\n\t*flags.OutputFlag\n\t*flags.NetworkFlag\n}\n\nfunc init() {\n\tcli.Register(\"device.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tcmd.NetworkFlag, ctx = flags.NewNetworkFlag(ctx)\n\tcmd.NetworkFlag.Register(ctx, f)\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.NetworkFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *info) Usage() string {\n\treturn \"[DEVICE]...\"\n}\n\nfunc (cmd *info) Description() string {\n\treturn `Device info for VM.\n\nExamples:\n  govc device.info -vm $name\n  govc device.info -vm $name disk-*\n  govc 
device.info -vm $name -json ethernet-0 | jq -r .Devices[].MacAddress`\n}\n\nfunc (cmd *info) match(p string, devices object.VirtualDeviceList) object.VirtualDeviceList {\n\tvar matches object.VirtualDeviceList\n\tmatch := func(name string) bool {\n\t\tmatched, _ := path.Match(p, name)\n\t\treturn matched\n\t}\n\n\tfor _, device := range devices {\n\t\tname := devices.Name(device)\n\t\teq := name == p\n\t\tif eq || match(name) {\n\t\t\tmatches = append(matches, device)\n\t\t}\n\t\tif eq {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn matches\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tres := infoResult{\n\t\tlist: devices,\n\t}\n\n\tif cmd.NetworkFlag.IsSet() {\n\t\tnet, err := cmd.Network()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbacking, err := net.EthernetCardBackingInfo(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdevices = devices.SelectByBackingInfo(backing)\n\t}\n\n\tif f.NArg() == 0 {\n\t\tres.Devices = devices\n\t} else {\n\t\tfor _, name := range f.Args() {\n\t\t\tmatches := cmd.match(name, devices)\n\t\t\tif len(matches) == 0 {\n\t\t\t\treturn fmt.Errorf(\"device '%s' not found\", name)\n\t\t\t}\n\n\t\t\tres.Devices = append(res.Devices, matches...)\n\t\t}\n\t}\n\n\treturn cmd.WriteResult(&res)\n}\n\ntype infoResult struct {\n\tDevices object.VirtualDeviceList\n\t// need the full list of devices to lookup attached devices and controllers\n\tlist object.VirtualDeviceList\n}\n\nfunc (r *infoResult) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)\n\n\tfor _, device := range r.Devices {\n\t\td := device.GetVirtualDevice()\n\t\tinfo := d.DeviceInfo.GetDescription()\n\n\t\tfmt.Fprintf(tw, \"Name:\\t%s\\n\", r.Devices.Name(device))\n\t\tfmt.Fprintf(tw, \"  Type:\\t%s\\n\", 
r.Devices.TypeName(device))\n\t\tfmt.Fprintf(tw, \"  Label:\\t%s\\n\", info.Label)\n\t\tfmt.Fprintf(tw, \"  Summary:\\t%s\\n\", info.Summary)\n\t\tfmt.Fprintf(tw, \"  Key:\\t%d\\n\", d.Key)\n\n\t\tif c, ok := device.(types.BaseVirtualController); ok {\n\t\t\tvar attached []string\n\t\t\tfor _, key := range c.GetVirtualController().Device {\n\t\t\t\tattached = append(attached, r.Devices.Name(r.list.FindByKey(key)))\n\t\t\t}\n\t\t\tfmt.Fprintf(tw, \"  Devices:\\t%s\\n\", strings.Join(attached, \", \"))\n\t\t} else {\n\t\t\tif c := r.list.FindByKey(d.ControllerKey); c != nil {\n\t\t\t\tfmt.Fprintf(tw, \"  Controller:\\t%s\\n\", r.Devices.Name(c))\n\t\t\t\tif d.UnitNumber != nil {\n\t\t\t\t\tfmt.Fprintf(tw, \"  Unit number:\\t%d\\n\", *d.UnitNumber)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Fprintf(tw, \"  Unit number:\\t<nil>\\n\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif ca := d.Connectable; ca != nil {\n\t\t\tfmt.Fprintf(tw, \"  Connected:\\t%t\\n\", ca.Connected)\n\t\t\tfmt.Fprintf(tw, \"  Start connected:\\t%t\\n\", ca.StartConnected)\n\t\t\tfmt.Fprintf(tw, \"  Guest control:\\t%t\\n\", ca.AllowGuestControl)\n\t\t\tfmt.Fprintf(tw, \"  Status:\\t%s\\n\", ca.Status)\n\t\t}\n\n\t\tswitch md := device.(type) {\n\t\tcase types.BaseVirtualEthernetCard:\n\t\t\tfmt.Fprintf(tw, \"  MAC Address:\\t%s\\n\", md.GetVirtualEthernetCard().MacAddress)\n\t\t\tfmt.Fprintf(tw, \"  Address type:\\t%s\\n\", md.GetVirtualEthernetCard().AddressType)\n\t\tcase *types.VirtualDisk:\n\t\t\tif b, ok := md.Backing.(types.BaseVirtualDeviceFileBackingInfo); ok {\n\t\t\t\tfmt.Fprintf(tw, \"  File:\\t%s\\n\", b.GetVirtualDeviceFileBackingInfo().FileName)\n\t\t\t}\n\t\t\tif b, ok := md.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok && b.Parent != nil {\n\t\t\t\tfmt.Fprintf(tw, \"  Parent:\\t%s\\n\", b.Parent.GetVirtualDeviceFileBackingInfo().FileName)\n\t\t\t}\n\t\tcase *types.VirtualSerialPort:\n\t\t\tif b, ok := md.Backing.(*types.VirtualSerialPortURIBackingInfo); ok {\n\t\t\t\tfmt.Fprintf(tw, \"  
Direction:\\t%s\\n\", b.Direction)\n\t\t\t\tfmt.Fprintf(tw, \"  Service URI:\\t%s\\n\", b.ServiceURI)\n\t\t\t\tfmt.Fprintf(tw, \"  Proxy URI:\\t%s\\n\", b.ProxyURI)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/device/ls.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage device\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype ls struct {\n\t*flags.VirtualMachineFlag\n\n\tboot bool\n}\n\nfunc init() {\n\tcli.Register(\"device.ls\", &ls{})\n}\n\nfunc (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.boot, \"boot\", false, \"List devices configured in the VM's boot options\")\n}\n\nfunc (cmd *ls) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *ls) Description() string {\n\treturn `List devices for VM.\n\nExamples:\n  govc device.ls -vm $name`\n}\n\nfunc (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.boot {\n\t\toptions, err := vm.BootOptions(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdevices = devices.SelectBootOrder(options.BootOrder)\n\t}\n\n\ttw := tabwriter.NewWriter(os.Stdout, 3, 0, 2, ' ', 0)\n\n\tfor _, 
device := range devices {\n\t\tfmt.Fprintf(tw, \"%s\\t%s\\t%s\\n\", devices.Name(device), devices.TypeName(device),\n\t\t\tdevice.GetVirtualDevice().DeviceInfo.GetDescription().Summary)\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/device/remove.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage device\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype remove struct {\n\t*flags.VirtualMachineFlag\n\tkeepFiles bool\n}\n\nfunc init() {\n\tcli.Register(\"device.remove\", &remove{})\n}\n\nfunc (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\tf.BoolVar(&cmd.keepFiles, \"keep\", false, \"Keep files in datastore\")\n}\n\nfunc (cmd *remove) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *remove) Usage() string {\n\treturn \"DEVICE...\"\n}\n\nfunc (cmd *remove) Description() string {\n\treturn `Remove DEVICE from VM.\n\nExamples:\n  govc device.remove -vm $name cdrom-3000\n  govc device.remove -vm $name -keep disk-1000`\n}\n\nfunc (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, name := range f.Args() {\n\t\tdevice := devices.Find(name)\n\t\tif device == nil {\n\t\t\treturn fmt.Errorf(\"device '%s' 
not found\", name)\n\t\t}\n\n\t\tif err = vm.RemoveDevice(ctx, cmd.keepFiles, device); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/device/scsi/add.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage scsi\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype add struct {\n\t*flags.VirtualMachineFlag\n\n\tcontroller   string\n\tsharedBus    string\n\thotAddRemove bool\n}\n\nfunc init() {\n\tcli.Register(\"device.scsi.add\", &add{})\n}\n\nfunc (cmd *add) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tvar ctypes []string\n\tct := object.SCSIControllerTypes()\n\tfor _, t := range ct {\n\t\tctypes = append(ctypes, ct.Type(t))\n\t}\n\tf.StringVar(&cmd.controller, \"type\", ct.Type(ct[0]),\n\t\tfmt.Sprintf(\"SCSI controller type (%s)\", strings.Join(ctypes, \"|\")))\n\tf.StringVar(&cmd.sharedBus, \"sharing\", string(types.VirtualSCSISharingNoSharing), \"SCSI sharing\")\n\tf.BoolVar(&cmd.hotAddRemove, \"hot\", false, \"Enable hot-add/remove\")\n}\n\nfunc (cmd *add) Description() string {\n\treturn `Add SCSI controller to VM.\n\nExamples:\n  govc device.scsi.add -vm $vm\n  govc device.scsi.add -vm $vm -type pvscsi\n  govc device.info -vm $vm {lsi,pv}*`\n}\n\nfunc (cmd *add) Process(ctx context.Context) error {\n\tif err := 
cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td, err := devices.CreateSCSIController(cmd.controller)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc := d.(types.BaseVirtualSCSIController).GetVirtualSCSIController()\n\tc.HotAddRemove = &cmd.hotAddRemove\n\tc.SharedBus = types.VirtualSCSISharing(cmd.sharedBus)\n\n\terr = vm.AddDevice(ctx, d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// output name of device we just created\n\tdevices, err = vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdevices = devices.SelectByType(d)\n\n\tname := devices.Name(devices[len(devices)-1])\n\n\tfmt.Println(name)\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/device/serial/add.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage serial\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype add struct {\n\t*flags.VirtualMachineFlag\n}\n\nfunc init() {\n\tcli.Register(\"device.serial.add\", &add{})\n}\n\nfunc (cmd *add) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n}\n\nfunc (cmd *add) Description() string {\n\treturn `Add serial port to VM.\n\nExamples:\n  govc device.serial.add -vm $vm\n  govc device.info -vm $vm serialport-*`\n}\n\nfunc (cmd *add) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td, err := devices.CreateSerialPort()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = vm.AddDevice(ctx, d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// output name of device we just created\n\tdevices, err = vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdevices = devices.SelectByType(d)\n\n\tname 
:= devices.Name(devices[len(devices)-1])\n\n\tfmt.Println(name)\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/device/serial/connect.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage serial\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"path\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n)\n\ntype connect struct {\n\t*flags.VirtualMachineFlag\n\n\tproxy  string\n\tdevice string\n\tclient bool\n}\n\nfunc init() {\n\tcli.Register(\"device.serial.connect\", &connect{})\n}\n\nfunc (cmd *connect) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.proxy, \"vspc-proxy\", \"\", \"vSPC proxy URI\")\n\tf.StringVar(&cmd.device, \"device\", \"\", \"serial port device name\")\n\tf.BoolVar(&cmd.client, \"client\", false, \"Use client direction\")\n}\n\nfunc (cmd *connect) Usage() string {\n\treturn \"URI\"\n}\n\nfunc (cmd *connect) Description() string {\n\treturn `Connect service URI to serial port.\n\nIf \"-\" is given as URI, connects file backed device with file name of\ndevice name + .log suffix in the VM Config.Files.LogDirectory.\n\nDefaults to the first serial port if no DEVICE is given.\n\nExamples:\n  govc device.ls | grep serialport-\n  govc device.serial.connect -vm $vm -device serialport-8000 telnet://:33233\n  govc device.info -vm $vm serialport-*\n  govc device.serial.connect -vm $vm \"[datastore1] 
$vm/console.log\"\n  govc device.serial.connect -vm $vm -\n  govc datastore.tail -f $vm/serialport-8000.log`\n}\n\nfunc (cmd *connect) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *connect) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td, err := devices.FindSerialPort(cmd.device)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\turi := f.Arg(0)\n\n\tif uri == \"-\" {\n\t\tvar mvm mo.VirtualMachine\n\t\terr = vm.Properties(ctx, vm.Reference(), []string{\"config.files.logDirectory\"}, &mvm)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\turi = path.Join(mvm.Config.Files.LogDirectory, fmt.Sprintf(\"%s.log\", devices.Name(d)))\n\t}\n\n\treturn vm.EditDevice(ctx, devices.ConnectSerialPort(d, uri, cmd.client, cmd.proxy))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/device/serial/disconnect.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage serial\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype disconnect struct {\n\t*flags.VirtualMachineFlag\n\n\tdevice string\n}\n\nfunc init() {\n\tcli.Register(\"device.serial.disconnect\", &disconnect{})\n}\n\nfunc (cmd *disconnect) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.device, \"device\", \"\", \"serial port device name\")\n}\n\nfunc (cmd *disconnect) Description() string {\n\treturn `Disconnect service URI from serial port.\n\nExamples:\n  govc device.ls | grep serialport-\n  govc device.serial.disconnect -vm $vm -device serialport-8000\n  govc device.info -vm $vm serialport-*`\n}\n\nfunc (cmd *disconnect) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *disconnect) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td, err := devices.FindSerialPort(cmd.device)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn 
vm.EditDevice(ctx, devices.DisconnectSerialPort(d))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/device/usb/add.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage usb\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype add struct {\n\t*flags.VirtualMachineFlag\n\n\tcontroller  string\n\tautoConnect bool\n\tehciEnabled bool\n}\n\nfunc init() {\n\tcli.Register(\"device.usb.add\", &add{})\n}\n\nfunc (cmd *add) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tctypes := []string{\"usb\", \"xhci\"}\n\tf.StringVar(&cmd.controller, \"type\", ctypes[0],\n\t\tfmt.Sprintf(\"USB controller type (%s)\", strings.Join(ctypes, \"|\")))\n\n\tf.BoolVar(&cmd.autoConnect, \"auto\", true, \"Enable ability to hot plug devices\")\n\tf.BoolVar(&cmd.ehciEnabled, \"ehci\", true, \"Enable enhanced host controller interface (USB 2.0)\")\n}\n\nfunc (cmd *add) Description() string {\n\treturn `Add USB device to VM.\n\nExamples:\n  govc device.usb.add -vm $vm\n  govc device.usb.add -type xhci -vm $vm\n  govc device.info usb*`\n}\n\nfunc (cmd *add) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := 
cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tvar d types.BaseVirtualDevice\n\n\tswitch cmd.controller {\n\tcase \"usb\":\n\t\tc := new(types.VirtualUSBController)\n\t\tc.AutoConnectDevices = &cmd.autoConnect\n\t\tc.EhciEnabled = &cmd.ehciEnabled\n\t\td = c\n\tcase \"xhci\":\n\t\tc := new(types.VirtualUSBXHCIController)\n\t\tc.AutoConnectDevices = &cmd.autoConnect\n\t\td = c\n\tdefault:\n\t\treturn flag.ErrHelp\n\t}\n\n\terr = vm.AddDevice(ctx, d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// output name of device we just created\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdevices = devices.SelectByType(d)\n\n\tname := devices.Name(devices[len(devices)-1])\n\n\tfmt.Println(name)\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/dvs/add.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage dvs\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype add struct {\n\t*flags.HostSystemFlag\n\n\tpath string\n\tpnic string\n}\n\nfunc init() {\n\tcli.Register(\"dvs.add\", &add{})\n}\n\nfunc (cmd *add) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.path, \"dvs\", \"\", \"DVS path\")\n\tf.StringVar(&cmd.pnic, \"pnic\", \"vmnic0\", \"Name of the host physical NIC\")\n}\n\nfunc (cmd *add) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *add) Usage() string {\n\treturn \"HOST...\"\n}\n\nfunc (cmd *add) Description() string {\n\treturn `Add hosts to DVS.\n\nExamples:\n  govc dvs.add -dvs dvsName -pnic vmnic1 hostA hostB hostC`\n}\n\nfunc (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnet, err := finder.Network(ctx, cmd.path)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\n\tdvs, ok := net.(*object.DistributedVirtualSwitch)\n\tif !ok {\n\t\treturn fmt.Errorf(\"%s (%T) is not of type %T\", cmd.path, net, dvs)\n\t}\n\n\tvar s mo.VmwareDistributedVirtualSwitch\n\terr = dvs.Properties(ctx, dvs.Reference(), []string{\"config\"}, &s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbacking := new(types.DistributedVirtualSwitchHostMemberPnicBacking)\n\n\tfor _, vmnic := range strings.Split(cmd.pnic, \",\") {\n\t\tbacking.PnicSpec = append(backing.PnicSpec, types.DistributedVirtualSwitchHostMemberPnicSpec{\n\t\t\tPnicDevice: strings.TrimSpace(vmnic),\n\t\t})\n\t}\n\n\tconfig := &types.DVSConfigSpec{\n\t\tConfigVersion: s.Config.GetDVSConfigInfo().ConfigVersion,\n\t}\n\n\thosts, err := cmd.HostSystems(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\texisting := make(map[string]bool)\n\t// TODO: host.pnic.info command\n\tfor _, member := range s.Config.GetDVSConfigInfo().Host {\n\t\texisting[member.Config.Host.Value] = true\n\t}\n\n\tfor _, host := range hosts {\n\t\tref := host.Reference()\n\t\tif existing[ref.Value] {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s is already a member of %s\\n\", host.InventoryPath, dvs.InventoryPath)\n\t\t\tcontinue\n\t\t}\n\n\t\tconfig.Host = append(config.Host, types.DistributedVirtualSwitchHostMemberConfigSpec{\n\t\t\tOperation: \"add\",\n\t\t\tHost:      ref,\n\t\t\tBacking:   backing,\n\t\t})\n\t}\n\n\tif len(config.Host) == 0 {\n\t\treturn nil\n\t}\n\n\ttask, err := dvs.Reconfigure(ctx, config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger := cmd.ProgressLogger(fmt.Sprintf(\"adding %d hosts to dvs %s... \", len(config.Host), dvs.InventoryPath))\n\tdefer logger.Wait()\n\n\t_, err = task.WaitForResult(ctx, logger)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/dvs/create.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage dvs\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype create struct {\n\t*flags.FolderFlag\n\n\ttypes.DVSCreateSpec\n\n\tconfigSpec *types.VMwareDVSConfigSpec\n}\n\nfunc init() {\n\tcli.Register(\"dvs.create\", &create{})\n}\n\nfunc (cmd *create) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.FolderFlag, ctx = flags.NewFolderFlag(ctx)\n\tcmd.FolderFlag.Register(ctx, f)\n\n\tcmd.configSpec = new(types.VMwareDVSConfigSpec)\n\n\tcmd.DVSCreateSpec.ConfigSpec = cmd.configSpec\n\tcmd.DVSCreateSpec.ProductInfo = new(types.DistributedVirtualSwitchProductSpec)\n\n\tf.StringVar(&cmd.ProductInfo.Version, \"product-version\", \"\", \"DVS product version\")\n}\n\nfunc (cmd *create) Usage() string {\n\treturn \"DVS\"\n}\n\nfunc (cmd *create) Description() string {\n\treturn `Create DVS (DistributedVirtualSwitch) in datacenter.\n\nThe dvs is added to the folder specified by the 'folder' flag. 
If not given,\nthis defaults to the network folder in the specified or default datacenter.\n\nExamples:\n  govc dvs.create DSwitch\n  govc dvs.create -product-version 5.5.0 DSwitch`\n}\n\nfunc (cmd *create) Process(ctx context.Context) error {\n\tif err := cmd.FolderFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tname := f.Arg(0)\n\n\tcmd.configSpec.Name = name\n\n\tfolder, err := cmd.FolderOrDefault(\"network\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttask, err := folder.CreateDVS(ctx, cmd.DVSCreateSpec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger := cmd.ProgressLogger(fmt.Sprintf(\"adding %s to folder %s... \", name, folder.InventoryPath))\n\tdefer logger.Wait()\n\n\t_, err = task.WaitForResult(ctx, logger)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/dvs/portgroup/add.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage portgroup\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype add struct {\n\t*flags.DatacenterFlag\n\n\tDVPortgroupConfigSpec\n\n\tpath string\n}\n\nfunc init() {\n\tcli.Register(\"dvs.portgroup.add\", &add{})\n}\n\nfunc (cmd *add) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.path, \"dvs\", \"\", \"DVS path\")\n\n\tcmd.DVPortgroupConfigSpec.NumPorts = 128 // default\n\n\tcmd.DVPortgroupConfigSpec.Register(ctx, f)\n}\n\nfunc (cmd *add) Description() string {\n\treturn `Add portgroup to DVS.\n\nExamples:\n  govc dvs.create DSwitch\n  govc dvs.portgroup.add -dvs DSwitch -type earlyBinding -nports 16 ExternalNetwork\n  govc dvs.portgroup.add -dvs DSwitch -type ephemeral InternalNetwork`\n}\n\nfunc (cmd *add) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *add) Usage() string {\n\treturn \"NAME\"\n}\n\nfunc (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tname := f.Arg(0)\n\n\tfinder, 
err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnet, err := finder.Network(ctx, cmd.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdvs, ok := net.(*object.DistributedVirtualSwitch)\n\tif !ok {\n\t\treturn fmt.Errorf(\"%s (%T) is not of type %T\", cmd.path, net, dvs)\n\t}\n\n\tcmd.DVPortgroupConfigSpec.Name = name\n\n\ttask, err := dvs.AddPortgroup(ctx, []types.DVPortgroupConfigSpec{cmd.Spec()})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger := cmd.ProgressLogger(fmt.Sprintf(\"adding %s portgroup to dvs %s... \", name, dvs.InventoryPath))\n\tdefer logger.Wait()\n\n\t_, err = task.WaitForResult(ctx, logger)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/dvs/portgroup/change.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage portgroup\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n)\n\ntype change struct {\n\t*flags.DatacenterFlag\n\n\tDVPortgroupConfigSpec\n}\n\nfunc init() {\n\tcli.Register(\"dvs.portgroup.change\", &change{})\n}\n\nfunc (cmd *change) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\n\tcmd.DVPortgroupConfigSpec.Register(ctx, f)\n}\n\nfunc (cmd *change) Description() string {\n\treturn `Change DVS portgroup configuration.\n\nExamples:\n  govc dvs.portgroup.change -nports 26 ExternalNetwork\n  govc dvs.portgroup.change -vlan 3214 ExternalNetwork`\n}\n\nfunc (cmd *change) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *change) Usage() string {\n\treturn \"PATH\"\n}\n\nfunc (cmd *change) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tpath := f.Arg(0)\n\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnet, err := finder.Network(ctx, path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpg, ok := 
net.(*object.DistributedVirtualPortgroup)\n\tif !ok {\n\t\treturn fmt.Errorf(\"%s (%T) is not of type %T\", path, net, pg)\n\t}\n\n\tvar s mo.DistributedVirtualPortgroup\n\terr = pg.Properties(ctx, pg.Reference(), []string{\"config.configVersion\"}, &s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tspec := cmd.Spec()\n\tspec.ConfigVersion = s.Config.ConfigVersion\n\n\ttask, err := pg.Reconfigure(ctx, spec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger := cmd.ProgressLogger(fmt.Sprintf(\"changing %s portgroup configuration %s... \", pg.Name(), pg.InventoryPath))\n\tdefer logger.Wait()\n\n\t_, err = task.WaitForResult(ctx, logger)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/dvs/portgroup/info.go",
    "content": "/*\nCopyright (c) 2015-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage portgroup\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype info struct {\n\t*flags.DatacenterFlag\n\n\tpg         string\n\tactive     bool\n\tconnected  bool\n\tinside     bool\n\tuplinkPort bool\n\tvlanID     int\n\tcount      uint\n}\n\nfunc init() {\n\tcli.Register(\"dvs.portgroup.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.pg, \"pg\", \"\", \"Distributed Virtual Portgroup\")\n\tf.BoolVar(&cmd.active, \"active\", false, \"Filter by port active or inactive status\")\n\tf.BoolVar(&cmd.connected, \"connected\", false, \"Filter by port connected or disconnected status\")\n\tf.BoolVar(&cmd.inside, \"inside\", true, \"Filter by port inside or outside status\")\n\tf.BoolVar(&cmd.uplinkPort, \"uplinkPort\", false, \"Filter for uplink ports\")\n\tf.IntVar(&cmd.vlanID, \"vlan\", 0, \"Filter by VLAN ID (0 = unfiltered)\")\n\tf.UintVar(&cmd.count, \"count\", 0, \"Number of matches to return (0 = unlimited)\")\n}\n\nfunc 
(cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *info) Usage() string {\n\treturn \"DVS\"\n}\n\nfunc (cmd *info) Description() string {\n\treturn `Portgroup info for DVS.\n\nExamples:\n  govc dvs.portgroup.info DSwitch\n  govc find / -type DistributedVirtualSwitch | xargs -n1 govc dvs.portgroup.info`\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tclient, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Retrieve DVS reference\n\tnet, err := finder.Network(ctx, f.Arg(0))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Convert to DVS object type\n\tdvs, ok := net.(*object.DistributedVirtualSwitch)\n\tif !ok {\n\t\treturn fmt.Errorf(\"%s (%s) is not a DVS\", f.Arg(0), net.Reference().Type)\n\t}\n\n\t// Set base search criteria\n\tcriteria := types.DistributedVirtualSwitchPortCriteria{\n\t\tConnected:  types.NewBool(cmd.connected),\n\t\tActive:     types.NewBool(cmd.active),\n\t\tUplinkPort: types.NewBool(cmd.uplinkPort),\n\t\tInside:     types.NewBool(cmd.inside),\n\t}\n\n\t// If a distributed virtual portgroup path is set, then add its portgroup key to the base criteria\n\tif len(cmd.pg) > 0 {\n\t\t// Retrieve distributed virtual portgroup reference\n\t\tnet, err = finder.Network(ctx, cmd.pg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Convert distributed virtual portgroup object type\n\t\tdvpg, ok := net.(*object.DistributedVirtualPortgroup)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"%s (%s) is not a DVPG\", cmd.pg, net.Reference().Type)\n\t\t}\n\n\t\t// Obtain portgroup key property\n\t\tvar dvp mo.DistributedVirtualPortgroup\n\t\tif err := dvpg.Properties(ctx, dvpg.Reference(), []string{\"key\"}, &dvp); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Add portgroup key to port 
search criteria\n\t\tcriteria.PortgroupKey = []string{dvp.Key}\n\t}\n\n\t// Prepare request\n\treq := types.FetchDVPorts{\n\t\tThis:     dvs.Reference(),\n\t\tCriteria: &criteria,\n\t}\n\n\t// Fetch ports\n\tres, err := methods.FetchDVPorts(ctx, client, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar returnedPorts uint\n\n\t// Iterate over returned ports\n\tfor _, port := range res.Returnval {\n\t\tportConfigSetting := port.Config.Setting.(*types.VMwareDVSPortSetting)\n\t\tportVlan := portConfigSetting.Vlan.(*types.VmwareDistributedVirtualSwitchVlanIdSpec)\n\t\tportVlanID := portVlan.VlanId\n\n\t\t// Show port info if: VLAN ID is not defined, or VLAN ID matches requested VLAN\n\t\tif cmd.vlanID == 0 || portVlanID == int32(cmd.vlanID) {\n\t\t\treturnedPorts++\n\n\t\t\tfmt.Printf(\"PortgroupKey: %s\\n\", port.PortgroupKey)\n\t\t\tfmt.Printf(\"DvsUuid:      %s\\n\", port.DvsUuid)\n\t\t\tfmt.Printf(\"VlanId:       %d\\n\", portVlanID)\n\t\t\tfmt.Printf(\"PortKey:      %s\\n\\n\", port.Key)\n\n\t\t\t// If we are limiting the count and have reached the count, then stop returning output\n\t\t\tif cmd.count > 0 && returnedPorts == cmd.count {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/dvs/portgroup/spec.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage portgroup\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype DVPortgroupConfigSpec struct {\n\ttypes.DVPortgroupConfigSpec\n}\n\nfunc (spec *DVPortgroupConfigSpec) Register(ctx context.Context, f *flag.FlagSet) {\n\tptypes := []string{\n\t\tstring(types.DistributedVirtualPortgroupPortgroupTypeEarlyBinding),\n\t\tstring(types.DistributedVirtualPortgroupPortgroupTypeLateBinding),\n\t\tstring(types.DistributedVirtualPortgroupPortgroupTypeEphemeral),\n\t}\n\n\tf.StringVar(&spec.Type, \"type\", ptypes[0],\n\t\tfmt.Sprintf(\"Portgroup type (%s)\", strings.Join(ptypes, \"|\")))\n\n\tf.Var(flags.NewInt32(&spec.NumPorts), \"nports\", \"Number of ports\")\n\n\tconfig := new(types.VMwareDVSPortSetting)\n\tvlan := new(types.VmwareDistributedVirtualSwitchVlanIdSpec)\n\tspec.DefaultPortConfig = config\n\tconfig.Vlan = vlan\n\n\tf.Var(flags.NewInt32(&vlan.VlanId), \"vlan\", \"VLAN ID\")\n}\n\nfunc (spec *DVPortgroupConfigSpec) Spec() types.DVPortgroupConfigSpec {\n\treturn spec.DVPortgroupConfigSpec\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/emacs/.gitignore",
    "content": ".cask\nelpa\n*.elc\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/emacs/Cask",
    "content": "(source gnu)\n(source melpa)\n\n(package-file \"govc.el\")\n\n(development\n (depends-on \"dash\")\n (depends-on \"json-mode\")\n (depends-on \"magit\")\n (depends-on \"s\"))\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/emacs/Makefile",
    "content": "CASK = cask\nEMACS_BIN ?= emacs\nEMACS_FLAGS =\nEMACS_EXEC = $(CASK) exec $(EMACS_BIN) --no-site-file --no-site-lisp --batch $(EMACS_FLAGS)\n\nOBJECTS = govc.elc\n\nelpa:\n\t$(CASK) install\n\t$(CASK) update\n\ttouch $@\n\n.PHONY: build test docs clean\n\nbuild: elpa $(OBJECTS)\n\ntest: build docs\n\t$(EMACS_EXEC) -l test/make.el -f make-test\n\ndocs: build\n\t$(EMACS_EXEC) -l test/make.el -f make-docs\nclean:\n\trm -f $(OBJECTS) elpa\n\trm -rf .cask\n\n%.elc: %.el\n\t$(EMACS_EXEC) -f batch-byte-compile $<\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/emacs/README.md",
    "content": "# govc.el\n\nInterface to govc for managing VMware ESXi and vCenter.\n\n\nThe goal of this package is to provide a simple interface for commonly used\ngovc commands within Emacs.  This includes table based inventory/state modes\nfor vms, hosts, datastores and pools.  The keymap for each mode provides\nshortcuts for easily feeding the data in view to other govc commands.\n\nWithin the various govc modes, press `?` to see a popup menu of options.\nA menu bar is enabled for certain modes, such as `govc-vm-mode` and `govc-host-mode`.\nThere is also a `govc` menu at all times under the `Tools` menu.\n\nThe recommended way to install govc.el is via MELPA (http://melpa.org/).\n\n## govc-mode\n\nRunning `govc-global-mode` creates key bindings to the various govc modes.\nThe default prefix is `C-c ;` and can be changed by setting `govc-keymap-prefix`.\n\n### govc-command-map\n\nKeybinding     | Description\n---------------|------------------------------------------------------------\n<kbd>h</kbd>   | Host info via govc\n<kbd>p</kbd>   | Pool info via govc\n<kbd>v</kbd>   | VM info via govc\n<kbd>s</kbd>   | Datastore info via govc\n\n### govc-urls\n\nList of URLs for use with `govc-session`.\nThe `govc-session-name` displayed by `govc-mode-line` uses `url-target` (anchor)\nif set, otherwise `url-host` is used.\n\nExample:\n```\n  (setq govc-urls `(\"root:vagrant@localhost:18443#Vagrant-ESXi\"\n                    \"root:password@192.168.1.192#Intel-NUC\"\n                    \"Administrator@vsphere.local:password!@vcva-clovervm\"))\n```\nTo enter a URL that is not in the list, prefix `universal-argument`, for example:\n\n  `C-u M-x govc-vm`\n\nTo avoid putting your credentials in a variable, you can use the\nauth-source search integration.\n\n```\n  (setq govc-urls `(\"myserver-vmware-2\"))\n```\n\nAnd then put this line in your `auth-sources` (e.g. 
`~/.authinfo.gpg`):\n```\n    machine myserver-vmware-2 login tzz password mypass url \"myserver-vmware-2.some.domain.here:443?insecure=true\"\n```\n\nWhich will result in the URL \"tzz:mypass@myserver-vmware-2.some.domain.here:443?insecure=true\".\nFor more details on `auth-sources`, see Info node `(auth) Help for users`.\n\nWhen in `govc-vm` or `govc-host` mode, a default URL is composed with the\ncurrent session credentials and the IP address of the current vm/host and\nthe vm/host name as the session name.  This makes it easier to connect to\nnested ESX/vCenter VMs or directly to an ESX host.\n\n### govc-session-url\n\nESX or vCenter URL set by `govc-session` via `govc-urls` selection.\n\n### govc-session-insecure\n\nSkip verification of server certificate when true.\nThis variable is set to the value of the `GOVC_INSECURE` env var by default.\nIt can also be set per-url via the query string (insecure=true).  For example:\n```\n  (setq govc-urls `(\"root:password@hostname?insecure=true\"))\n```\n\n### govc-session-datacenter\n\nDatacenter to use for the current `govc-session`.\nIf the endpoint has a single Datacenter it will be used by default, otherwise\n`govc-session` will prompt for selection.  It can also be set per-url via the\nquery string.  For example:\n```\n  (setq govc-urls `(\"root:password@hostname?datacenter=dc1\"))\n```\n\n### govc-session-datastore\n\nDatastore to use for the current `govc-session`.\nIf the endpoint has a single Datastore it will be used by default, otherwise\n`govc-session` will prompt for selection.  It can also be set per-url via the\nquery string.  
For example:\n```\n  (setq govc-urls `(\"root:password@hostname?datastore=vsanDatastore\"))\n```\n\n### govc-session-network\n\nNetwork to use for the current `govc-session`.\n\n## govc-tabulated-list-mode\n\nGeneric table bindings to mark/unmark rows.\n\nIn addition to any hooks its parent mode `tabulated-list-mode` might have run,\nthis mode runs the hook `govc-tabulated-list-mode-hook`, as the final step\nduring initialization.\n\n### govc-tabulated-list-mode-map\n\nKeybinding     | Description\n---------------|------------------------------------------------------------\n<kbd>m</kbd>   | Mark and move to the next line\n<kbd>u</kbd>   | Unmark and move to the next line\n<kbd>t</kbd>   | Toggle mark\n<kbd>U</kbd>   | Unmark all\n<kbd>M-&</kbd> | Shell CMD with current `govc-session` exported as GOVC_ env vars\n<kbd>M-w</kbd> | Copy current selection or region to the kill ring\n<kbd>M-E</kbd> | Export session to `process-environment` and `kill-ring`\n\n## govc-host-mode\n\nMajor mode for handling a list of govc hosts.\n\nIn addition to any hooks its parent mode `govc-tabulated-list-mode` might have run,\nthis mode runs the hook `govc-host-mode-hook`, as the final step\nduring initialization.\n\n### govc-host-mode-map\n\nKeybinding     | Description\n---------------|------------------------------------------------------------\n<kbd>E</kbd>   | Events via govc events -n `govc-max-events`\n<kbd>L</kbd>   | Logs via govc logs -n `govc-max-events`\n<kbd>J</kbd>   | JSON via govc host\n<kbd>M</kbd>   | Metrics info\n<kbd>N</kbd>   | Netstat via `govc-esxcli-netstat-info` with current host id\n<kbd>O</kbd>   | Object browser via govc object\n<kbd>c</kbd>   | Connect new session for the current govc mode\n<kbd>p</kbd>   | Pool-mode with current session\n<kbd>s</kbd>   | Datastore-mode with current session\n<kbd>v</kbd>   | VM-mode with current session\n\n## govc-pool-mode\n\nMajor mode for handling a list of govc pools.\n\nIn addition to any hooks its parent mode 
`govc-tabulated-list-mode` might have run,\nthis mode runs the hook `govc-pool-mode-hook`, as the final step\nduring initialization.\n\n### govc-pool-mode-map\n\nKeybinding     | Description\n---------------|------------------------------------------------------------\n<kbd>D</kbd>   | Destroy via `govc-pool-destroy` on the pool selection\n<kbd>E</kbd>   | Events via govc events -n `govc-max-events`\n<kbd>J</kbd>   | JSON via govc pool\n<kbd>M</kbd>   | Metrics info\n<kbd>O</kbd>   | Object browser via govc object\n<kbd>c</kbd>   | Connect new session for the current govc mode\n<kbd>h</kbd>   | Host-mode with current session\n<kbd>s</kbd>   | Datastore-mode with current session\n<kbd>v</kbd>   | VM-mode with current session\n\n## govc-datastore-mode\n\nMajor mode for govc datastore.info.\n\nIn addition to any hooks its parent mode `tabulated-list-mode` might have run,\nthis mode runs the hook `govc-datastore-mode-hook`, as the final step\nduring initialization.\n\n### govc-datastore-mode-map\n\nKeybinding     | Description\n---------------|------------------------------------------------------------\n<kbd>J</kbd>   | JSON via govc datastore\n<kbd>M</kbd>   | Metrics info\n<kbd>O</kbd>   | Object browser via govc object\n<kbd>RET</kbd> | Browse datastore\n<kbd>c</kbd>   | Connect new session for the current govc mode\n<kbd>h</kbd>   | Host-mode with current session\n<kbd>p</kbd>   | Pool-mode with current session\n<kbd>v</kbd>   | VM-mode with current session\n\n## govc-datastore-ls-mode\n\nMajor mode govc datastore.ls.\n\nIn addition to any hooks its parent mode `govc-tabulated-list-mode` might have run,\nthis mode runs the hook `govc-datastore-ls-mode-hook`, as the final step\nduring initialization.\n\n### govc-datastore-ls-mode-map\n\nKeybinding     | Description\n---------------|------------------------------------------------------------\n<kbd>J</kbd>   | JSON via govc datastore\n<kbd>S</kbd>   | Search via govc datastore\n<kbd>D</kbd>   | Delete selected 
datastore paths\n<kbd>T</kbd>   | Tail datastore file\n<kbd>+</kbd>   | Mkdir via govc datastore\n<kbd>DEL</kbd> | Up to parent folder\n<kbd>RET</kbd> | Open datastore folder or file\n\n## govc-vm-mode\n\nMajor mode for handling a list of govc vms.\n\nIn addition to any hooks its parent mode `govc-tabulated-list-mode` might have run,\nthis mode runs the hook `govc-vm-mode-hook`, as the final step\nduring initialization.\n\n### govc-vm-mode-map\n\nKeybinding     | Description\n---------------|------------------------------------------------------------\n<kbd>E</kbd>   | Events via govc events -n `govc-max-events`\n<kbd>J</kbd>   | JSON via govc vm\n<kbd>O</kbd>   | Object browser via govc object\n<kbd>X</kbd>   | ExtraConfig via `govc-vm-extra-config` on the current selection\n<kbd>RET</kbd> | Devices via `govc-device` on the current selection\n<kbd>C</kbd>   | Console screenshot via `govc-vm-screen` on the current selection\n<kbd>V</kbd>   | VNC via `govc-vm-vnc` on the current selection\n<kbd>D</kbd>   | Destroy via `govc-vm-destroy` on the current selection\n<kbd>^</kbd>   | Start via `govc-vm-start` on the current selection\n<kbd>!</kbd>   | Shutdown via `govc-vm-shutdown` on the current selection\n<kbd>@</kbd>   | Reboot via `govc-vm-reboot` on the current selection\n<kbd>&</kbd>   | Suspend via `govc-vm-suspend` on the current selection\n<kbd>H</kbd>   | Host info via `govc-host` with host(s) of current selection\n<kbd>M</kbd>   | Metrics info\n<kbd>P</kbd>   | Ping VM\n<kbd>S</kbd>   | Datastore via `govc-datastore-ls` with datastore of current selection\n<kbd>c</kbd>   | Connect new session for the current govc mode\n<kbd>h</kbd>   | Host-mode with current session\n<kbd>p</kbd>   | Pool-mode with current session\n<kbd>s</kbd>   | Datastore-mode with current session\n\n## govc-device-mode\n\nMajor mode for handling a govc device.\n\nIn addition to any hooks its parent mode `govc-tabulated-list-mode` might have run,\nthis mode runs the hook 
`govc-device-mode-hook`, as the final step\nduring initialization.\n\n### govc-device-mode-map\n\nKeybinding     | Description\n---------------|------------------------------------------------------------\n<kbd>J</kbd>   | JSON via govc device\n<kbd>RET</kbd> | Tabulated govc device\n\n## govc-object-mode\n\nMajor mode for handling a govc object.\n\nIn addition to any hooks its parent mode `govc-tabulated-list-mode` might have run,\nthis mode runs the hook `govc-object-mode-hook`, as the final step\nduring initialization.\n\n### govc-object-mode-map\n\nKeybinding     | Description\n---------------|------------------------------------------------------------\n<kbd>J</kbd>   | JSON object selection via govc object\n<kbd>N</kbd>   | Next managed object reference\n<kbd>O</kbd>   | Object browser via govc object\n<kbd>DEL</kbd> | Parent object selection if reachable, otherwise prompt with `govc-object-history`\n<kbd>RET</kbd> | Expand object selection via govc object\n\n## govc-metric-mode\n\nMajor mode for handling a govc metric.\n\nIn addition to any hooks its parent mode `govc-tabulated-list-mode` might have run,\nthis mode runs the hook `govc-metric-mode-hook`, as the final step\nduring initialization.\n\n### govc-metric-mode-map\n\nKeybinding     | Description\n---------------|------------------------------------------------------------\n<kbd>RET</kbd> | Sample metrics\n<kbd>P</kbd>   | Plot metric sample\n<kbd>s</kbd>   | Select metric names\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/emacs/govc.el",
    "content": ";;; govc.el --- Interface to govc for managing VMware ESXi and vCenter\n\n;; Author: The govc developers\n;; URL: https://github.com/vmware/govmomi/tree/master/govc/emacs\n;; Keywords: convenience\n;; Version: 0.14.0\n;; Package-Requires: ((emacs \"24.3\") (dash \"1.5.0\") (s \"1.9.0\") (magit-popup \"2.0.50\") (json-mode \"1.6.0\"))\n\n;; This file is NOT part of GNU Emacs.\n\n;; Copyright (c) 2016 VMware, Inc. All Rights Reserved.\n;;\n;; Licensed under the Apache License, Version 2.0 (the \"License\");\n;; you may not use this file except in compliance with the License.\n;; You may obtain a copy of the License at\n;;\n;; http://www.apache.org/licenses/LICENSE-2.0\n;;\n;; Unless required by applicable law or agreed to in writing, software\n;; distributed under the License is distributed on an \"AS IS\" BASIS,\n;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n;; See the License for the specific language governing permissions and\n;; limitations under the License.\n\n;;; Commentary:\n\n;; The goal of this package is to provide a simple interface for commonly used\n;; govc commands within Emacs.  This includes table based inventory/state modes\n;; for vms, hosts, datastores and pools.  The keymap for each mode provides\n;; shortcuts for easily feeding the data in view to other govc commands.\n;;\n;; Within the various govc modes, press `?' 
to see a popup menu of options.\n;; A menu bar is enabled for certain modes, such as `govc-vm-mode' and `govc-host-mode'.\n;; There is also a `govc' menu at all times under the `Tools' menu.\n;;\n;; The recommended way to install govc.el is via MELPA (http://melpa.org/).\n\n;;; Code:\n\n(eval-when-compile\n  (require 'cl))\n(require 'dash)\n(require 'diff)\n(require 'dired)\n(require 'json-mode)\n(require 'magit-popup)\n(require 'url-parse)\n(require 's)\n\n(autoload 'auth-source-search \"auth-source\")\n\n(defgroup govc nil\n  \"Emacs customization group for govc.\"\n  :group 'convenience)\n\n(defcustom govc-keymap-prefix \"C-c ;\"\n  \"Prefix for `govc-mode'.\"\n  :group 'govc)\n\n(defcustom govc-command \"govc\"\n  \"Executable path to the govc utility.\"\n  :type 'string\n  :group 'govc)\n\n(defvar govc-command-map\n  (let ((map (make-sparse-keymap)))\n    (define-key map \"h\" 'govc-host)\n    (define-key map \"p\" 'govc-pool)\n    (define-key map \"v\" 'govc-vm)\n    (define-key map \"s\" 'govc-datastore)\n    (define-key map \"?\" 'govc-popup)\n    map)\n  \"Keymap for `govc-mode' after `govc-keymap-prefix' was pressed.\")\n\n(defvar govc-mode-map\n  (let ((map (make-sparse-keymap)))\n    (define-key map (kbd govc-keymap-prefix) govc-command-map)\n    map)\n  \"Keymap for `govc-mode'.\")\n\n;;;###autoload\n(define-minor-mode govc-mode\n  \"Running `govc-global-mode' creates key bindings to the various govc modes.\nThe default prefix is `C-c ;' and can be changed by setting `govc-keymap-prefix'.\n\n\\\\{govc-mode-map\\}\"\n  nil govc-mode-line govc-mode-map\n  :group 'govc)\n\n;;;###autoload\n(define-globalized-minor-mode govc-global-mode govc-mode govc-mode)\n\n(defcustom govc-mode-line\n  '(:eval (format \" govc[%s]\" (or (govc-session-name) \"-\")))\n  \"Mode line lighter for govc.\"\n  :group 'govc\n  :type 'sexp\n  :risky t)\n\n\f\n;;; Tabulated list mode extensions (derived from https://github.com/Silex/docker.el tabulated-list-ext.el)\n(defun 
govc-tabulated-list-mark ()\n  \"Mark and move to the next line.\"\n  (interactive)\n  (tabulated-list-put-tag (char-to-string dired-marker-char) t))\n\n(defun govc-tabulated-list-unmark ()\n  \"Unmark and move to the next line.\"\n  (interactive)\n  (tabulated-list-put-tag \"\" t))\n\n(defun govc-tabulated-list-toggle-marks ()\n  \"Toggle mark.\"\n  (interactive)\n  (save-excursion\n    (goto-char (point-min))\n    (let ((cmd))\n      (while (not (eobp))\n        (setq cmd (char-after))\n        (tabulated-list-put-tag\n         (if (eq cmd dired-marker-char)\n             \"\"\n           (char-to-string dired-marker-char)) t)))))\n\n(defun govc-tabulated-list-unmark-all ()\n  \"Unmark all.\"\n  (interactive)\n  (save-excursion\n    (goto-char (point-min))\n    (while (not (eobp))\n      (tabulated-list-put-tag \"\" t))))\n\n(defun govc-selection ()\n  \"Get the current selection as a list of names.\"\n  (let ((selection))\n    (save-excursion\n      (goto-char (point-min))\n      (while (not (eobp))\n        (when (eq (char-after) ?*)\n          (add-to-list 'selection (tabulated-list-get-id)))\n        (forward-line)))\n    (or selection (let ((id (tabulated-list-get-id)))\n                    (if id\n                        (list id))))))\n\n(defun govc-do-selection (fn action)\n  \"Call FN with `govc-selection' confirming ACTION.\"\n  (let* ((selection (govc-selection))\n         (count (length selection))\n         (prompt (if (= count 1)\n                     (car selection)\n                   (format \"* [%d] marked\" count))))\n    (if (yes-or-no-p (format \"%s %s ?\" action prompt))\n        (funcall fn selection))))\n\n(defun govc-copy-selection ()\n  \"Copy current selection or region to the kill ring.\"\n  (interactive)\n  (if (region-active-p)\n      (copy-region-as-kill (mark) (point) 'region)\n    (kill-new (message \"%s\" (s-join \" \" (--map (format \"'%s'\" it) (govc-selection)))))))\n\n(defvar govc-font-lock-keywords\n  (list\n   (list 
dired-re-mark '(0 dired-mark-face))\n   (list \"types.ManagedObjectReference\\\\(.*\\\\)\" '(1 dired-directory-face))\n   (list \"[^ ]*/$\" '(0 dired-directory-face))\n   (list \"\\\\.\\\\.\\\\.$\" '(0 dired-symlink-face))))\n\n(defvar govc-tabulated-list-mode-map\n  (let ((map (make-sparse-keymap)))\n    (define-key map \"m\" 'govc-tabulated-list-mark)\n    (define-key map \"u\" 'govc-tabulated-list-unmark)\n    (define-key map \"t\" 'govc-tabulated-list-toggle-marks)\n    (define-key map \"U\" 'govc-tabulated-list-unmark-all)\n    (define-key map (kbd \"M-&\") 'govc-shell-command)\n    (define-key map (kbd \"M-w\") 'govc-copy-selection)\n    (define-key map (kbd \"M-E\") 'govc-copy-environment)\n    map)\n  \"Keymap for `govc-tabulated-list-mode'.\")\n\n(define-derived-mode govc-tabulated-list-mode tabulated-list-mode \"Tabulated govc\"\n  \"Generic table bindings to mark/unmark rows.\"\n  (setq-local font-lock-defaults\n              '(govc-font-lock-keywords t nil nil beginning-of-line)))\n\n\f\n;;; Keymap helpers for generating menus and popups\n(defun govc-keymap-list (keymap)\n  \"Return a list of (key name function) for govc bindings in the given KEYMAP.\nThe name returned is the first word of the function `documentation'.\"\n  (let ((map))\n    (map-keymap\n     (lambda (k f)\n       (when (keymapp f)\n         (setq map (append map\n                           (--map (and (setcar it (kbd (format \"M-%s\" (char-to-string (car it))))) it)\n                                  (govc-keymap-list f)))))\n       (when (and (symbolp f)\n                  (s-starts-with? \"govc-\" (symbol-name f)))\n         (if (not (eq ?? 
k))\n             (add-to-list 'map (list k (car (split-string (documentation f))) f))))) keymap)\n    map))\n\n(defun govc-keymap-menu (keymap)\n  \"Return a list of [key function t] for govc bindings in the given KEYMAP.\nFor use with `easy-menu-define'.\"\n  (-map (lambda (item)\n          (vector (nth 1 item) (nth 2 item) t))\n        (govc-keymap-list keymap)))\n\n(defun govc-key-description (key)\n  \"Call `key-description' ensuring KEY is a sequence.\"\n  (key-description (if (integerp key) (list key) key)))\n\n(defun govc-keymap-list-to-help (keymap)\n  \"Convert KEYMAP to list of help text.\"\n  (--map (list (govc-key-description (car it))\n               (car (split-string (documentation (nth 2 it)) \"\\\\.\")))\n         keymap))\n\n(defun govc-keymap-popup-help ()\n  \"Default keymap help for `govc-keymap-popup'.\"\n  (append (govc-keymap-list-to-help (govc-keymap-list govc-tabulated-list-mode-map))\n          '((\"g\" \"Refresh current buffer\")\n            (\"C-h m\" \"Show all key bindings\"))))\n\n(defun govc-keymap-popup (keymap)\n  \"Convert a `govc-keymap-list' using KEYMAP for use with `magit-define-popup'.\nKeys in the ASCII range of 32-97 are mapped to popup commands, all others are listed as help text.\"\n  (let* ((maps (--separate (and (integerp (car it))\n                                (>= (car it) 32)\n                                (<= (car it) 97))\n                           (govc-keymap-list keymap)))\n         (help (govc-keymap-list-to-help (cadr maps))))\n    (append\n     '(\"Commands\")\n     (car maps)\n     (list (s-join \"\\n\" (--map (format \" %-6s %s\" (car it) (cadr it))\n                               (append help (govc-keymap-popup-help))))\n           nil))))\n\n\f\n;;; govc process helpers\n(defcustom govc-urls nil\n  \"List of URLs for use with `govc-session'.\nThe `govc-session-name' displayed by `govc-mode-line' uses `url-target' (anchor)\nif set, otherwise `url-host' is used.\n\nExample:\n```\n  (setq govc-urls 
'(\\\"root:vagrant@localhost:18443#Vagrant-ESXi\\\"\n                    \\\"root:password@192.168.1.192#Intel-NUC\\\"\n                    \\\"Administrator@vsphere.local:password!@vcva-clovervm\\\"))\n```\nTo enter a URL that is not in the list, prefix `universal-argument', for example:\n\n  `\\\\[universal-argument] \\\\[govc-vm]'\n\nTo avoid putting your credentials in a variable, you can use the\nauth-source search integration.\n\n```\n  (setq govc-urls '(\\\"myserver-vmware-2\\\"))\n```\n\nAnd then put this line in your `auth-sources' (e.g. `~/.authinfo.gpg'):\n```\n    machine myserver-vmware-2 login tzz password mypass url \\\"myserver-vmware-2.some.domain.here:443?insecure=true\\\"\n```\n\nWhich will result in the URL \\\"tzz:mypass@myserver-vmware-2.some.domain.here:443?insecure=true\\\".\nFor more details on `auth-sources', see Info node `(auth) Help for users'.\n\nWhen in `govc-vm' or `govc-host' mode, a default URL is composed with the\ncurrent session credentials and the IP address of the current vm/host and\nthe vm/host name as the session name.  This makes it easier to connect to\nnested ESX/vCenter VMs or directly to an ESX host.\"\n  :group 'govc\n  :type '(repeat (string :tag \"vcenter URL or auth-source machine reference\")))\n\n(defvar-local govc-session-url nil\n  \"ESX or vCenter URL set by `govc-session' via `govc-urls' selection.\")\n\n(defvar-local govc-session-path nil)\n\n(defvar-local govc-session-insecure nil\n  \"Skip verification of server certificate when true.\nThis variable is set to the value of the `GOVC_INSECURE' env var by default.\nIt can also be set per-url via the query string (insecure=true).  For example:\n```\n  (setq govc-urls '(\\\"root:password@hostname?insecure=true\\\"))\n```\")\n\n(defvar-local govc-session-datacenter nil\n  \"Datacenter to use for the current `govc-session'.\nIf the endpoint has a single Datacenter it will be used by default, otherwise\n`govc-session' will prompt for selection.  
It can also be set per-url via the\nquery string.  For example:\n```\n  (setq govc-urls '(\\\"root:password@hostname?datacenter=dc1\\\"))\n```\")\n\n(defvar-local govc-session-datastore nil\n  \"Datastore to use for the current `govc-session'.\nIf the endpoint has a single Datastore it will be used by default, otherwise\n`govc-session' will prompt for selection.  It can also be set per-url via the\nquery string.  For example:\n```\n  (setq govc-urls '(\\\"root:password@hostname?datastore=vsanDatastore\\\"))\n```\")\n\n(defvar-local govc-session-network nil\n  \"Network to use for the current `govc-session'.\")\n\n(defvar-local govc-filter nil\n  \"Resource path filter.\")\n\n(defvar-local govc-args nil\n  \"Additional govc arguments.\")\n\n(defun govc-session-name ()\n  \"Return a name for the current session.\nDerived from `govc-session-url' if set, otherwise from the 'GOVC_URL' env var.\nReturn value is the url anchor if set, otherwise the hostname is returned.\"\n  (let* ((u (or govc-session-url (getenv \"GOVC_URL\")))\n         (url (if u (govc-url-parse u))))\n    (if url\n        (concat (or (url-target url) (url-host url)) govc-session-path))))\n\n(defun govc-format-command (command &rest args)\n  \"Format govc COMMAND ARGS.\"\n  (format \"%s %s %s\" govc-command command\n          (s-join \" \" (--map (format \"'%s'\" it)\n                             (-flatten (-non-nil args))))))\n\n(defconst govc-environment-map (--map (cons (concat \"GOVC_\" (upcase it))\n                                            (intern (concat \"govc-session-\" it)))\n                                      '(\"url\" \"insecure\" \"datacenter\" \"datastore\" \"network\"))\n\n  \"Map of `GOVC_*' environment variable names to `govc-session-*' symbol names.\")\n\n(defun govc-environment (&optional unset)\n  \"Return `process-environment' for govc.\nOptionally clear govc env if UNSET is non-nil.\"\n  (let ((process-environment (copy-sequence process-environment)))\n    (dolist (e 
govc-environment-map)\n      (setenv (car e) (unless unset (symbol-value (cdr e)))))\n    process-environment))\n\n(defun govc-export-environment (arg)\n  \"Set if ARG is \\\\[universal-argument], unset if ARG is \\\\[negative-argument].\"\n  (if (equal arg '-)\n      (progn (setq process-environment (govc-environment t))\n             (cons \"unset\" (--map (car it)\n                                  govc-environment-map)))\n    (progn (setq process-environment (govc-environment))\n           (cons \"export\" (--map (format \"%s='%s'\" (car it) (or (symbol-value (cdr it)) \"\"))\n                                 govc-environment-map)))))\n\n(defun govc-copy-environment (&optional arg)\n  \"Export session to `process-environment' and `kill-ring'.\nOptionally set `GOVC_*' vars in `process-environment' using prefix\n\\\\[universal-argument] ARG or unset with prefix \\\\[negative-argument] ARG.\"\n  (interactive \"P\")\n  (message (kill-new (if arg (s-join \" \" (govc-export-environment arg)) govc-session-url))))\n\n(defun govc-process (command handler)\n  \"Run COMMAND, calling HANDLER upon successful exit of the process.\"\n  (message command)\n  (let ((process-environment (govc-environment))\n        (exit-code))\n    (add-to-list 'govc-command-history command)\n    (with-temp-buffer\n      (setq exit-code (call-process-shell-command command nil (current-buffer)))\n      (if (zerop exit-code)\n          (funcall handler)\n        (error (buffer-string))))))\n\n(defun govc (command &rest args)\n  \"Execute govc COMMAND with ARGS.\nReturn value is `buffer-string' split on newlines.\"\n  (govc-process (govc-format-command command args)\n                (lambda ()\n                  (split-string (buffer-string) \"\\n\" t))))\n\n(defun govc-json (command &rest args)\n  \"Execute govc COMMAND passing arguments ARGS.\nReturn value is `json-read'.\"\n  (govc-process (govc-format-command command (cons \"-json\" args))\n                (lambda ()\n                  
(goto-char (point-min))\n                  (let ((json-object-type 'plist))\n                    (json-read)))))\n\n(defun govc-ls-datacenter ()\n  \"List datacenters.\"\n  (govc \"ls\" \"-t\" \"Datacenter\" \"./...\"))\n\n(defun govc-object-prompt (prompt ls)\n  \"PROMPT for object name via LS function.  Return object without PROMPT if there is just one instance.\"\n  (let ((objs (if (listp ls) ls (funcall ls))))\n    (if (eq 1 (length objs))\n        (car objs)\n      (completing-read prompt objs))))\n\n(defun govc-url-parse (url)\n  \"A `url-generic-parse-url' wrapper to handle URL with password, but no scheme.\nAlso fixes the case where user contains an '@'.\"\n  (let* ((full (s-contains? \"://\" url))\n         (u (url-generic-parse-url (concat (unless full \"https://\") url))))\n    (unless full\n      (setf (url-type u) nil)\n      (setf (url-fullness u) nil))\n    (if (s-contains? \"@\" (url-host u))\n        (let* ((h (split-string (url-host u) \"@\"))\n               (p (split-string (car h) \":\")))\n          (setf (url-host u) (cadr h))\n          (setf (url-user u) (concat (url-user u) \"@\" (car p)))\n          (setf (url-password u) (cadr p))))\n    u))\n\n(defun govc-url-default ()\n  \"Default URL when creating a new session.\"\n  (if govc-session-url\n      (let ((url (govc-url-parse govc-session-url)))\n        (if (equal major-mode 'govc-host-mode)\n            (progn (setf (url-host url) (govc-table-column-value \"Name\"))\n                   (setf (url-target url) nil))\n          (progn (setf (url-host url) (govc-table-column-value \"IP address\"))\n                 (setf (url-target url) (govc-table-column-value \"Name\"))))\n        (setf (url-filename url) \"\") ; erase query string\n        (if (string-empty-p (url-user url))\n            (setf (url-user url) \"root\")) ; local workstation url has no user set\n        (url-recreate-url url))))\n\n(defun govc-urls-completing-read ()\n  \"A wrapper for `completing-read' to mask credentials 
in `govc-urls'.\"\n  (let ((alist))\n    (dolist (ent govc-urls)\n      (let ((u (govc-url-parse ent)))\n        (setf (url-password u) nil)\n        (add-to-list 'alist `(,(url-recreate-url u) . ,ent) t)))\n    (let ((u (completing-read \"govc url: \" (-map 'car alist))))\n      (cdr (assoc u alist)))))\n\n(defun govc-session-url-lookup-auth-source (url-or-address)\n  \"Check if URL-OR-ADDRESS is a logical name in the authinfo file.\nGiven URL-OR-ADDRESS `myserver-vmware-2' this function will find\na line like\n    machine myserver-vmware-2 login tzz password mypass url \\\"myserver-vmware-2.some.domain.here:443?insecure=true\\\"\n\nand will return the URL \\\"tzz:mypass@myserver-vmware-2.some.domain.here:443?insecure=true\\\".\n\nIf the line is not found, the original URL-OR-ADDRESS is\nreturned, assuming that's what the user wanted.\"\n  (let ((found (nth 0 (auth-source-search :max 1\n                                          :host url-or-address\n                                          :require '(:user :secret :url)\n                                          :create nil))))\n    (if found\n        (format \"%s:%s@%s\"\n                (plist-get found :user)\n                (let ((secret (plist-get found :secret)))\n                  (if (functionp secret)\n                      (funcall secret)\n                    secret))\n                (plist-get found :url))\n      url-or-address)))\n\n(defun govc-session-set-url (url)\n  \"Set `govc-session-url' to URL and optionally set other govc-session-* variables via URL query.\"\n  ;; Replace the original URL with the auth-source lookup if there is no user.\n  (unless (url-user (govc-url-parse url))\n    (setq url (govc-session-url-lookup-auth-source url)))\n\n  (let ((q (cdr (url-path-and-query (govc-url-parse url)))))\n    (dolist (opt (if q (url-parse-query-string q)))\n      (let ((var (intern (concat \"govc-session-\" (car opt)))))\n        (if (boundp var)\n            (set var (cadr opt))))))\n  (setq 
govc-session-url url))\n\n(defun govc-session ()\n  \"Initialize a govc session.\"\n  (interactive)\n  (let ((url (if (or current-prefix-arg (eq 0 (length govc-urls)))\n                 (read-string \"govc url: \" (govc-url-default))\n               (if (eq 1 (length govc-urls))\n                   (car govc-urls)\n                 (govc-urls-completing-read)))))\n    ;; Wait until this point to clear so current session is preserved in the\n    ;; event of `keyboard-quit' in `read-string'.\n    (setq govc-session-datacenter nil\n          govc-session-datastore nil\n          govc-session-network nil\n          govc-filter nil)\n    (govc-session-set-url url))\n  (unless govc-session-insecure\n    (setq govc-session-insecure (or (getenv \"GOVC_INSECURE\")\n                                    (completing-read \"govc insecure: \" '(\"true\" \"false\")))))\n  (unless govc-session-datacenter\n    (setq govc-session-datacenter (govc-object-prompt \"govc datacenter: \" 'govc-ls-datacenter)))\n  (add-to-list 'govc-urls govc-session-url))\n\n(defalias 'govc-current-session 'buffer-local-variables)\n\n(defun govc-session-clone (session)\n  \"Clone a session from SESSION buffer locals.\"\n  (dolist (v session)\n    (let ((s (car v)))\n      (when (s-starts-with? 
\"govc-session-\" (symbol-name s))\n        (set s (assoc-default s session))))))\n\n(defvar govc-command-history nil\n  \"History list for govc commands used by `govc-shell-command'.\")\n\n(defun govc-shell-command (&optional cmd)\n  \"Shell CMD with current `govc-session' exported as GOVC_ env vars.\"\n  (interactive)\n  (let ((process-environment (govc-environment))\n        (current-prefix-arg \"*govc*\")\n        (url govc-session-url)\n        (shell-command-history govc-command-history))\n    (if cmd\n        (async-shell-command cmd current-prefix-arg)\n      (call-interactively 'async-shell-command))\n    (setq govc-command-history shell-command-history)\n    (with-current-buffer (get-buffer current-prefix-arg)\n      (setq govc-session-url url))))\n\n(defcustom govc-max-events 100\n  \"Limit events output to the last N events.\"\n  :type 'integer\n  :group 'govc)\n\n(defun govc-events ()\n  \"Events via govc events -n `govc-max-events'.\"\n  (interactive)\n  (govc-shell-command\n   (govc-format-command \"events\"\n                        (list \"-n\" govc-max-events (if current-prefix-arg \"-f\") (govc-selection)))))\n\n(defun govc-logs ()\n  \"Logs via govc logs -n `govc-max-events'.\"\n  (interactive)\n  (govc-shell-command\n   (let ((host (govc-selection)))\n     (govc-format-command \"logs\"\n                          (list \"-n\" govc-max-events (if current-prefix-arg \"-f\") (if host (list \"-host\" host)))))))\n\n(defun govc-parse-info (output)\n  \"Parse govc info command OUTPUT.\"\n  (let* ((entries)\n         (entry)\n         (entry-key))\n    (-each output\n      (lambda (line)\n        (let* ((ix (s-index-of \":\" line))\n               (key (s-trim (substring line 0 ix)))\n               (val (s-trim (substring line (+ ix 1)))))\n          (unless entry-key\n            (setq entry-key key))\n          (when (s-equals? 
key entry-key)\n            (setq entry (make-hash-table :test 'equal))\n            (add-to-list 'entries entry))\n          (puthash key val entry))))\n    entries))\n\n(defun govc-table-column-names ()\n  \"Return a list of column names from `tabulated-list-format'.\"\n  (--map (car (aref tabulated-list-format it))\n         (number-sequence 0 (- (length tabulated-list-format) 1))))\n\n(defun govc-table-column-value (key)\n  \"Return current column value for given KEY.\"\n  (let ((names (govc-table-column-names))\n        (entry (tabulated-list-get-entry))\n        (value))\n    (dotimes (ix (- (length names) 1))\n      (if (s-equals? key (nth ix names))\n          (setq value (elt entry ix))))\n    value))\n\n(defun govc-table-info (command &optional args)\n  \"Convert `govc-parse-info' COMMAND ARGS output to `tabulated-list-entries' format.\"\n  (let ((names (govc-table-column-names)))\n    (-map (lambda (info)\n            (let ((id (or (gethash \"Path\" info)\n                          (gethash (car names) info))))\n              (list id (vconcat\n                        (--map (or (gethash it info) \"-\")\n                               names)))))\n          (govc-parse-info (govc command args)))))\n\n(defun govc-map-info (command &optional args)\n  \"Populate key=val map table with govc COMMAND ARGS output.\"\n  (-map (lambda (line)\n          (let* ((ix (s-index-of \":\" line))\n                 (key (s-trim (substring line 0 ix)))\n                 (val (s-trim (substring line (+ ix 1)))))\n            (list key (vector key val))))\n        (govc command args)))\n\n(defun govc-map-info-table (entries)\n  \"Tabulated `govc-map-info' data via ENTRIES.\"\n  (let ((session (govc-current-session))\n        (args (append govc-args (govc-selection)))\n        (buffer (get-buffer-create \"*govc-info*\")))\n    (pop-to-buffer buffer)\n    (tabulated-list-mode)\n    (setq govc-args args)\n    (govc-session-clone session)\n    (setq tabulated-list-format 
[(\"Name\" 50)\n                                 (\"Value\" 50)]\n          tabulated-list-padding 2\n          tabulated-list-entries entries)\n    (tabulated-list-print)))\n\n(defun govc-type-list-entries (command)\n  \"Convert govc COMMAND type table output to `tabulated-list-entries'.\"\n  (-map (lambda (line)\n          (let* ((entry (s-split-up-to \" \" (s-collapse-whitespace line) 2))\n                 (name (car entry))\n                 (type (nth 1 entry))\n                 (value (car (last entry))))\n            (list name (vector name type value))))\n        (govc command govc-args)))\n\n(defun govc-json-info-selection (command)\n  \"Run govc COMMAND -json on `govc-selection'.\"\n  (if current-prefix-arg\n      (--each (govc-selection) (govc-json-info command it))\n    (govc-json-info command (govc-selection))))\n\n(defun govc-json-diff ()\n  \"Diff two *govc-json* buffers in view.\"\n  (let ((buffers))\n    (-each (window-list-1)\n      (lambda (w)\n        (with-current-buffer (window-buffer w)\n          (if (and (eq major-mode 'json-mode)\n                   (s-starts-with? 
\"*govc-json*\" (buffer-name)))\n              (push (current-buffer) buffers)))) )\n    (if (= (length buffers) 2)\n        (pop-to-buffer\n         (diff-no-select (car buffers) (cadr buffers))))))\n\n(defun govc-json-info (command selection)\n  \"Run govc COMMAND -json on SELECTION.\"\n  (govc-process (govc-format-command command \"-json\" govc-args selection)\n                (lambda ()\n                  (let ((buffer (get-buffer-create (concat \"*govc-json*\" (if current-prefix-arg selection)))))\n                    (copy-to-buffer buffer (point-min) (point-max))\n                    (with-current-buffer buffer\n                      (json-mode)\n                      ;; We use `json-mode-beautify' as `json-pretty-print-buffer' does not work for `govc-host-json-info'\n                      (json-mode-beautify))\n                    (display-buffer buffer))))\n  (if current-prefix-arg\n      (govc-json-diff)))\n\n(defun govc-mode-new-session ()\n  \"Connect new session for the current govc mode.\"\n  (interactive)\n  (call-interactively 'govc-session)\n  (revert-buffer))\n\n(defun govc-host-with-session ()\n  \"Host-mode with current session.\"\n  (interactive)\n  (govc-host nil (govc-current-session)))\n\n(defun govc-vm-with-session ()\n  \"VM-mode with current session.\"\n  (interactive)\n  (govc-vm nil (govc-current-session)))\n\n(defun govc-datastore-with-session ()\n  \"Datastore-mode with current session.\"\n  (interactive)\n  (govc-datastore nil (govc-current-session)))\n\n(defun govc-pool-with-session ()\n  \"Pool-mode with current session.\"\n  (interactive)\n  (govc-pool nil (govc-current-session)))\n\n\f\n;;; govc object mode\n(defvar-local govc-object-history '(\"-\")\n  \"History list of visited objects.\")\n\n(defun govc-object-collect ()\n  \"Wrapper for govc object.collect.\"\n  (interactive)\n  (let ((id (car govc-args)))\n    (add-to-list 'govc-object-history id)\n    (setq govc-session-path id))\n  (govc-type-list-entries 
\"object.collect\"))\n\n(defun govc-object-collect-selection (&optional json)\n  \"Expand object selection via govc object.collect.\nOptionally specify JSON encoding.\"\n  (interactive)\n  (let* ((entry (or (tabulated-list-get-entry) (error \"No entry\")))\n         (name (elt entry 0))\n         (type (elt entry 1))\n         (val (elt entry 2)))\n\n    (setq govc-args (list (car govc-args) name))\n\n    (cond\n     ((s-blank? val))\n     ((and (not json) (s-ends-with? \"types.ManagedObjectReference\" type))\n      (let ((ids (govc \"ls\" \"-L\" (split-string val \",\"))))\n        (setq govc-args (list (govc-object-prompt \"moid: \" ids)))))\n     ((string= val \"...\")\n      (if (s-starts-with? \"[]\" type) (setq json t))))\n\n    (if json\n        (govc-json-info \"object.collect\" nil)\n      (tabulated-list-revert))))\n\n(defun govc-object-collect-selection-json ()\n  \"JSON object selection via govc object.collect.\"\n  (interactive)\n  (govc-object-collect-selection t))\n\n(defun govc-object-next ()\n  \"Next managed object reference.\"\n  (interactive)\n  (if (search-forward \"types.ManagedObjectReference\" nil t)\n      (progn (govc-tabulated-list-unmark-all)\n             (tabulated-list-put-tag (char-to-string dired-marker-char)))\n    (goto-char (point-min))))\n\n(defun govc-object-collect-parent ()\n  \"Parent object selection if reachable, otherwise prompt with `govc-object-history'.\"\n  (interactive)\n  (if (cadr govc-args)\n      (let ((prop (butlast (split-string (cadr govc-args) \"\\\\.\"))))\n        (setq govc-args (list (car govc-args) (if prop (s-join \".\" prop)))))\n    (save-excursion\n      (goto-char (point-min))\n      (if (re-search-forward \"^[[:space:]]*parent\" nil t)\n          (govc-object-collect-selection)\n        (let ((id (govc-object-prompt \"moid: \" govc-object-history)))\n          (setq govc-args (list id (if (string= id \"-\") \"content\")))))))\n  (tabulated-list-revert))\n\n(defun govc-object (&optional moid 
property session)\n  \"Object browser aka MOB (Managed Object Browser).\nOptionally starting at MOID and PROPERTY if given.\nInherit SESSION if given.\"\n  (interactive)\n  (let ((buffer (get-buffer-create \"*govc-object*\")))\n    (if (called-interactively-p 'interactive)\n        (switch-to-buffer buffer)\n      (pop-to-buffer buffer))\n    (govc-object-mode)\n    (if session\n        (govc-session-clone session)\n      (call-interactively 'govc-session))\n    (setq govc-args (list (or moid \"-\") property))\n    (tabulated-list-print)))\n\n(defun govc-object-info ()\n  \"Object browser via govc object.collect on `govc-selection'.\"\n  (interactive)\n  (if (equal major-mode 'govc-object-mode)\n      (progn\n        (setq govc-args (list (govc-object-prompt \"moid: \" govc-object-history)))\n        (tabulated-list-revert))\n    (govc-object (tabulated-list-get-id) nil (govc-current-session))))\n\n(defvar govc-object-mode-map\n  (let ((map (make-sparse-keymap)))\n    (define-key map \"J\" 'govc-object-collect-selection-json)\n    (define-key map \"N\" 'govc-object-next)\n    (define-key map \"O\" 'govc-object-info)\n    (define-key map (kbd \"DEL\") 'govc-object-collect-parent)\n    (define-key map (kbd \"RET\") 'govc-object-collect-selection)\n    (define-key map \"?\" 'govc-object-popup)\n    map)\n  \"Keymap for `govc-object-mode'.\")\n\n(define-derived-mode govc-object-mode govc-tabulated-list-mode \"Object\"\n  \"Major mode for handling a govc object.\"\n  (setq tabulated-list-format [(\"Name\" 40 t)\n                               (\"Type\" 40 t)\n                               (\"Value\" 40 t)]\n        tabulated-list-padding 2\n        tabulated-list-entries #'govc-object-collect)\n  (tabulated-list-init-header))\n\n(magit-define-popup govc-object-popup\n  \"Object popup.\"\n  :actions (govc-keymap-popup govc-object-mode-map))\n\n\f\n;;; govc metric mode\n(defun govc-metric-sample ()\n  \"Sample metrics.\"\n  (interactive)\n  (govc-shell-command 
(govc-format-command \"metric.sample\" govc-args govc-filter (govc-selection))))\n\n(defun govc-metric-sample-plot ()\n  \"Plot metric sample.\"\n  (interactive)\n  (let* ((type (if (and (display-images-p) (not (eq current-prefix-arg '-))) 'png 'dumb))\n         (max (if (member \"-i\" govc-args) \"60\" \"180\"))\n         (args (append govc-args (list \"-n\" max \"-plot\" type govc-filter)))\n         (session (govc-current-session))\n         (metrics (govc-selection)))\n    (with-current-buffer (get-buffer-create \"*govc*\")\n      (govc-session-clone session)\n      (erase-buffer)\n      (delete-other-windows)\n      (if (eq type 'dumb)\n          (split-window-right)\n        (split-window-below))\n      (display-buffer-use-some-window (current-buffer) '((inhibit-same-window . t)))\n      (--each metrics\n        (let* ((cmd (govc-format-command \"metric.sample\" args it))\n               (data (govc-process cmd 'buffer-string)))\n          (if (eq type 'dumb)\n              (insert data)\n            (insert-image (create-image (string-as-unibyte data) type t))))))))\n\n(defun govc-metric-select (metrics)\n  \"Select metric names.  
METRICS is a regexp.\"\n  (interactive (list (read-regexp \"Select metrics\" (regexp-quote \".usage.\"))))\n  (save-excursion\n    (goto-char (point-min))\n    (while (not (eobp))\n      (if (string-match-p metrics (tabulated-list-get-id))\n          (govc-tabulated-list-mark)\n        (govc-tabulated-list-unmark)))))\n\n(defun govc-metric-info ()\n  \"Wrapper for govc metric.info.\"\n  (govc-table-info \"metric.info\" (list govc-args (car govc-filter))))\n\n(defvar govc-metric-mode-map\n  (let ((map (make-sparse-keymap)))\n    (define-key map (kbd \"RET\") 'govc-metric-sample)\n    (define-key map (kbd \"P\") 'govc-metric-sample-plot)\n    (define-key map (kbd \"s\") 'govc-metric-select)\n    map)\n  \"Keymap for `govc-metric-mode'.\")\n\n(defun govc-metric ()\n  \"Metrics info.\"\n  (interactive)\n  (let ((session (govc-current-session))\n        (filter (or (govc-selection) (list govc-session-path)))\n        (buffer (get-buffer-create \"*govc-metric*\")))\n    (pop-to-buffer buffer)\n    (govc-metric-mode)\n    (govc-session-clone session)\n    (if current-prefix-arg (setq govc-args '(\"-i\" \"300\")))\n    (setq govc-filter filter)\n    (tabulated-list-print)))\n\n(define-derived-mode govc-metric-mode govc-tabulated-list-mode \"Metric\"\n  \"Major mode for handling a govc metric.\"\n  (setq tabulated-list-format [(\"Name\" 35 t)\n                               (\"Group\" 15 t)\n                               (\"Unit\" 4 t)\n                               (\"Level\" 5 t)\n                               (\"Summary\" 50)]\n        tabulated-list-sort-key (cons \"Name\" nil)\n        tabulated-list-padding 2\n        tabulated-list-entries #'govc-metric-info)\n  (tabulated-list-init-header))\n\n\f\n;;; govc host mode\n(defun govc-ls-host ()\n  \"List hosts.\"\n  (govc \"ls\" \"-t\" \"HostSystem\" \"host/*\"))\n\n(defun govc-esxcli-netstat-info ()\n  \"Wrapper for govc host.esxcli network ip connection list.\"\n  (govc-table-info \"host.esxcli\"\n                   
(append govc-args '(\"-hints=false\" \"--\" \"network\" \"ip\" \"connection\" \"list\"))))\n\n(defun govc-esxcli-netstat (host)\n  \"Tabulated `govc-esxcli-netstat-info' HOST.\"\n  (interactive (list (govc-object-prompt \"Host: \" 'govc-ls-host)))\n  (let ((session (govc-current-session))\n        (buffer (get-buffer-create \"*govc-esxcli*\")))\n    (pop-to-buffer buffer)\n    (tabulated-list-mode)\n    (setq govc-args (list \"-host\" host))\n    (govc-session-clone session)\n    (setq tabulated-list-format [(\"CCAlgo\" 10 t)\n                                 (\"ForeignAddress\" 20 t)\n                                 (\"LocalAddress\" 20 t)\n                                 (\"Proto\" 5 t)\n                                 (\"RecvQ\" 5 t)\n                                 (\"SendQ\" 5 t)\n                                 (\"State\" 15 t)\n                                 (\"WorldID\" 7 t)\n                                 (\"WorldName\" 10 t)]\n          tabulated-list-padding 2\n          tabulated-list-entries #'govc-esxcli-netstat-info)\n    (tabulated-list-init-header)\n    (tabulated-list-print)))\n\n(defun govc-host-esxcli-netstat ()\n  \"Netstat via `govc-esxcli-netstat-info' with current host id.\"\n  (interactive)\n  (govc-esxcli-netstat (tabulated-list-get-id)))\n\n(defun govc-host-info ()\n  \"Wrapper for govc host.info.\"\n  (govc-table-info \"host.info\" (or govc-filter \"*\")))\n\n(defun govc-host-json-info ()\n  \"JSON via govc host.info -json on current selection.\"\n  (interactive)\n  (govc-json-info-selection \"host.info\"))\n\n(defvar govc-host-mode-map\n  (let ((map (make-sparse-keymap)))\n    (define-key map \"E\" 'govc-events)\n    (define-key map \"L\" 'govc-logs)\n    (define-key map \"J\" 'govc-host-json-info)\n    (define-key map \"M\" 'govc-metric)\n    (define-key map \"N\" 'govc-host-esxcli-netstat)\n    (define-key map \"O\" 'govc-object-info)\n    (define-key map \"c\" 'govc-mode-new-session)\n    (define-key map \"p\" 
'govc-pool-with-session)\n    (define-key map \"s\" 'govc-datastore-with-session)\n    (define-key map \"v\" 'govc-vm-with-session)\n    (define-key map \"?\" 'govc-host-popup)\n    map)\n  \"Keymap for `govc-host-mode'.\")\n\n(defun govc-host (&optional filter session)\n  \"Host info via govc.\nOptionally filter by FILTER and inherit SESSION.\"\n  (interactive)\n  (let ((buffer (get-buffer-create \"*govc-host*\")))\n    (pop-to-buffer buffer)\n    (govc-host-mode)\n    (if session\n        (govc-session-clone session)\n      (call-interactively 'govc-session))\n    (setq govc-filter filter)\n    (tabulated-list-print)))\n\n(define-derived-mode govc-host-mode govc-tabulated-list-mode \"Host\"\n  \"Major mode for handling a list of govc hosts.\"\n  (setq tabulated-list-format [(\"Name\" 30 t)\n                               (\"Logical CPUs\" 20 t)\n                               (\"CPU usage\" 25 t)\n                               (\"Memory\" 10 t)\n                               (\"Memory usage\" 25 t)\n                               (\"Manufacturer\" 13 t)\n                               (\"Boot time\" 15 t)]\n        tabulated-list-sort-key (cons \"Name\" nil)\n        tabulated-list-padding 2\n        tabulated-list-entries #'govc-host-info)\n  (tabulated-list-init-header))\n\n(magit-define-popup govc-host-popup\n  \"Host popup.\"\n  :actions (govc-keymap-popup govc-host-mode-map))\n\n(easy-menu-define govc-host-mode-menu govc-host-mode-map\n  \"Host menu.\"\n  (cons \"Host\" (govc-keymap-menu govc-host-mode-map)))\n\n\f\n;;; govc pool mode\n(defun govc-pool-destroy (name)\n  \"Destroy pool with given NAME.\"\n  (interactive (list (completing-read \"Destroy pool: \" (govc \"ls\" \"-t\" \"ResourcePool\" \"host/*\"))))\n  (govc \"pool.destroy\" name))\n\n(defun govc-pool-destroy-selection ()\n  \"Destroy via `govc-pool-destroy' on the pool selection.\"\n  (interactive)\n  (govc-do-selection 'govc-pool-destroy \"Delete\")\n  (tabulated-list-revert))\n\n(defun 
govc-pool-info ()\n  \"Wrapper for govc pool.info.\"\n  (govc-table-info \"pool.info\" (list \"-a\" (or govc-filter (setq govc-filter \"*\")))))\n\n(defun govc-pool-json-info ()\n  \"JSON via govc pool.info -json on current selection.\"\n  (interactive)\n  (govc-json-info-selection \"pool.info\"))\n\n(defvar govc-pool-mode-map\n  (let ((map (make-sparse-keymap)))\n    (define-key map \"D\" 'govc-pool-destroy-selection)\n    (define-key map \"E\" 'govc-events)\n    (define-key map \"J\" 'govc-pool-json-info)\n    (define-key map \"M\" 'govc-metric)\n    (define-key map \"O\" 'govc-object-info)\n    (define-key map \"c\" 'govc-mode-new-session)\n    (define-key map \"h\" 'govc-host-with-session)\n    (define-key map \"s\" 'govc-datastore-with-session)\n    (define-key map \"v\" 'govc-vm-with-session)\n    (define-key map \"?\" 'govc-pool-popup)\n    map)\n  \"Keymap for `govc-pool-mode'.\")\n\n(defun govc-pool (&optional filter session)\n  \"Pool info via govc.\nOptionally filter by FILTER and inherit SESSION.\"\n  (interactive)\n  (let ((buffer (get-buffer-create \"*govc-pool*\")))\n    (pop-to-buffer buffer)\n    (govc-pool-mode)\n    (if session\n        (govc-session-clone session)\n      (call-interactively 'govc-session))\n    (setq govc-filter filter)\n    (tabulated-list-print)))\n\n(define-derived-mode govc-pool-mode govc-tabulated-list-mode \"Pool\"\n  \"Major mode for handling a list of govc pools.\"\n  (setq tabulated-list-format [(\"Name\" 30 t)\n                               (\"CPU Usage\" 25 t)\n                               (\"CPU Shares\" 25 t)\n                               (\"CPU Reservation\" 25 t)\n                               (\"CPU Limit\" 10 t)\n                               (\"Mem Usage\" 25 t)\n                               (\"Mem Shares\" 25 t)\n                               (\"Mem Reservation\" 25 t)\n                               (\"Mem Limit\" 10 t)]\n        tabulated-list-sort-key (cons \"Name\" nil)\n        
tabulated-list-padding 2\n        tabulated-list-entries #'govc-pool-info)\n  (tabulated-list-init-header))\n\n(magit-define-popup govc-pool-popup\n  \"Pool popup.\"\n  :actions (govc-keymap-popup govc-pool-mode-map))\n\n(easy-menu-define govc-pool-mode-menu govc-pool-mode-map\n  \"Pool menu.\"\n  (cons \"Pool\" (govc-keymap-menu govc-pool-mode-map)))\n\n\f\n;;; govc datastore mode\n(defun govc-ls-datastore ()\n  \"List datastores.\"\n  (govc \"ls\" \"datastore\"))\n\n(defun govc-datastore-ls-entries ()\n  \"Wrapper for govc datastore.ls.\"\n  (let* ((data (govc-json \"datastore.ls\" \"-l\" \"-p\" govc-filter))\n         (file (plist-get (elt data 0) :File)))\n    (-map (lambda (ent)\n            (let ((name (plist-get ent :Path))\n                  (size (plist-get ent :FileSize))\n                  (time (plist-get ent :Modification))\n                  (user (plist-get ent :Owner)))\n              (list (concat govc-filter name)\n                    (vector (file-size-human-readable size)\n                            (current-time-string (date-to-time time))\n                            name)))) file)))\n\n(defun govc-datastore-ls-parent ()\n  \"Up to parent folder.\"\n  (interactive)\n  (if (s-blank? govc-filter)\n      (let ((session (govc-current-session)))\n        (govc-datastore-mode)\n        (govc-session-clone session))\n    (setq govc-filter (file-name-directory (directory-file-name govc-filter))))\n  (tabulated-list-revert))\n\n(defun govc-datastore-ls-child ()\n  \"Open datastore folder or file.\"\n  (interactive)\n  (let ((id (tabulated-list-get-id)))\n    (if current-prefix-arg\n        (govc-shell-command (govc-format-command \"datastore.ls\" \"-l\" \"-p\" \"-R\" id))\n      (if (s-ends-with? 
\"/\" id)\n          (progn (setq govc-filter id)\n                 (tabulated-list-revert))\n        (govc-datastore-open)))))\n\n(defun govc-datastore-open ()\n  \"Open datastore file.\"\n  (lexical-let* ((srcfile (tabulated-list-get-id))\n                 (srcpath (format \"[%s] %s\" (file-name-nondirectory govc-session-datastore) (s-chop-prefix \"/\" srcfile)))\n                 (suffix (file-name-extension srcfile t))\n                 (tmpfile (make-temp-file \"govc-ds\" nil suffix))\n                 (session (govc-current-session)))\n    (when (yes-or-no-p (concat \"Open \" srcpath \"?\"))\n      (govc \"datastore.download\" srcfile tmpfile)\n      (with-current-buffer (pop-to-buffer (find-file-noselect tmpfile))\n        (govc-session-clone session)\n        (add-hook 'kill-buffer-hook (lambda ()\n                                      (with-demoted-errors\n                                          (delete-file tmpfile))) t t)\n        (add-hook 'after-save-hook (lambda ()\n                                     (if (yes-or-no-p (concat \"Upload changes to \" srcpath \"?\"))\n                                         (with-demoted-errors\n                                             (govc \"datastore.upload\" tmpfile srcfile)))) t t)))))\n\n(defun govc-datastore-tail ()\n  \"Tail datastore file.\"\n  (interactive)\n  (govc-shell-command\n   (govc-format-command \"datastore.tail\"\n                        (list \"-n\" govc-max-events (if current-prefix-arg \"-f\")) (govc-selection))))\n\n(defun govc-datastore-disk-info ()\n  \"Info datastore disk.\"\n  (interactive)\n  (delete-other-windows)\n  (govc-shell-command\n   (govc-format-command \"datastore.disk.info\" (if current-prefix-arg \"-c\") (govc-selection))))\n\n(defun govc-datastore-ls-json ()\n  \"JSON via govc datastore.ls -json on current selection.\"\n  (interactive)\n  (let ((govc-args '(\"-l\" \"-p\")))\n    (govc-json-info-selection \"datastore.ls\")))\n\n(defun govc-datastore-ls-r-json ()\n  
\"Search via govc datastore.ls -json -R on current selection.\"\n  (interactive)\n  (let ((govc-args '(\"-l\" \"-p\" \"-R\")))\n    (govc-json-info-selection \"datastore.ls\")))\n\n(defun govc-datastore-mkdir (name)\n  \"Mkdir via govc datastore.mkdir with given NAME.\"\n  (interactive (list (read-from-minibuffer \"Create directory: \" govc-filter)))\n  (govc \"datastore.mkdir\" name)\n  (tabulated-list-revert))\n\n(defun govc-datastore-rm (paths)\n  \"Delete datastore PATHS.\"\n  (--each paths (govc \"datastore.rm\" (if current-prefix-arg \"-f\") it)))\n\n(defun govc-datastore-rm-selection ()\n  \"Delete selected datastore paths.\"\n  (interactive)\n  (govc-do-selection 'govc-datastore-rm \"Delete\")\n  (tabulated-list-revert))\n\n(defvar govc-datastore-ls-mode-map\n  (let ((map (make-sparse-keymap)))\n    (define-key map \"I\" 'govc-datastore-disk-info)\n    (define-key map \"J\" 'govc-datastore-ls-json)\n    (define-key map \"S\" 'govc-datastore-ls-r-json)\n    (define-key map \"D\" 'govc-datastore-rm-selection)\n    (define-key map \"T\" 'govc-datastore-tail)\n    (define-key map \"+\" 'govc-datastore-mkdir)\n    (define-key map (kbd \"DEL\") 'govc-datastore-ls-parent)\n    (define-key map (kbd \"RET\") 'govc-datastore-ls-child)\n    (define-key map \"?\" 'govc-datastore-ls-popup)\n    map)\n  \"Keymap for `govc-datastore-ls-mode'.\")\n\n(defun govc-datastore-ls (&optional datastore session filter)\n  \"List govc datastore.  
Optionally specify DATASTORE, SESSION and FILTER.\"\n  (interactive)\n  (let ((buffer (get-buffer-create \"*govc-datastore*\")))\n    (pop-to-buffer buffer)\n    (govc-datastore-ls-mode)\n    (if session\n        (govc-session-clone session)\n      (call-interactively 'govc-session))\n    (setq govc-session-datastore (or datastore (govc-object-prompt \"govc datastore: \" 'govc-ls-datastore)))\n    (setq govc-filter filter)\n    (tabulated-list-print)))\n\n(define-derived-mode govc-datastore-ls-mode govc-tabulated-list-mode \"Datastore\"\n  \"Major mode govc datastore.ls.\"\n  (setq tabulated-list-format [(\"Size\" 10 t)\n                               (\"Modification time\" 25 t)\n                               (\"Name\" 40 t)]\n        tabulated-list-sort-key (cons \"Name\" nil)\n        tabulated-list-padding 2\n        tabulated-list-entries #'govc-datastore-ls-entries)\n  (tabulated-list-init-header))\n\n(magit-define-popup govc-datastore-ls-popup\n  \"Datastore ls popup.\"\n  :actions (govc-keymap-popup govc-datastore-ls-mode-map))\n\n(easy-menu-define govc-datastore-ls-mode-menu govc-datastore-ls-mode-map\n  \"Datastore ls menu.\"\n  (cons \"Datastore\" (govc-keymap-menu govc-datastore-ls-mode-map)))\n\n(defvar govc-datastore-mode-map\n  (let ((map (make-sparse-keymap)))\n    (define-key map \"J\" 'govc-datastore-json-info)\n    (define-key map \"M\" 'govc-metric)\n    (define-key map \"O\" 'govc-object-info)\n    (define-key map (kbd \"RET\") 'govc-datastore-ls-selection)\n    (define-key map \"c\" 'govc-mode-new-session)\n    (define-key map \"h\" 'govc-host-with-session)\n    (define-key map \"p\" 'govc-pool-with-session)\n    (define-key map \"v\" 'govc-vm-with-session)\n    (define-key map \"?\" 'govc-datastore-popup)\n    map)\n  \"Keymap for `govc-datastore-mode'.\")\n\n(defun govc-datastore-json-info ()\n  \"JSON via govc datastore.info -json on current selection.\"\n  (interactive)\n  (govc-json-info-selection \"datastore.info\"))\n\n(defun 
govc-datastore-info ()\n  \"Wrapper for govc datastore.info.\"\n  (govc-table-info \"datastore.info\" (or govc-filter \"*\")))\n\n(defun govc-datastore-ls-selection ()\n  \"Browse datastore.\"\n  (interactive)\n  (govc-datastore-ls (tabulated-list-get-id) (govc-current-session)))\n\n(defun govc-datastore (&optional filter session)\n  \"Datastore info via govc.\nOptionally filter by FILTER and inherit SESSION.\"\n  (interactive)\n  (let ((buffer (get-buffer-create \"*govc-datastore*\")))\n    (pop-to-buffer buffer)\n    (govc-datastore-mode)\n    (if session\n        (govc-session-clone session)\n      (call-interactively 'govc-session))\n    (setq govc-filter filter)\n    (tabulated-list-print)\n    (if (and govc-session-datastore (search-forward govc-session-datastore nil t))\n        (beginning-of-line))))\n\n(define-derived-mode govc-datastore-mode tabulated-list-mode \"Datastore\"\n  \"Major mode for govc datastore.info.\"\n  (setq tabulated-list-format [(\"Name\" 15 t)\n                               (\"Type\" 10 t)\n                               (\"Capacity\" 10 t)\n                               (\"Free\" 10 t)\n                               (\"Remote\" 30 t)]\n        tabulated-list-sort-key (cons \"Name\" nil)\n        tabulated-list-padding 2\n        tabulated-list-entries #'govc-datastore-info)\n  (tabulated-list-init-header))\n\n(magit-define-popup govc-datastore-popup\n  \"Datastore popup.\"\n  :actions (govc-keymap-popup govc-datastore-mode-map))\n\n(easy-menu-define govc-datastore-mode-menu govc-datastore-mode-map\n  \"Datastore menu.\"\n  (cons \"Datastore\" (govc-keymap-menu govc-datastore-mode-map)))\n\n\f\n;;; govc vm mode\n(defun govc-vm-prompt (prompt)\n  \"PROMPT for a vm name.\"\n  (completing-read prompt (govc \"ls\" \"vm\")))\n\n(defun govc-vm-start (name)\n  \"Start vm with given NAME.\"\n  (interactive (list (govc-vm-prompt \"Start vm: \")))\n  (govc \"vm.power\" \"-on\" name))\n\n(defun govc-vm-shutdown (name)\n  \"Shutdown vm with 
given NAME.\"\n  (interactive (list (govc-vm-prompt \"Shutdown vm: \")))\n  (govc \"vm.power\" \"-s\" \"-force\" name))\n\n(defun govc-vm-reboot (name)\n  \"Reboot vm with given NAME.\"\n  (interactive (list (govc-vm-prompt \"Reboot vm: \")))\n  (govc \"vm.power\" \"-r\" \"-force\" name))\n\n(defun govc-vm-suspend (name)\n  \"Suspend vm with given NAME.\"\n  (interactive (list (govc-vm-prompt \"Suspend vm: \")))\n  (govc \"vm.power\" \"-suspend\" name))\n\n(defun govc-vm-destroy (name)\n  \"Destroy vm with given NAME.\"\n  (interactive (list (govc-vm-prompt \"Destroy vm: \")))\n  (govc \"vm.destroy\" name))\n\n(defun govc-vm-vnc-enable (name)\n  \"Enable vnc on vm with given NAME.\"\n  (--map (last (split-string it))\n         (govc \"vm.vnc\" \"-enable\"\n               \"-port\" \"-1\"\n               \"-password\" (format \"%08x\" (random (expt 16 8))) name)))\n\n(defun govc-vm-vnc (name &optional arg)\n  \"VNC for vm with given NAME.\nBy default, enable and open VNC for the given vm NAME.\nWith prefix \\\\[negative-argument] ARG, VNC will be disabled.\nWith prefix \\\\[universal-argument] ARG, VNC will be enabled but not opened.\"\n  (interactive (list (govc-vm-prompt \"VNC vm: \")\n                     current-prefix-arg))\n  (if (equal arg '-)\n      (govc \"vm.vnc\" \"-disable\" name)\n    (let ((urls (govc-vm-vnc-enable name)))\n      (unless arg\n        (-each (-flatten urls) 'browse-url)))))\n\n(defun govc-vm-screen (name &optional arg)\n  \"Console screenshot of vm NAME console.\nOpen via `eww' by default, via `browse-url' if ARG is non-nil.\"\n  (interactive (list (govc-vm-prompt \"Console screenshot vm: \")\n                     current-prefix-arg))\n  (let* ((data (govc-json \"vm.info\" name))\n         (vms (plist-get data :VirtualMachines))\n         (url (govc-url-parse govc-session-url)))\n    (mapc\n     (lambda (vm)\n       (let* ((moid (plist-get (plist-get vm :Self) :Value))\n              (on (string= \"poweredOn\" (plist-get (plist-get vm 
:Runtime) :PowerState)))\n              (host (format \"%s:%d\" (url-host url) (or (url-port url) 443)))\n              (path (concat \"/screen?id=\" moid))\n              (auth (concat (url-user url) \":\" (url-password url))))\n         (if current-prefix-arg\n             (browse-url (concat \"https://\" auth \"@\" host path))\n           (let ((creds `((,host (\"VMware HTTP server\" . ,(base64-encode-string auth)))))\n                 (url-basic-auth-storage 'creds)\n                 (u (concat \"https://\" host path)))\n             (require 'eww)\n             (if on\n                 (url-retrieve u 'eww-render (list u))\n               (kill-new (message u)))))))\n     vms)))\n\n(defun govc-vm-start-selection ()\n  \"Start via `govc-vm-start' on the current selection.\"\n  (interactive)\n  (govc-vm-start (govc-selection))\n  (tabulated-list-revert))\n\n(defun govc-vm-shutdown-selection ()\n  \"Shutdown via `govc-vm-shutdown' on the current selection.\"\n  (interactive)\n  (govc-vm-shutdown (govc-selection))\n  (tabulated-list-revert))\n\n(defun govc-vm-reboot-selection ()\n  \"Reboot via `govc-vm-reboot' on the current selection.\"\n  (interactive)\n  (govc-vm-reboot (govc-selection))\n  (tabulated-list-revert))\n\n(defun govc-vm-suspend-selection ()\n  \"Suspend via `govc-vm-suspend' on the current selection.\"\n  (interactive)\n  (govc-vm-suspend (govc-selection))\n  (tabulated-list-revert))\n\n(defun govc-vm-destroy-selection ()\n  \"Destroy via `govc-vm-destroy' on the current selection.\"\n  (interactive)\n  (govc-do-selection 'govc-vm-destroy \"Destroy\")\n  (tabulated-list-revert))\n\n(defun govc-vm-vnc-selection ()\n  \"VNC via `govc-vm-vnc' on the current selection.\"\n  (interactive)\n  (govc-vm-vnc (govc-selection) current-prefix-arg))\n\n(defun govc-vm-screen-selection ()\n  \"Console screenshot via `govc-vm-screen' on the current selection.\"\n  (interactive)\n  (govc-vm-screen (govc-selection) current-prefix-arg))\n\n(defun govc-vm-info ()\n  
\"Wrapper for govc vm.info.\"\n  (govc-table-info \"vm.info\" (list \"-r\" (or govc-filter (setq govc-filter \"*\")))))\n\n(defun govc-vm-host ()\n  \"Host info via `govc-host' with host(s) of current selection.\"\n  (interactive)\n  (govc-host (concat \"*/\" (govc-table-column-value \"Host\"))\n             (govc-current-session)))\n\n(defun govc-vm-datastore ()\n  \"Datastore via `govc-datastore-ls' with datastore of current selection.\"\n  (interactive)\n  (if current-prefix-arg\n      (govc-datastore (s-split \", \" (govc-table-column-value \"Storage\") t)\n                      (govc-current-session))\n    (let* ((data (govc-json \"vm.info\" (tabulated-list-get-id)))\n           (vm (elt (plist-get data :VirtualMachines) 0))\n           (dir (plist-get (plist-get (plist-get vm :Config) :Files) :LogDirectory))\n           (args (s-split \"\\\\[\\\\|\\\\]\" dir t)))\n      (govc-datastore-ls (first args) (govc-current-session) (concat (s-trim (second args)) \"/\")))))\n\n(defun govc-vm-ping ()\n  \"Ping VM.\"\n  (interactive)\n  (let ((ping-program-options '(\"-c\" \"20\")))\n    (ping (govc-table-column-value \"IP address\"))))\n\n(defun govc-vm-device-ls ()\n  \"Devices via `govc-device' on the current selection.\"\n  (interactive)\n  (govc-device (tabulated-list-get-id)\n               (govc-current-session)))\n\n(defun govc-vm-extra-config ()\n  \"Populate table with govc vm.info -e output.\"\n  (let* ((data (govc-json \"vm.info\" govc-args))\n         (vms (plist-get data :VirtualMachines))\n         (info))\n    (mapc\n     (lambda (vm)\n       (let* ((config (plist-get vm :Config))\n              (name (plist-get config :Name)))\n         (mapc (lambda (x)\n                 (let ((key (plist-get x :Key))\n                       (val (plist-get x :Value)))\n                   (push (list key (vector key val)) info)))\n               (plist-get config :ExtraConfig))\n         (if (> (length vms) 1)\n             (push (list name (vector \"vm.name\" name)) 
info))))\n     vms)\n    info))\n\n(defun govc-vm-extra-config-table ()\n  \"ExtraConfig via `govc-vm-extra-config' on the current selection.\"\n  (interactive)\n  (govc-map-info-table #'govc-vm-extra-config))\n\n(defun govc-vm-json-info ()\n  \"JSON via govc vm.info -json on current selection.\"\n  (interactive)\n  (govc-json-info-selection \"vm.info\"))\n\n(defvar govc-vm-mode-map\n  (let ((map (make-sparse-keymap)))\n    (define-key map \"E\" 'govc-events)\n    (define-key map \"J\" 'govc-vm-json-info)\n    (define-key map \"O\" 'govc-object-info)\n    (define-key map \"X\" 'govc-vm-extra-config-table)\n    (define-key map (kbd \"RET\") 'govc-vm-device-ls)\n    (define-key map \"C\" 'govc-vm-screen-selection)\n    (define-key map \"V\" 'govc-vm-vnc-selection)\n    (define-key map \"D\" 'govc-vm-destroy-selection)\n    (define-key map \"^\" 'govc-vm-start-selection)\n    (define-key map \"!\" 'govc-vm-shutdown-selection)\n    (define-key map \"@\" 'govc-vm-reboot-selection)\n    (define-key map \"&\" 'govc-vm-suspend-selection)\n    (define-key map \"H\" 'govc-vm-host)\n    (define-key map \"M\" 'govc-metric)\n    (define-key map \"P\" 'govc-vm-ping)\n    (define-key map \"S\" 'govc-vm-datastore)\n    (define-key map \"c\" 'govc-mode-new-session)\n    (define-key map \"h\" 'govc-host-with-session)\n    (define-key map \"p\" 'govc-pool-with-session)\n    (define-key map \"s\" 'govc-datastore-with-session)\n    (define-key map \"?\" 'govc-vm-popup)\n    map)\n  \"Keymap for `govc-vm-mode'.\")\n\n(defun govc-vm (&optional filter session)\n  \"VM info via govc.\nOptionally filter by FILTER and inherit SESSION.\"\n  (interactive)\n  (let ((buffer (get-buffer-create \"*govc-vm*\")))\n    (pop-to-buffer buffer)\n    (govc-vm-mode)\n    (if session\n        (govc-session-clone session)\n      (call-interactively 'govc-session))\n    (setq govc-filter filter)\n    (tabulated-list-print)))\n\n(define-derived-mode govc-vm-mode govc-tabulated-list-mode \"VM\"\n  \"Major mode 
for handling a list of govc vms.\"\n  (setq tabulated-list-format [(\"Name\" 40 t)\n                               (\"Power state\" 12 t)\n                               (\"Boot time\" 13 t)\n                               (\"IP address\" 15 t)\n                               (\"Guest name\" 20 t)\n                               (\"Host\" 20 t)\n                               (\"CPU usage\" 15 t)\n                               (\"Host memory usage\" 18 t)\n                               (\"Guest memory usage\" 19 t)\n                               (\"Storage committed\" 18 t)\n                               (\"Storage\" 10 t)\n                               (\"Network\" 10 t)]\n        tabulated-list-sort-key (cons \"Name\" nil)\n        tabulated-list-padding 2\n        tabulated-list-entries #'govc-vm-info)\n  (tabulated-list-init-header))\n\n(magit-define-popup govc-vm-popup\n  \"VM popup.\"\n  :actions (govc-keymap-popup govc-vm-mode-map))\n\n(easy-menu-define govc-vm-mode-menu govc-vm-mode-map\n  \"VM menu.\"\n  (cons \"VM\" (govc-keymap-menu govc-vm-mode-map)))\n\n\f\n;;; govc device mode\n(defun govc-device-ls ()\n  \"Wrapper for govc device.ls -vm VM.\"\n  (govc-type-list-entries \"device.ls\"))\n\n(defun govc-device-info ()\n  \"Populate table with govc device.info output.\"\n  (govc-map-info \"device.info\" govc-args))\n\n(defun govc-device-info-table ()\n  \"Tabulated govc device.info.\"\n  (interactive)\n  (govc-map-info-table #'govc-device-info))\n\n(defun govc-device-json-info ()\n  \"JSON via govc device.info -json on current selection.\"\n  (interactive)\n  (govc-json-info-selection \"device.info\"))\n\n(defvar govc-device-mode-map\n  (let ((map (make-sparse-keymap)))\n    (define-key map (kbd \"J\") 'govc-device-json-info)\n    (define-key map (kbd \"RET\") 'govc-device-info-table)\n    map)\n  \"Keymap for `govc-device-mode'.\")\n\n(defun govc-device (&optional vm session)\n  \"List govc devices for VM.  
Optionally inherit SESSION.\"\n  (interactive)\n  (let ((buffer (get-buffer-create \"*govc-device*\")))\n    (pop-to-buffer buffer)\n    (govc-device-mode)\n    (if session\n        (govc-session-clone session)\n      (call-interactively 'govc-session))\n    (setq govc-args (list \"-vm\" (or vm (govc-vm-prompt \"vm: \"))))\n    (tabulated-list-print)))\n\n(define-derived-mode govc-device-mode govc-tabulated-list-mode \"Device\"\n  \"Major mode for handling a govc device.\"\n  (setq tabulated-list-format [(\"Name\" 15 t)\n                               (\"Type\" 30 t)\n                               (\"Summary\" 40 t)]\n        tabulated-list-sort-key (cons \"Name\" nil)\n        tabulated-list-padding 2\n        tabulated-list-entries #'govc-device-ls)\n  (tabulated-list-init-header))\n\n(magit-define-popup govc-popup\n  \"govc popup.\"\n  :actions (govc-keymap-list govc-command-map))\n\n(easy-menu-change\n '(\"Tools\") \"govc\"\n (govc-keymap-menu govc-command-map)\n \"Search Files (Grep)...\")\n\n(provide 'govc)\n\n;;; govc.el ends here\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/emacs/test/govc-test.el",
    "content": "(defconst testsuite-dir\n  (if load-file-name\n      (file-name-directory load-file-name)\n    ;; Fall back to default directory (in case of M-x eval-buffer)\n    default-directory)\n  \"Directory of the test suite.\")\n\n(defconst govc-test-helper-path\n  (concat (expand-file-name (concat testsuite-dir \"/../../test/test_helper.bash\"))))\n\n(load (expand-file-name \"../govc\" testsuite-dir) nil :no-message)\n\n(ert-deftest test-govc-url-parse ()\n  (dolist (u '(\"root:vagrant@localhost:18443\"\n               \"Administrator@vsphere.local:vagrant@localhost\"\n               \"https://root:vagrant@localhost:18443/sdk\"\n               \"https://Administrator@vsphere.local:vagrant@localhost/sdk\"))\n    (should (equal u (url-recreate-url (govc-url-parse u))))))\n\n(ert-deftest test-govc-session-set-url ()\n  (should (equal govc-session-insecure nil))\n  (should (equal govc-session-datacenter nil))\n  (with-temp-buffer\n    (govc-session-set-url \"vc.example.com?insecure=true&datacenter=foo&ignored=true\")\n    (should (equal govc-session-insecure \"true\"))\n    (should (equal govc-session-datacenter \"foo\"))\n    (should (equal govc-session-datastore nil))))\n\n(ert-deftest test-govc-copy-environment ()\n  (let ((process-environment)\n        (govc-session-url \"vc.example.com\")\n        (govc-session-insecure \"false\")\n        (govc-session-datacenter \"dc1\")\n        (govc-session-datastore \"ds1\")\n        (govc-session-network \"net1\"))\n    (govc-export-environment '-)\n    (dolist (e govc-environment-map)\n      (should (equal nil (getenv (car e)))))\n    (govc-export-environment (universal-argument))\n    (dolist (e govc-environment-map)\n      (should (not (equal nil (getenv (car e))))))))\n\n(defun govc-test-env ()\n  (let ((url (getenv \"GOVC_TEST_URL\")))\n    (unless url\n      (ert-skip \"env GOVC_TEST_URL not set\"))\n    (setq govc-session-url url\n          govc-session-insecure \"true\")))\n\n(defun govc-test-helper (arg)\n  
(shell-command-to-string (format \"bash -c \\\"source %s; %s\\\"\" govc-test-helper-path arg)))\n\n(defun govc-test-new-vm ()\n  (s-trim-right (govc-test-helper \"new_empty_vm\")))\n\n(defun govc-test-new-id ()\n  (s-trim-right (govc-test-helper \"new_id\")))\n\n(defun govc-test-teardown ()\n  (ignore-errors\n    (govc-test-helper \"teardown\")))\n\n(ert-deftest test-govc-vm-info ()\n  (govc-test-env)\n  (unwind-protect\n      (let ((id (govc-test-new-vm)))\n        (govc-json-info \"vm.info\" (list id))\n        (with-current-buffer \"*govc-json*\"\n          (goto-char (point-min))\n          (let ((data (json-read)))\n            (should (= (length data) 1))\n            (should (cdr (assq 'VirtualMachines data)))))\n\n        (govc-json-info \"vm.info\" (list \"ENOENT\"))\n        (with-current-buffer \"*govc-json*\"\n          (goto-char (point-min))\n          (let ((data (json-read)))\n            (should (= (length data) 1))\n            (should (not (cdr (assq 'VirtualMachines data))))))\n\n        (let ((govc-args (list id))\n              (len1)\n              (len2))\n          (setq len1 (length (govc-vm-extra-config)))\n          (should (>= len1 1))\n          (govc \"vm.change\" \"-vm\" id\n                \"-e\" \"govc-test-one=1\"\n                \"-e\" \"govc-test-two:2.2=2\"\n                ;; test that we don't choke on \\n\n                \"-e\" \"foo=bar\nbaz\")\n          (setq len2 (length (govc-vm-extra-config)))\n\n          (should (= (- len2 len1) 3)))\n\n        (let ((govc-filter \"*\"))\n          (should (>= (length (govc-vm-info)) 1)))\n\n        (let ((govc-filter \"ENOENT\"))\n          (should (= (length (govc-vm-info)) 0)))\n\n        (govc-vm-screen id))\n    (govc-test-teardown)))\n\n(ert-deftest test-govc-datastore-ls-entries ()\n  (govc-test-env)\n  (unwind-protect\n      (let ((id (govc-test-new-id)))\n        (should (>= (length (govc-datastore-ls-entries)) 1))\n\n        (let ((govc-filter (concat id \"/\")))\n        
  (should-error (govc-datastore-ls-entries))\n          (govc \"datastore.mkdir\" id)\n          (should (= (length (govc-datastore-ls-entries)) 0))\n          (dotimes (i 3)\n            (govc \"datastore.mkdir\" (format \"%s/dir %d\" id i)))\n          (let ((entries (govc-datastore-ls-entries)))\n            (should (= (length entries) 3))\n            (should (s-starts-with? (concat id \"/dir \") (caar entries))))))\n    (govc-test-teardown)))\n\n(ert-deftest test-govc-pool-ls ()\n  (govc-test-env)\n  (unwind-protect\n      (let* ((pools (govc-ls-pool))\n             (num (length pools))\n             (path (concat (car pools) \"/\" (govc-test-new-id))))\n        (should (>= num 1))\n        (message \"%d existing pools [%S]\" num pools)\n        (govc \"pool.create\" path)\n        (setq pools (govc-ls-pool))\n        (govc-pool-destroy path)\n        (should (= (- (length pools) num) 1)))\n    (govc-test-teardown)))\n\n(ert-deftest test-govc-about ()\n  (govc-test-env)\n  (govc \"about\"))\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/emacs/test/make.el",
    "content": "#!/usr/bin/env emacs --script\n\n(let ((current-directory (file-name-directory load-file-name)))\n  (setq project-test-path (expand-file-name \".\" current-directory))\n  (setq project-root-path (expand-file-name \"..\" current-directory)))\n\n(add-to-list 'load-path project-root-path)\n(add-to-list 'load-path project-test-path)\n\n(require 'lisp-mnt)\n(require 'govc)\n(require 's)\n\n(defun make-test ()\n  (dolist (test-file (or argv (directory-files project-test-path t \"-test.el$\")))\n    (load test-file nil t))\n  (ert-run-tests-batch-and-exit t))\n\n(defun govc-help ()\n  \"Summary of govc modes in markdown format.\"\n  (interactive)\n  (with-help-window (help-buffer) ; TODO: this turned into a mess, but does the job of generating README.md from govc.el\n    (dolist (kind '(govc-mode govc-urls\n                    govc-session-url govc-session-insecure govc-session-datacenter govc-session-datastore govc-session-network\n                    tabulated-list host pool datastore datastore-ls vm device object metric))\n      (let* ((name (if (boundp kind) (symbol-name kind) (format \"govc-%s-mode\" kind)))\n             (map (if (equal 'govc-mode kind) 'govc-command-map (intern (concat name \"-map\"))))\n             (doc (lambda (f &optional all)\n                    (let* ((txt (if (functionp f) (documentation f t) (documentation-property f 'variable-documentation)))\n                           (ix (if all (length txt) (s-index-of \".\" txt))))\n                      (s-replace (format \"\\n\\n\\\\\\{%s\\}\" (concat name \"-map\")) \"\"\n                                 (s-replace \"'\" \"`\" (substring txt 0 ix)))))))\n        (princ (concat (s-repeat (if (and (boundp kind) (not (fboundp kind))) 3 2) \"#\") \" \" name \"\\n\"))\n        (princ (concat \"\\n\" (funcall doc (intern name) t) \"\\n\\n\"))\n        (when (boundp map)\n          (princ (concat \"### \" (symbol-name map) \"\\n\\n\"))\n          (princ \"Keybinding     | 
Description\\n\")\n          (princ \"---------------|------------------------------------------------------------\\n\")\n          (dolist (kl (govc-keymap-list (symbol-value map)))\n            (let ((key (govc-key-description (car kl))))\n              (princ (format \"<kbd>%s</kbd>%s| %s\\n\" key (s-repeat (- 4 (length key)) \" \") (funcall doc (nth 2 kl))))))\n          (princ \"\\n\"))))))\n\n(defun make-docs ()\n  (let ((commentary)\n        (summary))\n    (with-current-buffer (find-file-noselect (concat project-root-path \"/govc.el\"))\n      (setq commentary (s-replace \";;; Commentary:\" \"\" (lm-commentary))\n            summary (lm-summary)))\n    (let ((readme (find-file-noselect (concat project-root-path \"/README.md\"))))\n      (with-current-buffer readme\n        (erase-buffer)\n        (govc-help)\n        (with-current-buffer (help-buffer)\n          (copy-to-buffer readme (point-min) (point-max)))\n        (goto-char (point-min))\n        (insert (concat \"# govc.el\\n\\n\" summary \".\\n\"))\n        (insert (s-replace \"'\" \"`\" (replace-regexp-in-string \";; ?\" \"\" commentary t t)))\n        (save-buffer 0)))))\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/env/command.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage env\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype env struct {\n\t*flags.OutputFlag\n\t*flags.ClientFlag\n\n\textra bool\n}\n\nfunc init() {\n\tcli.Register(\"env\", &env{})\n}\n\nfunc (cmd *env) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.extra, \"x\", false, \"Output variables for each GOVC_URL component\")\n}\n\nfunc (cmd *env) Process(ctx context.Context) error {\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *env) Description() string {\n\treturn `Output the environment variables for this client.\n\nIf credentials are included in the url, they are split into separate variables.\nUseful as bash scripting helper to parse GOVC_URL.`\n}\n\nfunc (cmd *env) Run(ctx context.Context, f *flag.FlagSet) error {\n\tenv := envResult(cmd.ClientFlag.Environ(cmd.extra))\n\n\tif f.NArg() > 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\t// Option to just output the value, example use:\n\t// password=$(govc env 
GOVC_PASSWORD)\n\tif f.NArg() == 1 {\n\t\tvar output []string\n\n\t\tprefix := fmt.Sprintf(\"%s=\", f.Arg(0))\n\n\t\tfor _, e := range env {\n\t\t\tif strings.HasPrefix(e, prefix) {\n\t\t\t\toutput = append(output, e[len(prefix):])\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\treturn cmd.WriteResult(envResult(output))\n\t}\n\n\treturn cmd.WriteResult(env)\n}\n\ntype envResult []string\n\nfunc (r envResult) Write(w io.Writer) error {\n\tfor _, e := range r {\n\t\tfmt.Println(e)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/events/command.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage events\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/event\"\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype events struct {\n\t*flags.DatacenterFlag\n\n\tMax   int32\n\tTail  bool\n\tForce bool\n}\n\nfunc init() {\n\t// initialize with the maximum allowed objects set\n\tcli.Register(\"events\", &events{})\n}\n\nfunc (cmd *events) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\n\tcmd.Max = 25 // default\n\tf.Var(flags.NewInt32(&cmd.Max), \"n\", \"Output the last N events\")\n\tf.BoolVar(&cmd.Tail, \"f\", false, \"Follow event stream\")\n\tf.BoolVar(&cmd.Force, \"force\", false, \"Disable number objects to monitor limit\")\n}\n\nfunc (cmd *events) Description() string {\n\treturn `Display events.\n\nExamples:\n  govc events vm/my-vm1 vm/my-vm2\n  govc events /dc1/vm/* /dc2/vm/*\n  govc ls -t HostSystem host/* | xargs govc events | grep -i vsan`\n}\n\nfunc (cmd *events) Usage() string {\n\treturn \"[PATH]...\"\n}\n\nfunc (cmd *events) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd 
*events) printEvents(ctx context.Context, obj *types.ManagedObjectReference, page []types.BaseEvent, m *event.Manager) error {\n\tevent.Sort(page)\n\tif obj != nil {\n\t\t// print the object reference\n\t\tfmt.Fprintf(os.Stdout, \"\\n==> %s <==\\n\", obj.String())\n\t}\n\tfor _, e := range page {\n\t\tcat, err := m.EventCategory(ctx, e)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tevent := e.GetEvent()\n\t\tmsg := strings.TrimSpace(event.FullFormattedMessage)\n\n\t\t// if this is a TaskEvent gather a little more information\n\t\tif t, ok := e.(*types.TaskEvent); ok {\n\t\t\t// some tasks won't have this information, so just use the event message\n\t\t\tif t.Info.Entity != nil {\n\t\t\t\tmsg = fmt.Sprintf(\"%s (target=%s %s)\", msg, t.Info.Entity.Type, t.Info.EntityName)\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprintf(os.Stdout, \"[%s] [%s] %s\\n\",\n\t\t\tevent.CreatedTime.Local().Format(time.ANSIC),\n\t\t\tcat,\n\t\t\tmsg)\n\t}\n\treturn nil\n}\n\nfunc (cmd *events) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tobjs, err := cmd.ManagedObjects(ctx, f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(objs) > 0 {\n\t\t// need an event manager\n\t\tm := event.NewManager(c)\n\n\t\t// get the event stream\n\t\terr = m.Events(ctx, objs, cmd.Max, cmd.Tail, cmd.Force, func(obj types.ManagedObjectReference, ee []types.BaseEvent) error {\n\t\t\tvar o *types.ManagedObjectReference\n\t\t\tif len(objs) > 1 {\n\t\t\t\to = &obj\n\t\t\t}\n\t\t\terr = cmd.printEvents(ctx, o, ee, m)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/examples/lib/ssh.sh",
    "content": "function public-key {\n  local dir=${HOME}/.ssh\n\n  for f in $HOME/.ssh/{id_{rsa,dsa},*}.pub; do\n    if [ -r $f ]; then\n      echo $f\n      return\n    fi\n  done\n\n  echo \"Can't find public key file...\"\n  exit 1\n}\n\nPUBLIC_KEY_FILE=${PUBLIC_KEY_FILE-$(public-key)}\nSSH_OPTS=\"-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=quiet\"\n\nfunction upload-public-key {\n  local vm_name=$1\n  local dir=$2\n\n  if [ -z \"$dir\" ]\n  then\n    uid=$(echo $GOVC_GUEST_LOGIN | awk -F: '{print $1}')\n    dir=$(govc guest.getenv -vm ${vm_name} HOME | awk -F= '{print $2}')\n\n    if [ -z \"$dir\" ]\n    then\n      echo \"Can't find ${uid}'s HOME dir...\"\n      exit 1\n    fi\n  fi\n\n  govc guest.mkdir \\\n       -vm ${vm_name} \\\n       -p \\\n       ${dir}/.ssh\n\n  govc guest.upload \\\n       -vm ${vm_name} \\\n       -f \\\n       ${PUBLIC_KEY_FILE} \\\n       ${dir}/.ssh/authorized_keys\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/examples/vcsa.sh",
    "content": "#!/bin/bash -e\n\nlib=$(readlink -nf $(dirname $0))/lib\n. $lib/ssh.sh\n\nova=$1\n\nif [ -z \"$ova\" ]\nthen\n  ova=./VMware-vCenter-Server-Appliance-5.5.0.10300-2000350_OVF10.ova\nfi\n\n# default to local Vagrant esxbox for testing\nexport GOVC_URL=${GOVC_URL-\"https://root:vagrant@localhost:8443/sdk\"}\n\n# default VCSA credentials\nexport GOVC_GUEST_LOGIN=root:vmware\n\n# VM name as defined in the VCSA .ovf\nvm_name=VMware_vCenter_Server_Appliance\n\necho \"Importing $ova...\"\ngovc import.ova $ova\n\necho \"Powering on $vm_name...\"\ngovc vm.power -on $vm_name\n\necho \"Waiting for $vm_name's IP address...\"\nvc=$(govc vm.ip $vm_name)\n\ngovc vm.info $vm_name\n\necho \"Uploading ssh public key to $vm_name...\"\nupload-public-key $vm_name\n\necho \"Configuring vCenter Server Appliance...\"\n\n# http://www.virtuallyghetto.com/2012/02/automating-vcenter-server-appliance.html\nssh ${SSH_OPTS} root@$vc <<EOF\necho \"Accepting EULA ...\"\n/usr/sbin/vpxd_servicecfg eula accept\n\necho \"Configuring Embedded DB ...\"\n/usr/sbin/vpxd_servicecfg db write embedded\n\necho \"Configuring SSO...\"\n/usr/sbin/vpxd_servicecfg sso write embedded\n\necho \"Starting VCSA ...\"\n/usr/sbin/vpxd_servicecfg service start\nEOF\n\nvc_url=https://${GOVC_GUEST_LOGIN}@${vc}/sdk\n\necho \"VCSA configured and ready...\"\n\ngovc about -u $vc_url\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/extension/info.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage extension\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype info struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n}\n\nfunc init() {\n\tcli.Register(\"extension.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *info) Usage() string {\n\treturn \"[KEY]...\"\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := object.GetExtensionManager(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlist, err := m.List(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar res infoResult\n\n\tif f.NArg() == 0 {\n\t\tres.Extensions = list\n\t} else {\n\t\texts := 
make(map[string]types.Extension)\n\t\tfor _, e := range list {\n\t\t\texts[e.Key] = e\n\t\t}\n\n\t\tfor _, key := range f.Args() {\n\t\t\tif e, ok := exts[key]; ok {\n\t\t\t\tres.Extensions = append(res.Extensions, e)\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"extension %s not found\", key)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn cmd.WriteResult(&res)\n}\n\ntype infoResult struct {\n\tExtensions []types.Extension\n}\n\nfunc (r *infoResult) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)\n\n\tfor _, e := range r.Extensions {\n\t\tfmt.Fprintf(tw, \"Name:\\t%s\\n\", e.Key)\n\t\tfmt.Fprintf(tw, \"  Version:\\t%s\\n\", e.Version)\n\t\tfmt.Fprintf(tw, \"  Description:\\t%s\\n\", e.Description.GetDescription().Summary)\n\t\tfmt.Fprintf(tw, \"  Company:\\t%s\\n\", e.Company)\n\t\tfmt.Fprintf(tw, \"  Last heartbeat time:\\t%s\\n\", e.LastHeartbeatTime)\n\t\tfmt.Fprintf(tw, \"  Subject name:\\t%s\\n\", e.SubjectName)\n\t\tfmt.Fprintf(tw, \"  Type:\\t%s\\n\", e.Type)\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/extension/register.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage extension\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype register struct {\n\t*flags.ClientFlag\n\n\tupdate bool\n}\n\nfunc init() {\n\tcli.Register(\"extension.register\", &register{})\n}\n\nfunc (cmd *register) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.update, \"update\", false, \"Update extension\")\n}\n\nfunc (cmd *register) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *register) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := object.GetExtensionManager(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar e types.Extension\n\te.Description = new(types.Description)\n\n\tif err = json.NewDecoder(os.Stdin).Decode(&e); err != nil {\n\t\treturn err\n\t}\n\n\te.LastHeartbeatTime = time.Now().UTC()\n\n\tif cmd.update {\n\t\treturn m.Update(ctx, e)\n\t}\n\n\treturn m.Register(ctx, e)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/extension/setcert.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage extension\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"crypto/x509\"\n\t\"crypto/x509/pkix\"\n\t\"encoding/pem\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"math/big\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype setcert struct {\n\t*flags.ClientFlag\n\n\tcert string\n\torg  string\n\n\tencodedCert bytes.Buffer\n}\n\nfunc init() {\n\tcli.Register(\"extension.setcert\", &setcert{})\n}\n\nfunc (cmd *setcert) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.cert, \"cert-pem\", \"-\", \"PEM encoded certificate\")\n\tf.StringVar(&cmd.org, \"org\", \"VMware\", \"Organization for generated certificate\")\n}\n\nfunc (cmd *setcert) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *setcert) Usage() string {\n\treturn \"ID\"\n}\n\nfunc (cmd *setcert) Description() string {\n\treturn `Set certificate for the extension ID.\n\nThe '-cert-pem' option can be one of the following:\n'-' : Read the certificate from stdin\n'+' : Generate a new key pair and save locally to ID.crt and ID.key\n... 
: Any other value is passed as-is to ExtensionManager.SetCertificate`\n}\n\nfunc (cmd *setcert) create(id string) error {\n\tcertFile, err := os.Create(id + \".crt\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer certFile.Close()\n\n\tkeyFile, err := os.Create(id + \".key\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer keyFile.Close()\n\n\tpriv, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnotBefore := time.Now()\n\tnotAfter := notBefore.Add(5 * 365 * 24 * time.Hour) // 5 years\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{cmd.org},\n\t\t},\n\t\tNotBefore:             notBefore,\n\t\tNotAfter:              notAfter,\n\t\tKeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tBasicConstraintsValid: true,\n\t}\n\n\tderBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = pem.Encode(&cmd.encodedCert, &pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = certFile.Write(cmd.encodedCert.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = pem.Encode(keyFile, &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(priv)})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *setcert) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := object.GetExtensionManager(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif f.NArg() != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tkey := f.Arg(0)\n\n\tif cmd.cert == \"-\" {\n\t\tb, err := ioutil.ReadAll(os.Stdin)\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\tcmd.cert = string(b)\n\t} else if cmd.cert == \"+\" {\n\t\tif err := cmd.create(key); err != nil {\n\t\t\treturn fmt.Errorf(\"creating certificate: %s\", err)\n\t\t}\n\t\tcmd.cert = cmd.encodedCert.String()\n\t}\n\n\treturn m.SetCertificate(ctx, key, cmd.cert)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/extension/unregister.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage extension\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype unregister struct {\n\t*flags.ClientFlag\n}\n\nfunc init() {\n\tcli.Register(\"extension.unregister\", &unregister{})\n}\n\nfunc (cmd *unregister) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n}\n\nfunc (cmd *unregister) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *unregister) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := object.GetExtensionManager(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, key := range f.Args() {\n\t\tif err = m.Unregister(ctx, key); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/fields/add.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage fields\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype add struct {\n\t*flags.ClientFlag\n}\n\nfunc init() {\n\tcli.Register(\"fields.add\", &add{})\n}\n\nfunc (cmd *add) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n}\n\nfunc (cmd *add) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *add) Usage() string {\n\treturn \"NAME\"\n}\n\nfunc (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := object.GetCustomFieldsManager(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tname := f.Arg(0)\n\n\tdef, err := m.Add(ctx, name, \"\", nil, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"%d\\n\", def.Key)\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/fields/ls.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage fields\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype ls struct {\n\t*flags.ClientFlag\n}\n\nfunc init() {\n\tcli.Register(\"fields.ls\", &ls{})\n}\n\nfunc (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n}\n\nfunc (cmd *ls) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := object.GetCustomFieldsManager(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfield, err := m.Field(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)\n\n\tfor _, def := range field {\n\t\tfmt.Fprintf(tw, \"%d\\t%s\\n\", def.Key, def.Name)\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/fields/rename.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage fields\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype rename struct {\n\t*flags.ClientFlag\n}\n\nfunc init() {\n\tcli.Register(\"fields.rename\", &rename{})\n}\n\nfunc (cmd *rename) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n}\n\nfunc (cmd *rename) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *rename) Usage() string {\n\treturn \"KEY NAME\"\n}\n\nfunc (cmd *rename) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 2 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := object.GetCustomFieldsManager(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkey, err := m.FindKey(ctx, f.Arg(0))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tname := f.Arg(1)\n\n\treturn m.Rename(ctx, key, name)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/fields/rm.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage fields\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype rm struct {\n\t*flags.ClientFlag\n}\n\nfunc init() {\n\tcli.Register(\"fields.rm\", &rm{})\n}\n\nfunc (cmd *rm) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n}\n\nfunc (cmd *rm) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *rm) Usage() string {\n\treturn \"KEY...\"\n}\n\nfunc (cmd *rm) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := object.GetCustomFieldsManager(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, name := range f.Args() {\n\t\tkey, err := m.FindKey(ctx, name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := m.Remove(ctx, key); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/fields/set.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage fields\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype set struct {\n\t*flags.DatacenterFlag\n}\n\nfunc init() {\n\tcli.Register(\"fields.set\", &set{})\n}\n\nfunc (cmd *set) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n}\n\nfunc (cmd *set) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *set) Usage() string {\n\treturn \"KEY VALUE PATH...\"\n}\n\nfunc (cmd *set) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() < 3 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := object.GetCustomFieldsManager(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targs := f.Args()\n\n\tkey, err := m.FindKey(ctx, args[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tval := args[1]\n\n\tobjs, err := cmd.ManagedObjects(ctx, args[2:])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, ref := range objs {\n\t\terr := m.Set(ctx, ref, key, val)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/client.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"context\"\n\t\"crypto/sha1\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/url\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/session\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nconst (\n\tenvURL           = \"GOVC_URL\"\n\tenvUsername      = \"GOVC_USERNAME\"\n\tenvPassword      = \"GOVC_PASSWORD\"\n\tenvCertificate   = \"GOVC_CERTIFICATE\"\n\tenvPrivateKey    = \"GOVC_PRIVATE_KEY\"\n\tenvInsecure      = \"GOVC_INSECURE\"\n\tenvPersist       = \"GOVC_PERSIST_SESSION\"\n\tenvMinAPIVersion = \"GOVC_MIN_API_VERSION\"\n\tenvVimNamespace  = \"GOVC_VIM_NAMESPACE\"\n\tenvVimVersion    = \"GOVC_VIM_VERSION\"\n\tenvTLSCaCerts    = \"GOVC_TLS_CA_CERTS\"\n\tenvTLSKnownHosts = \"GOVC_TLS_KNOWN_HOSTS\"\n)\n\nconst cDescr = \"ESX or vCenter URL\"\n\ntype ClientFlag struct {\n\tcommon\n\n\t*DebugFlag\n\n\turl           *url.URL\n\tusername      string\n\tpassword      string\n\tcert          string\n\tkey           string\n\tinsecure      bool\n\tpersist       bool\n\tminAPIVersion string\n\tvimNamespace  string\n\tvimVersion    string\n\ttlsCaCerts    string\n\ttlsKnownHosts string\n\ttlsHostHash   string\n\n\tclient *vim25.Client\n}\n\nvar (\n\thome      
    = os.Getenv(\"GOVMOMI_HOME\")\n\tclientFlagKey = flagKey(\"client\")\n)\n\nfunc init() {\n\tif home == \"\" {\n\t\thome = filepath.Join(os.Getenv(\"HOME\"), \".govmomi\")\n\t}\n}\n\nfunc NewClientFlag(ctx context.Context) (*ClientFlag, context.Context) {\n\tif v := ctx.Value(clientFlagKey); v != nil {\n\t\treturn v.(*ClientFlag), ctx\n\t}\n\n\tv := &ClientFlag{}\n\tv.DebugFlag, ctx = NewDebugFlag(ctx)\n\tctx = context.WithValue(ctx, clientFlagKey, v)\n\treturn v, ctx\n}\n\nfunc (flag *ClientFlag) URLWithoutPassword() *url.URL {\n\tif flag.url == nil {\n\t\treturn nil\n\t}\n\n\twithoutCredentials := *flag.url\n\twithoutCredentials.User = url.User(flag.url.User.Username())\n\treturn &withoutCredentials\n}\n\nfunc (flag *ClientFlag) IsSecure() bool {\n\treturn !flag.insecure\n}\n\nfunc (flag *ClientFlag) String() string {\n\turl := flag.URLWithoutPassword()\n\tif url == nil {\n\t\treturn \"\"\n\t}\n\n\treturn url.String()\n}\n\nfunc (flag *ClientFlag) Set(s string) error {\n\tvar err error\n\n\tflag.url, err = soap.ParseURL(s)\n\n\treturn err\n}\n\nfunc (flag *ClientFlag) Register(ctx context.Context, f *flag.FlagSet) {\n\tflag.RegisterOnce(func() {\n\t\tflag.DebugFlag.Register(ctx, f)\n\n\t\t{\n\t\t\tflag.Set(os.Getenv(envURL))\n\t\t\tusage := fmt.Sprintf(\"%s [%s]\", cDescr, envURL)\n\t\t\tf.Var(flag, \"u\", usage)\n\t\t}\n\n\t\t{\n\t\t\tflag.username = os.Getenv(envUsername)\n\t\t\tflag.password = os.Getenv(envPassword)\n\t\t}\n\n\t\t{\n\t\t\tvalue := os.Getenv(envCertificate)\n\t\t\tusage := fmt.Sprintf(\"Certificate [%s]\", envCertificate)\n\t\t\tf.StringVar(&flag.cert, \"cert\", value, usage)\n\t\t}\n\n\t\t{\n\t\t\tvalue := os.Getenv(envPrivateKey)\n\t\t\tusage := fmt.Sprintf(\"Private key [%s]\", envPrivateKey)\n\t\t\tf.StringVar(&flag.key, \"key\", value, usage)\n\t\t}\n\n\t\t{\n\t\t\tinsecure := false\n\t\t\tswitch env := strings.ToLower(os.Getenv(envInsecure)); env {\n\t\t\tcase \"1\", \"true\":\n\t\t\t\tinsecure = true\n\t\t\t}\n\n\t\t\tusage := 
fmt.Sprintf(\"Skip verification of server certificate [%s]\", envInsecure)\n\t\t\tf.BoolVar(&flag.insecure, \"k\", insecure, usage)\n\t\t}\n\n\t\t{\n\t\t\tpersist := true\n\t\t\tswitch env := strings.ToLower(os.Getenv(envPersist)); env {\n\t\t\tcase \"0\", \"false\":\n\t\t\t\tpersist = false\n\t\t\t}\n\n\t\t\tusage := fmt.Sprintf(\"Persist session to disk [%s]\", envPersist)\n\t\t\tf.BoolVar(&flag.persist, \"persist-session\", persist, usage)\n\t\t}\n\n\t\t{\n\t\t\tenv := os.Getenv(envMinAPIVersion)\n\t\t\tif env == \"\" {\n\t\t\t\tenv = soap.DefaultMinVimVersion\n\t\t\t}\n\n\t\t\tflag.minAPIVersion = env\n\t\t}\n\n\t\t{\n\t\t\tvalue := os.Getenv(envVimNamespace)\n\t\t\tif value == \"\" {\n\t\t\t\tvalue = soap.DefaultVimNamespace\n\t\t\t}\n\t\t\tusage := fmt.Sprintf(\"Vim namespace [%s]\", envVimNamespace)\n\t\t\tf.StringVar(&flag.vimNamespace, \"vim-namespace\", value, usage)\n\t\t}\n\n\t\t{\n\t\t\tvalue := os.Getenv(envVimVersion)\n\t\t\tif value == \"\" {\n\t\t\t\tvalue = soap.DefaultVimVersion\n\t\t\t}\n\t\t\tusage := fmt.Sprintf(\"Vim version [%s]\", envVimVersion)\n\t\t\tf.StringVar(&flag.vimVersion, \"vim-version\", value, usage)\n\t\t}\n\n\t\t{\n\t\t\tvalue := os.Getenv(envTLSCaCerts)\n\t\t\tusage := fmt.Sprintf(\"TLS CA certificates file [%s]\", envTLSCaCerts)\n\t\t\tf.StringVar(&flag.tlsCaCerts, \"tls-ca-certs\", value, usage)\n\t\t}\n\n\t\t{\n\t\t\tvalue := os.Getenv(envTLSKnownHosts)\n\t\t\tusage := fmt.Sprintf(\"TLS known hosts file [%s]\", envTLSKnownHosts)\n\t\t\tf.StringVar(&flag.tlsKnownHosts, \"tls-known-hosts\", value, usage)\n\t\t}\n\t})\n}\n\nfunc (flag *ClientFlag) Process(ctx context.Context) error {\n\treturn flag.ProcessOnce(func() error {\n\t\tif err := flag.DebugFlag.Process(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif flag.url == nil {\n\t\t\treturn errors.New(\"specify an \" + cDescr)\n\t\t}\n\n\t\t// Override username if set\n\t\tif flag.username != \"\" {\n\t\t\tvar password string\n\t\t\tvar ok bool\n\n\t\t\tif flag.url.User 
!= nil {\n\t\t\t\tpassword, ok = flag.url.User.Password()\n\t\t\t}\n\n\t\t\tif ok {\n\t\t\t\tflag.url.User = url.UserPassword(flag.username, password)\n\t\t\t} else {\n\t\t\t\tflag.url.User = url.User(flag.username)\n\t\t\t}\n\t\t}\n\n\t\t// Override password if set\n\t\tif flag.password != \"\" {\n\t\t\tvar username string\n\n\t\t\tif flag.url.User != nil {\n\t\t\t\tusername = flag.url.User.Username()\n\t\t\t}\n\n\t\t\tflag.url.User = url.UserPassword(username, flag.password)\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\n// configure TLS and retry settings before making any connections\nfunc (flag *ClientFlag) configure(sc *soap.Client) (soap.RoundTripper, error) {\n\t// Set namespace and version\n\tsc.Namespace = flag.vimNamespace\n\tsc.Version = flag.vimVersion\n\n\tsc.UserAgent = fmt.Sprintf(\"govc/%s\", Version)\n\n\tif err := flag.SetRootCAs(sc); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := sc.LoadThumbprints(flag.tlsKnownHosts); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Retry twice when a temporary I/O error occurs.\n\t// This means a maximum of 3 attempts.\n\treturn vim25.Retry(sc, vim25.TemporaryNetworkError(3)), nil\n}\n\nfunc (flag *ClientFlag) sessionFile() string {\n\turl := flag.URLWithoutPassword()\n\n\t// Key session file off of full URI and insecure setting.\n\t// Hash key to get a predictable, canonical format.\n\tkey := fmt.Sprintf(\"%s#insecure=%t\", url.String(), flag.insecure)\n\tname := fmt.Sprintf(\"%040x\", sha1.Sum([]byte(key)))\n\treturn filepath.Join(home, \"sessions\", name)\n}\n\nfunc (flag *ClientFlag) saveClient(c *vim25.Client) error {\n\tif !flag.persist {\n\t\treturn nil\n\t}\n\n\tp := flag.sessionFile()\n\terr := os.MkdirAll(filepath.Dir(p), 0700)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf, err := os.OpenFile(p, os.O_CREATE|os.O_WRONLY, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\terr = json.NewEncoder(f).Encode(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (flag 
*ClientFlag) restoreClient(c *vim25.Client) (bool, error) {\n\tif !flag.persist {\n\t\treturn false, nil\n\t}\n\n\tf, err := os.Open(flag.sessionFile())\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn false, err\n\t}\n\n\tdefer f.Close()\n\n\tdec := json.NewDecoder(f)\n\terr = dec.Decode(c)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n\nfunc (flag *ClientFlag) loadClient() (*vim25.Client, error) {\n\tc := new(vim25.Client)\n\tok, err := flag.restoreClient(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !ok || !c.Valid() {\n\t\treturn nil, nil\n\t}\n\n\tc.RoundTripper, err = flag.configure(c.Client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm := session.NewManager(c)\n\tu, err := m.UserSession(context.TODO())\n\tif err != nil {\n\t\tif soap.IsSoapFault(err) {\n\t\t\tfault := soap.ToSoapFault(err).VimFault()\n\t\t\t// If the PropertyCollector is not found, the saved session for this URL is not valid\n\t\t\tif _, ok := fault.(types.ManagedObjectNotFound); ok {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\t// If the session is nil, the client is not authenticated\n\tif u == nil {\n\t\treturn nil, nil\n\t}\n\n\treturn c, nil\n}\n\nfunc (flag *ClientFlag) SetRootCAs(c *soap.Client) error {\n\tif flag.tlsCaCerts != \"\" {\n\t\treturn c.SetRootCAs(flag.tlsCaCerts)\n\t}\n\treturn nil\n}\n\nfunc (flag *ClientFlag) newClient() (*vim25.Client, error) {\n\tctx := context.TODO()\n\tsc := soap.NewClient(flag.url, flag.insecure)\n\tisTunnel := false\n\n\tif flag.cert != \"\" {\n\t\tisTunnel = true\n\t\tcert, err := tls.LoadX509KeyPair(flag.cert, flag.key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsc.SetCertificate(cert)\n\t}\n\n\trt, err := flag.configure(sc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc, err := vim25.NewClient(ctx, rt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Set client, since we didn't pass it in the 
constructor\n\tc.Client = sc\n\n\tm := session.NewManager(c)\n\tu := flag.url.User\n\n\tif u.Username() == \"\" {\n\t\t// Assume we are running on an ESX or Workstation host if no username is provided\n\t\tu, err = flag.localTicket(ctx, m)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif isTunnel {\n\t\terr = m.LoginExtensionByCertificate(ctx, u.Username(), \"\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\terr = m.Login(ctx, u)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\terr = flag.saveClient(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}\n\nfunc (flag *ClientFlag) localTicket(ctx context.Context, m *session.Manager) (*url.Userinfo, error) {\n\tticket, err := m.AcquireLocalTicket(ctx, os.Getenv(\"USER\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpassword, err := ioutil.ReadFile(ticket.PasswordFilePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn url.UserPassword(ticket.UserName, string(password)), nil\n}\n\n// apiVersionValid returns whether or not the API version supported by the\n// server the client is connected to is not recent enough.\nfunc apiVersionValid(c *vim25.Client, minVersionString string) error {\n\tif minVersionString == \"-\" {\n\t\t// Disable version check\n\t\treturn nil\n\t}\n\n\tapiVersion := c.ServiceContent.About.ApiVersion\n\tif strings.HasSuffix(apiVersion, \".x\") {\n\t\t// Skip version check for development builds\n\t\treturn nil\n\t}\n\n\trealVersion, err := ParseVersion(apiVersion)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tminVersion, err := ParseVersion(minVersionString)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !minVersion.Lte(realVersion) {\n\t\terr = fmt.Errorf(\"Require API version %s, connected to API version %s (set %s to override)\",\n\t\t\tminVersionString,\n\t\t\tc.ServiceContent.About.ApiVersion,\n\t\t\tenvMinAPIVersion)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (flag *ClientFlag) Client() (*vim25.Client, error) 
{\n\tif flag.client != nil {\n\t\treturn flag.client, nil\n\t}\n\n\tc, err := flag.loadClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// loadClient returns nil if it was unable to load a session from disk\n\tif c == nil {\n\t\tc, err = flag.newClient()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Check that the endpoint has the right API version\n\terr = apiVersionValid(c, flag.minAPIVersion)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tflag.client = c\n\treturn flag.client, nil\n}\n\nfunc (flag *ClientFlag) Logout(ctx context.Context) error {\n\tif flag.persist || flag.client == nil {\n\t\treturn nil\n\t}\n\n\tm := session.NewManager(flag.client)\n\n\treturn m.Logout(ctx)\n}\n\n// Environ returns the govc environment variables for this connection\nfunc (flag *ClientFlag) Environ(extra bool) []string {\n\tvar env []string\n\tadd := func(k, v string) {\n\t\tenv = append(env, fmt.Sprintf(\"%s=%s\", k, v))\n\t}\n\n\tu := *flag.url\n\tif u.User != nil {\n\t\tadd(envUsername, u.User.Username())\n\n\t\tif p, ok := u.User.Password(); ok {\n\t\t\tadd(envPassword, p)\n\t\t}\n\n\t\tu.User = nil\n\t}\n\n\tif u.Path == \"/sdk\" {\n\t\tu.Path = \"\"\n\t}\n\tu.Fragment = \"\"\n\tu.RawQuery = \"\"\n\n\tval := u.String()\n\tprefix := \"https://\"\n\tif strings.HasPrefix(val, prefix) {\n\t\tval = val[len(prefix):]\n\t}\n\tadd(envURL, val)\n\n\tkeys := []string{\n\t\tenvCertificate,\n\t\tenvPrivateKey,\n\t\tenvInsecure,\n\t\tenvPersist,\n\t\tenvMinAPIVersion,\n\t\tenvVimNamespace,\n\t\tenvVimVersion,\n\t}\n\n\tfor _, k := range keys {\n\t\tif v := os.Getenv(k); v != \"\" {\n\t\t\tadd(k, v)\n\t\t}\n\t}\n\n\tif extra {\n\t\tadd(\"GOVC_URL_SCHEME\", flag.url.Scheme)\n\n\t\tv := strings.SplitN(u.Host, \":\", 2)\n\t\tadd(\"GOVC_URL_HOST\", v[0])\n\t\tif len(v) == 2 {\n\t\t\tadd(\"GOVC_URL_PORT\", v[1])\n\t\t}\n\n\t\tadd(\"GOVC_URL_PATH\", flag.url.Path)\n\n\t\tif f := flag.url.Fragment; f != \"\" {\n\t\t\tadd(\"GOVC_URL_FRAGMENT\", f)\n\t\t}\n\n\t\tif 
q := flag.url.RawQuery; q != \"\" {\n\t\t\tadd(\"GOVC_URL_QUERY\", q)\n\t\t}\n\t}\n\n\treturn env\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/common.go",
    "content": "/*\nCopyright (c) 2015-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\npackage flags\n\nimport \"sync\"\n\n// Key type for storing flag instances in a context.Context.\ntype flagKey string\n\n// Type to help flags out with only registering/processing once.\ntype common struct {\n\tregister sync.Once\n\tprocess  sync.Once\n}\n\nfunc (c *common) RegisterOnce(fn func()) {\n\tc.register.Do(fn)\n}\n\nfunc (c *common) ProcessOnce(fn func() error) (err error) {\n\tc.process.Do(func() {\n\t\terr = fn()\n\t})\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/datacenter.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/find\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype DatacenterFlag struct {\n\tcommon\n\n\t*ClientFlag\n\t*OutputFlag\n\n\tpath   string\n\tdc     *object.Datacenter\n\tfinder *find.Finder\n\terr    error\n}\n\nvar datacenterFlagKey = flagKey(\"datacenter\")\n\nfunc NewDatacenterFlag(ctx context.Context) (*DatacenterFlag, context.Context) {\n\tif v := ctx.Value(datacenterFlagKey); v != nil {\n\t\treturn v.(*DatacenterFlag), ctx\n\t}\n\n\tv := &DatacenterFlag{}\n\tv.ClientFlag, ctx = NewClientFlag(ctx)\n\tv.OutputFlag, ctx = NewOutputFlag(ctx)\n\tctx = context.WithValue(ctx, datacenterFlagKey, v)\n\treturn v, ctx\n}\n\nfunc (flag *DatacenterFlag) Register(ctx context.Context, f *flag.FlagSet) {\n\tflag.RegisterOnce(func() {\n\t\tflag.ClientFlag.Register(ctx, f)\n\t\tflag.OutputFlag.Register(ctx, f)\n\n\t\tenv := \"GOVC_DATACENTER\"\n\t\tvalue := os.Getenv(env)\n\t\tusage := fmt.Sprintf(\"Datacenter [%s]\", env)\n\t\tf.StringVar(&flag.path, \"dc\", value, usage)\n\t})\n}\n\nfunc (flag *DatacenterFlag) Process(ctx context.Context) error {\n\treturn flag.ProcessOnce(func() error {\n\t\tif err := flag.ClientFlag.Process(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := 
flag.OutputFlag.Process(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc (flag *DatacenterFlag) Finder() (*find.Finder, error) {\n\tif flag.finder != nil {\n\t\treturn flag.finder, nil\n\t}\n\n\tc, err := flag.Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfinder := find.NewFinder(c, flag.JSON || flag.Dump)\n\n\t// Datacenter is not required (ls command for example).\n\t// Set for relative func if dc flag is given or\n\t// if there is a single (default) Datacenter\n\tctx := context.TODO()\n\tif flag.path == \"\" {\n\t\tflag.dc, flag.err = finder.DefaultDatacenter(ctx)\n\t} else {\n\t\tif flag.dc, err = finder.Datacenter(ctx, flag.path); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tfinder.SetDatacenter(flag.dc)\n\n\tflag.finder = finder\n\n\treturn flag.finder, nil\n}\n\nfunc (flag *DatacenterFlag) Datacenter() (*object.Datacenter, error) {\n\tif flag.dc != nil {\n\t\treturn flag.dc, nil\n\t}\n\n\t_, err := flag.Finder()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif flag.err != nil {\n\t\t// Should only happen if no dc is specified and len(dcs) > 1\n\t\treturn nil, flag.err\n\t}\n\n\treturn flag.dc, err\n}\n\nfunc (flag *DatacenterFlag) DatacenterIfSpecified() (*object.Datacenter, error) {\n\tif flag.path == \"\" {\n\t\treturn nil, nil\n\t}\n\treturn flag.Datacenter()\n}\n\nfunc (flag *DatacenterFlag) ManagedObjects(ctx context.Context, args []string) ([]types.ManagedObjectReference, error) {\n\tvar refs []types.ManagedObjectReference\n\n\tc, err := flag.Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(args) == 0 {\n\t\trefs = append(refs, c.ServiceContent.RootFolder)\n\t\treturn refs, nil\n\t}\n\n\tfinder, err := flag.Finder()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, arg := range args {\n\t\telements, err := finder.ManagedObjectList(ctx, arg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif len(elements) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"object '%s' not found\", 
arg)\n\t\t}\n\n\t\tfor _, e := range elements {\n\t\t\trefs = append(refs, e.Object.Reference())\n\t\t}\n\t}\n\n\treturn refs, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/datastore.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype DatastoreFlag struct {\n\tcommon\n\n\t*DatacenterFlag\n\n\tName string\n\n\tds *object.Datastore\n}\n\nvar datastoreFlagKey = flagKey(\"datastore\")\n\n// NewCustomDatastoreFlag creates and returns a new DatastoreFlag without\n// trying to retrieve an existing one from the specified context.\nfunc NewCustomDatastoreFlag(ctx context.Context) (*DatastoreFlag, context.Context) {\n\tv := &DatastoreFlag{}\n\tv.DatacenterFlag, ctx = NewDatacenterFlag(ctx)\n\treturn v, ctx\n}\n\nfunc NewDatastoreFlag(ctx context.Context) (*DatastoreFlag, context.Context) {\n\tif v := ctx.Value(datastoreFlagKey); v != nil {\n\t\treturn v.(*DatastoreFlag), ctx\n\t}\n\n\tv, ctx := NewCustomDatastoreFlag(ctx)\n\tctx = context.WithValue(ctx, datastoreFlagKey, v)\n\treturn v, ctx\n}\n\nfunc (f *DatastoreFlag) Register(ctx context.Context, fs *flag.FlagSet) {\n\tf.RegisterOnce(func() {\n\t\tf.DatacenterFlag.Register(ctx, fs)\n\n\t\tenv := \"GOVC_DATASTORE\"\n\t\tvalue := os.Getenv(env)\n\t\tusage := fmt.Sprintf(\"Datastore [%s]\", env)\n\t\tfs.StringVar(&f.Name, \"ds\", value, usage)\n\t})\n}\n\nfunc (f *DatastoreFlag) Process(ctx context.Context) error {\n\treturn f.ProcessOnce(func() error {\n\t\tif err := 
f.DatacenterFlag.Process(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc (f *DatastoreFlag) Datastore() (*object.Datastore, error) {\n\tif f.ds != nil {\n\t\treturn f.ds, nil\n\t}\n\n\tfinder, err := f.Finder()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif f.ds, err = finder.DatastoreOrDefault(context.TODO(), f.Name); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn f.ds, nil\n}\n\nfunc (flag *DatastoreFlag) DatastoreIfSpecified() (*object.Datastore, error) {\n\tif flag.Name == \"\" {\n\t\treturn nil, nil\n\t}\n\treturn flag.Datastore()\n}\n\nfunc (f *DatastoreFlag) DatastorePath(name string) (string, error) {\n\tds, err := f.Datastore()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn ds.Path(name), nil\n}\n\nfunc (f *DatastoreFlag) Stat(ctx context.Context, file string) (types.BaseFileInfo, error) {\n\tds, err := f.Datastore()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ds.Stat(ctx, file)\n\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/debug.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/vim25/debug\"\n)\n\ntype DebugFlag struct {\n\tcommon\n\n\tenable bool\n}\n\nvar debugFlagKey = flagKey(\"debug\")\n\nfunc NewDebugFlag(ctx context.Context) (*DebugFlag, context.Context) {\n\tif v := ctx.Value(debugFlagKey); v != nil {\n\t\treturn v.(*DebugFlag), ctx\n\t}\n\n\tv := &DebugFlag{}\n\tctx = context.WithValue(ctx, debugFlagKey, v)\n\treturn v, ctx\n}\n\nfunc (flag *DebugFlag) Register(ctx context.Context, f *flag.FlagSet) {\n\tflag.RegisterOnce(func() {\n\t\tenv := \"GOVC_DEBUG\"\n\t\tenable := false\n\t\tswitch env := strings.ToLower(os.Getenv(env)); env {\n\t\tcase \"1\", \"true\":\n\t\t\tenable = true\n\t\t}\n\n\t\tusage := fmt.Sprintf(\"Store debug logs [%s]\", env)\n\t\tf.BoolVar(&flag.enable, \"debug\", enable, usage)\n\t})\n}\n\nfunc (flag *DebugFlag) Process(ctx context.Context) error {\n\tif !flag.enable {\n\t\treturn nil\n\t}\n\n\treturn flag.ProcessOnce(func() error {\n\t\t// Base path for storing debug logs.\n\t\tr := os.Getenv(\"GOVC_DEBUG_PATH\")\n\t\tif r == \"\" {\n\t\t\tr = home\n\t\t}\n\t\tr = filepath.Join(r, \"debug\")\n\n\t\t// Path for this particular run.\n\t\trun := os.Getenv(\"GOVC_DEBUG_PATH_RUN\")\n\t\tif run == \"\" {\n\t\t\tnow := 
time.Now().Format(\"2006-01-02T15-04-05.999999999\")\n\t\t\tr = filepath.Join(r, now)\n\t\t} else {\n\t\t\t// reuse the same path\n\t\t\tr = filepath.Join(r, run)\n\t\t\t_ = os.RemoveAll(r)\n\t\t}\n\n\t\terr := os.MkdirAll(r, 0700)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tp := debug.FileProvider{\n\t\t\tPath: r,\n\t\t}\n\n\t\tdebug.SetProvider(&p)\n\t\treturn nil\n\t})\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/empty.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"context\"\n\t\"flag\"\n)\n\ntype EmptyFlag struct{}\n\nfunc (flag *EmptyFlag) Register(ctx context.Context, f *flag.FlagSet) {\n}\n\nfunc (flag *EmptyFlag) Process(ctx context.Context) error {\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/folder.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype FolderFlag struct {\n\tcommon\n\n\t*DatacenterFlag\n\n\tname   string\n\tfolder *object.Folder\n}\n\nvar folderFlagKey = flagKey(\"folder\")\n\nfunc NewFolderFlag(ctx context.Context) (*FolderFlag, context.Context) {\n\tif v := ctx.Value(folderFlagKey); v != nil {\n\t\treturn v.(*FolderFlag), ctx\n\t}\n\n\tv := &FolderFlag{}\n\tv.DatacenterFlag, ctx = NewDatacenterFlag(ctx)\n\tctx = context.WithValue(ctx, folderFlagKey, v)\n\treturn v, ctx\n}\n\nfunc (flag *FolderFlag) Register(ctx context.Context, f *flag.FlagSet) {\n\tflag.RegisterOnce(func() {\n\t\tflag.DatacenterFlag.Register(ctx, f)\n\n\t\tenv := \"GOVC_FOLDER\"\n\t\tvalue := os.Getenv(env)\n\t\tusage := fmt.Sprintf(\"Inventory folder [%s]\", env)\n\t\tf.StringVar(&flag.name, \"folder\", value, usage)\n\t})\n}\n\nfunc (flag *FolderFlag) Process(ctx context.Context) error {\n\treturn flag.ProcessOnce(func() error {\n\t\tif err := flag.DatacenterFlag.Process(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc (flag *FolderFlag) Folder() (*object.Folder, error) {\n\tif flag.folder != nil {\n\t\treturn flag.folder, nil\n\t}\n\n\tfinder, err := flag.Finder()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif flag.folder, err = 
finder.FolderOrDefault(context.TODO(), flag.name); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn flag.folder, nil\n}\n\nfunc (flag *FolderFlag) FolderOrDefault(kind string) (*object.Folder, error) {\n\tif flag.folder != nil {\n\t\treturn flag.folder, nil\n\t}\n\n\tif flag.name != \"\" {\n\t\treturn flag.Folder()\n\t}\n\n\t// RootFolder, no dc required\n\tif kind == \"/\" {\n\t\tclient, err := flag.Client()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tflag.folder = object.NewRootFolder(client)\n\t\treturn flag.folder, nil\n\t}\n\n\tdc, err := flag.Datacenter()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfolders, err := dc.Folders(context.TODO())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch kind {\n\tcase \"vm\":\n\t\tflag.folder = folders.VmFolder\n\tcase \"host\":\n\t\tflag.folder = folders.HostFolder\n\tcase \"datastore\":\n\t\tflag.folder = folders.DatastoreFolder\n\tcase \"network\":\n\t\tflag.folder = folders.NetworkFolder\n\tdefault:\n\t\tpanic(kind)\n\t}\n\n\treturn flag.folder, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/host_connect.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype HostConnectFlag struct {\n\tcommon\n\n\ttypes.HostConnectSpec\n\n\tnoverify bool\n}\n\nvar hostConnectFlagKey = flagKey(\"hostConnect\")\n\nfunc NewHostConnectFlag(ctx context.Context) (*HostConnectFlag, context.Context) {\n\tif v := ctx.Value(hostConnectFlagKey); v != nil {\n\t\treturn v.(*HostConnectFlag), ctx\n\t}\n\n\tv := &HostConnectFlag{}\n\tctx = context.WithValue(ctx, hostConnectFlagKey, v)\n\treturn v, ctx\n}\n\nfunc (flag *HostConnectFlag) Register(ctx context.Context, f *flag.FlagSet) {\n\tflag.RegisterOnce(func() {\n\t\tf.StringVar(&flag.HostName, \"hostname\", \"\", \"Hostname or IP address of the host\")\n\t\tf.StringVar(&flag.UserName, \"username\", \"\", \"Username of administration account on the host\")\n\t\tf.StringVar(&flag.Password, \"password\", \"\", \"Password of administration account on the host\")\n\t\tf.StringVar(&flag.SslThumbprint, \"thumbprint\", \"\", \"SHA-1 thumbprint of the host's SSL certificate\")\n\t\tf.BoolVar(&flag.Force, \"force\", false, \"Force when host is managed by another VC\")\n\n\t\tf.BoolVar(&flag.noverify, \"noverify\", false, \"Accept host thumbprint without 
verification\")\n\t})\n}\n\nfunc (flag *HostConnectFlag) Process(ctx context.Context) error {\n\treturn nil\n}\n\n// Spec attempts to fill in SslThumbprint if empty.\n// First checks GOVC_TLS_KNOWN_HOSTS, if not found and noverify=true then\n// use object.HostCertificateInfo to get the thumbprint.\nfunc (flag *HostConnectFlag) Spec(c *vim25.Client) types.HostConnectSpec {\n\tspec := flag.HostConnectSpec\n\n\tif spec.SslThumbprint == \"\" {\n\t\tspec.SslThumbprint = c.Thumbprint(spec.HostName)\n\n\t\tif spec.SslThumbprint == \"\" && flag.noverify {\n\t\t\tvar info object.HostCertificateInfo\n\t\t\tt := c.Transport.(*http.Transport)\n\t\t\t_ = info.FromURL(&url.URL{Host: spec.HostName}, t.TLSClientConfig)\n\t\t\tspec.SslThumbprint = info.ThumbprintSHA1\n\t\t}\n\t}\n\n\treturn spec\n}\n\n// Fault checks if error is SSLVerifyFault, including the thumbprint if so\nfunc (flag *HostConnectFlag) Fault(err error) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif f, ok := err.(types.HasFault); ok {\n\t\tswitch fault := f.Fault().(type) {\n\t\tcase *types.SSLVerifyFault:\n\t\t\treturn fmt.Errorf(\"%s thumbprint=%s\", err, fault.Thumbprint)\n\t\t}\n\t}\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/host_system.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype HostSystemFlag struct {\n\tcommon\n\n\t*ClientFlag\n\t*DatacenterFlag\n\t*SearchFlag\n\n\tname string\n\thost *object.HostSystem\n\tpool *object.ResourcePool\n}\n\nvar hostSystemFlagKey = flagKey(\"hostSystem\")\n\nfunc NewHostSystemFlag(ctx context.Context) (*HostSystemFlag, context.Context) {\n\tif v := ctx.Value(hostSystemFlagKey); v != nil {\n\t\treturn v.(*HostSystemFlag), ctx\n\t}\n\n\tv := &HostSystemFlag{}\n\tv.ClientFlag, ctx = NewClientFlag(ctx)\n\tv.DatacenterFlag, ctx = NewDatacenterFlag(ctx)\n\tv.SearchFlag, ctx = NewSearchFlag(ctx, SearchHosts)\n\tctx = context.WithValue(ctx, hostSystemFlagKey, v)\n\treturn v, ctx\n}\n\nfunc (flag *HostSystemFlag) Register(ctx context.Context, f *flag.FlagSet) {\n\tflag.RegisterOnce(func() {\n\t\tflag.ClientFlag.Register(ctx, f)\n\t\tflag.DatacenterFlag.Register(ctx, f)\n\t\tflag.SearchFlag.Register(ctx, f)\n\n\t\tenv := \"GOVC_HOST\"\n\t\tvalue := os.Getenv(env)\n\t\tusage := fmt.Sprintf(\"Host system [%s]\", env)\n\t\tf.StringVar(&flag.name, \"host\", value, usage)\n\t})\n}\n\nfunc (flag *HostSystemFlag) Process(ctx context.Context) error {\n\treturn flag.ProcessOnce(func() error {\n\t\tif err := flag.ClientFlag.Process(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := 
flag.DatacenterFlag.Process(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := flag.SearchFlag.Process(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc (flag *HostSystemFlag) HostSystemIfSpecified() (*object.HostSystem, error) {\n\tif flag.host != nil {\n\t\treturn flag.host, nil\n\t}\n\n\t// Use search flags if specified.\n\tif flag.SearchFlag.IsSet() {\n\t\thost, err := flag.SearchFlag.HostSystem()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tflag.host = host\n\t\treturn flag.host, nil\n\t}\n\n\t// Never look for a default host system.\n\t// A host system parameter is optional for vm creation. It uses a mandatory\n\t// resource pool parameter to determine where the vm should be placed.\n\tif flag.name == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tfinder, err := flag.Finder()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tflag.host, err = finder.HostSystem(context.TODO(), flag.name)\n\treturn flag.host, err\n}\n\nfunc (flag *HostSystemFlag) HostSystem() (*object.HostSystem, error) {\n\thost, err := flag.HostSystemIfSpecified()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif host != nil {\n\t\treturn host, nil\n\t}\n\n\tfinder, err := flag.Finder()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tflag.host, err = finder.DefaultHostSystem(context.TODO())\n\treturn flag.host, err\n}\n\nfunc (flag *HostSystemFlag) HostNetworkSystem() (*object.HostNetworkSystem, error) {\n\thost, err := flag.HostSystem()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn host.ConfigManager().NetworkSystem(context.TODO())\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/int32.go",
    "content": "/*\nCopyright (c) 2016-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"strconv\"\n)\n\n// This flag type is internal to stdlib:\n// https://github.com/golang/go/blob/master/src/cmd/internal/obj/flag.go\ntype int32Value int32\n\nfunc (i *int32Value) Set(s string) error {\n\tv, err := strconv.ParseInt(s, 0, 32)\n\t*i = int32Value(v)\n\treturn err\n}\n\nfunc (i *int32Value) Get() interface{} {\n\treturn int32(*i)\n}\n\nfunc (i *int32Value) String() string {\n\treturn fmt.Sprintf(\"%v\", *i)\n}\n\n// NewInt32 behaves as flag.IntVar, but using an int32 type.\nfunc NewInt32(v *int32) flag.Value {\n\treturn (*int32Value)(v)\n}\n\ntype int32ptrValue struct {\n\tval **int32\n}\n\nfunc (i *int32ptrValue) Set(s string) error {\n\tv, err := strconv.ParseInt(s, 0, 32)\n\t*i.val = new(int32)\n\t**i.val = int32(v)\n\treturn err\n}\n\nfunc (i *int32ptrValue) Get() interface{} {\n\tif i.val == nil {\n\t\treturn -1\n\t}\n\treturn *i.val\n}\n\nfunc (i *int32ptrValue) String() string {\n\treturn fmt.Sprintf(\"%v\", i.Get())\n}\n\nfunc NewOptionalInt32(v **int32) flag.Value {\n\treturn &int32ptrValue{val: v}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/network.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype NetworkFlag struct {\n\tcommon\n\n\t*DatacenterFlag\n\n\tname    string\n\tnet     object.NetworkReference\n\tadapter string\n\taddress string\n\tisset   bool\n}\n\nvar networkFlagKey = flagKey(\"network\")\n\nfunc NewNetworkFlag(ctx context.Context) (*NetworkFlag, context.Context) {\n\tif v := ctx.Value(networkFlagKey); v != nil {\n\t\treturn v.(*NetworkFlag), ctx\n\t}\n\n\tv := &NetworkFlag{}\n\tv.DatacenterFlag, ctx = NewDatacenterFlag(ctx)\n\tctx = context.WithValue(ctx, networkFlagKey, v)\n\treturn v, ctx\n}\n\nfunc (flag *NetworkFlag) Register(ctx context.Context, f *flag.FlagSet) {\n\tflag.RegisterOnce(func() {\n\t\tflag.DatacenterFlag.Register(ctx, f)\n\n\t\tenv := \"GOVC_NETWORK\"\n\t\tvalue := os.Getenv(env)\n\t\tflag.name = value\n\t\tusage := fmt.Sprintf(\"Network [%s]\", env)\n\t\tf.Var(flag, \"net\", usage)\n\t\tf.StringVar(&flag.adapter, \"net.adapter\", \"e1000\", \"Network adapter type\")\n\t\tf.StringVar(&flag.address, \"net.address\", \"\", \"Network hardware address\")\n\t})\n}\n\nfunc (flag *NetworkFlag) Process(ctx context.Context) error {\n\treturn flag.ProcessOnce(func() error {\n\t\tif err := flag.DatacenterFlag.Process(ctx); err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc (flag *NetworkFlag) String() string {\n\treturn flag.name\n}\n\nfunc (flag *NetworkFlag) Set(name string) error {\n\tflag.name = name\n\tflag.isset = true\n\treturn nil\n}\n\nfunc (flag *NetworkFlag) IsSet() bool {\n\treturn flag.isset\n}\n\nfunc (flag *NetworkFlag) Network() (object.NetworkReference, error) {\n\tif flag.net != nil {\n\t\treturn flag.net, nil\n\t}\n\n\tfinder, err := flag.Finder()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif flag.net, err = finder.NetworkOrDefault(context.TODO(), flag.name); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn flag.net, nil\n}\n\nfunc (flag *NetworkFlag) Device() (types.BaseVirtualDevice, error) {\n\tnet, err := flag.Network()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbacking, err := net.EthernetCardBackingInfo(context.TODO())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdevice, err := object.EthernetCardTypes().CreateEthernetCard(flag.adapter, backing)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif flag.address != \"\" {\n\t\tcard := device.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard()\n\t\tcard.AddressType = string(types.VirtualEthernetCardMacTypeManual)\n\t\tcard.MacAddress = flag.address\n\t}\n\n\treturn device, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/optional_bool.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"strconv\"\n)\n\ntype optionalBool struct {\n\tval **bool\n}\n\nfunc (b *optionalBool) Set(s string) error {\n\tv, err := strconv.ParseBool(s)\n\t*b.val = &v\n\treturn err\n}\n\nfunc (b *optionalBool) Get() interface{} {\n\tif *b.val == nil {\n\t\treturn nil\n\t}\n\treturn **b.val\n}\n\nfunc (b *optionalBool) String() string {\n\tif b.val == nil || *b.val == nil {\n\t\treturn \"<nil>\"\n\t}\n\treturn fmt.Sprintf(\"%v\", **b.val)\n}\n\nfunc (b *optionalBool) IsBoolFlag() bool { return true }\n\n// NewOptionalBool returns a flag.Value implementation where there is no default value.\n// This avoids sending a default value over the wire as using flag.BoolVar() would.\nfunc NewOptionalBool(v **bool) flag.Value {\n\treturn &optionalBool{v}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/optional_bool_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"flag\"\n\t\"testing\"\n)\n\nfunc TestOptionalBool(t *testing.T) {\n\tfs := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tvar val *bool\n\n\tfs.Var(NewOptionalBool(&val), \"obool\", \"optional bool\")\n\n\tb := fs.Lookup(\"obool\")\n\n\tif b.DefValue != \"<nil>\" {\n\t\tt.Fail()\n\t}\n\n\tif b.Value.String() != \"<nil>\" {\n\t\tt.Fail()\n\t}\n\n\tif b.Value.(flag.Getter).Get() != nil {\n\t\tt.Fail()\n\t}\n\n\tb.Value.Set(\"true\")\n\n\tif b.Value.String() != \"true\" {\n\t\tt.Fail()\n\t}\n\n\tif b.Value.(flag.Getter).Get() != true {\n\t\tt.Fail()\n\t}\n\n\tif val == nil || *val != true {\n\t\tt.Fail()\n\t}\n\n\tb.Value.Set(\"false\")\n\n\tif b.Value.String() != \"false\" {\n\t\tt.Fail()\n\t}\n\n\tif b.Value.(flag.Getter).Get() != false {\n\t\tt.Fail()\n\t}\n\n\tif val == nil || *val != false {\n\t\tt.Fail()\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/output.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/vim25/progress\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n)\n\ntype OutputWriter interface {\n\tWrite(io.Writer) error\n}\n\ntype OutputFlag struct {\n\tcommon\n\n\tJSON bool\n\tTTY  bool\n\tDump bool\n\tOut  io.Writer\n}\n\nvar outputFlagKey = flagKey(\"output\")\n\nfunc NewOutputFlag(ctx context.Context) (*OutputFlag, context.Context) {\n\tif v := ctx.Value(outputFlagKey); v != nil {\n\t\treturn v.(*OutputFlag), ctx\n\t}\n\n\tv := &OutputFlag{Out: os.Stdout}\n\tctx = context.WithValue(ctx, outputFlagKey, v)\n\treturn v, ctx\n}\n\nfunc (flag *OutputFlag) Register(ctx context.Context, f *flag.FlagSet) {\n\tflag.RegisterOnce(func() {\n\t\tf.BoolVar(&flag.JSON, \"json\", false, \"Enable JSON output\")\n\t\tf.BoolVar(&flag.Dump, \"dump\", false, \"Enable output dump\")\n\t})\n}\n\nfunc (flag *OutputFlag) Process(ctx context.Context) error {\n\treturn flag.ProcessOnce(func() error {\n\t\tif !flag.JSON {\n\t\t\t// Assume we have a tty if not outputting JSON\n\t\t\tflag.TTY = true\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\n// Log outputs the specified string, prefixed with the current time.\n// A newline is not automatically added. 
If the specified string\n// starts with a '\\r', the current line is cleared first.\nfunc (flag *OutputFlag) Log(s string) (int, error) {\n\tif len(s) > 0 && s[0] == '\\r' {\n\t\tflag.Write([]byte{'\\r', 033, '[', 'K'})\n\t\ts = s[1:]\n\t}\n\n\treturn flag.WriteString(time.Now().Format(\"[02-01-06 15:04:05] \") + s)\n}\n\nfunc (flag *OutputFlag) Write(b []byte) (int, error) {\n\tif !flag.TTY {\n\t\treturn 0, nil\n\t}\n\n\tn, err := os.Stdout.Write(b)\n\tos.Stdout.Sync()\n\treturn n, err\n}\n\nfunc (flag *OutputFlag) WriteString(s string) (int, error) {\n\treturn flag.Write([]byte(s))\n}\n\nfunc (flag *OutputFlag) WriteResult(result OutputWriter) error {\n\tvar err error\n\n\tif flag.JSON {\n\t\terr = json.NewEncoder(flag.Out).Encode(result)\n\t} else if flag.Dump {\n\t\tscs := spew.ConfigState{Indent: \"    \"}\n\t\tscs.Fdump(flag.Out, result)\n\t} else {\n\t\terr = result.Write(flag.Out)\n\t}\n\n\treturn err\n}\n\ntype progressLogger struct {\n\tflag   *OutputFlag\n\tprefix string\n\n\twg sync.WaitGroup\n\n\tsink chan chan progress.Report\n\tdone chan struct{}\n}\n\nfunc newProgressLogger(flag *OutputFlag, prefix string) *progressLogger {\n\tp := &progressLogger{\n\t\tflag:   flag,\n\t\tprefix: prefix,\n\n\t\tsink: make(chan chan progress.Report),\n\t\tdone: make(chan struct{}),\n\t}\n\n\tp.wg.Add(1)\n\n\tgo p.loopA()\n\n\treturn p\n}\n\n// loopA runs before Sink() has been called.\nfunc (p *progressLogger) loopA() {\n\tvar err error\n\n\tdefer p.wg.Done()\n\n\ttick := time.NewTicker(100 * time.Millisecond)\n\tdefer tick.Stop()\n\n\tfor stop := false; !stop; {\n\t\tselect {\n\t\tcase ch := <-p.sink:\n\t\t\terr = p.loopB(tick, ch)\n\t\t\tstop = true\n\t\tcase <-p.done:\n\t\t\tstop = true\n\t\tcase <-tick.C:\n\t\t\tline := fmt.Sprintf(\"\\r%s\", p.prefix)\n\t\t\tp.flag.Log(line)\n\t\t}\n\t}\n\n\tif err != nil && err != io.EOF {\n\t\tp.flag.Log(fmt.Sprintf(\"\\r%sError: %s\\n\", p.prefix, err))\n\t} else {\n\t\tp.flag.Log(fmt.Sprintf(\"\\r%sOK\\n\", 
p.prefix))\n\t}\n}\n\n// loopA runs after Sink() has been called.\nfunc (p *progressLogger) loopB(tick *time.Ticker, ch <-chan progress.Report) error {\n\tvar r progress.Report\n\tvar ok bool\n\tvar err error\n\n\tfor ok = true; ok; {\n\t\tselect {\n\t\tcase r, ok = <-ch:\n\t\t\tif !ok {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\terr = r.Error()\n\t\tcase <-tick.C:\n\t\t\tline := fmt.Sprintf(\"\\r%s\", p.prefix)\n\t\t\tif r != nil {\n\t\t\t\tline += fmt.Sprintf(\"(%.0f%%\", r.Percentage())\n\t\t\t\tdetail := r.Detail()\n\t\t\t\tif detail != \"\" {\n\t\t\t\t\tline += fmt.Sprintf(\", %s\", detail)\n\t\t\t\t}\n\t\t\t\tline += \")\"\n\t\t\t}\n\t\t\tp.flag.Log(line)\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc (p *progressLogger) Sink() chan<- progress.Report {\n\tch := make(chan progress.Report)\n\tp.sink <- ch\n\treturn ch\n}\n\nfunc (p *progressLogger) Wait() {\n\tclose(p.done)\n\tp.wg.Wait()\n}\n\nfunc (flag *OutputFlag) ProgressLogger(prefix string) *progressLogger {\n\treturn newProgressLogger(flag, prefix)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/resource_pool.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype ResourcePoolFlag struct {\n\tcommon\n\n\t*DatacenterFlag\n\n\tname string\n\tpool *object.ResourcePool\n}\n\nvar resourcePoolFlagKey = flagKey(\"resourcePool\")\n\nfunc NewResourcePoolFlag(ctx context.Context) (*ResourcePoolFlag, context.Context) {\n\tif v := ctx.Value(resourcePoolFlagKey); v != nil {\n\t\treturn v.(*ResourcePoolFlag), ctx\n\t}\n\n\tv := &ResourcePoolFlag{}\n\tv.DatacenterFlag, ctx = NewDatacenterFlag(ctx)\n\tctx = context.WithValue(ctx, resourcePoolFlagKey, v)\n\treturn v, ctx\n}\n\nfunc (flag *ResourcePoolFlag) Register(ctx context.Context, f *flag.FlagSet) {\n\tflag.RegisterOnce(func() {\n\t\tflag.DatacenterFlag.Register(ctx, f)\n\n\t\tenv := \"GOVC_RESOURCE_POOL\"\n\t\tvalue := os.Getenv(env)\n\t\tusage := fmt.Sprintf(\"Resource pool [%s]\", env)\n\t\tf.StringVar(&flag.name, \"pool\", value, usage)\n\t})\n}\n\nfunc (flag *ResourcePoolFlag) Process(ctx context.Context) error {\n\treturn flag.ProcessOnce(func() error {\n\t\tif err := flag.DatacenterFlag.Process(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc (flag *ResourcePoolFlag) ResourcePool() (*object.ResourcePool, error) {\n\tif flag.pool != nil {\n\t\treturn flag.pool, nil\n\t}\n\n\tfinder, err := flag.Finder()\n\tif err 
!= nil {\n\t\treturn nil, err\n\t}\n\n\tif flag.pool, err = finder.ResourcePoolOrDefault(context.TODO(), flag.name); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn flag.pool, nil\n}\n\nfunc (flag *ResourcePoolFlag) ResourcePoolIfSpecified() (*object.ResourcePool, error) {\n\tif flag.name == \"\" {\n\t\treturn nil, nil\n\t}\n\treturn flag.ResourcePool()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/search.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/find\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nconst (\n\tSearchVirtualMachines = iota + 1\n\tSearchHosts\n\tSearchVirtualApps\n)\n\ntype SearchFlag struct {\n\tcommon\n\n\t*ClientFlag\n\t*DatacenterFlag\n\n\tt      int\n\tentity string\n\n\tbyDatastorePath string\n\tbyDNSName       string\n\tbyInventoryPath string\n\tbyIP            string\n\tbyUUID          string\n\n\tisset bool\n}\n\nvar searchFlagKey = flagKey(\"search\")\n\nfunc NewSearchFlag(ctx context.Context, t int) (*SearchFlag, context.Context) {\n\tif v := ctx.Value(searchFlagKey); v != nil {\n\t\treturn v.(*SearchFlag), ctx\n\t}\n\n\tv := &SearchFlag{\n\t\tt: t,\n\t}\n\n\tv.ClientFlag, ctx = NewClientFlag(ctx)\n\tv.DatacenterFlag, ctx = NewDatacenterFlag(ctx)\n\n\tswitch t {\n\tcase SearchVirtualMachines:\n\t\tv.entity = \"VM\"\n\tcase SearchHosts:\n\t\tv.entity = \"host\"\n\tcase SearchVirtualApps:\n\t\tv.entity = \"vapp\"\n\tdefault:\n\t\tpanic(\"invalid search type\")\n\t}\n\n\tctx = context.WithValue(ctx, searchFlagKey, v)\n\treturn v, ctx\n}\n\nfunc (flag *SearchFlag) Register(ctx context.Context, fs *flag.FlagSet) 
{\n\tflag.RegisterOnce(func() {\n\t\tflag.ClientFlag.Register(ctx, fs)\n\t\tflag.DatacenterFlag.Register(ctx, fs)\n\n\t\tregister := func(v *string, f string, d string) {\n\t\t\tf = fmt.Sprintf(\"%s.%s\", strings.ToLower(flag.entity), f)\n\t\t\td = fmt.Sprintf(d, flag.entity)\n\t\t\tfs.StringVar(v, f, \"\", d)\n\t\t}\n\n\t\tswitch flag.t {\n\t\tcase SearchVirtualMachines:\n\t\t\tregister(&flag.byDatastorePath, \"path\", \"Find %s by path to .vmx file\")\n\t\t}\n\n\t\tswitch flag.t {\n\t\tcase SearchVirtualMachines, SearchHosts:\n\t\t\tregister(&flag.byDNSName, \"dns\", \"Find %s by FQDN\")\n\t\t\tregister(&flag.byIP, \"ip\", \"Find %s by IP address\")\n\t\t\tregister(&flag.byUUID, \"uuid\", \"Find %s by UUID\")\n\t\t}\n\n\t\tregister(&flag.byInventoryPath, \"ipath\", \"Find %s by inventory path\")\n\t})\n}\n\nfunc (flag *SearchFlag) Process(ctx context.Context) error {\n\treturn flag.ProcessOnce(func() error {\n\t\tif err := flag.ClientFlag.Process(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := flag.DatacenterFlag.Process(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tflags := []string{\n\t\t\tflag.byDatastorePath,\n\t\t\tflag.byDNSName,\n\t\t\tflag.byInventoryPath,\n\t\t\tflag.byIP,\n\t\t\tflag.byUUID,\n\t\t}\n\n\t\tflag.isset = false\n\t\tfor _, f := range flags {\n\t\t\tif f != \"\" {\n\t\t\t\tif flag.isset {\n\t\t\t\t\treturn errors.New(\"cannot use more than one search flag\")\n\t\t\t\t}\n\t\t\t\tflag.isset = true\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\nfunc (flag *SearchFlag) IsSet() bool {\n\treturn flag.isset\n}\n\nfunc (flag *SearchFlag) searchIndex(c *vim25.Client) *object.SearchIndex {\n\treturn object.NewSearchIndex(c)\n}\n\nfunc (flag *SearchFlag) searchByDatastorePath(c *vim25.Client, dc *object.Datacenter) (object.Reference, error) {\n\tctx := context.TODO()\n\tswitch flag.t {\n\tcase SearchVirtualMachines:\n\t\treturn flag.searchIndex(c).FindByDatastorePath(ctx, dc, flag.byDatastorePath)\n\tdefault:\n\t\tpanic(\"unsupported 
type\")\n\t}\n}\n\nfunc (flag *SearchFlag) searchByDNSName(c *vim25.Client, dc *object.Datacenter) (object.Reference, error) {\n\tctx := context.TODO()\n\tswitch flag.t {\n\tcase SearchVirtualMachines:\n\t\treturn flag.searchIndex(c).FindByDnsName(ctx, dc, flag.byDNSName, true)\n\tcase SearchHosts:\n\t\treturn flag.searchIndex(c).FindByDnsName(ctx, dc, flag.byDNSName, false)\n\tdefault:\n\t\tpanic(\"unsupported type\")\n\t}\n}\n\nfunc (flag *SearchFlag) searchByInventoryPath(c *vim25.Client, dc *object.Datacenter) (object.Reference, error) {\n\t// TODO(PN): The datacenter flag should not be set because it is ignored.\n\tctx := context.TODO()\n\treturn flag.searchIndex(c).FindByInventoryPath(ctx, flag.byInventoryPath)\n}\n\nfunc (flag *SearchFlag) searchByIP(c *vim25.Client, dc *object.Datacenter) (object.Reference, error) {\n\tctx := context.TODO()\n\tswitch flag.t {\n\tcase SearchVirtualMachines:\n\t\treturn flag.searchIndex(c).FindByIp(ctx, dc, flag.byIP, true)\n\tcase SearchHosts:\n\t\treturn flag.searchIndex(c).FindByIp(ctx, dc, flag.byIP, false)\n\tdefault:\n\t\tpanic(\"unsupported type\")\n\t}\n}\n\nfunc (flag *SearchFlag) searchByUUID(c *vim25.Client, dc *object.Datacenter) (object.Reference, error) {\n\tctx := context.TODO()\n\tisVM := false\n\tswitch flag.t {\n\tcase SearchVirtualMachines:\n\t\tisVM = true\n\tcase SearchHosts:\n\tdefault:\n\t\tpanic(\"unsupported type\")\n\t}\n\n\tvar ref object.Reference\n\tvar err error\n\n\tfor _, iu := range []*bool{nil, types.NewBool(true)} {\n\t\tref, err = flag.searchIndex(c).FindByUuid(ctx, dc, flag.byUUID, isVM, iu)\n\t\tif err != nil {\n\t\t\tif soap.IsSoapFault(err) {\n\t\t\t\tfault := soap.ToSoapFault(err).VimFault()\n\t\t\t\tif _, ok := fault.(types.InvalidArgument); ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\t\tif ref != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn ref, nil\n}\n\nfunc (flag *SearchFlag) search() (object.Reference, error) {\n\tctx := context.TODO()\n\tvar ref 
object.Reference\n\tvar err error\n\n\tc, err := flag.Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdc, err := flag.Datacenter()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch {\n\tcase flag.byDatastorePath != \"\":\n\t\tref, err = flag.searchByDatastorePath(c, dc)\n\tcase flag.byDNSName != \"\":\n\t\tref, err = flag.searchByDNSName(c, dc)\n\tcase flag.byInventoryPath != \"\":\n\t\tref, err = flag.searchByInventoryPath(c, dc)\n\tcase flag.byIP != \"\":\n\t\tref, err = flag.searchByIP(c, dc)\n\tcase flag.byUUID != \"\":\n\t\tref, err = flag.searchByUUID(c, dc)\n\tdefault:\n\t\terr = errors.New(\"no search flag specified\")\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif ref == nil {\n\t\treturn nil, fmt.Errorf(\"no such %s\", flag.entity)\n\t}\n\n\t// set the InventoryPath field\n\tfinder, err := flag.Finder()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tref, err = finder.ObjectReference(ctx, ref.Reference())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ref, nil\n}\n\nfunc (flag *SearchFlag) VirtualMachine() (*object.VirtualMachine, error) {\n\tref, err := flag.search()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvm, ok := ref.(*object.VirtualMachine)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"expected VirtualMachine entity, got %s\", ref.Reference().Type)\n\t}\n\n\treturn vm, nil\n}\n\nfunc (flag *SearchFlag) VirtualMachines(args []string) ([]*object.VirtualMachine, error) {\n\tctx := context.TODO()\n\tvar out []*object.VirtualMachine\n\n\tif flag.IsSet() {\n\t\tvm, err := flag.VirtualMachine()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tout = append(out, vm)\n\t\treturn out, nil\n\t}\n\n\t// List virtual machines\n\tif len(args) == 0 {\n\t\treturn nil, errors.New(\"no argument\")\n\t}\n\n\tfinder, err := flag.Finder()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar nfe error\n\n\t// List virtual machines for every argument\n\tfor _, arg := range args {\n\t\tvms, err := 
finder.VirtualMachineList(ctx, arg)\n\t\tif err != nil {\n\t\t\tif _, ok := err.(*find.NotFoundError); ok {\n\t\t\t\t// Let caller decide how to handle NotFoundError\n\t\t\t\tnfe = err\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\n\t\tout = append(out, vms...)\n\t}\n\n\treturn out, nfe\n}\n\nfunc (flag *SearchFlag) VirtualApp() (*object.VirtualApp, error) {\n\tref, err := flag.search()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tapp, ok := ref.(*object.VirtualApp)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"expected VirtualApp entity, got %s\", ref.Reference().Type)\n\t}\n\n\treturn app, nil\n}\n\nfunc (flag *SearchFlag) VirtualApps(args []string) ([]*object.VirtualApp, error) {\n\tctx := context.TODO()\n\tvar out []*object.VirtualApp\n\n\tif flag.IsSet() {\n\t\tapp, err := flag.VirtualApp()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tout = append(out, app)\n\t\treturn out, nil\n\t}\n\n\t// List virtual apps\n\tif len(args) == 0 {\n\t\treturn nil, errors.New(\"no argument\")\n\t}\n\n\tfinder, err := flag.Finder()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// List virtual apps for every argument\n\tfor _, arg := range args {\n\t\tapps, err := finder.VirtualAppList(ctx, arg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tout = append(out, apps...)\n\t}\n\n\treturn out, nil\n}\n\nfunc (flag *SearchFlag) HostSystem() (*object.HostSystem, error) {\n\tref, err := flag.search()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thost, ok := ref.(*object.HostSystem)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"expected HostSystem entity, got %s\", ref.Reference().Type)\n\t}\n\n\treturn host, nil\n}\n\nfunc (flag *SearchFlag) HostSystems(args []string) ([]*object.HostSystem, error) {\n\tctx := context.TODO()\n\tvar out []*object.HostSystem\n\n\tif flag.IsSet() {\n\t\thost, err := flag.HostSystem()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tout = append(out, host)\n\t\treturn out, nil\n\t}\n\n\t// List host 
system\n\tif len(args) == 0 {\n\t\treturn nil, errors.New(\"no argument\")\n\t}\n\n\tfinder, err := flag.Finder()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// List host systems for every argument\n\tfor _, arg := range args {\n\t\tvms, err := finder.HostSystemList(ctx, arg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tout = append(out, vms...)\n\t}\n\n\treturn out, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/storage_pod.go",
    "content": "package flags\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype StoragePodFlag struct {\n\tcommon\n\n\t*DatacenterFlag\n\n\tName string\n\n\tsp *object.StoragePod\n}\n\nvar storagePodFlagKey = flagKey(\"storagePod\")\n\nfunc NewStoragePodFlag(ctx context.Context) (*StoragePodFlag, context.Context) {\n\tif v := ctx.Value(storagePodFlagKey); v != nil {\n\t\treturn v.(*StoragePodFlag), ctx\n\t}\n\n\tv := &StoragePodFlag{}\n\tv.DatacenterFlag, ctx = NewDatacenterFlag(ctx)\n\tctx = context.WithValue(ctx, storagePodFlagKey, v)\n\treturn v, ctx\n}\n\nfunc (f *StoragePodFlag) Register(ctx context.Context, fs *flag.FlagSet) {\n\tf.RegisterOnce(func() {\n\t\tf.DatacenterFlag.Register(ctx, fs)\n\n\t\tenv := \"GOVC_DATASTORE_CLUSTER\"\n\t\tvalue := os.Getenv(env)\n\t\tusage := fmt.Sprintf(\"Datastore cluster [%s]\", env)\n\t\tfs.StringVar(&f.Name, \"datastore-cluster\", value, usage)\n\t})\n}\n\nfunc (f *StoragePodFlag) Process(ctx context.Context) error {\n\treturn f.DatacenterFlag.Process(ctx)\n}\n\nfunc (f *StoragePodFlag) Isset() bool {\n\treturn f.Name != \"\"\n}\n\nfunc (f *StoragePodFlag) StoragePod() (*object.StoragePod, error) {\n\tctx := context.TODO()\n\tif f.sp != nil {\n\t\treturn f.sp, nil\n\t}\n\n\tfinder, err := f.Finder()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif f.Isset() {\n\t\tf.sp, err = finder.DatastoreCluster(ctx, f.Name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tf.sp, err = finder.DefaultDatastoreCluster(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn f.sp, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/version.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst Version = \"0.15.0\"\n\ntype version []int\n\nfunc ParseVersion(s string) (version, error) {\n\tv := make(version, 0)\n\tps := strings.Split(s, \".\")\n\tfor _, p := range ps {\n\t\ti, err := strconv.Atoi(p)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tv = append(v, i)\n\t}\n\n\treturn v, nil\n}\n\nfunc (v version) Lte(u version) bool {\n\tlv := len(v)\n\tlu := len(u)\n\n\tfor i := 0; i < lv; i++ {\n\t\t// Everything up to here has been equal and v has more elements than u.\n\t\tif i >= lu {\n\t\t\treturn false\n\t\t}\n\n\t\t// Move to next digit if equal.\n\t\tif v[i] == u[i] {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn v[i] < u[i]\n\t}\n\n\t// Equal.\n\treturn true\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/version_test.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport \"testing\"\n\nfunc TestParseVersion(t *testing.T) {\n\tvar v version\n\tvar err error\n\n\tv, err = ParseVersion(\"5.5.5.5\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif len(v) != 4 {\n\t\tt.Errorf(\"Expected %d elements, got %d\", 4, len(v))\n\t}\n\n\tfor i := 0; i < len(v); i++ {\n\t\tif v[i] != 5 {\n\t\t\tt.Errorf(\"Expected %d, got %d\", 5, v[i])\n\t\t}\n\t}\n}\n\nfunc TestLte(t *testing.T) {\n\tv1, err := ParseVersion(\"5.5\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tv2, err := ParseVersion(\"5.6\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif !v1.Lte(v1) {\n\t\tt.Errorf(\"Expected 5.5 <= 5.5\")\n\t}\n\n\tif !v1.Lte(v2) {\n\t\tt.Errorf(\"Expected 5.5 <= 5.6\")\n\t}\n\n\tif v2.Lte(v1) {\n\t\tt.Errorf(\"Expected not 5.6 <= 5.5\")\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/virtual_app.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype VirtualAppFlag struct {\n\tcommon\n\n\t*DatacenterFlag\n\t*SearchFlag\n\n\tname string\n\tapp  *object.VirtualApp\n}\n\nvar virtualAppFlagKey = flagKey(\"virtualApp\")\n\nfunc NewVirtualAppFlag(ctx context.Context) (*VirtualAppFlag, context.Context) {\n\tif v := ctx.Value(virtualAppFlagKey); v != nil {\n\t\treturn v.(*VirtualAppFlag), ctx\n\t}\n\n\tv := &VirtualAppFlag{}\n\tv.DatacenterFlag, ctx = NewDatacenterFlag(ctx)\n\tv.SearchFlag, ctx = NewSearchFlag(ctx, SearchVirtualApps)\n\tctx = context.WithValue(ctx, virtualAppFlagKey, v)\n\treturn v, ctx\n}\n\nfunc (flag *VirtualAppFlag) Register(ctx context.Context, f *flag.FlagSet) {\n\tflag.RegisterOnce(func() {\n\t\tflag.DatacenterFlag.Register(ctx, f)\n\t\tflag.SearchFlag.Register(ctx, f)\n\n\t\tenv := \"GOVC_VAPP\"\n\t\tvalue := os.Getenv(env)\n\t\tusage := fmt.Sprintf(\"Virtual App [%s]\", env)\n\t\tf.StringVar(&flag.name, \"vapp\", value, usage)\n\t})\n}\n\nfunc (flag *VirtualAppFlag) Process(ctx context.Context) error {\n\treturn flag.ProcessOnce(func() error {\n\t\tif err := flag.DatacenterFlag.Process(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := flag.SearchFlag.Process(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc (flag 
*VirtualAppFlag) VirtualApp() (*object.VirtualApp, error) {\n\tctx := context.TODO()\n\n\tif flag.app != nil {\n\t\treturn flag.app, nil\n\t}\n\n\t// Use search flags if specified.\n\tif flag.SearchFlag.IsSet() {\n\t\tapp, err := flag.SearchFlag.VirtualApp()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tflag.app = app\n\t\treturn flag.app, nil\n\t}\n\n\t// Never look for a default virtual app.\n\tif flag.name == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tfinder, err := flag.Finder()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tflag.app, err = finder.VirtualApp(ctx, flag.name)\n\treturn flag.app, err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/flags/virtual_machine.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage flags\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype VirtualMachineFlag struct {\n\tcommon\n\n\t*ClientFlag\n\t*DatacenterFlag\n\t*SearchFlag\n\n\tname string\n\tvm   *object.VirtualMachine\n}\n\nvar virtualMachineFlagKey = flagKey(\"virtualMachine\")\n\nfunc NewVirtualMachineFlag(ctx context.Context) (*VirtualMachineFlag, context.Context) {\n\tif v := ctx.Value(virtualMachineFlagKey); v != nil {\n\t\treturn v.(*VirtualMachineFlag), ctx\n\t}\n\n\tv := &VirtualMachineFlag{}\n\tv.ClientFlag, ctx = NewClientFlag(ctx)\n\tv.DatacenterFlag, ctx = NewDatacenterFlag(ctx)\n\tv.SearchFlag, ctx = NewSearchFlag(ctx, SearchVirtualMachines)\n\tctx = context.WithValue(ctx, virtualMachineFlagKey, v)\n\treturn v, ctx\n}\n\nfunc (flag *VirtualMachineFlag) Register(ctx context.Context, f *flag.FlagSet) {\n\tflag.RegisterOnce(func() {\n\t\tflag.ClientFlag.Register(ctx, f)\n\t\tflag.DatacenterFlag.Register(ctx, f)\n\t\tflag.SearchFlag.Register(ctx, f)\n\n\t\tenv := \"GOVC_VM\"\n\t\tvalue := os.Getenv(env)\n\t\tusage := fmt.Sprintf(\"Virtual machine [%s]\", env)\n\t\tf.StringVar(&flag.name, \"vm\", value, usage)\n\t})\n}\n\nfunc (flag *VirtualMachineFlag) Process(ctx context.Context) error {\n\treturn flag.ProcessOnce(func() error {\n\t\tif err := flag.ClientFlag.Process(ctx); err != nil 
{\n\t\t\treturn err\n\t\t}\n\t\tif err := flag.DatacenterFlag.Process(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := flag.SearchFlag.Process(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc (flag *VirtualMachineFlag) VirtualMachine() (*object.VirtualMachine, error) {\n\tctx := context.TODO()\n\n\tif flag.vm != nil {\n\t\treturn flag.vm, nil\n\t}\n\n\t// Use search flags if specified.\n\tif flag.SearchFlag.IsSet() {\n\t\tvm, err := flag.SearchFlag.VirtualMachine()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tflag.vm = vm\n\t\treturn flag.vm, nil\n\t}\n\n\t// Never look for a default virtual machine.\n\tif flag.name == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tfinder, err := flag.Finder()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tflag.vm, err = finder.VirtualMachine(ctx, flag.name)\n\treturn flag.vm, err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/folder/create.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage folder\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"path\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype create struct {\n\t*flags.DatacenterFlag\n\n\tpod bool\n}\n\nfunc init() {\n\tcli.Register(\"folder.create\", &create{})\n}\n\nfunc (cmd *create) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.pod, \"pod\", false, \"Create folder(s) of type StoragePod (DatastoreCluster)\")\n}\n\nfunc (cmd *create) Usage() string {\n\treturn \"PATH...\"\n}\n\nfunc (cmd *create) Description() string {\n\treturn `Create folder with PATH.\n\nExamples:\n  govc folder.create /dc1/vm/folder-foo\n  govc object.mv /dc1/vm/vm-foo-* /dc1/vm/folder-foo\n  govc folder.create -pod /dc1/datastore/sdrs\n  govc object.mv /dc1/datastore/iscsi-* /dc1/datastore/sdrs`\n}\n\nfunc (cmd *create) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error {\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, arg := range f.Args() {\n\t\tdir := path.Dir(arg)\n\t\tname := path.Base(arg)\n\n\t\tif dir == \"\" {\n\t\t\tdir = 
\"/\"\n\t\t}\n\n\t\tfolder, err := finder.Folder(ctx, dir)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar create func() error\n\t\tif cmd.pod {\n\t\t\tcreate = func() error {\n\t\t\t\t_, err = folder.CreateStoragePod(ctx, name)\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tcreate = func() error {\n\t\t\t\t_, err = folder.CreateFolder(ctx, name)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\terr = create()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/folder/info.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage folder\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype info struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n\t*flags.DatacenterFlag\n}\n\nfunc init() {\n\tcli.Register(\"folder.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *info) Usage() string {\n\treturn \"[PATH]...\"\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targs := f.Args()\n\tif len(args) == 0 {\n\t\targs = []string{\"/\"}\n\t}\n\n\tvar props []string\n\tvar res infoResult\n\n\tif !cmd.OutputFlag.JSON {\n\t\tprops = []string{\n\t\t\t\"name\",\n\t\t\t\"childEntity\",\n\t\t\t\"childType\",\n\t\t}\n\t}\n\n\tfor _, arg := range args {\n\t\tobject, err := finder.FolderList(ctx, arg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres.objects = append(res.objects, object...)\n\t}\n\n\tif len(res.objects) != 0 {\n\t\trefs := make([]types.ManagedObjectReference, 0, len(res.objects))\n\t\tfor _, o := range res.objects {\n\t\t\trefs = append(refs, o.Reference())\n\t\t}\n\n\t\tpc := property.DefaultCollector(c)\n\t\terr = pc.Retrieve(ctx, refs, props, &res.Folders)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn cmd.WriteResult(&res)\n}\n\ntype infoResult struct {\n\tFolders []mo.Folder\n\tobjects []*object.Folder\n}\n\nfunc (r *infoResult) Write(w io.Writer) error {\n\t// Maintain order via r.objects as Property collector does not always return results in order.\n\tobjects := make(map[types.ManagedObjectReference]mo.Folder, len(r.Folders))\n\tfor _, o := range r.Folders {\n\t\tobjects[o.Reference()] = o\n\t}\n\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfor _, o := range r.objects {\n\t\tinfo := objects[o.Reference()]\n\t\tfmt.Fprintf(tw, \"Name:\\t%s\\n\", info.Name)\n\t\tfmt.Fprintf(tw, \"  Path:\\t%s\\n\", o.InventoryPath)\n\t\tfmt.Fprintf(tw, \"  Types:\\t%v\\n\", strings.Join(info.ChildType, \",\"))\n\t\tfmt.Fprintf(tw, \"  Children:\\t%d\\n\", len(info.ChildEntity))\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/account/account.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage account\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype AccountFlag struct {\n\t*flags.ClientFlag\n\t*flags.DatacenterFlag\n\t*flags.HostSystemFlag\n\n\ttypes.HostAccountSpec\n}\n\nfunc newAccountFlag(ctx context.Context) (*AccountFlag, context.Context) {\n\tf := &AccountFlag{}\n\tf.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tf.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tf.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\treturn f, ctx\n}\n\nfunc (f *AccountFlag) Register(ctx context.Context, fs *flag.FlagSet) {\n\tf.ClientFlag.Register(ctx, fs)\n\tf.DatacenterFlag.Register(ctx, fs)\n\tf.HostSystemFlag.Register(ctx, fs)\n\n\tfs.StringVar(&f.Id, \"id\", \"\", \"The ID of the specified account\")\n\tfs.StringVar(&f.Password, \"password\", \"\", \"The password for the specified account id\")\n\tfs.StringVar(&f.Description, \"description\", \"\", \"The description of the specified account\")\n}\n\nfunc (f *AccountFlag) Process(ctx context.Context) error {\n\tif err := f.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := f.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := f.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc 
(f *AccountFlag) HostAccountManager(ctx context.Context) (*object.HostAccountManager, error) {\n\th, err := f.HostSystem()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn h.ConfigManager().AccountManager(ctx)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/account/create.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage account\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n)\n\ntype create struct {\n\t*AccountFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.account.create\", &create{})\n}\n\nfunc (cmd *create) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.AccountFlag, ctx = newAccountFlag(ctx)\n\tcmd.AccountFlag.Register(ctx, f)\n}\n\nfunc (cmd *create) Description() string {\n\treturn `Create local account on HOST.\n\nExamples:\n  govc host.account.create -id $USER -password password-for-esx60`\n}\n\nfunc (cmd *create) Process(ctx context.Context) error {\n\tif err := cmd.AccountFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error {\n\tm, err := cmd.AccountFlag.HostAccountManager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn m.Create(ctx, &cmd.HostAccountSpec)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/account/remove.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage account\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n)\n\ntype remove struct {\n\t*AccountFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.account.remove\", &remove{})\n}\n\nfunc (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.AccountFlag, ctx = newAccountFlag(ctx)\n\tcmd.AccountFlag.Register(ctx, f)\n}\n\nfunc (cmd *remove) Description() string {\n\treturn `Remove local account on HOST.\n\nExamples:\n  govc host.account.remove -id $USER`\n}\n\nfunc (cmd *remove) Process(ctx context.Context) error {\n\tif err := cmd.AccountFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error {\n\tm, err := cmd.AccountFlag.HostAccountManager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn m.Remove(ctx, cmd.HostAccountSpec.Id)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/account/update.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage account\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n)\n\ntype update struct {\n\t*AccountFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.account.update\", &update{})\n}\n\nfunc (cmd *update) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.AccountFlag, ctx = newAccountFlag(ctx)\n\tcmd.AccountFlag.Register(ctx, f)\n}\n\nfunc (cmd *update) Description() string {\n\treturn `Update local account on HOST.\n\nExamples:\n  govc host.account.update -id root -password password-for-esx60`\n}\n\nfunc (cmd *update) Process(ctx context.Context) error {\n\tif err := cmd.AccountFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *update) Run(ctx context.Context, f *flag.FlagSet) error {\n\tm, err := cmd.AccountFlag.HostAccountManager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn m.Update(ctx, &cmd.HostAccountSpec)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/add.go",
    "content": "/*\nCopyright (c) 2015-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage host\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype add struct {\n\t*flags.FolderFlag\n\t*flags.HostConnectFlag\n\n\tconnect bool\n}\n\nfunc init() {\n\tcli.Register(\"host.add\", &add{})\n}\n\nfunc (cmd *add) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.FolderFlag, ctx = flags.NewFolderFlag(ctx)\n\tcmd.FolderFlag.Register(ctx, f)\n\n\tcmd.HostConnectFlag, ctx = flags.NewHostConnectFlag(ctx)\n\tcmd.HostConnectFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.connect, \"connect\", true, \"Immediately connect to host\")\n}\n\nfunc (cmd *add) Process(ctx context.Context) error {\n\tif err := cmd.FolderFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostConnectFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif cmd.HostName == \"\" {\n\t\treturn flag.ErrHelp\n\t}\n\tif cmd.UserName == \"\" {\n\t\treturn flag.ErrHelp\n\t}\n\tif cmd.Password == \"\" {\n\t\treturn flag.ErrHelp\n\t}\n\treturn nil\n}\n\nfunc (cmd *add) Description() string {\n\treturn `Add host to datacenter.\n\nThe host is added to the folder specified by the 'folder' flag. 
If not given,\nthis defaults to the host folder in the specified or default datacenter.\n\nExamples:\n  thumbprint=$(govc about.cert -k -u host.example.com -thumbprint | awk '{print $2}')\n  govc host.add -hostname host.example.com -username root -password pass -thumbprint $thumbprint\n  govc host.add -hostname 10.0.6.1 -username root -password pass -noverify`\n}\n\nfunc (cmd *add) Add(ctx context.Context, parent *object.Folder) error {\n\tspec := cmd.Spec(parent.Client())\n\n\treq := types.AddStandaloneHost_Task{\n\t\tThis:         parent.Reference(),\n\t\tSpec:         spec,\n\t\tAddConnected: cmd.connect,\n\t}\n\n\tres, err := methods.AddStandaloneHost_Task(ctx, parent.Client(), &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger := cmd.ProgressLogger(fmt.Sprintf(\"adding %s to folder %s... \", spec.HostName, parent.InventoryPath))\n\tdefer logger.Wait()\n\n\ttask := object.NewTask(parent.Client(), res.Returnval)\n\t_, err = task.WaitForResult(ctx, logger)\n\treturn err\n}\n\nfunc (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tfolder, err := cmd.FolderOrDefault(\"host\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn cmd.Fault(cmd.Add(ctx, folder))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/autostart/add.go",
    "content": "/*\nCopyright (c) 2015-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage autostart\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype add struct {\n\t*AutostartFlag\n\t// from types.AutoStartPowerInfo\n\tStartOrder       int32\n\tStartDelay       int32\n\tWaitForHeartbeat string\n\tStartAction      string\n\tStopDelay        int32\n\tStopAction       string\n}\n\nfunc init() {\n\tcli.Register(\"host.autostart.add\", &add{})\n}\n\nvar waitHeartbeatTypes = []string{\n\tstring(types.AutoStartWaitHeartbeatSettingSystemDefault),\n\tstring(types.AutoStartWaitHeartbeatSettingYes),\n\tstring(types.AutoStartWaitHeartbeatSettingNo),\n}\n\nfunc (cmd *add) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.AutostartFlag, ctx = newAutostartFlag(ctx)\n\tcmd.AutostartFlag.Register(ctx, f)\n\n\tcmd.StartOrder = -1\n\tcmd.StartDelay = -1\n\tcmd.StopDelay = -1\n\tf.Var(flags.NewInt32(&cmd.StartOrder), \"start-order\", \"Start Order\")\n\tf.Var(flags.NewInt32(&cmd.StartDelay), \"start-delay\", \"Start Delay\")\n\tf.Var(flags.NewInt32(&cmd.StopDelay), \"stop-delay\", \"Stop Delay\")\n\tf.StringVar(&cmd.StartAction, \"start-action\", \"powerOn\", \"Start Action\")\n\tf.StringVar(&cmd.StopAction, \"stop-action\", \"systemDefault\", \"Stop 
Action\")\n\tf.StringVar(&cmd.WaitForHeartbeat, \"wait\", waitHeartbeatTypes[0],\n\t\tfmt.Sprintf(\"Wait for Hearbeat Setting (%s)\", strings.Join(waitHeartbeatTypes, \"|\")))\n}\n\nfunc (cmd *add) Process(ctx context.Context) error {\n\tif err := cmd.AutostartFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *add) Usage() string {\n\treturn \"VM...\"\n}\n\nfunc (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error {\n\tpowerInfo := types.AutoStartPowerInfo{\n\t\tStartOrder:       cmd.StartOrder,\n\t\tStartDelay:       cmd.StartDelay,\n\t\tWaitForHeartbeat: types.AutoStartWaitHeartbeatSetting(cmd.WaitForHeartbeat),\n\t\tStartAction:      cmd.StartAction,\n\t\tStopDelay:        cmd.StopDelay,\n\t\tStopAction:       cmd.StopAction,\n\t}\n\treturn cmd.ReconfigureVMs(f.Args(), powerInfo)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/autostart/autostart.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage autostart\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype AutostartFlag struct {\n\t*flags.ClientFlag\n\t*flags.DatacenterFlag\n\t*flags.HostSystemFlag\n}\n\nfunc newAutostartFlag(ctx context.Context) (*AutostartFlag, context.Context) {\n\tf := &AutostartFlag{}\n\tf.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tf.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tf.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\treturn f, ctx\n}\n\nfunc (f *AutostartFlag) Register(ctx context.Context, fs *flag.FlagSet) {\n\tf.ClientFlag.Register(ctx, fs)\n\tf.DatacenterFlag.Register(ctx, fs)\n\tf.HostSystemFlag.Register(ctx, fs)\n}\n\nfunc (f *AutostartFlag) Process(ctx context.Context) error {\n\tif err := f.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := f.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := f.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// VirtualMachines returns list of virtual machine objects based on the\n// arguments specified on the command line. 
This helper is defined in\n// flags.SearchFlag as well, but that pulls in other virtual machine flags that\n// are not relevant here.\nfunc (f *AutostartFlag) VirtualMachines(args []string) ([]*object.VirtualMachine, error) {\n\tctx := context.TODO()\n\tif len(args) == 0 {\n\t\treturn nil, errors.New(\"no argument\")\n\t}\n\n\tfinder, err := f.Finder()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar out []*object.VirtualMachine\n\tfor _, arg := range args {\n\t\tvms, err := finder.VirtualMachineList(ctx, arg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tout = append(out, vms...)\n\t}\n\n\treturn out, nil\n}\n\nfunc (f *AutostartFlag) HostAutoStartManager() (*mo.HostAutoStartManager, error) {\n\tctx := context.TODO()\n\th, err := f.HostSystem()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar mhs mo.HostSystem\n\terr = h.Properties(ctx, h.Reference(), []string{\"configManager.autoStartManager\"}, &mhs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar mhas mo.HostAutoStartManager\n\terr = h.Properties(ctx, *mhs.ConfigManager.AutoStartManager, nil, &mhas)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &mhas, nil\n}\n\nfunc (f *AutostartFlag) ReconfigureDefaults(template types.AutoStartDefaults) error {\n\tctx := context.TODO()\n\tc, err := f.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmhas, err := f.HostAutoStartManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq := types.ReconfigureAutostart{\n\t\tThis: mhas.Reference(),\n\t\tSpec: types.HostAutoStartManagerConfig{\n\t\t\tDefaults: &template,\n\t\t},\n\t}\n\n\t_, err = methods.ReconfigureAutostart(ctx, c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (f *AutostartFlag) ReconfigureVMs(args []string, template types.AutoStartPowerInfo) error {\n\tctx := context.TODO()\n\tc, err := f.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmhas, err := f.HostAutoStartManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq := 
types.ReconfigureAutostart{\n\t\tThis: mhas.Reference(),\n\t\tSpec: types.HostAutoStartManagerConfig{\n\t\t\tPowerInfo: make([]types.AutoStartPowerInfo, 0),\n\t\t},\n\t}\n\n\tvms, err := f.VirtualMachines(args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, vm := range vms {\n\t\tpi := template\n\t\tpi.Key = vm.Reference()\n\t\treq.Spec.PowerInfo = append(req.Spec.PowerInfo, pi)\n\t}\n\n\t_, err = methods.ReconfigureAutostart(ctx, c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/autostart/configure.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage autostart\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype configure struct {\n\t*AutostartFlag\n\n\ttypes.AutoStartDefaults\n}\n\nfunc init() {\n\tcli.Register(\"host.autostart.configure\", &configure{})\n}\n\nfunc (cmd *configure) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.AutostartFlag, ctx = newAutostartFlag(ctx)\n\tcmd.AutostartFlag.Register(ctx, f)\n\n\tf.Var(flags.NewOptionalBool(&cmd.Enabled), \"enabled\", \"\")\n\tf.Var(flags.NewInt32(&cmd.StartDelay), \"start-delay\", \"\")\n\tf.StringVar(&cmd.StopAction, \"stop-action\", \"\", \"\")\n\tf.Var(flags.NewInt32(&cmd.StopDelay), \"stop-delay\", \"\")\n\tf.Var(flags.NewOptionalBool(&cmd.WaitForHeartbeat), \"wait-for-heartbeat\", \"\")\n}\n\nfunc (cmd *configure) Process(ctx context.Context) error {\n\tif err := cmd.AutostartFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *configure) Usage() string {\n\treturn \"\"\n}\n\nfunc (cmd *configure) Run(ctx context.Context, f *flag.FlagSet) error {\n\treturn cmd.ReconfigureDefaults(cmd.AutoStartDefaults)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/autostart/info.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage autostart\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n)\n\ntype info struct {\n\tcli.Command\n\n\t*AutostartFlag\n\t*flags.OutputFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.autostart.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.AutostartFlag, ctx = newAutostartFlag(ctx)\n\tcmd.AutostartFlag.Register(ctx, f)\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.AutostartFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *info) Usage() string {\n\treturn \"\"\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\tclient, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmhas, err := cmd.HostAutoStartManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn cmd.WriteResult(&infoResult{client, mhas})\n}\n\ntype infoResult struct {\n\tclient *vim25.Client\n\tmhas   *mo.HostAutoStartManager\n}\n\nfunc (r 
*infoResult) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(r.mhas)\n}\n\n// vmPaths resolves the paths for the VMs in the result.\nfunc (r *infoResult) vmPaths() (map[string]string, error) {\n\tctx := context.TODO()\n\tpaths := make(map[string]string)\n\tfor _, info := range r.mhas.Config.PowerInfo {\n\t\tmes, err := mo.Ancestors(ctx, r.client, r.client.ServiceContent.PropertyCollector, info.Key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpath := \"\"\n\t\tfor _, me := range mes {\n\t\t\t// Skip root entity in building inventory path.\n\t\t\tif me.Parent == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tpath += \"/\" + me.Name\n\t\t}\n\n\t\tpaths[info.Key.Value] = path\n\t}\n\n\treturn paths, nil\n}\n\nfunc (r *infoResult) Write(w io.Writer) error {\n\tpaths, err := r.vmPaths()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)\n\n\tfmt.Fprintf(tw, \"VM\")\n\tfmt.Fprintf(tw, \"\\tStartAction\")\n\tfmt.Fprintf(tw, \"\\tStartDelay\")\n\tfmt.Fprintf(tw, \"\\tStartOrder\")\n\tfmt.Fprintf(tw, \"\\tStopAction\")\n\tfmt.Fprintf(tw, \"\\tStopDelay\")\n\tfmt.Fprintf(tw, \"\\tWaitForHeartbeat\")\n\tfmt.Fprintf(tw, \"\\n\")\n\n\tfor _, info := range r.mhas.Config.PowerInfo {\n\t\tfmt.Fprintf(tw, \"%s\", paths[info.Key.Value])\n\t\tfmt.Fprintf(tw, \"\\t%s\", info.StartAction)\n\t\tfmt.Fprintf(tw, \"\\t%d\", info.StartDelay)\n\t\tfmt.Fprintf(tw, \"\\t%d\", info.StartOrder)\n\t\tfmt.Fprintf(tw, \"\\t%s\", info.StopAction)\n\t\tfmt.Fprintf(tw, \"\\t%d\", info.StopDelay)\n\t\tfmt.Fprintf(tw, \"\\t%s\", info.WaitForHeartbeat)\n\t\tfmt.Fprintf(tw, \"\\n\")\n\t}\n\n\t_ = tw.Flush()\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/autostart/remove.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage autostart\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype remove struct {\n\t*AutostartFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.autostart.remove\", &remove{})\n}\n\nfunc (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.AutostartFlag, ctx = newAutostartFlag(ctx)\n\tcmd.AutostartFlag.Register(ctx, f)\n}\n\nfunc (cmd *remove) Process(ctx context.Context) error {\n\tif err := cmd.AutostartFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *remove) Usage() string {\n\treturn \"VM...\"\n}\n\nfunc (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvar powerInfo = types.AutoStartPowerInfo{\n\t\tStartAction:      \"none\",\n\t\tStartDelay:       -1,\n\t\tStartOrder:       -1,\n\t\tStopAction:       \"none\",\n\t\tStopDelay:        -1,\n\t\tWaitForHeartbeat: types.AutoStartWaitHeartbeatSettingSystemDefault,\n\t}\n\n\treturn cmd.ReconfigureVMs(f.Args(), powerInfo)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/cert/csr.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage cert\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype csr struct {\n\t*flags.HostSystemFlag\n\n\tip bool\n}\n\nfunc init() {\n\tcli.Register(\"host.cert.csr\", &csr{})\n}\n\nfunc (cmd *csr) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.ip, \"ip\", false, \"Use IP address as CN\")\n}\n\nfunc (cmd *csr) Description() string {\n\treturn `Generate a certificate-signing request (CSR) for HOST.`\n}\n\nfunc (cmd *csr) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *csr) Run(ctx context.Context, f *flag.FlagSet) error {\n\thost, err := cmd.HostSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := host.ConfigManager().CertificateManager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toutput, err := m.GenerateCertificateSigningRequest(ctx, cmd.ip)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = fmt.Println(output)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/cert/info.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage cert\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype info struct {\n\t*flags.HostSystemFlag\n\t*flags.OutputFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.cert.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n}\n\nfunc (cmd *info) Description() string {\n\treturn `Display SSL certificate info for HOST.`\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\thost, err := cmd.HostSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := host.ConfigManager().CertificateManager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinfo, err := m.CertificateInfo(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn cmd.WriteResult(info)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/cert/install.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage cert\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"flag\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype install struct {\n\t*flags.HostSystemFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.cert.import\", &install{})\n}\n\nfunc (cmd *install) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n}\n\nfunc (cmd *install) Usage() string {\n\treturn \"FILE\"\n}\n\nfunc (cmd *install) Description() string {\n\treturn `Install SSL certificate FILE on HOST.\n\nIf FILE name is \"-\", read certificate from stdin.`\n}\n\nfunc (cmd *install) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *install) Run(ctx context.Context, f *flag.FlagSet) error {\n\thost, err := cmd.HostSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := host.ConfigManager().CertificateManager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar cert string\n\n\tname := f.Arg(0)\n\tif name == \"-\" || name == \"\" {\n\t\tvar buf bytes.Buffer\n\t\tif _, err := io.Copy(&buf, os.Stdin); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcert = buf.String()\n\t} else {\n\t\tb, err := ioutil.ReadFile(name)\n\t\tif 
err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcert = string(b)\n\t}\n\n\treturn m.InstallServerCertificate(ctx, cert)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/date/change.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage date\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype change struct {\n\t*flags.HostSystemFlag\n\n\ttypes.HostNtpConfig\n\ttypes.HostDateTimeConfig\n\tdate string\n}\n\nfunc init() {\n\tcli.Register(\"host.date.change\", &change{})\n}\n\ntype serverConfig types.HostNtpConfig\n\nfunc (s *serverConfig) String() string {\n\treturn strings.Join(s.Server, \",\")\n}\n\nfunc (s *serverConfig) Set(v string) error {\n\ts.Server = append(s.Server, v)\n\treturn nil\n}\n\nfunc (cmd *change) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tf.Var((*serverConfig)(&cmd.HostNtpConfig), \"server\", \"IP or FQDN for NTP server(s)\")\n\tf.StringVar(&cmd.TimeZone, \"tz\", \"\", \"Change timezone of the host\")\n\tf.StringVar(&cmd.date, \"date\", \"\", \"Update the date/time on the host\")\n}\n\nfunc (cmd *change) Description() string {\n\treturn `Change date and time for HOST.\n\nExamples:\n  govc host.date.change -date \"$(date -u)\"\n  govc host.date.change -server time.vmware.com\n  govc host.service enable ntpd\n  govc host.service start ntpd`\n}\n\nfunc (cmd *change) Process(ctx context.Context) error 
{\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *change) Run(ctx context.Context, f *flag.FlagSet) error {\n\thost, err := cmd.HostSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts, err := host.ConfigManager().DateTimeSystem(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.date != \"\" {\n\t\td, err := time.Parse(time.UnixDate, cmd.date)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn s.Update(ctx, d)\n\t}\n\n\tif len(cmd.HostNtpConfig.Server) > 0 {\n\t\tcmd.NtpConfig = &cmd.HostNtpConfig\n\t}\n\n\treturn s.UpdateConfig(ctx, cmd.HostDateTimeConfig)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/date/info.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage date\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/govc/host/service\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype info struct {\n\t*flags.HostSystemFlag\n\t*flags.OutputFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.date.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n}\n\nfunc (cmd *info) Description() string {\n\treturn `Display date and time info for HOST.`\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\ntype dateInfo struct {\n\ttypes.HostDateTimeInfo\n\tService *types.HostService\n\tCurrent *time.Time\n}\n\nfunc (info *dateInfo) servers() string {\n\tif len(info.NtpConfig.Server) == 0 {\n\t\treturn \"None\"\n\t}\n\treturn strings.Join(info.NtpConfig.Server, \", \")\n}\n\nfunc (info *dateInfo) Write(w 
io.Writer) error {\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfmt.Fprintf(tw, \"Current date and time:\\t%s\\n\", info.Current.Format(time.UnixDate))\n\tif info.Service != nil {\n\t\tfmt.Fprintf(tw, \"NTP client status:\\t%s\\n\", service.Policy(*info.Service))\n\t\tfmt.Fprintf(tw, \"NTP service status:\\t%s\\n\", service.Status(*info.Service))\n\t}\n\tfmt.Fprintf(tw, \"NTP servers:\\t%s\\n\", info.servers())\n\n\treturn tw.Flush()\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\thost, err := cmd.HostSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts, err := host.ConfigManager().DateTimeSystem(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar hs mo.HostDateTimeSystem\n\tif err = s.Properties(ctx, s.Reference(), nil, &hs); err != nil {\n\t\treturn nil\n\t}\n\n\tss, err := host.ConfigManager().ServiceSystem(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tservices, err := ss.Service(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tres := &dateInfo{HostDateTimeInfo: hs.DateTimeInfo}\n\n\tfor i, service := range services {\n\t\tif service.Key == \"ntpd\" {\n\t\t\tres.Service = &services[i]\n\t\t\tbreak\n\t\t}\n\t}\n\n\tres.Current, err = s.Query(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn cmd.WriteResult(res)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/disconnect.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage host\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype disconnect struct {\n\t*flags.HostSystemFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.disconnect\", &disconnect{})\n}\n\nfunc (cmd *disconnect) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n}\n\nfunc (cmd *disconnect) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *disconnect) Description() string {\n\treturn `Disconnect HOST from vCenter.`\n}\n\nfunc (cmd *disconnect) Disconnect(ctx context.Context, host *object.HostSystem) error {\n\ttask, err := host.Disconnect(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger := cmd.ProgressLogger(fmt.Sprintf(\"%s disconnecting... 
\", host.InventoryPath))\n\tdefer logger.Wait()\n\n\t_, err = task.WaitForResult(ctx, logger)\n\treturn err\n}\n\nfunc (cmd *disconnect) Run(ctx context.Context, f *flag.FlagSet) error {\n\thosts, err := cmd.HostSystems(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, host := range hosts {\n\t\terr = cmd.Disconnect(ctx, host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/esxcli/command.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage esxcli\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype Command struct {\n\tname []string\n\targs []string\n}\n\ntype CommandInfoItem struct {\n\tName        string `xml:\"name\"`\n\tDisplayName string `xml:\"displayName\"`\n\tHelp        string `xml:\"help\"`\n}\n\ntype CommandInfoParam struct {\n\tCommandInfoItem\n\tAliases []string `xml:\"aliases\"`\n\tFlag    bool     `xml:\"flag\"`\n}\n\ntype CommandInfoHint struct {\n\tKey   string `xml:\"key\"`\n\tValue string `xml:\"value\"`\n}\n\ntype CommandInfoHints []CommandInfoHint\n\ntype CommandInfoMethod struct {\n\tCommandInfoItem\n\tParam []CommandInfoParam `xml:\"param\"`\n\tHints CommandInfoHints   `xml:\"hints\"`\n}\n\ntype CommandInfo struct {\n\tCommandInfoItem\n\tMethod []*CommandInfoMethod `xml:\"method\"`\n}\n\nfunc NewCommand(args []string) *Command {\n\tc := &Command{}\n\n\tfor i, arg := range args {\n\t\tif strings.HasPrefix(arg, \"-\") {\n\t\t\tc.args = args[i:]\n\t\t\tbreak\n\t\t} else {\n\t\t\tc.name = append(c.name, arg)\n\t\t}\n\t}\n\n\treturn c\n}\n\nfunc (c *Command) Namespace() string {\n\treturn strings.Join(c.name[:len(c.name)-1], \".\")\n}\n\nfunc (c *Command) Name() string {\n\treturn c.name[len(c.name)-1]\n}\n\nfunc (c *Command) Method() string {\n\treturn \"vim.EsxCLI.\" + strings.Join(c.name, \".\")\n}\n\nfunc (c 
*Command) Moid() string {\n\treturn \"ha-cli-handler-\" + strings.Join(c.name[:len(c.name)-1], \"-\")\n}\n\n// Parse generates a flag.FlagSet based on the given []CommandInfoParam and\n// returns arguments for use with methods.ExecuteSoap\nfunc (c *Command) Parse(params []CommandInfoParam) ([]types.ReflectManagedMethodExecuterSoapArgument, error) {\n\tflags := flag.NewFlagSet(strings.Join(c.name, \" \"), flag.ExitOnError)\n\tvals := make([]string, len(params))\n\n\tfor i, p := range params {\n\t\tv := &vals[i]\n\t\tfor _, a := range p.Aliases {\n\t\t\ta = strings.TrimPrefix(a[1:], \"-\")\n\t\t\tflags.StringVar(v, a, \"\", p.Help)\n\t\t}\n\t}\n\n\terr := flags.Parse(c.args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\targs := []types.ReflectManagedMethodExecuterSoapArgument{}\n\n\tfor i, p := range params {\n\t\tif vals[i] == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\targs = append(args, c.Argument(p.Name, vals[i]))\n\t}\n\n\treturn args, nil\n}\n\nfunc (c *Command) Argument(name string, val string) types.ReflectManagedMethodExecuterSoapArgument {\n\treturn types.ReflectManagedMethodExecuterSoapArgument{\n\t\tName: name,\n\t\tVal:  fmt.Sprintf(\"<%s>%s</%s>\", name, val, name),\n\t}\n}\n\nfunc (h CommandInfoHints) Formatter() string {\n\tfor _, hint := range h {\n\t\tif hint.Key == \"formatter\" {\n\t\t\treturn hint.Value\n\t\t}\n\t}\n\n\treturn \"simple\"\n}\n\nfunc (h CommandInfoHints) Fields() []string {\n\tfor _, hint := range h {\n\t\tif strings.HasPrefix(hint.Key, \"fields:\") {\n\t\t\treturn strings.Split(hint.Value, \",\")\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/esxcli/command_test.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage esxcli\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nfunc TestSystemSettingsAdvancedSetCommand(t *testing.T) {\n\tc := NewCommand([]string{\"system\", \"settings\", \"advanced\", \"set\", \"-o\", \"/Net/GuestIPHack\", \"-i\", \"1\"})\n\n\ttests := []struct {\n\t\tf      func() string\n\t\texpect string\n\t}{\n\t\t{c.Name, \"set\"},\n\t\t{c.Namespace, \"system.settings.advanced\"},\n\t\t{c.Method, \"vim.EsxCLI.system.settings.advanced.set\"},\n\t\t{c.Moid, \"ha-cli-handler-system-settings-advanced\"},\n\t}\n\n\tfor _, test := range tests {\n\t\tactual := test.f()\n\t\tif actual != test.expect {\n\t\t\tt.Errorf(\"%s != %s\", actual, test.expect)\n\t\t}\n\t}\n\n\tparams := []CommandInfoParam{\n\t\t{\n\t\t\tCommandInfoItem: CommandInfoItem{Name: \"default\", DisplayName: \"default\", Help: \"Reset the option to its default value.\"},\n\t\t\tAliases:         []string{\"-d\", \"--default\"},\n\t\t\tFlag:            true,\n\t\t},\n\t\t{\n\t\t\tCommandInfoItem: CommandInfoItem{Name: \"intvalue\", DisplayName: \"int-value\", Help: \"If the option is an integer value use this option.\"},\n\t\t\tAliases:         []string{\"-i\", \"--int-value\"},\n\t\t\tFlag:            false,\n\t\t},\n\t\t{\n\t\t\tCommandInfoItem: CommandInfoItem{Name: \"option\", DisplayName: \"option\", Help: \"The name of the option to set 
the value of. Example: \\\"/Misc/HostName\\\"\"},\n\t\t\tAliases:         []string{\"-o\", \"--option\"},\n\t\t\tFlag:            false,\n\t\t},\n\t\t{\n\t\t\tCommandInfoItem: CommandInfoItem{Name: \"stringvalue\", DisplayName: \"string-value\", Help: \"If the option is a string use this option.\"},\n\t\t\tAliases:         []string{\"-s\", \"--string-value\"},\n\t\t\tFlag:            false,\n\t\t},\n\t}\n\n\targs, err := c.Parse(params)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texpect := []types.ReflectManagedMethodExecuterSoapArgument{\n\t\t{\n\t\t\tDynamicData: types.DynamicData{},\n\t\t\tName:        \"intvalue\",\n\t\t\tVal:         \"<intvalue>1</intvalue>\",\n\t\t},\n\t\t{\n\t\t\tDynamicData: types.DynamicData{},\n\t\t\tName:        \"option\",\n\t\t\tVal:         \"<option>/Net/GuestIPHack</option>\",\n\t\t},\n\t}\n\n\tif !reflect.DeepEqual(args, expect) {\n\t\tt.Errorf(\"%s != %s\", args, expect)\n\t}\n}\n\nfunc TestNetworkVmListCommand(t *testing.T) {\n\tc := NewCommand([]string{\"network\", \"vm\", \"list\"})\n\n\ttests := []struct {\n\t\tf      func() string\n\t\texpect string\n\t}{\n\t\t{c.Name, \"list\"},\n\t\t{c.Namespace, \"network.vm\"},\n\t\t{c.Method, \"vim.EsxCLI.network.vm.list\"},\n\t\t{c.Moid, \"ha-cli-handler-network-vm\"},\n\t}\n\n\tfor _, test := range tests {\n\t\tactual := test.f()\n\t\tif actual != test.expect {\n\t\t\tt.Errorf(\"%s != %s\", actual, test.expect)\n\t\t}\n\t}\n\n\tvar params []CommandInfoParam\n\n\targs, err := c.Parse(params)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texpect := []types.ReflectManagedMethodExecuterSoapArgument{}\n\n\tif !reflect.DeepEqual(args, expect) {\n\t\tt.Errorf(\"%s != %s\", args, expect)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/esxcli/esxcli.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage esxcli\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype esxcli struct {\n\t*flags.HostSystemFlag\n\n\thints bool\n}\n\nfunc init() {\n\tcli.Register(\"host.esxcli\", &esxcli{})\n}\n\nfunc (cmd *esxcli) Usage() string {\n\treturn \"COMMAND [ARG]...\"\n}\n\nfunc (cmd *esxcli) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.hints, \"hints\", true, \"Use command info hints when formatting output\")\n}\n\nfunc (cmd *esxcli) Description() string {\n\treturn `Invoke esxcli command on HOST.\n\nOutput is rendered in table form when possible, unless disabled with '-hints=false'.\n\nExamples:\n  govc host.esxcli network ip connection list\n  govc host.esxcli system settings advanced set -o /Net/GuestIPHack -i 1\n  govc host.esxcli network firewall ruleset set -r remoteSerialPort -e true\n  govc host.esxcli network firewall set -e false`\n}\n\nfunc (cmd *esxcli) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *esxcli) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := 
cmd.Client()\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\thost, err := cmd.HostSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te, err := NewExecutor(c, host)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tres, err := e.Run(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(res.Values) == 0 {\n\t\treturn nil\n\t}\n\n\treturn cmd.WriteResult(&result{res, cmd})\n}\n\ntype result struct {\n\t*Response\n\tcmd *esxcli\n}\n\nfunc (r *result) Write(w io.Writer) error {\n\tvar formatType string\n\tif r.cmd.hints {\n\t\tformatType = r.Info.Hints.Formatter()\n\t}\n\n\tswitch formatType {\n\tcase \"table\":\n\t\tr.cmd.formatTable(w, r.Response)\n\tdefault:\n\t\tr.cmd.formatSimple(w, r.Response)\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *esxcli) formatSimple(w io.Writer, res *Response) {\n\tvar keys []string\n\tfor key := range res.Values[0] {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Strings(keys)\n\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfor i, rv := range res.Values {\n\t\tif i > 0 {\n\t\t\tfmt.Fprintln(tw)\n\t\t\t_ = tw.Flush()\n\t\t}\n\t\tfor _, key := range keys {\n\t\t\tfmt.Fprintf(tw, \"%s:\\t%s\\n\", key, strings.Join(rv[key], \", \"))\n\t\t}\n\t}\n\n\t_ = tw.Flush()\n}\n\nfunc (cmd *esxcli) formatTable(w io.Writer, res *Response) {\n\tfields := res.Info.Hints.Fields()\n\n\ttw := tabwriter.NewWriter(w, len(fields), 0, 2, ' ', 0)\n\n\tvar hr []string\n\tfor _, name := range fields {\n\t\thr = append(hr, strings.Repeat(\"-\", len(name)))\n\t}\n\n\tfmt.Fprintln(tw, strings.Join(fields, \"\\t\"))\n\tfmt.Fprintln(tw, strings.Join(hr, \"\\t\"))\n\n\tfor _, vals := range res.Values {\n\t\tvar row []string\n\n\t\tfor _, name := range fields {\n\t\t\tkey := strings.Replace(name, \" \", \"\", -1)\n\t\t\tif val, ok := vals[key]; ok {\n\t\t\t\trow = append(row, strings.Join(val, \", \"))\n\t\t\t} else {\n\t\t\t\trow = append(row, \"\")\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprintln(tw, strings.Join(row, \"\\t\"))\n\t}\n\n\t_ = tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/esxcli/executor.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage esxcli\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n\t\"github.com/vmware/govmomi/vim25/xml\"\n)\n\ntype Executor struct {\n\tc    *vim25.Client\n\thost *object.HostSystem\n\tmme  *types.ReflectManagedMethodExecuter\n\tdtm  *types.InternalDynamicTypeManager\n\tinfo map[string]*CommandInfo\n}\n\nfunc NewExecutor(c *vim25.Client, host *object.HostSystem) (*Executor, error) {\n\tctx := context.TODO()\n\te := &Executor{\n\t\tc:    c,\n\t\thost: host,\n\t\tinfo: make(map[string]*CommandInfo),\n\t}\n\n\t{\n\t\treq := types.RetrieveManagedMethodExecuter{\n\t\t\tThis: host.Reference(),\n\t\t}\n\n\t\tres, err := methods.RetrieveManagedMethodExecuter(ctx, c, &req)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\te.mme = res.Returnval\n\t}\n\n\t{\n\t\treq := types.RetrieveDynamicTypeManager{\n\t\t\tThis: host.Reference(),\n\t\t}\n\n\t\tres, err := methods.RetrieveDynamicTypeManager(ctx, c, &req)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\te.dtm = res.Returnval\n\t}\n\n\treturn e, nil\n}\n\nfunc (e *Executor) CommandInfo(c *Command) (*CommandInfoMethod, error) {\n\tns := c.Namespace()\n\tvar info *CommandInfo\n\tvar ok bool\n\n\tif info, ok = e.info[ns]; 
!ok {\n\t\treq := types.ExecuteSoap{\n\t\t\tMoid:   \"ha-dynamic-type-manager-local-cli-cliinfo\",\n\t\t\tMethod: \"vim.CLIInfo.FetchCLIInfo\",\n\t\t\tArgument: []types.ReflectManagedMethodExecuterSoapArgument{\n\t\t\t\tc.Argument(\"typeName\", \"vim.EsxCLI.\"+ns),\n\t\t\t},\n\t\t}\n\n\t\tinfo = new(CommandInfo)\n\t\tif err := e.Execute(&req, info); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\te.info[ns] = info\n\t}\n\n\tname := c.Name()\n\tfor _, method := range info.Method {\n\t\tif method.Name == name {\n\t\t\treturn method, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"method '%s' not found in name space '%s'\", name, c.Namespace())\n}\n\nfunc (e *Executor) NewRequest(args []string) (*types.ExecuteSoap, *CommandInfoMethod, error) {\n\tc := NewCommand(args)\n\n\tinfo, err := e.CommandInfo(c)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tsargs, err := c.Parse(info.Param)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tsreq := types.ExecuteSoap{\n\t\tMoid:     c.Moid(),\n\t\tMethod:   c.Method(),\n\t\tArgument: sargs,\n\t}\n\n\treturn &sreq, info, nil\n}\n\nfunc (e *Executor) Execute(req *types.ExecuteSoap, res interface{}) error {\n\tctx := context.TODO()\n\treq.This = e.mme.ManagedObjectReference\n\treq.Version = \"urn:vim25/5.0\"\n\n\tx, err := methods.ExecuteSoap(ctx, e.c, req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif x.Returnval != nil {\n\t\tif x.Returnval.Fault != nil {\n\t\t\treturn errors.New(x.Returnval.Fault.FaultMsg)\n\t\t}\n\n\t\tif err := xml.Unmarshal([]byte(x.Returnval.Response), res); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (e *Executor) Run(args []string) (*Response, error) {\n\treq, info, err := e.NewRequest(args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres := &Response{\n\t\tInfo: info,\n\t}\n\n\tif err := e.Execute(req, res); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/esxcli/firewall_info.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage esxcli\n\nimport \"github.com/vmware/govmomi/object\"\n\ntype FirewallInfo struct {\n\tLoaded        bool\n\tEnabled       bool\n\tDefaultAction string\n}\n\n// GetFirewallInfo via 'esxcli network firewall get'\n// The HostFirewallSystem type does not expose this data.\n// This helper can be useful in particular to determine if the firewall is enabled or disabled.\nfunc GetFirewallInfo(s *object.HostSystem) (*FirewallInfo, error) {\n\tx, err := NewExecutor(s.Client(), s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres, err := x.Run([]string{\"network\", \"firewall\", \"get\"})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo := &FirewallInfo{\n\t\tLoaded:        res.Values[0][\"Loaded\"][0] == \"true\",\n\t\tEnabled:       res.Values[0][\"Enabled\"][0] == \"true\",\n\t\tDefaultAction: res.Values[0][\"DefaultAction\"][0],\n\t}\n\n\treturn info, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/esxcli/fixtures/network_vm_list.xml",
    "content": "<obj xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"urn:vim25\" versionId=\"5.0\" xsi:type=\"ArrayOfDataObject\">\n <DataObject xsi:type=\"VimEsxCLInetworkvmlistVM\">\n  <Name>foo</Name>\n  <Networks>VM Network</Networks>\n  <Networks>dougm</Networks>\n  <NumPorts>2</NumPorts>\n  <WorldID>98842</WorldID>\n </DataObject>\n <DataObject xsi:type=\"VimEsxCLInetworkvmlistVM\">\n  <Name>bar</Name>\n  <Networks>VM Network</Networks>\n  <NumPorts>1</NumPorts>\n  <WorldID>236235</WorldID>\n </DataObject>\n</obj>\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/esxcli/fixtures/network_vm_port_list.xml",
    "content": "<obj xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"urn:vim25\" versionId=\"5.0\" xsi:type=\"ArrayOfDataObject\">\n <DataObject xsi:type=\"VimEsxCLInetworkvmportlistPort\">\n  <DVPortID></DVPortID>\n  <IPAddress>192.168.247.149</IPAddress>\n  <MACAddress>00:0c:29:12:b2:cf</MACAddress>\n  <PortID>33554438</PortID>\n  <Portgroup>VM Network</Portgroup>\n  <TeamUplink>vmnic0</TeamUplink>\n  <UplinkPortID>33554434</UplinkPortID>\n  <vSwitch>vSwitch0</vSwitch>\n </DataObject>\n</obj>\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/esxcli/fixtures/system_hostname_get.xml",
    "content": "<obj xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"urn:vim25\" versionId=\"5.0\" xsi:type=\"VimEsxCLIsystemhostnamegetFullyQualifiedHostName\">\n <DomainName>localdomain</DomainName>\n <FullyQualifiedDomainName>esxbox.localdomain</FullyQualifiedDomainName>\n <HostName>esxbox</HostName>\n</obj>\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/esxcli/guest_info.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage esxcli\n\nimport (\n\t\"context\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype hostInfo struct {\n\t*Executor\n\twids map[string]string\n}\n\ntype GuestInfo struct {\n\tc     *vim25.Client\n\thosts map[string]*hostInfo\n}\n\nfunc NewGuestInfo(c *vim25.Client) *GuestInfo {\n\treturn &GuestInfo{\n\t\tc:     c,\n\t\thosts: make(map[string]*hostInfo),\n\t}\n}\n\nfunc (g *GuestInfo) hostInfo(ref *types.ManagedObjectReference) (*hostInfo, error) {\n\t// cache exectuor and uuid -> worldid map\n\tif h, ok := g.hosts[ref.Value]; ok {\n\t\treturn h, nil\n\t}\n\n\thost := object.NewHostSystem(g.c, *ref)\n\n\te, err := NewExecutor(g.c, host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres, err := e.Run([]string{\"vm\", \"process\", \"list\"})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tids := make(map[string]string, len(res.Values))\n\n\tfor _, process := range res.Values {\n\t\t// Normalize uuid, esxcli and mo.VirtualMachine have different formats\n\t\tuuid := strings.Replace(process[\"UUID\"][0], \" \", \"\", -1)\n\t\tuuid = strings.Replace(uuid, \"-\", \"\", -1)\n\n\t\tids[uuid] = process[\"WorldID\"][0]\n\t}\n\n\th := &hostInfo{e, 
ids}\n\tg.hosts[ref.Value] = h\n\n\treturn h, nil\n}\n\n// IpAddress attempts to find the guest IP address using esxcli.\n// ESX hosts must be configured with the /Net/GuestIPHack enabled.\n// For example:\n// $ govc host.esxcli -- system settings advanced set -o /Net/GuestIPHack -i 1\nfunc (g *GuestInfo) IpAddress(vm *object.VirtualMachine) (string, error) {\n\tctx := context.TODO()\n\tconst any = \"0.0.0.0\"\n\tvar mvm mo.VirtualMachine\n\n\tpc := property.DefaultCollector(g.c)\n\terr := pc.RetrieveOne(ctx, vm.Reference(), []string{\"runtime.host\", \"config.uuid\"}, &mvm)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\th, err := g.hostInfo(mvm.Runtime.Host)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Normalize uuid, esxcli and mo.VirtualMachine have different formats\n\tuuid := strings.Replace(mvm.Config.Uuid, \"-\", \"\", -1)\n\n\tif wid, ok := h.wids[uuid]; ok {\n\t\tres, err := h.Run([]string{\"network\", \"vm\", \"port\", \"list\", \"--world-id\", wid})\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tfor _, val := range res.Values {\n\t\t\tif ip, ok := val[\"IPAddress\"]; ok {\n\t\t\t\tif ip[0] != any {\n\t\t\t\t\treturn ip[0], nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn any, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/esxcli/response.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\npackage esxcli\n\nimport (\n\t\"io\"\n\n\t\"github.com/vmware/govmomi/vim25/xml\"\n)\n\ntype Values map[string][]string\n\ntype Response struct {\n\tInfo   *CommandInfoMethod\n\tValues []Values\n}\n\nfunc (v Values) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {\n\tfor {\n\t\tt, err := d.Token()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\tif s, ok := t.(xml.StartElement); ok {\n\t\t\tt, err = d.Token()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tkey := s.Name.Local\n\t\t\tvar val string\n\t\t\tif c, ok := t.(xml.CharData); ok {\n\t\t\t\tval = string(c)\n\t\t\t}\n\t\t\tv[key] = append(v[key], val)\n\t\t}\n\t}\n}\n\nfunc (r *Response) Type(start xml.StartElement) string {\n\tfor _, a := range start.Attr {\n\t\tif a.Name.Local == \"type\" {\n\t\t\treturn a.Value\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc (r *Response) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {\n\tstype := r.Type(start)\n\n\tif stype != \"ArrayOfDataObject\" {\n\t\tv := Values{}\n\t\tif err := d.DecodeElement(&v, &start); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tr.Values = append(r.Values, v)\n\t\treturn nil\n\t}\n\n\tfor {\n\t\tt, err := d.Token()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\tif s, ok := 
t.(xml.StartElement); ok {\n\t\t\tif s.Name.Local == \"DataObject\" {\n\t\t\t\tv := Values{}\n\t\t\t\tif err := d.DecodeElement(&v, &s); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tr.Values = append(r.Values, v)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/esxcli/response_test.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage esxcli\n\nimport (\n\t\"os\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n\t\"github.com/vmware/govmomi/vim25/xml\"\n)\n\nfunc load(name string) *Response {\n\tf, err := os.Open(name)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer f.Close()\n\n\tvar b Response\n\n\tdec := xml.NewDecoder(f)\n\tdec.TypeFunc = types.TypeFunc()\n\tif err := dec.Decode(&b); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn &b\n}\n\nfunc TestSystemHostnameGetResponse(t *testing.T) {\n\tres := load(\"fixtures/system_hostname_get.xml\")\n\n\texpect := []Values{\n\t\t{\n\t\t\t\"DomainName\":               {\"localdomain\"},\n\t\t\t\"FullyQualifiedDomainName\": {\"esxbox.localdomain\"},\n\t\t\t\"HostName\":                 {\"esxbox\"},\n\t\t},\n\t}\n\n\tif !reflect.DeepEqual(res.Values, expect) {\n\t\tt.Errorf(\"%s != %s\", res.Values, expect)\n\t}\n}\n\nfunc TestNetworkVmList(t *testing.T) {\n\tres := load(\"fixtures/network_vm_list.xml\")\n\n\texpect := []Values{\n\t\t{\n\t\t\t\"Name\":     {\"foo\"},\n\t\t\t\"Networks\": {\"VM Network\", \"dougm\"},\n\t\t\t\"NumPorts\": {\"2\"},\n\t\t\t\"WorldID\":  {\"98842\"},\n\t\t},\n\t\t{\n\t\t\t\"Name\":     {\"bar\"},\n\t\t\t\"Networks\": {\"VM Network\"},\n\t\t\t\"NumPorts\": {\"1\"},\n\t\t\t\"WorldID\":  {\"236235\"},\n\t\t},\n\t}\n\n\tif !reflect.DeepEqual(res.Values, expect) 
{\n\t\tt.Errorf(\"%s != %s\", res.Values, expect)\n\t}\n}\n\nfunc TestNetworkVmPortList(t *testing.T) {\n\tr := load(\"fixtures/network_vm_port_list.xml\")\n\n\texpect := []Values{\n\t\t{\n\t\t\t\"IPAddress\":    {\"192.168.247.149\"},\n\t\t\t\"MACAddress\":   {\"00:0c:29:12:b2:cf\"},\n\t\t\t\"PortID\":       {\"33554438\"},\n\t\t\t\"Portgroup\":    {\"VM Network\"},\n\t\t\t\"TeamUplink\":   {\"vmnic0\"},\n\t\t\t\"UplinkPortID\": {\"33554434\"},\n\t\t\t\"vSwitch\":      {\"vSwitch0\"},\n\t\t\t\"DVPortID\":     {\"\"},\n\t\t},\n\t}\n\n\tif !reflect.DeepEqual(r.Values, expect) {\n\t\tt.Errorf(\"%s != %s\", r.Values, expect)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/firewall/find.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage firewall\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/govc/host/esxcli\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype find struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n\t*flags.HostSystemFlag\n\n\tenabled bool\n\tcheck   bool\n\n\ttypes.HostFirewallRule\n}\n\nfunc init() {\n\tcli.Register(\"firewall.ruleset.find\", &find{})\n}\n\nfunc (cmd *find) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.check, \"c\", true, \"Check if esx firewall is enabled\")\n\tf.BoolVar(&cmd.enabled, \"enabled\", true, \"Find enabled rule sets if true, disabled if false\")\n\tf.StringVar((*string)(&cmd.Direction), \"direction\", string(types.HostFirewallRuleDirectionOutbound), \"Direction\")\n\tf.StringVar((*string)(&cmd.PortType), \"type\", string(types.HostFirewallRulePortTypeDst), \"Port type\")\n\tf.StringVar((*string)(&cmd.Protocol), \"proto\", string(types.HostFirewallRuleProtocolTcp), 
\"Protocol\")\n\tf.Var(flags.NewInt32(&cmd.Port), \"port\", \"Port\")\n}\n\nfunc (cmd *find) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *find) Description() string {\n\treturn `Find firewall rulesets matching the given rule.\n\nFor a complete list of rulesets: govc host.esxcli network firewall ruleset list\nFor a complete list of rules:    govc host.esxcli network firewall ruleset rule list\n\nExamples:\n  govc firewall.ruleset.find -direction inbound -port 22\n  govc firewall.ruleset.find -direction outbound -port 2377`\n}\n\nfunc (cmd *find) Run(ctx context.Context, f *flag.FlagSet) error {\n\thost, err := cmd.HostSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfs, err := host.ConfigManager().FirewallSystem(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.check {\n\t\tesxfw, err := esxcli.GetFirewallInfo(host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !esxfw.Enabled {\n\t\t\tfmt.Fprintln(os.Stderr, \"host firewall is disabled\")\n\t\t}\n\t}\n\n\tinfo, err := fs.Info(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif f.NArg() != 0 {\n\t\t// TODO: f.Args() -> types.HostFirewallRulesetIpList\n\t\treturn flag.ErrHelp\n\t}\n\n\trs := object.HostFirewallRulesetList(info.Ruleset)\n\tmatched, err := rs.EnabledByRule(cmd.HostFirewallRule, cmd.enabled)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, r := range matched {\n\t\tfmt.Println(r.Key)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/info.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage host\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype info struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n\t*flags.HostSystemFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar res infoResult\n\tvar props []string\n\n\tif 
cmd.OutputFlag.JSON {\n\t\tprops = nil // Load everything\n\t} else {\n\t\tprops = []string{\"summary\"} // Load summary\n\t}\n\n\t// We could do without the -host flag, leaving it for compat\n\thost, err := cmd.HostSystemIfSpecified()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Default only if there is a single host\n\tif host == nil && f.NArg() == 0 {\n\t\thost, err = cmd.HostSystem()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif host != nil {\n\t\tres.objects = append(res.objects, host)\n\t} else {\n\t\tres.objects, err = cmd.HostSystems(f.Args())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res.objects) != 0 {\n\t\trefs := make([]types.ManagedObjectReference, 0, len(res.objects))\n\t\tfor _, o := range res.objects {\n\t\t\trefs = append(refs, o.Reference())\n\t\t}\n\n\t\tpc := property.DefaultCollector(c)\n\t\terr = pc.Retrieve(ctx, refs, props, &res.HostSystems)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn cmd.WriteResult(&res)\n}\n\ntype infoResult struct {\n\tHostSystems []mo.HostSystem\n\tobjects     []*object.HostSystem\n}\n\nfunc (r *infoResult) Write(w io.Writer) error {\n\t// Maintain order via r.objects as Property collector does not always return results in order.\n\tobjects := make(map[types.ManagedObjectReference]mo.HostSystem, len(r.HostSystems))\n\tfor _, o := range r.HostSystems {\n\t\tobjects[o.Reference()] = o\n\t}\n\n\ttw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)\n\n\tfor _, o := range r.objects {\n\t\thost := objects[o.Reference()]\n\t\ts := host.Summary\n\t\th := s.Hardware\n\t\tz := s.QuickStats\n\t\tncpu := int32(h.NumCpuPkgs * h.NumCpuCores)\n\t\tcpuUsage := 100 * float64(z.OverallCpuUsage) / float64(ncpu*h.CpuMhz)\n\t\tmemUsage := 100 * float64(z.OverallMemoryUsage<<20) / float64(h.MemorySize)\n\n\t\tfmt.Fprintf(tw, \"Name:\\t%s\\n\", s.Config.Name)\n\t\tfmt.Fprintf(tw, \"  Path:\\t%s\\n\", o.InventoryPath)\n\t\tfmt.Fprintf(tw, \"  Manufacturer:\\t%s\\n\", 
h.Vendor)\n\t\tfmt.Fprintf(tw, \"  Logical CPUs:\\t%d CPUs @ %dMHz\\n\", ncpu, h.CpuMhz)\n\t\tfmt.Fprintf(tw, \"  Processor type:\\t%s\\n\", h.CpuModel)\n\t\tfmt.Fprintf(tw, \"  CPU usage:\\t%d MHz (%.1f%%)\\n\", z.OverallCpuUsage, cpuUsage)\n\t\tfmt.Fprintf(tw, \"  Memory:\\t%dMB\\n\", h.MemorySize/(1024*1024))\n\t\tfmt.Fprintf(tw, \"  Memory usage:\\t%d MB (%.1f%%)\\n\", z.OverallMemoryUsage, memUsage)\n\t\tfmt.Fprintf(tw, \"  Boot time:\\t%s\\n\", s.Runtime.BootTime)\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/maintenance/enter.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage maintenance\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype enter struct {\n\t*flags.HostSystemFlag\n\n\ttimeout  int32\n\tevacuate bool\n}\n\nfunc init() {\n\tcli.Register(\"host.maintenance.enter\", &enter{})\n}\n\nfunc (cmd *enter) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tf.Var(flags.NewInt32(&cmd.timeout), \"timeout\", \"Timeout\")\n\tf.BoolVar(&cmd.evacuate, \"evacuate\", false, \"Evacuate powered off VMs\")\n}\n\nfunc (cmd *enter) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *enter) Usage() string {\n\treturn \"HOST...\"\n}\n\nfunc (cmd *enter) Description() string {\n\treturn `Put HOST in maintenance mode.\n\nWhile this task is running and when the host is in maintenance mode,\nno VMs can be powered on and no provisioning operations can be performed on the host.`\n}\n\nfunc (cmd *enter) EnterMaintenanceMode(ctx context.Context, host *object.HostSystem) error {\n\ttask, err := host.EnterMaintenanceMode(ctx, cmd.timeout, cmd.evacuate, nil) // TODO: spec param\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tlogger := cmd.ProgressLogger(fmt.Sprintf(\"%s entering maintenance mode... \", host.InventoryPath))\n\tdefer logger.Wait()\n\n\t_, err = task.WaitForResult(ctx, logger)\n\treturn err\n}\n\nfunc (cmd *enter) Run(ctx context.Context, f *flag.FlagSet) error {\n\thosts, err := cmd.HostSystems(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, host := range hosts {\n\t\terr = cmd.EnterMaintenanceMode(ctx, host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/maintenance/exit.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage maintenance\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype exit struct {\n\t*flags.HostSystemFlag\n\n\ttimeout int32\n}\n\nfunc init() {\n\tcli.Register(\"host.maintenance.exit\", &exit{})\n}\n\nfunc (cmd *exit) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tf.Var(flags.NewInt32(&cmd.timeout), \"timeout\", \"Timeout\")\n}\n\nfunc (cmd *exit) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *exit) Usage() string {\n\treturn \"HOST...\"\n}\n\nfunc (cmd *exit) Description() string {\n\treturn `Take HOST out of maintenance mode.\n\nThis blocks if any concurrent running maintenance-only host configurations operations are being performed.\nFor example, if VMFS volumes are being upgraded.\n\nThe 'timeout' flag is the number of seconds to wait for the exit maintenance mode to succeed.\nIf the timeout is less than or equal to zero, there is no timeout.`\n}\n\nfunc (cmd *exit) ExitMaintenanceMode(ctx context.Context, host *object.HostSystem) error {\n\ttask, err := host.ExitMaintenanceMode(ctx, cmd.timeout)\n\tif err != 
nil {\n\t\treturn err\n\t}\n\n\tlogger := cmd.ProgressLogger(fmt.Sprintf(\"%s exiting maintenance mode... \", host.InventoryPath))\n\tdefer logger.Wait()\n\n\t_, err = task.WaitForResult(ctx, logger)\n\treturn err\n}\n\nfunc (cmd *exit) Run(ctx context.Context, f *flag.FlagSet) error {\n\thosts, err := cmd.HostSystems(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, host := range hosts {\n\t\terr = cmd.ExitMaintenanceMode(ctx, host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/option/ls.go",
    "content": "/*\nCopyright (c) 2016-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage option\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/govc/option\"\n)\n\ntype ls struct {\n\t*option.List\n\t*flags.HostSystemFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.option.ls\", &ls{})\n}\n\nfunc (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.List = &option.List{}\n\tcmd.List.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.List.ClientFlag.Register(ctx, f)\n\n\tcmd.List.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.List.OutputFlag.Register(ctx, f)\n\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n}\n\nfunc (cmd *ls) Process(ctx context.Context) error {\n\tif err := cmd.List.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *ls) Description() string {\n\treturn option.ListDescription + `\n\nExamples:\n  govc host.option.ls\n  govc host.option.ls Config.HostAgent.\n  govc host.option.ls Config.HostAgent.plugins.solo.enableMob`\n}\n\nfunc (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error {\n\thost, err := cmd.HostSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := host.ConfigManager().OptionManager(ctx)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\n\treturn cmd.Query(ctx, f, m)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/option/set.go",
    "content": "/*\nCopyright (c) 2016-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage option\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/govc/option\"\n)\n\ntype set struct {\n\t*option.Set\n\t*flags.HostSystemFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.option.set\", &set{})\n}\n\nfunc (cmd *set) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.Set = &option.Set{}\n\tcmd.Set.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.Set.ClientFlag.Register(ctx, f)\n\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n}\n\nfunc (cmd *set) Process(ctx context.Context) error {\n\tif err := cmd.Set.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *set) Description() string {\n\treturn option.SetDescription + `\n\nExamples:\n  govc host.option.set Config.HostAgent.plugins.solo.enableMob true\n  govc host.option.set Config.HostAgent.log.level verbose`\n}\n\nfunc (cmd *set) Run(ctx context.Context, f *flag.FlagSet) error {\n\thost, err := cmd.HostSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := host.ConfigManager().OptionManager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn cmd.Update(ctx, f, m)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/portgroup/add.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage portgroup\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype add struct {\n\t*flags.HostSystemFlag\n\n\tspec types.HostPortGroupSpec\n}\n\nfunc init() {\n\tcli.Register(\"host.portgroup.add\", &add{})\n}\n\nfunc (cmd *add) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.spec.VswitchName, \"vswitch\", \"\", \"vSwitch Name\")\n\tf.Var(flags.NewInt32(&cmd.spec.VlanId), \"vlan\", \"VLAN ID\")\n}\n\nfunc (cmd *add) Description() string {\n\treturn `Add portgroup to HOST.\n\nExamples:\n  govc host.portgroup.add -vswitch vSwitch0 -vlan 3201 bridge`\n}\n\nfunc (cmd *add) Usage() string {\n\treturn \"NAME\"\n}\n\nfunc (cmd *add) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error {\n\tns, err := cmd.HostNetworkSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.spec.Name = f.Arg(0)\n\n\treturn ns.AddPortGroup(ctx, cmd.spec)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/portgroup/change.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage portgroup\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype change struct {\n\t*flags.ClientFlag\n\t*flags.HostSystemFlag\n\n\ttypes.HostPortGroupSpec\n\ttypes.HostNetworkSecurityPolicy\n}\n\nfunc init() {\n\tcli.Register(\"host.portgroup.change\", &change{})\n}\n\nfunc (cmd *change) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tcmd.VlanId = -1\n\tf.Var(flags.NewInt32(&cmd.VlanId), \"vlan-id\", \"VLAN ID\")\n\tf.StringVar(&cmd.Name, \"name\", \"\", \"Portgroup name\")\n\tf.StringVar(&cmd.VswitchName, \"vswitch-name\", \"\", \"vSwitch name\")\n\n\tf.Var(flags.NewOptionalBool(&cmd.AllowPromiscuous), \"allow-promiscuous\", \"Allow promiscuous mode\")\n\tf.Var(flags.NewOptionalBool(&cmd.ForgedTransmits), \"forged-transmits\", \"Allow forged transmits\")\n\tf.Var(flags.NewOptionalBool(&cmd.MacChanges), \"mac-changes\", \"Allow MAC changes\")\n}\n\nfunc (cmd *change) Description() string {\n\treturn `Change configuration of HOST portgroup NAME.\n\nExamples:\n  govc host.portgroup.change -allow-promiscuous -forged-transmits -mac-changes 
\"VM Network\"\n  govc host.portgroup.change -vswitch-name vSwitch1 \"Management Network\"`\n}\n\nfunc (cmd *change) Usage() string {\n\treturn \"NAME\"\n}\n\nfunc (cmd *change) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *change) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tpg, err := networkInfoPortgroup(ctx, cmd.ClientFlag, cmd.HostSystemFlag)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tns, err := cmd.HostNetworkSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tname := f.Arg(0)\n\tvar current *types.HostPortGroupSpec\n\n\tfor _, g := range pg {\n\t\tif g.Spec.Name == name {\n\t\t\tcurrent = &g.Spec\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif current != nil {\n\t\tif cmd.Name == \"\" {\n\t\t\tcmd.Name = current.Name\n\t\t}\n\t\tif cmd.VswitchName == \"\" {\n\t\t\tcmd.VswitchName = current.VswitchName\n\t\t}\n\t\tif cmd.VlanId < 0 {\n\t\t\tcmd.VlanId = current.VlanId\n\t\t}\n\t}\n\n\tcmd.HostPortGroupSpec.Policy.Security = &cmd.HostNetworkSecurityPolicy\n\n\treturn ns.UpdatePortGroup(ctx, name, cmd.HostPortGroupSpec)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/portgroup/info.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage portgroup\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/govc/host/vswitch\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype info struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n\t*flags.HostSystemFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.portgroup.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc networkInfoPortgroup(ctx context.Context, c *flags.ClientFlag, h *flags.HostSystemFlag) ([]types.HostPortGroup, error) {\n\tclient, err := c.Client()\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\tns, err := h.HostNetworkSystem()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar mns mo.HostNetworkSystem\n\n\tpc := property.DefaultCollector(client)\n\terr = pc.RetrieveOne(ctx, ns.Reference(), []string{\"networkInfo.portgroup\"}, &mns)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn mns.NetworkInfo.Portgroup, nil\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\tpg, err := networkInfoPortgroup(ctx, cmd.ClientFlag, cmd.HostSystemFlag)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr := &infoResult{pg}\n\n\treturn cmd.WriteResult(r)\n}\n\ntype infoResult struct {\n\tPortgroup []types.HostPortGroup\n}\n\nfunc (r *infoResult) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfor i, s := range r.Portgroup {\n\t\tif i > 0 {\n\t\t\tfmt.Fprintln(tw)\n\t\t}\n\t\tfmt.Fprintf(tw, \"Name:\\t%s\\n\", s.Spec.Name)\n\t\tfmt.Fprintf(tw, \"Virtual switch:\\t%s\\n\", s.Spec.VswitchName)\n\t\tfmt.Fprintf(tw, \"VLAN ID:\\t%d\\n\", s.Spec.VlanId)\n\t\tfmt.Fprintf(tw, \"Active ports:\\t%d\\n\", len(s.Port))\n\t\tvswitch.HostNetworkPolicy(tw, &s.ComputedPolicy)\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/portgroup/remove.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage portgroup\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype remove struct {\n\t*flags.HostSystemFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.portgroup.remove\", &remove{})\n}\n\nfunc (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n}\n\nfunc (cmd *remove) Description() string {\n\treturn `Remove portgroup from HOST.\n\nExamples:\n  govc host.portgroup.remove bridge`\n}\n\nfunc (cmd *remove) Usage() string {\n\treturn \"NAME\"\n}\n\nfunc (cmd *remove) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error {\n\tns, err := cmd.HostNetworkSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ns.RemovePortGroup(ctx, f.Arg(0))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/reconnect.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage host\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype reconnect struct {\n\t*flags.HostSystemFlag\n\t*flags.HostConnectFlag\n\n\ttypes.HostSystemReconnectSpec\n}\n\nfunc init() {\n\tcli.Register(\"host.reconnect\", &reconnect{})\n}\n\nfunc (cmd *reconnect) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tcmd.HostConnectFlag, ctx = flags.NewHostConnectFlag(ctx)\n\tcmd.HostConnectFlag.Register(ctx, f)\n\n\tcmd.HostSystemReconnectSpec.SyncState = types.NewBool(false)\n\tf.BoolVar(cmd.HostSystemReconnectSpec.SyncState, \"sync-state\", false, \"Sync state\")\n}\n\nfunc (cmd *reconnect) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostConnectFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *reconnect) Description() string {\n\treturn `Reconnect HOST to vCenter.\n\nThis command can also be used to change connection properties (hostname, fingerprint, username, password),\nwithout disconnecting the host.`\n}\n\nfunc (cmd *reconnect) Reconnect(ctx 
context.Context, host *object.HostSystem) error {\n\ttask, err := host.Reconnect(ctx, &cmd.HostConnectSpec, &cmd.HostSystemReconnectSpec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger := cmd.ProgressLogger(fmt.Sprintf(\"%s reconnecting... \", host.InventoryPath))\n\tdefer logger.Wait()\n\n\t_, err = task.WaitForResult(ctx, logger)\n\treturn err\n}\n\nfunc (cmd *reconnect) Run(ctx context.Context, f *flag.FlagSet) error {\n\thosts, err := cmd.HostSystems(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, host := range hosts {\n\t\terr = cmd.Reconnect(ctx, host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/remove.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage host\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n)\n\ntype remove struct {\n\t*flags.HostSystemFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.remove\", &remove{})\n}\n\nfunc (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n}\n\nfunc (cmd *remove) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *remove) Usage() string {\n\treturn \"HOST...\"\n}\n\nfunc (cmd *remove) Description() string {\n\treturn `Remove HOST from vCenter.`\n}\n\nfunc (cmd *remove) Remove(ctx context.Context, host *object.HostSystem) error {\n\tvar h mo.HostSystem\n\terr := host.Properties(ctx, host.Reference(), []string{\"parent\"}, &h)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tremove := host.Destroy\n\n\tif h.Parent.Type == \"ComputeResource\" {\n\t\t// Standalone host.  
From the docs:\n\t\t// \"Invoking remove on a HostSystem of standalone type throws a NotSupported fault.\n\t\t//  A standalone HostSystem can be removed only by invoking remove on its parent ComputeResource.\"\n\t\tremove = object.NewComputeResource(host.Client(), *h.Parent).Destroy\n\t}\n\n\ttask, err := remove(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger := cmd.ProgressLogger(fmt.Sprintf(\"%s removing... \", host.InventoryPath))\n\tdefer logger.Wait()\n\n\t_, err = task.WaitForResult(ctx, logger)\n\treturn err\n}\n\nfunc (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error {\n\thosts, err := cmd.HostSystems(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, host := range hosts {\n\t\terr = cmd.Remove(ctx, host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/service/command.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage service\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype service struct {\n\t*flags.ClientFlag\n\t*flags.HostSystemFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.service\", &service{})\n}\n\nfunc (cmd *service) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n}\n\nfunc (cmd *service) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *service) Usage() string {\n\treturn \"ACTION ID\"\n}\n\nfunc (cmd *service) Description() string {\n\treturn `Apply host service ACTION to service ID.\n\nWhere ACTION is one of: start, stop, restart, status, enable, disable\n\nExamples:\n  govc host.service enable TSM-SSH\n  govc host.service start TSM-SSH`\n}\n\nfunc (cmd *service) status(ctx context.Context, s *object.HostServiceSystem, id string) (string, error) {\n\tservices, err := s.Service(ctx)\n\tif err != nil {\n\t\treturn 
\"\", err\n\t}\n\n\tfor _, service := range services {\n\t\tif id == service.Key {\n\t\t\treturn Status(service), nil\n\t\t}\n\t}\n\n\treturn \"N/A\", nil\n}\n\nfunc (cmd *service) Run(ctx context.Context, f *flag.FlagSet) error {\n\thost, err := cmd.HostSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif f.NArg() != 2 {\n\t\treturn flag.ErrHelp\n\t}\n\n\targ := f.Arg(0)\n\tid := f.Arg(1)\n\n\ts, err := host.ConfigManager().ServiceSystem(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch arg {\n\tcase \"start\":\n\t\treturn s.Start(ctx, id)\n\tcase \"stop\":\n\t\treturn s.Stop(ctx, id)\n\tcase \"restart\":\n\t\treturn s.Restart(ctx, id)\n\tcase \"status\":\n\t\tss, err := cmd.status(ctx, s, id)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(ss)\n\t\treturn nil\n\tcase \"enable\":\n\t\treturn s.UpdatePolicy(ctx, id, string(types.HostServicePolicyOn))\n\tcase \"disable\":\n\t\treturn s.UpdatePolicy(ctx, id, string(types.HostServicePolicyOff))\n\t}\n\n\treturn flag.ErrHelp\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/service/ls.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage service\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ls struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n\t*flags.HostSystemFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.service.ls\", &ls{})\n}\n\nfunc (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n}\n\nfunc (cmd *ls) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *ls) Description() string {\n\treturn `List HOST services.`\n}\n\nfunc Status(s types.HostService) string {\n\tif s.Running {\n\t\treturn \"Running\"\n\t}\n\treturn \"Stopped\"\n}\n\nfunc Policy(s types.HostService) string {\n\tswitch types.HostServicePolicy(s.Policy) {\n\tcase types.HostServicePolicyOff:\n\t\treturn 
\"Disabled\"\n\tcase types.HostServicePolicyOn:\n\t\treturn \"Enabled\"\n\tcase types.HostServicePolicyAutomatic:\n\t\treturn \"Automatic\"\n\tdefault:\n\t\treturn s.Policy\n\t}\n}\n\nfunc (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error {\n\thost, err := cmd.HostSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts, err := host.ConfigManager().ServiceSystem(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tservices, err := s.Service(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn cmd.WriteResult(optionResult(services))\n}\n\ntype optionResult []types.HostService\n\nfunc (services optionResult) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfmt.Fprintf(tw, \"%s\\t%s\\t%v\\t%s\\n\", \"Key\", \"Policy\", \"Status\", \"Label\")\n\n\tfor _, s := range services {\n\t\tfmt.Fprintf(tw, \"%s\\t%s\\t%s\\t%s\\n\", s.Key, s.Policy, Status(s), s.Label)\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/shutdown.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage host\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype shutdown struct {\n\t*flags.HostSystemFlag\n\tforce bool\n}\n\nfunc init() {\n\tcli.Register(\"host.shutdown\", &shutdown{})\n}\n\nfunc (cmd *shutdown) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.force, \"f\", false, \"Force shutdown when host is not in maintenance mode\")\n}\n\nfunc (cmd *shutdown) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *shutdown) Description() string {\n\treturn `Shutdown HOST.`\n}\n\nfunc (cmd *shutdown) Shutdown(ctx context.Context, host *object.HostSystem) error {\n\treq := types.ShutdownHost_Task{\n\t\tThis:  host.Reference(),\n\t\tForce: cmd.force,\n\t}\n\n\tres, err := methods.ShutdownHost_Task(ctx, host.Client(), &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttask := object.NewTask(host.Client(), res.Returnval)\n\n\tlogger := cmd.ProgressLogger(fmt.Sprintf(\"%s shutdown... 
\", host.InventoryPath))\n\tdefer logger.Wait()\n\n\t_, err = task.WaitForResult(ctx, logger)\n\treturn err\n}\n\nfunc (cmd *shutdown) Run(ctx context.Context, f *flag.FlagSet) error {\n\thosts, err := cmd.HostSystems(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, host := range hosts {\n\t\terr = cmd.Shutdown(ctx, host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/storage/info.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage storage\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/units\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nvar infoTypes = []string{\"hba\", \"lun\"}\n\ntype infoType string\n\nfunc (t *infoType) Set(s string) error {\n\ts = strings.ToLower(s)\n\n\tfor _, e := range infoTypes {\n\t\tif s == e {\n\t\t\t*t = infoType(s)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn fmt.Errorf(\"invalid type\")\n}\n\nfunc (t *infoType) String() string {\n\treturn string(*t)\n}\n\nfunc (t *infoType) Result(hss mo.HostStorageSystem) flags.OutputWriter {\n\tswitch string(*t) {\n\tcase \"hba\":\n\t\treturn hbaResult(hss)\n\tcase \"lun\":\n\t\treturn lunResult(hss)\n\tdefault:\n\t\tpanic(\"unsupported\")\n\t}\n}\n\ntype info struct {\n\t*flags.HostSystemFlag\n\t*flags.OutputFlag\n\n\ttyp       infoType\n\trescan    bool\n\tunclaimed bool\n}\n\nfunc init() {\n\tcli.Register(\"host.storage.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\terr 
:= cmd.typ.Set(\"lun\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tf.Var(&cmd.typ, \"t\", fmt.Sprintf(\"Type (%s)\", strings.Join(infoTypes, \",\")))\n\n\tf.BoolVar(&cmd.rescan, \"rescan\", false, \"Rescan for new storage devices\")\n\tf.BoolVar(&cmd.unclaimed, \"unclaimed\", false, \"Only show disks that can be used as new VMFS datastores\")\n}\n\nfunc (cmd *info) Description() string {\n\treturn `Show HOST storage system information.\n\nExamples:\n  govc ls -t HostSystem host/* | xargs -n1 govc host.storage.info -unclaimed -host`\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\thost, err := cmd.HostSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tss, err := host.ConfigManager().StorageSystem(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.rescan {\n\t\terr = ss.RescanAllHba(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar hss mo.HostStorageSystem\n\terr = ss.Properties(ctx, ss.Reference(), nil, &hss)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tif cmd.unclaimed {\n\t\tds, err := host.ConfigManager().DatastoreSystem(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdisks, err := ds.QueryAvailableDisksForVmfs(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar luns []types.BaseScsiLun\n\t\tfor i := range disks {\n\t\t\tluns = append(luns, &disks[i])\n\t\t}\n\t\thss.StorageDeviceInfo.ScsiLun = luns\n\t}\n\n\treturn cmd.WriteResult(cmd.typ.Result(hss))\n}\n\ntype hbaResult mo.HostStorageSystem\n\nfunc (r hbaResult) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfmt.Fprintf(tw, \"Device\\t\")\n\tfmt.Fprintf(tw, \"PCI\\t\")\n\tfmt.Fprintf(tw, \"Driver\\t\")\n\tfmt.Fprintf(tw, \"Status\\t\")\n\tfmt.Fprintf(tw, 
\"Model\\t\")\n\tfmt.Fprintf(tw, \"\\n\")\n\n\tfor _, e := range r.StorageDeviceInfo.HostBusAdapter {\n\t\thba := e.GetHostHostBusAdapter()\n\n\t\tfmt.Fprintf(tw, \"%s\\t\", hba.Device)\n\t\tfmt.Fprintf(tw, \"%s\\t\", hba.Pci)\n\t\tfmt.Fprintf(tw, \"%s\\t\", hba.Driver)\n\t\tfmt.Fprintf(tw, \"%s\\t\", hba.Status)\n\t\tfmt.Fprintf(tw, \"%s\\t\", hba.Model)\n\t\tfmt.Fprintf(tw, \"\\n\")\n\t}\n\n\treturn tw.Flush()\n}\n\ntype lunResult mo.HostStorageSystem\n\nfunc (r lunResult) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfmt.Fprintf(tw, \"Name\\t\")\n\tfmt.Fprintf(tw, \"Type\\t\")\n\tfmt.Fprintf(tw, \"Capacity\\t\")\n\tfmt.Fprintf(tw, \"Model\\t\")\n\tfmt.Fprintf(tw, \"\\n\")\n\n\tfor _, e := range r.StorageDeviceInfo.ScsiLun {\n\t\tvar tags []string\n\t\tvar capacity int64\n\n\t\tlun := e.GetScsiLun()\n\t\tif disk, ok := e.(*types.HostScsiDisk); ok {\n\t\t\tcapacity = int64(disk.Capacity.Block) * int64(disk.Capacity.BlockSize)\n\t\t\tif disk.LocalDisk != nil && *disk.LocalDisk {\n\t\t\t\ttags = append(tags, \"local\")\n\t\t\t}\n\t\t\tif disk.Ssd != nil && *disk.Ssd {\n\t\t\t\ttags = append(tags, \"ssd\")\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprintf(tw, \"%s\\t\", lun.DeviceName)\n\t\tfmt.Fprintf(tw, \"%s\\t\", lun.DeviceType)\n\n\t\tif capacity == 0 {\n\t\t\tfmt.Fprintf(tw, \"-\\t\")\n\t\t} else {\n\t\t\tfmt.Fprintf(tw, \"%s\\t\", units.ByteSize(capacity))\n\t\t}\n\n\t\tfmt.Fprintf(tw, \"%s\", lun.Model)\n\t\tif len(tags) > 0 {\n\t\t\tfmt.Fprintf(tw, \" (%s)\", strings.Join(tags, \",\"))\n\t\t}\n\t\tfmt.Fprintf(tw, \"\\n\")\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/storage/mark.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage storage\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype mark struct {\n\t*flags.HostSystemFlag\n\n\tssd   *bool\n\tlocal *bool\n}\n\nfunc init() {\n\tcli.Register(\"host.storage.mark\", &mark{})\n}\n\nfunc (cmd *mark) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tf.Var(flags.NewOptionalBool(&cmd.ssd), \"ssd\", \"Mark as SSD\")\n\tf.Var(flags.NewOptionalBool(&cmd.local), \"local\", \"Mark as local\")\n}\n\nfunc (cmd *mark) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *mark) Usage() string {\n\treturn \"DEVICE_PATH\"\n}\n\nfunc (cmd *mark) Description() string {\n\treturn `Mark device at DEVICE_PATH.`\n}\n\nfunc (cmd *mark) Mark(ctx context.Context, ss *object.HostStorageSystem, uuid string) error {\n\tvar err error\n\tvar task *object.Task\n\n\tif cmd.ssd != nil {\n\t\tif *cmd.ssd {\n\t\t\ttask, err = ss.MarkAsSsd(ctx, uuid)\n\t\t} else {\n\t\t\ttask, err = ss.MarkAsNonSsd(ctx, uuid)\n\t\t}\n\n\t\tif err != nil 
{\n\t\t\treturn err\n\t\t}\n\n\t\terr = task.Wait(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif cmd.local != nil {\n\t\tif *cmd.local {\n\t\t\ttask, err = ss.MarkAsLocal(ctx, uuid)\n\t\t} else {\n\t\t\ttask, err = ss.MarkAsNonLocal(ctx, uuid)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = task.Wait(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *mark) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 1 {\n\t\treturn fmt.Errorf(\"specify device path\")\n\t}\n\n\tpath := f.Args()[0]\n\n\thost, err := cmd.HostSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tss, err := host.ConfigManager().StorageSystem(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar hss mo.HostStorageSystem\n\terr = ss.Properties(ctx, ss.Reference(), nil, &hss)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tfor _, e := range hss.StorageDeviceInfo.ScsiLun {\n\t\tdisk, ok := e.(*types.HostScsiDisk)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tif disk.DevicePath == path {\n\t\t\treturn cmd.Mark(ctx, ss, disk.Uuid)\n\t\t}\n\t}\n\n\treturn fmt.Errorf(\"%s not found\", path)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/storage/partition.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage storage\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/units\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype partition struct {\n\t*flags.HostSystemFlag\n\t*flags.OutputFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.storage.partition\", &partition{})\n}\n\nfunc (cmd *partition) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n}\n\nfunc (cmd *partition) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *partition) Usage() string {\n\treturn \"DEVICE_PATH\"\n}\n\nfunc (cmd *partition) Description() string {\n\treturn `Show partition table for device at DEVICE_PATH.`\n}\n\nfunc (cmd *partition) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 1 {\n\t\treturn fmt.Errorf(\"specify device path\")\n\t}\n\n\tpath := f.Args()[0]\n\n\thost, err := cmd.HostSystem()\n\tif err != nil 
{\n\t\treturn err\n\t}\n\n\tss, err := host.ConfigManager().StorageSystem(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar hss mo.HostStorageSystem\n\terr = ss.Properties(ctx, ss.Reference(), nil, &hss)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tinfo, err := ss.RetrieveDiskPartitionInfo(ctx, path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn cmd.WriteResult(partitionInfo(*info))\n}\n\ntype partitionInfo types.HostDiskPartitionInfo\n\nfunc (p partitionInfo) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfmt.Fprintf(tw, \"Table format: %s\\n\", p.Spec.PartitionFormat)\n\tfmt.Fprintf(tw, \"Number of sectors: %d\\n\", p.Spec.TotalSectors)\n\tfmt.Fprintf(tw, \"\\n\")\n\n\tfmt.Fprintf(tw, \"Number\\t\")\n\tfmt.Fprintf(tw, \"Start\\t\")\n\tfmt.Fprintf(tw, \"End\\t\")\n\tfmt.Fprintf(tw, \"Size\\t\")\n\tfmt.Fprintf(tw, \"Type\\t\")\n\tfmt.Fprintf(tw, \"\\n\")\n\n\tfor _, e := range p.Spec.Partition {\n\t\tsectors := e.EndSector - e.StartSector\n\n\t\tfmt.Fprintf(tw, \"%d\\t\", e.Partition)\n\t\tfmt.Fprintf(tw, \"%d\\t\", e.StartSector)\n\t\tfmt.Fprintf(tw, \"%d\\t\", e.EndSector)\n\t\tfmt.Fprintf(tw, \"%s\\t\", units.ByteSize(sectors*512))\n\t\tfmt.Fprintf(tw, \"%s\\t\", e.Type)\n\t\tfmt.Fprintf(tw, \"\\n\")\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/vnic/info.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vnic\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype info struct {\n\t*flags.HostSystemFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.vnic.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\thost, err := cmd.HostSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tns, err := cmd.HostNetworkSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar mns mo.HostNetworkSystem\n\n\tm, err := host.ConfigManager().VirtualNicManager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinfo, err := m.Info(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ns.Properties(ctx, ns.Reference(), []string{\"networkInfo\"}, &mns)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 
0)\n\n\ttype dnet struct {\n\t\tdvp mo.DistributedVirtualPortgroup\n\t\tdvs mo.VmwareDistributedVirtualSwitch\n\t}\n\n\tdnets := make(map[string]*dnet)\n\n\tfor _, nic := range mns.NetworkInfo.Vnic {\n\t\tfmt.Fprintf(tw, \"Device:\\t%s\\n\", nic.Device)\n\n\t\tif dvp := nic.Spec.DistributedVirtualPort; dvp != nil {\n\t\t\tdn, ok := dnets[dvp.PortgroupKey]\n\n\t\t\tif !ok {\n\t\t\t\tdn = new(dnet)\n\t\t\t\to := object.NewDistributedVirtualPortgroup(host.Client(), types.ManagedObjectReference{\n\t\t\t\t\tType:  \"DistributedVirtualPortgroup\",\n\t\t\t\t\tValue: dvp.PortgroupKey,\n\t\t\t\t})\n\n\t\t\t\terr = o.Properties(ctx, o.Reference(), []string{\"name\", \"config.distributedVirtualSwitch\"}, &dn.dvp)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\terr = o.Properties(ctx, *dn.dvp.Config.DistributedVirtualSwitch, []string{\"name\"}, &dn.dvs)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tdnets[dvp.PortgroupKey] = dn\n\t\t\t}\n\n\t\t\tfmt.Fprintf(tw, \"Network label:\\t%s\\n\", dn.dvp.Name)\n\t\t\tfmt.Fprintf(tw, \"Switch:\\t%s\\n\", dn.dvs.Name)\n\t\t} else {\n\t\t\tfmt.Fprintf(tw, \"Network label:\\t%s\\n\", nic.Portgroup)\n\t\t\tfor _, pg := range mns.NetworkInfo.Portgroup {\n\t\t\t\tif pg.Spec.Name == nic.Portgroup {\n\t\t\t\t\tfmt.Fprintf(tw, \"Switch:\\t%s\\n\", pg.Spec.VswitchName)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprintf(tw, \"IP address:\\t%s\\n\", nic.Spec.Ip.IpAddress)\n\t\tfmt.Fprintf(tw, \"TCP/IP stack:\\t%s\\n\", nic.Spec.NetStackInstanceKey)\n\n\t\tvar services []string\n\t\tfor _, nc := range info.NetConfig {\n\t\t\tfor _, dev := range nc.SelectedVnic {\n\t\t\t\tkey := nc.NicType + \".\" + nic.Key\n\t\t\t\tif dev == key {\n\t\t\t\t\tservices = append(services, nc.NicType)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t\tfmt.Fprintf(tw, \"Enabled services:\\t%s\\n\", strings.Join(services, \", \"))\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/vnic/service.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vnic\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype service struct {\n\t*flags.HostSystemFlag\n\n\tEnable bool\n}\n\nfunc init() {\n\tcli.Register(\"host.vnic.service\", &service{})\n}\n\nfunc (cmd *service) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.Enable, \"enable\", true, \"Enable service\")\n}\n\nfunc (cmd *service) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *service) Usage() string {\n\treturn \"SERVICE DEVICE\"\n}\n\nfunc (cmd *service) Description() string {\n\tnicTypes := []string{\n\t\tstring(types.HostVirtualNicManagerNicTypeVmotion),\n\t\tstring(types.HostVirtualNicManagerNicTypeFaultToleranceLogging),\n\t\tstring(types.HostVirtualNicManagerNicTypeVSphereReplication),\n\t\tstring(types.HostVirtualNicManagerNicTypeVSphereReplicationNFC),\n\t\tstring(types.HostVirtualNicManagerNicTypeManagement),\n\t\tstring(types.HostVirtualNicManagerNicTypeVsan),\n\t\tstring(types.HostVirtualNicManagerNicTypeVSphereProvisioning),\n\t}\n\n\treturn fmt.Sprintf(`\nEnable 
or disable service on a virtual nic device.\n\nWhere SERVICE is one of: %s\nWhere DEVICE is one of: %s\n\nExamples:\n  govc host.vnic.service -host hostname -enable vsan vmk0\n  govc host.vnic.service -host hostname -enable=false vmotion vmk1`,\n\t\tstrings.Join(nicTypes, \"|\"),\n\t\tstrings.Join([]string{\"vmk0\", \"vmk1\", \"...\"}, \"|\"))\n}\n\nfunc (cmd *service) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 2 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tservice := f.Arg(0)\n\tdevice := f.Arg(1)\n\n\thost, err := cmd.HostSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := host.ConfigManager().VirtualNicManager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar method func(context.Context, string, string) error\n\n\tif cmd.Enable {\n\t\tmethod = m.SelectVnic\n\t} else {\n\t\tmethod = m.DeselectVnic\n\t}\n\n\tif method == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\treturn method(ctx, service, device)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/vswitch/add.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vswitch\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype add struct {\n\t*flags.HostSystemFlag\n\n\tnic  string\n\tspec types.HostVirtualSwitchSpec\n}\n\nfunc init() {\n\tcli.Register(\"host.vswitch.add\", &add{})\n}\n\nfunc (cmd *add) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tcmd.spec.NumPorts = 128 // default\n\tf.Var(flags.NewInt32(&cmd.spec.NumPorts), \"ports\", \"Number of ports\")\n\tf.Var(flags.NewInt32(&cmd.spec.Mtu), \"mtu\", \"MTU\")\n\tf.StringVar(&cmd.nic, \"nic\", \"\", \"Bridge nic device\")\n}\n\nfunc (cmd *add) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *add) Usage() string {\n\treturn \"NAME\"\n}\n\nfunc (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error {\n\tns, err := cmd.HostNetworkSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.nic != \"\" {\n\t\tcmd.spec.Bridge = &types.HostVirtualSwitchBondBridge{\n\t\t\tNicDevice: []string{cmd.nic},\n\t\t}\n\t}\n\n\treturn ns.AddVirtualSwitch(ctx, f.Arg(0), &cmd.spec)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/vswitch/info.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vswitch\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype info struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n\t*flags.HostSystemFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.vswitch.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\tclient, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tns, err := cmd.HostNetworkSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar mns 
mo.HostNetworkSystem\n\n\tpc := property.DefaultCollector(client)\n\terr = pc.RetrieveOne(ctx, ns.Reference(), []string{\"networkInfo.vswitch\"}, &mns)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr := &infoResult{mns.NetworkInfo.Vswitch}\n\n\treturn cmd.WriteResult(r)\n}\n\ntype infoResult struct {\n\tVswitch []types.HostVirtualSwitch\n}\n\nfunc (r *infoResult) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfor i, s := range r.Vswitch {\n\t\tif i > 0 {\n\t\t\tfmt.Fprintln(tw)\n\t\t}\n\t\tfmt.Fprintf(tw, \"Name:\\t%s\\n\", s.Name)\n\t\tfmt.Fprintf(tw, \"Portgroup:\\t%s\\n\", keys(\"key-vim.host.PortGroup-\", s.Portgroup))\n\t\tfmt.Fprintf(tw, \"Pnic:\\t%s\\n\", keys(\"key-vim.host.PhysicalNic-\", s.Pnic))\n\t\tfmt.Fprintf(tw, \"MTU:\\t%d\\n\", s.Mtu)\n\t\tfmt.Fprintf(tw, \"Ports:\\t%d\\n\", s.NumPorts)\n\t\tfmt.Fprintf(tw, \"Ports Available:\\t%d\\n\", s.NumPortsAvailable)\n\t\tHostNetworkPolicy(tw, s.Spec.Policy)\n\t}\n\n\treturn tw.Flush()\n}\n\nfunc keys(key string, vals []string) string {\n\tfor i, val := range vals {\n\t\tvals[i] = strings.TrimPrefix(val, key)\n\t}\n\treturn strings.Join(vals, \", \")\n}\n\nfunc enabled(b *bool) string {\n\tif b != nil && *b {\n\t\treturn \"Yes\"\n\t}\n\treturn \"No\"\n}\n\nfunc HostNetworkPolicy(w io.Writer, p *types.HostNetworkPolicy) {\n\tif p == nil || p.Security == nil {\n\t\treturn // e.g. Workstation\n\t}\n\tfmt.Fprintf(w, \"Allow promiscuous mode:\\t%s\\n\", enabled(p.Security.AllowPromiscuous))\n\tfmt.Fprintf(w, \"Allow forged transmits:\\t%s\\n\", enabled(p.Security.ForgedTransmits))\n\tfmt.Fprintf(w, \"Allow MAC changes:\\t%s\\n\", enabled(p.Security.MacChanges))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/host/vswitch/remove.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vswitch\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype remove struct {\n\t*flags.HostSystemFlag\n}\n\nfunc init() {\n\tcli.Register(\"host.vswitch.remove\", &remove{})\n}\n\nfunc (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n}\n\nfunc (cmd *remove) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *remove) Usage() string {\n\treturn \"NAME\"\n}\n\nfunc (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error {\n\tns, err := cmd.HostNetworkSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ns.RemoveVirtualSwitch(ctx, f.Arg(0))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/importx/archive.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage importx\n\nimport (\n\t\"archive/tar\"\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\n\t\"github.com/vmware/govmomi/ovf\"\n)\n\n// ArchiveFlag doesn't register any flags;\n// only encapsulates some common archive related functionality.\ntype ArchiveFlag struct {\n\tArchive\n}\n\nfunc newArchiveFlag(ctx context.Context) (*ArchiveFlag, context.Context) {\n\treturn &ArchiveFlag{}, ctx\n}\n\nfunc (f *ArchiveFlag) Register(ctx context.Context, fs *flag.FlagSet) {\n}\n\nfunc (f *ArchiveFlag) Process(ctx context.Context) error {\n\treturn nil\n}\n\nfunc (f *ArchiveFlag) ReadOvf(fpath string) ([]byte, error) {\n\tr, _, err := f.Archive.Open(fpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Close()\n\n\treturn ioutil.ReadAll(r)\n}\n\nfunc (f *ArchiveFlag) ReadEnvelope(fpath string) (*ovf.Envelope, error) {\n\tif fpath == \"\" {\n\t\treturn &ovf.Envelope{}, nil\n\t}\n\n\tr, _, err := f.Open(fpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Close()\n\n\te, err := ovf.Unmarshal(r)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse ovf: %s\", err.Error())\n\t}\n\n\treturn e, nil\n}\n\ntype Archive interface {\n\tOpen(string) (io.ReadCloser, int64, error)\n}\n\ntype TapeArchive struct {\n\tpath string\n}\n\ntype TapeArchiveEntry struct {\n\tio.Reader\n\tf 
*os.File\n}\n\nfunc (t *TapeArchiveEntry) Close() error {\n\treturn t.f.Close()\n}\n\nfunc (t *TapeArchive) Open(name string) (io.ReadCloser, int64, error) {\n\tf, err := os.Open(t.path)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tr := tar.NewReader(f)\n\n\tfor {\n\t\th, err := r.Next()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\n\t\tmatched, err := path.Match(name, path.Base(h.Name))\n\t\tif err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\n\t\tif matched {\n\t\t\treturn &TapeArchiveEntry{r, f}, h.Size, nil\n\t\t}\n\t}\n\n\t_ = f.Close()\n\n\treturn nil, 0, os.ErrNotExist\n}\n\ntype FileArchive struct {\n\tpath string\n}\n\nfunc (t *FileArchive) Open(name string) (io.ReadCloser, int64, error) {\n\tfpath := name\n\tif name != t.path {\n\t\tfpath = filepath.Join(filepath.Dir(t.path), name)\n\t}\n\n\ts, err := os.Stat(fpath)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tf, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn f, s.Size(), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/importx/folder.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage importx\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype FolderFlag struct {\n\t*flags.DatacenterFlag\n\n\tfolder string\n}\n\nfunc newFolderFlag(ctx context.Context) (*FolderFlag, context.Context) {\n\tf := &FolderFlag{}\n\tf.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\treturn f, ctx\n}\n\nfunc (flag *FolderFlag) Register(ctx context.Context, f *flag.FlagSet) {\n\tflag.DatacenterFlag.Register(ctx, f)\n\n\tf.StringVar(&flag.folder, \"folder\", \"\", \"Path to folder to add the VM to\")\n}\n\nfunc (flag *FolderFlag) Process(ctx context.Context) error {\n\treturn flag.DatacenterFlag.Process(ctx)\n}\n\nfunc (flag *FolderFlag) Folder() (*object.Folder, error) {\n\tctx := context.TODO()\n\tif len(flag.folder) == 0 {\n\t\tdc, err := flag.Datacenter()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfolders, err := dc.Folders(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn folders.VmFolder, nil\n\t}\n\n\tfinder, err := flag.Finder()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmo, err := finder.ManagedObjectList(ctx, flag.folder)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(mo) == 0 {\n\t\treturn nil, errors.New(\"folder argument does not resolve to object\")\n\t}\n\tif len(mo) > 1 {\n\t\treturn nil, 
errors.New(\"folder argument resolves to more than one object\")\n\t}\n\n\tref := mo[0].Object.Reference()\n\tif ref.Type != \"Folder\" {\n\t\treturn nil, errors.New(\"folder argument does not resolve to folder\")\n\t}\n\n\tc, err := flag.Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn object.NewFolder(c, ref), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/importx/importable.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage importx\n\nimport (\n\t\"fmt\"\n\t\"path\"\n)\n\ntype importable struct {\n\tlocalPath  string\n\tremotePath string\n}\n\nfunc (i importable) Ext() string {\n\treturn path.Ext(i.localPath)\n}\n\nfunc (i importable) Base() string {\n\treturn path.Base(i.localPath)\n}\n\nfunc (i importable) BaseClean() string {\n\tb := i.Base()\n\te := i.Ext()\n\treturn b[:len(b)-len(e)]\n}\n\nfunc (i importable) RemoteSrcVMDK() string {\n\tfile := fmt.Sprintf(\"%s-src.vmdk\", i.BaseClean())\n\treturn i.toRemotePath(file)\n}\n\nfunc (i importable) RemoteDstVMDK() string {\n\tfile := fmt.Sprintf(\"%s.vmdk\", i.BaseClean())\n\treturn i.toRemotePath(file)\n}\n\nfunc (i importable) toRemotePath(p string) string {\n\tif i.remotePath == \"\" {\n\t\treturn p\n\t}\n\n\treturn path.Join(i.remotePath, p)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/importx/lease_updater.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage importx\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/progress\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ovfFileItem struct {\n\turl  *url.URL\n\titem types.OvfFileItem\n\tch   chan progress.Report\n}\n\nfunc (o ovfFileItem) Sink() chan<- progress.Report {\n\treturn o.ch\n}\n\ntype leaseUpdater struct {\n\tclient *vim25.Client\n\tlease  *object.HttpNfcLease\n\n\tpos   int64 // Number of bytes\n\ttotal int64 // Total number of bytes\n\n\tdone chan struct{} // When lease updater should stop\n\n\twg sync.WaitGroup // Track when update loop is done\n}\n\nfunc newLeaseUpdater(client *vim25.Client, lease *object.HttpNfcLease, items []ovfFileItem) *leaseUpdater {\n\tl := leaseUpdater{\n\t\tclient: client,\n\t\tlease:  lease,\n\n\t\tdone: make(chan struct{}),\n\t}\n\n\tfor _, item := range items {\n\t\tl.total += item.item.Size\n\t\tgo l.waitForProgress(item)\n\t}\n\n\t// Kickstart update loop\n\tl.wg.Add(1)\n\tgo l.run()\n\n\treturn &l\n}\n\nfunc (l *leaseUpdater) waitForProgress(item ovfFileItem) {\n\tvar pos, total int64\n\n\ttotal = item.item.Size\n\n\tfor {\n\t\tselect {\n\t\tcase <-l.done:\n\t\t\treturn\n\t\tcase p, ok := <-item.ch:\n\t\t\t// Return in case of 
error\n\t\t\tif ok && p.Error() != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !ok {\n\t\t\t\t// Last element on the channel, add to total\n\t\t\t\tatomic.AddInt64(&l.pos, total-pos)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Approximate progress in number of bytes\n\t\t\tx := int64(float32(total) * (p.Percentage() / 100.0))\n\t\t\tatomic.AddInt64(&l.pos, x-pos)\n\t\t\tpos = x\n\t\t}\n\t}\n}\n\nfunc (l *leaseUpdater) run() {\n\tdefer l.wg.Done()\n\n\ttick := time.NewTicker(2 * time.Second)\n\tdefer tick.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-l.done:\n\t\t\treturn\n\t\tcase <-tick.C:\n\t\t\t// From the vim api HttpNfcLeaseProgress(percent) doc, percent ==\n\t\t\t// \"Completion status represented as an integer in the 0-100 range.\"\n\t\t\t// Always report the current value of percent, as it will renew the\n\t\t\t// lease even if the value hasn't changed or is 0.\n\t\t\tpercent := int32(float32(100*atomic.LoadInt64(&l.pos)) / float32(l.total))\n\t\t\terr := l.lease.HttpNfcLeaseProgress(context.TODO(), percent)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"from lease updater: %s\\n\", err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (l *leaseUpdater) Done() {\n\tclose(l.done)\n\tl.wg.Wait()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/importx/options.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage importx\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/ovf\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype Property struct {\n\ttypes.KeyValue\n\tSpec *ovf.Property `json:\",omitempty\"`\n}\n\ntype Network struct {\n\tName    string\n\tNetwork string\n}\n\ntype Options struct {\n\tAllDeploymentOptions []string `json:\",omitempty\"`\n\tDeployment           string\n\n\tAllDiskProvisioningOptions []string `json:\",omitempty\"`\n\tDiskProvisioning           string\n\n\tAllIPAllocationPolicyOptions []string `json:\",omitempty\"`\n\tIPAllocationPolicy           string\n\n\tAllIPProtocolOptions []string `json:\",omitempty\"`\n\tIPProtocol           string\n\n\tPropertyMapping []Property `json:\",omitempty\"`\n\n\tNetworkMapping []Network `json:\",omitempty\"`\n\n\tAnnotation string `json:\",omitempty\"`\n\n\tPowerOn      bool\n\tInjectOvfEnv bool\n\tWaitForIP    bool\n\tName         *string\n}\n\ntype OptionsFlag struct {\n\tOptions Options\n\n\tpath string\n}\n\nfunc newOptionsFlag(ctx context.Context) (*OptionsFlag, context.Context) {\n\treturn &OptionsFlag{}, ctx\n}\n\nfunc (flag *OptionsFlag) Register(ctx context.Context, f *flag.FlagSet) {\n\tf.StringVar(&flag.path, \"options\", \"\", \"Options spec file path for VM deployment\")\n}\n\nfunc (flag *OptionsFlag) Process(ctx 
context.Context) error {\n\tif len(flag.path) == 0 {\n\t\treturn nil\n\t}\n\n\tvar err error\n\tin := os.Stdin\n\n\tif flag.path != \"-\" {\n\t\tin, err = os.Open(flag.path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer in.Close()\n\t}\n\n\treturn json.NewDecoder(in).Decode(&flag.Options)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/importx/ova.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage importx\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ova struct {\n\t*ovfx\n}\n\nfunc init() {\n\tcli.Register(\"import.ova\", &ova{&ovfx{}})\n}\n\nfunc (cmd *ova) Usage() string {\n\treturn \"PATH_TO_OVA\"\n}\n\nfunc (cmd *ova) Run(ctx context.Context, f *flag.FlagSet) error {\n\tfpath, err := cmd.Prepare(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.Archive = &TapeArchive{fpath}\n\n\tmoref, err := cmd.Import(fpath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvm := object.NewVirtualMachine(cmd.Client, *moref)\n\treturn cmd.Deploy(vm)\n}\n\nfunc (cmd *ova) Import(fpath string) (*types.ManagedObjectReference, error) {\n\tovf := \"*.ovf\"\n\treturn cmd.ovfx.Import(ovf)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/importx/ovf.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage importx\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"path\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/ovf\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/progress\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ovfx struct {\n\t*flags.DatastoreFlag\n\t*flags.HostSystemFlag\n\t*flags.OutputFlag\n\t*flags.ResourcePoolFlag\n\n\t*ArchiveFlag\n\t*OptionsFlag\n\t*FolderFlag\n\n\tName string\n\n\tClient       *vim25.Client\n\tDatacenter   *object.Datacenter\n\tDatastore    *object.Datastore\n\tResourcePool *object.ResourcePool\n}\n\nfunc init() {\n\tcli.Register(\"import.ovf\", &ovfx{})\n}\n\nfunc (cmd *ovfx) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\tcmd.ResourcePoolFlag, ctx = flags.NewResourcePoolFlag(ctx)\n\tcmd.ResourcePoolFlag.Register(ctx, f)\n\n\tcmd.ArchiveFlag, ctx = newArchiveFlag(ctx)\n\tcmd.ArchiveFlag.Register(ctx, 
f)\n\tcmd.OptionsFlag, ctx = newOptionsFlag(ctx)\n\tcmd.OptionsFlag.Register(ctx, f)\n\tcmd.FolderFlag, ctx = newFolderFlag(ctx)\n\tcmd.FolderFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.Name, \"name\", \"\", \"Name to use for new entity\")\n}\n\nfunc (cmd *ovfx) Process(ctx context.Context) error {\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.ResourcePoolFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.ArchiveFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OptionsFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.FolderFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *ovfx) Usage() string {\n\treturn \"PATH_TO_OVF\"\n}\n\nfunc (cmd *ovfx) Run(ctx context.Context, f *flag.FlagSet) error {\n\tfpath, err := cmd.Prepare(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.Archive = &FileArchive{fpath}\n\n\tmoref, err := cmd.Import(fpath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvm := object.NewVirtualMachine(cmd.Client, *moref)\n\treturn cmd.Deploy(vm)\n}\n\nfunc (cmd *ovfx) Prepare(f *flag.FlagSet) (string, error) {\n\tvar err error\n\n\targs := f.Args()\n\tif len(args) != 1 {\n\t\treturn \"\", errors.New(\"no file specified\")\n\t}\n\n\tcmd.Client, err = cmd.DatastoreFlag.Client()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcmd.Datacenter, err = cmd.DatastoreFlag.Datacenter()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcmd.Datastore, err = cmd.DatastoreFlag.Datastore()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcmd.ResourcePool, err = cmd.ResourcePoolFlag.ResourcePool()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn f.Arg(0), nil\n}\n\nfunc (cmd *ovfx) Deploy(vm *object.VirtualMachine) error {\n\tif err := cmd.InjectOvfEnv(vm); err != 
nil {\n\t\treturn err\n\t}\n\n\tif err := cmd.PowerOn(vm); err != nil {\n\t\treturn err\n\t}\n\n\tif err := cmd.WaitForIP(vm); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *ovfx) Map(op []Property) (p []types.KeyValue) {\n\tfor _, v := range op {\n\t\tp = append(p, v.KeyValue)\n\t}\n\n\treturn\n}\n\nfunc (cmd *ovfx) NetworkMap(e *ovf.Envelope) (p []types.OvfNetworkMapping) {\n\tctx := context.TODO()\n\tfinder, err := cmd.DatastoreFlag.Finder()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tnetworks := map[string]string{}\n\n\tif e.Network != nil {\n\t\tfor _, net := range e.Network.Networks {\n\t\t\tnetworks[net.Name] = net.Name\n\t\t}\n\t}\n\n\tfor _, net := range cmd.Options.NetworkMapping {\n\t\tnetworks[net.Name] = net.Network\n\t}\n\n\tfor src, dst := range networks {\n\t\tif net, err := finder.Network(ctx, dst); err == nil {\n\t\t\tp = append(p, types.OvfNetworkMapping{\n\t\t\t\tName:    src,\n\t\t\t\tNetwork: net.Reference(),\n\t\t\t})\n\t\t}\n\t}\n\treturn\n}\n\nfunc (cmd *ovfx) Import(fpath string) (*types.ManagedObjectReference, error) {\n\tctx := context.TODO()\n\to, err := cmd.ReadOvf(fpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\te, err := cmd.ReadEnvelope(fpath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse ovf: %s\", err.Error())\n\t}\n\n\tname := \"Govc Virtual Appliance\"\n\tif e.VirtualSystem != nil {\n\t\tname = e.VirtualSystem.ID\n\t\tif e.VirtualSystem.Name != nil {\n\t\t\tname = *e.VirtualSystem.Name\n\t\t}\n\t}\n\n\t// Override name from options if specified\n\tif cmd.Options.Name != nil {\n\t\tname = *cmd.Options.Name\n\t}\n\n\t// Override name from arguments if specified\n\tif cmd.Name != \"\" {\n\t\tname = cmd.Name\n\t}\n\n\tcisp := types.OvfCreateImportSpecParams{\n\t\tDiskProvisioning:   cmd.Options.DiskProvisioning,\n\t\tEntityName:         name,\n\t\tIpAllocationPolicy: cmd.Options.IPAllocationPolicy,\n\t\tIpProtocol:         cmd.Options.IPProtocol,\n\t\tOvfManagerCommonParams: 
types.OvfManagerCommonParams{\n\t\t\tDeploymentOption: cmd.Options.Deployment,\n\t\t\tLocale:           \"US\"},\n\t\tPropertyMapping: cmd.Map(cmd.Options.PropertyMapping),\n\t\tNetworkMapping:  cmd.NetworkMap(e),\n\t}\n\n\tm := object.NewOvfManager(cmd.Client)\n\tspec, err := m.CreateImportSpec(ctx, string(o), cmd.ResourcePool, cmd.Datastore, cisp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif spec.Error != nil {\n\t\treturn nil, errors.New(spec.Error[0].LocalizedMessage)\n\t}\n\tif spec.Warning != nil {\n\t\tfor _, w := range spec.Warning {\n\t\t\t_, _ = cmd.Log(fmt.Sprintf(\"Warning: %s\\n\", w.LocalizedMessage))\n\t\t}\n\t}\n\n\tif cmd.Options.Annotation != \"\" {\n\t\tswitch s := spec.ImportSpec.(type) {\n\t\tcase *types.VirtualMachineImportSpec:\n\t\t\ts.ConfigSpec.Annotation = cmd.Options.Annotation\n\t\tcase *types.VirtualAppImportSpec:\n\t\t\ts.VAppConfigSpec.Annotation = cmd.Options.Annotation\n\t\t}\n\t}\n\n\tvar host *object.HostSystem\n\tif cmd.SearchFlag.IsSet() {\n\t\tif host, err = cmd.HostSystem(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tfolder, err := cmd.Folder()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlease, err := cmd.ResourcePool.ImportVApp(ctx, spec.ImportSpec, folder, host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo, err := lease.Wait(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Build slice of items and URLs first, so that the lease updater can know\n\t// about every item that needs to be uploaded, and thereby infer progress.\n\tvar items []ovfFileItem\n\n\tfor _, device := range info.DeviceUrl {\n\t\tfor _, item := range spec.FileItem {\n\t\t\tif device.ImportKey != item.DeviceId {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tu, err := cmd.Client.ParseURL(device.Url)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\ti := ovfFileItem{\n\t\t\t\turl:  u,\n\t\t\t\titem: item,\n\t\t\t\tch:   make(chan progress.Report),\n\t\t\t}\n\n\t\t\titems = append(items, i)\n\t\t}\n\t}\n\n\tu := 
newLeaseUpdater(cmd.Client, lease, items)\n\tdefer u.Done()\n\n\tfor _, i := range items {\n\t\terr = cmd.Upload(lease, i)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &info.Entity, lease.HttpNfcLeaseComplete(ctx)\n}\n\nfunc (cmd *ovfx) Upload(lease *object.HttpNfcLease, ofi ovfFileItem) error {\n\titem := ofi.item\n\tfile := item.Path\n\n\tf, size, err := cmd.Open(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tlogger := cmd.ProgressLogger(fmt.Sprintf(\"Uploading %s... \", path.Base(file)))\n\tdefer logger.Wait()\n\n\topts := soap.Upload{\n\t\tContentLength: size,\n\t\tProgress:      progress.Tee(ofi, logger),\n\t}\n\n\t// Non-disk files (such as .iso) use the PUT method.\n\t// Overwrite: t header is also required in this case (ovftool does the same)\n\tif item.Create {\n\t\topts.Method = \"PUT\"\n\t\topts.Headers = map[string]string{\n\t\t\t\"Overwrite\": \"t\",\n\t\t}\n\t} else {\n\t\topts.Method = \"POST\"\n\t\topts.Type = \"application/x-vnd.vmware-streamVmdk\"\n\t}\n\n\treturn cmd.Client.Client.Upload(f, ofi.url, &opts)\n}\n\nfunc (cmd *ovfx) PowerOn(vm *object.VirtualMachine) error {\n\tctx := context.TODO()\n\tif !cmd.Options.PowerOn {\n\t\treturn nil\n\t}\n\n\tcmd.Log(\"Powering on VM...\\n\")\n\n\ttask, err := vm.PowerOn(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err = task.WaitForResult(ctx, nil); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *ovfx) InjectOvfEnv(vm *object.VirtualMachine) error {\n\tif !cmd.Options.InjectOvfEnv {\n\t\treturn nil\n\t}\n\n\tcmd.Log(\"Injecting OVF environment...\\n\")\n\n\tvar opts []types.BaseOptionValue\n\n\ta := cmd.Client.ServiceContent.About\n\n\t// build up Environment in order to marshal to xml\n\tvar props []ovf.EnvProperty\n\tfor _, p := range cmd.Options.PropertyMapping {\n\t\tprops = append(props, ovf.EnvProperty{\n\t\t\tKey:   p.Key,\n\t\t\tValue: p.Value,\n\t\t})\n\t}\n\n\tenv := ovf.Env{\n\t\tEsxID: 
vm.Reference().Value,\n\t\tPlatform: &ovf.PlatformSection{\n\t\t\tKind:    a.Name,\n\t\t\tVersion: a.Version,\n\t\t\tVendor:  a.Vendor,\n\t\t\tLocale:  \"US\",\n\t\t},\n\t\tProperty: &ovf.PropertySection{\n\t\t\tProperties: props,\n\t\t},\n\t}\n\n\topts = append(opts, &types.OptionValue{\n\t\tKey:   \"guestinfo.ovfEnv\",\n\t\tValue: env.MarshalManual(),\n\t})\n\n\tctx := context.Background()\n\n\ttask, err := vm.Reconfigure(ctx, types.VirtualMachineConfigSpec{\n\t\tExtraConfig: opts,\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn task.Wait(ctx)\n}\n\nfunc (cmd *ovfx) WaitForIP(vm *object.VirtualMachine) error {\n\tctx := context.TODO()\n\tif !cmd.Options.PowerOn || !cmd.Options.WaitForIP {\n\t\treturn nil\n\t}\n\n\tcmd.Log(\"Waiting for IP address...\\n\")\n\tip, err := vm.WaitForIP(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.Log(fmt.Sprintf(\"Received IP address: %s\\n\", ip))\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/importx/spec.go",
    "content": "/*\nCopyright (c) 2015-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage importx\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"path\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/ovf\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nvar (\n\t// all possible ovf property values\n\t// the first element being the default value\n\tallDeploymentOptions         = []string{\"small\", \"medium\", \"large\"}\n\tallDiskProvisioningOptions   = []string{\"thin\", \"monolithicSparse\", \"monolithicFlat\", \"twoGbMaxExtentSparse\", \"twoGbMaxExtentFlat\", \"seSparse\", \"eagerZeroedThick\", \"thick\", \"sparse\", \"flat\"}\n\tallIPAllocationPolicyOptions = []string{\"dhcpPolicy\", \"transientPolicy\", \"fixedPolicy\", \"fixedAllocatedPolicy\"}\n\tallIPProtocolOptions         = []string{\"IPv4\", \"IPv6\"}\n)\n\ntype spec struct {\n\t*ArchiveFlag\n\n\tverbose bool\n}\n\nfunc init() {\n\tcli.Register(\"import.spec\", &spec{})\n}\n\nfunc (cmd *spec) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ArchiveFlag, ctx = newArchiveFlag(ctx)\n\tcmd.ArchiveFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.verbose, \"verbose\", false, \"Verbose spec output\")\n}\n\nfunc (cmd *spec) Process(ctx context.Context) error {\n\tif err := cmd.ArchiveFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *spec) Usage() string {\n\treturn 
\"PATH_TO_OVF_OR_OVA\"\n}\n\nfunc (cmd *spec) Run(ctx context.Context, f *flag.FlagSet) error {\n\tfpath := \"\"\n\targs := f.Args()\n\tif len(args) == 1 {\n\t\tfpath = f.Arg(0)\n\t}\n\n\tif len(fpath) > 0 {\n\t\tswitch path.Ext(fpath) {\n\t\tcase \".ovf\":\n\t\t\tcmd.Archive = &FileArchive{fpath}\n\t\tcase \"\", \".ova\":\n\t\t\tcmd.Archive = &TapeArchive{fpath}\n\t\t\tfpath = \"*.ovf\"\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"invalid file extension %s\", path.Ext(fpath))\n\t\t}\n\t}\n\n\treturn cmd.Spec(fpath)\n}\n\nfunc (cmd *spec) Map(e *ovf.Envelope) (res []Property) {\n\tif e == nil || e.VirtualSystem == nil {\n\t\treturn nil\n\t}\n\n\tfor _, p := range e.VirtualSystem.Product {\n\t\tfor i, v := range p.Property {\n\t\t\td := \"\"\n\t\t\tif v.Default != nil {\n\t\t\t\td = *v.Default\n\t\t\t}\n\n\t\t\t// From OVF spec, section 9.5.1:\n\t\t\t// key-value-env = [class-value \".\"] key-value-prod [\".\" instance-value]\n\t\t\tk := v.Key\n\t\t\tif p.Class != nil {\n\t\t\t\tk = fmt.Sprintf(\"%s.%s\", *p.Class, k)\n\t\t\t}\n\t\t\tif p.Instance != nil {\n\t\t\t\tk = fmt.Sprintf(\"%s.%s\", k, *p.Instance)\n\t\t\t}\n\n\t\t\tnp := Property{KeyValue: types.KeyValue{Key: k, Value: d}}\n\t\t\tif cmd.verbose {\n\t\t\t\tnp.Spec = &p.Property[i]\n\t\t\t}\n\n\t\t\tres = append(res, np)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (cmd *spec) Spec(fpath string) error {\n\te, err := cmd.ReadEnvelope(fpath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar deploymentOptions = allDeploymentOptions\n\tif e.DeploymentOption != nil && e.DeploymentOption.Configuration != nil {\n\t\tdeploymentOptions = nil\n\n\t\t// add default first\n\t\tfor _, c := range e.DeploymentOption.Configuration {\n\t\t\tif c.Default != nil && *c.Default {\n\t\t\t\tdeploymentOptions = append(deploymentOptions, c.ID)\n\t\t\t}\n\t\t}\n\n\t\tfor _, c := range e.DeploymentOption.Configuration {\n\t\t\tif c.Default == nil || !*c.Default {\n\t\t\t\tdeploymentOptions = append(deploymentOptions, 
c.ID)\n\t\t\t}\n\t\t}\n\t}\n\n\to := Options{\n\t\tDeployment:         deploymentOptions[0],\n\t\tDiskProvisioning:   allDiskProvisioningOptions[0],\n\t\tIPAllocationPolicy: allIPAllocationPolicyOptions[0],\n\t\tIPProtocol:         allIPProtocolOptions[0],\n\t\tPowerOn:            false,\n\t\tWaitForIP:          false,\n\t\tInjectOvfEnv:       false,\n\t\tPropertyMapping:    cmd.Map(e)}\n\n\tif e.VirtualSystem != nil && e.VirtualSystem.Annotation != nil {\n\t\tfor _, a := range e.VirtualSystem.Annotation {\n\t\t\to.Annotation += a.Annotation\n\t\t}\n\t}\n\n\tif e.Network != nil {\n\t\tfor _, net := range e.Network.Networks {\n\t\t\to.NetworkMapping = append(o.NetworkMapping, Network{net.Name, \"\"})\n\t\t}\n\t}\n\n\tif cmd.verbose {\n\t\to.AllDeploymentOptions = deploymentOptions\n\t\to.AllDiskProvisioningOptions = allDiskProvisioningOptions\n\t\to.AllIPAllocationPolicyOptions = allIPAllocationPolicyOptions\n\t\to.AllIPProtocolOptions = allIPProtocolOptions\n\t}\n\n\tj, err := json.Marshal(&o)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println(string(j))\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/importx/vmdk.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage importx\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"path\"\n\t\"reflect\"\n\t\"regexp\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/progress\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype vmdk struct {\n\t*flags.DatastoreFlag\n\t*flags.ResourcePoolFlag\n\t*flags.OutputFlag\n\n\tupload bool\n\tforce  bool\n\tkeep   bool\n\n\tClient       *vim25.Client\n\tDatacenter   *object.Datacenter\n\tDatastore    *object.Datastore\n\tResourcePool *object.ResourcePool\n}\n\nfunc init() {\n\tcli.Register(\"import.vmdk\", &vmdk{})\n\tcli.Alias(\"import.vmdk\", \"datastore.import\")\n}\n\nfunc (cmd *vmdk) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\tcmd.ResourcePoolFlag, ctx = flags.NewResourcePoolFlag(ctx)\n\tcmd.ResourcePoolFlag.Register(ctx, f)\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.upload, \"upload\", true, \"Upload specified disk\")\n\tf.BoolVar(&cmd.force, \"force\", 
false, \"Overwrite existing disk\")\n\tf.BoolVar(&cmd.keep, \"keep\", false, \"Keep uploaded disk after import\")\n}\n\nfunc (cmd *vmdk) Process(ctx context.Context) error {\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.ResourcePoolFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *vmdk) Usage() string {\n\treturn \"PATH_TO_VMDK [REMOTE_DIRECTORY]\"\n}\n\nfunc (cmd *vmdk) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvar err error\n\n\targs := f.Args()\n\tif len(args) < 1 {\n\t\treturn errors.New(\"no file to import\")\n\t}\n\n\tfile := importable{\n\t\tlocalPath: f.Arg(0),\n\t}\n\n\t// Include remote path if specified\n\tif len(args) >= 2 {\n\t\tfile.remotePath = f.Arg(1)\n\t}\n\n\tcmd.Client, err = cmd.DatastoreFlag.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.Datacenter, err = cmd.DatastoreFlag.Datacenter()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.Datastore, err = cmd.DatastoreFlag.Datastore()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.ResourcePool, err = cmd.ResourcePoolFlag.ResourcePool()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = cmd.PrepareDestination(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.upload {\n\t\terr = cmd.Upload(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn cmd.Import(file)\n}\n\n// PrepareDestination makes sure that the destination VMDK does not yet exist.\n// If the force flag is passed, it removes the existing VMDK. 
This functions\n// exists to give a meaningful error if the remote VMDK already exists.\n//\n// CopyVirtualDisk can return a \"<src> file does not exist\" error while in fact\n// the source file *does* exist and the *destination* file also exist.\n//\nfunc (cmd *vmdk) PrepareDestination(i importable) error {\n\tctx := context.TODO()\n\tvmdkPath := i.RemoteDstVMDK()\n\tres, err := cmd.Datastore.Stat(ctx, vmdkPath)\n\tif err != nil {\n\t\tswitch err.(type) {\n\t\tcase object.DatastoreNoSuchDirectoryError:\n\t\t\t// The base path doesn't exist. Create it.\n\t\t\tdsPath := cmd.Datastore.Path(path.Dir(vmdkPath))\n\t\t\tm := object.NewFileManager(cmd.Client)\n\t\t\treturn m.MakeDirectory(ctx, dsPath, cmd.Datacenter, true)\n\t\tcase object.DatastoreNoSuchFileError:\n\t\t\t// Destination path doesn't exist; all good to continue with import.\n\t\t\treturn nil\n\t\t}\n\n\t\treturn err\n\t}\n\n\t// Check that the returned entry has the right type.\n\tswitch res.(type) {\n\tcase *types.VmDiskFileInfo:\n\tdefault:\n\t\texpected := \"VmDiskFileInfo\"\n\t\tactual := reflect.TypeOf(res)\n\t\tpanic(fmt.Sprintf(\"Expected: %s, actual: %s\", expected, actual))\n\t}\n\n\tif !cmd.force {\n\t\tdsPath := cmd.Datastore.Path(vmdkPath)\n\t\terr = fmt.Errorf(\"File %s already exists\", dsPath)\n\t\treturn err\n\t}\n\n\t// Delete existing disk.\n\terr = cmd.DeleteDisk(vmdkPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *vmdk) Upload(i importable) error {\n\tctx := context.TODO()\n\tp := soap.DefaultUpload\n\tif cmd.OutputFlag.TTY {\n\t\tlogger := cmd.ProgressLogger(\"Uploading... 
\")\n\t\tp.Progress = logger\n\t\tdefer logger.Wait()\n\t}\n\n\treturn cmd.Datastore.UploadFile(ctx, i.localPath, i.RemoteSrcVMDK(), &p)\n}\n\nfunc (cmd *vmdk) Import(i importable) error {\n\terr := cmd.Copy(i)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !cmd.keep {\n\t\terr = cmd.DeleteDisk(i.RemoteSrcVMDK())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *vmdk) Copy(i importable) error {\n\tvar err error\n\n\tlogger := cmd.ProgressLogger(\"Importing... \")\n\tdefer logger.Wait()\n\n\tagg := progress.NewAggregator(logger)\n\tdefer agg.Done()\n\n\tswitch p := cmd.Client.ServiceContent.About.ApiType; p {\n\tcase \"HostAgent\":\n\t\terr = cmd.CopyHostAgent(i, agg)\n\tcase \"VirtualCenter\":\n\t\terr = cmd.CopyVirtualCenter(i, agg)\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported ApiType: %s\", p)\n\t}\n\n\treturn err\n}\n\nfunc (cmd *vmdk) CopyHostAgent(i importable, s progress.Sinker) error {\n\tctx := context.TODO()\n\tspec := &types.VirtualDiskSpec{\n\t\tAdapterType: \"lsiLogic\",\n\t\tDiskType:    \"thin\",\n\t}\n\n\tdc := cmd.Datacenter\n\tsrc := cmd.Datastore.Path(i.RemoteSrcVMDK())\n\tdst := cmd.Datastore.Path(i.RemoteDstVMDK())\n\tvdm := object.NewVirtualDiskManager(cmd.Client)\n\ttask, err := vdm.CopyVirtualDisk(ctx, src, dc, dst, dc, spec, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tps := progress.Prefix(s, \"copying disk\")\n\t_, err = task.WaitForResult(ctx, ps)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *vmdk) CopyVirtualCenter(i importable, s progress.Sinker) error {\n\tvar err error\n\n\tsrcName := i.BaseClean() + \"-srcvm\"\n\tdstName := i.BaseClean() + \"-dstvm\"\n\n\tspec := &configSpec{\n\t\tName:    srcName,\n\t\tGuestId: \"otherGuest\",\n\t\tFiles: &types.VirtualMachineFileInfo{\n\t\t\tVmPathName: fmt.Sprintf(\"[%s]\", cmd.Datastore.Name()),\n\t\t},\n\t}\n\n\tspec.AddDisk(cmd.Datastore, i.RemoteSrcVMDK())\n\n\tsrc, err := cmd.CreateVM(spec)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tdst, err := cmd.CloneVM(src, dstName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = cmd.DestroyVM(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvmdk, err := cmd.DetachDisk(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = cmd.MoveDisk(vmdk, i.RemoteDstVMDK())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = cmd.DestroyVM(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *vmdk) MoveDisk(src, dst string) error {\n\tctx := context.TODO()\n\tdsSrc := cmd.Datastore.Path(src)\n\tdsDst := cmd.Datastore.Path(dst)\n\tvdm := object.NewVirtualDiskManager(cmd.Client)\n\ttask, err := vdm.MoveVirtualDisk(ctx, dsSrc, cmd.Datacenter, dsDst, cmd.Datacenter, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn task.Wait(ctx)\n}\n\nfunc (cmd *vmdk) DeleteDisk(path string) error {\n\tctx := context.TODO()\n\tvdm := object.NewVirtualDiskManager(cmd.Client)\n\ttask, err := vdm.DeleteVirtualDisk(ctx, cmd.Datastore.Path(path), cmd.Datacenter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn task.Wait(ctx)\n}\n\nfunc (cmd *vmdk) DetachDisk(vm *object.VirtualMachine) (string, error) {\n\tctx := context.TODO()\n\tvar mvm mo.VirtualMachine\n\n\tpc := property.DefaultCollector(cmd.Client)\n\terr := pc.RetrieveOne(ctx, vm.Reference(), []string{\"config.hardware\"}, &mvm)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tspec := new(configSpec)\n\tdsFile := spec.RemoveDisk(&mvm)\n\n\ttask, err := vm.Reconfigure(ctx, spec.ToSpec())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = task.Wait(ctx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn dsFile, nil\n}\n\nfunc (cmd *vmdk) CreateVM(spec *configSpec) (*object.VirtualMachine, error) {\n\tctx := context.TODO()\n\tfolders, err := cmd.Datacenter.Folders(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttask, err := folders.VmFolder.CreateVM(ctx, spec.ToSpec(), cmd.ResourcePool, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo, err := 
task.WaitForResult(ctx, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference)), nil\n}\n\nfunc (cmd *vmdk) CloneVM(vm *object.VirtualMachine, name string) (*object.VirtualMachine, error) {\n\tctx := context.TODO()\n\tfolders, err := cmd.Datacenter.Folders(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tspec := types.VirtualMachineCloneSpec{\n\t\tConfig:   &types.VirtualMachineConfigSpec{},\n\t\tLocation: types.VirtualMachineRelocateSpec{},\n\t}\n\n\ttask, err := vm.Clone(ctx, folders.VmFolder, name, spec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo, err := task.WaitForResult(ctx, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference)), nil\n}\n\nfunc (cmd *vmdk) DestroyVM(vm *object.VirtualMachine) error {\n\tctx := context.TODO()\n\t_, err := cmd.DetachDisk(vm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttask, err := vm.Destroy(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = task.Wait(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\ntype configSpec types.VirtualMachineConfigSpec\n\nfunc (c *configSpec) ToSpec() types.VirtualMachineConfigSpec {\n\treturn types.VirtualMachineConfigSpec(*c)\n}\n\nfunc (c *configSpec) AddChange(d types.BaseVirtualDeviceConfigSpec) {\n\tc.DeviceChange = append(c.DeviceChange, d)\n}\n\nfunc (c *configSpec) AddDisk(ds *object.Datastore, path string) {\n\tvar devices object.VirtualDeviceList\n\n\tcontroller, err := devices.CreateSCSIController(\"\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdevices = append(devices, controller)\n\n\tdisk := devices.CreateDisk(controller.(types.BaseVirtualController), ds.Reference(), ds.Path(path))\n\tdevices = append(devices, disk)\n\n\tspec, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tc.DeviceChange = 
append(c.DeviceChange, spec...)\n}\n\nvar dsPathRegexp = regexp.MustCompile(`^\\[.*\\] (.*)$`)\n\nfunc (c *configSpec) RemoveDisk(vm *mo.VirtualMachine) string {\n\tvar file string\n\n\tfor _, d := range vm.Config.Hardware.Device {\n\t\tswitch device := d.(type) {\n\t\tcase *types.VirtualDisk:\n\t\t\tif file != \"\" {\n\t\t\t\tpanic(\"expected VM to have only one disk\")\n\t\t\t}\n\n\t\t\tswitch backing := device.Backing.(type) {\n\t\t\tcase *types.VirtualDiskFlatVer1BackingInfo:\n\t\t\t\tfile = backing.FileName\n\t\t\tcase *types.VirtualDiskFlatVer2BackingInfo:\n\t\t\t\tfile = backing.FileName\n\t\t\tcase *types.VirtualDiskSeSparseBackingInfo:\n\t\t\t\tfile = backing.FileName\n\t\t\tcase *types.VirtualDiskSparseVer1BackingInfo:\n\t\t\t\tfile = backing.FileName\n\t\t\tcase *types.VirtualDiskSparseVer2BackingInfo:\n\t\t\t\tfile = backing.FileName\n\t\t\tdefault:\n\t\t\t\tname := reflect.TypeOf(device.Backing).String()\n\t\t\t\tpanic(fmt.Sprintf(\"unexpected backing type: %s\", name))\n\t\t\t}\n\n\t\t\t// Remove [datastore] prefix\n\t\t\tm := dsPathRegexp.FindStringSubmatch(file)\n\t\t\tif len(m) != 2 {\n\t\t\t\tpanic(fmt.Sprintf(\"expected regexp match for %#v\", file))\n\t\t\t}\n\t\t\tfile = m[1]\n\n\t\t\tremoveOp := &types.VirtualDeviceConfigSpec{\n\t\t\t\tOperation: types.VirtualDeviceConfigSpecOperationRemove,\n\t\t\t\tDevice:    device,\n\t\t\t}\n\n\t\t\tc.AddChange(removeOp)\n\t\t}\n\t}\n\n\treturn file\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/license/add.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage license\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/license\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype add struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n}\n\nfunc init() {\n\tcli.Register(\"license.add\", &add{})\n}\n\nfunc (cmd *add) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n}\n\nfunc (cmd *add) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *add) Usage() string {\n\treturn \"KEY...\"\n}\n\nfunc (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error {\n\tclient, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm := license.NewManager(client)\n\n\t// From the vSphere 5.5 documentation:\n\t//\n\t//     To specify the edition type and any optional functions, use\n\t//     updateLicense for ESX Server and addLicense follow by\n\t//     LicenseAssingmentManager.updateAssignedLicense for VirtualCenter.\n\t//\n\tvar addFunc func(ctx 
context.Context, key string, labels map[string]string) (types.LicenseManagerLicenseInfo, error)\n\tswitch t := client.ServiceContent.About.ApiType; t {\n\tcase \"HostAgent\":\n\t\taddFunc = m.Update\n\tcase \"VirtualCenter\":\n\t\taddFunc = m.Add\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported ApiType: %s\", t)\n\t}\n\n\tresult := make(licenseOutput, 0)\n\tfor _, v := range f.Args() {\n\t\tlicense, err := addFunc(ctx, v, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tresult = append(result, license)\n\t}\n\n\treturn cmd.WriteResult(licenseOutput(result))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/license/assign.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage license\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/license\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype assign struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n\t*flags.HostSystemFlag\n\n\tname   string\n\tremove bool\n}\n\nfunc init() {\n\tcli.Register(\"license.assign\", &assign{})\n}\n\nfunc (cmd *assign) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.name, \"name\", \"\", \"Display name\")\n\tf.BoolVar(&cmd.remove, \"remove\", false, \"Remove assignment\")\n}\n\nfunc (cmd *assign) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *assign) Usage() string {\n\treturn \"KEY\"\n}\n\nfunc (cmd *assign) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 1 {\n\t\treturn 
flag.ErrHelp\n\t}\n\n\tkey := f.Arg(0)\n\n\tclient, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := license.NewManager(client).AssignmentManager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thost, err := cmd.HostSystemIfSpecified()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar id string\n\n\tif host == nil {\n\t\t// Default to vCenter UUID\n\t\tid = client.ServiceContent.About.InstanceUuid\n\t} else {\n\t\tid = host.Reference().Value\n\t}\n\n\tif cmd.remove {\n\t\treturn m.Remove(ctx, id)\n\t}\n\n\tinfo, err := m.Update(ctx, id, key, cmd.name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn cmd.WriteResult(licenseOutput([]types.LicenseManagerLicenseInfo{*info}))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/license/assigned.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage license\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/license\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype assigned struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n\n\tid string\n}\n\nfunc init() {\n\tcli.Register(\"license.assigned.ls\", &assigned{})\n}\n\nfunc (cmd *assigned) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.id, \"id\", \"\", \"Entity ID\")\n}\n\nfunc (cmd *assigned) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *assigned) Run(ctx context.Context, f *flag.FlagSet) error {\n\tclient, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := license.NewManager(client).AssignmentManager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tassigned, err := m.QueryAssigned(ctx, cmd.id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn 
cmd.WriteResult(assignedOutput(assigned))\n}\n\ntype assignedOutput []types.LicenseAssignmentManagerLicenseAssignment\n\nfunc (res assignedOutput) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(os.Stdout, 4, 0, 2, ' ', 0)\n\tfmt.Fprintf(tw, \"Id:\\tScope:\\tName:\\tLicense:\\n\")\n\tfor _, v := range res {\n\t\tfmt.Fprintf(tw, \"%s\\t\", v.EntityId)\n\t\tfmt.Fprintf(tw, \"%s\\t\", v.Scope)\n\t\tfmt.Fprintf(tw, \"%s\\t\", v.EntityDisplayName)\n\t\tfmt.Fprintf(tw, \"%s\\t\", v.AssignedLicense.LicenseKey)\n\t\tfmt.Fprintf(tw, \"\\n\")\n\t}\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/license/decode.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage license\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/license\"\n)\n\ntype decode struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n\n\tfeature string\n}\n\nfunc init() {\n\tcli.Register(\"license.decode\", &decode{})\n}\n\nfunc (cmd *decode) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.feature, \"feature\", \"\", featureUsage)\n}\n\nfunc (cmd *decode) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *decode) Usage() string {\n\treturn \"KEY...\"\n}\n\nfunc (cmd *decode) Run(ctx context.Context, f *flag.FlagSet) error {\n\tclient, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar result license.InfoList\n\tm := license.NewManager(client)\n\tfor _, v := range f.Args() {\n\t\tlicense, err := m.Decode(ctx, v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tresult = append(result, license)\n\t}\n\n\tif cmd.feature != \"\" {\n\t\tresult = 
result.WithFeature(cmd.feature)\n\t}\n\n\treturn cmd.WriteResult(licenseOutput(result))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/license/ls.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage license\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/license\"\n)\n\nvar featureUsage = \"List licenses with given feature\"\n\ntype ls struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n\n\tfeature string\n}\n\nfunc init() {\n\tcli.Register(\"license.ls\", &ls{})\n}\n\nfunc (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.feature, \"feature\", \"\", featureUsage)\n}\n\nfunc (cmd *ls) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error {\n\tclient, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm := license.NewManager(client)\n\tresult, err := m.List(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.feature != \"\" {\n\t\tresult = result.WithFeature(cmd.feature)\n\t}\n\n\treturn cmd.WriteResult(licenseOutput(result))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/license/output.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage license\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype licenseOutput []types.LicenseManagerLicenseInfo\n\nfunc (res licenseOutput) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(os.Stdout, 4, 0, 2, ' ', 0)\n\tfmt.Fprintf(tw, \"Key:\\tEdition:\\tUsed:\\tTotal:\\n\")\n\tfor _, v := range res {\n\t\tfmt.Fprintf(tw, \"%s\\t\", v.LicenseKey)\n\t\tfmt.Fprintf(tw, \"%s\\t\", v.EditionKey)\n\t\tfmt.Fprintf(tw, \"%d\\t\", v.Used)\n\t\tfmt.Fprintf(tw, \"%d\\t\", v.Total)\n\t\tfmt.Fprintf(tw, \"\\n\")\n\t}\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/license/remove.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage license\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/license\"\n)\n\ntype remove struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n}\n\nfunc init() {\n\tcli.Register(\"license.remove\", &remove{})\n}\n\nfunc (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n}\n\nfunc (cmd *remove) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *remove) Usage() string {\n\treturn \"KEY...\"\n}\n\nfunc (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error {\n\tclient, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm := license.NewManager(client)\n\tfor _, v := range f.Args() {\n\t\terr = m.Remove(ctx, v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/logs/command.go",
    "content": "/*\nCopyright (c) 2015-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage logs\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype logs struct {\n\t*flags.HostSystemFlag\n\n\tMax int32\n\tKey string\n\n\tfollow bool\n}\n\nfunc init() {\n\tcli.Register(\"logs\", &logs{})\n}\n\nfunc (cmd *logs) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tcmd.Max = 25 // default\n\tf.Var(flags.NewInt32(&cmd.Max), \"n\", \"Output the last N log lines\")\n\tf.StringVar(&cmd.Key, \"log\", \"\", \"Log file key\")\n\tf.BoolVar(&cmd.follow, \"f\", false, \"Follow log file changes\")\n}\n\nfunc (cmd *logs) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *logs) Description() string {\n\treturn `View VPX and ESX logs.\n\nThe '-log' option defaults to \"hostd\" when connected directly to a host or\nwhen connected to VirtualCenter and a '-host' option is given.  Otherwise,\nthe '-log' option defaults to \"vpxd:vpxd.log\".  The '-host' option is ignored\nwhen connected directly to a host.  
See 'govc logs.ls' for other '-log' options.\n\nExamples:\n  govc logs -n 1000 -f\n  govc logs -host esx1\n  govc logs -host esx1 -log vmkernel`\n}\n\nfunc (cmd *logs) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefaultKey := \"hostd\"\n\tvar host *object.HostSystem\n\n\tif c.IsVC() {\n\t\thost, err = cmd.HostSystemIfSpecified()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif host == nil {\n\t\t\tdefaultKey = \"vpxd:vpxd.log\"\n\t\t}\n\t}\n\n\tm := object.NewDiagnosticManager(c)\n\n\tkey := cmd.Key\n\tif key == \"\" {\n\t\tkey = defaultKey\n\t}\n\n\tl := m.Log(ctx, host, key)\n\n\terr = l.Seek(ctx, cmd.Max)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor {\n\t\t_, err = l.Copy(ctx, cmd.Out)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tif !cmd.follow {\n\t\t\tbreak\n\t\t}\n\n\t\t<-time.After(time.Second)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/logs/download.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage logs\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"path\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype download struct {\n\t*flags.DatacenterFlag\n\n\tIncludeDefault bool\n}\n\nfunc init() {\n\tcli.Register(\"logs.download\", &download{})\n}\n\nfunc (cmd *download) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.IncludeDefault, \"default\", true, \"Specifies if the bundle should include the default server\")\n}\n\nfunc (cmd *download) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *download) Usage() string {\n\treturn \"[PATH]...\"\n}\n\nfunc (cmd *download) Description() string {\n\treturn `Generate diagnostic bundles.\n\nA diagnostic bundle includes log files and other configuration information.\n\nUse PATH to include a specific set of hosts to include.\n\nExamples:\n  govc logs.download\n  govc logs.download host-a host-b`\n}\n\nfunc (cmd *download) DownloadFile(c *vim25.Client, b string) error {\n\tu, 
err := c.Client.ParseURL(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdst := path.Base(u.Path)\n\tp := soap.DefaultDownload\n\tif cmd.OutputFlag.TTY {\n\t\tlogger := cmd.ProgressLogger(fmt.Sprintf(\"Downloading %s... \", dst))\n\t\tdefer logger.Wait()\n\t\tp.Progress = logger\n\t}\n\n\treturn c.Client.DownloadFile(dst, u, &p)\n}\n\nfunc (cmd *download) GenerateLogBundles(m *object.DiagnosticManager, host []*object.HostSystem) ([]types.DiagnosticManagerBundleInfo, error) {\n\tctx := context.TODO()\n\tlogger := cmd.ProgressLogger(\"Generating log bundles... \")\n\tdefer logger.Wait()\n\n\ttask, err := m.GenerateLogBundles(ctx, cmd.IncludeDefault, host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr, err := task.WaitForResult(ctx, logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r.Result.(types.ArrayOfDiagnosticManagerBundleInfo).DiagnosticManagerBundleInfo, nil\n}\n\nfunc (cmd *download) Run(ctx context.Context, f *flag.FlagSet) error {\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar host []*object.HostSystem\n\n\tfor _, arg := range f.Args() {\n\t\ths, err := finder.HostSystemList(ctx, arg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\thost = append(host, hs...)\n\t}\n\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm := object.NewDiagnosticManager(c)\n\n\tbundles, err := cmd.GenerateLogBundles(m, host)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, bundle := range bundles {\n\t\terr := cmd.DownloadFile(c, bundle.Url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/logs/ls.go",
    "content": "/*\nCopyright (c) 2015-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage logs\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype ls struct {\n\t*flags.HostSystemFlag\n}\n\nfunc init() {\n\tcli.Register(\"logs.ls\", &ls{})\n}\n\nfunc (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n}\n\nfunc (cmd *ls) Process(ctx context.Context) error {\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *ls) Description() string {\n\treturn `List diagnostic log keys.\n\nExamples:\n  govc logs.ls\n  govc logs.ls -host host-a`\n}\n\nfunc (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar host *object.HostSystem\n\n\tif c.IsVC() {\n\t\thost, err = cmd.HostSystemIfSpecified()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tm := object.NewDiagnosticManager(c)\n\n\tdesc, err := m.QueryDescriptions(ctx, host)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)\n\n\tfor _, d := range desc {\n\t\tfmt.Fprintf(tw, \"%s\\t%s\\n\", d.Key, d.FileName)\n\t}\n\n\treturn 
tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/ls/command.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage ls\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/list\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ls struct {\n\t*flags.DatacenterFlag\n\n\tLong  bool\n\tType  string\n\tToRef bool\n\tDeRef bool\n}\n\nfunc init() {\n\tcli.Register(\"ls\", &ls{})\n}\n\nfunc (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.Long, \"l\", false, \"Long listing format\")\n\tf.BoolVar(&cmd.ToRef, \"i\", false, \"Print the managed object reference\")\n\tf.BoolVar(&cmd.DeRef, \"L\", false, \"Follow managed object references\")\n\tf.StringVar(&cmd.Type, \"t\", \"\", \"Object type\")\n}\n\nfunc (cmd *ls) Description() string {\n\treturn `List inventory items.\n\nExamples:\n  govc ls -l '*'\n  govc ls -t ClusterComputeResource host\n  govc ls -t Datastore host/ClusterA/* | grep -v local | xargs -n1 basename | sort | uniq`\n}\n\nfunc (cmd *ls) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *ls) Usage() string {\n\treturn \"[PATH]...\"\n}\n\nfunc (cmd 
*ls) typeMatch(ref types.ManagedObjectReference) bool {\n\tif cmd.Type == \"\" {\n\t\treturn true\n\t}\n\n\treturn strings.ToLower(cmd.Type) == strings.ToLower(ref.Type)\n}\n\nfunc (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error {\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlr := listResult{\n\t\tls:       cmd,\n\t\tElements: nil,\n\t}\n\n\targs := f.Args()\n\tif len(args) == 0 {\n\t\targs = []string{\".\"}\n\t}\n\n\tvar ref = new(types.ManagedObjectReference)\n\n\tvar types []string\n\tif cmd.Type != \"\" {\n\t\t// TODO: support multiple -t flags\n\t\ttypes = []string{cmd.Type}\n\t}\n\n\tfor _, arg := range args {\n\t\tif cmd.DeRef && ref.FromString(arg) {\n\t\t\te, err := finder.Element(ctx, *ref)\n\t\t\tif err == nil {\n\t\t\t\tif cmd.typeMatch(*ref) {\n\t\t\t\t\tif e.Path == \"/\" && ref.Type != \"Folder\" {\n\t\t\t\t\t\t// Special case: when given a moref with no ancestors,\n\t\t\t\t\t\t// just echo the moref.\n\t\t\t\t\t\te.Path = ref.String()\n\t\t\t\t\t}\n\t\t\t\t\tlr.Elements = append(lr.Elements, *e)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tes, err := finder.ManagedObjectListChildren(ctx, arg, types...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, e := range es {\n\t\t\tif cmd.typeMatch(e.Object.Reference()) {\n\t\t\t\tlr.Elements = append(lr.Elements, e)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn cmd.WriteResult(lr)\n}\n\ntype listResult struct {\n\t*ls\n\tElements []list.Element `json:\"elements\"`\n}\n\nfunc (l listResult) Write(w io.Writer) error {\n\tvar err error\n\n\tfor _, e := range l.Elements {\n\t\tif l.ToRef {\n\t\t\tfmt.Fprint(w, e.Object.Reference().String())\n\t\t\tif l.Long {\n\t\t\t\tfmt.Fprintf(w, \" %s\", e.Path)\n\t\t\t}\n\t\t\tfmt.Fprintln(w)\n\t\t\tcontinue\n\t\t}\n\n\t\tif !l.Long {\n\t\t\tfmt.Fprintf(w, \"%s\\n\", e.Path)\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch e.Object.(type) {\n\t\tcase mo.Folder:\n\t\t\tif _, err = fmt.Fprintf(w, \"%s/\\n\", e.Path); err != nil 
{\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\tif _, err = fmt.Fprintf(w, \"%s (%s)\\n\", e.Path, e.Object.Reference().Type); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/main.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage main\n\nimport (\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\n\t_ \"github.com/vmware/govmomi/govc/about\"\n\t_ \"github.com/vmware/govmomi/govc/cluster\"\n\t_ \"github.com/vmware/govmomi/govc/datacenter\"\n\t_ \"github.com/vmware/govmomi/govc/datastore\"\n\t_ \"github.com/vmware/govmomi/govc/datastore/disk\"\n\t_ \"github.com/vmware/govmomi/govc/datastore/vsan\"\n\t_ \"github.com/vmware/govmomi/govc/device\"\n\t_ \"github.com/vmware/govmomi/govc/device/cdrom\"\n\t_ \"github.com/vmware/govmomi/govc/device/floppy\"\n\t_ \"github.com/vmware/govmomi/govc/device/scsi\"\n\t_ \"github.com/vmware/govmomi/govc/device/serial\"\n\t_ \"github.com/vmware/govmomi/govc/device/usb\"\n\t_ \"github.com/vmware/govmomi/govc/dvs\"\n\t_ \"github.com/vmware/govmomi/govc/dvs/portgroup\"\n\t_ \"github.com/vmware/govmomi/govc/env\"\n\t_ \"github.com/vmware/govmomi/govc/events\"\n\t_ \"github.com/vmware/govmomi/govc/extension\"\n\t_ \"github.com/vmware/govmomi/govc/fields\"\n\t_ \"github.com/vmware/govmomi/govc/folder\"\n\t_ \"github.com/vmware/govmomi/govc/host\"\n\t_ \"github.com/vmware/govmomi/govc/host/account\"\n\t_ \"github.com/vmware/govmomi/govc/host/autostart\"\n\t_ \"github.com/vmware/govmomi/govc/host/cert\"\n\t_ \"github.com/vmware/govmomi/govc/host/date\"\n\t_ \"github.com/vmware/govmomi/govc/host/esxcli\"\n\t_ 
\"github.com/vmware/govmomi/govc/host/firewall\"\n\t_ \"github.com/vmware/govmomi/govc/host/maintenance\"\n\t_ \"github.com/vmware/govmomi/govc/host/option\"\n\t_ \"github.com/vmware/govmomi/govc/host/portgroup\"\n\t_ \"github.com/vmware/govmomi/govc/host/service\"\n\t_ \"github.com/vmware/govmomi/govc/host/storage\"\n\t_ \"github.com/vmware/govmomi/govc/host/vnic\"\n\t_ \"github.com/vmware/govmomi/govc/host/vswitch\"\n\t_ \"github.com/vmware/govmomi/govc/importx\"\n\t_ \"github.com/vmware/govmomi/govc/license\"\n\t_ \"github.com/vmware/govmomi/govc/logs\"\n\t_ \"github.com/vmware/govmomi/govc/ls\"\n\t_ \"github.com/vmware/govmomi/govc/metric\"\n\t_ \"github.com/vmware/govmomi/govc/metric/interval\"\n\t_ \"github.com/vmware/govmomi/govc/object\"\n\t_ \"github.com/vmware/govmomi/govc/option\"\n\t_ \"github.com/vmware/govmomi/govc/permissions\"\n\t_ \"github.com/vmware/govmomi/govc/pool\"\n\t_ \"github.com/vmware/govmomi/govc/role\"\n\t_ \"github.com/vmware/govmomi/govc/session\"\n\t_ \"github.com/vmware/govmomi/govc/vapp\"\n\t_ \"github.com/vmware/govmomi/govc/version\"\n\t_ \"github.com/vmware/govmomi/govc/vm\"\n\t_ \"github.com/vmware/govmomi/govc/vm/disk\"\n\t_ \"github.com/vmware/govmomi/govc/vm/guest\"\n\t_ \"github.com/vmware/govmomi/govc/vm/network\"\n\t_ \"github.com/vmware/govmomi/govc/vm/rdm\"\n\t_ \"github.com/vmware/govmomi/govc/vm/snapshot\"\n)\n\nfunc main() {\n\tos.Exit(cli.Run(os.Args[1:]))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/main_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage main\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n)\n\nfunc TestMain(t *testing.T) {\n\t// Execute flag registration for every command to verify there are no\n\t// commands with flag name collisions\n\tfor _, cmd := range cli.Commands() {\n\t\tfs := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\n\t\t// Use fresh context for every command\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tdefer cancel()\n\t\tcmd.Register(ctx, fs)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/metric/change.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage metric\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype change struct {\n\t*PerformanceFlag\n\n\tlevel  int\n\tdevice int\n}\n\nfunc init() {\n\tcli.Register(\"metric.change\", &change{})\n}\n\nfunc (cmd *change) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.PerformanceFlag, ctx = NewPerformanceFlag(ctx)\n\tcmd.PerformanceFlag.Register(ctx, f)\n\n\tf.IntVar(&cmd.level, \"level\", 0, \"Level for the aggregate counter\")\n\tf.IntVar(&cmd.device, \"device-level\", 0, \"Level for the per device counter\")\n}\n\nfunc (cmd *change) Usage() string {\n\treturn \"NAME...\"\n}\n\nfunc (cmd *change) Description() string {\n\treturn `Change counter NAME levels.\n\nExamples:\n  govc metric.change -level 1 net.bytesRx.average net.bytesTx.average`\n}\n\nfunc (cmd *change) Process(ctx context.Context) error {\n\tif err := cmd.PerformanceFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *change) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() == 0 || (cmd.level == 0 && cmd.device == 0) {\n\t\treturn flag.ErrHelp\n\t}\n\n\tm, err := cmd.Manager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcounters, err := m.CounterInfoByName(ctx)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tvar mapping []types.PerformanceManagerCounterLevelMapping\n\n\tfor _, name := range f.Args() {\n\t\tcounter, ok := counters[name]\n\t\tif !ok {\n\t\t\treturn cmd.ErrNotFound(name)\n\t\t}\n\n\t\tmapping = append(mapping, types.PerformanceManagerCounterLevelMapping{\n\t\t\tCounterId:      counter.Key,\n\t\t\tAggregateLevel: int32(cmd.level),\n\t\t\tPerDeviceLevel: int32(cmd.device),\n\t\t})\n\t}\n\n\t_, err = methods.UpdateCounterLevelMapping(ctx, m.Client(), &types.UpdateCounterLevelMapping{\n\t\tThis:            m.Reference(),\n\t\tCounterLevelMap: mapping,\n\t})\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/metric/info.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage metric\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype info struct {\n\t*PerformanceFlag\n}\n\nfunc init() {\n\tcli.Register(\"metric.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.PerformanceFlag, ctx = NewPerformanceFlag(ctx)\n\tcmd.PerformanceFlag.Register(ctx, f)\n}\n\nfunc (cmd *info) Usage() string {\n\treturn \"PATH [NAME]...\"\n}\n\nfunc (cmd *info) Description() string {\n\treturn `Metric info for NAME.\n\nIf PATH is a value other than '-', provider summary and instance list are included\nfor the given object type.\n\nIf NAME is not specified, all available metrics for the given INTERVAL are listed.\nAn object PATH must be provided in this case.\n\nExamples:\n  govc metric.info vm/my-vm\n  govc metric.info -i 300 vm/my-vm\n  govc metric.info - cpu.usage.average\n  govc metric.info /dc1/host/cluster cpu.usage.average`\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.PerformanceFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\ntype EntityDetail struct {\n\tRealtime   bool\n\tHistorical bool\n\tInstance   []string\n}\n\ntype MetricInfo struct {\n\tCounter          
*types.PerfCounterInfo\n\tEnabled          []string\n\tPerDeviceEnabled []string\n\tDetail           *EntityDetail\n}\n\ntype infoResult struct {\n\tcmd  *info\n\tInfo []*MetricInfo\n}\n\nfunc (r *infoResult) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfor _, info := range r.Info {\n\t\tcounter := info.Counter\n\n\t\tfmt.Fprintf(tw, \"Name:\\t%s\\n\", counter.Name())\n\t\tfmt.Fprintf(tw, \"  Label:\\t%s\\n\", counter.NameInfo.GetElementDescription().Label)\n\t\tfmt.Fprintf(tw, \"  Summary:\\t%s\\n\", counter.NameInfo.GetElementDescription().Summary)\n\t\tfmt.Fprintf(tw, \"  Group:\\t%s\\n\", counter.GroupInfo.GetElementDescription().Label)\n\t\tfmt.Fprintf(tw, \"  Unit:\\t%s\\n\", counter.UnitInfo.GetElementDescription().Label)\n\t\tfmt.Fprintf(tw, \"  Rollup type:\\t%s\\n\", counter.RollupType)\n\t\tfmt.Fprintf(tw, \"  Stats type:\\t%s\\n\", counter.StatsType)\n\t\tfmt.Fprintf(tw, \"  Level:\\t%d\\n\", counter.Level)\n\t\tfmt.Fprintf(tw, \"    Intervals:\\t%s\\n\", strings.Join(info.Enabled, \",\"))\n\t\tfmt.Fprintf(tw, \"  Per-device level:\\t%d\\n\", counter.PerDeviceLevel)\n\t\tfmt.Fprintf(tw, \"    Intervals:\\t%s\\n\", strings.Join(info.PerDeviceEnabled, \",\"))\n\n\t\tsummary := info.Detail\n\t\tif summary == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfmt.Fprintf(tw, \"  Realtime:\\t%t\\n\", summary.Realtime)\n\t\tfmt.Fprintf(tw, \"  Historical:\\t%t\\n\", summary.Historical)\n\t\tfmt.Fprintf(tw, \"  Instances:\\t%s\\n\", strings.Join(summary.Instance, \",\"))\n\t}\n\n\treturn tw.Flush()\n}\n\nfunc (r *infoResult) MarshalJSON() ([]byte, error) {\n\tm := make(map[string]*MetricInfo)\n\n\tfor _, info := range r.Info {\n\t\tm[info.Counter.Name()] = info\n\t}\n\n\treturn json.Marshal(m)\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tnames := f.Args()[1:]\n\n\tm, err := cmd.Manager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcounters, err := 
m.CounterInfoByName(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tintervals, err := m.HistoricalInterval(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tenabled := intervals.Enabled()\n\n\tvar summary *types.PerfProviderSummary\n\tvar mids map[int32][]*types.PerfMetricId\n\n\tif f.Arg(0) == \"-\" {\n\t\tif len(names) == 0 {\n\t\t\treturn flag.ErrHelp\n\t\t}\n\t} else {\n\t\tobjs, err := cmd.ManagedObjects(ctx, f.Args()[:1])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tsummary, err = m.ProviderSummary(ctx, objs[0])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tall, err := m.AvailableMetric(ctx, objs[0], cmd.Interval(summary.RefreshRate))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tmids = all.ByKey()\n\n\t\tif len(names) == 0 {\n\t\t\tnc, _ := m.CounterInfoByKey(ctx)\n\n\t\t\tfor i := range all {\n\t\t\t\tid := &all[i]\n\t\t\t\tif id.Instance != \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tnames = append(names, nc[id.CounterId].Name())\n\t\t\t}\n\t\t}\n\t}\n\n\tvar metrics []*MetricInfo\n\n\tfor _, name := range names {\n\t\tcounter, ok := counters[name]\n\t\tif !ok {\n\t\t\treturn cmd.ErrNotFound(name)\n\t\t}\n\n\t\tinfo := &MetricInfo{\n\t\t\tCounter:          counter,\n\t\t\tEnabled:          enabled[counter.Level],\n\t\t\tPerDeviceEnabled: enabled[counter.PerDeviceLevel],\n\t\t}\n\n\t\tmetrics = append(metrics, info)\n\n\t\tif summary == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar instances []string\n\n\t\tfor _, id := range mids[counter.Key] {\n\t\t\tif id.Instance != \"\" {\n\t\t\t\tinstances = append(instances, id.Instance)\n\t\t\t}\n\t\t}\n\n\t\tinfo.Detail = &EntityDetail{\n\t\t\tRealtime:   summary.CurrentSupported,\n\t\t\tHistorical: summary.SummarySupported,\n\t\t\tInstance:   instances,\n\t\t}\n\n\t}\n\n\treturn cmd.WriteResult(&infoResult{cmd, metrics})\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/metric/interval/change.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage interval\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/govc/metric\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype change struct {\n\t*metric.PerformanceFlag\n\n\tenabled *bool\n\tlevel   int\n}\n\nfunc init() {\n\tcli.Register(\"metric.interval.change\", &change{})\n}\n\nfunc (cmd *change) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.PerformanceFlag, ctx = metric.NewPerformanceFlag(ctx)\n\tcmd.PerformanceFlag.Register(ctx, f)\n\n\tf.Var(flags.NewOptionalBool(&cmd.enabled), \"enabled\", \"Enable or disable\")\n\tf.IntVar(&cmd.level, \"level\", 0, \"Level\")\n}\n\nfunc (cmd *change) Description() string {\n\treturn `Change historical metric intervals.\n\nExamples:\n  govc metric.interval.change -i 300 -level 2\n  govc metric.interval.change -i 86400 -enabled=false`\n}\n\nfunc (cmd *change) Process(ctx context.Context) error {\n\tif err := cmd.PerformanceFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *change) Run(ctx context.Context, f *flag.FlagSet) error {\n\tm, err := cmd.Manager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tintervals, err := m.HistoricalInterval(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinterval 
:= cmd.Interval(0)\n\tif interval == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tvar current *types.PerfInterval\n\n\tfor _, i := range intervals {\n\t\tif i.SamplingPeriod == interval {\n\t\t\tcurrent = &i\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif current == nil {\n\t\treturn fmt.Errorf(\"%d interval ID not found\", interval)\n\t}\n\n\tif cmd.level != 0 {\n\t\tif cmd.level > 4 {\n\t\t\treturn flag.ErrHelp\n\t\t}\n\t\tcurrent.Level = int32(cmd.level)\n\t}\n\n\tif cmd.enabled != nil {\n\t\tcurrent.Enabled = *cmd.enabled\n\t}\n\n\t_, err = methods.UpdatePerfInterval(ctx, m.Client(), &types.UpdatePerfInterval{\n\t\tThis:     m.Reference(),\n\t\tInterval: *current,\n\t})\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/metric/interval/info.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage interval\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"text/tabwriter\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/metric\"\n)\n\ntype info struct {\n\t*metric.PerformanceFlag\n}\n\nfunc init() {\n\tcli.Register(\"metric.interval.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.PerformanceFlag, ctx = metric.NewPerformanceFlag(ctx)\n\tcmd.PerformanceFlag.Register(ctx, f)\n}\n\nfunc (cmd *info) Description() string {\n\treturn `List historical metric intervals.\n\nExamples:\n  govc metric.interval.info\n  govc metric.interval.info -i 300`\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.PerformanceFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\tm, err := cmd.Manager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tintervals, err := m.HistoricalInterval(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttw := tabwriter.NewWriter(cmd.Out, 2, 0, 2, ' ', 0)\n\tcmd.Out = tw\n\n\tinterval := cmd.Interval(0)\n\n\tfor _, i := range intervals {\n\t\tif interval != 0 && i.SamplingPeriod != interval {\n\t\t\tcontinue\n\t\t}\n\n\t\tfmt.Fprintf(cmd.Out, \"ID:\\t%d\\n\", i.SamplingPeriod)\n\t\tfmt.Fprintf(cmd.Out, \"  Enabled:\\t%t\\n\", 
i.Enabled)\n\t\tfmt.Fprintf(cmd.Out, \"  Interval:\\t%s\\n\", time.Duration(i.SamplingPeriod)*time.Second)\n\t\tfmt.Fprintf(cmd.Out, \"  Name:\\t%s\\n\", i.Name)\n\t\tfmt.Fprintf(cmd.Out, \"  Level:\\t%d\\n\", i.Level)\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/metric/ls.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage metric\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/performance\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ls struct {\n\t*PerformanceFlag\n\n\tlong bool\n}\n\nfunc init() {\n\tcli.Register(\"metric.ls\", &ls{})\n}\n\nfunc (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.PerformanceFlag, ctx = NewPerformanceFlag(ctx)\n\tcmd.PerformanceFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.long, \"l\", false, \"Long listing format\")\n}\n\nfunc (cmd *ls) Usage() string {\n\treturn \"PATH\"\n}\n\nfunc (cmd *ls) Description() string {\n\treturn `List available metrics for PATH.\n\nExamples:\n  govc metric.ls /dc1/host/cluster1\n  govc metric.ls datastore/*\n  govc metric.ls vm/* | grep mem. 
| xargs govc metric.sample vm/*`\n}\n\nfunc (cmd *ls) Process(ctx context.Context) error {\n\tif err := cmd.PerformanceFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\ntype lsResult struct {\n\tcmd      *ls\n\tcounters map[int32]*types.PerfCounterInfo\n\tperformance.MetricList\n}\n\nfunc (r *lsResult) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfor _, id := range r.MetricList {\n\t\tif id.Instance != \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tinfo := r.counters[id.CounterId]\n\n\t\tif r.cmd.long {\n\t\t\tfmt.Fprintf(w, \"%s\\t%s\\n\", info.Name(),\n\t\t\t\tinfo.NameInfo.GetElementDescription().Label)\n\t\t\tcontinue\n\t\t}\n\n\t\tfmt.Fprintln(w, info.Name())\n\t}\n\n\treturn tw.Flush()\n}\n\nfunc (r *lsResult) MarshalJSON() ([]byte, error) {\n\tm := make(map[string]*types.PerfCounterInfo)\n\n\tfor _, id := range r.MetricList {\n\t\tinfo := r.counters[id.CounterId]\n\n\t\tm[info.Name()] = info\n\t}\n\n\treturn json.Marshal(m)\n}\n\nfunc (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tobjs, err := cmd.ManagedObjects(ctx, f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := cmd.Manager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts, err := m.ProviderSummary(ctx, objs[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmids, err := m.AvailableMetric(ctx, objs[0], cmd.Interval(s.RefreshRate))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcounters, err := m.CounterInfoByKey(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn cmd.WriteResult(&lsResult{cmd, counters, mids})\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/metric/performance.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage metric\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/performance\"\n)\n\ntype PerformanceFlag struct {\n\t*flags.DatacenterFlag\n\t*flags.OutputFlag\n\n\tm *performance.Manager\n\n\tinterval int\n}\n\nfunc NewPerformanceFlag(ctx context.Context) (*PerformanceFlag, context.Context) {\n\tf := &PerformanceFlag{}\n\tf.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tf.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\treturn f, ctx\n}\n\nfunc (f *PerformanceFlag) Register(ctx context.Context, fs *flag.FlagSet) {\n\tf.DatacenterFlag.Register(ctx, fs)\n\tf.OutputFlag.Register(ctx, fs)\n\n\tfs.IntVar(&f.interval, \"i\", 0, \"Interval ID\")\n}\n\nfunc (f *PerformanceFlag) Process(ctx context.Context) error {\n\tif err := f.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := f.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (f *PerformanceFlag) Manager(ctx context.Context) (*performance.Manager, error) {\n\tif f.m != nil {\n\t\treturn f.m, nil\n\t}\n\n\tc, err := f.Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tf.m = performance.NewManager(c)\n\n\tf.m.Sort = true\n\n\treturn f.m, err\n}\n\nfunc (f *PerformanceFlag) Interval(val int32) int32 {\n\tinterval := int32(f.interval)\n\n\tif interval == 0 
{\n\t\tif val == -1 {\n\t\t\t// realtime not supported\n\t\t\treturn 300\n\t\t}\n\n\t\treturn val\n\t}\n\n\treturn interval\n}\n\nfunc (f *PerformanceFlag) ErrNotFound(name string) error {\n\treturn fmt.Errorf(\"counter %q not found\", name)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/metric/reset.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage metric\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype reset struct {\n\t*PerformanceFlag\n}\n\nfunc init() {\n\tcli.Register(\"metric.reset\", &reset{})\n}\n\nfunc (cmd *reset) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.PerformanceFlag, ctx = NewPerformanceFlag(ctx)\n\tcmd.PerformanceFlag.Register(ctx, f)\n}\n\nfunc (cmd *reset) Usage() string {\n\treturn \"NAME...\"\n}\n\nfunc (cmd *reset) Description() string {\n\treturn `Reset counter NAME to the default level of data collection.\n\nExamples:\n  govc metric.reset net.bytesRx.average net.bytesTx.average`\n}\n\nfunc (cmd *reset) Process(ctx context.Context) error {\n\tif err := cmd.PerformanceFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *reset) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tm, err := cmd.Manager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcounters, err := m.CounterInfoByName(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar ids []int32\n\n\tfor _, name := range f.Args() {\n\t\tcounter, ok := counters[name]\n\t\tif !ok {\n\t\t\treturn cmd.ErrNotFound(name)\n\t\t}\n\n\t\tids = append(ids, counter.Key)\n\t}\n\n\t_, err = 
methods.ResetCounterLevelMapping(ctx, m.Client(), &types.ResetCounterLevelMapping{\n\t\tThis:     m.Reference(),\n\t\tCounters: ids,\n\t})\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/metric/sample.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage metric\n\nimport (\n\t\"context\"\n\t\"crypto/md5\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/performance\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype sample struct {\n\t*PerformanceFlag\n\n\td        int\n\tn        int\n\tt        bool\n\tplot     string\n\tinstance string\n}\n\nfunc init() {\n\tcli.Register(\"metric.sample\", &sample{})\n}\n\nfunc (cmd *sample) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.PerformanceFlag, ctx = NewPerformanceFlag(ctx)\n\tcmd.PerformanceFlag.Register(ctx, f)\n\n\tf.IntVar(&cmd.d, \"d\", 30, \"Limit object display name to D chars\")\n\tf.IntVar(&cmd.n, \"n\", 6, \"Max number of samples\")\n\tf.StringVar(&cmd.plot, \"plot\", \"\", \"Plot data using gnuplot\")\n\tf.BoolVar(&cmd.t, \"t\", false, \"Include sample times\")\n\tf.StringVar(&cmd.instance, \"instance\", \"*\", \"Instance\")\n}\n\nfunc (cmd *sample) Usage() string {\n\treturn \"PATH... 
NAME...\"\n}\n\nfunc (cmd *sample) Description() string {\n\treturn `Sample for object PATH of metric NAME.\n\nInterval ID defaults to 20 (realtime) if supported, otherwise 300 (5m interval).\n\nBy default, INSTANCE '*' samples all instances and the aggregate counter.\nAn INSTANCE value of '-' will only sample the aggregate counter.\nAn INSTANCE value other than '*' or '-' will only sample the given instance counter.\n\nIf PLOT value is set to '-', output a gnuplot script.  If non-empty with another\nvalue, PLOT will pipe the script to gnuplot for you.  The value is also used to set\nthe gnuplot 'terminal' variable, unless the value is that of the DISPLAY env var.\nOnly 1 metric NAME can be specified when the PLOT flag is set.\n\nExamples:\n  govc metric.sample host/cluster1/* cpu.usage.average\n  govc metric.sample -plot .png host/cluster1/* cpu.usage.average | xargs open\n  govc metric.sample vm/* net.bytesTx.average net.bytesTx.average\n  govc metric.sample -instance vmnic0 vm/* net.bytesTx.average\n  govc metric.sample -instance - vm/* net.bytesTx.average`\n}\n\nfunc (cmd *sample) Process(ctx context.Context) error {\n\tif err := cmd.PerformanceFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\ntype sampleResult struct {\n\tcmd      *sample\n\tm        *performance.Manager\n\tcounters map[string]*types.PerfCounterInfo\n\tSample   []performance.EntityMetric\n}\n\nfunc (r *sampleResult) name(e types.ManagedObjectReference) string {\n\tvar me mo.ManagedEntity\n\t_ = r.m.Properties(context.Background(), e, []string{\"name\"}, &me)\n\n\tname := me.Name\n\n\tif r.cmd.d > 0 && len(name) > r.cmd.d {\n\t\treturn name[:r.cmd.d] + \"*\"\n\t}\n\n\treturn name\n}\n\nfunc sampleInfoTimes(m *performance.EntityMetric) []string {\n\tvals := make([]string, len(m.SampleInfo))\n\n\tfor i := range m.SampleInfo {\n\t\tvals[i] = m.SampleInfo[i].Timestamp.Format(time.RFC3339)\n\t}\n\n\treturn vals\n}\n\nfunc (r *sampleResult) Plot(w io.Writer) error {\n\tif 
len(r.Sample) == 0 {\n\t\treturn nil\n\t}\n\n\tif r.cmd.plot != \"-\" {\n\t\tcmd := exec.Command(\"gnuplot\", \"-persist\")\n\t\tcmd.Stdout = w\n\t\tcmd.Stderr = os.Stderr\n\t\tstdin, err := cmd.StdinPipe()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = cmd.Start(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tw = stdin\n\t\tdefer func() {\n\t\t\t_ = stdin.Close()\n\t\t\t_ = cmd.Wait()\n\t\t}()\n\t}\n\n\tcounter := r.counters[r.Sample[0].Value[0].Name]\n\tunit := counter.UnitInfo.GetElementDescription()\n\n\tfmt.Fprintf(w, \"set title %q\\n\", counter.Name())\n\tfmt.Fprintf(w, \"set ylabel %q\\n\", unit.Label)\n\tfmt.Fprintf(w, \"set xlabel %q\\n\", \"Time\")\n\tfmt.Fprintf(w, \"set xdata %s\\n\", \"time\")\n\tfmt.Fprintf(w, \"set format x %q\\n\", \"%H:%M\")\n\tfmt.Fprintf(w, \"set timefmt %q\\n\", \"%Y-%m-%dT%H:%M:%SZ\")\n\n\text := path.Ext(r.cmd.plot)\n\tif ext != \"\" {\n\t\t// If a file name is given, use the extension as terminal type.\n\t\t// If just an ext is given, use the entities and counter as the file name.\n\t\tfile := r.cmd.plot\n\t\tname := r.cmd.plot[:len(r.cmd.plot)-len(ext)]\n\t\tr.cmd.plot = ext[1:]\n\n\t\tif name == \"\" {\n\t\t\th := md5.New()\n\n\t\t\tfor i := range r.Sample {\n\t\t\t\t_, _ = io.WriteString(h, r.Sample[i].Entity.String())\n\t\t\t}\n\t\t\t_, _ = io.WriteString(h, counter.Name())\n\n\t\t\tfile = fmt.Sprintf(\"govc-plot-%x%s\", h.Sum(nil), ext)\n\t\t}\n\n\t\tfmt.Fprintf(w, \"set output %q\\n\", file)\n\n\t\tdefer func() {\n\t\t\tfmt.Fprintln(r.cmd.Out, file)\n\t\t}()\n\t}\n\n\tswitch r.cmd.plot {\n\tcase \"-\", os.Getenv(\"DISPLAY\"):\n\tdefault:\n\t\tfmt.Fprintf(w, \"set terminal %s\\n\", r.cmd.plot)\n\t}\n\n\tif unit.Key == string(types.PerformanceManagerUnitPercent) {\n\t\tfmt.Fprintln(w, \"set yrange [0:100]\")\n\t}\n\n\tfmt.Fprintln(w)\n\n\tvar set []string\n\n\tfor i := range r.Sample {\n\t\tname := r.name(r.Sample[i].Entity)\n\t\tname = strings.Replace(name, \"_\", \"*\", -1) // underscore is some gnuplot 
markup?\n\t\tset = append(set, fmt.Sprintf(\"'-' using 1:2 title '%s' with lines\", name))\n\t}\n\n\tfmt.Fprintf(w, \"plot %s\\n\", strings.Join(set, \", \"))\n\n\tfor i := range r.Sample {\n\t\ttimes := sampleInfoTimes(&r.Sample[i])\n\n\t\tfor _, value := range r.Sample[i].Value {\n\t\t\tfor j := range value.Value {\n\t\t\t\tfmt.Fprintf(w, \"%s %s\\n\", times[j], value.Format(value.Value[j]))\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprintln(w, \"e\")\n\t}\n\n\treturn nil\n}\n\nfunc (r *sampleResult) Write(w io.Writer) error {\n\tif r.cmd.plot != \"\" {\n\t\treturn r.Plot(w)\n\t}\n\n\tcmd := r.cmd\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfor i := range r.Sample {\n\t\tmetric := r.Sample[i]\n\t\tname := r.name(metric.Entity)\n\t\tt := \"\"\n\t\tif cmd.t {\n\t\t\tt = metric.SampleInfoCSV()\n\t\t}\n\n\t\tfor _, v := range metric.Value {\n\t\t\tcounter := r.counters[v.Name]\n\t\t\tunits := counter.UnitInfo.GetElementDescription().Label\n\n\t\t\tinstance := v.Instance\n\t\t\tif instance == \"\" {\n\t\t\t\tinstance = \"-\"\n\t\t\t}\n\n\t\t\tfmt.Fprintf(tw, \"%s\\t%s\\t%s\\t%v\\t%s\\t%s\\n\",\n\t\t\t\tname, instance, v.Name, t, v.ValueCSV(), units)\n\t\t}\n\t}\n\n\treturn tw.Flush()\n}\n\nfunc (cmd *sample) Run(ctx context.Context, f *flag.FlagSet) error {\n\tm, err := cmd.Manager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar paths []string\n\tvar names []string\n\n\tbyName, err := m.CounterInfoByName(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, arg := range f.Args() {\n\t\tif _, ok := byName[arg]; ok {\n\t\t\tnames = append(names, arg)\n\t\t} else {\n\t\t\tpaths = append(paths, arg)\n\t\t}\n\t}\n\n\tif len(paths) == 0 || len(names) == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tif cmd.plot != \"\" {\n\t\tif len(names) > 1 {\n\t\t\treturn flag.ErrHelp\n\t\t}\n\n\t\tif cmd.instance == \"*\" {\n\t\t\tcmd.instance = \"\"\n\t\t}\n\t}\n\n\tobjs, err := cmd.ManagedObjects(ctx, paths)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts, err := m.ProviderSummary(ctx, 
objs[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.instance == \"-\" {\n\t\tcmd.instance = \"\"\n\t}\n\n\tspec := types.PerfQuerySpec{\n\t\tFormat:     string(types.PerfFormatNormal),\n\t\tMaxSample:  int32(cmd.n),\n\t\tMetricId:   []types.PerfMetricId{{Instance: cmd.instance}},\n\t\tIntervalId: cmd.Interval(s.RefreshRate),\n\t}\n\n\tsample, err := m.SampleByName(ctx, spec, names, objs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresult, err := m.ToMetricSeries(ctx, sample)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcounters, err := m.CounterInfoByName(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn cmd.WriteResult(&sampleResult{cmd, m, counters, result})\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/object/collect.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype collect struct {\n\t*flags.DatacenterFlag\n\n\tsingle bool\n\tsimple bool\n\tn      int\n}\n\nfunc init() {\n\tcli.Register(\"object.collect\", &collect{})\n}\n\nfunc (cmd *collect) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.simple, \"s\", false, \"Output property value only\")\n\tf.IntVar(&cmd.n, \"n\", 0, \"Wait for N property updates\")\n}\n\nfunc (cmd *collect) Usage() string {\n\treturn \"[MOID] [PROPERTY]...\"\n}\n\nfunc (cmd *collect) Description() string {\n\treturn `Collect managed object properties.\n\nMOID can be an inventory path or ManagedObjectReference.\nMOID defaults to '-', an alias for 'ServiceInstance:ServiceInstance'.\n\nBy default only the current property value(s) are collected.  
Use the '-n' flag to wait for updates.\n\nExamples:\n  govc object.collect - content\n  govc object.collect -s HostSystem:ha-host hardware.systemInfo.uuid\n  govc object.collect -s /ha-datacenter/vm/foo overallStatus\n  govc object.collect -json -n=-1 EventManager:ha-eventmgr latestEvent | jq .\n  govc object.collect -json -s $(govc object.collect -s - content.perfManager) description.counterType | jq .`\n}\n\nfunc (cmd *collect) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nvar stringer = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()\n\ntype change struct {\n\tcmd            *collect\n\tPropertyChange []types.PropertyChange\n}\n\nfunc (pc *change) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(pc.PropertyChange)\n}\n\nfunc (pc *change) output(name string, rval reflect.Value, rtype reflect.Type) {\n\ts := \"...\"\n\n\tkind := rval.Kind()\n\n\tif kind == reflect.Ptr || kind == reflect.Interface {\n\t\tif rval.IsNil() {\n\t\t\ts = \"\"\n\t\t} else {\n\t\t\trval = rval.Elem()\n\t\t\tkind = rval.Kind()\n\t\t}\n\t}\n\n\tswitch kind {\n\tcase reflect.Ptr, reflect.Interface:\n\tcase reflect.Slice:\n\t\tif rval.Len() == 0 {\n\t\t\ts = \"\"\n\t\t\tbreak\n\t\t}\n\n\t\tetype := rtype.Elem()\n\n\t\tif etype.Kind() != reflect.Interface && etype.Kind() != reflect.Struct || etype.Implements(stringer) {\n\t\t\tvar val []string\n\n\t\t\tfor i := 0; i < rval.Len(); i++ {\n\t\t\t\tv := rval.Index(i).Interface()\n\n\t\t\t\tif fstr, ok := v.(fmt.Stringer); ok {\n\t\t\t\t\ts = fstr.String()\n\t\t\t\t} else {\n\t\t\t\t\ts = fmt.Sprintf(\"%v\", v)\n\t\t\t\t}\n\n\t\t\t\tval = append(val, s)\n\t\t\t}\n\n\t\t\ts = strings.Join(val, \",\")\n\t\t}\n\tcase reflect.Struct:\n\t\tif rtype.Implements(stringer) {\n\t\t\ts = rval.Interface().(fmt.Stringer).String()\n\t\t}\n\tdefault:\n\t\ts = fmt.Sprintf(\"%v\", rval.Interface())\n\t}\n\n\tif pc.cmd.simple {\n\t\tfmt.Fprintln(pc.cmd.Out, 
s)\n\t\treturn\n\t}\n\n\tfmt.Fprintf(pc.cmd.Out, \"%s\\t%s\\t%s\\n\", name, rtype, s)\n}\n\nfunc (pc *change) writeStruct(name string, rval reflect.Value, rtype reflect.Type) {\n\tfor i := 0; i < rval.NumField(); i++ {\n\t\tfval := rval.Field(i)\n\t\tfield := rtype.Field(i)\n\n\t\tif field.Anonymous {\n\t\t\tpc.writeStruct(name, fval, fval.Type())\n\t\t\tcontinue\n\t\t}\n\n\t\tfname := fmt.Sprintf(\"%s.%s%s\", name, strings.ToLower(field.Name[:1]), field.Name[1:])\n\t\tpc.output(fname, fval, field.Type)\n\t}\n}\n\nfunc (pc *change) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(pc.cmd.Out, 4, 0, 2, ' ', 0)\n\tpc.cmd.Out = tw\n\n\tfor _, c := range pc.PropertyChange {\n\t\tif c.Val == nil {\n\t\t\t// type is unknown in this case, as xsi:type was not provided - just skip for now\n\t\t\tcontinue\n\t\t}\n\n\t\trval := reflect.ValueOf(c.Val)\n\t\trtype := rval.Type()\n\n\t\tif strings.HasPrefix(rtype.Name(), \"ArrayOf\") {\n\t\t\trval = rval.Field(0)\n\t\t\trtype = rval.Type()\n\t\t}\n\n\t\tif pc.cmd.single && rtype.Kind() == reflect.Struct && !rtype.Implements(stringer) {\n\t\t\tpc.writeStruct(c.Name, rval, rtype)\n\t\t\tcontinue\n\t\t}\n\n\t\tpc.output(c.Name, rval, rtype)\n\t}\n\n\treturn tw.Flush()\n}\n\nfunc (cmd *collect) Run(ctx context.Context, f *flag.FlagSet) error {\n\tclient, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tref := methods.ServiceInstance\n\targ := f.Arg(0)\n\n\tswitch arg {\n\tcase \"\", \"-\":\n\tdefault:\n\t\tif !ref.FromString(arg) {\n\t\t\tl, ferr := finder.ManagedObjectList(ctx, arg)\n\t\t\tif ferr != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tswitch len(l) {\n\t\t\tcase 0:\n\t\t\t\treturn fmt.Errorf(\"%s not found\", arg)\n\t\t\tcase 1:\n\t\t\t\tref = l[0].Object.Reference()\n\t\t\tdefault:\n\t\t\t\treturn flag.ErrHelp\n\t\t\t}\n\t\t}\n\t}\n\n\tp := property.DefaultCollector(client)\n\n\tvar props []string\n\tif f.NArg() > 1 {\n\t\tprops 
= f.Args()[1:]\n\t\tcmd.single = len(props) == 1\n\t}\n\n\treturn property.Wait(ctx, p, ref, props, func(pc []types.PropertyChange) bool {\n\t\t_ = cmd.WriteResult(&change{cmd, pc})\n\n\t\tcmd.n--\n\n\t\treturn cmd.n == -1\n\t})\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/object/destroy.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype destroy struct {\n\t*flags.DatacenterFlag\n}\n\nfunc init() {\n\tcli.Register(\"object.destroy\", &destroy{})\n}\n\nfunc (cmd *destroy) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n}\n\nfunc (cmd *destroy) Usage() string {\n\treturn \"PATH...\"\n}\n\nfunc (cmd *destroy) Description() string {\n\treturn `Destroy managed objects.\n\nExamples:\n  govc object.destroy /dc1/network/dvs /dc1/host/cluster`\n}\n\nfunc (cmd *destroy) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *destroy) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tobjs, err := cmd.ManagedObjects(ctx, f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, obj := range objs {\n\t\ttask, err := object.NewCommon(c, obj).Destroy(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlogger := cmd.ProgressLogger(fmt.Sprintf(\"destroying %s... 
\", obj))\n\t\t_, err = task.WaitForResult(ctx, logger)\n\t\tlogger.Wait()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/object/find.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/view\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype find struct {\n\t*flags.DatacenterFlag\n\n\tref      bool\n\tkind     kinds\n\tname     string\n\tmaxdepth int\n}\n\nvar alias = []struct {\n\tname string\n\tkind string\n}{\n\t{\"a\", \"VirtualApp\"},\n\t{\"c\", \"ClusterComputeResource\"},\n\t{\"d\", \"Datacenter\"},\n\t{\"f\", \"Folder\"},\n\t{\"g\", \"DistributedVirtualPortgroup\"},\n\t{\"h\", \"HostSystem\"},\n\t{\"m\", \"VirtualMachine\"},\n\t{\"n\", \"Network\"},\n\t{\"o\", \"OpaqueNetwork\"},\n\t{\"p\", \"ResourcePool\"},\n\t{\"r\", \"ComputeResource\"},\n\t{\"s\", \"Datastore\"},\n\t{\"w\", \"DistributedVirtualSwitch\"},\n}\n\nfunc aliasHelp() string {\n\tvar help bytes.Buffer\n\n\tfor _, a := range alias {\n\t\tfmt.Fprintf(&help, \"  %s    %s\\n\", a.name, a.kind)\n\t}\n\n\treturn help.String()\n}\n\ntype kinds []string\n\nfunc (e *kinds) String() string {\n\treturn fmt.Sprint(*e)\n}\n\nfunc (e *kinds) Set(value string) error {\n\t*e = append(*e, e.alias(value))\n\treturn nil\n}\n\nfunc (e *kinds) 
alias(value string) string {\n\tif len(value) != 1 {\n\t\treturn value\n\t}\n\n\tfor _, a := range alias {\n\t\tif a.name == value {\n\t\t\treturn a.kind\n\t\t}\n\t}\n\n\treturn value\n}\n\nfunc (e *kinds) wanted(kind string) bool {\n\tif len(*e) == 0 {\n\t\treturn true\n\t}\n\n\tfor _, k := range *e {\n\t\tif kind == k {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc init() {\n\tcli.Register(\"find\", &find{})\n}\n\nfunc (cmd *find) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\n\tf.Var(&cmd.kind, \"type\", \"Resource type\")\n\tf.StringVar(&cmd.name, \"name\", \"*\", \"Resource name\")\n\tf.IntVar(&cmd.maxdepth, \"maxdepth\", -1, \"Max depth\")\n\tf.BoolVar(&cmd.ref, \"i\", false, \"Print the managed object reference\")\n}\n\nfunc (cmd *find) Usage() string {\n\treturn \"[ROOT] [KEY VAL]...\"\n}\n\nfunc (cmd *find) Description() string {\n\tatable := aliasHelp()\n\n\treturn fmt.Sprintf(`Find managed objects.\n\nROOT can be an inventory path or ManagedObjectReference.\nROOT defaults to '.', an alias for the root folder or DC if set.\n\nOptional KEY VAL pairs can be used to filter results against object instance properties.\n\nThe '-type' flag value can be a managed entity type or one of the following aliases:\n\n%s\nExamples:\n  govc find\n  govc find /dc1 -type c\n  govc find vm -name my-vm-*\n  govc find . -type n\n  govc find . -type m -runtime.powerState poweredOn\n  govc find . -type m -datastore $(govc find -i datastore -name vsanDatastore)\n  govc find . -type s -summary.type vsan\n  govc find . 
-type h -hardware.cpuInfo.numCpuCores 16`, atable)\n}\n\nfunc (cmd *find) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// rootMatch returns true if the root object path should be printed\nfunc (cmd *find) rootMatch(ctx context.Context, root object.Reference, client *vim25.Client, filter property.Filter) bool {\n\tref := root.Reference()\n\n\tif !cmd.kind.wanted(ref.Type) {\n\t\treturn false\n\t}\n\n\tif len(filter) == 1 && filter[\"name\"] == \"*\" {\n\t\treturn true\n\t}\n\n\tvar content []types.ObjectContent\n\n\tpc := property.DefaultCollector(client)\n\t_ = pc.RetrieveWithFilter(ctx, []types.ManagedObjectReference{ref}, filter.Keys(), &content, filter)\n\n\treturn content != nil\n}\n\nfunc (cmd *find) Run(ctx context.Context, f *flag.FlagSet) error {\n\tclient, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\troot := client.ServiceContent.RootFolder\n\trootPath := \"/\"\n\n\targ := f.Arg(0)\n\tprops := f.Args()\n\n\tif len(props) > 0 {\n\t\tif strings.HasPrefix(arg, \"-\") {\n\t\t\targ = \".\"\n\t\t} else {\n\t\t\tprops = props[1:]\n\t\t}\n\t}\n\n\tif len(props)%2 != 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tswitch arg {\n\tcase rootPath:\n\tcase \"\", \".\":\n\t\tdc, _ := cmd.DatacenterIfSpecified()\n\t\tif dc == nil {\n\t\t\targ = rootPath\n\t\t} else {\n\t\t\targ = \".\"\n\t\t\troot = dc.Reference()\n\t\t\trootPath = dc.InventoryPath\n\t\t}\n\tdefault:\n\t\tl, ferr := finder.ManagedObjectList(ctx, arg)\n\t\tif ferr != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch len(l) {\n\t\tcase 0:\n\t\t\treturn fmt.Errorf(\"%s not found\", arg)\n\t\tcase 1:\n\t\t\troot = l[0].Object.Reference()\n\t\t\trootPath = l[0].Path\n\t\tdefault:\n\t\t\treturn flag.ErrHelp\n\t\t}\n\t}\n\n\tfilter := property.Filter{}\n\n\tfor i := 0; i < len(props); i++ {\n\t\tkey := props[i]\n\t\tif !strings.HasPrefix(key, 
\"-\") {\n\t\t\treturn flag.ErrHelp\n\t\t}\n\n\t\tkey = key[1:]\n\t\ti++\n\t\tval := props[i]\n\n\t\tif xf := f.Lookup(key); xf != nil {\n\t\t\t// Support use of -flag following the ROOT arg (flag package does not do this)\n\t\t\tif err = xf.Value.Set(val); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tfilter[key] = val\n\t\t}\n\t}\n\n\tfilter[\"name\"] = cmd.name\n\n\tprintPath := func(o types.ManagedObjectReference, p string) {\n\t\tif cmd.ref {\n\t\t\tfmt.Fprintln(cmd.Out, o)\n\t\t\treturn\n\t\t}\n\n\t\tpath := strings.Replace(p, rootPath, arg, 1)\n\t\tfmt.Fprintln(cmd.Out, path)\n\t}\n\n\trecurse := false\n\n\tswitch cmd.maxdepth {\n\tcase -1:\n\t\trecurse = true\n\tcase 0:\n\tcase 1:\n\tdefault:\n\t\treturn flag.ErrHelp // TODO: ?\n\t}\n\n\tif cmd.rootMatch(ctx, root, client, filter) {\n\t\tprintPath(root, arg)\n\t}\n\n\tif cmd.maxdepth == 0 {\n\t\treturn nil\n\t}\n\n\tm := view.NewManager(client)\n\n\tv, err := m.CreateContainerView(ctx, root, cmd.kind, recurse)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer v.Destroy(ctx)\n\n\tobjs, err := v.Find(ctx, cmd.kind, filter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, o := range objs {\n\t\tvar path string\n\n\t\tif !cmd.ref {\n\t\t\te, err := finder.Element(ctx, o)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tpath = e.Path\n\t\t}\n\n\t\tprintPath(o, path)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/object/method.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype method struct {\n\t*flags.DatacenterFlag\n\n\tname   string\n\treason string\n\tsource string\n\tenable bool\n}\n\nfunc init() {\n\tcli.Register(\"object.method\", &method{})\n}\n\nfunc (cmd *method) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.name, \"name\", \"\", \"Method name\")\n\tf.StringVar(&cmd.reason, \"reason\", \"\", \"Reason for disabling method\")\n\tf.StringVar(&cmd.source, \"source\", \"govc\", \"Source ID\")\n\tf.BoolVar(&cmd.enable, \"enable\", true, \"Enable method\")\n}\n\nfunc (cmd *method) Usage() string {\n\treturn \"PATH...\"\n}\n\nfunc (cmd *method) Description() string {\n\treturn `Enable or disable methods for managed objects.\n\nExamples:\n  govc object.method -name Destroy_Task -enable=false /dc1/vm/foo\n  govc object.collect /dc1/vm/foo disabledMethod | grep --color Destroy_Task\n  govc object.method -name Destroy_Task -enable /dc1/vm/foo`\n}\n\nfunc (cmd *method) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *method) 
Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tif cmd.name == \"\" {\n\t\treturn flag.ErrHelp\n\t}\n\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tobjs, err := cmd.ManagedObjects(ctx, f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm := object.NewAuthorizationManager(c)\n\n\tif cmd.enable {\n\t\treturn m.EnableMethods(ctx, objs, []string{cmd.name}, cmd.source)\n\t}\n\n\tmethod := []object.DisabledMethodRequest{\n\t\t{\n\t\t\tMethod: cmd.name,\n\t\t\tReason: cmd.reason,\n\t\t},\n\t}\n\n\treturn m.DisableMethods(ctx, objs, method, cmd.source)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/object/mv.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype mv struct {\n\t*flags.DatacenterFlag\n}\n\nfunc init() {\n\tcli.Register(\"object.mv\", &mv{})\n}\n\nfunc (cmd *mv) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n}\n\nfunc (cmd *mv) Usage() string {\n\treturn \"PATH... 
FOLDER\"\n}\n\nfunc (cmd *mv) Description() string {\n\treturn `Move managed entities to FOLDER.\n\nExamples:\n  govc folder.create /dc1/host/example\n  govc object.mv /dc2/host/*.example.com /dc1/host/example`\n}\n\nfunc (cmd *mv) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *mv) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() < 2 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tn := f.NArg() - 1\n\n\tfolder, err := finder.Folder(ctx, f.Arg(n))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tobjs, err := cmd.ManagedObjects(ctx, f.Args()[:n])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttask, err := folder.MoveInto(ctx, objs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger := cmd.ProgressLogger(fmt.Sprintf(\"moving %d objects to %s... \", len(objs), folder.InventoryPath))\n\t_, err = task.WaitForResult(ctx, logger)\n\tlogger.Wait()\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/object/reload.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype reload struct {\n\t*flags.DatacenterFlag\n}\n\nfunc init() {\n\tcli.Register(\"object.reload\", &reload{})\n}\n\nfunc (cmd *reload) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n}\n\nfunc (cmd *reload) Usage() string {\n\treturn \"PATH...\"\n}\n\nfunc (cmd *reload) Description() string {\n\treturn `Reload managed object state.\n\nExamples:\n  govc datastore.upload $vm.vmx $vm/$vm.vmx\n  govc object.reload /dc1/vm/$vm`\n}\n\nfunc (cmd *reload) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *reload) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tobjs, err := cmd.ManagedObjects(ctx, f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, obj := range objs {\n\t\treq := types.Reload{\n\t\t\tThis: obj,\n\t\t}\n\n\t\t_, err = methods.Reload(ctx, c, &req)\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/object/rename.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype rename struct {\n\t*flags.DatacenterFlag\n}\n\nfunc init() {\n\tcli.Register(\"object.rename\", &rename{})\n}\n\nfunc (cmd *rename) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n}\n\nfunc (cmd *rename) Usage() string {\n\treturn \"PATH NAME\"\n}\n\nfunc (cmd *rename) Description() string {\n\treturn `Rename managed objects.\n\nExamples:\n  govc object.rename /dc1/network/dvs1 Switch1`\n}\n\nfunc (cmd *rename) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *rename) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 2 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tobjs, err := cmd.ManagedObjects(ctx, f.Args()[:1])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttask, err := object.NewCommon(c, objs[0]).Rename(ctx, f.Arg(1))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger := cmd.ProgressLogger(fmt.Sprintf(\"renaming %s... 
\", objs[0]))\n\t_, err = task.WaitForResult(ctx, logger)\n\tlogger.Wait()\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/option/ls.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage option\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype List struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n}\n\nfunc init() {\n\tcli.Register(\"option.ls\", &List{})\n}\n\nfunc (cmd *List) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n}\n\nfunc (cmd *List) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *List) Usage() string {\n\treturn \"[NAME]\"\n}\n\nvar ListDescription = `List option with the given NAME.\n\nIf NAME ends with a dot, all options for that subtree are listed.`\n\nfunc (cmd *List) Description() string {\n\treturn ListDescription + `\n\nExamples:\n  govc option.ls\n  govc option.ls config.vpxd.sso.\n  govc option.ls config.vpxd.sso.sts.uri`\n}\n\nfunc (cmd *List) Query(ctx context.Context, f *flag.FlagSet, m 
*object.OptionManager) error {\n\tvar err error\n\tvar opts []types.BaseOptionValue\n\n\tif f.NArg() > 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tif f.NArg() == 1 {\n\t\topts, err = m.Query(ctx, f.Arg(0))\n\t} else {\n\t\tvar om mo.OptionManager\n\t\terr = m.Properties(ctx, m.Reference(), []string{\"setting\"}, &om)\n\t\topts = om.Setting\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn cmd.WriteResult(optionResult(opts))\n}\n\nfunc (cmd *List) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm := object.NewOptionManager(c, *c.ServiceContent.Setting)\n\n\treturn cmd.Query(ctx, f, m)\n}\n\ntype optionResult []types.BaseOptionValue\n\nfunc (r optionResult) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\tfor _, opt := range r {\n\t\to := opt.GetOptionValue()\n\t\tfmt.Fprintf(tw, \"%s:\\t%v\\n\", o.Key, o.Value)\n\t}\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/option/set.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage option\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype Set struct {\n\t*flags.ClientFlag\n}\n\nfunc init() {\n\tcli.Register(\"option.set\", &Set{})\n}\n\nfunc (cmd *Set) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n}\n\nfunc (cmd *Set) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *Set) Usage() string {\n\treturn \"NAME VALUE\"\n}\n\nvar SetDescription = `Set option NAME to VALUE.`\n\nfunc (cmd *Set) Description() string {\n\treturn SetDescription + `\n\nExamples:\n  govc option.set log.level info\n  govc option.set logger.Vsan verbose`\n}\n\nfunc (cmd *Set) Update(ctx context.Context, f *flag.FlagSet, m *object.OptionManager) error {\n\tif f.NArg() != 2 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tname := f.Arg(0)\n\topts, err := m.Query(ctx, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(opts) != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tval := f.Arg(1)\n\tvar set types.AnyType\n\n\tswitch x := opts[0].GetOptionValue().Value.(type) {\n\tcase string:\n\t\tset = 
val\n\tcase bool:\n\t\tset, err = strconv.ParseBool(val)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase int32:\n\t\ts, err := strconv.ParseInt(val, 10, 32)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tset = s\n\tcase int64:\n\t\tset, err = strconv.ParseInt(val, 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"type %T conversion not supported\", x)\n\t}\n\n\topts[0].GetOptionValue().Value = set\n\n\treturn m.Update(ctx, opts)\n}\n\nfunc (cmd *Set) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm := object.NewOptionManager(c, *c.ServiceContent.Setting)\n\n\treturn cmd.Update(ctx, f, m)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/permissions/ls.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage permissions\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n)\n\ntype ls struct {\n\t*PermissionFlag\n\n\tinherited bool\n}\n\nfunc init() {\n\tcli.Register(\"permissions.ls\", &ls{})\n}\n\nfunc (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.PermissionFlag, ctx = NewPermissionFlag(ctx)\n\tcmd.PermissionFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.inherited, \"a\", true, \"Include inherited permissions defined by parent entities\")\n}\n\nfunc (cmd *ls) Process(ctx context.Context) error {\n\tif err := cmd.PermissionFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *ls) Usage() string {\n\treturn \"[PATH]...\"\n}\n\nfunc (cmd *ls) Description() string {\n\treturn `List the permissions defined on or effective on managed entities.\n\nExamples:\n  govc permissions.ls\n  govc permissions.ls /dc1/host/cluster1`\n}\n\nfunc (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error {\n\trefs, err := cmd.ManagedObjects(ctx, f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := cmd.Manager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, ref := range refs {\n\t\tperms, err := m.RetrieveEntityPermissions(ctx, ref, cmd.inherited)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcmd.List.Add(perms)\n\t}\n\n\treturn cmd.WriteResult(&cmd.List)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/permissions/permissions.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage permissions\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype List struct {\n\tRoles object.AuthorizationRoleList `json:\",omitempty\"`\n\n\tPermissions []types.Permission `json:\",omitempty\"`\n\n\tf *PermissionFlag\n}\n\ntype PermissionFlag struct {\n\t*flags.DatacenterFlag\n\t*flags.OutputFlag\n\n\tasRef bool\n\n\tm *object.AuthorizationManager\n\n\tList\n}\n\nfunc NewPermissionFlag(ctx context.Context) (*PermissionFlag, context.Context) {\n\tf := &PermissionFlag{}\n\tf.List.f = f\n\tf.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tf.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\treturn f, ctx\n}\n\nfunc (f *PermissionFlag) Register(ctx context.Context, fs *flag.FlagSet) {\n\tf.DatacenterFlag.Register(ctx, fs)\n\tf.OutputFlag.Register(ctx, fs)\n\n\tfs.BoolVar(&f.asRef, \"i\", false, \"Use moref instead of inventory path\")\n}\n\nfunc (f *PermissionFlag) Process(ctx context.Context) error {\n\tif err := f.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := f.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (f *PermissionFlag) Manager(ctx context.Context) (*object.AuthorizationManager, error) {\n\tif f.m != nil 
{\n\t\treturn f.m, nil\n\t}\n\n\tc, err := f.Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tf.m = object.NewAuthorizationManager(c)\n\tf.Roles, err = f.m.RoleList(ctx)\n\n\treturn f.m, err\n}\n\nfunc (f *PermissionFlag) Role(name string) (*types.AuthorizationRole, error) {\n\trole := f.Roles.ByName(name)\n\tif role == nil {\n\t\treturn nil, fmt.Errorf(\"role %q not found\", name)\n\t}\n\treturn role, nil\n}\n\nfunc (f *PermissionFlag) ManagedObjects(ctx context.Context, args []string) ([]types.ManagedObjectReference, error) {\n\tif !f.asRef {\n\t\treturn f.DatacenterFlag.ManagedObjects(ctx, args)\n\t}\n\n\tvar refs []types.ManagedObjectReference\n\n\tfor _, arg := range args {\n\t\tvar ref types.ManagedObjectReference\n\t\tif ref.FromString(arg) {\n\t\t\trefs = append(refs, ref)\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"invalid moref: %s\", arg)\n\t\t}\n\t}\n\n\treturn refs, nil\n}\n\nfunc (l *List) Write(w io.Writer) error {\n\tctx := context.Background()\n\tfinder, err := l.f.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trefs := make(map[types.ManagedObjectReference]string)\n\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfmt.Fprintf(tw, \"%s\\t%s\\t%s\\t%s\\n\", \"Role\", \"Entity\", \"Principal\", \"Propagate\")\n\n\tfor _, perm := range l.Permissions {\n\t\tpropagate := \"No\"\n\t\tif perm.Propagate {\n\t\t\tpropagate = \"Yes\"\n\t\t}\n\n\t\tname := l.Roles.ById(perm.RoleId).Name\n\n\t\tp := \"-\"\n\t\tif perm.Entity != nil {\n\t\t\tif l.f.asRef {\n\t\t\t\tp = perm.Entity.String()\n\t\t\t} else {\n\t\t\t\t// convert moref to inventory path\n\t\t\t\tif p = refs[*perm.Entity]; p == \"\" {\n\t\t\t\t\te, err := finder.Element(ctx, *perm.Entity)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tp = e.Path\n\t\t\t\t\t}\n\n\t\t\t\t\trefs[*perm.Entity] = p\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfmt.Fprintf(tw, \"%s\\t%s\\t%s\\t%s\\n\", name, p, perm.Principal, propagate)\n\t}\n\n\treturn tw.Flush()\n}\n\nfunc (l *List) Add(perms []types.Permission) 
{\n\tl.Permissions = append(l.Permissions, perms...)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/permissions/remove.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage permissions\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype remove struct {\n\t*PermissionFlag\n\n\ttypes.Permission\n\n\trole string\n}\n\nfunc init() {\n\tcli.Register(\"permissions.remove\", &remove{})\n}\n\nfunc (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.PermissionFlag, ctx = NewPermissionFlag(ctx)\n\tcmd.PermissionFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.Principal, \"principal\", \"\", \"User or group for which the permission is defined\")\n\tf.BoolVar(&cmd.Group, \"group\", false, \"True, if principal refers to a group name; false, for a user name\")\n}\n\nfunc (cmd *remove) Process(ctx context.Context) error {\n\tif err := cmd.PermissionFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *remove) Usage() string {\n\treturn \"[PATH]...\"\n}\n\nfunc (cmd *remove) Description() string {\n\treturn `Removes a permission rule from managed entities.\n\nExamples:\n  govc permissions.remove -principal root\n  govc permissions.remove -principal $USER@vsphere.local -role Admin /dc1/host/cluster1`\n}\n\nfunc (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error {\n\trefs, err := cmd.ManagedObjects(ctx, f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := cmd.Manager(ctx)\n\tif err != 
nil {\n\t\treturn err\n\t}\n\n\tfor _, ref := range refs {\n\t\terr = m.RemoveEntityPermission(ctx, ref, cmd.Principal, cmd.Group)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/permissions/set.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage permissions\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype set struct {\n\t*PermissionFlag\n\n\ttypes.Permission\n\n\trole string\n}\n\nfunc init() {\n\tcli.Register(\"permissions.set\", &set{})\n}\n\nfunc (cmd *set) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.PermissionFlag, ctx = NewPermissionFlag(ctx)\n\tcmd.PermissionFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.Principal, \"principal\", \"\", \"User or group for which the permission is defined\")\n\tf.BoolVar(&cmd.Group, \"group\", false, \"True, if principal refers to a group name; false, for a user name\")\n\tf.BoolVar(&cmd.Propagate, \"propagate\", true, \"Whether or not this permission propagates down the hierarchy to sub-entities\")\n\tf.StringVar(&cmd.role, \"role\", \"Admin\", \"Permission role name\")\n}\n\nfunc (cmd *set) Process(ctx context.Context) error {\n\tif err := cmd.PermissionFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *set) Usage() string {\n\treturn \"[PATH]...\"\n}\n\nfunc (cmd *set) Description() string {\n\treturn `Set the permissions managed entities.\n\nExamples:\n  govc permissions.set -principal root -role Admin\n  govc permissions.set -principal $USER@vsphere.local -role Admin /dc1/host/cluster1`\n}\n\nfunc (cmd *set) Run(ctx 
context.Context, f *flag.FlagSet) error {\n\trefs, err := cmd.ManagedObjects(ctx, f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := cmd.Manager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trole, err := cmd.Role(cmd.role)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.Permission.RoleId = role.RoleId\n\n\tperms := []types.Permission{cmd.Permission}\n\n\tfor _, ref := range refs {\n\t\terr = m.SetEntityPermissions(ctx, ref, perms)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/pool/change.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage pool\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype change struct {\n\t*flags.DatacenterFlag\n\t*ResourceConfigSpecFlag\n\n\tname string\n}\n\nfunc init() {\n\tcli.Register(\"pool.change\", &change{})\n}\n\nfunc (cmd *change) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\tcmd.ResourceConfigSpecFlag = NewResourceConfigSpecFlag()\n\tcmd.ResourceConfigSpecFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.name, \"name\", \"\", \"Resource pool name\")\n}\n\nfunc (cmd *change) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.ResourceConfigSpecFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *change) Usage() string {\n\treturn \"POOL...\"\n}\n\nfunc (cmd *change) Description() string {\n\treturn \"Change the configuration of one or more resource POOLs.\\n\" + poolNameHelp\n}\n\nfunc (cmd *change) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.SetAllocation(func(a 
types.BaseResourceAllocationInfo) {\n\t\tra := a.GetResourceAllocationInfo()\n\t\tif ra.Shares.Level == \"\" {\n\t\t\tra.Shares = nil\n\t\t}\n\t})\n\n\tfor _, arg := range f.Args() {\n\t\tpools, err := finder.ResourcePoolListAll(ctx, arg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, pool := range pools {\n\t\t\terr := pool.UpdateConfig(ctx, cmd.name, &cmd.ResourceConfigSpec)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/pool/create.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage pool\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"path\"\n\n\t\"github.com/vmware/govmomi/find\"\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype create struct {\n\t*flags.DatacenterFlag\n\t*ResourceConfigSpecFlag\n}\n\nfunc init() {\n\tcli.Register(\"pool.create\", &create{})\n}\n\nfunc (cmd *create) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\n\tcmd.ResourceConfigSpecFlag = NewResourceConfigSpecFlag()\n\tcmd.ResourceConfigSpecFlag.SetAllocation(func(a types.BaseResourceAllocationInfo) {\n\t\tra := a.GetResourceAllocationInfo()\n\t\tra.Shares.Level = types.SharesLevelNormal\n\t\tra.ExpandableReservation = types.NewBool(true)\n\t})\n\tcmd.ResourceConfigSpecFlag.Register(ctx, f)\n}\n\nfunc (cmd *create) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.ResourceConfigSpecFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *create) Usage() string {\n\treturn \"POOL...\"\n}\n\nfunc (cmd *create) Description() string {\n\treturn \"Create one or more resource POOLs.\\n\" + poolCreateHelp\n}\n\nfunc (cmd *create) Run(ctx context.Context, f 
*flag.FlagSet) error {\n\tif f.NArg() == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, arg := range f.Args() {\n\t\tdir := path.Dir(arg)\n\t\tbase := path.Base(arg)\n\t\tparents, err := finder.ResourcePoolList(ctx, dir)\n\t\tif err != nil {\n\t\t\tif _, ok := err.(*find.NotFoundError); ok {\n\t\t\t\treturn fmt.Errorf(\"cannot create resource pool '%s': parent not found\", base)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, parent := range parents {\n\t\t\t_, err = parent.Create(ctx, base, cmd.ResourceConfigSpec)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/pool/destroy.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage pool\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/find\"\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype destroy struct {\n\t*flags.DatacenterFlag\n\n\tchildren bool\n}\n\nfunc init() {\n\tcli.Register(\"pool.destroy\", &destroy{})\n}\n\nfunc (cmd *destroy) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.children, \"children\", false, \"Remove all children pools\")\n}\n\nfunc (cmd *destroy) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *destroy) Usage() string {\n\treturn \"POOL...\"\n}\n\nfunc (cmd *destroy) Description() string {\n\treturn \"Destroy one or more resource POOLs.\\n\" + poolNameHelp\n}\n\nfunc (cmd *destroy) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, arg := range f.Args() {\n\t\tpools, err := finder.ResourcePoolList(ctx, arg)\n\t\tif err != nil {\n\t\t\tif _, ok := err.(*find.NotFoundError); ok {\n\t\t\t\t// Ignore if pool cannot be found\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treturn err\n\t\t}\n\n\t\tfor 
_, pool := range pools {\n\t\t\tif cmd.children {\n\t\t\t\terr = pool.DestroyChildren(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\ttask, err := pool.Destroy(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\terr = task.Wait(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/pool/help.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage pool\n\nvar poolNameHelp = `\nPOOL may be an absolute or relative path to a resource pool or a (clustered)\ncompute host. If it resolves to a compute host, the associated root resource\npool is returned. If a relative path is specified, it is resolved with respect\nto the current datacenter's \"host\" folder (i.e. /ha-datacenter/host).\n\nPaths to nested resource pools must traverse through the root resource pool of\nthe selected compute host, i.e. \"compute-host/Resources/nested-pool\".\n\nThe same globbing rules that apply to the \"ls\" command apply here. For example,\nPOOL may be specified as \"*/Resources/*\" to expand to all resource pools that\nare nested one level under the root resource pool, on all (clustered) compute\nhosts in the current datacenter.`\n\nvar poolCreateHelp = `\nPOOL may be an absolute or relative path to a resource pool. The parent of the\nspecified POOL must be an existing resource pool. If a relative path is\nspecified, it is resolved with respect to the current datacenter's \"host\"\nfolder (i.e. /ha-datacenter/host). The basename of the specified POOL is used\nas the name for the new resource pool.\n\nThe same globbing rules that apply to the \"ls\" command apply here. 
For example,\nthe path to the parent resource pool in POOL may be specified as \"*/Resources\"\nto expand to the root resource pools on all (clustered) compute hosts in the\ncurrent datacenter.\n\nFor example:\n  */Resources/test             Create resource pool \"test\" on all (clustered)\n                               compute hosts in the current datacenter.\n  somehost/Resources/*/nested  Create resource pool \"nested\" in every\n                               resource pool that is a direct descendant of\n                               the root resource pool on \"somehost\".`\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/pool/info.go",
    "content": "/*\nCopyright (c) 2015-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage pool\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/find\"\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype info struct {\n\t*flags.DatacenterFlag\n\t*flags.OutputFlag\n\n\tpools bool\n\tapps  bool\n}\n\nfunc init() {\n\tcli.Register(\"pool.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.pools, \"p\", true, \"List resource pools\")\n\tf.BoolVar(&cmd.apps, \"a\", false, \"List virtual app resource pools\")\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *info) Usage() string {\n\treturn \"POOL...\"\n}\n\nfunc (cmd *info) Description() string {\n\treturn \"Retrieve information about one or more resource POOLs.\\n\" + 
poolNameHelp\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar res infoResult\n\tvar props []string\n\n\tif cmd.OutputFlag.JSON {\n\t\tprops = nil\n\t} else {\n\t\tprops = []string{\n\t\t\t\"name\",\n\t\t\t\"config.cpuAllocation\",\n\t\t\t\"config.memoryAllocation\",\n\t\t\t\"runtime.cpu\",\n\t\t\t\"runtime.memory\",\n\t\t}\n\t}\n\n\tvar vapps []*object.VirtualApp\n\n\tfor _, arg := range f.Args() {\n\t\tif cmd.pools {\n\t\t\tobjects, err := finder.ResourcePoolList(ctx, arg)\n\t\t\tif err != nil {\n\t\t\t\tif _, ok := err.(*find.NotFoundError); !ok {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tres.objects = append(res.objects, objects...)\n\t\t}\n\n\t\tif cmd.apps {\n\t\t\tapps, err := finder.VirtualAppList(ctx, arg)\n\t\t\tif err != nil {\n\t\t\t\tif _, ok := err.(*find.NotFoundError); !ok {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tvapps = append(vapps, apps...)\n\t\t}\n\t}\n\n\tif len(res.objects) != 0 {\n\t\trefs := make([]types.ManagedObjectReference, 0, len(res.objects))\n\t\tfor _, o := range res.objects {\n\t\t\trefs = append(refs, o.Reference())\n\t\t}\n\n\t\tpc := property.DefaultCollector(c)\n\t\terr = pc.Retrieve(ctx, refs, props, &res.ResourcePools)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(vapps) != 0 {\n\t\tvar apps []mo.VirtualApp\n\t\trefs := make([]types.ManagedObjectReference, 0, len(vapps))\n\t\tfor _, o := range vapps {\n\t\t\trefs = append(refs, o.Reference())\n\t\t\tp := object.NewResourcePool(c, o.Reference())\n\t\t\tp.InventoryPath = o.InventoryPath\n\t\t\tres.objects = append(res.objects, p)\n\t\t}\n\n\t\tpc := property.DefaultCollector(c)\n\t\terr = pc.Retrieve(ctx, refs, props, &apps)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, app := range apps 
{\n\t\t\tres.ResourcePools = append(res.ResourcePools, app.ResourcePool)\n\t\t}\n\t}\n\n\treturn cmd.WriteResult(&res)\n}\n\ntype infoResult struct {\n\tResourcePools []mo.ResourcePool\n\tobjects       []*object.ResourcePool\n}\n\nfunc (r *infoResult) Write(w io.Writer) error {\n\t// Maintain order via r.objects as Property collector does not always return results in order.\n\tobjects := make(map[types.ManagedObjectReference]mo.ResourcePool, len(r.ResourcePools))\n\tfor _, o := range r.ResourcePools {\n\t\tobjects[o.Reference()] = o\n\t}\n\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfor _, o := range r.objects {\n\t\tpool := objects[o.Reference()]\n\t\tfmt.Fprintf(tw, \"Name:\\t%s\\n\", pool.Name)\n\t\tfmt.Fprintf(tw, \"  Path:\\t%s\\n\", o.InventoryPath)\n\n\t\twriteInfo(tw, \"CPU\", \"MHz\", &pool.Runtime.Cpu, pool.Config.CpuAllocation)\n\t\tpool.Runtime.Memory.MaxUsage >>= 20\n\t\tpool.Runtime.Memory.OverallUsage >>= 20\n\t\twriteInfo(tw, \"Mem\", \"MB\", &pool.Runtime.Memory, pool.Config.MemoryAllocation)\n\t}\n\n\treturn tw.Flush()\n}\n\nfunc writeInfo(w io.Writer, name string, units string, ru *types.ResourcePoolResourceUsage, b types.BaseResourceAllocationInfo) {\n\tra := b.GetResourceAllocationInfo()\n\tusage := 100.0 * float64(ru.OverallUsage) / float64(ru.MaxUsage)\n\tshares := \"\"\n\tlimit := \"unlimited\"\n\n\tif ra.Shares.Level == types.SharesLevelCustom {\n\t\tshares = fmt.Sprintf(\" (%d)\", ra.Shares.Shares)\n\t}\n\n\tif ra.Limit != -1 {\n\t\tlimit = fmt.Sprintf(\"%d%s\", ra.Limit, units)\n\t}\n\n\tfmt.Fprintf(w, \"  %s Usage:\\t%d%s (%0.1f%%)\\n\", name, ru.OverallUsage, units, usage)\n\tfmt.Fprintf(w, \"  %s Shares:\\t%s%s\\n\", name, ra.Shares.Level, shares)\n\tfmt.Fprintf(w, \"  %s Reservation:\\t%d%s (expandable=%v)\\n\", name, ra.Reservation, units, *ra.ExpandableReservation)\n\tfmt.Fprintf(w, \"  %s Limit:\\t%s\\n\", name, limit)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/pool/resource_config_spec.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage pool\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype sharesInfo types.SharesInfo\n\nfunc (s *sharesInfo) String() string {\n\treturn string(s.Level)\n}\n\nfunc (s *sharesInfo) Set(val string) error {\n\tswitch val {\n\tcase string(types.SharesLevelNormal), string(types.SharesLevelLow), string(types.SharesLevelHigh):\n\t\ts.Level = types.SharesLevel(val)\n\tdefault:\n\t\tn, err := strconv.Atoi(val)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ts.Level = types.SharesLevelCustom\n\t\ts.Shares = int32(n)\n\t}\n\n\treturn nil\n}\n\nfunc NewResourceConfigSpecFlag() *ResourceConfigSpecFlag {\n\tf := new(ResourceConfigSpecFlag)\n\tf.MemoryAllocation = new(types.ResourceAllocationInfo)\n\tf.CpuAllocation = new(types.ResourceAllocationInfo)\n\n\tf.SetAllocation(func(a types.BaseResourceAllocationInfo) {\n\t\ta.GetResourceAllocationInfo().Shares = new(types.SharesInfo)\n\t})\n\treturn f\n}\n\ntype ResourceConfigSpecFlag struct {\n\ttypes.ResourceConfigSpec\n}\n\nfunc (s *ResourceConfigSpecFlag) Register(ctx context.Context, f *flag.FlagSet) {\n\topts := []struct {\n\t\tname  string\n\t\tunits string\n\t\ttypes.BaseResourceAllocationInfo\n\t}{\n\t\t{\"CPU\", \"MHz\", s.CpuAllocation},\n\t\t{\"Memory\", \"MB\", 
s.MemoryAllocation},\n\t}\n\n\tfor _, opt := range opts {\n\t\tprefix := strings.ToLower(opt.name)[:3]\n\t\tra := opt.GetResourceAllocationInfo()\n\t\tshares := (*sharesInfo)(ra.Shares)\n\n\t\tf.Int64Var(&ra.Limit, prefix+\".limit\", 0, opt.name+\" limit in \"+opt.units)\n\t\tf.Int64Var(&ra.Reservation, prefix+\".reservation\", 0, opt.name+\" reservation in \"+opt.units)\n\t\tf.Var(flags.NewOptionalBool(&ra.ExpandableReservation), prefix+\".expandable\", opt.name+\" expandable reservation\")\n\t\tf.Var(shares, prefix+\".shares\", opt.name+\" shares level or number\")\n\t}\n}\n\nfunc (s *ResourceConfigSpecFlag) Process(ctx context.Context) error {\n\treturn nil\n}\n\nfunc (s *ResourceConfigSpecFlag) SetAllocation(f func(types.BaseResourceAllocationInfo)) {\n\tfor _, a := range []types.BaseResourceAllocationInfo{s.CpuAllocation, s.MemoryAllocation} {\n\t\tf(a)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/release.sh",
    "content": "#!/bin/bash -e\n\nif ! which github-release > /dev/null; then\n  echo 'Please install github-release...'\n  echo ''\n  echo '  $ go get github.com/aktau/github-release'\n  echo ''\n  exit 1\nfi\n\nif [ -z \"${GITHUB_TOKEN}\" ]; then\n  echo 'Please set GITHUB_TOKEN...'\n  exit 1\nfi\n\nexport GITHUB_USER=\"${GITHUB_USER:-vmware}\"\nexport GITHUB_REPO=\"${GITHUB_REPO:-govmomi}\"\n\nname=\"$(git describe)\"\nrelease=(github-release release --draft --name \"${name}\")\n\ncase \"$1\" in\n  release)\n    tag=\"${name}\"\n    ;;\n  prerelease)\n    tag=\"prerelease-${name}\"\n    release+=(--pre-release)\n    ;;\n  dryrun)\n    ;;\n  *)\n    echo \"Usage: $0 [release|prerelease]\"\n    exit 1\n    ;;\nesac\n\necho \"Building govc...\"\nrm -f ./govc_*\n./build.sh\n\nfor name in govc_* ; do\n  if [ \"${name: -4}\" == \".exe\" ] ; then\n    zip \"${name}.zip\" \"$name\" &\n  else\n    gzip -f \"$name\" &\n  fi\ndone\n\nwait\n\nif [ -n \"$tag\" ] ; then\n  echo \"Pushing tag ${tag}...\"\n  git tag -f \"${tag}\"\n  git push origin \"refs/tags/${tag}\"\nfi\n\n# Generate description\ndescription=$(\nif [[ \"${tag}\" == \"prerelease-\"* ]]; then\n  echo '**This is a PRERELEASE version.**'\nfi\n\necho \"\nDocumentation:\n\n* [CHANGELOG](https://github.com/vmware/govmomi/blob/$tag/govc/CHANGELOG.md)\n\n* [README](https://github.com/vmware/govmomi/blob/$tag/govc/README.md)\n\n* [USAGE](https://github.com/vmware/govmomi/blob/$tag/govc/USAGE.md)\n\nThe binaries below are provided without warranty, following the [Apache license](LICENSE).\n\"\n\necho '\nInstructions:\n* Download the file relevant to your operating system\n* Decompress (i.e. `gzip -d govc_linux_amd64.gz`)\n* Set the executable bit (i.e. `chmod +x govc_linux_amd64`)\n* Move the file to a directory in your `$PATH` (i.e. 
`mv govc_linux_amd64 /usr/local/bin/govc`)\n'\n\necho '```'\necho '$ sha1sum govc_*.gz'\nsha1sum govc_*.gz\necho '$ sha1sum govc_*.zip'\nsha1sum govc_*.zip\necho '```'\n)\n\nrelease+=(--tag \"${tag}\" --description \"${description}\")\n\nif [ -n \"$tag\" ] ; then\n  echo \"Creating release...\"\n  \"${release[@]}\"\nelse\n  echo \"${release[@]}\"\nfi\n\n# Upload build artifacts\nfor f in govc_*.{gz,zip}; do\n  echo \"Uploading $f...\"\n  if [ -n \"$tag\" ] ; then\n    github-release upload --tag \"${tag}\" --name \"${f}\" --file \"${f}\"\n  fi\ndone\n\necho \"Remember to publish the draft release!\"\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/role/create.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage role\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/permissions\"\n)\n\ntype create struct {\n\t*permissions.PermissionFlag\n}\n\nfunc init() {\n\tcli.Register(\"role.create\", &create{})\n}\n\nfunc (cmd *create) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.PermissionFlag, ctx = permissions.NewPermissionFlag(ctx)\n\tcmd.PermissionFlag.Register(ctx, f)\n}\n\nfunc (cmd *create) Process(ctx context.Context) error {\n\tif err := cmd.PermissionFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *create) Usage() string {\n\treturn \"NAME [PRIVILEGE]...\"\n}\n\nfunc (cmd *create) Description() string {\n\treturn `Create authorization role.\n\nOptionally populate the role with the given PRIVILEGE(s).\n\nExamples:\n  govc role.create MyRole\n  govc role.create NoDC $(govc role.ls Admin | grep -v Datacenter.)`\n}\n\nfunc (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tm, err := cmd.Manager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = m.AddRole(ctx, f.Arg(0), f.Args()[1:])\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/role/ls.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage role\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/permissions\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ls struct {\n\t*permissions.PermissionFlag\n}\n\nfunc init() {\n\tcli.Register(\"role.ls\", &ls{})\n}\n\nfunc (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.PermissionFlag, ctx = permissions.NewPermissionFlag(ctx)\n\tcmd.PermissionFlag.Register(ctx, f)\n}\n\nfunc (cmd *ls) Process(ctx context.Context) error {\n\tif err := cmd.PermissionFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *ls) Usage() string {\n\treturn \"[NAME]\"\n}\n\nfunc (cmd *ls) Description() string {\n\treturn `List authorization roles.\n\nIf NAME is provided, list privileges for the role.\n\nExamples:\n  govc role.ls\n  govc role.ls Admin`\n}\n\ntype lsRoleList object.AuthorizationRoleList\n\nfunc (rl lsRoleList) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfor _, role := range rl {\n\t\tfmt.Fprintf(tw, \"%s\\t%s\\n\", role.Name, role.Info.GetDescription().Summary)\n\t}\n\n\treturn tw.Flush()\n}\n\ntype lsRole types.AuthorizationRole\n\nfunc (r lsRole) Write(w io.Writer) error {\n\tfor _, p := range r.Privilege 
{\n\t\tfmt.Println(p)\n\t}\n\treturn nil\n}\n\nfunc (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() > 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\t_, err := cmd.Manager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif f.NArg() == 1 {\n\t\trole, err := cmd.Role(f.Arg(0))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn cmd.WriteResult(lsRole(*role))\n\t}\n\n\treturn cmd.WriteResult(lsRoleList(cmd.Roles))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/role/remove.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage role\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/permissions\"\n)\n\ntype remove struct {\n\t*permissions.PermissionFlag\n\n\tforce bool\n}\n\nfunc init() {\n\tcli.Register(\"role.remove\", &remove{})\n}\n\nfunc (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.PermissionFlag, ctx = permissions.NewPermissionFlag(ctx)\n\tcmd.PermissionFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.force, \"force\", false, \"Force removal if role is in use\")\n}\n\nfunc (cmd *remove) Process(ctx context.Context) error {\n\tif err := cmd.PermissionFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *remove) Usage() string {\n\treturn \"NAME\"\n}\n\nfunc (cmd *remove) Description() string {\n\treturn `Remove authorization role.\n\nExamples:\n  govc role.remove MyRole\n  govc role.remove MyRole -force`\n}\n\nfunc (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tm, err := cmd.Manager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trole, err := cmd.Role(f.Arg(0))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn m.RemoveRole(ctx, role.RoleId, !cmd.force)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/role/update.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage role\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/permissions\"\n)\n\ntype update struct {\n\t*permissions.PermissionFlag\n\n\tname   string\n\tremove bool\n\tadd    bool\n}\n\nfunc init() {\n\tcli.Register(\"role.update\", &update{})\n}\n\nfunc (cmd *update) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.PermissionFlag, ctx = permissions.NewPermissionFlag(ctx)\n\tcmd.PermissionFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.name, \"name\", \"\", \"Change role name\")\n\tf.BoolVar(&cmd.remove, \"r\", false, \"Remove given PRIVILEGE(s)\")\n\tf.BoolVar(&cmd.add, \"a\", false, \"Add given PRIVILEGE(s)\")\n}\n\nfunc (cmd *update) Process(ctx context.Context) error {\n\tif err := cmd.PermissionFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *update) Usage() string {\n\treturn \"NAME [PRIVILEGE]...\"\n}\n\nfunc (cmd *update) Description() string {\n\treturn `Update authorization role.\n\nSet, Add or Remove role PRIVILEGE(s).\n\nExamples:\n  govc role.update MyRole $(govc role.ls Admin | grep VirtualMachine.)\n  govc role.update -r MyRole $(govc role.ls Admin | grep VirtualMachine.GuestOperations.)\n  govc role.update -a MyRole $(govc role.ls Admin | grep Datastore.)\n  govc role.update -name RockNRole MyRole`\n}\n\nfunc (cmd *update) Run(ctx 
context.Context, f *flag.FlagSet) error {\n\tif f.NArg() == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tm, err := cmd.Manager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trole, err := cmd.Role(f.Arg(0))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tids := role.Privilege\n\targs := f.Args()[1:]\n\n\tif cmd.add {\n\t\tids = append(ids, args...)\n\t} else if cmd.remove {\n\t\tids = nil\n\t\trm := make(map[string]bool, len(args))\n\t\tfor _, arg := range args {\n\t\t\trm[arg] = true\n\t\t}\n\n\t\tfor _, id := range role.Privilege {\n\t\t\tif !rm[id] {\n\t\t\t\tids = append(ids, id)\n\t\t\t}\n\t\t}\n\t} else if len(args) != 0 {\n\t\tids = args\n\t}\n\n\tif cmd.name == \"\" {\n\t\tcmd.name = role.Name\n\t}\n\n\treturn m.UpdateRole(ctx, role.RoleId, cmd.name, ids)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/role/usage.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage role\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/permissions\"\n)\n\ntype usage struct {\n\t*permissions.PermissionFlag\n}\n\nfunc init() {\n\tcli.Register(\"role.usage\", &usage{})\n}\n\nfunc (cmd *usage) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.PermissionFlag, ctx = permissions.NewPermissionFlag(ctx)\n\tcmd.PermissionFlag.Register(ctx, f)\n}\n\nfunc (cmd *usage) Process(ctx context.Context) error {\n\tif err := cmd.PermissionFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *usage) Usage() string {\n\treturn \"NAME...\"\n}\n\nfunc (cmd *usage) Description() string {\n\treturn `List usage for role NAME.\n\nExamples:\n  govc role.usage\n  govc role.usage Admin`\n}\n\nfunc (cmd *usage) Run(ctx context.Context, f *flag.FlagSet) error {\n\tm, err := cmd.Manager(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif f.NArg() == 0 {\n\t\tcmd.List.Permissions, err = m.RetrieveAllPermissions(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tfor _, name := range f.Args() {\n\t\t\trole, err := cmd.Role(name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tperms, err := m.RetrieveRolePermissions(ctx, role.RoleId)\n\t\t\tif err != nil {\n\t\t\t\treturn 
err\n\t\t\t}\n\n\t\t\tcmd.List.Add(perms)\n\t\t}\n\t}\n\n\treturn cmd.WriteResult(&cmd.List)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/session/ls.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage session\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"text/tabwriter\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n)\n\ntype ls struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n}\n\nfunc init() {\n\tcli.Register(\"session.ls\", &ls{})\n}\n\nfunc (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n}\n\nfunc (cmd *ls) Description() string {\n\treturn `List active sessions.\n\nExamples:\n  govc session.ls\n  govc session.ls -json | jq -r .CurrentSession.Key`\n}\n\nfunc (cmd *ls) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\ntype sessionInfo struct {\n\tcmd *ls\n\tmo.SessionManager\n}\n\nfunc (s *sessionInfo) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(w, 4, 0, 2, ' ', 0)\n\n\tfmt.Fprintf(tw, \"Key\\t\")\n\tfmt.Fprintf(tw, \"Name\\t\")\n\tfmt.Fprintf(tw, \"Time\\t\")\n\tfmt.Fprintf(tw, \"Idle\\t\")\n\tfmt.Fprintf(tw, 
\"Host\\t\")\n\tfmt.Fprintf(tw, \"Agent\\t\")\n\tfmt.Fprintf(tw, \"\\t\")\n\tfmt.Fprint(tw, \"\\n\")\n\n\tfor _, v := range s.SessionList {\n\t\tidle := \"  .\"\n\t\tif v.Key != s.CurrentSession.Key {\n\t\t\tsince := time.Since(v.LastActiveTime)\n\t\t\tif since > time.Hour {\n\t\t\t\tidle = \"old\"\n\t\t\t} else {\n\t\t\t\tidle = (time.Duration(since.Seconds()) * time.Second).String()\n\t\t\t}\n\t\t}\n\t\tfmt.Fprintf(tw, \"%s\\t\", v.Key)\n\t\tfmt.Fprintf(tw, \"%s\\t\", v.UserName)\n\t\tfmt.Fprintf(tw, \"%s\\t\", v.LoginTime.Format(\"2006-01-02 15:04\"))\n\t\tfmt.Fprintf(tw, \"%s\\t\", idle)\n\t\tfmt.Fprintf(tw, \"%s\\t\", v.IpAddress)\n\t\tfmt.Fprintf(tw, \"%s\\t\", v.UserAgent)\n\t\tfmt.Fprint(tw, \"\\n\")\n\t}\n\n\treturn tw.Flush()\n}\n\nfunc (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar m mo.SessionManager\n\tpc := property.DefaultCollector(c)\n\terr = pc.RetrieveOne(ctx, *c.ServiceContent.SessionManager, nil, &m)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn cmd.WriteResult(&sessionInfo{cmd, m})\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/session/rm.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage session\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/session\"\n)\n\ntype rm struct {\n\t*flags.ClientFlag\n}\n\nfunc init() {\n\tcli.Register(\"session.rm\", &rm{})\n}\n\nfunc (cmd *rm) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n}\n\nfunc (cmd *rm) Usage() string {\n\treturn \"KEY...\"\n}\n\nfunc (cmd *rm) Description() string {\n\treturn `Remove active sessions.\n\nExamples:\n  govc session.ls | grep root\n  govc session.rm 5279e245-e6f1-4533-4455-eb94353b213a`\n}\n\nfunc (cmd *rm) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *rm) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn session.NewManager(c).TerminateSession(ctx, f.Args())\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/.gitignore",
    "content": ".vagrant\ngovc_ca.*\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/README.md",
    "content": "# Functional tests for govc\n\n## Bats\n\nInstall [Bats](https://github.com/sstephenson/bats/)\n\n## coreutils\n\nInstall gxargs, greadlink and gmktemp on Darwin\n\n```\nbrew install coreutils\nbrew install findutils\n```\n\n## Download test images\n\nSome tests depend on ttylinux images, these can be downloaded by running:\n\n```\n./images/update.sh\n```\n\nThese images are uploaded to the esxbox as needed by tests and can be\nremoved with the following command:\n\n```\n./clean.sh\n```\n\n## GOVC_TEST_URL\n\nThe govc tests need an ESX instance to run against.  The default\n`GOVC_TEST_URL` is that of the vagrant box in the *esxbox* directory:\n\n```\n(cd esxbox && vagrant up)\n```\n\nAny other ESX box can be used by exporting the following variable:\n\n```\nexport GOVC_TEST_URL=user:pass@hostname\n```\n\n## vCenter Simulator\n\nSome tests require vCenter and depend on the Vagrant box in the\n*vcsim* directory.  These tests are skipped if the vcsim box is not\nrunning.  To enable these tests:\n\n```\n(cd vcsim && vagrant up)\n```\n\n## Running tests\n\nThe test helper prepends ../govc to `PATH`.\n\nThe tests can be run from any directory, as *govc* is found related to\n`PATH` and *images* are found relative to `$BATS_TEST_DIRNAME`.\n\nThe entire suite can be run with the following command:\n\n```\ncd $GOPATH/github.com/vmware/govmomi/govc\ngo build -v .\nbats test\n```\n\nOr individually, for example:\n\n```\n./test/cli.bats\n```\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/boot_order_test.sh",
    "content": "#!/bin/bash -e\n\n# This test is not run via bats.\n# A VNC session will be opened to observe the VM boot order:\n# 1) from floppy  (followed by: eject floppy, reboot)\n# 2) from cdrom   (followed by: eject cdrom, reboot)\n# 3) from network (will timeout)\n# 4) from disk\n\n. $(dirname $0)/test_helper.bash\n\nupload_img\nupload_iso\n\nid=$(new_ttylinux_vm)\n\nfunction cleanup() {\n  quit_vnc $vnc\n  govc vm.destroy $id\n  pkill -TERM -g $$ ^nc\n}\n\ntrap cleanup EXIT\n\ngovc device.cdrom.add -vm $id > /dev/null\ngovc device.cdrom.insert -vm $id $GOVC_TEST_ISO\n\ngovc device.floppy.add -vm $id > /dev/null\ngovc device.floppy.insert -vm $id $GOVC_TEST_IMG\n\ngovc device.boot -vm $id -delay 1000 -order floppy,cdrom,ethernet,disk\n\nvnc=$(govc vm.vnc -port 21122 -password govmomi -enable \"${id}\" | awk '{print $2}')\n\necho \"booting from floppy...\"\ngovc vm.power -on $id\n\nopen_vnc $vnc\n\nsleep 10\n\ngovc vm.power -off $id\n\ngovc device.floppy.eject -vm $id\n\n# this is ttylinux-live, notice the 'boot:' prompt vs 'login:' prompt when booted from disk\necho \"booting from cdrom...\"\ngovc vm.power -on $id\n\nsleep 10\n\ngovc vm.power -off $id\n\ngovc device.cdrom.eject -vm $id\n\ngovc device.serial.add -vm $id > /dev/null\ngovc device.serial.connect -vm $id -\n\necho \"booting from network, will timeout then boot from disk...\"\ngovc vm.power -on $id\n\n# serial console log\ndevice=$(govc device.ls -vm \"$id\" | grep serialport- | awk '{print $1}')\ngovc datastore.tail -f \"$id/$device.log\" &\n\nip=$(govc vm.ip $id)\n\necho \"VM booted from disk (ip=$ip)\"\n\nsleep 5\n\ngovc vm.power -s $id\n\nsleep 5\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/clean.sh",
    "content": "#!/bin/bash\n#\n# Cleanup any artifacts created by govc\n#\n\n. $(dirname $0)/test_helper.bash\n\nteardown\n\ndatastore_rm() {\n  name=$1\n  govc datastore.rm $name 2> /dev/null\n}\n\ndatastore_rm $GOVC_TEST_IMG\ndatastore_rm $GOVC_TEST_ISO\ndatastore_rm $GOVC_TEST_VMDK\ndatastore_rm $(echo $GOVC_TEST_VMDK | sed 's/.vmdk/-flat.vmdk/')\ndatastore_rm $(dirname $GOVC_TEST_VMDK)\n\n# Recursively destroy all resource pools created by the test suite\ngovc ls host/*/Resources/govc-test-* | \\\n  xargs -rt govc pool.destroy -r\n\ngovc datastore.ls\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/cli.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"about\" {\n  run govc about\n  assert_success\n  assert_line \"Vendor: VMware, Inc.\"\n\n  run govc about -json\n  assert_success\n\n  run govc about -json -l\n  assert_success\n}\n\n@test \"about.cert\" {\n  run govc about.cert\n  assert_success\n\n  run govc about.cert -json\n  assert_success\n\n  run govc about.cert -show\n  assert_success\n\n  # with -k=true we get thumbprint output and exit 0\n  thumbprint=$(govc about.cert -k=true -thumbprint)\n\n  # with -k=true we get thumbprint output and exit 60\n  run govc about.cert -k=false -thumbprint\n  if [ \"$status\" -ne 60 ]; then\n    flunk $(printf \"expected failed exit status=60, got status=%d\" $status)\n  fi\n  assert_output \"$thumbprint\"\n\n  run govc about -k=false\n  assert_failure\n\n  run govc about -k=false -tls-known-hosts <(echo \"$thumbprint\")\n  assert_success\n\n  run govc about -k=false -tls-known-hosts <(echo \"nope nope\")\n  assert_failure\n}\n\n@test \"version\" {\n    run govc version\n    assert_success\n\n    v=$(govc version | awk '{print $NF}')\n    run govc version -require \"$v\"\n    assert_success\n\n    run govc version -require \"not-a-version-string\"\n    assert_failure\n\n    run govc version -require 100.0.0\n    assert_failure\n}\n\n@test \"login attempt without credentials\" {\n  host=$(govc env -x GOVC_URL_HOST)\n  run govc about -u \"enoent@$host\"\n  assert_failure \"govc: ServerFaultCode: Cannot complete login due to an incorrect user name or password.\"\n}\n\n@test \"login attempt with GOVC_URL, GOVC_USERNAME, and GOVC_PASSWORD\" {\n  govc_url_to_vars\n  run govc about\n  assert_success\n}\n\n@test \"connect to an endpoint with a non-supported API version\" {\n  run env GOVC_MIN_API_VERSION=24.4 govc about\n  assert grep -q \"^govc: Require API version 24.4,\" <<<${output}\n}\n\n@test \"connect to an endpoint with user provided Vim namespace and Vim version\" {\n  run govc about -vim-namespace 
urn:vim25 -vim-version 6.0\n  assert_success\n}\n\n@test \"govc env\" {\n  output=\"$(govc env -x -u 'user:pass@enoent:99999?key=val#anchor')\"\n  assert grep -q GOVC_URL=enoent:99999 <<<${output}\n  assert grep -q GOVC_USERNAME=user <<<${output}\n  assert grep -q GOVC_PASSWORD=pass <<<${output}\n\n  assert grep -q GOVC_URL_SCHEME=https <<<${output}\n  assert grep -q GOVC_URL_HOST=enoent <<<${output}\n  assert grep -q GOVC_URL_PORT=99999 <<<${output}\n  assert grep -q GOVC_URL_PATH=/sdk <<<${output}\n  assert grep -q GOVC_URL_QUERY=key=val <<<${output}\n  assert grep -q GOVC_URL_FRAGMENT=anchor <<<${output}\n\n  password=\"pa\\$sword\\!ok\"\n  run govc env -u \"user:${password}@enoent:99999\" GOVC_PASSWORD\n  assert_output \"$password\"\n}\n\n@test \"govc help\" {\n  run govc\n  assert_failure\n\n  run govc -h\n  assert_success\n\n  run govc -enoent\n  assert_failure\n\n  run govc vm.create\n  assert_failure\n\n  run govc vm.create -h\n  assert_success\n\n  run govc vm.create -enoent\n  assert_failure\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/datacenter.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"datacenter.info\" {\n    dc=$(govc ls -t Datacenter / | head -n1)\n    run govc datacenter.info \"$dc\"\n    assert_success\n\n    run govc datacenter.info -json \"$dc\"\n    assert_success\n\n    run govc datacenter.info /enoent\n    assert_failure\n}\n\n@test \"create and destroy datacenters\" {\n  vcsim_env\n\n  dcs=($(new_id) $(new_id))\n  run govc datacenter.create \"${dcs[@]}\"\n  assert_success\n\n  for dc in ${dcs[*]}; do\n    run govc ls \"/$dc\"\n    assert_success\n    # /<datacenter>/{vm,network,host,datastore}\n    [ ${#lines[@]} -eq 4 ]\n\n    run govc datacenter.info \"/$dc\"\n    assert_success\n  done\n\n  run govc object.destroy \"${dcs[@]}\"\n  assert_success\n\n  for dc in ${dcs[*]}; do\n    run govc ls \"/$dc\"\n    assert_success\n    [ ${#lines[@]} -eq 0 ]\n  done\n}\n\n@test \"destroy datacenter using glob\" {\n  vcsim_env\n  unset GOVC_DATACENTER GOVC_DATASTORE\n\n  folder=$(new_id)\n  dcs=($(new_id) $(new_id))\n\n  run govc folder.create \"$folder\"\n  assert_success\n\n  run govc datacenter.create -folder \"$folder\" \"${dcs[@]}\"\n  assert_success\n\n  run govc object.destroy \"$folder/*\"\n  assert_success\n\n  for dc in ${dcs[*]}; do\n    run govc ls \"/$dc\"\n    assert_success\n    [ ${#lines[@]} -eq 0 ]\n  done\n\n  run govc folder.destroy \"$folder\"\n  assert_success\n}\n\n@test \"fails when datacenter name not specified\" {\n  run govc datacenter.create\n  assert_failure\n}\n\n@test \"datacenter commands fail against ESX\" {\n  run govc datacenter.create something\n  assert_failure \"govc: ServerFaultCode: The operation is not supported on the object.\"\n\n  run govc object.destroy /ha-datacenter\n  assert_failure\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/datastore.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\nupload_file() {\n  name=$(new_id)\n\n  echo \"Hello world\" | govc datastore.upload - \"$name\"\n  assert_success\n\n  echo \"$name\"\n}\n\n@test \"datastore.ls\" {\n  name=$(upload_file)\n\n  # Single argument\n  run govc datastore.ls \"${name}\"\n  assert_success\n  [ ${#lines[@]} -eq 1 ]\n\n  # Multiple arguments\n  run govc datastore.ls \"${name}\" \"${name}\"\n  assert_success\n  [ ${#lines[@]} -eq 2 ]\n\n  # Pattern argument\n  run govc datastore.ls \"./govc-test-*\"\n  assert_success\n  [ ${#lines[@]} -ge 1 ]\n\n  # Long listing\n  run govc datastore.ls -l \"./govc-test-*\"\n  assert_success\n  assert_equal \"12B\" $(awk '{ print $1 }' <<<${output})\n}\n\n@test \"datastore.ls-R\" {\n  dir=$(new_id)\n\n  run govc datastore.mkdir \"$dir\"\n  assert_success\n\n  for name in one two three ; do\n    echo \"$name world\" | govc datastore.upload - \"$dir/file-$name\"\n    run govc datastore.mkdir -p \"$dir/dir-$name/subdir-$name\"\n    run govc datastore.mkdir -p \"$dir/dir-$name/.hidden\"\n    assert_success\n    echo \"$name world\" | govc datastore.upload - \"$dir/dir-$name/.hidden/other-$name\"\n    echo \"$name world\" | govc datastore.upload - \"$dir/dir-$name/other-$name\"\n    echo \"$name world\" | govc datastore.upload - \"$dir/dir-$name/subdir-$name/last-$name\"\n  done\n\n  # without -R\n  json=$(govc datastore.ls -json -l -p \"$dir\")\n  result=$(jq -r .[].File[].Path <<<\"$json\" | wc -l)\n  [ \"$result\" -eq 6 ]\n\n  result=$(jq -r .[].FolderPath <<<\"$json\" | wc -l)\n  [ \"$result\" -eq 1 ]\n\n  # with -R\n  json=$(govc datastore.ls -json -l -p -R \"$dir\")\n  result=$(jq -r .[].File[].Path <<<\"$json\" | wc -l)\n  [ \"$result\" -eq 15 ]\n\n  result=$(jq -r .[].FolderPath <<<\"$json\" | wc -l)\n  [ \"$result\" -eq 7 ]\n\n  # with -R -a\n  json=$(govc datastore.ls -json -l -p -R -a \"$dir\")\n  result=$(jq -r .[].File[].Path <<<\"$json\" | wc -l)\n  [ \"$result\" -eq 21 ]\n\n  result=$(jq -r 
.[].FolderPath <<<\"$json\" | wc -l)\n  [ \"$result\" -eq 10 ]\n}\n\n@test \"datastore.rm\" {\n  name=$(upload_file)\n\n  # Not found is a failure\n  run govc datastore.rm \"${name}.notfound\"\n  assert_failure\n  assert_matches \"govc: File .* was not found\" \"${output}\"\n\n  # Not found is NOT a failure with the force flag\n  run govc datastore.rm -f \"${name}.notfound\"\n  assert_success\n  assert_empty \"${output}\"\n\n  # Verify the file is present\n  run govc datastore.ls \"${name}\"\n  assert_success\n\n  # Delete the file\n  run govc datastore.rm \"${name}\"\n  assert_success\n  assert_empty \"${output}\"\n\n  # Verify the file is gone\n  run govc datastore.ls \"${name}\"\n  assert_failure\n}\n\n@test \"datastore.info\" {\n  run govc datastore.info enoent\n  assert_failure\n\n  run govc datastore.info\n  assert_success\n  [ ${#lines[@]} -gt 1 ]\n}\n\n\n@test \"datastore.mkdir\" {\n  name=$(new_id)\n\n  # Not supported datastore type is a failure\n  run govc datastore.mkdir -namespace \"notfound\"\n  assert_failure\n  assert_matches \"govc: ServerFaultCode: .*\" \"${output}\"\n\n  run govc datastore.mkdir \"${name}\"\n  assert_success\n  assert_empty \"${output}\"\n\n  # Verify the dir is present\n  run govc datastore.ls \"${name}\"\n  assert_success\n\n  # Delete the dir on an unsupported datastore type is a failure\n  run govc datastore.rm -namespace \"${name}\"\n  assert_failure\n  assert_matches \"govc: ServerFaultCode: .*\" \"${output}\"\n\n  # Delete the dir\n  run govc datastore.rm \"${name}\"\n  assert_success\n  assert_empty \"${output}\"\n\n  # Verify the dir is gone\n  run govc datastore.ls \"${name}\"\n  assert_failure\n}\n\n@test \"datastore.download\" {\n  name=$(upload_file)\n  run govc datastore.download \"$name\" -\n  assert_success\n  assert_output \"Hello world\"\n\n  run govc datastore.download \"$name\" \"$TMPDIR/$name\"\n  assert_success\n  run cat \"$TMPDIR/$name\"\n  assert_output \"Hello world\"\n  rm \"$TMPDIR/$name\"\n}\n\n@test 
\"datastore.upload\" {\n  name=$(new_id)\n  echo -n \"Hello world\" | govc datastore.upload - \"$name\"\n\n  run govc datastore.download \"$name\" -\n  assert_success\n  assert_output \"Hello world\"\n}\n\n@test \"datastore.tail\" {\n  run govc datastore.tail \"enoent/enoent.log\"\n  assert_failure\n\n  id=$(new_id)\n  govc vm.create \"$id\"\n  govc vm.power -off \"$id\"\n\n  # test with .log (> bufSize) and .vmx (< bufSize)\n  for file in \"$id/vmware.log\" \"$id/$id.vmx\" ; do\n    log=$(govc datastore.download \"$file\" -)\n\n    for n in 0 1 5 10 123 456 7890 ; do\n      expect=$(tail -n $n <<<\"$log\")\n\n      run govc datastore.tail -n $n \"$file\"\n      assert_output \"$expect\"\n\n      expect=$(tail -c $n <<<\"$log\")\n\n      run govc datastore.tail -c $n \"$file\"\n      assert_output \"$expect\"\n    done\n  done\n}\n\n@test \"datastore.disk\" {\n  id=$(new_id)\n  vmdk=\"$id/$id.vmdk\"\n\n  run govc datastore.mkdir \"$id\"\n  assert_success\n\n  run govc datastore.disk.create \"$vmdk\"\n  assert_success\n\n  run govc datastore.disk.info \"$vmdk\"\n  assert_success\n\n  run govc datastore.rm \"$vmdk\"\n  assert_success\n\n  run govc datastore.mkdir -p \"$id\"\n  assert_success\n\n  run govc datastore.disk.create \"$vmdk\"\n  assert_success\n\n  id=$(new_id)\n  run govc vm.create -on=false -link -disk \"$vmdk\" \"$id\"\n  assert_success\n\n  run govc datastore.disk.info -d \"$vmdk\"\n  assert_success\n\n  run govc datastore.disk.info -p=false \"$vmdk\"\n  assert_success\n\n  run govc datastore.disk.info -c \"$vmdk\"\n  assert_success\n\n  run govc datastore.disk.info -json \"$vmdk\"\n  assert_success\n\n  # should fail due to: ddb.deletable=false\n  run govc datastore.rm \"$vmdk\"\n  assert_failure\n\n  run govc datastore.rm -f \"$vmdk\"\n  assert_success\n\n  # one more time, but rm the directory w/o -f\n  run govc datastore.mkdir -p \"$id\"\n  assert_success\n\n  run govc datastore.disk.create \"$vmdk\"\n  assert_success\n\n  id=$(new_id)\n  run govc 
vm.create -on=false -link -disk \"$vmdk\" \"$id\"\n  assert_success\n\n  run govc datastore.rm \"$(dirname \"$vmdk\")\"\n  assert_success\n}\n\n@test \"datastore.disk.info\" {\n  import_ttylinux_vmdk\n\n  run govc datastore.disk.info\n  assert_failure\n\n  run govc datastore.disk.info enoent\n  assert_failure\n\n  run govc datastore.disk.info \"$GOVC_TEST_VMDK\"\n  assert_success\n\n  run govc datastore.disk.info -d \"$GOVC_TEST_VMDK\"\n  assert_success\n\n  run govc datastore.disk.info -c \"$GOVC_TEST_VMDK\"\n  assert_success\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/datastore_file_manager_test.sh",
    "content": "#!/bin/bash -e\n\n# This test is not run via bats\n\n# See also: datastore.bats@test \"datastore.disk\"\n\nexport GOVC_TEST_URL=$GOVC_URL\n\n. \"$(dirname \"$0\")\"/test_helper.bash\n\necho -n \"checking datastore type...\"\ntype=$(govc object.collect -s \"datastore/$GOVC_DATASTORE\" summary.type)\necho \"$type\"\n\nif [ \"$type\" = \"vsan\" ] ; then\n  echo -n \"checking for orphan objects...\"\n  objs=($(govc datastore.vsan.dom.ls -o))\n  echo \"${#objs[@]}\"\n\n  if [ \"${#objs[@]}\" -ne \"0\" ] ; then\n    govc datastore.vsan.dom.rm \"${objs[@]}\"\n  fi\nfi\n\ndir=govc-test-dfm\n\necho \"uploading plain file...\"\ncal | govc datastore.upload - $dir/cal.txt\necho \"removing plain file...\"\ngovc datastore.rm $dir/cal.txt\n\nscratch=$dir/govc-test-scratch/govc-test-scratch.vmdk\n\ngovc datastore.mkdir -p \"$(dirname $scratch)\"\n\necho \"creating disk $scratch...\"\ngovc datastore.disk.create -size 1M $scratch\n\nid=$(new_id)\n\necho \"creating $id VM with disk linked to $scratch...\"\ngovc vm.create -on=false -link -disk $scratch \"$id\"\ninfo=$(govc device.info -vm \"$id\" disk-*)\necho \"$info\"\n\ndisk=\"$(grep Name: <<<\"$info\" | awk '{print $2}')\"\nvmdk=\"$id/$id.vmdk\"\n\necho \"removing $disk device but keeping the .vmdk backing file...\"\ngovc device.remove -vm \"$id\" -keep \"$disk\"\n\necho -n \"checking delta disk ddb.deletable...\"\ngovc datastore.download \"$vmdk\" - | grep -q -v ddb.deletable\necho \"yes\"\n\necho -n \"checking scratch disk ddb.deletable...\"\ngovc datastore.download \"$scratch\" - | grep ddb.deletable | grep -q false\necho \"no\"\n\necho \"removing $vmdk\"\ngovc datastore.rm \"$vmdk\"\n\necho -n \"checking that rm $scratch fails...\"\ngovc datastore.rm \"$scratch\" 2>/dev/null || echo \"yes\"\n\necho -n \"checking that rm -f $scratch deletes...\"\ngovc datastore.rm -f \"$scratch\" && echo \"yes\"\n\necho \"removing disk Directory via FileManager...\"\ngovc datastore.mkdir -p \"$(dirname $scratch)\"\ngovc 
datastore.disk.create -size 1M $scratch\ngovc datastore.rm \"$(dirname $scratch)\"\n\necho -n \"checking for remaining files...\"\ngovc datastore.ls -p -R $dir\n\nteardown\n\nstatus=0\n\nif [ \"$type\" = \"vsan\" ] ; then\n  echo -n \"checking for leaked objects...\"\n  objs=($(govc datastore.vsan.dom.ls -l -o | awk '{print $3}'))\n  echo \"${#objs[@]}\"\n\n  if [ \"${#objs[@]}\" -ne \"0\" ] ; then\n    printf \"%s\\n\" \"${objs[@]}\"\n    status=1\n  else\n    # this is expected to leak on vSAN currently\n    echo -n \"checking if FileManager.Delete still leaks...\"\n    govc datastore.mkdir -p \"$(dirname $scratch)\"\n    govc datastore.disk.create -size 1M $scratch\n    # '-t=false' forces use of FileManager instead of VirtualDiskManager\n    govc datastore.rm -t=false $scratch\n    govc datastore.rm $dir\n\n    govc datastore.vsan.dom.ls -o | xargs -r govc datastore.vsan.dom.rm -v\n  fi\nfi\n\nexit $status\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/datastore_tail_test.sh",
    "content": "#!/bin/bash -xe\n\n# This test is not run via bats as the bats pipeline hangs when we background a process\n\n. \"$(dirname \"$0\")\"/test_helper.bash\n\nname=$(new_id)\nn=16\ntmp=$(mktemp --tmpdir \"${name}-XXXXX\")\n\necho -n | govc datastore.upload - \"$name\"\ngovc datastore.tail -f \"$name\" > \"$tmp\" &\npid=$!\n\nsleep 1\nyes | dd bs=${n}K count=1 2>/dev/null | govc datastore.upload - \"$name\"\nsleep 2\n\n# stops following when the file has gone away\ngovc datastore.mv \"$name\" \"${name}.old\"\nwait $pid\n\ngovc datastore.download \"${name}.old\" - | cmp \"$tmp\" -\n\nrm \"$tmp\"\nteardown\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/device.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"device.ls\" {\n  vm=$(new_empty_vm)\n\n  result=$(govc device.ls -vm $vm | grep ethernet-0 | wc -l)\n  [ $result -eq 1 ]\n}\n\n@test \"device.info\" {\n  vm=$(new_empty_vm)\n\n  run govc device.info -vm $vm ide-200\n  assert_success\n\n  run govc device.info -vm $vm ide-20000\n  assert_failure\n\n  run govc device.info -vm $vm -net enoent\n  assert_failure\n\n  run govc device.info -vm $vm -net \"VM Network\" ide-200\n  assert_failure\n\n  result=$(govc device.info -vm $vm -net \"VM Network\" | grep \"MAC Address\" | wc -l)\n  [ $result -eq 1 ]\n}\n\n@test \"device.boot\" {\n  vm=$(new_ttylinux_vm)\n\n  result=$(govc device.ls -vm $vm -boot | wc -l)\n  [ $result -eq 0 ]\n\n  run govc device.boot -vm $vm -order floppy,cdrom,ethernet,disk\n  assert_success\n\n  result=$(govc device.ls -vm $vm -boot | wc -l)\n  [ $result -eq 2 ]\n\n  run govc device.cdrom.add -vm $vm\n  assert_success\n\n  run govc device.floppy.add -vm $vm\n  assert_success\n\n  run govc device.boot -vm $vm -order floppy,cdrom,ethernet,disk\n  assert_success\n\n  result=$(govc device.ls -vm $vm -boot | wc -l)\n  [ $result -eq 4 ]\n}\n\n@test \"device.cdrom\" {\n  vm=$(new_empty_vm)\n\n  result=$(govc device.ls -vm $vm | grep cdrom- | wc -l)\n  [ $result -eq 0 ]\n\n  run govc device.cdrom.add -vm $vm\n  assert_success\n  id=$output\n\n  result=$(govc device.ls -vm $vm | grep $id | wc -l)\n  [ $result -eq 1 ]\n\n  run govc device.info -vm $vm $id\n  assert_success\n\n  run govc device.cdrom.insert -vm $vm -device $id x.iso\n  assert_success\n\n  run govc device.info -vm $vm $id\n  assert_line \"Summary: ISO [${GOVC_DATASTORE}] x.iso\"\n\n  run govc device.disconnect -vm $vm $id\n  assert_success\n\n  run govc device.connect -vm $vm $id\n  assert_success\n\n  run govc device.remove -vm $vm $id\n  assert_success\n\n  run govc device.disconnect -vm $vm $id\n  assert_failure \"govc: device '$id' not found\"\n\n  run govc device.cdrom.insert 
-vm $vm -device $id x.iso\n  assert_failure \"govc: device '$id' not found\"\n\n  run govc device.remove -vm $vm $id\n  assert_failure \"govc: device '$id' not found\"\n}\n\n@test \"device.floppy\" {\n  vm=$(new_empty_vm)\n\n  result=$(govc device.ls -vm $vm | grep floppy- | wc -l)\n  [ $result -eq 0 ]\n\n  run govc device.floppy.add -vm $vm\n  assert_success\n  id=$output\n\n  result=$(govc device.ls -vm $vm | grep $id | wc -l)\n  [ $result -eq 1 ]\n\n  run govc device.info -vm $vm $id\n  assert_success\n\n  run govc device.floppy.insert -vm $vm -device $id x.img\n  assert_success\n\n  run govc device.info -vm $vm $id\n  assert_line \"Summary: Image [${GOVC_DATASTORE}] x.img\"\n\n  run govc device.disconnect -vm $vm $id\n  assert_success\n\n  run govc device.connect -vm $vm $id\n  assert_success\n\n  run govc device.remove -vm $vm $id\n  assert_success\n\n  run govc device.disconnect -vm $vm $id\n  assert_failure \"govc: device '$id' not found\"\n\n  run govc device.floppy.insert -vm $vm -device $id x.img\n  assert_failure \"govc: device '$id' not found\"\n\n  run govc device.remove -vm $vm $id\n  assert_failure \"govc: device '$id' not found\"\n}\n\n@test \"device.serial\" {\n  vm=$(new_empty_vm)\n\n  result=$(govc device.ls -vm $vm | grep serial- | wc -l)\n  [ $result -eq 0 ]\n\n  run govc device.serial.add -vm $vm\n  assert_success\n  id=$output\n\n  result=$(govc device.ls -vm $vm | grep $id | wc -l)\n  [ $result -eq 1 ]\n\n  run govc device.info -vm $vm $id\n  assert_success\n\n  run govc device.serial.connect -vm $vm -\n  assert_success\n\n  run govc device.info -vm $vm $id\n  assert_line \"Summary: File [$GOVC_DATASTORE] $vm/${id}.log\"\n\n  uri=telnet://:33233\n  run govc device.serial.connect -vm $vm -device $id $uri\n  assert_success\n\n  run govc device.info -vm $vm $id\n  assert_line \"Summary: Remote $uri\"\n\n  run govc device.serial.disconnect -vm $vm -device $id\n  assert_success\n\n  run govc device.info -vm $vm $id\n  assert_line \"Summary: 
Remote localhost:0\"\n\n  run govc device.disconnect -vm $vm $id\n  assert_success\n\n  run govc device.connect -vm $vm $id\n  assert_success\n\n  run govc device.remove -vm $vm $id\n  assert_success\n\n  run govc device.disconnect -vm $vm $id\n  assert_failure \"govc: device '$id' not found\"\n\n  run govc device.serial.connect -vm $vm -device $id $uri\n  assert_failure \"govc: device '$id' not found\"\n\n  run govc device.remove -vm $vm $id\n  assert_failure \"govc: device '$id' not found\"\n}\n\n@test \"device.scsi\" {\n  vm=$(new_empty_vm)\n\n  result=$(govc device.ls -vm $vm | grep lsilogic- | wc -l)\n  [ $result -eq 1 ]\n\n  run govc device.scsi.add -vm $vm\n  assert_success\n  id=$output\n\n  result=$(govc device.ls -vm $vm | grep $id | wc -l)\n  [ $result -eq 1 ]\n\n  result=$(govc device.ls -vm $vm | grep lsilogic- | wc -l)\n  [ $result -eq 2 ]\n\n  run govc device.scsi.add -vm $vm -type pvscsi\n  assert_success\n  id=$output\n\n  result=$(govc device.ls -vm $vm | grep $id | wc -l)\n  [ $result -eq 1 ]\n}\n\n@test \"device.usb\" {\n  vm=$(new_empty_vm)\n\n  result=$(govc device.ls -vm $vm | grep usb | wc -l)\n  [ $result -eq 0 ]\n\n  run govc device.usb.add -type enoent -vm $vm\n  assert_failure\n\n  run govc device.usb.add -vm $vm\n  assert_success\n  id=$output\n\n  result=$(govc device.ls -vm $vm | grep $id | wc -l)\n  [ $result -eq 1 ]\n\n  run govc device.usb.add -vm $vm\n  assert_failure # 1 per vm max\n\n  run govc device.usb.add -type xhci -vm $vm\n  assert_success\n  id=$output\n\n  result=$(govc device.ls -vm $vm | grep $id | wc -l)\n  [ $result -eq 1 ]\n\n  run govc device.usb.add -type xhci -vm $vm\n  assert_failure # 1 per vm max\n}\n\n@test \"device.scsi slots\" {\n  vm=$(new_empty_vm)\n\n  for i in $(seq 1 15) ; do\n    name=\"disk-${i}\"\n    run govc vm.disk.create -vm \"$vm\" -name \"$name\" -size 1K\n    assert_success\n    result=$(govc device.ls -vm \"$vm\" | grep disk- | wc -l)\n    [ \"$result\" -eq \"$i\" ]\n  done\n\n  # We're at 
the max, so this will fail\n  run govc vm.disk.create -vm \"$vm\" -name disk-16 -size 1K\n  assert_failure\n\n  # Remove disk #2\n  run govc device.remove -vm \"$vm\" disk-1000-2\n  assert_success\n\n  # No longer at the max, this should use the UnitNumber released by the remove above\n  run govc vm.disk.create -vm \"$vm\" -name disk-16 -size 1K\n  assert_success\n}\n\n\n@test \"device nil config\" {\n  vm=$(new_empty_vm)\n\n  run govc device.ls -vm \"$vm\"\n  assert_success\n\n  run govc datastore.rm \"$vm\"\n  assert_success\n\n  run govc object.reload \"vm/$vm\"\n  assert_success\n\n  run govc device.ls -vm \"$vm\"\n  assert_failure\n\n  run govc vm.unregister \"$vm\"\n  assert_success\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/esxbox/Vagrantfile",
    "content": "# -*- mode: ruby -*-\n# vi: set ft=ruby :\n\nVagrant.configure(\"2\") do |config|\n  config.ssh.insert_key = false\n  config.ssh.default.username = \"root\"\n  config.ssh.shell = \"sh\"\n  config.vm.hostname = \"esxbox\"\n\n  config.vm.box = \"esxi55\"\n  config.vm.synced_folder \".\", \"/vagrant\", disabled: true\n  config.vm.network \"forwarded_port\", guest: 443, host: 18443\n\n  [:vmware_fusion, :vmware_workstation].each do |name|\n    config.vm.provider name do |v,override|\n      v.vmx[\"memsize\"] = \"4096\"\n    end\n  end\n\n  config.vm.provision \"shell\", privileged: false, :inline => <<EOS\n# Turn off the firewall\nesxcli network firewall set --enabled false\n\n# Enable option so we can get VM ip address w/o vmware-tools\nesxcli system settings advanced set -o /Net/GuestIPHack -i 1\nEOS\nend\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/esxcli.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"network vm list\" {\n  # make sure there's at least 1 VM so we get a table header to count against\n  vm=$(new_empty_vm)\n  govc vm.power -on $vm\n\n  nlines=$(govc host.esxcli network vm list | wc -l)\n\n  vm=$(new_empty_vm)\n  govc vm.power -on $vm\n\n  xlines=$(govc host.esxcli network vm list | wc -l)\n\n  # test that we see a new row\n  [ $(($nlines + 1)) -eq $xlines ]\n\n  run govc host.esxcli network vm list enoent\n  assert_failure\n}\n\n@test \"network ip connection list\" {\n  run govc host.esxcli -- network ip connection list -t tcp\n  assert_success\n\n  # test that we get the expected number of table columns\n  nf=$(echo \"${lines[3]}\" | awk '{print NF}')\n  [ $nf -eq 9 ]\n\n  run govc host.esxcli -- network ip connection list -t enoent\n  assert_failure\n}\n\n@test \"system settings advanced list\" {\n  run govc host.esxcli -- system settings advanced list -o /Net/GuestIPHack\n  assert_success\n  assert_line \"Path: /Net/GuestIPHack\"\n\n  run govc host.esxcli -- system settings advanced list -o /Net/ENOENT\n  assert_failure\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/events.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"events dc\" {\n  run govc events\n  assert_success\n  nevents=${#lines[@]}\n  # there should be plenty more than 1 event at the top (dc) level\n  [ $nevents -ge 1 ]\n\n  # test -n flag\n  run govc events -n $((nevents - 1))\n  assert_success\n  [ ${#lines[@]} -le $nevents ]\n}\n\n@test \"events host\" {\n  run govc events 'host/*'\n  assert_success\n  [ ${#lines[@]} -ge 1 ]\n}\n\n@test \"events vm\" {\n  vm=$(new_id)\n\n  run govc vm.create -on=false $vm\n  assert_success\n\n  run govc events vm/$vm\n  assert_success\n  nevents=${#lines[@]}\n  [ $nevents -gt 1 ]\n\n  # glob should have same # of events\n  run govc events vm/${vm}*\n  assert_success\n  [ ${#lines[@]} -eq $nevents ]\n\n  # create a new vm, glob should match more events\n  run govc vm.create -on=false \"${vm}-2\"\n  assert_success\n  run govc events vm/${vm}*\n  assert_success\n  [ ${#lines[@]} -gt $nevents ]\n  nevents=${#lines[@]}\n\n  run govc events vm\n  assert_success\n  [ ${#lines[@]} -ge $nevents ]\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/extension.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"extension\" {\n  vcsim_env\n\n  govc extension.info | grep Name: | grep govc-test | awk '{print $2}' | $xargs -r govc extension.unregister\n\n  run govc extension.info enoent\n  assert_failure\n\n  id=$(new_id)\n\n  result=$(govc extension.info | grep $id | wc -l)\n  [ $result -eq 0 ]\n\n  # register extension\n  run govc extension.register $id <<EOS\n  {\n    \"Description\": {\n      \"Label\": \"govc\",\n      \"Summary\": \"Go interface to vCenter\"\n    },\n    \"Key\": \"${id}\",\n    \"Company\": \"VMware, Inc.\",\n    \"Version\": \"0.2.0\"\n  }\nEOS\n  assert_success\n\n  # check info output is legit\n  run govc extension.info $id\n  assert_line \"Name: $id\"\n\n  json=$(govc extension.info -json $id)\n  label=$(jq -r .Extensions[].Description.Label <<<\"$json\")\n  assert_equal \"govc\" \"$label\"\n\n  # change label and update extension\n  json=$(jq -r '.Extensions[] | .Description.Label = \"novc\"' <<<\"$json\")\n  run govc extension.register -update $id <<<\"$json\"\n  assert_success\n\n  # check label changed in info output\n  json=$(govc extension.info -json $id)\n  label=$(jq -r .Extensions[].Description.Label <<<\"$json\")\n  assert_equal \"novc\" \"$label\"\n\n  # set extension certificate to generated certificate\n  run govc extension.setcert -cert-pem '+' $id\n  assert_success\n\n  # test client certificate authentication\n  (\n    # remove password from env, set user to extension id and turn of session cache\n    govc_url_to_vars\n    unset GOVC_PASSWORD\n    GOVC_USERNAME=$id\n    export GOVC_PERSIST_SESSION=false\n    # vagrant port forwards to VC's port 80\n    export GOVC_TUNNEL_PROXY_PORT=16080\n    run govc about -cert \"${id}.crt\" -key \"${id}.key\"\n    assert_success\n  )\n\n  # remove generated cert and key\n  rm ${id}.{crt,key}\n\n  run govc extension.unregister $id\n  assert_success\n\n  result=$(govc extension.info | grep $id | wc -l)\n  [ $result -eq 0 ]\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/fields.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"fields\" {\n  vcsim_env\n\n  vm_id=$(new_id)\n  run govc vm.create $vm_id\n  assert_success\n\n  field=$(new_id)\n\n  result=$(govc fields.ls | grep $field | wc -l)\n  [ $result -eq 0 ]\n\n  key=$(govc fields.add $field)\n\n  result=$(govc fields.ls | grep $field | wc -l)\n  [ $result -eq 1 ]\n\n  key=$(govc fields.ls | grep $field | awk '{print $1}')\n\n  val=\"foo\"\n  run govc fields.set $field $val vm/$vm_id\n  assert_success\n\n  info=$(govc vm.info -json $vm_id | jq .VirtualMachines[0].CustomValue[0])\n\n  ikey=$(jq -r .Key <<<\"$info\")\n  assert_equal $key $ikey\n\n  ival=$(jq -r .Value <<<\"$info\")\n  assert_equal $val $ival\n\n  old_field=$field\n  field=$(new_id)\n  run govc fields.rename $key $field\n  assert_success\n  result=$(govc fields.ls | grep $old_field | wc -l)\n  [ $result -eq 0 ]\n\n  run govc fields.rm $field\n  assert_success\n\n  result=$(govc fields.ls | grep $field | wc -l)\n  [ $result -eq 0 ]\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/firewall.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"firewall.ruleset.find\" {\n  # Assumes sshServer ruleset is enabled\n  run govc firewall.ruleset.find -direction inbound -port 22\n  assert_success\n\n  run govc firewall.ruleset.find -direction outbound -port 22\n  if [ \"$status\" -eq 1 ] ; then\n    # If outbound port 22 is blocked, we should be able to list disabled rules via:\n    run govc firewall.ruleset.find -direction outbound -port 22 -enabled=false\n    assert_success\n\n    # find disabled should include sshClient ruleset in output\n    result=$(govc firewall.ruleset.find -direction outbound -port 22 -enabled=false | grep sshClient | wc -l)\n    [ $result -eq 1 ]\n  fi\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/folder.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"folder.info\" {\n    for name in / vm host network datastore ; do\n        run govc folder.info $name\n        assert_success\n\n        govc folder.info -json $name\n        assert_success\n    done\n\n    result=$(govc folder.info '*' | grep Name: | wc -l)\n    [ $result -eq 4 ]\n\n    run govc info.info /enoent\n    assert_failure\n}\n\n@test \"folder.create\" {\n    vcsim_env\n\n    name=$(new_id)\n\n    # relative to $GOVC_DATACENTER\n    run govc folder.create $name\n    assert_failure\n\n    run govc folder.create vm/$name\n    assert_success\n\n    run govc folder.info vm/$name\n    assert_success\n\n    run govc folder.info /$GOVC_DATACENTER/vm/$name\n    assert_success\n\n    run govc object.destroy vm/$name\n    assert_success\n\n    unset GOVC_DATACENTER\n    # relative to /\n\n    run govc folder.create $name\n    assert_success\n\n    run govc folder.info /$name\n    assert_success\n\n    child=$(new_id)\n    run govc folder.create $child\n    assert_success\n\n    run govc folder.info /$name/$child\n    assert_failure\n\n    run govc object.mv $child /$name\n    assert_success\n\n    run govc folder.info /$name/$child\n    assert_success\n\n    new=$(new_id)\n    run govc object.rename /$name $new\n    assert_success\n    name=$new\n\n    run govc folder.info /$name\n    assert_success\n\n    run govc object.destroy $name\n    assert_success\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/govc-sim",
    "content": "#!/bin/bash\n\n. $(dirname $0)/test_helper.bash\n\nvcsim_env\n\ngovc \"$@\"\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/guest_operations_test.sh",
    "content": "#!/bin/bash -e\n\n# This test is not run via bats.\n# 1) Test guest operations (govc guest.* commands)\n# 2) Test vm disk persistence\n\n. \"$(dirname \"$0\")\"/test_helper.bash\n\nimport_ttylinux_vmdk\n\nexport GOVC_GUEST_LOGIN=root:password\n\nfor persist in true false ; do\n  id=govc-test-persist-$persist\n  govc ls vm/$id | xargs -r govc vm.destroy\n\n  if [ \"$persist\" = \"true\" ] ; then\n    grepf=-v\n    mode=persistent\n  else\n    mode=independent_nonpersistent\n  fi\n\n  echo \"Creating vm...\"\n  govc vm.create -m 32 -disk.controller ide -on=false $id\n\n  # Save some noise by defaulting to '-vm $id'\n  export GOVC_VM=$id\n\n  echo \"Attaching linked disk...\"\n  govc vm.disk.attach -controller ide -persist=$persist -link=true -disk \"$GOVC_TEST_VMDK\"\n\n  echo \"Creating data disk...\"\n  govc vm.disk.create -controller ide -mode=$mode -name \"$id\"/data -size \"10M\"\n\n  echo \"Powering on vm...\"\n  govc vm.power -on $id 1>/dev/null\n  echo \"Waiting for tools to initialize...\"\n  govc vm.ip $id 1>/dev/null\n\n  echo \"Formatting the data disk...\"\n  govc guest.mkdir /data\n  script=$(govc guest.mktemp)\n\n  govc guest.upload -f - \"$script\" <<'EOF'\n#!/bin/sh -xe\n\nopts=(n p 1 1 ' ' w)\nprintf \"%s\\n\" \"${opts[@]}\" | fdisk /dev/hdb\nmkfs.ext3 /dev/hdb1\nmount /dev/hdb1 /data\ndf -h\ncp /etc/motd /data\nEOF\n\n  govc guest.chown 65534 \"$script\"\n  govc guest.chown 65534:65534 \"$script\"\n  govc guest.ls \"$script\" | grep 65534\n  govc guest.chmod 0755 \"$script\"\n  pid=$(govc guest.start \"$script\" '>&' /tmp/disk.log)\n  status=$(govc guest.ps -p \"$pid\" -json -X | jq .ProcessInfo[].ExitCode)\n  govc guest.download /tmp/disk.log -\n  if [ \"$status\" -ne \"0\" ] ; then\n    exit 1\n  fi\n\n  echo \"Writing some data to the disks...\"\n  for d in /etc /data ; do\n    govc guest.touch \"$d/motd.bak\"\n    govc guest.touch -d \"$(date -d '1 day ago')\" \"$d/motd\"\n    govc guest.ls \"$d/motd\"\n    govc guest.download 
$d/motd - | grep Chop\n  done\n  govc version | govc guest.upload -f - /etc/motd\n  govc guest.download /etc/motd - | grep -v Chop\n\n  pid=$(govc guest.start /bin/sync)\n  status=$(govc guest.ps -p \"$pid\" -json -X | jq .ProcessInfo[].ExitCode)\n  if [ \"$status\" -ne \"0\" ] ; then\n    exit 1\n  fi\n\n  echo \"Rebooting vm...\"\n  govc vm.power -off $id\n  govc vm.power -on $id\n  echo \"Waiting for tools to initialize...\"\n  govc vm.ip $id 1>/dev/null\n\n  echo \"Verifying data persistence...\"\n  govc guest.download /etc/motd - | grep $grepf Chop\n  pid=$(govc guest.start /bin/mount /dev/hdb1 /data)\n  status=$(govc guest.ps -p \"$pid\" -json -X | jq .ProcessInfo[].ExitCode)\n\n  if [ \"$persist\" = \"true\" ] ; then\n    govc guest.ls /data\n    govc guest.download /data/motd - | grep -v Chop\n    govc guest.rm /data/motd\n\n    govc guest.mkdir /data/foo/bar/baz 2>/dev/null && exit 1 # should fail\n    govc guest.mkdir -p /data/foo/bar/baz\n\n    govc guest.rmdir /data/foo 2>/dev/null && exit 1 # should fail\n    govc guest.rmdir /data/foo/bar/baz\n    dir=$(govc guest.mktemp -d -p /data/foo -s govc)\n    file=$(govc guest.mktemp -p \"$dir\")\n    govc guest.mv -n \"$(govc guest.mktemp)\" \"$file\" 2>/dev/null && exit 1 # should fail\n    govc guest.mv \"$file\" \"${file}-old\"\n    govc guest.mv \"$dir\" \"${dir}-old\"\n    govc guest.rmdir -r /data/foo\n    govc guest.ls /data | grep -v foo\n  else\n    if [ \"$status\" -eq \"0\" ] ; then\n      echo \"expected failure\"\n      exit 1\n    fi\n  fi\ndone\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/host.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"host info esx\" {\n  run govc host.info\n  assert_success\n  grep -q Manufacturer: <<<$output\n\n  run govc host.info -host enoent\n  assert_failure \"govc: host 'enoent' not found\"\n\n  for opt in dns ip ipath uuid\n  do\n    run govc host.info \"-host.$opt\" enoent\n    assert_failure \"govc: no such host\"\n  done\n\n  # avoid hardcoding the esxbox hostname\n  local name=$(govc ls '/*/host/*' | grep -v Resources)\n\n  run govc host.info -host $name\n  assert_success\n  grep -q Manufacturer: <<<$output\n\n  run govc host.info -host ${name##*/}\n  assert_success\n  grep -q Manufacturer: <<<$output\n\n  run govc host.info -host.ipath $name\n  assert_success\n\n  run govc host.info -host.dns $(basename $(dirname $name))\n  assert_success\n\n  uuid=$(govc host.info -json | jq -r .HostSystems[].Hardware.SystemInfo.Uuid)\n  run govc host.info -host.uuid $uuid\n  assert_success\n\n  run govc host.info \"*\"\n  assert_success\n}\n\n@test \"host info vc\" {\n  vcsim_env\n\n  run govc host.info\n  assert_success\n  grep -q Manufacturer: <<<$output\n\n  run govc host.info -host enoent\n  assert_failure \"govc: host 'enoent' not found\"\n\n  for opt in dns ip ipath uuid\n  do\n    run govc host.info \"-host.$opt\" enoent\n    assert_failure \"govc: no such host\"\n  done\n\n  local name=$GOVC_HOST\n\n  unset GOVC_HOST\n  run govc host.info\n  assert_failure \"govc: default host resolves to multiple instances, please specify\"\n\n  run govc host.info -host $name\n  assert_success\n  grep -q Manufacturer: <<<$output\n\n  run govc host.info -host.ipath $name\n  assert_success\n\n  run govc host.info -host.dns $(basename $name)\n  assert_success\n\n  uuid=$(govc host.info -host $name -json | jq -r .HostSystems[].Hardware.SystemInfo.Uuid)\n  run govc host.info -host.uuid $uuid\n  assert_success\n}\n\n@test \"host.vnic.info\" {\n  run govc host.vnic.info\n  assert_success\n}\n\n@test \"host.vswitch.info\" {\n  run 
govc host.vswitch.info\n  assert_success\n\n  run govc host.vswitch.info -json\n  assert_success\n}\n\n@test \"host.portgroup.info\" {\n  run govc host.portgroup.info\n  assert_success\n\n  run govc host.portgroup.info -json\n  assert_success\n}\n\n@test \"host.options\" {\n    run govc host.option.ls Config.HostAgent.plugins.solo.enableMob\n    assert_success\n\n    run govc host.option.ls Config.HostAgent.plugins.\n    assert_success\n\n    run govc host.option.ls -json Config.HostAgent.plugins.\n    assert_success\n\n    run govc host.option.ls Config.HostAgent.plugins.solo.ENOENT\n    assert_failure\n}\n\n@test \"host.service\" {\n    run govc host.service.ls\n    assert_success\n\n    run govc host.service.ls -json\n    assert_success\n\n    run govc host.service status TSM-SSH\n    assert_success\n}\n\n@test \"host.cert.info\" {\n  run govc host.cert.info\n  assert_success\n\n  run govc host.cert.info -json\n  assert_success\n\n  expires=$(govc host.cert.info -json | jq -r .NotAfter)\n  about_expires=$(govc about.cert -json | jq -r .NotAfter)\n  assert_equal \"$expires\" \"$about_expires\"\n}\n\n@test \"host.cert.csr\" {\n  #   Requested Extensions:\n  #       X509v3 Subject Alternative Name:\n  #       IP Address:...\n  result=$(govc host.cert.csr -ip | openssl req -text -noout)\n  assert_matches \"IP Address:\" \"$result\"\n  ! assert_matches \"DNS:\" \"$result\"\n\n  #   Requested Extensions:\n  #       X509v3 Subject Alternative Name:\n  #       DNS:...\n  result=$(govc host.cert.csr | openssl req -text -noout)\n  ! 
assert_matches \"IP Address:\" \"$result\"\n  assert_matches \"DNS:\" \"$result\"\n}\n\n@test \"host.cert.import\" {\n  issuer=$(govc host.cert.info -json | jq -r .Issuer)\n  expires=$(govc host.cert.info -json | jq -r .NotAfter)\n\n  # only mess with the cert if its already been signed by our test CA\n  if [[ \"$issuer\" != CN=govc-ca,* ]] ; then\n    skip \"host cert not signed by govc-ca\"\n  fi\n\n  govc host.cert.csr -ip | ./host_cert_sign.sh | govc host.cert.import\n  expires2=$(govc host.cert.info -json | jq -r .NotAfter)\n\n  # cert expiration should have changed\n  [ \"$expires\" != \"$expires2\" ]\n\n  # verify hostd is using the new cert too\n  expires=$(govc about.cert -json | jq -r .NotAfter)\n  assert_equal \"$expires\" \"$expires2\"\n\n  # our cert is not trusted against the system CA list\n  status=$(govc about.cert | grep Status:)\n  assert_matches ERROR \"$status\"\n\n  # with our CA trusted, the cert should be too\n  status=$(govc about.cert -tls-ca-certs ./govc_ca.pem | grep Status:)\n  assert_matches good \"$status\"\n}\n\n@test \"host.date.info\" {\n  run govc host.date.info\n  assert_success\n\n  run govc host.date.info -json\n  assert_success\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/host_cert_sign.sh",
    "content": "#!/bin/bash -e\n\n# Usage: govc host.cert.csr | ./host_cert_sign.sh | govc host.cert.import\n\npushd \"$(dirname \"$0\")\" >/dev/null\n\ndays=$((365 * 5))\n\nif [ ! -e govc_ca.key ] ; then\n  echo \"Generating CA private key...\" 1>&2\n  openssl genrsa -out govc_ca.key 2048\n\n  echo \"Generating CA self signed certificate...\" 1>&2\n  openssl req -x509 -new -nodes -key govc_ca.key -out govc_ca.pem -subj /C=US/ST=CA/L=SF/O=VMware/OU=Eng/CN=govc-ca -days $days\nfi\n\necho \"Signing CSR with the CA certificate...\" 1>&2\n\n# The hostd generated CSR includes:\n#   Requested Extensions:\n#       X509v3 Subject Alternative Name:\n#       IP Address:$ip\n# But seems it doesn't get copied by default, so we end up with:\n#   x509: cannot validate certificate for $ip because it doesn't contain any IP SANs (x509.HostnameError)\n# Using -extfile to add it to the signed cert.\n\nip=$(govc env -x GOVC_URL_HOST)\nopenssl x509 -req -CA govc_ca.pem -CAkey govc_ca.key -CAcreateserial -days $days -extfile <(echo \"subjectAltName=IP:$ip\")\n\npopd >/dev/null\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/images/.gitignore",
    "content": "ttylinux-*\nfloppybird.img\n\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/images/update.sh",
    "content": "#!/bin/bash\n\npushd $(dirname $0)\n\n# Sadly, the ttylinux project was abandoned in late 2015.\n# But this release still serves us well.\nbase_url=http://dl.bintray.com/dougm/ttylinux\nttylinux=\"ttylinux-pc_i486-16.1\"\nfiles=\"${ttylinux}.iso ${ttylinux}-live.ova ${ttylinux}.ova\"\n\nfor name in $files ; do\n  wget -qO $name $base_url/$name\ndone\n\nwget -qN https://github.com/icebreaker/floppybird/raw/master/build/floppybird.img\n\n# extract ova so we can also use the .vmdk and .ovf files directly\ntar -xvf ${ttylinux}.ova\n\npopd\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/import.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"import.ova\" {\n  run govc import.ova $GOVC_IMAGES/${TTYLINUX_NAME}.ova\n  assert_success\n\n  run govc vm.destroy ${TTYLINUX_NAME}\n  assert_success\n}\n\n@test \"import.ova with iso\" {\n  run govc import.ova $GOVC_IMAGES/${TTYLINUX_NAME}-live.ova\n  assert_success\n\n  run govc vm.destroy ${TTYLINUX_NAME}-live\n  assert_success\n}\n\n@test \"import.ovf\" {\n  run govc import.ovf $GOVC_IMAGES/${TTYLINUX_NAME}.ovf\n  assert_success\n\n  run govc vm.destroy ${TTYLINUX_NAME}\n  assert_success\n\n  # test w/ relative dir\n  pushd $BATS_TEST_DIRNAME >/dev/null\n  run govc import.ovf ./images/${TTYLINUX_NAME}.ovf\n  assert_success\n  popd >/dev/null\n\n  run govc vm.destroy ${TTYLINUX_NAME}\n  assert_success\n}\n\n@test \"import.ovf with name in options\" {\n  name=$(new_id)\n  file=$($mktemp --tmpdir govc-test-XXXXX)\n  echo \"{ \\\"Name\\\": \\\"${name}\\\"}\" > ${file}\n\n  run govc import.ovf -options=\"${file}\" $GOVC_IMAGES/${TTYLINUX_NAME}.ovf\n  assert_success\n\n  run govc vm.destroy \"${name}\"\n  assert_success\n\n  rm -f ${file}\n}\n\n@test \"import.ovf with name as argument\" {\n  name=$(new_id)\n\n  run govc import.ova -name=\"${name}\" $GOVC_IMAGES/${TTYLINUX_NAME}.ova\n  assert_success\n\n  run govc vm.destroy \"${name}\"\n  assert_success\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/license.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n# These tests should only run against a server running an evaluation license.\nverify_evaluation() {\n  if [ \"$(govc license.ls -json | jq -r .[0].EditionKey)\" != \"eval\" ]; then\n    skip \"requires evaluation license\"\n  fi\n}\n\nget_key() {\n  jq \".[] | select(.LicenseKey == \\\"$1\\\")\"\n}\n\nget_property() {\n  jq -r \".Properties[] | select(.Key == \\\"$1\\\") | .Value\"\n}\n\n@test \"license.add\" {\n  verify_evaluation\n\n  run govc license.add -json 00000-00000-00000-00000-00001 00000-00000-00000-00000-00002\n  assert_success\n\n  # Expect to see an entry for both the first and the second key\n  assert_equal \"License is not valid for this product\" $(get_key 00000-00000-00000-00000-00001 <<<${output} | get_property diagnostic)\n  assert_equal \"License is not valid for this product\" $(get_key 00000-00000-00000-00000-00002 <<<${output} | get_property diagnostic)\n}\n\n@test \"license.remove\" {\n  verify_evaluation\n\n  run govc license.remove -json 00000-00000-00000-00000-00001\n  assert_success\n}\n\n@test \"license.ls\" {\n  verify_evaluation\n\n  run govc license.ls -json\n  assert_success\n\n  # Expect the test instance to run in evaluation mode\n  assert_equal \"Evaluation Mode\" $(get_key 00000-00000-00000-00000-00000 <<<$output | jq -r \".Name\")\n}\n\n@test \"license.decode\" {\n  verify_evaluation\n\n  key=00000-00000-00000-00000-00000\n  assert_equal \"eval\" $(govc license.decode $key | grep $key | awk '{print $2}')\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/logs.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"logs\" {\n  run govc logs\n  assert_success\n  nlogs=${#lines[@]}\n  # there should be plenty more than 1 line of hostd logs\n  [ $nlogs -ge 1 ]\n\n  # test -n flag\n  run govc logs -n $((nlogs - 10))\n  assert_success\n  [ ${#lines[@]} -le $nlogs ]\n\n  run govc logs -log vmkernel\n  assert_success\n  nlogs=${#lines[@]}\n  # there should be plenty more than 1 line of vmkernel logs\n  [ $nlogs -ge 1 ]\n\n  # test > 1 call to BrowseLog()\n  run govc logs -n 2002\n  assert_success\n\n  # -host ignored against ESX\n  run govc logs -host enoent\n  assert_success\n\n  run govc logs -log enoent\n  assert_failure\n}\n\n@test \"logs.ls\" {\n  run govc logs.ls\n  assert_success\n\n  # -host ignored against ESX\n  run govc logs.ls -host enoent\n  assert_success\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/ls.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"ls\" {\n  run govc ls\n  assert_success\n  # /dc/{vm,network,host,datastore}\n  n=${#lines[@]}\n  [ $n -ge 4 ]\n\n  # list entire inventory\n  run govc ls '/**'\n  assert_success\n  [ ${#lines[@]} -ge $n ]\n\n  run govc ls ./...\n  assert_success\n  [ ${#lines[@]} -ge $n ]\n\n  run govc ls -t HostSystem '*'\n  assert_success\n  [ ${#lines[@]} -eq 0 ]\n\n  run govc ls host\n  assert_success\n  [ ${#lines[@]} -ge 1 ]\n\n  run govc ls enoent\n  assert_success\n  [ ${#lines[@]} -eq 0 ]\n}\n\n@test \"ls -R\" {\n  # search entire inventory\n  run govc ls ./...\n  assert_success\n  # should have at least 1 dc + folders, 1 host, 1 network, 1 datastore\n  [ ${#lines[@]} -ge 9 ]\n\n  run govc ls -t HostSystem ./...\n  assert_success\n  [ ${#lines[@]} -eq 1 ]\n\n  run govc ls -t Datacenter /...\n  assert_success\n  [ ${#lines[@]} -eq 1 ]\n\n  run govc ls -t ResourcePool host/...\n  assert_success\n  [ ${#lines[@]} -ge 1 ]\n\n  run govc ls -t ResourcePool vm/...\n  assert_success\n  [ ${#lines[@]} -eq 0 ]\n\n  c=$(govc ls -t ComputeResource ./... 
| head -1)\n  run govc ls -t ResourcePool \"$c/...\"\n  assert_success\n  [ ${#lines[@]} -ge 1 ]\n}\n\n@test \"ls vm\" {\n  vm=$(new_empty_vm)\n\n  run govc ls vm\n  assert_success\n  [ ${#lines[@]} -ge 1 ]\n\n  run govc ls vm/$vm\n  assert_success\n  [ ${#lines[@]} -eq 1 ]\n\n  run govc ls /*/vm/$vm\n  assert_success\n  [ ${#lines[@]} -eq 1 ]\n}\n\n@test \"ls network\" {\n  run govc ls network\n  assert_success\n  [ ${#lines[@]} -ge 1 ]\n\n  local path=${lines[0]}\n  run govc ls \"$path\"\n  assert_success\n  [ ${#lines[@]} -eq 1 ]\n\n  run govc ls \"network/$(basename \"$path\")\"\n  assert_success\n  [ ${#lines[@]} -eq 1 ]\n\n  run govc ls \"/*/network/$(basename \"$path\")\"\n  assert_success\n  [ ${#lines[@]} -eq 1 ]\n}\n\n@test \"ls multi ds\" {\n  vcsim_env\n\n  run govc ls\n  assert_success\n  # /DC0/{vm,network,host,datastore}\n  [ ${#lines[@]} -eq 4 ]\n\n  run govc ls /DC*\n  assert_success\n  # /DC[0,1]/{vm,network,host,datastore}\n  [ ${#lines[@]} -eq 8 ]\n\n  # here 'vm' is relative to /DC0\n  run govc ls vm\n  assert_success\n  [ ${#lines[@]} -gt 0 ]\n\n  unset GOVC_DATACENTER\n\n  run govc ls\n  assert_success\n  # /DC[0,1]\n  [ ${#lines[@]} -eq 2 ]\n\n  run govc ls -dc enoent\n  assert_failure\n  [ ${#lines[@]} -gt 0 ]\n\n  # here 'vm' is relative to '/' - so there are no matches\n  run govc ls vm\n  assert_success\n  [ ${#lines[@]} -eq 0 ]\n\n  # ls all vms in all datacenters\n  run govc ls */vm\n  assert_success\n  [ ${#lines[@]} -gt 0 ]\n}\n\n@test \"ls moref\" {\n    # ensure the vm folder isn't empty\n    run govc vm.create -on=false \"$(new_id)\"\n    assert_success\n\n    # list dc folder paths\n    folders1=$(govc ls)\n    # list dc folder refs | govc ls -L ; should output the same paths\n    folders2=$(govc ls -i | xargs govc ls -L)\n\n    assert_equal \"$folders1\" \"$folders2\"\n\n    for folder in $folders1\n    do\n        # list paths in $folder\n        items1=$(govc ls \"$folder\")\n        # list refs in $folder | govc ls -L ; 
should output the same paths\n        items2=$(govc ls -i \"$folder\" | xargs -d '\\n' govc ls -L)\n\n        assert_equal \"$items1\" \"$items2\"\n    done\n\n    ref=ViewManager:ViewManager\n    path=$(govc ls -L $ref)\n    assert_equal \"$ref\" \"$path\"\n\n    path=$(govc ls -L Folder:ha-folder-root)\n    assert_equal \"/\" \"$path\"\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/metric.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"metric.ls\" {\n  run govc metric.ls\n  assert_failure\n\n  run govc metric.ls enoent\n  assert_failure\n\n  host=$(govc ls -t HostSystem ./... | head -n 1)\n  pool=$(govc ls -t ResourcePool ./... | head -n 1)\n\n  run govc metric.ls \"$host\"\n  assert_success\n\n  run govc metric.ls -json \"$host\"\n  assert_success\n\n  run govc metric.ls \"$pool\"\n  assert_success\n}\n\n@test \"metric.sample\" {\n  host=$(govc ls -t HostSystem ./... | head -n 1)\n  metrics=($(govc metric.ls \"$host\"))\n\n  run govc metric.sample \"$host\" enoent\n  assert_failure\n\n  run govc metric.sample \"$host\" \"${metrics[@]}\"\n  assert_success\n\n  run govc metric.sample -instance - \"$host\" \"${metrics[@]}\"\n  assert_success\n\n  run govc metric.sample -json \"$host\" \"${metrics[@]}\"\n  assert_success\n\n  vm=$(new_ttylinux_vm)\n\n  run govc metric.ls \"$vm\"\n  assert_output \"\"\n\n  run govc vm.power -on \"$vm\"\n  assert_success\n\n  run govc vm.ip \"$vm\"\n  assert_success\n\n  metrics=($(govc metric.ls \"$vm\"))\n\n  run govc metric.sample \"$vm\" \"${metrics[@]}\"\n  assert_success\n\n  run govc metric.sample -json \"$vm\" \"${metrics[@]}\"\n  assert_success\n\n  run govc metric.sample \"govc-test-*\" \"${metrics[@]}\"\n  assert_success\n}\n\n@test \"metric.info\" {\n  host=$(govc ls -t HostSystem ./... | head -n 1)\n  metrics=($(govc metric.ls \"$host\"))\n\n  run govc metric.info \"$host\" enoent\n  assert_failure\n\n  run govc metric.info \"$host\"\n  assert_success\n\n  run govc metric.info -json \"$host\"\n  assert_success\n\n  run govc metric.sample \"$host\" \"${metrics[@]}\"\n  assert_success\n\n  run govc metric.info \"$host\" \"${metrics[@]}\"\n  assert_success\n\n  run govc metric.info - \"${metrics[@]}\"\n  assert_success\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/metric_info_test.sh",
    "content": "#!/bin/bash -e\n\ntypes=\"Datacenter HostSystem ClusterComputeResource ResourcePool VirtualMachine Datastore VirtualApp\"\n\nfor type in $types ; do\n  echo \"$type...\"\n\n  obj=$(govc ls -t \"$type\" ./... | head -n 1)\n  if [ -z \"$obj\" ] ; then\n    echo \"...no instances found\"\n    continue\n  fi\n\n  if ! govc metric.info \"$obj\" 2>/dev/null ; then\n    echo \"...N/A\" # Datacenter, Datastore on ESX for example\n    continue\n  fi\n\n  govc metric.ls \"$obj\" | xargs govc metric.sample -n 5 \"$obj\"\ndone\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/network.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"network dvs backing\" {\n  vcsim_env\n\n  # DVS backed network by default (from vcsim_env)\n  vm=$(new_empty_vm)\n\n  eth0=$(govc device.ls -vm $vm | grep ethernet- | awk '{print $1}')\n  run govc device.info -vm $vm $eth0\n  assert_success\n\n  summary=$(govc device.info -vm $vm $eth0 | grep Summary: | awk '{print $2}')\n  assert_equal \"DVSwitch:\" $summary\n\n  run govc device.remove -vm $vm $eth0\n  assert_success\n\n  eth0=$(govc device.ls -vm $vm | grep ethernet- | awk '{print $1}')\n  [ -z \"$eth0\" ]\n\n  # Standard network backing\n  run govc vm.network.add -vm $vm -net \"VM Network\"\n  assert_success\n\n  eth0=$(govc device.ls -vm $vm | grep ethernet- | awk '{print $1}')\n\n  run govc device.info -vm $vm $eth0\n  assert_success\n\n  summary=$(govc device.info -vm $vm $eth0 | grep Summary: | awk -F: '{print $2}')\n  assert_equal \"VM Network\" $(collapse_ws $summary)\n\n  run govc device.remove -vm $vm $eth0\n  assert_success\n\n  run govc device.remove -vm $vm $eth0\n  assert_failure \"govc: device '$eth0' not found\"\n}\n\n@test \"network change backing\" {\n  vcsim_env\n\n  vm=$(new_empty_vm)\n\n  eth0=$(govc device.ls -vm $vm | grep ethernet- | awk '{print $1}')\n  run govc vm.network.change -vm $vm $eth0 enoent\n  assert_failure \"govc: network 'enoent' not found\"\n\n  run govc vm.network.change -vm $vm enoent \"VM Network\"\n  assert_failure \"govc: device 'enoent' not found\"\n\n  run govc vm.network.change -vm $vm $eth0 \"VM Network\"\n  assert_success\n\n  run govc vm.network.change -vm $vm $eth0\n  assert_success\n\n  unset GOVC_NETWORK\n  run govc vm.network.change -vm $vm $eth0\n  assert_failure \"govc: default network resolves to multiple instances, please specify\"\n\n  run govc vm.power -on $vm\n  assert_success\n  run govc vm.power -off $vm\n\n  mac=$(vm_mac $vm)\n  run govc vm.network.change -vm $vm -net \"VM Network\" $eth0\n  assert_success\n\n  # verify we didn't 
change the mac address\n  run govc vm.power -on $vm\n  assert_success\n  assert_equal $mac $(vm_mac $vm)\n}\n\n@test \"network standard backing\" {\n  vm=$(new_empty_vm)\n\n  run govc device.info -vm $vm ethernet-0\n  assert_success\n\n  run govc device.remove -vm $vm ethernet-0\n  assert_success\n\n  run govc device.info -vm $vm ethernet-0\n  assert_failure\n\n  run govc vm.network.add -vm $vm enoent\n  assert_failure \"govc: network 'enoent' not found\"\n\n  run govc vm.network.add -vm $vm \"VM Network\"\n  assert_success\n\n  run govc device.info -vm $vm ethernet-0\n  assert_success\n}\n\n@test \"network adapter\" {\n  vm=$(new_id)\n  run govc vm.create -on=false -net.adapter=enoent $vm\n  assert_failure \"govc: unknown ethernet card type 'enoent'\"\n\n  vm=$(new_id)\n  run govc vm.create -on=false -net.adapter=vmxnet3 $vm\n  assert_success\n\n  eth0=$(govc device.ls -vm $vm | grep ethernet- | awk '{print $1}')\n  type=$(govc device.info -vm $vm $eth0 | grep Type: | awk -F: '{print $2}')\n  assert_equal \"VirtualVmxnet3\" $(collapse_ws $type)\n\n  run govc vm.network.add -vm $vm -net.adapter e1000e \"VM Network\"\n  assert_success\n\n  eth1=$(govc device.ls -vm $vm | grep ethernet- | grep -v $eth0 | awk '{print $1}')\n  type=$(govc device.info -vm $vm $eth1 | grep Type: | awk -F: '{print $2}')\n  assert_equal \"VirtualE1000e\" $(collapse_ws $type)\n}\n\n@test \"network flag required\" {\n  vcsim_env\n\n  # -net flag is required when there are multiple networks\n  unset GOVC_NETWORK\n  run govc vm.create -on=false $(new_id)\n  assert_failure \"govc: default network resolves to multiple instances, please specify\"\n}\n\n@test \"network change hardware address\" {\n  mac=\"00:00:0f$(dd bs=1 count=3 if=/dev/random 2>/dev/null | hexdump -v -e '/1 \":%02x\"')\"\n  vm=$(new_id)\n  run govc vm.create -on=false $vm\n  assert_success\n\n  run govc vm.network.change -vm $vm -net.address $mac ethernet-0\n  assert_success\n\n  run govc vm.power -on $vm\n  assert_success\n\n  
assert_equal $mac $(vm_mac $vm)\n}\n\n@test \"dvs.portgroup\" {\n  vcsim_env\n  id=$(new_id)\n\n  run govc dvs.create \"$id\"\n  assert_success\n\n  run govc dvs.portgroup.add -dvs \"$id\" -type earlyBinding -nports 16 \"${id}-ExternalNetwork\"\n  assert_success\n\n  run govc dvs.portgroup.add -dvs \"$id\" -type ephemeral -vlan 3122 \"${id}-InternalNetwork\"\n  assert_success\n\n  info=$(govc dvs.portgroup.info \"$id\" | grep VlanId: | uniq | grep 3122)\n  [ -n \"$info\" ]\n\n  run govc dvs.portgroup.change -vlan 3123 \"${id}-InternalNetwork\"\n  assert_success\n\n  info=$(govc dvs.portgroup.info \"$id\" | grep VlanId: | uniq | grep 3123)\n  [ -n \"$info\" ]\n\n  run govc object.destroy \"network/${id}-ExternalNetwork\" \"network/${id}-InternalNetwork\" \"network/${id}\"\n  assert_success\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/object.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"object.destroy\" {\n    run govc object.destroy \"/enoent\"\n    assert_failure\n\n    run govc object.destroy\n    assert_failure\n\n    vm=$(new_id)\n    run govc vm.create \"$vm\"\n    assert_success\n\n    # fails when powered on\n    run govc object.destroy \"vm/$vm\"\n    assert_failure\n\n    run govc vm.power -off \"$vm\"\n    assert_success\n\n    run govc object.destroy \"vm/$vm\"\n    assert_success\n}\n\n@test \"object.rename\" {\n  run govc object.rename \"/enoent\" \"nope\"\n  assert_failure\n\n  vm=$(new_id)\n  run govc vm.create -on=false \"$vm\"\n  assert_success\n\n  run govc object.rename \"vm/$vm\" \"${vm}-renamed\"\n  assert_success\n\n  run govc object.rename \"vm/$vm\" \"${vm}-renamed\"\n  assert_failure\n\n  run govc object.destroy \"vm/${vm}-renamed\"\n  assert_success\n}\n\n@test \"object.mv\" {\n  vcsim_env\n\n  folder=$(new_id)\n\n  run govc folder.create \"vm/$folder\"\n  assert_success\n\n  for _ in $(seq 1 3) ; do\n    vm=$(new_id)\n    run govc vm.create -folder \"$folder\" \"$vm\"\n    assert_success\n  done\n\n  result=$(govc ls \"vm/$folder\" | wc -l)\n  [ \"$result\" -eq \"3\" ]\n\n  run govc folder.create \"vm/${folder}-2\"\n  assert_success\n\n  run govc object.mv \"vm/$folder/*\" \"vm/${folder}-2\"\n  assert_success\n\n  result=$(govc ls \"vm/${folder}-2\" | wc -l)\n  [ \"$result\" -eq \"3\" ]\n\n  result=$(govc ls \"vm/$folder\" | wc -l)\n  [ \"$result\" -eq \"0\" ]\n}\n\n@test \"object.collect\" {\n  run govc object.collect\n  assert_success\n\n  run govc object.collect -json\n  assert_success\n\n  run govc object.collect -\n  assert_success\n\n  run govc object.collect -json -\n  assert_success\n\n  run govc object.collect - content\n  assert_success\n\n  run govc object.collect -json - content\n  assert_success\n\n  root=$(govc object.collect - content | grep content.rootFolder | awk '{print $3}')\n\n  dc=$(govc object.collect \"$root\" childEntity | awk 
'{print $3}' | cut -d, -f1)\n\n  hostFolder=$(govc object.collect \"$dc\" hostFolder | awk '{print $3}')\n\n  cr=$(govc object.collect \"$hostFolder\" childEntity | awk '{print $3}' | cut -d, -f1)\n\n  host=$(govc object.collect \"$cr\" host | awk '{print $3}' | cut -d, -f1)\n\n  run govc object.collect \"$host\"\n  assert_success\n\n  run govc object.collect \"$host\" hardware\n  assert_success\n\n  run govc object.collect \"$host\" hardware.systemInfo\n  assert_success\n\n  uuid=$(govc object.collect \"$host\" hardware.systemInfo.uuid | awk '{print $3}')\n  uuid_s=$(govc object.collect -s \"$host\" hardware.systemInfo.uuid)\n  assert_equal \"$uuid\" \"$uuid_s\"\n\n  run govc object.collect \"$(govc ls host | head -n1)\"\n  assert_success\n\n  # test against slice of interface\n  perfman=$(govc object.collect -s - content.perfManager)\n  result=$(govc object.collect -s \"$perfman\" description.counterType)\n  assert_equal \"...\" \"$result\"\n\n  # test against an interface field\n  run govc object.collect '/ha-datacenter/network/VM Network' summary\n  assert_success\n}\n\n@test \"object.find\" {\n  unset GOVC_DATACENTER\n\n  run govc find \"/enoent\"\n  assert_failure\n\n  run govc find\n  assert_success\n\n  run govc find .\n  assert_success\n\n  run govc find /\n  assert_success\n\n  run govc find . -type HostSystem\n  assert_success\n\n  dc=$(govc find / -type Datacenter | head -1)\n\n  run govc find \"$dc\" -maxdepth 0\n  assert_output \"$dc\"\n\n  run govc find \"$dc/vm\" -maxdepth 0\n  assert_output \"$dc/vm\"\n\n  run govc find \"$dc\" -maxdepth 1 -type Folder\n  assert_success\n  # /<datacenter>/{vm,network,host,datastore}\n  [ ${#lines[@]} -eq 4 ]\n\n  folder=$(govc find -type Folder -name vm)\n\n  vm=$(new_empty_vm)\n\n  run govc find . 
-name \"$vm\"\n  assert_output \"$folder/$vm\"\n\n  run govc find \"$folder\" -name \"$vm\"\n  assert_output \"$folder/$vm\"\n\n  # moref for VM Network\n  net=$(govc find -i network -name \"$GOVC_NETWORK\")\n\n  # $vm.network.contains($net) == true\n  run govc find . -type m -name \"$vm\" -network \"$net\"\n  assert_output \"$folder/$vm\"\n\n  # remove network reference\n  run govc device.remove -vm \"$vm\" ethernet-0\n  assert_success\n\n  # $vm.network.contains($net) == false\n  run govc find . -type VirtualMachine -name \"$vm\" -network \"$net\"\n  assert_output \"\"\n\n  run govc find \"$folder\" -type VirtualMachine -name \"govc-test-*\" -runtime.powerState poweredOn\n  assert_output \"\"\n\n  run govc find \"$folder\" -type VirtualMachine -name \"govc-test-*\" -runtime.powerState poweredOff\n  assert_output \"$folder/$vm\"\n\n  run govc vm.power -on \"$vm\"\n  assert_success\n\n  run govc find \"$folder\" -type VirtualMachine -name \"govc-test-*\" -runtime.powerState poweredOff\n  assert_output \"\"\n\n  run govc find \"$folder\" -type VirtualMachine -name \"govc-test-*\" -runtime.powerState poweredOn\n  assert_output \"$folder/$vm\"\n\n  # output paths should be relative to \".\" in these cases\n  export GOVC_DATACENTER=$dc\n\n  folder=\"./vm\"\n\n  run govc find . -name \"$vm\"\n  assert_output \"$folder/$vm\"\n\n  run govc find \"$folder\" -name \"$vm\"\n}\n\n@test \"object.method\" {\n  vcsim_env\n\n  vm=$(govc find vm -type m | head -1)\n\n  run govc object.method -enable=false -name NoSuchMethod \"$vm\"\n  assert_failure\n\n  run govc object.method -enable=false -name Destroy_Task enoent\n  assert_failure\n\n  run govc object.collect -s \"$vm\" disabledMethod\n  ! 
assert_matches \"Destroy_Task\" \"$output\"\n\n  run govc object.method -enable=false -name Destroy_Task \"$vm\"\n  assert_success\n\n  run govc object.collect -s \"$vm\" disabledMethod\n  assert_matches \"Destroy_Task\" \"$output\"\n\n  run govc object.method -enable -name Destroy_Task \"$vm\"\n  assert_success\n\n  run govc object.collect -s \"$vm\" disabledMethod\n  ! assert_matches \"Destroy_Task\" \"$output\"\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/pool.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"pool.create\" {\n  path=\"*/Resources/$(new_id)/$(new_id)\"\n  run govc pool.create $path\n  assert_failure\n  assert_line \"govc: cannot create resource pool '$(basename ${path})': parent not found\"\n\n  id=$(new_id)\n  path=\"*/Resources/$id\"\n  run govc pool.create -cpu.shares low -mem.reservation 500 $path\n  assert_success\n\n  run govc pool.info $path\n  assert_success\n\n  assert_line \"Name: $id\"\n  assert_line \"CPU Shares: low\"\n  assert_line \"Mem Reservation: 500MB (expandable=true)\"\n\n  run govc pool.destroy $path\n  assert_success\n}\n\n@test \"pool.create multiple\" {\n  id=$(new_id)\n  path=\"*/Resources/$id\"\n  govc pool.create $path\n\n  # Finder.List($path)\n  run govc pool.info \"$path\"\n  assert_success\n\n  # Finder.Find($name)\n  run govc pool.info \"$id\"\n  assert_success\n\n  # Create multiple parent pools with multiple arguments (without globbing)\n  run govc pool.create $path/a $path/b\n  assert_success\n  result=$(govc ls \"host/$path/*\" | wc -l)\n  [ $result -eq 2 ]\n\n  # Create multiple child pools with one argument (with globbing)\n  run govc pool.create $path/*/{a,b}\n  assert_success\n  result=$(govc ls \"host/$path/*/*\" | wc -l)\n  [ $result -eq 4 ]\n\n  # Clean up\n  run govc pool.destroy $path/*/* $path/* $path\n  assert_success\n}\n\n@test \"pool.change\" {\n  id=$(new_id)\n  path=\"*/Resources/$id\"\n  govc pool.create $path\n\n  run govc pool.change -mem.shares high $path\n  assert_success\n  run govc pool.info $path\n  assert_success\n  assert_line \"Mem Shares: high\"\n  assert_line \"CPU Shares: normal\"\n\n  nid=$(new_id)\n  run govc pool.change -name $nid $path\n  assert_success\n  path=\"*/Resources/$nid\"\n\n  run govc pool.info $path\n  assert_success\n  assert_line \"Name: $nid\"\n\n  run govc pool.destroy $path\n  assert_success\n}\n\n@test \"pool.change multiple\" {\n  id=$(new_id)\n  path=\"*/Resources/$id\"\n  govc pool.create $path\n\n 
 # Create some nested pools so that we can test changing multiple in one call\n  govc pool.create $path/{a,b} $path/{a,b}/test\n\n  # Test precondition\n  run govc pool.info $path/a/test\n  assert_success\n  assert_line \"Name: test\"\n  run govc pool.info $path/b/test\n  assert_success\n  assert_line \"Name: test\"\n\n  # Change name of both test pools\n  run govc pool.change -name hello $path/*/test\n  assert_success\n\n  # Test postcondition\n  run govc pool.info $path/a/hello\n  assert_success\n  assert_line \"Name: hello\"\n  run govc pool.info $path/b/hello\n  assert_success\n  assert_line \"Name: hello\"\n\n  # Clean up\n  govc pool.destroy $path/a/hello\n  govc pool.destroy $path/a\n  govc pool.destroy $path/b/hello\n  govc pool.destroy $path/b\n  govc pool.destroy $path\n}\n\n@test \"pool.destroy\" {\n  id=$(new_id)\n\n  # parent pool\n  path=\"*/Resources/$id\"\n  run govc pool.create $path\n  assert_success\n\n  result=$(govc ls \"host/$path/*\" | wc -l)\n  [ $result -eq 0 ]\n\n  # child pools\n  id1=$(new_id)\n  run govc pool.create $path/$id1\n  assert_success\n\n  id2=$(new_id)\n  run govc pool.create $path/$id2\n  assert_success\n\n  # 2 child pools\n  result=$(govc ls \"host/$path/*\" | wc -l)\n  [ $result -eq 2 ]\n\n  # 1 parent pool\n  result=$(govc ls \"host/$path\" | wc -l)\n  [ $result -eq 1 ]\n\n  run govc pool.destroy $path\n  assert_success\n\n  # no more parent pool\n  result=$(govc ls \"host/$path\" | wc -l)\n  [ $result -eq 0 ]\n\n  # the child pools are not present anymore\n  # the only place they could pop into is the parent pool\n\n  # first child pool\n  result=$(govc ls \"host/*/Resources/$id1\" | wc -l)\n  [ $result -eq 0 ]\n\n  # second child pool\n  result=$(govc ls \"host/*/Resources/$id2\" | wc -l)\n  [ $result -eq 0 ]\n}\n\n@test \"pool.destroy children\" {\n  id=$(new_id)\n\n  # parent pool\n  path=\"*/Resources/$id\"\n  run govc pool.create $path\n  assert_success\n\n  result=$(govc ls \"host/$path/*\" | wc -l)\n  [ $result 
-eq 0 ]\n\n  # child pools\n  run govc pool.create $path/$(new_id)\n  assert_success\n\n  run govc pool.create $path/$(new_id)\n  assert_success\n\n  # 2 child pools\n  result=$(govc ls \"host/$path/*\" | wc -l)\n  [ $result -eq 2 ]\n\n  # 1 parent pool\n  result=$(govc ls \"host/*/Resources/govc-test-*\" | wc -l)\n  [ $result -eq 1 ]\n\n  # delete childs\n  run govc pool.destroy -children $path\n  assert_success\n\n  # no more child pools\n  result=$(govc ls \"host/$path/*\" | wc -l)\n  [ $result -eq 0 ]\n\n  # cleanup\n  run govc pool.destroy $path\n  assert_success\n\n  # cleanup check\n  result=$(govc ls \"host/$path\" | wc -l)\n  [ $result -eq 0 ]\n}\n\n@test \"pool.destroy multiple\" {\n  id=$(new_id)\n  path=\"*/Resources/$id\"\n  govc pool.create $path\n\n  # Create some nested pools so that we can test destroying multiple in one call\n  govc pool.create $path/{a,b}\n\n  # Test precondition\n  result=$(govc ls \"host/$path/*\" | wc -l)\n  [ $result -eq 2 ]\n\n  # Destroy both pools\n  run govc pool.destroy $path/{a,b}\n  assert_success\n\n  # Test postcondition\n  result=$(govc ls \"host/$path/*\" | wc -l)\n  [ $result -eq 0 ]\n\n  # Clean up\n  govc pool.destroy $path\n}\n\n@test \"vm.create -pool\" {\n  # test with full inventory path to pools\n  parent_path=$(govc ls 'host/*/Resources')\n  parent_name=$(basename $parent_path)\n  [ \"$parent_name\" = \"Resources\" ]\n\n  child_name=$(new_id)\n  child_path=\"$parent_path/$child_name\"\n\n  grand_child_name=$(new_id)\n  grand_child_path=\"$child_path/$grand_child_name\"\n\n  run govc pool.create $parent_path/$child_name{,/$grand_child_name}\n  assert_success\n\n  for path in $parent_path $child_path $grand_child_path\n  do\n    run govc vm.create -pool $path $(new_id)\n    assert_success\n  done\n\n  run govc pool.change -mem.limit 100 -mem.expandable=false $child_path\n  assert_failure\n\n  run govc pool.change -mem.limit 100 $child_path\n  assert_success\n\n  run govc pool.change -mem.limit 120 
-mem.expandable $child_path\n  assert_success\n\n  # test with glob inventory path to pools\n  parent_path=\"*/$parent_name\"\n  child_path=\"$parent_path/$child_name\"\n  grand_child_path=\"$child_path/$grand_child_name\"\n\n  for path in $grand_child_path $child_path\n  do\n    run govc pool.destroy $path\n    assert_success\n  done\n}\n\n@test \"vm.create -pool host\" {\n  id=$(new_id)\n\n  path=$(govc ls host)\n\n  run govc vm.create -on=false -pool enoent $id\n  assert_failure \"govc: resource pool 'enoent' not found\"\n\n  run govc vm.create -on=false -pool $path $id\n  assert_success\n}\n\n@test \"vm.create -pool cluster\" {\n  vcsim_env\n\n  id=$(new_id)\n\n  path=$(dirname $GOVC_HOST)\n\n  unset GOVC_HOST\n  unset GOVC_RESOURCE_POOL\n\n  run govc vm.create -on=false -pool enoent $id\n  assert_failure \"govc: resource pool 'enoent' not found\"\n\n  run govc vm.create -on=false -pool $path $id\n  assert_success\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/role.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"permissions.ls\" {\n  run govc permissions.ls\n  assert_success\n\n  run govc permissions.ls -json\n  assert_success\n}\n\n@test \"role.ls\" {\n  run govc role.ls\n  assert_success\n\n  run govc role.ls -json\n  assert_success\n\n  run govc role.ls Admin\n  assert_success\n\n  run govc role.ls -json Admin\n  assert_success\n\n  run govc role.ls enoent\n  assert_failure\n}\n\n@test \"role.usage\" {\n  run govc role.usage\n  assert_success\n\n  run govc role.usage -json\n  assert_success\n\n  run govc role.usage Admin\n  assert_success\n\n  run govc role.usage -json Admin\n  assert_success\n\n  run govc role.usage enoent\n  assert_failure\n}\n\n@test \"role.create\" {\n  id=$(new_id)\n  run govc role.create \"$id\"\n  assert_success\n\n  run govc role.ls \"$id\"\n  assert_success\n\n  priv=$(govc role.ls \"$id\" | wc -l)\n  vm_priv=($(govc role.ls Admin | grep VirtualMachine.))\n\n  # Test set\n  run govc role.update \"$id\" \"${vm_priv[@]}\"\n  assert_success\n\n  npriv=$(govc role.ls \"$id\" | wc -l)\n  [ \"$npriv\" -gt \"$priv\" ]\n  priv=$npriv\n\n  op_priv=($(govc role.ls \"$id\" | grep VirtualMachine.GuestOperations.))\n  # Test remove\n  run govc role.update -r \"$id\" \"${op_priv[@]}\"\n  assert_success\n\n  npriv=$(govc role.ls \"$id\" | wc -l)\n  [ \"$npriv\" -lt \"$priv\" ]\n  priv=$npriv\n\n  # Test add\n  run govc role.update -a \"$id\" \"${op_priv[@]}\"\n  assert_success\n\n  npriv=$(govc role.ls \"$id\" | wc -l)\n  [ \"$npriv\" -gt \"$priv\" ]\n  priv=$npriv\n\n  # Test rename\n  run govc role.update -name \"${id}-N\" \"$id\"\n  assert_success\n\n  id=\"${id}-N\"\n  # Test we didn't drop any privileges during rename\n  [ \"$priv\" -eq \"$(govc role.ls \"$id\" | wc -l)\" ]\n\n  run govc role.remove \"${id}\"\n  assert_success\n\n  run govc role.ls \"$id\"\n  assert_failure\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/session.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"session.ls\" {\n  run govc session.ls\n  assert_success\n\n  run govc session.ls -json\n  assert_success\n\n  # Test User-Agent\n  govc session.ls | grep \"$(govc version | tr ' ' /)\"\n}\n\n@test \"session.rm\" {\n  run govc session.rm enoent\n  assert_failure\n  assert_output \"govc: ServerFaultCode: The object or item referred to could not be found.\"\n\n  # Can't remove the current session\n  id=$(govc session.ls -json | jq -r .CurrentSession.Key)\n  run govc session.rm \"$id\"\n  assert_failure\n\n  thumbprint=$(govc about.cert -thumbprint)\n  # persist session just to avoid the Logout() so we can session.rm below\n  dir=$(mktemp -d govc-test-XXXXX)\n\n  id=$(GOVMOMI_HOME=\"$dir\" govc session.ls -json -k=false -persist-session -tls-known-hosts <(echo \"$thumbprint\") | jq -r .CurrentSession.Key)\n\n  rm -rf \"$dir\"\n\n  run govc session.rm \"$id\"\n  assert_success\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/test_helper.bash",
    "content": "# set the following variables only if they've not been set\nGOVC_TEST_URL=${GOVC_TEST_URL-\"https://root:vagrant@localhost:18443/sdk\"}\nexport GOVC_URL=$GOVC_TEST_URL\nexport GOVC_DATASTORE=${GOVC_DATASTORE-datastore1}\nexport GOVC_NETWORK=${GOVC_NETWORK-\"VM Network\"}\n\nexport GOVC_INSECURE=true\nexport GOVC_PERSIST_SESSION=false\nunset GOVC_DEBUG\nunset GOVC_TLS_KNOWN_HOSTS\nunset GOVC_DATACENTER\nunset GOVC_HOST\nunset GOVC_USERNAME\nunset GOVC_PASSWORD\n\nif [ -z \"$BATS_TEST_DIRNAME\" ]; then\n  BATS_TEST_DIRNAME=$(dirname ${BASH_SOURCE})\nfi\n\n# gnu core utils\nreadlink=$(type -p greadlink readlink | head -1)\nxargs=$(type -p gxargs xargs | head -1)\nmktemp=$(type -p gmktemp mktemp | head -1)\n\nBATS_TEST_DIRNAME=$($readlink -nf $BATS_TEST_DIRNAME)\n\nGOVC_IMAGES=$BATS_TEST_DIRNAME/images\nTTYLINUX_NAME=ttylinux-pc_i486-16.1\n\nGOVC_TEST_VMDK_SRC=$GOVC_IMAGES/${TTYLINUX_NAME}-disk1.vmdk\nGOVC_TEST_VMDK=govc-images/$(basename $GOVC_TEST_VMDK_SRC)\n\nGOVC_TEST_ISO_SRC=$GOVC_IMAGES/${TTYLINUX_NAME}.iso\nGOVC_TEST_ISO=govc-images/$(basename $GOVC_TEST_ISO_SRC)\n\nGOVC_TEST_IMG_SRC=$GOVC_IMAGES/floppybird.img\nGOVC_TEST_IMG=govc-images/$(basename $GOVC_TEST_IMG_SRC)\n\nPATH=\"$(dirname $BATS_TEST_DIRNAME):$PATH\"\n\nteardown() {\n  govc ls vm | grep govc-test- | $xargs -r govc vm.destroy\n  govc datastore.ls | grep govc-test- | awk '{print ($NF)}' | $xargs -n1 -r govc datastore.rm\n  govc ls \"host/*/Resources/govc-test-*\" | $xargs -r govc pool.destroy\n}\n\nnew_id() {\n  echo \"govc-test-$(uuidgen)\"\n}\n\nimport_ttylinux_vmdk() {\n  govc datastore.mkdir -p govc-images\n  govc datastore.ls \"$GOVC_TEST_VMDK\" >/dev/null 2>&1 || \\\n    govc import.vmdk \"$GOVC_TEST_VMDK_SRC\" govc-images > /dev/null\n}\n\ndatastore_upload() {\n  src=$1\n  dst=govc-images/$(basename $src)\n\n  govc datastore.mkdir -p govc-images\n  govc datastore.ls \"$dst\" >/dev/null 2>&1 || \\\n    govc datastore.upload \"$src\" \"$dst\" > /dev/null\n}\n\nupload_img() {\n  
datastore_upload $GOVC_TEST_IMG_SRC\n}\n\nupload_iso() {\n  datastore_upload $GOVC_TEST_ISO_SRC\n}\n\nnew_ttylinux_vm() {\n  import_ttylinux_vmdk # TODO: make this part of vagrant provision\n  id=$(new_id)\n  govc vm.create -m 32 -disk $GOVC_TEST_VMDK -disk.controller ide -on=false $id\n  echo $id\n}\n\nnew_empty_vm() {\n  id=$(new_id)\n  govc vm.create -on=false $id\n  echo $id\n}\n\nvm_power_state() {\n  govc vm.info \"$1\" | grep \"Power state:\" | awk -F: '{print $2}' | collapse_ws\n}\n\nvm_mac() {\n  govc device.info -vm \"$1\" ethernet-0 | grep \"MAC Address\" | awk '{print $NF}'\n}\n\n# exports an environment for using vcsim if running, otherwise skips the calling test.\nvcsim_env() {\n  if [ \"$(uname)\" == \"Darwin\" ]; then\n    PATH=\"/Applications/VMware Fusion.app/Contents/Library:$PATH\"\n  fi\n\n  if [ \"$(vmrun list | grep $BATS_TEST_DIRNAME/vcsim | wc -l)\" -eq 1 ]; then\n    export GOVC_URL=https://root:vmware@localhost:16443/sdk \\\n           GOVC_DATACENTER=DC0 \\\n           GOVC_DATASTORE=GlobalDS_0 \\\n           GOVC_HOST=/DC0/host/DC0_C0/DC0_C0_H0 \\\n           GOVC_RESOURCE_POOL=/DC0/host/DC0_C0/Resources \\\n           GOVC_NETWORK=/DC0/network/DC0_DVPG0\n  else\n    skip \"requires vcsim\"\n  fi\n}\n\n# remove username/password from $GOVC_URL and set $GOVC_{USERNAME,PASSWORD}\ngovc_url_to_vars() {\n  GOVC_USERNAME=\"$(govc env GOVC_USERNAME)\"\n  GOVC_PASSWORD=\"$(govc env GOVC_PASSWORD)\"\n  GOVC_URL=\"$(govc env GOVC_URL)\"\n  export GOVC_URL GOVC_USERNAME GOVC_PASSWORD\n\n  # double check that we removed user/pass\n  grep -q -v @ <<<\"$GOVC_URL\"\n}\n\nquit_vnc() {\n  if [ \"$(uname)\" = \"Darwin\" ]; then\n    osascript <<EOF\ntell application \"Screen Sharing\"\n   quit\nend tell\nEOF\n  fi\n}\n\nopen_vnc() {\n  url=$1\n  echo \"open $url\"\n\n  if [ \"$(uname)\" = \"Darwin\" ]; then\n    open $url\n  fi\n}\n\n# collapse spaces, for example testing against Go's tabwriter output\ncollapse_ws() {\n  local line\n  if [ $# -eq 0 ]; 
then line=\"$(cat -)\"\n  else line=\"$@\"\n  fi\n  echo \"$line\" | tr -s ' ' | sed -e 's/^ //'\n}\n\n# the following helpers are borrowed from the test_helper.bash in https://github.com/sstephenson/rbenv\n\nflunk() {\n  { if [ \"$#\" -eq 0 ]; then cat -\n    else echo \"$@\"\n    fi\n  } >&2\n  return 1\n}\n\nassert_success() {\n  if [ \"$status\" -ne 0 ]; then\n    flunk \"command failed with exit status $status: $output\"\n  elif [ \"$#\" -gt 0 ]; then\n    assert_output \"$1\"\n  fi\n}\n\nassert_failure() {\n  if [ \"$status\" -ne 1 ]; then\n    flunk $(printf \"expected failed exit status=1, got status=%d\" $status)\n  elif [ \"$#\" -gt 0 ]; then\n    assert_output \"$1\"\n  fi\n}\n\nassert_equal() {\n  if [ \"$1\" != \"$2\" ]; then\n    { echo \"expected: $1\"\n      echo \"actual:   $2\"\n    } | flunk\n  fi\n}\n\nassert_output() {\n  local expected\n  if [ $# -eq 0 ]; then expected=\"$(cat -)\"\n  else expected=\"$1\"\n  fi\n  assert_equal \"$expected\" \"$output\"\n}\n\nassert_matches() {\n  local pattern=\"${1}\"\n  local actual=\"${2}\"\n\n  if [ $# -eq 1 ]; then\n    actual=\"$(cat -)\"\n  fi\n\n  if ! 
grep -q \"${pattern}\" <<<\"${actual}\"; then\n    { echo \"pattern: ${pattern}\"\n      echo \"actual:  ${actual}\"\n    } | flunk\n  fi\n}\n\nassert_empty() {\n  local actual=\"${1}\"\n\n  if [ $# -eq 0 ]; then\n    actual=\"$(cat -)\"\n  fi\n\n  if [ -n \"${actual}\" ]; then\n    { echo \"actual: ${actual}\"\n    } | flunk\n  fi\n}\n\nassert_line() {\n  if [ \"$1\" -ge 0 ] 2>/dev/null; then\n    assert_equal \"$2\" \"$(collapse_ws ${lines[$1]})\"\n  else\n    local line\n    for line in \"${lines[@]}\"; do\n      if [ \"$(collapse_ws $line)\" = \"$1\" ]; then return 0; fi\n    done\n    flunk \"expected line \\`$1'\"\n  fi\n}\n\nrefute_line() {\n  if [ \"$1\" -ge 0 ] 2>/dev/null; then\n    local num_lines=\"${#lines[@]}\"\n    if [ \"$1\" -lt \"$num_lines\" ]; then\n      flunk \"output has $num_lines lines\"\n    fi\n  else\n    local line\n    for line in \"${lines[@]}\"; do\n      if [ \"$line\" = \"$1\" ]; then\n        flunk \"expected to not find line \\`$line'\"\n      fi\n    done\n  fi\n}\n\nassert() {\n  if ! \"$@\"; then\n    flunk \"failed: $@\"\n  fi\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/vcsim/Vagrantfile",
    "content": "# -*- mode: ruby -*-\n# vi: set ft=ruby :\n\n# VCSA with VC simulator enabled.\n# 'vagrant provision' will erase any existing inventory and\n# populate with the config in ./provision.sh\n\nVagrant.configure(\"2\") do |config|\n  config.vm.hostname = \"vcsim\"\n\n  config.vm.box = \"vcsa\"\n  config.vm.synced_folder \".\", \"/vagrant\", disabled: true\n\n  config.vm.provision \"shell\", path: \"provision.sh\"\n\n  config.vm.network \"forwarded_port\", guest: 443, host: 16443\n  config.vm.network \"forwarded_port\", guest: 80, host: 16080\n\n  [:vmware_fusion, :vmware_workstation].each do |name|\n    config.vm.provider name do |v,override|\n      v.vmx[\"memsize\"] = \"4096\"\n    end\n  end\nend\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/vcsim/provision.sh",
    "content": "PATH=$PATH:/sbin:/usr/sbin:/usr/local/sbin\n\necho \"Creating vcsim model...\"\ncat > /etc/vmware-vpx/vcsim/model/initInventory-govc.cfg <<EOF\n<config>\n  <inventory>\n    <dc>2</dc>\n    <host-per-dc>3</host-per-dc>\n    <vm-per-host>3</vm-per-host>\n    <poweron-vm-per-host>2</poweron-vm-per-host>\n    <cluster-per-dc>2</cluster-per-dc>\n    <host-per-cluster>3</host-per-cluster>\n    <rp-per-cluster>3</rp-per-cluster>\n    <vm-per-rp>4</vm-per-rp>\n    <poweron-vm-per-rp>3</poweron-vm-per-rp>\n    <dv-portgroups>2</dv-portgroups>\n  </inventory>\n</config>\nEOF\n\ncat > /etc/vmware-vpx/vcsim/model/vcsim-default.cfg <<EOF\n<simulator>\n<enabled>true</enabled>\n<initInventory>vcsim/model/initInventory-govc.cfg</initInventory>\n</simulator>\nEOF\n\necho \"Starting VC simulator...\"\nvmware-vcsim-stop\nvmware-vcsim-start default\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/test/vm.bats",
    "content": "#!/usr/bin/env bats\n\nload test_helper\n\n@test \"vm.ip\" {\n  id=$(new_ttylinux_vm)\n\n  run govc vm.power -on $id\n  assert_success\n\n  run govc vm.ip $id\n  assert_success\n\n  run govc vm.ip -a -v4 $id\n  assert_success\n\n  run govc vm.ip -n $(vm_mac $id) $id\n  assert_success\n\n  run govc vm.ip -n ethernet-0 $id\n  assert_success\n\n  ip=$(govc vm.ip $id)\n\n  # add a second nic\n  run govc vm.network.add -vm $id \"VM Network\"\n  assert_success\n\n  res=$(govc vm.ip -n ethernet-0 $id)\n  assert_equal $ip $res\n}\n\n@test \"vm.ip -esxcli\" {\n  ok=$(govc host.esxcli system settings advanced list -o /Net/GuestIPHack | grep ^IntValue: | awk '{print $2}')\n  if [ \"$ok\" != \"1\" ] ; then\n    skip \"/Net/GuestIPHack=0\"\n  fi\n  id=$(new_ttylinux_vm)\n\n  run govc vm.power -on $id\n  assert_success\n\n  run govc vm.ip -esxcli $id\n  assert_success\n\n  ip_esxcli=$output\n\n  run govc vm.ip $id\n  assert_success\n  ip_tools=$output\n\n  assert_equal $ip_esxcli $ip_tools\n}\n\n@test \"vm.create\" {\n  id=$(new_ttylinux_vm)\n\n  run govc vm.power -on $id\n  assert_success\n\n  result=$(govc device.ls -vm $vm | grep disk- | wc -l)\n  [ $result -eq 0 ]\n\n  result=$(govc device.ls -vm $vm | grep cdrom- | wc -l)\n  [ $result -eq 0 ]\n}\n\n@test \"vm.change\" {\n  id=$(new_ttylinux_vm)\n\n  run govc vm.change -g ubuntu64Guest -m 1024 -c 2 -vm $id\n  assert_success\n\n  run govc vm.info $id\n  assert_success\n  assert_line \"Guest name: Ubuntu Linux (64-bit)\"\n  assert_line \"Memory: 1024MB\"\n  assert_line \"CPU: 2 vCPU(s)\"\n\n  run govc vm.change -e \"guestinfo.a=1\" -e \"guestinfo.b=2\" -vm $id\n  assert_success\n\n  run govc vm.info -e $id\n  assert_success\n  assert_line \"guestinfo.a: 1\"\n  assert_line \"guestinfo.b: 2\"\n\n  run govc vm.change -sync-time-with-host=false -vm $id\n  assert_success\n\n  run govc vm.info -t $id\n  assert_success\n  assert_line \"SyncTimeWithHost: false\"\n\n  run govc vm.change -sync-time-with-host=true -vm 
$id\n  assert_success\n\n  run govc vm.info -t $id\n  assert_success\n  assert_line \"SyncTimeWithHost: true\"\n\n  nid=$(new_id)\n  run govc vm.change -name $nid -vm $id\n  assert_success\n\n  run govc vm.info $id\n  [ ${#lines[@]} -eq 0 ]\n\n  run govc vm.info $nid\n  [ ${#lines[@]} -gt 0 ]\n}\n\n@test \"vm.power\" {\n  vm=$(new_ttylinux_vm)\n\n  run vm_power_state $vm\n  assert_success \"poweredOff\"\n\n  run govc vm.power $vm\n  assert_failure\n\n  run govc vm.power -on -off $vm\n  assert_failure\n\n  run govc vm.power -on $vm\n  assert_success\n  run vm_power_state $vm\n  assert_success \"poweredOn\"\n\n  run govc vm.power -suspend $vm\n  assert_success\n  run vm_power_state $vm\n  assert_success \"suspended\"\n\n  run govc vm.power -on $vm\n  assert_success\n  run vm_power_state $vm\n  assert_success \"poweredOn\"\n}\n\n@test \"vm.power -force\" {\n  vm=$(new_id)\n  govc vm.create $vm\n\n  run govc vm.power -r $vm\n  assert_failure\n\n  run govc vm.power -r -force $vm\n  assert_success\n\n  run govc vm.power -s $vm\n  assert_failure\n\n  run govc vm.power -s -force $vm\n  assert_success\n\n  run govc vm.power -off $vm\n  assert_failure\n\n  run govc vm.power -off -force $vm\n  assert_success\n\n  run govc vm.destroy $vm\n  assert_success\n\n  run govc vm.power -off $vm\n  assert_failure\n\n  run govc vm.power -off -force $vm\n  assert_failure\n}\n\n@test \"vm.create pvscsi\" {\n  vm=$(new_id)\n  govc vm.create -on=false -disk.controller pvscsi $vm\n\n  result=$(govc device.ls -vm $vm | grep pvscsi- | wc -l)\n  [ $result -eq 1 ]\n\n  result=$(govc device.ls -vm $vm | grep lsilogic- | wc -l)\n  [ $result -eq 0 ]\n\n  vm=$(new_id)\n  govc vm.create -on=false -disk.controller pvscsi -disk=1GB $vm\n}\n\n@test \"vm.create in cluster\" {\n  vcsim_env\n\n  # using GOVC_HOST and its resource pool\n  run govc vm.create -on=false $(new_id)\n  assert_success\n\n  # using no -host and the default resource pool for DC0\n  unset GOVC_HOST\n  run govc vm.create -on=false 
$(new_id)\n  assert_success\n}\n\n@test \"vm.info\" {\n  local num=3\n\n  local prefix=$(new_id)\n\n  for x in $(seq $num)\n  do\n    local id=\"${prefix}-${x}\"\n\n    # If VM is not found: No output, exit code==0\n    run govc vm.info $id\n    assert_success\n    [ ${#lines[@]} -eq 0 ]\n\n    # If VM is not found (using -json flag): Valid json output, exit code==0\n    run govc vm.info -json $id\n    assert_success\n    assert_line \"{\\\"VirtualMachines\\\":null}\"\n\n    run govc vm.create -on=false $id\n    assert_success\n\n    local info=$(govc vm.info -r $id)\n    local found=$(grep Name: <<<\"$info\" | wc -l)\n    [ \"$found\" -eq 1 ]\n\n    # test that mo names are printed\n    found=$(grep Host: <<<\"$info\" | awk '{print $2}')\n    [ -n \"$found\" ]\n    found=$(grep Storage: <<<\"$info\" | awk '{print $2}')\n    [ -n \"$found\" ]\n    found=$(grep Network: <<<\"$info\" | awk '{print $2}')\n    [ -n \"$found\" ]\n  done\n\n  # test find slice\n  local slice=$(govc vm.info ${prefix}-*)\n  local found=$(grep Name: <<<\"$slice\" | wc -l)\n  [ \"$found\" -eq $num ]\n\n  # test -r\n  found=$(grep Storage: <<<\"$slice\" | wc -l)\n  [ \"$found\" -eq 0 ]\n  found=$(grep Network: <<<\"$slice\" | wc -l)\n  [ \"$found\" -eq 0 ]\n  slice=$(govc vm.info -r ${prefix}-*)\n  found=$(grep Storage: <<<\"$slice\" | wc -l)\n  [ \"$found\" -eq $num ]\n  found=$(grep Network: <<<\"$slice\" | wc -l)\n  [ \"$found\" -eq $num ]\n\n  # test extraConfig\n  run govc vm.change -e \"guestinfo.a=2\" -vm $id\n  assert_success\n  run govc vm.info -e $id\n  assert_success\n  assert_line \"guestinfo.a: 2\"\n  run govc vm.change -e \"guestinfo.a=\" -vm $id\n  assert_success\n  refute_line \"guestinfo.a: 2\"\n}\n\n@test \"vm.create linked ide disk\" {\n  import_ttylinux_vmdk\n\n  vm=$(new_id)\n\n  run govc vm.create -disk $GOVC_TEST_VMDK -disk.controller ide -on=false $vm\n  assert_success\n\n  run govc device.info -vm $vm disk-200-0\n  assert_success\n  assert_line \"Controller: 
ide-200\"\n}\n\n@test \"vm.create linked scsi disk\" {\n  import_ttylinux_vmdk\n\n  vm=$(new_id)\n\n  run govc vm.create -disk enoent -on=false $vm\n  assert_failure \"govc: cannot stat '[${GOVC_DATASTORE##*/}] enoent': No such file\"\n\n  run govc vm.create -disk $GOVC_TEST_VMDK -on=false $vm\n  assert_success\n\n  run govc device.info -vm $vm disk-1000-0\n  assert_success\n  assert_line \"Controller: lsilogic-1000\"\n  assert_line \"Parent: [${GOVC_DATASTORE##*/}] $GOVC_TEST_VMDK\"\n  assert_line \"File: [${GOVC_DATASTORE##*/}] $vm/${vm}.vmdk\"\n}\n\n@test \"vm.create scsi disk\" {\n  import_ttylinux_vmdk\n\n  vm=$(new_id)\n\n  run govc vm.create -disk enoent -on=false $vm\n  assert_failure \"govc: cannot stat '[${GOVC_DATASTORE##*/}] enoent': No such file\"\n\n\n  run govc vm.create -disk $GOVC_TEST_VMDK -on=false -link=false $vm\n  assert_success\n\n  run govc device.info -vm $vm disk-1000-0\n  assert_success\n  assert_line \"Controller: lsilogic-1000\"\n  refute_line \"Parent: [${GOVC_DATASTORE##*/}] $GOVC_TEST_VMDK\"\n  assert_line \"File: [${GOVC_DATASTORE##*/}] $GOVC_TEST_VMDK\"\n}\n\n@test \"vm.create scsi disk with datastore argument\" {\n  import_ttylinux_vmdk\n\n  vm=$(new_id)\n\n  run govc vm.create -disk=\"${GOVC_TEST_VMDK}\" -disk-datastore=\"${GOVC_DATASTORE}\" -on=false -link=false $vm\n  assert_success\n\n  run govc device.info -vm $vm disk-1000-0\n  assert_success\n  assert_line \"File: [${GOVC_DATASTORE##*/}] $GOVC_TEST_VMDK\"\n}\n\n@test \"vm.create iso\" {\n  upload_iso\n\n  vm=$(new_id)\n\n  run govc vm.create -iso enoent -on=false $vm\n  assert_failure \"govc: cannot stat '[${GOVC_DATASTORE##*/}] enoent': No such file\"\n\n  run govc vm.create -iso $GOVC_TEST_ISO -on=false $vm\n  assert_success\n\n  run govc device.info -vm $vm cdrom-3000\n  assert_success\n  assert_line \"Controller: ide-200\"\n  assert_line \"Summary: ISO [${GOVC_DATASTORE##*/}] $GOVC_TEST_ISO\"\n}\n\n@test \"vm.create iso with datastore argument\" {\n  upload_iso\n\n  
vm=$(new_id)\n\n  run govc vm.create -iso=\"${GOVC_TEST_ISO}\" -iso-datastore=\"${GOVC_DATASTORE}\" -on=false $vm\n  assert_success\n\n  run govc device.info -vm $vm cdrom-3000\n  assert_success\n  assert_line \"Summary: ISO [${GOVC_DATASTORE##*/}] $GOVC_TEST_ISO\"\n}\n\n@test \"vm.disk.create empty vm\" {\n  vm=$(new_empty_vm)\n\n  local name=$(new_id)\n\n  run govc vm.disk.create -vm $vm -name $name -size 1G\n  assert_success\n  result=$(govc device.ls -vm $vm | grep disk- | wc -l)\n  [ $result -eq 1 ]\n\n  name=$(new_id)\n\n  run govc vm.disk.create -vm $vm -name $name -controller lsilogic-1000 -size 2G\n  assert_success\n  result=$(govc device.ls -vm $vm | grep disk- | wc -l)\n  [ $result -eq 2 ]\n}\n\n@test \"vm.disk.create\" {\n  import_ttylinux_vmdk\n\n  vm=$(new_id)\n\n  govc vm.create -disk $GOVC_TEST_VMDK -on=false $vm\n  result=$(govc device.ls -vm $vm | grep disk- | wc -l)\n  [ $result -eq 1 ]\n\n  local name=$(new_id)\n\n  run govc vm.disk.create -vm $vm -name $name -size 1G\n  assert_success\n  result=$(govc device.ls -vm $vm | grep disk- | wc -l)\n  [ $result -eq 2 ]\n\n  run govc vm.disk.create -vm $vm -name $name -size 1G\n  assert_success # TODO: should fail?\n  result=$(govc device.ls -vm $vm | grep disk- | wc -l)\n  [ $result -eq 2 ]\n}\n\n@test \"vm.disk.attach\" {\n  import_ttylinux_vmdk\n\n  vm=$(new_id)\n\n  govc vm.create -disk $GOVC_TEST_VMDK -on=false $vm\n  result=$(govc device.ls -vm $vm | grep disk- | wc -l)\n  [ $result -eq 1 ]\n\n  run govc import.vmdk $GOVC_TEST_VMDK_SRC $vm\n  assert_success\n\n  run govc vm.disk.attach -vm $vm -link=false -disk enoent.vmdk\n  assert_failure \"govc: File [${GOVC_DATASTORE##*/}] enoent.vmdk was not found\"\n\n  run govc vm.disk.attach -vm $vm -disk enoent.vmdk\n  assert_failure \"govc: Invalid configuration for device '0'.\"\n\n  run govc vm.disk.attach -vm $vm -disk $vm/$(basename $GOVC_TEST_VMDK) -controller lsilogic-1000\n  assert_success\n  result=$(govc device.ls -vm $vm | grep disk- | wc -l)\n 
 [ $result -eq 2 ]\n}\n\n@test \"vm.create new disk with datastore argument\" {\n  vm=$(new_id)\n\n  run govc vm.create -disk=\"1GiB\" -ds=\"${GOVC_DATASTORE}\" -on=false -link=false $vm\n  assert_success\n\n  run govc device.info -vm $vm disk-1000-0\n  assert_success\n  assert_line \"File: [${GOVC_DATASTORE##*/}] ${vm}/${vm}.vmdk\"\n}\n\n@test \"vm.create new disk with datastore cluster argument\" {\n  if [ -z \"${GOVC_DATASTORE_CLUSTER}\" ]; then\n    skip \"requires datastore cluster\"\n  fi\n\n  vm=$(new_id)\n\n  run govc vm.create -disk=\"1GiB\" -datastore-cluster=\"${GOVC_DATASTORE_CLUSTER}\" -on=false -link=false $vm\n  assert_success\n\n  run govc device.info -vm $vm disk-1000-0\n  assert_success\n}\n\n@test \"vm.register\" {\n  run govc vm.unregister enoent\n  assert_failure\n\n  vm=$(new_empty_vm)\n\n  run govc vm.change -vm \"$vm\" -e foo=bar\n  assert_success\n\n  run govc vm.unregister \"$vm\"\n  assert_success\n\n  run govc vm.change -vm \"$vm\" -e foo=bar\n  assert_failure\n\n  run govc vm.register \"$vm/${vm}.vmx\"\n  assert_success\n\n  run govc vm.change -vm \"$vm\" -e foo=bar\n  assert_success\n}\n\n@test \"vm.clone\" {\n  vcsim_env\n  vm=$(new_empty_vm)\n  clone=$(new_id)\n\n  run govc vm.clone -vm $vm $clone\n  assert_success\n\n  result=$(govc device.ls -vm $clone | grep disk- | wc -l)\n  [ $result -eq 0 ]\n\n  result=$(govc device.ls -vm $clone | grep cdrom- | wc -l)\n  [ $result -eq 0 ]\n}\n\n@test \"vm.clone change resources\" {\n  vcsim_env\n  vm=$(new_ttylinux_vm)\n  clone=$(new_id)\n\n  run govc vm.clone -m 1024 -c 2 -vm $vm $clone\n  assert_success\n\n  run govc vm.info $clone\n  assert_success\n  assert_line \"Memory: 1024MB\"\n  assert_line \"CPU: 2 vCPU(s)\"\n}\n\n@test \"vm.clone usage\" {\n  # validate we require -vm flag\n  run govc vm.clone enoent\n  assert_failure\n}\n\n@test \"vm.migrate\" {\n  vcsim_env\n  vm=$(new_empty_vm)\n\n  # migrate from H0 to H1\n  run govc vm.migrate -host DC0_C0/DC0_C0_H1 \"$vm\"\n  
assert_success\n\n  # migrate from C0 to C1\n  run govc vm.migrate -pool DC0_C1/Resources \"$vm\"\n  assert_success\n}\n\n@test \"vm.snapshot\" {\n  vm=$(new_ttylinux_vm)\n  id=$(new_id)\n\n  # No snapshots == no output\n  run govc snapshot.tree -vm \"$vm\"\n  assert_success \"\"\n\n  run govc snapshot.remove -vm \"$vm\" '*'\n  assert_success\n\n  run govc snapshot.revert -vm \"$vm\"\n  assert_failure\n\n  run govc snapshot.create -vm \"$vm\" \"$id\"\n  assert_success\n\n  run govc snapshot.revert -vm \"$vm\" enoent\n  assert_failure\n\n  run govc snapshot.revert -vm \"$vm\"\n  assert_success\n\n  run govc snapshot.remove -vm \"$vm\" \"$id\"\n  assert_success\n\n  run govc snapshot.create -vm \"$vm\" root\n  assert_success\n\n  run govc snapshot.create -vm \"$vm\" child\n  assert_success\n\n  run govc snapshot.create -vm \"$vm\" grand\n  assert_success\n\n  run govc snapshot.create -vm \"$vm\" child\n  assert_success\n\n  result=$(govc snapshot.tree -vm \"$vm\" -f | grep -c root/child/grand/child)\n  [ \"$result\" -eq 1 ]\n\n  run govc snapshot.revert -vm \"$vm\" root\n  assert_success\n\n  run govc snapshot.create -vm \"$vm\" child\n  assert_success\n\n  # 3 snapshots named \"child\"\n  result=$(govc snapshot.tree -vm \"$vm\" | grep -c child)\n  [ \"$result\" -eq 3 ]\n\n  run govc snapshot.remove -vm \"$vm\" child\n  assert_failure\n\n  # 2 snapshots with path \"root/child\"\n  result=$(govc snapshot.tree -vm \"$vm\" -f | egrep -c 'root/child$')\n  [ \"$result\" -eq 2 ]\n\n  run govc snapshot.remove -vm \"$vm\" root/child\n  assert_failure\n\n  # path is unique\n  run govc snapshot.remove -vm \"$vm\" root/child/grand/child\n  assert_success\n\n  # name is unique\n  run govc snapshot.remove -vm \"$vm\" grand\n  assert_success\n\n  result=$(govc snapshot.tree -vm \"$vm\" -f | grep root/child/grand/child | wc -l)\n  [ \"$result\" -eq 0 ]\n\n  id=$(govc snapshot.tree -vm \"$vm\" -f -i | egrep 'root/child$' | head -n1 | awk '{print $1}' | tr -d '[]')\n  # moid is 
unique\n  run govc snapshot.remove -vm \"$vm\" \"$id\"\n  assert_success\n\n  # now root/child is unique\n  run govc snapshot.remove -vm \"$vm\" root/child\n  assert_success\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/usage.sh",
    "content": "#!/bin/bash -e\n\nfor var in $(env | grep GOVC_) ; do\n  eval \"unset ${var/=*}\"\ndone\n\ncommon_opts=$(cat <<EOF\n  -cert=                    Certificate [GOVC_CERTIFICATE]\n  -debug=false              Store debug logs [GOVC_DEBUG]\n  -dump=false               Enable output dump\n  -json=false               Enable JSON output\n  -k=false                  Skip verification of server certificate [GOVC_INSECURE]\n  -key=                     Private key [GOVC_PRIVATE_KEY]\n  -persist-session=true     Persist session to disk [GOVC_PERSIST_SESSION]\n  -tls-ca-certs=            TLS CA certificates file [GOVC_TLS_CA_CERTS]\n  -tls-known-hosts=         TLS known hosts file [GOVC_TLS_KNOWN_HOSTS]\n  -u=                       ESX or vCenter URL [GOVC_URL]\n  -vim-namespace=urn:vim25  Vim namespace [GOVC_VIM_NAMESPACE]\n  -vim-version=6.0          Vim version [GOVC_VIM_VERSION]\n  -dc=                      Datacenter [GOVC_DATACENTER]\n  -host.dns=                Find host by FQDN\n  -host.ip=                 Find host by IP address\n  -host.ipath=              Find host by inventory path\n  -host.uuid=               Find host by UUID\n  -vm.dns=                  Find VM by FQDN\n  -vm.ip=                   Find VM by IP address\n  -vm.ipath=                Find VM by inventory path\n  -vm.path=                 Find VM by path to .vmx file\n  -vm.uuid=                 Find VM by UUID\nEOF\n)\n\ncat <<'EOF'\n# govc usage\n\nThis document is generated from `govc -h` and `govc $cmd -h` commands.\n\nThe following common options are filtered out in this document,\nbut appear via `govc $cmd -h`:\n\n```\nEOF\n\nprintf \"%s\\n\\`\\`\\`\\n\\n\" \"${common_opts}\"\n\ncmds=($(govc -h | grep -v Usage))\n\nopts=($(cut -s -d= -f1 <<<\"$common_opts\" | xargs -n1 | sed -e 's/^/\\\\/'))\nfilter=$(printf \"|%s=\" \"${opts[@]}\")\n\nfor cmd in \"${cmds[@]}\" ; do\n    printf \"## %s\\n\\n\" \"$cmd\"\n    printf \"\\`\\`\\`\\n\"\n    govc \"$cmd\" -h | egrep -v \"${filter:1}\"\n 
   printf \"\\`\\`\\`\\n\\n\"\ndone\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vapp/destroy.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vapp\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/find\"\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype destroy struct {\n\t*flags.DatacenterFlag\n}\n\nfunc init() {\n\tcli.Register(\"vapp.destroy\", &destroy{})\n}\n\nfunc (cmd *destroy) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n}\n\nfunc (cmd *destroy) Process(ctx context.Context) error {\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *destroy) Usage() string {\n\treturn \"VAPP...\"\n}\n\nfunc (cmd *destroy) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tfinder, err := cmd.Finder()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, arg := range f.Args() {\n\t\tvapps, err := finder.VirtualAppList(ctx, arg)\n\t\tif err != nil {\n\t\t\tif _, ok := err.(*find.NotFoundError); ok {\n\t\t\t\t// Ignore if vapp cannot be found\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, vapp := range vapps {\n\t\t\tpowerOff := func() error {\n\t\t\t\ttask, err := vapp.PowerOff(ctx, false)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 
err\n\t\t\t\t}\n\t\t\t\terr = task.Wait(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// it's safe to ignore if the vapp is already powered off\n\t\t\t\t\tif f, ok := err.(types.HasFault); ok {\n\t\t\t\t\t\tswitch f.Fault().(type) {\n\t\t\t\t\t\tcase *types.InvalidPowerState:\n\t\t\t\t\t\t\treturn nil\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif err := powerOff(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tdestroy := func() error {\n\t\t\t\ttask, err := vapp.Destroy(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\terr = task.Wait(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif err := destroy(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vapp/power.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vapp\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype power struct {\n\t*flags.SearchFlag\n\n\tOn      bool\n\tOff     bool\n\tSuspend bool\n\tForce   bool\n}\n\nfunc init() {\n\tcli.Register(\"vapp.power\", &power{})\n}\n\nfunc (cmd *power) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.SearchFlag, ctx = flags.NewSearchFlag(ctx, flags.SearchVirtualApps)\n\tcmd.SearchFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.On, \"on\", false, \"Power on\")\n\tf.BoolVar(&cmd.Off, \"off\", false, \"Power off\")\n\tf.BoolVar(&cmd.Suspend, \"suspend\", false, \"Power suspend\")\n\tf.BoolVar(&cmd.Force, \"force\", false, \"Force (If force is false, the shutdown order in the vApp is executed. 
If force is true, all virtual machines are powered-off (regardless of shutdown order))\")\n}\n\nfunc (cmd *power) Process(ctx context.Context) error {\n\tif err := cmd.SearchFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\topts := []bool{cmd.On, cmd.Off, cmd.Suspend}\n\tselected := false\n\n\tfor _, opt := range opts {\n\t\tif opt {\n\t\t\tif selected {\n\t\t\t\treturn flag.ErrHelp\n\t\t\t}\n\t\t\tselected = opt\n\t\t}\n\t}\n\n\tif !selected {\n\t\treturn flag.ErrHelp\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *power) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvapps, err := cmd.VirtualApps(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, vapp := range vapps {\n\t\tvar task *object.Task\n\n\t\tswitch {\n\t\tcase cmd.On:\n\t\t\tfmt.Fprintf(cmd, \"Powering on %s... \", vapp.Reference())\n\t\t\ttask, err = vapp.PowerOn(ctx)\n\t\tcase cmd.Off:\n\t\t\tfmt.Fprintf(cmd, \"Powering off %s... \", vapp.Reference())\n\t\t\ttask, err = vapp.PowerOff(ctx, cmd.Force)\n\t\tcase cmd.Suspend:\n\t\t\tfmt.Fprintf(cmd, \"Suspend %s... \", vapp.Reference())\n\t\t\ttask, err = vapp.Suspend(ctx)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif task != nil {\n\t\t\terr = task.Wait(ctx)\n\t\t}\n\t\tif err == nil {\n\t\t\tfmt.Fprintf(cmd, \"OK\\n\")\n\t\t\tcontinue\n\t\t}\n\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/version/command.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage version\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\nvar gitVersion string\n\ntype version struct {\n\t*flags.EmptyFlag\n\n\trequire string\n}\n\nfunc init() {\n\t// Check that git tag in the release builds match the hardcoded version\n\tif gitVersion != \"\" && gitVersion[1:] != flags.Version {\n\t\tlog.Panicf(\"version mismatch: git=%s vs govc=%s\", gitVersion[1:], flags.Version)\n\t}\n\n\tcli.Register(\"version\", &version{})\n}\n\nfunc (cmd *version) Register(ctx context.Context, f *flag.FlagSet) {\n\tf.StringVar(&cmd.require, \"require\", \"\", \"Require govc version >= this value\")\n}\n\nfunc (cmd *version) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif cmd.require != \"\" {\n\t\tv, err := flags.ParseVersion(flags.Version)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\trv, err := flags.ParseVersion(cmd.require)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to parse required version '%s': %s\", cmd.require, err)\n\t\t}\n\n\t\tif !rv.Lte(v) {\n\t\t\treturn fmt.Errorf(\"version %s or higher is required, this is version %s\", cmd.require, flags.Version)\n\t\t}\n\t}\n\n\tfmt.Printf(\"govc %s\\n\", flags.Version)\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/change.go",
    "content": "/*\nCopyright (c) 2015-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vm\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype extraConfig []types.BaseOptionValue\n\nfunc (e *extraConfig) String() string {\n\treturn fmt.Sprintf(\"%v\", *e)\n}\n\nfunc (e *extraConfig) Set(v string) error {\n\tr := strings.SplitN(v, \"=\", 2)\n\tif len(r) < 2 {\n\t\treturn fmt.Errorf(\"failed to parse extraConfig: %s\", v)\n\t}\n\t*e = append(*e, &types.OptionValue{Key: r[0], Value: r[1]})\n\treturn nil\n}\n\ntype change struct {\n\t*flags.VirtualMachineFlag\n\n\ttypes.VirtualMachineConfigSpec\n\textraConfig extraConfig\n}\n\nfunc init() {\n\tcli.Register(\"vm.change\", &change{})\n}\n\nfunc (cmd *change) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.Int64Var(&cmd.MemoryMB, \"m\", 0, \"Size in MB of memory\")\n\tf.Var(flags.NewInt32(&cmd.NumCPUs), \"c\", \"Number of CPUs\")\n\tf.StringVar(&cmd.GuestId, \"g\", \"\", \"Guest OS\")\n\tf.StringVar(&cmd.Name, \"name\", \"\", \"Display name\")\n\tf.Var(&cmd.extraConfig, \"e\", \"ExtraConfig. 
<key>=<value>\")\n\n\tf.Var(flags.NewOptionalBool(&cmd.NestedHVEnabled), \"nested-hv-enabled\", \"Enable nested hardware-assisted virtualization\")\n\tcmd.Tools = &types.ToolsConfigInfo{}\n\tf.Var(flags.NewOptionalBool(&cmd.Tools.SyncTimeWithHost), \"sync-time-with-host\", \"Enable SyncTimeWithHost\")\n}\n\nfunc (cmd *change) Description() string {\n\treturn `Change VM configuration.\n\nTo add ExtraConfig variables that can read within the guest, use the 'guestinfo.' prefix.\n\nExamples:\n  govc vm.change -vm $vm -e smc.present=TRUE -e ich7m.present=TRUE\n  govc vm.change -vm $vm -e guestinfo.vmname $vm\n  # Read the variable set above inside the guest:\n  vmware-rpctool \"info-get guestinfo.vmname\"`\n}\n\nfunc (cmd *change) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *change) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tif len(cmd.extraConfig) > 0 {\n\t\tcmd.VirtualMachineConfigSpec.ExtraConfig = cmd.extraConfig\n\t}\n\n\ttask, err := vm.Reconfigure(ctx, cmd.VirtualMachineConfigSpec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn task.Wait(ctx)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/clone.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vm\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype clone struct {\n\t*flags.ClientFlag\n\t*flags.DatacenterFlag\n\t*flags.DatastoreFlag\n\t*flags.StoragePodFlag\n\t*flags.ResourcePoolFlag\n\t*flags.HostSystemFlag\n\t*flags.NetworkFlag\n\t*flags.FolderFlag\n\t*flags.VirtualMachineFlag\n\n\tname          string\n\tmemory        int\n\tcpus          int\n\ton            bool\n\tforce         bool\n\ttemplate      bool\n\tcustomization string\n\twaitForIP     bool\n\tannotation    string\n\n\tClient         *vim25.Client\n\tDatacenter     *object.Datacenter\n\tDatastore      *object.Datastore\n\tStoragePod     *object.StoragePod\n\tResourcePool   *object.ResourcePool\n\tHostSystem     *object.HostSystem\n\tFolder         *object.Folder\n\tVirtualMachine *object.VirtualMachine\n}\n\nfunc init() {\n\tcli.Register(\"vm.clone\", &clone{})\n}\n\nfunc (cmd *clone) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.DatacenterFlag, ctx = 
flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\n\tcmd.StoragePodFlag, ctx = flags.NewStoragePodFlag(ctx)\n\tcmd.StoragePodFlag.Register(ctx, f)\n\n\tcmd.ResourcePoolFlag, ctx = flags.NewResourcePoolFlag(ctx)\n\tcmd.ResourcePoolFlag.Register(ctx, f)\n\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tcmd.NetworkFlag, ctx = flags.NewNetworkFlag(ctx)\n\tcmd.NetworkFlag.Register(ctx, f)\n\n\tcmd.FolderFlag, ctx = flags.NewFolderFlag(ctx)\n\tcmd.FolderFlag.Register(ctx, f)\n\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.IntVar(&cmd.memory, \"m\", 0, \"Size in MB of memory\")\n\tf.IntVar(&cmd.cpus, \"c\", 0, \"Number of CPUs\")\n\tf.BoolVar(&cmd.on, \"on\", true, \"Power on VM\")\n\tf.BoolVar(&cmd.force, \"force\", false, \"Create VM if vmx already exists\")\n\tf.BoolVar(&cmd.template, \"template\", false, \"Create a Template\")\n\tf.StringVar(&cmd.customization, \"customization\", \"\", \"Customization Specification Name\")\n\tf.BoolVar(&cmd.waitForIP, \"waitip\", false, \"Wait for VM to acquire IP address\")\n\tf.StringVar(&cmd.annotation, \"annotation\", \"\", \"VM description\")\n}\n\nfunc (cmd *clone) Usage() string {\n\treturn \"NAME\"\n}\n\nfunc (cmd *clone) Description() string {\n\treturn `Clone VM to NAME.\n\nExamples:\n  govc vm.clone -vm template-vm new-vm`\n}\n\nfunc (cmd *clone) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.StoragePodFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.ResourcePoolFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := 
cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.NetworkFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.FolderFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *clone) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvar err error\n\n\tif len(f.Args()) != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tcmd.name = f.Arg(0)\n\tif cmd.name == \"\" {\n\t\treturn flag.ErrHelp\n\t}\n\n\tcmd.Client, err = cmd.ClientFlag.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.Datacenter, err = cmd.DatacenterFlag.Datacenter()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.StoragePodFlag.Isset() {\n\t\tcmd.StoragePod, err = cmd.StoragePodFlag.StoragePod()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tcmd.Datastore, err = cmd.DatastoreFlag.Datastore()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tcmd.HostSystem, err = cmd.HostSystemFlag.HostSystemIfSpecified()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.HostSystem != nil {\n\t\tif cmd.ResourcePool, err = cmd.HostSystem.ResourcePool(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t// -host is optional\n\t\tif cmd.ResourcePool, err = cmd.ResourcePoolFlag.ResourcePool(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif cmd.Folder, err = cmd.FolderFlag.Folder(); err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.VirtualMachine, err = cmd.VirtualMachineFlag.VirtualMachine(); err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.VirtualMachine == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\ttask, err := cmd.cloneVM(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinfo, err := task.WaitForResult(ctx, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvm := object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference))\n\n\tif cmd.cpus > 0 || cmd.memory > 0 {\n\t\tvmConfigSpec := 
types.VirtualMachineConfigSpec{}\n\t\tif cmd.cpus > 0 {\n\t\t\tvmConfigSpec.NumCPUs = int32(cmd.cpus)\n\t\t}\n\t\tif cmd.memory > 0 {\n\t\t\tvmConfigSpec.MemoryMB = int64(cmd.memory)\n\t\t}\n\t\tvmConfigSpec.Annotation = cmd.annotation\n\t\ttask, err := vm.Reconfigure(ctx, vmConfigSpec)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = task.WaitForResult(ctx, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif cmd.on {\n\t\ttask, err := vm.PowerOn(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = task.WaitForResult(ctx, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif cmd.waitForIP {\n\t\t\t_, err = vm.WaitForIP(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *clone) cloneVM(ctx context.Context) (*object.Task, error) {\n\n\t// search for the first network card of the source\n\tdevices, err := cmd.VirtualMachine.Device(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar card *types.VirtualEthernetCard\n\tfor _, device := range devices {\n\t\tif c, ok := device.(types.BaseVirtualEthernetCard); ok {\n\t\t\tcard = c.GetVirtualEthernetCard()\n\t\t\tbreak\n\t\t}\n\t}\n\tif card == nil {\n\t\treturn nil, fmt.Errorf(\"No network device found.\")\n\t}\n\n\t// get the new backing information\n\tdev, err := cmd.NetworkFlag.Device()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//set backing info\n\tcard.Backing = dev.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard().Backing\n\n\t// prepare virtual device config spec for network card\n\tconfigSpecs := []types.BaseVirtualDeviceConfigSpec{\n\t\t&types.VirtualDeviceConfigSpec{\n\t\t\tOperation: types.VirtualDeviceConfigSpecOperationEdit,\n\t\t\tDevice:    card,\n\t\t},\n\t}\n\n\tfolderref := cmd.Folder.Reference()\n\tpoolref := cmd.ResourcePool.Reference()\n\n\trelocateSpec := types.VirtualMachineRelocateSpec{\n\t\tDeviceChange: configSpecs,\n\t\tFolder:       &folderref,\n\t\tPool:         
&poolref,\n\t}\n\n\tif cmd.HostSystem != nil {\n\t\thostref := cmd.HostSystem.Reference()\n\t\trelocateSpec.Host = &hostref\n\t}\n\n\tcloneSpec := &types.VirtualMachineCloneSpec{\n\t\tLocation: relocateSpec,\n\t\tPowerOn:  false,\n\t\tTemplate: cmd.template,\n\t}\n\n\t// clone to storage pod\n\tdatastoreref := types.ManagedObjectReference{}\n\tif cmd.StoragePod != nil && cmd.Datastore == nil {\n\t\tstoragePod := cmd.StoragePod.Reference()\n\n\t\t// Build pod selection spec from config spec\n\t\tpodSelectionSpec := types.StorageDrsPodSelectionSpec{\n\t\t\tStoragePod: &storagePod,\n\t\t}\n\n\t\t// Get the virtual machine reference\n\t\tvmref := cmd.VirtualMachine.Reference()\n\n\t\t// Build the placement spec\n\t\tstoragePlacementSpec := types.StoragePlacementSpec{\n\t\t\tFolder:           &folderref,\n\t\t\tVm:               &vmref,\n\t\t\tCloneName:        cmd.name,\n\t\t\tCloneSpec:        cloneSpec,\n\t\t\tPodSelectionSpec: podSelectionSpec,\n\t\t\tType:             string(types.StoragePlacementSpecPlacementTypeClone),\n\t\t}\n\n\t\t// Get the storage placement result\n\t\tstorageResourceManager := object.NewStorageResourceManager(cmd.Client)\n\t\tresult, err := storageResourceManager.RecommendDatastores(ctx, storagePlacementSpec)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Get the recommendations\n\t\trecommendations := result.Recommendations\n\t\tif len(recommendations) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"no recommendations\")\n\t\t}\n\n\t\t// Get the first recommendation\n\t\tdatastoreref = recommendations[0].Action[0].(*types.StoragePlacementAction).Destination\n\t} else if cmd.StoragePod == nil && cmd.Datastore != nil {\n\t\tdatastoreref = cmd.Datastore.Reference()\n\t} else {\n\t\treturn nil, fmt.Errorf(\"Please provide either a datastore or a storagepod\")\n\t}\n\n\t// Set the destination datastore\n\tcloneSpec.Location.Datastore = &datastoreref\n\n\t// Check if vmx already exists\n\tif !cmd.force {\n\t\tvmxPath := 
fmt.Sprintf(\"%s/%s.vmx\", cmd.name, cmd.name)\n\n\t\tvar mds mo.Datastore\n\t\terr = property.DefaultCollector(cmd.Client).RetrieveOne(ctx, datastoreref, []string{\"name\"}, &mds)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdatastore := object.NewDatastore(cmd.Client, datastoreref)\n\t\tdatastore.InventoryPath = mds.Name\n\n\t\t_, err := datastore.Stat(ctx, vmxPath)\n\t\tif err == nil {\n\t\t\tdsPath := cmd.Datastore.Path(vmxPath)\n\t\t\treturn nil, fmt.Errorf(\"File %s already exists\", dsPath)\n\t\t}\n\t}\n\n\t// check if customization specification requested\n\tif len(cmd.customization) > 0 {\n\t\t// get the customization spec manager\n\t\tcustomizationSpecManager := object.NewCustomizationSpecManager(cmd.Client)\n\t\t// check if customization specification exists\n\t\texists, err := customizationSpecManager.DoesCustomizationSpecExist(ctx, cmd.customization)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif exists == false {\n\t\t\treturn nil, fmt.Errorf(\"Customization specification %s does not exists.\", cmd.customization)\n\t\t}\n\t\t// get the customization specification\n\t\tcustomSpecItem, err := customizationSpecManager.GetCustomizationSpec(ctx, cmd.customization)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcustomSpec := customSpecItem.Spec\n\t\t// set the customization\n\t\tcloneSpec.Customization = &customSpec\n\t}\n\n\t// clone virtualmachine\n\treturn cmd.VirtualMachine.Clone(ctx, cmd.Folder, cmd.name, *cloneSpec)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/create.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vm\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/units\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype create struct {\n\t*flags.ClientFlag\n\t*flags.DatacenterFlag\n\t*flags.DatastoreFlag\n\t*flags.StoragePodFlag\n\t*flags.ResourcePoolFlag\n\t*flags.HostSystemFlag\n\t*flags.NetworkFlag\n\t*flags.FolderFlag\n\n\tname       string\n\tmemory     int\n\tcpus       int\n\tguestID    string\n\tlink       bool\n\ton         bool\n\tforce      bool\n\tcontroller string\n\tannotation string\n\n\tiso              string\n\tisoDatastoreFlag *flags.DatastoreFlag\n\tisoDatastore     *object.Datastore\n\n\tdisk              string\n\tdiskDatastoreFlag *flags.DatastoreFlag\n\tdiskDatastore     *object.Datastore\n\n\t// Only set if the disk argument is a byte size, which means the disk\n\t// doesn't exist yet and should be created\n\tdiskByteSize int64\n\n\tClient       *vim25.Client\n\tDatacenter   *object.Datacenter\n\tDatastore    *object.Datastore\n\tStoragePod   *object.StoragePod\n\tResourcePool *object.ResourcePool\n\tHostSystem   *object.HostSystem\n\tFolder   
    *object.Folder\n}\n\nfunc init() {\n\tcli.Register(\"vm.create\", &create{})\n}\n\nfunc (cmd *create) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\n\tcmd.StoragePodFlag, ctx = flags.NewStoragePodFlag(ctx)\n\tcmd.StoragePodFlag.Register(ctx, f)\n\n\tcmd.ResourcePoolFlag, ctx = flags.NewResourcePoolFlag(ctx)\n\tcmd.ResourcePoolFlag.Register(ctx, f)\n\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tcmd.NetworkFlag, ctx = flags.NewNetworkFlag(ctx)\n\tcmd.NetworkFlag.Register(ctx, f)\n\n\tcmd.FolderFlag, ctx = flags.NewFolderFlag(ctx)\n\tcmd.FolderFlag.Register(ctx, f)\n\n\tf.IntVar(&cmd.memory, \"m\", 1024, \"Size in MB of memory\")\n\tf.IntVar(&cmd.cpus, \"c\", 1, \"Number of CPUs\")\n\tf.StringVar(&cmd.guestID, \"g\", \"otherGuest\", \"Guest OS\")\n\tf.BoolVar(&cmd.link, \"link\", true, \"Link specified disk\")\n\tf.BoolVar(&cmd.on, \"on\", true, \"Power on VM. Default is true if -disk argument is given.\")\n\tf.BoolVar(&cmd.force, \"force\", false, \"Create VM if vmx already exists\")\n\tf.StringVar(&cmd.controller, \"disk.controller\", \"scsi\", \"Disk controller type\")\n\tf.StringVar(&cmd.annotation, \"annotation\", \"\", \"VM description\")\n\n\tf.StringVar(&cmd.iso, \"iso\", \"\", \"ISO path\")\n\tcmd.isoDatastoreFlag, ctx = flags.NewCustomDatastoreFlag(ctx)\n\tf.StringVar(&cmd.isoDatastoreFlag.Name, \"iso-datastore\", \"\", \"Datastore for ISO file\")\n\n\tf.StringVar(&cmd.disk, \"disk\", \"\", \"Disk path (to use existing) OR size (to create new, e.g. 
20GB)\")\n\tcmd.diskDatastoreFlag, _ = flags.NewCustomDatastoreFlag(ctx)\n\tf.StringVar(&cmd.diskDatastoreFlag.Name, \"disk-datastore\", \"\", \"Datastore for disk file\")\n}\n\nfunc (cmd *create) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.StoragePodFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.ResourcePoolFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.NetworkFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.FolderFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\n\t// Default iso/disk datastores to the VM's datastore\n\tif cmd.isoDatastoreFlag.Name == \"\" {\n\t\tcmd.isoDatastoreFlag = cmd.DatastoreFlag\n\t}\n\tif cmd.diskDatastoreFlag.Name == \"\" {\n\t\tcmd.diskDatastoreFlag = cmd.DatastoreFlag\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvar err error\n\n\tif len(f.Args()) != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tcmd.name = f.Arg(0)\n\tif cmd.name == \"\" {\n\t\treturn flag.ErrHelp\n\t}\n\n\tcmd.Client, err = cmd.ClientFlag.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.Datacenter, err = cmd.DatacenterFlag.Datacenter()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.StoragePodFlag.Isset() {\n\t\tcmd.StoragePod, err = cmd.StoragePodFlag.StoragePod()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tcmd.Datastore, err = cmd.DatastoreFlag.Datastore()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tcmd.HostSystem, err = cmd.HostSystemFlag.HostSystemIfSpecified()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.HostSystem != nil {\n\t\tif cmd.ResourcePool, err = 
cmd.HostSystem.ResourcePool(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t// -host is optional\n\t\tif cmd.ResourcePool, err = cmd.ResourcePoolFlag.ResourcePool(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif cmd.Folder, err = cmd.FolderFlag.Folder(); err != nil {\n\t\treturn err\n\t}\n\n\t// Verify ISO exists\n\tif cmd.iso != \"\" {\n\t\t_, err = cmd.isoDatastoreFlag.Stat(ctx, cmd.iso)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcmd.isoDatastore, err = cmd.isoDatastoreFlag.Datastore()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Verify disk exists\n\tif cmd.disk != \"\" {\n\t\tvar b units.ByteSize\n\n\t\t// If disk can be parsed as byte units, don't stat\n\t\terr = b.Set(cmd.disk)\n\t\tif err == nil {\n\t\t\tcmd.diskByteSize = int64(b)\n\t\t} else {\n\t\t\t_, err = cmd.diskDatastoreFlag.Stat(ctx, cmd.disk)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcmd.diskDatastore, err = cmd.diskDatastoreFlag.Datastore()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\ttask, err := cmd.createVM(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinfo, err := task.WaitForResult(ctx, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvm := object.NewVirtualMachine(cmd.Client, info.Result.(types.ManagedObjectReference))\n\n\tif cmd.on {\n\t\ttask, err := vm.PowerOn(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = task.WaitForResult(ctx, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *create) createVM(ctx context.Context) (*object.Task, error) {\n\tvar devices object.VirtualDeviceList\n\tvar err error\n\n\tspec := &types.VirtualMachineConfigSpec{\n\t\tName:       cmd.name,\n\t\tGuestId:    cmd.guestID,\n\t\tNumCPUs:    int32(cmd.cpus),\n\t\tMemoryMB:   int64(cmd.memory),\n\t\tAnnotation: cmd.annotation,\n\t}\n\n\tdevices, err = cmd.addStorage(nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdevices, err = cmd.addNetwork(devices)\n\tif err != 
nil {\n\t\treturn nil, err\n\t}\n\n\tdeviceChange, err := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tspec.DeviceChange = deviceChange\n\n\tvar datastore *object.Datastore\n\n\t// If storage pod is specified, collect placement recommendations\n\tif cmd.StoragePod != nil {\n\t\tdatastore, err = cmd.recommendDatastore(ctx, spec)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tdatastore = cmd.Datastore\n\t}\n\n\tif !cmd.force {\n\t\tvmxPath := fmt.Sprintf(\"%s/%s.vmx\", cmd.name, cmd.name)\n\n\t\t_, err := datastore.Stat(ctx, vmxPath)\n\t\tif err == nil {\n\t\t\tdsPath := cmd.Datastore.Path(vmxPath)\n\t\t\treturn nil, fmt.Errorf(\"File %s already exists\", dsPath)\n\t\t}\n\t}\n\n\tfolder := cmd.Folder\n\n\tspec.Files = &types.VirtualMachineFileInfo{\n\t\tVmPathName: fmt.Sprintf(\"[%s]\", datastore.Name()),\n\t}\n\n\treturn folder.CreateVM(ctx, *spec, cmd.ResourcePool, cmd.HostSystem)\n}\n\nfunc (cmd *create) addStorage(devices object.VirtualDeviceList) (object.VirtualDeviceList, error) {\n\tif cmd.controller != \"ide\" {\n\t\tif cmd.controller == \"nvme\" {\n\t\t\tnvme, err := devices.CreateNVMEController()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tdevices = append(devices, nvme)\n\t\t\tcmd.controller = devices.Name(nvme)\n\t\t} else {\n\t\t\tscsi, err := devices.CreateSCSIController(cmd.controller)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tdevices = append(devices, scsi)\n\t\t\tcmd.controller = devices.Name(scsi)\n\t\t}\n\t}\n\n\t// If controller is specified to be IDE or if an ISO is specified, add IDE controller.\n\tif cmd.controller == \"ide\" || cmd.iso != \"\" {\n\t\tide, err := devices.CreateIDEController()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdevices = append(devices, ide)\n\t}\n\n\tif cmd.diskByteSize != 0 {\n\t\tcontroller, err := devices.FindDiskController(cmd.controller)\n\t\tif err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdisk := &types.VirtualDisk{\n\t\t\tVirtualDevice: types.VirtualDevice{\n\t\t\t\tKey: devices.NewKey(),\n\t\t\t\tBacking: &types.VirtualDiskFlatVer2BackingInfo{\n\t\t\t\t\tDiskMode:        string(types.VirtualDiskModePersistent),\n\t\t\t\t\tThinProvisioned: types.NewBool(true),\n\t\t\t\t},\n\t\t\t},\n\t\t\tCapacityInKB: cmd.diskByteSize / 1024,\n\t\t}\n\n\t\tdevices.AssignController(disk, controller)\n\t\tdevices = append(devices, disk)\n\t} else if cmd.disk != \"\" {\n\t\tcontroller, err := devices.FindDiskController(cmd.controller)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tds := cmd.diskDatastore.Reference()\n\t\tpath := cmd.diskDatastore.Path(cmd.disk)\n\t\tdisk := devices.CreateDisk(controller, ds, path)\n\n\t\tif cmd.link {\n\t\t\tdisk = devices.ChildDisk(disk)\n\t\t}\n\n\t\tdevices = append(devices, disk)\n\t}\n\n\tif cmd.iso != \"\" {\n\t\tide, err := devices.FindIDEController(\"\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcdrom, err := devices.CreateCdrom(ide)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcdrom = devices.InsertIso(cdrom, cmd.isoDatastore.Path(cmd.iso))\n\t\tdevices = append(devices, cdrom)\n\t}\n\n\treturn devices, nil\n}\n\nfunc (cmd *create) addNetwork(devices object.VirtualDeviceList) (object.VirtualDeviceList, error) {\n\tnetdev, err := cmd.NetworkFlag.Device()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdevices = append(devices, netdev)\n\treturn devices, nil\n}\n\nfunc (cmd *create) recommendDatastore(ctx context.Context, spec *types.VirtualMachineConfigSpec) (*object.Datastore, error) {\n\tsp := cmd.StoragePod.Reference()\n\n\t// Build pod selection spec from config spec\n\tpodSelectionSpec := types.StorageDrsPodSelectionSpec{\n\t\tStoragePod: &sp,\n\t}\n\n\t// Keep list of disks that need to be placed\n\tvar disks []*types.VirtualDisk\n\n\t// Collect disks eligible for placement\n\tfor _, deviceConfigSpec := range spec.DeviceChange {\n\t\ts := 
deviceConfigSpec.GetVirtualDeviceConfigSpec()\n\t\tif s.Operation != types.VirtualDeviceConfigSpecOperationAdd {\n\t\t\tcontinue\n\t\t}\n\n\t\tif s.FileOperation != types.VirtualDeviceConfigSpecFileOperationCreate {\n\t\t\tcontinue\n\t\t}\n\n\t\td, ok := s.Device.(*types.VirtualDisk)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tpodConfigForPlacement := types.VmPodConfigForPlacement{\n\t\t\tStoragePod: sp,\n\t\t\tDisk: []types.PodDiskLocator{\n\t\t\t\t{\n\t\t\t\t\tDiskId:          d.Key,\n\t\t\t\t\tDiskBackingInfo: d.Backing,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tpodSelectionSpec.InitialVmConfig = append(podSelectionSpec.InitialVmConfig, podConfigForPlacement)\n\t\tdisks = append(disks, d)\n\t}\n\n\tsps := types.StoragePlacementSpec{\n\t\tType:             string(types.StoragePlacementSpecPlacementTypeCreate),\n\t\tResourcePool:     types.NewReference(cmd.ResourcePool.Reference()),\n\t\tPodSelectionSpec: podSelectionSpec,\n\t\tConfigSpec:       spec,\n\t}\n\n\tsrm := object.NewStorageResourceManager(cmd.Client)\n\tresult, err := srm.RecommendDatastores(ctx, sps)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Use result to pin disks to recommended datastores\n\trecs := result.Recommendations\n\tif len(recs) == 0 {\n\t\treturn nil, fmt.Errorf(\"no recommendations\")\n\t}\n\n\tds := recs[0].Action[0].(*types.StoragePlacementAction).Destination\n\n\tvar mds mo.Datastore\n\terr = property.DefaultCollector(cmd.Client).RetrieveOne(ctx, ds, []string{\"name\"}, &mds)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdatastore := object.NewDatastore(cmd.Client, ds)\n\tdatastore.InventoryPath = mds.Name\n\n\t// Apply recommendation to eligible disks\n\tfor _, disk := range disks {\n\t\tbacking := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)\n\t\tbacking.Datastore = &ds\n\t}\n\n\treturn datastore, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/destroy.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vm\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype destroy struct {\n\t*flags.ClientFlag\n\t*flags.SearchFlag\n}\n\nfunc init() {\n\tcli.Register(\"vm.destroy\", &destroy{})\n}\n\nfunc (cmd *destroy) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.SearchFlag, ctx = flags.NewSearchFlag(ctx, flags.SearchVirtualMachines)\n\tcmd.SearchFlag.Register(ctx, f)\n}\n\nfunc (cmd *destroy) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SearchFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *destroy) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvms, err := cmd.VirtualMachines(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, vm := range vms {\n\t\ttask, err := vm.PowerOff(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Ignore error since the VM may already been in powered off state.\n\t\t// vm.Destroy will fail if the VM is still powered on.\n\t\t_ = task.Wait(ctx)\n\n\t\ttask, err = vm.Destroy(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = task.Wait(ctx)\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/disk/attach.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage disk\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype attach struct {\n\t*flags.DatastoreFlag\n\t*flags.VirtualMachineFlag\n\n\tpersist    bool\n\tlink       bool\n\tdisk       string\n\tcontroller string\n}\n\nfunc init() {\n\tcli.Register(\"vm.disk.attach\", &attach{})\n}\n\nfunc (cmd *attach) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.persist, \"persist\", true, \"Persist attached disk\")\n\tf.BoolVar(&cmd.link, \"link\", true, \"Link specified disk\")\n\tf.StringVar(&cmd.controller, \"controller\", \"\", \"Disk controller\")\n\tf.StringVar(&cmd.disk, \"disk\", \"\", \"Disk path name\")\n}\n\nfunc (cmd *attach) Process(ctx context.Context) error {\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *attach) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == 
nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tds, err := cmd.Datastore()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontroller, err := devices.FindDiskController(cmd.controller)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdisk := devices.CreateDisk(controller, ds.Reference(), ds.Path(cmd.disk))\n\tbacking := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)\n\n\tif cmd.link {\n\t\tif cmd.persist {\n\t\t\tbacking.DiskMode = string(types.VirtualDiskModeIndependent_persistent)\n\t\t} else {\n\t\t\tbacking.DiskMode = string(types.VirtualDiskModeIndependent_nonpersistent)\n\t\t}\n\n\t\tdisk = devices.ChildDisk(disk)\n\t\treturn vm.AddDevice(ctx, disk)\n\t}\n\n\tif cmd.persist {\n\t\tbacking.DiskMode = string(types.VirtualDiskModePersistent)\n\t} else {\n\t\tbacking.DiskMode = string(types.VirtualDiskModeNonpersistent)\n\t}\n\n\treturn vm.AddDevice(ctx, disk)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/disk/change.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage disk\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/units\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype change struct {\n\t*flags.VirtualMachineFlag\n\n\tname     string\n\tkey      int\n\tlabel    string\n\tfilePath string\n\n\tbytes units.ByteSize\n\tmode  string\n}\n\nfunc init() {\n\tcli.Register(\"vm.disk.change\", &change{})\n}\n\nfunc (cmd *change) Description() string {\n\treturn `Change some properties of a VM's DISK\n\nIn particular, you can change the DISK mode, and the size (as long as it is bigger)\n\nExamples:\n  govc vm.disk.change -vm VM -disk.key 2001 -size 10G\n  govc vm.disk.change -vm VM -disk.label \"BDD disk\" -size 10G\n  govc vm.disk.change -vm VM -disk.name \"hard-1000-0\" -size 12G\n  govc vm.disk.change -vm VM -disk.filePath \"[DS] VM/VM-1.vmdk\" -mode nonpersistent`\n}\n\nfunc (cmd *change) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\terr := (&cmd.bytes).Set(\"0G\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tf.Var(&cmd.bytes, \"size\", \"New disk size\")\n\tf.StringVar(&cmd.name, \"disk.name\", \"\", \"Disk 
name\")\n\tf.StringVar(&cmd.label, \"disk.label\", \"\", \"Disk label\")\n\tf.StringVar(&cmd.filePath, \"disk.filePath\", \"\", \"Disk file name\")\n\tf.IntVar(&cmd.key, \"disk.key\", 0, \"Disk unique key\")\n\tf.StringVar(&cmd.mode, \"mode\", \"\", fmt.Sprintf(\"Disk mode (%s)\", strings.Join(vdmTypes, \"|\")))\n}\n\nfunc (cmd *change) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *change) FindDisk(ctx context.Context, list object.VirtualDeviceList) (*types.VirtualDisk, error) {\n\tvar disks []*types.VirtualDisk\n\tfor _, device := range list {\n\t\tswitch md := device.(type) {\n\t\tcase *types.VirtualDisk:\n\t\t\tif cmd.CheckDiskProperties(ctx, list.Name(device), md) {\n\t\t\t\tdisks = append(disks, md)\n\t\t\t}\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tswitch len(disks) {\n\tcase 0:\n\t\treturn nil, errors.New(\"No disk found using the given values\")\n\tcase 1:\n\t\treturn disks[0], nil\n\t}\n\treturn nil, errors.New(\"The given disk values match multiple disks\")\n}\n\nfunc (cmd *change) CheckDiskProperties(ctx context.Context, name string, disk *types.VirtualDisk) bool {\n\tswitch {\n\tcase cmd.key != 0 && disk.Key != int32(cmd.key):\n\t\tfallthrough\n\tcase cmd.name != \"\" && name != cmd.name:\n\t\tfallthrough\n\tcase cmd.label != \"\" && disk.DeviceInfo.GetDescription().Label != cmd.label:\n\t\treturn false\n\tcase cmd.filePath != \"\":\n\t\tif b, ok := disk.Backing.(types.BaseVirtualDeviceFileBackingInfo); ok {\n\t\t\tif b.GetVirtualDeviceFileBackingInfo().FileName != cmd.filePath {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (cmd *change) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\teditdisk, err := 
cmd.FindDisk(ctx, devices)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif int64(cmd.bytes) != 0 {\n\t\teditdisk.CapacityInKB = int64(cmd.bytes) / 1024\n\t}\n\n\tbacking := editdisk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)\n\n\tif len(cmd.mode) != 0 {\n\t\tbacking.DiskMode = cmd.mode\n\t}\n\n\tspec := types.VirtualMachineConfigSpec{}\n\n\tconfig := &types.VirtualDeviceConfigSpec{\n\t\tDevice:    editdisk,\n\t\tOperation: types.VirtualDeviceConfigSpecOperationEdit,\n\t}\n\n\tconfig.FileOperation = \"\"\n\n\tspec.DeviceChange = append(spec.DeviceChange, config)\n\n\ttask, err := vm.Reconfigure(ctx, spec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = task.Wait(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error resizing main disk\\nLogged Item:  %s\", err)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/disk/create.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage disk\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/units\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype create struct {\n\t*flags.DatastoreFlag\n\t*flags.OutputFlag\n\t*flags.VirtualMachineFlag\n\n\tcontroller string\n\tName       string\n\tBytes      units.ByteSize\n\tThick      bool\n\tEager      bool\n\tDiskMode   string\n}\n\nvar vdmTypes = []string{\n\tstring(types.VirtualDiskModePersistent),\n\tstring(types.VirtualDiskModeNonpersistent),\n\tstring(types.VirtualDiskModeUndoable),\n\tstring(types.VirtualDiskModeIndependent_persistent),\n\tstring(types.VirtualDiskModeIndependent_nonpersistent),\n\tstring(types.VirtualDiskModeAppend),\n}\n\nfunc init() {\n\tcli.Register(\"vm.disk.create\", &create{})\n}\n\nfunc (cmd *create) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\terr := (&cmd.Bytes).Set(\"10G\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tf.StringVar(&cmd.controller, \"controller\", 
\"\", \"Disk controller\")\n\tf.StringVar(&cmd.Name, \"name\", \"\", \"Name for new disk\")\n\tf.Var(&cmd.Bytes, \"size\", \"Size of new disk\")\n\tf.BoolVar(&cmd.Thick, \"thick\", false, \"Thick provision new disk\")\n\tf.BoolVar(&cmd.Eager, \"eager\", false, \"Eagerly scrub new disk\")\n\tf.StringVar(&cmd.DiskMode, \"mode\", \"persistent\", fmt.Sprintf(\"Disk mode (%s)\", strings.Join(vdmTypes, \"|\")))\n}\n\nfunc (cmd *create) Process(ctx context.Context) error {\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *create) Description() string {\n\treturn `Create disk and attach to VM.\n\nExamples:\n  govc vm.disk.create -vm $name -name $name/disk1 -size 10G`\n}\n\nfunc (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif len(cmd.Name) == 0 {\n\t\treturn errors.New(\"please specify a disk name\")\n\t}\n\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif vm == nil {\n\t\treturn errors.New(\"please specify a vm\")\n\t}\n\n\tds, err := cmd.Datastore()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontroller, err := devices.FindDiskController(cmd.controller)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvdmMatch := false\n\tfor _, vdm := range vdmTypes {\n\t\tif cmd.DiskMode == vdm {\n\t\t\tvdmMatch = true\n\t\t}\n\t}\n\n\tif vdmMatch == false {\n\t\treturn errors.New(\"please specify a valid disk mode\")\n\t}\n\n\tdisk := devices.CreateDisk(controller, ds.Reference(), ds.Path(cmd.Name))\n\n\texisting := devices.SelectByBackingInfo(disk.Backing)\n\n\tif len(existing) > 0 {\n\t\tcmd.Log(\"Disk already present\\n\")\n\t\treturn nil\n\t}\n\n\tbacking := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)\n\n\tif cmd.Thick 
{\n\t\tbacking.ThinProvisioned = types.NewBool(false)\n\t\tbacking.EagerlyScrub = types.NewBool(cmd.Eager)\n\t}\n\n\tbacking.DiskMode = cmd.DiskMode\n\n\tcmd.Log(\"Creating disk\\n\")\n\tdisk.CapacityInKB = int64(cmd.Bytes) / 1024\n\treturn vm.AddDevice(ctx, disk)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/auth.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype AuthFlag struct {\n\tauth types.NamePasswordAuthentication\n}\n\nfunc newAuthFlag(ctx context.Context) (*AuthFlag, context.Context) {\n\treturn &AuthFlag{}, ctx\n}\n\nfunc (flag *AuthFlag) String() string {\n\treturn fmt.Sprintf(\"%s:%s\", flag.auth.Username, strings.Repeat(\"x\", len(flag.auth.Password)))\n}\n\nfunc (flag *AuthFlag) Set(s string) error {\n\tc := strings.Split(s, \":\")\n\tif len(c) > 0 {\n\t\tflag.auth.Username = c[0]\n\t\tif len(c) > 1 {\n\t\t\tflag.auth.Password = c[1]\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (flag *AuthFlag) Register(ctx context.Context, f *flag.FlagSet) {\n\tenv := \"GOVC_GUEST_LOGIN\"\n\tvalue := os.Getenv(env)\n\tflag.Set(value)\n\tusage := fmt.Sprintf(\"Guest VM credentials [%s]\", env)\n\tf.Var(flag, \"l\", usage)\n}\n\nfunc (flag *AuthFlag) Process(ctx context.Context) error {\n\treturn nil\n}\n\nfunc (flag *AuthFlag) Auth() types.BaseGuestAuthentication {\n\treturn &flag.auth\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/chmod.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"strconv\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype chmod struct {\n\t*GuestFlag\n}\n\nfunc init() {\n\tcli.Register(\"guest.chmod\", &chmod{})\n}\n\nfunc (cmd *chmod) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.GuestFlag, ctx = newGuestFlag(ctx)\n\tcmd.GuestFlag.Register(ctx, f)\n}\n\nfunc (cmd *chmod) Process(ctx context.Context) error {\n\tif err := cmd.GuestFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *chmod) Usage() string {\n\treturn \"MODE FILE\"\n}\n\nfunc (cmd *chmod) Description() string {\n\treturn `Change FILE MODE on VM.\n\nExamples:\n  govc guest.chmod -vm $name 0644 /var/log/foo.log`\n}\n\nfunc (cmd *chmod) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 2 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tm, err := cmd.FileManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar attr types.GuestPosixFileAttributes\n\n\tattr.Permissions, err = strconv.ParseInt(f.Arg(0), 0, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn m.ChangeFileAttributes(ctx, cmd.Auth(), f.Arg(1), &attr)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/chown.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype chown struct {\n\t*GuestFlag\n}\n\nfunc init() {\n\tcli.Register(\"guest.chown\", &chown{})\n}\n\nfunc (cmd *chown) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.GuestFlag, ctx = newGuestFlag(ctx)\n\tcmd.GuestFlag.Register(ctx, f)\n}\n\nfunc (cmd *chown) Process(ctx context.Context) error {\n\tif err := cmd.GuestFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *chown) Usage() string {\n\treturn \"UID[:GID] FILE\"\n}\n\nfunc (cmd *chown) Description() string {\n\treturn `Change FILE UID and GID on VM.\n\nExamples:\n  govc guest.chown -vm $name UID[:GID] /var/log/foo.log`\n}\n\nfunc (cmd *chown) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 2 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tm, err := cmd.FileManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar attr types.GuestPosixFileAttributes\n\n\tids := strings.SplitN(f.Arg(0), \":\", 2)\n\tif len(ids) == 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tid, err := strconv.Atoi(ids[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tattr.OwnerId = new(int32)\n\t*attr.OwnerId = int32(id)\n\n\tif len(ids) == 2 {\n\t\tid, err = strconv.Atoi(ids[1])\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\n\t\tattr.GroupId = new(int32)\n\t\t*attr.GroupId = int32(id)\n\t}\n\n\treturn m.ChangeFileAttributes(ctx, cmd.Auth(), f.Arg(1), &attr)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/download.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"flag\"\n\t\"io\"\n\n\t\"context\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n)\n\ntype download struct {\n\t*GuestFlag\n\n\toverwrite bool\n}\n\nfunc init() {\n\tcli.Register(\"guest.download\", &download{})\n}\n\nfunc (cmd *download) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.GuestFlag, ctx = newGuestFlag(ctx)\n\tcmd.GuestFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.overwrite, \"f\", false, \"If set, the local destination file is clobbered\")\n}\n\nfunc (cmd *download) Usage() string {\n\treturn \"SOURCE DEST\"\n}\n\nfunc (cmd *download) Description() string {\n\treturn `Copy SOURCE from the guest VM to DEST on the local system.\n\nIf DEST name is \"-\", source is written to stdout.\n\nExamples:\n  govc guest.download -l user:pass -vm=my-vm /var/log/my.log ./local.log\n  govc guest.download -l user:pass -vm=my-vm /etc/motd -`\n}\n\nfunc (cmd *download) Process(ctx context.Context) error {\n\tif err := cmd.GuestFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *download) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 2 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tm, err := cmd.FileManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsrc := f.Arg(0)\n\tdst := f.Arg(1)\n\n\t_, err = os.Stat(dst)\n\tif err == 
nil && !cmd.overwrite {\n\t\treturn os.ErrExist\n\t}\n\n\tinfo, err := m.InitiateFileTransferFromGuest(ctx, cmd.Auth(), src)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tu, err := cmd.ParseURL(info.Url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tp := soap.DefaultDownload\n\n\tif dst == \"-\" {\n\t\tf, _, err := c.Client.Download(u, &p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = io.Copy(os.Stdout, f)\n\t\treturn err\n\t}\n\n\tif cmd.OutputFlag.TTY {\n\t\tlogger := cmd.ProgressLogger(\"Downloading... \")\n\t\tp.Progress = logger\n\t\tdefer logger.Wait()\n\t}\n\n\treturn c.Client.DownloadFile(dst, u, &p)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/file_attr.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype FileAttrFlag struct {\n\ttypes.GuestPosixFileAttributes\n}\n\nfunc newFileAttrFlag(ctx context.Context) (*FileAttrFlag, context.Context) {\n\treturn &FileAttrFlag{}, ctx\n}\n\nfunc (flag *FileAttrFlag) Register(ctx context.Context, f *flag.FlagSet) {\n\tf.Var(flags.NewOptionalInt32(&flag.OwnerId), \"uid\", \"User ID\")\n\tf.Var(flags.NewOptionalInt32(&flag.GroupId), \"gid\", \"Group ID\")\n\tf.Int64Var(&flag.Permissions, \"perm\", 0, \"File permissions\")\n}\n\nfunc (flag *FileAttrFlag) Process(ctx context.Context) error {\n\treturn nil\n}\n\nfunc (flag *FileAttrFlag) Attr() types.BaseGuestFileAttributes {\n\treturn &flag.GuestPosixFileAttributes\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/getenv.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n)\n\ntype getenv struct {\n\t*GuestFlag\n}\n\nfunc init() {\n\tcli.Register(\"guest.getenv\", &getenv{})\n}\n\nfunc (cmd *getenv) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.GuestFlag, ctx = newGuestFlag(ctx)\n\tcmd.GuestFlag.Register(ctx, f)\n}\n\nfunc (cmd *getenv) Process(ctx context.Context) error {\n\tif err := cmd.GuestFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *getenv) Usage() string {\n\treturn \"[NAME]...\"\n}\n\nfunc (cmd *getenv) Description() string {\n\treturn `Read NAME environment variables from VM.\n\nExamples:\n  govc guest.getenv -vm $name\n  govc guest.getenv -vm $name HOME`\n}\n\nfunc (cmd *getenv) Run(ctx context.Context, f *flag.FlagSet) error {\n\tm, err := cmd.ProcessManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvars, err := m.ReadEnvironmentVariable(ctx, cmd.Auth(), f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, v := range vars {\n\t\tfmt.Printf(\"%s\\n\", v)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/guest.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\n\t\"context\"\n\t\"net/url\"\n\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/guest\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype GuestFlag struct {\n\t*flags.ClientFlag\n\t*flags.VirtualMachineFlag\n\n\t*AuthFlag\n}\n\nfunc newGuestFlag(ctx context.Context) (*GuestFlag, context.Context) {\n\tf := &GuestFlag{}\n\tf.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tf.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tf.AuthFlag, ctx = newAuthFlag(ctx)\n\treturn f, ctx\n}\n\nfunc (flag *GuestFlag) Register(ctx context.Context, f *flag.FlagSet) {\n\tflag.ClientFlag.Register(ctx, f)\n\tflag.VirtualMachineFlag.Register(ctx, f)\n\tflag.AuthFlag.Register(ctx, f)\n}\n\nfunc (flag *GuestFlag) Process(ctx context.Context) error {\n\tif err := flag.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := flag.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := flag.AuthFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (flag *GuestFlag) FileManager() (*guest.FileManager, error) {\n\tctx := context.TODO()\n\tc, err := flag.Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvm, err := flag.VirtualMachine()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\to := guest.NewOperationsManager(c, 
vm.Reference())\n\treturn o.FileManager(ctx)\n}\n\nfunc (flag *GuestFlag) ProcessManager() (*guest.ProcessManager, error) {\n\tctx := context.TODO()\n\tc, err := flag.Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvm, err := flag.VirtualMachine()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\to := guest.NewOperationsManager(c, vm.Reference())\n\treturn o.ProcessManager(ctx)\n}\n\nfunc (flag *GuestFlag) ParseURL(urlStr string) (*url.URL, error) {\n\tc, err := flag.Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c.Client.ParseURL(urlStr)\n}\n\nfunc (flag *GuestFlag) VirtualMachine() (*object.VirtualMachine, error) {\n\tvm, err := flag.VirtualMachineFlag.VirtualMachine()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif vm == nil {\n\t\treturn nil, errors.New(\"no vm specified\")\n\t}\n\treturn vm, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/kill.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n)\n\ntype kill struct {\n\t*GuestFlag\n\n\tpids pidSelector\n}\n\nfunc init() {\n\tcli.Register(\"guest.kill\", &kill{})\n}\n\nfunc (cmd *kill) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.GuestFlag, ctx = newGuestFlag(ctx)\n\tcmd.GuestFlag.Register(ctx, f)\n\n\tf.Var(&cmd.pids, \"p\", \"Process ID\")\n}\n\nfunc (cmd *kill) Process(ctx context.Context) error {\n\tif err := cmd.GuestFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *kill) Description() string {\n\treturn `Kill process ID on VM.\n\nExamples:\n  govc guest.kill -vm $name -p 12345`\n}\n\nfunc (cmd *kill) Run(ctx context.Context, f *flag.FlagSet) error {\n\tm, err := cmd.ProcessManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, pid := range cmd.pids {\n\t\tif err := m.TerminateProcess(ctx, cmd.Auth(), pid); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/ls.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/units\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ls struct {\n\t*GuestFlag\n\n\tsimple bool\n}\n\nfunc init() {\n\tcli.Register(\"guest.ls\", &ls{})\n}\n\nfunc (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.GuestFlag, ctx = newGuestFlag(ctx)\n\tcmd.GuestFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.simple, \"s\", false, \"Simple path only listing\") // sadly we used '-l' for guest login\n}\n\nfunc (cmd *ls) Process(ctx context.Context) error {\n\tif err := cmd.GuestFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *ls) Usage() string {\n\treturn \"PATH\"\n}\n\nfunc (cmd *ls) Description() string {\n\treturn `List PATH files in VM.\n\nExamples:\n  govc guest.ls -vm $name /tmp`\n}\n\nfunc (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error {\n\tm, err := cmd.FileManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar offset int32\n\ttw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)\n\n\tfor {\n\t\tinfo, err := m.ListFiles(ctx, cmd.Auth(), f.Arg(0), offset, 0, f.Arg(1))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, f := range info.Files {\n\t\t\tif cmd.simple {\n\t\t\t\tfmt.Fprintln(tw, 
f.Path)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar kind byte\n\n\t\t\tswitch types.GuestFileType(f.Type) {\n\t\t\tcase types.GuestFileTypeDirectory:\n\t\t\t\tkind = 'd'\n\t\t\t\tif f.Size == 0 {\n\t\t\t\t\tf.Size = 4092\n\t\t\t\t}\n\t\t\tcase types.GuestFileTypeSymlink:\n\t\t\t\tkind = 'l'\n\t\t\tcase types.GuestFileTypeFile:\n\t\t\t\tkind = '-'\n\t\t\t}\n\n\t\t\tswitch x := f.Attributes.(type) {\n\t\t\tcase *types.GuestPosixFileAttributes:\n\t\t\t\tperm := os.FileMode(x.Permissions).Perm().String()[1:]\n\t\t\t\tfmt.Fprintf(tw, \"%c%s\\t%d\\t%d\\t\", kind, perm, *x.OwnerId, *x.GroupId)\n\t\t\t}\n\n\t\t\tattr := f.Attributes.GetGuestFileAttributes()\n\n\t\t\tfmt.Fprintf(tw, \"%s\\t%s\\t%s\\n\", units.FileSize(f.Size), attr.ModificationTime.Format(\"Jan 2 15:04 2006\"), f.Path)\n\t\t}\n\n\t\t_ = tw.Flush()\n\n\t\tif info.Remaining == 0 {\n\t\t\tbreak\n\t\t}\n\t\toffset += int32(len(info.Files))\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/mkdir.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype mkdir struct {\n\t*GuestFlag\n\n\tcreateParents bool\n}\n\nfunc init() {\n\tcli.Register(\"guest.mkdir\", &mkdir{})\n}\n\nfunc (cmd *mkdir) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.GuestFlag, ctx = newGuestFlag(ctx)\n\tcmd.GuestFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.createParents, \"p\", false, \"Create intermediate directories as needed\")\n}\n\nfunc (cmd *mkdir) Process(ctx context.Context) error {\n\tif err := cmd.GuestFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *mkdir) Usage() string {\n\treturn \"PATH\"\n}\n\nfunc (cmd *mkdir) Description() string {\n\treturn `Create directory PATH in VM.\n\nExamples:\n  govc guest.mkdir -vm $name /tmp/logs\n  govc guest.mkdir -vm $name -p /tmp/logs/foo/bar`\n}\n\nfunc (cmd *mkdir) Run(ctx context.Context, f *flag.FlagSet) error {\n\tm, err := cmd.FileManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = m.MakeDirectory(ctx, cmd.Auth(), f.Arg(0), cmd.createParents)\n\n\t// ignore EEXIST if -p flag is given\n\tif err != nil && cmd.createParents {\n\t\tif soap.IsSoapFault(err) {\n\t\t\tsoapFault := soap.ToSoapFault(err)\n\t\t\tif _, ok := 
soapFault.VimFault().(types.FileAlreadyExists); ok {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/mktemp.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n)\n\ntype mktemp struct {\n\t*GuestFlag\n\n\tdir    bool\n\tpath   string\n\tprefix string\n\tsuffix string\n}\n\nfunc init() {\n\tcli.Register(\"guest.mktemp\", &mktemp{})\n}\n\nfunc (cmd *mktemp) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.GuestFlag, ctx = newGuestFlag(ctx)\n\tcmd.GuestFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.dir, \"d\", false, \"Make a directory instead of a file\")\n\tf.StringVar(&cmd.path, \"p\", \"\", \"If specified, create relative to this directory\")\n\tf.StringVar(&cmd.prefix, \"t\", \"\", \"Prefix\")\n\tf.StringVar(&cmd.suffix, \"s\", \"\", \"Suffix\")\n}\n\nfunc (cmd *mktemp) Process(ctx context.Context) error {\n\tif err := cmd.GuestFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *mktemp) Description() string {\n\treturn `Create a temporary file or directory in VM.\n\nExamples:\n  govc guest.mktemp -vm $name\n  govc guest.mktemp -vm $name -d\n  govc guest.mktemp -vm $name -t myprefix\n  govc guest.mktemp -vm $name -p /var/tmp/$USER`\n}\n\nfunc (cmd *mktemp) Run(ctx context.Context, f *flag.FlagSet) error {\n\tm, err := cmd.FileManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmk := m.CreateTemporaryFile\n\tif cmd.dir {\n\t\tmk = 
m.CreateTemporaryDirectory\n\t}\n\n\tname, err := mk(ctx, cmd.Auth(), cmd.prefix, cmd.suffix, cmd.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println(name)\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/mv.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype mv struct {\n\t*GuestFlag\n\n\tnoclobber bool\n}\n\nfunc init() {\n\tcli.Register(\"guest.mv\", &mv{})\n}\n\nfunc (cmd *mv) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.GuestFlag, ctx = newGuestFlag(ctx)\n\tcmd.GuestFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.noclobber, \"n\", false, \"Do not overwrite an existing file\")\n}\n\nfunc (cmd *mv) Process(ctx context.Context) error {\n\tif err := cmd.GuestFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *mv) Usage() string {\n\treturn \"SOURCE DEST\"\n}\n\nfunc (cmd *mv) Description() string {\n\treturn `Move (rename) files in VM.\n\nExamples:\n  govc guest.mv -vm $name /tmp/foo.sh /tmp/bar.sh\n  govc guest.mv -vm $name -n /tmp/baz.sh /tmp/bar.sh`\n}\n\nfunc (cmd *mv) Run(ctx context.Context, f *flag.FlagSet) error {\n\tm, err := cmd.FileManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsrc := f.Arg(0)\n\tdst := f.Arg(1)\n\n\terr = m.MoveFile(ctx, cmd.Auth(), src, dst, !cmd.noclobber)\n\n\tif err != nil {\n\t\tif soap.IsSoapFault(err) {\n\t\t\tsoapFault := soap.ToSoapFault(err)\n\t\t\tif _, ok := soapFault.VimFault().(types.NotAFile); ok {\n\t\t\t\terr = 
m.MoveDirectory(ctx, cmd.Auth(), src, dst)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/ps.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ps struct {\n\t*flags.OutputFlag\n\t*GuestFlag\n\n\tevery bool\n\texit  bool\n\twait  bool\n\n\tpids pidSelector\n\tuids uidSelector\n}\n\ntype pidSelector []int64\n\nfunc (s *pidSelector) String() string {\n\treturn fmt.Sprint(*s)\n}\n\nfunc (s *pidSelector) Set(value string) error {\n\tv, err := strconv.ParseInt(value, 0, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*s = append(*s, v)\n\treturn nil\n}\n\ntype uidSelector map[string]bool\n\nfunc (s uidSelector) String() string {\n\treturn \"\"\n}\n\nfunc (s uidSelector) Set(value string) error {\n\ts[value] = true\n\treturn nil\n}\n\nfunc init() {\n\tcli.Register(\"guest.ps\", &ps{})\n}\n\nfunc (cmd *ps) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tcmd.GuestFlag, ctx = newGuestFlag(ctx)\n\tcmd.GuestFlag.Register(ctx, f)\n\n\tcmd.uids = make(map[string]bool)\n\tf.BoolVar(&cmd.every, \"e\", false, \"Select all processes\")\n\tf.BoolVar(&cmd.exit, \"x\", false, \"Output exit time and code\")\n\tf.BoolVar(&cmd.wait, \"X\", false, 
\"Wait for process to exit\")\n\tf.Var(&cmd.pids, \"p\", \"Select by process ID\")\n\tf.Var(&cmd.uids, \"U\", \"Select by process UID\")\n}\n\nfunc (cmd *ps) Process(ctx context.Context) error {\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.GuestFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *ps) Description() string {\n\treturn `List processes in VM.\n\nBy default, unless the '-e', '-p' or '-U' flag is specified, only processes owned\nby the '-l' flag user are displayed.\n\nThe '-x' and '-X' flags only apply to processes started by vmware-tools,\nsuch as those started with the govc guest.start command.\n\nExamples:\n  govc guest.ps -vm $name\n  govc guest.ps -vm $name -e\n  govc guest.ps -vm $name -p 12345\n  govc guest.ps -vm $name -U root`\n}\n\nfunc running(procs []types.GuestProcessInfo) bool {\n\tfor _, p := range procs {\n\t\tif p.EndTime == nil {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (cmd *ps) list(ctx context.Context) ([]types.GuestProcessInfo, error) {\n\tm, err := cmd.ProcessManager()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tauth := cmd.Auth()\n\n\tfor {\n\t\tprocs, err := m.ListProcesses(ctx, auth, cmd.pids)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif cmd.wait && running(procs) {\n\t\t\t<-time.After(time.Millisecond * 250)\n\t\t\tcontinue\n\t\t}\n\n\t\treturn procs, nil\n\t}\n}\n\nfunc (cmd *ps) Run(ctx context.Context, f *flag.FlagSet) error {\n\tprocs, err := cmd.list(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr := &psResult{cmd, procs}\n\n\treturn cmd.WriteResult(r)\n}\n\ntype psResult struct {\n\tcmd         *ps\n\tProcessInfo []types.GuestProcessInfo\n}\n\nfunc (r *psResult) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\tfmt.Fprintf(tw, \"%s\\t%s\\t%s\", \"UID\", \"PID\", \"STIME\")\n\tif r.cmd.exit {\n\t\tfmt.Fprintf(tw, \"\\t%s\\t%s\", \"XTIME\", \"XCODE\")\n\t}\n\tfmt.Fprint(tw, 
\"\\tCMD\\n\")\n\n\tif len(r.cmd.pids) != 0 {\n\t\tr.cmd.every = true\n\t}\n\n\tif !r.cmd.every && len(r.cmd.uids) == 0 {\n\t\tr.cmd.uids[r.cmd.auth.Username] = true\n\t}\n\n\tfor _, p := range r.ProcessInfo {\n\t\tif r.cmd.every || r.cmd.uids[p.Owner] {\n\t\t\tfmt.Fprintf(tw, \"%s\\t%d\\t%s\", p.Owner, p.Pid, p.StartTime.Format(\"15:04\"))\n\t\t\tif r.cmd.exit {\n\t\t\t\tetime := \"-\"\n\t\t\t\tecode := \"-\"\n\t\t\t\tif p.EndTime != nil {\n\t\t\t\t\tetime = p.EndTime.Format(\"15:04\")\n\t\t\t\t\tecode = strconv.Itoa(int(p.ExitCode))\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(tw, \"\\t%s\\t%s\", etime, ecode)\n\t\t\t}\n\t\t\tfmt.Fprintf(tw, \"\\t%s\\n\", strings.TrimSpace(p.CmdLine))\n\t\t}\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/rm.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n)\n\ntype rm struct {\n\t*GuestFlag\n}\n\nfunc init() {\n\tcli.Register(\"guest.rm\", &rm{})\n}\n\nfunc (cmd *rm) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.GuestFlag, ctx = newGuestFlag(ctx)\n\tcmd.GuestFlag.Register(ctx, f)\n}\n\nfunc (cmd *rm) Process(ctx context.Context) error {\n\tif err := cmd.GuestFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *rm) Usage() string {\n\treturn \"PATH\"\n}\n\nfunc (cmd *rm) Description() string {\n\treturn `Remove file PATH in VM.\n\nExamples:\n  govc guest.rm -vm $name /tmp/foo.log`\n}\n\nfunc (cmd *rm) Run(ctx context.Context, f *flag.FlagSet) error {\n\tm, err := cmd.FileManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn m.DeleteFile(ctx, cmd.Auth(), f.Arg(0))\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/rmdir.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n)\n\ntype rmdir struct {\n\t*GuestFlag\n\n\trecursive bool\n}\n\nfunc init() {\n\tcli.Register(\"guest.rmdir\", &rmdir{})\n}\n\nfunc (cmd *rmdir) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.GuestFlag, ctx = newGuestFlag(ctx)\n\tcmd.GuestFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.recursive, \"r\", false, \"Recursive removal\")\n}\n\nfunc (cmd *rmdir) Process(ctx context.Context) error {\n\tif err := cmd.GuestFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *rmdir) Usage() string {\n\treturn \"PATH\"\n}\n\nfunc (cmd *rmdir) Description() string {\n\treturn `Remove directory PATH in VM.\n\nExamples:\n  govc guest.rmdir -vm $name /tmp/empty-dir\n  govc guest.rmdir -vm $name -r /tmp/non-empty-dir`\n}\n\nfunc (cmd *rmdir) Run(ctx context.Context, f *flag.FlagSet) error {\n\tm, err := cmd.FileManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn m.DeleteDirectory(ctx, cmd.Auth(), f.Arg(0), cmd.recursive)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/start.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype start struct {\n\t*GuestFlag\n\n\tdir  string\n\tvars env\n}\n\ntype env []string\n\nfunc (e *env) String() string {\n\treturn fmt.Sprint(*e)\n}\n\nfunc (e *env) Set(value string) error {\n\t*e = append(*e, value)\n\treturn nil\n}\n\nfunc init() {\n\tcli.Register(\"guest.start\", &start{})\n}\n\nfunc (cmd *start) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.GuestFlag, ctx = newGuestFlag(ctx)\n\tcmd.GuestFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.dir, \"C\", \"\", \"The absolute path of the working directory for the program to start\")\n\tf.Var(&cmd.vars, \"e\", \"Set environment variable (key=val)\")\n}\n\nfunc (cmd *start) Process(ctx context.Context) error {\n\tif err := cmd.GuestFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *start) Usage() string {\n\treturn \"PATH [ARG]...\"\n}\n\nfunc (cmd *start) Description() string {\n\treturn `Start program in VM.\n\nThe process can have its status queried with govc guest.ps.\nWhen the process completes, its exit code and end time will be available for 5 minutes after completion.\n\nExamples:\n  govc guest.start -vm $name /bin/mount /dev/hdb1 /data\n  pid=$(govc guest.start -vm $name 
/bin/long-running-thing)\n  govc guest.ps -vm $name -p $pid -X`\n}\n\nfunc (cmd *start) Run(ctx context.Context, f *flag.FlagSet) error {\n\tm, err := cmd.ProcessManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tspec := types.GuestProgramSpec{\n\t\tProgramPath:      f.Arg(0),\n\t\tArguments:        strings.Join(f.Args()[1:], \" \"),\n\t\tWorkingDirectory: cmd.dir,\n\t\tEnvVariables:     cmd.vars,\n\t}\n\n\tpid, err := m.StartProgram(ctx, cmd.Auth(), &spec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"%d\\n\", pid)\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/tools.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype tools struct {\n\t*flags.ClientFlag\n\t*flags.SearchFlag\n\n\tmount   bool\n\tupgrade bool\n\toptions string\n\tunmount bool\n}\n\nfunc init() {\n\tcli.Register(\"vm.guest.tools\", &tools{})\n}\n\nfunc (cmd *tools) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.SearchFlag, ctx = flags.NewSearchFlag(ctx, flags.SearchVirtualMachines)\n\tcmd.SearchFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.mount, \"mount\", false, \"Mount tools CD installer in the guest\")\n\tf.BoolVar(&cmd.upgrade, \"upgrade\", false, \"Upgrade tools in the guest\")\n\tf.StringVar(&cmd.options, \"options\", \"\", \"Installer options\")\n\tf.BoolVar(&cmd.unmount, \"unmount\", false, \"Unmount tools CD installer in the guest\")\n}\n\nfunc (cmd *tools) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SearchFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *tools) Usage() string {\n\treturn \"VM...\"\n}\n\nfunc (cmd *tools) Description() string {\n\treturn `Manage guest tools in VM.\n\nExamples:\n  
govc vm.guest.tools -mount VM\n  govc vm.guest.tools -unmount VM\n  govc vm.guest.tools -upgrade -options \"opt1 opt2\" VM`\n}\n\nfunc (cmd *tools) Upgrade(ctx context.Context, vm *object.VirtualMachine) error {\n\ttask, err := vm.UpgradeTools(ctx, cmd.options)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn task.Wait(ctx)\n}\n\nfunc (cmd *tools) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvms, err := cmd.VirtualMachines(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, vm := range vms {\n\t\tswitch {\n\t\tcase cmd.mount:\n\t\t\terr = vm.MountToolsInstaller(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase cmd.upgrade:\n\t\t\terr = cmd.Upgrade(ctx, vm)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase cmd.unmount:\n\t\t\terr = vm.UnmountToolsInstaller(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\treturn flag.ErrHelp\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/touch.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"flag\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype touch struct {\n\t*GuestFlag\n\n\tnocreate bool\n\tatime    bool\n\tdate     string\n}\n\nfunc init() {\n\tcli.Register(\"guest.touch\", &touch{})\n}\n\nfunc (cmd *touch) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.GuestFlag, ctx = newGuestFlag(ctx)\n\tcmd.GuestFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.atime, \"a\", false, \"Change only the access time\")\n\tf.BoolVar(&cmd.nocreate, \"c\", false, \"Do not create any files\")\n\tf.StringVar(&cmd.date, \"d\", \"\", \"Use DATE instead of current time\")\n}\n\nfunc (cmd *touch) Process(ctx context.Context) error {\n\tif err := cmd.GuestFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *touch) Usage() string {\n\treturn \"FILE\"\n}\n\nfunc (cmd *touch) Description() string {\n\treturn `Change FILE times on VM.\n\nExamples:\n  govc guest.touch -vm $name /var/log/foo.log\n  govc guest.touch -vm $name -d \"$(date -d '1 day ago')\" /var/log/foo.log`\n}\n\nfunc (cmd *touch) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tm, err := cmd.FileManager()\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tname := f.Arg(0)\n\n\tvar attr types.GuestFileAttributes\n\tnow := time.Now()\n\n\tif cmd.date != \"\" {\n\t\tnow, err = time.Parse(time.UnixDate, cmd.date)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif cmd.atime {\n\t\tattr.AccessTime = &now\n\t} else {\n\t\tattr.ModificationTime = &now\n\t}\n\n\terr = m.ChangeFileAttributes(ctx, cmd.Auth(), name, &attr)\n\tif err != nil && !cmd.nocreate && soap.IsSoapFault(err) {\n\t\tfault := soap.ToSoapFault(err)\n\t\tif _, ok := fault.VimFault().(types.FileNotFound); ok {\n\t\t\t// create a new empty file\n\t\t\turl, cerr := m.InitiateFileTransferToGuest(ctx, cmd.Auth(), name, &attr, 0, false)\n\t\t\tif cerr != nil {\n\t\t\t\treturn cerr\n\t\t\t}\n\n\t\t\tu, cerr := cmd.ParseURL(url)\n\t\t\tif cerr != nil {\n\t\t\t\treturn cerr\n\t\t\t}\n\n\t\t\tc, cerr := cmd.Client()\n\t\t\tif cerr != nil {\n\t\t\t\treturn cerr\n\t\t\t}\n\n\t\t\terr = c.Client.Upload(new(bytes.Buffer), u, &soap.DefaultUpload)\n\t\t\tif err == nil && cmd.date != \"\" {\n\t\t\t\terr = m.ChangeFileAttributes(ctx, cmd.Auth(), name, &attr)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/guest/upload.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"flag\"\n\t\"io\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n)\n\ntype upload struct {\n\t*GuestFlag\n\t*FileAttrFlag\n\n\toverwrite bool\n}\n\nfunc init() {\n\tcli.Register(\"guest.upload\", &upload{})\n}\n\nfunc (cmd *upload) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.GuestFlag, ctx = newGuestFlag(ctx)\n\tcmd.GuestFlag.Register(ctx, f)\n\tcmd.FileAttrFlag, ctx = newFileAttrFlag(ctx)\n\tcmd.FileAttrFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.overwrite, \"f\", false, \"If set, the guest destination file is clobbered\")\n}\n\nfunc (cmd *upload) Usage() string {\n\treturn \"SOURCE DEST\"\n}\n\nfunc (cmd *upload) Description() string {\n\treturn `Copy SOURCE from the local system to DEST in the guest VM.\n\nIf SOURCE name is \"-\", read source from stdin.\n\nExamples:\n  govc guest.upload -l user:pass -vm=my-vm ~/.ssh/id_rsa.pub /home/$USER/.ssh/authorized_keys\n  cowsay \"have a great day\" | govc guest.upload -l user:pass -vm=my-vm - /etc/motd`\n}\n\nfunc (cmd *upload) Process(ctx context.Context) error {\n\tif err := cmd.GuestFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.FileAttrFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *upload) Run(ctx context.Context, f 
*flag.FlagSet) error {\n\tif f.NArg() != 2 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tm, err := cmd.FileManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsrc := f.Arg(0)\n\tdst := f.Arg(1)\n\n\tvar size int64\n\tvar buf *bytes.Buffer\n\n\tif src == \"-\" {\n\t\tbuf = new(bytes.Buffer)\n\t\tsize, err = io.Copy(buf, os.Stdin)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\ts, err := os.Stat(src)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsize = s.Size()\n\t}\n\n\turl, err := m.InitiateFileTransferToGuest(ctx, cmd.Auth(), dst, cmd.Attr(), size, cmd.overwrite)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tu, err := cmd.ParseURL(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tp := soap.DefaultUpload\n\n\tif buf != nil {\n\t\tp.ContentLength = size\n\t\treturn c.Client.Upload(buf, u, &p)\n\t}\n\n\tif cmd.OutputFlag.TTY {\n\t\tlogger := cmd.ProgressLogger(\"Uploading... \")\n\t\tp.Progress = logger\n\t\tdefer logger.Wait()\n\t}\n\n\treturn c.Client.UploadFile(src, u, nil)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/info.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vm\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/find\"\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\n\t\"github.com/vmware/govmomi/units\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype info struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n\t*flags.SearchFlag\n\n\tWaitForIP       bool\n\tGeneral         bool\n\tExtraConfig     bool\n\tResources       bool\n\tToolsConfigInfo bool\n}\n\nfunc init() {\n\tcli.Register(\"vm.info\", &info{})\n}\n\nfunc (cmd *info) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tcmd.SearchFlag, ctx = flags.NewSearchFlag(ctx, flags.SearchVirtualMachines)\n\tcmd.SearchFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.WaitForIP, \"waitip\", false, \"Wait for VM to acquire IP address\")\n\tf.BoolVar(&cmd.General, \"g\", true, \"Show general summary\")\n\tf.BoolVar(&cmd.ExtraConfig, \"e\", false, \"Show ExtraConfig\")\n\tf.BoolVar(&cmd.Resources, \"r\", false, \"Show resource 
summary\")\n\tf.BoolVar(&cmd.ToolsConfigInfo, \"t\", false, \"Show ToolsConfigInfo\")\n}\n\nfunc (cmd *info) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SearchFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *info) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvms, err := cmd.VirtualMachines(f.Args())\n\tif err != nil {\n\t\tif _, ok := err.(*find.NotFoundError); ok {\n\t\t\t// Continue with empty VM slice\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\trefs := make([]types.ManagedObjectReference, 0, len(vms))\n\tfor _, vm := range vms {\n\t\trefs = append(refs, vm.Reference())\n\t}\n\n\tvar res infoResult\n\tvar props []string\n\n\tif cmd.OutputFlag.JSON {\n\t\tprops = nil // Load everything\n\t} else {\n\t\tprops = []string{\"summary\"} // Load summary\n\t\tif cmd.General {\n\t\t\tprops = append(props, \"guest.ipAddress\")\n\t\t}\n\t\tif cmd.ExtraConfig {\n\t\t\tprops = append(props, \"config.extraConfig\")\n\t\t}\n\t\tif cmd.Resources {\n\t\t\tprops = append(props, \"datastore\", \"network\")\n\t\t}\n\t\tif cmd.ToolsConfigInfo {\n\t\t\tprops = append(props, \"config.tools\")\n\t\t}\n\t}\n\n\tpc := property.DefaultCollector(c)\n\tif len(refs) != 0 {\n\t\terr = pc.Retrieve(ctx, refs, props, &res.VirtualMachines)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif cmd.WaitForIP {\n\t\tfor i, vm := range res.VirtualMachines {\n\t\t\tif vm.Guest == nil || vm.Guest.IpAddress == \"\" {\n\t\t\t\t_, err = vms[i].WaitForIP(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t// Reload virtual machine object\n\t\t\t\terr = pc.RetrieveOne(ctx, vms[i].Reference(), props, &res.VirtualMachines[i])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif 
!cmd.OutputFlag.JSON {\n\t\tres.objects = vms\n\t\tres.cmd = cmd\n\t\tif err = res.collectReferences(pc, ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn cmd.WriteResult(&res)\n}\n\ntype infoResult struct {\n\tVirtualMachines []mo.VirtualMachine\n\tobjects         []*object.VirtualMachine\n\tentities        map[types.ManagedObjectReference]string\n\tcmd             *info\n}\n\n// collectReferences builds a unique set of MORs to the set of VirtualMachines,\n// so we can collect properties in a single call for each reference type {host,datastore,network}.\nfunc (r *infoResult) collectReferences(pc *property.Collector, ctx context.Context) error {\n\tr.entities = make(map[types.ManagedObjectReference]string) // MOR -> Name map\n\n\tvar host []mo.HostSystem\n\tvar network []mo.Network\n\tvar opaque []mo.OpaqueNetwork\n\tvar dvp []mo.DistributedVirtualPortgroup\n\tvar datastore []mo.Datastore\n\t// Table to drive inflating refs to their mo.* counterparts (dest)\n\t// and save() the Name to r.entities w/o using reflection here.\n\t// Note that we cannot use a []mo.ManagedEntity here, since mo.Network has its own 'Name' field,\n\t// the mo.Network.ManagedEntity.Name field will not be set.\n\tvrefs := map[string]*struct {\n\t\tdest interface{}\n\t\trefs []types.ManagedObjectReference\n\t\tsave func()\n\t}{\n\t\t\"HostSystem\": {\n\t\t\t&host, nil, func() {\n\t\t\t\tfor _, e := range host {\n\t\t\t\t\tr.entities[e.Reference()] = e.Name\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"Network\": {\n\t\t\t&network, nil, func() {\n\t\t\t\tfor _, e := range network {\n\t\t\t\t\tr.entities[e.Reference()] = e.Name\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"OpaqueNetwork\": {\n\t\t\t&opaque, nil, func() {\n\t\t\t\tfor _, e := range opaque {\n\t\t\t\t\tr.entities[e.Reference()] = e.Name\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"DistributedVirtualPortgroup\": {\n\t\t\t&dvp, nil, func() {\n\t\t\t\tfor _, e := range dvp {\n\t\t\t\t\tr.entities[e.Reference()] = 
e.Name\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"Datastore\": {\n\t\t\t&datastore, nil, func() {\n\t\t\t\tfor _, e := range datastore {\n\t\t\t\t\tr.entities[e.Reference()] = e.Name\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\txrefs := make(map[types.ManagedObjectReference]bool)\n\t// Add MOR to vrefs[kind].refs avoiding any duplicates.\n\taddRef := func(refs ...types.ManagedObjectReference) {\n\t\tfor _, ref := range refs {\n\t\t\tif _, exists := xrefs[ref]; exists {\n\t\t\t\treturn\n\t\t\t}\n\t\t\txrefs[ref] = true\n\t\t\tvref := vrefs[ref.Type]\n\t\t\tvref.refs = append(vref.refs, ref)\n\t\t}\n\t}\n\n\tfor _, vm := range r.VirtualMachines {\n\t\tif r.cmd.General {\n\t\t\tif ref := vm.Summary.Runtime.Host; ref != nil {\n\t\t\t\taddRef(*ref)\n\t\t\t}\n\t\t}\n\n\t\tif r.cmd.Resources {\n\t\t\taddRef(vm.Datastore...)\n\t\t\taddRef(vm.Network...)\n\t\t}\n\t}\n\n\tfor _, vref := range vrefs {\n\t\tif vref.refs == nil {\n\t\t\tcontinue\n\t\t}\n\t\terr := pc.Retrieve(ctx, vref.refs, []string{\"name\"}, vref.dest)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvref.save()\n\t}\n\n\treturn nil\n}\n\nfunc (r *infoResult) entityNames(refs []types.ManagedObjectReference) string {\n\tvar names []string\n\tfor _, ref := range refs {\n\t\tnames = append(names, r.entities[ref])\n\t}\n\treturn strings.Join(names, \", \")\n}\n\nfunc (r *infoResult) Write(w io.Writer) error {\n\t// Maintain order via r.objects as Property collector does not always return results in order.\n\tobjects := make(map[types.ManagedObjectReference]mo.VirtualMachine, len(r.VirtualMachines))\n\tfor _, o := range r.VirtualMachines {\n\t\tobjects[o.Reference()] = o\n\t}\n\n\ttw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)\n\n\tfor _, o := range r.objects {\n\t\tvm := objects[o.Reference()]\n\t\ts := vm.Summary\n\n\t\tfmt.Fprintf(tw, \"Name:\\t%s\\n\", s.Config.Name)\n\n\t\tif r.cmd.General {\n\t\t\thostName := \"<unavailable>\"\n\n\t\t\tif href := vm.Summary.Runtime.Host; href != nil {\n\t\t\t\tif name, ok := 
r.entities[*href]; ok {\n\t\t\t\t\thostName = name\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfmt.Fprintf(tw, \"  Path:\\t%s\\n\", o.InventoryPath)\n\t\t\tfmt.Fprintf(tw, \"  UUID:\\t%s\\n\", s.Config.Uuid)\n\t\t\tfmt.Fprintf(tw, \"  Guest name:\\t%s\\n\", s.Config.GuestFullName)\n\t\t\tfmt.Fprintf(tw, \"  Memory:\\t%dMB\\n\", s.Config.MemorySizeMB)\n\t\t\tfmt.Fprintf(tw, \"  CPU:\\t%d vCPU(s)\\n\", s.Config.NumCpu)\n\t\t\tfmt.Fprintf(tw, \"  Power state:\\t%s\\n\", s.Runtime.PowerState)\n\t\t\tfmt.Fprintf(tw, \"  Boot time:\\t%s\\n\", s.Runtime.BootTime)\n\t\t\tfmt.Fprintf(tw, \"  IP address:\\t%s\\n\", s.Guest.IpAddress)\n\t\t\tfmt.Fprintf(tw, \"  Host:\\t%s\\n\", hostName)\n\t\t}\n\n\t\tif r.cmd.Resources {\n\t\t\tif s.Storage == nil {\n\t\t\t\ts.Storage = new(types.VirtualMachineStorageSummary)\n\t\t\t}\n\t\t\tfmt.Fprintf(tw, \"  CPU usage:\\t%dMHz\\n\", s.QuickStats.OverallCpuUsage)\n\t\t\tfmt.Fprintf(tw, \"  Host memory usage:\\t%dMB\\n\", s.QuickStats.HostMemoryUsage)\n\t\t\tfmt.Fprintf(tw, \"  Guest memory usage:\\t%dMB\\n\", s.QuickStats.GuestMemoryUsage)\n\t\t\tfmt.Fprintf(tw, \"  Storage uncommitted:\\t%s\\n\", units.ByteSize(s.Storage.Uncommitted))\n\t\t\tfmt.Fprintf(tw, \"  Storage committed:\\t%s\\n\", units.ByteSize(s.Storage.Committed))\n\t\t\tfmt.Fprintf(tw, \"  Storage unshared:\\t%s\\n\", units.ByteSize(s.Storage.Unshared))\n\t\t\tfmt.Fprintf(tw, \"  Storage:\\t%s\\n\", r.entityNames(vm.Datastore))\n\t\t\tfmt.Fprintf(tw, \"  Network:\\t%s\\n\", r.entityNames(vm.Network))\n\t\t}\n\n\t\tif r.cmd.ExtraConfig {\n\t\t\tfmt.Fprintf(tw, \"  ExtraConfig:\\n\")\n\t\t\tfor _, v := range vm.Config.ExtraConfig {\n\t\t\t\tfmt.Fprintf(tw, \"    %s:\\t%s\\n\", v.GetOptionValue().Key, v.GetOptionValue().Value)\n\t\t\t}\n\t\t}\n\n\t\tif r.cmd.ToolsConfigInfo {\n\t\t\tt := vm.Config.Tools\n\t\t\tfmt.Fprintf(tw, \"  ToolsConfigInfo:\\n\")\n\t\t\tfmt.Fprintf(tw, \"    ToolsVersion:\\t%d\\n\", t.ToolsVersion)\n\t\t\tfmt.Fprintf(tw, \"    AfterPowerOn:\\t%s\\n\", 
flags.NewOptionalBool(&t.AfterPowerOn).String())\n\t\t\tfmt.Fprintf(tw, \"    AfterResume:\\t%s\\n\", flags.NewOptionalBool(&t.AfterResume).String())\n\t\t\tfmt.Fprintf(tw, \"    BeforeGuestStandby:\\t%s\\n\", flags.NewOptionalBool(&t.BeforeGuestStandby).String())\n\t\t\tfmt.Fprintf(tw, \"    BeforeGuestShutdown:\\t%s\\n\", flags.NewOptionalBool(&t.BeforeGuestShutdown).String())\n\t\t\tfmt.Fprintf(tw, \"    BeforeGuestReboot:\\t%s\\n\", flags.NewOptionalBool(&t.BeforeGuestReboot).String())\n\t\t\tfmt.Fprintf(tw, \"    ToolsUpgradePolicy:\\t%s\\n\", t.ToolsUpgradePolicy)\n\t\t\tfmt.Fprintf(tw, \"    PendingCustomization:\\t%s\\n\", t.PendingCustomization)\n\t\t\tfmt.Fprintf(tw, \"    SyncTimeWithHost:\\t%s\\n\", flags.NewOptionalBool(&t.SyncTimeWithHost).String())\n\t\t}\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/ip.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vm\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/govc/host/esxcli\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype ip struct {\n\t*flags.OutputFlag\n\t*flags.SearchFlag\n\n\tesx  bool\n\tall  bool\n\tv4   bool\n\twait time.Duration\n\tnic  string\n}\n\nfunc init() {\n\tcli.Register(\"vm.ip\", &ip{})\n}\n\nfunc (cmd *ip) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n\n\tcmd.SearchFlag, ctx = flags.NewSearchFlag(ctx, flags.SearchVirtualMachines)\n\tcmd.SearchFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.esx, \"esxcli\", false, \"Use esxcli instead of guest tools\")\n\tf.BoolVar(&cmd.all, \"a\", false, \"Wait for an IP address on all NICs\")\n\tf.StringVar(&cmd.nic, \"n\", \"\", \"Wait for IP address on NIC, specified by device name or MAC\")\n\tf.BoolVar(&cmd.v4, \"v4\", false, \"Only report IPv4 addresses\")\n\tf.DurationVar(&cmd.wait, \"wait\", time.Hour, \"Wait time for the VM obtain an IP address\")\n}\n\nfunc (cmd *ip) Usage() string {\n\treturn \"VM...\"\n}\n\nfunc (cmd *ip) Description() string {\n\treturn `List IPs for VM.\n\nBy default the vm.ip command depends on vmware-tools to report the 
'guest.ipAddress' field and will\nwait until it has done so.  This value can also be obtained using:\n\n  govc vm.info -json $vm | jq -r .VirtualMachines[].Guest.IpAddress\n\nWhen given the '-a' flag, only IP addresses for which there is a corresponding virtual nic are listed.\nIf there are multiple nics, the listed addresses will be comma delimited.  The '-a' flag depends on\nvmware-tools to report the 'guest.net' field and will wait until it has done so for all nics.\nNote that this list includes IPv6 addresses if any, use '-v4' to filter them out.  IP addresses reported\nby tools for which there is no virtual nic are not included, for example that of the 'docker0' interface.\n\nThese values can also be obtained using:\n\n  govc vm.info -json $vm | jq -r .VirtualMachines[].Guest.Net[].IpConfig.IpAddress[].IpAddress\n\nWhen given the '-n' flag, filters '-a' behavior to the nic specified by MAC address or device name.\n\nThe 'esxcli' flag does not require vmware-tools to be installed, but does require the ESX host to\nhave the /Net/GuestIPHack setting enabled.\n\nThe 'wait' flag default to 1hr (original default was infinite).  
If a VM does not obtain an IP within\nthe wait time, the command will still exit with status 0.\n\nExamples:\n  govc vm.ip $vm\n  govc vm.ip -wait 5m $vm\n  govc vm.ip -a -v4 $vm\n  govc vm.ip -n 00:0c:29:57:7b:c3 $vm\n  govc vm.ip -n ethernet-0 $vm\n  govc host.esxcli system settings advanced set -o /Net/GuestIPHack -i 1\n  govc vm.ip -esxcli $vm`\n}\n\nfunc (cmd *ip) Process(ctx context.Context) error {\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SearchFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *ip) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvms, err := cmd.VirtualMachines(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar get func(*object.VirtualMachine, context.Context) (string, error)\n\n\tif cmd.esx {\n\t\tget = func(vm *object.VirtualMachine, deadline context.Context) (string, error) {\n\t\t\tguest := esxcli.NewGuestInfo(c)\n\n\t\t\tticker := time.NewTicker(time.Millisecond * 500)\n\t\t\tdefer ticker.Stop()\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-ticker.C:\n\t\t\t\t\tip, err := guest.IpAddress(vm)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn \"\", err\n\t\t\t\t\t}\n\n\t\t\t\t\tif ip != \"0.0.0.0\" {\n\t\t\t\t\t\treturn ip, nil\n\t\t\t\t\t}\n\t\t\t\tcase <-deadline.Done():\n\t\t\t\t\treturn \"\", nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tvar hwaddr []string\n\t\tif cmd.nic != \"\" {\n\t\t\thwaddr = strings.Split(cmd.nic, \",\")\n\t\t}\n\n\t\tget = func(vm *object.VirtualMachine, deadline context.Context) (string, error) {\n\t\t\tif cmd.all || hwaddr != nil {\n\t\t\t\tmacs, err := vm.WaitForNetIP(deadline, cmd.v4, hwaddr...)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\n\t\t\t\tvar ips []string\n\t\t\t\tfor _, addrs := range macs {\n\t\t\t\t\tfor _, ip := range addrs {\n\t\t\t\t\t\tips = append(ips, ip)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn 
strings.Join(ips, \",\"), nil\n\t\t\t}\n\t\t\treturn vm.WaitForIP(deadline)\n\t\t}\n\t}\n\n\tfor _, vm := range vms {\n\t\tdeadline, cancel := context.WithDeadline(ctx, time.Now().Add(cmd.wait))\n\n\t\tip, err := get(vm, deadline)\n\t\tif err != nil {\n\t\t\tif deadline.Err() != context.DeadlineExceeded {\n\t\t\t\tcancel()\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tcancel()\n\n\t\tif ip == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t// TODO(PN): Display inventory path to VM\n\t\tfmt.Fprintf(cmd, \"%s\\n\", ip)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/markastemplate.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vm\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype markastemplate struct {\n\t*flags.SearchFlag\n}\n\nfunc init() {\n\tcli.Register(\"vm.markastemplate\", &markastemplate{})\n}\n\nfunc (cmd *markastemplate) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.SearchFlag, ctx = flags.NewSearchFlag(ctx, flags.SearchVirtualMachines)\n\tcmd.SearchFlag.Register(ctx, f)\n}\n\nfunc (cmd *markastemplate) Process(ctx context.Context) error {\n\tif err := cmd.SearchFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *markastemplate) Usage() string {\n\treturn \"VM...\"\n}\n\nfunc (cmd *markastemplate) Description() string {\n\treturn `Mark VM as a virtual machine template.\n\nExamples:\n  govc vm.markastemplate $name`\n}\n\nfunc (cmd *markastemplate) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvms, err := cmd.VirtualMachines(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, vm := range vms {\n\t\terr := vm.MarkAsTemplate(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/markasvm.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vm\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype markasvm struct {\n\t*flags.SearchFlag\n\t*flags.ResourcePoolFlag\n\t*flags.HostSystemFlag\n}\n\nfunc init() {\n\tcli.Register(\"vm.markasvm\", &markasvm{})\n}\n\nfunc (cmd *markasvm) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.SearchFlag, ctx = flags.NewSearchFlag(ctx, flags.SearchVirtualMachines)\n\tcmd.SearchFlag.Register(ctx, f)\n\tcmd.ResourcePoolFlag, ctx = flags.NewResourcePoolFlag(ctx)\n\tcmd.ResourcePoolFlag.Register(ctx, f)\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n}\n\nfunc (cmd *markasvm) Process(ctx context.Context) error {\n\tif err := cmd.SearchFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.ResourcePoolFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *markasvm) Usage() string {\n\treturn \"VM...\"\n}\n\nfunc (cmd *markasvm) Description() string {\n\treturn `Mark VM template as a virtual machine.\n\nExamples:\n  govc vm.markasvm $name -host host1\n  govc vm.markasvm $name -pool cluster1/Resources`\n}\n\nfunc (cmd *markasvm) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvms, err := 
cmd.VirtualMachines(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpool, err := cmd.ResourcePoolIfSpecified()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thost, err := cmd.HostSystemFlag.HostSystemIfSpecified()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif pool == nil {\n\t\tif host == nil {\n\t\t\treturn flag.ErrHelp\n\t\t}\n\n\t\tpool, err = host.ResourcePool(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor _, vm := range vms {\n\t\terr := vm.MarkAsVirtualMachine(ctx, *pool, host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/migrate.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vm\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype migrate struct {\n\t*flags.ResourcePoolFlag\n\t*flags.HostSystemFlag\n\t*flags.DatastoreFlag\n\t*flags.SearchFlag\n\n\tpriority types.VirtualMachineMovePriority\n\tspec     types.VirtualMachineRelocateSpec\n}\n\nfunc init() {\n\tcli.Register(\"vm.migrate\", &migrate{})\n}\n\nfunc (cmd *migrate) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.SearchFlag, ctx = flags.NewSearchFlag(ctx, flags.SearchVirtualMachines)\n\tcmd.SearchFlag.Register(ctx, f)\n\n\tcmd.ResourcePoolFlag, ctx = flags.NewResourcePoolFlag(ctx)\n\tcmd.ResourcePoolFlag.Register(ctx, f)\n\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\n\tf.StringVar((*string)(&cmd.priority), \"priority\", string(types.VirtualMachineMovePriorityDefaultPriority), \"The task priority\")\n}\n\nfunc (cmd *migrate) Process(ctx context.Context) error {\n\tif err := cmd.ResourcePoolFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := 
cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *migrate) Usage() string {\n\treturn \"VM...\"\n}\n\nfunc (cmd *migrate) Description() string {\n\treturn `Migrates VM to a specific resource pool, host or datastore.\n\nExamples:\n  govc vm.migrate -host another-host vm-1 vm-2 vm-3\n  govc vm.migrate -ds another-ds vm-1 vm-2 vm-3`\n}\n\nfunc (cmd *migrate) relocate(ctx context.Context, vm *object.VirtualMachine) error {\n\ttask, err := vm.Relocate(ctx, cmd.spec, cmd.priority)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger := cmd.DatastoreFlag.ProgressLogger(fmt.Sprintf(\"migrating %s... \", vm.Reference()))\n\t_, err = task.WaitForResult(ctx, logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Wait()\n\n\treturn nil\n}\n\nfunc (cmd *migrate) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvms, err := cmd.VirtualMachines(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thost, err := cmd.HostSystemFlag.HostSystemIfSpecified()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif host != nil {\n\t\tref := host.Reference()\n\t\tcmd.spec.Host = &ref\n\t}\n\n\tpool, err := cmd.ResourcePoolFlag.ResourcePoolIfSpecified()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif pool != nil {\n\t\tref := pool.Reference()\n\t\tcmd.spec.Pool = &ref\n\t}\n\n\tds, err := cmd.DatastoreFlag.DatastoreIfSpecified()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ds != nil {\n\t\tref := ds.Reference()\n\t\tcmd.spec.Datastore = &ref\n\t}\n\n\tfor _, vm := range vms {\n\t\terr = cmd.relocate(ctx, vm)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/network/add.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage network\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype add struct {\n\t*flags.VirtualMachineFlag\n\t*flags.NetworkFlag\n}\n\nfunc init() {\n\tcli.Register(\"vm.network.add\", &add{})\n}\n\nfunc (cmd *add) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\tcmd.NetworkFlag, ctx = flags.NewNetworkFlag(ctx)\n\tcmd.NetworkFlag.Register(ctx, f)\n}\n\nfunc (cmd *add) Description() string {\n\treturn `Add network adapter to VM.\n\nExamples:\n  govc vm.network.add -vm $vm -net \"VM Network\" -net.adapter e1000e\n  govc device.info -vm $vm ethernet-*`\n}\n\nfunc (cmd *add) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.NetworkFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *add) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachineFlag.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn errors.New(\"please specify a vm\")\n\t}\n\n\t// Set network if specified as extra argument.\n\tif f.NArg() > 0 {\n\t\t_ = cmd.NetworkFlag.Set(f.Arg(0))\n\t}\n\n\tnet, 
err := cmd.NetworkFlag.Device()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn vm.AddDevice(ctx, net)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/network/change.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage network\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype change struct {\n\t*flags.VirtualMachineFlag\n\t*flags.NetworkFlag\n}\n\nfunc init() {\n\tcli.Register(\"vm.network.change\", &change{})\n}\n\nfunc (cmd *change) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\tcmd.NetworkFlag, ctx = flags.NewNetworkFlag(ctx)\n\tcmd.NetworkFlag.Register(ctx, f)\n}\n\nfunc (cmd *change) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.NetworkFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *change) Usage() string {\n\treturn \"DEVICE\"\n}\n\nfunc (cmd *change) Description() string {\n\treturn `Change network DEVICE configuration.\n\nExamples:\n  govc vm.network.change -vm $vm -net PG2 ethernet-0\n  govc vm.network.change -vm $vm -net.address 00:00:0f:2e:5d:69 ethernet-0\n  govc device.info -vm $vm ethernet-*`\n}\n\nfunc (cmd *change) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachineFlag.VirtualMachine()\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tif vm == nil {\n\t\treturn errors.New(\"please specify a vm\")\n\t}\n\n\tname := f.Arg(0)\n\n\tif name == \"\" {\n\t\treturn errors.New(\"please specify a device name\")\n\t}\n\n\t// Set network if specified as extra argument.\n\tif f.NArg() > 1 {\n\t\t_ = cmd.NetworkFlag.Set(f.Arg(1))\n\t}\n\n\tdevices, err := vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnet := devices.Find(name)\n\n\tif net == nil {\n\t\treturn fmt.Errorf(\"device '%s' not found\", name)\n\t}\n\n\tdev, err := cmd.NetworkFlag.Device()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcurrent := net.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard()\n\tchanged := dev.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard()\n\n\tcurrent.Backing = changed.Backing\n\n\tif changed.MacAddress != \"\" {\n\t\tcurrent.MacAddress = changed.MacAddress\n\t}\n\n\tif changed.AddressType != \"\" {\n\t\tcurrent.AddressType = changed.AddressType\n\t}\n\n\treturn vm.EditDevice(ctx, net)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/power.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vm\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype power struct {\n\t*flags.ClientFlag\n\t*flags.SearchFlag\n\n\tOn       bool\n\tOff      bool\n\tReset    bool\n\tReboot   bool\n\tShutdown bool\n\tSuspend  bool\n\tForce    bool\n}\n\nfunc init() {\n\tcli.Register(\"vm.power\", &power{})\n}\n\nfunc (cmd *power) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.SearchFlag, ctx = flags.NewSearchFlag(ctx, flags.SearchVirtualMachines)\n\tcmd.SearchFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.On, \"on\", false, \"Power on\")\n\tf.BoolVar(&cmd.Off, \"off\", false, \"Power off\")\n\tf.BoolVar(&cmd.Reset, \"reset\", false, \"Power reset\")\n\tf.BoolVar(&cmd.Suspend, \"suspend\", false, \"Power suspend\")\n\tf.BoolVar(&cmd.Reboot, \"r\", false, \"Reboot guest\")\n\tf.BoolVar(&cmd.Shutdown, \"s\", false, \"Shutdown guest\")\n\tf.BoolVar(&cmd.Force, \"force\", false, \"Force (ignore state error and hard shutdown/reboot if tools unavailable)\")\n}\n\nfunc (cmd *power) Process(ctx context.Context) error {\n\tif err := 
cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SearchFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\topts := []bool{cmd.On, cmd.Off, cmd.Reset, cmd.Suspend, cmd.Reboot, cmd.Shutdown}\n\tselected := false\n\n\tfor _, opt := range opts {\n\t\tif opt {\n\t\t\tif selected {\n\t\t\t\treturn flag.ErrHelp\n\t\t\t}\n\t\t\tselected = opt\n\t\t}\n\t}\n\n\tif !selected {\n\t\treturn flag.ErrHelp\n\t}\n\n\treturn nil\n}\n\nfunc isToolsUnavailable(err error) bool {\n\tif soap.IsSoapFault(err) {\n\t\tsoapFault := soap.ToSoapFault(err)\n\t\tif _, ok := soapFault.VimFault().(types.ToolsUnavailable); ok {\n\t\t\treturn ok\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (cmd *power) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvms, err := cmd.VirtualMachines(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, vm := range vms {\n\t\tvar task *object.Task\n\n\t\tswitch {\n\t\tcase cmd.On:\n\t\t\tfmt.Fprintf(cmd, \"Powering on %s... \", vm.Reference())\n\t\t\ttask, err = vm.PowerOn(ctx)\n\t\tcase cmd.Off:\n\t\t\tfmt.Fprintf(cmd, \"Powering off %s... \", vm.Reference())\n\t\t\ttask, err = vm.PowerOff(ctx)\n\t\tcase cmd.Reset:\n\t\t\tfmt.Fprintf(cmd, \"Reset %s... \", vm.Reference())\n\t\t\ttask, err = vm.Reset(ctx)\n\t\tcase cmd.Suspend:\n\t\t\tfmt.Fprintf(cmd, \"Suspend %s... \", vm.Reference())\n\t\t\ttask, err = vm.Suspend(ctx)\n\t\tcase cmd.Reboot:\n\t\t\tfmt.Fprintf(cmd, \"Reboot guest %s... \", vm.Reference())\n\t\t\terr = vm.RebootGuest(ctx)\n\n\t\t\tif err != nil && cmd.Force && isToolsUnavailable(err) {\n\t\t\t\ttask, err = vm.Reset(ctx)\n\t\t\t}\n\t\tcase cmd.Shutdown:\n\t\t\tfmt.Fprintf(cmd, \"Shutdown guest %s... 
\", vm.Reference())\n\t\t\terr = vm.ShutdownGuest(ctx)\n\n\t\t\tif err != nil && cmd.Force && isToolsUnavailable(err) {\n\t\t\t\ttask, err = vm.PowerOff(ctx)\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif task != nil {\n\t\t\terr = task.Wait(ctx)\n\t\t}\n\t\tif err == nil {\n\t\t\tfmt.Fprintf(cmd, \"OK\\n\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif cmd.Force {\n\t\t\tfmt.Fprintf(cmd, \"Error: %s\\n\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/question.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vm\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype question struct {\n\t*flags.VirtualMachineFlag\n\n\tanswer string\n}\n\nfunc init() {\n\tcli.Register(\"vm.question\", &question{})\n}\n\nfunc (cmd *question) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.answer, \"answer\", \"\", \"Answer to question\")\n}\n\nfunc (cmd *question) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *question) Run(ctx context.Context, f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn errors.New(\"No VM specified\")\n\t}\n\n\tvar mvm mo.VirtualMachine\n\n\tpc := property.DefaultCollector(c)\n\terr = pc.RetrieveOne(ctx, vm.Reference(), []string{\"runtime.question\"}, &mvm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tq := mvm.Runtime.Question\n\tif 
q == nil {\n\t\tfmt.Printf(\"No pending question\\n\")\n\t\treturn nil\n\t}\n\n\t// Print question if no answer is specified\n\tif cmd.answer == \"\" {\n\t\tfmt.Printf(\"Question:\\n%s\\n\\n\", q.Text)\n\t\tfmt.Printf(\"Possible answers:\\n\")\n\t\tfor _, e := range q.Choice.ChoiceInfo {\n\t\t\ted := e.(*types.ElementDescription)\n\t\t\tfmt.Printf(\"%s) %s\\n\", ed.Key, ed.Description.Label)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Answer question\n\treturn vm.Answer(ctx, q.Id, cmd.answer)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/rdm/attach.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage rdm\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype attach struct {\n\t*flags.VirtualMachineFlag\n\n\tdevice string\n}\n\nfunc init() {\n\tcli.Register(\"vm.rdm.attach\", &attach{})\n}\n\nfunc (cmd *attach) Register(ctx context.Context, f *flag.FlagSet) {\n\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.device, \"device\", \"\", \"Device Name\")\n}\n\nfunc (cmd *attach) Description() string {\n\treturn `Attach DEVICE to VM with RDM.\n\nExamples:\n  govc vm.rdm.attach -vm VM -device /vmfs/devices/disks/naa.000000000000000000000000000000000`\n}\n\nfunc (cmd *attach) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n//This piece of code was developped mainly thanks to the project govmax on github.com\n//This file in particular https://github.com/codedellemc/govmax/blob/master/api/v1/vmomi.go\nfunc (cmd *attach) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tdevices, err := 
vm.Device(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontroller, err := devices.FindSCSIController(\"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvmConfigOptions, err := vm.QueryConfigTarget(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, scsiDisk := range vmConfigOptions.ScsiDisk {\n\t\tif !strings.Contains(scsiDisk.Disk.CanonicalName, cmd.device) {\n\t\t\tcontinue\n\t\t}\n\t\tvar backing types.VirtualDiskRawDiskMappingVer1BackingInfo\n\t\tbacking.CompatibilityMode = string(types.VirtualDiskCompatibilityModePhysicalMode)\n\t\tbacking.DeviceName = scsiDisk.Disk.DeviceName\n\t\tfor _, descriptor := range scsiDisk.Disk.Descriptor {\n\t\t\tif strings.HasPrefix(descriptor.Id, \"vml.\") {\n\t\t\t\tbacking.LunUuid = descriptor.Id\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tvar device types.VirtualDisk\n\t\tdevice.Backing = &backing\n\t\tdevice.ControllerKey = controller.VirtualController.Key\n\n\t\tvar unitNumber *int32\n\t\tscsiCtrlUnitNumber := controller.VirtualController.UnitNumber\n\t\tvar u int32\n\t\tfor u = 0; u < 16; u++ {\n\t\t\tfree := true\n\t\t\tfor _, d := range devices {\n\t\t\t\tif d.GetVirtualDevice().ControllerKey == device.GetVirtualDevice().ControllerKey {\n\t\t\t\t\tif u == *(d.GetVirtualDevice().UnitNumber) || u == *scsiCtrlUnitNumber {\n\t\t\t\t\t\tfree = false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif free {\n\t\t\t\tunitNumber = &u\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tdevice.UnitNumber = unitNumber\n\n\t\tspec := types.VirtualMachineConfigSpec{}\n\n\t\tconfig := &types.VirtualDeviceConfigSpec{\n\t\t\tDevice:    &device,\n\t\t\tOperation: types.VirtualDeviceConfigSpecOperationAdd,\n\t\t}\n\n\t\tconfig.FileOperation = types.VirtualDeviceConfigSpecFileOperationCreate\n\n\t\tspec.DeviceChange = append(spec.DeviceChange, config)\n\n\t\ttask, err := vm.Reconfigure(ctx, spec)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = task.Wait(ctx)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error adding device %+v \\n with backing %+v 
\\nLogged Item:  %s\", device, backing, err)\n\t\t}\n\t\treturn nil\n\n\t}\n\treturn fmt.Errorf(\"Error: No LUN with device name containing %s found\", cmd.device)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/rdm/ls.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage rdm\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ls struct {\n\t*flags.VirtualMachineFlag\n\t*flags.OutputFlag\n}\n\nfunc init() {\n\tcli.Register(\"vm.rdm.ls\", &ls{})\n}\n\nfunc (cmd *ls) Register(ctx context.Context, f *flag.FlagSet) {\n\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\tcmd.OutputFlag, ctx = flags.NewOutputFlag(ctx)\n\tcmd.OutputFlag.Register(ctx, f)\n}\n\nfunc (cmd *ls) Description() string {\n\treturn `List available devices that could be attach to VM with RDM.\n\nExamples:\n  govc vm.rdm.ls -vm VM`\n}\n\nfunc (cmd *ls) Process(ctx context.Context) error {\n\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.OutputFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *ls) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tvmConfigOptions, err := vm.QueryConfigTarget(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tres := infoResult{\n\t\tDisks: 
vmConfigOptions.ScsiDisk,\n\t}\n\treturn cmd.WriteResult(&res)\n}\n\ntype infoResult struct {\n\tDisks []types.VirtualMachineScsiDiskDeviceInfo\n}\n\nfunc (r *infoResult) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)\n\tfor _, disk := range r.Disks {\n\t\tfmt.Fprintf(tw, \"Name:\\t%s\\n\", disk.Name)\n\t\tfmt.Fprintf(tw, \"  Device name:\\t%s\\n\", disk.Disk.DeviceName)\n\t\tfmt.Fprintf(tw, \"  Device path:\\t%s\\n\", disk.Disk.DevicePath)\n\t\tfmt.Fprintf(tw, \"  Canonical Name:\\t%s\\n\", disk.Disk.CanonicalName)\n\n\t\tvar uids []string\n\t\tfor _, descriptor := range disk.Disk.Descriptor {\n\t\t\tuids = append(uids, descriptor.Id)\n\t\t}\n\n\t\tfmt.Fprintf(tw, \"  UIDS:\\t%s\\n\", strings.Join(uids, \" ,\"))\n\t}\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/register.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vm\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25\"\n)\n\ntype register struct {\n\t*flags.ClientFlag\n\t*flags.DatacenterFlag\n\t*flags.DatastoreFlag\n\t*flags.ResourcePoolFlag\n\t*flags.HostSystemFlag\n\t*flags.FolderFlag\n\n\tname     string\n\ttemplate bool\n\n\tClient       *vim25.Client\n\tDatacenter   *object.Datacenter\n\tDatastore    *object.Datastore\n\tResourcePool *object.ResourcePool\n\tHostSystem   *object.HostSystem\n\tFolder       *object.Folder\n}\n\nfunc init() {\n\tcli.Register(\"vm.register\", &register{})\n}\n\nfunc (cmd *register) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.DatacenterFlag, ctx = flags.NewDatacenterFlag(ctx)\n\tcmd.DatacenterFlag.Register(ctx, f)\n\n\tcmd.DatastoreFlag, ctx = flags.NewDatastoreFlag(ctx)\n\tcmd.DatastoreFlag.Register(ctx, f)\n\n\tcmd.ResourcePoolFlag, ctx = flags.NewResourcePoolFlag(ctx)\n\tcmd.ResourcePoolFlag.Register(ctx, f)\n\n\tcmd.HostSystemFlag, ctx = flags.NewHostSystemFlag(ctx)\n\tcmd.HostSystemFlag.Register(ctx, f)\n\n\tcmd.FolderFlag, ctx = flags.NewFolderFlag(ctx)\n\tcmd.FolderFlag.Register(ctx, f)\n\n\tf.StringVar(&cmd.name, 
\"name\", \"\", \"Name of the VM\")\n\tf.BoolVar(&cmd.template, \"as-template\", false, \"Mark VM as template\")\n}\n\nfunc (cmd *register) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.DatacenterFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.DatastoreFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.ResourcePoolFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.HostSystemFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.FolderFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *register) Usage() string {\n\treturn \"VMX\"\n}\n\nfunc (cmd *register) Description() string {\n\treturn `Add an existing VM to the inventory.\n\nVMX is a path to the vm config file, relative to DATASTORE.\n\nExamples:\n  govc vm.register path/name.vmx`\n}\n\nfunc (cmd *register) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvar err error\n\n\tif len(f.Args()) != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tcmd.Client, err = cmd.ClientFlag.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.Datacenter, err = cmd.DatacenterFlag.Datacenter()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.Datastore, err = cmd.DatastoreFlag.Datastore()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.HostSystem, err = cmd.HostSystemFlag.HostSystemIfSpecified()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cmd.HostSystem != nil {\n\t\tif cmd.ResourcePool, err = cmd.HostSystem.ResourcePool(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if !cmd.template {\n\t\tif cmd.ResourcePool, err = cmd.ResourcePoolFlag.ResourcePool(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif cmd.Folder, err = cmd.FolderFlag.Folder(); err != nil {\n\t\treturn err\n\t}\n\n\tpath := cmd.Datastore.Path(f.Arg(0))\n\n\ttask, err := cmd.Folder.RegisterVM(ctx, path, cmd.name, cmd.template, cmd.ResourcePool, 
cmd.HostSystem)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn task.Wait(ctx)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/snapshot/create.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage snapshot\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype create struct {\n\t*flags.VirtualMachineFlag\n\n\tdescription string\n\tmemory      bool\n\tquiesce     bool\n}\n\nfunc init() {\n\tcli.Register(\"snapshot.create\", &create{})\n}\n\nfunc (cmd *create) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.memory, \"m\", true, \"Include memory state\")\n\tf.BoolVar(&cmd.quiesce, \"q\", false, \"Quiesce guest file system\")\n\tf.StringVar(&cmd.description, \"d\", \"\", \"Snapshot description\")\n}\n\nfunc (cmd *create) Usage() string {\n\treturn \"NAME\"\n}\n\nfunc (cmd *create) Description() string {\n\treturn `Create snapshot of VM with NAME.\n\nExamples:\n  govc snapshot.create -vm my-vm happy-vm-state`\n}\n\nfunc (cmd *create) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\ttask, err := 
vm.CreateSnapshot(ctx, f.Arg(0), cmd.description, cmd.memory, cmd.quiesce)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn task.Wait(ctx)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/snapshot/remove.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage snapshot\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype remove struct {\n\t*flags.VirtualMachineFlag\n\n\trecursive   bool\n\tconsolidate bool\n}\n\nfunc init() {\n\tcli.Register(\"snapshot.remove\", &remove{})\n}\n\nfunc (cmd *remove) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.recursive, \"r\", false, \"Remove snapshot children\")\n\tf.BoolVar(&cmd.consolidate, \"c\", true, \"Consolidate disks\")\n}\n\nfunc (cmd *remove) Usage() string {\n\treturn \"NAME\"\n}\n\nfunc (cmd *remove) Description() string {\n\treturn `Remove snapshot of VM with given NAME.\n\nNAME can be the snapshot name, tree path, moid or '*' to remove all snapshots.\n\nExamples:\n  govc snapshot.remove -vm my-vm happy-vm-state`\n}\n\nfunc (cmd *remove) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *remove) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn 
flag.ErrHelp\n\t}\n\n\tvar task *object.Task\n\n\tif f.Arg(0) == \"*\" {\n\t\ttask, err = vm.RemoveAllSnapshot(ctx, &cmd.consolidate)\n\t} else {\n\t\ttask, err = vm.RemoveSnapshot(ctx, f.Arg(0), cmd.recursive, &cmd.consolidate)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn task.Wait(ctx)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/snapshot/revert.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage snapshot\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n)\n\ntype revert struct {\n\t*flags.VirtualMachineFlag\n\n\tsuppressPowerOn bool\n}\n\nfunc init() {\n\tcli.Register(\"snapshot.revert\", &revert{})\n}\n\nfunc (cmd *revert) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.suppressPowerOn, \"s\", false, \"Suppress power on\")\n}\n\nfunc (cmd *revert) Usage() string {\n\treturn \"[NAME]\"\n}\n\nfunc (cmd *revert) Description() string {\n\treturn `Revert to snapshot of VM with given NAME.\n\nIf NAME is not provided, revert to the current snapshot.\nOtherwise, NAME can be the snapshot name, tree path or moid.\n\nExamples:\n  govc snapshot.revert -vm my-vm happy-vm-state`\n}\n\nfunc (cmd *revert) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *revert) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() > 1 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tvar task 
*object.Task\n\n\tif f.NArg() == 1 {\n\t\ttask, err = vm.RevertToSnapshot(ctx, f.Arg(0), cmd.suppressPowerOn)\n\t} else {\n\t\ttask, err = vm.RevertToCurrentSnapshot(ctx, cmd.suppressPowerOn)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn task.Wait(ctx)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/snapshot/tree.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage snapshot\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype tree struct {\n\t*flags.VirtualMachineFlag\n\n\tfullPath bool\n\tcurrent  bool\n\tdate     bool\n\tid       bool\n\n\tinfo *types.VirtualMachineSnapshotInfo\n}\n\nfunc init() {\n\tcli.Register(\"snapshot.tree\", &tree{})\n}\n\nfunc (cmd *tree) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.VirtualMachineFlag, ctx = flags.NewVirtualMachineFlag(ctx)\n\tcmd.VirtualMachineFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.fullPath, \"f\", false, \"Print the full path prefix for snapshot\")\n\tf.BoolVar(&cmd.current, \"c\", true, \"Print the current snapshot\")\n\tf.BoolVar(&cmd.date, \"D\", false, \"Print the snapshot creation date\")\n\tf.BoolVar(&cmd.id, \"i\", false, \"Print the snapshot id\")\n}\n\nfunc (cmd *tree) Description() string {\n\treturn `List VM snapshots in a tree-like format.\n\nThe command will exit 0 with no output if VM does not have any snapshots.\n\nExamples:\n  govc snapshot.tree -vm my-vm\n  govc snapshot.tree -vm my-vm -D -i`\n}\n\nfunc (cmd *tree) Process(ctx context.Context) error {\n\tif err := cmd.VirtualMachineFlag.Process(ctx); err != nil {\n\t\treturn 
err\n\t}\n\treturn nil\n}\n\nfunc (cmd *tree) write(level int, parent string, st []types.VirtualMachineSnapshotTree) {\n\tfor _, s := range st {\n\t\tsname := s.Name\n\n\t\tif cmd.fullPath && parent != \"\" {\n\t\t\tsname = path.Join(parent, sname)\n\t\t}\n\n\t\tnames := []string{sname}\n\n\t\tif cmd.current && s.Snapshot == *cmd.info.CurrentSnapshot {\n\t\t\tnames = append(names, \".\")\n\t\t}\n\n\t\tfor _, name := range names {\n\t\t\tvar attr []string\n\t\t\tvar meta string\n\n\t\t\tif cmd.id {\n\t\t\t\tattr = append(attr, s.Snapshot.Value)\n\t\t\t}\n\n\t\t\tif cmd.date {\n\t\t\t\tattr = append(attr, s.CreateTime.Format(\"Jan 2 15:04\"))\n\t\t\t}\n\n\t\t\tif len(attr) > 0 {\n\t\t\t\tmeta = fmt.Sprintf(\"[%s]  \", strings.Join(attr, \" \"))\n\t\t\t}\n\n\t\t\tfmt.Printf(\"%s%s%s\\n\", strings.Repeat(\" \", level), meta, name)\n\t\t}\n\n\t\tcmd.write(level+2, sname, s.ChildSnapshotList)\n\t}\n}\n\nfunc (cmd *tree) Run(ctx context.Context, f *flag.FlagSet) error {\n\tif f.NArg() != 0 {\n\t\treturn flag.ErrHelp\n\t}\n\n\tvm, err := cmd.VirtualMachine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif vm == nil {\n\t\treturn flag.ErrHelp\n\t}\n\n\tvar o mo.VirtualMachine\n\n\terr = vm.Properties(ctx, vm.Reference(), []string{\"snapshot\"}, &o)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif o.Snapshot == nil {\n\t\treturn nil\n\t}\n\n\tif cmd.current && o.Snapshot.CurrentSnapshot == nil {\n\t\tcmd.current = false\n\t}\n\n\tcmd.info = o.Snapshot\n\n\tcmd.write(0, \"\", o.Snapshot.RootSnapshotList)\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/unregister.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vm\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n)\n\ntype unregister struct {\n\t*flags.ClientFlag\n\t*flags.SearchFlag\n}\n\nfunc init() {\n\tcli.Register(\"vm.unregister\", &unregister{})\n}\n\nfunc (cmd *unregister) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.ClientFlag, ctx = flags.NewClientFlag(ctx)\n\tcmd.ClientFlag.Register(ctx, f)\n\n\tcmd.SearchFlag, ctx = flags.NewSearchFlag(ctx, flags.SearchVirtualMachines)\n\tcmd.SearchFlag.Register(ctx, f)\n}\n\nfunc (cmd *unregister) Process(ctx context.Context) error {\n\tif err := cmd.ClientFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.SearchFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cmd *unregister) Usage() string {\n\treturn \"VM...\"\n}\n\nfunc (cmd *unregister) Description() string {\n\treturn `Remove VM from inventory without removing any of the VM files on disk.`\n}\n\nfunc (cmd *unregister) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvms, err := cmd.VirtualMachines(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, vm := range vms {\n\t\terr := vm.Unregister(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/govc/vm/vnc.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vm\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/govc/cli\"\n\t\"github.com/vmware/govmomi/govc/flags\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype intRange struct {\n\tlow, high int\n}\n\nvar intRangeRegexp = regexp.MustCompile(\"^([0-9]+)-([0-9]+)$\")\n\nfunc (i *intRange) Set(s string) error {\n\tm := intRangeRegexp.FindStringSubmatch(s)\n\tif m == nil {\n\t\treturn fmt.Errorf(\"invalid range: %s\", s)\n\t}\n\n\tlow, _ := strconv.Atoi(m[1])\n\thigh, _ := strconv.Atoi(m[2])\n\tif low > high {\n\t\treturn fmt.Errorf(\"invalid range: low > high\")\n\t}\n\n\ti.low = low\n\ti.high = high\n\treturn nil\n}\n\nfunc (i *intRange) String() string {\n\treturn fmt.Sprintf(\"%d-%d\", i.low, i.high)\n}\n\ntype vnc struct {\n\t*flags.SearchFlag\n\n\tEnable    bool\n\tDisable   bool\n\tPort      int\n\tPortRange intRange\n\tPassword  string\n}\n\nfunc init() {\n\tcmd := &vnc{}\n\tcmd.PortRange.Set(\"5900-5999\")\n\tcli.Register(\"vm.vnc\", cmd)\n}\n\nfunc (cmd *vnc) Register(ctx context.Context, f *flag.FlagSet) {\n\tcmd.SearchFlag, ctx = 
flags.NewSearchFlag(ctx, flags.SearchVirtualMachines)\n\tcmd.SearchFlag.Register(ctx, f)\n\n\tf.BoolVar(&cmd.Enable, \"enable\", false, \"Enable VNC\")\n\tf.BoolVar(&cmd.Disable, \"disable\", false, \"Disable VNC\")\n\tf.IntVar(&cmd.Port, \"port\", -1, \"VNC port (-1 for auto-select)\")\n\tf.Var(&cmd.PortRange, \"port-range\", \"VNC port auto-select range\")\n\tf.StringVar(&cmd.Password, \"password\", \"\", \"VNC password\")\n}\n\nfunc (cmd *vnc) Process(ctx context.Context) error {\n\tif err := cmd.SearchFlag.Process(ctx); err != nil {\n\t\treturn err\n\t}\n\t// Either may be true or none may be true.\n\tif cmd.Enable && cmd.Disable {\n\t\treturn flag.ErrHelp\n\t}\n\n\treturn nil\n}\n\nfunc (cmd *vnc) Usage() string {\n\treturn \"VM...\"\n}\n\nfunc (cmd *vnc) Description() string {\n\treturn `Enable or disable VNC for VM.\n\nPort numbers are automatically chosen if not specified.\n\nIf neither -enable or -disable is specified, the current state is returned.\n\nExamples:\n  govc vm.vnc -enable -password 1234 $vm | awk '{print $2}' | xargs open`\n}\n\nfunc (cmd *vnc) Run(ctx context.Context, f *flag.FlagSet) error {\n\tvms, err := cmd.loadVMs(f.Args())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Actuate settings in VMs\n\tfor _, vm := range vms {\n\t\tswitch {\n\t\tcase cmd.Enable:\n\t\t\tvm.enable(cmd.Port, cmd.Password)\n\t\tcase cmd.Disable:\n\t\t\tvm.disable()\n\t\t}\n\t}\n\n\t// Reconfigure VMs to reflect updates\n\tfor _, vm := range vms {\n\t\terr = vm.reconfigure()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn cmd.WriteResult(vncResult(vms))\n}\n\nfunc (cmd *vnc) loadVMs(args []string) ([]*vncVM, error) {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvms, err := cmd.VirtualMachines(args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar vncVMs []*vncVM\n\tfor _, vm := range vms {\n\t\tv, err := newVNCVM(c, vm)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvncVMs = append(vncVMs, v)\n\t}\n\n\t// 
Assign vncHosts to vncVMs\n\thosts := make(map[string]*vncHost)\n\tfor _, vm := range vncVMs {\n\t\tif h, ok := hosts[vm.hostReference().Value]; ok {\n\t\t\tvm.host = h\n\t\t\tcontinue\n\t\t}\n\n\t\ths := object.NewHostSystem(c, vm.hostReference())\n\t\th, err := newVNCHost(c, hs, cmd.PortRange.low, cmd.PortRange.high)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\thosts[vm.hostReference().Value] = h\n\t\tvm.host = h\n\t}\n\n\treturn vncVMs, nil\n}\n\ntype vncVM struct {\n\tc    *vim25.Client\n\tvm   *object.VirtualMachine\n\tmvm  mo.VirtualMachine\n\thost *vncHost\n\n\tcurOptions vncOptions\n\tnewOptions vncOptions\n}\n\nfunc newVNCVM(c *vim25.Client, vm *object.VirtualMachine) (*vncVM, error) {\n\tv := &vncVM{\n\t\tc:  c,\n\t\tvm: vm,\n\t}\n\n\tvirtualMachineProperties := []string{\n\t\t\"name\",\n\t\t\"config.extraConfig\",\n\t\t\"runtime.host\",\n\t}\n\n\tpc := property.DefaultCollector(c)\n\tctx := context.TODO()\n\terr := pc.RetrieveOne(ctx, vm.Reference(), virtualMachineProperties, &v.mvm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tv.curOptions = vncOptionsFromExtraConfig(v.mvm.Config.ExtraConfig)\n\tv.newOptions = vncOptionsFromExtraConfig(v.mvm.Config.ExtraConfig)\n\n\treturn v, nil\n}\n\nfunc (v *vncVM) hostReference() types.ManagedObjectReference {\n\treturn *v.mvm.Runtime.Host\n}\n\nfunc (v *vncVM) enable(port int, password string) error {\n\tv.newOptions[\"enabled\"] = \"true\"\n\tv.newOptions[\"port\"] = fmt.Sprintf(\"%d\", port)\n\tv.newOptions[\"password\"] = password\n\n\t// Find port if auto-select\n\tif port == -1 {\n\t\t// If VM already has a port, reuse it.\n\t\t// Otherwise, find unused VNC port on host.\n\t\tif p, ok := v.curOptions[\"port\"]; ok && p != \"\" {\n\t\t\tv.newOptions[\"port\"] = p\n\t\t} else {\n\t\t\tport, err := v.host.popUnusedPort()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tv.newOptions[\"port\"] = fmt.Sprintf(\"%d\", port)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (v *vncVM) 
disable() error {\n\tv.newOptions[\"enabled\"] = \"false\"\n\tv.newOptions[\"port\"] = \"\"\n\tv.newOptions[\"password\"] = \"\"\n\treturn nil\n}\n\nfunc (v *vncVM) reconfigure() error {\n\tif reflect.DeepEqual(v.curOptions, v.newOptions) {\n\t\t// No changes to settings\n\t\treturn nil\n\t}\n\n\tspec := types.VirtualMachineConfigSpec{\n\t\tExtraConfig: v.newOptions.ToExtraConfig(),\n\t}\n\n\tctx := context.TODO()\n\ttask, err := v.vm.Reconfigure(ctx, spec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn task.Wait(ctx)\n}\n\nfunc (v *vncVM) uri() (string, error) {\n\tip, err := v.host.managementIP()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\turi := fmt.Sprintf(\"vnc://:%s@%s:%s\",\n\t\tv.newOptions[\"password\"],\n\t\tip,\n\t\tv.newOptions[\"port\"])\n\n\treturn uri, nil\n}\n\nfunc (v *vncVM) write(w io.Writer) error {\n\tif strings.EqualFold(v.newOptions[\"enabled\"], \"true\") {\n\t\turi, err := v.uri()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Printf(\"%s: %s\\n\", v.mvm.Name, uri)\n\t} else {\n\t\tfmt.Printf(\"%s: disabled\\n\", v.mvm.Name)\n\t}\n\treturn nil\n}\n\ntype vncHost struct {\n\tc     *vim25.Client\n\thost  *object.HostSystem\n\tports map[int]struct{}\n\tip    string // This field is populated by `managementIP`\n}\n\nfunc newVNCHost(c *vim25.Client, host *object.HostSystem, low, high int) (*vncHost, error) {\n\tports := make(map[int]struct{})\n\tfor i := low; i <= high; i++ {\n\t\tports[i] = struct{}{}\n\t}\n\n\tused, err := loadUsedPorts(c, host.Reference())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Remove used ports from range\n\tfor _, u := range used {\n\t\tdelete(ports, u)\n\t}\n\n\th := &vncHost{\n\t\tc:     c,\n\t\thost:  host,\n\t\tports: ports,\n\t}\n\n\treturn h, nil\n}\n\nfunc loadUsedPorts(c *vim25.Client, host types.ManagedObjectReference) ([]int, error) {\n\tctx := context.TODO()\n\tospec := types.ObjectSpec{\n\t\tObj: host,\n\t\tSelectSet: 
[]types.BaseSelectionSpec{\n\t\t\t&types.TraversalSpec{\n\t\t\t\tType: \"HostSystem\",\n\t\t\t\tPath: \"vm\",\n\t\t\t\tSkip: types.NewBool(false),\n\t\t\t},\n\t\t},\n\t\tSkip: types.NewBool(false),\n\t}\n\n\tpspec := types.PropertySpec{\n\t\tType:    \"VirtualMachine\",\n\t\tPathSet: []string{\"config.extraConfig\"},\n\t}\n\n\treq := types.RetrieveProperties{\n\t\tThis: c.ServiceContent.PropertyCollector,\n\t\tSpecSet: []types.PropertyFilterSpec{\n\t\t\t{\n\t\t\t\tObjectSet: []types.ObjectSpec{ospec},\n\t\t\t\tPropSet:   []types.PropertySpec{pspec},\n\t\t\t},\n\t\t},\n\t}\n\n\tvar vms []mo.VirtualMachine\n\terr := mo.RetrievePropertiesForRequest(ctx, c, req, &vms)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ports []int\n\tfor _, vm := range vms {\n\t\tif vm.Config == nil || vm.Config.ExtraConfig == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\toptions := vncOptionsFromExtraConfig(vm.Config.ExtraConfig)\n\t\tif ps, ok := options[\"port\"]; ok && ps != \"\" {\n\t\t\tpi, err := strconv.Atoi(ps)\n\t\t\tif err == nil {\n\t\t\t\tports = append(ports, pi)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ports, nil\n}\n\nfunc (h *vncHost) popUnusedPort() (int, error) {\n\tif len(h.ports) == 0 {\n\t\treturn 0, fmt.Errorf(\"no unused ports in range\")\n\t}\n\n\t// Return first port we get when iterating\n\tvar port int\n\tfor port = range h.ports {\n\t\tbreak\n\t}\n\tdelete(h.ports, port)\n\treturn port, nil\n}\n\nfunc (h *vncHost) managementIP() (string, error) {\n\tctx := context.TODO()\n\tif h.ip != \"\" {\n\t\treturn h.ip, nil\n\t}\n\n\tips, err := h.host.ManagementIPs(ctx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(ips) > 0 {\n\t\th.ip = ips[0].String()\n\t} else {\n\t\th.ip = \"<unknown>\"\n\t}\n\n\treturn h.ip, nil\n}\n\ntype vncResult []*vncVM\n\nfunc (vms vncResult) MarshalJSON() ([]byte, error) {\n\tout := make(map[string]string)\n\tfor _, vm := range vms {\n\t\turi, err := vm.uri()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tout[vm.mvm.Name] = 
uri\n\t}\n\treturn json.Marshal(out)\n}\n\nfunc (vms vncResult) Write(w io.Writer) error {\n\tfor _, vm := range vms {\n\t\terr := vm.write(w)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\ntype vncOptions map[string]string\n\nvar vncPrefix = \"RemoteDisplay.vnc.\"\n\nfunc vncOptionsFromExtraConfig(ov []types.BaseOptionValue) vncOptions {\n\tvo := make(vncOptions)\n\tfor _, b := range ov {\n\t\to := b.GetOptionValue()\n\t\tif strings.HasPrefix(o.Key, vncPrefix) {\n\t\t\tkey := o.Key[len(vncPrefix):]\n\t\t\tif key != \"key\" {\n\t\t\t\tvo[key] = o.Value.(string)\n\t\t\t}\n\t\t}\n\t}\n\treturn vo\n}\n\nfunc (vo vncOptions) ToExtraConfig() []types.BaseOptionValue {\n\tov := make([]types.BaseOptionValue, 0, 0)\n\tfor k, v := range vo {\n\t\tkey := vncPrefix + k\n\t\tvalue := v\n\n\t\to := types.OptionValue{\n\t\t\tKey:   key,\n\t\t\tValue: &value, // Pass pointer to avoid omitempty\n\t\t}\n\n\t\tov = append(ov, &o)\n\t}\n\n\t// Don't know how to deal with the key option, set it to be empty...\n\to := types.OptionValue{\n\t\tKey:   vncPrefix + \"key\",\n\t\tValue: new(string), // Pass pointer to avoid omitempty\n\t}\n\n\tov = append(ov, &o)\n\n\treturn ov\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/guest/auth_manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype AuthManager struct {\n\ttypes.ManagedObjectReference\n\n\tvm types.ManagedObjectReference\n\n\tc *vim25.Client\n}\n\nfunc (m AuthManager) Reference() types.ManagedObjectReference {\n\treturn m.ManagedObjectReference\n}\n\nfunc (m AuthManager) AcquireCredentials(ctx context.Context, requestedAuth types.BaseGuestAuthentication, sessionID int64) (types.BaseGuestAuthentication, error) {\n\treq := types.AcquireCredentialsInGuest{\n\t\tThis:          m.Reference(),\n\t\tVm:            m.vm,\n\t\tRequestedAuth: requestedAuth,\n\t\tSessionID:     sessionID,\n\t}\n\n\tres, err := methods.AcquireCredentialsInGuest(ctx, m.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m AuthManager) ReleaseCredentials(ctx context.Context, auth types.BaseGuestAuthentication) error {\n\treq := types.ReleaseCredentialsInGuest{\n\t\tThis: m.Reference(),\n\t\tVm:   m.vm,\n\t\tAuth: auth,\n\t}\n\n\t_, err := methods.ReleaseCredentialsInGuest(ctx, m.c, &req)\n\n\treturn err\n}\n\nfunc (m AuthManager) ValidateCredentials(ctx context.Context, auth types.BaseGuestAuthentication) error {\n\treq := types.ValidateCredentialsInGuest{\n\t\tThis: 
m.Reference(),\n\t\tVm:   m.vm,\n\t\tAuth: auth,\n\t}\n\n\t_, err := methods.ValidateCredentialsInGuest(ctx, m.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/guest/file_manager.go",
    "content": "/*\nCopyright (c) 2015-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\t\"net/url\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype FileManager struct {\n\ttypes.ManagedObjectReference\n\n\tvm types.ManagedObjectReference\n\n\tc *vim25.Client\n}\n\nfunc (m FileManager) Reference() types.ManagedObjectReference {\n\treturn m.ManagedObjectReference\n}\n\nfunc (m FileManager) ChangeFileAttributes(ctx context.Context, auth types.BaseGuestAuthentication, guestFilePath string, fileAttributes types.BaseGuestFileAttributes) error {\n\treq := types.ChangeFileAttributesInGuest{\n\t\tThis:           m.Reference(),\n\t\tVm:             m.vm,\n\t\tAuth:           auth,\n\t\tGuestFilePath:  guestFilePath,\n\t\tFileAttributes: fileAttributes,\n\t}\n\n\t_, err := methods.ChangeFileAttributesInGuest(ctx, m.c, &req)\n\treturn err\n}\n\nfunc (m FileManager) CreateTemporaryDirectory(ctx context.Context, auth types.BaseGuestAuthentication, prefix, suffix string, path string) (string, error) {\n\treq := types.CreateTemporaryDirectoryInGuest{\n\t\tThis:          m.Reference(),\n\t\tVm:            m.vm,\n\t\tAuth:          auth,\n\t\tPrefix:        prefix,\n\t\tSuffix:        suffix,\n\t\tDirectoryPath: path,\n\t}\n\n\tres, err := methods.CreateTemporaryDirectoryInGuest(ctx, m.c, &req)\n\tif 
err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m FileManager) CreateTemporaryFile(ctx context.Context, auth types.BaseGuestAuthentication, prefix, suffix string, path string) (string, error) {\n\treq := types.CreateTemporaryFileInGuest{\n\t\tThis:          m.Reference(),\n\t\tVm:            m.vm,\n\t\tAuth:          auth,\n\t\tPrefix:        prefix,\n\t\tSuffix:        suffix,\n\t\tDirectoryPath: path,\n\t}\n\n\tres, err := methods.CreateTemporaryFileInGuest(ctx, m.c, &req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m FileManager) DeleteDirectory(ctx context.Context, auth types.BaseGuestAuthentication, directoryPath string, recursive bool) error {\n\treq := types.DeleteDirectoryInGuest{\n\t\tThis:          m.Reference(),\n\t\tVm:            m.vm,\n\t\tAuth:          auth,\n\t\tDirectoryPath: directoryPath,\n\t\tRecursive:     recursive,\n\t}\n\n\t_, err := methods.DeleteDirectoryInGuest(ctx, m.c, &req)\n\treturn err\n}\n\nfunc (m FileManager) DeleteFile(ctx context.Context, auth types.BaseGuestAuthentication, filePath string) error {\n\treq := types.DeleteFileInGuest{\n\t\tThis:     m.Reference(),\n\t\tVm:       m.vm,\n\t\tAuth:     auth,\n\t\tFilePath: filePath,\n\t}\n\n\t_, err := methods.DeleteFileInGuest(ctx, m.c, &req)\n\treturn err\n}\n\nfunc (m FileManager) InitiateFileTransferFromGuest(ctx context.Context, auth types.BaseGuestAuthentication, guestFilePath string) (*types.FileTransferInformation, error) {\n\treq := types.InitiateFileTransferFromGuest{\n\t\tThis:          m.Reference(),\n\t\tVm:            m.vm,\n\t\tAuth:          auth,\n\t\tGuestFilePath: guestFilePath,\n\t}\n\n\tres, err := methods.InitiateFileTransferFromGuest(ctx, m.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif strings.HasSuffix(guestFilePath, \"/\") {\n\t\t// Propagate the trailing '/' for directory download support, see soap.directoryReader\n\t\tu, err := 
url.Parse(res.Returnval.Url)\n\t\tif err == nil {\n\t\t\tu.Path += \"/\"\n\t\t\tres.Returnval.Url = u.String()\n\t\t}\n\t}\n\n\treturn &res.Returnval, nil\n}\n\nfunc (m FileManager) InitiateFileTransferToGuest(ctx context.Context, auth types.BaseGuestAuthentication, guestFilePath string, fileAttributes types.BaseGuestFileAttributes, fileSize int64, overwrite bool) (string, error) {\n\treq := types.InitiateFileTransferToGuest{\n\t\tThis:           m.Reference(),\n\t\tVm:             m.vm,\n\t\tAuth:           auth,\n\t\tGuestFilePath:  guestFilePath,\n\t\tFileAttributes: fileAttributes,\n\t\tFileSize:       fileSize,\n\t\tOverwrite:      overwrite,\n\t}\n\n\tres, err := methods.InitiateFileTransferToGuest(ctx, m.c, &req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m FileManager) ListFiles(ctx context.Context, auth types.BaseGuestAuthentication, filePath string, index int32, maxResults int32, matchPattern string) (*types.GuestListFileInfo, error) {\n\treq := types.ListFilesInGuest{\n\t\tThis:         m.Reference(),\n\t\tVm:           m.vm,\n\t\tAuth:         auth,\n\t\tFilePath:     filePath,\n\t\tIndex:        index,\n\t\tMaxResults:   maxResults,\n\t\tMatchPattern: matchPattern,\n\t}\n\n\tres, err := methods.ListFilesInGuest(ctx, m.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n\nfunc (m FileManager) MakeDirectory(ctx context.Context, auth types.BaseGuestAuthentication, directoryPath string, createParentDirectories bool) error {\n\treq := types.MakeDirectoryInGuest{\n\t\tThis:                    m.Reference(),\n\t\tVm:                      m.vm,\n\t\tAuth:                    auth,\n\t\tDirectoryPath:           directoryPath,\n\t\tCreateParentDirectories: createParentDirectories,\n\t}\n\n\t_, err := methods.MakeDirectoryInGuest(ctx, m.c, &req)\n\treturn err\n}\n\nfunc (m FileManager) MoveDirectory(ctx context.Context, auth types.BaseGuestAuthentication, srcDirectoryPath string, 
dstDirectoryPath string) error {\n\treq := types.MoveDirectoryInGuest{\n\t\tThis:             m.Reference(),\n\t\tVm:               m.vm,\n\t\tAuth:             auth,\n\t\tSrcDirectoryPath: srcDirectoryPath,\n\t\tDstDirectoryPath: dstDirectoryPath,\n\t}\n\n\t_, err := methods.MoveDirectoryInGuest(ctx, m.c, &req)\n\treturn err\n}\n\nfunc (m FileManager) MoveFile(ctx context.Context, auth types.BaseGuestAuthentication, srcFilePath string, dstFilePath string, overwrite bool) error {\n\treq := types.MoveFileInGuest{\n\t\tThis:        m.Reference(),\n\t\tVm:          m.vm,\n\t\tAuth:        auth,\n\t\tSrcFilePath: srcFilePath,\n\t\tDstFilePath: dstFilePath,\n\t\tOverwrite:   overwrite,\n\t}\n\n\t_, err := methods.MoveFileInGuest(ctx, m.c, &req)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/guest/operations_manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype OperationsManager struct {\n\tc  *vim25.Client\n\tvm types.ManagedObjectReference\n}\n\nfunc NewOperationsManager(c *vim25.Client, vm types.ManagedObjectReference) *OperationsManager {\n\treturn &OperationsManager{c, vm}\n}\n\nfunc (m OperationsManager) retrieveOne(ctx context.Context, p string, dst *mo.GuestOperationsManager) error {\n\tpc := property.DefaultCollector(m.c)\n\treturn pc.RetrieveOne(ctx, *m.c.ServiceContent.GuestOperationsManager, []string{p}, dst)\n}\n\nfunc (m OperationsManager) AuthManager(ctx context.Context) (*AuthManager, error) {\n\tvar g mo.GuestOperationsManager\n\n\terr := m.retrieveOne(ctx, \"authManager\", &g)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &AuthManager{*g.AuthManager, m.vm, m.c}, nil\n}\n\nfunc (m OperationsManager) FileManager(ctx context.Context) (*FileManager, error) {\n\tvar g mo.GuestOperationsManager\n\n\terr := m.retrieveOne(ctx, \"fileManager\", &g)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &FileManager{*g.FileManager, m.vm, m.c}, nil\n}\n\nfunc (m OperationsManager) ProcessManager(ctx context.Context) (*ProcessManager, error) {\n\tvar g 
mo.GuestOperationsManager\n\n\terr := m.retrieveOne(ctx, \"processManager\", &g)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ProcessManager{*g.ProcessManager, m.vm, m.c}, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/guest/process_manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage guest\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ProcessManager struct {\n\ttypes.ManagedObjectReference\n\n\tvm types.ManagedObjectReference\n\n\tc *vim25.Client\n}\n\nfunc (m ProcessManager) Reference() types.ManagedObjectReference {\n\treturn m.ManagedObjectReference\n}\n\nfunc (m ProcessManager) ListProcesses(ctx context.Context, auth types.BaseGuestAuthentication, pids []int64) ([]types.GuestProcessInfo, error) {\n\treq := types.ListProcessesInGuest{\n\t\tThis: m.Reference(),\n\t\tVm:   m.vm,\n\t\tAuth: auth,\n\t\tPids: pids,\n\t}\n\n\tres, err := methods.ListProcessesInGuest(ctx, m.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, err\n}\n\nfunc (m ProcessManager) ReadEnvironmentVariable(ctx context.Context, auth types.BaseGuestAuthentication, names []string) ([]string, error) {\n\treq := types.ReadEnvironmentVariableInGuest{\n\t\tThis:  m.Reference(),\n\t\tVm:    m.vm,\n\t\tAuth:  auth,\n\t\tNames: names,\n\t}\n\n\tres, err := methods.ReadEnvironmentVariableInGuest(ctx, m.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, err\n}\n\nfunc (m ProcessManager) StartProgram(ctx context.Context, auth types.BaseGuestAuthentication, spec types.BaseGuestProgramSpec) 
(int64, error) {\n\treq := types.StartProgramInGuest{\n\t\tThis: m.Reference(),\n\t\tVm:   m.vm,\n\t\tAuth: auth,\n\t\tSpec: spec,\n\t}\n\n\tres, err := methods.StartProgramInGuest(ctx, m.c, &req)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn res.Returnval, err\n}\n\nfunc (m ProcessManager) TerminateProcess(ctx context.Context, auth types.BaseGuestAuthentication, pid int64) error {\n\treq := types.TerminateProcessInGuest{\n\t\tThis: m.Reference(),\n\t\tVm:   m.vm,\n\t\tAuth: auth,\n\t\tPid:  pid,\n\t}\n\n\t_, err := methods.TerminateProcessInGuest(ctx, m.c, &req)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/license/assignment_manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage license\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype AssignmentManager struct {\n\tobject.Common\n}\n\nfunc (m AssignmentManager) QueryAssigned(ctx context.Context, id string) ([]types.LicenseAssignmentManagerLicenseAssignment, error) {\n\treq := types.QueryAssignedLicenses{\n\t\tThis:     m.Reference(),\n\t\tEntityId: id,\n\t}\n\n\tres, err := methods.QueryAssignedLicenses(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m AssignmentManager) Remove(ctx context.Context, id string) error {\n\treq := types.RemoveAssignedLicense{\n\t\tThis:     m.Reference(),\n\t\tEntityId: id,\n\t}\n\n\t_, err := methods.RemoveAssignedLicense(ctx, m.Client(), &req)\n\n\treturn err\n}\n\nfunc (m AssignmentManager) Update(ctx context.Context, id string, key string, name string) (*types.LicenseManagerLicenseInfo, error) {\n\treq := types.UpdateAssignedLicense{\n\t\tThis:              m.Reference(),\n\t\tEntity:            id,\n\t\tLicenseKey:        key,\n\t\tEntityDisplayName: name,\n\t}\n\n\tres, err := methods.UpdateAssignedLicense(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/license/manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage license\n\nimport (\n\t\"context\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype Manager struct {\n\tobject.Common\n}\n\nfunc NewManager(c *vim25.Client) *Manager {\n\tm := Manager{\n\t\tobject.NewCommon(c, *c.ServiceContent.LicenseManager),\n\t}\n\n\treturn &m\n}\n\nfunc mapToKeyValueSlice(m map[string]string) []types.KeyValue {\n\tr := make([]types.KeyValue, len(m))\n\tfor k, v := range m {\n\t\tr = append(r, types.KeyValue{Key: k, Value: v})\n\t}\n\treturn r\n}\n\nfunc (m Manager) Add(ctx context.Context, key string, labels map[string]string) (types.LicenseManagerLicenseInfo, error) {\n\treq := types.AddLicense{\n\t\tThis:       m.Reference(),\n\t\tLicenseKey: key,\n\t\tLabels:     mapToKeyValueSlice(labels),\n\t}\n\n\tres, err := methods.AddLicense(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn types.LicenseManagerLicenseInfo{}, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m Manager) Decode(ctx context.Context, key string) (types.LicenseManagerLicenseInfo, error) {\n\treq := types.DecodeLicense{\n\t\tThis:       m.Reference(),\n\t\tLicenseKey: key,\n\t}\n\n\tres, err := methods.DecodeLicense(ctx, m.Client(), &req)\n\tif err 
!= nil {\n\t\treturn types.LicenseManagerLicenseInfo{}, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m Manager) Remove(ctx context.Context, key string) error {\n\treq := types.RemoveLicense{\n\t\tThis:       m.Reference(),\n\t\tLicenseKey: key,\n\t}\n\n\t_, err := methods.RemoveLicense(ctx, m.Client(), &req)\n\treturn err\n}\n\nfunc (m Manager) Update(ctx context.Context, key string, labels map[string]string) (types.LicenseManagerLicenseInfo, error) {\n\treq := types.UpdateLicense{\n\t\tThis:       m.Reference(),\n\t\tLicenseKey: key,\n\t\tLabels:     mapToKeyValueSlice(labels),\n\t}\n\n\tres, err := methods.UpdateLicense(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn types.LicenseManagerLicenseInfo{}, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m Manager) List(ctx context.Context) (InfoList, error) {\n\tvar mlm mo.LicenseManager\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"licenses\"}, &mlm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn InfoList(mlm.Licenses), nil\n}\n\nfunc (m Manager) AssignmentManager(ctx context.Context) (*AssignmentManager, error) {\n\tvar mlm mo.LicenseManager\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"licenseAssignmentManager\"}, &mlm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif mlm.LicenseAssignmentManager == nil {\n\t\treturn nil, object.ErrNotSupported\n\t}\n\n\tam := AssignmentManager{\n\t\tobject.NewCommon(m.Client(), *mlm.LicenseAssignmentManager),\n\t}\n\n\treturn &am, nil\n}\n\ntype licenseFeature struct {\n\tname  string\n\tlevel int\n}\n\nfunc parseLicenseFeature(feature string) *licenseFeature {\n\tlf := new(licenseFeature)\n\n\tf := strings.Split(feature, \":\")\n\n\tlf.name = f[0]\n\n\tif len(f) > 1 {\n\t\tvar err error\n\t\tlf.level, err = strconv.Atoi(f[1])\n\t\tif err != nil {\n\t\t\tlf.name = feature\n\t\t}\n\t}\n\n\treturn lf\n}\n\nfunc HasFeature(license types.LicenseManagerLicenseInfo, key string) bool {\n\tfeature := parseLicenseFeature(key)\n\n\tfor _, p 
:= range license.Properties {\n\t\tif p.Key != \"feature\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tkv, ok := p.Value.(types.KeyValue)\n\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tlf := parseLicenseFeature(kv.Key)\n\n\t\tif lf.name == feature.name && lf.level >= feature.level {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n// InfoList provides helper methods for []types.LicenseManagerLicenseInfo\ntype InfoList []types.LicenseManagerLicenseInfo\n\nfunc (l InfoList) WithFeature(key string) InfoList {\n\tvar result InfoList\n\n\tfor _, license := range l {\n\t\tif HasFeature(license, key) {\n\t\t\tresult = append(result, license)\n\t\t}\n\t}\n\n\treturn result\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/list/lister.go",
    "content": "/*\nCopyright (c) 2014-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage list\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"path\"\n\t\"reflect\"\n\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype Element struct {\n\tPath   string\n\tObject mo.Reference\n}\n\nfunc (e Element) String() string {\n\treturn fmt.Sprintf(\"%s @ %s\", e.Object.Reference(), e.Path)\n}\n\nfunc ToElement(r mo.Reference, prefix string) Element {\n\tvar name string\n\n\t// Comments about types to be expected in folders copied from the\n\t// documentation of the Folder managed object:\n\t// http://pubs.vmware.com/vsphere-55/topic/com.vmware.wssdk.apiref.doc/vim.Folder.html\n\tswitch m := r.(type) {\n\tcase mo.Folder:\n\t\tname = m.Name\n\tcase mo.StoragePod:\n\t\tname = m.Name\n\n\t// { \"vim.Datacenter\" } - Identifies the root folder and its descendant\n\t// folders. Data center folders can contain child data center folders and\n\t// Datacenter managed objects. Datacenter objects contain virtual machine,\n\t// compute resource, network entity, and datastore folders.\n\tcase mo.Datacenter:\n\t\tname = m.Name\n\n\t// { \"vim.Virtualmachine\", \"vim.VirtualApp\" } - Identifies a virtual machine\n\t// folder. A virtual machine folder may contain child virtual machine\n\t// folders. 
It also can contain VirtualMachine managed objects, templates,\n\t// and VirtualApp managed objects.\n\tcase mo.VirtualMachine:\n\t\tname = m.Name\n\tcase mo.VirtualApp:\n\t\tname = m.Name\n\n\t// { \"vim.ComputeResource\" } - Identifies a compute resource\n\t// folder, which contains child compute resource folders and ComputeResource\n\t// hierarchies.\n\tcase mo.ComputeResource:\n\t\tname = m.Name\n\tcase mo.ClusterComputeResource:\n\t\tname = m.Name\n\tcase mo.HostSystem:\n\t\tname = m.Name\n\tcase mo.ResourcePool:\n\t\tname = m.Name\n\n\t// { \"vim.Network\" } - Identifies a network entity folder.\n\t// Network entity folders on a vCenter Server can contain Network,\n\t// DistributedVirtualSwitch, and DistributedVirtualPortgroup managed objects.\n\t// Network entity folders on an ESXi host can contain only Network objects.\n\tcase mo.Network:\n\t\tname = m.Name\n\tcase mo.OpaqueNetwork:\n\t\tname = m.Name\n\tcase mo.DistributedVirtualSwitch:\n\t\tname = m.Name\n\tcase mo.DistributedVirtualPortgroup:\n\t\tname = m.Name\n\tcase mo.VmwareDistributedVirtualSwitch:\n\t\tname = m.Name\n\n\t// { \"vim.Datastore\" } - Identifies a datastore folder. 
Datastore folders can\n\t// contain child datastore folders and Datastore managed objects.\n\tcase mo.Datastore:\n\t\tname = m.Name\n\n\tdefault:\n\t\tpanic(\"not implemented for type \" + reflect.TypeOf(r).String())\n\t}\n\n\te := Element{\n\t\tPath:   path.Join(prefix, name),\n\t\tObject: r,\n\t}\n\n\treturn e\n}\n\ntype Lister struct {\n\tCollector *property.Collector\n\tReference types.ManagedObjectReference\n\tPrefix    string\n\tAll       bool\n}\n\nfunc (l Lister) retrieveProperties(ctx context.Context, req types.RetrieveProperties, dst *[]interface{}) error {\n\tres, err := l.Collector.RetrieveProperties(ctx, req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Instead of using mo.LoadRetrievePropertiesResponse, use a custom loop to\n\t// iterate over the results and ignore entries that have properties that\n\t// could not be retrieved (a non-empty `missingSet` property). Since the\n\t// returned objects are enumerated by vSphere in the first place, any object\n\t// that has a non-empty `missingSet` property is indicative of a race\n\t// condition in vSphere where the object was enumerated initially, but was\n\t// removed before its properties could be collected.\n\tfor _, p := range res.Returnval {\n\t\tv, err := mo.ObjectContentToType(p)\n\t\tif err != nil {\n\t\t\t// Ignore fault if it is ManagedObjectNotFound\n\t\t\tif soap.IsVimFault(err) {\n\t\t\t\tswitch soap.ToVimFault(err).(type) {\n\t\t\t\tcase *types.ManagedObjectNotFound:\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn err\n\t\t}\n\n\t\t*dst = append(*dst, v)\n\t}\n\n\treturn nil\n}\n\nfunc (l Lister) List(ctx context.Context) ([]Element, error) {\n\tswitch l.Reference.Type {\n\tcase \"Folder\", \"StoragePod\":\n\t\treturn l.ListFolder(ctx)\n\tcase \"Datacenter\":\n\t\treturn l.ListDatacenter(ctx)\n\tcase \"ComputeResource\", \"ClusterComputeResource\":\n\t\t// Treat ComputeResource and ClusterComputeResource as one and the same.\n\t\t// It doesn't matter from the perspective of the 
lister.\n\t\treturn l.ListComputeResource(ctx)\n\tcase \"ResourcePool\":\n\t\treturn l.ListResourcePool(ctx)\n\tcase \"HostSystem\":\n\t\treturn l.ListHostSystem(ctx)\n\tcase \"VirtualApp\":\n\t\treturn l.ListVirtualApp(ctx)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"cannot traverse type \" + l.Reference.Type)\n\t}\n}\n\nfunc (l Lister) ListFolder(ctx context.Context) ([]Element, error) {\n\tspec := types.PropertyFilterSpec{\n\t\tObjectSet: []types.ObjectSpec{\n\t\t\t{\n\t\t\t\tObj: l.Reference,\n\t\t\t\tSelectSet: []types.BaseSelectionSpec{\n\t\t\t\t\t&types.TraversalSpec{\n\t\t\t\t\t\tPath: \"childEntity\",\n\t\t\t\t\t\tSkip: types.NewBool(false),\n\t\t\t\t\t\tType: \"Folder\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSkip: types.NewBool(true),\n\t\t\t},\n\t\t},\n\t}\n\n\t// Retrieve all objects that we can deal with\n\tchildTypes := []string{\n\t\t\"Folder\",\n\t\t\"Datacenter\",\n\t\t\"VirtualApp\",\n\t\t\"VirtualMachine\",\n\t\t\"Network\",\n\t\t\"ComputeResource\",\n\t\t\"ClusterComputeResource\",\n\t\t\"Datastore\",\n\t\t\"DistributedVirtualSwitch\",\n\t}\n\n\tfor _, t := range childTypes {\n\t\tpspec := types.PropertySpec{\n\t\t\tType: t,\n\t\t}\n\n\t\tif l.All {\n\t\t\tpspec.All = types.NewBool(true)\n\t\t} else {\n\t\t\tpspec.PathSet = []string{\"name\"}\n\n\t\t\t// Additional basic properties.\n\t\t\tswitch t {\n\t\t\tcase \"Folder\":\n\t\t\t\tpspec.PathSet = append(pspec.PathSet, \"childType\")\n\t\t\tcase \"ComputeResource\", \"ClusterComputeResource\":\n\t\t\t\t// The ComputeResource and ClusterComputeResource are dereferenced in\n\t\t\t\t// the ResourcePoolFlag. 
Make sure they always have their resourcePool\n\t\t\t\t// field populated.\n\t\t\t\tpspec.PathSet = append(pspec.PathSet, \"resourcePool\")\n\t\t\t}\n\t\t}\n\n\t\tspec.PropSet = append(spec.PropSet, pspec)\n\t}\n\n\treq := types.RetrieveProperties{\n\t\tSpecSet: []types.PropertyFilterSpec{spec},\n\t}\n\n\tvar dst []interface{}\n\n\terr := l.retrieveProperties(ctx, req, &dst)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tes := []Element{}\n\tfor _, v := range dst {\n\t\tes = append(es, ToElement(v.(mo.Reference), l.Prefix))\n\t}\n\n\treturn es, nil\n}\n\nfunc (l Lister) ListDatacenter(ctx context.Context) ([]Element, error) {\n\tospec := types.ObjectSpec{\n\t\tObj:  l.Reference,\n\t\tSkip: types.NewBool(true),\n\t}\n\n\t// Include every datastore folder in the select set\n\tfields := []string{\n\t\t\"vmFolder\",\n\t\t\"hostFolder\",\n\t\t\"datastoreFolder\",\n\t\t\"networkFolder\",\n\t}\n\n\tfor _, f := range fields {\n\t\ttspec := types.TraversalSpec{\n\t\t\tPath: f,\n\t\t\tSkip: types.NewBool(false),\n\t\t\tType: \"Datacenter\",\n\t\t}\n\n\t\tospec.SelectSet = append(ospec.SelectSet, &tspec)\n\t}\n\n\tpspec := types.PropertySpec{\n\t\tType: \"Folder\",\n\t}\n\n\tif l.All {\n\t\tpspec.All = types.NewBool(true)\n\t} else {\n\t\tpspec.PathSet = []string{\"name\", \"childType\"}\n\t}\n\n\treq := types.RetrieveProperties{\n\t\tSpecSet: []types.PropertyFilterSpec{\n\t\t\t{\n\t\t\t\tObjectSet: []types.ObjectSpec{ospec},\n\t\t\t\tPropSet:   []types.PropertySpec{pspec},\n\t\t\t},\n\t\t},\n\t}\n\n\tvar dst []interface{}\n\n\terr := l.retrieveProperties(ctx, req, &dst)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tes := []Element{}\n\tfor _, v := range dst {\n\t\tes = append(es, ToElement(v.(mo.Reference), l.Prefix))\n\t}\n\n\treturn es, nil\n}\n\nfunc (l Lister) ListComputeResource(ctx context.Context) ([]Element, error) {\n\tospec := types.ObjectSpec{\n\t\tObj:  l.Reference,\n\t\tSkip: types.NewBool(true),\n\t}\n\n\tfields := 
[]string{\n\t\t\"host\",\n\t\t\"resourcePool\",\n\t}\n\n\tfor _, f := range fields {\n\t\ttspec := types.TraversalSpec{\n\t\t\tPath: f,\n\t\t\tSkip: types.NewBool(false),\n\t\t\tType: \"ComputeResource\",\n\t\t}\n\n\t\tospec.SelectSet = append(ospec.SelectSet, &tspec)\n\t}\n\n\tchildTypes := []string{\n\t\t\"HostSystem\",\n\t\t\"ResourcePool\",\n\t}\n\n\tvar pspecs []types.PropertySpec\n\tfor _, t := range childTypes {\n\t\tpspec := types.PropertySpec{\n\t\t\tType: t,\n\t\t}\n\n\t\tif l.All {\n\t\t\tpspec.All = types.NewBool(true)\n\t\t} else {\n\t\t\tpspec.PathSet = []string{\"name\"}\n\t\t}\n\n\t\tpspecs = append(pspecs, pspec)\n\t}\n\n\treq := types.RetrieveProperties{\n\t\tSpecSet: []types.PropertyFilterSpec{\n\t\t\t{\n\t\t\t\tObjectSet: []types.ObjectSpec{ospec},\n\t\t\t\tPropSet:   pspecs,\n\t\t\t},\n\t\t},\n\t}\n\n\tvar dst []interface{}\n\n\terr := l.retrieveProperties(ctx, req, &dst)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tes := []Element{}\n\tfor _, v := range dst {\n\t\tes = append(es, ToElement(v.(mo.Reference), l.Prefix))\n\t}\n\n\treturn es, nil\n}\n\nfunc (l Lister) ListResourcePool(ctx context.Context) ([]Element, error) {\n\tospec := types.ObjectSpec{\n\t\tObj:  l.Reference,\n\t\tSkip: types.NewBool(true),\n\t}\n\n\tfields := []string{\n\t\t\"resourcePool\",\n\t}\n\n\tfor _, f := range fields {\n\t\ttspec := types.TraversalSpec{\n\t\t\tPath: f,\n\t\t\tSkip: types.NewBool(false),\n\t\t\tType: \"ResourcePool\",\n\t\t}\n\n\t\tospec.SelectSet = append(ospec.SelectSet, &tspec)\n\t}\n\n\tchildTypes := []string{\n\t\t\"ResourcePool\",\n\t}\n\n\tvar pspecs []types.PropertySpec\n\tfor _, t := range childTypes {\n\t\tpspec := types.PropertySpec{\n\t\t\tType: t,\n\t\t}\n\n\t\tif l.All {\n\t\t\tpspec.All = types.NewBool(true)\n\t\t} else {\n\t\t\tpspec.PathSet = []string{\"name\"}\n\t\t}\n\n\t\tpspecs = append(pspecs, pspec)\n\t}\n\n\treq := types.RetrieveProperties{\n\t\tSpecSet: []types.PropertyFilterSpec{\n\t\t\t{\n\t\t\t\tObjectSet: 
[]types.ObjectSpec{ospec},\n\t\t\t\tPropSet:   pspecs,\n\t\t\t},\n\t\t},\n\t}\n\n\tvar dst []interface{}\n\n\terr := l.retrieveProperties(ctx, req, &dst)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tes := []Element{}\n\tfor _, v := range dst {\n\t\tes = append(es, ToElement(v.(mo.Reference), l.Prefix))\n\t}\n\n\treturn es, nil\n}\n\nfunc (l Lister) ListHostSystem(ctx context.Context) ([]Element, error) {\n\tospec := types.ObjectSpec{\n\t\tObj:  l.Reference,\n\t\tSkip: types.NewBool(true),\n\t}\n\n\tfields := []string{\n\t\t\"datastore\",\n\t\t\"network\",\n\t\t\"vm\",\n\t}\n\n\tfor _, f := range fields {\n\t\ttspec := types.TraversalSpec{\n\t\t\tPath: f,\n\t\t\tSkip: types.NewBool(false),\n\t\t\tType: \"HostSystem\",\n\t\t}\n\n\t\tospec.SelectSet = append(ospec.SelectSet, &tspec)\n\t}\n\n\tchildTypes := []string{\n\t\t\"Datastore\",\n\t\t\"Network\",\n\t\t\"VirtualMachine\",\n\t}\n\n\tvar pspecs []types.PropertySpec\n\tfor _, t := range childTypes {\n\t\tpspec := types.PropertySpec{\n\t\t\tType: t,\n\t\t}\n\n\t\tif l.All {\n\t\t\tpspec.All = types.NewBool(true)\n\t\t} else {\n\t\t\tpspec.PathSet = []string{\"name\"}\n\t\t}\n\n\t\tpspecs = append(pspecs, pspec)\n\t}\n\n\treq := types.RetrieveProperties{\n\t\tSpecSet: []types.PropertyFilterSpec{\n\t\t\t{\n\t\t\t\tObjectSet: []types.ObjectSpec{ospec},\n\t\t\t\tPropSet:   pspecs,\n\t\t\t},\n\t\t},\n\t}\n\n\tvar dst []interface{}\n\n\terr := l.retrieveProperties(ctx, req, &dst)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tes := []Element{}\n\tfor _, v := range dst {\n\t\tes = append(es, ToElement(v.(mo.Reference), l.Prefix))\n\t}\n\n\treturn es, nil\n}\n\nfunc (l Lister) ListVirtualApp(ctx context.Context) ([]Element, error) {\n\tospec := types.ObjectSpec{\n\t\tObj:  l.Reference,\n\t\tSkip: types.NewBool(true),\n\t}\n\n\tfields := []string{\n\t\t\"resourcePool\",\n\t\t\"vm\",\n\t}\n\n\tfor _, f := range fields {\n\t\ttspec := types.TraversalSpec{\n\t\t\tPath: f,\n\t\t\tSkip: types.NewBool(false),\n\t\t\tType: 
\"VirtualApp\",\n\t\t}\n\n\t\tospec.SelectSet = append(ospec.SelectSet, &tspec)\n\t}\n\n\tchildTypes := []string{\n\t\t\"ResourcePool\",\n\t\t\"VirtualMachine\",\n\t}\n\n\tvar pspecs []types.PropertySpec\n\tfor _, t := range childTypes {\n\t\tpspec := types.PropertySpec{\n\t\t\tType: t,\n\t\t}\n\n\t\tif l.All {\n\t\t\tpspec.All = types.NewBool(true)\n\t\t} else {\n\t\t\tpspec.PathSet = []string{\"name\"}\n\t\t}\n\n\t\tpspecs = append(pspecs, pspec)\n\t}\n\n\treq := types.RetrieveProperties{\n\t\tSpecSet: []types.PropertyFilterSpec{\n\t\t\t{\n\t\t\t\tObjectSet: []types.ObjectSpec{ospec},\n\t\t\t\tPropSet:   pspecs,\n\t\t\t},\n\t\t},\n\t}\n\n\tvar dst []interface{}\n\n\terr := l.retrieveProperties(ctx, req, &dst)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tes := []Element{}\n\tfor _, v := range dst {\n\t\tes = append(es, ToElement(v.(mo.Reference), l.Prefix))\n\t}\n\n\treturn es, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/list/path.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage list\n\nimport (\n\t\"path\"\n\t\"strings\"\n)\n\nfunc ToParts(p string) []string {\n\tp = path.Clean(p)\n\tif p == \"/\" {\n\t\treturn []string{}\n\t}\n\n\tif len(p) > 0 {\n\t\t// Prefix ./ if relative\n\t\tif p[0] != '/' && p[0] != '.' {\n\t\t\tp = \"./\" + p\n\t\t}\n\t}\n\n\tps := strings.Split(p, \"/\")\n\tif ps[0] == \"\" {\n\t\t// Start at root\n\t\tps = ps[1:]\n\t}\n\n\treturn ps\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/list/path_test.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage list\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestToParts(t *testing.T) {\n\ttests := []struct {\n\t\tIn  string\n\t\tOut []string\n\t}{\n\t\t{\n\t\t\tIn:  \"/\",\n\t\t\tOut: []string{},\n\t\t},\n\t\t{\n\t\t\tIn:  \"/foo\",\n\t\t\tOut: []string{\"foo\"},\n\t\t},\n\t\t{\n\t\t\tIn:  \"/foo/..\",\n\t\t\tOut: []string{},\n\t\t},\n\t\t{\n\t\t\tIn:  \"/./foo\",\n\t\t\tOut: []string{\"foo\"},\n\t\t},\n\t\t{\n\t\t\tIn:  \"/../foo\",\n\t\t\tOut: []string{\"foo\"},\n\t\t},\n\t\t{\n\t\t\tIn:  \"/foo/bar\",\n\t\t\tOut: []string{\"foo\", \"bar\"},\n\t\t},\n\t\t{\n\t\t\tIn:  \"/foo/bar/..\",\n\t\t\tOut: []string{\"foo\"},\n\t\t},\n\t\t{\n\t\t\tIn:  \"\",\n\t\t\tOut: []string{\".\"},\n\t\t},\n\t\t{\n\t\t\tIn:  \".\",\n\t\t\tOut: []string{\".\"},\n\t\t},\n\t\t{\n\t\t\tIn:  \"foo\",\n\t\t\tOut: []string{\".\", \"foo\"},\n\t\t},\n\t\t{\n\t\t\tIn:  \"foo/..\",\n\t\t\tOut: []string{\".\"},\n\t\t},\n\t\t{\n\t\t\tIn:  \"./foo\",\n\t\t\tOut: []string{\".\", \"foo\"},\n\t\t},\n\t\t{\n\t\t\tIn:  \"../foo\", // Special case...\n\t\t\tOut: []string{\"..\", \"foo\"},\n\t\t},\n\t\t{\n\t\t\tIn:  \"foo/bar/..\",\n\t\t\tOut: []string{\".\", \"foo\"},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tout := ToParts(test.In)\n\t\tif !reflect.DeepEqual(test.Out, out) {\n\t\t\tt.Errorf(\"Expected %s to return: %#v, actual: %#v\", test.In, test.Out, out)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/authorization_manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype AuthorizationManager struct {\n\tCommon\n}\n\nfunc NewAuthorizationManager(c *vim25.Client) *AuthorizationManager {\n\tm := AuthorizationManager{\n\t\tCommon: NewCommon(c, *c.ServiceContent.AuthorizationManager),\n\t}\n\n\treturn &m\n}\n\ntype AuthorizationRoleList []types.AuthorizationRole\n\nfunc (l AuthorizationRoleList) ById(id int32) *types.AuthorizationRole {\n\tfor _, role := range l {\n\t\tif role.RoleId == id {\n\t\t\treturn &role\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (l AuthorizationRoleList) ByName(name string) *types.AuthorizationRole {\n\tfor _, role := range l {\n\t\tif role.Name == name {\n\t\t\treturn &role\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (m AuthorizationManager) RoleList(ctx context.Context) (AuthorizationRoleList, error) {\n\tvar am mo.AuthorizationManager\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"roleList\"}, &am)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn AuthorizationRoleList(am.RoleList), nil\n}\n\nfunc (m AuthorizationManager) RetrieveEntityPermissions(ctx context.Context, entity types.ManagedObjectReference, inherited bool) ([]types.Permission, error) {\n\treq := 
types.RetrieveEntityPermissions{\n\t\tThis:      m.Reference(),\n\t\tEntity:    entity,\n\t\tInherited: inherited,\n\t}\n\n\tres, err := methods.RetrieveEntityPermissions(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m AuthorizationManager) RemoveEntityPermission(ctx context.Context, entity types.ManagedObjectReference, user string, isGroup bool) error {\n\treq := types.RemoveEntityPermission{\n\t\tThis:    m.Reference(),\n\t\tEntity:  entity,\n\t\tUser:    user,\n\t\tIsGroup: isGroup,\n\t}\n\n\t_, err := methods.RemoveEntityPermission(ctx, m.Client(), &req)\n\treturn err\n}\n\nfunc (m AuthorizationManager) SetEntityPermissions(ctx context.Context, entity types.ManagedObjectReference, permission []types.Permission) error {\n\treq := types.SetEntityPermissions{\n\t\tThis:       m.Reference(),\n\t\tEntity:     entity,\n\t\tPermission: permission,\n\t}\n\n\t_, err := methods.SetEntityPermissions(ctx, m.Client(), &req)\n\treturn err\n}\n\nfunc (m AuthorizationManager) RetrieveRolePermissions(ctx context.Context, id int32) ([]types.Permission, error) {\n\treq := types.RetrieveRolePermissions{\n\t\tThis:   m.Reference(),\n\t\tRoleId: id,\n\t}\n\n\tres, err := methods.RetrieveRolePermissions(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m AuthorizationManager) RetrieveAllPermissions(ctx context.Context) ([]types.Permission, error) {\n\treq := types.RetrieveAllPermissions{\n\t\tThis: m.Reference(),\n\t}\n\n\tres, err := methods.RetrieveAllPermissions(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m AuthorizationManager) AddRole(ctx context.Context, name string, ids []string) (int32, error) {\n\treq := types.AddAuthorizationRole{\n\t\tThis:    m.Reference(),\n\t\tName:    name,\n\t\tPrivIds: ids,\n\t}\n\n\tres, err := methods.AddAuthorizationRole(ctx, m.Client(), &req)\n\tif err != 
nil {\n\t\treturn -1, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m AuthorizationManager) RemoveRole(ctx context.Context, id int32, failIfUsed bool) error {\n\treq := types.RemoveAuthorizationRole{\n\t\tThis:       m.Reference(),\n\t\tRoleId:     id,\n\t\tFailIfUsed: failIfUsed,\n\t}\n\n\t_, err := methods.RemoveAuthorizationRole(ctx, m.Client(), &req)\n\treturn err\n}\n\nfunc (m AuthorizationManager) UpdateRole(ctx context.Context, id int32, name string, ids []string) error {\n\treq := types.UpdateAuthorizationRole{\n\t\tThis:    m.Reference(),\n\t\tRoleId:  id,\n\t\tNewName: name,\n\t\tPrivIds: ids,\n\t}\n\n\t_, err := methods.UpdateAuthorizationRole(ctx, m.Client(), &req)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/authorization_manager_internal.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype DisabledMethodRequest struct {\n\tMethod string `xml:\"method\"`\n\tReason string `xml:\"reasonId\"`\n}\n\ntype disableMethodsRequest struct {\n\tThis   types.ManagedObjectReference   `xml:\"_this\"`\n\tEntity []types.ManagedObjectReference `xml:\"entity\"`\n\tMethod []DisabledMethodRequest        `xml:\"method\"`\n\tSource string                         `xml:\"sourceId\"`\n\tScope  bool                           `xml:\"sessionScope,omitempty\"`\n}\n\ntype disableMethodsBody struct {\n\tReq *disableMethodsRequest `xml:\"urn:internalvim25 DisableMethods,omitempty\"`\n\tRes interface{}            `xml:\"urn:vim25 DisableMethodsResponse,omitempty\"`\n\tErr *soap.Fault            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *disableMethodsBody) Fault() *soap.Fault { return b.Err }\n\nfunc (m AuthorizationManager) DisableMethods(ctx context.Context, entity []types.ManagedObjectReference, method []DisabledMethodRequest, source string) error {\n\tvar reqBody, resBody disableMethodsBody\n\n\treqBody.Req = &disableMethodsRequest{\n\t\tThis:   m.Reference(),\n\t\tEntity: entity,\n\t\tMethod: method,\n\t\tSource: source,\n\t}\n\n\treturn m.Client().RoundTrip(ctx, &reqBody, 
&resBody)\n}\n\ntype enableMethodsRequest struct {\n\tThis   types.ManagedObjectReference   `xml:\"_this\"`\n\tEntity []types.ManagedObjectReference `xml:\"entity\"`\n\tMethod []string                       `xml:\"method\"`\n\tSource string                         `xml:\"sourceId\"`\n}\n\ntype enableMethodsBody struct {\n\tReq *enableMethodsRequest `xml:\"urn:internalvim25 EnableMethods,omitempty\"`\n\tRes interface{}           `xml:\"urn:vim25 EnableMethodsResponse,omitempty\"`\n\tErr *soap.Fault           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *enableMethodsBody) Fault() *soap.Fault { return b.Err }\n\nfunc (m AuthorizationManager) EnableMethods(ctx context.Context, entity []types.ManagedObjectReference, method []string, source string) error {\n\tvar reqBody, resBody enableMethodsBody\n\n\treqBody.Req = &enableMethodsRequest{\n\t\tThis:   m.Reference(),\n\t\tEntity: entity,\n\t\tMethod: method,\n\t\tSource: source,\n\t}\n\n\treturn m.Client().RoundTrip(ctx, &reqBody, &resBody)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/cluster_compute_resource.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ClusterComputeResource struct {\n\tComputeResource\n}\n\nfunc NewClusterComputeResource(c *vim25.Client, ref types.ManagedObjectReference) *ClusterComputeResource {\n\treturn &ClusterComputeResource{\n\t\tComputeResource: *NewComputeResource(c, ref),\n\t}\n}\n\nfunc (c ClusterComputeResource) ReconfigureCluster(ctx context.Context, spec types.ClusterConfigSpec) (*Task, error) {\n\treq := types.ReconfigureCluster_Task{\n\t\tThis:   c.Reference(),\n\t\tSpec:   spec,\n\t\tModify: true,\n\t}\n\n\tres, err := methods.ReconfigureCluster_Task(ctx, c.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(c.c, res.Returnval), nil\n}\n\nfunc (c ClusterComputeResource) AddHost(ctx context.Context, spec types.HostConnectSpec, asConnected bool, license *string, resourcePool *types.ManagedObjectReference) (*Task, error) {\n\treq := types.AddHost_Task{\n\t\tThis:        c.Reference(),\n\t\tSpec:        spec,\n\t\tAsConnected: asConnected,\n\t}\n\n\tif license != nil {\n\t\treq.License = *license\n\t}\n\n\tif resourcePool != nil {\n\t\treq.ResourcePool = resourcePool\n\t}\n\n\tres, err := methods.AddHost_Task(ctx, c.c, &req)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\treturn NewTask(c.c, res.Returnval), nil\n}\n\nfunc (c ClusterComputeResource) Destroy(ctx context.Context) (*Task, error) {\n\treq := types.Destroy_Task{\n\t\tThis: c.Reference(),\n\t}\n\n\tres, err := methods.Destroy_Task(ctx, c.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(c.c, res.Returnval), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/cluster_compute_resource_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\n// ComputeResource should implement the Reference interface.\nvar _ Reference = ClusterComputeResource{}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/common.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"path\"\n\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nvar (\n\tErrNotSupported = errors.New(\"product/version specific feature not supported by target\")\n)\n\n// Common contains the fields and functions common to all objects.\ntype Common struct {\n\tInventoryPath string\n\n\tc *vim25.Client\n\tr types.ManagedObjectReference\n}\n\nfunc (c Common) String() string {\n\tref := fmt.Sprintf(\"%v\", c.Reference())\n\n\tif c.InventoryPath == \"\" {\n\t\treturn ref\n\t}\n\n\treturn fmt.Sprintf(\"%s @ %s\", ref, c.InventoryPath)\n}\n\nfunc NewCommon(c *vim25.Client, r types.ManagedObjectReference) Common {\n\treturn Common{c: c, r: r}\n}\n\nfunc (c Common) Reference() types.ManagedObjectReference {\n\treturn c.r\n}\n\nfunc (c Common) Client() *vim25.Client {\n\treturn c.c\n}\n\n// Name returns the base name of the InventoryPath field\nfunc (c Common) Name() string {\n\tif c.InventoryPath == \"\" {\n\t\treturn \"\"\n\t}\n\treturn path.Base(c.InventoryPath)\n}\n\nfunc (c *Common) SetInventoryPath(p string) {\n\tc.InventoryPath = p\n}\n\n// ObjectName returns the base name of the InventoryPath field if set,\n// otherwise 
fetches the mo.ManagedEntity.Name field via the property collector.\nfunc (c Common) ObjectName(ctx context.Context) (string, error) {\n\tvar o mo.ManagedEntity\n\n\tname := c.Name()\n\tif name != \"\" {\n\t\treturn name, nil\n\t}\n\n\terr := c.Properties(ctx, c.Reference(), []string{\"name\"}, &o)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn o.Name, nil\n}\n\nfunc (c Common) Properties(ctx context.Context, r types.ManagedObjectReference, ps []string, dst interface{}) error {\n\treturn property.DefaultCollector(c.c).RetrieveOne(ctx, r, ps, dst)\n}\n\nfunc (c Common) Destroy(ctx context.Context) (*Task, error) {\n\treq := types.Destroy_Task{\n\t\tThis: c.Reference(),\n\t}\n\n\tres, err := methods.Destroy_Task(ctx, c.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(c.c, res.Returnval), nil\n}\n\nfunc (c Common) Rename(ctx context.Context, name string) (*Task, error) {\n\treq := types.Rename_Task{\n\t\tThis:    c.Reference(),\n\t\tNewName: name,\n\t}\n\n\tres, err := methods.Rename_Task(ctx, c.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(c.c, res.Returnval), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/common_test.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport \"testing\"\n\nfunc TestCommonName(t *testing.T) {\n\tc := &Common{}\n\n\tname := c.Name()\n\tif name != \"\" {\n\t\tt.Errorf(\"Name=%s\", name)\n\t}\n\n\tc.InventoryPath = \"/foo/bar\"\n\tname = c.Name()\n\tif name != \"bar\" {\n\t\tt.Errorf(\"Name=%s\", name)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/compute_resource.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"path\"\n\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ComputeResource struct {\n\tCommon\n}\n\nfunc NewComputeResource(c *vim25.Client, ref types.ManagedObjectReference) *ComputeResource {\n\treturn &ComputeResource{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (c ComputeResource) Hosts(ctx context.Context) ([]*HostSystem, error) {\n\tvar cr mo.ComputeResource\n\n\terr := c.Properties(ctx, c.Reference(), []string{\"host\"}, &cr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(cr.Host) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tvar hs []mo.HostSystem\n\tpc := property.DefaultCollector(c.Client())\n\terr = pc.Retrieve(ctx, cr.Host, []string{\"name\"}, &hs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar hosts []*HostSystem\n\n\tfor _, h := range hs {\n\t\thost := NewHostSystem(c.Client(), h.Reference())\n\t\thost.InventoryPath = path.Join(c.InventoryPath, h.Name)\n\t\thosts = append(hosts, host)\n\t}\n\n\treturn hosts, nil\n}\n\nfunc (c ComputeResource) Datastores(ctx context.Context) ([]*Datastore, error) {\n\tvar cr mo.ComputeResource\n\n\terr := c.Properties(ctx, c.Reference(), []string{\"datastore\"}, &cr)\n\tif err 
!= nil {\n\t\treturn nil, err\n\t}\n\n\tvar dss []*Datastore\n\tfor _, ref := range cr.Datastore {\n\t\tds := NewDatastore(c.c, ref)\n\t\tdss = append(dss, ds)\n\t}\n\n\treturn dss, nil\n}\n\nfunc (c ComputeResource) ResourcePool(ctx context.Context) (*ResourcePool, error) {\n\tvar cr mo.ComputeResource\n\n\terr := c.Properties(ctx, c.Reference(), []string{\"resourcePool\"}, &cr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewResourcePool(c.c, *cr.ResourcePool), nil\n}\n\nfunc (c ComputeResource) Reconfigure(ctx context.Context, spec types.BaseComputeResourceConfigSpec, modify bool) (*Task, error) {\n\treq := types.ReconfigureComputeResource_Task{\n\t\tThis:   c.Reference(),\n\t\tSpec:   spec,\n\t\tModify: modify,\n\t}\n\n\tres, err := methods.ReconfigureComputeResource_Task(ctx, c.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(c.c, res.Returnval), nil\n}\n\nfunc (c ComputeResource) Destroy(ctx context.Context) (*Task, error) {\n\treq := types.Destroy_Task{\n\t\tThis: c.Reference(),\n\t}\n\n\tres, err := methods.Destroy_Task(ctx, c.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(c.c, res.Returnval), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/compute_resource_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\n// ComputeResource should implement the Reference interface.\nvar _ Reference = ComputeResource{}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/custom_fields_manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"strconv\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nvar (\n\tErrKeyNameNotFound = errors.New(\"key name not found\")\n)\n\ntype CustomFieldsManager struct {\n\tCommon\n}\n\n// GetCustomFieldsManager wraps NewCustomFieldsManager, returning ErrNotSupported\n// when the client is not connected to a vCenter instance.\nfunc GetCustomFieldsManager(c *vim25.Client) (*CustomFieldsManager, error) {\n\tif c.ServiceContent.CustomFieldsManager == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn NewCustomFieldsManager(c), nil\n}\n\nfunc NewCustomFieldsManager(c *vim25.Client) *CustomFieldsManager {\n\tm := CustomFieldsManager{\n\t\tCommon: NewCommon(c, *c.ServiceContent.CustomFieldsManager),\n\t}\n\n\treturn &m\n}\n\nfunc (m CustomFieldsManager) Add(ctx context.Context, name string, moType string, fieldDefPolicy *types.PrivilegePolicyDef, fieldPolicy *types.PrivilegePolicyDef) (*types.CustomFieldDef, error) {\n\treq := types.AddCustomFieldDef{\n\t\tThis:           m.Reference(),\n\t\tName:           name,\n\t\tMoType:         moType,\n\t\tFieldDefPolicy: fieldDefPolicy,\n\t\tFieldPolicy:    fieldPolicy,\n\t}\n\n\tres, err := methods.AddCustomFieldDef(ctx, m.c, 
&req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n\nfunc (m CustomFieldsManager) Remove(ctx context.Context, key int32) error {\n\treq := types.RemoveCustomFieldDef{\n\t\tThis: m.Reference(),\n\t\tKey:  key,\n\t}\n\n\t_, err := methods.RemoveCustomFieldDef(ctx, m.c, &req)\n\treturn err\n}\n\nfunc (m CustomFieldsManager) Rename(ctx context.Context, key int32, name string) error {\n\treq := types.RenameCustomFieldDef{\n\t\tThis: m.Reference(),\n\t\tKey:  key,\n\t\tName: name,\n\t}\n\n\t_, err := methods.RenameCustomFieldDef(ctx, m.c, &req)\n\treturn err\n}\n\nfunc (m CustomFieldsManager) Set(ctx context.Context, entity types.ManagedObjectReference, key int32, value string) error {\n\treq := types.SetField{\n\t\tThis:   m.Reference(),\n\t\tEntity: entity,\n\t\tKey:    key,\n\t\tValue:  value,\n\t}\n\n\t_, err := methods.SetField(ctx, m.c, &req)\n\treturn err\n}\n\nfunc (m CustomFieldsManager) Field(ctx context.Context) ([]types.CustomFieldDef, error) {\n\tvar fm mo.CustomFieldsManager\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"field\"}, &fm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn fm.Field, nil\n}\n\nfunc (m CustomFieldsManager) FindKey(ctx context.Context, key string) (int32, error) {\n\tfield, err := m.Field(ctx)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tfor _, def := range field {\n\t\tif def.Name == key {\n\t\t\treturn def.Key, nil\n\t\t}\n\t}\n\n\tk, err := strconv.Atoi(key)\n\tif err == nil {\n\t\t// assume literal int key\n\t\treturn int32(k), nil\n\t}\n\n\treturn -1, ErrKeyNameNotFound\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/customization_spec_manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype CustomizationSpecManager struct {\n\tCommon\n}\n\nfunc NewCustomizationSpecManager(c *vim25.Client) *CustomizationSpecManager {\n\tcs := CustomizationSpecManager{\n\t\tCommon: NewCommon(c, *c.ServiceContent.CustomizationSpecManager),\n\t}\n\n\treturn &cs\n}\n\nfunc (cs CustomizationSpecManager) DoesCustomizationSpecExist(ctx context.Context, name string) (bool, error) {\n\treq := types.DoesCustomizationSpecExist{\n\t\tThis: cs.Reference(),\n\t\tName: name,\n\t}\n\n\tres, err := methods.DoesCustomizationSpecExist(ctx, cs.c, &req)\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (cs CustomizationSpecManager) GetCustomizationSpec(ctx context.Context, name string) (*types.CustomizationSpecItem, error) {\n\treq := types.GetCustomizationSpec{\n\t\tThis: cs.Reference(),\n\t\tName: name,\n\t}\n\n\tres, err := methods.GetCustomizationSpec(ctx, cs.c, &req)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n\nfunc (cs CustomizationSpecManager) CreateCustomizationSpec(ctx context.Context, item types.CustomizationSpecItem) error {\n\treq := types.CreateCustomizationSpec{\n\t\tThis: cs.Reference(),\n\t\tItem: 
item,\n\t}\n\n\t_, err := methods.CreateCustomizationSpec(ctx, cs.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cs CustomizationSpecManager) OverwriteCustomizationSpec(ctx context.Context, item types.CustomizationSpecItem) error {\n\treq := types.OverwriteCustomizationSpec{\n\t\tThis: cs.Reference(),\n\t\tItem: item,\n\t}\n\n\t_, err := methods.OverwriteCustomizationSpec(ctx, cs.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cs CustomizationSpecManager) DeleteCustomizationSpec(ctx context.Context, name string) error {\n\treq := types.DeleteCustomizationSpec{\n\t\tThis: cs.Reference(),\n\t\tName: name,\n\t}\n\n\t_, err := methods.DeleteCustomizationSpec(ctx, cs.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cs CustomizationSpecManager) DuplicateCustomizationSpec(ctx context.Context, name string, newName string) error {\n\treq := types.DuplicateCustomizationSpec{\n\t\tThis:    cs.Reference(),\n\t\tName:    name,\n\t\tNewName: newName,\n\t}\n\n\t_, err := methods.DuplicateCustomizationSpec(ctx, cs.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cs CustomizationSpecManager) RenameCustomizationSpec(ctx context.Context, name string, newName string) error {\n\treq := types.RenameCustomizationSpec{\n\t\tThis:    cs.Reference(),\n\t\tName:    name,\n\t\tNewName: newName,\n\t}\n\n\t_, err := methods.RenameCustomizationSpec(ctx, cs.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (cs CustomizationSpecManager) CustomizationSpecItemToXml(ctx context.Context, item types.CustomizationSpecItem) (string, error) {\n\treq := types.CustomizationSpecItemToXml{\n\t\tThis: cs.Reference(),\n\t\tItem: item,\n\t}\n\n\tres, err := methods.CustomizationSpecItemToXml(ctx, cs.c, &req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (cs CustomizationSpecManager) XmlToCustomizationSpecItem(ctx context.Context, 
xml string) (*types.CustomizationSpecItem, error) {\n\treq := types.XmlToCustomizationSpecItem{\n\t\tThis:        cs.Reference(),\n\t\tSpecItemXml: xml,\n\t}\n\n\tres, err := methods.XmlToCustomizationSpecItem(ctx, cs.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &res.Returnval, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/datacenter.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype DatacenterFolders struct {\n\tVmFolder        *Folder\n\tHostFolder      *Folder\n\tDatastoreFolder *Folder\n\tNetworkFolder   *Folder\n}\n\ntype Datacenter struct {\n\tCommon\n}\n\nfunc NewDatacenter(c *vim25.Client, ref types.ManagedObjectReference) *Datacenter {\n\treturn &Datacenter{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (d *Datacenter) Folders(ctx context.Context) (*DatacenterFolders, error) {\n\tvar md mo.Datacenter\n\n\tps := []string{\"name\", \"vmFolder\", \"hostFolder\", \"datastoreFolder\", \"networkFolder\"}\n\terr := d.Properties(ctx, d.Reference(), ps, &md)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdf := &DatacenterFolders{\n\t\tVmFolder:        NewFolder(d.c, md.VmFolder),\n\t\tHostFolder:      NewFolder(d.c, md.HostFolder),\n\t\tDatastoreFolder: NewFolder(d.c, md.DatastoreFolder),\n\t\tNetworkFolder:   NewFolder(d.c, md.NetworkFolder),\n\t}\n\n\tpaths := []struct {\n\t\tname string\n\t\tpath *string\n\t}{\n\t\t{\"vm\", &df.VmFolder.InventoryPath},\n\t\t{\"host\", &df.HostFolder.InventoryPath},\n\t\t{\"datastore\", &df.DatastoreFolder.InventoryPath},\n\t\t{\"network\", 
&df.NetworkFolder.InventoryPath},\n\t}\n\n\tfor _, p := range paths {\n\t\t*p.path = fmt.Sprintf(\"/%s/%s\", md.Name, p.name)\n\t}\n\n\treturn df, nil\n}\n\nfunc (d Datacenter) Destroy(ctx context.Context) (*Task, error) {\n\treq := types.Destroy_Task{\n\t\tThis: d.Reference(),\n\t}\n\n\tres, err := methods.Destroy_Task(ctx, d.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(d.c, res.Returnval), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/datacenter_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\n// Datacenter should implement the Reference interface.\nvar _ Reference = Datacenter{}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/datastore.go",
    "content": "/*\nCopyright (c) 2015-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"math/rand\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\n\t\"context\"\n\t\"net/http\"\n\t\"net/url\"\n\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/session\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\n// DatastoreNoSuchDirectoryError is returned when a directory could not be found.\ntype DatastoreNoSuchDirectoryError struct {\n\tverb    string\n\tsubject string\n}\n\nfunc (e DatastoreNoSuchDirectoryError) Error() string {\n\treturn fmt.Sprintf(\"cannot %s '%s': No such directory\", e.verb, e.subject)\n}\n\n// DatastoreNoSuchFileError is returned when a file could not be found.\ntype DatastoreNoSuchFileError struct {\n\tverb    string\n\tsubject string\n}\n\nfunc (e DatastoreNoSuchFileError) Error() string {\n\treturn fmt.Sprintf(\"cannot %s '%s': No such file\", e.verb, e.subject)\n}\n\ntype Datastore struct {\n\tCommon\n\n\tDatacenterPath string\n}\n\nfunc NewDatastore(c *vim25.Client, ref types.ManagedObjectReference) *Datastore {\n\treturn &Datastore{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (d Datastore) Path(path string) string {\n\treturn (&DatastorePath{\n\t\tDatastore: d.Name(),\n\t\tPath:      path,\n\t}).String()\n}\n\n// NewURL 
constructs a url.URL with the given file path for datastore access over HTTP.\nfunc (d Datastore) NewURL(path string) *url.URL {\n\tu := d.c.URL()\n\n\treturn &url.URL{\n\t\tScheme: u.Scheme,\n\t\tHost:   u.Host,\n\t\tPath:   fmt.Sprintf(\"/folder/%s\", path),\n\t\tRawQuery: url.Values{\n\t\t\t\"dcPath\": []string{d.DatacenterPath},\n\t\t\t\"dsName\": []string{d.Name()},\n\t\t}.Encode(),\n\t}\n}\n\n// URL is deprecated, use NewURL instead.\nfunc (d Datastore) URL(ctx context.Context, dc *Datacenter, path string) (*url.URL, error) {\n\treturn d.NewURL(path), nil\n}\n\nfunc (d Datastore) Browser(ctx context.Context) (*HostDatastoreBrowser, error) {\n\tvar do mo.Datastore\n\n\terr := d.Properties(ctx, d.Reference(), []string{\"browser\"}, &do)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewHostDatastoreBrowser(d.c, do.Browser), nil\n}\n\nfunc (d Datastore) useServiceTicket() bool {\n\t// If connected to workstation, service ticketing not supported\n\t// If connected to ESX, service ticketing not needed\n\tif !d.c.IsVC() {\n\t\treturn false\n\t}\n\n\tkey := \"GOVMOMI_USE_SERVICE_TICKET\"\n\n\tval := d.c.URL().Query().Get(key)\n\tif val == \"\" {\n\t\tval = os.Getenv(key)\n\t}\n\n\tif val == \"1\" || val == \"true\" {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (d Datastore) useServiceTicketHostName(name string) bool {\n\t// No need if talking directly to ESX.\n\tif !d.c.IsVC() {\n\t\treturn false\n\t}\n\n\t// If version happens to be < 5.1\n\tif name == \"\" {\n\t\treturn false\n\t}\n\n\t// If the HostSystem is using DHCP on a network without dynamic DNS,\n\t// HostSystem.Config.Network.DnsConfig.HostName is set to \"localhost\" by default.\n\t// This resolves to \"localhost.localdomain\" by default via /etc/hosts on ESX.\n\t// In that case, we will stick with the HostSystem.Name which is the IP address that\n\t// was used to connect the host to VC.\n\tif name == \"localhost.localdomain\" {\n\t\treturn false\n\t}\n\n\t// Still possible to have 
HostName that don't resolve via DNS,\n\t// so we default to false.\n\tkey := \"GOVMOMI_USE_SERVICE_TICKET_HOSTNAME\"\n\n\tval := d.c.URL().Query().Get(key)\n\tif val == \"\" {\n\t\tval = os.Getenv(key)\n\t}\n\n\tif val == \"1\" || val == \"true\" {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\ntype datastoreServiceTicketHostKey struct{}\n\n// HostContext returns a Context where the given host will be used for datastore HTTP access\n// via the ServiceTicket method.\nfunc (d Datastore) HostContext(ctx context.Context, host *HostSystem) context.Context {\n\treturn context.WithValue(ctx, datastoreServiceTicketHostKey{}, host)\n}\n\n// ServiceTicket obtains a ticket via AcquireGenericServiceTicket and returns it an http.Cookie with the url.URL\n// that can be used along with the ticket cookie to access the given path.  An host is chosen at random unless the\n// the given Context was created with a specific host via the HostContext method.\nfunc (d Datastore) ServiceTicket(ctx context.Context, path string, method string) (*url.URL, *http.Cookie, error) {\n\tu := d.NewURL(path)\n\n\thost, ok := ctx.Value(datastoreServiceTicketHostKey{}).(*HostSystem)\n\n\tif !ok {\n\t\tif !d.useServiceTicket() {\n\t\t\treturn u, nil, nil\n\t\t}\n\n\t\thosts, err := d.AttachedHosts(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tif len(hosts) == 0 {\n\t\t\t// Fallback to letting vCenter choose a host\n\t\t\treturn u, nil, nil\n\t\t}\n\n\t\t// Pick a random attached host\n\t\thost = hosts[rand.Intn(len(hosts))]\n\t}\n\n\tips, err := host.ManagementIPs(ctx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif len(ips) > 0 {\n\t\t// prefer a ManagementIP\n\t\tu.Host = ips[0].String()\n\t} else {\n\t\t// fallback to inventory name\n\t\tu.Host, err = host.ObjectName(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\t// VC datacenter path will not be valid against ESX\n\tq := u.Query()\n\tdelete(q, \"dcPath\")\n\tu.RawQuery = q.Encode()\n\n\tspec := 
types.SessionManagerHttpServiceRequestSpec{\n\t\tUrl: u.String(),\n\t\t// See SessionManagerHttpServiceRequestSpecMethod enum\n\t\tMethod: fmt.Sprintf(\"http%s%s\", method[0:1], strings.ToLower(method[1:])),\n\t}\n\n\tsm := session.NewManager(d.Client())\n\n\tticket, err := sm.AcquireGenericServiceTicket(ctx, &spec)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcookie := &http.Cookie{\n\t\tName:  \"vmware_cgi_ticket\",\n\t\tValue: ticket.Id,\n\t}\n\n\tif d.useServiceTicketHostName(ticket.HostName) {\n\t\tu.Host = ticket.HostName\n\t}\n\n\td.Client().SetThumbprint(u.Host, ticket.SslThumbprint)\n\n\treturn u, cookie, nil\n}\n\nfunc (d Datastore) uploadTicket(ctx context.Context, path string, param *soap.Upload) (*url.URL, *soap.Upload, error) {\n\tp := soap.DefaultUpload\n\tif param != nil {\n\t\tp = *param // copy\n\t}\n\n\tu, ticket, err := d.ServiceTicket(ctx, path, p.Method)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tp.Ticket = ticket\n\n\treturn u, &p, nil\n}\n\nfunc (d Datastore) downloadTicket(ctx context.Context, path string, param *soap.Download) (*url.URL, *soap.Download, error) {\n\tp := soap.DefaultDownload\n\tif param != nil {\n\t\tp = *param // copy\n\t}\n\n\tu, ticket, err := d.ServiceTicket(ctx, path, p.Method)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tp.Ticket = ticket\n\n\treturn u, &p, nil\n}\n\n// Upload via soap.Upload with an http service ticket\nfunc (d Datastore) Upload(ctx context.Context, f io.Reader, path string, param *soap.Upload) error {\n\tu, p, err := d.uploadTicket(ctx, path, param)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn d.Client().Upload(f, u, p)\n}\n\n// UploadFile via soap.Upload with an http service ticket\nfunc (d Datastore) UploadFile(ctx context.Context, file string, path string, param *soap.Upload) error {\n\tu, p, err := d.uploadTicket(ctx, path, param)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn d.Client().UploadFile(file, u, p)\n}\n\n// Download via soap.Download with an 
http service ticket\nfunc (d Datastore) Download(ctx context.Context, path string, param *soap.Download) (io.ReadCloser, int64, error) {\n\tu, p, err := d.downloadTicket(ctx, path, param)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\treturn d.Client().Download(u, p)\n}\n\n// DownloadFile via soap.Download with an http service ticket\nfunc (d Datastore) DownloadFile(ctx context.Context, path string, file string, param *soap.Download) error {\n\tu, p, err := d.downloadTicket(ctx, path, param)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn d.Client().DownloadFile(file, u, p)\n}\n\n// AttachedHosts returns hosts that have this Datastore attached, accessible and writable.\nfunc (d Datastore) AttachedHosts(ctx context.Context) ([]*HostSystem, error) {\n\tvar ds mo.Datastore\n\tvar hosts []*HostSystem\n\n\tpc := property.DefaultCollector(d.Client())\n\terr := pc.RetrieveOne(ctx, d.Reference(), []string{\"host\"}, &ds)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmounts := make(map[types.ManagedObjectReference]types.DatastoreHostMount)\n\tvar refs []types.ManagedObjectReference\n\tfor _, host := range ds.Host {\n\t\trefs = append(refs, host.Key)\n\t\tmounts[host.Key] = host\n\t}\n\n\tvar hs []mo.HostSystem\n\terr = pc.Retrieve(ctx, refs, []string{\"runtime.connectionState\", \"runtime.powerState\"}, &hs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, host := range hs {\n\t\tif host.Runtime.ConnectionState == types.HostSystemConnectionStateConnected &&\n\t\t\thost.Runtime.PowerState == types.HostSystemPowerStatePoweredOn {\n\n\t\t\tmount := mounts[host.Reference()]\n\t\t\tinfo := mount.MountInfo\n\n\t\t\tif *info.Mounted && *info.Accessible && info.AccessMode == string(types.HostMountModeReadWrite) {\n\t\t\t\thosts = append(hosts, NewHostSystem(d.Client(), mount.Key))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn hosts, nil\n}\n\n// AttachedClusterHosts returns hosts that have this Datastore attached, accessible and writable and are members of the given 
cluster.\nfunc (d Datastore) AttachedClusterHosts(ctx context.Context, cluster *ComputeResource) ([]*HostSystem, error) {\n\tvar hosts []*HostSystem\n\n\tclusterHosts, err := cluster.Hosts(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tattachedHosts, err := d.AttachedHosts(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trefs := make(map[types.ManagedObjectReference]bool)\n\tfor _, host := range attachedHosts {\n\t\trefs[host.Reference()] = true\n\t}\n\n\tfor _, host := range clusterHosts {\n\t\tif refs[host.Reference()] {\n\t\t\thosts = append(hosts, host)\n\t\t}\n\t}\n\n\treturn hosts, nil\n}\n\nfunc (d Datastore) Stat(ctx context.Context, file string) (types.BaseFileInfo, error) {\n\tb, err := d.Browser(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tspec := types.HostDatastoreBrowserSearchSpec{\n\t\tDetails: &types.FileQueryFlags{\n\t\t\tFileType:     true,\n\t\t\tFileSize:     true,\n\t\t\tModification: true,\n\t\t\tFileOwner:    types.NewBool(true),\n\t\t},\n\t\tMatchPattern: []string{path.Base(file)},\n\t}\n\n\tdsPath := d.Path(path.Dir(file))\n\ttask, err := b.SearchDatastore(ctx, dsPath, &spec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo, err := task.WaitForResult(ctx, nil)\n\tif err != nil {\n\t\tif info == nil || info.Error != nil {\n\t\t\t_, ok := info.Error.Fault.(*types.FileNotFound)\n\t\t\tif ok {\n\t\t\t\t// FileNotFound means the base path doesn't exist.\n\t\t\t\treturn nil, DatastoreNoSuchDirectoryError{\"stat\", dsPath}\n\t\t\t}\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\tres := info.Result.(types.HostDatastoreBrowserSearchResults)\n\tif len(res.File) == 0 {\n\t\t// File doesn't exist\n\t\treturn nil, DatastoreNoSuchFileError{\"stat\", d.Path(file)}\n\t}\n\n\treturn res.File[0], nil\n\n}\n\n// Type returns the type of file system volume.\nfunc (d Datastore) Type(ctx context.Context) (types.HostFileSystemVolumeFileSystemType, error) {\n\tvar mds mo.Datastore\n\n\tif err := d.Properties(ctx, d.Reference(), 
[]string{\"summary.type\"}, &mds); err != nil {\n\t\treturn types.HostFileSystemVolumeFileSystemType(\"\"), err\n\t}\n\treturn types.HostFileSystemVolumeFileSystemType(mds.Summary.Type), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/datastore_file.go",
    "content": "/*\nCopyright (c) 2016-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/vim25/soap\"\n)\n\n// DatastoreFile implements io.Reader, io.Seeker and io.Closer interfaces for datastore file access.\ntype DatastoreFile struct {\n\td    Datastore\n\tctx  context.Context\n\tname string\n\n\tbuf    io.Reader\n\tbody   io.ReadCloser\n\tlength int64\n\toffset struct {\n\t\tread, seek int64\n\t}\n}\n\n// Open opens the named file relative to the Datastore.\nfunc (d Datastore) Open(ctx context.Context, name string) (*DatastoreFile, error) {\n\treturn &DatastoreFile{\n\t\td:      d,\n\t\tname:   name,\n\t\tlength: -1,\n\t\tctx:    ctx,\n\t}, nil\n}\n\n// Read reads up to len(b) bytes from the DatastoreFile.\nfunc (f *DatastoreFile) Read(b []byte) (int, error) {\n\tif f.offset.read != f.offset.seek {\n\t\t// A Seek() call changed the offset, we need to issue a new GET\n\t\t_ = f.Close()\n\n\t\tf.offset.read = f.offset.seek\n\t} else if f.buf != nil {\n\t\t// f.buf + f behaves like an io.MultiReader\n\t\tn, err := f.buf.Read(b)\n\t\tif err == io.EOF {\n\t\t\tf.buf = nil // buffer has been drained\n\t\t}\n\t\tif n > 0 {\n\t\t\treturn n, nil\n\t\t}\n\t}\n\n\tbody, err := f.get()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tn, err := body.Read(b)\n\n\tf.offset.read += 
int64(n)\n\tf.offset.seek += int64(n)\n\n\treturn n, err\n}\n\n// Close closes the DatastoreFile.\nfunc (f *DatastoreFile) Close() error {\n\tvar err error\n\n\tif f.body != nil {\n\t\terr = f.body.Close()\n\t\tf.body = nil\n\t}\n\n\tf.buf = nil\n\n\treturn err\n}\n\n// Seek sets the offset for the next Read on the DatastoreFile.\nfunc (f *DatastoreFile) Seek(offset int64, whence int) (int64, error) {\n\tswitch whence {\n\tcase io.SeekStart:\n\tcase io.SeekCurrent:\n\t\toffset += f.offset.seek\n\tcase io.SeekEnd:\n\t\tif f.length < 0 {\n\t\t\t_, err := f.Stat()\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\t\toffset += f.length\n\tdefault:\n\t\treturn 0, errors.New(\"Seek: invalid whence\")\n\t}\n\n\t// allow negative SeekStart for initial Range request\n\tif offset < 0 {\n\t\treturn 0, errors.New(\"Seek: invalid offset\")\n\t}\n\n\tf.offset.seek = offset\n\n\treturn offset, nil\n}\n\ntype fileStat struct {\n\tfile   *DatastoreFile\n\theader http.Header\n}\n\nfunc (s *fileStat) Name() string {\n\treturn path.Base(s.file.name)\n}\n\nfunc (s *fileStat) Size() int64 {\n\treturn s.file.length\n}\n\nfunc (s *fileStat) Mode() os.FileMode {\n\treturn 0\n}\n\nfunc (s *fileStat) ModTime() time.Time {\n\treturn time.Now() // no Last-Modified\n}\n\nfunc (s *fileStat) IsDir() bool {\n\treturn false\n}\n\nfunc (s *fileStat) Sys() interface{} {\n\treturn s.header\n}\n\nfunc statusError(res *http.Response) error {\n\tif res.StatusCode == http.StatusNotFound {\n\t\treturn os.ErrNotExist\n\t}\n\treturn errors.New(res.Status)\n}\n\n// Stat returns the os.FileInfo interface describing file.\nfunc (f *DatastoreFile) Stat() (os.FileInfo, error) {\n\t// TODO: consider using Datastore.Stat() instead\n\tu, p, err := f.d.downloadTicket(f.ctx, f.name, &soap.Download{Method: \"HEAD\"})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres, err := f.d.Client().DownloadRequest(u, p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif res.StatusCode != http.StatusOK 
{\n\t\treturn nil, statusError(res)\n\t}\n\n\tf.length = res.ContentLength\n\n\treturn &fileStat{f, res.Header}, nil\n}\n\nfunc (f *DatastoreFile) get() (io.Reader, error) {\n\tif f.body != nil {\n\t\treturn f.body, nil\n\t}\n\n\tu, p, err := f.d.downloadTicket(f.ctx, f.name, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif f.offset.read != 0 {\n\t\tp.Headers = map[string]string{\n\t\t\t\"Range\": fmt.Sprintf(\"bytes=%d-\", f.offset.read),\n\t\t}\n\t}\n\n\tres, err := f.d.Client().DownloadRequest(u, p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch res.StatusCode {\n\tcase http.StatusOK:\n\t\tf.length = res.ContentLength\n\tcase http.StatusPartialContent:\n\t\tvar start, end int\n\t\tcr := res.Header.Get(\"Content-Range\")\n\t\t_, err = fmt.Sscanf(cr, \"bytes %d-%d/%d\", &start, &end, &f.length)\n\t\tif err != nil {\n\t\t\tf.length = -1\n\t\t}\n\tcase http.StatusRequestedRangeNotSatisfiable:\n\t\t// ok: Read() will return io.EOF\n\tdefault:\n\t\treturn nil, statusError(res)\n\t}\n\n\tif f.length < 0 {\n\t\t_ = res.Body.Close()\n\t\treturn nil, errors.New(\"unable to determine file size\")\n\t}\n\n\tf.body = res.Body\n\n\treturn f.body, nil\n}\n\nfunc lastIndexLines(s []byte, line *int, include func(l int, m string) bool) (int64, bool) {\n\ti := len(s) - 1\n\tdone := false\n\n\tfor i > 0 {\n\t\to := bytes.LastIndexByte(s[:i], '\\n')\n\t\tif o < 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tmsg := string(s[o+1 : i+1])\n\t\tif !include(*line, msg) {\n\t\t\tdone = true\n\t\t\tbreak\n\t\t} else {\n\t\t\ti = o\n\t\t\t*line++\n\t\t}\n\t}\n\n\treturn int64(i), done\n}\n\n// Tail seeks to the position of the last N lines of the file.\nfunc (f *DatastoreFile) Tail(n int) error {\n\treturn f.TailFunc(n, func(line int, _ string) bool { return n > line })\n}\n\n// TailFunc will seek backwards in the datastore file until it hits a line that does\n// not satisfy the supplied `include` function.\nfunc (f *DatastoreFile) TailFunc(lines int, include func(line int, message 
string) bool) error {\n\t// Read the file in reverse using bsize chunks\n\tconst bsize = int64(1024 * 16)\n\n\tfsize, err := f.Seek(0, io.SeekEnd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif lines == 0 {\n\t\treturn nil\n\t}\n\n\tchunk := int64(-1)\n\n\tbuf := bytes.NewBuffer(make([]byte, 0, bsize))\n\tline := 0\n\n\tfor {\n\t\tvar eof bool\n\t\tvar pos int64\n\n\t\tnread := bsize\n\n\t\toffset := chunk * bsize\n\t\tremain := fsize + offset\n\n\t\tif remain < 0 {\n\t\t\tif pos, err = f.Seek(0, io.SeekStart); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tnread = bsize + remain\n\t\t\teof = true\n\t\t} else {\n\t\t\tif pos, err = f.Seek(offset, io.SeekEnd); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif _, err = io.CopyN(buf, f, nread); err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tb := buf.Bytes()\n\t\tidx, done := lastIndexLines(b, &line, include)\n\n\t\tif done {\n\t\t\tif chunk == -1 {\n\t\t\t\t// We found all N lines in the last chunk of the file.\n\t\t\t\t// The seek offset is also now at the current end of file.\n\t\t\t\t// Save this buffer to avoid another GET request when Read() is called.\n\t\t\t\tbuf.Next(int(idx + 1))\n\t\t\t\tf.buf = buf\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif _, err = f.Seek(pos+idx+1, io.SeekStart); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\n\t\tif eof {\n\t\t\tif remain < 0 {\n\t\t\t\t// We found < N lines in the entire file, so seek to the start.\n\t\t\t\t_, _ = f.Seek(0, io.SeekStart)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\tchunk--\n\t\tbuf.Reset()\n\t}\n\n\treturn nil\n}\n\ntype followDatastoreFile struct {\n\tr *DatastoreFile\n\tc chan struct{}\n\ti time.Duration\n}\n\n// Read reads up to len(b) bytes from the DatastoreFile being followed.\n// This method will block until data is read, an error other than io.EOF is returned or Close() is called.\nfunc (f *followDatastoreFile) Read(p []byte) (int, error) {\n\toffset := f.r.offset.seek\n\tstop := 
false\n\n\tfor {\n\t\tn, err := f.r.Read(p)\n\t\tif err != nil && err == io.EOF {\n\t\t\t_ = f.r.Close() // GET request body has been drained.\n\t\t\tif stop {\n\t\t\t\treturn n, err\n\t\t\t}\n\t\t\terr = nil\n\t\t}\n\n\t\tif n > 0 {\n\t\t\treturn n, err\n\t\t}\n\n\t\tselect {\n\t\tcase <-f.c:\n\t\t\t// Wake up and stop polling once the body has been drained\n\t\t\tstop = true\n\t\tcase <-time.After(f.i):\n\t\t}\n\n\t\tinfo, serr := f.r.Stat()\n\t\tif serr != nil {\n\t\t\t// Return EOF rather than 404 if the file goes away\n\t\t\tif serr == os.ErrNotExist {\n\t\t\t\t_ = f.r.Close()\n\t\t\t\treturn 0, io.EOF\n\t\t\t}\n\t\t\treturn 0, serr\n\t\t}\n\n\t\tif info.Size() < offset {\n\t\t\t// assume file has be truncated\n\t\t\toffset, err = f.r.Seek(0, io.SeekStart)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Close will stop Follow polling and close the underlying DatastoreFile.\nfunc (f *followDatastoreFile) Close() error {\n\tclose(f.c)\n\treturn nil\n}\n\n// Follow returns an io.ReadCloser to stream the file contents as data is appended.\nfunc (f *DatastoreFile) Follow(interval time.Duration) io.ReadCloser {\n\treturn &followDatastoreFile{f, make(chan struct{}), interval}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/datastore_file_manager.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/vim25/soap\"\n)\n\n// DatastoreFileManager combines FileManager and VirtualDiskManager to manage files on a Datastore\ntype DatastoreFileManager struct {\n\tDatacenter         *Datacenter\n\tDatastore          *Datastore\n\tFileManager        *FileManager\n\tVirtualDiskManager *VirtualDiskManager\n\n\tForce bool\n}\n\n// NewFileManager creates a new instance of DatastoreFileManager\nfunc (d Datastore) NewFileManager(dc *Datacenter, force bool) *DatastoreFileManager {\n\tc := d.Client()\n\n\tm := &DatastoreFileManager{\n\t\tDatacenter:         dc,\n\t\tDatastore:          &d,\n\t\tFileManager:        NewFileManager(c),\n\t\tVirtualDiskManager: NewVirtualDiskManager(c),\n\t\tForce:              force,\n\t}\n\n\treturn m\n}\n\n// Delete dispatches to the appropriate Delete method based on file name extension\nfunc (m *DatastoreFileManager) Delete(ctx context.Context, name string) error {\n\tswitch path.Ext(name) {\n\tcase \".vmdk\":\n\t\treturn m.DeleteVirtualDisk(ctx, name)\n\tdefault:\n\t\treturn m.DeleteFile(ctx, name)\n\t}\n}\n\n// DeleteFile calls FileManager.DeleteDatastoreFile\nfunc (m *DatastoreFileManager) DeleteFile(ctx context.Context, name string) error {\n\tp := m.Path(name)\n\n\ttask, err 
:= m.FileManager.DeleteDatastoreFile(ctx, p.String(), m.Datacenter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn task.Wait(ctx)\n}\n\n// DeleteVirtualDisk calls VirtualDiskManager.DeleteVirtualDisk\n// Regardless of the Datastore type, DeleteVirtualDisk will fail if 'ddb.deletable=false',\n// so if Force=true this method attempts to set 'ddb.deletable=true' before starting the delete task.\nfunc (m *DatastoreFileManager) DeleteVirtualDisk(ctx context.Context, name string) error {\n\tp := m.Path(name)\n\n\tvar merr error\n\n\tif m.Force {\n\t\tmerr = m.markDiskAsDeletable(ctx, p)\n\t}\n\n\ttask, err := m.VirtualDiskManager.DeleteVirtualDisk(ctx, p.String(), m.Datacenter)\n\tif err != nil {\n\t\tlog.Printf(\"markDiskAsDeletable(%s): %s\", p, merr)\n\t\treturn err\n\t}\n\n\treturn task.Wait(ctx)\n}\n\n// Path converts path name to a DatastorePath\nfunc (m *DatastoreFileManager) Path(name string) *DatastorePath {\n\tvar p DatastorePath\n\n\tif !p.FromString(name) {\n\t\tp.Path = name\n\t\tp.Datastore = m.Datastore.Name()\n\t}\n\n\treturn &p\n}\n\nfunc (m *DatastoreFileManager) markDiskAsDeletable(ctx context.Context, path *DatastorePath) error {\n\tr, _, err := m.Datastore.Download(ctx, path.Path, &soap.DefaultDownload)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer r.Close()\n\n\thasFlag := false\n\tbuf := new(bytes.Buffer)\n\n\ts := bufio.NewScanner(&io.LimitedReader{R: r, N: 2048}) // should be only a few hundred bytes, limit to be sure\n\n\tfor s.Scan() {\n\t\tline := s.Text()\n\t\tif strings.HasPrefix(line, \"ddb.deletable\") {\n\t\t\thasFlag = true\n\t\t\tcontinue\n\t\t}\n\n\t\tfmt.Fprintln(buf, line)\n\t}\n\n\tif err := s.Err(); err != nil {\n\t\treturn err // any error other than EOF\n\t}\n\n\tif !hasFlag {\n\t\treturn nil // already deletable, so leave as-is\n\t}\n\n\t// rewrite the .vmdk with ddb.deletable flag removed (the default is true)\n\treturn m.Datastore.Upload(ctx, buf, path.Path, &soap.DefaultUpload)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/datastore_path.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n// DatastorePath contains the components of a datastore path.\ntype DatastorePath struct {\n\tDatastore string\n\tPath      string\n}\n\n// FromString parses a datastore path.\n// Returns true if the path could be parsed, false otherwise.\nfunc (p *DatastorePath) FromString(s string) bool {\n\tif len(s) == 0 {\n\t\treturn false\n\t}\n\n\ts = strings.TrimSpace(s)\n\n\tif !strings.HasPrefix(s, \"[\") {\n\t\treturn false\n\t}\n\n\ts = s[1:]\n\n\tix := strings.Index(s, \"]\")\n\tif ix < 0 {\n\t\treturn false\n\t}\n\n\tp.Datastore = s[:ix]\n\tp.Path = strings.TrimSpace(s[ix+1:])\n\n\treturn true\n}\n\n// String formats a datastore path.\nfunc (p *DatastorePath) String() string {\n\ts := fmt.Sprintf(\"[%s]\", p.Datastore)\n\n\tif p.Path == \"\" {\n\t\treturn s\n\t}\n\n\treturn strings.Join([]string{s, p.Path}, \" \")\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/datastore_path_test.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport \"testing\"\n\nfunc TestParseDatastorePath(t *testing.T) {\n\ttests := []struct {\n\t\tdsPath string\n\t\tdsFile string\n\t\tfail   bool\n\t}{\n\t\t{\"\", \"\", true},\n\t\t{\"x\", \"\", true},\n\t\t{\"[\", \"\", true},\n\t\t{\"[nope\", \"\", true},\n\t\t{\"[te st]\", \"\", false},\n\t\t{\"[te st] foo\", \"foo\", false},\n\t\t{\"[te st] foo/foo.vmx\", \"foo/foo.vmx\", false},\n\t\t{\"[te st]foo bar/foo bar.vmx\", \"foo bar/foo bar.vmx\", false},\n\t\t{\" [te st]     bar/bar.vmx  \", \"bar/bar.vmx\", false},\n\t}\n\n\tfor _, test := range tests {\n\t\tp := new(DatastorePath)\n\t\tok := p.FromString(test.dsPath)\n\n\t\tif test.fail {\n\t\t\tif ok {\n\t\t\t\tt.Errorf(\"expected error for: %s\", test.dsPath)\n\t\t\t}\n\t\t} else {\n\t\t\tif !ok {\n\t\t\t\tt.Errorf(\"failed to parse: %q\", test.dsPath)\n\t\t\t} else {\n\t\t\t\tif test.dsFile != p.Path {\n\t\t\t\t\tt.Errorf(\"dsFile=%s\", p.Path)\n\t\t\t\t}\n\t\t\t\tif p.Datastore != \"te st\" {\n\t\t\t\t\tt.Errorf(\"ds=%s\", p.Datastore)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\ts := \"[datastore1] foo/bar.vmdk\"\n\tp := new(DatastorePath)\n\tok := p.FromString(s)\n\tif !ok {\n\t\tt.Fatal(s)\n\t}\n\n\tif p.String() != s {\n\t\tt.Fatal(p.String())\n\t}\n\n\tp.Path = \"\"\n\n\tif p.String() != \"[datastore1]\" {\n\t\tt.Fatal(p.String())\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/datastore_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\n// Datastore should implement the Reference interface.\nvar _ Reference = Datastore{}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/diagnostic_log.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n)\n\n// DiagnosticLog wraps DiagnosticManager.BrowseLog\ntype DiagnosticLog struct {\n\tm DiagnosticManager\n\n\tKey  string\n\tHost *HostSystem\n\n\tStart int32\n}\n\n// Seek to log position starting at the last nlines of the log\nfunc (l *DiagnosticLog) Seek(ctx context.Context, nlines int32) error {\n\th, err := l.m.BrowseLog(ctx, l.Host, l.Key, math.MaxInt32, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tl.Start = h.LineEnd - nlines\n\n\treturn nil\n}\n\n// Copy log starting from l.Start to the given io.Writer\n// Returns on error or when end of log is reached.\nfunc (l *DiagnosticLog) Copy(ctx context.Context, w io.Writer) (int, error) {\n\tconst max = 500 // VC max == 500, ESX max == 1000\n\twritten := 0\n\n\tfor {\n\t\th, err := l.m.BrowseLog(ctx, l.Host, l.Key, l.Start, max)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\tfor _, line := range h.LineText {\n\t\t\tn, err := fmt.Fprintln(w, line)\n\t\t\twritten += n\n\t\t\tif err != nil {\n\t\t\t\treturn written, err\n\t\t\t}\n\t\t}\n\n\t\tl.Start += int32(len(h.LineText))\n\n\t\tif l.Start >= h.LineEnd {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn written, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/diagnostic_manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype DiagnosticManager struct {\n\tCommon\n}\n\nfunc NewDiagnosticManager(c *vim25.Client) *DiagnosticManager {\n\tm := DiagnosticManager{\n\t\tCommon: NewCommon(c, *c.ServiceContent.DiagnosticManager),\n\t}\n\n\treturn &m\n}\n\nfunc (m DiagnosticManager) Log(ctx context.Context, host *HostSystem, key string) *DiagnosticLog {\n\treturn &DiagnosticLog{\n\t\tm:    m,\n\t\tKey:  key,\n\t\tHost: host,\n\t}\n}\n\nfunc (m DiagnosticManager) BrowseLog(ctx context.Context, host *HostSystem, key string, start, lines int32) (*types.DiagnosticManagerLogHeader, error) {\n\treq := types.BrowseDiagnosticLog{\n\t\tThis:  m.Reference(),\n\t\tKey:   key,\n\t\tStart: start,\n\t\tLines: lines,\n\t}\n\n\tif host != nil {\n\t\tref := host.Reference()\n\t\treq.Host = &ref\n\t}\n\n\tres, err := methods.BrowseDiagnosticLog(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n\nfunc (m DiagnosticManager) GenerateLogBundles(ctx context.Context, includeDefault bool, host []*HostSystem) (*Task, error) {\n\treq := types.GenerateLogBundles_Task{\n\t\tThis:           m.Reference(),\n\t\tIncludeDefault: includeDefault,\n\t}\n\n\tif host != nil {\n\t\tfor _, h 
:= range host {\n\t\t\treq.Host = append(req.Host, h.Reference())\n\t\t}\n\t}\n\n\tres, err := methods.GenerateLogBundles_Task(ctx, m.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(m.c, res.Returnval), nil\n}\n\nfunc (m DiagnosticManager) QueryDescriptions(ctx context.Context, host *HostSystem) ([]types.DiagnosticManagerLogDescriptor, error) {\n\treq := types.QueryDescriptions{\n\t\tThis: m.Reference(),\n\t}\n\n\tif host != nil {\n\t\tref := host.Reference()\n\t\treq.Host = &ref\n\t}\n\n\tres, err := methods.QueryDescriptions(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype DistributedVirtualPortgroup struct {\n\tCommon\n}\n\nfunc NewDistributedVirtualPortgroup(c *vim25.Client, ref types.ManagedObjectReference) *DistributedVirtualPortgroup {\n\treturn &DistributedVirtualPortgroup{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\n// EthernetCardBackingInfo returns the VirtualDeviceBackingInfo for this DistributedVirtualPortgroup\nfunc (p DistributedVirtualPortgroup) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) {\n\tvar dvp mo.DistributedVirtualPortgroup\n\tvar dvs mo.VmwareDistributedVirtualSwitch // TODO: should be mo.BaseDistributedVirtualSwitch\n\n\tif err := p.Properties(ctx, p.Reference(), []string{\"key\", \"config.distributedVirtualSwitch\"}, &dvp); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := p.Properties(ctx, *dvp.Config.DistributedVirtualSwitch, []string{\"uuid\"}, &dvs); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbacking := &types.VirtualEthernetCardDistributedVirtualPortBackingInfo{\n\t\tPort: types.DistributedVirtualSwitchPortConnection{\n\t\t\tPortgroupKey: dvp.Key,\n\t\t\tSwitchUuid:   dvs.Uuid,\n\t\t},\n\t}\n\n\treturn backing, 
nil\n}\n\nfunc (p DistributedVirtualPortgroup) Reconfigure(ctx context.Context, spec types.DVPortgroupConfigSpec) (*Task, error) {\n\treq := types.ReconfigureDVPortgroup_Task{\n\t\tThis: p.Reference(),\n\t\tSpec: spec,\n\t}\n\n\tres, err := methods.ReconfigureDVPortgroup_Task(ctx, p.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(p.Client(), res.Returnval), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\n// DistributedVirtualPortgroup should implement the Reference interface.\nvar _ Reference = DistributedVirtualPortgroup{}\n\n// DistributedVirtualPortgroup should implement the NetworkReference interface.\nvar _ NetworkReference = DistributedVirtualPortgroup{}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype DistributedVirtualSwitch struct {\n\tCommon\n}\n\nfunc NewDistributedVirtualSwitch(c *vim25.Client, ref types.ManagedObjectReference) *DistributedVirtualSwitch {\n\treturn &DistributedVirtualSwitch{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (s DistributedVirtualSwitch) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) {\n\treturn nil, ErrNotSupported // TODO: just to satisfy NetworkReference interface for the finder\n}\n\nfunc (s DistributedVirtualSwitch) Reconfigure(ctx context.Context, spec types.BaseDVSConfigSpec) (*Task, error) {\n\treq := types.ReconfigureDvs_Task{\n\t\tThis: s.Reference(),\n\t\tSpec: spec,\n\t}\n\n\tres, err := methods.ReconfigureDvs_Task(ctx, s.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(s.Client(), res.Returnval), nil\n}\n\nfunc (s DistributedVirtualSwitch) AddPortgroup(ctx context.Context, spec []types.DVPortgroupConfigSpec) (*Task, error) {\n\treq := types.AddDVPortgroup_Task{\n\t\tThis: s.Reference(),\n\t\tSpec: spec,\n\t}\n\n\tres, err := methods.AddDVPortgroup_Task(ctx, s.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(s.Client(), 
res.Returnval), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/extension_manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ExtensionManager struct {\n\tCommon\n}\n\n// GetExtensionManager wraps NewExtensionManager, returning ErrNotSupported\n// when the client is not connected to a vCenter instance.\nfunc GetExtensionManager(c *vim25.Client) (*ExtensionManager, error) {\n\tif c.ServiceContent.ExtensionManager == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn NewExtensionManager(c), nil\n}\n\nfunc NewExtensionManager(c *vim25.Client) *ExtensionManager {\n\to := ExtensionManager{\n\t\tCommon: NewCommon(c, *c.ServiceContent.ExtensionManager),\n\t}\n\n\treturn &o\n}\n\nfunc (m ExtensionManager) List(ctx context.Context) ([]types.Extension, error) {\n\tvar em mo.ExtensionManager\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"extensionList\"}, &em)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn em.ExtensionList, nil\n}\n\nfunc (m ExtensionManager) Find(ctx context.Context, key string) (*types.Extension, error) {\n\treq := types.FindExtension{\n\t\tThis:         m.Reference(),\n\t\tExtensionKey: key,\n\t}\n\n\tres, err := methods.FindExtension(ctx, m.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, 
nil\n}\n\nfunc (m ExtensionManager) Register(ctx context.Context, extension types.Extension) error {\n\treq := types.RegisterExtension{\n\t\tThis:      m.Reference(),\n\t\tExtension: extension,\n\t}\n\n\t_, err := methods.RegisterExtension(ctx, m.c, &req)\n\treturn err\n}\n\nfunc (m ExtensionManager) SetCertificate(ctx context.Context, key string, certificatePem string) error {\n\treq := types.SetExtensionCertificate{\n\t\tThis:           m.Reference(),\n\t\tExtensionKey:   key,\n\t\tCertificatePem: certificatePem,\n\t}\n\n\t_, err := methods.SetExtensionCertificate(ctx, m.c, &req)\n\treturn err\n}\n\nfunc (m ExtensionManager) Unregister(ctx context.Context, key string) error {\n\treq := types.UnregisterExtension{\n\t\tThis:         m.Reference(),\n\t\tExtensionKey: key,\n\t}\n\n\t_, err := methods.UnregisterExtension(ctx, m.c, &req)\n\treturn err\n}\n\nfunc (m ExtensionManager) Update(ctx context.Context, extension types.Extension) error {\n\treq := types.UpdateExtension{\n\t\tThis:      m.Reference(),\n\t\tExtension: extension,\n\t}\n\n\t_, err := methods.UpdateExtension(ctx, m.c, &req)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/file_manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype FileManager struct {\n\tCommon\n}\n\nfunc NewFileManager(c *vim25.Client) *FileManager {\n\tf := FileManager{\n\t\tCommon: NewCommon(c, *c.ServiceContent.FileManager),\n\t}\n\n\treturn &f\n}\n\nfunc (f FileManager) CopyDatastoreFile(ctx context.Context, sourceName string, sourceDatacenter *Datacenter, destinationName string, destinationDatacenter *Datacenter, force bool) (*Task, error) {\n\treq := types.CopyDatastoreFile_Task{\n\t\tThis:            f.Reference(),\n\t\tSourceName:      sourceName,\n\t\tDestinationName: destinationName,\n\t\tForce:           types.NewBool(force),\n\t}\n\n\tif sourceDatacenter != nil {\n\t\tref := sourceDatacenter.Reference()\n\t\treq.SourceDatacenter = &ref\n\t}\n\n\tif destinationDatacenter != nil {\n\t\tref := destinationDatacenter.Reference()\n\t\treq.DestinationDatacenter = &ref\n\t}\n\n\tres, err := methods.CopyDatastoreFile_Task(ctx, f.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(f.c, res.Returnval), nil\n}\n\n// DeleteDatastoreFile deletes the specified file or folder from the datastore.\nfunc (f FileManager) DeleteDatastoreFile(ctx context.Context, name string, dc *Datacenter) (*Task, error) {\n\treq := 
types.DeleteDatastoreFile_Task{\n\t\tThis: f.Reference(),\n\t\tName: name,\n\t}\n\n\tif dc != nil {\n\t\tref := dc.Reference()\n\t\treq.Datacenter = &ref\n\t}\n\n\tres, err := methods.DeleteDatastoreFile_Task(ctx, f.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(f.c, res.Returnval), nil\n}\n\n// MakeDirectory creates a folder using the specified name.\nfunc (f FileManager) MakeDirectory(ctx context.Context, name string, dc *Datacenter, createParentDirectories bool) error {\n\treq := types.MakeDirectory{\n\t\tThis: f.Reference(),\n\t\tName: name,\n\t\tCreateParentDirectories: types.NewBool(createParentDirectories),\n\t}\n\n\tif dc != nil {\n\t\tref := dc.Reference()\n\t\treq.Datacenter = &ref\n\t}\n\n\t_, err := methods.MakeDirectory(ctx, f.c, &req)\n\treturn err\n}\n\nfunc (f FileManager) MoveDatastoreFile(ctx context.Context, sourceName string, sourceDatacenter *Datacenter, destinationName string, destinationDatacenter *Datacenter, force bool) (*Task, error) {\n\treq := types.MoveDatastoreFile_Task{\n\t\tThis:            f.Reference(),\n\t\tSourceName:      sourceName,\n\t\tDestinationName: destinationName,\n\t\tForce:           types.NewBool(force),\n\t}\n\n\tif sourceDatacenter != nil {\n\t\tref := sourceDatacenter.Reference()\n\t\treq.SourceDatacenter = &ref\n\t}\n\n\tif destinationDatacenter != nil {\n\t\tref := destinationDatacenter.Reference()\n\t\treq.DestinationDatacenter = &ref\n\t}\n\n\tres, err := methods.MoveDatastoreFile_Task(ctx, f.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(f.c, res.Returnval), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/folder.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype Folder struct {\n\tCommon\n}\n\nfunc NewFolder(c *vim25.Client, ref types.ManagedObjectReference) *Folder {\n\treturn &Folder{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc NewRootFolder(c *vim25.Client) *Folder {\n\tf := NewFolder(c, c.ServiceContent.RootFolder)\n\tf.InventoryPath = \"/\"\n\treturn f\n}\n\nfunc (f Folder) Children(ctx context.Context) ([]Reference, error) {\n\tvar mf mo.Folder\n\n\terr := f.Properties(ctx, f.Reference(), []string{\"childEntity\"}, &mf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar rs []Reference\n\tfor _, e := range mf.ChildEntity {\n\t\tif r := NewReference(f.c, e); r != nil {\n\t\t\trs = append(rs, r)\n\t\t}\n\t}\n\n\treturn rs, nil\n}\n\nfunc (f Folder) CreateDatacenter(ctx context.Context, datacenter string) (*Datacenter, error) {\n\treq := types.CreateDatacenter{\n\t\tThis: f.Reference(),\n\t\tName: datacenter,\n\t}\n\n\tres, err := methods.CreateDatacenter(ctx, f.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Response will be nil if this is an ESX host that does not belong to a vCenter\n\tif res == nil {\n\t\treturn nil, nil\n\t}\n\n\treturn NewDatacenter(f.c, 
res.Returnval), nil\n}\n\nfunc (f Folder) CreateCluster(ctx context.Context, cluster string, spec types.ClusterConfigSpecEx) (*ClusterComputeResource, error) {\n\treq := types.CreateClusterEx{\n\t\tThis: f.Reference(),\n\t\tName: cluster,\n\t\tSpec: spec,\n\t}\n\n\tres, err := methods.CreateClusterEx(ctx, f.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Response will be nil if this is an ESX host that does not belong to a vCenter\n\tif res == nil {\n\t\treturn nil, nil\n\t}\n\n\treturn NewClusterComputeResource(f.c, res.Returnval), nil\n}\n\nfunc (f Folder) CreateFolder(ctx context.Context, name string) (*Folder, error) {\n\treq := types.CreateFolder{\n\t\tThis: f.Reference(),\n\t\tName: name,\n\t}\n\n\tres, err := methods.CreateFolder(ctx, f.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewFolder(f.c, res.Returnval), err\n}\n\nfunc (f Folder) CreateStoragePod(ctx context.Context, name string) (*StoragePod, error) {\n\treq := types.CreateStoragePod{\n\t\tThis: f.Reference(),\n\t\tName: name,\n\t}\n\n\tres, err := methods.CreateStoragePod(ctx, f.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewStoragePod(f.c, res.Returnval), err\n}\n\nfunc (f Folder) AddStandaloneHost(ctx context.Context, spec types.HostConnectSpec, addConnected bool, license *string, compResSpec *types.BaseComputeResourceConfigSpec) (*Task, error) {\n\treq := types.AddStandaloneHost_Task{\n\t\tThis:         f.Reference(),\n\t\tSpec:         spec,\n\t\tAddConnected: addConnected,\n\t}\n\n\tif license != nil {\n\t\treq.License = *license\n\t}\n\n\tif compResSpec != nil {\n\t\treq.CompResSpec = *compResSpec\n\t}\n\n\tres, err := methods.AddStandaloneHost_Task(ctx, f.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(f.c, res.Returnval), nil\n}\n\nfunc (f Folder) CreateVM(ctx context.Context, config types.VirtualMachineConfigSpec, pool *ResourcePool, host *HostSystem) (*Task, error) {\n\treq := types.CreateVM_Task{\n\t\tThis: 
  f.Reference(),\n\t\tConfig: config,\n\t\tPool:   pool.Reference(),\n\t}\n\n\tif host != nil {\n\t\tref := host.Reference()\n\t\treq.Host = &ref\n\t}\n\n\tres, err := methods.CreateVM_Task(ctx, f.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(f.c, res.Returnval), nil\n}\n\nfunc (f Folder) RegisterVM(ctx context.Context, path string, name string, asTemplate bool, pool *ResourcePool, host *HostSystem) (*Task, error) {\n\treq := types.RegisterVM_Task{\n\t\tThis:       f.Reference(),\n\t\tPath:       path,\n\t\tAsTemplate: asTemplate,\n\t}\n\n\tif name != \"\" {\n\t\treq.Name = name\n\t}\n\n\tif host != nil {\n\t\tref := host.Reference()\n\t\treq.Host = &ref\n\t}\n\n\tif pool != nil {\n\t\tref := pool.Reference()\n\t\treq.Pool = &ref\n\t}\n\n\tres, err := methods.RegisterVM_Task(ctx, f.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(f.c, res.Returnval), nil\n}\n\nfunc (f Folder) CreateDVS(ctx context.Context, spec types.DVSCreateSpec) (*Task, error) {\n\treq := types.CreateDVS_Task{\n\t\tThis: f.Reference(),\n\t\tSpec: spec,\n\t}\n\n\tres, err := methods.CreateDVS_Task(ctx, f.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(f.c, res.Returnval), nil\n}\n\nfunc (f Folder) MoveInto(ctx context.Context, list []types.ManagedObjectReference) (*Task, error) {\n\treq := types.MoveIntoFolder_Task{\n\t\tThis: f.Reference(),\n\t\tList: list,\n\t}\n\n\tres, err := methods.MoveIntoFolder_Task(ctx, f.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(f.c, res.Returnval), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/folder_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\n// Folder should implement the Reference interface.\nvar _ Reference = Folder{}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/history_collector.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype HistoryCollector struct {\n\tCommon\n}\n\nfunc NewHistoryCollector(c *vim25.Client, ref types.ManagedObjectReference) *HistoryCollector {\n\treturn &HistoryCollector{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (h HistoryCollector) Destroy(ctx context.Context) error {\n\treq := types.DestroyCollector{\n\t\tThis: h.Reference(),\n\t}\n\n\t_, err := methods.DestroyCollector(ctx, h.c, &req)\n\treturn err\n}\n\nfunc (h HistoryCollector) Reset(ctx context.Context) error {\n\treq := types.ResetCollector{\n\t\tThis: h.Reference(),\n\t}\n\n\t_, err := methods.ResetCollector(ctx, h.c, &req)\n\treturn err\n}\n\nfunc (h HistoryCollector) Rewind(ctx context.Context) error {\n\treq := types.RewindCollector{\n\t\tThis: h.Reference(),\n\t}\n\n\t_, err := methods.RewindCollector(ctx, h.c, &req)\n\treturn err\n}\n\nfunc (h HistoryCollector) SetPageSize(ctx context.Context, maxCount int32) error {\n\treq := types.SetCollectorPageSize{\n\t\tThis:     h.Reference(),\n\t\tMaxCount: maxCount,\n\t}\n\n\t_, err := methods.SetCollectorPageSize(ctx, h.c, &req)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/host_account_manager.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype HostAccountManager struct {\n\tCommon\n}\n\nfunc NewHostAccountManager(c *vim25.Client, ref types.ManagedObjectReference) *HostAccountManager {\n\treturn &HostAccountManager{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (m HostAccountManager) Create(ctx context.Context, user *types.HostAccountSpec) error {\n\treq := types.CreateUser{\n\t\tThis: m.Reference(),\n\t\tUser: user,\n\t}\n\n\t_, err := methods.CreateUser(ctx, m.Client(), &req)\n\treturn err\n}\n\nfunc (m HostAccountManager) Update(ctx context.Context, user *types.HostAccountSpec) error {\n\treq := types.UpdateUser{\n\t\tThis: m.Reference(),\n\t\tUser: user,\n\t}\n\n\t_, err := methods.UpdateUser(ctx, m.Client(), &req)\n\treturn err\n}\n\nfunc (m HostAccountManager) Remove(ctx context.Context, userName string) error {\n\treq := types.RemoveUser{\n\t\tThis:     m.Reference(),\n\t\tUserName: userName,\n\t}\n\n\t_, err := methods.RemoveUser(ctx, m.Client(), &req)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/host_certificate_info.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"crypto/sha256\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"crypto/x509/pkix\"\n\t\"encoding/asn1\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/url\"\n\t\"strings\"\n\t\"text/tabwriter\"\n\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\n// HostCertificateInfo provides helpers for types.HostCertificateManagerCertificateInfo\ntype HostCertificateInfo struct {\n\ttypes.HostCertificateManagerCertificateInfo\n\n\tThumbprintSHA1   string\n\tThumbprintSHA256 string\n\n\tErr         error\n\tCertificate *x509.Certificate `json:\"-\"`\n\n\tsubjectName *pkix.Name\n\tissuerName  *pkix.Name\n}\n\n// FromCertificate converts x509.Certificate to HostCertificateInfo\nfunc (info *HostCertificateInfo) FromCertificate(cert *x509.Certificate) *HostCertificateInfo {\n\tinfo.Certificate = cert\n\tinfo.subjectName = &cert.Subject\n\tinfo.issuerName = &cert.Issuer\n\n\tinfo.Issuer = info.fromName(info.issuerName)\n\tinfo.NotBefore = &cert.NotBefore\n\tinfo.NotAfter = &cert.NotAfter\n\tinfo.Subject = info.fromName(info.subjectName)\n\n\tinfo.ThumbprintSHA1 = soap.ThumbprintSHA1(cert)\n\n\t// SHA-256 for info purposes only, API fields all use SHA-1\n\tsum := sha256.Sum256(cert.Raw)\n\thex := make([]string, len(sum))\n\tfor i, b := range sum {\n\t\thex[i] = fmt.Sprintf(\"%02X\", b)\n\t}\n\tinfo.ThumbprintSHA256 = 
strings.Join(hex, \":\")\n\n\tif info.Status == \"\" {\n\t\tinfo.Status = string(types.HostCertificateManagerCertificateInfoCertificateStatusUnknown)\n\t}\n\n\treturn info\n}\n\n// FromURL connects to the given URL.Host via tls.Dial with the given tls.Config and populates the HostCertificateInfo\n// via tls.ConnectionState.  If the certificate was verified with the given tls.Config, the Err field will be nil.\n// Otherwise, Err will be set to the x509.UnknownAuthorityError or x509.HostnameError.\n// If tls.Dial returns an error of any other type, that error is returned.\nfunc (info *HostCertificateInfo) FromURL(u *url.URL, config *tls.Config) error {\n\taddr := u.Host\n\tif !(strings.LastIndex(addr, \":\") > strings.LastIndex(addr, \"]\")) {\n\t\taddr += \":443\"\n\t}\n\n\tconn, err := tls.Dial(\"tcp\", addr, config)\n\tif err != nil {\n\t\tswitch err.(type) {\n\t\tcase x509.UnknownAuthorityError:\n\t\tcase x509.HostnameError:\n\t\tdefault:\n\t\t\treturn err\n\t\t}\n\n\t\tinfo.Err = err\n\n\t\tconn, err = tls.Dial(\"tcp\", addr, &tls.Config{InsecureSkipVerify: true})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tinfo.Status = string(types.HostCertificateManagerCertificateInfoCertificateStatusGood)\n\t}\n\n\tstate := conn.ConnectionState()\n\t_ = conn.Close()\n\tinfo.FromCertificate(state.PeerCertificates[0])\n\n\treturn nil\n}\n\nvar emailAddressOID = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 1}\n\nfunc (info *HostCertificateInfo) fromName(name *pkix.Name) string {\n\tvar attrs []string\n\n\toids := map[string]string{\n\t\temailAddressOID.String(): \"emailAddress\",\n\t}\n\n\tfor _, attr := range name.Names {\n\t\tif key, ok := oids[attr.Type.String()]; ok {\n\t\t\tattrs = append(attrs, fmt.Sprintf(\"%s=%s\", key, attr.Value))\n\t\t}\n\t}\n\n\tattrs = append(attrs, fmt.Sprintf(\"CN=%s\", name.CommonName))\n\n\tadd := func(key string, vals []string) {\n\t\tfor _, val := range vals {\n\t\t\tattrs = append(attrs, fmt.Sprintf(\"%s=%s\", key, 
val))\n\t\t}\n\t}\n\n\telts := []struct {\n\t\tkey string\n\t\tval []string\n\t}{\n\t\t{\"OU\", name.OrganizationalUnit},\n\t\t{\"O\", name.Organization},\n\t\t{\"L\", name.Locality},\n\t\t{\"ST\", name.Province},\n\t\t{\"C\", name.Country},\n\t}\n\n\tfor _, elt := range elts {\n\t\tadd(elt.key, elt.val)\n\t}\n\n\treturn strings.Join(attrs, \",\")\n}\n\nfunc (info *HostCertificateInfo) toName(s string) *pkix.Name {\n\tvar name pkix.Name\n\n\tfor _, pair := range strings.Split(s, \",\") {\n\t\tattr := strings.SplitN(pair, \"=\", 2)\n\t\tif len(attr) != 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tv := attr[1]\n\n\t\tswitch strings.ToLower(attr[0]) {\n\t\tcase \"cn\":\n\t\t\tname.CommonName = v\n\t\tcase \"ou\":\n\t\t\tname.OrganizationalUnit = append(name.OrganizationalUnit, v)\n\t\tcase \"o\":\n\t\t\tname.Organization = append(name.Organization, v)\n\t\tcase \"l\":\n\t\t\tname.Locality = append(name.Locality, v)\n\t\tcase \"st\":\n\t\t\tname.Province = append(name.Province, v)\n\t\tcase \"c\":\n\t\t\tname.Country = append(name.Country, v)\n\t\tcase \"emailaddress\":\n\t\t\tname.Names = append(name.Names, pkix.AttributeTypeAndValue{Type: emailAddressOID, Value: v})\n\t\t}\n\t}\n\n\treturn &name\n}\n\n// SubjectName parses Subject into a pkix.Name\nfunc (info *HostCertificateInfo) SubjectName() *pkix.Name {\n\tif info.subjectName != nil {\n\t\treturn info.subjectName\n\t}\n\n\treturn info.toName(info.Subject)\n}\n\n// IssuerName parses Issuer into a pkix.Name\nfunc (info *HostCertificateInfo) IssuerName() *pkix.Name {\n\tif info.issuerName != nil {\n\t\treturn info.issuerName\n\t}\n\n\treturn info.toName(info.Issuer)\n}\n\n// Write outputs info similar to the Chrome Certificate Viewer.\nfunc (info *HostCertificateInfo) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0)\n\n\ts := func(val string) string {\n\t\tif val != \"\" {\n\t\t\treturn val\n\t\t}\n\t\treturn \"<Not Part Of Certificate>\"\n\t}\n\n\tss := func(val []string) string {\n\t\treturn 
s(strings.Join(val, \",\"))\n\t}\n\n\tname := func(n *pkix.Name) {\n\t\tfmt.Fprintf(tw, \"  Common Name (CN):\\t%s\\n\", s(n.CommonName))\n\t\tfmt.Fprintf(tw, \"  Organization (O):\\t%s\\n\", ss(n.Organization))\n\t\tfmt.Fprintf(tw, \"  Organizational Unit (OU):\\t%s\\n\", ss(n.OrganizationalUnit))\n\t}\n\n\tstatus := info.Status\n\tif info.Err != nil {\n\t\tstatus = fmt.Sprintf(\"ERROR %s\", info.Err)\n\t}\n\tfmt.Fprintf(tw, \"Certificate Status:\\t%s\\n\", status)\n\n\tfmt.Fprintln(tw, \"Issued To:\\t\")\n\tname(info.SubjectName())\n\n\tfmt.Fprintln(tw, \"Issued By:\\t\")\n\tname(info.IssuerName())\n\n\tfmt.Fprintln(tw, \"Validity Period:\\t\")\n\tfmt.Fprintf(tw, \"  Issued On:\\t%s\\n\", info.NotBefore)\n\tfmt.Fprintf(tw, \"  Expires On:\\t%s\\n\", info.NotAfter)\n\n\tif info.ThumbprintSHA1 != \"\" {\n\t\tfmt.Fprintln(tw, \"Thumbprints:\\t\")\n\t\tif info.ThumbprintSHA256 != \"\" {\n\t\t\tfmt.Fprintf(tw, \"  SHA-256 Thumbprint:\\t%s\\n\", info.ThumbprintSHA256)\n\t\t}\n\t\tfmt.Fprintf(tw, \"  SHA-1 Thumbprint:\\t%s\\n\", info.ThumbprintSHA1)\n\t}\n\n\treturn tw.Flush()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/host_certificate_info_test.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport \"testing\"\n\nfunc TestHostCertificateManagerCertificateInfo(t *testing.T) {\n\tsubject := \"emailAddress=vmca@vmware.com,CN=w2-xlr8-autoroot-esx004.eng.vmware.com,OU=VMware Engineering,O=VMware,L=Palo Alto,ST=California,C=US\"\n\n\tvar info HostCertificateInfo\n\tname := info.toName(subject)\n\ts := info.fromName(name)\n\tif subject != s {\n\t\tt.Errorf(\"%s != %s\", s, subject)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/host_certificate_manager.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\n// HostCertificateManager provides helper methods around the HostSystem.ConfigManager.CertificateManager\ntype HostCertificateManager struct {\n\tCommon\n\tHost *HostSystem\n}\n\n// NewHostCertificateManager creates a new HostCertificateManager helper\nfunc NewHostCertificateManager(c *vim25.Client, ref types.ManagedObjectReference, host types.ManagedObjectReference) *HostCertificateManager {\n\treturn &HostCertificateManager{\n\t\tCommon: NewCommon(c, ref),\n\t\tHost:   NewHostSystem(c, host),\n\t}\n}\n\n// CertificateInfo wraps the host CertificateManager certificateInfo property with the HostCertificateInfo helper.\n// The ThumbprintSHA1 field is set to HostSystem.Summary.Config.SslThumbprint if the host system is managed by a vCenter.\nfunc (m HostCertificateManager) CertificateInfo(ctx context.Context) (*HostCertificateInfo, error) {\n\tvar hs mo.HostSystem\n\tvar cm mo.HostCertificateManager\n\n\tpc := property.DefaultCollector(m.Client())\n\n\terr := pc.RetrieveOne(ctx, m.Reference(), []string{\"certificateInfo\"}, &cm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_ = 
pc.RetrieveOne(ctx, m.Host.Reference(), []string{\"summary.config.sslThumbprint\"}, &hs)\n\n\treturn &HostCertificateInfo{\n\t\tHostCertificateManagerCertificateInfo: cm.CertificateInfo,\n\t\tThumbprintSHA1:                        hs.Summary.Config.SslThumbprint,\n\t}, nil\n}\n\n// GenerateCertificateSigningRequest requests the host system to generate a certificate-signing request (CSR) for itself.\n// The CSR is then typically provided to a Certificate Authority to sign and issue the SSL certificate for the host system.\n// Use InstallServerCertificate to import this certificate.\nfunc (m HostCertificateManager) GenerateCertificateSigningRequest(ctx context.Context, useIPAddressAsCommonName bool) (string, error) {\n\treq := types.GenerateCertificateSigningRequest{\n\t\tThis: m.Reference(),\n\t\tUseIpAddressAsCommonName: useIPAddressAsCommonName,\n\t}\n\n\tres, err := methods.GenerateCertificateSigningRequest(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res.Returnval, nil\n}\n\n// GenerateCertificateSigningRequestByDn requests the host system to generate a certificate-signing request (CSR) for itself.\n// Alternative version similar to GenerateCertificateSigningRequest but takes a Distinguished Name (DN) as a parameter.\nfunc (m HostCertificateManager) GenerateCertificateSigningRequestByDn(ctx context.Context, distinguishedName string) (string, error) {\n\treq := types.GenerateCertificateSigningRequestByDn{\n\t\tThis:              m.Reference(),\n\t\tDistinguishedName: distinguishedName,\n\t}\n\n\tres, err := methods.GenerateCertificateSigningRequestByDn(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res.Returnval, nil\n}\n\n// InstallServerCertificate imports the given SSL certificate to the host system.\nfunc (m HostCertificateManager) InstallServerCertificate(ctx context.Context, cert string) error {\n\treq := types.InstallServerCertificate{\n\t\tThis: m.Reference(),\n\t\tCert: 
cert,\n\t}\n\n\t_, err := methods.InstallServerCertificate(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// NotifyAffectedService is internal, not exposing as we don't have a use case other than with InstallServerCertificate\n\t// Without this call, hostd needs to be restarted to use the updated certificate\n\t// Note: using Refresh as it has the same struct/signature, we just need to use different xml name tags\n\tbody := struct {\n\t\tReq *types.Refresh         `xml:\"urn:vim25 NotifyAffectedServices,omitempty\"`\n\t\tRes *types.RefreshResponse `xml:\"urn:vim25 NotifyAffectedServicesResponse,omitempty\"`\n\t\tmethods.RefreshBody\n\t}{\n\t\tReq: &types.Refresh{This: m.Reference()},\n\t}\n\n\treturn m.Client().RoundTrip(ctx, &body, &body)\n}\n\n// ListCACertificateRevocationLists returns the SSL CRLs of Certificate Authorities that are trusted by the host system.\nfunc (m HostCertificateManager) ListCACertificateRevocationLists(ctx context.Context) ([]string, error) {\n\treq := types.ListCACertificateRevocationLists{\n\t\tThis: m.Reference(),\n\t}\n\n\tres, err := methods.ListCACertificateRevocationLists(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\n// ListCACertificates returns the SSL certificates of Certificate Authorities that are trusted by the host system.\nfunc (m HostCertificateManager) ListCACertificates(ctx context.Context) ([]string, error) {\n\treq := types.ListCACertificates{\n\t\tThis: m.Reference(),\n\t}\n\n\tres, err := methods.ListCACertificates(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\n// ReplaceCACertificatesAndCRLs replaces the trusted CA certificates and CRL used by the host system.\n// These determine whether the server can verify the identity of an external entity.\nfunc (m HostCertificateManager) ReplaceCACertificatesAndCRLs(ctx context.Context, caCert []string, caCrl []string) error {\n\treq := 
types.ReplaceCACertificatesAndCRLs{\n\t\tThis:   m.Reference(),\n\t\tCaCert: caCert,\n\t\tCaCrl:  caCrl,\n\t}\n\n\t_, err := methods.ReplaceCACertificatesAndCRLs(ctx, m.Client(), &req)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/host_config_manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype HostConfigManager struct {\n\tCommon\n}\n\nfunc NewHostConfigManager(c *vim25.Client, ref types.ManagedObjectReference) *HostConfigManager {\n\treturn &HostConfigManager{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (m HostConfigManager) DatastoreSystem(ctx context.Context) (*HostDatastoreSystem, error) {\n\tvar h mo.HostSystem\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"configManager.datastoreSystem\"}, &h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewHostDatastoreSystem(m.c, *h.ConfigManager.DatastoreSystem), nil\n}\n\nfunc (m HostConfigManager) NetworkSystem(ctx context.Context) (*HostNetworkSystem, error) {\n\tvar h mo.HostSystem\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"configManager.networkSystem\"}, &h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewHostNetworkSystem(m.c, *h.ConfigManager.NetworkSystem), nil\n}\n\nfunc (m HostConfigManager) FirewallSystem(ctx context.Context) (*HostFirewallSystem, error) {\n\tvar h mo.HostSystem\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"configManager.firewallSystem\"}, &h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewHostFirewallSystem(m.c, 
*h.ConfigManager.FirewallSystem), nil\n}\n\nfunc (m HostConfigManager) StorageSystem(ctx context.Context) (*HostStorageSystem, error) {\n\tvar h mo.HostSystem\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"configManager.storageSystem\"}, &h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewHostStorageSystem(m.c, *h.ConfigManager.StorageSystem), nil\n}\n\nfunc (m HostConfigManager) VirtualNicManager(ctx context.Context) (*HostVirtualNicManager, error) {\n\tvar h mo.HostSystem\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"configManager.virtualNicManager\"}, &h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewHostVirtualNicManager(m.c, *h.ConfigManager.VirtualNicManager, m.Reference()), nil\n}\n\nfunc (m HostConfigManager) VsanSystem(ctx context.Context) (*HostVsanSystem, error) {\n\tvar h mo.HostSystem\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"configManager.vsanSystem\"}, &h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Added in 5.5\n\tif h.ConfigManager.VsanSystem == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\n\treturn NewHostVsanSystem(m.c, *h.ConfigManager.VsanSystem), nil\n}\n\nfunc (m HostConfigManager) VsanInternalSystem(ctx context.Context) (*HostVsanInternalSystem, error) {\n\tvar h mo.HostSystem\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"configManager.vsanInternalSystem\"}, &h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Added in 5.5\n\tif h.ConfigManager.VsanInternalSystem == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\n\treturn NewHostVsanInternalSystem(m.c, *h.ConfigManager.VsanInternalSystem), nil\n}\n\nfunc (m HostConfigManager) AccountManager(ctx context.Context) (*HostAccountManager, error) {\n\tvar h mo.HostSystem\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"configManager.accountManager\"}, &h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tref := h.ConfigManager.AccountManager // Added in 6.0\n\tif ref == nil {\n\t\t// Versions < 5.5 can use the 
ServiceContent ref,\n\t\t// but we can only use it when connected directly to ESX.\n\t\tc := m.Client()\n\t\tif !c.IsVC() {\n\t\t\tref = c.ServiceContent.AccountManager\n\t\t}\n\n\t\tif ref == nil {\n\t\t\treturn nil, ErrNotSupported\n\t\t}\n\t}\n\n\treturn NewHostAccountManager(m.c, *ref), nil\n}\n\nfunc (m HostConfigManager) OptionManager(ctx context.Context) (*OptionManager, error) {\n\tvar h mo.HostSystem\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"configManager.advancedOption\"}, &h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewOptionManager(m.c, *h.ConfigManager.AdvancedOption), nil\n}\n\nfunc (m HostConfigManager) ServiceSystem(ctx context.Context) (*HostServiceSystem, error) {\n\tvar h mo.HostSystem\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"configManager.serviceSystem\"}, &h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewHostServiceSystem(m.c, *h.ConfigManager.ServiceSystem), nil\n}\n\nfunc (m HostConfigManager) CertificateManager(ctx context.Context) (*HostCertificateManager, error) {\n\tvar h mo.HostSystem\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"configManager.certificateManager\"}, &h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Added in 6.0\n\tif h.ConfigManager.CertificateManager == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\n\treturn NewHostCertificateManager(m.c, *h.ConfigManager.CertificateManager, m.Reference()), nil\n}\n\nfunc (m HostConfigManager) DateTimeSystem(ctx context.Context) (*HostDateTimeSystem, error) {\n\tvar h mo.HostSystem\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"configManager.dateTimeSystem\"}, &h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewHostDateTimeSystem(m.c, *h.ConfigManager.DateTimeSystem), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/host_datastore_browser.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype HostDatastoreBrowser struct {\n\tCommon\n}\n\nfunc NewHostDatastoreBrowser(c *vim25.Client, ref types.ManagedObjectReference) *HostDatastoreBrowser {\n\treturn &HostDatastoreBrowser{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (b HostDatastoreBrowser) SearchDatastore(ctx context.Context, datastorePath string, searchSpec *types.HostDatastoreBrowserSearchSpec) (*Task, error) {\n\treq := types.SearchDatastore_Task{\n\t\tThis:          b.Reference(),\n\t\tDatastorePath: datastorePath,\n\t\tSearchSpec:    searchSpec,\n\t}\n\n\tres, err := methods.SearchDatastore_Task(ctx, b.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(b.c, res.Returnval), nil\n}\n\nfunc (b HostDatastoreBrowser) SearchDatastoreSubFolders(ctx context.Context, datastorePath string, searchSpec *types.HostDatastoreBrowserSearchSpec) (*Task, error) {\n\treq := types.SearchDatastoreSubFolders_Task{\n\t\tThis:          b.Reference(),\n\t\tDatastorePath: datastorePath,\n\t\tSearchSpec:    searchSpec,\n\t}\n\n\tres, err := methods.SearchDatastoreSubFolders_Task(ctx, b.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(b.c, res.Returnval), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/host_datastore_system.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype HostDatastoreSystem struct {\n\tCommon\n}\n\nfunc NewHostDatastoreSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostDatastoreSystem {\n\treturn &HostDatastoreSystem{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (s HostDatastoreSystem) CreateLocalDatastore(ctx context.Context, name string, path string) (*Datastore, error) {\n\treq := types.CreateLocalDatastore{\n\t\tThis: s.Reference(),\n\t\tName: name,\n\t\tPath: path,\n\t}\n\n\tres, err := methods.CreateLocalDatastore(ctx, s.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewDatastore(s.Client(), res.Returnval), nil\n}\n\nfunc (s HostDatastoreSystem) CreateNasDatastore(ctx context.Context, spec types.HostNasVolumeSpec) (*Datastore, error) {\n\treq := types.CreateNasDatastore{\n\t\tThis: s.Reference(),\n\t\tSpec: spec,\n\t}\n\n\tres, err := methods.CreateNasDatastore(ctx, s.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewDatastore(s.Client(), res.Returnval), nil\n}\n\nfunc (s HostDatastoreSystem) CreateVmfsDatastore(ctx context.Context, spec types.VmfsDatastoreCreateSpec) (*Datastore, error) {\n\treq := types.CreateVmfsDatastore{\n\t\tThis: 
s.Reference(),\n\t\tSpec: spec,\n\t}\n\n\tres, err := methods.CreateVmfsDatastore(ctx, s.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewDatastore(s.Client(), res.Returnval), nil\n}\n\nfunc (s HostDatastoreSystem) Remove(ctx context.Context, ds *Datastore) error {\n\treq := types.RemoveDatastore{\n\t\tThis:      s.Reference(),\n\t\tDatastore: ds.Reference(),\n\t}\n\n\t_, err := methods.RemoveDatastore(ctx, s.Client(), &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s HostDatastoreSystem) QueryAvailableDisksForVmfs(ctx context.Context) ([]types.HostScsiDisk, error) {\n\treq := types.QueryAvailableDisksForVmfs{\n\t\tThis: s.Reference(),\n\t}\n\n\tres, err := methods.QueryAvailableDisksForVmfs(ctx, s.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (s HostDatastoreSystem) QueryVmfsDatastoreCreateOptions(ctx context.Context, devicePath string) ([]types.VmfsDatastoreOption, error) {\n\treq := types.QueryVmfsDatastoreCreateOptions{\n\t\tThis:       s.Reference(),\n\t\tDevicePath: devicePath,\n\t}\n\n\tres, err := methods.QueryVmfsDatastoreCreateOptions(ctx, s.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/host_date_time_system.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype HostDateTimeSystem struct {\n\tCommon\n}\n\nfunc NewHostDateTimeSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostDateTimeSystem {\n\treturn &HostDateTimeSystem{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (s HostDateTimeSystem) UpdateConfig(ctx context.Context, config types.HostDateTimeConfig) error {\n\treq := types.UpdateDateTimeConfig{\n\t\tThis:   s.Reference(),\n\t\tConfig: config,\n\t}\n\n\t_, err := methods.UpdateDateTimeConfig(ctx, s.c, &req)\n\treturn err\n}\n\nfunc (s HostDateTimeSystem) Update(ctx context.Context, date time.Time) error {\n\treq := types.UpdateDateTime{\n\t\tThis:     s.Reference(),\n\t\tDateTime: date,\n\t}\n\n\t_, err := methods.UpdateDateTime(ctx, s.c, &req)\n\treturn err\n}\n\nfunc (s HostDateTimeSystem) Query(ctx context.Context) (*time.Time, error) {\n\treq := types.QueryDateTime{\n\t\tThis: s.Reference(),\n\t}\n\n\tres, err := methods.QueryDateTime(ctx, s.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/host_firewall_system.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype HostFirewallSystem struct {\n\tCommon\n}\n\nfunc NewHostFirewallSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostFirewallSystem {\n\treturn &HostFirewallSystem{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (s HostFirewallSystem) DisableRuleset(ctx context.Context, id string) error {\n\treq := types.DisableRuleset{\n\t\tThis: s.Reference(),\n\t\tId:   id,\n\t}\n\n\t_, err := methods.DisableRuleset(ctx, s.c, &req)\n\treturn err\n}\n\nfunc (s HostFirewallSystem) EnableRuleset(ctx context.Context, id string) error {\n\treq := types.EnableRuleset{\n\t\tThis: s.Reference(),\n\t\tId:   id,\n\t}\n\n\t_, err := methods.EnableRuleset(ctx, s.c, &req)\n\treturn err\n}\n\nfunc (s HostFirewallSystem) Refresh(ctx context.Context) error {\n\treq := types.RefreshFirewall{\n\t\tThis: s.Reference(),\n\t}\n\n\t_, err := methods.RefreshFirewall(ctx, s.c, &req)\n\treturn err\n}\n\nfunc (s HostFirewallSystem) Info(ctx context.Context) (*types.HostFirewallInfo, error) {\n\tvar fs mo.HostFirewallSystem\n\n\terr := s.Properties(ctx, s.Reference(), []string{\"firewallInfo\"}, &fs)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\n\treturn fs.FirewallInfo, nil\n}\n\n// HostFirewallRulesetList provides helpers for a slice of types.HostFirewallRuleset\ntype HostFirewallRulesetList []types.HostFirewallRuleset\n\n// ByRule returns a HostFirewallRulesetList where Direction, PortType and Protocol are equal and Port is within range\nfunc (l HostFirewallRulesetList) ByRule(rule types.HostFirewallRule) HostFirewallRulesetList {\n\tvar matches HostFirewallRulesetList\n\n\tfor _, rs := range l {\n\t\tfor _, r := range rs.Rule {\n\t\t\tif r.PortType != rule.PortType ||\n\t\t\t\tr.Protocol != rule.Protocol ||\n\t\t\t\tr.Direction != rule.Direction {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif r.EndPort == 0 && rule.Port == r.Port ||\n\t\t\t\trule.Port >= r.Port && rule.Port <= r.EndPort {\n\t\t\t\tmatches = append(matches, rs)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn matches\n}\n\n// EnabledByRule returns a HostFirewallRulesetList with Match(rule) applied and filtered via Enabled()\n// if enabled param is true, otherwise filtered via Disabled().\n// An error is returned if the resulting list is empty.\nfunc (l HostFirewallRulesetList) EnabledByRule(rule types.HostFirewallRule, enabled bool) (HostFirewallRulesetList, error) {\n\tvar matched, skipped HostFirewallRulesetList\n\tvar matchedKind, skippedKind string\n\n\tl = l.ByRule(rule)\n\n\tif enabled {\n\t\tmatched = l.Enabled()\n\t\tmatchedKind = \"enabled\"\n\n\t\tskipped = l.Disabled()\n\t\tskippedKind = \"disabled\"\n\t} else {\n\t\tmatched = l.Disabled()\n\t\tmatchedKind = \"disabled\"\n\n\t\tskipped = l.Enabled()\n\t\tskippedKind = \"enabled\"\n\t}\n\n\tif len(matched) == 0 {\n\t\tmsg := fmt.Sprintf(\"%d %s firewall rulesets match %s %s %s %d, %d %s rulesets match\",\n\t\t\tlen(matched), matchedKind,\n\t\t\trule.Direction, rule.Protocol, rule.PortType, rule.Port,\n\t\t\tlen(skipped), skippedKind)\n\n\t\tif len(skipped) != 0 {\n\t\t\tmsg += fmt.Sprintf(\": %s\", strings.Join(skipped.Keys(), \", 
\"))\n\t\t}\n\n\t\treturn nil, errors.New(msg)\n\t}\n\n\treturn matched, nil\n}\n\n// Enabled returns a HostFirewallRulesetList with enabled rules\nfunc (l HostFirewallRulesetList) Enabled() HostFirewallRulesetList {\n\tvar matches HostFirewallRulesetList\n\n\tfor _, rs := range l {\n\t\tif rs.Enabled {\n\t\t\tmatches = append(matches, rs)\n\t\t}\n\t}\n\n\treturn matches\n}\n\n// Disabled returns a HostFirewallRulesetList with disabled rules\nfunc (l HostFirewallRulesetList) Disabled() HostFirewallRulesetList {\n\tvar matches HostFirewallRulesetList\n\n\tfor _, rs := range l {\n\t\tif !rs.Enabled {\n\t\t\tmatches = append(matches, rs)\n\t\t}\n\t}\n\n\treturn matches\n}\n\n// Keys returns the HostFirewallRuleset.Key for each ruleset in the list\nfunc (l HostFirewallRulesetList) Keys() []string {\n\tvar keys []string\n\n\tfor _, rs := range l {\n\t\tkeys = append(keys, rs.Key)\n\t}\n\n\treturn keys\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/host_network_system.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype HostNetworkSystem struct {\n\tCommon\n}\n\nfunc NewHostNetworkSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostNetworkSystem {\n\treturn &HostNetworkSystem{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\n// AddPortGroup wraps methods.AddPortGroup\nfunc (o HostNetworkSystem) AddPortGroup(ctx context.Context, portgrp types.HostPortGroupSpec) error {\n\treq := types.AddPortGroup{\n\t\tThis:    o.Reference(),\n\t\tPortgrp: portgrp,\n\t}\n\n\t_, err := methods.AddPortGroup(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// AddServiceConsoleVirtualNic wraps methods.AddServiceConsoleVirtualNic\nfunc (o HostNetworkSystem) AddServiceConsoleVirtualNic(ctx context.Context, portgroup string, nic types.HostVirtualNicSpec) (string, error) {\n\treq := types.AddServiceConsoleVirtualNic{\n\t\tThis:      o.Reference(),\n\t\tPortgroup: portgroup,\n\t\tNic:       nic,\n\t}\n\n\tres, err := methods.AddServiceConsoleVirtualNic(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res.Returnval, nil\n}\n\n// AddVirtualNic wraps methods.AddVirtualNic\nfunc (o HostNetworkSystem) AddVirtualNic(ctx context.Context, portgroup string, nic 
types.HostVirtualNicSpec) (string, error) {\n\treq := types.AddVirtualNic{\n\t\tThis:      o.Reference(),\n\t\tPortgroup: portgroup,\n\t\tNic:       nic,\n\t}\n\n\tres, err := methods.AddVirtualNic(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res.Returnval, nil\n}\n\n// AddVirtualSwitch wraps methods.AddVirtualSwitch\nfunc (o HostNetworkSystem) AddVirtualSwitch(ctx context.Context, vswitchName string, spec *types.HostVirtualSwitchSpec) error {\n\treq := types.AddVirtualSwitch{\n\t\tThis:        o.Reference(),\n\t\tVswitchName: vswitchName,\n\t\tSpec:        spec,\n\t}\n\n\t_, err := methods.AddVirtualSwitch(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// QueryNetworkHint wraps methods.QueryNetworkHint\nfunc (o HostNetworkSystem) QueryNetworkHint(ctx context.Context, device []string) error {\n\treq := types.QueryNetworkHint{\n\t\tThis:   o.Reference(),\n\t\tDevice: device,\n\t}\n\n\t_, err := methods.QueryNetworkHint(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// RefreshNetworkSystem wraps methods.RefreshNetworkSystem\nfunc (o HostNetworkSystem) RefreshNetworkSystem(ctx context.Context) error {\n\treq := types.RefreshNetworkSystem{\n\t\tThis: o.Reference(),\n\t}\n\n\t_, err := methods.RefreshNetworkSystem(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// RemovePortGroup wraps methods.RemovePortGroup\nfunc (o HostNetworkSystem) RemovePortGroup(ctx context.Context, pgName string) error {\n\treq := types.RemovePortGroup{\n\t\tThis:   o.Reference(),\n\t\tPgName: pgName,\n\t}\n\n\t_, err := methods.RemovePortGroup(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// RemoveServiceConsoleVirtualNic wraps methods.RemoveServiceConsoleVirtualNic\nfunc (o HostNetworkSystem) RemoveServiceConsoleVirtualNic(ctx context.Context, device string) error {\n\treq := types.RemoveServiceConsoleVirtualNic{\n\t\tThis:   
o.Reference(),\n\t\tDevice: device,\n\t}\n\n\t_, err := methods.RemoveServiceConsoleVirtualNic(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// RemoveVirtualNic wraps methods.RemoveVirtualNic\nfunc (o HostNetworkSystem) RemoveVirtualNic(ctx context.Context, device string) error {\n\treq := types.RemoveVirtualNic{\n\t\tThis:   o.Reference(),\n\t\tDevice: device,\n\t}\n\n\t_, err := methods.RemoveVirtualNic(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// RemoveVirtualSwitch wraps methods.RemoveVirtualSwitch\nfunc (o HostNetworkSystem) RemoveVirtualSwitch(ctx context.Context, vswitchName string) error {\n\treq := types.RemoveVirtualSwitch{\n\t\tThis:        o.Reference(),\n\t\tVswitchName: vswitchName,\n\t}\n\n\t_, err := methods.RemoveVirtualSwitch(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// RestartServiceConsoleVirtualNic wraps methods.RestartServiceConsoleVirtualNic\nfunc (o HostNetworkSystem) RestartServiceConsoleVirtualNic(ctx context.Context, device string) error {\n\treq := types.RestartServiceConsoleVirtualNic{\n\t\tThis:   o.Reference(),\n\t\tDevice: device,\n\t}\n\n\t_, err := methods.RestartServiceConsoleVirtualNic(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// UpdateConsoleIpRouteConfig wraps methods.UpdateConsoleIpRouteConfig\nfunc (o HostNetworkSystem) UpdateConsoleIpRouteConfig(ctx context.Context, config types.BaseHostIpRouteConfig) error {\n\treq := types.UpdateConsoleIpRouteConfig{\n\t\tThis:   o.Reference(),\n\t\tConfig: config,\n\t}\n\n\t_, err := methods.UpdateConsoleIpRouteConfig(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// UpdateDnsConfig wraps methods.UpdateDnsConfig\nfunc (o HostNetworkSystem) UpdateDnsConfig(ctx context.Context, config types.BaseHostDnsConfig) error {\n\treq := types.UpdateDnsConfig{\n\t\tThis:   o.Reference(),\n\t\tConfig: config,\n\t}\n\n\t_, err := 
methods.UpdateDnsConfig(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// UpdateIpRouteConfig wraps methods.UpdateIpRouteConfig\nfunc (o HostNetworkSystem) UpdateIpRouteConfig(ctx context.Context, config types.BaseHostIpRouteConfig) error {\n\treq := types.UpdateIpRouteConfig{\n\t\tThis:   o.Reference(),\n\t\tConfig: config,\n\t}\n\n\t_, err := methods.UpdateIpRouteConfig(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// UpdateIpRouteTableConfig wraps methods.UpdateIpRouteTableConfig\nfunc (o HostNetworkSystem) UpdateIpRouteTableConfig(ctx context.Context, config types.HostIpRouteTableConfig) error {\n\treq := types.UpdateIpRouteTableConfig{\n\t\tThis:   o.Reference(),\n\t\tConfig: config,\n\t}\n\n\t_, err := methods.UpdateIpRouteTableConfig(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// UpdateNetworkConfig wraps methods.UpdateNetworkConfig\nfunc (o HostNetworkSystem) UpdateNetworkConfig(ctx context.Context, config types.HostNetworkConfig, changeMode string) (*types.HostNetworkConfigResult, error) {\n\treq := types.UpdateNetworkConfig{\n\t\tThis:       o.Reference(),\n\t\tConfig:     config,\n\t\tChangeMode: changeMode,\n\t}\n\n\tres, err := methods.UpdateNetworkConfig(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n\n// UpdatePhysicalNicLinkSpeed wraps methods.UpdatePhysicalNicLinkSpeed\nfunc (o HostNetworkSystem) UpdatePhysicalNicLinkSpeed(ctx context.Context, device string, linkSpeed *types.PhysicalNicLinkInfo) error {\n\treq := types.UpdatePhysicalNicLinkSpeed{\n\t\tThis:      o.Reference(),\n\t\tDevice:    device,\n\t\tLinkSpeed: linkSpeed,\n\t}\n\n\t_, err := methods.UpdatePhysicalNicLinkSpeed(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// UpdatePortGroup wraps methods.UpdatePortGroup\nfunc (o HostNetworkSystem) UpdatePortGroup(ctx context.Context, pgName string, portgrp 
types.HostPortGroupSpec) error {\n\treq := types.UpdatePortGroup{\n\t\tThis:    o.Reference(),\n\t\tPgName:  pgName,\n\t\tPortgrp: portgrp,\n\t}\n\n\t_, err := methods.UpdatePortGroup(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// UpdateServiceConsoleVirtualNic wraps methods.UpdateServiceConsoleVirtualNic\nfunc (o HostNetworkSystem) UpdateServiceConsoleVirtualNic(ctx context.Context, device string, nic types.HostVirtualNicSpec) error {\n\treq := types.UpdateServiceConsoleVirtualNic{\n\t\tThis:   o.Reference(),\n\t\tDevice: device,\n\t\tNic:    nic,\n\t}\n\n\t_, err := methods.UpdateServiceConsoleVirtualNic(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// UpdateVirtualNic wraps methods.UpdateVirtualNic\nfunc (o HostNetworkSystem) UpdateVirtualNic(ctx context.Context, device string, nic types.HostVirtualNicSpec) error {\n\treq := types.UpdateVirtualNic{\n\t\tThis:   o.Reference(),\n\t\tDevice: device,\n\t\tNic:    nic,\n\t}\n\n\t_, err := methods.UpdateVirtualNic(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// UpdateVirtualSwitch wraps methods.UpdateVirtualSwitch\nfunc (o HostNetworkSystem) UpdateVirtualSwitch(ctx context.Context, vswitchName string, spec types.HostVirtualSwitchSpec) error {\n\treq := types.UpdateVirtualSwitch{\n\t\tThis:        o.Reference(),\n\t\tVswitchName: vswitchName,\n\t\tSpec:        spec,\n\t}\n\n\t_, err := methods.UpdateVirtualSwitch(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/host_service_system.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype HostServiceSystem struct {\n\tCommon\n}\n\nfunc NewHostServiceSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostServiceSystem {\n\treturn &HostServiceSystem{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (s HostServiceSystem) Service(ctx context.Context) ([]types.HostService, error) {\n\tvar ss mo.HostServiceSystem\n\n\terr := s.Properties(ctx, s.Reference(), []string{\"serviceInfo.service\"}, &ss)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ss.ServiceInfo.Service, nil\n}\n\nfunc (s HostServiceSystem) Start(ctx context.Context, id string) error {\n\treq := types.StartService{\n\t\tThis: s.Reference(),\n\t\tId:   id,\n\t}\n\n\t_, err := methods.StartService(ctx, s.Client(), &req)\n\treturn err\n}\n\nfunc (s HostServiceSystem) Stop(ctx context.Context, id string) error {\n\treq := types.StopService{\n\t\tThis: s.Reference(),\n\t\tId:   id,\n\t}\n\n\t_, err := methods.StopService(ctx, s.Client(), &req)\n\treturn err\n}\n\nfunc (s HostServiceSystem) Restart(ctx context.Context, id string) error {\n\treq := types.RestartService{\n\t\tThis: s.Reference(),\n\t\tId:   id,\n\t}\n\n\t_, err := 
methods.RestartService(ctx, s.Client(), &req)\n\treturn err\n}\n\nfunc (s HostServiceSystem) UpdatePolicy(ctx context.Context, id string, policy string) error {\n\treq := types.UpdateServicePolicy{\n\t\tThis:   s.Reference(),\n\t\tId:     id,\n\t\tPolicy: policy,\n\t}\n\n\t_, err := methods.UpdateServicePolicy(ctx, s.Client(), &req)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/host_storage_system.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"errors\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype HostStorageSystem struct {\n\tCommon\n}\n\nfunc NewHostStorageSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostStorageSystem {\n\treturn &HostStorageSystem{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (s HostStorageSystem) RetrieveDiskPartitionInfo(ctx context.Context, devicePath string) (*types.HostDiskPartitionInfo, error) {\n\treq := types.RetrieveDiskPartitionInfo{\n\t\tThis:       s.Reference(),\n\t\tDevicePath: []string{devicePath},\n\t}\n\n\tres, err := methods.RetrieveDiskPartitionInfo(ctx, s.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif res.Returnval == nil || len(res.Returnval) == 0 {\n\t\treturn nil, errors.New(\"no partition info\")\n\t}\n\n\treturn &res.Returnval[0], nil\n}\n\nfunc (s HostStorageSystem) ComputeDiskPartitionInfo(ctx context.Context, devicePath string, layout types.HostDiskPartitionLayout) (*types.HostDiskPartitionInfo, error) {\n\treq := types.ComputeDiskPartitionInfo{\n\t\tThis:       s.Reference(),\n\t\tDevicePath: devicePath,\n\t\tLayout:     layout,\n\t}\n\n\tres, err := methods.ComputeDiskPartitionInfo(ctx, s.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, 
nil\n}\n\nfunc (s HostStorageSystem) UpdateDiskPartitionInfo(ctx context.Context, devicePath string, spec types.HostDiskPartitionSpec) error {\n\treq := types.UpdateDiskPartitions{\n\t\tThis:       s.Reference(),\n\t\tDevicePath: devicePath,\n\t\tSpec:       spec,\n\t}\n\n\t_, err := methods.UpdateDiskPartitions(ctx, s.c, &req)\n\treturn err\n}\n\nfunc (s HostStorageSystem) RescanAllHba(ctx context.Context) error {\n\treq := types.RescanAllHba{\n\t\tThis: s.Reference(),\n\t}\n\n\t_, err := methods.RescanAllHba(ctx, s.c, &req)\n\treturn err\n}\n\nfunc (s HostStorageSystem) MarkAsSsd(ctx context.Context, uuid string) (*Task, error) {\n\treq := types.MarkAsSsd_Task{\n\t\tThis:         s.Reference(),\n\t\tScsiDiskUuid: uuid,\n\t}\n\n\tres, err := methods.MarkAsSsd_Task(ctx, s.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(s.c, res.Returnval), nil\n}\n\nfunc (s HostStorageSystem) MarkAsNonSsd(ctx context.Context, uuid string) (*Task, error) {\n\treq := types.MarkAsNonSsd_Task{\n\t\tThis:         s.Reference(),\n\t\tScsiDiskUuid: uuid,\n\t}\n\n\tres, err := methods.MarkAsNonSsd_Task(ctx, s.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(s.c, res.Returnval), nil\n}\n\nfunc (s HostStorageSystem) MarkAsLocal(ctx context.Context, uuid string) (*Task, error) {\n\treq := types.MarkAsLocal_Task{\n\t\tThis:         s.Reference(),\n\t\tScsiDiskUuid: uuid,\n\t}\n\n\tres, err := methods.MarkAsLocal_Task(ctx, s.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(s.c, res.Returnval), nil\n}\n\nfunc (s HostStorageSystem) MarkAsNonLocal(ctx context.Context, uuid string) (*Task, error) {\n\treq := types.MarkAsNonLocal_Task{\n\t\tThis:         s.Reference(),\n\t\tScsiDiskUuid: uuid,\n\t}\n\n\tres, err := methods.MarkAsNonLocal_Task(ctx, s.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(s.c, res.Returnval), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/host_system.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype HostSystem struct {\n\tCommon\n}\n\nfunc NewHostSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostSystem {\n\treturn &HostSystem{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (h HostSystem) ConfigManager() *HostConfigManager {\n\treturn NewHostConfigManager(h.c, h.Reference())\n}\n\nfunc (h HostSystem) ResourcePool(ctx context.Context) (*ResourcePool, error) {\n\tvar mh mo.HostSystem\n\n\terr := h.Properties(ctx, h.Reference(), []string{\"parent\"}, &mh)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar mcr *mo.ComputeResource\n\tvar parent interface{}\n\n\tswitch mh.Parent.Type {\n\tcase \"ComputeResource\":\n\t\tmcr = new(mo.ComputeResource)\n\t\tparent = mcr\n\tcase \"ClusterComputeResource\":\n\t\tmcc := new(mo.ClusterComputeResource)\n\t\tmcr = &mcc.ComputeResource\n\t\tparent = mcc\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown host parent type: %s\", mh.Parent.Type)\n\t}\n\n\terr = h.Properties(ctx, *mh.Parent, []string{\"resourcePool\"}, parent)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpool := NewResourcePool(h.c, *mcr.ResourcePool)\n\treturn pool, nil\n}\n\nfunc (h 
HostSystem) ManagementIPs(ctx context.Context) ([]net.IP, error) {\n\tvar mh mo.HostSystem\n\n\terr := h.Properties(ctx, h.Reference(), []string{\"config.virtualNicManagerInfo.netConfig\"}, &mh)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ips []net.IP\n\tfor _, nc := range mh.Config.VirtualNicManagerInfo.NetConfig {\n\t\tif nc.NicType == \"management\" && len(nc.CandidateVnic) > 0 {\n\t\t\tip := net.ParseIP(nc.CandidateVnic[0].Spec.Ip.IpAddress)\n\t\t\tif ip != nil {\n\t\t\t\tips = append(ips, ip)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ips, nil\n}\n\nfunc (h HostSystem) Disconnect(ctx context.Context) (*Task, error) {\n\treq := types.DisconnectHost_Task{\n\t\tThis: h.Reference(),\n\t}\n\n\tres, err := methods.DisconnectHost_Task(ctx, h.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(h.c, res.Returnval), nil\n}\n\nfunc (h HostSystem) Reconnect(ctx context.Context, cnxSpec *types.HostConnectSpec, reconnectSpec *types.HostSystemReconnectSpec) (*Task, error) {\n\treq := types.ReconnectHost_Task{\n\t\tThis:          h.Reference(),\n\t\tCnxSpec:       cnxSpec,\n\t\tReconnectSpec: reconnectSpec,\n\t}\n\n\tres, err := methods.ReconnectHost_Task(ctx, h.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(h.c, res.Returnval), nil\n}\n\nfunc (h HostSystem) EnterMaintenanceMode(ctx context.Context, timeout int32, evacuate bool, spec *types.HostMaintenanceSpec) (*Task, error) {\n\treq := types.EnterMaintenanceMode_Task{\n\t\tThis:                  h.Reference(),\n\t\tTimeout:               timeout,\n\t\tEvacuatePoweredOffVms: types.NewBool(evacuate),\n\t\tMaintenanceSpec:       spec,\n\t}\n\n\tres, err := methods.EnterMaintenanceMode_Task(ctx, h.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(h.c, res.Returnval), nil\n}\n\nfunc (h HostSystem) ExitMaintenanceMode(ctx context.Context, timeout int32) (*Task, error) {\n\treq := types.ExitMaintenanceMode_Task{\n\t\tThis:    h.Reference(),\n\t\tTimeout: 
timeout,\n\t}\n\n\tres, err := methods.ExitMaintenanceMode_Task(ctx, h.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(h.c, res.Returnval), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/host_virtual_nic_manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype HostVirtualNicManager struct {\n\tCommon\n\tHost *HostSystem\n}\n\nfunc NewHostVirtualNicManager(c *vim25.Client, ref types.ManagedObjectReference, host types.ManagedObjectReference) *HostVirtualNicManager {\n\treturn &HostVirtualNicManager{\n\t\tCommon: NewCommon(c, ref),\n\t\tHost:   NewHostSystem(c, host),\n\t}\n}\n\nfunc (m HostVirtualNicManager) Info(ctx context.Context) (*types.HostVirtualNicManagerInfo, error) {\n\tvar vnm mo.HostVirtualNicManager\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"info\"}, &vnm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &vnm.Info, nil\n}\n\nfunc (m HostVirtualNicManager) DeselectVnic(ctx context.Context, nicType string, device string) error {\n\tif nicType == string(types.HostVirtualNicManagerNicTypeVsan) {\n\t\t// Avoid fault.NotSupported:\n\t\t// \"Error deselecting device '$device': VSAN interfaces must be deselected using vim.host.VsanSystem\"\n\t\ts, err := m.Host.ConfigManager().VsanSystem(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn s.updateVnic(ctx, device, false)\n\t}\n\n\treq := types.DeselectVnicForNicType{\n\t\tThis:    
m.Reference(),\n\t\tNicType: nicType,\n\t\tDevice:  device,\n\t}\n\n\t_, err := methods.DeselectVnicForNicType(ctx, m.Client(), &req)\n\treturn err\n}\n\nfunc (m HostVirtualNicManager) SelectVnic(ctx context.Context, nicType string, device string) error {\n\tif nicType == string(types.HostVirtualNicManagerNicTypeVsan) {\n\t\t// Avoid fault.NotSupported:\n\t\t// \"Error selecting device '$device': VSAN interfaces must be selected using vim.host.VsanSystem\"\n\t\ts, err := m.Host.ConfigManager().VsanSystem(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn s.updateVnic(ctx, device, true)\n\t}\n\n\treq := types.SelectVnicForNicType{\n\t\tThis:    m.Reference(),\n\t\tNicType: nicType,\n\t\tDevice:  device,\n\t}\n\n\t_, err := methods.SelectVnicForNicType(ctx, m.Client(), &req)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/host_vsan_internal_system.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype HostVsanInternalSystem struct {\n\tCommon\n}\n\nfunc NewHostVsanInternalSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostVsanInternalSystem {\n\tm := HostVsanInternalSystem{\n\t\tCommon: NewCommon(c, ref),\n\t}\n\n\treturn &m\n}\n\n// QueryVsanObjectUuidsByFilter returns vSAN DOM object uuids by filter.\nfunc (m HostVsanInternalSystem) QueryVsanObjectUuidsByFilter(ctx context.Context, uuids []string, limit int32, version int32) ([]string, error) {\n\treq := types.QueryVsanObjectUuidsByFilter{\n\t\tThis:    m.Reference(),\n\t\tUuids:   uuids,\n\t\tLimit:   limit,\n\t\tVersion: version,\n\t}\n\n\tres, err := methods.QueryVsanObjectUuidsByFilter(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\ntype VsanObjExtAttrs struct {\n\tType  string `json:\"Object type\"`\n\tClass string `json:\"Object class\"`\n\tSize  string `json:\"Object size\"`\n\tPath  string `json:\"Object path\"`\n\tName  string `json:\"User friendly name\"`\n}\n\nfunc (a *VsanObjExtAttrs) DatastorePath(dir string) string {\n\tl := len(dir)\n\tpath := a.Path\n\n\tif len(path) >= l {\n\t\tpath = a.Path[l:]\n\t}\n\n\tif path 
!= \"\" {\n\t\treturn path\n\t}\n\n\treturn a.Name // vmnamespace\n}\n\n// GetVsanObjExtAttrs is internal and intended for troubleshooting/debugging situations in the field.\n// WARNING: This API can be slow because we do IOs (reads) to all the objects.\nfunc (m HostVsanInternalSystem) GetVsanObjExtAttrs(ctx context.Context, uuids []string) (map[string]VsanObjExtAttrs, error) {\n\treq := types.GetVsanObjExtAttrs{\n\t\tThis:  m.Reference(),\n\t\tUuids: uuids,\n\t}\n\n\tres, err := methods.GetVsanObjExtAttrs(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar attrs map[string]VsanObjExtAttrs\n\n\terr = json.Unmarshal([]byte(res.Returnval), &attrs)\n\n\treturn attrs, err\n}\n\n// DeleteVsanObjects is internal and intended for troubleshooting/debugging only.\n// WARNING: This API can be slow because we do IOs to all the objects.\n// DOM won't allow access to objects which have lost quorum. Such objects can be deleted with the optional \"force\" flag.\n// These objects may however re-appear with quorum if the absent components come back (network partition gets resolved, etc.)\nfunc (m HostVsanInternalSystem) DeleteVsanObjects(ctx context.Context, uuids []string, force *bool) ([]types.HostVsanInternalSystemDeleteVsanObjectsResult, error) {\n\treq := types.DeleteVsanObjects{\n\t\tThis:  m.Reference(),\n\t\tUuids: uuids,\n\t\tForce: force,\n\t}\n\n\tres, err := methods.DeleteVsanObjects(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/host_vsan_system.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype HostVsanSystem struct {\n\tCommon\n}\n\nfunc NewHostVsanSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostVsanSystem {\n\treturn &HostVsanSystem{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (s HostVsanSystem) Update(ctx context.Context, config types.VsanHostConfigInfo) (*Task, error) {\n\treq := types.UpdateVsan_Task{\n\t\tThis:   s.Reference(),\n\t\tConfig: config,\n\t}\n\n\tres, err := methods.UpdateVsan_Task(ctx, s.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(s.Client(), res.Returnval), nil\n}\n\n// updateVnic in support of the HostVirtualNicManager.{SelectVnic,DeselectVnic} methods\nfunc (s HostVsanSystem) updateVnic(ctx context.Context, device string, enable bool) error {\n\tvar vsan mo.HostVsanSystem\n\n\terr := s.Properties(ctx, s.Reference(), []string{\"config.networkInfo.port\"}, &vsan)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinfo := vsan.Config\n\n\tvar port []types.VsanHostConfigInfoNetworkInfoPortConfig\n\n\tfor _, p := range info.NetworkInfo.Port {\n\t\tif p.Device == device {\n\t\t\tcontinue\n\t\t}\n\n\t\tport = append(port, p)\n\t}\n\n\tif enable {\n\t\tport = 
append(port, types.VsanHostConfigInfoNetworkInfoPortConfig{\n\t\t\tDevice: device,\n\t\t})\n\t}\n\n\tinfo.NetworkInfo.Port = port\n\n\ttask, err := s.Update(ctx, info)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = task.WaitForResult(ctx, nil)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/http_nfc_lease.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype HttpNfcLease struct {\n\tCommon\n}\n\nfunc NewHttpNfcLease(c *vim25.Client, ref types.ManagedObjectReference) *HttpNfcLease {\n\treturn &HttpNfcLease{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\n// HttpNfcLeaseAbort wraps methods.HttpNfcLeaseAbort\nfunc (o HttpNfcLease) HttpNfcLeaseAbort(ctx context.Context, fault *types.LocalizedMethodFault) error {\n\treq := types.HttpNfcLeaseAbort{\n\t\tThis:  o.Reference(),\n\t\tFault: fault,\n\t}\n\n\t_, err := methods.HttpNfcLeaseAbort(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// HttpNfcLeaseComplete wraps methods.HttpNfcLeaseComplete\nfunc (o HttpNfcLease) HttpNfcLeaseComplete(ctx context.Context) error {\n\treq := types.HttpNfcLeaseComplete{\n\t\tThis: o.Reference(),\n\t}\n\n\t_, err := methods.HttpNfcLeaseComplete(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// HttpNfcLeaseGetManifest wraps methods.HttpNfcLeaseGetManifest\nfunc (o HttpNfcLease) HttpNfcLeaseGetManifest(ctx context.Context) error {\n\treq := types.HttpNfcLeaseGetManifest{\n\t\tThis: 
o.Reference(),\n\t}\n\n\t_, err := methods.HttpNfcLeaseGetManifest(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// HttpNfcLeaseProgress wraps methods.HttpNfcLeaseProgress\nfunc (o HttpNfcLease) HttpNfcLeaseProgress(ctx context.Context, percent int32) error {\n\treq := types.HttpNfcLeaseProgress{\n\t\tThis:    o.Reference(),\n\t\tPercent: percent,\n\t}\n\n\t_, err := methods.HttpNfcLeaseProgress(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (o HttpNfcLease) Wait(ctx context.Context) (*types.HttpNfcLeaseInfo, error) {\n\tvar lease mo.HttpNfcLease\n\n\tpc := property.DefaultCollector(o.c)\n\terr := property.Wait(ctx, pc, o.Reference(), []string{\"state\", \"info\", \"error\"}, func(pc []types.PropertyChange) bool {\n\t\tdone := false\n\n\t\tfor _, c := range pc {\n\t\t\tif c.Val == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tswitch c.Name {\n\t\t\tcase \"error\":\n\t\t\t\tval := c.Val.(types.LocalizedMethodFault)\n\t\t\t\tlease.Error = &val\n\t\t\t\tdone = true\n\t\t\tcase \"info\":\n\t\t\t\tval := c.Val.(types.HttpNfcLeaseInfo)\n\t\t\t\tlease.Info = &val\n\t\t\tcase \"state\":\n\t\t\t\tlease.State = c.Val.(types.HttpNfcLeaseState)\n\t\t\t\tif lease.State != types.HttpNfcLeaseStateInitializing {\n\t\t\t\t\tdone = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn done\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif lease.State == types.HttpNfcLeaseStateReady {\n\t\treturn lease.Info, nil\n\t}\n\n\tif lease.Error != nil {\n\t\treturn nil, errors.New(lease.Error.LocalizedMessage)\n\t}\n\n\treturn nil, fmt.Errorf(\"unexpected nfc lease state: %s\", lease.State)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/namespace_manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype DatastoreNamespaceManager struct {\n\tCommon\n}\n\nfunc NewDatastoreNamespaceManager(c *vim25.Client) *DatastoreNamespaceManager {\n\tn := DatastoreNamespaceManager{\n\t\tCommon: NewCommon(c, *c.ServiceContent.DatastoreNamespaceManager),\n\t}\n\n\treturn &n\n}\n\n// CreateDirectory creates a top-level directory on the given vsan datastore, using\n// the given user display name hint and opaque storage policy.\nfunc (nm DatastoreNamespaceManager) CreateDirectory(ctx context.Context, ds *Datastore, displayName string, policy string) (string, error) {\n\n\treq := &types.CreateDirectory{\n\t\tThis:        nm.Reference(),\n\t\tDatastore:   ds.Reference(),\n\t\tDisplayName: displayName,\n\t\tPolicy:      policy,\n\t}\n\n\tresp, err := methods.CreateDirectory(ctx, nm.c, req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn resp.Returnval, nil\n}\n\n// DeleteDirectory deletes the given top-level directory from a vsan datastore.\nfunc (nm DatastoreNamespaceManager) DeleteDirectory(ctx context.Context, dc *Datacenter, datastorePath string) error {\n\n\treq := &types.DeleteDirectory{\n\t\tThis:          nm.Reference(),\n\t\tDatastorePath: datastorePath,\n\t}\n\n\tif dc != nil 
{\n\t\tref := dc.Reference()\n\t\treq.Datacenter = &ref\n\t}\n\n\tif _, err := methods.DeleteDirectory(ctx, nm.c, req); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/network.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype Network struct {\n\tCommon\n}\n\nfunc NewNetwork(c *vim25.Client, ref types.ManagedObjectReference) *Network {\n\treturn &Network{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\n// EthernetCardBackingInfo returns the VirtualDeviceBackingInfo for this Network\nfunc (n Network) EthernetCardBackingInfo(_ context.Context) (types.BaseVirtualDeviceBackingInfo, error) {\n\tname := n.Name()\n\n\tbacking := &types.VirtualEthernetCardNetworkBackingInfo{\n\t\tVirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{\n\t\t\tDeviceName: name,\n\t\t},\n\t}\n\n\treturn backing, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/network_reference.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\n// The NetworkReference interface is implemented by managed objects\n// which can be used as the backing for a VirtualEthernetCard.\ntype NetworkReference interface {\n\tReference\n\n\tEthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/network_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\n// Network should implement the Reference interface.\nvar _ Reference = Network{}\n\n// Network should implement the NetworkReference interface.\nvar _ NetworkReference = Network{}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/opaque_network.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype OpaqueNetwork struct {\n\tCommon\n}\n\nfunc NewOpaqueNetwork(c *vim25.Client, ref types.ManagedObjectReference) *OpaqueNetwork {\n\treturn &OpaqueNetwork{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\n// EthernetCardBackingInfo returns the VirtualDeviceBackingInfo for this Network\nfunc (n OpaqueNetwork) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) {\n\tvar net mo.OpaqueNetwork\n\n\tif err := n.Properties(ctx, n.Reference(), []string{\"summary\"}, &net); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsummary, ok := net.Summary.(*types.OpaqueNetworkSummary)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"%s unsupported network type: %T\", n, net.Summary)\n\t}\n\n\tbacking := &types.VirtualEthernetCardOpaqueNetworkBackingInfo{\n\t\tOpaqueNetworkId:   summary.OpaqueNetworkId,\n\t\tOpaqueNetworkType: summary.OpaqueNetworkType,\n\t}\n\n\treturn backing, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/option_manager.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype OptionManager struct {\n\tCommon\n}\n\nfunc NewOptionManager(c *vim25.Client, ref types.ManagedObjectReference) *OptionManager {\n\treturn &OptionManager{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (m OptionManager) Query(ctx context.Context, name string) ([]types.BaseOptionValue, error) {\n\treq := types.QueryOptions{\n\t\tThis: m.Reference(),\n\t\tName: name,\n\t}\n\n\tres, err := methods.QueryOptions(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m OptionManager) Update(ctx context.Context, value []types.BaseOptionValue) error {\n\treq := types.UpdateOptions{\n\t\tThis:         m.Reference(),\n\t\tChangedValue: value,\n\t}\n\n\t_, err := methods.UpdateOptions(ctx, m.Client(), &req)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/ovf_manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype OvfManager struct {\n\tCommon\n}\n\nfunc NewOvfManager(c *vim25.Client) *OvfManager {\n\to := OvfManager{\n\t\tCommon: NewCommon(c, *c.ServiceContent.OvfManager),\n\t}\n\n\treturn &o\n}\n\n// CreateDescriptor wraps methods.CreateDescriptor\nfunc (o OvfManager) CreateDescriptor(ctx context.Context, obj Reference, cdp types.OvfCreateDescriptorParams) (*types.OvfCreateDescriptorResult, error) {\n\treq := types.CreateDescriptor{\n\t\tThis: o.Reference(),\n\t\tObj:  obj.Reference(),\n\t\tCdp:  cdp,\n\t}\n\n\tres, err := methods.CreateDescriptor(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n\n// CreateImportSpec wraps methods.CreateImportSpec\nfunc (o OvfManager) CreateImportSpec(ctx context.Context, ovfDescriptor string, resourcePool Reference, datastore Reference, cisp types.OvfCreateImportSpecParams) (*types.OvfCreateImportSpecResult, error) {\n\treq := types.CreateImportSpec{\n\t\tThis:          o.Reference(),\n\t\tOvfDescriptor: ovfDescriptor,\n\t\tResourcePool:  resourcePool.Reference(),\n\t\tDatastore:     datastore.Reference(),\n\t\tCisp:          cisp,\n\t}\n\n\tres, err := methods.CreateImportSpec(ctx, o.c, &req)\n\tif 
err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n\n// ParseDescriptor wraps methods.ParseDescriptor\nfunc (o OvfManager) ParseDescriptor(ctx context.Context, ovfDescriptor string, pdp types.OvfParseDescriptorParams) (*types.OvfParseDescriptorResult, error) {\n\treq := types.ParseDescriptor{\n\t\tThis:          o.Reference(),\n\t\tOvfDescriptor: ovfDescriptor,\n\t\tPdp:           pdp,\n\t}\n\n\tres, err := methods.ParseDescriptor(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n\n// ValidateHost wraps methods.ValidateHost\nfunc (o OvfManager) ValidateHost(ctx context.Context, ovfDescriptor string, host Reference, vhp types.OvfValidateHostParams) (*types.OvfValidateHostResult, error) {\n\treq := types.ValidateHost{\n\t\tThis:          o.Reference(),\n\t\tOvfDescriptor: ovfDescriptor,\n\t\tHost:          host.Reference(),\n\t\tVhp:           vhp,\n\t}\n\n\tres, err := methods.ValidateHost(ctx, o.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/resource_pool.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ResourcePool struct {\n\tCommon\n}\n\nfunc NewResourcePool(c *vim25.Client, ref types.ManagedObjectReference) *ResourcePool {\n\treturn &ResourcePool{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (p ResourcePool) ImportVApp(ctx context.Context, spec types.BaseImportSpec, folder *Folder, host *HostSystem) (*HttpNfcLease, error) {\n\treq := types.ImportVApp{\n\t\tThis: p.Reference(),\n\t\tSpec: spec,\n\t}\n\n\tif folder != nil {\n\t\tref := folder.Reference()\n\t\treq.Folder = &ref\n\t}\n\n\tif host != nil {\n\t\tref := host.Reference()\n\t\treq.Host = &ref\n\t}\n\n\tres, err := methods.ImportVApp(ctx, p.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewHttpNfcLease(p.c, res.Returnval), nil\n}\n\nfunc (p ResourcePool) Create(ctx context.Context, name string, spec types.ResourceConfigSpec) (*ResourcePool, error) {\n\treq := types.CreateResourcePool{\n\t\tThis: p.Reference(),\n\t\tName: name,\n\t\tSpec: spec,\n\t}\n\n\tres, err := methods.CreateResourcePool(ctx, p.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewResourcePool(p.c, res.Returnval), nil\n}\n\nfunc (p ResourcePool) CreateVApp(ctx context.Context, name string, resSpec 
types.ResourceConfigSpec, configSpec types.VAppConfigSpec, folder *Folder) (*VirtualApp, error) {\n\treq := types.CreateVApp{\n\t\tThis:       p.Reference(),\n\t\tName:       name,\n\t\tResSpec:    resSpec,\n\t\tConfigSpec: configSpec,\n\t}\n\n\tif folder != nil {\n\t\tref := folder.Reference()\n\t\treq.VmFolder = &ref\n\t}\n\n\tres, err := methods.CreateVApp(ctx, p.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewVirtualApp(p.c, res.Returnval), nil\n}\n\nfunc (p ResourcePool) UpdateConfig(ctx context.Context, name string, config *types.ResourceConfigSpec) error {\n\treq := types.UpdateConfig{\n\t\tThis:   p.Reference(),\n\t\tName:   name,\n\t\tConfig: config,\n\t}\n\n\tif config != nil && config.Entity == nil {\n\t\tref := p.Reference()\n\n\t\t// Create copy of config so changes won't leak back to the caller\n\t\tnewConfig := *config\n\t\tnewConfig.Entity = &ref\n\t\treq.Config = &newConfig\n\t}\n\n\t_, err := methods.UpdateConfig(ctx, p.c, &req)\n\treturn err\n}\n\nfunc (p ResourcePool) DestroyChildren(ctx context.Context) error {\n\treq := types.DestroyChildren{\n\t\tThis: p.Reference(),\n\t}\n\n\t_, err := methods.DestroyChildren(ctx, p.c, &req)\n\treturn err\n}\n\nfunc (p ResourcePool) Destroy(ctx context.Context) (*Task, error) {\n\treq := types.Destroy_Task{\n\t\tThis: p.Reference(),\n\t}\n\n\tres, err := methods.Destroy_Task(ctx, p.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(p.c, res.Returnval), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/search_index.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype SearchIndex struct {\n\tCommon\n}\n\nfunc NewSearchIndex(c *vim25.Client) *SearchIndex {\n\ts := SearchIndex{\n\t\tCommon: NewCommon(c, *c.ServiceContent.SearchIndex),\n\t}\n\n\treturn &s\n}\n\n// FindByDatastorePath finds a virtual machine by its location on a datastore.\nfunc (s SearchIndex) FindByDatastorePath(ctx context.Context, dc *Datacenter, path string) (Reference, error) {\n\treq := types.FindByDatastorePath{\n\t\tThis:       s.Reference(),\n\t\tDatacenter: dc.Reference(),\n\t\tPath:       path,\n\t}\n\n\tres, err := methods.FindByDatastorePath(ctx, s.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif res.Returnval == nil {\n\t\treturn nil, nil\n\t}\n\treturn NewReference(s.c, *res.Returnval), nil\n}\n\n// FindByDnsName finds a virtual machine or host by DNS name.\nfunc (s SearchIndex) FindByDnsName(ctx context.Context, dc *Datacenter, dnsName string, vmSearch bool) (Reference, error) {\n\treq := types.FindByDnsName{\n\t\tThis:     s.Reference(),\n\t\tDnsName:  dnsName,\n\t\tVmSearch: vmSearch,\n\t}\n\tif dc != nil {\n\t\tref := dc.Reference()\n\t\treq.Datacenter = &ref\n\t}\n\n\tres, err := methods.FindByDnsName(ctx, s.c, &req)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\n\tif res.Returnval == nil {\n\t\treturn nil, nil\n\t}\n\treturn NewReference(s.c, *res.Returnval), nil\n}\n\n// FindByInventoryPath finds a managed entity based on its location in the inventory.\nfunc (s SearchIndex) FindByInventoryPath(ctx context.Context, path string) (Reference, error) {\n\treq := types.FindByInventoryPath{\n\t\tThis:          s.Reference(),\n\t\tInventoryPath: path,\n\t}\n\n\tres, err := methods.FindByInventoryPath(ctx, s.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif res.Returnval == nil {\n\t\treturn nil, nil\n\t}\n\treturn NewReference(s.c, *res.Returnval), nil\n}\n\n// FindByIp finds a virtual machine or host by IP address.\nfunc (s SearchIndex) FindByIp(ctx context.Context, dc *Datacenter, ip string, vmSearch bool) (Reference, error) {\n\treq := types.FindByIp{\n\t\tThis:     s.Reference(),\n\t\tIp:       ip,\n\t\tVmSearch: vmSearch,\n\t}\n\tif dc != nil {\n\t\tref := dc.Reference()\n\t\treq.Datacenter = &ref\n\t}\n\n\tres, err := methods.FindByIp(ctx, s.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif res.Returnval == nil {\n\t\treturn nil, nil\n\t}\n\treturn NewReference(s.c, *res.Returnval), nil\n}\n\n// FindByUuid finds a virtual machine or host by UUID.\nfunc (s SearchIndex) FindByUuid(ctx context.Context, dc *Datacenter, uuid string, vmSearch bool, instanceUuid *bool) (Reference, error) {\n\treq := types.FindByUuid{\n\t\tThis:         s.Reference(),\n\t\tUuid:         uuid,\n\t\tVmSearch:     vmSearch,\n\t\tInstanceUuid: instanceUuid,\n\t}\n\tif dc != nil {\n\t\tref := dc.Reference()\n\t\treq.Datacenter = &ref\n\t}\n\n\tres, err := methods.FindByUuid(ctx, s.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif res.Returnval == nil {\n\t\treturn nil, nil\n\t}\n\treturn NewReference(s.c, *res.Returnval), nil\n}\n\n// FindChild finds a particular child based on a managed entity name.\nfunc (s SearchIndex) FindChild(ctx context.Context, entity Reference, name string) 
(Reference, error) {\n\treq := types.FindChild{\n\t\tThis:   s.Reference(),\n\t\tEntity: entity.Reference(),\n\t\tName:   name,\n\t}\n\n\tres, err := methods.FindChild(ctx, s.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif res.Returnval == nil {\n\t\treturn nil, nil\n\t}\n\treturn NewReference(s.c, *res.Returnval), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/search_index_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/test\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n)\n\nfunc TestSearch(t *testing.T) {\n\tc := test.NewAuthenticatedClient(t)\n\ts := NewSearchIndex(c)\n\n\tref, err := s.FindChild(context.Background(), NewRootFolder(c), \"ha-datacenter\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdc, ok := ref.(*Datacenter)\n\tif !ok {\n\t\tt.Errorf(\"Expected Datacenter: %#v\", ref)\n\t}\n\n\tfolders, err := dc.Folders(context.Background())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tref, err = s.FindChild(context.Background(), folders.DatastoreFolder, \"datastore1\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, ok = ref.(*Datastore)\n\tif !ok {\n\t\tt.Errorf(\"Expected Datastore: %#v\", ref)\n\t}\n\n\tref, err = s.FindByInventoryPath(context.Background(), \"/ha-datacenter/network/VM Network\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, ok = ref.(*Network)\n\tif !ok {\n\t\tt.Errorf(\"Expected Network: %#v\", ref)\n\t}\n\n\tcrs, err := folders.HostFolder.Children(context.Background())\n\tif err != nil {\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\tif len(crs) != 0 {\n\t\tvar cr mo.ComputeResource\n\t\tref = crs[0]\n\t\terr = s.Properties(context.Background(), ref.Reference(), []string{\"host\"}, &cr)\n\t\tif err != nil 
{\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tvar host mo.HostSystem\n\t\tref = NewHostSystem(c, cr.Host[0])\n\t\terr = s.Properties(context.Background(), ref.Reference(), []string{\"name\", \"hardware\", \"config\"}, &host)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tdnsConfig := host.Config.Network.DnsConfig.GetHostDnsConfig()\n\t\tdnsName := fmt.Sprintf(\"%s.%s\", dnsConfig.HostName, dnsConfig.DomainName)\n\t\tshost, err := s.FindByDnsName(context.Background(), dc, dnsName, false)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !reflect.DeepEqual(ref, shost) {\n\t\t\tt.Errorf(\"%#v != %#v\\n\", ref, shost)\n\t\t}\n\n\t\tshost, err = s.FindByUuid(context.Background(), dc, host.Hardware.SystemInfo.Uuid, false, nil)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !reflect.DeepEqual(ref, shost) {\n\t\t\tt.Errorf(\"%#v != %#v\\n\", ref, shost)\n\t\t}\n\t}\n\n\tvms, err := folders.VmFolder.Children(context.Background())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(vms) != 0 {\n\t\tvar vm mo.VirtualMachine\n\t\tref = vms[0]\n\t\terr = s.Properties(context.Background(), ref.Reference(), []string{\"config\", \"guest\"}, &vm)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tsvm, err := s.FindByDatastorePath(context.Background(), dc, vm.Config.Files.VmPathName)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !reflect.DeepEqual(ref, svm) {\n\t\t\tt.Errorf(\"%#v != %#v\\n\", ref, svm)\n\t\t}\n\n\t\tsvm, err = s.FindByUuid(context.Background(), dc, vm.Config.Uuid, true, nil)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !reflect.DeepEqual(ref, svm) {\n\t\t\tt.Errorf(\"%#v != %#v\\n\", ref, svm)\n\t\t}\n\n\t\tif vm.Guest.HostName != \"\" {\n\t\t\tsvm, err := s.FindByDnsName(context.Background(), dc, vm.Guest.HostName, true)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(ref, svm) {\n\t\t\t\tt.Errorf(\"%#v != %#v\\n\", ref, svm)\n\t\t\t}\n\t\t}\n\n\t\tif vm.Guest.IpAddress != \"\" {\n\t\t\tsvm, 
err := s.FindByIp(context.Background(), dc, vm.Guest.IpAddress, true)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(ref, svm) {\n\t\t\t\tt.Errorf(\"%#v != %#v\\n\", ref, svm)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/storage_pod.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype StoragePod struct {\n\t*Folder\n}\n\nfunc NewStoragePod(c *vim25.Client, ref types.ManagedObjectReference) *StoragePod {\n\treturn &StoragePod{\n\t\tFolder: &Folder{\n\t\t\tCommon: NewCommon(c, ref),\n\t\t},\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/storage_resource_manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype StorageResourceManager struct {\n\tCommon\n}\n\nfunc NewStorageResourceManager(c *vim25.Client) *StorageResourceManager {\n\tsr := StorageResourceManager{\n\t\tCommon: NewCommon(c, *c.ServiceContent.StorageResourceManager),\n\t}\n\n\treturn &sr\n}\n\nfunc (sr StorageResourceManager) ApplyStorageDrsRecommendation(ctx context.Context, key []string) (*Task, error) {\n\treq := types.ApplyStorageDrsRecommendation_Task{\n\t\tThis: sr.Reference(),\n\t\tKey:  key,\n\t}\n\n\tres, err := methods.ApplyStorageDrsRecommendation_Task(ctx, sr.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(sr.c, res.Returnval), nil\n}\n\nfunc (sr StorageResourceManager) ApplyStorageDrsRecommendationToPod(ctx context.Context, pod *StoragePod, key string) (*Task, error) {\n\treq := types.ApplyStorageDrsRecommendationToPod_Task{\n\t\tThis: sr.Reference(),\n\t\tKey:  key,\n\t}\n\n\tif pod != nil {\n\t\treq.Pod = pod.Reference()\n\t}\n\n\tres, err := methods.ApplyStorageDrsRecommendationToPod_Task(ctx, sr.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(sr.c, res.Returnval), nil\n}\n\nfunc (sr StorageResourceManager) CancelStorageDrsRecommendation(ctx 
context.Context, key []string) error {\n\treq := types.CancelStorageDrsRecommendation{\n\t\tThis: sr.Reference(),\n\t\tKey:  key,\n\t}\n\n\t_, err := methods.CancelStorageDrsRecommendation(ctx, sr.c, &req)\n\n\treturn err\n}\n\nfunc (sr StorageResourceManager) ConfigureDatastoreIORM(ctx context.Context, datastore *Datastore, spec types.StorageIORMConfigSpec, key string) (*Task, error) {\n\treq := types.ConfigureDatastoreIORM_Task{\n\t\tThis: sr.Reference(),\n\t\tSpec: spec,\n\t}\n\n\tif datastore != nil {\n\t\treq.Datastore = datastore.Reference()\n\t}\n\n\tres, err := methods.ConfigureDatastoreIORM_Task(ctx, sr.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(sr.c, res.Returnval), nil\n}\n\nfunc (sr StorageResourceManager) ConfigureStorageDrsForPod(ctx context.Context, pod *StoragePod, spec types.StorageDrsConfigSpec, modify bool) (*Task, error) {\n\treq := types.ConfigureStorageDrsForPod_Task{\n\t\tThis:   sr.Reference(),\n\t\tSpec:   spec,\n\t\tModify: modify,\n\t}\n\n\tif pod != nil {\n\t\treq.Pod = pod.Reference()\n\t}\n\n\tres, err := methods.ConfigureStorageDrsForPod_Task(ctx, sr.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(sr.c, res.Returnval), nil\n}\n\nfunc (sr StorageResourceManager) QueryDatastorePerformanceSummary(ctx context.Context, datastore *Datastore) ([]types.StoragePerformanceSummary, error) {\n\treq := types.QueryDatastorePerformanceSummary{\n\t\tThis: sr.Reference(),\n\t}\n\n\tif datastore != nil {\n\t\treq.Datastore = datastore.Reference()\n\t}\n\n\tres, err := methods.QueryDatastorePerformanceSummary(ctx, sr.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (sr StorageResourceManager) QueryIORMConfigOption(ctx context.Context, host *HostSystem) (*types.StorageIORMConfigOption, error) {\n\treq := types.QueryIORMConfigOption{\n\t\tThis: sr.Reference(),\n\t}\n\n\tif host != nil {\n\t\treq.Host = host.Reference()\n\t}\n\n\tres, err := 
methods.QueryIORMConfigOption(ctx, sr.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n\nfunc (sr StorageResourceManager) RecommendDatastores(ctx context.Context, storageSpec types.StoragePlacementSpec) (*types.StoragePlacementResult, error) {\n\treq := types.RecommendDatastores{\n\t\tThis:        sr.Reference(),\n\t\tStorageSpec: storageSpec,\n\t}\n\n\tres, err := methods.RecommendDatastores(ctx, sr.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n\nfunc (sr StorageResourceManager) RefreshStorageDrsRecommendation(ctx context.Context, pod *StoragePod) error {\n\treq := types.RefreshStorageDrsRecommendation{\n\t\tThis: sr.Reference(),\n\t}\n\n\tif pod != nil {\n\t\treq.Pod = pod.Reference()\n\t}\n\n\t_, err := methods.RefreshStorageDrsRecommendation(ctx, sr.c, &req)\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/task.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/task\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/progress\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\n// Task is a convenience wrapper around task.Task that keeps a reference to\n// the client that was used to create it. This allows users to call the Wait()\n// function with only a context parameter, instead of a context parameter, a\n// soap.RoundTripper, and reference to the root property collector.\ntype Task struct {\n\tCommon\n}\n\nfunc NewTask(c *vim25.Client, ref types.ManagedObjectReference) *Task {\n\tt := Task{\n\t\tCommon: NewCommon(c, ref),\n\t}\n\n\treturn &t\n}\n\nfunc (t *Task) Wait(ctx context.Context) error {\n\t_, err := t.WaitForResult(ctx, nil)\n\treturn err\n}\n\nfunc (t *Task) WaitForResult(ctx context.Context, s progress.Sinker) (*types.TaskInfo, error) {\n\tp := property.DefaultCollector(t.c)\n\treturn task.Wait(ctx, t.Reference(), p, s)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/types.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype Reference interface {\n\tReference() types.ManagedObjectReference\n}\n\nfunc NewReference(c *vim25.Client, e types.ManagedObjectReference) Reference {\n\tswitch e.Type {\n\tcase \"Folder\":\n\t\treturn NewFolder(c, e)\n\tcase \"StoragePod\":\n\t\treturn &StoragePod{\n\t\t\tNewFolder(c, e),\n\t\t}\n\tcase \"Datacenter\":\n\t\treturn NewDatacenter(c, e)\n\tcase \"VirtualMachine\":\n\t\treturn NewVirtualMachine(c, e)\n\tcase \"VirtualApp\":\n\t\treturn &VirtualApp{\n\t\t\tNewResourcePool(c, e),\n\t\t}\n\tcase \"ComputeResource\":\n\t\treturn NewComputeResource(c, e)\n\tcase \"ClusterComputeResource\":\n\t\treturn NewClusterComputeResource(c, e)\n\tcase \"HostSystem\":\n\t\treturn NewHostSystem(c, e)\n\tcase \"Network\", \"OpaqueNetwork\":\n\t\treturn NewNetwork(c, e)\n\tcase \"ResourcePool\":\n\t\treturn NewResourcePool(c, e)\n\tcase \"DistributedVirtualSwitch\":\n\t\treturn NewDistributedVirtualSwitch(c, e)\n\tcase \"VmwareDistributedVirtualSwitch\":\n\t\treturn &VmwareDistributedVirtualSwitch{*NewDistributedVirtualSwitch(c, e)}\n\tcase \"DistributedVirtualPortgroup\":\n\t\treturn NewDistributedVirtualPortgroup(c, e)\n\tcase \"Datastore\":\n\t\treturn NewDatastore(c, e)\n\tdefault:\n\t\tpanic(\"Unknown managed entity: \" + 
e.Type)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/virtual_app.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype VirtualApp struct {\n\t*ResourcePool\n}\n\nfunc NewVirtualApp(c *vim25.Client, ref types.ManagedObjectReference) *VirtualApp {\n\treturn &VirtualApp{\n\t\tResourcePool: NewResourcePool(c, ref),\n\t}\n}\n\nfunc (p VirtualApp) CreateChildVM(ctx context.Context, config types.VirtualMachineConfigSpec, host *HostSystem) (*Task, error) {\n\treq := types.CreateChildVM_Task{\n\t\tThis:   p.Reference(),\n\t\tConfig: config,\n\t}\n\n\tif host != nil {\n\t\tref := host.Reference()\n\t\treq.Host = &ref\n\t}\n\n\tres, err := methods.CreateChildVM_Task(ctx, p.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(p.c, res.Returnval), nil\n}\n\nfunc (p VirtualApp) UpdateConfig(ctx context.Context, spec types.VAppConfigSpec) error {\n\treq := types.UpdateVAppConfig{\n\t\tThis: p.Reference(),\n\t\tSpec: spec,\n\t}\n\n\t_, err := methods.UpdateVAppConfig(ctx, p.c, &req)\n\treturn err\n}\n\nfunc (p VirtualApp) PowerOn(ctx context.Context) (*Task, error) {\n\treq := types.PowerOnVApp_Task{\n\t\tThis: p.Reference(),\n\t}\n\n\tres, err := methods.PowerOnVApp_Task(ctx, p.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(p.c, res.Returnval), 
nil\n}\n\nfunc (p VirtualApp) PowerOff(ctx context.Context, force bool) (*Task, error) {\n\treq := types.PowerOffVApp_Task{\n\t\tThis:  p.Reference(),\n\t\tForce: force,\n\t}\n\n\tres, err := methods.PowerOffVApp_Task(ctx, p.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(p.c, res.Returnval), nil\n\n}\n\nfunc (p VirtualApp) Suspend(ctx context.Context) (*Task, error) {\n\treq := types.SuspendVApp_Task{\n\t\tThis: p.Reference(),\n\t}\n\n\tres, err := methods.SuspendVApp_Task(ctx, p.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(p.c, res.Returnval), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/virtual_device_list.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"path/filepath\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\n// Type values for use in BootOrder\nconst (\n\tDeviceTypeCdrom    = \"cdrom\"\n\tDeviceTypeDisk     = \"disk\"\n\tDeviceTypeEthernet = \"ethernet\"\n\tDeviceTypeFloppy   = \"floppy\"\n)\n\n// VirtualDeviceList provides helper methods for working with a list of virtual devices.\ntype VirtualDeviceList []types.BaseVirtualDevice\n\n// SCSIControllerTypes are used for adding a new SCSI controller to a VM.\nfunc SCSIControllerTypes() VirtualDeviceList {\n\t// Return a mutable list of SCSI controller types, initialized with defaults.\n\treturn VirtualDeviceList([]types.BaseVirtualDevice{\n\t\t&types.VirtualLsiLogicController{},\n\t\t&types.VirtualBusLogicController{},\n\t\t&types.ParaVirtualSCSIController{},\n\t\t&types.VirtualLsiLogicSASController{},\n\t}).Select(func(device types.BaseVirtualDevice) bool {\n\t\tc := device.(types.BaseVirtualSCSIController).GetVirtualSCSIController()\n\t\tc.SharedBus = types.VirtualSCSISharingNoSharing\n\t\tc.BusNumber = -1\n\t\treturn true\n\t})\n}\n\n// EthernetCardTypes are used for adding a new ethernet card to a VM.\nfunc EthernetCardTypes() VirtualDeviceList {\n\treturn 
VirtualDeviceList([]types.BaseVirtualDevice{\n\t\t&types.VirtualE1000{},\n\t\t&types.VirtualE1000e{},\n\t\t&types.VirtualVmxnet3{},\n\t}).Select(func(device types.BaseVirtualDevice) bool {\n\t\tc := device.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard()\n\t\tc.GetVirtualDevice().Key = -1\n\t\treturn true\n\t})\n}\n\n// Select returns a new list containing all elements of the list for which the given func returns true.\nfunc (l VirtualDeviceList) Select(f func(device types.BaseVirtualDevice) bool) VirtualDeviceList {\n\tvar found VirtualDeviceList\n\n\tfor _, device := range l {\n\t\tif f(device) {\n\t\t\tfound = append(found, device)\n\t\t}\n\t}\n\n\treturn found\n}\n\n// SelectByType returns a new list with devices that are equal to or extend the given type.\nfunc (l VirtualDeviceList) SelectByType(deviceType types.BaseVirtualDevice) VirtualDeviceList {\n\tdtype := reflect.TypeOf(deviceType)\n\tif dtype == nil {\n\t\treturn nil\n\t}\n\tdname := dtype.Elem().Name()\n\n\treturn l.Select(func(device types.BaseVirtualDevice) bool {\n\t\tt := reflect.TypeOf(device)\n\n\t\tif t == dtype {\n\t\t\treturn true\n\t\t}\n\n\t\t_, ok := t.Elem().FieldByName(dname)\n\n\t\treturn ok\n\t})\n}\n\n// SelectByBackingInfo returns a new list with devices matching the given backing info.\n// If the value of backing is nil, any device with a backing of the same type will be returned.\nfunc (l VirtualDeviceList) SelectByBackingInfo(backing types.BaseVirtualDeviceBackingInfo) VirtualDeviceList {\n\tt := reflect.TypeOf(backing)\n\n\treturn l.Select(func(device types.BaseVirtualDevice) bool {\n\t\tdb := device.GetVirtualDevice().Backing\n\t\tif db == nil {\n\t\t\treturn false\n\t\t}\n\n\t\tif reflect.TypeOf(db) != t {\n\t\t\treturn false\n\t\t}\n\n\t\tif reflect.ValueOf(backing).IsNil() {\n\t\t\t// selecting by backing type\n\t\t\treturn true\n\t\t}\n\n\t\tswitch a := db.(type) {\n\t\tcase *types.VirtualEthernetCardNetworkBackingInfo:\n\t\t\tb := 
backing.(*types.VirtualEthernetCardNetworkBackingInfo)\n\t\t\treturn a.DeviceName == b.DeviceName\n\t\tcase *types.VirtualEthernetCardDistributedVirtualPortBackingInfo:\n\t\t\tb := backing.(*types.VirtualEthernetCardDistributedVirtualPortBackingInfo)\n\t\t\treturn a.Port.SwitchUuid == b.Port.SwitchUuid &&\n\t\t\t\ta.Port.PortgroupKey == b.Port.PortgroupKey\n\t\tcase *types.VirtualDiskFlatVer2BackingInfo:\n\t\t\tb := backing.(*types.VirtualDiskFlatVer2BackingInfo)\n\t\t\tif a.Parent != nil && b.Parent != nil {\n\t\t\t\treturn a.Parent.FileName == b.Parent.FileName\n\t\t\t}\n\t\t\treturn a.FileName == b.FileName\n\t\tcase *types.VirtualSerialPortURIBackingInfo:\n\t\t\tb := backing.(*types.VirtualSerialPortURIBackingInfo)\n\t\t\treturn a.ServiceURI == b.ServiceURI\n\t\tcase types.BaseVirtualDeviceFileBackingInfo:\n\t\t\tb := backing.(types.BaseVirtualDeviceFileBackingInfo)\n\t\t\treturn a.GetVirtualDeviceFileBackingInfo().FileName == b.GetVirtualDeviceFileBackingInfo().FileName\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t})\n}\n\n// Find returns the device matching the given name.\nfunc (l VirtualDeviceList) Find(name string) types.BaseVirtualDevice {\n\tfor _, device := range l {\n\t\tif l.Name(device) == name {\n\t\t\treturn device\n\t\t}\n\t}\n\treturn nil\n}\n\n// FindByKey returns the device matching the given key.\nfunc (l VirtualDeviceList) FindByKey(key int32) types.BaseVirtualDevice {\n\tfor _, device := range l {\n\t\tif device.GetVirtualDevice().Key == key {\n\t\t\treturn device\n\t\t}\n\t}\n\treturn nil\n}\n\n// FindIDEController will find the named IDE controller if given, otherwise will pick an available controller.\n// An error is returned if the named controller is not found or not an IDE controller.  
Or, if name is not\n// given and no available controller can be found.\nfunc (l VirtualDeviceList) FindIDEController(name string) (*types.VirtualIDEController, error) {\n\tif name != \"\" {\n\t\td := l.Find(name)\n\t\tif d == nil {\n\t\t\treturn nil, fmt.Errorf(\"device '%s' not found\", name)\n\t\t}\n\t\tif c, ok := d.(*types.VirtualIDEController); ok {\n\t\t\treturn c, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"%s is not an IDE controller\", name)\n\t}\n\n\tc := l.PickController((*types.VirtualIDEController)(nil))\n\tif c == nil {\n\t\treturn nil, errors.New(\"no available IDE controller\")\n\t}\n\n\treturn c.(*types.VirtualIDEController), nil\n}\n\n// CreateIDEController creates a new IDE controller.\nfunc (l VirtualDeviceList) CreateIDEController() (types.BaseVirtualDevice, error) {\n\tide := &types.VirtualIDEController{}\n\tide.Key = l.NewKey()\n\treturn ide, nil\n}\n\n// FindSCSIController will find the named SCSI controller if given, otherwise will pick an available controller.\n// An error is returned if the named controller is not found or not an SCSI controller.  
Or, if name is not\n// given and no available controller can be found.\nfunc (l VirtualDeviceList) FindSCSIController(name string) (*types.VirtualSCSIController, error) {\n\tif name != \"\" {\n\t\td := l.Find(name)\n\t\tif d == nil {\n\t\t\treturn nil, fmt.Errorf(\"device '%s' not found\", name)\n\t\t}\n\t\tif c, ok := d.(types.BaseVirtualSCSIController); ok {\n\t\t\treturn c.GetVirtualSCSIController(), nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"%s is not an SCSI controller\", name)\n\t}\n\n\tc := l.PickController((*types.VirtualSCSIController)(nil))\n\tif c == nil {\n\t\treturn nil, errors.New(\"no available SCSI controller\")\n\t}\n\n\treturn c.(types.BaseVirtualSCSIController).GetVirtualSCSIController(), nil\n}\n\n// CreateSCSIController creates a new SCSI controller of type name if given, otherwise defaults to lsilogic.\nfunc (l VirtualDeviceList) CreateSCSIController(name string) (types.BaseVirtualDevice, error) {\n\tctypes := SCSIControllerTypes()\n\n\tif name == \"scsi\" || name == \"\" {\n\t\tname = ctypes.Type(ctypes[0])\n\t}\n\n\tfound := ctypes.Select(func(device types.BaseVirtualDevice) bool {\n\t\treturn l.Type(device) == name\n\t})\n\n\tif len(found) == 0 {\n\t\treturn nil, fmt.Errorf(\"unknown SCSI controller type '%s'\", name)\n\t}\n\n\tc, ok := found[0].(types.BaseVirtualSCSIController)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"invalid SCSI controller type '%s'\", name)\n\t}\n\n\tscsi := c.GetVirtualSCSIController()\n\tscsi.BusNumber = l.newSCSIBusNumber()\n\tscsi.Key = l.NewKey()\n\tscsi.ScsiCtlrUnitNumber = 7\n\treturn c.(types.BaseVirtualDevice), nil\n}\n\nvar scsiBusNumbers = []int{0, 1, 2, 3}\n\n// newSCSIBusNumber returns the bus number to use for adding a new SCSI bus device.\n// -1 is returned if there are no bus numbers available.\nfunc (l VirtualDeviceList) newSCSIBusNumber() int32 {\n\tvar used []int\n\n\tfor _, d := range l.SelectByType((*types.VirtualSCSIController)(nil)) {\n\t\tnum := 
d.(types.BaseVirtualSCSIController).GetVirtualSCSIController().BusNumber\n\t\tif num >= 0 {\n\t\t\tused = append(used, int(num))\n\t\t} // else caller is creating a new vm using SCSIControllerTypes\n\t}\n\n\tsort.Ints(used)\n\n\tfor i, n := range scsiBusNumbers {\n\t\tif i == len(used) || n != used[i] {\n\t\t\treturn int32(n)\n\t\t}\n\t}\n\n\treturn -1\n}\n\n// FindNVMEController will find the named NVME controller if given, otherwise will pick an available controller.\n// An error is returned if the named controller is not found or not an NVME controller.  Or, if name is not\n// given and no available controller can be found.\nfunc (l VirtualDeviceList) FindNVMEController(name string) (*types.VirtualNVMEController, error) {\n\tif name != \"\" {\n\t\td := l.Find(name)\n\t\tif d == nil {\n\t\t\treturn nil, fmt.Errorf(\"device '%s' not found\", name)\n\t\t}\n\t\tif c, ok := d.(*types.VirtualNVMEController); ok {\n\t\t\treturn c, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"%s is not an NVME controller\", name)\n\t}\n\n\tc := l.PickController((*types.VirtualNVMEController)(nil))\n\tif c == nil {\n\t\treturn nil, errors.New(\"no available NVME controller\")\n\t}\n\n\treturn c.(*types.VirtualNVMEController), nil\n}\n\n// CreateNVMEController creates a new NVMWE controller.\nfunc (l VirtualDeviceList) CreateNVMEController() (types.BaseVirtualDevice, error) {\n\tnvme := &types.VirtualNVMEController{}\n\tnvme.BusNumber = l.newNVMEBusNumber()\n\tnvme.Key = l.NewKey()\n\n\treturn nvme, nil\n}\n\nvar nvmeBusNumbers = []int{0, 1, 2, 3}\n\n// newNVMEBusNumber returns the bus number to use for adding a new NVME bus device.\n// -1 is returned if there are no bus numbers available.\nfunc (l VirtualDeviceList) newNVMEBusNumber() int32 {\n\tvar used []int\n\n\tfor _, d := range l.SelectByType((*types.VirtualNVMEController)(nil)) {\n\t\tnum := d.(types.BaseVirtualController).GetVirtualController().BusNumber\n\t\tif num >= 0 {\n\t\t\tused = append(used, int(num))\n\t\t} // else caller is 
creating a new vm using NVMEControllerTypes\n\t}\n\n\tsort.Ints(used)\n\n\tfor i, n := range nvmeBusNumbers {\n\t\tif i == len(used) || n != used[i] {\n\t\t\treturn int32(n)\n\t\t}\n\t}\n\n\treturn -1\n}\n\n// FindDiskController will find an existing ide or scsi disk controller.\nfunc (l VirtualDeviceList) FindDiskController(name string) (types.BaseVirtualController, error) {\n\tswitch {\n\tcase name == \"ide\":\n\t\treturn l.FindIDEController(\"\")\n\tcase name == \"scsi\" || name == \"\":\n\t\treturn l.FindSCSIController(\"\")\n\tcase name == \"nvme\":\n\t\treturn l.FindNVMEController(\"\")\n\tdefault:\n\t\tif c, ok := l.Find(name).(types.BaseVirtualController); ok {\n\t\t\treturn c, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"%s is not a valid controller\", name)\n\t}\n}\n\n// PickController returns a controller of the given type(s).\n// If no controllers are found or have no available slots, then nil is returned.\nfunc (l VirtualDeviceList) PickController(kind types.BaseVirtualController) types.BaseVirtualController {\n\tl = l.SelectByType(kind.(types.BaseVirtualDevice)).Select(func(device types.BaseVirtualDevice) bool {\n\t\tnum := len(device.(types.BaseVirtualController).GetVirtualController().Device)\n\n\t\tswitch device.(type) {\n\t\tcase types.BaseVirtualSCSIController:\n\t\t\treturn num < 15\n\t\tcase *types.VirtualIDEController:\n\t\t\treturn num < 2\n\t\tcase *types.VirtualNVMEController:\n\t\t\treturn num < 8\n\t\tdefault:\n\t\t\treturn true\n\t\t}\n\t})\n\n\tif len(l) == 0 {\n\t\treturn nil\n\t}\n\n\treturn l[0].(types.BaseVirtualController)\n}\n\n// newUnitNumber returns the unit number to use for attaching a new device to the given controller.\nfunc (l VirtualDeviceList) newUnitNumber(c types.BaseVirtualController) int32 {\n\tunits := make([]bool, 30)\n\n\tswitch sc := c.(type) {\n\tcase types.BaseVirtualSCSIController:\n\t\t//  The SCSI controller sits on its own bus\n\t\tunits[sc.GetVirtualSCSIController().ScsiCtlrUnitNumber] = true\n\t}\n\n\tkey := 
c.GetVirtualController().Key\n\n\tfor _, device := range l {\n\t\td := device.GetVirtualDevice()\n\n\t\tif d.ControllerKey == key && d.UnitNumber != nil {\n\t\t\tunits[int(*d.UnitNumber)] = true\n\t\t}\n\t}\n\n\tfor unit, used := range units {\n\t\tif !used {\n\t\t\treturn int32(unit)\n\t\t}\n\t}\n\n\treturn -1\n}\n\n// NewKey returns the key to use for adding a new device to the device list.\n// The device list we're working with here may not be complete (e.g. when\n// we're only adding new devices), so any positive keys could conflict with device keys\n// that are already in use. To avoid this type of conflict, we can use negative keys\n// here, which will be resolved to positive keys by vSphere as the reconfiguration is done.\nfunc (l VirtualDeviceList) NewKey() int32 {\n\tvar key int32 = -200\n\n\tfor _, device := range l {\n\t\td := device.GetVirtualDevice()\n\t\tif d.Key < key {\n\t\t\tkey = d.Key\n\t\t}\n\t}\n\n\treturn key - 1\n}\n\n// AssignController assigns a device to a controller.\nfunc (l VirtualDeviceList) AssignController(device types.BaseVirtualDevice, c types.BaseVirtualController) {\n\td := device.GetVirtualDevice()\n\td.ControllerKey = c.GetVirtualController().Key\n\td.UnitNumber = new(int32)\n\t*d.UnitNumber = l.newUnitNumber(c)\n\tif d.Key == 0 {\n\t\td.Key = -1\n\t}\n}\n\n// CreateDisk creates a new VirtualDisk device which can be added to a VM.\nfunc (l VirtualDeviceList) CreateDisk(c types.BaseVirtualController, ds types.ManagedObjectReference, name string) *types.VirtualDisk {\n\t// If name is not specified, one will be chosen for you.\n\t// But if when given, make sure it ends in .vmdk, otherwise it will be treated as a directory.\n\tif len(name) > 0 && filepath.Ext(name) != \".vmdk\" {\n\t\tname += \".vmdk\"\n\t}\n\n\tdevice := &types.VirtualDisk{\n\t\tVirtualDevice: types.VirtualDevice{\n\t\t\tBacking: &types.VirtualDiskFlatVer2BackingInfo{\n\t\t\t\tDiskMode:        string(types.VirtualDiskModePersistent),\n\t\t\t\tThinProvisioned: 
types.NewBool(true),\n\t\t\t\tVirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{\n\t\t\t\t\tFileName:  name,\n\t\t\t\t\tDatastore: &ds,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tl.AssignController(device, c)\n\treturn device\n}\n\n// ChildDisk creates a new VirtualDisk device, linked to the given parent disk, which can be added to a VM.\nfunc (l VirtualDeviceList) ChildDisk(parent *types.VirtualDisk) *types.VirtualDisk {\n\tdisk := *parent\n\tbacking := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)\n\tp := new(DatastorePath)\n\tp.FromString(backing.FileName)\n\tp.Path = \"\"\n\n\t// Use specified disk as parent backing to a new disk.\n\tdisk.Backing = &types.VirtualDiskFlatVer2BackingInfo{\n\t\tVirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{\n\t\t\tFileName:  p.String(),\n\t\t\tDatastore: backing.Datastore,\n\t\t},\n\t\tParent:          backing,\n\t\tDiskMode:        backing.DiskMode,\n\t\tThinProvisioned: backing.ThinProvisioned,\n\t}\n\n\treturn &disk\n}\n\nfunc (l VirtualDeviceList) connectivity(device types.BaseVirtualDevice, v bool) error {\n\tc := device.GetVirtualDevice().Connectable\n\tif c == nil {\n\t\treturn fmt.Errorf(\"%s is not connectable\", l.Name(device))\n\t}\n\n\tc.Connected = v\n\tc.StartConnected = v\n\n\treturn nil\n}\n\n// Connect changes the device to connected, returns an error if the device is not connectable.\nfunc (l VirtualDeviceList) Connect(device types.BaseVirtualDevice) error {\n\treturn l.connectivity(device, true)\n}\n\n// Disconnect changes the device to disconnected, returns an error if the device is not connectable.\nfunc (l VirtualDeviceList) Disconnect(device types.BaseVirtualDevice) error {\n\treturn l.connectivity(device, false)\n}\n\n// FindCdrom finds a cdrom device with the given name, defaulting to the first cdrom device if any.\nfunc (l VirtualDeviceList) FindCdrom(name string) (*types.VirtualCdrom, error) {\n\tif name != \"\" {\n\t\td := l.Find(name)\n\t\tif d == nil 
{\n\t\t\treturn nil, fmt.Errorf(\"device '%s' not found\", name)\n\t\t}\n\t\tif c, ok := d.(*types.VirtualCdrom); ok {\n\t\t\treturn c, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"%s is not a cdrom device\", name)\n\t}\n\n\tc := l.SelectByType((*types.VirtualCdrom)(nil))\n\tif len(c) == 0 {\n\t\treturn nil, errors.New(\"no cdrom device found\")\n\t}\n\n\treturn c[0].(*types.VirtualCdrom), nil\n}\n\n// CreateCdrom creates a new VirtualCdrom device which can be added to a VM.\nfunc (l VirtualDeviceList) CreateCdrom(c *types.VirtualIDEController) (*types.VirtualCdrom, error) {\n\tdevice := &types.VirtualCdrom{}\n\n\tl.AssignController(device, c)\n\n\tl.setDefaultCdromBacking(device)\n\n\tdevice.Connectable = &types.VirtualDeviceConnectInfo{\n\t\tAllowGuestControl: true,\n\t\tConnected:         true,\n\t\tStartConnected:    true,\n\t}\n\n\treturn device, nil\n}\n\n// InsertIso changes the cdrom device backing to use the given iso file.\nfunc (l VirtualDeviceList) InsertIso(device *types.VirtualCdrom, iso string) *types.VirtualCdrom {\n\tdevice.Backing = &types.VirtualCdromIsoBackingInfo{\n\t\tVirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{\n\t\t\tFileName: iso,\n\t\t},\n\t}\n\n\treturn device\n}\n\n// EjectIso removes the iso file based backing and replaces with the default cdrom backing.\nfunc (l VirtualDeviceList) EjectIso(device *types.VirtualCdrom) *types.VirtualCdrom {\n\tl.setDefaultCdromBacking(device)\n\treturn device\n}\n\nfunc (l VirtualDeviceList) setDefaultCdromBacking(device *types.VirtualCdrom) {\n\tdevice.Backing = &types.VirtualCdromAtapiBackingInfo{\n\t\tVirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{\n\t\t\tDeviceName:    fmt.Sprintf(\"%s-%d-%d\", DeviceTypeCdrom, device.ControllerKey, device.UnitNumber),\n\t\t\tUseAutoDetect: types.NewBool(false),\n\t\t},\n\t}\n}\n\n// FindFloppy finds a floppy device with the given name, defaulting to the first floppy device if any.\nfunc (l VirtualDeviceList) FindFloppy(name 
string) (*types.VirtualFloppy, error) {\n\tif name != \"\" {\n\t\td := l.Find(name)\n\t\tif d == nil {\n\t\t\treturn nil, fmt.Errorf(\"device '%s' not found\", name)\n\t\t}\n\t\tif c, ok := d.(*types.VirtualFloppy); ok {\n\t\t\treturn c, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"%s is not a floppy device\", name)\n\t}\n\n\tc := l.SelectByType((*types.VirtualFloppy)(nil))\n\tif len(c) == 0 {\n\t\treturn nil, errors.New(\"no floppy device found\")\n\t}\n\n\treturn c[0].(*types.VirtualFloppy), nil\n}\n\n// CreateFloppy creates a new VirtualFloppy device which can be added to a VM.\nfunc (l VirtualDeviceList) CreateFloppy() (*types.VirtualFloppy, error) {\n\tdevice := &types.VirtualFloppy{}\n\n\tc := l.PickController((*types.VirtualSIOController)(nil))\n\tif c == nil {\n\t\treturn nil, errors.New(\"no available SIO controller\")\n\t}\n\n\tl.AssignController(device, c)\n\n\tl.setDefaultFloppyBacking(device)\n\n\tdevice.Connectable = &types.VirtualDeviceConnectInfo{\n\t\tAllowGuestControl: true,\n\t\tConnected:         true,\n\t\tStartConnected:    true,\n\t}\n\n\treturn device, nil\n}\n\n// InsertImg changes the floppy device backing to use the given img file.\nfunc (l VirtualDeviceList) InsertImg(device *types.VirtualFloppy, img string) *types.VirtualFloppy {\n\tdevice.Backing = &types.VirtualFloppyImageBackingInfo{\n\t\tVirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{\n\t\t\tFileName: img,\n\t\t},\n\t}\n\n\treturn device\n}\n\n// EjectImg removes the img file based backing and replaces with the default floppy backing.\nfunc (l VirtualDeviceList) EjectImg(device *types.VirtualFloppy) *types.VirtualFloppy {\n\tl.setDefaultFloppyBacking(device)\n\treturn device\n}\n\nfunc (l VirtualDeviceList) setDefaultFloppyBacking(device *types.VirtualFloppy) {\n\tdevice.Backing = &types.VirtualFloppyDeviceBackingInfo{\n\t\tVirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{\n\t\t\tDeviceName:    fmt.Sprintf(\"%s-%d\", DeviceTypeFloppy, 
device.UnitNumber),\n\t\t\tUseAutoDetect: types.NewBool(false),\n\t\t},\n\t}\n}\n\n// FindSerialPort finds a serial port device with the given name, defaulting to the first serial port device if any.\nfunc (l VirtualDeviceList) FindSerialPort(name string) (*types.VirtualSerialPort, error) {\n\tif name != \"\" {\n\t\td := l.Find(name)\n\t\tif d == nil {\n\t\t\treturn nil, fmt.Errorf(\"device '%s' not found\", name)\n\t\t}\n\t\tif c, ok := d.(*types.VirtualSerialPort); ok {\n\t\t\treturn c, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"%s is not a serial port device\", name)\n\t}\n\n\tc := l.SelectByType((*types.VirtualSerialPort)(nil))\n\tif len(c) == 0 {\n\t\treturn nil, errors.New(\"no serial port device found\")\n\t}\n\n\treturn c[0].(*types.VirtualSerialPort), nil\n}\n\n// CreateSerialPort creates a new VirtualSerialPort device which can be added to a VM.\nfunc (l VirtualDeviceList) CreateSerialPort() (*types.VirtualSerialPort, error) {\n\tdevice := &types.VirtualSerialPort{\n\t\tYieldOnPoll: true,\n\t}\n\n\tc := l.PickController((*types.VirtualSIOController)(nil))\n\tif c == nil {\n\t\treturn nil, errors.New(\"no available SIO controller\")\n\t}\n\n\tl.AssignController(device, c)\n\n\tl.setDefaultSerialPortBacking(device)\n\n\treturn device, nil\n}\n\n// ConnectSerialPort connects a serial port to a server or client uri.\nfunc (l VirtualDeviceList) ConnectSerialPort(device *types.VirtualSerialPort, uri string, client bool, proxyuri string) *types.VirtualSerialPort {\n\tif strings.HasPrefix(uri, \"[\") {\n\t\tdevice.Backing = &types.VirtualSerialPortFileBackingInfo{\n\t\t\tVirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{\n\t\t\t\tFileName: uri,\n\t\t\t},\n\t\t}\n\n\t\treturn device\n\t}\n\n\tdirection := types.VirtualDeviceURIBackingOptionDirectionServer\n\tif client {\n\t\tdirection = types.VirtualDeviceURIBackingOptionDirectionClient\n\t}\n\n\tdevice.Backing = &types.VirtualSerialPortURIBackingInfo{\n\t\tVirtualDeviceURIBackingInfo: 
types.VirtualDeviceURIBackingInfo{\n\t\t\tDirection:  string(direction),\n\t\t\tServiceURI: uri,\n\t\t\tProxyURI:   proxyuri,\n\t\t},\n\t}\n\n\treturn device\n}\n\n// DisconnectSerialPort disconnects the serial port backing.\nfunc (l VirtualDeviceList) DisconnectSerialPort(device *types.VirtualSerialPort) *types.VirtualSerialPort {\n\tl.setDefaultSerialPortBacking(device)\n\treturn device\n}\n\nfunc (l VirtualDeviceList) setDefaultSerialPortBacking(device *types.VirtualSerialPort) {\n\tdevice.Backing = &types.VirtualSerialPortURIBackingInfo{\n\t\tVirtualDeviceURIBackingInfo: types.VirtualDeviceURIBackingInfo{\n\t\t\tDirection:  \"client\",\n\t\t\tServiceURI: \"localhost:0\",\n\t\t},\n\t}\n}\n\n// CreateEthernetCard creates a new VirtualEthernetCard of the given name name and initialized with the given backing.\nfunc (l VirtualDeviceList) CreateEthernetCard(name string, backing types.BaseVirtualDeviceBackingInfo) (types.BaseVirtualDevice, error) {\n\tctypes := EthernetCardTypes()\n\n\tif name == \"\" {\n\t\tname = ctypes.deviceName(ctypes[0])\n\t}\n\n\tfound := ctypes.Select(func(device types.BaseVirtualDevice) bool {\n\t\treturn l.deviceName(device) == name\n\t})\n\n\tif len(found) == 0 {\n\t\treturn nil, fmt.Errorf(\"unknown ethernet card type '%s'\", name)\n\t}\n\n\tc, ok := found[0].(types.BaseVirtualEthernetCard)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"invalid ethernet card type '%s'\", name)\n\t}\n\n\tc.GetVirtualEthernetCard().Backing = backing\n\n\treturn c.(types.BaseVirtualDevice), nil\n}\n\n// PrimaryMacAddress returns the MacAddress field of the primary VirtualEthernetCard\nfunc (l VirtualDeviceList) PrimaryMacAddress() string {\n\teth0 := l.Find(\"ethernet-0\")\n\n\tif eth0 == nil {\n\t\treturn \"\"\n\t}\n\n\treturn eth0.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard().MacAddress\n}\n\n// convert a BaseVirtualDevice to a BaseVirtualMachineBootOptionsBootableDevice\nvar bootableDevices = map[string]func(device types.BaseVirtualDevice) 
types.BaseVirtualMachineBootOptionsBootableDevice{\n\tDeviceTypeCdrom: func(types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice {\n\t\treturn &types.VirtualMachineBootOptionsBootableCdromDevice{}\n\t},\n\tDeviceTypeDisk: func(d types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice {\n\t\treturn &types.VirtualMachineBootOptionsBootableDiskDevice{\n\t\t\tDeviceKey: d.GetVirtualDevice().Key,\n\t\t}\n\t},\n\tDeviceTypeEthernet: func(d types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice {\n\t\treturn &types.VirtualMachineBootOptionsBootableEthernetDevice{\n\t\t\tDeviceKey: d.GetVirtualDevice().Key,\n\t\t}\n\t},\n\tDeviceTypeFloppy: func(types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice {\n\t\treturn &types.VirtualMachineBootOptionsBootableFloppyDevice{}\n\t},\n}\n\n// BootOrder returns a list of devices which can be used to set boot order via VirtualMachine.SetBootOptions.\n// The order can any of \"ethernet\", \"cdrom\", \"floppy\" or \"disk\" or by specific device name.\nfunc (l VirtualDeviceList) BootOrder(order []string) []types.BaseVirtualMachineBootOptionsBootableDevice {\n\tvar devices []types.BaseVirtualMachineBootOptionsBootableDevice\n\n\tfor _, name := range order {\n\t\tif kind, ok := bootableDevices[name]; ok {\n\t\t\tfor _, device := range l {\n\t\t\t\tif l.Type(device) == name {\n\t\t\t\t\tdevices = append(devices, kind(device))\n\t\t\t\t}\n\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif d := l.Find(name); d != nil {\n\t\t\tif kind, ok := bootableDevices[l.Type(d)]; ok {\n\t\t\t\tdevices = append(devices, kind(d))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn devices\n}\n\n// SelectBootOrder returns an ordered list of devices matching the given bootable device order\nfunc (l VirtualDeviceList) SelectBootOrder(order []types.BaseVirtualMachineBootOptionsBootableDevice) VirtualDeviceList {\n\tvar devices VirtualDeviceList\n\n\tfor _, bd := range order {\n\t\tfor _, device := range l 
{\n\t\t\tif kind, ok := bootableDevices[l.Type(device)]; ok {\n\t\t\t\tif reflect.DeepEqual(kind(device), bd) {\n\t\t\t\t\tdevices = append(devices, device)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn devices\n}\n\n// TypeName returns the vmodl type name of the device\nfunc (l VirtualDeviceList) TypeName(device types.BaseVirtualDevice) string {\n\tdtype := reflect.TypeOf(device)\n\tif dtype == nil {\n\t\treturn \"\"\n\t}\n\treturn dtype.Elem().Name()\n}\n\nvar deviceNameRegexp = regexp.MustCompile(`(?:Virtual)?(?:Machine)?(\\w+?)(?:Card|Device|Controller)?$`)\n\nfunc (l VirtualDeviceList) deviceName(device types.BaseVirtualDevice) string {\n\tname := \"device\"\n\ttypeName := l.TypeName(device)\n\n\tm := deviceNameRegexp.FindStringSubmatch(typeName)\n\tif len(m) == 2 {\n\t\tname = strings.ToLower(m[1])\n\t}\n\n\treturn name\n}\n\n// Type returns a human-readable name for the given device\nfunc (l VirtualDeviceList) Type(device types.BaseVirtualDevice) string {\n\tswitch device.(type) {\n\tcase types.BaseVirtualEthernetCard:\n\t\treturn DeviceTypeEthernet\n\tcase *types.ParaVirtualSCSIController:\n\t\treturn \"pvscsi\"\n\tcase *types.VirtualLsiLogicSASController:\n\t\treturn \"lsilogic-sas\"\n\tcase *types.VirtualNVMEController:\n\t\treturn \"nvme\"\n\tdefault:\n\t\treturn l.deviceName(device)\n\t}\n}\n\n// Name returns a stable, human-readable name for the given device\nfunc (l VirtualDeviceList) Name(device types.BaseVirtualDevice) string {\n\tvar key string\n\tvar UnitNumber int32\n\td := device.GetVirtualDevice()\n\tif d.UnitNumber != nil {\n\t\tUnitNumber = *d.UnitNumber\n\t}\n\n\tdtype := l.Type(device)\n\tswitch dtype {\n\tcase DeviceTypeEthernet:\n\t\tkey = fmt.Sprintf(\"%d\", UnitNumber-7)\n\tcase DeviceTypeDisk:\n\t\tkey = fmt.Sprintf(\"%d-%d\", d.ControllerKey, UnitNumber)\n\tdefault:\n\t\tkey = fmt.Sprintf(\"%d\", d.Key)\n\t}\n\n\treturn fmt.Sprintf(\"%s-%s\", dtype, key)\n}\n\n// ConfigSpec creates a virtual machine configuration spec for\n// the 
specified operation, for the list of devices in the device list.\nfunc (l VirtualDeviceList) ConfigSpec(op types.VirtualDeviceConfigSpecOperation) ([]types.BaseVirtualDeviceConfigSpec, error) {\n\tvar fop types.VirtualDeviceConfigSpecFileOperation\n\tswitch op {\n\tcase types.VirtualDeviceConfigSpecOperationAdd:\n\t\tfop = types.VirtualDeviceConfigSpecFileOperationCreate\n\tcase types.VirtualDeviceConfigSpecOperationEdit:\n\t\tfop = types.VirtualDeviceConfigSpecFileOperationReplace\n\tcase types.VirtualDeviceConfigSpecOperationRemove:\n\t\tfop = types.VirtualDeviceConfigSpecFileOperationDestroy\n\tdefault:\n\t\tpanic(\"unknown op\")\n\t}\n\n\tvar res []types.BaseVirtualDeviceConfigSpec\n\tfor _, device := range l {\n\t\tconfig := &types.VirtualDeviceConfigSpec{\n\t\t\tDevice:    device,\n\t\t\tOperation: op,\n\t\t}\n\n\t\tif disk, ok := device.(*types.VirtualDisk); ok {\n\t\t\tconfig.FileOperation = fop\n\n\t\t\t// Special case to attach an existing disk\n\t\t\tif op == types.VirtualDeviceConfigSpecOperationAdd && disk.CapacityInKB == 0 {\n\t\t\t\tchildDisk := false\n\t\t\t\tif b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {\n\t\t\t\t\tchildDisk = b.Parent != nil\n\t\t\t\t}\n\n\t\t\t\tif !childDisk {\n\t\t\t\t\t// Existing disk, clear file operation\n\t\t\t\t\tconfig.FileOperation = \"\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tres = append(res, config)\n\t}\n\n\treturn res, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/virtual_device_list_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"math/rand\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nfunc intPtrValue(val int32) *int32 {\n\treturn &val\n}\n\nvar devices = VirtualDeviceList([]types.BaseVirtualDevice{\n\t&types.VirtualIDEController{\n\t\tVirtualController: types.VirtualController{\n\t\t\tVirtualDevice: types.VirtualDevice{\n\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\tKey:         200,\n\t\t\t\tDeviceInfo: &types.Description{\n\t\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\t\tLabel:       \"IDE 0\",\n\t\t\t\t\tSummary:     \"IDE 0\",\n\t\t\t\t},\n\t\t\t\tBacking:       nil,\n\t\t\t\tConnectable:   (*types.VirtualDeviceConnectInfo)(nil),\n\t\t\t\tSlotInfo:      nil,\n\t\t\t\tControllerKey: 0,\n\t\t\t\tUnitNumber:    intPtrValue(0),\n\t\t\t},\n\t\t\tBusNumber: 0,\n\t\t\tDevice:    []int32{3001, 3000},\n\t\t},\n\t},\n\t&types.VirtualIDEController{\n\t\tVirtualController: types.VirtualController{\n\t\t\tVirtualDevice: types.VirtualDevice{\n\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\tKey:         201,\n\t\t\t\tDeviceInfo: &types.Description{\n\t\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\t\tLabel:       \"IDE 1\",\n\t\t\t\t\tSummary:     \"IDE 1\",\n\t\t\t\t},\n\t\t\t\tBacking:       nil,\n\t\t\t\tConnectable:   (*types.VirtualDeviceConnectInfo)(nil),\n\t\t\t\tSlotInfo:      
nil,\n\t\t\t\tControllerKey: 0,\n\t\t\t\tUnitNumber:    intPtrValue(0),\n\t\t\t},\n\t\t\tBusNumber: 1,\n\t\t\tDevice:    []int32{3002},\n\t\t},\n\t},\n\t&types.VirtualPS2Controller{\n\t\tVirtualController: types.VirtualController{\n\t\t\tVirtualDevice: types.VirtualDevice{\n\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\tKey:         300,\n\t\t\t\tDeviceInfo: &types.Description{\n\t\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\t\tLabel:       \"PS2 controller 0\",\n\t\t\t\t\tSummary:     \"PS2 controller 0\",\n\t\t\t\t},\n\t\t\t\tBacking:       nil,\n\t\t\t\tConnectable:   (*types.VirtualDeviceConnectInfo)(nil),\n\t\t\t\tSlotInfo:      nil,\n\t\t\t\tControllerKey: 0,\n\t\t\t\tUnitNumber:    intPtrValue(0),\n\t\t\t},\n\t\t\tBusNumber: 0,\n\t\t\tDevice:    []int32{600, 700},\n\t\t},\n\t},\n\t&types.VirtualPCIController{\n\t\tVirtualController: types.VirtualController{\n\t\t\tVirtualDevice: types.VirtualDevice{\n\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\tKey:         100,\n\t\t\t\tDeviceInfo: &types.Description{\n\t\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\t\tLabel:       \"PCI controller 0\",\n\t\t\t\t\tSummary:     \"PCI controller 0\",\n\t\t\t\t},\n\t\t\t\tBacking:       nil,\n\t\t\t\tConnectable:   (*types.VirtualDeviceConnectInfo)(nil),\n\t\t\t\tSlotInfo:      nil,\n\t\t\t\tControllerKey: 0,\n\t\t\t\tUnitNumber:    intPtrValue(0),\n\t\t\t},\n\t\t\tBusNumber: 0,\n\t\t\tDevice:    []int32{500, 12000, 1000, 4000},\n\t\t},\n\t},\n\t&types.VirtualSIOController{\n\t\tVirtualController: types.VirtualController{\n\t\t\tVirtualDevice: types.VirtualDevice{\n\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\tKey:         400,\n\t\t\t\tDeviceInfo: &types.Description{\n\t\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\t\tLabel:       \"SIO controller 0\",\n\t\t\t\t\tSummary:     \"SIO controller 0\",\n\t\t\t\t},\n\t\t\t\tBacking:       nil,\n\t\t\t\tConnectable:   (*types.VirtualDeviceConnectInfo)(nil),\n\t\t\t\tSlotInfo:      
nil,\n\t\t\t\tControllerKey: 0,\n\t\t\t\tUnitNumber:    intPtrValue(0),\n\t\t\t},\n\t\t\tBusNumber: 0,\n\t\t\tDevice:    []int32{9000},\n\t\t},\n\t},\n\t&types.VirtualKeyboard{\n\t\tVirtualDevice: types.VirtualDevice{\n\t\t\tDynamicData: types.DynamicData{},\n\t\t\tKey:         600,\n\t\t\tDeviceInfo: &types.Description{\n\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\tLabel:       \"Keyboard \",\n\t\t\t\tSummary:     \"Keyboard\",\n\t\t\t},\n\t\t\tBacking:       nil,\n\t\t\tConnectable:   (*types.VirtualDeviceConnectInfo)(nil),\n\t\t\tSlotInfo:      nil,\n\t\t\tControllerKey: 300,\n\t\t\tUnitNumber:    intPtrValue(0),\n\t\t},\n\t},\n\t&types.VirtualPointingDevice{\n\t\tVirtualDevice: types.VirtualDevice{\n\t\t\tDynamicData: types.DynamicData{},\n\t\t\tKey:         700,\n\t\t\tDeviceInfo: &types.Description{\n\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\tLabel:       \"Pointing device\",\n\t\t\t\tSummary:     \"Pointing device; Device\",\n\t\t\t},\n\t\t\tBacking: &types.VirtualPointingDeviceDeviceBackingInfo{\n\t\t\t\tVirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{},\n\t\t\t\tHostPointingDevice:             \"autodetect\",\n\t\t\t},\n\t\t\tConnectable:   (*types.VirtualDeviceConnectInfo)(nil),\n\t\t\tSlotInfo:      nil,\n\t\t\tControllerKey: 300,\n\t\t\tUnitNumber:    intPtrValue(1),\n\t\t},\n\t},\n\t&types.VirtualMachineVideoCard{\n\t\tVirtualDevice: types.VirtualDevice{\n\t\t\tDynamicData: types.DynamicData{},\n\t\t\tKey:         500,\n\t\t\tDeviceInfo: &types.Description{\n\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\tLabel:       \"Video card \",\n\t\t\t\tSummary:     \"Video card\",\n\t\t\t},\n\t\t\tBacking:       nil,\n\t\t\tConnectable:   (*types.VirtualDeviceConnectInfo)(nil),\n\t\t\tSlotInfo:      nil,\n\t\t\tControllerKey: 100,\n\t\t\tUnitNumber:    intPtrValue(0),\n\t\t},\n\t\tVideoRamSizeInKB: 4096,\n\t\tNumDisplays:      1,\n\t\tUseAutoDetect:    types.NewBool(false),\n\t\tEnable3DSupport:  
types.NewBool(false),\n\t\tUse3dRenderer:    \"automatic\",\n\t},\n\t&types.VirtualMachineVMCIDevice{\n\t\tVirtualDevice: types.VirtualDevice{\n\t\t\tDynamicData: types.DynamicData{},\n\t\t\tKey:         12000,\n\t\t\tDeviceInfo: &types.Description{\n\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\tLabel:       \"VMCI device\",\n\t\t\t\tSummary:     \"Device on the virtual machine PCI bus that provides support for the virtual machine communication interface\",\n\t\t\t},\n\t\t\tBacking:     nil,\n\t\t\tConnectable: (*types.VirtualDeviceConnectInfo)(nil),\n\t\t\tSlotInfo: &types.VirtualDevicePciBusSlotInfo{\n\t\t\t\tVirtualDeviceBusSlotInfo: types.VirtualDeviceBusSlotInfo{},\n\t\t\t\tPciSlotNumber:            33,\n\t\t\t},\n\t\t\tControllerKey: 100,\n\t\t\tUnitNumber:    intPtrValue(17),\n\t\t},\n\t\tId: 1754519335,\n\t\tAllowUnrestrictedCommunication: types.NewBool(false),\n\t},\n\t&types.VirtualLsiLogicController{\n\t\tVirtualSCSIController: types.VirtualSCSIController{\n\t\t\tVirtualController: types.VirtualController{\n\t\t\t\tVirtualDevice: types.VirtualDevice{\n\t\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\t\tKey:         1000,\n\t\t\t\t\tDeviceInfo: &types.Description{\n\t\t\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\t\t\tLabel:       \"SCSI controller 0\",\n\t\t\t\t\t\tSummary:     \"LSI Logic\",\n\t\t\t\t\t},\n\t\t\t\t\tBacking:       nil,\n\t\t\t\t\tConnectable:   (*types.VirtualDeviceConnectInfo)(nil),\n\t\t\t\t\tSlotInfo:      nil,\n\t\t\t\t\tControllerKey: 100,\n\t\t\t\t\tUnitNumber:    intPtrValue(3),\n\t\t\t\t},\n\t\t\t\tBusNumber: 0,\n\t\t\t\tDevice:    nil,\n\t\t\t},\n\t\t\tHotAddRemove:       types.NewBool(true),\n\t\t\tSharedBus:          \"noSharing\",\n\t\t\tScsiCtlrUnitNumber: 7,\n\t\t},\n\t},\n\t&types.VirtualCdrom{\n\t\tVirtualDevice: types.VirtualDevice{\n\t\t\tDynamicData: types.DynamicData{},\n\t\t\tKey:         3001,\n\t\t\tDeviceInfo: &types.Description{\n\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\tLabel:       
\"CD/DVD drive 1\",\n\t\t\t\tSummary:     \"ISO [datastore1] ttylinux-pc_i486-16.1.iso\",\n\t\t\t},\n\t\t\tBacking: &types.VirtualCdromIsoBackingInfo{\n\t\t\t\tVirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{\n\t\t\t\t\tVirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{},\n\t\t\t\t\tFileName:                 \"[datastore1] foo.iso\",\n\t\t\t\t\tDatastore:                &types.ManagedObjectReference{Type: \"Datastore\", Value: \"53fe43cc-75dc5110-3643-000c2918dc41\"},\n\t\t\t\t\tBackingObjectId:          \"\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tConnectable: &types.VirtualDeviceConnectInfo{\n\t\t\t\tDynamicData:       types.DynamicData{},\n\t\t\t\tStartConnected:    true,\n\t\t\t\tAllowGuestControl: true,\n\t\t\t\tConnected:         false,\n\t\t\t\tStatus:            \"untried\",\n\t\t\t},\n\t\t\tSlotInfo:      nil,\n\t\t\tControllerKey: 200,\n\t\t\tUnitNumber:    intPtrValue(1),\n\t\t},\n\t},\n\t&types.VirtualDisk{\n\t\tVirtualDevice: types.VirtualDevice{\n\t\t\tDynamicData: types.DynamicData{},\n\t\t\tKey:         3000,\n\t\t\tDeviceInfo: &types.Description{\n\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\tLabel:       \"Hard disk 1\",\n\t\t\t\tSummary:     \"30,720 KB\",\n\t\t\t},\n\t\t\tBacking: &types.VirtualDiskFlatVer2BackingInfo{\n\t\t\t\tVirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{\n\t\t\t\t\tVirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{},\n\t\t\t\t\tFileName:                 \"[datastore1] bar/bar.vmdk\",\n\t\t\t\t\tDatastore:                &types.ManagedObjectReference{Type: \"Datastore\", Value: \"53fe43cc-75dc5110-3643-000c2918dc41\"},\n\t\t\t\t\tBackingObjectId:          \"3-3000-0\",\n\t\t\t\t},\n\t\t\t\tDiskMode:        \"persistent\",\n\t\t\t\tSplit:           types.NewBool(false),\n\t\t\t\tWriteThrough:    types.NewBool(false),\n\t\t\t\tThinProvisioned: types.NewBool(false),\n\t\t\t\tEagerlyScrub:    types.NewBool(true),\n\t\t\t\tUuid:            
\"6000C296-d0af-1209-1975-10c98eae10e4\",\n\t\t\t\tContentId:       \"d46395062e2d1b1790985bdec573b211\",\n\t\t\t\tChangeId:        \"\",\n\t\t\t\tParent: &types.VirtualDiskFlatVer2BackingInfo{\n\t\t\t\t\tVirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{\n\t\t\t\t\t\tVirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{},\n\t\t\t\t\t\tFileName:                 \"[datastore1] ttylinux.vmdk\",\n\t\t\t\t\t\tDatastore:                &types.ManagedObjectReference{Type: \"Datastore\", Value: \"53fe43cc-75dc5110-3643-000c2918dc41\"},\n\t\t\t\t\t\tBackingObjectId:          \"3-3000-1\",\n\t\t\t\t\t},\n\t\t\t\t\tDiskMode:        \"persistent\",\n\t\t\t\t\tSplit:           types.NewBool(false),\n\t\t\t\t\tWriteThrough:    types.NewBool(false),\n\t\t\t\t\tThinProvisioned: types.NewBool(false),\n\t\t\t\t\tEagerlyScrub:    types.NewBool(true),\n\t\t\t\t\tUuid:            \"6000C296-d0af-1209-1975-10c98eae10e4\",\n\t\t\t\t\tContentId:       \"1c2dad9e1662219e962a620c6d238a7c\",\n\t\t\t\t\tChangeId:        \"\",\n\t\t\t\t\tParent:          (*types.VirtualDiskFlatVer2BackingInfo)(nil),\n\t\t\t\t\tDeltaDiskFormat: \"\",\n\t\t\t\t\tDigestEnabled:   types.NewBool(false),\n\t\t\t\t\tDeltaGrainSize:  0,\n\t\t\t\t},\n\t\t\t\tDeltaDiskFormat: \"redoLogFormat\",\n\t\t\t\tDigestEnabled:   types.NewBool(false),\n\t\t\t\tDeltaGrainSize:  0,\n\t\t\t},\n\t\t\tConnectable:   (*types.VirtualDeviceConnectInfo)(nil),\n\t\t\tSlotInfo:      nil,\n\t\t\tControllerKey: 200,\n\t\t\tUnitNumber:    intPtrValue(0),\n\t\t},\n\t\tCapacityInKB:    30720,\n\t\tCapacityInBytes: 31457280,\n\t\tShares: &types.SharesInfo{\n\t\t\tDynamicData: types.DynamicData{},\n\t\t\tShares:      1000,\n\t\t\tLevel:       \"normal\",\n\t\t},\n\t\tStorageIOAllocation: &types.StorageIOAllocationInfo{\n\t\t\tDynamicData: types.DynamicData{},\n\t\t\tLimit:       -1,\n\t\t\tShares: &types.SharesInfo{\n\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\tShares:      1000,\n\t\t\t\tLevel:       
\"normal\",\n\t\t\t},\n\t\t\tReservation: 0,\n\t\t},\n\t\tDiskObjectId:          \"3-3000\",\n\t\tVFlashCacheConfigInfo: (*types.VirtualDiskVFlashCacheConfigInfo)(nil),\n\t},\n\t&types.VirtualDisk{\n\t\tVirtualDevice: types.VirtualDevice{\n\t\t\tDynamicData: types.DynamicData{},\n\t\t\tKey:         3002,\n\t\t\tDeviceInfo: &types.Description{\n\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\tLabel:       \"Hard disk 2\",\n\t\t\t\tSummary:     \"10,000,000 KB\",\n\t\t\t},\n\t\t\tBacking: &types.VirtualDiskFlatVer2BackingInfo{\n\t\t\t\tVirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{\n\t\t\t\t\tVirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{},\n\t\t\t\t\tFileName:                 \"[datastore1] bar/disk-201-0.vmdk\",\n\t\t\t\t\tDatastore:                &types.ManagedObjectReference{Type: \"Datastore\", Value: \"53fe43cc-75dc5110-3643-000c2918dc41\"},\n\t\t\t\t\tBackingObjectId:          \"3-3002-0\",\n\t\t\t\t},\n\t\t\t\tDiskMode:        \"persistent\",\n\t\t\t\tSplit:           types.NewBool(false),\n\t\t\t\tWriteThrough:    types.NewBool(false),\n\t\t\t\tThinProvisioned: types.NewBool(true),\n\t\t\t\tEagerlyScrub:    types.NewBool(false),\n\t\t\t\tUuid:            \"6000C293-fde5-4457-5118-dd267ea992a7\",\n\t\t\t\tContentId:       \"90399989b9d520eed6793ab0fffffffe\",\n\t\t\t\tChangeId:        \"\",\n\t\t\t\tParent:          (*types.VirtualDiskFlatVer2BackingInfo)(nil),\n\t\t\t\tDeltaDiskFormat: \"\",\n\t\t\t\tDigestEnabled:   types.NewBool(false),\n\t\t\t\tDeltaGrainSize:  0,\n\t\t\t},\n\t\t\tConnectable:   (*types.VirtualDeviceConnectInfo)(nil),\n\t\t\tSlotInfo:      nil,\n\t\t\tControllerKey: 201,\n\t\t\tUnitNumber:    intPtrValue(0),\n\t\t},\n\t\tCapacityInKB:    10000000,\n\t\tCapacityInBytes: 10240000000,\n\t\tShares: &types.SharesInfo{\n\t\t\tDynamicData: types.DynamicData{},\n\t\t\tShares:      1000,\n\t\t\tLevel:       \"normal\",\n\t\t},\n\t\tStorageIOAllocation: &types.StorageIOAllocationInfo{\n\t\t\tDynamicData: 
types.DynamicData{},\n\t\t\tLimit:       -1,\n\t\t\tShares: &types.SharesInfo{\n\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\tShares:      1000,\n\t\t\t\tLevel:       \"normal\",\n\t\t\t},\n\t\t\tReservation: 0,\n\t\t},\n\t\tDiskObjectId:          \"3-3002\",\n\t\tVFlashCacheConfigInfo: (*types.VirtualDiskVFlashCacheConfigInfo)(nil),\n\t},\n\t&types.VirtualE1000{\n\t\tVirtualEthernetCard: types.VirtualEthernetCard{\n\t\t\tVirtualDevice: types.VirtualDevice{\n\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\tKey:         4000,\n\t\t\t\tDeviceInfo: &types.Description{\n\t\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\t\tLabel:       \"Network adapter 1\",\n\t\t\t\t\tSummary:     \"VM Network\",\n\t\t\t\t},\n\t\t\t\tBacking: &types.VirtualEthernetCardNetworkBackingInfo{\n\t\t\t\t\tVirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{\n\t\t\t\t\t\tVirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{},\n\t\t\t\t\t\tDeviceName:               \"VM Network\",\n\t\t\t\t\t\tUseAutoDetect:            types.NewBool(false),\n\t\t\t\t\t},\n\t\t\t\t\tNetwork:           &types.ManagedObjectReference{Type: \"Network\", Value: \"HaNetwork-VM Network\"},\n\t\t\t\t\tInPassthroughMode: types.NewBool(false),\n\t\t\t\t},\n\t\t\t\tConnectable: &types.VirtualDeviceConnectInfo{\n\t\t\t\t\tDynamicData:       types.DynamicData{},\n\t\t\t\t\tStartConnected:    true,\n\t\t\t\t\tAllowGuestControl: true,\n\t\t\t\t\tConnected:         false,\n\t\t\t\t\tStatus:            \"untried\",\n\t\t\t\t},\n\t\t\t\tSlotInfo: &types.VirtualDevicePciBusSlotInfo{\n\t\t\t\t\tVirtualDeviceBusSlotInfo: types.VirtualDeviceBusSlotInfo{},\n\t\t\t\t\tPciSlotNumber:            32,\n\t\t\t\t},\n\t\t\t\tControllerKey: 100,\n\t\t\t\tUnitNumber:    intPtrValue(7),\n\t\t\t},\n\t\t\tAddressType:      \"generated\",\n\t\t\tMacAddress:       \"00:0c:29:93:d7:27\",\n\t\t\tWakeOnLanEnabled: types.NewBool(true),\n\t\t},\n\t},\n\t&types.VirtualSerialPort{\n\t\tVirtualDevice: 
types.VirtualDevice{\n\t\t\tDynamicData: types.DynamicData{},\n\t\t\tKey:         9000,\n\t\t\tDeviceInfo: &types.Description{\n\t\t\t\tDynamicData: types.DynamicData{},\n\t\t\t\tLabel:       \"Serial port 1\",\n\t\t\t\tSummary:     \"Remote localhost:0\",\n\t\t\t},\n\t\t\tBacking: &types.VirtualSerialPortURIBackingInfo{\n\t\t\t\tVirtualDeviceURIBackingInfo: types.VirtualDeviceURIBackingInfo{\n\t\t\t\t\tVirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{},\n\t\t\t\t\tServiceURI:               \"localhost:0\",\n\t\t\t\t\tDirection:                \"client\",\n\t\t\t\t\tProxyURI:                 \"\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tConnectable: &types.VirtualDeviceConnectInfo{\n\t\t\t\tDynamicData:       types.DynamicData{},\n\t\t\t\tStartConnected:    true,\n\t\t\t\tAllowGuestControl: true,\n\t\t\t\tConnected:         false,\n\t\t\t\tStatus:            \"untried\",\n\t\t\t},\n\t\t\tSlotInfo:      nil,\n\t\t\tControllerKey: 400,\n\t\t\tUnitNumber:    intPtrValue(0),\n\t\t},\n\t\tYieldOnPoll: true,\n\t},\n})\n\nfunc TestSelectByType(t *testing.T) {\n\ttests := []struct {\n\t\tdtype  types.BaseVirtualDevice\n\t\texpect int\n\t}{\n\t\t{\n\t\t\t(*types.VirtualCdrom)(nil),\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t(*types.VirtualEthernetCard)(nil),\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t(*types.VirtualDisk)(nil),\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t(*types.VirtualController)(nil),\n\t\t\t6,\n\t\t},\n\t\t{\n\t\t\t(*types.VirtualIDEController)(nil),\n\t\t\t2,\n\t\t},\n\t\t{\n\t\t\t(*types.VirtualSCSIController)(nil),\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t(*types.VirtualLsiLogicController)(nil),\n\t\t\t1,\n\t\t},\n\t\t{\n\t\t\t(*types.ParaVirtualSCSIController)(nil),\n\t\t\t0,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\td := devices.SelectByType(test.dtype)\n\n\t\tif len(d) != test.expect {\n\t\t\tt.Errorf(\"%#v has %d\", test.dtype, len(devices))\n\t\t}\n\t}\n}\n\nfunc TestSelectByBackingInfo(t *testing.T) {\n\ttests := 
[]types.BaseVirtualDeviceBackingInfo{\n\t\t&types.VirtualEthernetCardNetworkBackingInfo{\n\t\t\tVirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{\n\t\t\t\tDeviceName: \"VM Network\",\n\t\t\t},\n\t\t},\n\t\t&types.VirtualDiskFlatVer2BackingInfo{\n\t\t\tVirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{\n\t\t\t\tFileName: \"[datastore1] bar/bar.vmdk\",\n\t\t\t},\n\t\t},\n\t\t&types.VirtualDiskFlatVer2BackingInfo{\n\t\t\tParent: &types.VirtualDiskFlatVer2BackingInfo{\n\t\t\t\tVirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{\n\t\t\t\t\tFileName: \"[datastore1] ttylinux.vmdk\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t&types.VirtualCdromIsoBackingInfo{\n\t\t\tVirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{\n\t\t\t\tVirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{},\n\t\t\t\tFileName:                 \"[datastore1] foo.iso\",\n\t\t\t},\n\t\t},\n\t\t(*types.VirtualCdromIsoBackingInfo)(nil),\n\t\t&types.VirtualSerialPortURIBackingInfo{\n\t\t\tVirtualDeviceURIBackingInfo: types.VirtualDeviceURIBackingInfo{\n\t\t\t\tVirtualDeviceBackingInfo: types.VirtualDeviceBackingInfo{},\n\t\t\t\tServiceURI:               \"localhost:0\",\n\t\t\t\tDirection:                \"client\",\n\t\t\t\tProxyURI:                 \"\",\n\t\t\t},\n\t\t},\n\t\t(*types.VirtualSerialPortURIBackingInfo)(nil),\n\t}\n\n\tfor _, test := range tests {\n\t\tl := devices.SelectByBackingInfo(test)\n\n\t\tif len(l) != 1 {\n\t\t\tt.Errorf(\"Expected 1, got %d: %#v\", len(l), test)\n\t\t}\n\t}\n\n\t// test selecting by backing type\n\ttests = []types.BaseVirtualDeviceBackingInfo{\n\t\t(*types.VirtualDiskFlatVer2BackingInfo)(nil),\n\t}\n\n\tfor _, test := range tests {\n\t\tl := devices.SelectByBackingInfo(test)\n\n\t\tif len(l) != 2 {\n\t\t\tt.Errorf(\"Expected 2, got %d: %#v\", len(l), test)\n\t\t}\n\t}\n}\n\nfunc TestFind(t *testing.T) {\n\tfor _, device := range devices {\n\t\tname := devices.Name(device)\n\t\td := 
devices.Find(name)\n\t\tif name != devices.Name(d) {\n\t\t\tt.Errorf(\"expected name: %s, got: %s\", name, devices.Name(d))\n\t\t}\n\t}\n\n\td := devices.Find(\"enoent\")\n\tif d != nil {\n\t\tt.Errorf(\"unexpected: %#v\", d)\n\t}\n}\n\nfunc TestFindController(t *testing.T) {\n\tfor _, name := range []string{\"\", \"ide-200\"} {\n\t\t_, err := devices.FindIDEController(name)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n\n\tfor _, name := range []string{\"\", \"lsilogic-1000\"} {\n\t\t_, err := devices.FindSCSIController(name)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n\n\tfns := []func() error{\n\t\tfunc() error {\n\t\t\t_, err := devices.FindIDEController(\"lsilogic-1000\")\n\t\t\treturn err\n\t\t},\n\t\tfunc() error {\n\t\t\t_, err := devices.FindSCSIController(\"ide-200\")\n\t\t\treturn err\n\t\t},\n\t}\n\n\tfor _, f := range fns {\n\t\terr := f()\n\t\tif err == nil {\n\t\t\tt.Error(\"should fail\")\n\t\t}\n\t}\n}\n\nfunc TestPickController(t *testing.T) {\n\tlist := devices\n\n\ttests := []struct {\n\t\tctype types.BaseVirtualController\n\t\tkey   int32\n\t\tunit  int32\n\t}{\n\t\t{\n\t\t\t(*types.VirtualIDEController)(nil), 201, 1,\n\t\t},\n\t\t{\n\t\t\t(*types.VirtualSCSIController)(nil), 1000, 0,\n\t\t},\n\t\t{\n\t\t\t(*types.VirtualSCSIController)(nil), 1000, 1,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tc := list.PickController(test.ctype).GetVirtualController()\n\n\t\tkey := c.Key\n\t\tif key != test.key {\n\t\t\tt.Errorf(\"expected controller key: %d, got: %d\\n\", test.key, key)\n\t\t}\n\n\t\tunit := list.newUnitNumber(c)\n\t\tif unit != test.unit {\n\t\t\tt.Errorf(\"expected unit number: %d, got: %d\\n\", test.unit, unit)\n\t\t}\n\n\t\tdev := &types.VirtualDevice{\n\t\t\tKey:           int32(rand.Int()),\n\t\t\tUnitNumber:    new(int32),\n\t\t\tControllerKey: key,\n\t\t}\n\t\t*dev.UnitNumber = unit\n\n\t\tlist = append(list, dev)\n\t\tc.Device = append(c.Device, dev.Key)\n\t}\n\n\tif 
list.PickController((*types.VirtualIDEController)(nil)) != nil {\n\t\tt.Error(\"should be nil\")\n\t}\n\n\tif list.PickController((*types.VirtualSCSIController)(nil)) == nil {\n\t\tt.Errorf(\"should not be nil\")\n\t}\n}\n\nfunc TestCreateSCSIController(t *testing.T) {\n\tfor _, l := range []VirtualDeviceList{SCSIControllerTypes(), devices} {\n\t\t_, err := l.CreateSCSIController(\"enoent\")\n\t\tif err == nil {\n\t\t\tt.Error(\"should fail\")\n\t\t}\n\n\t\tfor _, name := range []string{\"\", \"scsi\", \"pvscsi\", \"buslogic\", \"lsilogic\", \"lsilogic-sas\"} {\n\t\t\t_, err = l.CreateSCSIController(name)\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestCreateEthernetCard(t *testing.T) {\n\t_, err := EthernetCardTypes().CreateEthernetCard(\"enoent\", nil)\n\tif err == nil {\n\t\tt.Error(\"should fail\")\n\t}\n\n\tfor _, name := range []string{\"\", \"e1000\", \"e1000e\", \"vmxnet3\"} {\n\t\t_, err := EthernetCardTypes().CreateEthernetCard(name, nil)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n}\n\nfunc TestCdrom(t *testing.T) {\n\tc, err := devices.FindCdrom(\"\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\td := devices.Find(devices.Name(c))\n\n\tif c.Key != d.GetVirtualDevice().Key {\n\t\tt.Error(\"device key mismatch\")\n\t}\n\n\tfor _, name := range []string{\"enoent\", \"ide-200\"} {\n\t\tc, err = devices.FindCdrom(name)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"FindCdrom(%s) should fail\", name)\n\t\t}\n\t}\n\n\tc, err = devices.Select(func(device types.BaseVirtualDevice) bool {\n\t\tif _, ok := device.(*types.VirtualCdrom); ok {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}).FindCdrom(\"\")\n\n\tif err == nil {\n\t\tt.Error(\"FindCdrom('') should fail\")\n\t}\n}\n\nfunc TestSerialPort(t *testing.T) {\n\tdevice, err := devices.CreateSerialPort()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdevices.ConnectSerialPort(device, \"telnet://:33233\", false, \"\")\n}\n\nfunc TestPrimaryMacAddress(t *testing.T) {\n\texpect 
:= \"00:0c:29:93:d7:27\"\n\tmac := devices.PrimaryMacAddress()\n\tif expect != mac {\n\t\tt.Errorf(\"expected: %s, got: %s\", expect, mac)\n\t}\n}\n\nfunc TestBootOrder(t *testing.T) {\n\to := []string{DeviceTypeEthernet, DeviceTypeCdrom, DeviceTypeFloppy, DeviceTypeDisk}\n\tlist := devices\n\n\tn := 4 // 1 ethernet, 1 cdrom, 2 disk\n\torder := list.BootOrder(o)\n\tif len(order) != n {\n\t\tt.Errorf(\"expected %d boot devices, got: %d\", n, len(order))\n\t}\n\n\tlist = list.SelectBootOrder(order)\n\tif len(list) != n {\n\t\tt.Errorf(\"expected %d boot devices, got: %d\", n, len(list))\n\t}\n\n\t// test lookup by name\n\tvar names []string\n\tfor _, x := range list {\n\t\tnames = append(names, list.Name(x))\n\t}\n\n\torder = list.BootOrder(names)\n\tif len(order) != n {\n\t\tt.Errorf(\"expected %d boot devices, got: %d\", n, len(order))\n\t}\n\n\tif !reflect.DeepEqual(list, list.SelectBootOrder(order)) {\n\t\tt.Error(\"boot order mismatch\")\n\t}\n\n\t// remove disks\n\tlist = list.Select(func(device types.BaseVirtualDevice) bool {\n\t\tif _, ok := device.(*types.VirtualDisk); ok {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t})\n\n\tn = 2 // 1 ethernet, 1 cdrom\n\torder = list.BootOrder(o)\n\tif len(order) != n {\n\t\tt.Errorf(\"expected %d boot devices, got: %d\", n, len(order))\n\t}\n\n\tif !reflect.DeepEqual(list, list.SelectBootOrder(order)) {\n\t\tt.Error(\"boot order mismatch\")\n\t}\n\n\tif len(list.BootOrder([]string{DeviceTypeDisk})) != 0 {\n\t\tt.Error(\"expected 0 disks\")\n\t}\n}\n\nfunc TestName(t *testing.T) {\n\ttests := []struct {\n\t\tdevice types.BaseVirtualDevice\n\t\texpect 
string\n\t}{\n\t\t{\n\t\t\t&types.VirtualCdrom{},\n\t\t\t\"cdrom-0\",\n\t\t},\n\t\t{\n\t\t\t&types.VirtualDisk{},\n\t\t\t\"disk-0-0\",\n\t\t},\n\t\t{\n\t\t\t&types.VirtualFloppy{},\n\t\t\t\"floppy-0\",\n\t\t},\n\t\t{\n\t\t\t&types.VirtualIDEController{},\n\t\t\t\"ide-0\",\n\t\t},\n\t\t{\n\t\t\t&types.VirtualMachineVideoCard{},\n\t\t\t\"video-0\",\n\t\t},\n\t\t{\n\t\t\t&types.VirtualPointingDevice{},\n\t\t\t\"pointing-0\",\n\t\t},\n\t\t{\n\t\t\t&types.ParaVirtualSCSIController{},\n\t\t\t\"pvscsi-0\",\n\t\t},\n\t\t{\n\t\t\t&types.VirtualSerialPort{},\n\t\t\t\"serialport-0\",\n\t\t},\n\t\t{\n\t\t\t&types.VirtualE1000{\n\t\t\t\tVirtualEthernetCard: types.VirtualEthernetCard{\n\t\t\t\t\tVirtualDevice: types.VirtualDevice{\n\t\t\t\t\t\tUnitNumber: intPtrValue(7),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"ethernet-0\",\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tname := devices.Name(test.device)\n\t\tif name != test.expect {\n\t\t\tt.Errorf(\"expected: %s, got: %s\", test.expect, name)\n\t\t}\n\t}\n}\n\nfunc TestChildDisk(t *testing.T) {\n\tdisks := devices.SelectByType((*types.VirtualDisk)(nil))\n\n\tfor _, disk := range disks {\n\t\tchild := disks.ChildDisk(disk.(*types.VirtualDisk))\n\t\tname := child.Backing.(*types.VirtualDiskFlatVer2BackingInfo).VirtualDeviceFileBackingInfo.FileName\n\n\t\tp := new(DatastorePath)\n\t\tp.FromString(name)\n\n\t\tif p.Datastore != \"datastore1\" {\n\t\t\tt.Fatal(p.Datastore)\n\t\t}\n\n\t\tif p.Path != \"\" {\n\t\t\tt.Fatal(p.Path)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/virtual_disk_manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype VirtualDiskManager struct {\n\tCommon\n}\n\nfunc NewVirtualDiskManager(c *vim25.Client) *VirtualDiskManager {\n\tm := VirtualDiskManager{\n\t\tCommon: NewCommon(c, *c.ServiceContent.VirtualDiskManager),\n\t}\n\n\treturn &m\n}\n\n// CopyVirtualDisk copies a virtual disk, performing conversions as specified in the spec.\nfunc (m VirtualDiskManager) CopyVirtualDisk(\n\tctx context.Context,\n\tsourceName string, sourceDatacenter *Datacenter,\n\tdestName string, destDatacenter *Datacenter,\n\tdestSpec *types.VirtualDiskSpec, force bool) (*Task, error) {\n\n\treq := types.CopyVirtualDisk_Task{\n\t\tThis:       m.Reference(),\n\t\tSourceName: sourceName,\n\t\tDestName:   destName,\n\t\tDestSpec:   destSpec,\n\t\tForce:      types.NewBool(force),\n\t}\n\n\tif sourceDatacenter != nil {\n\t\tref := sourceDatacenter.Reference()\n\t\treq.SourceDatacenter = &ref\n\t}\n\n\tif destDatacenter != nil {\n\t\tref := destDatacenter.Reference()\n\t\treq.DestDatacenter = &ref\n\t}\n\n\tres, err := methods.CopyVirtualDisk_Task(ctx, m.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(m.c, res.Returnval), nil\n}\n\n// CreateVirtualDisk creates a new virtual disk.\nfunc (m 
VirtualDiskManager) CreateVirtualDisk(\n\tctx context.Context,\n\tname string, datacenter *Datacenter,\n\tspec types.BaseVirtualDiskSpec) (*Task, error) {\n\n\treq := types.CreateVirtualDisk_Task{\n\t\tThis: m.Reference(),\n\t\tName: name,\n\t\tSpec: spec,\n\t}\n\n\tif datacenter != nil {\n\t\tref := datacenter.Reference()\n\t\treq.Datacenter = &ref\n\t}\n\n\tres, err := methods.CreateVirtualDisk_Task(ctx, m.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(m.c, res.Returnval), nil\n}\n\n// MoveVirtualDisk moves a virtual disk.\nfunc (m VirtualDiskManager) MoveVirtualDisk(\n\tctx context.Context,\n\tsourceName string, sourceDatacenter *Datacenter,\n\tdestName string, destDatacenter *Datacenter,\n\tforce bool) (*Task, error) {\n\treq := types.MoveVirtualDisk_Task{\n\t\tThis:       m.Reference(),\n\t\tSourceName: sourceName,\n\t\tDestName:   destName,\n\t\tForce:      types.NewBool(force),\n\t}\n\n\tif sourceDatacenter != nil {\n\t\tref := sourceDatacenter.Reference()\n\t\treq.SourceDatacenter = &ref\n\t}\n\n\tif destDatacenter != nil {\n\t\tref := destDatacenter.Reference()\n\t\treq.DestDatacenter = &ref\n\t}\n\n\tres, err := methods.MoveVirtualDisk_Task(ctx, m.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(m.c, res.Returnval), nil\n}\n\n// DeleteVirtualDisk deletes a virtual disk.\nfunc (m VirtualDiskManager) DeleteVirtualDisk(ctx context.Context, name string, dc *Datacenter) (*Task, error) {\n\treq := types.DeleteVirtualDisk_Task{\n\t\tThis: m.Reference(),\n\t\tName: name,\n\t}\n\n\tif dc != nil {\n\t\tref := dc.Reference()\n\t\treq.Datacenter = &ref\n\t}\n\n\tres, err := methods.DeleteVirtualDisk_Task(ctx, m.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(m.c, res.Returnval), nil\n}\n\n// Queries virtual disk uuid\nfunc (m VirtualDiskManager) QueryVirtualDiskUuid(ctx context.Context, name string, dc *Datacenter) (string, error) {\n\treq := types.QueryVirtualDiskUuid{\n\t\tThis: 
m.Reference(),\n\t\tName: name,\n\t}\n\n\tif dc != nil {\n\t\tref := dc.Reference()\n\t\treq.Datacenter = &ref\n\t}\n\n\tres, err := methods.QueryVirtualDiskUuid(ctx, m.c, &req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif res == nil {\n\t\treturn \"\", nil\n\t}\n\n\treturn res.Returnval, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/virtual_disk_manager_internal.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"reflect\"\n\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nfunc init() {\n\ttypes.Add(\"ArrayOfVirtualDiskInfo\", reflect.TypeOf((*arrayOfVirtualDiskInfo)(nil)).Elem())\n\n\ttypes.Add(\"VirtualDiskInfo\", reflect.TypeOf((*VirtualDiskInfo)(nil)).Elem())\n}\n\ntype arrayOfVirtualDiskInfo struct {\n\tVirtualDiskInfo []VirtualDiskInfo `xml:\"VirtualDiskInfo,omitempty\"`\n}\n\ntype queryVirtualDiskInfoTaskRequest struct {\n\tThis           types.ManagedObjectReference  `xml:\"_this\"`\n\tName           string                        `xml:\"name\"`\n\tDatacenter     *types.ManagedObjectReference `xml:\"datacenter,omitempty\"`\n\tIncludeParents bool                          `xml:\"includeParents\"`\n}\n\ntype queryVirtualDiskInfoTaskResponse struct {\n\tReturnval types.ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype queryVirtualDiskInfoTaskBody struct {\n\tReq *queryVirtualDiskInfoTaskRequest  `xml:\"urn:internalvim25 QueryVirtualDiskInfo_Task,omitempty\"`\n\tRes *queryVirtualDiskInfoTaskResponse `xml:\"urn:vim25 QueryVirtualDiskInfo_TaskResponse,omitempty\"`\n\tErr *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *queryVirtualDiskInfoTaskBody) Fault() *soap.Fault { return b.Err }\n\nfunc 
queryVirtualDiskInfoTask(ctx context.Context, r soap.RoundTripper, req *queryVirtualDiskInfoTaskRequest) (*queryVirtualDiskInfoTaskResponse, error) {\n\tvar reqBody, resBody queryVirtualDiskInfoTaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype VirtualDiskInfo struct {\n\tName     string `xml:\"unit>name\"`\n\tDiskType string `xml:\"diskType\"`\n\tParent   string `xml:\"parent,omitempty\"`\n}\n\nfunc (m VirtualDiskManager) QueryVirtualDiskInfo(ctx context.Context, name string, dc *Datacenter, includeParents bool) ([]VirtualDiskInfo, error) {\n\treq := queryVirtualDiskInfoTaskRequest{\n\t\tThis:           m.Reference(),\n\t\tName:           name,\n\t\tIncludeParents: includeParents,\n\t}\n\n\tif dc != nil {\n\t\tref := dc.Reference()\n\t\treq.Datacenter = &ref\n\t}\n\n\tres, err := queryVirtualDiskInfoTask(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo, err := NewTask(m.Client(), res.Returnval).WaitForResult(ctx, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn info.Result.(arrayOfVirtualDiskInfo).VirtualDiskInfo, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/virtual_machine.go",
    "content": "/*\nCopyright (c) 2015-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"path\"\n\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nconst (\n\tPropRuntimePowerState = \"summary.runtime.powerState\"\n)\n\ntype VirtualMachine struct {\n\tCommon\n}\n\nfunc NewVirtualMachine(c *vim25.Client, ref types.ManagedObjectReference) *VirtualMachine {\n\treturn &VirtualMachine{\n\t\tCommon: NewCommon(c, ref),\n\t}\n}\n\nfunc (v VirtualMachine) PowerState(ctx context.Context) (types.VirtualMachinePowerState, error) {\n\tvar o mo.VirtualMachine\n\n\terr := v.Properties(ctx, v.Reference(), []string{PropRuntimePowerState}, &o)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn o.Summary.Runtime.PowerState, nil\n}\n\nfunc (v VirtualMachine) PowerOn(ctx context.Context) (*Task, error) {\n\treq := types.PowerOnVM_Task{\n\t\tThis: v.Reference(),\n\t}\n\n\tres, err := methods.PowerOnVM_Task(ctx, v.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(v.c, res.Returnval), nil\n}\n\nfunc (v VirtualMachine) PowerOff(ctx context.Context) (*Task, error) {\n\treq := types.PowerOffVM_Task{\n\t\tThis: v.Reference(),\n\t}\n\n\tres, err := methods.PowerOffVM_Task(ctx, v.c, 
&req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(v.c, res.Returnval), nil\n}\n\nfunc (v VirtualMachine) Reset(ctx context.Context) (*Task, error) {\n\treq := types.ResetVM_Task{\n\t\tThis: v.Reference(),\n\t}\n\n\tres, err := methods.ResetVM_Task(ctx, v.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(v.c, res.Returnval), nil\n}\n\nfunc (v VirtualMachine) Suspend(ctx context.Context) (*Task, error) {\n\treq := types.SuspendVM_Task{\n\t\tThis: v.Reference(),\n\t}\n\n\tres, err := methods.SuspendVM_Task(ctx, v.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(v.c, res.Returnval), nil\n}\n\nfunc (v VirtualMachine) ShutdownGuest(ctx context.Context) error {\n\treq := types.ShutdownGuest{\n\t\tThis: v.Reference(),\n\t}\n\n\t_, err := methods.ShutdownGuest(ctx, v.c, &req)\n\treturn err\n}\n\nfunc (v VirtualMachine) RebootGuest(ctx context.Context) error {\n\treq := types.RebootGuest{\n\t\tThis: v.Reference(),\n\t}\n\n\t_, err := methods.RebootGuest(ctx, v.c, &req)\n\treturn err\n}\n\nfunc (v VirtualMachine) Destroy(ctx context.Context) (*Task, error) {\n\treq := types.Destroy_Task{\n\t\tThis: v.Reference(),\n\t}\n\n\tres, err := methods.Destroy_Task(ctx, v.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(v.c, res.Returnval), nil\n}\n\nfunc (v VirtualMachine) Clone(ctx context.Context, folder *Folder, name string, config types.VirtualMachineCloneSpec) (*Task, error) {\n\treq := types.CloneVM_Task{\n\t\tThis:   v.Reference(),\n\t\tFolder: folder.Reference(),\n\t\tName:   name,\n\t\tSpec:   config,\n\t}\n\n\tres, err := methods.CloneVM_Task(ctx, v.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(v.c, res.Returnval), nil\n}\n\nfunc (v VirtualMachine) Customize(ctx context.Context, spec types.CustomizationSpec) (*Task, error) {\n\treq := types.CustomizeVM_Task{\n\t\tThis: v.Reference(),\n\t\tSpec: spec,\n\t}\n\n\tres, err := methods.CustomizeVM_Task(ctx, 
v.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(v.c, res.Returnval), nil\n}\n\nfunc (v VirtualMachine) Relocate(ctx context.Context, config types.VirtualMachineRelocateSpec, priority types.VirtualMachineMovePriority) (*Task, error) {\n\treq := types.RelocateVM_Task{\n\t\tThis:     v.Reference(),\n\t\tSpec:     config,\n\t\tPriority: priority,\n\t}\n\n\tres, err := methods.RelocateVM_Task(ctx, v.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(v.c, res.Returnval), nil\n}\n\nfunc (v VirtualMachine) Reconfigure(ctx context.Context, config types.VirtualMachineConfigSpec) (*Task, error) {\n\treq := types.ReconfigVM_Task{\n\t\tThis: v.Reference(),\n\t\tSpec: config,\n\t}\n\n\tres, err := methods.ReconfigVM_Task(ctx, v.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(v.c, res.Returnval), nil\n}\n\nfunc (v VirtualMachine) WaitForIP(ctx context.Context) (string, error) {\n\tvar ip string\n\n\tp := property.DefaultCollector(v.c)\n\terr := property.Wait(ctx, p, v.Reference(), []string{\"guest.ipAddress\"}, func(pc []types.PropertyChange) bool {\n\t\tfor _, c := range pc {\n\t\t\tif c.Name != \"guest.ipAddress\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif c.Op != types.PropertyChangeOpAssign {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif c.Val == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tip = c.Val.(string)\n\t\t\treturn true\n\t\t}\n\n\t\treturn false\n\t})\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn ip, nil\n}\n\n// WaitForNetIP waits for the VM guest.net property to report an IP address for all VM NICs.\n// Only consider IPv4 addresses if the v4 param is true.\n// By default, wait for all NICs to get an IP address, unless 1 or more device is given.\n// A device can be specified by the MAC address or the device name, e.g. 
\"ethernet-0\".\n// Returns a map with MAC address as the key and IP address list as the value.\nfunc (v VirtualMachine) WaitForNetIP(ctx context.Context, v4 bool, device ...string) (map[string][]string, error) {\n\tmacs := make(map[string][]string)\n\teths := make(map[string]string)\n\n\tp := property.DefaultCollector(v.c)\n\n\t// Wait for all NICs to have a MacAddress, which may not be generated yet.\n\terr := property.Wait(ctx, p, v.Reference(), []string{\"config.hardware.device\"}, func(pc []types.PropertyChange) bool {\n\t\tfor _, c := range pc {\n\t\t\tif c.Op != types.PropertyChangeOpAssign {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdevices := VirtualDeviceList(c.Val.(types.ArrayOfVirtualDevice).VirtualDevice)\n\t\t\tfor _, d := range devices {\n\t\t\t\tif nic, ok := d.(types.BaseVirtualEthernetCard); ok {\n\t\t\t\t\tmac := nic.GetVirtualEthernetCard().MacAddress\n\t\t\t\t\tif mac == \"\" {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t\tmacs[mac] = nil\n\t\t\t\t\teths[devices.Name(d)] = mac\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t})\n\n\tif len(device) != 0 {\n\t\t// Only wait for specific NIC(s)\n\t\tmacs = make(map[string][]string)\n\t\tfor _, mac := range device {\n\t\t\tif eth, ok := eths[mac]; ok {\n\t\t\t\tmac = eth // device name, e.g. 
\"ethernet-0\"\n\t\t\t}\n\t\t\tmacs[mac] = nil\n\t\t}\n\t}\n\n\terr = property.Wait(ctx, p, v.Reference(), []string{\"guest.net\"}, func(pc []types.PropertyChange) bool {\n\t\tfor _, c := range pc {\n\t\t\tif c.Op != types.PropertyChangeOpAssign {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tnics := c.Val.(types.ArrayOfGuestNicInfo).GuestNicInfo\n\t\t\tfor _, nic := range nics {\n\t\t\t\tmac := nic.MacAddress\n\t\t\t\tif mac == \"\" || nic.IpConfig == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfor _, ip := range nic.IpConfig.IpAddress {\n\t\t\t\t\tif _, ok := macs[mac]; !ok {\n\t\t\t\t\t\tcontinue // Ignore any that don't correspond to a VM device\n\t\t\t\t\t}\n\t\t\t\t\tif v4 && net.ParseIP(ip.IpAddress).To4() == nil {\n\t\t\t\t\t\tcontinue // Ignore non IPv4 address\n\t\t\t\t\t}\n\t\t\t\t\tmacs[mac] = append(macs[mac], ip.IpAddress)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, ips := range macs {\n\t\t\tif len(ips) == 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn macs, nil\n}\n\n// Device returns the VirtualMachine's config.hardware.device property.\nfunc (v VirtualMachine) Device(ctx context.Context) (VirtualDeviceList, error) {\n\tvar o mo.VirtualMachine\n\n\terr := v.Properties(ctx, v.Reference(), []string{\"config.hardware.device\", \"summary.runtime.connectionState\"}, &o)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Quoting the SDK doc:\n\t//   The virtual machine configuration is not guaranteed to be available.\n\t//   For example, the configuration information would be unavailable if the server\n\t//   is unable to access the virtual machine files on disk, and is often also unavailable\n\t//   during the initial phases of virtual machine creation.\n\tif o.Config == nil {\n\t\treturn nil, fmt.Errorf(\"%s Config is not available, connectionState=%s\",\n\t\t\tv.Reference(), o.Summary.Runtime.ConnectionState)\n\t}\n\n\treturn VirtualDeviceList(o.Config.Hardware.Device), 
nil\n}\n\nfunc (v VirtualMachine) HostSystem(ctx context.Context) (*HostSystem, error) {\n\tvar o mo.VirtualMachine\n\n\terr := v.Properties(ctx, v.Reference(), []string{\"summary.runtime.host\"}, &o)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thost := o.Summary.Runtime.Host\n\tif host == nil {\n\t\treturn nil, errors.New(\"VM doesn't have a HostSystem\")\n\t}\n\n\treturn NewHostSystem(v.c, *host), nil\n}\n\nfunc (v VirtualMachine) ResourcePool(ctx context.Context) (*ResourcePool, error) {\n\tvar o mo.VirtualMachine\n\n\terr := v.Properties(ctx, v.Reference(), []string{\"resourcePool\"}, &o)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trp := o.ResourcePool\n\tif rp == nil {\n\t\treturn nil, errors.New(\"VM doesn't have a resourcePool\")\n\t}\n\n\treturn NewResourcePool(v.c, *rp), nil\n}\n\nfunc (v VirtualMachine) configureDevice(ctx context.Context, op types.VirtualDeviceConfigSpecOperation, fop types.VirtualDeviceConfigSpecFileOperation, devices ...types.BaseVirtualDevice) error {\n\tspec := types.VirtualMachineConfigSpec{}\n\n\tfor _, device := range devices {\n\t\tconfig := &types.VirtualDeviceConfigSpec{\n\t\t\tDevice:    device,\n\t\t\tOperation: op,\n\t\t}\n\n\t\tif disk, ok := device.(*types.VirtualDisk); ok {\n\t\t\tconfig.FileOperation = fop\n\n\t\t\t// Special case to attach an existing disk\n\t\t\tif op == types.VirtualDeviceConfigSpecOperationAdd && disk.CapacityInKB == 0 {\n\t\t\t\tchildDisk := false\n\t\t\t\tif b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {\n\t\t\t\t\tchildDisk = b.Parent != nil\n\t\t\t\t}\n\n\t\t\t\tif !childDisk {\n\t\t\t\t\tconfig.FileOperation = \"\" // existing disk\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tspec.DeviceChange = append(spec.DeviceChange, config)\n\t}\n\n\ttask, err := v.Reconfigure(ctx, spec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn task.Wait(ctx)\n}\n\n// AddDevice adds the given devices to the VirtualMachine\nfunc (v VirtualMachine) AddDevice(ctx context.Context, device 
...types.BaseVirtualDevice) error {\n\treturn v.configureDevice(ctx, types.VirtualDeviceConfigSpecOperationAdd, types.VirtualDeviceConfigSpecFileOperationCreate, device...)\n}\n\n// EditDevice edits the given (existing) devices on the VirtualMachine\nfunc (v VirtualMachine) EditDevice(ctx context.Context, device ...types.BaseVirtualDevice) error {\n\treturn v.configureDevice(ctx, types.VirtualDeviceConfigSpecOperationEdit, types.VirtualDeviceConfigSpecFileOperationReplace, device...)\n}\n\n// RemoveDevice removes the given devices on the VirtualMachine\nfunc (v VirtualMachine) RemoveDevice(ctx context.Context, keepFiles bool, device ...types.BaseVirtualDevice) error {\n\tfop := types.VirtualDeviceConfigSpecFileOperationDestroy\n\tif keepFiles {\n\t\tfop = \"\"\n\t}\n\treturn v.configureDevice(ctx, types.VirtualDeviceConfigSpecOperationRemove, fop, device...)\n}\n\n// BootOptions returns the VirtualMachine's config.bootOptions property.\nfunc (v VirtualMachine) BootOptions(ctx context.Context) (*types.VirtualMachineBootOptions, error) {\n\tvar o mo.VirtualMachine\n\n\terr := v.Properties(ctx, v.Reference(), []string{\"config.bootOptions\"}, &o)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn o.Config.BootOptions, nil\n}\n\n// SetBootOptions reconfigures the VirtualMachine with the given options.\nfunc (v VirtualMachine) SetBootOptions(ctx context.Context, options *types.VirtualMachineBootOptions) error {\n\tspec := types.VirtualMachineConfigSpec{}\n\n\tspec.BootOptions = options\n\n\ttask, err := v.Reconfigure(ctx, spec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn task.Wait(ctx)\n}\n\n// Answer answers a pending question.\nfunc (v VirtualMachine) Answer(ctx context.Context, id, answer string) error {\n\treq := types.AnswerVM{\n\t\tThis:         v.Reference(),\n\t\tQuestionId:   id,\n\t\tAnswerChoice: answer,\n\t}\n\n\t_, err := methods.AnswerVM(ctx, v.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// CreateSnapshot creates a 
new snapshot of a virtual machine.\nfunc (v VirtualMachine) CreateSnapshot(ctx context.Context, name string, description string, memory bool, quiesce bool) (*Task, error) {\n\treq := types.CreateSnapshot_Task{\n\t\tThis:        v.Reference(),\n\t\tName:        name,\n\t\tDescription: description,\n\t\tMemory:      memory,\n\t\tQuiesce:     quiesce,\n\t}\n\n\tres, err := methods.CreateSnapshot_Task(ctx, v.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(v.c, res.Returnval), nil\n}\n\n// RemoveAllSnapshot removes all snapshots of a virtual machine\nfunc (v VirtualMachine) RemoveAllSnapshot(ctx context.Context, consolidate *bool) (*Task, error) {\n\treq := types.RemoveAllSnapshots_Task{\n\t\tThis:        v.Reference(),\n\t\tConsolidate: consolidate,\n\t}\n\n\tres, err := methods.RemoveAllSnapshots_Task(ctx, v.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(v.c, res.Returnval), nil\n}\n\ntype snapshotMap map[string][]Reference\n\nfunc (m snapshotMap) add(parent string, tree []types.VirtualMachineSnapshotTree) {\n\tfor i, st := range tree {\n\t\tsname := st.Name\n\t\tnames := []string{sname, st.Snapshot.Value}\n\n\t\tif parent != \"\" {\n\t\t\tsname = path.Join(parent, sname)\n\t\t\t// Add full path as an option to resolve duplicate names\n\t\t\tnames = append(names, sname)\n\t\t}\n\n\t\tfor _, name := range names {\n\t\t\tm[name] = append(m[name], &tree[i].Snapshot)\n\t\t}\n\n\t\tm.add(sname, st.ChildSnapshotList)\n\t}\n}\n\n// FindSnapshot supports snapshot lookup by name, where name can be:\n// 1) snapshot ManagedObjectReference.Value (unique)\n// 2) snapshot name (may not be unique)\n// 3) snapshot tree path (may not be unique)\nfunc (v VirtualMachine) FindSnapshot(ctx context.Context, name string) (Reference, error) {\n\tvar o mo.VirtualMachine\n\n\terr := v.Properties(ctx, v.Reference(), []string{\"snapshot\"}, &o)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif o.Snapshot == nil || 
len(o.Snapshot.RootSnapshotList) == 0 {\n\t\treturn nil, errors.New(\"No snapshots for this VM\")\n\t}\n\n\tm := make(snapshotMap)\n\tm.add(\"\", o.Snapshot.RootSnapshotList)\n\n\ts := m[name]\n\tswitch len(s) {\n\tcase 0:\n\t\treturn nil, fmt.Errorf(\"snapshot %q not found\", name)\n\tcase 1:\n\t\treturn s[0], nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"%q resolves to %d snapshots\", name, len(s))\n\t}\n}\n\n// RemoveSnapshot removes a named snapshot\nfunc (v VirtualMachine) RemoveSnapshot(ctx context.Context, name string, removeChildren bool, consolidate *bool) (*Task, error) {\n\tsnapshot, err := v.FindSnapshot(ctx, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq := types.RemoveSnapshot_Task{\n\t\tThis:           snapshot.Reference(),\n\t\tRemoveChildren: removeChildren,\n\t\tConsolidate:    consolidate,\n\t}\n\n\tres, err := methods.RemoveSnapshot_Task(ctx, v.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(v.c, res.Returnval), nil\n}\n\n// RevertToCurrentSnapshot reverts to the current snapshot\nfunc (v VirtualMachine) RevertToCurrentSnapshot(ctx context.Context, suppressPowerOn bool) (*Task, error) {\n\treq := types.RevertToCurrentSnapshot_Task{\n\t\tThis:            v.Reference(),\n\t\tSuppressPowerOn: types.NewBool(suppressPowerOn),\n\t}\n\n\tres, err := methods.RevertToCurrentSnapshot_Task(ctx, v.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(v.c, res.Returnval), nil\n}\n\n// RevertToSnapshot reverts to a named snapshot\nfunc (v VirtualMachine) RevertToSnapshot(ctx context.Context, name string, suppressPowerOn bool) (*Task, error) {\n\tsnapshot, err := v.FindSnapshot(ctx, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq := types.RevertToSnapshot_Task{\n\t\tThis:            snapshot.Reference(),\n\t\tSuppressPowerOn: types.NewBool(suppressPowerOn),\n\t}\n\n\tres, err := methods.RevertToSnapshot_Task(ctx, v.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn 
NewTask(v.c, res.Returnval), nil\n}\n\n// IsToolsRunning returns true if VMware Tools is currently running in the guest OS, and false otherwise.\nfunc (v VirtualMachine) IsToolsRunning(ctx context.Context) (bool, error) {\n\tvar o mo.VirtualMachine\n\n\terr := v.Properties(ctx, v.Reference(), []string{\"guest.toolsRunningStatus\"}, &o)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn o.Guest.ToolsRunningStatus == string(types.VirtualMachineToolsRunningStatusGuestToolsRunning), nil\n}\n\n// Wait for the VirtualMachine to change to the desired power state.\nfunc (v VirtualMachine) WaitForPowerState(ctx context.Context, state types.VirtualMachinePowerState) error {\n\tp := property.DefaultCollector(v.c)\n\terr := property.Wait(ctx, p, v.Reference(), []string{PropRuntimePowerState}, func(pc []types.PropertyChange) bool {\n\t\tfor _, c := range pc {\n\t\t\tif c.Name != PropRuntimePowerState {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif c.Val == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tps := c.Val.(types.VirtualMachinePowerState)\n\t\t\tif ps == state {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t})\n\n\treturn err\n}\n\nfunc (v VirtualMachine) MarkAsTemplate(ctx context.Context) error {\n\treq := types.MarkAsTemplate{\n\t\tThis: v.Reference(),\n\t}\n\n\t_, err := methods.MarkAsTemplate(ctx, v.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (v VirtualMachine) MarkAsVirtualMachine(ctx context.Context, pool ResourcePool, host *HostSystem) error {\n\treq := types.MarkAsVirtualMachine{\n\t\tThis: v.Reference(),\n\t\tPool: pool.Reference(),\n\t}\n\n\tif host != nil {\n\t\tref := host.Reference()\n\t\treq.Host = &ref\n\t}\n\n\t_, err := methods.MarkAsVirtualMachine(ctx, v.c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (v VirtualMachine) Migrate(ctx context.Context, pool *ResourcePool, host *HostSystem, priority types.VirtualMachineMovePriority, state types.VirtualMachinePowerState) (*Task, error) 
{\n\treq := types.MigrateVM_Task{\n\t\tThis:     v.Reference(),\n\t\tPriority: priority,\n\t\tState:    state,\n\t}\n\n\tif pool != nil {\n\t\tref := pool.Reference()\n\t\treq.Pool = &ref\n\t}\n\n\tif host != nil {\n\t\tref := host.Reference()\n\t\treq.Host = &ref\n\t}\n\n\tres, err := methods.MigrateVM_Task(ctx, v.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(v.c, res.Returnval), nil\n}\n\nfunc (v VirtualMachine) Unregister(ctx context.Context) error {\n\treq := types.UnregisterVM{\n\t\tThis: v.Reference(),\n\t}\n\n\t_, err := methods.UnregisterVM(ctx, v.Client(), &req)\n\treturn err\n}\n\n// QueryConfigTarget is a helper that reads the environmentBrowser property and queries it for the ConfigTarget.\nfunc (v VirtualMachine) QueryConfigTarget(ctx context.Context) (*types.ConfigTarget, error) {\n\tvar vm mo.VirtualMachine\n\n\terr := v.Properties(ctx, v.Reference(), []string{\"environmentBrowser\"}, &vm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq := types.QueryConfigTarget{\n\t\tThis: vm.EnvironmentBrowser,\n\t}\n\n\tres, err := methods.QueryConfigTarget(ctx, v.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (v VirtualMachine) MountToolsInstaller(ctx context.Context) error {\n\treq := types.MountToolsInstaller{\n\t\tThis: v.Reference(),\n\t}\n\n\t_, err := methods.MountToolsInstaller(ctx, v.Client(), &req)\n\treturn err\n}\n\nfunc (v VirtualMachine) UnmountToolsInstaller(ctx context.Context) error {\n\treq := types.UnmountToolsInstaller{\n\t\tThis: v.Reference(),\n\t}\n\n\t_, err := methods.UnmountToolsInstaller(ctx, v.Client(), &req)\n\treturn err\n}\n\nfunc (v VirtualMachine) UpgradeTools(ctx context.Context, options string) (*Task, error) {\n\treq := types.UpgradeTools_Task{\n\t\tThis:             v.Reference(),\n\t\tInstallerOptions: options,\n\t}\n\n\tres, err := methods.UpgradeTools_Task(ctx, v.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewTask(v.c, res.Returnval), 
nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/virtual_machine_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\n// VirtualMachine should implement the Reference interface.\nvar _ Reference = VirtualMachine{}\n\n// pretty.Printf generated\nvar snapshot = &types.VirtualMachineSnapshotInfo{\n\tDynamicData:     types.DynamicData{},\n\tCurrentSnapshot: &types.ManagedObjectReference{Type: \"VirtualMachineSnapshot\", Value: \"2-snapshot-11\"},\n\tRootSnapshotList: []types.VirtualMachineSnapshotTree{\n\t\t{\n\t\t\tDynamicData:    types.DynamicData{},\n\t\t\tSnapshot:       types.ManagedObjectReference{Type: \"VirtualMachineSnapshot\", Value: \"2-snapshot-1\"},\n\t\t\tVm:             types.ManagedObjectReference{Type: \"VirtualMachine\", Value: \"2\"},\n\t\t\tName:           \"root\",\n\t\t\tDescription:    \"\",\n\t\t\tId:             1,\n\t\t\tCreateTime:     time.Now(),\n\t\t\tState:          \"poweredOn\",\n\t\t\tQuiesced:       false,\n\t\t\tBackupManifest: \"\",\n\t\t\tChildSnapshotList: []types.VirtualMachineSnapshotTree{\n\t\t\t\t{\n\t\t\t\t\tDynamicData:       types.DynamicData{},\n\t\t\t\t\tSnapshot:          types.ManagedObjectReference{Type: \"VirtualMachineSnapshot\", Value: \"2-snapshot-2\"},\n\t\t\t\t\tVm:                types.ManagedObjectReference{Type: \"VirtualMachine\", Value: \"2\"},\n\t\t\t\t\tName:              \"child\",\n\t\t\t\t\tDescription:   
    \"\",\n\t\t\t\t\tId:                2,\n\t\t\t\t\tCreateTime:        time.Now(),\n\t\t\t\t\tState:             \"poweredOn\",\n\t\t\t\t\tQuiesced:          false,\n\t\t\t\t\tBackupManifest:    \"\",\n\t\t\t\t\tChildSnapshotList: nil,\n\t\t\t\t\tReplaySupported:   types.NewBool(false),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tDynamicData:    types.DynamicData{},\n\t\t\t\t\tSnapshot:       types.ManagedObjectReference{Type: \"VirtualMachineSnapshot\", Value: \"2-snapshot-3\"},\n\t\t\t\t\tVm:             types.ManagedObjectReference{Type: \"VirtualMachine\", Value: \"2\"},\n\t\t\t\t\tName:           \"child\",\n\t\t\t\t\tDescription:    \"\",\n\t\t\t\t\tId:             3,\n\t\t\t\t\tCreateTime:     time.Now(),\n\t\t\t\t\tState:          \"poweredOn\",\n\t\t\t\t\tQuiesced:       false,\n\t\t\t\t\tBackupManifest: \"\",\n\t\t\t\t\tChildSnapshotList: []types.VirtualMachineSnapshotTree{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tDynamicData:    types.DynamicData{},\n\t\t\t\t\t\t\tSnapshot:       types.ManagedObjectReference{Type: \"VirtualMachineSnapshot\", Value: \"2-snapshot-9\"},\n\t\t\t\t\t\t\tVm:             types.ManagedObjectReference{Type: \"VirtualMachine\", Value: \"2\"},\n\t\t\t\t\t\t\tName:           \"grandkid\",\n\t\t\t\t\t\t\tDescription:    \"\",\n\t\t\t\t\t\t\tId:             9,\n\t\t\t\t\t\t\tCreateTime:     time.Now(),\n\t\t\t\t\t\t\tState:          \"poweredOn\",\n\t\t\t\t\t\t\tQuiesced:       false,\n\t\t\t\t\t\t\tBackupManifest: \"\",\n\t\t\t\t\t\t\tChildSnapshotList: []types.VirtualMachineSnapshotTree{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tDynamicData:       types.DynamicData{},\n\t\t\t\t\t\t\t\t\tSnapshot:          types.ManagedObjectReference{Type: \"VirtualMachineSnapshot\", Value: \"2-snapshot-10\"},\n\t\t\t\t\t\t\t\t\tVm:                types.ManagedObjectReference{Type: \"VirtualMachine\", Value: \"2\"},\n\t\t\t\t\t\t\t\t\tName:              \"great\",\n\t\t\t\t\t\t\t\t\tDescription:       \"\",\n\t\t\t\t\t\t\t\t\tId:                
10,\n\t\t\t\t\t\t\t\t\tCreateTime:        time.Now(),\n\t\t\t\t\t\t\t\t\tState:             \"poweredOn\",\n\t\t\t\t\t\t\t\t\tQuiesced:          false,\n\t\t\t\t\t\t\t\t\tBackupManifest:    \"\",\n\t\t\t\t\t\t\t\t\tChildSnapshotList: nil,\n\t\t\t\t\t\t\t\t\tReplaySupported:   types.NewBool(false),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tReplaySupported: types.NewBool(false),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tReplaySupported: types.NewBool(false),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tDynamicData:    types.DynamicData{},\n\t\t\t\t\tSnapshot:       types.ManagedObjectReference{Type: \"VirtualMachineSnapshot\", Value: \"2-snapshot-5\"},\n\t\t\t\t\tVm:             types.ManagedObjectReference{Type: \"VirtualMachine\", Value: \"2\"},\n\t\t\t\t\tName:           \"voodoo\",\n\t\t\t\t\tDescription:    \"\",\n\t\t\t\t\tId:             5,\n\t\t\t\t\tCreateTime:     time.Now(),\n\t\t\t\t\tState:          \"poweredOn\",\n\t\t\t\t\tQuiesced:       false,\n\t\t\t\t\tBackupManifest: \"\",\n\t\t\t\t\tChildSnapshotList: []types.VirtualMachineSnapshotTree{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tDynamicData:       types.DynamicData{},\n\t\t\t\t\t\t\tSnapshot:          types.ManagedObjectReference{Type: \"VirtualMachineSnapshot\", Value: \"2-snapshot-11\"},\n\t\t\t\t\t\t\tVm:                types.ManagedObjectReference{Type: \"VirtualMachine\", Value: \"2\"},\n\t\t\t\t\t\t\tName:              \"child\",\n\t\t\t\t\t\t\tDescription:       \"\",\n\t\t\t\t\t\t\tId:                11,\n\t\t\t\t\t\t\tCreateTime:        time.Now(),\n\t\t\t\t\t\t\tState:             \"poweredOn\",\n\t\t\t\t\t\t\tQuiesced:          false,\n\t\t\t\t\t\t\tBackupManifest:    \"\",\n\t\t\t\t\t\t\tChildSnapshotList: nil,\n\t\t\t\t\t\t\tReplaySupported:   types.NewBool(false),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tReplaySupported: types.NewBool(false),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tDynamicData:    types.DynamicData{},\n\t\t\t\t\tSnapshot:       types.ManagedObjectReference{Type: 
\"VirtualMachineSnapshot\", Value: \"2-snapshot-6\"},\n\t\t\t\t\tVm:             types.ManagedObjectReference{Type: \"VirtualMachine\", Value: \"2\"},\n\t\t\t\t\tName:           \"better\",\n\t\t\t\t\tDescription:    \"\",\n\t\t\t\t\tId:             6,\n\t\t\t\t\tCreateTime:     time.Now(),\n\t\t\t\t\tState:          \"poweredOn\",\n\t\t\t\t\tQuiesced:       false,\n\t\t\t\t\tBackupManifest: \"\",\n\t\t\t\t\tChildSnapshotList: []types.VirtualMachineSnapshotTree{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tDynamicData:    types.DynamicData{},\n\t\t\t\t\t\t\tSnapshot:       types.ManagedObjectReference{Type: \"VirtualMachineSnapshot\", Value: \"2-snapshot-7\"},\n\t\t\t\t\t\t\tVm:             types.ManagedObjectReference{Type: \"VirtualMachine\", Value: \"2\"},\n\t\t\t\t\t\t\tName:           \"best\",\n\t\t\t\t\t\t\tDescription:    \"\",\n\t\t\t\t\t\t\tId:             7,\n\t\t\t\t\t\t\tCreateTime:     time.Now(),\n\t\t\t\t\t\t\tState:          \"poweredOn\",\n\t\t\t\t\t\t\tQuiesced:       false,\n\t\t\t\t\t\t\tBackupManifest: \"\",\n\t\t\t\t\t\t\tChildSnapshotList: []types.VirtualMachineSnapshotTree{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tDynamicData:       types.DynamicData{},\n\t\t\t\t\t\t\t\t\tSnapshot:          types.ManagedObjectReference{Type: \"VirtualMachineSnapshot\", Value: \"2-snapshot-8\"},\n\t\t\t\t\t\t\t\t\tVm:                types.ManagedObjectReference{Type: \"VirtualMachine\", Value: \"2\"},\n\t\t\t\t\t\t\t\t\tName:              \"betterer\",\n\t\t\t\t\t\t\t\t\tDescription:       \"\",\n\t\t\t\t\t\t\t\t\tId:                8,\n\t\t\t\t\t\t\t\t\tCreateTime:        time.Now(),\n\t\t\t\t\t\t\t\t\tState:             \"poweredOn\",\n\t\t\t\t\t\t\t\t\tQuiesced:          false,\n\t\t\t\t\t\t\t\t\tBackupManifest:    \"\",\n\t\t\t\t\t\t\t\t\tChildSnapshotList: nil,\n\t\t\t\t\t\t\t\t\tReplaySupported:   types.NewBool(false),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tReplaySupported: types.NewBool(false),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tReplaySupported: 
types.NewBool(false),\n\t\t\t\t},\n\t\t\t},\n\t\t\tReplaySupported: types.NewBool(false),\n\t\t},\n\t},\n}\n\nfunc TestVirtualMachineSnapshotMap(t *testing.T) {\n\tm := make(snapshotMap)\n\tm.add(\"\", snapshot.RootSnapshotList)\n\n\ttests := []struct {\n\t\tname   string\n\t\texpect int\n\t}{\n\t\t{\"enoent\", 0},\n\t\t{\"root\", 1},\n\t\t{\"child\", 3},\n\t\t{\"root/child\", 2},\n\t\t{\"root/voodoo/child\", 1},\n\t\t{\"2-snapshot-6\", 1},\n\t}\n\n\tfor _, test := range tests {\n\t\ts := m[test.name]\n\n\t\tif len(s) != test.expect {\n\t\t\tt.Errorf(\"%s: %d != %d\", test.name, len(s), test.expect)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/object/vmware_distributed_virtual_switch.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage object\n\ntype VmwareDistributedVirtualSwitch struct {\n\tDistributedVirtualSwitch\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/ovf/cim.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage ovf\n\n/*\nSource: http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.24.0/CIM_VirtualSystemSettingData.xsd\n*/\n\ntype CIMVirtualSystemSettingData struct {\n\tElementName string `xml:\"ElementName\"`\n\tInstanceID  string `xml:\"InstanceID\"`\n\n\tAutomaticRecoveryAction              *uint8   `xml:\"AutomaticRecoveryAction\"`\n\tAutomaticShutdownAction              *uint8   `xml:\"AutomaticShutdownAction\"`\n\tAutomaticStartupAction               *uint8   `xml:\"AutomaticStartupAction\"`\n\tAutomaticStartupActionDelay          *string  `xml:\"AutomaticStartupActionDelay>Interval\"`\n\tAutomaticStartupActionSequenceNumber *uint16  `xml:\"AutomaticStartupActionSequenceNumber\"`\n\tCaption                              *string  `xml:\"Caption\"`\n\tConfigurationDataRoot                *string  `xml:\"ConfigurationDataRoot\"`\n\tConfigurationFile                    *string  `xml:\"ConfigurationFile\"`\n\tConfigurationID                      *string  `xml:\"ConfigurationID\"`\n\tCreationTime                         *string  `xml:\"CreationTime\"`\n\tDescription                          *string  `xml:\"Description\"`\n\tLogDataRoot                          *string  `xml:\"LogDataRoot\"`\n\tNotes                                []string `xml:\"Notes\"`\n\tRecoveryFile                         *string  `xml:\"RecoveryFile\"`\n\tSnapshotDataRoot                
     *string  `xml:\"SnapshotDataRoot\"`\n\tSuspendDataRoot                      *string  `xml:\"SuspendDataRoot\"`\n\tSwapFileDataRoot                     *string  `xml:\"SwapFileDataRoot\"`\n\tVirtualSystemIdentifier              *string  `xml:\"VirtualSystemIdentifier\"`\n\tVirtualSystemType                    *string  `xml:\"VirtualSystemType\"`\n}\n\n/*\nSource: http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.24.0/CIM_ResourceAllocationSettingData.xsd\n*/\n\ntype CIMResourceAllocationSettingData struct {\n\tElementName string `xml:\"ElementName\"`\n\tInstanceID  string `xml:\"InstanceID\"`\n\n\tResourceType      *uint16 `xml:\"ResourceType\"`\n\tOtherResourceType *string `xml:\"OtherResourceType\"`\n\tResourceSubType   *string `xml:\"ResourceSubType\"`\n\n\tAddressOnParent       *string  `xml:\"AddressOnParent\"`\n\tAddress               *string  `xml:\"Address\"`\n\tAllocationUnits       *string  `xml:\"AllocationUnits\"`\n\tAutomaticAllocation   *bool    `xml:\"AutomaticAllocation\"`\n\tAutomaticDeallocation *bool    `xml:\"AutomaticDeallocation\"`\n\tCaption               *string  `xml:\"Caption\"`\n\tConnection            []string `xml:\"Connection\"`\n\tConsumerVisibility    *uint16  `xml:\"ConsumerVisibility\"`\n\tDescription           *string  `xml:\"Description\"`\n\tHostResource          []string `xml:\"HostResource\"`\n\tLimit                 *uint64  `xml:\"Limit\"`\n\tMappingBehavior       *uint    `xml:\"MappingBehavior\"`\n\tParent                *string  `xml:\"Parent\"`\n\tPoolID                *string  `xml:\"PoolID\"`\n\tReservation           *uint64  `xml:\"Reservation\"`\n\tVirtualQuantity       *uint    `xml:\"VirtualQuantity\"`\n\tVirtualQuantityUnits  *string  `xml:\"VirtualQuantityUnits\"`\n\tWeight                *uint    `xml:\"Weight\"`\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/ovf/doc.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n/*\nPackage ovf provides functionality to unmarshal and inspect the structure\nof an OVF file. It is not a complete implementation of the specification and\nis intended to be used to import virtual infrastructure into vSphere.\n\nFor a complete specification of the OVF standard, refer to:\nhttps://www.dmtf.org/sites/default/files/standards/documents/DSP0243_2.1.0.pdf\n*/\npackage ovf\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/ovf/env.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage ovf\n\nimport (\n\t\"bytes\"\n\t\"encoding/xml\"\n\t\"fmt\"\n)\n\nconst (\n\tovfEnvHeader = `<Environment\n\t\txmlns=\"http://schemas.dmtf.org/ovf/environment/1\"\n\t\txmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n\t\txmlns:oe=\"http://schemas.dmtf.org/ovf/environment/1\"\n\t\txmlns:ve=\"http://www.vmware.com/schema/ovfenv\"\n\t\toe:id=\"\"\n\t\tve:esxId=\"%s\">`\n\tovfEnvPlatformSection = `<PlatformSection>\n\t\t<Kind>%s</Kind>\n\t\t<Version>%s</Version>\n\t\t<Vendor>%s</Vendor>\n\t\t<Locale>%s</Locale>\n\t\t</PlatformSection>`\n\tovfEnvPropertyHeader = `<PropertySection>`\n\tovfEnvPropertyEntry  = `<Property oe:key=\"%s\" oe:value=\"%s\"/>`\n\tovfEnvPropertyFooter = `</PropertySection>`\n\tovfEnvFooter         = `</Environment>`\n)\n\ntype Env struct {\n\tXMLName xml.Name `xml:\"http://schemas.dmtf.org/ovf/environment/1 Environment\"`\n\tID      string   `xml:\"id,attr\"`\n\tEsxID   string   `xml:\"http://www.vmware.com/schema/ovfenv esxId,attr\"`\n\n\tPlatform *PlatformSection `xml:\"PlatformSection\"`\n\tProperty *PropertySection `xml:\"PropertySection\"`\n}\n\ntype PlatformSection struct {\n\tKind    string `xml:\"Kind\"`\n\tVersion string `xml:\"Version\"`\n\tVendor  string `xml:\"Vendor\"`\n\tLocale  string `xml:\"Locale\"`\n}\n\ntype PropertySection struct {\n\tProperties []EnvProperty `xml:\"Property\"`\n}\n\ntype EnvProperty 
struct {\n\tKey   string `xml:\"key,attr\"`\n\tValue string `xml:\"value,attr\"`\n}\n\n// Marshal marshals Env to xml by using xml.Marshal.\nfunc (e Env) Marshal() (string, error) {\n\tx, err := xml.Marshal(e)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"%s%s\", xml.Header, x), nil\n}\n\n// MarshalManual manually marshals Env to xml suitable for a vApp guest.\n// It exists to overcome the lack of expressiveness in Go's XML namespaces.\nfunc (e Env) MarshalManual() string {\n\tvar buffer bytes.Buffer\n\n\tbuffer.WriteString(xml.Header)\n\tbuffer.WriteString(fmt.Sprintf(ovfEnvHeader, e.EsxID))\n\tbuffer.WriteString(fmt.Sprintf(ovfEnvPlatformSection, e.Platform.Kind, e.Platform.Version, e.Platform.Vendor, e.Platform.Locale))\n\n\tbuffer.WriteString(fmt.Sprintf(ovfEnvPropertyHeader))\n\tfor _, p := range e.Property.Properties {\n\t\tbuffer.WriteString(fmt.Sprintf(ovfEnvPropertyEntry, p.Key, p.Value))\n\t}\n\tbuffer.WriteString(fmt.Sprintf(ovfEnvPropertyFooter))\n\n\tbuffer.WriteString(fmt.Sprintf(ovfEnvFooter))\n\n\treturn buffer.String()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/ovf/env_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\npackage ovf\n\nimport \"testing\"\n\nfunc testEnv() Env {\n\treturn Env{\n\t\tEsxID: \"vm moref\",\n\t\tPlatform: &PlatformSection{\n\t\t\tKind:    \"VMware vCenter Server\",\n\t\t\tVersion: \"5.5.0\",\n\t\t\tVendor:  \"VMware, Inc.\",\n\t\t\tLocale:  \"US\",\n\t\t},\n\t\tProperty: &PropertySection{\n\t\t\tProperties: []EnvProperty{\n\t\t\t\t{\"foo\", \"bar\"},\n\t\t\t\t{\"ham\", \"eggs\"}}},\n\t}\n}\n\nfunc TestMarshalEnv(t *testing.T) {\n\tenv := testEnv()\n\n\txenv, err := env.Marshal()\n\tif err != nil {\n\t\tt.Fatal(\"Error marshalling environment\")\n\t}\n\tif len(xenv) < 1 {\n\t\tt.Fatal(\"Marshalled document is empty\")\n\t}\n\tt.Log(xenv)\n}\n\nfunc TestMarshalManualEnv(t *testing.T) {\n\tenv := testEnv()\n\n\txenv := env.MarshalManual()\n\tif len(xenv) < 1 {\n\t\tt.Fatal(\"Marshalled document is empty\")\n\t}\n\tt.Log(xenv)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/ovf/envelope.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage ovf\n\ntype Envelope struct {\n\tReferences []File `xml:\"References>File\"`\n\n\t// Package level meta-data\n\tAnnotation         *AnnotationSection         `xml:\"AnnotationSection\"`\n\tProduct            *ProductSection            `xml:\"ProductSection\"`\n\tNetwork            *NetworkSection            `xml:\"NetworkSection\"`\n\tDisk               *DiskSection               `xml:\"DiskSection\"`\n\tOperatingSystem    *OperatingSystemSection    `xml:\"OperatingSystemSection\"`\n\tEula               *EulaSection               `xml:\"EulaSection\"`\n\tVirtualHardware    *VirtualHardwareSection    `xml:\"VirtualHardwareSection\"`\n\tResourceAllocation *ResourceAllocationSection `xml:\"ResourceAllocationSection\"`\n\tDeploymentOption   *DeploymentOptionSection   `xml:\"DeploymentOptionSection\"`\n\n\t// Content: A VirtualSystem or a VirtualSystemCollection\n\tVirtualSystem *VirtualSystem `xml:\"VirtualSystem\"`\n}\n\ntype VirtualSystem struct {\n\tContent\n\n\tAnnotation      []AnnotationSection      `xml:\"AnnotationSection\"`\n\tProduct         []ProductSection         `xml:\"ProductSection\"`\n\tOperatingSystem []OperatingSystemSection `xml:\"OperatingSystemSection\"`\n\tEula            []EulaSection            `xml:\"EulaSection\"`\n\tVirtualHardware []VirtualHardwareSection `xml:\"VirtualHardwareSection\"`\n}\n\ntype File struct {\n\tID       
   string  `xml:\"id,attr\"`\n\tHref        string  `xml:\"href,attr\"`\n\tSize        uint    `xml:\"size,attr\"`\n\tCompression *string `xml:\"compression,attr\"`\n\tChunkSize   *int    `xml:\"chunkSize,attr\"`\n}\n\ntype Content struct {\n\tID   string  `xml:\"id,attr\"`\n\tInfo string  `xml:\"Info\"`\n\tName *string `xml:\"Name\"`\n}\n\ntype Section struct {\n\tRequired *bool  `xml:\"required,attr\"`\n\tInfo     string `xml:\"Info\"`\n}\n\ntype AnnotationSection struct {\n\tSection\n\n\tAnnotation string `xml:\"Annotation\"`\n}\n\ntype ProductSection struct {\n\tSection\n\n\tClass    *string `xml:\"class,attr\"`\n\tInstance *string `xml:\"instance,attr\"`\n\n\tProduct     string     `xml:\"Product\"`\n\tVendor      string     `xml:\"Vendor\"`\n\tVersion     string     `xml:\"Version\"`\n\tFullVersion string     `xml:\"FullVersion\"`\n\tProductURL  string     `xml:\"ProductUrl\"`\n\tVendorURL   string     `xml:\"VendorUrl\"`\n\tAppURL      string     `xml:\"AppUrl\"`\n\tProperty    []Property `xml:\"Property\"`\n}\n\ntype Property struct {\n\tKey              string  `xml:\"key,attr\"`\n\tType             string  `xml:\"type,attr\"`\n\tQualifiers       *string `xml:\"qualifiers,attr\"`\n\tUserConfigurable *bool   `xml:\"userConfigurable,attr\"`\n\tDefault          *string `xml:\"value,attr\"`\n\tPassword         *bool   `xml:\"password,attr\"`\n\n\tLabel       *string `xml:\"Label\"`\n\tDescription *string `xml:\"Description\"`\n\n\tValues []PropertyConfigurationValue `xml:\"Value\"`\n}\n\ntype PropertyConfigurationValue struct {\n\tValue         string  `xml:\"value,attr\"`\n\tConfiguration *string `xml:\"configuration,attr\"`\n}\n\ntype NetworkSection struct {\n\tSection\n\n\tNetworks []Network `xml:\"Network\"`\n}\n\ntype Network struct {\n\tName string `xml:\"name,attr\"`\n\n\tDescription string `xml:\"Description\"`\n}\n\ntype DiskSection struct {\n\tSection\n\n\tDisks []VirtualDiskDesc `xml:\"Disk\"`\n}\n\ntype VirtualDiskDesc struct {\n\tDiskID            
      string  `xml:\"diskId,attr\"`\n\tFileRef                 *string `xml:\"fileRef,attr\"`\n\tCapacity                string  `xml:\"capacity,attr\"`\n\tCapacityAllocationUnits *string `xml:\"capacityAllocationUnits,attr\"`\n\tFormat                  *string `xml:\"format,attr\"`\n\tPopulatedSize           *int    `xml:\"populatedSize,attr\"`\n\tParentRef               *string `xml:\"parentRef,attr\"`\n}\n\ntype OperatingSystemSection struct {\n\tSection\n\n\tID      uint16  `xml:\"id,attr\"`\n\tVersion *string `xml:\"version,attr\"`\n\tOSType  *string `xml:\"osType,attr\"`\n\n\tDescription *string `xml:\"Description\"`\n}\n\ntype EulaSection struct {\n\tSection\n\n\tLicense string `xml:\"License\"`\n}\n\ntype VirtualHardwareSection struct {\n\tSection\n\n\tID        *string `xml:\"id,attr\"`\n\tTransport *string `xml:\"transport,attr\"`\n\n\tSystem *VirtualSystemSettingData       `xml:\"System\"`\n\tItem   []ResourceAllocationSettingData `xml:\"Item\"`\n}\n\ntype VirtualSystemSettingData struct {\n\tCIMVirtualSystemSettingData\n}\n\ntype ResourceAllocationSettingData struct {\n\tCIMResourceAllocationSettingData\n\n\tRequired      *bool   `xml:\"required,attr\"`\n\tConfiguration *string `xml:\"configuration,attr\"`\n\tBound         *string `xml:\"bound,attr\"`\n}\n\ntype ResourceAllocationSection struct {\n\tSection\n\n\tItem []ResourceAllocationSettingData `xml:\"Item\"`\n}\n\ntype DeploymentOptionSection struct {\n\tSection\n\n\tConfiguration []DeploymentOptionConfiguration `xml:\"Configuration\"`\n}\n\ntype DeploymentOptionConfiguration struct {\n\tID      string `xml:\"id,attr\"`\n\tDefault *bool  `xml:\"default,attr\"`\n\n\tLabel       string `xml:\"Label\"`\n\tDescription string `xml:\"Description\"`\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/ovf/ovf.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage ovf\n\nimport (\n\t\"encoding/xml\"\n\t\"io\"\n)\n\nfunc Unmarshal(r io.Reader) (*Envelope, error) {\n\tvar e Envelope\n\n\tdec := xml.NewDecoder(r)\n\terr := dec.Decode(&e)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &e, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/ovf/ovf_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage ovf\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\t\"text/tabwriter\"\n)\n\nfunc testFile(t *testing.T) *os.File {\n\tn := os.Getenv(\"OVF_TEST_FILE\")\n\tif n == \"\" {\n\t\tt.Skip(\"Please specify OVF_TEST_FILE\")\n\t}\n\n\tf, err := os.Open(n)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treturn f\n}\n\nfunc testEnvelope(t *testing.T) *Envelope {\n\tf := testFile(t)\n\tdefer f.Close()\n\n\te, err := Unmarshal(f)\n\tif err != nil {\n\t\tt.Fatal(f)\n\t}\n\n\tif e == nil {\n\t\tt.Fatal(\"Empty envelope\")\n\t}\n\n\treturn e\n}\n\nfunc TestUnmarshal(t *testing.T) {\n\ttestEnvelope(t)\n}\n\nfunc TestDeploymentOptions(t *testing.T) {\n\te := testEnvelope(t)\n\n\tif e.DeploymentOption == nil {\n\t\tt.Fatal(\"Missing DeploymentOptionSection\")\n\t}\n\n\tvar b bytes.Buffer\n\ttw := tabwriter.NewWriter(&b, 2, 0, 2, ' ', 0)\n\tfmt.Fprintf(tw, \"\\n\")\n\tfor _, c := range e.DeploymentOption.Configuration {\n\t\tfmt.Fprintf(tw, \"id=%s\\t\", c.ID)\n\t\tfmt.Fprintf(tw, \"label=%s\\t\", c.Label)\n\n\t\td := false\n\t\tif c.Default != nil {\n\t\t\td = *c.Default\n\t\t}\n\n\t\tfmt.Fprintf(tw, \"default=%t\\t\", d)\n\t\tfmt.Fprintf(tw, \"\\n\")\n\t}\n\ttw.Flush()\n\tt.Log(b.String())\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/pbm/client.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage pbm\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/pbm/methods\"\n\t\"github.com/vmware/govmomi/pbm/types\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\tvim \"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype Client struct {\n\t*soap.Client\n\n\tServiceContent types.PbmServiceInstanceContent\n}\n\nfunc NewClient(ctx context.Context, c *vim25.Client) (*Client, error) {\n\tsc := c.Client.NewServiceClient(\"/pbm/sdk\", \"urn:pbm\")\n\n\treq := types.PbmRetrieveServiceContent{\n\t\tThis: vim.ManagedObjectReference{\n\t\t\tType:  \"PbmServiceInstance\",\n\t\t\tValue: \"ServiceInstance\",\n\t\t},\n\t}\n\n\tres, err := methods.PbmRetrieveServiceContent(ctx, sc, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Client{sc, res.Returnval}, nil\n}\n\nfunc (c *Client) QueryProfile(ctx context.Context, rtype types.PbmProfileResourceType, category string) ([]types.PbmProfileId, error) {\n\treq := types.PbmQueryProfile{\n\t\tThis:            c.ServiceContent.ProfileManager,\n\t\tResourceType:    rtype,\n\t\tProfileCategory: category,\n\t}\n\n\tres, err := methods.PbmQueryProfile(ctx, c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (c *Client) RetrieveContent(ctx context.Context, ids []types.PbmProfileId) ([]types.BasePbmProfile, error) 
{\n\treq := types.PbmRetrieveContent{\n\t\tThis:       c.ServiceContent.ProfileManager,\n\t\tProfileIds: ids,\n\t}\n\n\tres, err := methods.PbmRetrieveContent(ctx, c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\ntype PlacementCompatibilityResult []types.PbmPlacementCompatibilityResult\n\nfunc (c *Client) CheckRequirements(ctx context.Context, hubs []types.PbmPlacementHub, ref *types.PbmServerObjectRef, preq []types.BasePbmPlacementRequirement) (PlacementCompatibilityResult, error) {\n\treq := types.PbmCheckRequirements{\n\t\tThis:                        c.ServiceContent.PlacementSolver,\n\t\tHubsToSearch:                hubs,\n\t\tPlacementSubjectRef:         ref,\n\t\tPlacementSubjectRequirement: preq,\n\t}\n\n\tres, err := methods.PbmCheckRequirements(ctx, c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (l PlacementCompatibilityResult) CompatibleDatastores() []types.PbmPlacementHub {\n\tvar compatibleDatastores []types.PbmPlacementHub\n\n\tfor _, res := range l {\n\t\tif len(res.Error) == 0 {\n\t\t\tcompatibleDatastores = append(compatibleDatastores, res.Hub)\n\t\t}\n\t}\n\treturn compatibleDatastores\n}\n\nfunc (l PlacementCompatibilityResult) NonCompatibleDatastores() []types.PbmPlacementHub {\n\tvar nonCompatibleDatastores []types.PbmPlacementHub\n\n\tfor _, res := range l {\n\t\tif len(res.Error) > 0 {\n\t\t\tnonCompatibleDatastores = append(nonCompatibleDatastores, res.Hub)\n\t\t}\n\t}\n\treturn nonCompatibleDatastores\n}\n\nfunc (c *Client) CreateProfile(ctx context.Context, capabilityProfileCreateSpec types.PbmCapabilityProfileCreateSpec) (*types.PbmProfileId, error) {\n\treq := types.PbmCreate{\n\t\tThis:       c.ServiceContent.ProfileManager,\n\t\tCreateSpec: capabilityProfileCreateSpec,\n\t}\n\n\tres, err := methods.PbmCreate(ctx, c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n\nfunc (c *Client) UpdateProfile(ctx 
context.Context, id types.PbmProfileId, updateSpec types.PbmCapabilityProfileUpdateSpec) error {\n\treq := types.PbmUpdate{\n\t\tThis:       c.ServiceContent.ProfileManager,\n\t\tProfileId:  id,\n\t\tUpdateSpec: updateSpec,\n\t}\n\n\t_, err := methods.PbmUpdate(ctx, c, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *Client) DeleteProfile(ctx context.Context, ids []types.PbmProfileId) ([]types.PbmProfileOperationOutcome, error) {\n\treq := types.PbmDelete{\n\t\tThis:      c.ServiceContent.ProfileManager,\n\t\tProfileId: ids,\n\t}\n\n\tres, err := methods.PbmDelete(ctx, c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (c *Client) QueryAssociatedEntity(ctx context.Context, id types.PbmProfileId, entityType string) ([]types.PbmServerObjectRef, error) {\n\treq := types.PbmQueryAssociatedEntity{\n\t\tThis:       c.ServiceContent.ProfileManager,\n\t\tProfile:    id,\n\t\tEntityType: entityType,\n\t}\n\n\tres, err := methods.PbmQueryAssociatedEntity(ctx, c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (c *Client) QueryAssociatedEntities(ctx context.Context, ids []types.PbmProfileId) ([]types.PbmQueryProfileResult, error) {\n\treq := types.PbmQueryAssociatedEntities{\n\t\tThis:     c.ServiceContent.ProfileManager,\n\t\tProfiles: ids,\n\t}\n\n\tres, err := methods.PbmQueryAssociatedEntities(ctx, c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (c *Client) ProfileIDByName(ctx context.Context, profileName string) (string, error) {\n\tresourceType := types.PbmProfileResourceType{\n\t\tResourceType: string(types.PbmProfileResourceTypeEnumSTORAGE),\n\t}\n\tcategory := types.PbmProfileCategoryEnumREQUIREMENT\n\tids, err := c.QueryProfile(ctx, resourceType, string(category))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tprofiles, err := c.RetrieveContent(ctx, ids)\n\tif err != nil {\n\t\treturn \"\", 
err\n\t}\n\n\tfor i := range profiles {\n\t\tprofile := profiles[i].GetPbmProfile()\n\t\tif profile.Name == profileName {\n\t\t\treturn profile.ProfileId.UniqueId, nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"no pbm profile found with name: %q\", profileName)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/pbm/client_test.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage pbm\n\nimport (\n\t\"context\"\n\t\"os\"\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi\"\n\t\"github.com/vmware/govmomi/pbm/types\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/view\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\tvim \"github.com/vmware/govmomi/vim25/types\"\n)\n\nfunc TestClient(t *testing.T) {\n\turl := os.Getenv(\"GOVMOMI_PBM_URL\")\n\tif url == \"\" {\n\t\tt.SkipNow()\n\t}\n\n\tclusterName := os.Getenv(\"GOVMOMI_PBM_CLUSTER\")\n\n\tu, err := soap.ParseURL(url)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tctx := context.Background()\n\n\tc, err := govmomi.NewClient(ctx, u, true)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpc, err := NewClient(ctx, c.Client)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tt.Logf(\"PBM version=%s\", pc.ServiceContent.AboutInfo.Version)\n\n\trtype := types.PbmProfileResourceType{\n\t\tResourceType: string(types.PbmProfileResourceTypeEnumSTORAGE),\n\t}\n\n\tcategory := types.PbmProfileCategoryEnumREQUIREMENT\n\n\t// 1. Query all the profiles on the vCenter.\n\tids, err := pc.QueryProfile(ctx, rtype, string(category))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar qids []string\n\n\tfor _, id := range ids {\n\t\tqids = append(qids, id.UniqueId)\n\t}\n\n\tvar cids []string\n\n\t// 2. 
Retrieve the content of all profiles.\n\tpolicies, err := pc.RetrieveContent(ctx, ids)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor i := range policies {\n\t\tprofile := policies[i].GetPbmProfile()\n\t\tcids = append(cids, profile.ProfileId.UniqueId)\n\t}\n\n\tsort.Strings(qids)\n\tsort.Strings(cids)\n\n\t// Check whether ids retreived from QueryProfile and RetrieveContent are identical.\n\tif !reflect.DeepEqual(qids, cids) {\n\t\tt.Error(\"ids mismatch\")\n\t}\n\n\t// 3. Get list of datastores in a cluster if cluster name is specified.\n\troot := c.ServiceContent.RootFolder\n\tvar datastores []vim.ManagedObjectReference\n\tvar kind []string\n\n\tif clusterName == \"\" {\n\t\tkind = []string{\"Datastore\"}\n\t} else {\n\t\tkind = []string{\"ClusterComputeResource\"}\n\t}\n\n\tm := view.NewManager(c.Client)\n\n\tv, err := m.CreateContainerView(ctx, root, kind, true)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif clusterName == \"\" {\n\t\tdatastores, err = v.Find(ctx, kind, nil)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t} else {\n\t\tvar cluster mo.ClusterComputeResource\n\n\t\terr = v.RetrieveWithFilter(ctx, kind, []string{\"datastore\"}, &cluster, property.Filter{\"name\": clusterName})\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tdatastores = cluster.Datastore\n\t}\n\n\t_ = v.Destroy(ctx)\n\n\tt.Logf(\"checking %d datatores for compatibility results\", len(datastores))\n\n\tvar hubs []types.PbmPlacementHub\n\n\tfor _, ds := range datastores {\n\t\thubs = append(hubs, types.PbmPlacementHub{\n\t\t\tHubType: ds.Type,\n\t\t\tHubId:   ds.Value,\n\t\t})\n\t}\n\n\tvar req []types.BasePbmPlacementRequirement\n\n\tfor _, id := range ids {\n\t\treq = append(req, &types.PbmPlacementCapabilityProfileRequirement{\n\t\t\tProfileId: id,\n\t\t})\n\t}\n\n\t// 4. 
Get the compatibility results for all the profiles on the vCenter.\n\tres, err := pc.CheckRequirements(ctx, hubs, nil, req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tt.Logf(\"CheckRequirements results: %d\", len(res))\n\n\t// user spec for the profile.\n\t// VSAN profile with 2 capability instances - hostFailuresToTolerate = 2, stripeWidth = 1\n\tpbmCreateSpecForVSAN := CapabilityProfileCreateSpec{\n\t\tName:        \"Kubernetes-VSAN-TestPolicy\",\n\t\tDescription: \"VSAN Test policy create\",\n\t\tCategory:    string(types.PbmProfileCategoryEnumREQUIREMENT),\n\t\tCapabilityList: []Capability{\n\t\t\tCapability{\n\t\t\t\tID:        \"hostFailuresToTolerate\",\n\t\t\t\tNamespace: \"VSAN\",\n\t\t\t\tPropertyList: []Property{\n\t\t\t\t\tProperty{\n\t\t\t\t\t\tID:       \"hostFailuresToTolerate\",\n\t\t\t\t\t\tValue:    \"2\",\n\t\t\t\t\t\tDataType: \"int\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tCapability{\n\t\t\t\tID:        \"stripeWidth\",\n\t\t\t\tNamespace: \"VSAN\",\n\t\t\t\tPropertyList: []Property{\n\t\t\t\t\tProperty{\n\t\t\t\t\t\tID:       \"stripeWidth\",\n\t\t\t\t\t\tValue:    \"1\",\n\t\t\t\t\t\tDataType: \"int\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Create PBM capability spec for the above defined user spec.\n\tcreateSpecVSAN, err := CreateCapabilityProfileSpec(pbmCreateSpecForVSAN)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// 5. Create SPBM VSAN profile.\n\tvsanProfileID, err := pc.CreateProfile(ctx, *createSpecVSAN)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Logf(\"VSAN Profile: %q successfully created\", vsanProfileID.UniqueId)\n\n\t// 6. Verify if profile created exists by issuing a RetrieveContent request.\n\t_, err = pc.RetrieveContent(ctx, []types.PbmProfileId{*vsanProfileID})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Logf(\"Profile: %q exists on vCenter\", vsanProfileID.UniqueId)\n\n\t// 7. 
Get compatible datastores for the VSAN profile.\n\tcompatibleDatastores := res.CompatibleDatastores()\n\tt.Logf(\"Found %d compatible-datastores for profile: %q\", len(compatibleDatastores), vsanProfileID.UniqueId)\n\n\t// 8. Get non-compatible datastores for the VSAN profile.\n\tnonCompatibleDatastores := res.NonCompatibleDatastores()\n\tt.Logf(\"Found %d non-compatible datastores for profile: %q\", len(nonCompatibleDatastores), vsanProfileID.UniqueId)\n\n\t// Check whether count of compatible and non-compatible datastores match the total number of datastores.\n\tif (len(nonCompatibleDatastores) + len(compatibleDatastores)) != len(datastores) {\n\t\tt.Error(\"datastore count mismatch\")\n\t}\n\n\t// user spec for the profile.\n\t// VSAN profile with 2 capability instances - stripeWidth = 1 and an SIOC profile.\n\tpbmCreateSpecVSANandSIOC := CapabilityProfileCreateSpec{\n\t\tName:        \"Kubernetes-VSAN-SIOC-TestPolicy\",\n\t\tDescription: \"VSAN-SIOC-Test policy create\",\n\t\tCategory:    string(types.PbmProfileCategoryEnumREQUIREMENT),\n\t\tCapabilityList: []Capability{\n\t\t\tCapability{\n\t\t\t\tID:        \"stripeWidth\",\n\t\t\t\tNamespace: \"VSAN\",\n\t\t\t\tPropertyList: []Property{\n\t\t\t\t\tProperty{\n\t\t\t\t\t\tID:       \"stripeWidth\",\n\t\t\t\t\t\tValue:    \"1\",\n\t\t\t\t\t\tDataType: \"int\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tCapability{\n\t\t\t\tID:        \"spm@DATASTOREIOCONTROL\",\n\t\t\t\tNamespace: \"spm\",\n\t\t\t\tPropertyList: []Property{\n\t\t\t\t\tProperty{\n\t\t\t\t\t\tID:       \"limit\",\n\t\t\t\t\t\tValue:    \"200\",\n\t\t\t\t\t\tDataType: \"int\",\n\t\t\t\t\t},\n\t\t\t\t\tProperty{\n\t\t\t\t\t\tID:       \"reservation\",\n\t\t\t\t\t\tValue:    \"1000\",\n\t\t\t\t\t\tDataType: \"int\",\n\t\t\t\t\t},\n\t\t\t\t\tProperty{\n\t\t\t\t\t\tID:       \"shares\",\n\t\t\t\t\t\tValue:    \"2000\",\n\t\t\t\t\t\tDataType: \"int\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Create PBM capability spec for the above 
defined user spec.\n\tcreateSpecVSANandSIOC, err := CreateCapabilityProfileSpec(pbmCreateSpecVSANandSIOC)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// 9. Create SPBM VSAN profile.\n\tvsansiocProfileID, err := pc.CreateProfile(ctx, *createSpecVSANandSIOC)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Logf(\"VSAN-SIOC Profile: %q successfully created\", vsansiocProfileID.UniqueId)\n\n\t// 9. Get ProfileID by Name\n\tprofileID, err := pc.ProfileIDByName(ctx, \"Kubernetes-VSAN-SIOC-TestPolicy\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif vsansiocProfileID.UniqueId != profileID {\n\t\tt.Errorf(\"vsan-sioc profile: %q and retrieved profileID: %q successfully matched\", vsansiocProfileID.UniqueId, profileID)\n\t}\n\tt.Logf(\"VSAN-SIOC profile: %q and retrieved profileID: %q successfully matched\", vsansiocProfileID.UniqueId, profileID)\n\n\t// 10. Delete VSAN and VSAN-SIOC profile.\n\t_, err = pc.DeleteProfile(ctx, []types.PbmProfileId{*vsanProfileID, *vsansiocProfileID})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Logf(\"Profile: %+v successfully deleted\", []types.PbmProfileId{*vsanProfileID, *vsansiocProfileID})\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/pbm/methods/methods.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage methods\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/pbm/types\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n)\n\ntype PbmAssignDefaultRequirementProfileBody struct {\n\tReq    *types.PbmAssignDefaultRequirementProfile         `xml:\"urn:pbm PbmAssignDefaultRequirementProfile,omitempty\"`\n\tRes    *types.PbmAssignDefaultRequirementProfileResponse `xml:\"urn:pbm PbmAssignDefaultRequirementProfileResponse,omitempty\"`\n\tFault_ *soap.Fault                                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmAssignDefaultRequirementProfileBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmAssignDefaultRequirementProfile(ctx context.Context, r soap.RoundTripper, req *types.PbmAssignDefaultRequirementProfile) (*types.PbmAssignDefaultRequirementProfileResponse, error) {\n\tvar reqBody, resBody PbmAssignDefaultRequirementProfileBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmCheckCompatibilityBody struct {\n\tReq    *types.PbmCheckCompatibility         `xml:\"urn:pbm PbmCheckCompatibility,omitempty\"`\n\tRes    *types.PbmCheckCompatibilityResponse `xml:\"urn:pbm PbmCheckCompatibilityResponse,omitempty\"`\n\tFault_ *soap.Fault                          
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmCheckCompatibilityBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmCheckCompatibility(ctx context.Context, r soap.RoundTripper, req *types.PbmCheckCompatibility) (*types.PbmCheckCompatibilityResponse, error) {\n\tvar reqBody, resBody PbmCheckCompatibilityBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmCheckCompatibilityWithSpecBody struct {\n\tReq    *types.PbmCheckCompatibilityWithSpec         `xml:\"urn:pbm PbmCheckCompatibilityWithSpec,omitempty\"`\n\tRes    *types.PbmCheckCompatibilityWithSpecResponse `xml:\"urn:pbm PbmCheckCompatibilityWithSpecResponse,omitempty\"`\n\tFault_ *soap.Fault                                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmCheckCompatibilityWithSpecBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmCheckCompatibilityWithSpec(ctx context.Context, r soap.RoundTripper, req *types.PbmCheckCompatibilityWithSpec) (*types.PbmCheckCompatibilityWithSpecResponse, error) {\n\tvar reqBody, resBody PbmCheckCompatibilityWithSpecBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmCheckComplianceBody struct {\n\tReq    *types.PbmCheckCompliance         `xml:\"urn:pbm PbmCheckCompliance,omitempty\"`\n\tRes    *types.PbmCheckComplianceResponse `xml:\"urn:pbm PbmCheckComplianceResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmCheckComplianceBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmCheckCompliance(ctx context.Context, r soap.RoundTripper, req *types.PbmCheckCompliance) (*types.PbmCheckComplianceResponse, error) {\n\tvar reqBody, resBody 
PbmCheckComplianceBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmCheckRequirementsBody struct {\n\tReq    *types.PbmCheckRequirements         `xml:\"urn:pbm PbmCheckRequirements,omitempty\"`\n\tRes    *types.PbmCheckRequirementsResponse `xml:\"urn:pbm PbmCheckRequirementsResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmCheckRequirementsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmCheckRequirements(ctx context.Context, r soap.RoundTripper, req *types.PbmCheckRequirements) (*types.PbmCheckRequirementsResponse, error) {\n\tvar reqBody, resBody PbmCheckRequirementsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmCheckRollupComplianceBody struct {\n\tReq    *types.PbmCheckRollupCompliance         `xml:\"urn:pbm PbmCheckRollupCompliance,omitempty\"`\n\tRes    *types.PbmCheckRollupComplianceResponse `xml:\"urn:pbm PbmCheckRollupComplianceResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmCheckRollupComplianceBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmCheckRollupCompliance(ctx context.Context, r soap.RoundTripper, req *types.PbmCheckRollupCompliance) (*types.PbmCheckRollupComplianceResponse, error) {\n\tvar reqBody, resBody PbmCheckRollupComplianceBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmCreateBody struct {\n\tReq    *types.PbmCreate         `xml:\"urn:pbm PbmCreate,omitempty\"`\n\tRes    *types.PbmCreateResponse `xml:\"urn:pbm PbmCreateResponse,omitempty\"`\n\tFault_ *soap.Fault              
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmCreateBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmCreate(ctx context.Context, r soap.RoundTripper, req *types.PbmCreate) (*types.PbmCreateResponse, error) {\n\tvar reqBody, resBody PbmCreateBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmDeleteBody struct {\n\tReq    *types.PbmDelete         `xml:\"urn:pbm PbmDelete,omitempty\"`\n\tRes    *types.PbmDeleteResponse `xml:\"urn:pbm PbmDeleteResponse,omitempty\"`\n\tFault_ *soap.Fault              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmDeleteBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmDelete(ctx context.Context, r soap.RoundTripper, req *types.PbmDelete) (*types.PbmDeleteResponse, error) {\n\tvar reqBody, resBody PbmDeleteBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmFetchCapabilityMetadataBody struct {\n\tReq    *types.PbmFetchCapabilityMetadata         `xml:\"urn:pbm PbmFetchCapabilityMetadata,omitempty\"`\n\tRes    *types.PbmFetchCapabilityMetadataResponse `xml:\"urn:pbm PbmFetchCapabilityMetadataResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmFetchCapabilityMetadataBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmFetchCapabilityMetadata(ctx context.Context, r soap.RoundTripper, req *types.PbmFetchCapabilityMetadata) (*types.PbmFetchCapabilityMetadataResponse, error) {\n\tvar reqBody, resBody PbmFetchCapabilityMetadataBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmFetchCapabilitySchemaBody struct 
{\n\tReq    *types.PbmFetchCapabilitySchema         `xml:\"urn:pbm PbmFetchCapabilitySchema,omitempty\"`\n\tRes    *types.PbmFetchCapabilitySchemaResponse `xml:\"urn:pbm PbmFetchCapabilitySchemaResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmFetchCapabilitySchemaBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmFetchCapabilitySchema(ctx context.Context, r soap.RoundTripper, req *types.PbmFetchCapabilitySchema) (*types.PbmFetchCapabilitySchemaResponse, error) {\n\tvar reqBody, resBody PbmFetchCapabilitySchemaBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmFetchComplianceResultBody struct {\n\tReq    *types.PbmFetchComplianceResult         `xml:\"urn:pbm PbmFetchComplianceResult,omitempty\"`\n\tRes    *types.PbmFetchComplianceResultResponse `xml:\"urn:pbm PbmFetchComplianceResultResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmFetchComplianceResultBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmFetchComplianceResult(ctx context.Context, r soap.RoundTripper, req *types.PbmFetchComplianceResult) (*types.PbmFetchComplianceResultResponse, error) {\n\tvar reqBody, resBody PbmFetchComplianceResultBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmFetchResourceTypeBody struct {\n\tReq    *types.PbmFetchResourceType         `xml:\"urn:pbm PbmFetchResourceType,omitempty\"`\n\tRes    *types.PbmFetchResourceTypeResponse `xml:\"urn:pbm PbmFetchResourceTypeResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b 
*PbmFetchResourceTypeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmFetchResourceType(ctx context.Context, r soap.RoundTripper, req *types.PbmFetchResourceType) (*types.PbmFetchResourceTypeResponse, error) {\n\tvar reqBody, resBody PbmFetchResourceTypeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmFetchRollupComplianceResultBody struct {\n\tReq    *types.PbmFetchRollupComplianceResult         `xml:\"urn:pbm PbmFetchRollupComplianceResult,omitempty\"`\n\tRes    *types.PbmFetchRollupComplianceResultResponse `xml:\"urn:pbm PbmFetchRollupComplianceResultResponse,omitempty\"`\n\tFault_ *soap.Fault                                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmFetchRollupComplianceResultBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmFetchRollupComplianceResult(ctx context.Context, r soap.RoundTripper, req *types.PbmFetchRollupComplianceResult) (*types.PbmFetchRollupComplianceResultResponse, error) {\n\tvar reqBody, resBody PbmFetchRollupComplianceResultBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmFetchVendorInfoBody struct {\n\tReq    *types.PbmFetchVendorInfo         `xml:\"urn:pbm PbmFetchVendorInfo,omitempty\"`\n\tRes    *types.PbmFetchVendorInfoResponse `xml:\"urn:pbm PbmFetchVendorInfoResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmFetchVendorInfoBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmFetchVendorInfo(ctx context.Context, r soap.RoundTripper, req *types.PbmFetchVendorInfo) (*types.PbmFetchVendorInfoResponse, error) {\n\tvar reqBody, resBody PbmFetchVendorInfoBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err 
!= nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmFindApplicableDefaultProfileBody struct {\n\tReq    *types.PbmFindApplicableDefaultProfile         `xml:\"urn:pbm PbmFindApplicableDefaultProfile,omitempty\"`\n\tRes    *types.PbmFindApplicableDefaultProfileResponse `xml:\"urn:pbm PbmFindApplicableDefaultProfileResponse,omitempty\"`\n\tFault_ *soap.Fault                                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmFindApplicableDefaultProfileBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmFindApplicableDefaultProfile(ctx context.Context, r soap.RoundTripper, req *types.PbmFindApplicableDefaultProfile) (*types.PbmFindApplicableDefaultProfileResponse, error) {\n\tvar reqBody, resBody PbmFindApplicableDefaultProfileBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmQueryAssociatedEntitiesBody struct {\n\tReq    *types.PbmQueryAssociatedEntities         `xml:\"urn:pbm PbmQueryAssociatedEntities,omitempty\"`\n\tRes    *types.PbmQueryAssociatedEntitiesResponse `xml:\"urn:pbm PbmQueryAssociatedEntitiesResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmQueryAssociatedEntitiesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmQueryAssociatedEntities(ctx context.Context, r soap.RoundTripper, req *types.PbmQueryAssociatedEntities) (*types.PbmQueryAssociatedEntitiesResponse, error) {\n\tvar reqBody, resBody PbmQueryAssociatedEntitiesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmQueryAssociatedEntityBody struct {\n\tReq    *types.PbmQueryAssociatedEntity         `xml:\"urn:pbm PbmQueryAssociatedEntity,omitempty\"`\n\tRes    
*types.PbmQueryAssociatedEntityResponse `xml:\"urn:pbm PbmQueryAssociatedEntityResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmQueryAssociatedEntityBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmQueryAssociatedEntity(ctx context.Context, r soap.RoundTripper, req *types.PbmQueryAssociatedEntity) (*types.PbmQueryAssociatedEntityResponse, error) {\n\tvar reqBody, resBody PbmQueryAssociatedEntityBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmQueryAssociatedProfileBody struct {\n\tReq    *types.PbmQueryAssociatedProfile         `xml:\"urn:pbm PbmQueryAssociatedProfile,omitempty\"`\n\tRes    *types.PbmQueryAssociatedProfileResponse `xml:\"urn:pbm PbmQueryAssociatedProfileResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmQueryAssociatedProfileBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmQueryAssociatedProfile(ctx context.Context, r soap.RoundTripper, req *types.PbmQueryAssociatedProfile) (*types.PbmQueryAssociatedProfileResponse, error) {\n\tvar reqBody, resBody PbmQueryAssociatedProfileBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmQueryAssociatedProfilesBody struct {\n\tReq    *types.PbmQueryAssociatedProfiles         `xml:\"urn:pbm PbmQueryAssociatedProfiles,omitempty\"`\n\tRes    *types.PbmQueryAssociatedProfilesResponse `xml:\"urn:pbm PbmQueryAssociatedProfilesResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmQueryAssociatedProfilesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc 
PbmQueryAssociatedProfiles(ctx context.Context, r soap.RoundTripper, req *types.PbmQueryAssociatedProfiles) (*types.PbmQueryAssociatedProfilesResponse, error) {\n\tvar reqBody, resBody PbmQueryAssociatedProfilesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmQueryByRollupComplianceStatusBody struct {\n\tReq    *types.PbmQueryByRollupComplianceStatus         `xml:\"urn:pbm PbmQueryByRollupComplianceStatus,omitempty\"`\n\tRes    *types.PbmQueryByRollupComplianceStatusResponse `xml:\"urn:pbm PbmQueryByRollupComplianceStatusResponse,omitempty\"`\n\tFault_ *soap.Fault                                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmQueryByRollupComplianceStatusBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmQueryByRollupComplianceStatus(ctx context.Context, r soap.RoundTripper, req *types.PbmQueryByRollupComplianceStatus) (*types.PbmQueryByRollupComplianceStatusResponse, error) {\n\tvar reqBody, resBody PbmQueryByRollupComplianceStatusBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmQueryDefaultRequirementProfileBody struct {\n\tReq    *types.PbmQueryDefaultRequirementProfile         `xml:\"urn:pbm PbmQueryDefaultRequirementProfile,omitempty\"`\n\tRes    *types.PbmQueryDefaultRequirementProfileResponse `xml:\"urn:pbm PbmQueryDefaultRequirementProfileResponse,omitempty\"`\n\tFault_ *soap.Fault                                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmQueryDefaultRequirementProfileBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmQueryDefaultRequirementProfile(ctx context.Context, r soap.RoundTripper, req *types.PbmQueryDefaultRequirementProfile) (*types.PbmQueryDefaultRequirementProfileResponse, error) {\n\tvar 
reqBody, resBody PbmQueryDefaultRequirementProfileBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmQueryDefaultRequirementProfilesBody struct {\n\tReq    *types.PbmQueryDefaultRequirementProfiles         `xml:\"urn:pbm PbmQueryDefaultRequirementProfiles,omitempty\"`\n\tRes    *types.PbmQueryDefaultRequirementProfilesResponse `xml:\"urn:pbm PbmQueryDefaultRequirementProfilesResponse,omitempty\"`\n\tFault_ *soap.Fault                                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmQueryDefaultRequirementProfilesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmQueryDefaultRequirementProfiles(ctx context.Context, r soap.RoundTripper, req *types.PbmQueryDefaultRequirementProfiles) (*types.PbmQueryDefaultRequirementProfilesResponse, error) {\n\tvar reqBody, resBody PbmQueryDefaultRequirementProfilesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmQueryMatchingHubBody struct {\n\tReq    *types.PbmQueryMatchingHub         `xml:\"urn:pbm PbmQueryMatchingHub,omitempty\"`\n\tRes    *types.PbmQueryMatchingHubResponse `xml:\"urn:pbm PbmQueryMatchingHubResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmQueryMatchingHubBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmQueryMatchingHub(ctx context.Context, r soap.RoundTripper, req *types.PbmQueryMatchingHub) (*types.PbmQueryMatchingHubResponse, error) {\n\tvar reqBody, resBody PbmQueryMatchingHubBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmQueryMatchingHubWithSpecBody struct {\n\tReq    
*types.PbmQueryMatchingHubWithSpec         `xml:\"urn:pbm PbmQueryMatchingHubWithSpec,omitempty\"`\n\tRes    *types.PbmQueryMatchingHubWithSpecResponse `xml:\"urn:pbm PbmQueryMatchingHubWithSpecResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmQueryMatchingHubWithSpecBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmQueryMatchingHubWithSpec(ctx context.Context, r soap.RoundTripper, req *types.PbmQueryMatchingHubWithSpec) (*types.PbmQueryMatchingHubWithSpecResponse, error) {\n\tvar reqBody, resBody PbmQueryMatchingHubWithSpecBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmQueryProfileBody struct {\n\tReq    *types.PbmQueryProfile         `xml:\"urn:pbm PbmQueryProfile,omitempty\"`\n\tRes    *types.PbmQueryProfileResponse `xml:\"urn:pbm PbmQueryProfileResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmQueryProfileBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmQueryProfile(ctx context.Context, r soap.RoundTripper, req *types.PbmQueryProfile) (*types.PbmQueryProfileResponse, error) {\n\tvar reqBody, resBody PbmQueryProfileBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmQueryReplicationGroupsBody struct {\n\tReq    *types.PbmQueryReplicationGroups         `xml:\"urn:pbm PbmQueryReplicationGroups,omitempty\"`\n\tRes    *types.PbmQueryReplicationGroupsResponse `xml:\"urn:pbm PbmQueryReplicationGroupsResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmQueryReplicationGroupsBody) Fault() *soap.Fault { return b.Fault_ 
}\n\nfunc PbmQueryReplicationGroups(ctx context.Context, r soap.RoundTripper, req *types.PbmQueryReplicationGroups) (*types.PbmQueryReplicationGroupsResponse, error) {\n\tvar reqBody, resBody PbmQueryReplicationGroupsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmQuerySpaceStatsForStorageContainerBody struct {\n\tReq    *types.PbmQuerySpaceStatsForStorageContainer         `xml:\"urn:pbm PbmQuerySpaceStatsForStorageContainer,omitempty\"`\n\tRes    *types.PbmQuerySpaceStatsForStorageContainerResponse `xml:\"urn:pbm PbmQuerySpaceStatsForStorageContainerResponse,omitempty\"`\n\tFault_ *soap.Fault                                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmQuerySpaceStatsForStorageContainerBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmQuerySpaceStatsForStorageContainer(ctx context.Context, r soap.RoundTripper, req *types.PbmQuerySpaceStatsForStorageContainer) (*types.PbmQuerySpaceStatsForStorageContainerResponse, error) {\n\tvar reqBody, resBody PbmQuerySpaceStatsForStorageContainerBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmResetDefaultRequirementProfileBody struct {\n\tReq    *types.PbmResetDefaultRequirementProfile         `xml:\"urn:pbm PbmResetDefaultRequirementProfile,omitempty\"`\n\tRes    *types.PbmResetDefaultRequirementProfileResponse `xml:\"urn:pbm PbmResetDefaultRequirementProfileResponse,omitempty\"`\n\tFault_ *soap.Fault                                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmResetDefaultRequirementProfileBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmResetDefaultRequirementProfile(ctx context.Context, r soap.RoundTripper, req *types.PbmResetDefaultRequirementProfile) 
(*types.PbmResetDefaultRequirementProfileResponse, error) {\n\tvar reqBody, resBody PbmResetDefaultRequirementProfileBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmResetVSanDefaultProfileBody struct {\n\tReq    *types.PbmResetVSanDefaultProfile         `xml:\"urn:pbm PbmResetVSanDefaultProfile,omitempty\"`\n\tRes    *types.PbmResetVSanDefaultProfileResponse `xml:\"urn:pbm PbmResetVSanDefaultProfileResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmResetVSanDefaultProfileBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmResetVSanDefaultProfile(ctx context.Context, r soap.RoundTripper, req *types.PbmResetVSanDefaultProfile) (*types.PbmResetVSanDefaultProfileResponse, error) {\n\tvar reqBody, resBody PbmResetVSanDefaultProfileBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmRetrieveContentBody struct {\n\tReq    *types.PbmRetrieveContent         `xml:\"urn:pbm PbmRetrieveContent,omitempty\"`\n\tRes    *types.PbmRetrieveContentResponse `xml:\"urn:pbm PbmRetrieveContentResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmRetrieveContentBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmRetrieveContent(ctx context.Context, r soap.RoundTripper, req *types.PbmRetrieveContent) (*types.PbmRetrieveContentResponse, error) {\n\tvar reqBody, resBody PbmRetrieveContentBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmRetrieveServiceContentBody struct {\n\tReq    *types.PbmRetrieveServiceContent         `xml:\"urn:pbm 
PbmRetrieveServiceContent,omitempty\"`\n\tRes    *types.PbmRetrieveServiceContentResponse `xml:\"urn:pbm PbmRetrieveServiceContentResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmRetrieveServiceContentBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmRetrieveServiceContent(ctx context.Context, r soap.RoundTripper, req *types.PbmRetrieveServiceContent) (*types.PbmRetrieveServiceContentResponse, error) {\n\tvar reqBody, resBody PbmRetrieveServiceContentBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PbmUpdateBody struct {\n\tReq    *types.PbmUpdate         `xml:\"urn:pbm PbmUpdate,omitempty\"`\n\tRes    *types.PbmUpdateResponse `xml:\"urn:pbm PbmUpdateResponse,omitempty\"`\n\tFault_ *soap.Fault              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PbmUpdateBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PbmUpdate(ctx context.Context, r soap.RoundTripper, req *types.PbmUpdate) (*types.PbmUpdateResponse, error) {\n\tvar reqBody, resBody PbmUpdateBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/pbm/pbm_util.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage pbm\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/pbm/types\"\n)\n\n// A struct to capture pbm create spec details.\ntype CapabilityProfileCreateSpec struct {\n\tName           string\n\tDescription    string\n\tCategory       string\n\tCapabilityList []Capability\n}\n\n// A struct to capture pbm capability instance details.\ntype Capability struct {\n\tID           string\n\tNamespace    string\n\tPropertyList []Property\n}\n\n// A struct to capture pbm property instance details.\ntype Property struct {\n\tID       string\n\tOperator string\n\tValue    string\n\tDataType string\n}\n\nfunc CreateCapabilityProfileSpec(pbmCreateSpec CapabilityProfileCreateSpec) (*types.PbmCapabilityProfileCreateSpec, error) {\n\tcapabilities, err := createCapabilityInstances(pbmCreateSpec.CapabilityList)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpbmCapabilityProfileSpec := types.PbmCapabilityProfileCreateSpec{\n\t\tName:        pbmCreateSpec.Name,\n\t\tDescription: pbmCreateSpec.Description,\n\t\tCategory:    pbmCreateSpec.Category,\n\t\tResourceType: types.PbmProfileResourceType{\n\t\t\tResourceType: string(types.PbmProfileResourceTypeEnumSTORAGE),\n\t\t},\n\t\tConstraints: &types.PbmCapabilitySubProfileConstraints{\n\t\t\tSubProfiles: 
[]types.PbmCapabilitySubProfile{\n\t\t\t\ttypes.PbmCapabilitySubProfile{\n\t\t\t\t\tCapability: capabilities,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\treturn &pbmCapabilityProfileSpec, nil\n}\n\nfunc createCapabilityInstances(rules []Capability) ([]types.PbmCapabilityInstance, error) {\n\tvar capabilityInstances []types.PbmCapabilityInstance\n\tfor _, capabilityRule := range rules {\n\t\tcapability := types.PbmCapabilityInstance{\n\t\t\tId: types.PbmCapabilityMetadataUniqueId{\n\t\t\t\tNamespace: capabilityRule.Namespace,\n\t\t\t\tId:        capabilityRule.ID,\n\t\t\t},\n\t\t}\n\n\t\tvar propertyInstances []types.PbmCapabilityPropertyInstance\n\t\tfor _, propertyRule := range capabilityRule.PropertyList {\n\t\t\tproperty := types.PbmCapabilityPropertyInstance{\n\t\t\t\tId: propertyRule.ID,\n\t\t\t}\n\t\t\tif propertyRule.Operator != \"\" {\n\t\t\t\tproperty.Operator = propertyRule.Operator\n\t\t\t}\n\t\t\tvar err error\n\t\t\tswitch strings.ToLower(propertyRule.DataType) {\n\t\t\tcase \"int\":\n\t\t\t\t// Go int32 is marshalled to xsi:int whereas Go int is marshalled to xsi:long when sending down the wire.\n\t\t\t\tvar val int32\n\t\t\t\tval, err = verifyPropertyValueIsInt(propertyRule.Value, propertyRule.DataType)\n\t\t\t\tproperty.Value = val\n\t\t\tcase \"bool\":\n\t\t\t\tvar val bool\n\t\t\t\tval, err = verifyPropertyValueIsBoolean(propertyRule.Value, propertyRule.DataType)\n\t\t\t\tproperty.Value = val\n\t\t\tcase \"string\":\n\t\t\t\tproperty.Value = propertyRule.Value\n\t\t\tcase \"set\":\n\t\t\t\tset := types.PbmCapabilityDiscreteSet{}\n\t\t\t\tfor _, val := range strings.Split(propertyRule.Value, \",\") {\n\t\t\t\t\tset.Values = append(set.Values, val)\n\t\t\t\t}\n\t\t\t\tproperty.Value = set\n\t\t\tdefault:\n\t\t\t\treturn nil, fmt.Errorf(\"invalid value: %q with datatype: %q\", propertyRule.Value, propertyRule.Value)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid value: %q with datatype: %q\", propertyRule.Value, 
propertyRule.Value)\n\t\t\t}\n\t\t\tpropertyInstances = append(propertyInstances, property)\n\t\t}\n\t\tconstraintInstances := []types.PbmCapabilityConstraintInstance{\n\t\t\ttypes.PbmCapabilityConstraintInstance{\n\t\t\t\tPropertyInstance: propertyInstances,\n\t\t\t},\n\t\t}\n\t\tcapability.Constraint = constraintInstances\n\t\tcapabilityInstances = append(capabilityInstances, capability)\n\t}\n\treturn capabilityInstances, nil\n}\n\n// Verify if the capability value is of type integer.\nfunc verifyPropertyValueIsInt(propertyValue string, dataType string) (int32, error) {\n\tval, err := strconv.ParseInt(propertyValue, 10, 32)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\treturn int32(val), nil\n}\n\n// Verify if the capability value is of type integer.\nfunc verifyPropertyValueIsBoolean(propertyValue string, dataType string) (bool, error) {\n\tval, err := strconv.ParseBool(propertyValue)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn val, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/pbm/types/enum.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage types\n\nimport (\n\t\"reflect\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype PbmBuiltinGenericType string\n\nconst (\n\tPbmBuiltinGenericTypeVMW_RANGE = PbmBuiltinGenericType(\"VMW_RANGE\")\n\tPbmBuiltinGenericTypeVMW_SET   = PbmBuiltinGenericType(\"VMW_SET\")\n)\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmBuiltinGenericType\", reflect.TypeOf((*PbmBuiltinGenericType)(nil)).Elem())\n}\n\ntype PbmBuiltinType string\n\nconst (\n\tPbmBuiltinTypeXSD_LONG     = PbmBuiltinType(\"XSD_LONG\")\n\tPbmBuiltinTypeXSD_SHORT    = PbmBuiltinType(\"XSD_SHORT\")\n\tPbmBuiltinTypeXSD_INTEGER  = PbmBuiltinType(\"XSD_INTEGER\")\n\tPbmBuiltinTypeXSD_INT      = PbmBuiltinType(\"XSD_INT\")\n\tPbmBuiltinTypeXSD_STRING   = PbmBuiltinType(\"XSD_STRING\")\n\tPbmBuiltinTypeXSD_BOOLEAN  = PbmBuiltinType(\"XSD_BOOLEAN\")\n\tPbmBuiltinTypeXSD_DOUBLE   = PbmBuiltinType(\"XSD_DOUBLE\")\n\tPbmBuiltinTypeXSD_DATETIME = PbmBuiltinType(\"XSD_DATETIME\")\n\tPbmBuiltinTypeVMW_TIMESPAN = PbmBuiltinType(\"VMW_TIMESPAN\")\n\tPbmBuiltinTypeVMW_POLICY   = PbmBuiltinType(\"VMW_POLICY\")\n)\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmBuiltinType\", reflect.TypeOf((*PbmBuiltinType)(nil)).Elem())\n}\n\ntype PbmCapabilityOperator string\n\nconst (\n\tPbmCapabilityOperatorNOT = PbmCapabilityOperator(\"NOT\")\n)\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityOperator\", 
reflect.TypeOf((*PbmCapabilityOperator)(nil)).Elem())\n}\n\ntype PbmCapabilityTimeUnitType string\n\nconst (\n\tPbmCapabilityTimeUnitTypeSECONDS = PbmCapabilityTimeUnitType(\"SECONDS\")\n\tPbmCapabilityTimeUnitTypeMINUTES = PbmCapabilityTimeUnitType(\"MINUTES\")\n\tPbmCapabilityTimeUnitTypeHOURS   = PbmCapabilityTimeUnitType(\"HOURS\")\n\tPbmCapabilityTimeUnitTypeDAYS    = PbmCapabilityTimeUnitType(\"DAYS\")\n\tPbmCapabilityTimeUnitTypeWEEKS   = PbmCapabilityTimeUnitType(\"WEEKS\")\n\tPbmCapabilityTimeUnitTypeMONTHS  = PbmCapabilityTimeUnitType(\"MONTHS\")\n\tPbmCapabilityTimeUnitTypeYEARS   = PbmCapabilityTimeUnitType(\"YEARS\")\n)\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityTimeUnitType\", reflect.TypeOf((*PbmCapabilityTimeUnitType)(nil)).Elem())\n}\n\ntype PbmComplianceResultComplianceTaskStatus string\n\nconst (\n\tPbmComplianceResultComplianceTaskStatusInProgress = PbmComplianceResultComplianceTaskStatus(\"inProgress\")\n\tPbmComplianceResultComplianceTaskStatusSuccess    = PbmComplianceResultComplianceTaskStatus(\"success\")\n\tPbmComplianceResultComplianceTaskStatusFailed     = PbmComplianceResultComplianceTaskStatus(\"failed\")\n)\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmComplianceResultComplianceTaskStatus\", reflect.TypeOf((*PbmComplianceResultComplianceTaskStatus)(nil)).Elem())\n}\n\ntype PbmComplianceStatus string\n\nconst (\n\tPbmComplianceStatusCompliant     = PbmComplianceStatus(\"compliant\")\n\tPbmComplianceStatusNonCompliant  = PbmComplianceStatus(\"nonCompliant\")\n\tPbmComplianceStatusUnknown       = PbmComplianceStatus(\"unknown\")\n\tPbmComplianceStatusNotApplicable = PbmComplianceStatus(\"notApplicable\")\n\tPbmComplianceStatusOutOfDate     = PbmComplianceStatus(\"outOfDate\")\n)\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmComplianceStatus\", reflect.TypeOf((*PbmComplianceStatus)(nil)).Elem())\n}\n\ntype PbmIofilterInfoFilterType string\n\nconst (\n\tPbmIofilterInfoFilterTypeINSPECTION         = 
PbmIofilterInfoFilterType(\"INSPECTION\")\n\tPbmIofilterInfoFilterTypeCOMPRESSION        = PbmIofilterInfoFilterType(\"COMPRESSION\")\n\tPbmIofilterInfoFilterTypeENCRYPTION         = PbmIofilterInfoFilterType(\"ENCRYPTION\")\n\tPbmIofilterInfoFilterTypeREPLICATION        = PbmIofilterInfoFilterType(\"REPLICATION\")\n\tPbmIofilterInfoFilterTypeCACHE              = PbmIofilterInfoFilterType(\"CACHE\")\n\tPbmIofilterInfoFilterTypeDATAPROVIDER       = PbmIofilterInfoFilterType(\"DATAPROVIDER\")\n\tPbmIofilterInfoFilterTypeDATASTOREIOCONTROL = PbmIofilterInfoFilterType(\"DATASTOREIOCONTROL\")\n)\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmIofilterInfoFilterType\", reflect.TypeOf((*PbmIofilterInfoFilterType)(nil)).Elem())\n}\n\ntype PbmLineOfServiceInfoLineOfServiceEnum string\n\nconst (\n\tPbmLineOfServiceInfoLineOfServiceEnumINSPECTION           = PbmLineOfServiceInfoLineOfServiceEnum(\"INSPECTION\")\n\tPbmLineOfServiceInfoLineOfServiceEnumCOMPRESSION          = PbmLineOfServiceInfoLineOfServiceEnum(\"COMPRESSION\")\n\tPbmLineOfServiceInfoLineOfServiceEnumENCRYPTION           = PbmLineOfServiceInfoLineOfServiceEnum(\"ENCRYPTION\")\n\tPbmLineOfServiceInfoLineOfServiceEnumREPLICATION          = PbmLineOfServiceInfoLineOfServiceEnum(\"REPLICATION\")\n\tPbmLineOfServiceInfoLineOfServiceEnumCACHING              = PbmLineOfServiceInfoLineOfServiceEnum(\"CACHING\")\n\tPbmLineOfServiceInfoLineOfServiceEnumPERSISTENCE          = PbmLineOfServiceInfoLineOfServiceEnum(\"PERSISTENCE\")\n\tPbmLineOfServiceInfoLineOfServiceEnumDATA_PROVIDER        = PbmLineOfServiceInfoLineOfServiceEnum(\"DATA_PROVIDER\")\n\tPbmLineOfServiceInfoLineOfServiceEnumDATASTORE_IO_CONTROL = PbmLineOfServiceInfoLineOfServiceEnum(\"DATASTORE_IO_CONTROL\")\n)\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmLineOfServiceInfoLineOfServiceEnum\", reflect.TypeOf((*PbmLineOfServiceInfoLineOfServiceEnum)(nil)).Elem())\n}\n\ntype PbmObjectType string\n\nconst (\n\tPbmObjectTypeVirtualMachine         = 
PbmObjectType(\"virtualMachine\")\n\tPbmObjectTypeVirtualMachineAndDisks = PbmObjectType(\"virtualMachineAndDisks\")\n\tPbmObjectTypeVirtualDiskId          = PbmObjectType(\"virtualDiskId\")\n\tPbmObjectTypeVirtualDiskUUID        = PbmObjectType(\"virtualDiskUUID\")\n\tPbmObjectTypeDatastore              = PbmObjectType(\"datastore\")\n\tPbmObjectTypeUnknown                = PbmObjectType(\"unknown\")\n)\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmObjectType\", reflect.TypeOf((*PbmObjectType)(nil)).Elem())\n}\n\ntype PbmProfileCategoryEnum string\n\nconst (\n\tPbmProfileCategoryEnumREQUIREMENT         = PbmProfileCategoryEnum(\"REQUIREMENT\")\n\tPbmProfileCategoryEnumRESOURCE            = PbmProfileCategoryEnum(\"RESOURCE\")\n\tPbmProfileCategoryEnumDATA_SERVICE_POLICY = PbmProfileCategoryEnum(\"DATA_SERVICE_POLICY\")\n)\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmProfileCategoryEnum\", reflect.TypeOf((*PbmProfileCategoryEnum)(nil)).Elem())\n}\n\ntype PbmProfileResourceTypeEnum string\n\nconst (\n\tPbmProfileResourceTypeEnumSTORAGE = PbmProfileResourceTypeEnum(\"STORAGE\")\n)\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmProfileResourceTypeEnum\", reflect.TypeOf((*PbmProfileResourceTypeEnum)(nil)).Elem())\n}\n\ntype PbmSystemCreatedProfileType string\n\nconst (\n\tPbmSystemCreatedProfileTypeVsanDefaultProfile = PbmSystemCreatedProfileType(\"VsanDefaultProfile\")\n\tPbmSystemCreatedProfileTypeVVolDefaultProfile = PbmSystemCreatedProfileType(\"VVolDefaultProfile\")\n)\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmSystemCreatedProfileType\", reflect.TypeOf((*PbmSystemCreatedProfileType)(nil)).Elem())\n}\n\ntype PbmVmOperation string\n\nconst (\n\tPbmVmOperationCREATE      = PbmVmOperation(\"CREATE\")\n\tPbmVmOperationRECONFIGURE = PbmVmOperation(\"RECONFIGURE\")\n\tPbmVmOperationMIGRATE     = PbmVmOperation(\"MIGRATE\")\n\tPbmVmOperationCLONE       = PbmVmOperation(\"CLONE\")\n)\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmVmOperation\", reflect.TypeOf((*PbmVmOperation)(nil)).Elem())\n}\n\ntype 
PbmVvolType string\n\nconst (\n\tPbmVvolTypeConfig = PbmVvolType(\"Config\")\n\tPbmVvolTypeData   = PbmVvolType(\"Data\")\n\tPbmVvolTypeSwap   = PbmVvolType(\"Swap\")\n)\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmVvolType\", reflect.TypeOf((*PbmVvolType)(nil)).Elem())\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/pbm/types/if.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage types\n\nimport (\n\t\"reflect\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nfunc (b *PbmCapabilityConstraints) GetPbmCapabilityConstraints() *PbmCapabilityConstraints { return b }\n\ntype BasePbmCapabilityConstraints interface {\n\tGetPbmCapabilityConstraints() *PbmCapabilityConstraints\n}\n\nfunc init() {\n\ttypes.Add(\"BasePbmCapabilityConstraints\", reflect.TypeOf((*PbmCapabilityConstraints)(nil)).Elem())\n}\n\nfunc (b *PbmCapabilityProfile) GetPbmCapabilityProfile() *PbmCapabilityProfile { return b }\n\ntype BasePbmCapabilityProfile interface {\n\tGetPbmCapabilityProfile() *PbmCapabilityProfile\n}\n\nfunc init() {\n\ttypes.Add(\"BasePbmCapabilityProfile\", reflect.TypeOf((*PbmCapabilityProfile)(nil)).Elem())\n}\n\nfunc (b *PbmCapabilityProfilePropertyMismatchFault) GetPbmCapabilityProfilePropertyMismatchFault() *PbmCapabilityProfilePropertyMismatchFault {\n\treturn b\n}\n\ntype BasePbmCapabilityProfilePropertyMismatchFault interface {\n\tGetPbmCapabilityProfilePropertyMismatchFault() *PbmCapabilityProfilePropertyMismatchFault\n}\n\nfunc init() {\n\ttypes.Add(\"BasePbmCapabilityProfilePropertyMismatchFault\", reflect.TypeOf((*PbmCapabilityProfilePropertyMismatchFault)(nil)).Elem())\n}\n\nfunc (b *PbmCapabilityTypeInfo) GetPbmCapabilityTypeInfo() *PbmCapabilityTypeInfo { return b }\n\ntype BasePbmCapabilityTypeInfo interface 
{\n\tGetPbmCapabilityTypeInfo() *PbmCapabilityTypeInfo\n}\n\nfunc init() {\n\ttypes.Add(\"BasePbmCapabilityTypeInfo\", reflect.TypeOf((*PbmCapabilityTypeInfo)(nil)).Elem())\n}\n\nfunc (b *PbmCompatibilityCheckFault) GetPbmCompatibilityCheckFault() *PbmCompatibilityCheckFault {\n\treturn b\n}\n\ntype BasePbmCompatibilityCheckFault interface {\n\tGetPbmCompatibilityCheckFault() *PbmCompatibilityCheckFault\n}\n\nfunc init() {\n\ttypes.Add(\"BasePbmCompatibilityCheckFault\", reflect.TypeOf((*PbmCompatibilityCheckFault)(nil)).Elem())\n}\n\nfunc (b *PbmFault) GetPbmFault() *PbmFault { return b }\n\ntype BasePbmFault interface {\n\tGetPbmFault() *PbmFault\n}\n\nfunc init() {\n\ttypes.Add(\"BasePbmFault\", reflect.TypeOf((*PbmFault)(nil)).Elem())\n}\n\nfunc (b *PbmLineOfServiceInfo) GetPbmLineOfServiceInfo() *PbmLineOfServiceInfo { return b }\n\ntype BasePbmLineOfServiceInfo interface {\n\tGetPbmLineOfServiceInfo() *PbmLineOfServiceInfo\n}\n\nfunc init() {\n\ttypes.Add(\"BasePbmLineOfServiceInfo\", reflect.TypeOf((*PbmLineOfServiceInfo)(nil)).Elem())\n}\n\nfunc (b *PbmPlacementMatchingResources) GetPbmPlacementMatchingResources() *PbmPlacementMatchingResources {\n\treturn b\n}\n\ntype BasePbmPlacementMatchingResources interface {\n\tGetPbmPlacementMatchingResources() *PbmPlacementMatchingResources\n}\n\nfunc init() {\n\ttypes.Add(\"BasePbmPlacementMatchingResources\", reflect.TypeOf((*PbmPlacementMatchingResources)(nil)).Elem())\n}\n\nfunc (b *PbmPlacementRequirement) GetPbmPlacementRequirement() *PbmPlacementRequirement { return b }\n\ntype BasePbmPlacementRequirement interface {\n\tGetPbmPlacementRequirement() *PbmPlacementRequirement\n}\n\nfunc init() {\n\ttypes.Add(\"BasePbmPlacementRequirement\", reflect.TypeOf((*PbmPlacementRequirement)(nil)).Elem())\n}\n\nfunc (b *PbmProfile) GetPbmProfile() *PbmProfile { return b }\n\ntype BasePbmProfile interface {\n\tGetPbmProfile() *PbmProfile\n}\n\nfunc init() {\n\ttypes.Add(\"BasePbmProfile\", 
reflect.TypeOf((*PbmProfile)(nil)).Elem())\n}\n\nfunc (b *PbmPropertyMismatchFault) GetPbmPropertyMismatchFault() *PbmPropertyMismatchFault { return b }\n\ntype BasePbmPropertyMismatchFault interface {\n\tGetPbmPropertyMismatchFault() *PbmPropertyMismatchFault\n}\n\nfunc init() {\n\ttypes.Add(\"BasePbmPropertyMismatchFault\", reflect.TypeOf((*PbmPropertyMismatchFault)(nil)).Elem())\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/pbm/types/types.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage types\n\nimport (\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ArrayOfPbmCapabilityConstraintInstance struct {\n\tPbmCapabilityConstraintInstance []PbmCapabilityConstraintInstance `xml:\"PbmCapabilityConstraintInstance,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmCapabilityConstraintInstance\", reflect.TypeOf((*ArrayOfPbmCapabilityConstraintInstance)(nil)).Elem())\n}\n\ntype ArrayOfPbmCapabilityInstance struct {\n\tPbmCapabilityInstance []PbmCapabilityInstance `xml:\"PbmCapabilityInstance,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmCapabilityInstance\", reflect.TypeOf((*ArrayOfPbmCapabilityInstance)(nil)).Elem())\n}\n\ntype ArrayOfPbmCapabilityMetadata struct {\n\tPbmCapabilityMetadata []PbmCapabilityMetadata `xml:\"PbmCapabilityMetadata,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmCapabilityMetadata\", reflect.TypeOf((*ArrayOfPbmCapabilityMetadata)(nil)).Elem())\n}\n\ntype ArrayOfPbmCapabilityMetadataPerCategory struct {\n\tPbmCapabilityMetadataPerCategory []PbmCapabilityMetadataPerCategory `xml:\"PbmCapabilityMetadataPerCategory,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmCapabilityMetadataPerCategory\", reflect.TypeOf((*ArrayOfPbmCapabilityMetadataPerCategory)(nil)).Elem())\n}\n\ntype ArrayOfPbmCapabilityPropertyInstance struct 
{\n\tPbmCapabilityPropertyInstance []PbmCapabilityPropertyInstance `xml:\"PbmCapabilityPropertyInstance,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmCapabilityPropertyInstance\", reflect.TypeOf((*ArrayOfPbmCapabilityPropertyInstance)(nil)).Elem())\n}\n\ntype ArrayOfPbmCapabilityPropertyMetadata struct {\n\tPbmCapabilityPropertyMetadata []PbmCapabilityPropertyMetadata `xml:\"PbmCapabilityPropertyMetadata,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmCapabilityPropertyMetadata\", reflect.TypeOf((*ArrayOfPbmCapabilityPropertyMetadata)(nil)).Elem())\n}\n\ntype ArrayOfPbmCapabilitySchema struct {\n\tPbmCapabilitySchema []PbmCapabilitySchema `xml:\"PbmCapabilitySchema,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmCapabilitySchema\", reflect.TypeOf((*ArrayOfPbmCapabilitySchema)(nil)).Elem())\n}\n\ntype ArrayOfPbmCapabilitySubProfile struct {\n\tPbmCapabilitySubProfile []PbmCapabilitySubProfile `xml:\"PbmCapabilitySubProfile,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmCapabilitySubProfile\", reflect.TypeOf((*ArrayOfPbmCapabilitySubProfile)(nil)).Elem())\n}\n\ntype ArrayOfPbmCapabilityVendorNamespaceInfo struct {\n\tPbmCapabilityVendorNamespaceInfo []PbmCapabilityVendorNamespaceInfo `xml:\"PbmCapabilityVendorNamespaceInfo,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmCapabilityVendorNamespaceInfo\", reflect.TypeOf((*ArrayOfPbmCapabilityVendorNamespaceInfo)(nil)).Elem())\n}\n\ntype ArrayOfPbmCapabilityVendorResourceTypeInfo struct {\n\tPbmCapabilityVendorResourceTypeInfo []PbmCapabilityVendorResourceTypeInfo `xml:\"PbmCapabilityVendorResourceTypeInfo,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmCapabilityVendorResourceTypeInfo\", reflect.TypeOf((*ArrayOfPbmCapabilityVendorResourceTypeInfo)(nil)).Elem())\n}\n\ntype ArrayOfPbmCompliancePolicyStatus struct {\n\tPbmCompliancePolicyStatus []PbmCompliancePolicyStatus `xml:\"PbmCompliancePolicyStatus,omitempty\"`\n}\n\nfunc init() 
{\n\ttypes.Add(\"pbm:ArrayOfPbmCompliancePolicyStatus\", reflect.TypeOf((*ArrayOfPbmCompliancePolicyStatus)(nil)).Elem())\n}\n\ntype ArrayOfPbmComplianceResult struct {\n\tPbmComplianceResult []PbmComplianceResult `xml:\"PbmComplianceResult,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmComplianceResult\", reflect.TypeOf((*ArrayOfPbmComplianceResult)(nil)).Elem())\n}\n\ntype ArrayOfPbmDatastoreSpaceStatistics struct {\n\tPbmDatastoreSpaceStatistics []PbmDatastoreSpaceStatistics `xml:\"PbmDatastoreSpaceStatistics,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmDatastoreSpaceStatistics\", reflect.TypeOf((*ArrayOfPbmDatastoreSpaceStatistics)(nil)).Elem())\n}\n\ntype ArrayOfPbmDefaultProfileInfo struct {\n\tPbmDefaultProfileInfo []PbmDefaultProfileInfo `xml:\"PbmDefaultProfileInfo,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmDefaultProfileInfo\", reflect.TypeOf((*ArrayOfPbmDefaultProfileInfo)(nil)).Elem())\n}\n\ntype ArrayOfPbmPlacementCompatibilityResult struct {\n\tPbmPlacementCompatibilityResult []PbmPlacementCompatibilityResult `xml:\"PbmPlacementCompatibilityResult,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmPlacementCompatibilityResult\", reflect.TypeOf((*ArrayOfPbmPlacementCompatibilityResult)(nil)).Elem())\n}\n\ntype ArrayOfPbmPlacementHub struct {\n\tPbmPlacementHub []PbmPlacementHub `xml:\"PbmPlacementHub,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmPlacementHub\", reflect.TypeOf((*ArrayOfPbmPlacementHub)(nil)).Elem())\n}\n\ntype ArrayOfPbmPlacementMatchingResources struct {\n\tPbmPlacementMatchingResources []BasePbmPlacementMatchingResources `xml:\"PbmPlacementMatchingResources,omitempty,typeattr\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmPlacementMatchingResources\", reflect.TypeOf((*ArrayOfPbmPlacementMatchingResources)(nil)).Elem())\n}\n\ntype ArrayOfPbmPlacementRequirement struct {\n\tPbmPlacementRequirement []BasePbmPlacementRequirement 
`xml:\"PbmPlacementRequirement,omitempty,typeattr\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmPlacementRequirement\", reflect.TypeOf((*ArrayOfPbmPlacementRequirement)(nil)).Elem())\n}\n\ntype ArrayOfPbmPlacementResourceUtilization struct {\n\tPbmPlacementResourceUtilization []PbmPlacementResourceUtilization `xml:\"PbmPlacementResourceUtilization,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmPlacementResourceUtilization\", reflect.TypeOf((*ArrayOfPbmPlacementResourceUtilization)(nil)).Elem())\n}\n\ntype ArrayOfPbmProfile struct {\n\tPbmProfile []BasePbmProfile `xml:\"PbmProfile,omitempty,typeattr\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmProfile\", reflect.TypeOf((*ArrayOfPbmProfile)(nil)).Elem())\n}\n\ntype ArrayOfPbmProfileId struct {\n\tPbmProfileId []PbmProfileId `xml:\"PbmProfileId,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmProfileId\", reflect.TypeOf((*ArrayOfPbmProfileId)(nil)).Elem())\n}\n\ntype ArrayOfPbmProfileOperationOutcome struct {\n\tPbmProfileOperationOutcome []PbmProfileOperationOutcome `xml:\"PbmProfileOperationOutcome,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmProfileOperationOutcome\", reflect.TypeOf((*ArrayOfPbmProfileOperationOutcome)(nil)).Elem())\n}\n\ntype ArrayOfPbmProfileResourceType struct {\n\tPbmProfileResourceType []PbmProfileResourceType `xml:\"PbmProfileResourceType,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmProfileResourceType\", reflect.TypeOf((*ArrayOfPbmProfileResourceType)(nil)).Elem())\n}\n\ntype ArrayOfPbmProfileType struct {\n\tPbmProfileType []PbmProfileType `xml:\"PbmProfileType,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmProfileType\", reflect.TypeOf((*ArrayOfPbmProfileType)(nil)).Elem())\n}\n\ntype ArrayOfPbmQueryProfileResult struct {\n\tPbmQueryProfileResult []PbmQueryProfileResult `xml:\"PbmQueryProfileResult,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmQueryProfileResult\", 
reflect.TypeOf((*ArrayOfPbmQueryProfileResult)(nil)).Elem())\n}\n\ntype ArrayOfPbmQueryReplicationGroupResult struct {\n\tPbmQueryReplicationGroupResult []PbmQueryReplicationGroupResult `xml:\"PbmQueryReplicationGroupResult,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmQueryReplicationGroupResult\", reflect.TypeOf((*ArrayOfPbmQueryReplicationGroupResult)(nil)).Elem())\n}\n\ntype ArrayOfPbmRollupComplianceResult struct {\n\tPbmRollupComplianceResult []PbmRollupComplianceResult `xml:\"PbmRollupComplianceResult,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmRollupComplianceResult\", reflect.TypeOf((*ArrayOfPbmRollupComplianceResult)(nil)).Elem())\n}\n\ntype ArrayOfPbmServerObjectRef struct {\n\tPbmServerObjectRef []PbmServerObjectRef `xml:\"PbmServerObjectRef,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:ArrayOfPbmServerObjectRef\", reflect.TypeOf((*ArrayOfPbmServerObjectRef)(nil)).Elem())\n}\n\ntype PbmAboutInfo struct {\n\ttypes.DynamicData\n\n\tName         string `xml:\"name\"`\n\tVersion      string `xml:\"version\"`\n\tInstanceUuid string `xml:\"instanceUuid\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmAboutInfo\", reflect.TypeOf((*PbmAboutInfo)(nil)).Elem())\n}\n\ntype PbmAlreadyExists struct {\n\tPbmFault\n\n\tName string `xml:\"name,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmAlreadyExists\", reflect.TypeOf((*PbmAlreadyExists)(nil)).Elem())\n}\n\ntype PbmAssignDefaultRequirementProfile PbmAssignDefaultRequirementProfileRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmAssignDefaultRequirementProfile\", reflect.TypeOf((*PbmAssignDefaultRequirementProfile)(nil)).Elem())\n}\n\ntype PbmAssignDefaultRequirementProfileRequestType struct {\n\tThis       types.ManagedObjectReference `xml:\"_this\"`\n\tProfile    PbmProfileId                 `xml:\"profile\"`\n\tDatastores []PbmPlacementHub            `xml:\"datastores\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmAssignDefaultRequirementProfileRequestType\", 
reflect.TypeOf((*PbmAssignDefaultRequirementProfileRequestType)(nil)).Elem())\n}\n\ntype PbmAssignDefaultRequirementProfileResponse struct {\n}\n\ntype PbmCapabilityConstraintInstance struct {\n\ttypes.DynamicData\n\n\tPropertyInstance []PbmCapabilityPropertyInstance `xml:\"propertyInstance\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityConstraintInstance\", reflect.TypeOf((*PbmCapabilityConstraintInstance)(nil)).Elem())\n}\n\ntype PbmCapabilityConstraints struct {\n\ttypes.DynamicData\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityConstraints\", reflect.TypeOf((*PbmCapabilityConstraints)(nil)).Elem())\n}\n\ntype PbmCapabilityDescription struct {\n\ttypes.DynamicData\n\n\tDescription PbmExtendedElementDescription `xml:\"description\"`\n\tValue       types.AnyType                 `xml:\"value,typeattr\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityDescription\", reflect.TypeOf((*PbmCapabilityDescription)(nil)).Elem())\n}\n\ntype PbmCapabilityDiscreteSet struct {\n\ttypes.DynamicData\n\n\tValues []types.AnyType `xml:\"values,typeattr\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityDiscreteSet\", reflect.TypeOf((*PbmCapabilityDiscreteSet)(nil)).Elem())\n}\n\ntype PbmCapabilityGenericTypeInfo struct {\n\tPbmCapabilityTypeInfo\n\n\tGenericTypeName string `xml:\"genericTypeName\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityGenericTypeInfo\", reflect.TypeOf((*PbmCapabilityGenericTypeInfo)(nil)).Elem())\n}\n\ntype PbmCapabilityInstance struct {\n\ttypes.DynamicData\n\n\tId         PbmCapabilityMetadataUniqueId     `xml:\"id\"`\n\tConstraint []PbmCapabilityConstraintInstance `xml:\"constraint\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityInstance\", reflect.TypeOf((*PbmCapabilityInstance)(nil)).Elem())\n}\n\ntype PbmCapabilityMetadata struct {\n\ttypes.DynamicData\n\n\tId                       PbmCapabilityMetadataUniqueId   `xml:\"id\"`\n\tSummary                  PbmExtendedElementDescription   `xml:\"summary\"`\n\tMandatory   
             *bool                           `xml:\"mandatory\"`\n\tHint                     *bool                           `xml:\"hint\"`\n\tKeyId                    string                          `xml:\"keyId,omitempty\"`\n\tAllowMultipleConstraints *bool                           `xml:\"allowMultipleConstraints\"`\n\tPropertyMetadata         []PbmCapabilityPropertyMetadata `xml:\"propertyMetadata\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityMetadata\", reflect.TypeOf((*PbmCapabilityMetadata)(nil)).Elem())\n}\n\ntype PbmCapabilityMetadataPerCategory struct {\n\ttypes.DynamicData\n\n\tSubCategory        string                  `xml:\"subCategory\"`\n\tCapabilityMetadata []PbmCapabilityMetadata `xml:\"capabilityMetadata\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityMetadataPerCategory\", reflect.TypeOf((*PbmCapabilityMetadataPerCategory)(nil)).Elem())\n}\n\ntype PbmCapabilityMetadataUniqueId struct {\n\ttypes.DynamicData\n\n\tNamespace string `xml:\"namespace\"`\n\tId        string `xml:\"id\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityMetadataUniqueId\", reflect.TypeOf((*PbmCapabilityMetadataUniqueId)(nil)).Elem())\n}\n\ntype PbmCapabilityNamespaceInfo struct {\n\ttypes.DynamicData\n\n\tVersion   string                         `xml:\"version\"`\n\tNamespace string                         `xml:\"namespace\"`\n\tInfo      *PbmExtendedElementDescription `xml:\"info,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityNamespaceInfo\", reflect.TypeOf((*PbmCapabilityNamespaceInfo)(nil)).Elem())\n}\n\ntype PbmCapabilityProfile struct {\n\tPbmProfile\n\n\tProfileCategory          string                       `xml:\"profileCategory\"`\n\tResourceType             PbmProfileResourceType       `xml:\"resourceType\"`\n\tConstraints              BasePbmCapabilityConstraints `xml:\"constraints,typeattr\"`\n\tGenerationId             int64                        `xml:\"generationId,omitempty\"`\n\tIsDefault                bool            
             `xml:\"isDefault\"`\n\tSystemCreatedProfileType string                       `xml:\"systemCreatedProfileType,omitempty\"`\n\tLineOfService            string                       `xml:\"lineOfService,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityProfile\", reflect.TypeOf((*PbmCapabilityProfile)(nil)).Elem())\n}\n\ntype PbmCapabilityProfileCreateSpec struct {\n\ttypes.DynamicData\n\n\tName         string                       `xml:\"name\"`\n\tDescription  string                       `xml:\"description,omitempty\"`\n\tCategory     string                       `xml:\"category,omitempty\"`\n\tResourceType PbmProfileResourceType       `xml:\"resourceType\"`\n\tConstraints  BasePbmCapabilityConstraints `xml:\"constraints,typeattr\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityProfileCreateSpec\", reflect.TypeOf((*PbmCapabilityProfileCreateSpec)(nil)).Elem())\n}\n\ntype PbmCapabilityProfilePropertyMismatchFault struct {\n\tPbmPropertyMismatchFault\n\n\tResourcePropertyInstance PbmCapabilityPropertyInstance `xml:\"resourcePropertyInstance\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityProfilePropertyMismatchFault\", reflect.TypeOf((*PbmCapabilityProfilePropertyMismatchFault)(nil)).Elem())\n}\n\ntype PbmCapabilityProfilePropertyMismatchFaultFault BasePbmCapabilityProfilePropertyMismatchFault\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityProfilePropertyMismatchFaultFault\", reflect.TypeOf((*PbmCapabilityProfilePropertyMismatchFaultFault)(nil)).Elem())\n}\n\ntype PbmCapabilityProfileUpdateSpec struct {\n\ttypes.DynamicData\n\n\tName        string                       `xml:\"name,omitempty\"`\n\tDescription string                       `xml:\"description,omitempty\"`\n\tConstraints BasePbmCapabilityConstraints `xml:\"constraints,omitempty,typeattr\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityProfileUpdateSpec\", reflect.TypeOf((*PbmCapabilityProfileUpdateSpec)(nil)).Elem())\n}\n\ntype PbmCapabilityPropertyInstance 
struct {\n\ttypes.DynamicData\n\n\tId       string        `xml:\"id\"`\n\tOperator string        `xml:\"operator,omitempty\"`\n\tValue    types.AnyType `xml:\"value,typeattr\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityPropertyInstance\", reflect.TypeOf((*PbmCapabilityPropertyInstance)(nil)).Elem())\n}\n\ntype PbmCapabilityPropertyMetadata struct {\n\ttypes.DynamicData\n\n\tId                   string                        `xml:\"id\"`\n\tSummary              PbmExtendedElementDescription `xml:\"summary\"`\n\tMandatory            bool                          `xml:\"mandatory\"`\n\tType                 BasePbmCapabilityTypeInfo     `xml:\"type,omitempty,typeattr\"`\n\tDefaultValue         types.AnyType                 `xml:\"defaultValue,omitempty,typeattr\"`\n\tAllowedValue         types.AnyType                 `xml:\"allowedValue,omitempty,typeattr\"`\n\tRequirementsTypeHint string                        `xml:\"requirementsTypeHint,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityPropertyMetadata\", reflect.TypeOf((*PbmCapabilityPropertyMetadata)(nil)).Elem())\n}\n\ntype PbmCapabilityRange struct {\n\ttypes.DynamicData\n\n\tMin types.AnyType `xml:\"min,typeattr\"`\n\tMax types.AnyType `xml:\"max,typeattr\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityRange\", reflect.TypeOf((*PbmCapabilityRange)(nil)).Elem())\n}\n\ntype PbmCapabilitySchema struct {\n\ttypes.DynamicData\n\n\tVendorInfo                    PbmCapabilitySchemaVendorInfo      `xml:\"vendorInfo\"`\n\tNamespaceInfo                 PbmCapabilityNamespaceInfo         `xml:\"namespaceInfo\"`\n\tLineOfService                 BasePbmLineOfServiceInfo           `xml:\"lineOfService,omitempty,typeattr\"`\n\tCapabilityMetadataPerCategory []PbmCapabilityMetadataPerCategory `xml:\"capabilityMetadataPerCategory\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilitySchema\", reflect.TypeOf((*PbmCapabilitySchema)(nil)).Elem())\n}\n\ntype PbmCapabilitySchemaVendorInfo struct 
{\n\ttypes.DynamicData\n\n\tVendorUuid string                        `xml:\"vendorUuid\"`\n\tInfo       PbmExtendedElementDescription `xml:\"info\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilitySchemaVendorInfo\", reflect.TypeOf((*PbmCapabilitySchemaVendorInfo)(nil)).Elem())\n}\n\ntype PbmCapabilitySubProfile struct {\n\ttypes.DynamicData\n\n\tName           string                  `xml:\"name\"`\n\tCapability     []PbmCapabilityInstance `xml:\"capability\"`\n\tForceProvision *bool                   `xml:\"forceProvision\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilitySubProfile\", reflect.TypeOf((*PbmCapabilitySubProfile)(nil)).Elem())\n}\n\ntype PbmCapabilitySubProfileConstraints struct {\n\tPbmCapabilityConstraints\n\n\tSubProfiles []PbmCapabilitySubProfile `xml:\"subProfiles\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilitySubProfileConstraints\", reflect.TypeOf((*PbmCapabilitySubProfileConstraints)(nil)).Elem())\n}\n\ntype PbmCapabilityTimeSpan struct {\n\ttypes.DynamicData\n\n\tValue int32  `xml:\"value\"`\n\tUnit  string `xml:\"unit\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityTimeSpan\", reflect.TypeOf((*PbmCapabilityTimeSpan)(nil)).Elem())\n}\n\ntype PbmCapabilityTypeInfo struct {\n\ttypes.DynamicData\n\n\tTypeName string `xml:\"typeName\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityTypeInfo\", reflect.TypeOf((*PbmCapabilityTypeInfo)(nil)).Elem())\n}\n\ntype PbmCapabilityVendorNamespaceInfo struct {\n\ttypes.DynamicData\n\n\tVendorInfo    PbmCapabilitySchemaVendorInfo `xml:\"vendorInfo\"`\n\tNamespaceInfo PbmCapabilityNamespaceInfo    `xml:\"namespaceInfo\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityVendorNamespaceInfo\", reflect.TypeOf((*PbmCapabilityVendorNamespaceInfo)(nil)).Elem())\n}\n\ntype PbmCapabilityVendorResourceTypeInfo struct {\n\ttypes.DynamicData\n\n\tResourceType        string                             `xml:\"resourceType\"`\n\tVendorNamespaceInfo []PbmCapabilityVendorNamespaceInfo 
`xml:\"vendorNamespaceInfo\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCapabilityVendorResourceTypeInfo\", reflect.TypeOf((*PbmCapabilityVendorResourceTypeInfo)(nil)).Elem())\n}\n\ntype PbmCheckCompatibility PbmCheckCompatibilityRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCheckCompatibility\", reflect.TypeOf((*PbmCheckCompatibility)(nil)).Elem())\n}\n\ntype PbmCheckCompatibilityRequestType struct {\n\tThis         types.ManagedObjectReference `xml:\"_this\"`\n\tHubsToSearch []PbmPlacementHub            `xml:\"hubsToSearch,omitempty\"`\n\tProfile      PbmProfileId                 `xml:\"profile\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCheckCompatibilityRequestType\", reflect.TypeOf((*PbmCheckCompatibilityRequestType)(nil)).Elem())\n}\n\ntype PbmCheckCompatibilityResponse struct {\n\tReturnval []PbmPlacementCompatibilityResult `xml:\"returnval,omitempty\"`\n}\n\ntype PbmCheckCompatibilityWithSpec PbmCheckCompatibilityWithSpecRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCheckCompatibilityWithSpec\", reflect.TypeOf((*PbmCheckCompatibilityWithSpec)(nil)).Elem())\n}\n\ntype PbmCheckCompatibilityWithSpecRequestType struct {\n\tThis         types.ManagedObjectReference   `xml:\"_this\"`\n\tHubsToSearch []PbmPlacementHub              `xml:\"hubsToSearch,omitempty\"`\n\tProfileSpec  PbmCapabilityProfileCreateSpec `xml:\"profileSpec\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCheckCompatibilityWithSpecRequestType\", reflect.TypeOf((*PbmCheckCompatibilityWithSpecRequestType)(nil)).Elem())\n}\n\ntype PbmCheckCompatibilityWithSpecResponse struct {\n\tReturnval []PbmPlacementCompatibilityResult `xml:\"returnval,omitempty\"`\n}\n\ntype PbmCheckCompliance PbmCheckComplianceRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCheckCompliance\", reflect.TypeOf((*PbmCheckCompliance)(nil)).Elem())\n}\n\ntype PbmCheckComplianceRequestType struct {\n\tThis     types.ManagedObjectReference `xml:\"_this\"`\n\tEntities []PbmServerObjectRef         
`xml:\"entities\"`\n\tProfile  *PbmProfileId                `xml:\"profile,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCheckComplianceRequestType\", reflect.TypeOf((*PbmCheckComplianceRequestType)(nil)).Elem())\n}\n\ntype PbmCheckComplianceResponse struct {\n\tReturnval []PbmComplianceResult `xml:\"returnval,omitempty\"`\n}\n\ntype PbmCheckRequirements PbmCheckRequirementsRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCheckRequirements\", reflect.TypeOf((*PbmCheckRequirements)(nil)).Elem())\n}\n\ntype PbmCheckRequirementsRequestType struct {\n\tThis                        types.ManagedObjectReference  `xml:\"_this\"`\n\tHubsToSearch                []PbmPlacementHub             `xml:\"hubsToSearch,omitempty\"`\n\tPlacementSubjectRef         *PbmServerObjectRef           `xml:\"placementSubjectRef,omitempty\"`\n\tPlacementSubjectRequirement []BasePbmPlacementRequirement `xml:\"placementSubjectRequirement,omitempty,typeattr\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCheckRequirementsRequestType\", reflect.TypeOf((*PbmCheckRequirementsRequestType)(nil)).Elem())\n}\n\ntype PbmCheckRequirementsResponse struct {\n\tReturnval []PbmPlacementCompatibilityResult `xml:\"returnval,omitempty\"`\n}\n\ntype PbmCheckRollupCompliance PbmCheckRollupComplianceRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCheckRollupCompliance\", reflect.TypeOf((*PbmCheckRollupCompliance)(nil)).Elem())\n}\n\ntype PbmCheckRollupComplianceRequestType struct {\n\tThis   types.ManagedObjectReference `xml:\"_this\"`\n\tEntity []PbmServerObjectRef         `xml:\"entity\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCheckRollupComplianceRequestType\", reflect.TypeOf((*PbmCheckRollupComplianceRequestType)(nil)).Elem())\n}\n\ntype PbmCheckRollupComplianceResponse struct {\n\tReturnval []PbmRollupComplianceResult `xml:\"returnval,omitempty\"`\n}\n\ntype PbmCompatibilityCheckFault struct {\n\tPbmFault\n\n\tHub PbmPlacementHub `xml:\"hub\"`\n}\n\nfunc init() 
{\n\ttypes.Add(\"pbm:PbmCompatibilityCheckFault\", reflect.TypeOf((*PbmCompatibilityCheckFault)(nil)).Elem())\n}\n\ntype PbmCompatibilityCheckFaultFault BasePbmCompatibilityCheckFault\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCompatibilityCheckFaultFault\", reflect.TypeOf((*PbmCompatibilityCheckFaultFault)(nil)).Elem())\n}\n\ntype PbmComplianceOperationalStatus struct {\n\ttypes.DynamicData\n\n\tHealthy           *bool      `xml:\"healthy\"`\n\tOperationETA      *time.Time `xml:\"operationETA\"`\n\tOperationProgress int64      `xml:\"operationProgress,omitempty\"`\n\tTransitional      *bool      `xml:\"transitional\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmComplianceOperationalStatus\", reflect.TypeOf((*PbmComplianceOperationalStatus)(nil)).Elem())\n}\n\ntype PbmCompliancePolicyStatus struct {\n\ttypes.DynamicData\n\n\tExpectedValue PbmCapabilityInstance  `xml:\"expectedValue\"`\n\tCurrentValue  *PbmCapabilityInstance `xml:\"currentValue,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCompliancePolicyStatus\", reflect.TypeOf((*PbmCompliancePolicyStatus)(nil)).Elem())\n}\n\ntype PbmComplianceResult struct {\n\ttypes.DynamicData\n\n\tCheckTime            time.Time                       `xml:\"checkTime\"`\n\tEntity               PbmServerObjectRef              `xml:\"entity\"`\n\tProfile              *PbmProfileId                   `xml:\"profile,omitempty\"`\n\tComplianceTaskStatus string                          `xml:\"complianceTaskStatus,omitempty\"`\n\tComplianceStatus     string                          `xml:\"complianceStatus\"`\n\tMismatch             bool                            `xml:\"mismatch\"`\n\tViolatedPolicies     []PbmCompliancePolicyStatus     `xml:\"violatedPolicies,omitempty\"`\n\tErrorCause           []types.LocalizedMethodFault    `xml:\"errorCause,omitempty\"`\n\tOperationalStatus    *PbmComplianceOperationalStatus `xml:\"operationalStatus,omitempty\"`\n\tInfo                 *PbmExtendedElementDescription  
`xml:\"info,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmComplianceResult\", reflect.TypeOf((*PbmComplianceResult)(nil)).Elem())\n}\n\ntype PbmCreate PbmCreateRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCreate\", reflect.TypeOf((*PbmCreate)(nil)).Elem())\n}\n\ntype PbmCreateRequestType struct {\n\tThis       types.ManagedObjectReference   `xml:\"_this\"`\n\tCreateSpec PbmCapabilityProfileCreateSpec `xml:\"createSpec\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmCreateRequestType\", reflect.TypeOf((*PbmCreateRequestType)(nil)).Elem())\n}\n\ntype PbmCreateResponse struct {\n\tReturnval PbmProfileId `xml:\"returnval\"`\n}\n\ntype PbmDataServiceToPoliciesMap struct {\n\ttypes.DynamicData\n\n\tDataServicePolicy     PbmProfileId                `xml:\"dataServicePolicy\"`\n\tParentStoragePolicies []PbmProfileId              `xml:\"parentStoragePolicies,omitempty\"`\n\tFault                 *types.LocalizedMethodFault `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmDataServiceToPoliciesMap\", reflect.TypeOf((*PbmDataServiceToPoliciesMap)(nil)).Elem())\n}\n\ntype PbmDatastoreSpaceStatistics struct {\n\ttypes.DynamicData\n\n\tProfileId         string `xml:\"profileId,omitempty\"`\n\tPhysicalTotalInMB int64  `xml:\"physicalTotalInMB\"`\n\tPhysicalFreeInMB  int64  `xml:\"physicalFreeInMB\"`\n\tPhysicalUsedInMB  int64  `xml:\"physicalUsedInMB\"`\n\tLogicalLimitInMB  int64  `xml:\"logicalLimitInMB,omitempty\"`\n\tLogicalFreeInMB   int64  `xml:\"logicalFreeInMB\"`\n\tLogicalUsedInMB   int64  `xml:\"logicalUsedInMB\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmDatastoreSpaceStatistics\", reflect.TypeOf((*PbmDatastoreSpaceStatistics)(nil)).Elem())\n}\n\ntype PbmDefaultCapabilityProfile struct {\n\tPbmCapabilityProfile\n\n\tVvolType    []string `xml:\"vvolType\"`\n\tContainerId string   `xml:\"containerId\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmDefaultCapabilityProfile\", reflect.TypeOf((*PbmDefaultCapabilityProfile)(nil)).Elem())\n}\n\ntype 
PbmDefaultProfileAppliesFault struct {\n\tPbmCompatibilityCheckFault\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmDefaultProfileAppliesFault\", reflect.TypeOf((*PbmDefaultProfileAppliesFault)(nil)).Elem())\n}\n\ntype PbmDefaultProfileAppliesFaultFault PbmDefaultProfileAppliesFault\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmDefaultProfileAppliesFaultFault\", reflect.TypeOf((*PbmDefaultProfileAppliesFaultFault)(nil)).Elem())\n}\n\ntype PbmDefaultProfileInfo struct {\n\ttypes.DynamicData\n\n\tDatastores     []PbmPlacementHub `xml:\"datastores\"`\n\tDefaultProfile BasePbmProfile    `xml:\"defaultProfile,omitempty,typeattr\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmDefaultProfileInfo\", reflect.TypeOf((*PbmDefaultProfileInfo)(nil)).Elem())\n}\n\ntype PbmDelete PbmDeleteRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmDelete\", reflect.TypeOf((*PbmDelete)(nil)).Elem())\n}\n\ntype PbmDeleteRequestType struct {\n\tThis      types.ManagedObjectReference `xml:\"_this\"`\n\tProfileId []PbmProfileId               `xml:\"profileId\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmDeleteRequestType\", reflect.TypeOf((*PbmDeleteRequestType)(nil)).Elem())\n}\n\ntype PbmDeleteResponse struct {\n\tReturnval []PbmProfileOperationOutcome `xml:\"returnval,omitempty\"`\n}\n\ntype PbmDuplicateName struct {\n\tPbmFault\n\n\tName string `xml:\"name\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmDuplicateName\", reflect.TypeOf((*PbmDuplicateName)(nil)).Elem())\n}\n\ntype PbmDuplicateNameFault PbmDuplicateName\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmDuplicateNameFault\", reflect.TypeOf((*PbmDuplicateNameFault)(nil)).Elem())\n}\n\ntype PbmExtendedElementDescription struct {\n\ttypes.DynamicData\n\n\tLabel                   string              `xml:\"label\"`\n\tSummary                 string              `xml:\"summary\"`\n\tKey                     string              `xml:\"key\"`\n\tMessageCatalogKeyPrefix string              `xml:\"messageCatalogKeyPrefix\"`\n\tMessageArg              
[]types.KeyAnyValue `xml:\"messageArg,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmExtendedElementDescription\", reflect.TypeOf((*PbmExtendedElementDescription)(nil)).Elem())\n}\n\ntype PbmFault struct {\n\ttypes.MethodFault\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFault\", reflect.TypeOf((*PbmFault)(nil)).Elem())\n}\n\ntype PbmFaultFault BasePbmFault\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFaultFault\", reflect.TypeOf((*PbmFaultFault)(nil)).Elem())\n}\n\ntype PbmFaultInvalidLogin struct {\n\tPbmFault\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFaultInvalidLogin\", reflect.TypeOf((*PbmFaultInvalidLogin)(nil)).Elem())\n}\n\ntype PbmFaultInvalidLoginFault PbmFaultInvalidLogin\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFaultInvalidLoginFault\", reflect.TypeOf((*PbmFaultInvalidLoginFault)(nil)).Elem())\n}\n\ntype PbmFaultNotFound struct {\n\tPbmFault\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFaultNotFound\", reflect.TypeOf((*PbmFaultNotFound)(nil)).Elem())\n}\n\ntype PbmFaultNotFoundFault PbmFaultNotFound\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFaultNotFoundFault\", reflect.TypeOf((*PbmFaultNotFoundFault)(nil)).Elem())\n}\n\ntype PbmFaultProfileStorageFault struct {\n\tPbmFault\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFaultProfileStorageFault\", reflect.TypeOf((*PbmFaultProfileStorageFault)(nil)).Elem())\n}\n\ntype PbmFaultProfileStorageFaultFault PbmFaultProfileStorageFault\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFaultProfileStorageFaultFault\", reflect.TypeOf((*PbmFaultProfileStorageFaultFault)(nil)).Elem())\n}\n\ntype PbmFetchCapabilityMetadata PbmFetchCapabilityMetadataRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFetchCapabilityMetadata\", reflect.TypeOf((*PbmFetchCapabilityMetadata)(nil)).Elem())\n}\n\ntype PbmFetchCapabilityMetadataRequestType struct {\n\tThis         types.ManagedObjectReference `xml:\"_this\"`\n\tResourceType *PbmProfileResourceType      `xml:\"resourceType,omitempty\"`\n\tVendorUuid   string                       
`xml:\"vendorUuid,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFetchCapabilityMetadataRequestType\", reflect.TypeOf((*PbmFetchCapabilityMetadataRequestType)(nil)).Elem())\n}\n\ntype PbmFetchCapabilityMetadataResponse struct {\n\tReturnval []PbmCapabilityMetadataPerCategory `xml:\"returnval,omitempty\"`\n}\n\ntype PbmFetchCapabilitySchema PbmFetchCapabilitySchemaRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFetchCapabilitySchema\", reflect.TypeOf((*PbmFetchCapabilitySchema)(nil)).Elem())\n}\n\ntype PbmFetchCapabilitySchemaRequestType struct {\n\tThis          types.ManagedObjectReference `xml:\"_this\"`\n\tVendorUuid    string                       `xml:\"vendorUuid,omitempty\"`\n\tLineOfService []string                     `xml:\"lineOfService,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFetchCapabilitySchemaRequestType\", reflect.TypeOf((*PbmFetchCapabilitySchemaRequestType)(nil)).Elem())\n}\n\ntype PbmFetchCapabilitySchemaResponse struct {\n\tReturnval []PbmCapabilitySchema `xml:\"returnval,omitempty\"`\n}\n\ntype PbmFetchComplianceResult PbmFetchComplianceResultRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFetchComplianceResult\", reflect.TypeOf((*PbmFetchComplianceResult)(nil)).Elem())\n}\n\ntype PbmFetchComplianceResultRequestType struct {\n\tThis     types.ManagedObjectReference `xml:\"_this\"`\n\tEntities []PbmServerObjectRef         `xml:\"entities\"`\n\tProfile  *PbmProfileId                `xml:\"profile,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFetchComplianceResultRequestType\", reflect.TypeOf((*PbmFetchComplianceResultRequestType)(nil)).Elem())\n}\n\ntype PbmFetchComplianceResultResponse struct {\n\tReturnval []PbmComplianceResult `xml:\"returnval,omitempty\"`\n}\n\ntype PbmFetchResourceType PbmFetchResourceTypeRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFetchResourceType\", reflect.TypeOf((*PbmFetchResourceType)(nil)).Elem())\n}\n\ntype PbmFetchResourceTypeRequestType struct {\n\tThis 
types.ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFetchResourceTypeRequestType\", reflect.TypeOf((*PbmFetchResourceTypeRequestType)(nil)).Elem())\n}\n\ntype PbmFetchResourceTypeResponse struct {\n\tReturnval []PbmProfileResourceType `xml:\"returnval,omitempty\"`\n}\n\ntype PbmFetchRollupComplianceResult PbmFetchRollupComplianceResultRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFetchRollupComplianceResult\", reflect.TypeOf((*PbmFetchRollupComplianceResult)(nil)).Elem())\n}\n\ntype PbmFetchRollupComplianceResultRequestType struct {\n\tThis   types.ManagedObjectReference `xml:\"_this\"`\n\tEntity []PbmServerObjectRef         `xml:\"entity\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFetchRollupComplianceResultRequestType\", reflect.TypeOf((*PbmFetchRollupComplianceResultRequestType)(nil)).Elem())\n}\n\ntype PbmFetchRollupComplianceResultResponse struct {\n\tReturnval []PbmRollupComplianceResult `xml:\"returnval,omitempty\"`\n}\n\ntype PbmFetchVendorInfo PbmFetchVendorInfoRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFetchVendorInfo\", reflect.TypeOf((*PbmFetchVendorInfo)(nil)).Elem())\n}\n\ntype PbmFetchVendorInfoRequestType struct {\n\tThis         types.ManagedObjectReference `xml:\"_this\"`\n\tResourceType *PbmProfileResourceType      `xml:\"resourceType,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFetchVendorInfoRequestType\", reflect.TypeOf((*PbmFetchVendorInfoRequestType)(nil)).Elem())\n}\n\ntype PbmFetchVendorInfoResponse struct {\n\tReturnval []PbmCapabilityVendorResourceTypeInfo `xml:\"returnval,omitempty\"`\n}\n\ntype PbmFindApplicableDefaultProfile PbmFindApplicableDefaultProfileRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFindApplicableDefaultProfile\", reflect.TypeOf((*PbmFindApplicableDefaultProfile)(nil)).Elem())\n}\n\ntype PbmFindApplicableDefaultProfileRequestType struct {\n\tThis       types.ManagedObjectReference `xml:\"_this\"`\n\tDatastores []PbmPlacementHub            
`xml:\"datastores\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmFindApplicableDefaultProfileRequestType\", reflect.TypeOf((*PbmFindApplicableDefaultProfileRequestType)(nil)).Elem())\n}\n\ntype PbmFindApplicableDefaultProfileResponse struct {\n\tReturnval []BasePbmProfile `xml:\"returnval,omitempty,typeattr\"`\n}\n\ntype PbmIncompatibleVendorSpecificRuleSet struct {\n\tPbmCapabilityProfilePropertyMismatchFault\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmIncompatibleVendorSpecificRuleSet\", reflect.TypeOf((*PbmIncompatibleVendorSpecificRuleSet)(nil)).Elem())\n}\n\ntype PbmIncompatibleVendorSpecificRuleSetFault PbmIncompatibleVendorSpecificRuleSet\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmIncompatibleVendorSpecificRuleSetFault\", reflect.TypeOf((*PbmIncompatibleVendorSpecificRuleSetFault)(nil)).Elem())\n}\n\ntype PbmLegacyHubsNotSupported struct {\n\tPbmFault\n\n\tHubs []PbmPlacementHub `xml:\"hubs\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmLegacyHubsNotSupported\", reflect.TypeOf((*PbmLegacyHubsNotSupported)(nil)).Elem())\n}\n\ntype PbmLegacyHubsNotSupportedFault PbmLegacyHubsNotSupported\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmLegacyHubsNotSupportedFault\", reflect.TypeOf((*PbmLegacyHubsNotSupportedFault)(nil)).Elem())\n}\n\ntype PbmLineOfServiceInfo struct {\n\ttypes.DynamicData\n\n\tLineOfService string                         `xml:\"lineOfService\"`\n\tName          PbmExtendedElementDescription  `xml:\"name\"`\n\tDescription   *PbmExtendedElementDescription `xml:\"description,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmLineOfServiceInfo\", reflect.TypeOf((*PbmLineOfServiceInfo)(nil)).Elem())\n}\n\ntype PbmNonExistentHubs struct {\n\tPbmFault\n\n\tHubs []PbmPlacementHub `xml:\"hubs\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmNonExistentHubs\", reflect.TypeOf((*PbmNonExistentHubs)(nil)).Elem())\n}\n\ntype PbmNonExistentHubsFault PbmNonExistentHubs\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmNonExistentHubsFault\", 
reflect.TypeOf((*PbmNonExistentHubsFault)(nil)).Elem())\n}\n\ntype PbmPersistenceBasedDataServiceInfo struct {\n\tPbmLineOfServiceInfo\n\n\tCompatiblePersistenceSchemaNamespace []string `xml:\"compatiblePersistenceSchemaNamespace,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmPersistenceBasedDataServiceInfo\", reflect.TypeOf((*PbmPersistenceBasedDataServiceInfo)(nil)).Elem())\n}\n\ntype PbmPlacementCapabilityConstraintsRequirement struct {\n\tPbmPlacementRequirement\n\n\tConstraints BasePbmCapabilityConstraints `xml:\"constraints,typeattr\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmPlacementCapabilityConstraintsRequirement\", reflect.TypeOf((*PbmPlacementCapabilityConstraintsRequirement)(nil)).Elem())\n}\n\ntype PbmPlacementCapabilityProfileRequirement struct {\n\tPbmPlacementRequirement\n\n\tProfileId PbmProfileId `xml:\"profileId\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmPlacementCapabilityProfileRequirement\", reflect.TypeOf((*PbmPlacementCapabilityProfileRequirement)(nil)).Elem())\n}\n\ntype PbmPlacementCompatibilityResult struct {\n\ttypes.DynamicData\n\n\tHub               PbmPlacementHub                     `xml:\"hub\"`\n\tMatchingResources []BasePbmPlacementMatchingResources `xml:\"matchingResources,omitempty,typeattr\"`\n\tHowMany           int64                               `xml:\"howMany,omitempty\"`\n\tUtilization       []PbmPlacementResourceUtilization   `xml:\"utilization,omitempty\"`\n\tWarning           []types.LocalizedMethodFault        `xml:\"warning,omitempty\"`\n\tError             []types.LocalizedMethodFault        `xml:\"error,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmPlacementCompatibilityResult\", reflect.TypeOf((*PbmPlacementCompatibilityResult)(nil)).Elem())\n}\n\ntype PbmPlacementHub struct {\n\ttypes.DynamicData\n\n\tHubType string `xml:\"hubType\"`\n\tHubId   string `xml:\"hubId\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmPlacementHub\", reflect.TypeOf((*PbmPlacementHub)(nil)).Elem())\n}\n\ntype 
PbmPlacementMatchingReplicationResources struct {\n\tPbmPlacementMatchingResources\n\n\tReplicationGroup []types.ReplicationGroupId `xml:\"replicationGroup,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmPlacementMatchingReplicationResources\", reflect.TypeOf((*PbmPlacementMatchingReplicationResources)(nil)).Elem())\n}\n\ntype PbmPlacementMatchingResources struct {\n\ttypes.DynamicData\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmPlacementMatchingResources\", reflect.TypeOf((*PbmPlacementMatchingResources)(nil)).Elem())\n}\n\ntype PbmPlacementRequirement struct {\n\ttypes.DynamicData\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmPlacementRequirement\", reflect.TypeOf((*PbmPlacementRequirement)(nil)).Elem())\n}\n\ntype PbmPlacementResourceUtilization struct {\n\ttypes.DynamicData\n\n\tName            PbmExtendedElementDescription `xml:\"name\"`\n\tDescription     PbmExtendedElementDescription `xml:\"description\"`\n\tAvailableBefore int64                         `xml:\"availableBefore,omitempty\"`\n\tAvailableAfter  int64                         `xml:\"availableAfter,omitempty\"`\n\tTotal           int64                         `xml:\"total,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmPlacementResourceUtilization\", reflect.TypeOf((*PbmPlacementResourceUtilization)(nil)).Elem())\n}\n\ntype PbmProfile struct {\n\ttypes.DynamicData\n\n\tProfileId       PbmProfileId `xml:\"profileId\"`\n\tName            string       `xml:\"name\"`\n\tDescription     string       `xml:\"description,omitempty\"`\n\tCreationTime    time.Time    `xml:\"creationTime\"`\n\tCreatedBy       string       `xml:\"createdBy\"`\n\tLastUpdatedTime time.Time    `xml:\"lastUpdatedTime\"`\n\tLastUpdatedBy   string       `xml:\"lastUpdatedBy\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmProfile\", reflect.TypeOf((*PbmProfile)(nil)).Elem())\n}\n\ntype PbmProfileId struct {\n\ttypes.DynamicData\n\n\tUniqueId string `xml:\"uniqueId\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmProfileId\", 
reflect.TypeOf((*PbmProfileId)(nil)).Elem())\n}\n\ntype PbmProfileOperationOutcome struct {\n\ttypes.DynamicData\n\n\tProfileId PbmProfileId                `xml:\"profileId\"`\n\tFault     *types.LocalizedMethodFault `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmProfileOperationOutcome\", reflect.TypeOf((*PbmProfileOperationOutcome)(nil)).Elem())\n}\n\ntype PbmProfileResourceType struct {\n\ttypes.DynamicData\n\n\tResourceType string `xml:\"resourceType\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmProfileResourceType\", reflect.TypeOf((*PbmProfileResourceType)(nil)).Elem())\n}\n\ntype PbmProfileType struct {\n\ttypes.DynamicData\n\n\tUniqueId string `xml:\"uniqueId\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmProfileType\", reflect.TypeOf((*PbmProfileType)(nil)).Elem())\n}\n\ntype PbmPropertyMismatchFault struct {\n\tPbmCompatibilityCheckFault\n\n\tCapabilityInstanceId        PbmCapabilityMetadataUniqueId `xml:\"capabilityInstanceId\"`\n\tRequirementPropertyInstance PbmCapabilityPropertyInstance `xml:\"requirementPropertyInstance\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmPropertyMismatchFault\", reflect.TypeOf((*PbmPropertyMismatchFault)(nil)).Elem())\n}\n\ntype PbmPropertyMismatchFaultFault BasePbmPropertyMismatchFault\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmPropertyMismatchFaultFault\", reflect.TypeOf((*PbmPropertyMismatchFaultFault)(nil)).Elem())\n}\n\ntype PbmQueryAssociatedEntities PbmQueryAssociatedEntitiesRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryAssociatedEntities\", reflect.TypeOf((*PbmQueryAssociatedEntities)(nil)).Elem())\n}\n\ntype PbmQueryAssociatedEntitiesRequestType struct {\n\tThis     types.ManagedObjectReference `xml:\"_this\"`\n\tProfiles []PbmProfileId               `xml:\"profiles,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryAssociatedEntitiesRequestType\", reflect.TypeOf((*PbmQueryAssociatedEntitiesRequestType)(nil)).Elem())\n}\n\ntype PbmQueryAssociatedEntitiesResponse struct {\n\tReturnval 
[]PbmQueryProfileResult `xml:\"returnval,omitempty\"`\n}\n\ntype PbmQueryAssociatedEntity PbmQueryAssociatedEntityRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryAssociatedEntity\", reflect.TypeOf((*PbmQueryAssociatedEntity)(nil)).Elem())\n}\n\ntype PbmQueryAssociatedEntityRequestType struct {\n\tThis       types.ManagedObjectReference `xml:\"_this\"`\n\tProfile    PbmProfileId                 `xml:\"profile\"`\n\tEntityType string                       `xml:\"entityType,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryAssociatedEntityRequestType\", reflect.TypeOf((*PbmQueryAssociatedEntityRequestType)(nil)).Elem())\n}\n\ntype PbmQueryAssociatedEntityResponse struct {\n\tReturnval []PbmServerObjectRef `xml:\"returnval,omitempty\"`\n}\n\ntype PbmQueryAssociatedProfile PbmQueryAssociatedProfileRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryAssociatedProfile\", reflect.TypeOf((*PbmQueryAssociatedProfile)(nil)).Elem())\n}\n\ntype PbmQueryAssociatedProfileRequestType struct {\n\tThis   types.ManagedObjectReference `xml:\"_this\"`\n\tEntity PbmServerObjectRef           `xml:\"entity\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryAssociatedProfileRequestType\", reflect.TypeOf((*PbmQueryAssociatedProfileRequestType)(nil)).Elem())\n}\n\ntype PbmQueryAssociatedProfileResponse struct {\n\tReturnval []PbmProfileId `xml:\"returnval,omitempty\"`\n}\n\ntype PbmQueryAssociatedProfiles PbmQueryAssociatedProfilesRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryAssociatedProfiles\", reflect.TypeOf((*PbmQueryAssociatedProfiles)(nil)).Elem())\n}\n\ntype PbmQueryAssociatedProfilesRequestType struct {\n\tThis     types.ManagedObjectReference `xml:\"_this\"`\n\tEntities []PbmServerObjectRef         `xml:\"entities\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryAssociatedProfilesRequestType\", reflect.TypeOf((*PbmQueryAssociatedProfilesRequestType)(nil)).Elem())\n}\n\ntype PbmQueryAssociatedProfilesResponse struct {\n\tReturnval 
[]PbmQueryProfileResult `xml:\"returnval,omitempty\"`\n}\n\ntype PbmQueryByRollupComplianceStatus PbmQueryByRollupComplianceStatusRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryByRollupComplianceStatus\", reflect.TypeOf((*PbmQueryByRollupComplianceStatus)(nil)).Elem())\n}\n\ntype PbmQueryByRollupComplianceStatusRequestType struct {\n\tThis   types.ManagedObjectReference `xml:\"_this\"`\n\tStatus string                       `xml:\"status\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryByRollupComplianceStatusRequestType\", reflect.TypeOf((*PbmQueryByRollupComplianceStatusRequestType)(nil)).Elem())\n}\n\ntype PbmQueryByRollupComplianceStatusResponse struct {\n\tReturnval []PbmServerObjectRef `xml:\"returnval,omitempty\"`\n}\n\ntype PbmQueryDefaultRequirementProfile PbmQueryDefaultRequirementProfileRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryDefaultRequirementProfile\", reflect.TypeOf((*PbmQueryDefaultRequirementProfile)(nil)).Elem())\n}\n\ntype PbmQueryDefaultRequirementProfileRequestType struct {\n\tThis types.ManagedObjectReference `xml:\"_this\"`\n\tHub  PbmPlacementHub              `xml:\"hub\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryDefaultRequirementProfileRequestType\", reflect.TypeOf((*PbmQueryDefaultRequirementProfileRequestType)(nil)).Elem())\n}\n\ntype PbmQueryDefaultRequirementProfileResponse struct {\n\tReturnval *PbmProfileId `xml:\"returnval,omitempty\"`\n}\n\ntype PbmQueryDefaultRequirementProfiles PbmQueryDefaultRequirementProfilesRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryDefaultRequirementProfiles\", reflect.TypeOf((*PbmQueryDefaultRequirementProfiles)(nil)).Elem())\n}\n\ntype PbmQueryDefaultRequirementProfilesRequestType struct {\n\tThis       types.ManagedObjectReference `xml:\"_this\"`\n\tDatastores []PbmPlacementHub            `xml:\"datastores\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryDefaultRequirementProfilesRequestType\", 
reflect.TypeOf((*PbmQueryDefaultRequirementProfilesRequestType)(nil)).Elem())\n}\n\ntype PbmQueryDefaultRequirementProfilesResponse struct {\n\tReturnval []PbmDefaultProfileInfo `xml:\"returnval\"`\n}\n\ntype PbmQueryMatchingHub PbmQueryMatchingHubRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryMatchingHub\", reflect.TypeOf((*PbmQueryMatchingHub)(nil)).Elem())\n}\n\ntype PbmQueryMatchingHubRequestType struct {\n\tThis         types.ManagedObjectReference `xml:\"_this\"`\n\tHubsToSearch []PbmPlacementHub            `xml:\"hubsToSearch,omitempty\"`\n\tProfile      PbmProfileId                 `xml:\"profile\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryMatchingHubRequestType\", reflect.TypeOf((*PbmQueryMatchingHubRequestType)(nil)).Elem())\n}\n\ntype PbmQueryMatchingHubResponse struct {\n\tReturnval []PbmPlacementHub `xml:\"returnval,omitempty\"`\n}\n\ntype PbmQueryMatchingHubWithSpec PbmQueryMatchingHubWithSpecRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryMatchingHubWithSpec\", reflect.TypeOf((*PbmQueryMatchingHubWithSpec)(nil)).Elem())\n}\n\ntype PbmQueryMatchingHubWithSpecRequestType struct {\n\tThis         types.ManagedObjectReference   `xml:\"_this\"`\n\tHubsToSearch []PbmPlacementHub              `xml:\"hubsToSearch,omitempty\"`\n\tCreateSpec   PbmCapabilityProfileCreateSpec `xml:\"createSpec\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryMatchingHubWithSpecRequestType\", reflect.TypeOf((*PbmQueryMatchingHubWithSpecRequestType)(nil)).Elem())\n}\n\ntype PbmQueryMatchingHubWithSpecResponse struct {\n\tReturnval []PbmPlacementHub `xml:\"returnval,omitempty\"`\n}\n\ntype PbmQueryProfile PbmQueryProfileRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryProfile\", reflect.TypeOf((*PbmQueryProfile)(nil)).Elem())\n}\n\ntype PbmQueryProfileRequestType struct {\n\tThis            types.ManagedObjectReference `xml:\"_this\"`\n\tResourceType    PbmProfileResourceType       `xml:\"resourceType\"`\n\tProfileCategory string                       
`xml:\"profileCategory,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryProfileRequestType\", reflect.TypeOf((*PbmQueryProfileRequestType)(nil)).Elem())\n}\n\ntype PbmQueryProfileResponse struct {\n\tReturnval []PbmProfileId `xml:\"returnval,omitempty\"`\n}\n\ntype PbmQueryProfileResult struct {\n\ttypes.DynamicData\n\n\tObject    PbmServerObjectRef          `xml:\"object\"`\n\tProfileId []PbmProfileId              `xml:\"profileId,omitempty\"`\n\tFault     *types.LocalizedMethodFault `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryProfileResult\", reflect.TypeOf((*PbmQueryProfileResult)(nil)).Elem())\n}\n\ntype PbmQueryReplicationGroupResult struct {\n\ttypes.DynamicData\n\n\tObject             PbmServerObjectRef          `xml:\"object\"`\n\tReplicationGroupId *types.ReplicationGroupId   `xml:\"replicationGroupId,omitempty\"`\n\tFault              *types.LocalizedMethodFault `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryReplicationGroupResult\", reflect.TypeOf((*PbmQueryReplicationGroupResult)(nil)).Elem())\n}\n\ntype PbmQueryReplicationGroups PbmQueryReplicationGroupsRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryReplicationGroups\", reflect.TypeOf((*PbmQueryReplicationGroups)(nil)).Elem())\n}\n\ntype PbmQueryReplicationGroupsRequestType struct {\n\tThis     types.ManagedObjectReference `xml:\"_this\"`\n\tEntities []PbmServerObjectRef         `xml:\"entities,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQueryReplicationGroupsRequestType\", reflect.TypeOf((*PbmQueryReplicationGroupsRequestType)(nil)).Elem())\n}\n\ntype PbmQueryReplicationGroupsResponse struct {\n\tReturnval []PbmQueryReplicationGroupResult `xml:\"returnval,omitempty\"`\n}\n\ntype PbmQuerySpaceStatsForStorageContainer PbmQuerySpaceStatsForStorageContainerRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQuerySpaceStatsForStorageContainer\", 
reflect.TypeOf((*PbmQuerySpaceStatsForStorageContainer)(nil)).Elem())\n}\n\ntype PbmQuerySpaceStatsForStorageContainerRequestType struct {\n\tThis                types.ManagedObjectReference `xml:\"_this\"`\n\tDatastore           PbmServerObjectRef           `xml:\"datastore\"`\n\tCapabilityProfileId []PbmProfileId               `xml:\"capabilityProfileId,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmQuerySpaceStatsForStorageContainerRequestType\", reflect.TypeOf((*PbmQuerySpaceStatsForStorageContainerRequestType)(nil)).Elem())\n}\n\ntype PbmQuerySpaceStatsForStorageContainerResponse struct {\n\tReturnval []PbmDatastoreSpaceStatistics `xml:\"returnval,omitempty\"`\n}\n\ntype PbmResetDefaultRequirementProfile PbmResetDefaultRequirementProfileRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmResetDefaultRequirementProfile\", reflect.TypeOf((*PbmResetDefaultRequirementProfile)(nil)).Elem())\n}\n\ntype PbmResetDefaultRequirementProfileRequestType struct {\n\tThis    types.ManagedObjectReference `xml:\"_this\"`\n\tProfile *PbmProfileId                `xml:\"profile,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmResetDefaultRequirementProfileRequestType\", reflect.TypeOf((*PbmResetDefaultRequirementProfileRequestType)(nil)).Elem())\n}\n\ntype PbmResetDefaultRequirementProfileResponse struct {\n}\n\ntype PbmResetVSanDefaultProfile PbmResetVSanDefaultProfileRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmResetVSanDefaultProfile\", reflect.TypeOf((*PbmResetVSanDefaultProfile)(nil)).Elem())\n}\n\ntype PbmResetVSanDefaultProfileRequestType struct {\n\tThis types.ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmResetVSanDefaultProfileRequestType\", reflect.TypeOf((*PbmResetVSanDefaultProfileRequestType)(nil)).Elem())\n}\n\ntype PbmResetVSanDefaultProfileResponse struct {\n}\n\ntype PbmResourceInUse struct {\n\tPbmFault\n\n\tType string `xml:\"type,omitempty\"`\n\tName string `xml:\"name,omitempty\"`\n}\n\nfunc init() 
{\n\ttypes.Add(\"pbm:PbmResourceInUse\", reflect.TypeOf((*PbmResourceInUse)(nil)).Elem())\n}\n\ntype PbmResourceInUseFault PbmResourceInUse\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmResourceInUseFault\", reflect.TypeOf((*PbmResourceInUseFault)(nil)).Elem())\n}\n\ntype PbmRetrieveContent PbmRetrieveContentRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmRetrieveContent\", reflect.TypeOf((*PbmRetrieveContent)(nil)).Elem())\n}\n\ntype PbmRetrieveContentRequestType struct {\n\tThis       types.ManagedObjectReference `xml:\"_this\"`\n\tProfileIds []PbmProfileId               `xml:\"profileIds\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmRetrieveContentRequestType\", reflect.TypeOf((*PbmRetrieveContentRequestType)(nil)).Elem())\n}\n\ntype PbmRetrieveContentResponse struct {\n\tReturnval []BasePbmProfile `xml:\"returnval,typeattr\"`\n}\n\ntype PbmRetrieveServiceContent PbmRetrieveServiceContentRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmRetrieveServiceContent\", reflect.TypeOf((*PbmRetrieveServiceContent)(nil)).Elem())\n}\n\ntype PbmRetrieveServiceContentRequestType struct {\n\tThis types.ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmRetrieveServiceContentRequestType\", reflect.TypeOf((*PbmRetrieveServiceContentRequestType)(nil)).Elem())\n}\n\ntype PbmRetrieveServiceContentResponse struct {\n\tReturnval PbmServiceInstanceContent `xml:\"returnval\"`\n}\n\ntype PbmRollupComplianceResult struct {\n\ttypes.DynamicData\n\n\tOldestCheckTime             time.Time                    `xml:\"oldestCheckTime\"`\n\tEntity                      PbmServerObjectRef           `xml:\"entity\"`\n\tOverallComplianceStatus     string                       `xml:\"overallComplianceStatus\"`\n\tOverallComplianceTaskStatus string                       `xml:\"overallComplianceTaskStatus,omitempty\"`\n\tResult                      []PbmComplianceResult        `xml:\"result,omitempty\"`\n\tErrorCause                  []types.LocalizedMethodFault 
`xml:\"errorCause,omitempty\"`\n\tProfileMismatch             bool                         `xml:\"profileMismatch\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmRollupComplianceResult\", reflect.TypeOf((*PbmRollupComplianceResult)(nil)).Elem())\n}\n\ntype PbmServerObjectRef struct {\n\ttypes.DynamicData\n\n\tObjectType string `xml:\"objectType\"`\n\tKey        string `xml:\"key\"`\n\tServerUuid string `xml:\"serverUuid,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmServerObjectRef\", reflect.TypeOf((*PbmServerObjectRef)(nil)).Elem())\n}\n\ntype PbmServiceInstanceContent struct {\n\ttypes.DynamicData\n\n\tAboutInfo                 PbmAboutInfo                  `xml:\"aboutInfo\"`\n\tSessionManager            types.ManagedObjectReference  `xml:\"sessionManager\"`\n\tCapabilityMetadataManager types.ManagedObjectReference  `xml:\"capabilityMetadataManager\"`\n\tProfileManager            types.ManagedObjectReference  `xml:\"profileManager\"`\n\tComplianceManager         types.ManagedObjectReference  `xml:\"complianceManager\"`\n\tPlacementSolver           types.ManagedObjectReference  `xml:\"placementSolver\"`\n\tReplicationManager        *types.ManagedObjectReference `xml:\"replicationManager,omitempty\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmServiceInstanceContent\", reflect.TypeOf((*PbmServiceInstanceContent)(nil)).Elem())\n}\n\ntype PbmUpdate PbmUpdateRequestType\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmUpdate\", reflect.TypeOf((*PbmUpdate)(nil)).Elem())\n}\n\ntype PbmUpdateRequestType struct {\n\tThis       types.ManagedObjectReference   `xml:\"_this\"`\n\tProfileId  PbmProfileId                   `xml:\"profileId\"`\n\tUpdateSpec PbmCapabilityProfileUpdateSpec `xml:\"updateSpec\"`\n}\n\nfunc init() {\n\ttypes.Add(\"pbm:PbmUpdateRequestType\", reflect.TypeOf((*PbmUpdateRequestType)(nil)).Elem())\n}\n\ntype PbmUpdateResponse struct {\n}\n\ntype PbmVaioDataServiceInfo struct {\n\tPbmLineOfServiceInfo\n}\n\nfunc init() 
{\n\ttypes.Add(\"pbm:PbmVaioDataServiceInfo\", reflect.TypeOf((*PbmVaioDataServiceInfo)(nil)).Elem())\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/performance/manager.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage performance\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\n// Manager wraps mo.PerformanceManager.\ntype Manager struct {\n\tobject.Common\n\n\tSort bool\n\n\tpm struct {\n\t\tsync.Mutex\n\t\t*mo.PerformanceManager\n\t}\n\n\tproviderSummary struct {\n\t\tsync.Mutex\n\t\tm map[string]*types.PerfProviderSummary\n\t}\n\n\tinfoByName struct {\n\t\tsync.Mutex\n\t\tm map[string]*types.PerfCounterInfo\n\t}\n\n\tinfoByKey struct {\n\t\tsync.Mutex\n\t\tm map[int32]*types.PerfCounterInfo\n\t}\n}\n\n// NewManager creates a new Manager instance.\nfunc NewManager(client *vim25.Client) *Manager {\n\tm := Manager{\n\t\tCommon: object.NewCommon(client, *client.ServiceContent.PerfManager),\n\t}\n\n\tm.pm.PerformanceManager = new(mo.PerformanceManager)\n\n\treturn &m\n}\n\n// IntervalList wraps []types.PerfInterval.\ntype IntervalList []types.PerfInterval\n\n// Enabled returns a map with Level as the key and enabled PerfInterval.Name(s) as the value.\nfunc (l IntervalList) Enabled() map[int32][]string {\n\tenabled := make(map[int32][]string)\n\n\tfor level := int32(0); level <= 4; level++ 
{\n\t\tvar names []string\n\n\t\tfor _, interval := range l {\n\t\t\tif interval.Enabled && interval.Level >= level {\n\t\t\t\tnames = append(names, interval.Name)\n\t\t\t}\n\t\t}\n\n\t\tenabled[level] = names\n\t}\n\n\treturn enabled\n}\n\n// HistoricalInterval gets the PerformanceManager.HistoricalInterval property and wraps as an IntervalList.\nfunc (m *Manager) HistoricalInterval(ctx context.Context) (IntervalList, error) {\n\tvar pm mo.PerformanceManager\n\n\terr := m.Properties(ctx, m.Reference(), []string{\"historicalInterval\"}, &pm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn IntervalList(pm.HistoricalInterval), nil\n}\n\n// CounterInfo gets the PerformanceManager.PerfCounter property.\n// The property value is only collected once, subsequent calls return the cached value.\nfunc (m *Manager) CounterInfo(ctx context.Context) ([]types.PerfCounterInfo, error) {\n\tm.pm.Lock()\n\tdefer m.pm.Unlock()\n\n\tif len(m.pm.PerfCounter) == 0 {\n\t\terr := m.Properties(ctx, m.Reference(), []string{\"perfCounter\"}, m.pm.PerformanceManager)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn m.pm.PerfCounter, nil\n}\n\n// CounterInfoByName converts the PerformanceManager.PerfCounter property to a map,\n// where key is types.PerfCounterInfo.Name().\nfunc (m *Manager) CounterInfoByName(ctx context.Context) (map[string]*types.PerfCounterInfo, error) {\n\tm.infoByName.Lock()\n\tdefer m.infoByName.Unlock()\n\n\tif m.infoByName.m != nil {\n\t\treturn m.infoByName.m, nil\n\t}\n\n\tinfo, err := m.CounterInfo(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm.infoByName.m = make(map[string]*types.PerfCounterInfo)\n\n\tfor i := range info {\n\t\tc := &info[i]\n\n\t\tm.infoByName.m[c.Name()] = c\n\t}\n\n\treturn m.infoByName.m, nil\n}\n\n// CounterInfoByKey converts the PerformanceManager.PerfCounter property to a map,\n// where key is types.PerfCounterInfo.Key.\nfunc (m *Manager) CounterInfoByKey(ctx context.Context) 
(map[int32]*types.PerfCounterInfo, error) {\n\tm.infoByKey.Lock()\n\tdefer m.infoByKey.Unlock()\n\n\tif m.infoByKey.m != nil {\n\t\treturn m.infoByKey.m, nil\n\t}\n\n\tinfo, err := m.CounterInfo(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm.infoByKey.m = make(map[int32]*types.PerfCounterInfo)\n\n\tfor i := range info {\n\t\tc := &info[i]\n\n\t\tm.infoByKey.m[c.Key] = c\n\t}\n\n\treturn m.infoByKey.m, nil\n}\n\n// ProviderSummary wraps the QueryPerfProviderSummary method, caching the value based on entity.Type.\nfunc (m *Manager) ProviderSummary(ctx context.Context, entity types.ManagedObjectReference) (*types.PerfProviderSummary, error) {\n\tm.providerSummary.Lock()\n\tdefer m.providerSummary.Unlock()\n\n\tif m.providerSummary.m == nil {\n\t\tm.providerSummary.m = make(map[string]*types.PerfProviderSummary)\n\t}\n\n\ts, ok := m.providerSummary.m[entity.Type]\n\tif ok {\n\t\treturn s, nil\n\t}\n\n\treq := types.QueryPerfProviderSummary{\n\t\tThis:   m.Reference(),\n\t\tEntity: entity,\n\t}\n\n\tres, err := methods.QueryPerfProviderSummary(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts = &res.Returnval\n\n\tm.providerSummary.m[entity.Type] = s\n\n\treturn s, nil\n}\n\ntype groupPerfCounterInfo struct {\n\tinfo map[int32]*types.PerfCounterInfo\n\tids  []types.PerfMetricId\n}\n\nfunc (d groupPerfCounterInfo) Len() int {\n\treturn len(d.ids)\n}\n\nfunc (d groupPerfCounterInfo) Less(i, j int) bool {\n\tci := d.ids[i].CounterId\n\tcj := d.ids[j].CounterId\n\tgi := d.info[ci].GroupInfo.GetElementDescription()\n\tgj := d.info[cj].GroupInfo.GetElementDescription()\n\n\treturn gi.Key < gj.Key\n}\n\nfunc (d groupPerfCounterInfo) Swap(i, j int) {\n\td.ids[i], d.ids[j] = d.ids[j], d.ids[i]\n}\n\n// MetricList wraps []types.PerfMetricId\ntype MetricList []types.PerfMetricId\n\n// ByKey converts MetricList to map, where key is types.PerfMetricId.CounterId / types.PerfCounterInfo.Key\nfunc (l MetricList) ByKey() map[int32][]*types.PerfMetricId 
{\n\tids := make(map[int32][]*types.PerfMetricId, len(l))\n\n\tfor i := range l {\n\t\tid := &l[i]\n\t\tids[id.CounterId] = append(ids[id.CounterId], id)\n\t}\n\n\treturn ids\n}\n\n// AvailableMetric wraps the QueryAvailablePerfMetric method.\n// The MetricList is sorted by PerfCounterInfo.GroupInfo.Key if Manager.Sort == true.\nfunc (m *Manager) AvailableMetric(ctx context.Context, entity types.ManagedObjectReference, interval int32) (MetricList, error) {\n\treq := types.QueryAvailablePerfMetric{\n\t\tThis:       m.Reference(),\n\t\tEntity:     entity.Reference(),\n\t\tIntervalId: interval,\n\t}\n\n\tres, err := methods.QueryAvailablePerfMetric(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif m.Sort {\n\t\tinfo, err := m.CounterInfoByKey(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsort.Sort(groupPerfCounterInfo{info, res.Returnval})\n\t}\n\n\treturn MetricList(res.Returnval), nil\n}\n\n// Query wraps the QueryPerf method.\nfunc (m *Manager) Query(ctx context.Context, spec []types.PerfQuerySpec) ([]types.BasePerfEntityMetricBase, error) {\n\treq := types.QueryPerf{\n\t\tThis:      m.Reference(),\n\t\tQuerySpec: spec,\n\t}\n\n\tres, err := methods.QueryPerf(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\n// SampleByName uses the spec param as a template, constructing a []types.PerfQuerySpec for the given metrics and entities\n// and invoking the Query method.\n// The spec template can specify instances using the MetricId.Instance field, by default all instances are collected.\n// The spec template MaxSample defaults to 1.\n// If the spec template IntervalId is a historical interval and StartTime is not specified,\n// the StartTime is set to the current time - (IntervalId * MaxSample).\nfunc (m *Manager) SampleByName(ctx context.Context, spec types.PerfQuerySpec, metrics []string, entity []types.ManagedObjectReference) ([]types.BasePerfEntityMetricBase, error) 
{\n\tinfo, err := m.CounterInfoByName(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ids []types.PerfMetricId\n\n\tinstances := spec.MetricId\n\tif len(instances) == 0 {\n\t\t// Default to all instances\n\t\tinstances = []types.PerfMetricId{{Instance: \"*\"}}\n\t}\n\n\tfor _, name := range metrics {\n\t\tcounter, ok := info[name]\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"counter %q not found\", name)\n\t\t}\n\n\t\tfor _, i := range instances {\n\t\t\tids = append(ids, types.PerfMetricId{CounterId: counter.Key, Instance: i.Instance})\n\t\t}\n\t}\n\n\tspec.MetricId = ids\n\n\tif spec.MaxSample == 0 {\n\t\tspec.MaxSample = 1\n\t}\n\n\tif spec.IntervalId >= 60 && spec.StartTime == nil {\n\t\t// Need a StartTime to make use of history\n\t\tnow, err := methods.GetCurrentTime(ctx, m.Client())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Go back in time\n\t\tx := spec.IntervalId * -1 * spec.MaxSample\n\t\tt := now.Add(time.Duration(x) * time.Second)\n\t\tspec.StartTime = &t\n\t}\n\n\tvar query []types.PerfQuerySpec\n\n\tfor _, e := range entity {\n\t\tspec.Entity = e.Reference()\n\t\tquery = append(query, spec)\n\t}\n\n\treturn m.Query(ctx, query)\n}\n\n// MetricSeries contains the same data as types.PerfMetricIntSeries, but with the CounterId converted to Name.\ntype MetricSeries struct {\n\tName     string\n\tunit     string\n\tInstance string\n\tValue    []int64\n}\n\nfunc (s *MetricSeries) Format(val int64) string {\n\tswitch types.PerformanceManagerUnit(s.unit) {\n\tcase types.PerformanceManagerUnitPercent:\n\t\treturn strconv.FormatFloat(float64(val)/100.0, 'f', 2, 64)\n\tdefault:\n\t\treturn strconv.FormatInt(val, 10)\n\t}\n}\n\n// ValueCSV converts the Value field to a CSV string\nfunc (s *MetricSeries) ValueCSV() string {\n\tvals := make([]string, len(s.Value))\n\n\tfor i := range s.Value {\n\t\tvals[i] = s.Format(s.Value[i])\n\t}\n\n\treturn strings.Join(vals, \",\")\n}\n\n// EntityMetric contains the same data as 
types.PerfEntityMetric, but with MetricSeries type for the Value field.\ntype EntityMetric struct {\n\tEntity types.ManagedObjectReference\n\n\tSampleInfo []types.PerfSampleInfo\n\tValue      []MetricSeries\n}\n\n// SampleInfoCSV converts the SampleInfo field to a CSV string\nfunc (m *EntityMetric) SampleInfoCSV() string {\n\tvals := make([]string, len(m.SampleInfo)*2)\n\n\ti := 0\n\n\tfor _, s := range m.SampleInfo {\n\t\tvals[i] = s.Timestamp.Format(time.RFC3339)\n\t\ti++\n\t\tvals[i] = strconv.Itoa(int(s.Interval))\n\t\ti++\n\t}\n\n\treturn strings.Join(vals, \",\")\n}\n\n// ToMetricSeries converts []BasePerfEntityMetricBase to []EntityMetric\nfunc (m *Manager) ToMetricSeries(ctx context.Context, series []types.BasePerfEntityMetricBase) ([]EntityMetric, error) {\n\tcounters, err := m.CounterInfoByKey(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar result []EntityMetric\n\n\tfor i := range series {\n\t\tvar values []MetricSeries\n\t\ts, ok := series[i].(*types.PerfEntityMetric)\n\t\tif !ok {\n\t\t\tpanic(fmt.Errorf(\"expected type %T, got: %T\", s, series[i]))\n\t\t}\n\n\t\tfor j := range s.Value {\n\t\t\tv := s.Value[j].(*types.PerfMetricIntSeries)\n\n\t\t\tvalues = append(values, MetricSeries{\n\t\t\t\tName:     counters[v.Id.CounterId].Name(),\n\t\t\t\tunit:     counters[v.Id.CounterId].UnitInfo.GetElementDescription().Key,\n\t\t\t\tInstance: v.Id.Instance,\n\t\t\t\tValue:    v.Value,\n\t\t\t})\n\t\t}\n\n\t\tresult = append(result, EntityMetric{\n\t\t\tEntity:     s.Entity,\n\t\t\tSampleInfo: s.SampleInfo,\n\t\t\tValue:      values,\n\t\t})\n\t}\n\n\treturn result, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/property/collector.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage property\n\nimport (\n\t\"context\"\n\t\"errors\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\n// Collector models the PropertyCollector managed object.\n//\n// For more information, see:\n// http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.wssdk.apiref.doc/vmodl.query.PropertyCollector.html\n//\ntype Collector struct {\n\troundTripper soap.RoundTripper\n\treference    types.ManagedObjectReference\n}\n\n// DefaultCollector returns the session's default property collector.\nfunc DefaultCollector(c *vim25.Client) *Collector {\n\tp := Collector{\n\t\troundTripper: c,\n\t\treference:    c.ServiceContent.PropertyCollector,\n\t}\n\n\treturn &p\n}\n\nfunc (p Collector) Reference() types.ManagedObjectReference {\n\treturn p.reference\n}\n\n// Create creates a new session-specific Collector that can be used to\n// retrieve property updates independent of any other Collector.\nfunc (p *Collector) Create(ctx context.Context) (*Collector, error) {\n\treq := types.CreatePropertyCollector{\n\t\tThis: p.Reference(),\n\t}\n\n\tres, err := methods.CreatePropertyCollector(ctx, p.roundTripper, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnewp := 
Collector{\n\t\troundTripper: p.roundTripper,\n\t\treference:    res.Returnval,\n\t}\n\n\treturn &newp, nil\n}\n\n// Destroy destroys this Collector.\nfunc (p *Collector) Destroy(ctx context.Context) error {\n\treq := types.DestroyPropertyCollector{\n\t\tThis: p.Reference(),\n\t}\n\n\t_, err := methods.DestroyPropertyCollector(ctx, p.roundTripper, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.reference = types.ManagedObjectReference{}\n\treturn nil\n}\n\nfunc (p *Collector) CreateFilter(ctx context.Context, req types.CreateFilter) error {\n\treq.This = p.Reference()\n\n\t_, err := methods.CreateFilter(ctx, p.roundTripper, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (p *Collector) WaitForUpdates(ctx context.Context, v string) (*types.UpdateSet, error) {\n\treq := types.WaitForUpdatesEx{\n\t\tThis:    p.Reference(),\n\t\tVersion: v,\n\t}\n\n\tres, err := methods.WaitForUpdatesEx(ctx, p.roundTripper, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (p *Collector) RetrieveProperties(ctx context.Context, req types.RetrieveProperties) (*types.RetrievePropertiesResponse, error) {\n\treq.This = p.Reference()\n\treturn methods.RetrieveProperties(ctx, p.roundTripper, &req)\n}\n\n// Retrieve loads properties for a slice of managed objects. The dst argument\n// must be a pointer to a []interface{}, which is populated with the instances\n// of the specified managed objects, with the relevant properties filled in. 
If\n// the properties slice is nil, all properties are loaded.\nfunc (p *Collector) Retrieve(ctx context.Context, objs []types.ManagedObjectReference, ps []string, dst interface{}) error {\n\tvar propSpec *types.PropertySpec\n\tvar objectSet []types.ObjectSpec\n\n\tfor _, obj := range objs {\n\t\t// Ensure that all object reference types are the same\n\t\tif propSpec == nil {\n\t\t\tpropSpec = &types.PropertySpec{\n\t\t\t\tType: obj.Type,\n\t\t\t}\n\n\t\t\tif ps == nil {\n\t\t\t\tpropSpec.All = types.NewBool(true)\n\t\t\t} else {\n\t\t\t\tpropSpec.PathSet = ps\n\t\t\t}\n\t\t} else {\n\t\t\tif obj.Type != propSpec.Type {\n\t\t\t\treturn errors.New(\"object references must have the same type\")\n\t\t\t}\n\t\t}\n\n\t\tobjectSpec := types.ObjectSpec{\n\t\t\tObj:  obj,\n\t\t\tSkip: types.NewBool(false),\n\t\t}\n\n\t\tobjectSet = append(objectSet, objectSpec)\n\t}\n\n\treq := types.RetrieveProperties{\n\t\tSpecSet: []types.PropertyFilterSpec{\n\t\t\t{\n\t\t\t\tObjectSet: objectSet,\n\t\t\t\tPropSet:   []types.PropertySpec{*propSpec},\n\t\t\t},\n\t\t},\n\t}\n\n\tres, err := p.RetrieveProperties(ctx, req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif d, ok := dst.(*[]types.ObjectContent); ok {\n\t\t*d = res.Returnval\n\t\treturn nil\n\t}\n\n\treturn mo.LoadRetrievePropertiesResponse(res, dst)\n}\n\n// RetrieveWithFilter populates dst as Retrieve does, but only for entities matching the given filter.\nfunc (p *Collector) RetrieveWithFilter(ctx context.Context, objs []types.ManagedObjectReference, ps []string, dst interface{}, filter Filter) error {\n\tif len(filter) == 0 {\n\t\treturn p.Retrieve(ctx, objs, ps, dst)\n\t}\n\n\tvar content []types.ObjectContent\n\n\terr := p.Retrieve(ctx, objs, filter.Keys(), &content)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tobjs = filter.MatchObjectContent(content)\n\n\tif len(objs) == 0 {\n\t\treturn nil\n\t}\n\n\treturn p.Retrieve(ctx, objs, ps, dst)\n}\n\n// RetrieveOne calls Retrieve with a single managed object reference.\nfunc (p 
*Collector) RetrieveOne(ctx context.Context, obj types.ManagedObjectReference, ps []string, dst interface{}) error {\n\tvar objs = []types.ManagedObjectReference{obj}\n\treturn p.Retrieve(ctx, objs, ps, dst)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/property/filter.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage property\n\nimport (\n\t\"fmt\"\n\t\"path/filepath\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\n// Filter provides methods for matching against types.DynamicProperty\ntype Filter map[string]types.AnyType\n\n// Keys returns the Filter map keys as a []string\nfunc (f Filter) Keys() []string {\n\tkeys := make([]string, 0, len(f))\n\n\tfor key := range f {\n\t\tkeys = append(keys, key)\n\t}\n\n\treturn keys\n}\n\n// MatchProperty returns true if a Filter entry matches the given prop.\nfunc (f Filter) MatchProperty(prop types.DynamicProperty) bool {\n\tmatch, ok := f[prop.Name]\n\tif !ok {\n\t\treturn false\n\t}\n\n\tif match == prop.Val {\n\t\treturn true\n\t}\n\n\tptype := reflect.TypeOf(prop.Val)\n\n\tif strings.HasPrefix(ptype.Name(), \"ArrayOf\") {\n\t\tpval := reflect.ValueOf(prop.Val).Field(0)\n\n\t\tfor i := 0; i < pval.Len(); i++ {\n\t\t\tprop.Val = pval.Index(i).Interface()\n\n\t\t\tif f.MatchProperty(prop) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn false\n\t}\n\n\tif reflect.TypeOf(match) != ptype {\n\t\ts, ok := match.(string)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\t// convert if we can\n\t\tswitch prop.Val.(type) {\n\t\tcase bool:\n\t\t\tmatch, _ = strconv.ParseBool(s)\n\t\tcase int16:\n\t\t\tx, _ := strconv.ParseInt(s, 10, 16)\n\t\t\tmatch = int16(x)\n\t\tcase 
int32:\n\t\t\tx, _ := strconv.ParseInt(s, 10, 32)\n\t\t\tmatch = int32(x)\n\t\tcase int64:\n\t\t\tmatch, _ = strconv.ParseInt(s, 10, 64)\n\t\tcase float32:\n\t\t\tx, _ := strconv.ParseFloat(s, 32)\n\t\t\tmatch = float32(x)\n\t\tcase float64:\n\t\t\tmatch, _ = strconv.ParseFloat(s, 64)\n\t\tcase fmt.Stringer:\n\t\t\tprop.Val = prop.Val.(fmt.Stringer).String()\n\t\tdefault:\n\t\t\tif ptype.Kind() != reflect.String {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t// An enum type we can convert to a string type\n\t\t\tprop.Val = reflect.ValueOf(prop.Val).String()\n\t\t}\n\t}\n\n\tswitch pval := prop.Val.(type) {\n\tcase string:\n\t\tm, _ := filepath.Match(match.(string), pval)\n\t\treturn m\n\tdefault:\n\t\treturn reflect.DeepEqual(match, pval)\n\t}\n}\n\n// MatchPropertyList returns true if all given props match the Filter.\nfunc (f Filter) MatchPropertyList(props []types.DynamicProperty) bool {\n\tfor _, p := range props {\n\t\tif !f.MatchProperty(p) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\n// MatchObjectContent returns a list of ObjectContent.Obj where the ObjectContent.PropSet matches the Filter.\nfunc (f Filter) MatchObjectContent(objects []types.ObjectContent) []types.ManagedObjectReference {\n\tvar refs []types.ManagedObjectReference\n\n\tfor _, o := range objects {\n\t\tif f.MatchPropertyList(o.PropSet) {\n\t\t\trefs = append(refs, o.Obj)\n\t\t}\n\t}\n\n\treturn refs\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/property/filter_test.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage property\n\nimport (\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nfunc TestMatchProperty(t *testing.T) {\n\ttests := []struct {\n\t\tkey  string\n\t\tval  types.AnyType\n\t\tpass types.AnyType\n\t\tfail types.AnyType\n\t}{\n\t\t{\"string\", \"bar\", \"bar\", \"foo\"},\n\t\t{\"match\", \"foo.bar\", \"foo.*\", \"foobarbaz\"},\n\t\t{\"moref\", types.ManagedObjectReference{Type: \"HostSystem\", Value: \"foo\"}, \"HostSystem:foo\", \"bar\"}, // implements fmt.Stringer\n\t\t{\"morefm\", types.ManagedObjectReference{Type: \"HostSystem\", Value: \"foo\"}, \"*foo\", \"bar\"},\n\t\t{\"morefs\", types.ArrayOfManagedObjectReference{ManagedObjectReference: []types.ManagedObjectReference{{Type: \"HostSystem\", Value: \"foo\"}}}, \"*foo\", \"bar\"},\n\t\t{\"enum\", types.VirtualMachinePowerStatePoweredOn, \"poweredOn\", \"poweredOff\"},\n\t\t{\"int16\", int32(16), int32(16), int32(42)},\n\t\t{\"int32\", int32(32), int32(32), int32(42)},\n\t\t{\"int32s\", int32(32), \"32\", \"42\"},\n\t\t{\"int64\", int64(64), int64(64), int64(42)},\n\t\t{\"int64s\", int64(64), \"64\", \"42\"},\n\t\t{\"float32\", float32(32.32), float32(32.32), float32(42.0)},\n\t\t{\"float32s\", float32(32.32), \"32.32\", \"42.0\"},\n\t\t{\"float64\", float64(64.64), float64(64.64), float64(42.0)},\n\t\t{\"float64s\", float64(64.64), \"64.64\", \"42.0\"},\n\t}\n\n\tfor _, 
test := range tests {\n\t\tp := types.DynamicProperty{Name: test.key, Val: test.val}\n\n\t\tfor match, value := range map[bool]types.AnyType{true: test.pass, false: test.fail} {\n\t\t\tresult := Filter{test.key: value}.MatchProperty(p)\n\n\t\t\tif result != match {\n\t\t\t\tt.Errorf(\"%s: %t\", test.key, result)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/property/wait.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage property\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\n// Wait waits for any of the specified properties of the specified managed\n// object to change. It calls the specified function for every update it\n// receives. If this function returns false, it continues waiting for\n// subsequent updates. If this function returns true, it stops waiting and\n// returns.\n//\n// To only receive updates for the specified managed object, the function\n// creates a new property collector and calls CreateFilter. 
A new property\n// collector is required because filters can only be added, not removed.\n//\n// The newly created collector is destroyed before this function returns (both\n// in case of success or error).\n//\nfunc Wait(ctx context.Context, c *Collector, obj types.ManagedObjectReference, ps []string, f func([]types.PropertyChange) bool) error {\n\tp, err := c.Create(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Attempt to destroy the collector using the background context, as the\n\t// specified context may have timed out or have been cancelled.\n\tdefer p.Destroy(context.Background())\n\n\treq := types.CreateFilter{\n\t\tSpec: types.PropertyFilterSpec{\n\t\t\tObjectSet: []types.ObjectSpec{\n\t\t\t\t{\n\t\t\t\t\tObj: obj,\n\t\t\t\t},\n\t\t\t},\n\t\t\tPropSet: []types.PropertySpec{\n\t\t\t\t{\n\t\t\t\t\tPathSet: ps,\n\t\t\t\t\tType:    obj.Type,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif len(ps) == 0 {\n\t\treq.Spec.PropSet[0].All = types.NewBool(true)\n\t}\n\n\terr = p.CreateFilter(ctx, req)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn waitLoop(ctx, p, func(_ types.ManagedObjectReference, pc []types.PropertyChange) bool {\n\t\treturn f(pc)\n\t})\n}\n\n// WaitForView waits for any of the specified properties of the managed\n// objects in the View to change. It calls the specified function for every update it\n// receives. If this function returns false, it continues waiting for\n// subsequent updates. If this function returns true, it stops waiting and\n// returns.\n//\n// To only receive updates for the View's specified managed objects, the function\n// creates a new property collector and calls CreateFilter. 
A new property\n// collector is required because filters can only be added, not removed.\n//\n// The newly created collector is destroyed before this function returns (both\n// in case of success or error).\n//\n// The code assumes that all objects in the View are the same type\nfunc WaitForView(ctx context.Context, c *Collector, view types.ManagedObjectReference, obj types.ManagedObjectReference, ps []string, f func(types.ManagedObjectReference, []types.PropertyChange) bool) error {\n\tp, err := c.Create(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Attempt to destroy the collector using the background context, as the\n\t// specified context may have timed out or have been cancelled.\n\tdefer p.Destroy(context.Background())\n\n\treq := types.CreateFilter{\n\t\tSpec: types.PropertyFilterSpec{\n\t\t\tObjectSet: []types.ObjectSpec{\n\t\t\t\t{\n\t\t\t\t\tObj: view,\n\t\t\t\t\tSelectSet: []types.BaseSelectionSpec{\n\t\t\t\t\t\t&types.TraversalSpec{\n\t\t\t\t\t\t\tSelectionSpec: types.SelectionSpec{\n\t\t\t\t\t\t\t\tName: \"traverseEntities\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tPath: \"view\",\n\t\t\t\t\t\t\tType: view.Type}},\n\t\t\t\t},\n\t\t\t},\n\t\t\tPropSet: []types.PropertySpec{\n\t\t\t\t{\n\t\t\t\t\tType:    obj.Type,\n\t\t\t\t\tPathSet: ps,\n\t\t\t\t},\n\t\t\t},\n\t\t}}\n\n\terr = p.CreateFilter(ctx, req)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn waitLoop(ctx, p, f)\n}\n\nfunc waitLoop(ctx context.Context, c *Collector, f func(types.ManagedObjectReference, []types.PropertyChange) bool) error {\n\tfor version := \"\"; ; {\n\t\tres, err := c.WaitForUpdates(ctx, version)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Retry if the result came back empty\n\t\tif res == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tversion = res.Version\n\n\t\tfor _, fs := range res.FilterSet {\n\t\t\tfor _, os := range fs.ObjectSet {\n\t\t\t\tif f(os.Obj, os.ChangeSet) {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/scripts/.gitignore",
    "content": ".wireshark-*\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/scripts/contributors.sh",
    "content": "#!/bin/bash -e\n\nfile=\"$(git rev-parse --show-toplevel)/CONTRIBUTORS\"\n\ncat <<EOF > \"$file\"\n# People who can (and typically have) contributed to this repository.\n#\n# This script is generated by $(basename \"$0\")\n#\n\nEOF\n\ngit log --format='%aN <%aE>' | sort -uf >> \"$file\"\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/scripts/debug-ls.sh",
    "content": "#!/bin/bash\n\n# Copyright (c) 2014 VMware, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -e\n\n# This script shows for every request in a debug trace how long it took\n# and the name of the request body.\n\nfunction body-name {\n  (\n    xmllint --shell $1 <<EOS\n    setns soapenv=http://schemas.xmlsoap.org/soap/envelope/\n    xpath name(//soapenv:Body/*)\nEOS\n  )  | head -1 | sed 's/.*Object is a string : \\(.*\\)$/\\1/'\n}\n\nif [ -n \"$1\" ]; then\n  cd $1\nfi\n\nfor req in $(find . -name '*.req.xml'); do\n  base=$(basename $req .req.xml)\n  session=$(echo $base | awk -F'-' \"{printf \\\"%d\\\", \\$1}\")\n  number=$(echo $base | awk -F'-' \"{printf \\\"%d\\\", \\$2}\")\n  client_log=$(dirname $req)/${session}-client.log\n  took=$(awk \"/ ${number} took / { print \\$4 }\" ${client_log})\n\n  printf \"%s %8s: %s\\n\" ${base} ${took} $(body-name $req)\ndone\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/scripts/debug-xmlformat.sh",
    "content": "#!/bin/bash -e\n\n# pipe the most recent debug run to xmlformat\ncd ${GOVC_DEBUG_PATH-\"$HOME/.govmomi/debug\"}\ncd $(ls -t | head -1)\n\nheader() {\n    printf \"<!-- %s %s/%s\\n%s\\n-->\\n\" \"$1\" \"$PWD\" \"$2\" \"$(tr -d '\\r' < \"$3\")\"\n}\n\nfor file in *.req.xml; do\n    base=$(basename \"$file\" .req.xml)\n    header Request \"$file\" \"${base}.req.headers\"\n    xmlformat < \"$file\"\n    file=\"${base}.res.xml\"\n    header Response \"$file\" \"${base}.res.headers\"\n    xmlformat < \"$file\"\ndone\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/scripts/govc-env.bash",
    "content": "# Copyright (c) 2015 VMware, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Provide a simple shell extension to save and load govc\n# environments to disk. No more running `export GOVC_ABC=xyz`\n# in different shells over and over again. Loading the right\n# govc environment variables is now only one short and\n# autocompleted command away!\n#\n# Usage:\n# * Source this file from your `~/.bashrc` or running shell.\n# * Execute `govc-env` to print GOVC_* variables.\n# * Execute `govc-env --save <name>` to save GOVC_* variables.\n# * Execute `govc-env <name>` to load GOVC_* variables.\n#\n\n_govc_env_dir=$HOME/.govmomi/env\nmkdir -p \"${_govc_env_dir}\"\n\n_govc-env-complete() {\n  local w=\"${COMP_WORDS[COMP_CWORD]}\"\n  local c=\"$(find ${_govc_env_dir} -mindepth 1 -maxdepth 1 -type f  | sort | xargs -r -L1 basename | xargs echo)\"\n\n  # Only allow completion if preceding argument if the function itself\n  if [ \"$3\" == \"govc-env\" ]; then\n    COMPREPLY=( $(compgen -W \"${c}\" -- \"${w}\") )\n  fi\n}\n\ngovc-env() {\n  # Print current environment\n  if [ -z \"$1\" ]; then\n    for VAR in $(env | grep ^GOVC_ | cut -d= -f1); do\n      echo \"export ${VAR}='${!VAR}'\"\n    done\n\n    return\n  fi\n\n  # Save current environment\n  if [ \"$1\" == \"--save\" ]; then\n    govc-env > ${_govc_env_dir}/$2\n    return\n  fi\n\n  # Load specified environment\n  source ${_govc_env_dir}/$1\n}\n\ncomplete -F 
_govc-env-complete govc-env\n\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/scripts/govc_bash_completion",
    "content": "#!/bin/bash\n\n# govc Bash completion script\n# place in etc/bash_completion.d/ or source on command line with \".\"\n\n_govc_complete()\n{\n\tlocal cur prev subcmd\n\tprev=${COMP_WORDS[COMP_CWORD-1]}\n\tcur=${COMP_WORDS[COMP_CWORD]}\n\tsubcmd=${COMP_WORDS[1]}\n\tCOMPREPLY=()\n\n\tif [[ ${prev} == \"govc\" ]] ; then # show subcommands, no options\n\t\tCOMPREPLY=( $(compgen -W \"$(govc -h | grep -v Usage | tr -s '\\n' ' ')\" -- ${cur}) )\n\t\treturn 0\n\n\telif [[ ${cur} == \"-\"* ]] ; then\n\t\t: # drop out and show options\n\n\telif [[ ${subcmd} == \"ls\" ]] ; then # not completing an option, try for appropriate values\n\t\tif [[ ${prev} == \"-t\" ]] ; then\n\t\t\tCOMPREPLY=( $(compgen -W \"$(govc ls -l \"/**\" | awk '{print $2}' | \\\n\t\t\t\tsort -u | tr -d '()' | tr '\\n' ' '  )\" -- ${cur}) )\n\t\telse\n\t\t\tCOMPREPLY=( $(compgen -W \"$(govc ls \"${cur:-/*}*\" | tr -s '\\n' ' ' )\" -- ${cur}) )\n\t\tfi\n\n\telif [[ ${subcmd} == \"vm.\"* || ${prev} == \"-vm\" ]] ; then  \n\t\tCOMPREPLY=( $(compgen -W \"$(govc ls -t VirtualMachine -l \"${cur}*\" | \\\n\t\t\tawk '{print $1}' | tr -s '\\n' ' ' )\" -- ${cur}) )\n\tfi\n\n\t# did not hit any specifcs so show all options from help\n\tif [[ -z ${COMPREPLY} ]]; then\n\t\tCOMPREPLY=( $(compgen -W \"-h $(govc ${subcmd} -h | awk '{print $1}' | \\\n\t\t   \tgrep \"^-\" | sed -e 's/=.*//g' | tr -s '\\n' ' ' )\" -- ${cur}) )\n\tfi\n\n\treturn 0\n}\ncomplete -F _govc_complete govc\n\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/scripts/headers/go.txt",
    "content": "/*\nCopyright (c) ${YEARS} VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/scripts/headers/rb.txt",
    "content": "# Copyright (c) ${YEARS} VMware, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/scripts/license.sh",
    "content": "#!/bin/bash\n\nset -e\n\nheader_dir=$(dirname $0)/headers\n\ntmpfile=$(mktemp)\ntrap \"rm -f ${tmpfile}\" EXIT\n\ngit ls-files | while read file; do\n  years=( $(git log --format='%ai' $file | cut -d- -f1 | sort -u) )\n  num_years=${#years[@]}\n\n  if [ \"${num_years}\" == 0 ]; then\n    export YEARS=\"$(date +%Y)\"\n  else\n    yearA=${years[0]}\n    yearB=${years[$((${num_years}-1))]}\n\n    if [ ${yearA} == ${yearB} ]; then\n      export YEARS=\"${yearA}\"\n    else\n      export YEARS=\"${yearA}-${yearB}\"\n    fi\n  fi\n\n  case \"$file\" in\n    vim25/xml/*)\n      # Ignore\n      ;;\n    *.go)\n      sed -e \"s/\\${YEARS}/${YEARS}/\" ${header_dir}/go.txt > ${tmpfile}\n      last_header_line=$(grep -n '\\*/' ${file} | head -1 | cut -d: -f1)\n      tail -n +$((${last_header_line} + 1)) ${file} >> ${tmpfile}\n      mv ${tmpfile} ${file}\n      ;;\n    *.rb)\n      sed -e \"s/\\${YEARS}/${YEARS}/\" ${header_dir}/rb.txt > ${tmpfile}\n      last_header_line=$(grep -n '^$' ${file} | head -1 | cut -d: -f1)\n      tail -n +$((${last_header_line})) ${file} >> ${tmpfile}\n      mv ${tmpfile} ${file}\n      ;;\n    *)\n      echo \"Unhandled file: $file\"\n      ;;\n  esac\ndone\n\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/.gitignore",
    "content": "*.box\n*.ova\n.vagrant\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/Vagrantfile",
    "content": "# -*- mode: ruby -*-\n# vi: set ft=ruby :\n\n# Using the VCSA base box, no provisioning, inventory will be empty.\n\nVagrant.configure(\"2\") do |config|\n  config.vm.hostname = \"vcsa\"\n\n  config.vm.box = \"vcsa\"\n  config.vm.synced_folder \".\", \"/vagrant\", disabled: true\n\n  config.vm.network \"forwarded_port\", guest: 443, host: 16443\nend\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/create-box.sh",
    "content": "#!/bin/sh\n\nset -e\n\nif [ \"$(uname -s)\" == \"Darwin\" ]; then\n  PATH=\"/Applications/VMware Fusion.app/Contents/Library:$PATH\"\n  PATH=\"/Applications/VMware Fusion.app/Contents/Library/VMware OVF Tool:$PATH\"\nfi\n\novf=\"$1\"\n\nif [ -z \"$ovf\" ]; then\n  ovf=\"./VMware-vCenter-Server-Appliance-5.5.0.10300-2000350_OVA10.ova\"\nfi\n\n# check for greadlink and gmktemp\nreadlink=$(type -p greadlink readlink | head -1)\nmktemp=$(type -p gmktemp mktemp | head -1)\n\ndir=$($readlink -nf $(dirname $0))\ntmp=$($mktemp -d)\ntrap \"rm -rf $tmp\" EXIT\n\ncd $tmp\n\necho \"Converting ovf...\"\novftool \\\n  --noSSLVerify \\\n  --acceptAllEulas \\\n  --overwrite \\\n  --powerOffTarget \\\n  $ovf vcsa.vmx\n\necho \"Starting vm...\"\nvmrun start vcsa.vmx nogui\n\necho \"Waiting for vm ip...\"\nip=$(vmrun getGuestIPAddress vcsa.vmx -wait)\n\necho \"Configuring vm for use with vagrant...\"\nvmrun -gu root -gp vmware CopyFileFromHostToGuest vcsa.vmx \\\n      $dir/vagrant.sh /tmp/vagrant.sh\n\nvmrun -gu root -gp vmware runProgramInGuest vcsa.vmx \\\n      /bin/sh -e /tmp/vagrant.sh\n\nvmrun -gu root -gp vmware deleteFileInGuest vcsa.vmx \\\n      /tmp/vagrant.sh\n\necho \"Configuring vCenter Server Appliance...\"\n\nssh_opts=\"-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -T\"\n\nssh ${ssh_opts} -i ~/.vagrant.d/insecure_private_key vagrant@$ip <<EOS\necho \"Accepting EULA ...\"\nsudo /usr/sbin/vpxd_servicecfg eula accept\n\necho \"Configuring Embedded DB ...\"\nsudo /usr/sbin/vpxd_servicecfg db write embedded\n\necho \"Configuring SSO...\"\nsudo /usr/sbin/vpxd_servicecfg sso write embedded\n\necho \"Starting VCSA ...\"\nsudo /usr/sbin/vpxd_servicecfg service start\nEOS\n\necho \"Stopping vm...\"\nvmrun stop vcsa.vmx\n\nrm -f vmware.log\n\nsed -i -e 's/\"bridged\"/\"nat\"/' vcsa.vmx\n\necho '{\"provider\":\"vmware_desktop\"}' > ./metadata.json\n\ncd $dir\n\ntar -C $tmp -cvzf vcsa.box .\n\nvagrant box add --name vcsa vcsa.box\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/scripts/vagrant/vcsa/vagrant.sh",
    "content": "#!/bin/sh\n\nuseradd vagrant -m -s /bin/bash\ngroupmod -A vagrant wheel\n\necho \"vagrant ALL=(ALL) NOPASSWD: ALL\" >> /etc/sudoers\n\nmkdir ~vagrant/.ssh\nwget --no-check-certificate \\\n     https://raw.githubusercontent.com/mitchellh/vagrant/master/keys/vagrant.pub \\\n     -O ~vagrant/.ssh/authorized_keys\nchown -R vagrant ~vagrant/.ssh\nchmod -R go-rwsx ~vagrant/.ssh\n\nsed -i -e 's/^#UseDNS yes/UseDNS no/' /etc/ssh/sshd_config\nsed -i -e 's/^AllowTcpForwarding no//' /etc/ssh/sshd_config\nsed -i -e 's/^PermitTunnel no//' /etc/ssh/sshd_config\nsed -i -e 's/^MaxSessions 1//' /etc/ssh/sshd_config\n\n# disable password expiration\nfor uid in root vagrant; do\n  chage -I -1 -E -1 -m 0 -M -1 $uid\ndone\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/scripts/wireshark-esx.sh",
    "content": "#!/bin/bash -e\n#\n# Capture ESXi traffic and decrypt SOAP traffic on port 443 via wireshark\n\n# Device to capture\ndev=\"${1-vmk0}\"\n\n# Device to get the ip for wireshark ssl_keys config\nif [ \"$dev\" = \"lo0\" ] ; then\n  ip_dev=\"vmk0\"\nelse\n  ip_dev=\"$dev\"\nfi\n\nip=$(govc host.info -k -json | \\\n        jq -r \".HostSystems[].Config.Network.Vnic[] | select(.Device == \\\"${ip_dev}\\\") | .Spec.Ip.IpAddress\")\n\nscp=(scp)\nssh=(ssh)\n\n# Check if vagrant ssh-config applies to $ip\nif [ -d \".vagrant\" ] ; then\n  vssh_opts=($(vagrant ssh-config | awk NF | awk -v ORS=' ' '{print \"-o \" $1 \"=\" $2}'))\n  if grep \"HostName=${ip}\" >/dev/null <<<\"${vssh_opts[*]}\" ; then\n    ssh_opts=(\"${vssh_opts[@]}\")\n  fi\nfi\n\n# Otherwise, use default ssh opts + sshpass if available\nif [ ${#ssh_opts[@]} -eq 0 ] ; then\n  user=\"$(govc env GOVC_USERNAME)\"\n  ssh_opts=(-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=FATAL -o User=$user)\n\n  if [ -x \"$(which sshpass)\" ] ; then\n    password=\"$(govc env GOVC_PASSWORD)\"\n    scp=(sshpass -p $password scp)\n    ssh=(sshpass -p $password ssh)\n  fi\nfi\n\nif [ \"$dev\" != \"lo0\" ] ; then\n  # If you change this filter, be sure to exclude the ssh port (not tcp port 22)\n  filter=\"host $ip and \\(port 80 or port 443\\)\"\n\n  dst=\"$HOME/.wireshark/rui-${ip}.key\"\n  if [ ! -f \"$dst\" ] ; then\n    # Copy key from ESX\n    \"${scp[@]}\" \"${ssh_opts[@]}\" \"${ip}:/etc/vmware/ssl/rui.key\" \"$dst\"\n  fi\n\n  if ! grep \"$ip\" ~/.wireshark/ssl_keys 2>/dev/null ; then\n    # Add key to wireshark ssl_keys config\n    echo \"adding rui.key for $ip\"\n\n    cat <<EOF >> ~/.wireshark/ssl_keys\n\"$ip\",\"443\",\"http\",\"$dst\",\"\"\nEOF\n  fi\nfi\n\necho \"Capturing $dev on $ip...\"\n\n\"${ssh[@]}\" \"${ssh_opts[@]}\" \"$ip\" tcpdump-uw -i \"$dev\" -s0 -v -w - \"$filter\" | wireshark -k -i -\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/scripts/wireshark-vcsa.sh",
    "content": "#!/bin/bash -e\n#\n# Capture SOAP traffic between web client and vpxd on 127.0.0.1:8085.\n#\n# Caveats: tested with VCSA 6.0, unlikely to work for other versions.\n#\n\nset -e\n\ncache_deb() {\n  wget $1\n  ar x *.deb data.tar.gz\n  tar zxf data.tar.gz\n  rm -f data.tar.gz\n  rm -f *.deb\n}\n\ndirname=\"$(dirname $0)\"\nbasename=\"$(basename $0)\"\nbindir=\"${dirname}/.${basename}\"\n\nmkdir -p \"${bindir}\"\n\n# Cache binaries required to run tcpdump on vcsa\nif [ ! -f \"${bindir}/.done\" ]; then\n  pushd ${bindir}\n  cache_deb https://launchpadlibrarian.net/200649143/libssl0.9.8_0.9.8k-7ubuntu8.27_amd64.deb\n  cache_deb https://launchpadlibrarian.net/37430984/libpcap0.8_1.0.0-6_amd64.deb\n  cache_deb https://launchpadlibrarian.net/41774869/tcpdump_4.0.0-6ubuntu3_amd64.deb\n  touch .done\n  popd\nfi\n\nscp=(scp)\nssh=(ssh)\n\n# Extract host from GOVC_URL\nhost=\"$(govc env -x GOVC_HOST)\"\nusername=root\npassword=\"$(govc env GOVC_PASSWORD)\"\n\nif [ -x \"$(which sshpass)\" ] ; then\n  scp=(sshpass -p \"$password\" scp)\n  ssh=(sshpass -p \"$password\" ssh)\nfi\n\nssh_opts=(-o UserKnownHostsFile=/dev/null\n          -o StrictHostKeyChecking=no\n          -o LogLevel=FATAL\n          -o User=${username}\n          -o ControlMaster=no)\ndev=\"lo\"\nfilter=\"port 8085\"\ntcpdump=\"env LD_LIBRARY_PATH=/tmp /tmp/tcpdump\"\n\necho \"Capturing $dev on $host...\"\n\n\"${scp[@]}\" \"${ssh_opts[@]}\" \\\n            \"${bindir}/lib/libcrypto.so.0.9.8\" \\\n            \"${bindir}/usr/lib/libpcap.so.0.8\" \\\n            \"${bindir}/usr/sbin/tcpdump\" \\\n            \"${host}:/tmp\"\n\n\"${ssh[@]}\" \"${ssh_opts[@]}\" \"$host\" ${tcpdump} -i \"$dev\" -s0 -v -w - \"$filter\" | wireshark -k -i - 2>/dev/null\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/session/keep_alive.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage session\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n)\n\ntype keepAlive struct {\n\tsync.Mutex\n\n\troundTripper    soap.RoundTripper\n\tidleTime        time.Duration\n\tnotifyRequest   chan struct{}\n\tnotifyStop      chan struct{}\n\tnotifyWaitGroup sync.WaitGroup\n\n\t// keepAlive executes a request in the background with the purpose of\n\t// keeping the session active. The response for this request is discarded.\n\tkeepAlive func(soap.RoundTripper) error\n}\n\nfunc defaultKeepAlive(roundTripper soap.RoundTripper) error {\n\t_, _ = methods.GetCurrentTime(context.Background(), roundTripper)\n\treturn nil\n}\n\n// KeepAlive wraps the specified soap.RoundTripper and executes a meaningless\n// API request in the background after the RoundTripper has been idle for the\n// specified amount of idle time. 
The keep alive process only starts once a\n// user logs in and runs until the user logs out again.\nfunc KeepAlive(roundTripper soap.RoundTripper, idleTime time.Duration) soap.RoundTripper {\n\treturn KeepAliveHandler(roundTripper, idleTime, defaultKeepAlive)\n}\n\n// KeepAliveHandler works as KeepAlive() does, but the handler param can decide how to handle errors.\n// For example, if connectivity to ESX/VC is down long enough for a session to expire, a handler can choose to\n// Login() on a types.NotAuthenticated error.  If handler returns non-nil, the keep alive go routine will be stopped.\nfunc KeepAliveHandler(roundTripper soap.RoundTripper, idleTime time.Duration, handler func(soap.RoundTripper) error) soap.RoundTripper {\n\tk := &keepAlive{\n\t\troundTripper:  roundTripper,\n\t\tidleTime:      idleTime,\n\t\tnotifyRequest: make(chan struct{}),\n\t}\n\n\tk.keepAlive = handler\n\n\treturn k\n}\n\nfunc (k *keepAlive) start() {\n\tk.Lock()\n\tdefer k.Unlock()\n\n\tif k.notifyStop != nil {\n\t\treturn\n\t}\n\n\t// This channel must be closed to terminate idle timer.\n\tk.notifyStop = make(chan struct{})\n\tk.notifyWaitGroup.Add(1)\n\n\tgo func() {\n\t\tdefer k.notifyWaitGroup.Done()\n\n\t\tfor t := time.NewTimer(k.idleTime); ; {\n\t\t\tselect {\n\t\t\tcase <-k.notifyStop:\n\t\t\t\treturn\n\t\t\tcase <-k.notifyRequest:\n\t\t\t\tt.Reset(k.idleTime)\n\t\t\tcase <-t.C:\n\t\t\t\tif err := k.keepAlive(k.roundTripper); err != nil {\n\t\t\t\t\tk.stop()\n\t\t\t\t}\n\t\t\t\tt = time.NewTimer(k.idleTime)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (k *keepAlive) stop() {\n\tk.Lock()\n\tdefer k.Unlock()\n\n\tif k.notifyStop != nil {\n\t\tclose(k.notifyStop)\n\t\tk.notifyWaitGroup.Wait()\n\t\tk.notifyStop = nil\n\t}\n}\n\nfunc (k *keepAlive) RoundTrip(ctx context.Context, req, res soap.HasFault) error {\n\terr := k.roundTripper.RoundTrip(ctx, req, res)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Start ticker on login, stop ticker on logout.\n\tswitch req.(type) {\n\tcase 
*methods.LoginBody, *methods.LoginExtensionByCertificateBody:\n\t\tk.start()\n\tcase *methods.LogoutBody:\n\t\tk.stop()\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/session/keep_alive_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage session\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"os\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/test\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype testKeepAlive int\n\nfunc (t *testKeepAlive) Func(soap.RoundTripper) error {\n\t*t++\n\treturn nil\n}\n\nfunc newManager(t *testing.T) (*Manager, *url.URL) {\n\tu := test.URL()\n\tif u == nil {\n\t\tt.SkipNow()\n\t}\n\n\tsoapClient := soap.NewClient(u, true)\n\tvimClient, err := vim25.NewClient(context.Background(), soapClient)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treturn NewManager(vimClient), u\n}\n\nfunc TestKeepAlive(t *testing.T) {\n\tvar i testKeepAlive\n\tvar j int\n\n\tm, u := newManager(t)\n\tk := KeepAlive(m.client.RoundTripper, time.Millisecond)\n\tk.(*keepAlive).keepAlive = i.Func\n\tm.client.RoundTripper = k\n\n\t// Expect keep alive to not have triggered yet\n\tif i != 0 {\n\t\tt.Errorf(\"Expected i == 0, got i: %d\", i)\n\t}\n\n\t// Logging in starts keep alive\n\terr := m.Login(context.Background(), u.User)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\ttime.Sleep(2 * time.Millisecond)\n\n\t// Expect keep alive to triggered at least once\n\tif i == 0 {\n\t\tt.Errorf(\"Expected i != 0, got i: 
%d\", i)\n\t}\n\n\tj = int(i)\n\ttime.Sleep(2 * time.Millisecond)\n\n\t// Expect keep alive to triggered at least once more\n\tif int(i) <= j {\n\t\tt.Errorf(\"Expected i > j, got i: %d, j: %d\", i, j)\n\t}\n\n\t// Logging out stops keep alive\n\terr = m.Logout(context.Background())\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tj = int(i)\n\ttime.Sleep(2 * time.Millisecond)\n\n\t// Expect keep alive to have stopped\n\tif int(i) != j {\n\t\tt.Errorf(\"Expected i == j, got i: %d, j: %d\", i, j)\n\t}\n}\n\nfunc testSessionOK(t *testing.T, m *Manager, ok bool) {\n\ts, err := m.UserSession(context.Background())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, file, line, _ := runtime.Caller(1)\n\tprefix := fmt.Sprintf(\"%s:%d\", file, line)\n\n\tif ok && s == nil {\n\t\tt.Fatalf(\"%s: Expected session to be OK, but is invalid\", prefix)\n\t}\n\n\tif !ok && s != nil {\n\t\tt.Fatalf(\"%s: Expected session to be invalid, but is OK\", prefix)\n\t}\n}\n\n// Run with:\n//\n//   env GOVMOMI_KEEPALIVE_TEST=1 go test -timeout=60m -run TestRealKeepAlive\n//\nfunc TestRealKeepAlive(t *testing.T) {\n\tif os.Getenv(\"GOVMOMI_KEEPALIVE_TEST\") != \"1\" {\n\t\tt.SkipNow()\n\t}\n\n\tm1, u1 := newManager(t)\n\tm2, u2 := newManager(t)\n\n\t// Enable keepalive on m2\n\tk := KeepAlive(m2.client.RoundTripper, 10*time.Minute)\n\tm2.client.RoundTripper = k\n\n\t// Expect both sessions to be invalid\n\ttestSessionOK(t, m1, false)\n\ttestSessionOK(t, m2, false)\n\n\t// Logging in starts keep alive\n\tif err := m1.Login(context.Background(), u1.User); err != nil {\n\t\tt.Error(err)\n\t}\n\tif err := m2.Login(context.Background(), u2.User); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Expect both sessions to be valid\n\ttestSessionOK(t, m1, true)\n\ttestSessionOK(t, m2, true)\n\n\t// Wait for m1 to time out\n\tdelay := 31 * time.Minute\n\tfmt.Printf(\"%s: Waiting %d minutes for session to time out...\\n\", time.Now(), int(delay.Minutes()))\n\ttime.Sleep(delay)\n\n\t// Expect m1's session to be 
invalid, m2's session to be valid\n\ttestSessionOK(t, m1, false)\n\ttestSessionOK(t, m2, true)\n}\n\nfunc isNotAuthenticated(err error) bool {\n\tif soap.IsSoapFault(err) {\n\t\tswitch soap.ToSoapFault(err).VimFault().(type) {\n\t\tcase types.NotAuthenticated:\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc isInvalidLogin(err error) bool {\n\tif soap.IsSoapFault(err) {\n\t\tswitch soap.ToSoapFault(err).VimFault().(type) {\n\t\tcase types.InvalidLogin:\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc TestKeepAliveHandler(t *testing.T) {\n\tu := test.URL()\n\tif u == nil {\n\t\tt.SkipNow()\n\t}\n\n\tm1, u1 := newManager(t)\n\tm2, u2 := newManager(t)\n\n\treauth := make(chan bool)\n\n\t// Keep alive handler that will re-login.\n\t// Real-world case: connectivity to ESX/VC is down long enough for the session to expire\n\t// Test-world case: we call TerminateSession below\n\tk := KeepAliveHandler(m2.client.RoundTripper, 2*time.Second, func(roundTripper soap.RoundTripper) error {\n\t\t_, err := methods.GetCurrentTime(context.Background(), roundTripper)\n\t\tif err != nil {\n\t\t\tif isNotAuthenticated(err) {\n\t\t\t\terr = m2.Login(context.Background(), u2.User)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tif isInvalidLogin(err) {\n\t\t\t\t\t\treauth <- false\n\t\t\t\t\t\tt.Log(\"failed to re-authenticate, quitting keep alive handler\")\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\treauth <- true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tm2.client.RoundTripper = k\n\n\t// Logging in starts keep alive\n\tif err := m1.Login(context.Background(), u1.User); err != nil {\n\t\tt.Error(err)\n\t}\n\tif err := m2.Login(context.Background(), u2.User); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Terminate session for m2.  
Note that self terminate fails, so we need 2 sessions for this test.\n\ts, err := m2.UserSession(context.Background())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = m1.TerminateSession(context.Background(), []string{s.Key})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = methods.GetCurrentTime(context.Background(), m2.client)\n\tif err == nil {\n\t\tt.Error(\"expected to fail\")\n\t}\n\n\t// Wait for keepalive to re-authenticate\n\t<-reauth\n\n\t_, err = methods.GetCurrentTime(context.Background(), m2.client)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Clear credentials to test re-authentication failure\n\tu2.User = nil\n\n\ts, err = m2.UserSession(context.Background())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = m1.TerminateSession(context.Background(), []string{s.Key})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Wait for keepalive re-authenticate attempt\n\tresult := <-reauth\n\n\t_, err = methods.GetCurrentTime(context.Background(), m2.client)\n\tif err == nil {\n\t\tt.Error(\"expected to fail\")\n\t}\n\n\tif result {\n\t\tt.Errorf(\"expected reauth to fail\")\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/session/manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage session\n\nimport (\n\t\"context\"\n\t\"net/url\"\n\t\"os\"\n\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\n// Locale defaults to \"en_US\" and can be overridden via this var or the GOVMOMI_LOCALE env var.\n// A value of \"_\" uses the server locale setting.\nvar Locale = os.Getenv(\"GOVMOMI_LOCALE\")\n\nfunc init() {\n\tif Locale == \"_\" {\n\t\tLocale = \"\"\n\t} else if Locale == \"\" {\n\t\tLocale = \"en_US\"\n\t}\n}\n\ntype Manager struct {\n\tclient      *vim25.Client\n\tuserSession *types.UserSession\n}\n\nfunc NewManager(client *vim25.Client) *Manager {\n\tm := Manager{\n\t\tclient: client,\n\t}\n\n\treturn &m\n}\n\nfunc (sm Manager) Reference() types.ManagedObjectReference {\n\treturn *sm.client.ServiceContent.SessionManager\n}\n\nfunc (sm *Manager) SetLocale(ctx context.Context, locale string) error {\n\treq := types.SetLocale{\n\t\tThis:   sm.Reference(),\n\t\tLocale: locale,\n\t}\n\n\t_, err := methods.SetLocale(ctx, sm.client, &req)\n\treturn err\n}\n\nfunc (sm *Manager) Login(ctx context.Context, u *url.Userinfo) error {\n\treq := types.Login{\n\t\tThis:   sm.Reference(),\n\t\tLocale: Locale,\n\t}\n\n\tif u != nil {\n\t\treq.UserName = u.Username()\n\t\tif pw, ok 
:= u.Password(); ok {\n\t\t\treq.Password = pw\n\t\t}\n\t}\n\n\tlogin, err := methods.Login(ctx, sm.client, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsm.userSession = &login.Returnval\n\treturn nil\n}\n\nfunc (sm *Manager) LoginExtensionByCertificate(ctx context.Context, key string, locale string) error {\n\treq := types.LoginExtensionByCertificate{\n\t\tThis:         sm.Reference(),\n\t\tExtensionKey: key,\n\t\tLocale:       locale,\n\t}\n\n\tlogin, err := methods.LoginExtensionByCertificate(ctx, sm.client, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsm.userSession = &login.Returnval\n\treturn nil\n}\n\nfunc (sm *Manager) Logout(ctx context.Context) error {\n\treq := types.Logout{\n\t\tThis: sm.Reference(),\n\t}\n\n\t_, err := methods.Logout(ctx, sm.client, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsm.userSession = nil\n\treturn nil\n}\n\n// UserSession retrieves and returns the SessionManager's CurrentSession field.\n// Nil is returned if the session is not authenticated.\nfunc (sm *Manager) UserSession(ctx context.Context) (*types.UserSession, error) {\n\tvar mgr mo.SessionManager\n\n\tpc := property.DefaultCollector(sm.client)\n\terr := pc.RetrieveOne(ctx, sm.Reference(), []string{\"currentSession\"}, &mgr)\n\tif err != nil {\n\t\t// It's OK if we can't retrieve properties because we're not authenticated\n\t\tif f, ok := err.(types.HasFault); ok {\n\t\t\tswitch f.Fault().(type) {\n\t\t\tcase *types.NotAuthenticated:\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\treturn mgr.CurrentSession, nil\n}\n\nfunc (sm *Manager) TerminateSession(ctx context.Context, sessionId []string) error {\n\treq := types.TerminateSession{\n\t\tThis:      sm.Reference(),\n\t\tSessionId: sessionId,\n\t}\n\n\t_, err := methods.TerminateSession(ctx, sm.client, &req)\n\treturn err\n}\n\n// SessionIsActive checks whether the session that was created at login is\n// still valid. 
This function only works against vCenter.\nfunc (sm *Manager) SessionIsActive(ctx context.Context) (bool, error) {\n\tif sm.userSession == nil {\n\t\treturn false, nil\n\t}\n\n\treq := types.SessionIsActive{\n\t\tThis:      sm.Reference(),\n\t\tSessionID: sm.userSession.Key,\n\t\tUserName:  sm.userSession.UserName,\n\t}\n\n\tactive, err := methods.SessionIsActive(ctx, sm.client, &req)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn active.Returnval, err\n}\n\nfunc (sm *Manager) AcquireGenericServiceTicket(ctx context.Context, spec types.BaseSessionManagerServiceRequestSpec) (*types.SessionManagerGenericServiceTicket, error) {\n\treq := types.AcquireGenericServiceTicket{\n\t\tThis: sm.Reference(),\n\t\tSpec: spec,\n\t}\n\n\tres, err := methods.AcquireGenericServiceTicket(ctx, sm.client, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n\nfunc (sm *Manager) AcquireLocalTicket(ctx context.Context, userName string) (*types.SessionManagerLocalTicket, error) {\n\treq := types.AcquireLocalTicket{\n\t\tThis:     sm.Reference(),\n\t\tUserName: userName,\n\t}\n\n\tres, err := methods.AcquireLocalTicket(ctx, sm.client, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/session/manager_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage session\n\nimport (\n\t\"context\"\n\t\"net/url\"\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/test\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n)\n\nfunc sessionClient(u *url.URL, t *testing.T) *Manager {\n\tsoapClient := soap.NewClient(u, true)\n\tvimClient, err := vim25.NewClient(context.Background(), soapClient)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treturn NewManager(vimClient)\n}\n\nfunc TestLogin(t *testing.T) {\n\tu := test.URL()\n\tif u == nil {\n\t\tt.SkipNow()\n\t}\n\n\tsession := sessionClient(u, t)\n\terr := session.Login(context.Background(), u.User)\n\tif err != nil {\n\t\tt.Errorf(\"Expected no error, got %v\", err)\n\t}\n}\n\nfunc TestLogout(t *testing.T) {\n\tu := test.URL()\n\tif u == nil {\n\t\tt.SkipNow()\n\t}\n\n\tsession := sessionClient(u, t)\n\terr := session.Login(context.Background(), u.User)\n\tif err != nil {\n\t\tt.Error(\"Login Error: \", err)\n\t}\n\n\terr = session.Logout(context.Background())\n\tif err != nil {\n\t\tt.Errorf(\"Expected nil, got %v\", err)\n\t}\n\n\terr = session.Logout(context.Background())\n\tif err == nil {\n\t\tt.Errorf(\"Expected NotAuthenticated, got nil\")\n\t}\n}\n\nfunc TestSessionIsActive(t *testing.T) {\n\tu := test.URL()\n\tif u == nil {\n\t\tt.SkipNow()\n\t}\n\n\tsession := sessionClient(u, t)\n\n\t// Skip test against ESXi -- 
SessionIsActive is not implemented\n\tif session.client.ServiceContent.About.ApiType != \"VirtualCenter\" {\n\t\tt.Skipf(\"Talking to %s instead of %s\", session.client.ServiceContent.About.ApiType, \"VirtualCenter\")\n\t}\n\n\terr := session.Login(context.Background(), u.User)\n\tif err != nil {\n\t\tt.Error(\"Login Error: \", err)\n\t}\n\n\tactive, err := session.SessionIsActive(context.Background())\n\tif err != nil || !active {\n\t\tt.Errorf(\"Expected %t, got %t\", true, active)\n\t\tt.Errorf(\"Expected nil, got %v\", err)\n\t}\n\n\tsession.Logout(context.Background())\n\n\tactive, err = session.SessionIsActive(context.Background())\n\tif err == nil || active {\n\t\tt.Errorf(\"Expected %t, got %t\", false, active)\n\t\tt.Errorf(\"Expected NotAuthenticated, got %v\", err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/task/error.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage task\n\nimport \"github.com/vmware/govmomi/vim25/types\"\n\ntype Error struct {\n\t*types.LocalizedMethodFault\n}\n\n// Error returns the task's localized fault message.\nfunc (e Error) Error() string {\n\treturn e.LocalizedMethodFault.LocalizedMessage\n}\n\nfunc (e Error) Fault() types.BaseMethodFault {\n\treturn e.LocalizedMethodFault.Fault\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/task/wait.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage task\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25/progress\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype taskProgress struct {\n\tinfo *types.TaskInfo\n}\n\nfunc (t taskProgress) Percentage() float32 {\n\treturn float32(t.info.Progress)\n}\n\nfunc (t taskProgress) Detail() string {\n\treturn \"\"\n}\n\nfunc (t taskProgress) Error() error {\n\tif t.info.Error != nil {\n\t\treturn Error{t.info.Error}\n\t}\n\n\treturn nil\n}\n\ntype taskCallback struct {\n\tch   chan<- progress.Report\n\tinfo *types.TaskInfo\n\terr  error\n}\n\nfunc (t *taskCallback) fn(pc []types.PropertyChange) bool {\n\tfor _, c := range pc {\n\t\tif c.Name != \"info\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif c.Op != types.PropertyChangeOpAssign {\n\t\t\tcontinue\n\t\t}\n\n\t\tif c.Val == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tti := c.Val.(types.TaskInfo)\n\t\tt.info = &ti\n\t}\n\n\t// t.info could be nil if pc can't satify the rules above\n\tif t.info == nil {\n\t\treturn false\n\t}\n\n\tpr := taskProgress{t.info}\n\n\t// Store copy of error, so Wait() can return it as well.\n\tt.err = pr.Error()\n\n\tswitch t.info.State {\n\tcase types.TaskInfoStateQueued, types.TaskInfoStateRunning:\n\t\tif t.ch != nil {\n\t\t\t// Don't care if this is dropped\n\t\t\tselect {\n\t\t\tcase t.ch <- 
pr:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t\treturn false\n\tcase types.TaskInfoStateSuccess, types.TaskInfoStateError:\n\t\tif t.ch != nil {\n\t\t\t// Last one must always be delivered\n\t\t\tt.ch <- pr\n\t\t}\n\t\treturn true\n\tdefault:\n\t\tpanic(\"unknown state: \" + t.info.State)\n\t}\n}\n\n// Wait waits for a task to finish with either success or failure. It does so\n// by waiting for the \"info\" property of task managed object to change. The\n// function returns when it finds the task in the \"success\" or \"error\" state.\n// In the former case, the return value is nil. In the latter case the return\n// value is an instance of this package's Error struct.\n//\n// Any error returned while waiting for property changes causes the function to\n// return immediately and propagate the error.\n//\n// If the progress.Sinker argument is specified, any progress updates for the\n// task are sent here. The completion percentage is passed through directly.\n// The detail for the progress update is set to an empty string. If the task\n// finishes in the error state, the error instance is passed through as well.\n// Note that this error is the same error that is returned by this function.\n//\nfunc Wait(ctx context.Context, ref types.ManagedObjectReference, pc *property.Collector, s progress.Sinker) (*types.TaskInfo, error) {\n\tcb := &taskCallback{}\n\n\t// Include progress sink if specified\n\tif s != nil {\n\t\tcb.ch = s.Sink()\n\t\tdefer close(cb.ch)\n\t}\n\n\terr := property.Wait(ctx, pc, ref, []string{\"info\"}, cb.fn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cb.info, cb.err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/task/wait_test.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage task\n\nimport (\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nfunc TestCallbackFn(t *testing.T) {\n\tcb := &taskCallback{}\n\n\tfor _, o := range []types.PropertyChangeOp{types.PropertyChangeOpAdd, types.PropertyChangeOpRemove, types.PropertyChangeOpAssign, types.PropertyChangeOpIndirectRemove} {\n\t\tfor _, s := range []types.TaskInfoState{types.TaskInfoStateQueued, types.TaskInfoStateRunning, types.TaskInfoStateSuccess, types.TaskInfoStateError} {\n\t\t\tc := types.PropertyChange{\n\t\t\t\tName: \"info\",\n\t\t\t\tOp:   o,\n\t\t\t\tVal: types.TaskInfo{\n\t\t\t\t\tState: s,\n\t\t\t\t},\n\t\t\t}\n\t\t\tt.Logf(\"Op: %s State: %s\", o, s)\n\t\t\tcb.fn([]types.PropertyChange{c})\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/test/doc.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n/*\nPackage test contains functions that implement common functionality between\ntests. The code (non-test) in this package intentionally does not take any\ndependencies outside the vim25 tree.\n*/\npackage test\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/test/functional/helper.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage functional\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/find\"\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/test\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype Helper struct {\n\t*testing.T\n\n\tc   *vim25.Client\n\tf   *find.Finder\n\tfns []func()\n}\n\nfunc NewHelper(t *testing.T) *Helper {\n\th := &Helper{\n\t\tT: t,\n\n\t\tc:   test.NewAuthenticatedClient(t),\n\t\tfns: make([]func(), 0),\n\t}\n\n\th.f = find.NewFinder(h.c, true)\n\n\treturn h\n}\n\nfunc (h *Helper) Defer(fn func()) {\n\th.fns = append(h.fns, fn)\n}\n\nfunc (h *Helper) Teardown() {\n\tfor _, fn := range h.fns {\n\t\tfn()\n\t}\n}\n\nfunc (h *Helper) RequireVirtualCenter() {\n\tvar expect = \"VirtualCenter\"\n\tvar actual = h.c.ServiceContent.About.ApiType\n\tif actual != expect {\n\t\th.Skipf(\"Requires %s, running against %s\", expect, actual)\n\t}\n}\n\nfunc (h *Helper) Datacenter() *object.Datacenter {\n\tdc, err := h.f.DefaultDatacenter(context.Background())\n\tif err != nil {\n\t\th.Fatal(err)\n\t}\n\n\th.f.SetDatacenter(dc)\n\n\treturn dc\n}\n\nfunc (h *Helper) DatacenterFolders() *object.DatacenterFolders {\n\tdf, err := h.Datacenter().Folders(context.Background())\n\tif err != nil 
{\n\t\th.Fatal(err)\n\t}\n\n\treturn df\n}\n\nfunc (h *Helper) ComputeResource() *object.ComputeResource {\n\tcr, err := h.f.DefaultComputeResource(context.Background())\n\tif err != nil {\n\t\th.Fatal(err)\n\t}\n\n\treturn cr\n}\n\nfunc (h *Helper) LocalDatastores(ctx context.Context, cr *object.ComputeResource) ([]*object.Datastore, error) {\n\t// List datastores for compute resource\n\tdss, err := cr.Datastores(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Filter local datastores\n\tvar ldss []*object.Datastore\n\tfor _, ds := range dss {\n\t\tvar mds mo.Datastore\n\t\terr = property.DefaultCollector(h.c).RetrieveOne(ctx, ds.Reference(), nil, &mds)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tswitch i := mds.Info.(type) {\n\t\tcase *types.VmfsDatastoreInfo:\n\t\t\tif i.Vmfs.Local != nil && *i.Vmfs.Local == true {\n\t\t\t\tbreak\n\t\t\t}\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\n\t\tds.InventoryPath = mds.Name\n\t\tldss = append(ldss, ds)\n\t}\n\n\treturn ldss, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/test/functional/issue_242_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage functional\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nfunc TestIssue242(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\th := NewHelper(t)\n\tdefer h.Teardown()\n\n\th.RequireVirtualCenter()\n\n\tdf, err := h.Datacenter().Folders(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcr := h.ComputeResource()\n\n\t// Get local datastores for compute resource\n\tdss, err := h.LocalDatastores(ctx, cr)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(dss) == 0 {\n\t\tt.Fatalf(\"No local datastores\")\n\t}\n\n\t// Get root resource pool for compute resource\n\trp, err := cr.ResourcePool(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tspec := types.VirtualMachineConfigSpec{\n\t\tName:     fmt.Sprintf(\"govmomi-test-%s\", time.Now().Format(time.RFC3339)),\n\t\tFiles:    &types.VirtualMachineFileInfo{VmPathName: fmt.Sprintf(\"[%s]\", dss[0].Name())},\n\t\tNumCPUs:  1,\n\t\tMemoryMB: 32,\n\t}\n\n\t// Create new VM\n\ttask, err := df.VmFolder.CreateVM(context.Background(), spec, rp, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tinfo, err := task.WaitForResult(context.Background(), nil)\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\n\tvm := object.NewVirtualMachine(h.c, info.Result.(types.ManagedObjectReference))\n\tdefer func() {\n\t\ttask, err := vm.Destroy(context.Background())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\terr = task.Wait(context.Background())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\t// Mark VM as template\n\terr = vm.MarkAsTemplate(context.Background())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Get \"environmentBrowser\" property for VM template\n\tvar mvm mo.VirtualMachine\n\terr = property.DefaultCollector(h.c).RetrieveOne(ctx, vm.Reference(), []string{\"environmentBrowser\"}, &mvm)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/test/helper.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage test\n\nimport (\n\t\"context\"\n\t\"net/url\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\n// URL parses the GOVMOMI_TEST_URL environment variable if set.\nfunc URL() *url.URL {\n\ts := os.Getenv(\"GOVMOMI_TEST_URL\")\n\tif s == \"\" {\n\t\treturn nil\n\t}\n\tu, err := soap.ParseURL(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn u\n}\n\n// NewAuthenticatedClient creates a new vim25.Client, authenticates the user\n// specified in the test URL, and returns it.\nfunc NewAuthenticatedClient(t *testing.T) *vim25.Client {\n\tu := URL()\n\tif u == nil {\n\t\tt.SkipNow()\n\t}\n\n\tsoapClient := soap.NewClient(u, true)\n\tvimClient, err := vim25.NewClient(context.Background(), soapClient)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treq := types.Login{\n\t\tThis: *vimClient.ServiceContent.SessionManager,\n\t}\n\n\treq.UserName = u.User.Username()\n\tif pw, ok := u.User.Password(); ok {\n\t\treq.Password = pw\n\t}\n\n\t_, err = methods.Login(context.Background(), vimClient, &req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treturn vimClient\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/units/size.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage units\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n)\n\ntype ByteSize int64\n\nconst (\n\t_  = iota\n\tKB = 1 << (10 * iota)\n\tMB\n\tGB\n\tTB\n\tPB\n\tEB\n)\n\nfunc (b ByteSize) String() string {\n\tswitch {\n\tcase b >= EB:\n\t\treturn fmt.Sprintf(\"%.1fEB\", float32(b)/EB)\n\tcase b >= PB:\n\t\treturn fmt.Sprintf(\"%.1fPB\", float32(b)/PB)\n\tcase b >= TB:\n\t\treturn fmt.Sprintf(\"%.1fTB\", float32(b)/TB)\n\tcase b >= GB:\n\t\treturn fmt.Sprintf(\"%.1fGB\", float32(b)/GB)\n\tcase b >= MB:\n\t\treturn fmt.Sprintf(\"%.1fMB\", float32(b)/MB)\n\tcase b >= KB:\n\t\treturn fmt.Sprintf(\"%.1fKB\", float32(b)/KB)\n\t}\n\treturn fmt.Sprintf(\"%dB\", b)\n}\n\ntype FileSize int64\n\nfunc (b FileSize) String() string {\n\tswitch {\n\tcase b >= EB:\n\t\treturn fmt.Sprintf(\"%.1fE\", float32(b)/EB)\n\tcase b >= PB:\n\t\treturn fmt.Sprintf(\"%.1fP\", float32(b)/PB)\n\tcase b >= TB:\n\t\treturn fmt.Sprintf(\"%.1fT\", float32(b)/TB)\n\tcase b >= GB:\n\t\treturn fmt.Sprintf(\"%.1fG\", float32(b)/GB)\n\tcase b >= MB:\n\t\treturn fmt.Sprintf(\"%.1fM\", float32(b)/MB)\n\tcase b >= KB:\n\t\treturn fmt.Sprintf(\"%.1fK\", float32(b)/KB)\n\t}\n\treturn fmt.Sprintf(\"%d\", b)\n}\n\nvar bytesRegexp = regexp.MustCompile(`^(?i)(\\d+)([BKMGTPE]?)(ib|b)?$`)\n\nfunc (b *ByteSize) Set(s string) error {\n\tm := bytesRegexp.FindStringSubmatch(s)\n\tif 
len(m) == 0 {\n\t\treturn errors.New(\"invalid byte value\")\n\t}\n\n\ti, err := strconv.ParseInt(m[1], 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*b = ByteSize(i)\n\n\tswitch m[2] {\n\tcase \"B\", \"b\", \"\":\n\tcase \"K\", \"k\":\n\t\t*b *= ByteSize(KB)\n\tcase \"M\", \"m\":\n\t\t*b *= ByteSize(MB)\n\tcase \"G\", \"g\":\n\t\t*b *= ByteSize(GB)\n\tcase \"T\", \"t\":\n\t\t*b *= ByteSize(TB)\n\tcase \"P\", \"p\":\n\t\t*b *= ByteSize(PB)\n\tcase \"E\", \"e\":\n\t\t*b *= ByteSize(EB)\n\tdefault:\n\t\treturn errors.New(\"invalid byte suffix\")\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/units/size_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage units\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"testing\"\n)\n\nfunc TestMB(t *testing.T) {\n\tb := ByteSize(1024 * 1024)\n\texpected := \"1.0MB\"\n\tif b.String() != expected {\n\t\tt.Errorf(\"Expected '%v' but got '%v'\", expected, b)\n\t}\n}\n\nfunc TestTenMB(t *testing.T) {\n\tb := ByteSize(10 * 1024 * 1024)\n\tactual := fmt.Sprintf(\"%s\", b)\n\texpected := \"10.0MB\"\n\tif actual != expected {\n\t\tt.Errorf(\"Expected '%v' but got '%v'\", expected, actual)\n\t}\n}\n\nfunc assertEquals(t *testing.T, expected string, actual ByteSize) {\n\tif expected != actual.String() {\n\t\tt.Errorf(\"Expected '%v' but got '%v'\", expected, actual.String())\n\t}\n}\n\nfunc TestByteSize(t *testing.T) {\n\tassertEquals(t, \"1B\", ByteSize(1))\n\tassertEquals(t, \"10B\", ByteSize(10))\n\tassertEquals(t, \"100B\", ByteSize(100))\n\tassertEquals(t, \"1000B\", ByteSize(1000))\n\tassertEquals(t, \"1.0KB\", ByteSize(1024))\n\tassertEquals(t, \"1.0MB\", ByteSize(1024*1024))\n\tassertEquals(t, \"1.0MB\", ByteSize(1048576))\n\tassertEquals(t, \"10.0MB\", ByteSize(10*math.Pow(1024, 2)))\n\tassertEquals(t, \"1.0GB\", ByteSize(1024*1024*1024))\n\tassertEquals(t, \"1.0TB\", ByteSize(1024*1024*1024*1024))\n\tassertEquals(t, \"1.0TB\", ByteSize(1048576*1048576))\n\tassertEquals(t, \"1.0PB\", ByteSize(1024*1024*1024*1024*1024))\n\tassertEquals(t, \"1.0EB\", 
ByteSize(1024*1024*1024*1024*1024*1024))\n\tassertEquals(t, \"1.0EB\", ByteSize(1048576*1048576*1048576))\n}\n\nfunc TestByteSizeSet(t *testing.T) {\n\tvar tests = []struct {\n\t\tIn     string\n\t\tOutStr string\n\t\tOut    ByteSize\n\t}{\n\t\t{\n\t\t\tIn:     \"345\",\n\t\t\tOutStr: \"345B\",\n\t\t\tOut:    345.0,\n\t\t},\n\t\t{\n\t\t\tIn:     \"345b\",\n\t\t\tOutStr: \"345B\",\n\t\t\tOut:    345.0,\n\t\t},\n\t\t{\n\t\t\tIn:     \"345K\",\n\t\t\tOutStr: \"345.0KB\",\n\t\t\tOut:    345 * KB,\n\t\t},\n\t\t{\n\t\t\tIn:     \"345kb\",\n\t\t\tOutStr: \"345.0KB\",\n\t\t\tOut:    345 * KB,\n\t\t},\n\t\t{\n\t\t\tIn:     \"345kib\",\n\t\t\tOutStr: \"345.0KB\",\n\t\t\tOut:    345 * KB,\n\t\t},\n\t\t{\n\t\t\tIn:     \"345KiB\",\n\t\t\tOutStr: \"345.0KB\",\n\t\t\tOut:    345 * KB,\n\t\t},\n\t\t{\n\t\t\tIn:     \"345M\",\n\t\t\tOutStr: \"345.0MB\",\n\t\t\tOut:    345 * MB,\n\t\t},\n\t\t{\n\t\t\tIn:     \"345G\",\n\t\t\tOutStr: \"345.0GB\",\n\t\t\tOut:    345 * GB,\n\t\t},\n\t\t{\n\t\t\tIn:     \"345T\",\n\t\t\tOutStr: \"345.0TB\",\n\t\t\tOut:    345 * TB,\n\t\t},\n\t\t{\n\t\t\tIn:     \"345P\",\n\t\t\tOutStr: \"345.0PB\",\n\t\t\tOut:    345 * PB,\n\t\t},\n\t\t{\n\t\t\tIn:     \"3E\",\n\t\t\tOutStr: \"3.0EB\",\n\t\t\tOut:    3 * EB,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tvar v ByteSize\n\t\terr := v.Set(test.In)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Error: %s [%v]\", err, test.In)\n\t\t\tcontinue\n\t\t}\n\n\t\tif v != test.Out {\n\t\t\tt.Errorf(\"Out: expect '%v' actual '%v'\", test.Out, v)\n\t\t\tcontinue\n\t\t}\n\n\t\tif v.String() != test.OutStr {\n\t\t\tt.Errorf(\"String: expect '%v' actual '%v'\", test.OutStr, v.String())\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/view/container_view.go",
    "content": "/*\nCopyright (c) 2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage view\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ContainerView struct {\n\tobject.Common\n}\n\nfunc NewContainerView(c *vim25.Client, ref types.ManagedObjectReference) *ContainerView {\n\treturn &ContainerView{\n\t\tCommon: object.NewCommon(c, ref),\n\t}\n}\n\nfunc (v ContainerView) Destroy(ctx context.Context) error {\n\treq := types.DestroyView{\n\t\tThis: v.Reference(),\n\t}\n\t_, err := methods.DestroyView(ctx, v.Client(), &req)\n\treturn err\n}\n\n// Retrieve populates dst as property.Collector.Retrieve does, for all entities in the view of types specified by kind.\nfunc (v ContainerView) Retrieve(ctx context.Context, kind []string, ps []string, dst interface{}) error {\n\tpc := property.DefaultCollector(v.Client())\n\n\tospec := types.ObjectSpec{\n\t\tObj:  v.Reference(),\n\t\tSkip: types.NewBool(true),\n\t\tSelectSet: []types.BaseSelectionSpec{\n\t\t\t&types.TraversalSpec{\n\t\t\t\tType: v.Reference().Type,\n\t\t\t\tPath: \"view\",\n\t\t\t},\n\t\t},\n\t}\n\n\tvar pspec []types.PropertySpec\n\n\tif len(kind) == 0 {\n\t\tkind = []string{\"ManagedEntity\"}\n\t}\n\n\tfor _, t := range kind 
{\n\t\tspec := types.PropertySpec{\n\t\t\tType: t,\n\t\t}\n\n\t\tif len(ps) == 0 {\n\t\t\tspec.All = types.NewBool(true)\n\t\t} else {\n\t\t\tspec.PathSet = ps\n\t\t}\n\n\t\tpspec = append(pspec, spec)\n\t}\n\n\treq := types.RetrieveProperties{\n\t\tSpecSet: []types.PropertyFilterSpec{\n\t\t\t{\n\t\t\t\tObjectSet: []types.ObjectSpec{ospec},\n\t\t\t\tPropSet:   pspec,\n\t\t\t},\n\t\t},\n\t}\n\n\tres, err := pc.RetrieveProperties(ctx, req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif d, ok := dst.(*[]types.ObjectContent); ok {\n\t\t*d = res.Returnval\n\t\treturn nil\n\t}\n\n\treturn mo.LoadRetrievePropertiesResponse(res, dst)\n}\n\n// RetrieveWithFilter populates dst as Retrieve does, but only for entities matching the given filter.\nfunc (v ContainerView) RetrieveWithFilter(ctx context.Context, kind []string, ps []string, dst interface{}, filter property.Filter) error {\n\tif len(filter) == 0 {\n\t\treturn v.Retrieve(ctx, kind, ps, dst)\n\t}\n\n\tvar content []types.ObjectContent\n\n\terr := v.Retrieve(ctx, kind, filter.Keys(), &content)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tobjs := filter.MatchObjectContent(content)\n\n\tpc := property.DefaultCollector(v.Client())\n\n\treturn pc.Retrieve(ctx, objs, ps, dst)\n}\n\n// Find returns object references for entities of type kind, matching the given filter.\nfunc (v ContainerView) Find(ctx context.Context, kind []string, filter property.Filter) ([]types.ManagedObjectReference, error) {\n\tif len(filter) == 0 {\n\t\t// Ensure we have at least 1 filter to avoid retrieving all properties.\n\t\tfilter = property.Filter{\"name\": \"*\"}\n\t}\n\n\tvar content []types.ObjectContent\n\n\terr := v.Retrieve(ctx, kind, filter.Keys(), &content)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn filter.MatchObjectContent(content), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/view/list_view.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage view\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype ListView struct {\n\tobject.Common\n}\n\nfunc NewListView(c *vim25.Client, ref types.ManagedObjectReference) *ListView {\n\treturn &ListView{\n\t\tCommon: object.NewCommon(c, ref),\n\t}\n}\n\nfunc (v ListView) Destroy(ctx context.Context) error {\n\treq := types.DestroyView{\n\t\tThis: v.Reference(),\n\t}\n\t_, err := methods.DestroyView(ctx, v.Client(), &req)\n\treturn err\n}\n\nfunc (v ListView) Add(ctx context.Context, refs []types.ManagedObjectReference) error {\n\treq := types.ModifyListView{\n\t\tThis: v.Reference(),\n\t\tAdd:  refs,\n\t}\n\t_, err := methods.ModifyListView(ctx, v.Client(), &req)\n\treturn err\n}\n\nfunc (v ListView) Remove(ctx context.Context, refs []types.ManagedObjectReference) error {\n\treq := types.ModifyListView{\n\t\tThis:   v.Reference(),\n\t\tRemove: refs,\n\t}\n\t_, err := methods.ModifyListView(ctx, v.Client(), &req)\n\treturn err\n}\n\nfunc (v ListView) Reset(ctx context.Context, refs []types.ManagedObjectReference) error {\n\treq := types.ResetListView{\n\t\tThis: v.Reference(),\n\t\tObj:  refs,\n\t}\n\t_, err := methods.ResetListView(ctx, v.Client(), &req)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/view/manager.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage view\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/object\"\n\t\"github.com/vmware/govmomi/vim25\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype Manager struct {\n\tobject.Common\n}\n\nfunc NewManager(c *vim25.Client) *Manager {\n\tm := Manager{\n\t\tobject.NewCommon(c, *c.ServiceContent.ViewManager),\n\t}\n\n\treturn &m\n}\n\nfunc (m Manager) CreateListView(ctx context.Context, objects []types.ManagedObjectReference) (*ListView, error) {\n\treq := types.CreateListView{\n\t\tThis: m.Common.Reference(),\n\t\tObj:  objects,\n\t}\n\n\tres, err := methods.CreateListView(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewListView(m.Client(), res.Returnval), nil\n}\n\nfunc (m Manager) CreateContainerView(ctx context.Context, container types.ManagedObjectReference, managedObjectTypes []string, recursive bool) (*ContainerView, error) {\n\n\treq := types.CreateContainerView{\n\t\tThis:      m.Common.Reference(),\n\t\tContainer: container,\n\t\tRecursive: recursive,\n\t\tType:      managedObjectTypes,\n\t}\n\n\tres, err := methods.CreateContainerView(ctx, m.Client(), &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewContainerView(m.Client(), res.Returnval), nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/client.go",
    "content": "/*\nCopyright (c) 2015-2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vim25\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\n// Client is a tiny wrapper around the vim25/soap Client that stores session\n// specific state (i.e. state that only needs to be retrieved once after the\n// client has been created). 
This means the client can be reused after\n// serialization without performing additional requests for initialization.\ntype Client struct {\n\t*soap.Client\n\n\tServiceContent types.ServiceContent\n\n\t// RoundTripper is a separate field such that the client's implementation of\n\t// the RoundTripper interface can be wrapped by separate implementations for\n\t// extra functionality (for example, reauthentication on session timeout).\n\tRoundTripper soap.RoundTripper\n}\n\n// NewClient creates and returns a new client with the ServiceContent field\n// filled in.\nfunc NewClient(ctx context.Context, rt soap.RoundTripper) (*Client, error) {\n\tserviceContent, err := methods.GetServiceContent(ctx, rt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := Client{\n\t\tServiceContent: serviceContent,\n\t\tRoundTripper:   rt,\n\t}\n\n\t// Set client if it happens to be a soap.Client\n\tif sc, ok := rt.(*soap.Client); ok {\n\t\tc.Client = sc\n\t}\n\n\treturn &c, nil\n}\n\n// RoundTrip dispatches to the RoundTripper field.\nfunc (c *Client) RoundTrip(ctx context.Context, req, res soap.HasFault) error {\n\treturn c.RoundTripper.RoundTrip(ctx, req, res)\n}\n\ntype marshaledClient struct {\n\tSoapClient     *soap.Client\n\tServiceContent types.ServiceContent\n}\n\nfunc (c *Client) MarshalJSON() ([]byte, error) {\n\tm := marshaledClient{\n\t\tSoapClient:     c.Client,\n\t\tServiceContent: c.ServiceContent,\n\t}\n\n\treturn json.Marshal(m)\n}\n\nfunc (c *Client) UnmarshalJSON(b []byte) error {\n\tvar m marshaledClient\n\n\terr := json.Unmarshal(b, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*c = Client{\n\t\tClient:         m.SoapClient,\n\t\tServiceContent: m.ServiceContent,\n\t\tRoundTripper:   m.SoapClient,\n\t}\n\n\treturn nil\n}\n\n// Valid returns whether or not the client is valid and ready for use.\n// This should be called after unmarshalling the client.\nfunc (c *Client) Valid() bool {\n\tif c == nil {\n\t\treturn false\n\t}\n\n\tif c.Client == nil {\n\t\treturn false\n\t}\n\n\t// Use arbitrary pointer field in the service content.\n\t// Doesn't matter which one, as long as it is populated by default.\n\tif c.ServiceContent.SessionManager == nil {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n// IsVC returns true if we are connected to a vCenter\nfunc (c *Client) IsVC() bool {\n\treturn c.ServiceContent.About.ApiType == \"VirtualCenter\"\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/client_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vim25\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"net/url\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\n// Duplicated to prevent cyclic dependency...\nfunc testURL(t *testing.T) *url.URL {\n\ts := os.Getenv(\"GOVMOMI_TEST_URL\")\n\tif s == \"\" {\n\t\tt.SkipNow()\n\t}\n\tu, err := soap.ParseURL(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn u\n}\n\nfunc sessionLogin(t *testing.T, c *Client) {\n\treq := types.Login{\n\t\tThis: *c.ServiceContent.SessionManager,\n\t}\n\n\tu := testURL(t).User\n\treq.UserName = u.Username()\n\tif pw, ok := u.Password(); ok {\n\t\treq.Password = pw\n\t}\n\n\t_, err := methods.Login(context.Background(), c, &req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc sessionCheck(t *testing.T, c *Client) {\n\tvar mgr mo.SessionManager\n\n\terr := mo.RetrieveProperties(context.Background(), c, c.ServiceContent.PropertyCollector, *c.ServiceContent.SessionManager, &mgr)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestClientSerialization(t *testing.T) {\n\tvar c1, c2 *Client\n\n\tsoapClient := soap.NewClient(testURL(t), true)\n\tc1, err := NewClient(context.Background(), soapClient)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// 
Login\n\tsessionLogin(t, c1)\n\tsessionCheck(t, c1)\n\n\t// Serialize/deserialize\n\tb, err := json.Marshal(c1)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tc2 = &Client{}\n\terr = json.Unmarshal(b, c2)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Check the session is still valid\n\tsessionCheck(t, c2)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/debug/debug.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage debug\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"path\"\n)\n\n// Provider specified the interface types must implement to be used as a\n// debugging sink. Having multiple such sink implementations allows it to be\n// changed externally (for example when running tests).\ntype Provider interface {\n\tNewFile(s string) io.WriteCloser\n\tFlush()\n}\n\nvar currentProvider Provider = nil\n\nfunc SetProvider(p Provider) {\n\tif currentProvider != nil {\n\t\tcurrentProvider.Flush()\n\t}\n\tcurrentProvider = p\n}\n\n// Enabled returns whether debugging is enabled or not.\nfunc Enabled() bool {\n\treturn currentProvider != nil\n}\n\n// NewFile dispatches to the current provider's NewFile function.\nfunc NewFile(s string) io.WriteCloser {\n\treturn currentProvider.NewFile(s)\n}\n\n// Flush dispatches to the current provider's Flush function.\nfunc Flush() {\n\tcurrentProvider.Flush()\n}\n\n// FileProvider implements a debugging provider that creates a real file for\n// every call to NewFile. 
It maintains a list of all files that it creates,\n// such that it can close them when its Flush function is called.\ntype FileProvider struct {\n\tPath string\n\n\tfiles []*os.File\n}\n\nfunc (fp *FileProvider) NewFile(p string) io.WriteCloser {\n\tf, err := os.Create(path.Join(fp.Path, p))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfp.files = append(fp.files, f)\n\n\treturn f\n}\n\nfunc (fp *FileProvider) Flush() {\n\tfor _, f := range fp.files {\n\t\tf.Close()\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/doc.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n/*\nPackage vim25 provides a minimal client implementation to use with other\npackages in the vim25 tree. The code in this package intentionally does not\ntake any dependencies outside the vim25 tree.\n\nThe client implementation in this package embeds the soap.Client structure.\nAdditionally, it stores the value of the session's ServiceContent object. This\nobject stores references to a variety of subsystems, such as the root property\ncollector, the session manager, and the search index. The client is fully\nfunctional after serialization and deserialization, without the need for\nadditional requests for initialization.\n*/\npackage vim25\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/methods/fault_test.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage methods\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n\t\"github.com/vmware/govmomi/vim25/xml\"\n)\n\nvar invalidLoginFault = `\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<soapenv:Envelope xmlns:soapenc=\"http://schemas.xmlsoap.org/soap/encoding/\"\n xmlns:soapenv=\"http://schemas.xmlsoap.org/soap/envelope/\"\n xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n<soapenv:Body>\n<soapenv:Fault><faultcode>ServerFaultCode</faultcode><faultstring>Cannot complete login due to an incorrect user name or password.</faultstring><detail><InvalidLoginFault xmlns=\"urn:vim25\" xsi:type=\"InvalidLogin\"></InvalidLoginFault></detail></soapenv:Fault>\n</soapenv:Body>\n</soapenv:Envelope>`\n\ntype TestBody struct {\n\tFault *soap.Fault `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc TestFaultDetail(t *testing.T) {\n\tbody := TestBody{}\n\tenv := soap.Envelope{Body: &body}\n\n\tdec := xml.NewDecoder(bytes.NewReader([]byte(invalidLoginFault)))\n\tdec.TypeFunc = types.TypeFunc()\n\n\terr := dec.Decode(&env)\n\tif err != nil {\n\t\tt.Fatalf(\"Decode: %s\", err)\n\t}\n\n\tif body.Fault == nil {\n\t\tt.Fatal(\"Expected fault\")\n\t}\n\n\tif _, ok := body.Fault.Detail.Fault.(types.InvalidLogin); !ok 
{\n\t\tt.Fatalf(\"Expected InvalidLogin, got: %#v\", body.Fault.Detail.Fault)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/methods/internal.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage methods\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype RetrieveDynamicTypeManagerBody struct {\n\tReq    *types.RetrieveDynamicTypeManager         `xml:\"urn:vim25 RetrieveDynamicTypeManager\"`\n\tRes    *types.RetrieveDynamicTypeManagerResponse `xml:\"urn:vim25 RetrieveDynamicTypeManagerResponse\"`\n\tFault_ *soap.Fault\n}\n\nfunc (b *RetrieveDynamicTypeManagerBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RetrieveDynamicTypeManager(ctx context.Context, r soap.RoundTripper, req *types.RetrieveDynamicTypeManager) (*types.RetrieveDynamicTypeManagerResponse, error) {\n\tvar reqBody, resBody RetrieveDynamicTypeManagerBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RetrieveManagedMethodExecuterBody struct {\n\tReq    *types.RetrieveManagedMethodExecuter         `xml:\"urn:vim25 RetrieveManagedMethodExecuter\"`\n\tRes    *types.RetrieveManagedMethodExecuterResponse `xml:\"urn:vim25 RetrieveManagedMethodExecuterResponse\"`\n\tFault_ *soap.Fault\n}\n\nfunc (b *RetrieveManagedMethodExecuterBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RetrieveManagedMethodExecuter(ctx context.Context, r soap.RoundTripper, req *types.RetrieveManagedMethodExecuter) 
(*types.RetrieveManagedMethodExecuterResponse, error) {\n\tvar reqBody, resBody RetrieveManagedMethodExecuterBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype DynamicTypeMgrQueryMoInstancesBody struct {\n\tReq    *types.DynamicTypeMgrQueryMoInstances         `xml:\"urn:vim25 DynamicTypeMgrQueryMoInstances\"`\n\tRes    *types.DynamicTypeMgrQueryMoInstancesResponse `xml:\"urn:vim25 DynamicTypeMgrQueryMoInstancesResponse\"`\n\tFault_ *soap.Fault\n}\n\nfunc (b *DynamicTypeMgrQueryMoInstancesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc DynamicTypeMgrQueryMoInstances(ctx context.Context, r soap.RoundTripper, req *types.DynamicTypeMgrQueryMoInstances) (*types.DynamicTypeMgrQueryMoInstancesResponse, error) {\n\tvar reqBody, resBody DynamicTypeMgrQueryMoInstancesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype DynamicTypeMgrQueryTypeInfoBody struct {\n\tReq    *types.DynamicTypeMgrQueryTypeInfo         `xml:\"urn:vim25 DynamicTypeMgrQueryTypeInfo\"`\n\tRes    *types.DynamicTypeMgrQueryTypeInfoResponse `xml:\"urn:vim25 DynamicTypeMgrQueryTypeInfoResponse\"`\n\tFault_ *soap.Fault\n}\n\nfunc (b *DynamicTypeMgrQueryTypeInfoBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc DynamicTypeMgrQueryTypeInfo(ctx context.Context, r soap.RoundTripper, req *types.DynamicTypeMgrQueryTypeInfo) (*types.DynamicTypeMgrQueryTypeInfoResponse, error) {\n\tvar reqBody, resBody DynamicTypeMgrQueryTypeInfoBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ExecuteSoapBody struct {\n\tReq    *types.ExecuteSoap         `xml:\"urn:vim25 ExecuteSoap\"`\n\tRes    *types.ExecuteSoapResponse `xml:\"urn:vim25 ExecuteSoapResponse\"`\n\tFault_ *soap.Fault\n}\n\nfunc (b 
*ExecuteSoapBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ExecuteSoap(ctx context.Context, r soap.RoundTripper, req *types.ExecuteSoap) (*types.ExecuteSoapResponse, error) {\n\tvar reqBody, resBody ExecuteSoapBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/methods/methods.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage methods\n\nimport (\n\t\"context\"\n\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype AbdicateDomOwnershipBody struct {\n\tReq    *types.AbdicateDomOwnership         `xml:\"urn:vim25 AbdicateDomOwnership,omitempty\"`\n\tRes    *types.AbdicateDomOwnershipResponse `xml:\"urn:vim25 AbdicateDomOwnershipResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AbdicateDomOwnershipBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AbdicateDomOwnership(ctx context.Context, r soap.RoundTripper, req *types.AbdicateDomOwnership) (*types.AbdicateDomOwnershipResponse, error) {\n\tvar reqBody, resBody AbdicateDomOwnershipBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AcknowledgeAlarmBody struct {\n\tReq    *types.AcknowledgeAlarm         `xml:\"urn:vim25 AcknowledgeAlarm,omitempty\"`\n\tRes    *types.AcknowledgeAlarmResponse `xml:\"urn:vim25 AcknowledgeAlarmResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AcknowledgeAlarmBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AcknowledgeAlarm(ctx 
context.Context, r soap.RoundTripper, req *types.AcknowledgeAlarm) (*types.AcknowledgeAlarmResponse, error) {\n\tvar reqBody, resBody AcknowledgeAlarmBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AcquireCimServicesTicketBody struct {\n\tReq    *types.AcquireCimServicesTicket         `xml:\"urn:vim25 AcquireCimServicesTicket,omitempty\"`\n\tRes    *types.AcquireCimServicesTicketResponse `xml:\"urn:vim25 AcquireCimServicesTicketResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AcquireCimServicesTicketBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AcquireCimServicesTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireCimServicesTicket) (*types.AcquireCimServicesTicketResponse, error) {\n\tvar reqBody, resBody AcquireCimServicesTicketBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AcquireCloneTicketBody struct {\n\tReq    *types.AcquireCloneTicket         `xml:\"urn:vim25 AcquireCloneTicket,omitempty\"`\n\tRes    *types.AcquireCloneTicketResponse `xml:\"urn:vim25 AcquireCloneTicketResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AcquireCloneTicketBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AcquireCloneTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireCloneTicket) (*types.AcquireCloneTicketResponse, error) {\n\tvar reqBody, resBody AcquireCloneTicketBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AcquireCredentialsInGuestBody struct {\n\tReq    *types.AcquireCredentialsInGuest         
`xml:\"urn:vim25 AcquireCredentialsInGuest,omitempty\"`\n\tRes    *types.AcquireCredentialsInGuestResponse `xml:\"urn:vim25 AcquireCredentialsInGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AcquireCredentialsInGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AcquireCredentialsInGuest(ctx context.Context, r soap.RoundTripper, req *types.AcquireCredentialsInGuest) (*types.AcquireCredentialsInGuestResponse, error) {\n\tvar reqBody, resBody AcquireCredentialsInGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AcquireGenericServiceTicketBody struct {\n\tReq    *types.AcquireGenericServiceTicket         `xml:\"urn:vim25 AcquireGenericServiceTicket,omitempty\"`\n\tRes    *types.AcquireGenericServiceTicketResponse `xml:\"urn:vim25 AcquireGenericServiceTicketResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AcquireGenericServiceTicketBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AcquireGenericServiceTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireGenericServiceTicket) (*types.AcquireGenericServiceTicketResponse, error) {\n\tvar reqBody, resBody AcquireGenericServiceTicketBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AcquireLocalTicketBody struct {\n\tReq    *types.AcquireLocalTicket         `xml:\"urn:vim25 AcquireLocalTicket,omitempty\"`\n\tRes    *types.AcquireLocalTicketResponse `xml:\"urn:vim25 AcquireLocalTicketResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AcquireLocalTicketBody) Fault() 
*soap.Fault { return b.Fault_ }\n\nfunc AcquireLocalTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireLocalTicket) (*types.AcquireLocalTicketResponse, error) {\n\tvar reqBody, resBody AcquireLocalTicketBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AcquireMksTicketBody struct {\n\tReq    *types.AcquireMksTicket         `xml:\"urn:vim25 AcquireMksTicket,omitempty\"`\n\tRes    *types.AcquireMksTicketResponse `xml:\"urn:vim25 AcquireMksTicketResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AcquireMksTicketBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AcquireMksTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireMksTicket) (*types.AcquireMksTicketResponse, error) {\n\tvar reqBody, resBody AcquireMksTicketBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AcquireTicketBody struct {\n\tReq    *types.AcquireTicket         `xml:\"urn:vim25 AcquireTicket,omitempty\"`\n\tRes    *types.AcquireTicketResponse `xml:\"urn:vim25 AcquireTicketResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AcquireTicketBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AcquireTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireTicket) (*types.AcquireTicketResponse, error) {\n\tvar reqBody, resBody AcquireTicketBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddAuthorizationRoleBody struct {\n\tReq    *types.AddAuthorizationRole         `xml:\"urn:vim25 AddAuthorizationRole,omitempty\"`\n\tRes    
*types.AddAuthorizationRoleResponse `xml:\"urn:vim25 AddAuthorizationRoleResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddAuthorizationRoleBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddAuthorizationRole(ctx context.Context, r soap.RoundTripper, req *types.AddAuthorizationRole) (*types.AddAuthorizationRoleResponse, error) {\n\tvar reqBody, resBody AddAuthorizationRoleBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddCustomFieldDefBody struct {\n\tReq    *types.AddCustomFieldDef         `xml:\"urn:vim25 AddCustomFieldDef,omitempty\"`\n\tRes    *types.AddCustomFieldDefResponse `xml:\"urn:vim25 AddCustomFieldDefResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddCustomFieldDefBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddCustomFieldDef(ctx context.Context, r soap.RoundTripper, req *types.AddCustomFieldDef) (*types.AddCustomFieldDefResponse, error) {\n\tvar reqBody, resBody AddCustomFieldDefBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddDVPortgroup_TaskBody struct {\n\tReq    *types.AddDVPortgroup_Task         `xml:\"urn:vim25 AddDVPortgroup_Task,omitempty\"`\n\tRes    *types.AddDVPortgroup_TaskResponse `xml:\"urn:vim25 AddDVPortgroup_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddDVPortgroup_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddDVPortgroup_Task(ctx context.Context, r soap.RoundTripper, req *types.AddDVPortgroup_Task) (*types.AddDVPortgroup_TaskResponse, error) {\n\tvar reqBody, resBody 
AddDVPortgroup_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddDisks_TaskBody struct {\n\tReq    *types.AddDisks_Task         `xml:\"urn:vim25 AddDisks_Task,omitempty\"`\n\tRes    *types.AddDisks_TaskResponse `xml:\"urn:vim25 AddDisks_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddDisks_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.AddDisks_Task) (*types.AddDisks_TaskResponse, error) {\n\tvar reqBody, resBody AddDisks_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddFilterBody struct {\n\tReq    *types.AddFilter         `xml:\"urn:vim25 AddFilter,omitempty\"`\n\tRes    *types.AddFilterResponse `xml:\"urn:vim25 AddFilterResponse,omitempty\"`\n\tFault_ *soap.Fault              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddFilterBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddFilter(ctx context.Context, r soap.RoundTripper, req *types.AddFilter) (*types.AddFilterResponse, error) {\n\tvar reqBody, resBody AddFilterBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddFilterEntitiesBody struct {\n\tReq    *types.AddFilterEntities         `xml:\"urn:vim25 AddFilterEntities,omitempty\"`\n\tRes    *types.AddFilterEntitiesResponse `xml:\"urn:vim25 AddFilterEntitiesResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddFilterEntitiesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddFilterEntities(ctx 
context.Context, r soap.RoundTripper, req *types.AddFilterEntities) (*types.AddFilterEntitiesResponse, error) {\n\tvar reqBody, resBody AddFilterEntitiesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddGuestAliasBody struct {\n\tReq    *types.AddGuestAlias         `xml:\"urn:vim25 AddGuestAlias,omitempty\"`\n\tRes    *types.AddGuestAliasResponse `xml:\"urn:vim25 AddGuestAliasResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddGuestAliasBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddGuestAlias(ctx context.Context, r soap.RoundTripper, req *types.AddGuestAlias) (*types.AddGuestAliasResponse, error) {\n\tvar reqBody, resBody AddGuestAliasBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddHost_TaskBody struct {\n\tReq    *types.AddHost_Task         `xml:\"urn:vim25 AddHost_Task,omitempty\"`\n\tRes    *types.AddHost_TaskResponse `xml:\"urn:vim25 AddHost_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddHost_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddHost_Task(ctx context.Context, r soap.RoundTripper, req *types.AddHost_Task) (*types.AddHost_TaskResponse, error) {\n\tvar reqBody, resBody AddHost_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddInternetScsiSendTargetsBody struct {\n\tReq    *types.AddInternetScsiSendTargets         `xml:\"urn:vim25 AddInternetScsiSendTargets,omitempty\"`\n\tRes    *types.AddInternetScsiSendTargetsResponse `xml:\"urn:vim25 AddInternetScsiSendTargetsResponse,omitempty\"`\n\tFault_ 
*soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddInternetScsiSendTargetsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddInternetScsiSendTargets(ctx context.Context, r soap.RoundTripper, req *types.AddInternetScsiSendTargets) (*types.AddInternetScsiSendTargetsResponse, error) {\n\tvar reqBody, resBody AddInternetScsiSendTargetsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddInternetScsiStaticTargetsBody struct {\n\tReq    *types.AddInternetScsiStaticTargets         `xml:\"urn:vim25 AddInternetScsiStaticTargets,omitempty\"`\n\tRes    *types.AddInternetScsiStaticTargetsResponse `xml:\"urn:vim25 AddInternetScsiStaticTargetsResponse,omitempty\"`\n\tFault_ *soap.Fault                                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddInternetScsiStaticTargetsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddInternetScsiStaticTargets(ctx context.Context, r soap.RoundTripper, req *types.AddInternetScsiStaticTargets) (*types.AddInternetScsiStaticTargetsResponse, error) {\n\tvar reqBody, resBody AddInternetScsiStaticTargetsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddKeyBody struct {\n\tReq    *types.AddKey         `xml:\"urn:vim25 AddKey,omitempty\"`\n\tRes    *types.AddKeyResponse `xml:\"urn:vim25 AddKeyResponse,omitempty\"`\n\tFault_ *soap.Fault           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddKeyBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddKey(ctx context.Context, r soap.RoundTripper, req *types.AddKey) (*types.AddKeyResponse, error) {\n\tvar reqBody, resBody AddKeyBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, 
&resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddKeysBody struct {\n\tReq    *types.AddKeys         `xml:\"urn:vim25 AddKeys,omitempty\"`\n\tRes    *types.AddKeysResponse `xml:\"urn:vim25 AddKeysResponse,omitempty\"`\n\tFault_ *soap.Fault            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddKeysBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddKeys(ctx context.Context, r soap.RoundTripper, req *types.AddKeys) (*types.AddKeysResponse, error) {\n\tvar reqBody, resBody AddKeysBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddLicenseBody struct {\n\tReq    *types.AddLicense         `xml:\"urn:vim25 AddLicense,omitempty\"`\n\tRes    *types.AddLicenseResponse `xml:\"urn:vim25 AddLicenseResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddLicenseBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddLicense(ctx context.Context, r soap.RoundTripper, req *types.AddLicense) (*types.AddLicenseResponse, error) {\n\tvar reqBody, resBody AddLicenseBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddMonitoredEntitiesBody struct {\n\tReq    *types.AddMonitoredEntities         `xml:\"urn:vim25 AddMonitoredEntities,omitempty\"`\n\tRes    *types.AddMonitoredEntitiesResponse `xml:\"urn:vim25 AddMonitoredEntitiesResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddMonitoredEntitiesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddMonitoredEntities(ctx context.Context, r soap.RoundTripper, req *types.AddMonitoredEntities) (*types.AddMonitoredEntitiesResponse, error) {\n\tvar 
reqBody, resBody AddMonitoredEntitiesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddNetworkResourcePoolBody struct {\n\tReq    *types.AddNetworkResourcePool         `xml:\"urn:vim25 AddNetworkResourcePool,omitempty\"`\n\tRes    *types.AddNetworkResourcePoolResponse `xml:\"urn:vim25 AddNetworkResourcePoolResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddNetworkResourcePoolBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddNetworkResourcePool(ctx context.Context, r soap.RoundTripper, req *types.AddNetworkResourcePool) (*types.AddNetworkResourcePoolResponse, error) {\n\tvar reqBody, resBody AddNetworkResourcePoolBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddPortGroupBody struct {\n\tReq    *types.AddPortGroup         `xml:\"urn:vim25 AddPortGroup,omitempty\"`\n\tRes    *types.AddPortGroupResponse `xml:\"urn:vim25 AddPortGroupResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddPortGroupBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddPortGroup(ctx context.Context, r soap.RoundTripper, req *types.AddPortGroup) (*types.AddPortGroupResponse, error) {\n\tvar reqBody, resBody AddPortGroupBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddServiceConsoleVirtualNicBody struct {\n\tReq    *types.AddServiceConsoleVirtualNic         `xml:\"urn:vim25 AddServiceConsoleVirtualNic,omitempty\"`\n\tRes    *types.AddServiceConsoleVirtualNicResponse `xml:\"urn:vim25 AddServiceConsoleVirtualNicResponse,omitempty\"`\n\tFault_ *soap.Fault     
                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddServiceConsoleVirtualNicBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.AddServiceConsoleVirtualNic) (*types.AddServiceConsoleVirtualNicResponse, error) {\n\tvar reqBody, resBody AddServiceConsoleVirtualNicBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddStandaloneHost_TaskBody struct {\n\tReq    *types.AddStandaloneHost_Task         `xml:\"urn:vim25 AddStandaloneHost_Task,omitempty\"`\n\tRes    *types.AddStandaloneHost_TaskResponse `xml:\"urn:vim25 AddStandaloneHost_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddStandaloneHost_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddStandaloneHost_Task(ctx context.Context, r soap.RoundTripper, req *types.AddStandaloneHost_Task) (*types.AddStandaloneHost_TaskResponse, error) {\n\tvar reqBody, resBody AddStandaloneHost_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddVirtualNicBody struct {\n\tReq    *types.AddVirtualNic         `xml:\"urn:vim25 AddVirtualNic,omitempty\"`\n\tRes    *types.AddVirtualNicResponse `xml:\"urn:vim25 AddVirtualNicResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddVirtualNicBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.AddVirtualNic) (*types.AddVirtualNicResponse, error) {\n\tvar reqBody, resBody AddVirtualNicBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, 
&resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AddVirtualSwitchBody struct {\n\tReq    *types.AddVirtualSwitch         `xml:\"urn:vim25 AddVirtualSwitch,omitempty\"`\n\tRes    *types.AddVirtualSwitchResponse `xml:\"urn:vim25 AddVirtualSwitchResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AddVirtualSwitchBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AddVirtualSwitch(ctx context.Context, r soap.RoundTripper, req *types.AddVirtualSwitch) (*types.AddVirtualSwitchResponse, error) {\n\tvar reqBody, resBody AddVirtualSwitchBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AllocateIpv4AddressBody struct {\n\tReq    *types.AllocateIpv4Address         `xml:\"urn:vim25 AllocateIpv4Address,omitempty\"`\n\tRes    *types.AllocateIpv4AddressResponse `xml:\"urn:vim25 AllocateIpv4AddressResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AllocateIpv4AddressBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AllocateIpv4Address(ctx context.Context, r soap.RoundTripper, req *types.AllocateIpv4Address) (*types.AllocateIpv4AddressResponse, error) {\n\tvar reqBody, resBody AllocateIpv4AddressBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AllocateIpv6AddressBody struct {\n\tReq    *types.AllocateIpv6Address         `xml:\"urn:vim25 AllocateIpv6Address,omitempty\"`\n\tRes    *types.AllocateIpv6AddressResponse `xml:\"urn:vim25 AllocateIpv6AddressResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AllocateIpv6AddressBody) Fault() 
*soap.Fault { return b.Fault_ }\n\nfunc AllocateIpv6Address(ctx context.Context, r soap.RoundTripper, req *types.AllocateIpv6Address) (*types.AllocateIpv6AddressResponse, error) {\n\tvar reqBody, resBody AllocateIpv6AddressBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AnswerVMBody struct {\n\tReq    *types.AnswerVM         `xml:\"urn:vim25 AnswerVM,omitempty\"`\n\tRes    *types.AnswerVMResponse `xml:\"urn:vim25 AnswerVMResponse,omitempty\"`\n\tFault_ *soap.Fault             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AnswerVMBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AnswerVM(ctx context.Context, r soap.RoundTripper, req *types.AnswerVM) (*types.AnswerVMResponse, error) {\n\tvar reqBody, resBody AnswerVMBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ApplyEntitiesConfig_TaskBody struct {\n\tReq    *types.ApplyEntitiesConfig_Task         `xml:\"urn:vim25 ApplyEntitiesConfig_Task,omitempty\"`\n\tRes    *types.ApplyEntitiesConfig_TaskResponse `xml:\"urn:vim25 ApplyEntitiesConfig_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ApplyEntitiesConfig_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ApplyEntitiesConfig_Task(ctx context.Context, r soap.RoundTripper, req *types.ApplyEntitiesConfig_Task) (*types.ApplyEntitiesConfig_TaskResponse, error) {\n\tvar reqBody, resBody ApplyEntitiesConfig_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ApplyHostConfig_TaskBody struct {\n\tReq    *types.ApplyHostConfig_Task         `xml:\"urn:vim25 
ApplyHostConfig_Task,omitempty\"`\n\tRes    *types.ApplyHostConfig_TaskResponse `xml:\"urn:vim25 ApplyHostConfig_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ApplyHostConfig_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ApplyHostConfig_Task(ctx context.Context, r soap.RoundTripper, req *types.ApplyHostConfig_Task) (*types.ApplyHostConfig_TaskResponse, error) {\n\tvar reqBody, resBody ApplyHostConfig_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ApplyRecommendationBody struct {\n\tReq    *types.ApplyRecommendation         `xml:\"urn:vim25 ApplyRecommendation,omitempty\"`\n\tRes    *types.ApplyRecommendationResponse `xml:\"urn:vim25 ApplyRecommendationResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ApplyRecommendationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ApplyRecommendation(ctx context.Context, r soap.RoundTripper, req *types.ApplyRecommendation) (*types.ApplyRecommendationResponse, error) {\n\tvar reqBody, resBody ApplyRecommendationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ApplyStorageDrsRecommendationToPod_TaskBody struct {\n\tReq    *types.ApplyStorageDrsRecommendationToPod_Task         `xml:\"urn:vim25 ApplyStorageDrsRecommendationToPod_Task,omitempty\"`\n\tRes    *types.ApplyStorageDrsRecommendationToPod_TaskResponse `xml:\"urn:vim25 ApplyStorageDrsRecommendationToPod_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ApplyStorageDrsRecommendationToPod_TaskBody) Fault() 
*soap.Fault { return b.Fault_ }\n\nfunc ApplyStorageDrsRecommendationToPod_Task(ctx context.Context, r soap.RoundTripper, req *types.ApplyStorageDrsRecommendationToPod_Task) (*types.ApplyStorageDrsRecommendationToPod_TaskResponse, error) {\n\tvar reqBody, resBody ApplyStorageDrsRecommendationToPod_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ApplyStorageDrsRecommendation_TaskBody struct {\n\tReq    *types.ApplyStorageDrsRecommendation_Task         `xml:\"urn:vim25 ApplyStorageDrsRecommendation_Task,omitempty\"`\n\tRes    *types.ApplyStorageDrsRecommendation_TaskResponse `xml:\"urn:vim25 ApplyStorageDrsRecommendation_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ApplyStorageDrsRecommendation_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ApplyStorageDrsRecommendation_Task(ctx context.Context, r soap.RoundTripper, req *types.ApplyStorageDrsRecommendation_Task) (*types.ApplyStorageDrsRecommendation_TaskResponse, error) {\n\tvar reqBody, resBody ApplyStorageDrsRecommendation_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AreAlarmActionsEnabledBody struct {\n\tReq    *types.AreAlarmActionsEnabled         `xml:\"urn:vim25 AreAlarmActionsEnabled,omitempty\"`\n\tRes    *types.AreAlarmActionsEnabledResponse `xml:\"urn:vim25 AreAlarmActionsEnabledResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AreAlarmActionsEnabledBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AreAlarmActionsEnabled(ctx context.Context, r soap.RoundTripper, req *types.AreAlarmActionsEnabled) (*types.AreAlarmActionsEnabledResponse, 
error) {\n\tvar reqBody, resBody AreAlarmActionsEnabledBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AssignUserToGroupBody struct {\n\tReq    *types.AssignUserToGroup         `xml:\"urn:vim25 AssignUserToGroup,omitempty\"`\n\tRes    *types.AssignUserToGroupResponse `xml:\"urn:vim25 AssignUserToGroupResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AssignUserToGroupBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AssignUserToGroup(ctx context.Context, r soap.RoundTripper, req *types.AssignUserToGroup) (*types.AssignUserToGroupResponse, error) {\n\tvar reqBody, resBody AssignUserToGroupBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AssociateProfileBody struct {\n\tReq    *types.AssociateProfile         `xml:\"urn:vim25 AssociateProfile,omitempty\"`\n\tRes    *types.AssociateProfileResponse `xml:\"urn:vim25 AssociateProfileResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AssociateProfileBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AssociateProfile(ctx context.Context, r soap.RoundTripper, req *types.AssociateProfile) (*types.AssociateProfileResponse, error) {\n\tvar reqBody, resBody AssociateProfileBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AttachDisk_TaskBody struct {\n\tReq    *types.AttachDisk_Task         `xml:\"urn:vim25 AttachDisk_Task,omitempty\"`\n\tRes    *types.AttachDisk_TaskResponse `xml:\"urn:vim25 AttachDisk_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                    
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AttachDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AttachDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.AttachDisk_Task) (*types.AttachDisk_TaskResponse, error) {\n\tvar reqBody, resBody AttachDisk_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AttachScsiLunBody struct {\n\tReq    *types.AttachScsiLun         `xml:\"urn:vim25 AttachScsiLun,omitempty\"`\n\tRes    *types.AttachScsiLunResponse `xml:\"urn:vim25 AttachScsiLunResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AttachScsiLunBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AttachScsiLun(ctx context.Context, r soap.RoundTripper, req *types.AttachScsiLun) (*types.AttachScsiLunResponse, error) {\n\tvar reqBody, resBody AttachScsiLunBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AttachScsiLunEx_TaskBody struct {\n\tReq    *types.AttachScsiLunEx_Task         `xml:\"urn:vim25 AttachScsiLunEx_Task,omitempty\"`\n\tRes    *types.AttachScsiLunEx_TaskResponse `xml:\"urn:vim25 AttachScsiLunEx_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AttachScsiLunEx_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AttachScsiLunEx_Task(ctx context.Context, r soap.RoundTripper, req *types.AttachScsiLunEx_Task) (*types.AttachScsiLunEx_TaskResponse, error) {\n\tvar reqBody, resBody AttachScsiLunEx_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype 
AttachTagToVStorageObjectBody struct {\n\tReq    *types.AttachTagToVStorageObject         `xml:\"urn:vim25 AttachTagToVStorageObject,omitempty\"`\n\tRes    *types.AttachTagToVStorageObjectResponse `xml:\"urn:vim25 AttachTagToVStorageObjectResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AttachTagToVStorageObjectBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AttachTagToVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.AttachTagToVStorageObject) (*types.AttachTagToVStorageObjectResponse, error) {\n\tvar reqBody, resBody AttachTagToVStorageObjectBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AttachVmfsExtentBody struct {\n\tReq    *types.AttachVmfsExtent         `xml:\"urn:vim25 AttachVmfsExtent,omitempty\"`\n\tRes    *types.AttachVmfsExtentResponse `xml:\"urn:vim25 AttachVmfsExtentResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AttachVmfsExtentBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AttachVmfsExtent(ctx context.Context, r soap.RoundTripper, req *types.AttachVmfsExtent) (*types.AttachVmfsExtentResponse, error) {\n\tvar reqBody, resBody AttachVmfsExtentBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AutoStartPowerOffBody struct {\n\tReq    *types.AutoStartPowerOff         `xml:\"urn:vim25 AutoStartPowerOff,omitempty\"`\n\tRes    *types.AutoStartPowerOffResponse `xml:\"urn:vim25 AutoStartPowerOffResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AutoStartPowerOffBody) Fault() *soap.Fault { return b.Fault_ 
}\n\nfunc AutoStartPowerOff(ctx context.Context, r soap.RoundTripper, req *types.AutoStartPowerOff) (*types.AutoStartPowerOffResponse, error) {\n\tvar reqBody, resBody AutoStartPowerOffBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype AutoStartPowerOnBody struct {\n\tReq    *types.AutoStartPowerOn         `xml:\"urn:vim25 AutoStartPowerOn,omitempty\"`\n\tRes    *types.AutoStartPowerOnResponse `xml:\"urn:vim25 AutoStartPowerOnResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *AutoStartPowerOnBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc AutoStartPowerOn(ctx context.Context, r soap.RoundTripper, req *types.AutoStartPowerOn) (*types.AutoStartPowerOnResponse, error) {\n\tvar reqBody, resBody AutoStartPowerOnBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype BackupFirmwareConfigurationBody struct {\n\tReq    *types.BackupFirmwareConfiguration         `xml:\"urn:vim25 BackupFirmwareConfiguration,omitempty\"`\n\tRes    *types.BackupFirmwareConfigurationResponse `xml:\"urn:vim25 BackupFirmwareConfigurationResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *BackupFirmwareConfigurationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc BackupFirmwareConfiguration(ctx context.Context, r soap.RoundTripper, req *types.BackupFirmwareConfiguration) (*types.BackupFirmwareConfigurationResponse, error) {\n\tvar reqBody, resBody BackupFirmwareConfigurationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype BindVnicBody struct {\n\tReq    *types.BindVnic   
      `xml:\"urn:vim25 BindVnic,omitempty\"`\n\tRes    *types.BindVnicResponse `xml:\"urn:vim25 BindVnicResponse,omitempty\"`\n\tFault_ *soap.Fault             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *BindVnicBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc BindVnic(ctx context.Context, r soap.RoundTripper, req *types.BindVnic) (*types.BindVnicResponse, error) {\n\tvar reqBody, resBody BindVnicBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype BrowseDiagnosticLogBody struct {\n\tReq    *types.BrowseDiagnosticLog         `xml:\"urn:vim25 BrowseDiagnosticLog,omitempty\"`\n\tRes    *types.BrowseDiagnosticLogResponse `xml:\"urn:vim25 BrowseDiagnosticLogResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *BrowseDiagnosticLogBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc BrowseDiagnosticLog(ctx context.Context, r soap.RoundTripper, req *types.BrowseDiagnosticLog) (*types.BrowseDiagnosticLogResponse, error) {\n\tvar reqBody, resBody BrowseDiagnosticLogBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CanProvisionObjectsBody struct {\n\tReq    *types.CanProvisionObjects         `xml:\"urn:vim25 CanProvisionObjects,omitempty\"`\n\tRes    *types.CanProvisionObjectsResponse `xml:\"urn:vim25 CanProvisionObjectsResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CanProvisionObjectsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CanProvisionObjects(ctx context.Context, r soap.RoundTripper, req *types.CanProvisionObjects) (*types.CanProvisionObjectsResponse, error) {\n\tvar reqBody, resBody 
CanProvisionObjectsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CancelRecommendationBody struct {\n\tReq    *types.CancelRecommendation         `xml:\"urn:vim25 CancelRecommendation,omitempty\"`\n\tRes    *types.CancelRecommendationResponse `xml:\"urn:vim25 CancelRecommendationResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CancelRecommendationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CancelRecommendation(ctx context.Context, r soap.RoundTripper, req *types.CancelRecommendation) (*types.CancelRecommendationResponse, error) {\n\tvar reqBody, resBody CancelRecommendationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CancelRetrievePropertiesExBody struct {\n\tReq    *types.CancelRetrievePropertiesEx         `xml:\"urn:vim25 CancelRetrievePropertiesEx,omitempty\"`\n\tRes    *types.CancelRetrievePropertiesExResponse `xml:\"urn:vim25 CancelRetrievePropertiesExResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CancelRetrievePropertiesExBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CancelRetrievePropertiesEx(ctx context.Context, r soap.RoundTripper, req *types.CancelRetrievePropertiesEx) (*types.CancelRetrievePropertiesExResponse, error) {\n\tvar reqBody, resBody CancelRetrievePropertiesExBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CancelStorageDrsRecommendationBody struct {\n\tReq    *types.CancelStorageDrsRecommendation         `xml:\"urn:vim25 CancelStorageDrsRecommendation,omitempty\"`\n\tRes    
*types.CancelStorageDrsRecommendationResponse `xml:\"urn:vim25 CancelStorageDrsRecommendationResponse,omitempty\"`\n\tFault_ *soap.Fault                                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CancelStorageDrsRecommendationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CancelStorageDrsRecommendation(ctx context.Context, r soap.RoundTripper, req *types.CancelStorageDrsRecommendation) (*types.CancelStorageDrsRecommendationResponse, error) {\n\tvar reqBody, resBody CancelStorageDrsRecommendationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CancelTaskBody struct {\n\tReq    *types.CancelTask         `xml:\"urn:vim25 CancelTask,omitempty\"`\n\tRes    *types.CancelTaskResponse `xml:\"urn:vim25 CancelTaskResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CancelTaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CancelTask(ctx context.Context, r soap.RoundTripper, req *types.CancelTask) (*types.CancelTaskResponse, error) {\n\tvar reqBody, resBody CancelTaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CancelWaitForUpdatesBody struct {\n\tReq    *types.CancelWaitForUpdates         `xml:\"urn:vim25 CancelWaitForUpdates,omitempty\"`\n\tRes    *types.CancelWaitForUpdatesResponse `xml:\"urn:vim25 CancelWaitForUpdatesResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CancelWaitForUpdatesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CancelWaitForUpdates(ctx context.Context, r soap.RoundTripper, req *types.CancelWaitForUpdates) (*types.CancelWaitForUpdatesResponse, error) {\n\tvar reqBody, 
resBody CancelWaitForUpdatesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CertMgrRefreshCACertificatesAndCRLs_TaskBody struct {\n\tReq    *types.CertMgrRefreshCACertificatesAndCRLs_Task         `xml:\"urn:vim25 CertMgrRefreshCACertificatesAndCRLs_Task,omitempty\"`\n\tRes    *types.CertMgrRefreshCACertificatesAndCRLs_TaskResponse `xml:\"urn:vim25 CertMgrRefreshCACertificatesAndCRLs_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CertMgrRefreshCACertificatesAndCRLs_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CertMgrRefreshCACertificatesAndCRLs_Task(ctx context.Context, r soap.RoundTripper, req *types.CertMgrRefreshCACertificatesAndCRLs_Task) (*types.CertMgrRefreshCACertificatesAndCRLs_TaskResponse, error) {\n\tvar reqBody, resBody CertMgrRefreshCACertificatesAndCRLs_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CertMgrRefreshCertificates_TaskBody struct {\n\tReq    *types.CertMgrRefreshCertificates_Task         `xml:\"urn:vim25 CertMgrRefreshCertificates_Task,omitempty\"`\n\tRes    *types.CertMgrRefreshCertificates_TaskResponse `xml:\"urn:vim25 CertMgrRefreshCertificates_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CertMgrRefreshCertificates_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CertMgrRefreshCertificates_Task(ctx context.Context, r soap.RoundTripper, req *types.CertMgrRefreshCertificates_Task) (*types.CertMgrRefreshCertificates_TaskResponse, error) {\n\tvar reqBody, resBody CertMgrRefreshCertificates_TaskBody\n\n\treqBody.Req = req\n\n\tif err := 
r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CertMgrRevokeCertificates_TaskBody struct {\n\tReq    *types.CertMgrRevokeCertificates_Task         `xml:\"urn:vim25 CertMgrRevokeCertificates_Task,omitempty\"`\n\tRes    *types.CertMgrRevokeCertificates_TaskResponse `xml:\"urn:vim25 CertMgrRevokeCertificates_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CertMgrRevokeCertificates_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CertMgrRevokeCertificates_Task(ctx context.Context, r soap.RoundTripper, req *types.CertMgrRevokeCertificates_Task) (*types.CertMgrRevokeCertificates_TaskResponse, error) {\n\tvar reqBody, resBody CertMgrRevokeCertificates_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ChangeAccessModeBody struct {\n\tReq    *types.ChangeAccessMode         `xml:\"urn:vim25 ChangeAccessMode,omitempty\"`\n\tRes    *types.ChangeAccessModeResponse `xml:\"urn:vim25 ChangeAccessModeResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ChangeAccessModeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ChangeAccessMode(ctx context.Context, r soap.RoundTripper, req *types.ChangeAccessMode) (*types.ChangeAccessModeResponse, error) {\n\tvar reqBody, resBody ChangeAccessModeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ChangeFileAttributesInGuestBody struct {\n\tReq    *types.ChangeFileAttributesInGuest         `xml:\"urn:vim25 ChangeFileAttributesInGuest,omitempty\"`\n\tRes    *types.ChangeFileAttributesInGuestResponse `xml:\"urn:vim25 
ChangeFileAttributesInGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ChangeFileAttributesInGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ChangeFileAttributesInGuest(ctx context.Context, r soap.RoundTripper, req *types.ChangeFileAttributesInGuest) (*types.ChangeFileAttributesInGuestResponse, error) {\n\tvar reqBody, resBody ChangeFileAttributesInGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ChangeLockdownModeBody struct {\n\tReq    *types.ChangeLockdownMode         `xml:\"urn:vim25 ChangeLockdownMode,omitempty\"`\n\tRes    *types.ChangeLockdownModeResponse `xml:\"urn:vim25 ChangeLockdownModeResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ChangeLockdownModeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ChangeLockdownMode(ctx context.Context, r soap.RoundTripper, req *types.ChangeLockdownMode) (*types.ChangeLockdownModeResponse, error) {\n\tvar reqBody, resBody ChangeLockdownModeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ChangeNFSUserPasswordBody struct {\n\tReq    *types.ChangeNFSUserPassword         `xml:\"urn:vim25 ChangeNFSUserPassword,omitempty\"`\n\tRes    *types.ChangeNFSUserPasswordResponse `xml:\"urn:vim25 ChangeNFSUserPasswordResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ChangeNFSUserPasswordBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ChangeNFSUserPassword(ctx context.Context, r soap.RoundTripper, req *types.ChangeNFSUserPassword) (*types.ChangeNFSUserPasswordResponse, error) 
{\n\tvar reqBody, resBody ChangeNFSUserPasswordBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ChangeOwnerBody struct {\n\tReq    *types.ChangeOwner         `xml:\"urn:vim25 ChangeOwner,omitempty\"`\n\tRes    *types.ChangeOwnerResponse `xml:\"urn:vim25 ChangeOwnerResponse,omitempty\"`\n\tFault_ *soap.Fault                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ChangeOwnerBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ChangeOwner(ctx context.Context, r soap.RoundTripper, req *types.ChangeOwner) (*types.ChangeOwnerResponse, error) {\n\tvar reqBody, resBody ChangeOwnerBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CheckAddHostEvc_TaskBody struct {\n\tReq    *types.CheckAddHostEvc_Task         `xml:\"urn:vim25 CheckAddHostEvc_Task,omitempty\"`\n\tRes    *types.CheckAddHostEvc_TaskResponse `xml:\"urn:vim25 CheckAddHostEvc_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CheckAddHostEvc_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CheckAddHostEvc_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckAddHostEvc_Task) (*types.CheckAddHostEvc_TaskResponse, error) {\n\tvar reqBody, resBody CheckAddHostEvc_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CheckAnswerFileStatus_TaskBody struct {\n\tReq    *types.CheckAnswerFileStatus_Task         `xml:\"urn:vim25 CheckAnswerFileStatus_Task,omitempty\"`\n\tRes    *types.CheckAnswerFileStatus_TaskResponse `xml:\"urn:vim25 CheckAnswerFileStatus_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                               
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CheckAnswerFileStatus_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CheckAnswerFileStatus_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckAnswerFileStatus_Task) (*types.CheckAnswerFileStatus_TaskResponse, error) {\n\tvar reqBody, resBody CheckAnswerFileStatus_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CheckCompatibility_TaskBody struct {\n\tReq    *types.CheckCompatibility_Task         `xml:\"urn:vim25 CheckCompatibility_Task,omitempty\"`\n\tRes    *types.CheckCompatibility_TaskResponse `xml:\"urn:vim25 CheckCompatibility_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CheckCompatibility_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CheckCompatibility_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckCompatibility_Task) (*types.CheckCompatibility_TaskResponse, error) {\n\tvar reqBody, resBody CheckCompatibility_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CheckCompliance_TaskBody struct {\n\tReq    *types.CheckCompliance_Task         `xml:\"urn:vim25 CheckCompliance_Task,omitempty\"`\n\tRes    *types.CheckCompliance_TaskResponse `xml:\"urn:vim25 CheckCompliance_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CheckCompliance_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CheckCompliance_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckCompliance_Task) (*types.CheckCompliance_TaskResponse, error) {\n\tvar reqBody, resBody 
CheckCompliance_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CheckConfigureEvcMode_TaskBody struct {\n\tReq    *types.CheckConfigureEvcMode_Task         `xml:\"urn:vim25 CheckConfigureEvcMode_Task,omitempty\"`\n\tRes    *types.CheckConfigureEvcMode_TaskResponse `xml:\"urn:vim25 CheckConfigureEvcMode_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CheckConfigureEvcMode_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CheckConfigureEvcMode_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckConfigureEvcMode_Task) (*types.CheckConfigureEvcMode_TaskResponse, error) {\n\tvar reqBody, resBody CheckConfigureEvcMode_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CheckCustomizationResourcesBody struct {\n\tReq    *types.CheckCustomizationResources         `xml:\"urn:vim25 CheckCustomizationResources,omitempty\"`\n\tRes    *types.CheckCustomizationResourcesResponse `xml:\"urn:vim25 CheckCustomizationResourcesResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CheckCustomizationResourcesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CheckCustomizationResources(ctx context.Context, r soap.RoundTripper, req *types.CheckCustomizationResources) (*types.CheckCustomizationResourcesResponse, error) {\n\tvar reqBody, resBody CheckCustomizationResourcesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CheckCustomizationSpecBody struct {\n\tReq    *types.CheckCustomizationSpec         `xml:\"urn:vim25 
CheckCustomizationSpec,omitempty\"`\n\tRes    *types.CheckCustomizationSpecResponse `xml:\"urn:vim25 CheckCustomizationSpecResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CheckCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CheckCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.CheckCustomizationSpec) (*types.CheckCustomizationSpecResponse, error) {\n\tvar reqBody, resBody CheckCustomizationSpecBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CheckForUpdatesBody struct {\n\tReq    *types.CheckForUpdates         `xml:\"urn:vim25 CheckForUpdates,omitempty\"`\n\tRes    *types.CheckForUpdatesResponse `xml:\"urn:vim25 CheckForUpdatesResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CheckForUpdatesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CheckForUpdates(ctx context.Context, r soap.RoundTripper, req *types.CheckForUpdates) (*types.CheckForUpdatesResponse, error) {\n\tvar reqBody, resBody CheckForUpdatesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CheckHostPatch_TaskBody struct {\n\tReq    *types.CheckHostPatch_Task         `xml:\"urn:vim25 CheckHostPatch_Task,omitempty\"`\n\tRes    *types.CheckHostPatch_TaskResponse `xml:\"urn:vim25 CheckHostPatch_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CheckHostPatch_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CheckHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckHostPatch_Task) 
(*types.CheckHostPatch_TaskResponse, error) {\n\tvar reqBody, resBody CheckHostPatch_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CheckLicenseFeatureBody struct {\n\tReq    *types.CheckLicenseFeature         `xml:\"urn:vim25 CheckLicenseFeature,omitempty\"`\n\tRes    *types.CheckLicenseFeatureResponse `xml:\"urn:vim25 CheckLicenseFeatureResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CheckLicenseFeatureBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CheckLicenseFeature(ctx context.Context, r soap.RoundTripper, req *types.CheckLicenseFeature) (*types.CheckLicenseFeatureResponse, error) {\n\tvar reqBody, resBody CheckLicenseFeatureBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CheckMigrate_TaskBody struct {\n\tReq    *types.CheckMigrate_Task         `xml:\"urn:vim25 CheckMigrate_Task,omitempty\"`\n\tRes    *types.CheckMigrate_TaskResponse `xml:\"urn:vim25 CheckMigrate_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CheckMigrate_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CheckMigrate_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckMigrate_Task) (*types.CheckMigrate_TaskResponse, error) {\n\tvar reqBody, resBody CheckMigrate_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CheckProfileCompliance_TaskBody struct {\n\tReq    *types.CheckProfileCompliance_Task         `xml:\"urn:vim25 CheckProfileCompliance_Task,omitempty\"`\n\tRes    *types.CheckProfileCompliance_TaskResponse `xml:\"urn:vim25 
CheckProfileCompliance_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CheckProfileCompliance_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CheckProfileCompliance_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckProfileCompliance_Task) (*types.CheckProfileCompliance_TaskResponse, error) {\n\tvar reqBody, resBody CheckProfileCompliance_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CheckRelocate_TaskBody struct {\n\tReq    *types.CheckRelocate_Task         `xml:\"urn:vim25 CheckRelocate_Task,omitempty\"`\n\tRes    *types.CheckRelocate_TaskResponse `xml:\"urn:vim25 CheckRelocate_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CheckRelocate_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CheckRelocate_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckRelocate_Task) (*types.CheckRelocate_TaskResponse, error) {\n\tvar reqBody, resBody CheckRelocate_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ClearComplianceStatusBody struct {\n\tReq    *types.ClearComplianceStatus         `xml:\"urn:vim25 ClearComplianceStatus,omitempty\"`\n\tRes    *types.ClearComplianceStatusResponse `xml:\"urn:vim25 ClearComplianceStatusResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ClearComplianceStatusBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ClearComplianceStatus(ctx context.Context, r soap.RoundTripper, req *types.ClearComplianceStatus) (*types.ClearComplianceStatusResponse, error) 
{\n\tvar reqBody, resBody ClearComplianceStatusBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ClearNFSUserBody struct {\n\tReq    *types.ClearNFSUser         `xml:\"urn:vim25 ClearNFSUser,omitempty\"`\n\tRes    *types.ClearNFSUserResponse `xml:\"urn:vim25 ClearNFSUserResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ClearNFSUserBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ClearNFSUser(ctx context.Context, r soap.RoundTripper, req *types.ClearNFSUser) (*types.ClearNFSUserResponse, error) {\n\tvar reqBody, resBody ClearNFSUserBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ClearSystemEventLogBody struct {\n\tReq    *types.ClearSystemEventLog         `xml:\"urn:vim25 ClearSystemEventLog,omitempty\"`\n\tRes    *types.ClearSystemEventLogResponse `xml:\"urn:vim25 ClearSystemEventLogResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ClearSystemEventLogBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ClearSystemEventLog(ctx context.Context, r soap.RoundTripper, req *types.ClearSystemEventLog) (*types.ClearSystemEventLogResponse, error) {\n\tvar reqBody, resBody ClearSystemEventLogBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CloneSessionBody struct {\n\tReq    *types.CloneSession         `xml:\"urn:vim25 CloneSession,omitempty\"`\n\tRes    *types.CloneSessionResponse `xml:\"urn:vim25 CloneSessionResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b 
*CloneSessionBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CloneSession(ctx context.Context, r soap.RoundTripper, req *types.CloneSession) (*types.CloneSessionResponse, error) {\n\tvar reqBody, resBody CloneSessionBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CloneVApp_TaskBody struct {\n\tReq    *types.CloneVApp_Task         `xml:\"urn:vim25 CloneVApp_Task,omitempty\"`\n\tRes    *types.CloneVApp_TaskResponse `xml:\"urn:vim25 CloneVApp_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CloneVApp_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CloneVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.CloneVApp_Task) (*types.CloneVApp_TaskResponse, error) {\n\tvar reqBody, resBody CloneVApp_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CloneVM_TaskBody struct {\n\tReq    *types.CloneVM_Task         `xml:\"urn:vim25 CloneVM_Task,omitempty\"`\n\tRes    *types.CloneVM_TaskResponse `xml:\"urn:vim25 CloneVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CloneVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CloneVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CloneVM_Task) (*types.CloneVM_TaskResponse, error) {\n\tvar reqBody, resBody CloneVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CloneVStorageObject_TaskBody struct {\n\tReq    *types.CloneVStorageObject_Task         `xml:\"urn:vim25 CloneVStorageObject_Task,omitempty\"`\n\tRes    *types.CloneVStorageObject_TaskResponse 
`xml:\"urn:vim25 CloneVStorageObject_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CloneVStorageObject_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CloneVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *types.CloneVStorageObject_Task) (*types.CloneVStorageObject_TaskResponse, error) {\n\tvar reqBody, resBody CloneVStorageObject_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CloseInventoryViewFolderBody struct {\n\tReq    *types.CloseInventoryViewFolder         `xml:\"urn:vim25 CloseInventoryViewFolder,omitempty\"`\n\tRes    *types.CloseInventoryViewFolderResponse `xml:\"urn:vim25 CloseInventoryViewFolderResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CloseInventoryViewFolderBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CloseInventoryViewFolder(ctx context.Context, r soap.RoundTripper, req *types.CloseInventoryViewFolder) (*types.CloseInventoryViewFolderResponse, error) {\n\tvar reqBody, resBody CloseInventoryViewFolderBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ClusterEnterMaintenanceModeBody struct {\n\tReq    *types.ClusterEnterMaintenanceMode         `xml:\"urn:vim25 ClusterEnterMaintenanceMode,omitempty\"`\n\tRes    *types.ClusterEnterMaintenanceModeResponse `xml:\"urn:vim25 ClusterEnterMaintenanceModeResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ClusterEnterMaintenanceModeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ClusterEnterMaintenanceMode(ctx 
context.Context, r soap.RoundTripper, req *types.ClusterEnterMaintenanceMode) (*types.ClusterEnterMaintenanceModeResponse, error) {\n\tvar reqBody, resBody ClusterEnterMaintenanceModeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ComputeDiskPartitionInfoBody struct {\n\tReq    *types.ComputeDiskPartitionInfo         `xml:\"urn:vim25 ComputeDiskPartitionInfo,omitempty\"`\n\tRes    *types.ComputeDiskPartitionInfoResponse `xml:\"urn:vim25 ComputeDiskPartitionInfoResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ComputeDiskPartitionInfoBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ComputeDiskPartitionInfo(ctx context.Context, r soap.RoundTripper, req *types.ComputeDiskPartitionInfo) (*types.ComputeDiskPartitionInfoResponse, error) {\n\tvar reqBody, resBody ComputeDiskPartitionInfoBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ComputeDiskPartitionInfoForResizeBody struct {\n\tReq    *types.ComputeDiskPartitionInfoForResize         `xml:\"urn:vim25 ComputeDiskPartitionInfoForResize,omitempty\"`\n\tRes    *types.ComputeDiskPartitionInfoForResizeResponse `xml:\"urn:vim25 ComputeDiskPartitionInfoForResizeResponse,omitempty\"`\n\tFault_ *soap.Fault                                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ComputeDiskPartitionInfoForResizeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ComputeDiskPartitionInfoForResize(ctx context.Context, r soap.RoundTripper, req *types.ComputeDiskPartitionInfoForResize) (*types.ComputeDiskPartitionInfoForResizeResponse, error) {\n\tvar reqBody, resBody ComputeDiskPartitionInfoForResizeBody\n\n\treqBody.Req = req\n\n\tif err := 
r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ConfigureCryptoKeyBody struct {\n\tReq    *types.ConfigureCryptoKey         `xml:\"urn:vim25 ConfigureCryptoKey,omitempty\"`\n\tRes    *types.ConfigureCryptoKeyResponse `xml:\"urn:vim25 ConfigureCryptoKeyResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ConfigureCryptoKeyBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ConfigureCryptoKey(ctx context.Context, r soap.RoundTripper, req *types.ConfigureCryptoKey) (*types.ConfigureCryptoKeyResponse, error) {\n\tvar reqBody, resBody ConfigureCryptoKeyBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ConfigureDatastoreIORM_TaskBody struct {\n\tReq    *types.ConfigureDatastoreIORM_Task         `xml:\"urn:vim25 ConfigureDatastoreIORM_Task,omitempty\"`\n\tRes    *types.ConfigureDatastoreIORM_TaskResponse `xml:\"urn:vim25 ConfigureDatastoreIORM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ConfigureDatastoreIORM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ConfigureDatastoreIORM_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureDatastoreIORM_Task) (*types.ConfigureDatastoreIORM_TaskResponse, error) {\n\tvar reqBody, resBody ConfigureDatastoreIORM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ConfigureDatastorePrincipalBody struct {\n\tReq    *types.ConfigureDatastorePrincipal         `xml:\"urn:vim25 ConfigureDatastorePrincipal,omitempty\"`\n\tRes    *types.ConfigureDatastorePrincipalResponse `xml:\"urn:vim25 
ConfigureDatastorePrincipalResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ConfigureDatastorePrincipalBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ConfigureDatastorePrincipal(ctx context.Context, r soap.RoundTripper, req *types.ConfigureDatastorePrincipal) (*types.ConfigureDatastorePrincipalResponse, error) {\n\tvar reqBody, resBody ConfigureDatastorePrincipalBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ConfigureEvcMode_TaskBody struct {\n\tReq    *types.ConfigureEvcMode_Task         `xml:\"urn:vim25 ConfigureEvcMode_Task,omitempty\"`\n\tRes    *types.ConfigureEvcMode_TaskResponse `xml:\"urn:vim25 ConfigureEvcMode_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ConfigureEvcMode_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ConfigureEvcMode_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureEvcMode_Task) (*types.ConfigureEvcMode_TaskResponse, error) {\n\tvar reqBody, resBody ConfigureEvcMode_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ConfigureHostCache_TaskBody struct {\n\tReq    *types.ConfigureHostCache_Task         `xml:\"urn:vim25 ConfigureHostCache_Task,omitempty\"`\n\tRes    *types.ConfigureHostCache_TaskResponse `xml:\"urn:vim25 ConfigureHostCache_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ConfigureHostCache_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ConfigureHostCache_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureHostCache_Task) 
(*types.ConfigureHostCache_TaskResponse, error) {\n\tvar reqBody, resBody ConfigureHostCache_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ConfigureLicenseSourceBody struct {\n\tReq    *types.ConfigureLicenseSource         `xml:\"urn:vim25 ConfigureLicenseSource,omitempty\"`\n\tRes    *types.ConfigureLicenseSourceResponse `xml:\"urn:vim25 ConfigureLicenseSourceResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ConfigureLicenseSourceBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ConfigureLicenseSource(ctx context.Context, r soap.RoundTripper, req *types.ConfigureLicenseSource) (*types.ConfigureLicenseSourceResponse, error) {\n\tvar reqBody, resBody ConfigureLicenseSourceBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ConfigurePowerPolicyBody struct {\n\tReq    *types.ConfigurePowerPolicy         `xml:\"urn:vim25 ConfigurePowerPolicy,omitempty\"`\n\tRes    *types.ConfigurePowerPolicyResponse `xml:\"urn:vim25 ConfigurePowerPolicyResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ConfigurePowerPolicyBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ConfigurePowerPolicy(ctx context.Context, r soap.RoundTripper, req *types.ConfigurePowerPolicy) (*types.ConfigurePowerPolicyResponse, error) {\n\tvar reqBody, resBody ConfigurePowerPolicyBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ConfigureStorageDrsForPod_TaskBody struct {\n\tReq    *types.ConfigureStorageDrsForPod_Task         `xml:\"urn:vim25 
ConfigureStorageDrsForPod_Task,omitempty\"`\n\tRes    *types.ConfigureStorageDrsForPod_TaskResponse `xml:\"urn:vim25 ConfigureStorageDrsForPod_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ConfigureStorageDrsForPod_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ConfigureStorageDrsForPod_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureStorageDrsForPod_Task) (*types.ConfigureStorageDrsForPod_TaskResponse, error) {\n\tvar reqBody, resBody ConfigureStorageDrsForPod_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ConfigureVFlashResourceEx_TaskBody struct {\n\tReq    *types.ConfigureVFlashResourceEx_Task         `xml:\"urn:vim25 ConfigureVFlashResourceEx_Task,omitempty\"`\n\tRes    *types.ConfigureVFlashResourceEx_TaskResponse `xml:\"urn:vim25 ConfigureVFlashResourceEx_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ConfigureVFlashResourceEx_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ConfigureVFlashResourceEx_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureVFlashResourceEx_Task) (*types.ConfigureVFlashResourceEx_TaskResponse, error) {\n\tvar reqBody, resBody ConfigureVFlashResourceEx_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ConsolidateVMDisks_TaskBody struct {\n\tReq    *types.ConsolidateVMDisks_Task         `xml:\"urn:vim25 ConsolidateVMDisks_Task,omitempty\"`\n\tRes    *types.ConsolidateVMDisks_TaskResponse `xml:\"urn:vim25 ConsolidateVMDisks_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                            
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ConsolidateVMDisks_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ConsolidateVMDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.ConsolidateVMDisks_Task) (*types.ConsolidateVMDisks_TaskResponse, error) {\n\tvar reqBody, resBody ConsolidateVMDisks_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ContinueRetrievePropertiesExBody struct {\n\tReq    *types.ContinueRetrievePropertiesEx         `xml:\"urn:vim25 ContinueRetrievePropertiesEx,omitempty\"`\n\tRes    *types.ContinueRetrievePropertiesExResponse `xml:\"urn:vim25 ContinueRetrievePropertiesExResponse,omitempty\"`\n\tFault_ *soap.Fault                                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ContinueRetrievePropertiesExBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ContinueRetrievePropertiesEx(ctx context.Context, r soap.RoundTripper, req *types.ContinueRetrievePropertiesEx) (*types.ContinueRetrievePropertiesExResponse, error) {\n\tvar reqBody, resBody ContinueRetrievePropertiesExBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ConvertNamespacePathToUuidPathBody struct {\n\tReq    *types.ConvertNamespacePathToUuidPath         `xml:\"urn:vim25 ConvertNamespacePathToUuidPath,omitempty\"`\n\tRes    *types.ConvertNamespacePathToUuidPathResponse `xml:\"urn:vim25 ConvertNamespacePathToUuidPathResponse,omitempty\"`\n\tFault_ *soap.Fault                                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ConvertNamespacePathToUuidPathBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ConvertNamespacePathToUuidPath(ctx context.Context, r soap.RoundTripper, req 
*types.ConvertNamespacePathToUuidPath) (*types.ConvertNamespacePathToUuidPathResponse, error) {\n\tvar reqBody, resBody ConvertNamespacePathToUuidPathBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CopyDatastoreFile_TaskBody struct {\n\tReq    *types.CopyDatastoreFile_Task         `xml:\"urn:vim25 CopyDatastoreFile_Task,omitempty\"`\n\tRes    *types.CopyDatastoreFile_TaskResponse `xml:\"urn:vim25 CopyDatastoreFile_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CopyDatastoreFile_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CopyDatastoreFile_Task(ctx context.Context, r soap.RoundTripper, req *types.CopyDatastoreFile_Task) (*types.CopyDatastoreFile_TaskResponse, error) {\n\tvar reqBody, resBody CopyDatastoreFile_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CopyVirtualDisk_TaskBody struct {\n\tReq    *types.CopyVirtualDisk_Task         `xml:\"urn:vim25 CopyVirtualDisk_Task,omitempty\"`\n\tRes    *types.CopyVirtualDisk_TaskResponse `xml:\"urn:vim25 CopyVirtualDisk_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CopyVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CopyVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.CopyVirtualDisk_Task) (*types.CopyVirtualDisk_TaskResponse, error) {\n\tvar reqBody, resBody CopyVirtualDisk_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateAlarmBody struct {\n\tReq    *types.CreateAlarm         `xml:\"urn:vim25 
CreateAlarm,omitempty\"`\n\tRes    *types.CreateAlarmResponse `xml:\"urn:vim25 CreateAlarmResponse,omitempty\"`\n\tFault_ *soap.Fault                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateAlarmBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateAlarm(ctx context.Context, r soap.RoundTripper, req *types.CreateAlarm) (*types.CreateAlarmResponse, error) {\n\tvar reqBody, resBody CreateAlarmBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateChildVM_TaskBody struct {\n\tReq    *types.CreateChildVM_Task         `xml:\"urn:vim25 CreateChildVM_Task,omitempty\"`\n\tRes    *types.CreateChildVM_TaskResponse `xml:\"urn:vim25 CreateChildVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateChildVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateChildVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateChildVM_Task) (*types.CreateChildVM_TaskResponse, error) {\n\tvar reqBody, resBody CreateChildVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateClusterBody struct {\n\tReq    *types.CreateCluster         `xml:\"urn:vim25 CreateCluster,omitempty\"`\n\tRes    *types.CreateClusterResponse `xml:\"urn:vim25 CreateClusterResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateClusterBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateCluster(ctx context.Context, r soap.RoundTripper, req *types.CreateCluster) (*types.CreateClusterResponse, error) {\n\tvar reqBody, resBody CreateClusterBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, 
&resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateClusterExBody struct {\n\tReq    *types.CreateClusterEx         `xml:\"urn:vim25 CreateClusterEx,omitempty\"`\n\tRes    *types.CreateClusterExResponse `xml:\"urn:vim25 CreateClusterExResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateClusterExBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateClusterEx(ctx context.Context, r soap.RoundTripper, req *types.CreateClusterEx) (*types.CreateClusterExResponse, error) {\n\tvar reqBody, resBody CreateClusterExBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateCollectorForEventsBody struct {\n\tReq    *types.CreateCollectorForEvents         `xml:\"urn:vim25 CreateCollectorForEvents,omitempty\"`\n\tRes    *types.CreateCollectorForEventsResponse `xml:\"urn:vim25 CreateCollectorForEventsResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateCollectorForEventsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateCollectorForEvents(ctx context.Context, r soap.RoundTripper, req *types.CreateCollectorForEvents) (*types.CreateCollectorForEventsResponse, error) {\n\tvar reqBody, resBody CreateCollectorForEventsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateCollectorForTasksBody struct {\n\tReq    *types.CreateCollectorForTasks         `xml:\"urn:vim25 CreateCollectorForTasks,omitempty\"`\n\tRes    *types.CreateCollectorForTasksResponse `xml:\"urn:vim25 CreateCollectorForTasksResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ 
Fault,omitempty\"`\n}\n\nfunc (b *CreateCollectorForTasksBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateCollectorForTasks(ctx context.Context, r soap.RoundTripper, req *types.CreateCollectorForTasks) (*types.CreateCollectorForTasksResponse, error) {\n\tvar reqBody, resBody CreateCollectorForTasksBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateContainerViewBody struct {\n\tReq    *types.CreateContainerView         `xml:\"urn:vim25 CreateContainerView,omitempty\"`\n\tRes    *types.CreateContainerViewResponse `xml:\"urn:vim25 CreateContainerViewResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateContainerViewBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateContainerView(ctx context.Context, r soap.RoundTripper, req *types.CreateContainerView) (*types.CreateContainerViewResponse, error) {\n\tvar reqBody, resBody CreateContainerViewBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateCustomizationSpecBody struct {\n\tReq    *types.CreateCustomizationSpec         `xml:\"urn:vim25 CreateCustomizationSpec,omitempty\"`\n\tRes    *types.CreateCustomizationSpecResponse `xml:\"urn:vim25 CreateCustomizationSpecResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.CreateCustomizationSpec) (*types.CreateCustomizationSpecResponse, error) {\n\tvar reqBody, resBody CreateCustomizationSpecBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil 
{\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateDVPortgroup_TaskBody struct {\n\tReq    *types.CreateDVPortgroup_Task         `xml:\"urn:vim25 CreateDVPortgroup_Task,omitempty\"`\n\tRes    *types.CreateDVPortgroup_TaskResponse `xml:\"urn:vim25 CreateDVPortgroup_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateDVPortgroup_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateDVPortgroup_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateDVPortgroup_Task) (*types.CreateDVPortgroup_TaskResponse, error) {\n\tvar reqBody, resBody CreateDVPortgroup_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateDVS_TaskBody struct {\n\tReq    *types.CreateDVS_Task         `xml:\"urn:vim25 CreateDVS_Task,omitempty\"`\n\tRes    *types.CreateDVS_TaskResponse `xml:\"urn:vim25 CreateDVS_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateDVS_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateDVS_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateDVS_Task) (*types.CreateDVS_TaskResponse, error) {\n\tvar reqBody, resBody CreateDVS_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateDatacenterBody struct {\n\tReq    *types.CreateDatacenter         `xml:\"urn:vim25 CreateDatacenter,omitempty\"`\n\tRes    *types.CreateDatacenterResponse `xml:\"urn:vim25 CreateDatacenterResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateDatacenterBody) Fault() *soap.Fault { return b.Fault_ 
}\n\nfunc CreateDatacenter(ctx context.Context, r soap.RoundTripper, req *types.CreateDatacenter) (*types.CreateDatacenterResponse, error) {\n\tvar reqBody, resBody CreateDatacenterBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateDefaultProfileBody struct {\n\tReq    *types.CreateDefaultProfile         `xml:\"urn:vim25 CreateDefaultProfile,omitempty\"`\n\tRes    *types.CreateDefaultProfileResponse `xml:\"urn:vim25 CreateDefaultProfileResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateDefaultProfileBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateDefaultProfile(ctx context.Context, r soap.RoundTripper, req *types.CreateDefaultProfile) (*types.CreateDefaultProfileResponse, error) {\n\tvar reqBody, resBody CreateDefaultProfileBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateDescriptorBody struct {\n\tReq    *types.CreateDescriptor         `xml:\"urn:vim25 CreateDescriptor,omitempty\"`\n\tRes    *types.CreateDescriptorResponse `xml:\"urn:vim25 CreateDescriptorResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateDescriptorBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateDescriptor(ctx context.Context, r soap.RoundTripper, req *types.CreateDescriptor) (*types.CreateDescriptorResponse, error) {\n\tvar reqBody, resBody CreateDescriptorBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateDiagnosticPartitionBody struct {\n\tReq    *types.CreateDiagnosticPartition         `xml:\"urn:vim25 
CreateDiagnosticPartition,omitempty\"`\n\tRes    *types.CreateDiagnosticPartitionResponse `xml:\"urn:vim25 CreateDiagnosticPartitionResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateDiagnosticPartitionBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateDiagnosticPartition(ctx context.Context, r soap.RoundTripper, req *types.CreateDiagnosticPartition) (*types.CreateDiagnosticPartitionResponse, error) {\n\tvar reqBody, resBody CreateDiagnosticPartitionBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateDirectoryBody struct {\n\tReq    *types.CreateDirectory         `xml:\"urn:vim25 CreateDirectory,omitempty\"`\n\tRes    *types.CreateDirectoryResponse `xml:\"urn:vim25 CreateDirectoryResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateDirectoryBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateDirectory(ctx context.Context, r soap.RoundTripper, req *types.CreateDirectory) (*types.CreateDirectoryResponse, error) {\n\tvar reqBody, resBody CreateDirectoryBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateDisk_TaskBody struct {\n\tReq    *types.CreateDisk_Task         `xml:\"urn:vim25 CreateDisk_Task,omitempty\"`\n\tRes    *types.CreateDisk_TaskResponse `xml:\"urn:vim25 CreateDisk_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateDisk_Task) (*types.CreateDisk_TaskResponse, error) 
{\n\tvar reqBody, resBody CreateDisk_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateFilterBody struct {\n\tReq    *types.CreateFilter         `xml:\"urn:vim25 CreateFilter,omitempty\"`\n\tRes    *types.CreateFilterResponse `xml:\"urn:vim25 CreateFilterResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateFilterBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateFilter(ctx context.Context, r soap.RoundTripper, req *types.CreateFilter) (*types.CreateFilterResponse, error) {\n\tvar reqBody, resBody CreateFilterBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateFolderBody struct {\n\tReq    *types.CreateFolder         `xml:\"urn:vim25 CreateFolder,omitempty\"`\n\tRes    *types.CreateFolderResponse `xml:\"urn:vim25 CreateFolderResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateFolderBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateFolder(ctx context.Context, r soap.RoundTripper, req *types.CreateFolder) (*types.CreateFolderResponse, error) {\n\tvar reqBody, resBody CreateFolderBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateGroupBody struct {\n\tReq    *types.CreateGroup         `xml:\"urn:vim25 CreateGroup,omitempty\"`\n\tRes    *types.CreateGroupResponse `xml:\"urn:vim25 CreateGroupResponse,omitempty\"`\n\tFault_ *soap.Fault                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateGroupBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateGroup(ctx 
context.Context, r soap.RoundTripper, req *types.CreateGroup) (*types.CreateGroupResponse, error) {\n\tvar reqBody, resBody CreateGroupBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateImportSpecBody struct {\n\tReq    *types.CreateImportSpec         `xml:\"urn:vim25 CreateImportSpec,omitempty\"`\n\tRes    *types.CreateImportSpecResponse `xml:\"urn:vim25 CreateImportSpecResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateImportSpecBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateImportSpec(ctx context.Context, r soap.RoundTripper, req *types.CreateImportSpec) (*types.CreateImportSpecResponse, error) {\n\tvar reqBody, resBody CreateImportSpecBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateInventoryViewBody struct {\n\tReq    *types.CreateInventoryView         `xml:\"urn:vim25 CreateInventoryView,omitempty\"`\n\tRes    *types.CreateInventoryViewResponse `xml:\"urn:vim25 CreateInventoryViewResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateInventoryViewBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateInventoryView(ctx context.Context, r soap.RoundTripper, req *types.CreateInventoryView) (*types.CreateInventoryViewResponse, error) {\n\tvar reqBody, resBody CreateInventoryViewBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateIpPoolBody struct {\n\tReq    *types.CreateIpPool         `xml:\"urn:vim25 CreateIpPool,omitempty\"`\n\tRes    *types.CreateIpPoolResponse `xml:\"urn:vim25 
CreateIpPoolResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateIpPoolBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateIpPool(ctx context.Context, r soap.RoundTripper, req *types.CreateIpPool) (*types.CreateIpPoolResponse, error) {\n\tvar reqBody, resBody CreateIpPoolBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateListViewBody struct {\n\tReq    *types.CreateListView         `xml:\"urn:vim25 CreateListView,omitempty\"`\n\tRes    *types.CreateListViewResponse `xml:\"urn:vim25 CreateListViewResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateListViewBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateListView(ctx context.Context, r soap.RoundTripper, req *types.CreateListView) (*types.CreateListViewResponse, error) {\n\tvar reqBody, resBody CreateListViewBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateListViewFromViewBody struct {\n\tReq    *types.CreateListViewFromView         `xml:\"urn:vim25 CreateListViewFromView,omitempty\"`\n\tRes    *types.CreateListViewFromViewResponse `xml:\"urn:vim25 CreateListViewFromViewResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateListViewFromViewBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateListViewFromView(ctx context.Context, r soap.RoundTripper, req *types.CreateListViewFromView) (*types.CreateListViewFromViewResponse, error) {\n\tvar reqBody, resBody CreateListViewFromViewBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil 
{\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateLocalDatastoreBody struct {\n\tReq    *types.CreateLocalDatastore         `xml:\"urn:vim25 CreateLocalDatastore,omitempty\"`\n\tRes    *types.CreateLocalDatastoreResponse `xml:\"urn:vim25 CreateLocalDatastoreResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateLocalDatastoreBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateLocalDatastore(ctx context.Context, r soap.RoundTripper, req *types.CreateLocalDatastore) (*types.CreateLocalDatastoreResponse, error) {\n\tvar reqBody, resBody CreateLocalDatastoreBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateNasDatastoreBody struct {\n\tReq    *types.CreateNasDatastore         `xml:\"urn:vim25 CreateNasDatastore,omitempty\"`\n\tRes    *types.CreateNasDatastoreResponse `xml:\"urn:vim25 CreateNasDatastoreResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateNasDatastoreBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateNasDatastore(ctx context.Context, r soap.RoundTripper, req *types.CreateNasDatastore) (*types.CreateNasDatastoreResponse, error) {\n\tvar reqBody, resBody CreateNasDatastoreBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateObjectScheduledTaskBody struct {\n\tReq    *types.CreateObjectScheduledTask         `xml:\"urn:vim25 CreateObjectScheduledTask,omitempty\"`\n\tRes    *types.CreateObjectScheduledTaskResponse `xml:\"urn:vim25 CreateObjectScheduledTaskResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ 
Fault,omitempty\"`\n}\n\nfunc (b *CreateObjectScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateObjectScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.CreateObjectScheduledTask) (*types.CreateObjectScheduledTaskResponse, error) {\n\tvar reqBody, resBody CreateObjectScheduledTaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreatePerfIntervalBody struct {\n\tReq    *types.CreatePerfInterval         `xml:\"urn:vim25 CreatePerfInterval,omitempty\"`\n\tRes    *types.CreatePerfIntervalResponse `xml:\"urn:vim25 CreatePerfIntervalResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreatePerfIntervalBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreatePerfInterval(ctx context.Context, r soap.RoundTripper, req *types.CreatePerfInterval) (*types.CreatePerfIntervalResponse, error) {\n\tvar reqBody, resBody CreatePerfIntervalBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateProfileBody struct {\n\tReq    *types.CreateProfile         `xml:\"urn:vim25 CreateProfile,omitempty\"`\n\tRes    *types.CreateProfileResponse `xml:\"urn:vim25 CreateProfileResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateProfileBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateProfile(ctx context.Context, r soap.RoundTripper, req *types.CreateProfile) (*types.CreateProfileResponse, error) {\n\tvar reqBody, resBody CreateProfileBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreatePropertyCollectorBody struct {\n\tReq    
*types.CreatePropertyCollector         `xml:\"urn:vim25 CreatePropertyCollector,omitempty\"`\n\tRes    *types.CreatePropertyCollectorResponse `xml:\"urn:vim25 CreatePropertyCollectorResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreatePropertyCollectorBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreatePropertyCollector(ctx context.Context, r soap.RoundTripper, req *types.CreatePropertyCollector) (*types.CreatePropertyCollectorResponse, error) {\n\tvar reqBody, resBody CreatePropertyCollectorBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateRegistryKeyInGuestBody struct {\n\tReq    *types.CreateRegistryKeyInGuest         `xml:\"urn:vim25 CreateRegistryKeyInGuest,omitempty\"`\n\tRes    *types.CreateRegistryKeyInGuestResponse `xml:\"urn:vim25 CreateRegistryKeyInGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateRegistryKeyInGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateRegistryKeyInGuest(ctx context.Context, r soap.RoundTripper, req *types.CreateRegistryKeyInGuest) (*types.CreateRegistryKeyInGuestResponse, error) {\n\tvar reqBody, resBody CreateRegistryKeyInGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateResourcePoolBody struct {\n\tReq    *types.CreateResourcePool         `xml:\"urn:vim25 CreateResourcePool,omitempty\"`\n\tRes    *types.CreateResourcePoolResponse `xml:\"urn:vim25 CreateResourcePoolResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateResourcePoolBody) Fault() *soap.Fault 
{ return b.Fault_ }\n\nfunc CreateResourcePool(ctx context.Context, r soap.RoundTripper, req *types.CreateResourcePool) (*types.CreateResourcePoolResponse, error) {\n\tvar reqBody, resBody CreateResourcePoolBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateScheduledTaskBody struct {\n\tReq    *types.CreateScheduledTask         `xml:\"urn:vim25 CreateScheduledTask,omitempty\"`\n\tRes    *types.CreateScheduledTaskResponse `xml:\"urn:vim25 CreateScheduledTaskResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.CreateScheduledTask) (*types.CreateScheduledTaskResponse, error) {\n\tvar reqBody, resBody CreateScheduledTaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateScreenshot_TaskBody struct {\n\tReq    *types.CreateScreenshot_Task         `xml:\"urn:vim25 CreateScreenshot_Task,omitempty\"`\n\tRes    *types.CreateScreenshot_TaskResponse `xml:\"urn:vim25 CreateScreenshot_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateScreenshot_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateScreenshot_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateScreenshot_Task) (*types.CreateScreenshot_TaskResponse, error) {\n\tvar reqBody, resBody CreateScreenshot_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateSecondaryVMEx_TaskBody struct {\n\tReq    
*types.CreateSecondaryVMEx_Task         `xml:\"urn:vim25 CreateSecondaryVMEx_Task,omitempty\"`\n\tRes    *types.CreateSecondaryVMEx_TaskResponse `xml:\"urn:vim25 CreateSecondaryVMEx_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateSecondaryVMEx_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateSecondaryVMEx_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateSecondaryVMEx_Task) (*types.CreateSecondaryVMEx_TaskResponse, error) {\n\tvar reqBody, resBody CreateSecondaryVMEx_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateSecondaryVM_TaskBody struct {\n\tReq    *types.CreateSecondaryVM_Task         `xml:\"urn:vim25 CreateSecondaryVM_Task,omitempty\"`\n\tRes    *types.CreateSecondaryVM_TaskResponse `xml:\"urn:vim25 CreateSecondaryVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateSecondaryVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateSecondaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateSecondaryVM_Task) (*types.CreateSecondaryVM_TaskResponse, error) {\n\tvar reqBody, resBody CreateSecondaryVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateSnapshotEx_TaskBody struct {\n\tReq    *types.CreateSnapshotEx_Task         `xml:\"urn:vim25 CreateSnapshotEx_Task,omitempty\"`\n\tRes    *types.CreateSnapshotEx_TaskResponse `xml:\"urn:vim25 CreateSnapshotEx_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateSnapshotEx_TaskBody) Fault() 
*soap.Fault { return b.Fault_ }\n\nfunc CreateSnapshotEx_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateSnapshotEx_Task) (*types.CreateSnapshotEx_TaskResponse, error) {\n\tvar reqBody, resBody CreateSnapshotEx_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateSnapshot_TaskBody struct {\n\tReq    *types.CreateSnapshot_Task         `xml:\"urn:vim25 CreateSnapshot_Task,omitempty\"`\n\tRes    *types.CreateSnapshot_TaskResponse `xml:\"urn:vim25 CreateSnapshot_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateSnapshot_Task) (*types.CreateSnapshot_TaskResponse, error) {\n\tvar reqBody, resBody CreateSnapshot_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateStoragePodBody struct {\n\tReq    *types.CreateStoragePod         `xml:\"urn:vim25 CreateStoragePod,omitempty\"`\n\tRes    *types.CreateStoragePodResponse `xml:\"urn:vim25 CreateStoragePodResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *CreateStoragePodBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc CreateStoragePod(ctx context.Context, r soap.RoundTripper, req *types.CreateStoragePod) (*types.CreateStoragePodResponse, error) {\n\tvar reqBody, resBody CreateStoragePodBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype CreateTaskBody struct {\n\tReq    *types.CreateTask         `xml:\"urn:vim25 
CreateTask,omitempty"`
	Res    *types.CreateTaskResponse `xml:"urn:vim25 CreateTaskResponse,omitempty"`
	Fault_ *soap.Fault               `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *CreateTaskBody) Fault() *soap.Fault { return b.Fault_ }

func CreateTask(ctx context.Context, r soap.RoundTripper, req *types.CreateTask) (*types.CreateTaskResponse, error) {
	var reqBody, resBody CreateTaskBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

// NOTE(review): the declarations in this region appear to be machine-generated
// SOAP stubs (the govmomi vim25/methods pattern). Each vSphere API operation
// <Op> gets three pieces: an <Op>Body envelope struct whose Req/Res fields
// carry urn:vim25 XML element tags and whose Fault_ field captures a SOAP
// fault from the http://schemas.xmlsoap.org/soap/envelope/ namespace; a
// Fault() accessor so the transport layer can surface that fault; and an <Op>
// function that stores the request in the envelope, performs exactly one
// soap.RoundTrip, and returns the decoded response (or the transport error).
// Presumably emitted by a code generator — confirm against the file's
// generation header (outside this view) before editing by hand.

type CreateTemporaryDirectoryInGuestBody struct {
	Req    *types.CreateTemporaryDirectoryInGuest         `xml:"urn:vim25 CreateTemporaryDirectoryInGuest,omitempty"`
	Res    *types.CreateTemporaryDirectoryInGuestResponse `xml:"urn:vim25 CreateTemporaryDirectoryInGuestResponse,omitempty"`
	Fault_ *soap.Fault                                    `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *CreateTemporaryDirectoryInGuestBody) Fault() *soap.Fault { return b.Fault_ }

func CreateTemporaryDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types.CreateTemporaryDirectoryInGuest) (*types.CreateTemporaryDirectoryInGuestResponse, error) {
	var reqBody, resBody CreateTemporaryDirectoryInGuestBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type CreateTemporaryFileInGuestBody struct {
	Req    *types.CreateTemporaryFileInGuest         `xml:"urn:vim25 CreateTemporaryFileInGuest,omitempty"`
	Res    *types.CreateTemporaryFileInGuestResponse `xml:"urn:vim25 CreateTemporaryFileInGuestResponse,omitempty"`
	Fault_ *soap.Fault                               `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *CreateTemporaryFileInGuestBody) Fault() *soap.Fault { return b.Fault_ }

func CreateTemporaryFileInGuest(ctx context.Context, r soap.RoundTripper, req *types.CreateTemporaryFileInGuest) (*types.CreateTemporaryFileInGuestResponse, error) {
	var reqBody, resBody CreateTemporaryFileInGuestBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type CreateUserBody struct {
	Req    *types.CreateUser         `xml:"urn:vim25 CreateUser,omitempty"`
	Res    *types.CreateUserResponse `xml:"urn:vim25 CreateUserResponse,omitempty"`
	Fault_ *soap.Fault               `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *CreateUserBody) Fault() *soap.Fault { return b.Fault_ }

func CreateUser(ctx context.Context, r soap.RoundTripper, req *types.CreateUser) (*types.CreateUserResponse, error) {
	var reqBody, resBody CreateUserBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type CreateVAppBody struct {
	Req    *types.CreateVApp         `xml:"urn:vim25 CreateVApp,omitempty"`
	Res    *types.CreateVAppResponse `xml:"urn:vim25 CreateVAppResponse,omitempty"`
	Fault_ *soap.Fault               `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *CreateVAppBody) Fault() *soap.Fault { return b.Fault_ }

func CreateVApp(ctx context.Context, r soap.RoundTripper, req *types.CreateVApp) (*types.CreateVAppResponse, error) {
	var reqBody, resBody CreateVAppBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type CreateVM_TaskBody struct {
	Req    *types.CreateVM_Task         `xml:"urn:vim25 CreateVM_Task,omitempty"`
	Res    *types.CreateVM_TaskResponse `xml:"urn:vim25 CreateVM_TaskResponse,omitempty"`
	Fault_ *soap.Fault                  `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *CreateVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }

func CreateVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateVM_Task) (*types.CreateVM_TaskResponse, error) {
	var reqBody, resBody CreateVM_TaskBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type CreateVirtualDisk_TaskBody struct {
	Req    *types.CreateVirtualDisk_Task         `xml:"urn:vim25 CreateVirtualDisk_Task,omitempty"`
	Res    *types.CreateVirtualDisk_TaskResponse `xml:"urn:vim25 CreateVirtualDisk_TaskResponse,omitempty"`
	Fault_ *soap.Fault                           `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *CreateVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }

func CreateVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateVirtualDisk_Task) (*types.CreateVirtualDisk_TaskResponse, error) {
	var reqBody, resBody CreateVirtualDisk_TaskBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type CreateVmfsDatastoreBody struct {
	Req    *types.CreateVmfsDatastore         `xml:"urn:vim25 CreateVmfsDatastore,omitempty"`
	Res    *types.CreateVmfsDatastoreResponse `xml:"urn:vim25 CreateVmfsDatastoreResponse,omitempty"`
	Fault_ *soap.Fault                        `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *CreateVmfsDatastoreBody) Fault() *soap.Fault { return b.Fault_ }

func CreateVmfsDatastore(ctx context.Context, r soap.RoundTripper, req *types.CreateVmfsDatastore) (*types.CreateVmfsDatastoreResponse, error) {
	var reqBody, resBody CreateVmfsDatastoreBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type CreateVvolDatastoreBody struct {
	Req    *types.CreateVvolDatastore         `xml:"urn:vim25 CreateVvolDatastore,omitempty"`
	Res    *types.CreateVvolDatastoreResponse `xml:"urn:vim25 CreateVvolDatastoreResponse,omitempty"`
	Fault_ *soap.Fault                        `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *CreateVvolDatastoreBody) Fault() *soap.Fault { return b.Fault_ }

func CreateVvolDatastore(ctx context.Context, r soap.RoundTripper, req *types.CreateVvolDatastore) (*types.CreateVvolDatastoreResponse, error) {
	var reqBody, resBody CreateVvolDatastoreBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type CurrentTimeBody struct {
	Req    *types.CurrentTime         `xml:"urn:vim25 CurrentTime,omitempty"`
	Res    *types.CurrentTimeResponse `xml:"urn:vim25 CurrentTimeResponse,omitempty"`
	Fault_ *soap.Fault                `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *CurrentTimeBody) Fault() *soap.Fault { return b.Fault_ }

func CurrentTime(ctx context.Context, r soap.RoundTripper, req *types.CurrentTime) (*types.CurrentTimeResponse, error) {
	var reqBody, resBody CurrentTimeBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type CustomizationSpecItemToXmlBody struct {
	Req    *types.CustomizationSpecItemToXml         `xml:"urn:vim25 CustomizationSpecItemToXml,omitempty"`
	Res    *types.CustomizationSpecItemToXmlResponse `xml:"urn:vim25 CustomizationSpecItemToXmlResponse,omitempty"`
	Fault_ *soap.Fault                               `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *CustomizationSpecItemToXmlBody) Fault() *soap.Fault { return b.Fault_ }

func CustomizationSpecItemToXml(ctx context.Context, r soap.RoundTripper, req *types.CustomizationSpecItemToXml) (*types.CustomizationSpecItemToXmlResponse, error) {
	var reqBody, resBody CustomizationSpecItemToXmlBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type CustomizeVM_TaskBody struct {
	Req    *types.CustomizeVM_Task         `xml:"urn:vim25 CustomizeVM_Task,omitempty"`
	Res    *types.CustomizeVM_TaskResponse `xml:"urn:vim25 CustomizeVM_TaskResponse,omitempty"`
	Fault_ *soap.Fault                     `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *CustomizeVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }

func CustomizeVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CustomizeVM_Task) (*types.CustomizeVM_TaskResponse, error) {
	var reqBody, resBody CustomizeVM_TaskBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DVPortgroupRollback_TaskBody struct {
	Req    *types.DVPortgroupRollback_Task         `xml:"urn:vim25 DVPortgroupRollback_Task,omitempty"`
	Res    *types.DVPortgroupRollback_TaskResponse `xml:"urn:vim25 DVPortgroupRollback_TaskResponse,omitempty"`
	Fault_ *soap.Fault                             `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DVPortgroupRollback_TaskBody) Fault() *soap.Fault { return b.Fault_ }

func DVPortgroupRollback_Task(ctx context.Context, r soap.RoundTripper, req *types.DVPortgroupRollback_Task) (*types.DVPortgroupRollback_TaskResponse, error) {
	var reqBody, resBody DVPortgroupRollback_TaskBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DVSManagerExportEntity_TaskBody struct {
	Req    
*types.DVSManagerExportEntity_Task         `xml:"urn:vim25 DVSManagerExportEntity_Task,omitempty"`
	Res    *types.DVSManagerExportEntity_TaskResponse `xml:"urn:vim25 DVSManagerExportEntity_TaskResponse,omitempty"`
	Fault_ *soap.Fault                                `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DVSManagerExportEntity_TaskBody) Fault() *soap.Fault { return b.Fault_ }

func DVSManagerExportEntity_Task(ctx context.Context, r soap.RoundTripper, req *types.DVSManagerExportEntity_Task) (*types.DVSManagerExportEntity_TaskResponse, error) {
	var reqBody, resBody DVSManagerExportEntity_TaskBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

// NOTE(review): generated-looking SOAP wrappers continue below — one <Op>Body
// envelope struct (urn:vim25-tagged Req/Res plus a SOAP Fault_ slot), one
// Fault() accessor, and one <Op> function performing a single soap.RoundTrip
// per vSphere API operation. Prefer regenerating over hand edits; confirm the
// generator header outside this view.

type DVSManagerImportEntity_TaskBody struct {
	Req    *types.DVSManagerImportEntity_Task         `xml:"urn:vim25 DVSManagerImportEntity_Task,omitempty"`
	Res    *types.DVSManagerImportEntity_TaskResponse `xml:"urn:vim25 DVSManagerImportEntity_TaskResponse,omitempty"`
	Fault_ *soap.Fault                                `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DVSManagerImportEntity_TaskBody) Fault() *soap.Fault { return b.Fault_ }

func DVSManagerImportEntity_Task(ctx context.Context, r soap.RoundTripper, req *types.DVSManagerImportEntity_Task) (*types.DVSManagerImportEntity_TaskResponse, error) {
	var reqBody, resBody DVSManagerImportEntity_TaskBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DVSManagerLookupDvPortGroupBody struct {
	Req    *types.DVSManagerLookupDvPortGroup         `xml:"urn:vim25 DVSManagerLookupDvPortGroup,omitempty"`
	Res    *types.DVSManagerLookupDvPortGroupResponse `xml:"urn:vim25 DVSManagerLookupDvPortGroupResponse,omitempty"`
	Fault_ *soap.Fault                                `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DVSManagerLookupDvPortGroupBody) Fault() *soap.Fault { return b.Fault_ }

func DVSManagerLookupDvPortGroup(ctx context.Context, r soap.RoundTripper, req *types.DVSManagerLookupDvPortGroup) (*types.DVSManagerLookupDvPortGroupResponse, error) {
	var reqBody, resBody DVSManagerLookupDvPortGroupBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DVSRollback_TaskBody struct {
	Req    *types.DVSRollback_Task         `xml:"urn:vim25 DVSRollback_Task,omitempty"`
	Res    *types.DVSRollback_TaskResponse `xml:"urn:vim25 DVSRollback_TaskResponse,omitempty"`
	Fault_ *soap.Fault                     `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DVSRollback_TaskBody) Fault() *soap.Fault { return b.Fault_ }

func DVSRollback_Task(ctx context.Context, r soap.RoundTripper, req *types.DVSRollback_Task) (*types.DVSRollback_TaskResponse, error) {
	var reqBody, resBody DVSRollback_TaskBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DatastoreEnterMaintenanceModeBody struct {
	Req    *types.DatastoreEnterMaintenanceMode         `xml:"urn:vim25 DatastoreEnterMaintenanceMode,omitempty"`
	Res    *types.DatastoreEnterMaintenanceModeResponse `xml:"urn:vim25 DatastoreEnterMaintenanceModeResponse,omitempty"`
	Fault_ *soap.Fault                                  `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DatastoreEnterMaintenanceModeBody) Fault() *soap.Fault { return b.Fault_ }

func DatastoreEnterMaintenanceMode(ctx context.Context, r soap.RoundTripper, req *types.DatastoreEnterMaintenanceMode) (*types.DatastoreEnterMaintenanceModeResponse, error) {
	var reqBody, resBody DatastoreEnterMaintenanceModeBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DatastoreExitMaintenanceMode_TaskBody struct {
	Req    *types.DatastoreExitMaintenanceMode_Task         `xml:"urn:vim25 DatastoreExitMaintenanceMode_Task,omitempty"`
	Res    *types.DatastoreExitMaintenanceMode_TaskResponse `xml:"urn:vim25 DatastoreExitMaintenanceMode_TaskResponse,omitempty"`
	Fault_ *soap.Fault                                      `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DatastoreExitMaintenanceMode_TaskBody) Fault() *soap.Fault { return b.Fault_ }

func DatastoreExitMaintenanceMode_Task(ctx context.Context, r soap.RoundTripper, req *types.DatastoreExitMaintenanceMode_Task) (*types.DatastoreExitMaintenanceMode_TaskResponse, error) {
	var reqBody, resBody DatastoreExitMaintenanceMode_TaskBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DecodeLicenseBody struct {
	Req    *types.DecodeLicense         `xml:"urn:vim25 DecodeLicense,omitempty"`
	Res    *types.DecodeLicenseResponse `xml:"urn:vim25 DecodeLicenseResponse,omitempty"`
	Fault_ *soap.Fault                  `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DecodeLicenseBody) Fault() *soap.Fault { return b.Fault_ }

func DecodeLicense(ctx context.Context, r soap.RoundTripper, req *types.DecodeLicense) (*types.DecodeLicenseResponse, error) {
	var reqBody, resBody DecodeLicenseBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DefragmentAllDisksBody struct {
	Req    *types.DefragmentAllDisks         `xml:"urn:vim25 DefragmentAllDisks,omitempty"`
	Res    *types.DefragmentAllDisksResponse `xml:"urn:vim25 DefragmentAllDisksResponse,omitempty"`
	Fault_ *soap.Fault                       `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DefragmentAllDisksBody) Fault() *soap.Fault { return b.Fault_ }

func DefragmentAllDisks(ctx context.Context, r soap.RoundTripper, req *types.DefragmentAllDisks) (*types.DefragmentAllDisksResponse, error) {
	var reqBody, resBody DefragmentAllDisksBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DefragmentVirtualDisk_TaskBody struct {
	Req    *types.DefragmentVirtualDisk_Task         `xml:"urn:vim25 DefragmentVirtualDisk_Task,omitempty"`
	Res    *types.DefragmentVirtualDisk_TaskResponse `xml:"urn:vim25 DefragmentVirtualDisk_TaskResponse,omitempty"`
	Fault_ *soap.Fault                               `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DefragmentVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }

func DefragmentVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.DefragmentVirtualDisk_Task) (*types.DefragmentVirtualDisk_TaskResponse, error) {
	var reqBody, resBody DefragmentVirtualDisk_TaskBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DeleteCustomizationSpecBody struct {
	Req    *types.DeleteCustomizationSpec         `xml:"urn:vim25 DeleteCustomizationSpec,omitempty"`
	Res    *types.DeleteCustomizationSpecResponse `xml:"urn:vim25 DeleteCustomizationSpecResponse,omitempty"`
	Fault_ *soap.Fault                            `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeleteCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ }

func DeleteCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.DeleteCustomizationSpec) (*types.DeleteCustomizationSpecResponse, error) {
	var reqBody, resBody DeleteCustomizationSpecBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DeleteDatastoreFile_TaskBody struct {
	Req    *types.DeleteDatastoreFile_Task         `xml:"urn:vim25 DeleteDatastoreFile_Task,omitempty"`
	Res    *types.DeleteDatastoreFile_TaskResponse `xml:"urn:vim25 DeleteDatastoreFile_TaskResponse,omitempty"`
	Fault_ *soap.Fault                             `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeleteDatastoreFile_TaskBody) Fault() *soap.Fault { return b.Fault_ }

func DeleteDatastoreFile_Task(ctx context.Context, r soap.RoundTripper, req *types.DeleteDatastoreFile_Task) (*types.DeleteDatastoreFile_TaskResponse, error) {
	var reqBody, resBody DeleteDatastoreFile_TaskBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DeleteDirectoryBody struct {
	Req    *types.DeleteDirectory         `xml:"urn:vim25 DeleteDirectory,omitempty"`
	Res    *types.DeleteDirectoryResponse `xml:"urn:vim25 DeleteDirectoryResponse,omitempty"`
	Fault_ *soap.Fault                    `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeleteDirectoryBody) Fault() *soap.Fault { return b.Fault_ }

func DeleteDirectory(ctx context.Context, r soap.RoundTripper, req *types.DeleteDirectory) (*types.DeleteDirectoryResponse, error) {
	var reqBody, resBody DeleteDirectoryBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DeleteDirectoryInGuestBody struct {
	Req    *types.DeleteDirectoryInGuest         `xml:"urn:vim25 DeleteDirectoryInGuest,omitempty"`
	Res    
*types.DeleteDirectoryInGuestResponse `xml:"urn:vim25 DeleteDirectoryInGuestResponse,omitempty"`
	Fault_ *soap.Fault                           `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeleteDirectoryInGuestBody) Fault() *soap.Fault { return b.Fault_ }

func DeleteDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types.DeleteDirectoryInGuest) (*types.DeleteDirectoryInGuestResponse, error) {
	var reqBody, resBody DeleteDirectoryInGuestBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

// NOTE(review): generated-looking SOAP wrappers continue below — one <Op>Body
// envelope struct (urn:vim25-tagged Req/Res plus a SOAP Fault_ slot), one
// Fault() accessor, and one <Op> function performing a single soap.RoundTrip
// per vSphere API operation. Prefer regenerating over hand edits; confirm the
// generator header outside this view.

type DeleteFileBody struct {
	Req    *types.DeleteFile         `xml:"urn:vim25 DeleteFile,omitempty"`
	Res    *types.DeleteFileResponse `xml:"urn:vim25 DeleteFileResponse,omitempty"`
	Fault_ *soap.Fault               `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeleteFileBody) Fault() *soap.Fault { return b.Fault_ }

func DeleteFile(ctx context.Context, r soap.RoundTripper, req *types.DeleteFile) (*types.DeleteFileResponse, error) {
	var reqBody, resBody DeleteFileBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DeleteFileInGuestBody struct {
	Req    *types.DeleteFileInGuest         `xml:"urn:vim25 DeleteFileInGuest,omitempty"`
	Res    *types.DeleteFileInGuestResponse `xml:"urn:vim25 DeleteFileInGuestResponse,omitempty"`
	Fault_ *soap.Fault                      `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeleteFileInGuestBody) Fault() *soap.Fault { return b.Fault_ }

func DeleteFileInGuest(ctx context.Context, r soap.RoundTripper, req *types.DeleteFileInGuest) (*types.DeleteFileInGuestResponse, error) {
	var reqBody, resBody DeleteFileInGuestBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DeleteHostSpecificationBody struct {
	Req    *types.DeleteHostSpecification         `xml:"urn:vim25 DeleteHostSpecification,omitempty"`
	Res    *types.DeleteHostSpecificationResponse `xml:"urn:vim25 DeleteHostSpecificationResponse,omitempty"`
	Fault_ *soap.Fault                            `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeleteHostSpecificationBody) Fault() *soap.Fault { return b.Fault_ }

func DeleteHostSpecification(ctx context.Context, r soap.RoundTripper, req *types.DeleteHostSpecification) (*types.DeleteHostSpecificationResponse, error) {
	var reqBody, resBody DeleteHostSpecificationBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DeleteHostSubSpecificationBody struct {
	Req    *types.DeleteHostSubSpecification         `xml:"urn:vim25 DeleteHostSubSpecification,omitempty"`
	Res    *types.DeleteHostSubSpecificationResponse `xml:"urn:vim25 DeleteHostSubSpecificationResponse,omitempty"`
	Fault_ *soap.Fault                               `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeleteHostSubSpecificationBody) Fault() *soap.Fault { return b.Fault_ }

func DeleteHostSubSpecification(ctx context.Context, r soap.RoundTripper, req *types.DeleteHostSubSpecification) (*types.DeleteHostSubSpecificationResponse, error) {
	var reqBody, resBody DeleteHostSubSpecificationBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DeleteRegistryKeyInGuestBody struct {
	Req    *types.DeleteRegistryKeyInGuest         `xml:"urn:vim25 DeleteRegistryKeyInGuest,omitempty"`
	Res    *types.DeleteRegistryKeyInGuestResponse `xml:"urn:vim25 DeleteRegistryKeyInGuestResponse,omitempty"`
	Fault_ *soap.Fault                             `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeleteRegistryKeyInGuestBody) Fault() *soap.Fault { return b.Fault_ }

func DeleteRegistryKeyInGuest(ctx context.Context, r soap.RoundTripper, req *types.DeleteRegistryKeyInGuest) (*types.DeleteRegistryKeyInGuestResponse, error) {
	var reqBody, resBody DeleteRegistryKeyInGuestBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DeleteRegistryValueInGuestBody struct {
	Req    *types.DeleteRegistryValueInGuest         `xml:"urn:vim25 DeleteRegistryValueInGuest,omitempty"`
	Res    *types.DeleteRegistryValueInGuestResponse `xml:"urn:vim25 DeleteRegistryValueInGuestResponse,omitempty"`
	Fault_ *soap.Fault                               `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeleteRegistryValueInGuestBody) Fault() *soap.Fault { return b.Fault_ }

func DeleteRegistryValueInGuest(ctx context.Context, r soap.RoundTripper, req *types.DeleteRegistryValueInGuest) (*types.DeleteRegistryValueInGuestResponse, error) {
	var reqBody, resBody DeleteRegistryValueInGuestBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DeleteScsiLunStateBody struct {
	Req    *types.DeleteScsiLunState         `xml:"urn:vim25 DeleteScsiLunState,omitempty"`
	Res    *types.DeleteScsiLunStateResponse `xml:"urn:vim25 DeleteScsiLunStateResponse,omitempty"`
	Fault_ *soap.Fault                       `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeleteScsiLunStateBody) Fault() *soap.Fault { return b.Fault_ }

func DeleteScsiLunState(ctx context.Context, r soap.RoundTripper, req *types.DeleteScsiLunState) (*types.DeleteScsiLunStateResponse, error) {
	var reqBody, resBody DeleteScsiLunStateBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DeleteVStorageObject_TaskBody struct {
	Req    *types.DeleteVStorageObject_Task         `xml:"urn:vim25 DeleteVStorageObject_Task,omitempty"`
	Res    *types.DeleteVStorageObject_TaskResponse `xml:"urn:vim25 DeleteVStorageObject_TaskResponse,omitempty"`
	Fault_ *soap.Fault                              `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeleteVStorageObject_TaskBody) Fault() *soap.Fault { return b.Fault_ }

func DeleteVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *types.DeleteVStorageObject_Task) (*types.DeleteVStorageObject_TaskResponse, error) {
	var reqBody, resBody DeleteVStorageObject_TaskBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DeleteVffsVolumeStateBody struct {
	Req    *types.DeleteVffsVolumeState         `xml:"urn:vim25 DeleteVffsVolumeState,omitempty"`
	Res    *types.DeleteVffsVolumeStateResponse `xml:"urn:vim25 DeleteVffsVolumeStateResponse,omitempty"`
	Fault_ *soap.Fault                          `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeleteVffsVolumeStateBody) Fault() *soap.Fault { return b.Fault_ }

func DeleteVffsVolumeState(ctx context.Context, r soap.RoundTripper, req *types.DeleteVffsVolumeState) (*types.DeleteVffsVolumeStateResponse, error) {
	var reqBody, resBody DeleteVffsVolumeStateBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DeleteVirtualDisk_TaskBody struct {
	Req    *types.DeleteVirtualDisk_Task         `xml:"urn:vim25 DeleteVirtualDisk_Task,omitempty"`
	Res    *types.DeleteVirtualDisk_TaskResponse `xml:"urn:vim25 DeleteVirtualDisk_TaskResponse,omitempty"`
	Fault_ *soap.Fault                           `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeleteVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }

func DeleteVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.DeleteVirtualDisk_Task) (*types.DeleteVirtualDisk_TaskResponse, error) {
	var reqBody, resBody DeleteVirtualDisk_TaskBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DeleteVmfsVolumeStateBody struct {
	Req    *types.DeleteVmfsVolumeState         `xml:"urn:vim25 DeleteVmfsVolumeState,omitempty"`
	Res    *types.DeleteVmfsVolumeStateResponse `xml:"urn:vim25 DeleteVmfsVolumeStateResponse,omitempty"`
	Fault_ *soap.Fault                          `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeleteVmfsVolumeStateBody) Fault() *soap.Fault { return b.Fault_ }

func DeleteVmfsVolumeState(ctx context.Context, r soap.RoundTripper, req *types.DeleteVmfsVolumeState) (*types.DeleteVmfsVolumeStateResponse, error) {
	var reqBody, resBody DeleteVmfsVolumeStateBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DeleteVsanObjectsBody struct {
	Req    *types.DeleteVsanObjects         `xml:"urn:vim25 DeleteVsanObjects,omitempty"`
	Res    *types.DeleteVsanObjectsResponse `xml:"urn:vim25 DeleteVsanObjectsResponse,omitempty"`
	Fault_ *soap.Fault                      `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeleteVsanObjectsBody) Fault() *soap.Fault { return b.Fault_ }

func DeleteVsanObjects(ctx context.Context, r soap.RoundTripper, req 
*types.DeleteVsanObjects) (*types.DeleteVsanObjectsResponse, error) {
	var reqBody, resBody DeleteVsanObjectsBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

// NOTE(review): generated-looking SOAP wrappers continue below — one <Op>Body
// envelope struct (urn:vim25-tagged Req/Res plus a SOAP Fault_ slot), one
// Fault() accessor, and one <Op> function performing a single soap.RoundTrip
// per vSphere API operation. Prefer regenerating over hand edits; confirm the
// generator header outside this view.

type DeselectVnicBody struct {
	Req    *types.DeselectVnic         `xml:"urn:vim25 DeselectVnic,omitempty"`
	Res    *types.DeselectVnicResponse `xml:"urn:vim25 DeselectVnicResponse,omitempty"`
	Fault_ *soap.Fault                 `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeselectVnicBody) Fault() *soap.Fault { return b.Fault_ }

func DeselectVnic(ctx context.Context, r soap.RoundTripper, req *types.DeselectVnic) (*types.DeselectVnicResponse, error) {
	var reqBody, resBody DeselectVnicBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DeselectVnicForNicTypeBody struct {
	Req    *types.DeselectVnicForNicType         `xml:"urn:vim25 DeselectVnicForNicType,omitempty"`
	Res    *types.DeselectVnicForNicTypeResponse `xml:"urn:vim25 DeselectVnicForNicTypeResponse,omitempty"`
	Fault_ *soap.Fault                           `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DeselectVnicForNicTypeBody) Fault() *soap.Fault { return b.Fault_ }

func DeselectVnicForNicType(ctx context.Context, r soap.RoundTripper, req *types.DeselectVnicForNicType) (*types.DeselectVnicForNicTypeResponse, error) {
	var reqBody, resBody DeselectVnicForNicTypeBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DestroyChildrenBody struct {
	Req    *types.DestroyChildren         `xml:"urn:vim25 DestroyChildren,omitempty"`
	Res    *types.DestroyChildrenResponse `xml:"urn:vim25 DestroyChildrenResponse,omitempty"`
	Fault_ *soap.Fault                    `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DestroyChildrenBody) Fault() *soap.Fault { return b.Fault_ }

func DestroyChildren(ctx context.Context, r soap.RoundTripper, req *types.DestroyChildren) (*types.DestroyChildrenResponse, error) {
	var reqBody, resBody DestroyChildrenBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DestroyCollectorBody struct {
	Req    *types.DestroyCollector         `xml:"urn:vim25 DestroyCollector,omitempty"`
	Res    *types.DestroyCollectorResponse `xml:"urn:vim25 DestroyCollectorResponse,omitempty"`
	Fault_ *soap.Fault                     `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DestroyCollectorBody) Fault() *soap.Fault { return b.Fault_ }

func DestroyCollector(ctx context.Context, r soap.RoundTripper, req *types.DestroyCollector) (*types.DestroyCollectorResponse, error) {
	var reqBody, resBody DestroyCollectorBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DestroyDatastoreBody struct {
	Req    *types.DestroyDatastore         `xml:"urn:vim25 DestroyDatastore,omitempty"`
	Res    *types.DestroyDatastoreResponse `xml:"urn:vim25 DestroyDatastoreResponse,omitempty"`
	Fault_ *soap.Fault                     `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DestroyDatastoreBody) Fault() *soap.Fault { return b.Fault_ }

func DestroyDatastore(ctx context.Context, r soap.RoundTripper, req *types.DestroyDatastore) (*types.DestroyDatastoreResponse, error) {
	var reqBody, resBody DestroyDatastoreBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DestroyIpPoolBody struct {
	Req    *types.DestroyIpPool         `xml:"urn:vim25 DestroyIpPool,omitempty"`
	Res    *types.DestroyIpPoolResponse `xml:"urn:vim25 DestroyIpPoolResponse,omitempty"`
	Fault_ *soap.Fault                  `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DestroyIpPoolBody) Fault() *soap.Fault { return b.Fault_ }

func DestroyIpPool(ctx context.Context, r soap.RoundTripper, req *types.DestroyIpPool) (*types.DestroyIpPoolResponse, error) {
	var reqBody, resBody DestroyIpPoolBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DestroyNetworkBody struct {
	Req    *types.DestroyNetwork         `xml:"urn:vim25 DestroyNetwork,omitempty"`
	Res    *types.DestroyNetworkResponse `xml:"urn:vim25 DestroyNetworkResponse,omitempty"`
	Fault_ *soap.Fault                   `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DestroyNetworkBody) Fault() *soap.Fault { return b.Fault_ }

func DestroyNetwork(ctx context.Context, r soap.RoundTripper, req *types.DestroyNetwork) (*types.DestroyNetworkResponse, error) {
	var reqBody, resBody DestroyNetworkBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DestroyProfileBody struct {
	Req    *types.DestroyProfile         `xml:"urn:vim25 DestroyProfile,omitempty"`
	Res    *types.DestroyProfileResponse `xml:"urn:vim25 DestroyProfileResponse,omitempty"`
	Fault_ *soap.Fault                   `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DestroyProfileBody) Fault() *soap.Fault { return b.Fault_ }

func DestroyProfile(ctx context.Context, r soap.RoundTripper, req *types.DestroyProfile) (*types.DestroyProfileResponse, error) {
	var reqBody, resBody DestroyProfileBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DestroyPropertyCollectorBody struct {
	Req    *types.DestroyPropertyCollector         `xml:"urn:vim25 DestroyPropertyCollector,omitempty"`
	Res    *types.DestroyPropertyCollectorResponse `xml:"urn:vim25 DestroyPropertyCollectorResponse,omitempty"`
	Fault_ *soap.Fault                             `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DestroyPropertyCollectorBody) Fault() *soap.Fault { return b.Fault_ }

func DestroyPropertyCollector(ctx context.Context, r soap.RoundTripper, req *types.DestroyPropertyCollector) (*types.DestroyPropertyCollectorResponse, error) {
	var reqBody, resBody DestroyPropertyCollectorBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DestroyPropertyFilterBody struct {
	Req    *types.DestroyPropertyFilter         `xml:"urn:vim25 DestroyPropertyFilter,omitempty"`
	Res    *types.DestroyPropertyFilterResponse `xml:"urn:vim25 DestroyPropertyFilterResponse,omitempty"`
	Fault_ *soap.Fault                          `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DestroyPropertyFilterBody) Fault() *soap.Fault { return b.Fault_ }

func DestroyPropertyFilter(ctx context.Context, r soap.RoundTripper, req *types.DestroyPropertyFilter) (*types.DestroyPropertyFilterResponse, error) {
	var reqBody, resBody DestroyPropertyFilterBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DestroyVffsBody struct {
	Req    *types.DestroyVffs         `xml:"urn:vim25 DestroyVffs,omitempty"`
	Res    *types.DestroyVffsResponse `xml:"urn:vim25 DestroyVffsResponse,omitempty"`
	Fault_ *soap.Fault                `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DestroyVffsBody) Fault() *soap.Fault { return b.Fault_ }

func DestroyVffs(ctx context.Context, r soap.RoundTripper, req *types.DestroyVffs) (*types.DestroyVffsResponse, error) {
	var reqBody, resBody DestroyVffsBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DestroyViewBody struct {
	Req    *types.DestroyView         `xml:"urn:vim25 DestroyView,omitempty"`
	Res    *types.DestroyViewResponse `xml:"urn:vim25 DestroyViewResponse,omitempty"`
	Fault_ *soap.Fault                `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DestroyViewBody) Fault() *soap.Fault { return b.Fault_ }

func DestroyView(ctx context.Context, r soap.RoundTripper, req *types.DestroyView) (*types.DestroyViewResponse, error) {
	var reqBody, resBody DestroyViewBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type Destroy_TaskBody struct {
	Req    *types.Destroy_Task         `xml:"urn:vim25 Destroy_Task,omitempty"`
	Res    *types.Destroy_TaskResponse `xml:"urn:vim25 Destroy_TaskResponse,omitempty"`
	Fault_ *soap.Fault                 `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *Destroy_TaskBody) Fault() *soap.Fault { return b.Fault_ }

func Destroy_Task(ctx context.Context, r soap.RoundTripper, req *types.Destroy_Task) (*types.Destroy_TaskResponse, error) {
	var reqBody, resBody Destroy_TaskBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DetachDisk_TaskBody struct {
	Req    *types.DetachDisk_Task         `xml:"urn:vim25 DetachDisk_Task,omitempty"`
	Res    *types.DetachDisk_TaskResponse `xml:"urn:vim25 DetachDisk_TaskResponse,omitempty"`
	Fault_ *soap.Fault                    `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DetachDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }

func DetachDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.DetachDisk_Task) (*types.DetachDisk_TaskResponse, error) {
	var reqBody, resBody DetachDisk_TaskBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DetachScsiLunBody struct {
	Req    *types.DetachScsiLun         `xml:"urn:vim25 DetachScsiLun,omitempty"`
	Res    *types.DetachScsiLunResponse `xml:"urn:vim25 DetachScsiLunResponse,omitempty"`
	Fault_ *soap.Fault                  `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DetachScsiLunBody) Fault() *soap.Fault { return b.Fault_ }

func DetachScsiLun(ctx context.Context, r soap.RoundTripper, req *types.DetachScsiLun) (*types.DetachScsiLunResponse, error) {
	var reqBody, resBody DetachScsiLunBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil, err
	}

	return resBody.Res, nil
}

type DetachScsiLunEx_TaskBody struct {
	Req    *types.DetachScsiLunEx_Task         `xml:"urn:vim25 DetachScsiLunEx_Task,omitempty"`
	Res    *types.DetachScsiLunEx_TaskResponse `xml:"urn:vim25 DetachScsiLunEx_TaskResponse,omitempty"`
	Fault_ *soap.Fault                         `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *DetachScsiLunEx_TaskBody) Fault() *soap.Fault { return b.Fault_ }

func DetachScsiLunEx_Task(ctx context.Context, r soap.RoundTripper, req *types.DetachScsiLunEx_Task) (*types.DetachScsiLunEx_TaskResponse, error) {
	var reqBody, resBody 
DetachScsiLunEx_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype DetachTagFromVStorageObjectBody struct {\n\tReq    *types.DetachTagFromVStorageObject         `xml:\"urn:vim25 DetachTagFromVStorageObject,omitempty\"`\n\tRes    *types.DetachTagFromVStorageObjectResponse `xml:\"urn:vim25 DetachTagFromVStorageObjectResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *DetachTagFromVStorageObjectBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc DetachTagFromVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.DetachTagFromVStorageObject) (*types.DetachTagFromVStorageObjectResponse, error) {\n\tvar reqBody, resBody DetachTagFromVStorageObjectBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype DisableEvcMode_TaskBody struct {\n\tReq    *types.DisableEvcMode_Task         `xml:\"urn:vim25 DisableEvcMode_Task,omitempty\"`\n\tRes    *types.DisableEvcMode_TaskResponse `xml:\"urn:vim25 DisableEvcMode_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *DisableEvcMode_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc DisableEvcMode_Task(ctx context.Context, r soap.RoundTripper, req *types.DisableEvcMode_Task) (*types.DisableEvcMode_TaskResponse, error) {\n\tvar reqBody, resBody DisableEvcMode_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype DisableFeatureBody struct {\n\tReq    *types.DisableFeature         `xml:\"urn:vim25 DisableFeature,omitempty\"`\n\tRes    *types.DisableFeatureResponse `xml:\"urn:vim25 
DisableFeatureResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *DisableFeatureBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc DisableFeature(ctx context.Context, r soap.RoundTripper, req *types.DisableFeature) (*types.DisableFeatureResponse, error) {\n\tvar reqBody, resBody DisableFeatureBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype DisableHyperThreadingBody struct {\n\tReq    *types.DisableHyperThreading         `xml:\"urn:vim25 DisableHyperThreading,omitempty\"`\n\tRes    *types.DisableHyperThreadingResponse `xml:\"urn:vim25 DisableHyperThreadingResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *DisableHyperThreadingBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc DisableHyperThreading(ctx context.Context, r soap.RoundTripper, req *types.DisableHyperThreading) (*types.DisableHyperThreadingResponse, error) {\n\tvar reqBody, resBody DisableHyperThreadingBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype DisableMultipathPathBody struct {\n\tReq    *types.DisableMultipathPath         `xml:\"urn:vim25 DisableMultipathPath,omitempty\"`\n\tRes    *types.DisableMultipathPathResponse `xml:\"urn:vim25 DisableMultipathPathResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *DisableMultipathPathBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc DisableMultipathPath(ctx context.Context, r soap.RoundTripper, req *types.DisableMultipathPath) (*types.DisableMultipathPathResponse, error) {\n\tvar reqBody, resBody DisableMultipathPathBody\n\n\treqBody.Req = 
req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype DisableRulesetBody struct {\n\tReq    *types.DisableRuleset         `xml:\"urn:vim25 DisableRuleset,omitempty\"`\n\tRes    *types.DisableRulesetResponse `xml:\"urn:vim25 DisableRulesetResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *DisableRulesetBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc DisableRuleset(ctx context.Context, r soap.RoundTripper, req *types.DisableRuleset) (*types.DisableRulesetResponse, error) {\n\tvar reqBody, resBody DisableRulesetBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype DisableSecondaryVM_TaskBody struct {\n\tReq    *types.DisableSecondaryVM_Task         `xml:\"urn:vim25 DisableSecondaryVM_Task,omitempty\"`\n\tRes    *types.DisableSecondaryVM_TaskResponse `xml:\"urn:vim25 DisableSecondaryVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *DisableSecondaryVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc DisableSecondaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types.DisableSecondaryVM_Task) (*types.DisableSecondaryVM_TaskResponse, error) {\n\tvar reqBody, resBody DisableSecondaryVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype DisableSmartCardAuthenticationBody struct {\n\tReq    *types.DisableSmartCardAuthentication         `xml:\"urn:vim25 DisableSmartCardAuthentication,omitempty\"`\n\tRes    *types.DisableSmartCardAuthenticationResponse `xml:\"urn:vim25 DisableSmartCardAuthenticationResponse,omitempty\"`\n\tFault_ *soap.Fault                  
                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *DisableSmartCardAuthenticationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc DisableSmartCardAuthentication(ctx context.Context, r soap.RoundTripper, req *types.DisableSmartCardAuthentication) (*types.DisableSmartCardAuthenticationResponse, error) {\n\tvar reqBody, resBody DisableSmartCardAuthenticationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype DisconnectHost_TaskBody struct {\n\tReq    *types.DisconnectHost_Task         `xml:\"urn:vim25 DisconnectHost_Task,omitempty\"`\n\tRes    *types.DisconnectHost_TaskResponse `xml:\"urn:vim25 DisconnectHost_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *DisconnectHost_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc DisconnectHost_Task(ctx context.Context, r soap.RoundTripper, req *types.DisconnectHost_Task) (*types.DisconnectHost_TaskResponse, error) {\n\tvar reqBody, resBody DisconnectHost_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype DiscoverFcoeHbasBody struct {\n\tReq    *types.DiscoverFcoeHbas         `xml:\"urn:vim25 DiscoverFcoeHbas,omitempty\"`\n\tRes    *types.DiscoverFcoeHbasResponse `xml:\"urn:vim25 DiscoverFcoeHbasResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *DiscoverFcoeHbasBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc DiscoverFcoeHbas(ctx context.Context, r soap.RoundTripper, req *types.DiscoverFcoeHbas) (*types.DiscoverFcoeHbasResponse, error) {\n\tvar reqBody, resBody DiscoverFcoeHbasBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, 
&resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype DissociateProfileBody struct {\n\tReq    *types.DissociateProfile         `xml:\"urn:vim25 DissociateProfile,omitempty\"`\n\tRes    *types.DissociateProfileResponse `xml:\"urn:vim25 DissociateProfileResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *DissociateProfileBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc DissociateProfile(ctx context.Context, r soap.RoundTripper, req *types.DissociateProfile) (*types.DissociateProfileResponse, error) {\n\tvar reqBody, resBody DissociateProfileBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype DoesCustomizationSpecExistBody struct {\n\tReq    *types.DoesCustomizationSpecExist         `xml:\"urn:vim25 DoesCustomizationSpecExist,omitempty\"`\n\tRes    *types.DoesCustomizationSpecExistResponse `xml:\"urn:vim25 DoesCustomizationSpecExistResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *DoesCustomizationSpecExistBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc DoesCustomizationSpecExist(ctx context.Context, r soap.RoundTripper, req *types.DoesCustomizationSpecExist) (*types.DoesCustomizationSpecExistResponse, error) {\n\tvar reqBody, resBody DoesCustomizationSpecExistBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype DuplicateCustomizationSpecBody struct {\n\tReq    *types.DuplicateCustomizationSpec         `xml:\"urn:vim25 DuplicateCustomizationSpec,omitempty\"`\n\tRes    *types.DuplicateCustomizationSpecResponse `xml:\"urn:vim25 DuplicateCustomizationSpecResponse,omitempty\"`\n\tFault_ *soap.Fault                  
             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *DuplicateCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc DuplicateCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.DuplicateCustomizationSpec) (*types.DuplicateCustomizationSpecResponse, error) {\n\tvar reqBody, resBody DuplicateCustomizationSpecBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype DvsReconfigureVmVnicNetworkResourcePool_TaskBody struct {\n\tReq    *types.DvsReconfigureVmVnicNetworkResourcePool_Task         `xml:\"urn:vim25 DvsReconfigureVmVnicNetworkResourcePool_Task,omitempty\"`\n\tRes    *types.DvsReconfigureVmVnicNetworkResourcePool_TaskResponse `xml:\"urn:vim25 DvsReconfigureVmVnicNetworkResourcePool_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *DvsReconfigureVmVnicNetworkResourcePool_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc DvsReconfigureVmVnicNetworkResourcePool_Task(ctx context.Context, r soap.RoundTripper, req *types.DvsReconfigureVmVnicNetworkResourcePool_Task) (*types.DvsReconfigureVmVnicNetworkResourcePool_TaskResponse, error) {\n\tvar reqBody, resBody DvsReconfigureVmVnicNetworkResourcePool_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype EagerZeroVirtualDisk_TaskBody struct {\n\tReq    *types.EagerZeroVirtualDisk_Task         `xml:\"urn:vim25 EagerZeroVirtualDisk_Task,omitempty\"`\n\tRes    *types.EagerZeroVirtualDisk_TaskResponse `xml:\"urn:vim25 EagerZeroVirtualDisk_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b 
*EagerZeroVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc EagerZeroVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.EagerZeroVirtualDisk_Task) (*types.EagerZeroVirtualDisk_TaskResponse, error) {\n\tvar reqBody, resBody EagerZeroVirtualDisk_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype EnableAlarmActionsBody struct {\n\tReq    *types.EnableAlarmActions         `xml:\"urn:vim25 EnableAlarmActions,omitempty\"`\n\tRes    *types.EnableAlarmActionsResponse `xml:\"urn:vim25 EnableAlarmActionsResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *EnableAlarmActionsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc EnableAlarmActions(ctx context.Context, r soap.RoundTripper, req *types.EnableAlarmActions) (*types.EnableAlarmActionsResponse, error) {\n\tvar reqBody, resBody EnableAlarmActionsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype EnableCryptoBody struct {\n\tReq    *types.EnableCrypto         `xml:\"urn:vim25 EnableCrypto,omitempty\"`\n\tRes    *types.EnableCryptoResponse `xml:\"urn:vim25 EnableCryptoResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *EnableCryptoBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc EnableCrypto(ctx context.Context, r soap.RoundTripper, req *types.EnableCrypto) (*types.EnableCryptoResponse, error) {\n\tvar reqBody, resBody EnableCryptoBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype EnableFeatureBody struct {\n\tReq    *types.EnableFeature         `xml:\"urn:vim25 
EnableFeature,omitempty\"`\n\tRes    *types.EnableFeatureResponse `xml:\"urn:vim25 EnableFeatureResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *EnableFeatureBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc EnableFeature(ctx context.Context, r soap.RoundTripper, req *types.EnableFeature) (*types.EnableFeatureResponse, error) {\n\tvar reqBody, resBody EnableFeatureBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype EnableHyperThreadingBody struct {\n\tReq    *types.EnableHyperThreading         `xml:\"urn:vim25 EnableHyperThreading,omitempty\"`\n\tRes    *types.EnableHyperThreadingResponse `xml:\"urn:vim25 EnableHyperThreadingResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *EnableHyperThreadingBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc EnableHyperThreading(ctx context.Context, r soap.RoundTripper, req *types.EnableHyperThreading) (*types.EnableHyperThreadingResponse, error) {\n\tvar reqBody, resBody EnableHyperThreadingBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype EnableMultipathPathBody struct {\n\tReq    *types.EnableMultipathPath         `xml:\"urn:vim25 EnableMultipathPath,omitempty\"`\n\tRes    *types.EnableMultipathPathResponse `xml:\"urn:vim25 EnableMultipathPathResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *EnableMultipathPathBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc EnableMultipathPath(ctx context.Context, r soap.RoundTripper, req *types.EnableMultipathPath) (*types.EnableMultipathPathResponse, error) {\n\tvar reqBody, 
resBody EnableMultipathPathBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype EnableNetworkResourceManagementBody struct {\n\tReq    *types.EnableNetworkResourceManagement         `xml:\"urn:vim25 EnableNetworkResourceManagement,omitempty\"`\n\tRes    *types.EnableNetworkResourceManagementResponse `xml:\"urn:vim25 EnableNetworkResourceManagementResponse,omitempty\"`\n\tFault_ *soap.Fault                                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *EnableNetworkResourceManagementBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc EnableNetworkResourceManagement(ctx context.Context, r soap.RoundTripper, req *types.EnableNetworkResourceManagement) (*types.EnableNetworkResourceManagementResponse, error) {\n\tvar reqBody, resBody EnableNetworkResourceManagementBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype EnableRulesetBody struct {\n\tReq    *types.EnableRuleset         `xml:\"urn:vim25 EnableRuleset,omitempty\"`\n\tRes    *types.EnableRulesetResponse `xml:\"urn:vim25 EnableRulesetResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *EnableRulesetBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc EnableRuleset(ctx context.Context, r soap.RoundTripper, req *types.EnableRuleset) (*types.EnableRulesetResponse, error) {\n\tvar reqBody, resBody EnableRulesetBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype EnableSecondaryVM_TaskBody struct {\n\tReq    *types.EnableSecondaryVM_Task         `xml:\"urn:vim25 EnableSecondaryVM_Task,omitempty\"`\n\tRes    *types.EnableSecondaryVM_TaskResponse 
`xml:\"urn:vim25 EnableSecondaryVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *EnableSecondaryVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc EnableSecondaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types.EnableSecondaryVM_Task) (*types.EnableSecondaryVM_TaskResponse, error) {\n\tvar reqBody, resBody EnableSecondaryVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype EnableSmartCardAuthenticationBody struct {\n\tReq    *types.EnableSmartCardAuthentication         `xml:\"urn:vim25 EnableSmartCardAuthentication,omitempty\"`\n\tRes    *types.EnableSmartCardAuthenticationResponse `xml:\"urn:vim25 EnableSmartCardAuthenticationResponse,omitempty\"`\n\tFault_ *soap.Fault                                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *EnableSmartCardAuthenticationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc EnableSmartCardAuthentication(ctx context.Context, r soap.RoundTripper, req *types.EnableSmartCardAuthentication) (*types.EnableSmartCardAuthenticationResponse, error) {\n\tvar reqBody, resBody EnableSmartCardAuthenticationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype EnterLockdownModeBody struct {\n\tReq    *types.EnterLockdownMode         `xml:\"urn:vim25 EnterLockdownMode,omitempty\"`\n\tRes    *types.EnterLockdownModeResponse `xml:\"urn:vim25 EnterLockdownModeResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *EnterLockdownModeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc EnterLockdownMode(ctx context.Context, r soap.RoundTripper, req 
*types.EnterLockdownMode) (*types.EnterLockdownModeResponse, error) {\n\tvar reqBody, resBody EnterLockdownModeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype EnterMaintenanceMode_TaskBody struct {\n\tReq    *types.EnterMaintenanceMode_Task         `xml:\"urn:vim25 EnterMaintenanceMode_Task,omitempty\"`\n\tRes    *types.EnterMaintenanceMode_TaskResponse `xml:\"urn:vim25 EnterMaintenanceMode_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *EnterMaintenanceMode_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc EnterMaintenanceMode_Task(ctx context.Context, r soap.RoundTripper, req *types.EnterMaintenanceMode_Task) (*types.EnterMaintenanceMode_TaskResponse, error) {\n\tvar reqBody, resBody EnterMaintenanceMode_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype EstimateDatabaseSizeBody struct {\n\tReq    *types.EstimateDatabaseSize         `xml:\"urn:vim25 EstimateDatabaseSize,omitempty\"`\n\tRes    *types.EstimateDatabaseSizeResponse `xml:\"urn:vim25 EstimateDatabaseSizeResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *EstimateDatabaseSizeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc EstimateDatabaseSize(ctx context.Context, r soap.RoundTripper, req *types.EstimateDatabaseSize) (*types.EstimateDatabaseSizeResponse, error) {\n\tvar reqBody, resBody EstimateDatabaseSizeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype EstimateStorageForConsolidateSnapshots_TaskBody struct {\n\tReq    
*types.EstimateStorageForConsolidateSnapshots_Task         `xml:\"urn:vim25 EstimateStorageForConsolidateSnapshots_Task,omitempty\"`\n\tRes    *types.EstimateStorageForConsolidateSnapshots_TaskResponse `xml:\"urn:vim25 EstimateStorageForConsolidateSnapshots_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *EstimateStorageForConsolidateSnapshots_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc EstimateStorageForConsolidateSnapshots_Task(ctx context.Context, r soap.RoundTripper, req *types.EstimateStorageForConsolidateSnapshots_Task) (*types.EstimateStorageForConsolidateSnapshots_TaskResponse, error) {\n\tvar reqBody, resBody EstimateStorageForConsolidateSnapshots_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype EsxAgentHostManagerUpdateConfigBody struct {\n\tReq    *types.EsxAgentHostManagerUpdateConfig         `xml:\"urn:vim25 EsxAgentHostManagerUpdateConfig,omitempty\"`\n\tRes    *types.EsxAgentHostManagerUpdateConfigResponse `xml:\"urn:vim25 EsxAgentHostManagerUpdateConfigResponse,omitempty\"`\n\tFault_ *soap.Fault                                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *EsxAgentHostManagerUpdateConfigBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc EsxAgentHostManagerUpdateConfig(ctx context.Context, r soap.RoundTripper, req *types.EsxAgentHostManagerUpdateConfig) (*types.EsxAgentHostManagerUpdateConfigResponse, error) {\n\tvar reqBody, resBody EsxAgentHostManagerUpdateConfigBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype EvacuateVsanNode_TaskBody struct {\n\tReq    *types.EvacuateVsanNode_Task         `xml:\"urn:vim25 
EvacuateVsanNode_Task,omitempty\"`\n\tRes    *types.EvacuateVsanNode_TaskResponse `xml:\"urn:vim25 EvacuateVsanNode_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *EvacuateVsanNode_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc EvacuateVsanNode_Task(ctx context.Context, r soap.RoundTripper, req *types.EvacuateVsanNode_Task) (*types.EvacuateVsanNode_TaskResponse, error) {\n\tvar reqBody, resBody EvacuateVsanNode_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype EvcManagerBody struct {\n\tReq    *types.EvcManager         `xml:\"urn:vim25 EvcManager,omitempty\"`\n\tRes    *types.EvcManagerResponse `xml:\"urn:vim25 EvcManagerResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *EvcManagerBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc EvcManager(ctx context.Context, r soap.RoundTripper, req *types.EvcManager) (*types.EvcManagerResponse, error) {\n\tvar reqBody, resBody EvcManagerBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ExecuteHostProfileBody struct {\n\tReq    *types.ExecuteHostProfile         `xml:\"urn:vim25 ExecuteHostProfile,omitempty\"`\n\tRes    *types.ExecuteHostProfileResponse `xml:\"urn:vim25 ExecuteHostProfileResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ExecuteHostProfileBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ExecuteHostProfile(ctx context.Context, r soap.RoundTripper, req *types.ExecuteHostProfile) (*types.ExecuteHostProfileResponse, error) {\n\tvar reqBody, resBody ExecuteHostProfileBody\n\n\treqBody.Req 
= req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ExecuteSimpleCommandBody struct {\n\tReq    *types.ExecuteSimpleCommand         `xml:\"urn:vim25 ExecuteSimpleCommand,omitempty\"`\n\tRes    *types.ExecuteSimpleCommandResponse `xml:\"urn:vim25 ExecuteSimpleCommandResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ExecuteSimpleCommandBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ExecuteSimpleCommand(ctx context.Context, r soap.RoundTripper, req *types.ExecuteSimpleCommand) (*types.ExecuteSimpleCommandResponse, error) {\n\tvar reqBody, resBody ExecuteSimpleCommandBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ExitLockdownModeBody struct {\n\tReq    *types.ExitLockdownMode         `xml:\"urn:vim25 ExitLockdownMode,omitempty\"`\n\tRes    *types.ExitLockdownModeResponse `xml:\"urn:vim25 ExitLockdownModeResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ExitLockdownModeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ExitLockdownMode(ctx context.Context, r soap.RoundTripper, req *types.ExitLockdownMode) (*types.ExitLockdownModeResponse, error) {\n\tvar reqBody, resBody ExitLockdownModeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ExitMaintenanceMode_TaskBody struct {\n\tReq    *types.ExitMaintenanceMode_Task         `xml:\"urn:vim25 ExitMaintenanceMode_Task,omitempty\"`\n\tRes    *types.ExitMaintenanceMode_TaskResponse `xml:\"urn:vim25 ExitMaintenanceMode_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                             
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ExitMaintenanceMode_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ExitMaintenanceMode_Task(ctx context.Context, r soap.RoundTripper, req *types.ExitMaintenanceMode_Task) (*types.ExitMaintenanceMode_TaskResponse, error) {\n\tvar reqBody, resBody ExitMaintenanceMode_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ExpandVmfsDatastoreBody struct {\n\tReq    *types.ExpandVmfsDatastore         `xml:\"urn:vim25 ExpandVmfsDatastore,omitempty\"`\n\tRes    *types.ExpandVmfsDatastoreResponse `xml:\"urn:vim25 ExpandVmfsDatastoreResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ExpandVmfsDatastoreBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ExpandVmfsDatastore(ctx context.Context, r soap.RoundTripper, req *types.ExpandVmfsDatastore) (*types.ExpandVmfsDatastoreResponse, error) {\n\tvar reqBody, resBody ExpandVmfsDatastoreBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ExpandVmfsExtentBody struct {\n\tReq    *types.ExpandVmfsExtent         `xml:\"urn:vim25 ExpandVmfsExtent,omitempty\"`\n\tRes    *types.ExpandVmfsExtentResponse `xml:\"urn:vim25 ExpandVmfsExtentResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ExpandVmfsExtentBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ExpandVmfsExtent(ctx context.Context, r soap.RoundTripper, req *types.ExpandVmfsExtent) (*types.ExpandVmfsExtentResponse, error) {\n\tvar reqBody, resBody ExpandVmfsExtentBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, 
err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ExportAnswerFile_TaskBody struct {\n\tReq    *types.ExportAnswerFile_Task         `xml:\"urn:vim25 ExportAnswerFile_Task,omitempty\"`\n\tRes    *types.ExportAnswerFile_TaskResponse `xml:\"urn:vim25 ExportAnswerFile_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ExportAnswerFile_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ExportAnswerFile_Task(ctx context.Context, r soap.RoundTripper, req *types.ExportAnswerFile_Task) (*types.ExportAnswerFile_TaskResponse, error) {\n\tvar reqBody, resBody ExportAnswerFile_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ExportProfileBody struct {\n\tReq    *types.ExportProfile         `xml:\"urn:vim25 ExportProfile,omitempty\"`\n\tRes    *types.ExportProfileResponse `xml:\"urn:vim25 ExportProfileResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ExportProfileBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ExportProfile(ctx context.Context, r soap.RoundTripper, req *types.ExportProfile) (*types.ExportProfileResponse, error) {\n\tvar reqBody, resBody ExportProfileBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ExportSnapshotBody struct {\n\tReq    *types.ExportSnapshot         `xml:\"urn:vim25 ExportSnapshot,omitempty\"`\n\tRes    *types.ExportSnapshotResponse `xml:\"urn:vim25 ExportSnapshotResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ExportSnapshotBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ExportSnapshot(ctx context.Context, r 
soap.RoundTripper, req *types.ExportSnapshot) (*types.ExportSnapshotResponse, error) {\n\tvar reqBody, resBody ExportSnapshotBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ExportVAppBody struct {\n\tReq    *types.ExportVApp         `xml:\"urn:vim25 ExportVApp,omitempty\"`\n\tRes    *types.ExportVAppResponse `xml:\"urn:vim25 ExportVAppResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ExportVAppBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ExportVApp(ctx context.Context, r soap.RoundTripper, req *types.ExportVApp) (*types.ExportVAppResponse, error) {\n\tvar reqBody, resBody ExportVAppBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ExportVmBody struct {\n\tReq    *types.ExportVm         `xml:\"urn:vim25 ExportVm,omitempty\"`\n\tRes    *types.ExportVmResponse `xml:\"urn:vim25 ExportVmResponse,omitempty\"`\n\tFault_ *soap.Fault             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ExportVmBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ExportVm(ctx context.Context, r soap.RoundTripper, req *types.ExportVm) (*types.ExportVmResponse, error) {\n\tvar reqBody, resBody ExportVmBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ExtendDisk_TaskBody struct {\n\tReq    *types.ExtendDisk_Task         `xml:\"urn:vim25 ExtendDisk_Task,omitempty\"`\n\tRes    *types.ExtendDisk_TaskResponse `xml:\"urn:vim25 ExtendDisk_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ExtendDisk_TaskBody) Fault() *soap.Fault { return 
b.Fault_ }\n\nfunc ExtendDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.ExtendDisk_Task) (*types.ExtendDisk_TaskResponse, error) {\n\tvar reqBody, resBody ExtendDisk_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ExtendVffsBody struct {\n\tReq    *types.ExtendVffs         `xml:\"urn:vim25 ExtendVffs,omitempty\"`\n\tRes    *types.ExtendVffsResponse `xml:\"urn:vim25 ExtendVffsResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ExtendVffsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ExtendVffs(ctx context.Context, r soap.RoundTripper, req *types.ExtendVffs) (*types.ExtendVffsResponse, error) {\n\tvar reqBody, resBody ExtendVffsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ExtendVirtualDisk_TaskBody struct {\n\tReq    *types.ExtendVirtualDisk_Task         `xml:\"urn:vim25 ExtendVirtualDisk_Task,omitempty\"`\n\tRes    *types.ExtendVirtualDisk_TaskResponse `xml:\"urn:vim25 ExtendVirtualDisk_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ExtendVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ExtendVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.ExtendVirtualDisk_Task) (*types.ExtendVirtualDisk_TaskResponse, error) {\n\tvar reqBody, resBody ExtendVirtualDisk_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ExtendVmfsDatastoreBody struct {\n\tReq    *types.ExtendVmfsDatastore         `xml:\"urn:vim25 ExtendVmfsDatastore,omitempty\"`\n\tRes    
*types.ExtendVmfsDatastoreResponse `xml:\"urn:vim25 ExtendVmfsDatastoreResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ExtendVmfsDatastoreBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ExtendVmfsDatastore(ctx context.Context, r soap.RoundTripper, req *types.ExtendVmfsDatastore) (*types.ExtendVmfsDatastoreResponse, error) {\n\tvar reqBody, resBody ExtendVmfsDatastoreBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ExtractOvfEnvironmentBody struct {\n\tReq    *types.ExtractOvfEnvironment         `xml:\"urn:vim25 ExtractOvfEnvironment,omitempty\"`\n\tRes    *types.ExtractOvfEnvironmentResponse `xml:\"urn:vim25 ExtractOvfEnvironmentResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ExtractOvfEnvironmentBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ExtractOvfEnvironment(ctx context.Context, r soap.RoundTripper, req *types.ExtractOvfEnvironment) (*types.ExtractOvfEnvironmentResponse, error) {\n\tvar reqBody, resBody ExtractOvfEnvironmentBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FetchDVPortKeysBody struct {\n\tReq    *types.FetchDVPortKeys         `xml:\"urn:vim25 FetchDVPortKeys,omitempty\"`\n\tRes    *types.FetchDVPortKeysResponse `xml:\"urn:vim25 FetchDVPortKeysResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FetchDVPortKeysBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FetchDVPortKeys(ctx context.Context, r soap.RoundTripper, req *types.FetchDVPortKeys) (*types.FetchDVPortKeysResponse, error) {\n\tvar reqBody, resBody 
FetchDVPortKeysBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FetchDVPortsBody struct {\n\tReq    *types.FetchDVPorts         `xml:\"urn:vim25 FetchDVPorts,omitempty\"`\n\tRes    *types.FetchDVPortsResponse `xml:\"urn:vim25 FetchDVPortsResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FetchDVPortsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FetchDVPorts(ctx context.Context, r soap.RoundTripper, req *types.FetchDVPorts) (*types.FetchDVPortsResponse, error) {\n\tvar reqBody, resBody FetchDVPortsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FetchSystemEventLogBody struct {\n\tReq    *types.FetchSystemEventLog         `xml:\"urn:vim25 FetchSystemEventLog,omitempty\"`\n\tRes    *types.FetchSystemEventLogResponse `xml:\"urn:vim25 FetchSystemEventLogResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FetchSystemEventLogBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FetchSystemEventLog(ctx context.Context, r soap.RoundTripper, req *types.FetchSystemEventLog) (*types.FetchSystemEventLogResponse, error) {\n\tvar reqBody, resBody FetchSystemEventLogBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FetchUserPrivilegeOnEntitiesBody struct {\n\tReq    *types.FetchUserPrivilegeOnEntities         `xml:\"urn:vim25 FetchUserPrivilegeOnEntities,omitempty\"`\n\tRes    *types.FetchUserPrivilegeOnEntitiesResponse `xml:\"urn:vim25 FetchUserPrivilegeOnEntitiesResponse,omitempty\"`\n\tFault_ *soap.Fault                                 
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FetchUserPrivilegeOnEntitiesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FetchUserPrivilegeOnEntities(ctx context.Context, r soap.RoundTripper, req *types.FetchUserPrivilegeOnEntities) (*types.FetchUserPrivilegeOnEntitiesResponse, error) {\n\tvar reqBody, resBody FetchUserPrivilegeOnEntitiesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FindAllByDnsNameBody struct {\n\tReq    *types.FindAllByDnsName         `xml:\"urn:vim25 FindAllByDnsName,omitempty\"`\n\tRes    *types.FindAllByDnsNameResponse `xml:\"urn:vim25 FindAllByDnsNameResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FindAllByDnsNameBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FindAllByDnsName(ctx context.Context, r soap.RoundTripper, req *types.FindAllByDnsName) (*types.FindAllByDnsNameResponse, error) {\n\tvar reqBody, resBody FindAllByDnsNameBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FindAllByIpBody struct {\n\tReq    *types.FindAllByIp         `xml:\"urn:vim25 FindAllByIp,omitempty\"`\n\tRes    *types.FindAllByIpResponse `xml:\"urn:vim25 FindAllByIpResponse,omitempty\"`\n\tFault_ *soap.Fault                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FindAllByIpBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FindAllByIp(ctx context.Context, r soap.RoundTripper, req *types.FindAllByIp) (*types.FindAllByIpResponse, error) {\n\tvar reqBody, resBody FindAllByIpBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FindAllByUuidBody struct 
{\n\tReq    *types.FindAllByUuid         `xml:\"urn:vim25 FindAllByUuid,omitempty\"`\n\tRes    *types.FindAllByUuidResponse `xml:\"urn:vim25 FindAllByUuidResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FindAllByUuidBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FindAllByUuid(ctx context.Context, r soap.RoundTripper, req *types.FindAllByUuid) (*types.FindAllByUuidResponse, error) {\n\tvar reqBody, resBody FindAllByUuidBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FindAssociatedProfileBody struct {\n\tReq    *types.FindAssociatedProfile         `xml:\"urn:vim25 FindAssociatedProfile,omitempty\"`\n\tRes    *types.FindAssociatedProfileResponse `xml:\"urn:vim25 FindAssociatedProfileResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FindAssociatedProfileBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FindAssociatedProfile(ctx context.Context, r soap.RoundTripper, req *types.FindAssociatedProfile) (*types.FindAssociatedProfileResponse, error) {\n\tvar reqBody, resBody FindAssociatedProfileBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FindByDatastorePathBody struct {\n\tReq    *types.FindByDatastorePath         `xml:\"urn:vim25 FindByDatastorePath,omitempty\"`\n\tRes    *types.FindByDatastorePathResponse `xml:\"urn:vim25 FindByDatastorePathResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FindByDatastorePathBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FindByDatastorePath(ctx context.Context, r soap.RoundTripper, req 
*types.FindByDatastorePath) (*types.FindByDatastorePathResponse, error) {\n\tvar reqBody, resBody FindByDatastorePathBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FindByDnsNameBody struct {\n\tReq    *types.FindByDnsName         `xml:\"urn:vim25 FindByDnsName,omitempty\"`\n\tRes    *types.FindByDnsNameResponse `xml:\"urn:vim25 FindByDnsNameResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FindByDnsNameBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FindByDnsName(ctx context.Context, r soap.RoundTripper, req *types.FindByDnsName) (*types.FindByDnsNameResponse, error) {\n\tvar reqBody, resBody FindByDnsNameBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FindByInventoryPathBody struct {\n\tReq    *types.FindByInventoryPath         `xml:\"urn:vim25 FindByInventoryPath,omitempty\"`\n\tRes    *types.FindByInventoryPathResponse `xml:\"urn:vim25 FindByInventoryPathResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FindByInventoryPathBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FindByInventoryPath(ctx context.Context, r soap.RoundTripper, req *types.FindByInventoryPath) (*types.FindByInventoryPathResponse, error) {\n\tvar reqBody, resBody FindByInventoryPathBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FindByIpBody struct {\n\tReq    *types.FindByIp         `xml:\"urn:vim25 FindByIp,omitempty\"`\n\tRes    *types.FindByIpResponse `xml:\"urn:vim25 FindByIpResponse,omitempty\"`\n\tFault_ *soap.Fault             
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FindByIpBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FindByIp(ctx context.Context, r soap.RoundTripper, req *types.FindByIp) (*types.FindByIpResponse, error) {\n\tvar reqBody, resBody FindByIpBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FindByUuidBody struct {\n\tReq    *types.FindByUuid         `xml:\"urn:vim25 FindByUuid,omitempty\"`\n\tRes    *types.FindByUuidResponse `xml:\"urn:vim25 FindByUuidResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FindByUuidBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FindByUuid(ctx context.Context, r soap.RoundTripper, req *types.FindByUuid) (*types.FindByUuidResponse, error) {\n\tvar reqBody, resBody FindByUuidBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FindChildBody struct {\n\tReq    *types.FindChild         `xml:\"urn:vim25 FindChild,omitempty\"`\n\tRes    *types.FindChildResponse `xml:\"urn:vim25 FindChildResponse,omitempty\"`\n\tFault_ *soap.Fault              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FindChildBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FindChild(ctx context.Context, r soap.RoundTripper, req *types.FindChild) (*types.FindChildResponse, error) {\n\tvar reqBody, resBody FindChildBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FindExtensionBody struct {\n\tReq    *types.FindExtension         `xml:\"urn:vim25 FindExtension,omitempty\"`\n\tRes    *types.FindExtensionResponse `xml:\"urn:vim25 FindExtensionResponse,omitempty\"`\n\tFault_ 
*soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FindExtensionBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FindExtension(ctx context.Context, r soap.RoundTripper, req *types.FindExtension) (*types.FindExtensionResponse, error) {\n\tvar reqBody, resBody FindExtensionBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FindRulesForVmBody struct {\n\tReq    *types.FindRulesForVm         `xml:\"urn:vim25 FindRulesForVm,omitempty\"`\n\tRes    *types.FindRulesForVmResponse `xml:\"urn:vim25 FindRulesForVmResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FindRulesForVmBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FindRulesForVm(ctx context.Context, r soap.RoundTripper, req *types.FindRulesForVm) (*types.FindRulesForVmResponse, error) {\n\tvar reqBody, resBody FindRulesForVmBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FormatVffsBody struct {\n\tReq    *types.FormatVffs         `xml:\"urn:vim25 FormatVffs,omitempty\"`\n\tRes    *types.FormatVffsResponse `xml:\"urn:vim25 FormatVffsResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FormatVffsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FormatVffs(ctx context.Context, r soap.RoundTripper, req *types.FormatVffs) (*types.FormatVffsResponse, error) {\n\tvar reqBody, resBody FormatVffsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype FormatVmfsBody struct {\n\tReq    *types.FormatVmfs         `xml:\"urn:vim25 
FormatVmfs,omitempty\"`\n\tRes    *types.FormatVmfsResponse `xml:\"urn:vim25 FormatVmfsResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *FormatVmfsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc FormatVmfs(ctx context.Context, r soap.RoundTripper, req *types.FormatVmfs) (*types.FormatVmfsResponse, error) {\n\tvar reqBody, resBody FormatVmfsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype GenerateCertificateSigningRequestBody struct {\n\tReq    *types.GenerateCertificateSigningRequest         `xml:\"urn:vim25 GenerateCertificateSigningRequest,omitempty\"`\n\tRes    *types.GenerateCertificateSigningRequestResponse `xml:\"urn:vim25 GenerateCertificateSigningRequestResponse,omitempty\"`\n\tFault_ *soap.Fault                                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *GenerateCertificateSigningRequestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc GenerateCertificateSigningRequest(ctx context.Context, r soap.RoundTripper, req *types.GenerateCertificateSigningRequest) (*types.GenerateCertificateSigningRequestResponse, error) {\n\tvar reqBody, resBody GenerateCertificateSigningRequestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype GenerateCertificateSigningRequestByDnBody struct {\n\tReq    *types.GenerateCertificateSigningRequestByDn         `xml:\"urn:vim25 GenerateCertificateSigningRequestByDn,omitempty\"`\n\tRes    *types.GenerateCertificateSigningRequestByDnResponse `xml:\"urn:vim25 GenerateCertificateSigningRequestByDnResponse,omitempty\"`\n\tFault_ *soap.Fault                                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b 
*GenerateCertificateSigningRequestByDnBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc GenerateCertificateSigningRequestByDn(ctx context.Context, r soap.RoundTripper, req *types.GenerateCertificateSigningRequestByDn) (*types.GenerateCertificateSigningRequestByDnResponse, error) {\n\tvar reqBody, resBody GenerateCertificateSigningRequestByDnBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype GenerateClientCsrBody struct {\n\tReq    *types.GenerateClientCsr         `xml:\"urn:vim25 GenerateClientCsr,omitempty\"`\n\tRes    *types.GenerateClientCsrResponse `xml:\"urn:vim25 GenerateClientCsrResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *GenerateClientCsrBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc GenerateClientCsr(ctx context.Context, r soap.RoundTripper, req *types.GenerateClientCsr) (*types.GenerateClientCsrResponse, error) {\n\tvar reqBody, resBody GenerateClientCsrBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype GenerateConfigTaskListBody struct {\n\tReq    *types.GenerateConfigTaskList         `xml:\"urn:vim25 GenerateConfigTaskList,omitempty\"`\n\tRes    *types.GenerateConfigTaskListResponse `xml:\"urn:vim25 GenerateConfigTaskListResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *GenerateConfigTaskListBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc GenerateConfigTaskList(ctx context.Context, r soap.RoundTripper, req *types.GenerateConfigTaskList) (*types.GenerateConfigTaskListResponse, error) {\n\tvar reqBody, resBody GenerateConfigTaskListBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil 
{\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype GenerateHostConfigTaskSpec_TaskBody struct {\n\tReq    *types.GenerateHostConfigTaskSpec_Task         `xml:\"urn:vim25 GenerateHostConfigTaskSpec_Task,omitempty\"`\n\tRes    *types.GenerateHostConfigTaskSpec_TaskResponse `xml:\"urn:vim25 GenerateHostConfigTaskSpec_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *GenerateHostConfigTaskSpec_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc GenerateHostConfigTaskSpec_Task(ctx context.Context, r soap.RoundTripper, req *types.GenerateHostConfigTaskSpec_Task) (*types.GenerateHostConfigTaskSpec_TaskResponse, error) {\n\tvar reqBody, resBody GenerateHostConfigTaskSpec_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype GenerateHostProfileTaskList_TaskBody struct {\n\tReq    *types.GenerateHostProfileTaskList_Task         `xml:\"urn:vim25 GenerateHostProfileTaskList_Task,omitempty\"`\n\tRes    *types.GenerateHostProfileTaskList_TaskResponse `xml:\"urn:vim25 GenerateHostProfileTaskList_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *GenerateHostProfileTaskList_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc GenerateHostProfileTaskList_Task(ctx context.Context, r soap.RoundTripper, req *types.GenerateHostProfileTaskList_Task) (*types.GenerateHostProfileTaskList_TaskResponse, error) {\n\tvar reqBody, resBody GenerateHostProfileTaskList_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype GenerateKeyBody struct {\n\tReq    *types.GenerateKey         `xml:\"urn:vim25 
GenerateKey,omitempty\"`\n\tRes    *types.GenerateKeyResponse `xml:\"urn:vim25 GenerateKeyResponse,omitempty\"`\n\tFault_ *soap.Fault                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *GenerateKeyBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc GenerateKey(ctx context.Context, r soap.RoundTripper, req *types.GenerateKey) (*types.GenerateKeyResponse, error) {\n\tvar reqBody, resBody GenerateKeyBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype GenerateLogBundles_TaskBody struct {\n\tReq    *types.GenerateLogBundles_Task         `xml:\"urn:vim25 GenerateLogBundles_Task,omitempty\"`\n\tRes    *types.GenerateLogBundles_TaskResponse `xml:\"urn:vim25 GenerateLogBundles_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *GenerateLogBundles_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc GenerateLogBundles_Task(ctx context.Context, r soap.RoundTripper, req *types.GenerateLogBundles_Task) (*types.GenerateLogBundles_TaskResponse, error) {\n\tvar reqBody, resBody GenerateLogBundles_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype GenerateSelfSignedClientCertBody struct {\n\tReq    *types.GenerateSelfSignedClientCert         `xml:\"urn:vim25 GenerateSelfSignedClientCert,omitempty\"`\n\tRes    *types.GenerateSelfSignedClientCertResponse `xml:\"urn:vim25 GenerateSelfSignedClientCertResponse,omitempty\"`\n\tFault_ *soap.Fault                                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *GenerateSelfSignedClientCertBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc GenerateSelfSignedClientCert(ctx context.Context, r soap.RoundTripper, req 
*types.GenerateSelfSignedClientCert) (*types.GenerateSelfSignedClientCertResponse, error) {\n\tvar reqBody, resBody GenerateSelfSignedClientCertBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype GetAlarmBody struct {\n\tReq    *types.GetAlarm         `xml:\"urn:vim25 GetAlarm,omitempty\"`\n\tRes    *types.GetAlarmResponse `xml:\"urn:vim25 GetAlarmResponse,omitempty\"`\n\tFault_ *soap.Fault             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *GetAlarmBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc GetAlarm(ctx context.Context, r soap.RoundTripper, req *types.GetAlarm) (*types.GetAlarmResponse, error) {\n\tvar reqBody, resBody GetAlarmBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype GetAlarmStateBody struct {\n\tReq    *types.GetAlarmState         `xml:\"urn:vim25 GetAlarmState,omitempty\"`\n\tRes    *types.GetAlarmStateResponse `xml:\"urn:vim25 GetAlarmStateResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *GetAlarmStateBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc GetAlarmState(ctx context.Context, r soap.RoundTripper, req *types.GetAlarmState) (*types.GetAlarmStateResponse, error) {\n\tvar reqBody, resBody GetAlarmStateBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype GetCustomizationSpecBody struct {\n\tReq    *types.GetCustomizationSpec         `xml:\"urn:vim25 GetCustomizationSpec,omitempty\"`\n\tRes    *types.GetCustomizationSpecResponse `xml:\"urn:vim25 GetCustomizationSpecResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ 
Fault,omitempty"`
}

func (body *GetCustomizationSpecBody) Fault() *soap.Fault { return body.Fault_ }

// GetCustomizationSpec issues a GetCustomizationSpec request via r and returns the decoded response.
func GetCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.GetCustomizationSpec) (*types.GetCustomizationSpecResponse, error) {
	var request, response GetCustomizationSpecBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type GetPublicKeyBody struct {
	Req    *types.GetPublicKey `xml:"urn:vim25 GetPublicKey,omitempty"`
	Res    *types.GetPublicKeyResponse `xml:"urn:vim25 GetPublicKeyResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *GetPublicKeyBody) Fault() *soap.Fault { return body.Fault_ }

// GetPublicKey issues a GetPublicKey request via r and returns the decoded response.
func GetPublicKey(ctx context.Context, r soap.RoundTripper, req *types.GetPublicKey) (*types.GetPublicKeyResponse, error) {
	var request, response GetPublicKeyBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type GetResourceUsageBody struct {
	Req    *types.GetResourceUsage `xml:"urn:vim25 GetResourceUsage,omitempty"`
	Res    *types.GetResourceUsageResponse `xml:"urn:vim25 GetResourceUsageResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *GetResourceUsageBody) Fault() *soap.Fault { return body.Fault_ }

// GetResourceUsage issues a GetResourceUsage request via r and returns the decoded response.
func GetResourceUsage(ctx context.Context, r soap.RoundTripper, req *types.GetResourceUsage) (*types.GetResourceUsageResponse, error) {
	var request, response GetResourceUsageBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type GetVchaClusterHealthBody struct {
	Req    *types.GetVchaClusterHealth `xml:"urn:vim25 GetVchaClusterHealth,omitempty"`
	Res    *types.GetVchaClusterHealthResponse `xml:"urn:vim25 GetVchaClusterHealthResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *GetVchaClusterHealthBody) Fault() *soap.Fault { return body.Fault_ }

// GetVchaClusterHealth issues a GetVchaClusterHealth request via r and returns the decoded response.
func GetVchaClusterHealth(ctx context.Context, r soap.RoundTripper, req *types.GetVchaClusterHealth) (*types.GetVchaClusterHealthResponse, error) {
	var request, response GetVchaClusterHealthBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type GetVsanObjExtAttrsBody struct {
	Req    *types.GetVsanObjExtAttrs `xml:"urn:vim25 GetVsanObjExtAttrs,omitempty"`
	Res    *types.GetVsanObjExtAttrsResponse `xml:"urn:vim25 GetVsanObjExtAttrsResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *GetVsanObjExtAttrsBody) Fault() *soap.Fault { return body.Fault_ }

// GetVsanObjExtAttrs issues a GetVsanObjExtAttrs request via r and returns the decoded response.
func GetVsanObjExtAttrs(ctx context.Context, r soap.RoundTripper, req *types.GetVsanObjExtAttrs) (*types.GetVsanObjExtAttrsResponse, error) {
	var request, response GetVsanObjExtAttrsBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HasMonitoredEntityBody struct {
	Req    *types.HasMonitoredEntity `xml:"urn:vim25 HasMonitoredEntity,omitempty"`
	Res    *types.HasMonitoredEntityResponse `xml:"urn:vim25 HasMonitoredEntityResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HasMonitoredEntityBody) Fault() *soap.Fault { return body.Fault_ }

// HasMonitoredEntity issues a HasMonitoredEntity request via r and returns the decoded response.
func HasMonitoredEntity(ctx context.Context, r soap.RoundTripper, req *types.HasMonitoredEntity) (*types.HasMonitoredEntityResponse, error) {
	var request, response HasMonitoredEntityBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HasPrivilegeOnEntitiesBody struct {
	Req    *types.HasPrivilegeOnEntities `xml:"urn:vim25 HasPrivilegeOnEntities,omitempty"`
	Res    *types.HasPrivilegeOnEntitiesResponse `xml:"urn:vim25 HasPrivilegeOnEntitiesResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HasPrivilegeOnEntitiesBody) Fault() *soap.Fault { return body.Fault_ }

// HasPrivilegeOnEntities issues a HasPrivilegeOnEntities request via r and returns the decoded response.
func HasPrivilegeOnEntities(ctx context.Context, r soap.RoundTripper, req *types.HasPrivilegeOnEntities) (*types.HasPrivilegeOnEntitiesResponse, error) {
	var request, response HasPrivilegeOnEntitiesBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HasPrivilegeOnEntityBody struct {
	Req    *types.HasPrivilegeOnEntity `xml:"urn:vim25 HasPrivilegeOnEntity,omitempty"`
	Res    *types.HasPrivilegeOnEntityResponse `xml:"urn:vim25 HasPrivilegeOnEntityResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HasPrivilegeOnEntityBody) Fault() *soap.Fault { return body.Fault_ }

// HasPrivilegeOnEntity issues a HasPrivilegeOnEntity request via r and returns the decoded response.
func HasPrivilegeOnEntity(ctx context.Context, r soap.RoundTripper, req *types.HasPrivilegeOnEntity) (*types.HasPrivilegeOnEntityResponse, error) {
	var request, response HasPrivilegeOnEntityBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HasProviderBody struct {
	Req    *types.HasProvider `xml:"urn:vim25 HasProvider,omitempty"`
	Res    *types.HasProviderResponse `xml:"urn:vim25 HasProviderResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HasProviderBody) Fault() *soap.Fault { return body.Fault_ }

// HasProvider issues a HasProvider request via r and returns the decoded response.
func HasProvider(ctx context.Context, r soap.RoundTripper, req *types.HasProvider) (*types.HasProviderResponse, error) {
	var request, response HasProviderBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HasUserPrivilegeOnEntitiesBody struct {
	Req    *types.HasUserPrivilegeOnEntities `xml:"urn:vim25 HasUserPrivilegeOnEntities,omitempty"`
	Res    *types.HasUserPrivilegeOnEntitiesResponse `xml:"urn:vim25 HasUserPrivilegeOnEntitiesResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HasUserPrivilegeOnEntitiesBody) Fault() *soap.Fault { return body.Fault_ }

// HasUserPrivilegeOnEntities issues a HasUserPrivilegeOnEntities request via r and returns the decoded response.
func HasUserPrivilegeOnEntities(ctx context.Context, r soap.RoundTripper, req *types.HasUserPrivilegeOnEntities) (*types.HasUserPrivilegeOnEntitiesResponse, error) {
	var request, response HasUserPrivilegeOnEntitiesBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostCloneVStorageObject_TaskBody struct {
	Req    *types.HostCloneVStorageObject_Task `xml:"urn:vim25 HostCloneVStorageObject_Task,omitempty"`
	Res    *types.HostCloneVStorageObject_TaskResponse `xml:"urn:vim25 HostCloneVStorageObject_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostCloneVStorageObject_TaskBody) Fault() *soap.Fault { return body.Fault_ }

// HostCloneVStorageObject_Task issues a HostCloneVStorageObject_Task request via r and returns the decoded response.
func HostCloneVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *types.HostCloneVStorageObject_Task) (*types.HostCloneVStorageObject_TaskResponse, error) {
	var request, response HostCloneVStorageObject_TaskBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostConfigVFlashCacheBody struct {
	Req    *types.HostConfigVFlashCache `xml:"urn:vim25 HostConfigVFlashCache,omitempty"`
	Res    *types.HostConfigVFlashCacheResponse `xml:"urn:vim25 HostConfigVFlashCacheResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostConfigVFlashCacheBody) Fault() *soap.Fault { return body.Fault_ }

// HostConfigVFlashCache issues a HostConfigVFlashCache request via r and returns the decoded response.
func HostConfigVFlashCache(ctx context.Context, r soap.RoundTripper, req *types.HostConfigVFlashCache) (*types.HostConfigVFlashCacheResponse, error) {
	var request, response HostConfigVFlashCacheBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostConfigureVFlashResourceBody struct {
	Req    *types.HostConfigureVFlashResource `xml:"urn:vim25 HostConfigureVFlashResource,omitempty"`
	Res    *types.HostConfigureVFlashResourceResponse `xml:"urn:vim25 HostConfigureVFlashResourceResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostConfigureVFlashResourceBody) Fault() *soap.Fault { return body.Fault_ }

// HostConfigureVFlashResource issues a HostConfigureVFlashResource request via r and returns the decoded response.
func HostConfigureVFlashResource(ctx context.Context, r soap.RoundTripper, req *types.HostConfigureVFlashResource) (*types.HostConfigureVFlashResourceResponse, error) {
	var request, response HostConfigureVFlashResourceBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostCreateDisk_TaskBody struct {
	Req    *types.HostCreateDisk_Task `xml:"urn:vim25 HostCreateDisk_Task,omitempty"`
	Res    *types.HostCreateDisk_TaskResponse `xml:"urn:vim25 HostCreateDisk_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostCreateDisk_TaskBody) Fault() *soap.Fault { return body.Fault_ }

// HostCreateDisk_Task issues a HostCreateDisk_Task request via r and returns the decoded response.
func HostCreateDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.HostCreateDisk_Task) (*types.HostCreateDisk_TaskResponse, error) {
	var request, response HostCreateDisk_TaskBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostDeleteVStorageObject_TaskBody struct {
	Req    *types.HostDeleteVStorageObject_Task `xml:"urn:vim25 HostDeleteVStorageObject_Task,omitempty"`
	Res    *types.HostDeleteVStorageObject_TaskResponse `xml:"urn:vim25 HostDeleteVStorageObject_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostDeleteVStorageObject_TaskBody) Fault() *soap.Fault { return body.Fault_ }

// HostDeleteVStorageObject_Task issues a HostDeleteVStorageObject_Task request via r and returns the decoded response.
func HostDeleteVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *types.HostDeleteVStorageObject_Task) (*types.HostDeleteVStorageObject_TaskResponse, error) {
	var request, response HostDeleteVStorageObject_TaskBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostExtendDisk_TaskBody struct {
	Req    *types.HostExtendDisk_Task `xml:"urn:vim25 HostExtendDisk_Task,omitempty"`
	Res    *types.HostExtendDisk_TaskResponse `xml:"urn:vim25 HostExtendDisk_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostExtendDisk_TaskBody) Fault() *soap.Fault { return body.Fault_ }

// HostExtendDisk_Task issues a HostExtendDisk_Task request via r and returns the decoded response.
func HostExtendDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.HostExtendDisk_Task) (*types.HostExtendDisk_TaskResponse, error) {
	var request, response HostExtendDisk_TaskBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostGetVFlashModuleDefaultConfigBody struct {
	Req    *types.HostGetVFlashModuleDefaultConfig `xml:"urn:vim25 HostGetVFlashModuleDefaultConfig,omitempty"`
	Res    *types.HostGetVFlashModuleDefaultConfigResponse `xml:"urn:vim25 HostGetVFlashModuleDefaultConfigResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostGetVFlashModuleDefaultConfigBody) Fault() *soap.Fault { return body.Fault_ }

// HostGetVFlashModuleDefaultConfig issues a HostGetVFlashModuleDefaultConfig request via r and returns the decoded response.
func HostGetVFlashModuleDefaultConfig(ctx context.Context, r soap.RoundTripper, req *types.HostGetVFlashModuleDefaultConfig) (*types.HostGetVFlashModuleDefaultConfigResponse, error) {
	var request, response HostGetVFlashModuleDefaultConfigBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostImageConfigGetAcceptanceBody struct {
	Req    *types.HostImageConfigGetAcceptance `xml:"urn:vim25 HostImageConfigGetAcceptance,omitempty"`
	Res    *types.HostImageConfigGetAcceptanceResponse `xml:"urn:vim25 HostImageConfigGetAcceptanceResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostImageConfigGetAcceptanceBody) Fault() *soap.Fault { return body.Fault_ }

// HostImageConfigGetAcceptance issues a HostImageConfigGetAcceptance request via r and returns the decoded response.
func HostImageConfigGetAcceptance(ctx context.Context, r soap.RoundTripper, req *types.HostImageConfigGetAcceptance) (*types.HostImageConfigGetAcceptanceResponse, error) {
	var request, response HostImageConfigGetAcceptanceBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostImageConfigGetProfileBody struct {
	Req    *types.HostImageConfigGetProfile `xml:"urn:vim25 HostImageConfigGetProfile,omitempty"`
	Res    *types.HostImageConfigGetProfileResponse `xml:"urn:vim25 HostImageConfigGetProfileResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostImageConfigGetProfileBody) Fault() *soap.Fault { return body.Fault_ }

// HostImageConfigGetProfile issues a HostImageConfigGetProfile request via r and returns the decoded response.
func HostImageConfigGetProfile(ctx context.Context, r soap.RoundTripper, req *types.HostImageConfigGetProfile) (*types.HostImageConfigGetProfileResponse, error) {
	var request, response HostImageConfigGetProfileBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostInflateDisk_TaskBody struct {
	Req    *types.HostInflateDisk_Task `xml:"urn:vim25 HostInflateDisk_Task,omitempty"`
	Res    *types.HostInflateDisk_TaskResponse `xml:"urn:vim25 HostInflateDisk_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostInflateDisk_TaskBody) Fault() *soap.Fault { return body.Fault_ }

// HostInflateDisk_Task issues a HostInflateDisk_Task request via r and returns the decoded response.
func HostInflateDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.HostInflateDisk_Task) (*types.HostInflateDisk_TaskResponse, error) {
	var request, response HostInflateDisk_TaskBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostListVStorageObjectBody struct {
	Req    *types.HostListVStorageObject `xml:"urn:vim25 HostListVStorageObject,omitempty"`
	Res    *types.HostListVStorageObjectResponse `xml:"urn:vim25 HostListVStorageObjectResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostListVStorageObjectBody) Fault() *soap.Fault { return body.Fault_ }

// HostListVStorageObject issues a HostListVStorageObject request via r and returns the decoded response.
func HostListVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.HostListVStorageObject) (*types.HostListVStorageObjectResponse, error) {
	var request, response HostListVStorageObjectBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostReconcileDatastoreInventory_TaskBody struct {
	Req    *types.HostReconcileDatastoreInventory_Task `xml:"urn:vim25 HostReconcileDatastoreInventory_Task,omitempty"`
	Res    *types.HostReconcileDatastoreInventory_TaskResponse `xml:"urn:vim25 HostReconcileDatastoreInventory_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostReconcileDatastoreInventory_TaskBody) Fault() *soap.Fault { return body.Fault_ }

// HostReconcileDatastoreInventory_Task issues a HostReconcileDatastoreInventory_Task request via r and returns the decoded response.
func HostReconcileDatastoreInventory_Task(ctx context.Context, r soap.RoundTripper, req *types.HostReconcileDatastoreInventory_Task) (*types.HostReconcileDatastoreInventory_TaskResponse, error) {
	var request, response HostReconcileDatastoreInventory_TaskBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostRegisterDiskBody struct {
	Req    *types.HostRegisterDisk `xml:"urn:vim25 HostRegisterDisk,omitempty"`
	Res    *types.HostRegisterDiskResponse `xml:"urn:vim25 HostRegisterDiskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostRegisterDiskBody) Fault() *soap.Fault { return body.Fault_ }

// HostRegisterDisk issues a HostRegisterDisk request via r and returns the decoded response.
func HostRegisterDisk(ctx context.Context, r soap.RoundTripper, req *types.HostRegisterDisk) (*types.HostRegisterDiskResponse, error) {
	var request, response HostRegisterDiskBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostRelocateVStorageObject_TaskBody struct {
	Req    *types.HostRelocateVStorageObject_Task `xml:"urn:vim25 HostRelocateVStorageObject_Task,omitempty"`
	Res    *types.HostRelocateVStorageObject_TaskResponse `xml:"urn:vim25 HostRelocateVStorageObject_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostRelocateVStorageObject_TaskBody) Fault() *soap.Fault { return body.Fault_ }

// HostRelocateVStorageObject_Task issues a HostRelocateVStorageObject_Task request via r and returns the decoded response.
func HostRelocateVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *types.HostRelocateVStorageObject_Task) (*types.HostRelocateVStorageObject_TaskResponse, error) {
	var request, response HostRelocateVStorageObject_TaskBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostRemoveVFlashResourceBody struct {
	Req    *types.HostRemoveVFlashResource `xml:"urn:vim25 HostRemoveVFlashResource,omitempty"`
	Res    *types.HostRemoveVFlashResourceResponse `xml:"urn:vim25 HostRemoveVFlashResourceResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostRemoveVFlashResourceBody) Fault() *soap.Fault { return body.Fault_ }

// HostRemoveVFlashResource issues a HostRemoveVFlashResource request via r and returns the decoded response.
func HostRemoveVFlashResource(ctx context.Context, r soap.RoundTripper, req *types.HostRemoveVFlashResource) (*types.HostRemoveVFlashResourceResponse, error) {
	var request, response HostRemoveVFlashResourceBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostRenameVStorageObjectBody struct {
	Req    *types.HostRenameVStorageObject `xml:"urn:vim25 HostRenameVStorageObject,omitempty"`
	Res    *types.HostRenameVStorageObjectResponse `xml:"urn:vim25 HostRenameVStorageObjectResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostRenameVStorageObjectBody) Fault() *soap.Fault { return body.Fault_ }

// HostRenameVStorageObject issues a HostRenameVStorageObject request via r and returns the decoded response.
func HostRenameVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.HostRenameVStorageObject) (*types.HostRenameVStorageObjectResponse, error) {
	var request, response HostRenameVStorageObjectBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostRetrieveVStorageObjectBody struct {
	Req    *types.HostRetrieveVStorageObject `xml:"urn:vim25 HostRetrieveVStorageObject,omitempty"`
	Res    *types.HostRetrieveVStorageObjectResponse `xml:"urn:vim25 HostRetrieveVStorageObjectResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostRetrieveVStorageObjectBody) Fault() *soap.Fault { return body.Fault_ }

// HostRetrieveVStorageObject issues a HostRetrieveVStorageObject request via r and returns the decoded response.
func HostRetrieveVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.HostRetrieveVStorageObject) (*types.HostRetrieveVStorageObjectResponse, error) {
	var request, response HostRetrieveVStorageObjectBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostRetrieveVStorageObjectStateBody struct {
	Req    *types.HostRetrieveVStorageObjectState `xml:"urn:vim25 HostRetrieveVStorageObjectState,omitempty"`
	Res    *types.HostRetrieveVStorageObjectStateResponse `xml:"urn:vim25 HostRetrieveVStorageObjectStateResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostRetrieveVStorageObjectStateBody) Fault() *soap.Fault { return body.Fault_ }

// HostRetrieveVStorageObjectState issues a HostRetrieveVStorageObjectState request via r and returns the decoded response.
func HostRetrieveVStorageObjectState(ctx context.Context, r soap.RoundTripper, req *types.HostRetrieveVStorageObjectState) (*types.HostRetrieveVStorageObjectStateResponse, error) {
	var request, response HostRetrieveVStorageObjectStateBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostScheduleReconcileDatastoreInventoryBody struct {
	Req    *types.HostScheduleReconcileDatastoreInventory `xml:"urn:vim25 HostScheduleReconcileDatastoreInventory,omitempty"`
	Res    *types.HostScheduleReconcileDatastoreInventoryResponse `xml:"urn:vim25 HostScheduleReconcileDatastoreInventoryResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostScheduleReconcileDatastoreInventoryBody) Fault() *soap.Fault { return body.Fault_ }

// HostScheduleReconcileDatastoreInventory issues a HostScheduleReconcileDatastoreInventory request via r and returns the decoded response.
func HostScheduleReconcileDatastoreInventory(ctx context.Context, r soap.RoundTripper, req *types.HostScheduleReconcileDatastoreInventory) (*types.HostScheduleReconcileDatastoreInventoryResponse, error) {
	var request, response HostScheduleReconcileDatastoreInventoryBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HostSpecGetUpdatedHostsBody struct {
	Req    *types.HostSpecGetUpdatedHosts `xml:"urn:vim25 HostSpecGetUpdatedHosts,omitempty"`
	Res    *types.HostSpecGetUpdatedHostsResponse `xml:"urn:vim25 HostSpecGetUpdatedHostsResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HostSpecGetUpdatedHostsBody) Fault() *soap.Fault { return body.Fault_ }

// HostSpecGetUpdatedHosts issues a HostSpecGetUpdatedHosts request via r and returns the decoded response.
func HostSpecGetUpdatedHosts(ctx context.Context, r soap.RoundTripper, req *types.HostSpecGetUpdatedHosts) (*types.HostSpecGetUpdatedHostsResponse, error) {
	var request, response HostSpecGetUpdatedHostsBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HttpNfcLeaseAbortBody struct {
	Req    *types.HttpNfcLeaseAbort `xml:"urn:vim25 HttpNfcLeaseAbort,omitempty"`
	Res    *types.HttpNfcLeaseAbortResponse `xml:"urn:vim25 HttpNfcLeaseAbortResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HttpNfcLeaseAbortBody) Fault() *soap.Fault { return body.Fault_ }

// HttpNfcLeaseAbort issues a HttpNfcLeaseAbort request via r and returns the decoded response.
func HttpNfcLeaseAbort(ctx context.Context, r soap.RoundTripper, req *types.HttpNfcLeaseAbort) (*types.HttpNfcLeaseAbortResponse, error) {
	var request, response HttpNfcLeaseAbortBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HttpNfcLeaseCompleteBody struct {
	Req    *types.HttpNfcLeaseComplete `xml:"urn:vim25 HttpNfcLeaseComplete,omitempty"`
	Res    *types.HttpNfcLeaseCompleteResponse `xml:"urn:vim25 HttpNfcLeaseCompleteResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HttpNfcLeaseCompleteBody) Fault() *soap.Fault { return body.Fault_ }

// HttpNfcLeaseComplete issues a HttpNfcLeaseComplete request via r and returns the decoded response.
func HttpNfcLeaseComplete(ctx context.Context, r soap.RoundTripper, req *types.HttpNfcLeaseComplete) (*types.HttpNfcLeaseCompleteResponse, error) {
	var request, response HttpNfcLeaseCompleteBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HttpNfcLeaseGetManifestBody struct {
	Req    *types.HttpNfcLeaseGetManifest `xml:"urn:vim25 HttpNfcLeaseGetManifest,omitempty"`
	Res    *types.HttpNfcLeaseGetManifestResponse `xml:"urn:vim25 HttpNfcLeaseGetManifestResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HttpNfcLeaseGetManifestBody) Fault() *soap.Fault { return body.Fault_ }

// HttpNfcLeaseGetManifest issues a HttpNfcLeaseGetManifest request via r and returns the decoded response.
func HttpNfcLeaseGetManifest(ctx context.Context, r soap.RoundTripper, req *types.HttpNfcLeaseGetManifest) (*types.HttpNfcLeaseGetManifestResponse, error) {
	var request, response HttpNfcLeaseGetManifestBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type HttpNfcLeaseProgressBody struct {
	Req    *types.HttpNfcLeaseProgress `xml:"urn:vim25 HttpNfcLeaseProgress,omitempty"`
	Res    *types.HttpNfcLeaseProgressResponse `xml:"urn:vim25 HttpNfcLeaseProgressResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *HttpNfcLeaseProgressBody) Fault() *soap.Fault { return body.Fault_ }

// HttpNfcLeaseProgress issues a HttpNfcLeaseProgress request via r and returns the decoded response.
func HttpNfcLeaseProgress(ctx context.Context, r soap.RoundTripper, req *types.HttpNfcLeaseProgress) (*types.HttpNfcLeaseProgressResponse, error) {
	var request, response HttpNfcLeaseProgressBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type ImpersonateUserBody struct {
	Req    *types.ImpersonateUser `xml:"urn:vim25 ImpersonateUser,omitempty"`
	Res    *types.ImpersonateUserResponse `xml:"urn:vim25 ImpersonateUserResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *ImpersonateUserBody) Fault() *soap.Fault { return body.Fault_ }

// ImpersonateUser issues a ImpersonateUser request via r and returns the decoded response.
func ImpersonateUser(ctx context.Context, r soap.RoundTripper, req *types.ImpersonateUser) (*types.ImpersonateUserResponse, error) {
	var request, response ImpersonateUserBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type ImportCertificateForCAM_TaskBody struct {
	Req    *types.ImportCertificateForCAM_Task `xml:"urn:vim25 ImportCertificateForCAM_Task,omitempty"`
	Res    *types.ImportCertificateForCAM_TaskResponse `xml:"urn:vim25 ImportCertificateForCAM_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *ImportCertificateForCAM_TaskBody) Fault() *soap.Fault { return body.Fault_ }

// ImportCertificateForCAM_Task issues a ImportCertificateForCAM_Task request via r and returns the decoded response.
func ImportCertificateForCAM_Task(ctx context.Context, r soap.RoundTripper, req *types.ImportCertificateForCAM_Task) (*types.ImportCertificateForCAM_TaskResponse, error) {
	var request, response ImportCertificateForCAM_TaskBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type ImportUnmanagedSnapshotBody struct {
	Req    *types.ImportUnmanagedSnapshot `xml:"urn:vim25 ImportUnmanagedSnapshot,omitempty"`
	Res    *types.ImportUnmanagedSnapshotResponse `xml:"urn:vim25 ImportUnmanagedSnapshotResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *ImportUnmanagedSnapshotBody) Fault() *soap.Fault { return body.Fault_ }

// ImportUnmanagedSnapshot issues a ImportUnmanagedSnapshot request via r and returns the decoded response.
func ImportUnmanagedSnapshot(ctx context.Context, r soap.RoundTripper, req *types.ImportUnmanagedSnapshot) (*types.ImportUnmanagedSnapshotResponse, error) {
	var request, response ImportUnmanagedSnapshotBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type ImportVAppBody struct {
	Req    *types.ImportVApp `xml:"urn:vim25 ImportVApp,omitempty"`
	Res    *types.ImportVAppResponse `xml:"urn:vim25 ImportVAppResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *ImportVAppBody) Fault() *soap.Fault { return body.Fault_ }

// ImportVApp issues a ImportVApp request via r and returns the decoded response.
func ImportVApp(ctx context.Context, r soap.RoundTripper, req *types.ImportVApp) (*types.ImportVAppResponse, error) {
	var request, response ImportVAppBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type InflateDisk_TaskBody struct {
	Req    *types.InflateDisk_Task `xml:"urn:vim25 InflateDisk_Task,omitempty"`
	Res    *types.InflateDisk_TaskResponse `xml:"urn:vim25 InflateDisk_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *InflateDisk_TaskBody) Fault() *soap.Fault { return body.Fault_ }

// InflateDisk_Task issues a InflateDisk_Task request via r and returns the decoded response.
func InflateDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.InflateDisk_Task) (*types.InflateDisk_TaskResponse, error) {
	var request, response InflateDisk_TaskBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type InflateVirtualDisk_TaskBody struct {
	Req    *types.InflateVirtualDisk_Task `xml:"urn:vim25 InflateVirtualDisk_Task,omitempty"`
	Res    *types.InflateVirtualDisk_TaskResponse `xml:"urn:vim25 InflateVirtualDisk_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *InflateVirtualDisk_TaskBody) Fault() *soap.Fault { return body.Fault_ }

// InflateVirtualDisk_Task issues a InflateVirtualDisk_Task request via r and returns the decoded response.
func InflateVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.InflateVirtualDisk_Task) (*types.InflateVirtualDisk_TaskResponse, error) {
	var request, response InflateVirtualDisk_TaskBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type InitializeDisks_TaskBody struct {
	Req    *types.InitializeDisks_Task `xml:"urn:vim25 InitializeDisks_Task,omitempty"`
	Res    *types.InitializeDisks_TaskResponse `xml:"urn:vim25 InitializeDisks_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *InitializeDisks_TaskBody) Fault() *soap.Fault { return body.Fault_ }

// InitializeDisks_Task issues a InitializeDisks_Task request via r and returns the decoded response.
func InitializeDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.InitializeDisks_Task) (*types.InitializeDisks_TaskResponse, error) {
	var request, response InitializeDisks_TaskBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type InitiateFileTransferFromGuestBody struct {
	Req    *types.InitiateFileTransferFromGuest `xml:"urn:vim25 InitiateFileTransferFromGuest,omitempty"`
	Res    *types.InitiateFileTransferFromGuestResponse `xml:"urn:vim25 InitiateFileTransferFromGuestResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *InitiateFileTransferFromGuestBody) Fault() *soap.Fault { return body.Fault_ }

// InitiateFileTransferFromGuest issues a InitiateFileTransferFromGuest request via r and returns the decoded response.
func InitiateFileTransferFromGuest(ctx context.Context, r soap.RoundTripper, req *types.InitiateFileTransferFromGuest) (*types.InitiateFileTransferFromGuestResponse, error) {
	var request, response InitiateFileTransferFromGuestBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type InitiateFileTransferToGuestBody struct {
	Req    *types.InitiateFileTransferToGuest `xml:"urn:vim25 InitiateFileTransferToGuest,omitempty"`
	Res    *types.InitiateFileTransferToGuestResponse `xml:"urn:vim25 InitiateFileTransferToGuestResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *InitiateFileTransferToGuestBody) Fault() *soap.Fault { return body.Fault_ }

// InitiateFileTransferToGuest issues a InitiateFileTransferToGuest request via r and returns the decoded response.
func InitiateFileTransferToGuest(ctx context.Context, r soap.RoundTripper, req *types.InitiateFileTransferToGuest) (*types.InitiateFileTransferToGuestResponse, error) {
	var request, response InitiateFileTransferToGuestBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type InstallHostPatchV2_TaskBody struct {
	Req    *types.InstallHostPatchV2_Task `xml:"urn:vim25 InstallHostPatchV2_Task,omitempty"`
	Res    *types.InstallHostPatchV2_TaskResponse `xml:"urn:vim25 InstallHostPatchV2_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *InstallHostPatchV2_TaskBody) Fault() *soap.Fault { return body.Fault_ }

// InstallHostPatchV2_Task issues a InstallHostPatchV2_Task request via r and returns the decoded response.
func InstallHostPatchV2_Task(ctx context.Context, r soap.RoundTripper, req *types.InstallHostPatchV2_Task) (*types.InstallHostPatchV2_TaskResponse, error) {
	var request, response InstallHostPatchV2_TaskBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type InstallHostPatch_TaskBody struct {
	Req    *types.InstallHostPatch_Task `xml:"urn:vim25 InstallHostPatch_Task,omitempty"`
	Res    *types.InstallHostPatch_TaskResponse `xml:"urn:vim25 InstallHostPatch_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *InstallHostPatch_TaskBody) Fault() *soap.Fault { return body.Fault_ }

// InstallHostPatch_Task issues a InstallHostPatch_Task request via r and returns the decoded response.
func InstallHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.InstallHostPatch_Task) (*types.InstallHostPatch_TaskResponse, error) {
	var request, response InstallHostPatch_TaskBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type InstallIoFilter_TaskBody struct {
	Req    *types.InstallIoFilter_Task `xml:"urn:vim25 InstallIoFilter_Task,omitempty"`
	Res    *types.InstallIoFilter_TaskResponse `xml:"urn:vim25 InstallIoFilter_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *InstallIoFilter_TaskBody) Fault() *soap.Fault { return body.Fault_ }

// InstallIoFilter_Task issues a InstallIoFilter_Task request via r and returns the decoded response.
func InstallIoFilter_Task(ctx context.Context, r soap.RoundTripper, req *types.InstallIoFilter_Task) (*types.InstallIoFilter_TaskResponse, error) {
	var request, response InstallIoFilter_TaskBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type InstallServerCertificateBody struct {
	Req    *types.InstallServerCertificate `xml:"urn:vim25 InstallServerCertificate,omitempty"`
	Res    *types.InstallServerCertificateResponse `xml:"urn:vim25 InstallServerCertificateResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *InstallServerCertificateBody) Fault() *soap.Fault { return body.Fault_ }

// InstallServerCertificate issues a InstallServerCertificate request via r and returns the decoded response.
func InstallServerCertificate(ctx context.Context, r soap.RoundTripper, req *types.InstallServerCertificate) (*types.InstallServerCertificateResponse, error) {
	var request, response InstallServerCertificateBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type InstallSmartCardTrustAnchorBody struct {
	Req    *types.InstallSmartCardTrustAnchor `xml:"urn:vim25 InstallSmartCardTrustAnchor,omitempty"`
	Res    *types.InstallSmartCardTrustAnchorResponse `xml:"urn:vim25 InstallSmartCardTrustAnchorResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *InstallSmartCardTrustAnchorBody) Fault() *soap.Fault { return body.Fault_ }

// InstallSmartCardTrustAnchor issues a InstallSmartCardTrustAnchor request via r and returns the decoded response.
func InstallSmartCardTrustAnchor(ctx context.Context, r soap.RoundTripper, req *types.InstallSmartCardTrustAnchor) (*types.InstallSmartCardTrustAnchorResponse, error) {
	var request, response InstallSmartCardTrustAnchorBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type IsSharedGraphicsActiveBody struct {
	Req    *types.IsSharedGraphicsActive `xml:"urn:vim25 IsSharedGraphicsActive,omitempty"`
	Res    *types.IsSharedGraphicsActiveResponse `xml:"urn:vim25 IsSharedGraphicsActiveResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (body *IsSharedGraphicsActiveBody) Fault() *soap.Fault { return body.Fault_ }

// IsSharedGraphicsActive issues a IsSharedGraphicsActive request via r and returns the decoded response.
func IsSharedGraphicsActive(ctx context.Context, r soap.RoundTripper, req *types.IsSharedGraphicsActive) (*types.IsSharedGraphicsActiveResponse, error) {
	var request, response IsSharedGraphicsActiveBody
	request.Req = req
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

type JoinDomainWithCAM_TaskBody struct {
	Req    *types.JoinDomainWithCAM_Task         `xml:"urn:vim25 JoinDomainWithCAM_Task,omitempty"`
	Res  
  *types.JoinDomainWithCAM_TaskResponse `xml:\"urn:vim25 JoinDomainWithCAM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *JoinDomainWithCAM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc JoinDomainWithCAM_Task(ctx context.Context, r soap.RoundTripper, req *types.JoinDomainWithCAM_Task) (*types.JoinDomainWithCAM_TaskResponse, error) {\n\tvar reqBody, resBody JoinDomainWithCAM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype JoinDomain_TaskBody struct {\n\tReq    *types.JoinDomain_Task         `xml:\"urn:vim25 JoinDomain_Task,omitempty\"`\n\tRes    *types.JoinDomain_TaskResponse `xml:\"urn:vim25 JoinDomain_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *JoinDomain_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc JoinDomain_Task(ctx context.Context, r soap.RoundTripper, req *types.JoinDomain_Task) (*types.JoinDomain_TaskResponse, error) {\n\tvar reqBody, resBody JoinDomain_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype LeaveCurrentDomain_TaskBody struct {\n\tReq    *types.LeaveCurrentDomain_Task         `xml:\"urn:vim25 LeaveCurrentDomain_Task,omitempty\"`\n\tRes    *types.LeaveCurrentDomain_TaskResponse `xml:\"urn:vim25 LeaveCurrentDomain_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *LeaveCurrentDomain_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc LeaveCurrentDomain_Task(ctx context.Context, r soap.RoundTripper, req *types.LeaveCurrentDomain_Task) (*types.LeaveCurrentDomain_TaskResponse, 
error) {\n\tvar reqBody, resBody LeaveCurrentDomain_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ListCACertificateRevocationListsBody struct {\n\tReq    *types.ListCACertificateRevocationLists         `xml:\"urn:vim25 ListCACertificateRevocationLists,omitempty\"`\n\tRes    *types.ListCACertificateRevocationListsResponse `xml:\"urn:vim25 ListCACertificateRevocationListsResponse,omitempty\"`\n\tFault_ *soap.Fault                                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ListCACertificateRevocationListsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ListCACertificateRevocationLists(ctx context.Context, r soap.RoundTripper, req *types.ListCACertificateRevocationLists) (*types.ListCACertificateRevocationListsResponse, error) {\n\tvar reqBody, resBody ListCACertificateRevocationListsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ListCACertificatesBody struct {\n\tReq    *types.ListCACertificates         `xml:\"urn:vim25 ListCACertificates,omitempty\"`\n\tRes    *types.ListCACertificatesResponse `xml:\"urn:vim25 ListCACertificatesResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ListCACertificatesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ListCACertificates(ctx context.Context, r soap.RoundTripper, req *types.ListCACertificates) (*types.ListCACertificatesResponse, error) {\n\tvar reqBody, resBody ListCACertificatesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ListFilesInGuestBody struct {\n\tReq    *types.ListFilesInGuest         `xml:\"urn:vim25 
ListFilesInGuest,omitempty\"`\n\tRes    *types.ListFilesInGuestResponse `xml:\"urn:vim25 ListFilesInGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ListFilesInGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ListFilesInGuest(ctx context.Context, r soap.RoundTripper, req *types.ListFilesInGuest) (*types.ListFilesInGuestResponse, error) {\n\tvar reqBody, resBody ListFilesInGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ListGuestAliasesBody struct {\n\tReq    *types.ListGuestAliases         `xml:\"urn:vim25 ListGuestAliases,omitempty\"`\n\tRes    *types.ListGuestAliasesResponse `xml:\"urn:vim25 ListGuestAliasesResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ListGuestAliasesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ListGuestAliases(ctx context.Context, r soap.RoundTripper, req *types.ListGuestAliases) (*types.ListGuestAliasesResponse, error) {\n\tvar reqBody, resBody ListGuestAliasesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ListGuestMappedAliasesBody struct {\n\tReq    *types.ListGuestMappedAliases         `xml:\"urn:vim25 ListGuestMappedAliases,omitempty\"`\n\tRes    *types.ListGuestMappedAliasesResponse `xml:\"urn:vim25 ListGuestMappedAliasesResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ListGuestMappedAliasesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ListGuestMappedAliases(ctx context.Context, r soap.RoundTripper, req *types.ListGuestMappedAliases) (*types.ListGuestMappedAliasesResponse, error) 
{\n\tvar reqBody, resBody ListGuestMappedAliasesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ListKeysBody struct {\n\tReq    *types.ListKeys         `xml:\"urn:vim25 ListKeys,omitempty\"`\n\tRes    *types.ListKeysResponse `xml:\"urn:vim25 ListKeysResponse,omitempty\"`\n\tFault_ *soap.Fault             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ListKeysBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ListKeys(ctx context.Context, r soap.RoundTripper, req *types.ListKeys) (*types.ListKeysResponse, error) {\n\tvar reqBody, resBody ListKeysBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ListKmipServersBody struct {\n\tReq    *types.ListKmipServers         `xml:\"urn:vim25 ListKmipServers,omitempty\"`\n\tRes    *types.ListKmipServersResponse `xml:\"urn:vim25 ListKmipServersResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ListKmipServersBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ListKmipServers(ctx context.Context, r soap.RoundTripper, req *types.ListKmipServers) (*types.ListKmipServersResponse, error) {\n\tvar reqBody, resBody ListKmipServersBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ListProcessesInGuestBody struct {\n\tReq    *types.ListProcessesInGuest         `xml:\"urn:vim25 ListProcessesInGuest,omitempty\"`\n\tRes    *types.ListProcessesInGuestResponse `xml:\"urn:vim25 ListProcessesInGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ListProcessesInGuestBody) Fault() 
*soap.Fault { return b.Fault_ }\n\nfunc ListProcessesInGuest(ctx context.Context, r soap.RoundTripper, req *types.ListProcessesInGuest) (*types.ListProcessesInGuestResponse, error) {\n\tvar reqBody, resBody ListProcessesInGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ListRegistryKeysInGuestBody struct {\n\tReq    *types.ListRegistryKeysInGuest         `xml:\"urn:vim25 ListRegistryKeysInGuest,omitempty\"`\n\tRes    *types.ListRegistryKeysInGuestResponse `xml:\"urn:vim25 ListRegistryKeysInGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ListRegistryKeysInGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ListRegistryKeysInGuest(ctx context.Context, r soap.RoundTripper, req *types.ListRegistryKeysInGuest) (*types.ListRegistryKeysInGuestResponse, error) {\n\tvar reqBody, resBody ListRegistryKeysInGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ListRegistryValuesInGuestBody struct {\n\tReq    *types.ListRegistryValuesInGuest         `xml:\"urn:vim25 ListRegistryValuesInGuest,omitempty\"`\n\tRes    *types.ListRegistryValuesInGuestResponse `xml:\"urn:vim25 ListRegistryValuesInGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ListRegistryValuesInGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ListRegistryValuesInGuest(ctx context.Context, r soap.RoundTripper, req *types.ListRegistryValuesInGuest) (*types.ListRegistryValuesInGuestResponse, error) {\n\tvar reqBody, resBody ListRegistryValuesInGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, 
err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ListSmartCardTrustAnchorsBody struct {\n\tReq    *types.ListSmartCardTrustAnchors         `xml:\"urn:vim25 ListSmartCardTrustAnchors,omitempty\"`\n\tRes    *types.ListSmartCardTrustAnchorsResponse `xml:\"urn:vim25 ListSmartCardTrustAnchorsResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ListSmartCardTrustAnchorsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ListSmartCardTrustAnchors(ctx context.Context, r soap.RoundTripper, req *types.ListSmartCardTrustAnchors) (*types.ListSmartCardTrustAnchorsResponse, error) {\n\tvar reqBody, resBody ListSmartCardTrustAnchorsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ListTagsAttachedToVStorageObjectBody struct {\n\tReq    *types.ListTagsAttachedToVStorageObject         `xml:\"urn:vim25 ListTagsAttachedToVStorageObject,omitempty\"`\n\tRes    *types.ListTagsAttachedToVStorageObjectResponse `xml:\"urn:vim25 ListTagsAttachedToVStorageObjectResponse,omitempty\"`\n\tFault_ *soap.Fault                                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ListTagsAttachedToVStorageObjectBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ListTagsAttachedToVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.ListTagsAttachedToVStorageObject) (*types.ListTagsAttachedToVStorageObjectResponse, error) {\n\tvar reqBody, resBody ListTagsAttachedToVStorageObjectBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ListVStorageObjectBody struct {\n\tReq    *types.ListVStorageObject         `xml:\"urn:vim25 ListVStorageObject,omitempty\"`\n\tRes    *types.ListVStorageObjectResponse `xml:\"urn:vim25 
ListVStorageObjectResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ListVStorageObjectBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ListVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.ListVStorageObject) (*types.ListVStorageObjectResponse, error) {\n\tvar reqBody, resBody ListVStorageObjectBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ListVStorageObjectsAttachedToTagBody struct {\n\tReq    *types.ListVStorageObjectsAttachedToTag         `xml:\"urn:vim25 ListVStorageObjectsAttachedToTag,omitempty\"`\n\tRes    *types.ListVStorageObjectsAttachedToTagResponse `xml:\"urn:vim25 ListVStorageObjectsAttachedToTagResponse,omitempty\"`\n\tFault_ *soap.Fault                                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ListVStorageObjectsAttachedToTagBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ListVStorageObjectsAttachedToTag(ctx context.Context, r soap.RoundTripper, req *types.ListVStorageObjectsAttachedToTag) (*types.ListVStorageObjectsAttachedToTagResponse, error) {\n\tvar reqBody, resBody ListVStorageObjectsAttachedToTagBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype LogUserEventBody struct {\n\tReq    *types.LogUserEvent         `xml:\"urn:vim25 LogUserEvent,omitempty\"`\n\tRes    *types.LogUserEventResponse `xml:\"urn:vim25 LogUserEventResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *LogUserEventBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc LogUserEvent(ctx context.Context, r soap.RoundTripper, req *types.LogUserEvent) (*types.LogUserEventResponse, error) 
{\n\tvar reqBody, resBody LogUserEventBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype LoginBody struct {\n\tReq    *types.Login         `xml:\"urn:vim25 Login,omitempty\"`\n\tRes    *types.LoginResponse `xml:\"urn:vim25 LoginResponse,omitempty\"`\n\tFault_ *soap.Fault          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *LoginBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc Login(ctx context.Context, r soap.RoundTripper, req *types.Login) (*types.LoginResponse, error) {\n\tvar reqBody, resBody LoginBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype LoginBySSPIBody struct {\n\tReq    *types.LoginBySSPI         `xml:\"urn:vim25 LoginBySSPI,omitempty\"`\n\tRes    *types.LoginBySSPIResponse `xml:\"urn:vim25 LoginBySSPIResponse,omitempty\"`\n\tFault_ *soap.Fault                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *LoginBySSPIBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc LoginBySSPI(ctx context.Context, r soap.RoundTripper, req *types.LoginBySSPI) (*types.LoginBySSPIResponse, error) {\n\tvar reqBody, resBody LoginBySSPIBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype LoginByTokenBody struct {\n\tReq    *types.LoginByToken         `xml:\"urn:vim25 LoginByToken,omitempty\"`\n\tRes    *types.LoginByTokenResponse `xml:\"urn:vim25 LoginByTokenResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *LoginByTokenBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc LoginByToken(ctx context.Context, r soap.RoundTripper, req *types.LoginByToken) (*types.LoginByTokenResponse, 
error) {\n\tvar reqBody, resBody LoginByTokenBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype LoginExtensionByCertificateBody struct {\n\tReq    *types.LoginExtensionByCertificate         `xml:\"urn:vim25 LoginExtensionByCertificate,omitempty\"`\n\tRes    *types.LoginExtensionByCertificateResponse `xml:\"urn:vim25 LoginExtensionByCertificateResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *LoginExtensionByCertificateBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc LoginExtensionByCertificate(ctx context.Context, r soap.RoundTripper, req *types.LoginExtensionByCertificate) (*types.LoginExtensionByCertificateResponse, error) {\n\tvar reqBody, resBody LoginExtensionByCertificateBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype LoginExtensionBySubjectNameBody struct {\n\tReq    *types.LoginExtensionBySubjectName         `xml:\"urn:vim25 LoginExtensionBySubjectName,omitempty\"`\n\tRes    *types.LoginExtensionBySubjectNameResponse `xml:\"urn:vim25 LoginExtensionBySubjectNameResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *LoginExtensionBySubjectNameBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc LoginExtensionBySubjectName(ctx context.Context, r soap.RoundTripper, req *types.LoginExtensionBySubjectName) (*types.LoginExtensionBySubjectNameResponse, error) {\n\tvar reqBody, resBody LoginExtensionBySubjectNameBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype LogoutBody struct {\n\tReq    *types.Logout         `xml:\"urn:vim25 
Logout,omitempty\"`\n\tRes    *types.LogoutResponse `xml:\"urn:vim25 LogoutResponse,omitempty\"`\n\tFault_ *soap.Fault           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *LogoutBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc Logout(ctx context.Context, r soap.RoundTripper, req *types.Logout) (*types.LogoutResponse, error) {\n\tvar reqBody, resBody LogoutBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype LookupDvPortGroupBody struct {\n\tReq    *types.LookupDvPortGroup         `xml:\"urn:vim25 LookupDvPortGroup,omitempty\"`\n\tRes    *types.LookupDvPortGroupResponse `xml:\"urn:vim25 LookupDvPortGroupResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *LookupDvPortGroupBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc LookupDvPortGroup(ctx context.Context, r soap.RoundTripper, req *types.LookupDvPortGroup) (*types.LookupDvPortGroupResponse, error) {\n\tvar reqBody, resBody LookupDvPortGroupBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype LookupVmOverheadMemoryBody struct {\n\tReq    *types.LookupVmOverheadMemory         `xml:\"urn:vim25 LookupVmOverheadMemory,omitempty\"`\n\tRes    *types.LookupVmOverheadMemoryResponse `xml:\"urn:vim25 LookupVmOverheadMemoryResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *LookupVmOverheadMemoryBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc LookupVmOverheadMemory(ctx context.Context, r soap.RoundTripper, req *types.LookupVmOverheadMemory) (*types.LookupVmOverheadMemoryResponse, error) {\n\tvar reqBody, resBody LookupVmOverheadMemoryBody\n\n\treqBody.Req = req\n\n\tif 
err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MakeDirectoryBody struct {\n\tReq    *types.MakeDirectory         `xml:\"urn:vim25 MakeDirectory,omitempty\"`\n\tRes    *types.MakeDirectoryResponse `xml:\"urn:vim25 MakeDirectoryResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MakeDirectoryBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MakeDirectory(ctx context.Context, r soap.RoundTripper, req *types.MakeDirectory) (*types.MakeDirectoryResponse, error) {\n\tvar reqBody, resBody MakeDirectoryBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MakeDirectoryInGuestBody struct {\n\tReq    *types.MakeDirectoryInGuest         `xml:\"urn:vim25 MakeDirectoryInGuest,omitempty\"`\n\tRes    *types.MakeDirectoryInGuestResponse `xml:\"urn:vim25 MakeDirectoryInGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MakeDirectoryInGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MakeDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types.MakeDirectoryInGuest) (*types.MakeDirectoryInGuestResponse, error) {\n\tvar reqBody, resBody MakeDirectoryInGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MakePrimaryVM_TaskBody struct {\n\tReq    *types.MakePrimaryVM_Task         `xml:\"urn:vim25 MakePrimaryVM_Task,omitempty\"`\n\tRes    *types.MakePrimaryVM_TaskResponse `xml:\"urn:vim25 MakePrimaryVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MakePrimaryVM_TaskBody) 
Fault() *soap.Fault { return b.Fault_ }\n\nfunc MakePrimaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types.MakePrimaryVM_Task) (*types.MakePrimaryVM_TaskResponse, error) {\n\tvar reqBody, resBody MakePrimaryVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MarkAsLocal_TaskBody struct {\n\tReq    *types.MarkAsLocal_Task         `xml:\"urn:vim25 MarkAsLocal_Task,omitempty\"`\n\tRes    *types.MarkAsLocal_TaskResponse `xml:\"urn:vim25 MarkAsLocal_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MarkAsLocal_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MarkAsLocal_Task(ctx context.Context, r soap.RoundTripper, req *types.MarkAsLocal_Task) (*types.MarkAsLocal_TaskResponse, error) {\n\tvar reqBody, resBody MarkAsLocal_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MarkAsNonLocal_TaskBody struct {\n\tReq    *types.MarkAsNonLocal_Task         `xml:\"urn:vim25 MarkAsNonLocal_Task,omitempty\"`\n\tRes    *types.MarkAsNonLocal_TaskResponse `xml:\"urn:vim25 MarkAsNonLocal_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MarkAsNonLocal_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MarkAsNonLocal_Task(ctx context.Context, r soap.RoundTripper, req *types.MarkAsNonLocal_Task) (*types.MarkAsNonLocal_TaskResponse, error) {\n\tvar reqBody, resBody MarkAsNonLocal_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MarkAsNonSsd_TaskBody struct {\n\tReq    *types.MarkAsNonSsd_Task         `xml:\"urn:vim25 
MarkAsNonSsd_Task,omitempty\"`\n\tRes    *types.MarkAsNonSsd_TaskResponse `xml:\"urn:vim25 MarkAsNonSsd_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MarkAsNonSsd_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MarkAsNonSsd_Task(ctx context.Context, r soap.RoundTripper, req *types.MarkAsNonSsd_Task) (*types.MarkAsNonSsd_TaskResponse, error) {\n\tvar reqBody, resBody MarkAsNonSsd_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MarkAsSsd_TaskBody struct {\n\tReq    *types.MarkAsSsd_Task         `xml:\"urn:vim25 MarkAsSsd_Task,omitempty\"`\n\tRes    *types.MarkAsSsd_TaskResponse `xml:\"urn:vim25 MarkAsSsd_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MarkAsSsd_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MarkAsSsd_Task(ctx context.Context, r soap.RoundTripper, req *types.MarkAsSsd_Task) (*types.MarkAsSsd_TaskResponse, error) {\n\tvar reqBody, resBody MarkAsSsd_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MarkAsTemplateBody struct {\n\tReq    *types.MarkAsTemplate         `xml:\"urn:vim25 MarkAsTemplate,omitempty\"`\n\tRes    *types.MarkAsTemplateResponse `xml:\"urn:vim25 MarkAsTemplateResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MarkAsTemplateBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MarkAsTemplate(ctx context.Context, r soap.RoundTripper, req *types.MarkAsTemplate) (*types.MarkAsTemplateResponse, error) {\n\tvar reqBody, resBody MarkAsTemplateBody\n\n\treqBody.Req = req\n\n\tif err := 
r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MarkAsVirtualMachineBody struct {\n\tReq    *types.MarkAsVirtualMachine         `xml:\"urn:vim25 MarkAsVirtualMachine,omitempty\"`\n\tRes    *types.MarkAsVirtualMachineResponse `xml:\"urn:vim25 MarkAsVirtualMachineResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MarkAsVirtualMachineBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MarkAsVirtualMachine(ctx context.Context, r soap.RoundTripper, req *types.MarkAsVirtualMachine) (*types.MarkAsVirtualMachineResponse, error) {\n\tvar reqBody, resBody MarkAsVirtualMachineBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MarkDefaultBody struct {\n\tReq    *types.MarkDefault         `xml:\"urn:vim25 MarkDefault,omitempty\"`\n\tRes    *types.MarkDefaultResponse `xml:\"urn:vim25 MarkDefaultResponse,omitempty\"`\n\tFault_ *soap.Fault                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MarkDefaultBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MarkDefault(ctx context.Context, r soap.RoundTripper, req *types.MarkDefault) (*types.MarkDefaultResponse, error) {\n\tvar reqBody, resBody MarkDefaultBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MarkForRemovalBody struct {\n\tReq    *types.MarkForRemoval         `xml:\"urn:vim25 MarkForRemoval,omitempty\"`\n\tRes    *types.MarkForRemovalResponse `xml:\"urn:vim25 MarkForRemovalResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MarkForRemovalBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc 
MarkForRemoval(ctx context.Context, r soap.RoundTripper, req *types.MarkForRemoval) (*types.MarkForRemovalResponse, error) {\n\tvar reqBody, resBody MarkForRemovalBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MergeDvs_TaskBody struct {\n\tReq    *types.MergeDvs_Task         `xml:\"urn:vim25 MergeDvs_Task,omitempty\"`\n\tRes    *types.MergeDvs_TaskResponse `xml:\"urn:vim25 MergeDvs_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MergeDvs_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MergeDvs_Task(ctx context.Context, r soap.RoundTripper, req *types.MergeDvs_Task) (*types.MergeDvs_TaskResponse, error) {\n\tvar reqBody, resBody MergeDvs_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MergePermissionsBody struct {\n\tReq    *types.MergePermissions         `xml:\"urn:vim25 MergePermissions,omitempty\"`\n\tRes    *types.MergePermissionsResponse `xml:\"urn:vim25 MergePermissionsResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MergePermissionsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MergePermissions(ctx context.Context, r soap.RoundTripper, req *types.MergePermissions) (*types.MergePermissionsResponse, error) {\n\tvar reqBody, resBody MergePermissionsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MigrateVM_TaskBody struct {\n\tReq    *types.MigrateVM_Task         `xml:\"urn:vim25 MigrateVM_Task,omitempty\"`\n\tRes    *types.MigrateVM_TaskResponse `xml:\"urn:vim25 MigrateVM_TaskResponse,omitempty\"`\n\tFault_ 
*soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MigrateVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MigrateVM_Task(ctx context.Context, r soap.RoundTripper, req *types.MigrateVM_Task) (*types.MigrateVM_TaskResponse, error) {\n\tvar reqBody, resBody MigrateVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ModifyListViewBody struct {\n\tReq    *types.ModifyListView         `xml:\"urn:vim25 ModifyListView,omitempty\"`\n\tRes    *types.ModifyListViewResponse `xml:\"urn:vim25 ModifyListViewResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ModifyListViewBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ModifyListView(ctx context.Context, r soap.RoundTripper, req *types.ModifyListView) (*types.ModifyListViewResponse, error) {\n\tvar reqBody, resBody ModifyListViewBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MountToolsInstallerBody struct {\n\tReq    *types.MountToolsInstaller         `xml:\"urn:vim25 MountToolsInstaller,omitempty\"`\n\tRes    *types.MountToolsInstallerResponse `xml:\"urn:vim25 MountToolsInstallerResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MountToolsInstallerBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MountToolsInstaller(ctx context.Context, r soap.RoundTripper, req *types.MountToolsInstaller) (*types.MountToolsInstallerResponse, error) {\n\tvar reqBody, resBody MountToolsInstallerBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype 
MountVffsVolumeBody struct {\n\tReq    *types.MountVffsVolume         `xml:\"urn:vim25 MountVffsVolume,omitempty\"`\n\tRes    *types.MountVffsVolumeResponse `xml:\"urn:vim25 MountVffsVolumeResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MountVffsVolumeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MountVffsVolume(ctx context.Context, r soap.RoundTripper, req *types.MountVffsVolume) (*types.MountVffsVolumeResponse, error) {\n\tvar reqBody, resBody MountVffsVolumeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MountVmfsVolumeBody struct {\n\tReq    *types.MountVmfsVolume         `xml:\"urn:vim25 MountVmfsVolume,omitempty\"`\n\tRes    *types.MountVmfsVolumeResponse `xml:\"urn:vim25 MountVmfsVolumeResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MountVmfsVolumeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MountVmfsVolume(ctx context.Context, r soap.RoundTripper, req *types.MountVmfsVolume) (*types.MountVmfsVolumeResponse, error) {\n\tvar reqBody, resBody MountVmfsVolumeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MountVmfsVolumeEx_TaskBody struct {\n\tReq    *types.MountVmfsVolumeEx_Task         `xml:\"urn:vim25 MountVmfsVolumeEx_Task,omitempty\"`\n\tRes    *types.MountVmfsVolumeEx_TaskResponse `xml:\"urn:vim25 MountVmfsVolumeEx_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MountVmfsVolumeEx_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MountVmfsVolumeEx_Task(ctx context.Context, r soap.RoundTripper, req 
*types.MountVmfsVolumeEx_Task) (*types.MountVmfsVolumeEx_TaskResponse, error) {\n\tvar reqBody, resBody MountVmfsVolumeEx_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MoveDVPort_TaskBody struct {\n\tReq    *types.MoveDVPort_Task         `xml:\"urn:vim25 MoveDVPort_Task,omitempty\"`\n\tRes    *types.MoveDVPort_TaskResponse `xml:\"urn:vim25 MoveDVPort_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MoveDVPort_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MoveDVPort_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveDVPort_Task) (*types.MoveDVPort_TaskResponse, error) {\n\tvar reqBody, resBody MoveDVPort_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MoveDatastoreFile_TaskBody struct {\n\tReq    *types.MoveDatastoreFile_Task         `xml:\"urn:vim25 MoveDatastoreFile_Task,omitempty\"`\n\tRes    *types.MoveDatastoreFile_TaskResponse `xml:\"urn:vim25 MoveDatastoreFile_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MoveDatastoreFile_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MoveDatastoreFile_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveDatastoreFile_Task) (*types.MoveDatastoreFile_TaskResponse, error) {\n\tvar reqBody, resBody MoveDatastoreFile_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MoveDirectoryInGuestBody struct {\n\tReq    *types.MoveDirectoryInGuest         `xml:\"urn:vim25 MoveDirectoryInGuest,omitempty\"`\n\tRes    
*types.MoveDirectoryInGuestResponse `xml:\"urn:vim25 MoveDirectoryInGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MoveDirectoryInGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MoveDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types.MoveDirectoryInGuest) (*types.MoveDirectoryInGuestResponse, error) {\n\tvar reqBody, resBody MoveDirectoryInGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MoveFileInGuestBody struct {\n\tReq    *types.MoveFileInGuest         `xml:\"urn:vim25 MoveFileInGuest,omitempty\"`\n\tRes    *types.MoveFileInGuestResponse `xml:\"urn:vim25 MoveFileInGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MoveFileInGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MoveFileInGuest(ctx context.Context, r soap.RoundTripper, req *types.MoveFileInGuest) (*types.MoveFileInGuestResponse, error) {\n\tvar reqBody, resBody MoveFileInGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MoveHostInto_TaskBody struct {\n\tReq    *types.MoveHostInto_Task         `xml:\"urn:vim25 MoveHostInto_Task,omitempty\"`\n\tRes    *types.MoveHostInto_TaskResponse `xml:\"urn:vim25 MoveHostInto_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MoveHostInto_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MoveHostInto_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveHostInto_Task) (*types.MoveHostInto_TaskResponse, error) {\n\tvar reqBody, resBody MoveHostInto_TaskBody\n\n\treqBody.Req = 
req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MoveIntoFolder_TaskBody struct {\n\tReq    *types.MoveIntoFolder_Task         `xml:\"urn:vim25 MoveIntoFolder_Task,omitempty\"`\n\tRes    *types.MoveIntoFolder_TaskResponse `xml:\"urn:vim25 MoveIntoFolder_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MoveIntoFolder_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MoveIntoFolder_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveIntoFolder_Task) (*types.MoveIntoFolder_TaskResponse, error) {\n\tvar reqBody, resBody MoveIntoFolder_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MoveIntoResourcePoolBody struct {\n\tReq    *types.MoveIntoResourcePool         `xml:\"urn:vim25 MoveIntoResourcePool,omitempty\"`\n\tRes    *types.MoveIntoResourcePoolResponse `xml:\"urn:vim25 MoveIntoResourcePoolResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MoveIntoResourcePoolBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MoveIntoResourcePool(ctx context.Context, r soap.RoundTripper, req *types.MoveIntoResourcePool) (*types.MoveIntoResourcePoolResponse, error) {\n\tvar reqBody, resBody MoveIntoResourcePoolBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MoveInto_TaskBody struct {\n\tReq    *types.MoveInto_Task         `xml:\"urn:vim25 MoveInto_Task,omitempty\"`\n\tRes    *types.MoveInto_TaskResponse `xml:\"urn:vim25 MoveInto_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ 
Fault,omitempty\"`\n}\n\nfunc (b *MoveInto_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MoveInto_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveInto_Task) (*types.MoveInto_TaskResponse, error) {\n\tvar reqBody, resBody MoveInto_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype MoveVirtualDisk_TaskBody struct {\n\tReq    *types.MoveVirtualDisk_Task         `xml:\"urn:vim25 MoveVirtualDisk_Task,omitempty\"`\n\tRes    *types.MoveVirtualDisk_TaskResponse `xml:\"urn:vim25 MoveVirtualDisk_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *MoveVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc MoveVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveVirtualDisk_Task) (*types.MoveVirtualDisk_TaskResponse, error) {\n\tvar reqBody, resBody MoveVirtualDisk_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype OpenInventoryViewFolderBody struct {\n\tReq    *types.OpenInventoryViewFolder         `xml:\"urn:vim25 OpenInventoryViewFolder,omitempty\"`\n\tRes    *types.OpenInventoryViewFolderResponse `xml:\"urn:vim25 OpenInventoryViewFolderResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *OpenInventoryViewFolderBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc OpenInventoryViewFolder(ctx context.Context, r soap.RoundTripper, req *types.OpenInventoryViewFolder) (*types.OpenInventoryViewFolderResponse, error) {\n\tvar reqBody, resBody OpenInventoryViewFolderBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn 
resBody.Res, nil\n}\n\ntype OverwriteCustomizationSpecBody struct {\n\tReq    *types.OverwriteCustomizationSpec         `xml:\"urn:vim25 OverwriteCustomizationSpec,omitempty\"`\n\tRes    *types.OverwriteCustomizationSpecResponse `xml:\"urn:vim25 OverwriteCustomizationSpecResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *OverwriteCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc OverwriteCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.OverwriteCustomizationSpec) (*types.OverwriteCustomizationSpecResponse, error) {\n\tvar reqBody, resBody OverwriteCustomizationSpecBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ParseDescriptorBody struct {\n\tReq    *types.ParseDescriptor         `xml:\"urn:vim25 ParseDescriptor,omitempty\"`\n\tRes    *types.ParseDescriptorResponse `xml:\"urn:vim25 ParseDescriptorResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ParseDescriptorBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ParseDescriptor(ctx context.Context, r soap.RoundTripper, req *types.ParseDescriptor) (*types.ParseDescriptorResponse, error) {\n\tvar reqBody, resBody ParseDescriptorBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PerformDvsProductSpecOperation_TaskBody struct {\n\tReq    *types.PerformDvsProductSpecOperation_Task         `xml:\"urn:vim25 PerformDvsProductSpecOperation_Task,omitempty\"`\n\tRes    *types.PerformDvsProductSpecOperation_TaskResponse `xml:\"urn:vim25 PerformDvsProductSpecOperation_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                        
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PerformDvsProductSpecOperation_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PerformDvsProductSpecOperation_Task(ctx context.Context, r soap.RoundTripper, req *types.PerformDvsProductSpecOperation_Task) (*types.PerformDvsProductSpecOperation_TaskResponse, error) {\n\tvar reqBody, resBody PerformDvsProductSpecOperation_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PerformVsanUpgradePreflightCheckBody struct {\n\tReq    *types.PerformVsanUpgradePreflightCheck         `xml:\"urn:vim25 PerformVsanUpgradePreflightCheck,omitempty\"`\n\tRes    *types.PerformVsanUpgradePreflightCheckResponse `xml:\"urn:vim25 PerformVsanUpgradePreflightCheckResponse,omitempty\"`\n\tFault_ *soap.Fault                                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PerformVsanUpgradePreflightCheckBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PerformVsanUpgradePreflightCheck(ctx context.Context, r soap.RoundTripper, req *types.PerformVsanUpgradePreflightCheck) (*types.PerformVsanUpgradePreflightCheckResponse, error) {\n\tvar reqBody, resBody PerformVsanUpgradePreflightCheckBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PerformVsanUpgrade_TaskBody struct {\n\tReq    *types.PerformVsanUpgrade_Task         `xml:\"urn:vim25 PerformVsanUpgrade_Task,omitempty\"`\n\tRes    *types.PerformVsanUpgrade_TaskResponse `xml:\"urn:vim25 PerformVsanUpgrade_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PerformVsanUpgrade_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PerformVsanUpgrade_Task(ctx 
context.Context, r soap.RoundTripper, req *types.PerformVsanUpgrade_Task) (*types.PerformVsanUpgrade_TaskResponse, error) {\n\tvar reqBody, resBody PerformVsanUpgrade_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PlaceVmBody struct {\n\tReq    *types.PlaceVm         `xml:\"urn:vim25 PlaceVm,omitempty\"`\n\tRes    *types.PlaceVmResponse `xml:\"urn:vim25 PlaceVmResponse,omitempty\"`\n\tFault_ *soap.Fault            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PlaceVmBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PlaceVm(ctx context.Context, r soap.RoundTripper, req *types.PlaceVm) (*types.PlaceVmResponse, error) {\n\tvar reqBody, resBody PlaceVmBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PostEventBody struct {\n\tReq    *types.PostEvent         `xml:\"urn:vim25 PostEvent,omitempty\"`\n\tRes    *types.PostEventResponse `xml:\"urn:vim25 PostEventResponse,omitempty\"`\n\tFault_ *soap.Fault              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PostEventBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PostEvent(ctx context.Context, r soap.RoundTripper, req *types.PostEvent) (*types.PostEventResponse, error) {\n\tvar reqBody, resBody PostEventBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PostHealthUpdatesBody struct {\n\tReq    *types.PostHealthUpdates         `xml:\"urn:vim25 PostHealthUpdates,omitempty\"`\n\tRes    *types.PostHealthUpdatesResponse `xml:\"urn:vim25 PostHealthUpdatesResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b 
*PostHealthUpdatesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PostHealthUpdates(ctx context.Context, r soap.RoundTripper, req *types.PostHealthUpdates) (*types.PostHealthUpdatesResponse, error) {\n\tvar reqBody, resBody PostHealthUpdatesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PowerDownHostToStandBy_TaskBody struct {\n\tReq    *types.PowerDownHostToStandBy_Task         `xml:\"urn:vim25 PowerDownHostToStandBy_Task,omitempty\"`\n\tRes    *types.PowerDownHostToStandBy_TaskResponse `xml:\"urn:vim25 PowerDownHostToStandBy_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PowerDownHostToStandBy_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PowerDownHostToStandBy_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerDownHostToStandBy_Task) (*types.PowerDownHostToStandBy_TaskResponse, error) {\n\tvar reqBody, resBody PowerDownHostToStandBy_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PowerOffVApp_TaskBody struct {\n\tReq    *types.PowerOffVApp_Task         `xml:\"urn:vim25 PowerOffVApp_Task,omitempty\"`\n\tRes    *types.PowerOffVApp_TaskResponse `xml:\"urn:vim25 PowerOffVApp_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PowerOffVApp_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PowerOffVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerOffVApp_Task) (*types.PowerOffVApp_TaskResponse, error) {\n\tvar reqBody, resBody PowerOffVApp_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn 
resBody.Res, nil\n}\n\ntype PowerOffVM_TaskBody struct {\n\tReq    *types.PowerOffVM_Task         `xml:\"urn:vim25 PowerOffVM_Task,omitempty\"`\n\tRes    *types.PowerOffVM_TaskResponse `xml:\"urn:vim25 PowerOffVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PowerOffVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PowerOffVM_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerOffVM_Task) (*types.PowerOffVM_TaskResponse, error) {\n\tvar reqBody, resBody PowerOffVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PowerOnMultiVM_TaskBody struct {\n\tReq    *types.PowerOnMultiVM_Task         `xml:\"urn:vim25 PowerOnMultiVM_Task,omitempty\"`\n\tRes    *types.PowerOnMultiVM_TaskResponse `xml:\"urn:vim25 PowerOnMultiVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PowerOnMultiVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PowerOnMultiVM_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerOnMultiVM_Task) (*types.PowerOnMultiVM_TaskResponse, error) {\n\tvar reqBody, resBody PowerOnMultiVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PowerOnVApp_TaskBody struct {\n\tReq    *types.PowerOnVApp_Task         `xml:\"urn:vim25 PowerOnVApp_Task,omitempty\"`\n\tRes    *types.PowerOnVApp_TaskResponse `xml:\"urn:vim25 PowerOnVApp_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PowerOnVApp_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PowerOnVApp_Task(ctx context.Context, r 
soap.RoundTripper, req *types.PowerOnVApp_Task) (*types.PowerOnVApp_TaskResponse, error) {\n\tvar reqBody, resBody PowerOnVApp_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PowerOnVM_TaskBody struct {\n\tReq    *types.PowerOnVM_Task         `xml:\"urn:vim25 PowerOnVM_Task,omitempty\"`\n\tRes    *types.PowerOnVM_TaskResponse `xml:\"urn:vim25 PowerOnVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PowerOnVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PowerOnVM_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerOnVM_Task) (*types.PowerOnVM_TaskResponse, error) {\n\tvar reqBody, resBody PowerOnVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PowerUpHostFromStandBy_TaskBody struct {\n\tReq    *types.PowerUpHostFromStandBy_Task         `xml:\"urn:vim25 PowerUpHostFromStandBy_Task,omitempty\"`\n\tRes    *types.PowerUpHostFromStandBy_TaskResponse `xml:\"urn:vim25 PowerUpHostFromStandBy_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PowerUpHostFromStandBy_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PowerUpHostFromStandBy_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerUpHostFromStandBy_Task) (*types.PowerUpHostFromStandBy_TaskResponse, error) {\n\tvar reqBody, resBody PowerUpHostFromStandBy_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PrepareCryptoBody struct {\n\tReq    *types.PrepareCrypto         `xml:\"urn:vim25 PrepareCrypto,omitempty\"`\n\tRes    
*types.PrepareCryptoResponse `xml:\"urn:vim25 PrepareCryptoResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PrepareCryptoBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PrepareCrypto(ctx context.Context, r soap.RoundTripper, req *types.PrepareCrypto) (*types.PrepareCryptoResponse, error) {\n\tvar reqBody, resBody PrepareCryptoBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PromoteDisks_TaskBody struct {\n\tReq    *types.PromoteDisks_Task         `xml:\"urn:vim25 PromoteDisks_Task,omitempty\"`\n\tRes    *types.PromoteDisks_TaskResponse `xml:\"urn:vim25 PromoteDisks_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PromoteDisks_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PromoteDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.PromoteDisks_Task) (*types.PromoteDisks_TaskResponse, error) {\n\tvar reqBody, resBody PromoteDisks_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype PutUsbScanCodesBody struct {\n\tReq    *types.PutUsbScanCodes         `xml:\"urn:vim25 PutUsbScanCodes,omitempty\"`\n\tRes    *types.PutUsbScanCodesResponse `xml:\"urn:vim25 PutUsbScanCodesResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *PutUsbScanCodesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc PutUsbScanCodes(ctx context.Context, r soap.RoundTripper, req *types.PutUsbScanCodes) (*types.PutUsbScanCodesResponse, error) {\n\tvar reqBody, resBody PutUsbScanCodesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err 
!= nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryAnswerFileStatusBody struct {\n\tReq    *types.QueryAnswerFileStatus         `xml:\"urn:vim25 QueryAnswerFileStatus,omitempty\"`\n\tRes    *types.QueryAnswerFileStatusResponse `xml:\"urn:vim25 QueryAnswerFileStatusResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryAnswerFileStatusBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryAnswerFileStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryAnswerFileStatus) (*types.QueryAnswerFileStatusResponse, error) {\n\tvar reqBody, resBody QueryAnswerFileStatusBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryAssignedLicensesBody struct {\n\tReq    *types.QueryAssignedLicenses         `xml:\"urn:vim25 QueryAssignedLicenses,omitempty\"`\n\tRes    *types.QueryAssignedLicensesResponse `xml:\"urn:vim25 QueryAssignedLicensesResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryAssignedLicensesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryAssignedLicenses(ctx context.Context, r soap.RoundTripper, req *types.QueryAssignedLicenses) (*types.QueryAssignedLicensesResponse, error) {\n\tvar reqBody, resBody QueryAssignedLicensesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryAvailableDisksForVmfsBody struct {\n\tReq    *types.QueryAvailableDisksForVmfs         `xml:\"urn:vim25 QueryAvailableDisksForVmfs,omitempty\"`\n\tRes    *types.QueryAvailableDisksForVmfsResponse `xml:\"urn:vim25 QueryAvailableDisksForVmfsResponse,omitempty\"`\n\tFault_ *soap.Fault                               
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryAvailableDisksForVmfsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryAvailableDisksForVmfs(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailableDisksForVmfs) (*types.QueryAvailableDisksForVmfsResponse, error) {\n\tvar reqBody, resBody QueryAvailableDisksForVmfsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryAvailableDvsSpecBody struct {\n\tReq    *types.QueryAvailableDvsSpec         `xml:\"urn:vim25 QueryAvailableDvsSpec,omitempty\"`\n\tRes    *types.QueryAvailableDvsSpecResponse `xml:\"urn:vim25 QueryAvailableDvsSpecResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryAvailableDvsSpecBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryAvailableDvsSpec(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailableDvsSpec) (*types.QueryAvailableDvsSpecResponse, error) {\n\tvar reqBody, resBody QueryAvailableDvsSpecBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryAvailablePartitionBody struct {\n\tReq    *types.QueryAvailablePartition         `xml:\"urn:vim25 QueryAvailablePartition,omitempty\"`\n\tRes    *types.QueryAvailablePartitionResponse `xml:\"urn:vim25 QueryAvailablePartitionResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryAvailablePartitionBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryAvailablePartition(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailablePartition) (*types.QueryAvailablePartitionResponse, error) {\n\tvar reqBody, resBody 
QueryAvailablePartitionBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryAvailablePerfMetricBody struct {\n\tReq    *types.QueryAvailablePerfMetric         `xml:\"urn:vim25 QueryAvailablePerfMetric,omitempty\"`\n\tRes    *types.QueryAvailablePerfMetricResponse `xml:\"urn:vim25 QueryAvailablePerfMetricResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryAvailablePerfMetricBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryAvailablePerfMetric(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailablePerfMetric) (*types.QueryAvailablePerfMetricResponse, error) {\n\tvar reqBody, resBody QueryAvailablePerfMetricBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryAvailableSsdsBody struct {\n\tReq    *types.QueryAvailableSsds         `xml:\"urn:vim25 QueryAvailableSsds,omitempty\"`\n\tRes    *types.QueryAvailableSsdsResponse `xml:\"urn:vim25 QueryAvailableSsdsResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryAvailableSsdsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryAvailableSsds(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailableSsds) (*types.QueryAvailableSsdsResponse, error) {\n\tvar reqBody, resBody QueryAvailableSsdsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryAvailableTimeZonesBody struct {\n\tReq    *types.QueryAvailableTimeZones         `xml:\"urn:vim25 QueryAvailableTimeZones,omitempty\"`\n\tRes    *types.QueryAvailableTimeZonesResponse `xml:\"urn:vim25 
QueryAvailableTimeZonesResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryAvailableTimeZonesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryAvailableTimeZones(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailableTimeZones) (*types.QueryAvailableTimeZonesResponse, error) {\n\tvar reqBody, resBody QueryAvailableTimeZonesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryBootDevicesBody struct {\n\tReq    *types.QueryBootDevices         `xml:\"urn:vim25 QueryBootDevices,omitempty\"`\n\tRes    *types.QueryBootDevicesResponse `xml:\"urn:vim25 QueryBootDevicesResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryBootDevicesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryBootDevices(ctx context.Context, r soap.RoundTripper, req *types.QueryBootDevices) (*types.QueryBootDevicesResponse, error) {\n\tvar reqBody, resBody QueryBootDevicesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryBoundVnicsBody struct {\n\tReq    *types.QueryBoundVnics         `xml:\"urn:vim25 QueryBoundVnics,omitempty\"`\n\tRes    *types.QueryBoundVnicsResponse `xml:\"urn:vim25 QueryBoundVnicsResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryBoundVnicsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryBoundVnics(ctx context.Context, r soap.RoundTripper, req *types.QueryBoundVnics) (*types.QueryBoundVnicsResponse, error) {\n\tvar reqBody, resBody QueryBoundVnicsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, 
&resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryCandidateNicsBody struct {\n\tReq    *types.QueryCandidateNics         `xml:\"urn:vim25 QueryCandidateNics,omitempty\"`\n\tRes    *types.QueryCandidateNicsResponse `xml:\"urn:vim25 QueryCandidateNicsResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryCandidateNicsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryCandidateNics(ctx context.Context, r soap.RoundTripper, req *types.QueryCandidateNics) (*types.QueryCandidateNicsResponse, error) {\n\tvar reqBody, resBody QueryCandidateNicsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryChangedDiskAreasBody struct {\n\tReq    *types.QueryChangedDiskAreas         `xml:\"urn:vim25 QueryChangedDiskAreas,omitempty\"`\n\tRes    *types.QueryChangedDiskAreasResponse `xml:\"urn:vim25 QueryChangedDiskAreasResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryChangedDiskAreasBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryChangedDiskAreas(ctx context.Context, r soap.RoundTripper, req *types.QueryChangedDiskAreas) (*types.QueryChangedDiskAreasResponse, error) {\n\tvar reqBody, resBody QueryChangedDiskAreasBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryCmmdsBody struct {\n\tReq    *types.QueryCmmds         `xml:\"urn:vim25 QueryCmmds,omitempty\"`\n\tRes    *types.QueryCmmdsResponse `xml:\"urn:vim25 QueryCmmdsResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryCmmdsBody) Fault() *soap.Fault { 
return b.Fault_ }\n\nfunc QueryCmmds(ctx context.Context, r soap.RoundTripper, req *types.QueryCmmds) (*types.QueryCmmdsResponse, error) {\n\tvar reqBody, resBody QueryCmmdsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryCompatibleHostForExistingDvsBody struct {\n\tReq    *types.QueryCompatibleHostForExistingDvs         `xml:\"urn:vim25 QueryCompatibleHostForExistingDvs,omitempty\"`\n\tRes    *types.QueryCompatibleHostForExistingDvsResponse `xml:\"urn:vim25 QueryCompatibleHostForExistingDvsResponse,omitempty\"`\n\tFault_ *soap.Fault                                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryCompatibleHostForExistingDvsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryCompatibleHostForExistingDvs(ctx context.Context, r soap.RoundTripper, req *types.QueryCompatibleHostForExistingDvs) (*types.QueryCompatibleHostForExistingDvsResponse, error) {\n\tvar reqBody, resBody QueryCompatibleHostForExistingDvsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryCompatibleHostForNewDvsBody struct {\n\tReq    *types.QueryCompatibleHostForNewDvs         `xml:\"urn:vim25 QueryCompatibleHostForNewDvs,omitempty\"`\n\tRes    *types.QueryCompatibleHostForNewDvsResponse `xml:\"urn:vim25 QueryCompatibleHostForNewDvsResponse,omitempty\"`\n\tFault_ *soap.Fault                                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryCompatibleHostForNewDvsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryCompatibleHostForNewDvs(ctx context.Context, r soap.RoundTripper, req *types.QueryCompatibleHostForNewDvs) (*types.QueryCompatibleHostForNewDvsResponse, error) {\n\tvar reqBody, resBody QueryCompatibleHostForNewDvsBody\n\n\treqBody.Req = 
req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryComplianceStatusBody struct {\n\tReq    *types.QueryComplianceStatus         `xml:\"urn:vim25 QueryComplianceStatus,omitempty\"`\n\tRes    *types.QueryComplianceStatusResponse `xml:\"urn:vim25 QueryComplianceStatusResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryComplianceStatusBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryComplianceStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryComplianceStatus) (*types.QueryComplianceStatusResponse, error) {\n\tvar reqBody, resBody QueryComplianceStatusBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryConfigOptionBody struct {\n\tReq    *types.QueryConfigOption         `xml:\"urn:vim25 QueryConfigOption,omitempty\"`\n\tRes    *types.QueryConfigOptionResponse `xml:\"urn:vim25 QueryConfigOptionResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryConfigOptionBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryConfigOption(ctx context.Context, r soap.RoundTripper, req *types.QueryConfigOption) (*types.QueryConfigOptionResponse, error) {\n\tvar reqBody, resBody QueryConfigOptionBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryConfigOptionDescriptorBody struct {\n\tReq    *types.QueryConfigOptionDescriptor         `xml:\"urn:vim25 QueryConfigOptionDescriptor,omitempty\"`\n\tRes    *types.QueryConfigOptionDescriptorResponse `xml:\"urn:vim25 QueryConfigOptionDescriptorResponse,omitempty\"`\n\tFault_ *soap.Fault                      
          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryConfigOptionDescriptorBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryConfigOptionDescriptor(ctx context.Context, r soap.RoundTripper, req *types.QueryConfigOptionDescriptor) (*types.QueryConfigOptionDescriptorResponse, error) {\n\tvar reqBody, resBody QueryConfigOptionDescriptorBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryConfigOptionExBody struct {\n\tReq    *types.QueryConfigOptionEx         `xml:\"urn:vim25 QueryConfigOptionEx,omitempty\"`\n\tRes    *types.QueryConfigOptionExResponse `xml:\"urn:vim25 QueryConfigOptionExResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryConfigOptionExBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryConfigOptionEx(ctx context.Context, r soap.RoundTripper, req *types.QueryConfigOptionEx) (*types.QueryConfigOptionExResponse, error) {\n\tvar reqBody, resBody QueryConfigOptionExBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryConfigTargetBody struct {\n\tReq    *types.QueryConfigTarget         `xml:\"urn:vim25 QueryConfigTarget,omitempty\"`\n\tRes    *types.QueryConfigTargetResponse `xml:\"urn:vim25 QueryConfigTargetResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryConfigTargetBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryConfigTarget(ctx context.Context, r soap.RoundTripper, req *types.QueryConfigTarget) (*types.QueryConfigTargetResponse, error) {\n\tvar reqBody, resBody QueryConfigTargetBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); 
err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryConfiguredModuleOptionStringBody struct {\n\tReq    *types.QueryConfiguredModuleOptionString         `xml:\"urn:vim25 QueryConfiguredModuleOptionString,omitempty\"`\n\tRes    *types.QueryConfiguredModuleOptionStringResponse `xml:\"urn:vim25 QueryConfiguredModuleOptionStringResponse,omitempty\"`\n\tFault_ *soap.Fault                                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryConfiguredModuleOptionStringBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryConfiguredModuleOptionString(ctx context.Context, r soap.RoundTripper, req *types.QueryConfiguredModuleOptionString) (*types.QueryConfiguredModuleOptionStringResponse, error) {\n\tvar reqBody, resBody QueryConfiguredModuleOptionStringBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryConnectionInfoBody struct {\n\tReq    *types.QueryConnectionInfo         `xml:\"urn:vim25 QueryConnectionInfo,omitempty\"`\n\tRes    *types.QueryConnectionInfoResponse `xml:\"urn:vim25 QueryConnectionInfoResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryConnectionInfoBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryConnectionInfo(ctx context.Context, r soap.RoundTripper, req *types.QueryConnectionInfo) (*types.QueryConnectionInfoResponse, error) {\n\tvar reqBody, resBody QueryConnectionInfoBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryConnectionInfoViaSpecBody struct {\n\tReq    *types.QueryConnectionInfoViaSpec         `xml:\"urn:vim25 QueryConnectionInfoViaSpec,omitempty\"`\n\tRes    *types.QueryConnectionInfoViaSpecResponse 
`xml:\"urn:vim25 QueryConnectionInfoViaSpecResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryConnectionInfoViaSpecBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryConnectionInfoViaSpec(ctx context.Context, r soap.RoundTripper, req *types.QueryConnectionInfoViaSpec) (*types.QueryConnectionInfoViaSpecResponse, error) {\n\tvar reqBody, resBody QueryConnectionInfoViaSpecBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryDatastorePerformanceSummaryBody struct {\n\tReq    *types.QueryDatastorePerformanceSummary         `xml:\"urn:vim25 QueryDatastorePerformanceSummary,omitempty\"`\n\tRes    *types.QueryDatastorePerformanceSummaryResponse `xml:\"urn:vim25 QueryDatastorePerformanceSummaryResponse,omitempty\"`\n\tFault_ *soap.Fault                                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryDatastorePerformanceSummaryBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryDatastorePerformanceSummary(ctx context.Context, r soap.RoundTripper, req *types.QueryDatastorePerformanceSummary) (*types.QueryDatastorePerformanceSummaryResponse, error) {\n\tvar reqBody, resBody QueryDatastorePerformanceSummaryBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryDateTimeBody struct {\n\tReq    *types.QueryDateTime         `xml:\"urn:vim25 QueryDateTime,omitempty\"`\n\tRes    *types.QueryDateTimeResponse `xml:\"urn:vim25 QueryDateTimeResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryDateTimeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryDateTime(ctx context.Context, r 
soap.RoundTripper, req *types.QueryDateTime) (*types.QueryDateTimeResponse, error) {\n\tvar reqBody, resBody QueryDateTimeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryDescriptionsBody struct {\n\tReq    *types.QueryDescriptions         `xml:\"urn:vim25 QueryDescriptions,omitempty\"`\n\tRes    *types.QueryDescriptionsResponse `xml:\"urn:vim25 QueryDescriptionsResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryDescriptionsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryDescriptions(ctx context.Context, r soap.RoundTripper, req *types.QueryDescriptions) (*types.QueryDescriptionsResponse, error) {\n\tvar reqBody, resBody QueryDescriptionsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryDisksForVsanBody struct {\n\tReq    *types.QueryDisksForVsan         `xml:\"urn:vim25 QueryDisksForVsan,omitempty\"`\n\tRes    *types.QueryDisksForVsanResponse `xml:\"urn:vim25 QueryDisksForVsanResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryDisksForVsanBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryDisksForVsan(ctx context.Context, r soap.RoundTripper, req *types.QueryDisksForVsan) (*types.QueryDisksForVsanResponse, error) {\n\tvar reqBody, resBody QueryDisksForVsanBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryDisksUsingFilterBody struct {\n\tReq    *types.QueryDisksUsingFilter         `xml:\"urn:vim25 QueryDisksUsingFilter,omitempty\"`\n\tRes    *types.QueryDisksUsingFilterResponse `xml:\"urn:vim25 
QueryDisksUsingFilterResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryDisksUsingFilterBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryDisksUsingFilter(ctx context.Context, r soap.RoundTripper, req *types.QueryDisksUsingFilter) (*types.QueryDisksUsingFilterResponse, error) {\n\tvar reqBody, resBody QueryDisksUsingFilterBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryDvsByUuidBody struct {\n\tReq    *types.QueryDvsByUuid         `xml:\"urn:vim25 QueryDvsByUuid,omitempty\"`\n\tRes    *types.QueryDvsByUuidResponse `xml:\"urn:vim25 QueryDvsByUuidResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryDvsByUuidBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryDvsByUuid(ctx context.Context, r soap.RoundTripper, req *types.QueryDvsByUuid) (*types.QueryDvsByUuidResponse, error) {\n\tvar reqBody, resBody QueryDvsByUuidBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryDvsCheckCompatibilityBody struct {\n\tReq    *types.QueryDvsCheckCompatibility         `xml:\"urn:vim25 QueryDvsCheckCompatibility,omitempty\"`\n\tRes    *types.QueryDvsCheckCompatibilityResponse `xml:\"urn:vim25 QueryDvsCheckCompatibilityResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryDvsCheckCompatibilityBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryDvsCheckCompatibility(ctx context.Context, r soap.RoundTripper, req *types.QueryDvsCheckCompatibility) (*types.QueryDvsCheckCompatibilityResponse, error) {\n\tvar reqBody, resBody 
QueryDvsCheckCompatibilityBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryDvsCompatibleHostSpecBody struct {\n\tReq    *types.QueryDvsCompatibleHostSpec         `xml:\"urn:vim25 QueryDvsCompatibleHostSpec,omitempty\"`\n\tRes    *types.QueryDvsCompatibleHostSpecResponse `xml:\"urn:vim25 QueryDvsCompatibleHostSpecResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryDvsCompatibleHostSpecBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryDvsCompatibleHostSpec(ctx context.Context, r soap.RoundTripper, req *types.QueryDvsCompatibleHostSpec) (*types.QueryDvsCompatibleHostSpecResponse, error) {\n\tvar reqBody, resBody QueryDvsCompatibleHostSpecBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryDvsConfigTargetBody struct {\n\tReq    *types.QueryDvsConfigTarget         `xml:\"urn:vim25 QueryDvsConfigTarget,omitempty\"`\n\tRes    *types.QueryDvsConfigTargetResponse `xml:\"urn:vim25 QueryDvsConfigTargetResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryDvsConfigTargetBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryDvsConfigTarget(ctx context.Context, r soap.RoundTripper, req *types.QueryDvsConfigTarget) (*types.QueryDvsConfigTargetResponse, error) {\n\tvar reqBody, resBody QueryDvsConfigTargetBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryDvsFeatureCapabilityBody struct {\n\tReq    *types.QueryDvsFeatureCapability         `xml:\"urn:vim25 QueryDvsFeatureCapability,omitempty\"`\n\tRes    
*types.QueryDvsFeatureCapabilityResponse `xml:\"urn:vim25 QueryDvsFeatureCapabilityResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryDvsFeatureCapabilityBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryDvsFeatureCapability(ctx context.Context, r soap.RoundTripper, req *types.QueryDvsFeatureCapability) (*types.QueryDvsFeatureCapabilityResponse, error) {\n\tvar reqBody, resBody QueryDvsFeatureCapabilityBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryEventsBody struct {\n\tReq    *types.QueryEvents         `xml:\"urn:vim25 QueryEvents,omitempty\"`\n\tRes    *types.QueryEventsResponse `xml:\"urn:vim25 QueryEventsResponse,omitempty\"`\n\tFault_ *soap.Fault                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryEventsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryEvents(ctx context.Context, r soap.RoundTripper, req *types.QueryEvents) (*types.QueryEventsResponse, error) {\n\tvar reqBody, resBody QueryEventsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryExpressionMetadataBody struct {\n\tReq    *types.QueryExpressionMetadata         `xml:\"urn:vim25 QueryExpressionMetadata,omitempty\"`\n\tRes    *types.QueryExpressionMetadataResponse `xml:\"urn:vim25 QueryExpressionMetadataResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryExpressionMetadataBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryExpressionMetadata(ctx context.Context, r soap.RoundTripper, req *types.QueryExpressionMetadata) (*types.QueryExpressionMetadataResponse, error) {\n\tvar reqBody, 
resBody QueryExpressionMetadataBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryExtensionIpAllocationUsageBody struct {\n\tReq    *types.QueryExtensionIpAllocationUsage         `xml:\"urn:vim25 QueryExtensionIpAllocationUsage,omitempty\"`\n\tRes    *types.QueryExtensionIpAllocationUsageResponse `xml:\"urn:vim25 QueryExtensionIpAllocationUsageResponse,omitempty\"`\n\tFault_ *soap.Fault                                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryExtensionIpAllocationUsageBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryExtensionIpAllocationUsage(ctx context.Context, r soap.RoundTripper, req *types.QueryExtensionIpAllocationUsage) (*types.QueryExtensionIpAllocationUsageResponse, error) {\n\tvar reqBody, resBody QueryExtensionIpAllocationUsageBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryFaultToleranceCompatibilityBody struct {\n\tReq    *types.QueryFaultToleranceCompatibility         `xml:\"urn:vim25 QueryFaultToleranceCompatibility,omitempty\"`\n\tRes    *types.QueryFaultToleranceCompatibilityResponse `xml:\"urn:vim25 QueryFaultToleranceCompatibilityResponse,omitempty\"`\n\tFault_ *soap.Fault                                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryFaultToleranceCompatibilityBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryFaultToleranceCompatibility(ctx context.Context, r soap.RoundTripper, req *types.QueryFaultToleranceCompatibility) (*types.QueryFaultToleranceCompatibilityResponse, error) {\n\tvar reqBody, resBody QueryFaultToleranceCompatibilityBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn 
resBody.Res, nil\n}\n\ntype QueryFaultToleranceCompatibilityExBody struct {\n\tReq    *types.QueryFaultToleranceCompatibilityEx         `xml:\"urn:vim25 QueryFaultToleranceCompatibilityEx,omitempty\"`\n\tRes    *types.QueryFaultToleranceCompatibilityExResponse `xml:\"urn:vim25 QueryFaultToleranceCompatibilityExResponse,omitempty\"`\n\tFault_ *soap.Fault                                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryFaultToleranceCompatibilityExBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryFaultToleranceCompatibilityEx(ctx context.Context, r soap.RoundTripper, req *types.QueryFaultToleranceCompatibilityEx) (*types.QueryFaultToleranceCompatibilityExResponse, error) {\n\tvar reqBody, resBody QueryFaultToleranceCompatibilityExBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryFilterEntitiesBody struct {\n\tReq    *types.QueryFilterEntities         `xml:\"urn:vim25 QueryFilterEntities,omitempty\"`\n\tRes    *types.QueryFilterEntitiesResponse `xml:\"urn:vim25 QueryFilterEntitiesResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryFilterEntitiesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryFilterEntities(ctx context.Context, r soap.RoundTripper, req *types.QueryFilterEntities) (*types.QueryFilterEntitiesResponse, error) {\n\tvar reqBody, resBody QueryFilterEntitiesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryFilterInfoIdsBody struct {\n\tReq    *types.QueryFilterInfoIds         `xml:\"urn:vim25 QueryFilterInfoIds,omitempty\"`\n\tRes    *types.QueryFilterInfoIdsResponse `xml:\"urn:vim25 QueryFilterInfoIdsResponse,omitempty\"`\n\tFault_ *soap.Fault          
             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryFilterInfoIdsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryFilterInfoIds(ctx context.Context, r soap.RoundTripper, req *types.QueryFilterInfoIds) (*types.QueryFilterInfoIdsResponse, error) {\n\tvar reqBody, resBody QueryFilterInfoIdsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryFilterListBody struct {\n\tReq    *types.QueryFilterList         `xml:\"urn:vim25 QueryFilterList,omitempty\"`\n\tRes    *types.QueryFilterListResponse `xml:\"urn:vim25 QueryFilterListResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryFilterListBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryFilterList(ctx context.Context, r soap.RoundTripper, req *types.QueryFilterList) (*types.QueryFilterListResponse, error) {\n\tvar reqBody, resBody QueryFilterListBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryFilterNameBody struct {\n\tReq    *types.QueryFilterName         `xml:\"urn:vim25 QueryFilterName,omitempty\"`\n\tRes    *types.QueryFilterNameResponse `xml:\"urn:vim25 QueryFilterNameResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryFilterNameBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryFilterName(ctx context.Context, r soap.RoundTripper, req *types.QueryFilterName) (*types.QueryFilterNameResponse, error) {\n\tvar reqBody, resBody QueryFilterNameBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype 
QueryFirmwareConfigUploadURLBody struct {\n\tReq    *types.QueryFirmwareConfigUploadURL         `xml:\"urn:vim25 QueryFirmwareConfigUploadURL,omitempty\"`\n\tRes    *types.QueryFirmwareConfigUploadURLResponse `xml:\"urn:vim25 QueryFirmwareConfigUploadURLResponse,omitempty\"`\n\tFault_ *soap.Fault                                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryFirmwareConfigUploadURLBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryFirmwareConfigUploadURL(ctx context.Context, r soap.RoundTripper, req *types.QueryFirmwareConfigUploadURL) (*types.QueryFirmwareConfigUploadURLResponse, error) {\n\tvar reqBody, resBody QueryFirmwareConfigUploadURLBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryHealthUpdateInfosBody struct {\n\tReq    *types.QueryHealthUpdateInfos         `xml:\"urn:vim25 QueryHealthUpdateInfos,omitempty\"`\n\tRes    *types.QueryHealthUpdateInfosResponse `xml:\"urn:vim25 QueryHealthUpdateInfosResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryHealthUpdateInfosBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryHealthUpdateInfos(ctx context.Context, r soap.RoundTripper, req *types.QueryHealthUpdateInfos) (*types.QueryHealthUpdateInfosResponse, error) {\n\tvar reqBody, resBody QueryHealthUpdateInfosBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryHealthUpdatesBody struct {\n\tReq    *types.QueryHealthUpdates         `xml:\"urn:vim25 QueryHealthUpdates,omitempty\"`\n\tRes    *types.QueryHealthUpdatesResponse `xml:\"urn:vim25 QueryHealthUpdatesResponse,omitempty\"`\n\tFault_ *soap.Fault                       
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryHealthUpdatesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryHealthUpdates(ctx context.Context, r soap.RoundTripper, req *types.QueryHealthUpdates) (*types.QueryHealthUpdatesResponse, error) {\n\tvar reqBody, resBody QueryHealthUpdatesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryHostConnectionInfoBody struct {\n\tReq    *types.QueryHostConnectionInfo         `xml:\"urn:vim25 QueryHostConnectionInfo,omitempty\"`\n\tRes    *types.QueryHostConnectionInfoResponse `xml:\"urn:vim25 QueryHostConnectionInfoResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryHostConnectionInfoBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryHostConnectionInfo(ctx context.Context, r soap.RoundTripper, req *types.QueryHostConnectionInfo) (*types.QueryHostConnectionInfoResponse, error) {\n\tvar reqBody, resBody QueryHostConnectionInfoBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryHostPatch_TaskBody struct {\n\tReq    *types.QueryHostPatch_Task         `xml:\"urn:vim25 QueryHostPatch_Task,omitempty\"`\n\tRes    *types.QueryHostPatch_TaskResponse `xml:\"urn:vim25 QueryHostPatch_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryHostPatch_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.QueryHostPatch_Task) (*types.QueryHostPatch_TaskResponse, error) {\n\tvar reqBody, resBody QueryHostPatch_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, 
&resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryHostProfileMetadataBody struct {\n\tReq    *types.QueryHostProfileMetadata         `xml:\"urn:vim25 QueryHostProfileMetadata,omitempty\"`\n\tRes    *types.QueryHostProfileMetadataResponse `xml:\"urn:vim25 QueryHostProfileMetadataResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryHostProfileMetadataBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryHostProfileMetadata(ctx context.Context, r soap.RoundTripper, req *types.QueryHostProfileMetadata) (*types.QueryHostProfileMetadataResponse, error) {\n\tvar reqBody, resBody QueryHostProfileMetadataBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryHostStatusBody struct {\n\tReq    *types.QueryHostStatus         `xml:\"urn:vim25 QueryHostStatus,omitempty\"`\n\tRes    *types.QueryHostStatusResponse `xml:\"urn:vim25 QueryHostStatusResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryHostStatusBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryHostStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryHostStatus) (*types.QueryHostStatusResponse, error) {\n\tvar reqBody, resBody QueryHostStatusBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryIORMConfigOptionBody struct {\n\tReq    *types.QueryIORMConfigOption         `xml:\"urn:vim25 QueryIORMConfigOption,omitempty\"`\n\tRes    *types.QueryIORMConfigOptionResponse `xml:\"urn:vim25 QueryIORMConfigOptionResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ 
Fault,omitempty\"`\n}\n\nfunc (b *QueryIORMConfigOptionBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryIORMConfigOption(ctx context.Context, r soap.RoundTripper, req *types.QueryIORMConfigOption) (*types.QueryIORMConfigOptionResponse, error) {\n\tvar reqBody, resBody QueryIORMConfigOptionBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryIPAllocationsBody struct {\n\tReq    *types.QueryIPAllocations         `xml:\"urn:vim25 QueryIPAllocations,omitempty\"`\n\tRes    *types.QueryIPAllocationsResponse `xml:\"urn:vim25 QueryIPAllocationsResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryIPAllocationsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryIPAllocations(ctx context.Context, r soap.RoundTripper, req *types.QueryIPAllocations) (*types.QueryIPAllocationsResponse, error) {\n\tvar reqBody, resBody QueryIPAllocationsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryIoFilterInfoBody struct {\n\tReq    *types.QueryIoFilterInfo         `xml:\"urn:vim25 QueryIoFilterInfo,omitempty\"`\n\tRes    *types.QueryIoFilterInfoResponse `xml:\"urn:vim25 QueryIoFilterInfoResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryIoFilterInfoBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryIoFilterInfo(ctx context.Context, r soap.RoundTripper, req *types.QueryIoFilterInfo) (*types.QueryIoFilterInfoResponse, error) {\n\tvar reqBody, resBody QueryIoFilterInfoBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype 
QueryIoFilterIssuesBody struct {\n\tReq    *types.QueryIoFilterIssues         `xml:\"urn:vim25 QueryIoFilterIssues,omitempty\"`\n\tRes    *types.QueryIoFilterIssuesResponse `xml:\"urn:vim25 QueryIoFilterIssuesResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryIoFilterIssuesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryIoFilterIssues(ctx context.Context, r soap.RoundTripper, req *types.QueryIoFilterIssues) (*types.QueryIoFilterIssuesResponse, error) {\n\tvar reqBody, resBody QueryIoFilterIssuesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryIpPoolsBody struct {\n\tReq    *types.QueryIpPools         `xml:\"urn:vim25 QueryIpPools,omitempty\"`\n\tRes    *types.QueryIpPoolsResponse `xml:\"urn:vim25 QueryIpPoolsResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryIpPoolsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryIpPools(ctx context.Context, r soap.RoundTripper, req *types.QueryIpPools) (*types.QueryIpPoolsResponse, error) {\n\tvar reqBody, resBody QueryIpPoolsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryLicenseSourceAvailabilityBody struct {\n\tReq    *types.QueryLicenseSourceAvailability         `xml:\"urn:vim25 QueryLicenseSourceAvailability,omitempty\"`\n\tRes    *types.QueryLicenseSourceAvailabilityResponse `xml:\"urn:vim25 QueryLicenseSourceAvailabilityResponse,omitempty\"`\n\tFault_ *soap.Fault                                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryLicenseSourceAvailabilityBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc 
QueryLicenseSourceAvailability(ctx context.Context, r soap.RoundTripper, req *types.QueryLicenseSourceAvailability) (*types.QueryLicenseSourceAvailabilityResponse, error) {\n\tvar reqBody, resBody QueryLicenseSourceAvailabilityBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryLicenseUsageBody struct {\n\tReq    *types.QueryLicenseUsage         `xml:\"urn:vim25 QueryLicenseUsage,omitempty\"`\n\tRes    *types.QueryLicenseUsageResponse `xml:\"urn:vim25 QueryLicenseUsageResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryLicenseUsageBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryLicenseUsage(ctx context.Context, r soap.RoundTripper, req *types.QueryLicenseUsage) (*types.QueryLicenseUsageResponse, error) {\n\tvar reqBody, resBody QueryLicenseUsageBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryLockdownExceptionsBody struct {\n\tReq    *types.QueryLockdownExceptions         `xml:\"urn:vim25 QueryLockdownExceptions,omitempty\"`\n\tRes    *types.QueryLockdownExceptionsResponse `xml:\"urn:vim25 QueryLockdownExceptionsResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryLockdownExceptionsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryLockdownExceptions(ctx context.Context, r soap.RoundTripper, req *types.QueryLockdownExceptions) (*types.QueryLockdownExceptionsResponse, error) {\n\tvar reqBody, resBody QueryLockdownExceptionsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryManagedByBody struct {\n\tReq    
*types.QueryManagedBy         `xml:\"urn:vim25 QueryManagedBy,omitempty\"`\n\tRes    *types.QueryManagedByResponse `xml:\"urn:vim25 QueryManagedByResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryManagedByBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryManagedBy(ctx context.Context, r soap.RoundTripper, req *types.QueryManagedBy) (*types.QueryManagedByResponse, error) {\n\tvar reqBody, resBody QueryManagedByBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryMemoryOverheadBody struct {\n\tReq    *types.QueryMemoryOverhead         `xml:\"urn:vim25 QueryMemoryOverhead,omitempty\"`\n\tRes    *types.QueryMemoryOverheadResponse `xml:\"urn:vim25 QueryMemoryOverheadResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryMemoryOverheadBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryMemoryOverhead(ctx context.Context, r soap.RoundTripper, req *types.QueryMemoryOverhead) (*types.QueryMemoryOverheadResponse, error) {\n\tvar reqBody, resBody QueryMemoryOverheadBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryMemoryOverheadExBody struct {\n\tReq    *types.QueryMemoryOverheadEx         `xml:\"urn:vim25 QueryMemoryOverheadEx,omitempty\"`\n\tRes    *types.QueryMemoryOverheadExResponse `xml:\"urn:vim25 QueryMemoryOverheadExResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryMemoryOverheadExBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryMemoryOverheadEx(ctx context.Context, r soap.RoundTripper, req 
*types.QueryMemoryOverheadEx) (*types.QueryMemoryOverheadExResponse, error) {\n\tvar reqBody, resBody QueryMemoryOverheadExBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryMigrationDependenciesBody struct {\n\tReq    *types.QueryMigrationDependencies         `xml:\"urn:vim25 QueryMigrationDependencies,omitempty\"`\n\tRes    *types.QueryMigrationDependenciesResponse `xml:\"urn:vim25 QueryMigrationDependenciesResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryMigrationDependenciesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryMigrationDependencies(ctx context.Context, r soap.RoundTripper, req *types.QueryMigrationDependencies) (*types.QueryMigrationDependenciesResponse, error) {\n\tvar reqBody, resBody QueryMigrationDependenciesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryModulesBody struct {\n\tReq    *types.QueryModules         `xml:\"urn:vim25 QueryModules,omitempty\"`\n\tRes    *types.QueryModulesResponse `xml:\"urn:vim25 QueryModulesResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryModulesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryModules(ctx context.Context, r soap.RoundTripper, req *types.QueryModules) (*types.QueryModulesResponse, error) {\n\tvar reqBody, resBody QueryModulesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryMonitoredEntitiesBody struct {\n\tReq    *types.QueryMonitoredEntities         `xml:\"urn:vim25 QueryMonitoredEntities,omitempty\"`\n\tRes    
*types.QueryMonitoredEntitiesResponse `xml:\"urn:vim25 QueryMonitoredEntitiesResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryMonitoredEntitiesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryMonitoredEntities(ctx context.Context, r soap.RoundTripper, req *types.QueryMonitoredEntities) (*types.QueryMonitoredEntitiesResponse, error) {\n\tvar reqBody, resBody QueryMonitoredEntitiesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryNFSUserBody struct {\n\tReq    *types.QueryNFSUser         `xml:\"urn:vim25 QueryNFSUser,omitempty\"`\n\tRes    *types.QueryNFSUserResponse `xml:\"urn:vim25 QueryNFSUserResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryNFSUserBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryNFSUser(ctx context.Context, r soap.RoundTripper, req *types.QueryNFSUser) (*types.QueryNFSUserResponse, error) {\n\tvar reqBody, resBody QueryNFSUserBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryNetConfigBody struct {\n\tReq    *types.QueryNetConfig         `xml:\"urn:vim25 QueryNetConfig,omitempty\"`\n\tRes    *types.QueryNetConfigResponse `xml:\"urn:vim25 QueryNetConfigResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryNetConfigBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryNetConfig(ctx context.Context, r soap.RoundTripper, req *types.QueryNetConfig) (*types.QueryNetConfigResponse, error) {\n\tvar reqBody, resBody QueryNetConfigBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); 
err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryNetworkHintBody struct {\n\tReq    *types.QueryNetworkHint         `xml:\"urn:vim25 QueryNetworkHint,omitempty\"`\n\tRes    *types.QueryNetworkHintResponse `xml:\"urn:vim25 QueryNetworkHintResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryNetworkHintBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryNetworkHint(ctx context.Context, r soap.RoundTripper, req *types.QueryNetworkHint) (*types.QueryNetworkHintResponse, error) {\n\tvar reqBody, resBody QueryNetworkHintBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryObjectsOnPhysicalVsanDiskBody struct {\n\tReq    *types.QueryObjectsOnPhysicalVsanDisk         `xml:\"urn:vim25 QueryObjectsOnPhysicalVsanDisk,omitempty\"`\n\tRes    *types.QueryObjectsOnPhysicalVsanDiskResponse `xml:\"urn:vim25 QueryObjectsOnPhysicalVsanDiskResponse,omitempty\"`\n\tFault_ *soap.Fault                                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryObjectsOnPhysicalVsanDiskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryObjectsOnPhysicalVsanDisk(ctx context.Context, r soap.RoundTripper, req *types.QueryObjectsOnPhysicalVsanDisk) (*types.QueryObjectsOnPhysicalVsanDiskResponse, error) {\n\tvar reqBody, resBody QueryObjectsOnPhysicalVsanDiskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryOptionsBody struct {\n\tReq    *types.QueryOptions         `xml:\"urn:vim25 QueryOptions,omitempty\"`\n\tRes    *types.QueryOptionsResponse `xml:\"urn:vim25 QueryOptionsResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ 
Fault,omitempty\"`\n}\n\nfunc (b *QueryOptionsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryOptions) (*types.QueryOptionsResponse, error) {\n\tvar reqBody, resBody QueryOptionsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryPartitionCreateDescBody struct {\n\tReq    *types.QueryPartitionCreateDesc         `xml:\"urn:vim25 QueryPartitionCreateDesc,omitempty\"`\n\tRes    *types.QueryPartitionCreateDescResponse `xml:\"urn:vim25 QueryPartitionCreateDescResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryPartitionCreateDescBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryPartitionCreateDesc(ctx context.Context, r soap.RoundTripper, req *types.QueryPartitionCreateDesc) (*types.QueryPartitionCreateDescResponse, error) {\n\tvar reqBody, resBody QueryPartitionCreateDescBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryPartitionCreateOptionsBody struct {\n\tReq    *types.QueryPartitionCreateOptions         `xml:\"urn:vim25 QueryPartitionCreateOptions,omitempty\"`\n\tRes    *types.QueryPartitionCreateOptionsResponse `xml:\"urn:vim25 QueryPartitionCreateOptionsResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryPartitionCreateOptionsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryPartitionCreateOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryPartitionCreateOptions) (*types.QueryPartitionCreateOptionsResponse, error) {\n\tvar reqBody, resBody QueryPartitionCreateOptionsBody\n\n\treqBody.Req = req\n\n\tif err := 
r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryPathSelectionPolicyOptionsBody struct {\n\tReq    *types.QueryPathSelectionPolicyOptions         `xml:\"urn:vim25 QueryPathSelectionPolicyOptions,omitempty\"`\n\tRes    *types.QueryPathSelectionPolicyOptionsResponse `xml:\"urn:vim25 QueryPathSelectionPolicyOptionsResponse,omitempty\"`\n\tFault_ *soap.Fault                                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryPathSelectionPolicyOptionsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryPathSelectionPolicyOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryPathSelectionPolicyOptions) (*types.QueryPathSelectionPolicyOptionsResponse, error) {\n\tvar reqBody, resBody QueryPathSelectionPolicyOptionsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryPerfBody struct {\n\tReq    *types.QueryPerf         `xml:\"urn:vim25 QueryPerf,omitempty\"`\n\tRes    *types.QueryPerfResponse `xml:\"urn:vim25 QueryPerfResponse,omitempty\"`\n\tFault_ *soap.Fault              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryPerfBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryPerf(ctx context.Context, r soap.RoundTripper, req *types.QueryPerf) (*types.QueryPerfResponse, error) {\n\tvar reqBody, resBody QueryPerfBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryPerfCompositeBody struct {\n\tReq    *types.QueryPerfComposite         `xml:\"urn:vim25 QueryPerfComposite,omitempty\"`\n\tRes    *types.QueryPerfCompositeResponse `xml:\"urn:vim25 QueryPerfCompositeResponse,omitempty\"`\n\tFault_ *soap.Fault                       
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryPerfCompositeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryPerfComposite(ctx context.Context, r soap.RoundTripper, req *types.QueryPerfComposite) (*types.QueryPerfCompositeResponse, error) {\n\tvar reqBody, resBody QueryPerfCompositeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryPerfCounterBody struct {\n\tReq    *types.QueryPerfCounter         `xml:\"urn:vim25 QueryPerfCounter,omitempty\"`\n\tRes    *types.QueryPerfCounterResponse `xml:\"urn:vim25 QueryPerfCounterResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryPerfCounterBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryPerfCounter(ctx context.Context, r soap.RoundTripper, req *types.QueryPerfCounter) (*types.QueryPerfCounterResponse, error) {\n\tvar reqBody, resBody QueryPerfCounterBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryPerfCounterByLevelBody struct {\n\tReq    *types.QueryPerfCounterByLevel         `xml:\"urn:vim25 QueryPerfCounterByLevel,omitempty\"`\n\tRes    *types.QueryPerfCounterByLevelResponse `xml:\"urn:vim25 QueryPerfCounterByLevelResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryPerfCounterByLevelBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryPerfCounterByLevel(ctx context.Context, r soap.RoundTripper, req *types.QueryPerfCounterByLevel) (*types.QueryPerfCounterByLevelResponse, error) {\n\tvar reqBody, resBody QueryPerfCounterByLevelBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil 
{\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryPerfProviderSummaryBody struct {\n\tReq    *types.QueryPerfProviderSummary         `xml:\"urn:vim25 QueryPerfProviderSummary,omitempty\"`\n\tRes    *types.QueryPerfProviderSummaryResponse `xml:\"urn:vim25 QueryPerfProviderSummaryResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryPerfProviderSummaryBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryPerfProviderSummary(ctx context.Context, r soap.RoundTripper, req *types.QueryPerfProviderSummary) (*types.QueryPerfProviderSummaryResponse, error) {\n\tvar reqBody, resBody QueryPerfProviderSummaryBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryPhysicalVsanDisksBody struct {\n\tReq    *types.QueryPhysicalVsanDisks         `xml:\"urn:vim25 QueryPhysicalVsanDisks,omitempty\"`\n\tRes    *types.QueryPhysicalVsanDisksResponse `xml:\"urn:vim25 QueryPhysicalVsanDisksResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryPhysicalVsanDisksBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryPhysicalVsanDisks(ctx context.Context, r soap.RoundTripper, req *types.QueryPhysicalVsanDisks) (*types.QueryPhysicalVsanDisksResponse, error) {\n\tvar reqBody, resBody QueryPhysicalVsanDisksBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryPnicStatusBody struct {\n\tReq    *types.QueryPnicStatus         `xml:\"urn:vim25 QueryPnicStatus,omitempty\"`\n\tRes    *types.QueryPnicStatusResponse `xml:\"urn:vim25 QueryPnicStatusResponse,omitempty\"`\n\tFault_ *soap.Fault                    
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryPnicStatusBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryPnicStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryPnicStatus) (*types.QueryPnicStatusResponse, error) {\n\tvar reqBody, resBody QueryPnicStatusBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryPolicyMetadataBody struct {\n\tReq    *types.QueryPolicyMetadata         `xml:\"urn:vim25 QueryPolicyMetadata,omitempty\"`\n\tRes    *types.QueryPolicyMetadataResponse `xml:\"urn:vim25 QueryPolicyMetadataResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryPolicyMetadataBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryPolicyMetadata(ctx context.Context, r soap.RoundTripper, req *types.QueryPolicyMetadata) (*types.QueryPolicyMetadataResponse, error) {\n\tvar reqBody, resBody QueryPolicyMetadataBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryProfileStructureBody struct {\n\tReq    *types.QueryProfileStructure         `xml:\"urn:vim25 QueryProfileStructure,omitempty\"`\n\tRes    *types.QueryProfileStructureResponse `xml:\"urn:vim25 QueryProfileStructureResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryProfileStructureBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryProfileStructure(ctx context.Context, r soap.RoundTripper, req *types.QueryProfileStructure) (*types.QueryProfileStructureResponse, error) {\n\tvar reqBody, resBody QueryProfileStructureBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn 
nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryProviderListBody struct {\n\tReq    *types.QueryProviderList         `xml:\"urn:vim25 QueryProviderList,omitempty\"`\n\tRes    *types.QueryProviderListResponse `xml:\"urn:vim25 QueryProviderListResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryProviderListBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryProviderList(ctx context.Context, r soap.RoundTripper, req *types.QueryProviderList) (*types.QueryProviderListResponse, error) {\n\tvar reqBody, resBody QueryProviderListBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryProviderNameBody struct {\n\tReq    *types.QueryProviderName         `xml:\"urn:vim25 QueryProviderName,omitempty\"`\n\tRes    *types.QueryProviderNameResponse `xml:\"urn:vim25 QueryProviderNameResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryProviderNameBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryProviderName(ctx context.Context, r soap.RoundTripper, req *types.QueryProviderName) (*types.QueryProviderNameResponse, error) {\n\tvar reqBody, resBody QueryProviderNameBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryResourceConfigOptionBody struct {\n\tReq    *types.QueryResourceConfigOption         `xml:\"urn:vim25 QueryResourceConfigOption,omitempty\"`\n\tRes    *types.QueryResourceConfigOptionResponse `xml:\"urn:vim25 QueryResourceConfigOptionResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryResourceConfigOptionBody) Fault() 
*soap.Fault { return b.Fault_ }\n\nfunc QueryResourceConfigOption(ctx context.Context, r soap.RoundTripper, req *types.QueryResourceConfigOption) (*types.QueryResourceConfigOptionResponse, error) {\n\tvar reqBody, resBody QueryResourceConfigOptionBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryServiceListBody struct {\n\tReq    *types.QueryServiceList         `xml:\"urn:vim25 QueryServiceList,omitempty\"`\n\tRes    *types.QueryServiceListResponse `xml:\"urn:vim25 QueryServiceListResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryServiceListBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryServiceList(ctx context.Context, r soap.RoundTripper, req *types.QueryServiceList) (*types.QueryServiceListResponse, error) {\n\tvar reqBody, resBody QueryServiceListBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryStorageArrayTypePolicyOptionsBody struct {\n\tReq    *types.QueryStorageArrayTypePolicyOptions         `xml:\"urn:vim25 QueryStorageArrayTypePolicyOptions,omitempty\"`\n\tRes    *types.QueryStorageArrayTypePolicyOptionsResponse `xml:\"urn:vim25 QueryStorageArrayTypePolicyOptionsResponse,omitempty\"`\n\tFault_ *soap.Fault                                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryStorageArrayTypePolicyOptionsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryStorageArrayTypePolicyOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryStorageArrayTypePolicyOptions) (*types.QueryStorageArrayTypePolicyOptionsResponse, error) {\n\tvar reqBody, resBody QueryStorageArrayTypePolicyOptionsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, 
&resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QuerySupportedFeaturesBody struct {\n\tReq    *types.QuerySupportedFeatures         `xml:\"urn:vim25 QuerySupportedFeatures,omitempty\"`\n\tRes    *types.QuerySupportedFeaturesResponse `xml:\"urn:vim25 QuerySupportedFeaturesResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QuerySupportedFeaturesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QuerySupportedFeatures(ctx context.Context, r soap.RoundTripper, req *types.QuerySupportedFeatures) (*types.QuerySupportedFeaturesResponse, error) {\n\tvar reqBody, resBody QuerySupportedFeaturesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QuerySyncingVsanObjectsBody struct {\n\tReq    *types.QuerySyncingVsanObjects         `xml:\"urn:vim25 QuerySyncingVsanObjects,omitempty\"`\n\tRes    *types.QuerySyncingVsanObjectsResponse `xml:\"urn:vim25 QuerySyncingVsanObjectsResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QuerySyncingVsanObjectsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QuerySyncingVsanObjects(ctx context.Context, r soap.RoundTripper, req *types.QuerySyncingVsanObjects) (*types.QuerySyncingVsanObjectsResponse, error) {\n\tvar reqBody, resBody QuerySyncingVsanObjectsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QuerySystemUsersBody struct {\n\tReq    *types.QuerySystemUsers         `xml:\"urn:vim25 QuerySystemUsers,omitempty\"`\n\tRes    *types.QuerySystemUsersResponse `xml:\"urn:vim25 QuerySystemUsersResponse,omitempty\"`\n\tFault_ *soap.Fault                     
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QuerySystemUsersBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QuerySystemUsers(ctx context.Context, r soap.RoundTripper, req *types.QuerySystemUsers) (*types.QuerySystemUsersResponse, error) {\n\tvar reqBody, resBody QuerySystemUsersBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryTargetCapabilitiesBody struct {\n\tReq    *types.QueryTargetCapabilities         `xml:\"urn:vim25 QueryTargetCapabilities,omitempty\"`\n\tRes    *types.QueryTargetCapabilitiesResponse `xml:\"urn:vim25 QueryTargetCapabilitiesResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryTargetCapabilitiesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryTargetCapabilities(ctx context.Context, r soap.RoundTripper, req *types.QueryTargetCapabilities) (*types.QueryTargetCapabilitiesResponse, error) {\n\tvar reqBody, resBody QueryTargetCapabilitiesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryTpmAttestationReportBody struct {\n\tReq    *types.QueryTpmAttestationReport         `xml:\"urn:vim25 QueryTpmAttestationReport,omitempty\"`\n\tRes    *types.QueryTpmAttestationReportResponse `xml:\"urn:vim25 QueryTpmAttestationReportResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryTpmAttestationReportBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryTpmAttestationReport(ctx context.Context, r soap.RoundTripper, req *types.QueryTpmAttestationReport) (*types.QueryTpmAttestationReportResponse, error) {\n\tvar reqBody, resBody 
QueryTpmAttestationReportBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryUnmonitoredHostsBody struct {\n\tReq    *types.QueryUnmonitoredHosts         `xml:\"urn:vim25 QueryUnmonitoredHosts,omitempty\"`\n\tRes    *types.QueryUnmonitoredHostsResponse `xml:\"urn:vim25 QueryUnmonitoredHostsResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryUnmonitoredHostsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryUnmonitoredHosts(ctx context.Context, r soap.RoundTripper, req *types.QueryUnmonitoredHosts) (*types.QueryUnmonitoredHostsResponse, error) {\n\tvar reqBody, resBody QueryUnmonitoredHostsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryUnownedFilesBody struct {\n\tReq    *types.QueryUnownedFiles         `xml:\"urn:vim25 QueryUnownedFiles,omitempty\"`\n\tRes    *types.QueryUnownedFilesResponse `xml:\"urn:vim25 QueryUnownedFilesResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryUnownedFilesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryUnownedFiles(ctx context.Context, r soap.RoundTripper, req *types.QueryUnownedFiles) (*types.QueryUnownedFilesResponse, error) {\n\tvar reqBody, resBody QueryUnownedFilesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryUnresolvedVmfsVolumeBody struct {\n\tReq    *types.QueryUnresolvedVmfsVolume         `xml:\"urn:vim25 QueryUnresolvedVmfsVolume,omitempty\"`\n\tRes    *types.QueryUnresolvedVmfsVolumeResponse `xml:\"urn:vim25 
QueryUnresolvedVmfsVolumeResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryUnresolvedVmfsVolumeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryUnresolvedVmfsVolume(ctx context.Context, r soap.RoundTripper, req *types.QueryUnresolvedVmfsVolume) (*types.QueryUnresolvedVmfsVolumeResponse, error) {\n\tvar reqBody, resBody QueryUnresolvedVmfsVolumeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryUnresolvedVmfsVolumesBody struct {\n\tReq    *types.QueryUnresolvedVmfsVolumes         `xml:\"urn:vim25 QueryUnresolvedVmfsVolumes,omitempty\"`\n\tRes    *types.QueryUnresolvedVmfsVolumesResponse `xml:\"urn:vim25 QueryUnresolvedVmfsVolumesResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryUnresolvedVmfsVolumesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryUnresolvedVmfsVolumes(ctx context.Context, r soap.RoundTripper, req *types.QueryUnresolvedVmfsVolumes) (*types.QueryUnresolvedVmfsVolumesResponse, error) {\n\tvar reqBody, resBody QueryUnresolvedVmfsVolumesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryUsedVlanIdInDvsBody struct {\n\tReq    *types.QueryUsedVlanIdInDvs         `xml:\"urn:vim25 QueryUsedVlanIdInDvs,omitempty\"`\n\tRes    *types.QueryUsedVlanIdInDvsResponse `xml:\"urn:vim25 QueryUsedVlanIdInDvsResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryUsedVlanIdInDvsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryUsedVlanIdInDvs(ctx context.Context, r soap.RoundTripper, req 
*types.QueryUsedVlanIdInDvs) (*types.QueryUsedVlanIdInDvsResponse, error) {\n\tvar reqBody, resBody QueryUsedVlanIdInDvsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryVMotionCompatibilityBody struct {\n\tReq    *types.QueryVMotionCompatibility         `xml:\"urn:vim25 QueryVMotionCompatibility,omitempty\"`\n\tRes    *types.QueryVMotionCompatibilityResponse `xml:\"urn:vim25 QueryVMotionCompatibilityResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryVMotionCompatibilityBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryVMotionCompatibility(ctx context.Context, r soap.RoundTripper, req *types.QueryVMotionCompatibility) (*types.QueryVMotionCompatibilityResponse, error) {\n\tvar reqBody, resBody QueryVMotionCompatibilityBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryVMotionCompatibilityEx_TaskBody struct {\n\tReq    *types.QueryVMotionCompatibilityEx_Task         `xml:\"urn:vim25 QueryVMotionCompatibilityEx_Task,omitempty\"`\n\tRes    *types.QueryVMotionCompatibilityEx_TaskResponse `xml:\"urn:vim25 QueryVMotionCompatibilityEx_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryVMotionCompatibilityEx_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryVMotionCompatibilityEx_Task(ctx context.Context, r soap.RoundTripper, req *types.QueryVMotionCompatibilityEx_Task) (*types.QueryVMotionCompatibilityEx_TaskResponse, error) {\n\tvar reqBody, resBody QueryVMotionCompatibilityEx_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, 
err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryVirtualDiskFragmentationBody struct {\n\tReq    *types.QueryVirtualDiskFragmentation         `xml:\"urn:vim25 QueryVirtualDiskFragmentation,omitempty\"`\n\tRes    *types.QueryVirtualDiskFragmentationResponse `xml:\"urn:vim25 QueryVirtualDiskFragmentationResponse,omitempty\"`\n\tFault_ *soap.Fault                                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryVirtualDiskFragmentationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryVirtualDiskFragmentation(ctx context.Context, r soap.RoundTripper, req *types.QueryVirtualDiskFragmentation) (*types.QueryVirtualDiskFragmentationResponse, error) {\n\tvar reqBody, resBody QueryVirtualDiskFragmentationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryVirtualDiskGeometryBody struct {\n\tReq    *types.QueryVirtualDiskGeometry         `xml:\"urn:vim25 QueryVirtualDiskGeometry,omitempty\"`\n\tRes    *types.QueryVirtualDiskGeometryResponse `xml:\"urn:vim25 QueryVirtualDiskGeometryResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryVirtualDiskGeometryBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryVirtualDiskGeometry(ctx context.Context, r soap.RoundTripper, req *types.QueryVirtualDiskGeometry) (*types.QueryVirtualDiskGeometryResponse, error) {\n\tvar reqBody, resBody QueryVirtualDiskGeometryBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryVirtualDiskUuidBody struct {\n\tReq    *types.QueryVirtualDiskUuid         `xml:\"urn:vim25 QueryVirtualDiskUuid,omitempty\"`\n\tRes    *types.QueryVirtualDiskUuidResponse `xml:\"urn:vim25 
QueryVirtualDiskUuidResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryVirtualDiskUuidBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryVirtualDiskUuid(ctx context.Context, r soap.RoundTripper, req *types.QueryVirtualDiskUuid) (*types.QueryVirtualDiskUuidResponse, error) {\n\tvar reqBody, resBody QueryVirtualDiskUuidBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryVmfsConfigOptionBody struct {\n\tReq    *types.QueryVmfsConfigOption         `xml:\"urn:vim25 QueryVmfsConfigOption,omitempty\"`\n\tRes    *types.QueryVmfsConfigOptionResponse `xml:\"urn:vim25 QueryVmfsConfigOptionResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryVmfsConfigOptionBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryVmfsConfigOption(ctx context.Context, r soap.RoundTripper, req *types.QueryVmfsConfigOption) (*types.QueryVmfsConfigOptionResponse, error) {\n\tvar reqBody, resBody QueryVmfsConfigOptionBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryVmfsDatastoreCreateOptionsBody struct {\n\tReq    *types.QueryVmfsDatastoreCreateOptions         `xml:\"urn:vim25 QueryVmfsDatastoreCreateOptions,omitempty\"`\n\tRes    *types.QueryVmfsDatastoreCreateOptionsResponse `xml:\"urn:vim25 QueryVmfsDatastoreCreateOptionsResponse,omitempty\"`\n\tFault_ *soap.Fault                                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryVmfsDatastoreCreateOptionsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryVmfsDatastoreCreateOptions(ctx context.Context, r soap.RoundTripper, req 
*types.QueryVmfsDatastoreCreateOptions) (*types.QueryVmfsDatastoreCreateOptionsResponse, error) {\n\tvar reqBody, resBody QueryVmfsDatastoreCreateOptionsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryVmfsDatastoreExpandOptionsBody struct {\n\tReq    *types.QueryVmfsDatastoreExpandOptions         `xml:\"urn:vim25 QueryVmfsDatastoreExpandOptions,omitempty\"`\n\tRes    *types.QueryVmfsDatastoreExpandOptionsResponse `xml:\"urn:vim25 QueryVmfsDatastoreExpandOptionsResponse,omitempty\"`\n\tFault_ *soap.Fault                                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryVmfsDatastoreExpandOptionsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryVmfsDatastoreExpandOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryVmfsDatastoreExpandOptions) (*types.QueryVmfsDatastoreExpandOptionsResponse, error) {\n\tvar reqBody, resBody QueryVmfsDatastoreExpandOptionsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryVmfsDatastoreExtendOptionsBody struct {\n\tReq    *types.QueryVmfsDatastoreExtendOptions         `xml:\"urn:vim25 QueryVmfsDatastoreExtendOptions,omitempty\"`\n\tRes    *types.QueryVmfsDatastoreExtendOptionsResponse `xml:\"urn:vim25 QueryVmfsDatastoreExtendOptionsResponse,omitempty\"`\n\tFault_ *soap.Fault                                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryVmfsDatastoreExtendOptionsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryVmfsDatastoreExtendOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryVmfsDatastoreExtendOptions) (*types.QueryVmfsDatastoreExtendOptionsResponse, error) {\n\tvar reqBody, resBody QueryVmfsDatastoreExtendOptionsBody\n\n\treqBody.Req = 
req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryVnicStatusBody struct {\n\tReq    *types.QueryVnicStatus         `xml:\"urn:vim25 QueryVnicStatus,omitempty\"`\n\tRes    *types.QueryVnicStatusResponse `xml:\"urn:vim25 QueryVnicStatusResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryVnicStatusBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryVnicStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryVnicStatus) (*types.QueryVnicStatusResponse, error) {\n\tvar reqBody, resBody QueryVnicStatusBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryVsanObjectUuidsByFilterBody struct {\n\tReq    *types.QueryVsanObjectUuidsByFilter         `xml:\"urn:vim25 QueryVsanObjectUuidsByFilter,omitempty\"`\n\tRes    *types.QueryVsanObjectUuidsByFilterResponse `xml:\"urn:vim25 QueryVsanObjectUuidsByFilterResponse,omitempty\"`\n\tFault_ *soap.Fault                                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryVsanObjectUuidsByFilterBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryVsanObjectUuidsByFilter(ctx context.Context, r soap.RoundTripper, req *types.QueryVsanObjectUuidsByFilter) (*types.QueryVsanObjectUuidsByFilterResponse, error) {\n\tvar reqBody, resBody QueryVsanObjectUuidsByFilterBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryVsanObjectsBody struct {\n\tReq    *types.QueryVsanObjects         `xml:\"urn:vim25 QueryVsanObjects,omitempty\"`\n\tRes    *types.QueryVsanObjectsResponse `xml:\"urn:vim25 QueryVsanObjectsResponse,omitempty\"`\n\tFault_ *soap.Fault                     
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryVsanObjectsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryVsanObjects(ctx context.Context, r soap.RoundTripper, req *types.QueryVsanObjects) (*types.QueryVsanObjectsResponse, error) {\n\tvar reqBody, resBody QueryVsanObjectsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryVsanStatisticsBody struct {\n\tReq    *types.QueryVsanStatistics         `xml:\"urn:vim25 QueryVsanStatistics,omitempty\"`\n\tRes    *types.QueryVsanStatisticsResponse `xml:\"urn:vim25 QueryVsanStatisticsResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryVsanStatisticsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryVsanStatistics(ctx context.Context, r soap.RoundTripper, req *types.QueryVsanStatistics) (*types.QueryVsanStatisticsResponse, error) {\n\tvar reqBody, resBody QueryVsanStatisticsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype QueryVsanUpgradeStatusBody struct {\n\tReq    *types.QueryVsanUpgradeStatus         `xml:\"urn:vim25 QueryVsanUpgradeStatus,omitempty\"`\n\tRes    *types.QueryVsanUpgradeStatusResponse `xml:\"urn:vim25 QueryVsanUpgradeStatusResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *QueryVsanUpgradeStatusBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc QueryVsanUpgradeStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryVsanUpgradeStatus) (*types.QueryVsanUpgradeStatusResponse, error) {\n\tvar reqBody, resBody QueryVsanUpgradeStatusBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil 
{\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReadEnvironmentVariableInGuestBody struct {\n\tReq    *types.ReadEnvironmentVariableInGuest         `xml:\"urn:vim25 ReadEnvironmentVariableInGuest,omitempty\"`\n\tRes    *types.ReadEnvironmentVariableInGuestResponse `xml:\"urn:vim25 ReadEnvironmentVariableInGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReadEnvironmentVariableInGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReadEnvironmentVariableInGuest(ctx context.Context, r soap.RoundTripper, req *types.ReadEnvironmentVariableInGuest) (*types.ReadEnvironmentVariableInGuestResponse, error) {\n\tvar reqBody, resBody ReadEnvironmentVariableInGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReadNextEventsBody struct {\n\tReq    *types.ReadNextEvents         `xml:\"urn:vim25 ReadNextEvents,omitempty\"`\n\tRes    *types.ReadNextEventsResponse `xml:\"urn:vim25 ReadNextEventsResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReadNextEventsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReadNextEvents(ctx context.Context, r soap.RoundTripper, req *types.ReadNextEvents) (*types.ReadNextEventsResponse, error) {\n\tvar reqBody, resBody ReadNextEventsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReadNextTasksBody struct {\n\tReq    *types.ReadNextTasks         `xml:\"urn:vim25 ReadNextTasks,omitempty\"`\n\tRes    *types.ReadNextTasksResponse `xml:\"urn:vim25 ReadNextTasksResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ 
Fault,omitempty\"`\n}\n\nfunc (b *ReadNextTasksBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReadNextTasks(ctx context.Context, r soap.RoundTripper, req *types.ReadNextTasks) (*types.ReadNextTasksResponse, error) {\n\tvar reqBody, resBody ReadNextTasksBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReadPreviousEventsBody struct {\n\tReq    *types.ReadPreviousEvents         `xml:\"urn:vim25 ReadPreviousEvents,omitempty\"`\n\tRes    *types.ReadPreviousEventsResponse `xml:\"urn:vim25 ReadPreviousEventsResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReadPreviousEventsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReadPreviousEvents(ctx context.Context, r soap.RoundTripper, req *types.ReadPreviousEvents) (*types.ReadPreviousEventsResponse, error) {\n\tvar reqBody, resBody ReadPreviousEventsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReadPreviousTasksBody struct {\n\tReq    *types.ReadPreviousTasks         `xml:\"urn:vim25 ReadPreviousTasks,omitempty\"`\n\tRes    *types.ReadPreviousTasksResponse `xml:\"urn:vim25 ReadPreviousTasksResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReadPreviousTasksBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReadPreviousTasks(ctx context.Context, r soap.RoundTripper, req *types.ReadPreviousTasks) (*types.ReadPreviousTasksResponse, error) {\n\tvar reqBody, resBody ReadPreviousTasksBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RebootGuestBody struct {\n\tReq    *types.RebootGuest         
`xml:\"urn:vim25 RebootGuest,omitempty\"`\n\tRes    *types.RebootGuestResponse `xml:\"urn:vim25 RebootGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RebootGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RebootGuest(ctx context.Context, r soap.RoundTripper, req *types.RebootGuest) (*types.RebootGuestResponse, error) {\n\tvar reqBody, resBody RebootGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RebootHost_TaskBody struct {\n\tReq    *types.RebootHost_Task         `xml:\"urn:vim25 RebootHost_Task,omitempty\"`\n\tRes    *types.RebootHost_TaskResponse `xml:\"urn:vim25 RebootHost_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RebootHost_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RebootHost_Task(ctx context.Context, r soap.RoundTripper, req *types.RebootHost_Task) (*types.RebootHost_TaskResponse, error) {\n\tvar reqBody, resBody RebootHost_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RecommendDatastoresBody struct {\n\tReq    *types.RecommendDatastores         `xml:\"urn:vim25 RecommendDatastores,omitempty\"`\n\tRes    *types.RecommendDatastoresResponse `xml:\"urn:vim25 RecommendDatastoresResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RecommendDatastoresBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RecommendDatastores(ctx context.Context, r soap.RoundTripper, req *types.RecommendDatastores) (*types.RecommendDatastoresResponse, error) {\n\tvar reqBody, resBody RecommendDatastoresBody\n\n\treqBody.Req = 
req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RecommendHostsForVmBody struct {\n\tReq    *types.RecommendHostsForVm         `xml:\"urn:vim25 RecommendHostsForVm,omitempty\"`\n\tRes    *types.RecommendHostsForVmResponse `xml:\"urn:vim25 RecommendHostsForVmResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RecommendHostsForVmBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RecommendHostsForVm(ctx context.Context, r soap.RoundTripper, req *types.RecommendHostsForVm) (*types.RecommendHostsForVmResponse, error) {\n\tvar reqBody, resBody RecommendHostsForVmBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RecommissionVsanNode_TaskBody struct {\n\tReq    *types.RecommissionVsanNode_Task         `xml:\"urn:vim25 RecommissionVsanNode_Task,omitempty\"`\n\tRes    *types.RecommissionVsanNode_TaskResponse `xml:\"urn:vim25 RecommissionVsanNode_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RecommissionVsanNode_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RecommissionVsanNode_Task(ctx context.Context, r soap.RoundTripper, req *types.RecommissionVsanNode_Task) (*types.RecommissionVsanNode_TaskResponse, error) {\n\tvar reqBody, resBody RecommissionVsanNode_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconcileDatastoreInventory_TaskBody struct {\n\tReq    *types.ReconcileDatastoreInventory_Task         `xml:\"urn:vim25 ReconcileDatastoreInventory_Task,omitempty\"`\n\tRes    *types.ReconcileDatastoreInventory_TaskResponse `xml:\"urn:vim25 
ReconcileDatastoreInventory_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReconcileDatastoreInventory_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconcileDatastoreInventory_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconcileDatastoreInventory_Task) (*types.ReconcileDatastoreInventory_TaskResponse, error) {\n\tvar reqBody, resBody ReconcileDatastoreInventory_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconfigVM_TaskBody struct {\n\tReq    *types.ReconfigVM_Task         `xml:\"urn:vim25 ReconfigVM_Task,omitempty\"`\n\tRes    *types.ReconfigVM_TaskResponse `xml:\"urn:vim25 ReconfigVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReconfigVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconfigVM_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigVM_Task) (*types.ReconfigVM_TaskResponse, error) {\n\tvar reqBody, resBody ReconfigVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconfigurationSatisfiableBody struct {\n\tReq    *types.ReconfigurationSatisfiable         `xml:\"urn:vim25 ReconfigurationSatisfiable,omitempty\"`\n\tRes    *types.ReconfigurationSatisfiableResponse `xml:\"urn:vim25 ReconfigurationSatisfiableResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReconfigurationSatisfiableBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconfigurationSatisfiable(ctx context.Context, r soap.RoundTripper, req *types.ReconfigurationSatisfiable) 
(*types.ReconfigurationSatisfiableResponse, error) {\n\tvar reqBody, resBody ReconfigurationSatisfiableBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconfigureAlarmBody struct {\n\tReq    *types.ReconfigureAlarm         `xml:\"urn:vim25 ReconfigureAlarm,omitempty\"`\n\tRes    *types.ReconfigureAlarmResponse `xml:\"urn:vim25 ReconfigureAlarmResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReconfigureAlarmBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconfigureAlarm(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureAlarm) (*types.ReconfigureAlarmResponse, error) {\n\tvar reqBody, resBody ReconfigureAlarmBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconfigureAutostartBody struct {\n\tReq    *types.ReconfigureAutostart         `xml:\"urn:vim25 ReconfigureAutostart,omitempty\"`\n\tRes    *types.ReconfigureAutostartResponse `xml:\"urn:vim25 ReconfigureAutostartResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReconfigureAutostartBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconfigureAutostart(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureAutostart) (*types.ReconfigureAutostartResponse, error) {\n\tvar reqBody, resBody ReconfigureAutostartBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconfigureCluster_TaskBody struct {\n\tReq    *types.ReconfigureCluster_Task         `xml:\"urn:vim25 ReconfigureCluster_Task,omitempty\"`\n\tRes    *types.ReconfigureCluster_TaskResponse `xml:\"urn:vim25 
ReconfigureCluster_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReconfigureCluster_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconfigureCluster_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureCluster_Task) (*types.ReconfigureCluster_TaskResponse, error) {\n\tvar reqBody, resBody ReconfigureCluster_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconfigureComputeResource_TaskBody struct {\n\tReq    *types.ReconfigureComputeResource_Task         `xml:\"urn:vim25 ReconfigureComputeResource_Task,omitempty\"`\n\tRes    *types.ReconfigureComputeResource_TaskResponse `xml:\"urn:vim25 ReconfigureComputeResource_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReconfigureComputeResource_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconfigureComputeResource_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureComputeResource_Task) (*types.ReconfigureComputeResource_TaskResponse, error) {\n\tvar reqBody, resBody ReconfigureComputeResource_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconfigureDVPort_TaskBody struct {\n\tReq    *types.ReconfigureDVPort_Task         `xml:\"urn:vim25 ReconfigureDVPort_Task,omitempty\"`\n\tRes    *types.ReconfigureDVPort_TaskResponse `xml:\"urn:vim25 ReconfigureDVPort_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReconfigureDVPort_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconfigureDVPort_Task(ctx 
context.Context, r soap.RoundTripper, req *types.ReconfigureDVPort_Task) (*types.ReconfigureDVPort_TaskResponse, error) {\n\tvar reqBody, resBody ReconfigureDVPort_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconfigureDVPortgroup_TaskBody struct {\n\tReq    *types.ReconfigureDVPortgroup_Task         `xml:\"urn:vim25 ReconfigureDVPortgroup_Task,omitempty\"`\n\tRes    *types.ReconfigureDVPortgroup_TaskResponse `xml:\"urn:vim25 ReconfigureDVPortgroup_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReconfigureDVPortgroup_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconfigureDVPortgroup_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureDVPortgroup_Task) (*types.ReconfigureDVPortgroup_TaskResponse, error) {\n\tvar reqBody, resBody ReconfigureDVPortgroup_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconfigureDatacenter_TaskBody struct {\n\tReq    *types.ReconfigureDatacenter_Task         `xml:\"urn:vim25 ReconfigureDatacenter_Task,omitempty\"`\n\tRes    *types.ReconfigureDatacenter_TaskResponse `xml:\"urn:vim25 ReconfigureDatacenter_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReconfigureDatacenter_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconfigureDatacenter_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureDatacenter_Task) (*types.ReconfigureDatacenter_TaskResponse, error) {\n\tvar reqBody, resBody ReconfigureDatacenter_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, 
err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconfigureDomObjectBody struct {\n\tReq    *types.ReconfigureDomObject         `xml:\"urn:vim25 ReconfigureDomObject,omitempty\"`\n\tRes    *types.ReconfigureDomObjectResponse `xml:\"urn:vim25 ReconfigureDomObjectResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReconfigureDomObjectBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconfigureDomObject(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureDomObject) (*types.ReconfigureDomObjectResponse, error) {\n\tvar reqBody, resBody ReconfigureDomObjectBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconfigureDvs_TaskBody struct {\n\tReq    *types.ReconfigureDvs_Task         `xml:\"urn:vim25 ReconfigureDvs_Task,omitempty\"`\n\tRes    *types.ReconfigureDvs_TaskResponse `xml:\"urn:vim25 ReconfigureDvs_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReconfigureDvs_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconfigureDvs_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureDvs_Task) (*types.ReconfigureDvs_TaskResponse, error) {\n\tvar reqBody, resBody ReconfigureDvs_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconfigureHostForDAS_TaskBody struct {\n\tReq    *types.ReconfigureHostForDAS_Task         `xml:\"urn:vim25 ReconfigureHostForDAS_Task,omitempty\"`\n\tRes    *types.ReconfigureHostForDAS_TaskResponse `xml:\"urn:vim25 ReconfigureHostForDAS_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ 
Fault,omitempty\"`\n}\n\nfunc (b *ReconfigureHostForDAS_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconfigureHostForDAS_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureHostForDAS_Task) (*types.ReconfigureHostForDAS_TaskResponse, error) {\n\tvar reqBody, resBody ReconfigureHostForDAS_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconfigureScheduledTaskBody struct {\n\tReq    *types.ReconfigureScheduledTask         `xml:\"urn:vim25 ReconfigureScheduledTask,omitempty\"`\n\tRes    *types.ReconfigureScheduledTaskResponse `xml:\"urn:vim25 ReconfigureScheduledTaskResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReconfigureScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconfigureScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureScheduledTask) (*types.ReconfigureScheduledTaskResponse, error) {\n\tvar reqBody, resBody ReconfigureScheduledTaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconfigureServiceConsoleReservationBody struct {\n\tReq    *types.ReconfigureServiceConsoleReservation         `xml:\"urn:vim25 ReconfigureServiceConsoleReservation,omitempty\"`\n\tRes    *types.ReconfigureServiceConsoleReservationResponse `xml:\"urn:vim25 ReconfigureServiceConsoleReservationResponse,omitempty\"`\n\tFault_ *soap.Fault                                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReconfigureServiceConsoleReservationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconfigureServiceConsoleReservation(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureServiceConsoleReservation) 
(*types.ReconfigureServiceConsoleReservationResponse, error) {\n\tvar reqBody, resBody ReconfigureServiceConsoleReservationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconfigureSnmpAgentBody struct {\n\tReq    *types.ReconfigureSnmpAgent         `xml:\"urn:vim25 ReconfigureSnmpAgent,omitempty\"`\n\tRes    *types.ReconfigureSnmpAgentResponse `xml:\"urn:vim25 ReconfigureSnmpAgentResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReconfigureSnmpAgentBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconfigureSnmpAgent(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureSnmpAgent) (*types.ReconfigureSnmpAgentResponse, error) {\n\tvar reqBody, resBody ReconfigureSnmpAgentBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconfigureVirtualMachineReservationBody struct {\n\tReq    *types.ReconfigureVirtualMachineReservation         `xml:\"urn:vim25 ReconfigureVirtualMachineReservation,omitempty\"`\n\tRes    *types.ReconfigureVirtualMachineReservationResponse `xml:\"urn:vim25 ReconfigureVirtualMachineReservationResponse,omitempty\"`\n\tFault_ *soap.Fault                                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReconfigureVirtualMachineReservationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconfigureVirtualMachineReservation(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureVirtualMachineReservation) (*types.ReconfigureVirtualMachineReservationResponse, error) {\n\tvar reqBody, resBody ReconfigureVirtualMachineReservationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, 
err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReconnectHost_TaskBody struct {\n\tReq    *types.ReconnectHost_Task         `xml:\"urn:vim25 ReconnectHost_Task,omitempty\"`\n\tRes    *types.ReconnectHost_TaskResponse `xml:\"urn:vim25 ReconnectHost_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReconnectHost_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReconnectHost_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconnectHost_Task) (*types.ReconnectHost_TaskResponse, error) {\n\tvar reqBody, resBody ReconnectHost_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RectifyDvsHost_TaskBody struct {\n\tReq    *types.RectifyDvsHost_Task         `xml:\"urn:vim25 RectifyDvsHost_Task,omitempty\"`\n\tRes    *types.RectifyDvsHost_TaskResponse `xml:\"urn:vim25 RectifyDvsHost_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RectifyDvsHost_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RectifyDvsHost_Task(ctx context.Context, r soap.RoundTripper, req *types.RectifyDvsHost_Task) (*types.RectifyDvsHost_TaskResponse, error) {\n\tvar reqBody, resBody RectifyDvsHost_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RectifyDvsOnHost_TaskBody struct {\n\tReq    *types.RectifyDvsOnHost_Task         `xml:\"urn:vim25 RectifyDvsOnHost_Task,omitempty\"`\n\tRes    *types.RectifyDvsOnHost_TaskResponse `xml:\"urn:vim25 RectifyDvsOnHost_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RectifyDvsOnHost_TaskBody) Fault() 
*soap.Fault { return b.Fault_ }\n\nfunc RectifyDvsOnHost_Task(ctx context.Context, r soap.RoundTripper, req *types.RectifyDvsOnHost_Task) (*types.RectifyDvsOnHost_TaskResponse, error) {\n\tvar reqBody, resBody RectifyDvsOnHost_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RefreshBody struct {\n\tReq    *types.Refresh         `xml:\"urn:vim25 Refresh,omitempty\"`\n\tRes    *types.RefreshResponse `xml:\"urn:vim25 RefreshResponse,omitempty\"`\n\tFault_ *soap.Fault            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RefreshBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc Refresh(ctx context.Context, r soap.RoundTripper, req *types.Refresh) (*types.RefreshResponse, error) {\n\tvar reqBody, resBody RefreshBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RefreshDVPortStateBody struct {\n\tReq    *types.RefreshDVPortState         `xml:\"urn:vim25 RefreshDVPortState,omitempty\"`\n\tRes    *types.RefreshDVPortStateResponse `xml:\"urn:vim25 RefreshDVPortStateResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RefreshDVPortStateBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RefreshDVPortState(ctx context.Context, r soap.RoundTripper, req *types.RefreshDVPortState) (*types.RefreshDVPortStateResponse, error) {\n\tvar reqBody, resBody RefreshDVPortStateBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RefreshDatastoreBody struct {\n\tReq    *types.RefreshDatastore         `xml:\"urn:vim25 RefreshDatastore,omitempty\"`\n\tRes    *types.RefreshDatastoreResponse `xml:\"urn:vim25 
RefreshDatastoreResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RefreshDatastoreBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RefreshDatastore(ctx context.Context, r soap.RoundTripper, req *types.RefreshDatastore) (*types.RefreshDatastoreResponse, error) {\n\tvar reqBody, resBody RefreshDatastoreBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RefreshDatastoreStorageInfoBody struct {\n\tReq    *types.RefreshDatastoreStorageInfo         `xml:\"urn:vim25 RefreshDatastoreStorageInfo,omitempty\"`\n\tRes    *types.RefreshDatastoreStorageInfoResponse `xml:\"urn:vim25 RefreshDatastoreStorageInfoResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RefreshDatastoreStorageInfoBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RefreshDatastoreStorageInfo(ctx context.Context, r soap.RoundTripper, req *types.RefreshDatastoreStorageInfo) (*types.RefreshDatastoreStorageInfoResponse, error) {\n\tvar reqBody, resBody RefreshDatastoreStorageInfoBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RefreshDateTimeSystemBody struct {\n\tReq    *types.RefreshDateTimeSystem         `xml:\"urn:vim25 RefreshDateTimeSystem,omitempty\"`\n\tRes    *types.RefreshDateTimeSystemResponse `xml:\"urn:vim25 RefreshDateTimeSystemResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RefreshDateTimeSystemBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RefreshDateTimeSystem(ctx context.Context, r soap.RoundTripper, req *types.RefreshDateTimeSystem) 
(*types.RefreshDateTimeSystemResponse, error) {\n\tvar reqBody, resBody RefreshDateTimeSystemBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RefreshFirewallBody struct {\n\tReq    *types.RefreshFirewall         `xml:\"urn:vim25 RefreshFirewall,omitempty\"`\n\tRes    *types.RefreshFirewallResponse `xml:\"urn:vim25 RefreshFirewallResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RefreshFirewallBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RefreshFirewall(ctx context.Context, r soap.RoundTripper, req *types.RefreshFirewall) (*types.RefreshFirewallResponse, error) {\n\tvar reqBody, resBody RefreshFirewallBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RefreshGraphicsManagerBody struct {\n\tReq    *types.RefreshGraphicsManager         `xml:\"urn:vim25 RefreshGraphicsManager,omitempty\"`\n\tRes    *types.RefreshGraphicsManagerResponse `xml:\"urn:vim25 RefreshGraphicsManagerResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RefreshGraphicsManagerBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RefreshGraphicsManager(ctx context.Context, r soap.RoundTripper, req *types.RefreshGraphicsManager) (*types.RefreshGraphicsManagerResponse, error) {\n\tvar reqBody, resBody RefreshGraphicsManagerBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RefreshHealthStatusSystemBody struct {\n\tReq    *types.RefreshHealthStatusSystem         `xml:\"urn:vim25 RefreshHealthStatusSystem,omitempty\"`\n\tRes    *types.RefreshHealthStatusSystemResponse 
`xml:\"urn:vim25 RefreshHealthStatusSystemResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RefreshHealthStatusSystemBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RefreshHealthStatusSystem(ctx context.Context, r soap.RoundTripper, req *types.RefreshHealthStatusSystem) (*types.RefreshHealthStatusSystemResponse, error) {\n\tvar reqBody, resBody RefreshHealthStatusSystemBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RefreshNetworkSystemBody struct {\n\tReq    *types.RefreshNetworkSystem         `xml:\"urn:vim25 RefreshNetworkSystem,omitempty\"`\n\tRes    *types.RefreshNetworkSystemResponse `xml:\"urn:vim25 RefreshNetworkSystemResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RefreshNetworkSystemBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RefreshNetworkSystem(ctx context.Context, r soap.RoundTripper, req *types.RefreshNetworkSystem) (*types.RefreshNetworkSystemResponse, error) {\n\tvar reqBody, resBody RefreshNetworkSystemBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RefreshRecommendationBody struct {\n\tReq    *types.RefreshRecommendation         `xml:\"urn:vim25 RefreshRecommendation,omitempty\"`\n\tRes    *types.RefreshRecommendationResponse `xml:\"urn:vim25 RefreshRecommendationResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RefreshRecommendationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RefreshRecommendation(ctx context.Context, r soap.RoundTripper, req *types.RefreshRecommendation) 
(*types.RefreshRecommendationResponse, error) {\n\tvar reqBody, resBody RefreshRecommendationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RefreshRuntimeBody struct {\n\tReq    *types.RefreshRuntime         `xml:\"urn:vim25 RefreshRuntime,omitempty\"`\n\tRes    *types.RefreshRuntimeResponse `xml:\"urn:vim25 RefreshRuntimeResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RefreshRuntimeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RefreshRuntime(ctx context.Context, r soap.RoundTripper, req *types.RefreshRuntime) (*types.RefreshRuntimeResponse, error) {\n\tvar reqBody, resBody RefreshRuntimeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RefreshServicesBody struct {\n\tReq    *types.RefreshServices         `xml:\"urn:vim25 RefreshServices,omitempty\"`\n\tRes    *types.RefreshServicesResponse `xml:\"urn:vim25 RefreshServicesResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RefreshServicesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RefreshServices(ctx context.Context, r soap.RoundTripper, req *types.RefreshServices) (*types.RefreshServicesResponse, error) {\n\tvar reqBody, resBody RefreshServicesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RefreshStorageDrsRecommendationBody struct {\n\tReq    *types.RefreshStorageDrsRecommendation         `xml:\"urn:vim25 RefreshStorageDrsRecommendation,omitempty\"`\n\tRes    *types.RefreshStorageDrsRecommendationResponse `xml:\"urn:vim25 
RefreshStorageDrsRecommendationResponse,omitempty\"`\n\tFault_ *soap.Fault                                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RefreshStorageDrsRecommendationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RefreshStorageDrsRecommendation(ctx context.Context, r soap.RoundTripper, req *types.RefreshStorageDrsRecommendation) (*types.RefreshStorageDrsRecommendationResponse, error) {\n\tvar reqBody, resBody RefreshStorageDrsRecommendationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RefreshStorageInfoBody struct {\n\tReq    *types.RefreshStorageInfo         `xml:\"urn:vim25 RefreshStorageInfo,omitempty\"`\n\tRes    *types.RefreshStorageInfoResponse `xml:\"urn:vim25 RefreshStorageInfoResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RefreshStorageInfoBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RefreshStorageInfo(ctx context.Context, r soap.RoundTripper, req *types.RefreshStorageInfo) (*types.RefreshStorageInfoResponse, error) {\n\tvar reqBody, resBody RefreshStorageInfoBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RefreshStorageSystemBody struct {\n\tReq    *types.RefreshStorageSystem         `xml:\"urn:vim25 RefreshStorageSystem,omitempty\"`\n\tRes    *types.RefreshStorageSystemResponse `xml:\"urn:vim25 RefreshStorageSystemResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RefreshStorageSystemBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RefreshStorageSystem(ctx context.Context, r soap.RoundTripper, req *types.RefreshStorageSystem) 
(*types.RefreshStorageSystemResponse, error) {\n\tvar reqBody, resBody RefreshStorageSystemBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RegisterChildVM_TaskBody struct {\n\tReq    *types.RegisterChildVM_Task         `xml:\"urn:vim25 RegisterChildVM_Task,omitempty\"`\n\tRes    *types.RegisterChildVM_TaskResponse `xml:\"urn:vim25 RegisterChildVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RegisterChildVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RegisterChildVM_Task(ctx context.Context, r soap.RoundTripper, req *types.RegisterChildVM_Task) (*types.RegisterChildVM_TaskResponse, error) {\n\tvar reqBody, resBody RegisterChildVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RegisterDiskBody struct {\n\tReq    *types.RegisterDisk         `xml:\"urn:vim25 RegisterDisk,omitempty\"`\n\tRes    *types.RegisterDiskResponse `xml:\"urn:vim25 RegisterDiskResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RegisterDiskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RegisterDisk(ctx context.Context, r soap.RoundTripper, req *types.RegisterDisk) (*types.RegisterDiskResponse, error) {\n\tvar reqBody, resBody RegisterDiskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RegisterExtensionBody struct {\n\tReq    *types.RegisterExtension         `xml:\"urn:vim25 RegisterExtension,omitempty\"`\n\tRes    *types.RegisterExtensionResponse `xml:\"urn:vim25 RegisterExtensionResponse,omitempty\"`\n\tFault_ *soap.Fault                      
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RegisterExtensionBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RegisterExtension(ctx context.Context, r soap.RoundTripper, req *types.RegisterExtension) (*types.RegisterExtensionResponse, error) {\n\tvar reqBody, resBody RegisterExtensionBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RegisterHealthUpdateProviderBody struct {\n\tReq    *types.RegisterHealthUpdateProvider         `xml:\"urn:vim25 RegisterHealthUpdateProvider,omitempty\"`\n\tRes    *types.RegisterHealthUpdateProviderResponse `xml:\"urn:vim25 RegisterHealthUpdateProviderResponse,omitempty\"`\n\tFault_ *soap.Fault                                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RegisterHealthUpdateProviderBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RegisterHealthUpdateProvider(ctx context.Context, r soap.RoundTripper, req *types.RegisterHealthUpdateProvider) (*types.RegisterHealthUpdateProviderResponse, error) {\n\tvar reqBody, resBody RegisterHealthUpdateProviderBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RegisterKmipServerBody struct {\n\tReq    *types.RegisterKmipServer         `xml:\"urn:vim25 RegisterKmipServer,omitempty\"`\n\tRes    *types.RegisterKmipServerResponse `xml:\"urn:vim25 RegisterKmipServerResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RegisterKmipServerBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RegisterKmipServer(ctx context.Context, r soap.RoundTripper, req *types.RegisterKmipServer) (*types.RegisterKmipServerResponse, error) {\n\tvar reqBody, resBody RegisterKmipServerBody\n\n\treqBody.Req = 
req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RegisterVM_TaskBody struct {\n\tReq    *types.RegisterVM_Task         `xml:\"urn:vim25 RegisterVM_Task,omitempty\"`\n\tRes    *types.RegisterVM_TaskResponse `xml:\"urn:vim25 RegisterVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RegisterVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RegisterVM_Task(ctx context.Context, r soap.RoundTripper, req *types.RegisterVM_Task) (*types.RegisterVM_TaskResponse, error) {\n\tvar reqBody, resBody RegisterVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReleaseCredentialsInGuestBody struct {\n\tReq    *types.ReleaseCredentialsInGuest         `xml:\"urn:vim25 ReleaseCredentialsInGuest,omitempty\"`\n\tRes    *types.ReleaseCredentialsInGuestResponse `xml:\"urn:vim25 ReleaseCredentialsInGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReleaseCredentialsInGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReleaseCredentialsInGuest(ctx context.Context, r soap.RoundTripper, req *types.ReleaseCredentialsInGuest) (*types.ReleaseCredentialsInGuestResponse, error) {\n\tvar reqBody, resBody ReleaseCredentialsInGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReleaseIpAllocationBody struct {\n\tReq    *types.ReleaseIpAllocation         `xml:\"urn:vim25 ReleaseIpAllocation,omitempty\"`\n\tRes    *types.ReleaseIpAllocationResponse `xml:\"urn:vim25 ReleaseIpAllocationResponse,omitempty\"`\n\tFault_ *soap.Fault                        
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReleaseIpAllocationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReleaseIpAllocation(ctx context.Context, r soap.RoundTripper, req *types.ReleaseIpAllocation) (*types.ReleaseIpAllocationResponse, error) {\n\tvar reqBody, resBody ReleaseIpAllocationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReleaseManagedSnapshotBody struct {\n\tReq    *types.ReleaseManagedSnapshot         `xml:\"urn:vim25 ReleaseManagedSnapshot,omitempty\"`\n\tRes    *types.ReleaseManagedSnapshotResponse `xml:\"urn:vim25 ReleaseManagedSnapshotResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReleaseManagedSnapshotBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReleaseManagedSnapshot(ctx context.Context, r soap.RoundTripper, req *types.ReleaseManagedSnapshot) (*types.ReleaseManagedSnapshotResponse, error) {\n\tvar reqBody, resBody ReleaseManagedSnapshotBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReloadBody struct {\n\tReq    *types.Reload         `xml:\"urn:vim25 Reload,omitempty\"`\n\tRes    *types.ReloadResponse `xml:\"urn:vim25 ReloadResponse,omitempty\"`\n\tFault_ *soap.Fault           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReloadBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc Reload(ctx context.Context, r soap.RoundTripper, req *types.Reload) (*types.ReloadResponse, error) {\n\tvar reqBody, resBody ReloadBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RelocateVM_TaskBody struct {\n\tReq    
*types.RelocateVM_Task         `xml:\"urn:vim25 RelocateVM_Task,omitempty\"`\n\tRes    *types.RelocateVM_TaskResponse `xml:\"urn:vim25 RelocateVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RelocateVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RelocateVM_Task(ctx context.Context, r soap.RoundTripper, req *types.RelocateVM_Task) (*types.RelocateVM_TaskResponse, error) {\n\tvar reqBody, resBody RelocateVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RelocateVStorageObject_TaskBody struct {\n\tReq    *types.RelocateVStorageObject_Task         `xml:\"urn:vim25 RelocateVStorageObject_Task,omitempty\"`\n\tRes    *types.RelocateVStorageObject_TaskResponse `xml:\"urn:vim25 RelocateVStorageObject_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RelocateVStorageObject_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RelocateVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *types.RelocateVStorageObject_Task) (*types.RelocateVStorageObject_TaskResponse, error) {\n\tvar reqBody, resBody RelocateVStorageObject_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveAlarmBody struct {\n\tReq    *types.RemoveAlarm         `xml:\"urn:vim25 RemoveAlarm,omitempty\"`\n\tRes    *types.RemoveAlarmResponse `xml:\"urn:vim25 RemoveAlarmResponse,omitempty\"`\n\tFault_ *soap.Fault                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveAlarmBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveAlarm(ctx context.Context, r soap.RoundTripper, req 
*types.RemoveAlarm) (*types.RemoveAlarmResponse, error) {\n\tvar reqBody, resBody RemoveAlarmBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveAllSnapshots_TaskBody struct {\n\tReq    *types.RemoveAllSnapshots_Task         `xml:\"urn:vim25 RemoveAllSnapshots_Task,omitempty\"`\n\tRes    *types.RemoveAllSnapshots_TaskResponse `xml:\"urn:vim25 RemoveAllSnapshots_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveAllSnapshots_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveAllSnapshots_Task(ctx context.Context, r soap.RoundTripper, req *types.RemoveAllSnapshots_Task) (*types.RemoveAllSnapshots_TaskResponse, error) {\n\tvar reqBody, resBody RemoveAllSnapshots_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveAssignedLicenseBody struct {\n\tReq    *types.RemoveAssignedLicense         `xml:\"urn:vim25 RemoveAssignedLicense,omitempty\"`\n\tRes    *types.RemoveAssignedLicenseResponse `xml:\"urn:vim25 RemoveAssignedLicenseResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveAssignedLicenseBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveAssignedLicense(ctx context.Context, r soap.RoundTripper, req *types.RemoveAssignedLicense) (*types.RemoveAssignedLicenseResponse, error) {\n\tvar reqBody, resBody RemoveAssignedLicenseBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveAuthorizationRoleBody struct {\n\tReq    *types.RemoveAuthorizationRole         `xml:\"urn:vim25 
RemoveAuthorizationRole,omitempty\"`\n\tRes    *types.RemoveAuthorizationRoleResponse `xml:\"urn:vim25 RemoveAuthorizationRoleResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveAuthorizationRoleBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveAuthorizationRole(ctx context.Context, r soap.RoundTripper, req *types.RemoveAuthorizationRole) (*types.RemoveAuthorizationRoleResponse, error) {\n\tvar reqBody, resBody RemoveAuthorizationRoleBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveCustomFieldDefBody struct {\n\tReq    *types.RemoveCustomFieldDef         `xml:\"urn:vim25 RemoveCustomFieldDef,omitempty\"`\n\tRes    *types.RemoveCustomFieldDefResponse `xml:\"urn:vim25 RemoveCustomFieldDefResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveCustomFieldDefBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveCustomFieldDef(ctx context.Context, r soap.RoundTripper, req *types.RemoveCustomFieldDef) (*types.RemoveCustomFieldDefResponse, error) {\n\tvar reqBody, resBody RemoveCustomFieldDefBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveDatastoreBody struct {\n\tReq    *types.RemoveDatastore         `xml:\"urn:vim25 RemoveDatastore,omitempty\"`\n\tRes    *types.RemoveDatastoreResponse `xml:\"urn:vim25 RemoveDatastoreResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveDatastoreBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveDatastore(ctx context.Context, r soap.RoundTripper, req *types.RemoveDatastore) 
(*types.RemoveDatastoreResponse, error) {\n\tvar reqBody, resBody RemoveDatastoreBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveDatastoreEx_TaskBody struct {\n\tReq    *types.RemoveDatastoreEx_Task         `xml:\"urn:vim25 RemoveDatastoreEx_Task,omitempty\"`\n\tRes    *types.RemoveDatastoreEx_TaskResponse `xml:\"urn:vim25 RemoveDatastoreEx_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveDatastoreEx_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveDatastoreEx_Task(ctx context.Context, r soap.RoundTripper, req *types.RemoveDatastoreEx_Task) (*types.RemoveDatastoreEx_TaskResponse, error) {\n\tvar reqBody, resBody RemoveDatastoreEx_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveDiskMapping_TaskBody struct {\n\tReq    *types.RemoveDiskMapping_Task         `xml:\"urn:vim25 RemoveDiskMapping_Task,omitempty\"`\n\tRes    *types.RemoveDiskMapping_TaskResponse `xml:\"urn:vim25 RemoveDiskMapping_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveDiskMapping_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveDiskMapping_Task(ctx context.Context, r soap.RoundTripper, req *types.RemoveDiskMapping_Task) (*types.RemoveDiskMapping_TaskResponse, error) {\n\tvar reqBody, resBody RemoveDiskMapping_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveDisk_TaskBody struct {\n\tReq    *types.RemoveDisk_Task         `xml:\"urn:vim25 RemoveDisk_Task,omitempty\"`\n\tRes    
*types.RemoveDisk_TaskResponse `xml:\"urn:vim25 RemoveDisk_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.RemoveDisk_Task) (*types.RemoveDisk_TaskResponse, error) {\n\tvar reqBody, resBody RemoveDisk_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveEntityPermissionBody struct {\n\tReq    *types.RemoveEntityPermission         `xml:\"urn:vim25 RemoveEntityPermission,omitempty\"`\n\tRes    *types.RemoveEntityPermissionResponse `xml:\"urn:vim25 RemoveEntityPermissionResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveEntityPermissionBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveEntityPermission(ctx context.Context, r soap.RoundTripper, req *types.RemoveEntityPermission) (*types.RemoveEntityPermissionResponse, error) {\n\tvar reqBody, resBody RemoveEntityPermissionBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveFilterBody struct {\n\tReq    *types.RemoveFilter         `xml:\"urn:vim25 RemoveFilter,omitempty\"`\n\tRes    *types.RemoveFilterResponse `xml:\"urn:vim25 RemoveFilterResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveFilterBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveFilter(ctx context.Context, r soap.RoundTripper, req *types.RemoveFilter) (*types.RemoveFilterResponse, error) {\n\tvar reqBody, resBody RemoveFilterBody\n\n\treqBody.Req = req\n\n\tif err := 
r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveFilterEntitiesBody struct {\n\tReq    *types.RemoveFilterEntities         `xml:\"urn:vim25 RemoveFilterEntities,omitempty\"`\n\tRes    *types.RemoveFilterEntitiesResponse `xml:\"urn:vim25 RemoveFilterEntitiesResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveFilterEntitiesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveFilterEntities(ctx context.Context, r soap.RoundTripper, req *types.RemoveFilterEntities) (*types.RemoveFilterEntitiesResponse, error) {\n\tvar reqBody, resBody RemoveFilterEntitiesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveGroupBody struct {\n\tReq    *types.RemoveGroup         `xml:\"urn:vim25 RemoveGroup,omitempty\"`\n\tRes    *types.RemoveGroupResponse `xml:\"urn:vim25 RemoveGroupResponse,omitempty\"`\n\tFault_ *soap.Fault                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveGroupBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveGroup(ctx context.Context, r soap.RoundTripper, req *types.RemoveGroup) (*types.RemoveGroupResponse, error) {\n\tvar reqBody, resBody RemoveGroupBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveGuestAliasBody struct {\n\tReq    *types.RemoveGuestAlias         `xml:\"urn:vim25 RemoveGuestAlias,omitempty\"`\n\tRes    *types.RemoveGuestAliasResponse `xml:\"urn:vim25 RemoveGuestAliasResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveGuestAliasBody) Fault() *soap.Fault { return b.Fault_ 
}\n\nfunc RemoveGuestAlias(ctx context.Context, r soap.RoundTripper, req *types.RemoveGuestAlias) (*types.RemoveGuestAliasResponse, error) {\n\tvar reqBody, resBody RemoveGuestAliasBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveGuestAliasByCertBody struct {\n\tReq    *types.RemoveGuestAliasByCert         `xml:\"urn:vim25 RemoveGuestAliasByCert,omitempty\"`\n\tRes    *types.RemoveGuestAliasByCertResponse `xml:\"urn:vim25 RemoveGuestAliasByCertResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveGuestAliasByCertBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveGuestAliasByCert(ctx context.Context, r soap.RoundTripper, req *types.RemoveGuestAliasByCert) (*types.RemoveGuestAliasByCertResponse, error) {\n\tvar reqBody, resBody RemoveGuestAliasByCertBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveInternetScsiSendTargetsBody struct {\n\tReq    *types.RemoveInternetScsiSendTargets         `xml:\"urn:vim25 RemoveInternetScsiSendTargets,omitempty\"`\n\tRes    *types.RemoveInternetScsiSendTargetsResponse `xml:\"urn:vim25 RemoveInternetScsiSendTargetsResponse,omitempty\"`\n\tFault_ *soap.Fault                                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveInternetScsiSendTargetsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveInternetScsiSendTargets(ctx context.Context, r soap.RoundTripper, req *types.RemoveInternetScsiSendTargets) (*types.RemoveInternetScsiSendTargetsResponse, error) {\n\tvar reqBody, resBody RemoveInternetScsiSendTargetsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, 
err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveInternetScsiStaticTargetsBody struct {\n\tReq    *types.RemoveInternetScsiStaticTargets         `xml:\"urn:vim25 RemoveInternetScsiStaticTargets,omitempty\"`\n\tRes    *types.RemoveInternetScsiStaticTargetsResponse `xml:\"urn:vim25 RemoveInternetScsiStaticTargetsResponse,omitempty\"`\n\tFault_ *soap.Fault                                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveInternetScsiStaticTargetsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveInternetScsiStaticTargets(ctx context.Context, r soap.RoundTripper, req *types.RemoveInternetScsiStaticTargets) (*types.RemoveInternetScsiStaticTargetsResponse, error) {\n\tvar reqBody, resBody RemoveInternetScsiStaticTargetsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveKeyBody struct {\n\tReq    *types.RemoveKey         `xml:\"urn:vim25 RemoveKey,omitempty\"`\n\tRes    *types.RemoveKeyResponse `xml:\"urn:vim25 RemoveKeyResponse,omitempty\"`\n\tFault_ *soap.Fault              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveKeyBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveKey(ctx context.Context, r soap.RoundTripper, req *types.RemoveKey) (*types.RemoveKeyResponse, error) {\n\tvar reqBody, resBody RemoveKeyBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveKeysBody struct {\n\tReq    *types.RemoveKeys         `xml:\"urn:vim25 RemoveKeys,omitempty\"`\n\tRes    *types.RemoveKeysResponse `xml:\"urn:vim25 RemoveKeysResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveKeysBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc 
RemoveKeys(ctx context.Context, r soap.RoundTripper, req *types.RemoveKeys) (*types.RemoveKeysResponse, error) {\n\tvar reqBody, resBody RemoveKeysBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveKmipServerBody struct {\n\tReq    *types.RemoveKmipServer         `xml:\"urn:vim25 RemoveKmipServer,omitempty\"`\n\tRes    *types.RemoveKmipServerResponse `xml:\"urn:vim25 RemoveKmipServerResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveKmipServerBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveKmipServer(ctx context.Context, r soap.RoundTripper, req *types.RemoveKmipServer) (*types.RemoveKmipServerResponse, error) {\n\tvar reqBody, resBody RemoveKmipServerBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveLicenseBody struct {\n\tReq    *types.RemoveLicense         `xml:\"urn:vim25 RemoveLicense,omitempty\"`\n\tRes    *types.RemoveLicenseResponse `xml:\"urn:vim25 RemoveLicenseResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveLicenseBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveLicense(ctx context.Context, r soap.RoundTripper, req *types.RemoveLicense) (*types.RemoveLicenseResponse, error) {\n\tvar reqBody, resBody RemoveLicenseBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveLicenseLabelBody struct {\n\tReq    *types.RemoveLicenseLabel         `xml:\"urn:vim25 RemoveLicenseLabel,omitempty\"`\n\tRes    *types.RemoveLicenseLabelResponse `xml:\"urn:vim25 RemoveLicenseLabelResponse,omitempty\"`\n\tFault_ 
*soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveLicenseLabelBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveLicenseLabel(ctx context.Context, r soap.RoundTripper, req *types.RemoveLicenseLabel) (*types.RemoveLicenseLabelResponse, error) {\n\tvar reqBody, resBody RemoveLicenseLabelBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveMonitoredEntitiesBody struct {\n\tReq    *types.RemoveMonitoredEntities         `xml:\"urn:vim25 RemoveMonitoredEntities,omitempty\"`\n\tRes    *types.RemoveMonitoredEntitiesResponse `xml:\"urn:vim25 RemoveMonitoredEntitiesResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveMonitoredEntitiesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveMonitoredEntities(ctx context.Context, r soap.RoundTripper, req *types.RemoveMonitoredEntities) (*types.RemoveMonitoredEntitiesResponse, error) {\n\tvar reqBody, resBody RemoveMonitoredEntitiesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveNetworkResourcePoolBody struct {\n\tReq    *types.RemoveNetworkResourcePool         `xml:\"urn:vim25 RemoveNetworkResourcePool,omitempty\"`\n\tRes    *types.RemoveNetworkResourcePoolResponse `xml:\"urn:vim25 RemoveNetworkResourcePoolResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveNetworkResourcePoolBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveNetworkResourcePool(ctx context.Context, r soap.RoundTripper, req *types.RemoveNetworkResourcePool) (*types.RemoveNetworkResourcePoolResponse, error) {\n\tvar reqBody, 
resBody RemoveNetworkResourcePoolBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemovePerfIntervalBody struct {\n\tReq    *types.RemovePerfInterval         `xml:\"urn:vim25 RemovePerfInterval,omitempty\"`\n\tRes    *types.RemovePerfIntervalResponse `xml:\"urn:vim25 RemovePerfIntervalResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemovePerfIntervalBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemovePerfInterval(ctx context.Context, r soap.RoundTripper, req *types.RemovePerfInterval) (*types.RemovePerfIntervalResponse, error) {\n\tvar reqBody, resBody RemovePerfIntervalBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemovePortGroupBody struct {\n\tReq    *types.RemovePortGroup         `xml:\"urn:vim25 RemovePortGroup,omitempty\"`\n\tRes    *types.RemovePortGroupResponse `xml:\"urn:vim25 RemovePortGroupResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemovePortGroupBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemovePortGroup(ctx context.Context, r soap.RoundTripper, req *types.RemovePortGroup) (*types.RemovePortGroupResponse, error) {\n\tvar reqBody, resBody RemovePortGroupBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveScheduledTaskBody struct {\n\tReq    *types.RemoveScheduledTask         `xml:\"urn:vim25 RemoveScheduledTask,omitempty\"`\n\tRes    *types.RemoveScheduledTaskResponse `xml:\"urn:vim25 RemoveScheduledTaskResponse,omitempty\"`\n\tFault_ *soap.Fault                        
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.RemoveScheduledTask) (*types.RemoveScheduledTaskResponse, error) {\n\tvar reqBody, resBody RemoveScheduledTaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveServiceConsoleVirtualNicBody struct {\n\tReq    *types.RemoveServiceConsoleVirtualNic         `xml:\"urn:vim25 RemoveServiceConsoleVirtualNic,omitempty\"`\n\tRes    *types.RemoveServiceConsoleVirtualNicResponse `xml:\"urn:vim25 RemoveServiceConsoleVirtualNicResponse,omitempty\"`\n\tFault_ *soap.Fault                                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveServiceConsoleVirtualNicBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.RemoveServiceConsoleVirtualNic) (*types.RemoveServiceConsoleVirtualNicResponse, error) {\n\tvar reqBody, resBody RemoveServiceConsoleVirtualNicBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveSmartCardTrustAnchorBody struct {\n\tReq    *types.RemoveSmartCardTrustAnchor         `xml:\"urn:vim25 RemoveSmartCardTrustAnchor,omitempty\"`\n\tRes    *types.RemoveSmartCardTrustAnchorResponse `xml:\"urn:vim25 RemoveSmartCardTrustAnchorResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveSmartCardTrustAnchorBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveSmartCardTrustAnchor(ctx context.Context, r soap.RoundTripper, req *types.RemoveSmartCardTrustAnchor) 
(*types.RemoveSmartCardTrustAnchorResponse, error) {\n\tvar reqBody, resBody RemoveSmartCardTrustAnchorBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveSmartCardTrustAnchorByFingerprintBody struct {\n\tReq    *types.RemoveSmartCardTrustAnchorByFingerprint         `xml:\"urn:vim25 RemoveSmartCardTrustAnchorByFingerprint,omitempty\"`\n\tRes    *types.RemoveSmartCardTrustAnchorByFingerprintResponse `xml:\"urn:vim25 RemoveSmartCardTrustAnchorByFingerprintResponse,omitempty\"`\n\tFault_ *soap.Fault                                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveSmartCardTrustAnchorByFingerprintBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveSmartCardTrustAnchorByFingerprint(ctx context.Context, r soap.RoundTripper, req *types.RemoveSmartCardTrustAnchorByFingerprint) (*types.RemoveSmartCardTrustAnchorByFingerprintResponse, error) {\n\tvar reqBody, resBody RemoveSmartCardTrustAnchorByFingerprintBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveSnapshot_TaskBody struct {\n\tReq    *types.RemoveSnapshot_Task         `xml:\"urn:vim25 RemoveSnapshot_Task,omitempty\"`\n\tRes    *types.RemoveSnapshot_TaskResponse `xml:\"urn:vim25 RemoveSnapshot_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.RemoveSnapshot_Task) (*types.RemoveSnapshot_TaskResponse, error) {\n\tvar reqBody, resBody RemoveSnapshot_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, 
err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveUserBody struct {\n\tReq    *types.RemoveUser         `xml:\"urn:vim25 RemoveUser,omitempty\"`\n\tRes    *types.RemoveUserResponse `xml:\"urn:vim25 RemoveUserResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveUserBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveUser(ctx context.Context, r soap.RoundTripper, req *types.RemoveUser) (*types.RemoveUserResponse, error) {\n\tvar reqBody, resBody RemoveUserBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveVirtualNicBody struct {\n\tReq    *types.RemoveVirtualNic         `xml:\"urn:vim25 RemoveVirtualNic,omitempty\"`\n\tRes    *types.RemoveVirtualNicResponse `xml:\"urn:vim25 RemoveVirtualNicResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveVirtualNicBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.RemoveVirtualNic) (*types.RemoveVirtualNicResponse, error) {\n\tvar reqBody, resBody RemoveVirtualNicBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RemoveVirtualSwitchBody struct {\n\tReq    *types.RemoveVirtualSwitch         `xml:\"urn:vim25 RemoveVirtualSwitch,omitempty\"`\n\tRes    *types.RemoveVirtualSwitchResponse `xml:\"urn:vim25 RemoveVirtualSwitchResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RemoveVirtualSwitchBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RemoveVirtualSwitch(ctx context.Context, r soap.RoundTripper, req *types.RemoveVirtualSwitch) 
(*types.RemoveVirtualSwitchResponse, error) {\n\tvar reqBody, resBody RemoveVirtualSwitchBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RenameCustomFieldDefBody struct {\n\tReq    *types.RenameCustomFieldDef         `xml:\"urn:vim25 RenameCustomFieldDef,omitempty\"`\n\tRes    *types.RenameCustomFieldDefResponse `xml:\"urn:vim25 RenameCustomFieldDefResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RenameCustomFieldDefBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RenameCustomFieldDef(ctx context.Context, r soap.RoundTripper, req *types.RenameCustomFieldDef) (*types.RenameCustomFieldDefResponse, error) {\n\tvar reqBody, resBody RenameCustomFieldDefBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RenameCustomizationSpecBody struct {\n\tReq    *types.RenameCustomizationSpec         `xml:\"urn:vim25 RenameCustomizationSpec,omitempty\"`\n\tRes    *types.RenameCustomizationSpecResponse `xml:\"urn:vim25 RenameCustomizationSpecResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RenameCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RenameCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.RenameCustomizationSpec) (*types.RenameCustomizationSpecResponse, error) {\n\tvar reqBody, resBody RenameCustomizationSpecBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RenameDatastoreBody struct {\n\tReq    *types.RenameDatastore         `xml:\"urn:vim25 RenameDatastore,omitempty\"`\n\tRes    
*types.RenameDatastoreResponse `xml:\"urn:vim25 RenameDatastoreResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RenameDatastoreBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RenameDatastore(ctx context.Context, r soap.RoundTripper, req *types.RenameDatastore) (*types.RenameDatastoreResponse, error) {\n\tvar reqBody, resBody RenameDatastoreBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RenameSnapshotBody struct {\n\tReq    *types.RenameSnapshot         `xml:\"urn:vim25 RenameSnapshot,omitempty\"`\n\tRes    *types.RenameSnapshotResponse `xml:\"urn:vim25 RenameSnapshotResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RenameSnapshotBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RenameSnapshot(ctx context.Context, r soap.RoundTripper, req *types.RenameSnapshot) (*types.RenameSnapshotResponse, error) {\n\tvar reqBody, resBody RenameSnapshotBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RenameVStorageObjectBody struct {\n\tReq    *types.RenameVStorageObject         `xml:\"urn:vim25 RenameVStorageObject,omitempty\"`\n\tRes    *types.RenameVStorageObjectResponse `xml:\"urn:vim25 RenameVStorageObjectResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RenameVStorageObjectBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RenameVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.RenameVStorageObject) (*types.RenameVStorageObjectResponse, error) {\n\tvar reqBody, resBody RenameVStorageObjectBody\n\n\treqBody.Req = req\n\n\tif err := 
r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype Rename_TaskBody struct {\n\tReq    *types.Rename_Task         `xml:\"urn:vim25 Rename_Task,omitempty\"`\n\tRes    *types.Rename_TaskResponse `xml:\"urn:vim25 Rename_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *Rename_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc Rename_Task(ctx context.Context, r soap.RoundTripper, req *types.Rename_Task) (*types.Rename_TaskResponse, error) {\n\tvar reqBody, resBody Rename_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReplaceCACertificatesAndCRLsBody struct {\n\tReq    *types.ReplaceCACertificatesAndCRLs         `xml:\"urn:vim25 ReplaceCACertificatesAndCRLs,omitempty\"`\n\tRes    *types.ReplaceCACertificatesAndCRLsResponse `xml:\"urn:vim25 ReplaceCACertificatesAndCRLsResponse,omitempty\"`\n\tFault_ *soap.Fault                                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReplaceCACertificatesAndCRLsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReplaceCACertificatesAndCRLs(ctx context.Context, r soap.RoundTripper, req *types.ReplaceCACertificatesAndCRLs) (*types.ReplaceCACertificatesAndCRLsResponse, error) {\n\tvar reqBody, resBody ReplaceCACertificatesAndCRLsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ReplaceSmartCardTrustAnchorsBody struct {\n\tReq    *types.ReplaceSmartCardTrustAnchors         `xml:\"urn:vim25 ReplaceSmartCardTrustAnchors,omitempty\"`\n\tRes    *types.ReplaceSmartCardTrustAnchorsResponse `xml:\"urn:vim25 ReplaceSmartCardTrustAnchorsResponse,omitempty\"`\n\tFault_ *soap.Fault                         
        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ReplaceSmartCardTrustAnchorsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ReplaceSmartCardTrustAnchors(ctx context.Context, r soap.RoundTripper, req *types.ReplaceSmartCardTrustAnchors) (*types.ReplaceSmartCardTrustAnchorsResponse, error) {\n\tvar reqBody, resBody ReplaceSmartCardTrustAnchorsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RescanAllHbaBody struct {\n\tReq    *types.RescanAllHba         `xml:\"urn:vim25 RescanAllHba,omitempty\"`\n\tRes    *types.RescanAllHbaResponse `xml:\"urn:vim25 RescanAllHbaResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RescanAllHbaBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RescanAllHba(ctx context.Context, r soap.RoundTripper, req *types.RescanAllHba) (*types.RescanAllHbaResponse, error) {\n\tvar reqBody, resBody RescanAllHbaBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RescanHbaBody struct {\n\tReq    *types.RescanHba         `xml:\"urn:vim25 RescanHba,omitempty\"`\n\tRes    *types.RescanHbaResponse `xml:\"urn:vim25 RescanHbaResponse,omitempty\"`\n\tFault_ *soap.Fault              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RescanHbaBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RescanHba(ctx context.Context, r soap.RoundTripper, req *types.RescanHba) (*types.RescanHbaResponse, error) {\n\tvar reqBody, resBody RescanHbaBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RescanVffsBody struct {\n\tReq    *types.RescanVffs         `xml:\"urn:vim25 
RescanVffs,omitempty\"`\n\tRes    *types.RescanVffsResponse `xml:\"urn:vim25 RescanVffsResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RescanVffsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RescanVffs(ctx context.Context, r soap.RoundTripper, req *types.RescanVffs) (*types.RescanVffsResponse, error) {\n\tvar reqBody, resBody RescanVffsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RescanVmfsBody struct {\n\tReq    *types.RescanVmfs         `xml:\"urn:vim25 RescanVmfs,omitempty\"`\n\tRes    *types.RescanVmfsResponse `xml:\"urn:vim25 RescanVmfsResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RescanVmfsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RescanVmfs(ctx context.Context, r soap.RoundTripper, req *types.RescanVmfs) (*types.RescanVmfsResponse, error) {\n\tvar reqBody, resBody RescanVmfsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ResetCollectorBody struct {\n\tReq    *types.ResetCollector         `xml:\"urn:vim25 ResetCollector,omitempty\"`\n\tRes    *types.ResetCollectorResponse `xml:\"urn:vim25 ResetCollectorResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ResetCollectorBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ResetCollector(ctx context.Context, r soap.RoundTripper, req *types.ResetCollector) (*types.ResetCollectorResponse, error) {\n\tvar reqBody, resBody ResetCollectorBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype 
ResetCounterLevelMappingBody struct {\n\tReq    *types.ResetCounterLevelMapping         `xml:\"urn:vim25 ResetCounterLevelMapping,omitempty\"`\n\tRes    *types.ResetCounterLevelMappingResponse `xml:\"urn:vim25 ResetCounterLevelMappingResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ResetCounterLevelMappingBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ResetCounterLevelMapping(ctx context.Context, r soap.RoundTripper, req *types.ResetCounterLevelMapping) (*types.ResetCounterLevelMappingResponse, error) {\n\tvar reqBody, resBody ResetCounterLevelMappingBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ResetEntityPermissionsBody struct {\n\tReq    *types.ResetEntityPermissions         `xml:\"urn:vim25 ResetEntityPermissions,omitempty\"`\n\tRes    *types.ResetEntityPermissionsResponse `xml:\"urn:vim25 ResetEntityPermissionsResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ResetEntityPermissionsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ResetEntityPermissions(ctx context.Context, r soap.RoundTripper, req *types.ResetEntityPermissions) (*types.ResetEntityPermissionsResponse, error) {\n\tvar reqBody, resBody ResetEntityPermissionsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ResetFirmwareToFactoryDefaultsBody struct {\n\tReq    *types.ResetFirmwareToFactoryDefaults         `xml:\"urn:vim25 ResetFirmwareToFactoryDefaults,omitempty\"`\n\tRes    *types.ResetFirmwareToFactoryDefaultsResponse `xml:\"urn:vim25 ResetFirmwareToFactoryDefaultsResponse,omitempty\"`\n\tFault_ *soap.Fault                                   
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ResetFirmwareToFactoryDefaultsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ResetFirmwareToFactoryDefaults(ctx context.Context, r soap.RoundTripper, req *types.ResetFirmwareToFactoryDefaults) (*types.ResetFirmwareToFactoryDefaultsResponse, error) {\n\tvar reqBody, resBody ResetFirmwareToFactoryDefaultsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ResetGuestInformationBody struct {\n\tReq    *types.ResetGuestInformation         `xml:\"urn:vim25 ResetGuestInformation,omitempty\"`\n\tRes    *types.ResetGuestInformationResponse `xml:\"urn:vim25 ResetGuestInformationResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ResetGuestInformationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ResetGuestInformation(ctx context.Context, r soap.RoundTripper, req *types.ResetGuestInformation) (*types.ResetGuestInformationResponse, error) {\n\tvar reqBody, resBody ResetGuestInformationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ResetListViewBody struct {\n\tReq    *types.ResetListView         `xml:\"urn:vim25 ResetListView,omitempty\"`\n\tRes    *types.ResetListViewResponse `xml:\"urn:vim25 ResetListViewResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ResetListViewBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ResetListView(ctx context.Context, r soap.RoundTripper, req *types.ResetListView) (*types.ResetListViewResponse, error) {\n\tvar reqBody, resBody ResetListViewBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil 
{\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ResetListViewFromViewBody struct {\n\tReq    *types.ResetListViewFromView         `xml:\"urn:vim25 ResetListViewFromView,omitempty\"`\n\tRes    *types.ResetListViewFromViewResponse `xml:\"urn:vim25 ResetListViewFromViewResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ResetListViewFromViewBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ResetListViewFromView(ctx context.Context, r soap.RoundTripper, req *types.ResetListViewFromView) (*types.ResetListViewFromViewResponse, error) {\n\tvar reqBody, resBody ResetListViewFromViewBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ResetSystemHealthInfoBody struct {\n\tReq    *types.ResetSystemHealthInfo         `xml:\"urn:vim25 ResetSystemHealthInfo,omitempty\"`\n\tRes    *types.ResetSystemHealthInfoResponse `xml:\"urn:vim25 ResetSystemHealthInfoResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ResetSystemHealthInfoBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ResetSystemHealthInfo(ctx context.Context, r soap.RoundTripper, req *types.ResetSystemHealthInfo) (*types.ResetSystemHealthInfoResponse, error) {\n\tvar reqBody, resBody ResetSystemHealthInfoBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ResetVM_TaskBody struct {\n\tReq    *types.ResetVM_Task         `xml:\"urn:vim25 ResetVM_Task,omitempty\"`\n\tRes    *types.ResetVM_TaskResponse `xml:\"urn:vim25 ResetVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ResetVM_TaskBody) 
Fault() *soap.Fault { return b.Fault_ }

// ResetVM_Task issues the ResetVM_Task SOAP call through the supplied RoundTripper.
func ResetVM_Task(ctx context.Context, r soap.RoundTripper, req *types.ResetVM_Task) (*types.ResetVM_TaskResponse, error) {
	request := ResetVM_TaskBody{Req: req}
	var response ResetVM_TaskBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// ResignatureUnresolvedVmfsVolume_TaskBody carries the request, response and fault for ResignatureUnresolvedVmfsVolume_Task.
type ResignatureUnresolvedVmfsVolume_TaskBody struct {
	Req    *types.ResignatureUnresolvedVmfsVolume_Task `xml:"urn:vim25 ResignatureUnresolvedVmfsVolume_Task,omitempty"`
	Res    *types.ResignatureUnresolvedVmfsVolume_TaskResponse `xml:"urn:vim25 ResignatureUnresolvedVmfsVolume_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *ResignatureUnresolvedVmfsVolume_TaskBody) Fault() *soap.Fault {
	return b.Fault_
}

// ResignatureUnresolvedVmfsVolume_Task issues the corresponding SOAP call through r.
func ResignatureUnresolvedVmfsVolume_Task(ctx context.Context, r soap.RoundTripper, req *types.ResignatureUnresolvedVmfsVolume_Task) (*types.ResignatureUnresolvedVmfsVolume_TaskResponse, error) {
	request := ResignatureUnresolvedVmfsVolume_TaskBody{Req: req}
	var response ResignatureUnresolvedVmfsVolume_TaskBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// ResolveInstallationErrorsOnCluster_TaskBody carries the request, response and fault for ResolveInstallationErrorsOnCluster_Task.
type ResolveInstallationErrorsOnCluster_TaskBody struct {
	Req    *types.ResolveInstallationErrorsOnCluster_Task `xml:"urn:vim25 ResolveInstallationErrorsOnCluster_Task,omitempty"`
	Res    *types.ResolveInstallationErrorsOnCluster_TaskResponse `xml:"urn:vim25 ResolveInstallationErrorsOnCluster_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *ResolveInstallationErrorsOnCluster_TaskBody) Fault() *soap.Fault {
	return b.Fault_
}

// ResolveInstallationErrorsOnCluster_Task issues the corresponding SOAP call through r.
func ResolveInstallationErrorsOnCluster_Task(ctx context.Context, r soap.RoundTripper, req *types.ResolveInstallationErrorsOnCluster_Task) (*types.ResolveInstallationErrorsOnCluster_TaskResponse, error) {
	request := ResolveInstallationErrorsOnCluster_TaskBody{Req: req}
	var response ResolveInstallationErrorsOnCluster_TaskBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// ResolveInstallationErrorsOnHost_TaskBody carries the request, response and fault for ResolveInstallationErrorsOnHost_Task.
type ResolveInstallationErrorsOnHost_TaskBody struct {
	Req    *types.ResolveInstallationErrorsOnHost_Task `xml:"urn:vim25 ResolveInstallationErrorsOnHost_Task,omitempty"`
	Res    *types.ResolveInstallationErrorsOnHost_TaskResponse `xml:"urn:vim25 ResolveInstallationErrorsOnHost_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *ResolveInstallationErrorsOnHost_TaskBody) Fault() *soap.Fault {
	return b.Fault_
}

// ResolveInstallationErrorsOnHost_Task issues the corresponding SOAP call through r.
func ResolveInstallationErrorsOnHost_Task(ctx context.Context, r soap.RoundTripper, req *types.ResolveInstallationErrorsOnHost_Task) (*types.ResolveInstallationErrorsOnHost_TaskResponse, error) {
	request := ResolveInstallationErrorsOnHost_TaskBody{Req: req}
	var response ResolveInstallationErrorsOnHost_TaskBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// ResolveMultipleUnresolvedVmfsVolumesBody carries the request, response and fault for ResolveMultipleUnresolvedVmfsVolumes.
type ResolveMultipleUnresolvedVmfsVolumesBody struct {
	Req    *types.ResolveMultipleUnresolvedVmfsVolumes `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumes,omitempty"`
	Res    *types.ResolveMultipleUnresolvedVmfsVolumesResponse `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumesResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *ResolveMultipleUnresolvedVmfsVolumesBody) Fault() *soap.Fault {
	return b.Fault_
}

// ResolveMultipleUnresolvedVmfsVolumes issues the corresponding SOAP call through r.
func ResolveMultipleUnresolvedVmfsVolumes(ctx context.Context, r soap.RoundTripper, req *types.ResolveMultipleUnresolvedVmfsVolumes) (*types.ResolveMultipleUnresolvedVmfsVolumesResponse, error) {
	request := ResolveMultipleUnresolvedVmfsVolumesBody{Req: req}
	var response ResolveMultipleUnresolvedVmfsVolumesBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// ResolveMultipleUnresolvedVmfsVolumesEx_TaskBody carries the request, response and fault for ResolveMultipleUnresolvedVmfsVolumesEx_Task.
type ResolveMultipleUnresolvedVmfsVolumesEx_TaskBody struct {
	Req    *types.ResolveMultipleUnresolvedVmfsVolumesEx_Task `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumesEx_Task,omitempty"`
	Res    *types.ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *ResolveMultipleUnresolvedVmfsVolumesEx_TaskBody) Fault() *soap.Fault {
	return b.Fault_
}

// ResolveMultipleUnresolvedVmfsVolumesEx_Task issues the corresponding SOAP call through r.
func ResolveMultipleUnresolvedVmfsVolumesEx_Task(ctx context.Context, r soap.RoundTripper, req *types.ResolveMultipleUnresolvedVmfsVolumesEx_Task) (*types.ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse, error) {
	request := ResolveMultipleUnresolvedVmfsVolumesEx_TaskBody{Req: req}
	var response ResolveMultipleUnresolvedVmfsVolumesEx_TaskBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RestartServiceBody carries the request, response and fault for RestartService.
type RestartServiceBody struct {
	Req    *types.RestartService `xml:"urn:vim25 RestartService,omitempty"`
	Res    *types.RestartServiceResponse `xml:"urn:vim25 RestartServiceResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RestartServiceBody) Fault() *soap.Fault {
	return b.Fault_
}

// RestartService issues the corresponding SOAP call through r.
func RestartService(ctx context.Context, r soap.RoundTripper, req *types.RestartService) (*types.RestartServiceResponse, error) {
	request := RestartServiceBody{Req: req}
	var response RestartServiceBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RestartServiceConsoleVirtualNicBody carries the request, response and fault for RestartServiceConsoleVirtualNic.
type RestartServiceConsoleVirtualNicBody struct {
	Req    *types.RestartServiceConsoleVirtualNic `xml:"urn:vim25 RestartServiceConsoleVirtualNic,omitempty"`
	Res    *types.RestartServiceConsoleVirtualNicResponse `xml:"urn:vim25 RestartServiceConsoleVirtualNicResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RestartServiceConsoleVirtualNicBody) Fault() *soap.Fault {
	return b.Fault_
}

// RestartServiceConsoleVirtualNic issues the corresponding SOAP call through r.
func RestartServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.RestartServiceConsoleVirtualNic) (*types.RestartServiceConsoleVirtualNicResponse, error) {
	request := RestartServiceConsoleVirtualNicBody{Req: req}
	var response RestartServiceConsoleVirtualNicBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RestoreFirmwareConfigurationBody carries the request, response and fault for RestoreFirmwareConfiguration.
type RestoreFirmwareConfigurationBody struct {
	Req    *types.RestoreFirmwareConfiguration `xml:"urn:vim25 RestoreFirmwareConfiguration,omitempty"`
	Res    *types.RestoreFirmwareConfigurationResponse `xml:"urn:vim25 RestoreFirmwareConfigurationResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RestoreFirmwareConfigurationBody) Fault() *soap.Fault {
	return b.Fault_
}

// RestoreFirmwareConfiguration issues the corresponding SOAP call through r.
func RestoreFirmwareConfiguration(ctx context.Context, r soap.RoundTripper, req *types.RestoreFirmwareConfiguration) (*types.RestoreFirmwareConfigurationResponse, error) {
	request := RestoreFirmwareConfigurationBody{Req: req}
	var response RestoreFirmwareConfigurationBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveAllPermissionsBody carries the request, response and fault for RetrieveAllPermissions.
type RetrieveAllPermissionsBody struct {
	Req    *types.RetrieveAllPermissions `xml:"urn:vim25 RetrieveAllPermissions,omitempty"`
	Res/**/
*types.RetrieveAllPermissionsResponse `xml:"urn:vim25 RetrieveAllPermissionsResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveAllPermissionsBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveAllPermissions issues the corresponding SOAP call through r.
func RetrieveAllPermissions(ctx context.Context, r soap.RoundTripper, req *types.RetrieveAllPermissions) (*types.RetrieveAllPermissionsResponse, error) {
	request := RetrieveAllPermissionsBody{Req: req}
	var response RetrieveAllPermissionsBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveAnswerFileBody carries the request, response and fault for RetrieveAnswerFile.
type RetrieveAnswerFileBody struct {
	Req    *types.RetrieveAnswerFile `xml:"urn:vim25 RetrieveAnswerFile,omitempty"`
	Res    *types.RetrieveAnswerFileResponse `xml:"urn:vim25 RetrieveAnswerFileResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveAnswerFileBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveAnswerFile issues the corresponding SOAP call through r.
func RetrieveAnswerFile(ctx context.Context, r soap.RoundTripper, req *types.RetrieveAnswerFile) (*types.RetrieveAnswerFileResponse, error) {
	request := RetrieveAnswerFileBody{Req: req}
	var response RetrieveAnswerFileBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveAnswerFileForProfileBody carries the request, response and fault for RetrieveAnswerFileForProfile.
type RetrieveAnswerFileForProfileBody struct {
	Req    *types.RetrieveAnswerFileForProfile `xml:"urn:vim25 RetrieveAnswerFileForProfile,omitempty"`
	Res    *types.RetrieveAnswerFileForProfileResponse `xml:"urn:vim25 RetrieveAnswerFileForProfileResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveAnswerFileForProfileBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveAnswerFileForProfile issues the corresponding SOAP call through r.
func RetrieveAnswerFileForProfile(ctx context.Context, r soap.RoundTripper, req *types.RetrieveAnswerFileForProfile) (*types.RetrieveAnswerFileForProfileResponse, error) {
	request := RetrieveAnswerFileForProfileBody{Req: req}
	var response RetrieveAnswerFileForProfileBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveArgumentDescriptionBody carries the request, response and fault for RetrieveArgumentDescription.
type RetrieveArgumentDescriptionBody struct {
	Req    *types.RetrieveArgumentDescription `xml:"urn:vim25 RetrieveArgumentDescription,omitempty"`
	Res    *types.RetrieveArgumentDescriptionResponse `xml:"urn:vim25 RetrieveArgumentDescriptionResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveArgumentDescriptionBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveArgumentDescription issues the corresponding SOAP call through r.
func RetrieveArgumentDescription(ctx context.Context, r soap.RoundTripper, req *types.RetrieveArgumentDescription) (*types.RetrieveArgumentDescriptionResponse, error) {
	request := RetrieveArgumentDescriptionBody{Req: req}
	var response RetrieveArgumentDescriptionBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveClientCertBody carries the request, response and fault for RetrieveClientCert.
type RetrieveClientCertBody struct {
	Req    *types.RetrieveClientCert `xml:"urn:vim25 RetrieveClientCert,omitempty"`
	Res    *types.RetrieveClientCertResponse `xml:"urn:vim25 RetrieveClientCertResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveClientCertBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveClientCert issues the corresponding SOAP call through r.
func RetrieveClientCert(ctx context.Context, r soap.RoundTripper, req *types.RetrieveClientCert) (*types.RetrieveClientCertResponse, error) {
	request := RetrieveClientCertBody{Req: req}
	var response RetrieveClientCertBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveClientCsrBody carries the request, response and fault for RetrieveClientCsr.
type RetrieveClientCsrBody struct {
	Req    *types.RetrieveClientCsr `xml:"urn:vim25 RetrieveClientCsr,omitempty"`
	Res    *types.RetrieveClientCsrResponse `xml:"urn:vim25 RetrieveClientCsrResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveClientCsrBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveClientCsr issues the corresponding SOAP call through r.
func RetrieveClientCsr(ctx context.Context, r soap.RoundTripper, req *types.RetrieveClientCsr) (*types.RetrieveClientCsrResponse, error) {
	request := RetrieveClientCsrBody{Req: req}
	var response RetrieveClientCsrBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveDasAdvancedRuntimeInfoBody carries the request, response and fault for RetrieveDasAdvancedRuntimeInfo.
type RetrieveDasAdvancedRuntimeInfoBody struct {
	Req    *types.RetrieveDasAdvancedRuntimeInfo `xml:"urn:vim25 RetrieveDasAdvancedRuntimeInfo,omitempty"`
	Res    *types.RetrieveDasAdvancedRuntimeInfoResponse `xml:"urn:vim25 RetrieveDasAdvancedRuntimeInfoResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveDasAdvancedRuntimeInfoBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveDasAdvancedRuntimeInfo issues the corresponding SOAP call through r.
func RetrieveDasAdvancedRuntimeInfo(ctx context.Context, r soap.RoundTripper, req *types.RetrieveDasAdvancedRuntimeInfo) (*types.RetrieveDasAdvancedRuntimeInfoResponse, error) {
	request := RetrieveDasAdvancedRuntimeInfoBody{Req: req}
	var response RetrieveDasAdvancedRuntimeInfoBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveDescriptionBody carries the request, response and fault for RetrieveDescription.
type RetrieveDescriptionBody struct {
	Req    *types.RetrieveDescription `xml:"urn:vim25 RetrieveDescription,omitempty"`
	Res    *types.RetrieveDescriptionResponse `xml:"urn:vim25 RetrieveDescriptionResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveDescriptionBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveDescription issues the corresponding SOAP call through r.
func RetrieveDescription(ctx context.Context, r soap.RoundTripper, req *types.RetrieveDescription) (*types.RetrieveDescriptionResponse, error) {
	request := RetrieveDescriptionBody{Req: req}
	var response RetrieveDescriptionBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveDiskPartitionInfoBody carries the request, response and fault for RetrieveDiskPartitionInfo.
type RetrieveDiskPartitionInfoBody struct {
	Req    *types.RetrieveDiskPartitionInfo `xml:"urn:vim25 RetrieveDiskPartitionInfo,omitempty"`
	Res    *types.RetrieveDiskPartitionInfoResponse `xml:"urn:vim25 RetrieveDiskPartitionInfoResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveDiskPartitionInfoBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveDiskPartitionInfo issues the corresponding SOAP call through r.
func RetrieveDiskPartitionInfo(ctx context.Context, r soap.RoundTripper, req *types.RetrieveDiskPartitionInfo) (*types.RetrieveDiskPartitionInfoResponse, error) {
	request := RetrieveDiskPartitionInfoBody{Req: req}
	var response RetrieveDiskPartitionInfoBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveEntityPermissionsBody carries the request, response and fault for RetrieveEntityPermissions.
type RetrieveEntityPermissionsBody struct {
	Req    *types.RetrieveEntityPermissions `xml:"urn:vim25 RetrieveEntityPermissions,omitempty"`
	Res    *types.RetrieveEntityPermissionsResponse `xml:"urn:vim25 RetrieveEntityPermissionsResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveEntityPermissionsBody) Fault() *soap.Fault {
	return b.Fault_
}

func RetrieveEntityPermissions(ctx context.Context, r soap.RoundTripper, req *types.RetrieveEntityPermissions) (*types.RetrieveEntityPermissionsResponse, error) {
	var reqBody, resBody RetrieveEntityPermissionsBody

	reqBody.Req = req

	if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {
		return nil,
err
	}

	return resBody.Res, nil
}

// RetrieveEntityScheduledTaskBody carries the request, response and fault for RetrieveEntityScheduledTask.
type RetrieveEntityScheduledTaskBody struct {
	Req    *types.RetrieveEntityScheduledTask `xml:"urn:vim25 RetrieveEntityScheduledTask,omitempty"`
	Res    *types.RetrieveEntityScheduledTaskResponse `xml:"urn:vim25 RetrieveEntityScheduledTaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveEntityScheduledTaskBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveEntityScheduledTask issues the corresponding SOAP call through r.
func RetrieveEntityScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.RetrieveEntityScheduledTask) (*types.RetrieveEntityScheduledTaskResponse, error) {
	request := RetrieveEntityScheduledTaskBody{Req: req}
	var response RetrieveEntityScheduledTaskBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveHardwareUptimeBody carries the request, response and fault for RetrieveHardwareUptime.
type RetrieveHardwareUptimeBody struct {
	Req    *types.RetrieveHardwareUptime `xml:"urn:vim25 RetrieveHardwareUptime,omitempty"`
	Res    *types.RetrieveHardwareUptimeResponse `xml:"urn:vim25 RetrieveHardwareUptimeResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveHardwareUptimeBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveHardwareUptime issues the corresponding SOAP call through r.
func RetrieveHardwareUptime(ctx context.Context, r soap.RoundTripper, req *types.RetrieveHardwareUptime) (*types.RetrieveHardwareUptimeResponse, error) {
	request := RetrieveHardwareUptimeBody{Req: req}
	var response RetrieveHardwareUptimeBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveHostAccessControlEntriesBody carries the request, response and fault for RetrieveHostAccessControlEntries.
type RetrieveHostAccessControlEntriesBody struct {
	Req    *types.RetrieveHostAccessControlEntries `xml:"urn:vim25 RetrieveHostAccessControlEntries,omitempty"`
	Res    *types.RetrieveHostAccessControlEntriesResponse `xml:"urn:vim25 RetrieveHostAccessControlEntriesResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveHostAccessControlEntriesBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveHostAccessControlEntries issues the corresponding SOAP call through r.
func RetrieveHostAccessControlEntries(ctx context.Context, r soap.RoundTripper, req *types.RetrieveHostAccessControlEntries) (*types.RetrieveHostAccessControlEntriesResponse, error) {
	request := RetrieveHostAccessControlEntriesBody{Req: req}
	var response RetrieveHostAccessControlEntriesBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveHostCustomizationsBody carries the request, response and fault for RetrieveHostCustomizations.
type RetrieveHostCustomizationsBody struct {
	Req    *types.RetrieveHostCustomizations `xml:"urn:vim25 RetrieveHostCustomizations,omitempty"`
	Res    *types.RetrieveHostCustomizationsResponse `xml:"urn:vim25 RetrieveHostCustomizationsResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveHostCustomizationsBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveHostCustomizations issues the corresponding SOAP call through r.
func RetrieveHostCustomizations(ctx context.Context, r soap.RoundTripper, req *types.RetrieveHostCustomizations) (*types.RetrieveHostCustomizationsResponse, error) {
	request := RetrieveHostCustomizationsBody{Req: req}
	var response RetrieveHostCustomizationsBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveHostCustomizationsForProfileBody carries the request, response and fault for RetrieveHostCustomizationsForProfile.
type RetrieveHostCustomizationsForProfileBody struct {
	Req    *types.RetrieveHostCustomizationsForProfile `xml:"urn:vim25 RetrieveHostCustomizationsForProfile,omitempty"`
	Res    *types.RetrieveHostCustomizationsForProfileResponse `xml:"urn:vim25 RetrieveHostCustomizationsForProfileResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveHostCustomizationsForProfileBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveHostCustomizationsForProfile issues the corresponding SOAP call through r.
func RetrieveHostCustomizationsForProfile(ctx context.Context, r soap.RoundTripper, req *types.RetrieveHostCustomizationsForProfile) (*types.RetrieveHostCustomizationsForProfileResponse, error) {
	request := RetrieveHostCustomizationsForProfileBody{Req: req}
	var response RetrieveHostCustomizationsForProfileBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveHostSpecificationBody carries the request, response and fault for RetrieveHostSpecification.
type RetrieveHostSpecificationBody struct {
	Req    *types.RetrieveHostSpecification `xml:"urn:vim25 RetrieveHostSpecification,omitempty"`
	Res    *types.RetrieveHostSpecificationResponse `xml:"urn:vim25 RetrieveHostSpecificationResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveHostSpecificationBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveHostSpecification issues the corresponding SOAP call through r.
func RetrieveHostSpecification(ctx context.Context, r soap.RoundTripper, req *types.RetrieveHostSpecification) (*types.RetrieveHostSpecificationResponse, error) {
	request := RetrieveHostSpecificationBody{Req: req}
	var response RetrieveHostSpecificationBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveKmipServerCertBody carries the request, response and fault for RetrieveKmipServerCert.
type RetrieveKmipServerCertBody struct {
	Req    *types.RetrieveKmipServerCert `xml:"urn:vim25 RetrieveKmipServerCert,omitempty"`
	Res    *types.RetrieveKmipServerCertResponse `xml:"urn:vim25 RetrieveKmipServerCertResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveKmipServerCertBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveKmipServerCert issues the corresponding SOAP call through r.
func RetrieveKmipServerCert(ctx context.Context, r soap.RoundTripper, req *types.RetrieveKmipServerCert) (*types.RetrieveKmipServerCertResponse, error) {
	request := RetrieveKmipServerCertBody{Req: req}
	var response RetrieveKmipServerCertBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveKmipServersStatus_TaskBody carries the request, response and fault for RetrieveKmipServersStatus_Task.
type RetrieveKmipServersStatus_TaskBody struct {
	Req    *types.RetrieveKmipServersStatus_Task `xml:"urn:vim25 RetrieveKmipServersStatus_Task,omitempty"`
	Res    *types.RetrieveKmipServersStatus_TaskResponse `xml:"urn:vim25 RetrieveKmipServersStatus_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveKmipServersStatus_TaskBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveKmipServersStatus_Task issues the corresponding SOAP call through r.
func RetrieveKmipServersStatus_Task(ctx context.Context, r soap.RoundTripper, req *types.RetrieveKmipServersStatus_Task) (*types.RetrieveKmipServersStatus_TaskResponse, error) {
	request := RetrieveKmipServersStatus_TaskBody{Req: req}
	var response RetrieveKmipServersStatus_TaskBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveObjectScheduledTaskBody carries the request, response and fault for RetrieveObjectScheduledTask.
type RetrieveObjectScheduledTaskBody struct {
	Req    *types.RetrieveObjectScheduledTask `xml:"urn:vim25 RetrieveObjectScheduledTask,omitempty"`
	Res    *types.RetrieveObjectScheduledTaskResponse `xml:"urn:vim25 RetrieveObjectScheduledTaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveObjectScheduledTaskBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveObjectScheduledTask issues the corresponding SOAP call through r.
func RetrieveObjectScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.RetrieveObjectScheduledTask) (*types.RetrieveObjectScheduledTaskResponse, error) {
	request := RetrieveObjectScheduledTaskBody{Req: req}
	var response RetrieveObjectScheduledTaskBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveProductComponentsBody carries the request, response and fault for RetrieveProductComponents.
type RetrieveProductComponentsBody struct {
	Req/**/
*types.RetrieveProductComponents `xml:"urn:vim25 RetrieveProductComponents,omitempty"`
	Res    *types.RetrieveProductComponentsResponse `xml:"urn:vim25 RetrieveProductComponentsResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveProductComponentsBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveProductComponents issues the corresponding SOAP call through r.
func RetrieveProductComponents(ctx context.Context, r soap.RoundTripper, req *types.RetrieveProductComponents) (*types.RetrieveProductComponentsResponse, error) {
	request := RetrieveProductComponentsBody{Req: req}
	var response RetrieveProductComponentsBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrievePropertiesBody carries the request, response and fault for RetrieveProperties.
type RetrievePropertiesBody struct {
	Req    *types.RetrieveProperties `xml:"urn:vim25 RetrieveProperties,omitempty"`
	Res    *types.RetrievePropertiesResponse `xml:"urn:vim25 RetrievePropertiesResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrievePropertiesBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveProperties issues the corresponding SOAP call through r.
func RetrieveProperties(ctx context.Context, r soap.RoundTripper, req *types.RetrieveProperties) (*types.RetrievePropertiesResponse, error) {
	request := RetrievePropertiesBody{Req: req}
	var response RetrievePropertiesBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrievePropertiesExBody carries the request, response and fault for RetrievePropertiesEx.
type RetrievePropertiesExBody struct {
	Req    *types.RetrievePropertiesEx `xml:"urn:vim25 RetrievePropertiesEx,omitempty"`
	Res    *types.RetrievePropertiesExResponse `xml:"urn:vim25 RetrievePropertiesExResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrievePropertiesExBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrievePropertiesEx issues the corresponding SOAP call through r.
func RetrievePropertiesEx(ctx context.Context, r soap.RoundTripper, req *types.RetrievePropertiesEx) (*types.RetrievePropertiesExResponse, error) {
	request := RetrievePropertiesExBody{Req: req}
	var response RetrievePropertiesExBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveRolePermissionsBody carries the request, response and fault for RetrieveRolePermissions.
type RetrieveRolePermissionsBody struct {
	Req    *types.RetrieveRolePermissions `xml:"urn:vim25 RetrieveRolePermissions,omitempty"`
	Res    *types.RetrieveRolePermissionsResponse `xml:"urn:vim25 RetrieveRolePermissionsResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveRolePermissionsBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveRolePermissions issues the corresponding SOAP call through r.
func RetrieveRolePermissions(ctx context.Context, r soap.RoundTripper, req *types.RetrieveRolePermissions) (*types.RetrieveRolePermissionsResponse, error) {
	request := RetrieveRolePermissionsBody{Req: req}
	var response RetrieveRolePermissionsBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveSelfSignedClientCertBody carries the request, response and fault for RetrieveSelfSignedClientCert.
type RetrieveSelfSignedClientCertBody struct {
	Req    *types.RetrieveSelfSignedClientCert `xml:"urn:vim25 RetrieveSelfSignedClientCert,omitempty"`
	Res    *types.RetrieveSelfSignedClientCertResponse `xml:"urn:vim25 RetrieveSelfSignedClientCertResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveSelfSignedClientCertBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveSelfSignedClientCert issues the corresponding SOAP call through r.
func RetrieveSelfSignedClientCert(ctx context.Context, r soap.RoundTripper, req *types.RetrieveSelfSignedClientCert) (*types.RetrieveSelfSignedClientCertResponse, error) {
	request := RetrieveSelfSignedClientCertBody{Req: req}
	var response RetrieveSelfSignedClientCertBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveServiceContentBody carries the request, response and fault for RetrieveServiceContent.
type RetrieveServiceContentBody struct {
	Req    *types.RetrieveServiceContent `xml:"urn:vim25 RetrieveServiceContent,omitempty"`
	Res    *types.RetrieveServiceContentResponse `xml:"urn:vim25 RetrieveServiceContentResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveServiceContentBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveServiceContent issues the corresponding SOAP call through r.
func RetrieveServiceContent(ctx context.Context, r soap.RoundTripper, req *types.RetrieveServiceContent) (*types.RetrieveServiceContentResponse, error) {
	request := RetrieveServiceContentBody{Req: req}
	var response RetrieveServiceContentBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveUserGroupsBody carries the request, response and fault for RetrieveUserGroups.
type RetrieveUserGroupsBody struct {
	Req    *types.RetrieveUserGroups `xml:"urn:vim25 RetrieveUserGroups,omitempty"`
	Res    *types.RetrieveUserGroupsResponse `xml:"urn:vim25 RetrieveUserGroupsResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveUserGroupsBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveUserGroups issues the corresponding SOAP call through r.
func RetrieveUserGroups(ctx context.Context, r soap.RoundTripper, req *types.RetrieveUserGroups) (*types.RetrieveUserGroupsResponse, error) {
	request := RetrieveUserGroupsBody{Req: req}
	var response RetrieveUserGroupsBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveVStorageObjectBody carries the request, response and fault for RetrieveVStorageObject.
type RetrieveVStorageObjectBody struct {
	Req    *types.RetrieveVStorageObject `xml:"urn:vim25 RetrieveVStorageObject,omitempty"`
	Res    *types.RetrieveVStorageObjectResponse `xml:"urn:vim25 RetrieveVStorageObjectResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveVStorageObjectBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveVStorageObject issues the corresponding SOAP call through r.
func RetrieveVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.RetrieveVStorageObject) (*types.RetrieveVStorageObjectResponse, error) {
	request := RetrieveVStorageObjectBody{Req: req}
	var response RetrieveVStorageObjectBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RetrieveVStorageObjectStateBody carries the request, response and fault for RetrieveVStorageObjectState.
type RetrieveVStorageObjectStateBody struct {
	Req    *types.RetrieveVStorageObjectState `xml:"urn:vim25 RetrieveVStorageObjectState,omitempty"`
	Res    *types.RetrieveVStorageObjectStateResponse `xml:"urn:vim25 RetrieveVStorageObjectStateResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RetrieveVStorageObjectStateBody) Fault() *soap.Fault {
	return b.Fault_
}

// RetrieveVStorageObjectState issues the corresponding SOAP call through r.
func RetrieveVStorageObjectState(ctx context.Context, r soap.RoundTripper, req *types.RetrieveVStorageObjectState) (*types.RetrieveVStorageObjectStateResponse, error) {
	request := RetrieveVStorageObjectStateBody{Req: req}
	var response RetrieveVStorageObjectStateBody
	if err := r.RoundTrip(ctx, &request, &response); err != nil {
		return nil, err
	}
	return response.Res, nil
}

// RevertToCurrentSnapshot_TaskBody carries the request, response and fault for RevertToCurrentSnapshot_Task.
type RevertToCurrentSnapshot_TaskBody struct {
	Req    *types.RevertToCurrentSnapshot_Task `xml:"urn:vim25 RevertToCurrentSnapshot_Task,omitempty"`
	Res    *types.RevertToCurrentSnapshot_TaskResponse `xml:"urn:vim25 RevertToCurrentSnapshot_TaskResponse,omitempty"`
	Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"`
}

func (b *RevertToCurrentSnapshot_TaskBody) Fault() *soap.Fault {
	return b.Fault_
}

func RevertToCurrentSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.RevertToCurrentSnapshot_Task) (*types.RevertToCurrentSnapshot_TaskResponse, error) {
	var reqBody, resBody/**/
RevertToCurrentSnapshot_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RevertToSnapshot_TaskBody struct {\n\tReq    *types.RevertToSnapshot_Task         `xml:\"urn:vim25 RevertToSnapshot_Task,omitempty\"`\n\tRes    *types.RevertToSnapshot_TaskResponse `xml:\"urn:vim25 RevertToSnapshot_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RevertToSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RevertToSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.RevertToSnapshot_Task) (*types.RevertToSnapshot_TaskResponse, error) {\n\tvar reqBody, resBody RevertToSnapshot_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RewindCollectorBody struct {\n\tReq    *types.RewindCollector         `xml:\"urn:vim25 RewindCollector,omitempty\"`\n\tRes    *types.RewindCollectorResponse `xml:\"urn:vim25 RewindCollectorResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RewindCollectorBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RewindCollector(ctx context.Context, r soap.RoundTripper, req *types.RewindCollector) (*types.RewindCollectorResponse, error) {\n\tvar reqBody, resBody RewindCollectorBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RunScheduledTaskBody struct {\n\tReq    *types.RunScheduledTask         `xml:\"urn:vim25 RunScheduledTask,omitempty\"`\n\tRes    *types.RunScheduledTaskResponse `xml:\"urn:vim25 RunScheduledTaskResponse,omitempty\"`\n\tFault_ *soap.Fault                     
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RunScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RunScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.RunScheduledTask) (*types.RunScheduledTaskResponse, error) {\n\tvar reqBody, resBody RunScheduledTaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype RunVsanPhysicalDiskDiagnosticsBody struct {\n\tReq    *types.RunVsanPhysicalDiskDiagnostics         `xml:\"urn:vim25 RunVsanPhysicalDiskDiagnostics,omitempty\"`\n\tRes    *types.RunVsanPhysicalDiskDiagnosticsResponse `xml:\"urn:vim25 RunVsanPhysicalDiskDiagnosticsResponse,omitempty\"`\n\tFault_ *soap.Fault                                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *RunVsanPhysicalDiskDiagnosticsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc RunVsanPhysicalDiskDiagnostics(ctx context.Context, r soap.RoundTripper, req *types.RunVsanPhysicalDiskDiagnostics) (*types.RunVsanPhysicalDiskDiagnosticsResponse, error) {\n\tvar reqBody, resBody RunVsanPhysicalDiskDiagnosticsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ScanHostPatchV2_TaskBody struct {\n\tReq    *types.ScanHostPatchV2_Task         `xml:\"urn:vim25 ScanHostPatchV2_Task,omitempty\"`\n\tRes    *types.ScanHostPatchV2_TaskResponse `xml:\"urn:vim25 ScanHostPatchV2_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ScanHostPatchV2_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ScanHostPatchV2_Task(ctx context.Context, r soap.RoundTripper, req *types.ScanHostPatchV2_Task) (*types.ScanHostPatchV2_TaskResponse, error) {\n\tvar reqBody, resBody 
ScanHostPatchV2_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ScanHostPatch_TaskBody struct {\n\tReq    *types.ScanHostPatch_Task         `xml:\"urn:vim25 ScanHostPatch_Task,omitempty\"`\n\tRes    *types.ScanHostPatch_TaskResponse `xml:\"urn:vim25 ScanHostPatch_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ScanHostPatch_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ScanHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.ScanHostPatch_Task) (*types.ScanHostPatch_TaskResponse, error) {\n\tvar reqBody, resBody ScanHostPatch_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ScheduleReconcileDatastoreInventoryBody struct {\n\tReq    *types.ScheduleReconcileDatastoreInventory         `xml:\"urn:vim25 ScheduleReconcileDatastoreInventory,omitempty\"`\n\tRes    *types.ScheduleReconcileDatastoreInventoryResponse `xml:\"urn:vim25 ScheduleReconcileDatastoreInventoryResponse,omitempty\"`\n\tFault_ *soap.Fault                                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ScheduleReconcileDatastoreInventoryBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ScheduleReconcileDatastoreInventory(ctx context.Context, r soap.RoundTripper, req *types.ScheduleReconcileDatastoreInventory) (*types.ScheduleReconcileDatastoreInventoryResponse, error) {\n\tvar reqBody, resBody ScheduleReconcileDatastoreInventoryBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SearchDatastoreSubFolders_TaskBody struct {\n\tReq    *types.SearchDatastoreSubFolders_Task         
`xml:\"urn:vim25 SearchDatastoreSubFolders_Task,omitempty\"`\n\tRes    *types.SearchDatastoreSubFolders_TaskResponse `xml:\"urn:vim25 SearchDatastoreSubFolders_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SearchDatastoreSubFolders_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SearchDatastoreSubFolders_Task(ctx context.Context, r soap.RoundTripper, req *types.SearchDatastoreSubFolders_Task) (*types.SearchDatastoreSubFolders_TaskResponse, error) {\n\tvar reqBody, resBody SearchDatastoreSubFolders_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SearchDatastore_TaskBody struct {\n\tReq    *types.SearchDatastore_Task         `xml:\"urn:vim25 SearchDatastore_Task,omitempty\"`\n\tRes    *types.SearchDatastore_TaskResponse `xml:\"urn:vim25 SearchDatastore_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SearchDatastore_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SearchDatastore_Task(ctx context.Context, r soap.RoundTripper, req *types.SearchDatastore_Task) (*types.SearchDatastore_TaskResponse, error) {\n\tvar reqBody, resBody SearchDatastore_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SelectActivePartitionBody struct {\n\tReq    *types.SelectActivePartition         `xml:\"urn:vim25 SelectActivePartition,omitempty\"`\n\tRes    *types.SelectActivePartitionResponse `xml:\"urn:vim25 SelectActivePartitionResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SelectActivePartitionBody) Fault() *soap.Fault 
{ return b.Fault_ }\n\nfunc SelectActivePartition(ctx context.Context, r soap.RoundTripper, req *types.SelectActivePartition) (*types.SelectActivePartitionResponse, error) {\n\tvar reqBody, resBody SelectActivePartitionBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SelectVnicBody struct {\n\tReq    *types.SelectVnic         `xml:\"urn:vim25 SelectVnic,omitempty\"`\n\tRes    *types.SelectVnicResponse `xml:\"urn:vim25 SelectVnicResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SelectVnicBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SelectVnic(ctx context.Context, r soap.RoundTripper, req *types.SelectVnic) (*types.SelectVnicResponse, error) {\n\tvar reqBody, resBody SelectVnicBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SelectVnicForNicTypeBody struct {\n\tReq    *types.SelectVnicForNicType         `xml:\"urn:vim25 SelectVnicForNicType,omitempty\"`\n\tRes    *types.SelectVnicForNicTypeResponse `xml:\"urn:vim25 SelectVnicForNicTypeResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SelectVnicForNicTypeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SelectVnicForNicType(ctx context.Context, r soap.RoundTripper, req *types.SelectVnicForNicType) (*types.SelectVnicForNicTypeResponse, error) {\n\tvar reqBody, resBody SelectVnicForNicTypeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SendNMIBody struct {\n\tReq    *types.SendNMI         `xml:\"urn:vim25 SendNMI,omitempty\"`\n\tRes    *types.SendNMIResponse `xml:\"urn:vim25 
SendNMIResponse,omitempty\"`\n\tFault_ *soap.Fault            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SendNMIBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SendNMI(ctx context.Context, r soap.RoundTripper, req *types.SendNMI) (*types.SendNMIResponse, error) {\n\tvar reqBody, resBody SendNMIBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SendTestNotificationBody struct {\n\tReq    *types.SendTestNotification         `xml:\"urn:vim25 SendTestNotification,omitempty\"`\n\tRes    *types.SendTestNotificationResponse `xml:\"urn:vim25 SendTestNotificationResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SendTestNotificationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SendTestNotification(ctx context.Context, r soap.RoundTripper, req *types.SendTestNotification) (*types.SendTestNotificationResponse, error) {\n\tvar reqBody, resBody SendTestNotificationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SessionIsActiveBody struct {\n\tReq    *types.SessionIsActive         `xml:\"urn:vim25 SessionIsActive,omitempty\"`\n\tRes    *types.SessionIsActiveResponse `xml:\"urn:vim25 SessionIsActiveResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SessionIsActiveBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SessionIsActive(ctx context.Context, r soap.RoundTripper, req *types.SessionIsActive) (*types.SessionIsActiveResponse, error) {\n\tvar reqBody, resBody SessionIsActiveBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn 
resBody.Res, nil\n}\n\ntype SetCollectorPageSizeBody struct {\n\tReq    *types.SetCollectorPageSize         `xml:\"urn:vim25 SetCollectorPageSize,omitempty\"`\n\tRes    *types.SetCollectorPageSizeResponse `xml:\"urn:vim25 SetCollectorPageSizeResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SetCollectorPageSizeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SetCollectorPageSize(ctx context.Context, r soap.RoundTripper, req *types.SetCollectorPageSize) (*types.SetCollectorPageSizeResponse, error) {\n\tvar reqBody, resBody SetCollectorPageSizeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SetDisplayTopologyBody struct {\n\tReq    *types.SetDisplayTopology         `xml:\"urn:vim25 SetDisplayTopology,omitempty\"`\n\tRes    *types.SetDisplayTopologyResponse `xml:\"urn:vim25 SetDisplayTopologyResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SetDisplayTopologyBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SetDisplayTopology(ctx context.Context, r soap.RoundTripper, req *types.SetDisplayTopology) (*types.SetDisplayTopologyResponse, error) {\n\tvar reqBody, resBody SetDisplayTopologyBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SetEntityPermissionsBody struct {\n\tReq    *types.SetEntityPermissions         `xml:\"urn:vim25 SetEntityPermissions,omitempty\"`\n\tRes    *types.SetEntityPermissionsResponse `xml:\"urn:vim25 SetEntityPermissionsResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SetEntityPermissionsBody) Fault() *soap.Fault { return 
b.Fault_ }\n\nfunc SetEntityPermissions(ctx context.Context, r soap.RoundTripper, req *types.SetEntityPermissions) (*types.SetEntityPermissionsResponse, error) {\n\tvar reqBody, resBody SetEntityPermissionsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SetExtensionCertificateBody struct {\n\tReq    *types.SetExtensionCertificate         `xml:\"urn:vim25 SetExtensionCertificate,omitempty\"`\n\tRes    *types.SetExtensionCertificateResponse `xml:\"urn:vim25 SetExtensionCertificateResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SetExtensionCertificateBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SetExtensionCertificate(ctx context.Context, r soap.RoundTripper, req *types.SetExtensionCertificate) (*types.SetExtensionCertificateResponse, error) {\n\tvar reqBody, resBody SetExtensionCertificateBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SetFieldBody struct {\n\tReq    *types.SetField         `xml:\"urn:vim25 SetField,omitempty\"`\n\tRes    *types.SetFieldResponse `xml:\"urn:vim25 SetFieldResponse,omitempty\"`\n\tFault_ *soap.Fault             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SetFieldBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SetField(ctx context.Context, r soap.RoundTripper, req *types.SetField) (*types.SetFieldResponse, error) {\n\tvar reqBody, resBody SetFieldBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SetLicenseEditionBody struct {\n\tReq    *types.SetLicenseEdition         `xml:\"urn:vim25 SetLicenseEdition,omitempty\"`\n\tRes    
*types.SetLicenseEditionResponse `xml:\"urn:vim25 SetLicenseEditionResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SetLicenseEditionBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SetLicenseEdition(ctx context.Context, r soap.RoundTripper, req *types.SetLicenseEdition) (*types.SetLicenseEditionResponse, error) {\n\tvar reqBody, resBody SetLicenseEditionBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SetLocaleBody struct {\n\tReq    *types.SetLocale         `xml:\"urn:vim25 SetLocale,omitempty\"`\n\tRes    *types.SetLocaleResponse `xml:\"urn:vim25 SetLocaleResponse,omitempty\"`\n\tFault_ *soap.Fault              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SetLocaleBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SetLocale(ctx context.Context, r soap.RoundTripper, req *types.SetLocale) (*types.SetLocaleResponse, error) {\n\tvar reqBody, resBody SetLocaleBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SetMultipathLunPolicyBody struct {\n\tReq    *types.SetMultipathLunPolicy         `xml:\"urn:vim25 SetMultipathLunPolicy,omitempty\"`\n\tRes    *types.SetMultipathLunPolicyResponse `xml:\"urn:vim25 SetMultipathLunPolicyResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SetMultipathLunPolicyBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SetMultipathLunPolicy(ctx context.Context, r soap.RoundTripper, req *types.SetMultipathLunPolicy) (*types.SetMultipathLunPolicyResponse, error) {\n\tvar reqBody, resBody SetMultipathLunPolicyBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, 
&resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SetNFSUserBody struct {\n\tReq    *types.SetNFSUser         `xml:\"urn:vim25 SetNFSUser,omitempty\"`\n\tRes    *types.SetNFSUserResponse `xml:\"urn:vim25 SetNFSUserResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SetNFSUserBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SetNFSUser(ctx context.Context, r soap.RoundTripper, req *types.SetNFSUser) (*types.SetNFSUserResponse, error) {\n\tvar reqBody, resBody SetNFSUserBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SetPublicKeyBody struct {\n\tReq    *types.SetPublicKey         `xml:\"urn:vim25 SetPublicKey,omitempty\"`\n\tRes    *types.SetPublicKeyResponse `xml:\"urn:vim25 SetPublicKeyResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SetPublicKeyBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SetPublicKey(ctx context.Context, r soap.RoundTripper, req *types.SetPublicKey) (*types.SetPublicKeyResponse, error) {\n\tvar reqBody, resBody SetPublicKeyBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SetRegistryValueInGuestBody struct {\n\tReq    *types.SetRegistryValueInGuest         `xml:\"urn:vim25 SetRegistryValueInGuest,omitempty\"`\n\tRes    *types.SetRegistryValueInGuestResponse `xml:\"urn:vim25 SetRegistryValueInGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SetRegistryValueInGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SetRegistryValueInGuest(ctx context.Context, r soap.RoundTripper, req 
*types.SetRegistryValueInGuest) (*types.SetRegistryValueInGuestResponse, error) {\n\tvar reqBody, resBody SetRegistryValueInGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SetScreenResolutionBody struct {\n\tReq    *types.SetScreenResolution         `xml:\"urn:vim25 SetScreenResolution,omitempty\"`\n\tRes    *types.SetScreenResolutionResponse `xml:\"urn:vim25 SetScreenResolutionResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SetScreenResolutionBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SetScreenResolution(ctx context.Context, r soap.RoundTripper, req *types.SetScreenResolution) (*types.SetScreenResolutionResponse, error) {\n\tvar reqBody, resBody SetScreenResolutionBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SetTaskDescriptionBody struct {\n\tReq    *types.SetTaskDescription         `xml:\"urn:vim25 SetTaskDescription,omitempty\"`\n\tRes    *types.SetTaskDescriptionResponse `xml:\"urn:vim25 SetTaskDescriptionResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SetTaskDescriptionBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SetTaskDescription(ctx context.Context, r soap.RoundTripper, req *types.SetTaskDescription) (*types.SetTaskDescriptionResponse, error) {\n\tvar reqBody, resBody SetTaskDescriptionBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SetTaskStateBody struct {\n\tReq    *types.SetTaskState         `xml:\"urn:vim25 SetTaskState,omitempty\"`\n\tRes    *types.SetTaskStateResponse `xml:\"urn:vim25 
SetTaskStateResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SetTaskStateBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SetTaskState(ctx context.Context, r soap.RoundTripper, req *types.SetTaskState) (*types.SetTaskStateResponse, error) {\n\tvar reqBody, resBody SetTaskStateBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SetVirtualDiskUuidBody struct {\n\tReq    *types.SetVirtualDiskUuid         `xml:\"urn:vim25 SetVirtualDiskUuid,omitempty\"`\n\tRes    *types.SetVirtualDiskUuidResponse `xml:\"urn:vim25 SetVirtualDiskUuidResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SetVirtualDiskUuidBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SetVirtualDiskUuid(ctx context.Context, r soap.RoundTripper, req *types.SetVirtualDiskUuid) (*types.SetVirtualDiskUuidResponse, error) {\n\tvar reqBody, resBody SetVirtualDiskUuidBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ShrinkVirtualDisk_TaskBody struct {\n\tReq    *types.ShrinkVirtualDisk_Task         `xml:\"urn:vim25 ShrinkVirtualDisk_Task,omitempty\"`\n\tRes    *types.ShrinkVirtualDisk_TaskResponse `xml:\"urn:vim25 ShrinkVirtualDisk_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ShrinkVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ShrinkVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.ShrinkVirtualDisk_Task) (*types.ShrinkVirtualDisk_TaskResponse, error) {\n\tvar reqBody, resBody ShrinkVirtualDisk_TaskBody\n\n\treqBody.Req = req\n\n\tif err := 
r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ShutdownGuestBody struct {\n\tReq    *types.ShutdownGuest         `xml:\"urn:vim25 ShutdownGuest,omitempty\"`\n\tRes    *types.ShutdownGuestResponse `xml:\"urn:vim25 ShutdownGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ShutdownGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ShutdownGuest(ctx context.Context, r soap.RoundTripper, req *types.ShutdownGuest) (*types.ShutdownGuestResponse, error) {\n\tvar reqBody, resBody ShutdownGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ShutdownHost_TaskBody struct {\n\tReq    *types.ShutdownHost_Task         `xml:\"urn:vim25 ShutdownHost_Task,omitempty\"`\n\tRes    *types.ShutdownHost_TaskResponse `xml:\"urn:vim25 ShutdownHost_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ShutdownHost_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ShutdownHost_Task(ctx context.Context, r soap.RoundTripper, req *types.ShutdownHost_Task) (*types.ShutdownHost_TaskResponse, error) {\n\tvar reqBody, resBody ShutdownHost_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype StageHostPatch_TaskBody struct {\n\tReq    *types.StageHostPatch_Task         `xml:\"urn:vim25 StageHostPatch_Task,omitempty\"`\n\tRes    *types.StageHostPatch_TaskResponse `xml:\"urn:vim25 StageHostPatch_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *StageHostPatch_TaskBody) Fault() *soap.Fault { return 
b.Fault_ }\n\nfunc StageHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.StageHostPatch_Task) (*types.StageHostPatch_TaskResponse, error) {\n\tvar reqBody, resBody StageHostPatch_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype StampAllRulesWithUuid_TaskBody struct {\n\tReq    *types.StampAllRulesWithUuid_Task         `xml:\"urn:vim25 StampAllRulesWithUuid_Task,omitempty\"`\n\tRes    *types.StampAllRulesWithUuid_TaskResponse `xml:\"urn:vim25 StampAllRulesWithUuid_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *StampAllRulesWithUuid_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc StampAllRulesWithUuid_Task(ctx context.Context, r soap.RoundTripper, req *types.StampAllRulesWithUuid_Task) (*types.StampAllRulesWithUuid_TaskResponse, error) {\n\tvar reqBody, resBody StampAllRulesWithUuid_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype StandbyGuestBody struct {\n\tReq    *types.StandbyGuest         `xml:\"urn:vim25 StandbyGuest,omitempty\"`\n\tRes    *types.StandbyGuestResponse `xml:\"urn:vim25 StandbyGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *StandbyGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc StandbyGuest(ctx context.Context, r soap.RoundTripper, req *types.StandbyGuest) (*types.StandbyGuestResponse, error) {\n\tvar reqBody, resBody StandbyGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype StartProgramInGuestBody struct {\n\tReq    *types.StartProgramInGuest         
`xml:\"urn:vim25 StartProgramInGuest,omitempty\"`\n\tRes    *types.StartProgramInGuestResponse `xml:\"urn:vim25 StartProgramInGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *StartProgramInGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc StartProgramInGuest(ctx context.Context, r soap.RoundTripper, req *types.StartProgramInGuest) (*types.StartProgramInGuestResponse, error) {\n\tvar reqBody, resBody StartProgramInGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype StartRecording_TaskBody struct {\n\tReq    *types.StartRecording_Task         `xml:\"urn:vim25 StartRecording_Task,omitempty\"`\n\tRes    *types.StartRecording_TaskResponse `xml:\"urn:vim25 StartRecording_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *StartRecording_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc StartRecording_Task(ctx context.Context, r soap.RoundTripper, req *types.StartRecording_Task) (*types.StartRecording_TaskResponse, error) {\n\tvar reqBody, resBody StartRecording_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype StartReplaying_TaskBody struct {\n\tReq    *types.StartReplaying_Task         `xml:\"urn:vim25 StartReplaying_Task,omitempty\"`\n\tRes    *types.StartReplaying_TaskResponse `xml:\"urn:vim25 StartReplaying_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *StartReplaying_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc StartReplaying_Task(ctx context.Context, r soap.RoundTripper, req *types.StartReplaying_Task) 
(*types.StartReplaying_TaskResponse, error) {\n\tvar reqBody, resBody StartReplaying_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype StartServiceBody struct {\n\tReq    *types.StartService         `xml:\"urn:vim25 StartService,omitempty\"`\n\tRes    *types.StartServiceResponse `xml:\"urn:vim25 StartServiceResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *StartServiceBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc StartService(ctx context.Context, r soap.RoundTripper, req *types.StartService) (*types.StartServiceResponse, error) {\n\tvar reqBody, resBody StartServiceBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype StopRecording_TaskBody struct {\n\tReq    *types.StopRecording_Task         `xml:\"urn:vim25 StopRecording_Task,omitempty\"`\n\tRes    *types.StopRecording_TaskResponse `xml:\"urn:vim25 StopRecording_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *StopRecording_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc StopRecording_Task(ctx context.Context, r soap.RoundTripper, req *types.StopRecording_Task) (*types.StopRecording_TaskResponse, error) {\n\tvar reqBody, resBody StopRecording_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype StopReplaying_TaskBody struct {\n\tReq    *types.StopReplaying_Task         `xml:\"urn:vim25 StopReplaying_Task,omitempty\"`\n\tRes    *types.StopReplaying_TaskResponse `xml:\"urn:vim25 StopReplaying_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                       
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *StopReplaying_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc StopReplaying_Task(ctx context.Context, r soap.RoundTripper, req *types.StopReplaying_Task) (*types.StopReplaying_TaskResponse, error) {\n\tvar reqBody, resBody StopReplaying_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype StopServiceBody struct {\n\tReq    *types.StopService         `xml:\"urn:vim25 StopService,omitempty\"`\n\tRes    *types.StopServiceResponse `xml:\"urn:vim25 StopServiceResponse,omitempty\"`\n\tFault_ *soap.Fault                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *StopServiceBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc StopService(ctx context.Context, r soap.RoundTripper, req *types.StopService) (*types.StopServiceResponse, error) {\n\tvar reqBody, resBody StopServiceBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SuspendVApp_TaskBody struct {\n\tReq    *types.SuspendVApp_Task         `xml:\"urn:vim25 SuspendVApp_Task,omitempty\"`\n\tRes    *types.SuspendVApp_TaskResponse `xml:\"urn:vim25 SuspendVApp_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SuspendVApp_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SuspendVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.SuspendVApp_Task) (*types.SuspendVApp_TaskResponse, error) {\n\tvar reqBody, resBody SuspendVApp_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype SuspendVM_TaskBody struct {\n\tReq    *types.SuspendVM_Task         
`xml:\"urn:vim25 SuspendVM_Task,omitempty\"`\n\tRes    *types.SuspendVM_TaskResponse `xml:\"urn:vim25 SuspendVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *SuspendVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc SuspendVM_Task(ctx context.Context, r soap.RoundTripper, req *types.SuspendVM_Task) (*types.SuspendVM_TaskResponse, error) {\n\tvar reqBody, resBody SuspendVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype TerminateFaultTolerantVM_TaskBody struct {\n\tReq    *types.TerminateFaultTolerantVM_Task         `xml:\"urn:vim25 TerminateFaultTolerantVM_Task,omitempty\"`\n\tRes    *types.TerminateFaultTolerantVM_TaskResponse `xml:\"urn:vim25 TerminateFaultTolerantVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *TerminateFaultTolerantVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc TerminateFaultTolerantVM_Task(ctx context.Context, r soap.RoundTripper, req *types.TerminateFaultTolerantVM_Task) (*types.TerminateFaultTolerantVM_TaskResponse, error) {\n\tvar reqBody, resBody TerminateFaultTolerantVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype TerminateProcessInGuestBody struct {\n\tReq    *types.TerminateProcessInGuest         `xml:\"urn:vim25 TerminateProcessInGuest,omitempty\"`\n\tRes    *types.TerminateProcessInGuestResponse `xml:\"urn:vim25 TerminateProcessInGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *TerminateProcessInGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc 
TerminateProcessInGuest(ctx context.Context, r soap.RoundTripper, req *types.TerminateProcessInGuest) (*types.TerminateProcessInGuestResponse, error) {\n\tvar reqBody, resBody TerminateProcessInGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype TerminateSessionBody struct {\n\tReq    *types.TerminateSession         `xml:\"urn:vim25 TerminateSession,omitempty\"`\n\tRes    *types.TerminateSessionResponse `xml:\"urn:vim25 TerminateSessionResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *TerminateSessionBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc TerminateSession(ctx context.Context, r soap.RoundTripper, req *types.TerminateSession) (*types.TerminateSessionResponse, error) {\n\tvar reqBody, resBody TerminateSessionBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype TerminateVMBody struct {\n\tReq    *types.TerminateVM         `xml:\"urn:vim25 TerminateVM,omitempty\"`\n\tRes    *types.TerminateVMResponse `xml:\"urn:vim25 TerminateVMResponse,omitempty\"`\n\tFault_ *soap.Fault                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *TerminateVMBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc TerminateVM(ctx context.Context, r soap.RoundTripper, req *types.TerminateVM) (*types.TerminateVMResponse, error) {\n\tvar reqBody, resBody TerminateVMBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype TurnDiskLocatorLedOff_TaskBody struct {\n\tReq    *types.TurnDiskLocatorLedOff_Task         `xml:\"urn:vim25 TurnDiskLocatorLedOff_Task,omitempty\"`\n\tRes    *types.TurnDiskLocatorLedOff_TaskResponse 
`xml:\"urn:vim25 TurnDiskLocatorLedOff_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *TurnDiskLocatorLedOff_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc TurnDiskLocatorLedOff_Task(ctx context.Context, r soap.RoundTripper, req *types.TurnDiskLocatorLedOff_Task) (*types.TurnDiskLocatorLedOff_TaskResponse, error) {\n\tvar reqBody, resBody TurnDiskLocatorLedOff_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype TurnDiskLocatorLedOn_TaskBody struct {\n\tReq    *types.TurnDiskLocatorLedOn_Task         `xml:\"urn:vim25 TurnDiskLocatorLedOn_Task,omitempty\"`\n\tRes    *types.TurnDiskLocatorLedOn_TaskResponse `xml:\"urn:vim25 TurnDiskLocatorLedOn_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *TurnDiskLocatorLedOn_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc TurnDiskLocatorLedOn_Task(ctx context.Context, r soap.RoundTripper, req *types.TurnDiskLocatorLedOn_Task) (*types.TurnDiskLocatorLedOn_TaskResponse, error) {\n\tvar reqBody, resBody TurnDiskLocatorLedOn_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype TurnOffFaultToleranceForVM_TaskBody struct {\n\tReq    *types.TurnOffFaultToleranceForVM_Task         `xml:\"urn:vim25 TurnOffFaultToleranceForVM_Task,omitempty\"`\n\tRes    *types.TurnOffFaultToleranceForVM_TaskResponse `xml:\"urn:vim25 TurnOffFaultToleranceForVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *TurnOffFaultToleranceForVM_TaskBody) Fault() *soap.Fault { return b.Fault_ 
}\n\nfunc TurnOffFaultToleranceForVM_Task(ctx context.Context, r soap.RoundTripper, req *types.TurnOffFaultToleranceForVM_Task) (*types.TurnOffFaultToleranceForVM_TaskResponse, error) {\n\tvar reqBody, resBody TurnOffFaultToleranceForVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UnassignUserFromGroupBody struct {\n\tReq    *types.UnassignUserFromGroup         `xml:\"urn:vim25 UnassignUserFromGroup,omitempty\"`\n\tRes    *types.UnassignUserFromGroupResponse `xml:\"urn:vim25 UnassignUserFromGroupResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UnassignUserFromGroupBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UnassignUserFromGroup(ctx context.Context, r soap.RoundTripper, req *types.UnassignUserFromGroup) (*types.UnassignUserFromGroupResponse, error) {\n\tvar reqBody, resBody UnassignUserFromGroupBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UnbindVnicBody struct {\n\tReq    *types.UnbindVnic         `xml:\"urn:vim25 UnbindVnic,omitempty\"`\n\tRes    *types.UnbindVnicResponse `xml:\"urn:vim25 UnbindVnicResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UnbindVnicBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UnbindVnic(ctx context.Context, r soap.RoundTripper, req *types.UnbindVnic) (*types.UnbindVnicResponse, error) {\n\tvar reqBody, resBody UnbindVnicBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UninstallHostPatch_TaskBody struct {\n\tReq    *types.UninstallHostPatch_Task         `xml:\"urn:vim25 
UninstallHostPatch_Task,omitempty\"`\n\tRes    *types.UninstallHostPatch_TaskResponse `xml:\"urn:vim25 UninstallHostPatch_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UninstallHostPatch_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UninstallHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.UninstallHostPatch_Task) (*types.UninstallHostPatch_TaskResponse, error) {\n\tvar reqBody, resBody UninstallHostPatch_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UninstallIoFilter_TaskBody struct {\n\tReq    *types.UninstallIoFilter_Task         `xml:\"urn:vim25 UninstallIoFilter_Task,omitempty\"`\n\tRes    *types.UninstallIoFilter_TaskResponse `xml:\"urn:vim25 UninstallIoFilter_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UninstallIoFilter_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UninstallIoFilter_Task(ctx context.Context, r soap.RoundTripper, req *types.UninstallIoFilter_Task) (*types.UninstallIoFilter_TaskResponse, error) {\n\tvar reqBody, resBody UninstallIoFilter_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UninstallServiceBody struct {\n\tReq    *types.UninstallService         `xml:\"urn:vim25 UninstallService,omitempty\"`\n\tRes    *types.UninstallServiceResponse `xml:\"urn:vim25 UninstallServiceResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UninstallServiceBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UninstallService(ctx context.Context, r soap.RoundTripper, req 
*types.UninstallService) (*types.UninstallServiceResponse, error) {\n\tvar reqBody, resBody UninstallServiceBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UnmapVmfsVolumeEx_TaskBody struct {\n\tReq    *types.UnmapVmfsVolumeEx_Task         `xml:\"urn:vim25 UnmapVmfsVolumeEx_Task,omitempty\"`\n\tRes    *types.UnmapVmfsVolumeEx_TaskResponse `xml:\"urn:vim25 UnmapVmfsVolumeEx_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UnmapVmfsVolumeEx_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UnmapVmfsVolumeEx_Task(ctx context.Context, r soap.RoundTripper, req *types.UnmapVmfsVolumeEx_Task) (*types.UnmapVmfsVolumeEx_TaskResponse, error) {\n\tvar reqBody, resBody UnmapVmfsVolumeEx_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UnmountDiskMapping_TaskBody struct {\n\tReq    *types.UnmountDiskMapping_Task         `xml:\"urn:vim25 UnmountDiskMapping_Task,omitempty\"`\n\tRes    *types.UnmountDiskMapping_TaskResponse `xml:\"urn:vim25 UnmountDiskMapping_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UnmountDiskMapping_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UnmountDiskMapping_Task(ctx context.Context, r soap.RoundTripper, req *types.UnmountDiskMapping_Task) (*types.UnmountDiskMapping_TaskResponse, error) {\n\tvar reqBody, resBody UnmountDiskMapping_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UnmountForceMountedVmfsVolumeBody struct {\n\tReq    *types.UnmountForceMountedVmfsVolume         
`xml:\"urn:vim25 UnmountForceMountedVmfsVolume,omitempty\"`\n\tRes    *types.UnmountForceMountedVmfsVolumeResponse `xml:\"urn:vim25 UnmountForceMountedVmfsVolumeResponse,omitempty\"`\n\tFault_ *soap.Fault                                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UnmountForceMountedVmfsVolumeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UnmountForceMountedVmfsVolume(ctx context.Context, r soap.RoundTripper, req *types.UnmountForceMountedVmfsVolume) (*types.UnmountForceMountedVmfsVolumeResponse, error) {\n\tvar reqBody, resBody UnmountForceMountedVmfsVolumeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UnmountToolsInstallerBody struct {\n\tReq    *types.UnmountToolsInstaller         `xml:\"urn:vim25 UnmountToolsInstaller,omitempty\"`\n\tRes    *types.UnmountToolsInstallerResponse `xml:\"urn:vim25 UnmountToolsInstallerResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UnmountToolsInstallerBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UnmountToolsInstaller(ctx context.Context, r soap.RoundTripper, req *types.UnmountToolsInstaller) (*types.UnmountToolsInstallerResponse, error) {\n\tvar reqBody, resBody UnmountToolsInstallerBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UnmountVffsVolumeBody struct {\n\tReq    *types.UnmountVffsVolume         `xml:\"urn:vim25 UnmountVffsVolume,omitempty\"`\n\tRes    *types.UnmountVffsVolumeResponse `xml:\"urn:vim25 UnmountVffsVolumeResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UnmountVffsVolumeBody) Fault() *soap.Fault { return b.Fault_ 
}\n\nfunc UnmountVffsVolume(ctx context.Context, r soap.RoundTripper, req *types.UnmountVffsVolume) (*types.UnmountVffsVolumeResponse, error) {\n\tvar reqBody, resBody UnmountVffsVolumeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UnmountVmfsVolumeBody struct {\n\tReq    *types.UnmountVmfsVolume         `xml:\"urn:vim25 UnmountVmfsVolume,omitempty\"`\n\tRes    *types.UnmountVmfsVolumeResponse `xml:\"urn:vim25 UnmountVmfsVolumeResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UnmountVmfsVolumeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UnmountVmfsVolume(ctx context.Context, r soap.RoundTripper, req *types.UnmountVmfsVolume) (*types.UnmountVmfsVolumeResponse, error) {\n\tvar reqBody, resBody UnmountVmfsVolumeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UnmountVmfsVolumeEx_TaskBody struct {\n\tReq    *types.UnmountVmfsVolumeEx_Task         `xml:\"urn:vim25 UnmountVmfsVolumeEx_Task,omitempty\"`\n\tRes    *types.UnmountVmfsVolumeEx_TaskResponse `xml:\"urn:vim25 UnmountVmfsVolumeEx_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UnmountVmfsVolumeEx_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UnmountVmfsVolumeEx_Task(ctx context.Context, r soap.RoundTripper, req *types.UnmountVmfsVolumeEx_Task) (*types.UnmountVmfsVolumeEx_TaskResponse, error) {\n\tvar reqBody, resBody UnmountVmfsVolumeEx_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UnregisterAndDestroy_TaskBody struct {\n\tReq    
*types.UnregisterAndDestroy_Task         `xml:\"urn:vim25 UnregisterAndDestroy_Task,omitempty\"`\n\tRes    *types.UnregisterAndDestroy_TaskResponse `xml:\"urn:vim25 UnregisterAndDestroy_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UnregisterAndDestroy_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UnregisterAndDestroy_Task(ctx context.Context, r soap.RoundTripper, req *types.UnregisterAndDestroy_Task) (*types.UnregisterAndDestroy_TaskResponse, error) {\n\tvar reqBody, resBody UnregisterAndDestroy_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UnregisterExtensionBody struct {\n\tReq    *types.UnregisterExtension         `xml:\"urn:vim25 UnregisterExtension,omitempty\"`\n\tRes    *types.UnregisterExtensionResponse `xml:\"urn:vim25 UnregisterExtensionResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UnregisterExtensionBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UnregisterExtension(ctx context.Context, r soap.RoundTripper, req *types.UnregisterExtension) (*types.UnregisterExtensionResponse, error) {\n\tvar reqBody, resBody UnregisterExtensionBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UnregisterHealthUpdateProviderBody struct {\n\tReq    *types.UnregisterHealthUpdateProvider         `xml:\"urn:vim25 UnregisterHealthUpdateProvider,omitempty\"`\n\tRes    *types.UnregisterHealthUpdateProviderResponse `xml:\"urn:vim25 UnregisterHealthUpdateProviderResponse,omitempty\"`\n\tFault_ *soap.Fault                                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b 
*UnregisterHealthUpdateProviderBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UnregisterHealthUpdateProvider(ctx context.Context, r soap.RoundTripper, req *types.UnregisterHealthUpdateProvider) (*types.UnregisterHealthUpdateProviderResponse, error) {\n\tvar reqBody, resBody UnregisterHealthUpdateProviderBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UnregisterVMBody struct {\n\tReq    *types.UnregisterVM         `xml:\"urn:vim25 UnregisterVM,omitempty\"`\n\tRes    *types.UnregisterVMResponse `xml:\"urn:vim25 UnregisterVMResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UnregisterVMBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UnregisterVM(ctx context.Context, r soap.RoundTripper, req *types.UnregisterVM) (*types.UnregisterVMResponse, error) {\n\tvar reqBody, resBody UnregisterVMBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateAnswerFile_TaskBody struct {\n\tReq    *types.UpdateAnswerFile_Task         `xml:\"urn:vim25 UpdateAnswerFile_Task,omitempty\"`\n\tRes    *types.UpdateAnswerFile_TaskResponse `xml:\"urn:vim25 UpdateAnswerFile_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateAnswerFile_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateAnswerFile_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateAnswerFile_Task) (*types.UpdateAnswerFile_TaskResponse, error) {\n\tvar reqBody, resBody UpdateAnswerFile_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateAssignedLicenseBody struct 
{\n\tReq    *types.UpdateAssignedLicense         `xml:\"urn:vim25 UpdateAssignedLicense,omitempty\"`\n\tRes    *types.UpdateAssignedLicenseResponse `xml:\"urn:vim25 UpdateAssignedLicenseResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateAssignedLicenseBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateAssignedLicense(ctx context.Context, r soap.RoundTripper, req *types.UpdateAssignedLicense) (*types.UpdateAssignedLicenseResponse, error) {\n\tvar reqBody, resBody UpdateAssignedLicenseBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateAuthorizationRoleBody struct {\n\tReq    *types.UpdateAuthorizationRole         `xml:\"urn:vim25 UpdateAuthorizationRole,omitempty\"`\n\tRes    *types.UpdateAuthorizationRoleResponse `xml:\"urn:vim25 UpdateAuthorizationRoleResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateAuthorizationRoleBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateAuthorizationRole(ctx context.Context, r soap.RoundTripper, req *types.UpdateAuthorizationRole) (*types.UpdateAuthorizationRoleResponse, error) {\n\tvar reqBody, resBody UpdateAuthorizationRoleBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateBootDeviceBody struct {\n\tReq    *types.UpdateBootDevice         `xml:\"urn:vim25 UpdateBootDevice,omitempty\"`\n\tRes    *types.UpdateBootDeviceResponse `xml:\"urn:vim25 UpdateBootDeviceResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateBootDeviceBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc 
UpdateBootDevice(ctx context.Context, r soap.RoundTripper, req *types.UpdateBootDevice) (*types.UpdateBootDeviceResponse, error) {\n\tvar reqBody, resBody UpdateBootDeviceBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateChildResourceConfigurationBody struct {\n\tReq    *types.UpdateChildResourceConfiguration         `xml:\"urn:vim25 UpdateChildResourceConfiguration,omitempty\"`\n\tRes    *types.UpdateChildResourceConfigurationResponse `xml:\"urn:vim25 UpdateChildResourceConfigurationResponse,omitempty\"`\n\tFault_ *soap.Fault                                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateChildResourceConfigurationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateChildResourceConfiguration(ctx context.Context, r soap.RoundTripper, req *types.UpdateChildResourceConfiguration) (*types.UpdateChildResourceConfigurationResponse, error) {\n\tvar reqBody, resBody UpdateChildResourceConfigurationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateClusterProfileBody struct {\n\tReq    *types.UpdateClusterProfile         `xml:\"urn:vim25 UpdateClusterProfile,omitempty\"`\n\tRes    *types.UpdateClusterProfileResponse `xml:\"urn:vim25 UpdateClusterProfileResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateClusterProfileBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateClusterProfile(ctx context.Context, r soap.RoundTripper, req *types.UpdateClusterProfile) (*types.UpdateClusterProfileResponse, error) {\n\tvar reqBody, resBody UpdateClusterProfileBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, 
err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateConfigBody struct {\n\tReq    *types.UpdateConfig         `xml:\"urn:vim25 UpdateConfig,omitempty\"`\n\tRes    *types.UpdateConfigResponse `xml:\"urn:vim25 UpdateConfigResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateConfigBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateConfig) (*types.UpdateConfigResponse, error) {\n\tvar reqBody, resBody UpdateConfigBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateConsoleIpRouteConfigBody struct {\n\tReq    *types.UpdateConsoleIpRouteConfig         `xml:\"urn:vim25 UpdateConsoleIpRouteConfig,omitempty\"`\n\tRes    *types.UpdateConsoleIpRouteConfigResponse `xml:\"urn:vim25 UpdateConsoleIpRouteConfigResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateConsoleIpRouteConfigBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateConsoleIpRouteConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateConsoleIpRouteConfig) (*types.UpdateConsoleIpRouteConfigResponse, error) {\n\tvar reqBody, resBody UpdateConsoleIpRouteConfigBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateCounterLevelMappingBody struct {\n\tReq    *types.UpdateCounterLevelMapping         `xml:\"urn:vim25 UpdateCounterLevelMapping,omitempty\"`\n\tRes    *types.UpdateCounterLevelMappingResponse `xml:\"urn:vim25 UpdateCounterLevelMappingResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b 
*UpdateCounterLevelMappingBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateCounterLevelMapping(ctx context.Context, r soap.RoundTripper, req *types.UpdateCounterLevelMapping) (*types.UpdateCounterLevelMappingResponse, error) {\n\tvar reqBody, resBody UpdateCounterLevelMappingBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateDVSHealthCheckConfig_TaskBody struct {\n\tReq    *types.UpdateDVSHealthCheckConfig_Task         `xml:\"urn:vim25 UpdateDVSHealthCheckConfig_Task,omitempty\"`\n\tRes    *types.UpdateDVSHealthCheckConfig_TaskResponse `xml:\"urn:vim25 UpdateDVSHealthCheckConfig_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateDVSHealthCheckConfig_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateDVSHealthCheckConfig_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateDVSHealthCheckConfig_Task) (*types.UpdateDVSHealthCheckConfig_TaskResponse, error) {\n\tvar reqBody, resBody UpdateDVSHealthCheckConfig_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateDVSLacpGroupConfig_TaskBody struct {\n\tReq    *types.UpdateDVSLacpGroupConfig_Task         `xml:\"urn:vim25 UpdateDVSLacpGroupConfig_Task,omitempty\"`\n\tRes    *types.UpdateDVSLacpGroupConfig_TaskResponse `xml:\"urn:vim25 UpdateDVSLacpGroupConfig_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateDVSLacpGroupConfig_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateDVSLacpGroupConfig_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateDVSLacpGroupConfig_Task) 
(*types.UpdateDVSLacpGroupConfig_TaskResponse, error) {\n\tvar reqBody, resBody UpdateDVSLacpGroupConfig_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateDateTimeBody struct {\n\tReq    *types.UpdateDateTime         `xml:\"urn:vim25 UpdateDateTime,omitempty\"`\n\tRes    *types.UpdateDateTimeResponse `xml:\"urn:vim25 UpdateDateTimeResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateDateTimeBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateDateTime(ctx context.Context, r soap.RoundTripper, req *types.UpdateDateTime) (*types.UpdateDateTimeResponse, error) {\n\tvar reqBody, resBody UpdateDateTimeBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateDateTimeConfigBody struct {\n\tReq    *types.UpdateDateTimeConfig         `xml:\"urn:vim25 UpdateDateTimeConfig,omitempty\"`\n\tRes    *types.UpdateDateTimeConfigResponse `xml:\"urn:vim25 UpdateDateTimeConfigResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateDateTimeConfigBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateDateTimeConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateDateTimeConfig) (*types.UpdateDateTimeConfigResponse, error) {\n\tvar reqBody, resBody UpdateDateTimeConfigBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateDefaultPolicyBody struct {\n\tReq    *types.UpdateDefaultPolicy         `xml:\"urn:vim25 UpdateDefaultPolicy,omitempty\"`\n\tRes    *types.UpdateDefaultPolicyResponse `xml:\"urn:vim25 
UpdateDefaultPolicyResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateDefaultPolicyBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateDefaultPolicy(ctx context.Context, r soap.RoundTripper, req *types.UpdateDefaultPolicy) (*types.UpdateDefaultPolicyResponse, error) {\n\tvar reqBody, resBody UpdateDefaultPolicyBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateDiskPartitionsBody struct {\n\tReq    *types.UpdateDiskPartitions         `xml:\"urn:vim25 UpdateDiskPartitions,omitempty\"`\n\tRes    *types.UpdateDiskPartitionsResponse `xml:\"urn:vim25 UpdateDiskPartitionsResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateDiskPartitionsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateDiskPartitions(ctx context.Context, r soap.RoundTripper, req *types.UpdateDiskPartitions) (*types.UpdateDiskPartitionsResponse, error) {\n\tvar reqBody, resBody UpdateDiskPartitionsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateDnsConfigBody struct {\n\tReq    *types.UpdateDnsConfig         `xml:\"urn:vim25 UpdateDnsConfig,omitempty\"`\n\tRes    *types.UpdateDnsConfigResponse `xml:\"urn:vim25 UpdateDnsConfigResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateDnsConfigBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateDnsConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateDnsConfig) (*types.UpdateDnsConfigResponse, error) {\n\tvar reqBody, resBody UpdateDnsConfigBody\n\n\treqBody.Req = req\n\n\tif err := 
r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateDvsCapabilityBody struct {\n\tReq    *types.UpdateDvsCapability         `xml:\"urn:vim25 UpdateDvsCapability,omitempty\"`\n\tRes    *types.UpdateDvsCapabilityResponse `xml:\"urn:vim25 UpdateDvsCapabilityResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateDvsCapabilityBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateDvsCapability(ctx context.Context, r soap.RoundTripper, req *types.UpdateDvsCapability) (*types.UpdateDvsCapabilityResponse, error) {\n\tvar reqBody, resBody UpdateDvsCapabilityBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateExtensionBody struct {\n\tReq    *types.UpdateExtension         `xml:\"urn:vim25 UpdateExtension,omitempty\"`\n\tRes    *types.UpdateExtensionResponse `xml:\"urn:vim25 UpdateExtensionResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateExtensionBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateExtension(ctx context.Context, r soap.RoundTripper, req *types.UpdateExtension) (*types.UpdateExtensionResponse, error) {\n\tvar reqBody, resBody UpdateExtensionBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateFlagsBody struct {\n\tReq    *types.UpdateFlags         `xml:\"urn:vim25 UpdateFlags,omitempty\"`\n\tRes    *types.UpdateFlagsResponse `xml:\"urn:vim25 UpdateFlagsResponse,omitempty\"`\n\tFault_ *soap.Fault                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateFlagsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc 
UpdateFlags(ctx context.Context, r soap.RoundTripper, req *types.UpdateFlags) (*types.UpdateFlagsResponse, error) {\n\tvar reqBody, resBody UpdateFlagsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateGraphicsConfigBody struct {\n\tReq    *types.UpdateGraphicsConfig         `xml:\"urn:vim25 UpdateGraphicsConfig,omitempty\"`\n\tRes    *types.UpdateGraphicsConfigResponse `xml:\"urn:vim25 UpdateGraphicsConfigResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateGraphicsConfigBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateGraphicsConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateGraphicsConfig) (*types.UpdateGraphicsConfigResponse, error) {\n\tvar reqBody, resBody UpdateGraphicsConfigBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateHostCustomizations_TaskBody struct {\n\tReq    *types.UpdateHostCustomizations_Task         `xml:\"urn:vim25 UpdateHostCustomizations_Task,omitempty\"`\n\tRes    *types.UpdateHostCustomizations_TaskResponse `xml:\"urn:vim25 UpdateHostCustomizations_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateHostCustomizations_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateHostCustomizations_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateHostCustomizations_Task) (*types.UpdateHostCustomizations_TaskResponse, error) {\n\tvar reqBody, resBody UpdateHostCustomizations_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype 
UpdateHostImageAcceptanceLevelBody struct {\n\tReq    *types.UpdateHostImageAcceptanceLevel         `xml:\"urn:vim25 UpdateHostImageAcceptanceLevel,omitempty\"`\n\tRes    *types.UpdateHostImageAcceptanceLevelResponse `xml:\"urn:vim25 UpdateHostImageAcceptanceLevelResponse,omitempty\"`\n\tFault_ *soap.Fault                                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateHostImageAcceptanceLevelBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateHostImageAcceptanceLevel(ctx context.Context, r soap.RoundTripper, req *types.UpdateHostImageAcceptanceLevel) (*types.UpdateHostImageAcceptanceLevelResponse, error) {\n\tvar reqBody, resBody UpdateHostImageAcceptanceLevelBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateHostProfileBody struct {\n\tReq    *types.UpdateHostProfile         `xml:\"urn:vim25 UpdateHostProfile,omitempty\"`\n\tRes    *types.UpdateHostProfileResponse `xml:\"urn:vim25 UpdateHostProfileResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateHostProfileBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateHostProfile(ctx context.Context, r soap.RoundTripper, req *types.UpdateHostProfile) (*types.UpdateHostProfileResponse, error) {\n\tvar reqBody, resBody UpdateHostProfileBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateHostSpecificationBody struct {\n\tReq    *types.UpdateHostSpecification         `xml:\"urn:vim25 UpdateHostSpecification,omitempty\"`\n\tRes    *types.UpdateHostSpecificationResponse `xml:\"urn:vim25 UpdateHostSpecificationResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ 
Fault,omitempty\"`\n}\n\nfunc (b *UpdateHostSpecificationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateHostSpecification(ctx context.Context, r soap.RoundTripper, req *types.UpdateHostSpecification) (*types.UpdateHostSpecificationResponse, error) {\n\tvar reqBody, resBody UpdateHostSpecificationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateHostSubSpecificationBody struct {\n\tReq    *types.UpdateHostSubSpecification         `xml:\"urn:vim25 UpdateHostSubSpecification,omitempty\"`\n\tRes    *types.UpdateHostSubSpecificationResponse `xml:\"urn:vim25 UpdateHostSubSpecificationResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateHostSubSpecificationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateHostSubSpecification(ctx context.Context, r soap.RoundTripper, req *types.UpdateHostSubSpecification) (*types.UpdateHostSubSpecificationResponse, error) {\n\tvar reqBody, resBody UpdateHostSubSpecificationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateInternetScsiAdvancedOptionsBody struct {\n\tReq    *types.UpdateInternetScsiAdvancedOptions         `xml:\"urn:vim25 UpdateInternetScsiAdvancedOptions,omitempty\"`\n\tRes    *types.UpdateInternetScsiAdvancedOptionsResponse `xml:\"urn:vim25 UpdateInternetScsiAdvancedOptionsResponse,omitempty\"`\n\tFault_ *soap.Fault                                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateInternetScsiAdvancedOptionsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateInternetScsiAdvancedOptions(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiAdvancedOptions) 
(*types.UpdateInternetScsiAdvancedOptionsResponse, error) {\n\tvar reqBody, resBody UpdateInternetScsiAdvancedOptionsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateInternetScsiAliasBody struct {\n\tReq    *types.UpdateInternetScsiAlias         `xml:\"urn:vim25 UpdateInternetScsiAlias,omitempty\"`\n\tRes    *types.UpdateInternetScsiAliasResponse `xml:\"urn:vim25 UpdateInternetScsiAliasResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateInternetScsiAliasBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateInternetScsiAlias(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiAlias) (*types.UpdateInternetScsiAliasResponse, error) {\n\tvar reqBody, resBody UpdateInternetScsiAliasBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateInternetScsiAuthenticationPropertiesBody struct {\n\tReq    *types.UpdateInternetScsiAuthenticationProperties         `xml:\"urn:vim25 UpdateInternetScsiAuthenticationProperties,omitempty\"`\n\tRes    *types.UpdateInternetScsiAuthenticationPropertiesResponse `xml:\"urn:vim25 UpdateInternetScsiAuthenticationPropertiesResponse,omitempty\"`\n\tFault_ *soap.Fault                                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateInternetScsiAuthenticationPropertiesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateInternetScsiAuthenticationProperties(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiAuthenticationProperties) (*types.UpdateInternetScsiAuthenticationPropertiesResponse, error) {\n\tvar reqBody, resBody UpdateInternetScsiAuthenticationPropertiesBody\n\n\treqBody.Req = req\n\n\tif 
err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateInternetScsiDigestPropertiesBody struct {\n\tReq    *types.UpdateInternetScsiDigestProperties         `xml:\"urn:vim25 UpdateInternetScsiDigestProperties,omitempty\"`\n\tRes    *types.UpdateInternetScsiDigestPropertiesResponse `xml:\"urn:vim25 UpdateInternetScsiDigestPropertiesResponse,omitempty\"`\n\tFault_ *soap.Fault                                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateInternetScsiDigestPropertiesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateInternetScsiDigestProperties(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiDigestProperties) (*types.UpdateInternetScsiDigestPropertiesResponse, error) {\n\tvar reqBody, resBody UpdateInternetScsiDigestPropertiesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateInternetScsiDiscoveryPropertiesBody struct {\n\tReq    *types.UpdateInternetScsiDiscoveryProperties         `xml:\"urn:vim25 UpdateInternetScsiDiscoveryProperties,omitempty\"`\n\tRes    *types.UpdateInternetScsiDiscoveryPropertiesResponse `xml:\"urn:vim25 UpdateInternetScsiDiscoveryPropertiesResponse,omitempty\"`\n\tFault_ *soap.Fault                                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateInternetScsiDiscoveryPropertiesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateInternetScsiDiscoveryProperties(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiDiscoveryProperties) (*types.UpdateInternetScsiDiscoveryPropertiesResponse, error) {\n\tvar reqBody, resBody UpdateInternetScsiDiscoveryPropertiesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, 
err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateInternetScsiIPPropertiesBody struct {\n\tReq    *types.UpdateInternetScsiIPProperties         `xml:\"urn:vim25 UpdateInternetScsiIPProperties,omitempty\"`\n\tRes    *types.UpdateInternetScsiIPPropertiesResponse `xml:\"urn:vim25 UpdateInternetScsiIPPropertiesResponse,omitempty\"`\n\tFault_ *soap.Fault                                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateInternetScsiIPPropertiesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateInternetScsiIPProperties(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiIPProperties) (*types.UpdateInternetScsiIPPropertiesResponse, error) {\n\tvar reqBody, resBody UpdateInternetScsiIPPropertiesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateInternetScsiNameBody struct {\n\tReq    *types.UpdateInternetScsiName         `xml:\"urn:vim25 UpdateInternetScsiName,omitempty\"`\n\tRes    *types.UpdateInternetScsiNameResponse `xml:\"urn:vim25 UpdateInternetScsiNameResponse,omitempty\"`\n\tFault_ *soap.Fault                           `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateInternetScsiNameBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateInternetScsiName(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiName) (*types.UpdateInternetScsiNameResponse, error) {\n\tvar reqBody, resBody UpdateInternetScsiNameBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateIpConfigBody struct {\n\tReq    *types.UpdateIpConfig         `xml:\"urn:vim25 UpdateIpConfig,omitempty\"`\n\tRes    *types.UpdateIpConfigResponse `xml:\"urn:vim25 UpdateIpConfigResponse,omitempty\"`\n\tFault_ *soap.Fault                   
`xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateIpConfigBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateIpConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpConfig) (*types.UpdateIpConfigResponse, error) {\n\tvar reqBody, resBody UpdateIpConfigBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateIpPoolBody struct {\n\tReq    *types.UpdateIpPool         `xml:\"urn:vim25 UpdateIpPool,omitempty\"`\n\tRes    *types.UpdateIpPoolResponse `xml:\"urn:vim25 UpdateIpPoolResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateIpPoolBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateIpPool(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpPool) (*types.UpdateIpPoolResponse, error) {\n\tvar reqBody, resBody UpdateIpPoolBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateIpRouteConfigBody struct {\n\tReq    *types.UpdateIpRouteConfig         `xml:\"urn:vim25 UpdateIpRouteConfig,omitempty\"`\n\tRes    *types.UpdateIpRouteConfigResponse `xml:\"urn:vim25 UpdateIpRouteConfigResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateIpRouteConfigBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateIpRouteConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpRouteConfig) (*types.UpdateIpRouteConfigResponse, error) {\n\tvar reqBody, resBody UpdateIpRouteConfigBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateIpRouteTableConfigBody struct {\n\tReq    
*types.UpdateIpRouteTableConfig         `xml:\"urn:vim25 UpdateIpRouteTableConfig,omitempty\"`\n\tRes    *types.UpdateIpRouteTableConfigResponse `xml:\"urn:vim25 UpdateIpRouteTableConfigResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateIpRouteTableConfigBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateIpRouteTableConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpRouteTableConfig) (*types.UpdateIpRouteTableConfigResponse, error) {\n\tvar reqBody, resBody UpdateIpRouteTableConfigBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateIpmiBody struct {\n\tReq    *types.UpdateIpmi         `xml:\"urn:vim25 UpdateIpmi,omitempty\"`\n\tRes    *types.UpdateIpmiResponse `xml:\"urn:vim25 UpdateIpmiResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateIpmiBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateIpmi(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpmi) (*types.UpdateIpmiResponse, error) {\n\tvar reqBody, resBody UpdateIpmiBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateKmipServerBody struct {\n\tReq    *types.UpdateKmipServer         `xml:\"urn:vim25 UpdateKmipServer,omitempty\"`\n\tRes    *types.UpdateKmipServerResponse `xml:\"urn:vim25 UpdateKmipServerResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateKmipServerBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateKmipServer(ctx context.Context, r soap.RoundTripper, req *types.UpdateKmipServer) (*types.UpdateKmipServerResponse, error) 
{\n\tvar reqBody, resBody UpdateKmipServerBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateKmsSignedCsrClientCertBody struct {\n\tReq    *types.UpdateKmsSignedCsrClientCert         `xml:\"urn:vim25 UpdateKmsSignedCsrClientCert,omitempty\"`\n\tRes    *types.UpdateKmsSignedCsrClientCertResponse `xml:\"urn:vim25 UpdateKmsSignedCsrClientCertResponse,omitempty\"`\n\tFault_ *soap.Fault                                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateKmsSignedCsrClientCertBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateKmsSignedCsrClientCert(ctx context.Context, r soap.RoundTripper, req *types.UpdateKmsSignedCsrClientCert) (*types.UpdateKmsSignedCsrClientCertResponse, error) {\n\tvar reqBody, resBody UpdateKmsSignedCsrClientCertBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateLicenseBody struct {\n\tReq    *types.UpdateLicense         `xml:\"urn:vim25 UpdateLicense,omitempty\"`\n\tRes    *types.UpdateLicenseResponse `xml:\"urn:vim25 UpdateLicenseResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateLicenseBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateLicense(ctx context.Context, r soap.RoundTripper, req *types.UpdateLicense) (*types.UpdateLicenseResponse, error) {\n\tvar reqBody, resBody UpdateLicenseBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateLicenseLabelBody struct {\n\tReq    *types.UpdateLicenseLabel         `xml:\"urn:vim25 UpdateLicenseLabel,omitempty\"`\n\tRes    *types.UpdateLicenseLabelResponse `xml:\"urn:vim25 
UpdateLicenseLabelResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateLicenseLabelBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateLicenseLabel(ctx context.Context, r soap.RoundTripper, req *types.UpdateLicenseLabel) (*types.UpdateLicenseLabelResponse, error) {\n\tvar reqBody, resBody UpdateLicenseLabelBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateLinkedChildrenBody struct {\n\tReq    *types.UpdateLinkedChildren         `xml:\"urn:vim25 UpdateLinkedChildren,omitempty\"`\n\tRes    *types.UpdateLinkedChildrenResponse `xml:\"urn:vim25 UpdateLinkedChildrenResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateLinkedChildrenBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateLinkedChildren(ctx context.Context, r soap.RoundTripper, req *types.UpdateLinkedChildren) (*types.UpdateLinkedChildrenResponse, error) {\n\tvar reqBody, resBody UpdateLinkedChildrenBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateLocalSwapDatastoreBody struct {\n\tReq    *types.UpdateLocalSwapDatastore         `xml:\"urn:vim25 UpdateLocalSwapDatastore,omitempty\"`\n\tRes    *types.UpdateLocalSwapDatastoreResponse `xml:\"urn:vim25 UpdateLocalSwapDatastoreResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateLocalSwapDatastoreBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateLocalSwapDatastore(ctx context.Context, r soap.RoundTripper, req *types.UpdateLocalSwapDatastore) (*types.UpdateLocalSwapDatastoreResponse, error) {\n\tvar 
reqBody, resBody UpdateLocalSwapDatastoreBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateLockdownExceptionsBody struct {\n\tReq    *types.UpdateLockdownExceptions         `xml:\"urn:vim25 UpdateLockdownExceptions,omitempty\"`\n\tRes    *types.UpdateLockdownExceptionsResponse `xml:\"urn:vim25 UpdateLockdownExceptionsResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateLockdownExceptionsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateLockdownExceptions(ctx context.Context, r soap.RoundTripper, req *types.UpdateLockdownExceptions) (*types.UpdateLockdownExceptionsResponse, error) {\n\tvar reqBody, resBody UpdateLockdownExceptionsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateModuleOptionStringBody struct {\n\tReq    *types.UpdateModuleOptionString         `xml:\"urn:vim25 UpdateModuleOptionString,omitempty\"`\n\tRes    *types.UpdateModuleOptionStringResponse `xml:\"urn:vim25 UpdateModuleOptionStringResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateModuleOptionStringBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateModuleOptionString(ctx context.Context, r soap.RoundTripper, req *types.UpdateModuleOptionString) (*types.UpdateModuleOptionStringResponse, error) {\n\tvar reqBody, resBody UpdateModuleOptionStringBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateNetworkConfigBody struct {\n\tReq    *types.UpdateNetworkConfig         `xml:\"urn:vim25 UpdateNetworkConfig,omitempty\"`\n\tRes    
*types.UpdateNetworkConfigResponse `xml:\"urn:vim25 UpdateNetworkConfigResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateNetworkConfigBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateNetworkConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateNetworkConfig) (*types.UpdateNetworkConfigResponse, error) {\n\tvar reqBody, resBody UpdateNetworkConfigBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateNetworkResourcePoolBody struct {\n\tReq    *types.UpdateNetworkResourcePool         `xml:\"urn:vim25 UpdateNetworkResourcePool,omitempty\"`\n\tRes    *types.UpdateNetworkResourcePoolResponse `xml:\"urn:vim25 UpdateNetworkResourcePoolResponse,omitempty\"`\n\tFault_ *soap.Fault                              `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateNetworkResourcePoolBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateNetworkResourcePool(ctx context.Context, r soap.RoundTripper, req *types.UpdateNetworkResourcePool) (*types.UpdateNetworkResourcePoolResponse, error) {\n\tvar reqBody, resBody UpdateNetworkResourcePoolBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateOptionsBody struct {\n\tReq    *types.UpdateOptions         `xml:\"urn:vim25 UpdateOptions,omitempty\"`\n\tRes    *types.UpdateOptionsResponse `xml:\"urn:vim25 UpdateOptionsResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateOptionsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateOptions(ctx context.Context, r soap.RoundTripper, req *types.UpdateOptions) (*types.UpdateOptionsResponse, error) {\n\tvar 
reqBody, resBody UpdateOptionsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdatePassthruConfigBody struct {\n\tReq    *types.UpdatePassthruConfig         `xml:\"urn:vim25 UpdatePassthruConfig,omitempty\"`\n\tRes    *types.UpdatePassthruConfigResponse `xml:\"urn:vim25 UpdatePassthruConfigResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdatePassthruConfigBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdatePassthruConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdatePassthruConfig) (*types.UpdatePassthruConfigResponse, error) {\n\tvar reqBody, resBody UpdatePassthruConfigBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdatePerfIntervalBody struct {\n\tReq    *types.UpdatePerfInterval         `xml:\"urn:vim25 UpdatePerfInterval,omitempty\"`\n\tRes    *types.UpdatePerfIntervalResponse `xml:\"urn:vim25 UpdatePerfIntervalResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdatePerfIntervalBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdatePerfInterval(ctx context.Context, r soap.RoundTripper, req *types.UpdatePerfInterval) (*types.UpdatePerfIntervalResponse, error) {\n\tvar reqBody, resBody UpdatePerfIntervalBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdatePhysicalNicLinkSpeedBody struct {\n\tReq    *types.UpdatePhysicalNicLinkSpeed         `xml:\"urn:vim25 UpdatePhysicalNicLinkSpeed,omitempty\"`\n\tRes    *types.UpdatePhysicalNicLinkSpeedResponse `xml:\"urn:vim25 
UpdatePhysicalNicLinkSpeedResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdatePhysicalNicLinkSpeedBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdatePhysicalNicLinkSpeed(ctx context.Context, r soap.RoundTripper, req *types.UpdatePhysicalNicLinkSpeed) (*types.UpdatePhysicalNicLinkSpeedResponse, error) {\n\tvar reqBody, resBody UpdatePhysicalNicLinkSpeedBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdatePortGroupBody struct {\n\tReq    *types.UpdatePortGroup         `xml:\"urn:vim25 UpdatePortGroup,omitempty\"`\n\tRes    *types.UpdatePortGroupResponse `xml:\"urn:vim25 UpdatePortGroupResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdatePortGroupBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdatePortGroup(ctx context.Context, r soap.RoundTripper, req *types.UpdatePortGroup) (*types.UpdatePortGroupResponse, error) {\n\tvar reqBody, resBody UpdatePortGroupBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateProgressBody struct {\n\tReq    *types.UpdateProgress         `xml:\"urn:vim25 UpdateProgress,omitempty\"`\n\tRes    *types.UpdateProgressResponse `xml:\"urn:vim25 UpdateProgressResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateProgressBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateProgress(ctx context.Context, r soap.RoundTripper, req *types.UpdateProgress) (*types.UpdateProgressResponse, error) {\n\tvar reqBody, resBody UpdateProgressBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, 
&resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateReferenceHostBody struct {\n\tReq    *types.UpdateReferenceHost         `xml:\"urn:vim25 UpdateReferenceHost,omitempty\"`\n\tRes    *types.UpdateReferenceHostResponse `xml:\"urn:vim25 UpdateReferenceHostResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateReferenceHostBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateReferenceHost(ctx context.Context, r soap.RoundTripper, req *types.UpdateReferenceHost) (*types.UpdateReferenceHostResponse, error) {\n\tvar reqBody, resBody UpdateReferenceHostBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateRulesetBody struct {\n\tReq    *types.UpdateRuleset         `xml:\"urn:vim25 UpdateRuleset,omitempty\"`\n\tRes    *types.UpdateRulesetResponse `xml:\"urn:vim25 UpdateRulesetResponse,omitempty\"`\n\tFault_ *soap.Fault                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateRulesetBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateRuleset(ctx context.Context, r soap.RoundTripper, req *types.UpdateRuleset) (*types.UpdateRulesetResponse, error) {\n\tvar reqBody, resBody UpdateRulesetBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateScsiLunDisplayNameBody struct {\n\tReq    *types.UpdateScsiLunDisplayName         `xml:\"urn:vim25 UpdateScsiLunDisplayName,omitempty\"`\n\tRes    *types.UpdateScsiLunDisplayNameResponse `xml:\"urn:vim25 UpdateScsiLunDisplayNameResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateScsiLunDisplayNameBody) 
Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateScsiLunDisplayName(ctx context.Context, r soap.RoundTripper, req *types.UpdateScsiLunDisplayName) (*types.UpdateScsiLunDisplayNameResponse, error) {\n\tvar reqBody, resBody UpdateScsiLunDisplayNameBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateSelfSignedClientCertBody struct {\n\tReq    *types.UpdateSelfSignedClientCert         `xml:\"urn:vim25 UpdateSelfSignedClientCert,omitempty\"`\n\tRes    *types.UpdateSelfSignedClientCertResponse `xml:\"urn:vim25 UpdateSelfSignedClientCertResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateSelfSignedClientCertBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateSelfSignedClientCert(ctx context.Context, r soap.RoundTripper, req *types.UpdateSelfSignedClientCert) (*types.UpdateSelfSignedClientCertResponse, error) {\n\tvar reqBody, resBody UpdateSelfSignedClientCertBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateServiceConsoleVirtualNicBody struct {\n\tReq    *types.UpdateServiceConsoleVirtualNic         `xml:\"urn:vim25 UpdateServiceConsoleVirtualNic,omitempty\"`\n\tRes    *types.UpdateServiceConsoleVirtualNicResponse `xml:\"urn:vim25 UpdateServiceConsoleVirtualNicResponse,omitempty\"`\n\tFault_ *soap.Fault                                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateServiceConsoleVirtualNicBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.UpdateServiceConsoleVirtualNic) (*types.UpdateServiceConsoleVirtualNicResponse, error) {\n\tvar reqBody, resBody 
UpdateServiceConsoleVirtualNicBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateServiceMessageBody struct {\n\tReq    *types.UpdateServiceMessage         `xml:\"urn:vim25 UpdateServiceMessage,omitempty\"`\n\tRes    *types.UpdateServiceMessageResponse `xml:\"urn:vim25 UpdateServiceMessageResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateServiceMessageBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateServiceMessage(ctx context.Context, r soap.RoundTripper, req *types.UpdateServiceMessage) (*types.UpdateServiceMessageResponse, error) {\n\tvar reqBody, resBody UpdateServiceMessageBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateServicePolicyBody struct {\n\tReq    *types.UpdateServicePolicy         `xml:\"urn:vim25 UpdateServicePolicy,omitempty\"`\n\tRes    *types.UpdateServicePolicyResponse `xml:\"urn:vim25 UpdateServicePolicyResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateServicePolicyBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateServicePolicy(ctx context.Context, r soap.RoundTripper, req *types.UpdateServicePolicy) (*types.UpdateServicePolicyResponse, error) {\n\tvar reqBody, resBody UpdateServicePolicyBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateSoftwareInternetScsiEnabledBody struct {\n\tReq    *types.UpdateSoftwareInternetScsiEnabled         `xml:\"urn:vim25 UpdateSoftwareInternetScsiEnabled,omitempty\"`\n\tRes    *types.UpdateSoftwareInternetScsiEnabledResponse `xml:\"urn:vim25 
UpdateSoftwareInternetScsiEnabledResponse,omitempty\"`\n\tFault_ *soap.Fault                                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateSoftwareInternetScsiEnabledBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateSoftwareInternetScsiEnabled(ctx context.Context, r soap.RoundTripper, req *types.UpdateSoftwareInternetScsiEnabled) (*types.UpdateSoftwareInternetScsiEnabledResponse, error) {\n\tvar reqBody, resBody UpdateSoftwareInternetScsiEnabledBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateSystemResourcesBody struct {\n\tReq    *types.UpdateSystemResources         `xml:\"urn:vim25 UpdateSystemResources,omitempty\"`\n\tRes    *types.UpdateSystemResourcesResponse `xml:\"urn:vim25 UpdateSystemResourcesResponse,omitempty\"`\n\tFault_ *soap.Fault                          `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateSystemResourcesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateSystemResources(ctx context.Context, r soap.RoundTripper, req *types.UpdateSystemResources) (*types.UpdateSystemResourcesResponse, error) {\n\tvar reqBody, resBody UpdateSystemResourcesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateSystemSwapConfigurationBody struct {\n\tReq    *types.UpdateSystemSwapConfiguration         `xml:\"urn:vim25 UpdateSystemSwapConfiguration,omitempty\"`\n\tRes    *types.UpdateSystemSwapConfigurationResponse `xml:\"urn:vim25 UpdateSystemSwapConfigurationResponse,omitempty\"`\n\tFault_ *soap.Fault                                  `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateSystemSwapConfigurationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc 
UpdateSystemSwapConfiguration(ctx context.Context, r soap.RoundTripper, req *types.UpdateSystemSwapConfiguration) (*types.UpdateSystemSwapConfigurationResponse, error) {\n\tvar reqBody, resBody UpdateSystemSwapConfigurationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateSystemUsersBody struct {\n\tReq    *types.UpdateSystemUsers         `xml:\"urn:vim25 UpdateSystemUsers,omitempty\"`\n\tRes    *types.UpdateSystemUsersResponse `xml:\"urn:vim25 UpdateSystemUsersResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateSystemUsersBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateSystemUsers(ctx context.Context, r soap.RoundTripper, req *types.UpdateSystemUsers) (*types.UpdateSystemUsersResponse, error) {\n\tvar reqBody, resBody UpdateSystemUsersBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateUserBody struct {\n\tReq    *types.UpdateUser         `xml:\"urn:vim25 UpdateUser,omitempty\"`\n\tRes    *types.UpdateUserResponse `xml:\"urn:vim25 UpdateUserResponse,omitempty\"`\n\tFault_ *soap.Fault               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateUserBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateUser(ctx context.Context, r soap.RoundTripper, req *types.UpdateUser) (*types.UpdateUserResponse, error) {\n\tvar reqBody, resBody UpdateUserBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateVAppConfigBody struct {\n\tReq    *types.UpdateVAppConfig         `xml:\"urn:vim25 UpdateVAppConfig,omitempty\"`\n\tRes    *types.UpdateVAppConfigResponse `xml:\"urn:vim25 
UpdateVAppConfigResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateVAppConfigBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateVAppConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateVAppConfig) (*types.UpdateVAppConfigResponse, error) {\n\tvar reqBody, resBody UpdateVAppConfigBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateVVolVirtualMachineFiles_TaskBody struct {\n\tReq    *types.UpdateVVolVirtualMachineFiles_Task         `xml:\"urn:vim25 UpdateVVolVirtualMachineFiles_Task,omitempty\"`\n\tRes    *types.UpdateVVolVirtualMachineFiles_TaskResponse `xml:\"urn:vim25 UpdateVVolVirtualMachineFiles_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateVVolVirtualMachineFiles_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateVVolVirtualMachineFiles_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateVVolVirtualMachineFiles_Task) (*types.UpdateVVolVirtualMachineFiles_TaskResponse, error) {\n\tvar reqBody, resBody UpdateVVolVirtualMachineFiles_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateVirtualMachineFiles_TaskBody struct {\n\tReq    *types.UpdateVirtualMachineFiles_Task         `xml:\"urn:vim25 UpdateVirtualMachineFiles_Task,omitempty\"`\n\tRes    *types.UpdateVirtualMachineFiles_TaskResponse `xml:\"urn:vim25 UpdateVirtualMachineFiles_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateVirtualMachineFiles_TaskBody) Fault() *soap.Fault { return 
b.Fault_ }\n\nfunc UpdateVirtualMachineFiles_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateVirtualMachineFiles_Task) (*types.UpdateVirtualMachineFiles_TaskResponse, error) {\n\tvar reqBody, resBody UpdateVirtualMachineFiles_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateVirtualNicBody struct {\n\tReq    *types.UpdateVirtualNic         `xml:\"urn:vim25 UpdateVirtualNic,omitempty\"`\n\tRes    *types.UpdateVirtualNicResponse `xml:\"urn:vim25 UpdateVirtualNicResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateVirtualNicBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.UpdateVirtualNic) (*types.UpdateVirtualNicResponse, error) {\n\tvar reqBody, resBody UpdateVirtualNicBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateVirtualSwitchBody struct {\n\tReq    *types.UpdateVirtualSwitch         `xml:\"urn:vim25 UpdateVirtualSwitch,omitempty\"`\n\tRes    *types.UpdateVirtualSwitchResponse `xml:\"urn:vim25 UpdateVirtualSwitchResponse,omitempty\"`\n\tFault_ *soap.Fault                        `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateVirtualSwitchBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateVirtualSwitch(ctx context.Context, r soap.RoundTripper, req *types.UpdateVirtualSwitch) (*types.UpdateVirtualSwitchResponse, error) {\n\tvar reqBody, resBody UpdateVirtualSwitchBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateVmfsUnmapPriorityBody struct {\n\tReq    
*types.UpdateVmfsUnmapPriority         `xml:\"urn:vim25 UpdateVmfsUnmapPriority,omitempty\"`\n\tRes    *types.UpdateVmfsUnmapPriorityResponse `xml:\"urn:vim25 UpdateVmfsUnmapPriorityResponse,omitempty\"`\n\tFault_ *soap.Fault                            `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateVmfsUnmapPriorityBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateVmfsUnmapPriority(ctx context.Context, r soap.RoundTripper, req *types.UpdateVmfsUnmapPriority) (*types.UpdateVmfsUnmapPriorityResponse, error) {\n\tvar reqBody, resBody UpdateVmfsUnmapPriorityBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpdateVsan_TaskBody struct {\n\tReq    *types.UpdateVsan_Task         `xml:\"urn:vim25 UpdateVsan_Task,omitempty\"`\n\tRes    *types.UpdateVsan_TaskResponse `xml:\"urn:vim25 UpdateVsan_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpdateVsan_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpdateVsan_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateVsan_Task) (*types.UpdateVsan_TaskResponse, error) {\n\tvar reqBody, resBody UpdateVsan_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpgradeIoFilter_TaskBody struct {\n\tReq    *types.UpgradeIoFilter_Task         `xml:\"urn:vim25 UpgradeIoFilter_Task,omitempty\"`\n\tRes    *types.UpgradeIoFilter_TaskResponse `xml:\"urn:vim25 UpgradeIoFilter_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpgradeIoFilter_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpgradeIoFilter_Task(ctx context.Context, r 
soap.RoundTripper, req *types.UpgradeIoFilter_Task) (*types.UpgradeIoFilter_TaskResponse, error) {\n\tvar reqBody, resBody UpgradeIoFilter_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpgradeTools_TaskBody struct {\n\tReq    *types.UpgradeTools_Task         `xml:\"urn:vim25 UpgradeTools_Task,omitempty\"`\n\tRes    *types.UpgradeTools_TaskResponse `xml:\"urn:vim25 UpgradeTools_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpgradeTools_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpgradeTools_Task(ctx context.Context, r soap.RoundTripper, req *types.UpgradeTools_Task) (*types.UpgradeTools_TaskResponse, error) {\n\tvar reqBody, resBody UpgradeTools_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpgradeVM_TaskBody struct {\n\tReq    *types.UpgradeVM_Task         `xml:\"urn:vim25 UpgradeVM_Task,omitempty\"`\n\tRes    *types.UpgradeVM_TaskResponse `xml:\"urn:vim25 UpgradeVM_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpgradeVM_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpgradeVM_Task(ctx context.Context, r soap.RoundTripper, req *types.UpgradeVM_Task) (*types.UpgradeVM_TaskResponse, error) {\n\tvar reqBody, resBody UpgradeVM_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpgradeVmLayoutBody struct {\n\tReq    *types.UpgradeVmLayout         `xml:\"urn:vim25 UpgradeVmLayout,omitempty\"`\n\tRes    *types.UpgradeVmLayoutResponse `xml:\"urn:vim25 UpgradeVmLayoutResponse,omitempty\"`\n\tFault_ 
*soap.Fault                    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpgradeVmLayoutBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpgradeVmLayout(ctx context.Context, r soap.RoundTripper, req *types.UpgradeVmLayout) (*types.UpgradeVmLayoutResponse, error) {\n\tvar reqBody, resBody UpgradeVmLayoutBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpgradeVmfsBody struct {\n\tReq    *types.UpgradeVmfs         `xml:\"urn:vim25 UpgradeVmfs,omitempty\"`\n\tRes    *types.UpgradeVmfsResponse `xml:\"urn:vim25 UpgradeVmfsResponse,omitempty\"`\n\tFault_ *soap.Fault                `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpgradeVmfsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpgradeVmfs(ctx context.Context, r soap.RoundTripper, req *types.UpgradeVmfs) (*types.UpgradeVmfsResponse, error) {\n\tvar reqBody, resBody UpgradeVmfsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UpgradeVsanObjectsBody struct {\n\tReq    *types.UpgradeVsanObjects         `xml:\"urn:vim25 UpgradeVsanObjects,omitempty\"`\n\tRes    *types.UpgradeVsanObjectsResponse `xml:\"urn:vim25 UpgradeVsanObjectsResponse,omitempty\"`\n\tFault_ *soap.Fault                       `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UpgradeVsanObjectsBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UpgradeVsanObjects(ctx context.Context, r soap.RoundTripper, req *types.UpgradeVsanObjects) (*types.UpgradeVsanObjectsResponse, error) {\n\tvar reqBody, resBody UpgradeVsanObjectsBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UploadClientCertBody struct {\n\tReq   
 *types.UploadClientCert         `xml:\"urn:vim25 UploadClientCert,omitempty\"`\n\tRes    *types.UploadClientCertResponse `xml:\"urn:vim25 UploadClientCertResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UploadClientCertBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UploadClientCert(ctx context.Context, r soap.RoundTripper, req *types.UploadClientCert) (*types.UploadClientCertResponse, error) {\n\tvar reqBody, resBody UploadClientCertBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype UploadKmipServerCertBody struct {\n\tReq    *types.UploadKmipServerCert         `xml:\"urn:vim25 UploadKmipServerCert,omitempty\"`\n\tRes    *types.UploadKmipServerCertResponse `xml:\"urn:vim25 UploadKmipServerCertResponse,omitempty\"`\n\tFault_ *soap.Fault                         `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *UploadKmipServerCertBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc UploadKmipServerCert(ctx context.Context, r soap.RoundTripper, req *types.UploadKmipServerCert) (*types.UploadKmipServerCertResponse, error) {\n\tvar reqBody, resBody UploadKmipServerCertBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ValidateCredentialsInGuestBody struct {\n\tReq    *types.ValidateCredentialsInGuest         `xml:\"urn:vim25 ValidateCredentialsInGuest,omitempty\"`\n\tRes    *types.ValidateCredentialsInGuestResponse `xml:\"urn:vim25 ValidateCredentialsInGuestResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ValidateCredentialsInGuestBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc 
ValidateCredentialsInGuest(ctx context.Context, r soap.RoundTripper, req *types.ValidateCredentialsInGuest) (*types.ValidateCredentialsInGuestResponse, error) {\n\tvar reqBody, resBody ValidateCredentialsInGuestBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ValidateHostBody struct {\n\tReq    *types.ValidateHost         `xml:\"urn:vim25 ValidateHost,omitempty\"`\n\tRes    *types.ValidateHostResponse `xml:\"urn:vim25 ValidateHostResponse,omitempty\"`\n\tFault_ *soap.Fault                 `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ValidateHostBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ValidateHost(ctx context.Context, r soap.RoundTripper, req *types.ValidateHost) (*types.ValidateHostResponse, error) {\n\tvar reqBody, resBody ValidateHostBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ValidateMigrationBody struct {\n\tReq    *types.ValidateMigration         `xml:\"urn:vim25 ValidateMigration,omitempty\"`\n\tRes    *types.ValidateMigrationResponse `xml:\"urn:vim25 ValidateMigrationResponse,omitempty\"`\n\tFault_ *soap.Fault                      `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ValidateMigrationBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ValidateMigration(ctx context.Context, r soap.RoundTripper, req *types.ValidateMigration) (*types.ValidateMigrationResponse, error) {\n\tvar reqBody, resBody ValidateMigrationBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype WaitForUpdatesBody struct {\n\tReq    *types.WaitForUpdates         `xml:\"urn:vim25 WaitForUpdates,omitempty\"`\n\tRes    *types.WaitForUpdatesResponse `xml:\"urn:vim25 
WaitForUpdatesResponse,omitempty\"`\n\tFault_ *soap.Fault                   `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *WaitForUpdatesBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc WaitForUpdates(ctx context.Context, r soap.RoundTripper, req *types.WaitForUpdates) (*types.WaitForUpdatesResponse, error) {\n\tvar reqBody, resBody WaitForUpdatesBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype WaitForUpdatesExBody struct {\n\tReq    *types.WaitForUpdatesEx         `xml:\"urn:vim25 WaitForUpdatesEx,omitempty\"`\n\tRes    *types.WaitForUpdatesExResponse `xml:\"urn:vim25 WaitForUpdatesExResponse,omitempty\"`\n\tFault_ *soap.Fault                     `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *WaitForUpdatesExBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc WaitForUpdatesEx(ctx context.Context, r soap.RoundTripper, req *types.WaitForUpdatesEx) (*types.WaitForUpdatesExResponse, error) {\n\tvar reqBody, resBody WaitForUpdatesExBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype XmlToCustomizationSpecItemBody struct {\n\tReq    *types.XmlToCustomizationSpecItem         `xml:\"urn:vim25 XmlToCustomizationSpecItem,omitempty\"`\n\tRes    *types.XmlToCustomizationSpecItemResponse `xml:\"urn:vim25 XmlToCustomizationSpecItemResponse,omitempty\"`\n\tFault_ *soap.Fault                               `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *XmlToCustomizationSpecItemBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc XmlToCustomizationSpecItem(ctx context.Context, r soap.RoundTripper, req *types.XmlToCustomizationSpecItem) (*types.XmlToCustomizationSpecItemResponse, error) {\n\tvar reqBody, resBody 
XmlToCustomizationSpecItemBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n\ntype ZeroFillVirtualDisk_TaskBody struct {\n\tReq    *types.ZeroFillVirtualDisk_Task         `xml:\"urn:vim25 ZeroFillVirtualDisk_Task,omitempty\"`\n\tRes    *types.ZeroFillVirtualDisk_TaskResponse `xml:\"urn:vim25 ZeroFillVirtualDisk_TaskResponse,omitempty\"`\n\tFault_ *soap.Fault                             `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty\"`\n}\n\nfunc (b *ZeroFillVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ }\n\nfunc ZeroFillVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.ZeroFillVirtualDisk_Task) (*types.ZeroFillVirtualDisk_TaskResponse, error) {\n\tvar reqBody, resBody ZeroFillVirtualDisk_TaskBody\n\n\treqBody.Req = req\n\n\tif err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resBody.Res, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/methods/service_content.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage methods\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nvar ServiceInstance = types.ManagedObjectReference{\n\tType:  \"ServiceInstance\",\n\tValue: \"ServiceInstance\",\n}\n\nfunc GetServiceContent(ctx context.Context, r soap.RoundTripper) (types.ServiceContent, error) {\n\treq := types.RetrieveServiceContent{\n\t\tThis: ServiceInstance,\n\t}\n\n\tres, err := RetrieveServiceContent(ctx, r, &req)\n\tif err != nil {\n\t\treturn types.ServiceContent{}, err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc GetCurrentTime(ctx context.Context, r soap.RoundTripper) (*time.Time, error) {\n\treq := types.CurrentTime{\n\t\tThis: ServiceInstance,\n\t}\n\n\tres, err := CurrentTime(ctx, r, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/mo/ancestors.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage mo\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\n// Ancestors returns the entire ancestry tree of a specified managed object.\n// The return value includes the root node and the specified object itself.\nfunc Ancestors(ctx context.Context, rt soap.RoundTripper, pc, obj types.ManagedObjectReference) ([]ManagedEntity, error) {\n\tospec := types.ObjectSpec{\n\t\tObj: obj,\n\t\tSelectSet: []types.BaseSelectionSpec{\n\t\t\t&types.TraversalSpec{\n\t\t\t\tSelectionSpec: types.SelectionSpec{Name: \"traverseParent\"},\n\t\t\t\tType:          \"ManagedEntity\",\n\t\t\t\tPath:          \"parent\",\n\t\t\t\tSkip:          types.NewBool(false),\n\t\t\t\tSelectSet: []types.BaseSelectionSpec{\n\t\t\t\t\t&types.SelectionSpec{Name: \"traverseParent\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\t&types.TraversalSpec{\n\t\t\t\tSelectionSpec: types.SelectionSpec{},\n\t\t\t\tType:          \"VirtualMachine\",\n\t\t\t\tPath:          \"parentVApp\",\n\t\t\t\tSkip:          types.NewBool(false),\n\t\t\t\tSelectSet: []types.BaseSelectionSpec{\n\t\t\t\t\t&types.SelectionSpec{Name: \"traverseParent\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tSkip: types.NewBool(false),\n\t}\n\n\tpspec := []types.PropertySpec{\n\t\t{\n\t\t\tType:    \"ManagedEntity\",\n\t\t\tPathSet: []string{\"name\", 
\"parent\"},\n\t\t},\n\t\t{\n\t\t\tType:    \"VirtualMachine\",\n\t\t\tPathSet: []string{\"parentVApp\"},\n\t\t},\n\t}\n\n\treq := types.RetrieveProperties{\n\t\tThis: pc,\n\t\tSpecSet: []types.PropertyFilterSpec{\n\t\t\t{\n\t\t\t\tObjectSet: []types.ObjectSpec{ospec},\n\t\t\t\tPropSet:   pspec,\n\t\t\t},\n\t\t},\n\t}\n\n\tvar ifaces []interface{}\n\terr := RetrievePropertiesForRequest(ctx, rt, req, &ifaces)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar out []ManagedEntity\n\n\t// Build ancestry tree by iteratively finding a new child.\n\tfor len(out) < len(ifaces) {\n\t\tvar find types.ManagedObjectReference\n\n\t\tif len(out) > 0 {\n\t\t\tfind = out[len(out)-1].Self\n\t\t}\n\n\t\t// Find entity we're looking for given the last entity in the current tree.\n\t\tfor _, iface := range ifaces {\n\t\t\tme := iface.(IsManagedEntity).GetManagedEntity()\n\n\t\t\tif me.Name == \"\" {\n\t\t\t\t// The types below have their own 'Name' field, so ManagedEntity.Name (me.Name) is empty.\n\t\t\t\t// We only hit this case when the 'obj' param is one of these types.\n\t\t\t\t// In most cases, 'obj' is a Folder so Name isn't collected in this call.\n\t\t\t\tswitch x := iface.(type) {\n\t\t\t\tcase Network:\n\t\t\t\t\tme.Name = x.Name\n\t\t\t\tcase DistributedVirtualSwitch:\n\t\t\t\t\tme.Name = x.Name\n\t\t\t\tcase DistributedVirtualPortgroup:\n\t\t\t\t\tme.Name = x.Name\n\t\t\t\tcase OpaqueNetwork:\n\t\t\t\t\tme.Name = x.Name\n\t\t\t\tdefault:\n\t\t\t\t\t// ManagedEntity always has a Name, if we hit this point we missed a case above.\n\t\t\t\t\tpanic(fmt.Sprintf(\"%#v Name is empty\", me.Reference()))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif me.Parent == nil {\n\t\t\t\t// Special case for VirtualMachine within VirtualApp,\n\t\t\t\t// unlikely to hit this other than via Finder.Element()\n\t\t\t\tswitch x := iface.(type) {\n\t\t\t\tcase VirtualMachine:\n\t\t\t\t\tme.Parent = x.ParentVApp\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif me.Parent == nil {\n\t\t\t\tout = append(out, 
me)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif *me.Parent == find {\n\t\t\t\tout = append(out, me)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn out, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/mo/entity.go",
    "content": "/*\nCopyright (c) 2016 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage mo\n\n// Entity is the interface that is implemented by all managed objects\n// that extend ManagedEntity.\ntype Entity interface {\n\tReference\n\tEntity() *ManagedEntity\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/mo/extra.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage mo\n\ntype IsManagedEntity interface {\n\tGetManagedEntity() ManagedEntity\n}\n\nfunc (m ComputeResource) GetManagedEntity() ManagedEntity {\n\treturn m.ManagedEntity\n}\n\nfunc (m Datacenter) GetManagedEntity() ManagedEntity {\n\treturn m.ManagedEntity\n}\n\nfunc (m Datastore) GetManagedEntity() ManagedEntity {\n\treturn m.ManagedEntity\n}\n\nfunc (m DistributedVirtualSwitch) GetManagedEntity() ManagedEntity {\n\treturn m.ManagedEntity\n}\n\nfunc (m DistributedVirtualPortgroup) GetManagedEntity() ManagedEntity {\n\treturn m.ManagedEntity\n}\n\nfunc (m Folder) GetManagedEntity() ManagedEntity {\n\treturn m.ManagedEntity\n}\n\nfunc (m HostSystem) GetManagedEntity() ManagedEntity {\n\treturn m.ManagedEntity\n}\n\nfunc (m Network) GetManagedEntity() ManagedEntity {\n\treturn m.ManagedEntity\n}\n\nfunc (m ResourcePool) GetManagedEntity() ManagedEntity {\n\treturn m.ManagedEntity\n}\n\nfunc (m VirtualMachine) GetManagedEntity() ManagedEntity {\n\treturn m.ManagedEntity\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/mo/fixtures/cluster_host_property.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<RetrievePropertiesResponse xmlns=\"urn:vim25\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n <returnval>\n  <obj type=\"ClusterComputeResource\">domain-c7</obj>\n  <propSet>\n   <name>host</name>\n   <val xsi:type=\"ArrayOfManagedObjectReference\">\n    <ManagedObjectReference type=\"HostSystem\" xsi:type=\"ManagedObjectReference\">host-14</ManagedObjectReference>\n    <ManagedObjectReference type=\"HostSystem\" xsi:type=\"ManagedObjectReference\">host-17</ManagedObjectReference>\n    <ManagedObjectReference type=\"HostSystem\" xsi:type=\"ManagedObjectReference\">host-19</ManagedObjectReference>\n    <ManagedObjectReference type=\"HostSystem\" xsi:type=\"ManagedObjectReference\">host-52</ManagedObjectReference>\n   </val>\n  </propSet>\n </returnval>\n</RetrievePropertiesResponse>\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/mo/fixtures/hostsystem_list_name_property.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<RetrievePropertiesResponse xmlns=\"urn:vim25\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n <returnval>\n  <obj type=\"HostSystem\">host-10</obj>\n  <propSet>\n   <name>name</name>\n   <val xsi:type=\"xsd:string\">host-01.example.com</val>\n  </propSet>\n </returnval>\n <returnval>\n  <obj type=\"HostSystem\">host-30</obj>\n  <propSet>\n   <name>name</name>\n   <val xsi:type=\"xsd:string\">host-02.example.com</val>\n  </propSet>\n </returnval>\n</RetrievePropertiesResponse>\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/mo/fixtures/nested_property.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<RetrievePropertiesResponse xmlns=\"urn:vim25\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n  <returnval>\n    <obj type=\"VirtualMachine\">vm-411</obj>\n    <propSet>\n      <name>config.name</name>\n      <val xsi:type=\"xsd:string\">kubernetes-master</val>\n    </propSet>\n    <propSet>\n      <name>config.uuid</name>\n      <val xsi:type=\"xsd:string\">422ec880-ab06-06b4-23f3-beb7a052a4c9</val>\n    </propSet>\n  </returnval>\n</RetrievePropertiesResponse>\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/mo/fixtures/not_authenticated_fault.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<RetrievePropertiesResponse xmlns=\"urn:vim25\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n  <returnval>\n    <obj type=\"SessionManager\">SessionManager</obj>\n    <propSet>\n      <name>defaultLocale</name>\n      <val xsi:type=\"xsd:string\">en</val>\n    </propSet>\n    <propSet>\n      <name>messageLocaleList</name>\n      <val xsi:type=\"ArrayOfString\">\n        <string xsi:type=\"xsd:string\">ja</string>\n        <string xsi:type=\"xsd:string\">zh_CN</string>\n        <string xsi:type=\"xsd:string\">en</string>\n        <string xsi:type=\"xsd:string\">de</string>\n        <string xsi:type=\"xsd:string\">zh_TW</string>\n        <string xsi:type=\"xsd:string\">ko</string>\n        <string xsi:type=\"xsd:string\">fr</string>\n      </val>\n    </propSet>\n    <missingSet>\n      <path>message</path>\n      <fault>\n        <fault xsi:type=\"NotAuthenticated\">\n          <object type=\"Folder\">group-d1</object>\n          <privilegeId>System.View</privilegeId>\n        </fault>\n        <localizedMessage/>\n      </fault>\n    </missingSet>\n    <missingSet>\n      <path>sessionList</path>\n      <fault>\n        <fault xsi:type=\"NotAuthenticated\">\n          <object type=\"Folder\">group-d1</object>\n          <privilegeId>Sessions.TerminateSession</privilegeId>\n        </fault>\n        <localizedMessage/>\n      </fault>\n    </missingSet>\n  </returnval>\n</RetrievePropertiesResponse>\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/mo/fixtures/pointer_property.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<RetrievePropertiesResponse xmlns=\"urn:vim25\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n  <returnval>\n    <obj type=\"VirtualMachine\">vm-411</obj>\n    <propSet>\n      <name>config.bootOptions</name>\n      <val xsi:type=\"VirtualMachineBootOptions\">\n        <bootDelay>0</bootDelay>\n        <enterBIOSSetup>false</enterBIOSSetup>\n        <bootRetryEnabled>false</bootRetryEnabled>\n        <bootRetryDelay>10000</bootRetryDelay>\n      </val>\n    </propSet>\n  </returnval>\n</RetrievePropertiesResponse>\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/mo/mo.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage mo\n\nimport (\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype Alarm struct {\n\tExtensibleManagedObject\n\n\tInfo types.AlarmInfo `mo:\"info\"`\n}\n\nfunc init() {\n\tt[\"Alarm\"] = reflect.TypeOf((*Alarm)(nil)).Elem()\n}\n\ntype AlarmManager struct {\n\tSelf types.ManagedObjectReference\n\n\tDefaultExpression []types.BaseAlarmExpression `mo:\"defaultExpression\"`\n\tDescription       types.AlarmDescription      `mo:\"description\"`\n}\n\nfunc (m AlarmManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"AlarmManager\"] = reflect.TypeOf((*AlarmManager)(nil)).Elem()\n}\n\ntype AuthorizationManager struct {\n\tSelf types.ManagedObjectReference\n\n\tPrivilegeList []types.AuthorizationPrivilege `mo:\"privilegeList\"`\n\tRoleList      []types.AuthorizationRole      `mo:\"roleList\"`\n\tDescription   types.AuthorizationDescription `mo:\"description\"`\n}\n\nfunc (m AuthorizationManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"AuthorizationManager\"] = reflect.TypeOf((*AuthorizationManager)(nil)).Elem()\n}\n\ntype CertificateManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m CertificateManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"CertificateManager\"] = 
reflect.TypeOf((*CertificateManager)(nil)).Elem()\n}\n\ntype ClusterComputeResource struct {\n\tComputeResource\n\n\tConfiguration     types.ClusterConfigInfo          `mo:\"configuration\"`\n\tRecommendation    []types.ClusterRecommendation    `mo:\"recommendation\"`\n\tDrsRecommendation []types.ClusterDrsRecommendation `mo:\"drsRecommendation\"`\n\tMigrationHistory  []types.ClusterDrsMigration      `mo:\"migrationHistory\"`\n\tActionHistory     []types.ClusterActionHistory     `mo:\"actionHistory\"`\n\tDrsFault          []types.ClusterDrsFaults         `mo:\"drsFault\"`\n}\n\nfunc init() {\n\tt[\"ClusterComputeResource\"] = reflect.TypeOf((*ClusterComputeResource)(nil)).Elem()\n}\n\ntype ClusterEVCManager struct {\n\tExtensibleManagedObject\n\n\tManagedCluster types.ManagedObjectReference    `mo:\"managedCluster\"`\n\tEvcState       types.ClusterEVCManagerEVCState `mo:\"evcState\"`\n}\n\nfunc init() {\n\tt[\"ClusterEVCManager\"] = reflect.TypeOf((*ClusterEVCManager)(nil)).Elem()\n}\n\ntype ClusterProfile struct {\n\tProfile\n}\n\nfunc init() {\n\tt[\"ClusterProfile\"] = reflect.TypeOf((*ClusterProfile)(nil)).Elem()\n}\n\ntype ClusterProfileManager struct {\n\tProfileManager\n}\n\nfunc init() {\n\tt[\"ClusterProfileManager\"] = reflect.TypeOf((*ClusterProfileManager)(nil)).Elem()\n}\n\ntype ComputeResource struct {\n\tManagedEntity\n\n\tResourcePool       *types.ManagedObjectReference       `mo:\"resourcePool\"`\n\tHost               []types.ManagedObjectReference      `mo:\"host\"`\n\tDatastore          []types.ManagedObjectReference      `mo:\"datastore\"`\n\tNetwork            []types.ManagedObjectReference      `mo:\"network\"`\n\tSummary            types.BaseComputeResourceSummary    `mo:\"summary\"`\n\tEnvironmentBrowser *types.ManagedObjectReference       `mo:\"environmentBrowser\"`\n\tConfigurationEx    types.BaseComputeResourceConfigInfo `mo:\"configurationEx\"`\n}\n\nfunc (m *ComputeResource) Entity() *ManagedEntity {\n\treturn 
&m.ManagedEntity\n}\n\nfunc init() {\n\tt[\"ComputeResource\"] = reflect.TypeOf((*ComputeResource)(nil)).Elem()\n}\n\ntype ContainerView struct {\n\tManagedObjectView\n\n\tContainer types.ManagedObjectReference `mo:\"container\"`\n\tType      []string                     `mo:\"type\"`\n\tRecursive bool                         `mo:\"recursive\"`\n}\n\nfunc init() {\n\tt[\"ContainerView\"] = reflect.TypeOf((*ContainerView)(nil)).Elem()\n}\n\ntype CryptoManager struct {\n\tSelf types.ManagedObjectReference\n\n\tEnabled bool `mo:\"enabled\"`\n}\n\nfunc (m CryptoManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"CryptoManager\"] = reflect.TypeOf((*CryptoManager)(nil)).Elem()\n}\n\ntype CryptoManagerKmip struct {\n\tCryptoManager\n\n\tKmipServers []types.KmipClusterInfo `mo:\"kmipServers\"`\n}\n\nfunc init() {\n\tt[\"CryptoManagerKmip\"] = reflect.TypeOf((*CryptoManagerKmip)(nil)).Elem()\n}\n\ntype CustomFieldsManager struct {\n\tSelf types.ManagedObjectReference\n\n\tField []types.CustomFieldDef `mo:\"field\"`\n}\n\nfunc (m CustomFieldsManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"CustomFieldsManager\"] = reflect.TypeOf((*CustomFieldsManager)(nil)).Elem()\n}\n\ntype CustomizationSpecManager struct {\n\tSelf types.ManagedObjectReference\n\n\tInfo          []types.CustomizationSpecInfo `mo:\"info\"`\n\tEncryptionKey []byte                        `mo:\"encryptionKey\"`\n}\n\nfunc (m CustomizationSpecManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"CustomizationSpecManager\"] = reflect.TypeOf((*CustomizationSpecManager)(nil)).Elem()\n}\n\ntype Datacenter struct {\n\tManagedEntity\n\n\tVmFolder        types.ManagedObjectReference   `mo:\"vmFolder\"`\n\tHostFolder      types.ManagedObjectReference   `mo:\"hostFolder\"`\n\tDatastoreFolder types.ManagedObjectReference   `mo:\"datastoreFolder\"`\n\tNetworkFolder   
types.ManagedObjectReference   `mo:\"networkFolder\"`\n\tDatastore       []types.ManagedObjectReference `mo:\"datastore\"`\n\tNetwork         []types.ManagedObjectReference `mo:\"network\"`\n\tConfiguration   types.DatacenterConfigInfo     `mo:\"configuration\"`\n}\n\nfunc (m *Datacenter) Entity() *ManagedEntity {\n\treturn &m.ManagedEntity\n}\n\nfunc init() {\n\tt[\"Datacenter\"] = reflect.TypeOf((*Datacenter)(nil)).Elem()\n}\n\ntype Datastore struct {\n\tManagedEntity\n\n\tInfo              types.BaseDatastoreInfo        `mo:\"info\"`\n\tSummary           types.DatastoreSummary         `mo:\"summary\"`\n\tHost              []types.DatastoreHostMount     `mo:\"host\"`\n\tVm                []types.ManagedObjectReference `mo:\"vm\"`\n\tBrowser           types.ManagedObjectReference   `mo:\"browser\"`\n\tCapability        types.DatastoreCapability      `mo:\"capability\"`\n\tIormConfiguration *types.StorageIORMInfo         `mo:\"iormConfiguration\"`\n}\n\nfunc (m *Datastore) Entity() *ManagedEntity {\n\treturn &m.ManagedEntity\n}\n\nfunc init() {\n\tt[\"Datastore\"] = reflect.TypeOf((*Datastore)(nil)).Elem()\n}\n\ntype DatastoreNamespaceManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m DatastoreNamespaceManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"DatastoreNamespaceManager\"] = reflect.TypeOf((*DatastoreNamespaceManager)(nil)).Elem()\n}\n\ntype DiagnosticManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m DiagnosticManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"DiagnosticManager\"] = reflect.TypeOf((*DiagnosticManager)(nil)).Elem()\n}\n\ntype DistributedVirtualPortgroup struct {\n\tNetwork\n\n\tKey      string                      `mo:\"key\"`\n\tConfig   types.DVPortgroupConfigInfo `mo:\"config\"`\n\tPortKeys []string                    `mo:\"portKeys\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualPortgroup\"] = 
reflect.TypeOf((*DistributedVirtualPortgroup)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitch struct {\n\tManagedEntity\n\n\tUuid                string                         `mo:\"uuid\"`\n\tCapability          types.DVSCapability            `mo:\"capability\"`\n\tSummary             types.DVSSummary               `mo:\"summary\"`\n\tConfig              types.BaseDVSConfigInfo        `mo:\"config\"`\n\tNetworkResourcePool []types.DVSNetworkResourcePool `mo:\"networkResourcePool\"`\n\tPortgroup           []types.ManagedObjectReference `mo:\"portgroup\"`\n\tRuntime             *types.DVSRuntimeInfo          `mo:\"runtime\"`\n}\n\nfunc (m *DistributedVirtualSwitch) Entity() *ManagedEntity {\n\treturn &m.ManagedEntity\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitch\"] = reflect.TypeOf((*DistributedVirtualSwitch)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m DistributedVirtualSwitchManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchManager\"] = reflect.TypeOf((*DistributedVirtualSwitchManager)(nil)).Elem()\n}\n\ntype EnvironmentBrowser struct {\n\tSelf types.ManagedObjectReference\n\n\tDatastoreBrowser *types.ManagedObjectReference `mo:\"datastoreBrowser\"`\n}\n\nfunc (m EnvironmentBrowser) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"EnvironmentBrowser\"] = reflect.TypeOf((*EnvironmentBrowser)(nil)).Elem()\n}\n\ntype EventHistoryCollector struct {\n\tHistoryCollector\n\n\tLatestPage []types.BaseEvent `mo:\"latestPage\"`\n}\n\nfunc init() {\n\tt[\"EventHistoryCollector\"] = reflect.TypeOf((*EventHistoryCollector)(nil)).Elem()\n}\n\ntype EventManager struct {\n\tSelf types.ManagedObjectReference\n\n\tDescription  types.EventDescription `mo:\"description\"`\n\tLatestEvent  types.BaseEvent        `mo:\"latestEvent\"`\n\tMaxCollector int32                  `mo:\"maxCollector\"`\n}\n\nfunc 
(m EventManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"EventManager\"] = reflect.TypeOf((*EventManager)(nil)).Elem()\n}\n\ntype ExtensibleManagedObject struct {\n\tSelf types.ManagedObjectReference\n\n\tValue          []types.BaseCustomFieldValue `mo:\"value\"`\n\tAvailableField []types.CustomFieldDef       `mo:\"availableField\"`\n}\n\nfunc (m ExtensibleManagedObject) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"ExtensibleManagedObject\"] = reflect.TypeOf((*ExtensibleManagedObject)(nil)).Elem()\n}\n\ntype ExtensionManager struct {\n\tSelf types.ManagedObjectReference\n\n\tExtensionList []types.Extension `mo:\"extensionList\"`\n}\n\nfunc (m ExtensionManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"ExtensionManager\"] = reflect.TypeOf((*ExtensionManager)(nil)).Elem()\n}\n\ntype FailoverClusterConfigurator struct {\n\tSelf types.ManagedObjectReference\n\n\tDisabledConfigureMethod []string `mo:\"disabledConfigureMethod\"`\n}\n\nfunc (m FailoverClusterConfigurator) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"FailoverClusterConfigurator\"] = reflect.TypeOf((*FailoverClusterConfigurator)(nil)).Elem()\n}\n\ntype FailoverClusterManager struct {\n\tSelf types.ManagedObjectReference\n\n\tDisabledClusterMethod []string `mo:\"disabledClusterMethod\"`\n}\n\nfunc (m FailoverClusterManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"FailoverClusterManager\"] = reflect.TypeOf((*FailoverClusterManager)(nil)).Elem()\n}\n\ntype FileManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m FileManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"FileManager\"] = reflect.TypeOf((*FileManager)(nil)).Elem()\n}\n\ntype Folder struct {\n\tManagedEntity\n\n\tChildType   []string                       
`mo:\"childType\"`\n\tChildEntity []types.ManagedObjectReference `mo:\"childEntity\"`\n}\n\nfunc (m *Folder) Entity() *ManagedEntity {\n\treturn &m.ManagedEntity\n}\n\nfunc init() {\n\tt[\"Folder\"] = reflect.TypeOf((*Folder)(nil)).Elem()\n}\n\ntype GuestAliasManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m GuestAliasManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"GuestAliasManager\"] = reflect.TypeOf((*GuestAliasManager)(nil)).Elem()\n}\n\ntype GuestAuthManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m GuestAuthManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"GuestAuthManager\"] = reflect.TypeOf((*GuestAuthManager)(nil)).Elem()\n}\n\ntype GuestFileManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m GuestFileManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"GuestFileManager\"] = reflect.TypeOf((*GuestFileManager)(nil)).Elem()\n}\n\ntype GuestOperationsManager struct {\n\tSelf types.ManagedObjectReference\n\n\tAuthManager                 *types.ManagedObjectReference `mo:\"authManager\"`\n\tFileManager                 *types.ManagedObjectReference `mo:\"fileManager\"`\n\tProcessManager              *types.ManagedObjectReference `mo:\"processManager\"`\n\tGuestWindowsRegistryManager *types.ManagedObjectReference `mo:\"guestWindowsRegistryManager\"`\n\tAliasManager                *types.ManagedObjectReference `mo:\"aliasManager\"`\n}\n\nfunc (m GuestOperationsManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"GuestOperationsManager\"] = reflect.TypeOf((*GuestOperationsManager)(nil)).Elem()\n}\n\ntype GuestProcessManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m GuestProcessManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"GuestProcessManager\"] = 
reflect.TypeOf((*GuestProcessManager)(nil)).Elem()\n}\n\ntype GuestWindowsRegistryManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m GuestWindowsRegistryManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"GuestWindowsRegistryManager\"] = reflect.TypeOf((*GuestWindowsRegistryManager)(nil)).Elem()\n}\n\ntype HealthUpdateManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m HealthUpdateManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HealthUpdateManager\"] = reflect.TypeOf((*HealthUpdateManager)(nil)).Elem()\n}\n\ntype HistoryCollector struct {\n\tSelf types.ManagedObjectReference\n\n\tFilter types.AnyType `mo:\"filter\"`\n}\n\nfunc (m HistoryCollector) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HistoryCollector\"] = reflect.TypeOf((*HistoryCollector)(nil)).Elem()\n}\n\ntype HostAccessManager struct {\n\tSelf types.ManagedObjectReference\n\n\tLockdownMode types.HostLockdownMode `mo:\"lockdownMode\"`\n}\n\nfunc (m HostAccessManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostAccessManager\"] = reflect.TypeOf((*HostAccessManager)(nil)).Elem()\n}\n\ntype HostActiveDirectoryAuthentication struct {\n\tHostDirectoryStore\n}\n\nfunc init() {\n\tt[\"HostActiveDirectoryAuthentication\"] = reflect.TypeOf((*HostActiveDirectoryAuthentication)(nil)).Elem()\n}\n\ntype HostAuthenticationManager struct {\n\tSelf types.ManagedObjectReference\n\n\tInfo           types.HostAuthenticationManagerInfo `mo:\"info\"`\n\tSupportedStore []types.ManagedObjectReference      `mo:\"supportedStore\"`\n}\n\nfunc (m HostAuthenticationManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostAuthenticationManager\"] = reflect.TypeOf((*HostAuthenticationManager)(nil)).Elem()\n}\n\ntype HostAuthenticationStore struct {\n\tSelf 
types.ManagedObjectReference\n\n\tInfo types.BaseHostAuthenticationStoreInfo `mo:\"info\"`\n}\n\nfunc (m HostAuthenticationStore) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostAuthenticationStore\"] = reflect.TypeOf((*HostAuthenticationStore)(nil)).Elem()\n}\n\ntype HostAutoStartManager struct {\n\tSelf types.ManagedObjectReference\n\n\tConfig types.HostAutoStartManagerConfig `mo:\"config\"`\n}\n\nfunc (m HostAutoStartManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostAutoStartManager\"] = reflect.TypeOf((*HostAutoStartManager)(nil)).Elem()\n}\n\ntype HostBootDeviceSystem struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m HostBootDeviceSystem) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostBootDeviceSystem\"] = reflect.TypeOf((*HostBootDeviceSystem)(nil)).Elem()\n}\n\ntype HostCacheConfigurationManager struct {\n\tSelf types.ManagedObjectReference\n\n\tCacheConfigurationInfo []types.HostCacheConfigurationInfo `mo:\"cacheConfigurationInfo\"`\n}\n\nfunc (m HostCacheConfigurationManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostCacheConfigurationManager\"] = reflect.TypeOf((*HostCacheConfigurationManager)(nil)).Elem()\n}\n\ntype HostCertificateManager struct {\n\tSelf types.ManagedObjectReference\n\n\tCertificateInfo types.HostCertificateManagerCertificateInfo `mo:\"certificateInfo\"`\n}\n\nfunc (m HostCertificateManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostCertificateManager\"] = reflect.TypeOf((*HostCertificateManager)(nil)).Elem()\n}\n\ntype HostCpuSchedulerSystem struct {\n\tExtensibleManagedObject\n\n\tHyperthreadInfo *types.HostHyperThreadScheduleInfo `mo:\"hyperthreadInfo\"`\n}\n\nfunc init() {\n\tt[\"HostCpuSchedulerSystem\"] = reflect.TypeOf((*HostCpuSchedulerSystem)(nil)).Elem()\n}\n\ntype HostDatastoreBrowser 
struct {\n\tSelf types.ManagedObjectReference\n\n\tDatastore     []types.ManagedObjectReference `mo:\"datastore\"`\n\tSupportedType []types.BaseFileQuery          `mo:\"supportedType\"`\n}\n\nfunc (m HostDatastoreBrowser) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostDatastoreBrowser\"] = reflect.TypeOf((*HostDatastoreBrowser)(nil)).Elem()\n}\n\ntype HostDatastoreSystem struct {\n\tSelf types.ManagedObjectReference\n\n\tDatastore    []types.ManagedObjectReference        `mo:\"datastore\"`\n\tCapabilities types.HostDatastoreSystemCapabilities `mo:\"capabilities\"`\n}\n\nfunc (m HostDatastoreSystem) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostDatastoreSystem\"] = reflect.TypeOf((*HostDatastoreSystem)(nil)).Elem()\n}\n\ntype HostDateTimeSystem struct {\n\tSelf types.ManagedObjectReference\n\n\tDateTimeInfo types.HostDateTimeInfo `mo:\"dateTimeInfo\"`\n}\n\nfunc (m HostDateTimeSystem) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostDateTimeSystem\"] = reflect.TypeOf((*HostDateTimeSystem)(nil)).Elem()\n}\n\ntype HostDiagnosticSystem struct {\n\tSelf types.ManagedObjectReference\n\n\tActivePartition *types.HostDiagnosticPartition `mo:\"activePartition\"`\n}\n\nfunc (m HostDiagnosticSystem) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostDiagnosticSystem\"] = reflect.TypeOf((*HostDiagnosticSystem)(nil)).Elem()\n}\n\ntype HostDirectoryStore struct {\n\tHostAuthenticationStore\n}\n\nfunc init() {\n\tt[\"HostDirectoryStore\"] = reflect.TypeOf((*HostDirectoryStore)(nil)).Elem()\n}\n\ntype HostEsxAgentHostManager struct {\n\tSelf types.ManagedObjectReference\n\n\tConfigInfo types.HostEsxAgentHostManagerConfigInfo `mo:\"configInfo\"`\n}\n\nfunc (m HostEsxAgentHostManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostEsxAgentHostManager\"] = 
reflect.TypeOf((*HostEsxAgentHostManager)(nil)).Elem()\n}\n\ntype HostFirewallSystem struct {\n\tExtensibleManagedObject\n\n\tFirewallInfo *types.HostFirewallInfo `mo:\"firewallInfo\"`\n}\n\nfunc init() {\n\tt[\"HostFirewallSystem\"] = reflect.TypeOf((*HostFirewallSystem)(nil)).Elem()\n}\n\ntype HostFirmwareSystem struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m HostFirmwareSystem) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostFirmwareSystem\"] = reflect.TypeOf((*HostFirmwareSystem)(nil)).Elem()\n}\n\ntype HostGraphicsManager struct {\n\tExtensibleManagedObject\n\n\tGraphicsInfo           []types.HostGraphicsInfo  `mo:\"graphicsInfo\"`\n\tGraphicsConfig         *types.HostGraphicsConfig `mo:\"graphicsConfig\"`\n\tSharedPassthruGpuTypes []string                  `mo:\"sharedPassthruGpuTypes\"`\n}\n\nfunc init() {\n\tt[\"HostGraphicsManager\"] = reflect.TypeOf((*HostGraphicsManager)(nil)).Elem()\n}\n\ntype HostHealthStatusSystem struct {\n\tSelf types.ManagedObjectReference\n\n\tRuntime types.HealthSystemRuntime `mo:\"runtime\"`\n}\n\nfunc (m HostHealthStatusSystem) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostHealthStatusSystem\"] = reflect.TypeOf((*HostHealthStatusSystem)(nil)).Elem()\n}\n\ntype HostImageConfigManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m HostImageConfigManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostImageConfigManager\"] = reflect.TypeOf((*HostImageConfigManager)(nil)).Elem()\n}\n\ntype HostKernelModuleSystem struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m HostKernelModuleSystem) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostKernelModuleSystem\"] = reflect.TypeOf((*HostKernelModuleSystem)(nil)).Elem()\n}\n\ntype HostLocalAccountManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m HostLocalAccountManager) 
Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostLocalAccountManager\"] = reflect.TypeOf((*HostLocalAccountManager)(nil)).Elem()\n}\n\ntype HostLocalAuthentication struct {\n\tHostAuthenticationStore\n}\n\nfunc init() {\n\tt[\"HostLocalAuthentication\"] = reflect.TypeOf((*HostLocalAuthentication)(nil)).Elem()\n}\n\ntype HostMemorySystem struct {\n\tExtensibleManagedObject\n\n\tConsoleReservationInfo        *types.ServiceConsoleReservationInfo       `mo:\"consoleReservationInfo\"`\n\tVirtualMachineReservationInfo *types.VirtualMachineMemoryReservationInfo `mo:\"virtualMachineReservationInfo\"`\n}\n\nfunc init() {\n\tt[\"HostMemorySystem\"] = reflect.TypeOf((*HostMemorySystem)(nil)).Elem()\n}\n\ntype HostNetworkSystem struct {\n\tExtensibleManagedObject\n\n\tCapabilities         *types.HostNetCapabilities        `mo:\"capabilities\"`\n\tNetworkInfo          *types.HostNetworkInfo            `mo:\"networkInfo\"`\n\tOffloadCapabilities  *types.HostNetOffloadCapabilities `mo:\"offloadCapabilities\"`\n\tNetworkConfig        *types.HostNetworkConfig          `mo:\"networkConfig\"`\n\tDnsConfig            types.BaseHostDnsConfig           `mo:\"dnsConfig\"`\n\tIpRouteConfig        types.BaseHostIpRouteConfig       `mo:\"ipRouteConfig\"`\n\tConsoleIpRouteConfig types.BaseHostIpRouteConfig       `mo:\"consoleIpRouteConfig\"`\n}\n\nfunc init() {\n\tt[\"HostNetworkSystem\"] = reflect.TypeOf((*HostNetworkSystem)(nil)).Elem()\n}\n\ntype HostPatchManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m HostPatchManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostPatchManager\"] = reflect.TypeOf((*HostPatchManager)(nil)).Elem()\n}\n\ntype HostPciPassthruSystem struct {\n\tExtensibleManagedObject\n\n\tPciPassthruInfo     []types.BaseHostPciPassthruInfo     `mo:\"pciPassthruInfo\"`\n\tSriovDevicePoolInfo []types.BaseHostSriovDevicePoolInfo `mo:\"sriovDevicePoolInfo\"`\n}\n\nfunc 
init() {\n\tt[\"HostPciPassthruSystem\"] = reflect.TypeOf((*HostPciPassthruSystem)(nil)).Elem()\n}\n\ntype HostPowerSystem struct {\n\tSelf types.ManagedObjectReference\n\n\tCapability types.PowerSystemCapability `mo:\"capability\"`\n\tInfo       types.PowerSystemInfo       `mo:\"info\"`\n}\n\nfunc (m HostPowerSystem) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostPowerSystem\"] = reflect.TypeOf((*HostPowerSystem)(nil)).Elem()\n}\n\ntype HostProfile struct {\n\tProfile\n\n\tReferenceHost *types.ManagedObjectReference `mo:\"referenceHost\"`\n}\n\nfunc init() {\n\tt[\"HostProfile\"] = reflect.TypeOf((*HostProfile)(nil)).Elem()\n}\n\ntype HostProfileManager struct {\n\tProfileManager\n}\n\nfunc init() {\n\tt[\"HostProfileManager\"] = reflect.TypeOf((*HostProfileManager)(nil)).Elem()\n}\n\ntype HostServiceSystem struct {\n\tExtensibleManagedObject\n\n\tServiceInfo types.HostServiceInfo `mo:\"serviceInfo\"`\n}\n\nfunc init() {\n\tt[\"HostServiceSystem\"] = reflect.TypeOf((*HostServiceSystem)(nil)).Elem()\n}\n\ntype HostSnmpSystem struct {\n\tSelf types.ManagedObjectReference\n\n\tConfiguration types.HostSnmpConfigSpec        `mo:\"configuration\"`\n\tLimits        types.HostSnmpSystemAgentLimits `mo:\"limits\"`\n}\n\nfunc (m HostSnmpSystem) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostSnmpSystem\"] = reflect.TypeOf((*HostSnmpSystem)(nil)).Elem()\n}\n\ntype HostSpecificationManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m HostSpecificationManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostSpecificationManager\"] = reflect.TypeOf((*HostSpecificationManager)(nil)).Elem()\n}\n\ntype HostStorageSystem struct {\n\tExtensibleManagedObject\n\n\tStorageDeviceInfo    *types.HostStorageDeviceInfo   `mo:\"storageDeviceInfo\"`\n\tFileSystemVolumeInfo types.HostFileSystemVolumeInfo `mo:\"fileSystemVolumeInfo\"`\n\tSystemFile     
      []string                       `mo:\"systemFile\"`\n\tMultipathStateInfo   *types.HostMultipathStateInfo  `mo:\"multipathStateInfo\"`\n}\n\nfunc init() {\n\tt[\"HostStorageSystem\"] = reflect.TypeOf((*HostStorageSystem)(nil)).Elem()\n}\n\ntype HostSystem struct {\n\tManagedEntity\n\n\tRuntime            types.HostRuntimeInfo            `mo:\"runtime\"`\n\tSummary            types.HostListSummary            `mo:\"summary\"`\n\tHardware           *types.HostHardwareInfo          `mo:\"hardware\"`\n\tCapability         *types.HostCapability            `mo:\"capability\"`\n\tLicensableResource types.HostLicensableResourceInfo `mo:\"licensableResource\"`\n\tConfigManager      types.HostConfigManager          `mo:\"configManager\"`\n\tConfig             *types.HostConfigInfo            `mo:\"config\"`\n\tVm                 []types.ManagedObjectReference   `mo:\"vm\"`\n\tDatastore          []types.ManagedObjectReference   `mo:\"datastore\"`\n\tNetwork            []types.ManagedObjectReference   `mo:\"network\"`\n\tDatastoreBrowser   types.ManagedObjectReference     `mo:\"datastoreBrowser\"`\n\tSystemResources    *types.HostSystemResourceInfo    `mo:\"systemResources\"`\n}\n\nfunc (m *HostSystem) Entity() *ManagedEntity {\n\treturn &m.ManagedEntity\n}\n\nfunc init() {\n\tt[\"HostSystem\"] = reflect.TypeOf((*HostSystem)(nil)).Elem()\n}\n\ntype HostVFlashManager struct {\n\tSelf types.ManagedObjectReference\n\n\tVFlashConfigInfo *types.HostVFlashManagerVFlashConfigInfo `mo:\"vFlashConfigInfo\"`\n}\n\nfunc (m HostVFlashManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostVFlashManager\"] = reflect.TypeOf((*HostVFlashManager)(nil)).Elem()\n}\n\ntype HostVMotionSystem struct {\n\tExtensibleManagedObject\n\n\tNetConfig *types.HostVMotionNetConfig `mo:\"netConfig\"`\n\tIpConfig  *types.HostIpConfig         `mo:\"ipConfig\"`\n}\n\nfunc init() {\n\tt[\"HostVMotionSystem\"] = 
reflect.TypeOf((*HostVMotionSystem)(nil)).Elem()\n}\n\ntype HostVStorageObjectManager struct {\n\tVStorageObjectManagerBase\n}\n\nfunc init() {\n\tt[\"HostVStorageObjectManager\"] = reflect.TypeOf((*HostVStorageObjectManager)(nil)).Elem()\n}\n\ntype HostVirtualNicManager struct {\n\tExtensibleManagedObject\n\n\tInfo types.HostVirtualNicManagerInfo `mo:\"info\"`\n}\n\nfunc init() {\n\tt[\"HostVirtualNicManager\"] = reflect.TypeOf((*HostVirtualNicManager)(nil)).Elem()\n}\n\ntype HostVsanInternalSystem struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m HostVsanInternalSystem) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostVsanInternalSystem\"] = reflect.TypeOf((*HostVsanInternalSystem)(nil)).Elem()\n}\n\ntype HostVsanSystem struct {\n\tSelf types.ManagedObjectReference\n\n\tConfig types.VsanHostConfigInfo `mo:\"config\"`\n}\n\nfunc (m HostVsanSystem) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HostVsanSystem\"] = reflect.TypeOf((*HostVsanSystem)(nil)).Elem()\n}\n\ntype HttpNfcLease struct {\n\tSelf types.ManagedObjectReference\n\n\tInitializeProgress int32                       `mo:\"initializeProgress\"`\n\tInfo               *types.HttpNfcLeaseInfo     `mo:\"info\"`\n\tState              types.HttpNfcLeaseState     `mo:\"state\"`\n\tError              *types.LocalizedMethodFault `mo:\"error\"`\n}\n\nfunc (m HttpNfcLease) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"HttpNfcLease\"] = reflect.TypeOf((*HttpNfcLease)(nil)).Elem()\n}\n\ntype InventoryView struct {\n\tManagedObjectView\n}\n\nfunc init() {\n\tt[\"InventoryView\"] = reflect.TypeOf((*InventoryView)(nil)).Elem()\n}\n\ntype IoFilterManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m IoFilterManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"IoFilterManager\"] = 
reflect.TypeOf((*IoFilterManager)(nil)).Elem()\n}\n\ntype IpPoolManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m IpPoolManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"IpPoolManager\"] = reflect.TypeOf((*IpPoolManager)(nil)).Elem()\n}\n\ntype IscsiManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m IscsiManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"IscsiManager\"] = reflect.TypeOf((*IscsiManager)(nil)).Elem()\n}\n\ntype LicenseAssignmentManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m LicenseAssignmentManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"LicenseAssignmentManager\"] = reflect.TypeOf((*LicenseAssignmentManager)(nil)).Elem()\n}\n\ntype LicenseManager struct {\n\tSelf types.ManagedObjectReference\n\n\tSource                   types.BaseLicenseSource            `mo:\"source\"`\n\tSourceAvailable          bool                               `mo:\"sourceAvailable\"`\n\tDiagnostics              *types.LicenseDiagnostics          `mo:\"diagnostics\"`\n\tFeatureInfo              []types.LicenseFeatureInfo         `mo:\"featureInfo\"`\n\tLicensedEdition          string                             `mo:\"licensedEdition\"`\n\tLicenses                 []types.LicenseManagerLicenseInfo  `mo:\"licenses\"`\n\tLicenseAssignmentManager *types.ManagedObjectReference      `mo:\"licenseAssignmentManager\"`\n\tEvaluation               types.LicenseManagerEvaluationInfo `mo:\"evaluation\"`\n}\n\nfunc (m LicenseManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"LicenseManager\"] = reflect.TypeOf((*LicenseManager)(nil)).Elem()\n}\n\ntype ListView struct {\n\tManagedObjectView\n}\n\nfunc init() {\n\tt[\"ListView\"] = reflect.TypeOf((*ListView)(nil)).Elem()\n}\n\ntype LocalizationManager struct {\n\tSelf 
types.ManagedObjectReference\n\n\tCatalog []types.LocalizationManagerMessageCatalog `mo:\"catalog\"`\n}\n\nfunc (m LocalizationManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"LocalizationManager\"] = reflect.TypeOf((*LocalizationManager)(nil)).Elem()\n}\n\ntype ManagedEntity struct {\n\tExtensibleManagedObject\n\n\tParent              *types.ManagedObjectReference  `mo:\"parent\"`\n\tCustomValue         []types.BaseCustomFieldValue   `mo:\"customValue\"`\n\tOverallStatus       types.ManagedEntityStatus      `mo:\"overallStatus\"`\n\tConfigStatus        types.ManagedEntityStatus      `mo:\"configStatus\"`\n\tConfigIssue         []types.BaseEvent              `mo:\"configIssue\"`\n\tEffectiveRole       []int32                        `mo:\"effectiveRole\"`\n\tPermission          []types.Permission             `mo:\"permission\"`\n\tName                string                         `mo:\"name\"`\n\tDisabledMethod      []string                       `mo:\"disabledMethod\"`\n\tRecentTask          []types.ManagedObjectReference `mo:\"recentTask\"`\n\tDeclaredAlarmState  []types.AlarmState             `mo:\"declaredAlarmState\"`\n\tTriggeredAlarmState []types.AlarmState             `mo:\"triggeredAlarmState\"`\n\tAlarmActionsEnabled *bool                          `mo:\"alarmActionsEnabled\"`\n\tTag                 []types.Tag                    `mo:\"tag\"`\n}\n\nfunc init() {\n\tt[\"ManagedEntity\"] = reflect.TypeOf((*ManagedEntity)(nil)).Elem()\n}\n\ntype ManagedObjectView struct {\n\tSelf types.ManagedObjectReference\n\n\tView []types.ManagedObjectReference `mo:\"view\"`\n}\n\nfunc (m ManagedObjectView) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"ManagedObjectView\"] = reflect.TypeOf((*ManagedObjectView)(nil)).Elem()\n}\n\ntype MessageBusProxy struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m MessageBusProxy) Reference() types.ManagedObjectReference {\n\treturn 
m.Self\n}\n\nfunc init() {\n\tt[\"MessageBusProxy\"] = reflect.TypeOf((*MessageBusProxy)(nil)).Elem()\n}\n\ntype Network struct {\n\tManagedEntity\n\n\tName    string                         `mo:\"name\"`\n\tSummary types.BaseNetworkSummary       `mo:\"summary\"`\n\tHost    []types.ManagedObjectReference `mo:\"host\"`\n\tVm      []types.ManagedObjectReference `mo:\"vm\"`\n}\n\nfunc (m *Network) Entity() *ManagedEntity {\n\treturn &m.ManagedEntity\n}\n\nfunc init() {\n\tt[\"Network\"] = reflect.TypeOf((*Network)(nil)).Elem()\n}\n\ntype OpaqueNetwork struct {\n\tNetwork\n\n\tCapability  *types.OpaqueNetworkCapability `mo:\"capability\"`\n\tExtraConfig []types.BaseOptionValue        `mo:\"extraConfig\"`\n}\n\nfunc init() {\n\tt[\"OpaqueNetwork\"] = reflect.TypeOf((*OpaqueNetwork)(nil)).Elem()\n}\n\ntype OptionManager struct {\n\tSelf types.ManagedObjectReference\n\n\tSupportedOption []types.OptionDef       `mo:\"supportedOption\"`\n\tSetting         []types.BaseOptionValue `mo:\"setting\"`\n}\n\nfunc (m OptionManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"OptionManager\"] = reflect.TypeOf((*OptionManager)(nil)).Elem()\n}\n\ntype OverheadMemoryManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m OverheadMemoryManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"OverheadMemoryManager\"] = reflect.TypeOf((*OverheadMemoryManager)(nil)).Elem()\n}\n\ntype OvfManager struct {\n\tSelf types.ManagedObjectReference\n\n\tOvfImportOption []types.OvfOptionInfo `mo:\"ovfImportOption\"`\n\tOvfExportOption []types.OvfOptionInfo `mo:\"ovfExportOption\"`\n}\n\nfunc (m OvfManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"OvfManager\"] = reflect.TypeOf((*OvfManager)(nil)).Elem()\n}\n\ntype PerformanceManager struct {\n\tSelf types.ManagedObjectReference\n\n\tDescription        types.PerformanceDescription 
`mo:\"description\"`\n\tHistoricalInterval []types.PerfInterval         `mo:\"historicalInterval\"`\n\tPerfCounter        []types.PerfCounterInfo      `mo:\"perfCounter\"`\n}\n\nfunc (m PerformanceManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"PerformanceManager\"] = reflect.TypeOf((*PerformanceManager)(nil)).Elem()\n}\n\ntype Profile struct {\n\tSelf types.ManagedObjectReference\n\n\tConfig           types.BaseProfileConfigInfo    `mo:\"config\"`\n\tDescription      *types.ProfileDescription      `mo:\"description\"`\n\tName             string                         `mo:\"name\"`\n\tCreatedTime      time.Time                      `mo:\"createdTime\"`\n\tModifiedTime     time.Time                      `mo:\"modifiedTime\"`\n\tEntity           []types.ManagedObjectReference `mo:\"entity\"`\n\tComplianceStatus string                         `mo:\"complianceStatus\"`\n}\n\nfunc (m Profile) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"Profile\"] = reflect.TypeOf((*Profile)(nil)).Elem()\n}\n\ntype ProfileComplianceManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m ProfileComplianceManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"ProfileComplianceManager\"] = reflect.TypeOf((*ProfileComplianceManager)(nil)).Elem()\n}\n\ntype ProfileManager struct {\n\tSelf types.ManagedObjectReference\n\n\tProfile []types.ManagedObjectReference `mo:\"profile\"`\n}\n\nfunc (m ProfileManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"ProfileManager\"] = reflect.TypeOf((*ProfileManager)(nil)).Elem()\n}\n\ntype PropertyCollector struct {\n\tSelf types.ManagedObjectReference\n\n\tFilter []types.ManagedObjectReference `mo:\"filter\"`\n}\n\nfunc (m PropertyCollector) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"PropertyCollector\"] = 
reflect.TypeOf((*PropertyCollector)(nil)).Elem()\n}\n\ntype PropertyFilter struct {\n\tSelf types.ManagedObjectReference\n\n\tSpec           types.PropertyFilterSpec `mo:\"spec\"`\n\tPartialUpdates bool                     `mo:\"partialUpdates\"`\n}\n\nfunc (m PropertyFilter) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"PropertyFilter\"] = reflect.TypeOf((*PropertyFilter)(nil)).Elem()\n}\n\ntype ResourcePlanningManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m ResourcePlanningManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"ResourcePlanningManager\"] = reflect.TypeOf((*ResourcePlanningManager)(nil)).Elem()\n}\n\ntype ResourcePool struct {\n\tManagedEntity\n\n\tSummary            types.BaseResourcePoolSummary  `mo:\"summary\"`\n\tRuntime            types.ResourcePoolRuntimeInfo  `mo:\"runtime\"`\n\tOwner              types.ManagedObjectReference   `mo:\"owner\"`\n\tResourcePool       []types.ManagedObjectReference `mo:\"resourcePool\"`\n\tVm                 []types.ManagedObjectReference `mo:\"vm\"`\n\tConfig             types.ResourceConfigSpec       `mo:\"config\"`\n\tChildConfiguration []types.ResourceConfigSpec     `mo:\"childConfiguration\"`\n}\n\nfunc (m *ResourcePool) Entity() *ManagedEntity {\n\treturn &m.ManagedEntity\n}\n\nfunc init() {\n\tt[\"ResourcePool\"] = reflect.TypeOf((*ResourcePool)(nil)).Elem()\n}\n\ntype ScheduledTask struct {\n\tExtensibleManagedObject\n\n\tInfo types.ScheduledTaskInfo `mo:\"info\"`\n}\n\nfunc init() {\n\tt[\"ScheduledTask\"] = reflect.TypeOf((*ScheduledTask)(nil)).Elem()\n}\n\ntype ScheduledTaskManager struct {\n\tSelf types.ManagedObjectReference\n\n\tScheduledTask []types.ManagedObjectReference `mo:\"scheduledTask\"`\n\tDescription   types.ScheduledTaskDescription `mo:\"description\"`\n}\n\nfunc (m ScheduledTaskManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() 
{\n\tt[\"ScheduledTaskManager\"] = reflect.TypeOf((*ScheduledTaskManager)(nil)).Elem()\n}\n\ntype SearchIndex struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m SearchIndex) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"SearchIndex\"] = reflect.TypeOf((*SearchIndex)(nil)).Elem()\n}\n\ntype ServiceInstance struct {\n\tSelf types.ManagedObjectReference\n\n\tServerClock time.Time            `mo:\"serverClock\"`\n\tCapability  types.Capability     `mo:\"capability\"`\n\tContent     types.ServiceContent `mo:\"content\"`\n}\n\nfunc (m ServiceInstance) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"ServiceInstance\"] = reflect.TypeOf((*ServiceInstance)(nil)).Elem()\n}\n\ntype ServiceManager struct {\n\tSelf types.ManagedObjectReference\n\n\tService []types.ServiceManagerServiceInfo `mo:\"service\"`\n}\n\nfunc (m ServiceManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"ServiceManager\"] = reflect.TypeOf((*ServiceManager)(nil)).Elem()\n}\n\ntype SessionManager struct {\n\tSelf types.ManagedObjectReference\n\n\tSessionList         []types.UserSession `mo:\"sessionList\"`\n\tCurrentSession      *types.UserSession  `mo:\"currentSession\"`\n\tMessage             *string             `mo:\"message\"`\n\tMessageLocaleList   []string            `mo:\"messageLocaleList\"`\n\tSupportedLocaleList []string            `mo:\"supportedLocaleList\"`\n\tDefaultLocale       string              `mo:\"defaultLocale\"`\n}\n\nfunc (m SessionManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"SessionManager\"] = reflect.TypeOf((*SessionManager)(nil)).Elem()\n}\n\ntype SimpleCommand struct {\n\tSelf types.ManagedObjectReference\n\n\tEncodingType types.SimpleCommandEncoding     `mo:\"encodingType\"`\n\tEntity       types.ServiceManagerServiceInfo `mo:\"entity\"`\n}\n\nfunc (m SimpleCommand) Reference() 
types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"SimpleCommand\"] = reflect.TypeOf((*SimpleCommand)(nil)).Elem()\n}\n\ntype StoragePod struct {\n\tFolder\n\n\tSummary            *types.StoragePodSummary  `mo:\"summary\"`\n\tPodStorageDrsEntry *types.PodStorageDrsEntry `mo:\"podStorageDrsEntry\"`\n}\n\nfunc init() {\n\tt[\"StoragePod\"] = reflect.TypeOf((*StoragePod)(nil)).Elem()\n}\n\ntype StorageResourceManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m StorageResourceManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"StorageResourceManager\"] = reflect.TypeOf((*StorageResourceManager)(nil)).Elem()\n}\n\ntype Task struct {\n\tExtensibleManagedObject\n\n\tInfo types.TaskInfo `mo:\"info\"`\n}\n\nfunc init() {\n\tt[\"Task\"] = reflect.TypeOf((*Task)(nil)).Elem()\n}\n\ntype TaskHistoryCollector struct {\n\tHistoryCollector\n\n\tLatestPage []types.TaskInfo `mo:\"latestPage\"`\n}\n\nfunc init() {\n\tt[\"TaskHistoryCollector\"] = reflect.TypeOf((*TaskHistoryCollector)(nil)).Elem()\n}\n\ntype TaskManager struct {\n\tSelf types.ManagedObjectReference\n\n\tRecentTask   []types.ManagedObjectReference `mo:\"recentTask\"`\n\tDescription  types.TaskDescription          `mo:\"description\"`\n\tMaxCollector int32                          `mo:\"maxCollector\"`\n}\n\nfunc (m TaskManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"TaskManager\"] = reflect.TypeOf((*TaskManager)(nil)).Elem()\n}\n\ntype UserDirectory struct {\n\tSelf types.ManagedObjectReference\n\n\tDomainList []string `mo:\"domainList\"`\n}\n\nfunc (m UserDirectory) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"UserDirectory\"] = reflect.TypeOf((*UserDirectory)(nil)).Elem()\n}\n\ntype VStorageObjectManagerBase struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m VStorageObjectManagerBase) Reference() types.ManagedObjectReference {\n\treturn 
m.Self\n}\n\nfunc init() {\n\tt[\"VStorageObjectManagerBase\"] = reflect.TypeOf((*VStorageObjectManagerBase)(nil)).Elem()\n}\n\ntype VcenterVStorageObjectManager struct {\n\tVStorageObjectManagerBase\n}\n\nfunc init() {\n\tt[\"VcenterVStorageObjectManager\"] = reflect.TypeOf((*VcenterVStorageObjectManager)(nil)).Elem()\n}\n\ntype View struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m View) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"View\"] = reflect.TypeOf((*View)(nil)).Elem()\n}\n\ntype ViewManager struct {\n\tSelf types.ManagedObjectReference\n\n\tViewList []types.ManagedObjectReference `mo:\"viewList\"`\n}\n\nfunc (m ViewManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"ViewManager\"] = reflect.TypeOf((*ViewManager)(nil)).Elem()\n}\n\ntype VirtualApp struct {\n\tResourcePool\n\n\tParentFolder *types.ManagedObjectReference  `mo:\"parentFolder\"`\n\tDatastore    []types.ManagedObjectReference `mo:\"datastore\"`\n\tNetwork      []types.ManagedObjectReference `mo:\"network\"`\n\tVAppConfig   *types.VAppConfigInfo          `mo:\"vAppConfig\"`\n\tParentVApp   *types.ManagedObjectReference  `mo:\"parentVApp\"`\n\tChildLink    []types.VirtualAppLinkInfo     `mo:\"childLink\"`\n}\n\nfunc init() {\n\tt[\"VirtualApp\"] = reflect.TypeOf((*VirtualApp)(nil)).Elem()\n}\n\ntype VirtualDiskManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m VirtualDiskManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"VirtualDiskManager\"] = reflect.TypeOf((*VirtualDiskManager)(nil)).Elem()\n}\n\ntype VirtualMachine struct {\n\tManagedEntity\n\n\tCapability           types.VirtualMachineCapability    `mo:\"capability\"`\n\tConfig               *types.VirtualMachineConfigInfo   `mo:\"config\"`\n\tLayout               *types.VirtualMachineFileLayout   `mo:\"layout\"`\n\tLayoutEx             *types.VirtualMachineFileLayoutEx 
`mo:\"layoutEx\"`\n\tStorage              *types.VirtualMachineStorageInfo  `mo:\"storage\"`\n\tEnvironmentBrowser   types.ManagedObjectReference      `mo:\"environmentBrowser\"`\n\tResourcePool         *types.ManagedObjectReference     `mo:\"resourcePool\"`\n\tParentVApp           *types.ManagedObjectReference     `mo:\"parentVApp\"`\n\tResourceConfig       *types.ResourceConfigSpec         `mo:\"resourceConfig\"`\n\tRuntime              types.VirtualMachineRuntimeInfo   `mo:\"runtime\"`\n\tGuest                *types.GuestInfo                  `mo:\"guest\"`\n\tSummary              types.VirtualMachineSummary       `mo:\"summary\"`\n\tDatastore            []types.ManagedObjectReference    `mo:\"datastore\"`\n\tNetwork              []types.ManagedObjectReference    `mo:\"network\"`\n\tSnapshot             *types.VirtualMachineSnapshotInfo `mo:\"snapshot\"`\n\tRootSnapshot         []types.ManagedObjectReference    `mo:\"rootSnapshot\"`\n\tGuestHeartbeatStatus types.ManagedEntityStatus         `mo:\"guestHeartbeatStatus\"`\n}\n\nfunc (m *VirtualMachine) Entity() *ManagedEntity {\n\treturn &m.ManagedEntity\n}\n\nfunc init() {\n\tt[\"VirtualMachine\"] = reflect.TypeOf((*VirtualMachine)(nil)).Elem()\n}\n\ntype VirtualMachineCompatibilityChecker struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m VirtualMachineCompatibilityChecker) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"VirtualMachineCompatibilityChecker\"] = reflect.TypeOf((*VirtualMachineCompatibilityChecker)(nil)).Elem()\n}\n\ntype VirtualMachineProvisioningChecker struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m VirtualMachineProvisioningChecker) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"VirtualMachineProvisioningChecker\"] = reflect.TypeOf((*VirtualMachineProvisioningChecker)(nil)).Elem()\n}\n\ntype VirtualMachineSnapshot struct {\n\tExtensibleManagedObject\n\n\tConfig        
types.VirtualMachineConfigInfo `mo:\"config\"`\n\tChildSnapshot []types.ManagedObjectReference `mo:\"childSnapshot\"`\n\tVm            types.ManagedObjectReference   `mo:\"vm\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineSnapshot\"] = reflect.TypeOf((*VirtualMachineSnapshot)(nil)).Elem()\n}\n\ntype VirtualizationManager struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m VirtualizationManager) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"VirtualizationManager\"] = reflect.TypeOf((*VirtualizationManager)(nil)).Elem()\n}\n\ntype VmwareDistributedVirtualSwitch struct {\n\tDistributedVirtualSwitch\n}\n\nfunc init() {\n\tt[\"VmwareDistributedVirtualSwitch\"] = reflect.TypeOf((*VmwareDistributedVirtualSwitch)(nil)).Elem()\n}\n\ntype VsanUpgradeSystem struct {\n\tSelf types.ManagedObjectReference\n}\n\nfunc (m VsanUpgradeSystem) Reference() types.ManagedObjectReference {\n\treturn m.Self\n}\n\nfunc init() {\n\tt[\"VsanUpgradeSystem\"] = reflect.TypeOf((*VsanUpgradeSystem)(nil)).Elem()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/mo/reference.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage mo\n\nimport \"github.com/vmware/govmomi/vim25/types\"\n\n// Reference is the interface that is implemented by all the managed objects\n// defined in this package. It specifies that these managed objects have a\n// function that returns the managed object reference to themselves.\ntype Reference interface {\n\tReference() types.ManagedObjectReference\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/mo/registry.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage mo\n\nimport \"reflect\"\n\nvar t = map[string]reflect.Type{}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage mo\n\nimport (\n\t\"context\"\n\t\"reflect\"\n\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\nfunc ignoreMissingProperty(ref types.ManagedObjectReference, p types.MissingProperty) bool {\n\tswitch ref.Type {\n\tcase \"VirtualMachine\":\n\t\tswitch p.Path {\n\t\tcase \"environmentBrowser\":\n\t\t\t// See https://github.com/vmware/govmomi/pull/242\n\t\t\treturn true\n\t\tcase \"alarmActionsEnabled\":\n\t\t\t// Seen with vApp child VM\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n// ObjectContentToType loads an ObjectContent value into the value it\n// represents. If the ObjectContent value has a non-empty 'MissingSet' field,\n// it returns the first fault it finds there as error. If the 'MissingSet'\n// field is empty, it returns a pointer to a reflect.Value. 
It handles contain\n// nested properties, such as 'guest.ipAddress' or 'config.hardware'.\nfunc ObjectContentToType(o types.ObjectContent) (interface{}, error) {\n\t// Expect no properties in the missing set\n\tfor _, p := range o.MissingSet {\n\t\tif ignoreMissingProperty(o.Obj, p) {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn nil, soap.WrapVimFault(p.Fault.Fault)\n\t}\n\n\tti := typeInfoForType(o.Obj.Type)\n\tv, err := ti.LoadFromObjectContent(o)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn v.Elem().Interface(), nil\n}\n\n// LoadRetrievePropertiesResponse converts the response of a call to\n// RetrieveProperties to one or more managed objects.\nfunc LoadRetrievePropertiesResponse(res *types.RetrievePropertiesResponse, dst interface{}) error {\n\trt := reflect.TypeOf(dst)\n\tif rt == nil || rt.Kind() != reflect.Ptr {\n\t\tpanic(\"need pointer\")\n\t}\n\n\trv := reflect.ValueOf(dst).Elem()\n\tif !rv.CanSet() {\n\t\tpanic(\"cannot set dst\")\n\t}\n\n\tisSlice := false\n\tswitch rt.Elem().Kind() {\n\tcase reflect.Struct:\n\tcase reflect.Slice:\n\t\tisSlice = true\n\tdefault:\n\t\tpanic(\"unexpected type\")\n\t}\n\n\tif isSlice {\n\t\tfor _, p := range res.Returnval {\n\t\t\tv, err := ObjectContentToType(p)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tvt := reflect.TypeOf(v)\n\n\t\t\tif !rv.Type().AssignableTo(vt) {\n\t\t\t\t// For example: dst is []ManagedEntity, res is []HostSystem\n\t\t\t\tif field, ok := vt.FieldByName(rt.Elem().Elem().Name()); ok && field.Anonymous {\n\t\t\t\t\trv.Set(reflect.Append(rv, reflect.ValueOf(v).FieldByIndex(field.Index)))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\trv.Set(reflect.Append(rv, reflect.ValueOf(v)))\n\t\t}\n\t} else {\n\t\tswitch len(res.Returnval) {\n\t\tcase 0:\n\t\tcase 1:\n\t\t\tv, err := ObjectContentToType(res.Returnval[0])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tvt := reflect.TypeOf(v)\n\n\t\t\tif !rv.Type().AssignableTo(vt) {\n\t\t\t\t// For example: dst is 
ComputeResource, res is ClusterComputeResource\n\t\t\t\tif field, ok := vt.FieldByName(rt.Elem().Name()); ok && field.Anonymous {\n\t\t\t\t\trv.Set(reflect.ValueOf(v).FieldByIndex(field.Index))\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\trv.Set(reflect.ValueOf(v))\n\t\tdefault:\n\t\t\t// If dst is not a slice, expect to receive 0 or 1 results\n\t\t\tpanic(\"more than 1 result\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// RetrievePropertiesForRequest calls the RetrieveProperties method with the\n// specified request and decodes the response struct into the value pointed to\n// by dst.\nfunc RetrievePropertiesForRequest(ctx context.Context, r soap.RoundTripper, req types.RetrieveProperties, dst interface{}) error {\n\tres, err := methods.RetrieveProperties(ctx, r, &req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn LoadRetrievePropertiesResponse(res, dst)\n}\n\n// RetrieveProperties retrieves the properties of the managed object specified\n// as obj and decodes the response struct into the value pointed to by dst.\nfunc RetrieveProperties(ctx context.Context, r soap.RoundTripper, pc, obj types.ManagedObjectReference, dst interface{}) error {\n\treq := types.RetrieveProperties{\n\t\tThis: pc,\n\t\tSpecSet: []types.PropertyFilterSpec{\n\t\t\t{\n\t\t\t\tObjectSet: []types.ObjectSpec{\n\t\t\t\t\t{\n\t\t\t\t\t\tObj:  obj,\n\t\t\t\t\t\tSkip: types.NewBool(false),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tPropSet: []types.PropertySpec{\n\t\t\t\t\t{\n\t\t\t\t\t\tAll:  types.NewBool(true),\n\t\t\t\t\t\tType: obj.Type,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn RetrievePropertiesForRequest(ctx, r, req, dst)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/mo/retrieve_test.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage mo\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/vim25/soap\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n\t\"github.com/vmware/govmomi/vim25/xml\"\n)\n\nfunc load(name string) *types.RetrievePropertiesResponse {\n\tf, err := os.Open(name)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer f.Close()\n\n\tvar b types.RetrievePropertiesResponse\n\n\tdec := xml.NewDecoder(f)\n\tdec.TypeFunc = types.TypeFunc()\n\tif err := dec.Decode(&b); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn &b\n}\n\nfunc TestNotAuthenticatedFault(t *testing.T) {\n\tvar s SessionManager\n\n\terr := LoadRetrievePropertiesResponse(load(\"fixtures/not_authenticated_fault.xml\"), &s)\n\tif !soap.IsVimFault(err) {\n\t\tt.Errorf(\"Expected IsVimFault\")\n\t}\n\n\tfault := soap.ToVimFault(err).(*types.NotAuthenticated)\n\tif fault.PrivilegeId != \"System.View\" {\n\t\tt.Errorf(\"Expected first fault to be returned\")\n\t}\n}\n\nfunc TestNestedProperty(t *testing.T) {\n\tvar vm VirtualMachine\n\n\terr := LoadRetrievePropertiesResponse(load(\"fixtures/nested_property.xml\"), &vm)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected no error, got: %s\", err)\n\t}\n\n\tself := types.ManagedObjectReference{\n\t\tType:  \"VirtualMachine\",\n\t\tValue: \"vm-411\",\n\t}\n\n\tif vm.Self != self {\n\t\tt.Fatalf(\"Expected vm.Self to be set\")\n\t}\n\n\tif vm.Config == nil 
{\n\t\tt.Fatalf(\"Expected vm.Config to be set\")\n\t}\n\n\tif vm.Config.Name != \"kubernetes-master\" {\n\t\tt.Errorf(\"Got: %s\", vm.Config.Name)\n\t}\n\n\tif vm.Config.Uuid != \"422ec880-ab06-06b4-23f3-beb7a052a4c9\" {\n\t\tt.Errorf(\"Got: %s\", vm.Config.Uuid)\n\t}\n}\n\nfunc TestPointerProperty(t *testing.T) {\n\tvar vm VirtualMachine\n\n\terr := LoadRetrievePropertiesResponse(load(\"fixtures/pointer_property.xml\"), &vm)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected no error, got: %s\", err)\n\t}\n\n\tif vm.Config == nil {\n\t\tt.Fatalf(\"Expected vm.Config to be set\")\n\t}\n\n\tif vm.Config.BootOptions == nil {\n\t\tt.Fatalf(\"Expected vm.Config.BootOptions to be set\")\n\t}\n}\n\nfunc TestEmbeddedTypeProperty(t *testing.T) {\n\t// Test that we avoid in this case:\n\t// panic: reflect.Set: value of type mo.ClusterComputeResource is not assignable to type mo.ComputeResource\n\tvar cr ComputeResource\n\n\terr := LoadRetrievePropertiesResponse(load(\"fixtures/cluster_host_property.xml\"), &cr)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected no error, got: %s\", err)\n\t}\n\n\tif len(cr.Host) != 4 {\n\t\tt.Fatalf(\"Expected cr.Host to be set\")\n\t}\n}\n\nfunc TestEmbeddedTypePropertySlice(t *testing.T) {\n\tvar me []ManagedEntity\n\n\terr := LoadRetrievePropertiesResponse(load(\"fixtures/hostsystem_list_name_property.xml\"), &me)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected no error, got: %s\", err)\n\t}\n\n\tif len(me) != 2 {\n\t\tt.Fatalf(\"Expected 2 elements\")\n\t}\n\n\tfor _, m := range me {\n\t\tif m.Name == \"\" {\n\t\t\tt.Fatal(\"Expected Name field to be set\")\n\t\t}\n\t}\n\n\tif me[0].Name == me[1].Name {\n\t\tt.Fatal(\"Name fields should not be the same\")\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/mo/type_info.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage mo\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype typeInfo struct {\n\ttyp reflect.Type\n\n\t// Field indices of \"Self\" field.\n\tself []int\n\n\t// Map property names to field indices.\n\tprops map[string][]int\n}\n\nvar typeInfoLock sync.RWMutex\nvar typeInfoMap = make(map[string]*typeInfo)\n\nfunc typeInfoForType(tname string) *typeInfo {\n\ttypeInfoLock.RLock()\n\tti, ok := typeInfoMap[tname]\n\ttypeInfoLock.RUnlock()\n\n\tif ok {\n\t\treturn ti\n\t}\n\n\t// Create new typeInfo for type.\n\tif typ, ok := t[tname]; !ok {\n\t\tpanic(\"unknown type: \" + tname)\n\t} else {\n\t\t// Multiple routines may race to set it, but the result is the same.\n\t\ttypeInfoLock.Lock()\n\t\tti = newTypeInfo(typ)\n\t\ttypeInfoMap[tname] = ti\n\t\ttypeInfoLock.Unlock()\n\t}\n\n\treturn ti\n}\n\nfunc newTypeInfo(typ reflect.Type) *typeInfo {\n\tt := typeInfo{\n\t\ttyp:   typ,\n\t\tprops: make(map[string][]int),\n\t}\n\n\tt.build(typ, \"\", []int{})\n\n\treturn &t\n}\n\nvar managedObjectRefType = reflect.TypeOf((*types.ManagedObjectReference)(nil)).Elem()\n\nfunc buildName(fn string, f reflect.StructField) string {\n\tif fn != \"\" {\n\t\tfn += \".\"\n\t}\n\n\tmotag := f.Tag.Get(\"mo\")\n\tif motag != \"\" {\n\t\treturn fn + motag\n\t}\n\n\txmltag := f.Tag.Get(\"xml\")\n\tif 
xmltag != \"\" {\n\t\ttokens := strings.Split(xmltag, \",\")\n\t\tif tokens[0] != \"\" {\n\t\t\treturn fn + tokens[0]\n\t\t}\n\t}\n\n\treturn \"\"\n}\n\nfunc (t *typeInfo) build(typ reflect.Type, fn string, fi []int) {\n\tif typ.Kind() == reflect.Ptr {\n\t\ttyp = typ.Elem()\n\t}\n\n\tif typ.Kind() != reflect.Struct {\n\t\tpanic(\"need struct\")\n\t}\n\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\tf := typ.Field(i)\n\t\tftyp := f.Type\n\n\t\t// Copy field indices so they can be passed along.\n\t\tfic := make([]int, len(fi)+1)\n\t\tcopy(fic, fi)\n\t\tfic[len(fi)] = i\n\n\t\t// Recurse into embedded field.\n\t\tif f.Anonymous {\n\t\t\tt.build(ftyp, fn, fic)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Top level type has a \"Self\" field.\n\t\tif f.Name == \"Self\" && ftyp == managedObjectRefType {\n\t\t\tt.self = fic\n\t\t\tcontinue\n\t\t}\n\n\t\tfnc := buildName(fn, f)\n\t\tif fnc == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tt.props[fnc] = fic\n\n\t\t// Dereference pointer.\n\t\tif ftyp.Kind() == reflect.Ptr {\n\t\t\tftyp = ftyp.Elem()\n\t\t}\n\n\t\t// Slices are not addressable by `foo.bar.qux`.\n\t\tif ftyp.Kind() == reflect.Slice {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Skip the managed reference type.\n\t\tif ftyp == managedObjectRefType {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Recurse into structs.\n\t\tif ftyp.Kind() == reflect.Struct {\n\t\t\tt.build(ftyp, fnc, fic)\n\t\t}\n\t}\n}\n\n// assignValue assignes a value 'pv' to the struct pointed to by 'val', given a\n// slice of field indices. It recurses into the struct until it finds the field\n// specified by the indices. 
It creates new values for pointer types where\n// needed.\nfunc assignValue(val reflect.Value, fi []int, pv reflect.Value) {\n\t// Create new value if necessary.\n\tif val.Kind() == reflect.Ptr {\n\t\tif val.IsNil() {\n\t\t\tval.Set(reflect.New(val.Type().Elem()))\n\t\t}\n\n\t\tval = val.Elem()\n\t}\n\n\trv := val.Field(fi[0])\n\tfi = fi[1:]\n\tif len(fi) == 0 {\n\t\trt := rv.Type()\n\t\tpt := pv.Type()\n\n\t\t// If type is a pointer, create new instance of type.\n\t\tif rt.Kind() == reflect.Ptr {\n\t\t\trv.Set(reflect.New(rt.Elem()))\n\t\t\trv = rv.Elem()\n\t\t\trt = rv.Type()\n\t\t}\n\n\t\t// If type is an interface, check if pv implements it.\n\t\tif rt.Kind() == reflect.Interface && !pt.Implements(rt) {\n\t\t\t// Check if pointer to pv implements it.\n\t\t\tif reflect.PtrTo(pt).Implements(rt) {\n\t\t\t\tnpv := reflect.New(pt)\n\t\t\t\tnpv.Elem().Set(pv)\n\t\t\t\tpv = npv\n\t\t\t\tpt = pv.Type()\n\t\t\t} else {\n\t\t\t\tpanic(fmt.Sprintf(\"type %s doesn't implement %s\", pt.Name(), rt.Name()))\n\t\t\t}\n\t\t}\n\n\t\tif pt.AssignableTo(rt) {\n\t\t\trv.Set(pv)\n\t\t} else if rt.ConvertibleTo(pt) {\n\t\t\trv.Set(pv.Convert(rt))\n\t\t} else {\n\t\t\tpanic(fmt.Sprintf(\"cannot assign %s (%s) to %s (%s)\", rt.Name(), rt.Kind(), pt.Name(), pt.Kind()))\n\t\t}\n\n\t\treturn\n\t}\n\n\tassignValue(rv, fi, pv)\n}\n\nvar arrayOfRegexp = regexp.MustCompile(\"ArrayOf(.*)$\")\n\nfunc anyTypeToValue(t interface{}) reflect.Value {\n\trt := reflect.TypeOf(t)\n\trv := reflect.ValueOf(t)\n\n\t// Dereference if ArrayOfXYZ type\n\tm := arrayOfRegexp.FindStringSubmatch(rt.Name())\n\tif len(m) > 0 {\n\t\t// ArrayOfXYZ type has single field named XYZ\n\t\trv = rv.FieldByName(m[1])\n\t\tif !rv.IsValid() {\n\t\t\tpanic(fmt.Sprintf(\"expected %s type to have field %s\", m[0], m[1]))\n\t\t}\n\t}\n\n\treturn rv\n}\n\n// LoadObjectFromContent loads properties from the 'PropSet' field in the\n// specified ObjectContent value into the value it represents, which is\n// returned as a 
reflect.Value.\nfunc (t *typeInfo) LoadFromObjectContent(o types.ObjectContent) (reflect.Value, error) {\n\tv := reflect.New(t.typ)\n\tassignValue(v, t.self, reflect.ValueOf(o.Obj))\n\n\tfor _, p := range o.PropSet {\n\t\trv, ok := t.props[p.Name]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tassignValue(v, rv, anyTypeToValue(p.Val))\n\t}\n\n\treturn v, nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/mo/type_info_test.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage mo\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestLoadAll(*testing.T) {\n\tfor _, typ := range t {\n\t\tnewTypeInfo(typ)\n\t}\n}\n\n// The virtual machine managed object has about 500 nested properties.\n// It's likely to be indicative of the function's performance in general.\nfunc BenchmarkLoadVirtualMachine(b *testing.B) {\n\tvmtyp := reflect.TypeOf((*VirtualMachine)(nil)).Elem()\n\tfor i := 0; i < b.N; i++ {\n\t\tnewTypeInfo(vmtyp)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/progress/aggregator.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage progress\n\nimport \"sync\"\n\ntype Aggregator struct {\n\tdownstream Sinker\n\tupstream   chan (<-chan Report)\n\n\tdone chan struct{}\n\tw    sync.WaitGroup\n}\n\nfunc NewAggregator(s Sinker) *Aggregator {\n\ta := &Aggregator{\n\t\tdownstream: s,\n\t\tupstream:   make(chan (<-chan Report)),\n\n\t\tdone: make(chan struct{}),\n\t}\n\n\ta.w.Add(1)\n\tgo a.loop()\n\n\treturn a\n}\n\nfunc (a *Aggregator) loop() {\n\tdefer a.w.Done()\n\n\tdch := a.downstream.Sink()\n\tdefer close(dch)\n\n\tfor {\n\t\tselect {\n\t\tcase uch := <-a.upstream:\n\t\t\t// Drain upstream channel\n\t\t\tfor e := range uch {\n\t\t\t\tdch <- e\n\t\t\t}\n\t\tcase <-a.done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (a *Aggregator) Sink() chan<- Report {\n\tch := make(chan Report)\n\ta.upstream <- ch\n\treturn ch\n}\n\n// Done marks the aggregator as done. No more calls to Sink() may be made and\n// the downstream progress report channel will be closed when Done() returns.\nfunc (a *Aggregator) Done() {\n\tclose(a.done)\n\ta.w.Wait()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/progress/aggregator_test.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage progress\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestAggregatorNoSinks(t *testing.T) {\n\tch := make(chan Report)\n\ta := NewAggregator(dummySinker{ch})\n\ta.Done()\n\n\t_, ok := <-ch\n\tif ok {\n\t\tt.Errorf(\"Expected channel to be closed\")\n\t}\n}\n\nfunc TestAggregatorMultipleSinks(t *testing.T) {\n\tch := make(chan Report)\n\ta := NewAggregator(dummySinker{ch})\n\n\tfor i := 0; i < 5; i++ {\n\t\tgo func(ch chan<- Report) {\n\t\t\tch <- dummyReport{}\n\t\t\tch <- dummyReport{}\n\t\t\tclose(ch)\n\t\t}(a.Sink())\n\n\t\t<-ch\n\t\t<-ch\n\t}\n\n\ta.Done()\n\n\t_, ok := <-ch\n\tif ok {\n\t\tt.Errorf(\"Expected channel to be closed\")\n\t}\n}\n\nfunc TestAggregatorSinkInFlightOnDone(t *testing.T) {\n\tch := make(chan Report)\n\ta := NewAggregator(dummySinker{ch})\n\n\t// Simulate upstream\n\tgo func(ch chan<- Report) {\n\t\ttime.Sleep(1 * time.Millisecond)\n\t\tch <- dummyReport{}\n\t\tclose(ch)\n\t}(a.Sink())\n\n\t// Drain downstream\n\tgo func(ch <-chan Report) {\n\t\t<-ch\n\t}(ch)\n\n\t// This should wait for upstream to complete\n\ta.Done()\n\n\t_, ok := <-ch\n\tif ok {\n\t\tt.Errorf(\"Expected channel to be closed\")\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/progress/common_test.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage progress\n\ntype dummySinker struct {\n\tch chan Report\n}\n\nfunc (d dummySinker) Sink() chan<- Report {\n\treturn d.ch\n}\n\ntype dummyReport struct {\n\tp float32\n\td string\n\te error\n}\n\nfunc (p dummyReport) Percentage() float32 {\n\treturn p.p\n}\n\nfunc (p dummyReport) Detail() string {\n\treturn p.d\n}\n\nfunc (p dummyReport) Error() error {\n\treturn p.e\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/progress/doc.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage progress\n\n/*\nThe progress package contains functionality to deal with progress reporting.\nThe functionality is built to serve progress reporting for infrastructure\noperations when talking the vSphere API, but is generic enough to be used\nelsewhere.\n\nAt the core of this progress reporting API lies the Sinker interface. This\ninterface is implemented by any object that can act as a sink for progress\nreports. Callers of the Sink() function receives a send-only channel for\nprogress reports. They are responsible for closing the channel when done.\nThis semantic makes it easy to keep track of multiple progress report channels;\nthey are only created when Sink() is called and assumed closed when any\nfunction that receives a Sinker parameter returns.\n*/\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/progress/prefix.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage progress\n\nimport \"fmt\"\n\ntype prefixedReport struct {\n\tReport\n\tprefix string\n}\n\nfunc (r prefixedReport) Detail() string {\n\tif d := r.Report.Detail(); d != \"\" {\n\t\treturn fmt.Sprintf(\"%s: %s\", r.prefix, d)\n\t}\n\n\treturn r.prefix\n}\n\nfunc prefixLoop(upstream <-chan Report, downstream chan<- Report, prefix string) {\n\tdefer close(downstream)\n\n\tfor r := range upstream {\n\t\tdownstream <- prefixedReport{\n\t\t\tReport: r,\n\t\t\tprefix: prefix,\n\t\t}\n\t}\n}\n\nfunc Prefix(s Sinker, prefix string) Sinker {\n\tfn := func() chan<- Report {\n\t\tupstream := make(chan Report)\n\t\tdownstream := s.Sink()\n\t\tgo prefixLoop(upstream, downstream, prefix)\n\t\treturn upstream\n\t}\n\n\treturn SinkFunc(fn)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/progress/prefix_test.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage progress\n\nimport \"testing\"\n\nfunc TestPrefix(t *testing.T) {\n\tvar r Report\n\n\tch := make(chan Report, 1)\n\ts := Prefix(dummySinker{ch}, \"prefix\").Sink()\n\n\t// No detail\n\ts <- dummyReport{d: \"\"}\n\tr = <-ch\n\tif r.Detail() != \"prefix\" {\n\t\tt.Errorf(\"Expected detail to be prefixed\")\n\t}\n\n\t// With detail\n\ts <- dummyReport{d: \"something\"}\n\tr = <-ch\n\tif r.Detail() != \"prefix: something\" {\n\t\tt.Errorf(\"Expected detail to be prefixed\")\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/progress/reader.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage progress\n\nimport (\n\t\"container/list\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync/atomic\"\n\t\"time\"\n)\n\ntype readerReport struct {\n\tt time.Time\n\n\tpos  int64\n\tsize int64\n\tbps  *uint64\n\n\terr error\n}\n\nfunc (r readerReport) Percentage() float32 {\n\treturn 100.0 * float32(r.pos) / float32(r.size)\n}\n\nfunc (r readerReport) Detail() string {\n\tconst (\n\t\tKiB = 1024\n\t\tMiB = 1024 * KiB\n\t\tGiB = 1024 * MiB\n\t)\n\n\t// Use the reader's bps field, so this report returns an up-to-date number.\n\t//\n\t// For example: if there hasn't been progress for the last 5 seconds, the\n\t// most recent report should return \"0B/s\".\n\t//\n\tbps := atomic.LoadUint64(r.bps)\n\n\tswitch {\n\tcase bps >= GiB:\n\t\treturn fmt.Sprintf(\"%.1fGiB/s\", float32(bps)/float32(GiB))\n\tcase bps >= MiB:\n\t\treturn fmt.Sprintf(\"%.1fMiB/s\", float32(bps)/float32(MiB))\n\tcase bps >= KiB:\n\t\treturn fmt.Sprintf(\"%.1fKiB/s\", float32(bps)/float32(KiB))\n\tdefault:\n\t\treturn fmt.Sprintf(\"%dB/s\", bps)\n\t}\n}\n\nfunc (p readerReport) Error() error {\n\treturn p.err\n}\n\n// reader wraps an io.Reader and sends a progress report over a channel for\n// every read it handles.\ntype reader struct {\n\tr io.Reader\n\n\tpos  int64\n\tsize int64\n\n\tbps uint64\n\n\tch chan<- Report\n}\n\nfunc NewReader(s Sinker, r io.Reader, size int64) *reader {\n\tpr := 
reader{\n\t\tr: r,\n\n\t\tsize: size,\n\t}\n\n\t// Reports must be sent downstream and to the bps computation loop.\n\tpr.ch = Tee(s, newBpsLoop(&pr.bps)).Sink()\n\n\treturn &pr\n}\n\n// Read calls the Read function on the underlying io.Reader. Additionally,\n// every read causes a progress report to be sent to the progress reader's\n// underlying channel.\nfunc (r *reader) Read(b []byte) (int, error) {\n\tn, err := r.r.Read(b)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\tr.pos += int64(n)\n\tq := readerReport{\n\t\tt:    time.Now(),\n\t\tpos:  r.pos,\n\t\tsize: r.size,\n\t\tbps:  &r.bps,\n\t}\n\n\tr.ch <- q\n\n\treturn n, err\n}\n\n// Done marks the progress reader as done, optionally including an error in the\n// progress report. After sending it, the underlying channel is closed.\nfunc (r *reader) Done(err error) {\n\tq := readerReport{\n\t\tt:    time.Now(),\n\t\tpos:  r.pos,\n\t\tsize: r.size,\n\t\tbps:  &r.bps,\n\t\terr:  err,\n\t}\n\n\tr.ch <- q\n\tclose(r.ch)\n}\n\n// newBpsLoop returns a sink that monitors and stores throughput.\nfunc newBpsLoop(dst *uint64) SinkFunc {\n\tfn := func() chan<- Report {\n\t\tsink := make(chan Report)\n\t\tgo bpsLoop(sink, dst)\n\t\treturn sink\n\t}\n\n\treturn fn\n}\n\nfunc bpsLoop(ch <-chan Report, dst *uint64) {\n\tl := list.New()\n\n\tfor {\n\t\tvar tch <-chan time.Time\n\n\t\t// Setup timer for front of list to become stale.\n\t\tif e := l.Front(); e != nil {\n\t\t\tdt := time.Second - time.Now().Sub(e.Value.(readerReport).t)\n\t\t\ttch = time.After(dt)\n\t\t}\n\n\t\tselect {\n\t\tcase q, ok := <-ch:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tl.PushBack(q)\n\t\tcase <-tch:\n\t\t\tl.Remove(l.Front())\n\t\t}\n\n\t\t// Compute new bps\n\t\tif l.Len() == 0 {\n\t\t\tatomic.StoreUint64(dst, 0)\n\t\t} else {\n\t\t\tf := l.Front().Value.(readerReport)\n\t\t\tb := l.Back().Value.(readerReport)\n\t\t\tatomic.StoreUint64(dst, uint64(b.pos-f.pos))\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/progress/reader_test.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage progress\n\nimport (\n\t\"io\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestReader(t *testing.T) {\n\ts := \"helloworld\"\n\tch := make(chan Report, 1)\n\tpr := NewReader(&dummySinker{ch}, strings.NewReader(s), int64(len(s)))\n\n\tvar buf [10]byte\n\tvar q Report\n\tvar n int\n\tvar err error\n\n\t// Read first byte\n\tn, err = pr.Read(buf[0:1])\n\tif n != 1 {\n\t\tt.Errorf(\"Expected n=1, but got: %d\", n)\n\t}\n\n\tif err != nil {\n\t\tt.Errorf(\"Error: %s\", err)\n\t}\n\n\tq = <-ch\n\tif q.Error() != nil {\n\t\tt.Errorf(\"Error: %s\", err)\n\t}\n\n\tif f := q.Percentage(); f != 10.0 {\n\t\tt.Errorf(\"Expected percentage after 1 byte to be 10%%, but got: %.0f%%\", f)\n\t}\n\n\t// Read remaining bytes\n\tn, err = pr.Read(buf[:])\n\tif n != 9 {\n\t\tt.Errorf(\"Expected n=1, but got: %d\", n)\n\t}\n\tif err != nil {\n\t\tt.Errorf(\"Error: %s\", err)\n\t}\n\n\tq = <-ch\n\tif q.Error() != nil {\n\t\tt.Errorf(\"Error: %s\", err)\n\t}\n\n\tif f := q.Percentage(); f != 100.0 {\n\t\tt.Errorf(\"Expected percentage after 10 bytes to be 100%%, but got: %.0f%%\", f)\n\t}\n\n\t// Read EOF\n\t_, err = pr.Read(buf[:])\n\tif err != io.EOF {\n\t\tt.Errorf(\"Expected io.EOF, but got: %s\", err)\n\t}\n\n\t// Mark progress reader as done\n\tpr.Done(io.EOF)\n\tq = <-ch\n\tif err != io.EOF {\n\t\tt.Errorf(\"Expected io.EOF, but got: %s\", err)\n\t}\n\n\t// Progress 
channel should be closed after progress reader is marked done\n\t_, ok := <-ch\n\tif ok {\n\t\tt.Errorf(\"Expected channel to be closed\")\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/progress/report.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage progress\n\n// Report defines the interface for types that can deliver progress reports.\n// Examples include uploads/downloads in the http client and the task info\n// field in the task managed object.\ntype Report interface {\n\tPercentage() float32\n\tDetail() string\n\tError() error\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/progress/scale.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage progress\n\ntype scaledReport struct {\n\tReport\n\tn int\n\ti int\n}\n\nfunc (r scaledReport) Percentage() float32 {\n\tb := 100 * float32(r.i) / float32(r.n)\n\treturn b + (r.Report.Percentage() / float32(r.n))\n}\n\ntype scaleOne struct {\n\ts Sinker\n\tn int\n\ti int\n}\n\nfunc (s scaleOne) Sink() chan<- Report {\n\tupstream := make(chan Report)\n\tdownstream := s.s.Sink()\n\tgo s.loop(upstream, downstream)\n\treturn upstream\n}\n\nfunc (s scaleOne) loop(upstream <-chan Report, downstream chan<- Report) {\n\tdefer close(downstream)\n\n\tfor r := range upstream {\n\t\tdownstream <- scaledReport{\n\t\t\tReport: r,\n\t\t\tn:      s.n,\n\t\t\ti:      s.i,\n\t\t}\n\t}\n}\n\ntype scaleMany struct {\n\ts Sinker\n\tn int\n\ti int\n}\n\nfunc Scale(s Sinker, n int) Sinker {\n\treturn &scaleMany{\n\t\ts: s,\n\t\tn: n,\n\t}\n}\n\nfunc (s *scaleMany) Sink() chan<- Report {\n\tif s.i == s.n {\n\t\ts.n++\n\t}\n\n\tch := scaleOne{s: s.s, n: s.n, i: s.i}.Sink()\n\ts.i++\n\treturn ch\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/progress/scale_test.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage progress\n\nimport \"testing\"\n\nfunc TestScaleMany(t *testing.T) {\n\tch := make(chan Report)\n\ta := NewAggregator(dummySinker{ch})\n\tdefer a.Done()\n\n\ts := Scale(a, 5)\n\n\tgo func() {\n\t\tfor i := 0; i < 5; i++ {\n\t\t\tgo func(ch chan<- Report) {\n\t\t\t\tch <- dummyReport{p: 0.0}\n\t\t\t\tch <- dummyReport{p: 50.0}\n\t\t\t\tclose(ch)\n\t\t\t}(s.Sink())\n\t\t}\n\t}()\n\n\t// Expect percentages to be scaled across sinks\n\tfor p := float32(0.0); p < 100.0; p += 10.0 {\n\t\tr := <-ch\n\t\tif r.Percentage() != p {\n\t\t\tt.Errorf(\"Expected percentage to be: %.0f%%\", p)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/progress/sinker.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage progress\n\n// Sinker defines what is expected of a type that can act as a sink for\n// progress reports. The semantics are as follows. If you call Sink(), you are\n// responsible for closing the returned channel. Closing this channel means\n// that the related task is done, or resulted in error.\ntype Sinker interface {\n\tSink() chan<- Report\n}\n\n// SinkFunc defines a function that returns a progress report channel.\ntype SinkFunc func() chan<- Report\n\n// Sink makes the SinkFunc implement the Sinker interface.\nfunc (fn SinkFunc) Sink() chan<- Report {\n\treturn fn()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/progress/tee.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage progress\n\n// Tee works like Unix tee; it forwards all progress reports it receives to the\n// specified sinks\nfunc Tee(s1, s2 Sinker) Sinker {\n\tfn := func() chan<- Report {\n\t\td1 := s1.Sink()\n\t\td2 := s2.Sink()\n\t\tu := make(chan Report)\n\t\tgo tee(u, d1, d2)\n\t\treturn u\n\t}\n\n\treturn SinkFunc(fn)\n}\n\nfunc tee(u <-chan Report, d1, d2 chan<- Report) {\n\tdefer close(d1)\n\tdefer close(d2)\n\n\tfor r := range u {\n\t\td1 <- r\n\t\td2 <- r\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/progress/tee_test.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage progress\n\nimport \"testing\"\n\nfunc TestTee(t *testing.T) {\n\tvar ok bool\n\n\tch1 := make(chan Report)\n\tch2 := make(chan Report)\n\n\ts := Tee(&dummySinker{ch: ch1}, &dummySinker{ch: ch2})\n\n\tin := s.Sink()\n\tin <- dummyReport{}\n\tclose(in)\n\n\t// Receive dummy on both sinks\n\t<-ch1\n\t<-ch2\n\n\t_, ok = <-ch1\n\tif ok {\n\t\tt.Errorf(\"Expected channel to be closed\")\n\t}\n\n\t_, ok = <-ch2\n\tif ok {\n\t\tt.Errorf(\"Expected channel to be closed\")\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/retry.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vim25\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"net/url\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/vim25/soap\"\n)\n\ntype RetryFunc func(err error) (retry bool, delay time.Duration)\n\n// TemporaryNetworkError returns a RetryFunc that retries up to a maximum of n\n// times, only if the error returned by the RoundTrip function is a temporary\n// network error (for example: a connect timeout).\nfunc TemporaryNetworkError(n int) RetryFunc {\n\treturn func(err error) (retry bool, delay time.Duration) {\n\t\tvar nerr net.Error\n\t\tvar ok bool\n\n\t\t// Never retry if this is not a network error.\n\t\tswitch rerr := err.(type) {\n\t\tcase *url.Error:\n\t\t\tif nerr, ok = rerr.Err.(net.Error); !ok {\n\t\t\t\treturn false, 0\n\t\t\t}\n\t\tcase net.Error:\n\t\t\tnerr = rerr\n\t\tdefault:\n\t\t\treturn false, 0\n\t\t}\n\n\t\tif !nerr.Temporary() {\n\t\t\treturn false, 0\n\t\t}\n\n\t\t// Don't retry if we're out of tries.\n\t\tif n--; n <= 0 {\n\t\t\treturn false, 0\n\t\t}\n\n\t\treturn true, 0\n\t}\n}\n\ntype retry struct {\n\troundTripper soap.RoundTripper\n\n\t// fn is a custom function that is called when an error occurs.\n\t// It returns whether or not to retry, and if so, how long to\n\t// delay before retrying.\n\tfn RetryFunc\n}\n\n// Retry wraps the specified soap.RoundTripper and invokes the\n// specified RetryFunc. 
The RetryFunc returns whether or not to\n// retry the call, and if so, how long to wait before retrying. If\n// the result of this function is to not retry, the original error\n// is returned from the RoundTrip function.\nfunc Retry(roundTripper soap.RoundTripper, fn RetryFunc) soap.RoundTripper {\n\tr := &retry{\n\t\troundTripper: roundTripper,\n\t\tfn:           fn,\n\t}\n\n\treturn r\n}\n\nfunc (r *retry) RoundTrip(ctx context.Context, req, res soap.HasFault) error {\n\tvar err error\n\n\tfor {\n\t\terr = r.roundTripper.RoundTrip(ctx, req, res)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Invoke retry function to see if another attempt should be made.\n\t\tif retry, delay := r.fn(err); retry {\n\t\t\ttime.Sleep(delay)\n\t\t\tcontinue\n\t\t}\n\n\t\tbreak\n\t}\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/retry_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage vim25\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/vim25/soap\"\n)\n\ntype tempError struct{}\n\nfunc (tempError) Error() string   { return \"tempError\" }\nfunc (tempError) Timeout() bool   { return true }\nfunc (tempError) Temporary() bool { return true }\n\ntype nonTempError struct{}\n\nfunc (nonTempError) Error() string   { return \"nonTempError\" }\nfunc (nonTempError) Timeout() bool   { return false }\nfunc (nonTempError) Temporary() bool { return false }\n\ntype fakeRoundTripper struct {\n\terrs []error\n}\n\nfunc (f *fakeRoundTripper) RoundTrip(ctx context.Context, req, res soap.HasFault) error {\n\terr := f.errs[0]\n\tf.errs = f.errs[1:]\n\treturn err\n}\n\nfunc TestRetry(t *testing.T) {\n\tvar tcs = []struct {\n\t\terrs     []error\n\t\texpected error\n\t}{\n\t\t{\n\t\t\terrs:     []error{nil},\n\t\t\texpected: nil,\n\t\t},\n\t\t{\n\t\t\terrs:     []error{tempError{}, nil},\n\t\t\texpected: nil,\n\t\t},\n\t\t{\n\t\t\terrs:     []error{tempError{}, tempError{}},\n\t\t\texpected: tempError{},\n\t\t},\n\t\t{\n\t\t\terrs:     []error{nonTempError{}},\n\t\t\texpected: nonTempError{},\n\t\t},\n\t\t{\n\t\t\terrs:     []error{tempError{}, nonTempError{}},\n\t\t\texpected: nonTempError{},\n\t\t},\n\t}\n\n\tfor _, tc := range tcs {\n\t\tvar rt soap.RoundTripper\n\n\t\trt = &fakeRoundTripper{errs: tc.errs}\n\t\trt = 
Retry(rt, TemporaryNetworkError(2))\n\n\t\terr := rt.RoundTrip(nil, nil, nil)\n\t\tif err != tc.expected {\n\t\t\tt.Errorf(\"Expected: %s, got: %s\", tc.expected, err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/soap/client.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage soap\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/sha1\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/cookiejar\"\n\t\"net/url\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/vim25/progress\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n\t\"github.com/vmware/govmomi/vim25/xml\"\n)\n\ntype HasFault interface {\n\tFault() *Fault\n}\n\ntype RoundTripper interface {\n\tRoundTrip(ctx context.Context, req, res HasFault) error\n}\n\nconst (\n\tDefaultVimNamespace  = \"urn:vim25\"\n\tDefaultVimVersion    = \"6.5\"\n\tDefaultMinVimVersion = \"5.5\"\n)\n\ntype header struct {\n\tCookie string `xml:\"vcSessionCookie,omitempty\"`\n}\n\ntype Client struct {\n\thttp.Client\n\n\tu *url.URL\n\tk bool // Named after curl's -k flag\n\td *debugContainer\n\tt *http.Transport\n\tp *url.URL\n\n\thostsMu sync.Mutex\n\thosts   map[string]string\n\n\tNamespace string // Vim namespace\n\tVersion   string // Vim version\n\tUserAgent string\n\n\theader *header\n}\n\nvar schemeMatch = regexp.MustCompile(`^\\w+://`)\n\n// ParseURL is wrapper around url.Parse, where Scheme defaults to \"https\" and Path defaults to \"/sdk\"\nfunc ParseURL(s string) (*url.URL, error) {\n\tvar err 
error\n\tvar u *url.URL\n\n\tif s != \"\" {\n\t\t// Default the scheme to https\n\t\tif !schemeMatch.MatchString(s) {\n\t\t\ts = \"https://\" + s\n\t\t}\n\n\t\tu, err = url.Parse(s)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Default the path to /sdk\n\t\tif u.Path == \"\" {\n\t\t\tu.Path = \"/sdk\"\n\t\t}\n\n\t\tif u.User == nil {\n\t\t\tu.User = url.UserPassword(\"\", \"\")\n\t\t}\n\t}\n\n\treturn u, nil\n}\n\nfunc NewClient(u *url.URL, insecure bool) *Client {\n\tc := Client{\n\t\tu: u,\n\t\tk: insecure,\n\t\td: newDebug(),\n\t}\n\n\t// Initialize http.RoundTripper on client, so we can customize it below\n\tif t, ok := http.DefaultTransport.(*http.Transport); ok {\n\t\tc.t = &http.Transport{\n\t\t\tProxy:                 t.Proxy,\n\t\t\tDialContext:           t.DialContext,\n\t\t\tMaxIdleConns:          t.MaxIdleConns,\n\t\t\tIdleConnTimeout:       t.IdleConnTimeout,\n\t\t\tTLSHandshakeTimeout:   t.TLSHandshakeTimeout,\n\t\t\tExpectContinueTimeout: t.ExpectContinueTimeout,\n\t\t}\n\t} else {\n\t\tc.t = new(http.Transport)\n\t}\n\n\tc.hosts = make(map[string]string)\n\tc.t.TLSClientConfig = &tls.Config{InsecureSkipVerify: c.k}\n\t// Don't bother setting DialTLS if InsecureSkipVerify=true\n\tif !c.k {\n\t\tc.t.DialTLS = c.dialTLS\n\t}\n\n\tc.Client.Transport = c.t\n\tc.Client.Jar, _ = cookiejar.New(nil)\n\n\t// Remove user information from a copy of the URL\n\tc.u = c.URL()\n\tc.u.User = nil\n\n\tc.Namespace = DefaultVimNamespace\n\tc.Version = DefaultVimVersion\n\n\treturn &c\n}\n\n// NewServiceClient creates a NewClient with the given URL.Path and namespace.\nfunc (c *Client) NewServiceClient(path string, namespace string) *Client {\n\tu := c.URL()\n\tu.Path = path\n\n\tclient := NewClient(u, c.k)\n\n\tclient.Namespace = namespace\n\n\t// Copy the cookies\n\tclient.Client.Jar.SetCookies(u, c.Client.Jar.Cookies(u))\n\n\t// Set SOAP Header cookie\n\tfor _, cookie := range client.Jar.Cookies(u) {\n\t\tif cookie.Name == \"vmware_soap_session\" 
{\n\t\t\tclient.header = &header{\n\t\t\t\tCookie: cookie.Value,\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn client\n}\n\n// SetRootCAs defines the set of root certificate authorities\n// that clients use when verifying server certificates.\n// By default TLS uses the host's root CA set.\n//\n// See: http.Client.Transport.TLSClientConfig.RootCAs\nfunc (c *Client) SetRootCAs(file string) error {\n\tpool := x509.NewCertPool()\n\n\tfor _, name := range filepath.SplitList(file) {\n\t\tpem, err := ioutil.ReadFile(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpool.AppendCertsFromPEM(pem)\n\t}\n\n\tc.t.TLSClientConfig.RootCAs = pool\n\n\treturn nil\n}\n\n// Add default https port if missing\nfunc hostAddr(addr string) string {\n\t_, port := splitHostPort(addr)\n\tif port == \"\" {\n\t\treturn addr + \":443\"\n\t}\n\treturn addr\n}\n\n// SetThumbprint sets the known certificate thumbprint for the given host.\n// A custom DialTLS function is used to support thumbprint based verification.\n// We first try tls.Dial with the default tls.Config, only falling back to thumbprint verification\n// if it fails with an x509.UnknownAuthorityError or x509.HostnameError\n//\n// See: http.Client.Transport.DialTLS\nfunc (c *Client) SetThumbprint(host string, thumbprint string) {\n\thost = hostAddr(host)\n\n\tc.hostsMu.Lock()\n\tif thumbprint == \"\" {\n\t\tdelete(c.hosts, host)\n\t} else {\n\t\tc.hosts[host] = thumbprint\n\t}\n\tc.hostsMu.Unlock()\n}\n\n// Thumbprint returns the certificate thumbprint for the given host if known to this client.\nfunc (c *Client) Thumbprint(host string) string {\n\thost = hostAddr(host)\n\tc.hostsMu.Lock()\n\tdefer c.hostsMu.Unlock()\n\treturn c.hosts[host]\n}\n\n// LoadThumbprints from file with the give name.\n// If name is empty or name does not exist this function will return nil.\nfunc (c *Client) LoadThumbprints(file string) error {\n\tif file == \"\" {\n\t\treturn nil\n\t}\n\n\tfor _, name := range filepath.SplitList(file) 
{\n\t\terr := c.loadThumbprints(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (c *Client) loadThumbprints(name string) error {\n\tf, err := os.Open(name)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\tscanner := bufio.NewScanner(f)\n\n\tfor scanner.Scan() {\n\t\te := strings.SplitN(scanner.Text(), \" \", 2)\n\t\tif len(e) != 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tc.SetThumbprint(e[0], e[1])\n\t}\n\n\t_ = f.Close()\n\n\treturn scanner.Err()\n}\n\n// ThumbprintSHA1 returns the thumbprint of the given cert in the same format used by the SDK and Client.SetThumbprint.\n//\n// See: SSLVerifyFault.Thumbprint, SessionManagerGenericServiceTicket.Thumbprint, HostConnectSpec.SslThumbprint\nfunc ThumbprintSHA1(cert *x509.Certificate) string {\n\tsum := sha1.Sum(cert.Raw)\n\thex := make([]string, len(sum))\n\tfor i, b := range sum {\n\t\thex[i] = fmt.Sprintf(\"%02X\", b)\n\t}\n\treturn strings.Join(hex, \":\")\n}\n\nfunc (c *Client) dialTLS(network string, addr string) (net.Conn, error) {\n\t// Would be nice if there was a tls.Config.Verify func,\n\t// see tls.clientHandshakeState.doFullHandshake\n\n\tconn, err := tls.Dial(network, addr, c.t.TLSClientConfig)\n\n\tif err == nil {\n\t\treturn conn, nil\n\t}\n\n\tswitch err.(type) {\n\tcase x509.UnknownAuthorityError:\n\tcase x509.HostnameError:\n\tdefault:\n\t\treturn nil, err\n\t}\n\n\tthumbprint := c.Thumbprint(addr)\n\tif thumbprint == \"\" {\n\t\treturn nil, err\n\t}\n\n\tconfig := &tls.Config{InsecureSkipVerify: true}\n\tconn, err = tls.Dial(network, addr, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcert := conn.ConnectionState().PeerCertificates[0]\n\tpeer := ThumbprintSHA1(cert)\n\tif thumbprint != peer {\n\t\t_ = conn.Close()\n\n\t\treturn nil, fmt.Errorf(\"Host %q thumbprint does not match %q\", addr, thumbprint)\n\t}\n\n\treturn conn, nil\n}\n\n// splitHostPort is similar to net.SplitHostPort,\n// but rather than return 
error if there isn't a ':port',\n// return an empty string for the port.\nfunc splitHostPort(host string) (string, string) {\n\tix := strings.LastIndex(host, \":\")\n\n\tif ix <= strings.LastIndex(host, \"]\") {\n\t\treturn host, \"\"\n\t}\n\n\tname := host[:ix]\n\tport := host[ix+1:]\n\n\treturn name, port\n}\n\nconst sdkTunnel = \"sdkTunnel:8089\"\n\nfunc (c *Client) SetCertificate(cert tls.Certificate) {\n\tt := c.Client.Transport.(*http.Transport)\n\n\t// Extension certificate\n\tt.TLSClientConfig.Certificates = []tls.Certificate{cert}\n\n\t// Proxy to vCenter host on port 80\n\thost, _ := splitHostPort(c.u.Host)\n\n\t// Should be no reason to change the default port other than testing\n\tkey := \"GOVMOMI_TUNNEL_PROXY_PORT\"\n\n\tport := c.URL().Query().Get(key)\n\tif port == \"\" {\n\t\tport = os.Getenv(key)\n\t}\n\n\tif port != \"\" {\n\t\thost += \":\" + port\n\t}\n\n\tc.p = &url.URL{\n\t\tScheme: \"http\",\n\t\tHost:   host,\n\t}\n\tt.Proxy = func(r *http.Request) (*url.URL, error) {\n\t\t// Only sdk requests should be proxied\n\t\tif r.URL.Path == \"/sdk\" {\n\t\t\treturn c.p, nil\n\t\t}\n\t\treturn http.ProxyFromEnvironment(r)\n\t}\n\n\t// Rewrite url Host to use the sdk tunnel, required for a certificate request.\n\tc.u.Host = sdkTunnel\n}\n\nfunc (c *Client) URL() *url.URL {\n\turlCopy := *c.u\n\treturn &urlCopy\n}\n\ntype marshaledClient struct {\n\tCookies  []*http.Cookie\n\tURL      *url.URL\n\tInsecure bool\n}\n\nfunc (c *Client) MarshalJSON() ([]byte, error) {\n\tm := marshaledClient{\n\t\tCookies:  c.Jar.Cookies(c.u),\n\t\tURL:      c.u,\n\t\tInsecure: c.k,\n\t}\n\n\treturn json.Marshal(m)\n}\n\nfunc (c *Client) UnmarshalJSON(b []byte) error {\n\tvar m marshaledClient\n\n\terr := json.Unmarshal(b, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t*c = *NewClient(m.URL, m.Insecure)\n\tc.Jar.SetCookies(m.URL, m.Cookies)\n\n\treturn nil\n}\n\nfunc (c *Client) do(ctx context.Context, req *http.Request) (*http.Response, error) {\n\tif nil == ctx || nil 
== ctx.Done() { // ctx.Done() is for ctx\n\t\treturn c.Client.Do(req)\n\t}\n\n\treturn c.Client.Do(req.WithContext(ctx))\n}\n\nfunc (c *Client) RoundTrip(ctx context.Context, reqBody, resBody HasFault) error {\n\tvar err error\n\n\treqEnv := Envelope{Body: reqBody}\n\tresEnv := Envelope{Body: resBody}\n\n\treqEnv.Header = c.header\n\n\t// Create debugging context for this round trip\n\td := c.d.newRoundTrip()\n\tif d.enabled() {\n\t\tdefer d.done()\n\t}\n\n\tb, err := xml.Marshal(reqEnv)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\trawReqBody := io.MultiReader(strings.NewReader(xml.Header), bytes.NewReader(b))\n\treq, err := http.NewRequest(\"POST\", c.u.String(), rawReqBody)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treq.Header.Set(`Content-Type`, `text/xml; charset=\"utf-8\"`)\n\tsoapAction := fmt.Sprintf(\"%s/%s\", c.Namespace, c.Version)\n\treq.Header.Set(`SOAPAction`, soapAction)\n\tif c.UserAgent != \"\" {\n\t\treq.Header.Set(`User-Agent`, c.UserAgent)\n\t}\n\n\tif d.enabled() {\n\t\td.debugRequest(req)\n\t}\n\n\ttstart := time.Now()\n\tres, err := c.do(ctx, req)\n\ttstop := time.Now()\n\n\tif d.enabled() {\n\t\td.logf(\"%6dms (%T)\", tstop.Sub(tstart)/time.Millisecond, resBody)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif d.enabled() {\n\t\td.debugResponse(res)\n\t}\n\n\t// Close response regardless of what happens next\n\tdefer res.Body.Close()\n\n\tswitch res.StatusCode {\n\tcase http.StatusOK:\n\t\t// OK\n\tcase http.StatusInternalServerError:\n\t\t// Error, but typically includes a body explaining the error\n\tdefault:\n\t\treturn errors.New(res.Status)\n\t}\n\n\tdec := xml.NewDecoder(res.Body)\n\tdec.TypeFunc = types.TypeFunc()\n\terr = dec.Decode(&resEnv)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif f := resBody.Fault(); f != nil {\n\t\treturn WrapSoapFault(f)\n\t}\n\n\treturn err\n}\n\nfunc (c *Client) CloseIdleConnections() {\n\tc.t.CloseIdleConnections()\n}\n\n// ParseURL wraps url.Parse to rewrite the URL.Host field\n// In the case 
of VM guest uploads or NFC lease URLs, a Host\n// field with a value of \"*\" is rewritten to the Client's URL.Host.\nfunc (c *Client) ParseURL(urlStr string) (*url.URL, error) {\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thost, _ := splitHostPort(u.Host)\n\tif host == \"*\" {\n\t\t// Also use Client's port, to support port forwarding\n\t\tu.Host = c.URL().Host\n\t}\n\n\treturn u, nil\n}\n\ntype Upload struct {\n\tType          string\n\tMethod        string\n\tContentLength int64\n\tHeaders       map[string]string\n\tTicket        *http.Cookie\n\tProgress      progress.Sinker\n}\n\nvar DefaultUpload = Upload{\n\tType:   \"application/octet-stream\",\n\tMethod: \"PUT\",\n}\n\n// Upload PUTs the local file to the given URL\nfunc (c *Client) Upload(f io.Reader, u *url.URL, param *Upload) error {\n\tvar err error\n\n\tif param.Progress != nil {\n\t\tpr := progress.NewReader(param.Progress, f, param.ContentLength)\n\t\tf = pr\n\n\t\t// Mark progress reader as done when returning from this function.\n\t\tdefer func() {\n\t\t\tpr.Done(err)\n\t\t}()\n\t}\n\n\treq, err := http.NewRequest(param.Method, u.String(), f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq.ContentLength = param.ContentLength\n\treq.Header.Set(\"Content-Type\", param.Type)\n\n\tfor k, v := range param.Headers {\n\t\treq.Header.Add(k, v)\n\t}\n\n\tif param.Ticket != nil {\n\t\treq.AddCookie(param.Ticket)\n\t}\n\n\tres, err := c.Client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch res.StatusCode {\n\tcase http.StatusOK:\n\tcase http.StatusCreated:\n\tdefault:\n\t\terr = errors.New(res.Status)\n\t}\n\n\treturn err\n}\n\n// UploadFile PUTs the local file to the given URL\nfunc (c *Client) UploadFile(file string, u *url.URL, param *Upload) error {\n\tif param == nil {\n\t\tp := DefaultUpload // Copy since we set ContentLength\n\t\tparam = &p\n\t}\n\n\ts, err := os.Stat(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf, err := os.Open(file)\n\tif err 
!= nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tparam.ContentLength = s.Size()\n\n\treturn c.Upload(f, u, param)\n}\n\ntype Download struct {\n\tMethod   string\n\tHeaders  map[string]string\n\tTicket   *http.Cookie\n\tProgress progress.Sinker\n}\n\nvar DefaultDownload = Download{\n\tMethod: \"GET\",\n}\n\n// DownloadRequest wraps http.Client.Do, returning the http.Response without checking its StatusCode\nfunc (c *Client) DownloadRequest(u *url.URL, param *Download) (*http.Response, error) {\n\treq, err := http.NewRequest(param.Method, u.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor k, v := range param.Headers {\n\t\treq.Header.Add(k, v)\n\t}\n\n\tif param.Ticket != nil {\n\t\treq.AddCookie(param.Ticket)\n\t}\n\n\treturn c.Client.Do(req)\n}\n\n// directoryReader wraps an io.ReadCloser to support streaming download\n// of a guest directory, stops reading once it sees the stream trailer.\n// This is only useful when guest tools is the Go toolbox.\n// The trailer is required since TransferFromGuest requires a Content-Length,\n// which toolbox doesn't know ahead of time as the gzip'd tarball never touches the disk.\n// We opted to wrap this here for now rather than guest.FileManager so\n// DownloadFile can be also be used as-is to handle this use case.\ntype directoryReader struct {\n\tio.ReadCloser\n}\n\nvar (\n\tgzipHeader    = []byte{0x1f, 0x8b, 0x08} // rfc1952 {ID1, ID2, CM}\n\tgzipHeaderLen = len(gzipHeader)\n)\n\nfunc (r *directoryReader) Read(buf []byte) (int, error) {\n\tnr, err := r.ReadCloser.Read(buf)\n\n\t// Stop reading if the last N bytes are the gzipTrailer\n\tif nr >= gzipHeaderLen {\n\t\tif bytes.Equal(buf[nr-gzipHeaderLen:nr], gzipHeader) {\n\t\t\tnr -= gzipHeaderLen\n\t\t\terr = io.EOF\n\t\t}\n\t}\n\n\treturn nr, err\n}\n\n// Download GETs the remote file from the given URL\nfunc (c *Client) Download(u *url.URL, param *Download) (io.ReadCloser, int64, error) {\n\tres, err := c.DownloadRequest(u, param)\n\tif err != nil 
{\n\t\treturn nil, 0, err\n\t}\n\n\tswitch res.StatusCode {\n\tcase http.StatusOK:\n\tdefault:\n\t\terr = errors.New(res.Status)\n\t}\n\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tr := res.Body\n\n\tif strings.HasSuffix(u.Path, \"/\") {\n\t\tr = &directoryReader{ReadCloser: r}\n\t}\n\n\treturn r, res.ContentLength, nil\n}\n\n// DownloadFile GETs the given URL to a local file\nfunc (c *Client) DownloadFile(file string, u *url.URL, param *Download) error {\n\tvar err error\n\tif param == nil {\n\t\tparam = &DefaultDownload\n\t}\n\n\trc, contentLength, err := c.Download(u, param)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rc.Close()\n\n\tvar r io.Reader = rc\n\n\tfh, err := os.Create(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer fh.Close()\n\n\tif param.Progress != nil {\n\t\tpr := progress.NewReader(param.Progress, r, contentLength)\n\t\tr = pr\n\n\t\t// Mark progress reader as done when returning from this function.\n\t\tdefer func() {\n\t\t\tpr.Done(err)\n\t\t}()\n\t}\n\n\t_, err = io.Copy(fh, r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Assign error before returning so that it gets picked up by the deferred\n\t// function marking the progress reader as done.\n\terr = fh.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/soap/client_test.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage soap\n\nimport \"testing\"\n\nfunc TestSplitHostPort(t *testing.T) {\n\ttests := []struct {\n\t\turl  string\n\t\thost string\n\t\tport string\n\t}{\n\t\t{\"127.0.0.1\", \"127.0.0.1\", \"\"},\n\t\t{\"*:1234\", \"*\", \"1234\"},\n\t\t{\"127.0.0.1:80\", \"127.0.0.1\", \"80\"},\n\t\t{\"[::1]:6767\", \"[::1]\", \"6767\"},\n\t\t{\"[::1]\", \"[::1]\", \"\"},\n\t}\n\n\tfor _, test := range tests {\n\t\thost, port := splitHostPort(test.url)\n\t\tif host != test.host {\n\t\t\tt.Errorf(\"(%s) %s != %s\", test.url, host, test.host)\n\t\t}\n\t\tif port != test.port {\n\t\t\tt.Errorf(\"(%s) %s != %s\", test.url, port, test.port)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/soap/debug.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage soap\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httputil\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/vmware/govmomi/vim25/debug\"\n)\n\n// teeReader wraps io.TeeReader and patches through the Close() function.\ntype teeReader struct {\n\tio.Reader\n\tio.Closer\n}\n\nfunc newTeeReader(rc io.ReadCloser, w io.Writer) io.ReadCloser {\n\treturn teeReader{\n\t\tReader: io.TeeReader(rc, w),\n\t\tCloser: rc,\n\t}\n}\n\n// debugRoundTrip contains state and logic needed to debug a single round trip.\ntype debugRoundTrip struct {\n\tcn  uint64         // Client number\n\trn  uint64         // Request number\n\tlog io.WriteCloser // Request log\n\tcs  []io.Closer    // Files that need closing when done\n}\n\nfunc (d *debugRoundTrip) logf(format string, a ...interface{}) {\n\tnow := time.Now().Format(\"2006-01-02T15-04-05.000000000\")\n\tfmt.Fprintf(d.log, \"%s - %04d: \", now, d.rn)\n\tfmt.Fprintf(d.log, format, a...)\n\tfmt.Fprintf(d.log, \"\\n\")\n}\n\nfunc (d *debugRoundTrip) enabled() bool {\n\treturn d != nil\n}\n\nfunc (d *debugRoundTrip) done() {\n\tfor _, c := range d.cs {\n\t\tc.Close()\n\t}\n}\n\nfunc (d *debugRoundTrip) newFile(suffix string) io.WriteCloser {\n\treturn debug.NewFile(fmt.Sprintf(\"%d-%04d.%s\", d.cn, d.rn, suffix))\n}\n\nfunc (d *debugRoundTrip) debugRequest(req *http.Request) {\n\tif d == nil 
{\n\t\treturn\n\t}\n\n\tvar wc io.WriteCloser\n\n\t// Capture headers\n\twc = d.newFile(\"req.headers\")\n\tb, _ := httputil.DumpRequest(req, false)\n\twc.Write(b)\n\twc.Close()\n\n\t// Capture body\n\twc = d.newFile(\"req.xml\")\n\treq.Body = newTeeReader(req.Body, wc)\n\n\t// Delay closing until marked done\n\td.cs = append(d.cs, wc)\n}\n\nfunc (d *debugRoundTrip) debugResponse(res *http.Response) {\n\tif d == nil {\n\t\treturn\n\t}\n\n\tvar wc io.WriteCloser\n\n\t// Capture headers\n\twc = d.newFile(\"res.headers\")\n\tb, _ := httputil.DumpResponse(res, false)\n\twc.Write(b)\n\twc.Close()\n\n\t// Capture body\n\twc = d.newFile(\"res.xml\")\n\tres.Body = newTeeReader(res.Body, wc)\n\n\t// Delay closing until marked done\n\td.cs = append(d.cs, wc)\n}\n\nvar cn uint64 // Client counter\n\n// debugContainer wraps the debugging state for a single client.\ntype debugContainer struct {\n\tcn  uint64         // Client number\n\trn  uint64         // Request counter\n\tlog io.WriteCloser // Request log\n}\n\nfunc newDebug() *debugContainer {\n\td := debugContainer{\n\t\tcn: atomic.AddUint64(&cn, 1),\n\t\trn: 0,\n\t}\n\n\tif !debug.Enabled() {\n\t\treturn nil\n\t}\n\n\td.log = debug.NewFile(fmt.Sprintf(\"%d-client.log\", d.cn))\n\treturn &d\n}\n\nfunc (d *debugContainer) newRoundTrip() *debugRoundTrip {\n\tif d == nil {\n\t\treturn nil\n\t}\n\n\tdrt := debugRoundTrip{\n\t\tcn:  d.cn,\n\t\trn:  atomic.AddUint64(&d.rn, 1),\n\t\tlog: d.log,\n\t}\n\n\treturn &drt\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/soap/error.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage soap\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com/vmware/govmomi/vim25/types\"\n)\n\ntype regularError struct {\n\terr error\n}\n\nfunc (r regularError) Error() string {\n\treturn r.err.Error()\n}\n\ntype soapFaultError struct {\n\tfault *Fault\n}\n\nfunc (s soapFaultError) Error() string {\n\tmsg := s.fault.String\n\n\tif msg == \"\" {\n\t\tmsg = reflect.TypeOf(s.fault.Detail.Fault).Name()\n\t}\n\n\treturn fmt.Sprintf(\"%s: %s\", s.fault.Code, msg)\n}\n\ntype vimFaultError struct {\n\tfault types.BaseMethodFault\n}\n\nfunc (v vimFaultError) Error() string {\n\ttyp := reflect.TypeOf(v.fault)\n\tfor typ.Kind() == reflect.Ptr {\n\t\ttyp = typ.Elem()\n\t}\n\n\treturn typ.Name()\n}\n\nfunc (v vimFaultError) Fault() types.BaseMethodFault {\n\treturn v.fault\n}\n\nfunc Wrap(err error) error {\n\tswitch err.(type) {\n\tcase regularError:\n\t\treturn err\n\tcase soapFaultError:\n\t\treturn err\n\tcase vimFaultError:\n\t\treturn err\n\t}\n\n\treturn WrapRegularError(err)\n}\n\nfunc WrapRegularError(err error) error {\n\treturn regularError{err}\n}\n\nfunc IsRegularError(err error) bool {\n\t_, ok := err.(regularError)\n\treturn ok\n}\n\nfunc ToRegularError(err error) error {\n\treturn err.(regularError).err\n}\n\nfunc WrapSoapFault(f *Fault) error {\n\treturn soapFaultError{f}\n}\n\nfunc IsSoapFault(err error) bool {\n\t_, ok := 
err.(soapFaultError)\n\treturn ok\n}\n\nfunc ToSoapFault(err error) *Fault {\n\treturn err.(soapFaultError).fault\n}\n\nfunc WrapVimFault(v types.BaseMethodFault) error {\n\treturn vimFaultError{v}\n}\n\nfunc IsVimFault(err error) bool {\n\t_, ok := err.(vimFaultError)\n\treturn ok\n}\n\nfunc ToVimFault(err error) types.BaseMethodFault {\n\treturn err.(vimFaultError).fault\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/soap/soap.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage soap\n\nimport (\n\t\"github.com/vmware/govmomi/vim25/types\"\n\t\"github.com/vmware/govmomi/vim25/xml\"\n)\n\ntype Envelope struct {\n\tXMLName xml.Name    `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Envelope\"`\n\tHeader  interface{} `xml:\",omitempty\"`\n\tBody    interface{}\n}\n\ntype Fault struct {\n\tXMLName xml.Name `xml:\"http://schemas.xmlsoap.org/soap/envelope/ Fault\"`\n\tCode    string   `xml:\"faultcode\"`\n\tString  string   `xml:\"faultstring\"`\n\tDetail  struct {\n\t\tFault types.AnyType `xml:\",any,typeattr\"`\n\t} `xml:\"detail\"`\n}\n\nfunc (f *Fault) VimFault() types.AnyType {\n\treturn f.Detail.Fault\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/soap/soap_test.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage soap\n\nimport (\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/vim25/xml\"\n)\n\nfunc TestEmptyEnvelope(t *testing.T) {\n\tenv := Envelope{}\n\n\tb, err := xml.Marshal(env)\n\tif err != nil {\n\t\tt.Errorf(\"error: %s\", err)\n\t\treturn\n\t}\n\n\texpected := `<Envelope xmlns=\"http://schemas.xmlsoap.org/soap/envelope/\"></Envelope>`\n\tactual := string(b)\n\tif expected != actual {\n\t\tt.Fatalf(\"expected: %s, actual: %s\", expected, actual)\n\t}\n}\n\nfunc TestNonEmptyHeader(t *testing.T) {\n\tenv := Envelope{\n\t\tHeader: struct {\n\t\t\tFoo string\n\t\t}{\"bar\"},\n\t}\n\n\tb, err := xml.Marshal(env)\n\tif err != nil {\n\t\tt.Errorf(\"error: %s\", err)\n\t\treturn\n\t}\n\n\tenv = Envelope{}\n\terr = xml.Unmarshal(b, &env)\n\tif err != nil {\n\t\tt.Errorf(\"error: %s\", err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/types/base.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage types\n\ntype AnyType interface{}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/types/base_test.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage types\n\nimport (\n\t\"bytes\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/vim25/xml\"\n)\n\nfunc TestAnyType(t *testing.T) {\n\tx := func(s string) []byte {\n\t\ts = `<root xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">` + s\n\t\ts += `</root>`\n\t\treturn []byte(s)\n\t}\n\n\ttests := []struct {\n\t\tInput []byte\n\t\tValue interface{}\n\t}{\n\t\t{\n\t\t\tInput: x(`<name xsi:type=\"xsd:string\">test</name>`),\n\t\t\tValue: \"test\",\n\t\t},\n\t\t{\n\t\t\tInput: x(`<name xsi:type=\"ArrayOfString\"><string>AA</string><string>BB</string></name>`),\n\t\t\tValue: ArrayOfString{String: []string{\"AA\", \"BB\"}},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tvar r struct {\n\t\t\tA interface{} `xml:\"name,typeattr\"`\n\t\t}\n\n\t\tdec := xml.NewDecoder(bytes.NewReader(test.Input))\n\t\tdec.TypeFunc = TypeFunc()\n\n\t\terr := dec.Decode(&r)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Decode: %s\", err)\n\t\t}\n\n\t\tif !reflect.DeepEqual(r.A, test.Value) {\n\t\t\tt.Errorf(\"Expected: %#v, actual: %#v\", r.A, test.Value)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/types/enum.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage types\n\nimport \"reflect\"\n\ntype ActionParameter string\n\nconst (\n\tActionParameterTargetName        = ActionParameter(\"targetName\")\n\tActionParameterAlarmName         = ActionParameter(\"alarmName\")\n\tActionParameterOldStatus         = ActionParameter(\"oldStatus\")\n\tActionParameterNewStatus         = ActionParameter(\"newStatus\")\n\tActionParameterTriggeringSummary = ActionParameter(\"triggeringSummary\")\n\tActionParameterDeclaringSummary  = ActionParameter(\"declaringSummary\")\n\tActionParameterEventDescription  = ActionParameter(\"eventDescription\")\n\tActionParameterTarget            = ActionParameter(\"target\")\n\tActionParameterAlarm             = ActionParameter(\"alarm\")\n)\n\nfunc init() {\n\tt[\"ActionParameter\"] = reflect.TypeOf((*ActionParameter)(nil)).Elem()\n}\n\ntype ActionType string\n\nconst (\n\tActionTypeMigrationV1         = ActionType(\"MigrationV1\")\n\tActionTypeVmPowerV1           = ActionType(\"VmPowerV1\")\n\tActionTypeHostPowerV1         = ActionType(\"HostPowerV1\")\n\tActionTypeHostMaintenanceV1   = ActionType(\"HostMaintenanceV1\")\n\tActionTypeStorageMigrationV1  = ActionType(\"StorageMigrationV1\")\n\tActionTypeStoragePlacementV1  = ActionType(\"StoragePlacementV1\")\n\tActionTypePlacementV1         = ActionType(\"PlacementV1\")\n\tActionTypeHostInfraUpdateHaV1 = 
ActionType(\"HostInfraUpdateHaV1\")\n)\n\nfunc init() {\n\tt[\"ActionType\"] = reflect.TypeOf((*ActionType)(nil)).Elem()\n}\n\ntype AffinityType string\n\nconst (\n\tAffinityTypeMemory = AffinityType(\"memory\")\n\tAffinityTypeCpu    = AffinityType(\"cpu\")\n)\n\nfunc init() {\n\tt[\"AffinityType\"] = reflect.TypeOf((*AffinityType)(nil)).Elem()\n}\n\ntype AgentInstallFailedReason string\n\nconst (\n\tAgentInstallFailedReasonNotEnoughSpaceOnDevice      = AgentInstallFailedReason(\"NotEnoughSpaceOnDevice\")\n\tAgentInstallFailedReasonPrepareToUpgradeFailed      = AgentInstallFailedReason(\"PrepareToUpgradeFailed\")\n\tAgentInstallFailedReasonAgentNotRunning             = AgentInstallFailedReason(\"AgentNotRunning\")\n\tAgentInstallFailedReasonAgentNotReachable           = AgentInstallFailedReason(\"AgentNotReachable\")\n\tAgentInstallFailedReasonInstallTimedout             = AgentInstallFailedReason(\"InstallTimedout\")\n\tAgentInstallFailedReasonSignatureVerificationFailed = AgentInstallFailedReason(\"SignatureVerificationFailed\")\n\tAgentInstallFailedReasonAgentUploadFailed           = AgentInstallFailedReason(\"AgentUploadFailed\")\n\tAgentInstallFailedReasonAgentUploadTimedout         = AgentInstallFailedReason(\"AgentUploadTimedout\")\n\tAgentInstallFailedReasonUnknownInstallerError       = AgentInstallFailedReason(\"UnknownInstallerError\")\n)\n\nfunc init() {\n\tt[\"AgentInstallFailedReason\"] = reflect.TypeOf((*AgentInstallFailedReason)(nil)).Elem()\n}\n\ntype ArrayUpdateOperation string\n\nconst (\n\tArrayUpdateOperationAdd    = ArrayUpdateOperation(\"add\")\n\tArrayUpdateOperationRemove = ArrayUpdateOperation(\"remove\")\n\tArrayUpdateOperationEdit   = ArrayUpdateOperation(\"edit\")\n)\n\nfunc init() {\n\tt[\"ArrayUpdateOperation\"] = reflect.TypeOf((*ArrayUpdateOperation)(nil)).Elem()\n}\n\ntype AutoStartAction string\n\nconst (\n\tAutoStartActionNone          = AutoStartAction(\"none\")\n\tAutoStartActionSystemDefault = 
AutoStartAction(\"systemDefault\")\n\tAutoStartActionPowerOn       = AutoStartAction(\"powerOn\")\n\tAutoStartActionPowerOff      = AutoStartAction(\"powerOff\")\n\tAutoStartActionGuestShutdown = AutoStartAction(\"guestShutdown\")\n\tAutoStartActionSuspend       = AutoStartAction(\"suspend\")\n)\n\nfunc init() {\n\tt[\"AutoStartAction\"] = reflect.TypeOf((*AutoStartAction)(nil)).Elem()\n}\n\ntype AutoStartWaitHeartbeatSetting string\n\nconst (\n\tAutoStartWaitHeartbeatSettingYes           = AutoStartWaitHeartbeatSetting(\"yes\")\n\tAutoStartWaitHeartbeatSettingNo            = AutoStartWaitHeartbeatSetting(\"no\")\n\tAutoStartWaitHeartbeatSettingSystemDefault = AutoStartWaitHeartbeatSetting(\"systemDefault\")\n)\n\nfunc init() {\n\tt[\"AutoStartWaitHeartbeatSetting\"] = reflect.TypeOf((*AutoStartWaitHeartbeatSetting)(nil)).Elem()\n}\n\ntype BaseConfigInfoDiskFileBackingInfoProvisioningType string\n\nconst (\n\tBaseConfigInfoDiskFileBackingInfoProvisioningTypeThin             = BaseConfigInfoDiskFileBackingInfoProvisioningType(\"thin\")\n\tBaseConfigInfoDiskFileBackingInfoProvisioningTypeEagerZeroedThick = BaseConfigInfoDiskFileBackingInfoProvisioningType(\"eagerZeroedThick\")\n\tBaseConfigInfoDiskFileBackingInfoProvisioningTypeLazyZeroedThick  = BaseConfigInfoDiskFileBackingInfoProvisioningType(\"lazyZeroedThick\")\n)\n\nfunc init() {\n\tt[\"BaseConfigInfoDiskFileBackingInfoProvisioningType\"] = reflect.TypeOf((*BaseConfigInfoDiskFileBackingInfoProvisioningType)(nil)).Elem()\n}\n\ntype BatchResultResult string\n\nconst (\n\tBatchResultResultSuccess = BatchResultResult(\"success\")\n\tBatchResultResultFail    = BatchResultResult(\"fail\")\n)\n\nfunc init() {\n\tt[\"BatchResultResult\"] = reflect.TypeOf((*BatchResultResult)(nil)).Elem()\n}\n\ntype CannotEnableVmcpForClusterReason string\n\nconst (\n\tCannotEnableVmcpForClusterReasonAPDTimeoutDisabled      = 
CannotEnableVmcpForClusterReason(\"APDTimeoutDisabled\")\n\tCannotEnableVmcpForClusterReasonIncompatibleHostVersion = CannotEnableVmcpForClusterReason(\"IncompatibleHostVersion\")\n)\n\nfunc init() {\n\tt[\"CannotEnableVmcpForClusterReason\"] = reflect.TypeOf((*CannotEnableVmcpForClusterReason)(nil)).Elem()\n}\n\ntype CannotMoveFaultToleranceVmMoveType string\n\nconst (\n\tCannotMoveFaultToleranceVmMoveTypeResourcePool = CannotMoveFaultToleranceVmMoveType(\"resourcePool\")\n\tCannotMoveFaultToleranceVmMoveTypeCluster      = CannotMoveFaultToleranceVmMoveType(\"cluster\")\n)\n\nfunc init() {\n\tt[\"CannotMoveFaultToleranceVmMoveType\"] = reflect.TypeOf((*CannotMoveFaultToleranceVmMoveType)(nil)).Elem()\n}\n\ntype CannotPowerOffVmInClusterOperation string\n\nconst (\n\tCannotPowerOffVmInClusterOperationSuspend       = CannotPowerOffVmInClusterOperation(\"suspend\")\n\tCannotPowerOffVmInClusterOperationPowerOff      = CannotPowerOffVmInClusterOperation(\"powerOff\")\n\tCannotPowerOffVmInClusterOperationGuestShutdown = CannotPowerOffVmInClusterOperation(\"guestShutdown\")\n\tCannotPowerOffVmInClusterOperationGuestSuspend  = CannotPowerOffVmInClusterOperation(\"guestSuspend\")\n)\n\nfunc init() {\n\tt[\"CannotPowerOffVmInClusterOperation\"] = reflect.TypeOf((*CannotPowerOffVmInClusterOperation)(nil)).Elem()\n}\n\ntype CannotUseNetworkReason string\n\nconst (\n\tCannotUseNetworkReasonNetworkReservationNotSupported  = CannotUseNetworkReason(\"NetworkReservationNotSupported\")\n\tCannotUseNetworkReasonMismatchedNetworkPolicies       = CannotUseNetworkReason(\"MismatchedNetworkPolicies\")\n\tCannotUseNetworkReasonMismatchedDvsVersionOrVendor    = CannotUseNetworkReason(\"MismatchedDvsVersionOrVendor\")\n\tCannotUseNetworkReasonVMotionToUnsupportedNetworkType = CannotUseNetworkReason(\"VMotionToUnsupportedNetworkType\")\n)\n\nfunc init() {\n\tt[\"CannotUseNetworkReason\"] = reflect.TypeOf((*CannotUseNetworkReason)(nil)).Elem()\n}\n\ntype CheckTestType string\n\nconst 
(\n\tCheckTestTypeSourceTests       = CheckTestType(\"sourceTests\")\n\tCheckTestTypeHostTests         = CheckTestType(\"hostTests\")\n\tCheckTestTypeResourcePoolTests = CheckTestType(\"resourcePoolTests\")\n\tCheckTestTypeDatastoreTests    = CheckTestType(\"datastoreTests\")\n\tCheckTestTypeNetworkTests      = CheckTestType(\"networkTests\")\n)\n\nfunc init() {\n\tt[\"CheckTestType\"] = reflect.TypeOf((*CheckTestType)(nil)).Elem()\n}\n\ntype ClusterDasAamNodeStateDasState string\n\nconst (\n\tClusterDasAamNodeStateDasStateUninitialized = ClusterDasAamNodeStateDasState(\"uninitialized\")\n\tClusterDasAamNodeStateDasStateInitialized   = ClusterDasAamNodeStateDasState(\"initialized\")\n\tClusterDasAamNodeStateDasStateConfiguring   = ClusterDasAamNodeStateDasState(\"configuring\")\n\tClusterDasAamNodeStateDasStateUnconfiguring = ClusterDasAamNodeStateDasState(\"unconfiguring\")\n\tClusterDasAamNodeStateDasStateRunning       = ClusterDasAamNodeStateDasState(\"running\")\n\tClusterDasAamNodeStateDasStateError         = ClusterDasAamNodeStateDasState(\"error\")\n\tClusterDasAamNodeStateDasStateAgentShutdown = ClusterDasAamNodeStateDasState(\"agentShutdown\")\n\tClusterDasAamNodeStateDasStateNodeFailed    = ClusterDasAamNodeStateDasState(\"nodeFailed\")\n)\n\nfunc init() {\n\tt[\"ClusterDasAamNodeStateDasState\"] = reflect.TypeOf((*ClusterDasAamNodeStateDasState)(nil)).Elem()\n}\n\ntype ClusterDasConfigInfoHBDatastoreCandidate string\n\nconst (\n\tClusterDasConfigInfoHBDatastoreCandidateUserSelectedDs                  = ClusterDasConfigInfoHBDatastoreCandidate(\"userSelectedDs\")\n\tClusterDasConfigInfoHBDatastoreCandidateAllFeasibleDs                   = ClusterDasConfigInfoHBDatastoreCandidate(\"allFeasibleDs\")\n\tClusterDasConfigInfoHBDatastoreCandidateAllFeasibleDsWithUserPreference = ClusterDasConfigInfoHBDatastoreCandidate(\"allFeasibleDsWithUserPreference\")\n)\n\nfunc init() {\n\tt[\"ClusterDasConfigInfoHBDatastoreCandidate\"] = 
reflect.TypeOf((*ClusterDasConfigInfoHBDatastoreCandidate)(nil)).Elem()\n}\n\ntype ClusterDasConfigInfoServiceState string\n\nconst (\n\tClusterDasConfigInfoServiceStateDisabled = ClusterDasConfigInfoServiceState(\"disabled\")\n\tClusterDasConfigInfoServiceStateEnabled  = ClusterDasConfigInfoServiceState(\"enabled\")\n)\n\nfunc init() {\n\tt[\"ClusterDasConfigInfoServiceState\"] = reflect.TypeOf((*ClusterDasConfigInfoServiceState)(nil)).Elem()\n}\n\ntype ClusterDasConfigInfoVmMonitoringState string\n\nconst (\n\tClusterDasConfigInfoVmMonitoringStateVmMonitoringDisabled = ClusterDasConfigInfoVmMonitoringState(\"vmMonitoringDisabled\")\n\tClusterDasConfigInfoVmMonitoringStateVmMonitoringOnly     = ClusterDasConfigInfoVmMonitoringState(\"vmMonitoringOnly\")\n\tClusterDasConfigInfoVmMonitoringStateVmAndAppMonitoring   = ClusterDasConfigInfoVmMonitoringState(\"vmAndAppMonitoring\")\n)\n\nfunc init() {\n\tt[\"ClusterDasConfigInfoVmMonitoringState\"] = reflect.TypeOf((*ClusterDasConfigInfoVmMonitoringState)(nil)).Elem()\n}\n\ntype ClusterDasFdmAvailabilityState string\n\nconst (\n\tClusterDasFdmAvailabilityStateUninitialized                = ClusterDasFdmAvailabilityState(\"uninitialized\")\n\tClusterDasFdmAvailabilityStateElection                     = ClusterDasFdmAvailabilityState(\"election\")\n\tClusterDasFdmAvailabilityStateMaster                       = ClusterDasFdmAvailabilityState(\"master\")\n\tClusterDasFdmAvailabilityStateConnectedToMaster            = ClusterDasFdmAvailabilityState(\"connectedToMaster\")\n\tClusterDasFdmAvailabilityStateNetworkPartitionedFromMaster = ClusterDasFdmAvailabilityState(\"networkPartitionedFromMaster\")\n\tClusterDasFdmAvailabilityStateNetworkIsolated              = ClusterDasFdmAvailabilityState(\"networkIsolated\")\n\tClusterDasFdmAvailabilityStateHostDown                     = ClusterDasFdmAvailabilityState(\"hostDown\")\n\tClusterDasFdmAvailabilityStateInitializationError          = 
ClusterDasFdmAvailabilityState(\"initializationError\")\n\tClusterDasFdmAvailabilityStateUninitializationError        = ClusterDasFdmAvailabilityState(\"uninitializationError\")\n\tClusterDasFdmAvailabilityStateFdmUnreachable               = ClusterDasFdmAvailabilityState(\"fdmUnreachable\")\n)\n\nfunc init() {\n\tt[\"ClusterDasFdmAvailabilityState\"] = reflect.TypeOf((*ClusterDasFdmAvailabilityState)(nil)).Elem()\n}\n\ntype ClusterDasVmSettingsIsolationResponse string\n\nconst (\n\tClusterDasVmSettingsIsolationResponseNone                     = ClusterDasVmSettingsIsolationResponse(\"none\")\n\tClusterDasVmSettingsIsolationResponsePowerOff                 = ClusterDasVmSettingsIsolationResponse(\"powerOff\")\n\tClusterDasVmSettingsIsolationResponseShutdown                 = ClusterDasVmSettingsIsolationResponse(\"shutdown\")\n\tClusterDasVmSettingsIsolationResponseClusterIsolationResponse = ClusterDasVmSettingsIsolationResponse(\"clusterIsolationResponse\")\n)\n\nfunc init() {\n\tt[\"ClusterDasVmSettingsIsolationResponse\"] = reflect.TypeOf((*ClusterDasVmSettingsIsolationResponse)(nil)).Elem()\n}\n\ntype ClusterDasVmSettingsRestartPriority string\n\nconst (\n\tClusterDasVmSettingsRestartPriorityDisabled               = ClusterDasVmSettingsRestartPriority(\"disabled\")\n\tClusterDasVmSettingsRestartPriorityLowest                 = ClusterDasVmSettingsRestartPriority(\"lowest\")\n\tClusterDasVmSettingsRestartPriorityLow                    = ClusterDasVmSettingsRestartPriority(\"low\")\n\tClusterDasVmSettingsRestartPriorityMedium                 = ClusterDasVmSettingsRestartPriority(\"medium\")\n\tClusterDasVmSettingsRestartPriorityHigh                   = ClusterDasVmSettingsRestartPriority(\"high\")\n\tClusterDasVmSettingsRestartPriorityHighest                = ClusterDasVmSettingsRestartPriority(\"highest\")\n\tClusterDasVmSettingsRestartPriorityClusterRestartPriority = ClusterDasVmSettingsRestartPriority(\"clusterRestartPriority\")\n)\n\nfunc init() 
{\n\tt[\"ClusterDasVmSettingsRestartPriority\"] = reflect.TypeOf((*ClusterDasVmSettingsRestartPriority)(nil)).Elem()\n}\n\ntype ClusterHostInfraUpdateHaModeActionOperationType string\n\nconst (\n\tClusterHostInfraUpdateHaModeActionOperationTypeEnterQuarantine  = ClusterHostInfraUpdateHaModeActionOperationType(\"enterQuarantine\")\n\tClusterHostInfraUpdateHaModeActionOperationTypeExitQuarantine   = ClusterHostInfraUpdateHaModeActionOperationType(\"exitQuarantine\")\n\tClusterHostInfraUpdateHaModeActionOperationTypeEnterMaintenance = ClusterHostInfraUpdateHaModeActionOperationType(\"enterMaintenance\")\n)\n\nfunc init() {\n\tt[\"ClusterHostInfraUpdateHaModeActionOperationType\"] = reflect.TypeOf((*ClusterHostInfraUpdateHaModeActionOperationType)(nil)).Elem()\n}\n\ntype ClusterInfraUpdateHaConfigInfoBehaviorType string\n\nconst (\n\tClusterInfraUpdateHaConfigInfoBehaviorTypeManual    = ClusterInfraUpdateHaConfigInfoBehaviorType(\"Manual\")\n\tClusterInfraUpdateHaConfigInfoBehaviorTypeAutomated = ClusterInfraUpdateHaConfigInfoBehaviorType(\"Automated\")\n)\n\nfunc init() {\n\tt[\"ClusterInfraUpdateHaConfigInfoBehaviorType\"] = reflect.TypeOf((*ClusterInfraUpdateHaConfigInfoBehaviorType)(nil)).Elem()\n}\n\ntype ClusterInfraUpdateHaConfigInfoRemediationType string\n\nconst (\n\tClusterInfraUpdateHaConfigInfoRemediationTypeQuarantineMode  = ClusterInfraUpdateHaConfigInfoRemediationType(\"QuarantineMode\")\n\tClusterInfraUpdateHaConfigInfoRemediationTypeMaintenanceMode = ClusterInfraUpdateHaConfigInfoRemediationType(\"MaintenanceMode\")\n)\n\nfunc init() {\n\tt[\"ClusterInfraUpdateHaConfigInfoRemediationType\"] = reflect.TypeOf((*ClusterInfraUpdateHaConfigInfoRemediationType)(nil)).Elem()\n}\n\ntype ClusterPowerOnVmOption string\n\nconst (\n\tClusterPowerOnVmOptionOverrideAutomationLevel = ClusterPowerOnVmOption(\"OverrideAutomationLevel\")\n\tClusterPowerOnVmOptionReserveResources        = ClusterPowerOnVmOption(\"ReserveResources\")\n)\n\nfunc init() 
{\n\tt[\"ClusterPowerOnVmOption\"] = reflect.TypeOf((*ClusterPowerOnVmOption)(nil)).Elem()\n}\n\ntype ClusterProfileServiceType string\n\nconst (\n\tClusterProfileServiceTypeDRS = ClusterProfileServiceType(\"DRS\")\n\tClusterProfileServiceTypeHA  = ClusterProfileServiceType(\"HA\")\n\tClusterProfileServiceTypeDPM = ClusterProfileServiceType(\"DPM\")\n\tClusterProfileServiceTypeFT  = ClusterProfileServiceType(\"FT\")\n)\n\nfunc init() {\n\tt[\"ClusterProfileServiceType\"] = reflect.TypeOf((*ClusterProfileServiceType)(nil)).Elem()\n}\n\ntype ClusterVmComponentProtectionSettingsStorageVmReaction string\n\nconst (\n\tClusterVmComponentProtectionSettingsStorageVmReactionDisabled            = ClusterVmComponentProtectionSettingsStorageVmReaction(\"disabled\")\n\tClusterVmComponentProtectionSettingsStorageVmReactionWarning             = ClusterVmComponentProtectionSettingsStorageVmReaction(\"warning\")\n\tClusterVmComponentProtectionSettingsStorageVmReactionRestartConservative = ClusterVmComponentProtectionSettingsStorageVmReaction(\"restartConservative\")\n\tClusterVmComponentProtectionSettingsStorageVmReactionRestartAggressive   = ClusterVmComponentProtectionSettingsStorageVmReaction(\"restartAggressive\")\n\tClusterVmComponentProtectionSettingsStorageVmReactionClusterDefault      = ClusterVmComponentProtectionSettingsStorageVmReaction(\"clusterDefault\")\n)\n\nfunc init() {\n\tt[\"ClusterVmComponentProtectionSettingsStorageVmReaction\"] = reflect.TypeOf((*ClusterVmComponentProtectionSettingsStorageVmReaction)(nil)).Elem()\n}\n\ntype ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared string\n\nconst (\n\tClusterVmComponentProtectionSettingsVmReactionOnAPDClearedNone              = ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared(\"none\")\n\tClusterVmComponentProtectionSettingsVmReactionOnAPDClearedReset             = 
ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared(\"reset\")\n\tClusterVmComponentProtectionSettingsVmReactionOnAPDClearedUseClusterDefault = ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared(\"useClusterDefault\")\n)\n\nfunc init() {\n\tt[\"ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared\"] = reflect.TypeOf((*ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared)(nil)).Elem()\n}\n\ntype ClusterVmReadinessReadyCondition string\n\nconst (\n\tClusterVmReadinessReadyConditionNone               = ClusterVmReadinessReadyCondition(\"none\")\n\tClusterVmReadinessReadyConditionPoweredOn          = ClusterVmReadinessReadyCondition(\"poweredOn\")\n\tClusterVmReadinessReadyConditionGuestHbStatusGreen = ClusterVmReadinessReadyCondition(\"guestHbStatusGreen\")\n\tClusterVmReadinessReadyConditionAppHbStatusGreen   = ClusterVmReadinessReadyCondition(\"appHbStatusGreen\")\n\tClusterVmReadinessReadyConditionUseClusterDefault  = ClusterVmReadinessReadyCondition(\"useClusterDefault\")\n)\n\nfunc init() {\n\tt[\"ClusterVmReadinessReadyCondition\"] = reflect.TypeOf((*ClusterVmReadinessReadyCondition)(nil)).Elem()\n}\n\ntype ComplianceResultStatus string\n\nconst (\n\tComplianceResultStatusCompliant    = ComplianceResultStatus(\"compliant\")\n\tComplianceResultStatusNonCompliant = ComplianceResultStatus(\"nonCompliant\")\n\tComplianceResultStatusUnknown      = ComplianceResultStatus(\"unknown\")\n)\n\nfunc init() {\n\tt[\"ComplianceResultStatus\"] = reflect.TypeOf((*ComplianceResultStatus)(nil)).Elem()\n}\n\ntype ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState string\n\nconst (\n\tComputeResourceHostSPBMLicenseInfoHostSPBMLicenseStateLicensed   = ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState(\"licensed\")\n\tComputeResourceHostSPBMLicenseInfoHostSPBMLicenseStateUnlicensed = ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState(\"unlicensed\")\n\tComputeResourceHostSPBMLicenseInfoHostSPBMLicenseStateUnknown    = 
ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState(\"unknown\")\n)\n\nfunc init() {\n\tt[\"ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState\"] = reflect.TypeOf((*ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState)(nil)).Elem()\n}\n\ntype ConfigSpecOperation string\n\nconst (\n\tConfigSpecOperationAdd    = ConfigSpecOperation(\"add\")\n\tConfigSpecOperationEdit   = ConfigSpecOperation(\"edit\")\n\tConfigSpecOperationRemove = ConfigSpecOperation(\"remove\")\n)\n\nfunc init() {\n\tt[\"ConfigSpecOperation\"] = reflect.TypeOf((*ConfigSpecOperation)(nil)).Elem()\n}\n\ntype CustomizationLicenseDataMode string\n\nconst (\n\tCustomizationLicenseDataModePerServer = CustomizationLicenseDataMode(\"perServer\")\n\tCustomizationLicenseDataModePerSeat   = CustomizationLicenseDataMode(\"perSeat\")\n)\n\nfunc init() {\n\tt[\"CustomizationLicenseDataMode\"] = reflect.TypeOf((*CustomizationLicenseDataMode)(nil)).Elem()\n}\n\ntype CustomizationNetBIOSMode string\n\nconst (\n\tCustomizationNetBIOSModeEnableNetBIOSViaDhcp = CustomizationNetBIOSMode(\"enableNetBIOSViaDhcp\")\n\tCustomizationNetBIOSModeEnableNetBIOS        = CustomizationNetBIOSMode(\"enableNetBIOS\")\n\tCustomizationNetBIOSModeDisableNetBIOS       = CustomizationNetBIOSMode(\"disableNetBIOS\")\n)\n\nfunc init() {\n\tt[\"CustomizationNetBIOSMode\"] = reflect.TypeOf((*CustomizationNetBIOSMode)(nil)).Elem()\n}\n\ntype CustomizationSysprepRebootOption string\n\nconst (\n\tCustomizationSysprepRebootOptionReboot   = CustomizationSysprepRebootOption(\"reboot\")\n\tCustomizationSysprepRebootOptionNoreboot = CustomizationSysprepRebootOption(\"noreboot\")\n\tCustomizationSysprepRebootOptionShutdown = CustomizationSysprepRebootOption(\"shutdown\")\n)\n\nfunc init() {\n\tt[\"CustomizationSysprepRebootOption\"] = reflect.TypeOf((*CustomizationSysprepRebootOption)(nil)).Elem()\n}\n\ntype DVPortStatusVmDirectPathGen2InactiveReasonNetwork string\n\nconst 
(\n\tDVPortStatusVmDirectPathGen2InactiveReasonNetworkPortNptIncompatibleDvs             = DVPortStatusVmDirectPathGen2InactiveReasonNetwork(\"portNptIncompatibleDvs\")\n\tDVPortStatusVmDirectPathGen2InactiveReasonNetworkPortNptNoCompatibleNics            = DVPortStatusVmDirectPathGen2InactiveReasonNetwork(\"portNptNoCompatibleNics\")\n\tDVPortStatusVmDirectPathGen2InactiveReasonNetworkPortNptNoVirtualFunctionsAvailable = DVPortStatusVmDirectPathGen2InactiveReasonNetwork(\"portNptNoVirtualFunctionsAvailable\")\n\tDVPortStatusVmDirectPathGen2InactiveReasonNetworkPortNptDisabledForPort             = DVPortStatusVmDirectPathGen2InactiveReasonNetwork(\"portNptDisabledForPort\")\n)\n\nfunc init() {\n\tt[\"DVPortStatusVmDirectPathGen2InactiveReasonNetwork\"] = reflect.TypeOf((*DVPortStatusVmDirectPathGen2InactiveReasonNetwork)(nil)).Elem()\n}\n\ntype DVPortStatusVmDirectPathGen2InactiveReasonOther string\n\nconst (\n\tDVPortStatusVmDirectPathGen2InactiveReasonOtherPortNptIncompatibleHost      = DVPortStatusVmDirectPathGen2InactiveReasonOther(\"portNptIncompatibleHost\")\n\tDVPortStatusVmDirectPathGen2InactiveReasonOtherPortNptIncompatibleConnectee = DVPortStatusVmDirectPathGen2InactiveReasonOther(\"portNptIncompatibleConnectee\")\n)\n\nfunc init() {\n\tt[\"DVPortStatusVmDirectPathGen2InactiveReasonOther\"] = reflect.TypeOf((*DVPortStatusVmDirectPathGen2InactiveReasonOther)(nil)).Elem()\n}\n\ntype DasConfigFaultDasConfigFaultReason string\n\nconst (\n\tDasConfigFaultDasConfigFaultReasonHostNetworkMisconfiguration = DasConfigFaultDasConfigFaultReason(\"HostNetworkMisconfiguration\")\n\tDasConfigFaultDasConfigFaultReasonHostMisconfiguration        = DasConfigFaultDasConfigFaultReason(\"HostMisconfiguration\")\n\tDasConfigFaultDasConfigFaultReasonInsufficientPrivileges      = DasConfigFaultDasConfigFaultReason(\"InsufficientPrivileges\")\n\tDasConfigFaultDasConfigFaultReasonNoPrimaryAgentAvailable     = 
DasConfigFaultDasConfigFaultReason(\"NoPrimaryAgentAvailable\")\n\tDasConfigFaultDasConfigFaultReasonOther                       = DasConfigFaultDasConfigFaultReason(\"Other\")\n\tDasConfigFaultDasConfigFaultReasonNoDatastoresConfigured      = DasConfigFaultDasConfigFaultReason(\"NoDatastoresConfigured\")\n\tDasConfigFaultDasConfigFaultReasonCreateConfigVvolFailed      = DasConfigFaultDasConfigFaultReason(\"CreateConfigVvolFailed\")\n\tDasConfigFaultDasConfigFaultReasonVSanNotSupportedOnHost      = DasConfigFaultDasConfigFaultReason(\"VSanNotSupportedOnHost\")\n\tDasConfigFaultDasConfigFaultReasonDasNetworkMisconfiguration  = DasConfigFaultDasConfigFaultReason(\"DasNetworkMisconfiguration\")\n)\n\nfunc init() {\n\tt[\"DasConfigFaultDasConfigFaultReason\"] = reflect.TypeOf((*DasConfigFaultDasConfigFaultReason)(nil)).Elem()\n}\n\ntype DasVmPriority string\n\nconst (\n\tDasVmPriorityDisabled = DasVmPriority(\"disabled\")\n\tDasVmPriorityLow      = DasVmPriority(\"low\")\n\tDasVmPriorityMedium   = DasVmPriority(\"medium\")\n\tDasVmPriorityHigh     = DasVmPriority(\"high\")\n)\n\nfunc init() {\n\tt[\"DasVmPriority\"] = reflect.TypeOf((*DasVmPriority)(nil)).Elem()\n}\n\ntype DatastoreAccessible string\n\nconst (\n\tDatastoreAccessibleTrue  = DatastoreAccessible(\"True\")\n\tDatastoreAccessibleFalse = DatastoreAccessible(\"False\")\n)\n\nfunc init() {\n\tt[\"DatastoreAccessible\"] = reflect.TypeOf((*DatastoreAccessible)(nil)).Elem()\n}\n\ntype DatastoreSummaryMaintenanceModeState string\n\nconst (\n\tDatastoreSummaryMaintenanceModeStateNormal              = DatastoreSummaryMaintenanceModeState(\"normal\")\n\tDatastoreSummaryMaintenanceModeStateEnteringMaintenance = DatastoreSummaryMaintenanceModeState(\"enteringMaintenance\")\n\tDatastoreSummaryMaintenanceModeStateInMaintenance       = DatastoreSummaryMaintenanceModeState(\"inMaintenance\")\n)\n\nfunc init() {\n\tt[\"DatastoreSummaryMaintenanceModeState\"] = 
reflect.TypeOf((*DatastoreSummaryMaintenanceModeState)(nil)).Elem()\n}\n\ntype DayOfWeek string\n\nconst (\n\tDayOfWeekSunday    = DayOfWeek(\"sunday\")\n\tDayOfWeekMonday    = DayOfWeek(\"monday\")\n\tDayOfWeekTuesday   = DayOfWeek(\"tuesday\")\n\tDayOfWeekWednesday = DayOfWeek(\"wednesday\")\n\tDayOfWeekThursday  = DayOfWeek(\"thursday\")\n\tDayOfWeekFriday    = DayOfWeek(\"friday\")\n\tDayOfWeekSaturday  = DayOfWeek(\"saturday\")\n)\n\nfunc init() {\n\tt[\"DayOfWeek\"] = reflect.TypeOf((*DayOfWeek)(nil)).Elem()\n}\n\ntype DeviceNotSupportedReason string\n\nconst (\n\tDeviceNotSupportedReasonHost  = DeviceNotSupportedReason(\"host\")\n\tDeviceNotSupportedReasonGuest = DeviceNotSupportedReason(\"guest\")\n)\n\nfunc init() {\n\tt[\"DeviceNotSupportedReason\"] = reflect.TypeOf((*DeviceNotSupportedReason)(nil)).Elem()\n}\n\ntype DiagnosticManagerLogCreator string\n\nconst (\n\tDiagnosticManagerLogCreatorVpxd      = DiagnosticManagerLogCreator(\"vpxd\")\n\tDiagnosticManagerLogCreatorVpxa      = DiagnosticManagerLogCreator(\"vpxa\")\n\tDiagnosticManagerLogCreatorHostd     = DiagnosticManagerLogCreator(\"hostd\")\n\tDiagnosticManagerLogCreatorServerd   = DiagnosticManagerLogCreator(\"serverd\")\n\tDiagnosticManagerLogCreatorInstall   = DiagnosticManagerLogCreator(\"install\")\n\tDiagnosticManagerLogCreatorVpxClient = DiagnosticManagerLogCreator(\"vpxClient\")\n\tDiagnosticManagerLogCreatorRecordLog = DiagnosticManagerLogCreator(\"recordLog\")\n)\n\nfunc init() {\n\tt[\"DiagnosticManagerLogCreator\"] = reflect.TypeOf((*DiagnosticManagerLogCreator)(nil)).Elem()\n}\n\ntype DiagnosticManagerLogFormat string\n\nconst (\n\tDiagnosticManagerLogFormatPlain = DiagnosticManagerLogFormat(\"plain\")\n)\n\nfunc init() {\n\tt[\"DiagnosticManagerLogFormat\"] = reflect.TypeOf((*DiagnosticManagerLogFormat)(nil)).Elem()\n}\n\ntype DiagnosticPartitionStorageType string\n\nconst (\n\tDiagnosticPartitionStorageTypeDirectAttached  = 
DiagnosticPartitionStorageType(\"directAttached\")\n\tDiagnosticPartitionStorageTypeNetworkAttached = DiagnosticPartitionStorageType(\"networkAttached\")\n)\n\nfunc init() {\n\tt[\"DiagnosticPartitionStorageType\"] = reflect.TypeOf((*DiagnosticPartitionStorageType)(nil)).Elem()\n}\n\ntype DiagnosticPartitionType string\n\nconst (\n\tDiagnosticPartitionTypeSingleHost = DiagnosticPartitionType(\"singleHost\")\n\tDiagnosticPartitionTypeMultiHost  = DiagnosticPartitionType(\"multiHost\")\n)\n\nfunc init() {\n\tt[\"DiagnosticPartitionType\"] = reflect.TypeOf((*DiagnosticPartitionType)(nil)).Elem()\n}\n\ntype DisallowedChangeByServiceDisallowedChange string\n\nconst (\n\tDisallowedChangeByServiceDisallowedChangeHotExtendDisk = DisallowedChangeByServiceDisallowedChange(\"hotExtendDisk\")\n)\n\nfunc init() {\n\tt[\"DisallowedChangeByServiceDisallowedChange\"] = reflect.TypeOf((*DisallowedChangeByServiceDisallowedChange)(nil)).Elem()\n}\n\ntype DistributedVirtualPortgroupMetaTagName string\n\nconst (\n\tDistributedVirtualPortgroupMetaTagNameDvsName       = DistributedVirtualPortgroupMetaTagName(\"dvsName\")\n\tDistributedVirtualPortgroupMetaTagNamePortgroupName = DistributedVirtualPortgroupMetaTagName(\"portgroupName\")\n\tDistributedVirtualPortgroupMetaTagNamePortIndex     = DistributedVirtualPortgroupMetaTagName(\"portIndex\")\n)\n\nfunc init() {\n\tt[\"DistributedVirtualPortgroupMetaTagName\"] = reflect.TypeOf((*DistributedVirtualPortgroupMetaTagName)(nil)).Elem()\n}\n\ntype DistributedVirtualPortgroupPortgroupType string\n\nconst (\n\tDistributedVirtualPortgroupPortgroupTypeEarlyBinding = DistributedVirtualPortgroupPortgroupType(\"earlyBinding\")\n\tDistributedVirtualPortgroupPortgroupTypeLateBinding  = DistributedVirtualPortgroupPortgroupType(\"lateBinding\")\n\tDistributedVirtualPortgroupPortgroupTypeEphemeral    = DistributedVirtualPortgroupPortgroupType(\"ephemeral\")\n)\n\nfunc init() {\n\tt[\"DistributedVirtualPortgroupPortgroupType\"] = 
reflect.TypeOf((*DistributedVirtualPortgroupPortgroupType)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchHostInfrastructureTrafficClass string\n\nconst (\n\tDistributedVirtualSwitchHostInfrastructureTrafficClassManagement     = DistributedVirtualSwitchHostInfrastructureTrafficClass(\"management\")\n\tDistributedVirtualSwitchHostInfrastructureTrafficClassFaultTolerance = DistributedVirtualSwitchHostInfrastructureTrafficClass(\"faultTolerance\")\n\tDistributedVirtualSwitchHostInfrastructureTrafficClassVmotion        = DistributedVirtualSwitchHostInfrastructureTrafficClass(\"vmotion\")\n\tDistributedVirtualSwitchHostInfrastructureTrafficClassVirtualMachine = DistributedVirtualSwitchHostInfrastructureTrafficClass(\"virtualMachine\")\n\tDistributedVirtualSwitchHostInfrastructureTrafficClassISCSI          = DistributedVirtualSwitchHostInfrastructureTrafficClass(\"iSCSI\")\n\tDistributedVirtualSwitchHostInfrastructureTrafficClassNfs            = DistributedVirtualSwitchHostInfrastructureTrafficClass(\"nfs\")\n\tDistributedVirtualSwitchHostInfrastructureTrafficClassHbr            = DistributedVirtualSwitchHostInfrastructureTrafficClass(\"hbr\")\n\tDistributedVirtualSwitchHostInfrastructureTrafficClassVsan           = DistributedVirtualSwitchHostInfrastructureTrafficClass(\"vsan\")\n\tDistributedVirtualSwitchHostInfrastructureTrafficClassVdp            = DistributedVirtualSwitchHostInfrastructureTrafficClass(\"vdp\")\n)\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchHostInfrastructureTrafficClass\"] = reflect.TypeOf((*DistributedVirtualSwitchHostInfrastructureTrafficClass)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchHostMemberHostComponentState string\n\nconst (\n\tDistributedVirtualSwitchHostMemberHostComponentStateUp           = DistributedVirtualSwitchHostMemberHostComponentState(\"up\")\n\tDistributedVirtualSwitchHostMemberHostComponentStatePending      = 
DistributedVirtualSwitchHostMemberHostComponentState(\"pending\")\n\tDistributedVirtualSwitchHostMemberHostComponentStateOutOfSync    = DistributedVirtualSwitchHostMemberHostComponentState(\"outOfSync\")\n\tDistributedVirtualSwitchHostMemberHostComponentStateWarning      = DistributedVirtualSwitchHostMemberHostComponentState(\"warning\")\n\tDistributedVirtualSwitchHostMemberHostComponentStateDisconnected = DistributedVirtualSwitchHostMemberHostComponentState(\"disconnected\")\n\tDistributedVirtualSwitchHostMemberHostComponentStateDown         = DistributedVirtualSwitchHostMemberHostComponentState(\"down\")\n)\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchHostMemberHostComponentState\"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberHostComponentState)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchNetworkResourceControlVersion string\n\nconst (\n\tDistributedVirtualSwitchNetworkResourceControlVersionVersion2 = DistributedVirtualSwitchNetworkResourceControlVersion(\"version2\")\n\tDistributedVirtualSwitchNetworkResourceControlVersionVersion3 = DistributedVirtualSwitchNetworkResourceControlVersion(\"version3\")\n)\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchNetworkResourceControlVersion\"] = reflect.TypeOf((*DistributedVirtualSwitchNetworkResourceControlVersion)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchNicTeamingPolicyMode string\n\nconst (\n\tDistributedVirtualSwitchNicTeamingPolicyModeLoadbalance_ip        = DistributedVirtualSwitchNicTeamingPolicyMode(\"loadbalance_ip\")\n\tDistributedVirtualSwitchNicTeamingPolicyModeLoadbalance_srcmac    = DistributedVirtualSwitchNicTeamingPolicyMode(\"loadbalance_srcmac\")\n\tDistributedVirtualSwitchNicTeamingPolicyModeLoadbalance_srcid     = DistributedVirtualSwitchNicTeamingPolicyMode(\"loadbalance_srcid\")\n\tDistributedVirtualSwitchNicTeamingPolicyModeFailover_explicit     = 
DistributedVirtualSwitchNicTeamingPolicyMode(\"failover_explicit\")\n\tDistributedVirtualSwitchNicTeamingPolicyModeLoadbalance_loadbased = DistributedVirtualSwitchNicTeamingPolicyMode(\"loadbalance_loadbased\")\n)\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchNicTeamingPolicyMode\"] = reflect.TypeOf((*DistributedVirtualSwitchNicTeamingPolicyMode)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchPortConnecteeConnecteeType string\n\nconst (\n\tDistributedVirtualSwitchPortConnecteeConnecteeTypePnic            = DistributedVirtualSwitchPortConnecteeConnecteeType(\"pnic\")\n\tDistributedVirtualSwitchPortConnecteeConnecteeTypeVmVnic          = DistributedVirtualSwitchPortConnecteeConnecteeType(\"vmVnic\")\n\tDistributedVirtualSwitchPortConnecteeConnecteeTypeHostConsoleVnic = DistributedVirtualSwitchPortConnecteeConnecteeType(\"hostConsoleVnic\")\n\tDistributedVirtualSwitchPortConnecteeConnecteeTypeHostVmkVnic     = DistributedVirtualSwitchPortConnecteeConnecteeType(\"hostVmkVnic\")\n)\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchPortConnecteeConnecteeType\"] = reflect.TypeOf((*DistributedVirtualSwitchPortConnecteeConnecteeType)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchProductSpecOperationType string\n\nconst (\n\tDistributedVirtualSwitchProductSpecOperationTypePreInstall             = DistributedVirtualSwitchProductSpecOperationType(\"preInstall\")\n\tDistributedVirtualSwitchProductSpecOperationTypeUpgrade                = DistributedVirtualSwitchProductSpecOperationType(\"upgrade\")\n\tDistributedVirtualSwitchProductSpecOperationTypeNotifyAvailableUpgrade = DistributedVirtualSwitchProductSpecOperationType(\"notifyAvailableUpgrade\")\n\tDistributedVirtualSwitchProductSpecOperationTypeProceedWithUpgrade     = DistributedVirtualSwitchProductSpecOperationType(\"proceedWithUpgrade\")\n\tDistributedVirtualSwitchProductSpecOperationTypeUpdateBundleInfo       = DistributedVirtualSwitchProductSpecOperationType(\"updateBundleInfo\")\n)\n\nfunc init() 
{\n\tt[\"DistributedVirtualSwitchProductSpecOperationType\"] = reflect.TypeOf((*DistributedVirtualSwitchProductSpecOperationType)(nil)).Elem()\n}\n\ntype DpmBehavior string\n\nconst (\n\tDpmBehaviorManual    = DpmBehavior(\"manual\")\n\tDpmBehaviorAutomated = DpmBehavior(\"automated\")\n)\n\nfunc init() {\n\tt[\"DpmBehavior\"] = reflect.TypeOf((*DpmBehavior)(nil)).Elem()\n}\n\ntype DrsBehavior string\n\nconst (\n\tDrsBehaviorManual             = DrsBehavior(\"manual\")\n\tDrsBehaviorPartiallyAutomated = DrsBehavior(\"partiallyAutomated\")\n\tDrsBehaviorFullyAutomated     = DrsBehavior(\"fullyAutomated\")\n)\n\nfunc init() {\n\tt[\"DrsBehavior\"] = reflect.TypeOf((*DrsBehavior)(nil)).Elem()\n}\n\ntype DrsInjectorWorkloadCorrelationState string\n\nconst (\n\tDrsInjectorWorkloadCorrelationStateCorrelated   = DrsInjectorWorkloadCorrelationState(\"Correlated\")\n\tDrsInjectorWorkloadCorrelationStateUncorrelated = DrsInjectorWorkloadCorrelationState(\"Uncorrelated\")\n)\n\nfunc init() {\n\tt[\"DrsInjectorWorkloadCorrelationState\"] = reflect.TypeOf((*DrsInjectorWorkloadCorrelationState)(nil)).Elem()\n}\n\ntype DrsRecommendationReasonCode string\n\nconst (\n\tDrsRecommendationReasonCodeFairnessCpuAvg = DrsRecommendationReasonCode(\"fairnessCpuAvg\")\n\tDrsRecommendationReasonCodeFairnessMemAvg = DrsRecommendationReasonCode(\"fairnessMemAvg\")\n\tDrsRecommendationReasonCodeJointAffin     = DrsRecommendationReasonCode(\"jointAffin\")\n\tDrsRecommendationReasonCodeAntiAffin      = DrsRecommendationReasonCode(\"antiAffin\")\n\tDrsRecommendationReasonCodeHostMaint      = DrsRecommendationReasonCode(\"hostMaint\")\n)\n\nfunc init() {\n\tt[\"DrsRecommendationReasonCode\"] = reflect.TypeOf((*DrsRecommendationReasonCode)(nil)).Elem()\n}\n\ntype DvsEventPortBlockState string\n\nconst (\n\tDvsEventPortBlockStateUnset     = DvsEventPortBlockState(\"unset\")\n\tDvsEventPortBlockStateBlocked   = DvsEventPortBlockState(\"blocked\")\n\tDvsEventPortBlockStateUnblocked = 
DvsEventPortBlockState(\"unblocked\")\n\tDvsEventPortBlockStateUnknown   = DvsEventPortBlockState(\"unknown\")\n)\n\nfunc init() {\n\tt[\"DvsEventPortBlockState\"] = reflect.TypeOf((*DvsEventPortBlockState)(nil)).Elem()\n}\n\ntype DvsFilterOnFailure string\n\nconst (\n\tDvsFilterOnFailureFailOpen   = DvsFilterOnFailure(\"failOpen\")\n\tDvsFilterOnFailureFailClosed = DvsFilterOnFailure(\"failClosed\")\n)\n\nfunc init() {\n\tt[\"DvsFilterOnFailure\"] = reflect.TypeOf((*DvsFilterOnFailure)(nil)).Elem()\n}\n\ntype DvsNetworkRuleDirectionType string\n\nconst (\n\tDvsNetworkRuleDirectionTypeIncomingPackets = DvsNetworkRuleDirectionType(\"incomingPackets\")\n\tDvsNetworkRuleDirectionTypeOutgoingPackets = DvsNetworkRuleDirectionType(\"outgoingPackets\")\n\tDvsNetworkRuleDirectionTypeBoth            = DvsNetworkRuleDirectionType(\"both\")\n)\n\nfunc init() {\n\tt[\"DvsNetworkRuleDirectionType\"] = reflect.TypeOf((*DvsNetworkRuleDirectionType)(nil)).Elem()\n}\n\ntype EntityImportType string\n\nconst (\n\tEntityImportTypeCreateEntityWithNewIdentifier      = EntityImportType(\"createEntityWithNewIdentifier\")\n\tEntityImportTypeCreateEntityWithOriginalIdentifier = EntityImportType(\"createEntityWithOriginalIdentifier\")\n\tEntityImportTypeApplyToEntitySpecified             = EntityImportType(\"applyToEntitySpecified\")\n)\n\nfunc init() {\n\tt[\"EntityImportType\"] = reflect.TypeOf((*EntityImportType)(nil)).Elem()\n}\n\ntype EntityType string\n\nconst (\n\tEntityTypeDistributedVirtualSwitch    = EntityType(\"distributedVirtualSwitch\")\n\tEntityTypeDistributedVirtualPortgroup = EntityType(\"distributedVirtualPortgroup\")\n)\n\nfunc init() {\n\tt[\"EntityType\"] = reflect.TypeOf((*EntityType)(nil)).Elem()\n}\n\ntype EventAlarmExpressionComparisonOperator string\n\nconst (\n\tEventAlarmExpressionComparisonOperatorEquals           = EventAlarmExpressionComparisonOperator(\"equals\")\n\tEventAlarmExpressionComparisonOperatorNotEqualTo       = 
EventAlarmExpressionComparisonOperator(\"notEqualTo\")\n\tEventAlarmExpressionComparisonOperatorStartsWith       = EventAlarmExpressionComparisonOperator(\"startsWith\")\n\tEventAlarmExpressionComparisonOperatorDoesNotStartWith = EventAlarmExpressionComparisonOperator(\"doesNotStartWith\")\n\tEventAlarmExpressionComparisonOperatorEndsWith         = EventAlarmExpressionComparisonOperator(\"endsWith\")\n\tEventAlarmExpressionComparisonOperatorDoesNotEndWith   = EventAlarmExpressionComparisonOperator(\"doesNotEndWith\")\n)\n\nfunc init() {\n\tt[\"EventAlarmExpressionComparisonOperator\"] = reflect.TypeOf((*EventAlarmExpressionComparisonOperator)(nil)).Elem()\n}\n\ntype EventCategory string\n\nconst (\n\tEventCategoryInfo    = EventCategory(\"info\")\n\tEventCategoryWarning = EventCategory(\"warning\")\n\tEventCategoryError   = EventCategory(\"error\")\n\tEventCategoryUser    = EventCategory(\"user\")\n)\n\nfunc init() {\n\tt[\"EventCategory\"] = reflect.TypeOf((*EventCategory)(nil)).Elem()\n}\n\ntype EventEventSeverity string\n\nconst (\n\tEventEventSeverityError   = EventEventSeverity(\"error\")\n\tEventEventSeverityWarning = EventEventSeverity(\"warning\")\n\tEventEventSeverityInfo    = EventEventSeverity(\"info\")\n\tEventEventSeverityUser    = EventEventSeverity(\"user\")\n)\n\nfunc init() {\n\tt[\"EventEventSeverity\"] = reflect.TypeOf((*EventEventSeverity)(nil)).Elem()\n}\n\ntype EventFilterSpecRecursionOption string\n\nconst (\n\tEventFilterSpecRecursionOptionSelf     = EventFilterSpecRecursionOption(\"self\")\n\tEventFilterSpecRecursionOptionChildren = EventFilterSpecRecursionOption(\"children\")\n\tEventFilterSpecRecursionOptionAll      = EventFilterSpecRecursionOption(\"all\")\n)\n\nfunc init() {\n\tt[\"EventFilterSpecRecursionOption\"] = reflect.TypeOf((*EventFilterSpecRecursionOption)(nil)).Elem()\n}\n\ntype FibreChannelPortType string\n\nconst (\n\tFibreChannelPortTypeFabric       = FibreChannelPortType(\"fabric\")\n\tFibreChannelPortTypeLoop         = 
FibreChannelPortType(\"loop\")\n\tFibreChannelPortTypePointToPoint = FibreChannelPortType(\"pointToPoint\")\n\tFibreChannelPortTypeUnknown      = FibreChannelPortType(\"unknown\")\n)\n\nfunc init() {\n\tt[\"FibreChannelPortType\"] = reflect.TypeOf((*FibreChannelPortType)(nil)).Elem()\n}\n\ntype FileSystemMountInfoVStorageSupportStatus string\n\nconst (\n\tFileSystemMountInfoVStorageSupportStatusVStorageSupported   = FileSystemMountInfoVStorageSupportStatus(\"vStorageSupported\")\n\tFileSystemMountInfoVStorageSupportStatusVStorageUnsupported = FileSystemMountInfoVStorageSupportStatus(\"vStorageUnsupported\")\n\tFileSystemMountInfoVStorageSupportStatusVStorageUnknown     = FileSystemMountInfoVStorageSupportStatus(\"vStorageUnknown\")\n)\n\nfunc init() {\n\tt[\"FileSystemMountInfoVStorageSupportStatus\"] = reflect.TypeOf((*FileSystemMountInfoVStorageSupportStatus)(nil)).Elem()\n}\n\ntype FtIssuesOnHostHostSelectionType string\n\nconst (\n\tFtIssuesOnHostHostSelectionTypeUser = FtIssuesOnHostHostSelectionType(\"user\")\n\tFtIssuesOnHostHostSelectionTypeVc   = FtIssuesOnHostHostSelectionType(\"vc\")\n\tFtIssuesOnHostHostSelectionTypeDrs  = FtIssuesOnHostHostSelectionType(\"drs\")\n)\n\nfunc init() {\n\tt[\"FtIssuesOnHostHostSelectionType\"] = reflect.TypeOf((*FtIssuesOnHostHostSelectionType)(nil)).Elem()\n}\n\ntype GuestFileType string\n\nconst (\n\tGuestFileTypeFile      = GuestFileType(\"file\")\n\tGuestFileTypeDirectory = GuestFileType(\"directory\")\n\tGuestFileTypeSymlink   = GuestFileType(\"symlink\")\n)\n\nfunc init() {\n\tt[\"GuestFileType\"] = reflect.TypeOf((*GuestFileType)(nil)).Elem()\n}\n\ntype GuestInfoAppStateType string\n\nconst (\n\tGuestInfoAppStateTypeNone              = GuestInfoAppStateType(\"none\")\n\tGuestInfoAppStateTypeAppStateOk        = GuestInfoAppStateType(\"appStateOk\")\n\tGuestInfoAppStateTypeAppStateNeedReset = GuestInfoAppStateType(\"appStateNeedReset\")\n)\n\nfunc init() {\n\tt[\"GuestInfoAppStateType\"] = 
reflect.TypeOf((*GuestInfoAppStateType)(nil)).Elem()\n}\n\ntype GuestOsDescriptorFirmwareType string\n\nconst (\n\tGuestOsDescriptorFirmwareTypeBios = GuestOsDescriptorFirmwareType(\"bios\")\n\tGuestOsDescriptorFirmwareTypeEfi  = GuestOsDescriptorFirmwareType(\"efi\")\n)\n\nfunc init() {\n\tt[\"GuestOsDescriptorFirmwareType\"] = reflect.TypeOf((*GuestOsDescriptorFirmwareType)(nil)).Elem()\n}\n\ntype GuestOsDescriptorSupportLevel string\n\nconst (\n\tGuestOsDescriptorSupportLevelExperimental = GuestOsDescriptorSupportLevel(\"experimental\")\n\tGuestOsDescriptorSupportLevelLegacy       = GuestOsDescriptorSupportLevel(\"legacy\")\n\tGuestOsDescriptorSupportLevelTerminated   = GuestOsDescriptorSupportLevel(\"terminated\")\n\tGuestOsDescriptorSupportLevelSupported    = GuestOsDescriptorSupportLevel(\"supported\")\n\tGuestOsDescriptorSupportLevelUnsupported  = GuestOsDescriptorSupportLevel(\"unsupported\")\n\tGuestOsDescriptorSupportLevelDeprecated   = GuestOsDescriptorSupportLevel(\"deprecated\")\n\tGuestOsDescriptorSupportLevelTechPreview  = GuestOsDescriptorSupportLevel(\"techPreview\")\n)\n\nfunc init() {\n\tt[\"GuestOsDescriptorSupportLevel\"] = reflect.TypeOf((*GuestOsDescriptorSupportLevel)(nil)).Elem()\n}\n\ntype GuestRegKeyWowSpec string\n\nconst (\n\tGuestRegKeyWowSpecWOWNative = GuestRegKeyWowSpec(\"WOWNative\")\n\tGuestRegKeyWowSpecWOW32     = GuestRegKeyWowSpec(\"WOW32\")\n\tGuestRegKeyWowSpecWOW64     = GuestRegKeyWowSpec(\"WOW64\")\n)\n\nfunc init() {\n\tt[\"GuestRegKeyWowSpec\"] = reflect.TypeOf((*GuestRegKeyWowSpec)(nil)).Elem()\n}\n\ntype HealthUpdateInfoComponentType string\n\nconst (\n\tHealthUpdateInfoComponentTypeMemory  = HealthUpdateInfoComponentType(\"Memory\")\n\tHealthUpdateInfoComponentTypePower   = HealthUpdateInfoComponentType(\"Power\")\n\tHealthUpdateInfoComponentTypeFan     = HealthUpdateInfoComponentType(\"Fan\")\n\tHealthUpdateInfoComponentTypeNetwork = HealthUpdateInfoComponentType(\"Network\")\n\tHealthUpdateInfoComponentTypeStorage = 
HealthUpdateInfoComponentType(\"Storage\")\n)\n\nfunc init() {\n\tt[\"HealthUpdateInfoComponentType\"] = reflect.TypeOf((*HealthUpdateInfoComponentType)(nil)).Elem()\n}\n\ntype HostAccessMode string\n\nconst (\n\tHostAccessModeAccessNone     = HostAccessMode(\"accessNone\")\n\tHostAccessModeAccessAdmin    = HostAccessMode(\"accessAdmin\")\n\tHostAccessModeAccessNoAccess = HostAccessMode(\"accessNoAccess\")\n\tHostAccessModeAccessReadOnly = HostAccessMode(\"accessReadOnly\")\n\tHostAccessModeAccessOther    = HostAccessMode(\"accessOther\")\n)\n\nfunc init() {\n\tt[\"HostAccessMode\"] = reflect.TypeOf((*HostAccessMode)(nil)).Elem()\n}\n\ntype HostActiveDirectoryAuthenticationCertificateDigest string\n\nconst (\n\tHostActiveDirectoryAuthenticationCertificateDigestSHA1 = HostActiveDirectoryAuthenticationCertificateDigest(\"SHA1\")\n)\n\nfunc init() {\n\tt[\"HostActiveDirectoryAuthenticationCertificateDigest\"] = reflect.TypeOf((*HostActiveDirectoryAuthenticationCertificateDigest)(nil)).Elem()\n}\n\ntype HostActiveDirectoryInfoDomainMembershipStatus string\n\nconst (\n\tHostActiveDirectoryInfoDomainMembershipStatusUnknown           = HostActiveDirectoryInfoDomainMembershipStatus(\"unknown\")\n\tHostActiveDirectoryInfoDomainMembershipStatusOk                = HostActiveDirectoryInfoDomainMembershipStatus(\"ok\")\n\tHostActiveDirectoryInfoDomainMembershipStatusNoServers         = HostActiveDirectoryInfoDomainMembershipStatus(\"noServers\")\n\tHostActiveDirectoryInfoDomainMembershipStatusClientTrustBroken = HostActiveDirectoryInfoDomainMembershipStatus(\"clientTrustBroken\")\n\tHostActiveDirectoryInfoDomainMembershipStatusServerTrustBroken = HostActiveDirectoryInfoDomainMembershipStatus(\"serverTrustBroken\")\n\tHostActiveDirectoryInfoDomainMembershipStatusInconsistentTrust = HostActiveDirectoryInfoDomainMembershipStatus(\"inconsistentTrust\")\n\tHostActiveDirectoryInfoDomainMembershipStatusOtherProblem      = 
HostActiveDirectoryInfoDomainMembershipStatus(\"otherProblem\")\n)\n\nfunc init() {\n\tt[\"HostActiveDirectoryInfoDomainMembershipStatus\"] = reflect.TypeOf((*HostActiveDirectoryInfoDomainMembershipStatus)(nil)).Elem()\n}\n\ntype HostCapabilityFtUnsupportedReason string\n\nconst (\n\tHostCapabilityFtUnsupportedReasonVMotionNotLicensed  = HostCapabilityFtUnsupportedReason(\"vMotionNotLicensed\")\n\tHostCapabilityFtUnsupportedReasonMissingVMotionNic   = HostCapabilityFtUnsupportedReason(\"missingVMotionNic\")\n\tHostCapabilityFtUnsupportedReasonMissingFTLoggingNic = HostCapabilityFtUnsupportedReason(\"missingFTLoggingNic\")\n\tHostCapabilityFtUnsupportedReasonFtNotLicensed       = HostCapabilityFtUnsupportedReason(\"ftNotLicensed\")\n\tHostCapabilityFtUnsupportedReasonHaAgentIssue        = HostCapabilityFtUnsupportedReason(\"haAgentIssue\")\n\tHostCapabilityFtUnsupportedReasonUnsupportedProduct  = HostCapabilityFtUnsupportedReason(\"unsupportedProduct\")\n\tHostCapabilityFtUnsupportedReasonCpuHvUnsupported    = HostCapabilityFtUnsupportedReason(\"cpuHvUnsupported\")\n\tHostCapabilityFtUnsupportedReasonCpuHwmmuUnsupported = HostCapabilityFtUnsupportedReason(\"cpuHwmmuUnsupported\")\n\tHostCapabilityFtUnsupportedReasonCpuHvDisabled       = HostCapabilityFtUnsupportedReason(\"cpuHvDisabled\")\n)\n\nfunc init() {\n\tt[\"HostCapabilityFtUnsupportedReason\"] = reflect.TypeOf((*HostCapabilityFtUnsupportedReason)(nil)).Elem()\n}\n\ntype HostCapabilityVmDirectPathGen2UnsupportedReason string\n\nconst (\n\tHostCapabilityVmDirectPathGen2UnsupportedReasonHostNptIncompatibleProduct  = HostCapabilityVmDirectPathGen2UnsupportedReason(\"hostNptIncompatibleProduct\")\n\tHostCapabilityVmDirectPathGen2UnsupportedReasonHostNptIncompatibleHardware = HostCapabilityVmDirectPathGen2UnsupportedReason(\"hostNptIncompatibleHardware\")\n\tHostCapabilityVmDirectPathGen2UnsupportedReasonHostNptDisabled             = HostCapabilityVmDirectPathGen2UnsupportedReason(\"hostNptDisabled\")\n)\n\nfunc 
init() {\n\tt[\"HostCapabilityVmDirectPathGen2UnsupportedReason\"] = reflect.TypeOf((*HostCapabilityVmDirectPathGen2UnsupportedReason)(nil)).Elem()\n}\n\ntype HostCertificateManagerCertificateInfoCertificateStatus string\n\nconst (\n\tHostCertificateManagerCertificateInfoCertificateStatusUnknown            = HostCertificateManagerCertificateInfoCertificateStatus(\"unknown\")\n\tHostCertificateManagerCertificateInfoCertificateStatusExpired            = HostCertificateManagerCertificateInfoCertificateStatus(\"expired\")\n\tHostCertificateManagerCertificateInfoCertificateStatusExpiring           = HostCertificateManagerCertificateInfoCertificateStatus(\"expiring\")\n\tHostCertificateManagerCertificateInfoCertificateStatusExpiringShortly    = HostCertificateManagerCertificateInfoCertificateStatus(\"expiringShortly\")\n\tHostCertificateManagerCertificateInfoCertificateStatusExpirationImminent = HostCertificateManagerCertificateInfoCertificateStatus(\"expirationImminent\")\n\tHostCertificateManagerCertificateInfoCertificateStatusGood               = HostCertificateManagerCertificateInfoCertificateStatus(\"good\")\n)\n\nfunc init() {\n\tt[\"HostCertificateManagerCertificateInfoCertificateStatus\"] = reflect.TypeOf((*HostCertificateManagerCertificateInfoCertificateStatus)(nil)).Elem()\n}\n\ntype HostConfigChangeMode string\n\nconst (\n\tHostConfigChangeModeModify  = HostConfigChangeMode(\"modify\")\n\tHostConfigChangeModeReplace = HostConfigChangeMode(\"replace\")\n)\n\nfunc init() {\n\tt[\"HostConfigChangeMode\"] = reflect.TypeOf((*HostConfigChangeMode)(nil)).Elem()\n}\n\ntype HostConfigChangeOperation string\n\nconst (\n\tHostConfigChangeOperationAdd    = HostConfigChangeOperation(\"add\")\n\tHostConfigChangeOperationRemove = HostConfigChangeOperation(\"remove\")\n\tHostConfigChangeOperationEdit   = HostConfigChangeOperation(\"edit\")\n\tHostConfigChangeOperationIgnore = HostConfigChangeOperation(\"ignore\")\n)\n\nfunc init() {\n\tt[\"HostConfigChangeOperation\"] = 
reflect.TypeOf((*HostConfigChangeOperation)(nil)).Elem()\n}\n\ntype HostCpuPackageVendor string\n\nconst (\n\tHostCpuPackageVendorUnknown = HostCpuPackageVendor(\"unknown\")\n\tHostCpuPackageVendorIntel   = HostCpuPackageVendor(\"intel\")\n\tHostCpuPackageVendorAmd     = HostCpuPackageVendor(\"amd\")\n)\n\nfunc init() {\n\tt[\"HostCpuPackageVendor\"] = reflect.TypeOf((*HostCpuPackageVendor)(nil)).Elem()\n}\n\ntype HostCpuPowerManagementInfoPolicyType string\n\nconst (\n\tHostCpuPowerManagementInfoPolicyTypeOff           = HostCpuPowerManagementInfoPolicyType(\"off\")\n\tHostCpuPowerManagementInfoPolicyTypeStaticPolicy  = HostCpuPowerManagementInfoPolicyType(\"staticPolicy\")\n\tHostCpuPowerManagementInfoPolicyTypeDynamicPolicy = HostCpuPowerManagementInfoPolicyType(\"dynamicPolicy\")\n)\n\nfunc init() {\n\tt[\"HostCpuPowerManagementInfoPolicyType\"] = reflect.TypeOf((*HostCpuPowerManagementInfoPolicyType)(nil)).Elem()\n}\n\ntype HostCryptoState string\n\nconst (\n\tHostCryptoStateIncapable = HostCryptoState(\"incapable\")\n\tHostCryptoStatePrepared  = HostCryptoState(\"prepared\")\n\tHostCryptoStateSafe      = HostCryptoState(\"safe\")\n)\n\nfunc init() {\n\tt[\"HostCryptoState\"] = reflect.TypeOf((*HostCryptoState)(nil)).Elem()\n}\n\ntype HostDasErrorEventHostDasErrorReason string\n\nconst (\n\tHostDasErrorEventHostDasErrorReasonConfigFailed               = HostDasErrorEventHostDasErrorReason(\"configFailed\")\n\tHostDasErrorEventHostDasErrorReasonTimeout                    = HostDasErrorEventHostDasErrorReason(\"timeout\")\n\tHostDasErrorEventHostDasErrorReasonCommunicationInitFailed    = HostDasErrorEventHostDasErrorReason(\"communicationInitFailed\")\n\tHostDasErrorEventHostDasErrorReasonHealthCheckScriptFailed    = HostDasErrorEventHostDasErrorReason(\"healthCheckScriptFailed\")\n\tHostDasErrorEventHostDasErrorReasonAgentFailed                = HostDasErrorEventHostDasErrorReason(\"agentFailed\")\n\tHostDasErrorEventHostDasErrorReasonAgentShutdown              
= HostDasErrorEventHostDasErrorReason(\"agentShutdown\")\n\tHostDasErrorEventHostDasErrorReasonIsolationAddressUnpingable = HostDasErrorEventHostDasErrorReason(\"isolationAddressUnpingable\")\n\tHostDasErrorEventHostDasErrorReasonOther                      = HostDasErrorEventHostDasErrorReason(\"other\")\n)\n\nfunc init() {\n\tt[\"HostDasErrorEventHostDasErrorReason\"] = reflect.TypeOf((*HostDasErrorEventHostDasErrorReason)(nil)).Elem()\n}\n\ntype HostDigestInfoDigestMethodType string\n\nconst (\n\tHostDigestInfoDigestMethodTypeSHA1 = HostDigestInfoDigestMethodType(\"SHA1\")\n\tHostDigestInfoDigestMethodTypeMD5  = HostDigestInfoDigestMethodType(\"MD5\")\n)\n\nfunc init() {\n\tt[\"HostDigestInfoDigestMethodType\"] = reflect.TypeOf((*HostDigestInfoDigestMethodType)(nil)).Elem()\n}\n\ntype HostDisconnectedEventReasonCode string\n\nconst (\n\tHostDisconnectedEventReasonCodeSslThumbprintVerifyFailed = HostDisconnectedEventReasonCode(\"sslThumbprintVerifyFailed\")\n\tHostDisconnectedEventReasonCodeLicenseExpired            = HostDisconnectedEventReasonCode(\"licenseExpired\")\n\tHostDisconnectedEventReasonCodeAgentUpgrade              = HostDisconnectedEventReasonCode(\"agentUpgrade\")\n\tHostDisconnectedEventReasonCodeUserRequest               = HostDisconnectedEventReasonCode(\"userRequest\")\n\tHostDisconnectedEventReasonCodeInsufficientLicenses      = HostDisconnectedEventReasonCode(\"insufficientLicenses\")\n\tHostDisconnectedEventReasonCodeAgentOutOfDate            = HostDisconnectedEventReasonCode(\"agentOutOfDate\")\n\tHostDisconnectedEventReasonCodePasswordDecryptFailure    = HostDisconnectedEventReasonCode(\"passwordDecryptFailure\")\n\tHostDisconnectedEventReasonCodeUnknown                   = HostDisconnectedEventReasonCode(\"unknown\")\n\tHostDisconnectedEventReasonCodeVcVRAMCapacityExceeded    = HostDisconnectedEventReasonCode(\"vcVRAMCapacityExceeded\")\n)\n\nfunc init() {\n\tt[\"HostDisconnectedEventReasonCode\"] = 
reflect.TypeOf((*HostDisconnectedEventReasonCode)(nil)).Elem()\n}\n\ntype HostDiskPartitionInfoPartitionFormat string\n\nconst (\n\tHostDiskPartitionInfoPartitionFormatGpt     = HostDiskPartitionInfoPartitionFormat(\"gpt\")\n\tHostDiskPartitionInfoPartitionFormatMbr     = HostDiskPartitionInfoPartitionFormat(\"mbr\")\n\tHostDiskPartitionInfoPartitionFormatUnknown = HostDiskPartitionInfoPartitionFormat(\"unknown\")\n)\n\nfunc init() {\n\tt[\"HostDiskPartitionInfoPartitionFormat\"] = reflect.TypeOf((*HostDiskPartitionInfoPartitionFormat)(nil)).Elem()\n}\n\ntype HostDiskPartitionInfoType string\n\nconst (\n\tHostDiskPartitionInfoTypeNone          = HostDiskPartitionInfoType(\"none\")\n\tHostDiskPartitionInfoTypeVmfs          = HostDiskPartitionInfoType(\"vmfs\")\n\tHostDiskPartitionInfoTypeLinuxNative   = HostDiskPartitionInfoType(\"linuxNative\")\n\tHostDiskPartitionInfoTypeLinuxSwap     = HostDiskPartitionInfoType(\"linuxSwap\")\n\tHostDiskPartitionInfoTypeExtended      = HostDiskPartitionInfoType(\"extended\")\n\tHostDiskPartitionInfoTypeNtfs          = HostDiskPartitionInfoType(\"ntfs\")\n\tHostDiskPartitionInfoTypeVmkDiagnostic = HostDiskPartitionInfoType(\"vmkDiagnostic\")\n\tHostDiskPartitionInfoTypeVffs          = HostDiskPartitionInfoType(\"vffs\")\n)\n\nfunc init() {\n\tt[\"HostDiskPartitionInfoType\"] = reflect.TypeOf((*HostDiskPartitionInfoType)(nil)).Elem()\n}\n\ntype HostFeatureVersionKey string\n\nconst (\n\tHostFeatureVersionKeyFaultTolerance = HostFeatureVersionKey(\"faultTolerance\")\n)\n\nfunc init() {\n\tt[\"HostFeatureVersionKey\"] = reflect.TypeOf((*HostFeatureVersionKey)(nil)).Elem()\n}\n\ntype HostFileSystemVolumeFileSystemType string\n\nconst (\n\tHostFileSystemVolumeFileSystemTypeVMFS  = HostFileSystemVolumeFileSystemType(\"VMFS\")\n\tHostFileSystemVolumeFileSystemTypeNFS   = HostFileSystemVolumeFileSystemType(\"NFS\")\n\tHostFileSystemVolumeFileSystemTypeNFS41 = 
HostFileSystemVolumeFileSystemType(\"NFS41\")\n\tHostFileSystemVolumeFileSystemTypeCIFS  = HostFileSystemVolumeFileSystemType(\"CIFS\")\n\tHostFileSystemVolumeFileSystemTypeVsan  = HostFileSystemVolumeFileSystemType(\"vsan\")\n\tHostFileSystemVolumeFileSystemTypeVFFS  = HostFileSystemVolumeFileSystemType(\"VFFS\")\n\tHostFileSystemVolumeFileSystemTypeVVOL  = HostFileSystemVolumeFileSystemType(\"VVOL\")\n\tHostFileSystemVolumeFileSystemTypeOTHER = HostFileSystemVolumeFileSystemType(\"OTHER\")\n)\n\nfunc init() {\n\tt[\"HostFileSystemVolumeFileSystemType\"] = reflect.TypeOf((*HostFileSystemVolumeFileSystemType)(nil)).Elem()\n}\n\ntype HostFirewallRuleDirection string\n\nconst (\n\tHostFirewallRuleDirectionInbound  = HostFirewallRuleDirection(\"inbound\")\n\tHostFirewallRuleDirectionOutbound = HostFirewallRuleDirection(\"outbound\")\n)\n\nfunc init() {\n\tt[\"HostFirewallRuleDirection\"] = reflect.TypeOf((*HostFirewallRuleDirection)(nil)).Elem()\n}\n\ntype HostFirewallRulePortType string\n\nconst (\n\tHostFirewallRulePortTypeSrc = HostFirewallRulePortType(\"src\")\n\tHostFirewallRulePortTypeDst = HostFirewallRulePortType(\"dst\")\n)\n\nfunc init() {\n\tt[\"HostFirewallRulePortType\"] = reflect.TypeOf((*HostFirewallRulePortType)(nil)).Elem()\n}\n\ntype HostFirewallRuleProtocol string\n\nconst (\n\tHostFirewallRuleProtocolTcp = HostFirewallRuleProtocol(\"tcp\")\n\tHostFirewallRuleProtocolUdp = HostFirewallRuleProtocol(\"udp\")\n)\n\nfunc init() {\n\tt[\"HostFirewallRuleProtocol\"] = reflect.TypeOf((*HostFirewallRuleProtocol)(nil)).Elem()\n}\n\ntype HostGraphicsConfigGraphicsType string\n\nconst (\n\tHostGraphicsConfigGraphicsTypeShared       = HostGraphicsConfigGraphicsType(\"shared\")\n\tHostGraphicsConfigGraphicsTypeSharedDirect = HostGraphicsConfigGraphicsType(\"sharedDirect\")\n)\n\nfunc init() {\n\tt[\"HostGraphicsConfigGraphicsType\"] = reflect.TypeOf((*HostGraphicsConfigGraphicsType)(nil)).Elem()\n}\n\ntype HostGraphicsConfigSharedPassthruAssignmentPolicy 
string\n\nconst (\n\tHostGraphicsConfigSharedPassthruAssignmentPolicyPerformance   = HostGraphicsConfigSharedPassthruAssignmentPolicy(\"performance\")\n\tHostGraphicsConfigSharedPassthruAssignmentPolicyConsolidation = HostGraphicsConfigSharedPassthruAssignmentPolicy(\"consolidation\")\n)\n\nfunc init() {\n\tt[\"HostGraphicsConfigSharedPassthruAssignmentPolicy\"] = reflect.TypeOf((*HostGraphicsConfigSharedPassthruAssignmentPolicy)(nil)).Elem()\n}\n\ntype HostGraphicsInfoGraphicsType string\n\nconst (\n\tHostGraphicsInfoGraphicsTypeBasic        = HostGraphicsInfoGraphicsType(\"basic\")\n\tHostGraphicsInfoGraphicsTypeShared       = HostGraphicsInfoGraphicsType(\"shared\")\n\tHostGraphicsInfoGraphicsTypeDirect       = HostGraphicsInfoGraphicsType(\"direct\")\n\tHostGraphicsInfoGraphicsTypeSharedDirect = HostGraphicsInfoGraphicsType(\"sharedDirect\")\n)\n\nfunc init() {\n\tt[\"HostGraphicsInfoGraphicsType\"] = reflect.TypeOf((*HostGraphicsInfoGraphicsType)(nil)).Elem()\n}\n\ntype HostHardwareElementStatus string\n\nconst (\n\tHostHardwareElementStatusUnknown = HostHardwareElementStatus(\"Unknown\")\n\tHostHardwareElementStatusGreen   = HostHardwareElementStatus(\"Green\")\n\tHostHardwareElementStatusYellow  = HostHardwareElementStatus(\"Yellow\")\n\tHostHardwareElementStatusRed     = HostHardwareElementStatus(\"Red\")\n)\n\nfunc init() {\n\tt[\"HostHardwareElementStatus\"] = reflect.TypeOf((*HostHardwareElementStatus)(nil)).Elem()\n}\n\ntype HostHasComponentFailureHostComponentType string\n\nconst (\n\tHostHasComponentFailureHostComponentTypeDatastore = HostHasComponentFailureHostComponentType(\"Datastore\")\n)\n\nfunc init() {\n\tt[\"HostHasComponentFailureHostComponentType\"] = reflect.TypeOf((*HostHasComponentFailureHostComponentType)(nil)).Elem()\n}\n\ntype HostImageAcceptanceLevel string\n\nconst (\n\tHostImageAcceptanceLevelVmware_certified = HostImageAcceptanceLevel(\"vmware_certified\")\n\tHostImageAcceptanceLevelVmware_accepted  = 
HostImageAcceptanceLevel(\"vmware_accepted\")\n\tHostImageAcceptanceLevelPartner          = HostImageAcceptanceLevel(\"partner\")\n\tHostImageAcceptanceLevelCommunity        = HostImageAcceptanceLevel(\"community\")\n)\n\nfunc init() {\n\tt[\"HostImageAcceptanceLevel\"] = reflect.TypeOf((*HostImageAcceptanceLevel)(nil)).Elem()\n}\n\ntype HostIncompatibleForFaultToleranceReason string\n\nconst (\n\tHostIncompatibleForFaultToleranceReasonProduct   = HostIncompatibleForFaultToleranceReason(\"product\")\n\tHostIncompatibleForFaultToleranceReasonProcessor = HostIncompatibleForFaultToleranceReason(\"processor\")\n)\n\nfunc init() {\n\tt[\"HostIncompatibleForFaultToleranceReason\"] = reflect.TypeOf((*HostIncompatibleForFaultToleranceReason)(nil)).Elem()\n}\n\ntype HostIncompatibleForRecordReplayReason string\n\nconst (\n\tHostIncompatibleForRecordReplayReasonProduct   = HostIncompatibleForRecordReplayReason(\"product\")\n\tHostIncompatibleForRecordReplayReasonProcessor = HostIncompatibleForRecordReplayReason(\"processor\")\n)\n\nfunc init() {\n\tt[\"HostIncompatibleForRecordReplayReason\"] = reflect.TypeOf((*HostIncompatibleForRecordReplayReason)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaChapAuthenticationType string\n\nconst (\n\tHostInternetScsiHbaChapAuthenticationTypeChapProhibited  = HostInternetScsiHbaChapAuthenticationType(\"chapProhibited\")\n\tHostInternetScsiHbaChapAuthenticationTypeChapDiscouraged = HostInternetScsiHbaChapAuthenticationType(\"chapDiscouraged\")\n\tHostInternetScsiHbaChapAuthenticationTypeChapPreferred   = HostInternetScsiHbaChapAuthenticationType(\"chapPreferred\")\n\tHostInternetScsiHbaChapAuthenticationTypeChapRequired    = HostInternetScsiHbaChapAuthenticationType(\"chapRequired\")\n)\n\nfunc init() {\n\tt[\"HostInternetScsiHbaChapAuthenticationType\"] = reflect.TypeOf((*HostInternetScsiHbaChapAuthenticationType)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaDigestType string\n\nconst (\n\tHostInternetScsiHbaDigestTypeDigestProhibited  = 
HostInternetScsiHbaDigestType(\"digestProhibited\")\n\tHostInternetScsiHbaDigestTypeDigestDiscouraged = HostInternetScsiHbaDigestType(\"digestDiscouraged\")\n\tHostInternetScsiHbaDigestTypeDigestPreferred   = HostInternetScsiHbaDigestType(\"digestPreferred\")\n\tHostInternetScsiHbaDigestTypeDigestRequired    = HostInternetScsiHbaDigestType(\"digestRequired\")\n)\n\nfunc init() {\n\tt[\"HostInternetScsiHbaDigestType\"] = reflect.TypeOf((*HostInternetScsiHbaDigestType)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType string\n\nconst (\n\tHostInternetScsiHbaIscsiIpv6AddressAddressConfigurationTypeDHCP           = HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType(\"DHCP\")\n\tHostInternetScsiHbaIscsiIpv6AddressAddressConfigurationTypeAutoConfigured = HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType(\"AutoConfigured\")\n\tHostInternetScsiHbaIscsiIpv6AddressAddressConfigurationTypeStatic         = HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType(\"Static\")\n\tHostInternetScsiHbaIscsiIpv6AddressAddressConfigurationTypeOther          = HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType(\"Other\")\n)\n\nfunc init() {\n\tt[\"HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType\"] = reflect.TypeOf((*HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation string\n\nconst (\n\tHostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperationAdd    = HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation(\"add\")\n\tHostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperationRemove = HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation(\"remove\")\n)\n\nfunc init() {\n\tt[\"HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation\"] = reflect.TypeOf((*HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaNetworkBindingSupportType string\n\nconst 
(\n\tHostInternetScsiHbaNetworkBindingSupportTypeNotsupported = HostInternetScsiHbaNetworkBindingSupportType(\"notsupported\")\n\tHostInternetScsiHbaNetworkBindingSupportTypeOptional     = HostInternetScsiHbaNetworkBindingSupportType(\"optional\")\n\tHostInternetScsiHbaNetworkBindingSupportTypeRequired     = HostInternetScsiHbaNetworkBindingSupportType(\"required\")\n)\n\nfunc init() {\n\tt[\"HostInternetScsiHbaNetworkBindingSupportType\"] = reflect.TypeOf((*HostInternetScsiHbaNetworkBindingSupportType)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaStaticTargetTargetDiscoveryMethod string\n\nconst (\n\tHostInternetScsiHbaStaticTargetTargetDiscoveryMethodStaticMethod     = HostInternetScsiHbaStaticTargetTargetDiscoveryMethod(\"staticMethod\")\n\tHostInternetScsiHbaStaticTargetTargetDiscoveryMethodSendTargetMethod = HostInternetScsiHbaStaticTargetTargetDiscoveryMethod(\"sendTargetMethod\")\n\tHostInternetScsiHbaStaticTargetTargetDiscoveryMethodSlpMethod        = HostInternetScsiHbaStaticTargetTargetDiscoveryMethod(\"slpMethod\")\n\tHostInternetScsiHbaStaticTargetTargetDiscoveryMethodIsnsMethod       = HostInternetScsiHbaStaticTargetTargetDiscoveryMethod(\"isnsMethod\")\n\tHostInternetScsiHbaStaticTargetTargetDiscoveryMethodUnknownMethod    = HostInternetScsiHbaStaticTargetTargetDiscoveryMethod(\"unknownMethod\")\n)\n\nfunc init() {\n\tt[\"HostInternetScsiHbaStaticTargetTargetDiscoveryMethod\"] = reflect.TypeOf((*HostInternetScsiHbaStaticTargetTargetDiscoveryMethod)(nil)).Elem()\n}\n\ntype HostIpConfigIpV6AddressConfigType string\n\nconst (\n\tHostIpConfigIpV6AddressConfigTypeOther     = HostIpConfigIpV6AddressConfigType(\"other\")\n\tHostIpConfigIpV6AddressConfigTypeManual    = HostIpConfigIpV6AddressConfigType(\"manual\")\n\tHostIpConfigIpV6AddressConfigTypeDhcp      = HostIpConfigIpV6AddressConfigType(\"dhcp\")\n\tHostIpConfigIpV6AddressConfigTypeLinklayer = HostIpConfigIpV6AddressConfigType(\"linklayer\")\n\tHostIpConfigIpV6AddressConfigTypeRandom    = 
HostIpConfigIpV6AddressConfigType(\"random\")\n)\n\nfunc init() {\n\tt[\"HostIpConfigIpV6AddressConfigType\"] = reflect.TypeOf((*HostIpConfigIpV6AddressConfigType)(nil)).Elem()\n}\n\ntype HostIpConfigIpV6AddressStatus string\n\nconst (\n\tHostIpConfigIpV6AddressStatusPreferred    = HostIpConfigIpV6AddressStatus(\"preferred\")\n\tHostIpConfigIpV6AddressStatusDeprecated   = HostIpConfigIpV6AddressStatus(\"deprecated\")\n\tHostIpConfigIpV6AddressStatusInvalid      = HostIpConfigIpV6AddressStatus(\"invalid\")\n\tHostIpConfigIpV6AddressStatusInaccessible = HostIpConfigIpV6AddressStatus(\"inaccessible\")\n\tHostIpConfigIpV6AddressStatusUnknown      = HostIpConfigIpV6AddressStatus(\"unknown\")\n\tHostIpConfigIpV6AddressStatusTentative    = HostIpConfigIpV6AddressStatus(\"tentative\")\n\tHostIpConfigIpV6AddressStatusDuplicate    = HostIpConfigIpV6AddressStatus(\"duplicate\")\n)\n\nfunc init() {\n\tt[\"HostIpConfigIpV6AddressStatus\"] = reflect.TypeOf((*HostIpConfigIpV6AddressStatus)(nil)).Elem()\n}\n\ntype HostLicensableResourceKey string\n\nconst (\n\tHostLicensableResourceKeyNumCpuPackages = HostLicensableResourceKey(\"numCpuPackages\")\n\tHostLicensableResourceKeyNumCpuCores    = HostLicensableResourceKey(\"numCpuCores\")\n\tHostLicensableResourceKeyMemorySize     = HostLicensableResourceKey(\"memorySize\")\n\tHostLicensableResourceKeyMemoryForVms   = HostLicensableResourceKey(\"memoryForVms\")\n\tHostLicensableResourceKeyNumVmsStarted  = HostLicensableResourceKey(\"numVmsStarted\")\n\tHostLicensableResourceKeyNumVmsStarting = HostLicensableResourceKey(\"numVmsStarting\")\n)\n\nfunc init() {\n\tt[\"HostLicensableResourceKey\"] = reflect.TypeOf((*HostLicensableResourceKey)(nil)).Elem()\n}\n\ntype HostLockdownMode string\n\nconst (\n\tHostLockdownModeLockdownDisabled = HostLockdownMode(\"lockdownDisabled\")\n\tHostLockdownModeLockdownNormal   = HostLockdownMode(\"lockdownNormal\")\n\tHostLockdownModeLockdownStrict   = HostLockdownMode(\"lockdownStrict\")\n)\n\nfunc init() 
{\n\tt[\"HostLockdownMode\"] = reflect.TypeOf((*HostLockdownMode)(nil)).Elem()\n}\n\ntype HostLowLevelProvisioningManagerFileType string\n\nconst (\n\tHostLowLevelProvisioningManagerFileTypeFile        = HostLowLevelProvisioningManagerFileType(\"File\")\n\tHostLowLevelProvisioningManagerFileTypeVirtualDisk = HostLowLevelProvisioningManagerFileType(\"VirtualDisk\")\n\tHostLowLevelProvisioningManagerFileTypeDirectory   = HostLowLevelProvisioningManagerFileType(\"Directory\")\n)\n\nfunc init() {\n\tt[\"HostLowLevelProvisioningManagerFileType\"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileType)(nil)).Elem()\n}\n\ntype HostLowLevelProvisioningManagerReloadTarget string\n\nconst (\n\tHostLowLevelProvisioningManagerReloadTargetCurrentConfig  = HostLowLevelProvisioningManagerReloadTarget(\"currentConfig\")\n\tHostLowLevelProvisioningManagerReloadTargetSnapshotConfig = HostLowLevelProvisioningManagerReloadTarget(\"snapshotConfig\")\n)\n\nfunc init() {\n\tt[\"HostLowLevelProvisioningManagerReloadTarget\"] = reflect.TypeOf((*HostLowLevelProvisioningManagerReloadTarget)(nil)).Elem()\n}\n\ntype HostMountInfoInaccessibleReason string\n\nconst (\n\tHostMountInfoInaccessibleReasonAllPathsDown_Start   = HostMountInfoInaccessibleReason(\"AllPathsDown_Start\")\n\tHostMountInfoInaccessibleReasonAllPathsDown_Timeout = HostMountInfoInaccessibleReason(\"AllPathsDown_Timeout\")\n\tHostMountInfoInaccessibleReasonPermanentDeviceLoss  = HostMountInfoInaccessibleReason(\"PermanentDeviceLoss\")\n)\n\nfunc init() {\n\tt[\"HostMountInfoInaccessibleReason\"] = reflect.TypeOf((*HostMountInfoInaccessibleReason)(nil)).Elem()\n}\n\ntype HostMountMode string\n\nconst (\n\tHostMountModeReadWrite = HostMountMode(\"readWrite\")\n\tHostMountModeReadOnly  = HostMountMode(\"readOnly\")\n)\n\nfunc init() {\n\tt[\"HostMountMode\"] = reflect.TypeOf((*HostMountMode)(nil)).Elem()\n}\n\ntype HostNasVolumeSecurityType string\n\nconst (\n\tHostNasVolumeSecurityTypeAUTH_SYS  = 
HostNasVolumeSecurityType(\"AUTH_SYS\")\n\tHostNasVolumeSecurityTypeSEC_KRB5  = HostNasVolumeSecurityType(\"SEC_KRB5\")\n\tHostNasVolumeSecurityTypeSEC_KRB5I = HostNasVolumeSecurityType(\"SEC_KRB5I\")\n)\n\nfunc init() {\n\tt[\"HostNasVolumeSecurityType\"] = reflect.TypeOf((*HostNasVolumeSecurityType)(nil)).Elem()\n}\n\ntype HostNetStackInstanceCongestionControlAlgorithmType string\n\nconst (\n\tHostNetStackInstanceCongestionControlAlgorithmTypeNewreno = HostNetStackInstanceCongestionControlAlgorithmType(\"newreno\")\n\tHostNetStackInstanceCongestionControlAlgorithmTypeCubic   = HostNetStackInstanceCongestionControlAlgorithmType(\"cubic\")\n)\n\nfunc init() {\n\tt[\"HostNetStackInstanceCongestionControlAlgorithmType\"] = reflect.TypeOf((*HostNetStackInstanceCongestionControlAlgorithmType)(nil)).Elem()\n}\n\ntype HostNetStackInstanceSystemStackKey string\n\nconst (\n\tHostNetStackInstanceSystemStackKeyDefaultTcpipStack   = HostNetStackInstanceSystemStackKey(\"defaultTcpipStack\")\n\tHostNetStackInstanceSystemStackKeyVmotion             = HostNetStackInstanceSystemStackKey(\"vmotion\")\n\tHostNetStackInstanceSystemStackKeyVSphereProvisioning = HostNetStackInstanceSystemStackKey(\"vSphereProvisioning\")\n)\n\nfunc init() {\n\tt[\"HostNetStackInstanceSystemStackKey\"] = reflect.TypeOf((*HostNetStackInstanceSystemStackKey)(nil)).Elem()\n}\n\ntype HostNumericSensorHealthState string\n\nconst (\n\tHostNumericSensorHealthStateUnknown = HostNumericSensorHealthState(\"unknown\")\n\tHostNumericSensorHealthStateGreen   = HostNumericSensorHealthState(\"green\")\n\tHostNumericSensorHealthStateYellow  = HostNumericSensorHealthState(\"yellow\")\n\tHostNumericSensorHealthStateRed     = HostNumericSensorHealthState(\"red\")\n)\n\nfunc init() {\n\tt[\"HostNumericSensorHealthState\"] = reflect.TypeOf((*HostNumericSensorHealthState)(nil)).Elem()\n}\n\ntype HostNumericSensorType string\n\nconst (\n\tHostNumericSensorTypeFan         = 
HostNumericSensorType(\"fan\")\n\tHostNumericSensorTypePower       = HostNumericSensorType(\"power\")\n\tHostNumericSensorTypeTemperature = HostNumericSensorType(\"temperature\")\n\tHostNumericSensorTypeVoltage     = HostNumericSensorType(\"voltage\")\n\tHostNumericSensorTypeOther       = HostNumericSensorType(\"other\")\n\tHostNumericSensorTypeProcessor   = HostNumericSensorType(\"processor\")\n\tHostNumericSensorTypeMemory      = HostNumericSensorType(\"memory\")\n\tHostNumericSensorTypeStorage     = HostNumericSensorType(\"storage\")\n\tHostNumericSensorTypeSystemBoard = HostNumericSensorType(\"systemBoard\")\n\tHostNumericSensorTypeBattery     = HostNumericSensorType(\"battery\")\n\tHostNumericSensorTypeBios        = HostNumericSensorType(\"bios\")\n\tHostNumericSensorTypeCable       = HostNumericSensorType(\"cable\")\n\tHostNumericSensorTypeWatchdog    = HostNumericSensorType(\"watchdog\")\n)\n\nfunc init() {\n\tt[\"HostNumericSensorType\"] = reflect.TypeOf((*HostNumericSensorType)(nil)).Elem()\n}\n\ntype HostOpaqueSwitchOpaqueSwitchState string\n\nconst (\n\tHostOpaqueSwitchOpaqueSwitchStateUp      = HostOpaqueSwitchOpaqueSwitchState(\"up\")\n\tHostOpaqueSwitchOpaqueSwitchStateWarning = HostOpaqueSwitchOpaqueSwitchState(\"warning\")\n\tHostOpaqueSwitchOpaqueSwitchStateDown    = HostOpaqueSwitchOpaqueSwitchState(\"down\")\n)\n\nfunc init() {\n\tt[\"HostOpaqueSwitchOpaqueSwitchState\"] = reflect.TypeOf((*HostOpaqueSwitchOpaqueSwitchState)(nil)).Elem()\n}\n\ntype HostPatchManagerInstallState string\n\nconst (\n\tHostPatchManagerInstallStateHostRestarted = HostPatchManagerInstallState(\"hostRestarted\")\n\tHostPatchManagerInstallStateImageActive   = HostPatchManagerInstallState(\"imageActive\")\n)\n\nfunc init() {\n\tt[\"HostPatchManagerInstallState\"] = reflect.TypeOf((*HostPatchManagerInstallState)(nil)).Elem()\n}\n\ntype HostPatchManagerIntegrityStatus string\n\nconst (\n\tHostPatchManagerIntegrityStatusValidated           = 
HostPatchManagerIntegrityStatus(\"validated\")\n\tHostPatchManagerIntegrityStatusKeyNotFound         = HostPatchManagerIntegrityStatus(\"keyNotFound\")\n\tHostPatchManagerIntegrityStatusKeyRevoked          = HostPatchManagerIntegrityStatus(\"keyRevoked\")\n\tHostPatchManagerIntegrityStatusKeyExpired          = HostPatchManagerIntegrityStatus(\"keyExpired\")\n\tHostPatchManagerIntegrityStatusDigestMismatch      = HostPatchManagerIntegrityStatus(\"digestMismatch\")\n\tHostPatchManagerIntegrityStatusNotEnoughSignatures = HostPatchManagerIntegrityStatus(\"notEnoughSignatures\")\n\tHostPatchManagerIntegrityStatusValidationError     = HostPatchManagerIntegrityStatus(\"validationError\")\n)\n\nfunc init() {\n\tt[\"HostPatchManagerIntegrityStatus\"] = reflect.TypeOf((*HostPatchManagerIntegrityStatus)(nil)).Elem()\n}\n\ntype HostPatchManagerReason string\n\nconst (\n\tHostPatchManagerReasonObsoleted         = HostPatchManagerReason(\"obsoleted\")\n\tHostPatchManagerReasonMissingPatch      = HostPatchManagerReason(\"missingPatch\")\n\tHostPatchManagerReasonMissingLib        = HostPatchManagerReason(\"missingLib\")\n\tHostPatchManagerReasonHasDependentPatch = HostPatchManagerReason(\"hasDependentPatch\")\n\tHostPatchManagerReasonConflictPatch     = HostPatchManagerReason(\"conflictPatch\")\n\tHostPatchManagerReasonConflictLib       = HostPatchManagerReason(\"conflictLib\")\n)\n\nfunc init() {\n\tt[\"HostPatchManagerReason\"] = reflect.TypeOf((*HostPatchManagerReason)(nil)).Elem()\n}\n\ntype HostPowerOperationType string\n\nconst (\n\tHostPowerOperationTypePowerOn  = HostPowerOperationType(\"powerOn\")\n\tHostPowerOperationTypePowerOff = HostPowerOperationType(\"powerOff\")\n)\n\nfunc init() {\n\tt[\"HostPowerOperationType\"] = reflect.TypeOf((*HostPowerOperationType)(nil)).Elem()\n}\n\ntype HostProfileManagerAnswerFileStatus string\n\nconst (\n\tHostProfileManagerAnswerFileStatusValid   = 
HostProfileManagerAnswerFileStatus(\"valid\")\n\tHostProfileManagerAnswerFileStatusInvalid = HostProfileManagerAnswerFileStatus(\"invalid\")\n\tHostProfileManagerAnswerFileStatusUnknown = HostProfileManagerAnswerFileStatus(\"unknown\")\n)\n\nfunc init() {\n\tt[\"HostProfileManagerAnswerFileStatus\"] = reflect.TypeOf((*HostProfileManagerAnswerFileStatus)(nil)).Elem()\n}\n\ntype HostProfileManagerTaskListRequirement string\n\nconst (\n\tHostProfileManagerTaskListRequirementMaintenanceModeRequired = HostProfileManagerTaskListRequirement(\"maintenanceModeRequired\")\n\tHostProfileManagerTaskListRequirementRebootRequired          = HostProfileManagerTaskListRequirement(\"rebootRequired\")\n)\n\nfunc init() {\n\tt[\"HostProfileManagerTaskListRequirement\"] = reflect.TypeOf((*HostProfileManagerTaskListRequirement)(nil)).Elem()\n}\n\ntype HostProtocolEndpointPEType string\n\nconst (\n\tHostProtocolEndpointPETypeBlock = HostProtocolEndpointPEType(\"block\")\n\tHostProtocolEndpointPETypeNas   = HostProtocolEndpointPEType(\"nas\")\n)\n\nfunc init() {\n\tt[\"HostProtocolEndpointPEType\"] = reflect.TypeOf((*HostProtocolEndpointPEType)(nil)).Elem()\n}\n\ntype HostProtocolEndpointProtocolEndpointType string\n\nconst (\n\tHostProtocolEndpointProtocolEndpointTypeScsi  = HostProtocolEndpointProtocolEndpointType(\"scsi\")\n\tHostProtocolEndpointProtocolEndpointTypeNfs   = HostProtocolEndpointProtocolEndpointType(\"nfs\")\n\tHostProtocolEndpointProtocolEndpointTypeNfs4x = HostProtocolEndpointProtocolEndpointType(\"nfs4x\")\n)\n\nfunc init() {\n\tt[\"HostProtocolEndpointProtocolEndpointType\"] = reflect.TypeOf((*HostProtocolEndpointProtocolEndpointType)(nil)).Elem()\n}\n\ntype HostReplayUnsupportedReason string\n\nconst (\n\tHostReplayUnsupportedReasonIncompatibleProduct = HostReplayUnsupportedReason(\"incompatibleProduct\")\n\tHostReplayUnsupportedReasonIncompatibleCpu     = HostReplayUnsupportedReason(\"incompatibleCpu\")\n\tHostReplayUnsupportedReasonHvDisabled          = 
HostReplayUnsupportedReason(\"hvDisabled\")\n\tHostReplayUnsupportedReasonCpuidLimitSet       = HostReplayUnsupportedReason(\"cpuidLimitSet\")\n\tHostReplayUnsupportedReasonOldBIOS             = HostReplayUnsupportedReason(\"oldBIOS\")\n\tHostReplayUnsupportedReasonUnknown             = HostReplayUnsupportedReason(\"unknown\")\n)\n\nfunc init() {\n\tt[\"HostReplayUnsupportedReason\"] = reflect.TypeOf((*HostReplayUnsupportedReason)(nil)).Elem()\n}\n\ntype HostRuntimeInfoNetStackInstanceRuntimeInfoState string\n\nconst (\n\tHostRuntimeInfoNetStackInstanceRuntimeInfoStateInactive     = HostRuntimeInfoNetStackInstanceRuntimeInfoState(\"inactive\")\n\tHostRuntimeInfoNetStackInstanceRuntimeInfoStateActive       = HostRuntimeInfoNetStackInstanceRuntimeInfoState(\"active\")\n\tHostRuntimeInfoNetStackInstanceRuntimeInfoStateDeactivating = HostRuntimeInfoNetStackInstanceRuntimeInfoState(\"deactivating\")\n\tHostRuntimeInfoNetStackInstanceRuntimeInfoStateActivating   = HostRuntimeInfoNetStackInstanceRuntimeInfoState(\"activating\")\n)\n\nfunc init() {\n\tt[\"HostRuntimeInfoNetStackInstanceRuntimeInfoState\"] = reflect.TypeOf((*HostRuntimeInfoNetStackInstanceRuntimeInfoState)(nil)).Elem()\n}\n\ntype HostServicePolicy string\n\nconst (\n\tHostServicePolicyOn        = HostServicePolicy(\"on\")\n\tHostServicePolicyAutomatic = HostServicePolicy(\"automatic\")\n\tHostServicePolicyOff       = HostServicePolicy(\"off\")\n)\n\nfunc init() {\n\tt[\"HostServicePolicy\"] = reflect.TypeOf((*HostServicePolicy)(nil)).Elem()\n}\n\ntype HostSnmpAgentCapability string\n\nconst (\n\tHostSnmpAgentCapabilityCOMPLETE      = HostSnmpAgentCapability(\"COMPLETE\")\n\tHostSnmpAgentCapabilityDIAGNOSTICS   = HostSnmpAgentCapability(\"DIAGNOSTICS\")\n\tHostSnmpAgentCapabilityCONFIGURATION = HostSnmpAgentCapability(\"CONFIGURATION\")\n)\n\nfunc init() {\n\tt[\"HostSnmpAgentCapability\"] = reflect.TypeOf((*HostSnmpAgentCapability)(nil)).Elem()\n}\n\ntype HostStandbyMode string\n\nconst 
(\n\tHostStandbyModeEntering = HostStandbyMode(\"entering\")\n\tHostStandbyModeExiting  = HostStandbyMode(\"exiting\")\n\tHostStandbyModeIn       = HostStandbyMode(\"in\")\n\tHostStandbyModeNone     = HostStandbyMode(\"none\")\n)\n\nfunc init() {\n\tt[\"HostStandbyMode\"] = reflect.TypeOf((*HostStandbyMode)(nil)).Elem()\n}\n\ntype HostSystemConnectionState string\n\nconst (\n\tHostSystemConnectionStateConnected     = HostSystemConnectionState(\"connected\")\n\tHostSystemConnectionStateNotResponding = HostSystemConnectionState(\"notResponding\")\n\tHostSystemConnectionStateDisconnected  = HostSystemConnectionState(\"disconnected\")\n)\n\nfunc init() {\n\tt[\"HostSystemConnectionState\"] = reflect.TypeOf((*HostSystemConnectionState)(nil)).Elem()\n}\n\ntype HostSystemIdentificationInfoIdentifier string\n\nconst (\n\tHostSystemIdentificationInfoIdentifierAssetTag          = HostSystemIdentificationInfoIdentifier(\"AssetTag\")\n\tHostSystemIdentificationInfoIdentifierServiceTag        = HostSystemIdentificationInfoIdentifier(\"ServiceTag\")\n\tHostSystemIdentificationInfoIdentifierOemSpecificString = HostSystemIdentificationInfoIdentifier(\"OemSpecificString\")\n)\n\nfunc init() {\n\tt[\"HostSystemIdentificationInfoIdentifier\"] = reflect.TypeOf((*HostSystemIdentificationInfoIdentifier)(nil)).Elem()\n}\n\ntype HostSystemPowerState string\n\nconst (\n\tHostSystemPowerStatePoweredOn  = HostSystemPowerState(\"poweredOn\")\n\tHostSystemPowerStatePoweredOff = HostSystemPowerState(\"poweredOff\")\n\tHostSystemPowerStateStandBy    = HostSystemPowerState(\"standBy\")\n\tHostSystemPowerStateUnknown    = HostSystemPowerState(\"unknown\")\n)\n\nfunc init() {\n\tt[\"HostSystemPowerState\"] = reflect.TypeOf((*HostSystemPowerState)(nil)).Elem()\n}\n\ntype HostUnresolvedVmfsExtentUnresolvedReason string\n\nconst (\n\tHostUnresolvedVmfsExtentUnresolvedReasonDiskIdMismatch = 
HostUnresolvedVmfsExtentUnresolvedReason(\"diskIdMismatch\")\n\tHostUnresolvedVmfsExtentUnresolvedReasonUuidConflict   = HostUnresolvedVmfsExtentUnresolvedReason(\"uuidConflict\")\n)\n\nfunc init() {\n\tt[\"HostUnresolvedVmfsExtentUnresolvedReason\"] = reflect.TypeOf((*HostUnresolvedVmfsExtentUnresolvedReason)(nil)).Elem()\n}\n\ntype HostUnresolvedVmfsResolutionSpecVmfsUuidResolution string\n\nconst (\n\tHostUnresolvedVmfsResolutionSpecVmfsUuidResolutionResignature = HostUnresolvedVmfsResolutionSpecVmfsUuidResolution(\"resignature\")\n\tHostUnresolvedVmfsResolutionSpecVmfsUuidResolutionForceMount  = HostUnresolvedVmfsResolutionSpecVmfsUuidResolution(\"forceMount\")\n)\n\nfunc init() {\n\tt[\"HostUnresolvedVmfsResolutionSpecVmfsUuidResolution\"] = reflect.TypeOf((*HostUnresolvedVmfsResolutionSpecVmfsUuidResolution)(nil)).Elem()\n}\n\ntype HostVirtualNicManagerNicType string\n\nconst (\n\tHostVirtualNicManagerNicTypeVmotion               = HostVirtualNicManagerNicType(\"vmotion\")\n\tHostVirtualNicManagerNicTypeFaultToleranceLogging = HostVirtualNicManagerNicType(\"faultToleranceLogging\")\n\tHostVirtualNicManagerNicTypeVSphereReplication    = HostVirtualNicManagerNicType(\"vSphereReplication\")\n\tHostVirtualNicManagerNicTypeVSphereReplicationNFC = HostVirtualNicManagerNicType(\"vSphereReplicationNFC\")\n\tHostVirtualNicManagerNicTypeManagement            = HostVirtualNicManagerNicType(\"management\")\n\tHostVirtualNicManagerNicTypeVsan                  = HostVirtualNicManagerNicType(\"vsan\")\n\tHostVirtualNicManagerNicTypeVSphereProvisioning   = HostVirtualNicManagerNicType(\"vSphereProvisioning\")\n\tHostVirtualNicManagerNicTypeVsanWitness           = HostVirtualNicManagerNicType(\"vsanWitness\")\n)\n\nfunc init() {\n\tt[\"HostVirtualNicManagerNicType\"] = reflect.TypeOf((*HostVirtualNicManagerNicType)(nil)).Elem()\n}\n\ntype HostVmciAccessManagerMode string\n\nconst (\n\tHostVmciAccessManagerModeGrant   = 
HostVmciAccessManagerMode(\"grant\")\n\tHostVmciAccessManagerModeReplace = HostVmciAccessManagerMode(\"replace\")\n\tHostVmciAccessManagerModeRevoke  = HostVmciAccessManagerMode(\"revoke\")\n)\n\nfunc init() {\n\tt[\"HostVmciAccessManagerMode\"] = reflect.TypeOf((*HostVmciAccessManagerMode)(nil)).Elem()\n}\n\ntype HostVmfsVolumeUnmapPriority string\n\nconst (\n\tHostVmfsVolumeUnmapPriorityNone = HostVmfsVolumeUnmapPriority(\"none\")\n\tHostVmfsVolumeUnmapPriorityLow  = HostVmfsVolumeUnmapPriority(\"low\")\n)\n\nfunc init() {\n\tt[\"HostVmfsVolumeUnmapPriority\"] = reflect.TypeOf((*HostVmfsVolumeUnmapPriority)(nil)).Elem()\n}\n\ntype HttpNfcLeaseState string\n\nconst (\n\tHttpNfcLeaseStateInitializing = HttpNfcLeaseState(\"initializing\")\n\tHttpNfcLeaseStateReady        = HttpNfcLeaseState(\"ready\")\n\tHttpNfcLeaseStateDone         = HttpNfcLeaseState(\"done\")\n\tHttpNfcLeaseStateError        = HttpNfcLeaseState(\"error\")\n)\n\nfunc init() {\n\tt[\"HttpNfcLeaseState\"] = reflect.TypeOf((*HttpNfcLeaseState)(nil)).Elem()\n}\n\ntype IncompatibleHostForVmReplicationIncompatibleReason string\n\nconst (\n\tIncompatibleHostForVmReplicationIncompatibleReasonRpo            = IncompatibleHostForVmReplicationIncompatibleReason(\"rpo\")\n\tIncompatibleHostForVmReplicationIncompatibleReasonNetCompression = IncompatibleHostForVmReplicationIncompatibleReason(\"netCompression\")\n)\n\nfunc init() {\n\tt[\"IncompatibleHostForVmReplicationIncompatibleReason\"] = reflect.TypeOf((*IncompatibleHostForVmReplicationIncompatibleReason)(nil)).Elem()\n}\n\ntype InternetScsiSnsDiscoveryMethod string\n\nconst (\n\tInternetScsiSnsDiscoveryMethodIsnsStatic = InternetScsiSnsDiscoveryMethod(\"isnsStatic\")\n\tInternetScsiSnsDiscoveryMethodIsnsDhcp   = InternetScsiSnsDiscoveryMethod(\"isnsDhcp\")\n\tInternetScsiSnsDiscoveryMethodIsnsSlp    = InternetScsiSnsDiscoveryMethod(\"isnsSlp\")\n)\n\nfunc init() {\n\tt[\"InternetScsiSnsDiscoveryMethod\"] = 
reflect.TypeOf((*InternetScsiSnsDiscoveryMethod)(nil)).Elem()\n}\n\ntype InvalidDasConfigArgumentEntryForInvalidArgument string\n\nconst (\n\tInvalidDasConfigArgumentEntryForInvalidArgumentAdmissionControl = InvalidDasConfigArgumentEntryForInvalidArgument(\"admissionControl\")\n\tInvalidDasConfigArgumentEntryForInvalidArgumentUserHeartbeatDs  = InvalidDasConfigArgumentEntryForInvalidArgument(\"userHeartbeatDs\")\n\tInvalidDasConfigArgumentEntryForInvalidArgumentVmConfig         = InvalidDasConfigArgumentEntryForInvalidArgument(\"vmConfig\")\n)\n\nfunc init() {\n\tt[\"InvalidDasConfigArgumentEntryForInvalidArgument\"] = reflect.TypeOf((*InvalidDasConfigArgumentEntryForInvalidArgument)(nil)).Elem()\n}\n\ntype InvalidProfileReferenceHostReason string\n\nconst (\n\tInvalidProfileReferenceHostReasonIncompatibleVersion  = InvalidProfileReferenceHostReason(\"incompatibleVersion\")\n\tInvalidProfileReferenceHostReasonMissingReferenceHost = InvalidProfileReferenceHostReason(\"missingReferenceHost\")\n)\n\nfunc init() {\n\tt[\"InvalidProfileReferenceHostReason\"] = reflect.TypeOf((*InvalidProfileReferenceHostReason)(nil)).Elem()\n}\n\ntype IoFilterOperation string\n\nconst (\n\tIoFilterOperationInstall   = IoFilterOperation(\"install\")\n\tIoFilterOperationUninstall = IoFilterOperation(\"uninstall\")\n\tIoFilterOperationUpgrade   = IoFilterOperation(\"upgrade\")\n)\n\nfunc init() {\n\tt[\"IoFilterOperation\"] = reflect.TypeOf((*IoFilterOperation)(nil)).Elem()\n}\n\ntype IoFilterType string\n\nconst (\n\tIoFilterTypeCache              = IoFilterType(\"cache\")\n\tIoFilterTypeReplication        = IoFilterType(\"replication\")\n\tIoFilterTypeEncryption         = IoFilterType(\"encryption\")\n\tIoFilterTypeCompression        = IoFilterType(\"compression\")\n\tIoFilterTypeInspection         = IoFilterType(\"inspection\")\n\tIoFilterTypeDatastoreIoControl = IoFilterType(\"datastoreIoControl\")\n\tIoFilterTypeDataProvider       = IoFilterType(\"dataProvider\")\n)\n\nfunc init() 
{\n\tt[\"IoFilterType\"] = reflect.TypeOf((*IoFilterType)(nil)).Elem()\n}\n\ntype IscsiPortInfoPathStatus string\n\nconst (\n\tIscsiPortInfoPathStatusNotUsed    = IscsiPortInfoPathStatus(\"notUsed\")\n\tIscsiPortInfoPathStatusActive     = IscsiPortInfoPathStatus(\"active\")\n\tIscsiPortInfoPathStatusStandBy    = IscsiPortInfoPathStatus(\"standBy\")\n\tIscsiPortInfoPathStatusLastActive = IscsiPortInfoPathStatus(\"lastActive\")\n)\n\nfunc init() {\n\tt[\"IscsiPortInfoPathStatus\"] = reflect.TypeOf((*IscsiPortInfoPathStatus)(nil)).Elem()\n}\n\ntype LatencySensitivitySensitivityLevel string\n\nconst (\n\tLatencySensitivitySensitivityLevelLow    = LatencySensitivitySensitivityLevel(\"low\")\n\tLatencySensitivitySensitivityLevelNormal = LatencySensitivitySensitivityLevel(\"normal\")\n\tLatencySensitivitySensitivityLevelMedium = LatencySensitivitySensitivityLevel(\"medium\")\n\tLatencySensitivitySensitivityLevelHigh   = LatencySensitivitySensitivityLevel(\"high\")\n\tLatencySensitivitySensitivityLevelCustom = LatencySensitivitySensitivityLevel(\"custom\")\n)\n\nfunc init() {\n\tt[\"LatencySensitivitySensitivityLevel\"] = reflect.TypeOf((*LatencySensitivitySensitivityLevel)(nil)).Elem()\n}\n\ntype LicenseAssignmentFailedReason string\n\nconst (\n\tLicenseAssignmentFailedReasonKeyEntityMismatch                                    = LicenseAssignmentFailedReason(\"keyEntityMismatch\")\n\tLicenseAssignmentFailedReasonDowngradeDisallowed                                  = LicenseAssignmentFailedReason(\"downgradeDisallowed\")\n\tLicenseAssignmentFailedReasonInventoryNotManageableByVirtualCenter                = LicenseAssignmentFailedReason(\"inventoryNotManageableByVirtualCenter\")\n\tLicenseAssignmentFailedReasonHostsUnmanageableByVirtualCenterWithoutLicenseServer = LicenseAssignmentFailedReason(\"hostsUnmanageableByVirtualCenterWithoutLicenseServer\")\n)\n\nfunc init() {\n\tt[\"LicenseAssignmentFailedReason\"] = 
reflect.TypeOf((*LicenseAssignmentFailedReason)(nil)).Elem()\n}\n\ntype LicenseFeatureInfoSourceRestriction string\n\nconst (\n\tLicenseFeatureInfoSourceRestrictionUnrestricted = LicenseFeatureInfoSourceRestriction(\"unrestricted\")\n\tLicenseFeatureInfoSourceRestrictionServed       = LicenseFeatureInfoSourceRestriction(\"served\")\n\tLicenseFeatureInfoSourceRestrictionFile         = LicenseFeatureInfoSourceRestriction(\"file\")\n)\n\nfunc init() {\n\tt[\"LicenseFeatureInfoSourceRestriction\"] = reflect.TypeOf((*LicenseFeatureInfoSourceRestriction)(nil)).Elem()\n}\n\ntype LicenseFeatureInfoState string\n\nconst (\n\tLicenseFeatureInfoStateEnabled  = LicenseFeatureInfoState(\"enabled\")\n\tLicenseFeatureInfoStateDisabled = LicenseFeatureInfoState(\"disabled\")\n\tLicenseFeatureInfoStateOptional = LicenseFeatureInfoState(\"optional\")\n)\n\nfunc init() {\n\tt[\"LicenseFeatureInfoState\"] = reflect.TypeOf((*LicenseFeatureInfoState)(nil)).Elem()\n}\n\ntype LicenseFeatureInfoUnit string\n\nconst (\n\tLicenseFeatureInfoUnitHost       = LicenseFeatureInfoUnit(\"host\")\n\tLicenseFeatureInfoUnitCpuCore    = LicenseFeatureInfoUnit(\"cpuCore\")\n\tLicenseFeatureInfoUnitCpuPackage = LicenseFeatureInfoUnit(\"cpuPackage\")\n\tLicenseFeatureInfoUnitServer     = LicenseFeatureInfoUnit(\"server\")\n\tLicenseFeatureInfoUnitVm         = LicenseFeatureInfoUnit(\"vm\")\n)\n\nfunc init() {\n\tt[\"LicenseFeatureInfoUnit\"] = reflect.TypeOf((*LicenseFeatureInfoUnit)(nil)).Elem()\n}\n\ntype LicenseManagerLicenseKey string\n\nconst (\n\tLicenseManagerLicenseKeyEsxFull    = LicenseManagerLicenseKey(\"esxFull\")\n\tLicenseManagerLicenseKeyEsxVmtn    = LicenseManagerLicenseKey(\"esxVmtn\")\n\tLicenseManagerLicenseKeyEsxExpress = LicenseManagerLicenseKey(\"esxExpress\")\n\tLicenseManagerLicenseKeySan        = LicenseManagerLicenseKey(\"san\")\n\tLicenseManagerLicenseKeyIscsi      = LicenseManagerLicenseKey(\"iscsi\")\n\tLicenseManagerLicenseKeyNas        = 
LicenseManagerLicenseKey(\"nas\")\n\tLicenseManagerLicenseKeyVsmp       = LicenseManagerLicenseKey(\"vsmp\")\n\tLicenseManagerLicenseKeyBackup     = LicenseManagerLicenseKey(\"backup\")\n\tLicenseManagerLicenseKeyVc         = LicenseManagerLicenseKey(\"vc\")\n\tLicenseManagerLicenseKeyVcExpress  = LicenseManagerLicenseKey(\"vcExpress\")\n\tLicenseManagerLicenseKeyEsxHost    = LicenseManagerLicenseKey(\"esxHost\")\n\tLicenseManagerLicenseKeyGsxHost    = LicenseManagerLicenseKey(\"gsxHost\")\n\tLicenseManagerLicenseKeyServerHost = LicenseManagerLicenseKey(\"serverHost\")\n\tLicenseManagerLicenseKeyDrsPower   = LicenseManagerLicenseKey(\"drsPower\")\n\tLicenseManagerLicenseKeyVmotion    = LicenseManagerLicenseKey(\"vmotion\")\n\tLicenseManagerLicenseKeyDrs        = LicenseManagerLicenseKey(\"drs\")\n\tLicenseManagerLicenseKeyDas        = LicenseManagerLicenseKey(\"das\")\n)\n\nfunc init() {\n\tt[\"LicenseManagerLicenseKey\"] = reflect.TypeOf((*LicenseManagerLicenseKey)(nil)).Elem()\n}\n\ntype LicenseManagerState string\n\nconst (\n\tLicenseManagerStateInitializing = LicenseManagerState(\"initializing\")\n\tLicenseManagerStateNormal       = LicenseManagerState(\"normal\")\n\tLicenseManagerStateMarginal     = LicenseManagerState(\"marginal\")\n\tLicenseManagerStateFault        = LicenseManagerState(\"fault\")\n)\n\nfunc init() {\n\tt[\"LicenseManagerState\"] = reflect.TypeOf((*LicenseManagerState)(nil)).Elem()\n}\n\ntype LicenseReservationInfoState string\n\nconst (\n\tLicenseReservationInfoStateNotUsed       = LicenseReservationInfoState(\"notUsed\")\n\tLicenseReservationInfoStateNoLicense     = LicenseReservationInfoState(\"noLicense\")\n\tLicenseReservationInfoStateUnlicensedUse = LicenseReservationInfoState(\"unlicensedUse\")\n\tLicenseReservationInfoStateLicensed      = LicenseReservationInfoState(\"licensed\")\n)\n\nfunc init() {\n\tt[\"LicenseReservationInfoState\"] = reflect.TypeOf((*LicenseReservationInfoState)(nil)).Elem()\n}\n\ntype 
LinkDiscoveryProtocolConfigOperationType string\n\nconst (\n\tLinkDiscoveryProtocolConfigOperationTypeNone      = LinkDiscoveryProtocolConfigOperationType(\"none\")\n\tLinkDiscoveryProtocolConfigOperationTypeListen    = LinkDiscoveryProtocolConfigOperationType(\"listen\")\n\tLinkDiscoveryProtocolConfigOperationTypeAdvertise = LinkDiscoveryProtocolConfigOperationType(\"advertise\")\n\tLinkDiscoveryProtocolConfigOperationTypeBoth      = LinkDiscoveryProtocolConfigOperationType(\"both\")\n)\n\nfunc init() {\n\tt[\"LinkDiscoveryProtocolConfigOperationType\"] = reflect.TypeOf((*LinkDiscoveryProtocolConfigOperationType)(nil)).Elem()\n}\n\ntype LinkDiscoveryProtocolConfigProtocolType string\n\nconst (\n\tLinkDiscoveryProtocolConfigProtocolTypeCdp  = LinkDiscoveryProtocolConfigProtocolType(\"cdp\")\n\tLinkDiscoveryProtocolConfigProtocolTypeLldp = LinkDiscoveryProtocolConfigProtocolType(\"lldp\")\n)\n\nfunc init() {\n\tt[\"LinkDiscoveryProtocolConfigProtocolType\"] = reflect.TypeOf((*LinkDiscoveryProtocolConfigProtocolType)(nil)).Elem()\n}\n\ntype ManagedEntityStatus string\n\nconst (\n\tManagedEntityStatusGray   = ManagedEntityStatus(\"gray\")\n\tManagedEntityStatusGreen  = ManagedEntityStatus(\"green\")\n\tManagedEntityStatusYellow = ManagedEntityStatus(\"yellow\")\n\tManagedEntityStatusRed    = ManagedEntityStatus(\"red\")\n)\n\nfunc init() {\n\tt[\"ManagedEntityStatus\"] = reflect.TypeOf((*ManagedEntityStatus)(nil)).Elem()\n}\n\ntype MetricAlarmOperator string\n\nconst (\n\tMetricAlarmOperatorIsAbove = MetricAlarmOperator(\"isAbove\")\n\tMetricAlarmOperatorIsBelow = MetricAlarmOperator(\"isBelow\")\n)\n\nfunc init() {\n\tt[\"MetricAlarmOperator\"] = reflect.TypeOf((*MetricAlarmOperator)(nil)).Elem()\n}\n\ntype MultipathState string\n\nconst (\n\tMultipathStateStandby  = MultipathState(\"standby\")\n\tMultipathStateActive   = MultipathState(\"active\")\n\tMultipathStateDisabled = MultipathState(\"disabled\")\n\tMultipathStateDead     = 
MultipathState(\"dead\")\n\tMultipathStateUnknown  = MultipathState(\"unknown\")\n)\n\nfunc init() {\n\tt[\"MultipathState\"] = reflect.TypeOf((*MultipathState)(nil)).Elem()\n}\n\ntype NetBIOSConfigInfoMode string\n\nconst (\n\tNetBIOSConfigInfoModeUnknown        = NetBIOSConfigInfoMode(\"unknown\")\n\tNetBIOSConfigInfoModeEnabled        = NetBIOSConfigInfoMode(\"enabled\")\n\tNetBIOSConfigInfoModeDisabled       = NetBIOSConfigInfoMode(\"disabled\")\n\tNetBIOSConfigInfoModeEnabledViaDHCP = NetBIOSConfigInfoMode(\"enabledViaDHCP\")\n)\n\nfunc init() {\n\tt[\"NetBIOSConfigInfoMode\"] = reflect.TypeOf((*NetBIOSConfigInfoMode)(nil)).Elem()\n}\n\ntype NetIpConfigInfoIpAddressOrigin string\n\nconst (\n\tNetIpConfigInfoIpAddressOriginOther     = NetIpConfigInfoIpAddressOrigin(\"other\")\n\tNetIpConfigInfoIpAddressOriginManual    = NetIpConfigInfoIpAddressOrigin(\"manual\")\n\tNetIpConfigInfoIpAddressOriginDhcp      = NetIpConfigInfoIpAddressOrigin(\"dhcp\")\n\tNetIpConfigInfoIpAddressOriginLinklayer = NetIpConfigInfoIpAddressOrigin(\"linklayer\")\n\tNetIpConfigInfoIpAddressOriginRandom    = NetIpConfigInfoIpAddressOrigin(\"random\")\n)\n\nfunc init() {\n\tt[\"NetIpConfigInfoIpAddressOrigin\"] = reflect.TypeOf((*NetIpConfigInfoIpAddressOrigin)(nil)).Elem()\n}\n\ntype NetIpConfigInfoIpAddressStatus string\n\nconst (\n\tNetIpConfigInfoIpAddressStatusPreferred    = NetIpConfigInfoIpAddressStatus(\"preferred\")\n\tNetIpConfigInfoIpAddressStatusDeprecated   = NetIpConfigInfoIpAddressStatus(\"deprecated\")\n\tNetIpConfigInfoIpAddressStatusInvalid      = NetIpConfigInfoIpAddressStatus(\"invalid\")\n\tNetIpConfigInfoIpAddressStatusInaccessible = NetIpConfigInfoIpAddressStatus(\"inaccessible\")\n\tNetIpConfigInfoIpAddressStatusUnknown      = NetIpConfigInfoIpAddressStatus(\"unknown\")\n\tNetIpConfigInfoIpAddressStatusTentative    = NetIpConfigInfoIpAddressStatus(\"tentative\")\n\tNetIpConfigInfoIpAddressStatusDuplicate    = NetIpConfigInfoIpAddressStatus(\"duplicate\")\n)\n\nfunc 
init() {\n\tt[\"NetIpConfigInfoIpAddressStatus\"] = reflect.TypeOf((*NetIpConfigInfoIpAddressStatus)(nil)).Elem()\n}\n\ntype NetIpStackInfoEntryType string\n\nconst (\n\tNetIpStackInfoEntryTypeOther   = NetIpStackInfoEntryType(\"other\")\n\tNetIpStackInfoEntryTypeInvalid = NetIpStackInfoEntryType(\"invalid\")\n\tNetIpStackInfoEntryTypeDynamic = NetIpStackInfoEntryType(\"dynamic\")\n\tNetIpStackInfoEntryTypeManual  = NetIpStackInfoEntryType(\"manual\")\n)\n\nfunc init() {\n\tt[\"NetIpStackInfoEntryType\"] = reflect.TypeOf((*NetIpStackInfoEntryType)(nil)).Elem()\n}\n\ntype NetIpStackInfoPreference string\n\nconst (\n\tNetIpStackInfoPreferenceReserved = NetIpStackInfoPreference(\"reserved\")\n\tNetIpStackInfoPreferenceLow      = NetIpStackInfoPreference(\"low\")\n\tNetIpStackInfoPreferenceMedium   = NetIpStackInfoPreference(\"medium\")\n\tNetIpStackInfoPreferenceHigh     = NetIpStackInfoPreference(\"high\")\n)\n\nfunc init() {\n\tt[\"NetIpStackInfoPreference\"] = reflect.TypeOf((*NetIpStackInfoPreference)(nil)).Elem()\n}\n\ntype NotSupportedDeviceForFTDeviceType string\n\nconst (\n\tNotSupportedDeviceForFTDeviceTypeVirtualVmxnet3            = NotSupportedDeviceForFTDeviceType(\"virtualVmxnet3\")\n\tNotSupportedDeviceForFTDeviceTypeParaVirtualSCSIController = NotSupportedDeviceForFTDeviceType(\"paraVirtualSCSIController\")\n)\n\nfunc init() {\n\tt[\"NotSupportedDeviceForFTDeviceType\"] = reflect.TypeOf((*NotSupportedDeviceForFTDeviceType)(nil)).Elem()\n}\n\ntype NumVirtualCpusIncompatibleReason string\n\nconst (\n\tNumVirtualCpusIncompatibleReasonRecordReplay   = NumVirtualCpusIncompatibleReason(\"recordReplay\")\n\tNumVirtualCpusIncompatibleReasonFaultTolerance = NumVirtualCpusIncompatibleReason(\"faultTolerance\")\n)\n\nfunc init() {\n\tt[\"NumVirtualCpusIncompatibleReason\"] = reflect.TypeOf((*NumVirtualCpusIncompatibleReason)(nil)).Elem()\n}\n\ntype ObjectUpdateKind string\n\nconst (\n\tObjectUpdateKindModify = ObjectUpdateKind(\"modify\")\n\tObjectUpdateKindEnter  
= ObjectUpdateKind(\"enter\")\n\tObjectUpdateKindLeave  = ObjectUpdateKind(\"leave\")\n)\n\nfunc init() {\n\tt[\"ObjectUpdateKind\"] = reflect.TypeOf((*ObjectUpdateKind)(nil)).Elem()\n}\n\ntype OvfConsumerOstNodeType string\n\nconst (\n\tOvfConsumerOstNodeTypeEnvelope                = OvfConsumerOstNodeType(\"envelope\")\n\tOvfConsumerOstNodeTypeVirtualSystem           = OvfConsumerOstNodeType(\"virtualSystem\")\n\tOvfConsumerOstNodeTypeVirtualSystemCollection = OvfConsumerOstNodeType(\"virtualSystemCollection\")\n)\n\nfunc init() {\n\tt[\"OvfConsumerOstNodeType\"] = reflect.TypeOf((*OvfConsumerOstNodeType)(nil)).Elem()\n}\n\ntype OvfCreateImportSpecParamsDiskProvisioningType string\n\nconst (\n\tOvfCreateImportSpecParamsDiskProvisioningTypeMonolithicSparse     = OvfCreateImportSpecParamsDiskProvisioningType(\"monolithicSparse\")\n\tOvfCreateImportSpecParamsDiskProvisioningTypeMonolithicFlat       = OvfCreateImportSpecParamsDiskProvisioningType(\"monolithicFlat\")\n\tOvfCreateImportSpecParamsDiskProvisioningTypeTwoGbMaxExtentSparse = OvfCreateImportSpecParamsDiskProvisioningType(\"twoGbMaxExtentSparse\")\n\tOvfCreateImportSpecParamsDiskProvisioningTypeTwoGbMaxExtentFlat   = OvfCreateImportSpecParamsDiskProvisioningType(\"twoGbMaxExtentFlat\")\n\tOvfCreateImportSpecParamsDiskProvisioningTypeThin                 = OvfCreateImportSpecParamsDiskProvisioningType(\"thin\")\n\tOvfCreateImportSpecParamsDiskProvisioningTypeThick                = OvfCreateImportSpecParamsDiskProvisioningType(\"thick\")\n\tOvfCreateImportSpecParamsDiskProvisioningTypeSeSparse             = OvfCreateImportSpecParamsDiskProvisioningType(\"seSparse\")\n\tOvfCreateImportSpecParamsDiskProvisioningTypeEagerZeroedThick     = OvfCreateImportSpecParamsDiskProvisioningType(\"eagerZeroedThick\")\n\tOvfCreateImportSpecParamsDiskProvisioningTypeSparse               = OvfCreateImportSpecParamsDiskProvisioningType(\"sparse\")\n\tOvfCreateImportSpecParamsDiskProvisioningTypeFlat                 = 
OvfCreateImportSpecParamsDiskProvisioningType(\"flat\")\n)\n\nfunc init() {\n\tt[\"OvfCreateImportSpecParamsDiskProvisioningType\"] = reflect.TypeOf((*OvfCreateImportSpecParamsDiskProvisioningType)(nil)).Elem()\n}\n\ntype PerfFormat string\n\nconst (\n\tPerfFormatNormal = PerfFormat(\"normal\")\n\tPerfFormatCsv    = PerfFormat(\"csv\")\n)\n\nfunc init() {\n\tt[\"PerfFormat\"] = reflect.TypeOf((*PerfFormat)(nil)).Elem()\n}\n\ntype PerfStatsType string\n\nconst (\n\tPerfStatsTypeAbsolute = PerfStatsType(\"absolute\")\n\tPerfStatsTypeDelta    = PerfStatsType(\"delta\")\n\tPerfStatsTypeRate     = PerfStatsType(\"rate\")\n)\n\nfunc init() {\n\tt[\"PerfStatsType\"] = reflect.TypeOf((*PerfStatsType)(nil)).Elem()\n}\n\ntype PerfSummaryType string\n\nconst (\n\tPerfSummaryTypeAverage   = PerfSummaryType(\"average\")\n\tPerfSummaryTypeMaximum   = PerfSummaryType(\"maximum\")\n\tPerfSummaryTypeMinimum   = PerfSummaryType(\"minimum\")\n\tPerfSummaryTypeLatest    = PerfSummaryType(\"latest\")\n\tPerfSummaryTypeSummation = PerfSummaryType(\"summation\")\n\tPerfSummaryTypeNone      = PerfSummaryType(\"none\")\n)\n\nfunc init() {\n\tt[\"PerfSummaryType\"] = reflect.TypeOf((*PerfSummaryType)(nil)).Elem()\n}\n\ntype PerformanceManagerUnit string\n\nconst (\n\tPerformanceManagerUnitPercent            = PerformanceManagerUnit(\"percent\")\n\tPerformanceManagerUnitKiloBytes          = PerformanceManagerUnit(\"kiloBytes\")\n\tPerformanceManagerUnitMegaBytes          = PerformanceManagerUnit(\"megaBytes\")\n\tPerformanceManagerUnitMegaHertz          = PerformanceManagerUnit(\"megaHertz\")\n\tPerformanceManagerUnitNumber             = PerformanceManagerUnit(\"number\")\n\tPerformanceManagerUnitMicrosecond        = PerformanceManagerUnit(\"microsecond\")\n\tPerformanceManagerUnitMillisecond        = PerformanceManagerUnit(\"millisecond\")\n\tPerformanceManagerUnitSecond             = PerformanceManagerUnit(\"second\")\n\tPerformanceManagerUnitKiloBytesPerSecond = 
PerformanceManagerUnit(\"kiloBytesPerSecond\")\n\tPerformanceManagerUnitMegaBytesPerSecond = PerformanceManagerUnit(\"megaBytesPerSecond\")\n\tPerformanceManagerUnitWatt               = PerformanceManagerUnit(\"watt\")\n\tPerformanceManagerUnitJoule              = PerformanceManagerUnit(\"joule\")\n\tPerformanceManagerUnitTeraBytes          = PerformanceManagerUnit(\"teraBytes\")\n)\n\nfunc init() {\n\tt[\"PerformanceManagerUnit\"] = reflect.TypeOf((*PerformanceManagerUnit)(nil)).Elem()\n}\n\ntype PhysicalNicResourcePoolSchedulerDisallowedReason string\n\nconst (\n\tPhysicalNicResourcePoolSchedulerDisallowedReasonUserOptOut          = PhysicalNicResourcePoolSchedulerDisallowedReason(\"userOptOut\")\n\tPhysicalNicResourcePoolSchedulerDisallowedReasonHardwareUnsupported = PhysicalNicResourcePoolSchedulerDisallowedReason(\"hardwareUnsupported\")\n)\n\nfunc init() {\n\tt[\"PhysicalNicResourcePoolSchedulerDisallowedReason\"] = reflect.TypeOf((*PhysicalNicResourcePoolSchedulerDisallowedReason)(nil)).Elem()\n}\n\ntype PhysicalNicVmDirectPathGen2SupportedMode string\n\nconst (\n\tPhysicalNicVmDirectPathGen2SupportedModeUpt = PhysicalNicVmDirectPathGen2SupportedMode(\"upt\")\n)\n\nfunc init() {\n\tt[\"PhysicalNicVmDirectPathGen2SupportedMode\"] = reflect.TypeOf((*PhysicalNicVmDirectPathGen2SupportedMode)(nil)).Elem()\n}\n\ntype PlacementAffinityRuleRuleScope string\n\nconst (\n\tPlacementAffinityRuleRuleScopeCluster    = PlacementAffinityRuleRuleScope(\"cluster\")\n\tPlacementAffinityRuleRuleScopeHost       = PlacementAffinityRuleRuleScope(\"host\")\n\tPlacementAffinityRuleRuleScopeStoragePod = PlacementAffinityRuleRuleScope(\"storagePod\")\n\tPlacementAffinityRuleRuleScopeDatastore  = PlacementAffinityRuleRuleScope(\"datastore\")\n)\n\nfunc init() {\n\tt[\"PlacementAffinityRuleRuleScope\"] = reflect.TypeOf((*PlacementAffinityRuleRuleScope)(nil)).Elem()\n}\n\ntype PlacementAffinityRuleRuleType string\n\nconst (\n\tPlacementAffinityRuleRuleTypeAffinity         = 
PlacementAffinityRuleRuleType(\"affinity\")\n\tPlacementAffinityRuleRuleTypeAntiAffinity     = PlacementAffinityRuleRuleType(\"antiAffinity\")\n\tPlacementAffinityRuleRuleTypeSoftAffinity     = PlacementAffinityRuleRuleType(\"softAffinity\")\n\tPlacementAffinityRuleRuleTypeSoftAntiAffinity = PlacementAffinityRuleRuleType(\"softAntiAffinity\")\n)\n\nfunc init() {\n\tt[\"PlacementAffinityRuleRuleType\"] = reflect.TypeOf((*PlacementAffinityRuleRuleType)(nil)).Elem()\n}\n\ntype PlacementSpecPlacementType string\n\nconst (\n\tPlacementSpecPlacementTypeCreate      = PlacementSpecPlacementType(\"create\")\n\tPlacementSpecPlacementTypeReconfigure = PlacementSpecPlacementType(\"reconfigure\")\n\tPlacementSpecPlacementTypeRelocate    = PlacementSpecPlacementType(\"relocate\")\n\tPlacementSpecPlacementTypeClone       = PlacementSpecPlacementType(\"clone\")\n)\n\nfunc init() {\n\tt[\"PlacementSpecPlacementType\"] = reflect.TypeOf((*PlacementSpecPlacementType)(nil)).Elem()\n}\n\ntype PortGroupConnecteeType string\n\nconst (\n\tPortGroupConnecteeTypeVirtualMachine   = PortGroupConnecteeType(\"virtualMachine\")\n\tPortGroupConnecteeTypeSystemManagement = PortGroupConnecteeType(\"systemManagement\")\n\tPortGroupConnecteeTypeHost             = PortGroupConnecteeType(\"host\")\n\tPortGroupConnecteeTypeUnknown          = PortGroupConnecteeType(\"unknown\")\n)\n\nfunc init() {\n\tt[\"PortGroupConnecteeType\"] = reflect.TypeOf((*PortGroupConnecteeType)(nil)).Elem()\n}\n\ntype ProfileExecuteResultStatus string\n\nconst (\n\tProfileExecuteResultStatusSuccess   = ProfileExecuteResultStatus(\"success\")\n\tProfileExecuteResultStatusNeedInput = ProfileExecuteResultStatus(\"needInput\")\n\tProfileExecuteResultStatusError     = ProfileExecuteResultStatus(\"error\")\n)\n\nfunc init() {\n\tt[\"ProfileExecuteResultStatus\"] = reflect.TypeOf((*ProfileExecuteResultStatus)(nil)).Elem()\n}\n\ntype ProfileNumericComparator string\n\nconst (\n\tProfileNumericComparatorLessThan         = 
ProfileNumericComparator(\"lessThan\")\n\tProfileNumericComparatorLessThanEqual    = ProfileNumericComparator(\"lessThanEqual\")\n\tProfileNumericComparatorEqual            = ProfileNumericComparator(\"equal\")\n\tProfileNumericComparatorNotEqual         = ProfileNumericComparator(\"notEqual\")\n\tProfileNumericComparatorGreaterThanEqual = ProfileNumericComparator(\"greaterThanEqual\")\n\tProfileNumericComparatorGreaterThan      = ProfileNumericComparator(\"greaterThan\")\n)\n\nfunc init() {\n\tt[\"ProfileNumericComparator\"] = reflect.TypeOf((*ProfileNumericComparator)(nil)).Elem()\n}\n\ntype PropertyChangeOp string\n\nconst (\n\tPropertyChangeOpAdd            = PropertyChangeOp(\"add\")\n\tPropertyChangeOpRemove         = PropertyChangeOp(\"remove\")\n\tPropertyChangeOpAssign         = PropertyChangeOp(\"assign\")\n\tPropertyChangeOpIndirectRemove = PropertyChangeOp(\"indirectRemove\")\n)\n\nfunc init() {\n\tt[\"PropertyChangeOp\"] = reflect.TypeOf((*PropertyChangeOp)(nil)).Elem()\n}\n\ntype QuarantineModeFaultFaultType string\n\nconst (\n\tQuarantineModeFaultFaultTypeNoCompatibleNonQuarantinedHost = QuarantineModeFaultFaultType(\"NoCompatibleNonQuarantinedHost\")\n\tQuarantineModeFaultFaultTypeCorrectionDisallowed           = QuarantineModeFaultFaultType(\"CorrectionDisallowed\")\n\tQuarantineModeFaultFaultTypeCorrectionImpact               = QuarantineModeFaultFaultType(\"CorrectionImpact\")\n)\n\nfunc init() {\n\tt[\"QuarantineModeFaultFaultType\"] = reflect.TypeOf((*QuarantineModeFaultFaultType)(nil)).Elem()\n}\n\ntype QuiesceMode string\n\nconst (\n\tQuiesceModeApplication = QuiesceMode(\"application\")\n\tQuiesceModeFilesystem  = QuiesceMode(\"filesystem\")\n\tQuiesceModeNone        = QuiesceMode(\"none\")\n)\n\nfunc init() {\n\tt[\"QuiesceMode\"] = reflect.TypeOf((*QuiesceMode)(nil)).Elem()\n}\n\ntype RecommendationReasonCode string\n\nconst (\n\tRecommendationReasonCodeFairnessCpuAvg                  = 
RecommendationReasonCode(\"fairnessCpuAvg\")\n\tRecommendationReasonCodeFairnessMemAvg                  = RecommendationReasonCode(\"fairnessMemAvg\")\n\tRecommendationReasonCodeJointAffin                      = RecommendationReasonCode(\"jointAffin\")\n\tRecommendationReasonCodeAntiAffin                       = RecommendationReasonCode(\"antiAffin\")\n\tRecommendationReasonCodeHostMaint                       = RecommendationReasonCode(\"hostMaint\")\n\tRecommendationReasonCodeEnterStandby                    = RecommendationReasonCode(\"enterStandby\")\n\tRecommendationReasonCodeReservationCpu                  = RecommendationReasonCode(\"reservationCpu\")\n\tRecommendationReasonCodeReservationMem                  = RecommendationReasonCode(\"reservationMem\")\n\tRecommendationReasonCodePowerOnVm                       = RecommendationReasonCode(\"powerOnVm\")\n\tRecommendationReasonCodePowerSaving                     = RecommendationReasonCode(\"powerSaving\")\n\tRecommendationReasonCodeIncreaseCapacity                = RecommendationReasonCode(\"increaseCapacity\")\n\tRecommendationReasonCodeCheckResource                   = RecommendationReasonCode(\"checkResource\")\n\tRecommendationReasonCodeUnreservedCapacity              = RecommendationReasonCode(\"unreservedCapacity\")\n\tRecommendationReasonCodeVmHostHardAffinity              = RecommendationReasonCode(\"vmHostHardAffinity\")\n\tRecommendationReasonCodeVmHostSoftAffinity              = RecommendationReasonCode(\"vmHostSoftAffinity\")\n\tRecommendationReasonCodeBalanceDatastoreSpaceUsage      = RecommendationReasonCode(\"balanceDatastoreSpaceUsage\")\n\tRecommendationReasonCodeBalanceDatastoreIOLoad          = RecommendationReasonCode(\"balanceDatastoreIOLoad\")\n\tRecommendationReasonCodeBalanceDatastoreIOPSReservation = RecommendationReasonCode(\"balanceDatastoreIOPSReservation\")\n\tRecommendationReasonCodeDatastoreMaint                  = 
RecommendationReasonCode(\"datastoreMaint\")\n\tRecommendationReasonCodeVirtualDiskJointAffin           = RecommendationReasonCode(\"virtualDiskJointAffin\")\n\tRecommendationReasonCodeVirtualDiskAntiAffin            = RecommendationReasonCode(\"virtualDiskAntiAffin\")\n\tRecommendationReasonCodeDatastoreSpaceOutage            = RecommendationReasonCode(\"datastoreSpaceOutage\")\n\tRecommendationReasonCodeStoragePlacement                = RecommendationReasonCode(\"storagePlacement\")\n\tRecommendationReasonCodeIolbDisabledInternal            = RecommendationReasonCode(\"iolbDisabledInternal\")\n\tRecommendationReasonCodeXvmotionPlacement               = RecommendationReasonCode(\"xvmotionPlacement\")\n\tRecommendationReasonCodeNetworkBandwidthReservation     = RecommendationReasonCode(\"networkBandwidthReservation\")\n\tRecommendationReasonCodeHostInDegradation               = RecommendationReasonCode(\"hostInDegradation\")\n\tRecommendationReasonCodeHostExitDegradation             = RecommendationReasonCode(\"hostExitDegradation\")\n\tRecommendationReasonCodeMaxVmsConstraint                = RecommendationReasonCode(\"maxVmsConstraint\")\n\tRecommendationReasonCodeFtConstraints                   = RecommendationReasonCode(\"ftConstraints\")\n)\n\nfunc init() {\n\tt[\"RecommendationReasonCode\"] = reflect.TypeOf((*RecommendationReasonCode)(nil)).Elem()\n}\n\ntype RecommendationType string\n\nconst (\n\tRecommendationTypeV1 = RecommendationType(\"V1\")\n)\n\nfunc init() {\n\tt[\"RecommendationType\"] = reflect.TypeOf((*RecommendationType)(nil)).Elem()\n}\n\ntype ReplicationDiskConfigFaultReasonForFault string\n\nconst (\n\tReplicationDiskConfigFaultReasonForFaultDiskNotFound                           = ReplicationDiskConfigFaultReasonForFault(\"diskNotFound\")\n\tReplicationDiskConfigFaultReasonForFaultDiskTypeNotSupported                   = ReplicationDiskConfigFaultReasonForFault(\"diskTypeNotSupported\")\n\tReplicationDiskConfigFaultReasonForFaultInvalidDiskKey 
                        = ReplicationDiskConfigFaultReasonForFault(\"invalidDiskKey\")\n\tReplicationDiskConfigFaultReasonForFaultInvalidDiskReplicationId               = ReplicationDiskConfigFaultReasonForFault(\"invalidDiskReplicationId\")\n\tReplicationDiskConfigFaultReasonForFaultDuplicateDiskReplicationId             = ReplicationDiskConfigFaultReasonForFault(\"duplicateDiskReplicationId\")\n\tReplicationDiskConfigFaultReasonForFaultInvalidPersistentFilePath              = ReplicationDiskConfigFaultReasonForFault(\"invalidPersistentFilePath\")\n\tReplicationDiskConfigFaultReasonForFaultReconfigureDiskReplicationIdNotAllowed = ReplicationDiskConfigFaultReasonForFault(\"reconfigureDiskReplicationIdNotAllowed\")\n)\n\nfunc init() {\n\tt[\"ReplicationDiskConfigFaultReasonForFault\"] = reflect.TypeOf((*ReplicationDiskConfigFaultReasonForFault)(nil)).Elem()\n}\n\ntype ReplicationVmConfigFaultReasonForFault string\n\nconst (\n\tReplicationVmConfigFaultReasonForFaultIncompatibleHwVersion                    = ReplicationVmConfigFaultReasonForFault(\"incompatibleHwVersion\")\n\tReplicationVmConfigFaultReasonForFaultInvalidVmReplicationId                   = ReplicationVmConfigFaultReasonForFault(\"invalidVmReplicationId\")\n\tReplicationVmConfigFaultReasonForFaultInvalidGenerationNumber                  = ReplicationVmConfigFaultReasonForFault(\"invalidGenerationNumber\")\n\tReplicationVmConfigFaultReasonForFaultOutOfBoundsRpoValue                      = ReplicationVmConfigFaultReasonForFault(\"outOfBoundsRpoValue\")\n\tReplicationVmConfigFaultReasonForFaultInvalidDestinationIpAddress              = ReplicationVmConfigFaultReasonForFault(\"invalidDestinationIpAddress\")\n\tReplicationVmConfigFaultReasonForFaultInvalidDestinationPort                   = ReplicationVmConfigFaultReasonForFault(\"invalidDestinationPort\")\n\tReplicationVmConfigFaultReasonForFaultInvalidExtraVmOptions                    = 
ReplicationVmConfigFaultReasonForFault(\"invalidExtraVmOptions\")\n\tReplicationVmConfigFaultReasonForFaultStaleGenerationNumber                    = ReplicationVmConfigFaultReasonForFault(\"staleGenerationNumber\")\n\tReplicationVmConfigFaultReasonForFaultReconfigureVmReplicationIdNotAllowed     = ReplicationVmConfigFaultReasonForFault(\"reconfigureVmReplicationIdNotAllowed\")\n\tReplicationVmConfigFaultReasonForFaultCannotRetrieveVmReplicationConfiguration = ReplicationVmConfigFaultReasonForFault(\"cannotRetrieveVmReplicationConfiguration\")\n\tReplicationVmConfigFaultReasonForFaultReplicationAlreadyEnabled                = ReplicationVmConfigFaultReasonForFault(\"replicationAlreadyEnabled\")\n\tReplicationVmConfigFaultReasonForFaultInvalidPriorConfiguration                = ReplicationVmConfigFaultReasonForFault(\"invalidPriorConfiguration\")\n\tReplicationVmConfigFaultReasonForFaultReplicationNotEnabled                    = ReplicationVmConfigFaultReasonForFault(\"replicationNotEnabled\")\n\tReplicationVmConfigFaultReasonForFaultReplicationConfigurationFailed           = ReplicationVmConfigFaultReasonForFault(\"replicationConfigurationFailed\")\n\tReplicationVmConfigFaultReasonForFaultEncryptedVm                              = ReplicationVmConfigFaultReasonForFault(\"encryptedVm\")\n)\n\nfunc init() {\n\tt[\"ReplicationVmConfigFaultReasonForFault\"] = reflect.TypeOf((*ReplicationVmConfigFaultReasonForFault)(nil)).Elem()\n}\n\ntype ReplicationVmFaultReasonForFault string\n\nconst (\n\tReplicationVmFaultReasonForFaultNotConfigured      = ReplicationVmFaultReasonForFault(\"notConfigured\")\n\tReplicationVmFaultReasonForFaultPoweredOff         = ReplicationVmFaultReasonForFault(\"poweredOff\")\n\tReplicationVmFaultReasonForFaultSuspended          = ReplicationVmFaultReasonForFault(\"suspended\")\n\tReplicationVmFaultReasonForFaultPoweredOn          = ReplicationVmFaultReasonForFault(\"poweredOn\")\n\tReplicationVmFaultReasonForFaultOfflineReplicating = 
ReplicationVmFaultReasonForFault(\"offlineReplicating\")\n\tReplicationVmFaultReasonForFaultInvalidState       = ReplicationVmFaultReasonForFault(\"invalidState\")\n\tReplicationVmFaultReasonForFaultInvalidInstanceId  = ReplicationVmFaultReasonForFault(\"invalidInstanceId\")\n\tReplicationVmFaultReasonForFaultCloseDiskError     = ReplicationVmFaultReasonForFault(\"closeDiskError\")\n)\n\nfunc init() {\n\tt[\"ReplicationVmFaultReasonForFault\"] = reflect.TypeOf((*ReplicationVmFaultReasonForFault)(nil)).Elem()\n}\n\ntype ReplicationVmInProgressFaultActivity string\n\nconst (\n\tReplicationVmInProgressFaultActivityFullSync = ReplicationVmInProgressFaultActivity(\"fullSync\")\n\tReplicationVmInProgressFaultActivityDelta    = ReplicationVmInProgressFaultActivity(\"delta\")\n)\n\nfunc init() {\n\tt[\"ReplicationVmInProgressFaultActivity\"] = reflect.TypeOf((*ReplicationVmInProgressFaultActivity)(nil)).Elem()\n}\n\ntype ReplicationVmState string\n\nconst (\n\tReplicationVmStateNone    = ReplicationVmState(\"none\")\n\tReplicationVmStatePaused  = ReplicationVmState(\"paused\")\n\tReplicationVmStateSyncing = ReplicationVmState(\"syncing\")\n\tReplicationVmStateIdle    = ReplicationVmState(\"idle\")\n\tReplicationVmStateActive  = ReplicationVmState(\"active\")\n\tReplicationVmStateError   = ReplicationVmState(\"error\")\n)\n\nfunc init() {\n\tt[\"ReplicationVmState\"] = reflect.TypeOf((*ReplicationVmState)(nil)).Elem()\n}\n\ntype ScheduledHardwareUpgradeInfoHardwareUpgradePolicy string\n\nconst (\n\tScheduledHardwareUpgradeInfoHardwareUpgradePolicyNever          = ScheduledHardwareUpgradeInfoHardwareUpgradePolicy(\"never\")\n\tScheduledHardwareUpgradeInfoHardwareUpgradePolicyOnSoftPowerOff = ScheduledHardwareUpgradeInfoHardwareUpgradePolicy(\"onSoftPowerOff\")\n\tScheduledHardwareUpgradeInfoHardwareUpgradePolicyAlways         = ScheduledHardwareUpgradeInfoHardwareUpgradePolicy(\"always\")\n)\n\nfunc init() {\n\tt[\"ScheduledHardwareUpgradeInfoHardwareUpgradePolicy\"] = 
reflect.TypeOf((*ScheduledHardwareUpgradeInfoHardwareUpgradePolicy)(nil)).Elem()\n}\n\ntype ScheduledHardwareUpgradeInfoHardwareUpgradeStatus string\n\nconst (\n\tScheduledHardwareUpgradeInfoHardwareUpgradeStatusNone    = ScheduledHardwareUpgradeInfoHardwareUpgradeStatus(\"none\")\n\tScheduledHardwareUpgradeInfoHardwareUpgradeStatusPending = ScheduledHardwareUpgradeInfoHardwareUpgradeStatus(\"pending\")\n\tScheduledHardwareUpgradeInfoHardwareUpgradeStatusSuccess = ScheduledHardwareUpgradeInfoHardwareUpgradeStatus(\"success\")\n\tScheduledHardwareUpgradeInfoHardwareUpgradeStatusFailed  = ScheduledHardwareUpgradeInfoHardwareUpgradeStatus(\"failed\")\n)\n\nfunc init() {\n\tt[\"ScheduledHardwareUpgradeInfoHardwareUpgradeStatus\"] = reflect.TypeOf((*ScheduledHardwareUpgradeInfoHardwareUpgradeStatus)(nil)).Elem()\n}\n\ntype ScsiDiskType string\n\nconst (\n\tScsiDiskTypeNative512   = ScsiDiskType(\"native512\")\n\tScsiDiskTypeEmulated512 = ScsiDiskType(\"emulated512\")\n\tScsiDiskTypeNative4k    = ScsiDiskType(\"native4k\")\n\tScsiDiskTypeUnknown     = ScsiDiskType(\"unknown\")\n)\n\nfunc init() {\n\tt[\"ScsiDiskType\"] = reflect.TypeOf((*ScsiDiskType)(nil)).Elem()\n}\n\ntype ScsiLunDescriptorQuality string\n\nconst (\n\tScsiLunDescriptorQualityHighQuality    = ScsiLunDescriptorQuality(\"highQuality\")\n\tScsiLunDescriptorQualityMediumQuality  = ScsiLunDescriptorQuality(\"mediumQuality\")\n\tScsiLunDescriptorQualityLowQuality     = ScsiLunDescriptorQuality(\"lowQuality\")\n\tScsiLunDescriptorQualityUnknownQuality = ScsiLunDescriptorQuality(\"unknownQuality\")\n)\n\nfunc init() {\n\tt[\"ScsiLunDescriptorQuality\"] = reflect.TypeOf((*ScsiLunDescriptorQuality)(nil)).Elem()\n}\n\ntype ScsiLunState string\n\nconst (\n\tScsiLunStateUnknownState      = ScsiLunState(\"unknownState\")\n\tScsiLunStateOk                = ScsiLunState(\"ok\")\n\tScsiLunStateError             = ScsiLunState(\"error\")\n\tScsiLunStateOff               = ScsiLunState(\"off\")\n\tScsiLunStateQuiesced     
     = ScsiLunState(\"quiesced\")\n\tScsiLunStateDegraded          = ScsiLunState(\"degraded\")\n\tScsiLunStateLostCommunication = ScsiLunState(\"lostCommunication\")\n\tScsiLunStateTimeout           = ScsiLunState(\"timeout\")\n)\n\nfunc init() {\n\tt[\"ScsiLunState\"] = reflect.TypeOf((*ScsiLunState)(nil)).Elem()\n}\n\ntype ScsiLunType string\n\nconst (\n\tScsiLunTypeDisk                   = ScsiLunType(\"disk\")\n\tScsiLunTypeTape                   = ScsiLunType(\"tape\")\n\tScsiLunTypePrinter                = ScsiLunType(\"printer\")\n\tScsiLunTypeProcessor              = ScsiLunType(\"processor\")\n\tScsiLunTypeWorm                   = ScsiLunType(\"worm\")\n\tScsiLunTypeCdrom                  = ScsiLunType(\"cdrom\")\n\tScsiLunTypeScanner                = ScsiLunType(\"scanner\")\n\tScsiLunTypeOpticalDevice          = ScsiLunType(\"opticalDevice\")\n\tScsiLunTypeMediaChanger           = ScsiLunType(\"mediaChanger\")\n\tScsiLunTypeCommunications         = ScsiLunType(\"communications\")\n\tScsiLunTypeStorageArrayController = ScsiLunType(\"storageArrayController\")\n\tScsiLunTypeEnclosure              = ScsiLunType(\"enclosure\")\n\tScsiLunTypeUnknown                = ScsiLunType(\"unknown\")\n)\n\nfunc init() {\n\tt[\"ScsiLunType\"] = reflect.TypeOf((*ScsiLunType)(nil)).Elem()\n}\n\ntype ScsiLunVStorageSupportStatus string\n\nconst (\n\tScsiLunVStorageSupportStatusVStorageSupported   = ScsiLunVStorageSupportStatus(\"vStorageSupported\")\n\tScsiLunVStorageSupportStatusVStorageUnsupported = ScsiLunVStorageSupportStatus(\"vStorageUnsupported\")\n\tScsiLunVStorageSupportStatusVStorageUnknown     = ScsiLunVStorageSupportStatus(\"vStorageUnknown\")\n)\n\nfunc init() {\n\tt[\"ScsiLunVStorageSupportStatus\"] = reflect.TypeOf((*ScsiLunVStorageSupportStatus)(nil)).Elem()\n}\n\ntype SessionManagerHttpServiceRequestSpecMethod string\n\nconst (\n\tSessionManagerHttpServiceRequestSpecMethodHttpOptions = 
SessionManagerHttpServiceRequestSpecMethod(\"httpOptions\")\n\tSessionManagerHttpServiceRequestSpecMethodHttpGet     = SessionManagerHttpServiceRequestSpecMethod(\"httpGet\")\n\tSessionManagerHttpServiceRequestSpecMethodHttpHead    = SessionManagerHttpServiceRequestSpecMethod(\"httpHead\")\n\tSessionManagerHttpServiceRequestSpecMethodHttpPost    = SessionManagerHttpServiceRequestSpecMethod(\"httpPost\")\n\tSessionManagerHttpServiceRequestSpecMethodHttpPut     = SessionManagerHttpServiceRequestSpecMethod(\"httpPut\")\n\tSessionManagerHttpServiceRequestSpecMethodHttpDelete  = SessionManagerHttpServiceRequestSpecMethod(\"httpDelete\")\n\tSessionManagerHttpServiceRequestSpecMethodHttpTrace   = SessionManagerHttpServiceRequestSpecMethod(\"httpTrace\")\n\tSessionManagerHttpServiceRequestSpecMethodHttpConnect = SessionManagerHttpServiceRequestSpecMethod(\"httpConnect\")\n)\n\nfunc init() {\n\tt[\"SessionManagerHttpServiceRequestSpecMethod\"] = reflect.TypeOf((*SessionManagerHttpServiceRequestSpecMethod)(nil)).Elem()\n}\n\ntype SharesLevel string\n\nconst (\n\tSharesLevelLow    = SharesLevel(\"low\")\n\tSharesLevelNormal = SharesLevel(\"normal\")\n\tSharesLevelHigh   = SharesLevel(\"high\")\n\tSharesLevelCustom = SharesLevel(\"custom\")\n)\n\nfunc init() {\n\tt[\"SharesLevel\"] = reflect.TypeOf((*SharesLevel)(nil)).Elem()\n}\n\ntype SimpleCommandEncoding string\n\nconst (\n\tSimpleCommandEncodingCSV    = SimpleCommandEncoding(\"CSV\")\n\tSimpleCommandEncodingHEX    = SimpleCommandEncoding(\"HEX\")\n\tSimpleCommandEncodingSTRING = SimpleCommandEncoding(\"STRING\")\n)\n\nfunc init() {\n\tt[\"SimpleCommandEncoding\"] = reflect.TypeOf((*SimpleCommandEncoding)(nil)).Elem()\n}\n\ntype SlpDiscoveryMethod string\n\nconst (\n\tSlpDiscoveryMethodSlpDhcp          = SlpDiscoveryMethod(\"slpDhcp\")\n\tSlpDiscoveryMethodSlpAutoUnicast   = SlpDiscoveryMethod(\"slpAutoUnicast\")\n\tSlpDiscoveryMethodSlpAutoMulticast = SlpDiscoveryMethod(\"slpAutoMulticast\")\n\tSlpDiscoveryMethodSlpManual 
       = SlpDiscoveryMethod(\"slpManual\")\n)\n\nfunc init() {\n\tt[\"SlpDiscoveryMethod\"] = reflect.TypeOf((*SlpDiscoveryMethod)(nil)).Elem()\n}\n\ntype SoftwarePackageConstraint string\n\nconst (\n\tSoftwarePackageConstraintEquals           = SoftwarePackageConstraint(\"equals\")\n\tSoftwarePackageConstraintLessThan         = SoftwarePackageConstraint(\"lessThan\")\n\tSoftwarePackageConstraintLessThanEqual    = SoftwarePackageConstraint(\"lessThanEqual\")\n\tSoftwarePackageConstraintGreaterThanEqual = SoftwarePackageConstraint(\"greaterThanEqual\")\n\tSoftwarePackageConstraintGreaterThan      = SoftwarePackageConstraint(\"greaterThan\")\n)\n\nfunc init() {\n\tt[\"SoftwarePackageConstraint\"] = reflect.TypeOf((*SoftwarePackageConstraint)(nil)).Elem()\n}\n\ntype SoftwarePackageVibType string\n\nconst (\n\tSoftwarePackageVibTypeBootbank = SoftwarePackageVibType(\"bootbank\")\n\tSoftwarePackageVibTypeTools    = SoftwarePackageVibType(\"tools\")\n\tSoftwarePackageVibTypeMeta     = SoftwarePackageVibType(\"meta\")\n)\n\nfunc init() {\n\tt[\"SoftwarePackageVibType\"] = reflect.TypeOf((*SoftwarePackageVibType)(nil)).Elem()\n}\n\ntype StateAlarmOperator string\n\nconst (\n\tStateAlarmOperatorIsEqual   = StateAlarmOperator(\"isEqual\")\n\tStateAlarmOperatorIsUnequal = StateAlarmOperator(\"isUnequal\")\n)\n\nfunc init() {\n\tt[\"StateAlarmOperator\"] = reflect.TypeOf((*StateAlarmOperator)(nil)).Elem()\n}\n\ntype StorageDrsPodConfigInfoBehavior string\n\nconst (\n\tStorageDrsPodConfigInfoBehaviorManual    = StorageDrsPodConfigInfoBehavior(\"manual\")\n\tStorageDrsPodConfigInfoBehaviorAutomated = StorageDrsPodConfigInfoBehavior(\"automated\")\n)\n\nfunc init() {\n\tt[\"StorageDrsPodConfigInfoBehavior\"] = reflect.TypeOf((*StorageDrsPodConfigInfoBehavior)(nil)).Elem()\n}\n\ntype StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode string\n\nconst (\n\tStorageDrsSpaceLoadBalanceConfigSpaceThresholdModeUtilization = 
StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode(\"utilization\")\n\tStorageDrsSpaceLoadBalanceConfigSpaceThresholdModeFreeSpace   = StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode(\"freeSpace\")\n)\n\nfunc init() {\n\tt[\"StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode\"] = reflect.TypeOf((*StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode)(nil)).Elem()\n}\n\ntype StorageIORMThresholdMode string\n\nconst (\n\tStorageIORMThresholdModeAutomatic = StorageIORMThresholdMode(\"automatic\")\n\tStorageIORMThresholdModeManual    = StorageIORMThresholdMode(\"manual\")\n)\n\nfunc init() {\n\tt[\"StorageIORMThresholdMode\"] = reflect.TypeOf((*StorageIORMThresholdMode)(nil)).Elem()\n}\n\ntype StoragePlacementSpecPlacementType string\n\nconst (\n\tStoragePlacementSpecPlacementTypeCreate      = StoragePlacementSpecPlacementType(\"create\")\n\tStoragePlacementSpecPlacementTypeReconfigure = StoragePlacementSpecPlacementType(\"reconfigure\")\n\tStoragePlacementSpecPlacementTypeRelocate    = StoragePlacementSpecPlacementType(\"relocate\")\n\tStoragePlacementSpecPlacementTypeClone       = StoragePlacementSpecPlacementType(\"clone\")\n)\n\nfunc init() {\n\tt[\"StoragePlacementSpecPlacementType\"] = reflect.TypeOf((*StoragePlacementSpecPlacementType)(nil)).Elem()\n}\n\ntype TaskFilterSpecRecursionOption string\n\nconst (\n\tTaskFilterSpecRecursionOptionSelf     = TaskFilterSpecRecursionOption(\"self\")\n\tTaskFilterSpecRecursionOptionChildren = TaskFilterSpecRecursionOption(\"children\")\n\tTaskFilterSpecRecursionOptionAll      = TaskFilterSpecRecursionOption(\"all\")\n)\n\nfunc init() {\n\tt[\"TaskFilterSpecRecursionOption\"] = reflect.TypeOf((*TaskFilterSpecRecursionOption)(nil)).Elem()\n}\n\ntype TaskFilterSpecTimeOption string\n\nconst (\n\tTaskFilterSpecTimeOptionQueuedTime    = TaskFilterSpecTimeOption(\"queuedTime\")\n\tTaskFilterSpecTimeOptionStartedTime   = TaskFilterSpecTimeOption(\"startedTime\")\n\tTaskFilterSpecTimeOptionCompletedTime = 
TaskFilterSpecTimeOption(\"completedTime\")\n)\n\nfunc init() {\n\tt[\"TaskFilterSpecTimeOption\"] = reflect.TypeOf((*TaskFilterSpecTimeOption)(nil)).Elem()\n}\n\ntype TaskInfoState string\n\nconst (\n\tTaskInfoStateQueued  = TaskInfoState(\"queued\")\n\tTaskInfoStateRunning = TaskInfoState(\"running\")\n\tTaskInfoStateSuccess = TaskInfoState(\"success\")\n\tTaskInfoStateError   = TaskInfoState(\"error\")\n)\n\nfunc init() {\n\tt[\"TaskInfoState\"] = reflect.TypeOf((*TaskInfoState)(nil)).Elem()\n}\n\ntype ThirdPartyLicenseAssignmentFailedReason string\n\nconst (\n\tThirdPartyLicenseAssignmentFailedReasonLicenseAssignmentFailed = ThirdPartyLicenseAssignmentFailedReason(\"licenseAssignmentFailed\")\n\tThirdPartyLicenseAssignmentFailedReasonModuleNotInstalled      = ThirdPartyLicenseAssignmentFailedReason(\"moduleNotInstalled\")\n)\n\nfunc init() {\n\tt[\"ThirdPartyLicenseAssignmentFailedReason\"] = reflect.TypeOf((*ThirdPartyLicenseAssignmentFailedReason)(nil)).Elem()\n}\n\ntype UpgradePolicy string\n\nconst (\n\tUpgradePolicyManual              = UpgradePolicy(\"manual\")\n\tUpgradePolicyUpgradeAtPowerCycle = UpgradePolicy(\"upgradeAtPowerCycle\")\n)\n\nfunc init() {\n\tt[\"UpgradePolicy\"] = reflect.TypeOf((*UpgradePolicy)(nil)).Elem()\n}\n\ntype VAppAutoStartAction string\n\nconst (\n\tVAppAutoStartActionNone          = VAppAutoStartAction(\"none\")\n\tVAppAutoStartActionPowerOn       = VAppAutoStartAction(\"powerOn\")\n\tVAppAutoStartActionPowerOff      = VAppAutoStartAction(\"powerOff\")\n\tVAppAutoStartActionGuestShutdown = VAppAutoStartAction(\"guestShutdown\")\n\tVAppAutoStartActionSuspend       = VAppAutoStartAction(\"suspend\")\n)\n\nfunc init() {\n\tt[\"VAppAutoStartAction\"] = reflect.TypeOf((*VAppAutoStartAction)(nil)).Elem()\n}\n\ntype VAppCloneSpecProvisioningType string\n\nconst (\n\tVAppCloneSpecProvisioningTypeSameAsSource = VAppCloneSpecProvisioningType(\"sameAsSource\")\n\tVAppCloneSpecProvisioningTypeThin         = 
VAppCloneSpecProvisioningType(\"thin\")\n\tVAppCloneSpecProvisioningTypeThick        = VAppCloneSpecProvisioningType(\"thick\")\n)\n\nfunc init() {\n\tt[\"VAppCloneSpecProvisioningType\"] = reflect.TypeOf((*VAppCloneSpecProvisioningType)(nil)).Elem()\n}\n\ntype VAppIPAssignmentInfoAllocationSchemes string\n\nconst (\n\tVAppIPAssignmentInfoAllocationSchemesDhcp   = VAppIPAssignmentInfoAllocationSchemes(\"dhcp\")\n\tVAppIPAssignmentInfoAllocationSchemesOvfenv = VAppIPAssignmentInfoAllocationSchemes(\"ovfenv\")\n)\n\nfunc init() {\n\tt[\"VAppIPAssignmentInfoAllocationSchemes\"] = reflect.TypeOf((*VAppIPAssignmentInfoAllocationSchemes)(nil)).Elem()\n}\n\ntype VAppIPAssignmentInfoIpAllocationPolicy string\n\nconst (\n\tVAppIPAssignmentInfoIpAllocationPolicyDhcpPolicy           = VAppIPAssignmentInfoIpAllocationPolicy(\"dhcpPolicy\")\n\tVAppIPAssignmentInfoIpAllocationPolicyTransientPolicy      = VAppIPAssignmentInfoIpAllocationPolicy(\"transientPolicy\")\n\tVAppIPAssignmentInfoIpAllocationPolicyFixedPolicy          = VAppIPAssignmentInfoIpAllocationPolicy(\"fixedPolicy\")\n\tVAppIPAssignmentInfoIpAllocationPolicyFixedAllocatedPolicy = VAppIPAssignmentInfoIpAllocationPolicy(\"fixedAllocatedPolicy\")\n)\n\nfunc init() {\n\tt[\"VAppIPAssignmentInfoIpAllocationPolicy\"] = reflect.TypeOf((*VAppIPAssignmentInfoIpAllocationPolicy)(nil)).Elem()\n}\n\ntype VAppIPAssignmentInfoProtocols string\n\nconst (\n\tVAppIPAssignmentInfoProtocolsIPv4 = VAppIPAssignmentInfoProtocols(\"IPv4\")\n\tVAppIPAssignmentInfoProtocolsIPv6 = VAppIPAssignmentInfoProtocols(\"IPv6\")\n)\n\nfunc init() {\n\tt[\"VAppIPAssignmentInfoProtocols\"] = reflect.TypeOf((*VAppIPAssignmentInfoProtocols)(nil)).Elem()\n}\n\ntype VFlashModuleNotSupportedReason string\n\nconst (\n\tVFlashModuleNotSupportedReasonCacheModeNotSupported            = VFlashModuleNotSupportedReason(\"CacheModeNotSupported\")\n\tVFlashModuleNotSupportedReasonCacheConsistencyTypeNotSupported = 
VFlashModuleNotSupportedReason(\"CacheConsistencyTypeNotSupported\")\n\tVFlashModuleNotSupportedReasonCacheBlockSizeNotSupported       = VFlashModuleNotSupportedReason(\"CacheBlockSizeNotSupported\")\n\tVFlashModuleNotSupportedReasonCacheReservationNotSupported     = VFlashModuleNotSupportedReason(\"CacheReservationNotSupported\")\n\tVFlashModuleNotSupportedReasonDiskSizeNotSupported             = VFlashModuleNotSupportedReason(\"DiskSizeNotSupported\")\n)\n\nfunc init() {\n\tt[\"VFlashModuleNotSupportedReason\"] = reflect.TypeOf((*VFlashModuleNotSupportedReason)(nil)).Elem()\n}\n\ntype VMotionCompatibilityType string\n\nconst (\n\tVMotionCompatibilityTypeCpu      = VMotionCompatibilityType(\"cpu\")\n\tVMotionCompatibilityTypeSoftware = VMotionCompatibilityType(\"software\")\n)\n\nfunc init() {\n\tt[\"VMotionCompatibilityType\"] = reflect.TypeOf((*VMotionCompatibilityType)(nil)).Elem()\n}\n\ntype VMwareDVSTeamingMatchStatus string\n\nconst (\n\tVMwareDVSTeamingMatchStatusIphashMatch       = VMwareDVSTeamingMatchStatus(\"iphashMatch\")\n\tVMwareDVSTeamingMatchStatusNonIphashMatch    = VMwareDVSTeamingMatchStatus(\"nonIphashMatch\")\n\tVMwareDVSTeamingMatchStatusIphashMismatch    = VMwareDVSTeamingMatchStatus(\"iphashMismatch\")\n\tVMwareDVSTeamingMatchStatusNonIphashMismatch = VMwareDVSTeamingMatchStatus(\"nonIphashMismatch\")\n)\n\nfunc init() {\n\tt[\"VMwareDVSTeamingMatchStatus\"] = reflect.TypeOf((*VMwareDVSTeamingMatchStatus)(nil)).Elem()\n}\n\ntype VMwareDVSVspanSessionEncapType string\n\nconst (\n\tVMwareDVSVspanSessionEncapTypeGre     = VMwareDVSVspanSessionEncapType(\"gre\")\n\tVMwareDVSVspanSessionEncapTypeErspan2 = VMwareDVSVspanSessionEncapType(\"erspan2\")\n\tVMwareDVSVspanSessionEncapTypeErspan3 = VMwareDVSVspanSessionEncapType(\"erspan3\")\n)\n\nfunc init() {\n\tt[\"VMwareDVSVspanSessionEncapType\"] = reflect.TypeOf((*VMwareDVSVspanSessionEncapType)(nil)).Elem()\n}\n\ntype VMwareDVSVspanSessionType string\n\nconst 
(\n\tVMwareDVSVspanSessionTypeMixedDestMirror                = VMwareDVSVspanSessionType(\"mixedDestMirror\")\n\tVMwareDVSVspanSessionTypeDvPortMirror                   = VMwareDVSVspanSessionType(\"dvPortMirror\")\n\tVMwareDVSVspanSessionTypeRemoteMirrorSource             = VMwareDVSVspanSessionType(\"remoteMirrorSource\")\n\tVMwareDVSVspanSessionTypeRemoteMirrorDest               = VMwareDVSVspanSessionType(\"remoteMirrorDest\")\n\tVMwareDVSVspanSessionTypeEncapsulatedRemoteMirrorSource = VMwareDVSVspanSessionType(\"encapsulatedRemoteMirrorSource\")\n)\n\nfunc init() {\n\tt[\"VMwareDVSVspanSessionType\"] = reflect.TypeOf((*VMwareDVSVspanSessionType)(nil)).Elem()\n}\n\ntype VMwareDvsLacpApiVersion string\n\nconst (\n\tVMwareDvsLacpApiVersionSingleLag   = VMwareDvsLacpApiVersion(\"singleLag\")\n\tVMwareDvsLacpApiVersionMultipleLag = VMwareDvsLacpApiVersion(\"multipleLag\")\n)\n\nfunc init() {\n\tt[\"VMwareDvsLacpApiVersion\"] = reflect.TypeOf((*VMwareDvsLacpApiVersion)(nil)).Elem()\n}\n\ntype VMwareDvsLacpLoadBalanceAlgorithm string\n\nconst (\n\tVMwareDvsLacpLoadBalanceAlgorithmSrcMac                  = VMwareDvsLacpLoadBalanceAlgorithm(\"srcMac\")\n\tVMwareDvsLacpLoadBalanceAlgorithmDestMac                 = VMwareDvsLacpLoadBalanceAlgorithm(\"destMac\")\n\tVMwareDvsLacpLoadBalanceAlgorithmSrcDestMac              = VMwareDvsLacpLoadBalanceAlgorithm(\"srcDestMac\")\n\tVMwareDvsLacpLoadBalanceAlgorithmDestIpVlan              = VMwareDvsLacpLoadBalanceAlgorithm(\"destIpVlan\")\n\tVMwareDvsLacpLoadBalanceAlgorithmSrcIpVlan               = VMwareDvsLacpLoadBalanceAlgorithm(\"srcIpVlan\")\n\tVMwareDvsLacpLoadBalanceAlgorithmSrcDestIpVlan           = VMwareDvsLacpLoadBalanceAlgorithm(\"srcDestIpVlan\")\n\tVMwareDvsLacpLoadBalanceAlgorithmDestTcpUdpPort          = VMwareDvsLacpLoadBalanceAlgorithm(\"destTcpUdpPort\")\n\tVMwareDvsLacpLoadBalanceAlgorithmSrcTcpUdpPort           = 
VMwareDvsLacpLoadBalanceAlgorithm(\"srcTcpUdpPort\")\n\tVMwareDvsLacpLoadBalanceAlgorithmSrcDestTcpUdpPort       = VMwareDvsLacpLoadBalanceAlgorithm(\"srcDestTcpUdpPort\")\n\tVMwareDvsLacpLoadBalanceAlgorithmDestIpTcpUdpPort        = VMwareDvsLacpLoadBalanceAlgorithm(\"destIpTcpUdpPort\")\n\tVMwareDvsLacpLoadBalanceAlgorithmSrcIpTcpUdpPort         = VMwareDvsLacpLoadBalanceAlgorithm(\"srcIpTcpUdpPort\")\n\tVMwareDvsLacpLoadBalanceAlgorithmSrcDestIpTcpUdpPort     = VMwareDvsLacpLoadBalanceAlgorithm(\"srcDestIpTcpUdpPort\")\n\tVMwareDvsLacpLoadBalanceAlgorithmDestIpTcpUdpPortVlan    = VMwareDvsLacpLoadBalanceAlgorithm(\"destIpTcpUdpPortVlan\")\n\tVMwareDvsLacpLoadBalanceAlgorithmSrcIpTcpUdpPortVlan     = VMwareDvsLacpLoadBalanceAlgorithm(\"srcIpTcpUdpPortVlan\")\n\tVMwareDvsLacpLoadBalanceAlgorithmSrcDestIpTcpUdpPortVlan = VMwareDvsLacpLoadBalanceAlgorithm(\"srcDestIpTcpUdpPortVlan\")\n\tVMwareDvsLacpLoadBalanceAlgorithmDestIp                  = VMwareDvsLacpLoadBalanceAlgorithm(\"destIp\")\n\tVMwareDvsLacpLoadBalanceAlgorithmSrcIp                   = VMwareDvsLacpLoadBalanceAlgorithm(\"srcIp\")\n\tVMwareDvsLacpLoadBalanceAlgorithmSrcDestIp               = VMwareDvsLacpLoadBalanceAlgorithm(\"srcDestIp\")\n\tVMwareDvsLacpLoadBalanceAlgorithmVlan                    = VMwareDvsLacpLoadBalanceAlgorithm(\"vlan\")\n\tVMwareDvsLacpLoadBalanceAlgorithmSrcPortId               = VMwareDvsLacpLoadBalanceAlgorithm(\"srcPortId\")\n)\n\nfunc init() {\n\tt[\"VMwareDvsLacpLoadBalanceAlgorithm\"] = reflect.TypeOf((*VMwareDvsLacpLoadBalanceAlgorithm)(nil)).Elem()\n}\n\ntype VMwareDvsMulticastFilteringMode string\n\nconst (\n\tVMwareDvsMulticastFilteringModeLegacyFiltering = VMwareDvsMulticastFilteringMode(\"legacyFiltering\")\n\tVMwareDvsMulticastFilteringModeSnooping        = VMwareDvsMulticastFilteringMode(\"snooping\")\n)\n\nfunc init() {\n\tt[\"VMwareDvsMulticastFilteringMode\"] = reflect.TypeOf((*VMwareDvsMulticastFilteringMode)(nil)).Elem()\n}\n\ntype VMwareUplinkLacpMode 
string\n\nconst (\n\tVMwareUplinkLacpModeActive  = VMwareUplinkLacpMode(\"active\")\n\tVMwareUplinkLacpModePassive = VMwareUplinkLacpMode(\"passive\")\n)\n\nfunc init() {\n\tt[\"VMwareUplinkLacpMode\"] = reflect.TypeOf((*VMwareUplinkLacpMode)(nil)).Elem()\n}\n\ntype VStorageObjectConsumptionType string\n\nconst (\n\tVStorageObjectConsumptionTypeDisk = VStorageObjectConsumptionType(\"disk\")\n)\n\nfunc init() {\n\tt[\"VStorageObjectConsumptionType\"] = reflect.TypeOf((*VStorageObjectConsumptionType)(nil)).Elem()\n}\n\ntype ValidateMigrationTestType string\n\nconst (\n\tValidateMigrationTestTypeSourceTests            = ValidateMigrationTestType(\"sourceTests\")\n\tValidateMigrationTestTypeCompatibilityTests     = ValidateMigrationTestType(\"compatibilityTests\")\n\tValidateMigrationTestTypeDiskAccessibilityTests = ValidateMigrationTestType(\"diskAccessibilityTests\")\n\tValidateMigrationTestTypeResourceTests          = ValidateMigrationTestType(\"resourceTests\")\n)\n\nfunc init() {\n\tt[\"ValidateMigrationTestType\"] = reflect.TypeOf((*ValidateMigrationTestType)(nil)).Elem()\n}\n\ntype VchaClusterMode string\n\nconst (\n\tVchaClusterModeEnabled     = VchaClusterMode(\"enabled\")\n\tVchaClusterModeDisabled    = VchaClusterMode(\"disabled\")\n\tVchaClusterModeMaintenance = VchaClusterMode(\"maintenance\")\n)\n\nfunc init() {\n\tt[\"VchaClusterMode\"] = reflect.TypeOf((*VchaClusterMode)(nil)).Elem()\n}\n\ntype VchaClusterState string\n\nconst (\n\tVchaClusterStateHealthy  = VchaClusterState(\"healthy\")\n\tVchaClusterStateDegraded = VchaClusterState(\"degraded\")\n\tVchaClusterStateIsolated = VchaClusterState(\"isolated\")\n)\n\nfunc init() {\n\tt[\"VchaClusterState\"] = reflect.TypeOf((*VchaClusterState)(nil)).Elem()\n}\n\ntype VchaNodeRole string\n\nconst (\n\tVchaNodeRoleActive  = VchaNodeRole(\"active\")\n\tVchaNodeRolePassive = VchaNodeRole(\"passive\")\n\tVchaNodeRoleWitness = VchaNodeRole(\"witness\")\n)\n\nfunc init() {\n\tt[\"VchaNodeRole\"] = 
reflect.TypeOf((*VchaNodeRole)(nil)).Elem()\n}\n\ntype VchaNodeState string\n\nconst (\n\tVchaNodeStateUp   = VchaNodeState(\"up\")\n\tVchaNodeStateDown = VchaNodeState(\"down\")\n)\n\nfunc init() {\n\tt[\"VchaNodeState\"] = reflect.TypeOf((*VchaNodeState)(nil)).Elem()\n}\n\ntype VchaState string\n\nconst (\n\tVchaStateConfigured    = VchaState(\"configured\")\n\tVchaStateNotConfigured = VchaState(\"notConfigured\")\n\tVchaStateInvalid       = VchaState(\"invalid\")\n\tVchaStatePrepared      = VchaState(\"prepared\")\n)\n\nfunc init() {\n\tt[\"VchaState\"] = reflect.TypeOf((*VchaState)(nil)).Elem()\n}\n\ntype VirtualAppVAppState string\n\nconst (\n\tVirtualAppVAppStateStarted  = VirtualAppVAppState(\"started\")\n\tVirtualAppVAppStateStopped  = VirtualAppVAppState(\"stopped\")\n\tVirtualAppVAppStateStarting = VirtualAppVAppState(\"starting\")\n\tVirtualAppVAppStateStopping = VirtualAppVAppState(\"stopping\")\n)\n\nfunc init() {\n\tt[\"VirtualAppVAppState\"] = reflect.TypeOf((*VirtualAppVAppState)(nil)).Elem()\n}\n\ntype VirtualDeviceConfigSpecFileOperation string\n\nconst (\n\tVirtualDeviceConfigSpecFileOperationCreate  = VirtualDeviceConfigSpecFileOperation(\"create\")\n\tVirtualDeviceConfigSpecFileOperationDestroy = VirtualDeviceConfigSpecFileOperation(\"destroy\")\n\tVirtualDeviceConfigSpecFileOperationReplace = VirtualDeviceConfigSpecFileOperation(\"replace\")\n)\n\nfunc init() {\n\tt[\"VirtualDeviceConfigSpecFileOperation\"] = reflect.TypeOf((*VirtualDeviceConfigSpecFileOperation)(nil)).Elem()\n}\n\ntype VirtualDeviceConfigSpecOperation string\n\nconst (\n\tVirtualDeviceConfigSpecOperationAdd    = VirtualDeviceConfigSpecOperation(\"add\")\n\tVirtualDeviceConfigSpecOperationRemove = VirtualDeviceConfigSpecOperation(\"remove\")\n\tVirtualDeviceConfigSpecOperationEdit   = VirtualDeviceConfigSpecOperation(\"edit\")\n)\n\nfunc init() {\n\tt[\"VirtualDeviceConfigSpecOperation\"] = reflect.TypeOf((*VirtualDeviceConfigSpecOperation)(nil)).Elem()\n}\n\ntype 
VirtualDeviceConnectInfoStatus string\n\nconst (\n\tVirtualDeviceConnectInfoStatusOk                 = VirtualDeviceConnectInfoStatus(\"ok\")\n\tVirtualDeviceConnectInfoStatusRecoverableError   = VirtualDeviceConnectInfoStatus(\"recoverableError\")\n\tVirtualDeviceConnectInfoStatusUnrecoverableError = VirtualDeviceConnectInfoStatus(\"unrecoverableError\")\n\tVirtualDeviceConnectInfoStatusUntried            = VirtualDeviceConnectInfoStatus(\"untried\")\n)\n\nfunc init() {\n\tt[\"VirtualDeviceConnectInfoStatus\"] = reflect.TypeOf((*VirtualDeviceConnectInfoStatus)(nil)).Elem()\n}\n\ntype VirtualDeviceFileExtension string\n\nconst (\n\tVirtualDeviceFileExtensionIso  = VirtualDeviceFileExtension(\"iso\")\n\tVirtualDeviceFileExtensionFlp  = VirtualDeviceFileExtension(\"flp\")\n\tVirtualDeviceFileExtensionVmdk = VirtualDeviceFileExtension(\"vmdk\")\n\tVirtualDeviceFileExtensionDsk  = VirtualDeviceFileExtension(\"dsk\")\n\tVirtualDeviceFileExtensionRdm  = VirtualDeviceFileExtension(\"rdm\")\n)\n\nfunc init() {\n\tt[\"VirtualDeviceFileExtension\"] = reflect.TypeOf((*VirtualDeviceFileExtension)(nil)).Elem()\n}\n\ntype VirtualDeviceURIBackingOptionDirection string\n\nconst (\n\tVirtualDeviceURIBackingOptionDirectionServer = VirtualDeviceURIBackingOptionDirection(\"server\")\n\tVirtualDeviceURIBackingOptionDirectionClient = VirtualDeviceURIBackingOptionDirection(\"client\")\n)\n\nfunc init() {\n\tt[\"VirtualDeviceURIBackingOptionDirection\"] = reflect.TypeOf((*VirtualDeviceURIBackingOptionDirection)(nil)).Elem()\n}\n\ntype VirtualDiskAdapterType string\n\nconst (\n\tVirtualDiskAdapterTypeIde      = VirtualDiskAdapterType(\"ide\")\n\tVirtualDiskAdapterTypeBusLogic = VirtualDiskAdapterType(\"busLogic\")\n\tVirtualDiskAdapterTypeLsiLogic = VirtualDiskAdapterType(\"lsiLogic\")\n)\n\nfunc init() {\n\tt[\"VirtualDiskAdapterType\"] = reflect.TypeOf((*VirtualDiskAdapterType)(nil)).Elem()\n}\n\ntype VirtualDiskCompatibilityMode string\n\nconst 
(\n\tVirtualDiskCompatibilityModeVirtualMode  = VirtualDiskCompatibilityMode(\"virtualMode\")\n\tVirtualDiskCompatibilityModePhysicalMode = VirtualDiskCompatibilityMode(\"physicalMode\")\n)\n\nfunc init() {\n\tt[\"VirtualDiskCompatibilityMode\"] = reflect.TypeOf((*VirtualDiskCompatibilityMode)(nil)).Elem()\n}\n\ntype VirtualDiskDeltaDiskFormat string\n\nconst (\n\tVirtualDiskDeltaDiskFormatRedoLogFormat  = VirtualDiskDeltaDiskFormat(\"redoLogFormat\")\n\tVirtualDiskDeltaDiskFormatNativeFormat   = VirtualDiskDeltaDiskFormat(\"nativeFormat\")\n\tVirtualDiskDeltaDiskFormatSeSparseFormat = VirtualDiskDeltaDiskFormat(\"seSparseFormat\")\n)\n\nfunc init() {\n\tt[\"VirtualDiskDeltaDiskFormat\"] = reflect.TypeOf((*VirtualDiskDeltaDiskFormat)(nil)).Elem()\n}\n\ntype VirtualDiskDeltaDiskFormatVariant string\n\nconst (\n\tVirtualDiskDeltaDiskFormatVariantVmfsSparseVariant = VirtualDiskDeltaDiskFormatVariant(\"vmfsSparseVariant\")\n\tVirtualDiskDeltaDiskFormatVariantVsanSparseVariant = VirtualDiskDeltaDiskFormatVariant(\"vsanSparseVariant\")\n)\n\nfunc init() {\n\tt[\"VirtualDiskDeltaDiskFormatVariant\"] = reflect.TypeOf((*VirtualDiskDeltaDiskFormatVariant)(nil)).Elem()\n}\n\ntype VirtualDiskMode string\n\nconst (\n\tVirtualDiskModePersistent                = VirtualDiskMode(\"persistent\")\n\tVirtualDiskModeNonpersistent             = VirtualDiskMode(\"nonpersistent\")\n\tVirtualDiskModeUndoable                  = VirtualDiskMode(\"undoable\")\n\tVirtualDiskModeIndependent_persistent    = VirtualDiskMode(\"independent_persistent\")\n\tVirtualDiskModeIndependent_nonpersistent = VirtualDiskMode(\"independent_nonpersistent\")\n\tVirtualDiskModeAppend                    = VirtualDiskMode(\"append\")\n)\n\nfunc init() {\n\tt[\"VirtualDiskMode\"] = reflect.TypeOf((*VirtualDiskMode)(nil)).Elem()\n}\n\ntype VirtualDiskSharing string\n\nconst (\n\tVirtualDiskSharingSharingNone        = VirtualDiskSharing(\"sharingNone\")\n\tVirtualDiskSharingSharingMultiWriter = 
VirtualDiskSharing(\"sharingMultiWriter\")\n)\n\nfunc init() {\n\tt[\"VirtualDiskSharing\"] = reflect.TypeOf((*VirtualDiskSharing)(nil)).Elem()\n}\n\ntype VirtualDiskType string\n\nconst (\n\tVirtualDiskTypePreallocated     = VirtualDiskType(\"preallocated\")\n\tVirtualDiskTypeThin             = VirtualDiskType(\"thin\")\n\tVirtualDiskTypeSeSparse         = VirtualDiskType(\"seSparse\")\n\tVirtualDiskTypeRdm              = VirtualDiskType(\"rdm\")\n\tVirtualDiskTypeRdmp             = VirtualDiskType(\"rdmp\")\n\tVirtualDiskTypeRaw              = VirtualDiskType(\"raw\")\n\tVirtualDiskTypeDelta            = VirtualDiskType(\"delta\")\n\tVirtualDiskTypeSparse2Gb        = VirtualDiskType(\"sparse2Gb\")\n\tVirtualDiskTypeThick2Gb         = VirtualDiskType(\"thick2Gb\")\n\tVirtualDiskTypeEagerZeroedThick = VirtualDiskType(\"eagerZeroedThick\")\n\tVirtualDiskTypeSparseMonolithic = VirtualDiskType(\"sparseMonolithic\")\n\tVirtualDiskTypeFlatMonolithic   = VirtualDiskType(\"flatMonolithic\")\n\tVirtualDiskTypeThick            = VirtualDiskType(\"thick\")\n)\n\nfunc init() {\n\tt[\"VirtualDiskType\"] = reflect.TypeOf((*VirtualDiskType)(nil)).Elem()\n}\n\ntype VirtualDiskVFlashCacheConfigInfoCacheConsistencyType string\n\nconst (\n\tVirtualDiskVFlashCacheConfigInfoCacheConsistencyTypeStrong = VirtualDiskVFlashCacheConfigInfoCacheConsistencyType(\"strong\")\n\tVirtualDiskVFlashCacheConfigInfoCacheConsistencyTypeWeak   = VirtualDiskVFlashCacheConfigInfoCacheConsistencyType(\"weak\")\n)\n\nfunc init() {\n\tt[\"VirtualDiskVFlashCacheConfigInfoCacheConsistencyType\"] = reflect.TypeOf((*VirtualDiskVFlashCacheConfigInfoCacheConsistencyType)(nil)).Elem()\n}\n\ntype VirtualDiskVFlashCacheConfigInfoCacheMode string\n\nconst (\n\tVirtualDiskVFlashCacheConfigInfoCacheModeWrite_thru = VirtualDiskVFlashCacheConfigInfoCacheMode(\"write_thru\")\n\tVirtualDiskVFlashCacheConfigInfoCacheModeWrite_back = VirtualDiskVFlashCacheConfigInfoCacheMode(\"write_back\")\n)\n\nfunc init() 
{\n\tt[\"VirtualDiskVFlashCacheConfigInfoCacheMode\"] = reflect.TypeOf((*VirtualDiskVFlashCacheConfigInfoCacheMode)(nil)).Elem()\n}\n\ntype VirtualEthernetCardLegacyNetworkDeviceName string\n\nconst (\n\tVirtualEthernetCardLegacyNetworkDeviceNameBridged  = VirtualEthernetCardLegacyNetworkDeviceName(\"bridged\")\n\tVirtualEthernetCardLegacyNetworkDeviceNameNat      = VirtualEthernetCardLegacyNetworkDeviceName(\"nat\")\n\tVirtualEthernetCardLegacyNetworkDeviceNameHostonly = VirtualEthernetCardLegacyNetworkDeviceName(\"hostonly\")\n)\n\nfunc init() {\n\tt[\"VirtualEthernetCardLegacyNetworkDeviceName\"] = reflect.TypeOf((*VirtualEthernetCardLegacyNetworkDeviceName)(nil)).Elem()\n}\n\ntype VirtualEthernetCardMacType string\n\nconst (\n\tVirtualEthernetCardMacTypeManual    = VirtualEthernetCardMacType(\"manual\")\n\tVirtualEthernetCardMacTypeGenerated = VirtualEthernetCardMacType(\"generated\")\n\tVirtualEthernetCardMacTypeAssigned  = VirtualEthernetCardMacType(\"assigned\")\n)\n\nfunc init() {\n\tt[\"VirtualEthernetCardMacType\"] = reflect.TypeOf((*VirtualEthernetCardMacType)(nil)).Elem()\n}\n\ntype VirtualMachineAppHeartbeatStatusType string\n\nconst (\n\tVirtualMachineAppHeartbeatStatusTypeAppStatusGray  = VirtualMachineAppHeartbeatStatusType(\"appStatusGray\")\n\tVirtualMachineAppHeartbeatStatusTypeAppStatusGreen = VirtualMachineAppHeartbeatStatusType(\"appStatusGreen\")\n\tVirtualMachineAppHeartbeatStatusTypeAppStatusRed   = VirtualMachineAppHeartbeatStatusType(\"appStatusRed\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineAppHeartbeatStatusType\"] = reflect.TypeOf((*VirtualMachineAppHeartbeatStatusType)(nil)).Elem()\n}\n\ntype VirtualMachineBootOptionsNetworkBootProtocolType string\n\nconst (\n\tVirtualMachineBootOptionsNetworkBootProtocolTypeIpv4 = VirtualMachineBootOptionsNetworkBootProtocolType(\"ipv4\")\n\tVirtualMachineBootOptionsNetworkBootProtocolTypeIpv6 = VirtualMachineBootOptionsNetworkBootProtocolType(\"ipv6\")\n)\n\nfunc init() 
{\n\tt[\"VirtualMachineBootOptionsNetworkBootProtocolType\"] = reflect.TypeOf((*VirtualMachineBootOptionsNetworkBootProtocolType)(nil)).Elem()\n}\n\ntype VirtualMachineConfigInfoNpivWwnType string\n\nconst (\n\tVirtualMachineConfigInfoNpivWwnTypeVc       = VirtualMachineConfigInfoNpivWwnType(\"vc\")\n\tVirtualMachineConfigInfoNpivWwnTypeHost     = VirtualMachineConfigInfoNpivWwnType(\"host\")\n\tVirtualMachineConfigInfoNpivWwnTypeExternal = VirtualMachineConfigInfoNpivWwnType(\"external\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineConfigInfoNpivWwnType\"] = reflect.TypeOf((*VirtualMachineConfigInfoNpivWwnType)(nil)).Elem()\n}\n\ntype VirtualMachineConfigInfoSwapPlacementType string\n\nconst (\n\tVirtualMachineConfigInfoSwapPlacementTypeInherit     = VirtualMachineConfigInfoSwapPlacementType(\"inherit\")\n\tVirtualMachineConfigInfoSwapPlacementTypeVmDirectory = VirtualMachineConfigInfoSwapPlacementType(\"vmDirectory\")\n\tVirtualMachineConfigInfoSwapPlacementTypeHostLocal   = VirtualMachineConfigInfoSwapPlacementType(\"hostLocal\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineConfigInfoSwapPlacementType\"] = reflect.TypeOf((*VirtualMachineConfigInfoSwapPlacementType)(nil)).Elem()\n}\n\ntype VirtualMachineConfigSpecEncryptedVMotionModes string\n\nconst (\n\tVirtualMachineConfigSpecEncryptedVMotionModesDisabled      = VirtualMachineConfigSpecEncryptedVMotionModes(\"disabled\")\n\tVirtualMachineConfigSpecEncryptedVMotionModesOpportunistic = VirtualMachineConfigSpecEncryptedVMotionModes(\"opportunistic\")\n\tVirtualMachineConfigSpecEncryptedVMotionModesRequired      = VirtualMachineConfigSpecEncryptedVMotionModes(\"required\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineConfigSpecEncryptedVMotionModes\"] = reflect.TypeOf((*VirtualMachineConfigSpecEncryptedVMotionModes)(nil)).Elem()\n}\n\ntype VirtualMachineConfigSpecNpivWwnOp string\n\nconst (\n\tVirtualMachineConfigSpecNpivWwnOpGenerate = VirtualMachineConfigSpecNpivWwnOp(\"generate\")\n\tVirtualMachineConfigSpecNpivWwnOpSet   
   = VirtualMachineConfigSpecNpivWwnOp(\"set\")\n\tVirtualMachineConfigSpecNpivWwnOpRemove   = VirtualMachineConfigSpecNpivWwnOp(\"remove\")\n\tVirtualMachineConfigSpecNpivWwnOpExtend   = VirtualMachineConfigSpecNpivWwnOp(\"extend\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineConfigSpecNpivWwnOp\"] = reflect.TypeOf((*VirtualMachineConfigSpecNpivWwnOp)(nil)).Elem()\n}\n\ntype VirtualMachineConnectionState string\n\nconst (\n\tVirtualMachineConnectionStateConnected    = VirtualMachineConnectionState(\"connected\")\n\tVirtualMachineConnectionStateDisconnected = VirtualMachineConnectionState(\"disconnected\")\n\tVirtualMachineConnectionStateOrphaned     = VirtualMachineConnectionState(\"orphaned\")\n\tVirtualMachineConnectionStateInaccessible = VirtualMachineConnectionState(\"inaccessible\")\n\tVirtualMachineConnectionStateInvalid      = VirtualMachineConnectionState(\"invalid\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineConnectionState\"] = reflect.TypeOf((*VirtualMachineConnectionState)(nil)).Elem()\n}\n\ntype VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther string\n\nconst (\n\tVirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOtherVmNptIncompatibleHost    = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther(\"vmNptIncompatibleHost\")\n\tVirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOtherVmNptIncompatibleNetwork = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther(\"vmNptIncompatibleNetwork\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther\"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther)(nil)).Elem()\n}\n\ntype 
VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm string\n\nconst (\n\tVirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleGuest                      = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm(\"vmNptIncompatibleGuest\")\n\tVirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleGuestDriver                = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm(\"vmNptIncompatibleGuestDriver\")\n\tVirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleAdapterType                = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm(\"vmNptIncompatibleAdapterType\")\n\tVirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptDisabledOrDisconnectedAdapter          = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm(\"vmNptDisabledOrDisconnectedAdapter\")\n\tVirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleAdapterFeatures            = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm(\"vmNptIncompatibleAdapterFeatures\")\n\tVirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleBackingType                = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm(\"vmNptIncompatibleBackingType\")\n\tVirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptInsufficientMemoryReservation          = 
VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm(\"vmNptInsufficientMemoryReservation\")\n\tVirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptFaultToleranceOrRecordReplayConfigured = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm(\"vmNptFaultToleranceOrRecordReplayConfigured\")\n\tVirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptConflictingIOChainConfigured           = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm(\"vmNptConflictingIOChainConfigured\")\n\tVirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptMonitorBlocks                          = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm(\"vmNptMonitorBlocks\")\n\tVirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptConflictingOperationInProgress         = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm(\"vmNptConflictingOperationInProgress\")\n\tVirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptRuntimeError                           = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm(\"vmNptRuntimeError\")\n\tVirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptOutOfIntrVector                        = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm(\"vmNptOutOfIntrVector\")\n\tVirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptVMCIActive                             = 
VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm(\"vmNptVMCIActive\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm\"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm)(nil)).Elem()\n}\n\ntype VirtualMachineFaultToleranceState string\n\nconst (\n\tVirtualMachineFaultToleranceStateNotConfigured = VirtualMachineFaultToleranceState(\"notConfigured\")\n\tVirtualMachineFaultToleranceStateDisabled      = VirtualMachineFaultToleranceState(\"disabled\")\n\tVirtualMachineFaultToleranceStateEnabled       = VirtualMachineFaultToleranceState(\"enabled\")\n\tVirtualMachineFaultToleranceStateNeedSecondary = VirtualMachineFaultToleranceState(\"needSecondary\")\n\tVirtualMachineFaultToleranceStateStarting      = VirtualMachineFaultToleranceState(\"starting\")\n\tVirtualMachineFaultToleranceStateRunning       = VirtualMachineFaultToleranceState(\"running\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineFaultToleranceState\"] = reflect.TypeOf((*VirtualMachineFaultToleranceState)(nil)).Elem()\n}\n\ntype VirtualMachineFaultToleranceType string\n\nconst (\n\tVirtualMachineFaultToleranceTypeUnset         = VirtualMachineFaultToleranceType(\"unset\")\n\tVirtualMachineFaultToleranceTypeRecordReplay  = VirtualMachineFaultToleranceType(\"recordReplay\")\n\tVirtualMachineFaultToleranceTypeCheckpointing = VirtualMachineFaultToleranceType(\"checkpointing\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineFaultToleranceType\"] = reflect.TypeOf((*VirtualMachineFaultToleranceType)(nil)).Elem()\n}\n\ntype VirtualMachineFileLayoutExFileType string\n\nconst (\n\tVirtualMachineFileLayoutExFileTypeConfig               = VirtualMachineFileLayoutExFileType(\"config\")\n\tVirtualMachineFileLayoutExFileTypeExtendedConfig       = VirtualMachineFileLayoutExFileType(\"extendedConfig\")\n\tVirtualMachineFileLayoutExFileTypeDiskDescriptor 
      = VirtualMachineFileLayoutExFileType(\"diskDescriptor\")\n\tVirtualMachineFileLayoutExFileTypeDiskExtent           = VirtualMachineFileLayoutExFileType(\"diskExtent\")\n\tVirtualMachineFileLayoutExFileTypeDigestDescriptor     = VirtualMachineFileLayoutExFileType(\"digestDescriptor\")\n\tVirtualMachineFileLayoutExFileTypeDigestExtent         = VirtualMachineFileLayoutExFileType(\"digestExtent\")\n\tVirtualMachineFileLayoutExFileTypeDiskReplicationState = VirtualMachineFileLayoutExFileType(\"diskReplicationState\")\n\tVirtualMachineFileLayoutExFileTypeLog                  = VirtualMachineFileLayoutExFileType(\"log\")\n\tVirtualMachineFileLayoutExFileTypeStat                 = VirtualMachineFileLayoutExFileType(\"stat\")\n\tVirtualMachineFileLayoutExFileTypeNamespaceData        = VirtualMachineFileLayoutExFileType(\"namespaceData\")\n\tVirtualMachineFileLayoutExFileTypeNvram                = VirtualMachineFileLayoutExFileType(\"nvram\")\n\tVirtualMachineFileLayoutExFileTypeSnapshotData         = VirtualMachineFileLayoutExFileType(\"snapshotData\")\n\tVirtualMachineFileLayoutExFileTypeSnapshotMemory       = VirtualMachineFileLayoutExFileType(\"snapshotMemory\")\n\tVirtualMachineFileLayoutExFileTypeSnapshotList         = VirtualMachineFileLayoutExFileType(\"snapshotList\")\n\tVirtualMachineFileLayoutExFileTypeSnapshotManifestList = VirtualMachineFileLayoutExFileType(\"snapshotManifestList\")\n\tVirtualMachineFileLayoutExFileTypeSuspend              = VirtualMachineFileLayoutExFileType(\"suspend\")\n\tVirtualMachineFileLayoutExFileTypeSuspendMemory        = VirtualMachineFileLayoutExFileType(\"suspendMemory\")\n\tVirtualMachineFileLayoutExFileTypeSwap                 = VirtualMachineFileLayoutExFileType(\"swap\")\n\tVirtualMachineFileLayoutExFileTypeUwswap               = VirtualMachineFileLayoutExFileType(\"uwswap\")\n\tVirtualMachineFileLayoutExFileTypeCore                 = 
VirtualMachineFileLayoutExFileType(\"core\")\n\tVirtualMachineFileLayoutExFileTypeScreenshot           = VirtualMachineFileLayoutExFileType(\"screenshot\")\n\tVirtualMachineFileLayoutExFileTypeFtMetadata           = VirtualMachineFileLayoutExFileType(\"ftMetadata\")\n\tVirtualMachineFileLayoutExFileTypeGuestCustomization   = VirtualMachineFileLayoutExFileType(\"guestCustomization\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineFileLayoutExFileType\"] = reflect.TypeOf((*VirtualMachineFileLayoutExFileType)(nil)).Elem()\n}\n\ntype VirtualMachineFlagInfoMonitorType string\n\nconst (\n\tVirtualMachineFlagInfoMonitorTypeRelease = VirtualMachineFlagInfoMonitorType(\"release\")\n\tVirtualMachineFlagInfoMonitorTypeDebug   = VirtualMachineFlagInfoMonitorType(\"debug\")\n\tVirtualMachineFlagInfoMonitorTypeStats   = VirtualMachineFlagInfoMonitorType(\"stats\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineFlagInfoMonitorType\"] = reflect.TypeOf((*VirtualMachineFlagInfoMonitorType)(nil)).Elem()\n}\n\ntype VirtualMachineFlagInfoVirtualExecUsage string\n\nconst (\n\tVirtualMachineFlagInfoVirtualExecUsageHvAuto = VirtualMachineFlagInfoVirtualExecUsage(\"hvAuto\")\n\tVirtualMachineFlagInfoVirtualExecUsageHvOn   = VirtualMachineFlagInfoVirtualExecUsage(\"hvOn\")\n\tVirtualMachineFlagInfoVirtualExecUsageHvOff  = VirtualMachineFlagInfoVirtualExecUsage(\"hvOff\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineFlagInfoVirtualExecUsage\"] = reflect.TypeOf((*VirtualMachineFlagInfoVirtualExecUsage)(nil)).Elem()\n}\n\ntype VirtualMachineFlagInfoVirtualMmuUsage string\n\nconst (\n\tVirtualMachineFlagInfoVirtualMmuUsageAutomatic = VirtualMachineFlagInfoVirtualMmuUsage(\"automatic\")\n\tVirtualMachineFlagInfoVirtualMmuUsageOn        = VirtualMachineFlagInfoVirtualMmuUsage(\"on\")\n\tVirtualMachineFlagInfoVirtualMmuUsageOff       = VirtualMachineFlagInfoVirtualMmuUsage(\"off\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineFlagInfoVirtualMmuUsage\"] = 
reflect.TypeOf((*VirtualMachineFlagInfoVirtualMmuUsage)(nil)).Elem()\n}\n\ntype VirtualMachineForkConfigInfoChildType string\n\nconst (\n\tVirtualMachineForkConfigInfoChildTypeNone          = VirtualMachineForkConfigInfoChildType(\"none\")\n\tVirtualMachineForkConfigInfoChildTypePersistent    = VirtualMachineForkConfigInfoChildType(\"persistent\")\n\tVirtualMachineForkConfigInfoChildTypeNonpersistent = VirtualMachineForkConfigInfoChildType(\"nonpersistent\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineForkConfigInfoChildType\"] = reflect.TypeOf((*VirtualMachineForkConfigInfoChildType)(nil)).Elem()\n}\n\ntype VirtualMachineGuestOsFamily string\n\nconst (\n\tVirtualMachineGuestOsFamilyWindowsGuest      = VirtualMachineGuestOsFamily(\"windowsGuest\")\n\tVirtualMachineGuestOsFamilyLinuxGuest        = VirtualMachineGuestOsFamily(\"linuxGuest\")\n\tVirtualMachineGuestOsFamilyNetwareGuest      = VirtualMachineGuestOsFamily(\"netwareGuest\")\n\tVirtualMachineGuestOsFamilySolarisGuest      = VirtualMachineGuestOsFamily(\"solarisGuest\")\n\tVirtualMachineGuestOsFamilyDarwinGuestFamily = VirtualMachineGuestOsFamily(\"darwinGuestFamily\")\n\tVirtualMachineGuestOsFamilyOtherGuestFamily  = VirtualMachineGuestOsFamily(\"otherGuestFamily\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineGuestOsFamily\"] = reflect.TypeOf((*VirtualMachineGuestOsFamily)(nil)).Elem()\n}\n\ntype VirtualMachineGuestOsIdentifier string\n\nconst (\n\tVirtualMachineGuestOsIdentifierDosGuest                = VirtualMachineGuestOsIdentifier(\"dosGuest\")\n\tVirtualMachineGuestOsIdentifierWin31Guest              = VirtualMachineGuestOsIdentifier(\"win31Guest\")\n\tVirtualMachineGuestOsIdentifierWin95Guest              = VirtualMachineGuestOsIdentifier(\"win95Guest\")\n\tVirtualMachineGuestOsIdentifierWin98Guest              = VirtualMachineGuestOsIdentifier(\"win98Guest\")\n\tVirtualMachineGuestOsIdentifierWinMeGuest              = 
VirtualMachineGuestOsIdentifier(\"winMeGuest\")\n\tVirtualMachineGuestOsIdentifierWinNTGuest              = VirtualMachineGuestOsIdentifier(\"winNTGuest\")\n\tVirtualMachineGuestOsIdentifierWin2000ProGuest         = VirtualMachineGuestOsIdentifier(\"win2000ProGuest\")\n\tVirtualMachineGuestOsIdentifierWin2000ServGuest        = VirtualMachineGuestOsIdentifier(\"win2000ServGuest\")\n\tVirtualMachineGuestOsIdentifierWin2000AdvServGuest     = VirtualMachineGuestOsIdentifier(\"win2000AdvServGuest\")\n\tVirtualMachineGuestOsIdentifierWinXPHomeGuest          = VirtualMachineGuestOsIdentifier(\"winXPHomeGuest\")\n\tVirtualMachineGuestOsIdentifierWinXPProGuest           = VirtualMachineGuestOsIdentifier(\"winXPProGuest\")\n\tVirtualMachineGuestOsIdentifierWinXPPro64Guest         = VirtualMachineGuestOsIdentifier(\"winXPPro64Guest\")\n\tVirtualMachineGuestOsIdentifierWinNetWebGuest          = VirtualMachineGuestOsIdentifier(\"winNetWebGuest\")\n\tVirtualMachineGuestOsIdentifierWinNetStandardGuest     = VirtualMachineGuestOsIdentifier(\"winNetStandardGuest\")\n\tVirtualMachineGuestOsIdentifierWinNetEnterpriseGuest   = VirtualMachineGuestOsIdentifier(\"winNetEnterpriseGuest\")\n\tVirtualMachineGuestOsIdentifierWinNetDatacenterGuest   = VirtualMachineGuestOsIdentifier(\"winNetDatacenterGuest\")\n\tVirtualMachineGuestOsIdentifierWinNetBusinessGuest     = VirtualMachineGuestOsIdentifier(\"winNetBusinessGuest\")\n\tVirtualMachineGuestOsIdentifierWinNetStandard64Guest   = VirtualMachineGuestOsIdentifier(\"winNetStandard64Guest\")\n\tVirtualMachineGuestOsIdentifierWinNetEnterprise64Guest = VirtualMachineGuestOsIdentifier(\"winNetEnterprise64Guest\")\n\tVirtualMachineGuestOsIdentifierWinLonghornGuest        = VirtualMachineGuestOsIdentifier(\"winLonghornGuest\")\n\tVirtualMachineGuestOsIdentifierWinLonghorn64Guest      = VirtualMachineGuestOsIdentifier(\"winLonghorn64Guest\")\n\tVirtualMachineGuestOsIdentifierWinNetDatacenter64Guest = 
VirtualMachineGuestOsIdentifier(\"winNetDatacenter64Guest\")\n\tVirtualMachineGuestOsIdentifierWinVistaGuest           = VirtualMachineGuestOsIdentifier(\"winVistaGuest\")\n\tVirtualMachineGuestOsIdentifierWinVista64Guest         = VirtualMachineGuestOsIdentifier(\"winVista64Guest\")\n\tVirtualMachineGuestOsIdentifierWindows7Guest           = VirtualMachineGuestOsIdentifier(\"windows7Guest\")\n\tVirtualMachineGuestOsIdentifierWindows7_64Guest        = VirtualMachineGuestOsIdentifier(\"windows7_64Guest\")\n\tVirtualMachineGuestOsIdentifierWindows7Server64Guest   = VirtualMachineGuestOsIdentifier(\"windows7Server64Guest\")\n\tVirtualMachineGuestOsIdentifierWindows8Guest           = VirtualMachineGuestOsIdentifier(\"windows8Guest\")\n\tVirtualMachineGuestOsIdentifierWindows8_64Guest        = VirtualMachineGuestOsIdentifier(\"windows8_64Guest\")\n\tVirtualMachineGuestOsIdentifierWindows8Server64Guest   = VirtualMachineGuestOsIdentifier(\"windows8Server64Guest\")\n\tVirtualMachineGuestOsIdentifierWindows9Guest           = VirtualMachineGuestOsIdentifier(\"windows9Guest\")\n\tVirtualMachineGuestOsIdentifierWindows9_64Guest        = VirtualMachineGuestOsIdentifier(\"windows9_64Guest\")\n\tVirtualMachineGuestOsIdentifierWindows9Server64Guest   = VirtualMachineGuestOsIdentifier(\"windows9Server64Guest\")\n\tVirtualMachineGuestOsIdentifierWindowsHyperVGuest      = VirtualMachineGuestOsIdentifier(\"windowsHyperVGuest\")\n\tVirtualMachineGuestOsIdentifierFreebsdGuest            = VirtualMachineGuestOsIdentifier(\"freebsdGuest\")\n\tVirtualMachineGuestOsIdentifierFreebsd64Guest          = VirtualMachineGuestOsIdentifier(\"freebsd64Guest\")\n\tVirtualMachineGuestOsIdentifierRedhatGuest             = VirtualMachineGuestOsIdentifier(\"redhatGuest\")\n\tVirtualMachineGuestOsIdentifierRhel2Guest              = VirtualMachineGuestOsIdentifier(\"rhel2Guest\")\n\tVirtualMachineGuestOsIdentifierRhel3Guest              = 
VirtualMachineGuestOsIdentifier(\"rhel3Guest\")\n\tVirtualMachineGuestOsIdentifierRhel3_64Guest           = VirtualMachineGuestOsIdentifier(\"rhel3_64Guest\")\n\tVirtualMachineGuestOsIdentifierRhel4Guest              = VirtualMachineGuestOsIdentifier(\"rhel4Guest\")\n\tVirtualMachineGuestOsIdentifierRhel4_64Guest           = VirtualMachineGuestOsIdentifier(\"rhel4_64Guest\")\n\tVirtualMachineGuestOsIdentifierRhel5Guest              = VirtualMachineGuestOsIdentifier(\"rhel5Guest\")\n\tVirtualMachineGuestOsIdentifierRhel5_64Guest           = VirtualMachineGuestOsIdentifier(\"rhel5_64Guest\")\n\tVirtualMachineGuestOsIdentifierRhel6Guest              = VirtualMachineGuestOsIdentifier(\"rhel6Guest\")\n\tVirtualMachineGuestOsIdentifierRhel6_64Guest           = VirtualMachineGuestOsIdentifier(\"rhel6_64Guest\")\n\tVirtualMachineGuestOsIdentifierRhel7Guest              = VirtualMachineGuestOsIdentifier(\"rhel7Guest\")\n\tVirtualMachineGuestOsIdentifierRhel7_64Guest           = VirtualMachineGuestOsIdentifier(\"rhel7_64Guest\")\n\tVirtualMachineGuestOsIdentifierCentosGuest             = VirtualMachineGuestOsIdentifier(\"centosGuest\")\n\tVirtualMachineGuestOsIdentifierCentos64Guest           = VirtualMachineGuestOsIdentifier(\"centos64Guest\")\n\tVirtualMachineGuestOsIdentifierCentos6Guest            = VirtualMachineGuestOsIdentifier(\"centos6Guest\")\n\tVirtualMachineGuestOsIdentifierCentos6_64Guest         = VirtualMachineGuestOsIdentifier(\"centos6_64Guest\")\n\tVirtualMachineGuestOsIdentifierCentos7Guest            = VirtualMachineGuestOsIdentifier(\"centos7Guest\")\n\tVirtualMachineGuestOsIdentifierCentos7_64Guest         = VirtualMachineGuestOsIdentifier(\"centos7_64Guest\")\n\tVirtualMachineGuestOsIdentifierOracleLinuxGuest        = VirtualMachineGuestOsIdentifier(\"oracleLinuxGuest\")\n\tVirtualMachineGuestOsIdentifierOracleLinux64Guest      = VirtualMachineGuestOsIdentifier(\"oracleLinux64Guest\")\n\tVirtualMachineGuestOsIdentifierOracleLinux6Guest       = 
VirtualMachineGuestOsIdentifier(\"oracleLinux6Guest\")\n\tVirtualMachineGuestOsIdentifierOracleLinux6_64Guest    = VirtualMachineGuestOsIdentifier(\"oracleLinux6_64Guest\")\n\tVirtualMachineGuestOsIdentifierOracleLinux7Guest       = VirtualMachineGuestOsIdentifier(\"oracleLinux7Guest\")\n\tVirtualMachineGuestOsIdentifierOracleLinux7_64Guest    = VirtualMachineGuestOsIdentifier(\"oracleLinux7_64Guest\")\n\tVirtualMachineGuestOsIdentifierSuseGuest               = VirtualMachineGuestOsIdentifier(\"suseGuest\")\n\tVirtualMachineGuestOsIdentifierSuse64Guest             = VirtualMachineGuestOsIdentifier(\"suse64Guest\")\n\tVirtualMachineGuestOsIdentifierSlesGuest               = VirtualMachineGuestOsIdentifier(\"slesGuest\")\n\tVirtualMachineGuestOsIdentifierSles64Guest             = VirtualMachineGuestOsIdentifier(\"sles64Guest\")\n\tVirtualMachineGuestOsIdentifierSles10Guest             = VirtualMachineGuestOsIdentifier(\"sles10Guest\")\n\tVirtualMachineGuestOsIdentifierSles10_64Guest          = VirtualMachineGuestOsIdentifier(\"sles10_64Guest\")\n\tVirtualMachineGuestOsIdentifierSles11Guest             = VirtualMachineGuestOsIdentifier(\"sles11Guest\")\n\tVirtualMachineGuestOsIdentifierSles11_64Guest          = VirtualMachineGuestOsIdentifier(\"sles11_64Guest\")\n\tVirtualMachineGuestOsIdentifierSles12Guest             = VirtualMachineGuestOsIdentifier(\"sles12Guest\")\n\tVirtualMachineGuestOsIdentifierSles12_64Guest          = VirtualMachineGuestOsIdentifier(\"sles12_64Guest\")\n\tVirtualMachineGuestOsIdentifierNld9Guest               = VirtualMachineGuestOsIdentifier(\"nld9Guest\")\n\tVirtualMachineGuestOsIdentifierOesGuest                = VirtualMachineGuestOsIdentifier(\"oesGuest\")\n\tVirtualMachineGuestOsIdentifierSjdsGuest               = VirtualMachineGuestOsIdentifier(\"sjdsGuest\")\n\tVirtualMachineGuestOsIdentifierMandrakeGuest           = VirtualMachineGuestOsIdentifier(\"mandrakeGuest\")\n\tVirtualMachineGuestOsIdentifierMandrivaGuest           = 
VirtualMachineGuestOsIdentifier(\"mandrivaGuest\")\n\tVirtualMachineGuestOsIdentifierMandriva64Guest         = VirtualMachineGuestOsIdentifier(\"mandriva64Guest\")\n\tVirtualMachineGuestOsIdentifierTurboLinuxGuest         = VirtualMachineGuestOsIdentifier(\"turboLinuxGuest\")\n\tVirtualMachineGuestOsIdentifierTurboLinux64Guest       = VirtualMachineGuestOsIdentifier(\"turboLinux64Guest\")\n\tVirtualMachineGuestOsIdentifierUbuntuGuest             = VirtualMachineGuestOsIdentifier(\"ubuntuGuest\")\n\tVirtualMachineGuestOsIdentifierUbuntu64Guest           = VirtualMachineGuestOsIdentifier(\"ubuntu64Guest\")\n\tVirtualMachineGuestOsIdentifierDebian4Guest            = VirtualMachineGuestOsIdentifier(\"debian4Guest\")\n\tVirtualMachineGuestOsIdentifierDebian4_64Guest         = VirtualMachineGuestOsIdentifier(\"debian4_64Guest\")\n\tVirtualMachineGuestOsIdentifierDebian5Guest            = VirtualMachineGuestOsIdentifier(\"debian5Guest\")\n\tVirtualMachineGuestOsIdentifierDebian5_64Guest         = VirtualMachineGuestOsIdentifier(\"debian5_64Guest\")\n\tVirtualMachineGuestOsIdentifierDebian6Guest            = VirtualMachineGuestOsIdentifier(\"debian6Guest\")\n\tVirtualMachineGuestOsIdentifierDebian6_64Guest         = VirtualMachineGuestOsIdentifier(\"debian6_64Guest\")\n\tVirtualMachineGuestOsIdentifierDebian7Guest            = VirtualMachineGuestOsIdentifier(\"debian7Guest\")\n\tVirtualMachineGuestOsIdentifierDebian7_64Guest         = VirtualMachineGuestOsIdentifier(\"debian7_64Guest\")\n\tVirtualMachineGuestOsIdentifierDebian8Guest            = VirtualMachineGuestOsIdentifier(\"debian8Guest\")\n\tVirtualMachineGuestOsIdentifierDebian8_64Guest         = VirtualMachineGuestOsIdentifier(\"debian8_64Guest\")\n\tVirtualMachineGuestOsIdentifierDebian9Guest            = VirtualMachineGuestOsIdentifier(\"debian9Guest\")\n\tVirtualMachineGuestOsIdentifierDebian9_64Guest         = VirtualMachineGuestOsIdentifier(\"debian9_64Guest\")\n\tVirtualMachineGuestOsIdentifierDebian10Guest   
        = VirtualMachineGuestOsIdentifier(\"debian10Guest\")\n\tVirtualMachineGuestOsIdentifierDebian10_64Guest        = VirtualMachineGuestOsIdentifier(\"debian10_64Guest\")\n\tVirtualMachineGuestOsIdentifierAsianux3Guest           = VirtualMachineGuestOsIdentifier(\"asianux3Guest\")\n\tVirtualMachineGuestOsIdentifierAsianux3_64Guest        = VirtualMachineGuestOsIdentifier(\"asianux3_64Guest\")\n\tVirtualMachineGuestOsIdentifierAsianux4Guest           = VirtualMachineGuestOsIdentifier(\"asianux4Guest\")\n\tVirtualMachineGuestOsIdentifierAsianux4_64Guest        = VirtualMachineGuestOsIdentifier(\"asianux4_64Guest\")\n\tVirtualMachineGuestOsIdentifierAsianux5_64Guest        = VirtualMachineGuestOsIdentifier(\"asianux5_64Guest\")\n\tVirtualMachineGuestOsIdentifierAsianux7_64Guest        = VirtualMachineGuestOsIdentifier(\"asianux7_64Guest\")\n\tVirtualMachineGuestOsIdentifierOpensuseGuest           = VirtualMachineGuestOsIdentifier(\"opensuseGuest\")\n\tVirtualMachineGuestOsIdentifierOpensuse64Guest         = VirtualMachineGuestOsIdentifier(\"opensuse64Guest\")\n\tVirtualMachineGuestOsIdentifierFedoraGuest             = VirtualMachineGuestOsIdentifier(\"fedoraGuest\")\n\tVirtualMachineGuestOsIdentifierFedora64Guest           = VirtualMachineGuestOsIdentifier(\"fedora64Guest\")\n\tVirtualMachineGuestOsIdentifierCoreos64Guest           = VirtualMachineGuestOsIdentifier(\"coreos64Guest\")\n\tVirtualMachineGuestOsIdentifierVmwarePhoton64Guest     = VirtualMachineGuestOsIdentifier(\"vmwarePhoton64Guest\")\n\tVirtualMachineGuestOsIdentifierOther24xLinuxGuest      = VirtualMachineGuestOsIdentifier(\"other24xLinuxGuest\")\n\tVirtualMachineGuestOsIdentifierOther26xLinuxGuest      = VirtualMachineGuestOsIdentifier(\"other26xLinuxGuest\")\n\tVirtualMachineGuestOsIdentifierOtherLinuxGuest         = VirtualMachineGuestOsIdentifier(\"otherLinuxGuest\")\n\tVirtualMachineGuestOsIdentifierOther3xLinuxGuest       = 
VirtualMachineGuestOsIdentifier(\"other3xLinuxGuest\")\n\tVirtualMachineGuestOsIdentifierGenericLinuxGuest       = VirtualMachineGuestOsIdentifier(\"genericLinuxGuest\")\n\tVirtualMachineGuestOsIdentifierOther24xLinux64Guest    = VirtualMachineGuestOsIdentifier(\"other24xLinux64Guest\")\n\tVirtualMachineGuestOsIdentifierOther26xLinux64Guest    = VirtualMachineGuestOsIdentifier(\"other26xLinux64Guest\")\n\tVirtualMachineGuestOsIdentifierOther3xLinux64Guest     = VirtualMachineGuestOsIdentifier(\"other3xLinux64Guest\")\n\tVirtualMachineGuestOsIdentifierOtherLinux64Guest       = VirtualMachineGuestOsIdentifier(\"otherLinux64Guest\")\n\tVirtualMachineGuestOsIdentifierSolaris6Guest           = VirtualMachineGuestOsIdentifier(\"solaris6Guest\")\n\tVirtualMachineGuestOsIdentifierSolaris7Guest           = VirtualMachineGuestOsIdentifier(\"solaris7Guest\")\n\tVirtualMachineGuestOsIdentifierSolaris8Guest           = VirtualMachineGuestOsIdentifier(\"solaris8Guest\")\n\tVirtualMachineGuestOsIdentifierSolaris9Guest           = VirtualMachineGuestOsIdentifier(\"solaris9Guest\")\n\tVirtualMachineGuestOsIdentifierSolaris10Guest          = VirtualMachineGuestOsIdentifier(\"solaris10Guest\")\n\tVirtualMachineGuestOsIdentifierSolaris10_64Guest       = VirtualMachineGuestOsIdentifier(\"solaris10_64Guest\")\n\tVirtualMachineGuestOsIdentifierSolaris11_64Guest       = VirtualMachineGuestOsIdentifier(\"solaris11_64Guest\")\n\tVirtualMachineGuestOsIdentifierOs2Guest                = VirtualMachineGuestOsIdentifier(\"os2Guest\")\n\tVirtualMachineGuestOsIdentifierEComStationGuest        = VirtualMachineGuestOsIdentifier(\"eComStationGuest\")\n\tVirtualMachineGuestOsIdentifierEComStation2Guest       = VirtualMachineGuestOsIdentifier(\"eComStation2Guest\")\n\tVirtualMachineGuestOsIdentifierNetware4Guest           = VirtualMachineGuestOsIdentifier(\"netware4Guest\")\n\tVirtualMachineGuestOsIdentifierNetware5Guest           = 
VirtualMachineGuestOsIdentifier(\"netware5Guest\")\n\tVirtualMachineGuestOsIdentifierNetware6Guest           = VirtualMachineGuestOsIdentifier(\"netware6Guest\")\n\tVirtualMachineGuestOsIdentifierOpenServer5Guest        = VirtualMachineGuestOsIdentifier(\"openServer5Guest\")\n\tVirtualMachineGuestOsIdentifierOpenServer6Guest        = VirtualMachineGuestOsIdentifier(\"openServer6Guest\")\n\tVirtualMachineGuestOsIdentifierUnixWare7Guest          = VirtualMachineGuestOsIdentifier(\"unixWare7Guest\")\n\tVirtualMachineGuestOsIdentifierDarwinGuest             = VirtualMachineGuestOsIdentifier(\"darwinGuest\")\n\tVirtualMachineGuestOsIdentifierDarwin64Guest           = VirtualMachineGuestOsIdentifier(\"darwin64Guest\")\n\tVirtualMachineGuestOsIdentifierDarwin10Guest           = VirtualMachineGuestOsIdentifier(\"darwin10Guest\")\n\tVirtualMachineGuestOsIdentifierDarwin10_64Guest        = VirtualMachineGuestOsIdentifier(\"darwin10_64Guest\")\n\tVirtualMachineGuestOsIdentifierDarwin11Guest           = VirtualMachineGuestOsIdentifier(\"darwin11Guest\")\n\tVirtualMachineGuestOsIdentifierDarwin11_64Guest        = VirtualMachineGuestOsIdentifier(\"darwin11_64Guest\")\n\tVirtualMachineGuestOsIdentifierDarwin12_64Guest        = VirtualMachineGuestOsIdentifier(\"darwin12_64Guest\")\n\tVirtualMachineGuestOsIdentifierDarwin13_64Guest        = VirtualMachineGuestOsIdentifier(\"darwin13_64Guest\")\n\tVirtualMachineGuestOsIdentifierDarwin14_64Guest        = VirtualMachineGuestOsIdentifier(\"darwin14_64Guest\")\n\tVirtualMachineGuestOsIdentifierDarwin15_64Guest        = VirtualMachineGuestOsIdentifier(\"darwin15_64Guest\")\n\tVirtualMachineGuestOsIdentifierDarwin16_64Guest        = VirtualMachineGuestOsIdentifier(\"darwin16_64Guest\")\n\tVirtualMachineGuestOsIdentifierVmkernelGuest           = VirtualMachineGuestOsIdentifier(\"vmkernelGuest\")\n\tVirtualMachineGuestOsIdentifierVmkernel5Guest          = 
VirtualMachineGuestOsIdentifier(\"vmkernel5Guest\")\n\tVirtualMachineGuestOsIdentifierVmkernel6Guest          = VirtualMachineGuestOsIdentifier(\"vmkernel6Guest\")\n\tVirtualMachineGuestOsIdentifierVmkernel65Guest         = VirtualMachineGuestOsIdentifier(\"vmkernel65Guest\")\n\tVirtualMachineGuestOsIdentifierOtherGuest              = VirtualMachineGuestOsIdentifier(\"otherGuest\")\n\tVirtualMachineGuestOsIdentifierOtherGuest64            = VirtualMachineGuestOsIdentifier(\"otherGuest64\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineGuestOsIdentifier\"] = reflect.TypeOf((*VirtualMachineGuestOsIdentifier)(nil)).Elem()\n}\n\ntype VirtualMachineGuestState string\n\nconst (\n\tVirtualMachineGuestStateRunning      = VirtualMachineGuestState(\"running\")\n\tVirtualMachineGuestStateShuttingDown = VirtualMachineGuestState(\"shuttingDown\")\n\tVirtualMachineGuestStateResetting    = VirtualMachineGuestState(\"resetting\")\n\tVirtualMachineGuestStateStandby      = VirtualMachineGuestState(\"standby\")\n\tVirtualMachineGuestStateNotRunning   = VirtualMachineGuestState(\"notRunning\")\n\tVirtualMachineGuestStateUnknown      = VirtualMachineGuestState(\"unknown\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineGuestState\"] = reflect.TypeOf((*VirtualMachineGuestState)(nil)).Elem()\n}\n\ntype VirtualMachineHtSharing string\n\nconst (\n\tVirtualMachineHtSharingAny      = VirtualMachineHtSharing(\"any\")\n\tVirtualMachineHtSharingNone     = VirtualMachineHtSharing(\"none\")\n\tVirtualMachineHtSharingInternal = VirtualMachineHtSharing(\"internal\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineHtSharing\"] = reflect.TypeOf((*VirtualMachineHtSharing)(nil)).Elem()\n}\n\ntype VirtualMachineMemoryAllocationPolicy string\n\nconst (\n\tVirtualMachineMemoryAllocationPolicySwapNone = VirtualMachineMemoryAllocationPolicy(\"swapNone\")\n\tVirtualMachineMemoryAllocationPolicySwapSome = VirtualMachineMemoryAllocationPolicy(\"swapSome\")\n\tVirtualMachineMemoryAllocationPolicySwapMost = 
VirtualMachineMemoryAllocationPolicy(\"swapMost\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineMemoryAllocationPolicy\"] = reflect.TypeOf((*VirtualMachineMemoryAllocationPolicy)(nil)).Elem()\n}\n\ntype VirtualMachineMetadataManagerVmMetadataOp string\n\nconst (\n\tVirtualMachineMetadataManagerVmMetadataOpUpdate = VirtualMachineMetadataManagerVmMetadataOp(\"Update\")\n\tVirtualMachineMetadataManagerVmMetadataOpRemove = VirtualMachineMetadataManagerVmMetadataOp(\"Remove\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineMetadataManagerVmMetadataOp\"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataOp)(nil)).Elem()\n}\n\ntype VirtualMachineMetadataManagerVmMetadataOwnerOwner string\n\nconst (\n\tVirtualMachineMetadataManagerVmMetadataOwnerOwnerComVmwareVsphereHA = VirtualMachineMetadataManagerVmMetadataOwnerOwner(\"ComVmwareVsphereHA\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineMetadataManagerVmMetadataOwnerOwner\"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataOwnerOwner)(nil)).Elem()\n}\n\ntype VirtualMachineMovePriority string\n\nconst (\n\tVirtualMachineMovePriorityLowPriority     = VirtualMachineMovePriority(\"lowPriority\")\n\tVirtualMachineMovePriorityHighPriority    = VirtualMachineMovePriority(\"highPriority\")\n\tVirtualMachineMovePriorityDefaultPriority = VirtualMachineMovePriority(\"defaultPriority\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineMovePriority\"] = reflect.TypeOf((*VirtualMachineMovePriority)(nil)).Elem()\n}\n\ntype VirtualMachineNeedSecondaryReason string\n\nconst (\n\tVirtualMachineNeedSecondaryReasonInitializing           = VirtualMachineNeedSecondaryReason(\"initializing\")\n\tVirtualMachineNeedSecondaryReasonDivergence             = VirtualMachineNeedSecondaryReason(\"divergence\")\n\tVirtualMachineNeedSecondaryReasonLostConnection         = VirtualMachineNeedSecondaryReason(\"lostConnection\")\n\tVirtualMachineNeedSecondaryReasonPartialHardwareFailure = 
VirtualMachineNeedSecondaryReason(\"partialHardwareFailure\")\n\tVirtualMachineNeedSecondaryReasonUserAction             = VirtualMachineNeedSecondaryReason(\"userAction\")\n\tVirtualMachineNeedSecondaryReasonCheckpointError        = VirtualMachineNeedSecondaryReason(\"checkpointError\")\n\tVirtualMachineNeedSecondaryReasonOther                  = VirtualMachineNeedSecondaryReason(\"other\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineNeedSecondaryReason\"] = reflect.TypeOf((*VirtualMachineNeedSecondaryReason)(nil)).Elem()\n}\n\ntype VirtualMachinePowerOffBehavior string\n\nconst (\n\tVirtualMachinePowerOffBehaviorPowerOff = VirtualMachinePowerOffBehavior(\"powerOff\")\n\tVirtualMachinePowerOffBehaviorRevert   = VirtualMachinePowerOffBehavior(\"revert\")\n\tVirtualMachinePowerOffBehaviorPrompt   = VirtualMachinePowerOffBehavior(\"prompt\")\n\tVirtualMachinePowerOffBehaviorTake     = VirtualMachinePowerOffBehavior(\"take\")\n)\n\nfunc init() {\n\tt[\"VirtualMachinePowerOffBehavior\"] = reflect.TypeOf((*VirtualMachinePowerOffBehavior)(nil)).Elem()\n}\n\ntype VirtualMachinePowerOpType string\n\nconst (\n\tVirtualMachinePowerOpTypeSoft   = VirtualMachinePowerOpType(\"soft\")\n\tVirtualMachinePowerOpTypeHard   = VirtualMachinePowerOpType(\"hard\")\n\tVirtualMachinePowerOpTypePreset = VirtualMachinePowerOpType(\"preset\")\n)\n\nfunc init() {\n\tt[\"VirtualMachinePowerOpType\"] = reflect.TypeOf((*VirtualMachinePowerOpType)(nil)).Elem()\n}\n\ntype VirtualMachinePowerState string\n\nconst (\n\tVirtualMachinePowerStatePoweredOff = VirtualMachinePowerState(\"poweredOff\")\n\tVirtualMachinePowerStatePoweredOn  = VirtualMachinePowerState(\"poweredOn\")\n\tVirtualMachinePowerStateSuspended  = VirtualMachinePowerState(\"suspended\")\n)\n\nfunc init() {\n\tt[\"VirtualMachinePowerState\"] = reflect.TypeOf((*VirtualMachinePowerState)(nil)).Elem()\n}\n\ntype VirtualMachineRecordReplayState string\n\nconst (\n\tVirtualMachineRecordReplayStateRecording = 
VirtualMachineRecordReplayState(\"recording\")\n\tVirtualMachineRecordReplayStateReplaying = VirtualMachineRecordReplayState(\"replaying\")\n\tVirtualMachineRecordReplayStateInactive  = VirtualMachineRecordReplayState(\"inactive\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineRecordReplayState\"] = reflect.TypeOf((*VirtualMachineRecordReplayState)(nil)).Elem()\n}\n\ntype VirtualMachineRelocateDiskMoveOptions string\n\nconst (\n\tVirtualMachineRelocateDiskMoveOptionsMoveAllDiskBackingsAndAllowSharing    = VirtualMachineRelocateDiskMoveOptions(\"moveAllDiskBackingsAndAllowSharing\")\n\tVirtualMachineRelocateDiskMoveOptionsMoveAllDiskBackingsAndDisallowSharing = VirtualMachineRelocateDiskMoveOptions(\"moveAllDiskBackingsAndDisallowSharing\")\n\tVirtualMachineRelocateDiskMoveOptionsMoveChildMostDiskBacking              = VirtualMachineRelocateDiskMoveOptions(\"moveChildMostDiskBacking\")\n\tVirtualMachineRelocateDiskMoveOptionsCreateNewChildDiskBacking             = VirtualMachineRelocateDiskMoveOptions(\"createNewChildDiskBacking\")\n\tVirtualMachineRelocateDiskMoveOptionsMoveAllDiskBackingsAndConsolidate     = VirtualMachineRelocateDiskMoveOptions(\"moveAllDiskBackingsAndConsolidate\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineRelocateDiskMoveOptions\"] = reflect.TypeOf((*VirtualMachineRelocateDiskMoveOptions)(nil)).Elem()\n}\n\ntype VirtualMachineRelocateTransformation string\n\nconst (\n\tVirtualMachineRelocateTransformationFlat   = VirtualMachineRelocateTransformation(\"flat\")\n\tVirtualMachineRelocateTransformationSparse = VirtualMachineRelocateTransformation(\"sparse\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineRelocateTransformation\"] = reflect.TypeOf((*VirtualMachineRelocateTransformation)(nil)).Elem()\n}\n\ntype VirtualMachineScsiPassthroughType string\n\nconst (\n\tVirtualMachineScsiPassthroughTypeDisk      = VirtualMachineScsiPassthroughType(\"disk\")\n\tVirtualMachineScsiPassthroughTypeTape      = 
VirtualMachineScsiPassthroughType(\"tape\")\n\tVirtualMachineScsiPassthroughTypePrinter   = VirtualMachineScsiPassthroughType(\"printer\")\n\tVirtualMachineScsiPassthroughTypeProcessor = VirtualMachineScsiPassthroughType(\"processor\")\n\tVirtualMachineScsiPassthroughTypeWorm      = VirtualMachineScsiPassthroughType(\"worm\")\n\tVirtualMachineScsiPassthroughTypeCdrom     = VirtualMachineScsiPassthroughType(\"cdrom\")\n\tVirtualMachineScsiPassthroughTypeScanner   = VirtualMachineScsiPassthroughType(\"scanner\")\n\tVirtualMachineScsiPassthroughTypeOptical   = VirtualMachineScsiPassthroughType(\"optical\")\n\tVirtualMachineScsiPassthroughTypeMedia     = VirtualMachineScsiPassthroughType(\"media\")\n\tVirtualMachineScsiPassthroughTypeCom       = VirtualMachineScsiPassthroughType(\"com\")\n\tVirtualMachineScsiPassthroughTypeRaid      = VirtualMachineScsiPassthroughType(\"raid\")\n\tVirtualMachineScsiPassthroughTypeUnknown   = VirtualMachineScsiPassthroughType(\"unknown\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineScsiPassthroughType\"] = reflect.TypeOf((*VirtualMachineScsiPassthroughType)(nil)).Elem()\n}\n\ntype VirtualMachineStandbyActionType string\n\nconst (\n\tVirtualMachineStandbyActionTypeCheckpoint     = VirtualMachineStandbyActionType(\"checkpoint\")\n\tVirtualMachineStandbyActionTypePowerOnSuspend = VirtualMachineStandbyActionType(\"powerOnSuspend\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineStandbyActionType\"] = reflect.TypeOf((*VirtualMachineStandbyActionType)(nil)).Elem()\n}\n\ntype VirtualMachineTargetInfoConfigurationTag string\n\nconst (\n\tVirtualMachineTargetInfoConfigurationTagCompliant   = VirtualMachineTargetInfoConfigurationTag(\"compliant\")\n\tVirtualMachineTargetInfoConfigurationTagClusterWide = VirtualMachineTargetInfoConfigurationTag(\"clusterWide\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineTargetInfoConfigurationTag\"] = reflect.TypeOf((*VirtualMachineTargetInfoConfigurationTag)(nil)).Elem()\n}\n\ntype VirtualMachineTicketType string\n\nconst 
(\n\tVirtualMachineTicketTypeMks          = VirtualMachineTicketType(\"mks\")\n\tVirtualMachineTicketTypeDevice       = VirtualMachineTicketType(\"device\")\n\tVirtualMachineTicketTypeGuestControl = VirtualMachineTicketType(\"guestControl\")\n\tVirtualMachineTicketTypeWebmks       = VirtualMachineTicketType(\"webmks\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineTicketType\"] = reflect.TypeOf((*VirtualMachineTicketType)(nil)).Elem()\n}\n\ntype VirtualMachineToolsInstallType string\n\nconst (\n\tVirtualMachineToolsInstallTypeGuestToolsTypeUnknown     = VirtualMachineToolsInstallType(\"guestToolsTypeUnknown\")\n\tVirtualMachineToolsInstallTypeGuestToolsTypeMSI         = VirtualMachineToolsInstallType(\"guestToolsTypeMSI\")\n\tVirtualMachineToolsInstallTypeGuestToolsTypeTar         = VirtualMachineToolsInstallType(\"guestToolsTypeTar\")\n\tVirtualMachineToolsInstallTypeGuestToolsTypeOSP         = VirtualMachineToolsInstallType(\"guestToolsTypeOSP\")\n\tVirtualMachineToolsInstallTypeGuestToolsTypeOpenVMTools = VirtualMachineToolsInstallType(\"guestToolsTypeOpenVMTools\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineToolsInstallType\"] = reflect.TypeOf((*VirtualMachineToolsInstallType)(nil)).Elem()\n}\n\ntype VirtualMachineToolsRunningStatus string\n\nconst (\n\tVirtualMachineToolsRunningStatusGuestToolsNotRunning       = VirtualMachineToolsRunningStatus(\"guestToolsNotRunning\")\n\tVirtualMachineToolsRunningStatusGuestToolsRunning          = VirtualMachineToolsRunningStatus(\"guestToolsRunning\")\n\tVirtualMachineToolsRunningStatusGuestToolsExecutingScripts = VirtualMachineToolsRunningStatus(\"guestToolsExecutingScripts\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineToolsRunningStatus\"] = reflect.TypeOf((*VirtualMachineToolsRunningStatus)(nil)).Elem()\n}\n\ntype VirtualMachineToolsStatus string\n\nconst (\n\tVirtualMachineToolsStatusToolsNotInstalled = VirtualMachineToolsStatus(\"toolsNotInstalled\")\n\tVirtualMachineToolsStatusToolsNotRunning   = 
VirtualMachineToolsStatus(\"toolsNotRunning\")\n\tVirtualMachineToolsStatusToolsOld          = VirtualMachineToolsStatus(\"toolsOld\")\n\tVirtualMachineToolsStatusToolsOk           = VirtualMachineToolsStatus(\"toolsOk\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineToolsStatus\"] = reflect.TypeOf((*VirtualMachineToolsStatus)(nil)).Elem()\n}\n\ntype VirtualMachineToolsVersionStatus string\n\nconst (\n\tVirtualMachineToolsVersionStatusGuestToolsNotInstalled = VirtualMachineToolsVersionStatus(\"guestToolsNotInstalled\")\n\tVirtualMachineToolsVersionStatusGuestToolsNeedUpgrade  = VirtualMachineToolsVersionStatus(\"guestToolsNeedUpgrade\")\n\tVirtualMachineToolsVersionStatusGuestToolsCurrent      = VirtualMachineToolsVersionStatus(\"guestToolsCurrent\")\n\tVirtualMachineToolsVersionStatusGuestToolsUnmanaged    = VirtualMachineToolsVersionStatus(\"guestToolsUnmanaged\")\n\tVirtualMachineToolsVersionStatusGuestToolsTooOld       = VirtualMachineToolsVersionStatus(\"guestToolsTooOld\")\n\tVirtualMachineToolsVersionStatusGuestToolsSupportedOld = VirtualMachineToolsVersionStatus(\"guestToolsSupportedOld\")\n\tVirtualMachineToolsVersionStatusGuestToolsSupportedNew = VirtualMachineToolsVersionStatus(\"guestToolsSupportedNew\")\n\tVirtualMachineToolsVersionStatusGuestToolsTooNew       = VirtualMachineToolsVersionStatus(\"guestToolsTooNew\")\n\tVirtualMachineToolsVersionStatusGuestToolsBlacklisted  = VirtualMachineToolsVersionStatus(\"guestToolsBlacklisted\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineToolsVersionStatus\"] = reflect.TypeOf((*VirtualMachineToolsVersionStatus)(nil)).Elem()\n}\n\ntype VirtualMachineUsbInfoFamily string\n\nconst (\n\tVirtualMachineUsbInfoFamilyAudio           = VirtualMachineUsbInfoFamily(\"audio\")\n\tVirtualMachineUsbInfoFamilyHid             = VirtualMachineUsbInfoFamily(\"hid\")\n\tVirtualMachineUsbInfoFamilyHid_bootable    = VirtualMachineUsbInfoFamily(\"hid_bootable\")\n\tVirtualMachineUsbInfoFamilyPhysical        = 
VirtualMachineUsbInfoFamily(\"physical\")\n\tVirtualMachineUsbInfoFamilyCommunication   = VirtualMachineUsbInfoFamily(\"communication\")\n\tVirtualMachineUsbInfoFamilyImaging         = VirtualMachineUsbInfoFamily(\"imaging\")\n\tVirtualMachineUsbInfoFamilyPrinter         = VirtualMachineUsbInfoFamily(\"printer\")\n\tVirtualMachineUsbInfoFamilyStorage         = VirtualMachineUsbInfoFamily(\"storage\")\n\tVirtualMachineUsbInfoFamilyHub             = VirtualMachineUsbInfoFamily(\"hub\")\n\tVirtualMachineUsbInfoFamilySmart_card      = VirtualMachineUsbInfoFamily(\"smart_card\")\n\tVirtualMachineUsbInfoFamilySecurity        = VirtualMachineUsbInfoFamily(\"security\")\n\tVirtualMachineUsbInfoFamilyVideo           = VirtualMachineUsbInfoFamily(\"video\")\n\tVirtualMachineUsbInfoFamilyWireless        = VirtualMachineUsbInfoFamily(\"wireless\")\n\tVirtualMachineUsbInfoFamilyBluetooth       = VirtualMachineUsbInfoFamily(\"bluetooth\")\n\tVirtualMachineUsbInfoFamilyWusb            = VirtualMachineUsbInfoFamily(\"wusb\")\n\tVirtualMachineUsbInfoFamilyPda             = VirtualMachineUsbInfoFamily(\"pda\")\n\tVirtualMachineUsbInfoFamilyVendor_specific = VirtualMachineUsbInfoFamily(\"vendor_specific\")\n\tVirtualMachineUsbInfoFamilyOther           = VirtualMachineUsbInfoFamily(\"other\")\n\tVirtualMachineUsbInfoFamilyUnknownFamily   = VirtualMachineUsbInfoFamily(\"unknownFamily\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineUsbInfoFamily\"] = reflect.TypeOf((*VirtualMachineUsbInfoFamily)(nil)).Elem()\n}\n\ntype VirtualMachineUsbInfoSpeed string\n\nconst (\n\tVirtualMachineUsbInfoSpeedLow          = VirtualMachineUsbInfoSpeed(\"low\")\n\tVirtualMachineUsbInfoSpeedFull         = VirtualMachineUsbInfoSpeed(\"full\")\n\tVirtualMachineUsbInfoSpeedHigh         = VirtualMachineUsbInfoSpeed(\"high\")\n\tVirtualMachineUsbInfoSpeedSuperSpeed   = VirtualMachineUsbInfoSpeed(\"superSpeed\")\n\tVirtualMachineUsbInfoSpeedUnknownSpeed = VirtualMachineUsbInfoSpeed(\"unknownSpeed\")\n)\n\nfunc init() 
{\n\tt[\"VirtualMachineUsbInfoSpeed\"] = reflect.TypeOf((*VirtualMachineUsbInfoSpeed)(nil)).Elem()\n}\n\ntype VirtualMachineVMCIDeviceAction string\n\nconst (\n\tVirtualMachineVMCIDeviceActionAllow = VirtualMachineVMCIDeviceAction(\"allow\")\n\tVirtualMachineVMCIDeviceActionDeny  = VirtualMachineVMCIDeviceAction(\"deny\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineVMCIDeviceAction\"] = reflect.TypeOf((*VirtualMachineVMCIDeviceAction)(nil)).Elem()\n}\n\ntype VirtualMachineVMCIDeviceDirection string\n\nconst (\n\tVirtualMachineVMCIDeviceDirectionGuest        = VirtualMachineVMCIDeviceDirection(\"guest\")\n\tVirtualMachineVMCIDeviceDirectionHost         = VirtualMachineVMCIDeviceDirection(\"host\")\n\tVirtualMachineVMCIDeviceDirectionAnyDirection = VirtualMachineVMCIDeviceDirection(\"anyDirection\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineVMCIDeviceDirection\"] = reflect.TypeOf((*VirtualMachineVMCIDeviceDirection)(nil)).Elem()\n}\n\ntype VirtualMachineVMCIDeviceProtocol string\n\nconst (\n\tVirtualMachineVMCIDeviceProtocolHypervisor  = VirtualMachineVMCIDeviceProtocol(\"hypervisor\")\n\tVirtualMachineVMCIDeviceProtocolDoorbell    = VirtualMachineVMCIDeviceProtocol(\"doorbell\")\n\tVirtualMachineVMCIDeviceProtocolQueuepair   = VirtualMachineVMCIDeviceProtocol(\"queuepair\")\n\tVirtualMachineVMCIDeviceProtocolDatagram    = VirtualMachineVMCIDeviceProtocol(\"datagram\")\n\tVirtualMachineVMCIDeviceProtocolStream      = VirtualMachineVMCIDeviceProtocol(\"stream\")\n\tVirtualMachineVMCIDeviceProtocolAnyProtocol = VirtualMachineVMCIDeviceProtocol(\"anyProtocol\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineVMCIDeviceProtocol\"] = reflect.TypeOf((*VirtualMachineVMCIDeviceProtocol)(nil)).Elem()\n}\n\ntype VirtualMachineVideoCardUse3dRenderer string\n\nconst (\n\tVirtualMachineVideoCardUse3dRendererAutomatic = VirtualMachineVideoCardUse3dRenderer(\"automatic\")\n\tVirtualMachineVideoCardUse3dRendererSoftware  = 
VirtualMachineVideoCardUse3dRenderer(\"software\")\n\tVirtualMachineVideoCardUse3dRendererHardware  = VirtualMachineVideoCardUse3dRenderer(\"hardware\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineVideoCardUse3dRenderer\"] = reflect.TypeOf((*VirtualMachineVideoCardUse3dRenderer)(nil)).Elem()\n}\n\ntype VirtualMachineWindowsQuiesceSpecVssBackupContext string\n\nconst (\n\tVirtualMachineWindowsQuiesceSpecVssBackupContextCtx_auto              = VirtualMachineWindowsQuiesceSpecVssBackupContext(\"ctx_auto\")\n\tVirtualMachineWindowsQuiesceSpecVssBackupContextCtx_backup            = VirtualMachineWindowsQuiesceSpecVssBackupContext(\"ctx_backup\")\n\tVirtualMachineWindowsQuiesceSpecVssBackupContextCtx_file_share_backup = VirtualMachineWindowsQuiesceSpecVssBackupContext(\"ctx_file_share_backup\")\n)\n\nfunc init() {\n\tt[\"VirtualMachineWindowsQuiesceSpecVssBackupContext\"] = reflect.TypeOf((*VirtualMachineWindowsQuiesceSpecVssBackupContext)(nil)).Elem()\n}\n\ntype VirtualPointingDeviceHostChoice string\n\nconst (\n\tVirtualPointingDeviceHostChoiceAutodetect           = VirtualPointingDeviceHostChoice(\"autodetect\")\n\tVirtualPointingDeviceHostChoiceIntellimouseExplorer = VirtualPointingDeviceHostChoice(\"intellimouseExplorer\")\n\tVirtualPointingDeviceHostChoiceIntellimousePs2      = VirtualPointingDeviceHostChoice(\"intellimousePs2\")\n\tVirtualPointingDeviceHostChoiceLogitechMouseman     = VirtualPointingDeviceHostChoice(\"logitechMouseman\")\n\tVirtualPointingDeviceHostChoiceMicrosoft_serial     = VirtualPointingDeviceHostChoice(\"microsoft_serial\")\n\tVirtualPointingDeviceHostChoiceMouseSystems         = VirtualPointingDeviceHostChoice(\"mouseSystems\")\n\tVirtualPointingDeviceHostChoiceMousemanSerial       = VirtualPointingDeviceHostChoice(\"mousemanSerial\")\n\tVirtualPointingDeviceHostChoicePs2                  = VirtualPointingDeviceHostChoice(\"ps2\")\n)\n\nfunc init() {\n\tt[\"VirtualPointingDeviceHostChoice\"] = 
reflect.TypeOf((*VirtualPointingDeviceHostChoice)(nil)).Elem()\n}\n\ntype VirtualSCSISharing string\n\nconst (\n\tVirtualSCSISharingNoSharing       = VirtualSCSISharing(\"noSharing\")\n\tVirtualSCSISharingVirtualSharing  = VirtualSCSISharing(\"virtualSharing\")\n\tVirtualSCSISharingPhysicalSharing = VirtualSCSISharing(\"physicalSharing\")\n)\n\nfunc init() {\n\tt[\"VirtualSCSISharing\"] = reflect.TypeOf((*VirtualSCSISharing)(nil)).Elem()\n}\n\ntype VirtualSerialPortEndPoint string\n\nconst (\n\tVirtualSerialPortEndPointClient = VirtualSerialPortEndPoint(\"client\")\n\tVirtualSerialPortEndPointServer = VirtualSerialPortEndPoint(\"server\")\n)\n\nfunc init() {\n\tt[\"VirtualSerialPortEndPoint\"] = reflect.TypeOf((*VirtualSerialPortEndPoint)(nil)).Elem()\n}\n\ntype VmDasBeingResetEventReasonCode string\n\nconst (\n\tVmDasBeingResetEventReasonCodeVmtoolsHeartbeatFailure  = VmDasBeingResetEventReasonCode(\"vmtoolsHeartbeatFailure\")\n\tVmDasBeingResetEventReasonCodeAppHeartbeatFailure      = VmDasBeingResetEventReasonCode(\"appHeartbeatFailure\")\n\tVmDasBeingResetEventReasonCodeAppImmediateResetRequest = VmDasBeingResetEventReasonCode(\"appImmediateResetRequest\")\n\tVmDasBeingResetEventReasonCodeVmcpResetApdCleared      = VmDasBeingResetEventReasonCode(\"vmcpResetApdCleared\")\n)\n\nfunc init() {\n\tt[\"VmDasBeingResetEventReasonCode\"] = reflect.TypeOf((*VmDasBeingResetEventReasonCode)(nil)).Elem()\n}\n\ntype VmFailedStartingSecondaryEventFailureReason string\n\nconst (\n\tVmFailedStartingSecondaryEventFailureReasonIncompatibleHost = VmFailedStartingSecondaryEventFailureReason(\"incompatibleHost\")\n\tVmFailedStartingSecondaryEventFailureReasonLoginFailed      = VmFailedStartingSecondaryEventFailureReason(\"loginFailed\")\n\tVmFailedStartingSecondaryEventFailureReasonRegisterVmFailed = VmFailedStartingSecondaryEventFailureReason(\"registerVmFailed\")\n\tVmFailedStartingSecondaryEventFailureReasonMigrateFailed    = 
VmFailedStartingSecondaryEventFailureReason(\"migrateFailed\")\n)\n\nfunc init() {\n\tt[\"VmFailedStartingSecondaryEventFailureReason\"] = reflect.TypeOf((*VmFailedStartingSecondaryEventFailureReason)(nil)).Elem()\n}\n\ntype VmFaultToleranceConfigIssueReasonForIssue string\n\nconst (\n\tVmFaultToleranceConfigIssueReasonForIssueHaNotEnabled                   = VmFaultToleranceConfigIssueReasonForIssue(\"haNotEnabled\")\n\tVmFaultToleranceConfigIssueReasonForIssueMoreThanOneSecondary           = VmFaultToleranceConfigIssueReasonForIssue(\"moreThanOneSecondary\")\n\tVmFaultToleranceConfigIssueReasonForIssueRecordReplayNotSupported       = VmFaultToleranceConfigIssueReasonForIssue(\"recordReplayNotSupported\")\n\tVmFaultToleranceConfigIssueReasonForIssueReplayNotSupported             = VmFaultToleranceConfigIssueReasonForIssue(\"replayNotSupported\")\n\tVmFaultToleranceConfigIssueReasonForIssueTemplateVm                     = VmFaultToleranceConfigIssueReasonForIssue(\"templateVm\")\n\tVmFaultToleranceConfigIssueReasonForIssueMultipleVCPU                   = VmFaultToleranceConfigIssueReasonForIssue(\"multipleVCPU\")\n\tVmFaultToleranceConfigIssueReasonForIssueHostInactive                   = VmFaultToleranceConfigIssueReasonForIssue(\"hostInactive\")\n\tVmFaultToleranceConfigIssueReasonForIssueFtUnsupportedHardware          = VmFaultToleranceConfigIssueReasonForIssue(\"ftUnsupportedHardware\")\n\tVmFaultToleranceConfigIssueReasonForIssueFtUnsupportedProduct           = VmFaultToleranceConfigIssueReasonForIssue(\"ftUnsupportedProduct\")\n\tVmFaultToleranceConfigIssueReasonForIssueMissingVMotionNic              = VmFaultToleranceConfigIssueReasonForIssue(\"missingVMotionNic\")\n\tVmFaultToleranceConfigIssueReasonForIssueMissingFTLoggingNic            = VmFaultToleranceConfigIssueReasonForIssue(\"missingFTLoggingNic\")\n\tVmFaultToleranceConfigIssueReasonForIssueThinDisk                       = 
VmFaultToleranceConfigIssueReasonForIssue(\"thinDisk\")\n\tVmFaultToleranceConfigIssueReasonForIssueVerifySSLCertificateFlagNotSet = VmFaultToleranceConfigIssueReasonForIssue(\"verifySSLCertificateFlagNotSet\")\n\tVmFaultToleranceConfigIssueReasonForIssueHasSnapshots                   = VmFaultToleranceConfigIssueReasonForIssue(\"hasSnapshots\")\n\tVmFaultToleranceConfigIssueReasonForIssueNoConfig                       = VmFaultToleranceConfigIssueReasonForIssue(\"noConfig\")\n\tVmFaultToleranceConfigIssueReasonForIssueFtSecondaryVm                  = VmFaultToleranceConfigIssueReasonForIssue(\"ftSecondaryVm\")\n\tVmFaultToleranceConfigIssueReasonForIssueHasLocalDisk                   = VmFaultToleranceConfigIssueReasonForIssue(\"hasLocalDisk\")\n\tVmFaultToleranceConfigIssueReasonForIssueEsxAgentVm                     = VmFaultToleranceConfigIssueReasonForIssue(\"esxAgentVm\")\n\tVmFaultToleranceConfigIssueReasonForIssueVideo3dEnabled                 = VmFaultToleranceConfigIssueReasonForIssue(\"video3dEnabled\")\n\tVmFaultToleranceConfigIssueReasonForIssueHasUnsupportedDisk             = VmFaultToleranceConfigIssueReasonForIssue(\"hasUnsupportedDisk\")\n\tVmFaultToleranceConfigIssueReasonForIssueInsufficientBandwidth          = VmFaultToleranceConfigIssueReasonForIssue(\"insufficientBandwidth\")\n\tVmFaultToleranceConfigIssueReasonForIssueHasNestedHVConfiguration       = VmFaultToleranceConfigIssueReasonForIssue(\"hasNestedHVConfiguration\")\n\tVmFaultToleranceConfigIssueReasonForIssueHasVFlashConfiguration         = VmFaultToleranceConfigIssueReasonForIssue(\"hasVFlashConfiguration\")\n\tVmFaultToleranceConfigIssueReasonForIssueUnsupportedProduct             = VmFaultToleranceConfigIssueReasonForIssue(\"unsupportedProduct\")\n\tVmFaultToleranceConfigIssueReasonForIssueCpuHvUnsupported               = VmFaultToleranceConfigIssueReasonForIssue(\"cpuHvUnsupported\")\n\tVmFaultToleranceConfigIssueReasonForIssueCpuHwmmuUnsupported            = 
VmFaultToleranceConfigIssueReasonForIssue(\"cpuHwmmuUnsupported\")\n\tVmFaultToleranceConfigIssueReasonForIssueCpuHvDisabled                  = VmFaultToleranceConfigIssueReasonForIssue(\"cpuHvDisabled\")\n\tVmFaultToleranceConfigIssueReasonForIssueHasEFIFirmware                 = VmFaultToleranceConfigIssueReasonForIssue(\"hasEFIFirmware\")\n)\n\nfunc init() {\n\tt[\"VmFaultToleranceConfigIssueReasonForIssue\"] = reflect.TypeOf((*VmFaultToleranceConfigIssueReasonForIssue)(nil)).Elem()\n}\n\ntype VmFaultToleranceInvalidFileBackingDeviceType string\n\nconst (\n\tVmFaultToleranceInvalidFileBackingDeviceTypeVirtualFloppy       = VmFaultToleranceInvalidFileBackingDeviceType(\"virtualFloppy\")\n\tVmFaultToleranceInvalidFileBackingDeviceTypeVirtualCdrom        = VmFaultToleranceInvalidFileBackingDeviceType(\"virtualCdrom\")\n\tVmFaultToleranceInvalidFileBackingDeviceTypeVirtualSerialPort   = VmFaultToleranceInvalidFileBackingDeviceType(\"virtualSerialPort\")\n\tVmFaultToleranceInvalidFileBackingDeviceTypeVirtualParallelPort = VmFaultToleranceInvalidFileBackingDeviceType(\"virtualParallelPort\")\n\tVmFaultToleranceInvalidFileBackingDeviceTypeVirtualDisk         = VmFaultToleranceInvalidFileBackingDeviceType(\"virtualDisk\")\n)\n\nfunc init() {\n\tt[\"VmFaultToleranceInvalidFileBackingDeviceType\"] = reflect.TypeOf((*VmFaultToleranceInvalidFileBackingDeviceType)(nil)).Elem()\n}\n\ntype VmShutdownOnIsolationEventOperation string\n\nconst (\n\tVmShutdownOnIsolationEventOperationShutdown   = VmShutdownOnIsolationEventOperation(\"shutdown\")\n\tVmShutdownOnIsolationEventOperationPoweredOff = VmShutdownOnIsolationEventOperation(\"poweredOff\")\n)\n\nfunc init() {\n\tt[\"VmShutdownOnIsolationEventOperation\"] = reflect.TypeOf((*VmShutdownOnIsolationEventOperation)(nil)).Elem()\n}\n\ntype VmwareDistributedVirtualSwitchPvlanPortType string\n\nconst (\n\tVmwareDistributedVirtualSwitchPvlanPortTypePromiscuous = 
VmwareDistributedVirtualSwitchPvlanPortType(\"promiscuous\")\n\tVmwareDistributedVirtualSwitchPvlanPortTypeIsolated    = VmwareDistributedVirtualSwitchPvlanPortType(\"isolated\")\n\tVmwareDistributedVirtualSwitchPvlanPortTypeCommunity   = VmwareDistributedVirtualSwitchPvlanPortType(\"community\")\n)\n\nfunc init() {\n\tt[\"VmwareDistributedVirtualSwitchPvlanPortType\"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchPvlanPortType)(nil)).Elem()\n}\n\ntype VsanDiskIssueType string\n\nconst (\n\tVsanDiskIssueTypeNonExist      = VsanDiskIssueType(\"nonExist\")\n\tVsanDiskIssueTypeStampMismatch = VsanDiskIssueType(\"stampMismatch\")\n\tVsanDiskIssueTypeUnknown       = VsanDiskIssueType(\"unknown\")\n)\n\nfunc init() {\n\tt[\"VsanDiskIssueType\"] = reflect.TypeOf((*VsanDiskIssueType)(nil)).Elem()\n}\n\ntype VsanHostDecommissionModeObjectAction string\n\nconst (\n\tVsanHostDecommissionModeObjectActionNoAction                  = VsanHostDecommissionModeObjectAction(\"noAction\")\n\tVsanHostDecommissionModeObjectActionEnsureObjectAccessibility = VsanHostDecommissionModeObjectAction(\"ensureObjectAccessibility\")\n\tVsanHostDecommissionModeObjectActionEvacuateAllData           = VsanHostDecommissionModeObjectAction(\"evacuateAllData\")\n)\n\nfunc init() {\n\tt[\"VsanHostDecommissionModeObjectAction\"] = reflect.TypeOf((*VsanHostDecommissionModeObjectAction)(nil)).Elem()\n}\n\ntype VsanHostDiskResultState string\n\nconst (\n\tVsanHostDiskResultStateInUse      = VsanHostDiskResultState(\"inUse\")\n\tVsanHostDiskResultStateEligible   = VsanHostDiskResultState(\"eligible\")\n\tVsanHostDiskResultStateIneligible = VsanHostDiskResultState(\"ineligible\")\n)\n\nfunc init() {\n\tt[\"VsanHostDiskResultState\"] = reflect.TypeOf((*VsanHostDiskResultState)(nil)).Elem()\n}\n\ntype VsanHostHealthState string\n\nconst (\n\tVsanHostHealthStateUnknown   = VsanHostHealthState(\"unknown\")\n\tVsanHostHealthStateHealthy   = VsanHostHealthState(\"healthy\")\n\tVsanHostHealthStateUnhealthy = 
VsanHostHealthState(\"unhealthy\")\n)\n\nfunc init() {\n\tt[\"VsanHostHealthState\"] = reflect.TypeOf((*VsanHostHealthState)(nil)).Elem()\n}\n\ntype VsanHostNodeState string\n\nconst (\n\tVsanHostNodeStateError                   = VsanHostNodeState(\"error\")\n\tVsanHostNodeStateDisabled                = VsanHostNodeState(\"disabled\")\n\tVsanHostNodeStateAgent                   = VsanHostNodeState(\"agent\")\n\tVsanHostNodeStateMaster                  = VsanHostNodeState(\"master\")\n\tVsanHostNodeStateBackup                  = VsanHostNodeState(\"backup\")\n\tVsanHostNodeStateStarting                = VsanHostNodeState(\"starting\")\n\tVsanHostNodeStateStopping                = VsanHostNodeState(\"stopping\")\n\tVsanHostNodeStateEnteringMaintenanceMode = VsanHostNodeState(\"enteringMaintenanceMode\")\n\tVsanHostNodeStateExitingMaintenanceMode  = VsanHostNodeState(\"exitingMaintenanceMode\")\n\tVsanHostNodeStateDecommissioning         = VsanHostNodeState(\"decommissioning\")\n)\n\nfunc init() {\n\tt[\"VsanHostNodeState\"] = reflect.TypeOf((*VsanHostNodeState)(nil)).Elem()\n}\n\ntype VsanUpgradeSystemUpgradeHistoryDiskGroupOpType string\n\nconst (\n\tVsanUpgradeSystemUpgradeHistoryDiskGroupOpTypeAdd    = VsanUpgradeSystemUpgradeHistoryDiskGroupOpType(\"add\")\n\tVsanUpgradeSystemUpgradeHistoryDiskGroupOpTypeRemove = VsanUpgradeSystemUpgradeHistoryDiskGroupOpType(\"remove\")\n)\n\nfunc init() {\n\tt[\"VsanUpgradeSystemUpgradeHistoryDiskGroupOpType\"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryDiskGroupOpType)(nil)).Elem()\n}\n\ntype WeekOfMonth string\n\nconst (\n\tWeekOfMonthFirst  = WeekOfMonth(\"first\")\n\tWeekOfMonthSecond = WeekOfMonth(\"second\")\n\tWeekOfMonthThird  = WeekOfMonth(\"third\")\n\tWeekOfMonthFourth = WeekOfMonth(\"fourth\")\n\tWeekOfMonthLast   = WeekOfMonth(\"last\")\n)\n\nfunc init() {\n\tt[\"WeekOfMonth\"] = reflect.TypeOf((*WeekOfMonth)(nil)).Elem()\n}\n\ntype WillLoseHAProtectionResolution string\n\nconst 
(\n\tWillLoseHAProtectionResolutionSvmotion = WillLoseHAProtectionResolution(\"svmotion\")\n\tWillLoseHAProtectionResolutionRelocate = WillLoseHAProtectionResolution(\"relocate\")\n)\n\nfunc init() {\n\tt[\"WillLoseHAProtectionResolution\"] = reflect.TypeOf((*WillLoseHAProtectionResolution)(nil)).Elem()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/types/fault.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage types\n\ntype HasFault interface {\n\tFault() BaseMethodFault\n}\n\nfunc IsFileNotFound(err error) bool {\n\tif f, ok := err.(HasFault); ok {\n\t\tswitch f.Fault().(type) {\n\t\tcase *FileNotFound:\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/types/helpers.go",
    "content": "/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage types\n\nimport \"strings\"\n\nfunc NewBool(v bool) *bool {\n\treturn &v\n}\n\nfunc NewReference(r ManagedObjectReference) *ManagedObjectReference {\n\treturn &r\n}\n\nfunc (r ManagedObjectReference) Reference() ManagedObjectReference {\n\treturn r\n}\n\nfunc (r ManagedObjectReference) String() string {\n\treturn strings.Join([]string{r.Type, r.Value}, \":\")\n}\n\nfunc (r *ManagedObjectReference) FromString(o string) bool {\n\ts := strings.SplitN(o, \":\", 2)\n\n\tif len(s) < 2 {\n\t\treturn false\n\t}\n\n\tr.Type = s[0]\n\tr.Value = s[1]\n\n\treturn true\n}\n\nfunc (c *PerfCounterInfo) Name() string {\n\treturn c.GroupInfo.GetElementDescription().Key + \".\" + c.NameInfo.GetElementDescription().Key + \".\" + string(c.RollupType)\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/types/if.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage types\n\nimport \"reflect\"\n\nfunc (b *Action) GetAction() *Action { return b }\n\ntype BaseAction interface {\n\tGetAction() *Action\n}\n\nfunc init() {\n\tt[\"BaseAction\"] = reflect.TypeOf((*Action)(nil)).Elem()\n}\n\nfunc (b *ActiveDirectoryFault) GetActiveDirectoryFault() *ActiveDirectoryFault { return b }\n\ntype BaseActiveDirectoryFault interface {\n\tGetActiveDirectoryFault() *ActiveDirectoryFault\n}\n\nfunc init() {\n\tt[\"BaseActiveDirectoryFault\"] = reflect.TypeOf((*ActiveDirectoryFault)(nil)).Elem()\n}\n\nfunc (b *AlarmAction) GetAlarmAction() *AlarmAction { return b }\n\ntype BaseAlarmAction interface {\n\tGetAlarmAction() *AlarmAction\n}\n\nfunc init() {\n\tt[\"BaseAlarmAction\"] = reflect.TypeOf((*AlarmAction)(nil)).Elem()\n}\n\nfunc (b *AlarmEvent) GetAlarmEvent() *AlarmEvent { return b }\n\ntype BaseAlarmEvent interface {\n\tGetAlarmEvent() *AlarmEvent\n}\n\nfunc init() {\n\tt[\"BaseAlarmEvent\"] = reflect.TypeOf((*AlarmEvent)(nil)).Elem()\n}\n\nfunc (b *AlarmExpression) GetAlarmExpression() *AlarmExpression { return b }\n\ntype BaseAlarmExpression interface {\n\tGetAlarmExpression() *AlarmExpression\n}\n\nfunc init() {\n\tt[\"BaseAlarmExpression\"] = reflect.TypeOf((*AlarmExpression)(nil)).Elem()\n}\n\nfunc (b *AlarmSpec) GetAlarmSpec() *AlarmSpec { return b }\n\ntype BaseAlarmSpec interface {\n\tGetAlarmSpec() 
*AlarmSpec\n}\n\nfunc init() {\n\tt[\"BaseAlarmSpec\"] = reflect.TypeOf((*AlarmSpec)(nil)).Elem()\n}\n\nfunc (b *AnswerFileCreateSpec) GetAnswerFileCreateSpec() *AnswerFileCreateSpec { return b }\n\ntype BaseAnswerFileCreateSpec interface {\n\tGetAnswerFileCreateSpec() *AnswerFileCreateSpec\n}\n\nfunc init() {\n\tt[\"BaseAnswerFileCreateSpec\"] = reflect.TypeOf((*AnswerFileCreateSpec)(nil)).Elem()\n}\n\nfunc (b *ApplyProfile) GetApplyProfile() *ApplyProfile { return b }\n\ntype BaseApplyProfile interface {\n\tGetApplyProfile() *ApplyProfile\n}\n\nfunc init() {\n\tt[\"BaseApplyProfile\"] = reflect.TypeOf((*ApplyProfile)(nil)).Elem()\n}\n\nfunc (b *ArrayUpdateSpec) GetArrayUpdateSpec() *ArrayUpdateSpec { return b }\n\ntype BaseArrayUpdateSpec interface {\n\tGetArrayUpdateSpec() *ArrayUpdateSpec\n}\n\nfunc init() {\n\tt[\"BaseArrayUpdateSpec\"] = reflect.TypeOf((*ArrayUpdateSpec)(nil)).Elem()\n}\n\nfunc (b *AuthorizationEvent) GetAuthorizationEvent() *AuthorizationEvent { return b }\n\ntype BaseAuthorizationEvent interface {\n\tGetAuthorizationEvent() *AuthorizationEvent\n}\n\nfunc init() {\n\tt[\"BaseAuthorizationEvent\"] = reflect.TypeOf((*AuthorizationEvent)(nil)).Elem()\n}\n\nfunc (b *BaseConfigInfo) GetBaseConfigInfo() *BaseConfigInfo { return b }\n\ntype BaseBaseConfigInfo interface {\n\tGetBaseConfigInfo() *BaseConfigInfo\n}\n\nfunc init() {\n\tt[\"BaseBaseConfigInfo\"] = reflect.TypeOf((*BaseConfigInfo)(nil)).Elem()\n}\n\nfunc (b *BaseConfigInfoBackingInfo) GetBaseConfigInfoBackingInfo() *BaseConfigInfoBackingInfo {\n\treturn b\n}\n\ntype BaseBaseConfigInfoBackingInfo interface {\n\tGetBaseConfigInfoBackingInfo() *BaseConfigInfoBackingInfo\n}\n\nfunc init() {\n\tt[\"BaseBaseConfigInfoBackingInfo\"] = reflect.TypeOf((*BaseConfigInfoBackingInfo)(nil)).Elem()\n}\n\nfunc (b *BaseConfigInfoFileBackingInfo) GetBaseConfigInfoFileBackingInfo() *BaseConfigInfoFileBackingInfo {\n\treturn b\n}\n\ntype BaseBaseConfigInfoFileBackingInfo interface 
{\n\tGetBaseConfigInfoFileBackingInfo() *BaseConfigInfoFileBackingInfo\n}\n\nfunc init() {\n\tt[\"BaseBaseConfigInfoFileBackingInfo\"] = reflect.TypeOf((*BaseConfigInfoFileBackingInfo)(nil)).Elem()\n}\n\nfunc (b *CannotAccessNetwork) GetCannotAccessNetwork() *CannotAccessNetwork { return b }\n\ntype BaseCannotAccessNetwork interface {\n\tGetCannotAccessNetwork() *CannotAccessNetwork\n}\n\nfunc init() {\n\tt[\"BaseCannotAccessNetwork\"] = reflect.TypeOf((*CannotAccessNetwork)(nil)).Elem()\n}\n\nfunc (b *CannotAccessVmComponent) GetCannotAccessVmComponent() *CannotAccessVmComponent { return b }\n\ntype BaseCannotAccessVmComponent interface {\n\tGetCannotAccessVmComponent() *CannotAccessVmComponent\n}\n\nfunc init() {\n\tt[\"BaseCannotAccessVmComponent\"] = reflect.TypeOf((*CannotAccessVmComponent)(nil)).Elem()\n}\n\nfunc (b *CannotAccessVmDevice) GetCannotAccessVmDevice() *CannotAccessVmDevice { return b }\n\ntype BaseCannotAccessVmDevice interface {\n\tGetCannotAccessVmDevice() *CannotAccessVmDevice\n}\n\nfunc init() {\n\tt[\"BaseCannotAccessVmDevice\"] = reflect.TypeOf((*CannotAccessVmDevice)(nil)).Elem()\n}\n\nfunc (b *CannotAccessVmDisk) GetCannotAccessVmDisk() *CannotAccessVmDisk { return b }\n\ntype BaseCannotAccessVmDisk interface {\n\tGetCannotAccessVmDisk() *CannotAccessVmDisk\n}\n\nfunc init() {\n\tt[\"BaseCannotAccessVmDisk\"] = reflect.TypeOf((*CannotAccessVmDisk)(nil)).Elem()\n}\n\nfunc (b *CannotMoveVsanEnabledHost) GetCannotMoveVsanEnabledHost() *CannotMoveVsanEnabledHost {\n\treturn b\n}\n\ntype BaseCannotMoveVsanEnabledHost interface {\n\tGetCannotMoveVsanEnabledHost() *CannotMoveVsanEnabledHost\n}\n\nfunc init() {\n\tt[\"BaseCannotMoveVsanEnabledHost\"] = reflect.TypeOf((*CannotMoveVsanEnabledHost)(nil)).Elem()\n}\n\nfunc (b *ClusterAction) GetClusterAction() *ClusterAction { return b }\n\ntype BaseClusterAction interface {\n\tGetClusterAction() *ClusterAction\n}\n\nfunc init() {\n\tt[\"BaseClusterAction\"] = 
reflect.TypeOf((*ClusterAction)(nil)).Elem()\n}\n\nfunc (b *ClusterDasAdmissionControlInfo) GetClusterDasAdmissionControlInfo() *ClusterDasAdmissionControlInfo {\n\treturn b\n}\n\ntype BaseClusterDasAdmissionControlInfo interface {\n\tGetClusterDasAdmissionControlInfo() *ClusterDasAdmissionControlInfo\n}\n\nfunc init() {\n\tt[\"BaseClusterDasAdmissionControlInfo\"] = reflect.TypeOf((*ClusterDasAdmissionControlInfo)(nil)).Elem()\n}\n\nfunc (b *ClusterDasAdmissionControlPolicy) GetClusterDasAdmissionControlPolicy() *ClusterDasAdmissionControlPolicy {\n\treturn b\n}\n\ntype BaseClusterDasAdmissionControlPolicy interface {\n\tGetClusterDasAdmissionControlPolicy() *ClusterDasAdmissionControlPolicy\n}\n\nfunc init() {\n\tt[\"BaseClusterDasAdmissionControlPolicy\"] = reflect.TypeOf((*ClusterDasAdmissionControlPolicy)(nil)).Elem()\n}\n\nfunc (b *ClusterDasAdvancedRuntimeInfo) GetClusterDasAdvancedRuntimeInfo() *ClusterDasAdvancedRuntimeInfo {\n\treturn b\n}\n\ntype BaseClusterDasAdvancedRuntimeInfo interface {\n\tGetClusterDasAdvancedRuntimeInfo() *ClusterDasAdvancedRuntimeInfo\n}\n\nfunc init() {\n\tt[\"BaseClusterDasAdvancedRuntimeInfo\"] = reflect.TypeOf((*ClusterDasAdvancedRuntimeInfo)(nil)).Elem()\n}\n\nfunc (b *ClusterDasData) GetClusterDasData() *ClusterDasData { return b }\n\ntype BaseClusterDasData interface {\n\tGetClusterDasData() *ClusterDasData\n}\n\nfunc init() {\n\tt[\"BaseClusterDasData\"] = reflect.TypeOf((*ClusterDasData)(nil)).Elem()\n}\n\nfunc (b *ClusterDasHostInfo) GetClusterDasHostInfo() *ClusterDasHostInfo { return b }\n\ntype BaseClusterDasHostInfo interface {\n\tGetClusterDasHostInfo() *ClusterDasHostInfo\n}\n\nfunc init() {\n\tt[\"BaseClusterDasHostInfo\"] = reflect.TypeOf((*ClusterDasHostInfo)(nil)).Elem()\n}\n\nfunc (b *ClusterDrsFaultsFaultsByVm) GetClusterDrsFaultsFaultsByVm() *ClusterDrsFaultsFaultsByVm {\n\treturn b\n}\n\ntype BaseClusterDrsFaultsFaultsByVm interface {\n\tGetClusterDrsFaultsFaultsByVm() 
*ClusterDrsFaultsFaultsByVm\n}\n\nfunc init() {\n\tt[\"BaseClusterDrsFaultsFaultsByVm\"] = reflect.TypeOf((*ClusterDrsFaultsFaultsByVm)(nil)).Elem()\n}\n\nfunc (b *ClusterEvent) GetClusterEvent() *ClusterEvent { return b }\n\ntype BaseClusterEvent interface {\n\tGetClusterEvent() *ClusterEvent\n}\n\nfunc init() {\n\tt[\"BaseClusterEvent\"] = reflect.TypeOf((*ClusterEvent)(nil)).Elem()\n}\n\nfunc (b *ClusterGroupInfo) GetClusterGroupInfo() *ClusterGroupInfo { return b }\n\ntype BaseClusterGroupInfo interface {\n\tGetClusterGroupInfo() *ClusterGroupInfo\n}\n\nfunc init() {\n\tt[\"BaseClusterGroupInfo\"] = reflect.TypeOf((*ClusterGroupInfo)(nil)).Elem()\n}\n\nfunc (b *ClusterOvercommittedEvent) GetClusterOvercommittedEvent() *ClusterOvercommittedEvent {\n\treturn b\n}\n\ntype BaseClusterOvercommittedEvent interface {\n\tGetClusterOvercommittedEvent() *ClusterOvercommittedEvent\n}\n\nfunc init() {\n\tt[\"BaseClusterOvercommittedEvent\"] = reflect.TypeOf((*ClusterOvercommittedEvent)(nil)).Elem()\n}\n\nfunc (b *ClusterProfileConfigSpec) GetClusterProfileConfigSpec() *ClusterProfileConfigSpec { return b }\n\ntype BaseClusterProfileConfigSpec interface {\n\tGetClusterProfileConfigSpec() *ClusterProfileConfigSpec\n}\n\nfunc init() {\n\tt[\"BaseClusterProfileConfigSpec\"] = reflect.TypeOf((*ClusterProfileConfigSpec)(nil)).Elem()\n}\n\nfunc (b *ClusterProfileCreateSpec) GetClusterProfileCreateSpec() *ClusterProfileCreateSpec { return b }\n\ntype BaseClusterProfileCreateSpec interface {\n\tGetClusterProfileCreateSpec() *ClusterProfileCreateSpec\n}\n\nfunc init() {\n\tt[\"BaseClusterProfileCreateSpec\"] = reflect.TypeOf((*ClusterProfileCreateSpec)(nil)).Elem()\n}\n\nfunc (b *ClusterRuleInfo) GetClusterRuleInfo() *ClusterRuleInfo { return b }\n\ntype BaseClusterRuleInfo interface {\n\tGetClusterRuleInfo() *ClusterRuleInfo\n}\n\nfunc init() {\n\tt[\"BaseClusterRuleInfo\"] = reflect.TypeOf((*ClusterRuleInfo)(nil)).Elem()\n}\n\nfunc (b *ClusterSlotPolicy) GetClusterSlotPolicy() 
*ClusterSlotPolicy { return b }\n\ntype BaseClusterSlotPolicy interface {\n\tGetClusterSlotPolicy() *ClusterSlotPolicy\n}\n\nfunc init() {\n\tt[\"BaseClusterSlotPolicy\"] = reflect.TypeOf((*ClusterSlotPolicy)(nil)).Elem()\n}\n\nfunc (b *ClusterStatusChangedEvent) GetClusterStatusChangedEvent() *ClusterStatusChangedEvent {\n\treturn b\n}\n\ntype BaseClusterStatusChangedEvent interface {\n\tGetClusterStatusChangedEvent() *ClusterStatusChangedEvent\n}\n\nfunc init() {\n\tt[\"BaseClusterStatusChangedEvent\"] = reflect.TypeOf((*ClusterStatusChangedEvent)(nil)).Elem()\n}\n\nfunc (b *ComputeResourceConfigInfo) GetComputeResourceConfigInfo() *ComputeResourceConfigInfo {\n\treturn b\n}\n\ntype BaseComputeResourceConfigInfo interface {\n\tGetComputeResourceConfigInfo() *ComputeResourceConfigInfo\n}\n\nfunc init() {\n\tt[\"BaseComputeResourceConfigInfo\"] = reflect.TypeOf((*ComputeResourceConfigInfo)(nil)).Elem()\n}\n\nfunc (b *ComputeResourceConfigSpec) GetComputeResourceConfigSpec() *ComputeResourceConfigSpec {\n\treturn b\n}\n\ntype BaseComputeResourceConfigSpec interface {\n\tGetComputeResourceConfigSpec() *ComputeResourceConfigSpec\n}\n\nfunc init() {\n\tt[\"BaseComputeResourceConfigSpec\"] = reflect.TypeOf((*ComputeResourceConfigSpec)(nil)).Elem()\n}\n\nfunc (b *ComputeResourceSummary) GetComputeResourceSummary() *ComputeResourceSummary { return b }\n\ntype BaseComputeResourceSummary interface {\n\tGetComputeResourceSummary() *ComputeResourceSummary\n}\n\nfunc init() {\n\tt[\"BaseComputeResourceSummary\"] = reflect.TypeOf((*ComputeResourceSummary)(nil)).Elem()\n}\n\nfunc (b *CpuIncompatible) GetCpuIncompatible() *CpuIncompatible { return b }\n\ntype BaseCpuIncompatible interface {\n\tGetCpuIncompatible() *CpuIncompatible\n}\n\nfunc init() {\n\tt[\"BaseCpuIncompatible\"] = reflect.TypeOf((*CpuIncompatible)(nil)).Elem()\n}\n\nfunc (b *CryptoSpec) GetCryptoSpec() *CryptoSpec { return b }\n\ntype BaseCryptoSpec interface {\n\tGetCryptoSpec() *CryptoSpec\n}\n\nfunc init() 
{\n\tt[\"BaseCryptoSpec\"] = reflect.TypeOf((*CryptoSpec)(nil)).Elem()\n}\n\nfunc (b *CryptoSpecNoOp) GetCryptoSpecNoOp() *CryptoSpecNoOp { return b }\n\ntype BaseCryptoSpecNoOp interface {\n\tGetCryptoSpecNoOp() *CryptoSpecNoOp\n}\n\nfunc init() {\n\tt[\"BaseCryptoSpecNoOp\"] = reflect.TypeOf((*CryptoSpecNoOp)(nil)).Elem()\n}\n\nfunc (b *CustomFieldDefEvent) GetCustomFieldDefEvent() *CustomFieldDefEvent { return b }\n\ntype BaseCustomFieldDefEvent interface {\n\tGetCustomFieldDefEvent() *CustomFieldDefEvent\n}\n\nfunc init() {\n\tt[\"BaseCustomFieldDefEvent\"] = reflect.TypeOf((*CustomFieldDefEvent)(nil)).Elem()\n}\n\nfunc (b *CustomFieldEvent) GetCustomFieldEvent() *CustomFieldEvent { return b }\n\ntype BaseCustomFieldEvent interface {\n\tGetCustomFieldEvent() *CustomFieldEvent\n}\n\nfunc init() {\n\tt[\"BaseCustomFieldEvent\"] = reflect.TypeOf((*CustomFieldEvent)(nil)).Elem()\n}\n\nfunc (b *CustomFieldValue) GetCustomFieldValue() *CustomFieldValue { return b }\n\ntype BaseCustomFieldValue interface {\n\tGetCustomFieldValue() *CustomFieldValue\n}\n\nfunc init() {\n\tt[\"BaseCustomFieldValue\"] = reflect.TypeOf((*CustomFieldValue)(nil)).Elem()\n}\n\nfunc (b *CustomizationEvent) GetCustomizationEvent() *CustomizationEvent { return b }\n\ntype BaseCustomizationEvent interface {\n\tGetCustomizationEvent() *CustomizationEvent\n}\n\nfunc init() {\n\tt[\"BaseCustomizationEvent\"] = reflect.TypeOf((*CustomizationEvent)(nil)).Elem()\n}\n\nfunc (b *CustomizationFailed) GetCustomizationFailed() *CustomizationFailed { return b }\n\ntype BaseCustomizationFailed interface {\n\tGetCustomizationFailed() *CustomizationFailed\n}\n\nfunc init() {\n\tt[\"BaseCustomizationFailed\"] = reflect.TypeOf((*CustomizationFailed)(nil)).Elem()\n}\n\nfunc (b *CustomizationFault) GetCustomizationFault() *CustomizationFault { return b }\n\ntype BaseCustomizationFault interface {\n\tGetCustomizationFault() *CustomizationFault\n}\n\nfunc init() {\n\tt[\"BaseCustomizationFault\"] = 
reflect.TypeOf((*CustomizationFault)(nil)).Elem()\n}\n\nfunc (b *CustomizationIdentitySettings) GetCustomizationIdentitySettings() *CustomizationIdentitySettings {\n\treturn b\n}\n\ntype BaseCustomizationIdentitySettings interface {\n\tGetCustomizationIdentitySettings() *CustomizationIdentitySettings\n}\n\nfunc init() {\n\tt[\"BaseCustomizationIdentitySettings\"] = reflect.TypeOf((*CustomizationIdentitySettings)(nil)).Elem()\n}\n\nfunc (b *CustomizationIpGenerator) GetCustomizationIpGenerator() *CustomizationIpGenerator { return b }\n\ntype BaseCustomizationIpGenerator interface {\n\tGetCustomizationIpGenerator() *CustomizationIpGenerator\n}\n\nfunc init() {\n\tt[\"BaseCustomizationIpGenerator\"] = reflect.TypeOf((*CustomizationIpGenerator)(nil)).Elem()\n}\n\nfunc (b *CustomizationIpV6Generator) GetCustomizationIpV6Generator() *CustomizationIpV6Generator {\n\treturn b\n}\n\ntype BaseCustomizationIpV6Generator interface {\n\tGetCustomizationIpV6Generator() *CustomizationIpV6Generator\n}\n\nfunc init() {\n\tt[\"BaseCustomizationIpV6Generator\"] = reflect.TypeOf((*CustomizationIpV6Generator)(nil)).Elem()\n}\n\nfunc (b *CustomizationName) GetCustomizationName() *CustomizationName { return b }\n\ntype BaseCustomizationName interface {\n\tGetCustomizationName() *CustomizationName\n}\n\nfunc init() {\n\tt[\"BaseCustomizationName\"] = reflect.TypeOf((*CustomizationName)(nil)).Elem()\n}\n\nfunc (b *CustomizationOptions) GetCustomizationOptions() *CustomizationOptions { return b }\n\ntype BaseCustomizationOptions interface {\n\tGetCustomizationOptions() *CustomizationOptions\n}\n\nfunc init() {\n\tt[\"BaseCustomizationOptions\"] = reflect.TypeOf((*CustomizationOptions)(nil)).Elem()\n}\n\nfunc (b *DVPortSetting) GetDVPortSetting() *DVPortSetting { return b }\n\ntype BaseDVPortSetting interface {\n\tGetDVPortSetting() *DVPortSetting\n}\n\nfunc init() {\n\tt[\"BaseDVPortSetting\"] = reflect.TypeOf((*DVPortSetting)(nil)).Elem()\n}\n\nfunc (b *DVPortgroupEvent) 
GetDVPortgroupEvent() *DVPortgroupEvent { return b }\n\ntype BaseDVPortgroupEvent interface {\n\tGetDVPortgroupEvent() *DVPortgroupEvent\n}\n\nfunc init() {\n\tt[\"BaseDVPortgroupEvent\"] = reflect.TypeOf((*DVPortgroupEvent)(nil)).Elem()\n}\n\nfunc (b *DVPortgroupPolicy) GetDVPortgroupPolicy() *DVPortgroupPolicy { return b }\n\ntype BaseDVPortgroupPolicy interface {\n\tGetDVPortgroupPolicy() *DVPortgroupPolicy\n}\n\nfunc init() {\n\tt[\"BaseDVPortgroupPolicy\"] = reflect.TypeOf((*DVPortgroupPolicy)(nil)).Elem()\n}\n\nfunc (b *DVSConfigInfo) GetDVSConfigInfo() *DVSConfigInfo { return b }\n\ntype BaseDVSConfigInfo interface {\n\tGetDVSConfigInfo() *DVSConfigInfo\n}\n\nfunc init() {\n\tt[\"BaseDVSConfigInfo\"] = reflect.TypeOf((*DVSConfigInfo)(nil)).Elem()\n}\n\nfunc (b *DVSConfigSpec) GetDVSConfigSpec() *DVSConfigSpec { return b }\n\ntype BaseDVSConfigSpec interface {\n\tGetDVSConfigSpec() *DVSConfigSpec\n}\n\nfunc init() {\n\tt[\"BaseDVSConfigSpec\"] = reflect.TypeOf((*DVSConfigSpec)(nil)).Elem()\n}\n\nfunc (b *DVSFeatureCapability) GetDVSFeatureCapability() *DVSFeatureCapability { return b }\n\ntype BaseDVSFeatureCapability interface {\n\tGetDVSFeatureCapability() *DVSFeatureCapability\n}\n\nfunc init() {\n\tt[\"BaseDVSFeatureCapability\"] = reflect.TypeOf((*DVSFeatureCapability)(nil)).Elem()\n}\n\nfunc (b *DVSHealthCheckCapability) GetDVSHealthCheckCapability() *DVSHealthCheckCapability { return b }\n\ntype BaseDVSHealthCheckCapability interface {\n\tGetDVSHealthCheckCapability() *DVSHealthCheckCapability\n}\n\nfunc init() {\n\tt[\"BaseDVSHealthCheckCapability\"] = reflect.TypeOf((*DVSHealthCheckCapability)(nil)).Elem()\n}\n\nfunc (b *DVSHealthCheckConfig) GetDVSHealthCheckConfig() *DVSHealthCheckConfig { return b }\n\ntype BaseDVSHealthCheckConfig interface {\n\tGetDVSHealthCheckConfig() *DVSHealthCheckConfig\n}\n\nfunc init() {\n\tt[\"BaseDVSHealthCheckConfig\"] = reflect.TypeOf((*DVSHealthCheckConfig)(nil)).Elem()\n}\n\nfunc (b *DVSUplinkPortPolicy) 
GetDVSUplinkPortPolicy() *DVSUplinkPortPolicy { return b }\n\ntype BaseDVSUplinkPortPolicy interface {\n\tGetDVSUplinkPortPolicy() *DVSUplinkPortPolicy\n}\n\nfunc init() {\n\tt[\"BaseDVSUplinkPortPolicy\"] = reflect.TypeOf((*DVSUplinkPortPolicy)(nil)).Elem()\n}\n\nfunc (b *DailyTaskScheduler) GetDailyTaskScheduler() *DailyTaskScheduler { return b }\n\ntype BaseDailyTaskScheduler interface {\n\tGetDailyTaskScheduler() *DailyTaskScheduler\n}\n\nfunc init() {\n\tt[\"BaseDailyTaskScheduler\"] = reflect.TypeOf((*DailyTaskScheduler)(nil)).Elem()\n}\n\nfunc (b *DatacenterEvent) GetDatacenterEvent() *DatacenterEvent { return b }\n\ntype BaseDatacenterEvent interface {\n\tGetDatacenterEvent() *DatacenterEvent\n}\n\nfunc init() {\n\tt[\"BaseDatacenterEvent\"] = reflect.TypeOf((*DatacenterEvent)(nil)).Elem()\n}\n\nfunc (b *DatastoreEvent) GetDatastoreEvent() *DatastoreEvent { return b }\n\ntype BaseDatastoreEvent interface {\n\tGetDatastoreEvent() *DatastoreEvent\n}\n\nfunc init() {\n\tt[\"BaseDatastoreEvent\"] = reflect.TypeOf((*DatastoreEvent)(nil)).Elem()\n}\n\nfunc (b *DatastoreFileEvent) GetDatastoreFileEvent() *DatastoreFileEvent { return b }\n\ntype BaseDatastoreFileEvent interface {\n\tGetDatastoreFileEvent() *DatastoreFileEvent\n}\n\nfunc init() {\n\tt[\"BaseDatastoreFileEvent\"] = reflect.TypeOf((*DatastoreFileEvent)(nil)).Elem()\n}\n\nfunc (b *DatastoreInfo) GetDatastoreInfo() *DatastoreInfo { return b }\n\ntype BaseDatastoreInfo interface {\n\tGetDatastoreInfo() *DatastoreInfo\n}\n\nfunc init() {\n\tt[\"BaseDatastoreInfo\"] = reflect.TypeOf((*DatastoreInfo)(nil)).Elem()\n}\n\nfunc (b *DatastoreNotWritableOnHost) GetDatastoreNotWritableOnHost() *DatastoreNotWritableOnHost {\n\treturn b\n}\n\ntype BaseDatastoreNotWritableOnHost interface {\n\tGetDatastoreNotWritableOnHost() *DatastoreNotWritableOnHost\n}\n\nfunc init() {\n\tt[\"BaseDatastoreNotWritableOnHost\"] = reflect.TypeOf((*DatastoreNotWritableOnHost)(nil)).Elem()\n}\n\nfunc (b *Description) GetDescription() 
*Description { return b }\n\ntype BaseDescription interface {\n\tGetDescription() *Description\n}\n\nfunc init() {\n\tt[\"BaseDescription\"] = reflect.TypeOf((*Description)(nil)).Elem()\n}\n\nfunc (b *DeviceBackingNotSupported) GetDeviceBackingNotSupported() *DeviceBackingNotSupported {\n\treturn b\n}\n\ntype BaseDeviceBackingNotSupported interface {\n\tGetDeviceBackingNotSupported() *DeviceBackingNotSupported\n}\n\nfunc init() {\n\tt[\"BaseDeviceBackingNotSupported\"] = reflect.TypeOf((*DeviceBackingNotSupported)(nil)).Elem()\n}\n\nfunc (b *DeviceNotSupported) GetDeviceNotSupported() *DeviceNotSupported { return b }\n\ntype BaseDeviceNotSupported interface {\n\tGetDeviceNotSupported() *DeviceNotSupported\n}\n\nfunc init() {\n\tt[\"BaseDeviceNotSupported\"] = reflect.TypeOf((*DeviceNotSupported)(nil)).Elem()\n}\n\nfunc (b *DiskNotSupported) GetDiskNotSupported() *DiskNotSupported { return b }\n\ntype BaseDiskNotSupported interface {\n\tGetDiskNotSupported() *DiskNotSupported\n}\n\nfunc init() {\n\tt[\"BaseDiskNotSupported\"] = reflect.TypeOf((*DiskNotSupported)(nil)).Elem()\n}\n\nfunc (b *DistributedVirtualSwitchHostMemberBacking) GetDistributedVirtualSwitchHostMemberBacking() *DistributedVirtualSwitchHostMemberBacking {\n\treturn b\n}\n\ntype BaseDistributedVirtualSwitchHostMemberBacking interface {\n\tGetDistributedVirtualSwitchHostMemberBacking() *DistributedVirtualSwitchHostMemberBacking\n}\n\nfunc init() {\n\tt[\"BaseDistributedVirtualSwitchHostMemberBacking\"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberBacking)(nil)).Elem()\n}\n\nfunc (b *DistributedVirtualSwitchManagerHostDvsFilterSpec) GetDistributedVirtualSwitchManagerHostDvsFilterSpec() *DistributedVirtualSwitchManagerHostDvsFilterSpec {\n\treturn b\n}\n\ntype BaseDistributedVirtualSwitchManagerHostDvsFilterSpec interface {\n\tGetDistributedVirtualSwitchManagerHostDvsFilterSpec() *DistributedVirtualSwitchManagerHostDvsFilterSpec\n}\n\nfunc init() 
{\n\tt[\"BaseDistributedVirtualSwitchManagerHostDvsFilterSpec\"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostDvsFilterSpec)(nil)).Elem()\n}\n\nfunc (b *DvsEvent) GetDvsEvent() *DvsEvent { return b }\n\ntype BaseDvsEvent interface {\n\tGetDvsEvent() *DvsEvent\n}\n\nfunc init() {\n\tt[\"BaseDvsEvent\"] = reflect.TypeOf((*DvsEvent)(nil)).Elem()\n}\n\nfunc (b *DvsFault) GetDvsFault() *DvsFault { return b }\n\ntype BaseDvsFault interface {\n\tGetDvsFault() *DvsFault\n}\n\nfunc init() {\n\tt[\"BaseDvsFault\"] = reflect.TypeOf((*DvsFault)(nil)).Elem()\n}\n\nfunc (b *DvsFilterConfig) GetDvsFilterConfig() *DvsFilterConfig { return b }\n\ntype BaseDvsFilterConfig interface {\n\tGetDvsFilterConfig() *DvsFilterConfig\n}\n\nfunc init() {\n\tt[\"BaseDvsFilterConfig\"] = reflect.TypeOf((*DvsFilterConfig)(nil)).Elem()\n}\n\nfunc (b *DvsHealthStatusChangeEvent) GetDvsHealthStatusChangeEvent() *DvsHealthStatusChangeEvent {\n\treturn b\n}\n\ntype BaseDvsHealthStatusChangeEvent interface {\n\tGetDvsHealthStatusChangeEvent() *DvsHealthStatusChangeEvent\n}\n\nfunc init() {\n\tt[\"BaseDvsHealthStatusChangeEvent\"] = reflect.TypeOf((*DvsHealthStatusChangeEvent)(nil)).Elem()\n}\n\nfunc (b *DvsIpPort) GetDvsIpPort() *DvsIpPort { return b }\n\ntype BaseDvsIpPort interface {\n\tGetDvsIpPort() *DvsIpPort\n}\n\nfunc init() {\n\tt[\"BaseDvsIpPort\"] = reflect.TypeOf((*DvsIpPort)(nil)).Elem()\n}\n\nfunc (b *DvsNetworkRuleAction) GetDvsNetworkRuleAction() *DvsNetworkRuleAction { return b }\n\ntype BaseDvsNetworkRuleAction interface {\n\tGetDvsNetworkRuleAction() *DvsNetworkRuleAction\n}\n\nfunc init() {\n\tt[\"BaseDvsNetworkRuleAction\"] = reflect.TypeOf((*DvsNetworkRuleAction)(nil)).Elem()\n}\n\nfunc (b *DvsNetworkRuleQualifier) GetDvsNetworkRuleQualifier() *DvsNetworkRuleQualifier { return b }\n\ntype BaseDvsNetworkRuleQualifier interface {\n\tGetDvsNetworkRuleQualifier() *DvsNetworkRuleQualifier\n}\n\nfunc init() {\n\tt[\"BaseDvsNetworkRuleQualifier\"] = 
reflect.TypeOf((*DvsNetworkRuleQualifier)(nil)).Elem()\n}\n\nfunc (b *DvsTrafficFilterConfig) GetDvsTrafficFilterConfig() *DvsTrafficFilterConfig { return b }\n\ntype BaseDvsTrafficFilterConfig interface {\n\tGetDvsTrafficFilterConfig() *DvsTrafficFilterConfig\n}\n\nfunc init() {\n\tt[\"BaseDvsTrafficFilterConfig\"] = reflect.TypeOf((*DvsTrafficFilterConfig)(nil)).Elem()\n}\n\nfunc (b *DvsVNicProfile) GetDvsVNicProfile() *DvsVNicProfile { return b }\n\ntype BaseDvsVNicProfile interface {\n\tGetDvsVNicProfile() *DvsVNicProfile\n}\n\nfunc init() {\n\tt[\"BaseDvsVNicProfile\"] = reflect.TypeOf((*DvsVNicProfile)(nil)).Elem()\n}\n\nfunc (b *DynamicData) GetDynamicData() *DynamicData { return b }\n\ntype BaseDynamicData interface {\n\tGetDynamicData() *DynamicData\n}\n\nfunc init() {\n\tt[\"BaseDynamicData\"] = reflect.TypeOf((*DynamicData)(nil)).Elem()\n}\n\nfunc (b *EVCAdmissionFailed) GetEVCAdmissionFailed() *EVCAdmissionFailed { return b }\n\ntype BaseEVCAdmissionFailed interface {\n\tGetEVCAdmissionFailed() *EVCAdmissionFailed\n}\n\nfunc init() {\n\tt[\"BaseEVCAdmissionFailed\"] = reflect.TypeOf((*EVCAdmissionFailed)(nil)).Elem()\n}\n\nfunc (b *EVCConfigFault) GetEVCConfigFault() *EVCConfigFault { return b }\n\ntype BaseEVCConfigFault interface {\n\tGetEVCConfigFault() *EVCConfigFault\n}\n\nfunc init() {\n\tt[\"BaseEVCConfigFault\"] = reflect.TypeOf((*EVCConfigFault)(nil)).Elem()\n}\n\nfunc (b *ElementDescription) GetElementDescription() *ElementDescription { return b }\n\ntype BaseElementDescription interface {\n\tGetElementDescription() *ElementDescription\n}\n\nfunc init() {\n\tt[\"BaseElementDescription\"] = reflect.TypeOf((*ElementDescription)(nil)).Elem()\n}\n\nfunc (b *EnteredStandbyModeEvent) GetEnteredStandbyModeEvent() *EnteredStandbyModeEvent { return b }\n\ntype BaseEnteredStandbyModeEvent interface {\n\tGetEnteredStandbyModeEvent() *EnteredStandbyModeEvent\n}\n\nfunc init() {\n\tt[\"BaseEnteredStandbyModeEvent\"] = 
reflect.TypeOf((*EnteredStandbyModeEvent)(nil)).Elem()\n}\n\nfunc (b *EnteringStandbyModeEvent) GetEnteringStandbyModeEvent() *EnteringStandbyModeEvent { return b }\n\ntype BaseEnteringStandbyModeEvent interface {\n\tGetEnteringStandbyModeEvent() *EnteringStandbyModeEvent\n}\n\nfunc init() {\n\tt[\"BaseEnteringStandbyModeEvent\"] = reflect.TypeOf((*EnteringStandbyModeEvent)(nil)).Elem()\n}\n\nfunc (b *EntityEventArgument) GetEntityEventArgument() *EntityEventArgument { return b }\n\ntype BaseEntityEventArgument interface {\n\tGetEntityEventArgument() *EntityEventArgument\n}\n\nfunc init() {\n\tt[\"BaseEntityEventArgument\"] = reflect.TypeOf((*EntityEventArgument)(nil)).Elem()\n}\n\nfunc (b *Event) GetEvent() *Event { return b }\n\ntype BaseEvent interface {\n\tGetEvent() *Event\n}\n\nfunc init() {\n\tt[\"BaseEvent\"] = reflect.TypeOf((*Event)(nil)).Elem()\n}\n\nfunc (b *EventArgument) GetEventArgument() *EventArgument { return b }\n\ntype BaseEventArgument interface {\n\tGetEventArgument() *EventArgument\n}\n\nfunc init() {\n\tt[\"BaseEventArgument\"] = reflect.TypeOf((*EventArgument)(nil)).Elem()\n}\n\nfunc (b *ExitStandbyModeFailedEvent) GetExitStandbyModeFailedEvent() *ExitStandbyModeFailedEvent {\n\treturn b\n}\n\ntype BaseExitStandbyModeFailedEvent interface {\n\tGetExitStandbyModeFailedEvent() *ExitStandbyModeFailedEvent\n}\n\nfunc init() {\n\tt[\"BaseExitStandbyModeFailedEvent\"] = reflect.TypeOf((*ExitStandbyModeFailedEvent)(nil)).Elem()\n}\n\nfunc (b *ExitedStandbyModeEvent) GetExitedStandbyModeEvent() *ExitedStandbyModeEvent { return b }\n\ntype BaseExitedStandbyModeEvent interface {\n\tGetExitedStandbyModeEvent() *ExitedStandbyModeEvent\n}\n\nfunc init() {\n\tt[\"BaseExitedStandbyModeEvent\"] = reflect.TypeOf((*ExitedStandbyModeEvent)(nil)).Elem()\n}\n\nfunc (b *ExitingStandbyModeEvent) GetExitingStandbyModeEvent() *ExitingStandbyModeEvent { return b }\n\ntype BaseExitingStandbyModeEvent interface {\n\tGetExitingStandbyModeEvent() 
*ExitingStandbyModeEvent\n}\n\nfunc init() {\n\tt[\"BaseExitingStandbyModeEvent\"] = reflect.TypeOf((*ExitingStandbyModeEvent)(nil)).Elem()\n}\n\nfunc (b *ExpiredFeatureLicense) GetExpiredFeatureLicense() *ExpiredFeatureLicense { return b }\n\ntype BaseExpiredFeatureLicense interface {\n\tGetExpiredFeatureLicense() *ExpiredFeatureLicense\n}\n\nfunc init() {\n\tt[\"BaseExpiredFeatureLicense\"] = reflect.TypeOf((*ExpiredFeatureLicense)(nil)).Elem()\n}\n\nfunc (b *FaultToleranceConfigInfo) GetFaultToleranceConfigInfo() *FaultToleranceConfigInfo { return b }\n\ntype BaseFaultToleranceConfigInfo interface {\n\tGetFaultToleranceConfigInfo() *FaultToleranceConfigInfo\n}\n\nfunc init() {\n\tt[\"BaseFaultToleranceConfigInfo\"] = reflect.TypeOf((*FaultToleranceConfigInfo)(nil)).Elem()\n}\n\nfunc (b *FcoeFault) GetFcoeFault() *FcoeFault { return b }\n\ntype BaseFcoeFault interface {\n\tGetFcoeFault() *FcoeFault\n}\n\nfunc init() {\n\tt[\"BaseFcoeFault\"] = reflect.TypeOf((*FcoeFault)(nil)).Elem()\n}\n\nfunc (b *FileBackedVirtualDiskSpec) GetFileBackedVirtualDiskSpec() *FileBackedVirtualDiskSpec {\n\treturn b\n}\n\ntype BaseFileBackedVirtualDiskSpec interface {\n\tGetFileBackedVirtualDiskSpec() *FileBackedVirtualDiskSpec\n}\n\nfunc init() {\n\tt[\"BaseFileBackedVirtualDiskSpec\"] = reflect.TypeOf((*FileBackedVirtualDiskSpec)(nil)).Elem()\n}\n\nfunc (b *FileFault) GetFileFault() *FileFault { return b }\n\ntype BaseFileFault interface {\n\tGetFileFault() *FileFault\n}\n\nfunc init() {\n\tt[\"BaseFileFault\"] = reflect.TypeOf((*FileFault)(nil)).Elem()\n}\n\nfunc (b *FileInfo) GetFileInfo() *FileInfo { return b }\n\ntype BaseFileInfo interface {\n\tGetFileInfo() *FileInfo\n}\n\nfunc init() {\n\tt[\"BaseFileInfo\"] = reflect.TypeOf((*FileInfo)(nil)).Elem()\n}\n\nfunc (b *FileQuery) GetFileQuery() *FileQuery { return b }\n\ntype BaseFileQuery interface {\n\tGetFileQuery() *FileQuery\n}\n\nfunc init() {\n\tt[\"BaseFileQuery\"] = reflect.TypeOf((*FileQuery)(nil)).Elem()\n}\n\nfunc (b 
*GatewayConnectFault) GetGatewayConnectFault() *GatewayConnectFault { return b }\n\ntype BaseGatewayConnectFault interface {\n\tGetGatewayConnectFault() *GatewayConnectFault\n}\n\nfunc init() {\n\tt[\"BaseGatewayConnectFault\"] = reflect.TypeOf((*GatewayConnectFault)(nil)).Elem()\n}\n\nfunc (b *GatewayToHostConnectFault) GetGatewayToHostConnectFault() *GatewayToHostConnectFault {\n\treturn b\n}\n\ntype BaseGatewayToHostConnectFault interface {\n\tGetGatewayToHostConnectFault() *GatewayToHostConnectFault\n}\n\nfunc init() {\n\tt[\"BaseGatewayToHostConnectFault\"] = reflect.TypeOf((*GatewayToHostConnectFault)(nil)).Elem()\n}\n\nfunc (b *GeneralEvent) GetGeneralEvent() *GeneralEvent { return b }\n\ntype BaseGeneralEvent interface {\n\tGetGeneralEvent() *GeneralEvent\n}\n\nfunc init() {\n\tt[\"BaseGeneralEvent\"] = reflect.TypeOf((*GeneralEvent)(nil)).Elem()\n}\n\nfunc (b *GuestAuthSubject) GetGuestAuthSubject() *GuestAuthSubject { return b }\n\ntype BaseGuestAuthSubject interface {\n\tGetGuestAuthSubject() *GuestAuthSubject\n}\n\nfunc init() {\n\tt[\"BaseGuestAuthSubject\"] = reflect.TypeOf((*GuestAuthSubject)(nil)).Elem()\n}\n\nfunc (b *GuestAuthentication) GetGuestAuthentication() *GuestAuthentication { return b }\n\ntype BaseGuestAuthentication interface {\n\tGetGuestAuthentication() *GuestAuthentication\n}\n\nfunc init() {\n\tt[\"BaseGuestAuthentication\"] = reflect.TypeOf((*GuestAuthentication)(nil)).Elem()\n}\n\nfunc (b *GuestFileAttributes) GetGuestFileAttributes() *GuestFileAttributes { return b }\n\ntype BaseGuestFileAttributes interface {\n\tGetGuestFileAttributes() *GuestFileAttributes\n}\n\nfunc init() {\n\tt[\"BaseGuestFileAttributes\"] = reflect.TypeOf((*GuestFileAttributes)(nil)).Elem()\n}\n\nfunc (b *GuestOperationsFault) GetGuestOperationsFault() *GuestOperationsFault { return b }\n\ntype BaseGuestOperationsFault interface {\n\tGetGuestOperationsFault() *GuestOperationsFault\n}\n\nfunc init() {\n\tt[\"BaseGuestOperationsFault\"] = 
reflect.TypeOf((*GuestOperationsFault)(nil)).Elem()\n}\n\nfunc (b *GuestProgramSpec) GetGuestProgramSpec() *GuestProgramSpec { return b }\n\ntype BaseGuestProgramSpec interface {\n\tGetGuestProgramSpec() *GuestProgramSpec\n}\n\nfunc init() {\n\tt[\"BaseGuestProgramSpec\"] = reflect.TypeOf((*GuestProgramSpec)(nil)).Elem()\n}\n\nfunc (b *GuestRegValueDataSpec) GetGuestRegValueDataSpec() *GuestRegValueDataSpec { return b }\n\ntype BaseGuestRegValueDataSpec interface {\n\tGetGuestRegValueDataSpec() *GuestRegValueDataSpec\n}\n\nfunc init() {\n\tt[\"BaseGuestRegValueDataSpec\"] = reflect.TypeOf((*GuestRegValueDataSpec)(nil)).Elem()\n}\n\nfunc (b *GuestRegistryFault) GetGuestRegistryFault() *GuestRegistryFault { return b }\n\ntype BaseGuestRegistryFault interface {\n\tGetGuestRegistryFault() *GuestRegistryFault\n}\n\nfunc init() {\n\tt[\"BaseGuestRegistryFault\"] = reflect.TypeOf((*GuestRegistryFault)(nil)).Elem()\n}\n\nfunc (b *GuestRegistryKeyFault) GetGuestRegistryKeyFault() *GuestRegistryKeyFault { return b }\n\ntype BaseGuestRegistryKeyFault interface {\n\tGetGuestRegistryKeyFault() *GuestRegistryKeyFault\n}\n\nfunc init() {\n\tt[\"BaseGuestRegistryKeyFault\"] = reflect.TypeOf((*GuestRegistryKeyFault)(nil)).Elem()\n}\n\nfunc (b *GuestRegistryValueFault) GetGuestRegistryValueFault() *GuestRegistryValueFault { return b }\n\ntype BaseGuestRegistryValueFault interface {\n\tGetGuestRegistryValueFault() *GuestRegistryValueFault\n}\n\nfunc init() {\n\tt[\"BaseGuestRegistryValueFault\"] = reflect.TypeOf((*GuestRegistryValueFault)(nil)).Elem()\n}\n\nfunc (b *HostAccountSpec) GetHostAccountSpec() *HostAccountSpec { return b }\n\ntype BaseHostAccountSpec interface {\n\tGetHostAccountSpec() *HostAccountSpec\n}\n\nfunc init() {\n\tt[\"BaseHostAccountSpec\"] = reflect.TypeOf((*HostAccountSpec)(nil)).Elem()\n}\n\nfunc (b *HostAuthenticationStoreInfo) GetHostAuthenticationStoreInfo() *HostAuthenticationStoreInfo {\n\treturn b\n}\n\ntype BaseHostAuthenticationStoreInfo interface 
{\n\tGetHostAuthenticationStoreInfo() *HostAuthenticationStoreInfo\n}\n\nfunc init() {\n\tt[\"BaseHostAuthenticationStoreInfo\"] = reflect.TypeOf((*HostAuthenticationStoreInfo)(nil)).Elem()\n}\n\nfunc (b *HostCommunication) GetHostCommunication() *HostCommunication { return b }\n\ntype BaseHostCommunication interface {\n\tGetHostCommunication() *HostCommunication\n}\n\nfunc init() {\n\tt[\"BaseHostCommunication\"] = reflect.TypeOf((*HostCommunication)(nil)).Elem()\n}\n\nfunc (b *HostConfigFault) GetHostConfigFault() *HostConfigFault { return b }\n\ntype BaseHostConfigFault interface {\n\tGetHostConfigFault() *HostConfigFault\n}\n\nfunc init() {\n\tt[\"BaseHostConfigFault\"] = reflect.TypeOf((*HostConfigFault)(nil)).Elem()\n}\n\nfunc (b *HostConnectFault) GetHostConnectFault() *HostConnectFault { return b }\n\ntype BaseHostConnectFault interface {\n\tGetHostConnectFault() *HostConnectFault\n}\n\nfunc init() {\n\tt[\"BaseHostConnectFault\"] = reflect.TypeOf((*HostConnectFault)(nil)).Elem()\n}\n\nfunc (b *HostConnectInfoNetworkInfo) GetHostConnectInfoNetworkInfo() *HostConnectInfoNetworkInfo {\n\treturn b\n}\n\ntype BaseHostConnectInfoNetworkInfo interface {\n\tGetHostConnectInfoNetworkInfo() *HostConnectInfoNetworkInfo\n}\n\nfunc init() {\n\tt[\"BaseHostConnectInfoNetworkInfo\"] = reflect.TypeOf((*HostConnectInfoNetworkInfo)(nil)).Elem()\n}\n\nfunc (b *HostDasEvent) GetHostDasEvent() *HostDasEvent { return b }\n\ntype BaseHostDasEvent interface {\n\tGetHostDasEvent() *HostDasEvent\n}\n\nfunc init() {\n\tt[\"BaseHostDasEvent\"] = reflect.TypeOf((*HostDasEvent)(nil)).Elem()\n}\n\nfunc (b *HostDatastoreConnectInfo) GetHostDatastoreConnectInfo() *HostDatastoreConnectInfo { return b }\n\ntype BaseHostDatastoreConnectInfo interface {\n\tGetHostDatastoreConnectInfo() *HostDatastoreConnectInfo\n}\n\nfunc init() {\n\tt[\"BaseHostDatastoreConnectInfo\"] = reflect.TypeOf((*HostDatastoreConnectInfo)(nil)).Elem()\n}\n\nfunc (b *HostDevice) GetHostDevice() *HostDevice { return b 
}\n\ntype BaseHostDevice interface {\n\tGetHostDevice() *HostDevice\n}\n\nfunc init() {\n\tt[\"BaseHostDevice\"] = reflect.TypeOf((*HostDevice)(nil)).Elem()\n}\n\nfunc (b *HostDigestInfo) GetHostDigestInfo() *HostDigestInfo { return b }\n\ntype BaseHostDigestInfo interface {\n\tGetHostDigestInfo() *HostDigestInfo\n}\n\nfunc init() {\n\tt[\"BaseHostDigestInfo\"] = reflect.TypeOf((*HostDigestInfo)(nil)).Elem()\n}\n\nfunc (b *HostDirectoryStoreInfo) GetHostDirectoryStoreInfo() *HostDirectoryStoreInfo { return b }\n\ntype BaseHostDirectoryStoreInfo interface {\n\tGetHostDirectoryStoreInfo() *HostDirectoryStoreInfo\n}\n\nfunc init() {\n\tt[\"BaseHostDirectoryStoreInfo\"] = reflect.TypeOf((*HostDirectoryStoreInfo)(nil)).Elem()\n}\n\nfunc (b *HostDnsConfig) GetHostDnsConfig() *HostDnsConfig { return b }\n\ntype BaseHostDnsConfig interface {\n\tGetHostDnsConfig() *HostDnsConfig\n}\n\nfunc init() {\n\tt[\"BaseHostDnsConfig\"] = reflect.TypeOf((*HostDnsConfig)(nil)).Elem()\n}\n\nfunc (b *HostEvent) GetHostEvent() *HostEvent { return b }\n\ntype BaseHostEvent interface {\n\tGetHostEvent() *HostEvent\n}\n\nfunc init() {\n\tt[\"BaseHostEvent\"] = reflect.TypeOf((*HostEvent)(nil)).Elem()\n}\n\nfunc (b *HostFibreChannelHba) GetHostFibreChannelHba() *HostFibreChannelHba { return b }\n\ntype BaseHostFibreChannelHba interface {\n\tGetHostFibreChannelHba() *HostFibreChannelHba\n}\n\nfunc init() {\n\tt[\"BaseHostFibreChannelHba\"] = reflect.TypeOf((*HostFibreChannelHba)(nil)).Elem()\n}\n\nfunc (b *HostFibreChannelTargetTransport) GetHostFibreChannelTargetTransport() *HostFibreChannelTargetTransport {\n\treturn b\n}\n\ntype BaseHostFibreChannelTargetTransport interface {\n\tGetHostFibreChannelTargetTransport() *HostFibreChannelTargetTransport\n}\n\nfunc init() {\n\tt[\"BaseHostFibreChannelTargetTransport\"] = reflect.TypeOf((*HostFibreChannelTargetTransport)(nil)).Elem()\n}\n\nfunc (b *HostFileSystemVolume) GetHostFileSystemVolume() *HostFileSystemVolume { return b }\n\ntype 
BaseHostFileSystemVolume interface {\n\tGetHostFileSystemVolume() *HostFileSystemVolume\n}\n\nfunc init() {\n\tt[\"BaseHostFileSystemVolume\"] = reflect.TypeOf((*HostFileSystemVolume)(nil)).Elem()\n}\n\nfunc (b *HostHardwareElementInfo) GetHostHardwareElementInfo() *HostHardwareElementInfo { return b }\n\ntype BaseHostHardwareElementInfo interface {\n\tGetHostHardwareElementInfo() *HostHardwareElementInfo\n}\n\nfunc init() {\n\tt[\"BaseHostHardwareElementInfo\"] = reflect.TypeOf((*HostHardwareElementInfo)(nil)).Elem()\n}\n\nfunc (b *HostHostBusAdapter) GetHostHostBusAdapter() *HostHostBusAdapter { return b }\n\ntype BaseHostHostBusAdapter interface {\n\tGetHostHostBusAdapter() *HostHostBusAdapter\n}\n\nfunc init() {\n\tt[\"BaseHostHostBusAdapter\"] = reflect.TypeOf((*HostHostBusAdapter)(nil)).Elem()\n}\n\nfunc (b *HostIpRouteConfig) GetHostIpRouteConfig() *HostIpRouteConfig { return b }\n\ntype BaseHostIpRouteConfig interface {\n\tGetHostIpRouteConfig() *HostIpRouteConfig\n}\n\nfunc init() {\n\tt[\"BaseHostIpRouteConfig\"] = reflect.TypeOf((*HostIpRouteConfig)(nil)).Elem()\n}\n\nfunc (b *HostMemberHealthCheckResult) GetHostMemberHealthCheckResult() *HostMemberHealthCheckResult {\n\treturn b\n}\n\ntype BaseHostMemberHealthCheckResult interface {\n\tGetHostMemberHealthCheckResult() *HostMemberHealthCheckResult\n}\n\nfunc init() {\n\tt[\"BaseHostMemberHealthCheckResult\"] = reflect.TypeOf((*HostMemberHealthCheckResult)(nil)).Elem()\n}\n\nfunc (b *HostMemberUplinkHealthCheckResult) GetHostMemberUplinkHealthCheckResult() *HostMemberUplinkHealthCheckResult {\n\treturn b\n}\n\ntype BaseHostMemberUplinkHealthCheckResult interface {\n\tGetHostMemberUplinkHealthCheckResult() *HostMemberUplinkHealthCheckResult\n}\n\nfunc init() {\n\tt[\"BaseHostMemberUplinkHealthCheckResult\"] = reflect.TypeOf((*HostMemberUplinkHealthCheckResult)(nil)).Elem()\n}\n\nfunc (b *HostMultipathInfoLogicalUnitPolicy) GetHostMultipathInfoLogicalUnitPolicy() *HostMultipathInfoLogicalUnitPolicy 
{\n\treturn b\n}\n\ntype BaseHostMultipathInfoLogicalUnitPolicy interface {\n\tGetHostMultipathInfoLogicalUnitPolicy() *HostMultipathInfoLogicalUnitPolicy\n}\n\nfunc init() {\n\tt[\"BaseHostMultipathInfoLogicalUnitPolicy\"] = reflect.TypeOf((*HostMultipathInfoLogicalUnitPolicy)(nil)).Elem()\n}\n\nfunc (b *HostPciPassthruConfig) GetHostPciPassthruConfig() *HostPciPassthruConfig { return b }\n\ntype BaseHostPciPassthruConfig interface {\n\tGetHostPciPassthruConfig() *HostPciPassthruConfig\n}\n\nfunc init() {\n\tt[\"BaseHostPciPassthruConfig\"] = reflect.TypeOf((*HostPciPassthruConfig)(nil)).Elem()\n}\n\nfunc (b *HostPciPassthruInfo) GetHostPciPassthruInfo() *HostPciPassthruInfo { return b }\n\ntype BaseHostPciPassthruInfo interface {\n\tGetHostPciPassthruInfo() *HostPciPassthruInfo\n}\n\nfunc init() {\n\tt[\"BaseHostPciPassthruInfo\"] = reflect.TypeOf((*HostPciPassthruInfo)(nil)).Elem()\n}\n\nfunc (b *HostPowerOpFailed) GetHostPowerOpFailed() *HostPowerOpFailed { return b }\n\ntype BaseHostPowerOpFailed interface {\n\tGetHostPowerOpFailed() *HostPowerOpFailed\n}\n\nfunc init() {\n\tt[\"BaseHostPowerOpFailed\"] = reflect.TypeOf((*HostPowerOpFailed)(nil)).Elem()\n}\n\nfunc (b *HostProfileConfigSpec) GetHostProfileConfigSpec() *HostProfileConfigSpec { return b }\n\ntype BaseHostProfileConfigSpec interface {\n\tGetHostProfileConfigSpec() *HostProfileConfigSpec\n}\n\nfunc init() {\n\tt[\"BaseHostProfileConfigSpec\"] = reflect.TypeOf((*HostProfileConfigSpec)(nil)).Elem()\n}\n\nfunc (b *HostProfilesEntityCustomizations) GetHostProfilesEntityCustomizations() *HostProfilesEntityCustomizations {\n\treturn b\n}\n\ntype BaseHostProfilesEntityCustomizations interface {\n\tGetHostProfilesEntityCustomizations() *HostProfilesEntityCustomizations\n}\n\nfunc init() {\n\tt[\"BaseHostProfilesEntityCustomizations\"] = reflect.TypeOf((*HostProfilesEntityCustomizations)(nil)).Elem()\n}\n\nfunc (b *HostSriovDevicePoolInfo) GetHostSriovDevicePoolInfo() *HostSriovDevicePoolInfo { return b 
}\n\ntype BaseHostSriovDevicePoolInfo interface {\n\tGetHostSriovDevicePoolInfo() *HostSriovDevicePoolInfo\n}\n\nfunc init() {\n\tt[\"BaseHostSriovDevicePoolInfo\"] = reflect.TypeOf((*HostSriovDevicePoolInfo)(nil)).Elem()\n}\n\nfunc (b *HostSystemSwapConfigurationSystemSwapOption) GetHostSystemSwapConfigurationSystemSwapOption() *HostSystemSwapConfigurationSystemSwapOption {\n\treturn b\n}\n\ntype BaseHostSystemSwapConfigurationSystemSwapOption interface {\n\tGetHostSystemSwapConfigurationSystemSwapOption() *HostSystemSwapConfigurationSystemSwapOption\n}\n\nfunc init() {\n\tt[\"BaseHostSystemSwapConfigurationSystemSwapOption\"] = reflect.TypeOf((*HostSystemSwapConfigurationSystemSwapOption)(nil)).Elem()\n}\n\nfunc (b *HostTargetTransport) GetHostTargetTransport() *HostTargetTransport { return b }\n\ntype BaseHostTargetTransport interface {\n\tGetHostTargetTransport() *HostTargetTransport\n}\n\nfunc init() {\n\tt[\"BaseHostTargetTransport\"] = reflect.TypeOf((*HostTargetTransport)(nil)).Elem()\n}\n\nfunc (b *HostTpmEventDetails) GetHostTpmEventDetails() *HostTpmEventDetails { return b }\n\ntype BaseHostTpmEventDetails interface {\n\tGetHostTpmEventDetails() *HostTpmEventDetails\n}\n\nfunc init() {\n\tt[\"BaseHostTpmEventDetails\"] = reflect.TypeOf((*HostTpmEventDetails)(nil)).Elem()\n}\n\nfunc (b *HostVirtualSwitchBridge) GetHostVirtualSwitchBridge() *HostVirtualSwitchBridge { return b }\n\ntype BaseHostVirtualSwitchBridge interface {\n\tGetHostVirtualSwitchBridge() *HostVirtualSwitchBridge\n}\n\nfunc init() {\n\tt[\"BaseHostVirtualSwitchBridge\"] = reflect.TypeOf((*HostVirtualSwitchBridge)(nil)).Elem()\n}\n\nfunc (b *HourlyTaskScheduler) GetHourlyTaskScheduler() *HourlyTaskScheduler { return b }\n\ntype BaseHourlyTaskScheduler interface {\n\tGetHourlyTaskScheduler() *HourlyTaskScheduler\n}\n\nfunc init() {\n\tt[\"BaseHourlyTaskScheduler\"] = reflect.TypeOf((*HourlyTaskScheduler)(nil)).Elem()\n}\n\nfunc (b *ImportSpec) GetImportSpec() *ImportSpec { return b 
}\n\ntype BaseImportSpec interface {\n\tGetImportSpec() *ImportSpec\n}\n\nfunc init() {\n\tt[\"BaseImportSpec\"] = reflect.TypeOf((*ImportSpec)(nil)).Elem()\n}\n\nfunc (b *InaccessibleDatastore) GetInaccessibleDatastore() *InaccessibleDatastore { return b }\n\ntype BaseInaccessibleDatastore interface {\n\tGetInaccessibleDatastore() *InaccessibleDatastore\n}\n\nfunc init() {\n\tt[\"BaseInaccessibleDatastore\"] = reflect.TypeOf((*InaccessibleDatastore)(nil)).Elem()\n}\n\nfunc (b *InheritablePolicy) GetInheritablePolicy() *InheritablePolicy { return b }\n\ntype BaseInheritablePolicy interface {\n\tGetInheritablePolicy() *InheritablePolicy\n}\n\nfunc init() {\n\tt[\"BaseInheritablePolicy\"] = reflect.TypeOf((*InheritablePolicy)(nil)).Elem()\n}\n\nfunc (b *InsufficientHostCapacityFault) GetInsufficientHostCapacityFault() *InsufficientHostCapacityFault {\n\treturn b\n}\n\ntype BaseInsufficientHostCapacityFault interface {\n\tGetInsufficientHostCapacityFault() *InsufficientHostCapacityFault\n}\n\nfunc init() {\n\tt[\"BaseInsufficientHostCapacityFault\"] = reflect.TypeOf((*InsufficientHostCapacityFault)(nil)).Elem()\n}\n\nfunc (b *InsufficientResourcesFault) GetInsufficientResourcesFault() *InsufficientResourcesFault {\n\treturn b\n}\n\ntype BaseInsufficientResourcesFault interface {\n\tGetInsufficientResourcesFault() *InsufficientResourcesFault\n}\n\nfunc init() {\n\tt[\"BaseInsufficientResourcesFault\"] = reflect.TypeOf((*InsufficientResourcesFault)(nil)).Elem()\n}\n\nfunc (b *InsufficientStandbyResource) GetInsufficientStandbyResource() *InsufficientStandbyResource {\n\treturn b\n}\n\ntype BaseInsufficientStandbyResource interface {\n\tGetInsufficientStandbyResource() *InsufficientStandbyResource\n}\n\nfunc init() {\n\tt[\"BaseInsufficientStandbyResource\"] = reflect.TypeOf((*InsufficientStandbyResource)(nil)).Elem()\n}\n\nfunc (b *InvalidArgument) GetInvalidArgument() *InvalidArgument { return b }\n\ntype BaseInvalidArgument interface {\n\tGetInvalidArgument() 
*InvalidArgument\n}\n\nfunc init() {\n\tt[\"BaseInvalidArgument\"] = reflect.TypeOf((*InvalidArgument)(nil)).Elem()\n}\n\nfunc (b *InvalidCAMServer) GetInvalidCAMServer() *InvalidCAMServer { return b }\n\ntype BaseInvalidCAMServer interface {\n\tGetInvalidCAMServer() *InvalidCAMServer\n}\n\nfunc init() {\n\tt[\"BaseInvalidCAMServer\"] = reflect.TypeOf((*InvalidCAMServer)(nil)).Elem()\n}\n\nfunc (b *InvalidDatastore) GetInvalidDatastore() *InvalidDatastore { return b }\n\ntype BaseInvalidDatastore interface {\n\tGetInvalidDatastore() *InvalidDatastore\n}\n\nfunc init() {\n\tt[\"BaseInvalidDatastore\"] = reflect.TypeOf((*InvalidDatastore)(nil)).Elem()\n}\n\nfunc (b *InvalidDeviceSpec) GetInvalidDeviceSpec() *InvalidDeviceSpec { return b }\n\ntype BaseInvalidDeviceSpec interface {\n\tGetInvalidDeviceSpec() *InvalidDeviceSpec\n}\n\nfunc init() {\n\tt[\"BaseInvalidDeviceSpec\"] = reflect.TypeOf((*InvalidDeviceSpec)(nil)).Elem()\n}\n\nfunc (b *InvalidFolder) GetInvalidFolder() *InvalidFolder { return b }\n\ntype BaseInvalidFolder interface {\n\tGetInvalidFolder() *InvalidFolder\n}\n\nfunc init() {\n\tt[\"BaseInvalidFolder\"] = reflect.TypeOf((*InvalidFolder)(nil)).Elem()\n}\n\nfunc (b *InvalidFormat) GetInvalidFormat() *InvalidFormat { return b }\n\ntype BaseInvalidFormat interface {\n\tGetInvalidFormat() *InvalidFormat\n}\n\nfunc init() {\n\tt[\"BaseInvalidFormat\"] = reflect.TypeOf((*InvalidFormat)(nil)).Elem()\n}\n\nfunc (b *InvalidHostState) GetInvalidHostState() *InvalidHostState { return b }\n\ntype BaseInvalidHostState interface {\n\tGetInvalidHostState() *InvalidHostState\n}\n\nfunc init() {\n\tt[\"BaseInvalidHostState\"] = reflect.TypeOf((*InvalidHostState)(nil)).Elem()\n}\n\nfunc (b *InvalidLogin) GetInvalidLogin() *InvalidLogin { return b }\n\ntype BaseInvalidLogin interface {\n\tGetInvalidLogin() *InvalidLogin\n}\n\nfunc init() {\n\tt[\"BaseInvalidLogin\"] = reflect.TypeOf((*InvalidLogin)(nil)).Elem()\n}\n\nfunc (b *InvalidPropertyValue) 
GetInvalidPropertyValue() *InvalidPropertyValue { return b }\n\ntype BaseInvalidPropertyValue interface {\n\tGetInvalidPropertyValue() *InvalidPropertyValue\n}\n\nfunc init() {\n\tt[\"BaseInvalidPropertyValue\"] = reflect.TypeOf((*InvalidPropertyValue)(nil)).Elem()\n}\n\nfunc (b *InvalidRequest) GetInvalidRequest() *InvalidRequest { return b }\n\ntype BaseInvalidRequest interface {\n\tGetInvalidRequest() *InvalidRequest\n}\n\nfunc init() {\n\tt[\"BaseInvalidRequest\"] = reflect.TypeOf((*InvalidRequest)(nil)).Elem()\n}\n\nfunc (b *InvalidState) GetInvalidState() *InvalidState { return b }\n\ntype BaseInvalidState interface {\n\tGetInvalidState() *InvalidState\n}\n\nfunc init() {\n\tt[\"BaseInvalidState\"] = reflect.TypeOf((*InvalidState)(nil)).Elem()\n}\n\nfunc (b *InvalidVmConfig) GetInvalidVmConfig() *InvalidVmConfig { return b }\n\ntype BaseInvalidVmConfig interface {\n\tGetInvalidVmConfig() *InvalidVmConfig\n}\n\nfunc init() {\n\tt[\"BaseInvalidVmConfig\"] = reflect.TypeOf((*InvalidVmConfig)(nil)).Elem()\n}\n\nfunc (b *IoFilterInfo) GetIoFilterInfo() *IoFilterInfo { return b }\n\ntype BaseIoFilterInfo interface {\n\tGetIoFilterInfo() *IoFilterInfo\n}\n\nfunc init() {\n\tt[\"BaseIoFilterInfo\"] = reflect.TypeOf((*IoFilterInfo)(nil)).Elem()\n}\n\nfunc (b *IpAddress) GetIpAddress() *IpAddress { return b }\n\ntype BaseIpAddress interface {\n\tGetIpAddress() *IpAddress\n}\n\nfunc init() {\n\tt[\"BaseIpAddress\"] = reflect.TypeOf((*IpAddress)(nil)).Elem()\n}\n\nfunc (b *IscsiFault) GetIscsiFault() *IscsiFault { return b }\n\ntype BaseIscsiFault interface {\n\tGetIscsiFault() *IscsiFault\n}\n\nfunc init() {\n\tt[\"BaseIscsiFault\"] = reflect.TypeOf((*IscsiFault)(nil)).Elem()\n}\n\nfunc (b *LicenseEvent) GetLicenseEvent() *LicenseEvent { return b }\n\ntype BaseLicenseEvent interface {\n\tGetLicenseEvent() *LicenseEvent\n}\n\nfunc init() {\n\tt[\"BaseLicenseEvent\"] = reflect.TypeOf((*LicenseEvent)(nil)).Elem()\n}\n\nfunc (b *LicenseSource) GetLicenseSource() 
*LicenseSource { return b }\n\ntype BaseLicenseSource interface {\n\tGetLicenseSource() *LicenseSource\n}\n\nfunc init() {\n\tt[\"BaseLicenseSource\"] = reflect.TypeOf((*LicenseSource)(nil)).Elem()\n}\n\nfunc (b *MacAddress) GetMacAddress() *MacAddress { return b }\n\ntype BaseMacAddress interface {\n\tGetMacAddress() *MacAddress\n}\n\nfunc init() {\n\tt[\"BaseMacAddress\"] = reflect.TypeOf((*MacAddress)(nil)).Elem()\n}\n\nfunc (b *MethodFault) GetMethodFault() *MethodFault { return b }\n\ntype BaseMethodFault interface {\n\tGetMethodFault() *MethodFault\n}\n\nfunc init() {\n\tt[\"BaseMethodFault\"] = reflect.TypeOf((*MethodFault)(nil)).Elem()\n}\n\nfunc (b *MigrationEvent) GetMigrationEvent() *MigrationEvent { return b }\n\ntype BaseMigrationEvent interface {\n\tGetMigrationEvent() *MigrationEvent\n}\n\nfunc init() {\n\tt[\"BaseMigrationEvent\"] = reflect.TypeOf((*MigrationEvent)(nil)).Elem()\n}\n\nfunc (b *MigrationFault) GetMigrationFault() *MigrationFault { return b }\n\ntype BaseMigrationFault interface {\n\tGetMigrationFault() *MigrationFault\n}\n\nfunc init() {\n\tt[\"BaseMigrationFault\"] = reflect.TypeOf((*MigrationFault)(nil)).Elem()\n}\n\nfunc (b *MigrationFeatureNotSupported) GetMigrationFeatureNotSupported() *MigrationFeatureNotSupported {\n\treturn b\n}\n\ntype BaseMigrationFeatureNotSupported interface {\n\tGetMigrationFeatureNotSupported() *MigrationFeatureNotSupported\n}\n\nfunc init() {\n\tt[\"BaseMigrationFeatureNotSupported\"] = reflect.TypeOf((*MigrationFeatureNotSupported)(nil)).Elem()\n}\n\nfunc (b *MonthlyTaskScheduler) GetMonthlyTaskScheduler() *MonthlyTaskScheduler { return b }\n\ntype BaseMonthlyTaskScheduler interface {\n\tGetMonthlyTaskScheduler() *MonthlyTaskScheduler\n}\n\nfunc init() {\n\tt[\"BaseMonthlyTaskScheduler\"] = reflect.TypeOf((*MonthlyTaskScheduler)(nil)).Elem()\n}\n\nfunc (b *NasConfigFault) GetNasConfigFault() *NasConfigFault { return b }\n\ntype BaseNasConfigFault interface {\n\tGetNasConfigFault() 
*NasConfigFault\n}\n\nfunc init() {\n\tt[\"BaseNasConfigFault\"] = reflect.TypeOf((*NasConfigFault)(nil)).Elem()\n}\n\nfunc (b *NegatableExpression) GetNegatableExpression() *NegatableExpression { return b }\n\ntype BaseNegatableExpression interface {\n\tGetNegatableExpression() *NegatableExpression\n}\n\nfunc init() {\n\tt[\"BaseNegatableExpression\"] = reflect.TypeOf((*NegatableExpression)(nil)).Elem()\n}\n\nfunc (b *NetBIOSConfigInfo) GetNetBIOSConfigInfo() *NetBIOSConfigInfo { return b }\n\ntype BaseNetBIOSConfigInfo interface {\n\tGetNetBIOSConfigInfo() *NetBIOSConfigInfo\n}\n\nfunc init() {\n\tt[\"BaseNetBIOSConfigInfo\"] = reflect.TypeOf((*NetBIOSConfigInfo)(nil)).Elem()\n}\n\nfunc (b *NetworkSummary) GetNetworkSummary() *NetworkSummary { return b }\n\ntype BaseNetworkSummary interface {\n\tGetNetworkSummary() *NetworkSummary\n}\n\nfunc init() {\n\tt[\"BaseNetworkSummary\"] = reflect.TypeOf((*NetworkSummary)(nil)).Elem()\n}\n\nfunc (b *NoCompatibleHost) GetNoCompatibleHost() *NoCompatibleHost { return b }\n\ntype BaseNoCompatibleHost interface {\n\tGetNoCompatibleHost() *NoCompatibleHost\n}\n\nfunc init() {\n\tt[\"BaseNoCompatibleHost\"] = reflect.TypeOf((*NoCompatibleHost)(nil)).Elem()\n}\n\nfunc (b *NoPermission) GetNoPermission() *NoPermission { return b }\n\ntype BaseNoPermission interface {\n\tGetNoPermission() *NoPermission\n}\n\nfunc init() {\n\tt[\"BaseNoPermission\"] = reflect.TypeOf((*NoPermission)(nil)).Elem()\n}\n\nfunc (b *NodeDeploymentSpec) GetNodeDeploymentSpec() *NodeDeploymentSpec { return b }\n\ntype BaseNodeDeploymentSpec interface {\n\tGetNodeDeploymentSpec() *NodeDeploymentSpec\n}\n\nfunc init() {\n\tt[\"BaseNodeDeploymentSpec\"] = reflect.TypeOf((*NodeDeploymentSpec)(nil)).Elem()\n}\n\nfunc (b *NodeNetworkSpec) GetNodeNetworkSpec() *NodeNetworkSpec { return b }\n\ntype BaseNodeNetworkSpec interface {\n\tGetNodeNetworkSpec() *NodeNetworkSpec\n}\n\nfunc init() {\n\tt[\"BaseNodeNetworkSpec\"] = 
reflect.TypeOf((*NodeNetworkSpec)(nil)).Elem()\n}\n\nfunc (b *NotEnoughCpus) GetNotEnoughCpus() *NotEnoughCpus { return b }\n\ntype BaseNotEnoughCpus interface {\n\tGetNotEnoughCpus() *NotEnoughCpus\n}\n\nfunc init() {\n\tt[\"BaseNotEnoughCpus\"] = reflect.TypeOf((*NotEnoughCpus)(nil)).Elem()\n}\n\nfunc (b *NotEnoughLicenses) GetNotEnoughLicenses() *NotEnoughLicenses { return b }\n\ntype BaseNotEnoughLicenses interface {\n\tGetNotEnoughLicenses() *NotEnoughLicenses\n}\n\nfunc init() {\n\tt[\"BaseNotEnoughLicenses\"] = reflect.TypeOf((*NotEnoughLicenses)(nil)).Elem()\n}\n\nfunc (b *NotSupported) GetNotSupported() *NotSupported { return b }\n\ntype BaseNotSupported interface {\n\tGetNotSupported() *NotSupported\n}\n\nfunc init() {\n\tt[\"BaseNotSupported\"] = reflect.TypeOf((*NotSupported)(nil)).Elem()\n}\n\nfunc (b *NotSupportedHost) GetNotSupportedHost() *NotSupportedHost { return b }\n\ntype BaseNotSupportedHost interface {\n\tGetNotSupportedHost() *NotSupportedHost\n}\n\nfunc init() {\n\tt[\"BaseNotSupportedHost\"] = reflect.TypeOf((*NotSupportedHost)(nil)).Elem()\n}\n\nfunc (b *NotSupportedHostInCluster) GetNotSupportedHostInCluster() *NotSupportedHostInCluster {\n\treturn b\n}\n\ntype BaseNotSupportedHostInCluster interface {\n\tGetNotSupportedHostInCluster() *NotSupportedHostInCluster\n}\n\nfunc init() {\n\tt[\"BaseNotSupportedHostInCluster\"] = reflect.TypeOf((*NotSupportedHostInCluster)(nil)).Elem()\n}\n\nfunc (b *OptionType) GetOptionType() *OptionType { return b }\n\ntype BaseOptionType interface {\n\tGetOptionType() *OptionType\n}\n\nfunc init() {\n\tt[\"BaseOptionType\"] = reflect.TypeOf((*OptionType)(nil)).Elem()\n}\n\nfunc (b *OptionValue) GetOptionValue() *OptionValue { return b }\n\ntype BaseOptionValue interface {\n\tGetOptionValue() *OptionValue\n}\n\nfunc init() {\n\tt[\"BaseOptionValue\"] = reflect.TypeOf((*OptionValue)(nil)).Elem()\n}\n\nfunc (b *OvfAttribute) GetOvfAttribute() *OvfAttribute { return b }\n\ntype BaseOvfAttribute interface 
{\n\tGetOvfAttribute() *OvfAttribute\n}\n\nfunc init() {\n\tt[\"BaseOvfAttribute\"] = reflect.TypeOf((*OvfAttribute)(nil)).Elem()\n}\n\nfunc (b *OvfConnectedDevice) GetOvfConnectedDevice() *OvfConnectedDevice { return b }\n\ntype BaseOvfConnectedDevice interface {\n\tGetOvfConnectedDevice() *OvfConnectedDevice\n}\n\nfunc init() {\n\tt[\"BaseOvfConnectedDevice\"] = reflect.TypeOf((*OvfConnectedDevice)(nil)).Elem()\n}\n\nfunc (b *OvfConstraint) GetOvfConstraint() *OvfConstraint { return b }\n\ntype BaseOvfConstraint interface {\n\tGetOvfConstraint() *OvfConstraint\n}\n\nfunc init() {\n\tt[\"BaseOvfConstraint\"] = reflect.TypeOf((*OvfConstraint)(nil)).Elem()\n}\n\nfunc (b *OvfConsumerCallbackFault) GetOvfConsumerCallbackFault() *OvfConsumerCallbackFault { return b }\n\ntype BaseOvfConsumerCallbackFault interface {\n\tGetOvfConsumerCallbackFault() *OvfConsumerCallbackFault\n}\n\nfunc init() {\n\tt[\"BaseOvfConsumerCallbackFault\"] = reflect.TypeOf((*OvfConsumerCallbackFault)(nil)).Elem()\n}\n\nfunc (b *OvfElement) GetOvfElement() *OvfElement { return b }\n\ntype BaseOvfElement interface {\n\tGetOvfElement() *OvfElement\n}\n\nfunc init() {\n\tt[\"BaseOvfElement\"] = reflect.TypeOf((*OvfElement)(nil)).Elem()\n}\n\nfunc (b *OvfExport) GetOvfExport() *OvfExport { return b }\n\ntype BaseOvfExport interface {\n\tGetOvfExport() *OvfExport\n}\n\nfunc init() {\n\tt[\"BaseOvfExport\"] = reflect.TypeOf((*OvfExport)(nil)).Elem()\n}\n\nfunc (b *OvfFault) GetOvfFault() *OvfFault { return b }\n\ntype BaseOvfFault interface {\n\tGetOvfFault() *OvfFault\n}\n\nfunc init() {\n\tt[\"BaseOvfFault\"] = reflect.TypeOf((*OvfFault)(nil)).Elem()\n}\n\nfunc (b *OvfHardwareExport) GetOvfHardwareExport() *OvfHardwareExport { return b }\n\ntype BaseOvfHardwareExport interface {\n\tGetOvfHardwareExport() *OvfHardwareExport\n}\n\nfunc init() {\n\tt[\"BaseOvfHardwareExport\"] = reflect.TypeOf((*OvfHardwareExport)(nil)).Elem()\n}\n\nfunc (b *OvfImport) GetOvfImport() *OvfImport { return b }\n\ntype 
BaseOvfImport interface {\n\tGetOvfImport() *OvfImport\n}\n\nfunc init() {\n\tt[\"BaseOvfImport\"] = reflect.TypeOf((*OvfImport)(nil)).Elem()\n}\n\nfunc (b *OvfInvalidPackage) GetOvfInvalidPackage() *OvfInvalidPackage { return b }\n\ntype BaseOvfInvalidPackage interface {\n\tGetOvfInvalidPackage() *OvfInvalidPackage\n}\n\nfunc init() {\n\tt[\"BaseOvfInvalidPackage\"] = reflect.TypeOf((*OvfInvalidPackage)(nil)).Elem()\n}\n\nfunc (b *OvfInvalidValue) GetOvfInvalidValue() *OvfInvalidValue { return b }\n\ntype BaseOvfInvalidValue interface {\n\tGetOvfInvalidValue() *OvfInvalidValue\n}\n\nfunc init() {\n\tt[\"BaseOvfInvalidValue\"] = reflect.TypeOf((*OvfInvalidValue)(nil)).Elem()\n}\n\nfunc (b *OvfManagerCommonParams) GetOvfManagerCommonParams() *OvfManagerCommonParams { return b }\n\ntype BaseOvfManagerCommonParams interface {\n\tGetOvfManagerCommonParams() *OvfManagerCommonParams\n}\n\nfunc init() {\n\tt[\"BaseOvfManagerCommonParams\"] = reflect.TypeOf((*OvfManagerCommonParams)(nil)).Elem()\n}\n\nfunc (b *OvfMissingElement) GetOvfMissingElement() *OvfMissingElement { return b }\n\ntype BaseOvfMissingElement interface {\n\tGetOvfMissingElement() *OvfMissingElement\n}\n\nfunc init() {\n\tt[\"BaseOvfMissingElement\"] = reflect.TypeOf((*OvfMissingElement)(nil)).Elem()\n}\n\nfunc (b *OvfProperty) GetOvfProperty() *OvfProperty { return b }\n\ntype BaseOvfProperty interface {\n\tGetOvfProperty() *OvfProperty\n}\n\nfunc init() {\n\tt[\"BaseOvfProperty\"] = reflect.TypeOf((*OvfProperty)(nil)).Elem()\n}\n\nfunc (b *OvfSystemFault) GetOvfSystemFault() *OvfSystemFault { return b }\n\ntype BaseOvfSystemFault interface {\n\tGetOvfSystemFault() *OvfSystemFault\n}\n\nfunc init() {\n\tt[\"BaseOvfSystemFault\"] = reflect.TypeOf((*OvfSystemFault)(nil)).Elem()\n}\n\nfunc (b *OvfUnsupportedAttribute) GetOvfUnsupportedAttribute() *OvfUnsupportedAttribute { return b }\n\ntype BaseOvfUnsupportedAttribute interface {\n\tGetOvfUnsupportedAttribute() *OvfUnsupportedAttribute\n}\n\nfunc init() 
{\n\tt[\"BaseOvfUnsupportedAttribute\"] = reflect.TypeOf((*OvfUnsupportedAttribute)(nil)).Elem()\n}\n\nfunc (b *OvfUnsupportedElement) GetOvfUnsupportedElement() *OvfUnsupportedElement { return b }\n\ntype BaseOvfUnsupportedElement interface {\n\tGetOvfUnsupportedElement() *OvfUnsupportedElement\n}\n\nfunc init() {\n\tt[\"BaseOvfUnsupportedElement\"] = reflect.TypeOf((*OvfUnsupportedElement)(nil)).Elem()\n}\n\nfunc (b *OvfUnsupportedPackage) GetOvfUnsupportedPackage() *OvfUnsupportedPackage { return b }\n\ntype BaseOvfUnsupportedPackage interface {\n\tGetOvfUnsupportedPackage() *OvfUnsupportedPackage\n}\n\nfunc init() {\n\tt[\"BaseOvfUnsupportedPackage\"] = reflect.TypeOf((*OvfUnsupportedPackage)(nil)).Elem()\n}\n\nfunc (b *PatchMetadataInvalid) GetPatchMetadataInvalid() *PatchMetadataInvalid { return b }\n\ntype BasePatchMetadataInvalid interface {\n\tGetPatchMetadataInvalid() *PatchMetadataInvalid\n}\n\nfunc init() {\n\tt[\"BasePatchMetadataInvalid\"] = reflect.TypeOf((*PatchMetadataInvalid)(nil)).Elem()\n}\n\nfunc (b *PatchNotApplicable) GetPatchNotApplicable() *PatchNotApplicable { return b }\n\ntype BasePatchNotApplicable interface {\n\tGetPatchNotApplicable() *PatchNotApplicable\n}\n\nfunc init() {\n\tt[\"BasePatchNotApplicable\"] = reflect.TypeOf((*PatchNotApplicable)(nil)).Elem()\n}\n\nfunc (b *PerfEntityMetricBase) GetPerfEntityMetricBase() *PerfEntityMetricBase { return b }\n\ntype BasePerfEntityMetricBase interface {\n\tGetPerfEntityMetricBase() *PerfEntityMetricBase\n}\n\nfunc init() {\n\tt[\"BasePerfEntityMetricBase\"] = reflect.TypeOf((*PerfEntityMetricBase)(nil)).Elem()\n}\n\nfunc (b *PerfMetricSeries) GetPerfMetricSeries() *PerfMetricSeries { return b }\n\ntype BasePerfMetricSeries interface {\n\tGetPerfMetricSeries() *PerfMetricSeries\n}\n\nfunc init() {\n\tt[\"BasePerfMetricSeries\"] = reflect.TypeOf((*PerfMetricSeries)(nil)).Elem()\n}\n\nfunc (b *PermissionEvent) GetPermissionEvent() *PermissionEvent { return b }\n\ntype BasePermissionEvent 
interface {\n\tGetPermissionEvent() *PermissionEvent\n}\n\nfunc init() {\n\tt[\"BasePermissionEvent\"] = reflect.TypeOf((*PermissionEvent)(nil)).Elem()\n}\n\nfunc (b *PhysicalNicHint) GetPhysicalNicHint() *PhysicalNicHint { return b }\n\ntype BasePhysicalNicHint interface {\n\tGetPhysicalNicHint() *PhysicalNicHint\n}\n\nfunc init() {\n\tt[\"BasePhysicalNicHint\"] = reflect.TypeOf((*PhysicalNicHint)(nil)).Elem()\n}\n\nfunc (b *PlatformConfigFault) GetPlatformConfigFault() *PlatformConfigFault { return b }\n\ntype BasePlatformConfigFault interface {\n\tGetPlatformConfigFault() *PlatformConfigFault\n}\n\nfunc init() {\n\tt[\"BasePlatformConfigFault\"] = reflect.TypeOf((*PlatformConfigFault)(nil)).Elem()\n}\n\nfunc (b *PolicyOption) GetPolicyOption() *PolicyOption { return b }\n\ntype BasePolicyOption interface {\n\tGetPolicyOption() *PolicyOption\n}\n\nfunc init() {\n\tt[\"BasePolicyOption\"] = reflect.TypeOf((*PolicyOption)(nil)).Elem()\n}\n\nfunc (b *PortGroupProfile) GetPortGroupProfile() *PortGroupProfile { return b }\n\ntype BasePortGroupProfile interface {\n\tGetPortGroupProfile() *PortGroupProfile\n}\n\nfunc init() {\n\tt[\"BasePortGroupProfile\"] = reflect.TypeOf((*PortGroupProfile)(nil)).Elem()\n}\n\nfunc (b *ProfileConfigInfo) GetProfileConfigInfo() *ProfileConfigInfo { return b }\n\ntype BaseProfileConfigInfo interface {\n\tGetProfileConfigInfo() *ProfileConfigInfo\n}\n\nfunc init() {\n\tt[\"BaseProfileConfigInfo\"] = reflect.TypeOf((*ProfileConfigInfo)(nil)).Elem()\n}\n\nfunc (b *ProfileCreateSpec) GetProfileCreateSpec() *ProfileCreateSpec { return b }\n\ntype BaseProfileCreateSpec interface {\n\tGetProfileCreateSpec() *ProfileCreateSpec\n}\n\nfunc init() {\n\tt[\"BaseProfileCreateSpec\"] = reflect.TypeOf((*ProfileCreateSpec)(nil)).Elem()\n}\n\nfunc (b *ProfileEvent) GetProfileEvent() *ProfileEvent { return b }\n\ntype BaseProfileEvent interface {\n\tGetProfileEvent() *ProfileEvent\n}\n\nfunc init() {\n\tt[\"BaseProfileEvent\"] = 
reflect.TypeOf((*ProfileEvent)(nil)).Elem()\n}\n\nfunc (b *ProfileExecuteResult) GetProfileExecuteResult() *ProfileExecuteResult { return b }\n\ntype BaseProfileExecuteResult interface {\n\tGetProfileExecuteResult() *ProfileExecuteResult\n}\n\nfunc init() {\n\tt[\"BaseProfileExecuteResult\"] = reflect.TypeOf((*ProfileExecuteResult)(nil)).Elem()\n}\n\nfunc (b *ProfileExpression) GetProfileExpression() *ProfileExpression { return b }\n\ntype BaseProfileExpression interface {\n\tGetProfileExpression() *ProfileExpression\n}\n\nfunc init() {\n\tt[\"BaseProfileExpression\"] = reflect.TypeOf((*ProfileExpression)(nil)).Elem()\n}\n\nfunc (b *ProfilePolicyOptionMetadata) GetProfilePolicyOptionMetadata() *ProfilePolicyOptionMetadata {\n\treturn b\n}\n\ntype BaseProfilePolicyOptionMetadata interface {\n\tGetProfilePolicyOptionMetadata() *ProfilePolicyOptionMetadata\n}\n\nfunc init() {\n\tt[\"BaseProfilePolicyOptionMetadata\"] = reflect.TypeOf((*ProfilePolicyOptionMetadata)(nil)).Elem()\n}\n\nfunc (b *ProfileSerializedCreateSpec) GetProfileSerializedCreateSpec() *ProfileSerializedCreateSpec {\n\treturn b\n}\n\ntype BaseProfileSerializedCreateSpec interface {\n\tGetProfileSerializedCreateSpec() *ProfileSerializedCreateSpec\n}\n\nfunc init() {\n\tt[\"BaseProfileSerializedCreateSpec\"] = reflect.TypeOf((*ProfileSerializedCreateSpec)(nil)).Elem()\n}\n\nfunc (b *RDMNotSupported) GetRDMNotSupported() *RDMNotSupported { return b }\n\ntype BaseRDMNotSupported interface {\n\tGetRDMNotSupported() *RDMNotSupported\n}\n\nfunc init() {\n\tt[\"BaseRDMNotSupported\"] = reflect.TypeOf((*RDMNotSupported)(nil)).Elem()\n}\n\nfunc (b *RecurrentTaskScheduler) GetRecurrentTaskScheduler() *RecurrentTaskScheduler { return b }\n\ntype BaseRecurrentTaskScheduler interface {\n\tGetRecurrentTaskScheduler() *RecurrentTaskScheduler\n}\n\nfunc init() {\n\tt[\"BaseRecurrentTaskScheduler\"] = reflect.TypeOf((*RecurrentTaskScheduler)(nil)).Elem()\n}\n\nfunc (b *ReplicationConfigFault) 
GetReplicationConfigFault() *ReplicationConfigFault { return b }\n\ntype BaseReplicationConfigFault interface {\n\tGetReplicationConfigFault() *ReplicationConfigFault\n}\n\nfunc init() {\n\tt[\"BaseReplicationConfigFault\"] = reflect.TypeOf((*ReplicationConfigFault)(nil)).Elem()\n}\n\nfunc (b *ReplicationFault) GetReplicationFault() *ReplicationFault { return b }\n\ntype BaseReplicationFault interface {\n\tGetReplicationFault() *ReplicationFault\n}\n\nfunc init() {\n\tt[\"BaseReplicationFault\"] = reflect.TypeOf((*ReplicationFault)(nil)).Elem()\n}\n\nfunc (b *ReplicationVmFault) GetReplicationVmFault() *ReplicationVmFault { return b }\n\ntype BaseReplicationVmFault interface {\n\tGetReplicationVmFault() *ReplicationVmFault\n}\n\nfunc init() {\n\tt[\"BaseReplicationVmFault\"] = reflect.TypeOf((*ReplicationVmFault)(nil)).Elem()\n}\n\nfunc (b *ResourceAllocationInfo) GetResourceAllocationInfo() *ResourceAllocationInfo { return b }\n\ntype BaseResourceAllocationInfo interface {\n\tGetResourceAllocationInfo() *ResourceAllocationInfo\n}\n\nfunc init() {\n\tt[\"BaseResourceAllocationInfo\"] = reflect.TypeOf((*ResourceAllocationInfo)(nil)).Elem()\n}\n\nfunc (b *ResourceInUse) GetResourceInUse() *ResourceInUse { return b }\n\ntype BaseResourceInUse interface {\n\tGetResourceInUse() *ResourceInUse\n}\n\nfunc init() {\n\tt[\"BaseResourceInUse\"] = reflect.TypeOf((*ResourceInUse)(nil)).Elem()\n}\n\nfunc (b *ResourcePoolEvent) GetResourcePoolEvent() *ResourcePoolEvent { return b }\n\ntype BaseResourcePoolEvent interface {\n\tGetResourcePoolEvent() *ResourcePoolEvent\n}\n\nfunc init() {\n\tt[\"BaseResourcePoolEvent\"] = reflect.TypeOf((*ResourcePoolEvent)(nil)).Elem()\n}\n\nfunc (b *ResourcePoolSummary) GetResourcePoolSummary() *ResourcePoolSummary { return b }\n\ntype BaseResourcePoolSummary interface {\n\tGetResourcePoolSummary() *ResourcePoolSummary\n}\n\nfunc init() {\n\tt[\"BaseResourcePoolSummary\"] = reflect.TypeOf((*ResourcePoolSummary)(nil)).Elem()\n}\n\nfunc (b 
*RoleEvent) GetRoleEvent() *RoleEvent { return b }\n\ntype BaseRoleEvent interface {\n\tGetRoleEvent() *RoleEvent\n}\n\nfunc init() {\n\tt[\"BaseRoleEvent\"] = reflect.TypeOf((*RoleEvent)(nil)).Elem()\n}\n\nfunc (b *RuntimeFault) GetRuntimeFault() *RuntimeFault { return b }\n\ntype BaseRuntimeFault interface {\n\tGetRuntimeFault() *RuntimeFault\n}\n\nfunc init() {\n\tt[\"BaseRuntimeFault\"] = reflect.TypeOf((*RuntimeFault)(nil)).Elem()\n}\n\nfunc (b *ScheduledTaskEvent) GetScheduledTaskEvent() *ScheduledTaskEvent { return b }\n\ntype BaseScheduledTaskEvent interface {\n\tGetScheduledTaskEvent() *ScheduledTaskEvent\n}\n\nfunc init() {\n\tt[\"BaseScheduledTaskEvent\"] = reflect.TypeOf((*ScheduledTaskEvent)(nil)).Elem()\n}\n\nfunc (b *ScheduledTaskSpec) GetScheduledTaskSpec() *ScheduledTaskSpec { return b }\n\ntype BaseScheduledTaskSpec interface {\n\tGetScheduledTaskSpec() *ScheduledTaskSpec\n}\n\nfunc init() {\n\tt[\"BaseScheduledTaskSpec\"] = reflect.TypeOf((*ScheduledTaskSpec)(nil)).Elem()\n}\n\nfunc (b *ScsiLun) GetScsiLun() *ScsiLun { return b }\n\ntype BaseScsiLun interface {\n\tGetScsiLun() *ScsiLun\n}\n\nfunc init() {\n\tt[\"BaseScsiLun\"] = reflect.TypeOf((*ScsiLun)(nil)).Elem()\n}\n\nfunc (b *SecurityError) GetSecurityError() *SecurityError { return b }\n\ntype BaseSecurityError interface {\n\tGetSecurityError() *SecurityError\n}\n\nfunc init() {\n\tt[\"BaseSecurityError\"] = reflect.TypeOf((*SecurityError)(nil)).Elem()\n}\n\nfunc (b *SelectionSet) GetSelectionSet() *SelectionSet { return b }\n\ntype BaseSelectionSet interface {\n\tGetSelectionSet() *SelectionSet\n}\n\nfunc init() {\n\tt[\"BaseSelectionSet\"] = reflect.TypeOf((*SelectionSet)(nil)).Elem()\n}\n\nfunc (b *SelectionSpec) GetSelectionSpec() *SelectionSpec { return b }\n\ntype BaseSelectionSpec interface {\n\tGetSelectionSpec() *SelectionSpec\n}\n\nfunc init() {\n\tt[\"BaseSelectionSpec\"] = reflect.TypeOf((*SelectionSpec)(nil)).Elem()\n}\n\nfunc (b *ServiceLocatorCredential) 
GetServiceLocatorCredential() *ServiceLocatorCredential { return b }\n\ntype BaseServiceLocatorCredential interface {\n\tGetServiceLocatorCredential() *ServiceLocatorCredential\n}\n\nfunc init() {\n\tt[\"BaseServiceLocatorCredential\"] = reflect.TypeOf((*ServiceLocatorCredential)(nil)).Elem()\n}\n\nfunc (b *SessionEvent) GetSessionEvent() *SessionEvent { return b }\n\ntype BaseSessionEvent interface {\n\tGetSessionEvent() *SessionEvent\n}\n\nfunc init() {\n\tt[\"BaseSessionEvent\"] = reflect.TypeOf((*SessionEvent)(nil)).Elem()\n}\n\nfunc (b *SessionManagerServiceRequestSpec) GetSessionManagerServiceRequestSpec() *SessionManagerServiceRequestSpec {\n\treturn b\n}\n\ntype BaseSessionManagerServiceRequestSpec interface {\n\tGetSessionManagerServiceRequestSpec() *SessionManagerServiceRequestSpec\n}\n\nfunc init() {\n\tt[\"BaseSessionManagerServiceRequestSpec\"] = reflect.TypeOf((*SessionManagerServiceRequestSpec)(nil)).Elem()\n}\n\nfunc (b *SnapshotCopyNotSupported) GetSnapshotCopyNotSupported() *SnapshotCopyNotSupported { return b }\n\ntype BaseSnapshotCopyNotSupported interface {\n\tGetSnapshotCopyNotSupported() *SnapshotCopyNotSupported\n}\n\nfunc init() {\n\tt[\"BaseSnapshotCopyNotSupported\"] = reflect.TypeOf((*SnapshotCopyNotSupported)(nil)).Elem()\n}\n\nfunc (b *SnapshotFault) GetSnapshotFault() *SnapshotFault { return b }\n\ntype BaseSnapshotFault interface {\n\tGetSnapshotFault() *SnapshotFault\n}\n\nfunc init() {\n\tt[\"BaseSnapshotFault\"] = reflect.TypeOf((*SnapshotFault)(nil)).Elem()\n}\n\nfunc (b *TaskEvent) GetTaskEvent() *TaskEvent { return b }\n\ntype BaseTaskEvent interface {\n\tGetTaskEvent() *TaskEvent\n}\n\nfunc init() {\n\tt[\"BaseTaskEvent\"] = reflect.TypeOf((*TaskEvent)(nil)).Elem()\n}\n\nfunc (b *TaskInProgress) GetTaskInProgress() *TaskInProgress { return b }\n\ntype BaseTaskInProgress interface {\n\tGetTaskInProgress() *TaskInProgress\n}\n\nfunc init() {\n\tt[\"BaseTaskInProgress\"] = reflect.TypeOf((*TaskInProgress)(nil)).Elem()\n}\n\nfunc 
(b *TaskReason) GetTaskReason() *TaskReason { return b }\n\ntype BaseTaskReason interface {\n\tGetTaskReason() *TaskReason\n}\n\nfunc init() {\n\tt[\"BaseTaskReason\"] = reflect.TypeOf((*TaskReason)(nil)).Elem()\n}\n\nfunc (b *TaskScheduler) GetTaskScheduler() *TaskScheduler { return b }\n\ntype BaseTaskScheduler interface {\n\tGetTaskScheduler() *TaskScheduler\n}\n\nfunc init() {\n\tt[\"BaseTaskScheduler\"] = reflect.TypeOf((*TaskScheduler)(nil)).Elem()\n}\n\nfunc (b *TemplateUpgradeEvent) GetTemplateUpgradeEvent() *TemplateUpgradeEvent { return b }\n\ntype BaseTemplateUpgradeEvent interface {\n\tGetTemplateUpgradeEvent() *TemplateUpgradeEvent\n}\n\nfunc init() {\n\tt[\"BaseTemplateUpgradeEvent\"] = reflect.TypeOf((*TemplateUpgradeEvent)(nil)).Elem()\n}\n\nfunc (b *Timedout) GetTimedout() *Timedout { return b }\n\ntype BaseTimedout interface {\n\tGetTimedout() *Timedout\n}\n\nfunc init() {\n\tt[\"BaseTimedout\"] = reflect.TypeOf((*Timedout)(nil)).Elem()\n}\n\nfunc (b *TypeDescription) GetTypeDescription() *TypeDescription { return b }\n\ntype BaseTypeDescription interface {\n\tGetTypeDescription() *TypeDescription\n}\n\nfunc init() {\n\tt[\"BaseTypeDescription\"] = reflect.TypeOf((*TypeDescription)(nil)).Elem()\n}\n\nfunc (b *UnsupportedDatastore) GetUnsupportedDatastore() *UnsupportedDatastore { return b }\n\ntype BaseUnsupportedDatastore interface {\n\tGetUnsupportedDatastore() *UnsupportedDatastore\n}\n\nfunc init() {\n\tt[\"BaseUnsupportedDatastore\"] = reflect.TypeOf((*UnsupportedDatastore)(nil)).Elem()\n}\n\nfunc (b *UpgradeEvent) GetUpgradeEvent() *UpgradeEvent { return b }\n\ntype BaseUpgradeEvent interface {\n\tGetUpgradeEvent() *UpgradeEvent\n}\n\nfunc init() {\n\tt[\"BaseUpgradeEvent\"] = reflect.TypeOf((*UpgradeEvent)(nil)).Elem()\n}\n\nfunc (b *UserSearchResult) GetUserSearchResult() *UserSearchResult { return b }\n\ntype BaseUserSearchResult interface {\n\tGetUserSearchResult() *UserSearchResult\n}\n\nfunc init() {\n\tt[\"BaseUserSearchResult\"] = 
reflect.TypeOf((*UserSearchResult)(nil)).Elem()\n}\n\nfunc (b *VAppConfigFault) GetVAppConfigFault() *VAppConfigFault { return b }\n\ntype BaseVAppConfigFault interface {\n\tGetVAppConfigFault() *VAppConfigFault\n}\n\nfunc init() {\n\tt[\"BaseVAppConfigFault\"] = reflect.TypeOf((*VAppConfigFault)(nil)).Elem()\n}\n\nfunc (b *VAppPropertyFault) GetVAppPropertyFault() *VAppPropertyFault { return b }\n\ntype BaseVAppPropertyFault interface {\n\tGetVAppPropertyFault() *VAppPropertyFault\n}\n\nfunc init() {\n\tt[\"BaseVAppPropertyFault\"] = reflect.TypeOf((*VAppPropertyFault)(nil)).Elem()\n}\n\nfunc (b *VMotionInterfaceIssue) GetVMotionInterfaceIssue() *VMotionInterfaceIssue { return b }\n\ntype BaseVMotionInterfaceIssue interface {\n\tGetVMotionInterfaceIssue() *VMotionInterfaceIssue\n}\n\nfunc init() {\n\tt[\"BaseVMotionInterfaceIssue\"] = reflect.TypeOf((*VMotionInterfaceIssue)(nil)).Elem()\n}\n\nfunc (b *VMwareDVSHealthCheckConfig) GetVMwareDVSHealthCheckConfig() *VMwareDVSHealthCheckConfig {\n\treturn b\n}\n\ntype BaseVMwareDVSHealthCheckConfig interface {\n\tGetVMwareDVSHealthCheckConfig() *VMwareDVSHealthCheckConfig\n}\n\nfunc init() {\n\tt[\"BaseVMwareDVSHealthCheckConfig\"] = reflect.TypeOf((*VMwareDVSHealthCheckConfig)(nil)).Elem()\n}\n\nfunc (b *VimFault) GetVimFault() *VimFault { return b }\n\ntype BaseVimFault interface {\n\tGetVimFault() *VimFault\n}\n\nfunc init() {\n\tt[\"BaseVimFault\"] = reflect.TypeOf((*VimFault)(nil)).Elem()\n}\n\nfunc (b *VirtualController) GetVirtualController() *VirtualController { return b }\n\ntype BaseVirtualController interface {\n\tGetVirtualController() *VirtualController\n}\n\nfunc init() {\n\tt[\"BaseVirtualController\"] = reflect.TypeOf((*VirtualController)(nil)).Elem()\n}\n\nfunc (b *VirtualControllerOption) GetVirtualControllerOption() *VirtualControllerOption { return b }\n\ntype BaseVirtualControllerOption interface {\n\tGetVirtualControllerOption() *VirtualControllerOption\n}\n\nfunc init() 
{\n\tt[\"BaseVirtualControllerOption\"] = reflect.TypeOf((*VirtualControllerOption)(nil)).Elem()\n}\n\nfunc (b *VirtualDevice) GetVirtualDevice() *VirtualDevice { return b }\n\ntype BaseVirtualDevice interface {\n\tGetVirtualDevice() *VirtualDevice\n}\n\nfunc init() {\n\tt[\"BaseVirtualDevice\"] = reflect.TypeOf((*VirtualDevice)(nil)).Elem()\n}\n\nfunc (b *VirtualDeviceBackingInfo) GetVirtualDeviceBackingInfo() *VirtualDeviceBackingInfo { return b }\n\ntype BaseVirtualDeviceBackingInfo interface {\n\tGetVirtualDeviceBackingInfo() *VirtualDeviceBackingInfo\n}\n\nfunc init() {\n\tt[\"BaseVirtualDeviceBackingInfo\"] = reflect.TypeOf((*VirtualDeviceBackingInfo)(nil)).Elem()\n}\n\nfunc (b *VirtualDeviceBackingOption) GetVirtualDeviceBackingOption() *VirtualDeviceBackingOption {\n\treturn b\n}\n\ntype BaseVirtualDeviceBackingOption interface {\n\tGetVirtualDeviceBackingOption() *VirtualDeviceBackingOption\n}\n\nfunc init() {\n\tt[\"BaseVirtualDeviceBackingOption\"] = reflect.TypeOf((*VirtualDeviceBackingOption)(nil)).Elem()\n}\n\nfunc (b *VirtualDeviceBusSlotInfo) GetVirtualDeviceBusSlotInfo() *VirtualDeviceBusSlotInfo { return b }\n\ntype BaseVirtualDeviceBusSlotInfo interface {\n\tGetVirtualDeviceBusSlotInfo() *VirtualDeviceBusSlotInfo\n}\n\nfunc init() {\n\tt[\"BaseVirtualDeviceBusSlotInfo\"] = reflect.TypeOf((*VirtualDeviceBusSlotInfo)(nil)).Elem()\n}\n\nfunc (b *VirtualDeviceConfigSpec) GetVirtualDeviceConfigSpec() *VirtualDeviceConfigSpec { return b }\n\ntype BaseVirtualDeviceConfigSpec interface {\n\tGetVirtualDeviceConfigSpec() *VirtualDeviceConfigSpec\n}\n\nfunc init() {\n\tt[\"BaseVirtualDeviceConfigSpec\"] = reflect.TypeOf((*VirtualDeviceConfigSpec)(nil)).Elem()\n}\n\nfunc (b *VirtualDeviceDeviceBackingInfo) GetVirtualDeviceDeviceBackingInfo() *VirtualDeviceDeviceBackingInfo {\n\treturn b\n}\n\ntype BaseVirtualDeviceDeviceBackingInfo interface {\n\tGetVirtualDeviceDeviceBackingInfo() *VirtualDeviceDeviceBackingInfo\n}\n\nfunc init() 
{\n\tt[\"BaseVirtualDeviceDeviceBackingInfo\"] = reflect.TypeOf((*VirtualDeviceDeviceBackingInfo)(nil)).Elem()\n}\n\nfunc (b *VirtualDeviceDeviceBackingOption) GetVirtualDeviceDeviceBackingOption() *VirtualDeviceDeviceBackingOption {\n\treturn b\n}\n\ntype BaseVirtualDeviceDeviceBackingOption interface {\n\tGetVirtualDeviceDeviceBackingOption() *VirtualDeviceDeviceBackingOption\n}\n\nfunc init() {\n\tt[\"BaseVirtualDeviceDeviceBackingOption\"] = reflect.TypeOf((*VirtualDeviceDeviceBackingOption)(nil)).Elem()\n}\n\nfunc (b *VirtualDeviceFileBackingInfo) GetVirtualDeviceFileBackingInfo() *VirtualDeviceFileBackingInfo {\n\treturn b\n}\n\ntype BaseVirtualDeviceFileBackingInfo interface {\n\tGetVirtualDeviceFileBackingInfo() *VirtualDeviceFileBackingInfo\n}\n\nfunc init() {\n\tt[\"BaseVirtualDeviceFileBackingInfo\"] = reflect.TypeOf((*VirtualDeviceFileBackingInfo)(nil)).Elem()\n}\n\nfunc (b *VirtualDeviceFileBackingOption) GetVirtualDeviceFileBackingOption() *VirtualDeviceFileBackingOption {\n\treturn b\n}\n\ntype BaseVirtualDeviceFileBackingOption interface {\n\tGetVirtualDeviceFileBackingOption() *VirtualDeviceFileBackingOption\n}\n\nfunc init() {\n\tt[\"BaseVirtualDeviceFileBackingOption\"] = reflect.TypeOf((*VirtualDeviceFileBackingOption)(nil)).Elem()\n}\n\nfunc (b *VirtualDeviceOption) GetVirtualDeviceOption() *VirtualDeviceOption { return b }\n\ntype BaseVirtualDeviceOption interface {\n\tGetVirtualDeviceOption() *VirtualDeviceOption\n}\n\nfunc init() {\n\tt[\"BaseVirtualDeviceOption\"] = reflect.TypeOf((*VirtualDeviceOption)(nil)).Elem()\n}\n\nfunc (b *VirtualDevicePciBusSlotInfo) GetVirtualDevicePciBusSlotInfo() *VirtualDevicePciBusSlotInfo {\n\treturn b\n}\n\ntype BaseVirtualDevicePciBusSlotInfo interface {\n\tGetVirtualDevicePciBusSlotInfo() *VirtualDevicePciBusSlotInfo\n}\n\nfunc init() {\n\tt[\"BaseVirtualDevicePciBusSlotInfo\"] = reflect.TypeOf((*VirtualDevicePciBusSlotInfo)(nil)).Elem()\n}\n\nfunc (b *VirtualDevicePipeBackingInfo) 
GetVirtualDevicePipeBackingInfo() *VirtualDevicePipeBackingInfo {\n\treturn b\n}\n\ntype BaseVirtualDevicePipeBackingInfo interface {\n\tGetVirtualDevicePipeBackingInfo() *VirtualDevicePipeBackingInfo\n}\n\nfunc init() {\n\tt[\"BaseVirtualDevicePipeBackingInfo\"] = reflect.TypeOf((*VirtualDevicePipeBackingInfo)(nil)).Elem()\n}\n\nfunc (b *VirtualDevicePipeBackingOption) GetVirtualDevicePipeBackingOption() *VirtualDevicePipeBackingOption {\n\treturn b\n}\n\ntype BaseVirtualDevicePipeBackingOption interface {\n\tGetVirtualDevicePipeBackingOption() *VirtualDevicePipeBackingOption\n}\n\nfunc init() {\n\tt[\"BaseVirtualDevicePipeBackingOption\"] = reflect.TypeOf((*VirtualDevicePipeBackingOption)(nil)).Elem()\n}\n\nfunc (b *VirtualDeviceRemoteDeviceBackingInfo) GetVirtualDeviceRemoteDeviceBackingInfo() *VirtualDeviceRemoteDeviceBackingInfo {\n\treturn b\n}\n\ntype BaseVirtualDeviceRemoteDeviceBackingInfo interface {\n\tGetVirtualDeviceRemoteDeviceBackingInfo() *VirtualDeviceRemoteDeviceBackingInfo\n}\n\nfunc init() {\n\tt[\"BaseVirtualDeviceRemoteDeviceBackingInfo\"] = reflect.TypeOf((*VirtualDeviceRemoteDeviceBackingInfo)(nil)).Elem()\n}\n\nfunc (b *VirtualDeviceRemoteDeviceBackingOption) GetVirtualDeviceRemoteDeviceBackingOption() *VirtualDeviceRemoteDeviceBackingOption {\n\treturn b\n}\n\ntype BaseVirtualDeviceRemoteDeviceBackingOption interface {\n\tGetVirtualDeviceRemoteDeviceBackingOption() *VirtualDeviceRemoteDeviceBackingOption\n}\n\nfunc init() {\n\tt[\"BaseVirtualDeviceRemoteDeviceBackingOption\"] = reflect.TypeOf((*VirtualDeviceRemoteDeviceBackingOption)(nil)).Elem()\n}\n\nfunc (b *VirtualDeviceURIBackingInfo) GetVirtualDeviceURIBackingInfo() *VirtualDeviceURIBackingInfo {\n\treturn b\n}\n\ntype BaseVirtualDeviceURIBackingInfo interface {\n\tGetVirtualDeviceURIBackingInfo() *VirtualDeviceURIBackingInfo\n}\n\nfunc init() {\n\tt[\"BaseVirtualDeviceURIBackingInfo\"] = reflect.TypeOf((*VirtualDeviceURIBackingInfo)(nil)).Elem()\n}\n\nfunc (b 
*VirtualDeviceURIBackingOption) GetVirtualDeviceURIBackingOption() *VirtualDeviceURIBackingOption {\n\treturn b\n}\n\ntype BaseVirtualDeviceURIBackingOption interface {\n\tGetVirtualDeviceURIBackingOption() *VirtualDeviceURIBackingOption\n}\n\nfunc init() {\n\tt[\"BaseVirtualDeviceURIBackingOption\"] = reflect.TypeOf((*VirtualDeviceURIBackingOption)(nil)).Elem()\n}\n\nfunc (b *VirtualDiskRawDiskVer2BackingInfo) GetVirtualDiskRawDiskVer2BackingInfo() *VirtualDiskRawDiskVer2BackingInfo {\n\treturn b\n}\n\ntype BaseVirtualDiskRawDiskVer2BackingInfo interface {\n\tGetVirtualDiskRawDiskVer2BackingInfo() *VirtualDiskRawDiskVer2BackingInfo\n}\n\nfunc init() {\n\tt[\"BaseVirtualDiskRawDiskVer2BackingInfo\"] = reflect.TypeOf((*VirtualDiskRawDiskVer2BackingInfo)(nil)).Elem()\n}\n\nfunc (b *VirtualDiskRawDiskVer2BackingOption) GetVirtualDiskRawDiskVer2BackingOption() *VirtualDiskRawDiskVer2BackingOption {\n\treturn b\n}\n\ntype BaseVirtualDiskRawDiskVer2BackingOption interface {\n\tGetVirtualDiskRawDiskVer2BackingOption() *VirtualDiskRawDiskVer2BackingOption\n}\n\nfunc init() {\n\tt[\"BaseVirtualDiskRawDiskVer2BackingOption\"] = reflect.TypeOf((*VirtualDiskRawDiskVer2BackingOption)(nil)).Elem()\n}\n\nfunc (b *VirtualDiskSpec) GetVirtualDiskSpec() *VirtualDiskSpec { return b }\n\ntype BaseVirtualDiskSpec interface {\n\tGetVirtualDiskSpec() *VirtualDiskSpec\n}\n\nfunc init() {\n\tt[\"BaseVirtualDiskSpec\"] = reflect.TypeOf((*VirtualDiskSpec)(nil)).Elem()\n}\n\nfunc (b *VirtualEthernetCard) GetVirtualEthernetCard() *VirtualEthernetCard { return b }\n\ntype BaseVirtualEthernetCard interface {\n\tGetVirtualEthernetCard() *VirtualEthernetCard\n}\n\nfunc init() {\n\tt[\"BaseVirtualEthernetCard\"] = reflect.TypeOf((*VirtualEthernetCard)(nil)).Elem()\n}\n\nfunc (b *VirtualEthernetCardOption) GetVirtualEthernetCardOption() *VirtualEthernetCardOption {\n\treturn b\n}\n\ntype BaseVirtualEthernetCardOption interface {\n\tGetVirtualEthernetCardOption() *VirtualEthernetCardOption\n}\n\nfunc 
init() {\n\tt[\"BaseVirtualEthernetCardOption\"] = reflect.TypeOf((*VirtualEthernetCardOption)(nil)).Elem()\n}\n\nfunc (b *VirtualHardwareCompatibilityIssue) GetVirtualHardwareCompatibilityIssue() *VirtualHardwareCompatibilityIssue {\n\treturn b\n}\n\ntype BaseVirtualHardwareCompatibilityIssue interface {\n\tGetVirtualHardwareCompatibilityIssue() *VirtualHardwareCompatibilityIssue\n}\n\nfunc init() {\n\tt[\"BaseVirtualHardwareCompatibilityIssue\"] = reflect.TypeOf((*VirtualHardwareCompatibilityIssue)(nil)).Elem()\n}\n\nfunc (b *VirtualMachineBootOptionsBootableDevice) GetVirtualMachineBootOptionsBootableDevice() *VirtualMachineBootOptionsBootableDevice {\n\treturn b\n}\n\ntype BaseVirtualMachineBootOptionsBootableDevice interface {\n\tGetVirtualMachineBootOptionsBootableDevice() *VirtualMachineBootOptionsBootableDevice\n}\n\nfunc init() {\n\tt[\"BaseVirtualMachineBootOptionsBootableDevice\"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableDevice)(nil)).Elem()\n}\n\nfunc (b *VirtualMachineDeviceRuntimeInfoDeviceRuntimeState) GetVirtualMachineDeviceRuntimeInfoDeviceRuntimeState() *VirtualMachineDeviceRuntimeInfoDeviceRuntimeState {\n\treturn b\n}\n\ntype BaseVirtualMachineDeviceRuntimeInfoDeviceRuntimeState interface {\n\tGetVirtualMachineDeviceRuntimeInfoDeviceRuntimeState() *VirtualMachineDeviceRuntimeInfoDeviceRuntimeState\n}\n\nfunc init() {\n\tt[\"BaseVirtualMachineDeviceRuntimeInfoDeviceRuntimeState\"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoDeviceRuntimeState)(nil)).Elem()\n}\n\nfunc (b *VirtualMachineDiskDeviceInfo) GetVirtualMachineDiskDeviceInfo() *VirtualMachineDiskDeviceInfo {\n\treturn b\n}\n\ntype BaseVirtualMachineDiskDeviceInfo interface {\n\tGetVirtualMachineDiskDeviceInfo() *VirtualMachineDiskDeviceInfo\n}\n\nfunc init() {\n\tt[\"BaseVirtualMachineDiskDeviceInfo\"] = reflect.TypeOf((*VirtualMachineDiskDeviceInfo)(nil)).Elem()\n}\n\nfunc (b *VirtualMachineGuestQuiesceSpec) GetVirtualMachineGuestQuiesceSpec() 
*VirtualMachineGuestQuiesceSpec {\n\treturn b\n}\n\ntype BaseVirtualMachineGuestQuiesceSpec interface {\n\tGetVirtualMachineGuestQuiesceSpec() *VirtualMachineGuestQuiesceSpec\n}\n\nfunc init() {\n\tt[\"BaseVirtualMachineGuestQuiesceSpec\"] = reflect.TypeOf((*VirtualMachineGuestQuiesceSpec)(nil)).Elem()\n}\n\nfunc (b *VirtualMachinePciPassthroughInfo) GetVirtualMachinePciPassthroughInfo() *VirtualMachinePciPassthroughInfo {\n\treturn b\n}\n\ntype BaseVirtualMachinePciPassthroughInfo interface {\n\tGetVirtualMachinePciPassthroughInfo() *VirtualMachinePciPassthroughInfo\n}\n\nfunc init() {\n\tt[\"BaseVirtualMachinePciPassthroughInfo\"] = reflect.TypeOf((*VirtualMachinePciPassthroughInfo)(nil)).Elem()\n}\n\nfunc (b *VirtualMachineProfileSpec) GetVirtualMachineProfileSpec() *VirtualMachineProfileSpec {\n\treturn b\n}\n\ntype BaseVirtualMachineProfileSpec interface {\n\tGetVirtualMachineProfileSpec() *VirtualMachineProfileSpec\n}\n\nfunc init() {\n\tt[\"BaseVirtualMachineProfileSpec\"] = reflect.TypeOf((*VirtualMachineProfileSpec)(nil)).Elem()\n}\n\nfunc (b *VirtualMachineSriovDevicePoolInfo) GetVirtualMachineSriovDevicePoolInfo() *VirtualMachineSriovDevicePoolInfo {\n\treturn b\n}\n\ntype BaseVirtualMachineSriovDevicePoolInfo interface {\n\tGetVirtualMachineSriovDevicePoolInfo() *VirtualMachineSriovDevicePoolInfo\n}\n\nfunc init() {\n\tt[\"BaseVirtualMachineSriovDevicePoolInfo\"] = reflect.TypeOf((*VirtualMachineSriovDevicePoolInfo)(nil)).Elem()\n}\n\nfunc (b *VirtualMachineTargetInfo) GetVirtualMachineTargetInfo() *VirtualMachineTargetInfo { return b }\n\ntype BaseVirtualMachineTargetInfo interface {\n\tGetVirtualMachineTargetInfo() *VirtualMachineTargetInfo\n}\n\nfunc init() {\n\tt[\"BaseVirtualMachineTargetInfo\"] = reflect.TypeOf((*VirtualMachineTargetInfo)(nil)).Elem()\n}\n\nfunc (b *VirtualPCIPassthroughPluginBackingInfo) GetVirtualPCIPassthroughPluginBackingInfo() *VirtualPCIPassthroughPluginBackingInfo {\n\treturn b\n}\n\ntype 
BaseVirtualPCIPassthroughPluginBackingInfo interface {\n\tGetVirtualPCIPassthroughPluginBackingInfo() *VirtualPCIPassthroughPluginBackingInfo\n}\n\nfunc init() {\n\tt[\"BaseVirtualPCIPassthroughPluginBackingInfo\"] = reflect.TypeOf((*VirtualPCIPassthroughPluginBackingInfo)(nil)).Elem()\n}\n\nfunc (b *VirtualPCIPassthroughPluginBackingOption) GetVirtualPCIPassthroughPluginBackingOption() *VirtualPCIPassthroughPluginBackingOption {\n\treturn b\n}\n\ntype BaseVirtualPCIPassthroughPluginBackingOption interface {\n\tGetVirtualPCIPassthroughPluginBackingOption() *VirtualPCIPassthroughPluginBackingOption\n}\n\nfunc init() {\n\tt[\"BaseVirtualPCIPassthroughPluginBackingOption\"] = reflect.TypeOf((*VirtualPCIPassthroughPluginBackingOption)(nil)).Elem()\n}\n\nfunc (b *VirtualSATAController) GetVirtualSATAController() *VirtualSATAController { return b }\n\ntype BaseVirtualSATAController interface {\n\tGetVirtualSATAController() *VirtualSATAController\n}\n\nfunc init() {\n\tt[\"BaseVirtualSATAController\"] = reflect.TypeOf((*VirtualSATAController)(nil)).Elem()\n}\n\nfunc (b *VirtualSATAControllerOption) GetVirtualSATAControllerOption() *VirtualSATAControllerOption {\n\treturn b\n}\n\ntype BaseVirtualSATAControllerOption interface {\n\tGetVirtualSATAControllerOption() *VirtualSATAControllerOption\n}\n\nfunc init() {\n\tt[\"BaseVirtualSATAControllerOption\"] = reflect.TypeOf((*VirtualSATAControllerOption)(nil)).Elem()\n}\n\nfunc (b *VirtualSCSIController) GetVirtualSCSIController() *VirtualSCSIController { return b }\n\ntype BaseVirtualSCSIController interface {\n\tGetVirtualSCSIController() *VirtualSCSIController\n}\n\nfunc init() {\n\tt[\"BaseVirtualSCSIController\"] = reflect.TypeOf((*VirtualSCSIController)(nil)).Elem()\n}\n\nfunc (b *VirtualSCSIControllerOption) GetVirtualSCSIControllerOption() *VirtualSCSIControllerOption {\n\treturn b\n}\n\ntype BaseVirtualSCSIControllerOption interface {\n\tGetVirtualSCSIControllerOption() *VirtualSCSIControllerOption\n}\n\nfunc init() 
{\n\tt[\"BaseVirtualSCSIControllerOption\"] = reflect.TypeOf((*VirtualSCSIControllerOption)(nil)).Elem()\n}\n\nfunc (b *VirtualSoundCard) GetVirtualSoundCard() *VirtualSoundCard { return b }\n\ntype BaseVirtualSoundCard interface {\n\tGetVirtualSoundCard() *VirtualSoundCard\n}\n\nfunc init() {\n\tt[\"BaseVirtualSoundCard\"] = reflect.TypeOf((*VirtualSoundCard)(nil)).Elem()\n}\n\nfunc (b *VirtualSoundCardOption) GetVirtualSoundCardOption() *VirtualSoundCardOption { return b }\n\ntype BaseVirtualSoundCardOption interface {\n\tGetVirtualSoundCardOption() *VirtualSoundCardOption\n}\n\nfunc init() {\n\tt[\"BaseVirtualSoundCardOption\"] = reflect.TypeOf((*VirtualSoundCardOption)(nil)).Elem()\n}\n\nfunc (b *VirtualVmxnet) GetVirtualVmxnet() *VirtualVmxnet { return b }\n\ntype BaseVirtualVmxnet interface {\n\tGetVirtualVmxnet() *VirtualVmxnet\n}\n\nfunc init() {\n\tt[\"BaseVirtualVmxnet\"] = reflect.TypeOf((*VirtualVmxnet)(nil)).Elem()\n}\n\nfunc (b *VirtualVmxnet3) GetVirtualVmxnet3() *VirtualVmxnet3 { return b }\n\ntype BaseVirtualVmxnet3 interface {\n\tGetVirtualVmxnet3() *VirtualVmxnet3\n}\n\nfunc init() {\n\tt[\"BaseVirtualVmxnet3\"] = reflect.TypeOf((*VirtualVmxnet3)(nil)).Elem()\n}\n\nfunc (b *VirtualVmxnet3Option) GetVirtualVmxnet3Option() *VirtualVmxnet3Option { return b }\n\ntype BaseVirtualVmxnet3Option interface {\n\tGetVirtualVmxnet3Option() *VirtualVmxnet3Option\n}\n\nfunc init() {\n\tt[\"BaseVirtualVmxnet3Option\"] = reflect.TypeOf((*VirtualVmxnet3Option)(nil)).Elem()\n}\n\nfunc (b *VirtualVmxnetOption) GetVirtualVmxnetOption() *VirtualVmxnetOption { return b }\n\ntype BaseVirtualVmxnetOption interface {\n\tGetVirtualVmxnetOption() *VirtualVmxnetOption\n}\n\nfunc init() {\n\tt[\"BaseVirtualVmxnetOption\"] = reflect.TypeOf((*VirtualVmxnetOption)(nil)).Elem()\n}\n\nfunc (b *VmCloneEvent) GetVmCloneEvent() *VmCloneEvent { return b }\n\ntype BaseVmCloneEvent interface {\n\tGetVmCloneEvent() *VmCloneEvent\n}\n\nfunc init() {\n\tt[\"BaseVmCloneEvent\"] = 
reflect.TypeOf((*VmCloneEvent)(nil)).Elem()\n}\n\nfunc (b *VmConfigFault) GetVmConfigFault() *VmConfigFault { return b }\n\ntype BaseVmConfigFault interface {\n\tGetVmConfigFault() *VmConfigFault\n}\n\nfunc init() {\n\tt[\"BaseVmConfigFault\"] = reflect.TypeOf((*VmConfigFault)(nil)).Elem()\n}\n\nfunc (b *VmConfigFileInfo) GetVmConfigFileInfo() *VmConfigFileInfo { return b }\n\ntype BaseVmConfigFileInfo interface {\n\tGetVmConfigFileInfo() *VmConfigFileInfo\n}\n\nfunc init() {\n\tt[\"BaseVmConfigFileInfo\"] = reflect.TypeOf((*VmConfigFileInfo)(nil)).Elem()\n}\n\nfunc (b *VmConfigFileQuery) GetVmConfigFileQuery() *VmConfigFileQuery { return b }\n\ntype BaseVmConfigFileQuery interface {\n\tGetVmConfigFileQuery() *VmConfigFileQuery\n}\n\nfunc init() {\n\tt[\"BaseVmConfigFileQuery\"] = reflect.TypeOf((*VmConfigFileQuery)(nil)).Elem()\n}\n\nfunc (b *VmConfigInfo) GetVmConfigInfo() *VmConfigInfo { return b }\n\ntype BaseVmConfigInfo interface {\n\tGetVmConfigInfo() *VmConfigInfo\n}\n\nfunc init() {\n\tt[\"BaseVmConfigInfo\"] = reflect.TypeOf((*VmConfigInfo)(nil)).Elem()\n}\n\nfunc (b *VmConfigSpec) GetVmConfigSpec() *VmConfigSpec { return b }\n\ntype BaseVmConfigSpec interface {\n\tGetVmConfigSpec() *VmConfigSpec\n}\n\nfunc init() {\n\tt[\"BaseVmConfigSpec\"] = reflect.TypeOf((*VmConfigSpec)(nil)).Elem()\n}\n\nfunc (b *VmDasBeingResetEvent) GetVmDasBeingResetEvent() *VmDasBeingResetEvent { return b }\n\ntype BaseVmDasBeingResetEvent interface {\n\tGetVmDasBeingResetEvent() *VmDasBeingResetEvent\n}\n\nfunc init() {\n\tt[\"BaseVmDasBeingResetEvent\"] = reflect.TypeOf((*VmDasBeingResetEvent)(nil)).Elem()\n}\n\nfunc (b *VmEvent) GetVmEvent() *VmEvent { return b }\n\ntype BaseVmEvent interface {\n\tGetVmEvent() *VmEvent\n}\n\nfunc init() {\n\tt[\"BaseVmEvent\"] = reflect.TypeOf((*VmEvent)(nil)).Elem()\n}\n\nfunc (b *VmFaultToleranceIssue) GetVmFaultToleranceIssue() *VmFaultToleranceIssue { return b }\n\ntype BaseVmFaultToleranceIssue interface {\n\tGetVmFaultToleranceIssue() 
*VmFaultToleranceIssue\n}\n\nfunc init() {\n\tt[\"BaseVmFaultToleranceIssue\"] = reflect.TypeOf((*VmFaultToleranceIssue)(nil)).Elem()\n}\n\nfunc (b *VmMigratedEvent) GetVmMigratedEvent() *VmMigratedEvent { return b }\n\ntype BaseVmMigratedEvent interface {\n\tGetVmMigratedEvent() *VmMigratedEvent\n}\n\nfunc init() {\n\tt[\"BaseVmMigratedEvent\"] = reflect.TypeOf((*VmMigratedEvent)(nil)).Elem()\n}\n\nfunc (b *VmPoweredOffEvent) GetVmPoweredOffEvent() *VmPoweredOffEvent { return b }\n\ntype BaseVmPoweredOffEvent interface {\n\tGetVmPoweredOffEvent() *VmPoweredOffEvent\n}\n\nfunc init() {\n\tt[\"BaseVmPoweredOffEvent\"] = reflect.TypeOf((*VmPoweredOffEvent)(nil)).Elem()\n}\n\nfunc (b *VmPoweredOnEvent) GetVmPoweredOnEvent() *VmPoweredOnEvent { return b }\n\ntype BaseVmPoweredOnEvent interface {\n\tGetVmPoweredOnEvent() *VmPoweredOnEvent\n}\n\nfunc init() {\n\tt[\"BaseVmPoweredOnEvent\"] = reflect.TypeOf((*VmPoweredOnEvent)(nil)).Elem()\n}\n\nfunc (b *VmRelocateSpecEvent) GetVmRelocateSpecEvent() *VmRelocateSpecEvent { return b }\n\ntype BaseVmRelocateSpecEvent interface {\n\tGetVmRelocateSpecEvent() *VmRelocateSpecEvent\n}\n\nfunc init() {\n\tt[\"BaseVmRelocateSpecEvent\"] = reflect.TypeOf((*VmRelocateSpecEvent)(nil)).Elem()\n}\n\nfunc (b *VmStartingEvent) GetVmStartingEvent() *VmStartingEvent { return b }\n\ntype BaseVmStartingEvent interface {\n\tGetVmStartingEvent() *VmStartingEvent\n}\n\nfunc init() {\n\tt[\"BaseVmStartingEvent\"] = reflect.TypeOf((*VmStartingEvent)(nil)).Elem()\n}\n\nfunc (b *VmToolsUpgradeFault) GetVmToolsUpgradeFault() *VmToolsUpgradeFault { return b }\n\ntype BaseVmToolsUpgradeFault interface {\n\tGetVmToolsUpgradeFault() *VmToolsUpgradeFault\n}\n\nfunc init() {\n\tt[\"BaseVmToolsUpgradeFault\"] = reflect.TypeOf((*VmToolsUpgradeFault)(nil)).Elem()\n}\n\nfunc (b *VmfsDatastoreBaseOption) GetVmfsDatastoreBaseOption() *VmfsDatastoreBaseOption { return b }\n\ntype BaseVmfsDatastoreBaseOption interface {\n\tGetVmfsDatastoreBaseOption() 
*VmfsDatastoreBaseOption\n}\n\nfunc init() {\n\tt[\"BaseVmfsDatastoreBaseOption\"] = reflect.TypeOf((*VmfsDatastoreBaseOption)(nil)).Elem()\n}\n\nfunc (b *VmfsDatastoreSingleExtentOption) GetVmfsDatastoreSingleExtentOption() *VmfsDatastoreSingleExtentOption {\n\treturn b\n}\n\ntype BaseVmfsDatastoreSingleExtentOption interface {\n\tGetVmfsDatastoreSingleExtentOption() *VmfsDatastoreSingleExtentOption\n}\n\nfunc init() {\n\tt[\"BaseVmfsDatastoreSingleExtentOption\"] = reflect.TypeOf((*VmfsDatastoreSingleExtentOption)(nil)).Elem()\n}\n\nfunc (b *VmfsDatastoreSpec) GetVmfsDatastoreSpec() *VmfsDatastoreSpec { return b }\n\ntype BaseVmfsDatastoreSpec interface {\n\tGetVmfsDatastoreSpec() *VmfsDatastoreSpec\n}\n\nfunc init() {\n\tt[\"BaseVmfsDatastoreSpec\"] = reflect.TypeOf((*VmfsDatastoreSpec)(nil)).Elem()\n}\n\nfunc (b *VmfsMountFault) GetVmfsMountFault() *VmfsMountFault { return b }\n\ntype BaseVmfsMountFault interface {\n\tGetVmfsMountFault() *VmfsMountFault\n}\n\nfunc init() {\n\tt[\"BaseVmfsMountFault\"] = reflect.TypeOf((*VmfsMountFault)(nil)).Elem()\n}\n\nfunc (b *VmwareDistributedVirtualSwitchVlanSpec) GetVmwareDistributedVirtualSwitchVlanSpec() *VmwareDistributedVirtualSwitchVlanSpec {\n\treturn b\n}\n\ntype BaseVmwareDistributedVirtualSwitchVlanSpec interface {\n\tGetVmwareDistributedVirtualSwitchVlanSpec() *VmwareDistributedVirtualSwitchVlanSpec\n}\n\nfunc init() {\n\tt[\"BaseVmwareDistributedVirtualSwitchVlanSpec\"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchVlanSpec)(nil)).Elem()\n}\n\nfunc (b *VsanDiskFault) GetVsanDiskFault() *VsanDiskFault { return b }\n\ntype BaseVsanDiskFault interface {\n\tGetVsanDiskFault() *VsanDiskFault\n}\n\nfunc init() {\n\tt[\"BaseVsanDiskFault\"] = reflect.TypeOf((*VsanDiskFault)(nil)).Elem()\n}\n\nfunc (b *VsanFault) GetVsanFault() *VsanFault { return b }\n\ntype BaseVsanFault interface {\n\tGetVsanFault() *VsanFault\n}\n\nfunc init() {\n\tt[\"BaseVsanFault\"] = reflect.TypeOf((*VsanFault)(nil)).Elem()\n}\n\nfunc (b 
*VsanUpgradeSystemPreflightCheckIssue) GetVsanUpgradeSystemPreflightCheckIssue() *VsanUpgradeSystemPreflightCheckIssue {\n\treturn b\n}\n\ntype BaseVsanUpgradeSystemPreflightCheckIssue interface {\n\tGetVsanUpgradeSystemPreflightCheckIssue() *VsanUpgradeSystemPreflightCheckIssue\n}\n\nfunc init() {\n\tt[\"BaseVsanUpgradeSystemPreflightCheckIssue\"] = reflect.TypeOf((*VsanUpgradeSystemPreflightCheckIssue)(nil)).Elem()\n}\n\nfunc (b *VsanUpgradeSystemUpgradeHistoryItem) GetVsanUpgradeSystemUpgradeHistoryItem() *VsanUpgradeSystemUpgradeHistoryItem {\n\treturn b\n}\n\ntype BaseVsanUpgradeSystemUpgradeHistoryItem interface {\n\tGetVsanUpgradeSystemUpgradeHistoryItem() *VsanUpgradeSystemUpgradeHistoryItem\n}\n\nfunc init() {\n\tt[\"BaseVsanUpgradeSystemUpgradeHistoryItem\"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryItem)(nil)).Elem()\n}\n\nfunc (b *VslmCreateSpecBackingSpec) GetVslmCreateSpecBackingSpec() *VslmCreateSpecBackingSpec {\n\treturn b\n}\n\ntype BaseVslmCreateSpecBackingSpec interface {\n\tGetVslmCreateSpecBackingSpec() *VslmCreateSpecBackingSpec\n}\n\nfunc init() {\n\tt[\"BaseVslmCreateSpecBackingSpec\"] = reflect.TypeOf((*VslmCreateSpecBackingSpec)(nil)).Elem()\n}\n\nfunc (b *VslmMigrateSpec) GetVslmMigrateSpec() *VslmMigrateSpec { return b }\n\ntype BaseVslmMigrateSpec interface {\n\tGetVslmMigrateSpec() *VslmMigrateSpec\n}\n\nfunc init() {\n\tt[\"BaseVslmMigrateSpec\"] = reflect.TypeOf((*VslmMigrateSpec)(nil)).Elem()\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/types/internal.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage types\n\nimport \"reflect\"\n\ntype DynamicTypeMgrQueryMoInstances struct {\n\tThis       ManagedObjectReference       `xml:\"_this\"`\n\tFilterSpec BaseDynamicTypeMgrFilterSpec `xml:\"filterSpec,omitempty,typeattr\"`\n}\n\ntype DynamicTypeMgrQueryMoInstancesResponse struct {\n\tReturnval []DynamicTypeMgrMoInstance `xml:\"urn:vim25 returnval\"`\n}\n\ntype DynamicTypeEnumTypeInfo struct {\n\tDynamicData\n\n\tName       string                     `xml:\"name\"`\n\tWsdlName   string                     `xml:\"wsdlName\"`\n\tVersion    string                     `xml:\"version\"`\n\tValue      []string                   `xml:\"value,omitempty\"`\n\tAnnotation []DynamicTypeMgrAnnotation `xml:\"annotation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DynamicTypeEnumTypeInfo\"] = reflect.TypeOf((*DynamicTypeEnumTypeInfo)(nil)).Elem()\n}\n\ntype DynamicTypeMgrAllTypeInfo struct {\n\tDynamicData\n\n\tManagedTypeInfo []DynamicTypeMgrManagedTypeInfo `xml:\"managedTypeInfo,omitempty\"`\n\tEnumTypeInfo    []DynamicTypeEnumTypeInfo       `xml:\"enumTypeInfo,omitempty\"`\n\tDataTypeInfo    []DynamicTypeMgrDataTypeInfo    `xml:\"dataTypeInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DynamicTypeMgrAllTypeInfo\"] = reflect.TypeOf((*DynamicTypeMgrAllTypeInfo)(nil)).Elem()\n}\n\ntype DynamicTypeMgrAnnotation struct {\n\tDynamicData\n\n\tName      string   
`xml:\"name\"`\n\tParameter []string `xml:\"parameter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DynamicTypeMgrAnnotation\"] = reflect.TypeOf((*DynamicTypeMgrAnnotation)(nil)).Elem()\n}\n\ntype DynamicTypeMgrDataTypeInfo struct {\n\tDynamicData\n\n\tName       string                           `xml:\"name\"`\n\tWsdlName   string                           `xml:\"wsdlName\"`\n\tVersion    string                           `xml:\"version\"`\n\tBase       []string                         `xml:\"base,omitempty\"`\n\tProperty   []DynamicTypeMgrPropertyTypeInfo `xml:\"property,omitempty\"`\n\tAnnotation []DynamicTypeMgrAnnotation       `xml:\"annotation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DynamicTypeMgrDataTypeInfo\"] = reflect.TypeOf((*DynamicTypeMgrDataTypeInfo)(nil)).Elem()\n}\n\nfunc (b *DynamicTypeMgrFilterSpec) GetDynamicTypeMgrFilterSpec() *DynamicTypeMgrFilterSpec { return b }\n\ntype BaseDynamicTypeMgrFilterSpec interface {\n\tGetDynamicTypeMgrFilterSpec() *DynamicTypeMgrFilterSpec\n}\n\ntype DynamicTypeMgrFilterSpec struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"DynamicTypeMgrFilterSpec\"] = reflect.TypeOf((*DynamicTypeMgrFilterSpec)(nil)).Elem()\n}\n\ntype DynamicTypeMgrManagedTypeInfo struct {\n\tDynamicData\n\n\tName       string                           `xml:\"name\"`\n\tWsdlName   string                           `xml:\"wsdlName\"`\n\tVersion    string                           `xml:\"version\"`\n\tBase       []string                         `xml:\"base,omitempty\"`\n\tProperty   []DynamicTypeMgrPropertyTypeInfo `xml:\"property,omitempty\"`\n\tMethod     []DynamicTypeMgrMethodTypeInfo   `xml:\"method,omitempty\"`\n\tAnnotation []DynamicTypeMgrAnnotation       `xml:\"annotation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DynamicTypeMgrManagedTypeInfo\"] = reflect.TypeOf((*DynamicTypeMgrManagedTypeInfo)(nil)).Elem()\n}\n\ntype DynamicTypeMgrMethodTypeInfo struct {\n\tDynamicData\n\n\tName           string                        `xml:\"name\"`\n\tWsdlName      
 string                        `xml:\"wsdlName\"`\n\tVersion        string                        `xml:\"version\"`\n\tParamTypeInfo  []DynamicTypeMgrParamTypeInfo `xml:\"paramTypeInfo,omitempty\"`\n\tReturnTypeInfo *DynamicTypeMgrParamTypeInfo  `xml:\"returnTypeInfo,omitempty\"`\n\tFault          []string                      `xml:\"fault,omitempty\"`\n\tPrivId         string                        `xml:\"privId,omitempty\"`\n\tAnnotation     []DynamicTypeMgrAnnotation    `xml:\"annotation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DynamicTypeMgrMethodTypeInfo\"] = reflect.TypeOf((*DynamicTypeMgrMethodTypeInfo)(nil)).Elem()\n}\n\ntype DynamicTypeMgrMoFilterSpec struct {\n\tDynamicTypeMgrFilterSpec\n\n\tId         string `xml:\"id,omitempty\"`\n\tTypeSubstr string `xml:\"typeSubstr,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DynamicTypeMgrMoFilterSpec\"] = reflect.TypeOf((*DynamicTypeMgrMoFilterSpec)(nil)).Elem()\n}\n\ntype DynamicTypeMgrMoInstance struct {\n\tDynamicData\n\n\tId     string `xml:\"id\"`\n\tMoType string `xml:\"moType\"`\n}\n\nfunc init() {\n\tt[\"DynamicTypeMgrMoInstance\"] = reflect.TypeOf((*DynamicTypeMgrMoInstance)(nil)).Elem()\n}\n\ntype DynamicTypeMgrParamTypeInfo struct {\n\tDynamicData\n\n\tName       string                     `xml:\"name\"`\n\tVersion    string                     `xml:\"version\"`\n\tType       string                     `xml:\"type\"`\n\tPrivId     string                     `xml:\"privId,omitempty\"`\n\tAnnotation []DynamicTypeMgrAnnotation `xml:\"annotation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DynamicTypeMgrParamTypeInfo\"] = reflect.TypeOf((*DynamicTypeMgrParamTypeInfo)(nil)).Elem()\n}\n\ntype DynamicTypeMgrPropertyTypeInfo struct {\n\tDynamicData\n\n\tName        string                     `xml:\"name\"`\n\tVersion     string                     `xml:\"version\"`\n\tType        string                     `xml:\"type\"`\n\tPrivId      string                     `xml:\"privId,omitempty\"`\n\tMsgIdFormat string              
       `xml:\"msgIdFormat,omitempty\"`\n\tAnnotation  []DynamicTypeMgrAnnotation `xml:\"annotation,omitempty\"`\n}\n\ntype DynamicTypeMgrQueryTypeInfo struct {\n\tThis       ManagedObjectReference       `xml:\"_this\"`\n\tFilterSpec BaseDynamicTypeMgrFilterSpec `xml:\"filterSpec,omitempty,typeattr\"`\n}\n\ntype DynamicTypeMgrQueryTypeInfoResponse struct {\n\tReturnval DynamicTypeMgrAllTypeInfo `xml:\"urn:vim25 returnval\"`\n}\n\nfunc init() {\n\tt[\"DynamicTypeMgrPropertyTypeInfo\"] = reflect.TypeOf((*DynamicTypeMgrPropertyTypeInfo)(nil)).Elem()\n}\n\ntype DynamicTypeMgrTypeFilterSpec struct {\n\tDynamicTypeMgrFilterSpec\n\n\tTypeSubstr string `xml:\"typeSubstr,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DynamicTypeMgrTypeFilterSpec\"] = reflect.TypeOf((*DynamicTypeMgrTypeFilterSpec)(nil)).Elem()\n}\n\ntype ReflectManagedMethodExecuterSoapArgument struct {\n\tDynamicData\n\n\tName string `xml:\"name\"`\n\tVal  string `xml:\"val\"`\n}\n\nfunc init() {\n\tt[\"ReflectManagedMethodExecuterSoapArgument\"] = reflect.TypeOf((*ReflectManagedMethodExecuterSoapArgument)(nil)).Elem()\n}\n\ntype ReflectManagedMethodExecuterSoapFault struct {\n\tDynamicData\n\n\tFaultMsg    string `xml:\"faultMsg\"`\n\tFaultDetail string `xml:\"faultDetail,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ReflectManagedMethodExecuterSoapFault\"] = reflect.TypeOf((*ReflectManagedMethodExecuterSoapFault)(nil)).Elem()\n}\n\ntype ReflectManagedMethodExecuterSoapResult struct {\n\tDynamicData\n\n\tResponse string                                 `xml:\"response,omitempty\"`\n\tFault    *ReflectManagedMethodExecuterSoapFault `xml:\"fault,omitempty\"`\n}\n\ntype RetrieveDynamicTypeManager struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\ntype RetrieveDynamicTypeManagerResponse struct {\n\tReturnval *InternalDynamicTypeManager `xml:\"urn:vim25 returnval\"`\n}\n\ntype RetrieveManagedMethodExecuter struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() 
{\n\tt[\"RetrieveManagedMethodExecuter\"] = reflect.TypeOf((*RetrieveManagedMethodExecuter)(nil)).Elem()\n}\n\ntype RetrieveManagedMethodExecuterResponse struct {\n\tReturnval *ReflectManagedMethodExecuter `xml:\"urn:vim25 returnval\"`\n}\n\ntype InternalDynamicTypeManager struct {\n\tManagedObjectReference\n}\n\ntype ReflectManagedMethodExecuter struct {\n\tManagedObjectReference\n}\n\ntype ExecuteSoap struct {\n\tThis     ManagedObjectReference                     `xml:\"_this\"`\n\tMoid     string                                     `xml:\"moid\"`\n\tVersion  string                                     `xml:\"version\"`\n\tMethod   string                                     `xml:\"method\"`\n\tArgument []ReflectManagedMethodExecuterSoapArgument `xml:\"argument,omitempty\"`\n}\n\ntype ExecuteSoapResponse struct {\n\tReturnval *ReflectManagedMethodExecuterSoapResult `xml:\"urn:vim25 returnval\"`\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/types/registry.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage types\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n)\n\nvar t = map[string]reflect.Type{}\n\nfunc Add(name string, kind reflect.Type) {\n\tt[name] = kind\n}\n\ntype Func func(string) (reflect.Type, bool)\n\nfunc TypeFunc() Func {\n\treturn func(name string) (reflect.Type, bool) {\n\t\ttyp, ok := t[name]\n\t\tif !ok {\n\t\t\t// The /sdk endpoint does not prefix types with the namespace,\n\t\t\t// but extension endpoints, such as /pbm/sdk do.\n\t\t\tname = strings.TrimPrefix(name, \"vim25:\")\n\t\t\ttyp, ok = t[name]\n\t\t}\n\t\treturn typ, ok\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/types/registry_test.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage types\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestTypeFunc(t *testing.T) {\n\tvar ok bool\n\n\tfn := TypeFunc()\n\n\t_, ok = fn(\"unknown\")\n\tif ok {\n\t\tt.Errorf(\"Expected ok==false\")\n\t}\n\n\tactual, ok := fn(\"UserProfile\")\n\tif !ok {\n\t\tt.Errorf(\"Expected ok==true\")\n\t}\n\n\texpected := reflect.TypeOf(UserProfile{})\n\tif !reflect.DeepEqual(expected, actual) {\n\t\tt.Errorf(\"Expected: %#v, actual: %#v\", expected, actual)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/types/types.go",
    "content": "/*\nCopyright (c) 2014-2017 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage types\n\nimport (\n\t\"net/url\"\n\t\"reflect\"\n\t\"time\"\n)\n\ntype AbdicateDomOwnership AbdicateDomOwnershipRequestType\n\nfunc init() {\n\tt[\"AbdicateDomOwnership\"] = reflect.TypeOf((*AbdicateDomOwnership)(nil)).Elem()\n}\n\ntype AbdicateDomOwnershipRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tUuids []string               `xml:\"uuids\"`\n}\n\nfunc init() {\n\tt[\"AbdicateDomOwnershipRequestType\"] = reflect.TypeOf((*AbdicateDomOwnershipRequestType)(nil)).Elem()\n}\n\ntype AbdicateDomOwnershipResponse struct {\n\tReturnval []string `xml:\"returnval,omitempty\"`\n}\n\ntype AboutInfo struct {\n\tDynamicData\n\n\tName                  string `xml:\"name\"`\n\tFullName              string `xml:\"fullName\"`\n\tVendor                string `xml:\"vendor\"`\n\tVersion               string `xml:\"version\"`\n\tBuild                 string `xml:\"build\"`\n\tLocaleVersion         string `xml:\"localeVersion,omitempty\"`\n\tLocaleBuild           string `xml:\"localeBuild,omitempty\"`\n\tOsType                string `xml:\"osType\"`\n\tProductLineId         string `xml:\"productLineId\"`\n\tApiType               string `xml:\"apiType\"`\n\tApiVersion            string `xml:\"apiVersion\"`\n\tInstanceUuid          string `xml:\"instanceUuid,omitempty\"`\n\tLicenseProductName    string 
`xml:\"licenseProductName,omitempty\"`\n\tLicenseProductVersion string `xml:\"licenseProductVersion,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AboutInfo\"] = reflect.TypeOf((*AboutInfo)(nil)).Elem()\n}\n\ntype AccountCreatedEvent struct {\n\tHostEvent\n\n\tSpec  BaseHostAccountSpec `xml:\"spec,typeattr\"`\n\tGroup bool                `xml:\"group\"`\n}\n\nfunc init() {\n\tt[\"AccountCreatedEvent\"] = reflect.TypeOf((*AccountCreatedEvent)(nil)).Elem()\n}\n\ntype AccountRemovedEvent struct {\n\tHostEvent\n\n\tAccount string `xml:\"account\"`\n\tGroup   bool   `xml:\"group\"`\n}\n\nfunc init() {\n\tt[\"AccountRemovedEvent\"] = reflect.TypeOf((*AccountRemovedEvent)(nil)).Elem()\n}\n\ntype AccountUpdatedEvent struct {\n\tHostEvent\n\n\tSpec            BaseHostAccountSpec `xml:\"spec,typeattr\"`\n\tGroup           bool                `xml:\"group\"`\n\tPrevDescription string              `xml:\"prevDescription,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AccountUpdatedEvent\"] = reflect.TypeOf((*AccountUpdatedEvent)(nil)).Elem()\n}\n\ntype AcknowledgeAlarm AcknowledgeAlarmRequestType\n\nfunc init() {\n\tt[\"AcknowledgeAlarm\"] = reflect.TypeOf((*AcknowledgeAlarm)(nil)).Elem()\n}\n\ntype AcknowledgeAlarmRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tAlarm  ManagedObjectReference `xml:\"alarm\"`\n\tEntity ManagedObjectReference `xml:\"entity\"`\n}\n\nfunc init() {\n\tt[\"AcknowledgeAlarmRequestType\"] = reflect.TypeOf((*AcknowledgeAlarmRequestType)(nil)).Elem()\n}\n\ntype AcknowledgeAlarmResponse struct {\n}\n\ntype AcquireCimServicesTicket AcquireCimServicesTicketRequestType\n\nfunc init() {\n\tt[\"AcquireCimServicesTicket\"] = reflect.TypeOf((*AcquireCimServicesTicket)(nil)).Elem()\n}\n\ntype AcquireCimServicesTicketRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"AcquireCimServicesTicketRequestType\"] = reflect.TypeOf((*AcquireCimServicesTicketRequestType)(nil)).Elem()\n}\n\ntype 
AcquireCimServicesTicketResponse struct {\n\tReturnval HostServiceTicket `xml:\"returnval\"`\n}\n\ntype AcquireCloneTicket AcquireCloneTicketRequestType\n\nfunc init() {\n\tt[\"AcquireCloneTicket\"] = reflect.TypeOf((*AcquireCloneTicket)(nil)).Elem()\n}\n\ntype AcquireCloneTicketRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"AcquireCloneTicketRequestType\"] = reflect.TypeOf((*AcquireCloneTicketRequestType)(nil)).Elem()\n}\n\ntype AcquireCloneTicketResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype AcquireCredentialsInGuest AcquireCredentialsInGuestRequestType\n\nfunc init() {\n\tt[\"AcquireCredentialsInGuest\"] = reflect.TypeOf((*AcquireCredentialsInGuest)(nil)).Elem()\n}\n\ntype AcquireCredentialsInGuestRequestType struct {\n\tThis          ManagedObjectReference  `xml:\"_this\"`\n\tVm            ManagedObjectReference  `xml:\"vm\"`\n\tRequestedAuth BaseGuestAuthentication `xml:\"requestedAuth,typeattr\"`\n\tSessionID     int64                   `xml:\"sessionID,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AcquireCredentialsInGuestRequestType\"] = reflect.TypeOf((*AcquireCredentialsInGuestRequestType)(nil)).Elem()\n}\n\ntype AcquireCredentialsInGuestResponse struct {\n\tReturnval BaseGuestAuthentication `xml:\"returnval,typeattr\"`\n}\n\ntype AcquireGenericServiceTicket AcquireGenericServiceTicketRequestType\n\nfunc init() {\n\tt[\"AcquireGenericServiceTicket\"] = reflect.TypeOf((*AcquireGenericServiceTicket)(nil)).Elem()\n}\n\ntype AcquireGenericServiceTicketRequestType struct {\n\tThis ManagedObjectReference               `xml:\"_this\"`\n\tSpec BaseSessionManagerServiceRequestSpec `xml:\"spec,typeattr\"`\n}\n\nfunc init() {\n\tt[\"AcquireGenericServiceTicketRequestType\"] = reflect.TypeOf((*AcquireGenericServiceTicketRequestType)(nil)).Elem()\n}\n\ntype AcquireGenericServiceTicketResponse struct {\n\tReturnval SessionManagerGenericServiceTicket `xml:\"returnval\"`\n}\n\ntype AcquireLocalTicket 
AcquireLocalTicketRequestType\n\nfunc init() {\n\tt[\"AcquireLocalTicket\"] = reflect.TypeOf((*AcquireLocalTicket)(nil)).Elem()\n}\n\ntype AcquireLocalTicketRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tUserName string                 `xml:\"userName\"`\n}\n\nfunc init() {\n\tt[\"AcquireLocalTicketRequestType\"] = reflect.TypeOf((*AcquireLocalTicketRequestType)(nil)).Elem()\n}\n\ntype AcquireLocalTicketResponse struct {\n\tReturnval SessionManagerLocalTicket `xml:\"returnval\"`\n}\n\ntype AcquireMksTicket AcquireMksTicketRequestType\n\nfunc init() {\n\tt[\"AcquireMksTicket\"] = reflect.TypeOf((*AcquireMksTicket)(nil)).Elem()\n}\n\ntype AcquireMksTicketRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"AcquireMksTicketRequestType\"] = reflect.TypeOf((*AcquireMksTicketRequestType)(nil)).Elem()\n}\n\ntype AcquireMksTicketResponse struct {\n\tReturnval VirtualMachineMksTicket `xml:\"returnval\"`\n}\n\ntype AcquireTicket AcquireTicketRequestType\n\nfunc init() {\n\tt[\"AcquireTicket\"] = reflect.TypeOf((*AcquireTicket)(nil)).Elem()\n}\n\ntype AcquireTicketRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tTicketType string                 `xml:\"ticketType\"`\n}\n\nfunc init() {\n\tt[\"AcquireTicketRequestType\"] = reflect.TypeOf((*AcquireTicketRequestType)(nil)).Elem()\n}\n\ntype AcquireTicketResponse struct {\n\tReturnval VirtualMachineTicket `xml:\"returnval\"`\n}\n\ntype Action struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"Action\"] = reflect.TypeOf((*Action)(nil)).Elem()\n}\n\ntype ActiveDirectoryFault struct {\n\tVimFault\n\n\tErrorCode int32 `xml:\"errorCode,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ActiveDirectoryFault\"] = reflect.TypeOf((*ActiveDirectoryFault)(nil)).Elem()\n}\n\ntype ActiveDirectoryFaultFault BaseActiveDirectoryFault\n\nfunc init() {\n\tt[\"ActiveDirectoryFaultFault\"] = reflect.TypeOf((*ActiveDirectoryFaultFault)(nil)).Elem()\n}\n\ntype 
ActiveDirectoryProfile struct {\n\tApplyProfile\n}\n\nfunc init() {\n\tt[\"ActiveDirectoryProfile\"] = reflect.TypeOf((*ActiveDirectoryProfile)(nil)).Elem()\n}\n\ntype ActiveVMsBlockingEVC struct {\n\tEVCConfigFault\n\n\tEvcMode  string                   `xml:\"evcMode,omitempty\"`\n\tHost     []ManagedObjectReference `xml:\"host,omitempty\"`\n\tHostName []string                 `xml:\"hostName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ActiveVMsBlockingEVC\"] = reflect.TypeOf((*ActiveVMsBlockingEVC)(nil)).Elem()\n}\n\ntype ActiveVMsBlockingEVCFault ActiveVMsBlockingEVC\n\nfunc init() {\n\tt[\"ActiveVMsBlockingEVCFault\"] = reflect.TypeOf((*ActiveVMsBlockingEVCFault)(nil)).Elem()\n}\n\ntype AddAuthorizationRole AddAuthorizationRoleRequestType\n\nfunc init() {\n\tt[\"AddAuthorizationRole\"] = reflect.TypeOf((*AddAuthorizationRole)(nil)).Elem()\n}\n\ntype AddAuthorizationRoleRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tName    string                 `xml:\"name\"`\n\tPrivIds []string               `xml:\"privIds,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AddAuthorizationRoleRequestType\"] = reflect.TypeOf((*AddAuthorizationRoleRequestType)(nil)).Elem()\n}\n\ntype AddAuthorizationRoleResponse struct {\n\tReturnval int32 `xml:\"returnval\"`\n}\n\ntype AddCustomFieldDef AddCustomFieldDefRequestType\n\nfunc init() {\n\tt[\"AddCustomFieldDef\"] = reflect.TypeOf((*AddCustomFieldDef)(nil)).Elem()\n}\n\ntype AddCustomFieldDefRequestType struct {\n\tThis           ManagedObjectReference `xml:\"_this\"`\n\tName           string                 `xml:\"name\"`\n\tMoType         string                 `xml:\"moType,omitempty\"`\n\tFieldDefPolicy *PrivilegePolicyDef    `xml:\"fieldDefPolicy,omitempty\"`\n\tFieldPolicy    *PrivilegePolicyDef    `xml:\"fieldPolicy,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AddCustomFieldDefRequestType\"] = reflect.TypeOf((*AddCustomFieldDefRequestType)(nil)).Elem()\n}\n\ntype AddCustomFieldDefResponse struct {\n\tReturnval 
CustomFieldDef `xml:\"returnval\"`\n}\n\ntype AddDVPortgroupRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tSpec []DVPortgroupConfigSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"AddDVPortgroupRequestType\"] = reflect.TypeOf((*AddDVPortgroupRequestType)(nil)).Elem()\n}\n\ntype AddDVPortgroup_Task AddDVPortgroupRequestType\n\nfunc init() {\n\tt[\"AddDVPortgroup_Task\"] = reflect.TypeOf((*AddDVPortgroup_Task)(nil)).Elem()\n}\n\ntype AddDVPortgroup_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype AddDisksRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tDisk []HostScsiDisk         `xml:\"disk\"`\n}\n\nfunc init() {\n\tt[\"AddDisksRequestType\"] = reflect.TypeOf((*AddDisksRequestType)(nil)).Elem()\n}\n\ntype AddDisks_Task AddDisksRequestType\n\nfunc init() {\n\tt[\"AddDisks_Task\"] = reflect.TypeOf((*AddDisks_Task)(nil)).Elem()\n}\n\ntype AddDisks_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype AddFilter AddFilterRequestType\n\nfunc init() {\n\tt[\"AddFilter\"] = reflect.TypeOf((*AddFilter)(nil)).Elem()\n}\n\ntype AddFilterEntities AddFilterEntitiesRequestType\n\nfunc init() {\n\tt[\"AddFilterEntities\"] = reflect.TypeOf((*AddFilterEntities)(nil)).Elem()\n}\n\ntype AddFilterEntitiesRequestType struct {\n\tThis     ManagedObjectReference   `xml:\"_this\"`\n\tFilterId string                   `xml:\"filterId\"`\n\tEntities []ManagedObjectReference `xml:\"entities,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AddFilterEntitiesRequestType\"] = reflect.TypeOf((*AddFilterEntitiesRequestType)(nil)).Elem()\n}\n\ntype AddFilterEntitiesResponse struct {\n}\n\ntype AddFilterRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tProviderId string                 `xml:\"providerId\"`\n\tFilterName string                 `xml:\"filterName\"`\n\tInfoIds    []string               `xml:\"infoIds,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"AddFilterRequestType\"] = reflect.TypeOf((*AddFilterRequestType)(nil)).Elem()\n}\n\ntype AddFilterResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype AddGuestAlias AddGuestAliasRequestType\n\nfunc init() {\n\tt[\"AddGuestAlias\"] = reflect.TypeOf((*AddGuestAlias)(nil)).Elem()\n}\n\ntype AddGuestAliasRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tVm         ManagedObjectReference  `xml:\"vm\"`\n\tAuth       BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tUsername   string                  `xml:\"username\"`\n\tMapCert    bool                    `xml:\"mapCert\"`\n\tBase64Cert string                  `xml:\"base64Cert\"`\n\tAliasInfo  GuestAuthAliasInfo      `xml:\"aliasInfo\"`\n}\n\nfunc init() {\n\tt[\"AddGuestAliasRequestType\"] = reflect.TypeOf((*AddGuestAliasRequestType)(nil)).Elem()\n}\n\ntype AddGuestAliasResponse struct {\n}\n\ntype AddHostRequestType struct {\n\tThis         ManagedObjectReference  `xml:\"_this\"`\n\tSpec         HostConnectSpec         `xml:\"spec\"`\n\tAsConnected  bool                    `xml:\"asConnected\"`\n\tResourcePool *ManagedObjectReference `xml:\"resourcePool,omitempty\"`\n\tLicense      string                  `xml:\"license,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AddHostRequestType\"] = reflect.TypeOf((*AddHostRequestType)(nil)).Elem()\n}\n\ntype AddHost_Task AddHostRequestType\n\nfunc init() {\n\tt[\"AddHost_Task\"] = reflect.TypeOf((*AddHost_Task)(nil)).Elem()\n}\n\ntype AddHost_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype AddInternetScsiSendTargets AddInternetScsiSendTargetsRequestType\n\nfunc init() {\n\tt[\"AddInternetScsiSendTargets\"] = reflect.TypeOf((*AddInternetScsiSendTargets)(nil)).Elem()\n}\n\ntype AddInternetScsiSendTargetsRequestType struct {\n\tThis           ManagedObjectReference          `xml:\"_this\"`\n\tIScsiHbaDevice string                          `xml:\"iScsiHbaDevice\"`\n\tTargets        
[]HostInternetScsiHbaSendTarget `xml:\"targets\"`\n}\n\nfunc init() {\n\tt[\"AddInternetScsiSendTargetsRequestType\"] = reflect.TypeOf((*AddInternetScsiSendTargetsRequestType)(nil)).Elem()\n}\n\ntype AddInternetScsiSendTargetsResponse struct {\n}\n\ntype AddInternetScsiStaticTargets AddInternetScsiStaticTargetsRequestType\n\nfunc init() {\n\tt[\"AddInternetScsiStaticTargets\"] = reflect.TypeOf((*AddInternetScsiStaticTargets)(nil)).Elem()\n}\n\ntype AddInternetScsiStaticTargetsRequestType struct {\n\tThis           ManagedObjectReference            `xml:\"_this\"`\n\tIScsiHbaDevice string                            `xml:\"iScsiHbaDevice\"`\n\tTargets        []HostInternetScsiHbaStaticTarget `xml:\"targets\"`\n}\n\nfunc init() {\n\tt[\"AddInternetScsiStaticTargetsRequestType\"] = reflect.TypeOf((*AddInternetScsiStaticTargetsRequestType)(nil)).Elem()\n}\n\ntype AddInternetScsiStaticTargetsResponse struct {\n}\n\ntype AddKey AddKeyRequestType\n\nfunc init() {\n\tt[\"AddKey\"] = reflect.TypeOf((*AddKey)(nil)).Elem()\n}\n\ntype AddKeyRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tKey  CryptoKeyPlain         `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"AddKeyRequestType\"] = reflect.TypeOf((*AddKeyRequestType)(nil)).Elem()\n}\n\ntype AddKeyResponse struct {\n}\n\ntype AddKeys AddKeysRequestType\n\nfunc init() {\n\tt[\"AddKeys\"] = reflect.TypeOf((*AddKeys)(nil)).Elem()\n}\n\ntype AddKeysRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tKeys []CryptoKeyPlain       `xml:\"keys,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AddKeysRequestType\"] = reflect.TypeOf((*AddKeysRequestType)(nil)).Elem()\n}\n\ntype AddKeysResponse struct {\n\tReturnval []CryptoKeyResult `xml:\"returnval,omitempty\"`\n}\n\ntype AddLicense AddLicenseRequestType\n\nfunc init() {\n\tt[\"AddLicense\"] = reflect.TypeOf((*AddLicense)(nil)).Elem()\n}\n\ntype AddLicenseRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tLicenseKey string              
   `xml:\"licenseKey\"`\n\tLabels     []KeyValue             `xml:\"labels,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AddLicenseRequestType\"] = reflect.TypeOf((*AddLicenseRequestType)(nil)).Elem()\n}\n\ntype AddLicenseResponse struct {\n\tReturnval LicenseManagerLicenseInfo `xml:\"returnval\"`\n}\n\ntype AddMonitoredEntities AddMonitoredEntitiesRequestType\n\nfunc init() {\n\tt[\"AddMonitoredEntities\"] = reflect.TypeOf((*AddMonitoredEntities)(nil)).Elem()\n}\n\ntype AddMonitoredEntitiesRequestType struct {\n\tThis       ManagedObjectReference   `xml:\"_this\"`\n\tProviderId string                   `xml:\"providerId\"`\n\tEntities   []ManagedObjectReference `xml:\"entities,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AddMonitoredEntitiesRequestType\"] = reflect.TypeOf((*AddMonitoredEntitiesRequestType)(nil)).Elem()\n}\n\ntype AddMonitoredEntitiesResponse struct {\n}\n\ntype AddNetworkResourcePool AddNetworkResourcePoolRequestType\n\nfunc init() {\n\tt[\"AddNetworkResourcePool\"] = reflect.TypeOf((*AddNetworkResourcePool)(nil)).Elem()\n}\n\ntype AddNetworkResourcePoolRequestType struct {\n\tThis       ManagedObjectReference             `xml:\"_this\"`\n\tConfigSpec []DVSNetworkResourcePoolConfigSpec `xml:\"configSpec\"`\n}\n\nfunc init() {\n\tt[\"AddNetworkResourcePoolRequestType\"] = reflect.TypeOf((*AddNetworkResourcePoolRequestType)(nil)).Elem()\n}\n\ntype AddNetworkResourcePoolResponse struct {\n}\n\ntype AddPortGroup AddPortGroupRequestType\n\nfunc init() {\n\tt[\"AddPortGroup\"] = reflect.TypeOf((*AddPortGroup)(nil)).Elem()\n}\n\ntype AddPortGroupRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tPortgrp HostPortGroupSpec      `xml:\"portgrp\"`\n}\n\nfunc init() {\n\tt[\"AddPortGroupRequestType\"] = reflect.TypeOf((*AddPortGroupRequestType)(nil)).Elem()\n}\n\ntype AddPortGroupResponse struct {\n}\n\ntype AddServiceConsoleVirtualNic AddServiceConsoleVirtualNicRequestType\n\nfunc init() {\n\tt[\"AddServiceConsoleVirtualNic\"] = 
reflect.TypeOf((*AddServiceConsoleVirtualNic)(nil)).Elem()\n}\n\ntype AddServiceConsoleVirtualNicRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tPortgroup string                 `xml:\"portgroup\"`\n\tNic       HostVirtualNicSpec     `xml:\"nic\"`\n}\n\nfunc init() {\n\tt[\"AddServiceConsoleVirtualNicRequestType\"] = reflect.TypeOf((*AddServiceConsoleVirtualNicRequestType)(nil)).Elem()\n}\n\ntype AddServiceConsoleVirtualNicResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype AddStandaloneHostRequestType struct {\n\tThis         ManagedObjectReference        `xml:\"_this\"`\n\tSpec         HostConnectSpec               `xml:\"spec\"`\n\tCompResSpec  BaseComputeResourceConfigSpec `xml:\"compResSpec,omitempty,typeattr\"`\n\tAddConnected bool                          `xml:\"addConnected\"`\n\tLicense      string                        `xml:\"license,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AddStandaloneHostRequestType\"] = reflect.TypeOf((*AddStandaloneHostRequestType)(nil)).Elem()\n}\n\ntype AddStandaloneHost_Task AddStandaloneHostRequestType\n\nfunc init() {\n\tt[\"AddStandaloneHost_Task\"] = reflect.TypeOf((*AddStandaloneHost_Task)(nil)).Elem()\n}\n\ntype AddStandaloneHost_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype AddVirtualNic AddVirtualNicRequestType\n\nfunc init() {\n\tt[\"AddVirtualNic\"] = reflect.TypeOf((*AddVirtualNic)(nil)).Elem()\n}\n\ntype AddVirtualNicRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tPortgroup string                 `xml:\"portgroup\"`\n\tNic       HostVirtualNicSpec     `xml:\"nic\"`\n}\n\nfunc init() {\n\tt[\"AddVirtualNicRequestType\"] = reflect.TypeOf((*AddVirtualNicRequestType)(nil)).Elem()\n}\n\ntype AddVirtualNicResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype AddVirtualSwitch AddVirtualSwitchRequestType\n\nfunc init() {\n\tt[\"AddVirtualSwitch\"] = 
reflect.TypeOf((*AddVirtualSwitch)(nil)).Elem()\n}\n\ntype AddVirtualSwitchRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tVswitchName string                 `xml:\"vswitchName\"`\n\tSpec        *HostVirtualSwitchSpec `xml:\"spec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AddVirtualSwitchRequestType\"] = reflect.TypeOf((*AddVirtualSwitchRequestType)(nil)).Elem()\n}\n\ntype AddVirtualSwitchResponse struct {\n}\n\ntype AdminDisabled struct {\n\tHostConfigFault\n}\n\nfunc init() {\n\tt[\"AdminDisabled\"] = reflect.TypeOf((*AdminDisabled)(nil)).Elem()\n}\n\ntype AdminDisabledFault AdminDisabled\n\nfunc init() {\n\tt[\"AdminDisabledFault\"] = reflect.TypeOf((*AdminDisabledFault)(nil)).Elem()\n}\n\ntype AdminNotDisabled struct {\n\tHostConfigFault\n}\n\nfunc init() {\n\tt[\"AdminNotDisabled\"] = reflect.TypeOf((*AdminNotDisabled)(nil)).Elem()\n}\n\ntype AdminNotDisabledFault AdminNotDisabled\n\nfunc init() {\n\tt[\"AdminNotDisabledFault\"] = reflect.TypeOf((*AdminNotDisabledFault)(nil)).Elem()\n}\n\ntype AdminPasswordNotChangedEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"AdminPasswordNotChangedEvent\"] = reflect.TypeOf((*AdminPasswordNotChangedEvent)(nil)).Elem()\n}\n\ntype AffinityConfigured struct {\n\tMigrationFault\n\n\tConfiguredAffinity []string `xml:\"configuredAffinity\"`\n}\n\nfunc init() {\n\tt[\"AffinityConfigured\"] = reflect.TypeOf((*AffinityConfigured)(nil)).Elem()\n}\n\ntype AffinityConfiguredFault AffinityConfigured\n\nfunc init() {\n\tt[\"AffinityConfiguredFault\"] = reflect.TypeOf((*AffinityConfiguredFault)(nil)).Elem()\n}\n\ntype AfterStartupTaskScheduler struct {\n\tTaskScheduler\n\n\tMinute int32 `xml:\"minute\"`\n}\n\nfunc init() {\n\tt[\"AfterStartupTaskScheduler\"] = reflect.TypeOf((*AfterStartupTaskScheduler)(nil)).Elem()\n}\n\ntype AgentInstallFailed struct {\n\tHostConnectFault\n\n\tReason          string `xml:\"reason,omitempty\"`\n\tStatusCode      int32  `xml:\"statusCode,omitempty\"`\n\tInstallerOutput 
string `xml:\"installerOutput,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AgentInstallFailed\"] = reflect.TypeOf((*AgentInstallFailed)(nil)).Elem()\n}\n\ntype AgentInstallFailedFault AgentInstallFailed\n\nfunc init() {\n\tt[\"AgentInstallFailedFault\"] = reflect.TypeOf((*AgentInstallFailedFault)(nil)).Elem()\n}\n\ntype AlarmAcknowledgedEvent struct {\n\tAlarmEvent\n\n\tSource ManagedEntityEventArgument `xml:\"source\"`\n\tEntity ManagedEntityEventArgument `xml:\"entity\"`\n}\n\nfunc init() {\n\tt[\"AlarmAcknowledgedEvent\"] = reflect.TypeOf((*AlarmAcknowledgedEvent)(nil)).Elem()\n}\n\ntype AlarmAction struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"AlarmAction\"] = reflect.TypeOf((*AlarmAction)(nil)).Elem()\n}\n\ntype AlarmActionTriggeredEvent struct {\n\tAlarmEvent\n\n\tSource ManagedEntityEventArgument `xml:\"source\"`\n\tEntity ManagedEntityEventArgument `xml:\"entity\"`\n}\n\nfunc init() {\n\tt[\"AlarmActionTriggeredEvent\"] = reflect.TypeOf((*AlarmActionTriggeredEvent)(nil)).Elem()\n}\n\ntype AlarmClearedEvent struct {\n\tAlarmEvent\n\n\tSource ManagedEntityEventArgument `xml:\"source\"`\n\tEntity ManagedEntityEventArgument `xml:\"entity\"`\n\tFrom   string                     `xml:\"from\"`\n}\n\nfunc init() {\n\tt[\"AlarmClearedEvent\"] = reflect.TypeOf((*AlarmClearedEvent)(nil)).Elem()\n}\n\ntype AlarmCreatedEvent struct {\n\tAlarmEvent\n\n\tEntity ManagedEntityEventArgument `xml:\"entity\"`\n}\n\nfunc init() {\n\tt[\"AlarmCreatedEvent\"] = reflect.TypeOf((*AlarmCreatedEvent)(nil)).Elem()\n}\n\ntype AlarmDescription struct {\n\tDynamicData\n\n\tExpr                               []BaseTypeDescription    `xml:\"expr,typeattr\"`\n\tStateOperator                      []BaseElementDescription `xml:\"stateOperator,typeattr\"`\n\tMetricOperator                     []BaseElementDescription `xml:\"metricOperator,typeattr\"`\n\tHostSystemConnectionState          []BaseElementDescription `xml:\"hostSystemConnectionState,typeattr\"`\n\tVirtualMachinePowerState          
 []BaseElementDescription `xml:\"virtualMachinePowerState,typeattr\"`\n\tDatastoreConnectionState           []BaseElementDescription `xml:\"datastoreConnectionState,omitempty,typeattr\"`\n\tHostSystemPowerState               []BaseElementDescription `xml:\"hostSystemPowerState,omitempty,typeattr\"`\n\tVirtualMachineGuestHeartbeatStatus []BaseElementDescription `xml:\"virtualMachineGuestHeartbeatStatus,omitempty,typeattr\"`\n\tEntityStatus                       []BaseElementDescription `xml:\"entityStatus,typeattr\"`\n\tAction                             []BaseTypeDescription    `xml:\"action,typeattr\"`\n}\n\nfunc init() {\n\tt[\"AlarmDescription\"] = reflect.TypeOf((*AlarmDescription)(nil)).Elem()\n}\n\ntype AlarmEmailCompletedEvent struct {\n\tAlarmEvent\n\n\tEntity ManagedEntityEventArgument `xml:\"entity\"`\n\tTo     string                     `xml:\"to\"`\n}\n\nfunc init() {\n\tt[\"AlarmEmailCompletedEvent\"] = reflect.TypeOf((*AlarmEmailCompletedEvent)(nil)).Elem()\n}\n\ntype AlarmEmailFailedEvent struct {\n\tAlarmEvent\n\n\tEntity ManagedEntityEventArgument `xml:\"entity\"`\n\tTo     string                     `xml:\"to\"`\n\tReason LocalizedMethodFault       `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"AlarmEmailFailedEvent\"] = reflect.TypeOf((*AlarmEmailFailedEvent)(nil)).Elem()\n}\n\ntype AlarmEvent struct {\n\tEvent\n\n\tAlarm AlarmEventArgument `xml:\"alarm\"`\n}\n\nfunc init() {\n\tt[\"AlarmEvent\"] = reflect.TypeOf((*AlarmEvent)(nil)).Elem()\n}\n\ntype AlarmEventArgument struct {\n\tEntityEventArgument\n\n\tAlarm ManagedObjectReference `xml:\"alarm\"`\n}\n\nfunc init() {\n\tt[\"AlarmEventArgument\"] = reflect.TypeOf((*AlarmEventArgument)(nil)).Elem()\n}\n\ntype AlarmExpression struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"AlarmExpression\"] = reflect.TypeOf((*AlarmExpression)(nil)).Elem()\n}\n\ntype AlarmInfo struct {\n\tAlarmSpec\n\n\tKey              string                 `xml:\"key\"`\n\tAlarm            ManagedObjectReference 
`xml:\"alarm\"`\n\tEntity           ManagedObjectReference `xml:\"entity\"`\n\tLastModifiedTime time.Time              `xml:\"lastModifiedTime\"`\n\tLastModifiedUser string                 `xml:\"lastModifiedUser\"`\n\tCreationEventId  int32                  `xml:\"creationEventId\"`\n}\n\nfunc init() {\n\tt[\"AlarmInfo\"] = reflect.TypeOf((*AlarmInfo)(nil)).Elem()\n}\n\ntype AlarmReconfiguredEvent struct {\n\tAlarmEvent\n\n\tEntity        ManagedEntityEventArgument `xml:\"entity\"`\n\tConfigChanges *ChangesInfoEventArgument  `xml:\"configChanges,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AlarmReconfiguredEvent\"] = reflect.TypeOf((*AlarmReconfiguredEvent)(nil)).Elem()\n}\n\ntype AlarmRemovedEvent struct {\n\tAlarmEvent\n\n\tEntity ManagedEntityEventArgument `xml:\"entity\"`\n}\n\nfunc init() {\n\tt[\"AlarmRemovedEvent\"] = reflect.TypeOf((*AlarmRemovedEvent)(nil)).Elem()\n}\n\ntype AlarmScriptCompleteEvent struct {\n\tAlarmEvent\n\n\tEntity ManagedEntityEventArgument `xml:\"entity\"`\n\tScript string                     `xml:\"script\"`\n}\n\nfunc init() {\n\tt[\"AlarmScriptCompleteEvent\"] = reflect.TypeOf((*AlarmScriptCompleteEvent)(nil)).Elem()\n}\n\ntype AlarmScriptFailedEvent struct {\n\tAlarmEvent\n\n\tEntity ManagedEntityEventArgument `xml:\"entity\"`\n\tScript string                     `xml:\"script\"`\n\tReason LocalizedMethodFault       `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"AlarmScriptFailedEvent\"] = reflect.TypeOf((*AlarmScriptFailedEvent)(nil)).Elem()\n}\n\ntype AlarmSetting struct {\n\tDynamicData\n\n\tToleranceRange     int32 `xml:\"toleranceRange\"`\n\tReportingFrequency int32 `xml:\"reportingFrequency\"`\n}\n\nfunc init() {\n\tt[\"AlarmSetting\"] = reflect.TypeOf((*AlarmSetting)(nil)).Elem()\n}\n\ntype AlarmSnmpCompletedEvent struct {\n\tAlarmEvent\n\n\tEntity ManagedEntityEventArgument `xml:\"entity\"`\n}\n\nfunc init() {\n\tt[\"AlarmSnmpCompletedEvent\"] = reflect.TypeOf((*AlarmSnmpCompletedEvent)(nil)).Elem()\n}\n\ntype AlarmSnmpFailedEvent 
struct {\n\tAlarmEvent\n\n\tEntity ManagedEntityEventArgument `xml:\"entity\"`\n\tReason LocalizedMethodFault       `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"AlarmSnmpFailedEvent\"] = reflect.TypeOf((*AlarmSnmpFailedEvent)(nil)).Elem()\n}\n\ntype AlarmSpec struct {\n\tDynamicData\n\n\tName            string              `xml:\"name\"`\n\tSystemName      string              `xml:\"systemName,omitempty\"`\n\tDescription     string              `xml:\"description\"`\n\tEnabled         bool                `xml:\"enabled\"`\n\tExpression      BaseAlarmExpression `xml:\"expression,typeattr\"`\n\tAction          BaseAlarmAction     `xml:\"action,omitempty,typeattr\"`\n\tActionFrequency int32               `xml:\"actionFrequency,omitempty\"`\n\tSetting         *AlarmSetting       `xml:\"setting,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AlarmSpec\"] = reflect.TypeOf((*AlarmSpec)(nil)).Elem()\n}\n\ntype AlarmState struct {\n\tDynamicData\n\n\tKey                string                 `xml:\"key\"`\n\tEntity             ManagedObjectReference `xml:\"entity\"`\n\tAlarm              ManagedObjectReference `xml:\"alarm\"`\n\tOverallStatus      ManagedEntityStatus    `xml:\"overallStatus\"`\n\tTime               time.Time              `xml:\"time\"`\n\tAcknowledged       *bool                  `xml:\"acknowledged\"`\n\tAcknowledgedByUser string                 `xml:\"acknowledgedByUser,omitempty\"`\n\tAcknowledgedTime   *time.Time             `xml:\"acknowledgedTime\"`\n\tEventKey           int32                  `xml:\"eventKey,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AlarmState\"] = reflect.TypeOf((*AlarmState)(nil)).Elem()\n}\n\ntype AlarmStatusChangedEvent struct {\n\tAlarmEvent\n\n\tSource ManagedEntityEventArgument `xml:\"source\"`\n\tEntity ManagedEntityEventArgument `xml:\"entity\"`\n\tFrom   string                     `xml:\"from\"`\n\tTo     string                     `xml:\"to\"`\n}\n\nfunc init() {\n\tt[\"AlarmStatusChangedEvent\"] = 
reflect.TypeOf((*AlarmStatusChangedEvent)(nil)).Elem()\n}\n\ntype AlarmTriggeringAction struct {\n\tAlarmAction\n\n\tAction          BaseAction                            `xml:\"action,typeattr\"`\n\tTransitionSpecs []AlarmTriggeringActionTransitionSpec `xml:\"transitionSpecs,omitempty\"`\n\tGreen2yellow    bool                                  `xml:\"green2yellow\"`\n\tYellow2red      bool                                  `xml:\"yellow2red\"`\n\tRed2yellow      bool                                  `xml:\"red2yellow\"`\n\tYellow2green    bool                                  `xml:\"yellow2green\"`\n}\n\nfunc init() {\n\tt[\"AlarmTriggeringAction\"] = reflect.TypeOf((*AlarmTriggeringAction)(nil)).Elem()\n}\n\ntype AlarmTriggeringActionTransitionSpec struct {\n\tDynamicData\n\n\tStartState ManagedEntityStatus `xml:\"startState\"`\n\tFinalState ManagedEntityStatus `xml:\"finalState\"`\n\tRepeats    bool                `xml:\"repeats\"`\n}\n\nfunc init() {\n\tt[\"AlarmTriggeringActionTransitionSpec\"] = reflect.TypeOf((*AlarmTriggeringActionTransitionSpec)(nil)).Elem()\n}\n\ntype AllVirtualMachinesLicensedEvent struct {\n\tLicenseEvent\n}\n\nfunc init() {\n\tt[\"AllVirtualMachinesLicensedEvent\"] = reflect.TypeOf((*AllVirtualMachinesLicensedEvent)(nil)).Elem()\n}\n\ntype AllocateIpv4Address AllocateIpv4AddressRequestType\n\nfunc init() {\n\tt[\"AllocateIpv4Address\"] = reflect.TypeOf((*AllocateIpv4Address)(nil)).Elem()\n}\n\ntype AllocateIpv4AddressRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tDc           ManagedObjectReference `xml:\"dc\"`\n\tPoolId       int32                  `xml:\"poolId\"`\n\tAllocationId string                 `xml:\"allocationId\"`\n}\n\nfunc init() {\n\tt[\"AllocateIpv4AddressRequestType\"] = reflect.TypeOf((*AllocateIpv4AddressRequestType)(nil)).Elem()\n}\n\ntype AllocateIpv4AddressResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype AllocateIpv6Address AllocateIpv6AddressRequestType\n\nfunc 
init() {\n\tt[\"AllocateIpv6Address\"] = reflect.TypeOf((*AllocateIpv6Address)(nil)).Elem()\n}\n\ntype AllocateIpv6AddressRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tDc           ManagedObjectReference `xml:\"dc\"`\n\tPoolId       int32                  `xml:\"poolId\"`\n\tAllocationId string                 `xml:\"allocationId\"`\n}\n\nfunc init() {\n\tt[\"AllocateIpv6AddressRequestType\"] = reflect.TypeOf((*AllocateIpv6AddressRequestType)(nil)).Elem()\n}\n\ntype AllocateIpv6AddressResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype AlreadyAuthenticatedSessionEvent struct {\n\tSessionEvent\n}\n\nfunc init() {\n\tt[\"AlreadyAuthenticatedSessionEvent\"] = reflect.TypeOf((*AlreadyAuthenticatedSessionEvent)(nil)).Elem()\n}\n\ntype AlreadyBeingManaged struct {\n\tHostConnectFault\n\n\tIpAddress string `xml:\"ipAddress\"`\n}\n\nfunc init() {\n\tt[\"AlreadyBeingManaged\"] = reflect.TypeOf((*AlreadyBeingManaged)(nil)).Elem()\n}\n\ntype AlreadyBeingManagedFault AlreadyBeingManaged\n\nfunc init() {\n\tt[\"AlreadyBeingManagedFault\"] = reflect.TypeOf((*AlreadyBeingManagedFault)(nil)).Elem()\n}\n\ntype AlreadyConnected struct {\n\tHostConnectFault\n\n\tName string `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"AlreadyConnected\"] = reflect.TypeOf((*AlreadyConnected)(nil)).Elem()\n}\n\ntype AlreadyConnectedFault AlreadyConnected\n\nfunc init() {\n\tt[\"AlreadyConnectedFault\"] = reflect.TypeOf((*AlreadyConnectedFault)(nil)).Elem()\n}\n\ntype AlreadyExists struct {\n\tVimFault\n\n\tName string `xml:\"name,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AlreadyExists\"] = reflect.TypeOf((*AlreadyExists)(nil)).Elem()\n}\n\ntype AlreadyExistsFault AlreadyExists\n\nfunc init() {\n\tt[\"AlreadyExistsFault\"] = reflect.TypeOf((*AlreadyExistsFault)(nil)).Elem()\n}\n\ntype AlreadyUpgraded struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"AlreadyUpgraded\"] = reflect.TypeOf((*AlreadyUpgraded)(nil)).Elem()\n}\n\ntype AlreadyUpgradedFault 
AlreadyUpgraded\n\nfunc init() {\n\tt[\"AlreadyUpgradedFault\"] = reflect.TypeOf((*AlreadyUpgradedFault)(nil)).Elem()\n}\n\ntype AndAlarmExpression struct {\n\tAlarmExpression\n\n\tExpression []BaseAlarmExpression `xml:\"expression,typeattr\"`\n}\n\nfunc init() {\n\tt[\"AndAlarmExpression\"] = reflect.TypeOf((*AndAlarmExpression)(nil)).Elem()\n}\n\ntype AnswerFile struct {\n\tDynamicData\n\n\tUserInput    []ProfileDeferredPolicyOptionParameter `xml:\"userInput,omitempty\"`\n\tCreatedTime  time.Time                              `xml:\"createdTime\"`\n\tModifiedTime time.Time                              `xml:\"modifiedTime\"`\n}\n\nfunc init() {\n\tt[\"AnswerFile\"] = reflect.TypeOf((*AnswerFile)(nil)).Elem()\n}\n\ntype AnswerFileCreateSpec struct {\n\tDynamicData\n\n\tValidating *bool `xml:\"validating\"`\n}\n\nfunc init() {\n\tt[\"AnswerFileCreateSpec\"] = reflect.TypeOf((*AnswerFileCreateSpec)(nil)).Elem()\n}\n\ntype AnswerFileOptionsCreateSpec struct {\n\tAnswerFileCreateSpec\n\n\tUserInput []ProfileDeferredPolicyOptionParameter `xml:\"userInput,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AnswerFileOptionsCreateSpec\"] = reflect.TypeOf((*AnswerFileOptionsCreateSpec)(nil)).Elem()\n}\n\ntype AnswerFileSerializedCreateSpec struct {\n\tAnswerFileCreateSpec\n\n\tAnswerFileConfigString string `xml:\"answerFileConfigString\"`\n}\n\nfunc init() {\n\tt[\"AnswerFileSerializedCreateSpec\"] = reflect.TypeOf((*AnswerFileSerializedCreateSpec)(nil)).Elem()\n}\n\ntype AnswerFileStatusError struct {\n\tDynamicData\n\n\tUserInputPath ProfilePropertyPath `xml:\"userInputPath\"`\n\tErrMsg        LocalizableMessage  `xml:\"errMsg\"`\n}\n\nfunc init() {\n\tt[\"AnswerFileStatusError\"] = reflect.TypeOf((*AnswerFileStatusError)(nil)).Elem()\n}\n\ntype AnswerFileStatusResult struct {\n\tDynamicData\n\n\tCheckedTime time.Time               `xml:\"checkedTime\"`\n\tHost        ManagedObjectReference  `xml:\"host\"`\n\tStatus      string                  `xml:\"status\"`\n\tError       
[]AnswerFileStatusError `xml:\"error,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AnswerFileStatusResult\"] = reflect.TypeOf((*AnswerFileStatusResult)(nil)).Elem()\n}\n\ntype AnswerFileUpdateFailed struct {\n\tVimFault\n\n\tFailure []AnswerFileUpdateFailure `xml:\"failure\"`\n}\n\nfunc init() {\n\tt[\"AnswerFileUpdateFailed\"] = reflect.TypeOf((*AnswerFileUpdateFailed)(nil)).Elem()\n}\n\ntype AnswerFileUpdateFailedFault AnswerFileUpdateFailed\n\nfunc init() {\n\tt[\"AnswerFileUpdateFailedFault\"] = reflect.TypeOf((*AnswerFileUpdateFailedFault)(nil)).Elem()\n}\n\ntype AnswerFileUpdateFailure struct {\n\tDynamicData\n\n\tUserInputPath ProfilePropertyPath `xml:\"userInputPath\"`\n\tErrMsg        LocalizableMessage  `xml:\"errMsg\"`\n}\n\nfunc init() {\n\tt[\"AnswerFileUpdateFailure\"] = reflect.TypeOf((*AnswerFileUpdateFailure)(nil)).Elem()\n}\n\ntype AnswerVM AnswerVMRequestType\n\nfunc init() {\n\tt[\"AnswerVM\"] = reflect.TypeOf((*AnswerVM)(nil)).Elem()\n}\n\ntype AnswerVMRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tQuestionId   string                 `xml:\"questionId\"`\n\tAnswerChoice string                 `xml:\"answerChoice\"`\n}\n\nfunc init() {\n\tt[\"AnswerVMRequestType\"] = reflect.TypeOf((*AnswerVMRequestType)(nil)).Elem()\n}\n\ntype AnswerVMResponse struct {\n}\n\ntype ApplicationQuiesceFault struct {\n\tSnapshotFault\n}\n\nfunc init() {\n\tt[\"ApplicationQuiesceFault\"] = reflect.TypeOf((*ApplicationQuiesceFault)(nil)).Elem()\n}\n\ntype ApplicationQuiesceFaultFault ApplicationQuiesceFault\n\nfunc init() {\n\tt[\"ApplicationQuiesceFaultFault\"] = reflect.TypeOf((*ApplicationQuiesceFaultFault)(nil)).Elem()\n}\n\ntype ApplyEntitiesConfigRequestType struct {\n\tThis             ManagedObjectReference              `xml:\"_this\"`\n\tApplyConfigSpecs []ApplyHostProfileConfigurationSpec `xml:\"applyConfigSpecs,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ApplyEntitiesConfigRequestType\"] = 
reflect.TypeOf((*ApplyEntitiesConfigRequestType)(nil)).Elem()\n}\n\ntype ApplyEntitiesConfig_Task ApplyEntitiesConfigRequestType\n\nfunc init() {\n\tt[\"ApplyEntitiesConfig_Task\"] = reflect.TypeOf((*ApplyEntitiesConfig_Task)(nil)).Elem()\n}\n\ntype ApplyEntitiesConfig_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ApplyHostConfigRequestType struct {\n\tThis       ManagedObjectReference                 `xml:\"_this\"`\n\tHost       ManagedObjectReference                 `xml:\"host\"`\n\tConfigSpec HostConfigSpec                         `xml:\"configSpec\"`\n\tUserInput  []ProfileDeferredPolicyOptionParameter `xml:\"userInput,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ApplyHostConfigRequestType\"] = reflect.TypeOf((*ApplyHostConfigRequestType)(nil)).Elem()\n}\n\ntype ApplyHostConfig_Task ApplyHostConfigRequestType\n\nfunc init() {\n\tt[\"ApplyHostConfig_Task\"] = reflect.TypeOf((*ApplyHostConfig_Task)(nil)).Elem()\n}\n\ntype ApplyHostConfig_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ApplyHostProfileConfigurationSpec struct {\n\tProfileExecuteResult\n\n\tHost                ManagedObjectReference `xml:\"host\"`\n\tTaskListRequirement []string               `xml:\"taskListRequirement,omitempty\"`\n\tTaskDescription     []LocalizableMessage   `xml:\"taskDescription,omitempty\"`\n\tRebootStateless     *bool                  `xml:\"rebootStateless\"`\n\tRebootHost          *bool                  `xml:\"rebootHost\"`\n\tFaultData           *LocalizedMethodFault  `xml:\"faultData,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ApplyHostProfileConfigurationSpec\"] = reflect.TypeOf((*ApplyHostProfileConfigurationSpec)(nil)).Elem()\n}\n\ntype ApplyProfile struct {\n\tDynamicData\n\n\tEnabled          bool                          `xml:\"enabled\"`\n\tPolicy           []ProfilePolicy               `xml:\"policy,omitempty\"`\n\tProfileTypeName  string                        
`xml:\"profileTypeName,omitempty\"`\n\tProfileVersion   string                        `xml:\"profileVersion,omitempty\"`\n\tProperty         []ProfileApplyProfileProperty `xml:\"property,omitempty\"`\n\tFavorite         *bool                         `xml:\"favorite\"`\n\tToBeMerged       *bool                         `xml:\"toBeMerged\"`\n\tToReplaceWith    *bool                         `xml:\"toReplaceWith\"`\n\tToBeDeleted      *bool                         `xml:\"toBeDeleted\"`\n\tCopyEnableStatus *bool                         `xml:\"copyEnableStatus\"`\n}\n\nfunc init() {\n\tt[\"ApplyProfile\"] = reflect.TypeOf((*ApplyProfile)(nil)).Elem()\n}\n\ntype ApplyRecommendation ApplyRecommendationRequestType\n\nfunc init() {\n\tt[\"ApplyRecommendation\"] = reflect.TypeOf((*ApplyRecommendation)(nil)).Elem()\n}\n\ntype ApplyRecommendationRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tKey  string                 `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"ApplyRecommendationRequestType\"] = reflect.TypeOf((*ApplyRecommendationRequestType)(nil)).Elem()\n}\n\ntype ApplyRecommendationResponse struct {\n}\n\ntype ApplyStorageDrsRecommendationRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tKey  []string               `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"ApplyStorageDrsRecommendationRequestType\"] = reflect.TypeOf((*ApplyStorageDrsRecommendationRequestType)(nil)).Elem()\n}\n\ntype ApplyStorageDrsRecommendationToPodRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tPod  ManagedObjectReference `xml:\"pod\"`\n\tKey  string                 `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"ApplyStorageDrsRecommendationToPodRequestType\"] = reflect.TypeOf((*ApplyStorageDrsRecommendationToPodRequestType)(nil)).Elem()\n}\n\ntype ApplyStorageDrsRecommendationToPod_Task ApplyStorageDrsRecommendationToPodRequestType\n\nfunc init() {\n\tt[\"ApplyStorageDrsRecommendationToPod_Task\"] = 
reflect.TypeOf((*ApplyStorageDrsRecommendationToPod_Task)(nil)).Elem()\n}\n\ntype ApplyStorageDrsRecommendationToPod_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ApplyStorageDrsRecommendation_Task ApplyStorageDrsRecommendationRequestType\n\nfunc init() {\n\tt[\"ApplyStorageDrsRecommendation_Task\"] = reflect.TypeOf((*ApplyStorageDrsRecommendation_Task)(nil)).Elem()\n}\n\ntype ApplyStorageDrsRecommendation_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ApplyStorageRecommendationResult struct {\n\tDynamicData\n\n\tVm *ManagedObjectReference `xml:\"vm,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ApplyStorageRecommendationResult\"] = reflect.TypeOf((*ApplyStorageRecommendationResult)(nil)).Elem()\n}\n\ntype AreAlarmActionsEnabled AreAlarmActionsEnabledRequestType\n\nfunc init() {\n\tt[\"AreAlarmActionsEnabled\"] = reflect.TypeOf((*AreAlarmActionsEnabled)(nil)).Elem()\n}\n\ntype AreAlarmActionsEnabledRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tEntity ManagedObjectReference `xml:\"entity\"`\n}\n\nfunc init() {\n\tt[\"AreAlarmActionsEnabledRequestType\"] = reflect.TypeOf((*AreAlarmActionsEnabledRequestType)(nil)).Elem()\n}\n\ntype AreAlarmActionsEnabledResponse struct {\n\tReturnval bool `xml:\"returnval\"`\n}\n\ntype ArrayOfAlarmAction struct {\n\tAlarmAction []BaseAlarmAction `xml:\"AlarmAction,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfAlarmAction\"] = reflect.TypeOf((*ArrayOfAlarmAction)(nil)).Elem()\n}\n\ntype ArrayOfAlarmExpression struct {\n\tAlarmExpression []BaseAlarmExpression `xml:\"AlarmExpression,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfAlarmExpression\"] = reflect.TypeOf((*ArrayOfAlarmExpression)(nil)).Elem()\n}\n\ntype ArrayOfAlarmState struct {\n\tAlarmState []AlarmState `xml:\"AlarmState,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfAlarmState\"] = reflect.TypeOf((*ArrayOfAlarmState)(nil)).Elem()\n}\n\ntype 
ArrayOfAlarmTriggeringActionTransitionSpec struct {\n\tAlarmTriggeringActionTransitionSpec []AlarmTriggeringActionTransitionSpec `xml:\"AlarmTriggeringActionTransitionSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfAlarmTriggeringActionTransitionSpec\"] = reflect.TypeOf((*ArrayOfAlarmTriggeringActionTransitionSpec)(nil)).Elem()\n}\n\ntype ArrayOfAnswerFileStatusError struct {\n\tAnswerFileStatusError []AnswerFileStatusError `xml:\"AnswerFileStatusError,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfAnswerFileStatusError\"] = reflect.TypeOf((*ArrayOfAnswerFileStatusError)(nil)).Elem()\n}\n\ntype ArrayOfAnswerFileStatusResult struct {\n\tAnswerFileStatusResult []AnswerFileStatusResult `xml:\"AnswerFileStatusResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfAnswerFileStatusResult\"] = reflect.TypeOf((*ArrayOfAnswerFileStatusResult)(nil)).Elem()\n}\n\ntype ArrayOfAnswerFileUpdateFailure struct {\n\tAnswerFileUpdateFailure []AnswerFileUpdateFailure `xml:\"AnswerFileUpdateFailure,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfAnswerFileUpdateFailure\"] = reflect.TypeOf((*ArrayOfAnswerFileUpdateFailure)(nil)).Elem()\n}\n\ntype ArrayOfAnyType struct {\n\tAnyType []AnyType `xml:\"anyType,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfAnyType\"] = reflect.TypeOf((*ArrayOfAnyType)(nil)).Elem()\n}\n\ntype ArrayOfAnyURI struct {\n\tAnyURI []url.URL `xml:\"anyURI,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfAnyURI\"] = reflect.TypeOf((*ArrayOfAnyURI)(nil)).Elem()\n}\n\ntype ArrayOfApplyHostProfileConfigurationSpec struct {\n\tApplyHostProfileConfigurationSpec []ApplyHostProfileConfigurationSpec `xml:\"ApplyHostProfileConfigurationSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfApplyHostProfileConfigurationSpec\"] = reflect.TypeOf((*ArrayOfApplyHostProfileConfigurationSpec)(nil)).Elem()\n}\n\ntype ArrayOfApplyProfile struct {\n\tApplyProfile []BaseApplyProfile `xml:\"ApplyProfile,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfApplyProfile\"] = 
reflect.TypeOf((*ArrayOfApplyProfile)(nil)).Elem()\n}\n\ntype ArrayOfAuthorizationPrivilege struct {\n\tAuthorizationPrivilege []AuthorizationPrivilege `xml:\"AuthorizationPrivilege,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfAuthorizationPrivilege\"] = reflect.TypeOf((*ArrayOfAuthorizationPrivilege)(nil)).Elem()\n}\n\ntype ArrayOfAuthorizationRole struct {\n\tAuthorizationRole []AuthorizationRole `xml:\"AuthorizationRole,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfAuthorizationRole\"] = reflect.TypeOf((*ArrayOfAuthorizationRole)(nil)).Elem()\n}\n\ntype ArrayOfAutoStartPowerInfo struct {\n\tAutoStartPowerInfo []AutoStartPowerInfo `xml:\"AutoStartPowerInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfAutoStartPowerInfo\"] = reflect.TypeOf((*ArrayOfAutoStartPowerInfo)(nil)).Elem()\n}\n\ntype ArrayOfBoolean struct {\n\tBoolean []bool `xml:\"boolean,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfBoolean\"] = reflect.TypeOf((*ArrayOfBoolean)(nil)).Elem()\n}\n\ntype ArrayOfByte struct {\n\tByte []byte `xml:\"byte,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfByte\"] = reflect.TypeOf((*ArrayOfByte)(nil)).Elem()\n}\n\ntype ArrayOfChangesInfoEventArgument struct {\n\tChangesInfoEventArgument []ChangesInfoEventArgument `xml:\"ChangesInfoEventArgument,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfChangesInfoEventArgument\"] = reflect.TypeOf((*ArrayOfChangesInfoEventArgument)(nil)).Elem()\n}\n\ntype ArrayOfCheckResult struct {\n\tCheckResult []CheckResult `xml:\"CheckResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfCheckResult\"] = reflect.TypeOf((*ArrayOfCheckResult)(nil)).Elem()\n}\n\ntype ArrayOfClusterAction struct {\n\tClusterAction []BaseClusterAction `xml:\"ClusterAction,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterAction\"] = reflect.TypeOf((*ArrayOfClusterAction)(nil)).Elem()\n}\n\ntype ArrayOfClusterActionHistory struct {\n\tClusterActionHistory []ClusterActionHistory `xml:\"ClusterActionHistory,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"ArrayOfClusterActionHistory\"] = reflect.TypeOf((*ArrayOfClusterActionHistory)(nil)).Elem()\n}\n\ntype ArrayOfClusterAttemptedVmInfo struct {\n\tClusterAttemptedVmInfo []ClusterAttemptedVmInfo `xml:\"ClusterAttemptedVmInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterAttemptedVmInfo\"] = reflect.TypeOf((*ArrayOfClusterAttemptedVmInfo)(nil)).Elem()\n}\n\ntype ArrayOfClusterDasAamNodeState struct {\n\tClusterDasAamNodeState []ClusterDasAamNodeState `xml:\"ClusterDasAamNodeState,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterDasAamNodeState\"] = reflect.TypeOf((*ArrayOfClusterDasAamNodeState)(nil)).Elem()\n}\n\ntype ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots struct {\n\tClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots []ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots `xml:\"ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots\"] = reflect.TypeOf((*ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots)(nil)).Elem()\n}\n\ntype ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots struct {\n\tClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots []ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots `xml:\"ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots\"] = reflect.TypeOf((*ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots)(nil)).Elem()\n}\n\ntype ArrayOfClusterDasVmConfigInfo struct {\n\tClusterDasVmConfigInfo []ClusterDasVmConfigInfo `xml:\"ClusterDasVmConfigInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterDasVmConfigInfo\"] = reflect.TypeOf((*ArrayOfClusterDasVmConfigInfo)(nil)).Elem()\n}\n\ntype ArrayOfClusterDasVmConfigSpec struct {\n\tClusterDasVmConfigSpec []ClusterDasVmConfigSpec `xml:\"ClusterDasVmConfigSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterDasVmConfigSpec\"] = 
reflect.TypeOf((*ArrayOfClusterDasVmConfigSpec)(nil)).Elem()\n}\n\ntype ArrayOfClusterDpmHostConfigInfo struct {\n\tClusterDpmHostConfigInfo []ClusterDpmHostConfigInfo `xml:\"ClusterDpmHostConfigInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterDpmHostConfigInfo\"] = reflect.TypeOf((*ArrayOfClusterDpmHostConfigInfo)(nil)).Elem()\n}\n\ntype ArrayOfClusterDpmHostConfigSpec struct {\n\tClusterDpmHostConfigSpec []ClusterDpmHostConfigSpec `xml:\"ClusterDpmHostConfigSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterDpmHostConfigSpec\"] = reflect.TypeOf((*ArrayOfClusterDpmHostConfigSpec)(nil)).Elem()\n}\n\ntype ArrayOfClusterDrsFaults struct {\n\tClusterDrsFaults []ClusterDrsFaults `xml:\"ClusterDrsFaults,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterDrsFaults\"] = reflect.TypeOf((*ArrayOfClusterDrsFaults)(nil)).Elem()\n}\n\ntype ArrayOfClusterDrsFaultsFaultsByVm struct {\n\tClusterDrsFaultsFaultsByVm []BaseClusterDrsFaultsFaultsByVm `xml:\"ClusterDrsFaultsFaultsByVm,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterDrsFaultsFaultsByVm\"] = reflect.TypeOf((*ArrayOfClusterDrsFaultsFaultsByVm)(nil)).Elem()\n}\n\ntype ArrayOfClusterDrsMigration struct {\n\tClusterDrsMigration []ClusterDrsMigration `xml:\"ClusterDrsMigration,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterDrsMigration\"] = reflect.TypeOf((*ArrayOfClusterDrsMigration)(nil)).Elem()\n}\n\ntype ArrayOfClusterDrsRecommendation struct {\n\tClusterDrsRecommendation []ClusterDrsRecommendation `xml:\"ClusterDrsRecommendation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterDrsRecommendation\"] = reflect.TypeOf((*ArrayOfClusterDrsRecommendation)(nil)).Elem()\n}\n\ntype ArrayOfClusterDrsVmConfigInfo struct {\n\tClusterDrsVmConfigInfo []ClusterDrsVmConfigInfo `xml:\"ClusterDrsVmConfigInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterDrsVmConfigInfo\"] = reflect.TypeOf((*ArrayOfClusterDrsVmConfigInfo)(nil)).Elem()\n}\n\ntype ArrayOfClusterDrsVmConfigSpec struct 
{\n\tClusterDrsVmConfigSpec []ClusterDrsVmConfigSpec `xml:\"ClusterDrsVmConfigSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterDrsVmConfigSpec\"] = reflect.TypeOf((*ArrayOfClusterDrsVmConfigSpec)(nil)).Elem()\n}\n\ntype ArrayOfClusterEVCManagerCheckResult struct {\n\tClusterEVCManagerCheckResult []ClusterEVCManagerCheckResult `xml:\"ClusterEVCManagerCheckResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterEVCManagerCheckResult\"] = reflect.TypeOf((*ArrayOfClusterEVCManagerCheckResult)(nil)).Elem()\n}\n\ntype ArrayOfClusterFailoverHostAdmissionControlInfoHostStatus struct {\n\tClusterFailoverHostAdmissionControlInfoHostStatus []ClusterFailoverHostAdmissionControlInfoHostStatus `xml:\"ClusterFailoverHostAdmissionControlInfoHostStatus,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterFailoverHostAdmissionControlInfoHostStatus\"] = reflect.TypeOf((*ArrayOfClusterFailoverHostAdmissionControlInfoHostStatus)(nil)).Elem()\n}\n\ntype ArrayOfClusterGroupInfo struct {\n\tClusterGroupInfo []BaseClusterGroupInfo `xml:\"ClusterGroupInfo,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterGroupInfo\"] = reflect.TypeOf((*ArrayOfClusterGroupInfo)(nil)).Elem()\n}\n\ntype ArrayOfClusterGroupSpec struct {\n\tClusterGroupSpec []ClusterGroupSpec `xml:\"ClusterGroupSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterGroupSpec\"] = reflect.TypeOf((*ArrayOfClusterGroupSpec)(nil)).Elem()\n}\n\ntype ArrayOfClusterHostRecommendation struct {\n\tClusterHostRecommendation []ClusterHostRecommendation `xml:\"ClusterHostRecommendation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterHostRecommendation\"] = reflect.TypeOf((*ArrayOfClusterHostRecommendation)(nil)).Elem()\n}\n\ntype ArrayOfClusterIoFilterInfo struct {\n\tClusterIoFilterInfo []ClusterIoFilterInfo `xml:\"ClusterIoFilterInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterIoFilterInfo\"] = reflect.TypeOf((*ArrayOfClusterIoFilterInfo)(nil)).Elem()\n}\n\ntype 
ArrayOfClusterNotAttemptedVmInfo struct {\n\tClusterNotAttemptedVmInfo []ClusterNotAttemptedVmInfo `xml:\"ClusterNotAttemptedVmInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterNotAttemptedVmInfo\"] = reflect.TypeOf((*ArrayOfClusterNotAttemptedVmInfo)(nil)).Elem()\n}\n\ntype ArrayOfClusterRecommendation struct {\n\tClusterRecommendation []ClusterRecommendation `xml:\"ClusterRecommendation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterRecommendation\"] = reflect.TypeOf((*ArrayOfClusterRecommendation)(nil)).Elem()\n}\n\ntype ArrayOfClusterRuleInfo struct {\n\tClusterRuleInfo []BaseClusterRuleInfo `xml:\"ClusterRuleInfo,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterRuleInfo\"] = reflect.TypeOf((*ArrayOfClusterRuleInfo)(nil)).Elem()\n}\n\ntype ArrayOfClusterRuleSpec struct {\n\tClusterRuleSpec []ClusterRuleSpec `xml:\"ClusterRuleSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterRuleSpec\"] = reflect.TypeOf((*ArrayOfClusterRuleSpec)(nil)).Elem()\n}\n\ntype ArrayOfClusterVmOrchestrationInfo struct {\n\tClusterVmOrchestrationInfo []ClusterVmOrchestrationInfo `xml:\"ClusterVmOrchestrationInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterVmOrchestrationInfo\"] = reflect.TypeOf((*ArrayOfClusterVmOrchestrationInfo)(nil)).Elem()\n}\n\ntype ArrayOfClusterVmOrchestrationSpec struct {\n\tClusterVmOrchestrationSpec []ClusterVmOrchestrationSpec `xml:\"ClusterVmOrchestrationSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfClusterVmOrchestrationSpec\"] = reflect.TypeOf((*ArrayOfClusterVmOrchestrationSpec)(nil)).Elem()\n}\n\ntype ArrayOfComplianceFailure struct {\n\tComplianceFailure []ComplianceFailure `xml:\"ComplianceFailure,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfComplianceFailure\"] = reflect.TypeOf((*ArrayOfComplianceFailure)(nil)).Elem()\n}\n\ntype ArrayOfComplianceFailureComplianceFailureValues struct {\n\tComplianceFailureComplianceFailureValues []ComplianceFailureComplianceFailureValues 
`xml:\"ComplianceFailureComplianceFailureValues,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfComplianceFailureComplianceFailureValues\"] = reflect.TypeOf((*ArrayOfComplianceFailureComplianceFailureValues)(nil)).Elem()\n}\n\ntype ArrayOfComplianceLocator struct {\n\tComplianceLocator []ComplianceLocator `xml:\"ComplianceLocator,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfComplianceLocator\"] = reflect.TypeOf((*ArrayOfComplianceLocator)(nil)).Elem()\n}\n\ntype ArrayOfComplianceResult struct {\n\tComplianceResult []ComplianceResult `xml:\"ComplianceResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfComplianceResult\"] = reflect.TypeOf((*ArrayOfComplianceResult)(nil)).Elem()\n}\n\ntype ArrayOfComputeResourceHostSPBMLicenseInfo struct {\n\tComputeResourceHostSPBMLicenseInfo []ComputeResourceHostSPBMLicenseInfo `xml:\"ComputeResourceHostSPBMLicenseInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfComputeResourceHostSPBMLicenseInfo\"] = reflect.TypeOf((*ArrayOfComputeResourceHostSPBMLicenseInfo)(nil)).Elem()\n}\n\ntype ArrayOfConflictingConfigurationConfig struct {\n\tConflictingConfigurationConfig []ConflictingConfigurationConfig `xml:\"ConflictingConfigurationConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfConflictingConfigurationConfig\"] = reflect.TypeOf((*ArrayOfConflictingConfigurationConfig)(nil)).Elem()\n}\n\ntype ArrayOfCryptoKeyId struct {\n\tCryptoKeyId []CryptoKeyId `xml:\"CryptoKeyId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfCryptoKeyId\"] = reflect.TypeOf((*ArrayOfCryptoKeyId)(nil)).Elem()\n}\n\ntype ArrayOfCryptoKeyPlain struct {\n\tCryptoKeyPlain []CryptoKeyPlain `xml:\"CryptoKeyPlain,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfCryptoKeyPlain\"] = reflect.TypeOf((*ArrayOfCryptoKeyPlain)(nil)).Elem()\n}\n\ntype ArrayOfCryptoKeyResult struct {\n\tCryptoKeyResult []CryptoKeyResult `xml:\"CryptoKeyResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfCryptoKeyResult\"] = reflect.TypeOf((*ArrayOfCryptoKeyResult)(nil)).Elem()\n}\n\ntype 
ArrayOfCryptoManagerKmipClusterStatus struct {\n\tCryptoManagerKmipClusterStatus []CryptoManagerKmipClusterStatus `xml:\"CryptoManagerKmipClusterStatus,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfCryptoManagerKmipClusterStatus\"] = reflect.TypeOf((*ArrayOfCryptoManagerKmipClusterStatus)(nil)).Elem()\n}\n\ntype ArrayOfCryptoManagerKmipServerStatus struct {\n\tCryptoManagerKmipServerStatus []CryptoManagerKmipServerStatus `xml:\"CryptoManagerKmipServerStatus,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfCryptoManagerKmipServerStatus\"] = reflect.TypeOf((*ArrayOfCryptoManagerKmipServerStatus)(nil)).Elem()\n}\n\ntype ArrayOfCustomFieldDef struct {\n\tCustomFieldDef []CustomFieldDef `xml:\"CustomFieldDef,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfCustomFieldDef\"] = reflect.TypeOf((*ArrayOfCustomFieldDef)(nil)).Elem()\n}\n\ntype ArrayOfCustomFieldValue struct {\n\tCustomFieldValue []BaseCustomFieldValue `xml:\"CustomFieldValue,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfCustomFieldValue\"] = reflect.TypeOf((*ArrayOfCustomFieldValue)(nil)).Elem()\n}\n\ntype ArrayOfCustomizationAdapterMapping struct {\n\tCustomizationAdapterMapping []CustomizationAdapterMapping `xml:\"CustomizationAdapterMapping,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfCustomizationAdapterMapping\"] = reflect.TypeOf((*ArrayOfCustomizationAdapterMapping)(nil)).Elem()\n}\n\ntype ArrayOfCustomizationIpV6Generator struct {\n\tCustomizationIpV6Generator []BaseCustomizationIpV6Generator `xml:\"CustomizationIpV6Generator,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfCustomizationIpV6Generator\"] = reflect.TypeOf((*ArrayOfCustomizationIpV6Generator)(nil)).Elem()\n}\n\ntype ArrayOfCustomizationSpecInfo struct {\n\tCustomizationSpecInfo []CustomizationSpecInfo `xml:\"CustomizationSpecInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfCustomizationSpecInfo\"] = reflect.TypeOf((*ArrayOfCustomizationSpecInfo)(nil)).Elem()\n}\n\ntype ArrayOfDVPortConfigSpec struct 
{\n\tDVPortConfigSpec []DVPortConfigSpec `xml:\"DVPortConfigSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDVPortConfigSpec\"] = reflect.TypeOf((*ArrayOfDVPortConfigSpec)(nil)).Elem()\n}\n\ntype ArrayOfDVPortgroupConfigSpec struct {\n\tDVPortgroupConfigSpec []DVPortgroupConfigSpec `xml:\"DVPortgroupConfigSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDVPortgroupConfigSpec\"] = reflect.TypeOf((*ArrayOfDVPortgroupConfigSpec)(nil)).Elem()\n}\n\ntype ArrayOfDVSHealthCheckConfig struct {\n\tDVSHealthCheckConfig []BaseDVSHealthCheckConfig `xml:\"DVSHealthCheckConfig,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDVSHealthCheckConfig\"] = reflect.TypeOf((*ArrayOfDVSHealthCheckConfig)(nil)).Elem()\n}\n\ntype ArrayOfDVSNetworkResourcePool struct {\n\tDVSNetworkResourcePool []DVSNetworkResourcePool `xml:\"DVSNetworkResourcePool,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDVSNetworkResourcePool\"] = reflect.TypeOf((*ArrayOfDVSNetworkResourcePool)(nil)).Elem()\n}\n\ntype ArrayOfDVSNetworkResourcePoolConfigSpec struct {\n\tDVSNetworkResourcePoolConfigSpec []DVSNetworkResourcePoolConfigSpec `xml:\"DVSNetworkResourcePoolConfigSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDVSNetworkResourcePoolConfigSpec\"] = reflect.TypeOf((*ArrayOfDVSNetworkResourcePoolConfigSpec)(nil)).Elem()\n}\n\ntype ArrayOfDVSVmVnicNetworkResourcePool struct {\n\tDVSVmVnicNetworkResourcePool []DVSVmVnicNetworkResourcePool `xml:\"DVSVmVnicNetworkResourcePool,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDVSVmVnicNetworkResourcePool\"] = reflect.TypeOf((*ArrayOfDVSVmVnicNetworkResourcePool)(nil)).Elem()\n}\n\ntype ArrayOfDasHeartbeatDatastoreInfo struct {\n\tDasHeartbeatDatastoreInfo []DasHeartbeatDatastoreInfo `xml:\"DasHeartbeatDatastoreInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDasHeartbeatDatastoreInfo\"] = reflect.TypeOf((*ArrayOfDasHeartbeatDatastoreInfo)(nil)).Elem()\n}\n\ntype ArrayOfDatacenterMismatchArgument struct {\n\tDatacenterMismatchArgument 
[]DatacenterMismatchArgument `xml:\"DatacenterMismatchArgument,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDatacenterMismatchArgument\"] = reflect.TypeOf((*ArrayOfDatacenterMismatchArgument)(nil)).Elem()\n}\n\ntype ArrayOfDatastoreHostMount struct {\n\tDatastoreHostMount []DatastoreHostMount `xml:\"DatastoreHostMount,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDatastoreHostMount\"] = reflect.TypeOf((*ArrayOfDatastoreHostMount)(nil)).Elem()\n}\n\ntype ArrayOfDatastoreMountPathDatastorePair struct {\n\tDatastoreMountPathDatastorePair []DatastoreMountPathDatastorePair `xml:\"DatastoreMountPathDatastorePair,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDatastoreMountPathDatastorePair\"] = reflect.TypeOf((*ArrayOfDatastoreMountPathDatastorePair)(nil)).Elem()\n}\n\ntype ArrayOfDatastoreVVolContainerFailoverPair struct {\n\tDatastoreVVolContainerFailoverPair []DatastoreVVolContainerFailoverPair `xml:\"DatastoreVVolContainerFailoverPair,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDatastoreVVolContainerFailoverPair\"] = reflect.TypeOf((*ArrayOfDatastoreVVolContainerFailoverPair)(nil)).Elem()\n}\n\ntype ArrayOfDiagnosticManagerBundleInfo struct {\n\tDiagnosticManagerBundleInfo []DiagnosticManagerBundleInfo `xml:\"DiagnosticManagerBundleInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDiagnosticManagerBundleInfo\"] = reflect.TypeOf((*ArrayOfDiagnosticManagerBundleInfo)(nil)).Elem()\n}\n\ntype ArrayOfDiagnosticManagerLogDescriptor struct {\n\tDiagnosticManagerLogDescriptor []DiagnosticManagerLogDescriptor `xml:\"DiagnosticManagerLogDescriptor,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDiagnosticManagerLogDescriptor\"] = reflect.TypeOf((*ArrayOfDiagnosticManagerLogDescriptor)(nil)).Elem()\n}\n\ntype ArrayOfDiskChangeExtent struct {\n\tDiskChangeExtent []DiskChangeExtent `xml:\"DiskChangeExtent,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDiskChangeExtent\"] = reflect.TypeOf((*ArrayOfDiskChangeExtent)(nil)).Elem()\n}\n\ntype ArrayOfDistributedVirtualPort 
struct {\n\tDistributedVirtualPort []DistributedVirtualPort `xml:\"DistributedVirtualPort,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDistributedVirtualPort\"] = reflect.TypeOf((*ArrayOfDistributedVirtualPort)(nil)).Elem()\n}\n\ntype ArrayOfDistributedVirtualPortgroupInfo struct {\n\tDistributedVirtualPortgroupInfo []DistributedVirtualPortgroupInfo `xml:\"DistributedVirtualPortgroupInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDistributedVirtualPortgroupInfo\"] = reflect.TypeOf((*ArrayOfDistributedVirtualPortgroupInfo)(nil)).Elem()\n}\n\ntype ArrayOfDistributedVirtualSwitchHostMember struct {\n\tDistributedVirtualSwitchHostMember []DistributedVirtualSwitchHostMember `xml:\"DistributedVirtualSwitchHostMember,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDistributedVirtualSwitchHostMember\"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostMember)(nil)).Elem()\n}\n\ntype ArrayOfDistributedVirtualSwitchHostMemberConfigSpec struct {\n\tDistributedVirtualSwitchHostMemberConfigSpec []DistributedVirtualSwitchHostMemberConfigSpec `xml:\"DistributedVirtualSwitchHostMemberConfigSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDistributedVirtualSwitchHostMemberConfigSpec\"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostMemberConfigSpec)(nil)).Elem()\n}\n\ntype ArrayOfDistributedVirtualSwitchHostMemberPnicSpec struct {\n\tDistributedVirtualSwitchHostMemberPnicSpec []DistributedVirtualSwitchHostMemberPnicSpec `xml:\"DistributedVirtualSwitchHostMemberPnicSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDistributedVirtualSwitchHostMemberPnicSpec\"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostMemberPnicSpec)(nil)).Elem()\n}\n\ntype ArrayOfDistributedVirtualSwitchHostProductSpec struct {\n\tDistributedVirtualSwitchHostProductSpec []DistributedVirtualSwitchHostProductSpec `xml:\"DistributedVirtualSwitchHostProductSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDistributedVirtualSwitchHostProductSpec\"] = 
reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostProductSpec)(nil)).Elem()\n}\n\ntype ArrayOfDistributedVirtualSwitchInfo struct {\n\tDistributedVirtualSwitchInfo []DistributedVirtualSwitchInfo `xml:\"DistributedVirtualSwitchInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDistributedVirtualSwitchInfo\"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchInfo)(nil)).Elem()\n}\n\ntype ArrayOfDistributedVirtualSwitchKeyedOpaqueBlob struct {\n\tDistributedVirtualSwitchKeyedOpaqueBlob []DistributedVirtualSwitchKeyedOpaqueBlob `xml:\"DistributedVirtualSwitchKeyedOpaqueBlob,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDistributedVirtualSwitchKeyedOpaqueBlob\"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchKeyedOpaqueBlob)(nil)).Elem()\n}\n\ntype ArrayOfDistributedVirtualSwitchManagerCompatibilityResult struct {\n\tDistributedVirtualSwitchManagerCompatibilityResult []DistributedVirtualSwitchManagerCompatibilityResult `xml:\"DistributedVirtualSwitchManagerCompatibilityResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDistributedVirtualSwitchManagerCompatibilityResult\"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchManagerCompatibilityResult)(nil)).Elem()\n}\n\ntype ArrayOfDistributedVirtualSwitchManagerHostDvsFilterSpec struct {\n\tDistributedVirtualSwitchManagerHostDvsFilterSpec []BaseDistributedVirtualSwitchManagerHostDvsFilterSpec `xml:\"DistributedVirtualSwitchManagerHostDvsFilterSpec,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDistributedVirtualSwitchManagerHostDvsFilterSpec\"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchManagerHostDvsFilterSpec)(nil)).Elem()\n}\n\ntype ArrayOfDistributedVirtualSwitchProductSpec struct {\n\tDistributedVirtualSwitchProductSpec []DistributedVirtualSwitchProductSpec `xml:\"DistributedVirtualSwitchProductSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDistributedVirtualSwitchProductSpec\"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchProductSpec)(nil)).Elem()\n}\n\ntype ArrayOfDouble struct 
{\n\tDouble []float64 `xml:\"double,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDouble\"] = reflect.TypeOf((*ArrayOfDouble)(nil)).Elem()\n}\n\ntype ArrayOfDvsApplyOperationFaultFaultOnObject struct {\n\tDvsApplyOperationFaultFaultOnObject []DvsApplyOperationFaultFaultOnObject `xml:\"DvsApplyOperationFaultFaultOnObject,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDvsApplyOperationFaultFaultOnObject\"] = reflect.TypeOf((*ArrayOfDvsApplyOperationFaultFaultOnObject)(nil)).Elem()\n}\n\ntype ArrayOfDvsFilterConfig struct {\n\tDvsFilterConfig []BaseDvsFilterConfig `xml:\"DvsFilterConfig,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDvsFilterConfig\"] = reflect.TypeOf((*ArrayOfDvsFilterConfig)(nil)).Elem()\n}\n\ntype ArrayOfDvsHostInfrastructureTrafficResource struct {\n\tDvsHostInfrastructureTrafficResource []DvsHostInfrastructureTrafficResource `xml:\"DvsHostInfrastructureTrafficResource,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDvsHostInfrastructureTrafficResource\"] = reflect.TypeOf((*ArrayOfDvsHostInfrastructureTrafficResource)(nil)).Elem()\n}\n\ntype ArrayOfDvsHostVNicProfile struct {\n\tDvsHostVNicProfile []DvsHostVNicProfile `xml:\"DvsHostVNicProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDvsHostVNicProfile\"] = reflect.TypeOf((*ArrayOfDvsHostVNicProfile)(nil)).Elem()\n}\n\ntype ArrayOfDvsNetworkRuleQualifier struct {\n\tDvsNetworkRuleQualifier []BaseDvsNetworkRuleQualifier `xml:\"DvsNetworkRuleQualifier,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDvsNetworkRuleQualifier\"] = reflect.TypeOf((*ArrayOfDvsNetworkRuleQualifier)(nil)).Elem()\n}\n\ntype ArrayOfDvsOperationBulkFaultFaultOnHost struct {\n\tDvsOperationBulkFaultFaultOnHost []DvsOperationBulkFaultFaultOnHost `xml:\"DvsOperationBulkFaultFaultOnHost,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDvsOperationBulkFaultFaultOnHost\"] = reflect.TypeOf((*ArrayOfDvsOperationBulkFaultFaultOnHost)(nil)).Elem()\n}\n\ntype ArrayOfDvsOutOfSyncHostArgument struct 
{\n\tDvsOutOfSyncHostArgument []DvsOutOfSyncHostArgument `xml:\"DvsOutOfSyncHostArgument,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDvsOutOfSyncHostArgument\"] = reflect.TypeOf((*ArrayOfDvsOutOfSyncHostArgument)(nil)).Elem()\n}\n\ntype ArrayOfDvsProfile struct {\n\tDvsProfile []DvsProfile `xml:\"DvsProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDvsProfile\"] = reflect.TypeOf((*ArrayOfDvsProfile)(nil)).Elem()\n}\n\ntype ArrayOfDvsServiceConsoleVNicProfile struct {\n\tDvsServiceConsoleVNicProfile []DvsServiceConsoleVNicProfile `xml:\"DvsServiceConsoleVNicProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDvsServiceConsoleVNicProfile\"] = reflect.TypeOf((*ArrayOfDvsServiceConsoleVNicProfile)(nil)).Elem()\n}\n\ntype ArrayOfDvsTrafficRule struct {\n\tDvsTrafficRule []DvsTrafficRule `xml:\"DvsTrafficRule,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDvsTrafficRule\"] = reflect.TypeOf((*ArrayOfDvsTrafficRule)(nil)).Elem()\n}\n\ntype ArrayOfDvsVmVnicNetworkResourcePoolRuntimeInfo struct {\n\tDvsVmVnicNetworkResourcePoolRuntimeInfo []DvsVmVnicNetworkResourcePoolRuntimeInfo `xml:\"DvsVmVnicNetworkResourcePoolRuntimeInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDvsVmVnicNetworkResourcePoolRuntimeInfo\"] = reflect.TypeOf((*ArrayOfDvsVmVnicNetworkResourcePoolRuntimeInfo)(nil)).Elem()\n}\n\ntype ArrayOfDvsVmVnicResourcePoolConfigSpec struct {\n\tDvsVmVnicResourcePoolConfigSpec []DvsVmVnicResourcePoolConfigSpec `xml:\"DvsVmVnicResourcePoolConfigSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDvsVmVnicResourcePoolConfigSpec\"] = reflect.TypeOf((*ArrayOfDvsVmVnicResourcePoolConfigSpec)(nil)).Elem()\n}\n\ntype ArrayOfDvsVnicAllocatedResource struct {\n\tDvsVnicAllocatedResource []DvsVnicAllocatedResource `xml:\"DvsVnicAllocatedResource,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDvsVnicAllocatedResource\"] = reflect.TypeOf((*ArrayOfDvsVnicAllocatedResource)(nil)).Elem()\n}\n\ntype ArrayOfDynamicProperty struct {\n\tDynamicProperty []DynamicProperty 
`xml:\"DynamicProperty,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfDynamicProperty\"] = reflect.TypeOf((*ArrayOfDynamicProperty)(nil)).Elem()\n}\n\ntype ArrayOfEVCMode struct {\n\tEVCMode []EVCMode `xml:\"EVCMode,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfEVCMode\"] = reflect.TypeOf((*ArrayOfEVCMode)(nil)).Elem()\n}\n\ntype ArrayOfElementDescription struct {\n\tElementDescription []BaseElementDescription `xml:\"ElementDescription,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfElementDescription\"] = reflect.TypeOf((*ArrayOfElementDescription)(nil)).Elem()\n}\n\ntype ArrayOfEntityBackupConfig struct {\n\tEntityBackupConfig []EntityBackupConfig `xml:\"EntityBackupConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfEntityBackupConfig\"] = reflect.TypeOf((*ArrayOfEntityBackupConfig)(nil)).Elem()\n}\n\ntype ArrayOfEntityPrivilege struct {\n\tEntityPrivilege []EntityPrivilege `xml:\"EntityPrivilege,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfEntityPrivilege\"] = reflect.TypeOf((*ArrayOfEntityPrivilege)(nil)).Elem()\n}\n\ntype ArrayOfEnumDescription struct {\n\tEnumDescription []EnumDescription `xml:\"EnumDescription,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfEnumDescription\"] = reflect.TypeOf((*ArrayOfEnumDescription)(nil)).Elem()\n}\n\ntype ArrayOfEvent struct {\n\tEvent []BaseEvent `xml:\"Event,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfEvent\"] = reflect.TypeOf((*ArrayOfEvent)(nil)).Elem()\n}\n\ntype ArrayOfEventAlarmExpressionComparison struct {\n\tEventAlarmExpressionComparison []EventAlarmExpressionComparison `xml:\"EventAlarmExpressionComparison,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfEventAlarmExpressionComparison\"] = reflect.TypeOf((*ArrayOfEventAlarmExpressionComparison)(nil)).Elem()\n}\n\ntype ArrayOfEventArgDesc struct {\n\tEventArgDesc []EventArgDesc `xml:\"EventArgDesc,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfEventArgDesc\"] = reflect.TypeOf((*ArrayOfEventArgDesc)(nil)).Elem()\n}\n\ntype 
ArrayOfEventDescriptionEventDetail struct {\n\tEventDescriptionEventDetail []EventDescriptionEventDetail `xml:\"EventDescriptionEventDetail,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfEventDescriptionEventDetail\"] = reflect.TypeOf((*ArrayOfEventDescriptionEventDetail)(nil)).Elem()\n}\n\ntype ArrayOfExtManagedEntityInfo struct {\n\tExtManagedEntityInfo []ExtManagedEntityInfo `xml:\"ExtManagedEntityInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfExtManagedEntityInfo\"] = reflect.TypeOf((*ArrayOfExtManagedEntityInfo)(nil)).Elem()\n}\n\ntype ArrayOfExtSolutionManagerInfoTabInfo struct {\n\tExtSolutionManagerInfoTabInfo []ExtSolutionManagerInfoTabInfo `xml:\"ExtSolutionManagerInfoTabInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfExtSolutionManagerInfoTabInfo\"] = reflect.TypeOf((*ArrayOfExtSolutionManagerInfoTabInfo)(nil)).Elem()\n}\n\ntype ArrayOfExtendedEventPair struct {\n\tExtendedEventPair []ExtendedEventPair `xml:\"ExtendedEventPair,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfExtendedEventPair\"] = reflect.TypeOf((*ArrayOfExtendedEventPair)(nil)).Elem()\n}\n\ntype ArrayOfExtension struct {\n\tExtension []Extension `xml:\"Extension,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfExtension\"] = reflect.TypeOf((*ArrayOfExtension)(nil)).Elem()\n}\n\ntype ArrayOfExtensionClientInfo struct {\n\tExtensionClientInfo []ExtensionClientInfo `xml:\"ExtensionClientInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfExtensionClientInfo\"] = reflect.TypeOf((*ArrayOfExtensionClientInfo)(nil)).Elem()\n}\n\ntype ArrayOfExtensionEventTypeInfo struct {\n\tExtensionEventTypeInfo []ExtensionEventTypeInfo `xml:\"ExtensionEventTypeInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfExtensionEventTypeInfo\"] = reflect.TypeOf((*ArrayOfExtensionEventTypeInfo)(nil)).Elem()\n}\n\ntype ArrayOfExtensionFaultTypeInfo struct {\n\tExtensionFaultTypeInfo []ExtensionFaultTypeInfo `xml:\"ExtensionFaultTypeInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfExtensionFaultTypeInfo\"] = 
reflect.TypeOf((*ArrayOfExtensionFaultTypeInfo)(nil)).Elem()\n}\n\ntype ArrayOfExtensionManagerIpAllocationUsage struct {\n\tExtensionManagerIpAllocationUsage []ExtensionManagerIpAllocationUsage `xml:\"ExtensionManagerIpAllocationUsage,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfExtensionManagerIpAllocationUsage\"] = reflect.TypeOf((*ArrayOfExtensionManagerIpAllocationUsage)(nil)).Elem()\n}\n\ntype ArrayOfExtensionPrivilegeInfo struct {\n\tExtensionPrivilegeInfo []ExtensionPrivilegeInfo `xml:\"ExtensionPrivilegeInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfExtensionPrivilegeInfo\"] = reflect.TypeOf((*ArrayOfExtensionPrivilegeInfo)(nil)).Elem()\n}\n\ntype ArrayOfExtensionResourceInfo struct {\n\tExtensionResourceInfo []ExtensionResourceInfo `xml:\"ExtensionResourceInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfExtensionResourceInfo\"] = reflect.TypeOf((*ArrayOfExtensionResourceInfo)(nil)).Elem()\n}\n\ntype ArrayOfExtensionServerInfo struct {\n\tExtensionServerInfo []ExtensionServerInfo `xml:\"ExtensionServerInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfExtensionServerInfo\"] = reflect.TypeOf((*ArrayOfExtensionServerInfo)(nil)).Elem()\n}\n\ntype ArrayOfExtensionTaskTypeInfo struct {\n\tExtensionTaskTypeInfo []ExtensionTaskTypeInfo `xml:\"ExtensionTaskTypeInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfExtensionTaskTypeInfo\"] = reflect.TypeOf((*ArrayOfExtensionTaskTypeInfo)(nil)).Elem()\n}\n\ntype ArrayOfFaultToleranceDiskSpec struct {\n\tFaultToleranceDiskSpec []FaultToleranceDiskSpec `xml:\"FaultToleranceDiskSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfFaultToleranceDiskSpec\"] = reflect.TypeOf((*ArrayOfFaultToleranceDiskSpec)(nil)).Elem()\n}\n\ntype ArrayOfFcoeConfigVlanRange struct {\n\tFcoeConfigVlanRange []FcoeConfigVlanRange `xml:\"FcoeConfigVlanRange,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfFcoeConfigVlanRange\"] = reflect.TypeOf((*ArrayOfFcoeConfigVlanRange)(nil)).Elem()\n}\n\ntype ArrayOfFileInfo struct {\n\tFileInfo 
[]BaseFileInfo `xml:\"FileInfo,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfFileInfo\"] = reflect.TypeOf((*ArrayOfFileInfo)(nil)).Elem()\n}\n\ntype ArrayOfFileQuery struct {\n\tFileQuery []BaseFileQuery `xml:\"FileQuery,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfFileQuery\"] = reflect.TypeOf((*ArrayOfFileQuery)(nil)).Elem()\n}\n\ntype ArrayOfFirewallProfileRulesetProfile struct {\n\tFirewallProfileRulesetProfile []FirewallProfileRulesetProfile `xml:\"FirewallProfileRulesetProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfFirewallProfileRulesetProfile\"] = reflect.TypeOf((*ArrayOfFirewallProfileRulesetProfile)(nil)).Elem()\n}\n\ntype ArrayOfGuestAliases struct {\n\tGuestAliases []GuestAliases `xml:\"GuestAliases,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfGuestAliases\"] = reflect.TypeOf((*ArrayOfGuestAliases)(nil)).Elem()\n}\n\ntype ArrayOfGuestAuthAliasInfo struct {\n\tGuestAuthAliasInfo []GuestAuthAliasInfo `xml:\"GuestAuthAliasInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfGuestAuthAliasInfo\"] = reflect.TypeOf((*ArrayOfGuestAuthAliasInfo)(nil)).Elem()\n}\n\ntype ArrayOfGuestAuthSubject struct {\n\tGuestAuthSubject []BaseGuestAuthSubject `xml:\"GuestAuthSubject,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfGuestAuthSubject\"] = reflect.TypeOf((*ArrayOfGuestAuthSubject)(nil)).Elem()\n}\n\ntype ArrayOfGuestDiskInfo struct {\n\tGuestDiskInfo []GuestDiskInfo `xml:\"GuestDiskInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfGuestDiskInfo\"] = reflect.TypeOf((*ArrayOfGuestDiskInfo)(nil)).Elem()\n}\n\ntype ArrayOfGuestFileInfo struct {\n\tGuestFileInfo []GuestFileInfo `xml:\"GuestFileInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfGuestFileInfo\"] = reflect.TypeOf((*ArrayOfGuestFileInfo)(nil)).Elem()\n}\n\ntype ArrayOfGuestInfoNamespaceGenerationInfo struct {\n\tGuestInfoNamespaceGenerationInfo []GuestInfoNamespaceGenerationInfo `xml:\"GuestInfoNamespaceGenerationInfo,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"ArrayOfGuestInfoNamespaceGenerationInfo\"] = reflect.TypeOf((*ArrayOfGuestInfoNamespaceGenerationInfo)(nil)).Elem()\n}\n\ntype ArrayOfGuestMappedAliases struct {\n\tGuestMappedAliases []GuestMappedAliases `xml:\"GuestMappedAliases,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfGuestMappedAliases\"] = reflect.TypeOf((*ArrayOfGuestMappedAliases)(nil)).Elem()\n}\n\ntype ArrayOfGuestNicInfo struct {\n\tGuestNicInfo []GuestNicInfo `xml:\"GuestNicInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfGuestNicInfo\"] = reflect.TypeOf((*ArrayOfGuestNicInfo)(nil)).Elem()\n}\n\ntype ArrayOfGuestOsDescriptor struct {\n\tGuestOsDescriptor []GuestOsDescriptor `xml:\"GuestOsDescriptor,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfGuestOsDescriptor\"] = reflect.TypeOf((*ArrayOfGuestOsDescriptor)(nil)).Elem()\n}\n\ntype ArrayOfGuestProcessInfo struct {\n\tGuestProcessInfo []GuestProcessInfo `xml:\"GuestProcessInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfGuestProcessInfo\"] = reflect.TypeOf((*ArrayOfGuestProcessInfo)(nil)).Elem()\n}\n\ntype ArrayOfGuestRegKeyRecordSpec struct {\n\tGuestRegKeyRecordSpec []GuestRegKeyRecordSpec `xml:\"GuestRegKeyRecordSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfGuestRegKeyRecordSpec\"] = reflect.TypeOf((*ArrayOfGuestRegKeyRecordSpec)(nil)).Elem()\n}\n\ntype ArrayOfGuestRegValueSpec struct {\n\tGuestRegValueSpec []GuestRegValueSpec `xml:\"GuestRegValueSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfGuestRegValueSpec\"] = reflect.TypeOf((*ArrayOfGuestRegValueSpec)(nil)).Elem()\n}\n\ntype ArrayOfGuestStackInfo struct {\n\tGuestStackInfo []GuestStackInfo `xml:\"GuestStackInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfGuestStackInfo\"] = reflect.TypeOf((*ArrayOfGuestStackInfo)(nil)).Elem()\n}\n\ntype ArrayOfHbrManagerVmReplicationCapability struct {\n\tHbrManagerVmReplicationCapability []HbrManagerVmReplicationCapability `xml:\"HbrManagerVmReplicationCapability,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"ArrayOfHbrManagerVmReplicationCapability\"] = reflect.TypeOf((*ArrayOfHbrManagerVmReplicationCapability)(nil)).Elem()\n}\n\ntype ArrayOfHealthUpdate struct {\n\tHealthUpdate []HealthUpdate `xml:\"HealthUpdate,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHealthUpdate\"] = reflect.TypeOf((*ArrayOfHealthUpdate)(nil)).Elem()\n}\n\ntype ArrayOfHealthUpdateInfo struct {\n\tHealthUpdateInfo []HealthUpdateInfo `xml:\"HealthUpdateInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHealthUpdateInfo\"] = reflect.TypeOf((*ArrayOfHealthUpdateInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostAccessControlEntry struct {\n\tHostAccessControlEntry []HostAccessControlEntry `xml:\"HostAccessControlEntry,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostAccessControlEntry\"] = reflect.TypeOf((*ArrayOfHostAccessControlEntry)(nil)).Elem()\n}\n\ntype ArrayOfHostAccountSpec struct {\n\tHostAccountSpec []BaseHostAccountSpec `xml:\"HostAccountSpec,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostAccountSpec\"] = reflect.TypeOf((*ArrayOfHostAccountSpec)(nil)).Elem()\n}\n\ntype ArrayOfHostActiveDirectory struct {\n\tHostActiveDirectory []HostActiveDirectory `xml:\"HostActiveDirectory,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostActiveDirectory\"] = reflect.TypeOf((*ArrayOfHostActiveDirectory)(nil)).Elem()\n}\n\ntype ArrayOfHostAuthenticationStoreInfo struct {\n\tHostAuthenticationStoreInfo []BaseHostAuthenticationStoreInfo `xml:\"HostAuthenticationStoreInfo,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostAuthenticationStoreInfo\"] = reflect.TypeOf((*ArrayOfHostAuthenticationStoreInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostBootDevice struct {\n\tHostBootDevice []HostBootDevice `xml:\"HostBootDevice,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostBootDevice\"] = reflect.TypeOf((*ArrayOfHostBootDevice)(nil)).Elem()\n}\n\ntype ArrayOfHostCacheConfigurationInfo struct {\n\tHostCacheConfigurationInfo []HostCacheConfigurationInfo 
`xml:\"HostCacheConfigurationInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostCacheConfigurationInfo\"] = reflect.TypeOf((*ArrayOfHostCacheConfigurationInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostConnectInfoNetworkInfo struct {\n\tHostConnectInfoNetworkInfo []BaseHostConnectInfoNetworkInfo `xml:\"HostConnectInfoNetworkInfo,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostConnectInfoNetworkInfo\"] = reflect.TypeOf((*ArrayOfHostConnectInfoNetworkInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostCpuIdInfo struct {\n\tHostCpuIdInfo []HostCpuIdInfo `xml:\"HostCpuIdInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostCpuIdInfo\"] = reflect.TypeOf((*ArrayOfHostCpuIdInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostCpuPackage struct {\n\tHostCpuPackage []HostCpuPackage `xml:\"HostCpuPackage,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostCpuPackage\"] = reflect.TypeOf((*ArrayOfHostCpuPackage)(nil)).Elem()\n}\n\ntype ArrayOfHostDatastoreBrowserSearchResults struct {\n\tHostDatastoreBrowserSearchResults []HostDatastoreBrowserSearchResults `xml:\"HostDatastoreBrowserSearchResults,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostDatastoreBrowserSearchResults\"] = reflect.TypeOf((*ArrayOfHostDatastoreBrowserSearchResults)(nil)).Elem()\n}\n\ntype ArrayOfHostDatastoreConnectInfo struct {\n\tHostDatastoreConnectInfo []BaseHostDatastoreConnectInfo `xml:\"HostDatastoreConnectInfo,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostDatastoreConnectInfo\"] = reflect.TypeOf((*ArrayOfHostDatastoreConnectInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostDatastoreSystemDatastoreResult struct {\n\tHostDatastoreSystemDatastoreResult []HostDatastoreSystemDatastoreResult `xml:\"HostDatastoreSystemDatastoreResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostDatastoreSystemDatastoreResult\"] = reflect.TypeOf((*ArrayOfHostDatastoreSystemDatastoreResult)(nil)).Elem()\n}\n\ntype ArrayOfHostDateTimeSystemTimeZone struct {\n\tHostDateTimeSystemTimeZone []HostDateTimeSystemTimeZone 
`xml:\"HostDateTimeSystemTimeZone,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostDateTimeSystemTimeZone\"] = reflect.TypeOf((*ArrayOfHostDateTimeSystemTimeZone)(nil)).Elem()\n}\n\ntype ArrayOfHostDhcpService struct {\n\tHostDhcpService []HostDhcpService `xml:\"HostDhcpService,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostDhcpService\"] = reflect.TypeOf((*ArrayOfHostDhcpService)(nil)).Elem()\n}\n\ntype ArrayOfHostDhcpServiceConfig struct {\n\tHostDhcpServiceConfig []HostDhcpServiceConfig `xml:\"HostDhcpServiceConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostDhcpServiceConfig\"] = reflect.TypeOf((*ArrayOfHostDhcpServiceConfig)(nil)).Elem()\n}\n\ntype ArrayOfHostDiagnosticPartition struct {\n\tHostDiagnosticPartition []HostDiagnosticPartition `xml:\"HostDiagnosticPartition,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostDiagnosticPartition\"] = reflect.TypeOf((*ArrayOfHostDiagnosticPartition)(nil)).Elem()\n}\n\ntype ArrayOfHostDiagnosticPartitionCreateOption struct {\n\tHostDiagnosticPartitionCreateOption []HostDiagnosticPartitionCreateOption `xml:\"HostDiagnosticPartitionCreateOption,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostDiagnosticPartitionCreateOption\"] = reflect.TypeOf((*ArrayOfHostDiagnosticPartitionCreateOption)(nil)).Elem()\n}\n\ntype ArrayOfHostDiskConfigurationResult struct {\n\tHostDiskConfigurationResult []HostDiskConfigurationResult `xml:\"HostDiskConfigurationResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostDiskConfigurationResult\"] = reflect.TypeOf((*ArrayOfHostDiskConfigurationResult)(nil)).Elem()\n}\n\ntype ArrayOfHostDiskMappingPartitionOption struct {\n\tHostDiskMappingPartitionOption []HostDiskMappingPartitionOption `xml:\"HostDiskMappingPartitionOption,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostDiskMappingPartitionOption\"] = reflect.TypeOf((*ArrayOfHostDiskMappingPartitionOption)(nil)).Elem()\n}\n\ntype ArrayOfHostDiskPartitionAttributes struct {\n\tHostDiskPartitionAttributes 
[]HostDiskPartitionAttributes `xml:\"HostDiskPartitionAttributes,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostDiskPartitionAttributes\"] = reflect.TypeOf((*ArrayOfHostDiskPartitionAttributes)(nil)).Elem()\n}\n\ntype ArrayOfHostDiskPartitionBlockRange struct {\n\tHostDiskPartitionBlockRange []HostDiskPartitionBlockRange `xml:\"HostDiskPartitionBlockRange,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostDiskPartitionBlockRange\"] = reflect.TypeOf((*ArrayOfHostDiskPartitionBlockRange)(nil)).Elem()\n}\n\ntype ArrayOfHostDiskPartitionInfo struct {\n\tHostDiskPartitionInfo []HostDiskPartitionInfo `xml:\"HostDiskPartitionInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostDiskPartitionInfo\"] = reflect.TypeOf((*ArrayOfHostDiskPartitionInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostEventArgument struct {\n\tHostEventArgument []HostEventArgument `xml:\"HostEventArgument,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostEventArgument\"] = reflect.TypeOf((*ArrayOfHostEventArgument)(nil)).Elem()\n}\n\ntype ArrayOfHostFeatureCapability struct {\n\tHostFeatureCapability []HostFeatureCapability `xml:\"HostFeatureCapability,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostFeatureCapability\"] = reflect.TypeOf((*ArrayOfHostFeatureCapability)(nil)).Elem()\n}\n\ntype ArrayOfHostFeatureMask struct {\n\tHostFeatureMask []HostFeatureMask `xml:\"HostFeatureMask,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostFeatureMask\"] = reflect.TypeOf((*ArrayOfHostFeatureMask)(nil)).Elem()\n}\n\ntype ArrayOfHostFeatureVersionInfo struct {\n\tHostFeatureVersionInfo []HostFeatureVersionInfo `xml:\"HostFeatureVersionInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostFeatureVersionInfo\"] = reflect.TypeOf((*ArrayOfHostFeatureVersionInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostFileSystemMountInfo struct {\n\tHostFileSystemMountInfo []HostFileSystemMountInfo `xml:\"HostFileSystemMountInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostFileSystemMountInfo\"] = 
reflect.TypeOf((*ArrayOfHostFileSystemMountInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostFirewallConfigRuleSetConfig struct {\n\tHostFirewallConfigRuleSetConfig []HostFirewallConfigRuleSetConfig `xml:\"HostFirewallConfigRuleSetConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostFirewallConfigRuleSetConfig\"] = reflect.TypeOf((*ArrayOfHostFirewallConfigRuleSetConfig)(nil)).Elem()\n}\n\ntype ArrayOfHostFirewallRule struct {\n\tHostFirewallRule []HostFirewallRule `xml:\"HostFirewallRule,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostFirewallRule\"] = reflect.TypeOf((*ArrayOfHostFirewallRule)(nil)).Elem()\n}\n\ntype ArrayOfHostFirewallRuleset struct {\n\tHostFirewallRuleset []HostFirewallRuleset `xml:\"HostFirewallRuleset,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostFirewallRuleset\"] = reflect.TypeOf((*ArrayOfHostFirewallRuleset)(nil)).Elem()\n}\n\ntype ArrayOfHostFirewallRulesetIpNetwork struct {\n\tHostFirewallRulesetIpNetwork []HostFirewallRulesetIpNetwork `xml:\"HostFirewallRulesetIpNetwork,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostFirewallRulesetIpNetwork\"] = reflect.TypeOf((*ArrayOfHostFirewallRulesetIpNetwork)(nil)).Elem()\n}\n\ntype ArrayOfHostGraphicsConfigDeviceType struct {\n\tHostGraphicsConfigDeviceType []HostGraphicsConfigDeviceType `xml:\"HostGraphicsConfigDeviceType,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostGraphicsConfigDeviceType\"] = reflect.TypeOf((*ArrayOfHostGraphicsConfigDeviceType)(nil)).Elem()\n}\n\ntype ArrayOfHostGraphicsInfo struct {\n\tHostGraphicsInfo []HostGraphicsInfo `xml:\"HostGraphicsInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostGraphicsInfo\"] = reflect.TypeOf((*ArrayOfHostGraphicsInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostHardwareElementInfo struct {\n\tHostHardwareElementInfo []BaseHostHardwareElementInfo `xml:\"HostHardwareElementInfo,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostHardwareElementInfo\"] = reflect.TypeOf((*ArrayOfHostHardwareElementInfo)(nil)).Elem()\n}\n\ntype 
ArrayOfHostHostBusAdapter struct {\n\tHostHostBusAdapter []BaseHostHostBusAdapter `xml:\"HostHostBusAdapter,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostHostBusAdapter\"] = reflect.TypeOf((*ArrayOfHostHostBusAdapter)(nil)).Elem()\n}\n\ntype ArrayOfHostInternetScsiHbaIscsiIpv6Address struct {\n\tHostInternetScsiHbaIscsiIpv6Address []HostInternetScsiHbaIscsiIpv6Address `xml:\"HostInternetScsiHbaIscsiIpv6Address,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostInternetScsiHbaIscsiIpv6Address\"] = reflect.TypeOf((*ArrayOfHostInternetScsiHbaIscsiIpv6Address)(nil)).Elem()\n}\n\ntype ArrayOfHostInternetScsiHbaParamValue struct {\n\tHostInternetScsiHbaParamValue []HostInternetScsiHbaParamValue `xml:\"HostInternetScsiHbaParamValue,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostInternetScsiHbaParamValue\"] = reflect.TypeOf((*ArrayOfHostInternetScsiHbaParamValue)(nil)).Elem()\n}\n\ntype ArrayOfHostInternetScsiHbaSendTarget struct {\n\tHostInternetScsiHbaSendTarget []HostInternetScsiHbaSendTarget `xml:\"HostInternetScsiHbaSendTarget,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostInternetScsiHbaSendTarget\"] = reflect.TypeOf((*ArrayOfHostInternetScsiHbaSendTarget)(nil)).Elem()\n}\n\ntype ArrayOfHostInternetScsiHbaStaticTarget struct {\n\tHostInternetScsiHbaStaticTarget []HostInternetScsiHbaStaticTarget `xml:\"HostInternetScsiHbaStaticTarget,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostInternetScsiHbaStaticTarget\"] = reflect.TypeOf((*ArrayOfHostInternetScsiHbaStaticTarget)(nil)).Elem()\n}\n\ntype ArrayOfHostIoFilterInfo struct {\n\tHostIoFilterInfo []HostIoFilterInfo `xml:\"HostIoFilterInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostIoFilterInfo\"] = reflect.TypeOf((*ArrayOfHostIoFilterInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostIpConfigIpV6Address struct {\n\tHostIpConfigIpV6Address []HostIpConfigIpV6Address `xml:\"HostIpConfigIpV6Address,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostIpConfigIpV6Address\"] = 
reflect.TypeOf((*ArrayOfHostIpConfigIpV6Address)(nil)).Elem()\n}\n\ntype ArrayOfHostIpRouteEntry struct {\n\tHostIpRouteEntry []HostIpRouteEntry `xml:\"HostIpRouteEntry,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostIpRouteEntry\"] = reflect.TypeOf((*ArrayOfHostIpRouteEntry)(nil)).Elem()\n}\n\ntype ArrayOfHostIpRouteOp struct {\n\tHostIpRouteOp []HostIpRouteOp `xml:\"HostIpRouteOp,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostIpRouteOp\"] = reflect.TypeOf((*ArrayOfHostIpRouteOp)(nil)).Elem()\n}\n\ntype ArrayOfHostLowLevelProvisioningManagerDiskLayoutSpec struct {\n\tHostLowLevelProvisioningManagerDiskLayoutSpec []HostLowLevelProvisioningManagerDiskLayoutSpec `xml:\"HostLowLevelProvisioningManagerDiskLayoutSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostLowLevelProvisioningManagerDiskLayoutSpec\"] = reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerDiskLayoutSpec)(nil)).Elem()\n}\n\ntype ArrayOfHostLowLevelProvisioningManagerFileDeleteResult struct {\n\tHostLowLevelProvisioningManagerFileDeleteResult []HostLowLevelProvisioningManagerFileDeleteResult `xml:\"HostLowLevelProvisioningManagerFileDeleteResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostLowLevelProvisioningManagerFileDeleteResult\"] = reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerFileDeleteResult)(nil)).Elem()\n}\n\ntype ArrayOfHostLowLevelProvisioningManagerFileDeleteSpec struct {\n\tHostLowLevelProvisioningManagerFileDeleteSpec []HostLowLevelProvisioningManagerFileDeleteSpec `xml:\"HostLowLevelProvisioningManagerFileDeleteSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostLowLevelProvisioningManagerFileDeleteSpec\"] = reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerFileDeleteSpec)(nil)).Elem()\n}\n\ntype ArrayOfHostLowLevelProvisioningManagerFileReserveResult struct {\n\tHostLowLevelProvisioningManagerFileReserveResult []HostLowLevelProvisioningManagerFileReserveResult `xml:\"HostLowLevelProvisioningManagerFileReserveResult,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"ArrayOfHostLowLevelProvisioningManagerFileReserveResult\"] = reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerFileReserveResult)(nil)).Elem()\n}\n\ntype ArrayOfHostLowLevelProvisioningManagerFileReserveSpec struct {\n\tHostLowLevelProvisioningManagerFileReserveSpec []HostLowLevelProvisioningManagerFileReserveSpec `xml:\"HostLowLevelProvisioningManagerFileReserveSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostLowLevelProvisioningManagerFileReserveSpec\"] = reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerFileReserveSpec)(nil)).Elem()\n}\n\ntype ArrayOfHostLowLevelProvisioningManagerSnapshotLayoutSpec struct {\n\tHostLowLevelProvisioningManagerSnapshotLayoutSpec []HostLowLevelProvisioningManagerSnapshotLayoutSpec `xml:\"HostLowLevelProvisioningManagerSnapshotLayoutSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostLowLevelProvisioningManagerSnapshotLayoutSpec\"] = reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerSnapshotLayoutSpec)(nil)).Elem()\n}\n\ntype ArrayOfHostMemberHealthCheckResult struct {\n\tHostMemberHealthCheckResult []BaseHostMemberHealthCheckResult `xml:\"HostMemberHealthCheckResult,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostMemberHealthCheckResult\"] = reflect.TypeOf((*ArrayOfHostMemberHealthCheckResult)(nil)).Elem()\n}\n\ntype ArrayOfHostMemberRuntimeInfo struct {\n\tHostMemberRuntimeInfo []HostMemberRuntimeInfo `xml:\"HostMemberRuntimeInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostMemberRuntimeInfo\"] = reflect.TypeOf((*ArrayOfHostMemberRuntimeInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostMultipathInfoLogicalUnit struct {\n\tHostMultipathInfoLogicalUnit []HostMultipathInfoLogicalUnit `xml:\"HostMultipathInfoLogicalUnit,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostMultipathInfoLogicalUnit\"] = reflect.TypeOf((*ArrayOfHostMultipathInfoLogicalUnit)(nil)).Elem()\n}\n\ntype ArrayOfHostMultipathInfoPath struct {\n\tHostMultipathInfoPath []HostMultipathInfoPath 
`xml:\"HostMultipathInfoPath,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostMultipathInfoPath\"] = reflect.TypeOf((*ArrayOfHostMultipathInfoPath)(nil)).Elem()\n}\n\ntype ArrayOfHostMultipathStateInfoPath struct {\n\tHostMultipathStateInfoPath []HostMultipathStateInfoPath `xml:\"HostMultipathStateInfoPath,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostMultipathStateInfoPath\"] = reflect.TypeOf((*ArrayOfHostMultipathStateInfoPath)(nil)).Elem()\n}\n\ntype ArrayOfHostNasVolumeConfig struct {\n\tHostNasVolumeConfig []HostNasVolumeConfig `xml:\"HostNasVolumeConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostNasVolumeConfig\"] = reflect.TypeOf((*ArrayOfHostNasVolumeConfig)(nil)).Elem()\n}\n\ntype ArrayOfHostNatService struct {\n\tHostNatService []HostNatService `xml:\"HostNatService,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostNatService\"] = reflect.TypeOf((*ArrayOfHostNatService)(nil)).Elem()\n}\n\ntype ArrayOfHostNatServiceConfig struct {\n\tHostNatServiceConfig []HostNatServiceConfig `xml:\"HostNatServiceConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostNatServiceConfig\"] = reflect.TypeOf((*ArrayOfHostNatServiceConfig)(nil)).Elem()\n}\n\ntype ArrayOfHostNatServicePortForwardSpec struct {\n\tHostNatServicePortForwardSpec []HostNatServicePortForwardSpec `xml:\"HostNatServicePortForwardSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostNatServicePortForwardSpec\"] = reflect.TypeOf((*ArrayOfHostNatServicePortForwardSpec)(nil)).Elem()\n}\n\ntype ArrayOfHostNetStackInstance struct {\n\tHostNetStackInstance []HostNetStackInstance `xml:\"HostNetStackInstance,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostNetStackInstance\"] = reflect.TypeOf((*ArrayOfHostNetStackInstance)(nil)).Elem()\n}\n\ntype ArrayOfHostNetworkConfigNetStackSpec struct {\n\tHostNetworkConfigNetStackSpec []HostNetworkConfigNetStackSpec `xml:\"HostNetworkConfigNetStackSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostNetworkConfigNetStackSpec\"] = 
reflect.TypeOf((*ArrayOfHostNetworkConfigNetStackSpec)(nil)).Elem()\n}\n\ntype ArrayOfHostNumaNode struct {\n\tHostNumaNode []HostNumaNode `xml:\"HostNumaNode,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostNumaNode\"] = reflect.TypeOf((*ArrayOfHostNumaNode)(nil)).Elem()\n}\n\ntype ArrayOfHostNumericSensorInfo struct {\n\tHostNumericSensorInfo []HostNumericSensorInfo `xml:\"HostNumericSensorInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostNumericSensorInfo\"] = reflect.TypeOf((*ArrayOfHostNumericSensorInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostOpaqueNetworkInfo struct {\n\tHostOpaqueNetworkInfo []HostOpaqueNetworkInfo `xml:\"HostOpaqueNetworkInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostOpaqueNetworkInfo\"] = reflect.TypeOf((*ArrayOfHostOpaqueNetworkInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostOpaqueSwitch struct {\n\tHostOpaqueSwitch []HostOpaqueSwitch `xml:\"HostOpaqueSwitch,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostOpaqueSwitch\"] = reflect.TypeOf((*ArrayOfHostOpaqueSwitch)(nil)).Elem()\n}\n\ntype ArrayOfHostOpaqueSwitchPhysicalNicZone struct {\n\tHostOpaqueSwitchPhysicalNicZone []HostOpaqueSwitchPhysicalNicZone `xml:\"HostOpaqueSwitchPhysicalNicZone,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostOpaqueSwitchPhysicalNicZone\"] = reflect.TypeOf((*ArrayOfHostOpaqueSwitchPhysicalNicZone)(nil)).Elem()\n}\n\ntype ArrayOfHostPatchManagerStatus struct {\n\tHostPatchManagerStatus []HostPatchManagerStatus `xml:\"HostPatchManagerStatus,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPatchManagerStatus\"] = reflect.TypeOf((*ArrayOfHostPatchManagerStatus)(nil)).Elem()\n}\n\ntype ArrayOfHostPatchManagerStatusPrerequisitePatch struct {\n\tHostPatchManagerStatusPrerequisitePatch []HostPatchManagerStatusPrerequisitePatch `xml:\"HostPatchManagerStatusPrerequisitePatch,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPatchManagerStatusPrerequisitePatch\"] = reflect.TypeOf((*ArrayOfHostPatchManagerStatusPrerequisitePatch)(nil)).Elem()\n}\n\ntype 
ArrayOfHostPathSelectionPolicyOption struct {\n\tHostPathSelectionPolicyOption []HostPathSelectionPolicyOption `xml:\"HostPathSelectionPolicyOption,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPathSelectionPolicyOption\"] = reflect.TypeOf((*ArrayOfHostPathSelectionPolicyOption)(nil)).Elem()\n}\n\ntype ArrayOfHostPciDevice struct {\n\tHostPciDevice []HostPciDevice `xml:\"HostPciDevice,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPciDevice\"] = reflect.TypeOf((*ArrayOfHostPciDevice)(nil)).Elem()\n}\n\ntype ArrayOfHostPciPassthruConfig struct {\n\tHostPciPassthruConfig []BaseHostPciPassthruConfig `xml:\"HostPciPassthruConfig,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPciPassthruConfig\"] = reflect.TypeOf((*ArrayOfHostPciPassthruConfig)(nil)).Elem()\n}\n\ntype ArrayOfHostPciPassthruInfo struct {\n\tHostPciPassthruInfo []BaseHostPciPassthruInfo `xml:\"HostPciPassthruInfo,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPciPassthruInfo\"] = reflect.TypeOf((*ArrayOfHostPciPassthruInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostPlacedVirtualNicIdentifier struct {\n\tHostPlacedVirtualNicIdentifier []HostPlacedVirtualNicIdentifier `xml:\"HostPlacedVirtualNicIdentifier,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPlacedVirtualNicIdentifier\"] = reflect.TypeOf((*ArrayOfHostPlacedVirtualNicIdentifier)(nil)).Elem()\n}\n\ntype ArrayOfHostPlugStoreTopologyAdapter struct {\n\tHostPlugStoreTopologyAdapter []HostPlugStoreTopologyAdapter `xml:\"HostPlugStoreTopologyAdapter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPlugStoreTopologyAdapter\"] = reflect.TypeOf((*ArrayOfHostPlugStoreTopologyAdapter)(nil)).Elem()\n}\n\ntype ArrayOfHostPlugStoreTopologyDevice struct {\n\tHostPlugStoreTopologyDevice []HostPlugStoreTopologyDevice `xml:\"HostPlugStoreTopologyDevice,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPlugStoreTopologyDevice\"] = reflect.TypeOf((*ArrayOfHostPlugStoreTopologyDevice)(nil)).Elem()\n}\n\ntype 
ArrayOfHostPlugStoreTopologyPath struct {\n\tHostPlugStoreTopologyPath []HostPlugStoreTopologyPath `xml:\"HostPlugStoreTopologyPath,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPlugStoreTopologyPath\"] = reflect.TypeOf((*ArrayOfHostPlugStoreTopologyPath)(nil)).Elem()\n}\n\ntype ArrayOfHostPlugStoreTopologyPlugin struct {\n\tHostPlugStoreTopologyPlugin []HostPlugStoreTopologyPlugin `xml:\"HostPlugStoreTopologyPlugin,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPlugStoreTopologyPlugin\"] = reflect.TypeOf((*ArrayOfHostPlugStoreTopologyPlugin)(nil)).Elem()\n}\n\ntype ArrayOfHostPlugStoreTopologyTarget struct {\n\tHostPlugStoreTopologyTarget []HostPlugStoreTopologyTarget `xml:\"HostPlugStoreTopologyTarget,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPlugStoreTopologyTarget\"] = reflect.TypeOf((*ArrayOfHostPlugStoreTopologyTarget)(nil)).Elem()\n}\n\ntype ArrayOfHostPnicNetworkResourceInfo struct {\n\tHostPnicNetworkResourceInfo []HostPnicNetworkResourceInfo `xml:\"HostPnicNetworkResourceInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPnicNetworkResourceInfo\"] = reflect.TypeOf((*ArrayOfHostPnicNetworkResourceInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostPortGroup struct {\n\tHostPortGroup []HostPortGroup `xml:\"HostPortGroup,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPortGroup\"] = reflect.TypeOf((*ArrayOfHostPortGroup)(nil)).Elem()\n}\n\ntype ArrayOfHostPortGroupConfig struct {\n\tHostPortGroupConfig []HostPortGroupConfig `xml:\"HostPortGroupConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPortGroupConfig\"] = reflect.TypeOf((*ArrayOfHostPortGroupConfig)(nil)).Elem()\n}\n\ntype ArrayOfHostPortGroupPort struct {\n\tHostPortGroupPort []HostPortGroupPort `xml:\"HostPortGroupPort,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPortGroupPort\"] = reflect.TypeOf((*ArrayOfHostPortGroupPort)(nil)).Elem()\n}\n\ntype ArrayOfHostPortGroupProfile struct {\n\tHostPortGroupProfile []HostPortGroupProfile 
`xml:\"HostPortGroupProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPortGroupProfile\"] = reflect.TypeOf((*ArrayOfHostPortGroupProfile)(nil)).Elem()\n}\n\ntype ArrayOfHostPowerPolicy struct {\n\tHostPowerPolicy []HostPowerPolicy `xml:\"HostPowerPolicy,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostPowerPolicy\"] = reflect.TypeOf((*ArrayOfHostPowerPolicy)(nil)).Elem()\n}\n\ntype ArrayOfHostProfileManagerCompositionValidationResultResultElement struct {\n\tHostProfileManagerCompositionValidationResultResultElement []HostProfileManagerCompositionValidationResultResultElement `xml:\"HostProfileManagerCompositionValidationResultResultElement,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostProfileManagerCompositionValidationResultResultElement\"] = reflect.TypeOf((*ArrayOfHostProfileManagerCompositionValidationResultResultElement)(nil)).Elem()\n}\n\ntype ArrayOfHostProfileManagerHostToConfigSpecMap struct {\n\tHostProfileManagerHostToConfigSpecMap []HostProfileManagerHostToConfigSpecMap `xml:\"HostProfileManagerHostToConfigSpecMap,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostProfileManagerHostToConfigSpecMap\"] = reflect.TypeOf((*ArrayOfHostProfileManagerHostToConfigSpecMap)(nil)).Elem()\n}\n\ntype ArrayOfHostProfilesEntityCustomizations struct {\n\tHostProfilesEntityCustomizations []BaseHostProfilesEntityCustomizations `xml:\"HostProfilesEntityCustomizations,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostProfilesEntityCustomizations\"] = reflect.TypeOf((*ArrayOfHostProfilesEntityCustomizations)(nil)).Elem()\n}\n\ntype ArrayOfHostProtocolEndpoint struct {\n\tHostProtocolEndpoint []HostProtocolEndpoint `xml:\"HostProtocolEndpoint,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostProtocolEndpoint\"] = reflect.TypeOf((*ArrayOfHostProtocolEndpoint)(nil)).Elem()\n}\n\ntype ArrayOfHostProxySwitch struct {\n\tHostProxySwitch []HostProxySwitch `xml:\"HostProxySwitch,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostProxySwitch\"] = 
reflect.TypeOf((*ArrayOfHostProxySwitch)(nil)).Elem()\n}\n\ntype ArrayOfHostProxySwitchConfig struct {\n\tHostProxySwitchConfig []HostProxySwitchConfig `xml:\"HostProxySwitchConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostProxySwitchConfig\"] = reflect.TypeOf((*ArrayOfHostProxySwitchConfig)(nil)).Elem()\n}\n\ntype ArrayOfHostProxySwitchHostLagConfig struct {\n\tHostProxySwitchHostLagConfig []HostProxySwitchHostLagConfig `xml:\"HostProxySwitchHostLagConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostProxySwitchHostLagConfig\"] = reflect.TypeOf((*ArrayOfHostProxySwitchHostLagConfig)(nil)).Elem()\n}\n\ntype ArrayOfHostRuntimeInfoNetStackInstanceRuntimeInfo struct {\n\tHostRuntimeInfoNetStackInstanceRuntimeInfo []HostRuntimeInfoNetStackInstanceRuntimeInfo `xml:\"HostRuntimeInfoNetStackInstanceRuntimeInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostRuntimeInfoNetStackInstanceRuntimeInfo\"] = reflect.TypeOf((*ArrayOfHostRuntimeInfoNetStackInstanceRuntimeInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostScsiDisk struct {\n\tHostScsiDisk []HostScsiDisk `xml:\"HostScsiDisk,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostScsiDisk\"] = reflect.TypeOf((*ArrayOfHostScsiDisk)(nil)).Elem()\n}\n\ntype ArrayOfHostScsiDiskPartition struct {\n\tHostScsiDiskPartition []HostScsiDiskPartition `xml:\"HostScsiDiskPartition,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostScsiDiskPartition\"] = reflect.TypeOf((*ArrayOfHostScsiDiskPartition)(nil)).Elem()\n}\n\ntype ArrayOfHostScsiTopologyInterface struct {\n\tHostScsiTopologyInterface []HostScsiTopologyInterface `xml:\"HostScsiTopologyInterface,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostScsiTopologyInterface\"] = reflect.TypeOf((*ArrayOfHostScsiTopologyInterface)(nil)).Elem()\n}\n\ntype ArrayOfHostScsiTopologyLun struct {\n\tHostScsiTopologyLun []HostScsiTopologyLun `xml:\"HostScsiTopologyLun,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostScsiTopologyLun\"] = 
reflect.TypeOf((*ArrayOfHostScsiTopologyLun)(nil)).Elem()\n}\n\ntype ArrayOfHostScsiTopologyTarget struct {\n\tHostScsiTopologyTarget []HostScsiTopologyTarget `xml:\"HostScsiTopologyTarget,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostScsiTopologyTarget\"] = reflect.TypeOf((*ArrayOfHostScsiTopologyTarget)(nil)).Elem()\n}\n\ntype ArrayOfHostService struct {\n\tHostService []HostService `xml:\"HostService,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostService\"] = reflect.TypeOf((*ArrayOfHostService)(nil)).Elem()\n}\n\ntype ArrayOfHostServiceConfig struct {\n\tHostServiceConfig []HostServiceConfig `xml:\"HostServiceConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostServiceConfig\"] = reflect.TypeOf((*ArrayOfHostServiceConfig)(nil)).Elem()\n}\n\ntype ArrayOfHostSnmpDestination struct {\n\tHostSnmpDestination []HostSnmpDestination `xml:\"HostSnmpDestination,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostSnmpDestination\"] = reflect.TypeOf((*ArrayOfHostSnmpDestination)(nil)).Elem()\n}\n\ntype ArrayOfHostSriovDevicePoolInfo struct {\n\tHostSriovDevicePoolInfo []BaseHostSriovDevicePoolInfo `xml:\"HostSriovDevicePoolInfo,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostSriovDevicePoolInfo\"] = reflect.TypeOf((*ArrayOfHostSriovDevicePoolInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostSslThumbprintInfo struct {\n\tHostSslThumbprintInfo []HostSslThumbprintInfo `xml:\"HostSslThumbprintInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostSslThumbprintInfo\"] = reflect.TypeOf((*ArrayOfHostSslThumbprintInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostStorageArrayTypePolicyOption struct {\n\tHostStorageArrayTypePolicyOption []HostStorageArrayTypePolicyOption `xml:\"HostStorageArrayTypePolicyOption,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostStorageArrayTypePolicyOption\"] = reflect.TypeOf((*ArrayOfHostStorageArrayTypePolicyOption)(nil)).Elem()\n}\n\ntype ArrayOfHostStorageElementInfo struct {\n\tHostStorageElementInfo []HostStorageElementInfo 
`xml:\"HostStorageElementInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostStorageElementInfo\"] = reflect.TypeOf((*ArrayOfHostStorageElementInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostStorageOperationalInfo struct {\n\tHostStorageOperationalInfo []HostStorageOperationalInfo `xml:\"HostStorageOperationalInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostStorageOperationalInfo\"] = reflect.TypeOf((*ArrayOfHostStorageOperationalInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostStorageSystemDiskLocatorLedResult struct {\n\tHostStorageSystemDiskLocatorLedResult []HostStorageSystemDiskLocatorLedResult `xml:\"HostStorageSystemDiskLocatorLedResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostStorageSystemDiskLocatorLedResult\"] = reflect.TypeOf((*ArrayOfHostStorageSystemDiskLocatorLedResult)(nil)).Elem()\n}\n\ntype ArrayOfHostStorageSystemScsiLunResult struct {\n\tHostStorageSystemScsiLunResult []HostStorageSystemScsiLunResult `xml:\"HostStorageSystemScsiLunResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostStorageSystemScsiLunResult\"] = reflect.TypeOf((*ArrayOfHostStorageSystemScsiLunResult)(nil)).Elem()\n}\n\ntype ArrayOfHostStorageSystemVmfsVolumeResult struct {\n\tHostStorageSystemVmfsVolumeResult []HostStorageSystemVmfsVolumeResult `xml:\"HostStorageSystemVmfsVolumeResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostStorageSystemVmfsVolumeResult\"] = reflect.TypeOf((*ArrayOfHostStorageSystemVmfsVolumeResult)(nil)).Elem()\n}\n\ntype ArrayOfHostSubSpecification struct {\n\tHostSubSpecification []HostSubSpecification `xml:\"HostSubSpecification,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostSubSpecification\"] = reflect.TypeOf((*ArrayOfHostSubSpecification)(nil)).Elem()\n}\n\ntype ArrayOfHostSystemIdentificationInfo struct {\n\tHostSystemIdentificationInfo []HostSystemIdentificationInfo `xml:\"HostSystemIdentificationInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostSystemIdentificationInfo\"] = 
reflect.TypeOf((*ArrayOfHostSystemIdentificationInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostSystemResourceInfo struct {\n\tHostSystemResourceInfo []HostSystemResourceInfo `xml:\"HostSystemResourceInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostSystemResourceInfo\"] = reflect.TypeOf((*ArrayOfHostSystemResourceInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostSystemSwapConfigurationSystemSwapOption struct {\n\tHostSystemSwapConfigurationSystemSwapOption []BaseHostSystemSwapConfigurationSystemSwapOption `xml:\"HostSystemSwapConfigurationSystemSwapOption,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostSystemSwapConfigurationSystemSwapOption\"] = reflect.TypeOf((*ArrayOfHostSystemSwapConfigurationSystemSwapOption)(nil)).Elem()\n}\n\ntype ArrayOfHostTpmDigestInfo struct {\n\tHostTpmDigestInfo []HostTpmDigestInfo `xml:\"HostTpmDigestInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostTpmDigestInfo\"] = reflect.TypeOf((*ArrayOfHostTpmDigestInfo)(nil)).Elem()\n}\n\ntype ArrayOfHostTpmEventLogEntry struct {\n\tHostTpmEventLogEntry []HostTpmEventLogEntry `xml:\"HostTpmEventLogEntry,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostTpmEventLogEntry\"] = reflect.TypeOf((*ArrayOfHostTpmEventLogEntry)(nil)).Elem()\n}\n\ntype ArrayOfHostUnresolvedVmfsExtent struct {\n\tHostUnresolvedVmfsExtent []HostUnresolvedVmfsExtent `xml:\"HostUnresolvedVmfsExtent,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostUnresolvedVmfsExtent\"] = reflect.TypeOf((*ArrayOfHostUnresolvedVmfsExtent)(nil)).Elem()\n}\n\ntype ArrayOfHostUnresolvedVmfsResolutionResult struct {\n\tHostUnresolvedVmfsResolutionResult []HostUnresolvedVmfsResolutionResult `xml:\"HostUnresolvedVmfsResolutionResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostUnresolvedVmfsResolutionResult\"] = reflect.TypeOf((*ArrayOfHostUnresolvedVmfsResolutionResult)(nil)).Elem()\n}\n\ntype ArrayOfHostUnresolvedVmfsResolutionSpec struct {\n\tHostUnresolvedVmfsResolutionSpec []HostUnresolvedVmfsResolutionSpec 
`xml:\"HostUnresolvedVmfsResolutionSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostUnresolvedVmfsResolutionSpec\"] = reflect.TypeOf((*ArrayOfHostUnresolvedVmfsResolutionSpec)(nil)).Elem()\n}\n\ntype ArrayOfHostUnresolvedVmfsVolume struct {\n\tHostUnresolvedVmfsVolume []HostUnresolvedVmfsVolume `xml:\"HostUnresolvedVmfsVolume,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostUnresolvedVmfsVolume\"] = reflect.TypeOf((*ArrayOfHostUnresolvedVmfsVolume)(nil)).Elem()\n}\n\ntype ArrayOfHostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption struct {\n\tHostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption []HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption `xml:\"HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption\"] = reflect.TypeOf((*ArrayOfHostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption)(nil)).Elem()\n}\n\ntype ArrayOfHostVMotionCompatibility struct {\n\tHostVMotionCompatibility []HostVMotionCompatibility `xml:\"HostVMotionCompatibility,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostVMotionCompatibility\"] = reflect.TypeOf((*ArrayOfHostVMotionCompatibility)(nil)).Elem()\n}\n\ntype ArrayOfHostVirtualNic struct {\n\tHostVirtualNic []HostVirtualNic `xml:\"HostVirtualNic,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostVirtualNic\"] = reflect.TypeOf((*ArrayOfHostVirtualNic)(nil)).Elem()\n}\n\ntype ArrayOfHostVirtualNicConfig struct {\n\tHostVirtualNicConfig []HostVirtualNicConfig `xml:\"HostVirtualNicConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostVirtualNicConfig\"] = reflect.TypeOf((*ArrayOfHostVirtualNicConfig)(nil)).Elem()\n}\n\ntype ArrayOfHostVirtualNicManagerNicTypeSelection struct {\n\tHostVirtualNicManagerNicTypeSelection []HostVirtualNicManagerNicTypeSelection `xml:\"HostVirtualNicManagerNicTypeSelection,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"ArrayOfHostVirtualNicManagerNicTypeSelection\"] = reflect.TypeOf((*ArrayOfHostVirtualNicManagerNicTypeSelection)(nil)).Elem()\n}\n\ntype ArrayOfHostVirtualSwitch struct {\n\tHostVirtualSwitch []HostVirtualSwitch `xml:\"HostVirtualSwitch,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostVirtualSwitch\"] = reflect.TypeOf((*ArrayOfHostVirtualSwitch)(nil)).Elem()\n}\n\ntype ArrayOfHostVirtualSwitchConfig struct {\n\tHostVirtualSwitchConfig []HostVirtualSwitchConfig `xml:\"HostVirtualSwitchConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostVirtualSwitchConfig\"] = reflect.TypeOf((*ArrayOfHostVirtualSwitchConfig)(nil)).Elem()\n}\n\ntype ArrayOfHostVmciAccessManagerAccessSpec struct {\n\tHostVmciAccessManagerAccessSpec []HostVmciAccessManagerAccessSpec `xml:\"HostVmciAccessManagerAccessSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostVmciAccessManagerAccessSpec\"] = reflect.TypeOf((*ArrayOfHostVmciAccessManagerAccessSpec)(nil)).Elem()\n}\n\ntype ArrayOfHostVmfsRescanResult struct {\n\tHostVmfsRescanResult []HostVmfsRescanResult `xml:\"HostVmfsRescanResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostVmfsRescanResult\"] = reflect.TypeOf((*ArrayOfHostVmfsRescanResult)(nil)).Elem()\n}\n\ntype ArrayOfHostVsanInternalSystemCmmdsQuery struct {\n\tHostVsanInternalSystemCmmdsQuery []HostVsanInternalSystemCmmdsQuery `xml:\"HostVsanInternalSystemCmmdsQuery,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostVsanInternalSystemCmmdsQuery\"] = reflect.TypeOf((*ArrayOfHostVsanInternalSystemCmmdsQuery)(nil)).Elem()\n}\n\ntype ArrayOfHostVsanInternalSystemDeleteVsanObjectsResult struct {\n\tHostVsanInternalSystemDeleteVsanObjectsResult []HostVsanInternalSystemDeleteVsanObjectsResult `xml:\"HostVsanInternalSystemDeleteVsanObjectsResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostVsanInternalSystemDeleteVsanObjectsResult\"] = reflect.TypeOf((*ArrayOfHostVsanInternalSystemDeleteVsanObjectsResult)(nil)).Elem()\n}\n\ntype 
ArrayOfHostVsanInternalSystemVsanObjectOperationResult struct {\n\tHostVsanInternalSystemVsanObjectOperationResult []HostVsanInternalSystemVsanObjectOperationResult `xml:\"HostVsanInternalSystemVsanObjectOperationResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostVsanInternalSystemVsanObjectOperationResult\"] = reflect.TypeOf((*ArrayOfHostVsanInternalSystemVsanObjectOperationResult)(nil)).Elem()\n}\n\ntype ArrayOfHostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult struct {\n\tHostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult []HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult `xml:\"HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult\"] = reflect.TypeOf((*ArrayOfHostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult)(nil)).Elem()\n}\n\ntype ArrayOfHttpNfcLeaseDatastoreLeaseInfo struct {\n\tHttpNfcLeaseDatastoreLeaseInfo []HttpNfcLeaseDatastoreLeaseInfo `xml:\"HttpNfcLeaseDatastoreLeaseInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHttpNfcLeaseDatastoreLeaseInfo\"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseDatastoreLeaseInfo)(nil)).Elem()\n}\n\ntype ArrayOfHttpNfcLeaseDeviceUrl struct {\n\tHttpNfcLeaseDeviceUrl []HttpNfcLeaseDeviceUrl `xml:\"HttpNfcLeaseDeviceUrl,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHttpNfcLeaseDeviceUrl\"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseDeviceUrl)(nil)).Elem()\n}\n\ntype ArrayOfHttpNfcLeaseHostInfo struct {\n\tHttpNfcLeaseHostInfo []HttpNfcLeaseHostInfo `xml:\"HttpNfcLeaseHostInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHttpNfcLeaseHostInfo\"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseHostInfo)(nil)).Elem()\n}\n\ntype ArrayOfHttpNfcLeaseManifestEntry struct {\n\tHttpNfcLeaseManifestEntry []HttpNfcLeaseManifestEntry `xml:\"HttpNfcLeaseManifestEntry,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfHttpNfcLeaseManifestEntry\"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseManifestEntry)(nil)).Elem()\n}\n\ntype 
ArrayOfID struct {\n\tID []ID `xml:\"ID,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfID\"] = reflect.TypeOf((*ArrayOfID)(nil)).Elem()\n}\n\ntype ArrayOfImportOperationBulkFaultFaultOnImport struct {\n\tImportOperationBulkFaultFaultOnImport []ImportOperationBulkFaultFaultOnImport `xml:\"ImportOperationBulkFaultFaultOnImport,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfImportOperationBulkFaultFaultOnImport\"] = reflect.TypeOf((*ArrayOfImportOperationBulkFaultFaultOnImport)(nil)).Elem()\n}\n\ntype ArrayOfImportSpec struct {\n\tImportSpec []BaseImportSpec `xml:\"ImportSpec,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfImportSpec\"] = reflect.TypeOf((*ArrayOfImportSpec)(nil)).Elem()\n}\n\ntype ArrayOfInt struct {\n\tInt []int32 `xml:\"int,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfInt\"] = reflect.TypeOf((*ArrayOfInt)(nil)).Elem()\n}\n\ntype ArrayOfIoFilterHostIssue struct {\n\tIoFilterHostIssue []IoFilterHostIssue `xml:\"IoFilterHostIssue,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfIoFilterHostIssue\"] = reflect.TypeOf((*ArrayOfIoFilterHostIssue)(nil)).Elem()\n}\n\ntype ArrayOfIpPool struct {\n\tIpPool []IpPool `xml:\"IpPool,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfIpPool\"] = reflect.TypeOf((*ArrayOfIpPool)(nil)).Elem()\n}\n\ntype ArrayOfIpPoolAssociation struct {\n\tIpPoolAssociation []IpPoolAssociation `xml:\"IpPoolAssociation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfIpPoolAssociation\"] = reflect.TypeOf((*ArrayOfIpPoolAssociation)(nil)).Elem()\n}\n\ntype ArrayOfIpPoolManagerIpAllocation struct {\n\tIpPoolManagerIpAllocation []IpPoolManagerIpAllocation `xml:\"IpPoolManagerIpAllocation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfIpPoolManagerIpAllocation\"] = reflect.TypeOf((*ArrayOfIpPoolManagerIpAllocation)(nil)).Elem()\n}\n\ntype ArrayOfIscsiDependencyEntity struct {\n\tIscsiDependencyEntity []IscsiDependencyEntity `xml:\"IscsiDependencyEntity,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfIscsiDependencyEntity\"] = 
reflect.TypeOf((*ArrayOfIscsiDependencyEntity)(nil)).Elem()\n}\n\ntype ArrayOfIscsiPortInfo struct {\n\tIscsiPortInfo []IscsiPortInfo `xml:\"IscsiPortInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfIscsiPortInfo\"] = reflect.TypeOf((*ArrayOfIscsiPortInfo)(nil)).Elem()\n}\n\ntype ArrayOfKernelModuleInfo struct {\n\tKernelModuleInfo []KernelModuleInfo `xml:\"KernelModuleInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfKernelModuleInfo\"] = reflect.TypeOf((*ArrayOfKernelModuleInfo)(nil)).Elem()\n}\n\ntype ArrayOfKeyAnyValue struct {\n\tKeyAnyValue []KeyAnyValue `xml:\"KeyAnyValue,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfKeyAnyValue\"] = reflect.TypeOf((*ArrayOfKeyAnyValue)(nil)).Elem()\n}\n\ntype ArrayOfKeyValue struct {\n\tKeyValue []KeyValue `xml:\"KeyValue,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfKeyValue\"] = reflect.TypeOf((*ArrayOfKeyValue)(nil)).Elem()\n}\n\ntype ArrayOfKmipClusterInfo struct {\n\tKmipClusterInfo []KmipClusterInfo `xml:\"KmipClusterInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfKmipClusterInfo\"] = reflect.TypeOf((*ArrayOfKmipClusterInfo)(nil)).Elem()\n}\n\ntype ArrayOfKmipServerInfo struct {\n\tKmipServerInfo []KmipServerInfo `xml:\"KmipServerInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfKmipServerInfo\"] = reflect.TypeOf((*ArrayOfKmipServerInfo)(nil)).Elem()\n}\n\ntype ArrayOfLicenseAssignmentManagerLicenseAssignment struct {\n\tLicenseAssignmentManagerLicenseAssignment []LicenseAssignmentManagerLicenseAssignment `xml:\"LicenseAssignmentManagerLicenseAssignment,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfLicenseAssignmentManagerLicenseAssignment\"] = reflect.TypeOf((*ArrayOfLicenseAssignmentManagerLicenseAssignment)(nil)).Elem()\n}\n\ntype ArrayOfLicenseAvailabilityInfo struct {\n\tLicenseAvailabilityInfo []LicenseAvailabilityInfo `xml:\"LicenseAvailabilityInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfLicenseAvailabilityInfo\"] = reflect.TypeOf((*ArrayOfLicenseAvailabilityInfo)(nil)).Elem()\n}\n\ntype 
ArrayOfLicenseFeatureInfo struct {\n\tLicenseFeatureInfo []LicenseFeatureInfo `xml:\"LicenseFeatureInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfLicenseFeatureInfo\"] = reflect.TypeOf((*ArrayOfLicenseFeatureInfo)(nil)).Elem()\n}\n\ntype ArrayOfLicenseManagerLicenseInfo struct {\n\tLicenseManagerLicenseInfo []LicenseManagerLicenseInfo `xml:\"LicenseManagerLicenseInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfLicenseManagerLicenseInfo\"] = reflect.TypeOf((*ArrayOfLicenseManagerLicenseInfo)(nil)).Elem()\n}\n\ntype ArrayOfLicenseReservationInfo struct {\n\tLicenseReservationInfo []LicenseReservationInfo `xml:\"LicenseReservationInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfLicenseReservationInfo\"] = reflect.TypeOf((*ArrayOfLicenseReservationInfo)(nil)).Elem()\n}\n\ntype ArrayOfLocalizableMessage struct {\n\tLocalizableMessage []LocalizableMessage `xml:\"LocalizableMessage,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfLocalizableMessage\"] = reflect.TypeOf((*ArrayOfLocalizableMessage)(nil)).Elem()\n}\n\ntype ArrayOfLocalizationManagerMessageCatalog struct {\n\tLocalizationManagerMessageCatalog []LocalizationManagerMessageCatalog `xml:\"LocalizationManagerMessageCatalog,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfLocalizationManagerMessageCatalog\"] = reflect.TypeOf((*ArrayOfLocalizationManagerMessageCatalog)(nil)).Elem()\n}\n\ntype ArrayOfLong struct {\n\tLong []int64 `xml:\"long,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfLong\"] = reflect.TypeOf((*ArrayOfLong)(nil)).Elem()\n}\n\ntype ArrayOfManagedObjectReference struct {\n\tManagedObjectReference []ManagedObjectReference `xml:\"ManagedObjectReference,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfManagedObjectReference\"] = reflect.TypeOf((*ArrayOfManagedObjectReference)(nil)).Elem()\n}\n\ntype ArrayOfMethodActionArgument struct {\n\tMethodActionArgument []MethodActionArgument `xml:\"MethodActionArgument,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfMethodActionArgument\"] = 
reflect.TypeOf((*ArrayOfMethodActionArgument)(nil)).Elem()\n}\n\ntype ArrayOfMethodFault struct {\n\tMethodFault []BaseMethodFault `xml:\"MethodFault,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfMethodFault\"] = reflect.TypeOf((*ArrayOfMethodFault)(nil)).Elem()\n}\n\ntype ArrayOfMissingObject struct {\n\tMissingObject []MissingObject `xml:\"MissingObject,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfMissingObject\"] = reflect.TypeOf((*ArrayOfMissingObject)(nil)).Elem()\n}\n\ntype ArrayOfMissingProperty struct {\n\tMissingProperty []MissingProperty `xml:\"MissingProperty,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfMissingProperty\"] = reflect.TypeOf((*ArrayOfMissingProperty)(nil)).Elem()\n}\n\ntype ArrayOfMultipleCertificatesVerifyFaultThumbprintData struct {\n\tMultipleCertificatesVerifyFaultThumbprintData []MultipleCertificatesVerifyFaultThumbprintData `xml:\"MultipleCertificatesVerifyFaultThumbprintData,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfMultipleCertificatesVerifyFaultThumbprintData\"] = reflect.TypeOf((*ArrayOfMultipleCertificatesVerifyFaultThumbprintData)(nil)).Elem()\n}\n\ntype ArrayOfNasStorageProfile struct {\n\tNasStorageProfile []NasStorageProfile `xml:\"NasStorageProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfNasStorageProfile\"] = reflect.TypeOf((*ArrayOfNasStorageProfile)(nil)).Elem()\n}\n\ntype ArrayOfNetIpConfigInfoIpAddress struct {\n\tNetIpConfigInfoIpAddress []NetIpConfigInfoIpAddress `xml:\"NetIpConfigInfoIpAddress,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfNetIpConfigInfoIpAddress\"] = reflect.TypeOf((*ArrayOfNetIpConfigInfoIpAddress)(nil)).Elem()\n}\n\ntype ArrayOfNetIpConfigSpecIpAddressSpec struct {\n\tNetIpConfigSpecIpAddressSpec []NetIpConfigSpecIpAddressSpec `xml:\"NetIpConfigSpecIpAddressSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfNetIpConfigSpecIpAddressSpec\"] = reflect.TypeOf((*ArrayOfNetIpConfigSpecIpAddressSpec)(nil)).Elem()\n}\n\ntype ArrayOfNetIpRouteConfigInfoIpRoute struct 
{\n\tNetIpRouteConfigInfoIpRoute []NetIpRouteConfigInfoIpRoute `xml:\"NetIpRouteConfigInfoIpRoute,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfNetIpRouteConfigInfoIpRoute\"] = reflect.TypeOf((*ArrayOfNetIpRouteConfigInfoIpRoute)(nil)).Elem()\n}\n\ntype ArrayOfNetIpRouteConfigSpecIpRouteSpec struct {\n\tNetIpRouteConfigSpecIpRouteSpec []NetIpRouteConfigSpecIpRouteSpec `xml:\"NetIpRouteConfigSpecIpRouteSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfNetIpRouteConfigSpecIpRouteSpec\"] = reflect.TypeOf((*ArrayOfNetIpRouteConfigSpecIpRouteSpec)(nil)).Elem()\n}\n\ntype ArrayOfNetIpStackInfoDefaultRouter struct {\n\tNetIpStackInfoDefaultRouter []NetIpStackInfoDefaultRouter `xml:\"NetIpStackInfoDefaultRouter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfNetIpStackInfoDefaultRouter\"] = reflect.TypeOf((*ArrayOfNetIpStackInfoDefaultRouter)(nil)).Elem()\n}\n\ntype ArrayOfNetIpStackInfoNetToMedia struct {\n\tNetIpStackInfoNetToMedia []NetIpStackInfoNetToMedia `xml:\"NetIpStackInfoNetToMedia,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfNetIpStackInfoNetToMedia\"] = reflect.TypeOf((*ArrayOfNetIpStackInfoNetToMedia)(nil)).Elem()\n}\n\ntype ArrayOfNetStackInstanceProfile struct {\n\tNetStackInstanceProfile []NetStackInstanceProfile `xml:\"NetStackInstanceProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfNetStackInstanceProfile\"] = reflect.TypeOf((*ArrayOfNetStackInstanceProfile)(nil)).Elem()\n}\n\ntype ArrayOfNumericRange struct {\n\tNumericRange []NumericRange `xml:\"NumericRange,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfNumericRange\"] = reflect.TypeOf((*ArrayOfNumericRange)(nil)).Elem()\n}\n\ntype ArrayOfObjectContent struct {\n\tObjectContent []ObjectContent `xml:\"ObjectContent,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfObjectContent\"] = reflect.TypeOf((*ArrayOfObjectContent)(nil)).Elem()\n}\n\ntype ArrayOfObjectSpec struct {\n\tObjectSpec []ObjectSpec `xml:\"ObjectSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfObjectSpec\"] = 
reflect.TypeOf((*ArrayOfObjectSpec)(nil)).Elem()\n}\n\ntype ArrayOfObjectUpdate struct {\n\tObjectUpdate []ObjectUpdate `xml:\"ObjectUpdate,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfObjectUpdate\"] = reflect.TypeOf((*ArrayOfObjectUpdate)(nil)).Elem()\n}\n\ntype ArrayOfOpaqueNetworkTargetInfo struct {\n\tOpaqueNetworkTargetInfo []OpaqueNetworkTargetInfo `xml:\"OpaqueNetworkTargetInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfOpaqueNetworkTargetInfo\"] = reflect.TypeOf((*ArrayOfOpaqueNetworkTargetInfo)(nil)).Elem()\n}\n\ntype ArrayOfOptionDef struct {\n\tOptionDef []OptionDef `xml:\"OptionDef,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfOptionDef\"] = reflect.TypeOf((*ArrayOfOptionDef)(nil)).Elem()\n}\n\ntype ArrayOfOptionProfile struct {\n\tOptionProfile []OptionProfile `xml:\"OptionProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfOptionProfile\"] = reflect.TypeOf((*ArrayOfOptionProfile)(nil)).Elem()\n}\n\ntype ArrayOfOptionValue struct {\n\tOptionValue []BaseOptionValue `xml:\"OptionValue,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfOptionValue\"] = reflect.TypeOf((*ArrayOfOptionValue)(nil)).Elem()\n}\n\ntype ArrayOfOvfConsumerOstNode struct {\n\tOvfConsumerOstNode []OvfConsumerOstNode `xml:\"OvfConsumerOstNode,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfOvfConsumerOstNode\"] = reflect.TypeOf((*ArrayOfOvfConsumerOstNode)(nil)).Elem()\n}\n\ntype ArrayOfOvfConsumerOvfSection struct {\n\tOvfConsumerOvfSection []OvfConsumerOvfSection `xml:\"OvfConsumerOvfSection,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfOvfConsumerOvfSection\"] = reflect.TypeOf((*ArrayOfOvfConsumerOvfSection)(nil)).Elem()\n}\n\ntype ArrayOfOvfDeploymentOption struct {\n\tOvfDeploymentOption []OvfDeploymentOption `xml:\"OvfDeploymentOption,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfOvfDeploymentOption\"] = reflect.TypeOf((*ArrayOfOvfDeploymentOption)(nil)).Elem()\n}\n\ntype ArrayOfOvfFile struct {\n\tOvfFile []OvfFile `xml:\"OvfFile,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"ArrayOfOvfFile\"] = reflect.TypeOf((*ArrayOfOvfFile)(nil)).Elem()\n}\n\ntype ArrayOfOvfFileItem struct {\n\tOvfFileItem []OvfFileItem `xml:\"OvfFileItem,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfOvfFileItem\"] = reflect.TypeOf((*ArrayOfOvfFileItem)(nil)).Elem()\n}\n\ntype ArrayOfOvfNetworkInfo struct {\n\tOvfNetworkInfo []OvfNetworkInfo `xml:\"OvfNetworkInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfOvfNetworkInfo\"] = reflect.TypeOf((*ArrayOfOvfNetworkInfo)(nil)).Elem()\n}\n\ntype ArrayOfOvfNetworkMapping struct {\n\tOvfNetworkMapping []OvfNetworkMapping `xml:\"OvfNetworkMapping,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfOvfNetworkMapping\"] = reflect.TypeOf((*ArrayOfOvfNetworkMapping)(nil)).Elem()\n}\n\ntype ArrayOfOvfOptionInfo struct {\n\tOvfOptionInfo []OvfOptionInfo `xml:\"OvfOptionInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfOvfOptionInfo\"] = reflect.TypeOf((*ArrayOfOvfOptionInfo)(nil)).Elem()\n}\n\ntype ArrayOfOvfResourceMap struct {\n\tOvfResourceMap []OvfResourceMap `xml:\"OvfResourceMap,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfOvfResourceMap\"] = reflect.TypeOf((*ArrayOfOvfResourceMap)(nil)).Elem()\n}\n\ntype ArrayOfPerfCounterInfo struct {\n\tPerfCounterInfo []PerfCounterInfo `xml:\"PerfCounterInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPerfCounterInfo\"] = reflect.TypeOf((*ArrayOfPerfCounterInfo)(nil)).Elem()\n}\n\ntype ArrayOfPerfEntityMetricBase struct {\n\tPerfEntityMetricBase []BasePerfEntityMetricBase `xml:\"PerfEntityMetricBase,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPerfEntityMetricBase\"] = reflect.TypeOf((*ArrayOfPerfEntityMetricBase)(nil)).Elem()\n}\n\ntype ArrayOfPerfInterval struct {\n\tPerfInterval []PerfInterval `xml:\"PerfInterval,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPerfInterval\"] = reflect.TypeOf((*ArrayOfPerfInterval)(nil)).Elem()\n}\n\ntype ArrayOfPerfMetricId struct {\n\tPerfMetricId []PerfMetricId `xml:\"PerfMetricId,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"ArrayOfPerfMetricId\"] = reflect.TypeOf((*ArrayOfPerfMetricId)(nil)).Elem()\n}\n\ntype ArrayOfPerfMetricSeries struct {\n\tPerfMetricSeries []BasePerfMetricSeries `xml:\"PerfMetricSeries,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPerfMetricSeries\"] = reflect.TypeOf((*ArrayOfPerfMetricSeries)(nil)).Elem()\n}\n\ntype ArrayOfPerfMetricSeriesCSV struct {\n\tPerfMetricSeriesCSV []PerfMetricSeriesCSV `xml:\"PerfMetricSeriesCSV,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPerfMetricSeriesCSV\"] = reflect.TypeOf((*ArrayOfPerfMetricSeriesCSV)(nil)).Elem()\n}\n\ntype ArrayOfPerfQuerySpec struct {\n\tPerfQuerySpec []PerfQuerySpec `xml:\"PerfQuerySpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPerfQuerySpec\"] = reflect.TypeOf((*ArrayOfPerfQuerySpec)(nil)).Elem()\n}\n\ntype ArrayOfPerfSampleInfo struct {\n\tPerfSampleInfo []PerfSampleInfo `xml:\"PerfSampleInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPerfSampleInfo\"] = reflect.TypeOf((*ArrayOfPerfSampleInfo)(nil)).Elem()\n}\n\ntype ArrayOfPerformanceManagerCounterLevelMapping struct {\n\tPerformanceManagerCounterLevelMapping []PerformanceManagerCounterLevelMapping `xml:\"PerformanceManagerCounterLevelMapping,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPerformanceManagerCounterLevelMapping\"] = reflect.TypeOf((*ArrayOfPerformanceManagerCounterLevelMapping)(nil)).Elem()\n}\n\ntype ArrayOfPermission struct {\n\tPermission []Permission `xml:\"Permission,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPermission\"] = reflect.TypeOf((*ArrayOfPermission)(nil)).Elem()\n}\n\ntype ArrayOfPermissionProfile struct {\n\tPermissionProfile []PermissionProfile `xml:\"PermissionProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPermissionProfile\"] = reflect.TypeOf((*ArrayOfPermissionProfile)(nil)).Elem()\n}\n\ntype ArrayOfPhysicalNic struct {\n\tPhysicalNic []PhysicalNic `xml:\"PhysicalNic,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPhysicalNic\"] = 
reflect.TypeOf((*ArrayOfPhysicalNic)(nil)).Elem()\n}\n\ntype ArrayOfPhysicalNicConfig struct {\n\tPhysicalNicConfig []PhysicalNicConfig `xml:\"PhysicalNicConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPhysicalNicConfig\"] = reflect.TypeOf((*ArrayOfPhysicalNicConfig)(nil)).Elem()\n}\n\ntype ArrayOfPhysicalNicHintInfo struct {\n\tPhysicalNicHintInfo []PhysicalNicHintInfo `xml:\"PhysicalNicHintInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPhysicalNicHintInfo\"] = reflect.TypeOf((*ArrayOfPhysicalNicHintInfo)(nil)).Elem()\n}\n\ntype ArrayOfPhysicalNicIpHint struct {\n\tPhysicalNicIpHint []PhysicalNicIpHint `xml:\"PhysicalNicIpHint,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPhysicalNicIpHint\"] = reflect.TypeOf((*ArrayOfPhysicalNicIpHint)(nil)).Elem()\n}\n\ntype ArrayOfPhysicalNicLinkInfo struct {\n\tPhysicalNicLinkInfo []PhysicalNicLinkInfo `xml:\"PhysicalNicLinkInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPhysicalNicLinkInfo\"] = reflect.TypeOf((*ArrayOfPhysicalNicLinkInfo)(nil)).Elem()\n}\n\ntype ArrayOfPhysicalNicNameHint struct {\n\tPhysicalNicNameHint []PhysicalNicNameHint `xml:\"PhysicalNicNameHint,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPhysicalNicNameHint\"] = reflect.TypeOf((*ArrayOfPhysicalNicNameHint)(nil)).Elem()\n}\n\ntype ArrayOfPhysicalNicProfile struct {\n\tPhysicalNicProfile []PhysicalNicProfile `xml:\"PhysicalNicProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPhysicalNicProfile\"] = reflect.TypeOf((*ArrayOfPhysicalNicProfile)(nil)).Elem()\n}\n\ntype ArrayOfPlacementAffinityRule struct {\n\tPlacementAffinityRule []PlacementAffinityRule `xml:\"PlacementAffinityRule,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPlacementAffinityRule\"] = reflect.TypeOf((*ArrayOfPlacementAffinityRule)(nil)).Elem()\n}\n\ntype ArrayOfPlacementSpec struct {\n\tPlacementSpec []PlacementSpec `xml:\"PlacementSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPlacementSpec\"] = reflect.TypeOf((*ArrayOfPlacementSpec)(nil)).Elem()\n}\n\ntype 
ArrayOfPnicUplinkProfile struct {\n\tPnicUplinkProfile []PnicUplinkProfile `xml:\"PnicUplinkProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPnicUplinkProfile\"] = reflect.TypeOf((*ArrayOfPnicUplinkProfile)(nil)).Elem()\n}\n\ntype ArrayOfPodDiskLocator struct {\n\tPodDiskLocator []PodDiskLocator `xml:\"PodDiskLocator,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPodDiskLocator\"] = reflect.TypeOf((*ArrayOfPodDiskLocator)(nil)).Elem()\n}\n\ntype ArrayOfPolicyOption struct {\n\tPolicyOption []BasePolicyOption `xml:\"PolicyOption,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPolicyOption\"] = reflect.TypeOf((*ArrayOfPolicyOption)(nil)).Elem()\n}\n\ntype ArrayOfPrivilegeAvailability struct {\n\tPrivilegeAvailability []PrivilegeAvailability `xml:\"PrivilegeAvailability,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPrivilegeAvailability\"] = reflect.TypeOf((*ArrayOfPrivilegeAvailability)(nil)).Elem()\n}\n\ntype ArrayOfProductComponentInfo struct {\n\tProductComponentInfo []ProductComponentInfo `xml:\"ProductComponentInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfProductComponentInfo\"] = reflect.TypeOf((*ArrayOfProductComponentInfo)(nil)).Elem()\n}\n\ntype ArrayOfProfileApplyProfileProperty struct {\n\tProfileApplyProfileProperty []ProfileApplyProfileProperty `xml:\"ProfileApplyProfileProperty,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfProfileApplyProfileProperty\"] = reflect.TypeOf((*ArrayOfProfileApplyProfileProperty)(nil)).Elem()\n}\n\ntype ArrayOfProfileDeferredPolicyOptionParameter struct {\n\tProfileDeferredPolicyOptionParameter []ProfileDeferredPolicyOptionParameter `xml:\"ProfileDeferredPolicyOptionParameter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfProfileDeferredPolicyOptionParameter\"] = reflect.TypeOf((*ArrayOfProfileDeferredPolicyOptionParameter)(nil)).Elem()\n}\n\ntype ArrayOfProfileDescriptionSection struct {\n\tProfileDescriptionSection []ProfileDescriptionSection `xml:\"ProfileDescriptionSection,omitempty\"`\n}\n\nfunc 
init() {\n\tt[\"ArrayOfProfileDescriptionSection\"] = reflect.TypeOf((*ArrayOfProfileDescriptionSection)(nil)).Elem()\n}\n\ntype ArrayOfProfileExecuteError struct {\n\tProfileExecuteError []ProfileExecuteError `xml:\"ProfileExecuteError,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfProfileExecuteError\"] = reflect.TypeOf((*ArrayOfProfileExecuteError)(nil)).Elem()\n}\n\ntype ArrayOfProfileExpression struct {\n\tProfileExpression []BaseProfileExpression `xml:\"ProfileExpression,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfProfileExpression\"] = reflect.TypeOf((*ArrayOfProfileExpression)(nil)).Elem()\n}\n\ntype ArrayOfProfileExpressionMetadata struct {\n\tProfileExpressionMetadata []ProfileExpressionMetadata `xml:\"ProfileExpressionMetadata,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfProfileExpressionMetadata\"] = reflect.TypeOf((*ArrayOfProfileExpressionMetadata)(nil)).Elem()\n}\n\ntype ArrayOfProfileMetadata struct {\n\tProfileMetadata []ProfileMetadata `xml:\"ProfileMetadata,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfProfileMetadata\"] = reflect.TypeOf((*ArrayOfProfileMetadata)(nil)).Elem()\n}\n\ntype ArrayOfProfileMetadataProfileSortSpec struct {\n\tProfileMetadataProfileSortSpec []ProfileMetadataProfileSortSpec `xml:\"ProfileMetadataProfileSortSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfProfileMetadataProfileSortSpec\"] = reflect.TypeOf((*ArrayOfProfileMetadataProfileSortSpec)(nil)).Elem()\n}\n\ntype ArrayOfProfileParameterMetadata struct {\n\tProfileParameterMetadata []ProfileParameterMetadata `xml:\"ProfileParameterMetadata,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfProfileParameterMetadata\"] = reflect.TypeOf((*ArrayOfProfileParameterMetadata)(nil)).Elem()\n}\n\ntype ArrayOfProfilePolicy struct {\n\tProfilePolicy []ProfilePolicy `xml:\"ProfilePolicy,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfProfilePolicy\"] = reflect.TypeOf((*ArrayOfProfilePolicy)(nil)).Elem()\n}\n\ntype ArrayOfProfilePolicyMetadata struct 
{\n\tProfilePolicyMetadata []ProfilePolicyMetadata `xml:\"ProfilePolicyMetadata,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfProfilePolicyMetadata\"] = reflect.TypeOf((*ArrayOfProfilePolicyMetadata)(nil)).Elem()\n}\n\ntype ArrayOfProfilePolicyOptionMetadata struct {\n\tProfilePolicyOptionMetadata []BaseProfilePolicyOptionMetadata `xml:\"ProfilePolicyOptionMetadata,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfProfilePolicyOptionMetadata\"] = reflect.TypeOf((*ArrayOfProfilePolicyOptionMetadata)(nil)).Elem()\n}\n\ntype ArrayOfProfileProfileStructureProperty struct {\n\tProfileProfileStructureProperty []ProfileProfileStructureProperty `xml:\"ProfileProfileStructureProperty,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfProfileProfileStructureProperty\"] = reflect.TypeOf((*ArrayOfProfileProfileStructureProperty)(nil)).Elem()\n}\n\ntype ArrayOfProfilePropertyPath struct {\n\tProfilePropertyPath []ProfilePropertyPath `xml:\"ProfilePropertyPath,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfProfilePropertyPath\"] = reflect.TypeOf((*ArrayOfProfilePropertyPath)(nil)).Elem()\n}\n\ntype ArrayOfProfileUpdateFailedUpdateFailure struct {\n\tProfileUpdateFailedUpdateFailure []ProfileUpdateFailedUpdateFailure `xml:\"ProfileUpdateFailedUpdateFailure,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfProfileUpdateFailedUpdateFailure\"] = reflect.TypeOf((*ArrayOfProfileUpdateFailedUpdateFailure)(nil)).Elem()\n}\n\ntype ArrayOfPropertyChange struct {\n\tPropertyChange []PropertyChange `xml:\"PropertyChange,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPropertyChange\"] = reflect.TypeOf((*ArrayOfPropertyChange)(nil)).Elem()\n}\n\ntype ArrayOfPropertyFilterSpec struct {\n\tPropertyFilterSpec []PropertyFilterSpec `xml:\"PropertyFilterSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPropertyFilterSpec\"] = reflect.TypeOf((*ArrayOfPropertyFilterSpec)(nil)).Elem()\n}\n\ntype ArrayOfPropertyFilterUpdate struct {\n\tPropertyFilterUpdate []PropertyFilterUpdate 
`xml:\"PropertyFilterUpdate,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPropertyFilterUpdate\"] = reflect.TypeOf((*ArrayOfPropertyFilterUpdate)(nil)).Elem()\n}\n\ntype ArrayOfPropertySpec struct {\n\tPropertySpec []PropertySpec `xml:\"PropertySpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfPropertySpec\"] = reflect.TypeOf((*ArrayOfPropertySpec)(nil)).Elem()\n}\n\ntype ArrayOfRelation struct {\n\tRelation []Relation `xml:\"Relation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfRelation\"] = reflect.TypeOf((*ArrayOfRelation)(nil)).Elem()\n}\n\ntype ArrayOfReplicationInfoDiskSettings struct {\n\tReplicationInfoDiskSettings []ReplicationInfoDiskSettings `xml:\"ReplicationInfoDiskSettings,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfReplicationInfoDiskSettings\"] = reflect.TypeOf((*ArrayOfReplicationInfoDiskSettings)(nil)).Elem()\n}\n\ntype ArrayOfResourceConfigSpec struct {\n\tResourceConfigSpec []ResourceConfigSpec `xml:\"ResourceConfigSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfResourceConfigSpec\"] = reflect.TypeOf((*ArrayOfResourceConfigSpec)(nil)).Elem()\n}\n\ntype ArrayOfScheduledTaskDetail struct {\n\tScheduledTaskDetail []ScheduledTaskDetail `xml:\"ScheduledTaskDetail,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfScheduledTaskDetail\"] = reflect.TypeOf((*ArrayOfScheduledTaskDetail)(nil)).Elem()\n}\n\ntype ArrayOfScsiLun struct {\n\tScsiLun []BaseScsiLun `xml:\"ScsiLun,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfScsiLun\"] = reflect.TypeOf((*ArrayOfScsiLun)(nil)).Elem()\n}\n\ntype ArrayOfScsiLunDescriptor struct {\n\tScsiLunDescriptor []ScsiLunDescriptor `xml:\"ScsiLunDescriptor,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfScsiLunDescriptor\"] = reflect.TypeOf((*ArrayOfScsiLunDescriptor)(nil)).Elem()\n}\n\ntype ArrayOfScsiLunDurableName struct {\n\tScsiLunDurableName []ScsiLunDurableName `xml:\"ScsiLunDurableName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfScsiLunDurableName\"] = 
reflect.TypeOf((*ArrayOfScsiLunDurableName)(nil)).Elem()\n}\n\ntype ArrayOfSelectionSet struct {\n\tSelectionSet []BaseSelectionSet `xml:\"SelectionSet,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfSelectionSet\"] = reflect.TypeOf((*ArrayOfSelectionSet)(nil)).Elem()\n}\n\ntype ArrayOfSelectionSpec struct {\n\tSelectionSpec []BaseSelectionSpec `xml:\"SelectionSpec,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfSelectionSpec\"] = reflect.TypeOf((*ArrayOfSelectionSpec)(nil)).Elem()\n}\n\ntype ArrayOfServiceConsolePortGroupProfile struct {\n\tServiceConsolePortGroupProfile []ServiceConsolePortGroupProfile `xml:\"ServiceConsolePortGroupProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfServiceConsolePortGroupProfile\"] = reflect.TypeOf((*ArrayOfServiceConsolePortGroupProfile)(nil)).Elem()\n}\n\ntype ArrayOfServiceLocator struct {\n\tServiceLocator []ServiceLocator `xml:\"ServiceLocator,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfServiceLocator\"] = reflect.TypeOf((*ArrayOfServiceLocator)(nil)).Elem()\n}\n\ntype ArrayOfServiceManagerServiceInfo struct {\n\tServiceManagerServiceInfo []ServiceManagerServiceInfo `xml:\"ServiceManagerServiceInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfServiceManagerServiceInfo\"] = reflect.TypeOf((*ArrayOfServiceManagerServiceInfo)(nil)).Elem()\n}\n\ntype ArrayOfServiceProfile struct {\n\tServiceProfile []ServiceProfile `xml:\"ServiceProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfServiceProfile\"] = reflect.TypeOf((*ArrayOfServiceProfile)(nil)).Elem()\n}\n\ntype ArrayOfShort struct {\n\tShort []int16 `xml:\"short,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfShort\"] = reflect.TypeOf((*ArrayOfShort)(nil)).Elem()\n}\n\ntype ArrayOfSoftwarePackage struct {\n\tSoftwarePackage []SoftwarePackage `xml:\"SoftwarePackage,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfSoftwarePackage\"] = reflect.TypeOf((*ArrayOfSoftwarePackage)(nil)).Elem()\n}\n\ntype ArrayOfStaticRouteProfile struct {\n\tStaticRouteProfile 
[]StaticRouteProfile `xml:\"StaticRouteProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfStaticRouteProfile\"] = reflect.TypeOf((*ArrayOfStaticRouteProfile)(nil)).Elem()\n}\n\ntype ArrayOfStorageDrsOptionSpec struct {\n\tStorageDrsOptionSpec []StorageDrsOptionSpec `xml:\"StorageDrsOptionSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfStorageDrsOptionSpec\"] = reflect.TypeOf((*ArrayOfStorageDrsOptionSpec)(nil)).Elem()\n}\n\ntype ArrayOfStorageDrsPlacementRankVmSpec struct {\n\tStorageDrsPlacementRankVmSpec []StorageDrsPlacementRankVmSpec `xml:\"StorageDrsPlacementRankVmSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfStorageDrsPlacementRankVmSpec\"] = reflect.TypeOf((*ArrayOfStorageDrsPlacementRankVmSpec)(nil)).Elem()\n}\n\ntype ArrayOfStorageDrsVmConfigInfo struct {\n\tStorageDrsVmConfigInfo []StorageDrsVmConfigInfo `xml:\"StorageDrsVmConfigInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfStorageDrsVmConfigInfo\"] = reflect.TypeOf((*ArrayOfStorageDrsVmConfigInfo)(nil)).Elem()\n}\n\ntype ArrayOfStorageDrsVmConfigSpec struct {\n\tStorageDrsVmConfigSpec []StorageDrsVmConfigSpec `xml:\"StorageDrsVmConfigSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfStorageDrsVmConfigSpec\"] = reflect.TypeOf((*ArrayOfStorageDrsVmConfigSpec)(nil)).Elem()\n}\n\ntype ArrayOfStoragePerformanceSummary struct {\n\tStoragePerformanceSummary []StoragePerformanceSummary `xml:\"StoragePerformanceSummary,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfStoragePerformanceSummary\"] = reflect.TypeOf((*ArrayOfStoragePerformanceSummary)(nil)).Elem()\n}\n\ntype ArrayOfStorageRequirement struct {\n\tStorageRequirement []StorageRequirement `xml:\"StorageRequirement,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfStorageRequirement\"] = reflect.TypeOf((*ArrayOfStorageRequirement)(nil)).Elem()\n}\n\ntype ArrayOfString struct {\n\tString []string `xml:\"string,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfString\"] = reflect.TypeOf((*ArrayOfString)(nil)).Elem()\n}\n\ntype 
ArrayOfStructuredCustomizations struct {\n\tStructuredCustomizations []StructuredCustomizations `xml:\"StructuredCustomizations,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfStructuredCustomizations\"] = reflect.TypeOf((*ArrayOfStructuredCustomizations)(nil)).Elem()\n}\n\ntype ArrayOfSystemEventInfo struct {\n\tSystemEventInfo []SystemEventInfo `xml:\"SystemEventInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfSystemEventInfo\"] = reflect.TypeOf((*ArrayOfSystemEventInfo)(nil)).Elem()\n}\n\ntype ArrayOfTag struct {\n\tTag []Tag `xml:\"Tag,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfTag\"] = reflect.TypeOf((*ArrayOfTag)(nil)).Elem()\n}\n\ntype ArrayOfTaskInfo struct {\n\tTaskInfo []TaskInfo `xml:\"TaskInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfTaskInfo\"] = reflect.TypeOf((*ArrayOfTaskInfo)(nil)).Elem()\n}\n\ntype ArrayOfTaskInfoState struct {\n\tTaskInfoState []TaskInfoState `xml:\"TaskInfoState,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfTaskInfoState\"] = reflect.TypeOf((*ArrayOfTaskInfoState)(nil)).Elem()\n}\n\ntype ArrayOfTypeDescription struct {\n\tTypeDescription []BaseTypeDescription `xml:\"TypeDescription,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfTypeDescription\"] = reflect.TypeOf((*ArrayOfTypeDescription)(nil)).Elem()\n}\n\ntype ArrayOfUpdateVirtualMachineFilesResultFailedVmFileInfo struct {\n\tUpdateVirtualMachineFilesResultFailedVmFileInfo []UpdateVirtualMachineFilesResultFailedVmFileInfo `xml:\"UpdateVirtualMachineFilesResultFailedVmFileInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfUpdateVirtualMachineFilesResultFailedVmFileInfo\"] = reflect.TypeOf((*ArrayOfUpdateVirtualMachineFilesResultFailedVmFileInfo)(nil)).Elem()\n}\n\ntype ArrayOfUsbScanCodeSpecKeyEvent struct {\n\tUsbScanCodeSpecKeyEvent []UsbScanCodeSpecKeyEvent `xml:\"UsbScanCodeSpecKeyEvent,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfUsbScanCodeSpecKeyEvent\"] = reflect.TypeOf((*ArrayOfUsbScanCodeSpecKeyEvent)(nil)).Elem()\n}\n\ntype 
ArrayOfUserGroupProfile struct {\n\tUserGroupProfile []UserGroupProfile `xml:\"UserGroupProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfUserGroupProfile\"] = reflect.TypeOf((*ArrayOfUserGroupProfile)(nil)).Elem()\n}\n\ntype ArrayOfUserPrivilegeResult struct {\n\tUserPrivilegeResult []UserPrivilegeResult `xml:\"UserPrivilegeResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfUserPrivilegeResult\"] = reflect.TypeOf((*ArrayOfUserPrivilegeResult)(nil)).Elem()\n}\n\ntype ArrayOfUserProfile struct {\n\tUserProfile []UserProfile `xml:\"UserProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfUserProfile\"] = reflect.TypeOf((*ArrayOfUserProfile)(nil)).Elem()\n}\n\ntype ArrayOfUserSearchResult struct {\n\tUserSearchResult []BaseUserSearchResult `xml:\"UserSearchResult,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfUserSearchResult\"] = reflect.TypeOf((*ArrayOfUserSearchResult)(nil)).Elem()\n}\n\ntype ArrayOfUserSession struct {\n\tUserSession []UserSession `xml:\"UserSession,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfUserSession\"] = reflect.TypeOf((*ArrayOfUserSession)(nil)).Elem()\n}\n\ntype ArrayOfVASAStorageArray struct {\n\tVASAStorageArray []VASAStorageArray `xml:\"VASAStorageArray,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVASAStorageArray\"] = reflect.TypeOf((*ArrayOfVASAStorageArray)(nil)).Elem()\n}\n\ntype ArrayOfVAppCloneSpecNetworkMappingPair struct {\n\tVAppCloneSpecNetworkMappingPair []VAppCloneSpecNetworkMappingPair `xml:\"VAppCloneSpecNetworkMappingPair,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVAppCloneSpecNetworkMappingPair\"] = reflect.TypeOf((*ArrayOfVAppCloneSpecNetworkMappingPair)(nil)).Elem()\n}\n\ntype ArrayOfVAppCloneSpecResourceMap struct {\n\tVAppCloneSpecResourceMap []VAppCloneSpecResourceMap `xml:\"VAppCloneSpecResourceMap,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVAppCloneSpecResourceMap\"] = reflect.TypeOf((*ArrayOfVAppCloneSpecResourceMap)(nil)).Elem()\n}\n\ntype ArrayOfVAppEntityConfigInfo struct 
{\n\tVAppEntityConfigInfo []VAppEntityConfigInfo `xml:\"VAppEntityConfigInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVAppEntityConfigInfo\"] = reflect.TypeOf((*ArrayOfVAppEntityConfigInfo)(nil)).Elem()\n}\n\ntype ArrayOfVAppOvfSectionInfo struct {\n\tVAppOvfSectionInfo []VAppOvfSectionInfo `xml:\"VAppOvfSectionInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVAppOvfSectionInfo\"] = reflect.TypeOf((*ArrayOfVAppOvfSectionInfo)(nil)).Elem()\n}\n\ntype ArrayOfVAppOvfSectionSpec struct {\n\tVAppOvfSectionSpec []VAppOvfSectionSpec `xml:\"VAppOvfSectionSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVAppOvfSectionSpec\"] = reflect.TypeOf((*ArrayOfVAppOvfSectionSpec)(nil)).Elem()\n}\n\ntype ArrayOfVAppProductInfo struct {\n\tVAppProductInfo []VAppProductInfo `xml:\"VAppProductInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVAppProductInfo\"] = reflect.TypeOf((*ArrayOfVAppProductInfo)(nil)).Elem()\n}\n\ntype ArrayOfVAppProductSpec struct {\n\tVAppProductSpec []VAppProductSpec `xml:\"VAppProductSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVAppProductSpec\"] = reflect.TypeOf((*ArrayOfVAppProductSpec)(nil)).Elem()\n}\n\ntype ArrayOfVAppPropertyInfo struct {\n\tVAppPropertyInfo []VAppPropertyInfo `xml:\"VAppPropertyInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVAppPropertyInfo\"] = reflect.TypeOf((*ArrayOfVAppPropertyInfo)(nil)).Elem()\n}\n\ntype ArrayOfVAppPropertySpec struct {\n\tVAppPropertySpec []VAppPropertySpec `xml:\"VAppPropertySpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVAppPropertySpec\"] = reflect.TypeOf((*ArrayOfVAppPropertySpec)(nil)).Elem()\n}\n\ntype ArrayOfVMwareDVSPvlanConfigSpec struct {\n\tVMwareDVSPvlanConfigSpec []VMwareDVSPvlanConfigSpec `xml:\"VMwareDVSPvlanConfigSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVMwareDVSPvlanConfigSpec\"] = reflect.TypeOf((*ArrayOfVMwareDVSPvlanConfigSpec)(nil)).Elem()\n}\n\ntype ArrayOfVMwareDVSPvlanMapEntry struct {\n\tVMwareDVSPvlanMapEntry []VMwareDVSPvlanMapEntry 
`xml:\"VMwareDVSPvlanMapEntry,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVMwareDVSPvlanMapEntry\"] = reflect.TypeOf((*ArrayOfVMwareDVSPvlanMapEntry)(nil)).Elem()\n}\n\ntype ArrayOfVMwareDVSVspanConfigSpec struct {\n\tVMwareDVSVspanConfigSpec []VMwareDVSVspanConfigSpec `xml:\"VMwareDVSVspanConfigSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVMwareDVSVspanConfigSpec\"] = reflect.TypeOf((*ArrayOfVMwareDVSVspanConfigSpec)(nil)).Elem()\n}\n\ntype ArrayOfVMwareDvsLacpGroupConfig struct {\n\tVMwareDvsLacpGroupConfig []VMwareDvsLacpGroupConfig `xml:\"VMwareDvsLacpGroupConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVMwareDvsLacpGroupConfig\"] = reflect.TypeOf((*ArrayOfVMwareDvsLacpGroupConfig)(nil)).Elem()\n}\n\ntype ArrayOfVMwareDvsLacpGroupSpec struct {\n\tVMwareDvsLacpGroupSpec []VMwareDvsLacpGroupSpec `xml:\"VMwareDvsLacpGroupSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVMwareDvsLacpGroupSpec\"] = reflect.TypeOf((*ArrayOfVMwareDvsLacpGroupSpec)(nil)).Elem()\n}\n\ntype ArrayOfVMwareVspanSession struct {\n\tVMwareVspanSession []VMwareVspanSession `xml:\"VMwareVspanSession,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVMwareVspanSession\"] = reflect.TypeOf((*ArrayOfVMwareVspanSession)(nil)).Elem()\n}\n\ntype ArrayOfVVolHostPE struct {\n\tVVolHostPE []VVolHostPE `xml:\"VVolHostPE,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVVolHostPE\"] = reflect.TypeOf((*ArrayOfVVolHostPE)(nil)).Elem()\n}\n\ntype ArrayOfVVolVmConfigFileUpdateResultFailedVmConfigFileInfo struct {\n\tVVolVmConfigFileUpdateResultFailedVmConfigFileInfo []VVolVmConfigFileUpdateResultFailedVmConfigFileInfo `xml:\"VVolVmConfigFileUpdateResultFailedVmConfigFileInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVVolVmConfigFileUpdateResultFailedVmConfigFileInfo\"] = reflect.TypeOf((*ArrayOfVVolVmConfigFileUpdateResultFailedVmConfigFileInfo)(nil)).Elem()\n}\n\ntype ArrayOfVchaNodeRuntimeInfo struct {\n\tVchaNodeRuntimeInfo []VchaNodeRuntimeInfo 
`xml:\"VchaNodeRuntimeInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVchaNodeRuntimeInfo\"] = reflect.TypeOf((*ArrayOfVchaNodeRuntimeInfo)(nil)).Elem()\n}\n\ntype ArrayOfVimVasaProviderInfo struct {\n\tVimVasaProviderInfo []VimVasaProviderInfo `xml:\"VimVasaProviderInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVimVasaProviderInfo\"] = reflect.TypeOf((*ArrayOfVimVasaProviderInfo)(nil)).Elem()\n}\n\ntype ArrayOfVimVasaProviderStatePerArray struct {\n\tVimVasaProviderStatePerArray []VimVasaProviderStatePerArray `xml:\"VimVasaProviderStatePerArray,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVimVasaProviderStatePerArray\"] = reflect.TypeOf((*ArrayOfVimVasaProviderStatePerArray)(nil)).Elem()\n}\n\ntype ArrayOfVirtualAppLinkInfo struct {\n\tVirtualAppLinkInfo []VirtualAppLinkInfo `xml:\"VirtualAppLinkInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualAppLinkInfo\"] = reflect.TypeOf((*ArrayOfVirtualAppLinkInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualDevice struct {\n\tVirtualDevice []BaseVirtualDevice `xml:\"VirtualDevice,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualDevice\"] = reflect.TypeOf((*ArrayOfVirtualDevice)(nil)).Elem()\n}\n\ntype ArrayOfVirtualDeviceBackingOption struct {\n\tVirtualDeviceBackingOption []BaseVirtualDeviceBackingOption `xml:\"VirtualDeviceBackingOption,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualDeviceBackingOption\"] = reflect.TypeOf((*ArrayOfVirtualDeviceBackingOption)(nil)).Elem()\n}\n\ntype ArrayOfVirtualDeviceConfigSpec struct {\n\tVirtualDeviceConfigSpec []BaseVirtualDeviceConfigSpec `xml:\"VirtualDeviceConfigSpec,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualDeviceConfigSpec\"] = reflect.TypeOf((*ArrayOfVirtualDeviceConfigSpec)(nil)).Elem()\n}\n\ntype ArrayOfVirtualDeviceOption struct {\n\tVirtualDeviceOption []BaseVirtualDeviceOption `xml:\"VirtualDeviceOption,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualDeviceOption\"] = 
reflect.TypeOf((*ArrayOfVirtualDeviceOption)(nil)).Elem()\n}\n\ntype ArrayOfVirtualDisk struct {\n\tVirtualDisk []VirtualDisk `xml:\"VirtualDisk,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualDisk\"] = reflect.TypeOf((*ArrayOfVirtualDisk)(nil)).Elem()\n}\n\ntype ArrayOfVirtualDiskDeltaDiskFormatsSupported struct {\n\tVirtualDiskDeltaDiskFormatsSupported []VirtualDiskDeltaDiskFormatsSupported `xml:\"VirtualDiskDeltaDiskFormatsSupported,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualDiskDeltaDiskFormatsSupported\"] = reflect.TypeOf((*ArrayOfVirtualDiskDeltaDiskFormatsSupported)(nil)).Elem()\n}\n\ntype ArrayOfVirtualDiskId struct {\n\tVirtualDiskId []VirtualDiskId `xml:\"VirtualDiskId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualDiskId\"] = reflect.TypeOf((*ArrayOfVirtualDiskId)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineBootOptionsBootableDevice struct {\n\tVirtualMachineBootOptionsBootableDevice []BaseVirtualMachineBootOptionsBootableDevice `xml:\"VirtualMachineBootOptionsBootableDevice,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineBootOptionsBootableDevice\"] = reflect.TypeOf((*ArrayOfVirtualMachineBootOptionsBootableDevice)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineCdromInfo struct {\n\tVirtualMachineCdromInfo []VirtualMachineCdromInfo `xml:\"VirtualMachineCdromInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineCdromInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachineCdromInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineConfigInfoDatastoreUrlPair struct {\n\tVirtualMachineConfigInfoDatastoreUrlPair []VirtualMachineConfigInfoDatastoreUrlPair `xml:\"VirtualMachineConfigInfoDatastoreUrlPair,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineConfigInfoDatastoreUrlPair\"] = reflect.TypeOf((*ArrayOfVirtualMachineConfigInfoDatastoreUrlPair)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineConfigOptionDescriptor struct {\n\tVirtualMachineConfigOptionDescriptor []VirtualMachineConfigOptionDescriptor 
`xml:\"VirtualMachineConfigOptionDescriptor,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineConfigOptionDescriptor\"] = reflect.TypeOf((*ArrayOfVirtualMachineConfigOptionDescriptor)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineCpuIdInfoSpec struct {\n\tVirtualMachineCpuIdInfoSpec []VirtualMachineCpuIdInfoSpec `xml:\"VirtualMachineCpuIdInfoSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineCpuIdInfoSpec\"] = reflect.TypeOf((*ArrayOfVirtualMachineCpuIdInfoSpec)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineDatastoreInfo struct {\n\tVirtualMachineDatastoreInfo []VirtualMachineDatastoreInfo `xml:\"VirtualMachineDatastoreInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineDatastoreInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachineDatastoreInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineDatastoreVolumeOption struct {\n\tVirtualMachineDatastoreVolumeOption []VirtualMachineDatastoreVolumeOption `xml:\"VirtualMachineDatastoreVolumeOption,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineDatastoreVolumeOption\"] = reflect.TypeOf((*ArrayOfVirtualMachineDatastoreVolumeOption)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineDeviceRuntimeInfo struct {\n\tVirtualMachineDeviceRuntimeInfo []VirtualMachineDeviceRuntimeInfo `xml:\"VirtualMachineDeviceRuntimeInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineDeviceRuntimeInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachineDeviceRuntimeInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineDisplayTopology struct {\n\tVirtualMachineDisplayTopology []VirtualMachineDisplayTopology `xml:\"VirtualMachineDisplayTopology,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineDisplayTopology\"] = reflect.TypeOf((*ArrayOfVirtualMachineDisplayTopology)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineFeatureRequirement struct {\n\tVirtualMachineFeatureRequirement []VirtualMachineFeatureRequirement `xml:\"VirtualMachineFeatureRequirement,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"ArrayOfVirtualMachineFeatureRequirement\"] = reflect.TypeOf((*ArrayOfVirtualMachineFeatureRequirement)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineFileLayoutDiskLayout struct {\n\tVirtualMachineFileLayoutDiskLayout []VirtualMachineFileLayoutDiskLayout `xml:\"VirtualMachineFileLayoutDiskLayout,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineFileLayoutDiskLayout\"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutDiskLayout)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineFileLayoutExDiskLayout struct {\n\tVirtualMachineFileLayoutExDiskLayout []VirtualMachineFileLayoutExDiskLayout `xml:\"VirtualMachineFileLayoutExDiskLayout,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineFileLayoutExDiskLayout\"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutExDiskLayout)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineFileLayoutExDiskUnit struct {\n\tVirtualMachineFileLayoutExDiskUnit []VirtualMachineFileLayoutExDiskUnit `xml:\"VirtualMachineFileLayoutExDiskUnit,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineFileLayoutExDiskUnit\"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutExDiskUnit)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineFileLayoutExFileInfo struct {\n\tVirtualMachineFileLayoutExFileInfo []VirtualMachineFileLayoutExFileInfo `xml:\"VirtualMachineFileLayoutExFileInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineFileLayoutExFileInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutExFileInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineFileLayoutExSnapshotLayout struct {\n\tVirtualMachineFileLayoutExSnapshotLayout []VirtualMachineFileLayoutExSnapshotLayout `xml:\"VirtualMachineFileLayoutExSnapshotLayout,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineFileLayoutExSnapshotLayout\"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutExSnapshotLayout)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineFileLayoutSnapshotLayout struct {\n\tVirtualMachineFileLayoutSnapshotLayout 
[]VirtualMachineFileLayoutSnapshotLayout `xml:\"VirtualMachineFileLayoutSnapshotLayout,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineFileLayoutSnapshotLayout\"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutSnapshotLayout)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineFloppyInfo struct {\n\tVirtualMachineFloppyInfo []VirtualMachineFloppyInfo `xml:\"VirtualMachineFloppyInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineFloppyInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachineFloppyInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineIdeDiskDeviceInfo struct {\n\tVirtualMachineIdeDiskDeviceInfo []VirtualMachineIdeDiskDeviceInfo `xml:\"VirtualMachineIdeDiskDeviceInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineIdeDiskDeviceInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachineIdeDiskDeviceInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineIdeDiskDevicePartitionInfo struct {\n\tVirtualMachineIdeDiskDevicePartitionInfo []VirtualMachineIdeDiskDevicePartitionInfo `xml:\"VirtualMachineIdeDiskDevicePartitionInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineIdeDiskDevicePartitionInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachineIdeDiskDevicePartitionInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineLegacyNetworkSwitchInfo struct {\n\tVirtualMachineLegacyNetworkSwitchInfo []VirtualMachineLegacyNetworkSwitchInfo `xml:\"VirtualMachineLegacyNetworkSwitchInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineLegacyNetworkSwitchInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachineLegacyNetworkSwitchInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineMessage struct {\n\tVirtualMachineMessage []VirtualMachineMessage `xml:\"VirtualMachineMessage,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineMessage\"] = reflect.TypeOf((*ArrayOfVirtualMachineMessage)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineMetadataManagerVmMetadataInput struct {\n\tVirtualMachineMetadataManagerVmMetadataInput 
[]VirtualMachineMetadataManagerVmMetadataInput `xml:\"VirtualMachineMetadataManagerVmMetadataInput,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineMetadataManagerVmMetadataInput\"] = reflect.TypeOf((*ArrayOfVirtualMachineMetadataManagerVmMetadataInput)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineMetadataManagerVmMetadataResult struct {\n\tVirtualMachineMetadataManagerVmMetadataResult []VirtualMachineMetadataManagerVmMetadataResult `xml:\"VirtualMachineMetadataManagerVmMetadataResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineMetadataManagerVmMetadataResult\"] = reflect.TypeOf((*ArrayOfVirtualMachineMetadataManagerVmMetadataResult)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineNetworkInfo struct {\n\tVirtualMachineNetworkInfo []VirtualMachineNetworkInfo `xml:\"VirtualMachineNetworkInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineNetworkInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachineNetworkInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineParallelInfo struct {\n\tVirtualMachineParallelInfo []VirtualMachineParallelInfo `xml:\"VirtualMachineParallelInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineParallelInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachineParallelInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachinePciPassthroughInfo struct {\n\tVirtualMachinePciPassthroughInfo []BaseVirtualMachinePciPassthroughInfo `xml:\"VirtualMachinePciPassthroughInfo,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachinePciPassthroughInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachinePciPassthroughInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachinePciSharedGpuPassthroughInfo struct {\n\tVirtualMachinePciSharedGpuPassthroughInfo []VirtualMachinePciSharedGpuPassthroughInfo `xml:\"VirtualMachinePciSharedGpuPassthroughInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachinePciSharedGpuPassthroughInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachinePciSharedGpuPassthroughInfo)(nil)).Elem()\n}\n\ntype 
ArrayOfVirtualMachineProfileSpec struct {\n\tVirtualMachineProfileSpec []BaseVirtualMachineProfileSpec `xml:\"VirtualMachineProfileSpec,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineProfileSpec\"] = reflect.TypeOf((*ArrayOfVirtualMachineProfileSpec)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineRelocateSpecDiskLocator struct {\n\tVirtualMachineRelocateSpecDiskLocator []VirtualMachineRelocateSpecDiskLocator `xml:\"VirtualMachineRelocateSpecDiskLocator,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineRelocateSpecDiskLocator\"] = reflect.TypeOf((*ArrayOfVirtualMachineRelocateSpecDiskLocator)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineScsiDiskDeviceInfo struct {\n\tVirtualMachineScsiDiskDeviceInfo []VirtualMachineScsiDiskDeviceInfo `xml:\"VirtualMachineScsiDiskDeviceInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineScsiDiskDeviceInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachineScsiDiskDeviceInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineScsiPassthroughInfo struct {\n\tVirtualMachineScsiPassthroughInfo []VirtualMachineScsiPassthroughInfo `xml:\"VirtualMachineScsiPassthroughInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineScsiPassthroughInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachineScsiPassthroughInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineSerialInfo struct {\n\tVirtualMachineSerialInfo []VirtualMachineSerialInfo `xml:\"VirtualMachineSerialInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineSerialInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachineSerialInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineSnapshotTree struct {\n\tVirtualMachineSnapshotTree []VirtualMachineSnapshotTree `xml:\"VirtualMachineSnapshotTree,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineSnapshotTree\"] = reflect.TypeOf((*ArrayOfVirtualMachineSnapshotTree)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineSoundInfo struct {\n\tVirtualMachineSoundInfo []VirtualMachineSoundInfo 
`xml:\"VirtualMachineSoundInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineSoundInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachineSoundInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineSriovInfo struct {\n\tVirtualMachineSriovInfo []VirtualMachineSriovInfo `xml:\"VirtualMachineSriovInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineSriovInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachineSriovInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineSummary struct {\n\tVirtualMachineSummary []VirtualMachineSummary `xml:\"VirtualMachineSummary,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineSummary\"] = reflect.TypeOf((*ArrayOfVirtualMachineSummary)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineUsageOnDatastore struct {\n\tVirtualMachineUsageOnDatastore []VirtualMachineUsageOnDatastore `xml:\"VirtualMachineUsageOnDatastore,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineUsageOnDatastore\"] = reflect.TypeOf((*ArrayOfVirtualMachineUsageOnDatastore)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineUsbInfo struct {\n\tVirtualMachineUsbInfo []VirtualMachineUsbInfo `xml:\"VirtualMachineUsbInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineUsbInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachineUsbInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineVFlashModuleInfo struct {\n\tVirtualMachineVFlashModuleInfo []VirtualMachineVFlashModuleInfo `xml:\"VirtualMachineVFlashModuleInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineVFlashModuleInfo\"] = reflect.TypeOf((*ArrayOfVirtualMachineVFlashModuleInfo)(nil)).Elem()\n}\n\ntype ArrayOfVirtualMachineVMCIDeviceFilterSpec struct {\n\tVirtualMachineVMCIDeviceFilterSpec []VirtualMachineVMCIDeviceFilterSpec `xml:\"VirtualMachineVMCIDeviceFilterSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualMachineVMCIDeviceFilterSpec\"] = reflect.TypeOf((*ArrayOfVirtualMachineVMCIDeviceFilterSpec)(nil)).Elem()\n}\n\ntype ArrayOfVirtualNicManagerNetConfig struct 
{\n\tVirtualNicManagerNetConfig []VirtualNicManagerNetConfig `xml:\"VirtualNicManagerNetConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualNicManagerNetConfig\"] = reflect.TypeOf((*ArrayOfVirtualNicManagerNetConfig)(nil)).Elem()\n}\n\ntype ArrayOfVirtualSCSISharing struct {\n\tVirtualSCSISharing []VirtualSCSISharing `xml:\"VirtualSCSISharing,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualSCSISharing\"] = reflect.TypeOf((*ArrayOfVirtualSCSISharing)(nil)).Elem()\n}\n\ntype ArrayOfVirtualSwitchProfile struct {\n\tVirtualSwitchProfile []VirtualSwitchProfile `xml:\"VirtualSwitchProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVirtualSwitchProfile\"] = reflect.TypeOf((*ArrayOfVirtualSwitchProfile)(nil)).Elem()\n}\n\ntype ArrayOfVmEventArgument struct {\n\tVmEventArgument []VmEventArgument `xml:\"VmEventArgument,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVmEventArgument\"] = reflect.TypeOf((*ArrayOfVmEventArgument)(nil)).Elem()\n}\n\ntype ArrayOfVmPodConfigForPlacement struct {\n\tVmPodConfigForPlacement []VmPodConfigForPlacement `xml:\"VmPodConfigForPlacement,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVmPodConfigForPlacement\"] = reflect.TypeOf((*ArrayOfVmPodConfigForPlacement)(nil)).Elem()\n}\n\ntype ArrayOfVmPortGroupProfile struct {\n\tVmPortGroupProfile []VmPortGroupProfile `xml:\"VmPortGroupProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVmPortGroupProfile\"] = reflect.TypeOf((*ArrayOfVmPortGroupProfile)(nil)).Elem()\n}\n\ntype ArrayOfVmfsConfigOption struct {\n\tVmfsConfigOption []VmfsConfigOption `xml:\"VmfsConfigOption,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVmfsConfigOption\"] = reflect.TypeOf((*ArrayOfVmfsConfigOption)(nil)).Elem()\n}\n\ntype ArrayOfVmfsDatastoreOption struct {\n\tVmfsDatastoreOption []VmfsDatastoreOption `xml:\"VmfsDatastoreOption,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVmfsDatastoreOption\"] = reflect.TypeOf((*ArrayOfVmfsDatastoreOption)(nil)).Elem()\n}\n\ntype ArrayOfVnicPortArgument struct 
{\n\tVnicPortArgument []VnicPortArgument `xml:\"VnicPortArgument,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVnicPortArgument\"] = reflect.TypeOf((*ArrayOfVnicPortArgument)(nil)).Elem()\n}\n\ntype ArrayOfVsanHostConfigInfo struct {\n\tVsanHostConfigInfo []VsanHostConfigInfo `xml:\"VsanHostConfigInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVsanHostConfigInfo\"] = reflect.TypeOf((*ArrayOfVsanHostConfigInfo)(nil)).Elem()\n}\n\ntype ArrayOfVsanHostConfigInfoNetworkInfoPortConfig struct {\n\tVsanHostConfigInfoNetworkInfoPortConfig []VsanHostConfigInfoNetworkInfoPortConfig `xml:\"VsanHostConfigInfoNetworkInfoPortConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVsanHostConfigInfoNetworkInfoPortConfig\"] = reflect.TypeOf((*ArrayOfVsanHostConfigInfoNetworkInfoPortConfig)(nil)).Elem()\n}\n\ntype ArrayOfVsanHostDiskMapInfo struct {\n\tVsanHostDiskMapInfo []VsanHostDiskMapInfo `xml:\"VsanHostDiskMapInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVsanHostDiskMapInfo\"] = reflect.TypeOf((*ArrayOfVsanHostDiskMapInfo)(nil)).Elem()\n}\n\ntype ArrayOfVsanHostDiskMapResult struct {\n\tVsanHostDiskMapResult []VsanHostDiskMapResult `xml:\"VsanHostDiskMapResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVsanHostDiskMapResult\"] = reflect.TypeOf((*ArrayOfVsanHostDiskMapResult)(nil)).Elem()\n}\n\ntype ArrayOfVsanHostDiskMapping struct {\n\tVsanHostDiskMapping []VsanHostDiskMapping `xml:\"VsanHostDiskMapping,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVsanHostDiskMapping\"] = reflect.TypeOf((*ArrayOfVsanHostDiskMapping)(nil)).Elem()\n}\n\ntype ArrayOfVsanHostDiskResult struct {\n\tVsanHostDiskResult []VsanHostDiskResult `xml:\"VsanHostDiskResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVsanHostDiskResult\"] = reflect.TypeOf((*ArrayOfVsanHostDiskResult)(nil)).Elem()\n}\n\ntype ArrayOfVsanHostMembershipInfo struct {\n\tVsanHostMembershipInfo []VsanHostMembershipInfo `xml:\"VsanHostMembershipInfo,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"ArrayOfVsanHostMembershipInfo\"] = reflect.TypeOf((*ArrayOfVsanHostMembershipInfo)(nil)).Elem()\n}\n\ntype ArrayOfVsanHostRuntimeInfoDiskIssue struct {\n\tVsanHostRuntimeInfoDiskIssue []VsanHostRuntimeInfoDiskIssue `xml:\"VsanHostRuntimeInfoDiskIssue,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVsanHostRuntimeInfoDiskIssue\"] = reflect.TypeOf((*ArrayOfVsanHostRuntimeInfoDiskIssue)(nil)).Elem()\n}\n\ntype ArrayOfVsanNewPolicyBatch struct {\n\tVsanNewPolicyBatch []VsanNewPolicyBatch `xml:\"VsanNewPolicyBatch,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVsanNewPolicyBatch\"] = reflect.TypeOf((*ArrayOfVsanNewPolicyBatch)(nil)).Elem()\n}\n\ntype ArrayOfVsanPolicyChangeBatch struct {\n\tVsanPolicyChangeBatch []VsanPolicyChangeBatch `xml:\"VsanPolicyChangeBatch,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVsanPolicyChangeBatch\"] = reflect.TypeOf((*ArrayOfVsanPolicyChangeBatch)(nil)).Elem()\n}\n\ntype ArrayOfVsanPolicySatisfiability struct {\n\tVsanPolicySatisfiability []VsanPolicySatisfiability `xml:\"VsanPolicySatisfiability,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVsanPolicySatisfiability\"] = reflect.TypeOf((*ArrayOfVsanPolicySatisfiability)(nil)).Elem()\n}\n\ntype ArrayOfVsanUpgradeSystemNetworkPartitionInfo struct {\n\tVsanUpgradeSystemNetworkPartitionInfo []VsanUpgradeSystemNetworkPartitionInfo `xml:\"VsanUpgradeSystemNetworkPartitionInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVsanUpgradeSystemNetworkPartitionInfo\"] = reflect.TypeOf((*ArrayOfVsanUpgradeSystemNetworkPartitionInfo)(nil)).Elem()\n}\n\ntype ArrayOfVsanUpgradeSystemPreflightCheckIssue struct {\n\tVsanUpgradeSystemPreflightCheckIssue []BaseVsanUpgradeSystemPreflightCheckIssue `xml:\"VsanUpgradeSystemPreflightCheckIssue,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVsanUpgradeSystemPreflightCheckIssue\"] = reflect.TypeOf((*ArrayOfVsanUpgradeSystemPreflightCheckIssue)(nil)).Elem()\n}\n\ntype ArrayOfVsanUpgradeSystemUpgradeHistoryItem struct 
{\n\tVsanUpgradeSystemUpgradeHistoryItem []BaseVsanUpgradeSystemUpgradeHistoryItem `xml:\"VsanUpgradeSystemUpgradeHistoryItem,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVsanUpgradeSystemUpgradeHistoryItem\"] = reflect.TypeOf((*ArrayOfVsanUpgradeSystemUpgradeHistoryItem)(nil)).Elem()\n}\n\ntype ArrayOfVslmTagEntry struct {\n\tVslmTagEntry []VslmTagEntry `xml:\"VslmTagEntry,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ArrayOfVslmTagEntry\"] = reflect.TypeOf((*ArrayOfVslmTagEntry)(nil)).Elem()\n}\n\ntype ArrayUpdateSpec struct {\n\tDynamicData\n\n\tOperation ArrayUpdateOperation `xml:\"operation\"`\n\tRemoveKey AnyType              `xml:\"removeKey,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ArrayUpdateSpec\"] = reflect.TypeOf((*ArrayUpdateSpec)(nil)).Elem()\n}\n\ntype AssignUserToGroup AssignUserToGroupRequestType\n\nfunc init() {\n\tt[\"AssignUserToGroup\"] = reflect.TypeOf((*AssignUserToGroup)(nil)).Elem()\n}\n\ntype AssignUserToGroupRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tUser  string                 `xml:\"user\"`\n\tGroup string                 `xml:\"group\"`\n}\n\nfunc init() {\n\tt[\"AssignUserToGroupRequestType\"] = reflect.TypeOf((*AssignUserToGroupRequestType)(nil)).Elem()\n}\n\ntype AssignUserToGroupResponse struct {\n}\n\ntype AssociateProfile AssociateProfileRequestType\n\nfunc init() {\n\tt[\"AssociateProfile\"] = reflect.TypeOf((*AssociateProfile)(nil)).Elem()\n}\n\ntype AssociateProfileRequestType struct {\n\tThis   ManagedObjectReference   `xml:\"_this\"`\n\tEntity []ManagedObjectReference `xml:\"entity\"`\n}\n\nfunc init() {\n\tt[\"AssociateProfileRequestType\"] = reflect.TypeOf((*AssociateProfileRequestType)(nil)).Elem()\n}\n\ntype AssociateProfileResponse struct {\n}\n\ntype AttachDiskRequestType struct {\n\tThis          ManagedObjectReference `xml:\"_this\"`\n\tDiskId        ID                     `xml:\"diskId\"`\n\tDatastore     ManagedObjectReference `xml:\"datastore\"`\n\tControllerKey int32      
            `xml:\"controllerKey,omitempty\"`\n\tUnitNumber    *int32                 `xml:\"unitNumber\"`\n}\n\nfunc init() {\n\tt[\"AttachDiskRequestType\"] = reflect.TypeOf((*AttachDiskRequestType)(nil)).Elem()\n}\n\ntype AttachDisk_Task AttachDiskRequestType\n\nfunc init() {\n\tt[\"AttachDisk_Task\"] = reflect.TypeOf((*AttachDisk_Task)(nil)).Elem()\n}\n\ntype AttachDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype AttachScsiLun AttachScsiLunRequestType\n\nfunc init() {\n\tt[\"AttachScsiLun\"] = reflect.TypeOf((*AttachScsiLun)(nil)).Elem()\n}\n\ntype AttachScsiLunExRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tLunUuid []string               `xml:\"lunUuid\"`\n}\n\nfunc init() {\n\tt[\"AttachScsiLunExRequestType\"] = reflect.TypeOf((*AttachScsiLunExRequestType)(nil)).Elem()\n}\n\ntype AttachScsiLunEx_Task AttachScsiLunExRequestType\n\nfunc init() {\n\tt[\"AttachScsiLunEx_Task\"] = reflect.TypeOf((*AttachScsiLunEx_Task)(nil)).Elem()\n}\n\ntype AttachScsiLunEx_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype AttachScsiLunRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tLunUuid string                 `xml:\"lunUuid\"`\n}\n\nfunc init() {\n\tt[\"AttachScsiLunRequestType\"] = reflect.TypeOf((*AttachScsiLunRequestType)(nil)).Elem()\n}\n\ntype AttachScsiLunResponse struct {\n}\n\ntype AttachTagToVStorageObject AttachTagToVStorageObjectRequestType\n\nfunc init() {\n\tt[\"AttachTagToVStorageObject\"] = reflect.TypeOf((*AttachTagToVStorageObject)(nil)).Elem()\n}\n\ntype AttachTagToVStorageObjectRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tId       ID                     `xml:\"id\"`\n\tCategory string                 `xml:\"category\"`\n\tTag      string                 `xml:\"tag\"`\n}\n\nfunc init() {\n\tt[\"AttachTagToVStorageObjectRequestType\"] = 
reflect.TypeOf((*AttachTagToVStorageObjectRequestType)(nil)).Elem()\n}\n\ntype AttachTagToVStorageObjectResponse struct {\n}\n\ntype AttachVmfsExtent AttachVmfsExtentRequestType\n\nfunc init() {\n\tt[\"AttachVmfsExtent\"] = reflect.TypeOf((*AttachVmfsExtent)(nil)).Elem()\n}\n\ntype AttachVmfsExtentRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tVmfsPath string                 `xml:\"vmfsPath\"`\n\tExtent   HostScsiDiskPartition  `xml:\"extent\"`\n}\n\nfunc init() {\n\tt[\"AttachVmfsExtentRequestType\"] = reflect.TypeOf((*AttachVmfsExtentRequestType)(nil)).Elem()\n}\n\ntype AttachVmfsExtentResponse struct {\n}\n\ntype AuthMinimumAdminPermission struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"AuthMinimumAdminPermission\"] = reflect.TypeOf((*AuthMinimumAdminPermission)(nil)).Elem()\n}\n\ntype AuthMinimumAdminPermissionFault AuthMinimumAdminPermission\n\nfunc init() {\n\tt[\"AuthMinimumAdminPermissionFault\"] = reflect.TypeOf((*AuthMinimumAdminPermissionFault)(nil)).Elem()\n}\n\ntype AuthenticationProfile struct {\n\tApplyProfile\n\n\tActiveDirectory *ActiveDirectoryProfile `xml:\"activeDirectory,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AuthenticationProfile\"] = reflect.TypeOf((*AuthenticationProfile)(nil)).Elem()\n}\n\ntype AuthorizationDescription struct {\n\tDynamicData\n\n\tPrivilege      []BaseElementDescription `xml:\"privilege,typeattr\"`\n\tPrivilegeGroup []BaseElementDescription `xml:\"privilegeGroup,typeattr\"`\n}\n\nfunc init() {\n\tt[\"AuthorizationDescription\"] = reflect.TypeOf((*AuthorizationDescription)(nil)).Elem()\n}\n\ntype AuthorizationEvent struct {\n\tEvent\n}\n\nfunc init() {\n\tt[\"AuthorizationEvent\"] = reflect.TypeOf((*AuthorizationEvent)(nil)).Elem()\n}\n\ntype AuthorizationPrivilege struct {\n\tDynamicData\n\n\tPrivId        string `xml:\"privId\"`\n\tOnParent      bool   `xml:\"onParent\"`\n\tName          string `xml:\"name\"`\n\tPrivGroupName string `xml:\"privGroupName\"`\n}\n\nfunc init() 
{\n\tt[\"AuthorizationPrivilege\"] = reflect.TypeOf((*AuthorizationPrivilege)(nil)).Elem()\n}\n\ntype AuthorizationRole struct {\n\tDynamicData\n\n\tRoleId    int32           `xml:\"roleId\"`\n\tSystem    bool            `xml:\"system\"`\n\tName      string          `xml:\"name\"`\n\tInfo      BaseDescription `xml:\"info,typeattr\"`\n\tPrivilege []string        `xml:\"privilege,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AuthorizationRole\"] = reflect.TypeOf((*AuthorizationRole)(nil)).Elem()\n}\n\ntype AutoStartDefaults struct {\n\tDynamicData\n\n\tEnabled          *bool  `xml:\"enabled\"`\n\tStartDelay       int32  `xml:\"startDelay,omitempty\"`\n\tStopDelay        int32  `xml:\"stopDelay,omitempty\"`\n\tWaitForHeartbeat *bool  `xml:\"waitForHeartbeat\"`\n\tStopAction       string `xml:\"stopAction,omitempty\"`\n}\n\nfunc init() {\n\tt[\"AutoStartDefaults\"] = reflect.TypeOf((*AutoStartDefaults)(nil)).Elem()\n}\n\ntype AutoStartPowerInfo struct {\n\tDynamicData\n\n\tKey              ManagedObjectReference        `xml:\"key\"`\n\tStartOrder       int32                         `xml:\"startOrder\"`\n\tStartDelay       int32                         `xml:\"startDelay\"`\n\tWaitForHeartbeat AutoStartWaitHeartbeatSetting `xml:\"waitForHeartbeat\"`\n\tStartAction      string                        `xml:\"startAction\"`\n\tStopDelay        int32                         `xml:\"stopDelay\"`\n\tStopAction       string                        `xml:\"stopAction\"`\n}\n\nfunc init() {\n\tt[\"AutoStartPowerInfo\"] = reflect.TypeOf((*AutoStartPowerInfo)(nil)).Elem()\n}\n\ntype AutoStartPowerOff AutoStartPowerOffRequestType\n\nfunc init() {\n\tt[\"AutoStartPowerOff\"] = reflect.TypeOf((*AutoStartPowerOff)(nil)).Elem()\n}\n\ntype AutoStartPowerOffRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"AutoStartPowerOffRequestType\"] = reflect.TypeOf((*AutoStartPowerOffRequestType)(nil)).Elem()\n}\n\ntype AutoStartPowerOffResponse struct {\n}\n\ntype 
AutoStartPowerOn AutoStartPowerOnRequestType\n\nfunc init() {\n\tt[\"AutoStartPowerOn\"] = reflect.TypeOf((*AutoStartPowerOn)(nil)).Elem()\n}\n\ntype AutoStartPowerOnRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"AutoStartPowerOnRequestType\"] = reflect.TypeOf((*AutoStartPowerOnRequestType)(nil)).Elem()\n}\n\ntype AutoStartPowerOnResponse struct {\n}\n\ntype BackupBlobReadFailure struct {\n\tDvsFault\n\n\tEntityName string               `xml:\"entityName\"`\n\tEntityType string               `xml:\"entityType\"`\n\tFault      LocalizedMethodFault `xml:\"fault\"`\n}\n\nfunc init() {\n\tt[\"BackupBlobReadFailure\"] = reflect.TypeOf((*BackupBlobReadFailure)(nil)).Elem()\n}\n\ntype BackupBlobReadFailureFault BackupBlobReadFailure\n\nfunc init() {\n\tt[\"BackupBlobReadFailureFault\"] = reflect.TypeOf((*BackupBlobReadFailureFault)(nil)).Elem()\n}\n\ntype BackupBlobWriteFailure struct {\n\tDvsFault\n\n\tEntityName string               `xml:\"entityName\"`\n\tEntityType string               `xml:\"entityType\"`\n\tFault      LocalizedMethodFault `xml:\"fault\"`\n}\n\nfunc init() {\n\tt[\"BackupBlobWriteFailure\"] = reflect.TypeOf((*BackupBlobWriteFailure)(nil)).Elem()\n}\n\ntype BackupBlobWriteFailureFault BackupBlobWriteFailure\n\nfunc init() {\n\tt[\"BackupBlobWriteFailureFault\"] = reflect.TypeOf((*BackupBlobWriteFailureFault)(nil)).Elem()\n}\n\ntype BackupFirmwareConfiguration BackupFirmwareConfigurationRequestType\n\nfunc init() {\n\tt[\"BackupFirmwareConfiguration\"] = reflect.TypeOf((*BackupFirmwareConfiguration)(nil)).Elem()\n}\n\ntype BackupFirmwareConfigurationRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"BackupFirmwareConfigurationRequestType\"] = reflect.TypeOf((*BackupFirmwareConfigurationRequestType)(nil)).Elem()\n}\n\ntype BackupFirmwareConfigurationResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype BadUsernameSessionEvent struct 
{\n\tSessionEvent\n\n\tIpAddress string `xml:\"ipAddress\"`\n}\n\nfunc init() {\n\tt[\"BadUsernameSessionEvent\"] = reflect.TypeOf((*BadUsernameSessionEvent)(nil)).Elem()\n}\n\ntype BaseConfigInfo struct {\n\tDynamicData\n\n\tId         ID                            `xml:\"id\"`\n\tName       string                        `xml:\"name\"`\n\tCreateTime time.Time                     `xml:\"createTime\"`\n\tBacking    BaseBaseConfigInfoBackingInfo `xml:\"backing,typeattr\"`\n}\n\nfunc init() {\n\tt[\"BaseConfigInfo\"] = reflect.TypeOf((*BaseConfigInfo)(nil)).Elem()\n}\n\ntype BaseConfigInfoBackingInfo struct {\n\tDynamicData\n\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"BaseConfigInfoBackingInfo\"] = reflect.TypeOf((*BaseConfigInfoBackingInfo)(nil)).Elem()\n}\n\ntype BaseConfigInfoDiskFileBackingInfo struct {\n\tBaseConfigInfoFileBackingInfo\n\n\tProvisioningType string `xml:\"provisioningType\"`\n}\n\nfunc init() {\n\tt[\"BaseConfigInfoDiskFileBackingInfo\"] = reflect.TypeOf((*BaseConfigInfoDiskFileBackingInfo)(nil)).Elem()\n}\n\ntype BaseConfigInfoFileBackingInfo struct {\n\tBaseConfigInfoBackingInfo\n\n\tFilePath        string                            `xml:\"filePath\"`\n\tBackingObjectId string                            `xml:\"backingObjectId,omitempty\"`\n\tParent          BaseBaseConfigInfoFileBackingInfo `xml:\"parent,omitempty,typeattr\"`\n\tDeltaSizeInMB   int64                             `xml:\"deltaSizeInMB,omitempty\"`\n}\n\nfunc init() {\n\tt[\"BaseConfigInfoFileBackingInfo\"] = reflect.TypeOf((*BaseConfigInfoFileBackingInfo)(nil)).Elem()\n}\n\ntype BaseConfigInfoRawDiskMappingBackingInfo struct {\n\tBaseConfigInfoFileBackingInfo\n\n\tLunUuid           string `xml:\"lunUuid\"`\n\tCompatibilityMode string `xml:\"compatibilityMode\"`\n}\n\nfunc init() {\n\tt[\"BaseConfigInfoRawDiskMappingBackingInfo\"] = reflect.TypeOf((*BaseConfigInfoRawDiskMappingBackingInfo)(nil)).Elem()\n}\n\ntype BatchResult struct 
{\n\tDynamicData\n\n\tResult  string                  `xml:\"result\"`\n\tHostKey string                  `xml:\"hostKey\"`\n\tDs      *ManagedObjectReference `xml:\"ds,omitempty\"`\n\tFault   *LocalizedMethodFault   `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"BatchResult\"] = reflect.TypeOf((*BatchResult)(nil)).Elem()\n}\n\ntype BindVnic BindVnicRequestType\n\nfunc init() {\n\tt[\"BindVnic\"] = reflect.TypeOf((*BindVnic)(nil)).Elem()\n}\n\ntype BindVnicRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tIScsiHbaName string                 `xml:\"iScsiHbaName\"`\n\tVnicDevice   string                 `xml:\"vnicDevice\"`\n}\n\nfunc init() {\n\tt[\"BindVnicRequestType\"] = reflect.TypeOf((*BindVnicRequestType)(nil)).Elem()\n}\n\ntype BindVnicResponse struct {\n}\n\ntype BlockedByFirewall struct {\n\tHostConfigFault\n}\n\nfunc init() {\n\tt[\"BlockedByFirewall\"] = reflect.TypeOf((*BlockedByFirewall)(nil)).Elem()\n}\n\ntype BlockedByFirewallFault BlockedByFirewall\n\nfunc init() {\n\tt[\"BlockedByFirewallFault\"] = reflect.TypeOf((*BlockedByFirewallFault)(nil)).Elem()\n}\n\ntype BoolOption struct {\n\tOptionType\n\n\tSupported    bool `xml:\"supported\"`\n\tDefaultValue bool `xml:\"defaultValue\"`\n}\n\nfunc init() {\n\tt[\"BoolOption\"] = reflect.TypeOf((*BoolOption)(nil)).Elem()\n}\n\ntype BoolPolicy struct {\n\tInheritablePolicy\n\n\tValue *bool `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"BoolPolicy\"] = reflect.TypeOf((*BoolPolicy)(nil)).Elem()\n}\n\ntype BrowseDiagnosticLog BrowseDiagnosticLogRequestType\n\nfunc init() {\n\tt[\"BrowseDiagnosticLog\"] = reflect.TypeOf((*BrowseDiagnosticLog)(nil)).Elem()\n}\n\ntype BrowseDiagnosticLogRequestType struct {\n\tThis  ManagedObjectReference  `xml:\"_this\"`\n\tHost  *ManagedObjectReference `xml:\"host,omitempty\"`\n\tKey   string                  `xml:\"key\"`\n\tStart int32                   `xml:\"start,omitempty\"`\n\tLines int32                   
`xml:\"lines,omitempty\"`\n}\n\nfunc init() {\n\tt[\"BrowseDiagnosticLogRequestType\"] = reflect.TypeOf((*BrowseDiagnosticLogRequestType)(nil)).Elem()\n}\n\ntype BrowseDiagnosticLogResponse struct {\n\tReturnval DiagnosticManagerLogHeader `xml:\"returnval\"`\n}\n\ntype CAMServerRefusedConnection struct {\n\tInvalidCAMServer\n}\n\nfunc init() {\n\tt[\"CAMServerRefusedConnection\"] = reflect.TypeOf((*CAMServerRefusedConnection)(nil)).Elem()\n}\n\ntype CAMServerRefusedConnectionFault CAMServerRefusedConnection\n\nfunc init() {\n\tt[\"CAMServerRefusedConnectionFault\"] = reflect.TypeOf((*CAMServerRefusedConnectionFault)(nil)).Elem()\n}\n\ntype CanProvisionObjects CanProvisionObjectsRequestType\n\nfunc init() {\n\tt[\"CanProvisionObjects\"] = reflect.TypeOf((*CanProvisionObjects)(nil)).Elem()\n}\n\ntype CanProvisionObjectsRequestType struct {\n\tThis                 ManagedObjectReference `xml:\"_this\"`\n\tNpbs                 []VsanNewPolicyBatch   `xml:\"npbs\"`\n\tIgnoreSatisfiability *bool                  `xml:\"ignoreSatisfiability\"`\n}\n\nfunc init() {\n\tt[\"CanProvisionObjectsRequestType\"] = reflect.TypeOf((*CanProvisionObjectsRequestType)(nil)).Elem()\n}\n\ntype CanProvisionObjectsResponse struct {\n\tReturnval []VsanPolicySatisfiability `xml:\"returnval\"`\n}\n\ntype CancelRecommendation CancelRecommendationRequestType\n\nfunc init() {\n\tt[\"CancelRecommendation\"] = reflect.TypeOf((*CancelRecommendation)(nil)).Elem()\n}\n\ntype CancelRecommendationRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tKey  string                 `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"CancelRecommendationRequestType\"] = reflect.TypeOf((*CancelRecommendationRequestType)(nil)).Elem()\n}\n\ntype CancelRecommendationResponse struct {\n}\n\ntype CancelRetrievePropertiesEx CancelRetrievePropertiesExRequestType\n\nfunc init() {\n\tt[\"CancelRetrievePropertiesEx\"] = reflect.TypeOf((*CancelRetrievePropertiesEx)(nil)).Elem()\n}\n\ntype 
CancelRetrievePropertiesExRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tToken string                 `xml:\"token\"`\n}\n\nfunc init() {\n\tt[\"CancelRetrievePropertiesExRequestType\"] = reflect.TypeOf((*CancelRetrievePropertiesExRequestType)(nil)).Elem()\n}\n\ntype CancelRetrievePropertiesExResponse struct {\n}\n\ntype CancelStorageDrsRecommendation CancelStorageDrsRecommendationRequestType\n\nfunc init() {\n\tt[\"CancelStorageDrsRecommendation\"] = reflect.TypeOf((*CancelStorageDrsRecommendation)(nil)).Elem()\n}\n\ntype CancelStorageDrsRecommendationRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tKey  []string               `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"CancelStorageDrsRecommendationRequestType\"] = reflect.TypeOf((*CancelStorageDrsRecommendationRequestType)(nil)).Elem()\n}\n\ntype CancelStorageDrsRecommendationResponse struct {\n}\n\ntype CancelTask CancelTaskRequestType\n\nfunc init() {\n\tt[\"CancelTask\"] = reflect.TypeOf((*CancelTask)(nil)).Elem()\n}\n\ntype CancelTaskRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"CancelTaskRequestType\"] = reflect.TypeOf((*CancelTaskRequestType)(nil)).Elem()\n}\n\ntype CancelTaskResponse struct {\n}\n\ntype CancelWaitForUpdates CancelWaitForUpdatesRequestType\n\nfunc init() {\n\tt[\"CancelWaitForUpdates\"] = reflect.TypeOf((*CancelWaitForUpdates)(nil)).Elem()\n}\n\ntype CancelWaitForUpdatesRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"CancelWaitForUpdatesRequestType\"] = reflect.TypeOf((*CancelWaitForUpdatesRequestType)(nil)).Elem()\n}\n\ntype CancelWaitForUpdatesResponse struct {\n}\n\ntype CanceledHostOperationEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"CanceledHostOperationEvent\"] = reflect.TypeOf((*CanceledHostOperationEvent)(nil)).Elem()\n}\n\ntype CannotAccessFile struct {\n\tFileFault\n}\n\nfunc init() {\n\tt[\"CannotAccessFile\"] = 
reflect.TypeOf((*CannotAccessFile)(nil)).Elem()\n}\n\ntype CannotAccessFileFault CannotAccessFile\n\nfunc init() {\n\tt[\"CannotAccessFileFault\"] = reflect.TypeOf((*CannotAccessFileFault)(nil)).Elem()\n}\n\ntype CannotAccessLocalSource struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"CannotAccessLocalSource\"] = reflect.TypeOf((*CannotAccessLocalSource)(nil)).Elem()\n}\n\ntype CannotAccessLocalSourceFault CannotAccessLocalSource\n\nfunc init() {\n\tt[\"CannotAccessLocalSourceFault\"] = reflect.TypeOf((*CannotAccessLocalSourceFault)(nil)).Elem()\n}\n\ntype CannotAccessNetwork struct {\n\tCannotAccessVmDevice\n\n\tNetwork *ManagedObjectReference `xml:\"network,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CannotAccessNetwork\"] = reflect.TypeOf((*CannotAccessNetwork)(nil)).Elem()\n}\n\ntype CannotAccessNetworkFault BaseCannotAccessNetwork\n\nfunc init() {\n\tt[\"CannotAccessNetworkFault\"] = reflect.TypeOf((*CannotAccessNetworkFault)(nil)).Elem()\n}\n\ntype CannotAccessVmComponent struct {\n\tVmConfigFault\n}\n\nfunc init() {\n\tt[\"CannotAccessVmComponent\"] = reflect.TypeOf((*CannotAccessVmComponent)(nil)).Elem()\n}\n\ntype CannotAccessVmComponentFault BaseCannotAccessVmComponent\n\nfunc init() {\n\tt[\"CannotAccessVmComponentFault\"] = reflect.TypeOf((*CannotAccessVmComponentFault)(nil)).Elem()\n}\n\ntype CannotAccessVmConfig struct {\n\tCannotAccessVmComponent\n\n\tReason LocalizedMethodFault `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"CannotAccessVmConfig\"] = reflect.TypeOf((*CannotAccessVmConfig)(nil)).Elem()\n}\n\ntype CannotAccessVmConfigFault CannotAccessVmConfig\n\nfunc init() {\n\tt[\"CannotAccessVmConfigFault\"] = reflect.TypeOf((*CannotAccessVmConfigFault)(nil)).Elem()\n}\n\ntype CannotAccessVmDevice struct {\n\tCannotAccessVmComponent\n\n\tDevice    string `xml:\"device\"`\n\tBacking   string `xml:\"backing\"`\n\tConnected bool   `xml:\"connected\"`\n}\n\nfunc init() {\n\tt[\"CannotAccessVmDevice\"] = 
reflect.TypeOf((*CannotAccessVmDevice)(nil)).Elem()\n}\n\ntype CannotAccessVmDeviceFault BaseCannotAccessVmDevice\n\nfunc init() {\n\tt[\"CannotAccessVmDeviceFault\"] = reflect.TypeOf((*CannotAccessVmDeviceFault)(nil)).Elem()\n}\n\ntype CannotAccessVmDisk struct {\n\tCannotAccessVmDevice\n\n\tFault LocalizedMethodFault `xml:\"fault\"`\n}\n\nfunc init() {\n\tt[\"CannotAccessVmDisk\"] = reflect.TypeOf((*CannotAccessVmDisk)(nil)).Elem()\n}\n\ntype CannotAccessVmDiskFault BaseCannotAccessVmDisk\n\nfunc init() {\n\tt[\"CannotAccessVmDiskFault\"] = reflect.TypeOf((*CannotAccessVmDiskFault)(nil)).Elem()\n}\n\ntype CannotAddHostWithFTVmAsStandalone struct {\n\tHostConnectFault\n}\n\nfunc init() {\n\tt[\"CannotAddHostWithFTVmAsStandalone\"] = reflect.TypeOf((*CannotAddHostWithFTVmAsStandalone)(nil)).Elem()\n}\n\ntype CannotAddHostWithFTVmAsStandaloneFault CannotAddHostWithFTVmAsStandalone\n\nfunc init() {\n\tt[\"CannotAddHostWithFTVmAsStandaloneFault\"] = reflect.TypeOf((*CannotAddHostWithFTVmAsStandaloneFault)(nil)).Elem()\n}\n\ntype CannotAddHostWithFTVmToDifferentCluster struct {\n\tHostConnectFault\n}\n\nfunc init() {\n\tt[\"CannotAddHostWithFTVmToDifferentCluster\"] = reflect.TypeOf((*CannotAddHostWithFTVmToDifferentCluster)(nil)).Elem()\n}\n\ntype CannotAddHostWithFTVmToDifferentClusterFault CannotAddHostWithFTVmToDifferentCluster\n\nfunc init() {\n\tt[\"CannotAddHostWithFTVmToDifferentClusterFault\"] = reflect.TypeOf((*CannotAddHostWithFTVmToDifferentClusterFault)(nil)).Elem()\n}\n\ntype CannotAddHostWithFTVmToNonHACluster struct {\n\tHostConnectFault\n}\n\nfunc init() {\n\tt[\"CannotAddHostWithFTVmToNonHACluster\"] = reflect.TypeOf((*CannotAddHostWithFTVmToNonHACluster)(nil)).Elem()\n}\n\ntype CannotAddHostWithFTVmToNonHAClusterFault CannotAddHostWithFTVmToNonHACluster\n\nfunc init() {\n\tt[\"CannotAddHostWithFTVmToNonHAClusterFault\"] = reflect.TypeOf((*CannotAddHostWithFTVmToNonHAClusterFault)(nil)).Elem()\n}\n\ntype CannotChangeDrsBehaviorForFtSecondary struct 
{\n\tVmFaultToleranceIssue\n\n\tVm     ManagedObjectReference `xml:\"vm\"`\n\tVmName string                 `xml:\"vmName\"`\n}\n\nfunc init() {\n\tt[\"CannotChangeDrsBehaviorForFtSecondary\"] = reflect.TypeOf((*CannotChangeDrsBehaviorForFtSecondary)(nil)).Elem()\n}\n\ntype CannotChangeDrsBehaviorForFtSecondaryFault CannotChangeDrsBehaviorForFtSecondary\n\nfunc init() {\n\tt[\"CannotChangeDrsBehaviorForFtSecondaryFault\"] = reflect.TypeOf((*CannotChangeDrsBehaviorForFtSecondaryFault)(nil)).Elem()\n}\n\ntype CannotChangeHaSettingsForFtSecondary struct {\n\tVmFaultToleranceIssue\n\n\tVm     ManagedObjectReference `xml:\"vm\"`\n\tVmName string                 `xml:\"vmName\"`\n}\n\nfunc init() {\n\tt[\"CannotChangeHaSettingsForFtSecondary\"] = reflect.TypeOf((*CannotChangeHaSettingsForFtSecondary)(nil)).Elem()\n}\n\ntype CannotChangeHaSettingsForFtSecondaryFault CannotChangeHaSettingsForFtSecondary\n\nfunc init() {\n\tt[\"CannotChangeHaSettingsForFtSecondaryFault\"] = reflect.TypeOf((*CannotChangeHaSettingsForFtSecondaryFault)(nil)).Elem()\n}\n\ntype CannotChangeVsanClusterUuid struct {\n\tVsanFault\n}\n\nfunc init() {\n\tt[\"CannotChangeVsanClusterUuid\"] = reflect.TypeOf((*CannotChangeVsanClusterUuid)(nil)).Elem()\n}\n\ntype CannotChangeVsanClusterUuidFault CannotChangeVsanClusterUuid\n\nfunc init() {\n\tt[\"CannotChangeVsanClusterUuidFault\"] = reflect.TypeOf((*CannotChangeVsanClusterUuidFault)(nil)).Elem()\n}\n\ntype CannotChangeVsanNodeUuid struct {\n\tVsanFault\n}\n\nfunc init() {\n\tt[\"CannotChangeVsanNodeUuid\"] = reflect.TypeOf((*CannotChangeVsanNodeUuid)(nil)).Elem()\n}\n\ntype CannotChangeVsanNodeUuidFault CannotChangeVsanNodeUuid\n\nfunc init() {\n\tt[\"CannotChangeVsanNodeUuidFault\"] = reflect.TypeOf((*CannotChangeVsanNodeUuidFault)(nil)).Elem()\n}\n\ntype CannotComputeFTCompatibleHosts struct {\n\tVmFaultToleranceIssue\n\n\tVm     ManagedObjectReference `xml:\"vm\"`\n\tVmName string                 `xml:\"vmName\"`\n}\n\nfunc init() 
{\n\tt[\"CannotComputeFTCompatibleHosts\"] = reflect.TypeOf((*CannotComputeFTCompatibleHosts)(nil)).Elem()\n}\n\ntype CannotComputeFTCompatibleHostsFault CannotComputeFTCompatibleHosts\n\nfunc init() {\n\tt[\"CannotComputeFTCompatibleHostsFault\"] = reflect.TypeOf((*CannotComputeFTCompatibleHostsFault)(nil)).Elem()\n}\n\ntype CannotCreateFile struct {\n\tFileFault\n}\n\nfunc init() {\n\tt[\"CannotCreateFile\"] = reflect.TypeOf((*CannotCreateFile)(nil)).Elem()\n}\n\ntype CannotCreateFileFault CannotCreateFile\n\nfunc init() {\n\tt[\"CannotCreateFileFault\"] = reflect.TypeOf((*CannotCreateFileFault)(nil)).Elem()\n}\n\ntype CannotDecryptPasswords struct {\n\tCustomizationFault\n}\n\nfunc init() {\n\tt[\"CannotDecryptPasswords\"] = reflect.TypeOf((*CannotDecryptPasswords)(nil)).Elem()\n}\n\ntype CannotDecryptPasswordsFault CannotDecryptPasswords\n\nfunc init() {\n\tt[\"CannotDecryptPasswordsFault\"] = reflect.TypeOf((*CannotDecryptPasswordsFault)(nil)).Elem()\n}\n\ntype CannotDeleteFile struct {\n\tFileFault\n}\n\nfunc init() {\n\tt[\"CannotDeleteFile\"] = reflect.TypeOf((*CannotDeleteFile)(nil)).Elem()\n}\n\ntype CannotDeleteFileFault CannotDeleteFile\n\nfunc init() {\n\tt[\"CannotDeleteFileFault\"] = reflect.TypeOf((*CannotDeleteFileFault)(nil)).Elem()\n}\n\ntype CannotDisableDrsOnClustersWithVApps struct {\n\tRuntimeFault\n}\n\nfunc init() {\n\tt[\"CannotDisableDrsOnClustersWithVApps\"] = reflect.TypeOf((*CannotDisableDrsOnClustersWithVApps)(nil)).Elem()\n}\n\ntype CannotDisableDrsOnClustersWithVAppsFault CannotDisableDrsOnClustersWithVApps\n\nfunc init() {\n\tt[\"CannotDisableDrsOnClustersWithVAppsFault\"] = reflect.TypeOf((*CannotDisableDrsOnClustersWithVAppsFault)(nil)).Elem()\n}\n\ntype CannotDisableSnapshot struct {\n\tVmConfigFault\n}\n\nfunc init() {\n\tt[\"CannotDisableSnapshot\"] = reflect.TypeOf((*CannotDisableSnapshot)(nil)).Elem()\n}\n\ntype CannotDisableSnapshotFault CannotDisableSnapshot\n\nfunc init() {\n\tt[\"CannotDisableSnapshotFault\"] = 
reflect.TypeOf((*CannotDisableSnapshotFault)(nil)).Elem()\n}\n\ntype CannotDisconnectHostWithFaultToleranceVm struct {\n\tVimFault\n\n\tHostName string `xml:\"hostName\"`\n}\n\nfunc init() {\n\tt[\"CannotDisconnectHostWithFaultToleranceVm\"] = reflect.TypeOf((*CannotDisconnectHostWithFaultToleranceVm)(nil)).Elem()\n}\n\ntype CannotDisconnectHostWithFaultToleranceVmFault CannotDisconnectHostWithFaultToleranceVm\n\nfunc init() {\n\tt[\"CannotDisconnectHostWithFaultToleranceVmFault\"] = reflect.TypeOf((*CannotDisconnectHostWithFaultToleranceVmFault)(nil)).Elem()\n}\n\ntype CannotEnableVmcpForCluster struct {\n\tVimFault\n\n\tHost     *ManagedObjectReference `xml:\"host,omitempty\"`\n\tHostName string                  `xml:\"hostName,omitempty\"`\n\tReason   string                  `xml:\"reason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CannotEnableVmcpForCluster\"] = reflect.TypeOf((*CannotEnableVmcpForCluster)(nil)).Elem()\n}\n\ntype CannotEnableVmcpForClusterFault CannotEnableVmcpForCluster\n\nfunc init() {\n\tt[\"CannotEnableVmcpForClusterFault\"] = reflect.TypeOf((*CannotEnableVmcpForClusterFault)(nil)).Elem()\n}\n\ntype CannotModifyConfigCpuRequirements struct {\n\tMigrationFault\n}\n\nfunc init() {\n\tt[\"CannotModifyConfigCpuRequirements\"] = reflect.TypeOf((*CannotModifyConfigCpuRequirements)(nil)).Elem()\n}\n\ntype CannotModifyConfigCpuRequirementsFault CannotModifyConfigCpuRequirements\n\nfunc init() {\n\tt[\"CannotModifyConfigCpuRequirementsFault\"] = reflect.TypeOf((*CannotModifyConfigCpuRequirementsFault)(nil)).Elem()\n}\n\ntype CannotMoveFaultToleranceVm struct {\n\tVimFault\n\n\tMoveType string `xml:\"moveType\"`\n\tVmName   string `xml:\"vmName\"`\n}\n\nfunc init() {\n\tt[\"CannotMoveFaultToleranceVm\"] = reflect.TypeOf((*CannotMoveFaultToleranceVm)(nil)).Elem()\n}\n\ntype CannotMoveFaultToleranceVmFault CannotMoveFaultToleranceVm\n\nfunc init() {\n\tt[\"CannotMoveFaultToleranceVmFault\"] = 
reflect.TypeOf((*CannotMoveFaultToleranceVmFault)(nil)).Elem()\n}\n\ntype CannotMoveHostWithFaultToleranceVm struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"CannotMoveHostWithFaultToleranceVm\"] = reflect.TypeOf((*CannotMoveHostWithFaultToleranceVm)(nil)).Elem()\n}\n\ntype CannotMoveHostWithFaultToleranceVmFault CannotMoveHostWithFaultToleranceVm\n\nfunc init() {\n\tt[\"CannotMoveHostWithFaultToleranceVmFault\"] = reflect.TypeOf((*CannotMoveHostWithFaultToleranceVmFault)(nil)).Elem()\n}\n\ntype CannotMoveVmWithDeltaDisk struct {\n\tMigrationFault\n\n\tDevice string `xml:\"device\"`\n}\n\nfunc init() {\n\tt[\"CannotMoveVmWithDeltaDisk\"] = reflect.TypeOf((*CannotMoveVmWithDeltaDisk)(nil)).Elem()\n}\n\ntype CannotMoveVmWithDeltaDiskFault CannotMoveVmWithDeltaDisk\n\nfunc init() {\n\tt[\"CannotMoveVmWithDeltaDiskFault\"] = reflect.TypeOf((*CannotMoveVmWithDeltaDiskFault)(nil)).Elem()\n}\n\ntype CannotMoveVmWithNativeDeltaDisk struct {\n\tMigrationFault\n}\n\nfunc init() {\n\tt[\"CannotMoveVmWithNativeDeltaDisk\"] = reflect.TypeOf((*CannotMoveVmWithNativeDeltaDisk)(nil)).Elem()\n}\n\ntype CannotMoveVmWithNativeDeltaDiskFault CannotMoveVmWithNativeDeltaDisk\n\nfunc init() {\n\tt[\"CannotMoveVmWithNativeDeltaDiskFault\"] = reflect.TypeOf((*CannotMoveVmWithNativeDeltaDiskFault)(nil)).Elem()\n}\n\ntype CannotMoveVsanEnabledHost struct {\n\tVsanFault\n}\n\nfunc init() {\n\tt[\"CannotMoveVsanEnabledHost\"] = reflect.TypeOf((*CannotMoveVsanEnabledHost)(nil)).Elem()\n}\n\ntype CannotMoveVsanEnabledHostFault BaseCannotMoveVsanEnabledHost\n\nfunc init() {\n\tt[\"CannotMoveVsanEnabledHostFault\"] = reflect.TypeOf((*CannotMoveVsanEnabledHostFault)(nil)).Elem()\n}\n\ntype CannotPlaceWithoutPrerequisiteMoves struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"CannotPlaceWithoutPrerequisiteMoves\"] = reflect.TypeOf((*CannotPlaceWithoutPrerequisiteMoves)(nil)).Elem()\n}\n\ntype CannotPlaceWithoutPrerequisiteMovesFault CannotPlaceWithoutPrerequisiteMoves\n\nfunc init() 
{\n\tt[\"CannotPlaceWithoutPrerequisiteMovesFault\"] = reflect.TypeOf((*CannotPlaceWithoutPrerequisiteMovesFault)(nil)).Elem()\n}\n\ntype CannotPowerOffVmInCluster struct {\n\tInvalidState\n\n\tOperation string                 `xml:\"operation\"`\n\tVm        ManagedObjectReference `xml:\"vm\"`\n\tVmName    string                 `xml:\"vmName\"`\n}\n\nfunc init() {\n\tt[\"CannotPowerOffVmInCluster\"] = reflect.TypeOf((*CannotPowerOffVmInCluster)(nil)).Elem()\n}\n\ntype CannotPowerOffVmInClusterFault CannotPowerOffVmInCluster\n\nfunc init() {\n\tt[\"CannotPowerOffVmInClusterFault\"] = reflect.TypeOf((*CannotPowerOffVmInClusterFault)(nil)).Elem()\n}\n\ntype CannotReconfigureVsanWhenHaEnabled struct {\n\tVsanFault\n}\n\nfunc init() {\n\tt[\"CannotReconfigureVsanWhenHaEnabled\"] = reflect.TypeOf((*CannotReconfigureVsanWhenHaEnabled)(nil)).Elem()\n}\n\ntype CannotReconfigureVsanWhenHaEnabledFault CannotReconfigureVsanWhenHaEnabled\n\nfunc init() {\n\tt[\"CannotReconfigureVsanWhenHaEnabledFault\"] = reflect.TypeOf((*CannotReconfigureVsanWhenHaEnabledFault)(nil)).Elem()\n}\n\ntype CannotUseNetwork struct {\n\tVmConfigFault\n\n\tDevice    string                  `xml:\"device\"`\n\tBacking   string                  `xml:\"backing\"`\n\tConnected bool                    `xml:\"connected\"`\n\tReason    string                  `xml:\"reason\"`\n\tNetwork   *ManagedObjectReference `xml:\"network,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CannotUseNetwork\"] = reflect.TypeOf((*CannotUseNetwork)(nil)).Elem()\n}\n\ntype CannotUseNetworkFault CannotUseNetwork\n\nfunc init() {\n\tt[\"CannotUseNetworkFault\"] = reflect.TypeOf((*CannotUseNetworkFault)(nil)).Elem()\n}\n\ntype Capability struct {\n\tDynamicData\n\n\tProvisioningSupported            bool      `xml:\"provisioningSupported\"`\n\tMultiHostSupported               bool      `xml:\"multiHostSupported\"`\n\tUserShellAccessSupported         bool      `xml:\"userShellAccessSupported\"`\n\tSupportedEVCMode                 
[]EVCMode `xml:\"supportedEVCMode,omitempty\"`\n\tNetworkBackupAndRestoreSupported *bool     `xml:\"networkBackupAndRestoreSupported\"`\n}\n\nfunc init() {\n\tt[\"Capability\"] = reflect.TypeOf((*Capability)(nil)).Elem()\n}\n\ntype CertMgrRefreshCACertificatesAndCRLsRequestType struct {\n\tThis ManagedObjectReference   `xml:\"_this\"`\n\tHost []ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"CertMgrRefreshCACertificatesAndCRLsRequestType\"] = reflect.TypeOf((*CertMgrRefreshCACertificatesAndCRLsRequestType)(nil)).Elem()\n}\n\ntype CertMgrRefreshCACertificatesAndCRLs_Task CertMgrRefreshCACertificatesAndCRLsRequestType\n\nfunc init() {\n\tt[\"CertMgrRefreshCACertificatesAndCRLs_Task\"] = reflect.TypeOf((*CertMgrRefreshCACertificatesAndCRLs_Task)(nil)).Elem()\n}\n\ntype CertMgrRefreshCACertificatesAndCRLs_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CertMgrRefreshCertificatesRequestType struct {\n\tThis ManagedObjectReference   `xml:\"_this\"`\n\tHost []ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"CertMgrRefreshCertificatesRequestType\"] = reflect.TypeOf((*CertMgrRefreshCertificatesRequestType)(nil)).Elem()\n}\n\ntype CertMgrRefreshCertificates_Task CertMgrRefreshCertificatesRequestType\n\nfunc init() {\n\tt[\"CertMgrRefreshCertificates_Task\"] = reflect.TypeOf((*CertMgrRefreshCertificates_Task)(nil)).Elem()\n}\n\ntype CertMgrRefreshCertificates_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CertMgrRevokeCertificatesRequestType struct {\n\tThis ManagedObjectReference   `xml:\"_this\"`\n\tHost []ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"CertMgrRevokeCertificatesRequestType\"] = reflect.TypeOf((*CertMgrRevokeCertificatesRequestType)(nil)).Elem()\n}\n\ntype CertMgrRevokeCertificates_Task CertMgrRevokeCertificatesRequestType\n\nfunc init() {\n\tt[\"CertMgrRevokeCertificates_Task\"] = 
reflect.TypeOf((*CertMgrRevokeCertificates_Task)(nil)).Elem()\n}\n\ntype CertMgrRevokeCertificates_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ChangeAccessMode ChangeAccessModeRequestType\n\nfunc init() {\n\tt[\"ChangeAccessMode\"] = reflect.TypeOf((*ChangeAccessMode)(nil)).Elem()\n}\n\ntype ChangeAccessModeRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tPrincipal  string                 `xml:\"principal\"`\n\tIsGroup    bool                   `xml:\"isGroup\"`\n\tAccessMode HostAccessMode         `xml:\"accessMode\"`\n}\n\nfunc init() {\n\tt[\"ChangeAccessModeRequestType\"] = reflect.TypeOf((*ChangeAccessModeRequestType)(nil)).Elem()\n}\n\ntype ChangeAccessModeResponse struct {\n}\n\ntype ChangeFileAttributesInGuest ChangeFileAttributesInGuestRequestType\n\nfunc init() {\n\tt[\"ChangeFileAttributesInGuest\"] = reflect.TypeOf((*ChangeFileAttributesInGuest)(nil)).Elem()\n}\n\ntype ChangeFileAttributesInGuestRequestType struct {\n\tThis           ManagedObjectReference  `xml:\"_this\"`\n\tVm             ManagedObjectReference  `xml:\"vm\"`\n\tAuth           BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tGuestFilePath  string                  `xml:\"guestFilePath\"`\n\tFileAttributes BaseGuestFileAttributes `xml:\"fileAttributes,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ChangeFileAttributesInGuestRequestType\"] = reflect.TypeOf((*ChangeFileAttributesInGuestRequestType)(nil)).Elem()\n}\n\ntype ChangeFileAttributesInGuestResponse struct {\n}\n\ntype ChangeLockdownMode ChangeLockdownModeRequestType\n\nfunc init() {\n\tt[\"ChangeLockdownMode\"] = reflect.TypeOf((*ChangeLockdownMode)(nil)).Elem()\n}\n\ntype ChangeLockdownModeRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tMode HostLockdownMode       `xml:\"mode\"`\n}\n\nfunc init() {\n\tt[\"ChangeLockdownModeRequestType\"] = reflect.TypeOf((*ChangeLockdownModeRequestType)(nil)).Elem()\n}\n\ntype ChangeLockdownModeResponse 
struct {\n}\n\ntype ChangeNFSUserPassword ChangeNFSUserPasswordRequestType\n\nfunc init() {\n\tt[\"ChangeNFSUserPassword\"] = reflect.TypeOf((*ChangeNFSUserPassword)(nil)).Elem()\n}\n\ntype ChangeNFSUserPasswordRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tPassword string                 `xml:\"password\"`\n}\n\nfunc init() {\n\tt[\"ChangeNFSUserPasswordRequestType\"] = reflect.TypeOf((*ChangeNFSUserPasswordRequestType)(nil)).Elem()\n}\n\ntype ChangeNFSUserPasswordResponse struct {\n}\n\ntype ChangeOwner ChangeOwnerRequestType\n\nfunc init() {\n\tt[\"ChangeOwner\"] = reflect.TypeOf((*ChangeOwner)(nil)).Elem()\n}\n\ntype ChangeOwnerRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tName       string                  `xml:\"name\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n\tOwner      string                  `xml:\"owner\"`\n}\n\nfunc init() {\n\tt[\"ChangeOwnerRequestType\"] = reflect.TypeOf((*ChangeOwnerRequestType)(nil)).Elem()\n}\n\ntype ChangeOwnerResponse struct {\n}\n\ntype ChangesInfoEventArgument struct {\n\tDynamicData\n\n\tModified string `xml:\"modified,omitempty\"`\n\tAdded    string `xml:\"added,omitempty\"`\n\tDeleted  string `xml:\"deleted,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ChangesInfoEventArgument\"] = reflect.TypeOf((*ChangesInfoEventArgument)(nil)).Elem()\n}\n\ntype CheckAddHostEvcRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tCnxSpec HostConnectSpec        `xml:\"cnxSpec\"`\n}\n\nfunc init() {\n\tt[\"CheckAddHostEvcRequestType\"] = reflect.TypeOf((*CheckAddHostEvcRequestType)(nil)).Elem()\n}\n\ntype CheckAddHostEvc_Task CheckAddHostEvcRequestType\n\nfunc init() {\n\tt[\"CheckAddHostEvc_Task\"] = reflect.TypeOf((*CheckAddHostEvc_Task)(nil)).Elem()\n}\n\ntype CheckAddHostEvc_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CheckAnswerFileStatusRequestType struct {\n\tThis ManagedObjectReference  
 `xml:\"_this\"`\n\tHost []ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"CheckAnswerFileStatusRequestType\"] = reflect.TypeOf((*CheckAnswerFileStatusRequestType)(nil)).Elem()\n}\n\ntype CheckAnswerFileStatus_Task CheckAnswerFileStatusRequestType\n\nfunc init() {\n\tt[\"CheckAnswerFileStatus_Task\"] = reflect.TypeOf((*CheckAnswerFileStatus_Task)(nil)).Elem()\n}\n\ntype CheckAnswerFileStatus_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CheckCompatibilityRequestType struct {\n\tThis     ManagedObjectReference  `xml:\"_this\"`\n\tVm       ManagedObjectReference  `xml:\"vm\"`\n\tHost     *ManagedObjectReference `xml:\"host,omitempty\"`\n\tPool     *ManagedObjectReference `xml:\"pool,omitempty\"`\n\tTestType []string                `xml:\"testType,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CheckCompatibilityRequestType\"] = reflect.TypeOf((*CheckCompatibilityRequestType)(nil)).Elem()\n}\n\ntype CheckCompatibility_Task CheckCompatibilityRequestType\n\nfunc init() {\n\tt[\"CheckCompatibility_Task\"] = reflect.TypeOf((*CheckCompatibility_Task)(nil)).Elem()\n}\n\ntype CheckCompatibility_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CheckComplianceRequestType struct {\n\tThis    ManagedObjectReference   `xml:\"_this\"`\n\tProfile []ManagedObjectReference `xml:\"profile,omitempty\"`\n\tEntity  []ManagedObjectReference `xml:\"entity,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CheckComplianceRequestType\"] = reflect.TypeOf((*CheckComplianceRequestType)(nil)).Elem()\n}\n\ntype CheckCompliance_Task CheckComplianceRequestType\n\nfunc init() {\n\tt[\"CheckCompliance_Task\"] = reflect.TypeOf((*CheckCompliance_Task)(nil)).Elem()\n}\n\ntype CheckCompliance_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CheckConfigureEvcModeRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tEvcModeKey string                 
`xml:\"evcModeKey\"`\n}\n\nfunc init() {\n\tt[\"CheckConfigureEvcModeRequestType\"] = reflect.TypeOf((*CheckConfigureEvcModeRequestType)(nil)).Elem()\n}\n\ntype CheckConfigureEvcMode_Task CheckConfigureEvcModeRequestType\n\nfunc init() {\n\tt[\"CheckConfigureEvcMode_Task\"] = reflect.TypeOf((*CheckConfigureEvcMode_Task)(nil)).Elem()\n}\n\ntype CheckConfigureEvcMode_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CheckCustomizationResources CheckCustomizationResourcesRequestType\n\nfunc init() {\n\tt[\"CheckCustomizationResources\"] = reflect.TypeOf((*CheckCustomizationResources)(nil)).Elem()\n}\n\ntype CheckCustomizationResourcesRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tGuestOs string                 `xml:\"guestOs\"`\n}\n\nfunc init() {\n\tt[\"CheckCustomizationResourcesRequestType\"] = reflect.TypeOf((*CheckCustomizationResourcesRequestType)(nil)).Elem()\n}\n\ntype CheckCustomizationResourcesResponse struct {\n}\n\ntype CheckCustomizationSpec CheckCustomizationSpecRequestType\n\nfunc init() {\n\tt[\"CheckCustomizationSpec\"] = reflect.TypeOf((*CheckCustomizationSpec)(nil)).Elem()\n}\n\ntype CheckCustomizationSpecRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tSpec CustomizationSpec      `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"CheckCustomizationSpecRequestType\"] = reflect.TypeOf((*CheckCustomizationSpecRequestType)(nil)).Elem()\n}\n\ntype CheckCustomizationSpecResponse struct {\n}\n\ntype CheckForUpdates CheckForUpdatesRequestType\n\nfunc init() {\n\tt[\"CheckForUpdates\"] = reflect.TypeOf((*CheckForUpdates)(nil)).Elem()\n}\n\ntype CheckForUpdatesRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tVersion string                 `xml:\"version,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CheckForUpdatesRequestType\"] = reflect.TypeOf((*CheckForUpdatesRequestType)(nil)).Elem()\n}\n\ntype CheckForUpdatesResponse struct {\n\tReturnval *UpdateSet 
`xml:\"returnval,omitempty\"`\n}\n\ntype CheckHostPatchRequestType struct {\n\tThis       ManagedObjectReference                     `xml:\"_this\"`\n\tMetaUrls   []string                                   `xml:\"metaUrls,omitempty\"`\n\tBundleUrls []string                                   `xml:\"bundleUrls,omitempty\"`\n\tSpec       *HostPatchManagerPatchManagerOperationSpec `xml:\"spec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CheckHostPatchRequestType\"] = reflect.TypeOf((*CheckHostPatchRequestType)(nil)).Elem()\n}\n\ntype CheckHostPatch_Task CheckHostPatchRequestType\n\nfunc init() {\n\tt[\"CheckHostPatch_Task\"] = reflect.TypeOf((*CheckHostPatch_Task)(nil)).Elem()\n}\n\ntype CheckHostPatch_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CheckLicenseFeature CheckLicenseFeatureRequestType\n\nfunc init() {\n\tt[\"CheckLicenseFeature\"] = reflect.TypeOf((*CheckLicenseFeature)(nil)).Elem()\n}\n\ntype CheckLicenseFeatureRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tHost       *ManagedObjectReference `xml:\"host,omitempty\"`\n\tFeatureKey string                  `xml:\"featureKey\"`\n}\n\nfunc init() {\n\tt[\"CheckLicenseFeatureRequestType\"] = reflect.TypeOf((*CheckLicenseFeatureRequestType)(nil)).Elem()\n}\n\ntype CheckLicenseFeatureResponse struct {\n\tReturnval bool `xml:\"returnval\"`\n}\n\ntype CheckMigrateRequestType struct {\n\tThis     ManagedObjectReference   `xml:\"_this\"`\n\tVm       ManagedObjectReference   `xml:\"vm\"`\n\tHost     *ManagedObjectReference  `xml:\"host,omitempty\"`\n\tPool     *ManagedObjectReference  `xml:\"pool,omitempty\"`\n\tState    VirtualMachinePowerState `xml:\"state,omitempty\"`\n\tTestType []string                 `xml:\"testType,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CheckMigrateRequestType\"] = reflect.TypeOf((*CheckMigrateRequestType)(nil)).Elem()\n}\n\ntype CheckMigrate_Task CheckMigrateRequestType\n\nfunc init() {\n\tt[\"CheckMigrate_Task\"] = 
reflect.TypeOf((*CheckMigrate_Task)(nil)).Elem()\n}\n\ntype CheckMigrate_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CheckProfileComplianceRequestType struct {\n\tThis   ManagedObjectReference   `xml:\"_this\"`\n\tEntity []ManagedObjectReference `xml:\"entity,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CheckProfileComplianceRequestType\"] = reflect.TypeOf((*CheckProfileComplianceRequestType)(nil)).Elem()\n}\n\ntype CheckProfileCompliance_Task CheckProfileComplianceRequestType\n\nfunc init() {\n\tt[\"CheckProfileCompliance_Task\"] = reflect.TypeOf((*CheckProfileCompliance_Task)(nil)).Elem()\n}\n\ntype CheckProfileCompliance_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CheckRelocateRequestType struct {\n\tThis     ManagedObjectReference     `xml:\"_this\"`\n\tVm       ManagedObjectReference     `xml:\"vm\"`\n\tSpec     VirtualMachineRelocateSpec `xml:\"spec\"`\n\tTestType []string                   `xml:\"testType,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CheckRelocateRequestType\"] = reflect.TypeOf((*CheckRelocateRequestType)(nil)).Elem()\n}\n\ntype CheckRelocate_Task CheckRelocateRequestType\n\nfunc init() {\n\tt[\"CheckRelocate_Task\"] = reflect.TypeOf((*CheckRelocate_Task)(nil)).Elem()\n}\n\ntype CheckRelocate_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CheckResult struct {\n\tDynamicData\n\n\tVm      *ManagedObjectReference `xml:\"vm,omitempty\"`\n\tHost    *ManagedObjectReference `xml:\"host,omitempty\"`\n\tWarning []LocalizedMethodFault  `xml:\"warning,omitempty\"`\n\tError   []LocalizedMethodFault  `xml:\"error,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CheckResult\"] = reflect.TypeOf((*CheckResult)(nil)).Elem()\n}\n\ntype ChoiceOption struct {\n\tOptionType\n\n\tChoiceInfo   []BaseElementDescription `xml:\"choiceInfo,typeattr\"`\n\tDefaultIndex int32                    `xml:\"defaultIndex,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"ChoiceOption\"] = reflect.TypeOf((*ChoiceOption)(nil)).Elem()\n}\n\ntype ClearComplianceStatus ClearComplianceStatusRequestType\n\nfunc init() {\n\tt[\"ClearComplianceStatus\"] = reflect.TypeOf((*ClearComplianceStatus)(nil)).Elem()\n}\n\ntype ClearComplianceStatusRequestType struct {\n\tThis    ManagedObjectReference   `xml:\"_this\"`\n\tProfile []ManagedObjectReference `xml:\"profile,omitempty\"`\n\tEntity  []ManagedObjectReference `xml:\"entity,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClearComplianceStatusRequestType\"] = reflect.TypeOf((*ClearComplianceStatusRequestType)(nil)).Elem()\n}\n\ntype ClearComplianceStatusResponse struct {\n}\n\ntype ClearNFSUser ClearNFSUserRequestType\n\nfunc init() {\n\tt[\"ClearNFSUser\"] = reflect.TypeOf((*ClearNFSUser)(nil)).Elem()\n}\n\ntype ClearNFSUserRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"ClearNFSUserRequestType\"] = reflect.TypeOf((*ClearNFSUserRequestType)(nil)).Elem()\n}\n\ntype ClearNFSUserResponse struct {\n}\n\ntype ClearSystemEventLog ClearSystemEventLogRequestType\n\nfunc init() {\n\tt[\"ClearSystemEventLog\"] = reflect.TypeOf((*ClearSystemEventLog)(nil)).Elem()\n}\n\ntype ClearSystemEventLogRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"ClearSystemEventLogRequestType\"] = reflect.TypeOf((*ClearSystemEventLogRequestType)(nil)).Elem()\n}\n\ntype ClearSystemEventLogResponse struct {\n}\n\ntype ClockSkew struct {\n\tHostConfigFault\n}\n\nfunc init() {\n\tt[\"ClockSkew\"] = reflect.TypeOf((*ClockSkew)(nil)).Elem()\n}\n\ntype ClockSkewFault ClockSkew\n\nfunc init() {\n\tt[\"ClockSkewFault\"] = reflect.TypeOf((*ClockSkewFault)(nil)).Elem()\n}\n\ntype CloneFromSnapshotNotSupported struct {\n\tMigrationFault\n}\n\nfunc init() {\n\tt[\"CloneFromSnapshotNotSupported\"] = reflect.TypeOf((*CloneFromSnapshotNotSupported)(nil)).Elem()\n}\n\ntype CloneFromSnapshotNotSupportedFault CloneFromSnapshotNotSupported\n\nfunc 
init() {\n\tt[\"CloneFromSnapshotNotSupportedFault\"] = reflect.TypeOf((*CloneFromSnapshotNotSupportedFault)(nil)).Elem()\n}\n\ntype CloneSession CloneSessionRequestType\n\nfunc init() {\n\tt[\"CloneSession\"] = reflect.TypeOf((*CloneSession)(nil)).Elem()\n}\n\ntype CloneSessionRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tCloneTicket string                 `xml:\"cloneTicket\"`\n}\n\nfunc init() {\n\tt[\"CloneSessionRequestType\"] = reflect.TypeOf((*CloneSessionRequestType)(nil)).Elem()\n}\n\ntype CloneSessionResponse struct {\n\tReturnval UserSession `xml:\"returnval\"`\n}\n\ntype CloneVAppRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tName   string                 `xml:\"name\"`\n\tTarget ManagedObjectReference `xml:\"target\"`\n\tSpec   VAppCloneSpec          `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"CloneVAppRequestType\"] = reflect.TypeOf((*CloneVAppRequestType)(nil)).Elem()\n}\n\ntype CloneVApp_Task CloneVAppRequestType\n\nfunc init() {\n\tt[\"CloneVApp_Task\"] = reflect.TypeOf((*CloneVApp_Task)(nil)).Elem()\n}\n\ntype CloneVApp_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CloneVMRequestType struct {\n\tThis   ManagedObjectReference  `xml:\"_this\"`\n\tFolder ManagedObjectReference  `xml:\"folder\"`\n\tName   string                  `xml:\"name\"`\n\tSpec   VirtualMachineCloneSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"CloneVMRequestType\"] = reflect.TypeOf((*CloneVMRequestType)(nil)).Elem()\n}\n\ntype CloneVM_Task CloneVMRequestType\n\nfunc init() {\n\tt[\"CloneVM_Task\"] = reflect.TypeOf((*CloneVM_Task)(nil)).Elem()\n}\n\ntype CloneVM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CloneVStorageObjectRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tId        ID                     `xml:\"id\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n\tSpec      VslmCloneSpec          
`xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"CloneVStorageObjectRequestType\"] = reflect.TypeOf((*CloneVStorageObjectRequestType)(nil)).Elem()\n}\n\ntype CloneVStorageObject_Task CloneVStorageObjectRequestType\n\nfunc init() {\n\tt[\"CloneVStorageObject_Task\"] = reflect.TypeOf((*CloneVStorageObject_Task)(nil)).Elem()\n}\n\ntype CloneVStorageObject_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CloseInventoryViewFolder CloseInventoryViewFolderRequestType\n\nfunc init() {\n\tt[\"CloseInventoryViewFolder\"] = reflect.TypeOf((*CloseInventoryViewFolder)(nil)).Elem()\n}\n\ntype CloseInventoryViewFolderRequestType struct {\n\tThis   ManagedObjectReference   `xml:\"_this\"`\n\tEntity []ManagedObjectReference `xml:\"entity\"`\n}\n\nfunc init() {\n\tt[\"CloseInventoryViewFolderRequestType\"] = reflect.TypeOf((*CloseInventoryViewFolderRequestType)(nil)).Elem()\n}\n\ntype CloseInventoryViewFolderResponse struct {\n\tReturnval []ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype ClusterAction struct {\n\tDynamicData\n\n\tType   string                  `xml:\"type\"`\n\tTarget *ManagedObjectReference `xml:\"target,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterAction\"] = reflect.TypeOf((*ClusterAction)(nil)).Elem()\n}\n\ntype ClusterActionHistory struct {\n\tDynamicData\n\n\tAction BaseClusterAction `xml:\"action,typeattr\"`\n\tTime   time.Time         `xml:\"time\"`\n}\n\nfunc init() {\n\tt[\"ClusterActionHistory\"] = reflect.TypeOf((*ClusterActionHistory)(nil)).Elem()\n}\n\ntype ClusterAffinityRuleSpec struct {\n\tClusterRuleInfo\n\n\tVm []ManagedObjectReference `xml:\"vm\"`\n}\n\nfunc init() {\n\tt[\"ClusterAffinityRuleSpec\"] = reflect.TypeOf((*ClusterAffinityRuleSpec)(nil)).Elem()\n}\n\ntype ClusterAntiAffinityRuleSpec struct {\n\tClusterRuleInfo\n\n\tVm []ManagedObjectReference `xml:\"vm\"`\n}\n\nfunc init() {\n\tt[\"ClusterAntiAffinityRuleSpec\"] = reflect.TypeOf((*ClusterAntiAffinityRuleSpec)(nil)).Elem()\n}\n\ntype 
ClusterAttemptedVmInfo struct {\n\tDynamicData\n\n\tVm   ManagedObjectReference  `xml:\"vm\"`\n\tTask *ManagedObjectReference `xml:\"task,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterAttemptedVmInfo\"] = reflect.TypeOf((*ClusterAttemptedVmInfo)(nil)).Elem()\n}\n\ntype ClusterComplianceCheckedEvent struct {\n\tClusterEvent\n\n\tProfile ProfileEventArgument `xml:\"profile\"`\n}\n\nfunc init() {\n\tt[\"ClusterComplianceCheckedEvent\"] = reflect.TypeOf((*ClusterComplianceCheckedEvent)(nil)).Elem()\n}\n\ntype ClusterComputeResourceSummary struct {\n\tComputeResourceSummary\n\n\tCurrentFailoverLevel int32                              `xml:\"currentFailoverLevel\"`\n\tAdmissionControlInfo BaseClusterDasAdmissionControlInfo `xml:\"admissionControlInfo,omitempty,typeattr\"`\n\tNumVmotions          int32                              `xml:\"numVmotions\"`\n\tTargetBalance        int32                              `xml:\"targetBalance,omitempty\"`\n\tCurrentBalance       int32                              `xml:\"currentBalance,omitempty\"`\n\tUsageSummary         *ClusterUsageSummary               `xml:\"usageSummary,omitempty\"`\n\tCurrentEVCModeKey    string                             `xml:\"currentEVCModeKey,omitempty\"`\n\tDasData              BaseClusterDasData                 `xml:\"dasData,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ClusterComputeResourceSummary\"] = reflect.TypeOf((*ClusterComputeResourceSummary)(nil)).Elem()\n}\n\ntype ClusterConfigInfo struct {\n\tDynamicData\n\n\tDasConfig   ClusterDasConfigInfo     `xml:\"dasConfig\"`\n\tDasVmConfig []ClusterDasVmConfigInfo `xml:\"dasVmConfig,omitempty\"`\n\tDrsConfig   ClusterDrsConfigInfo     `xml:\"drsConfig\"`\n\tDrsVmConfig []ClusterDrsVmConfigInfo `xml:\"drsVmConfig,omitempty\"`\n\tRule        []BaseClusterRuleInfo    `xml:\"rule,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ClusterConfigInfo\"] = reflect.TypeOf((*ClusterConfigInfo)(nil)).Elem()\n}\n\ntype ClusterConfigInfoEx struct 
{\n\tComputeResourceConfigInfo\n\n\tDasConfig           ClusterDasConfigInfo            `xml:\"dasConfig\"`\n\tDasVmConfig         []ClusterDasVmConfigInfo        `xml:\"dasVmConfig,omitempty\"`\n\tDrsConfig           ClusterDrsConfigInfo            `xml:\"drsConfig\"`\n\tDrsVmConfig         []ClusterDrsVmConfigInfo        `xml:\"drsVmConfig,omitempty\"`\n\tRule                []BaseClusterRuleInfo           `xml:\"rule,omitempty,typeattr\"`\n\tOrchestration       *ClusterOrchestrationInfo       `xml:\"orchestration,omitempty\"`\n\tVmOrchestration     []ClusterVmOrchestrationInfo    `xml:\"vmOrchestration,omitempty\"`\n\tDpmConfigInfo       *ClusterDpmConfigInfo           `xml:\"dpmConfigInfo,omitempty\"`\n\tDpmHostConfig       []ClusterDpmHostConfigInfo      `xml:\"dpmHostConfig,omitempty\"`\n\tVsanConfigInfo      *VsanClusterConfigInfo          `xml:\"vsanConfigInfo,omitempty\"`\n\tVsanHostConfig      []VsanHostConfigInfo            `xml:\"vsanHostConfig,omitempty\"`\n\tGroup               []BaseClusterGroupInfo          `xml:\"group,omitempty,typeattr\"`\n\tInfraUpdateHaConfig *ClusterInfraUpdateHaConfigInfo `xml:\"infraUpdateHaConfig,omitempty\"`\n\tProactiveDrsConfig  *ClusterProactiveDrsConfigInfo  `xml:\"proactiveDrsConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterConfigInfoEx\"] = reflect.TypeOf((*ClusterConfigInfoEx)(nil)).Elem()\n}\n\ntype ClusterConfigSpec struct {\n\tDynamicData\n\n\tDasConfig       *ClusterDasConfigInfo    `xml:\"dasConfig,omitempty\"`\n\tDasVmConfigSpec []ClusterDasVmConfigSpec `xml:\"dasVmConfigSpec,omitempty\"`\n\tDrsConfig       *ClusterDrsConfigInfo    `xml:\"drsConfig,omitempty\"`\n\tDrsVmConfigSpec []ClusterDrsVmConfigSpec `xml:\"drsVmConfigSpec,omitempty\"`\n\tRulesSpec       []ClusterRuleSpec        `xml:\"rulesSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterConfigSpec\"] = reflect.TypeOf((*ClusterConfigSpec)(nil)).Elem()\n}\n\ntype ClusterConfigSpecEx struct {\n\tComputeResourceConfigSpec\n\n\tDasConfig           
*ClusterDasConfigInfo           `xml:\"dasConfig,omitempty\"`\n\tDasVmConfigSpec     []ClusterDasVmConfigSpec        `xml:\"dasVmConfigSpec,omitempty\"`\n\tDrsConfig           *ClusterDrsConfigInfo           `xml:\"drsConfig,omitempty\"`\n\tDrsVmConfigSpec     []ClusterDrsVmConfigSpec        `xml:\"drsVmConfigSpec,omitempty\"`\n\tRulesSpec           []ClusterRuleSpec               `xml:\"rulesSpec,omitempty\"`\n\tOrchestration       *ClusterOrchestrationInfo       `xml:\"orchestration,omitempty\"`\n\tVmOrchestrationSpec []ClusterVmOrchestrationSpec    `xml:\"vmOrchestrationSpec,omitempty\"`\n\tDpmConfig           *ClusterDpmConfigInfo           `xml:\"dpmConfig,omitempty\"`\n\tDpmHostConfigSpec   []ClusterDpmHostConfigSpec      `xml:\"dpmHostConfigSpec,omitempty\"`\n\tVsanConfig          *VsanClusterConfigInfo          `xml:\"vsanConfig,omitempty\"`\n\tVsanHostConfigSpec  []VsanHostConfigInfo            `xml:\"vsanHostConfigSpec,omitempty\"`\n\tGroupSpec           []ClusterGroupSpec              `xml:\"groupSpec,omitempty\"`\n\tInfraUpdateHaConfig *ClusterInfraUpdateHaConfigInfo `xml:\"infraUpdateHaConfig,omitempty\"`\n\tProactiveDrsConfig  *ClusterProactiveDrsConfigInfo  `xml:\"proactiveDrsConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterConfigSpecEx\"] = reflect.TypeOf((*ClusterConfigSpecEx)(nil)).Elem()\n}\n\ntype ClusterCreatedEvent struct {\n\tClusterEvent\n\n\tParent FolderEventArgument `xml:\"parent\"`\n}\n\nfunc init() {\n\tt[\"ClusterCreatedEvent\"] = reflect.TypeOf((*ClusterCreatedEvent)(nil)).Elem()\n}\n\ntype ClusterDasAamHostInfo struct {\n\tClusterDasHostInfo\n\n\tHostDasState []ClusterDasAamNodeState `xml:\"hostDasState,omitempty\"`\n\tPrimaryHosts []string                 `xml:\"primaryHosts,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterDasAamHostInfo\"] = reflect.TypeOf((*ClusterDasAamHostInfo)(nil)).Elem()\n}\n\ntype ClusterDasAamNodeState struct {\n\tDynamicData\n\n\tHost         ManagedObjectReference `xml:\"host\"`\n\tName         string  
               `xml:\"name\"`\n\tConfigState  string                 `xml:\"configState\"`\n\tRuntimeState string                 `xml:\"runtimeState\"`\n}\n\nfunc init() {\n\tt[\"ClusterDasAamNodeState\"] = reflect.TypeOf((*ClusterDasAamNodeState)(nil)).Elem()\n}\n\ntype ClusterDasAdmissionControlInfo struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"ClusterDasAdmissionControlInfo\"] = reflect.TypeOf((*ClusterDasAdmissionControlInfo)(nil)).Elem()\n}\n\ntype ClusterDasAdmissionControlPolicy struct {\n\tDynamicData\n\n\tResourceReductionToToleratePercent int32 `xml:\"resourceReductionToToleratePercent,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterDasAdmissionControlPolicy\"] = reflect.TypeOf((*ClusterDasAdmissionControlPolicy)(nil)).Elem()\n}\n\ntype ClusterDasAdvancedRuntimeInfo struct {\n\tDynamicData\n\n\tDasHostInfo            BaseClusterDasHostInfo                           `xml:\"dasHostInfo,omitempty,typeattr\"`\n\tVmcpSupported          *ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo `xml:\"vmcpSupported,omitempty\"`\n\tHeartbeatDatastoreInfo []DasHeartbeatDatastoreInfo                      `xml:\"heartbeatDatastoreInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterDasAdvancedRuntimeInfo\"] = reflect.TypeOf((*ClusterDasAdvancedRuntimeInfo)(nil)).Elem()\n}\n\ntype ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo struct {\n\tDynamicData\n\n\tStorageAPDSupported bool `xml:\"storageAPDSupported\"`\n\tStoragePDLSupported bool `xml:\"storagePDLSupported\"`\n}\n\nfunc init() {\n\tt[\"ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo\"] = reflect.TypeOf((*ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo)(nil)).Elem()\n}\n\ntype ClusterDasConfigInfo struct {\n\tDynamicData\n\n\tEnabled                    *bool                                `xml:\"enabled\"`\n\tVmMonitoring               string                               `xml:\"vmMonitoring,omitempty\"`\n\tHostMonitoring             string                               
`xml:\"hostMonitoring,omitempty\"`\n\tVmComponentProtecting      string                               `xml:\"vmComponentProtecting,omitempty\"`\n\tFailoverLevel              int32                                `xml:\"failoverLevel,omitempty\"`\n\tAdmissionControlPolicy     BaseClusterDasAdmissionControlPolicy `xml:\"admissionControlPolicy,omitempty,typeattr\"`\n\tAdmissionControlEnabled    *bool                                `xml:\"admissionControlEnabled\"`\n\tDefaultVmSettings          *ClusterDasVmSettings                `xml:\"defaultVmSettings,omitempty\"`\n\tOption                     []BaseOptionValue                    `xml:\"option,omitempty,typeattr\"`\n\tHeartbeatDatastore         []ManagedObjectReference             `xml:\"heartbeatDatastore,omitempty\"`\n\tHBDatastoreCandidatePolicy string                               `xml:\"hBDatastoreCandidatePolicy,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterDasConfigInfo\"] = reflect.TypeOf((*ClusterDasConfigInfo)(nil)).Elem()\n}\n\ntype ClusterDasData struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"ClusterDasData\"] = reflect.TypeOf((*ClusterDasData)(nil)).Elem()\n}\n\ntype ClusterDasDataSummary struct {\n\tClusterDasData\n\n\tHostListVersion      int64 `xml:\"hostListVersion\"`\n\tClusterConfigVersion int64 `xml:\"clusterConfigVersion\"`\n\tCompatListVersion    int64 `xml:\"compatListVersion\"`\n}\n\nfunc init() {\n\tt[\"ClusterDasDataSummary\"] = reflect.TypeOf((*ClusterDasDataSummary)(nil)).Elem()\n}\n\ntype ClusterDasFailoverLevelAdvancedRuntimeInfo struct {\n\tClusterDasAdvancedRuntimeInfo\n\n\tSlotInfo                  ClusterDasFailoverLevelAdvancedRuntimeInfoSlotInfo    `xml:\"slotInfo\"`\n\tTotalSlots                int32                                                 `xml:\"totalSlots\"`\n\tUsedSlots                 int32                                                 `xml:\"usedSlots\"`\n\tUnreservedSlots           int32                                                 
`xml:\"unreservedSlots\"`\n\tTotalVms                  int32                                                 `xml:\"totalVms\"`\n\tTotalHosts                int32                                                 `xml:\"totalHosts\"`\n\tTotalGoodHosts            int32                                                 `xml:\"totalGoodHosts\"`\n\tHostSlots                 []ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots `xml:\"hostSlots,omitempty\"`\n\tVmsRequiringMultipleSlots []ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots   `xml:\"vmsRequiringMultipleSlots,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterDasFailoverLevelAdvancedRuntimeInfo\"] = reflect.TypeOf((*ClusterDasFailoverLevelAdvancedRuntimeInfo)(nil)).Elem()\n}\n\ntype ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots struct {\n\tDynamicData\n\n\tHost  ManagedObjectReference `xml:\"host\"`\n\tSlots int32                  `xml:\"slots\"`\n}\n\nfunc init() {\n\tt[\"ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots\"] = reflect.TypeOf((*ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots)(nil)).Elem()\n}\n\ntype ClusterDasFailoverLevelAdvancedRuntimeInfoSlotInfo struct {\n\tDynamicData\n\n\tNumVcpus int32 `xml:\"numVcpus\"`\n\tCpuMHz   int32 `xml:\"cpuMHz\"`\n\tMemoryMB int32 `xml:\"memoryMB\"`\n}\n\nfunc init() {\n\tt[\"ClusterDasFailoverLevelAdvancedRuntimeInfoSlotInfo\"] = reflect.TypeOf((*ClusterDasFailoverLevelAdvancedRuntimeInfoSlotInfo)(nil)).Elem()\n}\n\ntype ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots struct {\n\tDynamicData\n\n\tVm    ManagedObjectReference `xml:\"vm\"`\n\tSlots int32                  `xml:\"slots\"`\n}\n\nfunc init() {\n\tt[\"ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots\"] = reflect.TypeOf((*ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots)(nil)).Elem()\n}\n\ntype ClusterDasFdmHostState struct {\n\tDynamicData\n\n\tState         string                  `xml:\"state\"`\n\tStateReporter *ManagedObjectReference `xml:\"stateReporter,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"ClusterDasFdmHostState\"] = reflect.TypeOf((*ClusterDasFdmHostState)(nil)).Elem()\n}\n\ntype ClusterDasHostInfo struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"ClusterDasHostInfo\"] = reflect.TypeOf((*ClusterDasHostInfo)(nil)).Elem()\n}\n\ntype ClusterDasHostRecommendation struct {\n\tDynamicData\n\n\tHost      ManagedObjectReference `xml:\"host\"`\n\tDrsRating int32                  `xml:\"drsRating,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterDasHostRecommendation\"] = reflect.TypeOf((*ClusterDasHostRecommendation)(nil)).Elem()\n}\n\ntype ClusterDasVmConfigInfo struct {\n\tDynamicData\n\n\tKey                 ManagedObjectReference `xml:\"key\"`\n\tRestartPriority     DasVmPriority          `xml:\"restartPriority,omitempty\"`\n\tPowerOffOnIsolation *bool                  `xml:\"powerOffOnIsolation\"`\n\tDasSettings         *ClusterDasVmSettings  `xml:\"dasSettings,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterDasVmConfigInfo\"] = reflect.TypeOf((*ClusterDasVmConfigInfo)(nil)).Elem()\n}\n\ntype ClusterDasVmConfigSpec struct {\n\tArrayUpdateSpec\n\n\tInfo *ClusterDasVmConfigInfo `xml:\"info,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterDasVmConfigSpec\"] = reflect.TypeOf((*ClusterDasVmConfigSpec)(nil)).Elem()\n}\n\ntype ClusterDasVmSettings struct {\n\tDynamicData\n\n\tRestartPriority               string                                `xml:\"restartPriority,omitempty\"`\n\tRestartPriorityTimeout        int32                                 `xml:\"restartPriorityTimeout,omitempty\"`\n\tIsolationResponse             string                                `xml:\"isolationResponse,omitempty\"`\n\tVmToolsMonitoringSettings     *ClusterVmToolsMonitoringSettings     `xml:\"vmToolsMonitoringSettings,omitempty\"`\n\tVmComponentProtectionSettings *ClusterVmComponentProtectionSettings `xml:\"vmComponentProtectionSettings,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterDasVmSettings\"] = reflect.TypeOf((*ClusterDasVmSettings)(nil)).Elem()\n}\n\ntype 
ClusterDependencyRuleInfo struct {\n\tClusterRuleInfo\n\n\tVmGroup          string `xml:\"vmGroup\"`\n\tDependsOnVmGroup string `xml:\"dependsOnVmGroup\"`\n}\n\nfunc init() {\n\tt[\"ClusterDependencyRuleInfo\"] = reflect.TypeOf((*ClusterDependencyRuleInfo)(nil)).Elem()\n}\n\ntype ClusterDestroyedEvent struct {\n\tClusterEvent\n}\n\nfunc init() {\n\tt[\"ClusterDestroyedEvent\"] = reflect.TypeOf((*ClusterDestroyedEvent)(nil)).Elem()\n}\n\ntype ClusterDpmConfigInfo struct {\n\tDynamicData\n\n\tEnabled             *bool             `xml:\"enabled\"`\n\tDefaultDpmBehavior  DpmBehavior       `xml:\"defaultDpmBehavior,omitempty\"`\n\tHostPowerActionRate int32             `xml:\"hostPowerActionRate,omitempty\"`\n\tOption              []BaseOptionValue `xml:\"option,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ClusterDpmConfigInfo\"] = reflect.TypeOf((*ClusterDpmConfigInfo)(nil)).Elem()\n}\n\ntype ClusterDpmHostConfigInfo struct {\n\tDynamicData\n\n\tKey      ManagedObjectReference `xml:\"key\"`\n\tEnabled  *bool                  `xml:\"enabled\"`\n\tBehavior DpmBehavior            `xml:\"behavior,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterDpmHostConfigInfo\"] = reflect.TypeOf((*ClusterDpmHostConfigInfo)(nil)).Elem()\n}\n\ntype ClusterDpmHostConfigSpec struct {\n\tArrayUpdateSpec\n\n\tInfo *ClusterDpmHostConfigInfo `xml:\"info,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterDpmHostConfigSpec\"] = reflect.TypeOf((*ClusterDpmHostConfigSpec)(nil)).Elem()\n}\n\ntype ClusterDrsConfigInfo struct {\n\tDynamicData\n\n\tEnabled                   *bool             `xml:\"enabled\"`\n\tEnableVmBehaviorOverrides *bool             `xml:\"enableVmBehaviorOverrides\"`\n\tDefaultVmBehavior         DrsBehavior       `xml:\"defaultVmBehavior,omitempty\"`\n\tVmotionRate               int32             `xml:\"vmotionRate,omitempty\"`\n\tOption                    []BaseOptionValue `xml:\"option,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ClusterDrsConfigInfo\"] = 
reflect.TypeOf((*ClusterDrsConfigInfo)(nil)).Elem()\n}\n\ntype ClusterDrsFaults struct {\n\tDynamicData\n\n\tReason     string                           `xml:\"reason\"`\n\tFaultsByVm []BaseClusterDrsFaultsFaultsByVm `xml:\"faultsByVm,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ClusterDrsFaults\"] = reflect.TypeOf((*ClusterDrsFaults)(nil)).Elem()\n}\n\ntype ClusterDrsFaultsFaultsByVirtualDisk struct {\n\tClusterDrsFaultsFaultsByVm\n\n\tDisk *VirtualDiskId `xml:\"disk,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterDrsFaultsFaultsByVirtualDisk\"] = reflect.TypeOf((*ClusterDrsFaultsFaultsByVirtualDisk)(nil)).Elem()\n}\n\ntype ClusterDrsFaultsFaultsByVm struct {\n\tDynamicData\n\n\tVm    *ManagedObjectReference `xml:\"vm,omitempty\"`\n\tFault []LocalizedMethodFault  `xml:\"fault\"`\n}\n\nfunc init() {\n\tt[\"ClusterDrsFaultsFaultsByVm\"] = reflect.TypeOf((*ClusterDrsFaultsFaultsByVm)(nil)).Elem()\n}\n\ntype ClusterDrsMigration struct {\n\tDynamicData\n\n\tKey                   string                 `xml:\"key\"`\n\tTime                  time.Time              `xml:\"time\"`\n\tVm                    ManagedObjectReference `xml:\"vm\"`\n\tCpuLoad               int32                  `xml:\"cpuLoad,omitempty\"`\n\tMemoryLoad            int64                  `xml:\"memoryLoad,omitempty\"`\n\tSource                ManagedObjectReference `xml:\"source\"`\n\tSourceCpuLoad         int32                  `xml:\"sourceCpuLoad,omitempty\"`\n\tSourceMemoryLoad      int64                  `xml:\"sourceMemoryLoad,omitempty\"`\n\tDestination           ManagedObjectReference `xml:\"destination\"`\n\tDestinationCpuLoad    int32                  `xml:\"destinationCpuLoad,omitempty\"`\n\tDestinationMemoryLoad int64                  `xml:\"destinationMemoryLoad,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterDrsMigration\"] = reflect.TypeOf((*ClusterDrsMigration)(nil)).Elem()\n}\n\ntype ClusterDrsRecommendation struct {\n\tDynamicData\n\n\tKey           string                
`xml:\"key\"`\n\tRating        int32                 `xml:\"rating\"`\n\tReason        string                `xml:\"reason\"`\n\tReasonText    string                `xml:\"reasonText\"`\n\tMigrationList []ClusterDrsMigration `xml:\"migrationList\"`\n}\n\nfunc init() {\n\tt[\"ClusterDrsRecommendation\"] = reflect.TypeOf((*ClusterDrsRecommendation)(nil)).Elem()\n}\n\ntype ClusterDrsVmConfigInfo struct {\n\tDynamicData\n\n\tKey      ManagedObjectReference `xml:\"key\"`\n\tEnabled  *bool                  `xml:\"enabled\"`\n\tBehavior DrsBehavior            `xml:\"behavior,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterDrsVmConfigInfo\"] = reflect.TypeOf((*ClusterDrsVmConfigInfo)(nil)).Elem()\n}\n\ntype ClusterDrsVmConfigSpec struct {\n\tArrayUpdateSpec\n\n\tInfo *ClusterDrsVmConfigInfo `xml:\"info,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterDrsVmConfigSpec\"] = reflect.TypeOf((*ClusterDrsVmConfigSpec)(nil)).Elem()\n}\n\ntype ClusterEVCManagerCheckResult struct {\n\tDynamicData\n\n\tEvcModeKey string                   `xml:\"evcModeKey\"`\n\tError      LocalizedMethodFault     `xml:\"error\"`\n\tHost       []ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterEVCManagerCheckResult\"] = reflect.TypeOf((*ClusterEVCManagerCheckResult)(nil)).Elem()\n}\n\ntype ClusterEVCManagerEVCState struct {\n\tDynamicData\n\n\tSupportedEVCMode      []EVCMode                          `xml:\"supportedEVCMode\"`\n\tCurrentEVCModeKey     string                             `xml:\"currentEVCModeKey,omitempty\"`\n\tGuaranteedCPUFeatures []HostCpuIdInfo                    `xml:\"guaranteedCPUFeatures,omitempty\"`\n\tFeatureCapability     []HostFeatureCapability            `xml:\"featureCapability,omitempty\"`\n\tFeatureMask           []HostFeatureMask                  `xml:\"featureMask,omitempty\"`\n\tFeatureRequirement    []VirtualMachineFeatureRequirement `xml:\"featureRequirement,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterEVCManagerEVCState\"] = 
reflect.TypeOf((*ClusterEVCManagerEVCState)(nil)).Elem()\n}\n\ntype ClusterEnterMaintenanceMode ClusterEnterMaintenanceModeRequestType\n\nfunc init() {\n\tt[\"ClusterEnterMaintenanceMode\"] = reflect.TypeOf((*ClusterEnterMaintenanceMode)(nil)).Elem()\n}\n\ntype ClusterEnterMaintenanceModeRequestType struct {\n\tThis   ManagedObjectReference   `xml:\"_this\"`\n\tHost   []ManagedObjectReference `xml:\"host\"`\n\tOption []BaseOptionValue        `xml:\"option,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ClusterEnterMaintenanceModeRequestType\"] = reflect.TypeOf((*ClusterEnterMaintenanceModeRequestType)(nil)).Elem()\n}\n\ntype ClusterEnterMaintenanceModeResponse struct {\n\tReturnval ClusterEnterMaintenanceResult `xml:\"returnval\"`\n}\n\ntype ClusterEnterMaintenanceResult struct {\n\tDynamicData\n\n\tRecommendations []ClusterRecommendation `xml:\"recommendations,omitempty\"`\n\tFault           *ClusterDrsFaults       `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterEnterMaintenanceResult\"] = reflect.TypeOf((*ClusterEnterMaintenanceResult)(nil)).Elem()\n}\n\ntype ClusterEvent struct {\n\tEvent\n}\n\nfunc init() {\n\tt[\"ClusterEvent\"] = reflect.TypeOf((*ClusterEvent)(nil)).Elem()\n}\n\ntype ClusterFailoverHostAdmissionControlInfo struct {\n\tClusterDasAdmissionControlInfo\n\n\tHostStatus []ClusterFailoverHostAdmissionControlInfoHostStatus `xml:\"hostStatus,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterFailoverHostAdmissionControlInfo\"] = reflect.TypeOf((*ClusterFailoverHostAdmissionControlInfo)(nil)).Elem()\n}\n\ntype ClusterFailoverHostAdmissionControlInfoHostStatus struct {\n\tDynamicData\n\n\tHost   ManagedObjectReference `xml:\"host\"`\n\tStatus ManagedEntityStatus    `xml:\"status\"`\n}\n\nfunc init() {\n\tt[\"ClusterFailoverHostAdmissionControlInfoHostStatus\"] = reflect.TypeOf((*ClusterFailoverHostAdmissionControlInfoHostStatus)(nil)).Elem()\n}\n\ntype ClusterFailoverHostAdmissionControlPolicy struct 
{\n\tClusterDasAdmissionControlPolicy\n\n\tFailoverHosts []ManagedObjectReference `xml:\"failoverHosts,omitempty\"`\n\tFailoverLevel int32                    `xml:\"failoverLevel,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterFailoverHostAdmissionControlPolicy\"] = reflect.TypeOf((*ClusterFailoverHostAdmissionControlPolicy)(nil)).Elem()\n}\n\ntype ClusterFailoverLevelAdmissionControlInfo struct {\n\tClusterDasAdmissionControlInfo\n\n\tCurrentFailoverLevel int32 `xml:\"currentFailoverLevel\"`\n}\n\nfunc init() {\n\tt[\"ClusterFailoverLevelAdmissionControlInfo\"] = reflect.TypeOf((*ClusterFailoverLevelAdmissionControlInfo)(nil)).Elem()\n}\n\ntype ClusterFailoverLevelAdmissionControlPolicy struct {\n\tClusterDasAdmissionControlPolicy\n\n\tFailoverLevel int32                 `xml:\"failoverLevel\"`\n\tSlotPolicy    BaseClusterSlotPolicy `xml:\"slotPolicy,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ClusterFailoverLevelAdmissionControlPolicy\"] = reflect.TypeOf((*ClusterFailoverLevelAdmissionControlPolicy)(nil)).Elem()\n}\n\ntype ClusterFailoverResourcesAdmissionControlInfo struct {\n\tClusterDasAdmissionControlInfo\n\n\tCurrentCpuFailoverResourcesPercent    int32 `xml:\"currentCpuFailoverResourcesPercent\"`\n\tCurrentMemoryFailoverResourcesPercent int32 `xml:\"currentMemoryFailoverResourcesPercent\"`\n}\n\nfunc init() {\n\tt[\"ClusterFailoverResourcesAdmissionControlInfo\"] = reflect.TypeOf((*ClusterFailoverResourcesAdmissionControlInfo)(nil)).Elem()\n}\n\ntype ClusterFailoverResourcesAdmissionControlPolicy struct {\n\tClusterDasAdmissionControlPolicy\n\n\tCpuFailoverResourcesPercent    int32 `xml:\"cpuFailoverResourcesPercent\"`\n\tMemoryFailoverResourcesPercent int32 `xml:\"memoryFailoverResourcesPercent\"`\n\tFailoverLevel                  int32 `xml:\"failoverLevel,omitempty\"`\n\tAutoComputePercentages         *bool `xml:\"autoComputePercentages\"`\n}\n\nfunc init() {\n\tt[\"ClusterFailoverResourcesAdmissionControlPolicy\"] = 
reflect.TypeOf((*ClusterFailoverResourcesAdmissionControlPolicy)(nil)).Elem()\n}\n\ntype ClusterFixedSizeSlotPolicy struct {\n\tClusterSlotPolicy\n\n\tCpu    int32 `xml:\"cpu\"`\n\tMemory int32 `xml:\"memory\"`\n}\n\nfunc init() {\n\tt[\"ClusterFixedSizeSlotPolicy\"] = reflect.TypeOf((*ClusterFixedSizeSlotPolicy)(nil)).Elem()\n}\n\ntype ClusterGroupInfo struct {\n\tDynamicData\n\n\tName        string `xml:\"name\"`\n\tUserCreated *bool  `xml:\"userCreated\"`\n\tUniqueID    string `xml:\"uniqueID,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterGroupInfo\"] = reflect.TypeOf((*ClusterGroupInfo)(nil)).Elem()\n}\n\ntype ClusterGroupSpec struct {\n\tArrayUpdateSpec\n\n\tInfo BaseClusterGroupInfo `xml:\"info,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ClusterGroupSpec\"] = reflect.TypeOf((*ClusterGroupSpec)(nil)).Elem()\n}\n\ntype ClusterHostGroup struct {\n\tClusterGroupInfo\n\n\tHost []ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterHostGroup\"] = reflect.TypeOf((*ClusterHostGroup)(nil)).Elem()\n}\n\ntype ClusterHostInfraUpdateHaModeAction struct {\n\tClusterAction\n\n\tOperationType string `xml:\"operationType\"`\n}\n\nfunc init() {\n\tt[\"ClusterHostInfraUpdateHaModeAction\"] = reflect.TypeOf((*ClusterHostInfraUpdateHaModeAction)(nil)).Elem()\n}\n\ntype ClusterHostPowerAction struct {\n\tClusterAction\n\n\tOperationType        HostPowerOperationType `xml:\"operationType\"`\n\tPowerConsumptionWatt int32                  `xml:\"powerConsumptionWatt,omitempty\"`\n\tCpuCapacityMHz       int32                  `xml:\"cpuCapacityMHz,omitempty\"`\n\tMemCapacityMB        int32                  `xml:\"memCapacityMB,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterHostPowerAction\"] = reflect.TypeOf((*ClusterHostPowerAction)(nil)).Elem()\n}\n\ntype ClusterHostRecommendation struct {\n\tDynamicData\n\n\tHost   ManagedObjectReference `xml:\"host\"`\n\tRating int32                  `xml:\"rating\"`\n}\n\nfunc init() 
{\n\tt[\"ClusterHostRecommendation\"] = reflect.TypeOf((*ClusterHostRecommendation)(nil)).Elem()\n}\n\ntype ClusterInfraUpdateHaConfigInfo struct {\n\tDynamicData\n\n\tEnabled             *bool    `xml:\"enabled\"`\n\tBehavior            string   `xml:\"behavior,omitempty\"`\n\tModerateRemediation string   `xml:\"moderateRemediation,omitempty\"`\n\tSevereRemediation   string   `xml:\"severeRemediation,omitempty\"`\n\tProviders           []string `xml:\"providers,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterInfraUpdateHaConfigInfo\"] = reflect.TypeOf((*ClusterInfraUpdateHaConfigInfo)(nil)).Elem()\n}\n\ntype ClusterInitialPlacementAction struct {\n\tClusterAction\n\n\tTargetHost ManagedObjectReference  `xml:\"targetHost\"`\n\tPool       *ManagedObjectReference `xml:\"pool,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterInitialPlacementAction\"] = reflect.TypeOf((*ClusterInitialPlacementAction)(nil)).Elem()\n}\n\ntype ClusterIoFilterInfo struct {\n\tIoFilterInfo\n\n\tOpType string `xml:\"opType\"`\n\tVibUrl string `xml:\"vibUrl,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterIoFilterInfo\"] = reflect.TypeOf((*ClusterIoFilterInfo)(nil)).Elem()\n}\n\ntype ClusterMigrationAction struct {\n\tClusterAction\n\n\tDrsMigration *ClusterDrsMigration `xml:\"drsMigration,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterMigrationAction\"] = reflect.TypeOf((*ClusterMigrationAction)(nil)).Elem()\n}\n\ntype ClusterNetworkConfigSpec struct {\n\tDynamicData\n\n\tNetworkPortGroup ManagedObjectReference  `xml:\"networkPortGroup\"`\n\tIpSettings       CustomizationIPSettings `xml:\"ipSettings\"`\n}\n\nfunc init() {\n\tt[\"ClusterNetworkConfigSpec\"] = reflect.TypeOf((*ClusterNetworkConfigSpec)(nil)).Elem()\n}\n\ntype ClusterNotAttemptedVmInfo struct {\n\tDynamicData\n\n\tVm    ManagedObjectReference `xml:\"vm\"`\n\tFault LocalizedMethodFault   `xml:\"fault\"`\n}\n\nfunc init() {\n\tt[\"ClusterNotAttemptedVmInfo\"] = reflect.TypeOf((*ClusterNotAttemptedVmInfo)(nil)).Elem()\n}\n\ntype 
ClusterOrchestrationInfo struct {\n\tDynamicData\n\n\tDefaultVmReadiness *ClusterVmReadiness `xml:\"defaultVmReadiness,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterOrchestrationInfo\"] = reflect.TypeOf((*ClusterOrchestrationInfo)(nil)).Elem()\n}\n\ntype ClusterOvercommittedEvent struct {\n\tClusterEvent\n}\n\nfunc init() {\n\tt[\"ClusterOvercommittedEvent\"] = reflect.TypeOf((*ClusterOvercommittedEvent)(nil)).Elem()\n}\n\ntype ClusterPowerOnVmResult struct {\n\tDynamicData\n\n\tAttempted       []ClusterAttemptedVmInfo    `xml:\"attempted,omitempty\"`\n\tNotAttempted    []ClusterNotAttemptedVmInfo `xml:\"notAttempted,omitempty\"`\n\tRecommendations []ClusterRecommendation     `xml:\"recommendations,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterPowerOnVmResult\"] = reflect.TypeOf((*ClusterPowerOnVmResult)(nil)).Elem()\n}\n\ntype ClusterProactiveDrsConfigInfo struct {\n\tDynamicData\n\n\tEnabled *bool `xml:\"enabled\"`\n}\n\nfunc init() {\n\tt[\"ClusterProactiveDrsConfigInfo\"] = reflect.TypeOf((*ClusterProactiveDrsConfigInfo)(nil)).Elem()\n}\n\ntype ClusterProfileCompleteConfigSpec struct {\n\tClusterProfileConfigSpec\n\n\tComplyProfile *ComplianceProfile `xml:\"complyProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterProfileCompleteConfigSpec\"] = reflect.TypeOf((*ClusterProfileCompleteConfigSpec)(nil)).Elem()\n}\n\ntype ClusterProfileConfigInfo struct {\n\tProfileConfigInfo\n\n\tComplyProfile *ComplianceProfile `xml:\"complyProfile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterProfileConfigInfo\"] = reflect.TypeOf((*ClusterProfileConfigInfo)(nil)).Elem()\n}\n\ntype ClusterProfileConfigServiceCreateSpec struct {\n\tClusterProfileConfigSpec\n\n\tServiceType []string `xml:\"serviceType,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterProfileConfigServiceCreateSpec\"] = reflect.TypeOf((*ClusterProfileConfigServiceCreateSpec)(nil)).Elem()\n}\n\ntype ClusterProfileConfigSpec struct {\n\tClusterProfileCreateSpec\n}\n\nfunc init() {\n\tt[\"ClusterProfileConfigSpec\"] 
= reflect.TypeOf((*ClusterProfileConfigSpec)(nil)).Elem()\n}\n\ntype ClusterProfileCreateSpec struct {\n\tProfileCreateSpec\n}\n\nfunc init() {\n\tt[\"ClusterProfileCreateSpec\"] = reflect.TypeOf((*ClusterProfileCreateSpec)(nil)).Elem()\n}\n\ntype ClusterRecommendation struct {\n\tDynamicData\n\n\tKey            string                  `xml:\"key\"`\n\tType           string                  `xml:\"type\"`\n\tTime           time.Time               `xml:\"time\"`\n\tRating         int32                   `xml:\"rating\"`\n\tReason         string                  `xml:\"reason\"`\n\tReasonText     string                  `xml:\"reasonText\"`\n\tWarningText    string                  `xml:\"warningText,omitempty\"`\n\tWarningDetails *LocalizableMessage     `xml:\"warningDetails,omitempty\"`\n\tPrerequisite   []string                `xml:\"prerequisite,omitempty\"`\n\tAction         []BaseClusterAction     `xml:\"action,omitempty,typeattr\"`\n\tTarget         *ManagedObjectReference `xml:\"target,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterRecommendation\"] = reflect.TypeOf((*ClusterRecommendation)(nil)).Elem()\n}\n\ntype ClusterReconfiguredEvent struct {\n\tClusterEvent\n\n\tConfigChanges *ChangesInfoEventArgument `xml:\"configChanges,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterReconfiguredEvent\"] = reflect.TypeOf((*ClusterReconfiguredEvent)(nil)).Elem()\n}\n\ntype ClusterResourceUsageSummary struct {\n\tDynamicData\n\n\tCpuUsedMHz        int32 `xml:\"cpuUsedMHz\"`\n\tCpuCapacityMHz    int32 `xml:\"cpuCapacityMHz\"`\n\tMemUsedMB         int32 `xml:\"memUsedMB\"`\n\tMemCapacityMB     int32 `xml:\"memCapacityMB\"`\n\tStorageUsedMB     int64 `xml:\"storageUsedMB\"`\n\tStorageCapacityMB int64 `xml:\"storageCapacityMB\"`\n}\n\nfunc init() {\n\tt[\"ClusterResourceUsageSummary\"] = reflect.TypeOf((*ClusterResourceUsageSummary)(nil)).Elem()\n}\n\ntype ClusterRuleInfo struct {\n\tDynamicData\n\n\tKey          int32               `xml:\"key,omitempty\"`\n\tStatus       
ManagedEntityStatus `xml:\"status,omitempty\"`\n\tEnabled      *bool               `xml:\"enabled\"`\n\tName         string              `xml:\"name,omitempty\"`\n\tMandatory    *bool               `xml:\"mandatory\"`\n\tUserCreated  *bool               `xml:\"userCreated\"`\n\tInCompliance *bool               `xml:\"inCompliance\"`\n\tRuleUuid     string              `xml:\"ruleUuid,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterRuleInfo\"] = reflect.TypeOf((*ClusterRuleInfo)(nil)).Elem()\n}\n\ntype ClusterRuleSpec struct {\n\tArrayUpdateSpec\n\n\tInfo BaseClusterRuleInfo `xml:\"info,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ClusterRuleSpec\"] = reflect.TypeOf((*ClusterRuleSpec)(nil)).Elem()\n}\n\ntype ClusterSlotPolicy struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"ClusterSlotPolicy\"] = reflect.TypeOf((*ClusterSlotPolicy)(nil)).Elem()\n}\n\ntype ClusterStatusChangedEvent struct {\n\tClusterEvent\n\n\tOldStatus string `xml:\"oldStatus\"`\n\tNewStatus string `xml:\"newStatus\"`\n}\n\nfunc init() {\n\tt[\"ClusterStatusChangedEvent\"] = reflect.TypeOf((*ClusterStatusChangedEvent)(nil)).Elem()\n}\n\ntype ClusterUsageSummary struct {\n\tDynamicData\n\n\tTotalCpuCapacityMhz         int32 `xml:\"totalCpuCapacityMhz\"`\n\tTotalMemCapacityMB          int32 `xml:\"totalMemCapacityMB\"`\n\tCpuReservationMhz           int32 `xml:\"cpuReservationMhz\"`\n\tMemReservationMB            int32 `xml:\"memReservationMB\"`\n\tPoweredOffCpuReservationMhz int32 `xml:\"poweredOffCpuReservationMhz,omitempty\"`\n\tPoweredOffMemReservationMB  int32 `xml:\"poweredOffMemReservationMB,omitempty\"`\n\tCpuDemandMhz                int32 `xml:\"cpuDemandMhz\"`\n\tMemDemandMB                 int32 `xml:\"memDemandMB\"`\n\tStatsGenNumber              int64 `xml:\"statsGenNumber\"`\n\tCpuEntitledMhz              int32 `xml:\"cpuEntitledMhz\"`\n\tMemEntitledMB               int32 `xml:\"memEntitledMB\"`\n\tPoweredOffVmCount           int32 `xml:\"poweredOffVmCount\"`\n\tTotalVmCount        
        int32 `xml:\"totalVmCount\"`\n}\n\nfunc init() {\n\tt[\"ClusterUsageSummary\"] = reflect.TypeOf((*ClusterUsageSummary)(nil)).Elem()\n}\n\ntype ClusterVmComponentProtectionSettings struct {\n\tDynamicData\n\n\tVmStorageProtectionForAPD string `xml:\"vmStorageProtectionForAPD,omitempty\"`\n\tEnableAPDTimeoutForHosts  *bool  `xml:\"enableAPDTimeoutForHosts\"`\n\tVmTerminateDelayForAPDSec int32  `xml:\"vmTerminateDelayForAPDSec,omitempty\"`\n\tVmReactionOnAPDCleared    string `xml:\"vmReactionOnAPDCleared,omitempty\"`\n\tVmStorageProtectionForPDL string `xml:\"vmStorageProtectionForPDL,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterVmComponentProtectionSettings\"] = reflect.TypeOf((*ClusterVmComponentProtectionSettings)(nil)).Elem()\n}\n\ntype ClusterVmGroup struct {\n\tClusterGroupInfo\n\n\tVm []ManagedObjectReference `xml:\"vm,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterVmGroup\"] = reflect.TypeOf((*ClusterVmGroup)(nil)).Elem()\n}\n\ntype ClusterVmHostRuleInfo struct {\n\tClusterRuleInfo\n\n\tVmGroupName             string `xml:\"vmGroupName,omitempty\"`\n\tAffineHostGroupName     string `xml:\"affineHostGroupName,omitempty\"`\n\tAntiAffineHostGroupName string `xml:\"antiAffineHostGroupName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterVmHostRuleInfo\"] = reflect.TypeOf((*ClusterVmHostRuleInfo)(nil)).Elem()\n}\n\ntype ClusterVmOrchestrationInfo struct {\n\tDynamicData\n\n\tVm          ManagedObjectReference `xml:\"vm\"`\n\tVmReadiness ClusterVmReadiness     `xml:\"vmReadiness\"`\n}\n\nfunc init() {\n\tt[\"ClusterVmOrchestrationInfo\"] = reflect.TypeOf((*ClusterVmOrchestrationInfo)(nil)).Elem()\n}\n\ntype ClusterVmOrchestrationSpec struct {\n\tArrayUpdateSpec\n\n\tInfo *ClusterVmOrchestrationInfo `xml:\"info,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterVmOrchestrationSpec\"] = reflect.TypeOf((*ClusterVmOrchestrationSpec)(nil)).Elem()\n}\n\ntype ClusterVmReadiness struct {\n\tDynamicData\n\n\tReadyCondition string 
`xml:\"readyCondition,omitempty\"`\n\tPostReadyDelay int32  `xml:\"postReadyDelay,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterVmReadiness\"] = reflect.TypeOf((*ClusterVmReadiness)(nil)).Elem()\n}\n\ntype ClusterVmToolsMonitoringSettings struct {\n\tDynamicData\n\n\tEnabled          *bool  `xml:\"enabled\"`\n\tVmMonitoring     string `xml:\"vmMonitoring,omitempty\"`\n\tClusterSettings  *bool  `xml:\"clusterSettings\"`\n\tFailureInterval  int32  `xml:\"failureInterval,omitempty\"`\n\tMinUpTime        int32  `xml:\"minUpTime,omitempty\"`\n\tMaxFailures      int32  `xml:\"maxFailures,omitempty\"`\n\tMaxFailureWindow int32  `xml:\"maxFailureWindow,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ClusterVmToolsMonitoringSettings\"] = reflect.TypeOf((*ClusterVmToolsMonitoringSettings)(nil)).Elem()\n}\n\ntype CollectorAddressUnset struct {\n\tDvsFault\n}\n\nfunc init() {\n\tt[\"CollectorAddressUnset\"] = reflect.TypeOf((*CollectorAddressUnset)(nil)).Elem()\n}\n\ntype CollectorAddressUnsetFault CollectorAddressUnset\n\nfunc init() {\n\tt[\"CollectorAddressUnsetFault\"] = reflect.TypeOf((*CollectorAddressUnsetFault)(nil)).Elem()\n}\n\ntype ComplianceFailure struct {\n\tDynamicData\n\n\tFailureType    string                                     `xml:\"failureType\"`\n\tMessage        LocalizableMessage                         `xml:\"message\"`\n\tExpressionName string                                     `xml:\"expressionName,omitempty\"`\n\tFailureValues  []ComplianceFailureComplianceFailureValues `xml:\"failureValues,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ComplianceFailure\"] = reflect.TypeOf((*ComplianceFailure)(nil)).Elem()\n}\n\ntype ComplianceFailureComplianceFailureValues struct {\n\tDynamicData\n\n\tComparisonIdentifier string  `xml:\"comparisonIdentifier\"`\n\tProfileInstance      string  `xml:\"profileInstance,omitempty\"`\n\tHostValue            AnyType `xml:\"hostValue,omitempty,typeattr\"`\n\tProfileValue         AnyType 
`xml:\"profileValue,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ComplianceFailureComplianceFailureValues\"] = reflect.TypeOf((*ComplianceFailureComplianceFailureValues)(nil)).Elem()\n}\n\ntype ComplianceLocator struct {\n\tDynamicData\n\n\tExpressionName string              `xml:\"expressionName\"`\n\tApplyPath      ProfilePropertyPath `xml:\"applyPath\"`\n}\n\nfunc init() {\n\tt[\"ComplianceLocator\"] = reflect.TypeOf((*ComplianceLocator)(nil)).Elem()\n}\n\ntype ComplianceProfile struct {\n\tDynamicData\n\n\tExpression     []BaseProfileExpression `xml:\"expression,typeattr\"`\n\tRootExpression string                  `xml:\"rootExpression\"`\n}\n\nfunc init() {\n\tt[\"ComplianceProfile\"] = reflect.TypeOf((*ComplianceProfile)(nil)).Elem()\n}\n\ntype ComplianceResult struct {\n\tDynamicData\n\n\tProfile          *ManagedObjectReference `xml:\"profile,omitempty\"`\n\tComplianceStatus string                  `xml:\"complianceStatus\"`\n\tEntity           *ManagedObjectReference `xml:\"entity,omitempty\"`\n\tCheckTime        *time.Time              `xml:\"checkTime\"`\n\tFailure          []ComplianceFailure     `xml:\"failure,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ComplianceResult\"] = reflect.TypeOf((*ComplianceResult)(nil)).Elem()\n}\n\ntype CompositePolicyOption struct {\n\tPolicyOption\n\n\tOption []BasePolicyOption `xml:\"option,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"CompositePolicyOption\"] = reflect.TypeOf((*CompositePolicyOption)(nil)).Elem()\n}\n\ntype ComputeDiskPartitionInfo ComputeDiskPartitionInfoRequestType\n\nfunc init() {\n\tt[\"ComputeDiskPartitionInfo\"] = reflect.TypeOf((*ComputeDiskPartitionInfo)(nil)).Elem()\n}\n\ntype ComputeDiskPartitionInfoForResize ComputeDiskPartitionInfoForResizeRequestType\n\nfunc init() {\n\tt[\"ComputeDiskPartitionInfoForResize\"] = reflect.TypeOf((*ComputeDiskPartitionInfoForResize)(nil)).Elem()\n}\n\ntype ComputeDiskPartitionInfoForResizeRequestType struct {\n\tThis            ManagedObjectReference      
`xml:\"_this\"`\n\tPartition       HostScsiDiskPartition       `xml:\"partition\"`\n\tBlockRange      HostDiskPartitionBlockRange `xml:\"blockRange\"`\n\tPartitionFormat string                      `xml:\"partitionFormat,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ComputeDiskPartitionInfoForResizeRequestType\"] = reflect.TypeOf((*ComputeDiskPartitionInfoForResizeRequestType)(nil)).Elem()\n}\n\ntype ComputeDiskPartitionInfoForResizeResponse struct {\n\tReturnval HostDiskPartitionInfo `xml:\"returnval\"`\n}\n\ntype ComputeDiskPartitionInfoRequestType struct {\n\tThis            ManagedObjectReference  `xml:\"_this\"`\n\tDevicePath      string                  `xml:\"devicePath\"`\n\tLayout          HostDiskPartitionLayout `xml:\"layout\"`\n\tPartitionFormat string                  `xml:\"partitionFormat,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ComputeDiskPartitionInfoRequestType\"] = reflect.TypeOf((*ComputeDiskPartitionInfoRequestType)(nil)).Elem()\n}\n\ntype ComputeDiskPartitionInfoResponse struct {\n\tReturnval HostDiskPartitionInfo `xml:\"returnval\"`\n}\n\ntype ComputeResourceConfigInfo struct {\n\tDynamicData\n\n\tVmSwapPlacement           string `xml:\"vmSwapPlacement\"`\n\tSpbmEnabled               *bool  `xml:\"spbmEnabled\"`\n\tDefaultHardwareVersionKey string `xml:\"defaultHardwareVersionKey,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ComputeResourceConfigInfo\"] = reflect.TypeOf((*ComputeResourceConfigInfo)(nil)).Elem()\n}\n\ntype ComputeResourceConfigSpec struct {\n\tDynamicData\n\n\tVmSwapPlacement           string `xml:\"vmSwapPlacement,omitempty\"`\n\tSpbmEnabled               *bool  `xml:\"spbmEnabled\"`\n\tDefaultHardwareVersionKey string `xml:\"defaultHardwareVersionKey,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ComputeResourceConfigSpec\"] = reflect.TypeOf((*ComputeResourceConfigSpec)(nil)).Elem()\n}\n\ntype ComputeResourceEventArgument struct {\n\tEntityEventArgument\n\n\tComputeResource ManagedObjectReference `xml:\"computeResource\"`\n}\n\nfunc init() 
{\n\tt[\"ComputeResourceEventArgument\"] = reflect.TypeOf((*ComputeResourceEventArgument)(nil)).Elem()\n}\n\ntype ComputeResourceHostSPBMLicenseInfo struct {\n\tDynamicData\n\n\tHost         ManagedObjectReference                                 `xml:\"host\"`\n\tLicenseState ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState `xml:\"licenseState\"`\n}\n\nfunc init() {\n\tt[\"ComputeResourceHostSPBMLicenseInfo\"] = reflect.TypeOf((*ComputeResourceHostSPBMLicenseInfo)(nil)).Elem()\n}\n\ntype ComputeResourceSummary struct {\n\tDynamicData\n\n\tTotalCpu          int32               `xml:\"totalCpu\"`\n\tTotalMemory       int64               `xml:\"totalMemory\"`\n\tNumCpuCores       int16               `xml:\"numCpuCores\"`\n\tNumCpuThreads     int16               `xml:\"numCpuThreads\"`\n\tEffectiveCpu      int32               `xml:\"effectiveCpu\"`\n\tEffectiveMemory   int64               `xml:\"effectiveMemory\"`\n\tNumHosts          int32               `xml:\"numHosts\"`\n\tNumEffectiveHosts int32               `xml:\"numEffectiveHosts\"`\n\tOverallStatus     ManagedEntityStatus `xml:\"overallStatus\"`\n}\n\nfunc init() {\n\tt[\"ComputeResourceSummary\"] = reflect.TypeOf((*ComputeResourceSummary)(nil)).Elem()\n}\n\ntype ConcurrentAccess struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"ConcurrentAccess\"] = reflect.TypeOf((*ConcurrentAccess)(nil)).Elem()\n}\n\ntype ConcurrentAccessFault ConcurrentAccess\n\nfunc init() {\n\tt[\"ConcurrentAccessFault\"] = reflect.TypeOf((*ConcurrentAccessFault)(nil)).Elem()\n}\n\ntype ConfigTarget struct {\n\tDynamicData\n\n\tNumCpus                     int32                                       `xml:\"numCpus\"`\n\tNumCpuCores                 int32                                       `xml:\"numCpuCores\"`\n\tNumNumaNodes                int32                                       `xml:\"numNumaNodes\"`\n\tSmcPresent                  *bool                                       `xml:\"smcPresent\"`\n\tDatastore                   
[]VirtualMachineDatastoreInfo               `xml:\"datastore,omitempty\"`\n\tNetwork                     []VirtualMachineNetworkInfo                 `xml:\"network,omitempty\"`\n\tOpaqueNetwork               []OpaqueNetworkTargetInfo                   `xml:\"opaqueNetwork,omitempty\"`\n\tDistributedVirtualPortgroup []DistributedVirtualPortgroupInfo           `xml:\"distributedVirtualPortgroup,omitempty\"`\n\tDistributedVirtualSwitch    []DistributedVirtualSwitchInfo              `xml:\"distributedVirtualSwitch,omitempty\"`\n\tCdRom                       []VirtualMachineCdromInfo                   `xml:\"cdRom,omitempty\"`\n\tSerial                      []VirtualMachineSerialInfo                  `xml:\"serial,omitempty\"`\n\tParallel                    []VirtualMachineParallelInfo                `xml:\"parallel,omitempty\"`\n\tSound                       []VirtualMachineSoundInfo                   `xml:\"sound,omitempty\"`\n\tUsb                         []VirtualMachineUsbInfo                     `xml:\"usb,omitempty\"`\n\tFloppy                      []VirtualMachineFloppyInfo                  `xml:\"floppy,omitempty\"`\n\tLegacyNetworkInfo           []VirtualMachineLegacyNetworkSwitchInfo     `xml:\"legacyNetworkInfo,omitempty\"`\n\tScsiPassthrough             []VirtualMachineScsiPassthroughInfo         `xml:\"scsiPassthrough,omitempty\"`\n\tScsiDisk                    []VirtualMachineScsiDiskDeviceInfo          `xml:\"scsiDisk,omitempty\"`\n\tIdeDisk                     []VirtualMachineIdeDiskDeviceInfo           `xml:\"ideDisk,omitempty\"`\n\tMaxMemMBOptimalPerf         int32                                       `xml:\"maxMemMBOptimalPerf\"`\n\tResourcePool                *ResourcePoolRuntimeInfo                    `xml:\"resourcePool,omitempty\"`\n\tAutoVmotion                 *bool                                       `xml:\"autoVmotion\"`\n\tPciPassthrough              []BaseVirtualMachinePciPassthroughInfo      
`xml:\"pciPassthrough,omitempty,typeattr\"`\n\tSriov                       []VirtualMachineSriovInfo                   `xml:\"sriov,omitempty\"`\n\tVFlashModule                []VirtualMachineVFlashModuleInfo            `xml:\"vFlashModule,omitempty\"`\n\tSharedGpuPassthroughTypes   []VirtualMachinePciSharedGpuPassthroughInfo `xml:\"sharedGpuPassthroughTypes,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ConfigTarget\"] = reflect.TypeOf((*ConfigTarget)(nil)).Elem()\n}\n\ntype ConfigureCryptoKey ConfigureCryptoKeyRequestType\n\nfunc init() {\n\tt[\"ConfigureCryptoKey\"] = reflect.TypeOf((*ConfigureCryptoKey)(nil)).Elem()\n}\n\ntype ConfigureCryptoKeyRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tKeyId *CryptoKeyId           `xml:\"keyId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ConfigureCryptoKeyRequestType\"] = reflect.TypeOf((*ConfigureCryptoKeyRequestType)(nil)).Elem()\n}\n\ntype ConfigureCryptoKeyResponse struct {\n}\n\ntype ConfigureDatastoreIORMRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n\tSpec      StorageIORMConfigSpec  `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"ConfigureDatastoreIORMRequestType\"] = reflect.TypeOf((*ConfigureDatastoreIORMRequestType)(nil)).Elem()\n}\n\ntype ConfigureDatastoreIORM_Task ConfigureDatastoreIORMRequestType\n\nfunc init() {\n\tt[\"ConfigureDatastoreIORM_Task\"] = reflect.TypeOf((*ConfigureDatastoreIORM_Task)(nil)).Elem()\n}\n\ntype ConfigureDatastoreIORM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ConfigureDatastorePrincipal ConfigureDatastorePrincipalRequestType\n\nfunc init() {\n\tt[\"ConfigureDatastorePrincipal\"] = reflect.TypeOf((*ConfigureDatastorePrincipal)(nil)).Elem()\n}\n\ntype ConfigureDatastorePrincipalRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tUserName string                 `xml:\"userName\"`\n\tPassword string                 
`xml:\"password,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ConfigureDatastorePrincipalRequestType\"] = reflect.TypeOf((*ConfigureDatastorePrincipalRequestType)(nil)).Elem()\n}\n\ntype ConfigureDatastorePrincipalResponse struct {\n}\n\ntype ConfigureEvcModeRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tEvcModeKey string                 `xml:\"evcModeKey\"`\n}\n\nfunc init() {\n\tt[\"ConfigureEvcModeRequestType\"] = reflect.TypeOf((*ConfigureEvcModeRequestType)(nil)).Elem()\n}\n\ntype ConfigureEvcMode_Task ConfigureEvcModeRequestType\n\nfunc init() {\n\tt[\"ConfigureEvcMode_Task\"] = reflect.TypeOf((*ConfigureEvcMode_Task)(nil)).Elem()\n}\n\ntype ConfigureEvcMode_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ConfigureHostCacheRequestType struct {\n\tThis ManagedObjectReference     `xml:\"_this\"`\n\tSpec HostCacheConfigurationSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"ConfigureHostCacheRequestType\"] = reflect.TypeOf((*ConfigureHostCacheRequestType)(nil)).Elem()\n}\n\ntype ConfigureHostCache_Task ConfigureHostCacheRequestType\n\nfunc init() {\n\tt[\"ConfigureHostCache_Task\"] = reflect.TypeOf((*ConfigureHostCache_Task)(nil)).Elem()\n}\n\ntype ConfigureHostCache_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ConfigureLicenseSource ConfigureLicenseSourceRequestType\n\nfunc init() {\n\tt[\"ConfigureLicenseSource\"] = reflect.TypeOf((*ConfigureLicenseSource)(nil)).Elem()\n}\n\ntype ConfigureLicenseSourceRequestType struct {\n\tThis          ManagedObjectReference  `xml:\"_this\"`\n\tHost          *ManagedObjectReference `xml:\"host,omitempty\"`\n\tLicenseSource BaseLicenseSource       `xml:\"licenseSource,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ConfigureLicenseSourceRequestType\"] = reflect.TypeOf((*ConfigureLicenseSourceRequestType)(nil)).Elem()\n}\n\ntype ConfigureLicenseSourceResponse struct {\n}\n\ntype ConfigurePowerPolicy 
ConfigurePowerPolicyRequestType\n\nfunc init() {\n\tt[\"ConfigurePowerPolicy\"] = reflect.TypeOf((*ConfigurePowerPolicy)(nil)).Elem()\n}\n\ntype ConfigurePowerPolicyRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tKey  int32                  `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"ConfigurePowerPolicyRequestType\"] = reflect.TypeOf((*ConfigurePowerPolicyRequestType)(nil)).Elem()\n}\n\ntype ConfigurePowerPolicyResponse struct {\n}\n\ntype ConfigureStorageDrsForPodRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tPod    ManagedObjectReference `xml:\"pod\"`\n\tSpec   StorageDrsConfigSpec   `xml:\"spec\"`\n\tModify bool                   `xml:\"modify\"`\n}\n\nfunc init() {\n\tt[\"ConfigureStorageDrsForPodRequestType\"] = reflect.TypeOf((*ConfigureStorageDrsForPodRequestType)(nil)).Elem()\n}\n\ntype ConfigureStorageDrsForPod_Task ConfigureStorageDrsForPodRequestType\n\nfunc init() {\n\tt[\"ConfigureStorageDrsForPod_Task\"] = reflect.TypeOf((*ConfigureStorageDrsForPod_Task)(nil)).Elem()\n}\n\ntype ConfigureStorageDrsForPod_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ConfigureVFlashResourceExRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tDevicePath []string               `xml:\"devicePath,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ConfigureVFlashResourceExRequestType\"] = reflect.TypeOf((*ConfigureVFlashResourceExRequestType)(nil)).Elem()\n}\n\ntype ConfigureVFlashResourceEx_Task ConfigureVFlashResourceExRequestType\n\nfunc init() {\n\tt[\"ConfigureVFlashResourceEx_Task\"] = reflect.TypeOf((*ConfigureVFlashResourceEx_Task)(nil)).Elem()\n}\n\ntype ConfigureVFlashResourceEx_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ConflictingConfiguration struct {\n\tDvsFault\n\n\tConfigInConflict []ConflictingConfigurationConfig `xml:\"configInConflict\"`\n}\n\nfunc init() {\n\tt[\"ConflictingConfiguration\"] = 
reflect.TypeOf((*ConflictingConfiguration)(nil)).Elem()\n}\n\ntype ConflictingConfigurationConfig struct {\n\tDynamicData\n\n\tEntity       *ManagedObjectReference `xml:\"entity,omitempty\"`\n\tPropertyPath string                  `xml:\"propertyPath\"`\n}\n\nfunc init() {\n\tt[\"ConflictingConfigurationConfig\"] = reflect.TypeOf((*ConflictingConfigurationConfig)(nil)).Elem()\n}\n\ntype ConflictingConfigurationFault ConflictingConfiguration\n\nfunc init() {\n\tt[\"ConflictingConfigurationFault\"] = reflect.TypeOf((*ConflictingConfigurationFault)(nil)).Elem()\n}\n\ntype ConflictingDatastoreFound struct {\n\tRuntimeFault\n\n\tName string `xml:\"name\"`\n\tUrl  string `xml:\"url\"`\n}\n\nfunc init() {\n\tt[\"ConflictingDatastoreFound\"] = reflect.TypeOf((*ConflictingDatastoreFound)(nil)).Elem()\n}\n\ntype ConflictingDatastoreFoundFault ConflictingDatastoreFound\n\nfunc init() {\n\tt[\"ConflictingDatastoreFoundFault\"] = reflect.TypeOf((*ConflictingDatastoreFoundFault)(nil)).Elem()\n}\n\ntype ConnectedIso struct {\n\tOvfExport\n\n\tCdrom    VirtualCdrom `xml:\"cdrom\"`\n\tFilename string       `xml:\"filename\"`\n}\n\nfunc init() {\n\tt[\"ConnectedIso\"] = reflect.TypeOf((*ConnectedIso)(nil)).Elem()\n}\n\ntype ConnectedIsoFault ConnectedIso\n\nfunc init() {\n\tt[\"ConnectedIsoFault\"] = reflect.TypeOf((*ConnectedIsoFault)(nil)).Elem()\n}\n\ntype ConsolidateVMDisksRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"ConsolidateVMDisksRequestType\"] = reflect.TypeOf((*ConsolidateVMDisksRequestType)(nil)).Elem()\n}\n\ntype ConsolidateVMDisks_Task ConsolidateVMDisksRequestType\n\nfunc init() {\n\tt[\"ConsolidateVMDisks_Task\"] = reflect.TypeOf((*ConsolidateVMDisks_Task)(nil)).Elem()\n}\n\ntype ConsolidateVMDisks_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ContinueRetrievePropertiesEx ContinueRetrievePropertiesExRequestType\n\nfunc init() {\n\tt[\"ContinueRetrievePropertiesEx\"] = 
reflect.TypeOf((*ContinueRetrievePropertiesEx)(nil)).Elem()\n}\n\ntype ContinueRetrievePropertiesExRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tToken string                 `xml:\"token\"`\n}\n\nfunc init() {\n\tt[\"ContinueRetrievePropertiesExRequestType\"] = reflect.TypeOf((*ContinueRetrievePropertiesExRequestType)(nil)).Elem()\n}\n\ntype ContinueRetrievePropertiesExResponse struct {\n\tReturnval RetrieveResult `xml:\"returnval\"`\n}\n\ntype ConvertNamespacePathToUuidPath ConvertNamespacePathToUuidPathRequestType\n\nfunc init() {\n\tt[\"ConvertNamespacePathToUuidPath\"] = reflect.TypeOf((*ConvertNamespacePathToUuidPath)(nil)).Elem()\n}\n\ntype ConvertNamespacePathToUuidPathRequestType struct {\n\tThis         ManagedObjectReference  `xml:\"_this\"`\n\tDatacenter   *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n\tNamespaceUrl string                  `xml:\"namespaceUrl\"`\n}\n\nfunc init() {\n\tt[\"ConvertNamespacePathToUuidPathRequestType\"] = reflect.TypeOf((*ConvertNamespacePathToUuidPathRequestType)(nil)).Elem()\n}\n\ntype ConvertNamespacePathToUuidPathResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype CopyDatastoreFileRequestType struct {\n\tThis                  ManagedObjectReference  `xml:\"_this\"`\n\tSourceName            string                  `xml:\"sourceName\"`\n\tSourceDatacenter      *ManagedObjectReference `xml:\"sourceDatacenter,omitempty\"`\n\tDestinationName       string                  `xml:\"destinationName\"`\n\tDestinationDatacenter *ManagedObjectReference `xml:\"destinationDatacenter,omitempty\"`\n\tForce                 *bool                   `xml:\"force\"`\n}\n\nfunc init() {\n\tt[\"CopyDatastoreFileRequestType\"] = reflect.TypeOf((*CopyDatastoreFileRequestType)(nil)).Elem()\n}\n\ntype CopyDatastoreFile_Task CopyDatastoreFileRequestType\n\nfunc init() {\n\tt[\"CopyDatastoreFile_Task\"] = reflect.TypeOf((*CopyDatastoreFile_Task)(nil)).Elem()\n}\n\ntype 
CopyDatastoreFile_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CopyVirtualDiskRequestType struct {\n\tThis             ManagedObjectReference  `xml:\"_this\"`\n\tSourceName       string                  `xml:\"sourceName\"`\n\tSourceDatacenter *ManagedObjectReference `xml:\"sourceDatacenter,omitempty\"`\n\tDestName         string                  `xml:\"destName\"`\n\tDestDatacenter   *ManagedObjectReference `xml:\"destDatacenter,omitempty\"`\n\tDestSpec         BaseVirtualDiskSpec     `xml:\"destSpec,omitempty,typeattr\"`\n\tForce            *bool                   `xml:\"force\"`\n}\n\nfunc init() {\n\tt[\"CopyVirtualDiskRequestType\"] = reflect.TypeOf((*CopyVirtualDiskRequestType)(nil)).Elem()\n}\n\ntype CopyVirtualDisk_Task CopyVirtualDiskRequestType\n\nfunc init() {\n\tt[\"CopyVirtualDisk_Task\"] = reflect.TypeOf((*CopyVirtualDisk_Task)(nil)).Elem()\n}\n\ntype CopyVirtualDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CpuCompatibilityUnknown struct {\n\tCpuIncompatible\n}\n\nfunc init() {\n\tt[\"CpuCompatibilityUnknown\"] = reflect.TypeOf((*CpuCompatibilityUnknown)(nil)).Elem()\n}\n\ntype CpuCompatibilityUnknownFault CpuCompatibilityUnknown\n\nfunc init() {\n\tt[\"CpuCompatibilityUnknownFault\"] = reflect.TypeOf((*CpuCompatibilityUnknownFault)(nil)).Elem()\n}\n\ntype CpuHotPlugNotSupported struct {\n\tVmConfigFault\n}\n\nfunc init() {\n\tt[\"CpuHotPlugNotSupported\"] = reflect.TypeOf((*CpuHotPlugNotSupported)(nil)).Elem()\n}\n\ntype CpuHotPlugNotSupportedFault CpuHotPlugNotSupported\n\nfunc init() {\n\tt[\"CpuHotPlugNotSupportedFault\"] = reflect.TypeOf((*CpuHotPlugNotSupportedFault)(nil)).Elem()\n}\n\ntype CpuIncompatible struct {\n\tVirtualHardwareCompatibilityIssue\n\n\tLevel        int32                   `xml:\"level\"`\n\tRegisterName string                  `xml:\"registerName\"`\n\tRegisterBits string                  
`xml:\"registerBits,omitempty\"`\n\tDesiredBits  string                  `xml:\"desiredBits,omitempty\"`\n\tHost         *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CpuIncompatible\"] = reflect.TypeOf((*CpuIncompatible)(nil)).Elem()\n}\n\ntype CpuIncompatible1ECX struct {\n\tCpuIncompatible\n\n\tSse3      bool  `xml:\"sse3\"`\n\tPclmulqdq *bool `xml:\"pclmulqdq\"`\n\tSsse3     bool  `xml:\"ssse3\"`\n\tSse41     bool  `xml:\"sse41\"`\n\tSse42     bool  `xml:\"sse42\"`\n\tAes       *bool `xml:\"aes\"`\n\tOther     bool  `xml:\"other\"`\n\tOtherOnly bool  `xml:\"otherOnly\"`\n}\n\nfunc init() {\n\tt[\"CpuIncompatible1ECX\"] = reflect.TypeOf((*CpuIncompatible1ECX)(nil)).Elem()\n}\n\ntype CpuIncompatible1ECXFault CpuIncompatible1ECX\n\nfunc init() {\n\tt[\"CpuIncompatible1ECXFault\"] = reflect.TypeOf((*CpuIncompatible1ECXFault)(nil)).Elem()\n}\n\ntype CpuIncompatible81EDX struct {\n\tCpuIncompatible\n\n\tNx        bool `xml:\"nx\"`\n\tFfxsr     bool `xml:\"ffxsr\"`\n\tRdtscp    bool `xml:\"rdtscp\"`\n\tLm        bool `xml:\"lm\"`\n\tOther     bool `xml:\"other\"`\n\tOtherOnly bool `xml:\"otherOnly\"`\n}\n\nfunc init() {\n\tt[\"CpuIncompatible81EDX\"] = reflect.TypeOf((*CpuIncompatible81EDX)(nil)).Elem()\n}\n\ntype CpuIncompatible81EDXFault CpuIncompatible81EDX\n\nfunc init() {\n\tt[\"CpuIncompatible81EDXFault\"] = reflect.TypeOf((*CpuIncompatible81EDXFault)(nil)).Elem()\n}\n\ntype CpuIncompatibleFault BaseCpuIncompatible\n\nfunc init() {\n\tt[\"CpuIncompatibleFault\"] = reflect.TypeOf((*CpuIncompatibleFault)(nil)).Elem()\n}\n\ntype CreateAlarm CreateAlarmRequestType\n\nfunc init() {\n\tt[\"CreateAlarm\"] = reflect.TypeOf((*CreateAlarm)(nil)).Elem()\n}\n\ntype CreateAlarmRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tEntity ManagedObjectReference `xml:\"entity\"`\n\tSpec   BaseAlarmSpec          `xml:\"spec,typeattr\"`\n}\n\nfunc init() {\n\tt[\"CreateAlarmRequestType\"] = 
reflect.TypeOf((*CreateAlarmRequestType)(nil)).Elem()\n}\n\ntype CreateAlarmResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateChildVMRequestType struct {\n\tThis   ManagedObjectReference   `xml:\"_this\"`\n\tConfig VirtualMachineConfigSpec `xml:\"config\"`\n\tHost   *ManagedObjectReference  `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CreateChildVMRequestType\"] = reflect.TypeOf((*CreateChildVMRequestType)(nil)).Elem()\n}\n\ntype CreateChildVM_Task CreateChildVMRequestType\n\nfunc init() {\n\tt[\"CreateChildVM_Task\"] = reflect.TypeOf((*CreateChildVM_Task)(nil)).Elem()\n}\n\ntype CreateChildVM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateCluster CreateClusterRequestType\n\nfunc init() {\n\tt[\"CreateCluster\"] = reflect.TypeOf((*CreateCluster)(nil)).Elem()\n}\n\ntype CreateClusterEx CreateClusterExRequestType\n\nfunc init() {\n\tt[\"CreateClusterEx\"] = reflect.TypeOf((*CreateClusterEx)(nil)).Elem()\n}\n\ntype CreateClusterExRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tName string                 `xml:\"name\"`\n\tSpec ClusterConfigSpecEx    `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"CreateClusterExRequestType\"] = reflect.TypeOf((*CreateClusterExRequestType)(nil)).Elem()\n}\n\ntype CreateClusterExResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateClusterRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tName string                 `xml:\"name\"`\n\tSpec ClusterConfigSpec      `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"CreateClusterRequestType\"] = reflect.TypeOf((*CreateClusterRequestType)(nil)).Elem()\n}\n\ntype CreateClusterResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateCollectorForEvents CreateCollectorForEventsRequestType\n\nfunc init() {\n\tt[\"CreateCollectorForEvents\"] = 
reflect.TypeOf((*CreateCollectorForEvents)(nil)).Elem()\n}\n\ntype CreateCollectorForEventsRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tFilter EventFilterSpec        `xml:\"filter\"`\n}\n\nfunc init() {\n\tt[\"CreateCollectorForEventsRequestType\"] = reflect.TypeOf((*CreateCollectorForEventsRequestType)(nil)).Elem()\n}\n\ntype CreateCollectorForEventsResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateCollectorForTasks CreateCollectorForTasksRequestType\n\nfunc init() {\n\tt[\"CreateCollectorForTasks\"] = reflect.TypeOf((*CreateCollectorForTasks)(nil)).Elem()\n}\n\ntype CreateCollectorForTasksRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tFilter TaskFilterSpec         `xml:\"filter\"`\n}\n\nfunc init() {\n\tt[\"CreateCollectorForTasksRequestType\"] = reflect.TypeOf((*CreateCollectorForTasksRequestType)(nil)).Elem()\n}\n\ntype CreateCollectorForTasksResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateContainerView CreateContainerViewRequestType\n\nfunc init() {\n\tt[\"CreateContainerView\"] = reflect.TypeOf((*CreateContainerView)(nil)).Elem()\n}\n\ntype CreateContainerViewRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tContainer ManagedObjectReference `xml:\"container\"`\n\tType      []string               `xml:\"type,omitempty\"`\n\tRecursive bool                   `xml:\"recursive\"`\n}\n\nfunc init() {\n\tt[\"CreateContainerViewRequestType\"] = reflect.TypeOf((*CreateContainerViewRequestType)(nil)).Elem()\n}\n\ntype CreateContainerViewResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateCustomizationSpec CreateCustomizationSpecRequestType\n\nfunc init() {\n\tt[\"CreateCustomizationSpec\"] = reflect.TypeOf((*CreateCustomizationSpec)(nil)).Elem()\n}\n\ntype CreateCustomizationSpecRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tItem 
CustomizationSpecItem  `xml:\"item\"`\n}\n\nfunc init() {\n\tt[\"CreateCustomizationSpecRequestType\"] = reflect.TypeOf((*CreateCustomizationSpecRequestType)(nil)).Elem()\n}\n\ntype CreateCustomizationSpecResponse struct {\n}\n\ntype CreateDVPortgroupRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tSpec DVPortgroupConfigSpec  `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"CreateDVPortgroupRequestType\"] = reflect.TypeOf((*CreateDVPortgroupRequestType)(nil)).Elem()\n}\n\ntype CreateDVPortgroup_Task CreateDVPortgroupRequestType\n\nfunc init() {\n\tt[\"CreateDVPortgroup_Task\"] = reflect.TypeOf((*CreateDVPortgroup_Task)(nil)).Elem()\n}\n\ntype CreateDVPortgroup_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateDVSRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tSpec DVSCreateSpec          `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"CreateDVSRequestType\"] = reflect.TypeOf((*CreateDVSRequestType)(nil)).Elem()\n}\n\ntype CreateDVS_Task CreateDVSRequestType\n\nfunc init() {\n\tt[\"CreateDVS_Task\"] = reflect.TypeOf((*CreateDVS_Task)(nil)).Elem()\n}\n\ntype CreateDVS_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateDatacenter CreateDatacenterRequestType\n\nfunc init() {\n\tt[\"CreateDatacenter\"] = reflect.TypeOf((*CreateDatacenter)(nil)).Elem()\n}\n\ntype CreateDatacenterRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tName string                 `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"CreateDatacenterRequestType\"] = reflect.TypeOf((*CreateDatacenterRequestType)(nil)).Elem()\n}\n\ntype CreateDatacenterResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateDefaultProfile CreateDefaultProfileRequestType\n\nfunc init() {\n\tt[\"CreateDefaultProfile\"] = reflect.TypeOf((*CreateDefaultProfile)(nil)).Elem()\n}\n\ntype CreateDefaultProfileRequestType struct {\n\tThis            
ManagedObjectReference  `xml:\"_this\"`\n\tProfileType     string                  `xml:\"profileType\"`\n\tProfileTypeName string                  `xml:\"profileTypeName,omitempty\"`\n\tProfile         *ManagedObjectReference `xml:\"profile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CreateDefaultProfileRequestType\"] = reflect.TypeOf((*CreateDefaultProfileRequestType)(nil)).Elem()\n}\n\ntype CreateDefaultProfileResponse struct {\n\tReturnval BaseApplyProfile `xml:\"returnval,typeattr\"`\n}\n\ntype CreateDescriptor CreateDescriptorRequestType\n\nfunc init() {\n\tt[\"CreateDescriptor\"] = reflect.TypeOf((*CreateDescriptor)(nil)).Elem()\n}\n\ntype CreateDescriptorRequestType struct {\n\tThis ManagedObjectReference    `xml:\"_this\"`\n\tObj  ManagedObjectReference    `xml:\"obj\"`\n\tCdp  OvfCreateDescriptorParams `xml:\"cdp\"`\n}\n\nfunc init() {\n\tt[\"CreateDescriptorRequestType\"] = reflect.TypeOf((*CreateDescriptorRequestType)(nil)).Elem()\n}\n\ntype CreateDescriptorResponse struct {\n\tReturnval OvfCreateDescriptorResult `xml:\"returnval\"`\n}\n\ntype CreateDiagnosticPartition CreateDiagnosticPartitionRequestType\n\nfunc init() {\n\tt[\"CreateDiagnosticPartition\"] = reflect.TypeOf((*CreateDiagnosticPartition)(nil)).Elem()\n}\n\ntype CreateDiagnosticPartitionRequestType struct {\n\tThis ManagedObjectReference            `xml:\"_this\"`\n\tSpec HostDiagnosticPartitionCreateSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"CreateDiagnosticPartitionRequestType\"] = reflect.TypeOf((*CreateDiagnosticPartitionRequestType)(nil)).Elem()\n}\n\ntype CreateDiagnosticPartitionResponse struct {\n}\n\ntype CreateDirectory CreateDirectoryRequestType\n\nfunc init() {\n\tt[\"CreateDirectory\"] = reflect.TypeOf((*CreateDirectory)(nil)).Elem()\n}\n\ntype CreateDirectoryRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tDatastore   ManagedObjectReference `xml:\"datastore\"`\n\tDisplayName string                 `xml:\"displayName,omitempty\"`\n\tPolicy      
string                 `xml:\"policy,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CreateDirectoryRequestType\"] = reflect.TypeOf((*CreateDirectoryRequestType)(nil)).Elem()\n}\n\ntype CreateDirectoryResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype CreateDiskRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tSpec VslmCreateSpec         `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"CreateDiskRequestType\"] = reflect.TypeOf((*CreateDiskRequestType)(nil)).Elem()\n}\n\ntype CreateDisk_Task CreateDiskRequestType\n\nfunc init() {\n\tt[\"CreateDisk_Task\"] = reflect.TypeOf((*CreateDisk_Task)(nil)).Elem()\n}\n\ntype CreateDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateFilter CreateFilterRequestType\n\nfunc init() {\n\tt[\"CreateFilter\"] = reflect.TypeOf((*CreateFilter)(nil)).Elem()\n}\n\ntype CreateFilterRequestType struct {\n\tThis           ManagedObjectReference `xml:\"_this\"`\n\tSpec           PropertyFilterSpec     `xml:\"spec\"`\n\tPartialUpdates bool                   `xml:\"partialUpdates\"`\n}\n\nfunc init() {\n\tt[\"CreateFilterRequestType\"] = reflect.TypeOf((*CreateFilterRequestType)(nil)).Elem()\n}\n\ntype CreateFilterResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateFolder CreateFolderRequestType\n\nfunc init() {\n\tt[\"CreateFolder\"] = reflect.TypeOf((*CreateFolder)(nil)).Elem()\n}\n\ntype CreateFolderRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tName string                 `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"CreateFolderRequestType\"] = reflect.TypeOf((*CreateFolderRequestType)(nil)).Elem()\n}\n\ntype CreateFolderResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateGroup CreateGroupRequestType\n\nfunc init() {\n\tt[\"CreateGroup\"] = reflect.TypeOf((*CreateGroup)(nil)).Elem()\n}\n\ntype CreateGroupRequestType struct {\n\tThis  ManagedObjectReference 
`xml:\"_this\"`\n\tGroup BaseHostAccountSpec    `xml:\"group,typeattr\"`\n}\n\nfunc init() {\n\tt[\"CreateGroupRequestType\"] = reflect.TypeOf((*CreateGroupRequestType)(nil)).Elem()\n}\n\ntype CreateGroupResponse struct {\n}\n\ntype CreateImportSpec CreateImportSpecRequestType\n\nfunc init() {\n\tt[\"CreateImportSpec\"] = reflect.TypeOf((*CreateImportSpec)(nil)).Elem()\n}\n\ntype CreateImportSpecRequestType struct {\n\tThis          ManagedObjectReference    `xml:\"_this\"`\n\tOvfDescriptor string                    `xml:\"ovfDescriptor\"`\n\tResourcePool  ManagedObjectReference    `xml:\"resourcePool\"`\n\tDatastore     ManagedObjectReference    `xml:\"datastore\"`\n\tCisp          OvfCreateImportSpecParams `xml:\"cisp\"`\n}\n\nfunc init() {\n\tt[\"CreateImportSpecRequestType\"] = reflect.TypeOf((*CreateImportSpecRequestType)(nil)).Elem()\n}\n\ntype CreateImportSpecResponse struct {\n\tReturnval OvfCreateImportSpecResult `xml:\"returnval\"`\n}\n\ntype CreateInventoryView CreateInventoryViewRequestType\n\nfunc init() {\n\tt[\"CreateInventoryView\"] = reflect.TypeOf((*CreateInventoryView)(nil)).Elem()\n}\n\ntype CreateInventoryViewRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"CreateInventoryViewRequestType\"] = reflect.TypeOf((*CreateInventoryViewRequestType)(nil)).Elem()\n}\n\ntype CreateInventoryViewResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateIpPool CreateIpPoolRequestType\n\nfunc init() {\n\tt[\"CreateIpPool\"] = reflect.TypeOf((*CreateIpPool)(nil)).Elem()\n}\n\ntype CreateIpPoolRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tDc   ManagedObjectReference `xml:\"dc\"`\n\tPool IpPool                 `xml:\"pool\"`\n}\n\nfunc init() {\n\tt[\"CreateIpPoolRequestType\"] = reflect.TypeOf((*CreateIpPoolRequestType)(nil)).Elem()\n}\n\ntype CreateIpPoolResponse struct {\n\tReturnval int32 `xml:\"returnval\"`\n}\n\ntype CreateListView 
CreateListViewRequestType\n\nfunc init() {\n\tt[\"CreateListView\"] = reflect.TypeOf((*CreateListView)(nil)).Elem()\n}\n\ntype CreateListViewFromView CreateListViewFromViewRequestType\n\nfunc init() {\n\tt[\"CreateListViewFromView\"] = reflect.TypeOf((*CreateListViewFromView)(nil)).Elem()\n}\n\ntype CreateListViewFromViewRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tView ManagedObjectReference `xml:\"view\"`\n}\n\nfunc init() {\n\tt[\"CreateListViewFromViewRequestType\"] = reflect.TypeOf((*CreateListViewFromViewRequestType)(nil)).Elem()\n}\n\ntype CreateListViewFromViewResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateListViewRequestType struct {\n\tThis ManagedObjectReference   `xml:\"_this\"`\n\tObj  []ManagedObjectReference `xml:\"obj,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CreateListViewRequestType\"] = reflect.TypeOf((*CreateListViewRequestType)(nil)).Elem()\n}\n\ntype CreateListViewResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateLocalDatastore CreateLocalDatastoreRequestType\n\nfunc init() {\n\tt[\"CreateLocalDatastore\"] = reflect.TypeOf((*CreateLocalDatastore)(nil)).Elem()\n}\n\ntype CreateLocalDatastoreRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tName string                 `xml:\"name\"`\n\tPath string                 `xml:\"path\"`\n}\n\nfunc init() {\n\tt[\"CreateLocalDatastoreRequestType\"] = reflect.TypeOf((*CreateLocalDatastoreRequestType)(nil)).Elem()\n}\n\ntype CreateLocalDatastoreResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateNasDatastore CreateNasDatastoreRequestType\n\nfunc init() {\n\tt[\"CreateNasDatastore\"] = reflect.TypeOf((*CreateNasDatastore)(nil)).Elem()\n}\n\ntype CreateNasDatastoreRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tSpec HostNasVolumeSpec      `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"CreateNasDatastoreRequestType\"] = 
reflect.TypeOf((*CreateNasDatastoreRequestType)(nil)).Elem()\n}\n\ntype CreateNasDatastoreResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateObjectScheduledTask CreateObjectScheduledTaskRequestType\n\nfunc init() {\n\tt[\"CreateObjectScheduledTask\"] = reflect.TypeOf((*CreateObjectScheduledTask)(nil)).Elem()\n}\n\ntype CreateObjectScheduledTaskRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tObj  ManagedObjectReference `xml:\"obj\"`\n\tSpec BaseScheduledTaskSpec  `xml:\"spec,typeattr\"`\n}\n\nfunc init() {\n\tt[\"CreateObjectScheduledTaskRequestType\"] = reflect.TypeOf((*CreateObjectScheduledTaskRequestType)(nil)).Elem()\n}\n\ntype CreateObjectScheduledTaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreatePerfInterval CreatePerfIntervalRequestType\n\nfunc init() {\n\tt[\"CreatePerfInterval\"] = reflect.TypeOf((*CreatePerfInterval)(nil)).Elem()\n}\n\ntype CreatePerfIntervalRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tIntervalId PerfInterval           `xml:\"intervalId\"`\n}\n\nfunc init() {\n\tt[\"CreatePerfIntervalRequestType\"] = reflect.TypeOf((*CreatePerfIntervalRequestType)(nil)).Elem()\n}\n\ntype CreatePerfIntervalResponse struct {\n}\n\ntype CreateProfile CreateProfileRequestType\n\nfunc init() {\n\tt[\"CreateProfile\"] = reflect.TypeOf((*CreateProfile)(nil)).Elem()\n}\n\ntype CreateProfileRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tCreateSpec BaseProfileCreateSpec  `xml:\"createSpec,typeattr\"`\n}\n\nfunc init() {\n\tt[\"CreateProfileRequestType\"] = reflect.TypeOf((*CreateProfileRequestType)(nil)).Elem()\n}\n\ntype CreateProfileResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreatePropertyCollector CreatePropertyCollectorRequestType\n\nfunc init() {\n\tt[\"CreatePropertyCollector\"] = reflect.TypeOf((*CreatePropertyCollector)(nil)).Elem()\n}\n\ntype 
CreatePropertyCollectorRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"CreatePropertyCollectorRequestType\"] = reflect.TypeOf((*CreatePropertyCollectorRequestType)(nil)).Elem()\n}\n\ntype CreatePropertyCollectorResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateRegistryKeyInGuest CreateRegistryKeyInGuestRequestType\n\nfunc init() {\n\tt[\"CreateRegistryKeyInGuest\"] = reflect.TypeOf((*CreateRegistryKeyInGuest)(nil)).Elem()\n}\n\ntype CreateRegistryKeyInGuestRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tVm         ManagedObjectReference  `xml:\"vm\"`\n\tAuth       BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tKeyName    GuestRegKeyNameSpec     `xml:\"keyName\"`\n\tIsVolatile bool                    `xml:\"isVolatile\"`\n\tClassType  string                  `xml:\"classType,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CreateRegistryKeyInGuestRequestType\"] = reflect.TypeOf((*CreateRegistryKeyInGuestRequestType)(nil)).Elem()\n}\n\ntype CreateRegistryKeyInGuestResponse struct {\n}\n\ntype CreateResourcePool CreateResourcePoolRequestType\n\nfunc init() {\n\tt[\"CreateResourcePool\"] = reflect.TypeOf((*CreateResourcePool)(nil)).Elem()\n}\n\ntype CreateResourcePoolRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tName string                 `xml:\"name\"`\n\tSpec ResourceConfigSpec     `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"CreateResourcePoolRequestType\"] = reflect.TypeOf((*CreateResourcePoolRequestType)(nil)).Elem()\n}\n\ntype CreateResourcePoolResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateScheduledTask CreateScheduledTaskRequestType\n\nfunc init() {\n\tt[\"CreateScheduledTask\"] = reflect.TypeOf((*CreateScheduledTask)(nil)).Elem()\n}\n\ntype CreateScheduledTaskRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tEntity ManagedObjectReference 
`xml:\"entity\"`\n\tSpec   BaseScheduledTaskSpec  `xml:\"spec,typeattr\"`\n}\n\nfunc init() {\n\tt[\"CreateScheduledTaskRequestType\"] = reflect.TypeOf((*CreateScheduledTaskRequestType)(nil)).Elem()\n}\n\ntype CreateScheduledTaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateScreenshotRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"CreateScreenshotRequestType\"] = reflect.TypeOf((*CreateScreenshotRequestType)(nil)).Elem()\n}\n\ntype CreateScreenshot_Task CreateScreenshotRequestType\n\nfunc init() {\n\tt[\"CreateScreenshot_Task\"] = reflect.TypeOf((*CreateScreenshot_Task)(nil)).Elem()\n}\n\ntype CreateScreenshot_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateSecondaryVMExRequestType struct {\n\tThis ManagedObjectReference    `xml:\"_this\"`\n\tHost *ManagedObjectReference   `xml:\"host,omitempty\"`\n\tSpec *FaultToleranceConfigSpec `xml:\"spec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CreateSecondaryVMExRequestType\"] = reflect.TypeOf((*CreateSecondaryVMExRequestType)(nil)).Elem()\n}\n\ntype CreateSecondaryVMEx_Task CreateSecondaryVMExRequestType\n\nfunc init() {\n\tt[\"CreateSecondaryVMEx_Task\"] = reflect.TypeOf((*CreateSecondaryVMEx_Task)(nil)).Elem()\n}\n\ntype CreateSecondaryVMEx_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateSecondaryVMRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CreateSecondaryVMRequestType\"] = reflect.TypeOf((*CreateSecondaryVMRequestType)(nil)).Elem()\n}\n\ntype CreateSecondaryVM_Task CreateSecondaryVMRequestType\n\nfunc init() {\n\tt[\"CreateSecondaryVM_Task\"] = reflect.TypeOf((*CreateSecondaryVM_Task)(nil)).Elem()\n}\n\ntype CreateSecondaryVM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype 
CreateSnapshotExRequestType struct {\n\tThis        ManagedObjectReference             `xml:\"_this\"`\n\tName        string                             `xml:\"name\"`\n\tDescription string                             `xml:\"description,omitempty\"`\n\tMemory      bool                               `xml:\"memory\"`\n\tQuiesceSpec BaseVirtualMachineGuestQuiesceSpec `xml:\"quiesceSpec,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"CreateSnapshotExRequestType\"] = reflect.TypeOf((*CreateSnapshotExRequestType)(nil)).Elem()\n}\n\ntype CreateSnapshotEx_Task CreateSnapshotExRequestType\n\nfunc init() {\n\tt[\"CreateSnapshotEx_Task\"] = reflect.TypeOf((*CreateSnapshotEx_Task)(nil)).Elem()\n}\n\ntype CreateSnapshotEx_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateSnapshotRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tName        string                 `xml:\"name\"`\n\tDescription string                 `xml:\"description,omitempty\"`\n\tMemory      bool                   `xml:\"memory\"`\n\tQuiesce     bool                   `xml:\"quiesce\"`\n}\n\nfunc init() {\n\tt[\"CreateSnapshotRequestType\"] = reflect.TypeOf((*CreateSnapshotRequestType)(nil)).Elem()\n}\n\ntype CreateSnapshot_Task CreateSnapshotRequestType\n\nfunc init() {\n\tt[\"CreateSnapshot_Task\"] = reflect.TypeOf((*CreateSnapshot_Task)(nil)).Elem()\n}\n\ntype CreateSnapshot_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateStoragePod CreateStoragePodRequestType\n\nfunc init() {\n\tt[\"CreateStoragePod\"] = reflect.TypeOf((*CreateStoragePod)(nil)).Elem()\n}\n\ntype CreateStoragePodRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tName string                 `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"CreateStoragePodRequestType\"] = reflect.TypeOf((*CreateStoragePodRequestType)(nil)).Elem()\n}\n\ntype CreateStoragePodResponse struct {\n\tReturnval ManagedObjectReference 
`xml:\"returnval\"`\n}\n\ntype CreateTask CreateTaskRequestType\n\nfunc init() {\n\tt[\"CreateTask\"] = reflect.TypeOf((*CreateTask)(nil)).Elem()\n}\n\ntype CreateTaskAction struct {\n\tAction\n\n\tTaskTypeId string `xml:\"taskTypeId\"`\n\tCancelable bool   `xml:\"cancelable\"`\n}\n\nfunc init() {\n\tt[\"CreateTaskAction\"] = reflect.TypeOf((*CreateTaskAction)(nil)).Elem()\n}\n\ntype CreateTaskRequestType struct {\n\tThis          ManagedObjectReference `xml:\"_this\"`\n\tObj           ManagedObjectReference `xml:\"obj\"`\n\tTaskTypeId    string                 `xml:\"taskTypeId\"`\n\tInitiatedBy   string                 `xml:\"initiatedBy,omitempty\"`\n\tCancelable    bool                   `xml:\"cancelable\"`\n\tParentTaskKey string                 `xml:\"parentTaskKey,omitempty\"`\n\tActivationId  string                 `xml:\"activationId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CreateTaskRequestType\"] = reflect.TypeOf((*CreateTaskRequestType)(nil)).Elem()\n}\n\ntype CreateTaskResponse struct {\n\tReturnval TaskInfo `xml:\"returnval\"`\n}\n\ntype CreateTemporaryDirectoryInGuest CreateTemporaryDirectoryInGuestRequestType\n\nfunc init() {\n\tt[\"CreateTemporaryDirectoryInGuest\"] = reflect.TypeOf((*CreateTemporaryDirectoryInGuest)(nil)).Elem()\n}\n\ntype CreateTemporaryDirectoryInGuestRequestType struct {\n\tThis          ManagedObjectReference  `xml:\"_this\"`\n\tVm            ManagedObjectReference  `xml:\"vm\"`\n\tAuth          BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tPrefix        string                  `xml:\"prefix\"`\n\tSuffix        string                  `xml:\"suffix\"`\n\tDirectoryPath string                  `xml:\"directoryPath,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CreateTemporaryDirectoryInGuestRequestType\"] = reflect.TypeOf((*CreateTemporaryDirectoryInGuestRequestType)(nil)).Elem()\n}\n\ntype CreateTemporaryDirectoryInGuestResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype CreateTemporaryFileInGuest 
CreateTemporaryFileInGuestRequestType\n\nfunc init() {\n\tt[\"CreateTemporaryFileInGuest\"] = reflect.TypeOf((*CreateTemporaryFileInGuest)(nil)).Elem()\n}\n\ntype CreateTemporaryFileInGuestRequestType struct {\n\tThis          ManagedObjectReference  `xml:\"_this\"`\n\tVm            ManagedObjectReference  `xml:\"vm\"`\n\tAuth          BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tPrefix        string                  `xml:\"prefix\"`\n\tSuffix        string                  `xml:\"suffix\"`\n\tDirectoryPath string                  `xml:\"directoryPath,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CreateTemporaryFileInGuestRequestType\"] = reflect.TypeOf((*CreateTemporaryFileInGuestRequestType)(nil)).Elem()\n}\n\ntype CreateTemporaryFileInGuestResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype CreateUser CreateUserRequestType\n\nfunc init() {\n\tt[\"CreateUser\"] = reflect.TypeOf((*CreateUser)(nil)).Elem()\n}\n\ntype CreateUserRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tUser BaseHostAccountSpec    `xml:\"user,typeattr\"`\n}\n\nfunc init() {\n\tt[\"CreateUserRequestType\"] = reflect.TypeOf((*CreateUserRequestType)(nil)).Elem()\n}\n\ntype CreateUserResponse struct {\n}\n\ntype CreateVApp CreateVAppRequestType\n\nfunc init() {\n\tt[\"CreateVApp\"] = reflect.TypeOf((*CreateVApp)(nil)).Elem()\n}\n\ntype CreateVAppRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tName       string                  `xml:\"name\"`\n\tResSpec    ResourceConfigSpec      `xml:\"resSpec\"`\n\tConfigSpec VAppConfigSpec          `xml:\"configSpec\"`\n\tVmFolder   *ManagedObjectReference `xml:\"vmFolder,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CreateVAppRequestType\"] = reflect.TypeOf((*CreateVAppRequestType)(nil)).Elem()\n}\n\ntype CreateVAppResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateVMRequestType struct {\n\tThis   ManagedObjectReference   `xml:\"_this\"`\n\tConfig 
VirtualMachineConfigSpec `xml:\"config\"`\n\tPool   ManagedObjectReference   `xml:\"pool\"`\n\tHost   *ManagedObjectReference  `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CreateVMRequestType\"] = reflect.TypeOf((*CreateVMRequestType)(nil)).Elem()\n}\n\ntype CreateVM_Task CreateVMRequestType\n\nfunc init() {\n\tt[\"CreateVM_Task\"] = reflect.TypeOf((*CreateVM_Task)(nil)).Elem()\n}\n\ntype CreateVM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateVirtualDiskRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tName       string                  `xml:\"name\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n\tSpec       BaseVirtualDiskSpec     `xml:\"spec,typeattr\"`\n}\n\nfunc init() {\n\tt[\"CreateVirtualDiskRequestType\"] = reflect.TypeOf((*CreateVirtualDiskRequestType)(nil)).Elem()\n}\n\ntype CreateVirtualDisk_Task CreateVirtualDiskRequestType\n\nfunc init() {\n\tt[\"CreateVirtualDisk_Task\"] = reflect.TypeOf((*CreateVirtualDisk_Task)(nil)).Elem()\n}\n\ntype CreateVirtualDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateVmfsDatastore CreateVmfsDatastoreRequestType\n\nfunc init() {\n\tt[\"CreateVmfsDatastore\"] = reflect.TypeOf((*CreateVmfsDatastore)(nil)).Elem()\n}\n\ntype CreateVmfsDatastoreRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tSpec VmfsDatastoreCreateSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"CreateVmfsDatastoreRequestType\"] = reflect.TypeOf((*CreateVmfsDatastoreRequestType)(nil)).Elem()\n}\n\ntype CreateVmfsDatastoreResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CreateVvolDatastore CreateVvolDatastoreRequestType\n\nfunc init() {\n\tt[\"CreateVvolDatastore\"] = reflect.TypeOf((*CreateVvolDatastore)(nil)).Elem()\n}\n\ntype CreateVvolDatastoreRequestType struct {\n\tThis ManagedObjectReference               `xml:\"_this\"`\n\tSpec 
HostDatastoreSystemVvolDatastoreSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"CreateVvolDatastoreRequestType\"] = reflect.TypeOf((*CreateVvolDatastoreRequestType)(nil)).Elem()\n}\n\ntype CreateVvolDatastoreResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype CryptoKeyId struct {\n\tDynamicData\n\n\tKeyId      string         `xml:\"keyId\"`\n\tProviderId *KeyProviderId `xml:\"providerId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CryptoKeyId\"] = reflect.TypeOf((*CryptoKeyId)(nil)).Elem()\n}\n\ntype CryptoKeyPlain struct {\n\tDynamicData\n\n\tKeyId     CryptoKeyId `xml:\"keyId\"`\n\tAlgorithm string      `xml:\"algorithm\"`\n\tKeyData   string      `xml:\"keyData\"`\n}\n\nfunc init() {\n\tt[\"CryptoKeyPlain\"] = reflect.TypeOf((*CryptoKeyPlain)(nil)).Elem()\n}\n\ntype CryptoKeyResult struct {\n\tDynamicData\n\n\tKeyId   CryptoKeyId `xml:\"keyId\"`\n\tSuccess bool        `xml:\"success\"`\n\tReason  string      `xml:\"reason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CryptoKeyResult\"] = reflect.TypeOf((*CryptoKeyResult)(nil)).Elem()\n}\n\ntype CryptoManagerKmipCertificateInfo struct {\n\tDynamicData\n\n\tSubject             string    `xml:\"subject\"`\n\tIssuer              string    `xml:\"issuer\"`\n\tSerialNumber        string    `xml:\"serialNumber\"`\n\tNotBefore           time.Time `xml:\"notBefore\"`\n\tNotAfter            time.Time `xml:\"notAfter\"`\n\tFingerprint         string    `xml:\"fingerprint\"`\n\tCheckTime           time.Time `xml:\"checkTime\"`\n\tSecondsSinceValid   int32     `xml:\"secondsSinceValid,omitempty\"`\n\tSecondsBeforeExpire int32     `xml:\"secondsBeforeExpire,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CryptoManagerKmipCertificateInfo\"] = reflect.TypeOf((*CryptoManagerKmipCertificateInfo)(nil)).Elem()\n}\n\ntype CryptoManagerKmipClusterStatus struct {\n\tDynamicData\n\n\tClusterId      KeyProviderId                     `xml:\"clusterId\"`\n\tServers        []CryptoManagerKmipServerStatus   
`xml:\"servers\"`\n\tClientCertInfo *CryptoManagerKmipCertificateInfo `xml:\"clientCertInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CryptoManagerKmipClusterStatus\"] = reflect.TypeOf((*CryptoManagerKmipClusterStatus)(nil)).Elem()\n}\n\ntype CryptoManagerKmipServerCertInfo struct {\n\tDynamicData\n\n\tCertificate       string                            `xml:\"certificate\"`\n\tCertInfo          *CryptoManagerKmipCertificateInfo `xml:\"certInfo,omitempty\"`\n\tClientTrustServer *bool                             `xml:\"clientTrustServer\"`\n}\n\nfunc init() {\n\tt[\"CryptoManagerKmipServerCertInfo\"] = reflect.TypeOf((*CryptoManagerKmipServerCertInfo)(nil)).Elem()\n}\n\ntype CryptoManagerKmipServerStatus struct {\n\tDynamicData\n\n\tName              string                            `xml:\"name\"`\n\tStatus            ManagedEntityStatus               `xml:\"status\"`\n\tConnectionStatus  string                            `xml:\"connectionStatus\"`\n\tCertInfo          *CryptoManagerKmipCertificateInfo `xml:\"certInfo,omitempty\"`\n\tClientTrustServer *bool                             `xml:\"clientTrustServer\"`\n\tServerTrustClient *bool                             `xml:\"serverTrustClient\"`\n}\n\nfunc init() {\n\tt[\"CryptoManagerKmipServerStatus\"] = reflect.TypeOf((*CryptoManagerKmipServerStatus)(nil)).Elem()\n}\n\ntype CryptoSpec struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"CryptoSpec\"] = reflect.TypeOf((*CryptoSpec)(nil)).Elem()\n}\n\ntype CryptoSpecDecrypt struct {\n\tCryptoSpec\n}\n\nfunc init() {\n\tt[\"CryptoSpecDecrypt\"] = reflect.TypeOf((*CryptoSpecDecrypt)(nil)).Elem()\n}\n\ntype CryptoSpecDeepRecrypt struct {\n\tCryptoSpec\n\n\tNewKeyId CryptoKeyId `xml:\"newKeyId\"`\n}\n\nfunc init() {\n\tt[\"CryptoSpecDeepRecrypt\"] = reflect.TypeOf((*CryptoSpecDeepRecrypt)(nil)).Elem()\n}\n\ntype CryptoSpecEncrypt struct {\n\tCryptoSpec\n\n\tCryptoKeyId CryptoKeyId `xml:\"cryptoKeyId\"`\n}\n\nfunc init() {\n\tt[\"CryptoSpecEncrypt\"] = 
reflect.TypeOf((*CryptoSpecEncrypt)(nil)).Elem()\n}\n\ntype CryptoSpecNoOp struct {\n\tCryptoSpec\n}\n\nfunc init() {\n\tt[\"CryptoSpecNoOp\"] = reflect.TypeOf((*CryptoSpecNoOp)(nil)).Elem()\n}\n\ntype CryptoSpecRegister struct {\n\tCryptoSpecNoOp\n\n\tCryptoKeyId CryptoKeyId `xml:\"cryptoKeyId\"`\n}\n\nfunc init() {\n\tt[\"CryptoSpecRegister\"] = reflect.TypeOf((*CryptoSpecRegister)(nil)).Elem()\n}\n\ntype CryptoSpecShallowRecrypt struct {\n\tCryptoSpec\n\n\tNewKeyId CryptoKeyId `xml:\"newKeyId\"`\n}\n\nfunc init() {\n\tt[\"CryptoSpecShallowRecrypt\"] = reflect.TypeOf((*CryptoSpecShallowRecrypt)(nil)).Elem()\n}\n\ntype CurrentTime CurrentTimeRequestType\n\nfunc init() {\n\tt[\"CurrentTime\"] = reflect.TypeOf((*CurrentTime)(nil)).Elem()\n}\n\ntype CurrentTimeRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"CurrentTimeRequestType\"] = reflect.TypeOf((*CurrentTimeRequestType)(nil)).Elem()\n}\n\ntype CurrentTimeResponse struct {\n\tReturnval time.Time `xml:\"returnval\"`\n}\n\ntype CustomFieldDef struct {\n\tDynamicData\n\n\tKey                     int32               `xml:\"key\"`\n\tName                    string              `xml:\"name\"`\n\tType                    string              `xml:\"type\"`\n\tManagedObjectType       string              `xml:\"managedObjectType,omitempty\"`\n\tFieldDefPrivileges      *PrivilegePolicyDef `xml:\"fieldDefPrivileges,omitempty\"`\n\tFieldInstancePrivileges *PrivilegePolicyDef `xml:\"fieldInstancePrivileges,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CustomFieldDef\"] = reflect.TypeOf((*CustomFieldDef)(nil)).Elem()\n}\n\ntype CustomFieldDefAddedEvent struct {\n\tCustomFieldDefEvent\n}\n\nfunc init() {\n\tt[\"CustomFieldDefAddedEvent\"] = reflect.TypeOf((*CustomFieldDefAddedEvent)(nil)).Elem()\n}\n\ntype CustomFieldDefEvent struct {\n\tCustomFieldEvent\n\n\tFieldKey int32  `xml:\"fieldKey\"`\n\tName     string `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"CustomFieldDefEvent\"] = 
reflect.TypeOf((*CustomFieldDefEvent)(nil)).Elem()\n}\n\ntype CustomFieldDefRemovedEvent struct {\n\tCustomFieldDefEvent\n}\n\nfunc init() {\n\tt[\"CustomFieldDefRemovedEvent\"] = reflect.TypeOf((*CustomFieldDefRemovedEvent)(nil)).Elem()\n}\n\ntype CustomFieldDefRenamedEvent struct {\n\tCustomFieldDefEvent\n\n\tNewName string `xml:\"newName\"`\n}\n\nfunc init() {\n\tt[\"CustomFieldDefRenamedEvent\"] = reflect.TypeOf((*CustomFieldDefRenamedEvent)(nil)).Elem()\n}\n\ntype CustomFieldEvent struct {\n\tEvent\n}\n\nfunc init() {\n\tt[\"CustomFieldEvent\"] = reflect.TypeOf((*CustomFieldEvent)(nil)).Elem()\n}\n\ntype CustomFieldStringValue struct {\n\tCustomFieldValue\n\n\tValue string `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"CustomFieldStringValue\"] = reflect.TypeOf((*CustomFieldStringValue)(nil)).Elem()\n}\n\ntype CustomFieldValue struct {\n\tDynamicData\n\n\tKey int32 `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"CustomFieldValue\"] = reflect.TypeOf((*CustomFieldValue)(nil)).Elem()\n}\n\ntype CustomFieldValueChangedEvent struct {\n\tCustomFieldEvent\n\n\tEntity    ManagedEntityEventArgument `xml:\"entity\"`\n\tFieldKey  int32                      `xml:\"fieldKey\"`\n\tName      string                     `xml:\"name\"`\n\tValue     string                     `xml:\"value\"`\n\tPrevState string                     `xml:\"prevState,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CustomFieldValueChangedEvent\"] = reflect.TypeOf((*CustomFieldValueChangedEvent)(nil)).Elem()\n}\n\ntype CustomizationAdapterMapping struct {\n\tDynamicData\n\n\tMacAddress string                  `xml:\"macAddress,omitempty\"`\n\tAdapter    CustomizationIPSettings `xml:\"adapter\"`\n}\n\nfunc init() {\n\tt[\"CustomizationAdapterMapping\"] = reflect.TypeOf((*CustomizationAdapterMapping)(nil)).Elem()\n}\n\ntype CustomizationAutoIpV6Generator struct {\n\tCustomizationIpV6Generator\n}\n\nfunc init() {\n\tt[\"CustomizationAutoIpV6Generator\"] = 
reflect.TypeOf((*CustomizationAutoIpV6Generator)(nil)).Elem()\n}\n\ntype CustomizationCustomIpGenerator struct {\n\tCustomizationIpGenerator\n\n\tArgument string `xml:\"argument,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CustomizationCustomIpGenerator\"] = reflect.TypeOf((*CustomizationCustomIpGenerator)(nil)).Elem()\n}\n\ntype CustomizationCustomIpV6Generator struct {\n\tCustomizationIpV6Generator\n\n\tArgument string `xml:\"argument,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CustomizationCustomIpV6Generator\"] = reflect.TypeOf((*CustomizationCustomIpV6Generator)(nil)).Elem()\n}\n\ntype CustomizationCustomName struct {\n\tCustomizationName\n\n\tArgument string `xml:\"argument,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CustomizationCustomName\"] = reflect.TypeOf((*CustomizationCustomName)(nil)).Elem()\n}\n\ntype CustomizationDhcpIpGenerator struct {\n\tCustomizationIpGenerator\n}\n\nfunc init() {\n\tt[\"CustomizationDhcpIpGenerator\"] = reflect.TypeOf((*CustomizationDhcpIpGenerator)(nil)).Elem()\n}\n\ntype CustomizationDhcpIpV6Generator struct {\n\tCustomizationIpV6Generator\n}\n\nfunc init() {\n\tt[\"CustomizationDhcpIpV6Generator\"] = reflect.TypeOf((*CustomizationDhcpIpV6Generator)(nil)).Elem()\n}\n\ntype CustomizationEvent struct {\n\tVmEvent\n\n\tLogLocation string `xml:\"logLocation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CustomizationEvent\"] = reflect.TypeOf((*CustomizationEvent)(nil)).Elem()\n}\n\ntype CustomizationFailed struct {\n\tCustomizationEvent\n}\n\nfunc init() {\n\tt[\"CustomizationFailed\"] = reflect.TypeOf((*CustomizationFailed)(nil)).Elem()\n}\n\ntype CustomizationFault struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"CustomizationFault\"] = reflect.TypeOf((*CustomizationFault)(nil)).Elem()\n}\n\ntype CustomizationFaultFault BaseCustomizationFault\n\nfunc init() {\n\tt[\"CustomizationFaultFault\"] = reflect.TypeOf((*CustomizationFaultFault)(nil)).Elem()\n}\n\ntype CustomizationFixedIp struct {\n\tCustomizationIpGenerator\n\n\tIpAddress string 
`xml:\"ipAddress\"`\n}\n\nfunc init() {\n\tt[\"CustomizationFixedIp\"] = reflect.TypeOf((*CustomizationFixedIp)(nil)).Elem()\n}\n\ntype CustomizationFixedIpV6 struct {\n\tCustomizationIpV6Generator\n\n\tIpAddress  string `xml:\"ipAddress\"`\n\tSubnetMask int32  `xml:\"subnetMask\"`\n}\n\nfunc init() {\n\tt[\"CustomizationFixedIpV6\"] = reflect.TypeOf((*CustomizationFixedIpV6)(nil)).Elem()\n}\n\ntype CustomizationFixedName struct {\n\tCustomizationName\n\n\tName string `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"CustomizationFixedName\"] = reflect.TypeOf((*CustomizationFixedName)(nil)).Elem()\n}\n\ntype CustomizationGlobalIPSettings struct {\n\tDynamicData\n\n\tDnsSuffixList []string `xml:\"dnsSuffixList,omitempty\"`\n\tDnsServerList []string `xml:\"dnsServerList,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CustomizationGlobalIPSettings\"] = reflect.TypeOf((*CustomizationGlobalIPSettings)(nil)).Elem()\n}\n\ntype CustomizationGuiRunOnce struct {\n\tDynamicData\n\n\tCommandList []string `xml:\"commandList\"`\n}\n\nfunc init() {\n\tt[\"CustomizationGuiRunOnce\"] = reflect.TypeOf((*CustomizationGuiRunOnce)(nil)).Elem()\n}\n\ntype CustomizationGuiUnattended struct {\n\tDynamicData\n\n\tPassword       *CustomizationPassword `xml:\"password,omitempty\"`\n\tTimeZone       int32                  `xml:\"timeZone\"`\n\tAutoLogon      bool                   `xml:\"autoLogon\"`\n\tAutoLogonCount int32                  `xml:\"autoLogonCount\"`\n}\n\nfunc init() {\n\tt[\"CustomizationGuiUnattended\"] = reflect.TypeOf((*CustomizationGuiUnattended)(nil)).Elem()\n}\n\ntype CustomizationIPSettings struct {\n\tDynamicData\n\n\tIp            BaseCustomizationIpGenerator            `xml:\"ip,typeattr\"`\n\tSubnetMask    string                                  `xml:\"subnetMask,omitempty\"`\n\tGateway       []string                                `xml:\"gateway,omitempty\"`\n\tIpV6Spec      *CustomizationIPSettingsIpV6AddressSpec `xml:\"ipV6Spec,omitempty\"`\n\tDnsServerList []string             
                   `xml:\"dnsServerList,omitempty\"`\n\tDnsDomain     string                                  `xml:\"dnsDomain,omitempty\"`\n\tPrimaryWINS   string                                  `xml:\"primaryWINS,omitempty\"`\n\tSecondaryWINS string                                  `xml:\"secondaryWINS,omitempty\"`\n\tNetBIOS       CustomizationNetBIOSMode                `xml:\"netBIOS,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CustomizationIPSettings\"] = reflect.TypeOf((*CustomizationIPSettings)(nil)).Elem()\n}\n\ntype CustomizationIPSettingsIpV6AddressSpec struct {\n\tDynamicData\n\n\tIp      []BaseCustomizationIpV6Generator `xml:\"ip,typeattr\"`\n\tGateway []string                         `xml:\"gateway,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CustomizationIPSettingsIpV6AddressSpec\"] = reflect.TypeOf((*CustomizationIPSettingsIpV6AddressSpec)(nil)).Elem()\n}\n\ntype CustomizationIdentification struct {\n\tDynamicData\n\n\tJoinWorkgroup       string                 `xml:\"joinWorkgroup,omitempty\"`\n\tJoinDomain          string                 `xml:\"joinDomain,omitempty\"`\n\tDomainAdmin         string                 `xml:\"domainAdmin,omitempty\"`\n\tDomainAdminPassword *CustomizationPassword `xml:\"domainAdminPassword,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CustomizationIdentification\"] = reflect.TypeOf((*CustomizationIdentification)(nil)).Elem()\n}\n\ntype CustomizationIdentitySettings struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"CustomizationIdentitySettings\"] = reflect.TypeOf((*CustomizationIdentitySettings)(nil)).Elem()\n}\n\ntype CustomizationIpGenerator struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"CustomizationIpGenerator\"] = reflect.TypeOf((*CustomizationIpGenerator)(nil)).Elem()\n}\n\ntype CustomizationIpV6Generator struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"CustomizationIpV6Generator\"] = reflect.TypeOf((*CustomizationIpV6Generator)(nil)).Elem()\n}\n\ntype CustomizationLicenseFilePrintData struct {\n\tDynamicData\n\n\tAutoMode  
CustomizationLicenseDataMode `xml:\"autoMode\"`\n\tAutoUsers int32                        `xml:\"autoUsers,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CustomizationLicenseFilePrintData\"] = reflect.TypeOf((*CustomizationLicenseFilePrintData)(nil)).Elem()\n}\n\ntype CustomizationLinuxIdentityFailed struct {\n\tCustomizationFailed\n}\n\nfunc init() {\n\tt[\"CustomizationLinuxIdentityFailed\"] = reflect.TypeOf((*CustomizationLinuxIdentityFailed)(nil)).Elem()\n}\n\ntype CustomizationLinuxOptions struct {\n\tCustomizationOptions\n}\n\nfunc init() {\n\tt[\"CustomizationLinuxOptions\"] = reflect.TypeOf((*CustomizationLinuxOptions)(nil)).Elem()\n}\n\ntype CustomizationLinuxPrep struct {\n\tCustomizationIdentitySettings\n\n\tHostName   BaseCustomizationName `xml:\"hostName,typeattr\"`\n\tDomain     string                `xml:\"domain\"`\n\tTimeZone   string                `xml:\"timeZone,omitempty\"`\n\tHwClockUTC *bool                 `xml:\"hwClockUTC\"`\n}\n\nfunc init() {\n\tt[\"CustomizationLinuxPrep\"] = reflect.TypeOf((*CustomizationLinuxPrep)(nil)).Elem()\n}\n\ntype CustomizationName struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"CustomizationName\"] = reflect.TypeOf((*CustomizationName)(nil)).Elem()\n}\n\ntype CustomizationNetworkSetupFailed struct {\n\tCustomizationFailed\n}\n\nfunc init() {\n\tt[\"CustomizationNetworkSetupFailed\"] = reflect.TypeOf((*CustomizationNetworkSetupFailed)(nil)).Elem()\n}\n\ntype CustomizationOptions struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"CustomizationOptions\"] = reflect.TypeOf((*CustomizationOptions)(nil)).Elem()\n}\n\ntype CustomizationPassword struct {\n\tDynamicData\n\n\tValue     string `xml:\"value\"`\n\tPlainText bool   `xml:\"plainText\"`\n}\n\nfunc init() {\n\tt[\"CustomizationPassword\"] = reflect.TypeOf((*CustomizationPassword)(nil)).Elem()\n}\n\ntype CustomizationPending struct {\n\tCustomizationFault\n}\n\nfunc init() {\n\tt[\"CustomizationPending\"] = 
reflect.TypeOf((*CustomizationPending)(nil)).Elem()\n}\n\ntype CustomizationPendingFault CustomizationPending\n\nfunc init() {\n\tt[\"CustomizationPendingFault\"] = reflect.TypeOf((*CustomizationPendingFault)(nil)).Elem()\n}\n\ntype CustomizationPrefixName struct {\n\tCustomizationName\n\n\tBase string `xml:\"base\"`\n}\n\nfunc init() {\n\tt[\"CustomizationPrefixName\"] = reflect.TypeOf((*CustomizationPrefixName)(nil)).Elem()\n}\n\ntype CustomizationSpec struct {\n\tDynamicData\n\n\tOptions          BaseCustomizationOptions          `xml:\"options,omitempty,typeattr\"`\n\tIdentity         BaseCustomizationIdentitySettings `xml:\"identity,typeattr\"`\n\tGlobalIPSettings CustomizationGlobalIPSettings     `xml:\"globalIPSettings\"`\n\tNicSettingMap    []CustomizationAdapterMapping     `xml:\"nicSettingMap,omitempty\"`\n\tEncryptionKey    []byte                            `xml:\"encryptionKey,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CustomizationSpec\"] = reflect.TypeOf((*CustomizationSpec)(nil)).Elem()\n}\n\ntype CustomizationSpecInfo struct {\n\tDynamicData\n\n\tName           string     `xml:\"name\"`\n\tDescription    string     `xml:\"description\"`\n\tType           string     `xml:\"type\"`\n\tChangeVersion  string     `xml:\"changeVersion,omitempty\"`\n\tLastUpdateTime *time.Time `xml:\"lastUpdateTime\"`\n}\n\nfunc init() {\n\tt[\"CustomizationSpecInfo\"] = reflect.TypeOf((*CustomizationSpecInfo)(nil)).Elem()\n}\n\ntype CustomizationSpecItem struct {\n\tDynamicData\n\n\tInfo CustomizationSpecInfo `xml:\"info\"`\n\tSpec CustomizationSpec     `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"CustomizationSpecItem\"] = reflect.TypeOf((*CustomizationSpecItem)(nil)).Elem()\n}\n\ntype CustomizationSpecItemToXml CustomizationSpecItemToXmlRequestType\n\nfunc init() {\n\tt[\"CustomizationSpecItemToXml\"] = reflect.TypeOf((*CustomizationSpecItemToXml)(nil)).Elem()\n}\n\ntype CustomizationSpecItemToXmlRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tItem 
CustomizationSpecItem  `xml:\"item\"`\n}\n\nfunc init() {\n\tt[\"CustomizationSpecItemToXmlRequestType\"] = reflect.TypeOf((*CustomizationSpecItemToXmlRequestType)(nil)).Elem()\n}\n\ntype CustomizationSpecItemToXmlResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype CustomizationStartedEvent struct {\n\tCustomizationEvent\n}\n\nfunc init() {\n\tt[\"CustomizationStartedEvent\"] = reflect.TypeOf((*CustomizationStartedEvent)(nil)).Elem()\n}\n\ntype CustomizationStatelessIpV6Generator struct {\n\tCustomizationIpV6Generator\n}\n\nfunc init() {\n\tt[\"CustomizationStatelessIpV6Generator\"] = reflect.TypeOf((*CustomizationStatelessIpV6Generator)(nil)).Elem()\n}\n\ntype CustomizationSucceeded struct {\n\tCustomizationEvent\n}\n\nfunc init() {\n\tt[\"CustomizationSucceeded\"] = reflect.TypeOf((*CustomizationSucceeded)(nil)).Elem()\n}\n\ntype CustomizationSysprep struct {\n\tCustomizationIdentitySettings\n\n\tGuiUnattended        CustomizationGuiUnattended         `xml:\"guiUnattended\"`\n\tUserData             CustomizationUserData              `xml:\"userData\"`\n\tGuiRunOnce           *CustomizationGuiRunOnce           `xml:\"guiRunOnce,omitempty\"`\n\tIdentification       CustomizationIdentification        `xml:\"identification\"`\n\tLicenseFilePrintData *CustomizationLicenseFilePrintData `xml:\"licenseFilePrintData,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CustomizationSysprep\"] = reflect.TypeOf((*CustomizationSysprep)(nil)).Elem()\n}\n\ntype CustomizationSysprepFailed struct {\n\tCustomizationFailed\n\n\tSysprepVersion string `xml:\"sysprepVersion\"`\n\tSystemVersion  string `xml:\"systemVersion\"`\n}\n\nfunc init() {\n\tt[\"CustomizationSysprepFailed\"] = reflect.TypeOf((*CustomizationSysprepFailed)(nil)).Elem()\n}\n\ntype CustomizationSysprepText struct {\n\tCustomizationIdentitySettings\n\n\tValue string `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"CustomizationSysprepText\"] = reflect.TypeOf((*CustomizationSysprepText)(nil)).Elem()\n}\n\ntype 
CustomizationUnknownFailure struct {\n\tCustomizationFailed\n}\n\nfunc init() {\n\tt[\"CustomizationUnknownFailure\"] = reflect.TypeOf((*CustomizationUnknownFailure)(nil)).Elem()\n}\n\ntype CustomizationUnknownIpGenerator struct {\n\tCustomizationIpGenerator\n}\n\nfunc init() {\n\tt[\"CustomizationUnknownIpGenerator\"] = reflect.TypeOf((*CustomizationUnknownIpGenerator)(nil)).Elem()\n}\n\ntype CustomizationUnknownIpV6Generator struct {\n\tCustomizationIpV6Generator\n}\n\nfunc init() {\n\tt[\"CustomizationUnknownIpV6Generator\"] = reflect.TypeOf((*CustomizationUnknownIpV6Generator)(nil)).Elem()\n}\n\ntype CustomizationUnknownName struct {\n\tCustomizationName\n}\n\nfunc init() {\n\tt[\"CustomizationUnknownName\"] = reflect.TypeOf((*CustomizationUnknownName)(nil)).Elem()\n}\n\ntype CustomizationUserData struct {\n\tDynamicData\n\n\tFullName     string                `xml:\"fullName\"`\n\tOrgName      string                `xml:\"orgName\"`\n\tComputerName BaseCustomizationName `xml:\"computerName,typeattr\"`\n\tProductId    string                `xml:\"productId\"`\n}\n\nfunc init() {\n\tt[\"CustomizationUserData\"] = reflect.TypeOf((*CustomizationUserData)(nil)).Elem()\n}\n\ntype CustomizationVirtualMachineName struct {\n\tCustomizationName\n}\n\nfunc init() {\n\tt[\"CustomizationVirtualMachineName\"] = reflect.TypeOf((*CustomizationVirtualMachineName)(nil)).Elem()\n}\n\ntype CustomizationWinOptions struct {\n\tCustomizationOptions\n\n\tChangeSID      bool                             `xml:\"changeSID\"`\n\tDeleteAccounts bool                             `xml:\"deleteAccounts\"`\n\tReboot         CustomizationSysprepRebootOption `xml:\"reboot,omitempty\"`\n}\n\nfunc init() {\n\tt[\"CustomizationWinOptions\"] = reflect.TypeOf((*CustomizationWinOptions)(nil)).Elem()\n}\n\ntype CustomizeVMRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tSpec CustomizationSpec      `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"CustomizeVMRequestType\"] = 
reflect.TypeOf((*CustomizeVMRequestType)(nil)).Elem()\n}\n\ntype CustomizeVM_Task CustomizeVMRequestType\n\nfunc init() {\n\tt[\"CustomizeVM_Task\"] = reflect.TypeOf((*CustomizeVM_Task)(nil)).Elem()\n}\n\ntype CustomizeVM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype DVPortConfigInfo struct {\n\tDynamicData\n\n\tName          string                   `xml:\"name,omitempty\"`\n\tScope         []ManagedObjectReference `xml:\"scope,omitempty\"`\n\tDescription   string                   `xml:\"description,omitempty\"`\n\tSetting       BaseDVPortSetting        `xml:\"setting,omitempty,typeattr\"`\n\tConfigVersion string                   `xml:\"configVersion\"`\n}\n\nfunc init() {\n\tt[\"DVPortConfigInfo\"] = reflect.TypeOf((*DVPortConfigInfo)(nil)).Elem()\n}\n\ntype DVPortConfigSpec struct {\n\tDynamicData\n\n\tOperation     string                   `xml:\"operation\"`\n\tKey           string                   `xml:\"key,omitempty\"`\n\tName          string                   `xml:\"name,omitempty\"`\n\tScope         []ManagedObjectReference `xml:\"scope,omitempty\"`\n\tDescription   string                   `xml:\"description,omitempty\"`\n\tSetting       BaseDVPortSetting        `xml:\"setting,omitempty,typeattr\"`\n\tConfigVersion string                   `xml:\"configVersion,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVPortConfigSpec\"] = reflect.TypeOf((*DVPortConfigSpec)(nil)).Elem()\n}\n\ntype DVPortNotSupported struct {\n\tDeviceBackingNotSupported\n}\n\nfunc init() {\n\tt[\"DVPortNotSupported\"] = reflect.TypeOf((*DVPortNotSupported)(nil)).Elem()\n}\n\ntype DVPortNotSupportedFault DVPortNotSupported\n\nfunc init() {\n\tt[\"DVPortNotSupportedFault\"] = reflect.TypeOf((*DVPortNotSupportedFault)(nil)).Elem()\n}\n\ntype DVPortSetting struct {\n\tDynamicData\n\n\tBlocked                 *BoolPolicy              `xml:\"blocked,omitempty\"`\n\tVmDirectPathGen2Allowed *BoolPolicy              
`xml:\"vmDirectPathGen2Allowed,omitempty\"`\n\tInShapingPolicy         *DVSTrafficShapingPolicy `xml:\"inShapingPolicy,omitempty\"`\n\tOutShapingPolicy        *DVSTrafficShapingPolicy `xml:\"outShapingPolicy,omitempty\"`\n\tVendorSpecificConfig    *DVSVendorSpecificConfig `xml:\"vendorSpecificConfig,omitempty\"`\n\tNetworkResourcePoolKey  *StringPolicy            `xml:\"networkResourcePoolKey,omitempty\"`\n\tFilterPolicy            *DvsFilterPolicy         `xml:\"filterPolicy,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVPortSetting\"] = reflect.TypeOf((*DVPortSetting)(nil)).Elem()\n}\n\ntype DVPortState struct {\n\tDynamicData\n\n\tRuntimeInfo         *DVPortStatus                             `xml:\"runtimeInfo,omitempty\"`\n\tStats               DistributedVirtualSwitchPortStatistics    `xml:\"stats\"`\n\tVendorSpecificState []DistributedVirtualSwitchKeyedOpaqueBlob `xml:\"vendorSpecificState,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVPortState\"] = reflect.TypeOf((*DVPortState)(nil)).Elem()\n}\n\ntype DVPortStatus struct {\n\tDynamicData\n\n\tLinkUp                                 bool           `xml:\"linkUp\"`\n\tBlocked                                bool           `xml:\"blocked\"`\n\tVlanIds                                []NumericRange `xml:\"vlanIds,omitempty\"`\n\tTrunkingMode                           *bool          `xml:\"trunkingMode\"`\n\tMtu                                    int32          `xml:\"mtu,omitempty\"`\n\tLinkPeer                               string         `xml:\"linkPeer,omitempty\"`\n\tMacAddress                             string         `xml:\"macAddress,omitempty\"`\n\tStatusDetail                           string         `xml:\"statusDetail,omitempty\"`\n\tVmDirectPathGen2Active                 *bool          `xml:\"vmDirectPathGen2Active\"`\n\tVmDirectPathGen2InactiveReasonNetwork  []string       `xml:\"vmDirectPathGen2InactiveReasonNetwork,omitempty\"`\n\tVmDirectPathGen2InactiveReasonOther    []string       
`xml:\"vmDirectPathGen2InactiveReasonOther,omitempty\"`\n\tVmDirectPathGen2InactiveReasonExtended string         `xml:\"vmDirectPathGen2InactiveReasonExtended,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVPortStatus\"] = reflect.TypeOf((*DVPortStatus)(nil)).Elem()\n}\n\ntype DVPortgroupConfigInfo struct {\n\tDynamicData\n\n\tKey                          string                                    `xml:\"key\"`\n\tName                         string                                    `xml:\"name\"`\n\tNumPorts                     int32                                     `xml:\"numPorts\"`\n\tDistributedVirtualSwitch     *ManagedObjectReference                   `xml:\"distributedVirtualSwitch,omitempty\"`\n\tDefaultPortConfig            BaseDVPortSetting                         `xml:\"defaultPortConfig,omitempty,typeattr\"`\n\tDescription                  string                                    `xml:\"description,omitempty\"`\n\tType                         string                                    `xml:\"type\"`\n\tPolicy                       BaseDVPortgroupPolicy                     `xml:\"policy,typeattr\"`\n\tPortNameFormat               string                                    `xml:\"portNameFormat,omitempty\"`\n\tScope                        []ManagedObjectReference                  `xml:\"scope,omitempty\"`\n\tVendorSpecificConfig         []DistributedVirtualSwitchKeyedOpaqueBlob `xml:\"vendorSpecificConfig,omitempty\"`\n\tConfigVersion                string                                    `xml:\"configVersion,omitempty\"`\n\tAutoExpand                   *bool                                     `xml:\"autoExpand\"`\n\tVmVnicNetworkResourcePoolKey string                                    `xml:\"vmVnicNetworkResourcePoolKey,omitempty\"`\n\tUplink                       *bool                                     `xml:\"uplink\"`\n}\n\nfunc init() {\n\tt[\"DVPortgroupConfigInfo\"] = reflect.TypeOf((*DVPortgroupConfigInfo)(nil)).Elem()\n}\n\ntype 
DVPortgroupConfigSpec struct {\n\tDynamicData\n\n\tConfigVersion                string                                    `xml:\"configVersion,omitempty\"`\n\tName                         string                                    `xml:\"name,omitempty\"`\n\tNumPorts                     int32                                     `xml:\"numPorts,omitempty\"`\n\tPortNameFormat               string                                    `xml:\"portNameFormat,omitempty\"`\n\tDefaultPortConfig            BaseDVPortSetting                         `xml:\"defaultPortConfig,omitempty,typeattr\"`\n\tDescription                  string                                    `xml:\"description,omitempty\"`\n\tType                         string                                    `xml:\"type,omitempty\"`\n\tScope                        []ManagedObjectReference                  `xml:\"scope,omitempty\"`\n\tPolicy                       BaseDVPortgroupPolicy                     `xml:\"policy,omitempty,typeattr\"`\n\tVendorSpecificConfig         []DistributedVirtualSwitchKeyedOpaqueBlob `xml:\"vendorSpecificConfig,omitempty\"`\n\tAutoExpand                   *bool                                     `xml:\"autoExpand\"`\n\tVmVnicNetworkResourcePoolKey string                                    `xml:\"vmVnicNetworkResourcePoolKey,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVPortgroupConfigSpec\"] = reflect.TypeOf((*DVPortgroupConfigSpec)(nil)).Elem()\n}\n\ntype DVPortgroupCreatedEvent struct {\n\tDVPortgroupEvent\n}\n\nfunc init() {\n\tt[\"DVPortgroupCreatedEvent\"] = reflect.TypeOf((*DVPortgroupCreatedEvent)(nil)).Elem()\n}\n\ntype DVPortgroupDestroyedEvent struct {\n\tDVPortgroupEvent\n}\n\nfunc init() {\n\tt[\"DVPortgroupDestroyedEvent\"] = reflect.TypeOf((*DVPortgroupDestroyedEvent)(nil)).Elem()\n}\n\ntype DVPortgroupEvent struct {\n\tEvent\n}\n\nfunc init() {\n\tt[\"DVPortgroupEvent\"] = reflect.TypeOf((*DVPortgroupEvent)(nil)).Elem()\n}\n\ntype DVPortgroupPolicy struct 
{\n\tDynamicData\n\n\tBlockOverrideAllowed               bool  `xml:\"blockOverrideAllowed\"`\n\tShapingOverrideAllowed             bool  `xml:\"shapingOverrideAllowed\"`\n\tVendorConfigOverrideAllowed        bool  `xml:\"vendorConfigOverrideAllowed\"`\n\tLivePortMovingAllowed              bool  `xml:\"livePortMovingAllowed\"`\n\tPortConfigResetAtDisconnect        bool  `xml:\"portConfigResetAtDisconnect\"`\n\tNetworkResourcePoolOverrideAllowed *bool `xml:\"networkResourcePoolOverrideAllowed\"`\n\tTrafficFilterOverrideAllowed       *bool `xml:\"trafficFilterOverrideAllowed\"`\n}\n\nfunc init() {\n\tt[\"DVPortgroupPolicy\"] = reflect.TypeOf((*DVPortgroupPolicy)(nil)).Elem()\n}\n\ntype DVPortgroupReconfiguredEvent struct {\n\tDVPortgroupEvent\n\n\tConfigSpec    DVPortgroupConfigSpec     `xml:\"configSpec\"`\n\tConfigChanges *ChangesInfoEventArgument `xml:\"configChanges,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVPortgroupReconfiguredEvent\"] = reflect.TypeOf((*DVPortgroupReconfiguredEvent)(nil)).Elem()\n}\n\ntype DVPortgroupRenamedEvent struct {\n\tDVPortgroupEvent\n\n\tOldName string `xml:\"oldName\"`\n\tNewName string `xml:\"newName\"`\n}\n\nfunc init() {\n\tt[\"DVPortgroupRenamedEvent\"] = reflect.TypeOf((*DVPortgroupRenamedEvent)(nil)).Elem()\n}\n\ntype DVPortgroupRollbackRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tEntityBackup *EntityBackupConfig    `xml:\"entityBackup,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVPortgroupRollbackRequestType\"] = reflect.TypeOf((*DVPortgroupRollbackRequestType)(nil)).Elem()\n}\n\ntype DVPortgroupRollback_Task DVPortgroupRollbackRequestType\n\nfunc init() {\n\tt[\"DVPortgroupRollback_Task\"] = reflect.TypeOf((*DVPortgroupRollback_Task)(nil)).Elem()\n}\n\ntype DVPortgroupRollback_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype DVPortgroupSelection struct {\n\tSelectionSet\n\n\tDvsUuid      string   `xml:\"dvsUuid\"`\n\tPortgroupKey []string 
`xml:\"portgroupKey\"`\n}\n\nfunc init() {\n\tt[\"DVPortgroupSelection\"] = reflect.TypeOf((*DVPortgroupSelection)(nil)).Elem()\n}\n\ntype DVSBackupRestoreCapability struct {\n\tDynamicData\n\n\tBackupRestoreSupported bool `xml:\"backupRestoreSupported\"`\n}\n\nfunc init() {\n\tt[\"DVSBackupRestoreCapability\"] = reflect.TypeOf((*DVSBackupRestoreCapability)(nil)).Elem()\n}\n\ntype DVSCapability struct {\n\tDynamicData\n\n\tDvsOperationSupported              *bool                                     `xml:\"dvsOperationSupported\"`\n\tDvPortGroupOperationSupported      *bool                                     `xml:\"dvPortGroupOperationSupported\"`\n\tDvPortOperationSupported           *bool                                     `xml:\"dvPortOperationSupported\"`\n\tCompatibleHostComponentProductInfo []DistributedVirtualSwitchHostProductSpec `xml:\"compatibleHostComponentProductInfo,omitempty\"`\n\tFeaturesSupported                  BaseDVSFeatureCapability                  `xml:\"featuresSupported,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"DVSCapability\"] = reflect.TypeOf((*DVSCapability)(nil)).Elem()\n}\n\ntype DVSConfigInfo struct {\n\tDynamicData\n\n\tUuid                                string                                    `xml:\"uuid\"`\n\tName                                string                                    `xml:\"name\"`\n\tNumStandalonePorts                  int32                                     `xml:\"numStandalonePorts\"`\n\tNumPorts                            int32                                     `xml:\"numPorts\"`\n\tMaxPorts                            int32                                     `xml:\"maxPorts\"`\n\tUplinkPortPolicy                    BaseDVSUplinkPortPolicy                   `xml:\"uplinkPortPolicy,typeattr\"`\n\tUplinkPortgroup                     []ManagedObjectReference                  `xml:\"uplinkPortgroup,omitempty\"`\n\tDefaultPortConfig                   BaseDVPortSetting                         
`xml:\"defaultPortConfig,typeattr\"`\n\tHost                                []DistributedVirtualSwitchHostMember      `xml:\"host,omitempty\"`\n\tProductInfo                         DistributedVirtualSwitchProductSpec       `xml:\"productInfo\"`\n\tTargetInfo                          *DistributedVirtualSwitchProductSpec      `xml:\"targetInfo,omitempty\"`\n\tExtensionKey                        string                                    `xml:\"extensionKey,omitempty\"`\n\tVendorSpecificConfig                []DistributedVirtualSwitchKeyedOpaqueBlob `xml:\"vendorSpecificConfig,omitempty\"`\n\tPolicy                              *DVSPolicy                                `xml:\"policy,omitempty\"`\n\tDescription                         string                                    `xml:\"description,omitempty\"`\n\tConfigVersion                       string                                    `xml:\"configVersion\"`\n\tContact                             DVSContactInfo                            `xml:\"contact\"`\n\tSwitchIpAddress                     string                                    `xml:\"switchIpAddress,omitempty\"`\n\tCreateTime                          time.Time                                 `xml:\"createTime\"`\n\tNetworkResourceManagementEnabled    *bool                                     `xml:\"networkResourceManagementEnabled\"`\n\tDefaultProxySwitchMaxNumPorts       int32                                     `xml:\"defaultProxySwitchMaxNumPorts,omitempty\"`\n\tHealthCheckConfig                   []BaseDVSHealthCheckConfig                `xml:\"healthCheckConfig,omitempty,typeattr\"`\n\tInfrastructureTrafficResourceConfig []DvsHostInfrastructureTrafficResource    `xml:\"infrastructureTrafficResourceConfig,omitempty\"`\n\tNetworkResourceControlVersion       string                                    `xml:\"networkResourceControlVersion,omitempty\"`\n\tVmVnicNetworkResourcePool           []DVSVmVnicNetworkResourcePool            
`xml:\"vmVnicNetworkResourcePool,omitempty\"`\n\tPnicCapacityRatioForReservation     int32                                     `xml:\"pnicCapacityRatioForReservation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVSConfigInfo\"] = reflect.TypeOf((*DVSConfigInfo)(nil)).Elem()\n}\n\ntype DVSConfigSpec struct {\n\tDynamicData\n\n\tConfigVersion                       string                                         `xml:\"configVersion,omitempty\"`\n\tName                                string                                         `xml:\"name,omitempty\"`\n\tNumStandalonePorts                  int32                                          `xml:\"numStandalonePorts,omitempty\"`\n\tMaxPorts                            int32                                          `xml:\"maxPorts,omitempty\"`\n\tUplinkPortPolicy                    BaseDVSUplinkPortPolicy                        `xml:\"uplinkPortPolicy,omitempty,typeattr\"`\n\tUplinkPortgroup                     []ManagedObjectReference                       `xml:\"uplinkPortgroup,omitempty\"`\n\tDefaultPortConfig                   BaseDVPortSetting                              `xml:\"defaultPortConfig,omitempty,typeattr\"`\n\tHost                                []DistributedVirtualSwitchHostMemberConfigSpec `xml:\"host,omitempty\"`\n\tExtensionKey                        string                                         `xml:\"extensionKey,omitempty\"`\n\tDescription                         string                                         `xml:\"description,omitempty\"`\n\tPolicy                              *DVSPolicy                                     `xml:\"policy,omitempty\"`\n\tVendorSpecificConfig                []DistributedVirtualSwitchKeyedOpaqueBlob      `xml:\"vendorSpecificConfig,omitempty\"`\n\tContact                             *DVSContactInfo                                `xml:\"contact,omitempty\"`\n\tSwitchIpAddress                     string                                         
`xml:\"switchIpAddress,omitempty\"`\n\tDefaultProxySwitchMaxNumPorts       int32                                          `xml:\"defaultProxySwitchMaxNumPorts,omitempty\"`\n\tInfrastructureTrafficResourceConfig []DvsHostInfrastructureTrafficResource         `xml:\"infrastructureTrafficResourceConfig,omitempty\"`\n\tNetworkResourceControlVersion       string                                         `xml:\"networkResourceControlVersion,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVSConfigSpec\"] = reflect.TypeOf((*DVSConfigSpec)(nil)).Elem()\n}\n\ntype DVSContactInfo struct {\n\tDynamicData\n\n\tName    string `xml:\"name,omitempty\"`\n\tContact string `xml:\"contact,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVSContactInfo\"] = reflect.TypeOf((*DVSContactInfo)(nil)).Elem()\n}\n\ntype DVSCreateSpec struct {\n\tDynamicData\n\n\tConfigSpec  BaseDVSConfigSpec                    `xml:\"configSpec,typeattr\"`\n\tProductInfo *DistributedVirtualSwitchProductSpec `xml:\"productInfo,omitempty\"`\n\tCapability  *DVSCapability                       `xml:\"capability,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVSCreateSpec\"] = reflect.TypeOf((*DVSCreateSpec)(nil)).Elem()\n}\n\ntype DVSFailureCriteria struct {\n\tInheritablePolicy\n\n\tCheckSpeed        *StringPolicy `xml:\"checkSpeed,omitempty\"`\n\tSpeed             *IntPolicy    `xml:\"speed,omitempty\"`\n\tCheckDuplex       *BoolPolicy   `xml:\"checkDuplex,omitempty\"`\n\tFullDuplex        *BoolPolicy   `xml:\"fullDuplex,omitempty\"`\n\tCheckErrorPercent *BoolPolicy   `xml:\"checkErrorPercent,omitempty\"`\n\tPercentage        *IntPolicy    `xml:\"percentage,omitempty\"`\n\tCheckBeacon       *BoolPolicy   `xml:\"checkBeacon,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVSFailureCriteria\"] = reflect.TypeOf((*DVSFailureCriteria)(nil)).Elem()\n}\n\ntype DVSFeatureCapability struct {\n\tDynamicData\n\n\tNetworkResourceManagementSupported  bool                                    
`xml:\"networkResourceManagementSupported\"`\n\tVmDirectPathGen2Supported           bool                                    `xml:\"vmDirectPathGen2Supported\"`\n\tNicTeamingPolicy                    []string                                `xml:\"nicTeamingPolicy,omitempty\"`\n\tNetworkResourcePoolHighShareValue   int32                                   `xml:\"networkResourcePoolHighShareValue,omitempty\"`\n\tNetworkResourceManagementCapability *DVSNetworkResourceManagementCapability `xml:\"networkResourceManagementCapability,omitempty\"`\n\tHealthCheckCapability               BaseDVSHealthCheckCapability            `xml:\"healthCheckCapability,omitempty,typeattr\"`\n\tRollbackCapability                  *DVSRollbackCapability                  `xml:\"rollbackCapability,omitempty\"`\n\tBackupRestoreCapability             *DVSBackupRestoreCapability             `xml:\"backupRestoreCapability,omitempty\"`\n\tNetworkFilterSupported              *bool                                   `xml:\"networkFilterSupported\"`\n}\n\nfunc init() {\n\tt[\"DVSFeatureCapability\"] = reflect.TypeOf((*DVSFeatureCapability)(nil)).Elem()\n}\n\ntype DVSHealthCheckCapability struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"DVSHealthCheckCapability\"] = reflect.TypeOf((*DVSHealthCheckCapability)(nil)).Elem()\n}\n\ntype DVSHealthCheckConfig struct {\n\tDynamicData\n\n\tEnable   *bool `xml:\"enable\"`\n\tInterval int32 `xml:\"interval,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVSHealthCheckConfig\"] = reflect.TypeOf((*DVSHealthCheckConfig)(nil)).Elem()\n}\n\ntype DVSHostLocalPortInfo struct {\n\tDynamicData\n\n\tSwitchUuid string            `xml:\"switchUuid\"`\n\tPortKey    string            `xml:\"portKey\"`\n\tSetting    BaseDVPortSetting `xml:\"setting,typeattr\"`\n\tVnic       string            `xml:\"vnic\"`\n}\n\nfunc init() {\n\tt[\"DVSHostLocalPortInfo\"] = reflect.TypeOf((*DVSHostLocalPortInfo)(nil)).Elem()\n}\n\ntype DVSManagerDvsConfigTarget struct 
{\n\tDynamicData\n\n\tDistributedVirtualPortgroup []DistributedVirtualPortgroupInfo `xml:\"distributedVirtualPortgroup,omitempty\"`\n\tDistributedVirtualSwitch    []DistributedVirtualSwitchInfo    `xml:\"distributedVirtualSwitch,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVSManagerDvsConfigTarget\"] = reflect.TypeOf((*DVSManagerDvsConfigTarget)(nil)).Elem()\n}\n\ntype DVSManagerExportEntityRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tSelectionSet []BaseSelectionSet     `xml:\"selectionSet,typeattr\"`\n}\n\nfunc init() {\n\tt[\"DVSManagerExportEntityRequestType\"] = reflect.TypeOf((*DVSManagerExportEntityRequestType)(nil)).Elem()\n}\n\ntype DVSManagerExportEntity_Task DVSManagerExportEntityRequestType\n\nfunc init() {\n\tt[\"DVSManagerExportEntity_Task\"] = reflect.TypeOf((*DVSManagerExportEntity_Task)(nil)).Elem()\n}\n\ntype DVSManagerExportEntity_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype DVSManagerImportEntityRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tEntityBackup []EntityBackupConfig   `xml:\"entityBackup\"`\n\tImportType   string                 `xml:\"importType\"`\n}\n\nfunc init() {\n\tt[\"DVSManagerImportEntityRequestType\"] = reflect.TypeOf((*DVSManagerImportEntityRequestType)(nil)).Elem()\n}\n\ntype DVSManagerImportEntity_Task DVSManagerImportEntityRequestType\n\nfunc init() {\n\tt[\"DVSManagerImportEntity_Task\"] = reflect.TypeOf((*DVSManagerImportEntity_Task)(nil)).Elem()\n}\n\ntype DVSManagerImportEntity_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype DVSManagerLookupDvPortGroup DVSManagerLookupDvPortGroupRequestType\n\nfunc init() {\n\tt[\"DVSManagerLookupDvPortGroup\"] = reflect.TypeOf((*DVSManagerLookupDvPortGroup)(nil)).Elem()\n}\n\ntype DVSManagerLookupDvPortGroupRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tSwitchUuid   string                 
`xml:\"switchUuid\"`\n\tPortgroupKey string                 `xml:\"portgroupKey\"`\n}\n\nfunc init() {\n\tt[\"DVSManagerLookupDvPortGroupRequestType\"] = reflect.TypeOf((*DVSManagerLookupDvPortGroupRequestType)(nil)).Elem()\n}\n\ntype DVSManagerLookupDvPortGroupResponse struct {\n\tReturnval *ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype DVSNameArrayUplinkPortPolicy struct {\n\tDVSUplinkPortPolicy\n\n\tUplinkPortName []string `xml:\"uplinkPortName\"`\n}\n\nfunc init() {\n\tt[\"DVSNameArrayUplinkPortPolicy\"] = reflect.TypeOf((*DVSNameArrayUplinkPortPolicy)(nil)).Elem()\n}\n\ntype DVSNetworkResourceManagementCapability struct {\n\tDynamicData\n\n\tNetworkResourceManagementSupported       bool  `xml:\"networkResourceManagementSupported\"`\n\tNetworkResourcePoolHighShareValue        int32 `xml:\"networkResourcePoolHighShareValue\"`\n\tQosSupported                             bool  `xml:\"qosSupported\"`\n\tUserDefinedNetworkResourcePoolsSupported bool  `xml:\"userDefinedNetworkResourcePoolsSupported\"`\n\tNetworkResourceControlVersion3Supported  *bool `xml:\"networkResourceControlVersion3Supported\"`\n}\n\nfunc init() {\n\tt[\"DVSNetworkResourceManagementCapability\"] = reflect.TypeOf((*DVSNetworkResourceManagementCapability)(nil)).Elem()\n}\n\ntype DVSNetworkResourcePool struct {\n\tDynamicData\n\n\tKey            string                               `xml:\"key\"`\n\tName           string                               `xml:\"name,omitempty\"`\n\tDescription    string                               `xml:\"description,omitempty\"`\n\tConfigVersion  string                               `xml:\"configVersion\"`\n\tAllocationInfo DVSNetworkResourcePoolAllocationInfo `xml:\"allocationInfo\"`\n}\n\nfunc init() {\n\tt[\"DVSNetworkResourcePool\"] = reflect.TypeOf((*DVSNetworkResourcePool)(nil)).Elem()\n}\n\ntype DVSNetworkResourcePoolAllocationInfo struct {\n\tDynamicData\n\n\tLimit       int64       `xml:\"limit,omitempty\"`\n\tShares      *SharesInfo 
`xml:\"shares,omitempty\"`\n\tPriorityTag int32       `xml:\"priorityTag,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVSNetworkResourcePoolAllocationInfo\"] = reflect.TypeOf((*DVSNetworkResourcePoolAllocationInfo)(nil)).Elem()\n}\n\ntype DVSNetworkResourcePoolConfigSpec struct {\n\tDynamicData\n\n\tKey            string                                `xml:\"key\"`\n\tConfigVersion  string                                `xml:\"configVersion,omitempty\"`\n\tAllocationInfo *DVSNetworkResourcePoolAllocationInfo `xml:\"allocationInfo,omitempty\"`\n\tName           string                                `xml:\"name,omitempty\"`\n\tDescription    string                                `xml:\"description,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVSNetworkResourcePoolConfigSpec\"] = reflect.TypeOf((*DVSNetworkResourcePoolConfigSpec)(nil)).Elem()\n}\n\ntype DVSPolicy struct {\n\tDynamicData\n\n\tAutoPreInstallAllowed *bool `xml:\"autoPreInstallAllowed\"`\n\tAutoUpgradeAllowed    *bool `xml:\"autoUpgradeAllowed\"`\n\tPartialUpgradeAllowed *bool `xml:\"partialUpgradeAllowed\"`\n}\n\nfunc init() {\n\tt[\"DVSPolicy\"] = reflect.TypeOf((*DVSPolicy)(nil)).Elem()\n}\n\ntype DVSRollbackCapability struct {\n\tDynamicData\n\n\tRollbackSupported bool `xml:\"rollbackSupported\"`\n}\n\nfunc init() {\n\tt[\"DVSRollbackCapability\"] = reflect.TypeOf((*DVSRollbackCapability)(nil)).Elem()\n}\n\ntype DVSRollbackRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tEntityBackup *EntityBackupConfig    `xml:\"entityBackup,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVSRollbackRequestType\"] = reflect.TypeOf((*DVSRollbackRequestType)(nil)).Elem()\n}\n\ntype DVSRollback_Task DVSRollbackRequestType\n\nfunc init() {\n\tt[\"DVSRollback_Task\"] = reflect.TypeOf((*DVSRollback_Task)(nil)).Elem()\n}\n\ntype DVSRollback_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype DVSRuntimeInfo struct {\n\tDynamicData\n\n\tHostMemberRuntime   
[]HostMemberRuntimeInfo `xml:\"hostMemberRuntime,omitempty\"`\n\tResourceRuntimeInfo *DvsResourceRuntimeInfo `xml:\"resourceRuntimeInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVSRuntimeInfo\"] = reflect.TypeOf((*DVSRuntimeInfo)(nil)).Elem()\n}\n\ntype DVSSecurityPolicy struct {\n\tInheritablePolicy\n\n\tAllowPromiscuous *BoolPolicy `xml:\"allowPromiscuous,omitempty\"`\n\tMacChanges       *BoolPolicy `xml:\"macChanges,omitempty\"`\n\tForgedTransmits  *BoolPolicy `xml:\"forgedTransmits,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVSSecurityPolicy\"] = reflect.TypeOf((*DVSSecurityPolicy)(nil)).Elem()\n}\n\ntype DVSSelection struct {\n\tSelectionSet\n\n\tDvsUuid string `xml:\"dvsUuid\"`\n}\n\nfunc init() {\n\tt[\"DVSSelection\"] = reflect.TypeOf((*DVSSelection)(nil)).Elem()\n}\n\ntype DVSSummary struct {\n\tDynamicData\n\n\tName          string                               `xml:\"name\"`\n\tUuid          string                               `xml:\"uuid\"`\n\tNumPorts      int32                                `xml:\"numPorts\"`\n\tProductInfo   *DistributedVirtualSwitchProductSpec `xml:\"productInfo,omitempty\"`\n\tHostMember    []ManagedObjectReference             `xml:\"hostMember,omitempty\"`\n\tVm            []ManagedObjectReference             `xml:\"vm,omitempty\"`\n\tHost          []ManagedObjectReference             `xml:\"host,omitempty\"`\n\tPortgroupName []string                             `xml:\"portgroupName,omitempty\"`\n\tDescription   string                               `xml:\"description,omitempty\"`\n\tContact       *DVSContactInfo                      `xml:\"contact,omitempty\"`\n\tNumHosts      int32                                `xml:\"numHosts,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVSSummary\"] = reflect.TypeOf((*DVSSummary)(nil)).Elem()\n}\n\ntype DVSTrafficShapingPolicy struct {\n\tInheritablePolicy\n\n\tEnabled          *BoolPolicy `xml:\"enabled,omitempty\"`\n\tAverageBandwidth *LongPolicy 
`xml:\"averageBandwidth,omitempty\"`\n\tPeakBandwidth    *LongPolicy `xml:\"peakBandwidth,omitempty\"`\n\tBurstSize        *LongPolicy `xml:\"burstSize,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVSTrafficShapingPolicy\"] = reflect.TypeOf((*DVSTrafficShapingPolicy)(nil)).Elem()\n}\n\ntype DVSUplinkPortPolicy struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"DVSUplinkPortPolicy\"] = reflect.TypeOf((*DVSUplinkPortPolicy)(nil)).Elem()\n}\n\ntype DVSVendorSpecificConfig struct {\n\tInheritablePolicy\n\n\tKeyValue []DistributedVirtualSwitchKeyedOpaqueBlob `xml:\"keyValue,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVSVendorSpecificConfig\"] = reflect.TypeOf((*DVSVendorSpecificConfig)(nil)).Elem()\n}\n\ntype DVSVmVnicNetworkResourcePool struct {\n\tDynamicData\n\n\tKey            string                       `xml:\"key\"`\n\tName           string                       `xml:\"name,omitempty\"`\n\tDescription    string                       `xml:\"description,omitempty\"`\n\tConfigVersion  string                       `xml:\"configVersion\"`\n\tAllocationInfo *DvsVmVnicResourceAllocation `xml:\"allocationInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DVSVmVnicNetworkResourcePool\"] = reflect.TypeOf((*DVSVmVnicNetworkResourcePool)(nil)).Elem()\n}\n\ntype DailyTaskScheduler struct {\n\tHourlyTaskScheduler\n\n\tHour int32 `xml:\"hour\"`\n}\n\nfunc init() {\n\tt[\"DailyTaskScheduler\"] = reflect.TypeOf((*DailyTaskScheduler)(nil)).Elem()\n}\n\ntype DasAdmissionControlDisabledEvent struct {\n\tClusterEvent\n}\n\nfunc init() {\n\tt[\"DasAdmissionControlDisabledEvent\"] = reflect.TypeOf((*DasAdmissionControlDisabledEvent)(nil)).Elem()\n}\n\ntype DasAdmissionControlEnabledEvent struct {\n\tClusterEvent\n}\n\nfunc init() {\n\tt[\"DasAdmissionControlEnabledEvent\"] = reflect.TypeOf((*DasAdmissionControlEnabledEvent)(nil)).Elem()\n}\n\ntype DasAgentFoundEvent struct {\n\tClusterEvent\n}\n\nfunc init() {\n\tt[\"DasAgentFoundEvent\"] = 
reflect.TypeOf((*DasAgentFoundEvent)(nil)).Elem()\n}\n\ntype DasAgentUnavailableEvent struct {\n\tClusterEvent\n}\n\nfunc init() {\n\tt[\"DasAgentUnavailableEvent\"] = reflect.TypeOf((*DasAgentUnavailableEvent)(nil)).Elem()\n}\n\ntype DasClusterIsolatedEvent struct {\n\tClusterEvent\n}\n\nfunc init() {\n\tt[\"DasClusterIsolatedEvent\"] = reflect.TypeOf((*DasClusterIsolatedEvent)(nil)).Elem()\n}\n\ntype DasConfigFault struct {\n\tVimFault\n\n\tReason string      `xml:\"reason,omitempty\"`\n\tOutput string      `xml:\"output,omitempty\"`\n\tEvent  []BaseEvent `xml:\"event,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"DasConfigFault\"] = reflect.TypeOf((*DasConfigFault)(nil)).Elem()\n}\n\ntype DasConfigFaultFault DasConfigFault\n\nfunc init() {\n\tt[\"DasConfigFaultFault\"] = reflect.TypeOf((*DasConfigFaultFault)(nil)).Elem()\n}\n\ntype DasDisabledEvent struct {\n\tClusterEvent\n}\n\nfunc init() {\n\tt[\"DasDisabledEvent\"] = reflect.TypeOf((*DasDisabledEvent)(nil)).Elem()\n}\n\ntype DasEnabledEvent struct {\n\tClusterEvent\n}\n\nfunc init() {\n\tt[\"DasEnabledEvent\"] = reflect.TypeOf((*DasEnabledEvent)(nil)).Elem()\n}\n\ntype DasHeartbeatDatastoreInfo struct {\n\tDynamicData\n\n\tDatastore ManagedObjectReference   `xml:\"datastore\"`\n\tHosts     []ManagedObjectReference `xml:\"hosts\"`\n}\n\nfunc init() {\n\tt[\"DasHeartbeatDatastoreInfo\"] = reflect.TypeOf((*DasHeartbeatDatastoreInfo)(nil)).Elem()\n}\n\ntype DasHostFailedEvent struct {\n\tClusterEvent\n\n\tFailedHost HostEventArgument `xml:\"failedHost\"`\n}\n\nfunc init() {\n\tt[\"DasHostFailedEvent\"] = reflect.TypeOf((*DasHostFailedEvent)(nil)).Elem()\n}\n\ntype DasHostIsolatedEvent struct {\n\tClusterEvent\n\n\tIsolatedHost HostEventArgument `xml:\"isolatedHost\"`\n}\n\nfunc init() {\n\tt[\"DasHostIsolatedEvent\"] = reflect.TypeOf((*DasHostIsolatedEvent)(nil)).Elem()\n}\n\ntype DatabaseError struct {\n\tRuntimeFault\n}\n\nfunc init() {\n\tt[\"DatabaseError\"] = 
reflect.TypeOf((*DatabaseError)(nil)).Elem()\n}\n\ntype DatabaseErrorFault DatabaseError\n\nfunc init() {\n\tt[\"DatabaseErrorFault\"] = reflect.TypeOf((*DatabaseErrorFault)(nil)).Elem()\n}\n\ntype DatabaseSizeEstimate struct {\n\tDynamicData\n\n\tSize int64 `xml:\"size\"`\n}\n\nfunc init() {\n\tt[\"DatabaseSizeEstimate\"] = reflect.TypeOf((*DatabaseSizeEstimate)(nil)).Elem()\n}\n\ntype DatabaseSizeParam struct {\n\tDynamicData\n\n\tInventoryDesc InventoryDescription              `xml:\"inventoryDesc\"`\n\tPerfStatsDesc *PerformanceStatisticsDescription `xml:\"perfStatsDesc,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DatabaseSizeParam\"] = reflect.TypeOf((*DatabaseSizeParam)(nil)).Elem()\n}\n\ntype DatacenterConfigInfo struct {\n\tDynamicData\n\n\tDefaultHardwareVersionKey string `xml:\"defaultHardwareVersionKey,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DatacenterConfigInfo\"] = reflect.TypeOf((*DatacenterConfigInfo)(nil)).Elem()\n}\n\ntype DatacenterConfigSpec struct {\n\tDynamicData\n\n\tDefaultHardwareVersionKey string `xml:\"defaultHardwareVersionKey,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DatacenterConfigSpec\"] = reflect.TypeOf((*DatacenterConfigSpec)(nil)).Elem()\n}\n\ntype DatacenterCreatedEvent struct {\n\tDatacenterEvent\n\n\tParent FolderEventArgument `xml:\"parent\"`\n}\n\nfunc init() {\n\tt[\"DatacenterCreatedEvent\"] = reflect.TypeOf((*DatacenterCreatedEvent)(nil)).Elem()\n}\n\ntype DatacenterEvent struct {\n\tEvent\n}\n\nfunc init() {\n\tt[\"DatacenterEvent\"] = reflect.TypeOf((*DatacenterEvent)(nil)).Elem()\n}\n\ntype DatacenterEventArgument struct {\n\tEntityEventArgument\n\n\tDatacenter ManagedObjectReference `xml:\"datacenter\"`\n}\n\nfunc init() {\n\tt[\"DatacenterEventArgument\"] = reflect.TypeOf((*DatacenterEventArgument)(nil)).Elem()\n}\n\ntype DatacenterMismatch struct {\n\tMigrationFault\n\n\tInvalidArgument    []DatacenterMismatchArgument `xml:\"invalidArgument\"`\n\tExpectedDatacenter ManagedObjectReference       
`xml:\"expectedDatacenter\"`\n}\n\nfunc init() {\n\tt[\"DatacenterMismatch\"] = reflect.TypeOf((*DatacenterMismatch)(nil)).Elem()\n}\n\ntype DatacenterMismatchArgument struct {\n\tDynamicData\n\n\tEntity          ManagedObjectReference  `xml:\"entity\"`\n\tInputDatacenter *ManagedObjectReference `xml:\"inputDatacenter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DatacenterMismatchArgument\"] = reflect.TypeOf((*DatacenterMismatchArgument)(nil)).Elem()\n}\n\ntype DatacenterMismatchFault DatacenterMismatch\n\nfunc init() {\n\tt[\"DatacenterMismatchFault\"] = reflect.TypeOf((*DatacenterMismatchFault)(nil)).Elem()\n}\n\ntype DatacenterRenamedEvent struct {\n\tDatacenterEvent\n\n\tOldName string `xml:\"oldName\"`\n\tNewName string `xml:\"newName\"`\n}\n\nfunc init() {\n\tt[\"DatacenterRenamedEvent\"] = reflect.TypeOf((*DatacenterRenamedEvent)(nil)).Elem()\n}\n\ntype DatastoreCapability struct {\n\tDynamicData\n\n\tDirectoryHierarchySupported      bool  `xml:\"directoryHierarchySupported\"`\n\tRawDiskMappingsSupported         bool  `xml:\"rawDiskMappingsSupported\"`\n\tPerFileThinProvisioningSupported bool  `xml:\"perFileThinProvisioningSupported\"`\n\tStorageIORMSupported             *bool `xml:\"storageIORMSupported\"`\n\tNativeSnapshotSupported          *bool `xml:\"nativeSnapshotSupported\"`\n\tTopLevelDirectoryCreateSupported *bool `xml:\"topLevelDirectoryCreateSupported\"`\n\tSeSparseSupported                *bool `xml:\"seSparseSupported\"`\n\tVmfsSparseSupported              *bool `xml:\"vmfsSparseSupported\"`\n\tVsanSparseSupported              *bool `xml:\"vsanSparseSupported\"`\n\tUpitSupported                    *bool `xml:\"upitSupported\"`\n}\n\nfunc init() {\n\tt[\"DatastoreCapability\"] = reflect.TypeOf((*DatastoreCapability)(nil)).Elem()\n}\n\ntype DatastoreCapacityIncreasedEvent struct {\n\tDatastoreEvent\n\n\tOldCapacity int64 `xml:\"oldCapacity\"`\n\tNewCapacity int64 `xml:\"newCapacity\"`\n}\n\nfunc init() {\n\tt[\"DatastoreCapacityIncreasedEvent\"] = 
reflect.TypeOf((*DatastoreCapacityIncreasedEvent)(nil)).Elem()\n}\n\ntype DatastoreDestroyedEvent struct {\n\tDatastoreEvent\n}\n\nfunc init() {\n\tt[\"DatastoreDestroyedEvent\"] = reflect.TypeOf((*DatastoreDestroyedEvent)(nil)).Elem()\n}\n\ntype DatastoreDiscoveredEvent struct {\n\tHostEvent\n\n\tDatastore DatastoreEventArgument `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"DatastoreDiscoveredEvent\"] = reflect.TypeOf((*DatastoreDiscoveredEvent)(nil)).Elem()\n}\n\ntype DatastoreDuplicatedEvent struct {\n\tDatastoreEvent\n}\n\nfunc init() {\n\tt[\"DatastoreDuplicatedEvent\"] = reflect.TypeOf((*DatastoreDuplicatedEvent)(nil)).Elem()\n}\n\ntype DatastoreEnterMaintenanceMode DatastoreEnterMaintenanceModeRequestType\n\nfunc init() {\n\tt[\"DatastoreEnterMaintenanceMode\"] = reflect.TypeOf((*DatastoreEnterMaintenanceMode)(nil)).Elem()\n}\n\ntype DatastoreEnterMaintenanceModeRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"DatastoreEnterMaintenanceModeRequestType\"] = reflect.TypeOf((*DatastoreEnterMaintenanceModeRequestType)(nil)).Elem()\n}\n\ntype DatastoreEnterMaintenanceModeResponse struct {\n\tReturnval StoragePlacementResult `xml:\"returnval\"`\n}\n\ntype DatastoreEvent struct {\n\tEvent\n\n\tDatastore *DatastoreEventArgument `xml:\"datastore,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DatastoreEvent\"] = reflect.TypeOf((*DatastoreEvent)(nil)).Elem()\n}\n\ntype DatastoreEventArgument struct {\n\tEntityEventArgument\n\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"DatastoreEventArgument\"] = reflect.TypeOf((*DatastoreEventArgument)(nil)).Elem()\n}\n\ntype DatastoreExitMaintenanceModeRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"DatastoreExitMaintenanceModeRequestType\"] = reflect.TypeOf((*DatastoreExitMaintenanceModeRequestType)(nil)).Elem()\n}\n\ntype DatastoreExitMaintenanceMode_Task DatastoreExitMaintenanceModeRequestType\n\nfunc init() 
{\n\tt[\"DatastoreExitMaintenanceMode_Task\"] = reflect.TypeOf((*DatastoreExitMaintenanceMode_Task)(nil)).Elem()\n}\n\ntype DatastoreExitMaintenanceMode_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype DatastoreFileCopiedEvent struct {\n\tDatastoreFileEvent\n\n\tSourceDatastore DatastoreEventArgument `xml:\"sourceDatastore\"`\n\tSourceFile      string                 `xml:\"sourceFile\"`\n}\n\nfunc init() {\n\tt[\"DatastoreFileCopiedEvent\"] = reflect.TypeOf((*DatastoreFileCopiedEvent)(nil)).Elem()\n}\n\ntype DatastoreFileDeletedEvent struct {\n\tDatastoreFileEvent\n}\n\nfunc init() {\n\tt[\"DatastoreFileDeletedEvent\"] = reflect.TypeOf((*DatastoreFileDeletedEvent)(nil)).Elem()\n}\n\ntype DatastoreFileEvent struct {\n\tDatastoreEvent\n\n\tTargetFile        string `xml:\"targetFile\"`\n\tSourceOfOperation string `xml:\"sourceOfOperation,omitempty\"`\n\tSucceeded         *bool  `xml:\"succeeded\"`\n}\n\nfunc init() {\n\tt[\"DatastoreFileEvent\"] = reflect.TypeOf((*DatastoreFileEvent)(nil)).Elem()\n}\n\ntype DatastoreFileMovedEvent struct {\n\tDatastoreFileEvent\n\n\tSourceDatastore DatastoreEventArgument `xml:\"sourceDatastore\"`\n\tSourceFile      string                 `xml:\"sourceFile\"`\n}\n\nfunc init() {\n\tt[\"DatastoreFileMovedEvent\"] = reflect.TypeOf((*DatastoreFileMovedEvent)(nil)).Elem()\n}\n\ntype DatastoreHostMount struct {\n\tDynamicData\n\n\tKey       ManagedObjectReference `xml:\"key\"`\n\tMountInfo HostMountInfo          `xml:\"mountInfo\"`\n}\n\nfunc init() {\n\tt[\"DatastoreHostMount\"] = reflect.TypeOf((*DatastoreHostMount)(nil)).Elem()\n}\n\ntype DatastoreIORMReconfiguredEvent struct {\n\tDatastoreEvent\n}\n\nfunc init() {\n\tt[\"DatastoreIORMReconfiguredEvent\"] = reflect.TypeOf((*DatastoreIORMReconfiguredEvent)(nil)).Elem()\n}\n\ntype DatastoreInfo struct {\n\tDynamicData\n\n\tName                   string     `xml:\"name\"`\n\tUrl                    string     `xml:\"url\"`\n\tFreeSpace              
int64      `xml:\"freeSpace\"`\n\tMaxFileSize            int64      `xml:\"maxFileSize\"`\n\tMaxVirtualDiskCapacity int64      `xml:\"maxVirtualDiskCapacity,omitempty\"`\n\tMaxMemoryFileSize      int64      `xml:\"maxMemoryFileSize,omitempty\"`\n\tTimestamp              *time.Time `xml:\"timestamp\"`\n\tContainerId            string     `xml:\"containerId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DatastoreInfo\"] = reflect.TypeOf((*DatastoreInfo)(nil)).Elem()\n}\n\ntype DatastoreMountPathDatastorePair struct {\n\tDynamicData\n\n\tOldMountPath string                 `xml:\"oldMountPath\"`\n\tDatastore    ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"DatastoreMountPathDatastorePair\"] = reflect.TypeOf((*DatastoreMountPathDatastorePair)(nil)).Elem()\n}\n\ntype DatastoreNotWritableOnHost struct {\n\tInvalidDatastore\n\n\tHost ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"DatastoreNotWritableOnHost\"] = reflect.TypeOf((*DatastoreNotWritableOnHost)(nil)).Elem()\n}\n\ntype DatastoreNotWritableOnHostFault BaseDatastoreNotWritableOnHost\n\nfunc init() {\n\tt[\"DatastoreNotWritableOnHostFault\"] = reflect.TypeOf((*DatastoreNotWritableOnHostFault)(nil)).Elem()\n}\n\ntype DatastoreOption struct {\n\tDynamicData\n\n\tUnsupportedVolumes []VirtualMachineDatastoreVolumeOption `xml:\"unsupportedVolumes,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DatastoreOption\"] = reflect.TypeOf((*DatastoreOption)(nil)).Elem()\n}\n\ntype DatastorePrincipalConfigured struct {\n\tHostEvent\n\n\tDatastorePrincipal string `xml:\"datastorePrincipal\"`\n}\n\nfunc init() {\n\tt[\"DatastorePrincipalConfigured\"] = reflect.TypeOf((*DatastorePrincipalConfigured)(nil)).Elem()\n}\n\ntype DatastoreRemovedOnHostEvent struct {\n\tHostEvent\n\n\tDatastore DatastoreEventArgument `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"DatastoreRemovedOnHostEvent\"] = reflect.TypeOf((*DatastoreRemovedOnHostEvent)(nil)).Elem()\n}\n\ntype DatastoreRenamedEvent struct 
{\n\tDatastoreEvent\n\n\tOldName string `xml:\"oldName\"`\n\tNewName string `xml:\"newName\"`\n}\n\nfunc init() {\n\tt[\"DatastoreRenamedEvent\"] = reflect.TypeOf((*DatastoreRenamedEvent)(nil)).Elem()\n}\n\ntype DatastoreRenamedOnHostEvent struct {\n\tHostEvent\n\n\tOldName string `xml:\"oldName\"`\n\tNewName string `xml:\"newName\"`\n}\n\nfunc init() {\n\tt[\"DatastoreRenamedOnHostEvent\"] = reflect.TypeOf((*DatastoreRenamedOnHostEvent)(nil)).Elem()\n}\n\ntype DatastoreSummary struct {\n\tDynamicData\n\n\tDatastore          *ManagedObjectReference `xml:\"datastore,omitempty\"`\n\tName               string                  `xml:\"name\"`\n\tUrl                string                  `xml:\"url\"`\n\tCapacity           int64                   `xml:\"capacity\"`\n\tFreeSpace          int64                   `xml:\"freeSpace\"`\n\tUncommitted        int64                   `xml:\"uncommitted,omitempty\"`\n\tAccessible         bool                    `xml:\"accessible\"`\n\tMultipleHostAccess *bool                   `xml:\"multipleHostAccess\"`\n\tType               string                  `xml:\"type\"`\n\tMaintenanceMode    string                  `xml:\"maintenanceMode,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DatastoreSummary\"] = reflect.TypeOf((*DatastoreSummary)(nil)).Elem()\n}\n\ntype DatastoreVVolContainerFailoverPair struct {\n\tDynamicData\n\n\tSrcContainer string     `xml:\"srcContainer,omitempty\"`\n\tTgtContainer string     `xml:\"tgtContainer\"`\n\tVvolMapping  []KeyValue `xml:\"vvolMapping,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DatastoreVVolContainerFailoverPair\"] = reflect.TypeOf((*DatastoreVVolContainerFailoverPair)(nil)).Elem()\n}\n\ntype DateTimeProfile struct {\n\tApplyProfile\n}\n\nfunc init() {\n\tt[\"DateTimeProfile\"] = reflect.TypeOf((*DateTimeProfile)(nil)).Elem()\n}\n\ntype DecodeLicense DecodeLicenseRequestType\n\nfunc init() {\n\tt[\"DecodeLicense\"] = reflect.TypeOf((*DecodeLicense)(nil)).Elem()\n}\n\ntype DecodeLicenseRequestType struct 
{\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tLicenseKey string                 `xml:\"licenseKey\"`\n}\n\nfunc init() {\n\tt[\"DecodeLicenseRequestType\"] = reflect.TypeOf((*DecodeLicenseRequestType)(nil)).Elem()\n}\n\ntype DecodeLicenseResponse struct {\n\tReturnval LicenseManagerLicenseInfo `xml:\"returnval\"`\n}\n\ntype DefragmentAllDisks DefragmentAllDisksRequestType\n\nfunc init() {\n\tt[\"DefragmentAllDisks\"] = reflect.TypeOf((*DefragmentAllDisks)(nil)).Elem()\n}\n\ntype DefragmentAllDisksRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"DefragmentAllDisksRequestType\"] = reflect.TypeOf((*DefragmentAllDisksRequestType)(nil)).Elem()\n}\n\ntype DefragmentAllDisksResponse struct {\n}\n\ntype DefragmentVirtualDiskRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tName       string                  `xml:\"name\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DefragmentVirtualDiskRequestType\"] = reflect.TypeOf((*DefragmentVirtualDiskRequestType)(nil)).Elem()\n}\n\ntype DefragmentVirtualDisk_Task DefragmentVirtualDiskRequestType\n\nfunc init() {\n\tt[\"DefragmentVirtualDisk_Task\"] = reflect.TypeOf((*DefragmentVirtualDisk_Task)(nil)).Elem()\n}\n\ntype DefragmentVirtualDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype DeleteCustomizationSpec DeleteCustomizationSpecRequestType\n\nfunc init() {\n\tt[\"DeleteCustomizationSpec\"] = reflect.TypeOf((*DeleteCustomizationSpec)(nil)).Elem()\n}\n\ntype DeleteCustomizationSpecRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tName string                 `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"DeleteCustomizationSpecRequestType\"] = reflect.TypeOf((*DeleteCustomizationSpecRequestType)(nil)).Elem()\n}\n\ntype DeleteCustomizationSpecResponse struct {\n}\n\ntype DeleteDatastoreFileRequestType struct {\n\tThis       
ManagedObjectReference  `xml:\"_this\"`\n\tName       string                  `xml:\"name\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DeleteDatastoreFileRequestType\"] = reflect.TypeOf((*DeleteDatastoreFileRequestType)(nil)).Elem()\n}\n\ntype DeleteDatastoreFile_Task DeleteDatastoreFileRequestType\n\nfunc init() {\n\tt[\"DeleteDatastoreFile_Task\"] = reflect.TypeOf((*DeleteDatastoreFile_Task)(nil)).Elem()\n}\n\ntype DeleteDatastoreFile_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype DeleteDirectory DeleteDirectoryRequestType\n\nfunc init() {\n\tt[\"DeleteDirectory\"] = reflect.TypeOf((*DeleteDirectory)(nil)).Elem()\n}\n\ntype DeleteDirectoryInGuest DeleteDirectoryInGuestRequestType\n\nfunc init() {\n\tt[\"DeleteDirectoryInGuest\"] = reflect.TypeOf((*DeleteDirectoryInGuest)(nil)).Elem()\n}\n\ntype DeleteDirectoryInGuestRequestType struct {\n\tThis          ManagedObjectReference  `xml:\"_this\"`\n\tVm            ManagedObjectReference  `xml:\"vm\"`\n\tAuth          BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tDirectoryPath string                  `xml:\"directoryPath\"`\n\tRecursive     bool                    `xml:\"recursive\"`\n}\n\nfunc init() {\n\tt[\"DeleteDirectoryInGuestRequestType\"] = reflect.TypeOf((*DeleteDirectoryInGuestRequestType)(nil)).Elem()\n}\n\ntype DeleteDirectoryInGuestResponse struct {\n}\n\ntype DeleteDirectoryRequestType struct {\n\tThis          ManagedObjectReference  `xml:\"_this\"`\n\tDatacenter    *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n\tDatastorePath string                  `xml:\"datastorePath\"`\n}\n\nfunc init() {\n\tt[\"DeleteDirectoryRequestType\"] = reflect.TypeOf((*DeleteDirectoryRequestType)(nil)).Elem()\n}\n\ntype DeleteDirectoryResponse struct {\n}\n\ntype DeleteFile DeleteFileRequestType\n\nfunc init() {\n\tt[\"DeleteFile\"] = reflect.TypeOf((*DeleteFile)(nil)).Elem()\n}\n\ntype DeleteFileInGuest 
DeleteFileInGuestRequestType\n\nfunc init() {\n\tt[\"DeleteFileInGuest\"] = reflect.TypeOf((*DeleteFileInGuest)(nil)).Elem()\n}\n\ntype DeleteFileInGuestRequestType struct {\n\tThis     ManagedObjectReference  `xml:\"_this\"`\n\tVm       ManagedObjectReference  `xml:\"vm\"`\n\tAuth     BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tFilePath string                  `xml:\"filePath\"`\n}\n\nfunc init() {\n\tt[\"DeleteFileInGuestRequestType\"] = reflect.TypeOf((*DeleteFileInGuestRequestType)(nil)).Elem()\n}\n\ntype DeleteFileInGuestResponse struct {\n}\n\ntype DeleteFileRequestType struct {\n\tThis          ManagedObjectReference `xml:\"_this\"`\n\tDatastorePath string                 `xml:\"datastorePath\"`\n}\n\nfunc init() {\n\tt[\"DeleteFileRequestType\"] = reflect.TypeOf((*DeleteFileRequestType)(nil)).Elem()\n}\n\ntype DeleteFileResponse struct {\n}\n\ntype DeleteHostSpecification DeleteHostSpecificationRequestType\n\nfunc init() {\n\tt[\"DeleteHostSpecification\"] = reflect.TypeOf((*DeleteHostSpecification)(nil)).Elem()\n}\n\ntype DeleteHostSpecificationRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tHost ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"DeleteHostSpecificationRequestType\"] = reflect.TypeOf((*DeleteHostSpecificationRequestType)(nil)).Elem()\n}\n\ntype DeleteHostSpecificationResponse struct {\n}\n\ntype DeleteHostSubSpecification DeleteHostSubSpecificationRequestType\n\nfunc init() {\n\tt[\"DeleteHostSubSpecification\"] = reflect.TypeOf((*DeleteHostSubSpecification)(nil)).Elem()\n}\n\ntype DeleteHostSubSpecificationRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tHost        ManagedObjectReference `xml:\"host\"`\n\tSubSpecName string                 `xml:\"subSpecName\"`\n}\n\nfunc init() {\n\tt[\"DeleteHostSubSpecificationRequestType\"] = reflect.TypeOf((*DeleteHostSubSpecificationRequestType)(nil)).Elem()\n}\n\ntype DeleteHostSubSpecificationResponse struct {\n}\n\ntype 
DeleteRegistryKeyInGuest DeleteRegistryKeyInGuestRequestType\n\nfunc init() {\n\tt[\"DeleteRegistryKeyInGuest\"] = reflect.TypeOf((*DeleteRegistryKeyInGuest)(nil)).Elem()\n}\n\ntype DeleteRegistryKeyInGuestRequestType struct {\n\tThis      ManagedObjectReference  `xml:\"_this\"`\n\tVm        ManagedObjectReference  `xml:\"vm\"`\n\tAuth      BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tKeyName   GuestRegKeyNameSpec     `xml:\"keyName\"`\n\tRecursive bool                    `xml:\"recursive\"`\n}\n\nfunc init() {\n\tt[\"DeleteRegistryKeyInGuestRequestType\"] = reflect.TypeOf((*DeleteRegistryKeyInGuestRequestType)(nil)).Elem()\n}\n\ntype DeleteRegistryKeyInGuestResponse struct {\n}\n\ntype DeleteRegistryValueInGuest DeleteRegistryValueInGuestRequestType\n\nfunc init() {\n\tt[\"DeleteRegistryValueInGuest\"] = reflect.TypeOf((*DeleteRegistryValueInGuest)(nil)).Elem()\n}\n\ntype DeleteRegistryValueInGuestRequestType struct {\n\tThis      ManagedObjectReference  `xml:\"_this\"`\n\tVm        ManagedObjectReference  `xml:\"vm\"`\n\tAuth      BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tValueName GuestRegValueNameSpec   `xml:\"valueName\"`\n}\n\nfunc init() {\n\tt[\"DeleteRegistryValueInGuestRequestType\"] = reflect.TypeOf((*DeleteRegistryValueInGuestRequestType)(nil)).Elem()\n}\n\ntype DeleteRegistryValueInGuestResponse struct {\n}\n\ntype DeleteScsiLunState DeleteScsiLunStateRequestType\n\nfunc init() {\n\tt[\"DeleteScsiLunState\"] = reflect.TypeOf((*DeleteScsiLunState)(nil)).Elem()\n}\n\ntype DeleteScsiLunStateRequestType struct {\n\tThis             ManagedObjectReference `xml:\"_this\"`\n\tLunCanonicalName string                 `xml:\"lunCanonicalName\"`\n}\n\nfunc init() {\n\tt[\"DeleteScsiLunStateRequestType\"] = reflect.TypeOf((*DeleteScsiLunStateRequestType)(nil)).Elem()\n}\n\ntype DeleteScsiLunStateResponse struct {\n}\n\ntype DeleteVStorageObjectRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tId        ID                    
 `xml:\"id\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"DeleteVStorageObjectRequestType\"] = reflect.TypeOf((*DeleteVStorageObjectRequestType)(nil)).Elem()\n}\n\ntype DeleteVStorageObject_Task DeleteVStorageObjectRequestType\n\nfunc init() {\n\tt[\"DeleteVStorageObject_Task\"] = reflect.TypeOf((*DeleteVStorageObject_Task)(nil)).Elem()\n}\n\ntype DeleteVStorageObject_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype DeleteVffsVolumeState DeleteVffsVolumeStateRequestType\n\nfunc init() {\n\tt[\"DeleteVffsVolumeState\"] = reflect.TypeOf((*DeleteVffsVolumeState)(nil)).Elem()\n}\n\ntype DeleteVffsVolumeStateRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tVffsUuid string                 `xml:\"vffsUuid\"`\n}\n\nfunc init() {\n\tt[\"DeleteVffsVolumeStateRequestType\"] = reflect.TypeOf((*DeleteVffsVolumeStateRequestType)(nil)).Elem()\n}\n\ntype DeleteVffsVolumeStateResponse struct {\n}\n\ntype DeleteVirtualDiskRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tName       string                  `xml:\"name\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DeleteVirtualDiskRequestType\"] = reflect.TypeOf((*DeleteVirtualDiskRequestType)(nil)).Elem()\n}\n\ntype DeleteVirtualDisk_Task DeleteVirtualDiskRequestType\n\nfunc init() {\n\tt[\"DeleteVirtualDisk_Task\"] = reflect.TypeOf((*DeleteVirtualDisk_Task)(nil)).Elem()\n}\n\ntype DeleteVirtualDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype DeleteVmfsVolumeState DeleteVmfsVolumeStateRequestType\n\nfunc init() {\n\tt[\"DeleteVmfsVolumeState\"] = reflect.TypeOf((*DeleteVmfsVolumeState)(nil)).Elem()\n}\n\ntype DeleteVmfsVolumeStateRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tVmfsUuid string                 `xml:\"vmfsUuid\"`\n}\n\nfunc init() 
{\n\tt[\"DeleteVmfsVolumeStateRequestType\"] = reflect.TypeOf((*DeleteVmfsVolumeStateRequestType)(nil)).Elem()\n}\n\ntype DeleteVmfsVolumeStateResponse struct {\n}\n\ntype DeleteVsanObjects DeleteVsanObjectsRequestType\n\nfunc init() {\n\tt[\"DeleteVsanObjects\"] = reflect.TypeOf((*DeleteVsanObjects)(nil)).Elem()\n}\n\ntype DeleteVsanObjectsRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tUuids []string               `xml:\"uuids\"`\n\tForce *bool                  `xml:\"force\"`\n}\n\nfunc init() {\n\tt[\"DeleteVsanObjectsRequestType\"] = reflect.TypeOf((*DeleteVsanObjectsRequestType)(nil)).Elem()\n}\n\ntype DeleteVsanObjectsResponse struct {\n\tReturnval []HostVsanInternalSystemDeleteVsanObjectsResult `xml:\"returnval\"`\n}\n\ntype DeltaDiskFormatNotSupported struct {\n\tVmConfigFault\n\n\tDatastore       []ManagedObjectReference `xml:\"datastore,omitempty\"`\n\tDeltaDiskFormat string                   `xml:\"deltaDiskFormat\"`\n}\n\nfunc init() {\n\tt[\"DeltaDiskFormatNotSupported\"] = reflect.TypeOf((*DeltaDiskFormatNotSupported)(nil)).Elem()\n}\n\ntype DeltaDiskFormatNotSupportedFault DeltaDiskFormatNotSupported\n\nfunc init() {\n\tt[\"DeltaDiskFormatNotSupportedFault\"] = reflect.TypeOf((*DeltaDiskFormatNotSupportedFault)(nil)).Elem()\n}\n\ntype Description struct {\n\tDynamicData\n\n\tLabel   string `xml:\"label\"`\n\tSummary string `xml:\"summary\"`\n}\n\nfunc init() {\n\tt[\"Description\"] = reflect.TypeOf((*Description)(nil)).Elem()\n}\n\ntype DeselectVnic DeselectVnicRequestType\n\nfunc init() {\n\tt[\"DeselectVnic\"] = reflect.TypeOf((*DeselectVnic)(nil)).Elem()\n}\n\ntype DeselectVnicForNicType DeselectVnicForNicTypeRequestType\n\nfunc init() {\n\tt[\"DeselectVnicForNicType\"] = reflect.TypeOf((*DeselectVnicForNicType)(nil)).Elem()\n}\n\ntype DeselectVnicForNicTypeRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tNicType string                 `xml:\"nicType\"`\n\tDevice  string                 
`xml:\"device\"`\n}\n\nfunc init() {\n\tt[\"DeselectVnicForNicTypeRequestType\"] = reflect.TypeOf((*DeselectVnicForNicTypeRequestType)(nil)).Elem()\n}\n\ntype DeselectVnicForNicTypeResponse struct {\n}\n\ntype DeselectVnicRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"DeselectVnicRequestType\"] = reflect.TypeOf((*DeselectVnicRequestType)(nil)).Elem()\n}\n\ntype DeselectVnicResponse struct {\n}\n\ntype DestinationSwitchFull struct {\n\tCannotAccessNetwork\n}\n\nfunc init() {\n\tt[\"DestinationSwitchFull\"] = reflect.TypeOf((*DestinationSwitchFull)(nil)).Elem()\n}\n\ntype DestinationSwitchFullFault DestinationSwitchFull\n\nfunc init() {\n\tt[\"DestinationSwitchFullFault\"] = reflect.TypeOf((*DestinationSwitchFullFault)(nil)).Elem()\n}\n\ntype DestinationVsanDisabled struct {\n\tCannotMoveVsanEnabledHost\n\n\tDestinationCluster string `xml:\"destinationCluster\"`\n}\n\nfunc init() {\n\tt[\"DestinationVsanDisabled\"] = reflect.TypeOf((*DestinationVsanDisabled)(nil)).Elem()\n}\n\ntype DestinationVsanDisabledFault DestinationVsanDisabled\n\nfunc init() {\n\tt[\"DestinationVsanDisabledFault\"] = reflect.TypeOf((*DestinationVsanDisabledFault)(nil)).Elem()\n}\n\ntype DestroyChildren DestroyChildrenRequestType\n\nfunc init() {\n\tt[\"DestroyChildren\"] = reflect.TypeOf((*DestroyChildren)(nil)).Elem()\n}\n\ntype DestroyChildrenRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"DestroyChildrenRequestType\"] = reflect.TypeOf((*DestroyChildrenRequestType)(nil)).Elem()\n}\n\ntype DestroyChildrenResponse struct {\n}\n\ntype DestroyCollector DestroyCollectorRequestType\n\nfunc init() {\n\tt[\"DestroyCollector\"] = reflect.TypeOf((*DestroyCollector)(nil)).Elem()\n}\n\ntype DestroyCollectorRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"DestroyCollectorRequestType\"] = reflect.TypeOf((*DestroyCollectorRequestType)(nil)).Elem()\n}\n\ntype 
DestroyCollectorResponse struct {\n}\n\ntype DestroyDatastore DestroyDatastoreRequestType\n\nfunc init() {\n\tt[\"DestroyDatastore\"] = reflect.TypeOf((*DestroyDatastore)(nil)).Elem()\n}\n\ntype DestroyDatastoreRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"DestroyDatastoreRequestType\"] = reflect.TypeOf((*DestroyDatastoreRequestType)(nil)).Elem()\n}\n\ntype DestroyDatastoreResponse struct {\n}\n\ntype DestroyIpPool DestroyIpPoolRequestType\n\nfunc init() {\n\tt[\"DestroyIpPool\"] = reflect.TypeOf((*DestroyIpPool)(nil)).Elem()\n}\n\ntype DestroyIpPoolRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tDc    ManagedObjectReference `xml:\"dc\"`\n\tId    int32                  `xml:\"id\"`\n\tForce bool                   `xml:\"force\"`\n}\n\nfunc init() {\n\tt[\"DestroyIpPoolRequestType\"] = reflect.TypeOf((*DestroyIpPoolRequestType)(nil)).Elem()\n}\n\ntype DestroyIpPoolResponse struct {\n}\n\ntype DestroyNetwork DestroyNetworkRequestType\n\nfunc init() {\n\tt[\"DestroyNetwork\"] = reflect.TypeOf((*DestroyNetwork)(nil)).Elem()\n}\n\ntype DestroyNetworkRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"DestroyNetworkRequestType\"] = reflect.TypeOf((*DestroyNetworkRequestType)(nil)).Elem()\n}\n\ntype DestroyNetworkResponse struct {\n}\n\ntype DestroyProfile DestroyProfileRequestType\n\nfunc init() {\n\tt[\"DestroyProfile\"] = reflect.TypeOf((*DestroyProfile)(nil)).Elem()\n}\n\ntype DestroyProfileRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"DestroyProfileRequestType\"] = reflect.TypeOf((*DestroyProfileRequestType)(nil)).Elem()\n}\n\ntype DestroyProfileResponse struct {\n}\n\ntype DestroyPropertyCollector DestroyPropertyCollectorRequestType\n\nfunc init() {\n\tt[\"DestroyPropertyCollector\"] = reflect.TypeOf((*DestroyPropertyCollector)(nil)).Elem()\n}\n\ntype DestroyPropertyCollectorRequestType struct {\n\tThis 
ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"DestroyPropertyCollectorRequestType\"] = reflect.TypeOf((*DestroyPropertyCollectorRequestType)(nil)).Elem()\n}\n\ntype DestroyPropertyCollectorResponse struct {\n}\n\ntype DestroyPropertyFilter DestroyPropertyFilterRequestType\n\nfunc init() {\n\tt[\"DestroyPropertyFilter\"] = reflect.TypeOf((*DestroyPropertyFilter)(nil)).Elem()\n}\n\ntype DestroyPropertyFilterRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"DestroyPropertyFilterRequestType\"] = reflect.TypeOf((*DestroyPropertyFilterRequestType)(nil)).Elem()\n}\n\ntype DestroyPropertyFilterResponse struct {\n}\n\ntype DestroyRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"DestroyRequestType\"] = reflect.TypeOf((*DestroyRequestType)(nil)).Elem()\n}\n\ntype DestroyVffs DestroyVffsRequestType\n\nfunc init() {\n\tt[\"DestroyVffs\"] = reflect.TypeOf((*DestroyVffs)(nil)).Elem()\n}\n\ntype DestroyVffsRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tVffsPath string                 `xml:\"vffsPath\"`\n}\n\nfunc init() {\n\tt[\"DestroyVffsRequestType\"] = reflect.TypeOf((*DestroyVffsRequestType)(nil)).Elem()\n}\n\ntype DestroyVffsResponse struct {\n}\n\ntype DestroyView DestroyViewRequestType\n\nfunc init() {\n\tt[\"DestroyView\"] = reflect.TypeOf((*DestroyView)(nil)).Elem()\n}\n\ntype DestroyViewRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"DestroyViewRequestType\"] = reflect.TypeOf((*DestroyViewRequestType)(nil)).Elem()\n}\n\ntype DestroyViewResponse struct {\n}\n\ntype Destroy_Task DestroyRequestType\n\nfunc init() {\n\tt[\"Destroy_Task\"] = reflect.TypeOf((*Destroy_Task)(nil)).Elem()\n}\n\ntype Destroy_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype DetachDiskRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tDiskId ID            
         `xml:\"diskId\"`\n}\n\nfunc init() {\n\tt[\"DetachDiskRequestType\"] = reflect.TypeOf((*DetachDiskRequestType)(nil)).Elem()\n}\n\ntype DetachDisk_Task DetachDiskRequestType\n\nfunc init() {\n\tt[\"DetachDisk_Task\"] = reflect.TypeOf((*DetachDisk_Task)(nil)).Elem()\n}\n\ntype DetachDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype DetachScsiLun DetachScsiLunRequestType\n\nfunc init() {\n\tt[\"DetachScsiLun\"] = reflect.TypeOf((*DetachScsiLun)(nil)).Elem()\n}\n\ntype DetachScsiLunExRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tLunUuid []string               `xml:\"lunUuid\"`\n}\n\nfunc init() {\n\tt[\"DetachScsiLunExRequestType\"] = reflect.TypeOf((*DetachScsiLunExRequestType)(nil)).Elem()\n}\n\ntype DetachScsiLunEx_Task DetachScsiLunExRequestType\n\nfunc init() {\n\tt[\"DetachScsiLunEx_Task\"] = reflect.TypeOf((*DetachScsiLunEx_Task)(nil)).Elem()\n}\n\ntype DetachScsiLunEx_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype DetachScsiLunRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tLunUuid string                 `xml:\"lunUuid\"`\n}\n\nfunc init() {\n\tt[\"DetachScsiLunRequestType\"] = reflect.TypeOf((*DetachScsiLunRequestType)(nil)).Elem()\n}\n\ntype DetachScsiLunResponse struct {\n}\n\ntype DetachTagFromVStorageObject DetachTagFromVStorageObjectRequestType\n\nfunc init() {\n\tt[\"DetachTagFromVStorageObject\"] = reflect.TypeOf((*DetachTagFromVStorageObject)(nil)).Elem()\n}\n\ntype DetachTagFromVStorageObjectRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tId       ID                     `xml:\"id\"`\n\tCategory string                 `xml:\"category\"`\n\tTag      string                 `xml:\"tag\"`\n}\n\nfunc init() {\n\tt[\"DetachTagFromVStorageObjectRequestType\"] = reflect.TypeOf((*DetachTagFromVStorageObjectRequestType)(nil)).Elem()\n}\n\ntype DetachTagFromVStorageObjectResponse struct 
{\n}\n\ntype DeviceBackedVirtualDiskSpec struct {\n\tVirtualDiskSpec\n\n\tDevice string `xml:\"device\"`\n}\n\nfunc init() {\n\tt[\"DeviceBackedVirtualDiskSpec\"] = reflect.TypeOf((*DeviceBackedVirtualDiskSpec)(nil)).Elem()\n}\n\ntype DeviceBackingNotSupported struct {\n\tDeviceNotSupported\n\n\tBacking string `xml:\"backing\"`\n}\n\nfunc init() {\n\tt[\"DeviceBackingNotSupported\"] = reflect.TypeOf((*DeviceBackingNotSupported)(nil)).Elem()\n}\n\ntype DeviceBackingNotSupportedFault BaseDeviceBackingNotSupported\n\nfunc init() {\n\tt[\"DeviceBackingNotSupportedFault\"] = reflect.TypeOf((*DeviceBackingNotSupportedFault)(nil)).Elem()\n}\n\ntype DeviceControllerNotSupported struct {\n\tDeviceNotSupported\n\n\tController string `xml:\"controller\"`\n}\n\nfunc init() {\n\tt[\"DeviceControllerNotSupported\"] = reflect.TypeOf((*DeviceControllerNotSupported)(nil)).Elem()\n}\n\ntype DeviceControllerNotSupportedFault DeviceControllerNotSupported\n\nfunc init() {\n\tt[\"DeviceControllerNotSupportedFault\"] = reflect.TypeOf((*DeviceControllerNotSupportedFault)(nil)).Elem()\n}\n\ntype DeviceGroupId struct {\n\tDynamicData\n\n\tId string `xml:\"id\"`\n}\n\nfunc init() {\n\tt[\"DeviceGroupId\"] = reflect.TypeOf((*DeviceGroupId)(nil)).Elem()\n}\n\ntype DeviceHotPlugNotSupported struct {\n\tInvalidDeviceSpec\n}\n\nfunc init() {\n\tt[\"DeviceHotPlugNotSupported\"] = reflect.TypeOf((*DeviceHotPlugNotSupported)(nil)).Elem()\n}\n\ntype DeviceHotPlugNotSupportedFault DeviceHotPlugNotSupported\n\nfunc init() {\n\tt[\"DeviceHotPlugNotSupportedFault\"] = reflect.TypeOf((*DeviceHotPlugNotSupportedFault)(nil)).Elem()\n}\n\ntype DeviceNotFound struct {\n\tInvalidDeviceSpec\n}\n\nfunc init() {\n\tt[\"DeviceNotFound\"] = reflect.TypeOf((*DeviceNotFound)(nil)).Elem()\n}\n\ntype DeviceNotFoundFault DeviceNotFound\n\nfunc init() {\n\tt[\"DeviceNotFoundFault\"] = reflect.TypeOf((*DeviceNotFoundFault)(nil)).Elem()\n}\n\ntype DeviceNotSupported struct {\n\tVirtualHardwareCompatibilityIssue\n\n\tDevice 
string `xml:\"device\"`\n\tReason string `xml:\"reason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DeviceNotSupported\"] = reflect.TypeOf((*DeviceNotSupported)(nil)).Elem()\n}\n\ntype DeviceNotSupportedFault BaseDeviceNotSupported\n\nfunc init() {\n\tt[\"DeviceNotSupportedFault\"] = reflect.TypeOf((*DeviceNotSupportedFault)(nil)).Elem()\n}\n\ntype DeviceUnsupportedForVmPlatform struct {\n\tInvalidDeviceSpec\n}\n\nfunc init() {\n\tt[\"DeviceUnsupportedForVmPlatform\"] = reflect.TypeOf((*DeviceUnsupportedForVmPlatform)(nil)).Elem()\n}\n\ntype DeviceUnsupportedForVmPlatformFault DeviceUnsupportedForVmPlatform\n\nfunc init() {\n\tt[\"DeviceUnsupportedForVmPlatformFault\"] = reflect.TypeOf((*DeviceUnsupportedForVmPlatformFault)(nil)).Elem()\n}\n\ntype DeviceUnsupportedForVmVersion struct {\n\tInvalidDeviceSpec\n\n\tCurrentVersion  string `xml:\"currentVersion\"`\n\tExpectedVersion string `xml:\"expectedVersion\"`\n}\n\nfunc init() {\n\tt[\"DeviceUnsupportedForVmVersion\"] = reflect.TypeOf((*DeviceUnsupportedForVmVersion)(nil)).Elem()\n}\n\ntype DeviceUnsupportedForVmVersionFault DeviceUnsupportedForVmVersion\n\nfunc init() {\n\tt[\"DeviceUnsupportedForVmVersionFault\"] = reflect.TypeOf((*DeviceUnsupportedForVmVersionFault)(nil)).Elem()\n}\n\ntype DiagnosticManagerBundleInfo struct {\n\tDynamicData\n\n\tSystem *ManagedObjectReference `xml:\"system,omitempty\"`\n\tUrl    string                  `xml:\"url\"`\n}\n\nfunc init() {\n\tt[\"DiagnosticManagerBundleInfo\"] = reflect.TypeOf((*DiagnosticManagerBundleInfo)(nil)).Elem()\n}\n\ntype DiagnosticManagerLogDescriptor struct {\n\tDynamicData\n\n\tKey      string          `xml:\"key\"`\n\tFileName string          `xml:\"fileName\"`\n\tCreator  string          `xml:\"creator\"`\n\tFormat   string          `xml:\"format\"`\n\tMimeType string          `xml:\"mimeType\"`\n\tInfo     BaseDescription `xml:\"info,typeattr\"`\n}\n\nfunc init() {\n\tt[\"DiagnosticManagerLogDescriptor\"] = 
reflect.TypeOf((*DiagnosticManagerLogDescriptor)(nil)).Elem()\n}\n\ntype DiagnosticManagerLogHeader struct {\n\tDynamicData\n\n\tLineStart int32    `xml:\"lineStart\"`\n\tLineEnd   int32    `xml:\"lineEnd\"`\n\tLineText  []string `xml:\"lineText,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DiagnosticManagerLogHeader\"] = reflect.TypeOf((*DiagnosticManagerLogHeader)(nil)).Elem()\n}\n\ntype DigestNotSupported struct {\n\tDeviceNotSupported\n}\n\nfunc init() {\n\tt[\"DigestNotSupported\"] = reflect.TypeOf((*DigestNotSupported)(nil)).Elem()\n}\n\ntype DigestNotSupportedFault DigestNotSupported\n\nfunc init() {\n\tt[\"DigestNotSupportedFault\"] = reflect.TypeOf((*DigestNotSupportedFault)(nil)).Elem()\n}\n\ntype DirectoryNotEmpty struct {\n\tFileFault\n}\n\nfunc init() {\n\tt[\"DirectoryNotEmpty\"] = reflect.TypeOf((*DirectoryNotEmpty)(nil)).Elem()\n}\n\ntype DirectoryNotEmptyFault DirectoryNotEmpty\n\nfunc init() {\n\tt[\"DirectoryNotEmptyFault\"] = reflect.TypeOf((*DirectoryNotEmptyFault)(nil)).Elem()\n}\n\ntype DisableAdminNotSupported struct {\n\tHostConfigFault\n}\n\nfunc init() {\n\tt[\"DisableAdminNotSupported\"] = reflect.TypeOf((*DisableAdminNotSupported)(nil)).Elem()\n}\n\ntype DisableAdminNotSupportedFault DisableAdminNotSupported\n\nfunc init() {\n\tt[\"DisableAdminNotSupportedFault\"] = reflect.TypeOf((*DisableAdminNotSupportedFault)(nil)).Elem()\n}\n\ntype DisableEvcModeRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"DisableEvcModeRequestType\"] = reflect.TypeOf((*DisableEvcModeRequestType)(nil)).Elem()\n}\n\ntype DisableEvcMode_Task DisableEvcModeRequestType\n\nfunc init() {\n\tt[\"DisableEvcMode_Task\"] = reflect.TypeOf((*DisableEvcMode_Task)(nil)).Elem()\n}\n\ntype DisableEvcMode_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype DisableFeature DisableFeatureRequestType\n\nfunc init() {\n\tt[\"DisableFeature\"] = reflect.TypeOf((*DisableFeature)(nil)).Elem()\n}\n\ntype 
DisableFeatureRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tHost       *ManagedObjectReference `xml:\"host,omitempty\"`\n\tFeatureKey string                  `xml:\"featureKey\"`\n}\n\nfunc init() {\n\tt[\"DisableFeatureRequestType\"] = reflect.TypeOf((*DisableFeatureRequestType)(nil)).Elem()\n}\n\ntype DisableFeatureResponse struct {\n\tReturnval bool `xml:\"returnval\"`\n}\n\ntype DisableHyperThreading DisableHyperThreadingRequestType\n\nfunc init() {\n\tt[\"DisableHyperThreading\"] = reflect.TypeOf((*DisableHyperThreading)(nil)).Elem()\n}\n\ntype DisableHyperThreadingRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"DisableHyperThreadingRequestType\"] = reflect.TypeOf((*DisableHyperThreadingRequestType)(nil)).Elem()\n}\n\ntype DisableHyperThreadingResponse struct {\n}\n\ntype DisableMultipathPath DisableMultipathPathRequestType\n\nfunc init() {\n\tt[\"DisableMultipathPath\"] = reflect.TypeOf((*DisableMultipathPath)(nil)).Elem()\n}\n\ntype DisableMultipathPathRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tPathName string                 `xml:\"pathName\"`\n}\n\nfunc init() {\n\tt[\"DisableMultipathPathRequestType\"] = reflect.TypeOf((*DisableMultipathPathRequestType)(nil)).Elem()\n}\n\ntype DisableMultipathPathResponse struct {\n}\n\ntype DisableRuleset DisableRulesetRequestType\n\nfunc init() {\n\tt[\"DisableRuleset\"] = reflect.TypeOf((*DisableRuleset)(nil)).Elem()\n}\n\ntype DisableRulesetRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tId   string                 `xml:\"id\"`\n}\n\nfunc init() {\n\tt[\"DisableRulesetRequestType\"] = reflect.TypeOf((*DisableRulesetRequestType)(nil)).Elem()\n}\n\ntype DisableRulesetResponse struct {\n}\n\ntype DisableSecondaryVMRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tVm   ManagedObjectReference `xml:\"vm\"`\n}\n\nfunc init() {\n\tt[\"DisableSecondaryVMRequestType\"] = 
reflect.TypeOf((*DisableSecondaryVMRequestType)(nil)).Elem()\n}\n\ntype DisableSecondaryVM_Task DisableSecondaryVMRequestType\n\nfunc init() {\n\tt[\"DisableSecondaryVM_Task\"] = reflect.TypeOf((*DisableSecondaryVM_Task)(nil)).Elem()\n}\n\ntype DisableSecondaryVM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype DisableSmartCardAuthentication DisableSmartCardAuthenticationRequestType\n\nfunc init() {\n\tt[\"DisableSmartCardAuthentication\"] = reflect.TypeOf((*DisableSmartCardAuthentication)(nil)).Elem()\n}\n\ntype DisableSmartCardAuthenticationRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"DisableSmartCardAuthenticationRequestType\"] = reflect.TypeOf((*DisableSmartCardAuthenticationRequestType)(nil)).Elem()\n}\n\ntype DisableSmartCardAuthenticationResponse struct {\n}\n\ntype DisallowedChangeByService struct {\n\tRuntimeFault\n\n\tServiceName      string `xml:\"serviceName\"`\n\tDisallowedChange string `xml:\"disallowedChange,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DisallowedChangeByService\"] = reflect.TypeOf((*DisallowedChangeByService)(nil)).Elem()\n}\n\ntype DisallowedChangeByServiceFault DisallowedChangeByService\n\nfunc init() {\n\tt[\"DisallowedChangeByServiceFault\"] = reflect.TypeOf((*DisallowedChangeByServiceFault)(nil)).Elem()\n}\n\ntype DisallowedDiskModeChange struct {\n\tInvalidDeviceSpec\n}\n\nfunc init() {\n\tt[\"DisallowedDiskModeChange\"] = reflect.TypeOf((*DisallowedDiskModeChange)(nil)).Elem()\n}\n\ntype DisallowedDiskModeChangeFault DisallowedDiskModeChange\n\nfunc init() {\n\tt[\"DisallowedDiskModeChangeFault\"] = reflect.TypeOf((*DisallowedDiskModeChangeFault)(nil)).Elem()\n}\n\ntype DisallowedMigrationDeviceAttached struct {\n\tMigrationFault\n\n\tFault LocalizedMethodFault `xml:\"fault\"`\n}\n\nfunc init() {\n\tt[\"DisallowedMigrationDeviceAttached\"] = reflect.TypeOf((*DisallowedMigrationDeviceAttached)(nil)).Elem()\n}\n\ntype 
DisallowedMigrationDeviceAttachedFault DisallowedMigrationDeviceAttached\n\nfunc init() {\n\tt[\"DisallowedMigrationDeviceAttachedFault\"] = reflect.TypeOf((*DisallowedMigrationDeviceAttachedFault)(nil)).Elem()\n}\n\ntype DisallowedOperationOnFailoverHost struct {\n\tRuntimeFault\n\n\tHost     ManagedObjectReference `xml:\"host\"`\n\tHostname string                 `xml:\"hostname\"`\n}\n\nfunc init() {\n\tt[\"DisallowedOperationOnFailoverHost\"] = reflect.TypeOf((*DisallowedOperationOnFailoverHost)(nil)).Elem()\n}\n\ntype DisallowedOperationOnFailoverHostFault DisallowedOperationOnFailoverHost\n\nfunc init() {\n\tt[\"DisallowedOperationOnFailoverHostFault\"] = reflect.TypeOf((*DisallowedOperationOnFailoverHostFault)(nil)).Elem()\n}\n\ntype DisconnectHostRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"DisconnectHostRequestType\"] = reflect.TypeOf((*DisconnectHostRequestType)(nil)).Elem()\n}\n\ntype DisconnectHost_Task DisconnectHostRequestType\n\nfunc init() {\n\tt[\"DisconnectHost_Task\"] = reflect.TypeOf((*DisconnectHost_Task)(nil)).Elem()\n}\n\ntype DisconnectHost_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype DisconnectedHostsBlockingEVC struct {\n\tEVCConfigFault\n}\n\nfunc init() {\n\tt[\"DisconnectedHostsBlockingEVC\"] = reflect.TypeOf((*DisconnectedHostsBlockingEVC)(nil)).Elem()\n}\n\ntype DisconnectedHostsBlockingEVCFault DisconnectedHostsBlockingEVC\n\nfunc init() {\n\tt[\"DisconnectedHostsBlockingEVCFault\"] = reflect.TypeOf((*DisconnectedHostsBlockingEVCFault)(nil)).Elem()\n}\n\ntype DiscoverFcoeHbas DiscoverFcoeHbasRequestType\n\nfunc init() {\n\tt[\"DiscoverFcoeHbas\"] = reflect.TypeOf((*DiscoverFcoeHbas)(nil)).Elem()\n}\n\ntype DiscoverFcoeHbasRequestType struct {\n\tThis     ManagedObjectReference      `xml:\"_this\"`\n\tFcoeSpec FcoeConfigFcoeSpecification `xml:\"fcoeSpec\"`\n}\n\nfunc init() {\n\tt[\"DiscoverFcoeHbasRequestType\"] = 
reflect.TypeOf((*DiscoverFcoeHbasRequestType)(nil)).Elem()\n}\n\ntype DiscoverFcoeHbasResponse struct {\n}\n\ntype DiskChangeExtent struct {\n\tDynamicData\n\n\tStart  int64 `xml:\"start\"`\n\tLength int64 `xml:\"length\"`\n}\n\nfunc init() {\n\tt[\"DiskChangeExtent\"] = reflect.TypeOf((*DiskChangeExtent)(nil)).Elem()\n}\n\ntype DiskChangeInfo struct {\n\tDynamicData\n\n\tStartOffset int64              `xml:\"startOffset\"`\n\tLength      int64              `xml:\"length\"`\n\tChangedArea []DiskChangeExtent `xml:\"changedArea,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DiskChangeInfo\"] = reflect.TypeOf((*DiskChangeInfo)(nil)).Elem()\n}\n\ntype DiskHasPartitions struct {\n\tVsanDiskFault\n}\n\nfunc init() {\n\tt[\"DiskHasPartitions\"] = reflect.TypeOf((*DiskHasPartitions)(nil)).Elem()\n}\n\ntype DiskHasPartitionsFault DiskHasPartitions\n\nfunc init() {\n\tt[\"DiskHasPartitionsFault\"] = reflect.TypeOf((*DiskHasPartitionsFault)(nil)).Elem()\n}\n\ntype DiskIsLastRemainingNonSSD struct {\n\tVsanDiskFault\n}\n\nfunc init() {\n\tt[\"DiskIsLastRemainingNonSSD\"] = reflect.TypeOf((*DiskIsLastRemainingNonSSD)(nil)).Elem()\n}\n\ntype DiskIsLastRemainingNonSSDFault DiskIsLastRemainingNonSSD\n\nfunc init() {\n\tt[\"DiskIsLastRemainingNonSSDFault\"] = reflect.TypeOf((*DiskIsLastRemainingNonSSDFault)(nil)).Elem()\n}\n\ntype DiskIsNonLocal struct {\n\tVsanDiskFault\n}\n\nfunc init() {\n\tt[\"DiskIsNonLocal\"] = reflect.TypeOf((*DiskIsNonLocal)(nil)).Elem()\n}\n\ntype DiskIsNonLocalFault DiskIsNonLocal\n\nfunc init() {\n\tt[\"DiskIsNonLocalFault\"] = reflect.TypeOf((*DiskIsNonLocalFault)(nil)).Elem()\n}\n\ntype DiskIsUSB struct {\n\tVsanDiskFault\n}\n\nfunc init() {\n\tt[\"DiskIsUSB\"] = reflect.TypeOf((*DiskIsUSB)(nil)).Elem()\n}\n\ntype DiskIsUSBFault DiskIsUSB\n\nfunc init() {\n\tt[\"DiskIsUSBFault\"] = reflect.TypeOf((*DiskIsUSBFault)(nil)).Elem()\n}\n\ntype DiskMoveTypeNotSupported struct {\n\tMigrationFault\n}\n\nfunc init() {\n\tt[\"DiskMoveTypeNotSupported\"] = 
reflect.TypeOf((*DiskMoveTypeNotSupported)(nil)).Elem()\n}\n\ntype DiskMoveTypeNotSupportedFault DiskMoveTypeNotSupported\n\nfunc init() {\n\tt[\"DiskMoveTypeNotSupportedFault\"] = reflect.TypeOf((*DiskMoveTypeNotSupportedFault)(nil)).Elem()\n}\n\ntype DiskNotSupported struct {\n\tVirtualHardwareCompatibilityIssue\n\n\tDisk int32 `xml:\"disk\"`\n}\n\nfunc init() {\n\tt[\"DiskNotSupported\"] = reflect.TypeOf((*DiskNotSupported)(nil)).Elem()\n}\n\ntype DiskNotSupportedFault BaseDiskNotSupported\n\nfunc init() {\n\tt[\"DiskNotSupportedFault\"] = reflect.TypeOf((*DiskNotSupportedFault)(nil)).Elem()\n}\n\ntype DiskTooSmall struct {\n\tVsanDiskFault\n}\n\nfunc init() {\n\tt[\"DiskTooSmall\"] = reflect.TypeOf((*DiskTooSmall)(nil)).Elem()\n}\n\ntype DiskTooSmallFault DiskTooSmall\n\nfunc init() {\n\tt[\"DiskTooSmallFault\"] = reflect.TypeOf((*DiskTooSmallFault)(nil)).Elem()\n}\n\ntype DissociateProfile DissociateProfileRequestType\n\nfunc init() {\n\tt[\"DissociateProfile\"] = reflect.TypeOf((*DissociateProfile)(nil)).Elem()\n}\n\ntype DissociateProfileRequestType struct {\n\tThis   ManagedObjectReference   `xml:\"_this\"`\n\tEntity []ManagedObjectReference `xml:\"entity,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DissociateProfileRequestType\"] = reflect.TypeOf((*DissociateProfileRequestType)(nil)).Elem()\n}\n\ntype DissociateProfileResponse struct {\n}\n\ntype DistributedVirtualPort struct {\n\tDynamicData\n\n\tKey              string                                 `xml:\"key\"`\n\tConfig           DVPortConfigInfo                       `xml:\"config\"`\n\tDvsUuid          string                                 `xml:\"dvsUuid\"`\n\tPortgroupKey     string                                 `xml:\"portgroupKey,omitempty\"`\n\tProxyHost        *ManagedObjectReference                `xml:\"proxyHost,omitempty\"`\n\tConnectee        *DistributedVirtualSwitchPortConnectee `xml:\"connectee,omitempty\"`\n\tConflict         bool                                   
`xml:\"conflict\"`\n\tConflictPortKey  string                                 `xml:\"conflictPortKey,omitempty\"`\n\tState            *DVPortState                           `xml:\"state,omitempty\"`\n\tConnectionCookie int32                                  `xml:\"connectionCookie,omitempty\"`\n\tLastStatusChange time.Time                              `xml:\"lastStatusChange\"`\n\tHostLocalPort    *bool                                  `xml:\"hostLocalPort\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualPort\"] = reflect.TypeOf((*DistributedVirtualPort)(nil)).Elem()\n}\n\ntype DistributedVirtualPortgroupInfo struct {\n\tDynamicData\n\n\tSwitchName                  string                 `xml:\"switchName\"`\n\tSwitchUuid                  string                 `xml:\"switchUuid\"`\n\tPortgroupName               string                 `xml:\"portgroupName\"`\n\tPortgroupKey                string                 `xml:\"portgroupKey\"`\n\tPortgroupType               string                 `xml:\"portgroupType\"`\n\tUplinkPortgroup             bool                   `xml:\"uplinkPortgroup\"`\n\tPortgroup                   ManagedObjectReference `xml:\"portgroup\"`\n\tNetworkReservationSupported *bool                  `xml:\"networkReservationSupported\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualPortgroupInfo\"] = reflect.TypeOf((*DistributedVirtualPortgroupInfo)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchHostMember struct {\n\tDynamicData\n\n\tRuntimeState  *DistributedVirtualSwitchHostMemberRuntimeState `xml:\"runtimeState,omitempty\"`\n\tConfig        DistributedVirtualSwitchHostMemberConfigInfo    `xml:\"config\"`\n\tProductInfo   *DistributedVirtualSwitchProductSpec            `xml:\"productInfo,omitempty\"`\n\tUplinkPortKey []string                                        `xml:\"uplinkPortKey,omitempty\"`\n\tStatus        string                                          `xml:\"status\"`\n\tStatusDetail  string                                          
`xml:\"statusDetail,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchHostMember\"] = reflect.TypeOf((*DistributedVirtualSwitchHostMember)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchHostMemberBacking struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchHostMemberBacking\"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberBacking)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchHostMemberConfigInfo struct {\n\tDynamicData\n\n\tHost                 *ManagedObjectReference                       `xml:\"host,omitempty\"`\n\tMaxProxySwitchPorts  int32                                         `xml:\"maxProxySwitchPorts\"`\n\tVendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob     `xml:\"vendorSpecificConfig,omitempty\"`\n\tBacking              BaseDistributedVirtualSwitchHostMemberBacking `xml:\"backing,typeattr\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchHostMemberConfigInfo\"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberConfigInfo)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchHostMemberConfigSpec struct {\n\tDynamicData\n\n\tOperation            string                                        `xml:\"operation\"`\n\tHost                 ManagedObjectReference                        `xml:\"host\"`\n\tBacking              BaseDistributedVirtualSwitchHostMemberBacking `xml:\"backing,omitempty,typeattr\"`\n\tMaxProxySwitchPorts  int32                                         `xml:\"maxProxySwitchPorts,omitempty\"`\n\tVendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob     `xml:\"vendorSpecificConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchHostMemberConfigSpec\"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberConfigSpec)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchHostMemberPnicBacking struct {\n\tDistributedVirtualSwitchHostMemberBacking\n\n\tPnicSpec []DistributedVirtualSwitchHostMemberPnicSpec `xml:\"pnicSpec,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"DistributedVirtualSwitchHostMemberPnicBacking\"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberPnicBacking)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchHostMemberPnicSpec struct {\n\tDynamicData\n\n\tPnicDevice         string `xml:\"pnicDevice\"`\n\tUplinkPortKey      string `xml:\"uplinkPortKey,omitempty\"`\n\tUplinkPortgroupKey string `xml:\"uplinkPortgroupKey,omitempty\"`\n\tConnectionCookie   int32  `xml:\"connectionCookie,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchHostMemberPnicSpec\"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberPnicSpec)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchHostMemberRuntimeState struct {\n\tDynamicData\n\n\tCurrentMaxProxySwitchPorts int32 `xml:\"currentMaxProxySwitchPorts\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchHostMemberRuntimeState\"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberRuntimeState)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchHostProductSpec struct {\n\tDynamicData\n\n\tProductLineId string `xml:\"productLineId,omitempty\"`\n\tVersion       string `xml:\"version,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchHostProductSpec\"] = reflect.TypeOf((*DistributedVirtualSwitchHostProductSpec)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchInfo struct {\n\tDynamicData\n\n\tSwitchName                  string                 `xml:\"switchName\"`\n\tSwitchUuid                  string                 `xml:\"switchUuid\"`\n\tDistributedVirtualSwitch    ManagedObjectReference `xml:\"distributedVirtualSwitch\"`\n\tNetworkReservationSupported *bool                  `xml:\"networkReservationSupported\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchInfo\"] = reflect.TypeOf((*DistributedVirtualSwitchInfo)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchKeyedOpaqueBlob struct {\n\tDynamicData\n\n\tKey        string `xml:\"key\"`\n\tOpaqueData string `xml:\"opaqueData\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchKeyedOpaqueBlob\"] 
= reflect.TypeOf((*DistributedVirtualSwitchKeyedOpaqueBlob)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchManagerCompatibilityResult struct {\n\tDynamicData\n\n\tHost  ManagedObjectReference `xml:\"host\"`\n\tError []LocalizedMethodFault `xml:\"error,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchManagerCompatibilityResult\"] = reflect.TypeOf((*DistributedVirtualSwitchManagerCompatibilityResult)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchManagerDvsProductSpec struct {\n\tDynamicData\n\n\tNewSwitchProductSpec     *DistributedVirtualSwitchProductSpec `xml:\"newSwitchProductSpec,omitempty\"`\n\tDistributedVirtualSwitch *ManagedObjectReference              `xml:\"distributedVirtualSwitch,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchManagerDvsProductSpec\"] = reflect.TypeOf((*DistributedVirtualSwitchManagerDvsProductSpec)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchManagerHostArrayFilter struct {\n\tDistributedVirtualSwitchManagerHostDvsFilterSpec\n\n\tHost []ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchManagerHostArrayFilter\"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostArrayFilter)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchManagerHostContainer struct {\n\tDynamicData\n\n\tContainer ManagedObjectReference `xml:\"container\"`\n\tRecursive bool                   `xml:\"recursive\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchManagerHostContainer\"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostContainer)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchManagerHostContainerFilter struct {\n\tDistributedVirtualSwitchManagerHostDvsFilterSpec\n\n\tHostContainer DistributedVirtualSwitchManagerHostContainer `xml:\"hostContainer\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchManagerHostContainerFilter\"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostContainerFilter)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchManagerHostDvsFilterSpec 
struct {\n\tDynamicData\n\n\tInclusive bool `xml:\"inclusive\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchManagerHostDvsFilterSpec\"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostDvsFilterSpec)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchManagerHostDvsMembershipFilter struct {\n\tDistributedVirtualSwitchManagerHostDvsFilterSpec\n\n\tDistributedVirtualSwitch ManagedObjectReference `xml:\"distributedVirtualSwitch\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchManagerHostDvsMembershipFilter\"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostDvsMembershipFilter)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchManagerImportResult struct {\n\tDynamicData\n\n\tDistributedVirtualSwitch    []ManagedObjectReference                `xml:\"distributedVirtualSwitch,omitempty\"`\n\tDistributedVirtualPortgroup []ManagedObjectReference                `xml:\"distributedVirtualPortgroup,omitempty\"`\n\tImportFault                 []ImportOperationBulkFaultFaultOnImport `xml:\"importFault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchManagerImportResult\"] = reflect.TypeOf((*DistributedVirtualSwitchManagerImportResult)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchPortConnectee struct {\n\tDynamicData\n\n\tConnectedEntity *ManagedObjectReference `xml:\"connectedEntity,omitempty\"`\n\tNicKey          string                  `xml:\"nicKey,omitempty\"`\n\tType            string                  `xml:\"type,omitempty\"`\n\tAddressHint     string                  `xml:\"addressHint,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchPortConnectee\"] = reflect.TypeOf((*DistributedVirtualSwitchPortConnectee)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchPortConnection struct {\n\tDynamicData\n\n\tSwitchUuid       string `xml:\"switchUuid\"`\n\tPortgroupKey     string `xml:\"portgroupKey,omitempty\"`\n\tPortKey          string `xml:\"portKey,omitempty\"`\n\tConnectionCookie int32  
`xml:\"connectionCookie,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchPortConnection\"] = reflect.TypeOf((*DistributedVirtualSwitchPortConnection)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchPortCriteria struct {\n\tDynamicData\n\n\tConnected    *bool                    `xml:\"connected\"`\n\tActive       *bool                    `xml:\"active\"`\n\tUplinkPort   *bool                    `xml:\"uplinkPort\"`\n\tScope        *ManagedObjectReference  `xml:\"scope,omitempty\"`\n\tPortgroupKey []string                 `xml:\"portgroupKey,omitempty\"`\n\tInside       *bool                    `xml:\"inside\"`\n\tPortKey      []string                 `xml:\"portKey,omitempty\"`\n\tHost         []ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchPortCriteria\"] = reflect.TypeOf((*DistributedVirtualSwitchPortCriteria)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchPortStatistics struct {\n\tDynamicData\n\n\tPacketsInMulticast  int64 `xml:\"packetsInMulticast\"`\n\tPacketsOutMulticast int64 `xml:\"packetsOutMulticast\"`\n\tBytesInMulticast    int64 `xml:\"bytesInMulticast\"`\n\tBytesOutMulticast   int64 `xml:\"bytesOutMulticast\"`\n\tPacketsInUnicast    int64 `xml:\"packetsInUnicast\"`\n\tPacketsOutUnicast   int64 `xml:\"packetsOutUnicast\"`\n\tBytesInUnicast      int64 `xml:\"bytesInUnicast\"`\n\tBytesOutUnicast     int64 `xml:\"bytesOutUnicast\"`\n\tPacketsInBroadcast  int64 `xml:\"packetsInBroadcast\"`\n\tPacketsOutBroadcast int64 `xml:\"packetsOutBroadcast\"`\n\tBytesInBroadcast    int64 `xml:\"bytesInBroadcast\"`\n\tBytesOutBroadcast   int64 `xml:\"bytesOutBroadcast\"`\n\tPacketsInDropped    int64 `xml:\"packetsInDropped\"`\n\tPacketsOutDropped   int64 `xml:\"packetsOutDropped\"`\n\tPacketsInException  int64 `xml:\"packetsInException\"`\n\tPacketsOutException int64 `xml:\"packetsOutException\"`\n\tBytesInFromPnic     int64 `xml:\"bytesInFromPnic,omitempty\"`\n\tBytesOutToPnic      int64 
`xml:\"bytesOutToPnic,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchPortStatistics\"] = reflect.TypeOf((*DistributedVirtualSwitchPortStatistics)(nil)).Elem()\n}\n\ntype DistributedVirtualSwitchProductSpec struct {\n\tDynamicData\n\n\tName            string `xml:\"name,omitempty\"`\n\tVendor          string `xml:\"vendor,omitempty\"`\n\tVersion         string `xml:\"version,omitempty\"`\n\tBuild           string `xml:\"build,omitempty\"`\n\tForwardingClass string `xml:\"forwardingClass,omitempty\"`\n\tBundleId        string `xml:\"bundleId,omitempty\"`\n\tBundleUrl       string `xml:\"bundleUrl,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DistributedVirtualSwitchProductSpec\"] = reflect.TypeOf((*DistributedVirtualSwitchProductSpec)(nil)).Elem()\n}\n\ntype DoesCustomizationSpecExist DoesCustomizationSpecExistRequestType\n\nfunc init() {\n\tt[\"DoesCustomizationSpecExist\"] = reflect.TypeOf((*DoesCustomizationSpecExist)(nil)).Elem()\n}\n\ntype DoesCustomizationSpecExistRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tName string                 `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"DoesCustomizationSpecExistRequestType\"] = reflect.TypeOf((*DoesCustomizationSpecExistRequestType)(nil)).Elem()\n}\n\ntype DoesCustomizationSpecExistResponse struct {\n\tReturnval bool `xml:\"returnval\"`\n}\n\ntype DomainNotFound struct {\n\tActiveDirectoryFault\n\n\tDomainName string `xml:\"domainName\"`\n}\n\nfunc init() {\n\tt[\"DomainNotFound\"] = reflect.TypeOf((*DomainNotFound)(nil)).Elem()\n}\n\ntype DomainNotFoundFault DomainNotFound\n\nfunc init() {\n\tt[\"DomainNotFoundFault\"] = reflect.TypeOf((*DomainNotFoundFault)(nil)).Elem()\n}\n\ntype DrsDisabledEvent struct {\n\tClusterEvent\n}\n\nfunc init() {\n\tt[\"DrsDisabledEvent\"] = reflect.TypeOf((*DrsDisabledEvent)(nil)).Elem()\n}\n\ntype DrsDisabledOnVm struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"DrsDisabledOnVm\"] = reflect.TypeOf((*DrsDisabledOnVm)(nil)).Elem()\n}\n\ntype 
DrsDisabledOnVmFault DrsDisabledOnVm\n\nfunc init() {\n\tt[\"DrsDisabledOnVmFault\"] = reflect.TypeOf((*DrsDisabledOnVmFault)(nil)).Elem()\n}\n\ntype DrsEnabledEvent struct {\n\tClusterEvent\n\n\tBehavior string `xml:\"behavior\"`\n}\n\nfunc init() {\n\tt[\"DrsEnabledEvent\"] = reflect.TypeOf((*DrsEnabledEvent)(nil)).Elem()\n}\n\ntype DrsEnteredStandbyModeEvent struct {\n\tEnteredStandbyModeEvent\n}\n\nfunc init() {\n\tt[\"DrsEnteredStandbyModeEvent\"] = reflect.TypeOf((*DrsEnteredStandbyModeEvent)(nil)).Elem()\n}\n\ntype DrsEnteringStandbyModeEvent struct {\n\tEnteringStandbyModeEvent\n}\n\nfunc init() {\n\tt[\"DrsEnteringStandbyModeEvent\"] = reflect.TypeOf((*DrsEnteringStandbyModeEvent)(nil)).Elem()\n}\n\ntype DrsExitStandbyModeFailedEvent struct {\n\tExitStandbyModeFailedEvent\n}\n\nfunc init() {\n\tt[\"DrsExitStandbyModeFailedEvent\"] = reflect.TypeOf((*DrsExitStandbyModeFailedEvent)(nil)).Elem()\n}\n\ntype DrsExitedStandbyModeEvent struct {\n\tExitedStandbyModeEvent\n}\n\nfunc init() {\n\tt[\"DrsExitedStandbyModeEvent\"] = reflect.TypeOf((*DrsExitedStandbyModeEvent)(nil)).Elem()\n}\n\ntype DrsExitingStandbyModeEvent struct {\n\tExitingStandbyModeEvent\n}\n\nfunc init() {\n\tt[\"DrsExitingStandbyModeEvent\"] = reflect.TypeOf((*DrsExitingStandbyModeEvent)(nil)).Elem()\n}\n\ntype DrsInvocationFailedEvent struct {\n\tClusterEvent\n}\n\nfunc init() {\n\tt[\"DrsInvocationFailedEvent\"] = reflect.TypeOf((*DrsInvocationFailedEvent)(nil)).Elem()\n}\n\ntype DrsRecoveredFromFailureEvent struct {\n\tClusterEvent\n}\n\nfunc init() {\n\tt[\"DrsRecoveredFromFailureEvent\"] = reflect.TypeOf((*DrsRecoveredFromFailureEvent)(nil)).Elem()\n}\n\ntype DrsResourceConfigureFailedEvent struct {\n\tHostEvent\n\n\tReason LocalizedMethodFault `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"DrsResourceConfigureFailedEvent\"] = reflect.TypeOf((*DrsResourceConfigureFailedEvent)(nil)).Elem()\n}\n\ntype DrsResourceConfigureSyncedEvent struct {\n\tHostEvent\n}\n\nfunc init() 
{\n\tt[\"DrsResourceConfigureSyncedEvent\"] = reflect.TypeOf((*DrsResourceConfigureSyncedEvent)(nil)).Elem()\n}\n\ntype DrsRuleComplianceEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"DrsRuleComplianceEvent\"] = reflect.TypeOf((*DrsRuleComplianceEvent)(nil)).Elem()\n}\n\ntype DrsRuleViolationEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"DrsRuleViolationEvent\"] = reflect.TypeOf((*DrsRuleViolationEvent)(nil)).Elem()\n}\n\ntype DrsSoftRuleViolationEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"DrsSoftRuleViolationEvent\"] = reflect.TypeOf((*DrsSoftRuleViolationEvent)(nil)).Elem()\n}\n\ntype DrsVmMigratedEvent struct {\n\tVmMigratedEvent\n}\n\nfunc init() {\n\tt[\"DrsVmMigratedEvent\"] = reflect.TypeOf((*DrsVmMigratedEvent)(nil)).Elem()\n}\n\ntype DrsVmPoweredOnEvent struct {\n\tVmPoweredOnEvent\n}\n\nfunc init() {\n\tt[\"DrsVmPoweredOnEvent\"] = reflect.TypeOf((*DrsVmPoweredOnEvent)(nil)).Elem()\n}\n\ntype DrsVmotionIncompatibleFault struct {\n\tVirtualHardwareCompatibilityIssue\n\n\tHost ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"DrsVmotionIncompatibleFault\"] = reflect.TypeOf((*DrsVmotionIncompatibleFault)(nil)).Elem()\n}\n\ntype DrsVmotionIncompatibleFaultFault DrsVmotionIncompatibleFault\n\nfunc init() {\n\tt[\"DrsVmotionIncompatibleFaultFault\"] = reflect.TypeOf((*DrsVmotionIncompatibleFaultFault)(nil)).Elem()\n}\n\ntype DuplicateCustomizationSpec DuplicateCustomizationSpecRequestType\n\nfunc init() {\n\tt[\"DuplicateCustomizationSpec\"] = reflect.TypeOf((*DuplicateCustomizationSpec)(nil)).Elem()\n}\n\ntype DuplicateCustomizationSpecRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tName    string                 `xml:\"name\"`\n\tNewName string                 `xml:\"newName\"`\n}\n\nfunc init() {\n\tt[\"DuplicateCustomizationSpecRequestType\"] = reflect.TypeOf((*DuplicateCustomizationSpecRequestType)(nil)).Elem()\n}\n\ntype DuplicateCustomizationSpecResponse struct {\n}\n\ntype DuplicateDisks 
struct {\n\tVsanDiskFault\n}\n\nfunc init() {\n\tt[\"DuplicateDisks\"] = reflect.TypeOf((*DuplicateDisks)(nil)).Elem()\n}\n\ntype DuplicateDisksFault DuplicateDisks\n\nfunc init() {\n\tt[\"DuplicateDisksFault\"] = reflect.TypeOf((*DuplicateDisksFault)(nil)).Elem()\n}\n\ntype DuplicateIpDetectedEvent struct {\n\tHostEvent\n\n\tDuplicateIP string `xml:\"duplicateIP\"`\n\tMacAddress  string `xml:\"macAddress\"`\n}\n\nfunc init() {\n\tt[\"DuplicateIpDetectedEvent\"] = reflect.TypeOf((*DuplicateIpDetectedEvent)(nil)).Elem()\n}\n\ntype DuplicateName struct {\n\tVimFault\n\n\tName   string                 `xml:\"name\"`\n\tObject ManagedObjectReference `xml:\"object\"`\n}\n\nfunc init() {\n\tt[\"DuplicateName\"] = reflect.TypeOf((*DuplicateName)(nil)).Elem()\n}\n\ntype DuplicateNameFault DuplicateName\n\nfunc init() {\n\tt[\"DuplicateNameFault\"] = reflect.TypeOf((*DuplicateNameFault)(nil)).Elem()\n}\n\ntype DuplicateVsanNetworkInterface struct {\n\tVsanFault\n\n\tDevice string `xml:\"device\"`\n}\n\nfunc init() {\n\tt[\"DuplicateVsanNetworkInterface\"] = reflect.TypeOf((*DuplicateVsanNetworkInterface)(nil)).Elem()\n}\n\ntype DuplicateVsanNetworkInterfaceFault DuplicateVsanNetworkInterface\n\nfunc init() {\n\tt[\"DuplicateVsanNetworkInterfaceFault\"] = reflect.TypeOf((*DuplicateVsanNetworkInterfaceFault)(nil)).Elem()\n}\n\ntype DvpgImportEvent struct {\n\tDVPortgroupEvent\n\n\tImportType string `xml:\"importType\"`\n}\n\nfunc init() {\n\tt[\"DvpgImportEvent\"] = reflect.TypeOf((*DvpgImportEvent)(nil)).Elem()\n}\n\ntype DvpgRestoreEvent struct {\n\tDVPortgroupEvent\n}\n\nfunc init() {\n\tt[\"DvpgRestoreEvent\"] = reflect.TypeOf((*DvpgRestoreEvent)(nil)).Elem()\n}\n\ntype DvsAcceptNetworkRuleAction struct {\n\tDvsNetworkRuleAction\n}\n\nfunc init() {\n\tt[\"DvsAcceptNetworkRuleAction\"] = reflect.TypeOf((*DvsAcceptNetworkRuleAction)(nil)).Elem()\n}\n\ntype DvsApplyOperationFault struct {\n\tDvsFault\n\n\tObjectFault []DvsApplyOperationFaultFaultOnObject 
`xml:\"objectFault\"`\n}\n\nfunc init() {\n\tt[\"DvsApplyOperationFault\"] = reflect.TypeOf((*DvsApplyOperationFault)(nil)).Elem()\n}\n\ntype DvsApplyOperationFaultFault DvsApplyOperationFault\n\nfunc init() {\n\tt[\"DvsApplyOperationFaultFault\"] = reflect.TypeOf((*DvsApplyOperationFaultFault)(nil)).Elem()\n}\n\ntype DvsApplyOperationFaultFaultOnObject struct {\n\tDynamicData\n\n\tObjectId string               `xml:\"objectId\"`\n\tType     string               `xml:\"type\"`\n\tFault    LocalizedMethodFault `xml:\"fault\"`\n}\n\nfunc init() {\n\tt[\"DvsApplyOperationFaultFaultOnObject\"] = reflect.TypeOf((*DvsApplyOperationFaultFaultOnObject)(nil)).Elem()\n}\n\ntype DvsCopyNetworkRuleAction struct {\n\tDvsNetworkRuleAction\n}\n\nfunc init() {\n\tt[\"DvsCopyNetworkRuleAction\"] = reflect.TypeOf((*DvsCopyNetworkRuleAction)(nil)).Elem()\n}\n\ntype DvsCreatedEvent struct {\n\tDvsEvent\n\n\tParent FolderEventArgument `xml:\"parent\"`\n}\n\nfunc init() {\n\tt[\"DvsCreatedEvent\"] = reflect.TypeOf((*DvsCreatedEvent)(nil)).Elem()\n}\n\ntype DvsDestroyedEvent struct {\n\tDvsEvent\n}\n\nfunc init() {\n\tt[\"DvsDestroyedEvent\"] = reflect.TypeOf((*DvsDestroyedEvent)(nil)).Elem()\n}\n\ntype DvsDropNetworkRuleAction struct {\n\tDvsNetworkRuleAction\n}\n\nfunc init() {\n\tt[\"DvsDropNetworkRuleAction\"] = reflect.TypeOf((*DvsDropNetworkRuleAction)(nil)).Elem()\n}\n\ntype DvsEvent struct {\n\tEvent\n}\n\nfunc init() {\n\tt[\"DvsEvent\"] = reflect.TypeOf((*DvsEvent)(nil)).Elem()\n}\n\ntype DvsEventArgument struct {\n\tEntityEventArgument\n\n\tDvs ManagedObjectReference `xml:\"dvs\"`\n}\n\nfunc init() {\n\tt[\"DvsEventArgument\"] = reflect.TypeOf((*DvsEventArgument)(nil)).Elem()\n}\n\ntype DvsFault struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"DvsFault\"] = reflect.TypeOf((*DvsFault)(nil)).Elem()\n}\n\ntype DvsFaultFault BaseDvsFault\n\nfunc init() {\n\tt[\"DvsFaultFault\"] = reflect.TypeOf((*DvsFaultFault)(nil)).Elem()\n}\n\ntype DvsFilterConfig struct 
{\n\tInheritablePolicy\n\n\tKey        string              `xml:\"key,omitempty\"`\n\tAgentName  string              `xml:\"agentName,omitempty\"`\n\tSlotNumber string              `xml:\"slotNumber,omitempty\"`\n\tParameters *DvsFilterParameter `xml:\"parameters,omitempty\"`\n\tOnFailure  string              `xml:\"onFailure,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsFilterConfig\"] = reflect.TypeOf((*DvsFilterConfig)(nil)).Elem()\n}\n\ntype DvsFilterConfigSpec struct {\n\tDvsFilterConfig\n\n\tOperation string `xml:\"operation\"`\n}\n\nfunc init() {\n\tt[\"DvsFilterConfigSpec\"] = reflect.TypeOf((*DvsFilterConfigSpec)(nil)).Elem()\n}\n\ntype DvsFilterParameter struct {\n\tDynamicData\n\n\tParameters []string `xml:\"parameters,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsFilterParameter\"] = reflect.TypeOf((*DvsFilterParameter)(nil)).Elem()\n}\n\ntype DvsFilterPolicy struct {\n\tInheritablePolicy\n\n\tFilterConfig []BaseDvsFilterConfig `xml:\"filterConfig,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"DvsFilterPolicy\"] = reflect.TypeOf((*DvsFilterPolicy)(nil)).Elem()\n}\n\ntype DvsGreEncapNetworkRuleAction struct {\n\tDvsNetworkRuleAction\n\n\tEncapsulationIp SingleIp `xml:\"encapsulationIp\"`\n}\n\nfunc init() {\n\tt[\"DvsGreEncapNetworkRuleAction\"] = reflect.TypeOf((*DvsGreEncapNetworkRuleAction)(nil)).Elem()\n}\n\ntype DvsHealthStatusChangeEvent struct {\n\tHostEvent\n\n\tSwitchUuid   string                          `xml:\"switchUuid\"`\n\tHealthResult BaseHostMemberHealthCheckResult `xml:\"healthResult,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"DvsHealthStatusChangeEvent\"] = reflect.TypeOf((*DvsHealthStatusChangeEvent)(nil)).Elem()\n}\n\ntype DvsHostBackInSyncEvent struct {\n\tDvsEvent\n\n\tHostBackInSync HostEventArgument `xml:\"hostBackInSync\"`\n}\n\nfunc init() {\n\tt[\"DvsHostBackInSyncEvent\"] = reflect.TypeOf((*DvsHostBackInSyncEvent)(nil)).Elem()\n}\n\ntype DvsHostInfrastructureTrafficResource struct {\n\tDynamicData\n\n\tKey            string  
                                       `xml:\"key\"`\n\tDescription    string                                         `xml:\"description,omitempty\"`\n\tAllocationInfo DvsHostInfrastructureTrafficResourceAllocation `xml:\"allocationInfo\"`\n}\n\nfunc init() {\n\tt[\"DvsHostInfrastructureTrafficResource\"] = reflect.TypeOf((*DvsHostInfrastructureTrafficResource)(nil)).Elem()\n}\n\ntype DvsHostInfrastructureTrafficResourceAllocation struct {\n\tDynamicData\n\n\tLimit       int64       `xml:\"limit,omitempty\"`\n\tShares      *SharesInfo `xml:\"shares,omitempty\"`\n\tReservation int64       `xml:\"reservation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsHostInfrastructureTrafficResourceAllocation\"] = reflect.TypeOf((*DvsHostInfrastructureTrafficResourceAllocation)(nil)).Elem()\n}\n\ntype DvsHostJoinedEvent struct {\n\tDvsEvent\n\n\tHostJoined HostEventArgument `xml:\"hostJoined\"`\n}\n\nfunc init() {\n\tt[\"DvsHostJoinedEvent\"] = reflect.TypeOf((*DvsHostJoinedEvent)(nil)).Elem()\n}\n\ntype DvsHostLeftEvent struct {\n\tDvsEvent\n\n\tHostLeft HostEventArgument `xml:\"hostLeft\"`\n}\n\nfunc init() {\n\tt[\"DvsHostLeftEvent\"] = reflect.TypeOf((*DvsHostLeftEvent)(nil)).Elem()\n}\n\ntype DvsHostStatusUpdated struct {\n\tDvsEvent\n\n\tHostMember      HostEventArgument `xml:\"hostMember\"`\n\tOldStatus       string            `xml:\"oldStatus,omitempty\"`\n\tNewStatus       string            `xml:\"newStatus,omitempty\"`\n\tOldStatusDetail string            `xml:\"oldStatusDetail,omitempty\"`\n\tNewStatusDetail string            `xml:\"newStatusDetail,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsHostStatusUpdated\"] = reflect.TypeOf((*DvsHostStatusUpdated)(nil)).Elem()\n}\n\ntype DvsHostVNicProfile struct {\n\tDvsVNicProfile\n}\n\nfunc init() {\n\tt[\"DvsHostVNicProfile\"] = reflect.TypeOf((*DvsHostVNicProfile)(nil)).Elem()\n}\n\ntype DvsHostWentOutOfSyncEvent struct {\n\tDvsEvent\n\n\tHostOutOfSync DvsOutOfSyncHostArgument `xml:\"hostOutOfSync\"`\n}\n\nfunc init() 
{\n\tt[\"DvsHostWentOutOfSyncEvent\"] = reflect.TypeOf((*DvsHostWentOutOfSyncEvent)(nil)).Elem()\n}\n\ntype DvsImportEvent struct {\n\tDvsEvent\n\n\tImportType string `xml:\"importType\"`\n}\n\nfunc init() {\n\tt[\"DvsImportEvent\"] = reflect.TypeOf((*DvsImportEvent)(nil)).Elem()\n}\n\ntype DvsIpNetworkRuleQualifier struct {\n\tDvsNetworkRuleQualifier\n\n\tSourceAddress      BaseIpAddress  `xml:\"sourceAddress,omitempty,typeattr\"`\n\tDestinationAddress BaseIpAddress  `xml:\"destinationAddress,omitempty,typeattr\"`\n\tProtocol           *IntExpression `xml:\"protocol,omitempty\"`\n\tSourceIpPort       BaseDvsIpPort  `xml:\"sourceIpPort,omitempty,typeattr\"`\n\tDestinationIpPort  BaseDvsIpPort  `xml:\"destinationIpPort,omitempty,typeattr\"`\n\tTcpFlags           *IntExpression `xml:\"tcpFlags,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsIpNetworkRuleQualifier\"] = reflect.TypeOf((*DvsIpNetworkRuleQualifier)(nil)).Elem()\n}\n\ntype DvsIpPort struct {\n\tNegatableExpression\n}\n\nfunc init() {\n\tt[\"DvsIpPort\"] = reflect.TypeOf((*DvsIpPort)(nil)).Elem()\n}\n\ntype DvsIpPortRange struct {\n\tDvsIpPort\n\n\tStartPortNumber int32 `xml:\"startPortNumber\"`\n\tEndPortNumber   int32 `xml:\"endPortNumber\"`\n}\n\nfunc init() {\n\tt[\"DvsIpPortRange\"] = reflect.TypeOf((*DvsIpPortRange)(nil)).Elem()\n}\n\ntype DvsLogNetworkRuleAction struct {\n\tDvsNetworkRuleAction\n}\n\nfunc init() {\n\tt[\"DvsLogNetworkRuleAction\"] = reflect.TypeOf((*DvsLogNetworkRuleAction)(nil)).Elem()\n}\n\ntype DvsMacNetworkRuleQualifier struct {\n\tDvsNetworkRuleQualifier\n\n\tSourceAddress      BaseMacAddress `xml:\"sourceAddress,omitempty,typeattr\"`\n\tDestinationAddress BaseMacAddress `xml:\"destinationAddress,omitempty,typeattr\"`\n\tProtocol           *IntExpression `xml:\"protocol,omitempty\"`\n\tVlanId             *IntExpression `xml:\"vlanId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsMacNetworkRuleQualifier\"] = reflect.TypeOf((*DvsMacNetworkRuleQualifier)(nil)).Elem()\n}\n\ntype 
DvsMacRewriteNetworkRuleAction struct {\n\tDvsNetworkRuleAction\n\n\tRewriteMac string `xml:\"rewriteMac\"`\n}\n\nfunc init() {\n\tt[\"DvsMacRewriteNetworkRuleAction\"] = reflect.TypeOf((*DvsMacRewriteNetworkRuleAction)(nil)).Elem()\n}\n\ntype DvsMergedEvent struct {\n\tDvsEvent\n\n\tSourceDvs      DvsEventArgument `xml:\"sourceDvs\"`\n\tDestinationDvs DvsEventArgument `xml:\"destinationDvs\"`\n}\n\nfunc init() {\n\tt[\"DvsMergedEvent\"] = reflect.TypeOf((*DvsMergedEvent)(nil)).Elem()\n}\n\ntype DvsNetworkRuleAction struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"DvsNetworkRuleAction\"] = reflect.TypeOf((*DvsNetworkRuleAction)(nil)).Elem()\n}\n\ntype DvsNetworkRuleQualifier struct {\n\tDynamicData\n\n\tKey string `xml:\"key,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsNetworkRuleQualifier\"] = reflect.TypeOf((*DvsNetworkRuleQualifier)(nil)).Elem()\n}\n\ntype DvsNotAuthorized struct {\n\tDvsFault\n\n\tSessionExtensionKey string `xml:\"sessionExtensionKey,omitempty\"`\n\tDvsExtensionKey     string `xml:\"dvsExtensionKey,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsNotAuthorized\"] = reflect.TypeOf((*DvsNotAuthorized)(nil)).Elem()\n}\n\ntype DvsNotAuthorizedFault DvsNotAuthorized\n\nfunc init() {\n\tt[\"DvsNotAuthorizedFault\"] = reflect.TypeOf((*DvsNotAuthorizedFault)(nil)).Elem()\n}\n\ntype DvsOperationBulkFault struct {\n\tDvsFault\n\n\tHostFault []DvsOperationBulkFaultFaultOnHost `xml:\"hostFault\"`\n}\n\nfunc init() {\n\tt[\"DvsOperationBulkFault\"] = reflect.TypeOf((*DvsOperationBulkFault)(nil)).Elem()\n}\n\ntype DvsOperationBulkFaultFault DvsOperationBulkFault\n\nfunc init() {\n\tt[\"DvsOperationBulkFaultFault\"] = reflect.TypeOf((*DvsOperationBulkFaultFault)(nil)).Elem()\n}\n\ntype DvsOperationBulkFaultFaultOnHost struct {\n\tDynamicData\n\n\tHost  ManagedObjectReference `xml:\"host\"`\n\tFault LocalizedMethodFault   `xml:\"fault\"`\n}\n\nfunc init() {\n\tt[\"DvsOperationBulkFaultFaultOnHost\"] = 
reflect.TypeOf((*DvsOperationBulkFaultFaultOnHost)(nil)).Elem()\n}\n\ntype DvsOutOfSyncHostArgument struct {\n\tDynamicData\n\n\tOutOfSyncHost   HostEventArgument `xml:\"outOfSyncHost\"`\n\tConfigParamters []string          `xml:\"configParamters\"`\n}\n\nfunc init() {\n\tt[\"DvsOutOfSyncHostArgument\"] = reflect.TypeOf((*DvsOutOfSyncHostArgument)(nil)).Elem()\n}\n\ntype DvsPortBlockedEvent struct {\n\tDvsEvent\n\n\tPortKey        string        `xml:\"portKey\"`\n\tStatusDetail   string        `xml:\"statusDetail,omitempty\"`\n\tRuntimeInfo    *DVPortStatus `xml:\"runtimeInfo,omitempty\"`\n\tPrevBlockState string        `xml:\"prevBlockState,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsPortBlockedEvent\"] = reflect.TypeOf((*DvsPortBlockedEvent)(nil)).Elem()\n}\n\ntype DvsPortConnectedEvent struct {\n\tDvsEvent\n\n\tPortKey   string                                 `xml:\"portKey\"`\n\tConnectee *DistributedVirtualSwitchPortConnectee `xml:\"connectee,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsPortConnectedEvent\"] = reflect.TypeOf((*DvsPortConnectedEvent)(nil)).Elem()\n}\n\ntype DvsPortCreatedEvent struct {\n\tDvsEvent\n\n\tPortKey []string `xml:\"portKey\"`\n}\n\nfunc init() {\n\tt[\"DvsPortCreatedEvent\"] = reflect.TypeOf((*DvsPortCreatedEvent)(nil)).Elem()\n}\n\ntype DvsPortDeletedEvent struct {\n\tDvsEvent\n\n\tPortKey []string `xml:\"portKey\"`\n}\n\nfunc init() {\n\tt[\"DvsPortDeletedEvent\"] = reflect.TypeOf((*DvsPortDeletedEvent)(nil)).Elem()\n}\n\ntype DvsPortDisconnectedEvent struct {\n\tDvsEvent\n\n\tPortKey   string                                 `xml:\"portKey\"`\n\tConnectee *DistributedVirtualSwitchPortConnectee `xml:\"connectee,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsPortDisconnectedEvent\"] = reflect.TypeOf((*DvsPortDisconnectedEvent)(nil)).Elem()\n}\n\ntype DvsPortEnteredPassthruEvent struct {\n\tDvsEvent\n\n\tPortKey     string        `xml:\"portKey\"`\n\tRuntimeInfo *DVPortStatus `xml:\"runtimeInfo,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"DvsPortEnteredPassthruEvent\"] = reflect.TypeOf((*DvsPortEnteredPassthruEvent)(nil)).Elem()\n}\n\ntype DvsPortExitedPassthruEvent struct {\n\tDvsEvent\n\n\tPortKey     string        `xml:\"portKey\"`\n\tRuntimeInfo *DVPortStatus `xml:\"runtimeInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsPortExitedPassthruEvent\"] = reflect.TypeOf((*DvsPortExitedPassthruEvent)(nil)).Elem()\n}\n\ntype DvsPortJoinPortgroupEvent struct {\n\tDvsEvent\n\n\tPortKey       string `xml:\"portKey\"`\n\tPortgroupKey  string `xml:\"portgroupKey\"`\n\tPortgroupName string `xml:\"portgroupName\"`\n}\n\nfunc init() {\n\tt[\"DvsPortJoinPortgroupEvent\"] = reflect.TypeOf((*DvsPortJoinPortgroupEvent)(nil)).Elem()\n}\n\ntype DvsPortLeavePortgroupEvent struct {\n\tDvsEvent\n\n\tPortKey       string `xml:\"portKey\"`\n\tPortgroupKey  string `xml:\"portgroupKey\"`\n\tPortgroupName string `xml:\"portgroupName\"`\n}\n\nfunc init() {\n\tt[\"DvsPortLeavePortgroupEvent\"] = reflect.TypeOf((*DvsPortLeavePortgroupEvent)(nil)).Elem()\n}\n\ntype DvsPortLinkDownEvent struct {\n\tDvsEvent\n\n\tPortKey     string        `xml:\"portKey\"`\n\tRuntimeInfo *DVPortStatus `xml:\"runtimeInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsPortLinkDownEvent\"] = reflect.TypeOf((*DvsPortLinkDownEvent)(nil)).Elem()\n}\n\ntype DvsPortLinkUpEvent struct {\n\tDvsEvent\n\n\tPortKey     string        `xml:\"portKey\"`\n\tRuntimeInfo *DVPortStatus `xml:\"runtimeInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsPortLinkUpEvent\"] = reflect.TypeOf((*DvsPortLinkUpEvent)(nil)).Elem()\n}\n\ntype DvsPortReconfiguredEvent struct {\n\tDvsEvent\n\n\tPortKey       []string                   `xml:\"portKey\"`\n\tConfigChanges []ChangesInfoEventArgument `xml:\"configChanges,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsPortReconfiguredEvent\"] = reflect.TypeOf((*DvsPortReconfiguredEvent)(nil)).Elem()\n}\n\ntype DvsPortRuntimeChangeEvent struct {\n\tDvsEvent\n\n\tPortKey     string       `xml:\"portKey\"`\n\tRuntimeInfo DVPortStatus 
`xml:\"runtimeInfo\"`\n}\n\nfunc init() {\n\tt[\"DvsPortRuntimeChangeEvent\"] = reflect.TypeOf((*DvsPortRuntimeChangeEvent)(nil)).Elem()\n}\n\ntype DvsPortUnblockedEvent struct {\n\tDvsEvent\n\n\tPortKey        string        `xml:\"portKey\"`\n\tRuntimeInfo    *DVPortStatus `xml:\"runtimeInfo,omitempty\"`\n\tPrevBlockState string        `xml:\"prevBlockState,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsPortUnblockedEvent\"] = reflect.TypeOf((*DvsPortUnblockedEvent)(nil)).Elem()\n}\n\ntype DvsPortVendorSpecificStateChangeEvent struct {\n\tDvsEvent\n\n\tPortKey string `xml:\"portKey\"`\n}\n\nfunc init() {\n\tt[\"DvsPortVendorSpecificStateChangeEvent\"] = reflect.TypeOf((*DvsPortVendorSpecificStateChangeEvent)(nil)).Elem()\n}\n\ntype DvsProfile struct {\n\tApplyProfile\n\n\tKey    string              `xml:\"key\"`\n\tName   string              `xml:\"name\"`\n\tUplink []PnicUplinkProfile `xml:\"uplink,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsProfile\"] = reflect.TypeOf((*DvsProfile)(nil)).Elem()\n}\n\ntype DvsPuntNetworkRuleAction struct {\n\tDvsNetworkRuleAction\n}\n\nfunc init() {\n\tt[\"DvsPuntNetworkRuleAction\"] = reflect.TypeOf((*DvsPuntNetworkRuleAction)(nil)).Elem()\n}\n\ntype DvsRateLimitNetworkRuleAction struct {\n\tDvsNetworkRuleAction\n\n\tPacketsPerSecond int32 `xml:\"packetsPerSecond\"`\n}\n\nfunc init() {\n\tt[\"DvsRateLimitNetworkRuleAction\"] = reflect.TypeOf((*DvsRateLimitNetworkRuleAction)(nil)).Elem()\n}\n\ntype DvsReconfigureVmVnicNetworkResourcePoolRequestType struct {\n\tThis       ManagedObjectReference            `xml:\"_this\"`\n\tConfigSpec []DvsVmVnicResourcePoolConfigSpec `xml:\"configSpec\"`\n}\n\nfunc init() {\n\tt[\"DvsReconfigureVmVnicNetworkResourcePoolRequestType\"] = reflect.TypeOf((*DvsReconfigureVmVnicNetworkResourcePoolRequestType)(nil)).Elem()\n}\n\ntype DvsReconfigureVmVnicNetworkResourcePool_Task DvsReconfigureVmVnicNetworkResourcePoolRequestType\n\nfunc init() {\n\tt[\"DvsReconfigureVmVnicNetworkResourcePool_Task\"] = 
reflect.TypeOf((*DvsReconfigureVmVnicNetworkResourcePool_Task)(nil)).Elem()\n}\n\ntype DvsReconfigureVmVnicNetworkResourcePool_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype DvsReconfiguredEvent struct {\n\tDvsEvent\n\n\tConfigSpec    BaseDVSConfigSpec         `xml:\"configSpec,typeattr\"`\n\tConfigChanges *ChangesInfoEventArgument `xml:\"configChanges,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsReconfiguredEvent\"] = reflect.TypeOf((*DvsReconfiguredEvent)(nil)).Elem()\n}\n\ntype DvsRenamedEvent struct {\n\tDvsEvent\n\n\tOldName string `xml:\"oldName\"`\n\tNewName string `xml:\"newName\"`\n}\n\nfunc init() {\n\tt[\"DvsRenamedEvent\"] = reflect.TypeOf((*DvsRenamedEvent)(nil)).Elem()\n}\n\ntype DvsResourceRuntimeInfo struct {\n\tDynamicData\n\n\tCapacity                         int32                                     `xml:\"capacity,omitempty\"`\n\tUsage                            int32                                     `xml:\"usage,omitempty\"`\n\tAvailable                        int32                                     `xml:\"available,omitempty\"`\n\tAllocatedResource                []DvsVnicAllocatedResource                `xml:\"allocatedResource,omitempty\"`\n\tVmVnicNetworkResourcePoolRuntime []DvsVmVnicNetworkResourcePoolRuntimeInfo `xml:\"vmVnicNetworkResourcePoolRuntime,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsResourceRuntimeInfo\"] = reflect.TypeOf((*DvsResourceRuntimeInfo)(nil)).Elem()\n}\n\ntype DvsRestoreEvent struct {\n\tDvsEvent\n}\n\nfunc init() {\n\tt[\"DvsRestoreEvent\"] = reflect.TypeOf((*DvsRestoreEvent)(nil)).Elem()\n}\n\ntype DvsScopeViolated struct {\n\tDvsFault\n\n\tScope  []ManagedObjectReference `xml:\"scope\"`\n\tEntity ManagedObjectReference   `xml:\"entity\"`\n}\n\nfunc init() {\n\tt[\"DvsScopeViolated\"] = reflect.TypeOf((*DvsScopeViolated)(nil)).Elem()\n}\n\ntype DvsScopeViolatedFault DvsScopeViolated\n\nfunc init() {\n\tt[\"DvsScopeViolatedFault\"] = 
reflect.TypeOf((*DvsScopeViolatedFault)(nil)).Elem()\n}\n\ntype DvsServiceConsoleVNicProfile struct {\n\tDvsVNicProfile\n}\n\nfunc init() {\n\tt[\"DvsServiceConsoleVNicProfile\"] = reflect.TypeOf((*DvsServiceConsoleVNicProfile)(nil)).Elem()\n}\n\ntype DvsSingleIpPort struct {\n\tDvsIpPort\n\n\tPortNumber int32 `xml:\"portNumber\"`\n}\n\nfunc init() {\n\tt[\"DvsSingleIpPort\"] = reflect.TypeOf((*DvsSingleIpPort)(nil)).Elem()\n}\n\ntype DvsSystemTrafficNetworkRuleQualifier struct {\n\tDvsNetworkRuleQualifier\n\n\tTypeOfSystemTraffic *StringExpression `xml:\"typeOfSystemTraffic,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsSystemTrafficNetworkRuleQualifier\"] = reflect.TypeOf((*DvsSystemTrafficNetworkRuleQualifier)(nil)).Elem()\n}\n\ntype DvsTrafficFilterConfig struct {\n\tDvsFilterConfig\n\n\tTrafficRuleset *DvsTrafficRuleset `xml:\"trafficRuleset,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsTrafficFilterConfig\"] = reflect.TypeOf((*DvsTrafficFilterConfig)(nil)).Elem()\n}\n\ntype DvsTrafficFilterConfigSpec struct {\n\tDvsTrafficFilterConfig\n\n\tOperation string `xml:\"operation\"`\n}\n\nfunc init() {\n\tt[\"DvsTrafficFilterConfigSpec\"] = reflect.TypeOf((*DvsTrafficFilterConfigSpec)(nil)).Elem()\n}\n\ntype DvsTrafficRule struct {\n\tDynamicData\n\n\tKey         string                        `xml:\"key,omitempty\"`\n\tDescription string                        `xml:\"description,omitempty\"`\n\tSequence    int32                         `xml:\"sequence,omitempty\"`\n\tQualifier   []BaseDvsNetworkRuleQualifier `xml:\"qualifier,omitempty,typeattr\"`\n\tAction      BaseDvsNetworkRuleAction      `xml:\"action,omitempty,typeattr\"`\n\tDirection   string                        `xml:\"direction,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsTrafficRule\"] = reflect.TypeOf((*DvsTrafficRule)(nil)).Elem()\n}\n\ntype DvsTrafficRuleset struct {\n\tDynamicData\n\n\tKey        string           `xml:\"key,omitempty\"`\n\tEnabled    *bool            `xml:\"enabled\"`\n\tPrecedence int32        
    `xml:\"precedence,omitempty\"`\n\tRules      []DvsTrafficRule `xml:\"rules,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsTrafficRuleset\"] = reflect.TypeOf((*DvsTrafficRuleset)(nil)).Elem()\n}\n\ntype DvsUpdateTagNetworkRuleAction struct {\n\tDvsNetworkRuleAction\n\n\tQosTag  int32 `xml:\"qosTag,omitempty\"`\n\tDscpTag int32 `xml:\"dscpTag,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsUpdateTagNetworkRuleAction\"] = reflect.TypeOf((*DvsUpdateTagNetworkRuleAction)(nil)).Elem()\n}\n\ntype DvsUpgradeAvailableEvent struct {\n\tDvsEvent\n\n\tProductInfo DistributedVirtualSwitchProductSpec `xml:\"productInfo\"`\n}\n\nfunc init() {\n\tt[\"DvsUpgradeAvailableEvent\"] = reflect.TypeOf((*DvsUpgradeAvailableEvent)(nil)).Elem()\n}\n\ntype DvsUpgradeInProgressEvent struct {\n\tDvsEvent\n\n\tProductInfo DistributedVirtualSwitchProductSpec `xml:\"productInfo\"`\n}\n\nfunc init() {\n\tt[\"DvsUpgradeInProgressEvent\"] = reflect.TypeOf((*DvsUpgradeInProgressEvent)(nil)).Elem()\n}\n\ntype DvsUpgradeRejectedEvent struct {\n\tDvsEvent\n\n\tProductInfo DistributedVirtualSwitchProductSpec `xml:\"productInfo\"`\n}\n\nfunc init() {\n\tt[\"DvsUpgradeRejectedEvent\"] = reflect.TypeOf((*DvsUpgradeRejectedEvent)(nil)).Elem()\n}\n\ntype DvsUpgradedEvent struct {\n\tDvsEvent\n\n\tProductInfo DistributedVirtualSwitchProductSpec `xml:\"productInfo\"`\n}\n\nfunc init() {\n\tt[\"DvsUpgradedEvent\"] = reflect.TypeOf((*DvsUpgradedEvent)(nil)).Elem()\n}\n\ntype DvsVNicProfile struct {\n\tApplyProfile\n\n\tKey      string           `xml:\"key\"`\n\tIpConfig IpAddressProfile `xml:\"ipConfig\"`\n}\n\nfunc init() {\n\tt[\"DvsVNicProfile\"] = reflect.TypeOf((*DvsVNicProfile)(nil)).Elem()\n}\n\ntype DvsVmVnicNetworkResourcePoolRuntimeInfo struct {\n\tDynamicData\n\n\tKey               string                     `xml:\"key\"`\n\tName              string                     `xml:\"name,omitempty\"`\n\tCapacity          int32                      `xml:\"capacity,omitempty\"`\n\tUsage             int32             
         `xml:\"usage,omitempty\"`\n\tAvailable         int32                      `xml:\"available,omitempty\"`\n\tStatus            string                     `xml:\"status\"`\n\tAllocatedResource []DvsVnicAllocatedResource `xml:\"allocatedResource,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsVmVnicNetworkResourcePoolRuntimeInfo\"] = reflect.TypeOf((*DvsVmVnicNetworkResourcePoolRuntimeInfo)(nil)).Elem()\n}\n\ntype DvsVmVnicResourceAllocation struct {\n\tDynamicData\n\n\tReservationQuota int64 `xml:\"reservationQuota,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsVmVnicResourceAllocation\"] = reflect.TypeOf((*DvsVmVnicResourceAllocation)(nil)).Elem()\n}\n\ntype DvsVmVnicResourcePoolConfigSpec struct {\n\tDynamicData\n\n\tOperation      string                       `xml:\"operation\"`\n\tKey            string                       `xml:\"key,omitempty\"`\n\tConfigVersion  string                       `xml:\"configVersion,omitempty\"`\n\tAllocationInfo *DvsVmVnicResourceAllocation `xml:\"allocationInfo,omitempty\"`\n\tName           string                       `xml:\"name,omitempty\"`\n\tDescription    string                       `xml:\"description,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsVmVnicResourcePoolConfigSpec\"] = reflect.TypeOf((*DvsVmVnicResourcePoolConfigSpec)(nil)).Elem()\n}\n\ntype DvsVnicAllocatedResource struct {\n\tDynamicData\n\n\tVm          ManagedObjectReference `xml:\"vm\"`\n\tVnicKey     string                 `xml:\"vnicKey\"`\n\tReservation int64                  `xml:\"reservation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"DvsVnicAllocatedResource\"] = reflect.TypeOf((*DvsVnicAllocatedResource)(nil)).Elem()\n}\n\ntype DynamicArray struct {\n\tVal []AnyType `xml:\"val,typeattr\"`\n}\n\nfunc init() {\n\tt[\"DynamicArray\"] = reflect.TypeOf((*DynamicArray)(nil)).Elem()\n}\n\ntype DynamicData struct {\n}\n\nfunc init() {\n\tt[\"DynamicData\"] = reflect.TypeOf((*DynamicData)(nil)).Elem()\n}\n\ntype DynamicProperty struct {\n\tName string  
`xml:\"name\"`\n\tVal  AnyType `xml:\"val,typeattr\"`\n}\n\nfunc init() {\n\tt[\"DynamicProperty\"] = reflect.TypeOf((*DynamicProperty)(nil)).Elem()\n}\n\ntype EVCAdmissionFailed struct {\n\tNotSupportedHostInCluster\n\n\tFaults []LocalizedMethodFault `xml:\"faults,omitempty\"`\n}\n\nfunc init() {\n\tt[\"EVCAdmissionFailed\"] = reflect.TypeOf((*EVCAdmissionFailed)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedCPUFeaturesForMode struct {\n\tEVCAdmissionFailed\n\n\tCurrentEVCModeKey string `xml:\"currentEVCModeKey\"`\n}\n\nfunc init() {\n\tt[\"EVCAdmissionFailedCPUFeaturesForMode\"] = reflect.TypeOf((*EVCAdmissionFailedCPUFeaturesForMode)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedCPUFeaturesForModeFault EVCAdmissionFailedCPUFeaturesForMode\n\nfunc init() {\n\tt[\"EVCAdmissionFailedCPUFeaturesForModeFault\"] = reflect.TypeOf((*EVCAdmissionFailedCPUFeaturesForModeFault)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedCPUModel struct {\n\tEVCAdmissionFailed\n}\n\nfunc init() {\n\tt[\"EVCAdmissionFailedCPUModel\"] = reflect.TypeOf((*EVCAdmissionFailedCPUModel)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedCPUModelFault EVCAdmissionFailedCPUModel\n\nfunc init() {\n\tt[\"EVCAdmissionFailedCPUModelFault\"] = reflect.TypeOf((*EVCAdmissionFailedCPUModelFault)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedCPUModelForMode struct {\n\tEVCAdmissionFailed\n\n\tCurrentEVCModeKey string `xml:\"currentEVCModeKey\"`\n}\n\nfunc init() {\n\tt[\"EVCAdmissionFailedCPUModelForMode\"] = reflect.TypeOf((*EVCAdmissionFailedCPUModelForMode)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedCPUModelForModeFault EVCAdmissionFailedCPUModelForMode\n\nfunc init() {\n\tt[\"EVCAdmissionFailedCPUModelForModeFault\"] = reflect.TypeOf((*EVCAdmissionFailedCPUModelForModeFault)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedCPUVendor struct {\n\tEVCAdmissionFailed\n\n\tClusterCPUVendor string `xml:\"clusterCPUVendor\"`\n\tHostCPUVendor    string `xml:\"hostCPUVendor\"`\n}\n\nfunc init() {\n\tt[\"EVCAdmissionFailedCPUVendor\"] = 
reflect.TypeOf((*EVCAdmissionFailedCPUVendor)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedCPUVendorFault EVCAdmissionFailedCPUVendor\n\nfunc init() {\n\tt[\"EVCAdmissionFailedCPUVendorFault\"] = reflect.TypeOf((*EVCAdmissionFailedCPUVendorFault)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedCPUVendorUnknown struct {\n\tEVCAdmissionFailed\n}\n\nfunc init() {\n\tt[\"EVCAdmissionFailedCPUVendorUnknown\"] = reflect.TypeOf((*EVCAdmissionFailedCPUVendorUnknown)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedCPUVendorUnknownFault EVCAdmissionFailedCPUVendorUnknown\n\nfunc init() {\n\tt[\"EVCAdmissionFailedCPUVendorUnknownFault\"] = reflect.TypeOf((*EVCAdmissionFailedCPUVendorUnknownFault)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedFault BaseEVCAdmissionFailed\n\nfunc init() {\n\tt[\"EVCAdmissionFailedFault\"] = reflect.TypeOf((*EVCAdmissionFailedFault)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedHostDisconnected struct {\n\tEVCAdmissionFailed\n}\n\nfunc init() {\n\tt[\"EVCAdmissionFailedHostDisconnected\"] = reflect.TypeOf((*EVCAdmissionFailedHostDisconnected)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedHostDisconnectedFault EVCAdmissionFailedHostDisconnected\n\nfunc init() {\n\tt[\"EVCAdmissionFailedHostDisconnectedFault\"] = reflect.TypeOf((*EVCAdmissionFailedHostDisconnectedFault)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedHostSoftware struct {\n\tEVCAdmissionFailed\n}\n\nfunc init() {\n\tt[\"EVCAdmissionFailedHostSoftware\"] = reflect.TypeOf((*EVCAdmissionFailedHostSoftware)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedHostSoftwareFault EVCAdmissionFailedHostSoftware\n\nfunc init() {\n\tt[\"EVCAdmissionFailedHostSoftwareFault\"] = reflect.TypeOf((*EVCAdmissionFailedHostSoftwareFault)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedHostSoftwareForMode struct {\n\tEVCAdmissionFailed\n}\n\nfunc init() {\n\tt[\"EVCAdmissionFailedHostSoftwareForMode\"] = reflect.TypeOf((*EVCAdmissionFailedHostSoftwareForMode)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedHostSoftwareForModeFault 
EVCAdmissionFailedHostSoftwareForMode\n\nfunc init() {\n\tt[\"EVCAdmissionFailedHostSoftwareForModeFault\"] = reflect.TypeOf((*EVCAdmissionFailedHostSoftwareForModeFault)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedVmActive struct {\n\tEVCAdmissionFailed\n}\n\nfunc init() {\n\tt[\"EVCAdmissionFailedVmActive\"] = reflect.TypeOf((*EVCAdmissionFailedVmActive)(nil)).Elem()\n}\n\ntype EVCAdmissionFailedVmActiveFault EVCAdmissionFailedVmActive\n\nfunc init() {\n\tt[\"EVCAdmissionFailedVmActiveFault\"] = reflect.TypeOf((*EVCAdmissionFailedVmActiveFault)(nil)).Elem()\n}\n\ntype EVCConfigFault struct {\n\tVimFault\n\n\tFaults []LocalizedMethodFault `xml:\"faults,omitempty\"`\n}\n\nfunc init() {\n\tt[\"EVCConfigFault\"] = reflect.TypeOf((*EVCConfigFault)(nil)).Elem()\n}\n\ntype EVCConfigFaultFault BaseEVCConfigFault\n\nfunc init() {\n\tt[\"EVCConfigFaultFault\"] = reflect.TypeOf((*EVCConfigFaultFault)(nil)).Elem()\n}\n\ntype EVCMode struct {\n\tElementDescription\n\n\tGuaranteedCPUFeatures []HostCpuIdInfo                    `xml:\"guaranteedCPUFeatures,omitempty\"`\n\tFeatureCapability     []HostFeatureCapability            `xml:\"featureCapability,omitempty\"`\n\tFeatureMask           []HostFeatureMask                  `xml:\"featureMask,omitempty\"`\n\tFeatureRequirement    []VirtualMachineFeatureRequirement `xml:\"featureRequirement,omitempty\"`\n\tVendor                string                             `xml:\"vendor\"`\n\tTrack                 []string                           `xml:\"track,omitempty\"`\n\tVendorTier            int32                              `xml:\"vendorTier\"`\n}\n\nfunc init() {\n\tt[\"EVCMode\"] = reflect.TypeOf((*EVCMode)(nil)).Elem()\n}\n\ntype EVCModeIllegalByVendor struct {\n\tEVCConfigFault\n\n\tClusterCPUVendor string `xml:\"clusterCPUVendor\"`\n\tModeCPUVendor    string `xml:\"modeCPUVendor\"`\n}\n\nfunc init() {\n\tt[\"EVCModeIllegalByVendor\"] = reflect.TypeOf((*EVCModeIllegalByVendor)(nil)).Elem()\n}\n\ntype EVCModeIllegalByVendorFault 
EVCModeIllegalByVendor\n\nfunc init() {\n\tt[\"EVCModeIllegalByVendorFault\"] = reflect.TypeOf((*EVCModeIllegalByVendorFault)(nil)).Elem()\n}\n\ntype EVCModeUnsupportedByHosts struct {\n\tEVCConfigFault\n\n\tEvcMode  string                   `xml:\"evcMode,omitempty\"`\n\tHost     []ManagedObjectReference `xml:\"host,omitempty\"`\n\tHostName []string                 `xml:\"hostName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"EVCModeUnsupportedByHosts\"] = reflect.TypeOf((*EVCModeUnsupportedByHosts)(nil)).Elem()\n}\n\ntype EVCModeUnsupportedByHostsFault EVCModeUnsupportedByHosts\n\nfunc init() {\n\tt[\"EVCModeUnsupportedByHostsFault\"] = reflect.TypeOf((*EVCModeUnsupportedByHostsFault)(nil)).Elem()\n}\n\ntype EVCUnsupportedByHostHardware struct {\n\tEVCConfigFault\n\n\tHost     []ManagedObjectReference `xml:\"host\"`\n\tHostName []string                 `xml:\"hostName\"`\n}\n\nfunc init() {\n\tt[\"EVCUnsupportedByHostHardware\"] = reflect.TypeOf((*EVCUnsupportedByHostHardware)(nil)).Elem()\n}\n\ntype EVCUnsupportedByHostHardwareFault EVCUnsupportedByHostHardware\n\nfunc init() {\n\tt[\"EVCUnsupportedByHostHardwareFault\"] = reflect.TypeOf((*EVCUnsupportedByHostHardwareFault)(nil)).Elem()\n}\n\ntype EVCUnsupportedByHostSoftware struct {\n\tEVCConfigFault\n\n\tHost     []ManagedObjectReference `xml:\"host\"`\n\tHostName []string                 `xml:\"hostName\"`\n}\n\nfunc init() {\n\tt[\"EVCUnsupportedByHostSoftware\"] = reflect.TypeOf((*EVCUnsupportedByHostSoftware)(nil)).Elem()\n}\n\ntype EVCUnsupportedByHostSoftwareFault EVCUnsupportedByHostSoftware\n\nfunc init() {\n\tt[\"EVCUnsupportedByHostSoftwareFault\"] = reflect.TypeOf((*EVCUnsupportedByHostSoftwareFault)(nil)).Elem()\n}\n\ntype EagerZeroVirtualDiskRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tName       string                  `xml:\"name\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"EagerZeroVirtualDiskRequestType\"] = 
reflect.TypeOf((*EagerZeroVirtualDiskRequestType)(nil)).Elem()\n}\n\ntype EagerZeroVirtualDisk_Task EagerZeroVirtualDiskRequestType\n\nfunc init() {\n\tt[\"EagerZeroVirtualDisk_Task\"] = reflect.TypeOf((*EagerZeroVirtualDisk_Task)(nil)).Elem()\n}\n\ntype EagerZeroVirtualDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype EightHostLimitViolated struct {\n\tVmConfigFault\n}\n\nfunc init() {\n\tt[\"EightHostLimitViolated\"] = reflect.TypeOf((*EightHostLimitViolated)(nil)).Elem()\n}\n\ntype EightHostLimitViolatedFault EightHostLimitViolated\n\nfunc init() {\n\tt[\"EightHostLimitViolatedFault\"] = reflect.TypeOf((*EightHostLimitViolatedFault)(nil)).Elem()\n}\n\ntype ElementDescription struct {\n\tDescription\n\n\tKey string `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"ElementDescription\"] = reflect.TypeOf((*ElementDescription)(nil)).Elem()\n}\n\ntype EnableAlarmActions EnableAlarmActionsRequestType\n\nfunc init() {\n\tt[\"EnableAlarmActions\"] = reflect.TypeOf((*EnableAlarmActions)(nil)).Elem()\n}\n\ntype EnableAlarmActionsRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tEntity  ManagedObjectReference `xml:\"entity\"`\n\tEnabled bool                   `xml:\"enabled\"`\n}\n\nfunc init() {\n\tt[\"EnableAlarmActionsRequestType\"] = reflect.TypeOf((*EnableAlarmActionsRequestType)(nil)).Elem()\n}\n\ntype EnableAlarmActionsResponse struct {\n}\n\ntype EnableCrypto EnableCryptoRequestType\n\nfunc init() {\n\tt[\"EnableCrypto\"] = reflect.TypeOf((*EnableCrypto)(nil)).Elem()\n}\n\ntype EnableCryptoRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tKeyPlain CryptoKeyPlain         `xml:\"keyPlain\"`\n}\n\nfunc init() {\n\tt[\"EnableCryptoRequestType\"] = reflect.TypeOf((*EnableCryptoRequestType)(nil)).Elem()\n}\n\ntype EnableCryptoResponse struct {\n}\n\ntype EnableFeature EnableFeatureRequestType\n\nfunc init() {\n\tt[\"EnableFeature\"] = 
reflect.TypeOf((*EnableFeature)(nil)).Elem()\n}\n\ntype EnableFeatureRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tHost       *ManagedObjectReference `xml:\"host,omitempty\"`\n\tFeatureKey string                  `xml:\"featureKey\"`\n}\n\nfunc init() {\n\tt[\"EnableFeatureRequestType\"] = reflect.TypeOf((*EnableFeatureRequestType)(nil)).Elem()\n}\n\ntype EnableFeatureResponse struct {\n\tReturnval bool `xml:\"returnval\"`\n}\n\ntype EnableHyperThreading EnableHyperThreadingRequestType\n\nfunc init() {\n\tt[\"EnableHyperThreading\"] = reflect.TypeOf((*EnableHyperThreading)(nil)).Elem()\n}\n\ntype EnableHyperThreadingRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"EnableHyperThreadingRequestType\"] = reflect.TypeOf((*EnableHyperThreadingRequestType)(nil)).Elem()\n}\n\ntype EnableHyperThreadingResponse struct {\n}\n\ntype EnableMultipathPath EnableMultipathPathRequestType\n\nfunc init() {\n\tt[\"EnableMultipathPath\"] = reflect.TypeOf((*EnableMultipathPath)(nil)).Elem()\n}\n\ntype EnableMultipathPathRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tPathName string                 `xml:\"pathName\"`\n}\n\nfunc init() {\n\tt[\"EnableMultipathPathRequestType\"] = reflect.TypeOf((*EnableMultipathPathRequestType)(nil)).Elem()\n}\n\ntype EnableMultipathPathResponse struct {\n}\n\ntype EnableNetworkResourceManagement EnableNetworkResourceManagementRequestType\n\nfunc init() {\n\tt[\"EnableNetworkResourceManagement\"] = reflect.TypeOf((*EnableNetworkResourceManagement)(nil)).Elem()\n}\n\ntype EnableNetworkResourceManagementRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tEnable bool                   `xml:\"enable\"`\n}\n\nfunc init() {\n\tt[\"EnableNetworkResourceManagementRequestType\"] = reflect.TypeOf((*EnableNetworkResourceManagementRequestType)(nil)).Elem()\n}\n\ntype EnableNetworkResourceManagementResponse struct {\n}\n\ntype EnableRuleset 
EnableRulesetRequestType\n\nfunc init() {\n\tt[\"EnableRuleset\"] = reflect.TypeOf((*EnableRuleset)(nil)).Elem()\n}\n\ntype EnableRulesetRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tId   string                 `xml:\"id\"`\n}\n\nfunc init() {\n\tt[\"EnableRulesetRequestType\"] = reflect.TypeOf((*EnableRulesetRequestType)(nil)).Elem()\n}\n\ntype EnableRulesetResponse struct {\n}\n\ntype EnableSecondaryVMRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tVm   ManagedObjectReference  `xml:\"vm\"`\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"EnableSecondaryVMRequestType\"] = reflect.TypeOf((*EnableSecondaryVMRequestType)(nil)).Elem()\n}\n\ntype EnableSecondaryVM_Task EnableSecondaryVMRequestType\n\nfunc init() {\n\tt[\"EnableSecondaryVM_Task\"] = reflect.TypeOf((*EnableSecondaryVM_Task)(nil)).Elem()\n}\n\ntype EnableSecondaryVM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype EnableSmartCardAuthentication EnableSmartCardAuthenticationRequestType\n\nfunc init() {\n\tt[\"EnableSmartCardAuthentication\"] = reflect.TypeOf((*EnableSmartCardAuthentication)(nil)).Elem()\n}\n\ntype EnableSmartCardAuthenticationRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"EnableSmartCardAuthenticationRequestType\"] = reflect.TypeOf((*EnableSmartCardAuthenticationRequestType)(nil)).Elem()\n}\n\ntype EnableSmartCardAuthenticationResponse struct {\n}\n\ntype EnterLockdownMode EnterLockdownModeRequestType\n\nfunc init() {\n\tt[\"EnterLockdownMode\"] = reflect.TypeOf((*EnterLockdownMode)(nil)).Elem()\n}\n\ntype EnterLockdownModeRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"EnterLockdownModeRequestType\"] = reflect.TypeOf((*EnterLockdownModeRequestType)(nil)).Elem()\n}\n\ntype EnterLockdownModeResponse struct {\n}\n\ntype EnterMaintenanceModeRequestType struct {\n\tThis           
       ManagedObjectReference `xml:\"_this\"`\n\tTimeout               int32                  `xml:\"timeout\"`\n\tEvacuatePoweredOffVms *bool                  `xml:\"evacuatePoweredOffVms\"`\n\tMaintenanceSpec       *HostMaintenanceSpec   `xml:\"maintenanceSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"EnterMaintenanceModeRequestType\"] = reflect.TypeOf((*EnterMaintenanceModeRequestType)(nil)).Elem()\n}\n\ntype EnterMaintenanceMode_Task EnterMaintenanceModeRequestType\n\nfunc init() {\n\tt[\"EnterMaintenanceMode_Task\"] = reflect.TypeOf((*EnterMaintenanceMode_Task)(nil)).Elem()\n}\n\ntype EnterMaintenanceMode_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype EnteredMaintenanceModeEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"EnteredMaintenanceModeEvent\"] = reflect.TypeOf((*EnteredMaintenanceModeEvent)(nil)).Elem()\n}\n\ntype EnteredStandbyModeEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"EnteredStandbyModeEvent\"] = reflect.TypeOf((*EnteredStandbyModeEvent)(nil)).Elem()\n}\n\ntype EnteringMaintenanceModeEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"EnteringMaintenanceModeEvent\"] = reflect.TypeOf((*EnteringMaintenanceModeEvent)(nil)).Elem()\n}\n\ntype EnteringStandbyModeEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"EnteringStandbyModeEvent\"] = reflect.TypeOf((*EnteringStandbyModeEvent)(nil)).Elem()\n}\n\ntype EntityBackup struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"EntityBackup\"] = reflect.TypeOf((*EntityBackup)(nil)).Elem()\n}\n\ntype EntityBackupConfig struct {\n\tDynamicData\n\n\tEntityType    string                  `xml:\"entityType\"`\n\tConfigBlob    []byte                  `xml:\"configBlob\"`\n\tKey           string                  `xml:\"key,omitempty\"`\n\tName          string                  `xml:\"name,omitempty\"`\n\tContainer     *ManagedObjectReference `xml:\"container,omitempty\"`\n\tConfigVersion string                  
`xml:\"configVersion,omitempty\"`\n}\n\nfunc init() {\n\tt[\"EntityBackupConfig\"] = reflect.TypeOf((*EntityBackupConfig)(nil)).Elem()\n}\n\ntype EntityEventArgument struct {\n\tEventArgument\n\n\tName string `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"EntityEventArgument\"] = reflect.TypeOf((*EntityEventArgument)(nil)).Elem()\n}\n\ntype EntityPrivilege struct {\n\tDynamicData\n\n\tEntity           ManagedObjectReference  `xml:\"entity\"`\n\tPrivAvailability []PrivilegeAvailability `xml:\"privAvailability\"`\n}\n\nfunc init() {\n\tt[\"EntityPrivilege\"] = reflect.TypeOf((*EntityPrivilege)(nil)).Elem()\n}\n\ntype EnumDescription struct {\n\tDynamicData\n\n\tKey  string                   `xml:\"key\"`\n\tTags []BaseElementDescription `xml:\"tags,typeattr\"`\n}\n\nfunc init() {\n\tt[\"EnumDescription\"] = reflect.TypeOf((*EnumDescription)(nil)).Elem()\n}\n\ntype EnvironmentBrowserConfigOptionQuerySpec struct {\n\tDynamicData\n\n\tKey     string                  `xml:\"key,omitempty\"`\n\tHost    *ManagedObjectReference `xml:\"host,omitempty\"`\n\tGuestId []string                `xml:\"guestId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"EnvironmentBrowserConfigOptionQuerySpec\"] = reflect.TypeOf((*EnvironmentBrowserConfigOptionQuerySpec)(nil)).Elem()\n}\n\ntype ErrorUpgradeEvent struct {\n\tUpgradeEvent\n}\n\nfunc init() {\n\tt[\"ErrorUpgradeEvent\"] = reflect.TypeOf((*ErrorUpgradeEvent)(nil)).Elem()\n}\n\ntype EstimateDatabaseSize EstimateDatabaseSizeRequestType\n\nfunc init() {\n\tt[\"EstimateDatabaseSize\"] = reflect.TypeOf((*EstimateDatabaseSize)(nil)).Elem()\n}\n\ntype EstimateDatabaseSizeRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tDbSizeParam DatabaseSizeParam      `xml:\"dbSizeParam\"`\n}\n\nfunc init() {\n\tt[\"EstimateDatabaseSizeRequestType\"] = reflect.TypeOf((*EstimateDatabaseSizeRequestType)(nil)).Elem()\n}\n\ntype EstimateDatabaseSizeResponse struct {\n\tReturnval DatabaseSizeEstimate `xml:\"returnval\"`\n}\n\ntype 
EstimateStorageForConsolidateSnapshotsRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"EstimateStorageForConsolidateSnapshotsRequestType\"] = reflect.TypeOf((*EstimateStorageForConsolidateSnapshotsRequestType)(nil)).Elem()\n}\n\ntype EstimateStorageForConsolidateSnapshots_Task EstimateStorageForConsolidateSnapshotsRequestType\n\nfunc init() {\n\tt[\"EstimateStorageForConsolidateSnapshots_Task\"] = reflect.TypeOf((*EstimateStorageForConsolidateSnapshots_Task)(nil)).Elem()\n}\n\ntype EstimateStorageForConsolidateSnapshots_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype EsxAgentHostManagerUpdateConfig EsxAgentHostManagerUpdateConfigRequestType\n\nfunc init() {\n\tt[\"EsxAgentHostManagerUpdateConfig\"] = reflect.TypeOf((*EsxAgentHostManagerUpdateConfig)(nil)).Elem()\n}\n\ntype EsxAgentHostManagerUpdateConfigRequestType struct {\n\tThis       ManagedObjectReference            `xml:\"_this\"`\n\tConfigInfo HostEsxAgentHostManagerConfigInfo `xml:\"configInfo\"`\n}\n\nfunc init() {\n\tt[\"EsxAgentHostManagerUpdateConfigRequestType\"] = reflect.TypeOf((*EsxAgentHostManagerUpdateConfigRequestType)(nil)).Elem()\n}\n\ntype EsxAgentHostManagerUpdateConfigResponse struct {\n}\n\ntype EvacuateVsanNodeRequestType struct {\n\tThis            ManagedObjectReference `xml:\"_this\"`\n\tMaintenanceSpec HostMaintenanceSpec    `xml:\"maintenanceSpec\"`\n\tTimeout         int32                  `xml:\"timeout\"`\n}\n\nfunc init() {\n\tt[\"EvacuateVsanNodeRequestType\"] = reflect.TypeOf((*EvacuateVsanNodeRequestType)(nil)).Elem()\n}\n\ntype EvacuateVsanNode_Task EvacuateVsanNodeRequestType\n\nfunc init() {\n\tt[\"EvacuateVsanNode_Task\"] = reflect.TypeOf((*EvacuateVsanNode_Task)(nil)).Elem()\n}\n\ntype EvacuateVsanNode_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype EvaluationLicenseSource struct {\n\tLicenseSource\n\n\tRemainingHours int64 
`xml:\"remainingHours,omitempty\"`\n}\n\nfunc init() {\n\tt[\"EvaluationLicenseSource\"] = reflect.TypeOf((*EvaluationLicenseSource)(nil)).Elem()\n}\n\ntype EvcManager EvcManagerRequestType\n\nfunc init() {\n\tt[\"EvcManager\"] = reflect.TypeOf((*EvcManager)(nil)).Elem()\n}\n\ntype EvcManagerRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"EvcManagerRequestType\"] = reflect.TypeOf((*EvcManagerRequestType)(nil)).Elem()\n}\n\ntype EvcManagerResponse struct {\n\tReturnval *ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype Event struct {\n\tDynamicData\n\n\tKey                  int32                         `xml:\"key\"`\n\tChainId              int32                         `xml:\"chainId\"`\n\tCreatedTime          time.Time                     `xml:\"createdTime\"`\n\tUserName             string                        `xml:\"userName\"`\n\tDatacenter           *DatacenterEventArgument      `xml:\"datacenter,omitempty\"`\n\tComputeResource      *ComputeResourceEventArgument `xml:\"computeResource,omitempty\"`\n\tHost                 *HostEventArgument            `xml:\"host,omitempty\"`\n\tVm                   *VmEventArgument              `xml:\"vm,omitempty\"`\n\tDs                   *DatastoreEventArgument       `xml:\"ds,omitempty\"`\n\tNet                  *NetworkEventArgument         `xml:\"net,omitempty\"`\n\tDvs                  *DvsEventArgument             `xml:\"dvs,omitempty\"`\n\tFullFormattedMessage string                        `xml:\"fullFormattedMessage,omitempty\"`\n\tChangeTag            string                        `xml:\"changeTag,omitempty\"`\n}\n\nfunc init() {\n\tt[\"Event\"] = reflect.TypeOf((*Event)(nil)).Elem()\n}\n\ntype EventAlarmExpression struct {\n\tAlarmExpression\n\n\tComparisons []EventAlarmExpressionComparison `xml:\"comparisons,omitempty\"`\n\tEventType   string                           `xml:\"eventType\"`\n\tEventTypeId string                           
`xml:\"eventTypeId,omitempty\"`\n\tObjectType  string                           `xml:\"objectType,omitempty\"`\n\tStatus      ManagedEntityStatus              `xml:\"status,omitempty\"`\n}\n\nfunc init() {\n\tt[\"EventAlarmExpression\"] = reflect.TypeOf((*EventAlarmExpression)(nil)).Elem()\n}\n\ntype EventAlarmExpressionComparison struct {\n\tDynamicData\n\n\tAttributeName string `xml:\"attributeName\"`\n\tOperator      string `xml:\"operator\"`\n\tValue         string `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"EventAlarmExpressionComparison\"] = reflect.TypeOf((*EventAlarmExpressionComparison)(nil)).Elem()\n}\n\ntype EventArgDesc struct {\n\tDynamicData\n\n\tName        string                 `xml:\"name\"`\n\tType        string                 `xml:\"type\"`\n\tDescription BaseElementDescription `xml:\"description,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"EventArgDesc\"] = reflect.TypeOf((*EventArgDesc)(nil)).Elem()\n}\n\ntype EventArgument struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"EventArgument\"] = reflect.TypeOf((*EventArgument)(nil)).Elem()\n}\n\ntype EventDescription struct {\n\tDynamicData\n\n\tCategory        []BaseElementDescription      `xml:\"category,typeattr\"`\n\tEventInfo       []EventDescriptionEventDetail `xml:\"eventInfo\"`\n\tEnumeratedTypes []EnumDescription             `xml:\"enumeratedTypes,omitempty\"`\n}\n\nfunc init() {\n\tt[\"EventDescription\"] = reflect.TypeOf((*EventDescription)(nil)).Elem()\n}\n\ntype EventDescriptionEventDetail struct {\n\tDynamicData\n\n\tKey                     string `xml:\"key\"`\n\tDescription             string `xml:\"description,omitempty\"`\n\tCategory                string `xml:\"category\"`\n\tFormatOnDatacenter      string `xml:\"formatOnDatacenter\"`\n\tFormatOnComputeResource string `xml:\"formatOnComputeResource\"`\n\tFormatOnHost            string `xml:\"formatOnHost\"`\n\tFormatOnVm              string `xml:\"formatOnVm\"`\n\tFullFormat              string 
`xml:\"fullFormat\"`\n\tLongDescription         string `xml:\"longDescription,omitempty\"`\n}\n\nfunc init() {\n\tt[\"EventDescriptionEventDetail\"] = reflect.TypeOf((*EventDescriptionEventDetail)(nil)).Elem()\n}\n\ntype EventEx struct {\n\tEvent\n\n\tEventTypeId string                `xml:\"eventTypeId\"`\n\tSeverity    string                `xml:\"severity,omitempty\"`\n\tMessage     string                `xml:\"message,omitempty\"`\n\tArguments   []KeyAnyValue         `xml:\"arguments,omitempty\"`\n\tObjectId    string                `xml:\"objectId,omitempty\"`\n\tObjectType  string                `xml:\"objectType,omitempty\"`\n\tObjectName  string                `xml:\"objectName,omitempty\"`\n\tFault       *LocalizedMethodFault `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"EventEx\"] = reflect.TypeOf((*EventEx)(nil)).Elem()\n}\n\ntype EventFilterSpec struct {\n\tDynamicData\n\n\tEntity             *EventFilterSpecByEntity   `xml:\"entity,omitempty\"`\n\tTime               *EventFilterSpecByTime     `xml:\"time,omitempty\"`\n\tUserName           *EventFilterSpecByUsername `xml:\"userName,omitempty\"`\n\tEventChainId       int32                      `xml:\"eventChainId,omitempty\"`\n\tAlarm              *ManagedObjectReference    `xml:\"alarm,omitempty\"`\n\tScheduledTask      *ManagedObjectReference    `xml:\"scheduledTask,omitempty\"`\n\tDisableFullMessage *bool                      `xml:\"disableFullMessage\"`\n\tCategory           []string                   `xml:\"category,omitempty\"`\n\tType               []string                   `xml:\"type,omitempty\"`\n\tTag                []string                   `xml:\"tag,omitempty\"`\n\tEventTypeId        []string                   `xml:\"eventTypeId,omitempty\"`\n\tMaxCount           int32                      `xml:\"maxCount,omitempty\"`\n}\n\nfunc init() {\n\tt[\"EventFilterSpec\"] = reflect.TypeOf((*EventFilterSpec)(nil)).Elem()\n}\n\ntype EventFilterSpecByEntity struct {\n\tDynamicData\n\n\tEntity  
  ManagedObjectReference         `xml:\"entity\"`\n\tRecursion EventFilterSpecRecursionOption `xml:\"recursion\"`\n}\n\nfunc init() {\n\tt[\"EventFilterSpecByEntity\"] = reflect.TypeOf((*EventFilterSpecByEntity)(nil)).Elem()\n}\n\ntype EventFilterSpecByTime struct {\n\tDynamicData\n\n\tBeginTime *time.Time `xml:\"beginTime\"`\n\tEndTime   *time.Time `xml:\"endTime\"`\n}\n\nfunc init() {\n\tt[\"EventFilterSpecByTime\"] = reflect.TypeOf((*EventFilterSpecByTime)(nil)).Elem()\n}\n\ntype EventFilterSpecByUsername struct {\n\tDynamicData\n\n\tSystemUser bool     `xml:\"systemUser\"`\n\tUserList   []string `xml:\"userList,omitempty\"`\n}\n\nfunc init() {\n\tt[\"EventFilterSpecByUsername\"] = reflect.TypeOf((*EventFilterSpecByUsername)(nil)).Elem()\n}\n\ntype ExecuteHostProfile ExecuteHostProfileRequestType\n\nfunc init() {\n\tt[\"ExecuteHostProfile\"] = reflect.TypeOf((*ExecuteHostProfile)(nil)).Elem()\n}\n\ntype ExecuteHostProfileRequestType struct {\n\tThis          ManagedObjectReference                 `xml:\"_this\"`\n\tHost          ManagedObjectReference                 `xml:\"host\"`\n\tDeferredParam []ProfileDeferredPolicyOptionParameter `xml:\"deferredParam,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ExecuteHostProfileRequestType\"] = reflect.TypeOf((*ExecuteHostProfileRequestType)(nil)).Elem()\n}\n\ntype ExecuteHostProfileResponse struct {\n\tReturnval BaseProfileExecuteResult `xml:\"returnval,typeattr\"`\n}\n\ntype ExecuteSimpleCommand ExecuteSimpleCommandRequestType\n\nfunc init() {\n\tt[\"ExecuteSimpleCommand\"] = reflect.TypeOf((*ExecuteSimpleCommand)(nil)).Elem()\n}\n\ntype ExecuteSimpleCommandRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tArguments []string               `xml:\"arguments,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ExecuteSimpleCommandRequestType\"] = reflect.TypeOf((*ExecuteSimpleCommandRequestType)(nil)).Elem()\n}\n\ntype ExecuteSimpleCommandResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype 
ExitLockdownMode ExitLockdownModeRequestType\n\nfunc init() {\n\tt[\"ExitLockdownMode\"] = reflect.TypeOf((*ExitLockdownMode)(nil)).Elem()\n}\n\ntype ExitLockdownModeRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"ExitLockdownModeRequestType\"] = reflect.TypeOf((*ExitLockdownModeRequestType)(nil)).Elem()\n}\n\ntype ExitLockdownModeResponse struct {\n}\n\ntype ExitMaintenanceModeEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"ExitMaintenanceModeEvent\"] = reflect.TypeOf((*ExitMaintenanceModeEvent)(nil)).Elem()\n}\n\ntype ExitMaintenanceModeRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tTimeout int32                  `xml:\"timeout\"`\n}\n\nfunc init() {\n\tt[\"ExitMaintenanceModeRequestType\"] = reflect.TypeOf((*ExitMaintenanceModeRequestType)(nil)).Elem()\n}\n\ntype ExitMaintenanceMode_Task ExitMaintenanceModeRequestType\n\nfunc init() {\n\tt[\"ExitMaintenanceMode_Task\"] = reflect.TypeOf((*ExitMaintenanceMode_Task)(nil)).Elem()\n}\n\ntype ExitMaintenanceMode_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ExitStandbyModeFailedEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"ExitStandbyModeFailedEvent\"] = reflect.TypeOf((*ExitStandbyModeFailedEvent)(nil)).Elem()\n}\n\ntype ExitedStandbyModeEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"ExitedStandbyModeEvent\"] = reflect.TypeOf((*ExitedStandbyModeEvent)(nil)).Elem()\n}\n\ntype ExitingStandbyModeEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"ExitingStandbyModeEvent\"] = reflect.TypeOf((*ExitingStandbyModeEvent)(nil)).Elem()\n}\n\ntype ExpandVmfsDatastore ExpandVmfsDatastoreRequestType\n\nfunc init() {\n\tt[\"ExpandVmfsDatastore\"] = reflect.TypeOf((*ExpandVmfsDatastore)(nil)).Elem()\n}\n\ntype ExpandVmfsDatastoreRequestType struct {\n\tThis      ManagedObjectReference  `xml:\"_this\"`\n\tDatastore ManagedObjectReference  `xml:\"datastore\"`\n\tSpec      
VmfsDatastoreExpandSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"ExpandVmfsDatastoreRequestType\"] = reflect.TypeOf((*ExpandVmfsDatastoreRequestType)(nil)).Elem()\n}\n\ntype ExpandVmfsDatastoreResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ExpandVmfsExtent ExpandVmfsExtentRequestType\n\nfunc init() {\n\tt[\"ExpandVmfsExtent\"] = reflect.TypeOf((*ExpandVmfsExtent)(nil)).Elem()\n}\n\ntype ExpandVmfsExtentRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tVmfsPath string                 `xml:\"vmfsPath\"`\n\tExtent   HostScsiDiskPartition  `xml:\"extent\"`\n}\n\nfunc init() {\n\tt[\"ExpandVmfsExtentRequestType\"] = reflect.TypeOf((*ExpandVmfsExtentRequestType)(nil)).Elem()\n}\n\ntype ExpandVmfsExtentResponse struct {\n}\n\ntype ExpiredAddonLicense struct {\n\tExpiredFeatureLicense\n}\n\nfunc init() {\n\tt[\"ExpiredAddonLicense\"] = reflect.TypeOf((*ExpiredAddonLicense)(nil)).Elem()\n}\n\ntype ExpiredAddonLicenseFault ExpiredAddonLicense\n\nfunc init() {\n\tt[\"ExpiredAddonLicenseFault\"] = reflect.TypeOf((*ExpiredAddonLicenseFault)(nil)).Elem()\n}\n\ntype ExpiredEditionLicense struct {\n\tExpiredFeatureLicense\n}\n\nfunc init() {\n\tt[\"ExpiredEditionLicense\"] = reflect.TypeOf((*ExpiredEditionLicense)(nil)).Elem()\n}\n\ntype ExpiredEditionLicenseFault ExpiredEditionLicense\n\nfunc init() {\n\tt[\"ExpiredEditionLicenseFault\"] = reflect.TypeOf((*ExpiredEditionLicenseFault)(nil)).Elem()\n}\n\ntype ExpiredFeatureLicense struct {\n\tNotEnoughLicenses\n\n\tFeature        string    `xml:\"feature\"`\n\tCount          int32     `xml:\"count\"`\n\tExpirationDate time.Time `xml:\"expirationDate\"`\n}\n\nfunc init() {\n\tt[\"ExpiredFeatureLicense\"] = reflect.TypeOf((*ExpiredFeatureLicense)(nil)).Elem()\n}\n\ntype ExpiredFeatureLicenseFault BaseExpiredFeatureLicense\n\nfunc init() {\n\tt[\"ExpiredFeatureLicenseFault\"] = reflect.TypeOf((*ExpiredFeatureLicenseFault)(nil)).Elem()\n}\n\ntype 
ExportAnswerFileRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tHost ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"ExportAnswerFileRequestType\"] = reflect.TypeOf((*ExportAnswerFileRequestType)(nil)).Elem()\n}\n\ntype ExportAnswerFile_Task ExportAnswerFileRequestType\n\nfunc init() {\n\tt[\"ExportAnswerFile_Task\"] = reflect.TypeOf((*ExportAnswerFile_Task)(nil)).Elem()\n}\n\ntype ExportAnswerFile_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ExportProfile ExportProfileRequestType\n\nfunc init() {\n\tt[\"ExportProfile\"] = reflect.TypeOf((*ExportProfile)(nil)).Elem()\n}\n\ntype ExportProfileRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"ExportProfileRequestType\"] = reflect.TypeOf((*ExportProfileRequestType)(nil)).Elem()\n}\n\ntype ExportProfileResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype ExportSnapshot ExportSnapshotRequestType\n\nfunc init() {\n\tt[\"ExportSnapshot\"] = reflect.TypeOf((*ExportSnapshot)(nil)).Elem()\n}\n\ntype ExportSnapshotRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"ExportSnapshotRequestType\"] = reflect.TypeOf((*ExportSnapshotRequestType)(nil)).Elem()\n}\n\ntype ExportSnapshotResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ExportVApp ExportVAppRequestType\n\nfunc init() {\n\tt[\"ExportVApp\"] = reflect.TypeOf((*ExportVApp)(nil)).Elem()\n}\n\ntype ExportVAppRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"ExportVAppRequestType\"] = reflect.TypeOf((*ExportVAppRequestType)(nil)).Elem()\n}\n\ntype ExportVAppResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ExportVm ExportVmRequestType\n\nfunc init() {\n\tt[\"ExportVm\"] = reflect.TypeOf((*ExportVm)(nil)).Elem()\n}\n\ntype ExportVmRequestType struct {\n\tThis ManagedObjectReference 
`xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"ExportVmRequestType\"] = reflect.TypeOf((*ExportVmRequestType)(nil)).Elem()\n}\n\ntype ExportVmResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ExtExtendedProductInfo struct {\n\tDynamicData\n\n\tCompanyUrl    string                  `xml:\"companyUrl,omitempty\"`\n\tProductUrl    string                  `xml:\"productUrl,omitempty\"`\n\tManagementUrl string                  `xml:\"managementUrl,omitempty\"`\n\tSelf          *ManagedObjectReference `xml:\"self,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ExtExtendedProductInfo\"] = reflect.TypeOf((*ExtExtendedProductInfo)(nil)).Elem()\n}\n\ntype ExtManagedEntityInfo struct {\n\tDynamicData\n\n\tType         string `xml:\"type\"`\n\tSmallIconUrl string `xml:\"smallIconUrl,omitempty\"`\n\tIconUrl      string `xml:\"iconUrl,omitempty\"`\n\tDescription  string `xml:\"description,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ExtManagedEntityInfo\"] = reflect.TypeOf((*ExtManagedEntityInfo)(nil)).Elem()\n}\n\ntype ExtSolutionManagerInfo struct {\n\tDynamicData\n\n\tTab          []ExtSolutionManagerInfoTabInfo `xml:\"tab,omitempty\"`\n\tSmallIconUrl string                          `xml:\"smallIconUrl,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ExtSolutionManagerInfo\"] = reflect.TypeOf((*ExtSolutionManagerInfo)(nil)).Elem()\n}\n\ntype ExtSolutionManagerInfoTabInfo struct {\n\tDynamicData\n\n\tLabel string `xml:\"label\"`\n\tUrl   string `xml:\"url\"`\n}\n\nfunc init() {\n\tt[\"ExtSolutionManagerInfoTabInfo\"] = reflect.TypeOf((*ExtSolutionManagerInfoTabInfo)(nil)).Elem()\n}\n\ntype ExtendDiskRequestType struct {\n\tThis            ManagedObjectReference `xml:\"_this\"`\n\tId              ID                     `xml:\"id\"`\n\tDatastore       ManagedObjectReference `xml:\"datastore\"`\n\tNewCapacityInMB int64                  `xml:\"newCapacityInMB\"`\n}\n\nfunc init() {\n\tt[\"ExtendDiskRequestType\"] = 
reflect.TypeOf((*ExtendDiskRequestType)(nil)).Elem()\n}\n\ntype ExtendDisk_Task ExtendDiskRequestType\n\nfunc init() {\n\tt[\"ExtendDisk_Task\"] = reflect.TypeOf((*ExtendDisk_Task)(nil)).Elem()\n}\n\ntype ExtendDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ExtendVffs ExtendVffsRequestType\n\nfunc init() {\n\tt[\"ExtendVffs\"] = reflect.TypeOf((*ExtendVffs)(nil)).Elem()\n}\n\ntype ExtendVffsRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tVffsPath   string                 `xml:\"vffsPath\"`\n\tDevicePath string                 `xml:\"devicePath\"`\n\tSpec       *HostDiskPartitionSpec `xml:\"spec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ExtendVffsRequestType\"] = reflect.TypeOf((*ExtendVffsRequestType)(nil)).Elem()\n}\n\ntype ExtendVffsResponse struct {\n}\n\ntype ExtendVirtualDiskRequestType struct {\n\tThis          ManagedObjectReference  `xml:\"_this\"`\n\tName          string                  `xml:\"name\"`\n\tDatacenter    *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n\tNewCapacityKb int64                   `xml:\"newCapacityKb\"`\n\tEagerZero     *bool                   `xml:\"eagerZero\"`\n}\n\nfunc init() {\n\tt[\"ExtendVirtualDiskRequestType\"] = reflect.TypeOf((*ExtendVirtualDiskRequestType)(nil)).Elem()\n}\n\ntype ExtendVirtualDisk_Task ExtendVirtualDiskRequestType\n\nfunc init() {\n\tt[\"ExtendVirtualDisk_Task\"] = reflect.TypeOf((*ExtendVirtualDisk_Task)(nil)).Elem()\n}\n\ntype ExtendVirtualDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ExtendVmfsDatastore ExtendVmfsDatastoreRequestType\n\nfunc init() {\n\tt[\"ExtendVmfsDatastore\"] = reflect.TypeOf((*ExtendVmfsDatastore)(nil)).Elem()\n}\n\ntype ExtendVmfsDatastoreRequestType struct {\n\tThis      ManagedObjectReference  `xml:\"_this\"`\n\tDatastore ManagedObjectReference  `xml:\"datastore\"`\n\tSpec      VmfsDatastoreExtendSpec `xml:\"spec\"`\n}\n\nfunc init() 
{\n\tt[\"ExtendVmfsDatastoreRequestType\"] = reflect.TypeOf((*ExtendVmfsDatastoreRequestType)(nil)).Elem()\n}\n\ntype ExtendVmfsDatastoreResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ExtendedDescription struct {\n\tDescription\n\n\tMessageCatalogKeyPrefix string        `xml:\"messageCatalogKeyPrefix\"`\n\tMessageArg              []KeyAnyValue `xml:\"messageArg,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ExtendedDescription\"] = reflect.TypeOf((*ExtendedDescription)(nil)).Elem()\n}\n\ntype ExtendedElementDescription struct {\n\tElementDescription\n\n\tMessageCatalogKeyPrefix string        `xml:\"messageCatalogKeyPrefix\"`\n\tMessageArg              []KeyAnyValue `xml:\"messageArg,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ExtendedElementDescription\"] = reflect.TypeOf((*ExtendedElementDescription)(nil)).Elem()\n}\n\ntype ExtendedEvent struct {\n\tGeneralEvent\n\n\tEventTypeId   string                 `xml:\"eventTypeId\"`\n\tManagedObject ManagedObjectReference `xml:\"managedObject\"`\n\tData          []ExtendedEventPair    `xml:\"data,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ExtendedEvent\"] = reflect.TypeOf((*ExtendedEvent)(nil)).Elem()\n}\n\ntype ExtendedEventPair struct {\n\tDynamicData\n\n\tKey   string `xml:\"key\"`\n\tValue string `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"ExtendedEventPair\"] = reflect.TypeOf((*ExtendedEventPair)(nil)).Elem()\n}\n\ntype ExtendedFault struct {\n\tVimFault\n\n\tFaultTypeId string     `xml:\"faultTypeId\"`\n\tData        []KeyValue `xml:\"data,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ExtendedFault\"] = reflect.TypeOf((*ExtendedFault)(nil)).Elem()\n}\n\ntype ExtendedFaultFault ExtendedFault\n\nfunc init() {\n\tt[\"ExtendedFaultFault\"] = reflect.TypeOf((*ExtendedFaultFault)(nil)).Elem()\n}\n\ntype Extension struct {\n\tDynamicData\n\n\tDescription            BaseDescription           `xml:\"description,typeattr\"`\n\tKey                    string                    `xml:\"key\"`\n\tCompany           
     string                    `xml:\"company,omitempty\"`\n\tType                   string                    `xml:\"type,omitempty\"`\n\tVersion                string                    `xml:\"version\"`\n\tSubjectName            string                    `xml:\"subjectName,omitempty\"`\n\tServer                 []ExtensionServerInfo     `xml:\"server,omitempty\"`\n\tClient                 []ExtensionClientInfo     `xml:\"client,omitempty\"`\n\tTaskList               []ExtensionTaskTypeInfo   `xml:\"taskList,omitempty\"`\n\tEventList              []ExtensionEventTypeInfo  `xml:\"eventList,omitempty\"`\n\tFaultList              []ExtensionFaultTypeInfo  `xml:\"faultList,omitempty\"`\n\tPrivilegeList          []ExtensionPrivilegeInfo  `xml:\"privilegeList,omitempty\"`\n\tResourceList           []ExtensionResourceInfo   `xml:\"resourceList,omitempty\"`\n\tLastHeartbeatTime      time.Time                 `xml:\"lastHeartbeatTime\"`\n\tHealthInfo             *ExtensionHealthInfo      `xml:\"healthInfo,omitempty\"`\n\tOvfConsumerInfo        *ExtensionOvfConsumerInfo `xml:\"ovfConsumerInfo,omitempty\"`\n\tExtendedProductInfo    *ExtExtendedProductInfo   `xml:\"extendedProductInfo,omitempty\"`\n\tManagedEntityInfo      []ExtManagedEntityInfo    `xml:\"managedEntityInfo,omitempty\"`\n\tShownInSolutionManager *bool                     `xml:\"shownInSolutionManager\"`\n\tSolutionManagerInfo    *ExtSolutionManagerInfo   `xml:\"solutionManagerInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"Extension\"] = reflect.TypeOf((*Extension)(nil)).Elem()\n}\n\ntype ExtensionClientInfo struct {\n\tDynamicData\n\n\tVersion     string          `xml:\"version\"`\n\tDescription BaseDescription `xml:\"description,typeattr\"`\n\tCompany     string          `xml:\"company\"`\n\tType        string          `xml:\"type\"`\n\tUrl         string          `xml:\"url\"`\n}\n\nfunc init() {\n\tt[\"ExtensionClientInfo\"] = reflect.TypeOf((*ExtensionClientInfo)(nil)).Elem()\n}\n\ntype 
ExtensionEventTypeInfo struct {\n\tDynamicData\n\n\tEventID         string `xml:\"eventID\"`\n\tEventTypeSchema string `xml:\"eventTypeSchema,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ExtensionEventTypeInfo\"] = reflect.TypeOf((*ExtensionEventTypeInfo)(nil)).Elem()\n}\n\ntype ExtensionFaultTypeInfo struct {\n\tDynamicData\n\n\tFaultID string `xml:\"faultID\"`\n}\n\nfunc init() {\n\tt[\"ExtensionFaultTypeInfo\"] = reflect.TypeOf((*ExtensionFaultTypeInfo)(nil)).Elem()\n}\n\ntype ExtensionHealthInfo struct {\n\tDynamicData\n\n\tUrl string `xml:\"url\"`\n}\n\nfunc init() {\n\tt[\"ExtensionHealthInfo\"] = reflect.TypeOf((*ExtensionHealthInfo)(nil)).Elem()\n}\n\ntype ExtensionManagerIpAllocationUsage struct {\n\tDynamicData\n\n\tExtensionKey string `xml:\"extensionKey\"`\n\tNumAddresses int32  `xml:\"numAddresses\"`\n}\n\nfunc init() {\n\tt[\"ExtensionManagerIpAllocationUsage\"] = reflect.TypeOf((*ExtensionManagerIpAllocationUsage)(nil)).Elem()\n}\n\ntype ExtensionOvfConsumerInfo struct {\n\tDynamicData\n\n\tCallbackUrl string   `xml:\"callbackUrl\"`\n\tSectionType []string `xml:\"sectionType\"`\n}\n\nfunc init() {\n\tt[\"ExtensionOvfConsumerInfo\"] = reflect.TypeOf((*ExtensionOvfConsumerInfo)(nil)).Elem()\n}\n\ntype ExtensionPrivilegeInfo struct {\n\tDynamicData\n\n\tPrivID        string `xml:\"privID\"`\n\tPrivGroupName string `xml:\"privGroupName\"`\n}\n\nfunc init() {\n\tt[\"ExtensionPrivilegeInfo\"] = reflect.TypeOf((*ExtensionPrivilegeInfo)(nil)).Elem()\n}\n\ntype ExtensionResourceInfo struct {\n\tDynamicData\n\n\tLocale string     `xml:\"locale\"`\n\tModule string     `xml:\"module\"`\n\tData   []KeyValue `xml:\"data\"`\n}\n\nfunc init() {\n\tt[\"ExtensionResourceInfo\"] = reflect.TypeOf((*ExtensionResourceInfo)(nil)).Elem()\n}\n\ntype ExtensionServerInfo struct {\n\tDynamicData\n\n\tUrl              string          `xml:\"url\"`\n\tDescription      BaseDescription `xml:\"description,typeattr\"`\n\tCompany          string          `xml:\"company\"`\n\tType           
  string          `xml:\"type\"`\n\tAdminEmail       []string        `xml:\"adminEmail\"`\n\tServerThumbprint string          `xml:\"serverThumbprint,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ExtensionServerInfo\"] = reflect.TypeOf((*ExtensionServerInfo)(nil)).Elem()\n}\n\ntype ExtensionTaskTypeInfo struct {\n\tDynamicData\n\n\tTaskID string `xml:\"taskID\"`\n}\n\nfunc init() {\n\tt[\"ExtensionTaskTypeInfo\"] = reflect.TypeOf((*ExtensionTaskTypeInfo)(nil)).Elem()\n}\n\ntype ExtractOvfEnvironment ExtractOvfEnvironmentRequestType\n\nfunc init() {\n\tt[\"ExtractOvfEnvironment\"] = reflect.TypeOf((*ExtractOvfEnvironment)(nil)).Elem()\n}\n\ntype ExtractOvfEnvironmentRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"ExtractOvfEnvironmentRequestType\"] = reflect.TypeOf((*ExtractOvfEnvironmentRequestType)(nil)).Elem()\n}\n\ntype ExtractOvfEnvironmentResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype FailToEnableSPBM struct {\n\tNotEnoughLicenses\n\n\tCs                ManagedObjectReference               `xml:\"cs\"`\n\tCsName            string                               `xml:\"csName\"`\n\tHostLicenseStates []ComputeResourceHostSPBMLicenseInfo `xml:\"hostLicenseStates\"`\n}\n\nfunc init() {\n\tt[\"FailToEnableSPBM\"] = reflect.TypeOf((*FailToEnableSPBM)(nil)).Elem()\n}\n\ntype FailToEnableSPBMFault FailToEnableSPBM\n\nfunc init() {\n\tt[\"FailToEnableSPBMFault\"] = reflect.TypeOf((*FailToEnableSPBMFault)(nil)).Elem()\n}\n\ntype FailToLockFaultToleranceVMs struct {\n\tRuntimeFault\n\n\tVmName          string                 `xml:\"vmName\"`\n\tVm              ManagedObjectReference `xml:\"vm\"`\n\tAlreadyLockedVm ManagedObjectReference `xml:\"alreadyLockedVm\"`\n}\n\nfunc init() {\n\tt[\"FailToLockFaultToleranceVMs\"] = reflect.TypeOf((*FailToLockFaultToleranceVMs)(nil)).Elem()\n}\n\ntype FailToLockFaultToleranceVMsFault FailToLockFaultToleranceVMs\n\nfunc init() {\n\tt[\"FailToLockFaultToleranceVMsFault\"] = 
reflect.TypeOf((*FailToLockFaultToleranceVMsFault)(nil)).Elem()\n}\n\ntype FailoverLevelRestored struct {\n\tClusterEvent\n}\n\nfunc init() {\n\tt[\"FailoverLevelRestored\"] = reflect.TypeOf((*FailoverLevelRestored)(nil)).Elem()\n}\n\ntype FailoverNodeInfo struct {\n\tDynamicData\n\n\tClusterIpSettings CustomizationIPSettings  `xml:\"clusterIpSettings\"`\n\tFailoverIp        *CustomizationIPSettings `xml:\"failoverIp,omitempty\"`\n\tBiosUuid          string                   `xml:\"biosUuid,omitempty\"`\n}\n\nfunc init() {\n\tt[\"FailoverNodeInfo\"] = reflect.TypeOf((*FailoverNodeInfo)(nil)).Elem()\n}\n\ntype FaultDomainId struct {\n\tDynamicData\n\n\tId string `xml:\"id\"`\n}\n\nfunc init() {\n\tt[\"FaultDomainId\"] = reflect.TypeOf((*FaultDomainId)(nil)).Elem()\n}\n\ntype FaultToleranceAntiAffinityViolated struct {\n\tMigrationFault\n\n\tHostName string                 `xml:\"hostName\"`\n\tHost     ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"FaultToleranceAntiAffinityViolated\"] = reflect.TypeOf((*FaultToleranceAntiAffinityViolated)(nil)).Elem()\n}\n\ntype FaultToleranceAntiAffinityViolatedFault FaultToleranceAntiAffinityViolated\n\nfunc init() {\n\tt[\"FaultToleranceAntiAffinityViolatedFault\"] = reflect.TypeOf((*FaultToleranceAntiAffinityViolatedFault)(nil)).Elem()\n}\n\ntype FaultToleranceCannotEditMem struct {\n\tVmConfigFault\n\n\tVmName string                 `xml:\"vmName\"`\n\tVm     ManagedObjectReference `xml:\"vm\"`\n}\n\nfunc init() {\n\tt[\"FaultToleranceCannotEditMem\"] = reflect.TypeOf((*FaultToleranceCannotEditMem)(nil)).Elem()\n}\n\ntype FaultToleranceCannotEditMemFault FaultToleranceCannotEditMem\n\nfunc init() {\n\tt[\"FaultToleranceCannotEditMemFault\"] = reflect.TypeOf((*FaultToleranceCannotEditMemFault)(nil)).Elem()\n}\n\ntype FaultToleranceConfigInfo struct {\n\tDynamicData\n\n\tRole          int32    `xml:\"role\"`\n\tInstanceUuids []string `xml:\"instanceUuids\"`\n\tConfigPaths   []string 
`xml:\"configPaths\"`\n\tOrphaned      *bool    `xml:\"orphaned\"`\n}\n\nfunc init() {\n\tt[\"FaultToleranceConfigInfo\"] = reflect.TypeOf((*FaultToleranceConfigInfo)(nil)).Elem()\n}\n\ntype FaultToleranceConfigSpec struct {\n\tDynamicData\n\n\tMetaDataPath    *FaultToleranceMetaSpec     `xml:\"metaDataPath,omitempty\"`\n\tSecondaryVmSpec *FaultToleranceVMConfigSpec `xml:\"secondaryVmSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"FaultToleranceConfigSpec\"] = reflect.TypeOf((*FaultToleranceConfigSpec)(nil)).Elem()\n}\n\ntype FaultToleranceCpuIncompatible struct {\n\tCpuIncompatible\n\n\tModel    bool `xml:\"model\"`\n\tFamily   bool `xml:\"family\"`\n\tStepping bool `xml:\"stepping\"`\n}\n\nfunc init() {\n\tt[\"FaultToleranceCpuIncompatible\"] = reflect.TypeOf((*FaultToleranceCpuIncompatible)(nil)).Elem()\n}\n\ntype FaultToleranceCpuIncompatibleFault FaultToleranceCpuIncompatible\n\nfunc init() {\n\tt[\"FaultToleranceCpuIncompatibleFault\"] = reflect.TypeOf((*FaultToleranceCpuIncompatibleFault)(nil)).Elem()\n}\n\ntype FaultToleranceDiskSpec struct {\n\tDynamicData\n\n\tDisk      BaseVirtualDevice      `xml:\"disk,typeattr\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"FaultToleranceDiskSpec\"] = reflect.TypeOf((*FaultToleranceDiskSpec)(nil)).Elem()\n}\n\ntype FaultToleranceMetaSpec struct {\n\tDynamicData\n\n\tMetaDataDatastore ManagedObjectReference `xml:\"metaDataDatastore\"`\n}\n\nfunc init() {\n\tt[\"FaultToleranceMetaSpec\"] = reflect.TypeOf((*FaultToleranceMetaSpec)(nil)).Elem()\n}\n\ntype FaultToleranceNeedsThickDisk struct {\n\tMigrationFault\n\n\tVmName string `xml:\"vmName\"`\n}\n\nfunc init() {\n\tt[\"FaultToleranceNeedsThickDisk\"] = reflect.TypeOf((*FaultToleranceNeedsThickDisk)(nil)).Elem()\n}\n\ntype FaultToleranceNeedsThickDiskFault FaultToleranceNeedsThickDisk\n\nfunc init() {\n\tt[\"FaultToleranceNeedsThickDiskFault\"] = reflect.TypeOf((*FaultToleranceNeedsThickDiskFault)(nil)).Elem()\n}\n\ntype 
FaultToleranceNotLicensed struct {\n\tVmFaultToleranceIssue\n\n\tHostName string `xml:\"hostName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"FaultToleranceNotLicensed\"] = reflect.TypeOf((*FaultToleranceNotLicensed)(nil)).Elem()\n}\n\ntype FaultToleranceNotLicensedFault FaultToleranceNotLicensed\n\nfunc init() {\n\tt[\"FaultToleranceNotLicensedFault\"] = reflect.TypeOf((*FaultToleranceNotLicensedFault)(nil)).Elem()\n}\n\ntype FaultToleranceNotSameBuild struct {\n\tMigrationFault\n\n\tBuild string `xml:\"build\"`\n}\n\nfunc init() {\n\tt[\"FaultToleranceNotSameBuild\"] = reflect.TypeOf((*FaultToleranceNotSameBuild)(nil)).Elem()\n}\n\ntype FaultToleranceNotSameBuildFault FaultToleranceNotSameBuild\n\nfunc init() {\n\tt[\"FaultToleranceNotSameBuildFault\"] = reflect.TypeOf((*FaultToleranceNotSameBuildFault)(nil)).Elem()\n}\n\ntype FaultTolerancePrimaryConfigInfo struct {\n\tFaultToleranceConfigInfo\n\n\tSecondaries []ManagedObjectReference `xml:\"secondaries\"`\n}\n\nfunc init() {\n\tt[\"FaultTolerancePrimaryConfigInfo\"] = reflect.TypeOf((*FaultTolerancePrimaryConfigInfo)(nil)).Elem()\n}\n\ntype FaultTolerancePrimaryPowerOnNotAttempted struct {\n\tVmFaultToleranceIssue\n\n\tSecondaryVm ManagedObjectReference `xml:\"secondaryVm\"`\n\tPrimaryVm   ManagedObjectReference `xml:\"primaryVm\"`\n}\n\nfunc init() {\n\tt[\"FaultTolerancePrimaryPowerOnNotAttempted\"] = reflect.TypeOf((*FaultTolerancePrimaryPowerOnNotAttempted)(nil)).Elem()\n}\n\ntype FaultTolerancePrimaryPowerOnNotAttemptedFault FaultTolerancePrimaryPowerOnNotAttempted\n\nfunc init() {\n\tt[\"FaultTolerancePrimaryPowerOnNotAttemptedFault\"] = reflect.TypeOf((*FaultTolerancePrimaryPowerOnNotAttemptedFault)(nil)).Elem()\n}\n\ntype FaultToleranceSecondaryConfigInfo struct {\n\tFaultToleranceConfigInfo\n\n\tPrimaryVM ManagedObjectReference `xml:\"primaryVM\"`\n}\n\nfunc init() {\n\tt[\"FaultToleranceSecondaryConfigInfo\"] = reflect.TypeOf((*FaultToleranceSecondaryConfigInfo)(nil)).Elem()\n}\n\ntype 
FaultToleranceSecondaryOpResult struct {\n\tDynamicData\n\n\tVm               ManagedObjectReference  `xml:\"vm\"`\n\tPowerOnAttempted bool                    `xml:\"powerOnAttempted\"`\n\tPowerOnResult    *ClusterPowerOnVmResult `xml:\"powerOnResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"FaultToleranceSecondaryOpResult\"] = reflect.TypeOf((*FaultToleranceSecondaryOpResult)(nil)).Elem()\n}\n\ntype FaultToleranceVMConfigSpec struct {\n\tDynamicData\n\n\tVmConfig *ManagedObjectReference  `xml:\"vmConfig,omitempty\"`\n\tDisks    []FaultToleranceDiskSpec `xml:\"disks,omitempty\"`\n}\n\nfunc init() {\n\tt[\"FaultToleranceVMConfigSpec\"] = reflect.TypeOf((*FaultToleranceVMConfigSpec)(nil)).Elem()\n}\n\ntype FaultToleranceVmNotDasProtected struct {\n\tVimFault\n\n\tVm     ManagedObjectReference `xml:\"vm\"`\n\tVmName string                 `xml:\"vmName\"`\n}\n\nfunc init() {\n\tt[\"FaultToleranceVmNotDasProtected\"] = reflect.TypeOf((*FaultToleranceVmNotDasProtected)(nil)).Elem()\n}\n\ntype FaultToleranceVmNotDasProtectedFault FaultToleranceVmNotDasProtected\n\nfunc init() {\n\tt[\"FaultToleranceVmNotDasProtectedFault\"] = reflect.TypeOf((*FaultToleranceVmNotDasProtectedFault)(nil)).Elem()\n}\n\ntype FcoeConfig struct {\n\tDynamicData\n\n\tPriorityClass int32                      `xml:\"priorityClass\"`\n\tSourceMac     string                     `xml:\"sourceMac\"`\n\tVlanRange     []FcoeConfigVlanRange      `xml:\"vlanRange\"`\n\tCapabilities  FcoeConfigFcoeCapabilities `xml:\"capabilities\"`\n\tFcoeActive    bool                       `xml:\"fcoeActive\"`\n}\n\nfunc init() {\n\tt[\"FcoeConfig\"] = reflect.TypeOf((*FcoeConfig)(nil)).Elem()\n}\n\ntype FcoeConfigFcoeCapabilities struct {\n\tDynamicData\n\n\tPriorityClass    bool `xml:\"priorityClass\"`\n\tSourceMacAddress bool `xml:\"sourceMacAddress\"`\n\tVlanRange        bool `xml:\"vlanRange\"`\n}\n\nfunc init() {\n\tt[\"FcoeConfigFcoeCapabilities\"] = 
reflect.TypeOf((*FcoeConfigFcoeCapabilities)(nil)).Elem()\n}\n\ntype FcoeConfigFcoeSpecification struct {\n\tDynamicData\n\n\tUnderlyingPnic string                `xml:\"underlyingPnic\"`\n\tPriorityClass  int32                 `xml:\"priorityClass,omitempty\"`\n\tSourceMac      string                `xml:\"sourceMac,omitempty\"`\n\tVlanRange      []FcoeConfigVlanRange `xml:\"vlanRange,omitempty\"`\n}\n\nfunc init() {\n\tt[\"FcoeConfigFcoeSpecification\"] = reflect.TypeOf((*FcoeConfigFcoeSpecification)(nil)).Elem()\n}\n\ntype FcoeConfigVlanRange struct {\n\tDynamicData\n\n\tVlanLow  int32 `xml:\"vlanLow\"`\n\tVlanHigh int32 `xml:\"vlanHigh\"`\n}\n\nfunc init() {\n\tt[\"FcoeConfigVlanRange\"] = reflect.TypeOf((*FcoeConfigVlanRange)(nil)).Elem()\n}\n\ntype FcoeFault struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"FcoeFault\"] = reflect.TypeOf((*FcoeFault)(nil)).Elem()\n}\n\ntype FcoeFaultFault BaseFcoeFault\n\nfunc init() {\n\tt[\"FcoeFaultFault\"] = reflect.TypeOf((*FcoeFaultFault)(nil)).Elem()\n}\n\ntype FcoeFaultPnicHasNoPortSet struct {\n\tFcoeFault\n\n\tNicDevice string `xml:\"nicDevice\"`\n}\n\nfunc init() {\n\tt[\"FcoeFaultPnicHasNoPortSet\"] = reflect.TypeOf((*FcoeFaultPnicHasNoPortSet)(nil)).Elem()\n}\n\ntype FcoeFaultPnicHasNoPortSetFault FcoeFaultPnicHasNoPortSet\n\nfunc init() {\n\tt[\"FcoeFaultPnicHasNoPortSetFault\"] = reflect.TypeOf((*FcoeFaultPnicHasNoPortSetFault)(nil)).Elem()\n}\n\ntype FeatureRequirementsNotMet struct {\n\tVirtualHardwareCompatibilityIssue\n\n\tFeatureRequirement []VirtualMachineFeatureRequirement `xml:\"featureRequirement,omitempty\"`\n\tVm                 *ManagedObjectReference            `xml:\"vm,omitempty\"`\n\tHost               *ManagedObjectReference            `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"FeatureRequirementsNotMet\"] = reflect.TypeOf((*FeatureRequirementsNotMet)(nil)).Elem()\n}\n\ntype FeatureRequirementsNotMetFault FeatureRequirementsNotMet\n\nfunc init() {\n\tt[\"FeatureRequirementsNotMetFault\"] 
= reflect.TypeOf((*FeatureRequirementsNotMetFault)(nil)).Elem()\n}\n\ntype FetchDVPortKeys FetchDVPortKeysRequestType\n\nfunc init() {\n\tt[\"FetchDVPortKeys\"] = reflect.TypeOf((*FetchDVPortKeys)(nil)).Elem()\n}\n\ntype FetchDVPortKeysRequestType struct {\n\tThis     ManagedObjectReference                `xml:\"_this\"`\n\tCriteria *DistributedVirtualSwitchPortCriteria `xml:\"criteria,omitempty\"`\n}\n\nfunc init() {\n\tt[\"FetchDVPortKeysRequestType\"] = reflect.TypeOf((*FetchDVPortKeysRequestType)(nil)).Elem()\n}\n\ntype FetchDVPortKeysResponse struct {\n\tReturnval []string `xml:\"returnval,omitempty\"`\n}\n\ntype FetchDVPorts FetchDVPortsRequestType\n\nfunc init() {\n\tt[\"FetchDVPorts\"] = reflect.TypeOf((*FetchDVPorts)(nil)).Elem()\n}\n\ntype FetchDVPortsRequestType struct {\n\tThis     ManagedObjectReference                `xml:\"_this\"`\n\tCriteria *DistributedVirtualSwitchPortCriteria `xml:\"criteria,omitempty\"`\n}\n\nfunc init() {\n\tt[\"FetchDVPortsRequestType\"] = reflect.TypeOf((*FetchDVPortsRequestType)(nil)).Elem()\n}\n\ntype FetchDVPortsResponse struct {\n\tReturnval []DistributedVirtualPort `xml:\"returnval,omitempty\"`\n}\n\ntype FetchSystemEventLog FetchSystemEventLogRequestType\n\nfunc init() {\n\tt[\"FetchSystemEventLog\"] = reflect.TypeOf((*FetchSystemEventLog)(nil)).Elem()\n}\n\ntype FetchSystemEventLogRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"FetchSystemEventLogRequestType\"] = reflect.TypeOf((*FetchSystemEventLogRequestType)(nil)).Elem()\n}\n\ntype FetchSystemEventLogResponse struct {\n\tReturnval []SystemEventInfo `xml:\"returnval,omitempty\"`\n}\n\ntype FetchUserPrivilegeOnEntities FetchUserPrivilegeOnEntitiesRequestType\n\nfunc init() {\n\tt[\"FetchUserPrivilegeOnEntities\"] = reflect.TypeOf((*FetchUserPrivilegeOnEntities)(nil)).Elem()\n}\n\ntype FetchUserPrivilegeOnEntitiesRequestType struct {\n\tThis     ManagedObjectReference   `xml:\"_this\"`\n\tEntities []ManagedObjectReference 
`xml:\"entities\"`\n\tUserName string                   `xml:\"userName\"`\n}\n\nfunc init() {\n\tt[\"FetchUserPrivilegeOnEntitiesRequestType\"] = reflect.TypeOf((*FetchUserPrivilegeOnEntitiesRequestType)(nil)).Elem()\n}\n\ntype FetchUserPrivilegeOnEntitiesResponse struct {\n\tReturnval []UserPrivilegeResult `xml:\"returnval,omitempty\"`\n}\n\ntype FileAlreadyExists struct {\n\tFileFault\n}\n\nfunc init() {\n\tt[\"FileAlreadyExists\"] = reflect.TypeOf((*FileAlreadyExists)(nil)).Elem()\n}\n\ntype FileAlreadyExistsFault FileAlreadyExists\n\nfunc init() {\n\tt[\"FileAlreadyExistsFault\"] = reflect.TypeOf((*FileAlreadyExistsFault)(nil)).Elem()\n}\n\ntype FileBackedPortNotSupported struct {\n\tDeviceNotSupported\n}\n\nfunc init() {\n\tt[\"FileBackedPortNotSupported\"] = reflect.TypeOf((*FileBackedPortNotSupported)(nil)).Elem()\n}\n\ntype FileBackedPortNotSupportedFault FileBackedPortNotSupported\n\nfunc init() {\n\tt[\"FileBackedPortNotSupportedFault\"] = reflect.TypeOf((*FileBackedPortNotSupportedFault)(nil)).Elem()\n}\n\ntype FileBackedVirtualDiskSpec struct {\n\tVirtualDiskSpec\n\n\tCapacityKb int64                           `xml:\"capacityKb\"`\n\tProfile    []BaseVirtualMachineProfileSpec `xml:\"profile,omitempty,typeattr\"`\n\tCrypto     BaseCryptoSpec                  `xml:\"crypto,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"FileBackedVirtualDiskSpec\"] = reflect.TypeOf((*FileBackedVirtualDiskSpec)(nil)).Elem()\n}\n\ntype FileFault struct {\n\tVimFault\n\n\tFile string `xml:\"file\"`\n}\n\nfunc init() {\n\tt[\"FileFault\"] = reflect.TypeOf((*FileFault)(nil)).Elem()\n}\n\ntype FileFaultFault BaseFileFault\n\nfunc init() {\n\tt[\"FileFaultFault\"] = reflect.TypeOf((*FileFaultFault)(nil)).Elem()\n}\n\ntype FileInfo struct {\n\tDynamicData\n\n\tPath         string     `xml:\"path\"`\n\tFriendlyName string     `xml:\"friendlyName,omitempty\"`\n\tFileSize     int64      `xml:\"fileSize,omitempty\"`\n\tModification *time.Time `xml:\"modification\"`\n\tOwner        
string     `xml:\"owner,omitempty\"`\n}\n\nfunc init() {\n\tt[\"FileInfo\"] = reflect.TypeOf((*FileInfo)(nil)).Elem()\n}\n\ntype FileLocked struct {\n\tFileFault\n}\n\nfunc init() {\n\tt[\"FileLocked\"] = reflect.TypeOf((*FileLocked)(nil)).Elem()\n}\n\ntype FileLockedFault FileLocked\n\nfunc init() {\n\tt[\"FileLockedFault\"] = reflect.TypeOf((*FileLockedFault)(nil)).Elem()\n}\n\ntype FileNameTooLong struct {\n\tFileFault\n}\n\nfunc init() {\n\tt[\"FileNameTooLong\"] = reflect.TypeOf((*FileNameTooLong)(nil)).Elem()\n}\n\ntype FileNameTooLongFault FileNameTooLong\n\nfunc init() {\n\tt[\"FileNameTooLongFault\"] = reflect.TypeOf((*FileNameTooLongFault)(nil)).Elem()\n}\n\ntype FileNotFound struct {\n\tFileFault\n}\n\nfunc init() {\n\tt[\"FileNotFound\"] = reflect.TypeOf((*FileNotFound)(nil)).Elem()\n}\n\ntype FileNotFoundFault FileNotFound\n\nfunc init() {\n\tt[\"FileNotFoundFault\"] = reflect.TypeOf((*FileNotFoundFault)(nil)).Elem()\n}\n\ntype FileNotWritable struct {\n\tFileFault\n}\n\nfunc init() {\n\tt[\"FileNotWritable\"] = reflect.TypeOf((*FileNotWritable)(nil)).Elem()\n}\n\ntype FileNotWritableFault FileNotWritable\n\nfunc init() {\n\tt[\"FileNotWritableFault\"] = reflect.TypeOf((*FileNotWritableFault)(nil)).Elem()\n}\n\ntype FileQuery struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"FileQuery\"] = reflect.TypeOf((*FileQuery)(nil)).Elem()\n}\n\ntype FileQueryFlags struct {\n\tDynamicData\n\n\tFileType     bool  `xml:\"fileType\"`\n\tFileSize     bool  `xml:\"fileSize\"`\n\tModification bool  `xml:\"modification\"`\n\tFileOwner    *bool `xml:\"fileOwner\"`\n}\n\nfunc init() {\n\tt[\"FileQueryFlags\"] = reflect.TypeOf((*FileQueryFlags)(nil)).Elem()\n}\n\ntype FileTooLarge struct {\n\tFileFault\n\n\tDatastore   string `xml:\"datastore\"`\n\tFileSize    int64  `xml:\"fileSize\"`\n\tMaxFileSize int64  `xml:\"maxFileSize,omitempty\"`\n}\n\nfunc init() {\n\tt[\"FileTooLarge\"] = reflect.TypeOf((*FileTooLarge)(nil)).Elem()\n}\n\ntype FileTooLargeFault 
FileTooLarge\n\nfunc init() {\n\tt[\"FileTooLargeFault\"] = reflect.TypeOf((*FileTooLargeFault)(nil)).Elem()\n}\n\ntype FileTransferInformation struct {\n\tDynamicData\n\n\tAttributes BaseGuestFileAttributes `xml:\"attributes,typeattr\"`\n\tSize       int64                   `xml:\"size\"`\n\tUrl        string                  `xml:\"url\"`\n}\n\nfunc init() {\n\tt[\"FileTransferInformation\"] = reflect.TypeOf((*FileTransferInformation)(nil)).Elem()\n}\n\ntype FilesystemQuiesceFault struct {\n\tSnapshotFault\n}\n\nfunc init() {\n\tt[\"FilesystemQuiesceFault\"] = reflect.TypeOf((*FilesystemQuiesceFault)(nil)).Elem()\n}\n\ntype FilesystemQuiesceFaultFault FilesystemQuiesceFault\n\nfunc init() {\n\tt[\"FilesystemQuiesceFaultFault\"] = reflect.TypeOf((*FilesystemQuiesceFaultFault)(nil)).Elem()\n}\n\ntype FilterInUse struct {\n\tResourceInUse\n\n\tDisk []VirtualDiskId `xml:\"disk,omitempty\"`\n}\n\nfunc init() {\n\tt[\"FilterInUse\"] = reflect.TypeOf((*FilterInUse)(nil)).Elem()\n}\n\ntype FilterInUseFault FilterInUse\n\nfunc init() {\n\tt[\"FilterInUseFault\"] = reflect.TypeOf((*FilterInUseFault)(nil)).Elem()\n}\n\ntype FindAllByDnsName FindAllByDnsNameRequestType\n\nfunc init() {\n\tt[\"FindAllByDnsName\"] = reflect.TypeOf((*FindAllByDnsName)(nil)).Elem()\n}\n\ntype FindAllByDnsNameRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n\tDnsName    string                  `xml:\"dnsName\"`\n\tVmSearch   bool                    `xml:\"vmSearch\"`\n}\n\nfunc init() {\n\tt[\"FindAllByDnsNameRequestType\"] = reflect.TypeOf((*FindAllByDnsNameRequestType)(nil)).Elem()\n}\n\ntype FindAllByDnsNameResponse struct {\n\tReturnval []ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype FindAllByIp FindAllByIpRequestType\n\nfunc init() {\n\tt[\"FindAllByIp\"] = reflect.TypeOf((*FindAllByIp)(nil)).Elem()\n}\n\ntype FindAllByIpRequestType struct {\n\tThis       ManagedObjectReference  
`xml:\"_this\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n\tIp         string                  `xml:\"ip\"`\n\tVmSearch   bool                    `xml:\"vmSearch\"`\n}\n\nfunc init() {\n\tt[\"FindAllByIpRequestType\"] = reflect.TypeOf((*FindAllByIpRequestType)(nil)).Elem()\n}\n\ntype FindAllByIpResponse struct {\n\tReturnval []ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype FindAllByUuid FindAllByUuidRequestType\n\nfunc init() {\n\tt[\"FindAllByUuid\"] = reflect.TypeOf((*FindAllByUuid)(nil)).Elem()\n}\n\ntype FindAllByUuidRequestType struct {\n\tThis         ManagedObjectReference  `xml:\"_this\"`\n\tDatacenter   *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n\tUuid         string                  `xml:\"uuid\"`\n\tVmSearch     bool                    `xml:\"vmSearch\"`\n\tInstanceUuid *bool                   `xml:\"instanceUuid\"`\n}\n\nfunc init() {\n\tt[\"FindAllByUuidRequestType\"] = reflect.TypeOf((*FindAllByUuidRequestType)(nil)).Elem()\n}\n\ntype FindAllByUuidResponse struct {\n\tReturnval []ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype FindAssociatedProfile FindAssociatedProfileRequestType\n\nfunc init() {\n\tt[\"FindAssociatedProfile\"] = reflect.TypeOf((*FindAssociatedProfile)(nil)).Elem()\n}\n\ntype FindAssociatedProfileRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tEntity ManagedObjectReference `xml:\"entity\"`\n}\n\nfunc init() {\n\tt[\"FindAssociatedProfileRequestType\"] = reflect.TypeOf((*FindAssociatedProfileRequestType)(nil)).Elem()\n}\n\ntype FindAssociatedProfileResponse struct {\n\tReturnval []ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype FindByDatastorePath FindByDatastorePathRequestType\n\nfunc init() {\n\tt[\"FindByDatastorePath\"] = reflect.TypeOf((*FindByDatastorePath)(nil)).Elem()\n}\n\ntype FindByDatastorePathRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tDatacenter ManagedObjectReference 
`xml:\"datacenter\"`\n\tPath       string                 `xml:\"path\"`\n}\n\nfunc init() {\n\tt[\"FindByDatastorePathRequestType\"] = reflect.TypeOf((*FindByDatastorePathRequestType)(nil)).Elem()\n}\n\ntype FindByDatastorePathResponse struct {\n\tReturnval *ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype FindByDnsName FindByDnsNameRequestType\n\nfunc init() {\n\tt[\"FindByDnsName\"] = reflect.TypeOf((*FindByDnsName)(nil)).Elem()\n}\n\ntype FindByDnsNameRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n\tDnsName    string                  `xml:\"dnsName\"`\n\tVmSearch   bool                    `xml:\"vmSearch\"`\n}\n\nfunc init() {\n\tt[\"FindByDnsNameRequestType\"] = reflect.TypeOf((*FindByDnsNameRequestType)(nil)).Elem()\n}\n\ntype FindByDnsNameResponse struct {\n\tReturnval *ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype FindByInventoryPath FindByInventoryPathRequestType\n\nfunc init() {\n\tt[\"FindByInventoryPath\"] = reflect.TypeOf((*FindByInventoryPath)(nil)).Elem()\n}\n\ntype FindByInventoryPathRequestType struct {\n\tThis          ManagedObjectReference `xml:\"_this\"`\n\tInventoryPath string                 `xml:\"inventoryPath\"`\n}\n\nfunc init() {\n\tt[\"FindByInventoryPathRequestType\"] = reflect.TypeOf((*FindByInventoryPathRequestType)(nil)).Elem()\n}\n\ntype FindByInventoryPathResponse struct {\n\tReturnval *ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype FindByIp FindByIpRequestType\n\nfunc init() {\n\tt[\"FindByIp\"] = reflect.TypeOf((*FindByIp)(nil)).Elem()\n}\n\ntype FindByIpRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n\tIp         string                  `xml:\"ip\"`\n\tVmSearch   bool                    `xml:\"vmSearch\"`\n}\n\nfunc init() {\n\tt[\"FindByIpRequestType\"] = 
reflect.TypeOf((*FindByIpRequestType)(nil)).Elem()\n}\n\ntype FindByIpResponse struct {\n\tReturnval *ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype FindByUuid FindByUuidRequestType\n\nfunc init() {\n\tt[\"FindByUuid\"] = reflect.TypeOf((*FindByUuid)(nil)).Elem()\n}\n\ntype FindByUuidRequestType struct {\n\tThis         ManagedObjectReference  `xml:\"_this\"`\n\tDatacenter   *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n\tUuid         string                  `xml:\"uuid\"`\n\tVmSearch     bool                    `xml:\"vmSearch\"`\n\tInstanceUuid *bool                   `xml:\"instanceUuid\"`\n}\n\nfunc init() {\n\tt[\"FindByUuidRequestType\"] = reflect.TypeOf((*FindByUuidRequestType)(nil)).Elem()\n}\n\ntype FindByUuidResponse struct {\n\tReturnval *ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype FindChild FindChildRequestType\n\nfunc init() {\n\tt[\"FindChild\"] = reflect.TypeOf((*FindChild)(nil)).Elem()\n}\n\ntype FindChildRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tEntity ManagedObjectReference `xml:\"entity\"`\n\tName   string                 `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"FindChildRequestType\"] = reflect.TypeOf((*FindChildRequestType)(nil)).Elem()\n}\n\ntype FindChildResponse struct {\n\tReturnval *ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype FindExtension FindExtensionRequestType\n\nfunc init() {\n\tt[\"FindExtension\"] = reflect.TypeOf((*FindExtension)(nil)).Elem()\n}\n\ntype FindExtensionRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tExtensionKey string                 `xml:\"extensionKey\"`\n}\n\nfunc init() {\n\tt[\"FindExtensionRequestType\"] = reflect.TypeOf((*FindExtensionRequestType)(nil)).Elem()\n}\n\ntype FindExtensionResponse struct {\n\tReturnval *Extension `xml:\"returnval,omitempty\"`\n}\n\ntype FindRulesForVm FindRulesForVmRequestType\n\nfunc init() {\n\tt[\"FindRulesForVm\"] = 
reflect.TypeOf((*FindRulesForVm)(nil)).Elem()\n}\n\ntype FindRulesForVmRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tVm   ManagedObjectReference `xml:\"vm\"`\n}\n\nfunc init() {\n\tt[\"FindRulesForVmRequestType\"] = reflect.TypeOf((*FindRulesForVmRequestType)(nil)).Elem()\n}\n\ntype FindRulesForVmResponse struct {\n\tReturnval []BaseClusterRuleInfo `xml:\"returnval,omitempty,typeattr\"`\n}\n\ntype FirewallProfile struct {\n\tApplyProfile\n\n\tRuleset []FirewallProfileRulesetProfile `xml:\"ruleset,omitempty\"`\n}\n\nfunc init() {\n\tt[\"FirewallProfile\"] = reflect.TypeOf((*FirewallProfile)(nil)).Elem()\n}\n\ntype FirewallProfileRulesetProfile struct {\n\tApplyProfile\n\n\tKey string `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"FirewallProfileRulesetProfile\"] = reflect.TypeOf((*FirewallProfileRulesetProfile)(nil)).Elem()\n}\n\ntype FloatOption struct {\n\tOptionType\n\n\tMin          float32 `xml:\"min\"`\n\tMax          float32 `xml:\"max\"`\n\tDefaultValue float32 `xml:\"defaultValue\"`\n}\n\nfunc init() {\n\tt[\"FloatOption\"] = reflect.TypeOf((*FloatOption)(nil)).Elem()\n}\n\ntype FloppyImageFileInfo struct {\n\tFileInfo\n}\n\nfunc init() {\n\tt[\"FloppyImageFileInfo\"] = reflect.TypeOf((*FloppyImageFileInfo)(nil)).Elem()\n}\n\ntype FloppyImageFileQuery struct {\n\tFileQuery\n}\n\nfunc init() {\n\tt[\"FloppyImageFileQuery\"] = reflect.TypeOf((*FloppyImageFileQuery)(nil)).Elem()\n}\n\ntype FolderEventArgument struct {\n\tEntityEventArgument\n\n\tFolder ManagedObjectReference `xml:\"folder\"`\n}\n\nfunc init() {\n\tt[\"FolderEventArgument\"] = reflect.TypeOf((*FolderEventArgument)(nil)).Elem()\n}\n\ntype FolderFileInfo struct {\n\tFileInfo\n}\n\nfunc init() {\n\tt[\"FolderFileInfo\"] = reflect.TypeOf((*FolderFileInfo)(nil)).Elem()\n}\n\ntype FolderFileQuery struct {\n\tFileQuery\n}\n\nfunc init() {\n\tt[\"FolderFileQuery\"] = reflect.TypeOf((*FolderFileQuery)(nil)).Elem()\n}\n\ntype FormatVffs FormatVffsRequestType\n\nfunc init() 
{\n\tt[\"FormatVffs\"] = reflect.TypeOf((*FormatVffs)(nil)).Elem()\n}\n\ntype FormatVffsRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tCreateSpec HostVffsSpec           `xml:\"createSpec\"`\n}\n\nfunc init() {\n\tt[\"FormatVffsRequestType\"] = reflect.TypeOf((*FormatVffsRequestType)(nil)).Elem()\n}\n\ntype FormatVffsResponse struct {\n\tReturnval HostVffsVolume `xml:\"returnval\"`\n}\n\ntype FormatVmfs FormatVmfsRequestType\n\nfunc init() {\n\tt[\"FormatVmfs\"] = reflect.TypeOf((*FormatVmfs)(nil)).Elem()\n}\n\ntype FormatVmfsRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tCreateSpec HostVmfsSpec           `xml:\"createSpec\"`\n}\n\nfunc init() {\n\tt[\"FormatVmfsRequestType\"] = reflect.TypeOf((*FormatVmfsRequestType)(nil)).Elem()\n}\n\ntype FormatVmfsResponse struct {\n\tReturnval HostVmfsVolume `xml:\"returnval\"`\n}\n\ntype FtIssuesOnHost struct {\n\tVmFaultToleranceIssue\n\n\tHost     ManagedObjectReference `xml:\"host\"`\n\tHostName string                 `xml:\"hostName\"`\n\tErrors   []LocalizedMethodFault `xml:\"errors,omitempty\"`\n}\n\nfunc init() {\n\tt[\"FtIssuesOnHost\"] = reflect.TypeOf((*FtIssuesOnHost)(nil)).Elem()\n}\n\ntype FtIssuesOnHostFault FtIssuesOnHost\n\nfunc init() {\n\tt[\"FtIssuesOnHostFault\"] = reflect.TypeOf((*FtIssuesOnHostFault)(nil)).Elem()\n}\n\ntype FullStorageVMotionNotSupported struct {\n\tMigrationFeatureNotSupported\n}\n\nfunc init() {\n\tt[\"FullStorageVMotionNotSupported\"] = reflect.TypeOf((*FullStorageVMotionNotSupported)(nil)).Elem()\n}\n\ntype FullStorageVMotionNotSupportedFault FullStorageVMotionNotSupported\n\nfunc init() {\n\tt[\"FullStorageVMotionNotSupportedFault\"] = reflect.TypeOf((*FullStorageVMotionNotSupportedFault)(nil)).Elem()\n}\n\ntype GatewayConnectFault struct {\n\tHostConnectFault\n\n\tGatewayType string              `xml:\"gatewayType\"`\n\tGatewayId   string              `xml:\"gatewayId\"`\n\tGatewayInfo string              
`xml:\"gatewayInfo\"`\n\tDetails     *LocalizableMessage `xml:\"details,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GatewayConnectFault\"] = reflect.TypeOf((*GatewayConnectFault)(nil)).Elem()\n}\n\ntype GatewayConnectFaultFault BaseGatewayConnectFault\n\nfunc init() {\n\tt[\"GatewayConnectFaultFault\"] = reflect.TypeOf((*GatewayConnectFaultFault)(nil)).Elem()\n}\n\ntype GatewayHostNotReachable struct {\n\tGatewayToHostConnectFault\n}\n\nfunc init() {\n\tt[\"GatewayHostNotReachable\"] = reflect.TypeOf((*GatewayHostNotReachable)(nil)).Elem()\n}\n\ntype GatewayHostNotReachableFault GatewayHostNotReachable\n\nfunc init() {\n\tt[\"GatewayHostNotReachableFault\"] = reflect.TypeOf((*GatewayHostNotReachableFault)(nil)).Elem()\n}\n\ntype GatewayNotFound struct {\n\tGatewayConnectFault\n}\n\nfunc init() {\n\tt[\"GatewayNotFound\"] = reflect.TypeOf((*GatewayNotFound)(nil)).Elem()\n}\n\ntype GatewayNotFoundFault GatewayNotFound\n\nfunc init() {\n\tt[\"GatewayNotFoundFault\"] = reflect.TypeOf((*GatewayNotFoundFault)(nil)).Elem()\n}\n\ntype GatewayNotReachable struct {\n\tGatewayConnectFault\n}\n\nfunc init() {\n\tt[\"GatewayNotReachable\"] = reflect.TypeOf((*GatewayNotReachable)(nil)).Elem()\n}\n\ntype GatewayNotReachableFault GatewayNotReachable\n\nfunc init() {\n\tt[\"GatewayNotReachableFault\"] = reflect.TypeOf((*GatewayNotReachableFault)(nil)).Elem()\n}\n\ntype GatewayOperationRefused struct {\n\tGatewayConnectFault\n}\n\nfunc init() {\n\tt[\"GatewayOperationRefused\"] = reflect.TypeOf((*GatewayOperationRefused)(nil)).Elem()\n}\n\ntype GatewayOperationRefusedFault GatewayOperationRefused\n\nfunc init() {\n\tt[\"GatewayOperationRefusedFault\"] = reflect.TypeOf((*GatewayOperationRefusedFault)(nil)).Elem()\n}\n\ntype GatewayToHostAuthFault struct {\n\tGatewayToHostConnectFault\n\n\tInvalidProperties []string `xml:\"invalidProperties\"`\n\tMissingProperties []string `xml:\"missingProperties\"`\n}\n\nfunc init() {\n\tt[\"GatewayToHostAuthFault\"] = 
reflect.TypeOf((*GatewayToHostAuthFault)(nil)).Elem()\n}\n\ntype GatewayToHostAuthFaultFault GatewayToHostAuthFault\n\nfunc init() {\n\tt[\"GatewayToHostAuthFaultFault\"] = reflect.TypeOf((*GatewayToHostAuthFaultFault)(nil)).Elem()\n}\n\ntype GatewayToHostConnectFault struct {\n\tGatewayConnectFault\n\n\tHostname string `xml:\"hostname\"`\n\tPort     int32  `xml:\"port,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GatewayToHostConnectFault\"] = reflect.TypeOf((*GatewayToHostConnectFault)(nil)).Elem()\n}\n\ntype GatewayToHostConnectFaultFault BaseGatewayToHostConnectFault\n\nfunc init() {\n\tt[\"GatewayToHostConnectFaultFault\"] = reflect.TypeOf((*GatewayToHostConnectFaultFault)(nil)).Elem()\n}\n\ntype GatewayToHostTrustVerifyFault struct {\n\tGatewayToHostConnectFault\n\n\tVerificationToken  string     `xml:\"verificationToken\"`\n\tPropertiesToVerify []KeyValue `xml:\"propertiesToVerify\"`\n}\n\nfunc init() {\n\tt[\"GatewayToHostTrustVerifyFault\"] = reflect.TypeOf((*GatewayToHostTrustVerifyFault)(nil)).Elem()\n}\n\ntype GatewayToHostTrustVerifyFaultFault GatewayToHostTrustVerifyFault\n\nfunc init() {\n\tt[\"GatewayToHostTrustVerifyFaultFault\"] = reflect.TypeOf((*GatewayToHostTrustVerifyFaultFault)(nil)).Elem()\n}\n\ntype GeneralEvent struct {\n\tEvent\n\n\tMessage string `xml:\"message\"`\n}\n\nfunc init() {\n\tt[\"GeneralEvent\"] = reflect.TypeOf((*GeneralEvent)(nil)).Elem()\n}\n\ntype GeneralHostErrorEvent struct {\n\tGeneralEvent\n}\n\nfunc init() {\n\tt[\"GeneralHostErrorEvent\"] = reflect.TypeOf((*GeneralHostErrorEvent)(nil)).Elem()\n}\n\ntype GeneralHostInfoEvent struct {\n\tGeneralEvent\n}\n\nfunc init() {\n\tt[\"GeneralHostInfoEvent\"] = reflect.TypeOf((*GeneralHostInfoEvent)(nil)).Elem()\n}\n\ntype GeneralHostWarningEvent struct {\n\tGeneralEvent\n}\n\nfunc init() {\n\tt[\"GeneralHostWarningEvent\"] = reflect.TypeOf((*GeneralHostWarningEvent)(nil)).Elem()\n}\n\ntype GeneralUserEvent struct {\n\tGeneralEvent\n\n\tEntity *ManagedEntityEventArgument 
`xml:\"entity,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GeneralUserEvent\"] = reflect.TypeOf((*GeneralUserEvent)(nil)).Elem()\n}\n\ntype GeneralVmErrorEvent struct {\n\tGeneralEvent\n}\n\nfunc init() {\n\tt[\"GeneralVmErrorEvent\"] = reflect.TypeOf((*GeneralVmErrorEvent)(nil)).Elem()\n}\n\ntype GeneralVmInfoEvent struct {\n\tGeneralEvent\n}\n\nfunc init() {\n\tt[\"GeneralVmInfoEvent\"] = reflect.TypeOf((*GeneralVmInfoEvent)(nil)).Elem()\n}\n\ntype GeneralVmWarningEvent struct {\n\tGeneralEvent\n}\n\nfunc init() {\n\tt[\"GeneralVmWarningEvent\"] = reflect.TypeOf((*GeneralVmWarningEvent)(nil)).Elem()\n}\n\ntype GenerateCertificateSigningRequest GenerateCertificateSigningRequestRequestType\n\nfunc init() {\n\tt[\"GenerateCertificateSigningRequest\"] = reflect.TypeOf((*GenerateCertificateSigningRequest)(nil)).Elem()\n}\n\ntype GenerateCertificateSigningRequestByDn GenerateCertificateSigningRequestByDnRequestType\n\nfunc init() {\n\tt[\"GenerateCertificateSigningRequestByDn\"] = reflect.TypeOf((*GenerateCertificateSigningRequestByDn)(nil)).Elem()\n}\n\ntype GenerateCertificateSigningRequestByDnRequestType struct {\n\tThis              ManagedObjectReference `xml:\"_this\"`\n\tDistinguishedName string                 `xml:\"distinguishedName\"`\n}\n\nfunc init() {\n\tt[\"GenerateCertificateSigningRequestByDnRequestType\"] = reflect.TypeOf((*GenerateCertificateSigningRequestByDnRequestType)(nil)).Elem()\n}\n\ntype GenerateCertificateSigningRequestByDnResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype GenerateCertificateSigningRequestRequestType struct {\n\tThis                     ManagedObjectReference `xml:\"_this\"`\n\tUseIpAddressAsCommonName bool                   `xml:\"useIpAddressAsCommonName\"`\n}\n\nfunc init() {\n\tt[\"GenerateCertificateSigningRequestRequestType\"] = reflect.TypeOf((*GenerateCertificateSigningRequestRequestType)(nil)).Elem()\n}\n\ntype GenerateCertificateSigningRequestResponse struct {\n\tReturnval string 
`xml:\"returnval\"`\n}\n\ntype GenerateClientCsr GenerateClientCsrRequestType\n\nfunc init() {\n\tt[\"GenerateClientCsr\"] = reflect.TypeOf((*GenerateClientCsr)(nil)).Elem()\n}\n\ntype GenerateClientCsrRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tCluster KeyProviderId          `xml:\"cluster\"`\n}\n\nfunc init() {\n\tt[\"GenerateClientCsrRequestType\"] = reflect.TypeOf((*GenerateClientCsrRequestType)(nil)).Elem()\n}\n\ntype GenerateClientCsrResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype GenerateConfigTaskList GenerateConfigTaskListRequestType\n\nfunc init() {\n\tt[\"GenerateConfigTaskList\"] = reflect.TypeOf((*GenerateConfigTaskList)(nil)).Elem()\n}\n\ntype GenerateConfigTaskListRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tConfigSpec HostConfigSpec         `xml:\"configSpec\"`\n\tHost       ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"GenerateConfigTaskListRequestType\"] = reflect.TypeOf((*GenerateConfigTaskListRequestType)(nil)).Elem()\n}\n\ntype GenerateConfigTaskListResponse struct {\n\tReturnval HostProfileManagerConfigTaskList `xml:\"returnval\"`\n}\n\ntype GenerateHostConfigTaskSpecRequestType struct {\n\tThis      ManagedObjectReference     `xml:\"_this\"`\n\tHostsInfo []StructuredCustomizations `xml:\"hostsInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GenerateHostConfigTaskSpecRequestType\"] = reflect.TypeOf((*GenerateHostConfigTaskSpecRequestType)(nil)).Elem()\n}\n\ntype GenerateHostConfigTaskSpec_Task GenerateHostConfigTaskSpecRequestType\n\nfunc init() {\n\tt[\"GenerateHostConfigTaskSpec_Task\"] = reflect.TypeOf((*GenerateHostConfigTaskSpec_Task)(nil)).Elem()\n}\n\ntype GenerateHostConfigTaskSpec_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype GenerateHostProfileTaskListRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tConfigSpec HostConfigSpec         `xml:\"configSpec\"`\n\tHost       
ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"GenerateHostProfileTaskListRequestType\"] = reflect.TypeOf((*GenerateHostProfileTaskListRequestType)(nil)).Elem()\n}\n\ntype GenerateHostProfileTaskList_Task GenerateHostProfileTaskListRequestType\n\nfunc init() {\n\tt[\"GenerateHostProfileTaskList_Task\"] = reflect.TypeOf((*GenerateHostProfileTaskList_Task)(nil)).Elem()\n}\n\ntype GenerateHostProfileTaskList_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype GenerateKey GenerateKeyRequestType\n\nfunc init() {\n\tt[\"GenerateKey\"] = reflect.TypeOf((*GenerateKey)(nil)).Elem()\n}\n\ntype GenerateKeyRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tKeyProvider *KeyProviderId         `xml:\"keyProvider,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GenerateKeyRequestType\"] = reflect.TypeOf((*GenerateKeyRequestType)(nil)).Elem()\n}\n\ntype GenerateKeyResponse struct {\n\tReturnval CryptoKeyResult `xml:\"returnval\"`\n}\n\ntype GenerateLogBundlesRequestType struct {\n\tThis           ManagedObjectReference   `xml:\"_this\"`\n\tIncludeDefault bool                     `xml:\"includeDefault\"`\n\tHost           []ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GenerateLogBundlesRequestType\"] = reflect.TypeOf((*GenerateLogBundlesRequestType)(nil)).Elem()\n}\n\ntype GenerateLogBundles_Task GenerateLogBundlesRequestType\n\nfunc init() {\n\tt[\"GenerateLogBundles_Task\"] = reflect.TypeOf((*GenerateLogBundles_Task)(nil)).Elem()\n}\n\ntype GenerateLogBundles_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype GenerateSelfSignedClientCert GenerateSelfSignedClientCertRequestType\n\nfunc init() {\n\tt[\"GenerateSelfSignedClientCert\"] = reflect.TypeOf((*GenerateSelfSignedClientCert)(nil)).Elem()\n}\n\ntype GenerateSelfSignedClientCertRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tCluster KeyProviderId          
`xml:\"cluster\"`\n}\n\nfunc init() {\n\tt[\"GenerateSelfSignedClientCertRequestType\"] = reflect.TypeOf((*GenerateSelfSignedClientCertRequestType)(nil)).Elem()\n}\n\ntype GenerateSelfSignedClientCertResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype GenericDrsFault struct {\n\tVimFault\n\n\tHostFaults []LocalizedMethodFault `xml:\"hostFaults,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GenericDrsFault\"] = reflect.TypeOf((*GenericDrsFault)(nil)).Elem()\n}\n\ntype GenericDrsFaultFault GenericDrsFault\n\nfunc init() {\n\tt[\"GenericDrsFaultFault\"] = reflect.TypeOf((*GenericDrsFaultFault)(nil)).Elem()\n}\n\ntype GenericVmConfigFault struct {\n\tVmConfigFault\n\n\tReason string `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"GenericVmConfigFault\"] = reflect.TypeOf((*GenericVmConfigFault)(nil)).Elem()\n}\n\ntype GenericVmConfigFaultFault GenericVmConfigFault\n\nfunc init() {\n\tt[\"GenericVmConfigFaultFault\"] = reflect.TypeOf((*GenericVmConfigFaultFault)(nil)).Elem()\n}\n\ntype GetAlarm GetAlarmRequestType\n\nfunc init() {\n\tt[\"GetAlarm\"] = reflect.TypeOf((*GetAlarm)(nil)).Elem()\n}\n\ntype GetAlarmRequestType struct {\n\tThis   ManagedObjectReference  `xml:\"_this\"`\n\tEntity *ManagedObjectReference `xml:\"entity,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GetAlarmRequestType\"] = reflect.TypeOf((*GetAlarmRequestType)(nil)).Elem()\n}\n\ntype GetAlarmResponse struct {\n\tReturnval []ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype GetAlarmState GetAlarmStateRequestType\n\nfunc init() {\n\tt[\"GetAlarmState\"] = reflect.TypeOf((*GetAlarmState)(nil)).Elem()\n}\n\ntype GetAlarmStateRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tEntity ManagedObjectReference `xml:\"entity\"`\n}\n\nfunc init() {\n\tt[\"GetAlarmStateRequestType\"] = reflect.TypeOf((*GetAlarmStateRequestType)(nil)).Elem()\n}\n\ntype GetAlarmStateResponse struct {\n\tReturnval []AlarmState `xml:\"returnval,omitempty\"`\n}\n\ntype GetCustomizationSpec 
GetCustomizationSpecRequestType\n\nfunc init() {\n\tt[\"GetCustomizationSpec\"] = reflect.TypeOf((*GetCustomizationSpec)(nil)).Elem()\n}\n\ntype GetCustomizationSpecRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tName string                 `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"GetCustomizationSpecRequestType\"] = reflect.TypeOf((*GetCustomizationSpecRequestType)(nil)).Elem()\n}\n\ntype GetCustomizationSpecResponse struct {\n\tReturnval CustomizationSpecItem `xml:\"returnval\"`\n}\n\ntype GetPublicKey GetPublicKeyRequestType\n\nfunc init() {\n\tt[\"GetPublicKey\"] = reflect.TypeOf((*GetPublicKey)(nil)).Elem()\n}\n\ntype GetPublicKeyRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"GetPublicKeyRequestType\"] = reflect.TypeOf((*GetPublicKeyRequestType)(nil)).Elem()\n}\n\ntype GetPublicKeyResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype GetResourceUsage GetResourceUsageRequestType\n\nfunc init() {\n\tt[\"GetResourceUsage\"] = reflect.TypeOf((*GetResourceUsage)(nil)).Elem()\n}\n\ntype GetResourceUsageRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"GetResourceUsageRequestType\"] = reflect.TypeOf((*GetResourceUsageRequestType)(nil)).Elem()\n}\n\ntype GetResourceUsageResponse struct {\n\tReturnval ClusterResourceUsageSummary `xml:\"returnval\"`\n}\n\ntype GetVchaClusterHealth GetVchaClusterHealthRequestType\n\nfunc init() {\n\tt[\"GetVchaClusterHealth\"] = reflect.TypeOf((*GetVchaClusterHealth)(nil)).Elem()\n}\n\ntype GetVchaClusterHealthRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"GetVchaClusterHealthRequestType\"] = reflect.TypeOf((*GetVchaClusterHealthRequestType)(nil)).Elem()\n}\n\ntype GetVchaClusterHealthResponse struct {\n\tReturnval VchaClusterHealth `xml:\"returnval\"`\n}\n\ntype GetVsanObjExtAttrs GetVsanObjExtAttrsRequestType\n\nfunc init() {\n\tt[\"GetVsanObjExtAttrs\"] = 
reflect.TypeOf((*GetVsanObjExtAttrs)(nil)).Elem()\n}\n\ntype GetVsanObjExtAttrsRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tUuids []string               `xml:\"uuids\"`\n}\n\nfunc init() {\n\tt[\"GetVsanObjExtAttrsRequestType\"] = reflect.TypeOf((*GetVsanObjExtAttrsRequestType)(nil)).Elem()\n}\n\ntype GetVsanObjExtAttrsResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype GhostDvsProxySwitchDetectedEvent struct {\n\tHostEvent\n\n\tSwitchUuid []string `xml:\"switchUuid\"`\n}\n\nfunc init() {\n\tt[\"GhostDvsProxySwitchDetectedEvent\"] = reflect.TypeOf((*GhostDvsProxySwitchDetectedEvent)(nil)).Elem()\n}\n\ntype GhostDvsProxySwitchRemovedEvent struct {\n\tHostEvent\n\n\tSwitchUuid []string `xml:\"switchUuid\"`\n}\n\nfunc init() {\n\tt[\"GhostDvsProxySwitchRemovedEvent\"] = reflect.TypeOf((*GhostDvsProxySwitchRemovedEvent)(nil)).Elem()\n}\n\ntype GlobalMessageChangedEvent struct {\n\tSessionEvent\n\n\tMessage     string `xml:\"message\"`\n\tPrevMessage string `xml:\"prevMessage,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GlobalMessageChangedEvent\"] = reflect.TypeOf((*GlobalMessageChangedEvent)(nil)).Elem()\n}\n\ntype GroupAlarmAction struct {\n\tAlarmAction\n\n\tAction []BaseAlarmAction `xml:\"action,typeattr\"`\n}\n\nfunc init() {\n\tt[\"GroupAlarmAction\"] = reflect.TypeOf((*GroupAlarmAction)(nil)).Elem()\n}\n\ntype GuestAliases struct {\n\tDynamicData\n\n\tBase64Cert string               `xml:\"base64Cert\"`\n\tAliases    []GuestAuthAliasInfo `xml:\"aliases\"`\n}\n\nfunc init() {\n\tt[\"GuestAliases\"] = reflect.TypeOf((*GuestAliases)(nil)).Elem()\n}\n\ntype GuestAuthAliasInfo struct {\n\tDynamicData\n\n\tSubject BaseGuestAuthSubject `xml:\"subject,typeattr\"`\n\tComment string               `xml:\"comment\"`\n}\n\nfunc init() {\n\tt[\"GuestAuthAliasInfo\"] = reflect.TypeOf((*GuestAuthAliasInfo)(nil)).Elem()\n}\n\ntype GuestAuthAnySubject struct {\n\tGuestAuthSubject\n}\n\nfunc init() {\n\tt[\"GuestAuthAnySubject\"] = 
reflect.TypeOf((*GuestAuthAnySubject)(nil)).Elem()\n}\n\ntype GuestAuthNamedSubject struct {\n\tGuestAuthSubject\n\n\tName string `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"GuestAuthNamedSubject\"] = reflect.TypeOf((*GuestAuthNamedSubject)(nil)).Elem()\n}\n\ntype GuestAuthSubject struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"GuestAuthSubject\"] = reflect.TypeOf((*GuestAuthSubject)(nil)).Elem()\n}\n\ntype GuestAuthentication struct {\n\tDynamicData\n\n\tInteractiveSession bool `xml:\"interactiveSession\"`\n}\n\nfunc init() {\n\tt[\"GuestAuthentication\"] = reflect.TypeOf((*GuestAuthentication)(nil)).Elem()\n}\n\ntype GuestAuthenticationChallenge struct {\n\tGuestOperationsFault\n\n\tServerChallenge BaseGuestAuthentication `xml:\"serverChallenge,typeattr\"`\n\tSessionID       int64                   `xml:\"sessionID\"`\n}\n\nfunc init() {\n\tt[\"GuestAuthenticationChallenge\"] = reflect.TypeOf((*GuestAuthenticationChallenge)(nil)).Elem()\n}\n\ntype GuestAuthenticationChallengeFault GuestAuthenticationChallenge\n\nfunc init() {\n\tt[\"GuestAuthenticationChallengeFault\"] = reflect.TypeOf((*GuestAuthenticationChallengeFault)(nil)).Elem()\n}\n\ntype GuestComponentsOutOfDate struct {\n\tGuestOperationsFault\n}\n\nfunc init() {\n\tt[\"GuestComponentsOutOfDate\"] = reflect.TypeOf((*GuestComponentsOutOfDate)(nil)).Elem()\n}\n\ntype GuestComponentsOutOfDateFault GuestComponentsOutOfDate\n\nfunc init() {\n\tt[\"GuestComponentsOutOfDateFault\"] = reflect.TypeOf((*GuestComponentsOutOfDateFault)(nil)).Elem()\n}\n\ntype GuestDiskInfo struct {\n\tDynamicData\n\n\tDiskPath  string `xml:\"diskPath,omitempty\"`\n\tCapacity  int64  `xml:\"capacity,omitempty\"`\n\tFreeSpace int64  `xml:\"freeSpace,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GuestDiskInfo\"] = reflect.TypeOf((*GuestDiskInfo)(nil)).Elem()\n}\n\ntype GuestFileAttributes struct {\n\tDynamicData\n\n\tModificationTime *time.Time `xml:\"modificationTime\"`\n\tAccessTime       *time.Time `xml:\"accessTime\"`\n\tSymlinkTarget   
 string     `xml:\"symlinkTarget,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GuestFileAttributes\"] = reflect.TypeOf((*GuestFileAttributes)(nil)).Elem()\n}\n\ntype GuestFileInfo struct {\n\tDynamicData\n\n\tPath       string                  `xml:\"path\"`\n\tType       string                  `xml:\"type\"`\n\tSize       int64                   `xml:\"size\"`\n\tAttributes BaseGuestFileAttributes `xml:\"attributes,typeattr\"`\n}\n\nfunc init() {\n\tt[\"GuestFileInfo\"] = reflect.TypeOf((*GuestFileInfo)(nil)).Elem()\n}\n\ntype GuestInfo struct {\n\tDynamicData\n\n\tToolsStatus                     VirtualMachineToolsStatus          `xml:\"toolsStatus,omitempty\"`\n\tToolsVersionStatus              string                             `xml:\"toolsVersionStatus,omitempty\"`\n\tToolsVersionStatus2             string                             `xml:\"toolsVersionStatus2,omitempty\"`\n\tToolsRunningStatus              string                             `xml:\"toolsRunningStatus,omitempty\"`\n\tToolsVersion                    string                             `xml:\"toolsVersion,omitempty\"`\n\tToolsInstallType                string                             `xml:\"toolsInstallType,omitempty\"`\n\tGuestId                         string                             `xml:\"guestId,omitempty\"`\n\tGuestFamily                     string                             `xml:\"guestFamily,omitempty\"`\n\tGuestFullName                   string                             `xml:\"guestFullName,omitempty\"`\n\tHostName                        string                             `xml:\"hostName,omitempty\"`\n\tIpAddress                       string                             `xml:\"ipAddress,omitempty\"`\n\tNet                             []GuestNicInfo                     `xml:\"net,omitempty\"`\n\tIpStack                         []GuestStackInfo                   `xml:\"ipStack,omitempty\"`\n\tDisk                            []GuestDiskInfo                    
`xml:\"disk,omitempty\"`\n\tScreen                          *GuestScreenInfo                   `xml:\"screen,omitempty\"`\n\tGuestState                      string                             `xml:\"guestState\"`\n\tAppHeartbeatStatus              string                             `xml:\"appHeartbeatStatus,omitempty\"`\n\tGuestKernelCrashed              *bool                              `xml:\"guestKernelCrashed\"`\n\tAppState                        string                             `xml:\"appState,omitempty\"`\n\tGuestOperationsReady            *bool                              `xml:\"guestOperationsReady\"`\n\tInteractiveGuestOperationsReady *bool                              `xml:\"interactiveGuestOperationsReady\"`\n\tGuestStateChangeSupported       *bool                              `xml:\"guestStateChangeSupported\"`\n\tGenerationInfo                  []GuestInfoNamespaceGenerationInfo `xml:\"generationInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GuestInfo\"] = reflect.TypeOf((*GuestInfo)(nil)).Elem()\n}\n\ntype GuestInfoNamespaceGenerationInfo struct {\n\tDynamicData\n\n\tKey          string `xml:\"key\"`\n\tGenerationNo int32  `xml:\"generationNo\"`\n}\n\nfunc init() {\n\tt[\"GuestInfoNamespaceGenerationInfo\"] = reflect.TypeOf((*GuestInfoNamespaceGenerationInfo)(nil)).Elem()\n}\n\ntype GuestListFileInfo struct {\n\tDynamicData\n\n\tFiles     []GuestFileInfo `xml:\"files,omitempty\"`\n\tRemaining int32           `xml:\"remaining\"`\n}\n\nfunc init() {\n\tt[\"GuestListFileInfo\"] = reflect.TypeOf((*GuestListFileInfo)(nil)).Elem()\n}\n\ntype GuestMappedAliases struct {\n\tDynamicData\n\n\tBase64Cert string                 `xml:\"base64Cert\"`\n\tUsername   string                 `xml:\"username\"`\n\tSubjects   []BaseGuestAuthSubject `xml:\"subjects,typeattr\"`\n}\n\nfunc init() {\n\tt[\"GuestMappedAliases\"] = reflect.TypeOf((*GuestMappedAliases)(nil)).Elem()\n}\n\ntype GuestMultipleMappings struct {\n\tGuestOperationsFault\n}\n\nfunc init() 
{\n\tt[\"GuestMultipleMappings\"] = reflect.TypeOf((*GuestMultipleMappings)(nil)).Elem()\n}\n\ntype GuestMultipleMappingsFault GuestMultipleMappings\n\nfunc init() {\n\tt[\"GuestMultipleMappingsFault\"] = reflect.TypeOf((*GuestMultipleMappingsFault)(nil)).Elem()\n}\n\ntype GuestNicInfo struct {\n\tDynamicData\n\n\tNetwork        string                `xml:\"network,omitempty\"`\n\tIpAddress      []string              `xml:\"ipAddress,omitempty\"`\n\tMacAddress     string                `xml:\"macAddress,omitempty\"`\n\tConnected      bool                  `xml:\"connected\"`\n\tDeviceConfigId int32                 `xml:\"deviceConfigId\"`\n\tDnsConfig      *NetDnsConfigInfo     `xml:\"dnsConfig,omitempty\"`\n\tIpConfig       *NetIpConfigInfo      `xml:\"ipConfig,omitempty\"`\n\tNetBIOSConfig  BaseNetBIOSConfigInfo `xml:\"netBIOSConfig,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"GuestNicInfo\"] = reflect.TypeOf((*GuestNicInfo)(nil)).Elem()\n}\n\ntype GuestOperationsFault struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"GuestOperationsFault\"] = reflect.TypeOf((*GuestOperationsFault)(nil)).Elem()\n}\n\ntype GuestOperationsFaultFault BaseGuestOperationsFault\n\nfunc init() {\n\tt[\"GuestOperationsFaultFault\"] = reflect.TypeOf((*GuestOperationsFaultFault)(nil)).Elem()\n}\n\ntype GuestOperationsUnavailable struct {\n\tGuestOperationsFault\n}\n\nfunc init() {\n\tt[\"GuestOperationsUnavailable\"] = reflect.TypeOf((*GuestOperationsUnavailable)(nil)).Elem()\n}\n\ntype GuestOperationsUnavailableFault GuestOperationsUnavailable\n\nfunc init() {\n\tt[\"GuestOperationsUnavailableFault\"] = reflect.TypeOf((*GuestOperationsUnavailableFault)(nil)).Elem()\n}\n\ntype GuestOsDescriptor struct {\n\tDynamicData\n\n\tId                              string          `xml:\"id\"`\n\tFamily                          string          `xml:\"family\"`\n\tFullName                        string          `xml:\"fullName\"`\n\tSupportedMaxCPUs                int32           
`xml:\"supportedMaxCPUs\"`\n\tNumSupportedPhysicalSockets     int32           `xml:\"numSupportedPhysicalSockets,omitempty\"`\n\tNumSupportedCoresPerSocket      int32           `xml:\"numSupportedCoresPerSocket,omitempty\"`\n\tSupportedMinMemMB               int32           `xml:\"supportedMinMemMB\"`\n\tSupportedMaxMemMB               int32           `xml:\"supportedMaxMemMB\"`\n\tRecommendedMemMB                int32           `xml:\"recommendedMemMB\"`\n\tRecommendedColorDepth           int32           `xml:\"recommendedColorDepth\"`\n\tSupportedDiskControllerList     []string        `xml:\"supportedDiskControllerList\"`\n\tRecommendedSCSIController       string          `xml:\"recommendedSCSIController,omitempty\"`\n\tRecommendedDiskController       string          `xml:\"recommendedDiskController\"`\n\tSupportedNumDisks               int32           `xml:\"supportedNumDisks\"`\n\tRecommendedDiskSizeMB           int32           `xml:\"recommendedDiskSizeMB\"`\n\tRecommendedCdromController      string          `xml:\"recommendedCdromController,omitempty\"`\n\tSupportedEthernetCard           []string        `xml:\"supportedEthernetCard\"`\n\tRecommendedEthernetCard         string          `xml:\"recommendedEthernetCard,omitempty\"`\n\tSupportsSlaveDisk               *bool           `xml:\"supportsSlaveDisk\"`\n\tCpuFeatureMask                  []HostCpuIdInfo `xml:\"cpuFeatureMask,omitempty\"`\n\tSmcRequired                     *bool           `xml:\"smcRequired\"`\n\tSupportsWakeOnLan               bool            `xml:\"supportsWakeOnLan\"`\n\tSupportsVMI                     *bool           `xml:\"supportsVMI\"`\n\tSupportsMemoryHotAdd            *bool           `xml:\"supportsMemoryHotAdd\"`\n\tSupportsCpuHotAdd               *bool           `xml:\"supportsCpuHotAdd\"`\n\tSupportsCpuHotRemove            *bool           `xml:\"supportsCpuHotRemove\"`\n\tSupportedFirmware               []string        `xml:\"supportedFirmware,omitempty\"`\n\tRecommendedFirmware  
           string          `xml:\"recommendedFirmware,omitempty\"`\n\tSupportedUSBControllerList      []string        `xml:\"supportedUSBControllerList,omitempty\"`\n\tRecommendedUSBController        string          `xml:\"recommendedUSBController,omitempty\"`\n\tSupports3D                      *bool           `xml:\"supports3D\"`\n\tRecommended3D                   *bool           `xml:\"recommended3D\"`\n\tSmcRecommended                  *bool           `xml:\"smcRecommended\"`\n\tIch7mRecommended                *bool           `xml:\"ich7mRecommended\"`\n\tUsbRecommended                  *bool           `xml:\"usbRecommended\"`\n\tSupportLevel                    string          `xml:\"supportLevel,omitempty\"`\n\tSupportedForCreate              *bool           `xml:\"supportedForCreate\"`\n\tVRAMSizeInKB                    *IntOption      `xml:\"vRAMSizeInKB,omitempty\"`\n\tNumSupportedFloppyDevices       int32           `xml:\"numSupportedFloppyDevices,omitempty\"`\n\tWakeOnLanEthernetCard           []string        `xml:\"wakeOnLanEthernetCard,omitempty\"`\n\tSupportsPvscsiControllerForBoot *bool           `xml:\"supportsPvscsiControllerForBoot\"`\n\tDiskUuidEnabled                 *bool           `xml:\"diskUuidEnabled\"`\n\tSupportsHotPlugPCI              *bool           `xml:\"supportsHotPlugPCI\"`\n\tSupportsSecureBoot              *bool           `xml:\"supportsSecureBoot\"`\n\tDefaultSecureBoot               *bool           `xml:\"defaultSecureBoot\"`\n}\n\nfunc init() {\n\tt[\"GuestOsDescriptor\"] = reflect.TypeOf((*GuestOsDescriptor)(nil)).Elem()\n}\n\ntype GuestPermissionDenied struct {\n\tGuestOperationsFault\n}\n\nfunc init() {\n\tt[\"GuestPermissionDenied\"] = reflect.TypeOf((*GuestPermissionDenied)(nil)).Elem()\n}\n\ntype GuestPermissionDeniedFault GuestPermissionDenied\n\nfunc init() {\n\tt[\"GuestPermissionDeniedFault\"] = reflect.TypeOf((*GuestPermissionDeniedFault)(nil)).Elem()\n}\n\ntype GuestPosixFileAttributes struct 
{\n\tGuestFileAttributes\n\n\tOwnerId     *int32 `xml:\"ownerId\"`\n\tGroupId     *int32 `xml:\"groupId\"`\n\tPermissions int64  `xml:\"permissions,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GuestPosixFileAttributes\"] = reflect.TypeOf((*GuestPosixFileAttributes)(nil)).Elem()\n}\n\ntype GuestProcessInfo struct {\n\tDynamicData\n\n\tName      string     `xml:\"name\"`\n\tPid       int64      `xml:\"pid\"`\n\tOwner     string     `xml:\"owner\"`\n\tCmdLine   string     `xml:\"cmdLine\"`\n\tStartTime time.Time  `xml:\"startTime\"`\n\tEndTime   *time.Time `xml:\"endTime\"`\n\tExitCode  int32      `xml:\"exitCode,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GuestProcessInfo\"] = reflect.TypeOf((*GuestProcessInfo)(nil)).Elem()\n}\n\ntype GuestProcessNotFound struct {\n\tGuestOperationsFault\n\n\tPid int64 `xml:\"pid\"`\n}\n\nfunc init() {\n\tt[\"GuestProcessNotFound\"] = reflect.TypeOf((*GuestProcessNotFound)(nil)).Elem()\n}\n\ntype GuestProcessNotFoundFault GuestProcessNotFound\n\nfunc init() {\n\tt[\"GuestProcessNotFoundFault\"] = reflect.TypeOf((*GuestProcessNotFoundFault)(nil)).Elem()\n}\n\ntype GuestProgramSpec struct {\n\tDynamicData\n\n\tProgramPath      string   `xml:\"programPath\"`\n\tArguments        string   `xml:\"arguments\"`\n\tWorkingDirectory string   `xml:\"workingDirectory,omitempty\"`\n\tEnvVariables     []string `xml:\"envVariables,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GuestProgramSpec\"] = reflect.TypeOf((*GuestProgramSpec)(nil)).Elem()\n}\n\ntype GuestRegKeyNameSpec struct {\n\tDynamicData\n\n\tRegistryPath string `xml:\"registryPath\"`\n\tWowBitness   string `xml:\"wowBitness\"`\n}\n\nfunc init() {\n\tt[\"GuestRegKeyNameSpec\"] = reflect.TypeOf((*GuestRegKeyNameSpec)(nil)).Elem()\n}\n\ntype GuestRegKeyRecordSpec struct {\n\tDynamicData\n\n\tKey   GuestRegKeySpec       `xml:\"key\"`\n\tFault *LocalizedMethodFault `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GuestRegKeyRecordSpec\"] = reflect.TypeOf((*GuestRegKeyRecordSpec)(nil)).Elem()\n}\n\ntype 
GuestRegKeySpec struct {\n\tDynamicData\n\n\tKeyName     GuestRegKeyNameSpec `xml:\"keyName\"`\n\tClassType   string              `xml:\"classType\"`\n\tLastWritten time.Time           `xml:\"lastWritten\"`\n}\n\nfunc init() {\n\tt[\"GuestRegKeySpec\"] = reflect.TypeOf((*GuestRegKeySpec)(nil)).Elem()\n}\n\ntype GuestRegValueBinarySpec struct {\n\tGuestRegValueDataSpec\n\n\tValue []byte `xml:\"value,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GuestRegValueBinarySpec\"] = reflect.TypeOf((*GuestRegValueBinarySpec)(nil)).Elem()\n}\n\ntype GuestRegValueDataSpec struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"GuestRegValueDataSpec\"] = reflect.TypeOf((*GuestRegValueDataSpec)(nil)).Elem()\n}\n\ntype GuestRegValueDwordSpec struct {\n\tGuestRegValueDataSpec\n\n\tValue int32 `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"GuestRegValueDwordSpec\"] = reflect.TypeOf((*GuestRegValueDwordSpec)(nil)).Elem()\n}\n\ntype GuestRegValueExpandStringSpec struct {\n\tGuestRegValueDataSpec\n\n\tValue string `xml:\"value,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GuestRegValueExpandStringSpec\"] = reflect.TypeOf((*GuestRegValueExpandStringSpec)(nil)).Elem()\n}\n\ntype GuestRegValueMultiStringSpec struct {\n\tGuestRegValueDataSpec\n\n\tValue []string `xml:\"value,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GuestRegValueMultiStringSpec\"] = reflect.TypeOf((*GuestRegValueMultiStringSpec)(nil)).Elem()\n}\n\ntype GuestRegValueNameSpec struct {\n\tDynamicData\n\n\tKeyName GuestRegKeyNameSpec `xml:\"keyName\"`\n\tName    string              `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"GuestRegValueNameSpec\"] = reflect.TypeOf((*GuestRegValueNameSpec)(nil)).Elem()\n}\n\ntype GuestRegValueQwordSpec struct {\n\tGuestRegValueDataSpec\n\n\tValue int64 `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"GuestRegValueQwordSpec\"] = reflect.TypeOf((*GuestRegValueQwordSpec)(nil)).Elem()\n}\n\ntype GuestRegValueSpec struct {\n\tDynamicData\n\n\tName GuestRegValueNameSpec     `xml:\"name\"`\n\tData BaseGuestRegValueDataSpec 
`xml:\"data,typeattr\"`\n}\n\nfunc init() {\n\tt[\"GuestRegValueSpec\"] = reflect.TypeOf((*GuestRegValueSpec)(nil)).Elem()\n}\n\ntype GuestRegValueStringSpec struct {\n\tGuestRegValueDataSpec\n\n\tValue string `xml:\"value,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GuestRegValueStringSpec\"] = reflect.TypeOf((*GuestRegValueStringSpec)(nil)).Elem()\n}\n\ntype GuestRegistryFault struct {\n\tGuestOperationsFault\n\n\tWindowsSystemErrorCode int64 `xml:\"windowsSystemErrorCode\"`\n}\n\nfunc init() {\n\tt[\"GuestRegistryFault\"] = reflect.TypeOf((*GuestRegistryFault)(nil)).Elem()\n}\n\ntype GuestRegistryFaultFault BaseGuestRegistryFault\n\nfunc init() {\n\tt[\"GuestRegistryFaultFault\"] = reflect.TypeOf((*GuestRegistryFaultFault)(nil)).Elem()\n}\n\ntype GuestRegistryKeyAlreadyExists struct {\n\tGuestRegistryKeyFault\n}\n\nfunc init() {\n\tt[\"GuestRegistryKeyAlreadyExists\"] = reflect.TypeOf((*GuestRegistryKeyAlreadyExists)(nil)).Elem()\n}\n\ntype GuestRegistryKeyAlreadyExistsFault GuestRegistryKeyAlreadyExists\n\nfunc init() {\n\tt[\"GuestRegistryKeyAlreadyExistsFault\"] = reflect.TypeOf((*GuestRegistryKeyAlreadyExistsFault)(nil)).Elem()\n}\n\ntype GuestRegistryKeyFault struct {\n\tGuestRegistryFault\n\n\tKeyName string `xml:\"keyName\"`\n}\n\nfunc init() {\n\tt[\"GuestRegistryKeyFault\"] = reflect.TypeOf((*GuestRegistryKeyFault)(nil)).Elem()\n}\n\ntype GuestRegistryKeyFaultFault BaseGuestRegistryKeyFault\n\nfunc init() {\n\tt[\"GuestRegistryKeyFaultFault\"] = reflect.TypeOf((*GuestRegistryKeyFaultFault)(nil)).Elem()\n}\n\ntype GuestRegistryKeyHasSubkeys struct {\n\tGuestRegistryKeyFault\n}\n\nfunc init() {\n\tt[\"GuestRegistryKeyHasSubkeys\"] = reflect.TypeOf((*GuestRegistryKeyHasSubkeys)(nil)).Elem()\n}\n\ntype GuestRegistryKeyHasSubkeysFault GuestRegistryKeyHasSubkeys\n\nfunc init() {\n\tt[\"GuestRegistryKeyHasSubkeysFault\"] = reflect.TypeOf((*GuestRegistryKeyHasSubkeysFault)(nil)).Elem()\n}\n\ntype GuestRegistryKeyInvalid struct {\n\tGuestRegistryKeyFault\n}\n\nfunc 
init() {\n\tt[\"GuestRegistryKeyInvalid\"] = reflect.TypeOf((*GuestRegistryKeyInvalid)(nil)).Elem()\n}\n\ntype GuestRegistryKeyInvalidFault GuestRegistryKeyInvalid\n\nfunc init() {\n\tt[\"GuestRegistryKeyInvalidFault\"] = reflect.TypeOf((*GuestRegistryKeyInvalidFault)(nil)).Elem()\n}\n\ntype GuestRegistryKeyParentVolatile struct {\n\tGuestRegistryKeyFault\n}\n\nfunc init() {\n\tt[\"GuestRegistryKeyParentVolatile\"] = reflect.TypeOf((*GuestRegistryKeyParentVolatile)(nil)).Elem()\n}\n\ntype GuestRegistryKeyParentVolatileFault GuestRegistryKeyParentVolatile\n\nfunc init() {\n\tt[\"GuestRegistryKeyParentVolatileFault\"] = reflect.TypeOf((*GuestRegistryKeyParentVolatileFault)(nil)).Elem()\n}\n\ntype GuestRegistryValueFault struct {\n\tGuestRegistryFault\n\n\tKeyName   string `xml:\"keyName\"`\n\tValueName string `xml:\"valueName\"`\n}\n\nfunc init() {\n\tt[\"GuestRegistryValueFault\"] = reflect.TypeOf((*GuestRegistryValueFault)(nil)).Elem()\n}\n\ntype GuestRegistryValueFaultFault BaseGuestRegistryValueFault\n\nfunc init() {\n\tt[\"GuestRegistryValueFaultFault\"] = reflect.TypeOf((*GuestRegistryValueFaultFault)(nil)).Elem()\n}\n\ntype GuestRegistryValueNotFound struct {\n\tGuestRegistryValueFault\n}\n\nfunc init() {\n\tt[\"GuestRegistryValueNotFound\"] = reflect.TypeOf((*GuestRegistryValueNotFound)(nil)).Elem()\n}\n\ntype GuestRegistryValueNotFoundFault GuestRegistryValueNotFound\n\nfunc init() {\n\tt[\"GuestRegistryValueNotFoundFault\"] = reflect.TypeOf((*GuestRegistryValueNotFoundFault)(nil)).Elem()\n}\n\ntype GuestScreenInfo struct {\n\tDynamicData\n\n\tWidth  int32 `xml:\"width\"`\n\tHeight int32 `xml:\"height\"`\n}\n\nfunc init() {\n\tt[\"GuestScreenInfo\"] = reflect.TypeOf((*GuestScreenInfo)(nil)).Elem()\n}\n\ntype GuestStackInfo struct {\n\tDynamicData\n\n\tDnsConfig     *NetDnsConfigInfo     `xml:\"dnsConfig,omitempty\"`\n\tIpRouteConfig *NetIpRouteConfigInfo `xml:\"ipRouteConfig,omitempty\"`\n\tIpStackConfig []KeyValue            
`xml:\"ipStackConfig,omitempty\"`\n\tDhcpConfig    *NetDhcpConfigInfo    `xml:\"dhcpConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"GuestStackInfo\"] = reflect.TypeOf((*GuestStackInfo)(nil)).Elem()\n}\n\ntype GuestWindowsFileAttributes struct {\n\tGuestFileAttributes\n\n\tHidden     *bool      `xml:\"hidden\"`\n\tReadOnly   *bool      `xml:\"readOnly\"`\n\tCreateTime *time.Time `xml:\"createTime\"`\n}\n\nfunc init() {\n\tt[\"GuestWindowsFileAttributes\"] = reflect.TypeOf((*GuestWindowsFileAttributes)(nil)).Elem()\n}\n\ntype GuestWindowsProgramSpec struct {\n\tGuestProgramSpec\n\n\tStartMinimized bool `xml:\"startMinimized\"`\n}\n\nfunc init() {\n\tt[\"GuestWindowsProgramSpec\"] = reflect.TypeOf((*GuestWindowsProgramSpec)(nil)).Elem()\n}\n\ntype HAErrorsAtDest struct {\n\tMigrationFault\n}\n\nfunc init() {\n\tt[\"HAErrorsAtDest\"] = reflect.TypeOf((*HAErrorsAtDest)(nil)).Elem()\n}\n\ntype HAErrorsAtDestFault HAErrorsAtDest\n\nfunc init() {\n\tt[\"HAErrorsAtDestFault\"] = reflect.TypeOf((*HAErrorsAtDestFault)(nil)).Elem()\n}\n\ntype HasMonitoredEntity HasMonitoredEntityRequestType\n\nfunc init() {\n\tt[\"HasMonitoredEntity\"] = reflect.TypeOf((*HasMonitoredEntity)(nil)).Elem()\n}\n\ntype HasMonitoredEntityRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tProviderId string                 `xml:\"providerId\"`\n\tEntity     ManagedObjectReference `xml:\"entity\"`\n}\n\nfunc init() {\n\tt[\"HasMonitoredEntityRequestType\"] = reflect.TypeOf((*HasMonitoredEntityRequestType)(nil)).Elem()\n}\n\ntype HasMonitoredEntityResponse struct {\n\tReturnval bool `xml:\"returnval\"`\n}\n\ntype HasPrivilegeOnEntities HasPrivilegeOnEntitiesRequestType\n\nfunc init() {\n\tt[\"HasPrivilegeOnEntities\"] = reflect.TypeOf((*HasPrivilegeOnEntities)(nil)).Elem()\n}\n\ntype HasPrivilegeOnEntitiesRequestType struct {\n\tThis      ManagedObjectReference   `xml:\"_this\"`\n\tEntity    []ManagedObjectReference `xml:\"entity\"`\n\tSessionId string                   
`xml:\"sessionId\"`\n\tPrivId    []string                 `xml:\"privId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HasPrivilegeOnEntitiesRequestType\"] = reflect.TypeOf((*HasPrivilegeOnEntitiesRequestType)(nil)).Elem()\n}\n\ntype HasPrivilegeOnEntitiesResponse struct {\n\tReturnval []EntityPrivilege `xml:\"returnval,omitempty\"`\n}\n\ntype HasPrivilegeOnEntity HasPrivilegeOnEntityRequestType\n\nfunc init() {\n\tt[\"HasPrivilegeOnEntity\"] = reflect.TypeOf((*HasPrivilegeOnEntity)(nil)).Elem()\n}\n\ntype HasPrivilegeOnEntityRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tEntity    ManagedObjectReference `xml:\"entity\"`\n\tSessionId string                 `xml:\"sessionId\"`\n\tPrivId    []string               `xml:\"privId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HasPrivilegeOnEntityRequestType\"] = reflect.TypeOf((*HasPrivilegeOnEntityRequestType)(nil)).Elem()\n}\n\ntype HasPrivilegeOnEntityResponse struct {\n\tReturnval []bool `xml:\"returnval,omitempty\"`\n}\n\ntype HasProvider HasProviderRequestType\n\nfunc init() {\n\tt[\"HasProvider\"] = reflect.TypeOf((*HasProvider)(nil)).Elem()\n}\n\ntype HasProviderRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tId   string                 `xml:\"id\"`\n}\n\nfunc init() {\n\tt[\"HasProviderRequestType\"] = reflect.TypeOf((*HasProviderRequestType)(nil)).Elem()\n}\n\ntype HasProviderResponse struct {\n\tReturnval bool `xml:\"returnval\"`\n}\n\ntype HasUserPrivilegeOnEntities HasUserPrivilegeOnEntitiesRequestType\n\nfunc init() {\n\tt[\"HasUserPrivilegeOnEntities\"] = reflect.TypeOf((*HasUserPrivilegeOnEntities)(nil)).Elem()\n}\n\ntype HasUserPrivilegeOnEntitiesRequestType struct {\n\tThis     ManagedObjectReference   `xml:\"_this\"`\n\tEntities []ManagedObjectReference `xml:\"entities\"`\n\tUserName string                   `xml:\"userName\"`\n\tPrivId   []string                 `xml:\"privId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HasUserPrivilegeOnEntitiesRequestType\"] = 
reflect.TypeOf((*HasUserPrivilegeOnEntitiesRequestType)(nil)).Elem()\n}\n\ntype HasUserPrivilegeOnEntitiesResponse struct {\n\tReturnval []EntityPrivilege `xml:\"returnval,omitempty\"`\n}\n\ntype HbrDiskMigrationAction struct {\n\tClusterAction\n\n\tCollectionId       string                 `xml:\"collectionId\"`\n\tCollectionName     string                 `xml:\"collectionName\"`\n\tDiskIds            []string               `xml:\"diskIds\"`\n\tSource             ManagedObjectReference `xml:\"source\"`\n\tDestination        ManagedObjectReference `xml:\"destination\"`\n\tSizeTransferred    int64                  `xml:\"sizeTransferred\"`\n\tSpaceUtilSrcBefore float32                `xml:\"spaceUtilSrcBefore,omitempty\"`\n\tSpaceUtilDstBefore float32                `xml:\"spaceUtilDstBefore,omitempty\"`\n\tSpaceUtilSrcAfter  float32                `xml:\"spaceUtilSrcAfter,omitempty\"`\n\tSpaceUtilDstAfter  float32                `xml:\"spaceUtilDstAfter,omitempty\"`\n\tIoLatencySrcBefore float32                `xml:\"ioLatencySrcBefore,omitempty\"`\n\tIoLatencyDstBefore float32                `xml:\"ioLatencyDstBefore,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HbrDiskMigrationAction\"] = reflect.TypeOf((*HbrDiskMigrationAction)(nil)).Elem()\n}\n\ntype HbrManagerReplicationVmInfo struct {\n\tDynamicData\n\n\tState        string                     `xml:\"state\"`\n\tProgressInfo *ReplicationVmProgressInfo `xml:\"progressInfo,omitempty\"`\n\tImageId      string                     `xml:\"imageId,omitempty\"`\n\tLastError    *LocalizedMethodFault      `xml:\"lastError,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HbrManagerReplicationVmInfo\"] = reflect.TypeOf((*HbrManagerReplicationVmInfo)(nil)).Elem()\n}\n\ntype HbrManagerVmReplicationCapability struct {\n\tDynamicData\n\n\tVm                             ManagedObjectReference `xml:\"vm\"`\n\tSupportedQuiesceMode           string                 `xml:\"supportedQuiesceMode\"`\n\tCompressionSupported           bool           
        `xml:\"compressionSupported\"`\n\tMaxSupportedSourceDiskCapacity int64                  `xml:\"maxSupportedSourceDiskCapacity\"`\n\tMinRpo                         int64                  `xml:\"minRpo,omitempty\"`\n\tFault                          *LocalizedMethodFault  `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HbrManagerVmReplicationCapability\"] = reflect.TypeOf((*HbrManagerVmReplicationCapability)(nil)).Elem()\n}\n\ntype HealthStatusChangedEvent struct {\n\tEvent\n\n\tComponentId   string `xml:\"componentId\"`\n\tOldStatus     string `xml:\"oldStatus\"`\n\tNewStatus     string `xml:\"newStatus\"`\n\tComponentName string `xml:\"componentName\"`\n\tServiceId     string `xml:\"serviceId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HealthStatusChangedEvent\"] = reflect.TypeOf((*HealthStatusChangedEvent)(nil)).Elem()\n}\n\ntype HealthSystemRuntime struct {\n\tDynamicData\n\n\tSystemHealthInfo   *HostSystemHealthInfo   `xml:\"systemHealthInfo,omitempty\"`\n\tHardwareStatusInfo *HostHardwareStatusInfo `xml:\"hardwareStatusInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HealthSystemRuntime\"] = reflect.TypeOf((*HealthSystemRuntime)(nil)).Elem()\n}\n\ntype HealthUpdate struct {\n\tDynamicData\n\n\tEntity             ManagedObjectReference `xml:\"entity\"`\n\tHealthUpdateInfoId string                 `xml:\"healthUpdateInfoId\"`\n\tId                 string                 `xml:\"id\"`\n\tStatus             ManagedEntityStatus    `xml:\"status\"`\n\tRemediation        string                 `xml:\"remediation\"`\n}\n\nfunc init() {\n\tt[\"HealthUpdate\"] = reflect.TypeOf((*HealthUpdate)(nil)).Elem()\n}\n\ntype HealthUpdateInfo struct {\n\tDynamicData\n\n\tId            string `xml:\"id\"`\n\tComponentType string `xml:\"componentType\"`\n\tDescription   string `xml:\"description\"`\n}\n\nfunc init() {\n\tt[\"HealthUpdateInfo\"] = reflect.TypeOf((*HealthUpdateInfo)(nil)).Elem()\n}\n\ntype HeterogenousHostsBlockingEVC struct {\n\tEVCConfigFault\n}\n\nfunc init() 
{\n\tt[\"HeterogenousHostsBlockingEVC\"] = reflect.TypeOf((*HeterogenousHostsBlockingEVC)(nil)).Elem()\n}\n\ntype HeterogenousHostsBlockingEVCFault HeterogenousHostsBlockingEVC\n\nfunc init() {\n\tt[\"HeterogenousHostsBlockingEVCFault\"] = reflect.TypeOf((*HeterogenousHostsBlockingEVCFault)(nil)).Elem()\n}\n\ntype HostAccessControlEntry struct {\n\tDynamicData\n\n\tPrincipal  string         `xml:\"principal\"`\n\tGroup      bool           `xml:\"group\"`\n\tAccessMode HostAccessMode `xml:\"accessMode\"`\n}\n\nfunc init() {\n\tt[\"HostAccessControlEntry\"] = reflect.TypeOf((*HostAccessControlEntry)(nil)).Elem()\n}\n\ntype HostAccessRestrictedToManagementServer struct {\n\tNotSupported\n\n\tManagementServer string `xml:\"managementServer\"`\n}\n\nfunc init() {\n\tt[\"HostAccessRestrictedToManagementServer\"] = reflect.TypeOf((*HostAccessRestrictedToManagementServer)(nil)).Elem()\n}\n\ntype HostAccessRestrictedToManagementServerFault HostAccessRestrictedToManagementServer\n\nfunc init() {\n\tt[\"HostAccessRestrictedToManagementServerFault\"] = reflect.TypeOf((*HostAccessRestrictedToManagementServerFault)(nil)).Elem()\n}\n\ntype HostAccountSpec struct {\n\tDynamicData\n\n\tId          string `xml:\"id\"`\n\tPassword    string `xml:\"password,omitempty\"`\n\tDescription string `xml:\"description,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostAccountSpec\"] = reflect.TypeOf((*HostAccountSpec)(nil)).Elem()\n}\n\ntype HostActiveDirectory struct {\n\tDynamicData\n\n\tChangeOperation string                   `xml:\"changeOperation\"`\n\tSpec            *HostActiveDirectorySpec `xml:\"spec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostActiveDirectory\"] = reflect.TypeOf((*HostActiveDirectory)(nil)).Elem()\n}\n\ntype HostActiveDirectoryInfo struct {\n\tHostDirectoryStoreInfo\n\n\tJoinedDomain                   string   `xml:\"joinedDomain,omitempty\"`\n\tTrustedDomain                  []string `xml:\"trustedDomain,omitempty\"`\n\tDomainMembershipStatus         string   
`xml:\"domainMembershipStatus,omitempty\"`\n\tSmartCardAuthenticationEnabled *bool    `xml:\"smartCardAuthenticationEnabled\"`\n}\n\nfunc init() {\n\tt[\"HostActiveDirectoryInfo\"] = reflect.TypeOf((*HostActiveDirectoryInfo)(nil)).Elem()\n}\n\ntype HostActiveDirectorySpec struct {\n\tDynamicData\n\n\tDomainName                     string   `xml:\"domainName,omitempty\"`\n\tUserName                       string   `xml:\"userName,omitempty\"`\n\tPassword                       string   `xml:\"password,omitempty\"`\n\tCamServer                      string   `xml:\"camServer,omitempty\"`\n\tThumbprint                     string   `xml:\"thumbprint,omitempty\"`\n\tSmartCardAuthenticationEnabled *bool    `xml:\"smartCardAuthenticationEnabled\"`\n\tSmartCardTrustAnchors          []string `xml:\"smartCardTrustAnchors,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostActiveDirectorySpec\"] = reflect.TypeOf((*HostActiveDirectorySpec)(nil)).Elem()\n}\n\ntype HostAddFailedEvent struct {\n\tHostEvent\n\n\tHostname string `xml:\"hostname\"`\n}\n\nfunc init() {\n\tt[\"HostAddFailedEvent\"] = reflect.TypeOf((*HostAddFailedEvent)(nil)).Elem()\n}\n\ntype HostAddedEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostAddedEvent\"] = reflect.TypeOf((*HostAddedEvent)(nil)).Elem()\n}\n\ntype HostAdminDisableEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostAdminDisableEvent\"] = reflect.TypeOf((*HostAdminDisableEvent)(nil)).Elem()\n}\n\ntype HostAdminEnableEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostAdminEnableEvent\"] = reflect.TypeOf((*HostAdminEnableEvent)(nil)).Elem()\n}\n\ntype HostApplyProfile struct {\n\tApplyProfile\n\n\tMemory           *HostMemoryProfile     `xml:\"memory,omitempty\"`\n\tStorage          *StorageProfile        `xml:\"storage,omitempty\"`\n\tNetwork          *NetworkProfile        `xml:\"network,omitempty\"`\n\tDatetime         *DateTimeProfile       `xml:\"datetime,omitempty\"`\n\tFirewall         *FirewallProfile       
`xml:\"firewall,omitempty\"`\n\tSecurity         *SecurityProfile       `xml:\"security,omitempty\"`\n\tService          []ServiceProfile       `xml:\"service,omitempty\"`\n\tOption           []OptionProfile        `xml:\"option,omitempty\"`\n\tUserAccount      []UserProfile          `xml:\"userAccount,omitempty\"`\n\tUsergroupAccount []UserGroupProfile     `xml:\"usergroupAccount,omitempty\"`\n\tAuthentication   *AuthenticationProfile `xml:\"authentication,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostApplyProfile\"] = reflect.TypeOf((*HostApplyProfile)(nil)).Elem()\n}\n\ntype HostAuthenticationManagerInfo struct {\n\tDynamicData\n\n\tAuthConfig []BaseHostAuthenticationStoreInfo `xml:\"authConfig,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostAuthenticationManagerInfo\"] = reflect.TypeOf((*HostAuthenticationManagerInfo)(nil)).Elem()\n}\n\ntype HostAuthenticationStoreInfo struct {\n\tDynamicData\n\n\tEnabled bool `xml:\"enabled\"`\n}\n\nfunc init() {\n\tt[\"HostAuthenticationStoreInfo\"] = reflect.TypeOf((*HostAuthenticationStoreInfo)(nil)).Elem()\n}\n\ntype HostAutoStartManagerConfig struct {\n\tDynamicData\n\n\tDefaults  *AutoStartDefaults   `xml:\"defaults,omitempty\"`\n\tPowerInfo []AutoStartPowerInfo `xml:\"powerInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostAutoStartManagerConfig\"] = reflect.TypeOf((*HostAutoStartManagerConfig)(nil)).Elem()\n}\n\ntype HostBIOSInfo struct {\n\tDynamicData\n\n\tBiosVersion          string     `xml:\"biosVersion,omitempty\"`\n\tReleaseDate          *time.Time `xml:\"releaseDate\"`\n\tVendor               string     `xml:\"vendor,omitempty\"`\n\tMajorRelease         int32      `xml:\"majorRelease,omitempty\"`\n\tMinorRelease         int32      `xml:\"minorRelease,omitempty\"`\n\tFirmwareMajorRelease int32      `xml:\"firmwareMajorRelease,omitempty\"`\n\tFirmwareMinorRelease int32      `xml:\"firmwareMinorRelease,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostBIOSInfo\"] = reflect.TypeOf((*HostBIOSInfo)(nil)).Elem()\n}\n\ntype 
HostBlockAdapterTargetTransport struct {\n\tHostTargetTransport\n}\n\nfunc init() {\n\tt[\"HostBlockAdapterTargetTransport\"] = reflect.TypeOf((*HostBlockAdapterTargetTransport)(nil)).Elem()\n}\n\ntype HostBlockHba struct {\n\tHostHostBusAdapter\n}\n\nfunc init() {\n\tt[\"HostBlockHba\"] = reflect.TypeOf((*HostBlockHba)(nil)).Elem()\n}\n\ntype HostBootDevice struct {\n\tDynamicData\n\n\tKey         string `xml:\"key\"`\n\tDescription string `xml:\"description\"`\n}\n\nfunc init() {\n\tt[\"HostBootDevice\"] = reflect.TypeOf((*HostBootDevice)(nil)).Elem()\n}\n\ntype HostBootDeviceInfo struct {\n\tDynamicData\n\n\tBootDevices          []HostBootDevice `xml:\"bootDevices,omitempty\"`\n\tCurrentBootDeviceKey string           `xml:\"currentBootDeviceKey,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostBootDeviceInfo\"] = reflect.TypeOf((*HostBootDeviceInfo)(nil)).Elem()\n}\n\ntype HostCacheConfigurationInfo struct {\n\tDynamicData\n\n\tKey      ManagedObjectReference `xml:\"key\"`\n\tSwapSize int64                  `xml:\"swapSize\"`\n}\n\nfunc init() {\n\tt[\"HostCacheConfigurationInfo\"] = reflect.TypeOf((*HostCacheConfigurationInfo)(nil)).Elem()\n}\n\ntype HostCacheConfigurationSpec struct {\n\tDynamicData\n\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n\tSwapSize  int64                  `xml:\"swapSize\"`\n}\n\nfunc init() {\n\tt[\"HostCacheConfigurationSpec\"] = reflect.TypeOf((*HostCacheConfigurationSpec)(nil)).Elem()\n}\n\ntype HostCapability struct {\n\tDynamicData\n\n\tRecursiveResourcePoolsSupported           bool            `xml:\"recursiveResourcePoolsSupported\"`\n\tCpuMemoryResourceConfigurationSupported   bool            `xml:\"cpuMemoryResourceConfigurationSupported\"`\n\tRebootSupported                           bool            `xml:\"rebootSupported\"`\n\tShutdownSupported                         bool            `xml:\"shutdownSupported\"`\n\tVmotionSupported                          bool            `xml:\"vmotionSupported\"`\n\tStandbySupported   
                       bool            `xml:\"standbySupported\"`\n\tIpmiSupported                             *bool           `xml:\"ipmiSupported\"`\n\tMaxSupportedVMs                           int32           `xml:\"maxSupportedVMs,omitempty\"`\n\tMaxRunningVMs                             int32           `xml:\"maxRunningVMs,omitempty\"`\n\tMaxSupportedVcpus                         int32           `xml:\"maxSupportedVcpus,omitempty\"`\n\tMaxRegisteredVMs                          int32           `xml:\"maxRegisteredVMs,omitempty\"`\n\tDatastorePrincipalSupported               bool            `xml:\"datastorePrincipalSupported\"`\n\tSanSupported                              bool            `xml:\"sanSupported\"`\n\tNfsSupported                              bool            `xml:\"nfsSupported\"`\n\tIscsiSupported                            bool            `xml:\"iscsiSupported\"`\n\tVlanTaggingSupported                      bool            `xml:\"vlanTaggingSupported\"`\n\tNicTeamingSupported                       bool            `xml:\"nicTeamingSupported\"`\n\tHighGuestMemSupported                     bool            `xml:\"highGuestMemSupported\"`\n\tMaintenanceModeSupported                  bool            `xml:\"maintenanceModeSupported\"`\n\tSuspendedRelocateSupported                bool            `xml:\"suspendedRelocateSupported\"`\n\tRestrictedSnapshotRelocateSupported       bool            `xml:\"restrictedSnapshotRelocateSupported\"`\n\tPerVmSwapFiles                            bool            `xml:\"perVmSwapFiles\"`\n\tLocalSwapDatastoreSupported               bool            `xml:\"localSwapDatastoreSupported\"`\n\tUnsharedSwapVMotionSupported              bool            `xml:\"unsharedSwapVMotionSupported\"`\n\tBackgroundSnapshotsSupported              bool            `xml:\"backgroundSnapshotsSupported\"`\n\tPreAssignedPCIUnitNumbersSupported        bool            `xml:\"preAssignedPCIUnitNumbersSupported\"`\n\tScreenshotSupported                 
      bool            `xml:\"screenshotSupported\"`\n\tScaledScreenshotSupported                 bool            `xml:\"scaledScreenshotSupported\"`\n\tStorageVMotionSupported                   *bool           `xml:\"storageVMotionSupported\"`\n\tVmotionWithStorageVMotionSupported        *bool           `xml:\"vmotionWithStorageVMotionSupported\"`\n\tVmotionAcrossNetworkSupported             *bool           `xml:\"vmotionAcrossNetworkSupported\"`\n\tMaxNumDisksSVMotion                       int32           `xml:\"maxNumDisksSVMotion,omitempty\"`\n\tHbrNicSelectionSupported                  *bool           `xml:\"hbrNicSelectionSupported\"`\n\tVrNfcNicSelectionSupported                *bool           `xml:\"vrNfcNicSelectionSupported\"`\n\tRecordReplaySupported                     *bool           `xml:\"recordReplaySupported\"`\n\tFtSupported                               *bool           `xml:\"ftSupported\"`\n\tReplayUnsupportedReason                   string          `xml:\"replayUnsupportedReason,omitempty\"`\n\tReplayCompatibilityIssues                 []string        `xml:\"replayCompatibilityIssues,omitempty\"`\n\tSmpFtSupported                            *bool           `xml:\"smpFtSupported\"`\n\tFtCompatibilityIssues                     []string        `xml:\"ftCompatibilityIssues,omitempty\"`\n\tSmpFtCompatibilityIssues                  []string        `xml:\"smpFtCompatibilityIssues,omitempty\"`\n\tMaxVcpusPerFtVm                           int32           `xml:\"maxVcpusPerFtVm,omitempty\"`\n\tLoginBySSLThumbprintSupported             *bool           `xml:\"loginBySSLThumbprintSupported\"`\n\tCloneFromSnapshotSupported                *bool           `xml:\"cloneFromSnapshotSupported\"`\n\tDeltaDiskBackingsSupported                *bool           `xml:\"deltaDiskBackingsSupported\"`\n\tPerVMNetworkTrafficShapingSupported       *bool           `xml:\"perVMNetworkTrafficShapingSupported\"`\n\tTpmSupported                              *bool           
`xml:\"tpmSupported\"`\n\tSupportedCpuFeature                       []HostCpuIdInfo `xml:\"supportedCpuFeature,omitempty\"`\n\tVirtualExecUsageSupported                 *bool           `xml:\"virtualExecUsageSupported\"`\n\tStorageIORMSupported                      *bool           `xml:\"storageIORMSupported\"`\n\tVmDirectPathGen2Supported                 *bool           `xml:\"vmDirectPathGen2Supported\"`\n\tVmDirectPathGen2UnsupportedReason         []string        `xml:\"vmDirectPathGen2UnsupportedReason,omitempty\"`\n\tVmDirectPathGen2UnsupportedReasonExtended string          `xml:\"vmDirectPathGen2UnsupportedReasonExtended,omitempty\"`\n\tSupportedVmfsMajorVersion                 []int32         `xml:\"supportedVmfsMajorVersion,omitempty\"`\n\tVStorageCapable                           *bool           `xml:\"vStorageCapable\"`\n\tSnapshotRelayoutSupported                 *bool           `xml:\"snapshotRelayoutSupported\"`\n\tFirewallIpRulesSupported                  *bool           `xml:\"firewallIpRulesSupported\"`\n\tServicePackageInfoSupported               *bool           `xml:\"servicePackageInfoSupported\"`\n\tMaxHostRunningVms                         int32           `xml:\"maxHostRunningVms,omitempty\"`\n\tMaxHostSupportedVcpus                     int32           `xml:\"maxHostSupportedVcpus,omitempty\"`\n\tVmfsDatastoreMountCapable                 *bool           `xml:\"vmfsDatastoreMountCapable\"`\n\tEightPlusHostVmfsSharedAccessSupported    *bool           `xml:\"eightPlusHostVmfsSharedAccessSupported\"`\n\tNestedHVSupported                         *bool           `xml:\"nestedHVSupported\"`\n\tVPMCSupported                             *bool           `xml:\"vPMCSupported\"`\n\tInterVMCommunicationThroughVMCISupported  *bool           `xml:\"interVMCommunicationThroughVMCISupported\"`\n\tScheduledHardwareUpgradeSupported         *bool           `xml:\"scheduledHardwareUpgradeSupported\"`\n\tFeatureCapabilitiesSupported              *bool           
`xml:\"featureCapabilitiesSupported\"`\n\tLatencySensitivitySupported               *bool           `xml:\"latencySensitivitySupported\"`\n\tStoragePolicySupported                    *bool           `xml:\"storagePolicySupported\"`\n\tAccel3dSupported                          *bool           `xml:\"accel3dSupported\"`\n\tReliableMemoryAware                       *bool           `xml:\"reliableMemoryAware\"`\n\tMultipleNetworkStackInstanceSupported     *bool           `xml:\"multipleNetworkStackInstanceSupported\"`\n\tMessageBusProxySupported                  *bool           `xml:\"messageBusProxySupported\"`\n\tVsanSupported                             *bool           `xml:\"vsanSupported\"`\n\tVFlashSupported                           *bool           `xml:\"vFlashSupported\"`\n\tHostAccessManagerSupported                *bool           `xml:\"hostAccessManagerSupported\"`\n\tProvisioningNicSelectionSupported         *bool           `xml:\"provisioningNicSelectionSupported\"`\n\tNfs41Supported                            *bool           `xml:\"nfs41Supported\"`\n\tNfs41Krb5iSupported                       *bool           `xml:\"nfs41Krb5iSupported\"`\n\tTurnDiskLocatorLedSupported               *bool           `xml:\"turnDiskLocatorLedSupported\"`\n\tVirtualVolumeDatastoreSupported           *bool           `xml:\"virtualVolumeDatastoreSupported\"`\n\tMarkAsSsdSupported                        *bool           `xml:\"markAsSsdSupported\"`\n\tMarkAsLocalSupported                      *bool           `xml:\"markAsLocalSupported\"`\n\tSmartCardAuthenticationSupported          *bool           `xml:\"smartCardAuthenticationSupported\"`\n\tCryptoSupported                           *bool           `xml:\"cryptoSupported\"`\n\tOneKVolumeAPIsSupported                   *bool           `xml:\"oneKVolumeAPIsSupported\"`\n\tGatewayOnNicSupported                     *bool           `xml:\"gatewayOnNicSupported\"`\n\tUpitSupported                             *bool           
`xml:\"upitSupported\"`\n\tCpuHwMmuSupported                         *bool           `xml:\"cpuHwMmuSupported\"`\n\tEncryptedVMotionSupported                 *bool           `xml:\"encryptedVMotionSupported\"`\n\tEncryptionChangeOnAddRemoveSupported      *bool           `xml:\"encryptionChangeOnAddRemoveSupported\"`\n\tEncryptionHotOperationSupported           *bool           `xml:\"encryptionHotOperationSupported\"`\n\tEncryptionWithSnapshotsSupported          *bool           `xml:\"encryptionWithSnapshotsSupported\"`\n\tEncryptionFaultToleranceSupported         *bool           `xml:\"encryptionFaultToleranceSupported\"`\n\tEncryptionMemorySaveSupported             *bool           `xml:\"encryptionMemorySaveSupported\"`\n\tEncryptionRDMSupported                    *bool           `xml:\"encryptionRDMSupported\"`\n\tEncryptionVFlashSupported                 *bool           `xml:\"encryptionVFlashSupported\"`\n\tEncryptionCBRCSupported                   *bool           `xml:\"encryptionCBRCSupported\"`\n\tEncryptionHBRSupported                    *bool           `xml:\"encryptionHBRSupported\"`\n}\n\nfunc init() {\n\tt[\"HostCapability\"] = reflect.TypeOf((*HostCapability)(nil)).Elem()\n}\n\ntype HostCertificateManagerCertificateInfo struct {\n\tDynamicData\n\n\tIssuer    string     `xml:\"issuer,omitempty\"`\n\tNotBefore *time.Time `xml:\"notBefore\"`\n\tNotAfter  *time.Time `xml:\"notAfter\"`\n\tSubject   string     `xml:\"subject,omitempty\"`\n\tStatus    string     `xml:\"status\"`\n}\n\nfunc init() {\n\tt[\"HostCertificateManagerCertificateInfo\"] = reflect.TypeOf((*HostCertificateManagerCertificateInfo)(nil)).Elem()\n}\n\ntype HostCloneVStorageObjectRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tId        ID                     `xml:\"id\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n\tSpec      VslmCloneSpec          `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"HostCloneVStorageObjectRequestType\"] = 
reflect.TypeOf((*HostCloneVStorageObjectRequestType)(nil)).Elem()\n}\n\ntype HostCloneVStorageObject_Task HostCloneVStorageObjectRequestType\n\nfunc init() {\n\tt[\"HostCloneVStorageObject_Task\"] = reflect.TypeOf((*HostCloneVStorageObject_Task)(nil)).Elem()\n}\n\ntype HostCloneVStorageObject_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype HostCnxFailedAccountFailedEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostCnxFailedAccountFailedEvent\"] = reflect.TypeOf((*HostCnxFailedAccountFailedEvent)(nil)).Elem()\n}\n\ntype HostCnxFailedAlreadyManagedEvent struct {\n\tHostEvent\n\n\tServerName string `xml:\"serverName\"`\n}\n\nfunc init() {\n\tt[\"HostCnxFailedAlreadyManagedEvent\"] = reflect.TypeOf((*HostCnxFailedAlreadyManagedEvent)(nil)).Elem()\n}\n\ntype HostCnxFailedBadCcagentEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostCnxFailedBadCcagentEvent\"] = reflect.TypeOf((*HostCnxFailedBadCcagentEvent)(nil)).Elem()\n}\n\ntype HostCnxFailedBadUsernameEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostCnxFailedBadUsernameEvent\"] = reflect.TypeOf((*HostCnxFailedBadUsernameEvent)(nil)).Elem()\n}\n\ntype HostCnxFailedBadVersionEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostCnxFailedBadVersionEvent\"] = reflect.TypeOf((*HostCnxFailedBadVersionEvent)(nil)).Elem()\n}\n\ntype HostCnxFailedCcagentUpgradeEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostCnxFailedCcagentUpgradeEvent\"] = reflect.TypeOf((*HostCnxFailedCcagentUpgradeEvent)(nil)).Elem()\n}\n\ntype HostCnxFailedEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostCnxFailedEvent\"] = reflect.TypeOf((*HostCnxFailedEvent)(nil)).Elem()\n}\n\ntype HostCnxFailedNetworkErrorEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostCnxFailedNetworkErrorEvent\"] = reflect.TypeOf((*HostCnxFailedNetworkErrorEvent)(nil)).Elem()\n}\n\ntype HostCnxFailedNoAccessEvent struct {\n\tHostEvent\n}\n\nfunc init() 
{\n\tt[\"HostCnxFailedNoAccessEvent\"] = reflect.TypeOf((*HostCnxFailedNoAccessEvent)(nil)).Elem()\n}\n\ntype HostCnxFailedNoConnectionEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostCnxFailedNoConnectionEvent\"] = reflect.TypeOf((*HostCnxFailedNoConnectionEvent)(nil)).Elem()\n}\n\ntype HostCnxFailedNoLicenseEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostCnxFailedNoLicenseEvent\"] = reflect.TypeOf((*HostCnxFailedNoLicenseEvent)(nil)).Elem()\n}\n\ntype HostCnxFailedNotFoundEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostCnxFailedNotFoundEvent\"] = reflect.TypeOf((*HostCnxFailedNotFoundEvent)(nil)).Elem()\n}\n\ntype HostCnxFailedTimeoutEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostCnxFailedTimeoutEvent\"] = reflect.TypeOf((*HostCnxFailedTimeoutEvent)(nil)).Elem()\n}\n\ntype HostCommunication struct {\n\tRuntimeFault\n}\n\nfunc init() {\n\tt[\"HostCommunication\"] = reflect.TypeOf((*HostCommunication)(nil)).Elem()\n}\n\ntype HostCommunicationFault BaseHostCommunication\n\nfunc init() {\n\tt[\"HostCommunicationFault\"] = reflect.TypeOf((*HostCommunicationFault)(nil)).Elem()\n}\n\ntype HostComplianceCheckedEvent struct {\n\tHostEvent\n\n\tProfile ProfileEventArgument `xml:\"profile\"`\n}\n\nfunc init() {\n\tt[\"HostComplianceCheckedEvent\"] = reflect.TypeOf((*HostComplianceCheckedEvent)(nil)).Elem()\n}\n\ntype HostCompliantEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostCompliantEvent\"] = reflect.TypeOf((*HostCompliantEvent)(nil)).Elem()\n}\n\ntype HostConfigAppliedEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostConfigAppliedEvent\"] = reflect.TypeOf((*HostConfigAppliedEvent)(nil)).Elem()\n}\n\ntype HostConfigChange struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"HostConfigChange\"] = reflect.TypeOf((*HostConfigChange)(nil)).Elem()\n}\n\ntype HostConfigFailed struct {\n\tHostConfigFault\n\n\tFailure []LocalizedMethodFault `xml:\"failure\"`\n}\n\nfunc init() {\n\tt[\"HostConfigFailed\"] = 
reflect.TypeOf((*HostConfigFailed)(nil)).Elem()\n}\n\ntype HostConfigFailedFault HostConfigFailed\n\nfunc init() {\n\tt[\"HostConfigFailedFault\"] = reflect.TypeOf((*HostConfigFailedFault)(nil)).Elem()\n}\n\ntype HostConfigFault struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"HostConfigFault\"] = reflect.TypeOf((*HostConfigFault)(nil)).Elem()\n}\n\ntype HostConfigFaultFault BaseHostConfigFault\n\nfunc init() {\n\tt[\"HostConfigFaultFault\"] = reflect.TypeOf((*HostConfigFaultFault)(nil)).Elem()\n}\n\ntype HostConfigInfo struct {\n\tDynamicData\n\n\tHost                      ManagedObjectReference               `xml:\"host\"`\n\tProduct                   AboutInfo                            `xml:\"product\"`\n\tDeploymentInfo            *HostDeploymentInfo                  `xml:\"deploymentInfo,omitempty\"`\n\tHyperThread               *HostHyperThreadScheduleInfo         `xml:\"hyperThread,omitempty\"`\n\tConsoleReservation        *ServiceConsoleReservationInfo       `xml:\"consoleReservation,omitempty\"`\n\tVirtualMachineReservation *VirtualMachineMemoryReservationInfo `xml:\"virtualMachineReservation,omitempty\"`\n\tStorageDevice             *HostStorageDeviceInfo               `xml:\"storageDevice,omitempty\"`\n\tMultipathState            *HostMultipathStateInfo              `xml:\"multipathState,omitempty\"`\n\tFileSystemVolume          *HostFileSystemVolumeInfo            `xml:\"fileSystemVolume,omitempty\"`\n\tSystemFile                []string                             `xml:\"systemFile,omitempty\"`\n\tNetwork                   *HostNetworkInfo                     `xml:\"network,omitempty\"`\n\tVmotion                   *HostVMotionInfo                     `xml:\"vmotion,omitempty\"`\n\tVirtualNicManagerInfo     *HostVirtualNicManagerInfo           `xml:\"virtualNicManagerInfo,omitempty\"`\n\tCapabilities              *HostNetCapabilities                 `xml:\"capabilities,omitempty\"`\n\tDatastoreCapabilities     *HostDatastoreSystemCapabilities     
`xml:\"datastoreCapabilities,omitempty\"`\n\tOffloadCapabilities       *HostNetOffloadCapabilities          `xml:\"offloadCapabilities,omitempty\"`\n\tService                   *HostServiceInfo                     `xml:\"service,omitempty\"`\n\tFirewall                  *HostFirewallInfo                    `xml:\"firewall,omitempty\"`\n\tAutoStart                 *HostAutoStartManagerConfig          `xml:\"autoStart,omitempty\"`\n\tActiveDiagnosticPartition *HostDiagnosticPartition             `xml:\"activeDiagnosticPartition,omitempty\"`\n\tOption                    []BaseOptionValue                    `xml:\"option,omitempty,typeattr\"`\n\tOptionDef                 []OptionDef                          `xml:\"optionDef,omitempty\"`\n\tDatastorePrincipal        string                               `xml:\"datastorePrincipal,omitempty\"`\n\tLocalSwapDatastore        *ManagedObjectReference              `xml:\"localSwapDatastore,omitempty\"`\n\tSystemSwapConfiguration   *HostSystemSwapConfiguration         `xml:\"systemSwapConfiguration,omitempty\"`\n\tSystemResources           *HostSystemResourceInfo              `xml:\"systemResources,omitempty\"`\n\tDateTimeInfo              *HostDateTimeInfo                    `xml:\"dateTimeInfo,omitempty\"`\n\tFlags                     *HostFlagInfo                        `xml:\"flags,omitempty\"`\n\tAdminDisabled             *bool                                `xml:\"adminDisabled\"`\n\tLockdownMode              HostLockdownMode                     `xml:\"lockdownMode,omitempty\"`\n\tIpmi                      *HostIpmiInfo                        `xml:\"ipmi,omitempty\"`\n\tSslThumbprintInfo         *HostSslThumbprintInfo               `xml:\"sslThumbprintInfo,omitempty\"`\n\tSslThumbprintData         []HostSslThumbprintInfo              `xml:\"sslThumbprintData,omitempty\"`\n\tCertificate               []byte                               `xml:\"certificate,omitempty\"`\n\tPciPassthruInfo           []BaseHostPciPassthruInfo    
        `xml:\"pciPassthruInfo,omitempty,typeattr\"`\n\tAuthenticationManagerInfo *HostAuthenticationManagerInfo       `xml:\"authenticationManagerInfo,omitempty\"`\n\tFeatureVersion            []HostFeatureVersionInfo             `xml:\"featureVersion,omitempty\"`\n\tPowerSystemCapability     *PowerSystemCapability               `xml:\"powerSystemCapability,omitempty\"`\n\tPowerSystemInfo           *PowerSystemInfo                     `xml:\"powerSystemInfo,omitempty\"`\n\tCacheConfigurationInfo    []HostCacheConfigurationInfo         `xml:\"cacheConfigurationInfo,omitempty\"`\n\tWakeOnLanCapable          *bool                                `xml:\"wakeOnLanCapable\"`\n\tFeatureCapability         []HostFeatureCapability              `xml:\"featureCapability,omitempty\"`\n\tMaskedFeatureCapability   []HostFeatureCapability              `xml:\"maskedFeatureCapability,omitempty\"`\n\tVFlashConfigInfo          *HostVFlashManagerVFlashConfigInfo   `xml:\"vFlashConfigInfo,omitempty\"`\n\tVsanHostConfig            *VsanHostConfigInfo                  `xml:\"vsanHostConfig,omitempty\"`\n\tDomainList                []string                             `xml:\"domainList,omitempty\"`\n\tScriptCheckSum            []byte                               `xml:\"scriptCheckSum,omitempty\"`\n\tHostConfigCheckSum        []byte                               `xml:\"hostConfigCheckSum,omitempty\"`\n\tGraphicsInfo              []HostGraphicsInfo                   `xml:\"graphicsInfo,omitempty\"`\n\tSharedPassthruGpuTypes    []string                             `xml:\"sharedPassthruGpuTypes,omitempty\"`\n\tGraphicsConfig            *HostGraphicsConfig                  `xml:\"graphicsConfig,omitempty\"`\n\tIoFilterInfo              []HostIoFilterInfo                   `xml:\"ioFilterInfo,omitempty\"`\n\tSriovDevicePool           []BaseHostSriovDevicePoolInfo        `xml:\"sriovDevicePool,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostConfigInfo\"] = 
reflect.TypeOf((*HostConfigInfo)(nil)).Elem()\n}\n\ntype HostConfigManager struct {\n\tDynamicData\n\n\tCpuScheduler              *ManagedObjectReference `xml:\"cpuScheduler,omitempty\"`\n\tDatastoreSystem           *ManagedObjectReference `xml:\"datastoreSystem,omitempty\"`\n\tMemoryManager             *ManagedObjectReference `xml:\"memoryManager,omitempty\"`\n\tStorageSystem             *ManagedObjectReference `xml:\"storageSystem,omitempty\"`\n\tNetworkSystem             *ManagedObjectReference `xml:\"networkSystem,omitempty\"`\n\tVmotionSystem             *ManagedObjectReference `xml:\"vmotionSystem,omitempty\"`\n\tVirtualNicManager         *ManagedObjectReference `xml:\"virtualNicManager,omitempty\"`\n\tServiceSystem             *ManagedObjectReference `xml:\"serviceSystem,omitempty\"`\n\tFirewallSystem            *ManagedObjectReference `xml:\"firewallSystem,omitempty\"`\n\tAdvancedOption            *ManagedObjectReference `xml:\"advancedOption,omitempty\"`\n\tDiagnosticSystem          *ManagedObjectReference `xml:\"diagnosticSystem,omitempty\"`\n\tAutoStartManager          *ManagedObjectReference `xml:\"autoStartManager,omitempty\"`\n\tSnmpSystem                *ManagedObjectReference `xml:\"snmpSystem,omitempty\"`\n\tDateTimeSystem            *ManagedObjectReference `xml:\"dateTimeSystem,omitempty\"`\n\tPatchManager              *ManagedObjectReference `xml:\"patchManager,omitempty\"`\n\tImageConfigManager        *ManagedObjectReference `xml:\"imageConfigManager,omitempty\"`\n\tBootDeviceSystem          *ManagedObjectReference `xml:\"bootDeviceSystem,omitempty\"`\n\tFirmwareSystem            *ManagedObjectReference `xml:\"firmwareSystem,omitempty\"`\n\tHealthStatusSystem        *ManagedObjectReference `xml:\"healthStatusSystem,omitempty\"`\n\tPciPassthruSystem         *ManagedObjectReference `xml:\"pciPassthruSystem,omitempty\"`\n\tLicenseManager            *ManagedObjectReference `xml:\"licenseManager,omitempty\"`\n\tKernelModuleSystem        
*ManagedObjectReference `xml:\"kernelModuleSystem,omitempty\"`\n\tAuthenticationManager     *ManagedObjectReference `xml:\"authenticationManager,omitempty\"`\n\tPowerSystem               *ManagedObjectReference `xml:\"powerSystem,omitempty\"`\n\tCacheConfigurationManager *ManagedObjectReference `xml:\"cacheConfigurationManager,omitempty\"`\n\tEsxAgentHostManager       *ManagedObjectReference `xml:\"esxAgentHostManager,omitempty\"`\n\tIscsiManager              *ManagedObjectReference `xml:\"iscsiManager,omitempty\"`\n\tVFlashManager             *ManagedObjectReference `xml:\"vFlashManager,omitempty\"`\n\tVsanSystem                *ManagedObjectReference `xml:\"vsanSystem,omitempty\"`\n\tMessageBusProxy           *ManagedObjectReference `xml:\"messageBusProxy,omitempty\"`\n\tUserDirectory             *ManagedObjectReference `xml:\"userDirectory,omitempty\"`\n\tAccountManager            *ManagedObjectReference `xml:\"accountManager,omitempty\"`\n\tHostAccessManager         *ManagedObjectReference `xml:\"hostAccessManager,omitempty\"`\n\tGraphicsManager           *ManagedObjectReference `xml:\"graphicsManager,omitempty\"`\n\tVsanInternalSystem        *ManagedObjectReference `xml:\"vsanInternalSystem,omitempty\"`\n\tCertificateManager        *ManagedObjectReference `xml:\"certificateManager,omitempty\"`\n\tCryptoManager             *ManagedObjectReference `xml:\"cryptoManager,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostConfigManager\"] = reflect.TypeOf((*HostConfigManager)(nil)).Elem()\n}\n\ntype HostConfigSpec struct {\n\tDynamicData\n\n\tNasDatastore             []HostNasVolumeConfig                   `xml:\"nasDatastore,omitempty\"`\n\tNetwork                  *HostNetworkConfig                      `xml:\"network,omitempty\"`\n\tNicTypeSelection         []HostVirtualNicManagerNicTypeSelection `xml:\"nicTypeSelection,omitempty\"`\n\tService                  []HostServiceConfig                     `xml:\"service,omitempty\"`\n\tFirewall                 
*HostFirewallConfig                     `xml:\"firewall,omitempty\"`\n\tOption                   []BaseOptionValue                       `xml:\"option,omitempty,typeattr\"`\n\tDatastorePrincipal       string                                  `xml:\"datastorePrincipal,omitempty\"`\n\tDatastorePrincipalPasswd string                                  `xml:\"datastorePrincipalPasswd,omitempty\"`\n\tDatetime                 *HostDateTimeConfig                     `xml:\"datetime,omitempty\"`\n\tStorageDevice            *HostStorageDeviceInfo                  `xml:\"storageDevice,omitempty\"`\n\tLicense                  *HostLicenseSpec                        `xml:\"license,omitempty\"`\n\tSecurity                 *HostSecuritySpec                       `xml:\"security,omitempty\"`\n\tUserAccount              []BaseHostAccountSpec                   `xml:\"userAccount,omitempty,typeattr\"`\n\tUsergroupAccount         []BaseHostAccountSpec                   `xml:\"usergroupAccount,omitempty,typeattr\"`\n\tMemory                   *HostMemorySpec                         `xml:\"memory,omitempty\"`\n\tActiveDirectory          []HostActiveDirectory                   `xml:\"activeDirectory,omitempty\"`\n\tGenericConfig            []KeyAnyValue                           `xml:\"genericConfig,omitempty\"`\n\tGraphicsConfig           *HostGraphicsConfig                     `xml:\"graphicsConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostConfigSpec\"] = reflect.TypeOf((*HostConfigSpec)(nil)).Elem()\n}\n\ntype HostConfigSummary struct {\n\tDynamicData\n\n\tName                  string                   `xml:\"name\"`\n\tPort                  int32                    `xml:\"port\"`\n\tSslThumbprint         string                   `xml:\"sslThumbprint,omitempty\"`\n\tProduct               *AboutInfo               `xml:\"product,omitempty\"`\n\tVmotionEnabled        bool                     `xml:\"vmotionEnabled\"`\n\tFaultToleranceEnabled *bool                    
`xml:\"faultToleranceEnabled\"`\n\tFeatureVersion        []HostFeatureVersionInfo `xml:\"featureVersion,omitempty\"`\n\tAgentVmDatastore      *ManagedObjectReference  `xml:\"agentVmDatastore,omitempty\"`\n\tAgentVmNetwork        *ManagedObjectReference  `xml:\"agentVmNetwork,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostConfigSummary\"] = reflect.TypeOf((*HostConfigSummary)(nil)).Elem()\n}\n\ntype HostConfigVFlashCache HostConfigVFlashCacheRequestType\n\nfunc init() {\n\tt[\"HostConfigVFlashCache\"] = reflect.TypeOf((*HostConfigVFlashCache)(nil)).Elem()\n}\n\ntype HostConfigVFlashCacheRequestType struct {\n\tThis ManagedObjectReference                 `xml:\"_this\"`\n\tSpec HostVFlashManagerVFlashCacheConfigSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"HostConfigVFlashCacheRequestType\"] = reflect.TypeOf((*HostConfigVFlashCacheRequestType)(nil)).Elem()\n}\n\ntype HostConfigVFlashCacheResponse struct {\n}\n\ntype HostConfigureVFlashResource HostConfigureVFlashResourceRequestType\n\nfunc init() {\n\tt[\"HostConfigureVFlashResource\"] = reflect.TypeOf((*HostConfigureVFlashResource)(nil)).Elem()\n}\n\ntype HostConfigureVFlashResourceRequestType struct {\n\tThis ManagedObjectReference                    `xml:\"_this\"`\n\tSpec HostVFlashManagerVFlashResourceConfigSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"HostConfigureVFlashResourceRequestType\"] = reflect.TypeOf((*HostConfigureVFlashResourceRequestType)(nil)).Elem()\n}\n\ntype HostConfigureVFlashResourceResponse struct {\n}\n\ntype HostConnectFault struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"HostConnectFault\"] = reflect.TypeOf((*HostConnectFault)(nil)).Elem()\n}\n\ntype HostConnectFaultFault BaseHostConnectFault\n\nfunc init() {\n\tt[\"HostConnectFaultFault\"] = reflect.TypeOf((*HostConnectFaultFault)(nil)).Elem()\n}\n\ntype HostConnectInfo struct {\n\tDynamicData\n\n\tServerIp               string                           `xml:\"serverIp,omitempty\"`\n\tInDasCluster           *bool                            
`xml:\"inDasCluster\"`\n\tHost                   HostListSummary                  `xml:\"host\"`\n\tVm                     []VirtualMachineSummary          `xml:\"vm,omitempty\"`\n\tVimAccountNameRequired *bool                            `xml:\"vimAccountNameRequired\"`\n\tClusterSupported       *bool                            `xml:\"clusterSupported\"`\n\tNetwork                []BaseHostConnectInfoNetworkInfo `xml:\"network,omitempty,typeattr\"`\n\tDatastore              []BaseHostDatastoreConnectInfo   `xml:\"datastore,omitempty,typeattr\"`\n\tLicense                *HostLicenseConnectInfo          `xml:\"license,omitempty\"`\n\tCapability             *HostCapability                  `xml:\"capability,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostConnectInfo\"] = reflect.TypeOf((*HostConnectInfo)(nil)).Elem()\n}\n\ntype HostConnectInfoNetworkInfo struct {\n\tDynamicData\n\n\tSummary BaseNetworkSummary `xml:\"summary,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostConnectInfoNetworkInfo\"] = reflect.TypeOf((*HostConnectInfoNetworkInfo)(nil)).Elem()\n}\n\ntype HostConnectSpec struct {\n\tDynamicData\n\n\tHostName           string                  `xml:\"hostName,omitempty\"`\n\tPort               int32                   `xml:\"port,omitempty\"`\n\tSslThumbprint      string                  `xml:\"sslThumbprint,omitempty\"`\n\tUserName           string                  `xml:\"userName,omitempty\"`\n\tPassword           string                  `xml:\"password,omitempty\"`\n\tVmFolder           *ManagedObjectReference `xml:\"vmFolder,omitempty\"`\n\tForce              bool                    `xml:\"force\"`\n\tVimAccountName     string                  `xml:\"vimAccountName,omitempty\"`\n\tVimAccountPassword string                  `xml:\"vimAccountPassword,omitempty\"`\n\tManagementIp       string                  `xml:\"managementIp,omitempty\"`\n\tLockdownMode       HostLockdownMode        `xml:\"lockdownMode,omitempty\"`\n\tHostGateway        *HostGatewaySpec        
`xml:\"hostGateway,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostConnectSpec\"] = reflect.TypeOf((*HostConnectSpec)(nil)).Elem()\n}\n\ntype HostConnectedEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostConnectedEvent\"] = reflect.TypeOf((*HostConnectedEvent)(nil)).Elem()\n}\n\ntype HostConnectionLostEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostConnectionLostEvent\"] = reflect.TypeOf((*HostConnectionLostEvent)(nil)).Elem()\n}\n\ntype HostCpuIdInfo struct {\n\tDynamicData\n\n\tLevel  int32  `xml:\"level\"`\n\tVendor string `xml:\"vendor,omitempty\"`\n\tEax    string `xml:\"eax,omitempty\"`\n\tEbx    string `xml:\"ebx,omitempty\"`\n\tEcx    string `xml:\"ecx,omitempty\"`\n\tEdx    string `xml:\"edx,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostCpuIdInfo\"] = reflect.TypeOf((*HostCpuIdInfo)(nil)).Elem()\n}\n\ntype HostCpuInfo struct {\n\tDynamicData\n\n\tNumCpuPackages int16 `xml:\"numCpuPackages\"`\n\tNumCpuCores    int16 `xml:\"numCpuCores\"`\n\tNumCpuThreads  int16 `xml:\"numCpuThreads\"`\n\tHz             int64 `xml:\"hz\"`\n}\n\nfunc init() {\n\tt[\"HostCpuInfo\"] = reflect.TypeOf((*HostCpuInfo)(nil)).Elem()\n}\n\ntype HostCpuPackage struct {\n\tDynamicData\n\n\tIndex       int16           `xml:\"index\"`\n\tVendor      string          `xml:\"vendor\"`\n\tHz          int64           `xml:\"hz\"`\n\tBusHz       int64           `xml:\"busHz\"`\n\tDescription string          `xml:\"description\"`\n\tThreadId    []int16         `xml:\"threadId\"`\n\tCpuFeature  []HostCpuIdInfo `xml:\"cpuFeature,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostCpuPackage\"] = reflect.TypeOf((*HostCpuPackage)(nil)).Elem()\n}\n\ntype HostCpuPowerManagementInfo struct {\n\tDynamicData\n\n\tCurrentPolicy   string `xml:\"currentPolicy,omitempty\"`\n\tHardwareSupport string `xml:\"hardwareSupport,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostCpuPowerManagementInfo\"] = reflect.TypeOf((*HostCpuPowerManagementInfo)(nil)).Elem()\n}\n\ntype HostCreateDiskRequestType struct {\n\tThis 
ManagedObjectReference `xml:\"_this\"`\n\tSpec VslmCreateSpec         `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"HostCreateDiskRequestType\"] = reflect.TypeOf((*HostCreateDiskRequestType)(nil)).Elem()\n}\n\ntype HostCreateDisk_Task HostCreateDiskRequestType\n\nfunc init() {\n\tt[\"HostCreateDisk_Task\"] = reflect.TypeOf((*HostCreateDisk_Task)(nil)).Elem()\n}\n\ntype HostCreateDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype HostDasDisabledEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostDasDisabledEvent\"] = reflect.TypeOf((*HostDasDisabledEvent)(nil)).Elem()\n}\n\ntype HostDasDisablingEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostDasDisablingEvent\"] = reflect.TypeOf((*HostDasDisablingEvent)(nil)).Elem()\n}\n\ntype HostDasEnabledEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostDasEnabledEvent\"] = reflect.TypeOf((*HostDasEnabledEvent)(nil)).Elem()\n}\n\ntype HostDasEnablingEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostDasEnablingEvent\"] = reflect.TypeOf((*HostDasEnablingEvent)(nil)).Elem()\n}\n\ntype HostDasErrorEvent struct {\n\tHostEvent\n\n\tMessage string `xml:\"message,omitempty\"`\n\tReason  string `xml:\"reason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostDasErrorEvent\"] = reflect.TypeOf((*HostDasErrorEvent)(nil)).Elem()\n}\n\ntype HostDasEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostDasEvent\"] = reflect.TypeOf((*HostDasEvent)(nil)).Elem()\n}\n\ntype HostDasOkEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostDasOkEvent\"] = reflect.TypeOf((*HostDasOkEvent)(nil)).Elem()\n}\n\ntype HostDatastoreBrowserSearchResults struct {\n\tDynamicData\n\n\tDatastore  *ManagedObjectReference `xml:\"datastore,omitempty\"`\n\tFolderPath string                  `xml:\"folderPath,omitempty\"`\n\tFile       []BaseFileInfo          `xml:\"file,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostDatastoreBrowserSearchResults\"] = 
reflect.TypeOf((*HostDatastoreBrowserSearchResults)(nil)).Elem()\n}\n\ntype HostDatastoreBrowserSearchSpec struct {\n\tDynamicData\n\n\tQuery                 []BaseFileQuery `xml:\"query,omitempty,typeattr\"`\n\tDetails               *FileQueryFlags `xml:\"details,omitempty\"`\n\tSearchCaseInsensitive *bool           `xml:\"searchCaseInsensitive\"`\n\tMatchPattern          []string        `xml:\"matchPattern,omitempty\"`\n\tSortFoldersFirst      *bool           `xml:\"sortFoldersFirst\"`\n}\n\nfunc init() {\n\tt[\"HostDatastoreBrowserSearchSpec\"] = reflect.TypeOf((*HostDatastoreBrowserSearchSpec)(nil)).Elem()\n}\n\ntype HostDatastoreConnectInfo struct {\n\tDynamicData\n\n\tSummary DatastoreSummary `xml:\"summary\"`\n}\n\nfunc init() {\n\tt[\"HostDatastoreConnectInfo\"] = reflect.TypeOf((*HostDatastoreConnectInfo)(nil)).Elem()\n}\n\ntype HostDatastoreExistsConnectInfo struct {\n\tHostDatastoreConnectInfo\n\n\tNewDatastoreName string `xml:\"newDatastoreName\"`\n}\n\nfunc init() {\n\tt[\"HostDatastoreExistsConnectInfo\"] = reflect.TypeOf((*HostDatastoreExistsConnectInfo)(nil)).Elem()\n}\n\ntype HostDatastoreNameConflictConnectInfo struct {\n\tHostDatastoreConnectInfo\n\n\tNewDatastoreName string `xml:\"newDatastoreName\"`\n}\n\nfunc init() {\n\tt[\"HostDatastoreNameConflictConnectInfo\"] = reflect.TypeOf((*HostDatastoreNameConflictConnectInfo)(nil)).Elem()\n}\n\ntype HostDatastoreSystemCapabilities struct {\n\tDynamicData\n\n\tNfsMountCreationRequired     bool  `xml:\"nfsMountCreationRequired\"`\n\tNfsMountCreationSupported    bool  `xml:\"nfsMountCreationSupported\"`\n\tLocalDatastoreSupported      bool  `xml:\"localDatastoreSupported\"`\n\tVmfsExtentExpansionSupported *bool `xml:\"vmfsExtentExpansionSupported\"`\n}\n\nfunc init() {\n\tt[\"HostDatastoreSystemCapabilities\"] = reflect.TypeOf((*HostDatastoreSystemCapabilities)(nil)).Elem()\n}\n\ntype HostDatastoreSystemDatastoreResult struct {\n\tDynamicData\n\n\tKey   ManagedObjectReference `xml:\"key\"`\n\tFault 
*LocalizedMethodFault  `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostDatastoreSystemDatastoreResult\"] = reflect.TypeOf((*HostDatastoreSystemDatastoreResult)(nil)).Elem()\n}\n\ntype HostDatastoreSystemVvolDatastoreSpec struct {\n\tDynamicData\n\n\tName string `xml:\"name\"`\n\tScId string `xml:\"scId\"`\n}\n\nfunc init() {\n\tt[\"HostDatastoreSystemVvolDatastoreSpec\"] = reflect.TypeOf((*HostDatastoreSystemVvolDatastoreSpec)(nil)).Elem()\n}\n\ntype HostDateTimeConfig struct {\n\tDynamicData\n\n\tTimeZone  string         `xml:\"timeZone,omitempty\"`\n\tNtpConfig *HostNtpConfig `xml:\"ntpConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostDateTimeConfig\"] = reflect.TypeOf((*HostDateTimeConfig)(nil)).Elem()\n}\n\ntype HostDateTimeInfo struct {\n\tDynamicData\n\n\tTimeZone  HostDateTimeSystemTimeZone `xml:\"timeZone\"`\n\tNtpConfig *HostNtpConfig             `xml:\"ntpConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostDateTimeInfo\"] = reflect.TypeOf((*HostDateTimeInfo)(nil)).Elem()\n}\n\ntype HostDateTimeSystemTimeZone struct {\n\tDynamicData\n\n\tKey         string `xml:\"key\"`\n\tName        string `xml:\"name\"`\n\tDescription string `xml:\"description\"`\n\tGmtOffset   int32  `xml:\"gmtOffset\"`\n}\n\nfunc init() {\n\tt[\"HostDateTimeSystemTimeZone\"] = reflect.TypeOf((*HostDateTimeSystemTimeZone)(nil)).Elem()\n}\n\ntype HostDeleteVStorageObjectRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tId        ID                     `xml:\"id\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"HostDeleteVStorageObjectRequestType\"] = reflect.TypeOf((*HostDeleteVStorageObjectRequestType)(nil)).Elem()\n}\n\ntype HostDeleteVStorageObject_Task HostDeleteVStorageObjectRequestType\n\nfunc init() {\n\tt[\"HostDeleteVStorageObject_Task\"] = reflect.TypeOf((*HostDeleteVStorageObject_Task)(nil)).Elem()\n}\n\ntype HostDeleteVStorageObject_TaskResponse struct {\n\tReturnval ManagedObjectReference 
`xml:\"returnval\"`\n}\n\ntype HostDeploymentInfo struct {\n\tDynamicData\n\n\tBootedFromStatelessCache *bool `xml:\"bootedFromStatelessCache\"`\n}\n\nfunc init() {\n\tt[\"HostDeploymentInfo\"] = reflect.TypeOf((*HostDeploymentInfo)(nil)).Elem()\n}\n\ntype HostDevice struct {\n\tDynamicData\n\n\tDeviceName string `xml:\"deviceName\"`\n\tDeviceType string `xml:\"deviceType\"`\n}\n\nfunc init() {\n\tt[\"HostDevice\"] = reflect.TypeOf((*HostDevice)(nil)).Elem()\n}\n\ntype HostDhcpService struct {\n\tDynamicData\n\n\tKey  string              `xml:\"key\"`\n\tSpec HostDhcpServiceSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"HostDhcpService\"] = reflect.TypeOf((*HostDhcpService)(nil)).Elem()\n}\n\ntype HostDhcpServiceConfig struct {\n\tDynamicData\n\n\tChangeOperation string              `xml:\"changeOperation,omitempty\"`\n\tKey             string              `xml:\"key\"`\n\tSpec            HostDhcpServiceSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"HostDhcpServiceConfig\"] = reflect.TypeOf((*HostDhcpServiceConfig)(nil)).Elem()\n}\n\ntype HostDhcpServiceSpec struct {\n\tDynamicData\n\n\tVirtualSwitch        string `xml:\"virtualSwitch\"`\n\tDefaultLeaseDuration int32  `xml:\"defaultLeaseDuration\"`\n\tLeaseBeginIp         string `xml:\"leaseBeginIp\"`\n\tLeaseEndIp           string `xml:\"leaseEndIp\"`\n\tMaxLeaseDuration     int32  `xml:\"maxLeaseDuration\"`\n\tUnlimitedLease       bool   `xml:\"unlimitedLease\"`\n\tIpSubnetAddr         string `xml:\"ipSubnetAddr\"`\n\tIpSubnetMask         string `xml:\"ipSubnetMask\"`\n}\n\nfunc init() {\n\tt[\"HostDhcpServiceSpec\"] = reflect.TypeOf((*HostDhcpServiceSpec)(nil)).Elem()\n}\n\ntype HostDiagnosticPartition struct {\n\tDynamicData\n\n\tStorageType    string                `xml:\"storageType\"`\n\tDiagnosticType string                `xml:\"diagnosticType\"`\n\tSlots          int32                 `xml:\"slots\"`\n\tId             HostScsiDiskPartition `xml:\"id\"`\n}\n\nfunc init() {\n\tt[\"HostDiagnosticPartition\"] 
= reflect.TypeOf((*HostDiagnosticPartition)(nil)).Elem()\n}\n\ntype HostDiagnosticPartitionCreateDescription struct {\n\tDynamicData\n\n\tLayout   HostDiskPartitionLayout           `xml:\"layout\"`\n\tDiskUuid string                            `xml:\"diskUuid\"`\n\tSpec     HostDiagnosticPartitionCreateSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"HostDiagnosticPartitionCreateDescription\"] = reflect.TypeOf((*HostDiagnosticPartitionCreateDescription)(nil)).Elem()\n}\n\ntype HostDiagnosticPartitionCreateOption struct {\n\tDynamicData\n\n\tStorageType    string       `xml:\"storageType\"`\n\tDiagnosticType string       `xml:\"diagnosticType\"`\n\tDisk           HostScsiDisk `xml:\"disk\"`\n}\n\nfunc init() {\n\tt[\"HostDiagnosticPartitionCreateOption\"] = reflect.TypeOf((*HostDiagnosticPartitionCreateOption)(nil)).Elem()\n}\n\ntype HostDiagnosticPartitionCreateSpec struct {\n\tDynamicData\n\n\tStorageType    string                `xml:\"storageType\"`\n\tDiagnosticType string                `xml:\"diagnosticType\"`\n\tId             HostScsiDiskPartition `xml:\"id\"`\n\tPartition      HostDiskPartitionSpec `xml:\"partition\"`\n\tActive         *bool                 `xml:\"active\"`\n}\n\nfunc init() {\n\tt[\"HostDiagnosticPartitionCreateSpec\"] = reflect.TypeOf((*HostDiagnosticPartitionCreateSpec)(nil)).Elem()\n}\n\ntype HostDigestInfo struct {\n\tDynamicData\n\n\tDigestMethod string `xml:\"digestMethod\"`\n\tDigestValue  []byte `xml:\"digestValue\"`\n\tObjectName   string `xml:\"objectName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostDigestInfo\"] = reflect.TypeOf((*HostDigestInfo)(nil)).Elem()\n}\n\ntype HostDirectoryStoreInfo struct {\n\tHostAuthenticationStoreInfo\n}\n\nfunc init() {\n\tt[\"HostDirectoryStoreInfo\"] = reflect.TypeOf((*HostDirectoryStoreInfo)(nil)).Elem()\n}\n\ntype HostDisconnectedEvent struct {\n\tHostEvent\n\n\tReason string `xml:\"reason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostDisconnectedEvent\"] = 
reflect.TypeOf((*HostDisconnectedEvent)(nil)).Elem()\n}\n\ntype HostDiskConfigurationResult struct {\n\tDynamicData\n\n\tDevicePath string                `xml:\"devicePath,omitempty\"`\n\tSuccess    *bool                 `xml:\"success\"`\n\tFault      *LocalizedMethodFault `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostDiskConfigurationResult\"] = reflect.TypeOf((*HostDiskConfigurationResult)(nil)).Elem()\n}\n\ntype HostDiskDimensions struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"HostDiskDimensions\"] = reflect.TypeOf((*HostDiskDimensions)(nil)).Elem()\n}\n\ntype HostDiskDimensionsChs struct {\n\tDynamicData\n\n\tCylinder int64 `xml:\"cylinder\"`\n\tHead     int32 `xml:\"head\"`\n\tSector   int32 `xml:\"sector\"`\n}\n\nfunc init() {\n\tt[\"HostDiskDimensionsChs\"] = reflect.TypeOf((*HostDiskDimensionsChs)(nil)).Elem()\n}\n\ntype HostDiskDimensionsLba struct {\n\tDynamicData\n\n\tBlockSize int32 `xml:\"blockSize\"`\n\tBlock     int64 `xml:\"block\"`\n}\n\nfunc init() {\n\tt[\"HostDiskDimensionsLba\"] = reflect.TypeOf((*HostDiskDimensionsLba)(nil)).Elem()\n}\n\ntype HostDiskMappingInfo struct {\n\tDynamicData\n\n\tPhysicalPartition *HostDiskMappingPartitionInfo `xml:\"physicalPartition,omitempty\"`\n\tName              string                        `xml:\"name\"`\n\tExclusive         *bool                         `xml:\"exclusive\"`\n}\n\nfunc init() {\n\tt[\"HostDiskMappingInfo\"] = reflect.TypeOf((*HostDiskMappingInfo)(nil)).Elem()\n}\n\ntype HostDiskMappingOption struct {\n\tDynamicData\n\n\tPhysicalPartition []HostDiskMappingPartitionOption `xml:\"physicalPartition,omitempty\"`\n\tName              string                           `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"HostDiskMappingOption\"] = reflect.TypeOf((*HostDiskMappingOption)(nil)).Elem()\n}\n\ntype HostDiskMappingPartitionInfo struct {\n\tDynamicData\n\n\tName         string `xml:\"name\"`\n\tFileSystem   string `xml:\"fileSystem\"`\n\tCapacityInKb int64  
`xml:\"capacityInKb\"`\n}\n\nfunc init() {\n\tt[\"HostDiskMappingPartitionInfo\"] = reflect.TypeOf((*HostDiskMappingPartitionInfo)(nil)).Elem()\n}\n\ntype HostDiskMappingPartitionOption struct {\n\tDynamicData\n\n\tName         string `xml:\"name\"`\n\tFileSystem   string `xml:\"fileSystem\"`\n\tCapacityInKb int64  `xml:\"capacityInKb\"`\n}\n\nfunc init() {\n\tt[\"HostDiskMappingPartitionOption\"] = reflect.TypeOf((*HostDiskMappingPartitionOption)(nil)).Elem()\n}\n\ntype HostDiskPartitionAttributes struct {\n\tDynamicData\n\n\tPartition          int32  `xml:\"partition\"`\n\tStartSector        int64  `xml:\"startSector\"`\n\tEndSector          int64  `xml:\"endSector\"`\n\tType               string `xml:\"type\"`\n\tGuid               string `xml:\"guid,omitempty\"`\n\tLogical            bool   `xml:\"logical\"`\n\tAttributes         byte   `xml:\"attributes\"`\n\tPartitionAlignment int64  `xml:\"partitionAlignment,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostDiskPartitionAttributes\"] = reflect.TypeOf((*HostDiskPartitionAttributes)(nil)).Elem()\n}\n\ntype HostDiskPartitionBlockRange struct {\n\tDynamicData\n\n\tPartition int32                 `xml:\"partition,omitempty\"`\n\tType      string                `xml:\"type\"`\n\tStart     HostDiskDimensionsLba `xml:\"start\"`\n\tEnd       HostDiskDimensionsLba `xml:\"end\"`\n}\n\nfunc init() {\n\tt[\"HostDiskPartitionBlockRange\"] = reflect.TypeOf((*HostDiskPartitionBlockRange)(nil)).Elem()\n}\n\ntype HostDiskPartitionInfo struct {\n\tDynamicData\n\n\tDeviceName string                  `xml:\"deviceName\"`\n\tSpec       HostDiskPartitionSpec   `xml:\"spec\"`\n\tLayout     HostDiskPartitionLayout `xml:\"layout\"`\n}\n\nfunc init() {\n\tt[\"HostDiskPartitionInfo\"] = reflect.TypeOf((*HostDiskPartitionInfo)(nil)).Elem()\n}\n\ntype HostDiskPartitionLayout struct {\n\tDynamicData\n\n\tTotal     *HostDiskDimensionsLba        `xml:\"total,omitempty\"`\n\tPartition []HostDiskPartitionBlockRange `xml:\"partition\"`\n}\n\nfunc 
init() {\n\tt[\"HostDiskPartitionLayout\"] = reflect.TypeOf((*HostDiskPartitionLayout)(nil)).Elem()\n}\n\ntype HostDiskPartitionSpec struct {\n\tDynamicData\n\n\tPartitionFormat string                        `xml:\"partitionFormat,omitempty\"`\n\tChs             *HostDiskDimensionsChs        `xml:\"chs,omitempty\"`\n\tTotalSectors    int64                         `xml:\"totalSectors,omitempty\"`\n\tPartition       []HostDiskPartitionAttributes `xml:\"partition,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostDiskPartitionSpec\"] = reflect.TypeOf((*HostDiskPartitionSpec)(nil)).Elem()\n}\n\ntype HostDnsConfig struct {\n\tDynamicData\n\n\tDhcp             bool     `xml:\"dhcp\"`\n\tVirtualNicDevice string   `xml:\"virtualNicDevice,omitempty\"`\n\tHostName         string   `xml:\"hostName\"`\n\tDomainName       string   `xml:\"domainName\"`\n\tAddress          []string `xml:\"address,omitempty\"`\n\tSearchDomain     []string `xml:\"searchDomain,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostDnsConfig\"] = reflect.TypeOf((*HostDnsConfig)(nil)).Elem()\n}\n\ntype HostDnsConfigSpec struct {\n\tHostDnsConfig\n\n\tVirtualNicConnection *HostVirtualNicConnection `xml:\"virtualNicConnection,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostDnsConfigSpec\"] = reflect.TypeOf((*HostDnsConfigSpec)(nil)).Elem()\n}\n\ntype HostEnableAdminFailedEvent struct {\n\tHostEvent\n\n\tPermissions []Permission `xml:\"permissions\"`\n}\n\nfunc init() {\n\tt[\"HostEnableAdminFailedEvent\"] = reflect.TypeOf((*HostEnableAdminFailedEvent)(nil)).Elem()\n}\n\ntype HostEsxAgentHostManagerConfigInfo struct {\n\tDynamicData\n\n\tAgentVmDatastore *ManagedObjectReference `xml:\"agentVmDatastore,omitempty\"`\n\tAgentVmNetwork   *ManagedObjectReference `xml:\"agentVmNetwork,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostEsxAgentHostManagerConfigInfo\"] = reflect.TypeOf((*HostEsxAgentHostManagerConfigInfo)(nil)).Elem()\n}\n\ntype HostEvent struct {\n\tEvent\n}\n\nfunc init() {\n\tt[\"HostEvent\"] = 
reflect.TypeOf((*HostEvent)(nil)).Elem()\n}\n\ntype HostEventArgument struct {\n\tEntityEventArgument\n\n\tHost ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"HostEventArgument\"] = reflect.TypeOf((*HostEventArgument)(nil)).Elem()\n}\n\ntype HostExtendDiskRequestType struct {\n\tThis            ManagedObjectReference `xml:\"_this\"`\n\tId              ID                     `xml:\"id\"`\n\tDatastore       ManagedObjectReference `xml:\"datastore\"`\n\tNewCapacityInMB int64                  `xml:\"newCapacityInMB\"`\n}\n\nfunc init() {\n\tt[\"HostExtendDiskRequestType\"] = reflect.TypeOf((*HostExtendDiskRequestType)(nil)).Elem()\n}\n\ntype HostExtendDisk_Task HostExtendDiskRequestType\n\nfunc init() {\n\tt[\"HostExtendDisk_Task\"] = reflect.TypeOf((*HostExtendDisk_Task)(nil)).Elem()\n}\n\ntype HostExtendDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype HostExtraNetworksEvent struct {\n\tHostDasEvent\n\n\tIps string `xml:\"ips,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostExtraNetworksEvent\"] = reflect.TypeOf((*HostExtraNetworksEvent)(nil)).Elem()\n}\n\ntype HostFaultToleranceManagerComponentHealthInfo struct {\n\tDynamicData\n\n\tIsStorageHealthy bool `xml:\"isStorageHealthy\"`\n\tIsNetworkHealthy bool `xml:\"isNetworkHealthy\"`\n}\n\nfunc init() {\n\tt[\"HostFaultToleranceManagerComponentHealthInfo\"] = reflect.TypeOf((*HostFaultToleranceManagerComponentHealthInfo)(nil)).Elem()\n}\n\ntype HostFeatureCapability struct {\n\tDynamicData\n\n\tKey         string `xml:\"key\"`\n\tFeatureName string `xml:\"featureName\"`\n\tValue       string `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"HostFeatureCapability\"] = reflect.TypeOf((*HostFeatureCapability)(nil)).Elem()\n}\n\ntype HostFeatureMask struct {\n\tDynamicData\n\n\tKey         string `xml:\"key\"`\n\tFeatureName string `xml:\"featureName\"`\n\tValue       string `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"HostFeatureMask\"] = 
reflect.TypeOf((*HostFeatureMask)(nil)).Elem()\n}\n\ntype HostFeatureVersionInfo struct {\n\tDynamicData\n\n\tKey   string `xml:\"key\"`\n\tValue string `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"HostFeatureVersionInfo\"] = reflect.TypeOf((*HostFeatureVersionInfo)(nil)).Elem()\n}\n\ntype HostFibreChannelHba struct {\n\tHostHostBusAdapter\n\n\tPortWorldWideName int64                `xml:\"portWorldWideName\"`\n\tNodeWorldWideName int64                `xml:\"nodeWorldWideName\"`\n\tPortType          FibreChannelPortType `xml:\"portType\"`\n\tSpeed             int64                `xml:\"speed\"`\n}\n\nfunc init() {\n\tt[\"HostFibreChannelHba\"] = reflect.TypeOf((*HostFibreChannelHba)(nil)).Elem()\n}\n\ntype HostFibreChannelOverEthernetHba struct {\n\tHostFibreChannelHba\n\n\tUnderlyingNic    string                                  `xml:\"underlyingNic\"`\n\tLinkInfo         HostFibreChannelOverEthernetHbaLinkInfo `xml:\"linkInfo\"`\n\tIsSoftwareFcoe   bool                                    `xml:\"isSoftwareFcoe\"`\n\tMarkedForRemoval bool                                    `xml:\"markedForRemoval\"`\n}\n\nfunc init() {\n\tt[\"HostFibreChannelOverEthernetHba\"] = reflect.TypeOf((*HostFibreChannelOverEthernetHba)(nil)).Elem()\n}\n\ntype HostFibreChannelOverEthernetHbaLinkInfo struct {\n\tDynamicData\n\n\tVnportMac string `xml:\"vnportMac\"`\n\tFcfMac    string `xml:\"fcfMac\"`\n\tVlanId    int32  `xml:\"vlanId\"`\n}\n\nfunc init() {\n\tt[\"HostFibreChannelOverEthernetHbaLinkInfo\"] = reflect.TypeOf((*HostFibreChannelOverEthernetHbaLinkInfo)(nil)).Elem()\n}\n\ntype HostFibreChannelOverEthernetTargetTransport struct {\n\tHostFibreChannelTargetTransport\n\n\tVnportMac string `xml:\"vnportMac\"`\n\tFcfMac    string `xml:\"fcfMac\"`\n\tVlanId    int32  `xml:\"vlanId\"`\n}\n\nfunc init() {\n\tt[\"HostFibreChannelOverEthernetTargetTransport\"] = reflect.TypeOf((*HostFibreChannelOverEthernetTargetTransport)(nil)).Elem()\n}\n\ntype HostFibreChannelTargetTransport struct 
{\n\tHostTargetTransport\n\n\tPortWorldWideName int64 `xml:\"portWorldWideName\"`\n\tNodeWorldWideName int64 `xml:\"nodeWorldWideName\"`\n}\n\nfunc init() {\n\tt[\"HostFibreChannelTargetTransport\"] = reflect.TypeOf((*HostFibreChannelTargetTransport)(nil)).Elem()\n}\n\ntype HostFileAccess struct {\n\tDynamicData\n\n\tWho  string `xml:\"who\"`\n\tWhat string `xml:\"what\"`\n}\n\nfunc init() {\n\tt[\"HostFileAccess\"] = reflect.TypeOf((*HostFileAccess)(nil)).Elem()\n}\n\ntype HostFileSystemMountInfo struct {\n\tDynamicData\n\n\tMountInfo       HostMountInfo            `xml:\"mountInfo\"`\n\tVolume          BaseHostFileSystemVolume `xml:\"volume,typeattr\"`\n\tVStorageSupport string                   `xml:\"vStorageSupport,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostFileSystemMountInfo\"] = reflect.TypeOf((*HostFileSystemMountInfo)(nil)).Elem()\n}\n\ntype HostFileSystemVolume struct {\n\tDynamicData\n\n\tType     string `xml:\"type\"`\n\tName     string `xml:\"name\"`\n\tCapacity int64  `xml:\"capacity\"`\n}\n\nfunc init() {\n\tt[\"HostFileSystemVolume\"] = reflect.TypeOf((*HostFileSystemVolume)(nil)).Elem()\n}\n\ntype HostFileSystemVolumeInfo struct {\n\tDynamicData\n\n\tVolumeTypeList []string                  `xml:\"volumeTypeList,omitempty\"`\n\tMountInfo      []HostFileSystemMountInfo `xml:\"mountInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostFileSystemVolumeInfo\"] = reflect.TypeOf((*HostFileSystemVolumeInfo)(nil)).Elem()\n}\n\ntype HostFirewallConfig struct {\n\tDynamicData\n\n\tRule                  []HostFirewallConfigRuleSetConfig `xml:\"rule,omitempty\"`\n\tDefaultBlockingPolicy HostFirewallDefaultPolicy         `xml:\"defaultBlockingPolicy\"`\n}\n\nfunc init() {\n\tt[\"HostFirewallConfig\"] = reflect.TypeOf((*HostFirewallConfig)(nil)).Elem()\n}\n\ntype HostFirewallConfigRuleSetConfig struct {\n\tDynamicData\n\n\tRulesetId    string                     `xml:\"rulesetId\"`\n\tEnabled      bool                       `xml:\"enabled\"`\n\tAllowedHosts 
*HostFirewallRulesetIpList `xml:\"allowedHosts,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostFirewallConfigRuleSetConfig\"] = reflect.TypeOf((*HostFirewallConfigRuleSetConfig)(nil)).Elem()\n}\n\ntype HostFirewallDefaultPolicy struct {\n\tDynamicData\n\n\tIncomingBlocked *bool `xml:\"incomingBlocked\"`\n\tOutgoingBlocked *bool `xml:\"outgoingBlocked\"`\n}\n\nfunc init() {\n\tt[\"HostFirewallDefaultPolicy\"] = reflect.TypeOf((*HostFirewallDefaultPolicy)(nil)).Elem()\n}\n\ntype HostFirewallInfo struct {\n\tDynamicData\n\n\tDefaultPolicy HostFirewallDefaultPolicy `xml:\"defaultPolicy\"`\n\tRuleset       []HostFirewallRuleset     `xml:\"ruleset,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostFirewallInfo\"] = reflect.TypeOf((*HostFirewallInfo)(nil)).Elem()\n}\n\ntype HostFirewallRule struct {\n\tDynamicData\n\n\tPort      int32                     `xml:\"port\"`\n\tEndPort   int32                     `xml:\"endPort,omitempty\"`\n\tDirection HostFirewallRuleDirection `xml:\"direction\"`\n\tPortType  HostFirewallRulePortType  `xml:\"portType,omitempty\"`\n\tProtocol  string                    `xml:\"protocol\"`\n}\n\nfunc init() {\n\tt[\"HostFirewallRule\"] = reflect.TypeOf((*HostFirewallRule)(nil)).Elem()\n}\n\ntype HostFirewallRuleset struct {\n\tDynamicData\n\n\tKey          string                     `xml:\"key\"`\n\tLabel        string                     `xml:\"label\"`\n\tRequired     bool                       `xml:\"required\"`\n\tRule         []HostFirewallRule         `xml:\"rule\"`\n\tService      string                     `xml:\"service,omitempty\"`\n\tEnabled      bool                       `xml:\"enabled\"`\n\tAllowedHosts *HostFirewallRulesetIpList `xml:\"allowedHosts,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostFirewallRuleset\"] = reflect.TypeOf((*HostFirewallRuleset)(nil)).Elem()\n}\n\ntype HostFirewallRulesetIpList struct {\n\tDynamicData\n\n\tIpAddress []string                       `xml:\"ipAddress,omitempty\"`\n\tIpNetwork []HostFirewallRulesetIpNetwork 
`xml:\"ipNetwork,omitempty\"`\n\tAllIp     bool                           `xml:\"allIp\"`\n}\n\nfunc init() {\n\tt[\"HostFirewallRulesetIpList\"] = reflect.TypeOf((*HostFirewallRulesetIpList)(nil)).Elem()\n}\n\ntype HostFirewallRulesetIpNetwork struct {\n\tDynamicData\n\n\tNetwork      string `xml:\"network\"`\n\tPrefixLength int32  `xml:\"prefixLength\"`\n}\n\nfunc init() {\n\tt[\"HostFirewallRulesetIpNetwork\"] = reflect.TypeOf((*HostFirewallRulesetIpNetwork)(nil)).Elem()\n}\n\ntype HostFirewallRulesetRulesetSpec struct {\n\tDynamicData\n\n\tAllowedHosts HostFirewallRulesetIpList `xml:\"allowedHosts\"`\n}\n\nfunc init() {\n\tt[\"HostFirewallRulesetRulesetSpec\"] = reflect.TypeOf((*HostFirewallRulesetRulesetSpec)(nil)).Elem()\n}\n\ntype HostFlagInfo struct {\n\tDynamicData\n\n\tBackgroundSnapshotsEnabled *bool `xml:\"backgroundSnapshotsEnabled\"`\n}\n\nfunc init() {\n\tt[\"HostFlagInfo\"] = reflect.TypeOf((*HostFlagInfo)(nil)).Elem()\n}\n\ntype HostForceMountedInfo struct {\n\tDynamicData\n\n\tPersist bool `xml:\"persist\"`\n\tMounted bool `xml:\"mounted\"`\n}\n\nfunc init() {\n\tt[\"HostForceMountedInfo\"] = reflect.TypeOf((*HostForceMountedInfo)(nil)).Elem()\n}\n\ntype HostGatewaySpec struct {\n\tDynamicData\n\n\tGatewayType            string     `xml:\"gatewayType\"`\n\tGatewayId              string     `xml:\"gatewayId,omitempty\"`\n\tTrustVerificationToken string     `xml:\"trustVerificationToken,omitempty\"`\n\tHostAuthParams         []KeyValue `xml:\"hostAuthParams,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostGatewaySpec\"] = reflect.TypeOf((*HostGatewaySpec)(nil)).Elem()\n}\n\ntype HostGetShortNameFailedEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostGetShortNameFailedEvent\"] = reflect.TypeOf((*HostGetShortNameFailedEvent)(nil)).Elem()\n}\n\ntype HostGetVFlashModuleDefaultConfig HostGetVFlashModuleDefaultConfigRequestType\n\nfunc init() {\n\tt[\"HostGetVFlashModuleDefaultConfig\"] = 
reflect.TypeOf((*HostGetVFlashModuleDefaultConfig)(nil)).Elem()\n}\n\ntype HostGetVFlashModuleDefaultConfigRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tVFlashModule string                 `xml:\"vFlashModule\"`\n}\n\nfunc init() {\n\tt[\"HostGetVFlashModuleDefaultConfigRequestType\"] = reflect.TypeOf((*HostGetVFlashModuleDefaultConfigRequestType)(nil)).Elem()\n}\n\ntype HostGetVFlashModuleDefaultConfigResponse struct {\n\tReturnval VirtualDiskVFlashCacheConfigInfo `xml:\"returnval\"`\n}\n\ntype HostGraphicsConfig struct {\n\tDynamicData\n\n\tHostDefaultGraphicsType        string                         `xml:\"hostDefaultGraphicsType\"`\n\tSharedPassthruAssignmentPolicy string                         `xml:\"sharedPassthruAssignmentPolicy\"`\n\tDeviceType                     []HostGraphicsConfigDeviceType `xml:\"deviceType,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostGraphicsConfig\"] = reflect.TypeOf((*HostGraphicsConfig)(nil)).Elem()\n}\n\ntype HostGraphicsConfigDeviceType struct {\n\tDynamicData\n\n\tDeviceId     string `xml:\"deviceId\"`\n\tGraphicsType string `xml:\"graphicsType\"`\n}\n\nfunc init() {\n\tt[\"HostGraphicsConfigDeviceType\"] = reflect.TypeOf((*HostGraphicsConfigDeviceType)(nil)).Elem()\n}\n\ntype HostGraphicsInfo struct {\n\tDynamicData\n\n\tDeviceName     string                   `xml:\"deviceName\"`\n\tVendorName     string                   `xml:\"vendorName\"`\n\tPciId          string                   `xml:\"pciId\"`\n\tGraphicsType   string                   `xml:\"graphicsType\"`\n\tMemorySizeInKB int64                    `xml:\"memorySizeInKB\"`\n\tVm             []ManagedObjectReference `xml:\"vm,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostGraphicsInfo\"] = reflect.TypeOf((*HostGraphicsInfo)(nil)).Elem()\n}\n\ntype HostHardwareElementInfo struct {\n\tDynamicData\n\n\tName   string                 `xml:\"name\"`\n\tStatus BaseElementDescription `xml:\"status,typeattr\"`\n}\n\nfunc init() 
{\n\tt[\"HostHardwareElementInfo\"] = reflect.TypeOf((*HostHardwareElementInfo)(nil)).Elem()\n}\n\ntype HostHardwareInfo struct {\n\tDynamicData\n\n\tSystemInfo             HostSystemInfo              `xml:\"systemInfo\"`\n\tCpuPowerManagementInfo *HostCpuPowerManagementInfo `xml:\"cpuPowerManagementInfo,omitempty\"`\n\tCpuInfo                HostCpuInfo                 `xml:\"cpuInfo\"`\n\tCpuPkg                 []HostCpuPackage            `xml:\"cpuPkg\"`\n\tMemorySize             int64                       `xml:\"memorySize\"`\n\tNumaInfo               *HostNumaInfo               `xml:\"numaInfo,omitempty\"`\n\tSmcPresent             *bool                       `xml:\"smcPresent\"`\n\tPciDevice              []HostPciDevice             `xml:\"pciDevice,omitempty\"`\n\tCpuFeature             []HostCpuIdInfo             `xml:\"cpuFeature,omitempty\"`\n\tBiosInfo               *HostBIOSInfo               `xml:\"biosInfo,omitempty\"`\n\tReliableMemoryInfo     *HostReliableMemoryInfo     `xml:\"reliableMemoryInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostHardwareInfo\"] = reflect.TypeOf((*HostHardwareInfo)(nil)).Elem()\n}\n\ntype HostHardwareStatusInfo struct {\n\tDynamicData\n\n\tMemoryStatusInfo  []BaseHostHardwareElementInfo `xml:\"memoryStatusInfo,omitempty,typeattr\"`\n\tCpuStatusInfo     []BaseHostHardwareElementInfo `xml:\"cpuStatusInfo,omitempty,typeattr\"`\n\tStorageStatusInfo []HostStorageElementInfo      `xml:\"storageStatusInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostHardwareStatusInfo\"] = reflect.TypeOf((*HostHardwareStatusInfo)(nil)).Elem()\n}\n\ntype HostHardwareSummary struct {\n\tDynamicData\n\n\tVendor               string                         `xml:\"vendor\"`\n\tModel                string                         `xml:\"model\"`\n\tUuid                 string                         `xml:\"uuid\"`\n\tOtherIdentifyingInfo []HostSystemIdentificationInfo `xml:\"otherIdentifyingInfo,omitempty\"`\n\tMemorySize           int64                   
       `xml:\"memorySize\"`\n\tCpuModel             string                         `xml:\"cpuModel\"`\n\tCpuMhz               int32                          `xml:\"cpuMhz\"`\n\tNumCpuPkgs           int16                          `xml:\"numCpuPkgs\"`\n\tNumCpuCores          int16                          `xml:\"numCpuCores\"`\n\tNumCpuThreads        int16                          `xml:\"numCpuThreads\"`\n\tNumNics              int32                          `xml:\"numNics\"`\n\tNumHBAs              int32                          `xml:\"numHBAs\"`\n}\n\nfunc init() {\n\tt[\"HostHardwareSummary\"] = reflect.TypeOf((*HostHardwareSummary)(nil)).Elem()\n}\n\ntype HostHasComponentFailure struct {\n\tVimFault\n\n\tHostName      string `xml:\"hostName\"`\n\tComponentType string `xml:\"componentType\"`\n\tComponentName string `xml:\"componentName\"`\n}\n\nfunc init() {\n\tt[\"HostHasComponentFailure\"] = reflect.TypeOf((*HostHasComponentFailure)(nil)).Elem()\n}\n\ntype HostHasComponentFailureFault HostHasComponentFailure\n\nfunc init() {\n\tt[\"HostHasComponentFailureFault\"] = reflect.TypeOf((*HostHasComponentFailureFault)(nil)).Elem()\n}\n\ntype HostHostBusAdapter struct {\n\tDynamicData\n\n\tKey    string `xml:\"key,omitempty\"`\n\tDevice string `xml:\"device\"`\n\tBus    int32  `xml:\"bus\"`\n\tStatus string `xml:\"status\"`\n\tModel  string `xml:\"model\"`\n\tDriver string `xml:\"driver,omitempty\"`\n\tPci    string `xml:\"pci,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostHostBusAdapter\"] = reflect.TypeOf((*HostHostBusAdapter)(nil)).Elem()\n}\n\ntype HostHyperThreadScheduleInfo struct {\n\tDynamicData\n\n\tAvailable bool `xml:\"available\"`\n\tActive    bool `xml:\"active\"`\n\tConfig    bool `xml:\"config\"`\n}\n\nfunc init() {\n\tt[\"HostHyperThreadScheduleInfo\"] = reflect.TypeOf((*HostHyperThreadScheduleInfo)(nil)).Elem()\n}\n\ntype HostImageConfigGetAcceptance HostImageConfigGetAcceptanceRequestType\n\nfunc init() {\n\tt[\"HostImageConfigGetAcceptance\"] = 
reflect.TypeOf((*HostImageConfigGetAcceptance)(nil)).Elem()\n}\n\ntype HostImageConfigGetAcceptanceRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"HostImageConfigGetAcceptanceRequestType\"] = reflect.TypeOf((*HostImageConfigGetAcceptanceRequestType)(nil)).Elem()\n}\n\ntype HostImageConfigGetAcceptanceResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype HostImageConfigGetProfile HostImageConfigGetProfileRequestType\n\nfunc init() {\n\tt[\"HostImageConfigGetProfile\"] = reflect.TypeOf((*HostImageConfigGetProfile)(nil)).Elem()\n}\n\ntype HostImageConfigGetProfileRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"HostImageConfigGetProfileRequestType\"] = reflect.TypeOf((*HostImageConfigGetProfileRequestType)(nil)).Elem()\n}\n\ntype HostImageConfigGetProfileResponse struct {\n\tReturnval HostImageProfileSummary `xml:\"returnval\"`\n}\n\ntype HostImageProfileSummary struct {\n\tDynamicData\n\n\tName   string `xml:\"name\"`\n\tVendor string `xml:\"vendor\"`\n}\n\nfunc init() {\n\tt[\"HostImageProfileSummary\"] = reflect.TypeOf((*HostImageProfileSummary)(nil)).Elem()\n}\n\ntype HostInAuditModeEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostInAuditModeEvent\"] = reflect.TypeOf((*HostInAuditModeEvent)(nil)).Elem()\n}\n\ntype HostInDomain struct {\n\tHostConfigFault\n}\n\nfunc init() {\n\tt[\"HostInDomain\"] = reflect.TypeOf((*HostInDomain)(nil)).Elem()\n}\n\ntype HostInDomainFault HostInDomain\n\nfunc init() {\n\tt[\"HostInDomainFault\"] = reflect.TypeOf((*HostInDomainFault)(nil)).Elem()\n}\n\ntype HostIncompatibleForFaultTolerance struct {\n\tVmFaultToleranceIssue\n\n\tHostName string `xml:\"hostName,omitempty\"`\n\tReason   string `xml:\"reason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostIncompatibleForFaultTolerance\"] = reflect.TypeOf((*HostIncompatibleForFaultTolerance)(nil)).Elem()\n}\n\ntype HostIncompatibleForFaultToleranceFault 
HostIncompatibleForFaultTolerance\n\nfunc init() {\n\tt[\"HostIncompatibleForFaultToleranceFault\"] = reflect.TypeOf((*HostIncompatibleForFaultToleranceFault)(nil)).Elem()\n}\n\ntype HostIncompatibleForRecordReplay struct {\n\tVimFault\n\n\tHostName string `xml:\"hostName,omitempty\"`\n\tReason   string `xml:\"reason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostIncompatibleForRecordReplay\"] = reflect.TypeOf((*HostIncompatibleForRecordReplay)(nil)).Elem()\n}\n\ntype HostIncompatibleForRecordReplayFault HostIncompatibleForRecordReplay\n\nfunc init() {\n\tt[\"HostIncompatibleForRecordReplayFault\"] = reflect.TypeOf((*HostIncompatibleForRecordReplayFault)(nil)).Elem()\n}\n\ntype HostInflateDiskRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tId        ID                     `xml:\"id\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"HostInflateDiskRequestType\"] = reflect.TypeOf((*HostInflateDiskRequestType)(nil)).Elem()\n}\n\ntype HostInflateDisk_Task HostInflateDiskRequestType\n\nfunc init() {\n\tt[\"HostInflateDisk_Task\"] = reflect.TypeOf((*HostInflateDisk_Task)(nil)).Elem()\n}\n\ntype HostInflateDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype HostInternetScsiHba struct {\n\tHostHostBusAdapter\n\n\tIsSoftwareBased            bool                                          `xml:\"isSoftwareBased\"`\n\tCanBeDisabled              *bool                                         `xml:\"canBeDisabled\"`\n\tNetworkBindingSupport      HostInternetScsiHbaNetworkBindingSupportType  `xml:\"networkBindingSupport,omitempty\"`\n\tDiscoveryCapabilities      HostInternetScsiHbaDiscoveryCapabilities      `xml:\"discoveryCapabilities\"`\n\tDiscoveryProperties        HostInternetScsiHbaDiscoveryProperties        `xml:\"discoveryProperties\"`\n\tAuthenticationCapabilities HostInternetScsiHbaAuthenticationCapabilities `xml:\"authenticationCapabilities\"`\n\tAuthenticationProperties  
 HostInternetScsiHbaAuthenticationProperties   `xml:\"authenticationProperties\"`\n\tDigestCapabilities         *HostInternetScsiHbaDigestCapabilities        `xml:\"digestCapabilities,omitempty\"`\n\tDigestProperties           *HostInternetScsiHbaDigestProperties          `xml:\"digestProperties,omitempty\"`\n\tIpCapabilities             HostInternetScsiHbaIPCapabilities             `xml:\"ipCapabilities\"`\n\tIpProperties               HostInternetScsiHbaIPProperties               `xml:\"ipProperties\"`\n\tSupportedAdvancedOptions   []OptionDef                                   `xml:\"supportedAdvancedOptions,omitempty\"`\n\tAdvancedOptions            []HostInternetScsiHbaParamValue               `xml:\"advancedOptions,omitempty\"`\n\tIScsiName                  string                                        `xml:\"iScsiName\"`\n\tIScsiAlias                 string                                        `xml:\"iScsiAlias,omitempty\"`\n\tConfiguredSendTarget       []HostInternetScsiHbaSendTarget               `xml:\"configuredSendTarget,omitempty\"`\n\tConfiguredStaticTarget     []HostInternetScsiHbaStaticTarget             `xml:\"configuredStaticTarget,omitempty\"`\n\tMaxSpeedMb                 int32                                         `xml:\"maxSpeedMb,omitempty\"`\n\tCurrentSpeedMb             int32                                         `xml:\"currentSpeedMb,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostInternetScsiHba\"] = reflect.TypeOf((*HostInternetScsiHba)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaAuthenticationCapabilities struct {\n\tDynamicData\n\n\tChapAuthSettable         bool  `xml:\"chapAuthSettable\"`\n\tKrb5AuthSettable         bool  `xml:\"krb5AuthSettable\"`\n\tSrpAuthSettable          bool  `xml:\"srpAuthSettable\"`\n\tSpkmAuthSettable         bool  `xml:\"spkmAuthSettable\"`\n\tMutualChapSettable       *bool `xml:\"mutualChapSettable\"`\n\tTargetChapSettable       *bool `xml:\"targetChapSettable\"`\n\tTargetMutualChapSettable *bool 
`xml:\"targetMutualChapSettable\"`\n}\n\nfunc init() {\n\tt[\"HostInternetScsiHbaAuthenticationCapabilities\"] = reflect.TypeOf((*HostInternetScsiHbaAuthenticationCapabilities)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaAuthenticationProperties struct {\n\tDynamicData\n\n\tChapAuthEnabled              bool   `xml:\"chapAuthEnabled\"`\n\tChapName                     string `xml:\"chapName,omitempty\"`\n\tChapSecret                   string `xml:\"chapSecret,omitempty\"`\n\tChapAuthenticationType       string `xml:\"chapAuthenticationType,omitempty\"`\n\tChapInherited                *bool  `xml:\"chapInherited\"`\n\tMutualChapName               string `xml:\"mutualChapName,omitempty\"`\n\tMutualChapSecret             string `xml:\"mutualChapSecret,omitempty\"`\n\tMutualChapAuthenticationType string `xml:\"mutualChapAuthenticationType,omitempty\"`\n\tMutualChapInherited          *bool  `xml:\"mutualChapInherited\"`\n}\n\nfunc init() {\n\tt[\"HostInternetScsiHbaAuthenticationProperties\"] = reflect.TypeOf((*HostInternetScsiHbaAuthenticationProperties)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaDigestCapabilities struct {\n\tDynamicData\n\n\tHeaderDigestSettable       *bool `xml:\"headerDigestSettable\"`\n\tDataDigestSettable         *bool `xml:\"dataDigestSettable\"`\n\tTargetHeaderDigestSettable *bool `xml:\"targetHeaderDigestSettable\"`\n\tTargetDataDigestSettable   *bool `xml:\"targetDataDigestSettable\"`\n}\n\nfunc init() {\n\tt[\"HostInternetScsiHbaDigestCapabilities\"] = reflect.TypeOf((*HostInternetScsiHbaDigestCapabilities)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaDigestProperties struct {\n\tDynamicData\n\n\tHeaderDigestType      string `xml:\"headerDigestType,omitempty\"`\n\tHeaderDigestInherited *bool  `xml:\"headerDigestInherited\"`\n\tDataDigestType        string `xml:\"dataDigestType,omitempty\"`\n\tDataDigestInherited   *bool  `xml:\"dataDigestInherited\"`\n}\n\nfunc init() {\n\tt[\"HostInternetScsiHbaDigestProperties\"] = 
reflect.TypeOf((*HostInternetScsiHbaDigestProperties)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaDiscoveryCapabilities struct {\n\tDynamicData\n\n\tISnsDiscoverySettable         bool `xml:\"iSnsDiscoverySettable\"`\n\tSlpDiscoverySettable          bool `xml:\"slpDiscoverySettable\"`\n\tStaticTargetDiscoverySettable bool `xml:\"staticTargetDiscoverySettable\"`\n\tSendTargetsDiscoverySettable  bool `xml:\"sendTargetsDiscoverySettable\"`\n}\n\nfunc init() {\n\tt[\"HostInternetScsiHbaDiscoveryCapabilities\"] = reflect.TypeOf((*HostInternetScsiHbaDiscoveryCapabilities)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaDiscoveryProperties struct {\n\tDynamicData\n\n\tISnsDiscoveryEnabled         bool   `xml:\"iSnsDiscoveryEnabled\"`\n\tISnsDiscoveryMethod          string `xml:\"iSnsDiscoveryMethod,omitempty\"`\n\tISnsHost                     string `xml:\"iSnsHost,omitempty\"`\n\tSlpDiscoveryEnabled          bool   `xml:\"slpDiscoveryEnabled\"`\n\tSlpDiscoveryMethod           string `xml:\"slpDiscoveryMethod,omitempty\"`\n\tSlpHost                      string `xml:\"slpHost,omitempty\"`\n\tStaticTargetDiscoveryEnabled bool   `xml:\"staticTargetDiscoveryEnabled\"`\n\tSendTargetsDiscoveryEnabled  bool   `xml:\"sendTargetsDiscoveryEnabled\"`\n}\n\nfunc init() {\n\tt[\"HostInternetScsiHbaDiscoveryProperties\"] = reflect.TypeOf((*HostInternetScsiHbaDiscoveryProperties)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaIPCapabilities struct {\n\tDynamicData\n\n\tAddressSettable                              bool  `xml:\"addressSettable\"`\n\tIpConfigurationMethodSettable                bool  `xml:\"ipConfigurationMethodSettable\"`\n\tSubnetMaskSettable                           bool  `xml:\"subnetMaskSettable\"`\n\tDefaultGatewaySettable                       bool  `xml:\"defaultGatewaySettable\"`\n\tPrimaryDnsServerAddressSettable              bool  `xml:\"primaryDnsServerAddressSettable\"`\n\tAlternateDnsServerAddressSettable            bool  
`xml:\"alternateDnsServerAddressSettable\"`\n\tIpv6Supported                                *bool `xml:\"ipv6Supported\"`\n\tArpRedirectSettable                          *bool `xml:\"arpRedirectSettable\"`\n\tMtuSettable                                  *bool `xml:\"mtuSettable\"`\n\tHostNameAsTargetAddress                      *bool `xml:\"hostNameAsTargetAddress\"`\n\tNameAliasSettable                            *bool `xml:\"nameAliasSettable\"`\n\tIpv4EnableSettable                           *bool `xml:\"ipv4EnableSettable\"`\n\tIpv6EnableSettable                           *bool `xml:\"ipv6EnableSettable\"`\n\tIpv6PrefixLengthSettable                     *bool `xml:\"ipv6PrefixLengthSettable\"`\n\tIpv6PrefixLength                             int32 `xml:\"ipv6PrefixLength,omitempty\"`\n\tIpv6DhcpConfigurationSettable                *bool `xml:\"ipv6DhcpConfigurationSettable\"`\n\tIpv6LinkLocalAutoConfigurationSettable       *bool `xml:\"ipv6LinkLocalAutoConfigurationSettable\"`\n\tIpv6RouterAdvertisementConfigurationSettable *bool `xml:\"ipv6RouterAdvertisementConfigurationSettable\"`\n\tIpv6DefaultGatewaySettable                   *bool `xml:\"ipv6DefaultGatewaySettable\"`\n\tIpv6MaxStaticAddressesSupported              int32 `xml:\"ipv6MaxStaticAddressesSupported,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostInternetScsiHbaIPCapabilities\"] = reflect.TypeOf((*HostInternetScsiHbaIPCapabilities)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaIPProperties struct {\n\tDynamicData\n\n\tMac                       string                             `xml:\"mac,omitempty\"`\n\tAddress                   string                             `xml:\"address,omitempty\"`\n\tDhcpConfigurationEnabled  bool                               `xml:\"dhcpConfigurationEnabled\"`\n\tSubnetMask                string                             `xml:\"subnetMask,omitempty\"`\n\tDefaultGateway            string                             
`xml:\"defaultGateway,omitempty\"`\n\tPrimaryDnsServerAddress   string                             `xml:\"primaryDnsServerAddress,omitempty\"`\n\tAlternateDnsServerAddress string                             `xml:\"alternateDnsServerAddress,omitempty\"`\n\tIpv6Address               string                             `xml:\"ipv6Address,omitempty\"`\n\tIpv6SubnetMask            string                             `xml:\"ipv6SubnetMask,omitempty\"`\n\tIpv6DefaultGateway        string                             `xml:\"ipv6DefaultGateway,omitempty\"`\n\tArpRedirectEnabled        *bool                              `xml:\"arpRedirectEnabled\"`\n\tMtu                       int32                              `xml:\"mtu,omitempty\"`\n\tJumboFramesEnabled        *bool                              `xml:\"jumboFramesEnabled\"`\n\tIpv4Enabled               *bool                              `xml:\"ipv4Enabled\"`\n\tIpv6Enabled               *bool                              `xml:\"ipv6Enabled\"`\n\tIpv6properties            *HostInternetScsiHbaIPv6Properties `xml:\"ipv6properties,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostInternetScsiHbaIPProperties\"] = reflect.TypeOf((*HostInternetScsiHbaIPProperties)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaIPv6Properties struct {\n\tDynamicData\n\n\tIscsiIpv6Address                            []HostInternetScsiHbaIscsiIpv6Address `xml:\"iscsiIpv6Address,omitempty\"`\n\tIpv6DhcpConfigurationEnabled                *bool                                 `xml:\"ipv6DhcpConfigurationEnabled\"`\n\tIpv6LinkLocalAutoConfigurationEnabled       *bool                                 `xml:\"ipv6LinkLocalAutoConfigurationEnabled\"`\n\tIpv6RouterAdvertisementConfigurationEnabled *bool                                 `xml:\"ipv6RouterAdvertisementConfigurationEnabled\"`\n\tIpv6DefaultGateway                          string                                `xml:\"ipv6DefaultGateway,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostInternetScsiHbaIPv6Properties\"] 
= reflect.TypeOf((*HostInternetScsiHbaIPv6Properties)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaIscsiIpv6Address struct {\n\tDynamicData\n\n\tAddress      string `xml:\"address\"`\n\tPrefixLength int32  `xml:\"prefixLength\"`\n\tOrigin       string `xml:\"origin\"`\n\tOperation    string `xml:\"operation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostInternetScsiHbaIscsiIpv6Address\"] = reflect.TypeOf((*HostInternetScsiHbaIscsiIpv6Address)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaParamValue struct {\n\tOptionValue\n\n\tIsInherited *bool `xml:\"isInherited\"`\n}\n\nfunc init() {\n\tt[\"HostInternetScsiHbaParamValue\"] = reflect.TypeOf((*HostInternetScsiHbaParamValue)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaSendTarget struct {\n\tDynamicData\n\n\tAddress                  string                                       `xml:\"address\"`\n\tPort                     int32                                        `xml:\"port,omitempty\"`\n\tAuthenticationProperties *HostInternetScsiHbaAuthenticationProperties `xml:\"authenticationProperties,omitempty\"`\n\tDigestProperties         *HostInternetScsiHbaDigestProperties         `xml:\"digestProperties,omitempty\"`\n\tSupportedAdvancedOptions []OptionDef                                  `xml:\"supportedAdvancedOptions,omitempty\"`\n\tAdvancedOptions          []HostInternetScsiHbaParamValue              `xml:\"advancedOptions,omitempty\"`\n\tParent                   string                                       `xml:\"parent,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostInternetScsiHbaSendTarget\"] = reflect.TypeOf((*HostInternetScsiHbaSendTarget)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaStaticTarget struct {\n\tDynamicData\n\n\tAddress                  string                                       `xml:\"address\"`\n\tPort                     int32                                        `xml:\"port,omitempty\"`\n\tIScsiName                string                                       `xml:\"iScsiName\"`\n\tDiscoveryMethod         
 string                                       `xml:\"discoveryMethod,omitempty\"`\n\tAuthenticationProperties *HostInternetScsiHbaAuthenticationProperties `xml:\"authenticationProperties,omitempty\"`\n\tDigestProperties         *HostInternetScsiHbaDigestProperties         `xml:\"digestProperties,omitempty\"`\n\tSupportedAdvancedOptions []OptionDef                                  `xml:\"supportedAdvancedOptions,omitempty\"`\n\tAdvancedOptions          []HostInternetScsiHbaParamValue              `xml:\"advancedOptions,omitempty\"`\n\tParent                   string                                       `xml:\"parent,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostInternetScsiHbaStaticTarget\"] = reflect.TypeOf((*HostInternetScsiHbaStaticTarget)(nil)).Elem()\n}\n\ntype HostInternetScsiHbaTargetSet struct {\n\tDynamicData\n\n\tStaticTargets []HostInternetScsiHbaStaticTarget `xml:\"staticTargets,omitempty\"`\n\tSendTargets   []HostInternetScsiHbaSendTarget   `xml:\"sendTargets,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostInternetScsiHbaTargetSet\"] = reflect.TypeOf((*HostInternetScsiHbaTargetSet)(nil)).Elem()\n}\n\ntype HostInternetScsiTargetTransport struct {\n\tHostTargetTransport\n\n\tIScsiName  string   `xml:\"iScsiName\"`\n\tIScsiAlias string   `xml:\"iScsiAlias\"`\n\tAddress    []string `xml:\"address,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostInternetScsiTargetTransport\"] = reflect.TypeOf((*HostInternetScsiTargetTransport)(nil)).Elem()\n}\n\ntype HostInventoryFull struct {\n\tNotEnoughLicenses\n\n\tCapacity int32 `xml:\"capacity\"`\n}\n\nfunc init() {\n\tt[\"HostInventoryFull\"] = reflect.TypeOf((*HostInventoryFull)(nil)).Elem()\n}\n\ntype HostInventoryFullEvent struct {\n\tLicenseEvent\n\n\tCapacity int32 `xml:\"capacity\"`\n}\n\nfunc init() {\n\tt[\"HostInventoryFullEvent\"] = reflect.TypeOf((*HostInventoryFullEvent)(nil)).Elem()\n}\n\ntype HostInventoryFullFault HostInventoryFull\n\nfunc init() {\n\tt[\"HostInventoryFullFault\"] = 
reflect.TypeOf((*HostInventoryFullFault)(nil)).Elem()\n}\n\ntype HostInventoryUnreadableEvent struct {\n\tEvent\n}\n\nfunc init() {\n\tt[\"HostInventoryUnreadableEvent\"] = reflect.TypeOf((*HostInventoryUnreadableEvent)(nil)).Elem()\n}\n\ntype HostIoFilterInfo struct {\n\tIoFilterInfo\n\n\tAvailable bool `xml:\"available\"`\n}\n\nfunc init() {\n\tt[\"HostIoFilterInfo\"] = reflect.TypeOf((*HostIoFilterInfo)(nil)).Elem()\n}\n\ntype HostIpChangedEvent struct {\n\tHostEvent\n\n\tOldIP string `xml:\"oldIP\"`\n\tNewIP string `xml:\"newIP\"`\n}\n\nfunc init() {\n\tt[\"HostIpChangedEvent\"] = reflect.TypeOf((*HostIpChangedEvent)(nil)).Elem()\n}\n\ntype HostIpConfig struct {\n\tDynamicData\n\n\tDhcp       bool                                  `xml:\"dhcp\"`\n\tIpAddress  string                                `xml:\"ipAddress,omitempty\"`\n\tSubnetMask string                                `xml:\"subnetMask,omitempty\"`\n\tIpV6Config *HostIpConfigIpV6AddressConfiguration `xml:\"ipV6Config,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostIpConfig\"] = reflect.TypeOf((*HostIpConfig)(nil)).Elem()\n}\n\ntype HostIpConfigIpV6Address struct {\n\tDynamicData\n\n\tIpAddress    string     `xml:\"ipAddress\"`\n\tPrefixLength int32      `xml:\"prefixLength\"`\n\tOrigin       string     `xml:\"origin,omitempty\"`\n\tDadState     string     `xml:\"dadState,omitempty\"`\n\tLifetime     *time.Time `xml:\"lifetime\"`\n\tOperation    string     `xml:\"operation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostIpConfigIpV6Address\"] = reflect.TypeOf((*HostIpConfigIpV6Address)(nil)).Elem()\n}\n\ntype HostIpConfigIpV6AddressConfiguration struct {\n\tDynamicData\n\n\tIpV6Address              []HostIpConfigIpV6Address `xml:\"ipV6Address,omitempty\"`\n\tAutoConfigurationEnabled *bool                     `xml:\"autoConfigurationEnabled\"`\n\tDhcpV6Enabled            *bool                     `xml:\"dhcpV6Enabled\"`\n}\n\nfunc init() {\n\tt[\"HostIpConfigIpV6AddressConfiguration\"] = 
reflect.TypeOf((*HostIpConfigIpV6AddressConfiguration)(nil)).Elem()\n}\n\ntype HostIpInconsistentEvent struct {\n\tHostEvent\n\n\tIpAddress  string `xml:\"ipAddress\"`\n\tIpAddress2 string `xml:\"ipAddress2\"`\n}\n\nfunc init() {\n\tt[\"HostIpInconsistentEvent\"] = reflect.TypeOf((*HostIpInconsistentEvent)(nil)).Elem()\n}\n\ntype HostIpRouteConfig struct {\n\tDynamicData\n\n\tDefaultGateway     string `xml:\"defaultGateway,omitempty\"`\n\tGatewayDevice      string `xml:\"gatewayDevice,omitempty\"`\n\tIpV6DefaultGateway string `xml:\"ipV6DefaultGateway,omitempty\"`\n\tIpV6GatewayDevice  string `xml:\"ipV6GatewayDevice,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostIpRouteConfig\"] = reflect.TypeOf((*HostIpRouteConfig)(nil)).Elem()\n}\n\ntype HostIpRouteConfigSpec struct {\n\tHostIpRouteConfig\n\n\tGatewayDeviceConnection     *HostVirtualNicConnection `xml:\"gatewayDeviceConnection,omitempty\"`\n\tIpV6GatewayDeviceConnection *HostVirtualNicConnection `xml:\"ipV6GatewayDeviceConnection,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostIpRouteConfigSpec\"] = reflect.TypeOf((*HostIpRouteConfigSpec)(nil)).Elem()\n}\n\ntype HostIpRouteEntry struct {\n\tDynamicData\n\n\tNetwork      string `xml:\"network\"`\n\tPrefixLength int32  `xml:\"prefixLength\"`\n\tGateway      string `xml:\"gateway\"`\n\tDeviceName   string `xml:\"deviceName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostIpRouteEntry\"] = reflect.TypeOf((*HostIpRouteEntry)(nil)).Elem()\n}\n\ntype HostIpRouteOp struct {\n\tDynamicData\n\n\tChangeOperation string           `xml:\"changeOperation\"`\n\tRoute           HostIpRouteEntry `xml:\"route\"`\n}\n\nfunc init() {\n\tt[\"HostIpRouteOp\"] = reflect.TypeOf((*HostIpRouteOp)(nil)).Elem()\n}\n\ntype HostIpRouteTableConfig struct {\n\tDynamicData\n\n\tIpRoute   []HostIpRouteOp `xml:\"ipRoute,omitempty\"`\n\tIpv6Route []HostIpRouteOp `xml:\"ipv6Route,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostIpRouteTableConfig\"] = reflect.TypeOf((*HostIpRouteTableConfig)(nil)).Elem()\n}\n\ntype 
HostIpRouteTableInfo struct {\n\tDynamicData\n\n\tIpRoute   []HostIpRouteEntry `xml:\"ipRoute,omitempty\"`\n\tIpv6Route []HostIpRouteEntry `xml:\"ipv6Route,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostIpRouteTableInfo\"] = reflect.TypeOf((*HostIpRouteTableInfo)(nil)).Elem()\n}\n\ntype HostIpToShortNameFailedEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostIpToShortNameFailedEvent\"] = reflect.TypeOf((*HostIpToShortNameFailedEvent)(nil)).Elem()\n}\n\ntype HostIpmiInfo struct {\n\tDynamicData\n\n\tBmcIpAddress  string `xml:\"bmcIpAddress,omitempty\"`\n\tBmcMacAddress string `xml:\"bmcMacAddress,omitempty\"`\n\tLogin         string `xml:\"login,omitempty\"`\n\tPassword      string `xml:\"password,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostIpmiInfo\"] = reflect.TypeOf((*HostIpmiInfo)(nil)).Elem()\n}\n\ntype HostIsolationIpPingFailedEvent struct {\n\tHostDasEvent\n\n\tIsolationIp string `xml:\"isolationIp\"`\n}\n\nfunc init() {\n\tt[\"HostIsolationIpPingFailedEvent\"] = reflect.TypeOf((*HostIsolationIpPingFailedEvent)(nil)).Elem()\n}\n\ntype HostLicensableResourceInfo struct {\n\tDynamicData\n\n\tResource []KeyAnyValue `xml:\"resource\"`\n}\n\nfunc init() {\n\tt[\"HostLicensableResourceInfo\"] = reflect.TypeOf((*HostLicensableResourceInfo)(nil)).Elem()\n}\n\ntype HostLicenseConnectInfo struct {\n\tDynamicData\n\n\tLicense    LicenseManagerLicenseInfo    `xml:\"license\"`\n\tEvaluation LicenseManagerEvaluationInfo `xml:\"evaluation\"`\n\tResource   *HostLicensableResourceInfo  `xml:\"resource,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostLicenseConnectInfo\"] = reflect.TypeOf((*HostLicenseConnectInfo)(nil)).Elem()\n}\n\ntype HostLicenseExpiredEvent struct {\n\tLicenseEvent\n}\n\nfunc init() {\n\tt[\"HostLicenseExpiredEvent\"] = reflect.TypeOf((*HostLicenseExpiredEvent)(nil)).Elem()\n}\n\ntype HostLicenseSpec struct {\n\tDynamicData\n\n\tSource             BaseLicenseSource `xml:\"source,omitempty,typeattr\"`\n\tEditionKey         string            
`xml:\"editionKey,omitempty\"`\n\tDisabledFeatureKey []string          `xml:\"disabledFeatureKey,omitempty\"`\n\tEnabledFeatureKey  []string          `xml:\"enabledFeatureKey,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostLicenseSpec\"] = reflect.TypeOf((*HostLicenseSpec)(nil)).Elem()\n}\n\ntype HostListSummary struct {\n\tDynamicData\n\n\tHost               *ManagedObjectReference        `xml:\"host,omitempty\"`\n\tHardware           *HostHardwareSummary           `xml:\"hardware,omitempty\"`\n\tRuntime            *HostRuntimeInfo               `xml:\"runtime,omitempty\"`\n\tConfig             HostConfigSummary              `xml:\"config\"`\n\tQuickStats         HostListSummaryQuickStats      `xml:\"quickStats\"`\n\tOverallStatus      ManagedEntityStatus            `xml:\"overallStatus\"`\n\tRebootRequired     bool                           `xml:\"rebootRequired\"`\n\tCustomValue        []BaseCustomFieldValue         `xml:\"customValue,omitempty,typeattr\"`\n\tManagementServerIp string                         `xml:\"managementServerIp,omitempty\"`\n\tMaxEVCModeKey      string                         `xml:\"maxEVCModeKey,omitempty\"`\n\tCurrentEVCModeKey  string                         `xml:\"currentEVCModeKey,omitempty\"`\n\tGateway            *HostListSummaryGatewaySummary `xml:\"gateway,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostListSummary\"] = reflect.TypeOf((*HostListSummary)(nil)).Elem()\n}\n\ntype HostListSummaryGatewaySummary struct {\n\tDynamicData\n\n\tGatewayType string `xml:\"gatewayType\"`\n\tGatewayId   string `xml:\"gatewayId\"`\n}\n\nfunc init() {\n\tt[\"HostListSummaryGatewaySummary\"] = reflect.TypeOf((*HostListSummaryGatewaySummary)(nil)).Elem()\n}\n\ntype HostListSummaryQuickStats struct {\n\tDynamicData\n\n\tOverallCpuUsage           int32 `xml:\"overallCpuUsage,omitempty\"`\n\tOverallMemoryUsage        int32 `xml:\"overallMemoryUsage,omitempty\"`\n\tDistributedCpuFairness    int32 
`xml:\"distributedCpuFairness,omitempty\"`\n\tDistributedMemoryFairness int32 `xml:\"distributedMemoryFairness,omitempty\"`\n\tUptime                    int32 `xml:\"uptime,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostListSummaryQuickStats\"] = reflect.TypeOf((*HostListSummaryQuickStats)(nil)).Elem()\n}\n\ntype HostListVStorageObject HostListVStorageObjectRequestType\n\nfunc init() {\n\tt[\"HostListVStorageObject\"] = reflect.TypeOf((*HostListVStorageObject)(nil)).Elem()\n}\n\ntype HostListVStorageObjectRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"HostListVStorageObjectRequestType\"] = reflect.TypeOf((*HostListVStorageObjectRequestType)(nil)).Elem()\n}\n\ntype HostListVStorageObjectResponse struct {\n\tReturnval []ID `xml:\"returnval,omitempty\"`\n}\n\ntype HostLocalAuthenticationInfo struct {\n\tHostAuthenticationStoreInfo\n}\n\nfunc init() {\n\tt[\"HostLocalAuthenticationInfo\"] = reflect.TypeOf((*HostLocalAuthenticationInfo)(nil)).Elem()\n}\n\ntype HostLocalFileSystemVolume struct {\n\tHostFileSystemVolume\n\n\tDevice string `xml:\"device\"`\n}\n\nfunc init() {\n\tt[\"HostLocalFileSystemVolume\"] = reflect.TypeOf((*HostLocalFileSystemVolume)(nil)).Elem()\n}\n\ntype HostLocalFileSystemVolumeSpec struct {\n\tDynamicData\n\n\tDevice    string `xml:\"device\"`\n\tLocalPath string `xml:\"localPath\"`\n}\n\nfunc init() {\n\tt[\"HostLocalFileSystemVolumeSpec\"] = reflect.TypeOf((*HostLocalFileSystemVolumeSpec)(nil)).Elem()\n}\n\ntype HostLocalPortCreatedEvent struct {\n\tDvsEvent\n\n\tHostLocalPort DVSHostLocalPortInfo `xml:\"hostLocalPort\"`\n}\n\nfunc init() {\n\tt[\"HostLocalPortCreatedEvent\"] = reflect.TypeOf((*HostLocalPortCreatedEvent)(nil)).Elem()\n}\n\ntype HostLowLevelProvisioningManagerDiskLayoutSpec struct {\n\tDynamicData\n\n\tControllerType string `xml:\"controllerType\"`\n\tBusNumber      int32  `xml:\"busNumber\"`\n\tUnitNumber     *int32 
`xml:\"unitNumber\"`\n\tSrcFilename    string `xml:\"srcFilename\"`\n\tDstFilename    string `xml:\"dstFilename\"`\n}\n\nfunc init() {\n\tt[\"HostLowLevelProvisioningManagerDiskLayoutSpec\"] = reflect.TypeOf((*HostLowLevelProvisioningManagerDiskLayoutSpec)(nil)).Elem()\n}\n\ntype HostLowLevelProvisioningManagerFileDeleteResult struct {\n\tDynamicData\n\n\tFileName string               `xml:\"fileName\"`\n\tFault    LocalizedMethodFault `xml:\"fault\"`\n}\n\nfunc init() {\n\tt[\"HostLowLevelProvisioningManagerFileDeleteResult\"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileDeleteResult)(nil)).Elem()\n}\n\ntype HostLowLevelProvisioningManagerFileDeleteSpec struct {\n\tDynamicData\n\n\tFileName string `xml:\"fileName\"`\n\tFileType string `xml:\"fileType\"`\n}\n\nfunc init() {\n\tt[\"HostLowLevelProvisioningManagerFileDeleteSpec\"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileDeleteSpec)(nil)).Elem()\n}\n\ntype HostLowLevelProvisioningManagerFileReserveResult struct {\n\tDynamicData\n\n\tBaseName     string `xml:\"baseName\"`\n\tParentDir    string `xml:\"parentDir\"`\n\tReservedName string `xml:\"reservedName\"`\n}\n\nfunc init() {\n\tt[\"HostLowLevelProvisioningManagerFileReserveResult\"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileReserveResult)(nil)).Elem()\n}\n\ntype HostLowLevelProvisioningManagerFileReserveSpec struct {\n\tDynamicData\n\n\tBaseName       string `xml:\"baseName\"`\n\tParentDir      string `xml:\"parentDir\"`\n\tFileType       string `xml:\"fileType\"`\n\tStorageProfile string `xml:\"storageProfile\"`\n}\n\nfunc init() {\n\tt[\"HostLowLevelProvisioningManagerFileReserveSpec\"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileReserveSpec)(nil)).Elem()\n}\n\ntype HostLowLevelProvisioningManagerSnapshotLayoutSpec struct {\n\tDynamicData\n\n\tId          int32                                           `xml:\"id\"`\n\tSrcFilename string                                          `xml:\"srcFilename\"`\n\tDstFilename 
string                                          `xml:\"dstFilename\"`\n\tDisk        []HostLowLevelProvisioningManagerDiskLayoutSpec `xml:\"disk,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostLowLevelProvisioningManagerSnapshotLayoutSpec\"] = reflect.TypeOf((*HostLowLevelProvisioningManagerSnapshotLayoutSpec)(nil)).Elem()\n}\n\ntype HostLowLevelProvisioningManagerVmMigrationStatus struct {\n\tDynamicData\n\n\tMigrationId          int64  `xml:\"migrationId\"`\n\tType                 string `xml:\"type\"`\n\tSource               bool   `xml:\"source\"`\n\tConsideredSuccessful bool   `xml:\"consideredSuccessful\"`\n}\n\nfunc init() {\n\tt[\"HostLowLevelProvisioningManagerVmMigrationStatus\"] = reflect.TypeOf((*HostLowLevelProvisioningManagerVmMigrationStatus)(nil)).Elem()\n}\n\ntype HostLowLevelProvisioningManagerVmRecoveryInfo struct {\n\tDynamicData\n\n\tVersion      string                       `xml:\"version\"`\n\tBiosUUID     string                       `xml:\"biosUUID\"`\n\tInstanceUUID string                       `xml:\"instanceUUID\"`\n\tFtInfo       BaseFaultToleranceConfigInfo `xml:\"ftInfo,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostLowLevelProvisioningManagerVmRecoveryInfo\"] = reflect.TypeOf((*HostLowLevelProvisioningManagerVmRecoveryInfo)(nil)).Elem()\n}\n\ntype HostMaintenanceSpec struct {\n\tDynamicData\n\n\tVsanMode *VsanHostDecommissionMode `xml:\"vsanMode,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostMaintenanceSpec\"] = reflect.TypeOf((*HostMaintenanceSpec)(nil)).Elem()\n}\n\ntype HostMemberHealthCheckResult struct {\n\tDynamicData\n\n\tSummary string `xml:\"summary,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostMemberHealthCheckResult\"] = reflect.TypeOf((*HostMemberHealthCheckResult)(nil)).Elem()\n}\n\ntype HostMemberRuntimeInfo struct {\n\tDynamicData\n\n\tHost              ManagedObjectReference            `xml:\"host\"`\n\tStatus            string                            `xml:\"status,omitempty\"`\n\tStatusDetail      string              
              `xml:\"statusDetail,omitempty\"`\n\tHealthCheckResult []BaseHostMemberHealthCheckResult `xml:\"healthCheckResult,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostMemberRuntimeInfo\"] = reflect.TypeOf((*HostMemberRuntimeInfo)(nil)).Elem()\n}\n\ntype HostMemberUplinkHealthCheckResult struct {\n\tHostMemberHealthCheckResult\n\n\tUplinkPortKey string `xml:\"uplinkPortKey\"`\n}\n\nfunc init() {\n\tt[\"HostMemberUplinkHealthCheckResult\"] = reflect.TypeOf((*HostMemberUplinkHealthCheckResult)(nil)).Elem()\n}\n\ntype HostMemoryProfile struct {\n\tApplyProfile\n}\n\nfunc init() {\n\tt[\"HostMemoryProfile\"] = reflect.TypeOf((*HostMemoryProfile)(nil)).Elem()\n}\n\ntype HostMemorySpec struct {\n\tDynamicData\n\n\tServiceConsoleReservation int64 `xml:\"serviceConsoleReservation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostMemorySpec\"] = reflect.TypeOf((*HostMemorySpec)(nil)).Elem()\n}\n\ntype HostMissingNetworksEvent struct {\n\tHostDasEvent\n\n\tIps string `xml:\"ips,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostMissingNetworksEvent\"] = reflect.TypeOf((*HostMissingNetworksEvent)(nil)).Elem()\n}\n\ntype HostMonitoringStateChangedEvent struct {\n\tClusterEvent\n\n\tState     string `xml:\"state\"`\n\tPrevState string `xml:\"prevState,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostMonitoringStateChangedEvent\"] = reflect.TypeOf((*HostMonitoringStateChangedEvent)(nil)).Elem()\n}\n\ntype HostMountInfo struct {\n\tDynamicData\n\n\tPath               string `xml:\"path,omitempty\"`\n\tAccessMode         string `xml:\"accessMode\"`\n\tMounted            *bool  `xml:\"mounted\"`\n\tAccessible         *bool  `xml:\"accessible\"`\n\tInaccessibleReason string `xml:\"inaccessibleReason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostMountInfo\"] = reflect.TypeOf((*HostMountInfo)(nil)).Elem()\n}\n\ntype HostMultipathInfo struct {\n\tDynamicData\n\n\tLun []HostMultipathInfoLogicalUnit `xml:\"lun,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostMultipathInfo\"] = 
reflect.TypeOf((*HostMultipathInfo)(nil)).Elem()\n}\n\ntype HostMultipathInfoFixedLogicalUnitPolicy struct {\n\tHostMultipathInfoLogicalUnitPolicy\n\n\tPrefer string `xml:\"prefer\"`\n}\n\nfunc init() {\n\tt[\"HostMultipathInfoFixedLogicalUnitPolicy\"] = reflect.TypeOf((*HostMultipathInfoFixedLogicalUnitPolicy)(nil)).Elem()\n}\n\ntype HostMultipathInfoLogicalUnit struct {\n\tDynamicData\n\n\tKey                    string                                              `xml:\"key\"`\n\tId                     string                                              `xml:\"id\"`\n\tLun                    string                                              `xml:\"lun\"`\n\tPath                   []HostMultipathInfoPath                             `xml:\"path\"`\n\tPolicy                 BaseHostMultipathInfoLogicalUnitPolicy              `xml:\"policy,typeattr\"`\n\tStorageArrayTypePolicy *HostMultipathInfoLogicalUnitStorageArrayTypePolicy `xml:\"storageArrayTypePolicy,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostMultipathInfoLogicalUnit\"] = reflect.TypeOf((*HostMultipathInfoLogicalUnit)(nil)).Elem()\n}\n\ntype HostMultipathInfoLogicalUnitPolicy struct {\n\tDynamicData\n\n\tPolicy string `xml:\"policy\"`\n}\n\nfunc init() {\n\tt[\"HostMultipathInfoLogicalUnitPolicy\"] = reflect.TypeOf((*HostMultipathInfoLogicalUnitPolicy)(nil)).Elem()\n}\n\ntype HostMultipathInfoLogicalUnitStorageArrayTypePolicy struct {\n\tDynamicData\n\n\tPolicy string `xml:\"policy\"`\n}\n\nfunc init() {\n\tt[\"HostMultipathInfoLogicalUnitStorageArrayTypePolicy\"] = reflect.TypeOf((*HostMultipathInfoLogicalUnitStorageArrayTypePolicy)(nil)).Elem()\n}\n\ntype HostMultipathInfoPath struct {\n\tDynamicData\n\n\tKey           string                  `xml:\"key\"`\n\tName          string                  `xml:\"name\"`\n\tPathState     string                  `xml:\"pathState\"`\n\tState         string                  `xml:\"state,omitempty\"`\n\tIsWorkingPath *bool                   
`xml:\"isWorkingPath\"`\n\tAdapter       string                  `xml:\"adapter\"`\n\tLun           string                  `xml:\"lun\"`\n\tTransport     BaseHostTargetTransport `xml:\"transport,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostMultipathInfoPath\"] = reflect.TypeOf((*HostMultipathInfoPath)(nil)).Elem()\n}\n\ntype HostMultipathStateInfo struct {\n\tDynamicData\n\n\tPath []HostMultipathStateInfoPath `xml:\"path,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostMultipathStateInfo\"] = reflect.TypeOf((*HostMultipathStateInfo)(nil)).Elem()\n}\n\ntype HostMultipathStateInfoPath struct {\n\tDynamicData\n\n\tName      string `xml:\"name\"`\n\tPathState string `xml:\"pathState\"`\n}\n\nfunc init() {\n\tt[\"HostMultipathStateInfoPath\"] = reflect.TypeOf((*HostMultipathStateInfoPath)(nil)).Elem()\n}\n\ntype HostNasVolume struct {\n\tHostFileSystemVolume\n\n\tRemoteHost       string   `xml:\"remoteHost\"`\n\tRemotePath       string   `xml:\"remotePath\"`\n\tUserName         string   `xml:\"userName,omitempty\"`\n\tRemoteHostNames  []string `xml:\"remoteHostNames,omitempty\"`\n\tSecurityType     string   `xml:\"securityType,omitempty\"`\n\tProtocolEndpoint *bool    `xml:\"protocolEndpoint\"`\n}\n\nfunc init() {\n\tt[\"HostNasVolume\"] = reflect.TypeOf((*HostNasVolume)(nil)).Elem()\n}\n\ntype HostNasVolumeConfig struct {\n\tDynamicData\n\n\tChangeOperation string             `xml:\"changeOperation,omitempty\"`\n\tSpec            *HostNasVolumeSpec `xml:\"spec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostNasVolumeConfig\"] = reflect.TypeOf((*HostNasVolumeConfig)(nil)).Elem()\n}\n\ntype HostNasVolumeSpec struct {\n\tDynamicData\n\n\tRemoteHost      string   `xml:\"remoteHost\"`\n\tRemotePath      string   `xml:\"remotePath\"`\n\tLocalPath       string   `xml:\"localPath\"`\n\tAccessMode      string   `xml:\"accessMode\"`\n\tType            string   `xml:\"type,omitempty\"`\n\tUserName        string   `xml:\"userName,omitempty\"`\n\tPassword        string   
`xml:\"password,omitempty\"`\n\tRemoteHostNames []string `xml:\"remoteHostNames,omitempty\"`\n\tSecurityType    string   `xml:\"securityType,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostNasVolumeSpec\"] = reflect.TypeOf((*HostNasVolumeSpec)(nil)).Elem()\n}\n\ntype HostNasVolumeUserInfo struct {\n\tDynamicData\n\n\tUser string `xml:\"user\"`\n}\n\nfunc init() {\n\tt[\"HostNasVolumeUserInfo\"] = reflect.TypeOf((*HostNasVolumeUserInfo)(nil)).Elem()\n}\n\ntype HostNatService struct {\n\tDynamicData\n\n\tKey  string             `xml:\"key\"`\n\tSpec HostNatServiceSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"HostNatService\"] = reflect.TypeOf((*HostNatService)(nil)).Elem()\n}\n\ntype HostNatServiceConfig struct {\n\tDynamicData\n\n\tChangeOperation string             `xml:\"changeOperation,omitempty\"`\n\tKey             string             `xml:\"key\"`\n\tSpec            HostNatServiceSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"HostNatServiceConfig\"] = reflect.TypeOf((*HostNatServiceConfig)(nil)).Elem()\n}\n\ntype HostNatServiceNameServiceSpec struct {\n\tDynamicData\n\n\tDnsAutoDetect bool     `xml:\"dnsAutoDetect\"`\n\tDnsPolicy     string   `xml:\"dnsPolicy\"`\n\tDnsRetries    int32    `xml:\"dnsRetries\"`\n\tDnsTimeout    int32    `xml:\"dnsTimeout\"`\n\tDnsNameServer []string `xml:\"dnsNameServer,omitempty\"`\n\tNbdsTimeout   int32    `xml:\"nbdsTimeout\"`\n\tNbnsRetries   int32    `xml:\"nbnsRetries\"`\n\tNbnsTimeout   int32    `xml:\"nbnsTimeout\"`\n}\n\nfunc init() {\n\tt[\"HostNatServiceNameServiceSpec\"] = reflect.TypeOf((*HostNatServiceNameServiceSpec)(nil)).Elem()\n}\n\ntype HostNatServicePortForwardSpec struct {\n\tDynamicData\n\n\tType           string `xml:\"type\"`\n\tName           string `xml:\"name\"`\n\tHostPort       int32  `xml:\"hostPort\"`\n\tGuestPort      int32  `xml:\"guestPort\"`\n\tGuestIpAddress string `xml:\"guestIpAddress\"`\n}\n\nfunc init() {\n\tt[\"HostNatServicePortForwardSpec\"] = 
reflect.TypeOf((*HostNatServicePortForwardSpec)(nil)).Elem()\n}\n\ntype HostNatServiceSpec struct {\n\tDynamicData\n\n\tVirtualSwitch    string                          `xml:\"virtualSwitch\"`\n\tActiveFtp        bool                            `xml:\"activeFtp\"`\n\tAllowAnyOui      bool                            `xml:\"allowAnyOui\"`\n\tConfigPort       bool                            `xml:\"configPort\"`\n\tIpGatewayAddress string                          `xml:\"ipGatewayAddress\"`\n\tUdpTimeout       int32                           `xml:\"udpTimeout\"`\n\tPortForward      []HostNatServicePortForwardSpec `xml:\"portForward,omitempty\"`\n\tNameService      *HostNatServiceNameServiceSpec  `xml:\"nameService,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostNatServiceSpec\"] = reflect.TypeOf((*HostNatServiceSpec)(nil)).Elem()\n}\n\ntype HostNetCapabilities struct {\n\tDynamicData\n\n\tCanSetPhysicalNicLinkSpeed bool     `xml:\"canSetPhysicalNicLinkSpeed\"`\n\tSupportsNicTeaming         bool     `xml:\"supportsNicTeaming\"`\n\tNicTeamingPolicy           []string `xml:\"nicTeamingPolicy,omitempty\"`\n\tSupportsVlan               bool     `xml:\"supportsVlan\"`\n\tUsesServiceConsoleNic      bool     `xml:\"usesServiceConsoleNic\"`\n\tSupportsNetworkHints       bool     `xml:\"supportsNetworkHints\"`\n\tMaxPortGroupsPerVswitch    int32    `xml:\"maxPortGroupsPerVswitch,omitempty\"`\n\tVswitchConfigSupported     bool     `xml:\"vswitchConfigSupported\"`\n\tVnicConfigSupported        bool     `xml:\"vnicConfigSupported\"`\n\tIpRouteConfigSupported     bool     `xml:\"ipRouteConfigSupported\"`\n\tDnsConfigSupported         bool     `xml:\"dnsConfigSupported\"`\n\tDhcpOnVnicSupported        bool     `xml:\"dhcpOnVnicSupported\"`\n\tIpV6Supported              *bool    `xml:\"ipV6Supported\"`\n}\n\nfunc init() {\n\tt[\"HostNetCapabilities\"] = reflect.TypeOf((*HostNetCapabilities)(nil)).Elem()\n}\n\ntype HostNetOffloadCapabilities struct {\n\tDynamicData\n\n\tCsumOffload     
*bool `xml:\"csumOffload\"`\n\tTcpSegmentation *bool `xml:\"tcpSegmentation\"`\n\tZeroCopyXmit    *bool `xml:\"zeroCopyXmit\"`\n}\n\nfunc init() {\n\tt[\"HostNetOffloadCapabilities\"] = reflect.TypeOf((*HostNetOffloadCapabilities)(nil)).Elem()\n}\n\ntype HostNetStackInstance struct {\n\tDynamicData\n\n\tKey                             string                  `xml:\"key,omitempty\"`\n\tName                            string                  `xml:\"name,omitempty\"`\n\tDnsConfig                       BaseHostDnsConfig       `xml:\"dnsConfig,omitempty,typeattr\"`\n\tIpRouteConfig                   BaseHostIpRouteConfig   `xml:\"ipRouteConfig,omitempty,typeattr\"`\n\tRequestedMaxNumberOfConnections int32                   `xml:\"requestedMaxNumberOfConnections,omitempty\"`\n\tCongestionControlAlgorithm      string                  `xml:\"congestionControlAlgorithm,omitempty\"`\n\tIpV6Enabled                     *bool                   `xml:\"ipV6Enabled\"`\n\tRouteTableConfig                *HostIpRouteTableConfig `xml:\"routeTableConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostNetStackInstance\"] = reflect.TypeOf((*HostNetStackInstance)(nil)).Elem()\n}\n\ntype HostNetworkConfig struct {\n\tDynamicData\n\n\tVswitch              []HostVirtualSwitchConfig       `xml:\"vswitch,omitempty\"`\n\tProxySwitch          []HostProxySwitchConfig         `xml:\"proxySwitch,omitempty\"`\n\tPortgroup            []HostPortGroupConfig           `xml:\"portgroup,omitempty\"`\n\tPnic                 []PhysicalNicConfig             `xml:\"pnic,omitempty\"`\n\tVnic                 []HostVirtualNicConfig          `xml:\"vnic,omitempty\"`\n\tConsoleVnic          []HostVirtualNicConfig          `xml:\"consoleVnic,omitempty\"`\n\tDnsConfig            BaseHostDnsConfig               `xml:\"dnsConfig,omitempty,typeattr\"`\n\tIpRouteConfig        BaseHostIpRouteConfig           `xml:\"ipRouteConfig,omitempty,typeattr\"`\n\tConsoleIpRouteConfig BaseHostIpRouteConfig           
`xml:\"consoleIpRouteConfig,omitempty,typeattr\"`\n\tRouteTableConfig     *HostIpRouteTableConfig         `xml:\"routeTableConfig,omitempty\"`\n\tDhcp                 []HostDhcpServiceConfig         `xml:\"dhcp,omitempty\"`\n\tNat                  []HostNatServiceConfig          `xml:\"nat,omitempty\"`\n\tIpV6Enabled          *bool                           `xml:\"ipV6Enabled\"`\n\tNetStackSpec         []HostNetworkConfigNetStackSpec `xml:\"netStackSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostNetworkConfig\"] = reflect.TypeOf((*HostNetworkConfig)(nil)).Elem()\n}\n\ntype HostNetworkConfigNetStackSpec struct {\n\tDynamicData\n\n\tNetStackInstance HostNetStackInstance `xml:\"netStackInstance\"`\n\tOperation        string               `xml:\"operation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostNetworkConfigNetStackSpec\"] = reflect.TypeOf((*HostNetworkConfigNetStackSpec)(nil)).Elem()\n}\n\ntype HostNetworkConfigResult struct {\n\tDynamicData\n\n\tVnicDevice        []string `xml:\"vnicDevice,omitempty\"`\n\tConsoleVnicDevice []string `xml:\"consoleVnicDevice,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostNetworkConfigResult\"] = reflect.TypeOf((*HostNetworkConfigResult)(nil)).Elem()\n}\n\ntype HostNetworkInfo struct {\n\tDynamicData\n\n\tVswitch              []HostVirtualSwitch     `xml:\"vswitch,omitempty\"`\n\tProxySwitch          []HostProxySwitch       `xml:\"proxySwitch,omitempty\"`\n\tPortgroup            []HostPortGroup         `xml:\"portgroup,omitempty\"`\n\tPnic                 []PhysicalNic           `xml:\"pnic,omitempty\"`\n\tVnic                 []HostVirtualNic        `xml:\"vnic,omitempty\"`\n\tConsoleVnic          []HostVirtualNic        `xml:\"consoleVnic,omitempty\"`\n\tDnsConfig            BaseHostDnsConfig       `xml:\"dnsConfig,omitempty,typeattr\"`\n\tIpRouteConfig        BaseHostIpRouteConfig   `xml:\"ipRouteConfig,omitempty,typeattr\"`\n\tConsoleIpRouteConfig BaseHostIpRouteConfig   
`xml:\"consoleIpRouteConfig,omitempty,typeattr\"`\n\tRouteTableInfo       *HostIpRouteTableInfo   `xml:\"routeTableInfo,omitempty\"`\n\tDhcp                 []HostDhcpService       `xml:\"dhcp,omitempty\"`\n\tNat                  []HostNatService        `xml:\"nat,omitempty\"`\n\tIpV6Enabled          *bool                   `xml:\"ipV6Enabled\"`\n\tAtBootIpV6Enabled    *bool                   `xml:\"atBootIpV6Enabled\"`\n\tNetStackInstance     []HostNetStackInstance  `xml:\"netStackInstance,omitempty\"`\n\tOpaqueSwitch         []HostOpaqueSwitch      `xml:\"opaqueSwitch,omitempty\"`\n\tOpaqueNetwork        []HostOpaqueNetworkInfo `xml:\"opaqueNetwork,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostNetworkInfo\"] = reflect.TypeOf((*HostNetworkInfo)(nil)).Elem()\n}\n\ntype HostNetworkPolicy struct {\n\tDynamicData\n\n\tSecurity      *HostNetworkSecurityPolicy       `xml:\"security,omitempty\"`\n\tNicTeaming    *HostNicTeamingPolicy            `xml:\"nicTeaming,omitempty\"`\n\tOffloadPolicy *HostNetOffloadCapabilities      `xml:\"offloadPolicy,omitempty\"`\n\tShapingPolicy *HostNetworkTrafficShapingPolicy `xml:\"shapingPolicy,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostNetworkPolicy\"] = reflect.TypeOf((*HostNetworkPolicy)(nil)).Elem()\n}\n\ntype HostNetworkResourceRuntime struct {\n\tDynamicData\n\n\tPnicResourceInfo []HostPnicNetworkResourceInfo `xml:\"pnicResourceInfo\"`\n}\n\nfunc init() {\n\tt[\"HostNetworkResourceRuntime\"] = reflect.TypeOf((*HostNetworkResourceRuntime)(nil)).Elem()\n}\n\ntype HostNetworkSecurityPolicy struct {\n\tDynamicData\n\n\tAllowPromiscuous *bool `xml:\"allowPromiscuous\"`\n\tMacChanges       *bool `xml:\"macChanges\"`\n\tForgedTransmits  *bool `xml:\"forgedTransmits\"`\n}\n\nfunc init() {\n\tt[\"HostNetworkSecurityPolicy\"] = reflect.TypeOf((*HostNetworkSecurityPolicy)(nil)).Elem()\n}\n\ntype HostNetworkTrafficShapingPolicy struct {\n\tDynamicData\n\n\tEnabled          *bool `xml:\"enabled\"`\n\tAverageBandwidth int64 
`xml:\"averageBandwidth,omitempty\"`\n\tPeakBandwidth    int64 `xml:\"peakBandwidth,omitempty\"`\n\tBurstSize        int64 `xml:\"burstSize,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostNetworkTrafficShapingPolicy\"] = reflect.TypeOf((*HostNetworkTrafficShapingPolicy)(nil)).Elem()\n}\n\ntype HostNewNetworkConnectInfo struct {\n\tHostConnectInfoNetworkInfo\n}\n\nfunc init() {\n\tt[\"HostNewNetworkConnectInfo\"] = reflect.TypeOf((*HostNewNetworkConnectInfo)(nil)).Elem()\n}\n\ntype HostNicFailureCriteria struct {\n\tDynamicData\n\n\tCheckSpeed        string `xml:\"checkSpeed,omitempty\"`\n\tSpeed             int32  `xml:\"speed,omitempty\"`\n\tCheckDuplex       *bool  `xml:\"checkDuplex\"`\n\tFullDuplex        *bool  `xml:\"fullDuplex\"`\n\tCheckErrorPercent *bool  `xml:\"checkErrorPercent\"`\n\tPercentage        int32  `xml:\"percentage,omitempty\"`\n\tCheckBeacon       *bool  `xml:\"checkBeacon\"`\n}\n\nfunc init() {\n\tt[\"HostNicFailureCriteria\"] = reflect.TypeOf((*HostNicFailureCriteria)(nil)).Elem()\n}\n\ntype HostNicOrderPolicy struct {\n\tDynamicData\n\n\tActiveNic  []string `xml:\"activeNic,omitempty\"`\n\tStandbyNic []string `xml:\"standbyNic,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostNicOrderPolicy\"] = reflect.TypeOf((*HostNicOrderPolicy)(nil)).Elem()\n}\n\ntype HostNicTeamingPolicy struct {\n\tDynamicData\n\n\tPolicy          string                  `xml:\"policy,omitempty\"`\n\tReversePolicy   *bool                   `xml:\"reversePolicy\"`\n\tNotifySwitches  *bool                   `xml:\"notifySwitches\"`\n\tRollingOrder    *bool                   `xml:\"rollingOrder\"`\n\tFailureCriteria *HostNicFailureCriteria `xml:\"failureCriteria,omitempty\"`\n\tNicOrder        *HostNicOrderPolicy     `xml:\"nicOrder,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostNicTeamingPolicy\"] = reflect.TypeOf((*HostNicTeamingPolicy)(nil)).Elem()\n}\n\ntype HostNoAvailableNetworksEvent struct {\n\tHostDasEvent\n\n\tIps string `xml:\"ips,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"HostNoAvailableNetworksEvent\"] = reflect.TypeOf((*HostNoAvailableNetworksEvent)(nil)).Elem()\n}\n\ntype HostNoHAEnabledPortGroupsEvent struct {\n\tHostDasEvent\n}\n\nfunc init() {\n\tt[\"HostNoHAEnabledPortGroupsEvent\"] = reflect.TypeOf((*HostNoHAEnabledPortGroupsEvent)(nil)).Elem()\n}\n\ntype HostNoRedundantManagementNetworkEvent struct {\n\tHostDasEvent\n}\n\nfunc init() {\n\tt[\"HostNoRedundantManagementNetworkEvent\"] = reflect.TypeOf((*HostNoRedundantManagementNetworkEvent)(nil)).Elem()\n}\n\ntype HostNonCompliantEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostNonCompliantEvent\"] = reflect.TypeOf((*HostNonCompliantEvent)(nil)).Elem()\n}\n\ntype HostNotConnected struct {\n\tHostCommunication\n}\n\nfunc init() {\n\tt[\"HostNotConnected\"] = reflect.TypeOf((*HostNotConnected)(nil)).Elem()\n}\n\ntype HostNotConnectedFault HostNotConnected\n\nfunc init() {\n\tt[\"HostNotConnectedFault\"] = reflect.TypeOf((*HostNotConnectedFault)(nil)).Elem()\n}\n\ntype HostNotInClusterEvent struct {\n\tHostDasEvent\n}\n\nfunc init() {\n\tt[\"HostNotInClusterEvent\"] = reflect.TypeOf((*HostNotInClusterEvent)(nil)).Elem()\n}\n\ntype HostNotReachable struct {\n\tHostCommunication\n}\n\nfunc init() {\n\tt[\"HostNotReachable\"] = reflect.TypeOf((*HostNotReachable)(nil)).Elem()\n}\n\ntype HostNotReachableFault HostNotReachable\n\nfunc init() {\n\tt[\"HostNotReachableFault\"] = reflect.TypeOf((*HostNotReachableFault)(nil)).Elem()\n}\n\ntype HostNtpConfig struct {\n\tDynamicData\n\n\tServer     []string `xml:\"server,omitempty\"`\n\tConfigFile []string `xml:\"configFile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostNtpConfig\"] = reflect.TypeOf((*HostNtpConfig)(nil)).Elem()\n}\n\ntype HostNumaInfo struct {\n\tDynamicData\n\n\tType     string         `xml:\"type\"`\n\tNumNodes int32          `xml:\"numNodes\"`\n\tNumaNode []HostNumaNode `xml:\"numaNode,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostNumaInfo\"] = reflect.TypeOf((*HostNumaInfo)(nil)).Elem()\n}\n\ntype 
HostNumaNode struct {\n\tDynamicData\n\n\tTypeId            byte    `xml:\"typeId\"`\n\tCpuID             []int16 `xml:\"cpuID\"`\n\tMemoryRangeBegin  int64   `xml:\"memoryRangeBegin\"`\n\tMemoryRangeLength int64   `xml:\"memoryRangeLength\"`\n}\n\nfunc init() {\n\tt[\"HostNumaNode\"] = reflect.TypeOf((*HostNumaNode)(nil)).Elem()\n}\n\ntype HostNumericSensorInfo struct {\n\tDynamicData\n\n\tName           string                 `xml:\"name\"`\n\tHealthState    BaseElementDescription `xml:\"healthState,omitempty,typeattr\"`\n\tCurrentReading int64                  `xml:\"currentReading\"`\n\tUnitModifier   int32                  `xml:\"unitModifier\"`\n\tBaseUnits      string                 `xml:\"baseUnits\"`\n\tRateUnits      string                 `xml:\"rateUnits,omitempty\"`\n\tSensorType     string                 `xml:\"sensorType\"`\n\tId             string                 `xml:\"id,omitempty\"`\n\tTimeStamp      string                 `xml:\"timeStamp,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostNumericSensorInfo\"] = reflect.TypeOf((*HostNumericSensorInfo)(nil)).Elem()\n}\n\ntype HostOpaqueNetworkInfo struct {\n\tDynamicData\n\n\tOpaqueNetworkId   string                   `xml:\"opaqueNetworkId\"`\n\tOpaqueNetworkName string                   `xml:\"opaqueNetworkName\"`\n\tOpaqueNetworkType string                   `xml:\"opaqueNetworkType\"`\n\tPnicZone          []string                 `xml:\"pnicZone,omitempty\"`\n\tCapability        *OpaqueNetworkCapability `xml:\"capability,omitempty\"`\n\tExtraConfig       []BaseOptionValue        `xml:\"extraConfig,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostOpaqueNetworkInfo\"] = reflect.TypeOf((*HostOpaqueNetworkInfo)(nil)).Elem()\n}\n\ntype HostOpaqueSwitch struct {\n\tDynamicData\n\n\tKey         string                            `xml:\"key\"`\n\tName        string                            `xml:\"name,omitempty\"`\n\tPnic        []string                          `xml:\"pnic,omitempty\"`\n\tPnicZone    
[]HostOpaqueSwitchPhysicalNicZone `xml:\"pnicZone,omitempty\"`\n\tStatus      string                            `xml:\"status,omitempty\"`\n\tVtep        []HostVirtualNic                  `xml:\"vtep,omitempty\"`\n\tExtraConfig []BaseOptionValue                 `xml:\"extraConfig,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostOpaqueSwitch\"] = reflect.TypeOf((*HostOpaqueSwitch)(nil)).Elem()\n}\n\ntype HostOpaqueSwitchPhysicalNicZone struct {\n\tDynamicData\n\n\tKey        string   `xml:\"key\"`\n\tPnicDevice []string `xml:\"pnicDevice,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostOpaqueSwitchPhysicalNicZone\"] = reflect.TypeOf((*HostOpaqueSwitchPhysicalNicZone)(nil)).Elem()\n}\n\ntype HostOvercommittedEvent struct {\n\tClusterOvercommittedEvent\n}\n\nfunc init() {\n\tt[\"HostOvercommittedEvent\"] = reflect.TypeOf((*HostOvercommittedEvent)(nil)).Elem()\n}\n\ntype HostParallelScsiHba struct {\n\tHostHostBusAdapter\n}\n\nfunc init() {\n\tt[\"HostParallelScsiHba\"] = reflect.TypeOf((*HostParallelScsiHba)(nil)).Elem()\n}\n\ntype HostParallelScsiTargetTransport struct {\n\tHostTargetTransport\n}\n\nfunc init() {\n\tt[\"HostParallelScsiTargetTransport\"] = reflect.TypeOf((*HostParallelScsiTargetTransport)(nil)).Elem()\n}\n\ntype HostPatchManagerLocator struct {\n\tDynamicData\n\n\tUrl   string `xml:\"url\"`\n\tProxy string `xml:\"proxy,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostPatchManagerLocator\"] = reflect.TypeOf((*HostPatchManagerLocator)(nil)).Elem()\n}\n\ntype HostPatchManagerPatchManagerOperationSpec struct {\n\tDynamicData\n\n\tProxy     string `xml:\"proxy,omitempty\"`\n\tPort      int32  `xml:\"port,omitempty\"`\n\tUserName  string `xml:\"userName,omitempty\"`\n\tPassword  string `xml:\"password,omitempty\"`\n\tCmdOption string `xml:\"cmdOption,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostPatchManagerPatchManagerOperationSpec\"] = reflect.TypeOf((*HostPatchManagerPatchManagerOperationSpec)(nil)).Elem()\n}\n\ntype HostPatchManagerResult struct 
{\n\tDynamicData\n\n\tVersion   string                   `xml:\"version\"`\n\tStatus    []HostPatchManagerStatus `xml:\"status,omitempty\"`\n\tXmlResult string                   `xml:\"xmlResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostPatchManagerResult\"] = reflect.TypeOf((*HostPatchManagerResult)(nil)).Elem()\n}\n\ntype HostPatchManagerStatus struct {\n\tDynamicData\n\n\tId                 string                                    `xml:\"id\"`\n\tApplicable         bool                                      `xml:\"applicable\"`\n\tReason             []string                                  `xml:\"reason,omitempty\"`\n\tIntegrity          string                                    `xml:\"integrity,omitempty\"`\n\tInstalled          bool                                      `xml:\"installed\"`\n\tInstallState       []string                                  `xml:\"installState,omitempty\"`\n\tPrerequisitePatch  []HostPatchManagerStatusPrerequisitePatch `xml:\"prerequisitePatch,omitempty\"`\n\tRestartRequired    bool                                      `xml:\"restartRequired\"`\n\tReconnectRequired  bool                                      `xml:\"reconnectRequired\"`\n\tVmOffRequired      bool                                      `xml:\"vmOffRequired\"`\n\tSupersededPatchIds []string                                  `xml:\"supersededPatchIds,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostPatchManagerStatus\"] = reflect.TypeOf((*HostPatchManagerStatus)(nil)).Elem()\n}\n\ntype HostPatchManagerStatusPrerequisitePatch struct {\n\tDynamicData\n\n\tId           string   `xml:\"id\"`\n\tInstallState []string `xml:\"installState,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostPatchManagerStatusPrerequisitePatch\"] = reflect.TypeOf((*HostPatchManagerStatusPrerequisitePatch)(nil)).Elem()\n}\n\ntype HostPathSelectionPolicyOption struct {\n\tDynamicData\n\n\tPolicy BaseElementDescription `xml:\"policy,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostPathSelectionPolicyOption\"] = 
reflect.TypeOf((*HostPathSelectionPolicyOption)(nil)).Elem()\n}\n\ntype HostPciDevice struct {\n\tDynamicData\n\n\tId           string `xml:\"id\"`\n\tClassId      int16  `xml:\"classId\"`\n\tBus          byte   `xml:\"bus\"`\n\tSlot         byte   `xml:\"slot\"`\n\tFunction     byte   `xml:\"function\"`\n\tVendorId     int16  `xml:\"vendorId\"`\n\tSubVendorId  int16  `xml:\"subVendorId\"`\n\tVendorName   string `xml:\"vendorName\"`\n\tDeviceId     int16  `xml:\"deviceId\"`\n\tSubDeviceId  int16  `xml:\"subDeviceId\"`\n\tParentBridge string `xml:\"parentBridge,omitempty\"`\n\tDeviceName   string `xml:\"deviceName\"`\n}\n\nfunc init() {\n\tt[\"HostPciDevice\"] = reflect.TypeOf((*HostPciDevice)(nil)).Elem()\n}\n\ntype HostPciPassthruConfig struct {\n\tDynamicData\n\n\tId              string `xml:\"id\"`\n\tPassthruEnabled bool   `xml:\"passthruEnabled\"`\n}\n\nfunc init() {\n\tt[\"HostPciPassthruConfig\"] = reflect.TypeOf((*HostPciPassthruConfig)(nil)).Elem()\n}\n\ntype HostPciPassthruInfo struct {\n\tDynamicData\n\n\tId              string `xml:\"id\"`\n\tDependentDevice string `xml:\"dependentDevice\"`\n\tPassthruEnabled bool   `xml:\"passthruEnabled\"`\n\tPassthruCapable bool   `xml:\"passthruCapable\"`\n\tPassthruActive  bool   `xml:\"passthruActive\"`\n}\n\nfunc init() {\n\tt[\"HostPciPassthruInfo\"] = reflect.TypeOf((*HostPciPassthruInfo)(nil)).Elem()\n}\n\ntype HostPlacedVirtualNicIdentifier struct {\n\tDynamicData\n\n\tVm          ManagedObjectReference `xml:\"vm\"`\n\tVnicKey     string                 `xml:\"vnicKey\"`\n\tReservation int32                  `xml:\"reservation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostPlacedVirtualNicIdentifier\"] = reflect.TypeOf((*HostPlacedVirtualNicIdentifier)(nil)).Elem()\n}\n\ntype HostPlugStoreTopology struct {\n\tDynamicData\n\n\tAdapter []HostPlugStoreTopologyAdapter `xml:\"adapter,omitempty\"`\n\tPath    []HostPlugStoreTopologyPath    `xml:\"path,omitempty\"`\n\tTarget  []HostPlugStoreTopologyTarget  
`xml:\"target,omitempty\"`\n\tDevice  []HostPlugStoreTopologyDevice  `xml:\"device,omitempty\"`\n\tPlugin  []HostPlugStoreTopologyPlugin  `xml:\"plugin,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostPlugStoreTopology\"] = reflect.TypeOf((*HostPlugStoreTopology)(nil)).Elem()\n}\n\ntype HostPlugStoreTopologyAdapter struct {\n\tDynamicData\n\n\tKey     string   `xml:\"key\"`\n\tAdapter string   `xml:\"adapter\"`\n\tPath    []string `xml:\"path,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostPlugStoreTopologyAdapter\"] = reflect.TypeOf((*HostPlugStoreTopologyAdapter)(nil)).Elem()\n}\n\ntype HostPlugStoreTopologyDevice struct {\n\tDynamicData\n\n\tKey  string   `xml:\"key\"`\n\tLun  string   `xml:\"lun\"`\n\tPath []string `xml:\"path,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostPlugStoreTopologyDevice\"] = reflect.TypeOf((*HostPlugStoreTopologyDevice)(nil)).Elem()\n}\n\ntype HostPlugStoreTopologyPath struct {\n\tDynamicData\n\n\tKey           string `xml:\"key\"`\n\tName          string `xml:\"name\"`\n\tChannelNumber int32  `xml:\"channelNumber,omitempty\"`\n\tTargetNumber  int32  `xml:\"targetNumber,omitempty\"`\n\tLunNumber     int32  `xml:\"lunNumber,omitempty\"`\n\tAdapter       string `xml:\"adapter,omitempty\"`\n\tTarget        string `xml:\"target,omitempty\"`\n\tDevice        string `xml:\"device,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostPlugStoreTopologyPath\"] = reflect.TypeOf((*HostPlugStoreTopologyPath)(nil)).Elem()\n}\n\ntype HostPlugStoreTopologyPlugin struct {\n\tDynamicData\n\n\tKey         string   `xml:\"key\"`\n\tName        string   `xml:\"name\"`\n\tDevice      []string `xml:\"device,omitempty\"`\n\tClaimedPath []string `xml:\"claimedPath,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostPlugStoreTopologyPlugin\"] = reflect.TypeOf((*HostPlugStoreTopologyPlugin)(nil)).Elem()\n}\n\ntype HostPlugStoreTopologyTarget struct {\n\tDynamicData\n\n\tKey       string                  `xml:\"key\"`\n\tTransport BaseHostTargetTransport 
`xml:\"transport,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostPlugStoreTopologyTarget\"] = reflect.TypeOf((*HostPlugStoreTopologyTarget)(nil)).Elem()\n}\n\ntype HostPnicNetworkResourceInfo struct {\n\tDynamicData\n\n\tPnicDevice                     string                           `xml:\"pnicDevice\"`\n\tAvailableBandwidthForVMTraffic int64                            `xml:\"availableBandwidthForVMTraffic,omitempty\"`\n\tUnusedBandwidthForVMTraffic    int64                            `xml:\"unusedBandwidthForVMTraffic,omitempty\"`\n\tPlacedVirtualNics              []HostPlacedVirtualNicIdentifier `xml:\"placedVirtualNics,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostPnicNetworkResourceInfo\"] = reflect.TypeOf((*HostPnicNetworkResourceInfo)(nil)).Elem()\n}\n\ntype HostPortGroup struct {\n\tDynamicData\n\n\tKey            string              `xml:\"key,omitempty\"`\n\tPort           []HostPortGroupPort `xml:\"port,omitempty\"`\n\tVswitch        string              `xml:\"vswitch,omitempty\"`\n\tComputedPolicy HostNetworkPolicy   `xml:\"computedPolicy\"`\n\tSpec           HostPortGroupSpec   `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"HostPortGroup\"] = reflect.TypeOf((*HostPortGroup)(nil)).Elem()\n}\n\ntype HostPortGroupConfig struct {\n\tDynamicData\n\n\tChangeOperation string             `xml:\"changeOperation,omitempty\"`\n\tSpec            *HostPortGroupSpec `xml:\"spec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostPortGroupConfig\"] = reflect.TypeOf((*HostPortGroupConfig)(nil)).Elem()\n}\n\ntype HostPortGroupPort struct {\n\tDynamicData\n\n\tKey  string   `xml:\"key,omitempty\"`\n\tMac  []string `xml:\"mac,omitempty\"`\n\tType string   `xml:\"type\"`\n}\n\nfunc init() {\n\tt[\"HostPortGroupPort\"] = reflect.TypeOf((*HostPortGroupPort)(nil)).Elem()\n}\n\ntype HostPortGroupProfile struct {\n\tPortGroupProfile\n\n\tIpConfig IpAddressProfile `xml:\"ipConfig\"`\n}\n\nfunc init() {\n\tt[\"HostPortGroupProfile\"] = 
reflect.TypeOf((*HostPortGroupProfile)(nil)).Elem()\n}\n\ntype HostPortGroupSpec struct {\n\tDynamicData\n\n\tName        string            `xml:\"name\"`\n\tVlanId      int32             `xml:\"vlanId\"`\n\tVswitchName string            `xml:\"vswitchName\"`\n\tPolicy      HostNetworkPolicy `xml:\"policy\"`\n}\n\nfunc init() {\n\tt[\"HostPortGroupSpec\"] = reflect.TypeOf((*HostPortGroupSpec)(nil)).Elem()\n}\n\ntype HostPosixAccountSpec struct {\n\tHostAccountSpec\n\n\tPosixId     int32 `xml:\"posixId,omitempty\"`\n\tShellAccess *bool `xml:\"shellAccess\"`\n}\n\nfunc init() {\n\tt[\"HostPosixAccountSpec\"] = reflect.TypeOf((*HostPosixAccountSpec)(nil)).Elem()\n}\n\ntype HostPowerOpFailed struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"HostPowerOpFailed\"] = reflect.TypeOf((*HostPowerOpFailed)(nil)).Elem()\n}\n\ntype HostPowerOpFailedFault BaseHostPowerOpFailed\n\nfunc init() {\n\tt[\"HostPowerOpFailedFault\"] = reflect.TypeOf((*HostPowerOpFailedFault)(nil)).Elem()\n}\n\ntype HostPowerPolicy struct {\n\tDynamicData\n\n\tKey         int32  `xml:\"key\"`\n\tName        string `xml:\"name\"`\n\tShortName   string `xml:\"shortName\"`\n\tDescription string `xml:\"description\"`\n}\n\nfunc init() {\n\tt[\"HostPowerPolicy\"] = reflect.TypeOf((*HostPowerPolicy)(nil)).Elem()\n}\n\ntype HostPrimaryAgentNotShortNameEvent struct {\n\tHostDasEvent\n\n\tPrimaryAgent string `xml:\"primaryAgent\"`\n}\n\nfunc init() {\n\tt[\"HostPrimaryAgentNotShortNameEvent\"] = reflect.TypeOf((*HostPrimaryAgentNotShortNameEvent)(nil)).Elem()\n}\n\ntype HostProfileAppliedEvent struct {\n\tHostEvent\n\n\tProfile ProfileEventArgument `xml:\"profile\"`\n}\n\nfunc init() {\n\tt[\"HostProfileAppliedEvent\"] = reflect.TypeOf((*HostProfileAppliedEvent)(nil)).Elem()\n}\n\ntype HostProfileCompleteConfigSpec struct {\n\tHostProfileConfigSpec\n\n\tApplyProfile                  *HostApplyProfile       `xml:\"applyProfile,omitempty\"`\n\tCustomComplyProfile           *ComplianceProfile      
`xml:\"customComplyProfile,omitempty\"`\n\tDisabledExpressionListChanged bool                    `xml:\"disabledExpressionListChanged\"`\n\tDisabledExpressionList        []string                `xml:\"disabledExpressionList,omitempty\"`\n\tValidatorHost                 *ManagedObjectReference `xml:\"validatorHost,omitempty\"`\n\tValidating                    *bool                   `xml:\"validating\"`\n\tHostConfig                    *HostProfileConfigInfo  `xml:\"hostConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostProfileCompleteConfigSpec\"] = reflect.TypeOf((*HostProfileCompleteConfigSpec)(nil)).Elem()\n}\n\ntype HostProfileConfigInfo struct {\n\tProfileConfigInfo\n\n\tApplyProfile           *HostApplyProfile   `xml:\"applyProfile,omitempty\"`\n\tDefaultComplyProfile   *ComplianceProfile  `xml:\"defaultComplyProfile,omitempty\"`\n\tDefaultComplyLocator   []ComplianceLocator `xml:\"defaultComplyLocator,omitempty\"`\n\tCustomComplyProfile    *ComplianceProfile  `xml:\"customComplyProfile,omitempty\"`\n\tDisabledExpressionList []string            `xml:\"disabledExpressionList,omitempty\"`\n\tDescription            *ProfileDescription `xml:\"description,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostProfileConfigInfo\"] = reflect.TypeOf((*HostProfileConfigInfo)(nil)).Elem()\n}\n\ntype HostProfileConfigSpec struct {\n\tProfileCreateSpec\n}\n\nfunc init() {\n\tt[\"HostProfileConfigSpec\"] = reflect.TypeOf((*HostProfileConfigSpec)(nil)).Elem()\n}\n\ntype HostProfileHostBasedConfigSpec struct {\n\tHostProfileConfigSpec\n\n\tHost                 ManagedObjectReference `xml:\"host\"`\n\tUseHostProfileEngine *bool                  `xml:\"useHostProfileEngine\"`\n}\n\nfunc init() {\n\tt[\"HostProfileHostBasedConfigSpec\"] = reflect.TypeOf((*HostProfileHostBasedConfigSpec)(nil)).Elem()\n}\n\ntype HostProfileManagerCompositionValidationResultResultElement struct {\n\tDynamicData\n\n\tTarget                  ManagedObjectReference `xml:\"target\"`\n\tStatus                  
string                 `xml:\"status\"`\n\tErrors                  []LocalizableMessage   `xml:\"errors,omitempty\"`\n\tSourceDiffForToBeMerged *HostApplyProfile      `xml:\"sourceDiffForToBeMerged,omitempty\"`\n\tTargetDiffForToBeMerged *HostApplyProfile      `xml:\"targetDiffForToBeMerged,omitempty\"`\n\tToBeAdded               *HostApplyProfile      `xml:\"toBeAdded,omitempty\"`\n\tToBeDeleted             *HostApplyProfile      `xml:\"toBeDeleted,omitempty\"`\n\tToBeDisabled            *HostApplyProfile      `xml:\"toBeDisabled,omitempty\"`\n\tToBeEnabled             *HostApplyProfile      `xml:\"toBeEnabled,omitempty\"`\n\tToBeReenableCC          *HostApplyProfile      `xml:\"toBeReenableCC,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostProfileManagerCompositionValidationResultResultElement\"] = reflect.TypeOf((*HostProfileManagerCompositionValidationResultResultElement)(nil)).Elem()\n}\n\ntype HostProfileManagerConfigTaskList struct {\n\tDynamicData\n\n\tConfigSpec          *HostConfigSpec      `xml:\"configSpec,omitempty\"`\n\tTaskDescription     []LocalizableMessage `xml:\"taskDescription,omitempty\"`\n\tTaskListRequirement []string             `xml:\"taskListRequirement,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostProfileManagerConfigTaskList\"] = reflect.TypeOf((*HostProfileManagerConfigTaskList)(nil)).Elem()\n}\n\ntype HostProfileManagerHostToConfigSpecMap struct {\n\tDynamicData\n\n\tHost       ManagedObjectReference   `xml:\"host\"`\n\tConfigSpec BaseAnswerFileCreateSpec `xml:\"configSpec,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostProfileManagerHostToConfigSpecMap\"] = reflect.TypeOf((*HostProfileManagerHostToConfigSpecMap)(nil)).Elem()\n}\n\ntype HostProfileSerializedHostProfileSpec struct {\n\tProfileSerializedCreateSpec\n\n\tValidatorHost *ManagedObjectReference `xml:\"validatorHost,omitempty\"`\n\tValidating    *bool                   `xml:\"validating\"`\n}\n\nfunc init() {\n\tt[\"HostProfileSerializedHostProfileSpec\"] = 
reflect.TypeOf((*HostProfileSerializedHostProfileSpec)(nil)).Elem()\n}\n\ntype HostProfilesEntityCustomizations struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"HostProfilesEntityCustomizations\"] = reflect.TypeOf((*HostProfilesEntityCustomizations)(nil)).Elem()\n}\n\ntype HostProtocolEndpoint struct {\n\tDynamicData\n\n\tPeType            string                   `xml:\"peType\"`\n\tType              string                   `xml:\"type,omitempty\"`\n\tUuid              string                   `xml:\"uuid\"`\n\tHostKey           []ManagedObjectReference `xml:\"hostKey,omitempty\"`\n\tStorageArray      string                   `xml:\"storageArray,omitempty\"`\n\tNfsServer         string                   `xml:\"nfsServer,omitempty\"`\n\tNfsDir            string                   `xml:\"nfsDir,omitempty\"`\n\tNfsServerScope    string                   `xml:\"nfsServerScope,omitempty\"`\n\tNfsServerMajor    string                   `xml:\"nfsServerMajor,omitempty\"`\n\tNfsServerAuthType string                   `xml:\"nfsServerAuthType,omitempty\"`\n\tNfsServerUser     string                   `xml:\"nfsServerUser,omitempty\"`\n\tDeviceId          string                   `xml:\"deviceId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostProtocolEndpoint\"] = reflect.TypeOf((*HostProtocolEndpoint)(nil)).Elem()\n}\n\ntype HostProxySwitch struct {\n\tDynamicData\n\n\tDvsUuid                     string                         `xml:\"dvsUuid\"`\n\tDvsName                     string                         `xml:\"dvsName\"`\n\tKey                         string                         `xml:\"key\"`\n\tNumPorts                    int32                          `xml:\"numPorts\"`\n\tConfigNumPorts              int32                          `xml:\"configNumPorts,omitempty\"`\n\tNumPortsAvailable           int32                          `xml:\"numPortsAvailable\"`\n\tUplinkPort                  []KeyValue                     `xml:\"uplinkPort,omitempty\"`\n\tMtu                  
       int32                          `xml:\"mtu,omitempty\"`\n\tPnic                        []string                       `xml:\"pnic,omitempty\"`\n\tSpec                        HostProxySwitchSpec            `xml:\"spec\"`\n\tHostLag                     []HostProxySwitchHostLagConfig `xml:\"hostLag,omitempty\"`\n\tNetworkReservationSupported *bool                          `xml:\"networkReservationSupported\"`\n}\n\nfunc init() {\n\tt[\"HostProxySwitch\"] = reflect.TypeOf((*HostProxySwitch)(nil)).Elem()\n}\n\ntype HostProxySwitchConfig struct {\n\tDynamicData\n\n\tChangeOperation string               `xml:\"changeOperation,omitempty\"`\n\tUuid            string               `xml:\"uuid\"`\n\tSpec            *HostProxySwitchSpec `xml:\"spec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostProxySwitchConfig\"] = reflect.TypeOf((*HostProxySwitchConfig)(nil)).Elem()\n}\n\ntype HostProxySwitchHostLagConfig struct {\n\tDynamicData\n\n\tLagKey     string     `xml:\"lagKey\"`\n\tLagName    string     `xml:\"lagName,omitempty\"`\n\tUplinkPort []KeyValue `xml:\"uplinkPort,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostProxySwitchHostLagConfig\"] = reflect.TypeOf((*HostProxySwitchHostLagConfig)(nil)).Elem()\n}\n\ntype HostProxySwitchSpec struct {\n\tDynamicData\n\n\tBacking BaseDistributedVirtualSwitchHostMemberBacking `xml:\"backing,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostProxySwitchSpec\"] = reflect.TypeOf((*HostProxySwitchSpec)(nil)).Elem()\n}\n\ntype HostReconcileDatastoreInventoryRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"HostReconcileDatastoreInventoryRequestType\"] = reflect.TypeOf((*HostReconcileDatastoreInventoryRequestType)(nil)).Elem()\n}\n\ntype HostReconcileDatastoreInventory_Task HostReconcileDatastoreInventoryRequestType\n\nfunc init() {\n\tt[\"HostReconcileDatastoreInventory_Task\"] = 
reflect.TypeOf((*HostReconcileDatastoreInventory_Task)(nil)).Elem()\n}\n\ntype HostReconcileDatastoreInventory_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype HostReconnectionFailedEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostReconnectionFailedEvent\"] = reflect.TypeOf((*HostReconnectionFailedEvent)(nil)).Elem()\n}\n\ntype HostRegisterDisk HostRegisterDiskRequestType\n\nfunc init() {\n\tt[\"HostRegisterDisk\"] = reflect.TypeOf((*HostRegisterDisk)(nil)).Elem()\n}\n\ntype HostRegisterDiskRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tPath string                 `xml:\"path\"`\n\tName string                 `xml:\"name,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostRegisterDiskRequestType\"] = reflect.TypeOf((*HostRegisterDiskRequestType)(nil)).Elem()\n}\n\ntype HostRegisterDiskResponse struct {\n\tReturnval VStorageObject `xml:\"returnval\"`\n}\n\ntype HostReliableMemoryInfo struct {\n\tDynamicData\n\n\tMemorySize int64 `xml:\"memorySize\"`\n}\n\nfunc init() {\n\tt[\"HostReliableMemoryInfo\"] = reflect.TypeOf((*HostReliableMemoryInfo)(nil)).Elem()\n}\n\ntype HostRelocateVStorageObjectRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tId        ID                     `xml:\"id\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n\tSpec      VslmRelocateSpec       `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"HostRelocateVStorageObjectRequestType\"] = reflect.TypeOf((*HostRelocateVStorageObjectRequestType)(nil)).Elem()\n}\n\ntype HostRelocateVStorageObject_Task HostRelocateVStorageObjectRequestType\n\nfunc init() {\n\tt[\"HostRelocateVStorageObject_Task\"] = reflect.TypeOf((*HostRelocateVStorageObject_Task)(nil)).Elem()\n}\n\ntype HostRelocateVStorageObject_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype HostRemoveVFlashResource HostRemoveVFlashResourceRequestType\n\nfunc init() {\n\tt[\"HostRemoveVFlashResource\"] = 
reflect.TypeOf((*HostRemoveVFlashResource)(nil)).Elem()\n}\n\ntype HostRemoveVFlashResourceRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"HostRemoveVFlashResourceRequestType\"] = reflect.TypeOf((*HostRemoveVFlashResourceRequestType)(nil)).Elem()\n}\n\ntype HostRemoveVFlashResourceResponse struct {\n}\n\ntype HostRemovedEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostRemovedEvent\"] = reflect.TypeOf((*HostRemovedEvent)(nil)).Elem()\n}\n\ntype HostRenameVStorageObject HostRenameVStorageObjectRequestType\n\nfunc init() {\n\tt[\"HostRenameVStorageObject\"] = reflect.TypeOf((*HostRenameVStorageObject)(nil)).Elem()\n}\n\ntype HostRenameVStorageObjectRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tId        ID                     `xml:\"id\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n\tName      string                 `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"HostRenameVStorageObjectRequestType\"] = reflect.TypeOf((*HostRenameVStorageObjectRequestType)(nil)).Elem()\n}\n\ntype HostRenameVStorageObjectResponse struct {\n}\n\ntype HostResignatureRescanResult struct {\n\tDynamicData\n\n\tRescan []HostVmfsRescanResult `xml:\"rescan,omitempty\"`\n\tResult ManagedObjectReference `xml:\"result\"`\n}\n\nfunc init() {\n\tt[\"HostResignatureRescanResult\"] = reflect.TypeOf((*HostResignatureRescanResult)(nil)).Elem()\n}\n\ntype HostRetrieveVStorageObject HostRetrieveVStorageObjectRequestType\n\nfunc init() {\n\tt[\"HostRetrieveVStorageObject\"] = reflect.TypeOf((*HostRetrieveVStorageObject)(nil)).Elem()\n}\n\ntype HostRetrieveVStorageObjectRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tId        ID                     `xml:\"id\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"HostRetrieveVStorageObjectRequestType\"] = reflect.TypeOf((*HostRetrieveVStorageObjectRequestType)(nil)).Elem()\n}\n\ntype 
HostRetrieveVStorageObjectResponse struct {\n\tReturnval VStorageObject `xml:\"returnval\"`\n}\n\ntype HostRetrieveVStorageObjectState HostRetrieveVStorageObjectStateRequestType\n\nfunc init() {\n\tt[\"HostRetrieveVStorageObjectState\"] = reflect.TypeOf((*HostRetrieveVStorageObjectState)(nil)).Elem()\n}\n\ntype HostRetrieveVStorageObjectStateRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tId        ID                     `xml:\"id\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"HostRetrieveVStorageObjectStateRequestType\"] = reflect.TypeOf((*HostRetrieveVStorageObjectStateRequestType)(nil)).Elem()\n}\n\ntype HostRetrieveVStorageObjectStateResponse struct {\n\tReturnval VStorageObjectStateInfo `xml:\"returnval\"`\n}\n\ntype HostRuntimeInfo struct {\n\tDynamicData\n\n\tConnectionState            HostSystemConnectionState                   `xml:\"connectionState\"`\n\tPowerState                 HostSystemPowerState                        `xml:\"powerState\"`\n\tStandbyMode                string                                      `xml:\"standbyMode,omitempty\"`\n\tInMaintenanceMode          bool                                        `xml:\"inMaintenanceMode\"`\n\tInQuarantineMode           *bool                                       `xml:\"inQuarantineMode\"`\n\tBootTime                   *time.Time                                  `xml:\"bootTime\"`\n\tHealthSystemRuntime        *HealthSystemRuntime                        `xml:\"healthSystemRuntime,omitempty\"`\n\tDasHostState               *ClusterDasFdmHostState                     `xml:\"dasHostState,omitempty\"`\n\tTpmPcrValues               []HostTpmDigestInfo                         `xml:\"tpmPcrValues,omitempty\"`\n\tVsanRuntimeInfo            *VsanHostRuntimeInfo                        `xml:\"vsanRuntimeInfo,omitempty\"`\n\tNetworkRuntimeInfo         *HostRuntimeInfoNetworkRuntimeInfo          
`xml:\"networkRuntimeInfo,omitempty\"`\n\tVFlashResourceRuntimeInfo  *HostVFlashManagerVFlashResourceRunTimeInfo `xml:\"vFlashResourceRuntimeInfo,omitempty\"`\n\tHostMaxVirtualDiskCapacity int64                                       `xml:\"hostMaxVirtualDiskCapacity,omitempty\"`\n\tCryptoState                string                                      `xml:\"cryptoState,omitempty\"`\n\tCryptoKeyId                *CryptoKeyId                                `xml:\"cryptoKeyId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostRuntimeInfo\"] = reflect.TypeOf((*HostRuntimeInfo)(nil)).Elem()\n}\n\ntype HostRuntimeInfoNetStackInstanceRuntimeInfo struct {\n\tDynamicData\n\n\tNetStackInstanceKey    string   `xml:\"netStackInstanceKey\"`\n\tState                  string   `xml:\"state,omitempty\"`\n\tVmknicKeys             []string `xml:\"vmknicKeys,omitempty\"`\n\tMaxNumberOfConnections int32    `xml:\"maxNumberOfConnections,omitempty\"`\n\tCurrentIpV6Enabled     *bool    `xml:\"currentIpV6Enabled\"`\n}\n\nfunc init() {\n\tt[\"HostRuntimeInfoNetStackInstanceRuntimeInfo\"] = reflect.TypeOf((*HostRuntimeInfoNetStackInstanceRuntimeInfo)(nil)).Elem()\n}\n\ntype HostRuntimeInfoNetworkRuntimeInfo struct {\n\tDynamicData\n\n\tNetStackInstanceRuntimeInfo []HostRuntimeInfoNetStackInstanceRuntimeInfo `xml:\"netStackInstanceRuntimeInfo,omitempty\"`\n\tNetworkResourceRuntime      *HostNetworkResourceRuntime                  `xml:\"networkResourceRuntime,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostRuntimeInfoNetworkRuntimeInfo\"] = reflect.TypeOf((*HostRuntimeInfoNetworkRuntimeInfo)(nil)).Elem()\n}\n\ntype HostScheduleReconcileDatastoreInventory HostScheduleReconcileDatastoreInventoryRequestType\n\nfunc init() {\n\tt[\"HostScheduleReconcileDatastoreInventory\"] = reflect.TypeOf((*HostScheduleReconcileDatastoreInventory)(nil)).Elem()\n}\n\ntype HostScheduleReconcileDatastoreInventoryRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tDatastore ManagedObjectReference 
`xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"HostScheduleReconcileDatastoreInventoryRequestType\"] = reflect.TypeOf((*HostScheduleReconcileDatastoreInventoryRequestType)(nil)).Elem()\n}\n\ntype HostScheduleReconcileDatastoreInventoryResponse struct {\n}\n\ntype HostScsiDisk struct {\n\tScsiLun\n\n\tCapacity              HostDiskDimensionsLba `xml:\"capacity\"`\n\tDevicePath            string                `xml:\"devicePath\"`\n\tSsd                   *bool                 `xml:\"ssd\"`\n\tLocalDisk             *bool                 `xml:\"localDisk\"`\n\tPhysicalLocation      []string              `xml:\"physicalLocation,omitempty\"`\n\tEmulatedDIXDIFEnabled *bool                 `xml:\"emulatedDIXDIFEnabled\"`\n\tVsanDiskInfo          *VsanHostVsanDiskInfo `xml:\"vsanDiskInfo,omitempty\"`\n\tScsiDiskType          string                `xml:\"scsiDiskType,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostScsiDisk\"] = reflect.TypeOf((*HostScsiDisk)(nil)).Elem()\n}\n\ntype HostScsiDiskPartition struct {\n\tDynamicData\n\n\tDiskName  string `xml:\"diskName\"`\n\tPartition int32  `xml:\"partition\"`\n}\n\nfunc init() {\n\tt[\"HostScsiDiskPartition\"] = reflect.TypeOf((*HostScsiDiskPartition)(nil)).Elem()\n}\n\ntype HostScsiTopology struct {\n\tDynamicData\n\n\tAdapter []HostScsiTopologyInterface `xml:\"adapter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostScsiTopology\"] = reflect.TypeOf((*HostScsiTopology)(nil)).Elem()\n}\n\ntype HostScsiTopologyInterface struct {\n\tDynamicData\n\n\tKey     string                   `xml:\"key\"`\n\tAdapter string                   `xml:\"adapter\"`\n\tTarget  []HostScsiTopologyTarget `xml:\"target,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostScsiTopologyInterface\"] = reflect.TypeOf((*HostScsiTopologyInterface)(nil)).Elem()\n}\n\ntype HostScsiTopologyLun struct {\n\tDynamicData\n\n\tKey     string `xml:\"key\"`\n\tLun     int32  `xml:\"lun\"`\n\tScsiLun string `xml:\"scsiLun\"`\n}\n\nfunc init() {\n\tt[\"HostScsiTopologyLun\"] = 
reflect.TypeOf((*HostScsiTopologyLun)(nil)).Elem()\n}\n\ntype HostScsiTopologyTarget struct {\n\tDynamicData\n\n\tKey       string                  `xml:\"key\"`\n\tTarget    int32                   `xml:\"target\"`\n\tLun       []HostScsiTopologyLun   `xml:\"lun,omitempty\"`\n\tTransport BaseHostTargetTransport `xml:\"transport,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostScsiTopologyTarget\"] = reflect.TypeOf((*HostScsiTopologyTarget)(nil)).Elem()\n}\n\ntype HostSecuritySpec struct {\n\tDynamicData\n\n\tAdminPassword    string       `xml:\"adminPassword,omitempty\"`\n\tRemovePermission []Permission `xml:\"removePermission,omitempty\"`\n\tAddPermission    []Permission `xml:\"addPermission,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostSecuritySpec\"] = reflect.TypeOf((*HostSecuritySpec)(nil)).Elem()\n}\n\ntype HostSerialAttachedHba struct {\n\tHostHostBusAdapter\n\n\tNodeWorldWideName string `xml:\"nodeWorldWideName\"`\n}\n\nfunc init() {\n\tt[\"HostSerialAttachedHba\"] = reflect.TypeOf((*HostSerialAttachedHba)(nil)).Elem()\n}\n\ntype HostSerialAttachedTargetTransport struct {\n\tHostTargetTransport\n}\n\nfunc init() {\n\tt[\"HostSerialAttachedTargetTransport\"] = reflect.TypeOf((*HostSerialAttachedTargetTransport)(nil)).Elem()\n}\n\ntype HostService struct {\n\tDynamicData\n\n\tKey           string                    `xml:\"key\"`\n\tLabel         string                    `xml:\"label\"`\n\tRequired      bool                      `xml:\"required\"`\n\tUninstallable bool                      `xml:\"uninstallable\"`\n\tRunning       bool                      `xml:\"running\"`\n\tRuleset       []string                  `xml:\"ruleset,omitempty\"`\n\tPolicy        string                    `xml:\"policy\"`\n\tSourcePackage *HostServiceSourcePackage `xml:\"sourcePackage,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostService\"] = reflect.TypeOf((*HostService)(nil)).Elem()\n}\n\ntype HostServiceConfig struct {\n\tDynamicData\n\n\tServiceId     string 
`xml:\"serviceId\"`\n\tStartupPolicy string `xml:\"startupPolicy\"`\n}\n\nfunc init() {\n\tt[\"HostServiceConfig\"] = reflect.TypeOf((*HostServiceConfig)(nil)).Elem()\n}\n\ntype HostServiceInfo struct {\n\tDynamicData\n\n\tService []HostService `xml:\"service,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostServiceInfo\"] = reflect.TypeOf((*HostServiceInfo)(nil)).Elem()\n}\n\ntype HostServiceSourcePackage struct {\n\tDynamicData\n\n\tSourcePackageName string `xml:\"sourcePackageName\"`\n\tDescription       string `xml:\"description\"`\n}\n\nfunc init() {\n\tt[\"HostServiceSourcePackage\"] = reflect.TypeOf((*HostServiceSourcePackage)(nil)).Elem()\n}\n\ntype HostServiceTicket struct {\n\tDynamicData\n\n\tHost           string `xml:\"host,omitempty\"`\n\tPort           int32  `xml:\"port,omitempty\"`\n\tSslThumbprint  string `xml:\"sslThumbprint,omitempty\"`\n\tService        string `xml:\"service\"`\n\tServiceVersion string `xml:\"serviceVersion\"`\n\tSessionId      string `xml:\"sessionId\"`\n}\n\nfunc init() {\n\tt[\"HostServiceTicket\"] = reflect.TypeOf((*HostServiceTicket)(nil)).Elem()\n}\n\ntype HostShortNameInconsistentEvent struct {\n\tHostDasEvent\n\n\tShortName  string `xml:\"shortName\"`\n\tShortName2 string `xml:\"shortName2\"`\n}\n\nfunc init() {\n\tt[\"HostShortNameInconsistentEvent\"] = reflect.TypeOf((*HostShortNameInconsistentEvent)(nil)).Elem()\n}\n\ntype HostShortNameToIpFailedEvent struct {\n\tHostEvent\n\n\tShortName string `xml:\"shortName\"`\n}\n\nfunc init() {\n\tt[\"HostShortNameToIpFailedEvent\"] = reflect.TypeOf((*HostShortNameToIpFailedEvent)(nil)).Elem()\n}\n\ntype HostShutdownEvent struct {\n\tHostEvent\n\n\tReason string `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"HostShutdownEvent\"] = reflect.TypeOf((*HostShutdownEvent)(nil)).Elem()\n}\n\ntype HostSnmpConfigSpec struct {\n\tDynamicData\n\n\tEnabled             *bool                 `xml:\"enabled\"`\n\tPort                int32                 `xml:\"port,omitempty\"`\n\tReadOnlyCommunities 
[]string              `xml:\"readOnlyCommunities,omitempty\"`\n\tTrapTargets         []HostSnmpDestination `xml:\"trapTargets,omitempty\"`\n\tOption              []KeyValue            `xml:\"option,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostSnmpConfigSpec\"] = reflect.TypeOf((*HostSnmpConfigSpec)(nil)).Elem()\n}\n\ntype HostSnmpDestination struct {\n\tDynamicData\n\n\tHostName  string `xml:\"hostName\"`\n\tPort      int32  `xml:\"port\"`\n\tCommunity string `xml:\"community\"`\n}\n\nfunc init() {\n\tt[\"HostSnmpDestination\"] = reflect.TypeOf((*HostSnmpDestination)(nil)).Elem()\n}\n\ntype HostSnmpSystemAgentLimits struct {\n\tDynamicData\n\n\tMaxReadOnlyCommunities int32                   `xml:\"maxReadOnlyCommunities\"`\n\tMaxTrapDestinations    int32                   `xml:\"maxTrapDestinations\"`\n\tMaxCommunityLength     int32                   `xml:\"maxCommunityLength\"`\n\tMaxBufferSize          int32                   `xml:\"maxBufferSize\"`\n\tCapability             HostSnmpAgentCapability `xml:\"capability,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostSnmpSystemAgentLimits\"] = reflect.TypeOf((*HostSnmpSystemAgentLimits)(nil)).Elem()\n}\n\ntype HostSpecGetUpdatedHosts HostSpecGetUpdatedHostsRequestType\n\nfunc init() {\n\tt[\"HostSpecGetUpdatedHosts\"] = reflect.TypeOf((*HostSpecGetUpdatedHosts)(nil)).Elem()\n}\n\ntype HostSpecGetUpdatedHostsRequestType struct {\n\tThis          ManagedObjectReference `xml:\"_this\"`\n\tStartChangeID string                 `xml:\"startChangeID,omitempty\"`\n\tEndChangeID   string                 `xml:\"endChangeID,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostSpecGetUpdatedHostsRequestType\"] = reflect.TypeOf((*HostSpecGetUpdatedHostsRequestType)(nil)).Elem()\n}\n\ntype HostSpecGetUpdatedHostsResponse struct {\n\tReturnval []ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype HostSpecification struct {\n\tDynamicData\n\n\tCreatedTime  time.Time              `xml:\"createdTime\"`\n\tLastModified *time.Time         
    `xml:\"lastModified\"`\n\tHost         ManagedObjectReference `xml:\"host\"`\n\tSubSpecs     []HostSubSpecification `xml:\"subSpecs,omitempty\"`\n\tChangeID     string                 `xml:\"changeID,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostSpecification\"] = reflect.TypeOf((*HostSpecification)(nil)).Elem()\n}\n\ntype HostSpecificationOperationFailed struct {\n\tVimFault\n\n\tHost ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"HostSpecificationOperationFailed\"] = reflect.TypeOf((*HostSpecificationOperationFailed)(nil)).Elem()\n}\n\ntype HostSpecificationOperationFailedFault HostSpecificationOperationFailed\n\nfunc init() {\n\tt[\"HostSpecificationOperationFailedFault\"] = reflect.TypeOf((*HostSpecificationOperationFailedFault)(nil)).Elem()\n}\n\ntype HostSriovConfig struct {\n\tHostPciPassthruConfig\n\n\tSriovEnabled       bool  `xml:\"sriovEnabled\"`\n\tNumVirtualFunction int32 `xml:\"numVirtualFunction\"`\n}\n\nfunc init() {\n\tt[\"HostSriovConfig\"] = reflect.TypeOf((*HostSriovConfig)(nil)).Elem()\n}\n\ntype HostSriovDevicePoolInfo struct {\n\tDynamicData\n\n\tKey string `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"HostSriovDevicePoolInfo\"] = reflect.TypeOf((*HostSriovDevicePoolInfo)(nil)).Elem()\n}\n\ntype HostSriovInfo struct {\n\tHostPciPassthruInfo\n\n\tSriovEnabled                bool  `xml:\"sriovEnabled\"`\n\tSriovCapable                bool  `xml:\"sriovCapable\"`\n\tSriovActive                 bool  `xml:\"sriovActive\"`\n\tNumVirtualFunctionRequested int32 `xml:\"numVirtualFunctionRequested\"`\n\tNumVirtualFunction          int32 `xml:\"numVirtualFunction\"`\n\tMaxVirtualFunctionSupported int32 `xml:\"maxVirtualFunctionSupported\"`\n}\n\nfunc init() {\n\tt[\"HostSriovInfo\"] = reflect.TypeOf((*HostSriovInfo)(nil)).Elem()\n}\n\ntype HostSriovNetworkDevicePoolInfo struct {\n\tHostSriovDevicePoolInfo\n\n\tSwitchKey  string        `xml:\"switchKey,omitempty\"`\n\tSwitchUuid string        `xml:\"switchUuid,omitempty\"`\n\tPnic       
[]PhysicalNic `xml:\"pnic,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostSriovNetworkDevicePoolInfo\"] = reflect.TypeOf((*HostSriovNetworkDevicePoolInfo)(nil)).Elem()\n}\n\ntype HostSslThumbprintInfo struct {\n\tDynamicData\n\n\tPrincipal      string   `xml:\"principal\"`\n\tOwnerTag       string   `xml:\"ownerTag,omitempty\"`\n\tSslThumbprints []string `xml:\"sslThumbprints,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostSslThumbprintInfo\"] = reflect.TypeOf((*HostSslThumbprintInfo)(nil)).Elem()\n}\n\ntype HostStatusChangedEvent struct {\n\tClusterStatusChangedEvent\n}\n\nfunc init() {\n\tt[\"HostStatusChangedEvent\"] = reflect.TypeOf((*HostStatusChangedEvent)(nil)).Elem()\n}\n\ntype HostStorageArrayTypePolicyOption struct {\n\tDynamicData\n\n\tPolicy BaseElementDescription `xml:\"policy,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostStorageArrayTypePolicyOption\"] = reflect.TypeOf((*HostStorageArrayTypePolicyOption)(nil)).Elem()\n}\n\ntype HostStorageDeviceInfo struct {\n\tDynamicData\n\n\tHostBusAdapter              []BaseHostHostBusAdapter `xml:\"hostBusAdapter,omitempty,typeattr\"`\n\tScsiLun                     []BaseScsiLun            `xml:\"scsiLun,omitempty,typeattr\"`\n\tScsiTopology                *HostScsiTopology        `xml:\"scsiTopology,omitempty\"`\n\tMultipathInfo               *HostMultipathInfo       `xml:\"multipathInfo,omitempty\"`\n\tPlugStoreTopology           *HostPlugStoreTopology   `xml:\"plugStoreTopology,omitempty\"`\n\tSoftwareInternetScsiEnabled bool                     `xml:\"softwareInternetScsiEnabled\"`\n}\n\nfunc init() {\n\tt[\"HostStorageDeviceInfo\"] = reflect.TypeOf((*HostStorageDeviceInfo)(nil)).Elem()\n}\n\ntype HostStorageElementInfo struct {\n\tHostHardwareElementInfo\n\n\tOperationalInfo []HostStorageOperationalInfo `xml:\"operationalInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostStorageElementInfo\"] = reflect.TypeOf((*HostStorageElementInfo)(nil)).Elem()\n}\n\ntype HostStorageOperationalInfo struct 
{\n\tDynamicData\n\n\tProperty string `xml:\"property\"`\n\tValue    string `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"HostStorageOperationalInfo\"] = reflect.TypeOf((*HostStorageOperationalInfo)(nil)).Elem()\n}\n\ntype HostStorageSystemDiskLocatorLedResult struct {\n\tDynamicData\n\n\tKey   string               `xml:\"key\"`\n\tFault LocalizedMethodFault `xml:\"fault\"`\n}\n\nfunc init() {\n\tt[\"HostStorageSystemDiskLocatorLedResult\"] = reflect.TypeOf((*HostStorageSystemDiskLocatorLedResult)(nil)).Elem()\n}\n\ntype HostStorageSystemScsiLunResult struct {\n\tDynamicData\n\n\tKey   string                `xml:\"key\"`\n\tFault *LocalizedMethodFault `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostStorageSystemScsiLunResult\"] = reflect.TypeOf((*HostStorageSystemScsiLunResult)(nil)).Elem()\n}\n\ntype HostStorageSystemVmfsVolumeResult struct {\n\tDynamicData\n\n\tKey   string                `xml:\"key\"`\n\tFault *LocalizedMethodFault `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostStorageSystemVmfsVolumeResult\"] = reflect.TypeOf((*HostStorageSystemVmfsVolumeResult)(nil)).Elem()\n}\n\ntype HostSubSpecification struct {\n\tDynamicData\n\n\tName        string    `xml:\"name\"`\n\tCreatedTime time.Time `xml:\"createdTime\"`\n\tData        []byte    `xml:\"data,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostSubSpecification\"] = reflect.TypeOf((*HostSubSpecification)(nil)).Elem()\n}\n\ntype HostSyncFailedEvent struct {\n\tHostEvent\n\n\tReason LocalizedMethodFault `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"HostSyncFailedEvent\"] = reflect.TypeOf((*HostSyncFailedEvent)(nil)).Elem()\n}\n\ntype HostSystemHealthInfo struct {\n\tDynamicData\n\n\tNumericSensorInfo []HostNumericSensorInfo `xml:\"numericSensorInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostSystemHealthInfo\"] = reflect.TypeOf((*HostSystemHealthInfo)(nil)).Elem()\n}\n\ntype HostSystemIdentificationInfo struct {\n\tDynamicData\n\n\tIdentifierValue string                 
`xml:\"identifierValue\"`\n\tIdentifierType  BaseElementDescription `xml:\"identifierType,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostSystemIdentificationInfo\"] = reflect.TypeOf((*HostSystemIdentificationInfo)(nil)).Elem()\n}\n\ntype HostSystemInfo struct {\n\tDynamicData\n\n\tVendor               string                         `xml:\"vendor\"`\n\tModel                string                         `xml:\"model\"`\n\tUuid                 string                         `xml:\"uuid\"`\n\tOtherIdentifyingInfo []HostSystemIdentificationInfo `xml:\"otherIdentifyingInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostSystemInfo\"] = reflect.TypeOf((*HostSystemInfo)(nil)).Elem()\n}\n\ntype HostSystemReconnectSpec struct {\n\tDynamicData\n\n\tSyncState *bool `xml:\"syncState\"`\n}\n\nfunc init() {\n\tt[\"HostSystemReconnectSpec\"] = reflect.TypeOf((*HostSystemReconnectSpec)(nil)).Elem()\n}\n\ntype HostSystemResourceInfo struct {\n\tDynamicData\n\n\tKey    string                   `xml:\"key\"`\n\tConfig *ResourceConfigSpec      `xml:\"config,omitempty\"`\n\tChild  []HostSystemResourceInfo `xml:\"child,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostSystemResourceInfo\"] = reflect.TypeOf((*HostSystemResourceInfo)(nil)).Elem()\n}\n\ntype HostSystemSwapConfiguration struct {\n\tDynamicData\n\n\tOption []BaseHostSystemSwapConfigurationSystemSwapOption `xml:\"option,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostSystemSwapConfiguration\"] = reflect.TypeOf((*HostSystemSwapConfiguration)(nil)).Elem()\n}\n\ntype HostSystemSwapConfigurationDatastoreOption struct {\n\tHostSystemSwapConfigurationSystemSwapOption\n\n\tDatastore string `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"HostSystemSwapConfigurationDatastoreOption\"] = reflect.TypeOf((*HostSystemSwapConfigurationDatastoreOption)(nil)).Elem()\n}\n\ntype HostSystemSwapConfigurationDisabledOption struct {\n\tHostSystemSwapConfigurationSystemSwapOption\n}\n\nfunc init() {\n\tt[\"HostSystemSwapConfigurationDisabledOption\"] = 
reflect.TypeOf((*HostSystemSwapConfigurationDisabledOption)(nil)).Elem()\n}\n\ntype HostSystemSwapConfigurationHostCacheOption struct {\n\tHostSystemSwapConfigurationSystemSwapOption\n}\n\nfunc init() {\n\tt[\"HostSystemSwapConfigurationHostCacheOption\"] = reflect.TypeOf((*HostSystemSwapConfigurationHostCacheOption)(nil)).Elem()\n}\n\ntype HostSystemSwapConfigurationHostLocalSwapOption struct {\n\tHostSystemSwapConfigurationSystemSwapOption\n}\n\nfunc init() {\n\tt[\"HostSystemSwapConfigurationHostLocalSwapOption\"] = reflect.TypeOf((*HostSystemSwapConfigurationHostLocalSwapOption)(nil)).Elem()\n}\n\ntype HostSystemSwapConfigurationSystemSwapOption struct {\n\tDynamicData\n\n\tKey int32 `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"HostSystemSwapConfigurationSystemSwapOption\"] = reflect.TypeOf((*HostSystemSwapConfigurationSystemSwapOption)(nil)).Elem()\n}\n\ntype HostTargetTransport struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"HostTargetTransport\"] = reflect.TypeOf((*HostTargetTransport)(nil)).Elem()\n}\n\ntype HostTpmAttestationReport struct {\n\tDynamicData\n\n\tTpmPcrValues   []HostTpmDigestInfo    `xml:\"tpmPcrValues\"`\n\tTpmEvents      []HostTpmEventLogEntry `xml:\"tpmEvents\"`\n\tTpmLogReliable bool                   `xml:\"tpmLogReliable\"`\n}\n\nfunc init() {\n\tt[\"HostTpmAttestationReport\"] = reflect.TypeOf((*HostTpmAttestationReport)(nil)).Elem()\n}\n\ntype HostTpmBootSecurityOptionEventDetails struct {\n\tHostTpmEventDetails\n\n\tBootSecurityOption string `xml:\"bootSecurityOption\"`\n}\n\nfunc init() {\n\tt[\"HostTpmBootSecurityOptionEventDetails\"] = reflect.TypeOf((*HostTpmBootSecurityOptionEventDetails)(nil)).Elem()\n}\n\ntype HostTpmCommandEventDetails struct {\n\tHostTpmEventDetails\n\n\tCommandLine string `xml:\"commandLine\"`\n}\n\nfunc init() {\n\tt[\"HostTpmCommandEventDetails\"] = reflect.TypeOf((*HostTpmCommandEventDetails)(nil)).Elem()\n}\n\ntype HostTpmDigestInfo struct {\n\tHostDigestInfo\n\n\tPcrNumber int32 
`xml:\"pcrNumber\"`\n}\n\nfunc init() {\n\tt[\"HostTpmDigestInfo\"] = reflect.TypeOf((*HostTpmDigestInfo)(nil)).Elem()\n}\n\ntype HostTpmEventDetails struct {\n\tDynamicData\n\n\tDataHash []byte `xml:\"dataHash\"`\n}\n\nfunc init() {\n\tt[\"HostTpmEventDetails\"] = reflect.TypeOf((*HostTpmEventDetails)(nil)).Elem()\n}\n\ntype HostTpmEventLogEntry struct {\n\tDynamicData\n\n\tPcrIndex     int32                   `xml:\"pcrIndex\"`\n\tEventDetails BaseHostTpmEventDetails `xml:\"eventDetails,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostTpmEventLogEntry\"] = reflect.TypeOf((*HostTpmEventLogEntry)(nil)).Elem()\n}\n\ntype HostTpmOptionEventDetails struct {\n\tHostTpmEventDetails\n\n\tOptionsFileName string `xml:\"optionsFileName\"`\n\tBootOptions     []byte `xml:\"bootOptions,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostTpmOptionEventDetails\"] = reflect.TypeOf((*HostTpmOptionEventDetails)(nil)).Elem()\n}\n\ntype HostTpmSoftwareComponentEventDetails struct {\n\tHostTpmEventDetails\n\n\tComponentName string `xml:\"componentName\"`\n\tVibName       string `xml:\"vibName\"`\n\tVibVersion    string `xml:\"vibVersion\"`\n\tVibVendor     string `xml:\"vibVendor\"`\n}\n\nfunc init() {\n\tt[\"HostTpmSoftwareComponentEventDetails\"] = reflect.TypeOf((*HostTpmSoftwareComponentEventDetails)(nil)).Elem()\n}\n\ntype HostUnresolvedVmfsExtent struct {\n\tDynamicData\n\n\tDevice       HostScsiDiskPartition `xml:\"device\"`\n\tDevicePath   string                `xml:\"devicePath\"`\n\tVmfsUuid     string                `xml:\"vmfsUuid\"`\n\tIsHeadExtent bool                  `xml:\"isHeadExtent\"`\n\tOrdinal      int32                 `xml:\"ordinal\"`\n\tStartBlock   int32                 `xml:\"startBlock\"`\n\tEndBlock     int32                 `xml:\"endBlock\"`\n\tReason       string                `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"HostUnresolvedVmfsExtent\"] = reflect.TypeOf((*HostUnresolvedVmfsExtent)(nil)).Elem()\n}\n\ntype HostUnresolvedVmfsResignatureSpec struct 
{\n\tDynamicData\n\n\tExtentDevicePath []string `xml:\"extentDevicePath\"`\n}\n\nfunc init() {\n\tt[\"HostUnresolvedVmfsResignatureSpec\"] = reflect.TypeOf((*HostUnresolvedVmfsResignatureSpec)(nil)).Elem()\n}\n\ntype HostUnresolvedVmfsResolutionResult struct {\n\tDynamicData\n\n\tSpec  HostUnresolvedVmfsResolutionSpec `xml:\"spec\"`\n\tVmfs  *HostVmfsVolume                  `xml:\"vmfs,omitempty\"`\n\tFault *LocalizedMethodFault            `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostUnresolvedVmfsResolutionResult\"] = reflect.TypeOf((*HostUnresolvedVmfsResolutionResult)(nil)).Elem()\n}\n\ntype HostUnresolvedVmfsResolutionSpec struct {\n\tDynamicData\n\n\tExtentDevicePath []string `xml:\"extentDevicePath\"`\n\tUuidResolution   string   `xml:\"uuidResolution\"`\n}\n\nfunc init() {\n\tt[\"HostUnresolvedVmfsResolutionSpec\"] = reflect.TypeOf((*HostUnresolvedVmfsResolutionSpec)(nil)).Elem()\n}\n\ntype HostUnresolvedVmfsVolume struct {\n\tDynamicData\n\n\tExtent        []HostUnresolvedVmfsExtent            `xml:\"extent\"`\n\tVmfsLabel     string                                `xml:\"vmfsLabel\"`\n\tVmfsUuid      string                                `xml:\"vmfsUuid\"`\n\tTotalBlocks   int32                                 `xml:\"totalBlocks\"`\n\tResolveStatus HostUnresolvedVmfsVolumeResolveStatus `xml:\"resolveStatus\"`\n}\n\nfunc init() {\n\tt[\"HostUnresolvedVmfsVolume\"] = reflect.TypeOf((*HostUnresolvedVmfsVolume)(nil)).Elem()\n}\n\ntype HostUnresolvedVmfsVolumeResolveStatus struct {\n\tDynamicData\n\n\tResolvable        bool  `xml:\"resolvable\"`\n\tIncompleteExtents *bool `xml:\"incompleteExtents\"`\n\tMultipleCopies    *bool `xml:\"multipleCopies\"`\n}\n\nfunc init() {\n\tt[\"HostUnresolvedVmfsVolumeResolveStatus\"] = reflect.TypeOf((*HostUnresolvedVmfsVolumeResolveStatus)(nil)).Elem()\n}\n\ntype HostUpgradeFailedEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostUpgradeFailedEvent\"] = 
reflect.TypeOf((*HostUpgradeFailedEvent)(nil)).Elem()\n}\n\ntype HostUserWorldSwapNotEnabledEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"HostUserWorldSwapNotEnabledEvent\"] = reflect.TypeOf((*HostUserWorldSwapNotEnabledEvent)(nil)).Elem()\n}\n\ntype HostVFlashManagerVFlashCacheConfigInfo struct {\n\tDynamicData\n\n\tVFlashModuleConfigOption []HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption `xml:\"vFlashModuleConfigOption,omitempty\"`\n\tDefaultVFlashModule      string                                                           `xml:\"defaultVFlashModule,omitempty\"`\n\tSwapCacheReservationInGB int64                                                            `xml:\"swapCacheReservationInGB,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVFlashManagerVFlashCacheConfigInfo\"] = reflect.TypeOf((*HostVFlashManagerVFlashCacheConfigInfo)(nil)).Elem()\n}\n\ntype HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption struct {\n\tDynamicData\n\n\tVFlashModule              string       `xml:\"vFlashModule\"`\n\tVFlashModuleVersion       string       `xml:\"vFlashModuleVersion\"`\n\tMinSupportedModuleVersion string       `xml:\"minSupportedModuleVersion\"`\n\tCacheConsistencyType      ChoiceOption `xml:\"cacheConsistencyType\"`\n\tCacheMode                 ChoiceOption `xml:\"cacheMode\"`\n\tBlockSizeInKBOption       LongOption   `xml:\"blockSizeInKBOption\"`\n\tReservationInMBOption     LongOption   `xml:\"reservationInMBOption\"`\n\tMaxDiskSizeInKB           int64        `xml:\"maxDiskSizeInKB\"`\n}\n\nfunc init() {\n\tt[\"HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption\"] = reflect.TypeOf((*HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption)(nil)).Elem()\n}\n\ntype HostVFlashManagerVFlashCacheConfigSpec struct {\n\tDynamicData\n\n\tDefaultVFlashModule      string `xml:\"defaultVFlashModule\"`\n\tSwapCacheReservationInGB int64  `xml:\"swapCacheReservationInGB\"`\n}\n\nfunc init() 
{\n\tt[\"HostVFlashManagerVFlashCacheConfigSpec\"] = reflect.TypeOf((*HostVFlashManagerVFlashCacheConfigSpec)(nil)).Elem()\n}\n\ntype HostVFlashManagerVFlashConfigInfo struct {\n\tDynamicData\n\n\tVFlashResourceConfigInfo *HostVFlashManagerVFlashResourceConfigInfo `xml:\"vFlashResourceConfigInfo,omitempty\"`\n\tVFlashCacheConfigInfo    *HostVFlashManagerVFlashCacheConfigInfo    `xml:\"vFlashCacheConfigInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVFlashManagerVFlashConfigInfo\"] = reflect.TypeOf((*HostVFlashManagerVFlashConfigInfo)(nil)).Elem()\n}\n\ntype HostVFlashManagerVFlashResourceConfigInfo struct {\n\tDynamicData\n\n\tVffs     *HostVffsVolume `xml:\"vffs,omitempty\"`\n\tCapacity int64           `xml:\"capacity\"`\n}\n\nfunc init() {\n\tt[\"HostVFlashManagerVFlashResourceConfigInfo\"] = reflect.TypeOf((*HostVFlashManagerVFlashResourceConfigInfo)(nil)).Elem()\n}\n\ntype HostVFlashManagerVFlashResourceConfigSpec struct {\n\tDynamicData\n\n\tVffsUuid string `xml:\"vffsUuid\"`\n}\n\nfunc init() {\n\tt[\"HostVFlashManagerVFlashResourceConfigSpec\"] = reflect.TypeOf((*HostVFlashManagerVFlashResourceConfigSpec)(nil)).Elem()\n}\n\ntype HostVFlashManagerVFlashResourceRunTimeInfo struct {\n\tDynamicData\n\n\tUsage              int64 `xml:\"usage\"`\n\tCapacity           int64 `xml:\"capacity\"`\n\tAccessible         bool  `xml:\"accessible\"`\n\tCapacityForVmCache int64 `xml:\"capacityForVmCache\"`\n\tFreeForVmCache     int64 `xml:\"freeForVmCache\"`\n}\n\nfunc init() {\n\tt[\"HostVFlashManagerVFlashResourceRunTimeInfo\"] = reflect.TypeOf((*HostVFlashManagerVFlashResourceRunTimeInfo)(nil)).Elem()\n}\n\ntype HostVFlashResourceConfigurationResult struct {\n\tDynamicData\n\n\tDevicePath              []string                      `xml:\"devicePath,omitempty\"`\n\tVffs                    *HostVffsVolume               `xml:\"vffs,omitempty\"`\n\tDiskConfigurationResult []HostDiskConfigurationResult `xml:\"diskConfigurationResult,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"HostVFlashResourceConfigurationResult\"] = reflect.TypeOf((*HostVFlashResourceConfigurationResult)(nil)).Elem()\n}\n\ntype HostVMotionCompatibility struct {\n\tDynamicData\n\n\tHost          ManagedObjectReference `xml:\"host\"`\n\tCompatibility []string               `xml:\"compatibility,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVMotionCompatibility\"] = reflect.TypeOf((*HostVMotionCompatibility)(nil)).Elem()\n}\n\ntype HostVMotionConfig struct {\n\tDynamicData\n\n\tVmotionNicKey string `xml:\"vmotionNicKey,omitempty\"`\n\tEnabled       bool   `xml:\"enabled\"`\n}\n\nfunc init() {\n\tt[\"HostVMotionConfig\"] = reflect.TypeOf((*HostVMotionConfig)(nil)).Elem()\n}\n\ntype HostVMotionInfo struct {\n\tDynamicData\n\n\tNetConfig *HostVMotionNetConfig `xml:\"netConfig,omitempty\"`\n\tIpConfig  *HostIpConfig         `xml:\"ipConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVMotionInfo\"] = reflect.TypeOf((*HostVMotionInfo)(nil)).Elem()\n}\n\ntype HostVMotionNetConfig struct {\n\tDynamicData\n\n\tCandidateVnic []HostVirtualNic `xml:\"candidateVnic,omitempty\"`\n\tSelectedVnic  string           `xml:\"selectedVnic,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVMotionNetConfig\"] = reflect.TypeOf((*HostVMotionNetConfig)(nil)).Elem()\n}\n\ntype HostVfatVolume struct {\n\tHostFileSystemVolume\n}\n\nfunc init() {\n\tt[\"HostVfatVolume\"] = reflect.TypeOf((*HostVfatVolume)(nil)).Elem()\n}\n\ntype HostVffsSpec struct {\n\tDynamicData\n\n\tDevicePath   string                 `xml:\"devicePath\"`\n\tPartition    *HostDiskPartitionSpec `xml:\"partition,omitempty\"`\n\tMajorVersion int32                  `xml:\"majorVersion\"`\n\tVolumeName   string                 `xml:\"volumeName\"`\n}\n\nfunc init() {\n\tt[\"HostVffsSpec\"] = reflect.TypeOf((*HostVffsSpec)(nil)).Elem()\n}\n\ntype HostVffsVolume struct {\n\tHostFileSystemVolume\n\n\tMajorVersion int32                   `xml:\"majorVersion\"`\n\tVersion      string                  `xml:\"version\"`\n\tUuid         
string                  `xml:\"uuid\"`\n\tExtent       []HostScsiDiskPartition `xml:\"extent\"`\n}\n\nfunc init() {\n\tt[\"HostVffsVolume\"] = reflect.TypeOf((*HostVffsVolume)(nil)).Elem()\n}\n\ntype HostVirtualNic struct {\n\tDynamicData\n\n\tDevice    string             `xml:\"device\"`\n\tKey       string             `xml:\"key\"`\n\tPortgroup string             `xml:\"portgroup\"`\n\tSpec      HostVirtualNicSpec `xml:\"spec\"`\n\tPort      string             `xml:\"port,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVirtualNic\"] = reflect.TypeOf((*HostVirtualNic)(nil)).Elem()\n}\n\ntype HostVirtualNicConfig struct {\n\tDynamicData\n\n\tChangeOperation string              `xml:\"changeOperation,omitempty\"`\n\tDevice          string              `xml:\"device,omitempty\"`\n\tPortgroup       string              `xml:\"portgroup\"`\n\tSpec            *HostVirtualNicSpec `xml:\"spec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVirtualNicConfig\"] = reflect.TypeOf((*HostVirtualNicConfig)(nil)).Elem()\n}\n\ntype HostVirtualNicConnection struct {\n\tDynamicData\n\n\tPortgroup string                                  `xml:\"portgroup,omitempty\"`\n\tDvPort    *DistributedVirtualSwitchPortConnection `xml:\"dvPort,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVirtualNicConnection\"] = reflect.TypeOf((*HostVirtualNicConnection)(nil)).Elem()\n}\n\ntype HostVirtualNicIpRouteSpec struct {\n\tDynamicData\n\n\tIpRouteConfig BaseHostIpRouteConfig `xml:\"ipRouteConfig,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"HostVirtualNicIpRouteSpec\"] = reflect.TypeOf((*HostVirtualNicIpRouteSpec)(nil)).Elem()\n}\n\ntype HostVirtualNicManagerInfo struct {\n\tDynamicData\n\n\tNetConfig []VirtualNicManagerNetConfig `xml:\"netConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVirtualNicManagerInfo\"] = reflect.TypeOf((*HostVirtualNicManagerInfo)(nil)).Elem()\n}\n\ntype HostVirtualNicManagerNicTypeSelection struct {\n\tDynamicData\n\n\tVnic    HostVirtualNicConnection `xml:\"vnic\"`\n\tNicType 
[]string                 `xml:\"nicType,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVirtualNicManagerNicTypeSelection\"] = reflect.TypeOf((*HostVirtualNicManagerNicTypeSelection)(nil)).Elem()\n}\n\ntype HostVirtualNicOpaqueNetworkSpec struct {\n\tDynamicData\n\n\tOpaqueNetworkId   string `xml:\"opaqueNetworkId\"`\n\tOpaqueNetworkType string `xml:\"opaqueNetworkType\"`\n}\n\nfunc init() {\n\tt[\"HostVirtualNicOpaqueNetworkSpec\"] = reflect.TypeOf((*HostVirtualNicOpaqueNetworkSpec)(nil)).Elem()\n}\n\ntype HostVirtualNicSpec struct {\n\tDynamicData\n\n\tIp                     *HostIpConfig                           `xml:\"ip,omitempty\"`\n\tMac                    string                                  `xml:\"mac,omitempty\"`\n\tDistributedVirtualPort *DistributedVirtualSwitchPortConnection `xml:\"distributedVirtualPort,omitempty\"`\n\tPortgroup              string                                  `xml:\"portgroup,omitempty\"`\n\tMtu                    int32                                   `xml:\"mtu,omitempty\"`\n\tTsoEnabled             *bool                                   `xml:\"tsoEnabled\"`\n\tNetStackInstanceKey    string                                  `xml:\"netStackInstanceKey,omitempty\"`\n\tOpaqueNetwork          *HostVirtualNicOpaqueNetworkSpec        `xml:\"opaqueNetwork,omitempty\"`\n\tExternalId             string                                  `xml:\"externalId,omitempty\"`\n\tPinnedPnic             string                                  `xml:\"pinnedPnic,omitempty\"`\n\tIpRouteSpec            *HostVirtualNicIpRouteSpec              `xml:\"ipRouteSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVirtualNicSpec\"] = reflect.TypeOf((*HostVirtualNicSpec)(nil)).Elem()\n}\n\ntype HostVirtualSwitch struct {\n\tDynamicData\n\n\tName              string                `xml:\"name\"`\n\tKey               string                `xml:\"key\"`\n\tNumPorts          int32                 `xml:\"numPorts\"`\n\tNumPortsAvailable int32                 
`xml:\"numPortsAvailable\"`\n\tMtu               int32                 `xml:\"mtu,omitempty\"`\n\tPortgroup         []string              `xml:\"portgroup,omitempty\"`\n\tPnic              []string              `xml:\"pnic,omitempty\"`\n\tSpec              HostVirtualSwitchSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"HostVirtualSwitch\"] = reflect.TypeOf((*HostVirtualSwitch)(nil)).Elem()\n}\n\ntype HostVirtualSwitchAutoBridge struct {\n\tHostVirtualSwitchBridge\n\n\tExcludedNicDevice []string `xml:\"excludedNicDevice,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVirtualSwitchAutoBridge\"] = reflect.TypeOf((*HostVirtualSwitchAutoBridge)(nil)).Elem()\n}\n\ntype HostVirtualSwitchBeaconConfig struct {\n\tDynamicData\n\n\tInterval int32 `xml:\"interval\"`\n}\n\nfunc init() {\n\tt[\"HostVirtualSwitchBeaconConfig\"] = reflect.TypeOf((*HostVirtualSwitchBeaconConfig)(nil)).Elem()\n}\n\ntype HostVirtualSwitchBondBridge struct {\n\tHostVirtualSwitchBridge\n\n\tNicDevice                   []string                       `xml:\"nicDevice\"`\n\tBeacon                      *HostVirtualSwitchBeaconConfig `xml:\"beacon,omitempty\"`\n\tLinkDiscoveryProtocolConfig *LinkDiscoveryProtocolConfig   `xml:\"linkDiscoveryProtocolConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVirtualSwitchBondBridge\"] = reflect.TypeOf((*HostVirtualSwitchBondBridge)(nil)).Elem()\n}\n\ntype HostVirtualSwitchBridge struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"HostVirtualSwitchBridge\"] = reflect.TypeOf((*HostVirtualSwitchBridge)(nil)).Elem()\n}\n\ntype HostVirtualSwitchConfig struct {\n\tDynamicData\n\n\tChangeOperation string                 `xml:\"changeOperation,omitempty\"`\n\tName            string                 `xml:\"name\"`\n\tSpec            *HostVirtualSwitchSpec `xml:\"spec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVirtualSwitchConfig\"] = reflect.TypeOf((*HostVirtualSwitchConfig)(nil)).Elem()\n}\n\ntype HostVirtualSwitchSimpleBridge struct {\n\tHostVirtualSwitchBridge\n\n\tNicDevice 
string `xml:\"nicDevice\"`\n}\n\nfunc init() {\n\tt[\"HostVirtualSwitchSimpleBridge\"] = reflect.TypeOf((*HostVirtualSwitchSimpleBridge)(nil)).Elem()\n}\n\ntype HostVirtualSwitchSpec struct {\n\tDynamicData\n\n\tNumPorts int32                       `xml:\"numPorts\"`\n\tBridge   BaseHostVirtualSwitchBridge `xml:\"bridge,omitempty,typeattr\"`\n\tPolicy   *HostNetworkPolicy          `xml:\"policy,omitempty\"`\n\tMtu      int32                       `xml:\"mtu,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVirtualSwitchSpec\"] = reflect.TypeOf((*HostVirtualSwitchSpec)(nil)).Elem()\n}\n\ntype HostVmciAccessManagerAccessSpec struct {\n\tDynamicData\n\n\tVm       ManagedObjectReference `xml:\"vm\"`\n\tServices []string               `xml:\"services,omitempty\"`\n\tMode     string                 `xml:\"mode\"`\n}\n\nfunc init() {\n\tt[\"HostVmciAccessManagerAccessSpec\"] = reflect.TypeOf((*HostVmciAccessManagerAccessSpec)(nil)).Elem()\n}\n\ntype HostVmfsRescanResult struct {\n\tDynamicData\n\n\tHost  ManagedObjectReference `xml:\"host\"`\n\tFault *LocalizedMethodFault  `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVmfsRescanResult\"] = reflect.TypeOf((*HostVmfsRescanResult)(nil)).Elem()\n}\n\ntype HostVmfsSpec struct {\n\tDynamicData\n\n\tExtent           HostScsiDiskPartition `xml:\"extent\"`\n\tBlockSizeMb      int32                 `xml:\"blockSizeMb,omitempty\"`\n\tMajorVersion     int32                 `xml:\"majorVersion\"`\n\tVolumeName       string                `xml:\"volumeName\"`\n\tBlockSize        int32                 `xml:\"blockSize,omitempty\"`\n\tUnmapGranularity int32                 `xml:\"unmapGranularity,omitempty\"`\n\tUnmapPriority    string                `xml:\"unmapPriority,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVmfsSpec\"] = reflect.TypeOf((*HostVmfsSpec)(nil)).Elem()\n}\n\ntype HostVmfsVolume struct {\n\tHostFileSystemVolume\n\n\tBlockSizeMb      int32                   `xml:\"blockSizeMb\"`\n\tBlockSize        int32             
      `xml:\"blockSize,omitempty\"`\n\tUnmapGranularity int32                   `xml:\"unmapGranularity,omitempty\"`\n\tUnmapPriority    string                  `xml:\"unmapPriority,omitempty\"`\n\tMaxBlocks        int32                   `xml:\"maxBlocks\"`\n\tMajorVersion     int32                   `xml:\"majorVersion\"`\n\tVersion          string                  `xml:\"version\"`\n\tUuid             string                  `xml:\"uuid\"`\n\tExtent           []HostScsiDiskPartition `xml:\"extent\"`\n\tVmfsUpgradable   bool                    `xml:\"vmfsUpgradable\"`\n\tForceMountedInfo *HostForceMountedInfo   `xml:\"forceMountedInfo,omitempty\"`\n\tSsd              *bool                   `xml:\"ssd\"`\n\tLocal            *bool                   `xml:\"local\"`\n\tScsiDiskType     string                  `xml:\"scsiDiskType,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVmfsVolume\"] = reflect.TypeOf((*HostVmfsVolume)(nil)).Elem()\n}\n\ntype HostVnicConnectedToCustomizedDVPortEvent struct {\n\tHostEvent\n\n\tVnic        VnicPortArgument `xml:\"vnic\"`\n\tPrevPortKey string           `xml:\"prevPortKey,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVnicConnectedToCustomizedDVPortEvent\"] = reflect.TypeOf((*HostVnicConnectedToCustomizedDVPortEvent)(nil)).Elem()\n}\n\ntype HostVsanInternalSystemCmmdsQuery struct {\n\tDynamicData\n\n\tType  string `xml:\"type,omitempty\"`\n\tUuid  string `xml:\"uuid,omitempty\"`\n\tOwner string `xml:\"owner,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVsanInternalSystemCmmdsQuery\"] = reflect.TypeOf((*HostVsanInternalSystemCmmdsQuery)(nil)).Elem()\n}\n\ntype HostVsanInternalSystemDeleteVsanObjectsResult struct {\n\tDynamicData\n\n\tUuid          string               `xml:\"uuid\"`\n\tSuccess       bool                 `xml:\"success\"`\n\tFailureReason []LocalizableMessage `xml:\"failureReason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVsanInternalSystemDeleteVsanObjectsResult\"] = 
reflect.TypeOf((*HostVsanInternalSystemDeleteVsanObjectsResult)(nil)).Elem()\n}\n\ntype HostVsanInternalSystemVsanObjectOperationResult struct {\n\tDynamicData\n\n\tUuid          string               `xml:\"uuid\"`\n\tFailureReason []LocalizableMessage `xml:\"failureReason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVsanInternalSystemVsanObjectOperationResult\"] = reflect.TypeOf((*HostVsanInternalSystemVsanObjectOperationResult)(nil)).Elem()\n}\n\ntype HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult struct {\n\tDynamicData\n\n\tDiskUuid      string `xml:\"diskUuid\"`\n\tSuccess       bool   `xml:\"success\"`\n\tFailureReason string `xml:\"failureReason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult\"] = reflect.TypeOf((*HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult)(nil)).Elem()\n}\n\ntype HostVvolVolume struct {\n\tHostFileSystemVolume\n\n\tScId             string                `xml:\"scId\"`\n\tHostPE           []VVolHostPE          `xml:\"hostPE,omitempty\"`\n\tVasaProviderInfo []VimVasaProviderInfo `xml:\"vasaProviderInfo,omitempty\"`\n\tStorageArray     []VASAStorageArray    `xml:\"storageArray,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostVvolVolume\"] = reflect.TypeOf((*HostVvolVolume)(nil)).Elem()\n}\n\ntype HostVvolVolumeSpecification struct {\n\tDynamicData\n\n\tMaxSizeInMB      int64                 `xml:\"maxSizeInMB\"`\n\tVolumeName       string                `xml:\"volumeName\"`\n\tVasaProviderInfo []VimVasaProviderInfo `xml:\"vasaProviderInfo,omitempty\"`\n\tStorageArray     []VASAStorageArray    `xml:\"storageArray,omitempty\"`\n\tUuid             string                `xml:\"uuid\"`\n}\n\nfunc init() {\n\tt[\"HostVvolVolumeSpecification\"] = reflect.TypeOf((*HostVvolVolumeSpecification)(nil)).Elem()\n}\n\ntype HostWwnChangedEvent struct {\n\tHostEvent\n\n\tOldNodeWwns []int64 `xml:\"oldNodeWwns,omitempty\"`\n\tOldPortWwns []int64 `xml:\"oldPortWwns,omitempty\"`\n\tNewNodeWwns 
[]int64 `xml:\"newNodeWwns,omitempty\"`\n\tNewPortWwns []int64 `xml:\"newPortWwns,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HostWwnChangedEvent\"] = reflect.TypeOf((*HostWwnChangedEvent)(nil)).Elem()\n}\n\ntype HostWwnConflictEvent struct {\n\tHostEvent\n\n\tConflictedVms   []VmEventArgument   `xml:\"conflictedVms,omitempty\"`\n\tConflictedHosts []HostEventArgument `xml:\"conflictedHosts,omitempty\"`\n\tWwn             int64               `xml:\"wwn\"`\n}\n\nfunc init() {\n\tt[\"HostWwnConflictEvent\"] = reflect.TypeOf((*HostWwnConflictEvent)(nil)).Elem()\n}\n\ntype HotSnapshotMoveNotSupported struct {\n\tSnapshotCopyNotSupported\n}\n\nfunc init() {\n\tt[\"HotSnapshotMoveNotSupported\"] = reflect.TypeOf((*HotSnapshotMoveNotSupported)(nil)).Elem()\n}\n\ntype HotSnapshotMoveNotSupportedFault HotSnapshotMoveNotSupported\n\nfunc init() {\n\tt[\"HotSnapshotMoveNotSupportedFault\"] = reflect.TypeOf((*HotSnapshotMoveNotSupportedFault)(nil)).Elem()\n}\n\ntype HourlyTaskScheduler struct {\n\tRecurrentTaskScheduler\n\n\tMinute int32 `xml:\"minute\"`\n}\n\nfunc init() {\n\tt[\"HourlyTaskScheduler\"] = reflect.TypeOf((*HourlyTaskScheduler)(nil)).Elem()\n}\n\ntype HttpNfcLeaseAbort HttpNfcLeaseAbortRequestType\n\nfunc init() {\n\tt[\"HttpNfcLeaseAbort\"] = reflect.TypeOf((*HttpNfcLeaseAbort)(nil)).Elem()\n}\n\ntype HttpNfcLeaseAbortRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tFault *LocalizedMethodFault  `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HttpNfcLeaseAbortRequestType\"] = reflect.TypeOf((*HttpNfcLeaseAbortRequestType)(nil)).Elem()\n}\n\ntype HttpNfcLeaseAbortResponse struct {\n}\n\ntype HttpNfcLeaseComplete HttpNfcLeaseCompleteRequestType\n\nfunc init() {\n\tt[\"HttpNfcLeaseComplete\"] = reflect.TypeOf((*HttpNfcLeaseComplete)(nil)).Elem()\n}\n\ntype HttpNfcLeaseCompleteRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"HttpNfcLeaseCompleteRequestType\"] = 
reflect.TypeOf((*HttpNfcLeaseCompleteRequestType)(nil)).Elem()\n}\n\ntype HttpNfcLeaseCompleteResponse struct {\n}\n\ntype HttpNfcLeaseDatastoreLeaseInfo struct {\n\tDynamicData\n\n\tDatastoreKey string                 `xml:\"datastoreKey\"`\n\tHosts        []HttpNfcLeaseHostInfo `xml:\"hosts\"`\n}\n\nfunc init() {\n\tt[\"HttpNfcLeaseDatastoreLeaseInfo\"] = reflect.TypeOf((*HttpNfcLeaseDatastoreLeaseInfo)(nil)).Elem()\n}\n\ntype HttpNfcLeaseDeviceUrl struct {\n\tDynamicData\n\n\tKey           string `xml:\"key\"`\n\tImportKey     string `xml:\"importKey\"`\n\tUrl           string `xml:\"url\"`\n\tSslThumbprint string `xml:\"sslThumbprint\"`\n\tDisk          *bool  `xml:\"disk\"`\n\tTargetId      string `xml:\"targetId,omitempty\"`\n\tDatastoreKey  string `xml:\"datastoreKey,omitempty\"`\n\tFileSize      int64  `xml:\"fileSize,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HttpNfcLeaseDeviceUrl\"] = reflect.TypeOf((*HttpNfcLeaseDeviceUrl)(nil)).Elem()\n}\n\ntype HttpNfcLeaseGetManifest HttpNfcLeaseGetManifestRequestType\n\nfunc init() {\n\tt[\"HttpNfcLeaseGetManifest\"] = reflect.TypeOf((*HttpNfcLeaseGetManifest)(nil)).Elem()\n}\n\ntype HttpNfcLeaseGetManifestRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"HttpNfcLeaseGetManifestRequestType\"] = reflect.TypeOf((*HttpNfcLeaseGetManifestRequestType)(nil)).Elem()\n}\n\ntype HttpNfcLeaseGetManifestResponse struct {\n\tReturnval []HttpNfcLeaseManifestEntry `xml:\"returnval,omitempty\"`\n}\n\ntype HttpNfcLeaseHostInfo struct {\n\tDynamicData\n\n\tUrl           string `xml:\"url\"`\n\tSslThumbprint string `xml:\"sslThumbprint\"`\n}\n\nfunc init() {\n\tt[\"HttpNfcLeaseHostInfo\"] = reflect.TypeOf((*HttpNfcLeaseHostInfo)(nil)).Elem()\n}\n\ntype HttpNfcLeaseInfo struct {\n\tDynamicData\n\n\tLease                 ManagedObjectReference           `xml:\"lease\"`\n\tEntity                ManagedObjectReference           `xml:\"entity\"`\n\tDeviceUrl             []HttpNfcLeaseDeviceUrl      
    `xml:\"deviceUrl,omitempty\"`\n\tTotalDiskCapacityInKB int64                            `xml:\"totalDiskCapacityInKB\"`\n\tLeaseTimeout          int32                            `xml:\"leaseTimeout\"`\n\tHostMap               []HttpNfcLeaseDatastoreLeaseInfo `xml:\"hostMap,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HttpNfcLeaseInfo\"] = reflect.TypeOf((*HttpNfcLeaseInfo)(nil)).Elem()\n}\n\ntype HttpNfcLeaseManifestEntry struct {\n\tDynamicData\n\n\tKey           string `xml:\"key\"`\n\tSha1          string `xml:\"sha1\"`\n\tSize          int64  `xml:\"size\"`\n\tDisk          bool   `xml:\"disk\"`\n\tCapacity      int64  `xml:\"capacity,omitempty\"`\n\tPopulatedSize int64  `xml:\"populatedSize,omitempty\"`\n}\n\nfunc init() {\n\tt[\"HttpNfcLeaseManifestEntry\"] = reflect.TypeOf((*HttpNfcLeaseManifestEntry)(nil)).Elem()\n}\n\ntype HttpNfcLeaseProgress HttpNfcLeaseProgressRequestType\n\nfunc init() {\n\tt[\"HttpNfcLeaseProgress\"] = reflect.TypeOf((*HttpNfcLeaseProgress)(nil)).Elem()\n}\n\ntype HttpNfcLeaseProgressRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tPercent int32                  `xml:\"percent\"`\n}\n\nfunc init() {\n\tt[\"HttpNfcLeaseProgressRequestType\"] = reflect.TypeOf((*HttpNfcLeaseProgressRequestType)(nil)).Elem()\n}\n\ntype HttpNfcLeaseProgressResponse struct {\n}\n\ntype ID struct {\n\tDynamicData\n\n\tId string `xml:\"id\"`\n}\n\nfunc init() {\n\tt[\"ID\"] = reflect.TypeOf((*ID)(nil)).Elem()\n}\n\ntype IDEDiskNotSupported struct {\n\tDiskNotSupported\n}\n\nfunc init() {\n\tt[\"IDEDiskNotSupported\"] = reflect.TypeOf((*IDEDiskNotSupported)(nil)).Elem()\n}\n\ntype IDEDiskNotSupportedFault IDEDiskNotSupported\n\nfunc init() {\n\tt[\"IDEDiskNotSupportedFault\"] = reflect.TypeOf((*IDEDiskNotSupportedFault)(nil)).Elem()\n}\n\ntype IORMNotSupportedHostOnDatastore struct {\n\tVimFault\n\n\tDatastore     ManagedObjectReference   `xml:\"datastore\"`\n\tDatastoreName string                   `xml:\"datastoreName\"`\n\tHost       
   []ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"IORMNotSupportedHostOnDatastore\"] = reflect.TypeOf((*IORMNotSupportedHostOnDatastore)(nil)).Elem()\n}\n\ntype IORMNotSupportedHostOnDatastoreFault IORMNotSupportedHostOnDatastore\n\nfunc init() {\n\tt[\"IORMNotSupportedHostOnDatastoreFault\"] = reflect.TypeOf((*IORMNotSupportedHostOnDatastoreFault)(nil)).Elem()\n}\n\ntype IScsiBootFailureEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"IScsiBootFailureEvent\"] = reflect.TypeOf((*IScsiBootFailureEvent)(nil)).Elem()\n}\n\ntype ImpersonateUser ImpersonateUserRequestType\n\nfunc init() {\n\tt[\"ImpersonateUser\"] = reflect.TypeOf((*ImpersonateUser)(nil)).Elem()\n}\n\ntype ImpersonateUserRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tUserName string                 `xml:\"userName\"`\n\tLocale   string                 `xml:\"locale,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ImpersonateUserRequestType\"] = reflect.TypeOf((*ImpersonateUserRequestType)(nil)).Elem()\n}\n\ntype ImpersonateUserResponse struct {\n\tReturnval UserSession `xml:\"returnval\"`\n}\n\ntype ImportCertificateForCAMRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tCertPath  string                 `xml:\"certPath\"`\n\tCamServer string                 `xml:\"camServer\"`\n}\n\nfunc init() {\n\tt[\"ImportCertificateForCAMRequestType\"] = reflect.TypeOf((*ImportCertificateForCAMRequestType)(nil)).Elem()\n}\n\ntype ImportCertificateForCAM_Task ImportCertificateForCAMRequestType\n\nfunc init() {\n\tt[\"ImportCertificateForCAM_Task\"] = reflect.TypeOf((*ImportCertificateForCAM_Task)(nil)).Elem()\n}\n\ntype ImportCertificateForCAM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ImportHostAddFailure struct {\n\tDvsFault\n\n\tHostIp []string `xml:\"hostIp\"`\n}\n\nfunc init() {\n\tt[\"ImportHostAddFailure\"] = reflect.TypeOf((*ImportHostAddFailure)(nil)).Elem()\n}\n\ntype 
ImportHostAddFailureFault ImportHostAddFailure\n\nfunc init() {\n\tt[\"ImportHostAddFailureFault\"] = reflect.TypeOf((*ImportHostAddFailureFault)(nil)).Elem()\n}\n\ntype ImportOperationBulkFault struct {\n\tDvsFault\n\n\tImportFaults []ImportOperationBulkFaultFaultOnImport `xml:\"importFaults\"`\n}\n\nfunc init() {\n\tt[\"ImportOperationBulkFault\"] = reflect.TypeOf((*ImportOperationBulkFault)(nil)).Elem()\n}\n\ntype ImportOperationBulkFaultFault ImportOperationBulkFault\n\nfunc init() {\n\tt[\"ImportOperationBulkFaultFault\"] = reflect.TypeOf((*ImportOperationBulkFaultFault)(nil)).Elem()\n}\n\ntype ImportOperationBulkFaultFaultOnImport struct {\n\tDynamicData\n\n\tEntityType string               `xml:\"entityType,omitempty\"`\n\tKey        string               `xml:\"key,omitempty\"`\n\tFault      LocalizedMethodFault `xml:\"fault\"`\n}\n\nfunc init() {\n\tt[\"ImportOperationBulkFaultFaultOnImport\"] = reflect.TypeOf((*ImportOperationBulkFaultFaultOnImport)(nil)).Elem()\n}\n\ntype ImportSpec struct {\n\tDynamicData\n\n\tEntityConfig     *VAppEntityConfigInfo `xml:\"entityConfig,omitempty\"`\n\tInstantiationOst *OvfConsumerOstNode   `xml:\"instantiationOst,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ImportSpec\"] = reflect.TypeOf((*ImportSpec)(nil)).Elem()\n}\n\ntype ImportUnmanagedSnapshot ImportUnmanagedSnapshotRequestType\n\nfunc init() {\n\tt[\"ImportUnmanagedSnapshot\"] = reflect.TypeOf((*ImportUnmanagedSnapshot)(nil)).Elem()\n}\n\ntype ImportUnmanagedSnapshotRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tVdisk      string                  `xml:\"vdisk\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n\tVvolId     string                  `xml:\"vvolId\"`\n}\n\nfunc init() {\n\tt[\"ImportUnmanagedSnapshotRequestType\"] = reflect.TypeOf((*ImportUnmanagedSnapshotRequestType)(nil)).Elem()\n}\n\ntype ImportUnmanagedSnapshotResponse struct {\n}\n\ntype ImportVApp ImportVAppRequestType\n\nfunc init() 
{\n\tt[\"ImportVApp\"] = reflect.TypeOf((*ImportVApp)(nil)).Elem()\n}\n\ntype ImportVAppRequestType struct {\n\tThis   ManagedObjectReference  `xml:\"_this\"`\n\tSpec   BaseImportSpec          `xml:\"spec,typeattr\"`\n\tFolder *ManagedObjectReference `xml:\"folder,omitempty\"`\n\tHost   *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ImportVAppRequestType\"] = reflect.TypeOf((*ImportVAppRequestType)(nil)).Elem()\n}\n\ntype ImportVAppResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype InUseFeatureManipulationDisallowed struct {\n\tNotEnoughLicenses\n}\n\nfunc init() {\n\tt[\"InUseFeatureManipulationDisallowed\"] = reflect.TypeOf((*InUseFeatureManipulationDisallowed)(nil)).Elem()\n}\n\ntype InUseFeatureManipulationDisallowedFault InUseFeatureManipulationDisallowed\n\nfunc init() {\n\tt[\"InUseFeatureManipulationDisallowedFault\"] = reflect.TypeOf((*InUseFeatureManipulationDisallowedFault)(nil)).Elem()\n}\n\ntype InaccessibleDatastore struct {\n\tInvalidDatastore\n\n\tDetail string `xml:\"detail,omitempty\"`\n}\n\nfunc init() {\n\tt[\"InaccessibleDatastore\"] = reflect.TypeOf((*InaccessibleDatastore)(nil)).Elem()\n}\n\ntype InaccessibleDatastoreFault BaseInaccessibleDatastore\n\nfunc init() {\n\tt[\"InaccessibleDatastoreFault\"] = reflect.TypeOf((*InaccessibleDatastoreFault)(nil)).Elem()\n}\n\ntype InaccessibleFTMetadataDatastore struct {\n\tInaccessibleDatastore\n}\n\nfunc init() {\n\tt[\"InaccessibleFTMetadataDatastore\"] = reflect.TypeOf((*InaccessibleFTMetadataDatastore)(nil)).Elem()\n}\n\ntype InaccessibleFTMetadataDatastoreFault InaccessibleFTMetadataDatastore\n\nfunc init() {\n\tt[\"InaccessibleFTMetadataDatastoreFault\"] = reflect.TypeOf((*InaccessibleFTMetadataDatastoreFault)(nil)).Elem()\n}\n\ntype InaccessibleVFlashSource struct {\n\tVimFault\n\n\tHostName string `xml:\"hostName\"`\n}\n\nfunc init() {\n\tt[\"InaccessibleVFlashSource\"] = 
reflect.TypeOf((*InaccessibleVFlashSource)(nil)).Elem()\n}\n\ntype InaccessibleVFlashSourceFault InaccessibleVFlashSource\n\nfunc init() {\n\tt[\"InaccessibleVFlashSourceFault\"] = reflect.TypeOf((*InaccessibleVFlashSourceFault)(nil)).Elem()\n}\n\ntype IncompatibleDefaultDevice struct {\n\tMigrationFault\n\n\tDevice string `xml:\"device\"`\n}\n\nfunc init() {\n\tt[\"IncompatibleDefaultDevice\"] = reflect.TypeOf((*IncompatibleDefaultDevice)(nil)).Elem()\n}\n\ntype IncompatibleDefaultDeviceFault IncompatibleDefaultDevice\n\nfunc init() {\n\tt[\"IncompatibleDefaultDeviceFault\"] = reflect.TypeOf((*IncompatibleDefaultDeviceFault)(nil)).Elem()\n}\n\ntype IncompatibleHostForFtSecondary struct {\n\tVmFaultToleranceIssue\n\n\tHost  ManagedObjectReference `xml:\"host\"`\n\tError []LocalizedMethodFault `xml:\"error,omitempty\"`\n}\n\nfunc init() {\n\tt[\"IncompatibleHostForFtSecondary\"] = reflect.TypeOf((*IncompatibleHostForFtSecondary)(nil)).Elem()\n}\n\ntype IncompatibleHostForFtSecondaryFault IncompatibleHostForFtSecondary\n\nfunc init() {\n\tt[\"IncompatibleHostForFtSecondaryFault\"] = reflect.TypeOf((*IncompatibleHostForFtSecondaryFault)(nil)).Elem()\n}\n\ntype IncompatibleHostForVmReplication struct {\n\tReplicationFault\n\n\tVmName   string `xml:\"vmName\"`\n\tHostName string `xml:\"hostName\"`\n\tReason   string `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"IncompatibleHostForVmReplication\"] = reflect.TypeOf((*IncompatibleHostForVmReplication)(nil)).Elem()\n}\n\ntype IncompatibleHostForVmReplicationFault IncompatibleHostForVmReplication\n\nfunc init() {\n\tt[\"IncompatibleHostForVmReplicationFault\"] = reflect.TypeOf((*IncompatibleHostForVmReplicationFault)(nil)).Elem()\n}\n\ntype IncompatibleSetting struct {\n\tInvalidArgument\n\n\tConflictingProperty string `xml:\"conflictingProperty\"`\n}\n\nfunc init() {\n\tt[\"IncompatibleSetting\"] = reflect.TypeOf((*IncompatibleSetting)(nil)).Elem()\n}\n\ntype IncompatibleSettingFault IncompatibleSetting\n\nfunc init() 
{\n\tt[\"IncompatibleSettingFault\"] = reflect.TypeOf((*IncompatibleSettingFault)(nil)).Elem()\n}\n\ntype IncorrectFileType struct {\n\tFileFault\n}\n\nfunc init() {\n\tt[\"IncorrectFileType\"] = reflect.TypeOf((*IncorrectFileType)(nil)).Elem()\n}\n\ntype IncorrectFileTypeFault IncorrectFileType\n\nfunc init() {\n\tt[\"IncorrectFileTypeFault\"] = reflect.TypeOf((*IncorrectFileTypeFault)(nil)).Elem()\n}\n\ntype IncorrectHostInformation struct {\n\tNotEnoughLicenses\n}\n\nfunc init() {\n\tt[\"IncorrectHostInformation\"] = reflect.TypeOf((*IncorrectHostInformation)(nil)).Elem()\n}\n\ntype IncorrectHostInformationEvent struct {\n\tLicenseEvent\n}\n\nfunc init() {\n\tt[\"IncorrectHostInformationEvent\"] = reflect.TypeOf((*IncorrectHostInformationEvent)(nil)).Elem()\n}\n\ntype IncorrectHostInformationFault IncorrectHostInformation\n\nfunc init() {\n\tt[\"IncorrectHostInformationFault\"] = reflect.TypeOf((*IncorrectHostInformationFault)(nil)).Elem()\n}\n\ntype IndependentDiskVMotionNotSupported struct {\n\tMigrationFeatureNotSupported\n}\n\nfunc init() {\n\tt[\"IndependentDiskVMotionNotSupported\"] = reflect.TypeOf((*IndependentDiskVMotionNotSupported)(nil)).Elem()\n}\n\ntype IndependentDiskVMotionNotSupportedFault IndependentDiskVMotionNotSupported\n\nfunc init() {\n\tt[\"IndependentDiskVMotionNotSupportedFault\"] = reflect.TypeOf((*IndependentDiskVMotionNotSupportedFault)(nil)).Elem()\n}\n\ntype InflateDiskRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tId        ID                     `xml:\"id\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"InflateDiskRequestType\"] = reflect.TypeOf((*InflateDiskRequestType)(nil)).Elem()\n}\n\ntype InflateDisk_Task InflateDiskRequestType\n\nfunc init() {\n\tt[\"InflateDisk_Task\"] = reflect.TypeOf((*InflateDisk_Task)(nil)).Elem()\n}\n\ntype InflateDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype InflateVirtualDiskRequestType 
struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tName       string                  `xml:\"name\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"InflateVirtualDiskRequestType\"] = reflect.TypeOf((*InflateVirtualDiskRequestType)(nil)).Elem()\n}\n\ntype InflateVirtualDisk_Task InflateVirtualDiskRequestType\n\nfunc init() {\n\tt[\"InflateVirtualDisk_Task\"] = reflect.TypeOf((*InflateVirtualDisk_Task)(nil)).Elem()\n}\n\ntype InflateVirtualDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype InfoUpgradeEvent struct {\n\tUpgradeEvent\n}\n\nfunc init() {\n\tt[\"InfoUpgradeEvent\"] = reflect.TypeOf((*InfoUpgradeEvent)(nil)).Elem()\n}\n\ntype InheritablePolicy struct {\n\tDynamicData\n\n\tInherited bool `xml:\"inherited\"`\n}\n\nfunc init() {\n\tt[\"InheritablePolicy\"] = reflect.TypeOf((*InheritablePolicy)(nil)).Elem()\n}\n\ntype InitializeDisksRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tMapping []VsanHostDiskMapping  `xml:\"mapping\"`\n}\n\nfunc init() {\n\tt[\"InitializeDisksRequestType\"] = reflect.TypeOf((*InitializeDisksRequestType)(nil)).Elem()\n}\n\ntype InitializeDisks_Task InitializeDisksRequestType\n\nfunc init() {\n\tt[\"InitializeDisks_Task\"] = reflect.TypeOf((*InitializeDisks_Task)(nil)).Elem()\n}\n\ntype InitializeDisks_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype InitiateFileTransferFromGuest InitiateFileTransferFromGuestRequestType\n\nfunc init() {\n\tt[\"InitiateFileTransferFromGuest\"] = reflect.TypeOf((*InitiateFileTransferFromGuest)(nil)).Elem()\n}\n\ntype InitiateFileTransferFromGuestRequestType struct {\n\tThis          ManagedObjectReference  `xml:\"_this\"`\n\tVm            ManagedObjectReference  `xml:\"vm\"`\n\tAuth          BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tGuestFilePath string                  `xml:\"guestFilePath\"`\n}\n\nfunc init() 
{\n\tt[\"InitiateFileTransferFromGuestRequestType\"] = reflect.TypeOf((*InitiateFileTransferFromGuestRequestType)(nil)).Elem()\n}\n\ntype InitiateFileTransferFromGuestResponse struct {\n\tReturnval FileTransferInformation `xml:\"returnval\"`\n}\n\ntype InitiateFileTransferToGuest InitiateFileTransferToGuestRequestType\n\nfunc init() {\n\tt[\"InitiateFileTransferToGuest\"] = reflect.TypeOf((*InitiateFileTransferToGuest)(nil)).Elem()\n}\n\ntype InitiateFileTransferToGuestRequestType struct {\n\tThis           ManagedObjectReference  `xml:\"_this\"`\n\tVm             ManagedObjectReference  `xml:\"vm\"`\n\tAuth           BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tGuestFilePath  string                  `xml:\"guestFilePath\"`\n\tFileAttributes BaseGuestFileAttributes `xml:\"fileAttributes,typeattr\"`\n\tFileSize       int64                   `xml:\"fileSize\"`\n\tOverwrite      bool                    `xml:\"overwrite\"`\n}\n\nfunc init() {\n\tt[\"InitiateFileTransferToGuestRequestType\"] = reflect.TypeOf((*InitiateFileTransferToGuestRequestType)(nil)).Elem()\n}\n\ntype InitiateFileTransferToGuestResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype InstallHostPatchRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tRepository HostPatchManagerLocator `xml:\"repository\"`\n\tUpdateID   string                  `xml:\"updateID\"`\n\tForce      *bool                   `xml:\"force\"`\n}\n\nfunc init() {\n\tt[\"InstallHostPatchRequestType\"] = reflect.TypeOf((*InstallHostPatchRequestType)(nil)).Elem()\n}\n\ntype InstallHostPatchV2RequestType struct {\n\tThis       ManagedObjectReference                     `xml:\"_this\"`\n\tMetaUrls   []string                                   `xml:\"metaUrls,omitempty\"`\n\tBundleUrls []string                                   `xml:\"bundleUrls,omitempty\"`\n\tVibUrls    []string                                   `xml:\"vibUrls,omitempty\"`\n\tSpec       
*HostPatchManagerPatchManagerOperationSpec `xml:\"spec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"InstallHostPatchV2RequestType\"] = reflect.TypeOf((*InstallHostPatchV2RequestType)(nil)).Elem()\n}\n\ntype InstallHostPatchV2_Task InstallHostPatchV2RequestType\n\nfunc init() {\n\tt[\"InstallHostPatchV2_Task\"] = reflect.TypeOf((*InstallHostPatchV2_Task)(nil)).Elem()\n}\n\ntype InstallHostPatchV2_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype InstallHostPatch_Task InstallHostPatchRequestType\n\nfunc init() {\n\tt[\"InstallHostPatch_Task\"] = reflect.TypeOf((*InstallHostPatch_Task)(nil)).Elem()\n}\n\ntype InstallHostPatch_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype InstallIoFilterRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tVibUrl  string                 `xml:\"vibUrl\"`\n\tCompRes ManagedObjectReference `xml:\"compRes\"`\n}\n\nfunc init() {\n\tt[\"InstallIoFilterRequestType\"] = reflect.TypeOf((*InstallIoFilterRequestType)(nil)).Elem()\n}\n\ntype InstallIoFilter_Task InstallIoFilterRequestType\n\nfunc init() {\n\tt[\"InstallIoFilter_Task\"] = reflect.TypeOf((*InstallIoFilter_Task)(nil)).Elem()\n}\n\ntype InstallIoFilter_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype InstallServerCertificate InstallServerCertificateRequestType\n\nfunc init() {\n\tt[\"InstallServerCertificate\"] = reflect.TypeOf((*InstallServerCertificate)(nil)).Elem()\n}\n\ntype InstallServerCertificateRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tCert string                 `xml:\"cert\"`\n}\n\nfunc init() {\n\tt[\"InstallServerCertificateRequestType\"] = reflect.TypeOf((*InstallServerCertificateRequestType)(nil)).Elem()\n}\n\ntype InstallServerCertificateResponse struct {\n}\n\ntype InstallSmartCardTrustAnchor InstallSmartCardTrustAnchorRequestType\n\nfunc init() {\n\tt[\"InstallSmartCardTrustAnchor\"] = 
reflect.TypeOf((*InstallSmartCardTrustAnchor)(nil)).Elem()\n}\n\ntype InstallSmartCardTrustAnchorRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tCert string                 `xml:\"cert\"`\n}\n\nfunc init() {\n\tt[\"InstallSmartCardTrustAnchorRequestType\"] = reflect.TypeOf((*InstallSmartCardTrustAnchorRequestType)(nil)).Elem()\n}\n\ntype InstallSmartCardTrustAnchorResponse struct {\n}\n\ntype InsufficientAgentVmsDeployed struct {\n\tInsufficientResourcesFault\n\n\tHostName            string `xml:\"hostName\"`\n\tRequiredNumAgentVms int32  `xml:\"requiredNumAgentVms\"`\n\tCurrentNumAgentVms  int32  `xml:\"currentNumAgentVms\"`\n}\n\nfunc init() {\n\tt[\"InsufficientAgentVmsDeployed\"] = reflect.TypeOf((*InsufficientAgentVmsDeployed)(nil)).Elem()\n}\n\ntype InsufficientAgentVmsDeployedFault InsufficientAgentVmsDeployed\n\nfunc init() {\n\tt[\"InsufficientAgentVmsDeployedFault\"] = reflect.TypeOf((*InsufficientAgentVmsDeployedFault)(nil)).Elem()\n}\n\ntype InsufficientCpuResourcesFault struct {\n\tInsufficientResourcesFault\n\n\tUnreserved int64 `xml:\"unreserved\"`\n\tRequested  int64 `xml:\"requested\"`\n}\n\nfunc init() {\n\tt[\"InsufficientCpuResourcesFault\"] = reflect.TypeOf((*InsufficientCpuResourcesFault)(nil)).Elem()\n}\n\ntype InsufficientCpuResourcesFaultFault InsufficientCpuResourcesFault\n\nfunc init() {\n\tt[\"InsufficientCpuResourcesFaultFault\"] = reflect.TypeOf((*InsufficientCpuResourcesFaultFault)(nil)).Elem()\n}\n\ntype InsufficientDisks struct {\n\tVsanDiskFault\n}\n\nfunc init() {\n\tt[\"InsufficientDisks\"] = reflect.TypeOf((*InsufficientDisks)(nil)).Elem()\n}\n\ntype InsufficientDisksFault InsufficientDisks\n\nfunc init() {\n\tt[\"InsufficientDisksFault\"] = reflect.TypeOf((*InsufficientDisksFault)(nil)).Elem()\n}\n\ntype InsufficientFailoverResourcesEvent struct {\n\tClusterEvent\n}\n\nfunc init() {\n\tt[\"InsufficientFailoverResourcesEvent\"] = reflect.TypeOf((*InsufficientFailoverResourcesEvent)(nil)).Elem()\n}\n\ntype 
InsufficientFailoverResourcesFault struct {\n\tInsufficientResourcesFault\n}\n\nfunc init() {\n\tt[\"InsufficientFailoverResourcesFault\"] = reflect.TypeOf((*InsufficientFailoverResourcesFault)(nil)).Elem()\n}\n\ntype InsufficientFailoverResourcesFaultFault InsufficientFailoverResourcesFault\n\nfunc init() {\n\tt[\"InsufficientFailoverResourcesFaultFault\"] = reflect.TypeOf((*InsufficientFailoverResourcesFaultFault)(nil)).Elem()\n}\n\ntype InsufficientGraphicsResourcesFault struct {\n\tInsufficientResourcesFault\n}\n\nfunc init() {\n\tt[\"InsufficientGraphicsResourcesFault\"] = reflect.TypeOf((*InsufficientGraphicsResourcesFault)(nil)).Elem()\n}\n\ntype InsufficientGraphicsResourcesFaultFault InsufficientGraphicsResourcesFault\n\nfunc init() {\n\tt[\"InsufficientGraphicsResourcesFaultFault\"] = reflect.TypeOf((*InsufficientGraphicsResourcesFaultFault)(nil)).Elem()\n}\n\ntype InsufficientHostCapacityFault struct {\n\tInsufficientResourcesFault\n\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"InsufficientHostCapacityFault\"] = reflect.TypeOf((*InsufficientHostCapacityFault)(nil)).Elem()\n}\n\ntype InsufficientHostCapacityFaultFault BaseInsufficientHostCapacityFault\n\nfunc init() {\n\tt[\"InsufficientHostCapacityFaultFault\"] = reflect.TypeOf((*InsufficientHostCapacityFaultFault)(nil)).Elem()\n}\n\ntype InsufficientHostCpuCapacityFault struct {\n\tInsufficientHostCapacityFault\n\n\tUnreserved int64 `xml:\"unreserved\"`\n\tRequested  int64 `xml:\"requested\"`\n}\n\nfunc init() {\n\tt[\"InsufficientHostCpuCapacityFault\"] = reflect.TypeOf((*InsufficientHostCpuCapacityFault)(nil)).Elem()\n}\n\ntype InsufficientHostCpuCapacityFaultFault InsufficientHostCpuCapacityFault\n\nfunc init() {\n\tt[\"InsufficientHostCpuCapacityFaultFault\"] = reflect.TypeOf((*InsufficientHostCpuCapacityFaultFault)(nil)).Elem()\n}\n\ntype InsufficientHostMemoryCapacityFault struct {\n\tInsufficientHostCapacityFault\n\n\tUnreserved int64 
`xml:\"unreserved\"`\n\tRequested  int64 `xml:\"requested\"`\n}\n\nfunc init() {\n\tt[\"InsufficientHostMemoryCapacityFault\"] = reflect.TypeOf((*InsufficientHostMemoryCapacityFault)(nil)).Elem()\n}\n\ntype InsufficientHostMemoryCapacityFaultFault InsufficientHostMemoryCapacityFault\n\nfunc init() {\n\tt[\"InsufficientHostMemoryCapacityFaultFault\"] = reflect.TypeOf((*InsufficientHostMemoryCapacityFaultFault)(nil)).Elem()\n}\n\ntype InsufficientMemoryResourcesFault struct {\n\tInsufficientResourcesFault\n\n\tUnreserved int64 `xml:\"unreserved\"`\n\tRequested  int64 `xml:\"requested\"`\n}\n\nfunc init() {\n\tt[\"InsufficientMemoryResourcesFault\"] = reflect.TypeOf((*InsufficientMemoryResourcesFault)(nil)).Elem()\n}\n\ntype InsufficientMemoryResourcesFaultFault InsufficientMemoryResourcesFault\n\nfunc init() {\n\tt[\"InsufficientMemoryResourcesFaultFault\"] = reflect.TypeOf((*InsufficientMemoryResourcesFaultFault)(nil)).Elem()\n}\n\ntype InsufficientNetworkCapacity struct {\n\tInsufficientResourcesFault\n}\n\nfunc init() {\n\tt[\"InsufficientNetworkCapacity\"] = reflect.TypeOf((*InsufficientNetworkCapacity)(nil)).Elem()\n}\n\ntype InsufficientNetworkCapacityFault InsufficientNetworkCapacity\n\nfunc init() {\n\tt[\"InsufficientNetworkCapacityFault\"] = reflect.TypeOf((*InsufficientNetworkCapacityFault)(nil)).Elem()\n}\n\ntype InsufficientNetworkResourcePoolCapacity struct {\n\tInsufficientResourcesFault\n\n\tDvsName         string   `xml:\"dvsName\"`\n\tDvsUuid         string   `xml:\"dvsUuid\"`\n\tResourcePoolKey string   `xml:\"resourcePoolKey\"`\n\tAvailable       int64    `xml:\"available\"`\n\tRequested       int64    `xml:\"requested\"`\n\tDevice          []string `xml:\"device\"`\n}\n\nfunc init() {\n\tt[\"InsufficientNetworkResourcePoolCapacity\"] = reflect.TypeOf((*InsufficientNetworkResourcePoolCapacity)(nil)).Elem()\n}\n\ntype InsufficientNetworkResourcePoolCapacityFault InsufficientNetworkResourcePoolCapacity\n\nfunc init() 
{\n\tt[\"InsufficientNetworkResourcePoolCapacityFault\"] = reflect.TypeOf((*InsufficientNetworkResourcePoolCapacityFault)(nil)).Elem()\n}\n\ntype InsufficientPerCpuCapacity struct {\n\tInsufficientHostCapacityFault\n}\n\nfunc init() {\n\tt[\"InsufficientPerCpuCapacity\"] = reflect.TypeOf((*InsufficientPerCpuCapacity)(nil)).Elem()\n}\n\ntype InsufficientPerCpuCapacityFault InsufficientPerCpuCapacity\n\nfunc init() {\n\tt[\"InsufficientPerCpuCapacityFault\"] = reflect.TypeOf((*InsufficientPerCpuCapacityFault)(nil)).Elem()\n}\n\ntype InsufficientResourcesFault struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"InsufficientResourcesFault\"] = reflect.TypeOf((*InsufficientResourcesFault)(nil)).Elem()\n}\n\ntype InsufficientResourcesFaultFault BaseInsufficientResourcesFault\n\nfunc init() {\n\tt[\"InsufficientResourcesFaultFault\"] = reflect.TypeOf((*InsufficientResourcesFaultFault)(nil)).Elem()\n}\n\ntype InsufficientStandbyCpuResource struct {\n\tInsufficientStandbyResource\n\n\tAvailable int64 `xml:\"available\"`\n\tRequested int64 `xml:\"requested\"`\n}\n\nfunc init() {\n\tt[\"InsufficientStandbyCpuResource\"] = reflect.TypeOf((*InsufficientStandbyCpuResource)(nil)).Elem()\n}\n\ntype InsufficientStandbyCpuResourceFault InsufficientStandbyCpuResource\n\nfunc init() {\n\tt[\"InsufficientStandbyCpuResourceFault\"] = reflect.TypeOf((*InsufficientStandbyCpuResourceFault)(nil)).Elem()\n}\n\ntype InsufficientStandbyMemoryResource struct {\n\tInsufficientStandbyResource\n\n\tAvailable int64 `xml:\"available\"`\n\tRequested int64 `xml:\"requested\"`\n}\n\nfunc init() {\n\tt[\"InsufficientStandbyMemoryResource\"] = reflect.TypeOf((*InsufficientStandbyMemoryResource)(nil)).Elem()\n}\n\ntype InsufficientStandbyMemoryResourceFault InsufficientStandbyMemoryResource\n\nfunc init() {\n\tt[\"InsufficientStandbyMemoryResourceFault\"] = reflect.TypeOf((*InsufficientStandbyMemoryResourceFault)(nil)).Elem()\n}\n\ntype InsufficientStandbyResource struct 
{\n\tInsufficientResourcesFault\n}\n\nfunc init() {\n\tt[\"InsufficientStandbyResource\"] = reflect.TypeOf((*InsufficientStandbyResource)(nil)).Elem()\n}\n\ntype InsufficientStandbyResourceFault BaseInsufficientStandbyResource\n\nfunc init() {\n\tt[\"InsufficientStandbyResourceFault\"] = reflect.TypeOf((*InsufficientStandbyResourceFault)(nil)).Elem()\n}\n\ntype InsufficientStorageIops struct {\n\tVimFault\n\n\tUnreservedIops int64  `xml:\"unreservedIops\"`\n\tRequestedIops  int64  `xml:\"requestedIops\"`\n\tDatastoreName  string `xml:\"datastoreName\"`\n}\n\nfunc init() {\n\tt[\"InsufficientStorageIops\"] = reflect.TypeOf((*InsufficientStorageIops)(nil)).Elem()\n}\n\ntype InsufficientStorageIopsFault InsufficientStorageIops\n\nfunc init() {\n\tt[\"InsufficientStorageIopsFault\"] = reflect.TypeOf((*InsufficientStorageIopsFault)(nil)).Elem()\n}\n\ntype InsufficientStorageSpace struct {\n\tInsufficientResourcesFault\n}\n\nfunc init() {\n\tt[\"InsufficientStorageSpace\"] = reflect.TypeOf((*InsufficientStorageSpace)(nil)).Elem()\n}\n\ntype InsufficientStorageSpaceFault InsufficientStorageSpace\n\nfunc init() {\n\tt[\"InsufficientStorageSpaceFault\"] = reflect.TypeOf((*InsufficientStorageSpaceFault)(nil)).Elem()\n}\n\ntype InsufficientVFlashResourcesFault struct {\n\tInsufficientResourcesFault\n\n\tFreeSpaceInMB      int64 `xml:\"freeSpaceInMB,omitempty\"`\n\tFreeSpace          int64 `xml:\"freeSpace\"`\n\tRequestedSpaceInMB int64 `xml:\"requestedSpaceInMB,omitempty\"`\n\tRequestedSpace     int64 `xml:\"requestedSpace\"`\n}\n\nfunc init() {\n\tt[\"InsufficientVFlashResourcesFault\"] = reflect.TypeOf((*InsufficientVFlashResourcesFault)(nil)).Elem()\n}\n\ntype InsufficientVFlashResourcesFaultFault InsufficientVFlashResourcesFault\n\nfunc init() {\n\tt[\"InsufficientVFlashResourcesFaultFault\"] = reflect.TypeOf((*InsufficientVFlashResourcesFaultFault)(nil)).Elem()\n}\n\ntype IntExpression struct {\n\tNegatableExpression\n\n\tValue int32 `xml:\"value,omitempty\"`\n}\n\nfunc 
init() {\n\tt[\"IntExpression\"] = reflect.TypeOf((*IntExpression)(nil)).Elem()\n}\n\ntype IntOption struct {\n\tOptionType\n\n\tMin          int32 `xml:\"min\"`\n\tMax          int32 `xml:\"max\"`\n\tDefaultValue int32 `xml:\"defaultValue\"`\n}\n\nfunc init() {\n\tt[\"IntOption\"] = reflect.TypeOf((*IntOption)(nil)).Elem()\n}\n\ntype IntPolicy struct {\n\tInheritablePolicy\n\n\tValue int32 `xml:\"value,omitempty\"`\n}\n\nfunc init() {\n\tt[\"IntPolicy\"] = reflect.TypeOf((*IntPolicy)(nil)).Elem()\n}\n\ntype InvalidAffinitySettingFault struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"InvalidAffinitySettingFault\"] = reflect.TypeOf((*InvalidAffinitySettingFault)(nil)).Elem()\n}\n\ntype InvalidAffinitySettingFaultFault InvalidAffinitySettingFault\n\nfunc init() {\n\tt[\"InvalidAffinitySettingFaultFault\"] = reflect.TypeOf((*InvalidAffinitySettingFaultFault)(nil)).Elem()\n}\n\ntype InvalidArgument struct {\n\tRuntimeFault\n\n\tInvalidProperty string `xml:\"invalidProperty,omitempty\"`\n}\n\nfunc init() {\n\tt[\"InvalidArgument\"] = reflect.TypeOf((*InvalidArgument)(nil)).Elem()\n}\n\ntype InvalidArgumentFault BaseInvalidArgument\n\nfunc init() {\n\tt[\"InvalidArgumentFault\"] = reflect.TypeOf((*InvalidArgumentFault)(nil)).Elem()\n}\n\ntype InvalidBmcRole struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"InvalidBmcRole\"] = reflect.TypeOf((*InvalidBmcRole)(nil)).Elem()\n}\n\ntype InvalidBmcRoleFault InvalidBmcRole\n\nfunc init() {\n\tt[\"InvalidBmcRoleFault\"] = reflect.TypeOf((*InvalidBmcRoleFault)(nil)).Elem()\n}\n\ntype InvalidBundle struct {\n\tPlatformConfigFault\n}\n\nfunc init() {\n\tt[\"InvalidBundle\"] = reflect.TypeOf((*InvalidBundle)(nil)).Elem()\n}\n\ntype InvalidBundleFault InvalidBundle\n\nfunc init() {\n\tt[\"InvalidBundleFault\"] = reflect.TypeOf((*InvalidBundleFault)(nil)).Elem()\n}\n\ntype InvalidCAMCertificate struct {\n\tInvalidCAMServer\n}\n\nfunc init() {\n\tt[\"InvalidCAMCertificate\"] = 
reflect.TypeOf((*InvalidCAMCertificate)(nil)).Elem()\n}\n\ntype InvalidCAMCertificateFault InvalidCAMCertificate\n\nfunc init() {\n\tt[\"InvalidCAMCertificateFault\"] = reflect.TypeOf((*InvalidCAMCertificateFault)(nil)).Elem()\n}\n\ntype InvalidCAMServer struct {\n\tActiveDirectoryFault\n\n\tCamServer string `xml:\"camServer\"`\n}\n\nfunc init() {\n\tt[\"InvalidCAMServer\"] = reflect.TypeOf((*InvalidCAMServer)(nil)).Elem()\n}\n\ntype InvalidCAMServerFault BaseInvalidCAMServer\n\nfunc init() {\n\tt[\"InvalidCAMServerFault\"] = reflect.TypeOf((*InvalidCAMServerFault)(nil)).Elem()\n}\n\ntype InvalidClientCertificate struct {\n\tInvalidLogin\n}\n\nfunc init() {\n\tt[\"InvalidClientCertificate\"] = reflect.TypeOf((*InvalidClientCertificate)(nil)).Elem()\n}\n\ntype InvalidClientCertificateFault InvalidClientCertificate\n\nfunc init() {\n\tt[\"InvalidClientCertificateFault\"] = reflect.TypeOf((*InvalidClientCertificateFault)(nil)).Elem()\n}\n\ntype InvalidCollectorVersion struct {\n\tMethodFault\n}\n\nfunc init() {\n\tt[\"InvalidCollectorVersion\"] = reflect.TypeOf((*InvalidCollectorVersion)(nil)).Elem()\n}\n\ntype InvalidCollectorVersionFault InvalidCollectorVersion\n\nfunc init() {\n\tt[\"InvalidCollectorVersionFault\"] = reflect.TypeOf((*InvalidCollectorVersionFault)(nil)).Elem()\n}\n\ntype InvalidController struct {\n\tInvalidDeviceSpec\n\n\tControllerKey int32 `xml:\"controllerKey\"`\n}\n\nfunc init() {\n\tt[\"InvalidController\"] = reflect.TypeOf((*InvalidController)(nil)).Elem()\n}\n\ntype InvalidControllerFault InvalidController\n\nfunc init() {\n\tt[\"InvalidControllerFault\"] = reflect.TypeOf((*InvalidControllerFault)(nil)).Elem()\n}\n\ntype InvalidDasConfigArgument struct {\n\tInvalidArgument\n\n\tEntry       string `xml:\"entry,omitempty\"`\n\tClusterName string `xml:\"clusterName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"InvalidDasConfigArgument\"] = reflect.TypeOf((*InvalidDasConfigArgument)(nil)).Elem()\n}\n\ntype InvalidDasConfigArgumentFault 
InvalidDasConfigArgument\n\nfunc init() {\n\tt[\"InvalidDasConfigArgumentFault\"] = reflect.TypeOf((*InvalidDasConfigArgumentFault)(nil)).Elem()\n}\n\ntype InvalidDasRestartPriorityForFtVm struct {\n\tInvalidArgument\n\n\tVm     ManagedObjectReference `xml:\"vm\"`\n\tVmName string                 `xml:\"vmName\"`\n}\n\nfunc init() {\n\tt[\"InvalidDasRestartPriorityForFtVm\"] = reflect.TypeOf((*InvalidDasRestartPriorityForFtVm)(nil)).Elem()\n}\n\ntype InvalidDasRestartPriorityForFtVmFault InvalidDasRestartPriorityForFtVm\n\nfunc init() {\n\tt[\"InvalidDasRestartPriorityForFtVmFault\"] = reflect.TypeOf((*InvalidDasRestartPriorityForFtVmFault)(nil)).Elem()\n}\n\ntype InvalidDatastore struct {\n\tVimFault\n\n\tDatastore *ManagedObjectReference `xml:\"datastore,omitempty\"`\n\tName      string                  `xml:\"name,omitempty\"`\n}\n\nfunc init() {\n\tt[\"InvalidDatastore\"] = reflect.TypeOf((*InvalidDatastore)(nil)).Elem()\n}\n\ntype InvalidDatastoreFault BaseInvalidDatastore\n\nfunc init() {\n\tt[\"InvalidDatastoreFault\"] = reflect.TypeOf((*InvalidDatastoreFault)(nil)).Elem()\n}\n\ntype InvalidDatastorePath struct {\n\tInvalidDatastore\n\n\tDatastorePath string `xml:\"datastorePath\"`\n}\n\nfunc init() {\n\tt[\"InvalidDatastorePath\"] = reflect.TypeOf((*InvalidDatastorePath)(nil)).Elem()\n}\n\ntype InvalidDatastorePathFault InvalidDatastorePath\n\nfunc init() {\n\tt[\"InvalidDatastorePathFault\"] = reflect.TypeOf((*InvalidDatastorePathFault)(nil)).Elem()\n}\n\ntype InvalidDatastoreState struct {\n\tInvalidState\n\n\tDatastoreName string `xml:\"datastoreName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"InvalidDatastoreState\"] = reflect.TypeOf((*InvalidDatastoreState)(nil)).Elem()\n}\n\ntype InvalidDatastoreStateFault InvalidDatastoreState\n\nfunc init() {\n\tt[\"InvalidDatastoreStateFault\"] = reflect.TypeOf((*InvalidDatastoreStateFault)(nil)).Elem()\n}\n\ntype InvalidDeviceBacking struct {\n\tInvalidDeviceSpec\n}\n\nfunc init() {\n\tt[\"InvalidDeviceBacking\"] = 
reflect.TypeOf((*InvalidDeviceBacking)(nil)).Elem()\n}\n\ntype InvalidDeviceBackingFault InvalidDeviceBacking\n\nfunc init() {\n\tt[\"InvalidDeviceBackingFault\"] = reflect.TypeOf((*InvalidDeviceBackingFault)(nil)).Elem()\n}\n\ntype InvalidDeviceOperation struct {\n\tInvalidDeviceSpec\n\n\tBadOp     VirtualDeviceConfigSpecOperation     `xml:\"badOp,omitempty\"`\n\tBadFileOp VirtualDeviceConfigSpecFileOperation `xml:\"badFileOp,omitempty\"`\n}\n\nfunc init() {\n\tt[\"InvalidDeviceOperation\"] = reflect.TypeOf((*InvalidDeviceOperation)(nil)).Elem()\n}\n\ntype InvalidDeviceOperationFault InvalidDeviceOperation\n\nfunc init() {\n\tt[\"InvalidDeviceOperationFault\"] = reflect.TypeOf((*InvalidDeviceOperationFault)(nil)).Elem()\n}\n\ntype InvalidDeviceSpec struct {\n\tInvalidVmConfig\n\n\tDeviceIndex int32 `xml:\"deviceIndex\"`\n}\n\nfunc init() {\n\tt[\"InvalidDeviceSpec\"] = reflect.TypeOf((*InvalidDeviceSpec)(nil)).Elem()\n}\n\ntype InvalidDeviceSpecFault BaseInvalidDeviceSpec\n\nfunc init() {\n\tt[\"InvalidDeviceSpecFault\"] = reflect.TypeOf((*InvalidDeviceSpecFault)(nil)).Elem()\n}\n\ntype InvalidDiskFormat struct {\n\tInvalidFormat\n}\n\nfunc init() {\n\tt[\"InvalidDiskFormat\"] = reflect.TypeOf((*InvalidDiskFormat)(nil)).Elem()\n}\n\ntype InvalidDiskFormatFault InvalidDiskFormat\n\nfunc init() {\n\tt[\"InvalidDiskFormatFault\"] = reflect.TypeOf((*InvalidDiskFormatFault)(nil)).Elem()\n}\n\ntype InvalidDrsBehaviorForFtVm struct {\n\tInvalidArgument\n\n\tVm     ManagedObjectReference `xml:\"vm\"`\n\tVmName string                 `xml:\"vmName\"`\n}\n\nfunc init() {\n\tt[\"InvalidDrsBehaviorForFtVm\"] = reflect.TypeOf((*InvalidDrsBehaviorForFtVm)(nil)).Elem()\n}\n\ntype InvalidDrsBehaviorForFtVmFault InvalidDrsBehaviorForFtVm\n\nfunc init() {\n\tt[\"InvalidDrsBehaviorForFtVmFault\"] = reflect.TypeOf((*InvalidDrsBehaviorForFtVmFault)(nil)).Elem()\n}\n\ntype InvalidEditionEvent struct {\n\tLicenseEvent\n\n\tFeature string `xml:\"feature\"`\n}\n\nfunc init() 
{\n\tt[\"InvalidEditionEvent\"] = reflect.TypeOf((*InvalidEditionEvent)(nil)).Elem()\n}\n\ntype InvalidEditionLicense struct {\n\tNotEnoughLicenses\n\n\tFeature string `xml:\"feature\"`\n}\n\nfunc init() {\n\tt[\"InvalidEditionLicense\"] = reflect.TypeOf((*InvalidEditionLicense)(nil)).Elem()\n}\n\ntype InvalidEditionLicenseFault InvalidEditionLicense\n\nfunc init() {\n\tt[\"InvalidEditionLicenseFault\"] = reflect.TypeOf((*InvalidEditionLicenseFault)(nil)).Elem()\n}\n\ntype InvalidEvent struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"InvalidEvent\"] = reflect.TypeOf((*InvalidEvent)(nil)).Elem()\n}\n\ntype InvalidEventFault InvalidEvent\n\nfunc init() {\n\tt[\"InvalidEventFault\"] = reflect.TypeOf((*InvalidEventFault)(nil)).Elem()\n}\n\ntype InvalidFolder struct {\n\tVimFault\n\n\tTarget ManagedObjectReference `xml:\"target\"`\n}\n\nfunc init() {\n\tt[\"InvalidFolder\"] = reflect.TypeOf((*InvalidFolder)(nil)).Elem()\n}\n\ntype InvalidFolderFault BaseInvalidFolder\n\nfunc init() {\n\tt[\"InvalidFolderFault\"] = reflect.TypeOf((*InvalidFolderFault)(nil)).Elem()\n}\n\ntype InvalidFormat struct {\n\tVmConfigFault\n}\n\nfunc init() {\n\tt[\"InvalidFormat\"] = reflect.TypeOf((*InvalidFormat)(nil)).Elem()\n}\n\ntype InvalidFormatFault BaseInvalidFormat\n\nfunc init() {\n\tt[\"InvalidFormatFault\"] = reflect.TypeOf((*InvalidFormatFault)(nil)).Elem()\n}\n\ntype InvalidGuestLogin struct {\n\tGuestOperationsFault\n}\n\nfunc init() {\n\tt[\"InvalidGuestLogin\"] = reflect.TypeOf((*InvalidGuestLogin)(nil)).Elem()\n}\n\ntype InvalidGuestLoginFault InvalidGuestLogin\n\nfunc init() {\n\tt[\"InvalidGuestLoginFault\"] = reflect.TypeOf((*InvalidGuestLoginFault)(nil)).Elem()\n}\n\ntype InvalidHostConnectionState struct {\n\tInvalidHostState\n}\n\nfunc init() {\n\tt[\"InvalidHostConnectionState\"] = reflect.TypeOf((*InvalidHostConnectionState)(nil)).Elem()\n}\n\ntype InvalidHostConnectionStateFault InvalidHostConnectionState\n\nfunc init() {\n\tt[\"InvalidHostConnectionStateFault\"] = 
reflect.TypeOf((*InvalidHostConnectionStateFault)(nil)).Elem()\n}\n\ntype InvalidHostName struct {\n\tHostConfigFault\n}\n\nfunc init() {\n\tt[\"InvalidHostName\"] = reflect.TypeOf((*InvalidHostName)(nil)).Elem()\n}\n\ntype InvalidHostNameFault InvalidHostName\n\nfunc init() {\n\tt[\"InvalidHostNameFault\"] = reflect.TypeOf((*InvalidHostNameFault)(nil)).Elem()\n}\n\ntype InvalidHostState struct {\n\tInvalidState\n\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"InvalidHostState\"] = reflect.TypeOf((*InvalidHostState)(nil)).Elem()\n}\n\ntype InvalidHostStateFault BaseInvalidHostState\n\nfunc init() {\n\tt[\"InvalidHostStateFault\"] = reflect.TypeOf((*InvalidHostStateFault)(nil)).Elem()\n}\n\ntype InvalidIndexArgument struct {\n\tInvalidArgument\n\n\tKey string `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"InvalidIndexArgument\"] = reflect.TypeOf((*InvalidIndexArgument)(nil)).Elem()\n}\n\ntype InvalidIndexArgumentFault InvalidIndexArgument\n\nfunc init() {\n\tt[\"InvalidIndexArgumentFault\"] = reflect.TypeOf((*InvalidIndexArgumentFault)(nil)).Elem()\n}\n\ntype InvalidIpfixConfig struct {\n\tDvsFault\n\n\tProperty string `xml:\"property,omitempty\"`\n}\n\nfunc init() {\n\tt[\"InvalidIpfixConfig\"] = reflect.TypeOf((*InvalidIpfixConfig)(nil)).Elem()\n}\n\ntype InvalidIpfixConfigFault InvalidIpfixConfig\n\nfunc init() {\n\tt[\"InvalidIpfixConfigFault\"] = reflect.TypeOf((*InvalidIpfixConfigFault)(nil)).Elem()\n}\n\ntype InvalidIpmiLoginInfo struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"InvalidIpmiLoginInfo\"] = reflect.TypeOf((*InvalidIpmiLoginInfo)(nil)).Elem()\n}\n\ntype InvalidIpmiLoginInfoFault InvalidIpmiLoginInfo\n\nfunc init() {\n\tt[\"InvalidIpmiLoginInfoFault\"] = reflect.TypeOf((*InvalidIpmiLoginInfoFault)(nil)).Elem()\n}\n\ntype InvalidIpmiMacAddress struct {\n\tVimFault\n\n\tUserProvidedMacAddress string `xml:\"userProvidedMacAddress\"`\n\tObservedMacAddress     string `xml:\"observedMacAddress\"`\n}\n\nfunc init() 
{\n\tt[\"InvalidIpmiMacAddress\"] = reflect.TypeOf((*InvalidIpmiMacAddress)(nil)).Elem()\n}\n\ntype InvalidIpmiMacAddressFault InvalidIpmiMacAddress\n\nfunc init() {\n\tt[\"InvalidIpmiMacAddressFault\"] = reflect.TypeOf((*InvalidIpmiMacAddressFault)(nil)).Elem()\n}\n\ntype InvalidLicense struct {\n\tVimFault\n\n\tLicenseContent string `xml:\"licenseContent\"`\n}\n\nfunc init() {\n\tt[\"InvalidLicense\"] = reflect.TypeOf((*InvalidLicense)(nil)).Elem()\n}\n\ntype InvalidLicenseFault InvalidLicense\n\nfunc init() {\n\tt[\"InvalidLicenseFault\"] = reflect.TypeOf((*InvalidLicenseFault)(nil)).Elem()\n}\n\ntype InvalidLocale struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"InvalidLocale\"] = reflect.TypeOf((*InvalidLocale)(nil)).Elem()\n}\n\ntype InvalidLocaleFault InvalidLocale\n\nfunc init() {\n\tt[\"InvalidLocaleFault\"] = reflect.TypeOf((*InvalidLocaleFault)(nil)).Elem()\n}\n\ntype InvalidLogin struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"InvalidLogin\"] = reflect.TypeOf((*InvalidLogin)(nil)).Elem()\n}\n\ntype InvalidLoginFault BaseInvalidLogin\n\nfunc init() {\n\tt[\"InvalidLoginFault\"] = reflect.TypeOf((*InvalidLoginFault)(nil)).Elem()\n}\n\ntype InvalidName struct {\n\tVimFault\n\n\tName   string                  `xml:\"name\"`\n\tEntity *ManagedObjectReference `xml:\"entity,omitempty\"`\n}\n\nfunc init() {\n\tt[\"InvalidName\"] = reflect.TypeOf((*InvalidName)(nil)).Elem()\n}\n\ntype InvalidNameFault InvalidName\n\nfunc init() {\n\tt[\"InvalidNameFault\"] = reflect.TypeOf((*InvalidNameFault)(nil)).Elem()\n}\n\ntype InvalidNasCredentials struct {\n\tNasConfigFault\n\n\tUserName string `xml:\"userName\"`\n}\n\nfunc init() {\n\tt[\"InvalidNasCredentials\"] = reflect.TypeOf((*InvalidNasCredentials)(nil)).Elem()\n}\n\ntype InvalidNasCredentialsFault InvalidNasCredentials\n\nfunc init() {\n\tt[\"InvalidNasCredentialsFault\"] = reflect.TypeOf((*InvalidNasCredentialsFault)(nil)).Elem()\n}\n\ntype InvalidNetworkInType struct {\n\tVAppPropertyFault\n}\n\nfunc init() 
{\n\tt[\"InvalidNetworkInType\"] = reflect.TypeOf((*InvalidNetworkInType)(nil)).Elem()\n}\n\ntype InvalidNetworkInTypeFault InvalidNetworkInType\n\nfunc init() {\n\tt[\"InvalidNetworkInTypeFault\"] = reflect.TypeOf((*InvalidNetworkInTypeFault)(nil)).Elem()\n}\n\ntype InvalidNetworkResource struct {\n\tNasConfigFault\n\n\tRemoteHost string `xml:\"remoteHost\"`\n\tRemotePath string `xml:\"remotePath\"`\n}\n\nfunc init() {\n\tt[\"InvalidNetworkResource\"] = reflect.TypeOf((*InvalidNetworkResource)(nil)).Elem()\n}\n\ntype InvalidNetworkResourceFault InvalidNetworkResource\n\nfunc init() {\n\tt[\"InvalidNetworkResourceFault\"] = reflect.TypeOf((*InvalidNetworkResourceFault)(nil)).Elem()\n}\n\ntype InvalidOperationOnSecondaryVm struct {\n\tVmFaultToleranceIssue\n\n\tInstanceUuid string `xml:\"instanceUuid,omitempty\"`\n}\n\nfunc init() {\n\tt[\"InvalidOperationOnSecondaryVm\"] = reflect.TypeOf((*InvalidOperationOnSecondaryVm)(nil)).Elem()\n}\n\ntype InvalidOperationOnSecondaryVmFault InvalidOperationOnSecondaryVm\n\nfunc init() {\n\tt[\"InvalidOperationOnSecondaryVmFault\"] = reflect.TypeOf((*InvalidOperationOnSecondaryVmFault)(nil)).Elem()\n}\n\ntype InvalidPowerState struct {\n\tInvalidState\n\n\tRequestedState VirtualMachinePowerState `xml:\"requestedState,omitempty\"`\n\tExistingState  VirtualMachinePowerState `xml:\"existingState\"`\n}\n\nfunc init() {\n\tt[\"InvalidPowerState\"] = reflect.TypeOf((*InvalidPowerState)(nil)).Elem()\n}\n\ntype InvalidPowerStateFault InvalidPowerState\n\nfunc init() {\n\tt[\"InvalidPowerStateFault\"] = reflect.TypeOf((*InvalidPowerStateFault)(nil)).Elem()\n}\n\ntype InvalidPrivilege struct {\n\tVimFault\n\n\tPrivilege string `xml:\"privilege\"`\n}\n\nfunc init() {\n\tt[\"InvalidPrivilege\"] = reflect.TypeOf((*InvalidPrivilege)(nil)).Elem()\n}\n\ntype InvalidPrivilegeFault InvalidPrivilege\n\nfunc init() {\n\tt[\"InvalidPrivilegeFault\"] = reflect.TypeOf((*InvalidPrivilegeFault)(nil)).Elem()\n}\n\ntype InvalidProfileReferenceHost struct 
{\n\tRuntimeFault\n\n\tReason      string                  `xml:\"reason,omitempty\"`\n\tHost        *ManagedObjectReference `xml:\"host,omitempty\"`\n\tProfile     *ManagedObjectReference `xml:\"profile,omitempty\"`\n\tProfileName string                  `xml:\"profileName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"InvalidProfileReferenceHost\"] = reflect.TypeOf((*InvalidProfileReferenceHost)(nil)).Elem()\n}\n\ntype InvalidProfileReferenceHostFault InvalidProfileReferenceHost\n\nfunc init() {\n\tt[\"InvalidProfileReferenceHostFault\"] = reflect.TypeOf((*InvalidProfileReferenceHostFault)(nil)).Elem()\n}\n\ntype InvalidProperty struct {\n\tMethodFault\n\n\tName string `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"InvalidProperty\"] = reflect.TypeOf((*InvalidProperty)(nil)).Elem()\n}\n\ntype InvalidPropertyFault InvalidProperty\n\nfunc init() {\n\tt[\"InvalidPropertyFault\"] = reflect.TypeOf((*InvalidPropertyFault)(nil)).Elem()\n}\n\ntype InvalidPropertyType struct {\n\tVAppPropertyFault\n}\n\nfunc init() {\n\tt[\"InvalidPropertyType\"] = reflect.TypeOf((*InvalidPropertyType)(nil)).Elem()\n}\n\ntype InvalidPropertyTypeFault InvalidPropertyType\n\nfunc init() {\n\tt[\"InvalidPropertyTypeFault\"] = reflect.TypeOf((*InvalidPropertyTypeFault)(nil)).Elem()\n}\n\ntype InvalidPropertyValue struct {\n\tVAppPropertyFault\n}\n\nfunc init() {\n\tt[\"InvalidPropertyValue\"] = reflect.TypeOf((*InvalidPropertyValue)(nil)).Elem()\n}\n\ntype InvalidPropertyValueFault BaseInvalidPropertyValue\n\nfunc init() {\n\tt[\"InvalidPropertyValueFault\"] = reflect.TypeOf((*InvalidPropertyValueFault)(nil)).Elem()\n}\n\ntype InvalidRequest struct {\n\tRuntimeFault\n}\n\nfunc init() {\n\tt[\"InvalidRequest\"] = reflect.TypeOf((*InvalidRequest)(nil)).Elem()\n}\n\ntype InvalidRequestFault BaseInvalidRequest\n\nfunc init() {\n\tt[\"InvalidRequestFault\"] = reflect.TypeOf((*InvalidRequestFault)(nil)).Elem()\n}\n\ntype InvalidResourcePoolStructureFault struct {\n\tInsufficientResourcesFault\n}\n\nfunc init() 
{\n\tt[\"InvalidResourcePoolStructureFault\"] = reflect.TypeOf((*InvalidResourcePoolStructureFault)(nil)).Elem()\n}\n\ntype InvalidResourcePoolStructureFaultFault InvalidResourcePoolStructureFault\n\nfunc init() {\n\tt[\"InvalidResourcePoolStructureFaultFault\"] = reflect.TypeOf((*InvalidResourcePoolStructureFaultFault)(nil)).Elem()\n}\n\ntype InvalidSnapshotFormat struct {\n\tInvalidFormat\n}\n\nfunc init() {\n\tt[\"InvalidSnapshotFormat\"] = reflect.TypeOf((*InvalidSnapshotFormat)(nil)).Elem()\n}\n\ntype InvalidSnapshotFormatFault InvalidSnapshotFormat\n\nfunc init() {\n\tt[\"InvalidSnapshotFormatFault\"] = reflect.TypeOf((*InvalidSnapshotFormatFault)(nil)).Elem()\n}\n\ntype InvalidState struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"InvalidState\"] = reflect.TypeOf((*InvalidState)(nil)).Elem()\n}\n\ntype InvalidStateFault BaseInvalidState\n\nfunc init() {\n\tt[\"InvalidStateFault\"] = reflect.TypeOf((*InvalidStateFault)(nil)).Elem()\n}\n\ntype InvalidType struct {\n\tInvalidRequest\n\n\tArgument string `xml:\"argument,omitempty\"`\n}\n\nfunc init() {\n\tt[\"InvalidType\"] = reflect.TypeOf((*InvalidType)(nil)).Elem()\n}\n\ntype InvalidTypeFault InvalidType\n\nfunc init() {\n\tt[\"InvalidTypeFault\"] = reflect.TypeOf((*InvalidTypeFault)(nil)).Elem()\n}\n\ntype InvalidVmConfig struct {\n\tVmConfigFault\n\n\tProperty string `xml:\"property,omitempty\"`\n}\n\nfunc init() {\n\tt[\"InvalidVmConfig\"] = reflect.TypeOf((*InvalidVmConfig)(nil)).Elem()\n}\n\ntype InvalidVmConfigFault BaseInvalidVmConfig\n\nfunc init() {\n\tt[\"InvalidVmConfigFault\"] = reflect.TypeOf((*InvalidVmConfigFault)(nil)).Elem()\n}\n\ntype InvalidVmState struct {\n\tInvalidState\n\n\tVm ManagedObjectReference `xml:\"vm\"`\n}\n\nfunc init() {\n\tt[\"InvalidVmState\"] = reflect.TypeOf((*InvalidVmState)(nil)).Elem()\n}\n\ntype InvalidVmStateFault InvalidVmState\n\nfunc init() {\n\tt[\"InvalidVmStateFault\"] = reflect.TypeOf((*InvalidVmStateFault)(nil)).Elem()\n}\n\ntype InventoryDescription struct 
{\n\tDynamicData\n\n\tNumHosts           int32 `xml:\"numHosts\"`\n\tNumVirtualMachines int32 `xml:\"numVirtualMachines\"`\n\tNumResourcePools   int32 `xml:\"numResourcePools,omitempty\"`\n\tNumClusters        int32 `xml:\"numClusters,omitempty\"`\n\tNumCpuDev          int32 `xml:\"numCpuDev,omitempty\"`\n\tNumNetDev          int32 `xml:\"numNetDev,omitempty\"`\n\tNumDiskDev         int32 `xml:\"numDiskDev,omitempty\"`\n\tNumvCpuDev         int32 `xml:\"numvCpuDev,omitempty\"`\n\tNumvNetDev         int32 `xml:\"numvNetDev,omitempty\"`\n\tNumvDiskDev        int32 `xml:\"numvDiskDev,omitempty\"`\n}\n\nfunc init() {\n\tt[\"InventoryDescription\"] = reflect.TypeOf((*InventoryDescription)(nil)).Elem()\n}\n\ntype InventoryHasStandardAloneHosts struct {\n\tNotEnoughLicenses\n\n\tHosts []string `xml:\"hosts\"`\n}\n\nfunc init() {\n\tt[\"InventoryHasStandardAloneHosts\"] = reflect.TypeOf((*InventoryHasStandardAloneHosts)(nil)).Elem()\n}\n\ntype InventoryHasStandardAloneHostsFault InventoryHasStandardAloneHosts\n\nfunc init() {\n\tt[\"InventoryHasStandardAloneHostsFault\"] = reflect.TypeOf((*InventoryHasStandardAloneHostsFault)(nil)).Elem()\n}\n\ntype IoFilterHostIssue struct {\n\tDynamicData\n\n\tHost  ManagedObjectReference `xml:\"host\"`\n\tIssue []LocalizedMethodFault `xml:\"issue\"`\n}\n\nfunc init() {\n\tt[\"IoFilterHostIssue\"] = reflect.TypeOf((*IoFilterHostIssue)(nil)).Elem()\n}\n\ntype IoFilterInfo struct {\n\tDynamicData\n\n\tId          string `xml:\"id\"`\n\tName        string `xml:\"name\"`\n\tVendor      string `xml:\"vendor\"`\n\tVersion     string `xml:\"version\"`\n\tType        string `xml:\"type,omitempty\"`\n\tSummary     string `xml:\"summary,omitempty\"`\n\tReleaseDate string `xml:\"releaseDate,omitempty\"`\n}\n\nfunc init() {\n\tt[\"IoFilterInfo\"] = reflect.TypeOf((*IoFilterInfo)(nil)).Elem()\n}\n\ntype IoFilterQueryIssueResult struct {\n\tDynamicData\n\n\tOpType    string              `xml:\"opType\"`\n\tHostIssue []IoFilterHostIssue 
`xml:\"hostIssue,omitempty\"`\n}\n\nfunc init() {\n\tt[\"IoFilterQueryIssueResult\"] = reflect.TypeOf((*IoFilterQueryIssueResult)(nil)).Elem()\n}\n\ntype IpAddress struct {\n\tNegatableExpression\n}\n\nfunc init() {\n\tt[\"IpAddress\"] = reflect.TypeOf((*IpAddress)(nil)).Elem()\n}\n\ntype IpAddressProfile struct {\n\tApplyProfile\n}\n\nfunc init() {\n\tt[\"IpAddressProfile\"] = reflect.TypeOf((*IpAddressProfile)(nil)).Elem()\n}\n\ntype IpHostnameGeneratorError struct {\n\tCustomizationFault\n}\n\nfunc init() {\n\tt[\"IpHostnameGeneratorError\"] = reflect.TypeOf((*IpHostnameGeneratorError)(nil)).Elem()\n}\n\ntype IpHostnameGeneratorErrorFault IpHostnameGeneratorError\n\nfunc init() {\n\tt[\"IpHostnameGeneratorErrorFault\"] = reflect.TypeOf((*IpHostnameGeneratorErrorFault)(nil)).Elem()\n}\n\ntype IpPool struct {\n\tDynamicData\n\n\tId                     int32                   `xml:\"id,omitempty\"`\n\tName                   string                  `xml:\"name,omitempty\"`\n\tIpv4Config             *IpPoolIpPoolConfigInfo `xml:\"ipv4Config,omitempty\"`\n\tIpv6Config             *IpPoolIpPoolConfigInfo `xml:\"ipv6Config,omitempty\"`\n\tDnsDomain              string                  `xml:\"dnsDomain,omitempty\"`\n\tDnsSearchPath          string                  `xml:\"dnsSearchPath,omitempty\"`\n\tHostPrefix             string                  `xml:\"hostPrefix,omitempty\"`\n\tHttpProxy              string                  `xml:\"httpProxy,omitempty\"`\n\tNetworkAssociation     []IpPoolAssociation     `xml:\"networkAssociation,omitempty\"`\n\tAvailableIpv4Addresses int32                   `xml:\"availableIpv4Addresses,omitempty\"`\n\tAvailableIpv6Addresses int32                   `xml:\"availableIpv6Addresses,omitempty\"`\n\tAllocatedIpv4Addresses int32                   `xml:\"allocatedIpv4Addresses,omitempty\"`\n\tAllocatedIpv6Addresses int32                   `xml:\"allocatedIpv6Addresses,omitempty\"`\n}\n\nfunc init() {\n\tt[\"IpPool\"] = 
reflect.TypeOf((*IpPool)(nil)).Elem()\n}\n\ntype IpPoolAssociation struct {\n\tDynamicData\n\n\tNetwork     *ManagedObjectReference `xml:\"network,omitempty\"`\n\tNetworkName string                  `xml:\"networkName\"`\n}\n\nfunc init() {\n\tt[\"IpPoolAssociation\"] = reflect.TypeOf((*IpPoolAssociation)(nil)).Elem()\n}\n\ntype IpPoolIpPoolConfigInfo struct {\n\tDynamicData\n\n\tSubnetAddress       string   `xml:\"subnetAddress,omitempty\"`\n\tNetmask             string   `xml:\"netmask,omitempty\"`\n\tGateway             string   `xml:\"gateway,omitempty\"`\n\tRange               string   `xml:\"range,omitempty\"`\n\tDns                 []string `xml:\"dns,omitempty\"`\n\tDhcpServerAvailable *bool    `xml:\"dhcpServerAvailable\"`\n\tIpPoolEnabled       *bool    `xml:\"ipPoolEnabled\"`\n}\n\nfunc init() {\n\tt[\"IpPoolIpPoolConfigInfo\"] = reflect.TypeOf((*IpPoolIpPoolConfigInfo)(nil)).Elem()\n}\n\ntype IpPoolManagerIpAllocation struct {\n\tDynamicData\n\n\tIpAddress    string `xml:\"ipAddress\"`\n\tAllocationId string `xml:\"allocationId\"`\n}\n\nfunc init() {\n\tt[\"IpPoolManagerIpAllocation\"] = reflect.TypeOf((*IpPoolManagerIpAllocation)(nil)).Elem()\n}\n\ntype IpRange struct {\n\tIpAddress\n\n\tAddressPrefix string `xml:\"addressPrefix\"`\n\tPrefixLength  int32  `xml:\"prefixLength,omitempty\"`\n}\n\nfunc init() {\n\tt[\"IpRange\"] = reflect.TypeOf((*IpRange)(nil)).Elem()\n}\n\ntype IpRouteProfile struct {\n\tApplyProfile\n\n\tStaticRoute []StaticRouteProfile `xml:\"staticRoute,omitempty\"`\n}\n\nfunc init() {\n\tt[\"IpRouteProfile\"] = reflect.TypeOf((*IpRouteProfile)(nil)).Elem()\n}\n\ntype IsSharedGraphicsActive IsSharedGraphicsActiveRequestType\n\nfunc init() {\n\tt[\"IsSharedGraphicsActive\"] = reflect.TypeOf((*IsSharedGraphicsActive)(nil)).Elem()\n}\n\ntype IsSharedGraphicsActiveRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"IsSharedGraphicsActiveRequestType\"] = 
reflect.TypeOf((*IsSharedGraphicsActiveRequestType)(nil)).Elem()\n}\n\ntype IsSharedGraphicsActiveResponse struct {\n\tReturnval bool `xml:\"returnval\"`\n}\n\ntype IscsiDependencyEntity struct {\n\tDynamicData\n\n\tPnicDevice string `xml:\"pnicDevice\"`\n\tVnicDevice string `xml:\"vnicDevice\"`\n\tVmhbaName  string `xml:\"vmhbaName\"`\n}\n\nfunc init() {\n\tt[\"IscsiDependencyEntity\"] = reflect.TypeOf((*IscsiDependencyEntity)(nil)).Elem()\n}\n\ntype IscsiFault struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"IscsiFault\"] = reflect.TypeOf((*IscsiFault)(nil)).Elem()\n}\n\ntype IscsiFaultFault BaseIscsiFault\n\nfunc init() {\n\tt[\"IscsiFaultFault\"] = reflect.TypeOf((*IscsiFaultFault)(nil)).Elem()\n}\n\ntype IscsiFaultInvalidVnic struct {\n\tIscsiFault\n\n\tVnicDevice string `xml:\"vnicDevice\"`\n}\n\nfunc init() {\n\tt[\"IscsiFaultInvalidVnic\"] = reflect.TypeOf((*IscsiFaultInvalidVnic)(nil)).Elem()\n}\n\ntype IscsiFaultInvalidVnicFault IscsiFaultInvalidVnic\n\nfunc init() {\n\tt[\"IscsiFaultInvalidVnicFault\"] = reflect.TypeOf((*IscsiFaultInvalidVnicFault)(nil)).Elem()\n}\n\ntype IscsiFaultPnicInUse struct {\n\tIscsiFault\n\n\tPnicDevice string `xml:\"pnicDevice\"`\n}\n\nfunc init() {\n\tt[\"IscsiFaultPnicInUse\"] = reflect.TypeOf((*IscsiFaultPnicInUse)(nil)).Elem()\n}\n\ntype IscsiFaultPnicInUseFault IscsiFaultPnicInUse\n\nfunc init() {\n\tt[\"IscsiFaultPnicInUseFault\"] = reflect.TypeOf((*IscsiFaultPnicInUseFault)(nil)).Elem()\n}\n\ntype IscsiFaultVnicAlreadyBound struct {\n\tIscsiFault\n\n\tVnicDevice string `xml:\"vnicDevice\"`\n}\n\nfunc init() {\n\tt[\"IscsiFaultVnicAlreadyBound\"] = reflect.TypeOf((*IscsiFaultVnicAlreadyBound)(nil)).Elem()\n}\n\ntype IscsiFaultVnicAlreadyBoundFault IscsiFaultVnicAlreadyBound\n\nfunc init() {\n\tt[\"IscsiFaultVnicAlreadyBoundFault\"] = reflect.TypeOf((*IscsiFaultVnicAlreadyBoundFault)(nil)).Elem()\n}\n\ntype IscsiFaultVnicHasActivePaths struct {\n\tIscsiFault\n\n\tVnicDevice string `xml:\"vnicDevice\"`\n}\n\nfunc init() 
{\n\tt[\"IscsiFaultVnicHasActivePaths\"] = reflect.TypeOf((*IscsiFaultVnicHasActivePaths)(nil)).Elem()\n}\n\ntype IscsiFaultVnicHasActivePathsFault IscsiFaultVnicHasActivePaths\n\nfunc init() {\n\tt[\"IscsiFaultVnicHasActivePathsFault\"] = reflect.TypeOf((*IscsiFaultVnicHasActivePathsFault)(nil)).Elem()\n}\n\ntype IscsiFaultVnicHasMultipleUplinks struct {\n\tIscsiFault\n\n\tVnicDevice string `xml:\"vnicDevice\"`\n}\n\nfunc init() {\n\tt[\"IscsiFaultVnicHasMultipleUplinks\"] = reflect.TypeOf((*IscsiFaultVnicHasMultipleUplinks)(nil)).Elem()\n}\n\ntype IscsiFaultVnicHasMultipleUplinksFault IscsiFaultVnicHasMultipleUplinks\n\nfunc init() {\n\tt[\"IscsiFaultVnicHasMultipleUplinksFault\"] = reflect.TypeOf((*IscsiFaultVnicHasMultipleUplinksFault)(nil)).Elem()\n}\n\ntype IscsiFaultVnicHasNoUplinks struct {\n\tIscsiFault\n\n\tVnicDevice string `xml:\"vnicDevice\"`\n}\n\nfunc init() {\n\tt[\"IscsiFaultVnicHasNoUplinks\"] = reflect.TypeOf((*IscsiFaultVnicHasNoUplinks)(nil)).Elem()\n}\n\ntype IscsiFaultVnicHasNoUplinksFault IscsiFaultVnicHasNoUplinks\n\nfunc init() {\n\tt[\"IscsiFaultVnicHasNoUplinksFault\"] = reflect.TypeOf((*IscsiFaultVnicHasNoUplinksFault)(nil)).Elem()\n}\n\ntype IscsiFaultVnicHasWrongUplink struct {\n\tIscsiFault\n\n\tVnicDevice string `xml:\"vnicDevice\"`\n}\n\nfunc init() {\n\tt[\"IscsiFaultVnicHasWrongUplink\"] = reflect.TypeOf((*IscsiFaultVnicHasWrongUplink)(nil)).Elem()\n}\n\ntype IscsiFaultVnicHasWrongUplinkFault IscsiFaultVnicHasWrongUplink\n\nfunc init() {\n\tt[\"IscsiFaultVnicHasWrongUplinkFault\"] = reflect.TypeOf((*IscsiFaultVnicHasWrongUplinkFault)(nil)).Elem()\n}\n\ntype IscsiFaultVnicInUse struct {\n\tIscsiFault\n\n\tVnicDevice string `xml:\"vnicDevice\"`\n}\n\nfunc init() {\n\tt[\"IscsiFaultVnicInUse\"] = reflect.TypeOf((*IscsiFaultVnicInUse)(nil)).Elem()\n}\n\ntype IscsiFaultVnicInUseFault IscsiFaultVnicInUse\n\nfunc init() {\n\tt[\"IscsiFaultVnicInUseFault\"] = reflect.TypeOf((*IscsiFaultVnicInUseFault)(nil)).Elem()\n}\n\ntype 
IscsiFaultVnicIsLastPath struct {\n\tIscsiFault\n\n\tVnicDevice string `xml:\"vnicDevice\"`\n}\n\nfunc init() {\n\tt[\"IscsiFaultVnicIsLastPath\"] = reflect.TypeOf((*IscsiFaultVnicIsLastPath)(nil)).Elem()\n}\n\ntype IscsiFaultVnicIsLastPathFault IscsiFaultVnicIsLastPath\n\nfunc init() {\n\tt[\"IscsiFaultVnicIsLastPathFault\"] = reflect.TypeOf((*IscsiFaultVnicIsLastPathFault)(nil)).Elem()\n}\n\ntype IscsiFaultVnicNotBound struct {\n\tIscsiFault\n\n\tVnicDevice string `xml:\"vnicDevice\"`\n}\n\nfunc init() {\n\tt[\"IscsiFaultVnicNotBound\"] = reflect.TypeOf((*IscsiFaultVnicNotBound)(nil)).Elem()\n}\n\ntype IscsiFaultVnicNotBoundFault IscsiFaultVnicNotBound\n\nfunc init() {\n\tt[\"IscsiFaultVnicNotBoundFault\"] = reflect.TypeOf((*IscsiFaultVnicNotBoundFault)(nil)).Elem()\n}\n\ntype IscsiFaultVnicNotFound struct {\n\tIscsiFault\n\n\tVnicDevice string `xml:\"vnicDevice\"`\n}\n\nfunc init() {\n\tt[\"IscsiFaultVnicNotFound\"] = reflect.TypeOf((*IscsiFaultVnicNotFound)(nil)).Elem()\n}\n\ntype IscsiFaultVnicNotFoundFault IscsiFaultVnicNotFound\n\nfunc init() {\n\tt[\"IscsiFaultVnicNotFoundFault\"] = reflect.TypeOf((*IscsiFaultVnicNotFoundFault)(nil)).Elem()\n}\n\ntype IscsiMigrationDependency struct {\n\tDynamicData\n\n\tMigrationAllowed bool                    `xml:\"migrationAllowed\"`\n\tDisallowReason   *IscsiStatus            `xml:\"disallowReason,omitempty\"`\n\tDependency       []IscsiDependencyEntity `xml:\"dependency,omitempty\"`\n}\n\nfunc init() {\n\tt[\"IscsiMigrationDependency\"] = reflect.TypeOf((*IscsiMigrationDependency)(nil)).Elem()\n}\n\ntype IscsiPortInfo struct {\n\tDynamicData\n\n\tVnicDevice        string          `xml:\"vnicDevice,omitempty\"`\n\tVnic              *HostVirtualNic `xml:\"vnic,omitempty\"`\n\tPnicDevice        string          `xml:\"pnicDevice,omitempty\"`\n\tPnic              *PhysicalNic    `xml:\"pnic,omitempty\"`\n\tSwitchName        string          `xml:\"switchName,omitempty\"`\n\tSwitchUuid        string          
`xml:\"switchUuid,omitempty\"`\n\tPortgroupName     string          `xml:\"portgroupName,omitempty\"`\n\tPortgroupKey      string          `xml:\"portgroupKey,omitempty\"`\n\tPortKey           string          `xml:\"portKey,omitempty\"`\n\tOpaqueNetworkId   string          `xml:\"opaqueNetworkId,omitempty\"`\n\tOpaqueNetworkType string          `xml:\"opaqueNetworkType,omitempty\"`\n\tOpaqueNetworkName string          `xml:\"opaqueNetworkName,omitempty\"`\n\tExternalId        string          `xml:\"externalId,omitempty\"`\n\tComplianceStatus  *IscsiStatus    `xml:\"complianceStatus,omitempty\"`\n\tPathStatus        string          `xml:\"pathStatus,omitempty\"`\n}\n\nfunc init() {\n\tt[\"IscsiPortInfo\"] = reflect.TypeOf((*IscsiPortInfo)(nil)).Elem()\n}\n\ntype IscsiStatus struct {\n\tDynamicData\n\n\tReason []LocalizedMethodFault `xml:\"reason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"IscsiStatus\"] = reflect.TypeOf((*IscsiStatus)(nil)).Elem()\n}\n\ntype IsoImageFileInfo struct {\n\tFileInfo\n}\n\nfunc init() {\n\tt[\"IsoImageFileInfo\"] = reflect.TypeOf((*IsoImageFileInfo)(nil)).Elem()\n}\n\ntype IsoImageFileQuery struct {\n\tFileQuery\n}\n\nfunc init() {\n\tt[\"IsoImageFileQuery\"] = reflect.TypeOf((*IsoImageFileQuery)(nil)).Elem()\n}\n\ntype JoinDomainRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tDomainName string                 `xml:\"domainName\"`\n\tUserName   string                 `xml:\"userName\"`\n\tPassword   string                 `xml:\"password\"`\n}\n\nfunc init() {\n\tt[\"JoinDomainRequestType\"] = reflect.TypeOf((*JoinDomainRequestType)(nil)).Elem()\n}\n\ntype JoinDomainWithCAMRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tDomainName string                 `xml:\"domainName\"`\n\tCamServer  string                 `xml:\"camServer\"`\n}\n\nfunc init() {\n\tt[\"JoinDomainWithCAMRequestType\"] = reflect.TypeOf((*JoinDomainWithCAMRequestType)(nil)).Elem()\n}\n\ntype JoinDomainWithCAM_Task 
JoinDomainWithCAMRequestType\n\nfunc init() {\n\tt[\"JoinDomainWithCAM_Task\"] = reflect.TypeOf((*JoinDomainWithCAM_Task)(nil)).Elem()\n}\n\ntype JoinDomainWithCAM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype JoinDomain_Task JoinDomainRequestType\n\nfunc init() {\n\tt[\"JoinDomain_Task\"] = reflect.TypeOf((*JoinDomain_Task)(nil)).Elem()\n}\n\ntype JoinDomain_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype KernelModuleInfo struct {\n\tDynamicData\n\n\tId              int32                   `xml:\"id\"`\n\tName            string                  `xml:\"name\"`\n\tVersion         string                  `xml:\"version\"`\n\tFilename        string                  `xml:\"filename\"`\n\tOptionString    string                  `xml:\"optionString\"`\n\tLoaded          bool                    `xml:\"loaded\"`\n\tEnabled         bool                    `xml:\"enabled\"`\n\tUseCount        int32                   `xml:\"useCount\"`\n\tReadOnlySection KernelModuleSectionInfo `xml:\"readOnlySection\"`\n\tWritableSection KernelModuleSectionInfo `xml:\"writableSection\"`\n\tTextSection     KernelModuleSectionInfo `xml:\"textSection\"`\n\tDataSection     KernelModuleSectionInfo `xml:\"dataSection\"`\n\tBssSection      KernelModuleSectionInfo `xml:\"bssSection\"`\n}\n\nfunc init() {\n\tt[\"KernelModuleInfo\"] = reflect.TypeOf((*KernelModuleInfo)(nil)).Elem()\n}\n\ntype KernelModuleSectionInfo struct {\n\tDynamicData\n\n\tAddress int64 `xml:\"address\"`\n\tLength  int32 `xml:\"length,omitempty\"`\n}\n\nfunc init() {\n\tt[\"KernelModuleSectionInfo\"] = reflect.TypeOf((*KernelModuleSectionInfo)(nil)).Elem()\n}\n\ntype KeyAnyValue struct {\n\tDynamicData\n\n\tKey   string  `xml:\"key\"`\n\tValue AnyType `xml:\"value,typeattr\"`\n}\n\nfunc init() {\n\tt[\"KeyAnyValue\"] = reflect.TypeOf((*KeyAnyValue)(nil)).Elem()\n}\n\ntype KeyProviderId struct {\n\tDynamicData\n\n\tId string `xml:\"id\"`\n}\n\nfunc 
init() {\n\tt[\"KeyProviderId\"] = reflect.TypeOf((*KeyProviderId)(nil)).Elem()\n}\n\ntype KeyValue struct {\n\tDynamicData\n\n\tKey   string `xml:\"key\"`\n\tValue string `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"KeyValue\"] = reflect.TypeOf((*KeyValue)(nil)).Elem()\n}\n\ntype KmipClusterInfo struct {\n\tDynamicData\n\n\tClusterId    KeyProviderId    `xml:\"clusterId\"`\n\tServers      []KmipServerInfo `xml:\"servers,omitempty\"`\n\tUseAsDefault bool             `xml:\"useAsDefault\"`\n}\n\nfunc init() {\n\tt[\"KmipClusterInfo\"] = reflect.TypeOf((*KmipClusterInfo)(nil)).Elem()\n}\n\ntype KmipServerInfo struct {\n\tDynamicData\n\n\tName         string `xml:\"name\"`\n\tAddress      string `xml:\"address\"`\n\tPort         int32  `xml:\"port\"`\n\tProxyAddress string `xml:\"proxyAddress,omitempty\"`\n\tProxyPort    int32  `xml:\"proxyPort,omitempty\"`\n\tReconnect    int32  `xml:\"reconnect,omitempty\"`\n\tProtocol     string `xml:\"protocol,omitempty\"`\n\tNbio         int32  `xml:\"nbio,omitempty\"`\n\tTimeout      int32  `xml:\"timeout,omitempty\"`\n\tUserName     string `xml:\"userName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"KmipServerInfo\"] = reflect.TypeOf((*KmipServerInfo)(nil)).Elem()\n}\n\ntype KmipServerSpec struct {\n\tDynamicData\n\n\tClusterId KeyProviderId  `xml:\"clusterId\"`\n\tInfo      KmipServerInfo `xml:\"info\"`\n\tPassword  string         `xml:\"password,omitempty\"`\n}\n\nfunc init() {\n\tt[\"KmipServerSpec\"] = reflect.TypeOf((*KmipServerSpec)(nil)).Elem()\n}\n\ntype KmipServerStatus struct {\n\tDynamicData\n\n\tClusterId   KeyProviderId       `xml:\"clusterId\"`\n\tName        string              `xml:\"name\"`\n\tStatus      ManagedEntityStatus `xml:\"status\"`\n\tDescription string              `xml:\"description\"`\n}\n\nfunc init() {\n\tt[\"KmipServerStatus\"] = reflect.TypeOf((*KmipServerStatus)(nil)).Elem()\n}\n\ntype LargeRDMConversionNotSupported struct {\n\tMigrationFault\n\n\tDevice string `xml:\"device\"`\n}\n\nfunc init() 
{\n\tt[\"LargeRDMConversionNotSupported\"] = reflect.TypeOf((*LargeRDMConversionNotSupported)(nil)).Elem()\n}\n\ntype LargeRDMConversionNotSupportedFault LargeRDMConversionNotSupported\n\nfunc init() {\n\tt[\"LargeRDMConversionNotSupportedFault\"] = reflect.TypeOf((*LargeRDMConversionNotSupportedFault)(nil)).Elem()\n}\n\ntype LargeRDMNotSupportedOnDatastore struct {\n\tVmConfigFault\n\n\tDevice        string                 `xml:\"device\"`\n\tDatastore     ManagedObjectReference `xml:\"datastore\"`\n\tDatastoreName string                 `xml:\"datastoreName\"`\n}\n\nfunc init() {\n\tt[\"LargeRDMNotSupportedOnDatastore\"] = reflect.TypeOf((*LargeRDMNotSupportedOnDatastore)(nil)).Elem()\n}\n\ntype LargeRDMNotSupportedOnDatastoreFault LargeRDMNotSupportedOnDatastore\n\nfunc init() {\n\tt[\"LargeRDMNotSupportedOnDatastoreFault\"] = reflect.TypeOf((*LargeRDMNotSupportedOnDatastoreFault)(nil)).Elem()\n}\n\ntype LatencySensitivity struct {\n\tDynamicData\n\n\tLevel       LatencySensitivitySensitivityLevel `xml:\"level\"`\n\tSensitivity int32                              `xml:\"sensitivity,omitempty\"`\n}\n\nfunc init() {\n\tt[\"LatencySensitivity\"] = reflect.TypeOf((*LatencySensitivity)(nil)).Elem()\n}\n\ntype LeaveCurrentDomainRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tForce bool                   `xml:\"force\"`\n}\n\nfunc init() {\n\tt[\"LeaveCurrentDomainRequestType\"] = reflect.TypeOf((*LeaveCurrentDomainRequestType)(nil)).Elem()\n}\n\ntype LeaveCurrentDomain_Task LeaveCurrentDomainRequestType\n\nfunc init() {\n\tt[\"LeaveCurrentDomain_Task\"] = reflect.TypeOf((*LeaveCurrentDomain_Task)(nil)).Elem()\n}\n\ntype LeaveCurrentDomain_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype LegacyNetworkInterfaceInUse struct {\n\tCannotAccessNetwork\n}\n\nfunc init() {\n\tt[\"LegacyNetworkInterfaceInUse\"] = reflect.TypeOf((*LegacyNetworkInterfaceInUse)(nil)).Elem()\n}\n\ntype LegacyNetworkInterfaceInUseFault 
LegacyNetworkInterfaceInUse\n\nfunc init() {\n\tt[\"LegacyNetworkInterfaceInUseFault\"] = reflect.TypeOf((*LegacyNetworkInterfaceInUseFault)(nil)).Elem()\n}\n\ntype LicenseAssignmentFailed struct {\n\tRuntimeFault\n\n\tReason string `xml:\"reason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"LicenseAssignmentFailed\"] = reflect.TypeOf((*LicenseAssignmentFailed)(nil)).Elem()\n}\n\ntype LicenseAssignmentFailedFault LicenseAssignmentFailed\n\nfunc init() {\n\tt[\"LicenseAssignmentFailedFault\"] = reflect.TypeOf((*LicenseAssignmentFailedFault)(nil)).Elem()\n}\n\ntype LicenseAssignmentManagerLicenseAssignment struct {\n\tDynamicData\n\n\tEntityId          string                    `xml:\"entityId\"`\n\tScope             string                    `xml:\"scope,omitempty\"`\n\tEntityDisplayName string                    `xml:\"entityDisplayName,omitempty\"`\n\tAssignedLicense   LicenseManagerLicenseInfo `xml:\"assignedLicense\"`\n\tProperties        []KeyAnyValue             `xml:\"properties,omitempty\"`\n}\n\nfunc init() {\n\tt[\"LicenseAssignmentManagerLicenseAssignment\"] = reflect.TypeOf((*LicenseAssignmentManagerLicenseAssignment)(nil)).Elem()\n}\n\ntype LicenseAvailabilityInfo struct {\n\tDynamicData\n\n\tFeature   LicenseFeatureInfo `xml:\"feature\"`\n\tTotal     int32              `xml:\"total\"`\n\tAvailable int32              `xml:\"available\"`\n}\n\nfunc init() {\n\tt[\"LicenseAvailabilityInfo\"] = reflect.TypeOf((*LicenseAvailabilityInfo)(nil)).Elem()\n}\n\ntype LicenseDiagnostics struct {\n\tDynamicData\n\n\tSourceLastChanged      time.Time           `xml:\"sourceLastChanged\"`\n\tSourceLost             string              `xml:\"sourceLost\"`\n\tSourceLatency          float32             `xml:\"sourceLatency\"`\n\tLicenseRequests        string              `xml:\"licenseRequests\"`\n\tLicenseRequestFailures string              `xml:\"licenseRequestFailures\"`\n\tLicenseFeatureUnknowns string              `xml:\"licenseFeatureUnknowns\"`\n\tOpState                
LicenseManagerState `xml:\"opState\"`\n\tLastStatusUpdate       time.Time           `xml:\"lastStatusUpdate\"`\n\tOpFailureMessage       string              `xml:\"opFailureMessage\"`\n}\n\nfunc init() {\n\tt[\"LicenseDiagnostics\"] = reflect.TypeOf((*LicenseDiagnostics)(nil)).Elem()\n}\n\ntype LicenseDowngradeDisallowed struct {\n\tNotEnoughLicenses\n\n\tEdition  string        `xml:\"edition\"`\n\tEntityId string        `xml:\"entityId\"`\n\tFeatures []KeyAnyValue `xml:\"features\"`\n}\n\nfunc init() {\n\tt[\"LicenseDowngradeDisallowed\"] = reflect.TypeOf((*LicenseDowngradeDisallowed)(nil)).Elem()\n}\n\ntype LicenseDowngradeDisallowedFault LicenseDowngradeDisallowed\n\nfunc init() {\n\tt[\"LicenseDowngradeDisallowedFault\"] = reflect.TypeOf((*LicenseDowngradeDisallowedFault)(nil)).Elem()\n}\n\ntype LicenseEntityNotFound struct {\n\tVimFault\n\n\tEntityId string `xml:\"entityId\"`\n}\n\nfunc init() {\n\tt[\"LicenseEntityNotFound\"] = reflect.TypeOf((*LicenseEntityNotFound)(nil)).Elem()\n}\n\ntype LicenseEntityNotFoundFault LicenseEntityNotFound\n\nfunc init() {\n\tt[\"LicenseEntityNotFoundFault\"] = reflect.TypeOf((*LicenseEntityNotFoundFault)(nil)).Elem()\n}\n\ntype LicenseEvent struct {\n\tEvent\n}\n\nfunc init() {\n\tt[\"LicenseEvent\"] = reflect.TypeOf((*LicenseEvent)(nil)).Elem()\n}\n\ntype LicenseExpired struct {\n\tNotEnoughLicenses\n\n\tLicenseKey string `xml:\"licenseKey\"`\n}\n\nfunc init() {\n\tt[\"LicenseExpired\"] = reflect.TypeOf((*LicenseExpired)(nil)).Elem()\n}\n\ntype LicenseExpiredEvent struct {\n\tEvent\n\n\tFeature LicenseFeatureInfo `xml:\"feature\"`\n}\n\nfunc init() {\n\tt[\"LicenseExpiredEvent\"] = reflect.TypeOf((*LicenseExpiredEvent)(nil)).Elem()\n}\n\ntype LicenseExpiredFault LicenseExpired\n\nfunc init() {\n\tt[\"LicenseExpiredFault\"] = reflect.TypeOf((*LicenseExpiredFault)(nil)).Elem()\n}\n\ntype LicenseFeatureInfo struct {\n\tDynamicData\n\n\tKey                string                  `xml:\"key\"`\n\tFeatureName        string         
         `xml:\"featureName\"`\n\tFeatureDescription string                  `xml:\"featureDescription,omitempty\"`\n\tState              LicenseFeatureInfoState `xml:\"state,omitempty\"`\n\tCostUnit           string                  `xml:\"costUnit\"`\n\tSourceRestriction  string                  `xml:\"sourceRestriction,omitempty\"`\n\tDependentKey       []string                `xml:\"dependentKey,omitempty\"`\n\tEdition            *bool                   `xml:\"edition\"`\n\tExpiresOn          *time.Time              `xml:\"expiresOn\"`\n}\n\nfunc init() {\n\tt[\"LicenseFeatureInfo\"] = reflect.TypeOf((*LicenseFeatureInfo)(nil)).Elem()\n}\n\ntype LicenseKeyEntityMismatch struct {\n\tNotEnoughLicenses\n}\n\nfunc init() {\n\tt[\"LicenseKeyEntityMismatch\"] = reflect.TypeOf((*LicenseKeyEntityMismatch)(nil)).Elem()\n}\n\ntype LicenseKeyEntityMismatchFault LicenseKeyEntityMismatch\n\nfunc init() {\n\tt[\"LicenseKeyEntityMismatchFault\"] = reflect.TypeOf((*LicenseKeyEntityMismatchFault)(nil)).Elem()\n}\n\ntype LicenseManagerEvaluationInfo struct {\n\tDynamicData\n\n\tProperties []KeyAnyValue `xml:\"properties\"`\n}\n\nfunc init() {\n\tt[\"LicenseManagerEvaluationInfo\"] = reflect.TypeOf((*LicenseManagerEvaluationInfo)(nil)).Elem()\n}\n\ntype LicenseManagerLicenseInfo struct {\n\tDynamicData\n\n\tLicenseKey string        `xml:\"licenseKey\"`\n\tEditionKey string        `xml:\"editionKey\"`\n\tName       string        `xml:\"name\"`\n\tTotal      int32         `xml:\"total\"`\n\tUsed       int32         `xml:\"used,omitempty\"`\n\tCostUnit   string        `xml:\"costUnit\"`\n\tProperties []KeyAnyValue `xml:\"properties,omitempty\"`\n\tLabels     []KeyValue    `xml:\"labels,omitempty\"`\n}\n\nfunc init() {\n\tt[\"LicenseManagerLicenseInfo\"] = reflect.TypeOf((*LicenseManagerLicenseInfo)(nil)).Elem()\n}\n\ntype LicenseNonComplianceEvent struct {\n\tLicenseEvent\n\n\tUrl string `xml:\"url\"`\n}\n\nfunc init() {\n\tt[\"LicenseNonComplianceEvent\"] = 
reflect.TypeOf((*LicenseNonComplianceEvent)(nil)).Elem()\n}\n\ntype LicenseReservationInfo struct {\n\tDynamicData\n\n\tKey      string                      `xml:\"key\"`\n\tState    LicenseReservationInfoState `xml:\"state\"`\n\tRequired int32                       `xml:\"required\"`\n}\n\nfunc init() {\n\tt[\"LicenseReservationInfo\"] = reflect.TypeOf((*LicenseReservationInfo)(nil)).Elem()\n}\n\ntype LicenseRestricted struct {\n\tNotEnoughLicenses\n}\n\nfunc init() {\n\tt[\"LicenseRestricted\"] = reflect.TypeOf((*LicenseRestricted)(nil)).Elem()\n}\n\ntype LicenseRestrictedEvent struct {\n\tLicenseEvent\n}\n\nfunc init() {\n\tt[\"LicenseRestrictedEvent\"] = reflect.TypeOf((*LicenseRestrictedEvent)(nil)).Elem()\n}\n\ntype LicenseRestrictedFault LicenseRestricted\n\nfunc init() {\n\tt[\"LicenseRestrictedFault\"] = reflect.TypeOf((*LicenseRestrictedFault)(nil)).Elem()\n}\n\ntype LicenseServerAvailableEvent struct {\n\tLicenseEvent\n\n\tLicenseServer string `xml:\"licenseServer\"`\n}\n\nfunc init() {\n\tt[\"LicenseServerAvailableEvent\"] = reflect.TypeOf((*LicenseServerAvailableEvent)(nil)).Elem()\n}\n\ntype LicenseServerSource struct {\n\tLicenseSource\n\n\tLicenseServer string `xml:\"licenseServer\"`\n}\n\nfunc init() {\n\tt[\"LicenseServerSource\"] = reflect.TypeOf((*LicenseServerSource)(nil)).Elem()\n}\n\ntype LicenseServerUnavailable struct {\n\tVimFault\n\n\tLicenseServer string `xml:\"licenseServer\"`\n}\n\nfunc init() {\n\tt[\"LicenseServerUnavailable\"] = reflect.TypeOf((*LicenseServerUnavailable)(nil)).Elem()\n}\n\ntype LicenseServerUnavailableEvent struct {\n\tLicenseEvent\n\n\tLicenseServer string `xml:\"licenseServer\"`\n}\n\nfunc init() {\n\tt[\"LicenseServerUnavailableEvent\"] = reflect.TypeOf((*LicenseServerUnavailableEvent)(nil)).Elem()\n}\n\ntype LicenseServerUnavailableFault LicenseServerUnavailable\n\nfunc init() {\n\tt[\"LicenseServerUnavailableFault\"] = reflect.TypeOf((*LicenseServerUnavailableFault)(nil)).Elem()\n}\n\ntype LicenseSource struct 
{\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"LicenseSource\"] = reflect.TypeOf((*LicenseSource)(nil)).Elem()\n}\n\ntype LicenseSourceUnavailable struct {\n\tNotEnoughLicenses\n\n\tLicenseSource BaseLicenseSource `xml:\"licenseSource,typeattr\"`\n}\n\nfunc init() {\n\tt[\"LicenseSourceUnavailable\"] = reflect.TypeOf((*LicenseSourceUnavailable)(nil)).Elem()\n}\n\ntype LicenseSourceUnavailableFault LicenseSourceUnavailable\n\nfunc init() {\n\tt[\"LicenseSourceUnavailableFault\"] = reflect.TypeOf((*LicenseSourceUnavailableFault)(nil)).Elem()\n}\n\ntype LicenseUsageInfo struct {\n\tDynamicData\n\n\tSource          BaseLicenseSource        `xml:\"source,typeattr\"`\n\tSourceAvailable bool                     `xml:\"sourceAvailable\"`\n\tReservationInfo []LicenseReservationInfo `xml:\"reservationInfo,omitempty\"`\n\tFeatureInfo     []LicenseFeatureInfo     `xml:\"featureInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"LicenseUsageInfo\"] = reflect.TypeOf((*LicenseUsageInfo)(nil)).Elem()\n}\n\ntype LimitExceeded struct {\n\tVimFault\n\n\tProperty string `xml:\"property,omitempty\"`\n\tLimit    int32  `xml:\"limit,omitempty\"`\n}\n\nfunc init() {\n\tt[\"LimitExceeded\"] = reflect.TypeOf((*LimitExceeded)(nil)).Elem()\n}\n\ntype LimitExceededFault LimitExceeded\n\nfunc init() {\n\tt[\"LimitExceededFault\"] = reflect.TypeOf((*LimitExceededFault)(nil)).Elem()\n}\n\ntype LinkDiscoveryProtocolConfig struct {\n\tDynamicData\n\n\tProtocol  string `xml:\"protocol\"`\n\tOperation string `xml:\"operation\"`\n}\n\nfunc init() {\n\tt[\"LinkDiscoveryProtocolConfig\"] = reflect.TypeOf((*LinkDiscoveryProtocolConfig)(nil)).Elem()\n}\n\ntype LinkLayerDiscoveryProtocolInfo struct {\n\tDynamicData\n\n\tChassisId  string        `xml:\"chassisId\"`\n\tPortId     string        `xml:\"portId\"`\n\tTimeToLive int32         `xml:\"timeToLive\"`\n\tParameter  []KeyAnyValue `xml:\"parameter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"LinkLayerDiscoveryProtocolInfo\"] = 
reflect.TypeOf((*LinkLayerDiscoveryProtocolInfo)(nil)).Elem()
}

// NOTE(review): this chunk appears to be machine-generated SOAP/WSDL type
// bindings (vim25-style). Each wire type is declared and then registered in
// the package-level registry `t` so the XML decoder can resolve xsi:type
// attributes by name — TODO confirm `t` semantics against the file header.
// Restyled with grouped declarations and consolidated init() registration;
// type names, field names, field types and XML tags are unchanged, and the
// relative registration order is preserved. Response types are deliberately
// left unregistered, exactly as in the original.

// Host-profile link sub-profile and Linux guest-customization fault.
type (
	LinkProfile struct {
		ApplyProfile
	}

	LinuxVolumeNotClean struct {
		CustomizationFault
	}

	// Wire-level fault alias.
	LinuxVolumeNotCleanFault LinuxVolumeNotClean
)

func init() {
	t["LinkProfile"] = reflect.TypeOf((*LinkProfile)(nil)).Elem()
	t["LinuxVolumeNotClean"] = reflect.TypeOf((*LinuxVolumeNotClean)(nil)).Elem()
	t["LinuxVolumeNotCleanFault"] = reflect.TypeOf((*LinuxVolumeNotCleanFault)(nil)).Elem()
}

// CA-certificate listing operations.
type (
	ListCACertificateRevocationLists ListCACertificateRevocationListsRequestType

	ListCACertificateRevocationListsRequestType struct {
		This ManagedObjectReference `xml:"_this"`
	}

	ListCACertificateRevocationListsResponse struct {
		Returnval []string `xml:"returnval,omitempty"`
	}

	ListCACertificates ListCACertificatesRequestType

	ListCACertificatesRequestType struct {
		This ManagedObjectReference `xml:"_this"`
	}

	ListCACertificatesResponse struct {
		Returnval []string `xml:"returnval,omitempty"`
	}
)

func init() {
	t["ListCACertificateRevocationLists"] = reflect.TypeOf((*ListCACertificateRevocationLists)(nil)).Elem()
	t["ListCACertificateRevocationListsRequestType"] = reflect.TypeOf((*ListCACertificateRevocationListsRequestType)(nil)).Elem()
	t["ListCACertificates"] = reflect.TypeOf((*ListCACertificates)(nil)).Elem()
	t["ListCACertificatesRequestType"] = reflect.TypeOf((*ListCACertificatesRequestType)(nil)).Elem()
}

// Guest-operations listing calls: files and user aliases.
type (
	ListFilesInGuest ListFilesInGuestRequestType

	ListFilesInGuestRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Vm ManagedObjectReference `xml:"vm"`
		Auth BaseGuestAuthentication `xml:"auth,typeattr"`
		FilePath string `xml:"filePath"`
		Index int32 `xml:"index,omitempty"`
		MaxResults int32 `xml:"maxResults,omitempty"`
		MatchPattern string `xml:"matchPattern,omitempty"`
	}

	ListFilesInGuestResponse struct {
		Returnval GuestListFileInfo `xml:"returnval"`
	}

	ListGuestAliases ListGuestAliasesRequestType

	ListGuestAliasesRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Vm ManagedObjectReference `xml:"vm"`
		Auth BaseGuestAuthentication `xml:"auth,typeattr"`
		Username string `xml:"username"`
	}

	ListGuestAliasesResponse struct {
		Returnval []GuestAliases `xml:"returnval,omitempty"`
	}

	ListGuestMappedAliases ListGuestMappedAliasesRequestType

	ListGuestMappedAliasesRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Vm ManagedObjectReference `xml:"vm"`
		Auth BaseGuestAuthentication `xml:"auth,typeattr"`
	}

	ListGuestMappedAliasesResponse struct {
		Returnval []GuestMappedAliases `xml:"returnval,omitempty"`
	}
)

func init() {
	t["ListFilesInGuest"] = reflect.TypeOf((*ListFilesInGuest)(nil)).Elem()
	t["ListFilesInGuestRequestType"] = reflect.TypeOf((*ListFilesInGuestRequestType)(nil)).Elem()
	t["ListGuestAliases"] = reflect.TypeOf((*ListGuestAliases)(nil)).Elem()
	t["ListGuestAliasesRequestType"] = reflect.TypeOf((*ListGuestAliasesRequestType)(nil)).Elem()
	t["ListGuestMappedAliases"] = reflect.TypeOf((*ListGuestMappedAliases)(nil)).Elem()
	t["ListGuestMappedAliasesRequestType"] = reflect.TypeOf((*ListGuestMappedAliasesRequestType)(nil)).Elem()
}

// Crypto key and KMIP server listing operations.
type (
	ListKeys ListKeysRequestType

	ListKeysRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Limit int32 `xml:"limit,omitempty"`
	}

	ListKeysResponse struct {
		Returnval []CryptoKeyId `xml:"returnval,omitempty"`
	}

	ListKmipServers ListKmipServersRequestType

	ListKmipServersRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Limit int32 `xml:"limit,omitempty"`
	}

	ListKmipServersResponse struct {
		Returnval []KmipClusterInfo `xml:"returnval,omitempty"`
	}
)

func init() {
	t["ListKeys"] = reflect.TypeOf((*ListKeys)(nil)).Elem()
	t["ListKeysRequestType"] = reflect.TypeOf((*ListKeysRequestType)(nil)).Elem()
	t["ListKmipServers"] = reflect.TypeOf((*ListKmipServers)(nil)).Elem()
	t["ListKmipServersRequestType"] = reflect.TypeOf((*ListKmipServersRequestType)(nil)).Elem()
}

// Guest-operations listing calls: processes and Windows registry.
type (
	ListProcessesInGuest ListProcessesInGuestRequestType

	ListProcessesInGuestRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Vm ManagedObjectReference `xml:"vm"`
		Auth BaseGuestAuthentication `xml:"auth,typeattr"`
		Pids []int64 `xml:"pids,omitempty"`
	}

	ListProcessesInGuestResponse struct {
		Returnval []GuestProcessInfo `xml:"returnval,omitempty"`
	}

	ListRegistryKeysInGuest ListRegistryKeysInGuestRequestType

	ListRegistryKeysInGuestRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Vm ManagedObjectReference `xml:"vm"`
		Auth BaseGuestAuthentication `xml:"auth,typeattr"`
		KeyName GuestRegKeyNameSpec `xml:"keyName"`
		Recursive bool `xml:"recursive"`
		MatchPattern string `xml:"matchPattern,omitempty"`
	}

	ListRegistryKeysInGuestResponse struct {
		Returnval []GuestRegKeyRecordSpec `xml:"returnval,omitempty"`
	}

	ListRegistryValuesInGuest ListRegistryValuesInGuestRequestType

	ListRegistryValuesInGuestRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Vm ManagedObjectReference `xml:"vm"`
		Auth BaseGuestAuthentication `xml:"auth,typeattr"`
		KeyName GuestRegKeyNameSpec `xml:"keyName"`
		ExpandStrings bool `xml:"expandStrings"`
		MatchPattern string `xml:"matchPattern,omitempty"`
	}

	ListRegistryValuesInGuestResponse struct {
		Returnval []GuestRegValueSpec `xml:"returnval,omitempty"`
	}
)

func init() {
	t["ListProcessesInGuest"] = reflect.TypeOf((*ListProcessesInGuest)(nil)).Elem()
	t["ListProcessesInGuestRequestType"] = reflect.TypeOf((*ListProcessesInGuestRequestType)(nil)).Elem()
	t["ListRegistryKeysInGuest"] = reflect.TypeOf((*ListRegistryKeysInGuest)(nil)).Elem()
	t["ListRegistryKeysInGuestRequestType"] = reflect.TypeOf((*ListRegistryKeysInGuestRequestType)(nil)).Elem()
	t["ListRegistryValuesInGuest"] = reflect.TypeOf((*ListRegistryValuesInGuest)(nil)).Elem()
	t["ListRegistryValuesInGuestRequestType"] = reflect.TypeOf((*ListRegistryValuesInGuestRequestType)(nil)).Elem()
}

// Smart-card trust anchors and first-class-disk (vStorage object) listing.
type (
	ListSmartCardTrustAnchors ListSmartCardTrustAnchorsRequestType

	ListSmartCardTrustAnchorsRequestType struct {
		This ManagedObjectReference `xml:"_this"`
	}

	ListSmartCardTrustAnchorsResponse struct {
		Returnval []string `xml:"returnval,omitempty"`
	}

	ListTagsAttachedToVStorageObject ListTagsAttachedToVStorageObjectRequestType

	ListTagsAttachedToVStorageObjectRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Id ID `xml:"id"`
	}

	ListTagsAttachedToVStorageObjectResponse struct {
		Returnval []VslmTagEntry `xml:"returnval,omitempty"`
	}

	ListVStorageObject ListVStorageObjectRequestType

	ListVStorageObjectRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Datastore ManagedObjectReference `xml:"datastore"`
	}

	ListVStorageObjectResponse struct {
		Returnval []ID `xml:"returnval,omitempty"`
	}

	ListVStorageObjectsAttachedToTag ListVStorageObjectsAttachedToTagRequestType

	ListVStorageObjectsAttachedToTagRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Category string `xml:"category"`
		Tag string `xml:"tag"`
	}

	ListVStorageObjectsAttachedToTagResponse struct {
		Returnval []ID `xml:"returnval,omitempty"`
	}
)

func init() {
	t["ListSmartCardTrustAnchors"] = reflect.TypeOf((*ListSmartCardTrustAnchors)(nil)).Elem()
	t["ListSmartCardTrustAnchorsRequestType"] = reflect.TypeOf((*ListSmartCardTrustAnchorsRequestType)(nil)).Elem()
	t["ListTagsAttachedToVStorageObject"] = reflect.TypeOf((*ListTagsAttachedToVStorageObject)(nil)).Elem()
	t["ListTagsAttachedToVStorageObjectRequestType"] = reflect.TypeOf((*ListTagsAttachedToVStorageObjectRequestType)(nil)).Elem()
	t["ListVStorageObject"] = reflect.TypeOf((*ListVStorageObject)(nil)).Elem()
	t["ListVStorageObjectRequestType"] = reflect.TypeOf((*ListVStorageObjectRequestType)(nil)).Elem()
	t["ListVStorageObjectsAttachedToTag"] = reflect.TypeOf((*ListVStorageObjectsAttachedToTag)(nil)).Elem()
	t["ListVStorageObjectsAttachedToTagRequestType"] = reflect.TypeOf((*ListVStorageObjectsAttachedToTagRequestType)(nil)).Elem()
}

// Local datastore/license types, localization data and locker events.
type (
	LocalDatastoreCreatedEvent struct {
		HostEvent

		Datastore DatastoreEventArgument `xml:"datastore"`
		DatastoreUrl string `xml:"datastoreUrl,omitempty"`
	}

	LocalDatastoreInfo struct {
		DatastoreInfo

		Path string `xml:"path,omitempty"`
	}

	LocalLicenseSource struct {
		LicenseSource

		LicenseKeys string `xml:"licenseKeys"`
	}

	LocalTSMEnabledEvent struct {
		HostEvent
	}

	LocalizableMessage struct {
		DynamicData

		Key string `xml:"key"`
		Arg []KeyAnyValue `xml:"arg,omitempty"`
		Message string `xml:"message,omitempty"`
	}

	LocalizationManagerMessageCatalog struct {
		DynamicData

		ModuleName string `xml:"moduleName"`
		CatalogName string `xml:"catalogName"`
		Locale string `xml:"locale"`
		CatalogUri string `xml:"catalogUri"`
		LastModified *time.Time `xml:"lastModified"`
		Md5sum string `xml:"md5sum,omitempty"`
		Version string `xml:"version,omitempty"`
	}

	LocalizedMethodFault struct {
		DynamicData

		Fault BaseMethodFault `xml:"fault,typeattr"`
		LocalizedMessage string `xml:"localizedMessage,omitempty"`
	}

	LockerMisconfiguredEvent struct {
		Event

		Datastore DatastoreEventArgument `xml:"datastore"`
	}

	LockerReconfiguredEvent struct {
		Event

		OldDatastore *DatastoreEventArgument `xml:"oldDatastore,omitempty"`
		NewDatastore *DatastoreEventArgument `xml:"newDatastore,omitempty"`
	}
)

func init() {
	t["LocalDatastoreCreatedEvent"] = reflect.TypeOf((*LocalDatastoreCreatedEvent)(nil)).Elem()
	t["LocalDatastoreInfo"] = reflect.TypeOf((*LocalDatastoreInfo)(nil)).Elem()
	t["LocalLicenseSource"] = reflect.TypeOf((*LocalLicenseSource)(nil)).Elem()
	t["LocalTSMEnabledEvent"] = reflect.TypeOf((*LocalTSMEnabledEvent)(nil)).Elem()
	t["LocalizableMessage"] = reflect.TypeOf((*LocalizableMessage)(nil)).Elem()
	t["LocalizationManagerMessageCatalog"] = reflect.TypeOf((*LocalizationManagerMessageCatalog)(nil)).Elem()
	t["LocalizedMethodFault"] = reflect.TypeOf((*LocalizedMethodFault)(nil)).Elem()
	t["LockerMisconfiguredEvent"] = reflect.TypeOf((*LockerMisconfiguredEvent)(nil)).Elem()
	t["LockerReconfiguredEvent"] = reflect.TypeOf((*LockerReconfiguredEvent)(nil)).Elem()
}

// Log-bundle fault, user-event logging, and session login/logout calls.
type (
	LogBundlingFailed struct {
		VimFault
	}

	LogBundlingFailedFault LogBundlingFailed

	LogUserEvent LogUserEventRequestType

	LogUserEventRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Entity ManagedObjectReference `xml:"entity"`
		Msg string `xml:"msg"`
	}

	LogUserEventResponse struct {
	}

	Login LoginRequestType

	LoginBySSPI LoginBySSPIRequestType

	LoginBySSPIRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Base64Token string `xml:"base64Token"`
		Locale string `xml:"locale,omitempty"`
	}

	LoginBySSPIResponse struct {
		Returnval UserSession `xml:"returnval"`
	}

	LoginByToken LoginByTokenRequestType

	LoginByTokenRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Locale string `xml:"locale,omitempty"`
	}

	LoginByTokenResponse struct {
		Returnval UserSession `xml:"returnval"`
	}

	LoginExtensionByCertificate LoginExtensionByCertificateRequestType

	LoginExtensionByCertificateRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		ExtensionKey string `xml:"extensionKey"`
		Locale string `xml:"locale,omitempty"`
	}

	LoginExtensionByCertificateResponse struct {
		Returnval UserSession `xml:"returnval"`
	}

	LoginExtensionBySubjectName LoginExtensionBySubjectNameRequestType

	LoginExtensionBySubjectNameRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		ExtensionKey string `xml:"extensionKey"`
		Locale string `xml:"locale,omitempty"`
	}

	LoginExtensionBySubjectNameResponse struct {
		Returnval UserSession `xml:"returnval"`
	}

	LoginRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		UserName string `xml:"userName"`
		Password string `xml:"password"`
		Locale string `xml:"locale,omitempty"`
	}

	LoginResponse struct {
		Returnval UserSession `xml:"returnval"`
	}

	Logout LogoutRequestType

	LogoutRequestType struct {
		This ManagedObjectReference `xml:"_this"`
	}

	LogoutResponse struct {
	}
)

func init() {
	t["LogBundlingFailed"] = reflect.TypeOf((*LogBundlingFailed)(nil)).Elem()
	t["LogBundlingFailedFault"] = reflect.TypeOf((*LogBundlingFailedFault)(nil)).Elem()
	t["LogUserEvent"] = reflect.TypeOf((*LogUserEvent)(nil)).Elem()
	t["LogUserEventRequestType"] = reflect.TypeOf((*LogUserEventRequestType)(nil)).Elem()
	t["Login"] = reflect.TypeOf((*Login)(nil)).Elem()
	t["LoginBySSPI"] = reflect.TypeOf((*LoginBySSPI)(nil)).Elem()
	t["LoginBySSPIRequestType"] = reflect.TypeOf((*LoginBySSPIRequestType)(nil)).Elem()
	t["LoginByToken"] = reflect.TypeOf((*LoginByToken)(nil)).Elem()
	t["LoginByTokenRequestType"] = reflect.TypeOf((*LoginByTokenRequestType)(nil)).Elem()
	t["LoginExtensionByCertificate"] = reflect.TypeOf((*LoginExtensionByCertificate)(nil)).Elem()
	t["LoginExtensionByCertificateRequestType"] = reflect.TypeOf((*LoginExtensionByCertificateRequestType)(nil)).Elem()
	t["LoginExtensionBySubjectName"] = reflect.TypeOf((*LoginExtensionBySubjectName)(nil)).Elem()
	t["LoginExtensionBySubjectNameRequestType"] = reflect.TypeOf((*LoginExtensionBySubjectNameRequestType)(nil)).Elem()
	t["LoginRequestType"] = reflect.TypeOf((*LoginRequestType)(nil)).Elem()
	t["Logout"] = reflect.TypeOf((*Logout)(nil)).Elem()
	t["LogoutRequestType"] = reflect.TypeOf((*LogoutRequestType)(nil)).Elem()
}

// 64-bit option/policy wrappers and lookup calls.
type (
	LongOption struct {
		OptionType

		Min int64 `xml:"min"`
		Max int64 `xml:"max"`
		DefaultValue int64 `xml:"defaultValue"`
	}

	LongPolicy struct {
		InheritablePolicy

		Value int64 `xml:"value,omitempty"`
	}

	LookupDvPortGroup LookupDvPortGroupRequestType

	LookupDvPortGroupRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		PortgroupKey string `xml:"portgroupKey"`
	}

	LookupDvPortGroupResponse struct {
		Returnval *ManagedObjectReference `xml:"returnval,omitempty"`
	}

	LookupVmOverheadMemory LookupVmOverheadMemoryRequestType

	LookupVmOverheadMemoryRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Vm ManagedObjectReference `xml:"vm"`
		Host ManagedObjectReference `xml:"host"`
	}

	LookupVmOverheadMemoryResponse struct {
		Returnval int64 `xml:"returnval"`
	}
)

func init() {
	t["LongOption"] = reflect.TypeOf((*LongOption)(nil)).Elem()
	t["LongPolicy"] = reflect.TypeOf((*LongPolicy)(nil)).Elem()
	t["LookupDvPortGroup"] = reflect.TypeOf((*LookupDvPortGroup)(nil)).Elem()
	t["LookupDvPortGroupRequestType"] = reflect.TypeOf((*LookupDvPortGroupRequestType)(nil)).Elem()
	t["LookupVmOverheadMemory"] = reflect.TypeOf((*LookupVmOverheadMemory)(nil)).Elem()
	t["LookupVmOverheadMemoryRequestType"] = reflect.TypeOf((*LookupVmOverheadMemoryRequestType)(nil)).Elem()
}

// MAC-address match expressions.
type (
	MacAddress struct {
		NegatableExpression
	}

	MacRange struct {
		MacAddress

		Address string `xml:"address"`
		Mask string `xml:"mask"`
	}
)

func init() {
	t["MacAddress"] = reflect.TypeOf((*MacAddress)(nil)).Elem()
	t["MacRange"] = reflect.TypeOf((*MacRange)(nil)).Elem()
}

// Maintenance-mode fault and directory/primary-VM creation calls.
type (
	MaintenanceModeFileMove struct {
		MigrationFault
	}

	MaintenanceModeFileMoveFault MaintenanceModeFileMove

	MakeDirectory MakeDirectoryRequestType

	MakeDirectoryInGuest MakeDirectoryInGuestRequestType

	MakeDirectoryInGuestRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Vm ManagedObjectReference `xml:"vm"`
		Auth BaseGuestAuthentication `xml:"auth,typeattr"`
		DirectoryPath string `xml:"directoryPath"`
		CreateParentDirectories bool `xml:"createParentDirectories"`
	}

	MakeDirectoryInGuestResponse struct {
	}

	MakeDirectoryRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Name string `xml:"name"`
		Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"`
		CreateParentDirectories *bool `xml:"createParentDirectories"`
	}

	MakeDirectoryResponse struct {
	}

	MakePrimaryVMRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Vm ManagedObjectReference `xml:"vm"`
	}

	MakePrimaryVM_Task MakePrimaryVMRequestType

	MakePrimaryVM_TaskResponse struct {
		Returnval ManagedObjectReference `xml:"returnval"`
	}
)

func init() {
	t["MaintenanceModeFileMove"] = reflect.TypeOf((*MaintenanceModeFileMove)(nil)).Elem()
	t["MaintenanceModeFileMoveFault"] = reflect.TypeOf((*MaintenanceModeFileMoveFault)(nil)).Elem()
	t["MakeDirectory"] = reflect.TypeOf((*MakeDirectory)(nil)).Elem()
	t["MakeDirectoryInGuest"] = reflect.TypeOf((*MakeDirectoryInGuest)(nil)).Elem()
	t["MakeDirectoryInGuestRequestType"] = reflect.TypeOf((*MakeDirectoryInGuestRequestType)(nil)).Elem()
	t["MakeDirectoryRequestType"] = reflect.TypeOf((*MakeDirectoryRequestType)(nil)).Elem()
	t["MakePrimaryVMRequestType"] = reflect.TypeOf((*MakePrimaryVMRequestType)(nil)).Elem()
	t["MakePrimaryVM_Task"] = reflect.TypeOf((*MakePrimaryVM_Task)(nil)).Elem()
}

// Managed-object descriptors, including the core ManagedObjectReference.
type (
	ManagedByInfo struct {
		DynamicData

		ExtensionKey string `xml:"extensionKey"`
		Type string `xml:"type"`
	}

	ManagedEntityEventArgument struct {
		EntityEventArgument

		Entity ManagedObjectReference `xml:"entity"`
	}

	ManagedObjectNotFound struct {
		RuntimeFault

		Obj ManagedObjectReference `xml:"obj"`
	}

	ManagedObjectNotFoundFault ManagedObjectNotFound

	// ManagedObjectReference is the wire form of a MoRef: the managed-object
	// type as an XML attribute and its opaque value as character data.
	ManagedObjectReference struct {
		Type string `xml:"type,attr"`
		Value string `xml:",chardata"`
	}
)

func init() {
	t["ManagedByInfo"] = reflect.TypeOf((*ManagedByInfo)(nil)).Elem()
	t["ManagedEntityEventArgument"] = reflect.TypeOf((*ManagedEntityEventArgument)(nil)).Elem()
	t["ManagedObjectNotFound"] = reflect.TypeOf((*ManagedObjectNotFound)(nil)).Elem()
	t["ManagedObjectNotFoundFault"] = reflect.TypeOf((*ManagedObjectNotFoundFault)(nil)).Elem()
	t["ManagedObjectReference"] = reflect.TypeOf((*ManagedObjectReference)(nil)).Elem()
}

// Disk/VM marking operations (local/SSD flags, template conversion,
// default key provider, HBA removal).
type (
	MarkAsLocalRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		ScsiDiskUuid string `xml:"scsiDiskUuid"`
	}

	MarkAsLocal_Task MarkAsLocalRequestType

	MarkAsLocal_TaskResponse struct {
		Returnval ManagedObjectReference `xml:"returnval"`
	}

	MarkAsNonLocalRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		ScsiDiskUuid string `xml:"scsiDiskUuid"`
	}

	MarkAsNonLocal_Task MarkAsNonLocalRequestType

	MarkAsNonLocal_TaskResponse struct {
		Returnval ManagedObjectReference `xml:"returnval"`
	}

	MarkAsNonSsdRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		ScsiDiskUuid string `xml:"scsiDiskUuid"`
	}

	MarkAsNonSsd_Task MarkAsNonSsdRequestType

	MarkAsNonSsd_TaskResponse struct {
		Returnval ManagedObjectReference `xml:"returnval"`
	}

	MarkAsSsdRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		ScsiDiskUuid string `xml:"scsiDiskUuid"`
	}

	MarkAsSsd_Task MarkAsSsdRequestType

	MarkAsSsd_TaskResponse struct {
		Returnval ManagedObjectReference `xml:"returnval"`
	}

	MarkAsTemplate MarkAsTemplateRequestType

	MarkAsTemplateRequestType struct {
		This ManagedObjectReference `xml:"_this"`
	}

	MarkAsTemplateResponse struct {
	}

	MarkAsVirtualMachine MarkAsVirtualMachineRequestType

	MarkAsVirtualMachineRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Pool ManagedObjectReference `xml:"pool"`
		Host *ManagedObjectReference `xml:"host,omitempty"`
	}

	MarkAsVirtualMachineResponse struct {
	}

	MarkDefault MarkDefaultRequestType

	MarkDefaultRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		ClusterId KeyProviderId `xml:"clusterId"`
	}

	MarkDefaultResponse struct {
	}

	MarkForRemoval MarkForRemovalRequestType

	MarkForRemovalRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		HbaName string `xml:"hbaName"`
		Remove bool `xml:"remove"`
	}

	MarkForRemovalResponse struct {
	}
)

func init() {
	t["MarkAsLocalRequestType"] = reflect.TypeOf((*MarkAsLocalRequestType)(nil)).Elem()
	t["MarkAsLocal_Task"] = reflect.TypeOf((*MarkAsLocal_Task)(nil)).Elem()
	t["MarkAsNonLocalRequestType"] = reflect.TypeOf((*MarkAsNonLocalRequestType)(nil)).Elem()
	t["MarkAsNonLocal_Task"] = reflect.TypeOf((*MarkAsNonLocal_Task)(nil)).Elem()
	t["MarkAsNonSsdRequestType"] = reflect.TypeOf((*MarkAsNonSsdRequestType)(nil)).Elem()
	t["MarkAsNonSsd_Task"] = reflect.TypeOf((*MarkAsNonSsd_Task)(nil)).Elem()
	t["MarkAsSsdRequestType"] = reflect.TypeOf((*MarkAsSsdRequestType)(nil)).Elem()
	t["MarkAsSsd_Task"] = reflect.TypeOf((*MarkAsSsd_Task)(nil)).Elem()
	t["MarkAsTemplate"] = reflect.TypeOf((*MarkAsTemplate)(nil)).Elem()
	t["MarkAsTemplateRequestType"] = reflect.TypeOf((*MarkAsTemplateRequestType)(nil)).Elem()
	t["MarkAsVirtualMachine"] = reflect.TypeOf((*MarkAsVirtualMachine)(nil)).Elem()
	t["MarkAsVirtualMachineRequestType"] = reflect.TypeOf((*MarkAsVirtualMachineRequestType)(nil)).Elem()
	t["MarkDefault"] = reflect.TypeOf((*MarkDefault)(nil)).Elem()
	t["MarkDefaultRequestType"] = reflect.TypeOf((*MarkDefaultRequestType)(nil)).Elem()
	t["MarkForRemoval"] = reflect.TypeOf((*MarkForRemoval)(nil)).Elem()
	t["MarkForRemovalRequestType"] = reflect.TypeOf((*MarkForRemovalRequestType)(nil)).Elem()
}

// Memory configuration and snapshot faults.
type (
	MemoryFileFormatNotSupportedByDatastore struct {
		UnsupportedDatastore

		DatastoreName string `xml:"datastoreName"`
		Type string `xml:"type"`
	}

	MemoryFileFormatNotSupportedByDatastoreFault MemoryFileFormatNotSupportedByDatastore

	MemoryHotPlugNotSupported struct {
		VmConfigFault
	}

	MemoryHotPlugNotSupportedFault MemoryHotPlugNotSupported

	MemorySizeNotRecommended struct {
		VirtualHardwareCompatibilityIssue

		MemorySizeMB int32 `xml:"memorySizeMB"`
		MinMemorySizeMB int32 `xml:"minMemorySizeMB"`
		MaxMemorySizeMB int32 `xml:"maxMemorySizeMB"`
	}

	MemorySizeNotRecommendedFault MemorySizeNotRecommended

	MemorySizeNotSupported struct {
		VirtualHardwareCompatibilityIssue

		MemorySizeMB int32 `xml:"memorySizeMB"`
		MinMemorySizeMB int32 `xml:"minMemorySizeMB"`
		MaxMemorySizeMB int32 `xml:"maxMemorySizeMB"`
	}

	MemorySizeNotSupportedByDatastore struct {
		VirtualHardwareCompatibilityIssue

		Datastore ManagedObjectReference `xml:"datastore"`
		MemorySizeMB int32 `xml:"memorySizeMB"`
		MaxMemorySizeMB int32 `xml:"maxMemorySizeMB"`
	}

	MemorySizeNotSupportedByDatastoreFault MemorySizeNotSupportedByDatastore

	MemorySizeNotSupportedFault MemorySizeNotSupported

	MemorySnapshotOnIndependentDisk struct {
		SnapshotFault
	}

	MemorySnapshotOnIndependentDiskFault MemorySnapshotOnIndependentDisk
)

func init() {
	t["MemoryFileFormatNotSupportedByDatastore"] = reflect.TypeOf((*MemoryFileFormatNotSupportedByDatastore)(nil)).Elem()
	t["MemoryFileFormatNotSupportedByDatastoreFault"] = reflect.TypeOf((*MemoryFileFormatNotSupportedByDatastoreFault)(nil)).Elem()
	t["MemoryHotPlugNotSupported"] = reflect.TypeOf((*MemoryHotPlugNotSupported)(nil)).Elem()
	t["MemoryHotPlugNotSupportedFault"] = reflect.TypeOf((*MemoryHotPlugNotSupportedFault)(nil)).Elem()
	t["MemorySizeNotRecommended"] = reflect.TypeOf((*MemorySizeNotRecommended)(nil)).Elem()
	t["MemorySizeNotRecommendedFault"] = reflect.TypeOf((*MemorySizeNotRecommendedFault)(nil)).Elem()
	t["MemorySizeNotSupported"] = reflect.TypeOf((*MemorySizeNotSupported)(nil)).Elem()
	t["MemorySizeNotSupportedByDatastore"] = reflect.TypeOf((*MemorySizeNotSupportedByDatastore)(nil)).Elem()
	t["MemorySizeNotSupportedByDatastoreFault"] = reflect.TypeOf((*MemorySizeNotSupportedByDatastoreFault)(nil)).Elem()
	t["MemorySizeNotSupportedFault"] = reflect.TypeOf((*MemorySizeNotSupportedFault)(nil)).Elem()
	t["MemorySnapshotOnIndependentDisk"] = reflect.TypeOf((*MemorySnapshotOnIndependentDisk)(nil)).Elem()
	t["MemorySnapshotOnIndependentDiskFault"] = reflect.TypeOf((*MemorySnapshotOnIndependentDiskFault)(nil)).Elem()
}

// DVS merge and permission merge operations.
type (
	MergeDvsRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Dvs ManagedObjectReference `xml:"dvs"`
	}

	MergeDvs_Task MergeDvsRequestType

	MergeDvs_TaskResponse struct {
		Returnval ManagedObjectReference `xml:"returnval"`
	}

	MergePermissions MergePermissionsRequestType

	MergePermissionsRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		SrcRoleId int32 `xml:"srcRoleId"`
		DstRoleId int32 `xml:"dstRoleId"`
	}

	MergePermissionsResponse struct {
	}
)

func init() {
	t["MergeDvsRequestType"] = reflect.TypeOf((*MergeDvsRequestType)(nil)).Elem()
	t["MergeDvs_Task"] = reflect.TypeOf((*MergeDvs_Task)(nil)).Elem()
	t["MergePermissions"] = reflect.TypeOf((*MergePermissions)(nil)).Elem()
	t["MergePermissionsRequestType"] = reflect.TypeOf((*MergePermissionsRequestType)(nil)).Elem()
}

// Method metadata, method faults, and the base MethodFault type.
type (
	MethodAction struct {
		Action

		Name string `xml:"name"`
		Argument []MethodActionArgument `xml:"argument,omitempty"`
	}

	MethodActionArgument struct {
		DynamicData

		Value AnyType `xml:"value,typeattr"`
	}

	MethodAlreadyDisabledFault struct {
		RuntimeFault

		SourceId string `xml:"sourceId"`
	}

	MethodAlreadyDisabledFaultFault MethodAlreadyDisabledFault

	MethodDescription struct {
		Description

		Key string `xml:"key"`
	}

	MethodDisabled struct {
		RuntimeFault

		Source string `xml:"source,omitempty"`
	}

	MethodDisabledFault MethodDisabled

	MethodFault struct {
		FaultCause *LocalizedMethodFault `xml:"faultCause,omitempty"`
		FaultMessage []LocalizableMessage `xml:"faultMessage,omitempty"`
	}

	MethodFaultFault BaseMethodFault

	MethodNotFound struct {
		InvalidRequest

		Receiver ManagedObjectReference `xml:"receiver"`
		Method string `xml:"method"`
	}

	MethodNotFoundFault MethodNotFound
)

func init() {
	t["MethodAction"] = reflect.TypeOf((*MethodAction)(nil)).Elem()
	t["MethodActionArgument"] = reflect.TypeOf((*MethodActionArgument)(nil)).Elem()
	t["MethodAlreadyDisabledFault"] = reflect.TypeOf((*MethodAlreadyDisabledFault)(nil)).Elem()
	t["MethodAlreadyDisabledFaultFault"] = reflect.TypeOf((*MethodAlreadyDisabledFaultFault)(nil)).Elem()
	t["MethodDescription"] = reflect.TypeOf((*MethodDescription)(nil)).Elem()
	t["MethodDisabled"] = reflect.TypeOf((*MethodDisabled)(nil)).Elem()
	t["MethodDisabledFault"] = reflect.TypeOf((*MethodDisabledFault)(nil)).Elem()
	t["MethodFault"] = reflect.TypeOf((*MethodFault)(nil)).Elem()
	t["MethodFaultFault"] = reflect.TypeOf((*MethodFaultFault)(nil)).Elem()
	t["MethodNotFound"] = reflect.TypeOf((*MethodNotFound)(nil)).Elem()
	t["MethodNotFoundFault"] = reflect.TypeOf((*MethodNotFoundFault)(nil)).Elem()
}

// Performance-metric alarm expression.
type MetricAlarmExpression struct {
	AlarmExpression

	Operator MetricAlarmOperator `xml:"operator"`
	Type string `xml:"type"`
	Metric PerfMetricId `xml:"metric"`
	Yellow int32 `xml:"yellow,omitempty"`
	YellowInterval int32 `xml:"yellowInterval,omitempty"`
	Red int32 `xml:"red,omitempty"`
	RedInterval int32 `xml:"redInterval,omitempty"`
}

func init() {
	t["MetricAlarmExpression"] = reflect.TypeOf((*MetricAlarmExpression)(nil)).Elem()
}

// VM migration call and the migration fault/event family.
type (
	MigrateVMRequestType struct {
		This ManagedObjectReference `xml:"_this"`
		Pool *ManagedObjectReference `xml:"pool,omitempty"`
		Host *ManagedObjectReference `xml:"host,omitempty"`
		Priority VirtualMachineMovePriority `xml:"priority"`
		State VirtualMachinePowerState `xml:"state,omitempty"`
	}

	MigrateVM_Task MigrateVMRequestType

	MigrateVM_TaskResponse struct {
		Returnval ManagedObjectReference `xml:"returnval"`
	}

	MigrationDisabled struct {
		MigrationFault
	}

	MigrationDisabledFault MigrationDisabled

	MigrationErrorEvent struct {
		MigrationEvent
	}

	MigrationEvent struct {
		VmEvent

		Fault LocalizedMethodFault `xml:"fault"`
	}

	MigrationFault struct {
		VimFault
	}

	MigrationFaultFault BaseMigrationFault

	MigrationFeatureNotSupported struct {
		MigrationFault

		AtSourceHost bool `xml:"atSourceHost"`
		FailedHostName string `xml:"failedHostName"`
		FailedHost ManagedObjectReference `xml:"failedHost"`
	}

	MigrationFeatureNotSupportedFault BaseMigrationFeatureNotSupported

	MigrationHostErrorEvent struct {
		MigrationEvent

		DstHost HostEventArgument `xml:"dstHost"`
	}

	MigrationHostWarningEvent struct {
		MigrationEvent

		DstHost HostEventArgument `xml:"dstHost"`
	}

	MigrationNotReady struct {
		MigrationFault

		Reason string `xml:"reason"`
	}

	MigrationNotReadyFault MigrationNotReady

	MigrationResourceErrorEvent struct {
		MigrationEvent

		DstPool ResourcePoolEventArgument `xml:"dstPool"`
		DstHost HostEventArgument `xml:"dstHost"`
	}

	MigrationResourceWarningEvent struct {
		MigrationEvent

		DstPool ResourcePoolEventArgument `xml:"dstPool"`
		DstHost HostEventArgument `xml:"dstHost"`
	}

	MigrationWarningEvent struct {
		MigrationEvent
	}
)

func init() {
	t["MigrateVMRequestType"] = reflect.TypeOf((*MigrateVMRequestType)(nil)).Elem()
	t["MigrateVM_Task"] = reflect.TypeOf((*MigrateVM_Task)(nil)).Elem()
	t["MigrationDisabled"] = reflect.TypeOf((*MigrationDisabled)(nil)).Elem()
	t["MigrationDisabledFault"] = reflect.TypeOf((*MigrationDisabledFault)(nil)).Elem()
	t["MigrationErrorEvent"] = reflect.TypeOf((*MigrationErrorEvent)(nil)).Elem()
	t["MigrationEvent"] = reflect.TypeOf((*MigrationEvent)(nil)).Elem()
	t["MigrationFault"] = reflect.TypeOf((*MigrationFault)(nil)).Elem()
	t["MigrationFaultFault"] = reflect.TypeOf((*MigrationFaultFault)(nil)).Elem()
	t["MigrationFeatureNotSupported"] = reflect.TypeOf((*MigrationFeatureNotSupported)(nil)).Elem()
	t["MigrationFeatureNotSupportedFault"] = reflect.TypeOf((*MigrationFeatureNotSupportedFault)(nil)).Elem()
	t["MigrationHostErrorEvent"] = reflect.TypeOf((*MigrationHostErrorEvent)(nil)).Elem()
	t["MigrationHostWarningEvent"] = reflect.TypeOf((*MigrationHostWarningEvent)(nil)).Elem()
	t["MigrationNotReady"] = reflect.TypeOf((*MigrationNotReady)(nil)).Elem()
	t["MigrationNotReadyFault"] = reflect.TypeOf((*MigrationNotReadyFault)(nil)).Elem()
	t["MigrationResourceErrorEvent"] = reflect.TypeOf((*MigrationResourceErrorEvent)(nil)).Elem()
	t["MigrationResourceWarningEvent"] = reflect.TypeOf((*MigrationResourceWarningEvent)(nil)).Elem()
	t["MigrationWarningEvent"] = reflect.TypeOf((*MigrationWarningEvent)(nil)).Elem()
}

// Mismatch and missing-component faults.
type (
	MismatchedBundle struct {
		VimFault

		BundleUuid string `xml:"bundleUuid"`
		HostUuid string `xml:"hostUuid"`
		BundleBuildNumber int32 `xml:"bundleBuildNumber"`
		HostBuildNumber int32 `xml:"hostBuildNumber"`
	}

	MismatchedBundleFault MismatchedBundle

	MismatchedNetworkPolicies struct {
		MigrationFault

		Device string `xml:"device"`
		Backing string `xml:"backing"`
		Connected bool `xml:"connected"`
	}

	MismatchedNetworkPoliciesFault MismatchedNetworkPolicies

	MismatchedVMotionNetworkNames struct {
		MigrationFault

		SourceNetwork string `xml:"sourceNetwork"`
		DestNetwork string `xml:"destNetwork"`
	}

	MismatchedVMotionNetworkNamesFault MismatchedVMotionNetworkNames

	MissingBmcSupport struct {
		VimFault
	}

	MissingBmcSupportFault MissingBmcSupport
)

func init() {
	t["MismatchedBundle"] = reflect.TypeOf((*MismatchedBundle)(nil)).Elem()
	t["MismatchedBundleFault"] = reflect.TypeOf((*MismatchedBundleFault)(nil)).Elem()
	t["MismatchedNetworkPolicies"] = reflect.TypeOf((*MismatchedNetworkPolicies)(nil)).Elem()
	t["MismatchedNetworkPoliciesFault"] = reflect.TypeOf((*MismatchedNetworkPoliciesFault)(nil)).Elem()
	t["MismatchedVMotionNetworkNames"] = reflect.TypeOf((*MismatchedVMotionNetworkNames)(nil)).Elem()
	t["MismatchedVMotionNetworkNamesFault"] = reflect.TypeOf((*MismatchedVMotionNetworkNamesFault)(nil)).Elem()
	t["MissingBmcSupport"] = reflect.TypeOf((*MissingBmcSupport)(nil)).Elem()
	t["MissingBmcSupportFault"] = reflect.TypeOf((*MissingBmcSupportFault)(nil)).Elem()
}

type MissingController struct {
	InvalidDeviceSpec
}

func init() {
	t["MissingController"] = 
reflect.TypeOf((*MissingController)(nil)).Elem()\n}\n\ntype MissingControllerFault MissingController\n\nfunc init() {\n\tt[\"MissingControllerFault\"] = reflect.TypeOf((*MissingControllerFault)(nil)).Elem()\n}\n\ntype MissingIpPool struct {\n\tVAppPropertyFault\n}\n\nfunc init() {\n\tt[\"MissingIpPool\"] = reflect.TypeOf((*MissingIpPool)(nil)).Elem()\n}\n\ntype MissingIpPoolFault MissingIpPool\n\nfunc init() {\n\tt[\"MissingIpPoolFault\"] = reflect.TypeOf((*MissingIpPoolFault)(nil)).Elem()\n}\n\ntype MissingLinuxCustResources struct {\n\tCustomizationFault\n}\n\nfunc init() {\n\tt[\"MissingLinuxCustResources\"] = reflect.TypeOf((*MissingLinuxCustResources)(nil)).Elem()\n}\n\ntype MissingLinuxCustResourcesFault MissingLinuxCustResources\n\nfunc init() {\n\tt[\"MissingLinuxCustResourcesFault\"] = reflect.TypeOf((*MissingLinuxCustResourcesFault)(nil)).Elem()\n}\n\ntype MissingNetworkIpConfig struct {\n\tVAppPropertyFault\n}\n\nfunc init() {\n\tt[\"MissingNetworkIpConfig\"] = reflect.TypeOf((*MissingNetworkIpConfig)(nil)).Elem()\n}\n\ntype MissingNetworkIpConfigFault MissingNetworkIpConfig\n\nfunc init() {\n\tt[\"MissingNetworkIpConfigFault\"] = reflect.TypeOf((*MissingNetworkIpConfigFault)(nil)).Elem()\n}\n\ntype MissingObject struct {\n\tDynamicData\n\n\tObj   ManagedObjectReference `xml:\"obj\"`\n\tFault LocalizedMethodFault   `xml:\"fault\"`\n}\n\nfunc init() {\n\tt[\"MissingObject\"] = reflect.TypeOf((*MissingObject)(nil)).Elem()\n}\n\ntype MissingPowerOffConfiguration struct {\n\tVAppConfigFault\n}\n\nfunc init() {\n\tt[\"MissingPowerOffConfiguration\"] = reflect.TypeOf((*MissingPowerOffConfiguration)(nil)).Elem()\n}\n\ntype MissingPowerOffConfigurationFault MissingPowerOffConfiguration\n\nfunc init() {\n\tt[\"MissingPowerOffConfigurationFault\"] = reflect.TypeOf((*MissingPowerOffConfigurationFault)(nil)).Elem()\n}\n\ntype MissingPowerOnConfiguration struct {\n\tVAppConfigFault\n}\n\nfunc init() {\n\tt[\"MissingPowerOnConfiguration\"] = 
reflect.TypeOf((*MissingPowerOnConfiguration)(nil)).Elem()\n}\n\ntype MissingPowerOnConfigurationFault MissingPowerOnConfiguration\n\nfunc init() {\n\tt[\"MissingPowerOnConfigurationFault\"] = reflect.TypeOf((*MissingPowerOnConfigurationFault)(nil)).Elem()\n}\n\ntype MissingProperty struct {\n\tDynamicData\n\n\tPath  string               `xml:\"path\"`\n\tFault LocalizedMethodFault `xml:\"fault\"`\n}\n\nfunc init() {\n\tt[\"MissingProperty\"] = reflect.TypeOf((*MissingProperty)(nil)).Elem()\n}\n\ntype MissingWindowsCustResources struct {\n\tCustomizationFault\n}\n\nfunc init() {\n\tt[\"MissingWindowsCustResources\"] = reflect.TypeOf((*MissingWindowsCustResources)(nil)).Elem()\n}\n\ntype MissingWindowsCustResourcesFault MissingWindowsCustResources\n\nfunc init() {\n\tt[\"MissingWindowsCustResourcesFault\"] = reflect.TypeOf((*MissingWindowsCustResourcesFault)(nil)).Elem()\n}\n\ntype MksConnectionLimitReached struct {\n\tInvalidState\n\n\tConnectionLimit int32 `xml:\"connectionLimit\"`\n}\n\nfunc init() {\n\tt[\"MksConnectionLimitReached\"] = reflect.TypeOf((*MksConnectionLimitReached)(nil)).Elem()\n}\n\ntype MksConnectionLimitReachedFault MksConnectionLimitReached\n\nfunc init() {\n\tt[\"MksConnectionLimitReachedFault\"] = reflect.TypeOf((*MksConnectionLimitReachedFault)(nil)).Elem()\n}\n\ntype ModeInfo struct {\n\tDynamicData\n\n\tBrowse string `xml:\"browse,omitempty\"`\n\tRead   string `xml:\"read\"`\n\tModify string `xml:\"modify\"`\n\tUse    string `xml:\"use\"`\n\tAdmin  string `xml:\"admin,omitempty\"`\n\tFull   string `xml:\"full\"`\n}\n\nfunc init() {\n\tt[\"ModeInfo\"] = reflect.TypeOf((*ModeInfo)(nil)).Elem()\n}\n\ntype ModifyListView ModifyListViewRequestType\n\nfunc init() {\n\tt[\"ModifyListView\"] = reflect.TypeOf((*ModifyListView)(nil)).Elem()\n}\n\ntype ModifyListViewRequestType struct {\n\tThis   ManagedObjectReference   `xml:\"_this\"`\n\tAdd    []ManagedObjectReference `xml:\"add,omitempty\"`\n\tRemove []ManagedObjectReference 
`xml:\"remove,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ModifyListViewRequestType\"] = reflect.TypeOf((*ModifyListViewRequestType)(nil)).Elem()\n}\n\ntype ModifyListViewResponse struct {\n\tReturnval []ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype MonthlyByDayTaskScheduler struct {\n\tMonthlyTaskScheduler\n\n\tDay int32 `xml:\"day\"`\n}\n\nfunc init() {\n\tt[\"MonthlyByDayTaskScheduler\"] = reflect.TypeOf((*MonthlyByDayTaskScheduler)(nil)).Elem()\n}\n\ntype MonthlyByWeekdayTaskScheduler struct {\n\tMonthlyTaskScheduler\n\n\tOffset  WeekOfMonth `xml:\"offset\"`\n\tWeekday DayOfWeek   `xml:\"weekday\"`\n}\n\nfunc init() {\n\tt[\"MonthlyByWeekdayTaskScheduler\"] = reflect.TypeOf((*MonthlyByWeekdayTaskScheduler)(nil)).Elem()\n}\n\ntype MonthlyTaskScheduler struct {\n\tDailyTaskScheduler\n}\n\nfunc init() {\n\tt[\"MonthlyTaskScheduler\"] = reflect.TypeOf((*MonthlyTaskScheduler)(nil)).Elem()\n}\n\ntype MountError struct {\n\tCustomizationFault\n\n\tVm        ManagedObjectReference `xml:\"vm\"`\n\tDiskIndex int32                  `xml:\"diskIndex\"`\n}\n\nfunc init() {\n\tt[\"MountError\"] = reflect.TypeOf((*MountError)(nil)).Elem()\n}\n\ntype MountErrorFault MountError\n\nfunc init() {\n\tt[\"MountErrorFault\"] = reflect.TypeOf((*MountErrorFault)(nil)).Elem()\n}\n\ntype MountToolsInstaller MountToolsInstallerRequestType\n\nfunc init() {\n\tt[\"MountToolsInstaller\"] = reflect.TypeOf((*MountToolsInstaller)(nil)).Elem()\n}\n\ntype MountToolsInstallerRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"MountToolsInstallerRequestType\"] = reflect.TypeOf((*MountToolsInstallerRequestType)(nil)).Elem()\n}\n\ntype MountToolsInstallerResponse struct {\n}\n\ntype MountVffsVolume MountVffsVolumeRequestType\n\nfunc init() {\n\tt[\"MountVffsVolume\"] = reflect.TypeOf((*MountVffsVolume)(nil)).Elem()\n}\n\ntype MountVffsVolumeRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tVffsUuid string               
  `xml:\"vffsUuid\"`\n}\n\nfunc init() {\n\tt[\"MountVffsVolumeRequestType\"] = reflect.TypeOf((*MountVffsVolumeRequestType)(nil)).Elem()\n}\n\ntype MountVffsVolumeResponse struct {\n}\n\ntype MountVmfsVolume MountVmfsVolumeRequestType\n\nfunc init() {\n\tt[\"MountVmfsVolume\"] = reflect.TypeOf((*MountVmfsVolume)(nil)).Elem()\n}\n\ntype MountVmfsVolumeExRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tVmfsUuid []string               `xml:\"vmfsUuid\"`\n}\n\nfunc init() {\n\tt[\"MountVmfsVolumeExRequestType\"] = reflect.TypeOf((*MountVmfsVolumeExRequestType)(nil)).Elem()\n}\n\ntype MountVmfsVolumeEx_Task MountVmfsVolumeExRequestType\n\nfunc init() {\n\tt[\"MountVmfsVolumeEx_Task\"] = reflect.TypeOf((*MountVmfsVolumeEx_Task)(nil)).Elem()\n}\n\ntype MountVmfsVolumeEx_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype MountVmfsVolumeRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tVmfsUuid string                 `xml:\"vmfsUuid\"`\n}\n\nfunc init() {\n\tt[\"MountVmfsVolumeRequestType\"] = reflect.TypeOf((*MountVmfsVolumeRequestType)(nil)).Elem()\n}\n\ntype MountVmfsVolumeResponse struct {\n}\n\ntype MoveDVPortRequestType struct {\n\tThis                    ManagedObjectReference `xml:\"_this\"`\n\tPortKey                 []string               `xml:\"portKey\"`\n\tDestinationPortgroupKey string                 `xml:\"destinationPortgroupKey,omitempty\"`\n}\n\nfunc init() {\n\tt[\"MoveDVPortRequestType\"] = reflect.TypeOf((*MoveDVPortRequestType)(nil)).Elem()\n}\n\ntype MoveDVPort_Task MoveDVPortRequestType\n\nfunc init() {\n\tt[\"MoveDVPort_Task\"] = reflect.TypeOf((*MoveDVPort_Task)(nil)).Elem()\n}\n\ntype MoveDVPort_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype MoveDatastoreFileRequestType struct {\n\tThis                  ManagedObjectReference  `xml:\"_this\"`\n\tSourceName            string                  
`xml:\"sourceName\"`\n\tSourceDatacenter      *ManagedObjectReference `xml:\"sourceDatacenter,omitempty\"`\n\tDestinationName       string                  `xml:\"destinationName\"`\n\tDestinationDatacenter *ManagedObjectReference `xml:\"destinationDatacenter,omitempty\"`\n\tForce                 *bool                   `xml:\"force\"`\n}\n\nfunc init() {\n\tt[\"MoveDatastoreFileRequestType\"] = reflect.TypeOf((*MoveDatastoreFileRequestType)(nil)).Elem()\n}\n\ntype MoveDatastoreFile_Task MoveDatastoreFileRequestType\n\nfunc init() {\n\tt[\"MoveDatastoreFile_Task\"] = reflect.TypeOf((*MoveDatastoreFile_Task)(nil)).Elem()\n}\n\ntype MoveDatastoreFile_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype MoveDirectoryInGuest MoveDirectoryInGuestRequestType\n\nfunc init() {\n\tt[\"MoveDirectoryInGuest\"] = reflect.TypeOf((*MoveDirectoryInGuest)(nil)).Elem()\n}\n\ntype MoveDirectoryInGuestRequestType struct {\n\tThis             ManagedObjectReference  `xml:\"_this\"`\n\tVm               ManagedObjectReference  `xml:\"vm\"`\n\tAuth             BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tSrcDirectoryPath string                  `xml:\"srcDirectoryPath\"`\n\tDstDirectoryPath string                  `xml:\"dstDirectoryPath\"`\n}\n\nfunc init() {\n\tt[\"MoveDirectoryInGuestRequestType\"] = reflect.TypeOf((*MoveDirectoryInGuestRequestType)(nil)).Elem()\n}\n\ntype MoveDirectoryInGuestResponse struct {\n}\n\ntype MoveFileInGuest MoveFileInGuestRequestType\n\nfunc init() {\n\tt[\"MoveFileInGuest\"] = reflect.TypeOf((*MoveFileInGuest)(nil)).Elem()\n}\n\ntype MoveFileInGuestRequestType struct {\n\tThis        ManagedObjectReference  `xml:\"_this\"`\n\tVm          ManagedObjectReference  `xml:\"vm\"`\n\tAuth        BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tSrcFilePath string                  `xml:\"srcFilePath\"`\n\tDstFilePath string                  `xml:\"dstFilePath\"`\n\tOverwrite   bool                    
`xml:\"overwrite\"`\n}\n\nfunc init() {\n\tt[\"MoveFileInGuestRequestType\"] = reflect.TypeOf((*MoveFileInGuestRequestType)(nil)).Elem()\n}\n\ntype MoveFileInGuestResponse struct {\n}\n\ntype MoveHostIntoRequestType struct {\n\tThis         ManagedObjectReference  `xml:\"_this\"`\n\tHost         ManagedObjectReference  `xml:\"host\"`\n\tResourcePool *ManagedObjectReference `xml:\"resourcePool,omitempty\"`\n}\n\nfunc init() {\n\tt[\"MoveHostIntoRequestType\"] = reflect.TypeOf((*MoveHostIntoRequestType)(nil)).Elem()\n}\n\ntype MoveHostInto_Task MoveHostIntoRequestType\n\nfunc init() {\n\tt[\"MoveHostInto_Task\"] = reflect.TypeOf((*MoveHostInto_Task)(nil)).Elem()\n}\n\ntype MoveHostInto_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype MoveIntoFolderRequestType struct {\n\tThis ManagedObjectReference   `xml:\"_this\"`\n\tList []ManagedObjectReference `xml:\"list\"`\n}\n\nfunc init() {\n\tt[\"MoveIntoFolderRequestType\"] = reflect.TypeOf((*MoveIntoFolderRequestType)(nil)).Elem()\n}\n\ntype MoveIntoFolder_Task MoveIntoFolderRequestType\n\nfunc init() {\n\tt[\"MoveIntoFolder_Task\"] = reflect.TypeOf((*MoveIntoFolder_Task)(nil)).Elem()\n}\n\ntype MoveIntoFolder_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype MoveIntoRequestType struct {\n\tThis ManagedObjectReference   `xml:\"_this\"`\n\tHost []ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"MoveIntoRequestType\"] = reflect.TypeOf((*MoveIntoRequestType)(nil)).Elem()\n}\n\ntype MoveIntoResourcePool MoveIntoResourcePoolRequestType\n\nfunc init() {\n\tt[\"MoveIntoResourcePool\"] = reflect.TypeOf((*MoveIntoResourcePool)(nil)).Elem()\n}\n\ntype MoveIntoResourcePoolRequestType struct {\n\tThis ManagedObjectReference   `xml:\"_this\"`\n\tList []ManagedObjectReference `xml:\"list\"`\n}\n\nfunc init() {\n\tt[\"MoveIntoResourcePoolRequestType\"] = reflect.TypeOf((*MoveIntoResourcePoolRequestType)(nil)).Elem()\n}\n\ntype 
MoveIntoResourcePoolResponse struct {\n}\n\ntype MoveInto_Task MoveIntoRequestType\n\nfunc init() {\n\tt[\"MoveInto_Task\"] = reflect.TypeOf((*MoveInto_Task)(nil)).Elem()\n}\n\ntype MoveInto_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype MoveVirtualDiskRequestType struct {\n\tThis             ManagedObjectReference          `xml:\"_this\"`\n\tSourceName       string                          `xml:\"sourceName\"`\n\tSourceDatacenter *ManagedObjectReference         `xml:\"sourceDatacenter,omitempty\"`\n\tDestName         string                          `xml:\"destName\"`\n\tDestDatacenter   *ManagedObjectReference         `xml:\"destDatacenter,omitempty\"`\n\tForce            *bool                           `xml:\"force\"`\n\tProfile          []BaseVirtualMachineProfileSpec `xml:\"profile,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"MoveVirtualDiskRequestType\"] = reflect.TypeOf((*MoveVirtualDiskRequestType)(nil)).Elem()\n}\n\ntype MoveVirtualDisk_Task MoveVirtualDiskRequestType\n\nfunc init() {\n\tt[\"MoveVirtualDisk_Task\"] = reflect.TypeOf((*MoveVirtualDisk_Task)(nil)).Elem()\n}\n\ntype MoveVirtualDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype MtuMatchEvent struct {\n\tDvsHealthStatusChangeEvent\n}\n\nfunc init() {\n\tt[\"MtuMatchEvent\"] = reflect.TypeOf((*MtuMatchEvent)(nil)).Elem()\n}\n\ntype MtuMismatchEvent struct {\n\tDvsHealthStatusChangeEvent\n}\n\nfunc init() {\n\tt[\"MtuMismatchEvent\"] = reflect.TypeOf((*MtuMismatchEvent)(nil)).Elem()\n}\n\ntype MultiWriterNotSupported struct {\n\tDeviceNotSupported\n}\n\nfunc init() {\n\tt[\"MultiWriterNotSupported\"] = reflect.TypeOf((*MultiWriterNotSupported)(nil)).Elem()\n}\n\ntype MultiWriterNotSupportedFault MultiWriterNotSupported\n\nfunc init() {\n\tt[\"MultiWriterNotSupportedFault\"] = reflect.TypeOf((*MultiWriterNotSupportedFault)(nil)).Elem()\n}\n\ntype MultipleCertificatesVerifyFault struct 
{\n\tHostConnectFault\n\n\tThumbprintData []MultipleCertificatesVerifyFaultThumbprintData `xml:\"thumbprintData\"`\n}\n\nfunc init() {\n\tt[\"MultipleCertificatesVerifyFault\"] = reflect.TypeOf((*MultipleCertificatesVerifyFault)(nil)).Elem()\n}\n\ntype MultipleCertificatesVerifyFaultFault MultipleCertificatesVerifyFault\n\nfunc init() {\n\tt[\"MultipleCertificatesVerifyFaultFault\"] = reflect.TypeOf((*MultipleCertificatesVerifyFaultFault)(nil)).Elem()\n}\n\ntype MultipleCertificatesVerifyFaultThumbprintData struct {\n\tDynamicData\n\n\tPort       int32  `xml:\"port\"`\n\tThumbprint string `xml:\"thumbprint\"`\n}\n\nfunc init() {\n\tt[\"MultipleCertificatesVerifyFaultThumbprintData\"] = reflect.TypeOf((*MultipleCertificatesVerifyFaultThumbprintData)(nil)).Elem()\n}\n\ntype MultipleSnapshotsNotSupported struct {\n\tSnapshotFault\n}\n\nfunc init() {\n\tt[\"MultipleSnapshotsNotSupported\"] = reflect.TypeOf((*MultipleSnapshotsNotSupported)(nil)).Elem()\n}\n\ntype MultipleSnapshotsNotSupportedFault MultipleSnapshotsNotSupported\n\nfunc init() {\n\tt[\"MultipleSnapshotsNotSupportedFault\"] = reflect.TypeOf((*MultipleSnapshotsNotSupportedFault)(nil)).Elem()\n}\n\ntype NASDatastoreCreatedEvent struct {\n\tHostEvent\n\n\tDatastore    DatastoreEventArgument `xml:\"datastore\"`\n\tDatastoreUrl string                 `xml:\"datastoreUrl,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NASDatastoreCreatedEvent\"] = reflect.TypeOf((*NASDatastoreCreatedEvent)(nil)).Elem()\n}\n\ntype NamePasswordAuthentication struct {\n\tGuestAuthentication\n\n\tUsername string `xml:\"username\"`\n\tPassword string `xml:\"password\"`\n}\n\nfunc init() {\n\tt[\"NamePasswordAuthentication\"] = reflect.TypeOf((*NamePasswordAuthentication)(nil)).Elem()\n}\n\ntype NamespaceFull struct {\n\tVimFault\n\n\tName           string `xml:\"name\"`\n\tCurrentMaxSize int64  `xml:\"currentMaxSize\"`\n\tRequiredSize   int64  `xml:\"requiredSize,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NamespaceFull\"] = 
reflect.TypeOf((*NamespaceFull)(nil)).Elem()\n}\n\ntype NamespaceFullFault NamespaceFull\n\nfunc init() {\n\tt[\"NamespaceFullFault\"] = reflect.TypeOf((*NamespaceFullFault)(nil)).Elem()\n}\n\ntype NamespaceLimitReached struct {\n\tVimFault\n\n\tLimit int32 `xml:\"limit,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NamespaceLimitReached\"] = reflect.TypeOf((*NamespaceLimitReached)(nil)).Elem()\n}\n\ntype NamespaceLimitReachedFault NamespaceLimitReached\n\nfunc init() {\n\tt[\"NamespaceLimitReachedFault\"] = reflect.TypeOf((*NamespaceLimitReachedFault)(nil)).Elem()\n}\n\ntype NamespaceWriteProtected struct {\n\tVimFault\n\n\tName string `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"NamespaceWriteProtected\"] = reflect.TypeOf((*NamespaceWriteProtected)(nil)).Elem()\n}\n\ntype NamespaceWriteProtectedFault NamespaceWriteProtected\n\nfunc init() {\n\tt[\"NamespaceWriteProtectedFault\"] = reflect.TypeOf((*NamespaceWriteProtectedFault)(nil)).Elem()\n}\n\ntype NasConfigFault struct {\n\tHostConfigFault\n\n\tName string `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"NasConfigFault\"] = reflect.TypeOf((*NasConfigFault)(nil)).Elem()\n}\n\ntype NasConfigFaultFault BaseNasConfigFault\n\nfunc init() {\n\tt[\"NasConfigFaultFault\"] = reflect.TypeOf((*NasConfigFaultFault)(nil)).Elem()\n}\n\ntype NasConnectionLimitReached struct {\n\tNasConfigFault\n\n\tRemoteHost string `xml:\"remoteHost\"`\n\tRemotePath string `xml:\"remotePath\"`\n}\n\nfunc init() {\n\tt[\"NasConnectionLimitReached\"] = reflect.TypeOf((*NasConnectionLimitReached)(nil)).Elem()\n}\n\ntype NasConnectionLimitReachedFault NasConnectionLimitReached\n\nfunc init() {\n\tt[\"NasConnectionLimitReachedFault\"] = reflect.TypeOf((*NasConnectionLimitReachedFault)(nil)).Elem()\n}\n\ntype NasDatastoreInfo struct {\n\tDatastoreInfo\n\n\tNas *HostNasVolume `xml:\"nas,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NasDatastoreInfo\"] = reflect.TypeOf((*NasDatastoreInfo)(nil)).Elem()\n}\n\ntype NasSessionCredentialConflict struct 
{\n\tNasConfigFault\n\n\tRemoteHost string `xml:\"remoteHost\"`\n\tRemotePath string `xml:\"remotePath\"`\n\tUserName   string `xml:\"userName\"`\n}\n\nfunc init() {\n\tt[\"NasSessionCredentialConflict\"] = reflect.TypeOf((*NasSessionCredentialConflict)(nil)).Elem()\n}\n\ntype NasSessionCredentialConflictFault NasSessionCredentialConflict\n\nfunc init() {\n\tt[\"NasSessionCredentialConflictFault\"] = reflect.TypeOf((*NasSessionCredentialConflictFault)(nil)).Elem()\n}\n\ntype NasStorageProfile struct {\n\tApplyProfile\n\n\tKey string `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"NasStorageProfile\"] = reflect.TypeOf((*NasStorageProfile)(nil)).Elem()\n}\n\ntype NasVolumeNotMounted struct {\n\tNasConfigFault\n\n\tRemoteHost string `xml:\"remoteHost\"`\n\tRemotePath string `xml:\"remotePath\"`\n}\n\nfunc init() {\n\tt[\"NasVolumeNotMounted\"] = reflect.TypeOf((*NasVolumeNotMounted)(nil)).Elem()\n}\n\ntype NasVolumeNotMountedFault NasVolumeNotMounted\n\nfunc init() {\n\tt[\"NasVolumeNotMountedFault\"] = reflect.TypeOf((*NasVolumeNotMountedFault)(nil)).Elem()\n}\n\ntype NegatableExpression struct {\n\tDynamicData\n\n\tNegate *bool `xml:\"negate\"`\n}\n\nfunc init() {\n\tt[\"NegatableExpression\"] = reflect.TypeOf((*NegatableExpression)(nil)).Elem()\n}\n\ntype NetBIOSConfigInfo struct {\n\tDynamicData\n\n\tMode string `xml:\"mode\"`\n}\n\nfunc init() {\n\tt[\"NetBIOSConfigInfo\"] = reflect.TypeOf((*NetBIOSConfigInfo)(nil)).Elem()\n}\n\ntype NetDhcpConfigInfo struct {\n\tDynamicData\n\n\tIpv6 *NetDhcpConfigInfoDhcpOptions `xml:\"ipv6,omitempty\"`\n\tIpv4 *NetDhcpConfigInfoDhcpOptions `xml:\"ipv4,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NetDhcpConfigInfo\"] = reflect.TypeOf((*NetDhcpConfigInfo)(nil)).Elem()\n}\n\ntype NetDhcpConfigInfoDhcpOptions struct {\n\tDynamicData\n\n\tEnable bool       `xml:\"enable\"`\n\tConfig []KeyValue `xml:\"config,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NetDhcpConfigInfoDhcpOptions\"] = 
reflect.TypeOf((*NetDhcpConfigInfoDhcpOptions)(nil)).Elem()\n}\n\ntype NetDhcpConfigSpec struct {\n\tDynamicData\n\n\tIpv6 *NetDhcpConfigSpecDhcpOptionsSpec `xml:\"ipv6,omitempty\"`\n\tIpv4 *NetDhcpConfigSpecDhcpOptionsSpec `xml:\"ipv4,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NetDhcpConfigSpec\"] = reflect.TypeOf((*NetDhcpConfigSpec)(nil)).Elem()\n}\n\ntype NetDhcpConfigSpecDhcpOptionsSpec struct {\n\tDynamicData\n\n\tEnable    *bool      `xml:\"enable\"`\n\tConfig    []KeyValue `xml:\"config\"`\n\tOperation string     `xml:\"operation\"`\n}\n\nfunc init() {\n\tt[\"NetDhcpConfigSpecDhcpOptionsSpec\"] = reflect.TypeOf((*NetDhcpConfigSpecDhcpOptionsSpec)(nil)).Elem()\n}\n\ntype NetDnsConfigInfo struct {\n\tDynamicData\n\n\tDhcp         bool     `xml:\"dhcp\"`\n\tHostName     string   `xml:\"hostName\"`\n\tDomainName   string   `xml:\"domainName\"`\n\tIpAddress    []string `xml:\"ipAddress,omitempty\"`\n\tSearchDomain []string `xml:\"searchDomain,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NetDnsConfigInfo\"] = reflect.TypeOf((*NetDnsConfigInfo)(nil)).Elem()\n}\n\ntype NetDnsConfigSpec struct {\n\tDynamicData\n\n\tDhcp         *bool    `xml:\"dhcp\"`\n\tHostName     string   `xml:\"hostName,omitempty\"`\n\tDomainName   string   `xml:\"domainName,omitempty\"`\n\tIpAddress    []string `xml:\"ipAddress,omitempty\"`\n\tSearchDomain []string `xml:\"searchDomain,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NetDnsConfigSpec\"] = reflect.TypeOf((*NetDnsConfigSpec)(nil)).Elem()\n}\n\ntype NetIpConfigInfo struct {\n\tDynamicData\n\n\tIpAddress                []NetIpConfigInfoIpAddress `xml:\"ipAddress,omitempty\"`\n\tDhcp                     *NetDhcpConfigInfo         `xml:\"dhcp,omitempty\"`\n\tAutoConfigurationEnabled *bool                      `xml:\"autoConfigurationEnabled\"`\n}\n\nfunc init() {\n\tt[\"NetIpConfigInfo\"] = reflect.TypeOf((*NetIpConfigInfo)(nil)).Elem()\n}\n\ntype NetIpConfigInfoIpAddress struct {\n\tDynamicData\n\n\tIpAddress    string     
`xml:\"ipAddress\"`\n\tPrefixLength int32      `xml:\"prefixLength\"`\n\tOrigin       string     `xml:\"origin,omitempty\"`\n\tState        string     `xml:\"state,omitempty\"`\n\tLifetime     *time.Time `xml:\"lifetime\"`\n}\n\nfunc init() {\n\tt[\"NetIpConfigInfoIpAddress\"] = reflect.TypeOf((*NetIpConfigInfoIpAddress)(nil)).Elem()\n}\n\ntype NetIpConfigSpec struct {\n\tDynamicData\n\n\tIpAddress                []NetIpConfigSpecIpAddressSpec `xml:\"ipAddress,omitempty\"`\n\tDhcp                     *NetDhcpConfigSpec             `xml:\"dhcp,omitempty\"`\n\tAutoConfigurationEnabled *bool                          `xml:\"autoConfigurationEnabled\"`\n}\n\nfunc init() {\n\tt[\"NetIpConfigSpec\"] = reflect.TypeOf((*NetIpConfigSpec)(nil)).Elem()\n}\n\ntype NetIpConfigSpecIpAddressSpec struct {\n\tDynamicData\n\n\tIpAddress    string `xml:\"ipAddress\"`\n\tPrefixLength int32  `xml:\"prefixLength\"`\n\tOperation    string `xml:\"operation\"`\n}\n\nfunc init() {\n\tt[\"NetIpConfigSpecIpAddressSpec\"] = reflect.TypeOf((*NetIpConfigSpecIpAddressSpec)(nil)).Elem()\n}\n\ntype NetIpRouteConfigInfo struct {\n\tDynamicData\n\n\tIpRoute []NetIpRouteConfigInfoIpRoute `xml:\"ipRoute,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NetIpRouteConfigInfo\"] = reflect.TypeOf((*NetIpRouteConfigInfo)(nil)).Elem()\n}\n\ntype NetIpRouteConfigInfoGateway struct {\n\tDynamicData\n\n\tIpAddress string `xml:\"ipAddress,omitempty\"`\n\tDevice    string `xml:\"device,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NetIpRouteConfigInfoGateway\"] = reflect.TypeOf((*NetIpRouteConfigInfoGateway)(nil)).Elem()\n}\n\ntype NetIpRouteConfigInfoIpRoute struct {\n\tDynamicData\n\n\tNetwork      string                      `xml:\"network\"`\n\tPrefixLength int32                       `xml:\"prefixLength\"`\n\tGateway      NetIpRouteConfigInfoGateway `xml:\"gateway\"`\n}\n\nfunc init() {\n\tt[\"NetIpRouteConfigInfoIpRoute\"] = reflect.TypeOf((*NetIpRouteConfigInfoIpRoute)(nil)).Elem()\n}\n\ntype NetIpRouteConfigSpec struct 
{\n\tDynamicData\n\n\tIpRoute []NetIpRouteConfigSpecIpRouteSpec `xml:\"ipRoute,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NetIpRouteConfigSpec\"] = reflect.TypeOf((*NetIpRouteConfigSpec)(nil)).Elem()\n}\n\ntype NetIpRouteConfigSpecGatewaySpec struct {\n\tDynamicData\n\n\tIpAddress string `xml:\"ipAddress,omitempty\"`\n\tDevice    string `xml:\"device,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NetIpRouteConfigSpecGatewaySpec\"] = reflect.TypeOf((*NetIpRouteConfigSpecGatewaySpec)(nil)).Elem()\n}\n\ntype NetIpRouteConfigSpecIpRouteSpec struct {\n\tDynamicData\n\n\tNetwork      string                          `xml:\"network\"`\n\tPrefixLength int32                           `xml:\"prefixLength\"`\n\tGateway      NetIpRouteConfigSpecGatewaySpec `xml:\"gateway\"`\n\tOperation    string                          `xml:\"operation\"`\n}\n\nfunc init() {\n\tt[\"NetIpRouteConfigSpecIpRouteSpec\"] = reflect.TypeOf((*NetIpRouteConfigSpecIpRouteSpec)(nil)).Elem()\n}\n\ntype NetIpStackInfo struct {\n\tDynamicData\n\n\tNeighbor      []NetIpStackInfoNetToMedia    `xml:\"neighbor,omitempty\"`\n\tDefaultRouter []NetIpStackInfoDefaultRouter `xml:\"defaultRouter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NetIpStackInfo\"] = reflect.TypeOf((*NetIpStackInfo)(nil)).Elem()\n}\n\ntype NetIpStackInfoDefaultRouter struct {\n\tDynamicData\n\n\tIpAddress  string    `xml:\"ipAddress\"`\n\tDevice     string    `xml:\"device\"`\n\tLifetime   time.Time `xml:\"lifetime\"`\n\tPreference string    `xml:\"preference\"`\n}\n\nfunc init() {\n\tt[\"NetIpStackInfoDefaultRouter\"] = reflect.TypeOf((*NetIpStackInfoDefaultRouter)(nil)).Elem()\n}\n\ntype NetIpStackInfoNetToMedia struct {\n\tDynamicData\n\n\tIpAddress       string `xml:\"ipAddress\"`\n\tPhysicalAddress string `xml:\"physicalAddress\"`\n\tDevice          string `xml:\"device\"`\n\tType            string `xml:\"type\"`\n}\n\nfunc init() {\n\tt[\"NetIpStackInfoNetToMedia\"] = reflect.TypeOf((*NetIpStackInfoNetToMedia)(nil)).Elem()\n}\n\ntype 
NetStackInstanceProfile struct {\n\tApplyProfile\n\n\tKey           string                         `xml:\"key\"`\n\tDnsConfig     NetworkProfileDnsConfigProfile `xml:\"dnsConfig\"`\n\tIpRouteConfig IpRouteProfile                 `xml:\"ipRouteConfig\"`\n}\n\nfunc init() {\n\tt[\"NetStackInstanceProfile\"] = reflect.TypeOf((*NetStackInstanceProfile)(nil)).Elem()\n}\n\ntype NetworkCopyFault struct {\n\tFileFault\n}\n\nfunc init() {\n\tt[\"NetworkCopyFault\"] = reflect.TypeOf((*NetworkCopyFault)(nil)).Elem()\n}\n\ntype NetworkCopyFaultFault NetworkCopyFault\n\nfunc init() {\n\tt[\"NetworkCopyFaultFault\"] = reflect.TypeOf((*NetworkCopyFaultFault)(nil)).Elem()\n}\n\ntype NetworkDisruptedAndConfigRolledBack struct {\n\tVimFault\n\n\tHost string `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"NetworkDisruptedAndConfigRolledBack\"] = reflect.TypeOf((*NetworkDisruptedAndConfigRolledBack)(nil)).Elem()\n}\n\ntype NetworkDisruptedAndConfigRolledBackFault NetworkDisruptedAndConfigRolledBack\n\nfunc init() {\n\tt[\"NetworkDisruptedAndConfigRolledBackFault\"] = reflect.TypeOf((*NetworkDisruptedAndConfigRolledBackFault)(nil)).Elem()\n}\n\ntype NetworkEventArgument struct {\n\tEntityEventArgument\n\n\tNetwork ManagedObjectReference `xml:\"network\"`\n}\n\nfunc init() {\n\tt[\"NetworkEventArgument\"] = reflect.TypeOf((*NetworkEventArgument)(nil)).Elem()\n}\n\ntype NetworkInaccessible struct {\n\tNasConfigFault\n}\n\nfunc init() {\n\tt[\"NetworkInaccessible\"] = reflect.TypeOf((*NetworkInaccessible)(nil)).Elem()\n}\n\ntype NetworkInaccessibleFault NetworkInaccessible\n\nfunc init() {\n\tt[\"NetworkInaccessibleFault\"] = reflect.TypeOf((*NetworkInaccessibleFault)(nil)).Elem()\n}\n\ntype NetworkPolicyProfile struct {\n\tApplyProfile\n}\n\nfunc init() {\n\tt[\"NetworkPolicyProfile\"] = reflect.TypeOf((*NetworkPolicyProfile)(nil)).Elem()\n}\n\ntype NetworkProfile struct {\n\tApplyProfile\n\n\tVswitch                 []VirtualSwitchProfile           `xml:\"vswitch,omitempty\"`\n\tVmPortGroup   
          []VmPortGroupProfile             `xml:\"vmPortGroup,omitempty\"`\n\tHostPortGroup           []HostPortGroupProfile           `xml:\"hostPortGroup,omitempty\"`\n\tServiceConsolePortGroup []ServiceConsolePortGroupProfile `xml:\"serviceConsolePortGroup,omitempty\"`\n\tDnsConfig               *NetworkProfileDnsConfigProfile  `xml:\"dnsConfig,omitempty\"`\n\tIpRouteConfig           *IpRouteProfile                  `xml:\"ipRouteConfig,omitempty\"`\n\tConsoleIpRouteConfig    *IpRouteProfile                  `xml:\"consoleIpRouteConfig,omitempty\"`\n\tPnic                    []PhysicalNicProfile             `xml:\"pnic,omitempty\"`\n\tDvswitch                []DvsProfile                     `xml:\"dvswitch,omitempty\"`\n\tDvsServiceConsoleNic    []DvsServiceConsoleVNicProfile   `xml:\"dvsServiceConsoleNic,omitempty\"`\n\tDvsHostNic              []DvsHostVNicProfile             `xml:\"dvsHostNic,omitempty\"`\n\tNetStackInstance        []NetStackInstanceProfile        `xml:\"netStackInstance,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NetworkProfile\"] = reflect.TypeOf((*NetworkProfile)(nil)).Elem()\n}\n\ntype NetworkProfileDnsConfigProfile struct {\n\tApplyProfile\n}\n\nfunc init() {\n\tt[\"NetworkProfileDnsConfigProfile\"] = reflect.TypeOf((*NetworkProfileDnsConfigProfile)(nil)).Elem()\n}\n\ntype NetworkRollbackEvent struct {\n\tEvent\n\n\tMethodName    string `xml:\"methodName\"`\n\tTransactionId string `xml:\"transactionId\"`\n}\n\nfunc init() {\n\tt[\"NetworkRollbackEvent\"] = reflect.TypeOf((*NetworkRollbackEvent)(nil)).Elem()\n}\n\ntype NetworkSummary struct {\n\tDynamicData\n\n\tNetwork    *ManagedObjectReference `xml:\"network,omitempty\"`\n\tName       string                  `xml:\"name\"`\n\tAccessible bool                    `xml:\"accessible\"`\n\tIpPoolName string                  `xml:\"ipPoolName,omitempty\"`\n\tIpPoolId   int32                   `xml:\"ipPoolId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NetworkSummary\"] = 
reflect.TypeOf((*NetworkSummary)(nil)).Elem()\n}\n\ntype NetworksMayNotBeTheSame struct {\n\tMigrationFault\n\n\tName string `xml:\"name,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NetworksMayNotBeTheSame\"] = reflect.TypeOf((*NetworksMayNotBeTheSame)(nil)).Elem()\n}\n\ntype NetworksMayNotBeTheSameFault NetworksMayNotBeTheSame\n\nfunc init() {\n\tt[\"NetworksMayNotBeTheSameFault\"] = reflect.TypeOf((*NetworksMayNotBeTheSameFault)(nil)).Elem()\n}\n\ntype NicSettingMismatch struct {\n\tCustomizationFault\n\n\tNumberOfNicsInSpec int32 `xml:\"numberOfNicsInSpec\"`\n\tNumberOfNicsInVM   int32 `xml:\"numberOfNicsInVM\"`\n}\n\nfunc init() {\n\tt[\"NicSettingMismatch\"] = reflect.TypeOf((*NicSettingMismatch)(nil)).Elem()\n}\n\ntype NicSettingMismatchFault NicSettingMismatch\n\nfunc init() {\n\tt[\"NicSettingMismatchFault\"] = reflect.TypeOf((*NicSettingMismatchFault)(nil)).Elem()\n}\n\ntype NoAccessUserEvent struct {\n\tSessionEvent\n\n\tIpAddress string `xml:\"ipAddress\"`\n}\n\nfunc init() {\n\tt[\"NoAccessUserEvent\"] = reflect.TypeOf((*NoAccessUserEvent)(nil)).Elem()\n}\n\ntype NoActiveHostInCluster struct {\n\tInvalidState\n\n\tComputeResource ManagedObjectReference `xml:\"computeResource\"`\n}\n\nfunc init() {\n\tt[\"NoActiveHostInCluster\"] = reflect.TypeOf((*NoActiveHostInCluster)(nil)).Elem()\n}\n\ntype NoActiveHostInClusterFault NoActiveHostInCluster\n\nfunc init() {\n\tt[\"NoActiveHostInClusterFault\"] = reflect.TypeOf((*NoActiveHostInClusterFault)(nil)).Elem()\n}\n\ntype NoAvailableIp struct {\n\tVAppPropertyFault\n\n\tNetwork ManagedObjectReference `xml:\"network\"`\n}\n\nfunc init() {\n\tt[\"NoAvailableIp\"] = reflect.TypeOf((*NoAvailableIp)(nil)).Elem()\n}\n\ntype NoAvailableIpFault NoAvailableIp\n\nfunc init() {\n\tt[\"NoAvailableIpFault\"] = reflect.TypeOf((*NoAvailableIpFault)(nil)).Elem()\n}\n\ntype NoClientCertificate struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"NoClientCertificate\"] = reflect.TypeOf((*NoClientCertificate)(nil)).Elem()\n}\n\ntype 
NoClientCertificateFault NoClientCertificate\n\nfunc init() {\n\tt[\"NoClientCertificateFault\"] = reflect.TypeOf((*NoClientCertificateFault)(nil)).Elem()\n}\n\ntype NoCompatibleDatastore struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"NoCompatibleDatastore\"] = reflect.TypeOf((*NoCompatibleDatastore)(nil)).Elem()\n}\n\ntype NoCompatibleDatastoreFault NoCompatibleDatastore\n\nfunc init() {\n\tt[\"NoCompatibleDatastoreFault\"] = reflect.TypeOf((*NoCompatibleDatastoreFault)(nil)).Elem()\n}\n\ntype NoCompatibleHardAffinityHost struct {\n\tVmConfigFault\n\n\tVmName string `xml:\"vmName\"`\n}\n\nfunc init() {\n\tt[\"NoCompatibleHardAffinityHost\"] = reflect.TypeOf((*NoCompatibleHardAffinityHost)(nil)).Elem()\n}\n\ntype NoCompatibleHardAffinityHostFault NoCompatibleHardAffinityHost\n\nfunc init() {\n\tt[\"NoCompatibleHardAffinityHostFault\"] = reflect.TypeOf((*NoCompatibleHardAffinityHostFault)(nil)).Elem()\n}\n\ntype NoCompatibleHost struct {\n\tVimFault\n\n\tHost  []ManagedObjectReference `xml:\"host,omitempty\"`\n\tError []LocalizedMethodFault   `xml:\"error,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NoCompatibleHost\"] = reflect.TypeOf((*NoCompatibleHost)(nil)).Elem()\n}\n\ntype NoCompatibleHostFault BaseNoCompatibleHost\n\nfunc init() {\n\tt[\"NoCompatibleHostFault\"] = reflect.TypeOf((*NoCompatibleHostFault)(nil)).Elem()\n}\n\ntype NoCompatibleHostWithAccessToDevice struct {\n\tNoCompatibleHost\n}\n\nfunc init() {\n\tt[\"NoCompatibleHostWithAccessToDevice\"] = reflect.TypeOf((*NoCompatibleHostWithAccessToDevice)(nil)).Elem()\n}\n\ntype NoCompatibleHostWithAccessToDeviceFault NoCompatibleHostWithAccessToDevice\n\nfunc init() {\n\tt[\"NoCompatibleHostWithAccessToDeviceFault\"] = reflect.TypeOf((*NoCompatibleHostWithAccessToDeviceFault)(nil)).Elem()\n}\n\ntype NoCompatibleSoftAffinityHost struct {\n\tVmConfigFault\n\n\tVmName string `xml:\"vmName\"`\n}\n\nfunc init() {\n\tt[\"NoCompatibleSoftAffinityHost\"] = 
reflect.TypeOf((*NoCompatibleSoftAffinityHost)(nil)).Elem()\n}\n\ntype NoCompatibleSoftAffinityHostFault NoCompatibleSoftAffinityHost\n\nfunc init() {\n\tt[\"NoCompatibleSoftAffinityHostFault\"] = reflect.TypeOf((*NoCompatibleSoftAffinityHostFault)(nil)).Elem()\n}\n\ntype NoConnectedDatastore struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"NoConnectedDatastore\"] = reflect.TypeOf((*NoConnectedDatastore)(nil)).Elem()\n}\n\ntype NoConnectedDatastoreFault NoConnectedDatastore\n\nfunc init() {\n\tt[\"NoConnectedDatastoreFault\"] = reflect.TypeOf((*NoConnectedDatastoreFault)(nil)).Elem()\n}\n\ntype NoDatastoresConfiguredEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"NoDatastoresConfiguredEvent\"] = reflect.TypeOf((*NoDatastoresConfiguredEvent)(nil)).Elem()\n}\n\ntype NoDiskFound struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"NoDiskFound\"] = reflect.TypeOf((*NoDiskFound)(nil)).Elem()\n}\n\ntype NoDiskFoundFault NoDiskFound\n\nfunc init() {\n\tt[\"NoDiskFoundFault\"] = reflect.TypeOf((*NoDiskFoundFault)(nil)).Elem()\n}\n\ntype NoDiskSpace struct {\n\tFileFault\n\n\tDatastore string `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"NoDiskSpace\"] = reflect.TypeOf((*NoDiskSpace)(nil)).Elem()\n}\n\ntype NoDiskSpaceFault NoDiskSpace\n\nfunc init() {\n\tt[\"NoDiskSpaceFault\"] = reflect.TypeOf((*NoDiskSpaceFault)(nil)).Elem()\n}\n\ntype NoDisksToCustomize struct {\n\tCustomizationFault\n}\n\nfunc init() {\n\tt[\"NoDisksToCustomize\"] = reflect.TypeOf((*NoDisksToCustomize)(nil)).Elem()\n}\n\ntype NoDisksToCustomizeFault NoDisksToCustomize\n\nfunc init() {\n\tt[\"NoDisksToCustomizeFault\"] = reflect.TypeOf((*NoDisksToCustomizeFault)(nil)).Elem()\n}\n\ntype NoGateway struct {\n\tHostConfigFault\n}\n\nfunc init() {\n\tt[\"NoGateway\"] = reflect.TypeOf((*NoGateway)(nil)).Elem()\n}\n\ntype NoGatewayFault NoGateway\n\nfunc init() {\n\tt[\"NoGatewayFault\"] = reflect.TypeOf((*NoGatewayFault)(nil)).Elem()\n}\n\ntype NoGuestHeartbeat struct {\n\tMigrationFault\n}\n\nfunc init() 
{\n\tt[\"NoGuestHeartbeat\"] = reflect.TypeOf((*NoGuestHeartbeat)(nil)).Elem()\n}\n\ntype NoGuestHeartbeatFault NoGuestHeartbeat\n\nfunc init() {\n\tt[\"NoGuestHeartbeatFault\"] = reflect.TypeOf((*NoGuestHeartbeatFault)(nil)).Elem()\n}\n\ntype NoHost struct {\n\tHostConnectFault\n\n\tName string `xml:\"name,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NoHost\"] = reflect.TypeOf((*NoHost)(nil)).Elem()\n}\n\ntype NoHostFault NoHost\n\nfunc init() {\n\tt[\"NoHostFault\"] = reflect.TypeOf((*NoHostFault)(nil)).Elem()\n}\n\ntype NoHostSuitableForFtSecondary struct {\n\tVmFaultToleranceIssue\n\n\tVm     ManagedObjectReference `xml:\"vm\"`\n\tVmName string                 `xml:\"vmName\"`\n}\n\nfunc init() {\n\tt[\"NoHostSuitableForFtSecondary\"] = reflect.TypeOf((*NoHostSuitableForFtSecondary)(nil)).Elem()\n}\n\ntype NoHostSuitableForFtSecondaryFault NoHostSuitableForFtSecondary\n\nfunc init() {\n\tt[\"NoHostSuitableForFtSecondaryFault\"] = reflect.TypeOf((*NoHostSuitableForFtSecondaryFault)(nil)).Elem()\n}\n\ntype NoLicenseEvent struct {\n\tLicenseEvent\n\n\tFeature LicenseFeatureInfo `xml:\"feature\"`\n}\n\nfunc init() {\n\tt[\"NoLicenseEvent\"] = reflect.TypeOf((*NoLicenseEvent)(nil)).Elem()\n}\n\ntype NoLicenseServerConfigured struct {\n\tNotEnoughLicenses\n}\n\nfunc init() {\n\tt[\"NoLicenseServerConfigured\"] = reflect.TypeOf((*NoLicenseServerConfigured)(nil)).Elem()\n}\n\ntype NoLicenseServerConfiguredFault NoLicenseServerConfigured\n\nfunc init() {\n\tt[\"NoLicenseServerConfiguredFault\"] = reflect.TypeOf((*NoLicenseServerConfiguredFault)(nil)).Elem()\n}\n\ntype NoMaintenanceModeDrsRecommendationForVM struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"NoMaintenanceModeDrsRecommendationForVM\"] = reflect.TypeOf((*NoMaintenanceModeDrsRecommendationForVM)(nil)).Elem()\n}\n\ntype NoPeerHostFound struct {\n\tHostPowerOpFailed\n}\n\nfunc init() {\n\tt[\"NoPeerHostFound\"] = reflect.TypeOf((*NoPeerHostFound)(nil)).Elem()\n}\n\ntype NoPeerHostFoundFault NoPeerHostFound\n\nfunc 
init() {\n\tt[\"NoPeerHostFoundFault\"] = reflect.TypeOf((*NoPeerHostFoundFault)(nil)).Elem()\n}\n\ntype NoPermission struct {\n\tSecurityError\n\n\tObject      ManagedObjectReference `xml:\"object\"`\n\tPrivilegeId string                 `xml:\"privilegeId\"`\n}\n\nfunc init() {\n\tt[\"NoPermission\"] = reflect.TypeOf((*NoPermission)(nil)).Elem()\n}\n\ntype NoPermissionFault BaseNoPermission\n\nfunc init() {\n\tt[\"NoPermissionFault\"] = reflect.TypeOf((*NoPermissionFault)(nil)).Elem()\n}\n\ntype NoPermissionOnAD struct {\n\tActiveDirectoryFault\n}\n\nfunc init() {\n\tt[\"NoPermissionOnAD\"] = reflect.TypeOf((*NoPermissionOnAD)(nil)).Elem()\n}\n\ntype NoPermissionOnADFault NoPermissionOnAD\n\nfunc init() {\n\tt[\"NoPermissionOnADFault\"] = reflect.TypeOf((*NoPermissionOnADFault)(nil)).Elem()\n}\n\ntype NoPermissionOnHost struct {\n\tHostConnectFault\n}\n\nfunc init() {\n\tt[\"NoPermissionOnHost\"] = reflect.TypeOf((*NoPermissionOnHost)(nil)).Elem()\n}\n\ntype NoPermissionOnHostFault NoPermissionOnHost\n\nfunc init() {\n\tt[\"NoPermissionOnHostFault\"] = reflect.TypeOf((*NoPermissionOnHostFault)(nil)).Elem()\n}\n\ntype NoPermissionOnNasVolume struct {\n\tNasConfigFault\n\n\tUserName string `xml:\"userName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NoPermissionOnNasVolume\"] = reflect.TypeOf((*NoPermissionOnNasVolume)(nil)).Elem()\n}\n\ntype NoPermissionOnNasVolumeFault NoPermissionOnNasVolume\n\nfunc init() {\n\tt[\"NoPermissionOnNasVolumeFault\"] = reflect.TypeOf((*NoPermissionOnNasVolumeFault)(nil)).Elem()\n}\n\ntype NoSubjectName struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"NoSubjectName\"] = reflect.TypeOf((*NoSubjectName)(nil)).Elem()\n}\n\ntype NoSubjectNameFault NoSubjectName\n\nfunc init() {\n\tt[\"NoSubjectNameFault\"] = reflect.TypeOf((*NoSubjectNameFault)(nil)).Elem()\n}\n\ntype NoVcManagedIpConfigured struct {\n\tVAppPropertyFault\n}\n\nfunc init() {\n\tt[\"NoVcManagedIpConfigured\"] = reflect.TypeOf((*NoVcManagedIpConfigured)(nil)).Elem()\n}\n\ntype 
NoVcManagedIpConfiguredFault NoVcManagedIpConfigured\n\nfunc init() {\n\tt[\"NoVcManagedIpConfiguredFault\"] = reflect.TypeOf((*NoVcManagedIpConfiguredFault)(nil)).Elem()\n}\n\ntype NoVirtualNic struct {\n\tHostConfigFault\n}\n\nfunc init() {\n\tt[\"NoVirtualNic\"] = reflect.TypeOf((*NoVirtualNic)(nil)).Elem()\n}\n\ntype NoVirtualNicFault NoVirtualNic\n\nfunc init() {\n\tt[\"NoVirtualNicFault\"] = reflect.TypeOf((*NoVirtualNicFault)(nil)).Elem()\n}\n\ntype NoVmInVApp struct {\n\tVAppConfigFault\n}\n\nfunc init() {\n\tt[\"NoVmInVApp\"] = reflect.TypeOf((*NoVmInVApp)(nil)).Elem()\n}\n\ntype NoVmInVAppFault NoVmInVApp\n\nfunc init() {\n\tt[\"NoVmInVAppFault\"] = reflect.TypeOf((*NoVmInVAppFault)(nil)).Elem()\n}\n\ntype NodeDeploymentSpec struct {\n\tDynamicData\n\n\tEsxHost                 *ManagedObjectReference `xml:\"esxHost,omitempty\"`\n\tDatastore               *ManagedObjectReference `xml:\"datastore,omitempty\"`\n\tPublicNetworkPortGroup  *ManagedObjectReference `xml:\"publicNetworkPortGroup,omitempty\"`\n\tClusterNetworkPortGroup *ManagedObjectReference `xml:\"clusterNetworkPortGroup,omitempty\"`\n\tFolder                  ManagedObjectReference  `xml:\"folder\"`\n\tResourcePool            *ManagedObjectReference `xml:\"resourcePool,omitempty\"`\n\tManagementVc            *ServiceLocator         `xml:\"managementVc,omitempty\"`\n\tNodeName                string                  `xml:\"nodeName\"`\n\tIpSettings              CustomizationIPSettings `xml:\"ipSettings\"`\n}\n\nfunc init() {\n\tt[\"NodeDeploymentSpec\"] = reflect.TypeOf((*NodeDeploymentSpec)(nil)).Elem()\n}\n\ntype NodeNetworkSpec struct {\n\tDynamicData\n\n\tIpSettings CustomizationIPSettings `xml:\"ipSettings\"`\n}\n\nfunc init() {\n\tt[\"NodeNetworkSpec\"] = reflect.TypeOf((*NodeNetworkSpec)(nil)).Elem()\n}\n\ntype NonADUserRequired struct {\n\tActiveDirectoryFault\n}\n\nfunc init() {\n\tt[\"NonADUserRequired\"] = reflect.TypeOf((*NonADUserRequired)(nil)).Elem()\n}\n\ntype 
NonADUserRequiredFault NonADUserRequired\n\nfunc init() {\n\tt[\"NonADUserRequiredFault\"] = reflect.TypeOf((*NonADUserRequiredFault)(nil)).Elem()\n}\n\ntype NonHomeRDMVMotionNotSupported struct {\n\tMigrationFeatureNotSupported\n\n\tDevice string `xml:\"device\"`\n}\n\nfunc init() {\n\tt[\"NonHomeRDMVMotionNotSupported\"] = reflect.TypeOf((*NonHomeRDMVMotionNotSupported)(nil)).Elem()\n}\n\ntype NonHomeRDMVMotionNotSupportedFault NonHomeRDMVMotionNotSupported\n\nfunc init() {\n\tt[\"NonHomeRDMVMotionNotSupportedFault\"] = reflect.TypeOf((*NonHomeRDMVMotionNotSupportedFault)(nil)).Elem()\n}\n\ntype NonPersistentDisksNotSupported struct {\n\tDeviceNotSupported\n}\n\nfunc init() {\n\tt[\"NonPersistentDisksNotSupported\"] = reflect.TypeOf((*NonPersistentDisksNotSupported)(nil)).Elem()\n}\n\ntype NonPersistentDisksNotSupportedFault NonPersistentDisksNotSupported\n\nfunc init() {\n\tt[\"NonPersistentDisksNotSupportedFault\"] = reflect.TypeOf((*NonPersistentDisksNotSupportedFault)(nil)).Elem()\n}\n\ntype NonVIWorkloadDetectedOnDatastoreEvent struct {\n\tDatastoreEvent\n}\n\nfunc init() {\n\tt[\"NonVIWorkloadDetectedOnDatastoreEvent\"] = reflect.TypeOf((*NonVIWorkloadDetectedOnDatastoreEvent)(nil)).Elem()\n}\n\ntype NonVmwareOuiMacNotSupportedHost struct {\n\tNotSupportedHost\n\n\tHostName string `xml:\"hostName\"`\n}\n\nfunc init() {\n\tt[\"NonVmwareOuiMacNotSupportedHost\"] = reflect.TypeOf((*NonVmwareOuiMacNotSupportedHost)(nil)).Elem()\n}\n\ntype NonVmwareOuiMacNotSupportedHostFault NonVmwareOuiMacNotSupportedHost\n\nfunc init() {\n\tt[\"NonVmwareOuiMacNotSupportedHostFault\"] = reflect.TypeOf((*NonVmwareOuiMacNotSupportedHostFault)(nil)).Elem()\n}\n\ntype NotADirectory struct {\n\tFileFault\n}\n\nfunc init() {\n\tt[\"NotADirectory\"] = reflect.TypeOf((*NotADirectory)(nil)).Elem()\n}\n\ntype NotADirectoryFault NotADirectory\n\nfunc init() {\n\tt[\"NotADirectoryFault\"] = reflect.TypeOf((*NotADirectoryFault)(nil)).Elem()\n}\n\ntype NotAFile struct 
{\n\tFileFault\n}\n\nfunc init() {\n\tt[\"NotAFile\"] = reflect.TypeOf((*NotAFile)(nil)).Elem()\n}\n\ntype NotAFileFault NotAFile\n\nfunc init() {\n\tt[\"NotAFileFault\"] = reflect.TypeOf((*NotAFileFault)(nil)).Elem()\n}\n\ntype NotAuthenticated struct {\n\tNoPermission\n}\n\nfunc init() {\n\tt[\"NotAuthenticated\"] = reflect.TypeOf((*NotAuthenticated)(nil)).Elem()\n}\n\ntype NotAuthenticatedFault NotAuthenticated\n\nfunc init() {\n\tt[\"NotAuthenticatedFault\"] = reflect.TypeOf((*NotAuthenticatedFault)(nil)).Elem()\n}\n\ntype NotEnoughCpus struct {\n\tVirtualHardwareCompatibilityIssue\n\n\tNumCpuDest int32 `xml:\"numCpuDest\"`\n\tNumCpuVm   int32 `xml:\"numCpuVm\"`\n}\n\nfunc init() {\n\tt[\"NotEnoughCpus\"] = reflect.TypeOf((*NotEnoughCpus)(nil)).Elem()\n}\n\ntype NotEnoughCpusFault BaseNotEnoughCpus\n\nfunc init() {\n\tt[\"NotEnoughCpusFault\"] = reflect.TypeOf((*NotEnoughCpusFault)(nil)).Elem()\n}\n\ntype NotEnoughLicenses struct {\n\tRuntimeFault\n}\n\nfunc init() {\n\tt[\"NotEnoughLicenses\"] = reflect.TypeOf((*NotEnoughLicenses)(nil)).Elem()\n}\n\ntype NotEnoughLicensesFault BaseNotEnoughLicenses\n\nfunc init() {\n\tt[\"NotEnoughLicensesFault\"] = reflect.TypeOf((*NotEnoughLicensesFault)(nil)).Elem()\n}\n\ntype NotEnoughLogicalCpus struct {\n\tNotEnoughCpus\n\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NotEnoughLogicalCpus\"] = reflect.TypeOf((*NotEnoughLogicalCpus)(nil)).Elem()\n}\n\ntype NotEnoughLogicalCpusFault NotEnoughLogicalCpus\n\nfunc init() {\n\tt[\"NotEnoughLogicalCpusFault\"] = reflect.TypeOf((*NotEnoughLogicalCpusFault)(nil)).Elem()\n}\n\ntype NotEnoughResourcesToStartVmEvent struct {\n\tVmEvent\n\n\tReason string `xml:\"reason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NotEnoughResourcesToStartVmEvent\"] = reflect.TypeOf((*NotEnoughResourcesToStartVmEvent)(nil)).Elem()\n}\n\ntype NotFound struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"NotFound\"] = reflect.TypeOf((*NotFound)(nil)).Elem()\n}\n\ntype 
NotFoundFault NotFound\n\nfunc init() {\n\tt[\"NotFoundFault\"] = reflect.TypeOf((*NotFoundFault)(nil)).Elem()\n}\n\ntype NotImplemented struct {\n\tRuntimeFault\n}\n\nfunc init() {\n\tt[\"NotImplemented\"] = reflect.TypeOf((*NotImplemented)(nil)).Elem()\n}\n\ntype NotImplementedFault NotImplemented\n\nfunc init() {\n\tt[\"NotImplementedFault\"] = reflect.TypeOf((*NotImplementedFault)(nil)).Elem()\n}\n\ntype NotSupported struct {\n\tRuntimeFault\n}\n\nfunc init() {\n\tt[\"NotSupported\"] = reflect.TypeOf((*NotSupported)(nil)).Elem()\n}\n\ntype NotSupportedDeviceForFT struct {\n\tVmFaultToleranceIssue\n\n\tHost        ManagedObjectReference `xml:\"host\"`\n\tHostName    string                 `xml:\"hostName,omitempty\"`\n\tVm          ManagedObjectReference `xml:\"vm\"`\n\tVmName      string                 `xml:\"vmName,omitempty\"`\n\tDeviceType  string                 `xml:\"deviceType\"`\n\tDeviceLabel string                 `xml:\"deviceLabel,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NotSupportedDeviceForFT\"] = reflect.TypeOf((*NotSupportedDeviceForFT)(nil)).Elem()\n}\n\ntype NotSupportedDeviceForFTFault NotSupportedDeviceForFT\n\nfunc init() {\n\tt[\"NotSupportedDeviceForFTFault\"] = reflect.TypeOf((*NotSupportedDeviceForFTFault)(nil)).Elem()\n}\n\ntype NotSupportedFault BaseNotSupported\n\nfunc init() {\n\tt[\"NotSupportedFault\"] = reflect.TypeOf((*NotSupportedFault)(nil)).Elem()\n}\n\ntype NotSupportedHost struct {\n\tHostConnectFault\n\n\tProductName    string `xml:\"productName,omitempty\"`\n\tProductVersion string `xml:\"productVersion,omitempty\"`\n}\n\nfunc init() {\n\tt[\"NotSupportedHost\"] = reflect.TypeOf((*NotSupportedHost)(nil)).Elem()\n}\n\ntype NotSupportedHostFault BaseNotSupportedHost\n\nfunc init() {\n\tt[\"NotSupportedHostFault\"] = reflect.TypeOf((*NotSupportedHostFault)(nil)).Elem()\n}\n\ntype NotSupportedHostForChecksum struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"NotSupportedHostForChecksum\"] = 
reflect.TypeOf((*NotSupportedHostForChecksum)(nil)).Elem()\n}\n\ntype NotSupportedHostForChecksumFault NotSupportedHostForChecksum\n\nfunc init() {\n\tt[\"NotSupportedHostForChecksumFault\"] = reflect.TypeOf((*NotSupportedHostForChecksumFault)(nil)).Elem()\n}\n\ntype NotSupportedHostForVFlash struct {\n\tNotSupportedHost\n\n\tHostName string `xml:\"hostName\"`\n}\n\nfunc init() {\n\tt[\"NotSupportedHostForVFlash\"] = reflect.TypeOf((*NotSupportedHostForVFlash)(nil)).Elem()\n}\n\ntype NotSupportedHostForVFlashFault NotSupportedHostForVFlash\n\nfunc init() {\n\tt[\"NotSupportedHostForVFlashFault\"] = reflect.TypeOf((*NotSupportedHostForVFlashFault)(nil)).Elem()\n}\n\ntype NotSupportedHostForVmcp struct {\n\tNotSupportedHost\n\n\tHostName string `xml:\"hostName\"`\n}\n\nfunc init() {\n\tt[\"NotSupportedHostForVmcp\"] = reflect.TypeOf((*NotSupportedHostForVmcp)(nil)).Elem()\n}\n\ntype NotSupportedHostForVmcpFault NotSupportedHostForVmcp\n\nfunc init() {\n\tt[\"NotSupportedHostForVmcpFault\"] = reflect.TypeOf((*NotSupportedHostForVmcpFault)(nil)).Elem()\n}\n\ntype NotSupportedHostForVmemFile struct {\n\tNotSupportedHost\n\n\tHostName string `xml:\"hostName\"`\n}\n\nfunc init() {\n\tt[\"NotSupportedHostForVmemFile\"] = reflect.TypeOf((*NotSupportedHostForVmemFile)(nil)).Elem()\n}\n\ntype NotSupportedHostForVmemFileFault NotSupportedHostForVmemFile\n\nfunc init() {\n\tt[\"NotSupportedHostForVmemFileFault\"] = reflect.TypeOf((*NotSupportedHostForVmemFileFault)(nil)).Elem()\n}\n\ntype NotSupportedHostForVsan struct {\n\tNotSupportedHost\n\n\tHostName string `xml:\"hostName\"`\n}\n\nfunc init() {\n\tt[\"NotSupportedHostForVsan\"] = reflect.TypeOf((*NotSupportedHostForVsan)(nil)).Elem()\n}\n\ntype NotSupportedHostForVsanFault NotSupportedHostForVsan\n\nfunc init() {\n\tt[\"NotSupportedHostForVsanFault\"] = reflect.TypeOf((*NotSupportedHostForVsanFault)(nil)).Elem()\n}\n\ntype NotSupportedHostInCluster struct {\n\tNotSupportedHost\n}\n\nfunc init() 
{\n\tt[\"NotSupportedHostInCluster\"] = reflect.TypeOf((*NotSupportedHostInCluster)(nil)).Elem()\n}\n\ntype NotSupportedHostInClusterFault BaseNotSupportedHostInCluster\n\nfunc init() {\n\tt[\"NotSupportedHostInClusterFault\"] = reflect.TypeOf((*NotSupportedHostInClusterFault)(nil)).Elem()\n}\n\ntype NotSupportedHostInDvs struct {\n\tNotSupportedHost\n\n\tSwitchProductSpec DistributedVirtualSwitchProductSpec `xml:\"switchProductSpec\"`\n}\n\nfunc init() {\n\tt[\"NotSupportedHostInDvs\"] = reflect.TypeOf((*NotSupportedHostInDvs)(nil)).Elem()\n}\n\ntype NotSupportedHostInDvsFault NotSupportedHostInDvs\n\nfunc init() {\n\tt[\"NotSupportedHostInDvsFault\"] = reflect.TypeOf((*NotSupportedHostInDvsFault)(nil)).Elem()\n}\n\ntype NotSupportedHostInHACluster struct {\n\tNotSupportedHost\n\n\tHostName string `xml:\"hostName\"`\n\tBuild    string `xml:\"build\"`\n}\n\nfunc init() {\n\tt[\"NotSupportedHostInHACluster\"] = reflect.TypeOf((*NotSupportedHostInHACluster)(nil)).Elem()\n}\n\ntype NotSupportedHostInHAClusterFault NotSupportedHostInHACluster\n\nfunc init() {\n\tt[\"NotSupportedHostInHAClusterFault\"] = reflect.TypeOf((*NotSupportedHostInHAClusterFault)(nil)).Elem()\n}\n\ntype NotUserConfigurableProperty struct {\n\tVAppPropertyFault\n}\n\nfunc init() {\n\tt[\"NotUserConfigurableProperty\"] = reflect.TypeOf((*NotUserConfigurableProperty)(nil)).Elem()\n}\n\ntype NotUserConfigurablePropertyFault NotUserConfigurableProperty\n\nfunc init() {\n\tt[\"NotUserConfigurablePropertyFault\"] = reflect.TypeOf((*NotUserConfigurablePropertyFault)(nil)).Elem()\n}\n\ntype NumPortsProfile struct {\n\tApplyProfile\n}\n\nfunc init() {\n\tt[\"NumPortsProfile\"] = reflect.TypeOf((*NumPortsProfile)(nil)).Elem()\n}\n\ntype NumVirtualCoresPerSocketNotSupported struct {\n\tVirtualHardwareCompatibilityIssue\n\n\tMaxSupportedCoresPerSocketDest int32 `xml:\"maxSupportedCoresPerSocketDest\"`\n\tNumCoresPerSocketVm            int32 `xml:\"numCoresPerSocketVm\"`\n}\n\nfunc init() 
{\n\tt[\"NumVirtualCoresPerSocketNotSupported\"] = reflect.TypeOf((*NumVirtualCoresPerSocketNotSupported)(nil)).Elem()\n}\n\ntype NumVirtualCoresPerSocketNotSupportedFault NumVirtualCoresPerSocketNotSupported\n\nfunc init() {\n\tt[\"NumVirtualCoresPerSocketNotSupportedFault\"] = reflect.TypeOf((*NumVirtualCoresPerSocketNotSupportedFault)(nil)).Elem()\n}\n\ntype NumVirtualCpusExceedsLimit struct {\n\tInsufficientResourcesFault\n\n\tMaxSupportedVcpus int32 `xml:\"maxSupportedVcpus\"`\n}\n\nfunc init() {\n\tt[\"NumVirtualCpusExceedsLimit\"] = reflect.TypeOf((*NumVirtualCpusExceedsLimit)(nil)).Elem()\n}\n\ntype NumVirtualCpusExceedsLimitFault NumVirtualCpusExceedsLimit\n\nfunc init() {\n\tt[\"NumVirtualCpusExceedsLimitFault\"] = reflect.TypeOf((*NumVirtualCpusExceedsLimitFault)(nil)).Elem()\n}\n\ntype NumVirtualCpusIncompatible struct {\n\tVmConfigFault\n\n\tReason string `xml:\"reason\"`\n\tNumCpu int32  `xml:\"numCpu\"`\n}\n\nfunc init() {\n\tt[\"NumVirtualCpusIncompatible\"] = reflect.TypeOf((*NumVirtualCpusIncompatible)(nil)).Elem()\n}\n\ntype NumVirtualCpusIncompatibleFault NumVirtualCpusIncompatible\n\nfunc init() {\n\tt[\"NumVirtualCpusIncompatibleFault\"] = reflect.TypeOf((*NumVirtualCpusIncompatibleFault)(nil)).Elem()\n}\n\ntype NumVirtualCpusNotSupported struct {\n\tVirtualHardwareCompatibilityIssue\n\n\tMaxSupportedVcpusDest int32 `xml:\"maxSupportedVcpusDest\"`\n\tNumCpuVm              int32 `xml:\"numCpuVm\"`\n}\n\nfunc init() {\n\tt[\"NumVirtualCpusNotSupported\"] = reflect.TypeOf((*NumVirtualCpusNotSupported)(nil)).Elem()\n}\n\ntype NumVirtualCpusNotSupportedFault NumVirtualCpusNotSupported\n\nfunc init() {\n\tt[\"NumVirtualCpusNotSupportedFault\"] = reflect.TypeOf((*NumVirtualCpusNotSupportedFault)(nil)).Elem()\n}\n\ntype NumericRange struct {\n\tDynamicData\n\n\tStart int32 `xml:\"start\"`\n\tEnd   int32 `xml:\"end\"`\n}\n\nfunc init() {\n\tt[\"NumericRange\"] = reflect.TypeOf((*NumericRange)(nil)).Elem()\n}\n\ntype ObjectContent struct 
{\n\tDynamicData\n\n\tObj        ManagedObjectReference `xml:\"obj\"`\n\tPropSet    []DynamicProperty      `xml:\"propSet,omitempty\"`\n\tMissingSet []MissingProperty      `xml:\"missingSet,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ObjectContent\"] = reflect.TypeOf((*ObjectContent)(nil)).Elem()\n}\n\ntype ObjectSpec struct {\n\tDynamicData\n\n\tObj       ManagedObjectReference `xml:\"obj\"`\n\tSkip      *bool                  `xml:\"skip\"`\n\tSelectSet []BaseSelectionSpec    `xml:\"selectSet,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ObjectSpec\"] = reflect.TypeOf((*ObjectSpec)(nil)).Elem()\n}\n\ntype ObjectUpdate struct {\n\tDynamicData\n\n\tKind       ObjectUpdateKind       `xml:\"kind\"`\n\tObj        ManagedObjectReference `xml:\"obj\"`\n\tChangeSet  []PropertyChange       `xml:\"changeSet,omitempty\"`\n\tMissingSet []MissingProperty      `xml:\"missingSet,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ObjectUpdate\"] = reflect.TypeOf((*ObjectUpdate)(nil)).Elem()\n}\n\ntype OnceTaskScheduler struct {\n\tTaskScheduler\n\n\tRunAt *time.Time `xml:\"runAt\"`\n}\n\nfunc init() {\n\tt[\"OnceTaskScheduler\"] = reflect.TypeOf((*OnceTaskScheduler)(nil)).Elem()\n}\n\ntype OpaqueNetworkCapability struct {\n\tDynamicData\n\n\tNetworkReservationSupported bool `xml:\"networkReservationSupported\"`\n}\n\nfunc init() {\n\tt[\"OpaqueNetworkCapability\"] = reflect.TypeOf((*OpaqueNetworkCapability)(nil)).Elem()\n}\n\ntype OpaqueNetworkSummary struct {\n\tNetworkSummary\n\n\tOpaqueNetworkId   string `xml:\"opaqueNetworkId\"`\n\tOpaqueNetworkType string `xml:\"opaqueNetworkType\"`\n}\n\nfunc init() {\n\tt[\"OpaqueNetworkSummary\"] = reflect.TypeOf((*OpaqueNetworkSummary)(nil)).Elem()\n}\n\ntype OpaqueNetworkTargetInfo struct {\n\tVirtualMachineTargetInfo\n\n\tNetwork                     OpaqueNetworkSummary `xml:\"network\"`\n\tNetworkReservationSupported *bool                `xml:\"networkReservationSupported\"`\n}\n\nfunc init() {\n\tt[\"OpaqueNetworkTargetInfo\"] = 
reflect.TypeOf((*OpaqueNetworkTargetInfo)(nil)).Elem()\n}\n\ntype OpenInventoryViewFolder OpenInventoryViewFolderRequestType\n\nfunc init() {\n\tt[\"OpenInventoryViewFolder\"] = reflect.TypeOf((*OpenInventoryViewFolder)(nil)).Elem()\n}\n\ntype OpenInventoryViewFolderRequestType struct {\n\tThis   ManagedObjectReference   `xml:\"_this\"`\n\tEntity []ManagedObjectReference `xml:\"entity\"`\n}\n\nfunc init() {\n\tt[\"OpenInventoryViewFolderRequestType\"] = reflect.TypeOf((*OpenInventoryViewFolderRequestType)(nil)).Elem()\n}\n\ntype OpenInventoryViewFolderResponse struct {\n\tReturnval []ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype OperationDisabledByGuest struct {\n\tGuestOperationsFault\n}\n\nfunc init() {\n\tt[\"OperationDisabledByGuest\"] = reflect.TypeOf((*OperationDisabledByGuest)(nil)).Elem()\n}\n\ntype OperationDisabledByGuestFault OperationDisabledByGuest\n\nfunc init() {\n\tt[\"OperationDisabledByGuestFault\"] = reflect.TypeOf((*OperationDisabledByGuestFault)(nil)).Elem()\n}\n\ntype OperationDisallowedOnHost struct {\n\tRuntimeFault\n}\n\nfunc init() {\n\tt[\"OperationDisallowedOnHost\"] = reflect.TypeOf((*OperationDisallowedOnHost)(nil)).Elem()\n}\n\ntype OperationDisallowedOnHostFault OperationDisallowedOnHost\n\nfunc init() {\n\tt[\"OperationDisallowedOnHostFault\"] = reflect.TypeOf((*OperationDisallowedOnHostFault)(nil)).Elem()\n}\n\ntype OperationNotSupportedByGuest struct {\n\tGuestOperationsFault\n}\n\nfunc init() {\n\tt[\"OperationNotSupportedByGuest\"] = reflect.TypeOf((*OperationNotSupportedByGuest)(nil)).Elem()\n}\n\ntype OperationNotSupportedByGuestFault OperationNotSupportedByGuest\n\nfunc init() {\n\tt[\"OperationNotSupportedByGuestFault\"] = reflect.TypeOf((*OperationNotSupportedByGuestFault)(nil)).Elem()\n}\n\ntype OptionDef struct {\n\tElementDescription\n\n\tOptionType BaseOptionType `xml:\"optionType,typeattr\"`\n}\n\nfunc init() {\n\tt[\"OptionDef\"] = reflect.TypeOf((*OptionDef)(nil)).Elem()\n}\n\ntype OptionProfile 
struct {\n\tApplyProfile\n\n\tKey string `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"OptionProfile\"] = reflect.TypeOf((*OptionProfile)(nil)).Elem()\n}\n\ntype OptionType struct {\n\tDynamicData\n\n\tValueIsReadonly *bool `xml:\"valueIsReadonly\"`\n}\n\nfunc init() {\n\tt[\"OptionType\"] = reflect.TypeOf((*OptionType)(nil)).Elem()\n}\n\ntype OptionValue struct {\n\tDynamicData\n\n\tKey   string  `xml:\"key\"`\n\tValue AnyType `xml:\"value,typeattr\"`\n}\n\nfunc init() {\n\tt[\"OptionValue\"] = reflect.TypeOf((*OptionValue)(nil)).Elem()\n}\n\ntype OrAlarmExpression struct {\n\tAlarmExpression\n\n\tExpression []BaseAlarmExpression `xml:\"expression,typeattr\"`\n}\n\nfunc init() {\n\tt[\"OrAlarmExpression\"] = reflect.TypeOf((*OrAlarmExpression)(nil)).Elem()\n}\n\ntype OutOfBounds struct {\n\tVimFault\n\n\tArgumentName string `xml:\"argumentName\"`\n}\n\nfunc init() {\n\tt[\"OutOfBounds\"] = reflect.TypeOf((*OutOfBounds)(nil)).Elem()\n}\n\ntype OutOfBoundsFault OutOfBounds\n\nfunc init() {\n\tt[\"OutOfBoundsFault\"] = reflect.TypeOf((*OutOfBoundsFault)(nil)).Elem()\n}\n\ntype OutOfSyncDvsHost struct {\n\tDvsEvent\n\n\tHostOutOfSync []DvsOutOfSyncHostArgument `xml:\"hostOutOfSync\"`\n}\n\nfunc init() {\n\tt[\"OutOfSyncDvsHost\"] = reflect.TypeOf((*OutOfSyncDvsHost)(nil)).Elem()\n}\n\ntype OverwriteCustomizationSpec OverwriteCustomizationSpecRequestType\n\nfunc init() {\n\tt[\"OverwriteCustomizationSpec\"] = reflect.TypeOf((*OverwriteCustomizationSpec)(nil)).Elem()\n}\n\ntype OverwriteCustomizationSpecRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tItem CustomizationSpecItem  `xml:\"item\"`\n}\n\nfunc init() {\n\tt[\"OverwriteCustomizationSpecRequestType\"] = reflect.TypeOf((*OverwriteCustomizationSpecRequestType)(nil)).Elem()\n}\n\ntype OverwriteCustomizationSpecResponse struct {\n}\n\ntype OvfAttribute struct {\n\tOvfInvalidPackage\n\n\tElementName   string `xml:\"elementName\"`\n\tAttributeName string `xml:\"attributeName\"`\n}\n\nfunc init() 
{\n\tt[\"OvfAttribute\"] = reflect.TypeOf((*OvfAttribute)(nil)).Elem()\n}\n\ntype OvfAttributeFault BaseOvfAttribute\n\nfunc init() {\n\tt[\"OvfAttributeFault\"] = reflect.TypeOf((*OvfAttributeFault)(nil)).Elem()\n}\n\ntype OvfConnectedDevice struct {\n\tOvfHardwareExport\n}\n\nfunc init() {\n\tt[\"OvfConnectedDevice\"] = reflect.TypeOf((*OvfConnectedDevice)(nil)).Elem()\n}\n\ntype OvfConnectedDeviceFault BaseOvfConnectedDevice\n\nfunc init() {\n\tt[\"OvfConnectedDeviceFault\"] = reflect.TypeOf((*OvfConnectedDeviceFault)(nil)).Elem()\n}\n\ntype OvfConnectedDeviceFloppy struct {\n\tOvfConnectedDevice\n\n\tFilename string `xml:\"filename\"`\n}\n\nfunc init() {\n\tt[\"OvfConnectedDeviceFloppy\"] = reflect.TypeOf((*OvfConnectedDeviceFloppy)(nil)).Elem()\n}\n\ntype OvfConnectedDeviceFloppyFault OvfConnectedDeviceFloppy\n\nfunc init() {\n\tt[\"OvfConnectedDeviceFloppyFault\"] = reflect.TypeOf((*OvfConnectedDeviceFloppyFault)(nil)).Elem()\n}\n\ntype OvfConnectedDeviceIso struct {\n\tOvfConnectedDevice\n\n\tFilename string `xml:\"filename\"`\n}\n\nfunc init() {\n\tt[\"OvfConnectedDeviceIso\"] = reflect.TypeOf((*OvfConnectedDeviceIso)(nil)).Elem()\n}\n\ntype OvfConnectedDeviceIsoFault OvfConnectedDeviceIso\n\nfunc init() {\n\tt[\"OvfConnectedDeviceIsoFault\"] = reflect.TypeOf((*OvfConnectedDeviceIsoFault)(nil)).Elem()\n}\n\ntype OvfConstraint struct {\n\tOvfInvalidPackage\n\n\tName string `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"OvfConstraint\"] = reflect.TypeOf((*OvfConstraint)(nil)).Elem()\n}\n\ntype OvfConstraintFault BaseOvfConstraint\n\nfunc init() {\n\tt[\"OvfConstraintFault\"] = reflect.TypeOf((*OvfConstraintFault)(nil)).Elem()\n}\n\ntype OvfConsumerCallbackFault struct {\n\tOvfFault\n\n\tExtensionKey  string `xml:\"extensionKey\"`\n\tExtensionName string `xml:\"extensionName\"`\n}\n\nfunc init() {\n\tt[\"OvfConsumerCallbackFault\"] = reflect.TypeOf((*OvfConsumerCallbackFault)(nil)).Elem()\n}\n\ntype OvfConsumerCallbackFaultFault BaseOvfConsumerCallbackFault\n\nfunc 
init() {\n\tt[\"OvfConsumerCallbackFaultFault\"] = reflect.TypeOf((*OvfConsumerCallbackFaultFault)(nil)).Elem()\n}\n\ntype OvfConsumerCommunicationError struct {\n\tOvfConsumerCallbackFault\n\n\tDescription string `xml:\"description\"`\n}\n\nfunc init() {\n\tt[\"OvfConsumerCommunicationError\"] = reflect.TypeOf((*OvfConsumerCommunicationError)(nil)).Elem()\n}\n\ntype OvfConsumerCommunicationErrorFault OvfConsumerCommunicationError\n\nfunc init() {\n\tt[\"OvfConsumerCommunicationErrorFault\"] = reflect.TypeOf((*OvfConsumerCommunicationErrorFault)(nil)).Elem()\n}\n\ntype OvfConsumerFault struct {\n\tOvfConsumerCallbackFault\n\n\tErrorKey string     `xml:\"errorKey\"`\n\tMessage  string     `xml:\"message\"`\n\tParams   []KeyValue `xml:\"params,omitempty\"`\n}\n\nfunc init() {\n\tt[\"OvfConsumerFault\"] = reflect.TypeOf((*OvfConsumerFault)(nil)).Elem()\n}\n\ntype OvfConsumerFaultFault OvfConsumerFault\n\nfunc init() {\n\tt[\"OvfConsumerFaultFault\"] = reflect.TypeOf((*OvfConsumerFaultFault)(nil)).Elem()\n}\n\ntype OvfConsumerInvalidSection struct {\n\tOvfConsumerCallbackFault\n\n\tLineNumber  int32  `xml:\"lineNumber\"`\n\tDescription string `xml:\"description\"`\n}\n\nfunc init() {\n\tt[\"OvfConsumerInvalidSection\"] = reflect.TypeOf((*OvfConsumerInvalidSection)(nil)).Elem()\n}\n\ntype OvfConsumerInvalidSectionFault OvfConsumerInvalidSection\n\nfunc init() {\n\tt[\"OvfConsumerInvalidSectionFault\"] = reflect.TypeOf((*OvfConsumerInvalidSectionFault)(nil)).Elem()\n}\n\ntype OvfConsumerOstNode struct {\n\tDynamicData\n\n\tId      string                  `xml:\"id\"`\n\tType    string                  `xml:\"type\"`\n\tSection []OvfConsumerOvfSection `xml:\"section,omitempty\"`\n\tChild   []OvfConsumerOstNode    `xml:\"child,omitempty\"`\n\tEntity  *ManagedObjectReference `xml:\"entity,omitempty\"`\n}\n\nfunc init() {\n\tt[\"OvfConsumerOstNode\"] = reflect.TypeOf((*OvfConsumerOstNode)(nil)).Elem()\n}\n\ntype OvfConsumerOvfSection struct {\n\tDynamicData\n\n\tLineNumber 
int32  `xml:\"lineNumber\"`\n\tXml        string `xml:\"xml\"`\n}\n\nfunc init() {\n\tt[\"OvfConsumerOvfSection\"] = reflect.TypeOf((*OvfConsumerOvfSection)(nil)).Elem()\n}\n\ntype OvfConsumerPowerOnFault struct {\n\tInvalidState\n\n\tExtensionKey  string `xml:\"extensionKey\"`\n\tExtensionName string `xml:\"extensionName\"`\n\tDescription   string `xml:\"description\"`\n}\n\nfunc init() {\n\tt[\"OvfConsumerPowerOnFault\"] = reflect.TypeOf((*OvfConsumerPowerOnFault)(nil)).Elem()\n}\n\ntype OvfConsumerPowerOnFaultFault OvfConsumerPowerOnFault\n\nfunc init() {\n\tt[\"OvfConsumerPowerOnFaultFault\"] = reflect.TypeOf((*OvfConsumerPowerOnFaultFault)(nil)).Elem()\n}\n\ntype OvfConsumerUndeclaredSection struct {\n\tOvfConsumerCallbackFault\n\n\tQualifiedSectionType string `xml:\"qualifiedSectionType\"`\n}\n\nfunc init() {\n\tt[\"OvfConsumerUndeclaredSection\"] = reflect.TypeOf((*OvfConsumerUndeclaredSection)(nil)).Elem()\n}\n\ntype OvfConsumerUndeclaredSectionFault OvfConsumerUndeclaredSection\n\nfunc init() {\n\tt[\"OvfConsumerUndeclaredSectionFault\"] = reflect.TypeOf((*OvfConsumerUndeclaredSectionFault)(nil)).Elem()\n}\n\ntype OvfConsumerUndefinedPrefix struct {\n\tOvfConsumerCallbackFault\n\n\tPrefix string `xml:\"prefix\"`\n}\n\nfunc init() {\n\tt[\"OvfConsumerUndefinedPrefix\"] = reflect.TypeOf((*OvfConsumerUndefinedPrefix)(nil)).Elem()\n}\n\ntype OvfConsumerUndefinedPrefixFault OvfConsumerUndefinedPrefix\n\nfunc init() {\n\tt[\"OvfConsumerUndefinedPrefixFault\"] = reflect.TypeOf((*OvfConsumerUndefinedPrefixFault)(nil)).Elem()\n}\n\ntype OvfConsumerValidationFault struct {\n\tVmConfigFault\n\n\tExtensionKey  string `xml:\"extensionKey\"`\n\tExtensionName string `xml:\"extensionName\"`\n\tMessage       string `xml:\"message\"`\n}\n\nfunc init() {\n\tt[\"OvfConsumerValidationFault\"] = reflect.TypeOf((*OvfConsumerValidationFault)(nil)).Elem()\n}\n\ntype OvfConsumerValidationFaultFault OvfConsumerValidationFault\n\nfunc init() 
{\n\tt[\"OvfConsumerValidationFaultFault\"] = reflect.TypeOf((*OvfConsumerValidationFaultFault)(nil)).Elem()\n}\n\ntype OvfCpuCompatibility struct {\n\tOvfImport\n\n\tRegisterName         string `xml:\"registerName\"`\n\tLevel                int32  `xml:\"level\"`\n\tRegisterValue        string `xml:\"registerValue\"`\n\tDesiredRegisterValue string `xml:\"desiredRegisterValue\"`\n}\n\nfunc init() {\n\tt[\"OvfCpuCompatibility\"] = reflect.TypeOf((*OvfCpuCompatibility)(nil)).Elem()\n}\n\ntype OvfCpuCompatibilityCheckNotSupported struct {\n\tOvfImport\n}\n\nfunc init() {\n\tt[\"OvfCpuCompatibilityCheckNotSupported\"] = reflect.TypeOf((*OvfCpuCompatibilityCheckNotSupported)(nil)).Elem()\n}\n\ntype OvfCpuCompatibilityCheckNotSupportedFault OvfCpuCompatibilityCheckNotSupported\n\nfunc init() {\n\tt[\"OvfCpuCompatibilityCheckNotSupportedFault\"] = reflect.TypeOf((*OvfCpuCompatibilityCheckNotSupportedFault)(nil)).Elem()\n}\n\ntype OvfCpuCompatibilityFault OvfCpuCompatibility\n\nfunc init() {\n\tt[\"OvfCpuCompatibilityFault\"] = reflect.TypeOf((*OvfCpuCompatibilityFault)(nil)).Elem()\n}\n\ntype OvfCreateDescriptorParams struct {\n\tDynamicData\n\n\tOvfFiles          []OvfFile               `xml:\"ovfFiles,omitempty\"`\n\tName              string                  `xml:\"name,omitempty\"`\n\tDescription       string                  `xml:\"description,omitempty\"`\n\tIncludeImageFiles *bool                   `xml:\"includeImageFiles\"`\n\tExportOption      []string                `xml:\"exportOption,omitempty\"`\n\tSnapshot          *ManagedObjectReference `xml:\"snapshot,omitempty\"`\n}\n\nfunc init() {\n\tt[\"OvfCreateDescriptorParams\"] = reflect.TypeOf((*OvfCreateDescriptorParams)(nil)).Elem()\n}\n\ntype OvfCreateDescriptorResult struct {\n\tDynamicData\n\n\tOvfDescriptor     string                 `xml:\"ovfDescriptor\"`\n\tError             []LocalizedMethodFault `xml:\"error,omitempty\"`\n\tWarning           []LocalizedMethodFault 
`xml:\"warning,omitempty\"`\n\tIncludeImageFiles *bool                  `xml:\"includeImageFiles\"`\n}\n\nfunc init() {\n\tt[\"OvfCreateDescriptorResult\"] = reflect.TypeOf((*OvfCreateDescriptorResult)(nil)).Elem()\n}\n\ntype OvfCreateImportSpecParams struct {\n\tOvfManagerCommonParams\n\n\tEntityName         string                  `xml:\"entityName\"`\n\tHostSystem         *ManagedObjectReference `xml:\"hostSystem,omitempty\"`\n\tNetworkMapping     []OvfNetworkMapping     `xml:\"networkMapping,omitempty\"`\n\tIpAllocationPolicy string                  `xml:\"ipAllocationPolicy,omitempty\"`\n\tIpProtocol         string                  `xml:\"ipProtocol,omitempty\"`\n\tPropertyMapping    []KeyValue              `xml:\"propertyMapping,omitempty\"`\n\tResourceMapping    []OvfResourceMap        `xml:\"resourceMapping,omitempty\"`\n\tDiskProvisioning   string                  `xml:\"diskProvisioning,omitempty\"`\n\tInstantiationOst   *OvfConsumerOstNode     `xml:\"instantiationOst,omitempty\"`\n}\n\nfunc init() {\n\tt[\"OvfCreateImportSpecParams\"] = reflect.TypeOf((*OvfCreateImportSpecParams)(nil)).Elem()\n}\n\ntype OvfCreateImportSpecResult struct {\n\tDynamicData\n\n\tImportSpec BaseImportSpec         `xml:\"importSpec,omitempty,typeattr\"`\n\tFileItem   []OvfFileItem          `xml:\"fileItem,omitempty\"`\n\tWarning    []LocalizedMethodFault `xml:\"warning,omitempty\"`\n\tError      []LocalizedMethodFault `xml:\"error,omitempty\"`\n}\n\nfunc init() {\n\tt[\"OvfCreateImportSpecResult\"] = reflect.TypeOf((*OvfCreateImportSpecResult)(nil)).Elem()\n}\n\ntype OvfDeploymentOption struct {\n\tDynamicData\n\n\tKey         string `xml:\"key\"`\n\tLabel       string `xml:\"label\"`\n\tDescription string `xml:\"description\"`\n}\n\nfunc init() {\n\tt[\"OvfDeploymentOption\"] = reflect.TypeOf((*OvfDeploymentOption)(nil)).Elem()\n}\n\ntype OvfDiskMappingNotFound struct {\n\tOvfSystemFault\n\n\tDiskName string `xml:\"diskName\"`\n\tVmName   string `xml:\"vmName\"`\n}\n\nfunc 
init() {\n\tt[\"OvfDiskMappingNotFound\"] = reflect.TypeOf((*OvfDiskMappingNotFound)(nil)).Elem()\n}\n\ntype OvfDiskMappingNotFoundFault OvfDiskMappingNotFound\n\nfunc init() {\n\tt[\"OvfDiskMappingNotFoundFault\"] = reflect.TypeOf((*OvfDiskMappingNotFoundFault)(nil)).Elem()\n}\n\ntype OvfDiskOrderConstraint struct {\n\tOvfConstraint\n}\n\nfunc init() {\n\tt[\"OvfDiskOrderConstraint\"] = reflect.TypeOf((*OvfDiskOrderConstraint)(nil)).Elem()\n}\n\ntype OvfDiskOrderConstraintFault OvfDiskOrderConstraint\n\nfunc init() {\n\tt[\"OvfDiskOrderConstraintFault\"] = reflect.TypeOf((*OvfDiskOrderConstraintFault)(nil)).Elem()\n}\n\ntype OvfDuplicateElement struct {\n\tOvfElement\n}\n\nfunc init() {\n\tt[\"OvfDuplicateElement\"] = reflect.TypeOf((*OvfDuplicateElement)(nil)).Elem()\n}\n\ntype OvfDuplicateElementFault OvfDuplicateElement\n\nfunc init() {\n\tt[\"OvfDuplicateElementFault\"] = reflect.TypeOf((*OvfDuplicateElementFault)(nil)).Elem()\n}\n\ntype OvfDuplicatedElementBoundary struct {\n\tOvfElement\n\n\tBoundary string `xml:\"boundary\"`\n}\n\nfunc init() {\n\tt[\"OvfDuplicatedElementBoundary\"] = reflect.TypeOf((*OvfDuplicatedElementBoundary)(nil)).Elem()\n}\n\ntype OvfDuplicatedElementBoundaryFault OvfDuplicatedElementBoundary\n\nfunc init() {\n\tt[\"OvfDuplicatedElementBoundaryFault\"] = reflect.TypeOf((*OvfDuplicatedElementBoundaryFault)(nil)).Elem()\n}\n\ntype OvfDuplicatedPropertyIdExport struct {\n\tOvfExport\n\n\tFqid string `xml:\"fqid\"`\n}\n\nfunc init() {\n\tt[\"OvfDuplicatedPropertyIdExport\"] = reflect.TypeOf((*OvfDuplicatedPropertyIdExport)(nil)).Elem()\n}\n\ntype OvfDuplicatedPropertyIdExportFault OvfDuplicatedPropertyIdExport\n\nfunc init() {\n\tt[\"OvfDuplicatedPropertyIdExportFault\"] = reflect.TypeOf((*OvfDuplicatedPropertyIdExportFault)(nil)).Elem()\n}\n\ntype OvfDuplicatedPropertyIdImport struct {\n\tOvfExport\n}\n\nfunc init() {\n\tt[\"OvfDuplicatedPropertyIdImport\"] = reflect.TypeOf((*OvfDuplicatedPropertyIdImport)(nil)).Elem()\n}\n\ntype 
OvfDuplicatedPropertyIdImportFault OvfDuplicatedPropertyIdImport\n\nfunc init() {\n\tt[\"OvfDuplicatedPropertyIdImportFault\"] = reflect.TypeOf((*OvfDuplicatedPropertyIdImportFault)(nil)).Elem()\n}\n\ntype OvfElement struct {\n\tOvfInvalidPackage\n\n\tName string `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"OvfElement\"] = reflect.TypeOf((*OvfElement)(nil)).Elem()\n}\n\ntype OvfElementFault BaseOvfElement\n\nfunc init() {\n\tt[\"OvfElementFault\"] = reflect.TypeOf((*OvfElementFault)(nil)).Elem()\n}\n\ntype OvfElementInvalidValue struct {\n\tOvfElement\n\n\tValue string `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"OvfElementInvalidValue\"] = reflect.TypeOf((*OvfElementInvalidValue)(nil)).Elem()\n}\n\ntype OvfElementInvalidValueFault OvfElementInvalidValue\n\nfunc init() {\n\tt[\"OvfElementInvalidValueFault\"] = reflect.TypeOf((*OvfElementInvalidValueFault)(nil)).Elem()\n}\n\ntype OvfExport struct {\n\tOvfFault\n}\n\nfunc init() {\n\tt[\"OvfExport\"] = reflect.TypeOf((*OvfExport)(nil)).Elem()\n}\n\ntype OvfExportFailed struct {\n\tOvfExport\n}\n\nfunc init() {\n\tt[\"OvfExportFailed\"] = reflect.TypeOf((*OvfExportFailed)(nil)).Elem()\n}\n\ntype OvfExportFailedFault OvfExportFailed\n\nfunc init() {\n\tt[\"OvfExportFailedFault\"] = reflect.TypeOf((*OvfExportFailedFault)(nil)).Elem()\n}\n\ntype OvfExportFault BaseOvfExport\n\nfunc init() {\n\tt[\"OvfExportFault\"] = reflect.TypeOf((*OvfExportFault)(nil)).Elem()\n}\n\ntype OvfFault struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"OvfFault\"] = reflect.TypeOf((*OvfFault)(nil)).Elem()\n}\n\ntype OvfFaultFault BaseOvfFault\n\nfunc init() {\n\tt[\"OvfFaultFault\"] = reflect.TypeOf((*OvfFaultFault)(nil)).Elem()\n}\n\ntype OvfFile struct {\n\tDynamicData\n\n\tDeviceId          string `xml:\"deviceId\"`\n\tPath              string `xml:\"path\"`\n\tCompressionMethod string `xml:\"compressionMethod,omitempty\"`\n\tChunkSize         int64  `xml:\"chunkSize,omitempty\"`\n\tSize              int64  `xml:\"size\"`\n\tCapacity          
int64  `xml:\"capacity,omitempty\"`\n\tPopulatedSize     int64  `xml:\"populatedSize,omitempty\"`\n}\n\nfunc init() {\n\tt[\"OvfFile\"] = reflect.TypeOf((*OvfFile)(nil)).Elem()\n}\n\ntype OvfFileItem struct {\n\tDynamicData\n\n\tDeviceId          string `xml:\"deviceId\"`\n\tPath              string `xml:\"path\"`\n\tCompressionMethod string `xml:\"compressionMethod,omitempty\"`\n\tChunkSize         int64  `xml:\"chunkSize,omitempty\"`\n\tSize              int64  `xml:\"size,omitempty\"`\n\tCimType           int32  `xml:\"cimType\"`\n\tCreate            bool   `xml:\"create\"`\n}\n\nfunc init() {\n\tt[\"OvfFileItem\"] = reflect.TypeOf((*OvfFileItem)(nil)).Elem()\n}\n\ntype OvfHardwareCheck struct {\n\tOvfImport\n}\n\nfunc init() {\n\tt[\"OvfHardwareCheck\"] = reflect.TypeOf((*OvfHardwareCheck)(nil)).Elem()\n}\n\ntype OvfHardwareCheckFault OvfHardwareCheck\n\nfunc init() {\n\tt[\"OvfHardwareCheckFault\"] = reflect.TypeOf((*OvfHardwareCheckFault)(nil)).Elem()\n}\n\ntype OvfHardwareExport struct {\n\tOvfExport\n\n\tDevice BaseVirtualDevice `xml:\"device,omitempty,typeattr\"`\n\tVmPath string            `xml:\"vmPath\"`\n}\n\nfunc init() {\n\tt[\"OvfHardwareExport\"] = reflect.TypeOf((*OvfHardwareExport)(nil)).Elem()\n}\n\ntype OvfHardwareExportFault BaseOvfHardwareExport\n\nfunc init() {\n\tt[\"OvfHardwareExportFault\"] = reflect.TypeOf((*OvfHardwareExportFault)(nil)).Elem()\n}\n\ntype OvfHostResourceConstraint struct {\n\tOvfConstraint\n\n\tValue string `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"OvfHostResourceConstraint\"] = reflect.TypeOf((*OvfHostResourceConstraint)(nil)).Elem()\n}\n\ntype OvfHostResourceConstraintFault OvfHostResourceConstraint\n\nfunc init() {\n\tt[\"OvfHostResourceConstraintFault\"] = reflect.TypeOf((*OvfHostResourceConstraintFault)(nil)).Elem()\n}\n\ntype OvfHostValueNotParsed struct {\n\tOvfSystemFault\n\n\tProperty string `xml:\"property\"`\n\tValue    string `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"OvfHostValueNotParsed\"] = 
reflect.TypeOf((*OvfHostValueNotParsed)(nil)).Elem()\n}\n\ntype OvfHostValueNotParsedFault OvfHostValueNotParsed\n\nfunc init() {\n\tt[\"OvfHostValueNotParsedFault\"] = reflect.TypeOf((*OvfHostValueNotParsedFault)(nil)).Elem()\n}\n\ntype OvfImport struct {\n\tOvfFault\n}\n\nfunc init() {\n\tt[\"OvfImport\"] = reflect.TypeOf((*OvfImport)(nil)).Elem()\n}\n\ntype OvfImportFailed struct {\n\tOvfImport\n}\n\nfunc init() {\n\tt[\"OvfImportFailed\"] = reflect.TypeOf((*OvfImportFailed)(nil)).Elem()\n}\n\ntype OvfImportFailedFault OvfImportFailed\n\nfunc init() {\n\tt[\"OvfImportFailedFault\"] = reflect.TypeOf((*OvfImportFailedFault)(nil)).Elem()\n}\n\ntype OvfImportFault BaseOvfImport\n\nfunc init() {\n\tt[\"OvfImportFault\"] = reflect.TypeOf((*OvfImportFault)(nil)).Elem()\n}\n\ntype OvfInternalError struct {\n\tOvfSystemFault\n}\n\nfunc init() {\n\tt[\"OvfInternalError\"] = reflect.TypeOf((*OvfInternalError)(nil)).Elem()\n}\n\ntype OvfInternalErrorFault OvfInternalError\n\nfunc init() {\n\tt[\"OvfInternalErrorFault\"] = reflect.TypeOf((*OvfInternalErrorFault)(nil)).Elem()\n}\n\ntype OvfInvalidPackage struct {\n\tOvfFault\n\n\tLineNumber int32 `xml:\"lineNumber\"`\n}\n\nfunc init() {\n\tt[\"OvfInvalidPackage\"] = reflect.TypeOf((*OvfInvalidPackage)(nil)).Elem()\n}\n\ntype OvfInvalidPackageFault BaseOvfInvalidPackage\n\nfunc init() {\n\tt[\"OvfInvalidPackageFault\"] = reflect.TypeOf((*OvfInvalidPackageFault)(nil)).Elem()\n}\n\ntype OvfInvalidValue struct {\n\tOvfAttribute\n\n\tValue string `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"OvfInvalidValue\"] = reflect.TypeOf((*OvfInvalidValue)(nil)).Elem()\n}\n\ntype OvfInvalidValueConfiguration struct {\n\tOvfInvalidValue\n}\n\nfunc init() {\n\tt[\"OvfInvalidValueConfiguration\"] = reflect.TypeOf((*OvfInvalidValueConfiguration)(nil)).Elem()\n}\n\ntype OvfInvalidValueConfigurationFault OvfInvalidValueConfiguration\n\nfunc init() {\n\tt[\"OvfInvalidValueConfigurationFault\"] = 
reflect.TypeOf((*OvfInvalidValueConfigurationFault)(nil)).Elem()\n}\n\ntype OvfInvalidValueEmpty struct {\n\tOvfInvalidValue\n}\n\nfunc init() {\n\tt[\"OvfInvalidValueEmpty\"] = reflect.TypeOf((*OvfInvalidValueEmpty)(nil)).Elem()\n}\n\ntype OvfInvalidValueEmptyFault OvfInvalidValueEmpty\n\nfunc init() {\n\tt[\"OvfInvalidValueEmptyFault\"] = reflect.TypeOf((*OvfInvalidValueEmptyFault)(nil)).Elem()\n}\n\ntype OvfInvalidValueFault BaseOvfInvalidValue\n\nfunc init() {\n\tt[\"OvfInvalidValueFault\"] = reflect.TypeOf((*OvfInvalidValueFault)(nil)).Elem()\n}\n\ntype OvfInvalidValueFormatMalformed struct {\n\tOvfInvalidValue\n}\n\nfunc init() {\n\tt[\"OvfInvalidValueFormatMalformed\"] = reflect.TypeOf((*OvfInvalidValueFormatMalformed)(nil)).Elem()\n}\n\ntype OvfInvalidValueFormatMalformedFault OvfInvalidValueFormatMalformed\n\nfunc init() {\n\tt[\"OvfInvalidValueFormatMalformedFault\"] = reflect.TypeOf((*OvfInvalidValueFormatMalformedFault)(nil)).Elem()\n}\n\ntype OvfInvalidValueReference struct {\n\tOvfInvalidValue\n}\n\nfunc init() {\n\tt[\"OvfInvalidValueReference\"] = reflect.TypeOf((*OvfInvalidValueReference)(nil)).Elem()\n}\n\ntype OvfInvalidValueReferenceFault OvfInvalidValueReference\n\nfunc init() {\n\tt[\"OvfInvalidValueReferenceFault\"] = reflect.TypeOf((*OvfInvalidValueReferenceFault)(nil)).Elem()\n}\n\ntype OvfInvalidVmName struct {\n\tOvfUnsupportedPackage\n\n\tName string `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"OvfInvalidVmName\"] = reflect.TypeOf((*OvfInvalidVmName)(nil)).Elem()\n}\n\ntype OvfInvalidVmNameFault OvfInvalidVmName\n\nfunc init() {\n\tt[\"OvfInvalidVmNameFault\"] = reflect.TypeOf((*OvfInvalidVmNameFault)(nil)).Elem()\n}\n\ntype OvfManagerCommonParams struct {\n\tDynamicData\n\n\tLocale           string     `xml:\"locale\"`\n\tDeploymentOption string     `xml:\"deploymentOption\"`\n\tMsgBundle        []KeyValue `xml:\"msgBundle,omitempty\"`\n\tImportOption     []string   `xml:\"importOption,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"OvfManagerCommonParams\"] = reflect.TypeOf((*OvfManagerCommonParams)(nil)).Elem()\n}\n\ntype OvfMappedOsId struct {\n\tOvfImport\n\n\tOvfId             int32  `xml:\"ovfId\"`\n\tOvfDescription    string `xml:\"ovfDescription\"`\n\tTargetDescription string `xml:\"targetDescription\"`\n}\n\nfunc init() {\n\tt[\"OvfMappedOsId\"] = reflect.TypeOf((*OvfMappedOsId)(nil)).Elem()\n}\n\ntype OvfMappedOsIdFault OvfMappedOsId\n\nfunc init() {\n\tt[\"OvfMappedOsIdFault\"] = reflect.TypeOf((*OvfMappedOsIdFault)(nil)).Elem()\n}\n\ntype OvfMissingAttribute struct {\n\tOvfAttribute\n}\n\nfunc init() {\n\tt[\"OvfMissingAttribute\"] = reflect.TypeOf((*OvfMissingAttribute)(nil)).Elem()\n}\n\ntype OvfMissingAttributeFault OvfMissingAttribute\n\nfunc init() {\n\tt[\"OvfMissingAttributeFault\"] = reflect.TypeOf((*OvfMissingAttributeFault)(nil)).Elem()\n}\n\ntype OvfMissingElement struct {\n\tOvfElement\n}\n\nfunc init() {\n\tt[\"OvfMissingElement\"] = reflect.TypeOf((*OvfMissingElement)(nil)).Elem()\n}\n\ntype OvfMissingElementFault BaseOvfMissingElement\n\nfunc init() {\n\tt[\"OvfMissingElementFault\"] = reflect.TypeOf((*OvfMissingElementFault)(nil)).Elem()\n}\n\ntype OvfMissingElementNormalBoundary struct {\n\tOvfMissingElement\n\n\tBoundary string `xml:\"boundary\"`\n}\n\nfunc init() {\n\tt[\"OvfMissingElementNormalBoundary\"] = reflect.TypeOf((*OvfMissingElementNormalBoundary)(nil)).Elem()\n}\n\ntype OvfMissingElementNormalBoundaryFault OvfMissingElementNormalBoundary\n\nfunc init() {\n\tt[\"OvfMissingElementNormalBoundaryFault\"] = reflect.TypeOf((*OvfMissingElementNormalBoundaryFault)(nil)).Elem()\n}\n\ntype OvfMissingHardware struct {\n\tOvfImport\n\n\tName         string `xml:\"name\"`\n\tResourceType int32  `xml:\"resourceType\"`\n}\n\nfunc init() {\n\tt[\"OvfMissingHardware\"] = reflect.TypeOf((*OvfMissingHardware)(nil)).Elem()\n}\n\ntype OvfMissingHardwareFault OvfMissingHardware\n\nfunc init() {\n\tt[\"OvfMissingHardwareFault\"] = 
reflect.TypeOf((*OvfMissingHardwareFault)(nil)).Elem()\n}\n\ntype OvfNetworkInfo struct {\n\tDynamicData\n\n\tName        string `xml:\"name\"`\n\tDescription string `xml:\"description\"`\n}\n\nfunc init() {\n\tt[\"OvfNetworkInfo\"] = reflect.TypeOf((*OvfNetworkInfo)(nil)).Elem()\n}\n\ntype OvfNetworkMapping struct {\n\tDynamicData\n\n\tName    string                 `xml:\"name\"`\n\tNetwork ManagedObjectReference `xml:\"network\"`\n}\n\nfunc init() {\n\tt[\"OvfNetworkMapping\"] = reflect.TypeOf((*OvfNetworkMapping)(nil)).Elem()\n}\n\ntype OvfNetworkMappingNotSupported struct {\n\tOvfImport\n}\n\nfunc init() {\n\tt[\"OvfNetworkMappingNotSupported\"] = reflect.TypeOf((*OvfNetworkMappingNotSupported)(nil)).Elem()\n}\n\ntype OvfNetworkMappingNotSupportedFault OvfNetworkMappingNotSupported\n\nfunc init() {\n\tt[\"OvfNetworkMappingNotSupportedFault\"] = reflect.TypeOf((*OvfNetworkMappingNotSupportedFault)(nil)).Elem()\n}\n\ntype OvfNoHostNic struct {\n\tOvfUnsupportedPackage\n}\n\nfunc init() {\n\tt[\"OvfNoHostNic\"] = reflect.TypeOf((*OvfNoHostNic)(nil)).Elem()\n}\n\ntype OvfNoHostNicFault OvfNoHostNic\n\nfunc init() {\n\tt[\"OvfNoHostNicFault\"] = reflect.TypeOf((*OvfNoHostNicFault)(nil)).Elem()\n}\n\ntype OvfNoSpaceOnController struct {\n\tOvfUnsupportedElement\n\n\tParent string `xml:\"parent\"`\n}\n\nfunc init() {\n\tt[\"OvfNoSpaceOnController\"] = reflect.TypeOf((*OvfNoSpaceOnController)(nil)).Elem()\n}\n\ntype OvfNoSpaceOnControllerFault OvfNoSpaceOnController\n\nfunc init() {\n\tt[\"OvfNoSpaceOnControllerFault\"] = reflect.TypeOf((*OvfNoSpaceOnControllerFault)(nil)).Elem()\n}\n\ntype OvfNoSupportedHardwareFamily struct {\n\tOvfUnsupportedPackage\n\n\tVersion string `xml:\"version\"`\n}\n\nfunc init() {\n\tt[\"OvfNoSupportedHardwareFamily\"] = reflect.TypeOf((*OvfNoSupportedHardwareFamily)(nil)).Elem()\n}\n\ntype OvfNoSupportedHardwareFamilyFault OvfNoSupportedHardwareFamily\n\nfunc init() {\n\tt[\"OvfNoSupportedHardwareFamilyFault\"] = 
reflect.TypeOf((*OvfNoSupportedHardwareFamilyFault)(nil)).Elem()\n}\n\ntype OvfOptionInfo struct {\n\tDynamicData\n\n\tOption      string             `xml:\"option\"`\n\tDescription LocalizableMessage `xml:\"description\"`\n}\n\nfunc init() {\n\tt[\"OvfOptionInfo\"] = reflect.TypeOf((*OvfOptionInfo)(nil)).Elem()\n}\n\ntype OvfParseDescriptorParams struct {\n\tOvfManagerCommonParams\n}\n\nfunc init() {\n\tt[\"OvfParseDescriptorParams\"] = reflect.TypeOf((*OvfParseDescriptorParams)(nil)).Elem()\n}\n\ntype OvfParseDescriptorResult struct {\n\tDynamicData\n\n\tEula                            []string               `xml:\"eula,omitempty\"`\n\tNetwork                         []OvfNetworkInfo       `xml:\"network,omitempty\"`\n\tIpAllocationScheme              []string               `xml:\"ipAllocationScheme,omitempty\"`\n\tIpProtocols                     []string               `xml:\"ipProtocols,omitempty\"`\n\tProperty                        []VAppPropertyInfo     `xml:\"property,omitempty\"`\n\tProductInfo                     *VAppProductInfo       `xml:\"productInfo,omitempty\"`\n\tAnnotation                      string                 `xml:\"annotation\"`\n\tApproximateDownloadSize         int64                  `xml:\"approximateDownloadSize,omitempty\"`\n\tApproximateFlatDeploymentSize   int64                  `xml:\"approximateFlatDeploymentSize,omitempty\"`\n\tApproximateSparseDeploymentSize int64                  `xml:\"approximateSparseDeploymentSize,omitempty\"`\n\tDefaultEntityName               string                 `xml:\"defaultEntityName\"`\n\tVirtualApp                      bool                   `xml:\"virtualApp\"`\n\tDeploymentOption                []OvfDeploymentOption  `xml:\"deploymentOption,omitempty\"`\n\tDefaultDeploymentOption         string                 `xml:\"defaultDeploymentOption\"`\n\tEntityName                      []KeyValue             `xml:\"entityName,omitempty\"`\n\tAnnotatedOst                    *OvfConsumerOstNode    
`xml:\"annotatedOst,omitempty\"`\n\tError                           []LocalizedMethodFault `xml:\"error,omitempty\"`\n\tWarning                         []LocalizedMethodFault `xml:\"warning,omitempty\"`\n}\n\nfunc init() {\n\tt[\"OvfParseDescriptorResult\"] = reflect.TypeOf((*OvfParseDescriptorResult)(nil)).Elem()\n}\n\ntype OvfProperty struct {\n\tOvfInvalidPackage\n\n\tType  string `xml:\"type\"`\n\tValue string `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"OvfProperty\"] = reflect.TypeOf((*OvfProperty)(nil)).Elem()\n}\n\ntype OvfPropertyExport struct {\n\tOvfExport\n\n\tType  string `xml:\"type\"`\n\tValue string `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"OvfPropertyExport\"] = reflect.TypeOf((*OvfPropertyExport)(nil)).Elem()\n}\n\ntype OvfPropertyExportFault OvfPropertyExport\n\nfunc init() {\n\tt[\"OvfPropertyExportFault\"] = reflect.TypeOf((*OvfPropertyExportFault)(nil)).Elem()\n}\n\ntype OvfPropertyFault BaseOvfProperty\n\nfunc init() {\n\tt[\"OvfPropertyFault\"] = reflect.TypeOf((*OvfPropertyFault)(nil)).Elem()\n}\n\ntype OvfPropertyNetwork struct {\n\tOvfProperty\n}\n\nfunc init() {\n\tt[\"OvfPropertyNetwork\"] = reflect.TypeOf((*OvfPropertyNetwork)(nil)).Elem()\n}\n\ntype OvfPropertyNetworkExport struct {\n\tOvfExport\n\n\tNetwork string `xml:\"network\"`\n}\n\nfunc init() {\n\tt[\"OvfPropertyNetworkExport\"] = reflect.TypeOf((*OvfPropertyNetworkExport)(nil)).Elem()\n}\n\ntype OvfPropertyNetworkExportFault OvfPropertyNetworkExport\n\nfunc init() {\n\tt[\"OvfPropertyNetworkExportFault\"] = reflect.TypeOf((*OvfPropertyNetworkExportFault)(nil)).Elem()\n}\n\ntype OvfPropertyNetworkFault OvfPropertyNetwork\n\nfunc init() {\n\tt[\"OvfPropertyNetworkFault\"] = reflect.TypeOf((*OvfPropertyNetworkFault)(nil)).Elem()\n}\n\ntype OvfPropertyQualifier struct {\n\tOvfProperty\n\n\tQualifier string `xml:\"qualifier\"`\n}\n\nfunc init() {\n\tt[\"OvfPropertyQualifier\"] = reflect.TypeOf((*OvfPropertyQualifier)(nil)).Elem()\n}\n\ntype OvfPropertyQualifierDuplicate struct 
// NOTE(review): this section looks machine-generated (likely from the vSphere
// API WSDL) — confirm the generator before hand-editing. The pattern throughout
// is: a Go struct mirroring one vSphere XML type (field tags drive
// encoding/xml marshalling), followed by an init() that records the type in
// the package-level registry map `t` under its wire name, so the concrete Go
// type can be looked up by XML type name at decode time.
//
// NOTE(review): the opening brace below continues a struct whose `type` line
// is above this chunk (presumably OvfPropertyQualifierDuplicate, given the
// registration that follows) — verify against the preceding lines.
{
	OvfProperty

	Qualifier string `xml:"qualifier"`
}

func init() {
	t["OvfPropertyQualifierDuplicate"] = reflect.TypeOf((*OvfPropertyQualifierDuplicate)(nil)).Elem()
}

// The *Fault aliases give each fault type a second registry entry; the alias
// shares the underlying struct layout with the type it names.
type OvfPropertyQualifierDuplicateFault OvfPropertyQualifierDuplicate

func init() {
	t["OvfPropertyQualifierDuplicateFault"] = reflect.TypeOf((*OvfPropertyQualifierDuplicateFault)(nil)).Elem()
}

type OvfPropertyQualifierFault OvfPropertyQualifier

func init() {
	t["OvfPropertyQualifierFault"] = reflect.TypeOf((*OvfPropertyQualifierFault)(nil)).Elem()
}

type OvfPropertyQualifierIgnored struct {
	OvfProperty

	Qualifier string `xml:"qualifier"`
}

func init() {
	t["OvfPropertyQualifierIgnored"] = reflect.TypeOf((*OvfPropertyQualifierIgnored)(nil)).Elem()
}

type OvfPropertyQualifierIgnoredFault OvfPropertyQualifierIgnored

func init() {
	t["OvfPropertyQualifierIgnoredFault"] = reflect.TypeOf((*OvfPropertyQualifierIgnoredFault)(nil)).Elem()
}

type OvfPropertyType struct {
	OvfProperty
}

func init() {
	t["OvfPropertyType"] = reflect.TypeOf((*OvfPropertyType)(nil)).Elem()
}

type OvfPropertyTypeFault OvfPropertyType

func init() {
	t["OvfPropertyTypeFault"] = reflect.TypeOf((*OvfPropertyTypeFault)(nil)).Elem()
}

type OvfPropertyValue struct {
	OvfProperty
}

func init() {
	t["OvfPropertyValue"] = reflect.TypeOf((*OvfPropertyValue)(nil)).Elem()
}

type OvfPropertyValueFault OvfPropertyValue

func init() {
	t["OvfPropertyValueFault"] = reflect.TypeOf((*OvfPropertyValueFault)(nil)).Elem()
}

// OvfResourceMap maps an OVF source entity to placement targets
// (parent, resource spec, datastore); all targets are optional.
type OvfResourceMap struct {
	DynamicData

	Source       string                  `xml:"source"`
	Parent       *ManagedObjectReference `xml:"parent,omitempty"`
	ResourceSpec *ResourceConfigSpec     `xml:"resourceSpec,omitempty"`
	Datastore    *ManagedObjectReference `xml:"datastore,omitempty"`
}

func init() {
	t["OvfResourceMap"] = reflect.TypeOf((*OvfResourceMap)(nil)).Elem()
}

type OvfSystemFault struct {
	OvfFault
}

func init() {
	t["OvfSystemFault"] = reflect.TypeOf((*OvfSystemFault)(nil)).Elem()
}

type OvfSystemFaultFault BaseOvfSystemFault

func init() {
	t["OvfSystemFaultFault"] = reflect.TypeOf((*OvfSystemFaultFault)(nil)).Elem()
}

type OvfToXmlUnsupportedElement struct {
	OvfSystemFault

	Name string `xml:"name,omitempty"`
}

func init() {
	t["OvfToXmlUnsupportedElement"] = reflect.TypeOf((*OvfToXmlUnsupportedElement)(nil)).Elem()
}

type OvfToXmlUnsupportedElementFault OvfToXmlUnsupportedElement

func init() {
	t["OvfToXmlUnsupportedElementFault"] = reflect.TypeOf((*OvfToXmlUnsupportedElementFault)(nil)).Elem()
}

type OvfUnableToExportDisk struct {
	OvfHardwareExport

	DiskName string `xml:"diskName"`
}

func init() {
	t["OvfUnableToExportDisk"] = reflect.TypeOf((*OvfUnableToExportDisk)(nil)).Elem()
}

type OvfUnableToExportDiskFault OvfUnableToExportDisk

func init() {
	t["OvfUnableToExportDiskFault"] = reflect.TypeOf((*OvfUnableToExportDiskFault)(nil)).Elem()
}

type OvfUnexpectedElement struct {
	OvfElement
}

func init() {
	t["OvfUnexpectedElement"] = reflect.TypeOf((*OvfUnexpectedElement)(nil)).Elem()
}

type OvfUnexpectedElementFault OvfUnexpectedElement

func init() {
	t["OvfUnexpectedElementFault"] = reflect.TypeOf((*OvfUnexpectedElementFault)(nil)).Elem()
}

type OvfUnknownDevice struct {
	OvfSystemFault

	// `typeattr` tags mark polymorphic fields whose concrete XML type is
	// carried in a type attribute on the wire.
	Device BaseVirtualDevice `xml:"device,omitempty,typeattr"`
	VmName string            `xml:"vmName"`
}

func init() {
	t["OvfUnknownDevice"] = reflect.TypeOf((*OvfUnknownDevice)(nil)).Elem()
}

type OvfUnknownDeviceBacking struct {
	OvfHardwareExport

	Backing BaseVirtualDeviceBackingInfo `xml:"backing,typeattr"`
}

func init() {
	t["OvfUnknownDeviceBacking"] = reflect.TypeOf((*OvfUnknownDeviceBacking)(nil)).Elem()
}

type OvfUnknownDeviceBackingFault OvfUnknownDeviceBacking

func init() {
	t["OvfUnknownDeviceBackingFault"] = reflect.TypeOf((*OvfUnknownDeviceBackingFault)(nil)).Elem()
}

type OvfUnknownDeviceFault OvfUnknownDevice

func init() {
	t["OvfUnknownDeviceFault"] = reflect.TypeOf((*OvfUnknownDeviceFault)(nil)).Elem()
}

type OvfUnknownEntity struct {
	OvfSystemFault

	LineNumber int32 `xml:"lineNumber"`
}

func init() {
	t["OvfUnknownEntity"] = reflect.TypeOf((*OvfUnknownEntity)(nil)).Elem()
}

type OvfUnknownEntityFault OvfUnknownEntity

func init() {
	t["OvfUnknownEntityFault"] = reflect.TypeOf((*OvfUnknownEntityFault)(nil)).Elem()
}

type OvfUnsupportedAttribute struct {
	OvfUnsupportedPackage

	ElementName   string `xml:"elementName"`
	AttributeName string `xml:"attributeName"`
}

func init() {
	t["OvfUnsupportedAttribute"] = reflect.TypeOf((*OvfUnsupportedAttribute)(nil)).Elem()
}

type OvfUnsupportedAttributeFault BaseOvfUnsupportedAttribute

func init() {
	t["OvfUnsupportedAttributeFault"] = reflect.TypeOf((*OvfUnsupportedAttributeFault)(nil)).Elem()
}

type OvfUnsupportedAttributeValue struct {
	OvfUnsupportedAttribute

	Value string `xml:"value"`
}

func init() {
	t["OvfUnsupportedAttributeValue"] = reflect.TypeOf((*OvfUnsupportedAttributeValue)(nil)).Elem()
}

type OvfUnsupportedAttributeValueFault OvfUnsupportedAttributeValue

func init() {
	t["OvfUnsupportedAttributeValueFault"] = reflect.TypeOf((*OvfUnsupportedAttributeValueFault)(nil)).Elem()
}

type OvfUnsupportedDeviceBackingInfo struct {
	OvfSystemFault

	ElementName string `xml:"elementName,omitempty"`
	InstanceId  string `xml:"instanceId,omitempty"`
	DeviceName  string `xml:"deviceName"`
	BackingName string `xml:"backingName,omitempty"`
}

func init() {
	t["OvfUnsupportedDeviceBackingInfo"] = reflect.TypeOf((*OvfUnsupportedDeviceBackingInfo)(nil)).Elem()
}

type OvfUnsupportedDeviceBackingInfoFault OvfUnsupportedDeviceBackingInfo

func init() {
	t["OvfUnsupportedDeviceBackingInfoFault"] = reflect.TypeOf((*OvfUnsupportedDeviceBackingInfoFault)(nil)).Elem()
}

// Same field set as OvfUnsupportedDeviceBackingInfo; kept as a distinct type
// so it registers under its own wire name.
type OvfUnsupportedDeviceBackingOption struct {
	OvfSystemFault

	ElementName string `xml:"elementName,omitempty"`
	InstanceId  string `xml:"instanceId,omitempty"`
	DeviceName  string `xml:"deviceName"`
	BackingName string `xml:"backingName,omitempty"`
}

func init() {
	t["OvfUnsupportedDeviceBackingOption"] = reflect.TypeOf((*OvfUnsupportedDeviceBackingOption)(nil)).Elem()
}

type OvfUnsupportedDeviceBackingOptionFault OvfUnsupportedDeviceBackingOption

func init() {
	t["OvfUnsupportedDeviceBackingOptionFault"] = reflect.TypeOf((*OvfUnsupportedDeviceBackingOptionFault)(nil)).Elem()
}

type OvfUnsupportedDeviceExport struct {
	OvfHardwareExport
}

func init() {
	t["OvfUnsupportedDeviceExport"] = reflect.TypeOf((*OvfUnsupportedDeviceExport)(nil)).Elem()
}

type OvfUnsupportedDeviceExportFault OvfUnsupportedDeviceExport

func init() {
	t["OvfUnsupportedDeviceExportFault"] = reflect.TypeOf((*OvfUnsupportedDeviceExportFault)(nil)).Elem()
}

type OvfUnsupportedDiskProvisioning struct {
	OvfImport

	DiskProvisioning          string `xml:"diskProvisioning"`
	SupportedDiskProvisioning string `xml:"supportedDiskProvisioning"`
}

func init() {
	t["OvfUnsupportedDiskProvisioning"] = reflect.TypeOf((*OvfUnsupportedDiskProvisioning)(nil)).Elem()
}

type OvfUnsupportedDiskProvisioningFault OvfUnsupportedDiskProvisioning

func init() {
	t["OvfUnsupportedDiskProvisioningFault"] = reflect.TypeOf((*OvfUnsupportedDiskProvisioningFault)(nil)).Elem()
}

type OvfUnsupportedElement struct {
	OvfUnsupportedPackage

	Name string `xml:"name"`
}

func init() {
	t["OvfUnsupportedElement"] = reflect.TypeOf((*OvfUnsupportedElement)(nil)).Elem()
}

type OvfUnsupportedElementFault BaseOvfUnsupportedElement

func init() {
	t["OvfUnsupportedElementFault"] = reflect.TypeOf((*OvfUnsupportedElementFault)(nil)).Elem()
}

type OvfUnsupportedElementValue struct {
	OvfUnsupportedElement

	Value string `xml:"value"`
}

func init() {
	t["OvfUnsupportedElementValue"] = reflect.TypeOf((*OvfUnsupportedElementValue)(nil)).Elem()
}

type OvfUnsupportedElementValueFault OvfUnsupportedElementValue

func init() {
	t["OvfUnsupportedElementValueFault"] = reflect.TypeOf((*OvfUnsupportedElementValueFault)(nil)).Elem()
}

type OvfUnsupportedPackage struct {
	OvfFault

	LineNumber int32 `xml:"lineNumber,omitempty"`
}

func init() {
	t["OvfUnsupportedPackage"] = reflect.TypeOf((*OvfUnsupportedPackage)(nil)).Elem()
}

type OvfUnsupportedPackageFault BaseOvfUnsupportedPackage

func init() {
	t["OvfUnsupportedPackageFault"] = reflect.TypeOf((*OvfUnsupportedPackageFault)(nil)).Elem()
}

type OvfUnsupportedSection struct {
	OvfUnsupportedElement

	Info string `xml:"info"`
}

func init() {
	t["OvfUnsupportedSection"] = reflect.TypeOf((*OvfUnsupportedSection)(nil)).Elem()
}

type OvfUnsupportedSectionFault OvfUnsupportedSection

func init() {
	t["OvfUnsupportedSectionFault"] = reflect.TypeOf((*OvfUnsupportedSectionFault)(nil)).Elem()
}

type OvfUnsupportedSubType struct {
	OvfUnsupportedPackage

	ElementName   string `xml:"elementName"`
	InstanceId    string `xml:"instanceId"`
	DeviceType    int32  `xml:"deviceType"`
	DeviceSubType string `xml:"deviceSubType"`
}

func init() {
	t["OvfUnsupportedSubType"] = reflect.TypeOf((*OvfUnsupportedSubType)(nil)).Elem()
}

type OvfUnsupportedSubTypeFault OvfUnsupportedSubType

func init() {
	t["OvfUnsupportedSubTypeFault"] = reflect.TypeOf((*OvfUnsupportedSubTypeFault)(nil)).Elem()
}

type OvfUnsupportedType struct {
	OvfUnsupportedPackage

	Name       string `xml:"name"`
	InstanceId string `xml:"instanceId"`
	DeviceType int32  `xml:"deviceType"`
}

func init() {
	t["OvfUnsupportedType"] = reflect.TypeOf((*OvfUnsupportedType)(nil)).Elem()
}

type OvfUnsupportedTypeFault OvfUnsupportedType

func init() {
	t["OvfUnsupportedTypeFault"] = reflect.TypeOf((*OvfUnsupportedTypeFault)(nil)).Elem()
}

type OvfValidateHostParams struct {
	OvfManagerCommonParams
}

func init() {
	t["OvfValidateHostParams"] = reflect.TypeOf((*OvfValidateHostParams)(nil)).Elem()
}

// OvfValidateHostResult aggregates size estimates plus any errors/warnings
// from validating an OVF deployment against a host.
type OvfValidateHostResult struct {
	DynamicData

	DownloadSize              int64                  `xml:"downloadSize,omitempty"`
	FlatDeploymentSize        int64                  `xml:"flatDeploymentSize,omitempty"`
	SparseDeploymentSize      int64                  `xml:"sparseDeploymentSize,omitempty"`
	Error                     []LocalizedMethodFault `xml:"error,omitempty"`
	Warning                   []LocalizedMethodFault `xml:"warning,omitempty"`
	SupportedDiskProvisioning []string               `xml:"supportedDiskProvisioning,omitempty"`
}

func init() {
	t["OvfValidateHostResult"] = reflect.TypeOf((*OvfValidateHostResult)(nil)).Elem()
}

type OvfWrongElement struct {
	OvfElement
}

func init() {
	t["OvfWrongElement"] = reflect.TypeOf((*OvfWrongElement)(nil)).Elem()
}

type OvfWrongElementFault OvfWrongElement

func init() {
	t["OvfWrongElementFault"] = reflect.TypeOf((*OvfWrongElementFault)(nil)).Elem()
}

type OvfWrongNamespace struct {
	OvfInvalidPackage

	NamespaceName string `xml:"namespaceName"`
}

func init() {
	t["OvfWrongNamespace"] = reflect.TypeOf((*OvfWrongNamespace)(nil)).Elem()
}

type OvfWrongNamespaceFault OvfWrongNamespace

func init() {
	t["OvfWrongNamespaceFault"] = reflect.TypeOf((*OvfWrongNamespaceFault)(nil)).Elem()
}

type OvfXmlFormat struct {
	OvfInvalidPackage

	Description string `xml:"description"`
}

func init() {
	t["OvfXmlFormat"] = reflect.TypeOf((*OvfXmlFormat)(nil)).Elem()
}

type OvfXmlFormatFault OvfXmlFormat

func init() {
	t["OvfXmlFormatFault"] = reflect.TypeOf((*OvfXmlFormatFault)(nil)).Elem()
}

type ParaVirtualSCSIController struct {
	VirtualSCSIController
}

func init() {
	t["ParaVirtualSCSIController"] = reflect.TypeOf((*ParaVirtualSCSIController)(nil)).Elem()
}

type ParaVirtualSCSIControllerOption struct {
	VirtualSCSIControllerOption
}

func init() {
	t["ParaVirtualSCSIControllerOption"] = reflect.TypeOf((*ParaVirtualSCSIControllerOption)(nil)).Elem()
}

// Request/response wrapper pattern: the method type aliases its RequestType,
// and the *Response struct carries the returnval. Response types are not
// registered in `t` here.
type ParseDescriptor ParseDescriptorRequestType

func init() {
	t["ParseDescriptor"] = reflect.TypeOf((*ParseDescriptor)(nil)).Elem()
}

type ParseDescriptorRequestType struct {
	This          ManagedObjectReference   `xml:"_this"`
	OvfDescriptor string                   `xml:"ovfDescriptor"`
	Pdp           OvfParseDescriptorParams `xml:"pdp"`
}

func init() {
	t["ParseDescriptorRequestType"] = reflect.TypeOf((*ParseDescriptorRequestType)(nil)).Elem()
}

type ParseDescriptorResponse struct {
	Returnval OvfParseDescriptorResult `xml:"returnval"`
}

type PassiveNodeDeploymentSpec struct {
	NodeDeploymentSpec

	FailoverIpSettings *CustomizationIPSettings `xml:"failoverIpSettings,omitempty"`
}

func init() {
	t["PassiveNodeDeploymentSpec"] = reflect.TypeOf((*PassiveNodeDeploymentSpec)(nil)).Elem()
}

type PassiveNodeNetworkSpec struct {
	NodeNetworkSpec

	FailoverIpSettings *CustomizationIPSettings `xml:"failoverIpSettings,omitempty"`
}

func init() {
	t["PassiveNodeNetworkSpec"] = reflect.TypeOf((*PassiveNodeNetworkSpec)(nil)).Elem()
}

type PasswordField struct {
	DynamicData

	Value string `xml:"value"`
}

func init() {
	t["PasswordField"] = reflect.TypeOf((*PasswordField)(nil)).Elem()
}

// Patch* types: host-patching fault hierarchy rooted (within this view) at
// VimFault via PatchNotApplicable / PatchMetadataInvalid.
type PatchAlreadyInstalled struct {
	PatchNotApplicable
}

func init() {
	t["PatchAlreadyInstalled"] = reflect.TypeOf((*PatchAlreadyInstalled)(nil)).Elem()
}

type PatchAlreadyInstalledFault PatchAlreadyInstalled

func init() {
	t["PatchAlreadyInstalledFault"] = reflect.TypeOf((*PatchAlreadyInstalledFault)(nil)).Elem()
}

type PatchBinariesNotFound struct {
	VimFault

	PatchID string   `xml:"patchID"`
	Binary  []string `xml:"binary,omitempty"`
}

func init() {
	t["PatchBinariesNotFound"] = reflect.TypeOf((*PatchBinariesNotFound)(nil)).Elem()
}

type PatchBinariesNotFoundFault PatchBinariesNotFound

func init() {
	t["PatchBinariesNotFoundFault"] = reflect.TypeOf((*PatchBinariesNotFoundFault)(nil)).Elem()
}

type PatchInstallFailed struct {
	PlatformConfigFault

	RolledBack bool `xml:"rolledBack"`
}

func init() {
	t["PatchInstallFailed"] = reflect.TypeOf((*PatchInstallFailed)(nil)).Elem()
}

type PatchInstallFailedFault PatchInstallFailed

func init() {
	t["PatchInstallFailedFault"] = reflect.TypeOf((*PatchInstallFailedFault)(nil)).Elem()
}

type PatchIntegrityError struct {
	PlatformConfigFault
}

func init() {
	t["PatchIntegrityError"] = reflect.TypeOf((*PatchIntegrityError)(nil)).Elem()
}

type PatchIntegrityErrorFault PatchIntegrityError

func init() {
	t["PatchIntegrityErrorFault"] = reflect.TypeOf((*PatchIntegrityErrorFault)(nil)).Elem()
}

type PatchMetadataCorrupted struct {
	PatchMetadataInvalid
}

func init() {
	t["PatchMetadataCorrupted"] = reflect.TypeOf((*PatchMetadataCorrupted)(nil)).Elem()
}

type PatchMetadataCorruptedFault PatchMetadataCorrupted

func init() {
	t["PatchMetadataCorruptedFault"] = reflect.TypeOf((*PatchMetadataCorruptedFault)(nil)).Elem()
}

type PatchMetadataInvalid struct {
	VimFault

	PatchID  string   `xml:"patchID"`
	MetaData []string `xml:"metaData,omitempty"`
}

func init() {
	t["PatchMetadataInvalid"] = reflect.TypeOf((*PatchMetadataInvalid)(nil)).Elem()
}

type PatchMetadataInvalidFault BasePatchMetadataInvalid

func init() {
	t["PatchMetadataInvalidFault"] = reflect.TypeOf((*PatchMetadataInvalidFault)(nil)).Elem()
}

type PatchMetadataNotFound struct {
	PatchMetadataInvalid
}

func init() {
	t["PatchMetadataNotFound"] = reflect.TypeOf((*PatchMetadataNotFound)(nil)).Elem()
}

type PatchMetadataNotFoundFault PatchMetadataNotFound

func init() {
	t["PatchMetadataNotFoundFault"] = reflect.TypeOf((*PatchMetadataNotFoundFault)(nil)).Elem()
}

type PatchMissingDependencies struct {
	PatchNotApplicable

	PrerequisitePatch []string `xml:"prerequisitePatch,omitempty"`
	PrerequisiteLib   []string `xml:"prerequisiteLib,omitempty"`
}

func init() {
	t["PatchMissingDependencies"] = reflect.TypeOf((*PatchMissingDependencies)(nil)).Elem()
}

type PatchMissingDependenciesFault PatchMissingDependencies

func init() {
	t["PatchMissingDependenciesFault"] = reflect.TypeOf((*PatchMissingDependenciesFault)(nil)).Elem()
}

type PatchNotApplicable struct {
	VimFault

	PatchID string `xml:"patchID"`
}

func init() {
	t["PatchNotApplicable"] = reflect.TypeOf((*PatchNotApplicable)(nil)).Elem()
}

type PatchNotApplicableFault BasePatchNotApplicable

func init() {
	t["PatchNotApplicableFault"] = reflect.TypeOf((*PatchNotApplicableFault)(nil)).Elem()
}

type PatchSuperseded struct {
	PatchNotApplicable

	Supersede []string `xml:"supersede,omitempty"`
}

func init() {
	t["PatchSuperseded"] = reflect.TypeOf((*PatchSuperseded)(nil)).Elem()
}

type PatchSupersededFault PatchSuperseded

func init() {
	t["PatchSupersededFault"] = reflect.TypeOf((*PatchSupersededFault)(nil)).Elem()
}

// Perf* types: PerformanceManager data model (counters, intervals, metric
// series, query specs).
type PerfCompositeMetric struct {
	DynamicData

	Entity      BasePerfEntityMetricBase   `xml:"entity,omitempty,typeattr"`
	ChildEntity []BasePerfEntityMetricBase `xml:"childEntity,omitempty,typeattr"`
}

func init() {
	t["PerfCompositeMetric"] = reflect.TypeOf((*PerfCompositeMetric)(nil)).Elem()
}

type PerfCounterInfo struct {
	DynamicData

	Key                 int32                  `xml:"key"`
	NameInfo            BaseElementDescription `xml:"nameInfo,typeattr"`
	GroupInfo           BaseElementDescription `xml:"groupInfo,typeattr"`
	UnitInfo            BaseElementDescription `xml:"unitInfo,typeattr"`
	RollupType          PerfSummaryType        `xml:"rollupType"`
	StatsType           PerfStatsType          `xml:"statsType"`
	Level               int32                  `xml:"level,omitempty"`
	PerDeviceLevel      int32                  `xml:"perDeviceLevel,omitempty"`
	AssociatedCounterId []int32                `xml:"associatedCounterId,omitempty"`
}

func init() {
	t["PerfCounterInfo"] = reflect.TypeOf((*PerfCounterInfo)(nil)).Elem()
}

type PerfEntityMetric struct {
	PerfEntityMetricBase

	SampleInfo []PerfSampleInfo       `xml:"sampleInfo,omitempty"`
	Value      []BasePerfMetricSeries `xml:"value,omitempty,typeattr"`
}

func init() {
	t["PerfEntityMetric"] = reflect.TypeOf((*PerfEntityMetric)(nil)).Elem()
}

type PerfEntityMetricBase struct {
	DynamicData

	Entity ManagedObjectReference `xml:"entity"`
}

func init() {
	t["PerfEntityMetricBase"] = reflect.TypeOf((*PerfEntityMetricBase)(nil)).Elem()
}

type PerfEntityMetricCSV struct {
	PerfEntityMetricBase

	SampleInfoCSV string                `xml:"sampleInfoCSV"`
	Value         []PerfMetricSeriesCSV `xml:"value,omitempty"`
}

func init() {
	t["PerfEntityMetricCSV"] = reflect.TypeOf((*PerfEntityMetricCSV)(nil)).Elem()
}

type PerfInterval struct {
	DynamicData

	Key            int32  `xml:"key"`
	SamplingPeriod int32  `xml:"samplingPeriod"`
	Name           string `xml:"name"`
	Length         int32  `xml:"length"`
	Level          int32  `xml:"level,omitempty"`
	Enabled        bool   `xml:"enabled"`
}

func init() {
	t["PerfInterval"] = reflect.TypeOf((*PerfInterval)(nil)).Elem()
}

type PerfMetricId struct {
	DynamicData

	CounterId int32  `xml:"counterId"`
	Instance  string `xml:"instance"`
}

func init() {
	t["PerfMetricId"] = reflect.TypeOf((*PerfMetricId)(nil)).Elem()
}

type PerfMetricIntSeries struct {
	PerfMetricSeries

	Value []int64 `xml:"value,omitempty"`
}

func init() {
	t["PerfMetricIntSeries"] = reflect.TypeOf((*PerfMetricIntSeries)(nil)).Elem()
}

type PerfMetricSeries struct {
	DynamicData

	Id PerfMetricId `xml:"id"`
}

func init() {
	t["PerfMetricSeries"] = reflect.TypeOf((*PerfMetricSeries)(nil)).Elem()
}

type PerfMetricSeriesCSV struct {
	PerfMetricSeries

	Value string `xml:"value,omitempty"`
}

func init() {
	t["PerfMetricSeriesCSV"] = reflect.TypeOf((*PerfMetricSeriesCSV)(nil)).Elem()
}

type PerfProviderSummary struct {
	DynamicData

	Entity           ManagedObjectReference `xml:"entity"`
	CurrentSupported bool                   `xml:"currentSupported"`
	SummarySupported bool                   `xml:"summarySupported"`
	RefreshRate      int32                  `xml:"refreshRate,omitempty"`
}

func init() {
	t["PerfProviderSummary"] = reflect.TypeOf((*PerfProviderSummary)(nil)).Elem()
}

// PerfQuerySpec describes one performance query: which entity, the time
// window (*time.Time pointers so the elements can be omitted as nil), and
// which metrics/interval/format to return.
type PerfQuerySpec struct {
	DynamicData

	Entity     ManagedObjectReference `xml:"entity"`
	StartTime  *time.Time             `xml:"startTime"`
	EndTime    *time.Time             `xml:"endTime"`
	MaxSample  int32                  `xml:"maxSample,omitempty"`
	MetricId   []PerfMetricId         `xml:"metricId,omitempty"`
	IntervalId int32                  `xml:"intervalId,omitempty"`
	Format     string                 `xml:"format,omitempty"`
}

func init() {
	t["PerfQuerySpec"] = reflect.TypeOf((*PerfQuerySpec)(nil)).Elem()
}

type PerfSampleInfo struct {
	DynamicData

	Timestamp time.Time `xml:"timestamp"`
	Interval  int32     `xml:"interval"`
}

func init() {
	t["PerfSampleInfo"] = reflect.TypeOf((*PerfSampleInfo)(nil)).Elem()
}

type PerformDvsProductSpecOperationRequestType struct {
	This        ManagedObjectReference               `xml:"_this"`
	Operation   string                               `xml:"operation"`
	ProductSpec *DistributedVirtualSwitchProductSpec `xml:"productSpec,omitempty"`
}

func init() {
	t["PerformDvsProductSpecOperationRequestType"] = reflect.TypeOf((*PerformDvsProductSpecOperationRequestType)(nil)).Elem()
}

type PerformDvsProductSpecOperation_Task PerformDvsProductSpecOperationRequestType

func init() {
	t["PerformDvsProductSpecOperation_Task"] = reflect.TypeOf((*PerformDvsProductSpecOperation_Task)(nil)).Elem()
}

// _Task responses return a reference to the Task managed object, not the
// operation's final result.
type PerformDvsProductSpecOperation_TaskResponse struct {
	Returnval ManagedObjectReference `xml:"returnval"`
}

type PerformVsanUpgradePreflightCheck PerformVsanUpgradePreflightCheckRequestType

func init() {
	t["PerformVsanUpgradePreflightCheck"] = reflect.TypeOf((*PerformVsanUpgradePreflightCheck)(nil)).Elem()
}

type PerformVsanUpgradePreflightCheckRequestType struct {
	This            ManagedObjectReference `xml:"_this"`
	Cluster         ManagedObjectReference `xml:"cluster"`
	DowngradeFormat *bool                  `xml:"downgradeFormat"`
}

func init() {
	t["PerformVsanUpgradePreflightCheckRequestType"] = reflect.TypeOf((*PerformVsanUpgradePreflightCheckRequestType)(nil)).Elem()
}

type PerformVsanUpgradePreflightCheckResponse struct {
	Returnval VsanUpgradeSystemPreflightCheckResult `xml:"returnval"`
}

type PerformVsanUpgradeRequestType struct {
	This                   ManagedObjectReference   `xml:"_this"`
	Cluster                ManagedObjectReference   `xml:"cluster"`
	PerformObjectUpgrade   *bool                    `xml:"performObjectUpgrade"`
	DowngradeFormat        *bool                    `xml:"downgradeFormat"`
	AllowReducedRedundancy *bool                    `xml:"allowReducedRedundancy"`
	ExcludeHosts           []ManagedObjectReference `xml:"excludeHosts,omitempty"`
}

func init() {
	t["PerformVsanUpgradeRequestType"] = reflect.TypeOf((*PerformVsanUpgradeRequestType)(nil)).Elem()
}

type PerformVsanUpgrade_Task PerformVsanUpgradeRequestType

func init() {
	t["PerformVsanUpgrade_Task"] = reflect.TypeOf((*PerformVsanUpgrade_Task)(nil)).Elem()
}

type PerformVsanUpgrade_TaskResponse struct {
	Returnval ManagedObjectReference `xml:"returnval"`
}

type PerformanceDescription struct {
	DynamicData

	CounterType []BaseElementDescription `xml:"counterType,typeattr"`
	StatsType   []BaseElementDescription `xml:"statsType,typeattr"`
}

func init() {
	t["PerformanceDescription"] = reflect.TypeOf((*PerformanceDescription)(nil)).Elem()
}

type PerformanceManagerCounterLevelMapping struct {
	DynamicData

	CounterId      int32 `xml:"counterId"`
	AggregateLevel int32 `xml:"aggregateLevel,omitempty"`
	PerDeviceLevel int32 `xml:"perDeviceLevel,omitempty"`
}

func init() {
	t["PerformanceManagerCounterLevelMapping"] = reflect.TypeOf((*PerformanceManagerCounterLevelMapping)(nil)).Elem()
}

type PerformanceStatisticsDescription struct {
	DynamicData

	Intervals []PerfInterval `xml:"intervals,omitempty"`
}

func init() {
	t["PerformanceStatisticsDescription"] = reflect.TypeOf((*PerformanceStatisticsDescription)(nil)).Elem()
}

// Permission* types: authorization entries and their audit events.
type Permission struct {
	DynamicData

	Entity    *ManagedObjectReference `xml:"entity,omitempty"`
	Principal string                  `xml:"principal"`
	Group     bool                    `xml:"group"`
	RoleId    int32                   `xml:"roleId"`
	Propagate bool                    `xml:"propagate"`
}

func init() {
	t["Permission"] = reflect.TypeOf((*Permission)(nil)).Elem()
}

type PermissionAddedEvent struct {
	PermissionEvent

	Role      RoleEventArgument `xml:"role"`
	Propagate bool              `xml:"propagate"`
}

func init() {
	t["PermissionAddedEvent"] = reflect.TypeOf((*PermissionAddedEvent)(nil)).Elem()
}

type PermissionEvent struct {
	AuthorizationEvent

	Entity    ManagedEntityEventArgument `xml:"entity"`
	Principal string                     `xml:"principal"`
	Group     bool                       `xml:"group"`
}

func init() {
	t["PermissionEvent"] = reflect.TypeOf((*PermissionEvent)(nil)).Elem()
}

type PermissionProfile struct {
	ApplyProfile

	Key string `xml:"key"`
}

func init() {
	t["PermissionProfile"] = reflect.TypeOf((*PermissionProfile)(nil)).Elem()
}

type PermissionRemovedEvent struct {
	PermissionEvent
}

func init() {
	t["PermissionRemovedEvent"] = reflect.TypeOf((*PermissionRemovedEvent)(nil)).Elem()
}

type PermissionUpdatedEvent struct {
	PermissionEvent

	Role          RoleEventArgument  `xml:"role"`
	Propagate     bool               `xml:"propagate"`
	PrevRole      *RoleEventArgument `xml:"prevRole,omitempty"`
	PrevPropagate *bool              `xml:"prevPropagate"`
}

func init() {
	t["PermissionUpdatedEvent"] = reflect.TypeOf((*PermissionUpdatedEvent)(nil)).Elem()
}

type PhysCompatRDMNotSupported struct {
	RDMNotSupported
}

func init() {
	t["PhysCompatRDMNotSupported"] = reflect.TypeOf((*PhysCompatRDMNotSupported)(nil)).Elem()
}

type PhysCompatRDMNotSupportedFault PhysCompatRDMNotSupported

func init() {
	t["PhysCompatRDMNotSupportedFault"] = reflect.TypeOf((*PhysCompatRDMNotSupportedFault)(nil)).Elem()
}

// PhysicalNic* types: host physical NIC hardware, CDP/LLDP neighbor info,
// and configuration specs. *bool / *int pointer fields distinguish
// "unset" (nil) from an explicit false/zero on the wire.
type PhysicalNic struct {
	DynamicData

	Key                                   string                `xml:"key,omitempty"`
	Device                                string                `xml:"device"`
	Pci                                   string                `xml:"pci"`
	Driver                                string                `xml:"driver,omitempty"`
	LinkSpeed                             *PhysicalNicLinkInfo  `xml:"linkSpeed,omitempty"`
	ValidLinkSpecification                []PhysicalNicLinkInfo `xml:"validLinkSpecification,omitempty"`
	Spec                                  PhysicalNicSpec       `xml:"spec"`
	WakeOnLanSupported                    bool                  `xml:"wakeOnLanSupported"`
	Mac                                   string                `xml:"mac"`
	FcoeConfiguration                     *FcoeConfig           `xml:"fcoeConfiguration,omitempty"`
	VmDirectPathGen2Supported             *bool                 `xml:"vmDirectPathGen2Supported"`
	VmDirectPathGen2SupportedMode         string                `xml:"vmDirectPathGen2SupportedMode,omitempty"`
	ResourcePoolSchedulerAllowed          *bool                 `xml:"resourcePoolSchedulerAllowed"`
	ResourcePoolSchedulerDisallowedReason []string              `xml:"resourcePoolSchedulerDisallowedReason,omitempty"`
	AutoNegotiateSupported                *bool                 `xml:"autoNegotiateSupported"`
}

func init() {
	t["PhysicalNic"] = reflect.TypeOf((*PhysicalNic)(nil)).Elem()
}

type PhysicalNicCdpDeviceCapability struct {
	DynamicData

	Router            bool `xml:"router"`
	TransparentBridge bool `xml:"transparentBridge"`
	SourceRouteBridge bool `xml:"sourceRouteBridge"`
	NetworkSwitch     bool `xml:"networkSwitch"`
	Host              bool `xml:"host"`
	IgmpEnabled       bool `xml:"igmpEnabled"`
	Repeater          bool `xml:"repeater"`
}

func init() {
	t["PhysicalNicCdpDeviceCapability"] = reflect.TypeOf((*PhysicalNicCdpDeviceCapability)(nil)).Elem()
}

type PhysicalNicCdpInfo struct {
	DynamicData

	CdpVersion       int32                           `xml:"cdpVersion,omitempty"`
	Timeout          int32                           `xml:"timeout,omitempty"`
	Ttl              int32                           `xml:"ttl,omitempty"`
	Samples          int32                           `xml:"samples,omitempty"`
	DevId            string                          `xml:"devId,omitempty"`
	Address          string                          `xml:"address,omitempty"`
	PortId           string                          `xml:"portId,omitempty"`
	DeviceCapability *PhysicalNicCdpDeviceCapability `xml:"deviceCapability,omitempty"`
	SoftwareVersion  string                          `xml:"softwareVersion,omitempty"`
	HardwarePlatform string                          `xml:"hardwarePlatform,omitempty"`
	IpPrefix         string                          `xml:"ipPrefix,omitempty"`
	IpPrefixLen      int32                           `xml:"ipPrefixLen,omitempty"`
	Vlan             int32                           `xml:"vlan,omitempty"`
	FullDuplex       *bool                           `xml:"fullDuplex"`
	Mtu              int32                           `xml:"mtu,omitempty"`
	SystemName       string                          `xml:"systemName,omitempty"`
	SystemOID        string                          `xml:"systemOID,omitempty"`
	MgmtAddr         string                          `xml:"mgmtAddr,omitempty"`
	Location         string                          `xml:"location,omitempty"`
}

func init() {
	t["PhysicalNicCdpInfo"] = reflect.TypeOf((*PhysicalNicCdpInfo)(nil)).Elem()
}

type PhysicalNicConfig struct {
	DynamicData

	Device string          `xml:"device"`
	Spec   PhysicalNicSpec `xml:"spec"`
}

func init() {
	t["PhysicalNicConfig"] = reflect.TypeOf((*PhysicalNicConfig)(nil)).Elem()
}

type PhysicalNicHint struct {
	DynamicData

	VlanId int32 `xml:"vlanId,omitempty"`
}

func init() {
	t["PhysicalNicHint"] = reflect.TypeOf((*PhysicalNicHint)(nil)).Elem()
}

type PhysicalNicHintInfo struct {
	DynamicData

	Device              string                          `xml:"device"`
	Subnet              []PhysicalNicIpHint             `xml:"subnet,omitempty"`
	Network             []PhysicalNicNameHint           `xml:"network,omitempty"`
	ConnectedSwitchPort *PhysicalNicCdpInfo             `xml:"connectedSwitchPort,omitempty"`
	LldpInfo            *LinkLayerDiscoveryProtocolInfo `xml:"lldpInfo,omitempty"`
}

func init() {
	t["PhysicalNicHintInfo"] = reflect.TypeOf((*PhysicalNicHintInfo)(nil)).Elem()
}

type PhysicalNicIpHint struct {
	PhysicalNicHint

	IpSubnet string `xml:"ipSubnet"`
}

func init() {
	t["PhysicalNicIpHint"] = reflect.TypeOf((*PhysicalNicIpHint)(nil)).Elem()
}

type PhysicalNicLinkInfo struct {
	DynamicData

	SpeedMb int32 `xml:"speedMb"`
	Duplex  bool  `xml:"duplex"`
}

func init() {
	t["PhysicalNicLinkInfo"] = reflect.TypeOf((*PhysicalNicLinkInfo)(nil)).Elem()
}

type PhysicalNicNameHint struct {
	PhysicalNicHint

	Network string `xml:"network"`
}

func init() {
	t["PhysicalNicNameHint"] = reflect.TypeOf((*PhysicalNicNameHint)(nil)).Elem()
}

type PhysicalNicProfile struct {
	ApplyProfile

	Key string `xml:"key"`
}

func init() {
	t["PhysicalNicProfile"] = reflect.TypeOf((*PhysicalNicProfile)(nil)).Elem()
}

type PhysicalNicSpec struct {
	DynamicData

	Ip        *HostIpConfig        `xml:"ip,omitempty"`
	LinkSpeed *PhysicalNicLinkInfo `xml:"linkSpeed,omitempty"`
}

func init() {
	t["PhysicalNicSpec"] = reflect.TypeOf((*PhysicalNicSpec)(nil)).Elem()
}

// Placement* types: DRS VM-placement request/result model.
type PlaceVm PlaceVmRequestType

func init() {
	t["PlaceVm"] = reflect.TypeOf((*PlaceVm)(nil)).Elem()
}

type PlaceVmRequestType struct {
	This          ManagedObjectReference `xml:"_this"`
	PlacementSpec PlacementSpec          `xml:"placementSpec"`
}

func init() {
	t["PlaceVmRequestType"] = reflect.TypeOf((*PlaceVmRequestType)(nil)).Elem()
}

type PlaceVmResponse struct {
	Returnval PlacementResult `xml:"returnval"`
}

type PlacementAction struct {
	ClusterAction

	Vm           *ManagedObjectReference     `xml:"vm,omitempty"`
	TargetHost   *ManagedObjectReference     `xml:"targetHost,omitempty"`
	RelocateSpec *VirtualMachineRelocateSpec `xml:"relocateSpec,omitempty"`
}

func init() {
	t["PlacementAction"] = reflect.TypeOf((*PlacementAction)(nil)).Elem()
}

type PlacementAffinityRule struct {
	DynamicData

	RuleType  string                   `xml:"ruleType"`
	RuleScope string                   `xml:"ruleScope"`
	Vms       []ManagedObjectReference `xml:"vms,omitempty"`
	Keys      []string                 `xml:"keys,omitempty"`
}

func init() {
	t["PlacementAffinityRule"] = reflect.TypeOf((*PlacementAffinityRule)(nil)).Elem()
}

type PlacementRankResult struct {
	DynamicData

	Key             string                 `xml:"key"`
	Candidate       ManagedObjectReference `xml:"candidate"`
	ReservedSpaceMB int64                  `xml:"reservedSpaceMB"`
	UsedSpaceMB     int64                  `xml:"usedSpaceMB"`
	TotalSpaceMB    int64                  `xml:"totalSpaceMB"`
	Utilization     float64                `xml:"utilization"`
	Faults          []LocalizedMethodFault `xml:"faults,omitempty"`
}

func init() {
	t["PlacementRankResult"] = reflect.TypeOf((*PlacementRankResult)(nil)).Elem()
}

type PlacementRankSpec struct {
	DynamicData

	Specs             []PlacementSpec                 `xml:"specs"`
	Clusters          []ManagedObjectReference        `xml:"clusters"`
	Rules             []PlacementAffinityRule         `xml:"rules,omitempty"`
	PlacementRankByVm []StorageDrsPlacementRankVmSpec `xml:"placementRankByVm,omitempty"`
}

func init() {
	t["PlacementRankSpec"] = reflect.TypeOf((*PlacementRankSpec)(nil)).Elem()
}

type PlacementResult struct {
	DynamicData

	Recommendations []ClusterRecommendation `xml:"recommendations,omitempty"`
	DrsFault        *ClusterDrsFaults       `xml:"drsFault,omitempty"`
}

func init() {
	t["PlacementResult"] = reflect.TypeOf((*PlacementResult)(nil)).Elem()
}

type PlacementSpec struct {
	DynamicData

	Priority                  VirtualMachineMovePriority  `xml:"priority,omitempty"`
	Vm                        *ManagedObjectReference     `xml:"vm,omitempty"`
	ConfigSpec                *VirtualMachineConfigSpec   `xml:"configSpec,omitempty"`
	RelocateSpec              *VirtualMachineRelocateSpec `xml:"relocateSpec,omitempty"`
	Hosts                     []ManagedObjectReference    `xml:"hosts,omitempty"`
	Datastores                []ManagedObjectReference    `xml:"datastores,omitempty"`
	StoragePods               []ManagedObjectReference    `xml:"storagePods,omitempty"`
	DisallowPrerequisiteMoves *bool                       `xml:"disallowPrerequisiteMoves"`
	Rules                     []BaseClusterRuleInfo       `xml:"rules,omitempty,typeattr"`
	Key                       string                      `xml:"key,omitempty"`
	PlacementType             string                      `xml:"placementType,omitempty"`
	CloneSpec                 *VirtualMachineCloneSpec    `xml:"cloneSpec,omitempty"`
	CloneName                 string                      `xml:"cloneName,omitempty"`
}

func init() {
	t["PlacementSpec"] = reflect.TypeOf((*PlacementSpec)(nil)).Elem()
}

type PlatformConfigFault struct {
	HostConfigFault

	Text string `xml:"text"`
}

func init() {
	t["PlatformConfigFault"] = reflect.TypeOf((*PlatformConfigFault)(nil)).Elem()
}

type PlatformConfigFaultFault BasePlatformConfigFault

func init() {
	t["PlatformConfigFaultFault"] = reflect.TypeOf((*PlatformConfigFaultFault)(nil)).Elem()
}

type PnicUplinkProfile struct {
	ApplyProfile

	Key string `xml:"key"`
}

func init() {
	t["PnicUplinkProfile"] = reflect.TypeOf((*PnicUplinkProfile)(nil)).Elem()
}

type PodDiskLocator struct {
	DynamicData

	DiskId          int32                           `xml:"diskId"`
	DiskMoveType    string                          `xml:"diskMoveType,omitempty"`
	DiskBackingInfo BaseVirtualDeviceBackingInfo    `xml:"diskBackingInfo,omitempty,typeattr"`
	Profile         []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"`
}

func init() {
	t["PodDiskLocator"] = reflect.TypeOf((*PodDiskLocator)(nil)).Elem()
}

type PodStorageDrsEntry struct {
	DynamicData

	StorageDrsConfig StorageDrsConfigInfo    `xml:"storageDrsConfig"`
	Recommendation   []ClusterRecommendation `xml:"recommendation,omitempty"`
	DrsFault         []ClusterDrsFaults      `xml:"drsFault,omitempty"`
	ActionHistory    []ClusterActionHistory  `xml:"actionHistory,omitempty"`
}

func init() {
	t["PodStorageDrsEntry"] = reflect.TypeOf((*PodStorageDrsEntry)(nil)).Elem()
}

type PolicyOption struct {
	DynamicData

	Id        string        `xml:"id"`
	Parameter []KeyAnyValue `xml:"parameter,omitempty"`
}

func init() {
	t["PolicyOption"] = reflect.TypeOf((*PolicyOption)(nil)).Elem()
}

type PortGroupProfile struct {
	ApplyProfile

	Key           string                        `xml:"key"`
	Name          string                        `xml:"name"`
	Vlan          VlanProfile                   `xml:"vlan"`
	Vswitch       VirtualSwitchSelectionProfile `xml:"vswitch"`
	NetworkPolicy NetworkPolicyProfile          `xml:"networkPolicy"`
}

func init() {
	t["PortGroupProfile"] = reflect.TypeOf((*PortGroupProfile)(nil)).Elem()
}

type PosixUserSearchResult struct {
	UserSearchResult

	Id          int32 `xml:"id"`
	ShellAccess *bool `xml:"shellAccess"`
}

func init() {
	t["PosixUserSearchResult"] = reflect.TypeOf((*PosixUserSearchResult)(nil)).Elem()
}

type PostEvent PostEventRequestType

func init() {
	t["PostEvent"] = reflect.TypeOf((*PostEvent)(nil)).Elem()
}

type PostEventRequestType struct {
	This        ManagedObjectReference `xml:"_this"`
	EventToPost BaseEvent              `xml:"eventToPost,typeattr"`
	TaskInfo    *TaskInfo              `xml:"taskInfo,omitempty"`
}

func init() {
	t["PostEventRequestType"] = reflect.TypeOf((*PostEventRequestType)(nil)).Elem()
}

// Empty response body: the operation returns nothing beyond success.
type PostEventResponse struct {
}

type PostHealthUpdates PostHealthUpdatesRequestType

func init() {
	t["PostHealthUpdates"] = reflect.TypeOf((*PostHealthUpdates)(nil)).Elem()
}

type PostHealthUpdatesRequestType struct {
	This       ManagedObjectReference `xml:"_this"`
	ProviderId string                 `xml:"providerId"`
	Updates    []HealthUpdate         `xml:"updates,omitempty"`
}

func init() {
	t["PostHealthUpdatesRequestType"] = reflect.TypeOf((*PostHealthUpdatesRequestType)(nil)).Elem()
}

type PostHealthUpdatesResponse struct {
}

type PowerDownHostToStandByRequestType struct {
	This                  ManagedObjectReference `xml:"_this"`
	TimeoutSec            int32                  `xml:"timeoutSec"`
	EvacuatePoweredOffVms *bool                  `xml:"evacuatePoweredOffVms"`
}

func init() {
	t["PowerDownHostToStandByRequestType"] = reflect.TypeOf((*PowerDownHostToStandByRequestType)(nil)).Elem()
}

type PowerDownHostToStandBy_Task PowerDownHostToStandByRequestType

func init() {
	t["PowerDownHostToStandBy_Task"] = reflect.TypeOf((*PowerDownHostToStandBy_Task)(nil)).Elem()
}

type PowerDownHostToStandBy_TaskResponse struct {
	Returnval ManagedObjectReference `xml:"returnval"`
}

type PowerOffVAppRequestType struct {
	This  ManagedObjectReference `xml:"_this"`
	Force bool                   `xml:"force"`
}

func init() {
	t["PowerOffVAppRequestType"] = reflect.TypeOf((*PowerOffVAppRequestType)(nil)).Elem()
}

// NOTE(review): this declaration continues past the visible chunk (its
// aliased type name follows below) — left incomplete intentionally.
type PowerOffVApp_Task 
PowerOffVAppRequestType\n\nfunc init() {\n\tt[\"PowerOffVApp_Task\"] = reflect.TypeOf((*PowerOffVApp_Task)(nil)).Elem()\n}\n\ntype PowerOffVApp_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype PowerOffVMRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"PowerOffVMRequestType\"] = reflect.TypeOf((*PowerOffVMRequestType)(nil)).Elem()\n}\n\ntype PowerOffVM_Task PowerOffVMRequestType\n\nfunc init() {\n\tt[\"PowerOffVM_Task\"] = reflect.TypeOf((*PowerOffVM_Task)(nil)).Elem()\n}\n\ntype PowerOffVM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype PowerOnFtSecondaryFailed struct {\n\tVmFaultToleranceIssue\n\n\tVm              ManagedObjectReference          `xml:\"vm\"`\n\tVmName          string                          `xml:\"vmName\"`\n\tHostSelectionBy FtIssuesOnHostHostSelectionType `xml:\"hostSelectionBy\"`\n\tHostErrors      []LocalizedMethodFault          `xml:\"hostErrors,omitempty\"`\n\tRootCause       LocalizedMethodFault            `xml:\"rootCause\"`\n}\n\nfunc init() {\n\tt[\"PowerOnFtSecondaryFailed\"] = reflect.TypeOf((*PowerOnFtSecondaryFailed)(nil)).Elem()\n}\n\ntype PowerOnFtSecondaryFailedFault PowerOnFtSecondaryFailed\n\nfunc init() {\n\tt[\"PowerOnFtSecondaryFailedFault\"] = reflect.TypeOf((*PowerOnFtSecondaryFailedFault)(nil)).Elem()\n}\n\ntype PowerOnFtSecondaryTimedout struct {\n\tTimedout\n\n\tVm      ManagedObjectReference `xml:\"vm\"`\n\tVmName  string                 `xml:\"vmName\"`\n\tTimeout int32                  `xml:\"timeout\"`\n}\n\nfunc init() {\n\tt[\"PowerOnFtSecondaryTimedout\"] = reflect.TypeOf((*PowerOnFtSecondaryTimedout)(nil)).Elem()\n}\n\ntype PowerOnFtSecondaryTimedoutFault PowerOnFtSecondaryTimedout\n\nfunc init() {\n\tt[\"PowerOnFtSecondaryTimedoutFault\"] = reflect.TypeOf((*PowerOnFtSecondaryTimedoutFault)(nil)).Elem()\n}\n\ntype PowerOnMultiVMRequestType struct {\n\tThis   ManagedObjectReference   
`xml:\"_this\"`\n\tVm     []ManagedObjectReference `xml:\"vm\"`\n\tOption []BaseOptionValue        `xml:\"option,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"PowerOnMultiVMRequestType\"] = reflect.TypeOf((*PowerOnMultiVMRequestType)(nil)).Elem()\n}\n\ntype PowerOnMultiVM_Task PowerOnMultiVMRequestType\n\nfunc init() {\n\tt[\"PowerOnMultiVM_Task\"] = reflect.TypeOf((*PowerOnMultiVM_Task)(nil)).Elem()\n}\n\ntype PowerOnMultiVM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype PowerOnVAppRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"PowerOnVAppRequestType\"] = reflect.TypeOf((*PowerOnVAppRequestType)(nil)).Elem()\n}\n\ntype PowerOnVApp_Task PowerOnVAppRequestType\n\nfunc init() {\n\tt[\"PowerOnVApp_Task\"] = reflect.TypeOf((*PowerOnVApp_Task)(nil)).Elem()\n}\n\ntype PowerOnVApp_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype PowerOnVMRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"PowerOnVMRequestType\"] = reflect.TypeOf((*PowerOnVMRequestType)(nil)).Elem()\n}\n\ntype PowerOnVM_Task PowerOnVMRequestType\n\nfunc init() {\n\tt[\"PowerOnVM_Task\"] = reflect.TypeOf((*PowerOnVM_Task)(nil)).Elem()\n}\n\ntype PowerOnVM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype PowerSystemCapability struct {\n\tDynamicData\n\n\tAvailablePolicy []HostPowerPolicy `xml:\"availablePolicy\"`\n}\n\nfunc init() {\n\tt[\"PowerSystemCapability\"] = reflect.TypeOf((*PowerSystemCapability)(nil)).Elem()\n}\n\ntype PowerSystemInfo struct {\n\tDynamicData\n\n\tCurrentPolicy HostPowerPolicy `xml:\"currentPolicy\"`\n}\n\nfunc init() {\n\tt[\"PowerSystemInfo\"] = reflect.TypeOf((*PowerSystemInfo)(nil)).Elem()\n}\n\ntype PowerUpHostFromStandByRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tTimeoutSec 
int32                  `xml:\"timeoutSec\"`\n}\n\nfunc init() {\n\tt[\"PowerUpHostFromStandByRequestType\"] = reflect.TypeOf((*PowerUpHostFromStandByRequestType)(nil)).Elem()\n}\n\ntype PowerUpHostFromStandBy_Task PowerUpHostFromStandByRequestType\n\nfunc init() {\n\tt[\"PowerUpHostFromStandBy_Task\"] = reflect.TypeOf((*PowerUpHostFromStandBy_Task)(nil)).Elem()\n}\n\ntype PowerUpHostFromStandBy_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype PrepareCrypto PrepareCryptoRequestType\n\nfunc init() {\n\tt[\"PrepareCrypto\"] = reflect.TypeOf((*PrepareCrypto)(nil)).Elem()\n}\n\ntype PrepareCryptoRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"PrepareCryptoRequestType\"] = reflect.TypeOf((*PrepareCryptoRequestType)(nil)).Elem()\n}\n\ntype PrepareCryptoResponse struct {\n}\n\ntype PrivilegeAvailability struct {\n\tDynamicData\n\n\tPrivId    string `xml:\"privId\"`\n\tIsGranted bool   `xml:\"isGranted\"`\n}\n\nfunc init() {\n\tt[\"PrivilegeAvailability\"] = reflect.TypeOf((*PrivilegeAvailability)(nil)).Elem()\n}\n\ntype PrivilegePolicyDef struct {\n\tDynamicData\n\n\tCreatePrivilege string `xml:\"createPrivilege\"`\n\tReadPrivilege   string `xml:\"readPrivilege\"`\n\tUpdatePrivilege string `xml:\"updatePrivilege\"`\n\tDeletePrivilege string `xml:\"deletePrivilege\"`\n}\n\nfunc init() {\n\tt[\"PrivilegePolicyDef\"] = reflect.TypeOf((*PrivilegePolicyDef)(nil)).Elem()\n}\n\ntype ProductComponentInfo struct {\n\tDynamicData\n\n\tId      string `xml:\"id\"`\n\tName    string `xml:\"name\"`\n\tVersion string `xml:\"version\"`\n\tRelease int32  `xml:\"release\"`\n}\n\nfunc init() {\n\tt[\"ProductComponentInfo\"] = reflect.TypeOf((*ProductComponentInfo)(nil)).Elem()\n}\n\ntype ProfileApplyProfileElement struct {\n\tApplyProfile\n\n\tKey string `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"ProfileApplyProfileElement\"] = reflect.TypeOf((*ProfileApplyProfileElement)(nil)).Elem()\n}\n\ntype 
ProfileApplyProfileProperty struct {\n\tDynamicData\n\n\tPropertyName string             `xml:\"propertyName\"`\n\tArray        bool               `xml:\"array\"`\n\tProfile      []BaseApplyProfile `xml:\"profile,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ProfileApplyProfileProperty\"] = reflect.TypeOf((*ProfileApplyProfileProperty)(nil)).Elem()\n}\n\ntype ProfileAssociatedEvent struct {\n\tProfileEvent\n}\n\nfunc init() {\n\tt[\"ProfileAssociatedEvent\"] = reflect.TypeOf((*ProfileAssociatedEvent)(nil)).Elem()\n}\n\ntype ProfileChangedEvent struct {\n\tProfileEvent\n}\n\nfunc init() {\n\tt[\"ProfileChangedEvent\"] = reflect.TypeOf((*ProfileChangedEvent)(nil)).Elem()\n}\n\ntype ProfileCompositeExpression struct {\n\tProfileExpression\n\n\tOperator       string   `xml:\"operator\"`\n\tExpressionName []string `xml:\"expressionName\"`\n}\n\nfunc init() {\n\tt[\"ProfileCompositeExpression\"] = reflect.TypeOf((*ProfileCompositeExpression)(nil)).Elem()\n}\n\ntype ProfileCompositePolicyOptionMetadata struct {\n\tProfilePolicyOptionMetadata\n\n\tOption []string `xml:\"option\"`\n}\n\nfunc init() {\n\tt[\"ProfileCompositePolicyOptionMetadata\"] = reflect.TypeOf((*ProfileCompositePolicyOptionMetadata)(nil)).Elem()\n}\n\ntype ProfileConfigInfo struct {\n\tDynamicData\n\n\tName       string `xml:\"name\"`\n\tAnnotation string `xml:\"annotation,omitempty\"`\n\tEnabled    bool   `xml:\"enabled\"`\n}\n\nfunc init() {\n\tt[\"ProfileConfigInfo\"] = reflect.TypeOf((*ProfileConfigInfo)(nil)).Elem()\n}\n\ntype ProfileCreateSpec struct {\n\tDynamicData\n\n\tName       string `xml:\"name,omitempty\"`\n\tAnnotation string `xml:\"annotation,omitempty\"`\n\tEnabled    *bool  `xml:\"enabled\"`\n}\n\nfunc init() {\n\tt[\"ProfileCreateSpec\"] = reflect.TypeOf((*ProfileCreateSpec)(nil)).Elem()\n}\n\ntype ProfileCreatedEvent struct {\n\tProfileEvent\n}\n\nfunc init() {\n\tt[\"ProfileCreatedEvent\"] = reflect.TypeOf((*ProfileCreatedEvent)(nil)).Elem()\n}\n\ntype 
ProfileDeferredPolicyOptionParameter struct {\n\tDynamicData\n\n\tInputPath ProfilePropertyPath `xml:\"inputPath\"`\n\tParameter []KeyAnyValue       `xml:\"parameter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ProfileDeferredPolicyOptionParameter\"] = reflect.TypeOf((*ProfileDeferredPolicyOptionParameter)(nil)).Elem()\n}\n\ntype ProfileDescription struct {\n\tDynamicData\n\n\tSection []ProfileDescriptionSection `xml:\"section\"`\n}\n\nfunc init() {\n\tt[\"ProfileDescription\"] = reflect.TypeOf((*ProfileDescription)(nil)).Elem()\n}\n\ntype ProfileDescriptionSection struct {\n\tDynamicData\n\n\tDescription ExtendedElementDescription `xml:\"description\"`\n\tMessage     []LocalizableMessage       `xml:\"message,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ProfileDescriptionSection\"] = reflect.TypeOf((*ProfileDescriptionSection)(nil)).Elem()\n}\n\ntype ProfileDissociatedEvent struct {\n\tProfileEvent\n}\n\nfunc init() {\n\tt[\"ProfileDissociatedEvent\"] = reflect.TypeOf((*ProfileDissociatedEvent)(nil)).Elem()\n}\n\ntype ProfileEvent struct {\n\tEvent\n\n\tProfile ProfileEventArgument `xml:\"profile\"`\n}\n\nfunc init() {\n\tt[\"ProfileEvent\"] = reflect.TypeOf((*ProfileEvent)(nil)).Elem()\n}\n\ntype ProfileEventArgument struct {\n\tEventArgument\n\n\tProfile ManagedObjectReference `xml:\"profile\"`\n\tName    string                 `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"ProfileEventArgument\"] = reflect.TypeOf((*ProfileEventArgument)(nil)).Elem()\n}\n\ntype ProfileExecuteError struct {\n\tDynamicData\n\n\tPath    *ProfilePropertyPath `xml:\"path,omitempty\"`\n\tMessage LocalizableMessage   `xml:\"message\"`\n}\n\nfunc init() {\n\tt[\"ProfileExecuteError\"] = reflect.TypeOf((*ProfileExecuteError)(nil)).Elem()\n}\n\ntype ProfileExecuteResult struct {\n\tDynamicData\n\n\tStatus           string                                 `xml:\"status\"`\n\tConfigSpec       *HostConfigSpec                        `xml:\"configSpec,omitempty\"`\n\tInapplicablePath []string                   
            `xml:\"inapplicablePath,omitempty\"`\n\tRequireInput     []ProfileDeferredPolicyOptionParameter `xml:\"requireInput,omitempty\"`\n\tError            []ProfileExecuteError                  `xml:\"error,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ProfileExecuteResult\"] = reflect.TypeOf((*ProfileExecuteResult)(nil)).Elem()\n}\n\ntype ProfileExpression struct {\n\tDynamicData\n\n\tId          string `xml:\"id\"`\n\tDisplayName string `xml:\"displayName\"`\n\tNegated     bool   `xml:\"negated\"`\n}\n\nfunc init() {\n\tt[\"ProfileExpression\"] = reflect.TypeOf((*ProfileExpression)(nil)).Elem()\n}\n\ntype ProfileExpressionMetadata struct {\n\tDynamicData\n\n\tExpressionId ExtendedElementDescription `xml:\"expressionId\"`\n\tParameter    []ProfileParameterMetadata `xml:\"parameter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ProfileExpressionMetadata\"] = reflect.TypeOf((*ProfileExpressionMetadata)(nil)).Elem()\n}\n\ntype ProfileMetadata struct {\n\tDynamicData\n\n\tKey              string                           `xml:\"key\"`\n\tProfileTypeName  string                           `xml:\"profileTypeName,omitempty\"`\n\tDescription      *ExtendedDescription             `xml:\"description,omitempty\"`\n\tSortSpec         []ProfileMetadataProfileSortSpec `xml:\"sortSpec,omitempty\"`\n\tProfileCategory  string                           `xml:\"profileCategory,omitempty\"`\n\tProfileComponent string                           `xml:\"profileComponent,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ProfileMetadata\"] = reflect.TypeOf((*ProfileMetadata)(nil)).Elem()\n}\n\ntype ProfileMetadataProfileSortSpec struct {\n\tDynamicData\n\n\tPolicyId  string `xml:\"policyId\"`\n\tParameter string `xml:\"parameter\"`\n}\n\nfunc init() {\n\tt[\"ProfileMetadataProfileSortSpec\"] = reflect.TypeOf((*ProfileMetadataProfileSortSpec)(nil)).Elem()\n}\n\ntype ProfileParameterMetadata struct {\n\tDynamicData\n\n\tId                ExtendedElementDescription `xml:\"id\"`\n\tType              string       
              `xml:\"type\"`\n\tOptional          bool                       `xml:\"optional\"`\n\tDefaultValue      AnyType                    `xml:\"defaultValue,omitempty,typeattr\"`\n\tHidden            *bool                      `xml:\"hidden\"`\n\tSecuritySensitive *bool                      `xml:\"securitySensitive\"`\n\tReadOnly          *bool                      `xml:\"readOnly\"`\n}\n\nfunc init() {\n\tt[\"ProfileParameterMetadata\"] = reflect.TypeOf((*ProfileParameterMetadata)(nil)).Elem()\n}\n\ntype ProfilePolicy struct {\n\tDynamicData\n\n\tId           string           `xml:\"id\"`\n\tPolicyOption BasePolicyOption `xml:\"policyOption,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ProfilePolicy\"] = reflect.TypeOf((*ProfilePolicy)(nil)).Elem()\n}\n\ntype ProfilePolicyMetadata struct {\n\tDynamicData\n\n\tId             ExtendedElementDescription        `xml:\"id\"`\n\tPossibleOption []BaseProfilePolicyOptionMetadata `xml:\"possibleOption,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ProfilePolicyMetadata\"] = reflect.TypeOf((*ProfilePolicyMetadata)(nil)).Elem()\n}\n\ntype ProfilePolicyOptionMetadata struct {\n\tDynamicData\n\n\tId        ExtendedElementDescription `xml:\"id\"`\n\tParameter []ProfileParameterMetadata `xml:\"parameter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ProfilePolicyOptionMetadata\"] = reflect.TypeOf((*ProfilePolicyOptionMetadata)(nil)).Elem()\n}\n\ntype ProfileProfileStructure struct {\n\tDynamicData\n\n\tProfileTypeName string                            `xml:\"profileTypeName\"`\n\tChild           []ProfileProfileStructureProperty `xml:\"child,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ProfileProfileStructure\"] = reflect.TypeOf((*ProfileProfileStructure)(nil)).Elem()\n}\n\ntype ProfileProfileStructureProperty struct {\n\tDynamicData\n\n\tPropertyName string                  `xml:\"propertyName\"`\n\tArray        bool                    `xml:\"array\"`\n\tElement      ProfileProfileStructure `xml:\"element\"`\n}\n\nfunc init() 
{\n\tt[\"ProfileProfileStructureProperty\"] = reflect.TypeOf((*ProfileProfileStructureProperty)(nil)).Elem()\n}\n\ntype ProfilePropertyPath struct {\n\tDynamicData\n\n\tProfilePath string `xml:\"profilePath\"`\n\tPolicyId    string `xml:\"policyId,omitempty\"`\n\tParameterId string `xml:\"parameterId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ProfilePropertyPath\"] = reflect.TypeOf((*ProfilePropertyPath)(nil)).Elem()\n}\n\ntype ProfileReferenceHostChangedEvent struct {\n\tProfileEvent\n\n\tReferenceHost         *ManagedObjectReference `xml:\"referenceHost,omitempty\"`\n\tReferenceHostName     string                  `xml:\"referenceHostName,omitempty\"`\n\tPrevReferenceHostName string                  `xml:\"prevReferenceHostName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ProfileReferenceHostChangedEvent\"] = reflect.TypeOf((*ProfileReferenceHostChangedEvent)(nil)).Elem()\n}\n\ntype ProfileRemovedEvent struct {\n\tProfileEvent\n}\n\nfunc init() {\n\tt[\"ProfileRemovedEvent\"] = reflect.TypeOf((*ProfileRemovedEvent)(nil)).Elem()\n}\n\ntype ProfileSerializedCreateSpec struct {\n\tProfileCreateSpec\n\n\tProfileConfigString string `xml:\"profileConfigString\"`\n}\n\nfunc init() {\n\tt[\"ProfileSerializedCreateSpec\"] = reflect.TypeOf((*ProfileSerializedCreateSpec)(nil)).Elem()\n}\n\ntype ProfileSimpleExpression struct {\n\tProfileExpression\n\n\tExpressionType string        `xml:\"expressionType\"`\n\tParameter      []KeyAnyValue `xml:\"parameter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ProfileSimpleExpression\"] = reflect.TypeOf((*ProfileSimpleExpression)(nil)).Elem()\n}\n\ntype ProfileUpdateFailed struct {\n\tVimFault\n\n\tFailure []ProfileUpdateFailedUpdateFailure `xml:\"failure\"`\n}\n\nfunc init() {\n\tt[\"ProfileUpdateFailed\"] = reflect.TypeOf((*ProfileUpdateFailed)(nil)).Elem()\n}\n\ntype ProfileUpdateFailedFault ProfileUpdateFailed\n\nfunc init() {\n\tt[\"ProfileUpdateFailedFault\"] = reflect.TypeOf((*ProfileUpdateFailedFault)(nil)).Elem()\n}\n\ntype 
ProfileUpdateFailedUpdateFailure struct {\n\tDynamicData\n\n\tProfilePath ProfilePropertyPath `xml:\"profilePath\"`\n\tErrMsg      LocalizableMessage  `xml:\"errMsg\"`\n}\n\nfunc init() {\n\tt[\"ProfileUpdateFailedUpdateFailure\"] = reflect.TypeOf((*ProfileUpdateFailedUpdateFailure)(nil)).Elem()\n}\n\ntype PromoteDisksRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tUnlink bool                   `xml:\"unlink\"`\n\tDisks  []VirtualDisk          `xml:\"disks,omitempty\"`\n}\n\nfunc init() {\n\tt[\"PromoteDisksRequestType\"] = reflect.TypeOf((*PromoteDisksRequestType)(nil)).Elem()\n}\n\ntype PromoteDisks_Task PromoteDisksRequestType\n\nfunc init() {\n\tt[\"PromoteDisks_Task\"] = reflect.TypeOf((*PromoteDisks_Task)(nil)).Elem()\n}\n\ntype PromoteDisks_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype PropertyChange struct {\n\tDynamicData\n\n\tName string           `xml:\"name\"`\n\tOp   PropertyChangeOp `xml:\"op\"`\n\tVal  AnyType          `xml:\"val,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"PropertyChange\"] = reflect.TypeOf((*PropertyChange)(nil)).Elem()\n}\n\ntype PropertyFilterSpec struct {\n\tDynamicData\n\n\tPropSet                       []PropertySpec `xml:\"propSet\"`\n\tObjectSet                     []ObjectSpec   `xml:\"objectSet\"`\n\tReportMissingObjectsInResults *bool          `xml:\"reportMissingObjectsInResults\"`\n}\n\nfunc init() {\n\tt[\"PropertyFilterSpec\"] = reflect.TypeOf((*PropertyFilterSpec)(nil)).Elem()\n}\n\ntype PropertyFilterUpdate struct {\n\tDynamicData\n\n\tFilter     ManagedObjectReference `xml:\"filter\"`\n\tObjectSet  []ObjectUpdate         `xml:\"objectSet,omitempty\"`\n\tMissingSet []MissingObject        `xml:\"missingSet,omitempty\"`\n}\n\nfunc init() {\n\tt[\"PropertyFilterUpdate\"] = reflect.TypeOf((*PropertyFilterUpdate)(nil)).Elem()\n}\n\ntype PropertySpec struct {\n\tDynamicData\n\n\tType    string   `xml:\"type\"`\n\tAll     *bool    
`xml:\"all\"`\n\tPathSet []string `xml:\"pathSet,omitempty\"`\n}\n\nfunc init() {\n\tt[\"PropertySpec\"] = reflect.TypeOf((*PropertySpec)(nil)).Elem()\n}\n\ntype PutUsbScanCodes PutUsbScanCodesRequestType\n\nfunc init() {\n\tt[\"PutUsbScanCodes\"] = reflect.TypeOf((*PutUsbScanCodes)(nil)).Elem()\n}\n\ntype PutUsbScanCodesRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tSpec UsbScanCodeSpec        `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"PutUsbScanCodesRequestType\"] = reflect.TypeOf((*PutUsbScanCodesRequestType)(nil)).Elem()\n}\n\ntype PutUsbScanCodesResponse struct {\n\tReturnval int32 `xml:\"returnval\"`\n}\n\ntype QuarantineModeFault struct {\n\tVmConfigFault\n\n\tVmName    string `xml:\"vmName\"`\n\tFaultType string `xml:\"faultType\"`\n}\n\nfunc init() {\n\tt[\"QuarantineModeFault\"] = reflect.TypeOf((*QuarantineModeFault)(nil)).Elem()\n}\n\ntype QuarantineModeFaultFault QuarantineModeFault\n\nfunc init() {\n\tt[\"QuarantineModeFaultFault\"] = reflect.TypeOf((*QuarantineModeFaultFault)(nil)).Elem()\n}\n\ntype QueryAnswerFileStatus QueryAnswerFileStatusRequestType\n\nfunc init() {\n\tt[\"QueryAnswerFileStatus\"] = reflect.TypeOf((*QueryAnswerFileStatus)(nil)).Elem()\n}\n\ntype QueryAnswerFileStatusRequestType struct {\n\tThis ManagedObjectReference   `xml:\"_this\"`\n\tHost []ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"QueryAnswerFileStatusRequestType\"] = reflect.TypeOf((*QueryAnswerFileStatusRequestType)(nil)).Elem()\n}\n\ntype QueryAnswerFileStatusResponse struct {\n\tReturnval []AnswerFileStatusResult `xml:\"returnval,omitempty\"`\n}\n\ntype QueryAssignedLicenses QueryAssignedLicensesRequestType\n\nfunc init() {\n\tt[\"QueryAssignedLicenses\"] = reflect.TypeOf((*QueryAssignedLicenses)(nil)).Elem()\n}\n\ntype QueryAssignedLicensesRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tEntityId string                 `xml:\"entityId,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"QueryAssignedLicensesRequestType\"] = reflect.TypeOf((*QueryAssignedLicensesRequestType)(nil)).Elem()\n}\n\ntype QueryAssignedLicensesResponse struct {\n\tReturnval []LicenseAssignmentManagerLicenseAssignment `xml:\"returnval\"`\n}\n\ntype QueryAvailableDisksForVmfs QueryAvailableDisksForVmfsRequestType\n\nfunc init() {\n\tt[\"QueryAvailableDisksForVmfs\"] = reflect.TypeOf((*QueryAvailableDisksForVmfs)(nil)).Elem()\n}\n\ntype QueryAvailableDisksForVmfsRequestType struct {\n\tThis      ManagedObjectReference  `xml:\"_this\"`\n\tDatastore *ManagedObjectReference `xml:\"datastore,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryAvailableDisksForVmfsRequestType\"] = reflect.TypeOf((*QueryAvailableDisksForVmfsRequestType)(nil)).Elem()\n}\n\ntype QueryAvailableDisksForVmfsResponse struct {\n\tReturnval []HostScsiDisk `xml:\"returnval,omitempty\"`\n}\n\ntype QueryAvailableDvsSpec QueryAvailableDvsSpecRequestType\n\nfunc init() {\n\tt[\"QueryAvailableDvsSpec\"] = reflect.TypeOf((*QueryAvailableDvsSpec)(nil)).Elem()\n}\n\ntype QueryAvailableDvsSpecRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tRecommended *bool                  `xml:\"recommended\"`\n}\n\nfunc init() {\n\tt[\"QueryAvailableDvsSpecRequestType\"] = reflect.TypeOf((*QueryAvailableDvsSpecRequestType)(nil)).Elem()\n}\n\ntype QueryAvailableDvsSpecResponse struct {\n\tReturnval []DistributedVirtualSwitchProductSpec `xml:\"returnval,omitempty\"`\n}\n\ntype QueryAvailablePartition QueryAvailablePartitionRequestType\n\nfunc init() {\n\tt[\"QueryAvailablePartition\"] = reflect.TypeOf((*QueryAvailablePartition)(nil)).Elem()\n}\n\ntype QueryAvailablePartitionRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryAvailablePartitionRequestType\"] = reflect.TypeOf((*QueryAvailablePartitionRequestType)(nil)).Elem()\n}\n\ntype QueryAvailablePartitionResponse struct {\n\tReturnval []HostDiagnosticPartition `xml:\"returnval,omitempty\"`\n}\n\ntype 
QueryAvailablePerfMetric QueryAvailablePerfMetricRequestType\n\nfunc init() {\n\tt[\"QueryAvailablePerfMetric\"] = reflect.TypeOf((*QueryAvailablePerfMetric)(nil)).Elem()\n}\n\ntype QueryAvailablePerfMetricRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tEntity     ManagedObjectReference `xml:\"entity\"`\n\tBeginTime  *time.Time             `xml:\"beginTime\"`\n\tEndTime    *time.Time             `xml:\"endTime\"`\n\tIntervalId int32                  `xml:\"intervalId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryAvailablePerfMetricRequestType\"] = reflect.TypeOf((*QueryAvailablePerfMetricRequestType)(nil)).Elem()\n}\n\ntype QueryAvailablePerfMetricResponse struct {\n\tReturnval []PerfMetricId `xml:\"returnval,omitempty\"`\n}\n\ntype QueryAvailableSsds QueryAvailableSsdsRequestType\n\nfunc init() {\n\tt[\"QueryAvailableSsds\"] = reflect.TypeOf((*QueryAvailableSsds)(nil)).Elem()\n}\n\ntype QueryAvailableSsdsRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tVffsPath string                 `xml:\"vffsPath,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryAvailableSsdsRequestType\"] = reflect.TypeOf((*QueryAvailableSsdsRequestType)(nil)).Elem()\n}\n\ntype QueryAvailableSsdsResponse struct {\n\tReturnval []HostScsiDisk `xml:\"returnval,omitempty\"`\n}\n\ntype QueryAvailableTimeZones QueryAvailableTimeZonesRequestType\n\nfunc init() {\n\tt[\"QueryAvailableTimeZones\"] = reflect.TypeOf((*QueryAvailableTimeZones)(nil)).Elem()\n}\n\ntype QueryAvailableTimeZonesRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryAvailableTimeZonesRequestType\"] = reflect.TypeOf((*QueryAvailableTimeZonesRequestType)(nil)).Elem()\n}\n\ntype QueryAvailableTimeZonesResponse struct {\n\tReturnval []HostDateTimeSystemTimeZone `xml:\"returnval,omitempty\"`\n}\n\ntype QueryBootDevices QueryBootDevicesRequestType\n\nfunc init() {\n\tt[\"QueryBootDevices\"] = 
reflect.TypeOf((*QueryBootDevices)(nil)).Elem()\n}\n\ntype QueryBootDevicesRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryBootDevicesRequestType\"] = reflect.TypeOf((*QueryBootDevicesRequestType)(nil)).Elem()\n}\n\ntype QueryBootDevicesResponse struct {\n\tReturnval *HostBootDeviceInfo `xml:\"returnval,omitempty\"`\n}\n\ntype QueryBoundVnics QueryBoundVnicsRequestType\n\nfunc init() {\n\tt[\"QueryBoundVnics\"] = reflect.TypeOf((*QueryBoundVnics)(nil)).Elem()\n}\n\ntype QueryBoundVnicsRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tIScsiHbaName string                 `xml:\"iScsiHbaName\"`\n}\n\nfunc init() {\n\tt[\"QueryBoundVnicsRequestType\"] = reflect.TypeOf((*QueryBoundVnicsRequestType)(nil)).Elem()\n}\n\ntype QueryBoundVnicsResponse struct {\n\tReturnval []IscsiPortInfo `xml:\"returnval,omitempty\"`\n}\n\ntype QueryCandidateNics QueryCandidateNicsRequestType\n\nfunc init() {\n\tt[\"QueryCandidateNics\"] = reflect.TypeOf((*QueryCandidateNics)(nil)).Elem()\n}\n\ntype QueryCandidateNicsRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tIScsiHbaName string                 `xml:\"iScsiHbaName\"`\n}\n\nfunc init() {\n\tt[\"QueryCandidateNicsRequestType\"] = reflect.TypeOf((*QueryCandidateNicsRequestType)(nil)).Elem()\n}\n\ntype QueryCandidateNicsResponse struct {\n\tReturnval []IscsiPortInfo `xml:\"returnval,omitempty\"`\n}\n\ntype QueryChangedDiskAreas QueryChangedDiskAreasRequestType\n\nfunc init() {\n\tt[\"QueryChangedDiskAreas\"] = reflect.TypeOf((*QueryChangedDiskAreas)(nil)).Elem()\n}\n\ntype QueryChangedDiskAreasRequestType struct {\n\tThis        ManagedObjectReference  `xml:\"_this\"`\n\tSnapshot    *ManagedObjectReference `xml:\"snapshot,omitempty\"`\n\tDeviceKey   int32                   `xml:\"deviceKey\"`\n\tStartOffset int64                   `xml:\"startOffset\"`\n\tChangeId    string                  `xml:\"changeId\"`\n}\n\nfunc init() 
{\n\tt[\"QueryChangedDiskAreasRequestType\"] = reflect.TypeOf((*QueryChangedDiskAreasRequestType)(nil)).Elem()\n}\n\ntype QueryChangedDiskAreasResponse struct {\n\tReturnval DiskChangeInfo `xml:\"returnval\"`\n}\n\ntype QueryCmmds QueryCmmdsRequestType\n\nfunc init() {\n\tt[\"QueryCmmds\"] = reflect.TypeOf((*QueryCmmds)(nil)).Elem()\n}\n\ntype QueryCmmdsRequestType struct {\n\tThis    ManagedObjectReference             `xml:\"_this\"`\n\tQueries []HostVsanInternalSystemCmmdsQuery `xml:\"queries\"`\n}\n\nfunc init() {\n\tt[\"QueryCmmdsRequestType\"] = reflect.TypeOf((*QueryCmmdsRequestType)(nil)).Elem()\n}\n\ntype QueryCmmdsResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype QueryCompatibleHostForExistingDvs QueryCompatibleHostForExistingDvsRequestType\n\nfunc init() {\n\tt[\"QueryCompatibleHostForExistingDvs\"] = reflect.TypeOf((*QueryCompatibleHostForExistingDvs)(nil)).Elem()\n}\n\ntype QueryCompatibleHostForExistingDvsRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tContainer ManagedObjectReference `xml:\"container\"`\n\tRecursive bool                   `xml:\"recursive\"`\n\tDvs       ManagedObjectReference `xml:\"dvs\"`\n}\n\nfunc init() {\n\tt[\"QueryCompatibleHostForExistingDvsRequestType\"] = reflect.TypeOf((*QueryCompatibleHostForExistingDvsRequestType)(nil)).Elem()\n}\n\ntype QueryCompatibleHostForExistingDvsResponse struct {\n\tReturnval []ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype QueryCompatibleHostForNewDvs QueryCompatibleHostForNewDvsRequestType\n\nfunc init() {\n\tt[\"QueryCompatibleHostForNewDvs\"] = reflect.TypeOf((*QueryCompatibleHostForNewDvs)(nil)).Elem()\n}\n\ntype QueryCompatibleHostForNewDvsRequestType struct {\n\tThis              ManagedObjectReference               `xml:\"_this\"`\n\tContainer         ManagedObjectReference               `xml:\"container\"`\n\tRecursive         bool                                 `xml:\"recursive\"`\n\tSwitchProductSpec 
*DistributedVirtualSwitchProductSpec `xml:\"switchProductSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryCompatibleHostForNewDvsRequestType\"] = reflect.TypeOf((*QueryCompatibleHostForNewDvsRequestType)(nil)).Elem()\n}\n\ntype QueryCompatibleHostForNewDvsResponse struct {\n\tReturnval []ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype QueryComplianceStatus QueryComplianceStatusRequestType\n\nfunc init() {\n\tt[\"QueryComplianceStatus\"] = reflect.TypeOf((*QueryComplianceStatus)(nil)).Elem()\n}\n\ntype QueryComplianceStatusRequestType struct {\n\tThis    ManagedObjectReference   `xml:\"_this\"`\n\tProfile []ManagedObjectReference `xml:\"profile,omitempty\"`\n\tEntity  []ManagedObjectReference `xml:\"entity,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryComplianceStatusRequestType\"] = reflect.TypeOf((*QueryComplianceStatusRequestType)(nil)).Elem()\n}\n\ntype QueryComplianceStatusResponse struct {\n\tReturnval []ComplianceResult `xml:\"returnval,omitempty\"`\n}\n\ntype QueryConfigOption QueryConfigOptionRequestType\n\nfunc init() {\n\tt[\"QueryConfigOption\"] = reflect.TypeOf((*QueryConfigOption)(nil)).Elem()\n}\n\ntype QueryConfigOptionDescriptor QueryConfigOptionDescriptorRequestType\n\nfunc init() {\n\tt[\"QueryConfigOptionDescriptor\"] = reflect.TypeOf((*QueryConfigOptionDescriptor)(nil)).Elem()\n}\n\ntype QueryConfigOptionDescriptorRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryConfigOptionDescriptorRequestType\"] = reflect.TypeOf((*QueryConfigOptionDescriptorRequestType)(nil)).Elem()\n}\n\ntype QueryConfigOptionDescriptorResponse struct {\n\tReturnval []VirtualMachineConfigOptionDescriptor `xml:\"returnval,omitempty\"`\n}\n\ntype QueryConfigOptionEx QueryConfigOptionExRequestType\n\nfunc init() {\n\tt[\"QueryConfigOptionEx\"] = reflect.TypeOf((*QueryConfigOptionEx)(nil)).Elem()\n}\n\ntype QueryConfigOptionExRequestType struct {\n\tThis ManagedObjectReference                   
`xml:\"_this\"`\n\tSpec *EnvironmentBrowserConfigOptionQuerySpec `xml:\"spec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryConfigOptionExRequestType\"] = reflect.TypeOf((*QueryConfigOptionExRequestType)(nil)).Elem()\n}\n\ntype QueryConfigOptionExResponse struct {\n\tReturnval *VirtualMachineConfigOption `xml:\"returnval,omitempty\"`\n}\n\ntype QueryConfigOptionRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tKey  string                  `xml:\"key,omitempty\"`\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryConfigOptionRequestType\"] = reflect.TypeOf((*QueryConfigOptionRequestType)(nil)).Elem()\n}\n\ntype QueryConfigOptionResponse struct {\n\tReturnval *VirtualMachineConfigOption `xml:\"returnval,omitempty\"`\n}\n\ntype QueryConfigTarget QueryConfigTargetRequestType\n\nfunc init() {\n\tt[\"QueryConfigTarget\"] = reflect.TypeOf((*QueryConfigTarget)(nil)).Elem()\n}\n\ntype QueryConfigTargetRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryConfigTargetRequestType\"] = reflect.TypeOf((*QueryConfigTargetRequestType)(nil)).Elem()\n}\n\ntype QueryConfigTargetResponse struct {\n\tReturnval *ConfigTarget `xml:\"returnval,omitempty\"`\n}\n\ntype QueryConfiguredModuleOptionString QueryConfiguredModuleOptionStringRequestType\n\nfunc init() {\n\tt[\"QueryConfiguredModuleOptionString\"] = reflect.TypeOf((*QueryConfiguredModuleOptionString)(nil)).Elem()\n}\n\ntype QueryConfiguredModuleOptionStringRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tName string                 `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"QueryConfiguredModuleOptionStringRequestType\"] = reflect.TypeOf((*QueryConfiguredModuleOptionStringRequestType)(nil)).Elem()\n}\n\ntype QueryConfiguredModuleOptionStringResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype QueryConnectionInfo 
QueryConnectionInfoRequestType\n\nfunc init() {\n\tt[\"QueryConnectionInfo\"] = reflect.TypeOf((*QueryConnectionInfo)(nil)).Elem()\n}\n\ntype QueryConnectionInfoRequestType struct {\n\tThis          ManagedObjectReference `xml:\"_this\"`\n\tHostname      string                 `xml:\"hostname\"`\n\tPort          int32                  `xml:\"port\"`\n\tUsername      string                 `xml:\"username\"`\n\tPassword      string                 `xml:\"password\"`\n\tSslThumbprint string                 `xml:\"sslThumbprint,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryConnectionInfoRequestType\"] = reflect.TypeOf((*QueryConnectionInfoRequestType)(nil)).Elem()\n}\n\ntype QueryConnectionInfoResponse struct {\n\tReturnval HostConnectInfo `xml:\"returnval\"`\n}\n\ntype QueryConnectionInfoViaSpec QueryConnectionInfoViaSpecRequestType\n\nfunc init() {\n\tt[\"QueryConnectionInfoViaSpec\"] = reflect.TypeOf((*QueryConnectionInfoViaSpec)(nil)).Elem()\n}\n\ntype QueryConnectionInfoViaSpecRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tSpec HostConnectSpec        `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"QueryConnectionInfoViaSpecRequestType\"] = reflect.TypeOf((*QueryConnectionInfoViaSpecRequestType)(nil)).Elem()\n}\n\ntype QueryConnectionInfoViaSpecResponse struct {\n\tReturnval HostConnectInfo `xml:\"returnval\"`\n}\n\ntype QueryDatastorePerformanceSummary QueryDatastorePerformanceSummaryRequestType\n\nfunc init() {\n\tt[\"QueryDatastorePerformanceSummary\"] = reflect.TypeOf((*QueryDatastorePerformanceSummary)(nil)).Elem()\n}\n\ntype QueryDatastorePerformanceSummaryRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"QueryDatastorePerformanceSummaryRequestType\"] = reflect.TypeOf((*QueryDatastorePerformanceSummaryRequestType)(nil)).Elem()\n}\n\ntype QueryDatastorePerformanceSummaryResponse struct {\n\tReturnval []StoragePerformanceSummary 
`xml:\"returnval,omitempty\"`\n}\n\ntype QueryDateTime QueryDateTimeRequestType\n\nfunc init() {\n\tt[\"QueryDateTime\"] = reflect.TypeOf((*QueryDateTime)(nil)).Elem()\n}\n\ntype QueryDateTimeRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryDateTimeRequestType\"] = reflect.TypeOf((*QueryDateTimeRequestType)(nil)).Elem()\n}\n\ntype QueryDateTimeResponse struct {\n\tReturnval time.Time `xml:\"returnval\"`\n}\n\ntype QueryDescriptions QueryDescriptionsRequestType\n\nfunc init() {\n\tt[\"QueryDescriptions\"] = reflect.TypeOf((*QueryDescriptions)(nil)).Elem()\n}\n\ntype QueryDescriptionsRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryDescriptionsRequestType\"] = reflect.TypeOf((*QueryDescriptionsRequestType)(nil)).Elem()\n}\n\ntype QueryDescriptionsResponse struct {\n\tReturnval []DiagnosticManagerLogDescriptor `xml:\"returnval,omitempty\"`\n}\n\ntype QueryDisksForVsan QueryDisksForVsanRequestType\n\nfunc init() {\n\tt[\"QueryDisksForVsan\"] = reflect.TypeOf((*QueryDisksForVsan)(nil)).Elem()\n}\n\ntype QueryDisksForVsanRequestType struct {\n\tThis          ManagedObjectReference `xml:\"_this\"`\n\tCanonicalName []string               `xml:\"canonicalName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryDisksForVsanRequestType\"] = reflect.TypeOf((*QueryDisksForVsanRequestType)(nil)).Elem()\n}\n\ntype QueryDisksForVsanResponse struct {\n\tReturnval []VsanHostDiskResult `xml:\"returnval,omitempty\"`\n}\n\ntype QueryDisksUsingFilter QueryDisksUsingFilterRequestType\n\nfunc init() {\n\tt[\"QueryDisksUsingFilter\"] = reflect.TypeOf((*QueryDisksUsingFilter)(nil)).Elem()\n}\n\ntype QueryDisksUsingFilterRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tFilterId string                 `xml:\"filterId\"`\n\tCompRes  ManagedObjectReference `xml:\"compRes\"`\n}\n\nfunc init() 
{\n\tt[\"QueryDisksUsingFilterRequestType\"] = reflect.TypeOf((*QueryDisksUsingFilterRequestType)(nil)).Elem()\n}\n\ntype QueryDisksUsingFilterResponse struct {\n\tReturnval []VirtualDiskId `xml:\"returnval\"`\n}\n\ntype QueryDvsByUuid QueryDvsByUuidRequestType\n\nfunc init() {\n\tt[\"QueryDvsByUuid\"] = reflect.TypeOf((*QueryDvsByUuid)(nil)).Elem()\n}\n\ntype QueryDvsByUuidRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tUuid string                 `xml:\"uuid\"`\n}\n\nfunc init() {\n\tt[\"QueryDvsByUuidRequestType\"] = reflect.TypeOf((*QueryDvsByUuidRequestType)(nil)).Elem()\n}\n\ntype QueryDvsByUuidResponse struct {\n\tReturnval *ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype QueryDvsCheckCompatibility QueryDvsCheckCompatibilityRequestType\n\nfunc init() {\n\tt[\"QueryDvsCheckCompatibility\"] = reflect.TypeOf((*QueryDvsCheckCompatibility)(nil)).Elem()\n}\n\ntype QueryDvsCheckCompatibilityRequestType struct {\n\tThis           ManagedObjectReference                                 `xml:\"_this\"`\n\tHostContainer  DistributedVirtualSwitchManagerHostContainer           `xml:\"hostContainer\"`\n\tDvsProductSpec *DistributedVirtualSwitchManagerDvsProductSpec         `xml:\"dvsProductSpec,omitempty\"`\n\tHostFilterSpec []BaseDistributedVirtualSwitchManagerHostDvsFilterSpec `xml:\"hostFilterSpec,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"QueryDvsCheckCompatibilityRequestType\"] = reflect.TypeOf((*QueryDvsCheckCompatibilityRequestType)(nil)).Elem()\n}\n\ntype QueryDvsCheckCompatibilityResponse struct {\n\tReturnval []DistributedVirtualSwitchManagerCompatibilityResult `xml:\"returnval,omitempty\"`\n}\n\ntype QueryDvsCompatibleHostSpec QueryDvsCompatibleHostSpecRequestType\n\nfunc init() {\n\tt[\"QueryDvsCompatibleHostSpec\"] = reflect.TypeOf((*QueryDvsCompatibleHostSpec)(nil)).Elem()\n}\n\ntype QueryDvsCompatibleHostSpecRequestType struct {\n\tThis              ManagedObjectReference               
`xml:\"_this\"`\n\tSwitchProductSpec *DistributedVirtualSwitchProductSpec `xml:\"switchProductSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryDvsCompatibleHostSpecRequestType\"] = reflect.TypeOf((*QueryDvsCompatibleHostSpecRequestType)(nil)).Elem()\n}\n\ntype QueryDvsCompatibleHostSpecResponse struct {\n\tReturnval []DistributedVirtualSwitchHostProductSpec `xml:\"returnval,omitempty\"`\n}\n\ntype QueryDvsConfigTarget QueryDvsConfigTargetRequestType\n\nfunc init() {\n\tt[\"QueryDvsConfigTarget\"] = reflect.TypeOf((*QueryDvsConfigTarget)(nil)).Elem()\n}\n\ntype QueryDvsConfigTargetRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n\tDvs  *ManagedObjectReference `xml:\"dvs,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryDvsConfigTargetRequestType\"] = reflect.TypeOf((*QueryDvsConfigTargetRequestType)(nil)).Elem()\n}\n\ntype QueryDvsConfigTargetResponse struct {\n\tReturnval DVSManagerDvsConfigTarget `xml:\"returnval\"`\n}\n\ntype QueryDvsFeatureCapability QueryDvsFeatureCapabilityRequestType\n\nfunc init() {\n\tt[\"QueryDvsFeatureCapability\"] = reflect.TypeOf((*QueryDvsFeatureCapability)(nil)).Elem()\n}\n\ntype QueryDvsFeatureCapabilityRequestType struct {\n\tThis              ManagedObjectReference               `xml:\"_this\"`\n\tSwitchProductSpec *DistributedVirtualSwitchProductSpec `xml:\"switchProductSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryDvsFeatureCapabilityRequestType\"] = reflect.TypeOf((*QueryDvsFeatureCapabilityRequestType)(nil)).Elem()\n}\n\ntype QueryDvsFeatureCapabilityResponse struct {\n\tReturnval BaseDVSFeatureCapability `xml:\"returnval,omitempty,typeattr\"`\n}\n\ntype QueryEvents QueryEventsRequestType\n\nfunc init() {\n\tt[\"QueryEvents\"] = reflect.TypeOf((*QueryEvents)(nil)).Elem()\n}\n\ntype QueryEventsRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tFilter EventFilterSpec        `xml:\"filter\"`\n}\n\nfunc init() 
{\n\tt[\"QueryEventsRequestType\"] = reflect.TypeOf((*QueryEventsRequestType)(nil)).Elem()\n}\n\ntype QueryEventsResponse struct {\n\tReturnval []BaseEvent `xml:\"returnval,omitempty,typeattr\"`\n}\n\ntype QueryExpressionMetadata QueryExpressionMetadataRequestType\n\nfunc init() {\n\tt[\"QueryExpressionMetadata\"] = reflect.TypeOf((*QueryExpressionMetadata)(nil)).Elem()\n}\n\ntype QueryExpressionMetadataRequestType struct {\n\tThis           ManagedObjectReference  `xml:\"_this\"`\n\tExpressionName []string                `xml:\"expressionName,omitempty\"`\n\tProfile        *ManagedObjectReference `xml:\"profile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryExpressionMetadataRequestType\"] = reflect.TypeOf((*QueryExpressionMetadataRequestType)(nil)).Elem()\n}\n\ntype QueryExpressionMetadataResponse struct {\n\tReturnval []ProfileExpressionMetadata `xml:\"returnval,omitempty\"`\n}\n\ntype QueryExtensionIpAllocationUsage QueryExtensionIpAllocationUsageRequestType\n\nfunc init() {\n\tt[\"QueryExtensionIpAllocationUsage\"] = reflect.TypeOf((*QueryExtensionIpAllocationUsage)(nil)).Elem()\n}\n\ntype QueryExtensionIpAllocationUsageRequestType struct {\n\tThis          ManagedObjectReference `xml:\"_this\"`\n\tExtensionKeys []string               `xml:\"extensionKeys,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryExtensionIpAllocationUsageRequestType\"] = reflect.TypeOf((*QueryExtensionIpAllocationUsageRequestType)(nil)).Elem()\n}\n\ntype QueryExtensionIpAllocationUsageResponse struct {\n\tReturnval []ExtensionManagerIpAllocationUsage `xml:\"returnval,omitempty\"`\n}\n\ntype QueryFaultToleranceCompatibility QueryFaultToleranceCompatibilityRequestType\n\nfunc init() {\n\tt[\"QueryFaultToleranceCompatibility\"] = reflect.TypeOf((*QueryFaultToleranceCompatibility)(nil)).Elem()\n}\n\ntype QueryFaultToleranceCompatibilityEx QueryFaultToleranceCompatibilityExRequestType\n\nfunc init() {\n\tt[\"QueryFaultToleranceCompatibilityEx\"] = 
reflect.TypeOf((*QueryFaultToleranceCompatibilityEx)(nil)).Elem()\n}\n\ntype QueryFaultToleranceCompatibilityExRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tForLegacyFt *bool                  `xml:\"forLegacyFt\"`\n}\n\nfunc init() {\n\tt[\"QueryFaultToleranceCompatibilityExRequestType\"] = reflect.TypeOf((*QueryFaultToleranceCompatibilityExRequestType)(nil)).Elem()\n}\n\ntype QueryFaultToleranceCompatibilityExResponse struct {\n\tReturnval []LocalizedMethodFault `xml:\"returnval,omitempty\"`\n}\n\ntype QueryFaultToleranceCompatibilityRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryFaultToleranceCompatibilityRequestType\"] = reflect.TypeOf((*QueryFaultToleranceCompatibilityRequestType)(nil)).Elem()\n}\n\ntype QueryFaultToleranceCompatibilityResponse struct {\n\tReturnval []LocalizedMethodFault `xml:\"returnval,omitempty\"`\n}\n\ntype QueryFilterEntities QueryFilterEntitiesRequestType\n\nfunc init() {\n\tt[\"QueryFilterEntities\"] = reflect.TypeOf((*QueryFilterEntities)(nil)).Elem()\n}\n\ntype QueryFilterEntitiesRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tFilterId string                 `xml:\"filterId\"`\n}\n\nfunc init() {\n\tt[\"QueryFilterEntitiesRequestType\"] = reflect.TypeOf((*QueryFilterEntitiesRequestType)(nil)).Elem()\n}\n\ntype QueryFilterEntitiesResponse struct {\n\tReturnval []ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype QueryFilterInfoIds QueryFilterInfoIdsRequestType\n\nfunc init() {\n\tt[\"QueryFilterInfoIds\"] = reflect.TypeOf((*QueryFilterInfoIds)(nil)).Elem()\n}\n\ntype QueryFilterInfoIdsRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tFilterId string                 `xml:\"filterId\"`\n}\n\nfunc init() {\n\tt[\"QueryFilterInfoIdsRequestType\"] = reflect.TypeOf((*QueryFilterInfoIdsRequestType)(nil)).Elem()\n}\n\ntype QueryFilterInfoIdsResponse struct {\n\tReturnval []string 
`xml:\"returnval,omitempty\"`\n}\n\ntype QueryFilterList QueryFilterListRequestType\n\nfunc init() {\n\tt[\"QueryFilterList\"] = reflect.TypeOf((*QueryFilterList)(nil)).Elem()\n}\n\ntype QueryFilterListRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tProviderId string                 `xml:\"providerId\"`\n}\n\nfunc init() {\n\tt[\"QueryFilterListRequestType\"] = reflect.TypeOf((*QueryFilterListRequestType)(nil)).Elem()\n}\n\ntype QueryFilterListResponse struct {\n\tReturnval []string `xml:\"returnval,omitempty\"`\n}\n\ntype QueryFilterName QueryFilterNameRequestType\n\nfunc init() {\n\tt[\"QueryFilterName\"] = reflect.TypeOf((*QueryFilterName)(nil)).Elem()\n}\n\ntype QueryFilterNameRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tFilterId string                 `xml:\"filterId\"`\n}\n\nfunc init() {\n\tt[\"QueryFilterNameRequestType\"] = reflect.TypeOf((*QueryFilterNameRequestType)(nil)).Elem()\n}\n\ntype QueryFilterNameResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype QueryFirmwareConfigUploadURL QueryFirmwareConfigUploadURLRequestType\n\nfunc init() {\n\tt[\"QueryFirmwareConfigUploadURL\"] = reflect.TypeOf((*QueryFirmwareConfigUploadURL)(nil)).Elem()\n}\n\ntype QueryFirmwareConfigUploadURLRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryFirmwareConfigUploadURLRequestType\"] = reflect.TypeOf((*QueryFirmwareConfigUploadURLRequestType)(nil)).Elem()\n}\n\ntype QueryFirmwareConfigUploadURLResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype QueryHealthUpdateInfos QueryHealthUpdateInfosRequestType\n\nfunc init() {\n\tt[\"QueryHealthUpdateInfos\"] = reflect.TypeOf((*QueryHealthUpdateInfos)(nil)).Elem()\n}\n\ntype QueryHealthUpdateInfosRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tProviderId string                 `xml:\"providerId\"`\n}\n\nfunc init() {\n\tt[\"QueryHealthUpdateInfosRequestType\"] = 
reflect.TypeOf((*QueryHealthUpdateInfosRequestType)(nil)).Elem()\n}\n\ntype QueryHealthUpdateInfosResponse struct {\n\tReturnval []HealthUpdateInfo `xml:\"returnval,omitempty\"`\n}\n\ntype QueryHealthUpdates QueryHealthUpdatesRequestType\n\nfunc init() {\n\tt[\"QueryHealthUpdates\"] = reflect.TypeOf((*QueryHealthUpdates)(nil)).Elem()\n}\n\ntype QueryHealthUpdatesRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tProviderId string                 `xml:\"providerId\"`\n}\n\nfunc init() {\n\tt[\"QueryHealthUpdatesRequestType\"] = reflect.TypeOf((*QueryHealthUpdatesRequestType)(nil)).Elem()\n}\n\ntype QueryHealthUpdatesResponse struct {\n\tReturnval []HealthUpdate `xml:\"returnval,omitempty\"`\n}\n\ntype QueryHostConnectionInfo QueryHostConnectionInfoRequestType\n\nfunc init() {\n\tt[\"QueryHostConnectionInfo\"] = reflect.TypeOf((*QueryHostConnectionInfo)(nil)).Elem()\n}\n\ntype QueryHostConnectionInfoRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryHostConnectionInfoRequestType\"] = reflect.TypeOf((*QueryHostConnectionInfoRequestType)(nil)).Elem()\n}\n\ntype QueryHostConnectionInfoResponse struct {\n\tReturnval HostConnectInfo `xml:\"returnval\"`\n}\n\ntype QueryHostPatchRequestType struct {\n\tThis ManagedObjectReference                     `xml:\"_this\"`\n\tSpec *HostPatchManagerPatchManagerOperationSpec `xml:\"spec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryHostPatchRequestType\"] = reflect.TypeOf((*QueryHostPatchRequestType)(nil)).Elem()\n}\n\ntype QueryHostPatch_Task QueryHostPatchRequestType\n\nfunc init() {\n\tt[\"QueryHostPatch_Task\"] = reflect.TypeOf((*QueryHostPatch_Task)(nil)).Elem()\n}\n\ntype QueryHostPatch_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype QueryHostProfileMetadata QueryHostProfileMetadataRequestType\n\nfunc init() {\n\tt[\"QueryHostProfileMetadata\"] = reflect.TypeOf((*QueryHostProfileMetadata)(nil)).Elem()\n}\n\ntype 
QueryHostProfileMetadataRequestType struct {\n\tThis        ManagedObjectReference  `xml:\"_this\"`\n\tProfileName []string                `xml:\"profileName,omitempty\"`\n\tProfile     *ManagedObjectReference `xml:\"profile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryHostProfileMetadataRequestType\"] = reflect.TypeOf((*QueryHostProfileMetadataRequestType)(nil)).Elem()\n}\n\ntype QueryHostProfileMetadataResponse struct {\n\tReturnval []ProfileMetadata `xml:\"returnval,omitempty\"`\n}\n\ntype QueryHostStatus QueryHostStatusRequestType\n\nfunc init() {\n\tt[\"QueryHostStatus\"] = reflect.TypeOf((*QueryHostStatus)(nil)).Elem()\n}\n\ntype QueryHostStatusRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryHostStatusRequestType\"] = reflect.TypeOf((*QueryHostStatusRequestType)(nil)).Elem()\n}\n\ntype QueryHostStatusResponse struct {\n\tReturnval VsanHostClusterStatus `xml:\"returnval\"`\n}\n\ntype QueryIORMConfigOption QueryIORMConfigOptionRequestType\n\nfunc init() {\n\tt[\"QueryIORMConfigOption\"] = reflect.TypeOf((*QueryIORMConfigOption)(nil)).Elem()\n}\n\ntype QueryIORMConfigOptionRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tHost ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"QueryIORMConfigOptionRequestType\"] = reflect.TypeOf((*QueryIORMConfigOptionRequestType)(nil)).Elem()\n}\n\ntype QueryIORMConfigOptionResponse struct {\n\tReturnval StorageIORMConfigOption `xml:\"returnval\"`\n}\n\ntype QueryIPAllocations QueryIPAllocationsRequestType\n\nfunc init() {\n\tt[\"QueryIPAllocations\"] = reflect.TypeOf((*QueryIPAllocations)(nil)).Elem()\n}\n\ntype QueryIPAllocationsRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tDc           ManagedObjectReference `xml:\"dc\"`\n\tPoolId       int32                  `xml:\"poolId\"`\n\tExtensionKey string                 `xml:\"extensionKey\"`\n}\n\nfunc init() {\n\tt[\"QueryIPAllocationsRequestType\"] = 
reflect.TypeOf((*QueryIPAllocationsRequestType)(nil)).Elem()\n}\n\ntype QueryIPAllocationsResponse struct {\n\tReturnval []IpPoolManagerIpAllocation `xml:\"returnval\"`\n}\n\ntype QueryIoFilterInfo QueryIoFilterInfoRequestType\n\nfunc init() {\n\tt[\"QueryIoFilterInfo\"] = reflect.TypeOf((*QueryIoFilterInfo)(nil)).Elem()\n}\n\ntype QueryIoFilterInfoRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tCompRes ManagedObjectReference `xml:\"compRes\"`\n}\n\nfunc init() {\n\tt[\"QueryIoFilterInfoRequestType\"] = reflect.TypeOf((*QueryIoFilterInfoRequestType)(nil)).Elem()\n}\n\ntype QueryIoFilterInfoResponse struct {\n\tReturnval []ClusterIoFilterInfo `xml:\"returnval,omitempty\"`\n}\n\ntype QueryIoFilterIssues QueryIoFilterIssuesRequestType\n\nfunc init() {\n\tt[\"QueryIoFilterIssues\"] = reflect.TypeOf((*QueryIoFilterIssues)(nil)).Elem()\n}\n\ntype QueryIoFilterIssuesRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tFilterId string                 `xml:\"filterId\"`\n\tCompRes  ManagedObjectReference `xml:\"compRes\"`\n}\n\nfunc init() {\n\tt[\"QueryIoFilterIssuesRequestType\"] = reflect.TypeOf((*QueryIoFilterIssuesRequestType)(nil)).Elem()\n}\n\ntype QueryIoFilterIssuesResponse struct {\n\tReturnval IoFilterQueryIssueResult `xml:\"returnval\"`\n}\n\ntype QueryIpPools QueryIpPoolsRequestType\n\nfunc init() {\n\tt[\"QueryIpPools\"] = reflect.TypeOf((*QueryIpPools)(nil)).Elem()\n}\n\ntype QueryIpPoolsRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tDc   ManagedObjectReference `xml:\"dc\"`\n}\n\nfunc init() {\n\tt[\"QueryIpPoolsRequestType\"] = reflect.TypeOf((*QueryIpPoolsRequestType)(nil)).Elem()\n}\n\ntype QueryIpPoolsResponse struct {\n\tReturnval []IpPool `xml:\"returnval,omitempty\"`\n}\n\ntype QueryLicenseSourceAvailability QueryLicenseSourceAvailabilityRequestType\n\nfunc init() {\n\tt[\"QueryLicenseSourceAvailability\"] = reflect.TypeOf((*QueryLicenseSourceAvailability)(nil)).Elem()\n}\n\ntype 
QueryLicenseSourceAvailabilityRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryLicenseSourceAvailabilityRequestType\"] = reflect.TypeOf((*QueryLicenseSourceAvailabilityRequestType)(nil)).Elem()\n}\n\ntype QueryLicenseSourceAvailabilityResponse struct {\n\tReturnval []LicenseAvailabilityInfo `xml:\"returnval,omitempty\"`\n}\n\ntype QueryLicenseUsage QueryLicenseUsageRequestType\n\nfunc init() {\n\tt[\"QueryLicenseUsage\"] = reflect.TypeOf((*QueryLicenseUsage)(nil)).Elem()\n}\n\ntype QueryLicenseUsageRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryLicenseUsageRequestType\"] = reflect.TypeOf((*QueryLicenseUsageRequestType)(nil)).Elem()\n}\n\ntype QueryLicenseUsageResponse struct {\n\tReturnval LicenseUsageInfo `xml:\"returnval\"`\n}\n\ntype QueryLockdownExceptions QueryLockdownExceptionsRequestType\n\nfunc init() {\n\tt[\"QueryLockdownExceptions\"] = reflect.TypeOf((*QueryLockdownExceptions)(nil)).Elem()\n}\n\ntype QueryLockdownExceptionsRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryLockdownExceptionsRequestType\"] = reflect.TypeOf((*QueryLockdownExceptionsRequestType)(nil)).Elem()\n}\n\ntype QueryLockdownExceptionsResponse struct {\n\tReturnval []string `xml:\"returnval,omitempty\"`\n}\n\ntype QueryManagedBy QueryManagedByRequestType\n\nfunc init() {\n\tt[\"QueryManagedBy\"] = reflect.TypeOf((*QueryManagedBy)(nil)).Elem()\n}\n\ntype QueryManagedByRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tExtensionKey string                 `xml:\"extensionKey\"`\n}\n\nfunc init() {\n\tt[\"QueryManagedByRequestType\"] = reflect.TypeOf((*QueryManagedByRequestType)(nil)).Elem()\n}\n\ntype QueryManagedByResponse struct {\n\tReturnval []ManagedObjectReference 
`xml:\"returnval,omitempty\"`\n}\n\ntype QueryMemoryOverhead QueryMemoryOverheadRequestType\n\nfunc init() {\n\tt[\"QueryMemoryOverhead\"] = reflect.TypeOf((*QueryMemoryOverhead)(nil)).Elem()\n}\n\ntype QueryMemoryOverheadEx QueryMemoryOverheadExRequestType\n\nfunc init() {\n\tt[\"QueryMemoryOverheadEx\"] = reflect.TypeOf((*QueryMemoryOverheadEx)(nil)).Elem()\n}\n\ntype QueryMemoryOverheadExRequestType struct {\n\tThis         ManagedObjectReference   `xml:\"_this\"`\n\tVmConfigInfo VirtualMachineConfigInfo `xml:\"vmConfigInfo\"`\n}\n\nfunc init() {\n\tt[\"QueryMemoryOverheadExRequestType\"] = reflect.TypeOf((*QueryMemoryOverheadExRequestType)(nil)).Elem()\n}\n\ntype QueryMemoryOverheadExResponse struct {\n\tReturnval int64 `xml:\"returnval\"`\n}\n\ntype QueryMemoryOverheadRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tMemorySize   int64                  `xml:\"memorySize\"`\n\tVideoRamSize int32                  `xml:\"videoRamSize,omitempty\"`\n\tNumVcpus     int32                  `xml:\"numVcpus\"`\n}\n\nfunc init() {\n\tt[\"QueryMemoryOverheadRequestType\"] = reflect.TypeOf((*QueryMemoryOverheadRequestType)(nil)).Elem()\n}\n\ntype QueryMemoryOverheadResponse struct {\n\tReturnval int64 `xml:\"returnval\"`\n}\n\ntype QueryMigrationDependencies QueryMigrationDependenciesRequestType\n\nfunc init() {\n\tt[\"QueryMigrationDependencies\"] = reflect.TypeOf((*QueryMigrationDependencies)(nil)).Elem()\n}\n\ntype QueryMigrationDependenciesRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tPnicDevice []string               `xml:\"pnicDevice\"`\n}\n\nfunc init() {\n\tt[\"QueryMigrationDependenciesRequestType\"] = reflect.TypeOf((*QueryMigrationDependenciesRequestType)(nil)).Elem()\n}\n\ntype QueryMigrationDependenciesResponse struct {\n\tReturnval IscsiMigrationDependency `xml:\"returnval\"`\n}\n\ntype QueryModules QueryModulesRequestType\n\nfunc init() {\n\tt[\"QueryModules\"] = 
reflect.TypeOf((*QueryModules)(nil)).Elem()\n}\n\ntype QueryModulesRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryModulesRequestType\"] = reflect.TypeOf((*QueryModulesRequestType)(nil)).Elem()\n}\n\ntype QueryModulesResponse struct {\n\tReturnval []KernelModuleInfo `xml:\"returnval,omitempty\"`\n}\n\ntype QueryMonitoredEntities QueryMonitoredEntitiesRequestType\n\nfunc init() {\n\tt[\"QueryMonitoredEntities\"] = reflect.TypeOf((*QueryMonitoredEntities)(nil)).Elem()\n}\n\ntype QueryMonitoredEntitiesRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tProviderId string                 `xml:\"providerId\"`\n}\n\nfunc init() {\n\tt[\"QueryMonitoredEntitiesRequestType\"] = reflect.TypeOf((*QueryMonitoredEntitiesRequestType)(nil)).Elem()\n}\n\ntype QueryMonitoredEntitiesResponse struct {\n\tReturnval []ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype QueryNFSUser QueryNFSUserRequestType\n\nfunc init() {\n\tt[\"QueryNFSUser\"] = reflect.TypeOf((*QueryNFSUser)(nil)).Elem()\n}\n\ntype QueryNFSUserRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryNFSUserRequestType\"] = reflect.TypeOf((*QueryNFSUserRequestType)(nil)).Elem()\n}\n\ntype QueryNFSUserResponse struct {\n\tReturnval *HostNasVolumeUserInfo `xml:\"returnval,omitempty\"`\n}\n\ntype QueryNetConfig QueryNetConfigRequestType\n\nfunc init() {\n\tt[\"QueryNetConfig\"] = reflect.TypeOf((*QueryNetConfig)(nil)).Elem()\n}\n\ntype QueryNetConfigRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tNicType string                 `xml:\"nicType\"`\n}\n\nfunc init() {\n\tt[\"QueryNetConfigRequestType\"] = reflect.TypeOf((*QueryNetConfigRequestType)(nil)).Elem()\n}\n\ntype QueryNetConfigResponse struct {\n\tReturnval *VirtualNicManagerNetConfig `xml:\"returnval,omitempty\"`\n}\n\ntype QueryNetworkHint QueryNetworkHintRequestType\n\nfunc init() {\n\tt[\"QueryNetworkHint\"] 
= reflect.TypeOf((*QueryNetworkHint)(nil)).Elem()\n}\n\ntype QueryNetworkHintRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tDevice []string               `xml:\"device,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryNetworkHintRequestType\"] = reflect.TypeOf((*QueryNetworkHintRequestType)(nil)).Elem()\n}\n\ntype QueryNetworkHintResponse struct {\n\tReturnval []PhysicalNicHintInfo `xml:\"returnval,omitempty\"`\n}\n\ntype QueryObjectsOnPhysicalVsanDisk QueryObjectsOnPhysicalVsanDiskRequestType\n\nfunc init() {\n\tt[\"QueryObjectsOnPhysicalVsanDisk\"] = reflect.TypeOf((*QueryObjectsOnPhysicalVsanDisk)(nil)).Elem()\n}\n\ntype QueryObjectsOnPhysicalVsanDiskRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tDisks []string               `xml:\"disks\"`\n}\n\nfunc init() {\n\tt[\"QueryObjectsOnPhysicalVsanDiskRequestType\"] = reflect.TypeOf((*QueryObjectsOnPhysicalVsanDiskRequestType)(nil)).Elem()\n}\n\ntype QueryObjectsOnPhysicalVsanDiskResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype QueryOptions QueryOptionsRequestType\n\nfunc init() {\n\tt[\"QueryOptions\"] = reflect.TypeOf((*QueryOptions)(nil)).Elem()\n}\n\ntype QueryOptionsRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tName string                 `xml:\"name,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryOptionsRequestType\"] = reflect.TypeOf((*QueryOptionsRequestType)(nil)).Elem()\n}\n\ntype QueryOptionsResponse struct {\n\tReturnval []BaseOptionValue `xml:\"returnval,omitempty,typeattr\"`\n}\n\ntype QueryPartitionCreateDesc QueryPartitionCreateDescRequestType\n\nfunc init() {\n\tt[\"QueryPartitionCreateDesc\"] = reflect.TypeOf((*QueryPartitionCreateDesc)(nil)).Elem()\n}\n\ntype QueryPartitionCreateDescRequestType struct {\n\tThis           ManagedObjectReference `xml:\"_this\"`\n\tDiskUuid       string                 `xml:\"diskUuid\"`\n\tDiagnosticType string                 `xml:\"diagnosticType\"`\n}\n\nfunc init() 
{\n\tt[\"QueryPartitionCreateDescRequestType\"] = reflect.TypeOf((*QueryPartitionCreateDescRequestType)(nil)).Elem()\n}\n\ntype QueryPartitionCreateDescResponse struct {\n\tReturnval HostDiagnosticPartitionCreateDescription `xml:\"returnval\"`\n}\n\ntype QueryPartitionCreateOptions QueryPartitionCreateOptionsRequestType\n\nfunc init() {\n\tt[\"QueryPartitionCreateOptions\"] = reflect.TypeOf((*QueryPartitionCreateOptions)(nil)).Elem()\n}\n\ntype QueryPartitionCreateOptionsRequestType struct {\n\tThis           ManagedObjectReference `xml:\"_this\"`\n\tStorageType    string                 `xml:\"storageType\"`\n\tDiagnosticType string                 `xml:\"diagnosticType\"`\n}\n\nfunc init() {\n\tt[\"QueryPartitionCreateOptionsRequestType\"] = reflect.TypeOf((*QueryPartitionCreateOptionsRequestType)(nil)).Elem()\n}\n\ntype QueryPartitionCreateOptionsResponse struct {\n\tReturnval []HostDiagnosticPartitionCreateOption `xml:\"returnval,omitempty\"`\n}\n\ntype QueryPathSelectionPolicyOptions QueryPathSelectionPolicyOptionsRequestType\n\nfunc init() {\n\tt[\"QueryPathSelectionPolicyOptions\"] = reflect.TypeOf((*QueryPathSelectionPolicyOptions)(nil)).Elem()\n}\n\ntype QueryPathSelectionPolicyOptionsRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryPathSelectionPolicyOptionsRequestType\"] = reflect.TypeOf((*QueryPathSelectionPolicyOptionsRequestType)(nil)).Elem()\n}\n\ntype QueryPathSelectionPolicyOptionsResponse struct {\n\tReturnval []HostPathSelectionPolicyOption `xml:\"returnval,omitempty\"`\n}\n\ntype QueryPerf QueryPerfRequestType\n\nfunc init() {\n\tt[\"QueryPerf\"] = reflect.TypeOf((*QueryPerf)(nil)).Elem()\n}\n\ntype QueryPerfComposite QueryPerfCompositeRequestType\n\nfunc init() {\n\tt[\"QueryPerfComposite\"] = reflect.TypeOf((*QueryPerfComposite)(nil)).Elem()\n}\n\ntype QueryPerfCompositeRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tQuerySpec PerfQuerySpec          
`xml:\"querySpec\"`\n}\n\nfunc init() {\n\tt[\"QueryPerfCompositeRequestType\"] = reflect.TypeOf((*QueryPerfCompositeRequestType)(nil)).Elem()\n}\n\ntype QueryPerfCompositeResponse struct {\n\tReturnval PerfCompositeMetric `xml:\"returnval\"`\n}\n\ntype QueryPerfCounter QueryPerfCounterRequestType\n\nfunc init() {\n\tt[\"QueryPerfCounter\"] = reflect.TypeOf((*QueryPerfCounter)(nil)).Elem()\n}\n\ntype QueryPerfCounterByLevel QueryPerfCounterByLevelRequestType\n\nfunc init() {\n\tt[\"QueryPerfCounterByLevel\"] = reflect.TypeOf((*QueryPerfCounterByLevel)(nil)).Elem()\n}\n\ntype QueryPerfCounterByLevelRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tLevel int32                  `xml:\"level\"`\n}\n\nfunc init() {\n\tt[\"QueryPerfCounterByLevelRequestType\"] = reflect.TypeOf((*QueryPerfCounterByLevelRequestType)(nil)).Elem()\n}\n\ntype QueryPerfCounterByLevelResponse struct {\n\tReturnval []PerfCounterInfo `xml:\"returnval\"`\n}\n\ntype QueryPerfCounterRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tCounterId []int32                `xml:\"counterId\"`\n}\n\nfunc init() {\n\tt[\"QueryPerfCounterRequestType\"] = reflect.TypeOf((*QueryPerfCounterRequestType)(nil)).Elem()\n}\n\ntype QueryPerfCounterResponse struct {\n\tReturnval []PerfCounterInfo `xml:\"returnval,omitempty\"`\n}\n\ntype QueryPerfProviderSummary QueryPerfProviderSummaryRequestType\n\nfunc init() {\n\tt[\"QueryPerfProviderSummary\"] = reflect.TypeOf((*QueryPerfProviderSummary)(nil)).Elem()\n}\n\ntype QueryPerfProviderSummaryRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tEntity ManagedObjectReference `xml:\"entity\"`\n}\n\nfunc init() {\n\tt[\"QueryPerfProviderSummaryRequestType\"] = reflect.TypeOf((*QueryPerfProviderSummaryRequestType)(nil)).Elem()\n}\n\ntype QueryPerfProviderSummaryResponse struct {\n\tReturnval PerfProviderSummary `xml:\"returnval\"`\n}\n\ntype QueryPerfRequestType struct {\n\tThis      ManagedObjectReference 
`xml:\"_this\"`\n\tQuerySpec []PerfQuerySpec        `xml:\"querySpec\"`\n}\n\nfunc init() {\n\tt[\"QueryPerfRequestType\"] = reflect.TypeOf((*QueryPerfRequestType)(nil)).Elem()\n}\n\ntype QueryPerfResponse struct {\n\tReturnval []BasePerfEntityMetricBase `xml:\"returnval,omitempty,typeattr\"`\n}\n\ntype QueryPhysicalVsanDisks QueryPhysicalVsanDisksRequestType\n\nfunc init() {\n\tt[\"QueryPhysicalVsanDisks\"] = reflect.TypeOf((*QueryPhysicalVsanDisks)(nil)).Elem()\n}\n\ntype QueryPhysicalVsanDisksRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tProps []string               `xml:\"props,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryPhysicalVsanDisksRequestType\"] = reflect.TypeOf((*QueryPhysicalVsanDisksRequestType)(nil)).Elem()\n}\n\ntype QueryPhysicalVsanDisksResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype QueryPnicStatus QueryPnicStatusRequestType\n\nfunc init() {\n\tt[\"QueryPnicStatus\"] = reflect.TypeOf((*QueryPnicStatus)(nil)).Elem()\n}\n\ntype QueryPnicStatusRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tPnicDevice string                 `xml:\"pnicDevice\"`\n}\n\nfunc init() {\n\tt[\"QueryPnicStatusRequestType\"] = reflect.TypeOf((*QueryPnicStatusRequestType)(nil)).Elem()\n}\n\ntype QueryPnicStatusResponse struct {\n\tReturnval IscsiStatus `xml:\"returnval\"`\n}\n\ntype QueryPolicyMetadata QueryPolicyMetadataRequestType\n\nfunc init() {\n\tt[\"QueryPolicyMetadata\"] = reflect.TypeOf((*QueryPolicyMetadata)(nil)).Elem()\n}\n\ntype QueryPolicyMetadataRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tPolicyName []string                `xml:\"policyName,omitempty\"`\n\tProfile    *ManagedObjectReference `xml:\"profile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryPolicyMetadataRequestType\"] = reflect.TypeOf((*QueryPolicyMetadataRequestType)(nil)).Elem()\n}\n\ntype QueryPolicyMetadataResponse struct {\n\tReturnval []ProfilePolicyMetadata 
`xml:\"returnval,omitempty\"`\n}\n\ntype QueryProfileStructure QueryProfileStructureRequestType\n\nfunc init() {\n\tt[\"QueryProfileStructure\"] = reflect.TypeOf((*QueryProfileStructure)(nil)).Elem()\n}\n\ntype QueryProfileStructureRequestType struct {\n\tThis    ManagedObjectReference  `xml:\"_this\"`\n\tProfile *ManagedObjectReference `xml:\"profile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryProfileStructureRequestType\"] = reflect.TypeOf((*QueryProfileStructureRequestType)(nil)).Elem()\n}\n\ntype QueryProfileStructureResponse struct {\n\tReturnval ProfileProfileStructure `xml:\"returnval\"`\n}\n\ntype QueryProviderList QueryProviderListRequestType\n\nfunc init() {\n\tt[\"QueryProviderList\"] = reflect.TypeOf((*QueryProviderList)(nil)).Elem()\n}\n\ntype QueryProviderListRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryProviderListRequestType\"] = reflect.TypeOf((*QueryProviderListRequestType)(nil)).Elem()\n}\n\ntype QueryProviderListResponse struct {\n\tReturnval []string `xml:\"returnval,omitempty\"`\n}\n\ntype QueryProviderName QueryProviderNameRequestType\n\nfunc init() {\n\tt[\"QueryProviderName\"] = reflect.TypeOf((*QueryProviderName)(nil)).Elem()\n}\n\ntype QueryProviderNameRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tId   string                 `xml:\"id\"`\n}\n\nfunc init() {\n\tt[\"QueryProviderNameRequestType\"] = reflect.TypeOf((*QueryProviderNameRequestType)(nil)).Elem()\n}\n\ntype QueryProviderNameResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype QueryResourceConfigOption QueryResourceConfigOptionRequestType\n\nfunc init() {\n\tt[\"QueryResourceConfigOption\"] = reflect.TypeOf((*QueryResourceConfigOption)(nil)).Elem()\n}\n\ntype QueryResourceConfigOptionRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryResourceConfigOptionRequestType\"] = 
reflect.TypeOf((*QueryResourceConfigOptionRequestType)(nil)).Elem()\n}\n\ntype QueryResourceConfigOptionResponse struct {\n\tReturnval ResourceConfigOption `xml:\"returnval\"`\n}\n\ntype QueryServiceList QueryServiceListRequestType\n\nfunc init() {\n\tt[\"QueryServiceList\"] = reflect.TypeOf((*QueryServiceList)(nil)).Elem()\n}\n\ntype QueryServiceListRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tServiceName string                 `xml:\"serviceName,omitempty\"`\n\tLocation    []string               `xml:\"location,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryServiceListRequestType\"] = reflect.TypeOf((*QueryServiceListRequestType)(nil)).Elem()\n}\n\ntype QueryServiceListResponse struct {\n\tReturnval []ServiceManagerServiceInfo `xml:\"returnval,omitempty\"`\n}\n\ntype QueryStorageArrayTypePolicyOptions QueryStorageArrayTypePolicyOptionsRequestType\n\nfunc init() {\n\tt[\"QueryStorageArrayTypePolicyOptions\"] = reflect.TypeOf((*QueryStorageArrayTypePolicyOptions)(nil)).Elem()\n}\n\ntype QueryStorageArrayTypePolicyOptionsRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryStorageArrayTypePolicyOptionsRequestType\"] = reflect.TypeOf((*QueryStorageArrayTypePolicyOptionsRequestType)(nil)).Elem()\n}\n\ntype QueryStorageArrayTypePolicyOptionsResponse struct {\n\tReturnval []HostStorageArrayTypePolicyOption `xml:\"returnval,omitempty\"`\n}\n\ntype QuerySupportedFeatures QuerySupportedFeaturesRequestType\n\nfunc init() {\n\tt[\"QuerySupportedFeatures\"] = reflect.TypeOf((*QuerySupportedFeatures)(nil)).Elem()\n}\n\ntype QuerySupportedFeaturesRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QuerySupportedFeaturesRequestType\"] = reflect.TypeOf((*QuerySupportedFeaturesRequestType)(nil)).Elem()\n}\n\ntype QuerySupportedFeaturesResponse struct {\n\tReturnval []LicenseFeatureInfo 
`xml:\"returnval,omitempty\"`\n}\n\ntype QuerySyncingVsanObjects QuerySyncingVsanObjectsRequestType\n\nfunc init() {\n\tt[\"QuerySyncingVsanObjects\"] = reflect.TypeOf((*QuerySyncingVsanObjects)(nil)).Elem()\n}\n\ntype QuerySyncingVsanObjectsRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tUuids []string               `xml:\"uuids,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QuerySyncingVsanObjectsRequestType\"] = reflect.TypeOf((*QuerySyncingVsanObjectsRequestType)(nil)).Elem()\n}\n\ntype QuerySyncingVsanObjectsResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype QuerySystemUsers QuerySystemUsersRequestType\n\nfunc init() {\n\tt[\"QuerySystemUsers\"] = reflect.TypeOf((*QuerySystemUsers)(nil)).Elem()\n}\n\ntype QuerySystemUsersRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QuerySystemUsersRequestType\"] = reflect.TypeOf((*QuerySystemUsersRequestType)(nil)).Elem()\n}\n\ntype QuerySystemUsersResponse struct {\n\tReturnval []string `xml:\"returnval,omitempty\"`\n}\n\ntype QueryTargetCapabilities QueryTargetCapabilitiesRequestType\n\nfunc init() {\n\tt[\"QueryTargetCapabilities\"] = reflect.TypeOf((*QueryTargetCapabilities)(nil)).Elem()\n}\n\ntype QueryTargetCapabilitiesRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryTargetCapabilitiesRequestType\"] = reflect.TypeOf((*QueryTargetCapabilitiesRequestType)(nil)).Elem()\n}\n\ntype QueryTargetCapabilitiesResponse struct {\n\tReturnval *HostCapability `xml:\"returnval,omitempty\"`\n}\n\ntype QueryTpmAttestationReport QueryTpmAttestationReportRequestType\n\nfunc init() {\n\tt[\"QueryTpmAttestationReport\"] = reflect.TypeOf((*QueryTpmAttestationReport)(nil)).Elem()\n}\n\ntype QueryTpmAttestationReportRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryTpmAttestationReportRequestType\"] = 
reflect.TypeOf((*QueryTpmAttestationReportRequestType)(nil)).Elem()\n}\n\ntype QueryTpmAttestationReportResponse struct {\n\tReturnval *HostTpmAttestationReport `xml:\"returnval,omitempty\"`\n}\n\ntype QueryUnmonitoredHosts QueryUnmonitoredHostsRequestType\n\nfunc init() {\n\tt[\"QueryUnmonitoredHosts\"] = reflect.TypeOf((*QueryUnmonitoredHosts)(nil)).Elem()\n}\n\ntype QueryUnmonitoredHostsRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tProviderId string                 `xml:\"providerId\"`\n\tCluster    ManagedObjectReference `xml:\"cluster\"`\n}\n\nfunc init() {\n\tt[\"QueryUnmonitoredHostsRequestType\"] = reflect.TypeOf((*QueryUnmonitoredHostsRequestType)(nil)).Elem()\n}\n\ntype QueryUnmonitoredHostsResponse struct {\n\tReturnval []ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype QueryUnownedFiles QueryUnownedFilesRequestType\n\nfunc init() {\n\tt[\"QueryUnownedFiles\"] = reflect.TypeOf((*QueryUnownedFiles)(nil)).Elem()\n}\n\ntype QueryUnownedFilesRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryUnownedFilesRequestType\"] = reflect.TypeOf((*QueryUnownedFilesRequestType)(nil)).Elem()\n}\n\ntype QueryUnownedFilesResponse struct {\n\tReturnval []string `xml:\"returnval,omitempty\"`\n}\n\ntype QueryUnresolvedVmfsVolume QueryUnresolvedVmfsVolumeRequestType\n\nfunc init() {\n\tt[\"QueryUnresolvedVmfsVolume\"] = reflect.TypeOf((*QueryUnresolvedVmfsVolume)(nil)).Elem()\n}\n\ntype QueryUnresolvedVmfsVolumeRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryUnresolvedVmfsVolumeRequestType\"] = reflect.TypeOf((*QueryUnresolvedVmfsVolumeRequestType)(nil)).Elem()\n}\n\ntype QueryUnresolvedVmfsVolumeResponse struct {\n\tReturnval []HostUnresolvedVmfsVolume `xml:\"returnval,omitempty\"`\n}\n\ntype QueryUnresolvedVmfsVolumes QueryUnresolvedVmfsVolumesRequestType\n\nfunc init() {\n\tt[\"QueryUnresolvedVmfsVolumes\"] = 
reflect.TypeOf((*QueryUnresolvedVmfsVolumes)(nil)).Elem()\n}\n\ntype QueryUnresolvedVmfsVolumesRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryUnresolvedVmfsVolumesRequestType\"] = reflect.TypeOf((*QueryUnresolvedVmfsVolumesRequestType)(nil)).Elem()\n}\n\ntype QueryUnresolvedVmfsVolumesResponse struct {\n\tReturnval []HostUnresolvedVmfsVolume `xml:\"returnval,omitempty\"`\n}\n\ntype QueryUsedVlanIdInDvs QueryUsedVlanIdInDvsRequestType\n\nfunc init() {\n\tt[\"QueryUsedVlanIdInDvs\"] = reflect.TypeOf((*QueryUsedVlanIdInDvs)(nil)).Elem()\n}\n\ntype QueryUsedVlanIdInDvsRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryUsedVlanIdInDvsRequestType\"] = reflect.TypeOf((*QueryUsedVlanIdInDvsRequestType)(nil)).Elem()\n}\n\ntype QueryUsedVlanIdInDvsResponse struct {\n\tReturnval []int32 `xml:\"returnval,omitempty\"`\n}\n\ntype QueryVMotionCompatibility QueryVMotionCompatibilityRequestType\n\nfunc init() {\n\tt[\"QueryVMotionCompatibility\"] = reflect.TypeOf((*QueryVMotionCompatibility)(nil)).Elem()\n}\n\ntype QueryVMotionCompatibilityExRequestType struct {\n\tThis ManagedObjectReference   `xml:\"_this\"`\n\tVm   []ManagedObjectReference `xml:\"vm\"`\n\tHost []ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"QueryVMotionCompatibilityExRequestType\"] = reflect.TypeOf((*QueryVMotionCompatibilityExRequestType)(nil)).Elem()\n}\n\ntype QueryVMotionCompatibilityEx_Task QueryVMotionCompatibilityExRequestType\n\nfunc init() {\n\tt[\"QueryVMotionCompatibilityEx_Task\"] = reflect.TypeOf((*QueryVMotionCompatibilityEx_Task)(nil)).Elem()\n}\n\ntype QueryVMotionCompatibilityEx_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype QueryVMotionCompatibilityRequestType struct {\n\tThis          ManagedObjectReference   `xml:\"_this\"`\n\tVm            ManagedObjectReference   `xml:\"vm\"`\n\tHost          []ManagedObjectReference 
`xml:\"host\"`\n\tCompatibility []string                 `xml:\"compatibility,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryVMotionCompatibilityRequestType\"] = reflect.TypeOf((*QueryVMotionCompatibilityRequestType)(nil)).Elem()\n}\n\ntype QueryVMotionCompatibilityResponse struct {\n\tReturnval []HostVMotionCompatibility `xml:\"returnval,omitempty\"`\n}\n\ntype QueryVirtualDiskFragmentation QueryVirtualDiskFragmentationRequestType\n\nfunc init() {\n\tt[\"QueryVirtualDiskFragmentation\"] = reflect.TypeOf((*QueryVirtualDiskFragmentation)(nil)).Elem()\n}\n\ntype QueryVirtualDiskFragmentationRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tName       string                  `xml:\"name\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryVirtualDiskFragmentationRequestType\"] = reflect.TypeOf((*QueryVirtualDiskFragmentationRequestType)(nil)).Elem()\n}\n\ntype QueryVirtualDiskFragmentationResponse struct {\n\tReturnval int32 `xml:\"returnval\"`\n}\n\ntype QueryVirtualDiskGeometry QueryVirtualDiskGeometryRequestType\n\nfunc init() {\n\tt[\"QueryVirtualDiskGeometry\"] = reflect.TypeOf((*QueryVirtualDiskGeometry)(nil)).Elem()\n}\n\ntype QueryVirtualDiskGeometryRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tName       string                  `xml:\"name\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryVirtualDiskGeometryRequestType\"] = reflect.TypeOf((*QueryVirtualDiskGeometryRequestType)(nil)).Elem()\n}\n\ntype QueryVirtualDiskGeometryResponse struct {\n\tReturnval HostDiskDimensionsChs `xml:\"returnval\"`\n}\n\ntype QueryVirtualDiskUuid QueryVirtualDiskUuidRequestType\n\nfunc init() {\n\tt[\"QueryVirtualDiskUuid\"] = reflect.TypeOf((*QueryVirtualDiskUuid)(nil)).Elem()\n}\n\ntype QueryVirtualDiskUuidRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tName       string                  
`xml:\"name\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryVirtualDiskUuidRequestType\"] = reflect.TypeOf((*QueryVirtualDiskUuidRequestType)(nil)).Elem()\n}\n\ntype QueryVirtualDiskUuidResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype QueryVmfsConfigOption QueryVmfsConfigOptionRequestType\n\nfunc init() {\n\tt[\"QueryVmfsConfigOption\"] = reflect.TypeOf((*QueryVmfsConfigOption)(nil)).Elem()\n}\n\ntype QueryVmfsConfigOptionRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"QueryVmfsConfigOptionRequestType\"] = reflect.TypeOf((*QueryVmfsConfigOptionRequestType)(nil)).Elem()\n}\n\ntype QueryVmfsConfigOptionResponse struct {\n\tReturnval []VmfsConfigOption `xml:\"returnval,omitempty\"`\n}\n\ntype QueryVmfsDatastoreCreateOptions QueryVmfsDatastoreCreateOptionsRequestType\n\nfunc init() {\n\tt[\"QueryVmfsDatastoreCreateOptions\"] = reflect.TypeOf((*QueryVmfsDatastoreCreateOptions)(nil)).Elem()\n}\n\ntype QueryVmfsDatastoreCreateOptionsRequestType struct {\n\tThis             ManagedObjectReference `xml:\"_this\"`\n\tDevicePath       string                 `xml:\"devicePath\"`\n\tVmfsMajorVersion int32                  `xml:\"vmfsMajorVersion,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryVmfsDatastoreCreateOptionsRequestType\"] = reflect.TypeOf((*QueryVmfsDatastoreCreateOptionsRequestType)(nil)).Elem()\n}\n\ntype QueryVmfsDatastoreCreateOptionsResponse struct {\n\tReturnval []VmfsDatastoreOption `xml:\"returnval,omitempty\"`\n}\n\ntype QueryVmfsDatastoreExpandOptions QueryVmfsDatastoreExpandOptionsRequestType\n\nfunc init() {\n\tt[\"QueryVmfsDatastoreExpandOptions\"] = reflect.TypeOf((*QueryVmfsDatastoreExpandOptions)(nil)).Elem()\n}\n\ntype QueryVmfsDatastoreExpandOptionsRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() 
{\n\tt[\"QueryVmfsDatastoreExpandOptionsRequestType\"] = reflect.TypeOf((*QueryVmfsDatastoreExpandOptionsRequestType)(nil)).Elem()\n}\n\ntype QueryVmfsDatastoreExpandOptionsResponse struct {\n\tReturnval []VmfsDatastoreOption `xml:\"returnval,omitempty\"`\n}\n\ntype QueryVmfsDatastoreExtendOptions QueryVmfsDatastoreExtendOptionsRequestType\n\nfunc init() {\n\tt[\"QueryVmfsDatastoreExtendOptions\"] = reflect.TypeOf((*QueryVmfsDatastoreExtendOptions)(nil)).Elem()\n}\n\ntype QueryVmfsDatastoreExtendOptionsRequestType struct {\n\tThis                     ManagedObjectReference `xml:\"_this\"`\n\tDatastore                ManagedObjectReference `xml:\"datastore\"`\n\tDevicePath               string                 `xml:\"devicePath\"`\n\tSuppressExpandCandidates *bool                  `xml:\"suppressExpandCandidates\"`\n}\n\nfunc init() {\n\tt[\"QueryVmfsDatastoreExtendOptionsRequestType\"] = reflect.TypeOf((*QueryVmfsDatastoreExtendOptionsRequestType)(nil)).Elem()\n}\n\ntype QueryVmfsDatastoreExtendOptionsResponse struct {\n\tReturnval []VmfsDatastoreOption `xml:\"returnval,omitempty\"`\n}\n\ntype QueryVnicStatus QueryVnicStatusRequestType\n\nfunc init() {\n\tt[\"QueryVnicStatus\"] = reflect.TypeOf((*QueryVnicStatus)(nil)).Elem()\n}\n\ntype QueryVnicStatusRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tVnicDevice string                 `xml:\"vnicDevice\"`\n}\n\nfunc init() {\n\tt[\"QueryVnicStatusRequestType\"] = reflect.TypeOf((*QueryVnicStatusRequestType)(nil)).Elem()\n}\n\ntype QueryVnicStatusResponse struct {\n\tReturnval IscsiStatus `xml:\"returnval\"`\n}\n\ntype QueryVsanObjectUuidsByFilter QueryVsanObjectUuidsByFilterRequestType\n\nfunc init() {\n\tt[\"QueryVsanObjectUuidsByFilter\"] = reflect.TypeOf((*QueryVsanObjectUuidsByFilter)(nil)).Elem()\n}\n\ntype QueryVsanObjectUuidsByFilterRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tUuids   []string               `xml:\"uuids,omitempty\"`\n\tLimit   int32       
           `xml:\"limit,omitempty\"`\n\tVersion int32                  `xml:\"version,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryVsanObjectUuidsByFilterRequestType\"] = reflect.TypeOf((*QueryVsanObjectUuidsByFilterRequestType)(nil)).Elem()\n}\n\ntype QueryVsanObjectUuidsByFilterResponse struct {\n\tReturnval []string `xml:\"returnval,omitempty\"`\n}\n\ntype QueryVsanObjects QueryVsanObjectsRequestType\n\nfunc init() {\n\tt[\"QueryVsanObjects\"] = reflect.TypeOf((*QueryVsanObjects)(nil)).Elem()\n}\n\ntype QueryVsanObjectsRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tUuids []string               `xml:\"uuids,omitempty\"`\n}\n\nfunc init() {\n\tt[\"QueryVsanObjectsRequestType\"] = reflect.TypeOf((*QueryVsanObjectsRequestType)(nil)).Elem()\n}\n\ntype QueryVsanObjectsResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype QueryVsanStatistics QueryVsanStatisticsRequestType\n\nfunc init() {\n\tt[\"QueryVsanStatistics\"] = reflect.TypeOf((*QueryVsanStatistics)(nil)).Elem()\n}\n\ntype QueryVsanStatisticsRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tLabels []string               `xml:\"labels\"`\n}\n\nfunc init() {\n\tt[\"QueryVsanStatisticsRequestType\"] = reflect.TypeOf((*QueryVsanStatisticsRequestType)(nil)).Elem()\n}\n\ntype QueryVsanStatisticsResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype QueryVsanUpgradeStatus QueryVsanUpgradeStatusRequestType\n\nfunc init() {\n\tt[\"QueryVsanUpgradeStatus\"] = reflect.TypeOf((*QueryVsanUpgradeStatus)(nil)).Elem()\n}\n\ntype QueryVsanUpgradeStatusRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tCluster ManagedObjectReference `xml:\"cluster\"`\n}\n\nfunc init() {\n\tt[\"QueryVsanUpgradeStatusRequestType\"] = reflect.TypeOf((*QueryVsanUpgradeStatusRequestType)(nil)).Elem()\n}\n\ntype QueryVsanUpgradeStatusResponse struct {\n\tReturnval VsanUpgradeSystemUpgradeStatus `xml:\"returnval\"`\n}\n\ntype QuestionPending struct 
{\n\tInvalidState\n\n\tText string `xml:\"text\"`\n}\n\nfunc init() {\n\tt[\"QuestionPending\"] = reflect.TypeOf((*QuestionPending)(nil)).Elem()\n}\n\ntype QuestionPendingFault QuestionPending\n\nfunc init() {\n\tt[\"QuestionPendingFault\"] = reflect.TypeOf((*QuestionPendingFault)(nil)).Elem()\n}\n\ntype QuiesceDatastoreIOForHAFailed struct {\n\tResourceInUse\n\n\tHost     ManagedObjectReference `xml:\"host\"`\n\tHostName string                 `xml:\"hostName\"`\n\tDs       ManagedObjectReference `xml:\"ds\"`\n\tDsName   string                 `xml:\"dsName\"`\n}\n\nfunc init() {\n\tt[\"QuiesceDatastoreIOForHAFailed\"] = reflect.TypeOf((*QuiesceDatastoreIOForHAFailed)(nil)).Elem()\n}\n\ntype QuiesceDatastoreIOForHAFailedFault QuiesceDatastoreIOForHAFailed\n\nfunc init() {\n\tt[\"QuiesceDatastoreIOForHAFailedFault\"] = reflect.TypeOf((*QuiesceDatastoreIOForHAFailedFault)(nil)).Elem()\n}\n\ntype RDMConversionNotSupported struct {\n\tMigrationFault\n\n\tDevice string `xml:\"device\"`\n}\n\nfunc init() {\n\tt[\"RDMConversionNotSupported\"] = reflect.TypeOf((*RDMConversionNotSupported)(nil)).Elem()\n}\n\ntype RDMConversionNotSupportedFault RDMConversionNotSupported\n\nfunc init() {\n\tt[\"RDMConversionNotSupportedFault\"] = reflect.TypeOf((*RDMConversionNotSupportedFault)(nil)).Elem()\n}\n\ntype RDMNotPreserved struct {\n\tMigrationFault\n\n\tDevice string `xml:\"device\"`\n}\n\nfunc init() {\n\tt[\"RDMNotPreserved\"] = reflect.TypeOf((*RDMNotPreserved)(nil)).Elem()\n}\n\ntype RDMNotPreservedFault RDMNotPreserved\n\nfunc init() {\n\tt[\"RDMNotPreservedFault\"] = reflect.TypeOf((*RDMNotPreservedFault)(nil)).Elem()\n}\n\ntype RDMNotSupported struct {\n\tDeviceNotSupported\n}\n\nfunc init() {\n\tt[\"RDMNotSupported\"] = reflect.TypeOf((*RDMNotSupported)(nil)).Elem()\n}\n\ntype RDMNotSupportedFault BaseRDMNotSupported\n\nfunc init() {\n\tt[\"RDMNotSupportedFault\"] = reflect.TypeOf((*RDMNotSupportedFault)(nil)).Elem()\n}\n\ntype RDMNotSupportedOnDatastore struct 
{\n\tVmConfigFault\n\n\tDevice        string                 `xml:\"device\"`\n\tDatastore     ManagedObjectReference `xml:\"datastore\"`\n\tDatastoreName string                 `xml:\"datastoreName\"`\n}\n\nfunc init() {\n\tt[\"RDMNotSupportedOnDatastore\"] = reflect.TypeOf((*RDMNotSupportedOnDatastore)(nil)).Elem()\n}\n\ntype RDMNotSupportedOnDatastoreFault RDMNotSupportedOnDatastore\n\nfunc init() {\n\tt[\"RDMNotSupportedOnDatastoreFault\"] = reflect.TypeOf((*RDMNotSupportedOnDatastoreFault)(nil)).Elem()\n}\n\ntype RDMPointsToInaccessibleDisk struct {\n\tCannotAccessVmDisk\n}\n\nfunc init() {\n\tt[\"RDMPointsToInaccessibleDisk\"] = reflect.TypeOf((*RDMPointsToInaccessibleDisk)(nil)).Elem()\n}\n\ntype RDMPointsToInaccessibleDiskFault RDMPointsToInaccessibleDisk\n\nfunc init() {\n\tt[\"RDMPointsToInaccessibleDiskFault\"] = reflect.TypeOf((*RDMPointsToInaccessibleDiskFault)(nil)).Elem()\n}\n\ntype RawDiskNotSupported struct {\n\tDeviceNotSupported\n}\n\nfunc init() {\n\tt[\"RawDiskNotSupported\"] = reflect.TypeOf((*RawDiskNotSupported)(nil)).Elem()\n}\n\ntype RawDiskNotSupportedFault RawDiskNotSupported\n\nfunc init() {\n\tt[\"RawDiskNotSupportedFault\"] = reflect.TypeOf((*RawDiskNotSupportedFault)(nil)).Elem()\n}\n\ntype ReadEnvironmentVariableInGuest ReadEnvironmentVariableInGuestRequestType\n\nfunc init() {\n\tt[\"ReadEnvironmentVariableInGuest\"] = reflect.TypeOf((*ReadEnvironmentVariableInGuest)(nil)).Elem()\n}\n\ntype ReadEnvironmentVariableInGuestRequestType struct {\n\tThis  ManagedObjectReference  `xml:\"_this\"`\n\tVm    ManagedObjectReference  `xml:\"vm\"`\n\tAuth  BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tNames []string                `xml:\"names,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ReadEnvironmentVariableInGuestRequestType\"] = reflect.TypeOf((*ReadEnvironmentVariableInGuestRequestType)(nil)).Elem()\n}\n\ntype ReadEnvironmentVariableInGuestResponse struct {\n\tReturnval []string `xml:\"returnval,omitempty\"`\n}\n\ntype 
ReadHostResourcePoolTreeFailed struct {\n\tHostConnectFault\n}\n\nfunc init() {\n\tt[\"ReadHostResourcePoolTreeFailed\"] = reflect.TypeOf((*ReadHostResourcePoolTreeFailed)(nil)).Elem()\n}\n\ntype ReadHostResourcePoolTreeFailedFault ReadHostResourcePoolTreeFailed\n\nfunc init() {\n\tt[\"ReadHostResourcePoolTreeFailedFault\"] = reflect.TypeOf((*ReadHostResourcePoolTreeFailedFault)(nil)).Elem()\n}\n\ntype ReadNextEvents ReadNextEventsRequestType\n\nfunc init() {\n\tt[\"ReadNextEvents\"] = reflect.TypeOf((*ReadNextEvents)(nil)).Elem()\n}\n\ntype ReadNextEventsRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tMaxCount int32                  `xml:\"maxCount\"`\n}\n\nfunc init() {\n\tt[\"ReadNextEventsRequestType\"] = reflect.TypeOf((*ReadNextEventsRequestType)(nil)).Elem()\n}\n\ntype ReadNextEventsResponse struct {\n\tReturnval []BaseEvent `xml:\"returnval,omitempty,typeattr\"`\n}\n\ntype ReadNextTasks ReadNextTasksRequestType\n\nfunc init() {\n\tt[\"ReadNextTasks\"] = reflect.TypeOf((*ReadNextTasks)(nil)).Elem()\n}\n\ntype ReadNextTasksRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tMaxCount int32                  `xml:\"maxCount\"`\n}\n\nfunc init() {\n\tt[\"ReadNextTasksRequestType\"] = reflect.TypeOf((*ReadNextTasksRequestType)(nil)).Elem()\n}\n\ntype ReadNextTasksResponse struct {\n\tReturnval []TaskInfo `xml:\"returnval,omitempty\"`\n}\n\ntype ReadOnlyDisksWithLegacyDestination struct {\n\tMigrationFault\n\n\tRoDiskCount   int32 `xml:\"roDiskCount\"`\n\tTimeoutDanger bool  `xml:\"timeoutDanger\"`\n}\n\nfunc init() {\n\tt[\"ReadOnlyDisksWithLegacyDestination\"] = reflect.TypeOf((*ReadOnlyDisksWithLegacyDestination)(nil)).Elem()\n}\n\ntype ReadOnlyDisksWithLegacyDestinationFault ReadOnlyDisksWithLegacyDestination\n\nfunc init() {\n\tt[\"ReadOnlyDisksWithLegacyDestinationFault\"] = reflect.TypeOf((*ReadOnlyDisksWithLegacyDestinationFault)(nil)).Elem()\n}\n\ntype ReadPreviousEvents 
ReadPreviousEventsRequestType\n\nfunc init() {\n\tt[\"ReadPreviousEvents\"] = reflect.TypeOf((*ReadPreviousEvents)(nil)).Elem()\n}\n\ntype ReadPreviousEventsRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tMaxCount int32                  `xml:\"maxCount\"`\n}\n\nfunc init() {\n\tt[\"ReadPreviousEventsRequestType\"] = reflect.TypeOf((*ReadPreviousEventsRequestType)(nil)).Elem()\n}\n\ntype ReadPreviousEventsResponse struct {\n\tReturnval []BaseEvent `xml:\"returnval,omitempty,typeattr\"`\n}\n\ntype ReadPreviousTasks ReadPreviousTasksRequestType\n\nfunc init() {\n\tt[\"ReadPreviousTasks\"] = reflect.TypeOf((*ReadPreviousTasks)(nil)).Elem()\n}\n\ntype ReadPreviousTasksRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tMaxCount int32                  `xml:\"maxCount\"`\n}\n\nfunc init() {\n\tt[\"ReadPreviousTasksRequestType\"] = reflect.TypeOf((*ReadPreviousTasksRequestType)(nil)).Elem()\n}\n\ntype ReadPreviousTasksResponse struct {\n\tReturnval []TaskInfo `xml:\"returnval,omitempty\"`\n}\n\ntype RebootGuest RebootGuestRequestType\n\nfunc init() {\n\tt[\"RebootGuest\"] = reflect.TypeOf((*RebootGuest)(nil)).Elem()\n}\n\ntype RebootGuestRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RebootGuestRequestType\"] = reflect.TypeOf((*RebootGuestRequestType)(nil)).Elem()\n}\n\ntype RebootGuestResponse struct {\n}\n\ntype RebootHostRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tForce bool                   `xml:\"force\"`\n}\n\nfunc init() {\n\tt[\"RebootHostRequestType\"] = reflect.TypeOf((*RebootHostRequestType)(nil)).Elem()\n}\n\ntype RebootHost_Task RebootHostRequestType\n\nfunc init() {\n\tt[\"RebootHost_Task\"] = reflect.TypeOf((*RebootHost_Task)(nil)).Elem()\n}\n\ntype RebootHost_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype RebootRequired struct {\n\tVimFault\n\n\tPatch string 
`xml:\"patch,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RebootRequired\"] = reflect.TypeOf((*RebootRequired)(nil)).Elem()\n}\n\ntype RebootRequiredFault RebootRequired\n\nfunc init() {\n\tt[\"RebootRequiredFault\"] = reflect.TypeOf((*RebootRequiredFault)(nil)).Elem()\n}\n\ntype RecommendDatastores RecommendDatastoresRequestType\n\nfunc init() {\n\tt[\"RecommendDatastores\"] = reflect.TypeOf((*RecommendDatastores)(nil)).Elem()\n}\n\ntype RecommendDatastoresRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tStorageSpec StoragePlacementSpec   `xml:\"storageSpec\"`\n}\n\nfunc init() {\n\tt[\"RecommendDatastoresRequestType\"] = reflect.TypeOf((*RecommendDatastoresRequestType)(nil)).Elem()\n}\n\ntype RecommendDatastoresResponse struct {\n\tReturnval StoragePlacementResult `xml:\"returnval\"`\n}\n\ntype RecommendHostsForVm RecommendHostsForVmRequestType\n\nfunc init() {\n\tt[\"RecommendHostsForVm\"] = reflect.TypeOf((*RecommendHostsForVm)(nil)).Elem()\n}\n\ntype RecommendHostsForVmRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tVm   ManagedObjectReference  `xml:\"vm\"`\n\tPool *ManagedObjectReference `xml:\"pool,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RecommendHostsForVmRequestType\"] = reflect.TypeOf((*RecommendHostsForVmRequestType)(nil)).Elem()\n}\n\ntype RecommendHostsForVmResponse struct {\n\tReturnval []ClusterHostRecommendation `xml:\"returnval,omitempty\"`\n}\n\ntype RecommissionVsanNodeRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RecommissionVsanNodeRequestType\"] = reflect.TypeOf((*RecommissionVsanNodeRequestType)(nil)).Elem()\n}\n\ntype RecommissionVsanNode_Task RecommissionVsanNodeRequestType\n\nfunc init() {\n\tt[\"RecommissionVsanNode_Task\"] = reflect.TypeOf((*RecommissionVsanNode_Task)(nil)).Elem()\n}\n\ntype RecommissionVsanNode_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ReconcileDatastoreInventoryRequestType 
struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"ReconcileDatastoreInventoryRequestType\"] = reflect.TypeOf((*ReconcileDatastoreInventoryRequestType)(nil)).Elem()\n}\n\ntype ReconcileDatastoreInventory_Task ReconcileDatastoreInventoryRequestType\n\nfunc init() {\n\tt[\"ReconcileDatastoreInventory_Task\"] = reflect.TypeOf((*ReconcileDatastoreInventory_Task)(nil)).Elem()\n}\n\ntype ReconcileDatastoreInventory_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ReconfigVMRequestType struct {\n\tThis ManagedObjectReference   `xml:\"_this\"`\n\tSpec VirtualMachineConfigSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"ReconfigVMRequestType\"] = reflect.TypeOf((*ReconfigVMRequestType)(nil)).Elem()\n}\n\ntype ReconfigVM_Task ReconfigVMRequestType\n\nfunc init() {\n\tt[\"ReconfigVM_Task\"] = reflect.TypeOf((*ReconfigVM_Task)(nil)).Elem()\n}\n\ntype ReconfigVM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ReconfigurationSatisfiable ReconfigurationSatisfiableRequestType\n\nfunc init() {\n\tt[\"ReconfigurationSatisfiable\"] = reflect.TypeOf((*ReconfigurationSatisfiable)(nil)).Elem()\n}\n\ntype ReconfigurationSatisfiableRequestType struct {\n\tThis                 ManagedObjectReference  `xml:\"_this\"`\n\tPcbs                 []VsanPolicyChangeBatch `xml:\"pcbs\"`\n\tIgnoreSatisfiability *bool                   `xml:\"ignoreSatisfiability\"`\n}\n\nfunc init() {\n\tt[\"ReconfigurationSatisfiableRequestType\"] = reflect.TypeOf((*ReconfigurationSatisfiableRequestType)(nil)).Elem()\n}\n\ntype ReconfigurationSatisfiableResponse struct {\n\tReturnval []VsanPolicySatisfiability `xml:\"returnval\"`\n}\n\ntype ReconfigureAlarm ReconfigureAlarmRequestType\n\nfunc init() {\n\tt[\"ReconfigureAlarm\"] = reflect.TypeOf((*ReconfigureAlarm)(nil)).Elem()\n}\n\ntype ReconfigureAlarmRequestType struct {\n\tThis 
ManagedObjectReference `xml:\"_this\"`\n\tSpec BaseAlarmSpec          `xml:\"spec,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ReconfigureAlarmRequestType\"] = reflect.TypeOf((*ReconfigureAlarmRequestType)(nil)).Elem()\n}\n\ntype ReconfigureAlarmResponse struct {\n}\n\ntype ReconfigureAutostart ReconfigureAutostartRequestType\n\nfunc init() {\n\tt[\"ReconfigureAutostart\"] = reflect.TypeOf((*ReconfigureAutostart)(nil)).Elem()\n}\n\ntype ReconfigureAutostartRequestType struct {\n\tThis ManagedObjectReference     `xml:\"_this\"`\n\tSpec HostAutoStartManagerConfig `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"ReconfigureAutostartRequestType\"] = reflect.TypeOf((*ReconfigureAutostartRequestType)(nil)).Elem()\n}\n\ntype ReconfigureAutostartResponse struct {\n}\n\ntype ReconfigureClusterRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tSpec   ClusterConfigSpec      `xml:\"spec\"`\n\tModify bool                   `xml:\"modify\"`\n}\n\nfunc init() {\n\tt[\"ReconfigureClusterRequestType\"] = reflect.TypeOf((*ReconfigureClusterRequestType)(nil)).Elem()\n}\n\ntype ReconfigureCluster_Task ReconfigureClusterRequestType\n\nfunc init() {\n\tt[\"ReconfigureCluster_Task\"] = reflect.TypeOf((*ReconfigureCluster_Task)(nil)).Elem()\n}\n\ntype ReconfigureCluster_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ReconfigureComputeResourceRequestType struct {\n\tThis   ManagedObjectReference        `xml:\"_this\"`\n\tSpec   BaseComputeResourceConfigSpec `xml:\"spec,typeattr\"`\n\tModify bool                          `xml:\"modify\"`\n}\n\nfunc init() {\n\tt[\"ReconfigureComputeResourceRequestType\"] = reflect.TypeOf((*ReconfigureComputeResourceRequestType)(nil)).Elem()\n}\n\ntype ReconfigureComputeResource_Task ReconfigureComputeResourceRequestType\n\nfunc init() {\n\tt[\"ReconfigureComputeResource_Task\"] = reflect.TypeOf((*ReconfigureComputeResource_Task)(nil)).Elem()\n}\n\ntype ReconfigureComputeResource_TaskResponse struct 
{\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ReconfigureDVPortRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tPort []DVPortConfigSpec     `xml:\"port\"`\n}\n\nfunc init() {\n\tt[\"ReconfigureDVPortRequestType\"] = reflect.TypeOf((*ReconfigureDVPortRequestType)(nil)).Elem()\n}\n\ntype ReconfigureDVPort_Task ReconfigureDVPortRequestType\n\nfunc init() {\n\tt[\"ReconfigureDVPort_Task\"] = reflect.TypeOf((*ReconfigureDVPort_Task)(nil)).Elem()\n}\n\ntype ReconfigureDVPort_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ReconfigureDVPortgroupRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tSpec DVPortgroupConfigSpec  `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"ReconfigureDVPortgroupRequestType\"] = reflect.TypeOf((*ReconfigureDVPortgroupRequestType)(nil)).Elem()\n}\n\ntype ReconfigureDVPortgroup_Task ReconfigureDVPortgroupRequestType\n\nfunc init() {\n\tt[\"ReconfigureDVPortgroup_Task\"] = reflect.TypeOf((*ReconfigureDVPortgroup_Task)(nil)).Elem()\n}\n\ntype ReconfigureDVPortgroup_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ReconfigureDatacenterRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tSpec   DatacenterConfigSpec   `xml:\"spec\"`\n\tModify bool                   `xml:\"modify\"`\n}\n\nfunc init() {\n\tt[\"ReconfigureDatacenterRequestType\"] = reflect.TypeOf((*ReconfigureDatacenterRequestType)(nil)).Elem()\n}\n\ntype ReconfigureDatacenter_Task ReconfigureDatacenterRequestType\n\nfunc init() {\n\tt[\"ReconfigureDatacenter_Task\"] = reflect.TypeOf((*ReconfigureDatacenter_Task)(nil)).Elem()\n}\n\ntype ReconfigureDatacenter_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ReconfigureDomObject ReconfigureDomObjectRequestType\n\nfunc init() {\n\tt[\"ReconfigureDomObject\"] = reflect.TypeOf((*ReconfigureDomObject)(nil)).Elem()\n}\n\ntype 
ReconfigureDomObjectRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tUuid   string                 `xml:\"uuid\"`\n\tPolicy string                 `xml:\"policy\"`\n}\n\nfunc init() {\n\tt[\"ReconfigureDomObjectRequestType\"] = reflect.TypeOf((*ReconfigureDomObjectRequestType)(nil)).Elem()\n}\n\ntype ReconfigureDomObjectResponse struct {\n}\n\ntype ReconfigureDvsRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tSpec BaseDVSConfigSpec      `xml:\"spec,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ReconfigureDvsRequestType\"] = reflect.TypeOf((*ReconfigureDvsRequestType)(nil)).Elem()\n}\n\ntype ReconfigureDvs_Task ReconfigureDvsRequestType\n\nfunc init() {\n\tt[\"ReconfigureDvs_Task\"] = reflect.TypeOf((*ReconfigureDvs_Task)(nil)).Elem()\n}\n\ntype ReconfigureDvs_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ReconfigureHostForDASRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"ReconfigureHostForDASRequestType\"] = reflect.TypeOf((*ReconfigureHostForDASRequestType)(nil)).Elem()\n}\n\ntype ReconfigureHostForDAS_Task ReconfigureHostForDASRequestType\n\nfunc init() {\n\tt[\"ReconfigureHostForDAS_Task\"] = reflect.TypeOf((*ReconfigureHostForDAS_Task)(nil)).Elem()\n}\n\ntype ReconfigureHostForDAS_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ReconfigureScheduledTask ReconfigureScheduledTaskRequestType\n\nfunc init() {\n\tt[\"ReconfigureScheduledTask\"] = reflect.TypeOf((*ReconfigureScheduledTask)(nil)).Elem()\n}\n\ntype ReconfigureScheduledTaskRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tSpec BaseScheduledTaskSpec  `xml:\"spec,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ReconfigureScheduledTaskRequestType\"] = reflect.TypeOf((*ReconfigureScheduledTaskRequestType)(nil)).Elem()\n}\n\ntype ReconfigureScheduledTaskResponse struct {\n}\n\ntype ReconfigureServiceConsoleReservation 
ReconfigureServiceConsoleReservationRequestType\n\nfunc init() {\n\tt[\"ReconfigureServiceConsoleReservation\"] = reflect.TypeOf((*ReconfigureServiceConsoleReservation)(nil)).Elem()\n}\n\ntype ReconfigureServiceConsoleReservationRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tCfgBytes int64                  `xml:\"cfgBytes\"`\n}\n\nfunc init() {\n\tt[\"ReconfigureServiceConsoleReservationRequestType\"] = reflect.TypeOf((*ReconfigureServiceConsoleReservationRequestType)(nil)).Elem()\n}\n\ntype ReconfigureServiceConsoleReservationResponse struct {\n}\n\ntype ReconfigureSnmpAgent ReconfigureSnmpAgentRequestType\n\nfunc init() {\n\tt[\"ReconfigureSnmpAgent\"] = reflect.TypeOf((*ReconfigureSnmpAgent)(nil)).Elem()\n}\n\ntype ReconfigureSnmpAgentRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tSpec HostSnmpConfigSpec     `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"ReconfigureSnmpAgentRequestType\"] = reflect.TypeOf((*ReconfigureSnmpAgentRequestType)(nil)).Elem()\n}\n\ntype ReconfigureSnmpAgentResponse struct {\n}\n\ntype ReconfigureVirtualMachineReservation ReconfigureVirtualMachineReservationRequestType\n\nfunc init() {\n\tt[\"ReconfigureVirtualMachineReservation\"] = reflect.TypeOf((*ReconfigureVirtualMachineReservation)(nil)).Elem()\n}\n\ntype ReconfigureVirtualMachineReservationRequestType struct {\n\tThis ManagedObjectReference              `xml:\"_this\"`\n\tSpec VirtualMachineMemoryReservationSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"ReconfigureVirtualMachineReservationRequestType\"] = reflect.TypeOf((*ReconfigureVirtualMachineReservationRequestType)(nil)).Elem()\n}\n\ntype ReconfigureVirtualMachineReservationResponse struct {\n}\n\ntype ReconnectHostRequestType struct {\n\tThis          ManagedObjectReference   `xml:\"_this\"`\n\tCnxSpec       *HostConnectSpec         `xml:\"cnxSpec,omitempty\"`\n\tReconnectSpec *HostSystemReconnectSpec `xml:\"reconnectSpec,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"ReconnectHostRequestType\"] = reflect.TypeOf((*ReconnectHostRequestType)(nil)).Elem()\n}\n\ntype ReconnectHost_Task ReconnectHostRequestType\n\nfunc init() {\n\tt[\"ReconnectHost_Task\"] = reflect.TypeOf((*ReconnectHost_Task)(nil)).Elem()\n}\n\ntype ReconnectHost_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype RecordReplayDisabled struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"RecordReplayDisabled\"] = reflect.TypeOf((*RecordReplayDisabled)(nil)).Elem()\n}\n\ntype RecordReplayDisabledFault RecordReplayDisabled\n\nfunc init() {\n\tt[\"RecordReplayDisabledFault\"] = reflect.TypeOf((*RecordReplayDisabledFault)(nil)).Elem()\n}\n\ntype RecoveryEvent struct {\n\tDvsEvent\n\n\tHostName string `xml:\"hostName\"`\n\tPortKey  string `xml:\"portKey\"`\n\tDvsUuid  string `xml:\"dvsUuid,omitempty\"`\n\tVnic     string `xml:\"vnic,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RecoveryEvent\"] = reflect.TypeOf((*RecoveryEvent)(nil)).Elem()\n}\n\ntype RectifyDvsHostRequestType struct {\n\tThis  ManagedObjectReference   `xml:\"_this\"`\n\tHosts []ManagedObjectReference `xml:\"hosts,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RectifyDvsHostRequestType\"] = reflect.TypeOf((*RectifyDvsHostRequestType)(nil)).Elem()\n}\n\ntype RectifyDvsHost_Task RectifyDvsHostRequestType\n\nfunc init() {\n\tt[\"RectifyDvsHost_Task\"] = reflect.TypeOf((*RectifyDvsHost_Task)(nil)).Elem()\n}\n\ntype RectifyDvsHost_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype RectifyDvsOnHostRequestType struct {\n\tThis  ManagedObjectReference   `xml:\"_this\"`\n\tHosts []ManagedObjectReference `xml:\"hosts\"`\n}\n\nfunc init() {\n\tt[\"RectifyDvsOnHostRequestType\"] = reflect.TypeOf((*RectifyDvsOnHostRequestType)(nil)).Elem()\n}\n\ntype RectifyDvsOnHost_Task RectifyDvsOnHostRequestType\n\nfunc init() {\n\tt[\"RectifyDvsOnHost_Task\"] = reflect.TypeOf((*RectifyDvsOnHost_Task)(nil)).Elem()\n}\n\ntype RectifyDvsOnHost_TaskResponse struct 
{\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype RecurrentTaskScheduler struct {\n\tTaskScheduler\n\n\tInterval int32 `xml:\"interval\"`\n}\n\nfunc init() {\n\tt[\"RecurrentTaskScheduler\"] = reflect.TypeOf((*RecurrentTaskScheduler)(nil)).Elem()\n}\n\ntype Refresh RefreshRequestType\n\nfunc init() {\n\tt[\"Refresh\"] = reflect.TypeOf((*Refresh)(nil)).Elem()\n}\n\ntype RefreshDVPortState RefreshDVPortStateRequestType\n\nfunc init() {\n\tt[\"RefreshDVPortState\"] = reflect.TypeOf((*RefreshDVPortState)(nil)).Elem()\n}\n\ntype RefreshDVPortStateRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tPortKeys []string               `xml:\"portKeys,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RefreshDVPortStateRequestType\"] = reflect.TypeOf((*RefreshDVPortStateRequestType)(nil)).Elem()\n}\n\ntype RefreshDVPortStateResponse struct {\n}\n\ntype RefreshDatastore RefreshDatastoreRequestType\n\nfunc init() {\n\tt[\"RefreshDatastore\"] = reflect.TypeOf((*RefreshDatastore)(nil)).Elem()\n}\n\ntype RefreshDatastoreRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RefreshDatastoreRequestType\"] = reflect.TypeOf((*RefreshDatastoreRequestType)(nil)).Elem()\n}\n\ntype RefreshDatastoreResponse struct {\n}\n\ntype RefreshDatastoreStorageInfo RefreshDatastoreStorageInfoRequestType\n\nfunc init() {\n\tt[\"RefreshDatastoreStorageInfo\"] = reflect.TypeOf((*RefreshDatastoreStorageInfo)(nil)).Elem()\n}\n\ntype RefreshDatastoreStorageInfoRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RefreshDatastoreStorageInfoRequestType\"] = reflect.TypeOf((*RefreshDatastoreStorageInfoRequestType)(nil)).Elem()\n}\n\ntype RefreshDatastoreStorageInfoResponse struct {\n}\n\ntype RefreshDateTimeSystem RefreshDateTimeSystemRequestType\n\nfunc init() {\n\tt[\"RefreshDateTimeSystem\"] = reflect.TypeOf((*RefreshDateTimeSystem)(nil)).Elem()\n}\n\ntype RefreshDateTimeSystemRequestType struct 
{\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RefreshDateTimeSystemRequestType\"] = reflect.TypeOf((*RefreshDateTimeSystemRequestType)(nil)).Elem()\n}\n\ntype RefreshDateTimeSystemResponse struct {\n}\n\ntype RefreshFirewall RefreshFirewallRequestType\n\nfunc init() {\n\tt[\"RefreshFirewall\"] = reflect.TypeOf((*RefreshFirewall)(nil)).Elem()\n}\n\ntype RefreshFirewallRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RefreshFirewallRequestType\"] = reflect.TypeOf((*RefreshFirewallRequestType)(nil)).Elem()\n}\n\ntype RefreshFirewallResponse struct {\n}\n\ntype RefreshGraphicsManager RefreshGraphicsManagerRequestType\n\nfunc init() {\n\tt[\"RefreshGraphicsManager\"] = reflect.TypeOf((*RefreshGraphicsManager)(nil)).Elem()\n}\n\ntype RefreshGraphicsManagerRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RefreshGraphicsManagerRequestType\"] = reflect.TypeOf((*RefreshGraphicsManagerRequestType)(nil)).Elem()\n}\n\ntype RefreshGraphicsManagerResponse struct {\n}\n\ntype RefreshHealthStatusSystem RefreshHealthStatusSystemRequestType\n\nfunc init() {\n\tt[\"RefreshHealthStatusSystem\"] = reflect.TypeOf((*RefreshHealthStatusSystem)(nil)).Elem()\n}\n\ntype RefreshHealthStatusSystemRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RefreshHealthStatusSystemRequestType\"] = reflect.TypeOf((*RefreshHealthStatusSystemRequestType)(nil)).Elem()\n}\n\ntype RefreshHealthStatusSystemResponse struct {\n}\n\ntype RefreshNetworkSystem RefreshNetworkSystemRequestType\n\nfunc init() {\n\tt[\"RefreshNetworkSystem\"] = reflect.TypeOf((*RefreshNetworkSystem)(nil)).Elem()\n}\n\ntype RefreshNetworkSystemRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RefreshNetworkSystemRequestType\"] = reflect.TypeOf((*RefreshNetworkSystemRequestType)(nil)).Elem()\n}\n\ntype RefreshNetworkSystemResponse 
struct {\n}\n\ntype RefreshRecommendation RefreshRecommendationRequestType\n\nfunc init() {\n\tt[\"RefreshRecommendation\"] = reflect.TypeOf((*RefreshRecommendation)(nil)).Elem()\n}\n\ntype RefreshRecommendationRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RefreshRecommendationRequestType\"] = reflect.TypeOf((*RefreshRecommendationRequestType)(nil)).Elem()\n}\n\ntype RefreshRecommendationResponse struct {\n}\n\ntype RefreshRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RefreshRequestType\"] = reflect.TypeOf((*RefreshRequestType)(nil)).Elem()\n}\n\ntype RefreshResponse struct {\n}\n\ntype RefreshRuntime RefreshRuntimeRequestType\n\nfunc init() {\n\tt[\"RefreshRuntime\"] = reflect.TypeOf((*RefreshRuntime)(nil)).Elem()\n}\n\ntype RefreshRuntimeRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RefreshRuntimeRequestType\"] = reflect.TypeOf((*RefreshRuntimeRequestType)(nil)).Elem()\n}\n\ntype RefreshRuntimeResponse struct {\n}\n\ntype RefreshServices RefreshServicesRequestType\n\nfunc init() {\n\tt[\"RefreshServices\"] = reflect.TypeOf((*RefreshServices)(nil)).Elem()\n}\n\ntype RefreshServicesRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RefreshServicesRequestType\"] = reflect.TypeOf((*RefreshServicesRequestType)(nil)).Elem()\n}\n\ntype RefreshServicesResponse struct {\n}\n\ntype RefreshStorageDrsRecommendation RefreshStorageDrsRecommendationRequestType\n\nfunc init() {\n\tt[\"RefreshStorageDrsRecommendation\"] = reflect.TypeOf((*RefreshStorageDrsRecommendation)(nil)).Elem()\n}\n\ntype RefreshStorageDrsRecommendationRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tPod  ManagedObjectReference `xml:\"pod\"`\n}\n\nfunc init() {\n\tt[\"RefreshStorageDrsRecommendationRequestType\"] = reflect.TypeOf((*RefreshStorageDrsRecommendationRequestType)(nil)).Elem()\n}\n\ntype 
RefreshStorageDrsRecommendationResponse struct {\n}\n\ntype RefreshStorageInfo RefreshStorageInfoRequestType\n\nfunc init() {\n\tt[\"RefreshStorageInfo\"] = reflect.TypeOf((*RefreshStorageInfo)(nil)).Elem()\n}\n\ntype RefreshStorageInfoRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RefreshStorageInfoRequestType\"] = reflect.TypeOf((*RefreshStorageInfoRequestType)(nil)).Elem()\n}\n\ntype RefreshStorageInfoResponse struct {\n}\n\ntype RefreshStorageSystem RefreshStorageSystemRequestType\n\nfunc init() {\n\tt[\"RefreshStorageSystem\"] = reflect.TypeOf((*RefreshStorageSystem)(nil)).Elem()\n}\n\ntype RefreshStorageSystemRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RefreshStorageSystemRequestType\"] = reflect.TypeOf((*RefreshStorageSystemRequestType)(nil)).Elem()\n}\n\ntype RefreshStorageSystemResponse struct {\n}\n\ntype RegisterChildVMRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tPath string                  `xml:\"path\"`\n\tName string                  `xml:\"name,omitempty\"`\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RegisterChildVMRequestType\"] = reflect.TypeOf((*RegisterChildVMRequestType)(nil)).Elem()\n}\n\ntype RegisterChildVM_Task RegisterChildVMRequestType\n\nfunc init() {\n\tt[\"RegisterChildVM_Task\"] = reflect.TypeOf((*RegisterChildVM_Task)(nil)).Elem()\n}\n\ntype RegisterChildVM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype RegisterDisk RegisterDiskRequestType\n\nfunc init() {\n\tt[\"RegisterDisk\"] = reflect.TypeOf((*RegisterDisk)(nil)).Elem()\n}\n\ntype RegisterDiskRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tPath string                 `xml:\"path\"`\n\tName string                 `xml:\"name,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RegisterDiskRequestType\"] = 
reflect.TypeOf((*RegisterDiskRequestType)(nil)).Elem()\n}\n\ntype RegisterDiskResponse struct {\n\tReturnval VStorageObject `xml:\"returnval\"`\n}\n\ntype RegisterExtension RegisterExtensionRequestType\n\nfunc init() {\n\tt[\"RegisterExtension\"] = reflect.TypeOf((*RegisterExtension)(nil)).Elem()\n}\n\ntype RegisterExtensionRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tExtension Extension              `xml:\"extension\"`\n}\n\nfunc init() {\n\tt[\"RegisterExtensionRequestType\"] = reflect.TypeOf((*RegisterExtensionRequestType)(nil)).Elem()\n}\n\ntype RegisterExtensionResponse struct {\n}\n\ntype RegisterHealthUpdateProvider RegisterHealthUpdateProviderRequestType\n\nfunc init() {\n\tt[\"RegisterHealthUpdateProvider\"] = reflect.TypeOf((*RegisterHealthUpdateProvider)(nil)).Elem()\n}\n\ntype RegisterHealthUpdateProviderRequestType struct {\n\tThis             ManagedObjectReference `xml:\"_this\"`\n\tName             string                 `xml:\"name\"`\n\tHealthUpdateInfo []HealthUpdateInfo     `xml:\"healthUpdateInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RegisterHealthUpdateProviderRequestType\"] = reflect.TypeOf((*RegisterHealthUpdateProviderRequestType)(nil)).Elem()\n}\n\ntype RegisterHealthUpdateProviderResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype RegisterKmipServer RegisterKmipServerRequestType\n\nfunc init() {\n\tt[\"RegisterKmipServer\"] = reflect.TypeOf((*RegisterKmipServer)(nil)).Elem()\n}\n\ntype RegisterKmipServerRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tServer KmipServerSpec         `xml:\"server\"`\n}\n\nfunc init() {\n\tt[\"RegisterKmipServerRequestType\"] = reflect.TypeOf((*RegisterKmipServerRequestType)(nil)).Elem()\n}\n\ntype RegisterKmipServerResponse struct {\n}\n\ntype RegisterVMRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tPath       string                  `xml:\"path\"`\n\tName       string                  
`xml:\"name,omitempty\"`\n\tAsTemplate bool                    `xml:\"asTemplate\"`\n\tPool       *ManagedObjectReference `xml:\"pool,omitempty\"`\n\tHost       *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RegisterVMRequestType\"] = reflect.TypeOf((*RegisterVMRequestType)(nil)).Elem()\n}\n\ntype RegisterVM_Task RegisterVMRequestType\n\nfunc init() {\n\tt[\"RegisterVM_Task\"] = reflect.TypeOf((*RegisterVM_Task)(nil)).Elem()\n}\n\ntype RegisterVM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype Relation struct {\n\tDynamicData\n\n\tConstraint string `xml:\"constraint,omitempty\"`\n\tName       string `xml:\"name\"`\n\tVersion    string `xml:\"version,omitempty\"`\n}\n\nfunc init() {\n\tt[\"Relation\"] = reflect.TypeOf((*Relation)(nil)).Elem()\n}\n\ntype ReleaseCredentialsInGuest ReleaseCredentialsInGuestRequestType\n\nfunc init() {\n\tt[\"ReleaseCredentialsInGuest\"] = reflect.TypeOf((*ReleaseCredentialsInGuest)(nil)).Elem()\n}\n\ntype ReleaseCredentialsInGuestRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tVm   ManagedObjectReference  `xml:\"vm\"`\n\tAuth BaseGuestAuthentication `xml:\"auth,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ReleaseCredentialsInGuestRequestType\"] = reflect.TypeOf((*ReleaseCredentialsInGuestRequestType)(nil)).Elem()\n}\n\ntype ReleaseCredentialsInGuestResponse struct {\n}\n\ntype ReleaseIpAllocation ReleaseIpAllocationRequestType\n\nfunc init() {\n\tt[\"ReleaseIpAllocation\"] = reflect.TypeOf((*ReleaseIpAllocation)(nil)).Elem()\n}\n\ntype ReleaseIpAllocationRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tDc           ManagedObjectReference `xml:\"dc\"`\n\tPoolId       int32                  `xml:\"poolId\"`\n\tAllocationId string                 `xml:\"allocationId\"`\n}\n\nfunc init() {\n\tt[\"ReleaseIpAllocationRequestType\"] = reflect.TypeOf((*ReleaseIpAllocationRequestType)(nil)).Elem()\n}\n\ntype 
ReleaseIpAllocationResponse struct {\n}\n\ntype ReleaseManagedSnapshot ReleaseManagedSnapshotRequestType\n\nfunc init() {\n\tt[\"ReleaseManagedSnapshot\"] = reflect.TypeOf((*ReleaseManagedSnapshot)(nil)).Elem()\n}\n\ntype ReleaseManagedSnapshotRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tVdisk      string                  `xml:\"vdisk\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ReleaseManagedSnapshotRequestType\"] = reflect.TypeOf((*ReleaseManagedSnapshotRequestType)(nil)).Elem()\n}\n\ntype ReleaseManagedSnapshotResponse struct {\n}\n\ntype Reload ReloadRequestType\n\nfunc init() {\n\tt[\"Reload\"] = reflect.TypeOf((*Reload)(nil)).Elem()\n}\n\ntype ReloadRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"ReloadRequestType\"] = reflect.TypeOf((*ReloadRequestType)(nil)).Elem()\n}\n\ntype ReloadResponse struct {\n}\n\ntype RelocateVMRequestType struct {\n\tThis     ManagedObjectReference     `xml:\"_this\"`\n\tSpec     VirtualMachineRelocateSpec `xml:\"spec\"`\n\tPriority VirtualMachineMovePriority `xml:\"priority,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RelocateVMRequestType\"] = reflect.TypeOf((*RelocateVMRequestType)(nil)).Elem()\n}\n\ntype RelocateVM_Task RelocateVMRequestType\n\nfunc init() {\n\tt[\"RelocateVM_Task\"] = reflect.TypeOf((*RelocateVM_Task)(nil)).Elem()\n}\n\ntype RelocateVM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype RelocateVStorageObjectRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tId        ID                     `xml:\"id\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n\tSpec      VslmRelocateSpec       `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"RelocateVStorageObjectRequestType\"] = reflect.TypeOf((*RelocateVStorageObjectRequestType)(nil)).Elem()\n}\n\ntype RelocateVStorageObject_Task RelocateVStorageObjectRequestType\n\nfunc 
init() {\n\tt[\"RelocateVStorageObject_Task\"] = reflect.TypeOf((*RelocateVStorageObject_Task)(nil)).Elem()\n}\n\ntype RelocateVStorageObject_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype RemoteDeviceNotSupported struct {\n\tDeviceNotSupported\n}\n\nfunc init() {\n\tt[\"RemoteDeviceNotSupported\"] = reflect.TypeOf((*RemoteDeviceNotSupported)(nil)).Elem()\n}\n\ntype RemoteDeviceNotSupportedFault RemoteDeviceNotSupported\n\nfunc init() {\n\tt[\"RemoteDeviceNotSupportedFault\"] = reflect.TypeOf((*RemoteDeviceNotSupportedFault)(nil)).Elem()\n}\n\ntype RemoteTSMEnabledEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"RemoteTSMEnabledEvent\"] = reflect.TypeOf((*RemoteTSMEnabledEvent)(nil)).Elem()\n}\n\ntype RemoveAlarm RemoveAlarmRequestType\n\nfunc init() {\n\tt[\"RemoveAlarm\"] = reflect.TypeOf((*RemoveAlarm)(nil)).Elem()\n}\n\ntype RemoveAlarmRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RemoveAlarmRequestType\"] = reflect.TypeOf((*RemoveAlarmRequestType)(nil)).Elem()\n}\n\ntype RemoveAlarmResponse struct {\n}\n\ntype RemoveAllSnapshotsRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tConsolidate *bool                  `xml:\"consolidate\"`\n}\n\nfunc init() {\n\tt[\"RemoveAllSnapshotsRequestType\"] = reflect.TypeOf((*RemoveAllSnapshotsRequestType)(nil)).Elem()\n}\n\ntype RemoveAllSnapshots_Task RemoveAllSnapshotsRequestType\n\nfunc init() {\n\tt[\"RemoveAllSnapshots_Task\"] = reflect.TypeOf((*RemoveAllSnapshots_Task)(nil)).Elem()\n}\n\ntype RemoveAllSnapshots_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype RemoveAssignedLicense RemoveAssignedLicenseRequestType\n\nfunc init() {\n\tt[\"RemoveAssignedLicense\"] = reflect.TypeOf((*RemoveAssignedLicense)(nil)).Elem()\n}\n\ntype RemoveAssignedLicenseRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tEntityId string                 
`xml:\"entityId\"`\n}\n\nfunc init() {\n\tt[\"RemoveAssignedLicenseRequestType\"] = reflect.TypeOf((*RemoveAssignedLicenseRequestType)(nil)).Elem()\n}\n\ntype RemoveAssignedLicenseResponse struct {\n}\n\ntype RemoveAuthorizationRole RemoveAuthorizationRoleRequestType\n\nfunc init() {\n\tt[\"RemoveAuthorizationRole\"] = reflect.TypeOf((*RemoveAuthorizationRole)(nil)).Elem()\n}\n\ntype RemoveAuthorizationRoleRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tRoleId     int32                  `xml:\"roleId\"`\n\tFailIfUsed bool                   `xml:\"failIfUsed\"`\n}\n\nfunc init() {\n\tt[\"RemoveAuthorizationRoleRequestType\"] = reflect.TypeOf((*RemoveAuthorizationRoleRequestType)(nil)).Elem()\n}\n\ntype RemoveAuthorizationRoleResponse struct {\n}\n\ntype RemoveCustomFieldDef RemoveCustomFieldDefRequestType\n\nfunc init() {\n\tt[\"RemoveCustomFieldDef\"] = reflect.TypeOf((*RemoveCustomFieldDef)(nil)).Elem()\n}\n\ntype RemoveCustomFieldDefRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tKey  int32                  `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"RemoveCustomFieldDefRequestType\"] = reflect.TypeOf((*RemoveCustomFieldDefRequestType)(nil)).Elem()\n}\n\ntype RemoveCustomFieldDefResponse struct {\n}\n\ntype RemoveDatastore RemoveDatastoreRequestType\n\nfunc init() {\n\tt[\"RemoveDatastore\"] = reflect.TypeOf((*RemoveDatastore)(nil)).Elem()\n}\n\ntype RemoveDatastoreExRequestType struct {\n\tThis      ManagedObjectReference   `xml:\"_this\"`\n\tDatastore []ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"RemoveDatastoreExRequestType\"] = reflect.TypeOf((*RemoveDatastoreExRequestType)(nil)).Elem()\n}\n\ntype RemoveDatastoreEx_Task RemoveDatastoreExRequestType\n\nfunc init() {\n\tt[\"RemoveDatastoreEx_Task\"] = reflect.TypeOf((*RemoveDatastoreEx_Task)(nil)).Elem()\n}\n\ntype RemoveDatastoreEx_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype 
RemoveDatastoreRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"RemoveDatastoreRequestType\"] = reflect.TypeOf((*RemoveDatastoreRequestType)(nil)).Elem()\n}\n\ntype RemoveDatastoreResponse struct {\n}\n\ntype RemoveDiskMappingRequestType struct {\n\tThis            ManagedObjectReference `xml:\"_this\"`\n\tMapping         []VsanHostDiskMapping  `xml:\"mapping\"`\n\tMaintenanceSpec *HostMaintenanceSpec   `xml:\"maintenanceSpec,omitempty\"`\n\tTimeout         int32                  `xml:\"timeout,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RemoveDiskMappingRequestType\"] = reflect.TypeOf((*RemoveDiskMappingRequestType)(nil)).Elem()\n}\n\ntype RemoveDiskMapping_Task RemoveDiskMappingRequestType\n\nfunc init() {\n\tt[\"RemoveDiskMapping_Task\"] = reflect.TypeOf((*RemoveDiskMapping_Task)(nil)).Elem()\n}\n\ntype RemoveDiskMapping_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype RemoveDiskRequestType struct {\n\tThis            ManagedObjectReference `xml:\"_this\"`\n\tDisk            []HostScsiDisk         `xml:\"disk\"`\n\tMaintenanceSpec *HostMaintenanceSpec   `xml:\"maintenanceSpec,omitempty\"`\n\tTimeout         int32                  `xml:\"timeout,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RemoveDiskRequestType\"] = reflect.TypeOf((*RemoveDiskRequestType)(nil)).Elem()\n}\n\ntype RemoveDisk_Task RemoveDiskRequestType\n\nfunc init() {\n\tt[\"RemoveDisk_Task\"] = reflect.TypeOf((*RemoveDisk_Task)(nil)).Elem()\n}\n\ntype RemoveDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype RemoveEntityPermission RemoveEntityPermissionRequestType\n\nfunc init() {\n\tt[\"RemoveEntityPermission\"] = reflect.TypeOf((*RemoveEntityPermission)(nil)).Elem()\n}\n\ntype RemoveEntityPermissionRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tEntity  ManagedObjectReference 
`xml:\"entity\"`\n\tUser    string                 `xml:\"user\"`\n\tIsGroup bool                   `xml:\"isGroup\"`\n}\n\nfunc init() {\n\tt[\"RemoveEntityPermissionRequestType\"] = reflect.TypeOf((*RemoveEntityPermissionRequestType)(nil)).Elem()\n}\n\ntype RemoveEntityPermissionResponse struct {\n}\n\ntype RemoveFailed struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"RemoveFailed\"] = reflect.TypeOf((*RemoveFailed)(nil)).Elem()\n}\n\ntype RemoveFailedFault RemoveFailed\n\nfunc init() {\n\tt[\"RemoveFailedFault\"] = reflect.TypeOf((*RemoveFailedFault)(nil)).Elem()\n}\n\ntype RemoveFilter RemoveFilterRequestType\n\nfunc init() {\n\tt[\"RemoveFilter\"] = reflect.TypeOf((*RemoveFilter)(nil)).Elem()\n}\n\ntype RemoveFilterEntities RemoveFilterEntitiesRequestType\n\nfunc init() {\n\tt[\"RemoveFilterEntities\"] = reflect.TypeOf((*RemoveFilterEntities)(nil)).Elem()\n}\n\ntype RemoveFilterEntitiesRequestType struct {\n\tThis     ManagedObjectReference   `xml:\"_this\"`\n\tFilterId string                   `xml:\"filterId\"`\n\tEntities []ManagedObjectReference `xml:\"entities,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RemoveFilterEntitiesRequestType\"] = reflect.TypeOf((*RemoveFilterEntitiesRequestType)(nil)).Elem()\n}\n\ntype RemoveFilterEntitiesResponse struct {\n}\n\ntype RemoveFilterRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tFilterId string                 `xml:\"filterId\"`\n}\n\nfunc init() {\n\tt[\"RemoveFilterRequestType\"] = reflect.TypeOf((*RemoveFilterRequestType)(nil)).Elem()\n}\n\ntype RemoveFilterResponse struct {\n}\n\ntype RemoveGroup RemoveGroupRequestType\n\nfunc init() {\n\tt[\"RemoveGroup\"] = reflect.TypeOf((*RemoveGroup)(nil)).Elem()\n}\n\ntype RemoveGroupRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tGroupName string                 `xml:\"groupName\"`\n}\n\nfunc init() {\n\tt[\"RemoveGroupRequestType\"] = reflect.TypeOf((*RemoveGroupRequestType)(nil)).Elem()\n}\n\ntype RemoveGroupResponse 
struct {\n}\n\ntype RemoveGuestAlias RemoveGuestAliasRequestType\n\nfunc init() {\n\tt[\"RemoveGuestAlias\"] = reflect.TypeOf((*RemoveGuestAlias)(nil)).Elem()\n}\n\ntype RemoveGuestAliasByCert RemoveGuestAliasByCertRequestType\n\nfunc init() {\n\tt[\"RemoveGuestAliasByCert\"] = reflect.TypeOf((*RemoveGuestAliasByCert)(nil)).Elem()\n}\n\ntype RemoveGuestAliasByCertRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tVm         ManagedObjectReference  `xml:\"vm\"`\n\tAuth       BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tUsername   string                  `xml:\"username\"`\n\tBase64Cert string                  `xml:\"base64Cert\"`\n}\n\nfunc init() {\n\tt[\"RemoveGuestAliasByCertRequestType\"] = reflect.TypeOf((*RemoveGuestAliasByCertRequestType)(nil)).Elem()\n}\n\ntype RemoveGuestAliasByCertResponse struct {\n}\n\ntype RemoveGuestAliasRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tVm         ManagedObjectReference  `xml:\"vm\"`\n\tAuth       BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tUsername   string                  `xml:\"username\"`\n\tBase64Cert string                  `xml:\"base64Cert\"`\n\tSubject    BaseGuestAuthSubject    `xml:\"subject,typeattr\"`\n}\n\nfunc init() {\n\tt[\"RemoveGuestAliasRequestType\"] = reflect.TypeOf((*RemoveGuestAliasRequestType)(nil)).Elem()\n}\n\ntype RemoveGuestAliasResponse struct {\n}\n\ntype RemoveInternetScsiSendTargets RemoveInternetScsiSendTargetsRequestType\n\nfunc init() {\n\tt[\"RemoveInternetScsiSendTargets\"] = reflect.TypeOf((*RemoveInternetScsiSendTargets)(nil)).Elem()\n}\n\ntype RemoveInternetScsiSendTargetsRequestType struct {\n\tThis           ManagedObjectReference          `xml:\"_this\"`\n\tIScsiHbaDevice string                          `xml:\"iScsiHbaDevice\"`\n\tTargets        []HostInternetScsiHbaSendTarget `xml:\"targets\"`\n}\n\nfunc init() {\n\tt[\"RemoveInternetScsiSendTargetsRequestType\"] = 
reflect.TypeOf((*RemoveInternetScsiSendTargetsRequestType)(nil)).Elem()\n}\n\ntype RemoveInternetScsiSendTargetsResponse struct {\n}\n\ntype RemoveInternetScsiStaticTargets RemoveInternetScsiStaticTargetsRequestType\n\nfunc init() {\n\tt[\"RemoveInternetScsiStaticTargets\"] = reflect.TypeOf((*RemoveInternetScsiStaticTargets)(nil)).Elem()\n}\n\ntype RemoveInternetScsiStaticTargetsRequestType struct {\n\tThis           ManagedObjectReference            `xml:\"_this\"`\n\tIScsiHbaDevice string                            `xml:\"iScsiHbaDevice\"`\n\tTargets        []HostInternetScsiHbaStaticTarget `xml:\"targets\"`\n}\n\nfunc init() {\n\tt[\"RemoveInternetScsiStaticTargetsRequestType\"] = reflect.TypeOf((*RemoveInternetScsiStaticTargetsRequestType)(nil)).Elem()\n}\n\ntype RemoveInternetScsiStaticTargetsResponse struct {\n}\n\ntype RemoveKey RemoveKeyRequestType\n\nfunc init() {\n\tt[\"RemoveKey\"] = reflect.TypeOf((*RemoveKey)(nil)).Elem()\n}\n\ntype RemoveKeyRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tKey   CryptoKeyId            `xml:\"key\"`\n\tForce bool                   `xml:\"force\"`\n}\n\nfunc init() {\n\tt[\"RemoveKeyRequestType\"] = reflect.TypeOf((*RemoveKeyRequestType)(nil)).Elem()\n}\n\ntype RemoveKeyResponse struct {\n}\n\ntype RemoveKeys RemoveKeysRequestType\n\nfunc init() {\n\tt[\"RemoveKeys\"] = reflect.TypeOf((*RemoveKeys)(nil)).Elem()\n}\n\ntype RemoveKeysRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tKeys  []CryptoKeyId          `xml:\"keys,omitempty\"`\n\tForce bool                   `xml:\"force\"`\n}\n\nfunc init() {\n\tt[\"RemoveKeysRequestType\"] = reflect.TypeOf((*RemoveKeysRequestType)(nil)).Elem()\n}\n\ntype RemoveKeysResponse struct {\n\tReturnval []CryptoKeyResult `xml:\"returnval,omitempty\"`\n}\n\ntype RemoveKmipServer RemoveKmipServerRequestType\n\nfunc init() {\n\tt[\"RemoveKmipServer\"] = reflect.TypeOf((*RemoveKmipServer)(nil)).Elem()\n}\n\ntype RemoveKmipServerRequestType struct 
{\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tClusterId  KeyProviderId          `xml:\"clusterId\"`\n\tServerName string                 `xml:\"serverName\"`\n}\n\nfunc init() {\n\tt[\"RemoveKmipServerRequestType\"] = reflect.TypeOf((*RemoveKmipServerRequestType)(nil)).Elem()\n}\n\ntype RemoveKmipServerResponse struct {\n}\n\ntype RemoveLicense RemoveLicenseRequestType\n\nfunc init() {\n\tt[\"RemoveLicense\"] = reflect.TypeOf((*RemoveLicense)(nil)).Elem()\n}\n\ntype RemoveLicenseLabel RemoveLicenseLabelRequestType\n\nfunc init() {\n\tt[\"RemoveLicenseLabel\"] = reflect.TypeOf((*RemoveLicenseLabel)(nil)).Elem()\n}\n\ntype RemoveLicenseLabelRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tLicenseKey string                 `xml:\"licenseKey\"`\n\tLabelKey   string                 `xml:\"labelKey\"`\n}\n\nfunc init() {\n\tt[\"RemoveLicenseLabelRequestType\"] = reflect.TypeOf((*RemoveLicenseLabelRequestType)(nil)).Elem()\n}\n\ntype RemoveLicenseLabelResponse struct {\n}\n\ntype RemoveLicenseRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tLicenseKey string                 `xml:\"licenseKey\"`\n}\n\nfunc init() {\n\tt[\"RemoveLicenseRequestType\"] = reflect.TypeOf((*RemoveLicenseRequestType)(nil)).Elem()\n}\n\ntype RemoveLicenseResponse struct {\n}\n\ntype RemoveMonitoredEntities RemoveMonitoredEntitiesRequestType\n\nfunc init() {\n\tt[\"RemoveMonitoredEntities\"] = reflect.TypeOf((*RemoveMonitoredEntities)(nil)).Elem()\n}\n\ntype RemoveMonitoredEntitiesRequestType struct {\n\tThis       ManagedObjectReference   `xml:\"_this\"`\n\tProviderId string                   `xml:\"providerId\"`\n\tEntities   []ManagedObjectReference `xml:\"entities,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RemoveMonitoredEntitiesRequestType\"] = reflect.TypeOf((*RemoveMonitoredEntitiesRequestType)(nil)).Elem()\n}\n\ntype RemoveMonitoredEntitiesResponse struct {\n}\n\ntype RemoveNetworkResourcePool 
RemoveNetworkResourcePoolRequestType\n\nfunc init() {\n\tt[\"RemoveNetworkResourcePool\"] = reflect.TypeOf((*RemoveNetworkResourcePool)(nil)).Elem()\n}\n\ntype RemoveNetworkResourcePoolRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tKey  []string               `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"RemoveNetworkResourcePoolRequestType\"] = reflect.TypeOf((*RemoveNetworkResourcePoolRequestType)(nil)).Elem()\n}\n\ntype RemoveNetworkResourcePoolResponse struct {\n}\n\ntype RemovePerfInterval RemovePerfIntervalRequestType\n\nfunc init() {\n\tt[\"RemovePerfInterval\"] = reflect.TypeOf((*RemovePerfInterval)(nil)).Elem()\n}\n\ntype RemovePerfIntervalRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tSamplePeriod int32                  `xml:\"samplePeriod\"`\n}\n\nfunc init() {\n\tt[\"RemovePerfIntervalRequestType\"] = reflect.TypeOf((*RemovePerfIntervalRequestType)(nil)).Elem()\n}\n\ntype RemovePerfIntervalResponse struct {\n}\n\ntype RemovePortGroup RemovePortGroupRequestType\n\nfunc init() {\n\tt[\"RemovePortGroup\"] = reflect.TypeOf((*RemovePortGroup)(nil)).Elem()\n}\n\ntype RemovePortGroupRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tPgName string                 `xml:\"pgName\"`\n}\n\nfunc init() {\n\tt[\"RemovePortGroupRequestType\"] = reflect.TypeOf((*RemovePortGroupRequestType)(nil)).Elem()\n}\n\ntype RemovePortGroupResponse struct {\n}\n\ntype RemoveScheduledTask RemoveScheduledTaskRequestType\n\nfunc init() {\n\tt[\"RemoveScheduledTask\"] = reflect.TypeOf((*RemoveScheduledTask)(nil)).Elem()\n}\n\ntype RemoveScheduledTaskRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RemoveScheduledTaskRequestType\"] = reflect.TypeOf((*RemoveScheduledTaskRequestType)(nil)).Elem()\n}\n\ntype RemoveScheduledTaskResponse struct {\n}\n\ntype RemoveServiceConsoleVirtualNic RemoveServiceConsoleVirtualNicRequestType\n\nfunc init() 
{\n\tt[\"RemoveServiceConsoleVirtualNic\"] = reflect.TypeOf((*RemoveServiceConsoleVirtualNic)(nil)).Elem()\n}\n\ntype RemoveServiceConsoleVirtualNicRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tDevice string                 `xml:\"device\"`\n}\n\nfunc init() {\n\tt[\"RemoveServiceConsoleVirtualNicRequestType\"] = reflect.TypeOf((*RemoveServiceConsoleVirtualNicRequestType)(nil)).Elem()\n}\n\ntype RemoveServiceConsoleVirtualNicResponse struct {\n}\n\ntype RemoveSmartCardTrustAnchor RemoveSmartCardTrustAnchorRequestType\n\nfunc init() {\n\tt[\"RemoveSmartCardTrustAnchor\"] = reflect.TypeOf((*RemoveSmartCardTrustAnchor)(nil)).Elem()\n}\n\ntype RemoveSmartCardTrustAnchorByFingerprint RemoveSmartCardTrustAnchorByFingerprintRequestType\n\nfunc init() {\n\tt[\"RemoveSmartCardTrustAnchorByFingerprint\"] = reflect.TypeOf((*RemoveSmartCardTrustAnchorByFingerprint)(nil)).Elem()\n}\n\ntype RemoveSmartCardTrustAnchorByFingerprintRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tFingerprint string                 `xml:\"fingerprint\"`\n\tDigest      string                 `xml:\"digest\"`\n}\n\nfunc init() {\n\tt[\"RemoveSmartCardTrustAnchorByFingerprintRequestType\"] = reflect.TypeOf((*RemoveSmartCardTrustAnchorByFingerprintRequestType)(nil)).Elem()\n}\n\ntype RemoveSmartCardTrustAnchorByFingerprintResponse struct {\n}\n\ntype RemoveSmartCardTrustAnchorRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tIssuer string                 `xml:\"issuer\"`\n\tSerial string                 `xml:\"serial\"`\n}\n\nfunc init() {\n\tt[\"RemoveSmartCardTrustAnchorRequestType\"] = reflect.TypeOf((*RemoveSmartCardTrustAnchorRequestType)(nil)).Elem()\n}\n\ntype RemoveSmartCardTrustAnchorResponse struct {\n}\n\ntype RemoveSnapshotRequestType struct {\n\tThis           ManagedObjectReference `xml:\"_this\"`\n\tRemoveChildren bool                   `xml:\"removeChildren\"`\n\tConsolidate    *bool                  
`xml:\"consolidate\"`\n}\n\nfunc init() {\n\tt[\"RemoveSnapshotRequestType\"] = reflect.TypeOf((*RemoveSnapshotRequestType)(nil)).Elem()\n}\n\ntype RemoveSnapshot_Task RemoveSnapshotRequestType\n\nfunc init() {\n\tt[\"RemoveSnapshot_Task\"] = reflect.TypeOf((*RemoveSnapshot_Task)(nil)).Elem()\n}\n\ntype RemoveSnapshot_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype RemoveUser RemoveUserRequestType\n\nfunc init() {\n\tt[\"RemoveUser\"] = reflect.TypeOf((*RemoveUser)(nil)).Elem()\n}\n\ntype RemoveUserRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tUserName string                 `xml:\"userName\"`\n}\n\nfunc init() {\n\tt[\"RemoveUserRequestType\"] = reflect.TypeOf((*RemoveUserRequestType)(nil)).Elem()\n}\n\ntype RemoveUserResponse struct {\n}\n\ntype RemoveVirtualNic RemoveVirtualNicRequestType\n\nfunc init() {\n\tt[\"RemoveVirtualNic\"] = reflect.TypeOf((*RemoveVirtualNic)(nil)).Elem()\n}\n\ntype RemoveVirtualNicRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tDevice string                 `xml:\"device\"`\n}\n\nfunc init() {\n\tt[\"RemoveVirtualNicRequestType\"] = reflect.TypeOf((*RemoveVirtualNicRequestType)(nil)).Elem()\n}\n\ntype RemoveVirtualNicResponse struct {\n}\n\ntype RemoveVirtualSwitch RemoveVirtualSwitchRequestType\n\nfunc init() {\n\tt[\"RemoveVirtualSwitch\"] = reflect.TypeOf((*RemoveVirtualSwitch)(nil)).Elem()\n}\n\ntype RemoveVirtualSwitchRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tVswitchName string                 `xml:\"vswitchName\"`\n}\n\nfunc init() {\n\tt[\"RemoveVirtualSwitchRequestType\"] = reflect.TypeOf((*RemoveVirtualSwitchRequestType)(nil)).Elem()\n}\n\ntype RemoveVirtualSwitchResponse struct {\n}\n\ntype RenameCustomFieldDef RenameCustomFieldDefRequestType\n\nfunc init() {\n\tt[\"RenameCustomFieldDef\"] = reflect.TypeOf((*RenameCustomFieldDef)(nil)).Elem()\n}\n\ntype RenameCustomFieldDefRequestType struct 
{\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tKey  int32                  `xml:\"key\"`\n\tName string                 `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"RenameCustomFieldDefRequestType\"] = reflect.TypeOf((*RenameCustomFieldDefRequestType)(nil)).Elem()\n}\n\ntype RenameCustomFieldDefResponse struct {\n}\n\ntype RenameCustomizationSpec RenameCustomizationSpecRequestType\n\nfunc init() {\n\tt[\"RenameCustomizationSpec\"] = reflect.TypeOf((*RenameCustomizationSpec)(nil)).Elem()\n}\n\ntype RenameCustomizationSpecRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tName    string                 `xml:\"name\"`\n\tNewName string                 `xml:\"newName\"`\n}\n\nfunc init() {\n\tt[\"RenameCustomizationSpecRequestType\"] = reflect.TypeOf((*RenameCustomizationSpecRequestType)(nil)).Elem()\n}\n\ntype RenameCustomizationSpecResponse struct {\n}\n\ntype RenameDatastore RenameDatastoreRequestType\n\nfunc init() {\n\tt[\"RenameDatastore\"] = reflect.TypeOf((*RenameDatastore)(nil)).Elem()\n}\n\ntype RenameDatastoreRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tNewName string                 `xml:\"newName\"`\n}\n\nfunc init() {\n\tt[\"RenameDatastoreRequestType\"] = reflect.TypeOf((*RenameDatastoreRequestType)(nil)).Elem()\n}\n\ntype RenameDatastoreResponse struct {\n}\n\ntype RenameRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tNewName string                 `xml:\"newName\"`\n}\n\nfunc init() {\n\tt[\"RenameRequestType\"] = reflect.TypeOf((*RenameRequestType)(nil)).Elem()\n}\n\ntype RenameSnapshot RenameSnapshotRequestType\n\nfunc init() {\n\tt[\"RenameSnapshot\"] = reflect.TypeOf((*RenameSnapshot)(nil)).Elem()\n}\n\ntype RenameSnapshotRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tName        string                 `xml:\"name,omitempty\"`\n\tDescription string                 `xml:\"description,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"RenameSnapshotRequestType\"] = reflect.TypeOf((*RenameSnapshotRequestType)(nil)).Elem()\n}\n\ntype RenameSnapshotResponse struct {\n}\n\ntype RenameVStorageObject RenameVStorageObjectRequestType\n\nfunc init() {\n\tt[\"RenameVStorageObject\"] = reflect.TypeOf((*RenameVStorageObject)(nil)).Elem()\n}\n\ntype RenameVStorageObjectRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tId        ID                     `xml:\"id\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n\tName      string                 `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"RenameVStorageObjectRequestType\"] = reflect.TypeOf((*RenameVStorageObjectRequestType)(nil)).Elem()\n}\n\ntype RenameVStorageObjectResponse struct {\n}\n\ntype Rename_Task RenameRequestType\n\nfunc init() {\n\tt[\"Rename_Task\"] = reflect.TypeOf((*Rename_Task)(nil)).Elem()\n}\n\ntype Rename_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ReplaceCACertificatesAndCRLs ReplaceCACertificatesAndCRLsRequestType\n\nfunc init() {\n\tt[\"ReplaceCACertificatesAndCRLs\"] = reflect.TypeOf((*ReplaceCACertificatesAndCRLs)(nil)).Elem()\n}\n\ntype ReplaceCACertificatesAndCRLsRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tCaCert []string               `xml:\"caCert\"`\n\tCaCrl  []string               `xml:\"caCrl,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ReplaceCACertificatesAndCRLsRequestType\"] = reflect.TypeOf((*ReplaceCACertificatesAndCRLsRequestType)(nil)).Elem()\n}\n\ntype ReplaceCACertificatesAndCRLsResponse struct {\n}\n\ntype ReplaceSmartCardTrustAnchors ReplaceSmartCardTrustAnchorsRequestType\n\nfunc init() {\n\tt[\"ReplaceSmartCardTrustAnchors\"] = reflect.TypeOf((*ReplaceSmartCardTrustAnchors)(nil)).Elem()\n}\n\ntype ReplaceSmartCardTrustAnchorsRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tCerts []string               `xml:\"certs,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"ReplaceSmartCardTrustAnchorsRequestType\"] = reflect.TypeOf((*ReplaceSmartCardTrustAnchorsRequestType)(nil)).Elem()\n}\n\ntype ReplaceSmartCardTrustAnchorsResponse struct {\n}\n\ntype ReplicationConfigFault struct {\n\tReplicationFault\n}\n\nfunc init() {\n\tt[\"ReplicationConfigFault\"] = reflect.TypeOf((*ReplicationConfigFault)(nil)).Elem()\n}\n\ntype ReplicationConfigFaultFault BaseReplicationConfigFault\n\nfunc init() {\n\tt[\"ReplicationConfigFaultFault\"] = reflect.TypeOf((*ReplicationConfigFaultFault)(nil)).Elem()\n}\n\ntype ReplicationConfigSpec struct {\n\tDynamicData\n\n\tGeneration            int64                         `xml:\"generation\"`\n\tVmReplicationId       string                        `xml:\"vmReplicationId\"`\n\tDestination           string                        `xml:\"destination\"`\n\tPort                  int32                         `xml:\"port\"`\n\tRpo                   int64                         `xml:\"rpo\"`\n\tQuiesceGuestEnabled   bool                          `xml:\"quiesceGuestEnabled\"`\n\tPaused                bool                          `xml:\"paused\"`\n\tOppUpdatesEnabled     bool                          `xml:\"oppUpdatesEnabled\"`\n\tNetCompressionEnabled *bool                         `xml:\"netCompressionEnabled\"`\n\tDisk                  []ReplicationInfoDiskSettings `xml:\"disk,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ReplicationConfigSpec\"] = reflect.TypeOf((*ReplicationConfigSpec)(nil)).Elem()\n}\n\ntype ReplicationDiskConfigFault struct {\n\tReplicationConfigFault\n\n\tReason string                  `xml:\"reason,omitempty\"`\n\tVmRef  *ManagedObjectReference `xml:\"vmRef,omitempty\"`\n\tKey    int32                   `xml:\"key,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ReplicationDiskConfigFault\"] = reflect.TypeOf((*ReplicationDiskConfigFault)(nil)).Elem()\n}\n\ntype ReplicationDiskConfigFaultFault ReplicationDiskConfigFault\n\nfunc init() {\n\tt[\"ReplicationDiskConfigFaultFault\"] = 
reflect.TypeOf((*ReplicationDiskConfigFaultFault)(nil)).Elem()\n}\n\ntype ReplicationFault struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"ReplicationFault\"] = reflect.TypeOf((*ReplicationFault)(nil)).Elem()\n}\n\ntype ReplicationFaultFault BaseReplicationFault\n\nfunc init() {\n\tt[\"ReplicationFaultFault\"] = reflect.TypeOf((*ReplicationFaultFault)(nil)).Elem()\n}\n\ntype ReplicationGroupId struct {\n\tDynamicData\n\n\tFaultDomainId FaultDomainId `xml:\"faultDomainId\"`\n\tDeviceGroupId DeviceGroupId `xml:\"deviceGroupId\"`\n}\n\nfunc init() {\n\tt[\"ReplicationGroupId\"] = reflect.TypeOf((*ReplicationGroupId)(nil)).Elem()\n}\n\ntype ReplicationIncompatibleWithFT struct {\n\tReplicationFault\n}\n\nfunc init() {\n\tt[\"ReplicationIncompatibleWithFT\"] = reflect.TypeOf((*ReplicationIncompatibleWithFT)(nil)).Elem()\n}\n\ntype ReplicationIncompatibleWithFTFault ReplicationIncompatibleWithFT\n\nfunc init() {\n\tt[\"ReplicationIncompatibleWithFTFault\"] = reflect.TypeOf((*ReplicationIncompatibleWithFTFault)(nil)).Elem()\n}\n\ntype ReplicationInfoDiskSettings struct {\n\tDynamicData\n\n\tKey               int32  `xml:\"key\"`\n\tDiskReplicationId string `xml:\"diskReplicationId\"`\n}\n\nfunc init() {\n\tt[\"ReplicationInfoDiskSettings\"] = reflect.TypeOf((*ReplicationInfoDiskSettings)(nil)).Elem()\n}\n\ntype ReplicationInvalidOptions struct {\n\tReplicationFault\n\n\tOptions string                  `xml:\"options\"`\n\tEntity  *ManagedObjectReference `xml:\"entity,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ReplicationInvalidOptions\"] = reflect.TypeOf((*ReplicationInvalidOptions)(nil)).Elem()\n}\n\ntype ReplicationInvalidOptionsFault ReplicationInvalidOptions\n\nfunc init() {\n\tt[\"ReplicationInvalidOptionsFault\"] = reflect.TypeOf((*ReplicationInvalidOptionsFault)(nil)).Elem()\n}\n\ntype ReplicationNotSupportedOnHost struct {\n\tReplicationFault\n}\n\nfunc init() {\n\tt[\"ReplicationNotSupportedOnHost\"] = 
reflect.TypeOf((*ReplicationNotSupportedOnHost)(nil)).Elem()\n}\n\ntype ReplicationNotSupportedOnHostFault ReplicationNotSupportedOnHost\n\nfunc init() {\n\tt[\"ReplicationNotSupportedOnHostFault\"] = reflect.TypeOf((*ReplicationNotSupportedOnHostFault)(nil)).Elem()\n}\n\ntype ReplicationSpec struct {\n\tDynamicData\n\n\tReplicationGroupId ReplicationGroupId `xml:\"replicationGroupId\"`\n}\n\nfunc init() {\n\tt[\"ReplicationSpec\"] = reflect.TypeOf((*ReplicationSpec)(nil)).Elem()\n}\n\ntype ReplicationVmConfigFault struct {\n\tReplicationConfigFault\n\n\tReason string                  `xml:\"reason,omitempty\"`\n\tVmRef  *ManagedObjectReference `xml:\"vmRef,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ReplicationVmConfigFault\"] = reflect.TypeOf((*ReplicationVmConfigFault)(nil)).Elem()\n}\n\ntype ReplicationVmConfigFaultFault ReplicationVmConfigFault\n\nfunc init() {\n\tt[\"ReplicationVmConfigFaultFault\"] = reflect.TypeOf((*ReplicationVmConfigFaultFault)(nil)).Elem()\n}\n\ntype ReplicationVmFault struct {\n\tReplicationFault\n\n\tReason     string                  `xml:\"reason,omitempty\"`\n\tState      string                  `xml:\"state,omitempty\"`\n\tInstanceId string                  `xml:\"instanceId,omitempty\"`\n\tVm         *ManagedObjectReference `xml:\"vm,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ReplicationVmFault\"] = reflect.TypeOf((*ReplicationVmFault)(nil)).Elem()\n}\n\ntype ReplicationVmFaultFault BaseReplicationVmFault\n\nfunc init() {\n\tt[\"ReplicationVmFaultFault\"] = reflect.TypeOf((*ReplicationVmFaultFault)(nil)).Elem()\n}\n\ntype ReplicationVmInProgressFault struct {\n\tReplicationVmFault\n\n\tRequestedActivity  string `xml:\"requestedActivity\"`\n\tInProgressActivity string `xml:\"inProgressActivity\"`\n}\n\nfunc init() {\n\tt[\"ReplicationVmInProgressFault\"] = reflect.TypeOf((*ReplicationVmInProgressFault)(nil)).Elem()\n}\n\ntype ReplicationVmInProgressFaultFault ReplicationVmInProgressFault\n\nfunc init() 
{\n\tt[\"ReplicationVmInProgressFaultFault\"] = reflect.TypeOf((*ReplicationVmInProgressFaultFault)(nil)).Elem()\n}\n\ntype ReplicationVmProgressInfo struct {\n\tDynamicData\n\n\tProgress              int32 `xml:\"progress\"`\n\tBytesTransferred      int64 `xml:\"bytesTransferred\"`\n\tBytesToTransfer       int64 `xml:\"bytesToTransfer\"`\n\tChecksumTotalBytes    int64 `xml:\"checksumTotalBytes,omitempty\"`\n\tChecksumComparedBytes int64 `xml:\"checksumComparedBytes,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ReplicationVmProgressInfo\"] = reflect.TypeOf((*ReplicationVmProgressInfo)(nil)).Elem()\n}\n\ntype RequestCanceled struct {\n\tRuntimeFault\n}\n\nfunc init() {\n\tt[\"RequestCanceled\"] = reflect.TypeOf((*RequestCanceled)(nil)).Elem()\n}\n\ntype RequestCanceledFault RequestCanceled\n\nfunc init() {\n\tt[\"RequestCanceledFault\"] = reflect.TypeOf((*RequestCanceledFault)(nil)).Elem()\n}\n\ntype RescanAllHba RescanAllHbaRequestType\n\nfunc init() {\n\tt[\"RescanAllHba\"] = reflect.TypeOf((*RescanAllHba)(nil)).Elem()\n}\n\ntype RescanAllHbaRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RescanAllHbaRequestType\"] = reflect.TypeOf((*RescanAllHbaRequestType)(nil)).Elem()\n}\n\ntype RescanAllHbaResponse struct {\n}\n\ntype RescanHba RescanHbaRequestType\n\nfunc init() {\n\tt[\"RescanHba\"] = reflect.TypeOf((*RescanHba)(nil)).Elem()\n}\n\ntype RescanHbaRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tHbaDevice string                 `xml:\"hbaDevice\"`\n}\n\nfunc init() {\n\tt[\"RescanHbaRequestType\"] = reflect.TypeOf((*RescanHbaRequestType)(nil)).Elem()\n}\n\ntype RescanHbaResponse struct {\n}\n\ntype RescanVffs RescanVffsRequestType\n\nfunc init() {\n\tt[\"RescanVffs\"] = reflect.TypeOf((*RescanVffs)(nil)).Elem()\n}\n\ntype RescanVffsRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RescanVffsRequestType\"] = 
reflect.TypeOf((*RescanVffsRequestType)(nil)).Elem()\n}\n\ntype RescanVffsResponse struct {\n}\n\ntype RescanVmfs RescanVmfsRequestType\n\nfunc init() {\n\tt[\"RescanVmfs\"] = reflect.TypeOf((*RescanVmfs)(nil)).Elem()\n}\n\ntype RescanVmfsRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RescanVmfsRequestType\"] = reflect.TypeOf((*RescanVmfsRequestType)(nil)).Elem()\n}\n\ntype RescanVmfsResponse struct {\n}\n\ntype ResetCollector ResetCollectorRequestType\n\nfunc init() {\n\tt[\"ResetCollector\"] = reflect.TypeOf((*ResetCollector)(nil)).Elem()\n}\n\ntype ResetCollectorRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"ResetCollectorRequestType\"] = reflect.TypeOf((*ResetCollectorRequestType)(nil)).Elem()\n}\n\ntype ResetCollectorResponse struct {\n}\n\ntype ResetCounterLevelMapping ResetCounterLevelMappingRequestType\n\nfunc init() {\n\tt[\"ResetCounterLevelMapping\"] = reflect.TypeOf((*ResetCounterLevelMapping)(nil)).Elem()\n}\n\ntype ResetCounterLevelMappingRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tCounters []int32                `xml:\"counters\"`\n}\n\nfunc init() {\n\tt[\"ResetCounterLevelMappingRequestType\"] = reflect.TypeOf((*ResetCounterLevelMappingRequestType)(nil)).Elem()\n}\n\ntype ResetCounterLevelMappingResponse struct {\n}\n\ntype ResetEntityPermissions ResetEntityPermissionsRequestType\n\nfunc init() {\n\tt[\"ResetEntityPermissions\"] = reflect.TypeOf((*ResetEntityPermissions)(nil)).Elem()\n}\n\ntype ResetEntityPermissionsRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tEntity     ManagedObjectReference `xml:\"entity\"`\n\tPermission []Permission           `xml:\"permission,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ResetEntityPermissionsRequestType\"] = reflect.TypeOf((*ResetEntityPermissionsRequestType)(nil)).Elem()\n}\n\ntype ResetEntityPermissionsResponse struct {\n}\n\ntype 
ResetFirmwareToFactoryDefaults ResetFirmwareToFactoryDefaultsRequestType\n\nfunc init() {\n\tt[\"ResetFirmwareToFactoryDefaults\"] = reflect.TypeOf((*ResetFirmwareToFactoryDefaults)(nil)).Elem()\n}\n\ntype ResetFirmwareToFactoryDefaultsRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"ResetFirmwareToFactoryDefaultsRequestType\"] = reflect.TypeOf((*ResetFirmwareToFactoryDefaultsRequestType)(nil)).Elem()\n}\n\ntype ResetFirmwareToFactoryDefaultsResponse struct {\n}\n\ntype ResetGuestInformation ResetGuestInformationRequestType\n\nfunc init() {\n\tt[\"ResetGuestInformation\"] = reflect.TypeOf((*ResetGuestInformation)(nil)).Elem()\n}\n\ntype ResetGuestInformationRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"ResetGuestInformationRequestType\"] = reflect.TypeOf((*ResetGuestInformationRequestType)(nil)).Elem()\n}\n\ntype ResetGuestInformationResponse struct {\n}\n\ntype ResetListView ResetListViewRequestType\n\nfunc init() {\n\tt[\"ResetListView\"] = reflect.TypeOf((*ResetListView)(nil)).Elem()\n}\n\ntype ResetListViewFromView ResetListViewFromViewRequestType\n\nfunc init() {\n\tt[\"ResetListViewFromView\"] = reflect.TypeOf((*ResetListViewFromView)(nil)).Elem()\n}\n\ntype ResetListViewFromViewRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tView ManagedObjectReference `xml:\"view\"`\n}\n\nfunc init() {\n\tt[\"ResetListViewFromViewRequestType\"] = reflect.TypeOf((*ResetListViewFromViewRequestType)(nil)).Elem()\n}\n\ntype ResetListViewFromViewResponse struct {\n}\n\ntype ResetListViewRequestType struct {\n\tThis ManagedObjectReference   `xml:\"_this\"`\n\tObj  []ManagedObjectReference `xml:\"obj,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ResetListViewRequestType\"] = reflect.TypeOf((*ResetListViewRequestType)(nil)).Elem()\n}\n\ntype ResetListViewResponse struct {\n\tReturnval []ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype ResetSystemHealthInfo 
ResetSystemHealthInfoRequestType\n\nfunc init() {\n\tt[\"ResetSystemHealthInfo\"] = reflect.TypeOf((*ResetSystemHealthInfo)(nil)).Elem()\n}\n\ntype ResetSystemHealthInfoRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"ResetSystemHealthInfoRequestType\"] = reflect.TypeOf((*ResetSystemHealthInfoRequestType)(nil)).Elem()\n}\n\ntype ResetSystemHealthInfoResponse struct {\n}\n\ntype ResetVMRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"ResetVMRequestType\"] = reflect.TypeOf((*ResetVMRequestType)(nil)).Elem()\n}\n\ntype ResetVM_Task ResetVMRequestType\n\nfunc init() {\n\tt[\"ResetVM_Task\"] = reflect.TypeOf((*ResetVM_Task)(nil)).Elem()\n}\n\ntype ResetVM_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ResignatureUnresolvedVmfsVolumeRequestType struct {\n\tThis           ManagedObjectReference            `xml:\"_this\"`\n\tResolutionSpec HostUnresolvedVmfsResignatureSpec `xml:\"resolutionSpec\"`\n}\n\nfunc init() {\n\tt[\"ResignatureUnresolvedVmfsVolumeRequestType\"] = reflect.TypeOf((*ResignatureUnresolvedVmfsVolumeRequestType)(nil)).Elem()\n}\n\ntype ResignatureUnresolvedVmfsVolume_Task ResignatureUnresolvedVmfsVolumeRequestType\n\nfunc init() {\n\tt[\"ResignatureUnresolvedVmfsVolume_Task\"] = reflect.TypeOf((*ResignatureUnresolvedVmfsVolume_Task)(nil)).Elem()\n}\n\ntype ResignatureUnresolvedVmfsVolume_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ResolveInstallationErrorsOnClusterRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tFilterId string                 `xml:\"filterId\"`\n\tCluster  ManagedObjectReference `xml:\"cluster\"`\n}\n\nfunc init() {\n\tt[\"ResolveInstallationErrorsOnClusterRequestType\"] = reflect.TypeOf((*ResolveInstallationErrorsOnClusterRequestType)(nil)).Elem()\n}\n\ntype ResolveInstallationErrorsOnCluster_Task 
ResolveInstallationErrorsOnClusterRequestType\n\nfunc init() {\n\tt[\"ResolveInstallationErrorsOnCluster_Task\"] = reflect.TypeOf((*ResolveInstallationErrorsOnCluster_Task)(nil)).Elem()\n}\n\ntype ResolveInstallationErrorsOnCluster_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ResolveInstallationErrorsOnHostRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tFilterId string                 `xml:\"filterId\"`\n\tHost     ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"ResolveInstallationErrorsOnHostRequestType\"] = reflect.TypeOf((*ResolveInstallationErrorsOnHostRequestType)(nil)).Elem()\n}\n\ntype ResolveInstallationErrorsOnHost_Task ResolveInstallationErrorsOnHostRequestType\n\nfunc init() {\n\tt[\"ResolveInstallationErrorsOnHost_Task\"] = reflect.TypeOf((*ResolveInstallationErrorsOnHost_Task)(nil)).Elem()\n}\n\ntype ResolveInstallationErrorsOnHost_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ResolveMultipleUnresolvedVmfsVolumes ResolveMultipleUnresolvedVmfsVolumesRequestType\n\nfunc init() {\n\tt[\"ResolveMultipleUnresolvedVmfsVolumes\"] = reflect.TypeOf((*ResolveMultipleUnresolvedVmfsVolumes)(nil)).Elem()\n}\n\ntype ResolveMultipleUnresolvedVmfsVolumesExRequestType struct {\n\tThis           ManagedObjectReference             `xml:\"_this\"`\n\tResolutionSpec []HostUnresolvedVmfsResolutionSpec `xml:\"resolutionSpec\"`\n}\n\nfunc init() {\n\tt[\"ResolveMultipleUnresolvedVmfsVolumesExRequestType\"] = reflect.TypeOf((*ResolveMultipleUnresolvedVmfsVolumesExRequestType)(nil)).Elem()\n}\n\ntype ResolveMultipleUnresolvedVmfsVolumesEx_Task ResolveMultipleUnresolvedVmfsVolumesExRequestType\n\nfunc init() {\n\tt[\"ResolveMultipleUnresolvedVmfsVolumesEx_Task\"] = reflect.TypeOf((*ResolveMultipleUnresolvedVmfsVolumesEx_Task)(nil)).Elem()\n}\n\ntype ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse struct {\n\tReturnval ManagedObjectReference 
`xml:\"returnval\"`\n}\n\ntype ResolveMultipleUnresolvedVmfsVolumesRequestType struct {\n\tThis           ManagedObjectReference             `xml:\"_this\"`\n\tResolutionSpec []HostUnresolvedVmfsResolutionSpec `xml:\"resolutionSpec\"`\n}\n\nfunc init() {\n\tt[\"ResolveMultipleUnresolvedVmfsVolumesRequestType\"] = reflect.TypeOf((*ResolveMultipleUnresolvedVmfsVolumesRequestType)(nil)).Elem()\n}\n\ntype ResolveMultipleUnresolvedVmfsVolumesResponse struct {\n\tReturnval []HostUnresolvedVmfsResolutionResult `xml:\"returnval,omitempty\"`\n}\n\ntype ResourceAllocationInfo struct {\n\tDynamicData\n\n\tReservation           int64       `xml:\"reservation,omitempty\"`\n\tExpandableReservation *bool       `xml:\"expandableReservation\"`\n\tLimit                 int64       `xml:\"limit,omitempty\"`\n\tShares                *SharesInfo `xml:\"shares,omitempty\"`\n\tOverheadLimit         int64       `xml:\"overheadLimit,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ResourceAllocationInfo\"] = reflect.TypeOf((*ResourceAllocationInfo)(nil)).Elem()\n}\n\ntype ResourceAllocationOption struct {\n\tDynamicData\n\n\tSharesOption SharesOption `xml:\"sharesOption\"`\n}\n\nfunc init() {\n\tt[\"ResourceAllocationOption\"] = reflect.TypeOf((*ResourceAllocationOption)(nil)).Elem()\n}\n\ntype ResourceConfigOption struct {\n\tDynamicData\n\n\tCpuAllocationOption    ResourceAllocationOption `xml:\"cpuAllocationOption\"`\n\tMemoryAllocationOption ResourceAllocationOption `xml:\"memoryAllocationOption\"`\n}\n\nfunc init() {\n\tt[\"ResourceConfigOption\"] = reflect.TypeOf((*ResourceConfigOption)(nil)).Elem()\n}\n\ntype ResourceConfigSpec struct {\n\tDynamicData\n\n\tEntity           *ManagedObjectReference    `xml:\"entity,omitempty\"`\n\tChangeVersion    string                     `xml:\"changeVersion,omitempty\"`\n\tLastModified     *time.Time                 `xml:\"lastModified\"`\n\tCpuAllocation    BaseResourceAllocationInfo `xml:\"cpuAllocation,typeattr\"`\n\tMemoryAllocation 
BaseResourceAllocationInfo `xml:\"memoryAllocation,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ResourceConfigSpec\"] = reflect.TypeOf((*ResourceConfigSpec)(nil)).Elem()\n}\n\ntype ResourceInUse struct {\n\tVimFault\n\n\tType string `xml:\"type,omitempty\"`\n\tName string `xml:\"name,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ResourceInUse\"] = reflect.TypeOf((*ResourceInUse)(nil)).Elem()\n}\n\ntype ResourceInUseFault BaseResourceInUse\n\nfunc init() {\n\tt[\"ResourceInUseFault\"] = reflect.TypeOf((*ResourceInUseFault)(nil)).Elem()\n}\n\ntype ResourceNotAvailable struct {\n\tVimFault\n\n\tContainerType string `xml:\"containerType,omitempty\"`\n\tContainerName string `xml:\"containerName,omitempty\"`\n\tType          string `xml:\"type,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ResourceNotAvailable\"] = reflect.TypeOf((*ResourceNotAvailable)(nil)).Elem()\n}\n\ntype ResourceNotAvailableFault ResourceNotAvailable\n\nfunc init() {\n\tt[\"ResourceNotAvailableFault\"] = reflect.TypeOf((*ResourceNotAvailableFault)(nil)).Elem()\n}\n\ntype ResourcePoolCreatedEvent struct {\n\tResourcePoolEvent\n\n\tParent ResourcePoolEventArgument `xml:\"parent\"`\n}\n\nfunc init() {\n\tt[\"ResourcePoolCreatedEvent\"] = reflect.TypeOf((*ResourcePoolCreatedEvent)(nil)).Elem()\n}\n\ntype ResourcePoolDestroyedEvent struct {\n\tResourcePoolEvent\n}\n\nfunc init() {\n\tt[\"ResourcePoolDestroyedEvent\"] = reflect.TypeOf((*ResourcePoolDestroyedEvent)(nil)).Elem()\n}\n\ntype ResourcePoolEvent struct {\n\tEvent\n\n\tResourcePool ResourcePoolEventArgument `xml:\"resourcePool\"`\n}\n\nfunc init() {\n\tt[\"ResourcePoolEvent\"] = reflect.TypeOf((*ResourcePoolEvent)(nil)).Elem()\n}\n\ntype ResourcePoolEventArgument struct {\n\tEntityEventArgument\n\n\tResourcePool ManagedObjectReference `xml:\"resourcePool\"`\n}\n\nfunc init() {\n\tt[\"ResourcePoolEventArgument\"] = reflect.TypeOf((*ResourcePoolEventArgument)(nil)).Elem()\n}\n\ntype ResourcePoolMovedEvent struct {\n\tResourcePoolEvent\n\n\tOldParent 
ResourcePoolEventArgument `xml:\"oldParent\"`\n\tNewParent ResourcePoolEventArgument `xml:\"newParent\"`\n}\n\nfunc init() {\n\tt[\"ResourcePoolMovedEvent\"] = reflect.TypeOf((*ResourcePoolMovedEvent)(nil)).Elem()\n}\n\ntype ResourcePoolQuickStats struct {\n\tDynamicData\n\n\tOverallCpuUsage              int64 `xml:\"overallCpuUsage,omitempty\"`\n\tOverallCpuDemand             int64 `xml:\"overallCpuDemand,omitempty\"`\n\tGuestMemoryUsage             int64 `xml:\"guestMemoryUsage,omitempty\"`\n\tHostMemoryUsage              int64 `xml:\"hostMemoryUsage,omitempty\"`\n\tDistributedCpuEntitlement    int64 `xml:\"distributedCpuEntitlement,omitempty\"`\n\tDistributedMemoryEntitlement int64 `xml:\"distributedMemoryEntitlement,omitempty\"`\n\tStaticCpuEntitlement         int32 `xml:\"staticCpuEntitlement,omitempty\"`\n\tStaticMemoryEntitlement      int32 `xml:\"staticMemoryEntitlement,omitempty\"`\n\tPrivateMemory                int64 `xml:\"privateMemory,omitempty\"`\n\tSharedMemory                 int64 `xml:\"sharedMemory,omitempty\"`\n\tSwappedMemory                int64 `xml:\"swappedMemory,omitempty\"`\n\tBalloonedMemory              int64 `xml:\"balloonedMemory,omitempty\"`\n\tOverheadMemory               int64 `xml:\"overheadMemory,omitempty\"`\n\tConsumedOverheadMemory       int64 `xml:\"consumedOverheadMemory,omitempty\"`\n\tCompressedMemory             int64 `xml:\"compressedMemory,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ResourcePoolQuickStats\"] = reflect.TypeOf((*ResourcePoolQuickStats)(nil)).Elem()\n}\n\ntype ResourcePoolReconfiguredEvent struct {\n\tResourcePoolEvent\n\n\tConfigChanges *ChangesInfoEventArgument `xml:\"configChanges,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ResourcePoolReconfiguredEvent\"] = reflect.TypeOf((*ResourcePoolReconfiguredEvent)(nil)).Elem()\n}\n\ntype ResourcePoolResourceUsage struct {\n\tDynamicData\n\n\tReservationUsed      int64 `xml:\"reservationUsed\"`\n\tReservationUsedForVm int64 
`xml:\"reservationUsedForVm\"`\n\tUnreservedForPool    int64 `xml:\"unreservedForPool\"`\n\tUnreservedForVm      int64 `xml:\"unreservedForVm\"`\n\tOverallUsage         int64 `xml:\"overallUsage\"`\n\tMaxUsage             int64 `xml:\"maxUsage\"`\n}\n\nfunc init() {\n\tt[\"ResourcePoolResourceUsage\"] = reflect.TypeOf((*ResourcePoolResourceUsage)(nil)).Elem()\n}\n\ntype ResourcePoolRuntimeInfo struct {\n\tDynamicData\n\n\tMemory        ResourcePoolResourceUsage `xml:\"memory\"`\n\tCpu           ResourcePoolResourceUsage `xml:\"cpu\"`\n\tOverallStatus ManagedEntityStatus       `xml:\"overallStatus\"`\n}\n\nfunc init() {\n\tt[\"ResourcePoolRuntimeInfo\"] = reflect.TypeOf((*ResourcePoolRuntimeInfo)(nil)).Elem()\n}\n\ntype ResourcePoolSummary struct {\n\tDynamicData\n\n\tName               string                  `xml:\"name\"`\n\tConfig             ResourceConfigSpec      `xml:\"config\"`\n\tRuntime            ResourcePoolRuntimeInfo `xml:\"runtime\"`\n\tQuickStats         *ResourcePoolQuickStats `xml:\"quickStats,omitempty\"`\n\tConfiguredMemoryMB int32                   `xml:\"configuredMemoryMB,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ResourcePoolSummary\"] = reflect.TypeOf((*ResourcePoolSummary)(nil)).Elem()\n}\n\ntype ResourceViolatedEvent struct {\n\tResourcePoolEvent\n}\n\nfunc init() {\n\tt[\"ResourceViolatedEvent\"] = reflect.TypeOf((*ResourceViolatedEvent)(nil)).Elem()\n}\n\ntype RestartService RestartServiceRequestType\n\nfunc init() {\n\tt[\"RestartService\"] = reflect.TypeOf((*RestartService)(nil)).Elem()\n}\n\ntype RestartServiceConsoleVirtualNic RestartServiceConsoleVirtualNicRequestType\n\nfunc init() {\n\tt[\"RestartServiceConsoleVirtualNic\"] = reflect.TypeOf((*RestartServiceConsoleVirtualNic)(nil)).Elem()\n}\n\ntype RestartServiceConsoleVirtualNicRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tDevice string                 `xml:\"device\"`\n}\n\nfunc init() {\n\tt[\"RestartServiceConsoleVirtualNicRequestType\"] = 
reflect.TypeOf((*RestartServiceConsoleVirtualNicRequestType)(nil)).Elem()\n}\n\ntype RestartServiceConsoleVirtualNicResponse struct {\n}\n\ntype RestartServiceRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tId   string                 `xml:\"id\"`\n}\n\nfunc init() {\n\tt[\"RestartServiceRequestType\"] = reflect.TypeOf((*RestartServiceRequestType)(nil)).Elem()\n}\n\ntype RestartServiceResponse struct {\n}\n\ntype RestoreFirmwareConfiguration RestoreFirmwareConfigurationRequestType\n\nfunc init() {\n\tt[\"RestoreFirmwareConfiguration\"] = reflect.TypeOf((*RestoreFirmwareConfiguration)(nil)).Elem()\n}\n\ntype RestoreFirmwareConfigurationRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tForce bool                   `xml:\"force\"`\n}\n\nfunc init() {\n\tt[\"RestoreFirmwareConfigurationRequestType\"] = reflect.TypeOf((*RestoreFirmwareConfigurationRequestType)(nil)).Elem()\n}\n\ntype RestoreFirmwareConfigurationResponse struct {\n}\n\ntype RestrictedByAdministrator struct {\n\tRuntimeFault\n\n\tDetails string `xml:\"details\"`\n}\n\nfunc init() {\n\tt[\"RestrictedByAdministrator\"] = reflect.TypeOf((*RestrictedByAdministrator)(nil)).Elem()\n}\n\ntype RestrictedByAdministratorFault RestrictedByAdministrator\n\nfunc init() {\n\tt[\"RestrictedByAdministratorFault\"] = reflect.TypeOf((*RestrictedByAdministratorFault)(nil)).Elem()\n}\n\ntype RestrictedVersion struct {\n\tSecurityError\n}\n\nfunc init() {\n\tt[\"RestrictedVersion\"] = reflect.TypeOf((*RestrictedVersion)(nil)).Elem()\n}\n\ntype RestrictedVersionFault RestrictedVersion\n\nfunc init() {\n\tt[\"RestrictedVersionFault\"] = reflect.TypeOf((*RestrictedVersionFault)(nil)).Elem()\n}\n\ntype RetrieveAllPermissions RetrieveAllPermissionsRequestType\n\nfunc init() {\n\tt[\"RetrieveAllPermissions\"] = reflect.TypeOf((*RetrieveAllPermissions)(nil)).Elem()\n}\n\ntype RetrieveAllPermissionsRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() 
{\n\tt[\"RetrieveAllPermissionsRequestType\"] = reflect.TypeOf((*RetrieveAllPermissionsRequestType)(nil)).Elem()\n}\n\ntype RetrieveAllPermissionsResponse struct {\n\tReturnval []Permission `xml:\"returnval,omitempty\"`\n}\n\ntype RetrieveAnswerFile RetrieveAnswerFileRequestType\n\nfunc init() {\n\tt[\"RetrieveAnswerFile\"] = reflect.TypeOf((*RetrieveAnswerFile)(nil)).Elem()\n}\n\ntype RetrieveAnswerFileForProfile RetrieveAnswerFileForProfileRequestType\n\nfunc init() {\n\tt[\"RetrieveAnswerFileForProfile\"] = reflect.TypeOf((*RetrieveAnswerFileForProfile)(nil)).Elem()\n}\n\ntype RetrieveAnswerFileForProfileRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tHost         ManagedObjectReference `xml:\"host\"`\n\tApplyProfile HostApplyProfile       `xml:\"applyProfile\"`\n}\n\nfunc init() {\n\tt[\"RetrieveAnswerFileForProfileRequestType\"] = reflect.TypeOf((*RetrieveAnswerFileForProfileRequestType)(nil)).Elem()\n}\n\ntype RetrieveAnswerFileForProfileResponse struct {\n\tReturnval *AnswerFile `xml:\"returnval,omitempty\"`\n}\n\ntype RetrieveAnswerFileRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tHost ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"RetrieveAnswerFileRequestType\"] = reflect.TypeOf((*RetrieveAnswerFileRequestType)(nil)).Elem()\n}\n\ntype RetrieveAnswerFileResponse struct {\n\tReturnval *AnswerFile `xml:\"returnval,omitempty\"`\n}\n\ntype RetrieveArgumentDescription RetrieveArgumentDescriptionRequestType\n\nfunc init() {\n\tt[\"RetrieveArgumentDescription\"] = reflect.TypeOf((*RetrieveArgumentDescription)(nil)).Elem()\n}\n\ntype RetrieveArgumentDescriptionRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tEventTypeId string                 `xml:\"eventTypeId\"`\n}\n\nfunc init() {\n\tt[\"RetrieveArgumentDescriptionRequestType\"] = reflect.TypeOf((*RetrieveArgumentDescriptionRequestType)(nil)).Elem()\n}\n\ntype RetrieveArgumentDescriptionResponse struct 
{\n\tReturnval []EventArgDesc `xml:\"returnval,omitempty\"`\n}\n\ntype RetrieveClientCert RetrieveClientCertRequestType\n\nfunc init() {\n\tt[\"RetrieveClientCert\"] = reflect.TypeOf((*RetrieveClientCert)(nil)).Elem()\n}\n\ntype RetrieveClientCertRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tCluster KeyProviderId          `xml:\"cluster\"`\n}\n\nfunc init() {\n\tt[\"RetrieveClientCertRequestType\"] = reflect.TypeOf((*RetrieveClientCertRequestType)(nil)).Elem()\n}\n\ntype RetrieveClientCertResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype RetrieveClientCsr RetrieveClientCsrRequestType\n\nfunc init() {\n\tt[\"RetrieveClientCsr\"] = reflect.TypeOf((*RetrieveClientCsr)(nil)).Elem()\n}\n\ntype RetrieveClientCsrRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tCluster KeyProviderId          `xml:\"cluster\"`\n}\n\nfunc init() {\n\tt[\"RetrieveClientCsrRequestType\"] = reflect.TypeOf((*RetrieveClientCsrRequestType)(nil)).Elem()\n}\n\ntype RetrieveClientCsrResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype RetrieveDasAdvancedRuntimeInfo RetrieveDasAdvancedRuntimeInfoRequestType\n\nfunc init() {\n\tt[\"RetrieveDasAdvancedRuntimeInfo\"] = reflect.TypeOf((*RetrieveDasAdvancedRuntimeInfo)(nil)).Elem()\n}\n\ntype RetrieveDasAdvancedRuntimeInfoRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RetrieveDasAdvancedRuntimeInfoRequestType\"] = reflect.TypeOf((*RetrieveDasAdvancedRuntimeInfoRequestType)(nil)).Elem()\n}\n\ntype RetrieveDasAdvancedRuntimeInfoResponse struct {\n\tReturnval BaseClusterDasAdvancedRuntimeInfo `xml:\"returnval,omitempty,typeattr\"`\n}\n\ntype RetrieveDescription RetrieveDescriptionRequestType\n\nfunc init() {\n\tt[\"RetrieveDescription\"] = reflect.TypeOf((*RetrieveDescription)(nil)).Elem()\n}\n\ntype RetrieveDescriptionRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() 
{\n\tt[\"RetrieveDescriptionRequestType\"] = reflect.TypeOf((*RetrieveDescriptionRequestType)(nil)).Elem()\n}\n\ntype RetrieveDescriptionResponse struct {\n\tReturnval *ProfileDescription `xml:\"returnval,omitempty\"`\n}\n\ntype RetrieveDiskPartitionInfo RetrieveDiskPartitionInfoRequestType\n\nfunc init() {\n\tt[\"RetrieveDiskPartitionInfo\"] = reflect.TypeOf((*RetrieveDiskPartitionInfo)(nil)).Elem()\n}\n\ntype RetrieveDiskPartitionInfoRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tDevicePath []string               `xml:\"devicePath\"`\n}\n\nfunc init() {\n\tt[\"RetrieveDiskPartitionInfoRequestType\"] = reflect.TypeOf((*RetrieveDiskPartitionInfoRequestType)(nil)).Elem()\n}\n\ntype RetrieveDiskPartitionInfoResponse struct {\n\tReturnval []HostDiskPartitionInfo `xml:\"returnval,omitempty\"`\n}\n\ntype RetrieveEntityPermissions RetrieveEntityPermissionsRequestType\n\nfunc init() {\n\tt[\"RetrieveEntityPermissions\"] = reflect.TypeOf((*RetrieveEntityPermissions)(nil)).Elem()\n}\n\ntype RetrieveEntityPermissionsRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tEntity    ManagedObjectReference `xml:\"entity\"`\n\tInherited bool                   `xml:\"inherited\"`\n}\n\nfunc init() {\n\tt[\"RetrieveEntityPermissionsRequestType\"] = reflect.TypeOf((*RetrieveEntityPermissionsRequestType)(nil)).Elem()\n}\n\ntype RetrieveEntityPermissionsResponse struct {\n\tReturnval []Permission `xml:\"returnval,omitempty\"`\n}\n\ntype RetrieveEntityScheduledTask RetrieveEntityScheduledTaskRequestType\n\nfunc init() {\n\tt[\"RetrieveEntityScheduledTask\"] = reflect.TypeOf((*RetrieveEntityScheduledTask)(nil)).Elem()\n}\n\ntype RetrieveEntityScheduledTaskRequestType struct {\n\tThis   ManagedObjectReference  `xml:\"_this\"`\n\tEntity *ManagedObjectReference `xml:\"entity,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RetrieveEntityScheduledTaskRequestType\"] = reflect.TypeOf((*RetrieveEntityScheduledTaskRequestType)(nil)).Elem()\n}\n\ntype 
RetrieveEntityScheduledTaskResponse struct {\n\tReturnval []ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype RetrieveHardwareUptime RetrieveHardwareUptimeRequestType\n\nfunc init() {\n\tt[\"RetrieveHardwareUptime\"] = reflect.TypeOf((*RetrieveHardwareUptime)(nil)).Elem()\n}\n\ntype RetrieveHardwareUptimeRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RetrieveHardwareUptimeRequestType\"] = reflect.TypeOf((*RetrieveHardwareUptimeRequestType)(nil)).Elem()\n}\n\ntype RetrieveHardwareUptimeResponse struct {\n\tReturnval int64 `xml:\"returnval\"`\n}\n\ntype RetrieveHostAccessControlEntries RetrieveHostAccessControlEntriesRequestType\n\nfunc init() {\n\tt[\"RetrieveHostAccessControlEntries\"] = reflect.TypeOf((*RetrieveHostAccessControlEntries)(nil)).Elem()\n}\n\ntype RetrieveHostAccessControlEntriesRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RetrieveHostAccessControlEntriesRequestType\"] = reflect.TypeOf((*RetrieveHostAccessControlEntriesRequestType)(nil)).Elem()\n}\n\ntype RetrieveHostAccessControlEntriesResponse struct {\n\tReturnval []HostAccessControlEntry `xml:\"returnval,omitempty\"`\n}\n\ntype RetrieveHostCustomizations RetrieveHostCustomizationsRequestType\n\nfunc init() {\n\tt[\"RetrieveHostCustomizations\"] = reflect.TypeOf((*RetrieveHostCustomizations)(nil)).Elem()\n}\n\ntype RetrieveHostCustomizationsForProfile RetrieveHostCustomizationsForProfileRequestType\n\nfunc init() {\n\tt[\"RetrieveHostCustomizationsForProfile\"] = reflect.TypeOf((*RetrieveHostCustomizationsForProfile)(nil)).Elem()\n}\n\ntype RetrieveHostCustomizationsForProfileRequestType struct {\n\tThis         ManagedObjectReference   `xml:\"_this\"`\n\tHosts        []ManagedObjectReference `xml:\"hosts,omitempty\"`\n\tApplyProfile HostApplyProfile         `xml:\"applyProfile\"`\n}\n\nfunc init() {\n\tt[\"RetrieveHostCustomizationsForProfileRequestType\"] = 
reflect.TypeOf((*RetrieveHostCustomizationsForProfileRequestType)(nil)).Elem()\n}\n\ntype RetrieveHostCustomizationsForProfileResponse struct {\n\tReturnval []StructuredCustomizations `xml:\"returnval,omitempty\"`\n}\n\ntype RetrieveHostCustomizationsRequestType struct {\n\tThis  ManagedObjectReference   `xml:\"_this\"`\n\tHosts []ManagedObjectReference `xml:\"hosts,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RetrieveHostCustomizationsRequestType\"] = reflect.TypeOf((*RetrieveHostCustomizationsRequestType)(nil)).Elem()\n}\n\ntype RetrieveHostCustomizationsResponse struct {\n\tReturnval []StructuredCustomizations `xml:\"returnval,omitempty\"`\n}\n\ntype RetrieveHostSpecification RetrieveHostSpecificationRequestType\n\nfunc init() {\n\tt[\"RetrieveHostSpecification\"] = reflect.TypeOf((*RetrieveHostSpecification)(nil)).Elem()\n}\n\ntype RetrieveHostSpecificationRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tHost     ManagedObjectReference `xml:\"host\"`\n\tFromHost bool                   `xml:\"fromHost\"`\n}\n\nfunc init() {\n\tt[\"RetrieveHostSpecificationRequestType\"] = reflect.TypeOf((*RetrieveHostSpecificationRequestType)(nil)).Elem()\n}\n\ntype RetrieveHostSpecificationResponse struct {\n\tReturnval HostSpecification `xml:\"returnval\"`\n}\n\ntype RetrieveKmipServerCert RetrieveKmipServerCertRequestType\n\nfunc init() {\n\tt[\"RetrieveKmipServerCert\"] = reflect.TypeOf((*RetrieveKmipServerCert)(nil)).Elem()\n}\n\ntype RetrieveKmipServerCertRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tKeyProvider KeyProviderId          `xml:\"keyProvider\"`\n\tServer      KmipServerInfo         `xml:\"server\"`\n}\n\nfunc init() {\n\tt[\"RetrieveKmipServerCertRequestType\"] = reflect.TypeOf((*RetrieveKmipServerCertRequestType)(nil)).Elem()\n}\n\ntype RetrieveKmipServerCertResponse struct {\n\tReturnval CryptoManagerKmipServerCertInfo `xml:\"returnval\"`\n}\n\ntype RetrieveKmipServersStatusRequestType struct {\n\tThis     
ManagedObjectReference `xml:\"_this\"`\n\tClusters []KmipClusterInfo      `xml:\"clusters,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RetrieveKmipServersStatusRequestType\"] = reflect.TypeOf((*RetrieveKmipServersStatusRequestType)(nil)).Elem()\n}\n\ntype RetrieveKmipServersStatus_Task RetrieveKmipServersStatusRequestType\n\nfunc init() {\n\tt[\"RetrieveKmipServersStatus_Task\"] = reflect.TypeOf((*RetrieveKmipServersStatus_Task)(nil)).Elem()\n}\n\ntype RetrieveKmipServersStatus_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype RetrieveObjectScheduledTask RetrieveObjectScheduledTaskRequestType\n\nfunc init() {\n\tt[\"RetrieveObjectScheduledTask\"] = reflect.TypeOf((*RetrieveObjectScheduledTask)(nil)).Elem()\n}\n\ntype RetrieveObjectScheduledTaskRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tObj  *ManagedObjectReference `xml:\"obj,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RetrieveObjectScheduledTaskRequestType\"] = reflect.TypeOf((*RetrieveObjectScheduledTaskRequestType)(nil)).Elem()\n}\n\ntype RetrieveObjectScheduledTaskResponse struct {\n\tReturnval []ManagedObjectReference `xml:\"returnval,omitempty\"`\n}\n\ntype RetrieveOptions struct {\n\tDynamicData\n\n\tMaxObjects int32 `xml:\"maxObjects,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RetrieveOptions\"] = reflect.TypeOf((*RetrieveOptions)(nil)).Elem()\n}\n\ntype RetrieveProductComponents RetrieveProductComponentsRequestType\n\nfunc init() {\n\tt[\"RetrieveProductComponents\"] = reflect.TypeOf((*RetrieveProductComponents)(nil)).Elem()\n}\n\ntype RetrieveProductComponentsRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RetrieveProductComponentsRequestType\"] = reflect.TypeOf((*RetrieveProductComponentsRequestType)(nil)).Elem()\n}\n\ntype RetrieveProductComponentsResponse struct {\n\tReturnval []ProductComponentInfo `xml:\"returnval,omitempty\"`\n}\n\ntype RetrieveProperties RetrievePropertiesRequestType\n\nfunc init() 
{\n\tt[\"RetrieveProperties\"] = reflect.TypeOf((*RetrieveProperties)(nil)).Elem()\n}\n\ntype RetrievePropertiesEx RetrievePropertiesExRequestType\n\nfunc init() {\n\tt[\"RetrievePropertiesEx\"] = reflect.TypeOf((*RetrievePropertiesEx)(nil)).Elem()\n}\n\ntype RetrievePropertiesExRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tSpecSet []PropertyFilterSpec   `xml:\"specSet\"`\n\tOptions RetrieveOptions        `xml:\"options\"`\n}\n\nfunc init() {\n\tt[\"RetrievePropertiesExRequestType\"] = reflect.TypeOf((*RetrievePropertiesExRequestType)(nil)).Elem()\n}\n\ntype RetrievePropertiesExResponse struct {\n\tReturnval *RetrieveResult `xml:\"returnval,omitempty\"`\n}\n\ntype RetrievePropertiesRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tSpecSet []PropertyFilterSpec   `xml:\"specSet\"`\n}\n\nfunc init() {\n\tt[\"RetrievePropertiesRequestType\"] = reflect.TypeOf((*RetrievePropertiesRequestType)(nil)).Elem()\n}\n\ntype RetrievePropertiesResponse struct {\n\tReturnval []ObjectContent `xml:\"returnval,omitempty\"`\n}\n\ntype RetrieveResult struct {\n\tDynamicData\n\n\tToken   string          `xml:\"token,omitempty\"`\n\tObjects []ObjectContent `xml:\"objects\"`\n}\n\nfunc init() {\n\tt[\"RetrieveResult\"] = reflect.TypeOf((*RetrieveResult)(nil)).Elem()\n}\n\ntype RetrieveRolePermissions RetrieveRolePermissionsRequestType\n\nfunc init() {\n\tt[\"RetrieveRolePermissions\"] = reflect.TypeOf((*RetrieveRolePermissions)(nil)).Elem()\n}\n\ntype RetrieveRolePermissionsRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tRoleId int32                  `xml:\"roleId\"`\n}\n\nfunc init() {\n\tt[\"RetrieveRolePermissionsRequestType\"] = reflect.TypeOf((*RetrieveRolePermissionsRequestType)(nil)).Elem()\n}\n\ntype RetrieveRolePermissionsResponse struct {\n\tReturnval []Permission `xml:\"returnval,omitempty\"`\n}\n\ntype RetrieveSelfSignedClientCert RetrieveSelfSignedClientCertRequestType\n\nfunc init() 
{\n\tt[\"RetrieveSelfSignedClientCert\"] = reflect.TypeOf((*RetrieveSelfSignedClientCert)(nil)).Elem()\n}\n\ntype RetrieveSelfSignedClientCertRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tCluster KeyProviderId          `xml:\"cluster\"`\n}\n\nfunc init() {\n\tt[\"RetrieveSelfSignedClientCertRequestType\"] = reflect.TypeOf((*RetrieveSelfSignedClientCertRequestType)(nil)).Elem()\n}\n\ntype RetrieveSelfSignedClientCertResponse struct {\n\tReturnval string `xml:\"returnval\"`\n}\n\ntype RetrieveServiceContent RetrieveServiceContentRequestType\n\nfunc init() {\n\tt[\"RetrieveServiceContent\"] = reflect.TypeOf((*RetrieveServiceContent)(nil)).Elem()\n}\n\ntype RetrieveServiceContentRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RetrieveServiceContentRequestType\"] = reflect.TypeOf((*RetrieveServiceContentRequestType)(nil)).Elem()\n}\n\ntype RetrieveServiceContentResponse struct {\n\tReturnval ServiceContent `xml:\"returnval\"`\n}\n\ntype RetrieveUserGroups RetrieveUserGroupsRequestType\n\nfunc init() {\n\tt[\"RetrieveUserGroups\"] = reflect.TypeOf((*RetrieveUserGroups)(nil)).Elem()\n}\n\ntype RetrieveUserGroupsRequestType struct {\n\tThis           ManagedObjectReference `xml:\"_this\"`\n\tDomain         string                 `xml:\"domain,omitempty\"`\n\tSearchStr      string                 `xml:\"searchStr\"`\n\tBelongsToGroup string                 `xml:\"belongsToGroup,omitempty\"`\n\tBelongsToUser  string                 `xml:\"belongsToUser,omitempty\"`\n\tExactMatch     bool                   `xml:\"exactMatch\"`\n\tFindUsers      bool                   `xml:\"findUsers\"`\n\tFindGroups     bool                   `xml:\"findGroups\"`\n}\n\nfunc init() {\n\tt[\"RetrieveUserGroupsRequestType\"] = reflect.TypeOf((*RetrieveUserGroupsRequestType)(nil)).Elem()\n}\n\ntype RetrieveUserGroupsResponse struct {\n\tReturnval []BaseUserSearchResult `xml:\"returnval,omitempty,typeattr\"`\n}\n\ntype 
RetrieveVStorageObject RetrieveVStorageObjectRequestType\n\nfunc init() {\n\tt[\"RetrieveVStorageObject\"] = reflect.TypeOf((*RetrieveVStorageObject)(nil)).Elem()\n}\n\ntype RetrieveVStorageObjectRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tId        ID                     `xml:\"id\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"RetrieveVStorageObjectRequestType\"] = reflect.TypeOf((*RetrieveVStorageObjectRequestType)(nil)).Elem()\n}\n\ntype RetrieveVStorageObjectResponse struct {\n\tReturnval VStorageObject `xml:\"returnval\"`\n}\n\ntype RetrieveVStorageObjectState RetrieveVStorageObjectStateRequestType\n\nfunc init() {\n\tt[\"RetrieveVStorageObjectState\"] = reflect.TypeOf((*RetrieveVStorageObjectState)(nil)).Elem()\n}\n\ntype RetrieveVStorageObjectStateRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tId        ID                     `xml:\"id\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"RetrieveVStorageObjectStateRequestType\"] = reflect.TypeOf((*RetrieveVStorageObjectStateRequestType)(nil)).Elem()\n}\n\ntype RetrieveVStorageObjectStateResponse struct {\n\tReturnval VStorageObjectStateInfo `xml:\"returnval\"`\n}\n\ntype RevertToCurrentSnapshotRequestType struct {\n\tThis            ManagedObjectReference  `xml:\"_this\"`\n\tHost            *ManagedObjectReference `xml:\"host,omitempty\"`\n\tSuppressPowerOn *bool                   `xml:\"suppressPowerOn\"`\n}\n\nfunc init() {\n\tt[\"RevertToCurrentSnapshotRequestType\"] = reflect.TypeOf((*RevertToCurrentSnapshotRequestType)(nil)).Elem()\n}\n\ntype RevertToCurrentSnapshot_Task RevertToCurrentSnapshotRequestType\n\nfunc init() {\n\tt[\"RevertToCurrentSnapshot_Task\"] = reflect.TypeOf((*RevertToCurrentSnapshot_Task)(nil)).Elem()\n}\n\ntype RevertToCurrentSnapshot_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype RevertToSnapshotRequestType 
struct {\n\tThis            ManagedObjectReference  `xml:\"_this\"`\n\tHost            *ManagedObjectReference `xml:\"host,omitempty\"`\n\tSuppressPowerOn *bool                   `xml:\"suppressPowerOn\"`\n}\n\nfunc init() {\n\tt[\"RevertToSnapshotRequestType\"] = reflect.TypeOf((*RevertToSnapshotRequestType)(nil)).Elem()\n}\n\ntype RevertToSnapshot_Task RevertToSnapshotRequestType\n\nfunc init() {\n\tt[\"RevertToSnapshot_Task\"] = reflect.TypeOf((*RevertToSnapshot_Task)(nil)).Elem()\n}\n\ntype RevertToSnapshot_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype RewindCollector RewindCollectorRequestType\n\nfunc init() {\n\tt[\"RewindCollector\"] = reflect.TypeOf((*RewindCollector)(nil)).Elem()\n}\n\ntype RewindCollectorRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RewindCollectorRequestType\"] = reflect.TypeOf((*RewindCollectorRequestType)(nil)).Elem()\n}\n\ntype RewindCollectorResponse struct {\n}\n\ntype RoleAddedEvent struct {\n\tRoleEvent\n\n\tPrivilegeList []string `xml:\"privilegeList,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RoleAddedEvent\"] = reflect.TypeOf((*RoleAddedEvent)(nil)).Elem()\n}\n\ntype RoleEvent struct {\n\tAuthorizationEvent\n\n\tRole RoleEventArgument `xml:\"role\"`\n}\n\nfunc init() {\n\tt[\"RoleEvent\"] = reflect.TypeOf((*RoleEvent)(nil)).Elem()\n}\n\ntype RoleEventArgument struct {\n\tEventArgument\n\n\tRoleId int32  `xml:\"roleId\"`\n\tName   string `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"RoleEventArgument\"] = reflect.TypeOf((*RoleEventArgument)(nil)).Elem()\n}\n\ntype RoleRemovedEvent struct {\n\tRoleEvent\n}\n\nfunc init() {\n\tt[\"RoleRemovedEvent\"] = reflect.TypeOf((*RoleRemovedEvent)(nil)).Elem()\n}\n\ntype RoleUpdatedEvent struct {\n\tRoleEvent\n\n\tPrivilegeList     []string `xml:\"privilegeList,omitempty\"`\n\tPrevRoleName      string   `xml:\"prevRoleName,omitempty\"`\n\tPrivilegesAdded   []string 
`xml:\"privilegesAdded,omitempty\"`\n\tPrivilegesRemoved []string `xml:\"privilegesRemoved,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RoleUpdatedEvent\"] = reflect.TypeOf((*RoleUpdatedEvent)(nil)).Elem()\n}\n\ntype RollbackEvent struct {\n\tDvsEvent\n\n\tHostName   string `xml:\"hostName\"`\n\tMethodName string `xml:\"methodName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RollbackEvent\"] = reflect.TypeOf((*RollbackEvent)(nil)).Elem()\n}\n\ntype RollbackFailure struct {\n\tDvsFault\n\n\tEntityName string `xml:\"entityName\"`\n\tEntityType string `xml:\"entityType\"`\n}\n\nfunc init() {\n\tt[\"RollbackFailure\"] = reflect.TypeOf((*RollbackFailure)(nil)).Elem()\n}\n\ntype RollbackFailureFault RollbackFailure\n\nfunc init() {\n\tt[\"RollbackFailureFault\"] = reflect.TypeOf((*RollbackFailureFault)(nil)).Elem()\n}\n\ntype RuleViolation struct {\n\tVmConfigFault\n\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n\tRule BaseClusterRuleInfo     `xml:\"rule,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"RuleViolation\"] = reflect.TypeOf((*RuleViolation)(nil)).Elem()\n}\n\ntype RuleViolationFault RuleViolation\n\nfunc init() {\n\tt[\"RuleViolationFault\"] = reflect.TypeOf((*RuleViolationFault)(nil)).Elem()\n}\n\ntype RunScheduledTask RunScheduledTaskRequestType\n\nfunc init() {\n\tt[\"RunScheduledTask\"] = reflect.TypeOf((*RunScheduledTask)(nil)).Elem()\n}\n\ntype RunScheduledTaskRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"RunScheduledTaskRequestType\"] = reflect.TypeOf((*RunScheduledTaskRequestType)(nil)).Elem()\n}\n\ntype RunScheduledTaskResponse struct {\n}\n\ntype RunScriptAction struct {\n\tAction\n\n\tScript string `xml:\"script\"`\n}\n\nfunc init() {\n\tt[\"RunScriptAction\"] = reflect.TypeOf((*RunScriptAction)(nil)).Elem()\n}\n\ntype RunVsanPhysicalDiskDiagnostics RunVsanPhysicalDiskDiagnosticsRequestType\n\nfunc init() {\n\tt[\"RunVsanPhysicalDiskDiagnostics\"] = 
reflect.TypeOf((*RunVsanPhysicalDiskDiagnostics)(nil)).Elem()\n}\n\ntype RunVsanPhysicalDiskDiagnosticsRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tDisks []string               `xml:\"disks,omitempty\"`\n}\n\nfunc init() {\n\tt[\"RunVsanPhysicalDiskDiagnosticsRequestType\"] = reflect.TypeOf((*RunVsanPhysicalDiskDiagnosticsRequestType)(nil)).Elem()\n}\n\ntype RunVsanPhysicalDiskDiagnosticsResponse struct {\n\tReturnval []HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult `xml:\"returnval\"`\n}\n\ntype RuntimeFault struct {\n\tMethodFault\n}\n\nfunc init() {\n\tt[\"RuntimeFault\"] = reflect.TypeOf((*RuntimeFault)(nil)).Elem()\n}\n\ntype RuntimeFaultFault BaseRuntimeFault\n\nfunc init() {\n\tt[\"RuntimeFaultFault\"] = reflect.TypeOf((*RuntimeFaultFault)(nil)).Elem()\n}\n\ntype SAMLTokenAuthentication struct {\n\tGuestAuthentication\n\n\tToken    string `xml:\"token\"`\n\tUsername string `xml:\"username,omitempty\"`\n}\n\nfunc init() {\n\tt[\"SAMLTokenAuthentication\"] = reflect.TypeOf((*SAMLTokenAuthentication)(nil)).Elem()\n}\n\ntype SSLDisabledFault struct {\n\tHostConnectFault\n}\n\nfunc init() {\n\tt[\"SSLDisabledFault\"] = reflect.TypeOf((*SSLDisabledFault)(nil)).Elem()\n}\n\ntype SSLDisabledFaultFault SSLDisabledFault\n\nfunc init() {\n\tt[\"SSLDisabledFaultFault\"] = reflect.TypeOf((*SSLDisabledFaultFault)(nil)).Elem()\n}\n\ntype SSLVerifyFault struct {\n\tHostConnectFault\n\n\tSelfSigned bool   `xml:\"selfSigned\"`\n\tThumbprint string `xml:\"thumbprint\"`\n}\n\nfunc init() {\n\tt[\"SSLVerifyFault\"] = reflect.TypeOf((*SSLVerifyFault)(nil)).Elem()\n}\n\ntype SSLVerifyFaultFault SSLVerifyFault\n\nfunc init() {\n\tt[\"SSLVerifyFaultFault\"] = reflect.TypeOf((*SSLVerifyFaultFault)(nil)).Elem()\n}\n\ntype SSPIAuthentication struct {\n\tGuestAuthentication\n\n\tSspiToken string `xml:\"sspiToken\"`\n}\n\nfunc init() {\n\tt[\"SSPIAuthentication\"] = reflect.TypeOf((*SSPIAuthentication)(nil)).Elem()\n}\n\ntype SSPIChallenge struct 
{\n\tVimFault\n\n\tBase64Token string `xml:\"base64Token\"`\n}\n\nfunc init() {\n\tt[\"SSPIChallenge\"] = reflect.TypeOf((*SSPIChallenge)(nil)).Elem()\n}\n\ntype SSPIChallengeFault SSPIChallenge\n\nfunc init() {\n\tt[\"SSPIChallengeFault\"] = reflect.TypeOf((*SSPIChallengeFault)(nil)).Elem()\n}\n\ntype ScanHostPatchRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tRepository HostPatchManagerLocator `xml:\"repository\"`\n\tUpdateID   []string                `xml:\"updateID,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ScanHostPatchRequestType\"] = reflect.TypeOf((*ScanHostPatchRequestType)(nil)).Elem()\n}\n\ntype ScanHostPatchV2RequestType struct {\n\tThis       ManagedObjectReference                     `xml:\"_this\"`\n\tMetaUrls   []string                                   `xml:\"metaUrls,omitempty\"`\n\tBundleUrls []string                                   `xml:\"bundleUrls,omitempty\"`\n\tSpec       *HostPatchManagerPatchManagerOperationSpec `xml:\"spec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ScanHostPatchV2RequestType\"] = reflect.TypeOf((*ScanHostPatchV2RequestType)(nil)).Elem()\n}\n\ntype ScanHostPatchV2_Task ScanHostPatchV2RequestType\n\nfunc init() {\n\tt[\"ScanHostPatchV2_Task\"] = reflect.TypeOf((*ScanHostPatchV2_Task)(nil)).Elem()\n}\n\ntype ScanHostPatchV2_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ScanHostPatch_Task ScanHostPatchRequestType\n\nfunc init() {\n\tt[\"ScanHostPatch_Task\"] = reflect.TypeOf((*ScanHostPatch_Task)(nil)).Elem()\n}\n\ntype ScanHostPatch_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ScheduleReconcileDatastoreInventory ScheduleReconcileDatastoreInventoryRequestType\n\nfunc init() {\n\tt[\"ScheduleReconcileDatastoreInventory\"] = reflect.TypeOf((*ScheduleReconcileDatastoreInventory)(nil)).Elem()\n}\n\ntype ScheduleReconcileDatastoreInventoryRequestType struct {\n\tThis      ManagedObjectReference 
`xml:\"_this\"`\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"ScheduleReconcileDatastoreInventoryRequestType\"] = reflect.TypeOf((*ScheduleReconcileDatastoreInventoryRequestType)(nil)).Elem()\n}\n\ntype ScheduleReconcileDatastoreInventoryResponse struct {\n}\n\ntype ScheduledHardwareUpgradeInfo struct {\n\tDynamicData\n\n\tUpgradePolicy                  string                `xml:\"upgradePolicy,omitempty\"`\n\tVersionKey                     string                `xml:\"versionKey,omitempty\"`\n\tScheduledHardwareUpgradeStatus string                `xml:\"scheduledHardwareUpgradeStatus,omitempty\"`\n\tFault                          *LocalizedMethodFault `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ScheduledHardwareUpgradeInfo\"] = reflect.TypeOf((*ScheduledHardwareUpgradeInfo)(nil)).Elem()\n}\n\ntype ScheduledTaskCompletedEvent struct {\n\tScheduledTaskEvent\n}\n\nfunc init() {\n\tt[\"ScheduledTaskCompletedEvent\"] = reflect.TypeOf((*ScheduledTaskCompletedEvent)(nil)).Elem()\n}\n\ntype ScheduledTaskCreatedEvent struct {\n\tScheduledTaskEvent\n}\n\nfunc init() {\n\tt[\"ScheduledTaskCreatedEvent\"] = reflect.TypeOf((*ScheduledTaskCreatedEvent)(nil)).Elem()\n}\n\ntype ScheduledTaskDescription struct {\n\tDynamicData\n\n\tAction        []BaseTypeDescription    `xml:\"action,typeattr\"`\n\tSchedulerInfo []ScheduledTaskDetail    `xml:\"schedulerInfo\"`\n\tState         []BaseElementDescription `xml:\"state,typeattr\"`\n\tDayOfWeek     []BaseElementDescription `xml:\"dayOfWeek,typeattr\"`\n\tWeekOfMonth   []BaseElementDescription `xml:\"weekOfMonth,typeattr\"`\n}\n\nfunc init() {\n\tt[\"ScheduledTaskDescription\"] = reflect.TypeOf((*ScheduledTaskDescription)(nil)).Elem()\n}\n\ntype ScheduledTaskDetail struct {\n\tTypeDescription\n\n\tFrequency string `xml:\"frequency\"`\n}\n\nfunc init() {\n\tt[\"ScheduledTaskDetail\"] = reflect.TypeOf((*ScheduledTaskDetail)(nil)).Elem()\n}\n\ntype ScheduledTaskEmailCompletedEvent struct 
{\n\tScheduledTaskEvent\n\n\tTo string `xml:\"to\"`\n}\n\nfunc init() {\n\tt[\"ScheduledTaskEmailCompletedEvent\"] = reflect.TypeOf((*ScheduledTaskEmailCompletedEvent)(nil)).Elem()\n}\n\ntype ScheduledTaskEmailFailedEvent struct {\n\tScheduledTaskEvent\n\n\tTo     string               `xml:\"to\"`\n\tReason LocalizedMethodFault `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"ScheduledTaskEmailFailedEvent\"] = reflect.TypeOf((*ScheduledTaskEmailFailedEvent)(nil)).Elem()\n}\n\ntype ScheduledTaskEvent struct {\n\tEvent\n\n\tScheduledTask ScheduledTaskEventArgument `xml:\"scheduledTask\"`\n\tEntity        ManagedEntityEventArgument `xml:\"entity\"`\n}\n\nfunc init() {\n\tt[\"ScheduledTaskEvent\"] = reflect.TypeOf((*ScheduledTaskEvent)(nil)).Elem()\n}\n\ntype ScheduledTaskEventArgument struct {\n\tEntityEventArgument\n\n\tScheduledTask ManagedObjectReference `xml:\"scheduledTask\"`\n}\n\nfunc init() {\n\tt[\"ScheduledTaskEventArgument\"] = reflect.TypeOf((*ScheduledTaskEventArgument)(nil)).Elem()\n}\n\ntype ScheduledTaskFailedEvent struct {\n\tScheduledTaskEvent\n\n\tReason LocalizedMethodFault `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"ScheduledTaskFailedEvent\"] = reflect.TypeOf((*ScheduledTaskFailedEvent)(nil)).Elem()\n}\n\ntype ScheduledTaskInfo struct {\n\tScheduledTaskSpec\n\n\tScheduledTask    ManagedObjectReference  `xml:\"scheduledTask\"`\n\tEntity           ManagedObjectReference  `xml:\"entity\"`\n\tLastModifiedTime time.Time               `xml:\"lastModifiedTime\"`\n\tLastModifiedUser string                  `xml:\"lastModifiedUser\"`\n\tNextRunTime      *time.Time              `xml:\"nextRunTime\"`\n\tPrevRunTime      *time.Time              `xml:\"prevRunTime\"`\n\tState            TaskInfoState           `xml:\"state\"`\n\tError            *LocalizedMethodFault   `xml:\"error,omitempty\"`\n\tResult           AnyType                 `xml:\"result,omitempty,typeattr\"`\n\tProgress         int32                   `xml:\"progress,omitempty\"`\n\tActiveTask       
*ManagedObjectReference `xml:\"activeTask,omitempty\"`\n\tTaskObject       *ManagedObjectReference `xml:\"taskObject,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ScheduledTaskInfo\"] = reflect.TypeOf((*ScheduledTaskInfo)(nil)).Elem()\n}\n\ntype ScheduledTaskReconfiguredEvent struct {\n\tScheduledTaskEvent\n\n\tConfigChanges *ChangesInfoEventArgument `xml:\"configChanges,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ScheduledTaskReconfiguredEvent\"] = reflect.TypeOf((*ScheduledTaskReconfiguredEvent)(nil)).Elem()\n}\n\ntype ScheduledTaskRemovedEvent struct {\n\tScheduledTaskEvent\n}\n\nfunc init() {\n\tt[\"ScheduledTaskRemovedEvent\"] = reflect.TypeOf((*ScheduledTaskRemovedEvent)(nil)).Elem()\n}\n\ntype ScheduledTaskSpec struct {\n\tDynamicData\n\n\tName         string            `xml:\"name\"`\n\tDescription  string            `xml:\"description\"`\n\tEnabled      bool              `xml:\"enabled\"`\n\tScheduler    BaseTaskScheduler `xml:\"scheduler,typeattr\"`\n\tAction       BaseAction        `xml:\"action,typeattr\"`\n\tNotification string            `xml:\"notification,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ScheduledTaskSpec\"] = reflect.TypeOf((*ScheduledTaskSpec)(nil)).Elem()\n}\n\ntype ScheduledTaskStartedEvent struct {\n\tScheduledTaskEvent\n}\n\nfunc init() {\n\tt[\"ScheduledTaskStartedEvent\"] = reflect.TypeOf((*ScheduledTaskStartedEvent)(nil)).Elem()\n}\n\ntype ScsiLun struct {\n\tHostDevice\n\n\tKey              string               `xml:\"key,omitempty\"`\n\tUuid             string               `xml:\"uuid\"`\n\tDescriptor       []ScsiLunDescriptor  `xml:\"descriptor,omitempty\"`\n\tCanonicalName    string               `xml:\"canonicalName,omitempty\"`\n\tDisplayName      string               `xml:\"displayName,omitempty\"`\n\tLunType          string               `xml:\"lunType\"`\n\tVendor           string               `xml:\"vendor,omitempty\"`\n\tModel            string               `xml:\"model,omitempty\"`\n\tRevision         string               
`xml:\"revision,omitempty\"`\n\tScsiLevel        int32                `xml:\"scsiLevel,omitempty\"`\n\tSerialNumber     string               `xml:\"serialNumber,omitempty\"`\n\tDurableName      *ScsiLunDurableName  `xml:\"durableName,omitempty\"`\n\tAlternateName    []ScsiLunDurableName `xml:\"alternateName,omitempty\"`\n\tStandardInquiry  []byte               `xml:\"standardInquiry,omitempty\"`\n\tQueueDepth       int32                `xml:\"queueDepth,omitempty\"`\n\tOperationalState []string             `xml:\"operationalState\"`\n\tCapabilities     *ScsiLunCapabilities `xml:\"capabilities,omitempty\"`\n\tVStorageSupport  string               `xml:\"vStorageSupport,omitempty\"`\n\tProtocolEndpoint *bool                `xml:\"protocolEndpoint\"`\n}\n\nfunc init() {\n\tt[\"ScsiLun\"] = reflect.TypeOf((*ScsiLun)(nil)).Elem()\n}\n\ntype ScsiLunCapabilities struct {\n\tDynamicData\n\n\tUpdateDisplayNameSupported bool `xml:\"updateDisplayNameSupported\"`\n}\n\nfunc init() {\n\tt[\"ScsiLunCapabilities\"] = reflect.TypeOf((*ScsiLunCapabilities)(nil)).Elem()\n}\n\ntype ScsiLunDescriptor struct {\n\tDynamicData\n\n\tQuality string `xml:\"quality\"`\n\tId      string `xml:\"id\"`\n}\n\nfunc init() {\n\tt[\"ScsiLunDescriptor\"] = reflect.TypeOf((*ScsiLunDescriptor)(nil)).Elem()\n}\n\ntype ScsiLunDurableName struct {\n\tDynamicData\n\n\tNamespace   string `xml:\"namespace\"`\n\tNamespaceId byte   `xml:\"namespaceId\"`\n\tData        []byte `xml:\"data,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ScsiLunDurableName\"] = reflect.TypeOf((*ScsiLunDurableName)(nil)).Elem()\n}\n\ntype SeSparseVirtualDiskSpec struct {\n\tFileBackedVirtualDiskSpec\n\n\tGrainSizeKb int32 `xml:\"grainSizeKb,omitempty\"`\n}\n\nfunc init() {\n\tt[\"SeSparseVirtualDiskSpec\"] = reflect.TypeOf((*SeSparseVirtualDiskSpec)(nil)).Elem()\n}\n\ntype SearchDatastoreRequestType struct {\n\tThis          ManagedObjectReference          `xml:\"_this\"`\n\tDatastorePath string                          
`xml:\"datastorePath\"`\n\tSearchSpec    *HostDatastoreBrowserSearchSpec `xml:\"searchSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"SearchDatastoreRequestType\"] = reflect.TypeOf((*SearchDatastoreRequestType)(nil)).Elem()\n}\n\ntype SearchDatastoreSubFoldersRequestType struct {\n\tThis          ManagedObjectReference          `xml:\"_this\"`\n\tDatastorePath string                          `xml:\"datastorePath\"`\n\tSearchSpec    *HostDatastoreBrowserSearchSpec `xml:\"searchSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"SearchDatastoreSubFoldersRequestType\"] = reflect.TypeOf((*SearchDatastoreSubFoldersRequestType)(nil)).Elem()\n}\n\ntype SearchDatastoreSubFolders_Task SearchDatastoreSubFoldersRequestType\n\nfunc init() {\n\tt[\"SearchDatastoreSubFolders_Task\"] = reflect.TypeOf((*SearchDatastoreSubFolders_Task)(nil)).Elem()\n}\n\ntype SearchDatastoreSubFolders_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype SearchDatastore_Task SearchDatastoreRequestType\n\nfunc init() {\n\tt[\"SearchDatastore_Task\"] = reflect.TypeOf((*SearchDatastore_Task)(nil)).Elem()\n}\n\ntype SearchDatastore_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype SecondaryVmAlreadyDisabled struct {\n\tVmFaultToleranceIssue\n\n\tInstanceUuid string `xml:\"instanceUuid\"`\n}\n\nfunc init() {\n\tt[\"SecondaryVmAlreadyDisabled\"] = reflect.TypeOf((*SecondaryVmAlreadyDisabled)(nil)).Elem()\n}\n\ntype SecondaryVmAlreadyDisabledFault SecondaryVmAlreadyDisabled\n\nfunc init() {\n\tt[\"SecondaryVmAlreadyDisabledFault\"] = reflect.TypeOf((*SecondaryVmAlreadyDisabledFault)(nil)).Elem()\n}\n\ntype SecondaryVmAlreadyEnabled struct {\n\tVmFaultToleranceIssue\n\n\tInstanceUuid string `xml:\"instanceUuid\"`\n}\n\nfunc init() {\n\tt[\"SecondaryVmAlreadyEnabled\"] = reflect.TypeOf((*SecondaryVmAlreadyEnabled)(nil)).Elem()\n}\n\ntype SecondaryVmAlreadyEnabledFault SecondaryVmAlreadyEnabled\n\nfunc init() 
{\n\tt[\"SecondaryVmAlreadyEnabledFault\"] = reflect.TypeOf((*SecondaryVmAlreadyEnabledFault)(nil)).Elem()\n}\n\ntype SecondaryVmAlreadyRegistered struct {\n\tVmFaultToleranceIssue\n\n\tInstanceUuid string `xml:\"instanceUuid,omitempty\"`\n}\n\nfunc init() {\n\tt[\"SecondaryVmAlreadyRegistered\"] = reflect.TypeOf((*SecondaryVmAlreadyRegistered)(nil)).Elem()\n}\n\ntype SecondaryVmAlreadyRegisteredFault SecondaryVmAlreadyRegistered\n\nfunc init() {\n\tt[\"SecondaryVmAlreadyRegisteredFault\"] = reflect.TypeOf((*SecondaryVmAlreadyRegisteredFault)(nil)).Elem()\n}\n\ntype SecondaryVmNotRegistered struct {\n\tVmFaultToleranceIssue\n\n\tInstanceUuid string `xml:\"instanceUuid,omitempty\"`\n}\n\nfunc init() {\n\tt[\"SecondaryVmNotRegistered\"] = reflect.TypeOf((*SecondaryVmNotRegistered)(nil)).Elem()\n}\n\ntype SecondaryVmNotRegisteredFault SecondaryVmNotRegistered\n\nfunc init() {\n\tt[\"SecondaryVmNotRegisteredFault\"] = reflect.TypeOf((*SecondaryVmNotRegisteredFault)(nil)).Elem()\n}\n\ntype SecurityError struct {\n\tRuntimeFault\n}\n\nfunc init() {\n\tt[\"SecurityError\"] = reflect.TypeOf((*SecurityError)(nil)).Elem()\n}\n\ntype SecurityErrorFault BaseSecurityError\n\nfunc init() {\n\tt[\"SecurityErrorFault\"] = reflect.TypeOf((*SecurityErrorFault)(nil)).Elem()\n}\n\ntype SecurityProfile struct {\n\tApplyProfile\n\n\tPermission []PermissionProfile `xml:\"permission,omitempty\"`\n}\n\nfunc init() {\n\tt[\"SecurityProfile\"] = reflect.TypeOf((*SecurityProfile)(nil)).Elem()\n}\n\ntype SelectActivePartition SelectActivePartitionRequestType\n\nfunc init() {\n\tt[\"SelectActivePartition\"] = reflect.TypeOf((*SelectActivePartition)(nil)).Elem()\n}\n\ntype SelectActivePartitionRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tPartition *HostScsiDiskPartition `xml:\"partition,omitempty\"`\n}\n\nfunc init() {\n\tt[\"SelectActivePartitionRequestType\"] = reflect.TypeOf((*SelectActivePartitionRequestType)(nil)).Elem()\n}\n\ntype 
SelectActivePartitionResponse struct {\n}\n\ntype SelectVnic SelectVnicRequestType\n\nfunc init() {\n\tt[\"SelectVnic\"] = reflect.TypeOf((*SelectVnic)(nil)).Elem()\n}\n\ntype SelectVnicForNicType SelectVnicForNicTypeRequestType\n\nfunc init() {\n\tt[\"SelectVnicForNicType\"] = reflect.TypeOf((*SelectVnicForNicType)(nil)).Elem()\n}\n\ntype SelectVnicForNicTypeRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tNicType string                 `xml:\"nicType\"`\n\tDevice  string                 `xml:\"device\"`\n}\n\nfunc init() {\n\tt[\"SelectVnicForNicTypeRequestType\"] = reflect.TypeOf((*SelectVnicForNicTypeRequestType)(nil)).Elem()\n}\n\ntype SelectVnicForNicTypeResponse struct {\n}\n\ntype SelectVnicRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tDevice string                 `xml:\"device\"`\n}\n\nfunc init() {\n\tt[\"SelectVnicRequestType\"] = reflect.TypeOf((*SelectVnicRequestType)(nil)).Elem()\n}\n\ntype SelectVnicResponse struct {\n}\n\ntype SelectionSet struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"SelectionSet\"] = reflect.TypeOf((*SelectionSet)(nil)).Elem()\n}\n\ntype SelectionSpec struct {\n\tDynamicData\n\n\tName string `xml:\"name,omitempty\"`\n}\n\nfunc init() {\n\tt[\"SelectionSpec\"] = reflect.TypeOf((*SelectionSpec)(nil)).Elem()\n}\n\ntype SendEmailAction struct {\n\tAction\n\n\tToList  string `xml:\"toList\"`\n\tCcList  string `xml:\"ccList\"`\n\tSubject string `xml:\"subject\"`\n\tBody    string `xml:\"body\"`\n}\n\nfunc init() {\n\tt[\"SendEmailAction\"] = reflect.TypeOf((*SendEmailAction)(nil)).Elem()\n}\n\ntype SendNMI SendNMIRequestType\n\nfunc init() {\n\tt[\"SendNMI\"] = reflect.TypeOf((*SendNMI)(nil)).Elem()\n}\n\ntype SendNMIRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"SendNMIRequestType\"] = reflect.TypeOf((*SendNMIRequestType)(nil)).Elem()\n}\n\ntype SendNMIResponse struct {\n}\n\ntype SendSNMPAction struct {\n\tAction\n}\n\nfunc init() 
{\n\tt[\"SendSNMPAction\"] = reflect.TypeOf((*SendSNMPAction)(nil)).Elem()\n}\n\ntype SendTestNotification SendTestNotificationRequestType\n\nfunc init() {\n\tt[\"SendTestNotification\"] = reflect.TypeOf((*SendTestNotification)(nil)).Elem()\n}\n\ntype SendTestNotificationRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"SendTestNotificationRequestType\"] = reflect.TypeOf((*SendTestNotificationRequestType)(nil)).Elem()\n}\n\ntype SendTestNotificationResponse struct {\n}\n\ntype ServerLicenseExpiredEvent struct {\n\tLicenseEvent\n\n\tProduct string `xml:\"product\"`\n}\n\nfunc init() {\n\tt[\"ServerLicenseExpiredEvent\"] = reflect.TypeOf((*ServerLicenseExpiredEvent)(nil)).Elem()\n}\n\ntype ServerStartedSessionEvent struct {\n\tSessionEvent\n}\n\nfunc init() {\n\tt[\"ServerStartedSessionEvent\"] = reflect.TypeOf((*ServerStartedSessionEvent)(nil)).Elem()\n}\n\ntype ServiceConsolePortGroupProfile struct {\n\tPortGroupProfile\n\n\tIpConfig IpAddressProfile `xml:\"ipConfig\"`\n}\n\nfunc init() {\n\tt[\"ServiceConsolePortGroupProfile\"] = reflect.TypeOf((*ServiceConsolePortGroupProfile)(nil)).Elem()\n}\n\ntype ServiceConsoleReservationInfo struct {\n\tDynamicData\n\n\tServiceConsoleReservedCfg int64 `xml:\"serviceConsoleReservedCfg\"`\n\tServiceConsoleReserved    int64 `xml:\"serviceConsoleReserved\"`\n\tUnreserved                int64 `xml:\"unreserved\"`\n}\n\nfunc init() {\n\tt[\"ServiceConsoleReservationInfo\"] = reflect.TypeOf((*ServiceConsoleReservationInfo)(nil)).Elem()\n}\n\ntype ServiceContent struct {\n\tDynamicData\n\n\tRootFolder                  ManagedObjectReference  `xml:\"rootFolder\"`\n\tPropertyCollector           ManagedObjectReference  `xml:\"propertyCollector\"`\n\tViewManager                 *ManagedObjectReference `xml:\"viewManager,omitempty\"`\n\tAbout                       AboutInfo               `xml:\"about\"`\n\tSetting                     *ManagedObjectReference 
`xml:\"setting,omitempty\"`\n\tUserDirectory               *ManagedObjectReference `xml:\"userDirectory,omitempty\"`\n\tSessionManager              *ManagedObjectReference `xml:\"sessionManager,omitempty\"`\n\tAuthorizationManager        *ManagedObjectReference `xml:\"authorizationManager,omitempty\"`\n\tServiceManager              *ManagedObjectReference `xml:\"serviceManager,omitempty\"`\n\tPerfManager                 *ManagedObjectReference `xml:\"perfManager,omitempty\"`\n\tScheduledTaskManager        *ManagedObjectReference `xml:\"scheduledTaskManager,omitempty\"`\n\tAlarmManager                *ManagedObjectReference `xml:\"alarmManager,omitempty\"`\n\tEventManager                *ManagedObjectReference `xml:\"eventManager,omitempty\"`\n\tTaskManager                 *ManagedObjectReference `xml:\"taskManager,omitempty\"`\n\tExtensionManager            *ManagedObjectReference `xml:\"extensionManager,omitempty\"`\n\tCustomizationSpecManager    *ManagedObjectReference `xml:\"customizationSpecManager,omitempty\"`\n\tCustomFieldsManager         *ManagedObjectReference `xml:\"customFieldsManager,omitempty\"`\n\tAccountManager              *ManagedObjectReference `xml:\"accountManager,omitempty\"`\n\tDiagnosticManager           *ManagedObjectReference `xml:\"diagnosticManager,omitempty\"`\n\tLicenseManager              *ManagedObjectReference `xml:\"licenseManager,omitempty\"`\n\tSearchIndex                 *ManagedObjectReference `xml:\"searchIndex,omitempty\"`\n\tFileManager                 *ManagedObjectReference `xml:\"fileManager,omitempty\"`\n\tDatastoreNamespaceManager   *ManagedObjectReference `xml:\"datastoreNamespaceManager,omitempty\"`\n\tVirtualDiskManager          *ManagedObjectReference `xml:\"virtualDiskManager,omitempty\"`\n\tVirtualizationManager       *ManagedObjectReference `xml:\"virtualizationManager,omitempty\"`\n\tSnmpSystem                  *ManagedObjectReference `xml:\"snmpSystem,omitempty\"`\n\tVmProvisioningChecker       
*ManagedObjectReference `xml:\"vmProvisioningChecker,omitempty\"`\n\tVmCompatibilityChecker      *ManagedObjectReference `xml:\"vmCompatibilityChecker,omitempty\"`\n\tOvfManager                  *ManagedObjectReference `xml:\"ovfManager,omitempty\"`\n\tIpPoolManager               *ManagedObjectReference `xml:\"ipPoolManager,omitempty\"`\n\tDvSwitchManager             *ManagedObjectReference `xml:\"dvSwitchManager,omitempty\"`\n\tHostProfileManager          *ManagedObjectReference `xml:\"hostProfileManager,omitempty\"`\n\tClusterProfileManager       *ManagedObjectReference `xml:\"clusterProfileManager,omitempty\"`\n\tComplianceManager           *ManagedObjectReference `xml:\"complianceManager,omitempty\"`\n\tLocalizationManager         *ManagedObjectReference `xml:\"localizationManager,omitempty\"`\n\tStorageResourceManager      *ManagedObjectReference `xml:\"storageResourceManager,omitempty\"`\n\tGuestOperationsManager      *ManagedObjectReference `xml:\"guestOperationsManager,omitempty\"`\n\tOverheadMemoryManager       *ManagedObjectReference `xml:\"overheadMemoryManager,omitempty\"`\n\tCertificateManager          *ManagedObjectReference `xml:\"certificateManager,omitempty\"`\n\tIoFilterManager             *ManagedObjectReference `xml:\"ioFilterManager,omitempty\"`\n\tVStorageObjectManager       *ManagedObjectReference `xml:\"vStorageObjectManager,omitempty\"`\n\tHostSpecManager             *ManagedObjectReference `xml:\"hostSpecManager,omitempty\"`\n\tCryptoManager               *ManagedObjectReference `xml:\"cryptoManager,omitempty\"`\n\tHealthUpdateManager         *ManagedObjectReference `xml:\"healthUpdateManager,omitempty\"`\n\tFailoverClusterConfigurator *ManagedObjectReference `xml:\"failoverClusterConfigurator,omitempty\"`\n\tFailoverClusterManager      *ManagedObjectReference `xml:\"failoverClusterManager,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ServiceContent\"] = reflect.TypeOf((*ServiceContent)(nil)).Elem()\n}\n\ntype ServiceLocator struct 
{\n\tDynamicData\n\n\tInstanceUuid  string                       `xml:\"instanceUuid\"`\n\tUrl           string                       `xml:\"url\"`\n\tCredential    BaseServiceLocatorCredential `xml:\"credential,typeattr\"`\n\tSslThumbprint string                       `xml:\"sslThumbprint,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ServiceLocator\"] = reflect.TypeOf((*ServiceLocator)(nil)).Elem()\n}\n\ntype ServiceLocatorCredential struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"ServiceLocatorCredential\"] = reflect.TypeOf((*ServiceLocatorCredential)(nil)).Elem()\n}\n\ntype ServiceLocatorNamePassword struct {\n\tServiceLocatorCredential\n\n\tUsername string `xml:\"username\"`\n\tPassword string `xml:\"password\"`\n}\n\nfunc init() {\n\tt[\"ServiceLocatorNamePassword\"] = reflect.TypeOf((*ServiceLocatorNamePassword)(nil)).Elem()\n}\n\ntype ServiceLocatorSAMLCredential struct {\n\tServiceLocatorCredential\n\n\tToken string `xml:\"token,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ServiceLocatorSAMLCredential\"] = reflect.TypeOf((*ServiceLocatorSAMLCredential)(nil)).Elem()\n}\n\ntype ServiceManagerServiceInfo struct {\n\tDynamicData\n\n\tServiceName string                 `xml:\"serviceName\"`\n\tLocation    []string               `xml:\"location,omitempty\"`\n\tService     ManagedObjectReference `xml:\"service\"`\n\tDescription string                 `xml:\"description\"`\n}\n\nfunc init() {\n\tt[\"ServiceManagerServiceInfo\"] = reflect.TypeOf((*ServiceManagerServiceInfo)(nil)).Elem()\n}\n\ntype ServiceProfile struct {\n\tApplyProfile\n\n\tKey string `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"ServiceProfile\"] = reflect.TypeOf((*ServiceProfile)(nil)).Elem()\n}\n\ntype SessionEvent struct {\n\tEvent\n}\n\nfunc init() {\n\tt[\"SessionEvent\"] = reflect.TypeOf((*SessionEvent)(nil)).Elem()\n}\n\ntype SessionIsActive SessionIsActiveRequestType\n\nfunc init() {\n\tt[\"SessionIsActive\"] = reflect.TypeOf((*SessionIsActive)(nil)).Elem()\n}\n\ntype SessionIsActiveRequestType struct 
{\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tSessionID string                 `xml:\"sessionID\"`\n\tUserName  string                 `xml:\"userName\"`\n}\n\nfunc init() {\n\tt[\"SessionIsActiveRequestType\"] = reflect.TypeOf((*SessionIsActiveRequestType)(nil)).Elem()\n}\n\ntype SessionIsActiveResponse struct {\n\tReturnval bool `xml:\"returnval\"`\n}\n\ntype SessionManagerGenericServiceTicket struct {\n\tDynamicData\n\n\tId            string `xml:\"id\"`\n\tHostName      string `xml:\"hostName,omitempty\"`\n\tSslThumbprint string `xml:\"sslThumbprint,omitempty\"`\n}\n\nfunc init() {\n\tt[\"SessionManagerGenericServiceTicket\"] = reflect.TypeOf((*SessionManagerGenericServiceTicket)(nil)).Elem()\n}\n\ntype SessionManagerHttpServiceRequestSpec struct {\n\tSessionManagerServiceRequestSpec\n\n\tMethod string `xml:\"method,omitempty\"`\n\tUrl    string `xml:\"url\"`\n}\n\nfunc init() {\n\tt[\"SessionManagerHttpServiceRequestSpec\"] = reflect.TypeOf((*SessionManagerHttpServiceRequestSpec)(nil)).Elem()\n}\n\ntype SessionManagerLocalTicket struct {\n\tDynamicData\n\n\tUserName         string `xml:\"userName\"`\n\tPasswordFilePath string `xml:\"passwordFilePath\"`\n}\n\nfunc init() {\n\tt[\"SessionManagerLocalTicket\"] = reflect.TypeOf((*SessionManagerLocalTicket)(nil)).Elem()\n}\n\ntype SessionManagerServiceRequestSpec struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"SessionManagerServiceRequestSpec\"] = reflect.TypeOf((*SessionManagerServiceRequestSpec)(nil)).Elem()\n}\n\ntype SessionManagerVmomiServiceRequestSpec struct {\n\tSessionManagerServiceRequestSpec\n\n\tMethod string `xml:\"method\"`\n}\n\nfunc init() {\n\tt[\"SessionManagerVmomiServiceRequestSpec\"] = reflect.TypeOf((*SessionManagerVmomiServiceRequestSpec)(nil)).Elem()\n}\n\ntype SessionTerminatedEvent struct {\n\tSessionEvent\n\n\tSessionId          string `xml:\"sessionId\"`\n\tTerminatedUsername string `xml:\"terminatedUsername\"`\n}\n\nfunc init() {\n\tt[\"SessionTerminatedEvent\"] = 
reflect.TypeOf((*SessionTerminatedEvent)(nil)).Elem()\n}\n\ntype SetCollectorPageSize SetCollectorPageSizeRequestType\n\nfunc init() {\n\tt[\"SetCollectorPageSize\"] = reflect.TypeOf((*SetCollectorPageSize)(nil)).Elem()\n}\n\ntype SetCollectorPageSizeRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tMaxCount int32                  `xml:\"maxCount\"`\n}\n\nfunc init() {\n\tt[\"SetCollectorPageSizeRequestType\"] = reflect.TypeOf((*SetCollectorPageSizeRequestType)(nil)).Elem()\n}\n\ntype SetCollectorPageSizeResponse struct {\n}\n\ntype SetDisplayTopology SetDisplayTopologyRequestType\n\nfunc init() {\n\tt[\"SetDisplayTopology\"] = reflect.TypeOf((*SetDisplayTopology)(nil)).Elem()\n}\n\ntype SetDisplayTopologyRequestType struct {\n\tThis     ManagedObjectReference          `xml:\"_this\"`\n\tDisplays []VirtualMachineDisplayTopology `xml:\"displays\"`\n}\n\nfunc init() {\n\tt[\"SetDisplayTopologyRequestType\"] = reflect.TypeOf((*SetDisplayTopologyRequestType)(nil)).Elem()\n}\n\ntype SetDisplayTopologyResponse struct {\n}\n\ntype SetEntityPermissions SetEntityPermissionsRequestType\n\nfunc init() {\n\tt[\"SetEntityPermissions\"] = reflect.TypeOf((*SetEntityPermissions)(nil)).Elem()\n}\n\ntype SetEntityPermissionsRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tEntity     ManagedObjectReference `xml:\"entity\"`\n\tPermission []Permission           `xml:\"permission,omitempty\"`\n}\n\nfunc init() {\n\tt[\"SetEntityPermissionsRequestType\"] = reflect.TypeOf((*SetEntityPermissionsRequestType)(nil)).Elem()\n}\n\ntype SetEntityPermissionsResponse struct {\n}\n\ntype SetExtensionCertificate SetExtensionCertificateRequestType\n\nfunc init() {\n\tt[\"SetExtensionCertificate\"] = reflect.TypeOf((*SetExtensionCertificate)(nil)).Elem()\n}\n\ntype SetExtensionCertificateRequestType struct {\n\tThis           ManagedObjectReference `xml:\"_this\"`\n\tExtensionKey   string                 `xml:\"extensionKey\"`\n\tCertificatePem 
string                 `xml:\"certificatePem,omitempty\"`\n}\n\nfunc init() {\n\tt[\"SetExtensionCertificateRequestType\"] = reflect.TypeOf((*SetExtensionCertificateRequestType)(nil)).Elem()\n}\n\ntype SetExtensionCertificateResponse struct {\n}\n\ntype SetField SetFieldRequestType\n\nfunc init() {\n\tt[\"SetField\"] = reflect.TypeOf((*SetField)(nil)).Elem()\n}\n\ntype SetFieldRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tEntity ManagedObjectReference `xml:\"entity\"`\n\tKey    int32                  `xml:\"key\"`\n\tValue  string                 `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"SetFieldRequestType\"] = reflect.TypeOf((*SetFieldRequestType)(nil)).Elem()\n}\n\ntype SetFieldResponse struct {\n}\n\ntype SetLicenseEdition SetLicenseEditionRequestType\n\nfunc init() {\n\tt[\"SetLicenseEdition\"] = reflect.TypeOf((*SetLicenseEdition)(nil)).Elem()\n}\n\ntype SetLicenseEditionRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tHost       *ManagedObjectReference `xml:\"host,omitempty\"`\n\tFeatureKey string                  `xml:\"featureKey,omitempty\"`\n}\n\nfunc init() {\n\tt[\"SetLicenseEditionRequestType\"] = reflect.TypeOf((*SetLicenseEditionRequestType)(nil)).Elem()\n}\n\ntype SetLicenseEditionResponse struct {\n}\n\ntype SetLocale SetLocaleRequestType\n\nfunc init() {\n\tt[\"SetLocale\"] = reflect.TypeOf((*SetLocale)(nil)).Elem()\n}\n\ntype SetLocaleRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tLocale string                 `xml:\"locale\"`\n}\n\nfunc init() {\n\tt[\"SetLocaleRequestType\"] = reflect.TypeOf((*SetLocaleRequestType)(nil)).Elem()\n}\n\ntype SetLocaleResponse struct {\n}\n\ntype SetMultipathLunPolicy SetMultipathLunPolicyRequestType\n\nfunc init() {\n\tt[\"SetMultipathLunPolicy\"] = reflect.TypeOf((*SetMultipathLunPolicy)(nil)).Elem()\n}\n\ntype SetMultipathLunPolicyRequestType struct {\n\tThis   ManagedObjectReference                 `xml:\"_this\"`\n\tLunId  
string                                 `xml:\"lunId\"`\n\tPolicy BaseHostMultipathInfoLogicalUnitPolicy `xml:\"policy,typeattr\"`\n}\n\nfunc init() {\n\tt[\"SetMultipathLunPolicyRequestType\"] = reflect.TypeOf((*SetMultipathLunPolicyRequestType)(nil)).Elem()\n}\n\ntype SetMultipathLunPolicyResponse struct {\n}\n\ntype SetNFSUser SetNFSUserRequestType\n\nfunc init() {\n\tt[\"SetNFSUser\"] = reflect.TypeOf((*SetNFSUser)(nil)).Elem()\n}\n\ntype SetNFSUserRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tUser     string                 `xml:\"user\"`\n\tPassword string                 `xml:\"password\"`\n}\n\nfunc init() {\n\tt[\"SetNFSUserRequestType\"] = reflect.TypeOf((*SetNFSUserRequestType)(nil)).Elem()\n}\n\ntype SetNFSUserResponse struct {\n}\n\ntype SetPublicKey SetPublicKeyRequestType\n\nfunc init() {\n\tt[\"SetPublicKey\"] = reflect.TypeOf((*SetPublicKey)(nil)).Elem()\n}\n\ntype SetPublicKeyRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tExtensionKey string                 `xml:\"extensionKey\"`\n\tPublicKey    string                 `xml:\"publicKey\"`\n}\n\nfunc init() {\n\tt[\"SetPublicKeyRequestType\"] = reflect.TypeOf((*SetPublicKeyRequestType)(nil)).Elem()\n}\n\ntype SetPublicKeyResponse struct {\n}\n\ntype SetRegistryValueInGuest SetRegistryValueInGuestRequestType\n\nfunc init() {\n\tt[\"SetRegistryValueInGuest\"] = reflect.TypeOf((*SetRegistryValueInGuest)(nil)).Elem()\n}\n\ntype SetRegistryValueInGuestRequestType struct {\n\tThis  ManagedObjectReference  `xml:\"_this\"`\n\tVm    ManagedObjectReference  `xml:\"vm\"`\n\tAuth  BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tValue GuestRegValueSpec       `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"SetRegistryValueInGuestRequestType\"] = reflect.TypeOf((*SetRegistryValueInGuestRequestType)(nil)).Elem()\n}\n\ntype SetRegistryValueInGuestResponse struct {\n}\n\ntype SetScreenResolution SetScreenResolutionRequestType\n\nfunc init() 
{\n\tt[\"SetScreenResolution\"] = reflect.TypeOf((*SetScreenResolution)(nil)).Elem()\n}\n\ntype SetScreenResolutionRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tWidth  int32                  `xml:\"width\"`\n\tHeight int32                  `xml:\"height\"`\n}\n\nfunc init() {\n\tt[\"SetScreenResolutionRequestType\"] = reflect.TypeOf((*SetScreenResolutionRequestType)(nil)).Elem()\n}\n\ntype SetScreenResolutionResponse struct {\n}\n\ntype SetTaskDescription SetTaskDescriptionRequestType\n\nfunc init() {\n\tt[\"SetTaskDescription\"] = reflect.TypeOf((*SetTaskDescription)(nil)).Elem()\n}\n\ntype SetTaskDescriptionRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tDescription LocalizableMessage     `xml:\"description\"`\n}\n\nfunc init() {\n\tt[\"SetTaskDescriptionRequestType\"] = reflect.TypeOf((*SetTaskDescriptionRequestType)(nil)).Elem()\n}\n\ntype SetTaskDescriptionResponse struct {\n}\n\ntype SetTaskState SetTaskStateRequestType\n\nfunc init() {\n\tt[\"SetTaskState\"] = reflect.TypeOf((*SetTaskState)(nil)).Elem()\n}\n\ntype SetTaskStateRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tState  TaskInfoState          `xml:\"state\"`\n\tResult AnyType                `xml:\"result,omitempty,typeattr\"`\n\tFault  *LocalizedMethodFault  `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"SetTaskStateRequestType\"] = reflect.TypeOf((*SetTaskStateRequestType)(nil)).Elem()\n}\n\ntype SetTaskStateResponse struct {\n}\n\ntype SetVirtualDiskUuid SetVirtualDiskUuidRequestType\n\nfunc init() {\n\tt[\"SetVirtualDiskUuid\"] = reflect.TypeOf((*SetVirtualDiskUuid)(nil)).Elem()\n}\n\ntype SetVirtualDiskUuidRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tName       string                  `xml:\"name\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n\tUuid       string                  `xml:\"uuid\"`\n}\n\nfunc init() 
{\n\tt[\"SetVirtualDiskUuidRequestType\"] = reflect.TypeOf((*SetVirtualDiskUuidRequestType)(nil)).Elem()\n}\n\ntype SetVirtualDiskUuidResponse struct {\n}\n\ntype SharedBusControllerNotSupported struct {\n\tDeviceNotSupported\n}\n\nfunc init() {\n\tt[\"SharedBusControllerNotSupported\"] = reflect.TypeOf((*SharedBusControllerNotSupported)(nil)).Elem()\n}\n\ntype SharedBusControllerNotSupportedFault SharedBusControllerNotSupported\n\nfunc init() {\n\tt[\"SharedBusControllerNotSupportedFault\"] = reflect.TypeOf((*SharedBusControllerNotSupportedFault)(nil)).Elem()\n}\n\ntype SharesInfo struct {\n\tDynamicData\n\n\tShares int32       `xml:\"shares\"`\n\tLevel  SharesLevel `xml:\"level\"`\n}\n\nfunc init() {\n\tt[\"SharesInfo\"] = reflect.TypeOf((*SharesInfo)(nil)).Elem()\n}\n\ntype SharesOption struct {\n\tDynamicData\n\n\tSharesOption IntOption   `xml:\"sharesOption\"`\n\tDefaultLevel SharesLevel `xml:\"defaultLevel\"`\n}\n\nfunc init() {\n\tt[\"SharesOption\"] = reflect.TypeOf((*SharesOption)(nil)).Elem()\n}\n\ntype ShrinkDiskFault struct {\n\tVimFault\n\n\tDiskId int32 `xml:\"diskId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ShrinkDiskFault\"] = reflect.TypeOf((*ShrinkDiskFault)(nil)).Elem()\n}\n\ntype ShrinkDiskFaultFault ShrinkDiskFault\n\nfunc init() {\n\tt[\"ShrinkDiskFaultFault\"] = reflect.TypeOf((*ShrinkDiskFaultFault)(nil)).Elem()\n}\n\ntype ShrinkVirtualDiskRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tName       string                  `xml:\"name\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n\tCopy       *bool                   `xml:\"copy\"`\n}\n\nfunc init() {\n\tt[\"ShrinkVirtualDiskRequestType\"] = reflect.TypeOf((*ShrinkVirtualDiskRequestType)(nil)).Elem()\n}\n\ntype ShrinkVirtualDisk_Task ShrinkVirtualDiskRequestType\n\nfunc init() {\n\tt[\"ShrinkVirtualDisk_Task\"] = reflect.TypeOf((*ShrinkVirtualDisk_Task)(nil)).Elem()\n}\n\ntype ShrinkVirtualDisk_TaskResponse struct {\n\tReturnval 
ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype ShutdownGuest ShutdownGuestRequestType\n\nfunc init() {\n\tt[\"ShutdownGuest\"] = reflect.TypeOf((*ShutdownGuest)(nil)).Elem()\n}\n\ntype ShutdownGuestRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"ShutdownGuestRequestType\"] = reflect.TypeOf((*ShutdownGuestRequestType)(nil)).Elem()\n}\n\ntype ShutdownGuestResponse struct {\n}\n\ntype ShutdownHostRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tForce bool                   `xml:\"force\"`\n}\n\nfunc init() {\n\tt[\"ShutdownHostRequestType\"] = reflect.TypeOf((*ShutdownHostRequestType)(nil)).Elem()\n}\n\ntype ShutdownHost_Task ShutdownHostRequestType\n\nfunc init() {\n\tt[\"ShutdownHost_Task\"] = reflect.TypeOf((*ShutdownHost_Task)(nil)).Elem()\n}\n\ntype ShutdownHost_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype SingleIp struct {\n\tIpAddress\n\n\tAddress string `xml:\"address\"`\n}\n\nfunc init() {\n\tt[\"SingleIp\"] = reflect.TypeOf((*SingleIp)(nil)).Elem()\n}\n\ntype SingleMac struct {\n\tMacAddress\n\n\tAddress string `xml:\"address\"`\n}\n\nfunc init() {\n\tt[\"SingleMac\"] = reflect.TypeOf((*SingleMac)(nil)).Elem()\n}\n\ntype SnapshotCloneNotSupported struct {\n\tSnapshotCopyNotSupported\n}\n\nfunc init() {\n\tt[\"SnapshotCloneNotSupported\"] = reflect.TypeOf((*SnapshotCloneNotSupported)(nil)).Elem()\n}\n\ntype SnapshotCloneNotSupportedFault SnapshotCloneNotSupported\n\nfunc init() {\n\tt[\"SnapshotCloneNotSupportedFault\"] = reflect.TypeOf((*SnapshotCloneNotSupportedFault)(nil)).Elem()\n}\n\ntype SnapshotCopyNotSupported struct {\n\tMigrationFault\n}\n\nfunc init() {\n\tt[\"SnapshotCopyNotSupported\"] = reflect.TypeOf((*SnapshotCopyNotSupported)(nil)).Elem()\n}\n\ntype SnapshotCopyNotSupportedFault BaseSnapshotCopyNotSupported\n\nfunc init() {\n\tt[\"SnapshotCopyNotSupportedFault\"] = 
reflect.TypeOf((*SnapshotCopyNotSupportedFault)(nil)).Elem()\n}\n\ntype SnapshotDisabled struct {\n\tSnapshotFault\n}\n\nfunc init() {\n\tt[\"SnapshotDisabled\"] = reflect.TypeOf((*SnapshotDisabled)(nil)).Elem()\n}\n\ntype SnapshotDisabledFault SnapshotDisabled\n\nfunc init() {\n\tt[\"SnapshotDisabledFault\"] = reflect.TypeOf((*SnapshotDisabledFault)(nil)).Elem()\n}\n\ntype SnapshotFault struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"SnapshotFault\"] = reflect.TypeOf((*SnapshotFault)(nil)).Elem()\n}\n\ntype SnapshotFaultFault BaseSnapshotFault\n\nfunc init() {\n\tt[\"SnapshotFaultFault\"] = reflect.TypeOf((*SnapshotFaultFault)(nil)).Elem()\n}\n\ntype SnapshotIncompatibleDeviceInVm struct {\n\tSnapshotFault\n\n\tFault LocalizedMethodFault `xml:\"fault\"`\n}\n\nfunc init() {\n\tt[\"SnapshotIncompatibleDeviceInVm\"] = reflect.TypeOf((*SnapshotIncompatibleDeviceInVm)(nil)).Elem()\n}\n\ntype SnapshotIncompatibleDeviceInVmFault SnapshotIncompatibleDeviceInVm\n\nfunc init() {\n\tt[\"SnapshotIncompatibleDeviceInVmFault\"] = reflect.TypeOf((*SnapshotIncompatibleDeviceInVmFault)(nil)).Elem()\n}\n\ntype SnapshotLocked struct {\n\tSnapshotFault\n}\n\nfunc init() {\n\tt[\"SnapshotLocked\"] = reflect.TypeOf((*SnapshotLocked)(nil)).Elem()\n}\n\ntype SnapshotLockedFault SnapshotLocked\n\nfunc init() {\n\tt[\"SnapshotLockedFault\"] = reflect.TypeOf((*SnapshotLockedFault)(nil)).Elem()\n}\n\ntype SnapshotMoveFromNonHomeNotSupported struct {\n\tSnapshotCopyNotSupported\n}\n\nfunc init() {\n\tt[\"SnapshotMoveFromNonHomeNotSupported\"] = reflect.TypeOf((*SnapshotMoveFromNonHomeNotSupported)(nil)).Elem()\n}\n\ntype SnapshotMoveFromNonHomeNotSupportedFault SnapshotMoveFromNonHomeNotSupported\n\nfunc init() {\n\tt[\"SnapshotMoveFromNonHomeNotSupportedFault\"] = reflect.TypeOf((*SnapshotMoveFromNonHomeNotSupportedFault)(nil)).Elem()\n}\n\ntype SnapshotMoveNotSupported struct {\n\tSnapshotCopyNotSupported\n}\n\nfunc init() {\n\tt[\"SnapshotMoveNotSupported\"] = 
reflect.TypeOf((*SnapshotMoveNotSupported)(nil)).Elem()\n}\n\ntype SnapshotMoveNotSupportedFault SnapshotMoveNotSupported\n\nfunc init() {\n\tt[\"SnapshotMoveNotSupportedFault\"] = reflect.TypeOf((*SnapshotMoveNotSupportedFault)(nil)).Elem()\n}\n\ntype SnapshotMoveToNonHomeNotSupported struct {\n\tSnapshotCopyNotSupported\n}\n\nfunc init() {\n\tt[\"SnapshotMoveToNonHomeNotSupported\"] = reflect.TypeOf((*SnapshotMoveToNonHomeNotSupported)(nil)).Elem()\n}\n\ntype SnapshotMoveToNonHomeNotSupportedFault SnapshotMoveToNonHomeNotSupported\n\nfunc init() {\n\tt[\"SnapshotMoveToNonHomeNotSupportedFault\"] = reflect.TypeOf((*SnapshotMoveToNonHomeNotSupportedFault)(nil)).Elem()\n}\n\ntype SnapshotNoChange struct {\n\tSnapshotFault\n}\n\nfunc init() {\n\tt[\"SnapshotNoChange\"] = reflect.TypeOf((*SnapshotNoChange)(nil)).Elem()\n}\n\ntype SnapshotNoChangeFault SnapshotNoChange\n\nfunc init() {\n\tt[\"SnapshotNoChangeFault\"] = reflect.TypeOf((*SnapshotNoChangeFault)(nil)).Elem()\n}\n\ntype SnapshotRevertIssue struct {\n\tMigrationFault\n\n\tSnapshotName string      `xml:\"snapshotName,omitempty\"`\n\tEvent        []BaseEvent `xml:\"event,omitempty,typeattr\"`\n\tErrors       bool        `xml:\"errors\"`\n}\n\nfunc init() {\n\tt[\"SnapshotRevertIssue\"] = reflect.TypeOf((*SnapshotRevertIssue)(nil)).Elem()\n}\n\ntype SnapshotRevertIssueFault SnapshotRevertIssue\n\nfunc init() {\n\tt[\"SnapshotRevertIssueFault\"] = reflect.TypeOf((*SnapshotRevertIssueFault)(nil)).Elem()\n}\n\ntype SoftRuleVioCorrectionDisallowed struct {\n\tVmConfigFault\n\n\tVmName string `xml:\"vmName\"`\n}\n\nfunc init() {\n\tt[\"SoftRuleVioCorrectionDisallowed\"] = reflect.TypeOf((*SoftRuleVioCorrectionDisallowed)(nil)).Elem()\n}\n\ntype SoftRuleVioCorrectionDisallowedFault SoftRuleVioCorrectionDisallowed\n\nfunc init() {\n\tt[\"SoftRuleVioCorrectionDisallowedFault\"] = reflect.TypeOf((*SoftRuleVioCorrectionDisallowedFault)(nil)).Elem()\n}\n\ntype SoftRuleVioCorrectionImpact struct 
{\n\tVmConfigFault\n\n\tVmName string `xml:\"vmName\"`\n}\n\nfunc init() {\n\tt[\"SoftRuleVioCorrectionImpact\"] = reflect.TypeOf((*SoftRuleVioCorrectionImpact)(nil)).Elem()\n}\n\ntype SoftRuleVioCorrectionImpactFault SoftRuleVioCorrectionImpact\n\nfunc init() {\n\tt[\"SoftRuleVioCorrectionImpactFault\"] = reflect.TypeOf((*SoftRuleVioCorrectionImpactFault)(nil)).Elem()\n}\n\ntype SoftwarePackage struct {\n\tDynamicData\n\n\tName                      string                    `xml:\"name\"`\n\tVersion                   string                    `xml:\"version\"`\n\tType                      string                    `xml:\"type\"`\n\tVendor                    string                    `xml:\"vendor\"`\n\tAcceptanceLevel           string                    `xml:\"acceptanceLevel\"`\n\tSummary                   string                    `xml:\"summary\"`\n\tDescription               string                    `xml:\"description\"`\n\tReferenceURL              []string                  `xml:\"referenceURL,omitempty\"`\n\tCreationDate              *time.Time                `xml:\"creationDate\"`\n\tDepends                   []Relation                `xml:\"depends,omitempty\"`\n\tConflicts                 []Relation                `xml:\"conflicts,omitempty\"`\n\tReplaces                  []Relation                `xml:\"replaces,omitempty\"`\n\tProvides                  []string                  `xml:\"provides,omitempty\"`\n\tMaintenanceModeRequired   *bool                     `xml:\"maintenanceModeRequired\"`\n\tHardwarePlatformsRequired []string                  `xml:\"hardwarePlatformsRequired,omitempty\"`\n\tCapability                SoftwarePackageCapability `xml:\"capability\"`\n\tTag                       []string                  `xml:\"tag,omitempty\"`\n\tPayload                   []string                  `xml:\"payload,omitempty\"`\n}\n\nfunc init() {\n\tt[\"SoftwarePackage\"] = reflect.TypeOf((*SoftwarePackage)(nil)).Elem()\n}\n\ntype 
SoftwarePackageCapability struct {\n\tDynamicData\n\n\tLiveInstallAllowed *bool `xml:\"liveInstallAllowed\"`\n\tLiveRemoveAllowed  *bool `xml:\"liveRemoveAllowed\"`\n\tStatelessReady     *bool `xml:\"statelessReady\"`\n\tOverlay            *bool `xml:\"overlay\"`\n}\n\nfunc init() {\n\tt[\"SoftwarePackageCapability\"] = reflect.TypeOf((*SoftwarePackageCapability)(nil)).Elem()\n}\n\ntype SourceNodeSpec struct {\n\tDynamicData\n\n\tManagementVc ServiceLocator         `xml:\"managementVc\"`\n\tActiveVc     ManagedObjectReference `xml:\"activeVc\"`\n}\n\nfunc init() {\n\tt[\"SourceNodeSpec\"] = reflect.TypeOf((*SourceNodeSpec)(nil)).Elem()\n}\n\ntype SsdDiskNotAvailable struct {\n\tVimFault\n\n\tDevicePath string `xml:\"devicePath\"`\n}\n\nfunc init() {\n\tt[\"SsdDiskNotAvailable\"] = reflect.TypeOf((*SsdDiskNotAvailable)(nil)).Elem()\n}\n\ntype SsdDiskNotAvailableFault SsdDiskNotAvailable\n\nfunc init() {\n\tt[\"SsdDiskNotAvailableFault\"] = reflect.TypeOf((*SsdDiskNotAvailableFault)(nil)).Elem()\n}\n\ntype StageHostPatchRequestType struct {\n\tThis       ManagedObjectReference                     `xml:\"_this\"`\n\tMetaUrls   []string                                   `xml:\"metaUrls,omitempty\"`\n\tBundleUrls []string                                   `xml:\"bundleUrls,omitempty\"`\n\tVibUrls    []string                                   `xml:\"vibUrls,omitempty\"`\n\tSpec       *HostPatchManagerPatchManagerOperationSpec `xml:\"spec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"StageHostPatchRequestType\"] = reflect.TypeOf((*StageHostPatchRequestType)(nil)).Elem()\n}\n\ntype StageHostPatch_Task StageHostPatchRequestType\n\nfunc init() {\n\tt[\"StageHostPatch_Task\"] = reflect.TypeOf((*StageHostPatch_Task)(nil)).Elem()\n}\n\ntype StageHostPatch_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype StampAllRulesWithUuidRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() 
{\n\tt[\"StampAllRulesWithUuidRequestType\"] = reflect.TypeOf((*StampAllRulesWithUuidRequestType)(nil)).Elem()\n}\n\ntype StampAllRulesWithUuid_Task StampAllRulesWithUuidRequestType\n\nfunc init() {\n\tt[\"StampAllRulesWithUuid_Task\"] = reflect.TypeOf((*StampAllRulesWithUuid_Task)(nil)).Elem()\n}\n\ntype StampAllRulesWithUuid_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype StandbyGuest StandbyGuestRequestType\n\nfunc init() {\n\tt[\"StandbyGuest\"] = reflect.TypeOf((*StandbyGuest)(nil)).Elem()\n}\n\ntype StandbyGuestRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"StandbyGuestRequestType\"] = reflect.TypeOf((*StandbyGuestRequestType)(nil)).Elem()\n}\n\ntype StandbyGuestResponse struct {\n}\n\ntype StartProgramInGuest StartProgramInGuestRequestType\n\nfunc init() {\n\tt[\"StartProgramInGuest\"] = reflect.TypeOf((*StartProgramInGuest)(nil)).Elem()\n}\n\ntype StartProgramInGuestRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tVm   ManagedObjectReference  `xml:\"vm\"`\n\tAuth BaseGuestAuthentication `xml:\"auth,typeattr\"`\n\tSpec BaseGuestProgramSpec    `xml:\"spec,typeattr\"`\n}\n\nfunc init() {\n\tt[\"StartProgramInGuestRequestType\"] = reflect.TypeOf((*StartProgramInGuestRequestType)(nil)).Elem()\n}\n\ntype StartProgramInGuestResponse struct {\n\tReturnval int64 `xml:\"returnval\"`\n}\n\ntype StartRecordingRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tName        string                 `xml:\"name\"`\n\tDescription string                 `xml:\"description,omitempty\"`\n}\n\nfunc init() {\n\tt[\"StartRecordingRequestType\"] = reflect.TypeOf((*StartRecordingRequestType)(nil)).Elem()\n}\n\ntype StartRecording_Task StartRecordingRequestType\n\nfunc init() {\n\tt[\"StartRecording_Task\"] = reflect.TypeOf((*StartRecording_Task)(nil)).Elem()\n}\n\ntype StartRecording_TaskResponse struct {\n\tReturnval ManagedObjectReference 
`xml:\"returnval\"`\n}\n\ntype StartReplayingRequestType struct {\n\tThis           ManagedObjectReference `xml:\"_this\"`\n\tReplaySnapshot ManagedObjectReference `xml:\"replaySnapshot\"`\n}\n\nfunc init() {\n\tt[\"StartReplayingRequestType\"] = reflect.TypeOf((*StartReplayingRequestType)(nil)).Elem()\n}\n\ntype StartReplaying_Task StartReplayingRequestType\n\nfunc init() {\n\tt[\"StartReplaying_Task\"] = reflect.TypeOf((*StartReplaying_Task)(nil)).Elem()\n}\n\ntype StartReplaying_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype StartService StartServiceRequestType\n\nfunc init() {\n\tt[\"StartService\"] = reflect.TypeOf((*StartService)(nil)).Elem()\n}\n\ntype StartServiceRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tId   string                 `xml:\"id\"`\n}\n\nfunc init() {\n\tt[\"StartServiceRequestType\"] = reflect.TypeOf((*StartServiceRequestType)(nil)).Elem()\n}\n\ntype StartServiceResponse struct {\n}\n\ntype StateAlarmExpression struct {\n\tAlarmExpression\n\n\tOperator  StateAlarmOperator `xml:\"operator\"`\n\tType      string             `xml:\"type\"`\n\tStatePath string             `xml:\"statePath\"`\n\tYellow    string             `xml:\"yellow,omitempty\"`\n\tRed       string             `xml:\"red,omitempty\"`\n}\n\nfunc init() {\n\tt[\"StateAlarmExpression\"] = reflect.TypeOf((*StateAlarmExpression)(nil)).Elem()\n}\n\ntype StaticRouteProfile struct {\n\tApplyProfile\n\n\tKey string `xml:\"key,omitempty\"`\n}\n\nfunc init() {\n\tt[\"StaticRouteProfile\"] = reflect.TypeOf((*StaticRouteProfile)(nil)).Elem()\n}\n\ntype StopRecordingRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"StopRecordingRequestType\"] = reflect.TypeOf((*StopRecordingRequestType)(nil)).Elem()\n}\n\ntype StopRecording_Task StopRecordingRequestType\n\nfunc init() {\n\tt[\"StopRecording_Task\"] = reflect.TypeOf((*StopRecording_Task)(nil)).Elem()\n}\n\ntype 
StopRecording_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype StopReplayingRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"StopReplayingRequestType\"] = reflect.TypeOf((*StopReplayingRequestType)(nil)).Elem()\n}\n\ntype StopReplaying_Task StopReplayingRequestType\n\nfunc init() {\n\tt[\"StopReplaying_Task\"] = reflect.TypeOf((*StopReplaying_Task)(nil)).Elem()\n}\n\ntype StopReplaying_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype StopService StopServiceRequestType\n\nfunc init() {\n\tt[\"StopService\"] = reflect.TypeOf((*StopService)(nil)).Elem()\n}\n\ntype StopServiceRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tId   string                 `xml:\"id\"`\n}\n\nfunc init() {\n\tt[\"StopServiceRequestType\"] = reflect.TypeOf((*StopServiceRequestType)(nil)).Elem()\n}\n\ntype StopServiceResponse struct {\n}\n\ntype StorageDrsAutomationConfig struct {\n\tDynamicData\n\n\tSpaceLoadBalanceAutomationMode  string `xml:\"spaceLoadBalanceAutomationMode,omitempty\"`\n\tIoLoadBalanceAutomationMode     string `xml:\"ioLoadBalanceAutomationMode,omitempty\"`\n\tRuleEnforcementAutomationMode   string `xml:\"ruleEnforcementAutomationMode,omitempty\"`\n\tPolicyEnforcementAutomationMode string `xml:\"policyEnforcementAutomationMode,omitempty\"`\n\tVmEvacuationAutomationMode      string `xml:\"vmEvacuationAutomationMode,omitempty\"`\n}\n\nfunc init() {\n\tt[\"StorageDrsAutomationConfig\"] = reflect.TypeOf((*StorageDrsAutomationConfig)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveDiskInMultiWriterMode struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveDiskInMultiWriterMode\"] = reflect.TypeOf((*StorageDrsCannotMoveDiskInMultiWriterMode)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveDiskInMultiWriterModeFault StorageDrsCannotMoveDiskInMultiWriterMode\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveDiskInMultiWriterModeFault\"] 
= reflect.TypeOf((*StorageDrsCannotMoveDiskInMultiWriterModeFault)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveFTVm struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveFTVm\"] = reflect.TypeOf((*StorageDrsCannotMoveFTVm)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveFTVmFault StorageDrsCannotMoveFTVm\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveFTVmFault\"] = reflect.TypeOf((*StorageDrsCannotMoveFTVmFault)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveIndependentDisk struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveIndependentDisk\"] = reflect.TypeOf((*StorageDrsCannotMoveIndependentDisk)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveIndependentDiskFault StorageDrsCannotMoveIndependentDisk\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveIndependentDiskFault\"] = reflect.TypeOf((*StorageDrsCannotMoveIndependentDiskFault)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveManuallyPlacedSwapFile struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveManuallyPlacedSwapFile\"] = reflect.TypeOf((*StorageDrsCannotMoveManuallyPlacedSwapFile)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveManuallyPlacedSwapFileFault StorageDrsCannotMoveManuallyPlacedSwapFile\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveManuallyPlacedSwapFileFault\"] = reflect.TypeOf((*StorageDrsCannotMoveManuallyPlacedSwapFileFault)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveManuallyPlacedVm struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveManuallyPlacedVm\"] = reflect.TypeOf((*StorageDrsCannotMoveManuallyPlacedVm)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveManuallyPlacedVmFault StorageDrsCannotMoveManuallyPlacedVm\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveManuallyPlacedVmFault\"] = reflect.TypeOf((*StorageDrsCannotMoveManuallyPlacedVmFault)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveSharedDisk struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveSharedDisk\"] = reflect.TypeOf((*StorageDrsCannotMoveSharedDisk)(nil)).Elem()\n}\n\ntype 
StorageDrsCannotMoveSharedDiskFault StorageDrsCannotMoveSharedDisk\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveSharedDiskFault\"] = reflect.TypeOf((*StorageDrsCannotMoveSharedDiskFault)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveTemplate struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveTemplate\"] = reflect.TypeOf((*StorageDrsCannotMoveTemplate)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveTemplateFault StorageDrsCannotMoveTemplate\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveTemplateFault\"] = reflect.TypeOf((*StorageDrsCannotMoveTemplateFault)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveVmInUserFolder struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveVmInUserFolder\"] = reflect.TypeOf((*StorageDrsCannotMoveVmInUserFolder)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveVmInUserFolderFault StorageDrsCannotMoveVmInUserFolder\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveVmInUserFolderFault\"] = reflect.TypeOf((*StorageDrsCannotMoveVmInUserFolderFault)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveVmWithMountedCDROM struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveVmWithMountedCDROM\"] = reflect.TypeOf((*StorageDrsCannotMoveVmWithMountedCDROM)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveVmWithMountedCDROMFault StorageDrsCannotMoveVmWithMountedCDROM\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveVmWithMountedCDROMFault\"] = reflect.TypeOf((*StorageDrsCannotMoveVmWithMountedCDROMFault)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveVmWithNoFilesInLayout struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveVmWithNoFilesInLayout\"] = reflect.TypeOf((*StorageDrsCannotMoveVmWithNoFilesInLayout)(nil)).Elem()\n}\n\ntype StorageDrsCannotMoveVmWithNoFilesInLayoutFault StorageDrsCannotMoveVmWithNoFilesInLayout\n\nfunc init() {\n\tt[\"StorageDrsCannotMoveVmWithNoFilesInLayoutFault\"] = reflect.TypeOf((*StorageDrsCannotMoveVmWithNoFilesInLayoutFault)(nil)).Elem()\n}\n\ntype StorageDrsConfigInfo struct 
{\n\tDynamicData\n\n\tPodConfig StorageDrsPodConfigInfo  `xml:\"podConfig\"`\n\tVmConfig  []StorageDrsVmConfigInfo `xml:\"vmConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"StorageDrsConfigInfo\"] = reflect.TypeOf((*StorageDrsConfigInfo)(nil)).Elem()\n}\n\ntype StorageDrsConfigSpec struct {\n\tDynamicData\n\n\tPodConfigSpec *StorageDrsPodConfigSpec `xml:\"podConfigSpec,omitempty\"`\n\tVmConfigSpec  []StorageDrsVmConfigSpec `xml:\"vmConfigSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"StorageDrsConfigSpec\"] = reflect.TypeOf((*StorageDrsConfigSpec)(nil)).Elem()\n}\n\ntype StorageDrsDatacentersCannotShareDatastore struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"StorageDrsDatacentersCannotShareDatastore\"] = reflect.TypeOf((*StorageDrsDatacentersCannotShareDatastore)(nil)).Elem()\n}\n\ntype StorageDrsDatacentersCannotShareDatastoreFault StorageDrsDatacentersCannotShareDatastore\n\nfunc init() {\n\tt[\"StorageDrsDatacentersCannotShareDatastoreFault\"] = reflect.TypeOf((*StorageDrsDatacentersCannotShareDatastoreFault)(nil)).Elem()\n}\n\ntype StorageDrsDisabledOnVm struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"StorageDrsDisabledOnVm\"] = reflect.TypeOf((*StorageDrsDisabledOnVm)(nil)).Elem()\n}\n\ntype StorageDrsDisabledOnVmFault StorageDrsDisabledOnVm\n\nfunc init() {\n\tt[\"StorageDrsDisabledOnVmFault\"] = reflect.TypeOf((*StorageDrsDisabledOnVmFault)(nil)).Elem()\n}\n\ntype StorageDrsHbrDiskNotMovable struct {\n\tVimFault\n\n\tNonMovableDiskIds string `xml:\"nonMovableDiskIds\"`\n}\n\nfunc init() {\n\tt[\"StorageDrsHbrDiskNotMovable\"] = reflect.TypeOf((*StorageDrsHbrDiskNotMovable)(nil)).Elem()\n}\n\ntype StorageDrsHbrDiskNotMovableFault StorageDrsHbrDiskNotMovable\n\nfunc init() {\n\tt[\"StorageDrsHbrDiskNotMovableFault\"] = reflect.TypeOf((*StorageDrsHbrDiskNotMovableFault)(nil)).Elem()\n}\n\ntype StorageDrsHmsMoveInProgress struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"StorageDrsHmsMoveInProgress\"] = 
// NOTE(review): the definitions below follow a uniform, generator-style
// pattern (vSphere vim25 API bindings — presumably emitted by a code
// generator from the vSphere API definitions; confirm before hand-editing):
//   - each `type X struct { ... }` mirrors one vSphere API data object, with
//     its parent type embedded first and `xml` tags naming the wire fields;
//   - each `type XFault X` names the fault flavor of a fault type;
//   - each init() registers the type by its wire name in the package-level
//     registry map `t`, so it can be resolved reflectively by name.
// The first and last statements of this chunk are continuations of
// definitions that begin/end outside this view and are left untouched.
reflect.TypeOf((*StorageDrsHmsMoveInProgress)(nil)).Elem()
}

type StorageDrsHmsMoveInProgressFault StorageDrsHmsMoveInProgress

func init() {
	t["StorageDrsHmsMoveInProgressFault"] = reflect.TypeOf((*StorageDrsHmsMoveInProgressFault)(nil)).Elem()
}

type StorageDrsHmsUnreachable struct {
	VimFault
}

func init() {
	t["StorageDrsHmsUnreachable"] = reflect.TypeOf((*StorageDrsHmsUnreachable)(nil)).Elem()
}

type StorageDrsHmsUnreachableFault StorageDrsHmsUnreachable

func init() {
	t["StorageDrsHmsUnreachableFault"] = reflect.TypeOf((*StorageDrsHmsUnreachableFault)(nil)).Elem()
}

type StorageDrsIoLoadBalanceConfig struct {
	DynamicData

	ReservablePercentThreshold int32  `xml:"reservablePercentThreshold,omitempty"`
	ReservableIopsThreshold    int32  `xml:"reservableIopsThreshold,omitempty"`
	ReservableThresholdMode    string `xml:"reservableThresholdMode,omitempty"`
	IoLatencyThreshold         int32  `xml:"ioLatencyThreshold,omitempty"`
	IoLoadImbalanceThreshold   int32  `xml:"ioLoadImbalanceThreshold,omitempty"`
}

func init() {
	t["StorageDrsIoLoadBalanceConfig"] = reflect.TypeOf((*StorageDrsIoLoadBalanceConfig)(nil)).Elem()
}

type StorageDrsIolbDisabledInternally struct {
	VimFault
}

func init() {
	t["StorageDrsIolbDisabledInternally"] = reflect.TypeOf((*StorageDrsIolbDisabledInternally)(nil)).Elem()
}

type StorageDrsIolbDisabledInternallyFault StorageDrsIolbDisabledInternally

func init() {
	t["StorageDrsIolbDisabledInternallyFault"] = reflect.TypeOf((*StorageDrsIolbDisabledInternallyFault)(nil)).Elem()
}

type StorageDrsOptionSpec struct {
	ArrayUpdateSpec

	Option BaseOptionValue `xml:"option,omitempty,typeattr"`
}

func init() {
	t["StorageDrsOptionSpec"] = reflect.TypeOf((*StorageDrsOptionSpec)(nil)).Elem()
}

type StorageDrsPlacementRankVmSpec struct {
	DynamicData

	VmPlacementSpec PlacementSpec            `xml:"vmPlacementSpec"`
	VmClusters      []ManagedObjectReference `xml:"vmClusters"`
}

func init() {
	t["StorageDrsPlacementRankVmSpec"] = reflect.TypeOf((*StorageDrsPlacementRankVmSpec)(nil)).Elem()
}

type StorageDrsPodConfigInfo struct {
	DynamicData

	Enabled                bool                              `xml:"enabled"`
	IoLoadBalanceEnabled   bool                              `xml:"ioLoadBalanceEnabled"`
	DefaultVmBehavior      string                            `xml:"defaultVmBehavior"`
	LoadBalanceInterval    int32                             `xml:"loadBalanceInterval,omitempty"`
	DefaultIntraVmAffinity *bool                             `xml:"defaultIntraVmAffinity"`
	SpaceLoadBalanceConfig *StorageDrsSpaceLoadBalanceConfig `xml:"spaceLoadBalanceConfig,omitempty"`
	IoLoadBalanceConfig    *StorageDrsIoLoadBalanceConfig    `xml:"ioLoadBalanceConfig,omitempty"`
	AutomationOverrides    *StorageDrsAutomationConfig       `xml:"automationOverrides,omitempty"`
	Rule                   []BaseClusterRuleInfo             `xml:"rule,omitempty,typeattr"`
	Option                 []BaseOptionValue                 `xml:"option,omitempty,typeattr"`
}

func init() {
	t["StorageDrsPodConfigInfo"] = reflect.TypeOf((*StorageDrsPodConfigInfo)(nil)).Elem()
}

// NOTE(review): the *Spec variant mirrors the *Info variant above but with
// pointer/optional fields — consistent with a read-state vs. patch-request
// pairing; verify against the vSphere API reference before relying on it.
type StorageDrsPodConfigSpec struct {
	DynamicData

	Enabled                *bool                             `xml:"enabled"`
	IoLoadBalanceEnabled   *bool                             `xml:"ioLoadBalanceEnabled"`
	DefaultVmBehavior      string                            `xml:"defaultVmBehavior,omitempty"`
	LoadBalanceInterval    int32                             `xml:"loadBalanceInterval,omitempty"`
	DefaultIntraVmAffinity *bool                             `xml:"defaultIntraVmAffinity"`
	SpaceLoadBalanceConfig *StorageDrsSpaceLoadBalanceConfig `xml:"spaceLoadBalanceConfig,omitempty"`
	IoLoadBalanceConfig    *StorageDrsIoLoadBalanceConfig    `xml:"ioLoadBalanceConfig,omitempty"`
	AutomationOverrides    *StorageDrsAutomationConfig       `xml:"automationOverrides,omitempty"`
	Rule                   []ClusterRuleSpec                 `xml:"rule,omitempty"`
	Option                 []StorageDrsOptionSpec            `xml:"option,omitempty"`
}

func init() {
	t["StorageDrsPodConfigSpec"] = reflect.TypeOf((*StorageDrsPodConfigSpec)(nil)).Elem()
}

type StorageDrsPodSelectionSpec struct {
	DynamicData

	InitialVmConfig []VmPodConfigForPlacement `xml:"initialVmConfig,omitempty"`
	StoragePod      *ManagedObjectReference   `xml:"storagePod,omitempty"`
}

func init() {
	t["StorageDrsPodSelectionSpec"] = reflect.TypeOf((*StorageDrsPodSelectionSpec)(nil)).Elem()
}

type StorageDrsRelocateDisabled struct {
	VimFault
}

func init() {
	t["StorageDrsRelocateDisabled"] = reflect.TypeOf((*StorageDrsRelocateDisabled)(nil)).Elem()
}

type StorageDrsRelocateDisabledFault StorageDrsRelocateDisabled

func init() {
	t["StorageDrsRelocateDisabledFault"] = reflect.TypeOf((*StorageDrsRelocateDisabledFault)(nil)).Elem()
}

type StorageDrsSpaceLoadBalanceConfig struct {
	DynamicData

	SpaceThresholdMode            string `xml:"spaceThresholdMode,omitempty"`
	SpaceUtilizationThreshold     int32  `xml:"spaceUtilizationThreshold,omitempty"`
	FreeSpaceThresholdGB          int32  `xml:"freeSpaceThresholdGB,omitempty"`
	MinSpaceUtilizationDifference int32  `xml:"minSpaceUtilizationDifference,omitempty"`
}

func init() {
	t["StorageDrsSpaceLoadBalanceConfig"] = reflect.TypeOf((*StorageDrsSpaceLoadBalanceConfig)(nil)).Elem()
}

type StorageDrsStaleHmsCollection struct {
	VimFault
}

func init() {
	t["StorageDrsStaleHmsCollection"] = reflect.TypeOf((*StorageDrsStaleHmsCollection)(nil)).Elem()
}

type StorageDrsStaleHmsCollectionFault StorageDrsStaleHmsCollection

func init() {
	t["StorageDrsStaleHmsCollectionFault"] = reflect.TypeOf((*StorageDrsStaleHmsCollectionFault)(nil)).Elem()
}

type StorageDrsUnableToMoveFiles struct {
	VimFault
}

func init() {
	t["StorageDrsUnableToMoveFiles"] = reflect.TypeOf((*StorageDrsUnableToMoveFiles)(nil)).Elem()
}

type StorageDrsUnableToMoveFilesFault StorageDrsUnableToMoveFiles

func init() {
	t["StorageDrsUnableToMoveFilesFault"] = reflect.TypeOf((*StorageDrsUnableToMoveFilesFault)(nil)).Elem()
}

type StorageDrsVmConfigInfo struct {
	DynamicData

	Vm                  *ManagedObjectReference          `xml:"vm,omitempty"`
	Enabled             *bool                            `xml:"enabled"`
	Behavior            string                           `xml:"behavior,omitempty"`
	IntraVmAffinity     *bool                            `xml:"intraVmAffinity"`
	IntraVmAntiAffinity *VirtualDiskAntiAffinityRuleSpec `xml:"intraVmAntiAffinity,omitempty"`
}

func init() {
	t["StorageDrsVmConfigInfo"] = reflect.TypeOf((*StorageDrsVmConfigInfo)(nil)).Elem()
}

type StorageDrsVmConfigSpec struct {
	ArrayUpdateSpec

	Info *StorageDrsVmConfigInfo `xml:"info,omitempty"`
}

func init() {
	t["StorageDrsVmConfigSpec"] = reflect.TypeOf((*StorageDrsVmConfigSpec)(nil)).Elem()
}

type StorageIOAllocationInfo struct {
	DynamicData

	Limit       int64       `xml:"limit,omitempty"`
	Shares      *SharesInfo `xml:"shares,omitempty"`
	Reservation int32       `xml:"reservation,omitempty"`
}

func init() {
	t["StorageIOAllocationInfo"] = reflect.TypeOf((*StorageIOAllocationInfo)(nil)).Elem()
}

type StorageIOAllocationOption struct {
	DynamicData

	LimitOption  LongOption   `xml:"limitOption"`
	SharesOption SharesOption `xml:"sharesOption"`
}

func init() {
	t["StorageIOAllocationOption"] = reflect.TypeOf((*StorageIOAllocationOption)(nil)).Elem()
}

type StorageIORMConfigOption struct {
	DynamicData

	EnabledOption                BoolOption  `xml:"enabledOption"`
	CongestionThresholdOption    IntOption   `xml:"congestionThresholdOption"`
	StatsCollectionEnabledOption *BoolOption `xml:"statsCollectionEnabledOption,omitempty"`
	ReservationEnabledOption     *BoolOption `xml:"reservationEnabledOption,omitempty"`
}

func init() {
	t["StorageIORMConfigOption"] = reflect.TypeOf((*StorageIORMConfigOption)(nil)).Elem()
}

type StorageIORMConfigSpec struct {
	DynamicData

	Enabled                  *bool  `xml:"enabled"`
	CongestionThresholdMode  string `xml:"congestionThresholdMode,omitempty"`
	CongestionThreshold      int32  `xml:"congestionThreshold,omitempty"`
	PercentOfPeakThroughput  int32  `xml:"percentOfPeakThroughput,omitempty"`
	StatsCollectionEnabled   *bool  `xml:"statsCollectionEnabled"`
	ReservationEnabled       *bool  `xml:"reservationEnabled"`
	StatsAggregationDisabled *bool  `xml:"statsAggregationDisabled"`
	ReservableIopsThreshold  int32  `xml:"reservableIopsThreshold,omitempty"`
}

func init() {
	t["StorageIORMConfigSpec"] = reflect.TypeOf((*StorageIORMConfigSpec)(nil)).Elem()
}

type StorageIORMInfo struct {
	DynamicData

	Enabled                  bool   `xml:"enabled"`
	CongestionThresholdMode  string `xml:"congestionThresholdMode,omitempty"`
	CongestionThreshold      int32  `xml:"congestionThreshold"`
	PercentOfPeakThroughput  int32  `xml:"percentOfPeakThroughput,omitempty"`
	StatsCollectionEnabled   *bool  `xml:"statsCollectionEnabled"`
	ReservationEnabled       *bool  `xml:"reservationEnabled"`
	StatsAggregationDisabled *bool  `xml:"statsAggregationDisabled"`
	ReservableIopsThreshold  int32  `xml:"reservableIopsThreshold,omitempty"`
}

func init() {
	t["StorageIORMInfo"] = reflect.TypeOf((*StorageIORMInfo)(nil)).Elem()
}

type StorageMigrationAction struct {
	ClusterAction

	Vm                 ManagedObjectReference     `xml:"vm"`
	RelocateSpec       VirtualMachineRelocateSpec `xml:"relocateSpec"`
	Source             ManagedObjectReference     `xml:"source"`
	Destination        ManagedObjectReference     `xml:"destination"`
	SizeTransferred    int64                      `xml:"sizeTransferred"`
	SpaceUtilSrcBefore float32                    `xml:"spaceUtilSrcBefore,omitempty"`
	SpaceUtilDstBefore float32                    `xml:"spaceUtilDstBefore,omitempty"`
	SpaceUtilSrcAfter  float32                    `xml:"spaceUtilSrcAfter,omitempty"`
	SpaceUtilDstAfter  float32                    `xml:"spaceUtilDstAfter,omitempty"`
	IoLatencySrcBefore float32                    `xml:"ioLatencySrcBefore,omitempty"`
	IoLatencyDstBefore float32                    `xml:"ioLatencyDstBefore,omitempty"`
}

func init() {
	t["StorageMigrationAction"] = reflect.TypeOf((*StorageMigrationAction)(nil)).Elem()
}

type StoragePerformanceSummary struct {
	DynamicData

	Interval              int32     `xml:"interval"`
	Percentile            []int32   `xml:"percentile"`
	DatastoreReadLatency  []float64 `xml:"datastoreReadLatency"`
	DatastoreWriteLatency []float64 `xml:"datastoreWriteLatency"`
	DatastoreVmLatency    []float64 `xml:"datastoreVmLatency"`
	DatastoreReadIops     []float64 `xml:"datastoreReadIops"`
	DatastoreWriteIops    []float64 `xml:"datastoreWriteIops"`
	SiocActivityDuration  int32     `xml:"siocActivityDuration"`
}

func init() {
	t["StoragePerformanceSummary"] = reflect.TypeOf((*StoragePerformanceSummary)(nil)).Elem()
}

type StoragePlacementAction struct {
	ClusterAction

	Vm                *ManagedObjectReference    `xml:"vm,omitempty"`
	RelocateSpec      VirtualMachineRelocateSpec `xml:"relocateSpec"`
	Destination       ManagedObjectReference     `xml:"destination"`
	SpaceUtilBefore   float32                    `xml:"spaceUtilBefore,omitempty"`
	SpaceDemandBefore float32                    `xml:"spaceDemandBefore,omitempty"`
	SpaceUtilAfter    float32                    `xml:"spaceUtilAfter,omitempty"`
	SpaceDemandAfter  float32                    `xml:"spaceDemandAfter,omitempty"`
	IoLatencyBefore   float32                    `xml:"ioLatencyBefore,omitempty"`
}

func init() {
	t["StoragePlacementAction"] = reflect.TypeOf((*StoragePlacementAction)(nil)).Elem()
}

type StoragePlacementResult struct {
	DynamicData

	Recommendations []ClusterRecommendation `xml:"recommendations,omitempty"`
	DrsFault        *ClusterDrsFaults       `xml:"drsFault,omitempty"`
	Task            *ManagedObjectReference `xml:"task,omitempty"`
}

func init() {
	t["StoragePlacementResult"] = reflect.TypeOf((*StoragePlacementResult)(nil)).Elem()
}

type StoragePlacementSpec struct {
	DynamicData

	Type                      string                      `xml:"type"`
	Priority                  VirtualMachineMovePriority  `xml:"priority,omitempty"`
	Vm                        *ManagedObjectReference     `xml:"vm,omitempty"`
	PodSelectionSpec          StorageDrsPodSelectionSpec  `xml:"podSelectionSpec"`
	CloneSpec                 *VirtualMachineCloneSpec    `xml:"cloneSpec,omitempty"`
	CloneName                 string                      `xml:"cloneName,omitempty"`
	ConfigSpec                *VirtualMachineConfigSpec   `xml:"configSpec,omitempty"`
	RelocateSpec              *VirtualMachineRelocateSpec `xml:"relocateSpec,omitempty"`
	ResourcePool              *ManagedObjectReference     `xml:"resourcePool,omitempty"`
	Host                      *ManagedObjectReference     `xml:"host,omitempty"`
	Folder                    *ManagedObjectReference     `xml:"folder,omitempty"`
	DisallowPrerequisiteMoves *bool                       `xml:"disallowPrerequisiteMoves"`
	ResourceLeaseDurationSec  int32                       `xml:"resourceLeaseDurationSec,omitempty"`
}

func init() {
	t["StoragePlacementSpec"] = reflect.TypeOf((*StoragePlacementSpec)(nil)).Elem()
}

type StoragePodSummary struct {
	DynamicData

	Name      string `xml:"name"`
	Capacity  int64  `xml:"capacity"`
	FreeSpace int64  `xml:"freeSpace"`
}

func init() {
	t["StoragePodSummary"] = reflect.TypeOf((*StoragePodSummary)(nil)).Elem()
}

type StorageProfile struct {
	ApplyProfile

	NasStorage []NasStorageProfile `xml:"nasStorage,omitempty"`
}

func init() {
	t["StorageProfile"] = reflect.TypeOf((*StorageProfile)(nil)).Elem()
}

type StorageRequirement struct {
	DynamicData

	Datastore             ManagedObjectReference `xml:"datastore"`
	FreeSpaceRequiredInKb int64                  `xml:"freeSpaceRequiredInKb"`
}

func init() {
	t["StorageRequirement"] = reflect.TypeOf((*StorageRequirement)(nil)).Elem()
}

type StorageResourceManagerStorageProfileStatistics struct {
	DynamicData

	ProfileId    string `xml:"profileId"`
	TotalSpaceMB int64  `xml:"totalSpaceMB"`
	UsedSpaceMB  int64  `xml:"usedSpaceMB"`
}

func init() {
	t["StorageResourceManagerStorageProfileStatistics"] = reflect.TypeOf((*StorageResourceManagerStorageProfileStatistics)(nil)).Elem()
}

type StorageVMotionNotSupported struct {
	MigrationFeatureNotSupported
}

func init() {
	t["StorageVMotionNotSupported"] = reflect.TypeOf((*StorageVMotionNotSupported)(nil)).Elem()
}

type StorageVMotionNotSupportedFault StorageVMotionNotSupported

func init() {
	t["StorageVMotionNotSupportedFault"] = reflect.TypeOf((*StorageVMotionNotSupportedFault)(nil)).Elem()
}

type StorageVmotionIncompatible struct {
	VirtualHardwareCompatibilityIssue

	Datastore *ManagedObjectReference `xml:"datastore,omitempty"`
}

func init() {
	t["StorageVmotionIncompatible"] = reflect.TypeOf((*StorageVmotionIncompatible)(nil)).Elem()
}

type StorageVmotionIncompatibleFault StorageVmotionIncompatible

func init() {
	t["StorageVmotionIncompatibleFault"] = reflect.TypeOf((*StorageVmotionIncompatibleFault)(nil)).Elem()
}

type StringExpression struct {
	NegatableExpression

	Value string `xml:"value,omitempty"`
}

func init() {
	t["StringExpression"] = reflect.TypeOf((*StringExpression)(nil)).Elem()
}

type StringOption struct {
	OptionType

	DefaultValue    string `xml:"defaultValue"`
	ValidCharacters string `xml:"validCharacters,omitempty"`
}

func init() {
	t["StringOption"] = reflect.TypeOf((*StringOption)(nil)).Elem()
}

type StringPolicy struct {
	InheritablePolicy

	Value string `xml:"value,omitempty"`
}

func init() {
	t["StringPolicy"] = reflect.TypeOf((*StringPolicy)(nil)).Elem()
}

type StructuredCustomizations struct {
	HostProfilesEntityCustomizations

	Entity         ManagedObjectReference `xml:"entity"`
	Customizations *AnswerFile            `xml:"customizations,omitempty"`
}

func init() {
	t["StructuredCustomizations"] = reflect.TypeOf((*StructuredCustomizations)(nil)).Elem()
}

// NOTE(review): the Request/Response triples below follow the generated RPC
// pattern: an `XRequestType` struct carrying the `_this` target reference
// plus arguments, an `X` (or `X_Task`) alias of the request type, and an
// `XResponse`/`X_TaskResponse` struct whose `Returnval` (when present)
// carries the result reference.
type SuspendVAppRequestType struct {
	This ManagedObjectReference `xml:"_this"`
}

func init() {
	t["SuspendVAppRequestType"] = reflect.TypeOf((*SuspendVAppRequestType)(nil)).Elem()
}

type SuspendVApp_Task SuspendVAppRequestType

func init() {
	t["SuspendVApp_Task"] = reflect.TypeOf((*SuspendVApp_Task)(nil)).Elem()
}

type SuspendVApp_TaskResponse struct {
	Returnval ManagedObjectReference `xml:"returnval"`
}

type SuspendVMRequestType struct {
	This ManagedObjectReference `xml:"_this"`
}

func init() {
	t["SuspendVMRequestType"] = reflect.TypeOf((*SuspendVMRequestType)(nil)).Elem()
}

type SuspendVM_Task SuspendVMRequestType

func init() {
	t["SuspendVM_Task"] = reflect.TypeOf((*SuspendVM_Task)(nil)).Elem()
}

type SuspendVM_TaskResponse struct {
	Returnval ManagedObjectReference `xml:"returnval"`
}

type SuspendedRelocateNotSupported struct {
	MigrationFault
}

func init() {
	t["SuspendedRelocateNotSupported"] = reflect.TypeOf((*SuspendedRelocateNotSupported)(nil)).Elem()
}

type SuspendedRelocateNotSupportedFault SuspendedRelocateNotSupported

func init() {
	t["SuspendedRelocateNotSupportedFault"] = reflect.TypeOf((*SuspendedRelocateNotSupportedFault)(nil)).Elem()
}

type SwapDatastoreNotWritableOnHost struct {
	DatastoreNotWritableOnHost
}

func init() {
	t["SwapDatastoreNotWritableOnHost"] = reflect.TypeOf((*SwapDatastoreNotWritableOnHost)(nil)).Elem()
}

type SwapDatastoreNotWritableOnHostFault SwapDatastoreNotWritableOnHost

func init() {
	t["SwapDatastoreNotWritableOnHostFault"] = reflect.TypeOf((*SwapDatastoreNotWritableOnHostFault)(nil)).Elem()
}

type SwapDatastoreUnset struct {
	VimFault
}

func init() {
	t["SwapDatastoreUnset"] = reflect.TypeOf((*SwapDatastoreUnset)(nil)).Elem()
}

type SwapDatastoreUnsetFault SwapDatastoreUnset

func init() {
	t["SwapDatastoreUnsetFault"] = reflect.TypeOf((*SwapDatastoreUnsetFault)(nil)).Elem()
}

type SwapPlacementOverrideNotSupported struct {
	InvalidVmConfig
}

func init() {
	t["SwapPlacementOverrideNotSupported"] = reflect.TypeOf((*SwapPlacementOverrideNotSupported)(nil)).Elem()
}

type SwapPlacementOverrideNotSupportedFault SwapPlacementOverrideNotSupported

func init() {
	t["SwapPlacementOverrideNotSupportedFault"] = reflect.TypeOf((*SwapPlacementOverrideNotSupportedFault)(nil)).Elem()
}

type SwitchIpUnset struct {
	DvsFault
}

func init() {
	t["SwitchIpUnset"] = reflect.TypeOf((*SwitchIpUnset)(nil)).Elem()
}

type SwitchIpUnsetFault SwitchIpUnset

func init() {
	t["SwitchIpUnsetFault"] = reflect.TypeOf((*SwitchIpUnsetFault)(nil)).Elem()
}

type SwitchNotInUpgradeMode struct {
	DvsFault
}

func init() {
	t["SwitchNotInUpgradeMode"] = reflect.TypeOf((*SwitchNotInUpgradeMode)(nil)).Elem()
}

type SwitchNotInUpgradeModeFault SwitchNotInUpgradeMode

func init() {
	t["SwitchNotInUpgradeModeFault"] = reflect.TypeOf((*SwitchNotInUpgradeModeFault)(nil)).Elem()
}

type SystemError struct {
	RuntimeFault

	Reason string `xml:"reason"`
}

func init() {
	t["SystemError"] = reflect.TypeOf((*SystemError)(nil)).Elem()
}

type SystemErrorFault SystemError

func init() {
	t["SystemErrorFault"] = reflect.TypeOf((*SystemErrorFault)(nil)).Elem()
}

type SystemEventInfo struct {
	DynamicData

	RecordId     int64  `xml:"recordId"`
	When         string `xml:"when"`
	SelType      int64  `xml:"selType"`
	Message      string `xml:"message"`
	SensorNumber int64  `xml:"sensorNumber"`
}

func init() {
	t["SystemEventInfo"] = reflect.TypeOf((*SystemEventInfo)(nil)).Elem()
}

type Tag struct {
	DynamicData

	Key string `xml:"key"`
}

func init() {
	t["Tag"] = reflect.TypeOf((*Tag)(nil)).Elem()
}

type TaskDescription struct {
	DynamicData

	MethodInfo []BaseElementDescription `xml:"methodInfo,typeattr"`
	State      []BaseElementDescription `xml:"state,typeattr"`
	Reason     []BaseTypeDescription    `xml:"reason,typeattr"`
}

func init() {
	t["TaskDescription"] = reflect.TypeOf((*TaskDescription)(nil)).Elem()
}

type TaskEvent struct {
	Event

	Info TaskInfo `xml:"info"`
}

func init() {
	t["TaskEvent"] = reflect.TypeOf((*TaskEvent)(nil)).Elem()
}

type TaskFilterSpec struct {
	DynamicData

	Entity        *TaskFilterSpecByEntity   `xml:"entity,omitempty"`
	Time          *TaskFilterSpecByTime     `xml:"time,omitempty"`
	UserName      *TaskFilterSpecByUsername `xml:"userName,omitempty"`
	ActivationId  []string                  `xml:"activationId,omitempty"`
	State         []TaskInfoState           `xml:"state,omitempty"`
	Alarm         *ManagedObjectReference   `xml:"alarm,omitempty"`
	ScheduledTask *ManagedObjectReference   `xml:"scheduledTask,omitempty"`
	EventChainId  []int32                   `xml:"eventChainId,omitempty"`
	Tag           []string                  `xml:"tag,omitempty"`
	ParentTaskKey []string                  `xml:"parentTaskKey,omitempty"`
	RootTaskKey   []string                  `xml:"rootTaskKey,omitempty"`
}

func init() {
	t["TaskFilterSpec"] = reflect.TypeOf((*TaskFilterSpec)(nil)).Elem()
}

type TaskFilterSpecByEntity struct {
	DynamicData

	Entity    ManagedObjectReference        `xml:"entity"`
	Recursion TaskFilterSpecRecursionOption `xml:"recursion"`
}

func init() {
	t["TaskFilterSpecByEntity"] = reflect.TypeOf((*TaskFilterSpecByEntity)(nil)).Elem()
}

type TaskFilterSpecByTime struct {
	DynamicData

	TimeType  TaskFilterSpecTimeOption `xml:"timeType"`
	BeginTime *time.Time               `xml:"beginTime"`
	EndTime   *time.Time               `xml:"endTime"`
}

func init() {
	t["TaskFilterSpecByTime"] = reflect.TypeOf((*TaskFilterSpecByTime)(nil)).Elem()
}

type TaskFilterSpecByUsername struct {
	DynamicData

	SystemUser bool     `xml:"systemUser"`
	UserList   []string `xml:"userList,omitempty"`
}

func init() {
	t["TaskFilterSpecByUsername"] = reflect.TypeOf((*TaskFilterSpecByUsername)(nil)).Elem()
}

type TaskInProgress struct {
	VimFault

	Task ManagedObjectReference `xml:"task"`
}

func init() {
	t["TaskInProgress"] = reflect.TypeOf((*TaskInProgress)(nil)).Elem()
}

type TaskInProgressFault BaseTaskInProgress

func init() {
	t["TaskInProgressFault"] = reflect.TypeOf((*TaskInProgressFault)(nil)).Elem()
}

type TaskInfo struct {
	DynamicData

	Key           string                   `xml:"key"`
	Task          ManagedObjectReference   `xml:"task"`
	Description   *LocalizableMessage      `xml:"description,omitempty"`
	Name          string                   `xml:"name,omitempty"`
	DescriptionId string                   `xml:"descriptionId"`
	Entity        *ManagedObjectReference  `xml:"entity,omitempty"`
	EntityName    string                   `xml:"entityName,omitempty"`
	Locked        []ManagedObjectReference `xml:"locked,omitempty"`
	State         TaskInfoState            `xml:"state"`
	Cancelled     bool                     `xml:"cancelled"`
	Cancelable    bool                     `xml:"cancelable"`
	Error         *LocalizedMethodFault    `xml:"error,omitempty"`
	Result        AnyType                  `xml:"result,omitempty,typeattr"`
	Progress      int32                    `xml:"progress,omitempty"`
	Reason        BaseTaskReason           `xml:"reason,typeattr"`
	QueueTime     time.Time                `xml:"queueTime"`
	StartTime     *time.Time               `xml:"startTime"`
	CompleteTime  *time.Time               `xml:"completeTime"`
	EventChainId  int32                    `xml:"eventChainId"`
	ChangeTag     string                   `xml:"changeTag,omitempty"`
	ParentTaskKey string                   `xml:"parentTaskKey,omitempty"`
	RootTaskKey   string                   `xml:"rootTaskKey,omitempty"`
	ActivationId  string                   `xml:"activationId,omitempty"`
}

func init() {
	t["TaskInfo"] = reflect.TypeOf((*TaskInfo)(nil)).Elem()
}

type TaskReason struct {
	DynamicData
}

func init() {
	t["TaskReason"] = reflect.TypeOf((*TaskReason)(nil)).Elem()
}

type TaskReasonAlarm struct {
	TaskReason

	AlarmName  string                 `xml:"alarmName"`
	Alarm      ManagedObjectReference `xml:"alarm"`
	EntityName string                 `xml:"entityName"`
	Entity     ManagedObjectReference `xml:"entity"`
}

func init() {
	t["TaskReasonAlarm"] = reflect.TypeOf((*TaskReasonAlarm)(nil)).Elem()
}

type TaskReasonSchedule struct {
	TaskReason

	Name          string                 `xml:"name"`
	ScheduledTask ManagedObjectReference `xml:"scheduledTask"`
}

func init() {
	t["TaskReasonSchedule"] = reflect.TypeOf((*TaskReasonSchedule)(nil)).Elem()
}

type TaskReasonSystem struct {
	TaskReason
}

func init() {
	t["TaskReasonSystem"] = reflect.TypeOf((*TaskReasonSystem)(nil)).Elem()
}

type TaskReasonUser struct {
	TaskReason

	UserName string `xml:"userName"`
}

func init() {
	t["TaskReasonUser"] = reflect.TypeOf((*TaskReasonUser)(nil)).Elem()
}

type TaskScheduler struct {
	DynamicData

	ActiveTime *time.Time `xml:"activeTime"`
	ExpireTime *time.Time `xml:"expireTime"`
}

func init() {
	t["TaskScheduler"] = reflect.TypeOf((*TaskScheduler)(nil)).Elem()
}

type TaskTimeoutEvent struct {
	TaskEvent
}

func init() {
	t["TaskTimeoutEvent"] = reflect.TypeOf((*TaskTimeoutEvent)(nil)).Elem()
}

type TeamingMatchEvent struct {
	DvsHealthStatusChangeEvent
}

func init() {
	t["TeamingMatchEvent"] = reflect.TypeOf((*TeamingMatchEvent)(nil)).Elem()
}

type TeamingMisMatchEvent struct {
	DvsHealthStatusChangeEvent
}

func init() {
	t["TeamingMisMatchEvent"] = reflect.TypeOf((*TeamingMisMatchEvent)(nil)).Elem()
}

type TemplateBeingUpgradedEvent struct {
	TemplateUpgradeEvent
}

func init() {
	t["TemplateBeingUpgradedEvent"] = reflect.TypeOf((*TemplateBeingUpgradedEvent)(nil)).Elem()
}

type TemplateConfigFileInfo struct {
	VmConfigFileInfo
}

func init() {
	t["TemplateConfigFileInfo"] = reflect.TypeOf((*TemplateConfigFileInfo)(nil)).Elem()
}

type TemplateConfigFileQuery struct {
	VmConfigFileQuery
}

func init() {
	t["TemplateConfigFileQuery"] = reflect.TypeOf((*TemplateConfigFileQuery)(nil)).Elem()
}

type TemplateUpgradeEvent struct {
	Event

	LegacyTemplate string `xml:"legacyTemplate"`
}

func init() {
	t["TemplateUpgradeEvent"] = reflect.TypeOf((*TemplateUpgradeEvent)(nil)).Elem()
}

type TemplateUpgradeFailedEvent struct {
	TemplateUpgradeEvent

	Reason LocalizedMethodFault `xml:"reason"`
}

func init() {
	t["TemplateUpgradeFailedEvent"] = reflect.TypeOf((*TemplateUpgradeFailedEvent)(nil)).Elem()
}

type TemplateUpgradedEvent struct {
	TemplateUpgradeEvent
}

func init() {
	t["TemplateUpgradedEvent"] = reflect.TypeOf((*TemplateUpgradedEvent)(nil)).Elem()
}

type TerminateFaultTolerantVMRequestType struct {
	This ManagedObjectReference  `xml:"_this"`
	Vm   *ManagedObjectReference `xml:"vm,omitempty"`
}

func init() {
	t["TerminateFaultTolerantVMRequestType"] = reflect.TypeOf((*TerminateFaultTolerantVMRequestType)(nil)).Elem()
}

type TerminateFaultTolerantVM_Task TerminateFaultTolerantVMRequestType

func init() {
	t["TerminateFaultTolerantVM_Task"] = reflect.TypeOf((*TerminateFaultTolerantVM_Task)(nil)).Elem()
}

type TerminateFaultTolerantVM_TaskResponse struct {
	Returnval ManagedObjectReference `xml:"returnval"`
}

type TerminateProcessInGuest TerminateProcessInGuestRequestType

func init() {
	t["TerminateProcessInGuest"] = reflect.TypeOf((*TerminateProcessInGuest)(nil)).Elem()
}

type TerminateProcessInGuestRequestType struct {
	This ManagedObjectReference  `xml:"_this"`
	Vm   ManagedObjectReference  `xml:"vm"`
	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
	Pid  int64                   `xml:"pid"`
}

func init() {
	t["TerminateProcessInGuestRequestType"] = reflect.TypeOf((*TerminateProcessInGuestRequestType)(nil)).Elem()
}

type TerminateProcessInGuestResponse struct {
}

type TerminateSession TerminateSessionRequestType

func init() {
	t["TerminateSession"] = reflect.TypeOf((*TerminateSession)(nil)).Elem()
}

type TerminateSessionRequestType struct {
	This      ManagedObjectReference `xml:"_this"`
	SessionId []string               `xml:"sessionId"`
}

func init() {
	t["TerminateSessionRequestType"] = reflect.TypeOf((*TerminateSessionRequestType)(nil)).Elem()
}

type TerminateSessionResponse struct {
}

type TerminateVM TerminateVMRequestType

func init() {
	t["TerminateVM"] = reflect.TypeOf((*TerminateVM)(nil)).Elem()
}

type TerminateVMRequestType struct {
	This ManagedObjectReference `xml:"_this"`
}

func init() {
	t["TerminateVMRequestType"] = reflect.TypeOf((*TerminateVMRequestType)(nil)).Elem()
}

type TerminateVMResponse struct {
}

type ThirdPartyLicenseAssignmentFailed struct {
	RuntimeFault

	Host   ManagedObjectReference `xml:"host"`
	Module string                 `xml:"module"`
	Reason string                 `xml:"reason,omitempty"`
}

func init() {
	t["ThirdPartyLicenseAssignmentFailed"] = reflect.TypeOf((*ThirdPartyLicenseAssignmentFailed)(nil)).Elem()
}

type ThirdPartyLicenseAssignmentFailedFault ThirdPartyLicenseAssignmentFailed

func init() {
	t["ThirdPartyLicenseAssignmentFailedFault"] = reflect.TypeOf((*ThirdPartyLicenseAssignmentFailedFault)(nil)).Elem()
}

type TicketedSessionAuthentication struct {
	GuestAuthentication

	Ticket string `xml:"ticket"`
}

func init() {
	t["TicketedSessionAuthentication"] = reflect.TypeOf((*TicketedSessionAuthentication)(nil)).Elem()
}

type TimedOutHostOperationEvent struct {
	HostEvent
}

func init() {
	t["TimedOutHostOperationEvent"] = reflect.TypeOf((*TimedOutHostOperationEvent)(nil)).Elem()
}

type Timedout struct {
	VimFault
}

func init() {
	t["Timedout"] = reflect.TypeOf((*Timedout)(nil)).Elem()
}

type TimedoutFault BaseTimedout

func init() {
	t["TimedoutFault"] = reflect.TypeOf((*TimedoutFault)(nil)).Elem()
}

type TooManyConcurrentNativeClones struct {
	FileFault
}

func init() {
	t["TooManyConcurrentNativeClones"] = reflect.TypeOf((*TooManyConcurrentNativeClones)(nil)).Elem()
}

type TooManyConcurrentNativeClonesFault TooManyConcurrentNativeClones

func init() {
	t["TooManyConcurrentNativeClonesFault"] = reflect.TypeOf((*TooManyConcurrentNativeClonesFault)(nil)).Elem()
}

type TooManyConsecutiveOverrides struct {
	VimFault
}

func init() {
	t["TooManyConsecutiveOverrides"] = reflect.TypeOf((*TooManyConsecutiveOverrides)(nil)).Elem()
}

type TooManyConsecutiveOverridesFault TooManyConsecutiveOverrides

func init() {
	t["TooManyConsecutiveOverridesFault"] = reflect.TypeOf((*TooManyConsecutiveOverridesFault)(nil)).Elem()
}

type TooManyDevices struct {
	InvalidVmConfig
}

func init() {
	t["TooManyDevices"] = reflect.TypeOf((*TooManyDevices)(nil)).Elem()
}

type TooManyDevicesFault TooManyDevices

func init() {
	t["TooManyDevicesFault"] = reflect.TypeOf((*TooManyDevicesFault)(nil)).Elem()
}

type TooManyDisksOnLegacyHost struct {
	MigrationFault

	DiskCount     int32 `xml:"diskCount"`
	TimeoutDanger bool  `xml:"timeoutDanger"`
}

func init() {
	t["TooManyDisksOnLegacyHost"] = reflect.TypeOf((*TooManyDisksOnLegacyHost)(nil)).Elem()
}

type TooManyDisksOnLegacyHostFault TooManyDisksOnLegacyHost

func init() {
	t["TooManyDisksOnLegacyHostFault"] = reflect.TypeOf((*TooManyDisksOnLegacyHostFault)(nil)).Elem()
}

type TooManyGuestLogons struct {
	GuestOperationsFault
}

func init() {
	t["TooManyGuestLogons"] = reflect.TypeOf((*TooManyGuestLogons)(nil)).Elem()
}

type TooManyGuestLogonsFault TooManyGuestLogons

func init() {
	t["TooManyGuestLogonsFault"] = reflect.TypeOf((*TooManyGuestLogonsFault)(nil)).Elem()
}

type TooManyHosts struct {
	HostConnectFault
}

func init() {
	t["TooManyHosts"] = reflect.TypeOf((*TooManyHosts)(nil)).Elem()
}

type TooManyHostsFault TooManyHosts

func init() {
	t["TooManyHostsFault"] = reflect.TypeOf((*TooManyHostsFault)(nil)).Elem()
}

type TooManyNativeCloneLevels struct {
	FileFault
}

func init() {
	t["TooManyNativeCloneLevels"] = reflect.TypeOf((*TooManyNativeCloneLevels)(nil)).Elem()
}

type TooManyNativeCloneLevelsFault TooManyNativeCloneLevels

func init() {
	t["TooManyNativeCloneLevelsFault"] = reflect.TypeOf((*TooManyNativeCloneLevelsFault)(nil)).Elem()
}

type TooManyNativeClonesOnFile struct {
	FileFault
}

func init() {
	t["TooManyNativeClonesOnFile"] = reflect.TypeOf((*TooManyNativeClonesOnFile)(nil)).Elem()
}

type TooManyNativeClonesOnFileFault TooManyNativeClonesOnFile

func init() {
	t["TooManyNativeClonesOnFileFault"] = reflect.TypeOf((*TooManyNativeClonesOnFileFault)(nil)).Elem()
}

type TooManySnapshotLevels struct {
	SnapshotFault
}

func init() {
	t["TooManySnapshotLevels"] = reflect.TypeOf((*TooManySnapshotLevels)(nil)).Elem()
}

type TooManySnapshotLevelsFault TooManySnapshotLevels

func init() {
	t["TooManySnapshotLevelsFault"] = reflect.TypeOf((*TooManySnapshotLevelsFault)(nil)).Elem()
}

type ToolsAlreadyUpgraded struct {
	VmToolsUpgradeFault
}

func init() {
	t["ToolsAlreadyUpgraded"] = reflect.TypeOf((*ToolsAlreadyUpgraded)(nil)).Elem()
}

type ToolsAlreadyUpgradedFault ToolsAlreadyUpgraded

func init() {
	t["ToolsAlreadyUpgradedFault"] = reflect.TypeOf((*ToolsAlreadyUpgradedFault)(nil)).Elem()
}

type ToolsAutoUpgradeNotSupported struct {
	VmToolsUpgradeFault
}

func init() {
	t["ToolsAutoUpgradeNotSupported"] = reflect.TypeOf((*ToolsAutoUpgradeNotSupported)(nil)).Elem()
}

type ToolsAutoUpgradeNotSupportedFault ToolsAutoUpgradeNotSupported

func init() {
	t["ToolsAutoUpgradeNotSupportedFault"] = reflect.TypeOf((*ToolsAutoUpgradeNotSupportedFault)(nil)).Elem()
}

type ToolsConfigInfo struct {
	DynamicData

	ToolsVersion         int32                                `xml:"toolsVersion,omitempty"`
	ToolsInstallType     string                               `xml:"toolsInstallType,omitempty"`
	AfterPowerOn         *bool                                `xml:"afterPowerOn"`
	AfterResume          *bool                                `xml:"afterResume"`
	BeforeGuestStandby   *bool                                `xml:"beforeGuestStandby"`
	BeforeGuestShutdown  *bool                                `xml:"beforeGuestShutdown"`
	BeforeGuestReboot    *bool                                `xml:"beforeGuestReboot"`
	ToolsUpgradePolicy   string                               `xml:"toolsUpgradePolicy,omitempty"`
	PendingCustomization string                               `xml:"pendingCustomization,omitempty"`
	CustomizationKeyId   *CryptoKeyId                         `xml:"customizationKeyId,omitempty"`
	SyncTimeWithHost     *bool                                `xml:"syncTimeWithHost"`
	LastInstallInfo      *ToolsConfigInfoToolsLastInstallInfo `xml:"lastInstallInfo,omitempty"`
}

func init() {
	t["ToolsConfigInfo"] = reflect.TypeOf((*ToolsConfigInfo)(nil)).Elem()
}

type ToolsConfigInfoToolsLastInstallInfo struct {
	DynamicData

	Counter int32                 `xml:"counter"`
	Fault   *LocalizedMethodFault `xml:"fault,omitempty"`
}

func init() {
	t["ToolsConfigInfoToolsLastInstallInfo"] = reflect.TypeOf((*ToolsConfigInfoToolsLastInstallInfo)(nil)).Elem()
}

type ToolsImageCopyFailed struct {
	VmToolsUpgradeFault
}

func init() {
	t["ToolsImageCopyFailed"] = reflect.TypeOf((*ToolsImageCopyFailed)(nil)).Elem()
}

type ToolsImageCopyFailedFault ToolsImageCopyFailed

func init() {
	t["ToolsImageCopyFailedFault"] = reflect.TypeOf((*ToolsImageCopyFailedFault)(nil)).Elem()
}

type ToolsImageNotAvailable struct {
	VmToolsUpgradeFault
}

func init() {
	t["ToolsImageNotAvailable"] = reflect.TypeOf((*ToolsImageNotAvailable)(nil)).Elem()
}

type ToolsImageNotAvailableFault ToolsImageNotAvailable

func init() {
	t["ToolsImageNotAvailableFault"] = reflect.TypeOf((*ToolsImageNotAvailableFault)(nil)).Elem()
}

type ToolsImageSignatureCheckFailed struct {
	VmToolsUpgradeFault
}

func init() {
	t["ToolsImageSignatureCheckFailed"] = reflect.TypeOf((*ToolsImageSignatureCheckFailed)(nil)).Elem()
}

type ToolsImageSignatureCheckFailedFault ToolsImageSignatureCheckFailed

func init() {
	t["ToolsImageSignatureCheckFailedFault"] = reflect.TypeOf((*ToolsImageSignatureCheckFailedFault)(nil)).Elem()
}

type ToolsInstallationInProgress struct {
	MigrationFault
}

func init() {
	t["ToolsInstallationInProgress"] = reflect.TypeOf((*ToolsInstallationInProgress)(nil)).Elem()
}

type ToolsInstallationInProgressFault ToolsInstallationInProgress

func init() {
	t["ToolsInstallationInProgressFault"] = reflect.TypeOf((*ToolsInstallationInProgressFault)(nil)).Elem()
}

type ToolsUnavailable struct {
	VimFault
}

func init() {
	t["ToolsUnavailable"] = reflect.TypeOf((*ToolsUnavailable)(nil)).Elem()
}

type ToolsUnavailableFault ToolsUnavailable

func init() {
	t["ToolsUnavailableFault"] = reflect.TypeOf((*ToolsUnavailableFault)(nil)).Elem()
}

type ToolsUpgradeCancelled struct {
	VmToolsUpgradeFault
}

func init() {
	t["ToolsUpgradeCancelled"] = reflect.TypeOf((*ToolsUpgradeCancelled)(nil)).Elem()
}

type ToolsUpgradeCancelledFault ToolsUpgradeCancelled

func init() {
	t["ToolsUpgradeCancelledFault"] = reflect.TypeOf((*ToolsUpgradeCancelledFault)(nil)).Elem()
}

type TraversalSpec struct {
	SelectionSpec

	Type      string              `xml:"type"`
	Path      string              `xml:"path"`
	Skip      *bool               `xml:"skip"`
	SelectSet []BaseSelectionSpec `xml:"selectSet,omitempty,typeattr"`
}

func init() {
	t["TraversalSpec"] = reflect.TypeOf((*TraversalSpec)(nil)).Elem()
}

type TurnDiskLocatorLedOffRequestType struct {
	This          ManagedObjectReference `xml:"_this"`
	ScsiDiskUuids []string               `xml:"scsiDiskUuids"`
}

func init() {
	t["TurnDiskLocatorLedOffRequestType"] = reflect.TypeOf((*TurnDiskLocatorLedOffRequestType)(nil)).Elem()
}

type TurnDiskLocatorLedOff_Task TurnDiskLocatorLedOffRequestType

func init() {
	t["TurnDiskLocatorLedOff_Task"] = reflect.TypeOf((*TurnDiskLocatorLedOff_Task)(nil)).Elem()
}

type TurnDiskLocatorLedOff_TaskResponse struct {
	Returnval ManagedObjectReference `xml:"returnval"`
}

type TurnDiskLocatorLedOnRequestType struct {
	This          ManagedObjectReference `xml:"_this"`
	ScsiDiskUuids []string               `xml:"scsiDiskUuids"`
}

func init() {
	t["TurnDiskLocatorLedOnRequestType"] = reflect.TypeOf((*TurnDiskLocatorLedOnRequestType)(nil)).Elem()
}

type TurnDiskLocatorLedOn_Task TurnDiskLocatorLedOnRequestType

func init() {
	t["TurnDiskLocatorLedOn_Task"] = reflect.TypeOf((*TurnDiskLocatorLedOn_Task)(nil)).Elem()
}

type TurnDiskLocatorLedOn_TaskResponse struct {
	Returnval ManagedObjectReference `xml:"returnval"`
}

type TurnOffFaultToleranceForVMRequestType struct {
	This ManagedObjectReference `xml:"_this"`
}

func init() {
	t["TurnOffFaultToleranceForVMRequestType"] = reflect.TypeOf((*TurnOffFaultToleranceForVMRequestType)(nil)).Elem()
}

type TurnOffFaultToleranceForVM_Task TurnOffFaultToleranceForVMRequestType

func init() {
	t["TurnOffFaultToleranceForVM_Task"] = reflect.TypeOf((*TurnOffFaultToleranceForVM_Task)(nil)).Elem()
}

type TurnOffFaultToleranceForVM_TaskResponse struct {
	Returnval ManagedObjectReference `xml:"returnval"`
}

type TypeDescription struct {
	Description

	Key string `xml:"key"`
}

func init() {
	t["TypeDescription"] = reflect.TypeOf((*TypeDescription)(nil)).Elem()
}

type UnSupportedDatastoreForVFlash struct {
	UnsupportedDatastore

	DatastoreName string `xml:"datastoreName"`
	Type          string `xml:"type"`
}

func init() {
	t["UnSupportedDatastoreForVFlash"] = 
reflect.TypeOf((*UnSupportedDatastoreForVFlash)(nil)).Elem()\n}\n\ntype UnSupportedDatastoreForVFlashFault UnSupportedDatastoreForVFlash\n\nfunc init() {\n\tt[\"UnSupportedDatastoreForVFlashFault\"] = reflect.TypeOf((*UnSupportedDatastoreForVFlashFault)(nil)).Elem()\n}\n\ntype UnassignUserFromGroup UnassignUserFromGroupRequestType\n\nfunc init() {\n\tt[\"UnassignUserFromGroup\"] = reflect.TypeOf((*UnassignUserFromGroup)(nil)).Elem()\n}\n\ntype UnassignUserFromGroupRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tUser  string                 `xml:\"user\"`\n\tGroup string                 `xml:\"group\"`\n}\n\nfunc init() {\n\tt[\"UnassignUserFromGroupRequestType\"] = reflect.TypeOf((*UnassignUserFromGroupRequestType)(nil)).Elem()\n}\n\ntype UnassignUserFromGroupResponse struct {\n}\n\ntype UnbindVnic UnbindVnicRequestType\n\nfunc init() {\n\tt[\"UnbindVnic\"] = reflect.TypeOf((*UnbindVnic)(nil)).Elem()\n}\n\ntype UnbindVnicRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tIScsiHbaName string                 `xml:\"iScsiHbaName\"`\n\tVnicDevice   string                 `xml:\"vnicDevice\"`\n\tForce        bool                   `xml:\"force\"`\n}\n\nfunc init() {\n\tt[\"UnbindVnicRequestType\"] = reflect.TypeOf((*UnbindVnicRequestType)(nil)).Elem()\n}\n\ntype UnbindVnicResponse struct {\n}\n\ntype UncommittedUndoableDisk struct {\n\tMigrationFault\n}\n\nfunc init() {\n\tt[\"UncommittedUndoableDisk\"] = reflect.TypeOf((*UncommittedUndoableDisk)(nil)).Elem()\n}\n\ntype UncommittedUndoableDiskFault UncommittedUndoableDisk\n\nfunc init() {\n\tt[\"UncommittedUndoableDiskFault\"] = reflect.TypeOf((*UncommittedUndoableDiskFault)(nil)).Elem()\n}\n\ntype UnconfiguredPropertyValue struct {\n\tInvalidPropertyValue\n}\n\nfunc init() {\n\tt[\"UnconfiguredPropertyValue\"] = reflect.TypeOf((*UnconfiguredPropertyValue)(nil)).Elem()\n}\n\ntype UnconfiguredPropertyValueFault UnconfiguredPropertyValue\n\nfunc init() 
{\n\tt[\"UnconfiguredPropertyValueFault\"] = reflect.TypeOf((*UnconfiguredPropertyValueFault)(nil)).Elem()\n}\n\ntype UncustomizableGuest struct {\n\tCustomizationFault\n\n\tUncustomizableGuestOS string `xml:\"uncustomizableGuestOS\"`\n}\n\nfunc init() {\n\tt[\"UncustomizableGuest\"] = reflect.TypeOf((*UncustomizableGuest)(nil)).Elem()\n}\n\ntype UncustomizableGuestFault UncustomizableGuest\n\nfunc init() {\n\tt[\"UncustomizableGuestFault\"] = reflect.TypeOf((*UncustomizableGuestFault)(nil)).Elem()\n}\n\ntype UnexpectedCustomizationFault struct {\n\tCustomizationFault\n}\n\nfunc init() {\n\tt[\"UnexpectedCustomizationFault\"] = reflect.TypeOf((*UnexpectedCustomizationFault)(nil)).Elem()\n}\n\ntype UnexpectedCustomizationFaultFault UnexpectedCustomizationFault\n\nfunc init() {\n\tt[\"UnexpectedCustomizationFaultFault\"] = reflect.TypeOf((*UnexpectedCustomizationFaultFault)(nil)).Elem()\n}\n\ntype UnexpectedFault struct {\n\tRuntimeFault\n\n\tFaultName string                `xml:\"faultName\"`\n\tFault     *LocalizedMethodFault `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UnexpectedFault\"] = reflect.TypeOf((*UnexpectedFault)(nil)).Elem()\n}\n\ntype UnexpectedFaultFault UnexpectedFault\n\nfunc init() {\n\tt[\"UnexpectedFaultFault\"] = reflect.TypeOf((*UnexpectedFaultFault)(nil)).Elem()\n}\n\ntype UninstallHostPatchRequestType struct {\n\tThis        ManagedObjectReference                     `xml:\"_this\"`\n\tBulletinIds []string                                   `xml:\"bulletinIds,omitempty\"`\n\tSpec        *HostPatchManagerPatchManagerOperationSpec `xml:\"spec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UninstallHostPatchRequestType\"] = reflect.TypeOf((*UninstallHostPatchRequestType)(nil)).Elem()\n}\n\ntype UninstallHostPatch_Task UninstallHostPatchRequestType\n\nfunc init() {\n\tt[\"UninstallHostPatch_Task\"] = reflect.TypeOf((*UninstallHostPatch_Task)(nil)).Elem()\n}\n\ntype UninstallHostPatch_TaskResponse struct {\n\tReturnval ManagedObjectReference 
`xml:\"returnval\"`\n}\n\ntype UninstallIoFilterRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tFilterId string                 `xml:\"filterId\"`\n\tCompRes  ManagedObjectReference `xml:\"compRes\"`\n}\n\nfunc init() {\n\tt[\"UninstallIoFilterRequestType\"] = reflect.TypeOf((*UninstallIoFilterRequestType)(nil)).Elem()\n}\n\ntype UninstallIoFilter_Task UninstallIoFilterRequestType\n\nfunc init() {\n\tt[\"UninstallIoFilter_Task\"] = reflect.TypeOf((*UninstallIoFilter_Task)(nil)).Elem()\n}\n\ntype UninstallIoFilter_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype UninstallService UninstallServiceRequestType\n\nfunc init() {\n\tt[\"UninstallService\"] = reflect.TypeOf((*UninstallService)(nil)).Elem()\n}\n\ntype UninstallServiceRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tId   string                 `xml:\"id\"`\n}\n\nfunc init() {\n\tt[\"UninstallServiceRequestType\"] = reflect.TypeOf((*UninstallServiceRequestType)(nil)).Elem()\n}\n\ntype UninstallServiceResponse struct {\n}\n\ntype UnlicensedVirtualMachinesEvent struct {\n\tLicenseEvent\n\n\tUnlicensed int32 `xml:\"unlicensed\"`\n\tAvailable  int32 `xml:\"available\"`\n}\n\nfunc init() {\n\tt[\"UnlicensedVirtualMachinesEvent\"] = reflect.TypeOf((*UnlicensedVirtualMachinesEvent)(nil)).Elem()\n}\n\ntype UnlicensedVirtualMachinesFoundEvent struct {\n\tLicenseEvent\n\n\tAvailable int32 `xml:\"available\"`\n}\n\nfunc init() {\n\tt[\"UnlicensedVirtualMachinesFoundEvent\"] = reflect.TypeOf((*UnlicensedVirtualMachinesFoundEvent)(nil)).Elem()\n}\n\ntype UnmapVmfsVolumeExRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tVmfsUuid []string               `xml:\"vmfsUuid\"`\n}\n\nfunc init() {\n\tt[\"UnmapVmfsVolumeExRequestType\"] = reflect.TypeOf((*UnmapVmfsVolumeExRequestType)(nil)).Elem()\n}\n\ntype UnmapVmfsVolumeEx_Task UnmapVmfsVolumeExRequestType\n\nfunc init() {\n\tt[\"UnmapVmfsVolumeEx_Task\"] = 
reflect.TypeOf((*UnmapVmfsVolumeEx_Task)(nil)).Elem()\n}\n\ntype UnmapVmfsVolumeEx_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype UnmountDiskMappingRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tMapping []VsanHostDiskMapping  `xml:\"mapping\"`\n}\n\nfunc init() {\n\tt[\"UnmountDiskMappingRequestType\"] = reflect.TypeOf((*UnmountDiskMappingRequestType)(nil)).Elem()\n}\n\ntype UnmountDiskMapping_Task UnmountDiskMappingRequestType\n\nfunc init() {\n\tt[\"UnmountDiskMapping_Task\"] = reflect.TypeOf((*UnmountDiskMapping_Task)(nil)).Elem()\n}\n\ntype UnmountDiskMapping_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype UnmountForceMountedVmfsVolume UnmountForceMountedVmfsVolumeRequestType\n\nfunc init() {\n\tt[\"UnmountForceMountedVmfsVolume\"] = reflect.TypeOf((*UnmountForceMountedVmfsVolume)(nil)).Elem()\n}\n\ntype UnmountForceMountedVmfsVolumeRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tVmfsUuid string                 `xml:\"vmfsUuid\"`\n}\n\nfunc init() {\n\tt[\"UnmountForceMountedVmfsVolumeRequestType\"] = reflect.TypeOf((*UnmountForceMountedVmfsVolumeRequestType)(nil)).Elem()\n}\n\ntype UnmountForceMountedVmfsVolumeResponse struct {\n}\n\ntype UnmountToolsInstaller UnmountToolsInstallerRequestType\n\nfunc init() {\n\tt[\"UnmountToolsInstaller\"] = reflect.TypeOf((*UnmountToolsInstaller)(nil)).Elem()\n}\n\ntype UnmountToolsInstallerRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"UnmountToolsInstallerRequestType\"] = reflect.TypeOf((*UnmountToolsInstallerRequestType)(nil)).Elem()\n}\n\ntype UnmountToolsInstallerResponse struct {\n}\n\ntype UnmountVffsVolume UnmountVffsVolumeRequestType\n\nfunc init() {\n\tt[\"UnmountVffsVolume\"] = reflect.TypeOf((*UnmountVffsVolume)(nil)).Elem()\n}\n\ntype UnmountVffsVolumeRequestType struct {\n\tThis     ManagedObjectReference 
`xml:\"_this\"`\n\tVffsUuid string                 `xml:\"vffsUuid\"`\n}\n\nfunc init() {\n\tt[\"UnmountVffsVolumeRequestType\"] = reflect.TypeOf((*UnmountVffsVolumeRequestType)(nil)).Elem()\n}\n\ntype UnmountVffsVolumeResponse struct {\n}\n\ntype UnmountVmfsVolume UnmountVmfsVolumeRequestType\n\nfunc init() {\n\tt[\"UnmountVmfsVolume\"] = reflect.TypeOf((*UnmountVmfsVolume)(nil)).Elem()\n}\n\ntype UnmountVmfsVolumeExRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tVmfsUuid []string               `xml:\"vmfsUuid\"`\n}\n\nfunc init() {\n\tt[\"UnmountVmfsVolumeExRequestType\"] = reflect.TypeOf((*UnmountVmfsVolumeExRequestType)(nil)).Elem()\n}\n\ntype UnmountVmfsVolumeEx_Task UnmountVmfsVolumeExRequestType\n\nfunc init() {\n\tt[\"UnmountVmfsVolumeEx_Task\"] = reflect.TypeOf((*UnmountVmfsVolumeEx_Task)(nil)).Elem()\n}\n\ntype UnmountVmfsVolumeEx_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype UnmountVmfsVolumeRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tVmfsUuid string                 `xml:\"vmfsUuid\"`\n}\n\nfunc init() {\n\tt[\"UnmountVmfsVolumeRequestType\"] = reflect.TypeOf((*UnmountVmfsVolumeRequestType)(nil)).Elem()\n}\n\ntype UnmountVmfsVolumeResponse struct {\n}\n\ntype UnrecognizedHost struct {\n\tVimFault\n\n\tHostName string `xml:\"hostName\"`\n}\n\nfunc init() {\n\tt[\"UnrecognizedHost\"] = reflect.TypeOf((*UnrecognizedHost)(nil)).Elem()\n}\n\ntype UnrecognizedHostFault UnrecognizedHost\n\nfunc init() {\n\tt[\"UnrecognizedHostFault\"] = reflect.TypeOf((*UnrecognizedHostFault)(nil)).Elem()\n}\n\ntype UnregisterAndDestroyRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"UnregisterAndDestroyRequestType\"] = reflect.TypeOf((*UnregisterAndDestroyRequestType)(nil)).Elem()\n}\n\ntype UnregisterAndDestroy_Task UnregisterAndDestroyRequestType\n\nfunc init() {\n\tt[\"UnregisterAndDestroy_Task\"] = 
reflect.TypeOf((*UnregisterAndDestroy_Task)(nil)).Elem()\n}\n\ntype UnregisterAndDestroy_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype UnregisterExtension UnregisterExtensionRequestType\n\nfunc init() {\n\tt[\"UnregisterExtension\"] = reflect.TypeOf((*UnregisterExtension)(nil)).Elem()\n}\n\ntype UnregisterExtensionRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tExtensionKey string                 `xml:\"extensionKey\"`\n}\n\nfunc init() {\n\tt[\"UnregisterExtensionRequestType\"] = reflect.TypeOf((*UnregisterExtensionRequestType)(nil)).Elem()\n}\n\ntype UnregisterExtensionResponse struct {\n}\n\ntype UnregisterHealthUpdateProvider UnregisterHealthUpdateProviderRequestType\n\nfunc init() {\n\tt[\"UnregisterHealthUpdateProvider\"] = reflect.TypeOf((*UnregisterHealthUpdateProvider)(nil)).Elem()\n}\n\ntype UnregisterHealthUpdateProviderRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tProviderId string                 `xml:\"providerId\"`\n}\n\nfunc init() {\n\tt[\"UnregisterHealthUpdateProviderRequestType\"] = reflect.TypeOf((*UnregisterHealthUpdateProviderRequestType)(nil)).Elem()\n}\n\ntype UnregisterHealthUpdateProviderResponse struct {\n}\n\ntype UnregisterVM UnregisterVMRequestType\n\nfunc init() {\n\tt[\"UnregisterVM\"] = reflect.TypeOf((*UnregisterVM)(nil)).Elem()\n}\n\ntype UnregisterVMRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"UnregisterVMRequestType\"] = reflect.TypeOf((*UnregisterVMRequestType)(nil)).Elem()\n}\n\ntype UnregisterVMResponse struct {\n}\n\ntype UnsharedSwapVMotionNotSupported struct {\n\tMigrationFeatureNotSupported\n}\n\nfunc init() {\n\tt[\"UnsharedSwapVMotionNotSupported\"] = reflect.TypeOf((*UnsharedSwapVMotionNotSupported)(nil)).Elem()\n}\n\ntype UnsharedSwapVMotionNotSupportedFault UnsharedSwapVMotionNotSupported\n\nfunc init() {\n\tt[\"UnsharedSwapVMotionNotSupportedFault\"] = 
reflect.TypeOf((*UnsharedSwapVMotionNotSupportedFault)(nil)).Elem()\n}\n\ntype UnsupportedDatastore struct {\n\tVmConfigFault\n\n\tDatastore *ManagedObjectReference `xml:\"datastore,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UnsupportedDatastore\"] = reflect.TypeOf((*UnsupportedDatastore)(nil)).Elem()\n}\n\ntype UnsupportedDatastoreFault BaseUnsupportedDatastore\n\nfunc init() {\n\tt[\"UnsupportedDatastoreFault\"] = reflect.TypeOf((*UnsupportedDatastoreFault)(nil)).Elem()\n}\n\ntype UnsupportedGuest struct {\n\tInvalidVmConfig\n\n\tUnsupportedGuestOS string `xml:\"unsupportedGuestOS\"`\n}\n\nfunc init() {\n\tt[\"UnsupportedGuest\"] = reflect.TypeOf((*UnsupportedGuest)(nil)).Elem()\n}\n\ntype UnsupportedGuestFault UnsupportedGuest\n\nfunc init() {\n\tt[\"UnsupportedGuestFault\"] = reflect.TypeOf((*UnsupportedGuestFault)(nil)).Elem()\n}\n\ntype UnsupportedVimApiVersion struct {\n\tVimFault\n\n\tVersion string `xml:\"version,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UnsupportedVimApiVersion\"] = reflect.TypeOf((*UnsupportedVimApiVersion)(nil)).Elem()\n}\n\ntype UnsupportedVimApiVersionFault UnsupportedVimApiVersion\n\nfunc init() {\n\tt[\"UnsupportedVimApiVersionFault\"] = reflect.TypeOf((*UnsupportedVimApiVersionFault)(nil)).Elem()\n}\n\ntype UnsupportedVmxLocation struct {\n\tVmConfigFault\n}\n\nfunc init() {\n\tt[\"UnsupportedVmxLocation\"] = reflect.TypeOf((*UnsupportedVmxLocation)(nil)).Elem()\n}\n\ntype UnsupportedVmxLocationFault UnsupportedVmxLocation\n\nfunc init() {\n\tt[\"UnsupportedVmxLocationFault\"] = reflect.TypeOf((*UnsupportedVmxLocationFault)(nil)).Elem()\n}\n\ntype UnusedVirtualDiskBlocksNotScrubbed struct {\n\tDeviceBackingNotSupported\n}\n\nfunc init() {\n\tt[\"UnusedVirtualDiskBlocksNotScrubbed\"] = reflect.TypeOf((*UnusedVirtualDiskBlocksNotScrubbed)(nil)).Elem()\n}\n\ntype UnusedVirtualDiskBlocksNotScrubbedFault UnusedVirtualDiskBlocksNotScrubbed\n\nfunc init() {\n\tt[\"UnusedVirtualDiskBlocksNotScrubbedFault\"] = 
reflect.TypeOf((*UnusedVirtualDiskBlocksNotScrubbedFault)(nil)).Elem()\n}\n\ntype UpdateAnswerFileRequestType struct {\n\tThis       ManagedObjectReference   `xml:\"_this\"`\n\tHost       ManagedObjectReference   `xml:\"host\"`\n\tConfigSpec BaseAnswerFileCreateSpec `xml:\"configSpec,typeattr\"`\n}\n\nfunc init() {\n\tt[\"UpdateAnswerFileRequestType\"] = reflect.TypeOf((*UpdateAnswerFileRequestType)(nil)).Elem()\n}\n\ntype UpdateAnswerFile_Task UpdateAnswerFileRequestType\n\nfunc init() {\n\tt[\"UpdateAnswerFile_Task\"] = reflect.TypeOf((*UpdateAnswerFile_Task)(nil)).Elem()\n}\n\ntype UpdateAnswerFile_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype UpdateAssignedLicense UpdateAssignedLicenseRequestType\n\nfunc init() {\n\tt[\"UpdateAssignedLicense\"] = reflect.TypeOf((*UpdateAssignedLicense)(nil)).Elem()\n}\n\ntype UpdateAssignedLicenseRequestType struct {\n\tThis              ManagedObjectReference `xml:\"_this\"`\n\tEntity            string                 `xml:\"entity\"`\n\tLicenseKey        string                 `xml:\"licenseKey\"`\n\tEntityDisplayName string                 `xml:\"entityDisplayName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UpdateAssignedLicenseRequestType\"] = reflect.TypeOf((*UpdateAssignedLicenseRequestType)(nil)).Elem()\n}\n\ntype UpdateAssignedLicenseResponse struct {\n\tReturnval LicenseManagerLicenseInfo `xml:\"returnval\"`\n}\n\ntype UpdateAuthorizationRole UpdateAuthorizationRoleRequestType\n\nfunc init() {\n\tt[\"UpdateAuthorizationRole\"] = reflect.TypeOf((*UpdateAuthorizationRole)(nil)).Elem()\n}\n\ntype UpdateAuthorizationRoleRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tRoleId  int32                  `xml:\"roleId\"`\n\tNewName string                 `xml:\"newName\"`\n\tPrivIds []string               `xml:\"privIds,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UpdateAuthorizationRoleRequestType\"] = 
reflect.TypeOf((*UpdateAuthorizationRoleRequestType)(nil)).Elem()\n}\n\ntype UpdateAuthorizationRoleResponse struct {\n}\n\ntype UpdateBootDevice UpdateBootDeviceRequestType\n\nfunc init() {\n\tt[\"UpdateBootDevice\"] = reflect.TypeOf((*UpdateBootDevice)(nil)).Elem()\n}\n\ntype UpdateBootDeviceRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tKey  string                 `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"UpdateBootDeviceRequestType\"] = reflect.TypeOf((*UpdateBootDeviceRequestType)(nil)).Elem()\n}\n\ntype UpdateBootDeviceResponse struct {\n}\n\ntype UpdateChildResourceConfiguration UpdateChildResourceConfigurationRequestType\n\nfunc init() {\n\tt[\"UpdateChildResourceConfiguration\"] = reflect.TypeOf((*UpdateChildResourceConfiguration)(nil)).Elem()\n}\n\ntype UpdateChildResourceConfigurationRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tSpec []ResourceConfigSpec   `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"UpdateChildResourceConfigurationRequestType\"] = reflect.TypeOf((*UpdateChildResourceConfigurationRequestType)(nil)).Elem()\n}\n\ntype UpdateChildResourceConfigurationResponse struct {\n}\n\ntype UpdateClusterProfile UpdateClusterProfileRequestType\n\nfunc init() {\n\tt[\"UpdateClusterProfile\"] = reflect.TypeOf((*UpdateClusterProfile)(nil)).Elem()\n}\n\ntype UpdateClusterProfileRequestType struct {\n\tThis   ManagedObjectReference       `xml:\"_this\"`\n\tConfig BaseClusterProfileConfigSpec `xml:\"config,typeattr\"`\n}\n\nfunc init() {\n\tt[\"UpdateClusterProfileRequestType\"] = reflect.TypeOf((*UpdateClusterProfileRequestType)(nil)).Elem()\n}\n\ntype UpdateClusterProfileResponse struct {\n}\n\ntype UpdateConfig UpdateConfigRequestType\n\nfunc init() {\n\tt[\"UpdateConfig\"] = reflect.TypeOf((*UpdateConfig)(nil)).Elem()\n}\n\ntype UpdateConfigRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tName   string                 `xml:\"name,omitempty\"`\n\tConfig *ResourceConfigSpec    
`xml:\"config,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UpdateConfigRequestType\"] = reflect.TypeOf((*UpdateConfigRequestType)(nil)).Elem()\n}\n\ntype UpdateConfigResponse struct {\n}\n\ntype UpdateConsoleIpRouteConfig UpdateConsoleIpRouteConfigRequestType\n\nfunc init() {\n\tt[\"UpdateConsoleIpRouteConfig\"] = reflect.TypeOf((*UpdateConsoleIpRouteConfig)(nil)).Elem()\n}\n\ntype UpdateConsoleIpRouteConfigRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tConfig BaseHostIpRouteConfig  `xml:\"config,typeattr\"`\n}\n\nfunc init() {\n\tt[\"UpdateConsoleIpRouteConfigRequestType\"] = reflect.TypeOf((*UpdateConsoleIpRouteConfigRequestType)(nil)).Elem()\n}\n\ntype UpdateConsoleIpRouteConfigResponse struct {\n}\n\ntype UpdateCounterLevelMapping UpdateCounterLevelMappingRequestType\n\nfunc init() {\n\tt[\"UpdateCounterLevelMapping\"] = reflect.TypeOf((*UpdateCounterLevelMapping)(nil)).Elem()\n}\n\ntype UpdateCounterLevelMappingRequestType struct {\n\tThis            ManagedObjectReference                  `xml:\"_this\"`\n\tCounterLevelMap []PerformanceManagerCounterLevelMapping `xml:\"counterLevelMap\"`\n}\n\nfunc init() {\n\tt[\"UpdateCounterLevelMappingRequestType\"] = reflect.TypeOf((*UpdateCounterLevelMappingRequestType)(nil)).Elem()\n}\n\ntype UpdateCounterLevelMappingResponse struct {\n}\n\ntype UpdateDVSHealthCheckConfigRequestType struct {\n\tThis              ManagedObjectReference     `xml:\"_this\"`\n\tHealthCheckConfig []BaseDVSHealthCheckConfig `xml:\"healthCheckConfig,typeattr\"`\n}\n\nfunc init() {\n\tt[\"UpdateDVSHealthCheckConfigRequestType\"] = reflect.TypeOf((*UpdateDVSHealthCheckConfigRequestType)(nil)).Elem()\n}\n\ntype UpdateDVSHealthCheckConfig_Task UpdateDVSHealthCheckConfigRequestType\n\nfunc init() {\n\tt[\"UpdateDVSHealthCheckConfig_Task\"] = reflect.TypeOf((*UpdateDVSHealthCheckConfig_Task)(nil)).Elem()\n}\n\ntype UpdateDVSHealthCheckConfig_TaskResponse struct {\n\tReturnval ManagedObjectReference 
`xml:\"returnval\"`\n}\n\ntype UpdateDVSLacpGroupConfigRequestType struct {\n\tThis          ManagedObjectReference   `xml:\"_this\"`\n\tLacpGroupSpec []VMwareDvsLacpGroupSpec `xml:\"lacpGroupSpec\"`\n}\n\nfunc init() {\n\tt[\"UpdateDVSLacpGroupConfigRequestType\"] = reflect.TypeOf((*UpdateDVSLacpGroupConfigRequestType)(nil)).Elem()\n}\n\ntype UpdateDVSLacpGroupConfig_Task UpdateDVSLacpGroupConfigRequestType\n\nfunc init() {\n\tt[\"UpdateDVSLacpGroupConfig_Task\"] = reflect.TypeOf((*UpdateDVSLacpGroupConfig_Task)(nil)).Elem()\n}\n\ntype UpdateDVSLacpGroupConfig_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype UpdateDateTime UpdateDateTimeRequestType\n\nfunc init() {\n\tt[\"UpdateDateTime\"] = reflect.TypeOf((*UpdateDateTime)(nil)).Elem()\n}\n\ntype UpdateDateTimeConfig UpdateDateTimeConfigRequestType\n\nfunc init() {\n\tt[\"UpdateDateTimeConfig\"] = reflect.TypeOf((*UpdateDateTimeConfig)(nil)).Elem()\n}\n\ntype UpdateDateTimeConfigRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tConfig HostDateTimeConfig     `xml:\"config\"`\n}\n\nfunc init() {\n\tt[\"UpdateDateTimeConfigRequestType\"] = reflect.TypeOf((*UpdateDateTimeConfigRequestType)(nil)).Elem()\n}\n\ntype UpdateDateTimeConfigResponse struct {\n}\n\ntype UpdateDateTimeRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tDateTime time.Time              `xml:\"dateTime\"`\n}\n\nfunc init() {\n\tt[\"UpdateDateTimeRequestType\"] = reflect.TypeOf((*UpdateDateTimeRequestType)(nil)).Elem()\n}\n\ntype UpdateDateTimeResponse struct {\n}\n\ntype UpdateDefaultPolicy UpdateDefaultPolicyRequestType\n\nfunc init() {\n\tt[\"UpdateDefaultPolicy\"] = reflect.TypeOf((*UpdateDefaultPolicy)(nil)).Elem()\n}\n\ntype UpdateDefaultPolicyRequestType struct {\n\tThis          ManagedObjectReference    `xml:\"_this\"`\n\tDefaultPolicy HostFirewallDefaultPolicy `xml:\"defaultPolicy\"`\n}\n\nfunc init() {\n\tt[\"UpdateDefaultPolicyRequestType\"] = 
reflect.TypeOf((*UpdateDefaultPolicyRequestType)(nil)).Elem()\n}\n\ntype UpdateDefaultPolicyResponse struct {\n}\n\ntype UpdateDiskPartitions UpdateDiskPartitionsRequestType\n\nfunc init() {\n\tt[\"UpdateDiskPartitions\"] = reflect.TypeOf((*UpdateDiskPartitions)(nil)).Elem()\n}\n\ntype UpdateDiskPartitionsRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tDevicePath string                 `xml:\"devicePath\"`\n\tSpec       HostDiskPartitionSpec  `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"UpdateDiskPartitionsRequestType\"] = reflect.TypeOf((*UpdateDiskPartitionsRequestType)(nil)).Elem()\n}\n\ntype UpdateDiskPartitionsResponse struct {\n}\n\ntype UpdateDnsConfig UpdateDnsConfigRequestType\n\nfunc init() {\n\tt[\"UpdateDnsConfig\"] = reflect.TypeOf((*UpdateDnsConfig)(nil)).Elem()\n}\n\ntype UpdateDnsConfigRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tConfig BaseHostDnsConfig      `xml:\"config,typeattr\"`\n}\n\nfunc init() {\n\tt[\"UpdateDnsConfigRequestType\"] = reflect.TypeOf((*UpdateDnsConfigRequestType)(nil)).Elem()\n}\n\ntype UpdateDnsConfigResponse struct {\n}\n\ntype UpdateDvsCapability UpdateDvsCapabilityRequestType\n\nfunc init() {\n\tt[\"UpdateDvsCapability\"] = reflect.TypeOf((*UpdateDvsCapability)(nil)).Elem()\n}\n\ntype UpdateDvsCapabilityRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tCapability DVSCapability          `xml:\"capability\"`\n}\n\nfunc init() {\n\tt[\"UpdateDvsCapabilityRequestType\"] = reflect.TypeOf((*UpdateDvsCapabilityRequestType)(nil)).Elem()\n}\n\ntype UpdateDvsCapabilityResponse struct {\n}\n\ntype UpdateExtension UpdateExtensionRequestType\n\nfunc init() {\n\tt[\"UpdateExtension\"] = reflect.TypeOf((*UpdateExtension)(nil)).Elem()\n}\n\ntype UpdateExtensionRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tExtension Extension              `xml:\"extension\"`\n}\n\nfunc init() {\n\tt[\"UpdateExtensionRequestType\"] = 
reflect.TypeOf((*UpdateExtensionRequestType)(nil)).Elem()\n}\n\ntype UpdateExtensionResponse struct {\n}\n\ntype UpdateFlags UpdateFlagsRequestType\n\nfunc init() {\n\tt[\"UpdateFlags\"] = reflect.TypeOf((*UpdateFlags)(nil)).Elem()\n}\n\ntype UpdateFlagsRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tFlagInfo HostFlagInfo           `xml:\"flagInfo\"`\n}\n\nfunc init() {\n\tt[\"UpdateFlagsRequestType\"] = reflect.TypeOf((*UpdateFlagsRequestType)(nil)).Elem()\n}\n\ntype UpdateFlagsResponse struct {\n}\n\ntype UpdateGraphicsConfig UpdateGraphicsConfigRequestType\n\nfunc init() {\n\tt[\"UpdateGraphicsConfig\"] = reflect.TypeOf((*UpdateGraphicsConfig)(nil)).Elem()\n}\n\ntype UpdateGraphicsConfigRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tConfig HostGraphicsConfig     `xml:\"config\"`\n}\n\nfunc init() {\n\tt[\"UpdateGraphicsConfigRequestType\"] = reflect.TypeOf((*UpdateGraphicsConfigRequestType)(nil)).Elem()\n}\n\ntype UpdateGraphicsConfigResponse struct {\n}\n\ntype UpdateHostCustomizationsRequestType struct {\n\tThis                ManagedObjectReference                  `xml:\"_this\"`\n\tHostToConfigSpecMap []HostProfileManagerHostToConfigSpecMap `xml:\"hostToConfigSpecMap,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UpdateHostCustomizationsRequestType\"] = reflect.TypeOf((*UpdateHostCustomizationsRequestType)(nil)).Elem()\n}\n\ntype UpdateHostCustomizations_Task UpdateHostCustomizationsRequestType\n\nfunc init() {\n\tt[\"UpdateHostCustomizations_Task\"] = reflect.TypeOf((*UpdateHostCustomizations_Task)(nil)).Elem()\n}\n\ntype UpdateHostCustomizations_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype UpdateHostImageAcceptanceLevel UpdateHostImageAcceptanceLevelRequestType\n\nfunc init() {\n\tt[\"UpdateHostImageAcceptanceLevel\"] = reflect.TypeOf((*UpdateHostImageAcceptanceLevel)(nil)).Elem()\n}\n\ntype UpdateHostImageAcceptanceLevelRequestType struct {\n\tThis               
ManagedObjectReference `xml:\"_this\"`\n\tNewAcceptanceLevel string                 `xml:\"newAcceptanceLevel\"`\n}\n\nfunc init() {\n\tt[\"UpdateHostImageAcceptanceLevelRequestType\"] = reflect.TypeOf((*UpdateHostImageAcceptanceLevelRequestType)(nil)).Elem()\n}\n\ntype UpdateHostImageAcceptanceLevelResponse struct {\n}\n\ntype UpdateHostProfile UpdateHostProfileRequestType\n\nfunc init() {\n\tt[\"UpdateHostProfile\"] = reflect.TypeOf((*UpdateHostProfile)(nil)).Elem()\n}\n\ntype UpdateHostProfileRequestType struct {\n\tThis   ManagedObjectReference    `xml:\"_this\"`\n\tConfig BaseHostProfileConfigSpec `xml:\"config,typeattr\"`\n}\n\nfunc init() {\n\tt[\"UpdateHostProfileRequestType\"] = reflect.TypeOf((*UpdateHostProfileRequestType)(nil)).Elem()\n}\n\ntype UpdateHostProfileResponse struct {\n}\n\ntype UpdateHostSpecification UpdateHostSpecificationRequestType\n\nfunc init() {\n\tt[\"UpdateHostSpecification\"] = reflect.TypeOf((*UpdateHostSpecification)(nil)).Elem()\n}\n\ntype UpdateHostSpecificationRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tHost     ManagedObjectReference `xml:\"host\"`\n\tHostSpec HostSpecification      `xml:\"hostSpec\"`\n}\n\nfunc init() {\n\tt[\"UpdateHostSpecificationRequestType\"] = reflect.TypeOf((*UpdateHostSpecificationRequestType)(nil)).Elem()\n}\n\ntype UpdateHostSpecificationResponse struct {\n}\n\ntype UpdateHostSubSpecification UpdateHostSubSpecificationRequestType\n\nfunc init() {\n\tt[\"UpdateHostSubSpecification\"] = reflect.TypeOf((*UpdateHostSubSpecification)(nil)).Elem()\n}\n\ntype UpdateHostSubSpecificationRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tHost        ManagedObjectReference `xml:\"host\"`\n\tHostSubSpec HostSubSpecification   `xml:\"hostSubSpec\"`\n}\n\nfunc init() {\n\tt[\"UpdateHostSubSpecificationRequestType\"] = reflect.TypeOf((*UpdateHostSubSpecificationRequestType)(nil)).Elem()\n}\n\ntype UpdateHostSubSpecificationResponse struct {\n}\n\ntype 
UpdateInternetScsiAdvancedOptions UpdateInternetScsiAdvancedOptionsRequestType\n\nfunc init() {\n\tt[\"UpdateInternetScsiAdvancedOptions\"] = reflect.TypeOf((*UpdateInternetScsiAdvancedOptions)(nil)).Elem()\n}\n\ntype UpdateInternetScsiAdvancedOptionsRequestType struct {\n\tThis           ManagedObjectReference          `xml:\"_this\"`\n\tIScsiHbaDevice string                          `xml:\"iScsiHbaDevice\"`\n\tTargetSet      *HostInternetScsiHbaTargetSet   `xml:\"targetSet,omitempty\"`\n\tOptions        []HostInternetScsiHbaParamValue `xml:\"options\"`\n}\n\nfunc init() {\n\tt[\"UpdateInternetScsiAdvancedOptionsRequestType\"] = reflect.TypeOf((*UpdateInternetScsiAdvancedOptionsRequestType)(nil)).Elem()\n}\n\ntype UpdateInternetScsiAdvancedOptionsResponse struct {\n}\n\ntype UpdateInternetScsiAlias UpdateInternetScsiAliasRequestType\n\nfunc init() {\n\tt[\"UpdateInternetScsiAlias\"] = reflect.TypeOf((*UpdateInternetScsiAlias)(nil)).Elem()\n}\n\ntype UpdateInternetScsiAliasRequestType struct {\n\tThis           ManagedObjectReference `xml:\"_this\"`\n\tIScsiHbaDevice string                 `xml:\"iScsiHbaDevice\"`\n\tIScsiAlias     string                 `xml:\"iScsiAlias\"`\n}\n\nfunc init() {\n\tt[\"UpdateInternetScsiAliasRequestType\"] = reflect.TypeOf((*UpdateInternetScsiAliasRequestType)(nil)).Elem()\n}\n\ntype UpdateInternetScsiAliasResponse struct {\n}\n\ntype UpdateInternetScsiAuthenticationProperties UpdateInternetScsiAuthenticationPropertiesRequestType\n\nfunc init() {\n\tt[\"UpdateInternetScsiAuthenticationProperties\"] = reflect.TypeOf((*UpdateInternetScsiAuthenticationProperties)(nil)).Elem()\n}\n\ntype UpdateInternetScsiAuthenticationPropertiesRequestType struct {\n\tThis                     ManagedObjectReference                      `xml:\"_this\"`\n\tIScsiHbaDevice           string                                      `xml:\"iScsiHbaDevice\"`\n\tAuthenticationProperties HostInternetScsiHbaAuthenticationProperties 
`xml:\"authenticationProperties\"`\n\tTargetSet                *HostInternetScsiHbaTargetSet               `xml:\"targetSet,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UpdateInternetScsiAuthenticationPropertiesRequestType\"] = reflect.TypeOf((*UpdateInternetScsiAuthenticationPropertiesRequestType)(nil)).Elem()\n}\n\ntype UpdateInternetScsiAuthenticationPropertiesResponse struct {\n}\n\ntype UpdateInternetScsiDigestProperties UpdateInternetScsiDigestPropertiesRequestType\n\nfunc init() {\n\tt[\"UpdateInternetScsiDigestProperties\"] = reflect.TypeOf((*UpdateInternetScsiDigestProperties)(nil)).Elem()\n}\n\ntype UpdateInternetScsiDigestPropertiesRequestType struct {\n\tThis             ManagedObjectReference              `xml:\"_this\"`\n\tIScsiHbaDevice   string                              `xml:\"iScsiHbaDevice\"`\n\tTargetSet        *HostInternetScsiHbaTargetSet       `xml:\"targetSet,omitempty\"`\n\tDigestProperties HostInternetScsiHbaDigestProperties `xml:\"digestProperties\"`\n}\n\nfunc init() {\n\tt[\"UpdateInternetScsiDigestPropertiesRequestType\"] = reflect.TypeOf((*UpdateInternetScsiDigestPropertiesRequestType)(nil)).Elem()\n}\n\ntype UpdateInternetScsiDigestPropertiesResponse struct {\n}\n\ntype UpdateInternetScsiDiscoveryProperties UpdateInternetScsiDiscoveryPropertiesRequestType\n\nfunc init() {\n\tt[\"UpdateInternetScsiDiscoveryProperties\"] = reflect.TypeOf((*UpdateInternetScsiDiscoveryProperties)(nil)).Elem()\n}\n\ntype UpdateInternetScsiDiscoveryPropertiesRequestType struct {\n\tThis                ManagedObjectReference                 `xml:\"_this\"`\n\tIScsiHbaDevice      string                                 `xml:\"iScsiHbaDevice\"`\n\tDiscoveryProperties HostInternetScsiHbaDiscoveryProperties `xml:\"discoveryProperties\"`\n}\n\nfunc init() {\n\tt[\"UpdateInternetScsiDiscoveryPropertiesRequestType\"] = reflect.TypeOf((*UpdateInternetScsiDiscoveryPropertiesRequestType)(nil)).Elem()\n}\n\ntype UpdateInternetScsiDiscoveryPropertiesResponse struct 
{\n}\n\ntype UpdateInternetScsiIPProperties UpdateInternetScsiIPPropertiesRequestType\n\nfunc init() {\n\tt[\"UpdateInternetScsiIPProperties\"] = reflect.TypeOf((*UpdateInternetScsiIPProperties)(nil)).Elem()\n}\n\ntype UpdateInternetScsiIPPropertiesRequestType struct {\n\tThis           ManagedObjectReference          `xml:\"_this\"`\n\tIScsiHbaDevice string                          `xml:\"iScsiHbaDevice\"`\n\tIpProperties   HostInternetScsiHbaIPProperties `xml:\"ipProperties\"`\n}\n\nfunc init() {\n\tt[\"UpdateInternetScsiIPPropertiesRequestType\"] = reflect.TypeOf((*UpdateInternetScsiIPPropertiesRequestType)(nil)).Elem()\n}\n\ntype UpdateInternetScsiIPPropertiesResponse struct {\n}\n\ntype UpdateInternetScsiName UpdateInternetScsiNameRequestType\n\nfunc init() {\n\tt[\"UpdateInternetScsiName\"] = reflect.TypeOf((*UpdateInternetScsiName)(nil)).Elem()\n}\n\ntype UpdateInternetScsiNameRequestType struct {\n\tThis           ManagedObjectReference `xml:\"_this\"`\n\tIScsiHbaDevice string                 `xml:\"iScsiHbaDevice\"`\n\tIScsiName      string                 `xml:\"iScsiName\"`\n}\n\nfunc init() {\n\tt[\"UpdateInternetScsiNameRequestType\"] = reflect.TypeOf((*UpdateInternetScsiNameRequestType)(nil)).Elem()\n}\n\ntype UpdateInternetScsiNameResponse struct {\n}\n\ntype UpdateIpConfig UpdateIpConfigRequestType\n\nfunc init() {\n\tt[\"UpdateIpConfig\"] = reflect.TypeOf((*UpdateIpConfig)(nil)).Elem()\n}\n\ntype UpdateIpConfigRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tIpConfig HostIpConfig           `xml:\"ipConfig\"`\n}\n\nfunc init() {\n\tt[\"UpdateIpConfigRequestType\"] = reflect.TypeOf((*UpdateIpConfigRequestType)(nil)).Elem()\n}\n\ntype UpdateIpConfigResponse struct {\n}\n\ntype UpdateIpPool UpdateIpPoolRequestType\n\nfunc init() {\n\tt[\"UpdateIpPool\"] = reflect.TypeOf((*UpdateIpPool)(nil)).Elem()\n}\n\ntype UpdateIpPoolRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tDc   ManagedObjectReference 
`xml:\"dc\"`\n\tPool IpPool                 `xml:\"pool\"`\n}\n\nfunc init() {\n\tt[\"UpdateIpPoolRequestType\"] = reflect.TypeOf((*UpdateIpPoolRequestType)(nil)).Elem()\n}\n\ntype UpdateIpPoolResponse struct {\n}\n\ntype UpdateIpRouteConfig UpdateIpRouteConfigRequestType\n\nfunc init() {\n\tt[\"UpdateIpRouteConfig\"] = reflect.TypeOf((*UpdateIpRouteConfig)(nil)).Elem()\n}\n\ntype UpdateIpRouteConfigRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tConfig BaseHostIpRouteConfig  `xml:\"config,typeattr\"`\n}\n\nfunc init() {\n\tt[\"UpdateIpRouteConfigRequestType\"] = reflect.TypeOf((*UpdateIpRouteConfigRequestType)(nil)).Elem()\n}\n\ntype UpdateIpRouteConfigResponse struct {\n}\n\ntype UpdateIpRouteTableConfig UpdateIpRouteTableConfigRequestType\n\nfunc init() {\n\tt[\"UpdateIpRouteTableConfig\"] = reflect.TypeOf((*UpdateIpRouteTableConfig)(nil)).Elem()\n}\n\ntype UpdateIpRouteTableConfigRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tConfig HostIpRouteTableConfig `xml:\"config\"`\n}\n\nfunc init() {\n\tt[\"UpdateIpRouteTableConfigRequestType\"] = reflect.TypeOf((*UpdateIpRouteTableConfigRequestType)(nil)).Elem()\n}\n\ntype UpdateIpRouteTableConfigResponse struct {\n}\n\ntype UpdateIpmi UpdateIpmiRequestType\n\nfunc init() {\n\tt[\"UpdateIpmi\"] = reflect.TypeOf((*UpdateIpmi)(nil)).Elem()\n}\n\ntype UpdateIpmiRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tIpmiInfo HostIpmiInfo           `xml:\"ipmiInfo\"`\n}\n\nfunc init() {\n\tt[\"UpdateIpmiRequestType\"] = reflect.TypeOf((*UpdateIpmiRequestType)(nil)).Elem()\n}\n\ntype UpdateIpmiResponse struct {\n}\n\ntype UpdateKmipServer UpdateKmipServerRequestType\n\nfunc init() {\n\tt[\"UpdateKmipServer\"] = reflect.TypeOf((*UpdateKmipServer)(nil)).Elem()\n}\n\ntype UpdateKmipServerRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tServer KmipServerSpec         `xml:\"server\"`\n}\n\nfunc init() 
{\n\tt[\"UpdateKmipServerRequestType\"] = reflect.TypeOf((*UpdateKmipServerRequestType)(nil)).Elem()\n}\n\ntype UpdateKmipServerResponse struct {\n}\n\ntype UpdateKmsSignedCsrClientCert UpdateKmsSignedCsrClientCertRequestType\n\nfunc init() {\n\tt[\"UpdateKmsSignedCsrClientCert\"] = reflect.TypeOf((*UpdateKmsSignedCsrClientCert)(nil)).Elem()\n}\n\ntype UpdateKmsSignedCsrClientCertRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tCluster     KeyProviderId          `xml:\"cluster\"`\n\tCertificate string                 `xml:\"certificate\"`\n}\n\nfunc init() {\n\tt[\"UpdateKmsSignedCsrClientCertRequestType\"] = reflect.TypeOf((*UpdateKmsSignedCsrClientCertRequestType)(nil)).Elem()\n}\n\ntype UpdateKmsSignedCsrClientCertResponse struct {\n}\n\ntype UpdateLicense UpdateLicenseRequestType\n\nfunc init() {\n\tt[\"UpdateLicense\"] = reflect.TypeOf((*UpdateLicense)(nil)).Elem()\n}\n\ntype UpdateLicenseLabel UpdateLicenseLabelRequestType\n\nfunc init() {\n\tt[\"UpdateLicenseLabel\"] = reflect.TypeOf((*UpdateLicenseLabel)(nil)).Elem()\n}\n\ntype UpdateLicenseLabelRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tLicenseKey string                 `xml:\"licenseKey\"`\n\tLabelKey   string                 `xml:\"labelKey\"`\n\tLabelValue string                 `xml:\"labelValue\"`\n}\n\nfunc init() {\n\tt[\"UpdateLicenseLabelRequestType\"] = reflect.TypeOf((*UpdateLicenseLabelRequestType)(nil)).Elem()\n}\n\ntype UpdateLicenseLabelResponse struct {\n}\n\ntype UpdateLicenseRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tLicenseKey string                 `xml:\"licenseKey\"`\n\tLabels     []KeyValue             `xml:\"labels,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UpdateLicenseRequestType\"] = reflect.TypeOf((*UpdateLicenseRequestType)(nil)).Elem()\n}\n\ntype UpdateLicenseResponse struct {\n\tReturnval LicenseManagerLicenseInfo `xml:\"returnval\"`\n}\n\ntype UpdateLinkedChildren 
UpdateLinkedChildrenRequestType\n\nfunc init() {\n\tt[\"UpdateLinkedChildren\"] = reflect.TypeOf((*UpdateLinkedChildren)(nil)).Elem()\n}\n\ntype UpdateLinkedChildrenRequestType struct {\n\tThis         ManagedObjectReference   `xml:\"_this\"`\n\tAddChangeSet []VirtualAppLinkInfo     `xml:\"addChangeSet,omitempty\"`\n\tRemoveSet    []ManagedObjectReference `xml:\"removeSet,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UpdateLinkedChildrenRequestType\"] = reflect.TypeOf((*UpdateLinkedChildrenRequestType)(nil)).Elem()\n}\n\ntype UpdateLinkedChildrenResponse struct {\n}\n\ntype UpdateLocalSwapDatastore UpdateLocalSwapDatastoreRequestType\n\nfunc init() {\n\tt[\"UpdateLocalSwapDatastore\"] = reflect.TypeOf((*UpdateLocalSwapDatastore)(nil)).Elem()\n}\n\ntype UpdateLocalSwapDatastoreRequestType struct {\n\tThis      ManagedObjectReference  `xml:\"_this\"`\n\tDatastore *ManagedObjectReference `xml:\"datastore,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UpdateLocalSwapDatastoreRequestType\"] = reflect.TypeOf((*UpdateLocalSwapDatastoreRequestType)(nil)).Elem()\n}\n\ntype UpdateLocalSwapDatastoreResponse struct {\n}\n\ntype UpdateLockdownExceptions UpdateLockdownExceptionsRequestType\n\nfunc init() {\n\tt[\"UpdateLockdownExceptions\"] = reflect.TypeOf((*UpdateLockdownExceptions)(nil)).Elem()\n}\n\ntype UpdateLockdownExceptionsRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tUsers []string               `xml:\"users,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UpdateLockdownExceptionsRequestType\"] = reflect.TypeOf((*UpdateLockdownExceptionsRequestType)(nil)).Elem()\n}\n\ntype UpdateLockdownExceptionsResponse struct {\n}\n\ntype UpdateModuleOptionString UpdateModuleOptionStringRequestType\n\nfunc init() {\n\tt[\"UpdateModuleOptionString\"] = reflect.TypeOf((*UpdateModuleOptionString)(nil)).Elem()\n}\n\ntype UpdateModuleOptionStringRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tName    string                 `xml:\"name\"`\n\tOptions string    
             `xml:\"options\"`\n}\n\nfunc init() {\n\tt[\"UpdateModuleOptionStringRequestType\"] = reflect.TypeOf((*UpdateModuleOptionStringRequestType)(nil)).Elem()\n}\n\ntype UpdateModuleOptionStringResponse struct {\n}\n\ntype UpdateNetworkConfig UpdateNetworkConfigRequestType\n\nfunc init() {\n\tt[\"UpdateNetworkConfig\"] = reflect.TypeOf((*UpdateNetworkConfig)(nil)).Elem()\n}\n\ntype UpdateNetworkConfigRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tConfig     HostNetworkConfig      `xml:\"config\"`\n\tChangeMode string                 `xml:\"changeMode\"`\n}\n\nfunc init() {\n\tt[\"UpdateNetworkConfigRequestType\"] = reflect.TypeOf((*UpdateNetworkConfigRequestType)(nil)).Elem()\n}\n\ntype UpdateNetworkConfigResponse struct {\n\tReturnval HostNetworkConfigResult `xml:\"returnval\"`\n}\n\ntype UpdateNetworkResourcePool UpdateNetworkResourcePoolRequestType\n\nfunc init() {\n\tt[\"UpdateNetworkResourcePool\"] = reflect.TypeOf((*UpdateNetworkResourcePool)(nil)).Elem()\n}\n\ntype UpdateNetworkResourcePoolRequestType struct {\n\tThis       ManagedObjectReference             `xml:\"_this\"`\n\tConfigSpec []DVSNetworkResourcePoolConfigSpec `xml:\"configSpec\"`\n}\n\nfunc init() {\n\tt[\"UpdateNetworkResourcePoolRequestType\"] = reflect.TypeOf((*UpdateNetworkResourcePoolRequestType)(nil)).Elem()\n}\n\ntype UpdateNetworkResourcePoolResponse struct {\n}\n\ntype UpdateOptions UpdateOptionsRequestType\n\nfunc init() {\n\tt[\"UpdateOptions\"] = reflect.TypeOf((*UpdateOptions)(nil)).Elem()\n}\n\ntype UpdateOptionsRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tChangedValue []BaseOptionValue      `xml:\"changedValue,typeattr\"`\n}\n\nfunc init() {\n\tt[\"UpdateOptionsRequestType\"] = reflect.TypeOf((*UpdateOptionsRequestType)(nil)).Elem()\n}\n\ntype UpdateOptionsResponse struct {\n}\n\ntype UpdatePassthruConfig UpdatePassthruConfigRequestType\n\nfunc init() {\n\tt[\"UpdatePassthruConfig\"] = 
reflect.TypeOf((*UpdatePassthruConfig)(nil)).Elem()\n}\n\ntype UpdatePassthruConfigRequestType struct {\n\tThis   ManagedObjectReference      `xml:\"_this\"`\n\tConfig []BaseHostPciPassthruConfig `xml:\"config,typeattr\"`\n}\n\nfunc init() {\n\tt[\"UpdatePassthruConfigRequestType\"] = reflect.TypeOf((*UpdatePassthruConfigRequestType)(nil)).Elem()\n}\n\ntype UpdatePassthruConfigResponse struct {\n}\n\ntype UpdatePerfInterval UpdatePerfIntervalRequestType\n\nfunc init() {\n\tt[\"UpdatePerfInterval\"] = reflect.TypeOf((*UpdatePerfInterval)(nil)).Elem()\n}\n\ntype UpdatePerfIntervalRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tInterval PerfInterval           `xml:\"interval\"`\n}\n\nfunc init() {\n\tt[\"UpdatePerfIntervalRequestType\"] = reflect.TypeOf((*UpdatePerfIntervalRequestType)(nil)).Elem()\n}\n\ntype UpdatePerfIntervalResponse struct {\n}\n\ntype UpdatePhysicalNicLinkSpeed UpdatePhysicalNicLinkSpeedRequestType\n\nfunc init() {\n\tt[\"UpdatePhysicalNicLinkSpeed\"] = reflect.TypeOf((*UpdatePhysicalNicLinkSpeed)(nil)).Elem()\n}\n\ntype UpdatePhysicalNicLinkSpeedRequestType struct {\n\tThis      ManagedObjectReference `xml:\"_this\"`\n\tDevice    string                 `xml:\"device\"`\n\tLinkSpeed *PhysicalNicLinkInfo   `xml:\"linkSpeed,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UpdatePhysicalNicLinkSpeedRequestType\"] = reflect.TypeOf((*UpdatePhysicalNicLinkSpeedRequestType)(nil)).Elem()\n}\n\ntype UpdatePhysicalNicLinkSpeedResponse struct {\n}\n\ntype UpdatePortGroup UpdatePortGroupRequestType\n\nfunc init() {\n\tt[\"UpdatePortGroup\"] = reflect.TypeOf((*UpdatePortGroup)(nil)).Elem()\n}\n\ntype UpdatePortGroupRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tPgName  string                 `xml:\"pgName\"`\n\tPortgrp HostPortGroupSpec      `xml:\"portgrp\"`\n}\n\nfunc init() {\n\tt[\"UpdatePortGroupRequestType\"] = reflect.TypeOf((*UpdatePortGroupRequestType)(nil)).Elem()\n}\n\ntype UpdatePortGroupResponse struct 
{\n}\n\ntype UpdateProgress UpdateProgressRequestType\n\nfunc init() {\n\tt[\"UpdateProgress\"] = reflect.TypeOf((*UpdateProgress)(nil)).Elem()\n}\n\ntype UpdateProgressRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tPercentDone int32                  `xml:\"percentDone\"`\n}\n\nfunc init() {\n\tt[\"UpdateProgressRequestType\"] = reflect.TypeOf((*UpdateProgressRequestType)(nil)).Elem()\n}\n\ntype UpdateProgressResponse struct {\n}\n\ntype UpdateReferenceHost UpdateReferenceHostRequestType\n\nfunc init() {\n\tt[\"UpdateReferenceHost\"] = reflect.TypeOf((*UpdateReferenceHost)(nil)).Elem()\n}\n\ntype UpdateReferenceHostRequestType struct {\n\tThis ManagedObjectReference  `xml:\"_this\"`\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UpdateReferenceHostRequestType\"] = reflect.TypeOf((*UpdateReferenceHostRequestType)(nil)).Elem()\n}\n\ntype UpdateReferenceHostResponse struct {\n}\n\ntype UpdateRuleset UpdateRulesetRequestType\n\nfunc init() {\n\tt[\"UpdateRuleset\"] = reflect.TypeOf((*UpdateRuleset)(nil)).Elem()\n}\n\ntype UpdateRulesetRequestType struct {\n\tThis ManagedObjectReference         `xml:\"_this\"`\n\tId   string                         `xml:\"id\"`\n\tSpec HostFirewallRulesetRulesetSpec `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"UpdateRulesetRequestType\"] = reflect.TypeOf((*UpdateRulesetRequestType)(nil)).Elem()\n}\n\ntype UpdateRulesetResponse struct {\n}\n\ntype UpdateScsiLunDisplayName UpdateScsiLunDisplayNameRequestType\n\nfunc init() {\n\tt[\"UpdateScsiLunDisplayName\"] = reflect.TypeOf((*UpdateScsiLunDisplayName)(nil)).Elem()\n}\n\ntype UpdateScsiLunDisplayNameRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tLunUuid     string                 `xml:\"lunUuid\"`\n\tDisplayName string                 `xml:\"displayName\"`\n}\n\nfunc init() {\n\tt[\"UpdateScsiLunDisplayNameRequestType\"] = 
reflect.TypeOf((*UpdateScsiLunDisplayNameRequestType)(nil)).Elem()\n}\n\ntype UpdateScsiLunDisplayNameResponse struct {\n}\n\ntype UpdateSelfSignedClientCert UpdateSelfSignedClientCertRequestType\n\nfunc init() {\n\tt[\"UpdateSelfSignedClientCert\"] = reflect.TypeOf((*UpdateSelfSignedClientCert)(nil)).Elem()\n}\n\ntype UpdateSelfSignedClientCertRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tCluster     KeyProviderId          `xml:\"cluster\"`\n\tCertificate string                 `xml:\"certificate\"`\n}\n\nfunc init() {\n\tt[\"UpdateSelfSignedClientCertRequestType\"] = reflect.TypeOf((*UpdateSelfSignedClientCertRequestType)(nil)).Elem()\n}\n\ntype UpdateSelfSignedClientCertResponse struct {\n}\n\ntype UpdateServiceConsoleVirtualNic UpdateServiceConsoleVirtualNicRequestType\n\nfunc init() {\n\tt[\"UpdateServiceConsoleVirtualNic\"] = reflect.TypeOf((*UpdateServiceConsoleVirtualNic)(nil)).Elem()\n}\n\ntype UpdateServiceConsoleVirtualNicRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tDevice string                 `xml:\"device\"`\n\tNic    HostVirtualNicSpec     `xml:\"nic\"`\n}\n\nfunc init() {\n\tt[\"UpdateServiceConsoleVirtualNicRequestType\"] = reflect.TypeOf((*UpdateServiceConsoleVirtualNicRequestType)(nil)).Elem()\n}\n\ntype UpdateServiceConsoleVirtualNicResponse struct {\n}\n\ntype UpdateServiceMessage UpdateServiceMessageRequestType\n\nfunc init() {\n\tt[\"UpdateServiceMessage\"] = reflect.TypeOf((*UpdateServiceMessage)(nil)).Elem()\n}\n\ntype UpdateServiceMessageRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tMessage string                 `xml:\"message\"`\n}\n\nfunc init() {\n\tt[\"UpdateServiceMessageRequestType\"] = reflect.TypeOf((*UpdateServiceMessageRequestType)(nil)).Elem()\n}\n\ntype UpdateServiceMessageResponse struct {\n}\n\ntype UpdateServicePolicy UpdateServicePolicyRequestType\n\nfunc init() {\n\tt[\"UpdateServicePolicy\"] = 
reflect.TypeOf((*UpdateServicePolicy)(nil)).Elem()\n}\n\ntype UpdateServicePolicyRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tId     string                 `xml:\"id\"`\n\tPolicy string                 `xml:\"policy\"`\n}\n\nfunc init() {\n\tt[\"UpdateServicePolicyRequestType\"] = reflect.TypeOf((*UpdateServicePolicyRequestType)(nil)).Elem()\n}\n\ntype UpdateServicePolicyResponse struct {\n}\n\ntype UpdateSet struct {\n\tDynamicData\n\n\tVersion   string                 `xml:\"version\"`\n\tFilterSet []PropertyFilterUpdate `xml:\"filterSet,omitempty\"`\n\tTruncated *bool                  `xml:\"truncated\"`\n}\n\nfunc init() {\n\tt[\"UpdateSet\"] = reflect.TypeOf((*UpdateSet)(nil)).Elem()\n}\n\ntype UpdateSoftwareInternetScsiEnabled UpdateSoftwareInternetScsiEnabledRequestType\n\nfunc init() {\n\tt[\"UpdateSoftwareInternetScsiEnabled\"] = reflect.TypeOf((*UpdateSoftwareInternetScsiEnabled)(nil)).Elem()\n}\n\ntype UpdateSoftwareInternetScsiEnabledRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tEnabled bool                   `xml:\"enabled\"`\n}\n\nfunc init() {\n\tt[\"UpdateSoftwareInternetScsiEnabledRequestType\"] = reflect.TypeOf((*UpdateSoftwareInternetScsiEnabledRequestType)(nil)).Elem()\n}\n\ntype UpdateSoftwareInternetScsiEnabledResponse struct {\n}\n\ntype UpdateSystemResources UpdateSystemResourcesRequestType\n\nfunc init() {\n\tt[\"UpdateSystemResources\"] = reflect.TypeOf((*UpdateSystemResources)(nil)).Elem()\n}\n\ntype UpdateSystemResourcesRequestType struct {\n\tThis         ManagedObjectReference `xml:\"_this\"`\n\tResourceInfo HostSystemResourceInfo `xml:\"resourceInfo\"`\n}\n\nfunc init() {\n\tt[\"UpdateSystemResourcesRequestType\"] = reflect.TypeOf((*UpdateSystemResourcesRequestType)(nil)).Elem()\n}\n\ntype UpdateSystemResourcesResponse struct {\n}\n\ntype UpdateSystemSwapConfiguration UpdateSystemSwapConfigurationRequestType\n\nfunc init() {\n\tt[\"UpdateSystemSwapConfiguration\"] = 
reflect.TypeOf((*UpdateSystemSwapConfiguration)(nil)).Elem()\n}\n\ntype UpdateSystemSwapConfigurationRequestType struct {\n\tThis          ManagedObjectReference      `xml:\"_this\"`\n\tSysSwapConfig HostSystemSwapConfiguration `xml:\"sysSwapConfig\"`\n}\n\nfunc init() {\n\tt[\"UpdateSystemSwapConfigurationRequestType\"] = reflect.TypeOf((*UpdateSystemSwapConfigurationRequestType)(nil)).Elem()\n}\n\ntype UpdateSystemSwapConfigurationResponse struct {\n}\n\ntype UpdateSystemUsers UpdateSystemUsersRequestType\n\nfunc init() {\n\tt[\"UpdateSystemUsers\"] = reflect.TypeOf((*UpdateSystemUsers)(nil)).Elem()\n}\n\ntype UpdateSystemUsersRequestType struct {\n\tThis  ManagedObjectReference `xml:\"_this\"`\n\tUsers []string               `xml:\"users,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UpdateSystemUsersRequestType\"] = reflect.TypeOf((*UpdateSystemUsersRequestType)(nil)).Elem()\n}\n\ntype UpdateSystemUsersResponse struct {\n}\n\ntype UpdateUser UpdateUserRequestType\n\nfunc init() {\n\tt[\"UpdateUser\"] = reflect.TypeOf((*UpdateUser)(nil)).Elem()\n}\n\ntype UpdateUserRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tUser BaseHostAccountSpec    `xml:\"user,typeattr\"`\n}\n\nfunc init() {\n\tt[\"UpdateUserRequestType\"] = reflect.TypeOf((*UpdateUserRequestType)(nil)).Elem()\n}\n\ntype UpdateUserResponse struct {\n}\n\ntype UpdateVAppConfig UpdateVAppConfigRequestType\n\nfunc init() {\n\tt[\"UpdateVAppConfig\"] = reflect.TypeOf((*UpdateVAppConfig)(nil)).Elem()\n}\n\ntype UpdateVAppConfigRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n\tSpec VAppConfigSpec         `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"UpdateVAppConfigRequestType\"] = reflect.TypeOf((*UpdateVAppConfigRequestType)(nil)).Elem()\n}\n\ntype UpdateVAppConfigResponse struct {\n}\n\ntype UpdateVVolVirtualMachineFilesRequestType struct {\n\tThis         ManagedObjectReference               `xml:\"_this\"`\n\tFailoverPair []DatastoreVVolContainerFailoverPair 
`xml:\"failoverPair,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UpdateVVolVirtualMachineFilesRequestType\"] = reflect.TypeOf((*UpdateVVolVirtualMachineFilesRequestType)(nil)).Elem()\n}\n\ntype UpdateVVolVirtualMachineFiles_Task UpdateVVolVirtualMachineFilesRequestType\n\nfunc init() {\n\tt[\"UpdateVVolVirtualMachineFiles_Task\"] = reflect.TypeOf((*UpdateVVolVirtualMachineFiles_Task)(nil)).Elem()\n}\n\ntype UpdateVVolVirtualMachineFiles_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype UpdateVirtualMachineFilesRequestType struct {\n\tThis                      ManagedObjectReference            `xml:\"_this\"`\n\tMountPathDatastoreMapping []DatastoreMountPathDatastorePair `xml:\"mountPathDatastoreMapping\"`\n}\n\nfunc init() {\n\tt[\"UpdateVirtualMachineFilesRequestType\"] = reflect.TypeOf((*UpdateVirtualMachineFilesRequestType)(nil)).Elem()\n}\n\ntype UpdateVirtualMachineFilesResult struct {\n\tDynamicData\n\n\tFailedVmFile []UpdateVirtualMachineFilesResultFailedVmFileInfo `xml:\"failedVmFile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UpdateVirtualMachineFilesResult\"] = reflect.TypeOf((*UpdateVirtualMachineFilesResult)(nil)).Elem()\n}\n\ntype UpdateVirtualMachineFilesResultFailedVmFileInfo struct {\n\tDynamicData\n\n\tVmFile string               `xml:\"vmFile\"`\n\tFault  LocalizedMethodFault `xml:\"fault\"`\n}\n\nfunc init() {\n\tt[\"UpdateVirtualMachineFilesResultFailedVmFileInfo\"] = reflect.TypeOf((*UpdateVirtualMachineFilesResultFailedVmFileInfo)(nil)).Elem()\n}\n\ntype UpdateVirtualMachineFiles_Task UpdateVirtualMachineFilesRequestType\n\nfunc init() {\n\tt[\"UpdateVirtualMachineFiles_Task\"] = reflect.TypeOf((*UpdateVirtualMachineFiles_Task)(nil)).Elem()\n}\n\ntype UpdateVirtualMachineFiles_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype UpdateVirtualNic UpdateVirtualNicRequestType\n\nfunc init() {\n\tt[\"UpdateVirtualNic\"] = reflect.TypeOf((*UpdateVirtualNic)(nil)).Elem()\n}\n\ntype 
UpdateVirtualNicRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tDevice string                 `xml:\"device\"`\n\tNic    HostVirtualNicSpec     `xml:\"nic\"`\n}\n\nfunc init() {\n\tt[\"UpdateVirtualNicRequestType\"] = reflect.TypeOf((*UpdateVirtualNicRequestType)(nil)).Elem()\n}\n\ntype UpdateVirtualNicResponse struct {\n}\n\ntype UpdateVirtualSwitch UpdateVirtualSwitchRequestType\n\nfunc init() {\n\tt[\"UpdateVirtualSwitch\"] = reflect.TypeOf((*UpdateVirtualSwitch)(nil)).Elem()\n}\n\ntype UpdateVirtualSwitchRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tVswitchName string                 `xml:\"vswitchName\"`\n\tSpec        HostVirtualSwitchSpec  `xml:\"spec\"`\n}\n\nfunc init() {\n\tt[\"UpdateVirtualSwitchRequestType\"] = reflect.TypeOf((*UpdateVirtualSwitchRequestType)(nil)).Elem()\n}\n\ntype UpdateVirtualSwitchResponse struct {\n}\n\ntype UpdateVmfsUnmapPriority UpdateVmfsUnmapPriorityRequestType\n\nfunc init() {\n\tt[\"UpdateVmfsUnmapPriority\"] = reflect.TypeOf((*UpdateVmfsUnmapPriority)(nil)).Elem()\n}\n\ntype UpdateVmfsUnmapPriorityRequestType struct {\n\tThis          ManagedObjectReference `xml:\"_this\"`\n\tVmfsUuid      string                 `xml:\"vmfsUuid\"`\n\tUnmapPriority string                 `xml:\"unmapPriority\"`\n}\n\nfunc init() {\n\tt[\"UpdateVmfsUnmapPriorityRequestType\"] = reflect.TypeOf((*UpdateVmfsUnmapPriorityRequestType)(nil)).Elem()\n}\n\ntype UpdateVmfsUnmapPriorityResponse struct {\n}\n\ntype UpdateVsanRequestType struct {\n\tThis   ManagedObjectReference `xml:\"_this\"`\n\tConfig VsanHostConfigInfo     `xml:\"config\"`\n}\n\nfunc init() {\n\tt[\"UpdateVsanRequestType\"] = reflect.TypeOf((*UpdateVsanRequestType)(nil)).Elem()\n}\n\ntype UpdateVsan_Task UpdateVsanRequestType\n\nfunc init() {\n\tt[\"UpdateVsan_Task\"] = reflect.TypeOf((*UpdateVsan_Task)(nil)).Elem()\n}\n\ntype UpdateVsan_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype 
UpdatedAgentBeingRestartedEvent struct {\n\tHostEvent\n}\n\nfunc init() {\n\tt[\"UpdatedAgentBeingRestartedEvent\"] = reflect.TypeOf((*UpdatedAgentBeingRestartedEvent)(nil)).Elem()\n}\n\ntype UpgradeEvent struct {\n\tEvent\n\n\tMessage string `xml:\"message\"`\n}\n\nfunc init() {\n\tt[\"UpgradeEvent\"] = reflect.TypeOf((*UpgradeEvent)(nil)).Elem()\n}\n\ntype UpgradeIoFilterRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tFilterId string                 `xml:\"filterId\"`\n\tCompRes  ManagedObjectReference `xml:\"compRes\"`\n\tVibUrl   string                 `xml:\"vibUrl\"`\n}\n\nfunc init() {\n\tt[\"UpgradeIoFilterRequestType\"] = reflect.TypeOf((*UpgradeIoFilterRequestType)(nil)).Elem()\n}\n\ntype UpgradeIoFilter_Task UpgradeIoFilterRequestType\n\nfunc init() {\n\tt[\"UpgradeIoFilter_Task\"] = reflect.TypeOf((*UpgradeIoFilter_Task)(nil)).Elem()\n}\n\ntype UpgradeIoFilter_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype UpgradeToolsRequestType struct {\n\tThis             ManagedObjectReference `xml:\"_this\"`\n\tInstallerOptions string                 `xml:\"installerOptions,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UpgradeToolsRequestType\"] = reflect.TypeOf((*UpgradeToolsRequestType)(nil)).Elem()\n}\n\ntype UpgradeTools_Task UpgradeToolsRequestType\n\nfunc init() {\n\tt[\"UpgradeTools_Task\"] = reflect.TypeOf((*UpgradeTools_Task)(nil)).Elem()\n}\n\ntype UpgradeTools_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype UpgradeVMRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tVersion string                 `xml:\"version,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UpgradeVMRequestType\"] = reflect.TypeOf((*UpgradeVMRequestType)(nil)).Elem()\n}\n\ntype UpgradeVM_Task UpgradeVMRequestType\n\nfunc init() {\n\tt[\"UpgradeVM_Task\"] = reflect.TypeOf((*UpgradeVM_Task)(nil)).Elem()\n}\n\ntype UpgradeVM_TaskResponse struct {\n\tReturnval 
ManagedObjectReference `xml:\"returnval\"`\n}\n\ntype UpgradeVmLayout UpgradeVmLayoutRequestType\n\nfunc init() {\n\tt[\"UpgradeVmLayout\"] = reflect.TypeOf((*UpgradeVmLayout)(nil)).Elem()\n}\n\ntype UpgradeVmLayoutRequestType struct {\n\tThis ManagedObjectReference `xml:\"_this\"`\n}\n\nfunc init() {\n\tt[\"UpgradeVmLayoutRequestType\"] = reflect.TypeOf((*UpgradeVmLayoutRequestType)(nil)).Elem()\n}\n\ntype UpgradeVmLayoutResponse struct {\n}\n\ntype UpgradeVmfs UpgradeVmfsRequestType\n\nfunc init() {\n\tt[\"UpgradeVmfs\"] = reflect.TypeOf((*UpgradeVmfs)(nil)).Elem()\n}\n\ntype UpgradeVmfsRequestType struct {\n\tThis     ManagedObjectReference `xml:\"_this\"`\n\tVmfsPath string                 `xml:\"vmfsPath\"`\n}\n\nfunc init() {\n\tt[\"UpgradeVmfsRequestType\"] = reflect.TypeOf((*UpgradeVmfsRequestType)(nil)).Elem()\n}\n\ntype UpgradeVmfsResponse struct {\n}\n\ntype UpgradeVsanObjects UpgradeVsanObjectsRequestType\n\nfunc init() {\n\tt[\"UpgradeVsanObjects\"] = reflect.TypeOf((*UpgradeVsanObjects)(nil)).Elem()\n}\n\ntype UpgradeVsanObjectsRequestType struct {\n\tThis       ManagedObjectReference `xml:\"_this\"`\n\tUuids      []string               `xml:\"uuids\"`\n\tNewVersion int32                  `xml:\"newVersion\"`\n}\n\nfunc init() {\n\tt[\"UpgradeVsanObjectsRequestType\"] = reflect.TypeOf((*UpgradeVsanObjectsRequestType)(nil)).Elem()\n}\n\ntype UpgradeVsanObjectsResponse struct {\n\tReturnval []HostVsanInternalSystemVsanObjectOperationResult `xml:\"returnval,omitempty\"`\n}\n\ntype UplinkPortMtuNotSupportEvent struct {\n\tDvsHealthStatusChangeEvent\n}\n\nfunc init() {\n\tt[\"UplinkPortMtuNotSupportEvent\"] = reflect.TypeOf((*UplinkPortMtuNotSupportEvent)(nil)).Elem()\n}\n\ntype UplinkPortMtuSupportEvent struct {\n\tDvsHealthStatusChangeEvent\n}\n\nfunc init() {\n\tt[\"UplinkPortMtuSupportEvent\"] = reflect.TypeOf((*UplinkPortMtuSupportEvent)(nil)).Elem()\n}\n\ntype UplinkPortVlanTrunkedEvent struct {\n\tDvsHealthStatusChangeEvent\n}\n\nfunc init() 
{\n\tt[\"UplinkPortVlanTrunkedEvent\"] = reflect.TypeOf((*UplinkPortVlanTrunkedEvent)(nil)).Elem()\n}\n\ntype UplinkPortVlanUntrunkedEvent struct {\n\tDvsHealthStatusChangeEvent\n}\n\nfunc init() {\n\tt[\"UplinkPortVlanUntrunkedEvent\"] = reflect.TypeOf((*UplinkPortVlanUntrunkedEvent)(nil)).Elem()\n}\n\ntype UploadClientCert UploadClientCertRequestType\n\nfunc init() {\n\tt[\"UploadClientCert\"] = reflect.TypeOf((*UploadClientCert)(nil)).Elem()\n}\n\ntype UploadClientCertRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tCluster     KeyProviderId          `xml:\"cluster\"`\n\tCertificate string                 `xml:\"certificate\"`\n\tPrivateKey  string                 `xml:\"privateKey\"`\n}\n\nfunc init() {\n\tt[\"UploadClientCertRequestType\"] = reflect.TypeOf((*UploadClientCertRequestType)(nil)).Elem()\n}\n\ntype UploadClientCertResponse struct {\n}\n\ntype UploadKmipServerCert UploadKmipServerCertRequestType\n\nfunc init() {\n\tt[\"UploadKmipServerCert\"] = reflect.TypeOf((*UploadKmipServerCert)(nil)).Elem()\n}\n\ntype UploadKmipServerCertRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tCluster     KeyProviderId          `xml:\"cluster\"`\n\tCertificate string                 `xml:\"certificate\"`\n}\n\nfunc init() {\n\tt[\"UploadKmipServerCertRequestType\"] = reflect.TypeOf((*UploadKmipServerCertRequestType)(nil)).Elem()\n}\n\ntype UploadKmipServerCertResponse struct {\n}\n\ntype UsbScanCodeSpec struct {\n\tDynamicData\n\n\tKeyEvents []UsbScanCodeSpecKeyEvent `xml:\"keyEvents\"`\n}\n\nfunc init() {\n\tt[\"UsbScanCodeSpec\"] = reflect.TypeOf((*UsbScanCodeSpec)(nil)).Elem()\n}\n\ntype UsbScanCodeSpecKeyEvent struct {\n\tDynamicData\n\n\tUsbHidCode int32                        `xml:\"usbHidCode\"`\n\tModifiers  *UsbScanCodeSpecModifierType `xml:\"modifiers,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UsbScanCodeSpecKeyEvent\"] = reflect.TypeOf((*UsbScanCodeSpecKeyEvent)(nil)).Elem()\n}\n\ntype 
UsbScanCodeSpecModifierType struct {\n\tDynamicData\n\n\tLeftControl  *bool `xml:\"leftControl\"`\n\tLeftShift    *bool `xml:\"leftShift\"`\n\tLeftAlt      *bool `xml:\"leftAlt\"`\n\tLeftGui      *bool `xml:\"leftGui\"`\n\tRightControl *bool `xml:\"rightControl\"`\n\tRightShift   *bool `xml:\"rightShift\"`\n\tRightAlt     *bool `xml:\"rightAlt\"`\n\tRightGui     *bool `xml:\"rightGui\"`\n}\n\nfunc init() {\n\tt[\"UsbScanCodeSpecModifierType\"] = reflect.TypeOf((*UsbScanCodeSpecModifierType)(nil)).Elem()\n}\n\ntype UserAssignedToGroup struct {\n\tHostEvent\n\n\tUserLogin string `xml:\"userLogin\"`\n\tGroup     string `xml:\"group\"`\n}\n\nfunc init() {\n\tt[\"UserAssignedToGroup\"] = reflect.TypeOf((*UserAssignedToGroup)(nil)).Elem()\n}\n\ntype UserGroupProfile struct {\n\tApplyProfile\n\n\tKey string `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"UserGroupProfile\"] = reflect.TypeOf((*UserGroupProfile)(nil)).Elem()\n}\n\ntype UserInputRequiredParameterMetadata struct {\n\tProfilePolicyOptionMetadata\n\n\tUserInputParameter []ProfileParameterMetadata `xml:\"userInputParameter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UserInputRequiredParameterMetadata\"] = reflect.TypeOf((*UserInputRequiredParameterMetadata)(nil)).Elem()\n}\n\ntype UserLoginSessionEvent struct {\n\tSessionEvent\n\n\tIpAddress string `xml:\"ipAddress\"`\n\tUserAgent string `xml:\"userAgent,omitempty\"`\n\tLocale    string `xml:\"locale\"`\n\tSessionId string `xml:\"sessionId\"`\n}\n\nfunc init() {\n\tt[\"UserLoginSessionEvent\"] = reflect.TypeOf((*UserLoginSessionEvent)(nil)).Elem()\n}\n\ntype UserLogoutSessionEvent struct {\n\tSessionEvent\n\n\tIpAddress string     `xml:\"ipAddress,omitempty\"`\n\tUserAgent string     `xml:\"userAgent,omitempty\"`\n\tCallCount int64      `xml:\"callCount,omitempty\"`\n\tSessionId string     `xml:\"sessionId,omitempty\"`\n\tLoginTime *time.Time `xml:\"loginTime\"`\n}\n\nfunc init() {\n\tt[\"UserLogoutSessionEvent\"] = 
reflect.TypeOf((*UserLogoutSessionEvent)(nil)).Elem()\n}\n\ntype UserNotFound struct {\n\tVimFault\n\n\tPrincipal  string `xml:\"principal\"`\n\tUnresolved bool   `xml:\"unresolved\"`\n}\n\nfunc init() {\n\tt[\"UserNotFound\"] = reflect.TypeOf((*UserNotFound)(nil)).Elem()\n}\n\ntype UserNotFoundFault UserNotFound\n\nfunc init() {\n\tt[\"UserNotFoundFault\"] = reflect.TypeOf((*UserNotFoundFault)(nil)).Elem()\n}\n\ntype UserPasswordChanged struct {\n\tHostEvent\n\n\tUserLogin string `xml:\"userLogin\"`\n}\n\nfunc init() {\n\tt[\"UserPasswordChanged\"] = reflect.TypeOf((*UserPasswordChanged)(nil)).Elem()\n}\n\ntype UserPrivilegeResult struct {\n\tDynamicData\n\n\tEntity     ManagedObjectReference `xml:\"entity\"`\n\tPrivileges []string               `xml:\"privileges,omitempty\"`\n}\n\nfunc init() {\n\tt[\"UserPrivilegeResult\"] = reflect.TypeOf((*UserPrivilegeResult)(nil)).Elem()\n}\n\ntype UserProfile struct {\n\tApplyProfile\n\n\tKey string `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"UserProfile\"] = reflect.TypeOf((*UserProfile)(nil)).Elem()\n}\n\ntype UserSearchResult struct {\n\tDynamicData\n\n\tPrincipal string `xml:\"principal\"`\n\tFullName  string `xml:\"fullName,omitempty\"`\n\tGroup     bool   `xml:\"group\"`\n}\n\nfunc init() {\n\tt[\"UserSearchResult\"] = reflect.TypeOf((*UserSearchResult)(nil)).Elem()\n}\n\ntype UserSession struct {\n\tDynamicData\n\n\tKey              string    `xml:\"key\"`\n\tUserName         string    `xml:\"userName\"`\n\tFullName         string    `xml:\"fullName\"`\n\tLoginTime        time.Time `xml:\"loginTime\"`\n\tLastActiveTime   time.Time `xml:\"lastActiveTime\"`\n\tLocale           string    `xml:\"locale\"`\n\tMessageLocale    string    `xml:\"messageLocale\"`\n\tExtensionSession *bool     `xml:\"extensionSession\"`\n\tIpAddress        string    `xml:\"ipAddress,omitempty\"`\n\tUserAgent        string    `xml:\"userAgent,omitempty\"`\n\tCallCount        int64     `xml:\"callCount,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"UserSession\"] = reflect.TypeOf((*UserSession)(nil)).Elem()\n}\n\ntype UserUnassignedFromGroup struct {\n\tHostEvent\n\n\tUserLogin string `xml:\"userLogin\"`\n\tGroup     string `xml:\"group\"`\n}\n\nfunc init() {\n\tt[\"UserUnassignedFromGroup\"] = reflect.TypeOf((*UserUnassignedFromGroup)(nil)).Elem()\n}\n\ntype UserUpgradeEvent struct {\n\tUpgradeEvent\n}\n\nfunc init() {\n\tt[\"UserUpgradeEvent\"] = reflect.TypeOf((*UserUpgradeEvent)(nil)).Elem()\n}\n\ntype VASAStorageArray struct {\n\tDynamicData\n\n\tName     string `xml:\"name\"`\n\tUuid     string `xml:\"uuid\"`\n\tVendorId string `xml:\"vendorId\"`\n\tModelId  string `xml:\"modelId\"`\n}\n\nfunc init() {\n\tt[\"VASAStorageArray\"] = reflect.TypeOf((*VASAStorageArray)(nil)).Elem()\n}\n\ntype VAppCloneSpec struct {\n\tDynamicData\n\n\tLocation        ManagedObjectReference            `xml:\"location\"`\n\tHost            *ManagedObjectReference           `xml:\"host,omitempty\"`\n\tResourceSpec    *ResourceConfigSpec               `xml:\"resourceSpec,omitempty\"`\n\tVmFolder        *ManagedObjectReference           `xml:\"vmFolder,omitempty\"`\n\tNetworkMapping  []VAppCloneSpecNetworkMappingPair `xml:\"networkMapping,omitempty\"`\n\tProperty        []KeyValue                        `xml:\"property,omitempty\"`\n\tResourceMapping []VAppCloneSpecResourceMap        `xml:\"resourceMapping,omitempty\"`\n\tProvisioning    string                            `xml:\"provisioning,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VAppCloneSpec\"] = reflect.TypeOf((*VAppCloneSpec)(nil)).Elem()\n}\n\ntype VAppCloneSpecNetworkMappingPair struct {\n\tDynamicData\n\n\tSource      ManagedObjectReference `xml:\"source\"`\n\tDestination ManagedObjectReference `xml:\"destination\"`\n}\n\nfunc init() {\n\tt[\"VAppCloneSpecNetworkMappingPair\"] = reflect.TypeOf((*VAppCloneSpecNetworkMappingPair)(nil)).Elem()\n}\n\ntype VAppCloneSpecResourceMap struct {\n\tDynamicData\n\n\tSource       ManagedObjectReference  
`xml:\"source\"`\n\tParent       *ManagedObjectReference `xml:\"parent,omitempty\"`\n\tResourceSpec *ResourceConfigSpec     `xml:\"resourceSpec,omitempty\"`\n\tLocation     *ManagedObjectReference `xml:\"location,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VAppCloneSpecResourceMap\"] = reflect.TypeOf((*VAppCloneSpecResourceMap)(nil)).Elem()\n}\n\ntype VAppConfigFault struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"VAppConfigFault\"] = reflect.TypeOf((*VAppConfigFault)(nil)).Elem()\n}\n\ntype VAppConfigFaultFault BaseVAppConfigFault\n\nfunc init() {\n\tt[\"VAppConfigFaultFault\"] = reflect.TypeOf((*VAppConfigFaultFault)(nil)).Elem()\n}\n\ntype VAppConfigInfo struct {\n\tVmConfigInfo\n\n\tEntityConfig []VAppEntityConfigInfo `xml:\"entityConfig,omitempty\"`\n\tAnnotation   string                 `xml:\"annotation\"`\n\tInstanceUuid string                 `xml:\"instanceUuid,omitempty\"`\n\tManagedBy    *ManagedByInfo         `xml:\"managedBy,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VAppConfigInfo\"] = reflect.TypeOf((*VAppConfigInfo)(nil)).Elem()\n}\n\ntype VAppConfigSpec struct {\n\tVmConfigSpec\n\n\tEntityConfig []VAppEntityConfigInfo `xml:\"entityConfig,omitempty\"`\n\tAnnotation   string                 `xml:\"annotation,omitempty\"`\n\tInstanceUuid string                 `xml:\"instanceUuid,omitempty\"`\n\tManagedBy    *ManagedByInfo         `xml:\"managedBy,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VAppConfigSpec\"] = reflect.TypeOf((*VAppConfigSpec)(nil)).Elem()\n}\n\ntype VAppEntityConfigInfo struct {\n\tDynamicData\n\n\tKey               *ManagedObjectReference `xml:\"key,omitempty\"`\n\tTag               string                  `xml:\"tag,omitempty\"`\n\tStartOrder        int32                   `xml:\"startOrder,omitempty\"`\n\tStartDelay        int32                   `xml:\"startDelay,omitempty\"`\n\tWaitingForGuest   *bool                   `xml:\"waitingForGuest\"`\n\tStartAction       string                  `xml:\"startAction,omitempty\"`\n\tStopDelay        
 int32                   `xml:\"stopDelay,omitempty\"`\n\tStopAction        string                  `xml:\"stopAction,omitempty\"`\n\tDestroyWithParent *bool                   `xml:\"destroyWithParent\"`\n}\n\nfunc init() {\n\tt[\"VAppEntityConfigInfo\"] = reflect.TypeOf((*VAppEntityConfigInfo)(nil)).Elem()\n}\n\ntype VAppIPAssignmentInfo struct {\n\tDynamicData\n\n\tSupportedAllocationScheme []string `xml:\"supportedAllocationScheme,omitempty\"`\n\tIpAllocationPolicy        string   `xml:\"ipAllocationPolicy,omitempty\"`\n\tSupportedIpProtocol       []string `xml:\"supportedIpProtocol,omitempty\"`\n\tIpProtocol                string   `xml:\"ipProtocol,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VAppIPAssignmentInfo\"] = reflect.TypeOf((*VAppIPAssignmentInfo)(nil)).Elem()\n}\n\ntype VAppNotRunning struct {\n\tVmConfigFault\n}\n\nfunc init() {\n\tt[\"VAppNotRunning\"] = reflect.TypeOf((*VAppNotRunning)(nil)).Elem()\n}\n\ntype VAppNotRunningFault VAppNotRunning\n\nfunc init() {\n\tt[\"VAppNotRunningFault\"] = reflect.TypeOf((*VAppNotRunningFault)(nil)).Elem()\n}\n\ntype VAppOperationInProgress struct {\n\tRuntimeFault\n}\n\nfunc init() {\n\tt[\"VAppOperationInProgress\"] = reflect.TypeOf((*VAppOperationInProgress)(nil)).Elem()\n}\n\ntype VAppOperationInProgressFault VAppOperationInProgress\n\nfunc init() {\n\tt[\"VAppOperationInProgressFault\"] = reflect.TypeOf((*VAppOperationInProgressFault)(nil)).Elem()\n}\n\ntype VAppOvfSectionInfo struct {\n\tDynamicData\n\n\tKey             int32  `xml:\"key,omitempty\"`\n\tNamespace       string `xml:\"namespace,omitempty\"`\n\tType            string `xml:\"type,omitempty\"`\n\tAtEnvelopeLevel *bool  `xml:\"atEnvelopeLevel\"`\n\tContents        string `xml:\"contents,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VAppOvfSectionInfo\"] = reflect.TypeOf((*VAppOvfSectionInfo)(nil)).Elem()\n}\n\ntype VAppOvfSectionSpec struct {\n\tArrayUpdateSpec\n\n\tInfo *VAppOvfSectionInfo `xml:\"info,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"VAppOvfSectionSpec\"] = reflect.TypeOf((*VAppOvfSectionSpec)(nil)).Elem()\n}\n\ntype VAppProductInfo struct {\n\tDynamicData\n\n\tKey         int32  `xml:\"key\"`\n\tClassId     string `xml:\"classId,omitempty\"`\n\tInstanceId  string `xml:\"instanceId,omitempty\"`\n\tName        string `xml:\"name,omitempty\"`\n\tVendor      string `xml:\"vendor,omitempty\"`\n\tVersion     string `xml:\"version,omitempty\"`\n\tFullVersion string `xml:\"fullVersion,omitempty\"`\n\tVendorUrl   string `xml:\"vendorUrl,omitempty\"`\n\tProductUrl  string `xml:\"productUrl,omitempty\"`\n\tAppUrl      string `xml:\"appUrl,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VAppProductInfo\"] = reflect.TypeOf((*VAppProductInfo)(nil)).Elem()\n}\n\ntype VAppProductSpec struct {\n\tArrayUpdateSpec\n\n\tInfo *VAppProductInfo `xml:\"info,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VAppProductSpec\"] = reflect.TypeOf((*VAppProductSpec)(nil)).Elem()\n}\n\ntype VAppPropertyFault struct {\n\tVmConfigFault\n\n\tId       string `xml:\"id\"`\n\tCategory string `xml:\"category\"`\n\tLabel    string `xml:\"label\"`\n\tType     string `xml:\"type\"`\n\tValue    string `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"VAppPropertyFault\"] = reflect.TypeOf((*VAppPropertyFault)(nil)).Elem()\n}\n\ntype VAppPropertyFaultFault BaseVAppPropertyFault\n\nfunc init() {\n\tt[\"VAppPropertyFaultFault\"] = reflect.TypeOf((*VAppPropertyFaultFault)(nil)).Elem()\n}\n\ntype VAppPropertyInfo struct {\n\tDynamicData\n\n\tKey              int32  `xml:\"key\"`\n\tClassId          string `xml:\"classId,omitempty\"`\n\tInstanceId       string `xml:\"instanceId,omitempty\"`\n\tId               string `xml:\"id,omitempty\"`\n\tCategory         string `xml:\"category,omitempty\"`\n\tLabel            string `xml:\"label,omitempty\"`\n\tType             string `xml:\"type,omitempty\"`\n\tTypeReference    string `xml:\"typeReference,omitempty\"`\n\tUserConfigurable *bool  `xml:\"userConfigurable\"`\n\tDefaultValue     string 
`xml:\"defaultValue,omitempty\"`\n\tValue            string `xml:\"value,omitempty\"`\n\tDescription      string `xml:\"description,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VAppPropertyInfo\"] = reflect.TypeOf((*VAppPropertyInfo)(nil)).Elem()\n}\n\ntype VAppPropertySpec struct {\n\tArrayUpdateSpec\n\n\tInfo *VAppPropertyInfo `xml:\"info,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VAppPropertySpec\"] = reflect.TypeOf((*VAppPropertySpec)(nil)).Elem()\n}\n\ntype VAppTaskInProgress struct {\n\tTaskInProgress\n}\n\nfunc init() {\n\tt[\"VAppTaskInProgress\"] = reflect.TypeOf((*VAppTaskInProgress)(nil)).Elem()\n}\n\ntype VAppTaskInProgressFault VAppTaskInProgress\n\nfunc init() {\n\tt[\"VAppTaskInProgressFault\"] = reflect.TypeOf((*VAppTaskInProgressFault)(nil)).Elem()\n}\n\ntype VFlashCacheHotConfigNotSupported struct {\n\tVmConfigFault\n}\n\nfunc init() {\n\tt[\"VFlashCacheHotConfigNotSupported\"] = reflect.TypeOf((*VFlashCacheHotConfigNotSupported)(nil)).Elem()\n}\n\ntype VFlashCacheHotConfigNotSupportedFault VFlashCacheHotConfigNotSupported\n\nfunc init() {\n\tt[\"VFlashCacheHotConfigNotSupportedFault\"] = reflect.TypeOf((*VFlashCacheHotConfigNotSupportedFault)(nil)).Elem()\n}\n\ntype VFlashModuleNotSupported struct {\n\tVmConfigFault\n\n\tVmName     string `xml:\"vmName\"`\n\tModuleName string `xml:\"moduleName\"`\n\tReason     string `xml:\"reason\"`\n\tHostName   string `xml:\"hostName\"`\n}\n\nfunc init() {\n\tt[\"VFlashModuleNotSupported\"] = reflect.TypeOf((*VFlashModuleNotSupported)(nil)).Elem()\n}\n\ntype VFlashModuleNotSupportedFault VFlashModuleNotSupported\n\nfunc init() {\n\tt[\"VFlashModuleNotSupportedFault\"] = reflect.TypeOf((*VFlashModuleNotSupportedFault)(nil)).Elem()\n}\n\ntype VFlashModuleVersionIncompatible struct {\n\tVimFault\n\n\tModuleName             string `xml:\"moduleName\"`\n\tVmRequestModuleVersion string `xml:\"vmRequestModuleVersion\"`\n\tHostMinSupportedVerson string `xml:\"hostMinSupportedVerson\"`\n\tHostModuleVersion      string 
`xml:\"hostModuleVersion\"`\n}\n\nfunc init() {\n\tt[\"VFlashModuleVersionIncompatible\"] = reflect.TypeOf((*VFlashModuleVersionIncompatible)(nil)).Elem()\n}\n\ntype VFlashModuleVersionIncompatibleFault VFlashModuleVersionIncompatible\n\nfunc init() {\n\tt[\"VFlashModuleVersionIncompatibleFault\"] = reflect.TypeOf((*VFlashModuleVersionIncompatibleFault)(nil)).Elem()\n}\n\ntype VMFSDatastoreCreatedEvent struct {\n\tHostEvent\n\n\tDatastore    DatastoreEventArgument `xml:\"datastore\"`\n\tDatastoreUrl string                 `xml:\"datastoreUrl,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VMFSDatastoreCreatedEvent\"] = reflect.TypeOf((*VMFSDatastoreCreatedEvent)(nil)).Elem()\n}\n\ntype VMFSDatastoreExpandedEvent struct {\n\tHostEvent\n\n\tDatastore DatastoreEventArgument `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"VMFSDatastoreExpandedEvent\"] = reflect.TypeOf((*VMFSDatastoreExpandedEvent)(nil)).Elem()\n}\n\ntype VMFSDatastoreExtendedEvent struct {\n\tHostEvent\n\n\tDatastore DatastoreEventArgument `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"VMFSDatastoreExtendedEvent\"] = reflect.TypeOf((*VMFSDatastoreExtendedEvent)(nil)).Elem()\n}\n\ntype VMINotSupported struct {\n\tDeviceNotSupported\n}\n\nfunc init() {\n\tt[\"VMINotSupported\"] = reflect.TypeOf((*VMINotSupported)(nil)).Elem()\n}\n\ntype VMINotSupportedFault VMINotSupported\n\nfunc init() {\n\tt[\"VMINotSupportedFault\"] = reflect.TypeOf((*VMINotSupportedFault)(nil)).Elem()\n}\n\ntype VMOnConflictDVPort struct {\n\tCannotAccessNetwork\n}\n\nfunc init() {\n\tt[\"VMOnConflictDVPort\"] = reflect.TypeOf((*VMOnConflictDVPort)(nil)).Elem()\n}\n\ntype VMOnConflictDVPortFault VMOnConflictDVPort\n\nfunc init() {\n\tt[\"VMOnConflictDVPortFault\"] = reflect.TypeOf((*VMOnConflictDVPortFault)(nil)).Elem()\n}\n\ntype VMOnVirtualIntranet struct {\n\tCannotAccessNetwork\n}\n\nfunc init() {\n\tt[\"VMOnVirtualIntranet\"] = reflect.TypeOf((*VMOnVirtualIntranet)(nil)).Elem()\n}\n\ntype VMOnVirtualIntranetFault 
VMOnVirtualIntranet\n\nfunc init() {\n\tt[\"VMOnVirtualIntranetFault\"] = reflect.TypeOf((*VMOnVirtualIntranetFault)(nil)).Elem()\n}\n\ntype VMotionAcrossNetworkNotSupported struct {\n\tMigrationFeatureNotSupported\n}\n\nfunc init() {\n\tt[\"VMotionAcrossNetworkNotSupported\"] = reflect.TypeOf((*VMotionAcrossNetworkNotSupported)(nil)).Elem()\n}\n\ntype VMotionAcrossNetworkNotSupportedFault VMotionAcrossNetworkNotSupported\n\nfunc init() {\n\tt[\"VMotionAcrossNetworkNotSupportedFault\"] = reflect.TypeOf((*VMotionAcrossNetworkNotSupportedFault)(nil)).Elem()\n}\n\ntype VMotionInterfaceIssue struct {\n\tMigrationFault\n\n\tAtSourceHost     bool                    `xml:\"atSourceHost\"`\n\tFailedHost       string                  `xml:\"failedHost\"`\n\tFailedHostEntity *ManagedObjectReference `xml:\"failedHostEntity,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VMotionInterfaceIssue\"] = reflect.TypeOf((*VMotionInterfaceIssue)(nil)).Elem()\n}\n\ntype VMotionInterfaceIssueFault BaseVMotionInterfaceIssue\n\nfunc init() {\n\tt[\"VMotionInterfaceIssueFault\"] = reflect.TypeOf((*VMotionInterfaceIssueFault)(nil)).Elem()\n}\n\ntype VMotionLicenseExpiredEvent struct {\n\tLicenseEvent\n}\n\nfunc init() {\n\tt[\"VMotionLicenseExpiredEvent\"] = reflect.TypeOf((*VMotionLicenseExpiredEvent)(nil)).Elem()\n}\n\ntype VMotionLinkCapacityLow struct {\n\tVMotionInterfaceIssue\n\n\tNetwork string `xml:\"network\"`\n}\n\nfunc init() {\n\tt[\"VMotionLinkCapacityLow\"] = reflect.TypeOf((*VMotionLinkCapacityLow)(nil)).Elem()\n}\n\ntype VMotionLinkCapacityLowFault VMotionLinkCapacityLow\n\nfunc init() {\n\tt[\"VMotionLinkCapacityLowFault\"] = reflect.TypeOf((*VMotionLinkCapacityLowFault)(nil)).Elem()\n}\n\ntype VMotionLinkDown struct {\n\tVMotionInterfaceIssue\n\n\tNetwork string `xml:\"network\"`\n}\n\nfunc init() {\n\tt[\"VMotionLinkDown\"] = reflect.TypeOf((*VMotionLinkDown)(nil)).Elem()\n}\n\ntype VMotionLinkDownFault VMotionLinkDown\n\nfunc init() {\n\tt[\"VMotionLinkDownFault\"] = 
reflect.TypeOf((*VMotionLinkDownFault)(nil)).Elem()\n}\n\ntype VMotionNotConfigured struct {\n\tVMotionInterfaceIssue\n}\n\nfunc init() {\n\tt[\"VMotionNotConfigured\"] = reflect.TypeOf((*VMotionNotConfigured)(nil)).Elem()\n}\n\ntype VMotionNotConfiguredFault VMotionNotConfigured\n\nfunc init() {\n\tt[\"VMotionNotConfiguredFault\"] = reflect.TypeOf((*VMotionNotConfiguredFault)(nil)).Elem()\n}\n\ntype VMotionNotLicensed struct {\n\tVMotionInterfaceIssue\n}\n\nfunc init() {\n\tt[\"VMotionNotLicensed\"] = reflect.TypeOf((*VMotionNotLicensed)(nil)).Elem()\n}\n\ntype VMotionNotLicensedFault VMotionNotLicensed\n\nfunc init() {\n\tt[\"VMotionNotLicensedFault\"] = reflect.TypeOf((*VMotionNotLicensedFault)(nil)).Elem()\n}\n\ntype VMotionNotSupported struct {\n\tVMotionInterfaceIssue\n}\n\nfunc init() {\n\tt[\"VMotionNotSupported\"] = reflect.TypeOf((*VMotionNotSupported)(nil)).Elem()\n}\n\ntype VMotionNotSupportedFault VMotionNotSupported\n\nfunc init() {\n\tt[\"VMotionNotSupportedFault\"] = reflect.TypeOf((*VMotionNotSupportedFault)(nil)).Elem()\n}\n\ntype VMotionProtocolIncompatible struct {\n\tMigrationFault\n}\n\nfunc init() {\n\tt[\"VMotionProtocolIncompatible\"] = reflect.TypeOf((*VMotionProtocolIncompatible)(nil)).Elem()\n}\n\ntype VMotionProtocolIncompatibleFault VMotionProtocolIncompatible\n\nfunc init() {\n\tt[\"VMotionProtocolIncompatibleFault\"] = reflect.TypeOf((*VMotionProtocolIncompatibleFault)(nil)).Elem()\n}\n\ntype VMwareDVSConfigInfo struct {\n\tDVSConfigInfo\n\n\tVspanSession                []VMwareVspanSession         `xml:\"vspanSession,omitempty\"`\n\tPvlanConfig                 []VMwareDVSPvlanMapEntry     `xml:\"pvlanConfig,omitempty\"`\n\tMaxMtu                      int32                        `xml:\"maxMtu\"`\n\tLinkDiscoveryProtocolConfig *LinkDiscoveryProtocolConfig `xml:\"linkDiscoveryProtocolConfig,omitempty\"`\n\tIpfixConfig                 *VMwareIpfixConfig           `xml:\"ipfixConfig,omitempty\"`\n\tLacpGroupConfig             
[]VMwareDvsLacpGroupConfig   `xml:\"lacpGroupConfig,omitempty\"`\n\tLacpApiVersion              string                       `xml:\"lacpApiVersion,omitempty\"`\n\tMulticastFilteringMode      string                       `xml:\"multicastFilteringMode,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VMwareDVSConfigInfo\"] = reflect.TypeOf((*VMwareDVSConfigInfo)(nil)).Elem()\n}\n\ntype VMwareDVSConfigSpec struct {\n\tDVSConfigSpec\n\n\tPvlanConfigSpec             []VMwareDVSPvlanConfigSpec   `xml:\"pvlanConfigSpec,omitempty\"`\n\tVspanConfigSpec             []VMwareDVSVspanConfigSpec   `xml:\"vspanConfigSpec,omitempty\"`\n\tMaxMtu                      int32                        `xml:\"maxMtu,omitempty\"`\n\tLinkDiscoveryProtocolConfig *LinkDiscoveryProtocolConfig `xml:\"linkDiscoveryProtocolConfig,omitempty\"`\n\tIpfixConfig                 *VMwareIpfixConfig           `xml:\"ipfixConfig,omitempty\"`\n\tLacpApiVersion              string                       `xml:\"lacpApiVersion,omitempty\"`\n\tMulticastFilteringMode      string                       `xml:\"multicastFilteringMode,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VMwareDVSConfigSpec\"] = reflect.TypeOf((*VMwareDVSConfigSpec)(nil)).Elem()\n}\n\ntype VMwareDVSFeatureCapability struct {\n\tDVSFeatureCapability\n\n\tVspanSupported             *bool                     `xml:\"vspanSupported\"`\n\tLldpSupported              *bool                     `xml:\"lldpSupported\"`\n\tIpfixSupported             *bool                     `xml:\"ipfixSupported\"`\n\tIpfixCapability            *VMwareDvsIpfixCapability `xml:\"ipfixCapability,omitempty\"`\n\tMulticastSnoopingSupported *bool                     `xml:\"multicastSnoopingSupported\"`\n\tVspanCapability            *VMwareDVSVspanCapability `xml:\"vspanCapability,omitempty\"`\n\tLacpCapability             *VMwareDvsLacpCapability  `xml:\"lacpCapability,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VMwareDVSFeatureCapability\"] = 
reflect.TypeOf((*VMwareDVSFeatureCapability)(nil)).Elem()\n}\n\ntype VMwareDVSHealthCheckCapability struct {\n\tDVSHealthCheckCapability\n\n\tVlanMtuSupported bool `xml:\"vlanMtuSupported\"`\n\tTeamingSupported bool `xml:\"teamingSupported\"`\n}\n\nfunc init() {\n\tt[\"VMwareDVSHealthCheckCapability\"] = reflect.TypeOf((*VMwareDVSHealthCheckCapability)(nil)).Elem()\n}\n\ntype VMwareDVSHealthCheckConfig struct {\n\tDVSHealthCheckConfig\n}\n\nfunc init() {\n\tt[\"VMwareDVSHealthCheckConfig\"] = reflect.TypeOf((*VMwareDVSHealthCheckConfig)(nil)).Elem()\n}\n\ntype VMwareDVSMtuHealthCheckResult struct {\n\tHostMemberUplinkHealthCheckResult\n\n\tMtuMismatch             bool           `xml:\"mtuMismatch\"`\n\tVlanSupportSwitchMtu    []NumericRange `xml:\"vlanSupportSwitchMtu,omitempty\"`\n\tVlanNotSupportSwitchMtu []NumericRange `xml:\"vlanNotSupportSwitchMtu,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VMwareDVSMtuHealthCheckResult\"] = reflect.TypeOf((*VMwareDVSMtuHealthCheckResult)(nil)).Elem()\n}\n\ntype VMwareDVSPortSetting struct {\n\tDVPortSetting\n\n\tVlan                BaseVmwareDistributedVirtualSwitchVlanSpec `xml:\"vlan,omitempty,typeattr\"`\n\tQosTag              *IntPolicy                                 `xml:\"qosTag,omitempty\"`\n\tUplinkTeamingPolicy *VmwareUplinkPortTeamingPolicy             `xml:\"uplinkTeamingPolicy,omitempty\"`\n\tSecurityPolicy      *DVSSecurityPolicy                         `xml:\"securityPolicy,omitempty\"`\n\tIpfixEnabled        *BoolPolicy                                `xml:\"ipfixEnabled,omitempty\"`\n\tTxUplink            *BoolPolicy                                `xml:\"txUplink,omitempty\"`\n\tLacpPolicy          *VMwareUplinkLacpPolicy                    `xml:\"lacpPolicy,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VMwareDVSPortSetting\"] = reflect.TypeOf((*VMwareDVSPortSetting)(nil)).Elem()\n}\n\ntype VMwareDVSPortgroupPolicy struct {\n\tDVPortgroupPolicy\n\n\tVlanOverrideAllowed           bool  
`xml:\"vlanOverrideAllowed\"`\n\tUplinkTeamingOverrideAllowed  bool  `xml:\"uplinkTeamingOverrideAllowed\"`\n\tSecurityPolicyOverrideAllowed bool  `xml:\"securityPolicyOverrideAllowed\"`\n\tIpfixOverrideAllowed          *bool `xml:\"ipfixOverrideAllowed\"`\n}\n\nfunc init() {\n\tt[\"VMwareDVSPortgroupPolicy\"] = reflect.TypeOf((*VMwareDVSPortgroupPolicy)(nil)).Elem()\n}\n\ntype VMwareDVSPvlanConfigSpec struct {\n\tDynamicData\n\n\tPvlanEntry VMwareDVSPvlanMapEntry `xml:\"pvlanEntry\"`\n\tOperation  string                 `xml:\"operation\"`\n}\n\nfunc init() {\n\tt[\"VMwareDVSPvlanConfigSpec\"] = reflect.TypeOf((*VMwareDVSPvlanConfigSpec)(nil)).Elem()\n}\n\ntype VMwareDVSPvlanMapEntry struct {\n\tDynamicData\n\n\tPrimaryVlanId   int32  `xml:\"primaryVlanId\"`\n\tSecondaryVlanId int32  `xml:\"secondaryVlanId\"`\n\tPvlanType       string `xml:\"pvlanType\"`\n}\n\nfunc init() {\n\tt[\"VMwareDVSPvlanMapEntry\"] = reflect.TypeOf((*VMwareDVSPvlanMapEntry)(nil)).Elem()\n}\n\ntype VMwareDVSTeamingHealthCheckConfig struct {\n\tVMwareDVSHealthCheckConfig\n}\n\nfunc init() {\n\tt[\"VMwareDVSTeamingHealthCheckConfig\"] = reflect.TypeOf((*VMwareDVSTeamingHealthCheckConfig)(nil)).Elem()\n}\n\ntype VMwareDVSTeamingHealthCheckResult struct {\n\tHostMemberHealthCheckResult\n\n\tTeamingStatus string `xml:\"teamingStatus\"`\n}\n\nfunc init() {\n\tt[\"VMwareDVSTeamingHealthCheckResult\"] = reflect.TypeOf((*VMwareDVSTeamingHealthCheckResult)(nil)).Elem()\n}\n\ntype VMwareDVSVlanHealthCheckResult struct {\n\tHostMemberUplinkHealthCheckResult\n\n\tTrunkedVlan   []NumericRange `xml:\"trunkedVlan,omitempty\"`\n\tUntrunkedVlan []NumericRange `xml:\"untrunkedVlan,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VMwareDVSVlanHealthCheckResult\"] = reflect.TypeOf((*VMwareDVSVlanHealthCheckResult)(nil)).Elem()\n}\n\ntype VMwareDVSVlanMtuHealthCheckConfig struct {\n\tVMwareDVSHealthCheckConfig\n}\n\nfunc init() {\n\tt[\"VMwareDVSVlanMtuHealthCheckConfig\"] = 
reflect.TypeOf((*VMwareDVSVlanMtuHealthCheckConfig)(nil)).Elem()\n}\n\ntype VMwareDVSVspanCapability struct {\n\tDynamicData\n\n\tMixedDestSupported         bool  `xml:\"mixedDestSupported\"`\n\tDvportSupported            bool  `xml:\"dvportSupported\"`\n\tRemoteSourceSupported      bool  `xml:\"remoteSourceSupported\"`\n\tRemoteDestSupported        bool  `xml:\"remoteDestSupported\"`\n\tEncapRemoteSourceSupported bool  `xml:\"encapRemoteSourceSupported\"`\n\tErspanProtocolSupported    *bool `xml:\"erspanProtocolSupported\"`\n}\n\nfunc init() {\n\tt[\"VMwareDVSVspanCapability\"] = reflect.TypeOf((*VMwareDVSVspanCapability)(nil)).Elem()\n}\n\ntype VMwareDVSVspanConfigSpec struct {\n\tDynamicData\n\n\tVspanSession VMwareVspanSession `xml:\"vspanSession\"`\n\tOperation    string             `xml:\"operation\"`\n}\n\nfunc init() {\n\tt[\"VMwareDVSVspanConfigSpec\"] = reflect.TypeOf((*VMwareDVSVspanConfigSpec)(nil)).Elem()\n}\n\ntype VMwareDvsIpfixCapability struct {\n\tDynamicData\n\n\tIpfixSupported               *bool `xml:\"ipfixSupported\"`\n\tIpv6ForIpfixSupported        *bool `xml:\"ipv6ForIpfixSupported\"`\n\tObservationDomainIdSupported *bool `xml:\"observationDomainIdSupported\"`\n}\n\nfunc init() {\n\tt[\"VMwareDvsIpfixCapability\"] = reflect.TypeOf((*VMwareDvsIpfixCapability)(nil)).Elem()\n}\n\ntype VMwareDvsLacpCapability struct {\n\tDynamicData\n\n\tLacpSupported           *bool `xml:\"lacpSupported\"`\n\tMultiLacpGroupSupported *bool `xml:\"multiLacpGroupSupported\"`\n}\n\nfunc init() {\n\tt[\"VMwareDvsLacpCapability\"] = reflect.TypeOf((*VMwareDvsLacpCapability)(nil)).Elem()\n}\n\ntype VMwareDvsLacpGroupConfig struct {\n\tDynamicData\n\n\tKey                  string                   `xml:\"key,omitempty\"`\n\tName                 string                   `xml:\"name,omitempty\"`\n\tMode                 string                   `xml:\"mode,omitempty\"`\n\tUplinkNum            int32                    `xml:\"uplinkNum,omitempty\"`\n\tLoadbalanceAlgorithm 
string                   `xml:\"loadbalanceAlgorithm,omitempty\"`\n\tVlan                 *VMwareDvsLagVlanConfig  `xml:\"vlan,omitempty\"`\n\tIpfix                *VMwareDvsLagIpfixConfig `xml:\"ipfix,omitempty\"`\n\tUplinkName           []string                 `xml:\"uplinkName,omitempty\"`\n\tUplinkPortKey        []string                 `xml:\"uplinkPortKey,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VMwareDvsLacpGroupConfig\"] = reflect.TypeOf((*VMwareDvsLacpGroupConfig)(nil)).Elem()\n}\n\ntype VMwareDvsLacpGroupSpec struct {\n\tDynamicData\n\n\tLacpGroupConfig VMwareDvsLacpGroupConfig `xml:\"lacpGroupConfig\"`\n\tOperation       string                   `xml:\"operation\"`\n}\n\nfunc init() {\n\tt[\"VMwareDvsLacpGroupSpec\"] = reflect.TypeOf((*VMwareDvsLacpGroupSpec)(nil)).Elem()\n}\n\ntype VMwareDvsLagIpfixConfig struct {\n\tDynamicData\n\n\tIpfixEnabled *bool `xml:\"ipfixEnabled\"`\n}\n\nfunc init() {\n\tt[\"VMwareDvsLagIpfixConfig\"] = reflect.TypeOf((*VMwareDvsLagIpfixConfig)(nil)).Elem()\n}\n\ntype VMwareDvsLagVlanConfig struct {\n\tDynamicData\n\n\tVlanId []NumericRange `xml:\"vlanId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VMwareDvsLagVlanConfig\"] = reflect.TypeOf((*VMwareDvsLagVlanConfig)(nil)).Elem()\n}\n\ntype VMwareIpfixConfig struct {\n\tDynamicData\n\n\tCollectorIpAddress  string `xml:\"collectorIpAddress,omitempty\"`\n\tCollectorPort       int32  `xml:\"collectorPort,omitempty\"`\n\tObservationDomainId int64  `xml:\"observationDomainId,omitempty\"`\n\tActiveFlowTimeout   int32  `xml:\"activeFlowTimeout\"`\n\tIdleFlowTimeout     int32  `xml:\"idleFlowTimeout\"`\n\tSamplingRate        int32  `xml:\"samplingRate\"`\n\tInternalFlowsOnly   bool   `xml:\"internalFlowsOnly\"`\n}\n\nfunc init() {\n\tt[\"VMwareIpfixConfig\"] = reflect.TypeOf((*VMwareIpfixConfig)(nil)).Elem()\n}\n\ntype VMwareUplinkLacpPolicy struct {\n\tInheritablePolicy\n\n\tEnable *BoolPolicy   `xml:\"enable,omitempty\"`\n\tMode   *StringPolicy `xml:\"mode,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"VMwareUplinkLacpPolicy\"] = reflect.TypeOf((*VMwareUplinkLacpPolicy)(nil)).Elem()\n}\n\ntype VMwareUplinkPortOrderPolicy struct {\n\tInheritablePolicy\n\n\tActiveUplinkPort  []string `xml:\"activeUplinkPort,omitempty\"`\n\tStandbyUplinkPort []string `xml:\"standbyUplinkPort,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VMwareUplinkPortOrderPolicy\"] = reflect.TypeOf((*VMwareUplinkPortOrderPolicy)(nil)).Elem()\n}\n\ntype VMwareVspanPort struct {\n\tDynamicData\n\n\tPortKey                   []string `xml:\"portKey,omitempty\"`\n\tUplinkPortName            []string `xml:\"uplinkPortName,omitempty\"`\n\tWildcardPortConnecteeType []string `xml:\"wildcardPortConnecteeType,omitempty\"`\n\tVlans                     []int32  `xml:\"vlans,omitempty\"`\n\tIpAddress                 []string `xml:\"ipAddress,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VMwareVspanPort\"] = reflect.TypeOf((*VMwareVspanPort)(nil)).Elem()\n}\n\ntype VMwareVspanSession struct {\n\tDynamicData\n\n\tKey                   string           `xml:\"key,omitempty\"`\n\tName                  string           `xml:\"name,omitempty\"`\n\tDescription           string           `xml:\"description,omitempty\"`\n\tEnabled               bool             `xml:\"enabled\"`\n\tSourcePortTransmitted *VMwareVspanPort `xml:\"sourcePortTransmitted,omitempty\"`\n\tSourcePortReceived    *VMwareVspanPort `xml:\"sourcePortReceived,omitempty\"`\n\tDestinationPort       *VMwareVspanPort `xml:\"destinationPort,omitempty\"`\n\tEncapsulationVlanId   int32            `xml:\"encapsulationVlanId,omitempty\"`\n\tStripOriginalVlan     bool             `xml:\"stripOriginalVlan\"`\n\tMirroredPacketLength  int32            `xml:\"mirroredPacketLength,omitempty\"`\n\tNormalTrafficAllowed  bool             `xml:\"normalTrafficAllowed\"`\n\tSessionType           string           `xml:\"sessionType,omitempty\"`\n\tSamplingRate          int32            `xml:\"samplingRate,omitempty\"`\n\tEncapType             string           
`xml:\"encapType,omitempty\"`\n\tErspanId              int32            `xml:\"erspanId,omitempty\"`\n\tErspanCOS             int32            `xml:\"erspanCOS,omitempty\"`\n\tErspanGraNanosec      *bool            `xml:\"erspanGraNanosec\"`\n}\n\nfunc init() {\n\tt[\"VMwareVspanSession\"] = reflect.TypeOf((*VMwareVspanSession)(nil)).Elem()\n}\n\ntype VRPEditSpec struct {\n\tDynamicData\n\n\tVrpId            string                     `xml:\"vrpId\"`\n\tDescription      string                     `xml:\"description,omitempty\"`\n\tCpuAllocation    *VrpResourceAllocationInfo `xml:\"cpuAllocation,omitempty\"`\n\tMemoryAllocation *VrpResourceAllocationInfo `xml:\"memoryAllocation,omitempty\"`\n\tAddedHubs        []ManagedObjectReference   `xml:\"addedHubs,omitempty\"`\n\tRemovedHubs      []ManagedObjectReference   `xml:\"removedHubs,omitempty\"`\n\tChangeVersion    int64                      `xml:\"changeVersion,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VRPEditSpec\"] = reflect.TypeOf((*VRPEditSpec)(nil)).Elem()\n}\n\ntype VStorageObject struct {\n\tDynamicData\n\n\tConfig VStorageObjectConfigInfo `xml:\"config\"`\n}\n\nfunc init() {\n\tt[\"VStorageObject\"] = reflect.TypeOf((*VStorageObject)(nil)).Elem()\n}\n\ntype VStorageObjectConfigInfo struct {\n\tBaseConfigInfo\n\n\tCapacityInMB    int64    `xml:\"capacityInMB\"`\n\tConsumptionType []string `xml:\"consumptionType,omitempty\"`\n\tConsumerId      []ID     `xml:\"consumerId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VStorageObjectConfigInfo\"] = reflect.TypeOf((*VStorageObjectConfigInfo)(nil)).Elem()\n}\n\ntype VStorageObjectStateInfo struct {\n\tDynamicData\n\n\tTentative *bool `xml:\"tentative\"`\n}\n\nfunc init() {\n\tt[\"VStorageObjectStateInfo\"] = reflect.TypeOf((*VStorageObjectStateInfo)(nil)).Elem()\n}\n\ntype VVolHostPE struct {\n\tDynamicData\n\n\tKey              ManagedObjectReference `xml:\"key\"`\n\tProtocolEndpoint []HostProtocolEndpoint `xml:\"protocolEndpoint\"`\n}\n\nfunc init() {\n\tt[\"VVolHostPE\"] 
= reflect.TypeOf((*VVolHostPE)(nil)).Elem()
}

// NOTE(review): the declarations in this region appear to be machine-generated
// Go bindings for vSphere Web Services API (vim25/VMODL) data objects. The
// repeating pattern is:
//   - a struct per API data object, embedding its VMODL base type
//     (DynamicData, HostEvent, VirtualDevice, ...) as its first field, with
//     `xml` struct tags carrying the wire element names;
//   - an init() that registers the type by its VMODL name in the
//     package-level registry map `t` (declared elsewhere in this file),
//     presumably so polymorphic XML payloads can be decoded to the concrete
//     Go type by name — note the `typeattr` tag option on polymorphic
//     fields; confirm against the package's XML decoding code.
// Pointer-typed scalar fields (e.g. *bool, *int32) distinguish "absent" from
// the zero value on the wire. Avoid hand-editing; regenerate instead.

type VVolVmConfigFileUpdateResult struct {
	DynamicData

	SucceededVmConfigFile []KeyValue                                           `xml:"succeededVmConfigFile,omitempty"`
	FailedVmConfigFile    []VVolVmConfigFileUpdateResultFailedVmConfigFileInfo `xml:"failedVmConfigFile,omitempty"`
}

func init() {
	t["VVolVmConfigFileUpdateResult"] = reflect.TypeOf((*VVolVmConfigFileUpdateResult)(nil)).Elem()
}

type VVolVmConfigFileUpdateResultFailedVmConfigFileInfo struct {
	DynamicData

	TargetConfigVVolId string               `xml:"targetConfigVVolId"`
	Fault              LocalizedMethodFault `xml:"fault"`
}

func init() {
	t["VVolVmConfigFileUpdateResultFailedVmConfigFileInfo"] = reflect.TypeOf((*VVolVmConfigFileUpdateResultFailedVmConfigFileInfo)(nil)).Elem()
}

// The Validate* trios below follow the file's RPC envelope pattern: a method
// alias type, a *RequestType struct whose `This` field is the managed object
// the call targets plus the method arguments, and a *Response struct holding
// the return value (empty when the method returns nothing).

type ValidateCredentialsInGuest ValidateCredentialsInGuestRequestType

func init() {
	t["ValidateCredentialsInGuest"] = reflect.TypeOf((*ValidateCredentialsInGuest)(nil)).Elem()
}

type ValidateCredentialsInGuestRequestType struct {
	This ManagedObjectReference  `xml:"_this"`
	Vm   ManagedObjectReference  `xml:"vm"`
	Auth BaseGuestAuthentication `xml:"auth,typeattr"`
}

func init() {
	t["ValidateCredentialsInGuestRequestType"] = reflect.TypeOf((*ValidateCredentialsInGuestRequestType)(nil)).Elem()
}

type ValidateCredentialsInGuestResponse struct {
}

type ValidateHost ValidateHostRequestType

func init() {
	t["ValidateHost"] = reflect.TypeOf((*ValidateHost)(nil)).Elem()
}

type ValidateHostRequestType struct {
	This          ManagedObjectReference `xml:"_this"`
	OvfDescriptor string                 `xml:"ovfDescriptor"`
	Host          ManagedObjectReference `xml:"host"`
	Vhp           OvfValidateHostParams  `xml:"vhp"`
}

func init() {
	t["ValidateHostRequestType"] = reflect.TypeOf((*ValidateHostRequestType)(nil)).Elem()
}

type ValidateHostResponse struct {
	Returnval OvfValidateHostResult `xml:"returnval"`
}

type ValidateMigration ValidateMigrationRequestType

func init() {
	t["ValidateMigration"] = reflect.TypeOf((*ValidateMigration)(nil)).Elem()
}

type ValidateMigrationRequestType struct {
	This     ManagedObjectReference   `xml:"_this"`
	Vm       []ManagedObjectReference `xml:"vm"`
	State    VirtualMachinePowerState `xml:"state,omitempty"`
	TestType []string                 `xml:"testType,omitempty"`
	Pool     *ManagedObjectReference  `xml:"pool,omitempty"`
	Host     *ManagedObjectReference  `xml:"host,omitempty"`
}

func init() {
	t["ValidateMigrationRequestType"] = reflect.TypeOf((*ValidateMigrationRequestType)(nil)).Elem()
}

type ValidateMigrationResponse struct {
	Returnval []BaseEvent `xml:"returnval,omitempty,typeattr"`
}

type VasaProviderContainerSpec struct {
	DynamicData

	VasaProviderInfo []VimVasaProviderInfo `xml:"vasaProviderInfo,omitempty"`
	ScId             string                `xml:"scId"`
	Deleted          bool                  `xml:"deleted"`
}

func init() {
	t["VasaProviderContainerSpec"] = reflect.TypeOf((*VasaProviderContainerSpec)(nil)).Elem()
}

// VcAgent*Event types: host events (they embed HostEvent) reporting agent
// uninstall/upgrade outcomes; failure variants carry an optional Reason.

type VcAgentUninstallFailedEvent struct {
	HostEvent

	Reason string `xml:"reason,omitempty"`
}

func init() {
	t["VcAgentUninstallFailedEvent"] = reflect.TypeOf((*VcAgentUninstallFailedEvent)(nil)).Elem()
}

type VcAgentUninstalledEvent struct {
	HostEvent
}

func init() {
	t["VcAgentUninstalledEvent"] = reflect.TypeOf((*VcAgentUninstalledEvent)(nil)).Elem()
}

type VcAgentUpgradeFailedEvent struct {
	HostEvent

	Reason string `xml:"reason,omitempty"`
}

func init() {
	t["VcAgentUpgradeFailedEvent"] = reflect.TypeOf((*VcAgentUpgradeFailedEvent)(nil)).Elem()
}

type VcAgentUpgradedEvent struct {
	HostEvent
}

func init() {
	t["VcAgentUpgradedEvent"] = reflect.TypeOf((*VcAgentUpgradedEvent)(nil)).Elem()
}

// Vcha* types: per their field names these describe an HA cluster's
// configuration (failover/witness nodes), deployment spec, network spec,
// and runtime/health state — TODO confirm semantics against the API docs.

type VchaClusterConfigInfo struct {
	DynamicData

	FailoverNodeInfo1 *FailoverNodeInfo `xml:"failoverNodeInfo1,omitempty"`
	FailoverNodeInfo2 *FailoverNodeInfo `xml:"failoverNodeInfo2,omitempty"`
	WitnessNodeInfo   *WitnessNodeInfo  `xml:"witnessNodeInfo,omitempty"`
	State             string            `xml:"state"`
}

func init() {
	t["VchaClusterConfigInfo"] = reflect.TypeOf((*VchaClusterConfigInfo)(nil)).Elem()
}

type VchaClusterConfigSpec struct {
	DynamicData

	PassiveIp string `xml:"passiveIp"`
	WitnessIp string `xml:"witnessIp"`
}

func init() {
	t["VchaClusterConfigSpec"] = reflect.TypeOf((*VchaClusterConfigSpec)(nil)).Elem()
}

type VchaClusterDeploymentSpec struct {
	DynamicData

	PassiveDeploymentSpec PassiveNodeDeploymentSpec `xml:"passiveDeploymentSpec"`
	WitnessDeploymentSpec BaseNodeDeploymentSpec    `xml:"witnessDeploymentSpec,typeattr"`
	ActiveVcSpec          SourceNodeSpec            `xml:"activeVcSpec"`
	ActiveVcNetworkConfig *ClusterNetworkConfigSpec `xml:"activeVcNetworkConfig,omitempty"`
}

func init() {
	t["VchaClusterDeploymentSpec"] = reflect.TypeOf((*VchaClusterDeploymentSpec)(nil)).Elem()
}

type VchaClusterHealth struct {
	DynamicData

	RuntimeInfo           VchaClusterRuntimeInfo `xml:"runtimeInfo"`
	HealthMessages        []LocalizableMessage   `xml:"healthMessages,omitempty"`
	AdditionalInformation []LocalizableMessage   `xml:"additionalInformation,omitempty"`
}

func init() {
	t["VchaClusterHealth"] = reflect.TypeOf((*VchaClusterHealth)(nil)).Elem()
}

type VchaClusterNetworkSpec struct {
	DynamicData

	WitnessNetworkSpec BaseNodeNetworkSpec    `xml:"witnessNetworkSpec,typeattr"`
	PassiveNetworkSpec PassiveNodeNetworkSpec `xml:"passiveNetworkSpec"`
}

func init() {
	t["VchaClusterNetworkSpec"] = reflect.TypeOf((*VchaClusterNetworkSpec)(nil)).Elem()
}

type VchaClusterRuntimeInfo struct {
	DynamicData

	ClusterState string                `xml:"clusterState"`
	NodeInfo     []VchaNodeRuntimeInfo `xml:"nodeInfo,omitempty"`
	ClusterMode  string                `xml:"clusterMode"`
}

func init() {
	t["VchaClusterRuntimeInfo"] = reflect.TypeOf((*VchaClusterRuntimeInfo)(nil)).Elem()
}

type VchaNodeRuntimeInfo struct {
	DynamicData

	NodeState string `xml:"nodeState"`
	NodeRole  string `xml:"nodeRole"`
	NodeIp    string `xml:"nodeIp"`
}

func init() {
	t["VchaNodeRuntimeInfo"] = reflect.TypeOf((*VchaNodeRuntimeInfo)(nil)).Elem()
}

type VimAccountPasswordChangedEvent struct {
	HostEvent
}

func init() {
	t["VimAccountPasswordChangedEvent"] = reflect.TypeOf((*VimAccountPasswordChangedEvent)(nil)).Elem()
}

type VimFault struct {
	MethodFault
}

func init() {
	t["VimFault"] = reflect.TypeOf((*VimFault)(nil)).Elem()
}

// *Fault aliases (here and below) mirror the file's fault-wrapper pattern:
// an alias of the fault's base interface/struct type, registered separately.
type VimFaultFault BaseVimFault

func init() {
	t["VimFaultFault"] = reflect.TypeOf((*VimFaultFault)(nil)).Elem()
}

type VimVasaProvider struct {
	DynamicData

	Url                   string `xml:"url"`
	Name                  string `xml:"name,omitempty"`
	SelfSignedCertificate string `xml:"selfSignedCertificate,omitempty"`
}

func init() {
	t["VimVasaProvider"] = reflect.TypeOf((*VimVasaProvider)(nil)).Elem()
}

type VimVasaProviderInfo struct {
	DynamicData

	Provider   VimVasaProvider                `xml:"provider"`
	ArrayState []VimVasaProviderStatePerArray `xml:"arrayState,omitempty"`
}

func init() {
	t["VimVasaProviderInfo"] = reflect.TypeOf((*VimVasaProviderInfo)(nil)).Elem()
}

type VimVasaProviderStatePerArray struct {
	DynamicData

	Priority int32  `xml:"priority"`
	ArrayId  string `xml:"arrayId"`
	Active   bool   `xml:"active"`
}

func init() {
	t["VimVasaProviderStatePerArray"] = reflect.TypeOf((*VimVasaProviderStatePerArray)(nil)).Elem()
}

// Virtual hardware device types. Concrete devices embed VirtualDevice (or a
// controller subtype); each device family pairs backing "Info" structs (the
// configured backing) with backing "Option" structs (what a backing supports,
// expressed via BoolOption/ChoiceOption/IntOption fields).

type VirtualAHCIController struct {
	VirtualSATAController
}

func init() {
	t["VirtualAHCIController"] = reflect.TypeOf((*VirtualAHCIController)(nil)).Elem()
}

type VirtualAHCIControllerOption struct {
	VirtualSATAControllerOption
}

func init() {
	t["VirtualAHCIControllerOption"] = reflect.TypeOf((*VirtualAHCIControllerOption)(nil)).Elem()
}

type VirtualAppImportSpec struct {
	ImportSpec

	Name             string             `xml:"name"`
	VAppConfigSpec   VAppConfigSpec     `xml:"vAppConfigSpec"`
	ResourcePoolSpec ResourceConfigSpec `xml:"resourcePoolSpec"`
	Child            []BaseImportSpec   `xml:"child,omitempty,typeattr"`
}

func init() {
	t["VirtualAppImportSpec"] = reflect.TypeOf((*VirtualAppImportSpec)(nil)).Elem()
}

type VirtualAppLinkInfo struct {
	DynamicData

	Key               ManagedObjectReference `xml:"key"`
	DestroyWithParent *bool                  `xml:"destroyWithParent"`
}

func init() {
	t["VirtualAppLinkInfo"] = reflect.TypeOf((*VirtualAppLinkInfo)(nil)).Elem()
}

type VirtualAppSummary struct {
	ResourcePoolSummary

	Product             *VAppProductInfo    `xml:"product,omitempty"`
	VAppState           VirtualAppVAppState `xml:"vAppState,omitempty"`
	Suspended           *bool               `xml:"suspended"`
	InstallBootRequired *bool               `xml:"installBootRequired"`
	InstanceUuid        string              `xml:"instanceUuid,omitempty"`
}

func init() {
	t["VirtualAppSummary"] = reflect.TypeOf((*VirtualAppSummary)(nil)).Elem()
}

type VirtualBusLogicController struct {
	VirtualSCSIController
}

func init() {
	t["VirtualBusLogicController"] = reflect.TypeOf((*VirtualBusLogicController)(nil)).Elem()
}

type VirtualBusLogicControllerOption struct {
	VirtualSCSIControllerOption
}

func init() {
	t["VirtualBusLogicControllerOption"] = reflect.TypeOf((*VirtualBusLogicControllerOption)(nil)).Elem()
}

type VirtualCdrom struct {
	VirtualDevice
}

func init() {
	t["VirtualCdrom"] = reflect.TypeOf((*VirtualCdrom)(nil)).Elem()
}

type VirtualCdromAtapiBackingInfo struct {
	VirtualDeviceDeviceBackingInfo
}

func init() {
	t["VirtualCdromAtapiBackingInfo"] = reflect.TypeOf((*VirtualCdromAtapiBackingInfo)(nil)).Elem()
}

type VirtualCdromAtapiBackingOption struct {
	VirtualDeviceDeviceBackingOption
}

func init() {
	t["VirtualCdromAtapiBackingOption"] = reflect.TypeOf((*VirtualCdromAtapiBackingOption)(nil)).Elem()
}

type VirtualCdromIsoBackingInfo struct {
	VirtualDeviceFileBackingInfo
}

func init() {
	t["VirtualCdromIsoBackingInfo"] = reflect.TypeOf((*VirtualCdromIsoBackingInfo)(nil)).Elem()
}

type VirtualCdromIsoBackingOption struct {
	VirtualDeviceFileBackingOption
}

func init() {
	t["VirtualCdromIsoBackingOption"] = reflect.TypeOf((*VirtualCdromIsoBackingOption)(nil)).Elem()
}

type VirtualCdromOption struct {
	VirtualDeviceOption
}

func init() {
	t["VirtualCdromOption"] = reflect.TypeOf((*VirtualCdromOption)(nil)).Elem()
}

type VirtualCdromPassthroughBackingInfo struct {
	VirtualDeviceDeviceBackingInfo

	Exclusive bool `xml:"exclusive"`
}

func init() {
	t["VirtualCdromPassthroughBackingInfo"] = reflect.TypeOf((*VirtualCdromPassthroughBackingInfo)(nil)).Elem()
}

type VirtualCdromPassthroughBackingOption struct {
	VirtualDeviceDeviceBackingOption

	Exclusive BoolOption `xml:"exclusive"`
}

func init() {
	t["VirtualCdromPassthroughBackingOption"] = reflect.TypeOf((*VirtualCdromPassthroughBackingOption)(nil)).Elem()
}

type VirtualCdromRemoteAtapiBackingInfo struct {
	VirtualDeviceRemoteDeviceBackingInfo
}

func init() {
	t["VirtualCdromRemoteAtapiBackingInfo"] = reflect.TypeOf((*VirtualCdromRemoteAtapiBackingInfo)(nil)).Elem()
}

type VirtualCdromRemoteAtapiBackingOption struct {
	VirtualDeviceDeviceBackingOption
}

func init() {
	t["VirtualCdromRemoteAtapiBackingOption"] = reflect.TypeOf((*VirtualCdromRemoteAtapiBackingOption)(nil)).Elem()
}

type VirtualCdromRemotePassthroughBackingInfo struct {
	VirtualDeviceRemoteDeviceBackingInfo

	Exclusive bool `xml:"exclusive"`
}

func init() {
	t["VirtualCdromRemotePassthroughBackingInfo"] = reflect.TypeOf((*VirtualCdromRemotePassthroughBackingInfo)(nil)).Elem()
}

type VirtualCdromRemotePassthroughBackingOption struct {
	VirtualDeviceRemoteDeviceBackingOption

	Exclusive BoolOption `xml:"exclusive"`
}

func init() {
	t["VirtualCdromRemotePassthroughBackingOption"] = reflect.TypeOf((*VirtualCdromRemotePassthroughBackingOption)(nil)).Elem()
}

type VirtualController struct {
	VirtualDevice

	BusNumber int32   `xml:"busNumber"`
	Device    []int32 `xml:"device,omitempty"`
}

func init() {
	t["VirtualController"] = reflect.TypeOf((*VirtualController)(nil)).Elem()
}

type VirtualControllerOption struct {
	VirtualDeviceOption

	Devices         IntOption `xml:"devices"`
	SupportedDevice []string  `xml:"supportedDevice,omitempty"`
}

func init() {
	t["VirtualControllerOption"] = reflect.TypeOf((*VirtualControllerOption)(nil)).Elem()
}

// VirtualDevice is the base data object embedded by every concrete virtual
// device type in this file (VirtualCdrom, VirtualController, VirtualDisk,
// VirtualEthernetCard, ...). Backing/DeviceInfo/SlotInfo are polymorphic
// (typeattr); UnitNumber is a pointer so 0 is distinguishable from unset.
type VirtualDevice struct {
	DynamicData

	Key           int32                        `xml:"key"`
	DeviceInfo    BaseDescription              `xml:"deviceInfo,omitempty,typeattr"`
	Backing       BaseVirtualDeviceBackingInfo `xml:"backing,omitempty,typeattr"`
	Connectable   *VirtualDeviceConnectInfo    `xml:"connectable,omitempty"`
	SlotInfo      BaseVirtualDeviceBusSlotInfo `xml:"slotInfo,omitempty,typeattr"`
	ControllerKey int32                        `xml:"controllerKey,omitempty"`
	UnitNumber    *int32                       `xml:"unitNumber"`
}

func init() {
	t["VirtualDevice"] = reflect.TypeOf((*VirtualDevice)(nil)).Elem()
}

type VirtualDeviceBackingInfo struct {
	DynamicData
}

func init() {
	t["VirtualDeviceBackingInfo"] = reflect.TypeOf((*VirtualDeviceBackingInfo)(nil)).Elem()
}

type VirtualDeviceBackingOption struct {
	DynamicData

	Type string `xml:"type"`
}

func init() {
	t["VirtualDeviceBackingOption"] = reflect.TypeOf((*VirtualDeviceBackingOption)(nil)).Elem()
}

type VirtualDeviceBusSlotInfo struct {
	DynamicData
}

func init() {
	t["VirtualDeviceBusSlotInfo"] = reflect.TypeOf((*VirtualDeviceBusSlotInfo)(nil)).Elem()
}

type VirtualDeviceBusSlotOption struct {
	DynamicData

	Type string `xml:"type"`
}

func init() {
	t["VirtualDeviceBusSlotOption"] = reflect.TypeOf((*VirtualDeviceBusSlotOption)(nil)).Elem()
}

// VirtualDeviceConfigSpec describes a single device add/remove/edit operation
// within a VM reconfigure request; Device is polymorphic (typeattr).
type VirtualDeviceConfigSpec struct {
	DynamicData

	Operation     VirtualDeviceConfigSpecOperation     `xml:"operation,omitempty"`
	FileOperation VirtualDeviceConfigSpecFileOperation `xml:"fileOperation,omitempty"`
	Device        BaseVirtualDevice                    `xml:"device,typeattr"`
	Profile       []BaseVirtualMachineProfileSpec      `xml:"profile,omitempty,typeattr"`
	Backing       *VirtualDeviceConfigSpecBackingSpec  `xml:"backing,omitempty"`
}

func init() {
	t["VirtualDeviceConfigSpec"] = reflect.TypeOf((*VirtualDeviceConfigSpec)(nil)).Elem()
}

type VirtualDeviceConfigSpecBackingSpec struct {
	DynamicData

	Parent *VirtualDeviceConfigSpecBackingSpec `xml:"parent,omitempty"`
	Crypto BaseCryptoSpec                      `xml:"crypto,omitempty,typeattr"`
}

func init() {
	t["VirtualDeviceConfigSpecBackingSpec"] = reflect.TypeOf((*VirtualDeviceConfigSpecBackingSpec)(nil)).Elem()
}

type VirtualDeviceConnectInfo struct {
	DynamicData

	StartConnected    bool   `xml:"startConnected"`
	AllowGuestControl bool   `xml:"allowGuestControl"`
	Connected         bool   `xml:"connected"`
	Status            string `xml:"status,omitempty"`
}

func init() {
	t["VirtualDeviceConnectInfo"] = reflect.TypeOf((*VirtualDeviceConnectInfo)(nil)).Elem()
}

type VirtualDeviceConnectOption struct {
	DynamicData

	StartConnected    BoolOption `xml:"startConnected"`
	AllowGuestControl BoolOption `xml:"allowGuestControl"`
}

func init() {
	t["VirtualDeviceConnectOption"] = reflect.TypeOf((*VirtualDeviceConnectOption)(nil)).Elem()
}

type VirtualDeviceDeviceBackingInfo struct {
	VirtualDeviceBackingInfo

	DeviceName    string `xml:"deviceName"`
	UseAutoDetect *bool  `xml:"useAutoDetect"`
}

func init() {
	t["VirtualDeviceDeviceBackingInfo"] = reflect.TypeOf((*VirtualDeviceDeviceBackingInfo)(nil)).Elem()
}

type VirtualDeviceDeviceBackingOption struct {
	VirtualDeviceBackingOption

	AutoDetectAvailable BoolOption `xml:"autoDetectAvailable"`
}

func init() {
	t["VirtualDeviceDeviceBackingOption"] = reflect.TypeOf((*VirtualDeviceDeviceBackingOption)(nil)).Elem()
}

type VirtualDeviceFileBackingInfo struct {
	VirtualDeviceBackingInfo

	FileName        string                  `xml:"fileName"`
	Datastore       *ManagedObjectReference `xml:"datastore,omitempty"`
	BackingObjectId string                  `xml:"backingObjectId,omitempty"`
}

func init() {
	t["VirtualDeviceFileBackingInfo"] = reflect.TypeOf((*VirtualDeviceFileBackingInfo)(nil)).Elem()
}

type VirtualDeviceFileBackingOption struct {
	VirtualDeviceBackingOption

	FileNameExtensions *ChoiceOption `xml:"fileNameExtensions,omitempty"`
}

func init() {
	t["VirtualDeviceFileBackingOption"] = reflect.TypeOf((*VirtualDeviceFileBackingOption)(nil)).Elem()
}

type VirtualDeviceOption struct {
	DynamicData

	Type                      string                           `xml:"type"`
	ConnectOption             *VirtualDeviceConnectOption      `xml:"connectOption,omitempty"`
	BusSlotOption             *VirtualDeviceBusSlotOption      `xml:"busSlotOption,omitempty"`
	ControllerType            string                           `xml:"controllerType,omitempty"`
	AutoAssignController      *BoolOption                      `xml:"autoAssignController,omitempty"`
	BackingOption             []BaseVirtualDeviceBackingOption `xml:"backingOption,omitempty,typeattr"`
	DefaultBackingOptionIndex int32                            `xml:"defaultBackingOptionIndex,omitempty"`
	LicensingLimit            []string                         `xml:"licensingLimit,omitempty"`
	Deprecated                bool                             `xml:"deprecated"`
	PlugAndPlay               bool                             `xml:"plugAndPlay"`
	HotRemoveSupported        *bool                            `xml:"hotRemoveSupported"`
}

func init() {
	t["VirtualDeviceOption"] = reflect.TypeOf((*VirtualDeviceOption)(nil)).Elem()
}

type VirtualDevicePciBusSlotInfo struct {
	VirtualDeviceBusSlotInfo

	PciSlotNumber int32 `xml:"pciSlotNumber"`
}

func init() {
	t["VirtualDevicePciBusSlotInfo"] = reflect.TypeOf((*VirtualDevicePciBusSlotInfo)(nil)).Elem()
}

type VirtualDevicePipeBackingInfo struct {
	VirtualDeviceBackingInfo

	PipeName string `xml:"pipeName"`
}

func init() {
	t["VirtualDevicePipeBackingInfo"] = reflect.TypeOf((*VirtualDevicePipeBackingInfo)(nil)).Elem()
}

type VirtualDevicePipeBackingOption struct {
	VirtualDeviceBackingOption
}

func init() {
	t["VirtualDevicePipeBackingOption"] = reflect.TypeOf((*VirtualDevicePipeBackingOption)(nil)).Elem()
}

type VirtualDeviceRemoteDeviceBackingInfo struct {
	VirtualDeviceBackingInfo

	DeviceName    string `xml:"deviceName"`
	UseAutoDetect *bool  `xml:"useAutoDetect"`
}

func init() {
	t["VirtualDeviceRemoteDeviceBackingInfo"] = reflect.TypeOf((*VirtualDeviceRemoteDeviceBackingInfo)(nil)).Elem()
}

type VirtualDeviceRemoteDeviceBackingOption struct {
	VirtualDeviceBackingOption

	AutoDetectAvailable BoolOption `xml:"autoDetectAvailable"`
}

func init() {
	t["VirtualDeviceRemoteDeviceBackingOption"] = reflect.TypeOf((*VirtualDeviceRemoteDeviceBackingOption)(nil)).Elem()
}

type VirtualDeviceURIBackingInfo struct {
	VirtualDeviceBackingInfo

	ServiceURI string `xml:"serviceURI"`
	Direction  string `xml:"direction"`
	ProxyURI   string `xml:"proxyURI,omitempty"`
}

func init() {
	t["VirtualDeviceURIBackingInfo"] = reflect.TypeOf((*VirtualDeviceURIBackingInfo)(nil)).Elem()
}

type VirtualDeviceURIBackingOption struct {
	VirtualDeviceBackingOption

	Directions ChoiceOption `xml:"directions"`
}

func init() {
	t["VirtualDeviceURIBackingOption"] = reflect.TypeOf((*VirtualDeviceURIBackingOption)(nil)).Elem()
}

// VirtualDisk and its backing-format variants (FlatVer1/FlatVer2, raw disk
// mapping, SeSparse, SparseVer1/SparseVer2). Parent pointers on the backing
// Info structs form delta-disk chains of the same backing type.
type VirtualDisk struct {
	VirtualDevice

	CapacityInKB          int64                             `xml:"capacityInKB"`
	CapacityInBytes       int64                             `xml:"capacityInBytes,omitempty"`
	Shares                *SharesInfo                       `xml:"shares,omitempty"`
	StorageIOAllocation   *StorageIOAllocationInfo          `xml:"storageIOAllocation,omitempty"`
	DiskObjectId          string                            `xml:"diskObjectId,omitempty"`
	VFlashCacheConfigInfo *VirtualDiskVFlashCacheConfigInfo `xml:"vFlashCacheConfigInfo,omitempty"`
	Iofilter              []string                          `xml:"iofilter,omitempty"`
	VDiskId               *ID                               `xml:"vDiskId,omitempty"`
}

func init() {
	t["VirtualDisk"] = reflect.TypeOf((*VirtualDisk)(nil)).Elem()
}

type VirtualDiskAntiAffinityRuleSpec struct {
	ClusterRuleInfo

	DiskId []int32 `xml:"diskId"`
}

func init() {
	t["VirtualDiskAntiAffinityRuleSpec"] = reflect.TypeOf((*VirtualDiskAntiAffinityRuleSpec)(nil)).Elem()
}

type VirtualDiskBlocksNotFullyProvisioned struct {
	DeviceBackingNotSupported
}

func init() {
	t["VirtualDiskBlocksNotFullyProvisioned"] = reflect.TypeOf((*VirtualDiskBlocksNotFullyProvisioned)(nil)).Elem()
}

type VirtualDiskBlocksNotFullyProvisionedFault VirtualDiskBlocksNotFullyProvisioned

func init() {
	t["VirtualDiskBlocksNotFullyProvisionedFault"] = reflect.TypeOf((*VirtualDiskBlocksNotFullyProvisionedFault)(nil)).Elem()
}

type VirtualDiskConfigSpec struct {
	VirtualDeviceConfigSpec

	DiskMoveType string `xml:"diskMoveType,omitempty"`
	MigrateCache *bool  `xml:"migrateCache"`
}

func init() {
	t["VirtualDiskConfigSpec"] = reflect.TypeOf((*VirtualDiskConfigSpec)(nil)).Elem()
}

type VirtualDiskDeltaDiskFormatsSupported struct {
	DynamicData

	DatastoreType   string       `xml:"datastoreType"`
	DeltaDiskFormat ChoiceOption `xml:"deltaDiskFormat"`
}

func init() {
	t["VirtualDiskDeltaDiskFormatsSupported"] = reflect.TypeOf((*VirtualDiskDeltaDiskFormatsSupported)(nil)).Elem()
}

type VirtualDiskFlatVer1BackingInfo struct {
	VirtualDeviceFileBackingInfo

	DiskMode     string                          `xml:"diskMode"`
	Split        *bool                           `xml:"split"`
	WriteThrough *bool                           `xml:"writeThrough"`
	ContentId    string                          `xml:"contentId,omitempty"`
	Parent       *VirtualDiskFlatVer1BackingInfo `xml:"parent,omitempty"`
}

func init() {
	t["VirtualDiskFlatVer1BackingInfo"] = reflect.TypeOf((*VirtualDiskFlatVer1BackingInfo)(nil)).Elem()
}

type VirtualDiskFlatVer1BackingOption struct {
	VirtualDeviceFileBackingOption

	DiskMode     ChoiceOption `xml:"diskMode"`
	Split        BoolOption   `xml:"split"`
	WriteThrough BoolOption   `xml:"writeThrough"`
	Growable     bool         `xml:"growable"`
}

func init() {
	t["VirtualDiskFlatVer1BackingOption"] = reflect.TypeOf((*VirtualDiskFlatVer1BackingOption)(nil)).Elem()
}

type VirtualDiskFlatVer2BackingInfo struct {
	VirtualDeviceFileBackingInfo

	DiskMode               string                          `xml:"diskMode"`
	Split                  *bool                           `xml:"split"`
	WriteThrough           *bool                           `xml:"writeThrough"`
	ThinProvisioned        *bool                           `xml:"thinProvisioned"`
	EagerlyScrub           *bool                           `xml:"eagerlyScrub"`
	Uuid                   string                          `xml:"uuid,omitempty"`
	ContentId              string                          `xml:"contentId,omitempty"`
	ChangeId               string                          `xml:"changeId,omitempty"`
	Parent                 *VirtualDiskFlatVer2BackingInfo `xml:"parent,omitempty"`
	DeltaDiskFormat        string                          `xml:"deltaDiskFormat,omitempty"`
	DigestEnabled          *bool                           `xml:"digestEnabled"`
	DeltaGrainSize         int32                           `xml:"deltaGrainSize,omitempty"`
	DeltaDiskFormatVariant string                          `xml:"deltaDiskFormatVariant,omitempty"`
	Sharing                string                          `xml:"sharing,omitempty"`
	KeyId                  *CryptoKeyId                    `xml:"keyId,omitempty"`
}

func init() {
	t["VirtualDiskFlatVer2BackingInfo"] = reflect.TypeOf((*VirtualDiskFlatVer2BackingInfo)(nil)).Elem()
}

type VirtualDiskFlatVer2BackingOption struct {
	VirtualDeviceFileBackingOption

	DiskMode                  ChoiceOption                           `xml:"diskMode"`
	Split                     BoolOption                             `xml:"split"`
	WriteThrough              BoolOption                             `xml:"writeThrough"`
	Growable                  bool                                   `xml:"growable"`
	HotGrowable               bool                                   `xml:"hotGrowable"`
	Uuid                      bool                                   `xml:"uuid"`
	ThinProvisioned           *BoolOption                            `xml:"thinProvisioned,omitempty"`
	EagerlyScrub              *BoolOption                            `xml:"eagerlyScrub,omitempty"`
	DeltaDiskFormat           *ChoiceOption                          `xml:"deltaDiskFormat,omitempty"`
	DeltaDiskFormatsSupported []VirtualDiskDeltaDiskFormatsSupported `xml:"deltaDiskFormatsSupported,omitempty"`
}

func init() {
	t["VirtualDiskFlatVer2BackingOption"] = reflect.TypeOf((*VirtualDiskFlatVer2BackingOption)(nil)).Elem()
}

type VirtualDiskId struct {
	DynamicData

	Vm     ManagedObjectReference `xml:"vm"`
	DiskId int32                  `xml:"diskId"`
}

func init() {
	t["VirtualDiskId"] = reflect.TypeOf((*VirtualDiskId)(nil)).Elem()
}

type VirtualDiskModeNotSupported struct {
	DeviceNotSupported

	Mode string `xml:"mode"`
}

func init() {
	t["VirtualDiskModeNotSupported"] = reflect.TypeOf((*VirtualDiskModeNotSupported)(nil)).Elem()
}

type VirtualDiskModeNotSupportedFault VirtualDiskModeNotSupported

func init() {
	t["VirtualDiskModeNotSupportedFault"] = reflect.TypeOf((*VirtualDiskModeNotSupportedFault)(nil)).Elem()
}

type VirtualDiskOption struct {
	VirtualDeviceOption

	CapacityInKB            LongOption                                `xml:"capacityInKB"`
	IoAllocationOption      *StorageIOAllocationOption                `xml:"ioAllocationOption,omitempty"`
	VFlashCacheConfigOption *VirtualDiskOptionVFlashCacheConfigOption `xml:"vFlashCacheConfigOption,omitempty"`
}

func init() {
	t["VirtualDiskOption"] = reflect.TypeOf((*VirtualDiskOption)(nil)).Elem()
}

type VirtualDiskOptionVFlashCacheConfigOption struct {
	DynamicData

	CacheConsistencyType ChoiceOption `xml:"cacheConsistencyType"`
	CacheMode            ChoiceOption `xml:"cacheMode"`
	ReservationInMB      LongOption   `xml:"reservationInMB"`
	BlockSizeInKB        LongOption   `xml:"blockSizeInKB"`
}

func init() {
	t["VirtualDiskOptionVFlashCacheConfigOption"] = reflect.TypeOf((*VirtualDiskOptionVFlashCacheConfigOption)(nil)).Elem()
}

type VirtualDiskPartitionedRawDiskVer2BackingInfo struct {
	VirtualDiskRawDiskVer2BackingInfo

	Partition []int32 `xml:"partition"`
}

func init() {
	t["VirtualDiskPartitionedRawDiskVer2BackingInfo"] = reflect.TypeOf((*VirtualDiskPartitionedRawDiskVer2BackingInfo)(nil)).Elem()
}

type VirtualDiskPartitionedRawDiskVer2BackingOption struct {
	VirtualDiskRawDiskVer2BackingOption
}

func init() {
	t["VirtualDiskPartitionedRawDiskVer2BackingOption"] = reflect.TypeOf((*VirtualDiskPartitionedRawDiskVer2BackingOption)(nil)).Elem()
}

type VirtualDiskRawDiskMappingVer1BackingInfo struct {
	VirtualDeviceFileBackingInfo

	LunUuid           string                                    `xml:"lunUuid,omitempty"`
	DeviceName        string                                    `xml:"deviceName,omitempty"`
	CompatibilityMode string                                    `xml:"compatibilityMode,omitempty"`
	DiskMode          string                                    `xml:"diskMode,omitempty"`
	Uuid              string                                    `xml:"uuid,omitempty"`
	ContentId         string                                    `xml:"contentId,omitempty"`
	ChangeId          string                                    `xml:"changeId,omitempty"`
	Parent            *VirtualDiskRawDiskMappingVer1BackingInfo `xml:"parent,omitempty"`
	Sharing           string                                    `xml:"sharing,omitempty"`
}

func init() {
	t["VirtualDiskRawDiskMappingVer1BackingInfo"] = reflect.TypeOf((*VirtualDiskRawDiskMappingVer1BackingInfo)(nil)).Elem()
}

type VirtualDiskRawDiskMappingVer1BackingOption struct {
	VirtualDeviceDeviceBackingOption

	DescriptorFileNameExtensions *ChoiceOption `xml:"descriptorFileNameExtensions,omitempty"`
	CompatibilityMode            ChoiceOption  `xml:"compatibilityMode"`
	DiskMode                     ChoiceOption  `xml:"diskMode"`
	Uuid                         bool          `xml:"uuid"`
}

func init() {
	t["VirtualDiskRawDiskMappingVer1BackingOption"] = reflect.TypeOf((*VirtualDiskRawDiskMappingVer1BackingOption)(nil)).Elem()
}

type VirtualDiskRawDiskVer2BackingInfo struct {
	VirtualDeviceDeviceBackingInfo

	DescriptorFileName string `xml:"descriptorFileName"`
	Uuid               string `xml:"uuid,omitempty"`
	ChangeId           string `xml:"changeId,omitempty"`
	Sharing            string `xml:"sharing,omitempty"`
}

func init() {
	t["VirtualDiskRawDiskVer2BackingInfo"] = reflect.TypeOf((*VirtualDiskRawDiskVer2BackingInfo)(nil)).Elem()
}

type VirtualDiskRawDiskVer2BackingOption struct {
	VirtualDeviceDeviceBackingOption

	DescriptorFileNameExtensions ChoiceOption `xml:"descriptorFileNameExtensions"`
	Uuid                         bool         `xml:"uuid"`
}

func init() {
	t["VirtualDiskRawDiskVer2BackingOption"] = reflect.TypeOf((*VirtualDiskRawDiskVer2BackingOption)(nil)).Elem()
}

type VirtualDiskSeSparseBackingInfo struct {
	VirtualDeviceFileBackingInfo

	DiskMode        string                          `xml:"diskMode"`
	WriteThrough    *bool                           `xml:"writeThrough"`
	Uuid            string                          `xml:"uuid,omitempty"`
	ContentId       string                          `xml:"contentId,omitempty"`
	ChangeId        string                          `xml:"changeId,omitempty"`
	Parent          *VirtualDiskSeSparseBackingInfo `xml:"parent,omitempty"`
	DeltaDiskFormat string                          `xml:"deltaDiskFormat,omitempty"`
	DigestEnabled   *bool                           `xml:"digestEnabled"`
	GrainSize       int32                           `xml:"grainSize,omitempty"`
	KeyId           *CryptoKeyId                    `xml:"keyId,omitempty"`
}

func init() {
	t["VirtualDiskSeSparseBackingInfo"] = reflect.TypeOf((*VirtualDiskSeSparseBackingInfo)(nil)).Elem()
}

type VirtualDiskSeSparseBackingOption struct {
	VirtualDeviceFileBackingOption

	DiskMode                  ChoiceOption                           `xml:"diskMode"`
	WriteThrough              BoolOption                             `xml:"writeThrough"`
	Growable                  bool                                   `xml:"growable"`
	HotGrowable               bool                                   `xml:"hotGrowable"`
	Uuid                      bool                                   `xml:"uuid"`
	DeltaDiskFormatsSupported []VirtualDiskDeltaDiskFormatsSupported `xml:"deltaDiskFormatsSupported"`
}

func init() {
	t["VirtualDiskSeSparseBackingOption"] = reflect.TypeOf((*VirtualDiskSeSparseBackingOption)(nil)).Elem()
}

type VirtualDiskSparseVer1BackingInfo struct {
	VirtualDeviceFileBackingInfo

	DiskMode      string                            `xml:"diskMode"`
	Split         *bool                             `xml:"split"`
	WriteThrough  *bool                             `xml:"writeThrough"`
	SpaceUsedInKB int64                             `xml:"spaceUsedInKB,omitempty"`
	ContentId     string                            `xml:"contentId,omitempty"`
	Parent        *VirtualDiskSparseVer1BackingInfo `xml:"parent,omitempty"`
}

func init() {
	t["VirtualDiskSparseVer1BackingInfo"] = reflect.TypeOf((*VirtualDiskSparseVer1BackingInfo)(nil)).Elem()
}

type VirtualDiskSparseVer1BackingOption struct {
	VirtualDeviceFileBackingOption

	DiskModes    ChoiceOption `xml:"diskModes"`
	Split        BoolOption   `xml:"split"`
	WriteThrough BoolOption   `xml:"writeThrough"`
	Growable     bool         `xml:"growable"`
}

func init() {
	t["VirtualDiskSparseVer1BackingOption"] = reflect.TypeOf((*VirtualDiskSparseVer1BackingOption)(nil)).Elem()
}

type VirtualDiskSparseVer2BackingInfo struct {
	VirtualDeviceFileBackingInfo

	DiskMode      string                            `xml:"diskMode"`
	Split         *bool                             `xml:"split"`
	WriteThrough  *bool                             `xml:"writeThrough"`
	SpaceUsedInKB int64                             `xml:"spaceUsedInKB,omitempty"`
	Uuid          string                            `xml:"uuid,omitempty"`
	ContentId     string                            `xml:"contentId,omitempty"`
	ChangeId      string                            `xml:"changeId,omitempty"`
	Parent        *VirtualDiskSparseVer2BackingInfo `xml:"parent,omitempty"`
	KeyId         *CryptoKeyId                      `xml:"keyId,omitempty"`
}

func init() {
	t["VirtualDiskSparseVer2BackingInfo"] = reflect.TypeOf((*VirtualDiskSparseVer2BackingInfo)(nil)).Elem()
}

type VirtualDiskSparseVer2BackingOption struct {
	VirtualDeviceFileBackingOption

	DiskMode     ChoiceOption `xml:"diskMode"`
	Split        BoolOption   `xml:"split"`
	WriteThrough BoolOption   `xml:"writeThrough"`
	Growable     bool         `xml:"growable"`
	HotGrowable  bool         `xml:"hotGrowable"`
	Uuid         bool         `xml:"uuid"`
}

func init() {
	t["VirtualDiskSparseVer2BackingOption"] = reflect.TypeOf((*VirtualDiskSparseVer2BackingOption)(nil)).Elem()
}

type VirtualDiskSpec struct {
	DynamicData

	DiskType    string `xml:"diskType"`
	AdapterType string `xml:"adapterType"`
}

func init() {
	t["VirtualDiskSpec"] = reflect.TypeOf((*VirtualDiskSpec)(nil)).Elem()
}

type VirtualDiskVFlashCacheConfigInfo struct {
	DynamicData

	VFlashModule         string `xml:"vFlashModule,omitempty"`
	ReservationInMB      int64  `xml:"reservationInMB,omitempty"`
	CacheConsistencyType string `xml:"cacheConsistencyType,omitempty"`
	CacheMode            string `xml:"cacheMode,omitempty"`
	BlockSizeInKB        int64  `xml:"blockSizeInKB,omitempty"`
}

func init() {
	t["VirtualDiskVFlashCacheConfigInfo"] = reflect.TypeOf((*VirtualDiskVFlashCacheConfigInfo)(nil)).Elem()
}

// Ethernet adapter models (E1000, E1000e, ...) embed VirtualEthernetCard;
// sound card models embed VirtualSoundCard.

type VirtualE1000 struct {
	VirtualEthernetCard
}

func init() {
	t["VirtualE1000"] = reflect.TypeOf((*VirtualE1000)(nil)).Elem()
}

type VirtualE1000Option struct {
	VirtualEthernetCardOption
}

func init() {
	t["VirtualE1000Option"] = reflect.TypeOf((*VirtualE1000Option)(nil)).Elem()
}

type VirtualE1000e struct {
	VirtualEthernetCard
}

func init() {
	t["VirtualE1000e"] = reflect.TypeOf((*VirtualE1000e)(nil)).Elem()
}

type VirtualE1000eOption struct {
	VirtualEthernetCardOption
}

func init() {
	t["VirtualE1000eOption"] = reflect.TypeOf((*VirtualE1000eOption)(nil)).Elem()
}

type VirtualEnsoniq1371 struct {
	VirtualSoundCard
}

func init() {
	t["VirtualEnsoniq1371"] = reflect.TypeOf((*VirtualEnsoniq1371)(nil)).Elem()
}

type VirtualEnsoniq1371Option struct {
	VirtualSoundCardOption
}

func init() {
	t["VirtualEnsoniq1371Option"] = reflect.TypeOf((*VirtualEnsoniq1371Option)(nil)).Elem()
}

type VirtualEthernetCard struct {
	VirtualDevice

	AddressType             string                                 `xml:"addressType,omitempty"`
	MacAddress              string                                 `xml:"macAddress,omitempty"`
	WakeOnLanEnabled        *bool                                  `xml:"wakeOnLanEnabled"`
	ResourceAllocation      *VirtualEthernetCardResourceAllocation `xml:"resourceAllocation,omitempty"`
	ExternalId              string                                 `xml:"externalId,omitempty"`
	UptCompatibilityEnabled *bool                                  `xml:"uptCompatibilityEnabled"`
}

func init() {
	t["VirtualEthernetCard"] = reflect.TypeOf((*VirtualEthernetCard)(nil)).Elem()
}

type VirtualEthernetCardDVPortBackingOption struct {
	VirtualDeviceBackingOption
}

func init() {
	t["VirtualEthernetCardDVPortBackingOption"] = reflect.TypeOf((*VirtualEthernetCardDVPortBackingOption)(nil)).Elem()
}

type VirtualEthernetCardDistributedVirtualPortBackingInfo struct {
	VirtualDeviceBackingInfo

	Port DistributedVirtualSwitchPortConnection `xml:"port"`
}

func init() {
	t["VirtualEthernetCardDistributedVirtualPortBackingInfo"] = reflect.TypeOf((*VirtualEthernetCardDistributedVirtualPortBackingInfo)(nil)).Elem()
}

type VirtualEthernetCardLegacyNetworkBackingInfo struct {
	VirtualDeviceDeviceBackingInfo
}

func init() {
	t["VirtualEthernetCardLegacyNetworkBackingInfo"] = reflect.TypeOf((*VirtualEthernetCardLegacyNetworkBackingInfo)(nil)).Elem()
}

type VirtualEthernetCardLegacyNetworkBackingOption struct {
	VirtualDeviceDeviceBackingOption
}

func init() {
	t["VirtualEthernetCardLegacyNetworkBackingOption"] = reflect.TypeOf((*VirtualEthernetCardLegacyNetworkBackingOption)(nil)).Elem()
}

type VirtualEthernetCardNetworkBackingInfo struct {
	VirtualDeviceDeviceBackingInfo

	Network           *ManagedObjectReference `xml:"network,omitempty"`
	InPassthroughMode *bool                   `xml:"inPassthroughMode"`
}

func init() {
	t["VirtualEthernetCardNetworkBackingInfo"] = reflect.TypeOf((*VirtualEthernetCardNetworkBackingInfo)(nil)).Elem()
}

type VirtualEthernetCardNetworkBackingOption struct {
	VirtualDeviceDeviceBackingOption
}

func init() {
	t["VirtualEthernetCardNetworkBackingOption"] = reflect.TypeOf((*VirtualEthernetCardNetworkBackingOption)(nil)).Elem()
}

type VirtualEthernetCardNotSupported struct {
	DeviceNotSupported
}

func init() {
	t["VirtualEthernetCardNotSupported"] = 
reflect.TypeOf((*VirtualEthernetCardNotSupported)(nil)).Elem()\n}\n\ntype VirtualEthernetCardNotSupportedFault VirtualEthernetCardNotSupported\n\nfunc init() {\n\tt[\"VirtualEthernetCardNotSupportedFault\"] = reflect.TypeOf((*VirtualEthernetCardNotSupportedFault)(nil)).Elem()\n}\n\ntype VirtualEthernetCardOpaqueNetworkBackingInfo struct {\n\tVirtualDeviceBackingInfo\n\n\tOpaqueNetworkId   string `xml:\"opaqueNetworkId\"`\n\tOpaqueNetworkType string `xml:\"opaqueNetworkType\"`\n}\n\nfunc init() {\n\tt[\"VirtualEthernetCardOpaqueNetworkBackingInfo\"] = reflect.TypeOf((*VirtualEthernetCardOpaqueNetworkBackingInfo)(nil)).Elem()\n}\n\ntype VirtualEthernetCardOpaqueNetworkBackingOption struct {\n\tVirtualDeviceBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualEthernetCardOpaqueNetworkBackingOption\"] = reflect.TypeOf((*VirtualEthernetCardOpaqueNetworkBackingOption)(nil)).Elem()\n}\n\ntype VirtualEthernetCardOption struct {\n\tVirtualDeviceOption\n\n\tSupportedOUI              ChoiceOption `xml:\"supportedOUI\"`\n\tMacType                   ChoiceOption `xml:\"macType\"`\n\tWakeOnLanEnabled          BoolOption   `xml:\"wakeOnLanEnabled\"`\n\tVmDirectPathGen2Supported *bool        `xml:\"vmDirectPathGen2Supported\"`\n\tUptCompatibilityEnabled   *BoolOption  `xml:\"uptCompatibilityEnabled,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualEthernetCardOption\"] = reflect.TypeOf((*VirtualEthernetCardOption)(nil)).Elem()\n}\n\ntype VirtualEthernetCardResourceAllocation struct {\n\tDynamicData\n\n\tReservation int64      `xml:\"reservation,omitempty\"`\n\tShare       SharesInfo `xml:\"share\"`\n\tLimit       int64      `xml:\"limit,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualEthernetCardResourceAllocation\"] = reflect.TypeOf((*VirtualEthernetCardResourceAllocation)(nil)).Elem()\n}\n\ntype VirtualFloppy struct {\n\tVirtualDevice\n}\n\nfunc init() {\n\tt[\"VirtualFloppy\"] = reflect.TypeOf((*VirtualFloppy)(nil)).Elem()\n}\n\ntype VirtualFloppyDeviceBackingInfo struct 
{\n\tVirtualDeviceDeviceBackingInfo\n}\n\nfunc init() {\n\tt[\"VirtualFloppyDeviceBackingInfo\"] = reflect.TypeOf((*VirtualFloppyDeviceBackingInfo)(nil)).Elem()\n}\n\ntype VirtualFloppyDeviceBackingOption struct {\n\tVirtualDeviceDeviceBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualFloppyDeviceBackingOption\"] = reflect.TypeOf((*VirtualFloppyDeviceBackingOption)(nil)).Elem()\n}\n\ntype VirtualFloppyImageBackingInfo struct {\n\tVirtualDeviceFileBackingInfo\n}\n\nfunc init() {\n\tt[\"VirtualFloppyImageBackingInfo\"] = reflect.TypeOf((*VirtualFloppyImageBackingInfo)(nil)).Elem()\n}\n\ntype VirtualFloppyImageBackingOption struct {\n\tVirtualDeviceFileBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualFloppyImageBackingOption\"] = reflect.TypeOf((*VirtualFloppyImageBackingOption)(nil)).Elem()\n}\n\ntype VirtualFloppyOption struct {\n\tVirtualDeviceOption\n}\n\nfunc init() {\n\tt[\"VirtualFloppyOption\"] = reflect.TypeOf((*VirtualFloppyOption)(nil)).Elem()\n}\n\ntype VirtualFloppyRemoteDeviceBackingInfo struct {\n\tVirtualDeviceRemoteDeviceBackingInfo\n}\n\nfunc init() {\n\tt[\"VirtualFloppyRemoteDeviceBackingInfo\"] = reflect.TypeOf((*VirtualFloppyRemoteDeviceBackingInfo)(nil)).Elem()\n}\n\ntype VirtualFloppyRemoteDeviceBackingOption struct {\n\tVirtualDeviceRemoteDeviceBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualFloppyRemoteDeviceBackingOption\"] = reflect.TypeOf((*VirtualFloppyRemoteDeviceBackingOption)(nil)).Elem()\n}\n\ntype VirtualHardware struct {\n\tDynamicData\n\n\tNumCPU              int32               `xml:\"numCPU\"`\n\tNumCoresPerSocket   int32               `xml:\"numCoresPerSocket,omitempty\"`\n\tMemoryMB            int32               `xml:\"memoryMB\"`\n\tVirtualICH7MPresent *bool               `xml:\"virtualICH7MPresent\"`\n\tVirtualSMCPresent   *bool               `xml:\"virtualSMCPresent\"`\n\tDevice              []BaseVirtualDevice `xml:\"device,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"VirtualHardware\"] = 
reflect.TypeOf((*VirtualHardware)(nil)).Elem()\n}\n\ntype VirtualHardwareCompatibilityIssue struct {\n\tVmConfigFault\n}\n\nfunc init() {\n\tt[\"VirtualHardwareCompatibilityIssue\"] = reflect.TypeOf((*VirtualHardwareCompatibilityIssue)(nil)).Elem()\n}\n\ntype VirtualHardwareCompatibilityIssueFault BaseVirtualHardwareCompatibilityIssue\n\nfunc init() {\n\tt[\"VirtualHardwareCompatibilityIssueFault\"] = reflect.TypeOf((*VirtualHardwareCompatibilityIssueFault)(nil)).Elem()\n}\n\ntype VirtualHardwareOption struct {\n\tDynamicData\n\n\tHwVersion             int32                     `xml:\"hwVersion\"`\n\tVirtualDeviceOption   []BaseVirtualDeviceOption `xml:\"virtualDeviceOption,typeattr\"`\n\tDeviceListReadonly    bool                      `xml:\"deviceListReadonly\"`\n\tNumCPU                []int32                   `xml:\"numCPU\"`\n\tNumCoresPerSocket     *IntOption                `xml:\"numCoresPerSocket,omitempty\"`\n\tNumCpuReadonly        bool                      `xml:\"numCpuReadonly\"`\n\tMemoryMB              LongOption                `xml:\"memoryMB\"`\n\tNumPCIControllers     IntOption                 `xml:\"numPCIControllers\"`\n\tNumIDEControllers     IntOption                 `xml:\"numIDEControllers\"`\n\tNumUSBControllers     IntOption                 `xml:\"numUSBControllers\"`\n\tNumUSBXHCIControllers *IntOption                `xml:\"numUSBXHCIControllers,omitempty\"`\n\tNumSIOControllers     IntOption                 `xml:\"numSIOControllers\"`\n\tNumPS2Controllers     IntOption                 `xml:\"numPS2Controllers\"`\n\tLicensingLimit        []string                  `xml:\"licensingLimit,omitempty\"`\n\tNumSupportedWwnPorts  *IntOption                `xml:\"numSupportedWwnPorts,omitempty\"`\n\tNumSupportedWwnNodes  *IntOption                `xml:\"numSupportedWwnNodes,omitempty\"`\n\tResourceConfigOption  *ResourceConfigOption     `xml:\"resourceConfigOption,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualHardwareOption\"] = 
reflect.TypeOf((*VirtualHardwareOption)(nil)).Elem()\n}\n\ntype VirtualHardwareVersionNotSupported struct {\n\tVirtualHardwareCompatibilityIssue\n\n\tHostName string                 `xml:\"hostName\"`\n\tHost     ManagedObjectReference `xml:\"host\"`\n}\n\nfunc init() {\n\tt[\"VirtualHardwareVersionNotSupported\"] = reflect.TypeOf((*VirtualHardwareVersionNotSupported)(nil)).Elem()\n}\n\ntype VirtualHardwareVersionNotSupportedFault VirtualHardwareVersionNotSupported\n\nfunc init() {\n\tt[\"VirtualHardwareVersionNotSupportedFault\"] = reflect.TypeOf((*VirtualHardwareVersionNotSupportedFault)(nil)).Elem()\n}\n\ntype VirtualHdAudioCard struct {\n\tVirtualSoundCard\n}\n\nfunc init() {\n\tt[\"VirtualHdAudioCard\"] = reflect.TypeOf((*VirtualHdAudioCard)(nil)).Elem()\n}\n\ntype VirtualHdAudioCardOption struct {\n\tVirtualSoundCardOption\n}\n\nfunc init() {\n\tt[\"VirtualHdAudioCardOption\"] = reflect.TypeOf((*VirtualHdAudioCardOption)(nil)).Elem()\n}\n\ntype VirtualIDEController struct {\n\tVirtualController\n}\n\nfunc init() {\n\tt[\"VirtualIDEController\"] = reflect.TypeOf((*VirtualIDEController)(nil)).Elem()\n}\n\ntype VirtualIDEControllerOption struct {\n\tVirtualControllerOption\n\n\tNumIDEDisks  IntOption `xml:\"numIDEDisks\"`\n\tNumIDECdroms IntOption `xml:\"numIDECdroms\"`\n}\n\nfunc init() {\n\tt[\"VirtualIDEControllerOption\"] = reflect.TypeOf((*VirtualIDEControllerOption)(nil)).Elem()\n}\n\ntype VirtualKeyboard struct {\n\tVirtualDevice\n}\n\nfunc init() {\n\tt[\"VirtualKeyboard\"] = reflect.TypeOf((*VirtualKeyboard)(nil)).Elem()\n}\n\ntype VirtualKeyboardOption struct {\n\tVirtualDeviceOption\n}\n\nfunc init() {\n\tt[\"VirtualKeyboardOption\"] = reflect.TypeOf((*VirtualKeyboardOption)(nil)).Elem()\n}\n\ntype VirtualLsiLogicController struct {\n\tVirtualSCSIController\n}\n\nfunc init() {\n\tt[\"VirtualLsiLogicController\"] = reflect.TypeOf((*VirtualLsiLogicController)(nil)).Elem()\n}\n\ntype VirtualLsiLogicControllerOption struct 
{\n\tVirtualSCSIControllerOption\n}\n\nfunc init() {\n\tt[\"VirtualLsiLogicControllerOption\"] = reflect.TypeOf((*VirtualLsiLogicControllerOption)(nil)).Elem()\n}\n\ntype VirtualLsiLogicSASController struct {\n\tVirtualSCSIController\n}\n\nfunc init() {\n\tt[\"VirtualLsiLogicSASController\"] = reflect.TypeOf((*VirtualLsiLogicSASController)(nil)).Elem()\n}\n\ntype VirtualLsiLogicSASControllerOption struct {\n\tVirtualSCSIControllerOption\n}\n\nfunc init() {\n\tt[\"VirtualLsiLogicSASControllerOption\"] = reflect.TypeOf((*VirtualLsiLogicSASControllerOption)(nil)).Elem()\n}\n\ntype VirtualMachineAffinityInfo struct {\n\tDynamicData\n\n\tAffinitySet []int32 `xml:\"affinitySet,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineAffinityInfo\"] = reflect.TypeOf((*VirtualMachineAffinityInfo)(nil)).Elem()\n}\n\ntype VirtualMachineBootOptions struct {\n\tDynamicData\n\n\tBootDelay            int64                                         `xml:\"bootDelay,omitempty\"`\n\tEnterBIOSSetup       *bool                                         `xml:\"enterBIOSSetup\"`\n\tEfiSecureBootEnabled *bool                                         `xml:\"efiSecureBootEnabled\"`\n\tBootRetryEnabled     *bool                                         `xml:\"bootRetryEnabled\"`\n\tBootRetryDelay       int64                                         `xml:\"bootRetryDelay,omitempty\"`\n\tBootOrder            []BaseVirtualMachineBootOptionsBootableDevice `xml:\"bootOrder,omitempty,typeattr\"`\n\tNetworkBootProtocol  string                                        `xml:\"networkBootProtocol,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineBootOptions\"] = reflect.TypeOf((*VirtualMachineBootOptions)(nil)).Elem()\n}\n\ntype VirtualMachineBootOptionsBootableCdromDevice struct {\n\tVirtualMachineBootOptionsBootableDevice\n}\n\nfunc init() {\n\tt[\"VirtualMachineBootOptionsBootableCdromDevice\"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableCdromDevice)(nil)).Elem()\n}\n\ntype 
VirtualMachineBootOptionsBootableDevice struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"VirtualMachineBootOptionsBootableDevice\"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableDevice)(nil)).Elem()\n}\n\ntype VirtualMachineBootOptionsBootableDiskDevice struct {\n\tVirtualMachineBootOptionsBootableDevice\n\n\tDeviceKey int32 `xml:\"deviceKey\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineBootOptionsBootableDiskDevice\"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableDiskDevice)(nil)).Elem()\n}\n\ntype VirtualMachineBootOptionsBootableEthernetDevice struct {\n\tVirtualMachineBootOptionsBootableDevice\n\n\tDeviceKey int32 `xml:\"deviceKey\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineBootOptionsBootableEthernetDevice\"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableEthernetDevice)(nil)).Elem()\n}\n\ntype VirtualMachineBootOptionsBootableFloppyDevice struct {\n\tVirtualMachineBootOptionsBootableDevice\n}\n\nfunc init() {\n\tt[\"VirtualMachineBootOptionsBootableFloppyDevice\"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableFloppyDevice)(nil)).Elem()\n}\n\ntype VirtualMachineCapability struct {\n\tDynamicData\n\n\tSnapshotOperationsSupported         bool  `xml:\"snapshotOperationsSupported\"`\n\tMultipleSnapshotsSupported          bool  `xml:\"multipleSnapshotsSupported\"`\n\tSnapshotConfigSupported             bool  `xml:\"snapshotConfigSupported\"`\n\tPoweredOffSnapshotsSupported        bool  `xml:\"poweredOffSnapshotsSupported\"`\n\tMemorySnapshotsSupported            bool  `xml:\"memorySnapshotsSupported\"`\n\tRevertToSnapshotSupported           bool  `xml:\"revertToSnapshotSupported\"`\n\tQuiescedSnapshotsSupported          bool  `xml:\"quiescedSnapshotsSupported\"`\n\tDisableSnapshotsSupported           bool  `xml:\"disableSnapshotsSupported\"`\n\tLockSnapshotsSupported              bool  `xml:\"lockSnapshotsSupported\"`\n\tConsolePreferencesSupported         bool  `xml:\"consolePreferencesSupported\"`\n\tCpuFeatureMaskSupported             bool 
 `xml:\"cpuFeatureMaskSupported\"`\n\tS1AcpiManagementSupported           bool  `xml:\"s1AcpiManagementSupported\"`\n\tSettingScreenResolutionSupported    bool  `xml:\"settingScreenResolutionSupported\"`\n\tToolsAutoUpdateSupported            bool  `xml:\"toolsAutoUpdateSupported\"`\n\tVmNpivWwnSupported                  bool  `xml:\"vmNpivWwnSupported\"`\n\tNpivWwnOnNonRdmVmSupported          bool  `xml:\"npivWwnOnNonRdmVmSupported\"`\n\tVmNpivWwnDisableSupported           *bool `xml:\"vmNpivWwnDisableSupported\"`\n\tVmNpivWwnUpdateSupported            *bool `xml:\"vmNpivWwnUpdateSupported\"`\n\tSwapPlacementSupported              bool  `xml:\"swapPlacementSupported\"`\n\tToolsSyncTimeSupported              bool  `xml:\"toolsSyncTimeSupported\"`\n\tVirtualMmuUsageSupported            bool  `xml:\"virtualMmuUsageSupported\"`\n\tDiskSharesSupported                 bool  `xml:\"diskSharesSupported\"`\n\tBootOptionsSupported                bool  `xml:\"bootOptionsSupported\"`\n\tBootRetryOptionsSupported           *bool `xml:\"bootRetryOptionsSupported\"`\n\tSettingVideoRamSizeSupported        bool  `xml:\"settingVideoRamSizeSupported\"`\n\tSettingDisplayTopologySupported     *bool `xml:\"settingDisplayTopologySupported\"`\n\tRecordReplaySupported               *bool `xml:\"recordReplaySupported\"`\n\tChangeTrackingSupported             *bool `xml:\"changeTrackingSupported\"`\n\tMultipleCoresPerSocketSupported     *bool `xml:\"multipleCoresPerSocketSupported\"`\n\tHostBasedReplicationSupported       *bool `xml:\"hostBasedReplicationSupported\"`\n\tGuestAutoLockSupported              *bool `xml:\"guestAutoLockSupported\"`\n\tMemoryReservationLockSupported      *bool `xml:\"memoryReservationLockSupported\"`\n\tFeatureRequirementSupported         *bool `xml:\"featureRequirementSupported\"`\n\tPoweredOnMonitorTypeChangeSupported *bool `xml:\"poweredOnMonitorTypeChangeSupported\"`\n\tSeSparseDiskSupported               *bool 
`xml:\"seSparseDiskSupported\"`\n\tNestedHVSupported                   *bool `xml:\"nestedHVSupported\"`\n\tVPMCSupported                       *bool `xml:\"vPMCSupported\"`\n\tSecureBootSupported                 *bool `xml:\"secureBootSupported\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineCapability\"] = reflect.TypeOf((*VirtualMachineCapability)(nil)).Elem()\n}\n\ntype VirtualMachineCdromInfo struct {\n\tVirtualMachineTargetInfo\n\n\tDescription string `xml:\"description,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineCdromInfo\"] = reflect.TypeOf((*VirtualMachineCdromInfo)(nil)).Elem()\n}\n\ntype VirtualMachineCloneSpec struct {\n\tDynamicData\n\n\tLocation      VirtualMachineRelocateSpec `xml:\"location\"`\n\tTemplate      bool                       `xml:\"template\"`\n\tConfig        *VirtualMachineConfigSpec  `xml:\"config,omitempty\"`\n\tCustomization *CustomizationSpec         `xml:\"customization,omitempty\"`\n\tPowerOn       bool                       `xml:\"powerOn\"`\n\tSnapshot      *ManagedObjectReference    `xml:\"snapshot,omitempty\"`\n\tMemory        *bool                      `xml:\"memory\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineCloneSpec\"] = reflect.TypeOf((*VirtualMachineCloneSpec)(nil)).Elem()\n}\n\ntype VirtualMachineConfigInfo struct {\n\tDynamicData\n\n\tChangeVersion                string                                     `xml:\"changeVersion\"`\n\tModified                     time.Time                                  `xml:\"modified\"`\n\tName                         string                                     `xml:\"name\"`\n\tGuestFullName                string                                     `xml:\"guestFullName\"`\n\tVersion                      string                                     `xml:\"version\"`\n\tUuid                         string                                     `xml:\"uuid\"`\n\tInstanceUuid                 string                                     
`xml:\"instanceUuid,omitempty\"`\n\tNpivNodeWorldWideName        []int64                                    `xml:\"npivNodeWorldWideName,omitempty\"`\n\tNpivPortWorldWideName        []int64                                    `xml:\"npivPortWorldWideName,omitempty\"`\n\tNpivWorldWideNameType        string                                     `xml:\"npivWorldWideNameType,omitempty\"`\n\tNpivDesiredNodeWwns          int16                                      `xml:\"npivDesiredNodeWwns,omitempty\"`\n\tNpivDesiredPortWwns          int16                                      `xml:\"npivDesiredPortWwns,omitempty\"`\n\tNpivTemporaryDisabled        *bool                                      `xml:\"npivTemporaryDisabled\"`\n\tNpivOnNonRdmDisks            *bool                                      `xml:\"npivOnNonRdmDisks\"`\n\tLocationId                   string                                     `xml:\"locationId,omitempty\"`\n\tTemplate                     bool                                       `xml:\"template\"`\n\tGuestId                      string                                     `xml:\"guestId\"`\n\tAlternateGuestName           string                                     `xml:\"alternateGuestName\"`\n\tAnnotation                   string                                     `xml:\"annotation,omitempty\"`\n\tFiles                        VirtualMachineFileInfo                     `xml:\"files\"`\n\tTools                        *ToolsConfigInfo                           `xml:\"tools,omitempty\"`\n\tFlags                        VirtualMachineFlagInfo                     `xml:\"flags\"`\n\tConsolePreferences           *VirtualMachineConsolePreferences          `xml:\"consolePreferences,omitempty\"`\n\tDefaultPowerOps              VirtualMachineDefaultPowerOpInfo           `xml:\"defaultPowerOps\"`\n\tHardware                     VirtualHardware                            `xml:\"hardware\"`\n\tCpuAllocation                BaseResourceAllocationInfo                 
`xml:\"cpuAllocation,omitempty,typeattr\"`\n\tMemoryAllocation             BaseResourceAllocationInfo                 `xml:\"memoryAllocation,omitempty,typeattr\"`\n\tLatencySensitivity           *LatencySensitivity                        `xml:\"latencySensitivity,omitempty\"`\n\tMemoryHotAddEnabled          *bool                                      `xml:\"memoryHotAddEnabled\"`\n\tCpuHotAddEnabled             *bool                                      `xml:\"cpuHotAddEnabled\"`\n\tCpuHotRemoveEnabled          *bool                                      `xml:\"cpuHotRemoveEnabled\"`\n\tHotPlugMemoryLimit           int64                                      `xml:\"hotPlugMemoryLimit,omitempty\"`\n\tHotPlugMemoryIncrementSize   int64                                      `xml:\"hotPlugMemoryIncrementSize,omitempty\"`\n\tCpuAffinity                  *VirtualMachineAffinityInfo                `xml:\"cpuAffinity,omitempty\"`\n\tMemoryAffinity               *VirtualMachineAffinityInfo                `xml:\"memoryAffinity,omitempty\"`\n\tNetworkShaper                *VirtualMachineNetworkShaperInfo           `xml:\"networkShaper,omitempty\"`\n\tExtraConfig                  []BaseOptionValue                          `xml:\"extraConfig,omitempty,typeattr\"`\n\tCpuFeatureMask               []HostCpuIdInfo                            `xml:\"cpuFeatureMask,omitempty\"`\n\tDatastoreUrl                 []VirtualMachineConfigInfoDatastoreUrlPair `xml:\"datastoreUrl,omitempty\"`\n\tSwapPlacement                string                                     `xml:\"swapPlacement,omitempty\"`\n\tBootOptions                  *VirtualMachineBootOptions                 `xml:\"bootOptions,omitempty\"`\n\tFtInfo                       BaseFaultToleranceConfigInfo               `xml:\"ftInfo,omitempty,typeattr\"`\n\tRepConfig                    *ReplicationConfigSpec                     `xml:\"repConfig,omitempty\"`\n\tVAppConfig                   BaseVmConfigInfo                           
`xml:\"vAppConfig,omitempty,typeattr\"`\n\tVAssertsEnabled              *bool                                      `xml:\"vAssertsEnabled\"`\n\tChangeTrackingEnabled        *bool                                      `xml:\"changeTrackingEnabled\"`\n\tFirmware                     string                                     `xml:\"firmware,omitempty\"`\n\tMaxMksConnections            int32                                      `xml:\"maxMksConnections,omitempty\"`\n\tGuestAutoLockEnabled         *bool                                      `xml:\"guestAutoLockEnabled\"`\n\tManagedBy                    *ManagedByInfo                             `xml:\"managedBy,omitempty\"`\n\tMemoryReservationLockedToMax *bool                                      `xml:\"memoryReservationLockedToMax\"`\n\tInitialOverhead              *VirtualMachineConfigInfoOverheadInfo      `xml:\"initialOverhead,omitempty\"`\n\tNestedHVEnabled              *bool                                      `xml:\"nestedHVEnabled\"`\n\tVPMCEnabled                  *bool                                      `xml:\"vPMCEnabled\"`\n\tScheduledHardwareUpgradeInfo *ScheduledHardwareUpgradeInfo              `xml:\"scheduledHardwareUpgradeInfo,omitempty\"`\n\tForkConfigInfo               *VirtualMachineForkConfigInfo              `xml:\"forkConfigInfo,omitempty\"`\n\tVFlashCacheReservation       int64                                      `xml:\"vFlashCacheReservation,omitempty\"`\n\tVmxConfigChecksum            []byte                                     `xml:\"vmxConfigChecksum,omitempty\"`\n\tMessageBusTunnelEnabled      *bool                                      `xml:\"messageBusTunnelEnabled\"`\n\tVmStorageObjectId            string                                     `xml:\"vmStorageObjectId,omitempty\"`\n\tSwapStorageObjectId          string                                     `xml:\"swapStorageObjectId,omitempty\"`\n\tKeyId                        *CryptoKeyId                               
`xml:\"keyId,omitempty\"`\n\tGuestIntegrityInfo           *VirtualMachineGuestIntegrityInfo          `xml:\"guestIntegrityInfo,omitempty\"`\n\tMigrateEncryption            string                                     `xml:\"migrateEncryption,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineConfigInfo\"] = reflect.TypeOf((*VirtualMachineConfigInfo)(nil)).Elem()\n}\n\ntype VirtualMachineConfigInfoDatastoreUrlPair struct {\n\tDynamicData\n\n\tName string `xml:\"name\"`\n\tUrl  string `xml:\"url\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineConfigInfoDatastoreUrlPair\"] = reflect.TypeOf((*VirtualMachineConfigInfoDatastoreUrlPair)(nil)).Elem()\n}\n\ntype VirtualMachineConfigInfoOverheadInfo struct {\n\tDynamicData\n\n\tInitialMemoryReservation int64 `xml:\"initialMemoryReservation,omitempty\"`\n\tInitialSwapReservation   int64 `xml:\"initialSwapReservation,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineConfigInfoOverheadInfo\"] = reflect.TypeOf((*VirtualMachineConfigInfoOverheadInfo)(nil)).Elem()\n}\n\ntype VirtualMachineConfigOption struct {\n\tDynamicData\n\n\tVersion                          string                   `xml:\"version\"`\n\tDescription                      string                   `xml:\"description\"`\n\tGuestOSDescriptor                []GuestOsDescriptor      `xml:\"guestOSDescriptor\"`\n\tGuestOSDefaultIndex              int32                    `xml:\"guestOSDefaultIndex\"`\n\tHardwareOptions                  VirtualHardwareOption    `xml:\"hardwareOptions\"`\n\tCapabilities                     VirtualMachineCapability `xml:\"capabilities\"`\n\tDatastore                        DatastoreOption          `xml:\"datastore\"`\n\tDefaultDevice                    []BaseVirtualDevice      `xml:\"defaultDevice,omitempty,typeattr\"`\n\tSupportedMonitorType             []string                 `xml:\"supportedMonitorType\"`\n\tSupportedOvfEnvironmentTransport []string                 
`xml:\"supportedOvfEnvironmentTransport,omitempty\"`\n\tSupportedOvfInstallTransport     []string                 `xml:\"supportedOvfInstallTransport,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineConfigOption\"] = reflect.TypeOf((*VirtualMachineConfigOption)(nil)).Elem()\n}\n\ntype VirtualMachineConfigOptionDescriptor struct {\n\tDynamicData\n\n\tKey                 string                   `xml:\"key\"`\n\tDescription         string                   `xml:\"description,omitempty\"`\n\tHost                []ManagedObjectReference `xml:\"host,omitempty\"`\n\tCreateSupported     *bool                    `xml:\"createSupported\"`\n\tDefaultConfigOption *bool                    `xml:\"defaultConfigOption\"`\n\tRunSupported        *bool                    `xml:\"runSupported\"`\n\tUpgradeSupported    *bool                    `xml:\"upgradeSupported\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineConfigOptionDescriptor\"] = reflect.TypeOf((*VirtualMachineConfigOptionDescriptor)(nil)).Elem()\n}\n\ntype VirtualMachineConfigSpec struct {\n\tDynamicData\n\n\tChangeVersion                string                            `xml:\"changeVersion,omitempty\"`\n\tName                         string                            `xml:\"name,omitempty\"`\n\tVersion                      string                            `xml:\"version,omitempty\"`\n\tUuid                         string                            `xml:\"uuid,omitempty\"`\n\tInstanceUuid                 string                            `xml:\"instanceUuid,omitempty\"`\n\tNpivNodeWorldWideName        []int64                           `xml:\"npivNodeWorldWideName,omitempty\"`\n\tNpivPortWorldWideName        []int64                           `xml:\"npivPortWorldWideName,omitempty\"`\n\tNpivWorldWideNameType        string                            `xml:\"npivWorldWideNameType,omitempty\"`\n\tNpivDesiredNodeWwns          int16                             `xml:\"npivDesiredNodeWwns,omitempty\"`\n\tNpivDesiredPortWwns       
   int16                             `xml:\"npivDesiredPortWwns,omitempty\"`\n\tNpivTemporaryDisabled        *bool                             `xml:\"npivTemporaryDisabled\"`\n\tNpivOnNonRdmDisks            *bool                             `xml:\"npivOnNonRdmDisks\"`\n\tNpivWorldWideNameOp          string                            `xml:\"npivWorldWideNameOp,omitempty\"`\n\tLocationId                   string                            `xml:\"locationId,omitempty\"`\n\tGuestId                      string                            `xml:\"guestId,omitempty\"`\n\tAlternateGuestName           string                            `xml:\"alternateGuestName,omitempty\"`\n\tAnnotation                   string                            `xml:\"annotation,omitempty\"`\n\tFiles                        *VirtualMachineFileInfo           `xml:\"files,omitempty\"`\n\tTools                        *ToolsConfigInfo                  `xml:\"tools,omitempty\"`\n\tFlags                        *VirtualMachineFlagInfo           `xml:\"flags,omitempty\"`\n\tConsolePreferences           *VirtualMachineConsolePreferences `xml:\"consolePreferences,omitempty\"`\n\tPowerOpInfo                  *VirtualMachineDefaultPowerOpInfo `xml:\"powerOpInfo,omitempty\"`\n\tNumCPUs                      int32                             `xml:\"numCPUs,omitempty\"`\n\tNumCoresPerSocket            int32                             `xml:\"numCoresPerSocket,omitempty\"`\n\tMemoryMB                     int64                             `xml:\"memoryMB,omitempty\"`\n\tMemoryHotAddEnabled          *bool                             `xml:\"memoryHotAddEnabled\"`\n\tCpuHotAddEnabled             *bool                             `xml:\"cpuHotAddEnabled\"`\n\tCpuHotRemoveEnabled          *bool                             `xml:\"cpuHotRemoveEnabled\"`\n\tVirtualICH7MPresent          *bool                             `xml:\"virtualICH7MPresent\"`\n\tVirtualSMCPresent            *bool                             
`xml:\"virtualSMCPresent\"`\n\tDeviceChange                 []BaseVirtualDeviceConfigSpec     `xml:\"deviceChange,omitempty,typeattr\"`\n\tCpuAllocation                BaseResourceAllocationInfo        `xml:\"cpuAllocation,omitempty,typeattr\"`\n\tMemoryAllocation             BaseResourceAllocationInfo        `xml:\"memoryAllocation,omitempty,typeattr\"`\n\tLatencySensitivity           *LatencySensitivity               `xml:\"latencySensitivity,omitempty\"`\n\tCpuAffinity                  *VirtualMachineAffinityInfo       `xml:\"cpuAffinity,omitempty\"`\n\tMemoryAffinity               *VirtualMachineAffinityInfo       `xml:\"memoryAffinity,omitempty\"`\n\tNetworkShaper                *VirtualMachineNetworkShaperInfo  `xml:\"networkShaper,omitempty\"`\n\tCpuFeatureMask               []VirtualMachineCpuIdInfoSpec     `xml:\"cpuFeatureMask,omitempty\"`\n\tExtraConfig                  []BaseOptionValue                 `xml:\"extraConfig,omitempty,typeattr\"`\n\tSwapPlacement                string                            `xml:\"swapPlacement,omitempty\"`\n\tBootOptions                  *VirtualMachineBootOptions        `xml:\"bootOptions,omitempty\"`\n\tVAppConfig                   BaseVmConfigSpec                  `xml:\"vAppConfig,omitempty,typeattr\"`\n\tFtInfo                       BaseFaultToleranceConfigInfo      `xml:\"ftInfo,omitempty,typeattr\"`\n\tRepConfig                    *ReplicationConfigSpec            `xml:\"repConfig,omitempty\"`\n\tVAppConfigRemoved            *bool                             `xml:\"vAppConfigRemoved\"`\n\tVAssertsEnabled              *bool                             `xml:\"vAssertsEnabled\"`\n\tChangeTrackingEnabled        *bool                             `xml:\"changeTrackingEnabled\"`\n\tFirmware                     string                            `xml:\"firmware,omitempty\"`\n\tMaxMksConnections            int32                             `xml:\"maxMksConnections,omitempty\"`\n\tGuestAutoLockEnabled         *bool         
                    `xml:\"guestAutoLockEnabled\"`\n\tManagedBy                    *ManagedByInfo                    `xml:\"managedBy,omitempty\"`\n\tMemoryReservationLockedToMax *bool                             `xml:\"memoryReservationLockedToMax\"`\n\tNestedHVEnabled              *bool                             `xml:\"nestedHVEnabled\"`\n\tVPMCEnabled                  *bool                             `xml:\"vPMCEnabled\"`\n\tScheduledHardwareUpgradeInfo *ScheduledHardwareUpgradeInfo     `xml:\"scheduledHardwareUpgradeInfo,omitempty\"`\n\tVmProfile                    []BaseVirtualMachineProfileSpec   `xml:\"vmProfile,omitempty,typeattr\"`\n\tMessageBusTunnelEnabled      *bool                             `xml:\"messageBusTunnelEnabled\"`\n\tCrypto                       BaseCryptoSpec                    `xml:\"crypto,omitempty,typeattr\"`\n\tMigrateEncryption            string                            `xml:\"migrateEncryption,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineConfigSpec\"] = reflect.TypeOf((*VirtualMachineConfigSpec)(nil)).Elem()\n}\n\ntype VirtualMachineConfigSummary struct {\n\tDynamicData\n\n\tName                string                       `xml:\"name\"`\n\tTemplate            bool                         `xml:\"template\"`\n\tVmPathName          string                       `xml:\"vmPathName\"`\n\tMemorySizeMB        int32                        `xml:\"memorySizeMB,omitempty\"`\n\tCpuReservation      int32                        `xml:\"cpuReservation,omitempty\"`\n\tMemoryReservation   int32                        `xml:\"memoryReservation,omitempty\"`\n\tNumCpu              int32                        `xml:\"numCpu,omitempty\"`\n\tNumEthernetCards    int32                        `xml:\"numEthernetCards,omitempty\"`\n\tNumVirtualDisks     int32                        `xml:\"numVirtualDisks,omitempty\"`\n\tUuid                string                       `xml:\"uuid,omitempty\"`\n\tInstanceUuid        string                       
`xml:\"instanceUuid,omitempty\"`\n\tGuestId             string                       `xml:\"guestId,omitempty\"`\n\tGuestFullName       string                       `xml:\"guestFullName,omitempty\"`\n\tAnnotation          string                       `xml:\"annotation,omitempty\"`\n\tProduct             *VAppProductInfo             `xml:\"product,omitempty\"`\n\tInstallBootRequired *bool                        `xml:\"installBootRequired\"`\n\tFtInfo              BaseFaultToleranceConfigInfo `xml:\"ftInfo,omitempty,typeattr\"`\n\tManagedBy           *ManagedByInfo               `xml:\"managedBy,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineConfigSummary\"] = reflect.TypeOf((*VirtualMachineConfigSummary)(nil)).Elem()\n}\n\ntype VirtualMachineConsolePreferences struct {\n\tDynamicData\n\n\tPowerOnWhenOpened        *bool `xml:\"powerOnWhenOpened\"`\n\tEnterFullScreenOnPowerOn *bool `xml:\"enterFullScreenOnPowerOn\"`\n\tCloseOnPowerOffOrSuspend *bool `xml:\"closeOnPowerOffOrSuspend\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineConsolePreferences\"] = reflect.TypeOf((*VirtualMachineConsolePreferences)(nil)).Elem()\n}\n\ntype VirtualMachineCpuIdInfoSpec struct {\n\tArrayUpdateSpec\n\n\tInfo *HostCpuIdInfo `xml:\"info,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineCpuIdInfoSpec\"] = reflect.TypeOf((*VirtualMachineCpuIdInfoSpec)(nil)).Elem()\n}\n\ntype VirtualMachineDatastoreInfo struct {\n\tVirtualMachineTargetInfo\n\n\tDatastore              DatastoreSummary    `xml:\"datastore\"`\n\tCapability             DatastoreCapability `xml:\"capability\"`\n\tMaxFileSize            int64               `xml:\"maxFileSize\"`\n\tMaxVirtualDiskCapacity int64               `xml:\"maxVirtualDiskCapacity,omitempty\"`\n\tMaxPhysicalRDMFileSize int64               `xml:\"maxPhysicalRDMFileSize,omitempty\"`\n\tMaxVirtualRDMFileSize  int64               `xml:\"maxVirtualRDMFileSize,omitempty\"`\n\tMode                   string              `xml:\"mode\"`\n\tVStorageSupport        
string              `xml:\"vStorageSupport,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineDatastoreInfo\"] = reflect.TypeOf((*VirtualMachineDatastoreInfo)(nil)).Elem()\n}\n\ntype VirtualMachineDatastoreVolumeOption struct {\n\tDynamicData\n\n\tFileSystemType string `xml:\"fileSystemType\"`\n\tMajorVersion   int32  `xml:\"majorVersion,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineDatastoreVolumeOption\"] = reflect.TypeOf((*VirtualMachineDatastoreVolumeOption)(nil)).Elem()\n}\n\ntype VirtualMachineDefaultPowerOpInfo struct {\n\tDynamicData\n\n\tPowerOffType        string `xml:\"powerOffType,omitempty\"`\n\tSuspendType         string `xml:\"suspendType,omitempty\"`\n\tResetType           string `xml:\"resetType,omitempty\"`\n\tDefaultPowerOffType string `xml:\"defaultPowerOffType,omitempty\"`\n\tDefaultSuspendType  string `xml:\"defaultSuspendType,omitempty\"`\n\tDefaultResetType    string `xml:\"defaultResetType,omitempty\"`\n\tStandbyAction       string `xml:\"standbyAction,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineDefaultPowerOpInfo\"] = reflect.TypeOf((*VirtualMachineDefaultPowerOpInfo)(nil)).Elem()\n}\n\ntype VirtualMachineDefaultProfileSpec struct {\n\tVirtualMachineProfileSpec\n}\n\nfunc init() {\n\tt[\"VirtualMachineDefaultProfileSpec\"] = reflect.TypeOf((*VirtualMachineDefaultProfileSpec)(nil)).Elem()\n}\n\ntype VirtualMachineDefinedProfileSpec struct {\n\tVirtualMachineProfileSpec\n\n\tProfileId       string                        `xml:\"profileId\"`\n\tReplicationSpec *ReplicationSpec              `xml:\"replicationSpec,omitempty\"`\n\tProfileData     *VirtualMachineProfileRawData `xml:\"profileData,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineDefinedProfileSpec\"] = reflect.TypeOf((*VirtualMachineDefinedProfileSpec)(nil)).Elem()\n}\n\ntype VirtualMachineDeviceRuntimeInfo struct {\n\tDynamicData\n\n\tRuntimeState BaseVirtualMachineDeviceRuntimeInfoDeviceRuntimeState `xml:\"runtimeState,typeattr\"`\n\tKey          int32    
                                             `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineDeviceRuntimeInfo\"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfo)(nil)).Elem()\n}\n\ntype VirtualMachineDeviceRuntimeInfoDeviceRuntimeState struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"VirtualMachineDeviceRuntimeInfoDeviceRuntimeState\"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoDeviceRuntimeState)(nil)).Elem()\n}\n\ntype VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeState struct {\n\tVirtualMachineDeviceRuntimeInfoDeviceRuntimeState\n\n\tVmDirectPathGen2Active                 bool     `xml:\"vmDirectPathGen2Active\"`\n\tVmDirectPathGen2InactiveReasonVm       []string `xml:\"vmDirectPathGen2InactiveReasonVm,omitempty\"`\n\tVmDirectPathGen2InactiveReasonOther    []string `xml:\"vmDirectPathGen2InactiveReasonOther,omitempty\"`\n\tVmDirectPathGen2InactiveReasonExtended string   `xml:\"vmDirectPathGen2InactiveReasonExtended,omitempty\"`\n\tReservationStatus                      string   `xml:\"reservationStatus,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeState\"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeState)(nil)).Elem()\n}\n\ntype VirtualMachineDiskDeviceInfo struct {\n\tVirtualMachineTargetInfo\n\n\tCapacity int64                    `xml:\"capacity,omitempty\"`\n\tVm       []ManagedObjectReference `xml:\"vm,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineDiskDeviceInfo\"] = reflect.TypeOf((*VirtualMachineDiskDeviceInfo)(nil)).Elem()\n}\n\ntype VirtualMachineDisplayTopology struct {\n\tDynamicData\n\n\tX      int32 `xml:\"x\"`\n\tY      int32 `xml:\"y\"`\n\tWidth  int32 `xml:\"width\"`\n\tHeight int32 `xml:\"height\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineDisplayTopology\"] = reflect.TypeOf((*VirtualMachineDisplayTopology)(nil)).Elem()\n}\n\ntype VirtualMachineEmptyProfileSpec struct {\n\tVirtualMachineProfileSpec\n}\n\nfunc init() 
{\n\tt[\"VirtualMachineEmptyProfileSpec\"] = reflect.TypeOf((*VirtualMachineEmptyProfileSpec)(nil)).Elem()\n}\n\ntype VirtualMachineFeatureRequirement struct {\n\tDynamicData\n\n\tKey         string `xml:\"key\"`\n\tFeatureName string `xml:\"featureName\"`\n\tValue       string `xml:\"value\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineFeatureRequirement\"] = reflect.TypeOf((*VirtualMachineFeatureRequirement)(nil)).Elem()\n}\n\ntype VirtualMachineFileInfo struct {\n\tDynamicData\n\n\tVmPathName          string `xml:\"vmPathName,omitempty\"`\n\tSnapshotDirectory   string `xml:\"snapshotDirectory,omitempty\"`\n\tSuspendDirectory    string `xml:\"suspendDirectory,omitempty\"`\n\tLogDirectory        string `xml:\"logDirectory,omitempty\"`\n\tFtMetadataDirectory string `xml:\"ftMetadataDirectory,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineFileInfo\"] = reflect.TypeOf((*VirtualMachineFileInfo)(nil)).Elem()\n}\n\ntype VirtualMachineFileLayout struct {\n\tDynamicData\n\n\tConfigFile []string                                 `xml:\"configFile,omitempty\"`\n\tLogFile    []string                                 `xml:\"logFile,omitempty\"`\n\tDisk       []VirtualMachineFileLayoutDiskLayout     `xml:\"disk,omitempty\"`\n\tSnapshot   []VirtualMachineFileLayoutSnapshotLayout `xml:\"snapshot,omitempty\"`\n\tSwapFile   string                                   `xml:\"swapFile,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineFileLayout\"] = reflect.TypeOf((*VirtualMachineFileLayout)(nil)).Elem()\n}\n\ntype VirtualMachineFileLayoutDiskLayout struct {\n\tDynamicData\n\n\tKey      int32    `xml:\"key\"`\n\tDiskFile []string `xml:\"diskFile\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineFileLayoutDiskLayout\"] = reflect.TypeOf((*VirtualMachineFileLayoutDiskLayout)(nil)).Elem()\n}\n\ntype VirtualMachineFileLayoutEx struct {\n\tDynamicData\n\n\tFile      []VirtualMachineFileLayoutExFileInfo       `xml:\"file,omitempty\"`\n\tDisk      []VirtualMachineFileLayoutExDiskLayout     
`xml:\"disk,omitempty\"`\n\tSnapshot  []VirtualMachineFileLayoutExSnapshotLayout `xml:\"snapshot,omitempty\"`\n\tTimestamp time.Time                                  `xml:\"timestamp\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineFileLayoutEx\"] = reflect.TypeOf((*VirtualMachineFileLayoutEx)(nil)).Elem()\n}\n\ntype VirtualMachineFileLayoutExDiskLayout struct {\n\tDynamicData\n\n\tKey   int32                                `xml:\"key\"`\n\tChain []VirtualMachineFileLayoutExDiskUnit `xml:\"chain,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineFileLayoutExDiskLayout\"] = reflect.TypeOf((*VirtualMachineFileLayoutExDiskLayout)(nil)).Elem()\n}\n\ntype VirtualMachineFileLayoutExDiskUnit struct {\n\tDynamicData\n\n\tFileKey []int32 `xml:\"fileKey\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineFileLayoutExDiskUnit\"] = reflect.TypeOf((*VirtualMachineFileLayoutExDiskUnit)(nil)).Elem()\n}\n\ntype VirtualMachineFileLayoutExFileInfo struct {\n\tDynamicData\n\n\tKey             int32  `xml:\"key\"`\n\tName            string `xml:\"name\"`\n\tType            string `xml:\"type\"`\n\tSize            int64  `xml:\"size\"`\n\tUniqueSize      int64  `xml:\"uniqueSize,omitempty\"`\n\tBackingObjectId string `xml:\"backingObjectId,omitempty\"`\n\tAccessible      *bool  `xml:\"accessible\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineFileLayoutExFileInfo\"] = reflect.TypeOf((*VirtualMachineFileLayoutExFileInfo)(nil)).Elem()\n}\n\ntype VirtualMachineFileLayoutExSnapshotLayout struct {\n\tDynamicData\n\n\tKey       ManagedObjectReference                 `xml:\"key\"`\n\tDataKey   int32                                  `xml:\"dataKey\"`\n\tMemoryKey int32                                  `xml:\"memoryKey,omitempty\"`\n\tDisk      []VirtualMachineFileLayoutExDiskLayout `xml:\"disk,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineFileLayoutExSnapshotLayout\"] = reflect.TypeOf((*VirtualMachineFileLayoutExSnapshotLayout)(nil)).Elem()\n}\n\ntype VirtualMachineFileLayoutSnapshotLayout struct 
{\n\tDynamicData\n\n\tKey          ManagedObjectReference `xml:\"key\"`\n\tSnapshotFile []string               `xml:\"snapshotFile\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineFileLayoutSnapshotLayout\"] = reflect.TypeOf((*VirtualMachineFileLayoutSnapshotLayout)(nil)).Elem()\n}\n\ntype VirtualMachineFlagInfo struct {\n\tDynamicData\n\n\tDisableAcceleration      *bool  `xml:\"disableAcceleration\"`\n\tEnableLogging            *bool  `xml:\"enableLogging\"`\n\tUseToe                   *bool  `xml:\"useToe\"`\n\tRunWithDebugInfo         *bool  `xml:\"runWithDebugInfo\"`\n\tMonitorType              string `xml:\"monitorType,omitempty\"`\n\tHtSharing                string `xml:\"htSharing,omitempty\"`\n\tSnapshotDisabled         *bool  `xml:\"snapshotDisabled\"`\n\tSnapshotLocked           *bool  `xml:\"snapshotLocked\"`\n\tDiskUuidEnabled          *bool  `xml:\"diskUuidEnabled\"`\n\tVirtualMmuUsage          string `xml:\"virtualMmuUsage,omitempty\"`\n\tVirtualExecUsage         string `xml:\"virtualExecUsage,omitempty\"`\n\tSnapshotPowerOffBehavior string `xml:\"snapshotPowerOffBehavior,omitempty\"`\n\tRecordReplayEnabled      *bool  `xml:\"recordReplayEnabled\"`\n\tFaultToleranceType       string `xml:\"faultToleranceType,omitempty\"`\n\tCbrcCacheEnabled         *bool  `xml:\"cbrcCacheEnabled\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineFlagInfo\"] = reflect.TypeOf((*VirtualMachineFlagInfo)(nil)).Elem()\n}\n\ntype VirtualMachineFloppyInfo struct {\n\tVirtualMachineTargetInfo\n}\n\nfunc init() {\n\tt[\"VirtualMachineFloppyInfo\"] = reflect.TypeOf((*VirtualMachineFloppyInfo)(nil)).Elem()\n}\n\ntype VirtualMachineForkConfigInfo struct {\n\tDynamicData\n\n\tParentEnabled     *bool  `xml:\"parentEnabled\"`\n\tChildForkGroupId  string `xml:\"childForkGroupId,omitempty\"`\n\tParentForkGroupId string `xml:\"parentForkGroupId,omitempty\"`\n\tChildType         string `xml:\"childType,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineForkConfigInfo\"] = 
reflect.TypeOf((*VirtualMachineForkConfigInfo)(nil)).Elem()\n}\n\ntype VirtualMachineGuestIntegrityInfo struct {\n\tDynamicData\n\n\tEnabled *bool `xml:\"enabled\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineGuestIntegrityInfo\"] = reflect.TypeOf((*VirtualMachineGuestIntegrityInfo)(nil)).Elem()\n}\n\ntype VirtualMachineGuestQuiesceSpec struct {\n\tDynamicData\n\n\tTimeout int32 `xml:\"timeout,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineGuestQuiesceSpec\"] = reflect.TypeOf((*VirtualMachineGuestQuiesceSpec)(nil)).Elem()\n}\n\ntype VirtualMachineGuestSummary struct {\n\tDynamicData\n\n\tGuestId             string                    `xml:\"guestId,omitempty\"`\n\tGuestFullName       string                    `xml:\"guestFullName,omitempty\"`\n\tToolsStatus         VirtualMachineToolsStatus `xml:\"toolsStatus,omitempty\"`\n\tToolsVersionStatus  string                    `xml:\"toolsVersionStatus,omitempty\"`\n\tToolsVersionStatus2 string                    `xml:\"toolsVersionStatus2,omitempty\"`\n\tToolsRunningStatus  string                    `xml:\"toolsRunningStatus,omitempty\"`\n\tHostName            string                    `xml:\"hostName,omitempty\"`\n\tIpAddress           string                    `xml:\"ipAddress,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineGuestSummary\"] = reflect.TypeOf((*VirtualMachineGuestSummary)(nil)).Elem()\n}\n\ntype VirtualMachineIdeDiskDeviceInfo struct {\n\tVirtualMachineDiskDeviceInfo\n\n\tPartitionTable []VirtualMachineIdeDiskDevicePartitionInfo `xml:\"partitionTable,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineIdeDiskDeviceInfo\"] = reflect.TypeOf((*VirtualMachineIdeDiskDeviceInfo)(nil)).Elem()\n}\n\ntype VirtualMachineIdeDiskDevicePartitionInfo struct {\n\tDynamicData\n\n\tId       int32 `xml:\"id\"`\n\tCapacity int32 `xml:\"capacity\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineIdeDiskDevicePartitionInfo\"] = reflect.TypeOf((*VirtualMachineIdeDiskDevicePartitionInfo)(nil)).Elem()\n}\n\ntype 
VirtualMachineImportSpec struct {\n\tImportSpec\n\n\tConfigSpec    VirtualMachineConfigSpec `xml:\"configSpec\"`\n\tResPoolEntity *ManagedObjectReference  `xml:\"resPoolEntity,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineImportSpec\"] = reflect.TypeOf((*VirtualMachineImportSpec)(nil)).Elem()\n}\n\ntype VirtualMachineLegacyNetworkSwitchInfo struct {\n\tDynamicData\n\n\tName string `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineLegacyNetworkSwitchInfo\"] = reflect.TypeOf((*VirtualMachineLegacyNetworkSwitchInfo)(nil)).Elem()\n}\n\ntype VirtualMachineMemoryReservationInfo struct {\n\tDynamicData\n\n\tVirtualMachineMin      int64  `xml:\"virtualMachineMin\"`\n\tVirtualMachineMax      int64  `xml:\"virtualMachineMax\"`\n\tVirtualMachineReserved int64  `xml:\"virtualMachineReserved\"`\n\tAllocationPolicy       string `xml:\"allocationPolicy\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineMemoryReservationInfo\"] = reflect.TypeOf((*VirtualMachineMemoryReservationInfo)(nil)).Elem()\n}\n\ntype VirtualMachineMemoryReservationSpec struct {\n\tDynamicData\n\n\tVirtualMachineReserved int64  `xml:\"virtualMachineReserved,omitempty\"`\n\tAllocationPolicy       string `xml:\"allocationPolicy,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineMemoryReservationSpec\"] = reflect.TypeOf((*VirtualMachineMemoryReservationSpec)(nil)).Elem()\n}\n\ntype VirtualMachineMessage struct {\n\tDynamicData\n\n\tId       string    `xml:\"id\"`\n\tArgument []AnyType `xml:\"argument,omitempty,typeattr\"`\n\tText     string    `xml:\"text,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineMessage\"] = reflect.TypeOf((*VirtualMachineMessage)(nil)).Elem()\n}\n\ntype VirtualMachineMetadataManagerVmMetadata struct {\n\tDynamicData\n\n\tVmId     string `xml:\"vmId\"`\n\tMetadata string `xml:\"metadata,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineMetadataManagerVmMetadata\"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadata)(nil)).Elem()\n}\n\ntype 
VirtualMachineMetadataManagerVmMetadataInput struct {\n\tDynamicData\n\n\tOperation  string                                  `xml:\"operation\"`\n\tVmMetadata VirtualMachineMetadataManagerVmMetadata `xml:\"vmMetadata\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineMetadataManagerVmMetadataInput\"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataInput)(nil)).Elem()\n}\n\ntype VirtualMachineMetadataManagerVmMetadataOwner struct {\n\tDynamicData\n\n\tName string `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineMetadataManagerVmMetadataOwner\"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataOwner)(nil)).Elem()\n}\n\ntype VirtualMachineMetadataManagerVmMetadataResult struct {\n\tDynamicData\n\n\tVmMetadata VirtualMachineMetadataManagerVmMetadata `xml:\"vmMetadata\"`\n\tError      *LocalizedMethodFault                   `xml:\"error,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineMetadataManagerVmMetadataResult\"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataResult)(nil)).Elem()\n}\n\ntype VirtualMachineMksTicket struct {\n\tDynamicData\n\n\tTicket        string `xml:\"ticket\"`\n\tCfgFile       string `xml:\"cfgFile\"`\n\tHost          string `xml:\"host,omitempty\"`\n\tPort          int32  `xml:\"port,omitempty\"`\n\tSslThumbprint string `xml:\"sslThumbprint,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineMksTicket\"] = reflect.TypeOf((*VirtualMachineMksTicket)(nil)).Elem()\n}\n\ntype VirtualMachineNetworkInfo struct {\n\tVirtualMachineTargetInfo\n\n\tNetwork BaseNetworkSummary `xml:\"network,typeattr\"`\n\tVswitch string             `xml:\"vswitch,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineNetworkInfo\"] = reflect.TypeOf((*VirtualMachineNetworkInfo)(nil)).Elem()\n}\n\ntype VirtualMachineNetworkShaperInfo struct {\n\tDynamicData\n\n\tEnabled    *bool `xml:\"enabled\"`\n\tPeakBps    int64 `xml:\"peakBps,omitempty\"`\n\tAverageBps int64 `xml:\"averageBps,omitempty\"`\n\tBurstSize  int64 
`xml:\"burstSize,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineNetworkShaperInfo\"] = reflect.TypeOf((*VirtualMachineNetworkShaperInfo)(nil)).Elem()\n}\n\ntype VirtualMachineParallelInfo struct {\n\tVirtualMachineTargetInfo\n}\n\nfunc init() {\n\tt[\"VirtualMachineParallelInfo\"] = reflect.TypeOf((*VirtualMachineParallelInfo)(nil)).Elem()\n}\n\ntype VirtualMachinePciPassthroughInfo struct {\n\tVirtualMachineTargetInfo\n\n\tPciDevice HostPciDevice `xml:\"pciDevice\"`\n\tSystemId  string        `xml:\"systemId\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachinePciPassthroughInfo\"] = reflect.TypeOf((*VirtualMachinePciPassthroughInfo)(nil)).Elem()\n}\n\ntype VirtualMachinePciSharedGpuPassthroughInfo struct {\n\tVirtualMachineTargetInfo\n\n\tVgpu string `xml:\"vgpu\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachinePciSharedGpuPassthroughInfo\"] = reflect.TypeOf((*VirtualMachinePciSharedGpuPassthroughInfo)(nil)).Elem()\n}\n\ntype VirtualMachineProfileRawData struct {\n\tDynamicData\n\n\tExtensionKey string `xml:\"extensionKey\"`\n\tObjectData   string `xml:\"objectData,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineProfileRawData\"] = reflect.TypeOf((*VirtualMachineProfileRawData)(nil)).Elem()\n}\n\ntype VirtualMachineProfileSpec struct {\n\tDynamicData\n}\n\nfunc init() {\n\tt[\"VirtualMachineProfileSpec\"] = reflect.TypeOf((*VirtualMachineProfileSpec)(nil)).Elem()\n}\n\ntype VirtualMachineQuestionInfo struct {\n\tDynamicData\n\n\tId      string                  `xml:\"id\"`\n\tText    string                  `xml:\"text\"`\n\tChoice  ChoiceOption            `xml:\"choice\"`\n\tMessage []VirtualMachineMessage `xml:\"message,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineQuestionInfo\"] = reflect.TypeOf((*VirtualMachineQuestionInfo)(nil)).Elem()\n}\n\ntype VirtualMachineQuickStats struct {\n\tDynamicData\n\n\tOverallCpuUsage              int32               `xml:\"overallCpuUsage,omitempty\"`\n\tOverallCpuDemand             int32               
`xml:\"overallCpuDemand,omitempty\"`\n\tGuestMemoryUsage             int32               `xml:\"guestMemoryUsage,omitempty\"`\n\tHostMemoryUsage              int32               `xml:\"hostMemoryUsage,omitempty\"`\n\tGuestHeartbeatStatus         ManagedEntityStatus `xml:\"guestHeartbeatStatus\"`\n\tDistributedCpuEntitlement    int32               `xml:\"distributedCpuEntitlement,omitempty\"`\n\tDistributedMemoryEntitlement int32               `xml:\"distributedMemoryEntitlement,omitempty\"`\n\tStaticCpuEntitlement         int32               `xml:\"staticCpuEntitlement,omitempty\"`\n\tStaticMemoryEntitlement      int32               `xml:\"staticMemoryEntitlement,omitempty\"`\n\tPrivateMemory                int32               `xml:\"privateMemory,omitempty\"`\n\tSharedMemory                 int32               `xml:\"sharedMemory,omitempty\"`\n\tSwappedMemory                int32               `xml:\"swappedMemory,omitempty\"`\n\tBalloonedMemory              int32               `xml:\"balloonedMemory,omitempty\"`\n\tConsumedOverheadMemory       int32               `xml:\"consumedOverheadMemory,omitempty\"`\n\tFtLogBandwidth               int32               `xml:\"ftLogBandwidth,omitempty\"`\n\tFtSecondaryLatency           int32               `xml:\"ftSecondaryLatency,omitempty\"`\n\tFtLatencyStatus              ManagedEntityStatus `xml:\"ftLatencyStatus,omitempty\"`\n\tCompressedMemory             int64               `xml:\"compressedMemory,omitempty\"`\n\tUptimeSeconds                int32               `xml:\"uptimeSeconds,omitempty\"`\n\tSsdSwappedMemory             int64               `xml:\"ssdSwappedMemory,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineQuickStats\"] = reflect.TypeOf((*VirtualMachineQuickStats)(nil)).Elem()\n}\n\ntype VirtualMachineRelocateSpec struct {\n\tDynamicData\n\n\tService      *ServiceLocator                         `xml:\"service,omitempty\"`\n\tFolder       *ManagedObjectReference                 
`xml:\"folder,omitempty\"`\n\tDatastore    *ManagedObjectReference                 `xml:\"datastore,omitempty\"`\n\tDiskMoveType string                                  `xml:\"diskMoveType,omitempty\"`\n\tPool         *ManagedObjectReference                 `xml:\"pool,omitempty\"`\n\tHost         *ManagedObjectReference                 `xml:\"host,omitempty\"`\n\tDisk         []VirtualMachineRelocateSpecDiskLocator `xml:\"disk,omitempty\"`\n\tTransform    VirtualMachineRelocateTransformation    `xml:\"transform,omitempty\"`\n\tDeviceChange []BaseVirtualDeviceConfigSpec           `xml:\"deviceChange,omitempty,typeattr\"`\n\tProfile      []BaseVirtualMachineProfileSpec         `xml:\"profile,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineRelocateSpec\"] = reflect.TypeOf((*VirtualMachineRelocateSpec)(nil)).Elem()\n}\n\ntype VirtualMachineRelocateSpecDiskLocator struct {\n\tDynamicData\n\n\tDiskId          int32                           `xml:\"diskId\"`\n\tDatastore       ManagedObjectReference          `xml:\"datastore\"`\n\tDiskMoveType    string                          `xml:\"diskMoveType,omitempty\"`\n\tDiskBackingInfo BaseVirtualDeviceBackingInfo    `xml:\"diskBackingInfo,omitempty,typeattr\"`\n\tProfile         []BaseVirtualMachineProfileSpec `xml:\"profile,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineRelocateSpecDiskLocator\"] = reflect.TypeOf((*VirtualMachineRelocateSpecDiskLocator)(nil)).Elem()\n}\n\ntype VirtualMachineRuntimeInfo struct {\n\tDynamicData\n\n\tDevice                    []VirtualMachineDeviceRuntimeInfo            `xml:\"device,omitempty\"`\n\tHost                      *ManagedObjectReference                      `xml:\"host,omitempty\"`\n\tConnectionState           VirtualMachineConnectionState                `xml:\"connectionState\"`\n\tPowerState                VirtualMachinePowerState                     `xml:\"powerState\"`\n\tFaultToleranceState       VirtualMachineFaultToleranceState            
`xml:\"faultToleranceState,omitempty\"`\n\tDasVmProtection           *VirtualMachineRuntimeInfoDasProtectionState `xml:\"dasVmProtection,omitempty\"`\n\tToolsInstallerMounted     bool                                         `xml:\"toolsInstallerMounted\"`\n\tSuspendTime               *time.Time                                   `xml:\"suspendTime\"`\n\tBootTime                  *time.Time                                   `xml:\"bootTime\"`\n\tSuspendInterval           int64                                        `xml:\"suspendInterval,omitempty\"`\n\tQuestion                  *VirtualMachineQuestionInfo                  `xml:\"question,omitempty\"`\n\tMemoryOverhead            int64                                        `xml:\"memoryOverhead,omitempty\"`\n\tMaxCpuUsage               int32                                        `xml:\"maxCpuUsage,omitempty\"`\n\tMaxMemoryUsage            int32                                        `xml:\"maxMemoryUsage,omitempty\"`\n\tNumMksConnections         int32                                        `xml:\"numMksConnections\"`\n\tRecordReplayState         VirtualMachineRecordReplayState              `xml:\"recordReplayState,omitempty\"`\n\tCleanPowerOff             *bool                                        `xml:\"cleanPowerOff\"`\n\tNeedSecondaryReason       string                                       `xml:\"needSecondaryReason,omitempty\"`\n\tOnlineStandby             *bool                                        `xml:\"onlineStandby\"`\n\tMinRequiredEVCModeKey     string                                       `xml:\"minRequiredEVCModeKey,omitempty\"`\n\tConsolidationNeeded       *bool                                        `xml:\"consolidationNeeded\"`\n\tOfflineFeatureRequirement []VirtualMachineFeatureRequirement           `xml:\"offlineFeatureRequirement,omitempty\"`\n\tFeatureRequirement        []VirtualMachineFeatureRequirement           `xml:\"featureRequirement,omitempty\"`\n\tFeatureMask               
[]HostFeatureMask                            `xml:\"featureMask,omitempty\"`\n\tVFlashCacheAllocation     int64                                        `xml:\"vFlashCacheAllocation,omitempty\"`\n\tPaused                    *bool                                        `xml:\"paused\"`\n\tSnapshotInBackground      *bool                                        `xml:\"snapshotInBackground\"`\n\tQuiescedForkParent        *bool                                        `xml:\"quiescedForkParent\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineRuntimeInfo\"] = reflect.TypeOf((*VirtualMachineRuntimeInfo)(nil)).Elem()\n}\n\ntype VirtualMachineRuntimeInfoDasProtectionState struct {\n\tDynamicData\n\n\tDasProtected bool `xml:\"dasProtected\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineRuntimeInfoDasProtectionState\"] = reflect.TypeOf((*VirtualMachineRuntimeInfoDasProtectionState)(nil)).Elem()\n}\n\ntype VirtualMachineScsiDiskDeviceInfo struct {\n\tVirtualMachineDiskDeviceInfo\n\n\tDisk          *HostScsiDisk `xml:\"disk,omitempty\"`\n\tTransportHint string        `xml:\"transportHint,omitempty\"`\n\tLunNumber     int32         `xml:\"lunNumber,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineScsiDiskDeviceInfo\"] = reflect.TypeOf((*VirtualMachineScsiDiskDeviceInfo)(nil)).Elem()\n}\n\ntype VirtualMachineScsiPassthroughInfo struct {\n\tVirtualMachineTargetInfo\n\n\tScsiClass          string `xml:\"scsiClass\"`\n\tVendor             string `xml:\"vendor\"`\n\tPhysicalUnitNumber int32  `xml:\"physicalUnitNumber\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineScsiPassthroughInfo\"] = reflect.TypeOf((*VirtualMachineScsiPassthroughInfo)(nil)).Elem()\n}\n\ntype VirtualMachineSerialInfo struct {\n\tVirtualMachineTargetInfo\n}\n\nfunc init() {\n\tt[\"VirtualMachineSerialInfo\"] = reflect.TypeOf((*VirtualMachineSerialInfo)(nil)).Elem()\n}\n\ntype VirtualMachineSnapshotInfo struct {\n\tDynamicData\n\n\tCurrentSnapshot  *ManagedObjectReference      
`xml:\"currentSnapshot,omitempty\"`\n\tRootSnapshotList []VirtualMachineSnapshotTree `xml:\"rootSnapshotList\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineSnapshotInfo\"] = reflect.TypeOf((*VirtualMachineSnapshotInfo)(nil)).Elem()\n}\n\ntype VirtualMachineSnapshotTree struct {\n\tDynamicData\n\n\tSnapshot          ManagedObjectReference       `xml:\"snapshot\"`\n\tVm                ManagedObjectReference       `xml:\"vm\"`\n\tName              string                       `xml:\"name\"`\n\tDescription       string                       `xml:\"description\"`\n\tId                int32                        `xml:\"id,omitempty\"`\n\tCreateTime        time.Time                    `xml:\"createTime\"`\n\tState             VirtualMachinePowerState     `xml:\"state\"`\n\tQuiesced          bool                         `xml:\"quiesced\"`\n\tBackupManifest    string                       `xml:\"backupManifest,omitempty\"`\n\tChildSnapshotList []VirtualMachineSnapshotTree `xml:\"childSnapshotList,omitempty\"`\n\tReplaySupported   *bool                        `xml:\"replaySupported\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineSnapshotTree\"] = reflect.TypeOf((*VirtualMachineSnapshotTree)(nil)).Elem()\n}\n\ntype VirtualMachineSoundInfo struct {\n\tVirtualMachineTargetInfo\n}\n\nfunc init() {\n\tt[\"VirtualMachineSoundInfo\"] = reflect.TypeOf((*VirtualMachineSoundInfo)(nil)).Elem()\n}\n\ntype VirtualMachineSriovDevicePoolInfo struct {\n\tDynamicData\n\n\tKey string `xml:\"key\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineSriovDevicePoolInfo\"] = reflect.TypeOf((*VirtualMachineSriovDevicePoolInfo)(nil)).Elem()\n}\n\ntype VirtualMachineSriovInfo struct {\n\tVirtualMachinePciPassthroughInfo\n\n\tVirtualFunction bool                                  `xml:\"virtualFunction\"`\n\tPnic            string                                `xml:\"pnic,omitempty\"`\n\tDevicePool      BaseVirtualMachineSriovDevicePoolInfo `xml:\"devicePool,omitempty,typeattr\"`\n}\n\nfunc init() 
{\n\tt[\"VirtualMachineSriovInfo\"] = reflect.TypeOf((*VirtualMachineSriovInfo)(nil)).Elem()\n}\n\ntype VirtualMachineSriovNetworkDevicePoolInfo struct {\n\tVirtualMachineSriovDevicePoolInfo\n\n\tSwitchKey  string `xml:\"switchKey,omitempty\"`\n\tSwitchUuid string `xml:\"switchUuid,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineSriovNetworkDevicePoolInfo\"] = reflect.TypeOf((*VirtualMachineSriovNetworkDevicePoolInfo)(nil)).Elem()\n}\n\ntype VirtualMachineStorageInfo struct {\n\tDynamicData\n\n\tPerDatastoreUsage []VirtualMachineUsageOnDatastore `xml:\"perDatastoreUsage,omitempty\"`\n\tTimestamp         time.Time                        `xml:\"timestamp\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineStorageInfo\"] = reflect.TypeOf((*VirtualMachineStorageInfo)(nil)).Elem()\n}\n\ntype VirtualMachineStorageSummary struct {\n\tDynamicData\n\n\tCommitted   int64     `xml:\"committed\"`\n\tUncommitted int64     `xml:\"uncommitted\"`\n\tUnshared    int64     `xml:\"unshared\"`\n\tTimestamp   time.Time `xml:\"timestamp\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineStorageSummary\"] = reflect.TypeOf((*VirtualMachineStorageSummary)(nil)).Elem()\n}\n\ntype VirtualMachineSummary struct {\n\tDynamicData\n\n\tVm            *ManagedObjectReference       `xml:\"vm,omitempty\"`\n\tRuntime       VirtualMachineRuntimeInfo     `xml:\"runtime\"`\n\tGuest         *VirtualMachineGuestSummary   `xml:\"guest,omitempty\"`\n\tConfig        VirtualMachineConfigSummary   `xml:\"config\"`\n\tStorage       *VirtualMachineStorageSummary `xml:\"storage,omitempty\"`\n\tQuickStats    VirtualMachineQuickStats      `xml:\"quickStats\"`\n\tOverallStatus ManagedEntityStatus           `xml:\"overallStatus\"`\n\tCustomValue   []BaseCustomFieldValue        `xml:\"customValue,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineSummary\"] = reflect.TypeOf((*VirtualMachineSummary)(nil)).Elem()\n}\n\ntype VirtualMachineTargetInfo struct {\n\tDynamicData\n\n\tName             string   
`xml:\"name\"`\n\tConfigurationTag []string `xml:\"configurationTag,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineTargetInfo\"] = reflect.TypeOf((*VirtualMachineTargetInfo)(nil)).Elem()\n}\n\ntype VirtualMachineTicket struct {\n\tDynamicData\n\n\tTicket        string `xml:\"ticket\"`\n\tCfgFile       string `xml:\"cfgFile\"`\n\tHost          string `xml:\"host,omitempty\"`\n\tPort          int32  `xml:\"port,omitempty\"`\n\tSslThumbprint string `xml:\"sslThumbprint,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineTicket\"] = reflect.TypeOf((*VirtualMachineTicket)(nil)).Elem()\n}\n\ntype VirtualMachineUsageOnDatastore struct {\n\tDynamicData\n\n\tDatastore   ManagedObjectReference `xml:\"datastore\"`\n\tCommitted   int64                  `xml:\"committed\"`\n\tUncommitted int64                  `xml:\"uncommitted\"`\n\tUnshared    int64                  `xml:\"unshared\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineUsageOnDatastore\"] = reflect.TypeOf((*VirtualMachineUsageOnDatastore)(nil)).Elem()\n}\n\ntype VirtualMachineUsbInfo struct {\n\tVirtualMachineTargetInfo\n\n\tDescription  string                 `xml:\"description\"`\n\tVendor       int32                  `xml:\"vendor\"`\n\tProduct      int32                  `xml:\"product\"`\n\tPhysicalPath string                 `xml:\"physicalPath\"`\n\tFamily       []string               `xml:\"family,omitempty\"`\n\tSpeed        []string               `xml:\"speed,omitempty\"`\n\tSummary      *VirtualMachineSummary `xml:\"summary,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineUsbInfo\"] = reflect.TypeOf((*VirtualMachineUsbInfo)(nil)).Elem()\n}\n\ntype VirtualMachineVFlashModuleInfo struct {\n\tVirtualMachineTargetInfo\n\n\tVFlashModule HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption `xml:\"vFlashModule\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineVFlashModuleInfo\"] = reflect.TypeOf((*VirtualMachineVFlashModuleInfo)(nil)).Elem()\n}\n\ntype VirtualMachineVMCIDevice struct 
{\n\tVirtualDevice\n\n\tId                             int64                               `xml:\"id,omitempty\"`\n\tAllowUnrestrictedCommunication *bool                               `xml:\"allowUnrestrictedCommunication\"`\n\tFilterEnable                   *bool                               `xml:\"filterEnable\"`\n\tFilterInfo                     *VirtualMachineVMCIDeviceFilterInfo `xml:\"filterInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineVMCIDevice\"] = reflect.TypeOf((*VirtualMachineVMCIDevice)(nil)).Elem()\n}\n\ntype VirtualMachineVMCIDeviceFilterInfo struct {\n\tDynamicData\n\n\tFilters []VirtualMachineVMCIDeviceFilterSpec `xml:\"filters,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineVMCIDeviceFilterInfo\"] = reflect.TypeOf((*VirtualMachineVMCIDeviceFilterInfo)(nil)).Elem()\n}\n\ntype VirtualMachineVMCIDeviceFilterSpec struct {\n\tDynamicData\n\n\tRank                 int64  `xml:\"rank\"`\n\tAction               string `xml:\"action\"`\n\tProtocol             string `xml:\"protocol\"`\n\tDirection            string `xml:\"direction\"`\n\tLowerDstPortBoundary int64  `xml:\"lowerDstPortBoundary,omitempty\"`\n\tUpperDstPortBoundary int64  `xml:\"upperDstPortBoundary,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineVMCIDeviceFilterSpec\"] = reflect.TypeOf((*VirtualMachineVMCIDeviceFilterSpec)(nil)).Elem()\n}\n\ntype VirtualMachineVMCIDeviceOption struct {\n\tVirtualDeviceOption\n\n\tAllowUnrestrictedCommunication BoolOption                                      `xml:\"allowUnrestrictedCommunication\"`\n\tFilterSpecOption               *VirtualMachineVMCIDeviceOptionFilterSpecOption `xml:\"filterSpecOption,omitempty\"`\n\tFilterSupported                *BoolOption                                     `xml:\"filterSupported,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineVMCIDeviceOption\"] = reflect.TypeOf((*VirtualMachineVMCIDeviceOption)(nil)).Elem()\n}\n\ntype VirtualMachineVMCIDeviceOptionFilterSpecOption struct 
{\n\tDynamicData\n\n\tAction               ChoiceOption `xml:\"action\"`\n\tProtocol             ChoiceOption `xml:\"protocol\"`\n\tDirection            ChoiceOption `xml:\"direction\"`\n\tLowerDstPortBoundary LongOption   `xml:\"lowerDstPortBoundary\"`\n\tUpperDstPortBoundary LongOption   `xml:\"upperDstPortBoundary\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineVMCIDeviceOptionFilterSpecOption\"] = reflect.TypeOf((*VirtualMachineVMCIDeviceOptionFilterSpecOption)(nil)).Elem()\n}\n\ntype VirtualMachineVMIROM struct {\n\tVirtualDevice\n}\n\nfunc init() {\n\tt[\"VirtualMachineVMIROM\"] = reflect.TypeOf((*VirtualMachineVMIROM)(nil)).Elem()\n}\n\ntype VirtualMachineVideoCard struct {\n\tVirtualDevice\n\n\tVideoRamSizeInKB       int64  `xml:\"videoRamSizeInKB,omitempty\"`\n\tNumDisplays            int32  `xml:\"numDisplays,omitempty\"`\n\tUseAutoDetect          *bool  `xml:\"useAutoDetect\"`\n\tEnable3DSupport        *bool  `xml:\"enable3DSupport\"`\n\tUse3dRenderer          string `xml:\"use3dRenderer,omitempty\"`\n\tGraphicsMemorySizeInKB int64  `xml:\"graphicsMemorySizeInKB,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineVideoCard\"] = reflect.TypeOf((*VirtualMachineVideoCard)(nil)).Elem()\n}\n\ntype VirtualMachineWindowsQuiesceSpec struct {\n\tVirtualMachineGuestQuiesceSpec\n\n\tVssBackupType          int32  `xml:\"vssBackupType,omitempty\"`\n\tVssBootableSystemState *bool  `xml:\"vssBootableSystemState\"`\n\tVssPartialFileSupport  *bool  `xml:\"vssPartialFileSupport\"`\n\tVssBackupContext       string `xml:\"vssBackupContext,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineWindowsQuiesceSpec\"] = reflect.TypeOf((*VirtualMachineWindowsQuiesceSpec)(nil)).Elem()\n}\n\ntype VirtualMachineWipeResult struct {\n\tDynamicData\n\n\tDiskId              int32 `xml:\"diskId\"`\n\tShrinkableDiskSpace int64 `xml:\"shrinkableDiskSpace\"`\n}\n\nfunc init() {\n\tt[\"VirtualMachineWipeResult\"] = reflect.TypeOf((*VirtualMachineWipeResult)(nil)).Elem()\n}\n\ntype 
VirtualNVMEController struct {
	VirtualController
}

// init registers the type under its vSphere API name in the package-level
// type map t — presumably consumed by reflection-based XML decoding; confirm
// against the map's reader, which is not visible in this chunk.
func init() {
	t["VirtualNVMEController"] = reflect.TypeOf((*VirtualNVMEController)(nil)).Elem()
}

// VirtualNVMEControllerOption describes the configurable option metadata for
// a virtual NVMe controller; NumNVMEDisks is serialized as "numNVMEDisks"
// per its xml tag.
type VirtualNVMEControllerOption struct {
	VirtualControllerOption

	NumNVMEDisks IntOption `xml:"numNVMEDisks"`
}

func init() {
	t["VirtualNVMEControllerOption"] = reflect.TypeOf((*VirtualNVMEControllerOption)(nil)).Elem()
}

// VirtualNicManagerNetConfig pairs a NIC type with its candidate virtual NICs
// and the subset currently selected; slice fields are omitted from XML when
// empty per their omitempty tags.
type VirtualNicManagerNetConfig struct {
	DynamicData

	NicType            string           `xml:"nicType"`
	MultiSelectAllowed bool             `xml:"multiSelectAllowed"`
	CandidateVnic      []HostVirtualNic `xml:"candidateVnic,omitempty"`
	SelectedVnic       []string         `xml:"selectedVnic,omitempty"`
}

func init() {
	t["VirtualNicManagerNetConfig"] = reflect.TypeOf((*VirtualNicManagerNetConfig)(nil)).Elem()
}

// VirtualPCIController is a marker type: it adds no fields beyond the
// embedded VirtualController.
type VirtualPCIController struct {
	VirtualController
}

func init() {
	t["VirtualPCIController"] = reflect.TypeOf((*VirtualPCIController)(nil)).Elem()
}

// VirtualPCIControllerOption enumerates per-device-class count limits for a
// virtual PCI controller; pointer-typed *IntOption fields are optional
// (omitempty), value-typed IntOption fields always serialize.
type VirtualPCIControllerOption struct {
	VirtualControllerOption

	NumSCSIControllers            IntOption  `xml:"numSCSIControllers"`
	NumEthernetCards              IntOption  `xml:"numEthernetCards"`
	NumVideoCards                 IntOption  `xml:"numVideoCards"`
	NumSoundCards                 IntOption  `xml:"numSoundCards"`
	NumVmiRoms                    IntOption  `xml:"numVmiRoms"`
	NumVmciDevices                *IntOption `xml:"numVmciDevices,omitempty"`
	NumPCIPassthroughDevices      *IntOption `xml:"numPCIPassthroughDevices,omitempty"`
	NumSasSCSIControllers         *IntOption `xml:"numSasSCSIControllers,omitempty"`
	NumVmxnet3EthernetCards       *IntOption `xml:"numVmxnet3EthernetCards,omitempty"`
	NumParaVirtualSCSIControllers *IntOption `xml:"numParaVirtualSCSIControllers,omitempty"`
	NumSATAControllers            *IntOption `xml:"numSATAControllers,omitempty"`
	NumNVMEControllers            
*IntOption `xml:\"numNVMEControllers,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualPCIControllerOption\"] = reflect.TypeOf((*VirtualPCIControllerOption)(nil)).Elem()\n}\n\ntype VirtualPCIPassthrough struct {\n\tVirtualDevice\n}\n\nfunc init() {\n\tt[\"VirtualPCIPassthrough\"] = reflect.TypeOf((*VirtualPCIPassthrough)(nil)).Elem()\n}\n\ntype VirtualPCIPassthroughDeviceBackingInfo struct {\n\tVirtualDeviceDeviceBackingInfo\n\n\tId       string `xml:\"id\"`\n\tDeviceId string `xml:\"deviceId\"`\n\tSystemId string `xml:\"systemId\"`\n\tVendorId int16  `xml:\"vendorId\"`\n}\n\nfunc init() {\n\tt[\"VirtualPCIPassthroughDeviceBackingInfo\"] = reflect.TypeOf((*VirtualPCIPassthroughDeviceBackingInfo)(nil)).Elem()\n}\n\ntype VirtualPCIPassthroughDeviceBackingOption struct {\n\tVirtualDeviceDeviceBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualPCIPassthroughDeviceBackingOption\"] = reflect.TypeOf((*VirtualPCIPassthroughDeviceBackingOption)(nil)).Elem()\n}\n\ntype VirtualPCIPassthroughOption struct {\n\tVirtualDeviceOption\n}\n\nfunc init() {\n\tt[\"VirtualPCIPassthroughOption\"] = reflect.TypeOf((*VirtualPCIPassthroughOption)(nil)).Elem()\n}\n\ntype VirtualPCIPassthroughPluginBackingInfo struct {\n\tVirtualDeviceBackingInfo\n}\n\nfunc init() {\n\tt[\"VirtualPCIPassthroughPluginBackingInfo\"] = reflect.TypeOf((*VirtualPCIPassthroughPluginBackingInfo)(nil)).Elem()\n}\n\ntype VirtualPCIPassthroughPluginBackingOption struct {\n\tVirtualDeviceBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualPCIPassthroughPluginBackingOption\"] = reflect.TypeOf((*VirtualPCIPassthroughPluginBackingOption)(nil)).Elem()\n}\n\ntype VirtualPCIPassthroughVmiopBackingInfo struct {\n\tVirtualPCIPassthroughPluginBackingInfo\n\n\tVgpu string `xml:\"vgpu,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualPCIPassthroughVmiopBackingInfo\"] = reflect.TypeOf((*VirtualPCIPassthroughVmiopBackingInfo)(nil)).Elem()\n}\n\ntype VirtualPCIPassthroughVmiopBackingOption struct 
{\n\tVirtualPCIPassthroughPluginBackingOption\n\n\tVgpu         StringOption `xml:\"vgpu\"`\n\tMaxInstances int32        `xml:\"maxInstances\"`\n}\n\nfunc init() {\n\tt[\"VirtualPCIPassthroughVmiopBackingOption\"] = reflect.TypeOf((*VirtualPCIPassthroughVmiopBackingOption)(nil)).Elem()\n}\n\ntype VirtualPCNet32 struct {\n\tVirtualEthernetCard\n}\n\nfunc init() {\n\tt[\"VirtualPCNet32\"] = reflect.TypeOf((*VirtualPCNet32)(nil)).Elem()\n}\n\ntype VirtualPCNet32Option struct {\n\tVirtualEthernetCardOption\n\n\tSupportsMorphing bool `xml:\"supportsMorphing\"`\n}\n\nfunc init() {\n\tt[\"VirtualPCNet32Option\"] = reflect.TypeOf((*VirtualPCNet32Option)(nil)).Elem()\n}\n\ntype VirtualPS2Controller struct {\n\tVirtualController\n}\n\nfunc init() {\n\tt[\"VirtualPS2Controller\"] = reflect.TypeOf((*VirtualPS2Controller)(nil)).Elem()\n}\n\ntype VirtualPS2ControllerOption struct {\n\tVirtualControllerOption\n\n\tNumKeyboards       IntOption `xml:\"numKeyboards\"`\n\tNumPointingDevices IntOption `xml:\"numPointingDevices\"`\n}\n\nfunc init() {\n\tt[\"VirtualPS2ControllerOption\"] = reflect.TypeOf((*VirtualPS2ControllerOption)(nil)).Elem()\n}\n\ntype VirtualParallelPort struct {\n\tVirtualDevice\n}\n\nfunc init() {\n\tt[\"VirtualParallelPort\"] = reflect.TypeOf((*VirtualParallelPort)(nil)).Elem()\n}\n\ntype VirtualParallelPortDeviceBackingInfo struct {\n\tVirtualDeviceDeviceBackingInfo\n}\n\nfunc init() {\n\tt[\"VirtualParallelPortDeviceBackingInfo\"] = reflect.TypeOf((*VirtualParallelPortDeviceBackingInfo)(nil)).Elem()\n}\n\ntype VirtualParallelPortDeviceBackingOption struct {\n\tVirtualDeviceDeviceBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualParallelPortDeviceBackingOption\"] = reflect.TypeOf((*VirtualParallelPortDeviceBackingOption)(nil)).Elem()\n}\n\ntype VirtualParallelPortFileBackingInfo struct {\n\tVirtualDeviceFileBackingInfo\n}\n\nfunc init() {\n\tt[\"VirtualParallelPortFileBackingInfo\"] = reflect.TypeOf((*VirtualParallelPortFileBackingInfo)(nil)).Elem()\n}\n\ntype 
VirtualParallelPortFileBackingOption struct {\n\tVirtualDeviceFileBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualParallelPortFileBackingOption\"] = reflect.TypeOf((*VirtualParallelPortFileBackingOption)(nil)).Elem()\n}\n\ntype VirtualParallelPortOption struct {\n\tVirtualDeviceOption\n}\n\nfunc init() {\n\tt[\"VirtualParallelPortOption\"] = reflect.TypeOf((*VirtualParallelPortOption)(nil)).Elem()\n}\n\ntype VirtualPointingDevice struct {\n\tVirtualDevice\n}\n\nfunc init() {\n\tt[\"VirtualPointingDevice\"] = reflect.TypeOf((*VirtualPointingDevice)(nil)).Elem()\n}\n\ntype VirtualPointingDeviceBackingOption struct {\n\tVirtualDeviceDeviceBackingOption\n\n\tHostPointingDevice ChoiceOption `xml:\"hostPointingDevice\"`\n}\n\nfunc init() {\n\tt[\"VirtualPointingDeviceBackingOption\"] = reflect.TypeOf((*VirtualPointingDeviceBackingOption)(nil)).Elem()\n}\n\ntype VirtualPointingDeviceDeviceBackingInfo struct {\n\tVirtualDeviceDeviceBackingInfo\n\n\tHostPointingDevice string `xml:\"hostPointingDevice\"`\n}\n\nfunc init() {\n\tt[\"VirtualPointingDeviceDeviceBackingInfo\"] = reflect.TypeOf((*VirtualPointingDeviceDeviceBackingInfo)(nil)).Elem()\n}\n\ntype VirtualPointingDeviceOption struct {\n\tVirtualDeviceOption\n}\n\nfunc init() {\n\tt[\"VirtualPointingDeviceOption\"] = reflect.TypeOf((*VirtualPointingDeviceOption)(nil)).Elem()\n}\n\ntype VirtualResourcePoolSpec struct {\n\tDynamicData\n\n\tVrpId            string                    `xml:\"vrpId,omitempty\"`\n\tVrpName          string                    `xml:\"vrpName,omitempty\"`\n\tDescription      string                    `xml:\"description,omitempty\"`\n\tCpuAllocation    VrpResourceAllocationInfo `xml:\"cpuAllocation\"`\n\tMemoryAllocation VrpResourceAllocationInfo `xml:\"memoryAllocation\"`\n\tRpList           []ManagedObjectReference  `xml:\"rpList,omitempty\"`\n\tHubList          []ManagedObjectReference  `xml:\"hubList,omitempty\"`\n\tRootVRP          *bool                     `xml:\"rootVRP\"`\n\tStaticVRP        
*bool                     `xml:\"staticVRP\"`\n\tChangeVersion    int64                     `xml:\"changeVersion,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualResourcePoolSpec\"] = reflect.TypeOf((*VirtualResourcePoolSpec)(nil)).Elem()\n}\n\ntype VirtualResourcePoolUsage struct {\n\tDynamicData\n\n\tVrpId                 string `xml:\"vrpId\"`\n\tCpuReservationMhz     int64  `xml:\"cpuReservationMhz\"`\n\tMemReservationMB      int64  `xml:\"memReservationMB\"`\n\tCpuReservationUsedMhz int64  `xml:\"cpuReservationUsedMhz\"`\n\tMemReservationUsedMB  int64  `xml:\"memReservationUsedMB\"`\n}\n\nfunc init() {\n\tt[\"VirtualResourcePoolUsage\"] = reflect.TypeOf((*VirtualResourcePoolUsage)(nil)).Elem()\n}\n\ntype VirtualSATAController struct {\n\tVirtualController\n}\n\nfunc init() {\n\tt[\"VirtualSATAController\"] = reflect.TypeOf((*VirtualSATAController)(nil)).Elem()\n}\n\ntype VirtualSATAControllerOption struct {\n\tVirtualControllerOption\n\n\tNumSATADisks  IntOption `xml:\"numSATADisks\"`\n\tNumSATACdroms IntOption `xml:\"numSATACdroms\"`\n}\n\nfunc init() {\n\tt[\"VirtualSATAControllerOption\"] = reflect.TypeOf((*VirtualSATAControllerOption)(nil)).Elem()\n}\n\ntype VirtualSCSIController struct {\n\tVirtualController\n\n\tHotAddRemove       *bool              `xml:\"hotAddRemove\"`\n\tSharedBus          VirtualSCSISharing `xml:\"sharedBus\"`\n\tScsiCtlrUnitNumber int32              `xml:\"scsiCtlrUnitNumber,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualSCSIController\"] = reflect.TypeOf((*VirtualSCSIController)(nil)).Elem()\n}\n\ntype VirtualSCSIControllerOption struct {\n\tVirtualControllerOption\n\n\tNumSCSIDisks       IntOption            `xml:\"numSCSIDisks\"`\n\tNumSCSICdroms      IntOption            `xml:\"numSCSICdroms\"`\n\tNumSCSIPassthrough IntOption            `xml:\"numSCSIPassthrough\"`\n\tSharing            []VirtualSCSISharing `xml:\"sharing\"`\n\tDefaultSharedIndex int32                `xml:\"defaultSharedIndex\"`\n\tHotAddRemove       BoolOption   
        `xml:\"hotAddRemove\"`\n\tScsiCtlrUnitNumber int32                `xml:\"scsiCtlrUnitNumber\"`\n}\n\nfunc init() {\n\tt[\"VirtualSCSIControllerOption\"] = reflect.TypeOf((*VirtualSCSIControllerOption)(nil)).Elem()\n}\n\ntype VirtualSCSIPassthrough struct {\n\tVirtualDevice\n}\n\nfunc init() {\n\tt[\"VirtualSCSIPassthrough\"] = reflect.TypeOf((*VirtualSCSIPassthrough)(nil)).Elem()\n}\n\ntype VirtualSCSIPassthroughDeviceBackingInfo struct {\n\tVirtualDeviceDeviceBackingInfo\n}\n\nfunc init() {\n\tt[\"VirtualSCSIPassthroughDeviceBackingInfo\"] = reflect.TypeOf((*VirtualSCSIPassthroughDeviceBackingInfo)(nil)).Elem()\n}\n\ntype VirtualSCSIPassthroughDeviceBackingOption struct {\n\tVirtualDeviceDeviceBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualSCSIPassthroughDeviceBackingOption\"] = reflect.TypeOf((*VirtualSCSIPassthroughDeviceBackingOption)(nil)).Elem()\n}\n\ntype VirtualSCSIPassthroughOption struct {\n\tVirtualDeviceOption\n}\n\nfunc init() {\n\tt[\"VirtualSCSIPassthroughOption\"] = reflect.TypeOf((*VirtualSCSIPassthroughOption)(nil)).Elem()\n}\n\ntype VirtualSIOController struct {\n\tVirtualController\n}\n\nfunc init() {\n\tt[\"VirtualSIOController\"] = reflect.TypeOf((*VirtualSIOController)(nil)).Elem()\n}\n\ntype VirtualSIOControllerOption struct {\n\tVirtualControllerOption\n\n\tNumFloppyDrives  IntOption `xml:\"numFloppyDrives\"`\n\tNumSerialPorts   IntOption `xml:\"numSerialPorts\"`\n\tNumParallelPorts IntOption `xml:\"numParallelPorts\"`\n}\n\nfunc init() {\n\tt[\"VirtualSIOControllerOption\"] = reflect.TypeOf((*VirtualSIOControllerOption)(nil)).Elem()\n}\n\ntype VirtualSerialPort struct {\n\tVirtualDevice\n\n\tYieldOnPoll bool `xml:\"yieldOnPoll\"`\n}\n\nfunc init() {\n\tt[\"VirtualSerialPort\"] = reflect.TypeOf((*VirtualSerialPort)(nil)).Elem()\n}\n\ntype VirtualSerialPortDeviceBackingInfo struct {\n\tVirtualDeviceDeviceBackingInfo\n}\n\nfunc init() {\n\tt[\"VirtualSerialPortDeviceBackingInfo\"] = 
reflect.TypeOf((*VirtualSerialPortDeviceBackingInfo)(nil)).Elem()\n}\n\ntype VirtualSerialPortDeviceBackingOption struct {\n\tVirtualDeviceDeviceBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualSerialPortDeviceBackingOption\"] = reflect.TypeOf((*VirtualSerialPortDeviceBackingOption)(nil)).Elem()\n}\n\ntype VirtualSerialPortFileBackingInfo struct {\n\tVirtualDeviceFileBackingInfo\n}\n\nfunc init() {\n\tt[\"VirtualSerialPortFileBackingInfo\"] = reflect.TypeOf((*VirtualSerialPortFileBackingInfo)(nil)).Elem()\n}\n\ntype VirtualSerialPortFileBackingOption struct {\n\tVirtualDeviceFileBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualSerialPortFileBackingOption\"] = reflect.TypeOf((*VirtualSerialPortFileBackingOption)(nil)).Elem()\n}\n\ntype VirtualSerialPortOption struct {\n\tVirtualDeviceOption\n\n\tYieldOnPoll BoolOption `xml:\"yieldOnPoll\"`\n}\n\nfunc init() {\n\tt[\"VirtualSerialPortOption\"] = reflect.TypeOf((*VirtualSerialPortOption)(nil)).Elem()\n}\n\ntype VirtualSerialPortPipeBackingInfo struct {\n\tVirtualDevicePipeBackingInfo\n\n\tEndpoint string `xml:\"endpoint\"`\n\tNoRxLoss *bool  `xml:\"noRxLoss\"`\n}\n\nfunc init() {\n\tt[\"VirtualSerialPortPipeBackingInfo\"] = reflect.TypeOf((*VirtualSerialPortPipeBackingInfo)(nil)).Elem()\n}\n\ntype VirtualSerialPortPipeBackingOption struct {\n\tVirtualDevicePipeBackingOption\n\n\tEndpoint ChoiceOption `xml:\"endpoint\"`\n\tNoRxLoss BoolOption   `xml:\"noRxLoss\"`\n}\n\nfunc init() {\n\tt[\"VirtualSerialPortPipeBackingOption\"] = reflect.TypeOf((*VirtualSerialPortPipeBackingOption)(nil)).Elem()\n}\n\ntype VirtualSerialPortThinPrintBackingInfo struct {\n\tVirtualDeviceBackingInfo\n}\n\nfunc init() {\n\tt[\"VirtualSerialPortThinPrintBackingInfo\"] = reflect.TypeOf((*VirtualSerialPortThinPrintBackingInfo)(nil)).Elem()\n}\n\ntype VirtualSerialPortThinPrintBackingOption struct {\n\tVirtualDeviceBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualSerialPortThinPrintBackingOption\"] = 
reflect.TypeOf((*VirtualSerialPortThinPrintBackingOption)(nil)).Elem()\n}\n\ntype VirtualSerialPortURIBackingInfo struct {\n\tVirtualDeviceURIBackingInfo\n}\n\nfunc init() {\n\tt[\"VirtualSerialPortURIBackingInfo\"] = reflect.TypeOf((*VirtualSerialPortURIBackingInfo)(nil)).Elem()\n}\n\ntype VirtualSerialPortURIBackingOption struct {\n\tVirtualDeviceURIBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualSerialPortURIBackingOption\"] = reflect.TypeOf((*VirtualSerialPortURIBackingOption)(nil)).Elem()\n}\n\ntype VirtualSoundBlaster16 struct {\n\tVirtualSoundCard\n}\n\nfunc init() {\n\tt[\"VirtualSoundBlaster16\"] = reflect.TypeOf((*VirtualSoundBlaster16)(nil)).Elem()\n}\n\ntype VirtualSoundBlaster16Option struct {\n\tVirtualSoundCardOption\n}\n\nfunc init() {\n\tt[\"VirtualSoundBlaster16Option\"] = reflect.TypeOf((*VirtualSoundBlaster16Option)(nil)).Elem()\n}\n\ntype VirtualSoundCard struct {\n\tVirtualDevice\n}\n\nfunc init() {\n\tt[\"VirtualSoundCard\"] = reflect.TypeOf((*VirtualSoundCard)(nil)).Elem()\n}\n\ntype VirtualSoundCardDeviceBackingInfo struct {\n\tVirtualDeviceDeviceBackingInfo\n}\n\nfunc init() {\n\tt[\"VirtualSoundCardDeviceBackingInfo\"] = reflect.TypeOf((*VirtualSoundCardDeviceBackingInfo)(nil)).Elem()\n}\n\ntype VirtualSoundCardDeviceBackingOption struct {\n\tVirtualDeviceDeviceBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualSoundCardDeviceBackingOption\"] = reflect.TypeOf((*VirtualSoundCardDeviceBackingOption)(nil)).Elem()\n}\n\ntype VirtualSoundCardOption struct {\n\tVirtualDeviceOption\n}\n\nfunc init() {\n\tt[\"VirtualSoundCardOption\"] = reflect.TypeOf((*VirtualSoundCardOption)(nil)).Elem()\n}\n\ntype VirtualSriovEthernetCard struct {\n\tVirtualEthernetCard\n\n\tAllowGuestOSMtuChange *bool                                     `xml:\"allowGuestOSMtuChange\"`\n\tSriovBacking          *VirtualSriovEthernetCardSriovBackingInfo `xml:\"sriovBacking,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualSriovEthernetCard\"] = 
reflect.TypeOf((*VirtualSriovEthernetCard)(nil)).Elem()\n}\n\ntype VirtualSriovEthernetCardOption struct {\n\tVirtualEthernetCardOption\n}\n\nfunc init() {\n\tt[\"VirtualSriovEthernetCardOption\"] = reflect.TypeOf((*VirtualSriovEthernetCardOption)(nil)).Elem()\n}\n\ntype VirtualSriovEthernetCardSriovBackingInfo struct {\n\tVirtualDeviceBackingInfo\n\n\tPhysicalFunctionBacking *VirtualPCIPassthroughDeviceBackingInfo `xml:\"physicalFunctionBacking,omitempty\"`\n\tVirtualFunctionBacking  *VirtualPCIPassthroughDeviceBackingInfo `xml:\"virtualFunctionBacking,omitempty\"`\n\tVirtualFunctionIndex    int32                                   `xml:\"virtualFunctionIndex,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualSriovEthernetCardSriovBackingInfo\"] = reflect.TypeOf((*VirtualSriovEthernetCardSriovBackingInfo)(nil)).Elem()\n}\n\ntype VirtualSriovEthernetCardSriovBackingOption struct {\n\tVirtualDeviceBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualSriovEthernetCardSriovBackingOption\"] = reflect.TypeOf((*VirtualSriovEthernetCardSriovBackingOption)(nil)).Elem()\n}\n\ntype VirtualSwitchProfile struct {\n\tApplyProfile\n\n\tKey           string               `xml:\"key\"`\n\tName          string               `xml:\"name\"`\n\tLink          LinkProfile          `xml:\"link\"`\n\tNumPorts      NumPortsProfile      `xml:\"numPorts\"`\n\tNetworkPolicy NetworkPolicyProfile `xml:\"networkPolicy\"`\n}\n\nfunc init() {\n\tt[\"VirtualSwitchProfile\"] = reflect.TypeOf((*VirtualSwitchProfile)(nil)).Elem()\n}\n\ntype VirtualSwitchSelectionProfile struct {\n\tApplyProfile\n}\n\nfunc init() {\n\tt[\"VirtualSwitchSelectionProfile\"] = reflect.TypeOf((*VirtualSwitchSelectionProfile)(nil)).Elem()\n}\n\ntype VirtualUSB struct {\n\tVirtualDevice\n\n\tConnected bool     `xml:\"connected\"`\n\tVendor    int32    `xml:\"vendor,omitempty\"`\n\tProduct   int32    `xml:\"product,omitempty\"`\n\tFamily    []string `xml:\"family,omitempty\"`\n\tSpeed     []string `xml:\"speed,omitempty\"`\n}\n\nfunc 
init() {\n\tt[\"VirtualUSB\"] = reflect.TypeOf((*VirtualUSB)(nil)).Elem()\n}\n\ntype VirtualUSBController struct {\n\tVirtualController\n\n\tAutoConnectDevices *bool `xml:\"autoConnectDevices\"`\n\tEhciEnabled        *bool `xml:\"ehciEnabled\"`\n}\n\nfunc init() {\n\tt[\"VirtualUSBController\"] = reflect.TypeOf((*VirtualUSBController)(nil)).Elem()\n}\n\ntype VirtualUSBControllerOption struct {\n\tVirtualControllerOption\n\n\tAutoConnectDevices BoolOption `xml:\"autoConnectDevices\"`\n\tEhciSupported      BoolOption `xml:\"ehciSupported\"`\n\tSupportedSpeeds    []string   `xml:\"supportedSpeeds,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualUSBControllerOption\"] = reflect.TypeOf((*VirtualUSBControllerOption)(nil)).Elem()\n}\n\ntype VirtualUSBControllerPciBusSlotInfo struct {\n\tVirtualDevicePciBusSlotInfo\n\n\tEhciPciSlotNumber int32 `xml:\"ehciPciSlotNumber,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualUSBControllerPciBusSlotInfo\"] = reflect.TypeOf((*VirtualUSBControllerPciBusSlotInfo)(nil)).Elem()\n}\n\ntype VirtualUSBOption struct {\n\tVirtualDeviceOption\n}\n\nfunc init() {\n\tt[\"VirtualUSBOption\"] = reflect.TypeOf((*VirtualUSBOption)(nil)).Elem()\n}\n\ntype VirtualUSBRemoteClientBackingInfo struct {\n\tVirtualDeviceRemoteDeviceBackingInfo\n\n\tHostname string `xml:\"hostname\"`\n}\n\nfunc init() {\n\tt[\"VirtualUSBRemoteClientBackingInfo\"] = reflect.TypeOf((*VirtualUSBRemoteClientBackingInfo)(nil)).Elem()\n}\n\ntype VirtualUSBRemoteClientBackingOption struct {\n\tVirtualDeviceRemoteDeviceBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualUSBRemoteClientBackingOption\"] = reflect.TypeOf((*VirtualUSBRemoteClientBackingOption)(nil)).Elem()\n}\n\ntype VirtualUSBRemoteHostBackingInfo struct {\n\tVirtualDeviceDeviceBackingInfo\n\n\tHostname string `xml:\"hostname\"`\n}\n\nfunc init() {\n\tt[\"VirtualUSBRemoteHostBackingInfo\"] = reflect.TypeOf((*VirtualUSBRemoteHostBackingInfo)(nil)).Elem()\n}\n\ntype VirtualUSBRemoteHostBackingOption struct 
{\n\tVirtualDeviceDeviceBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualUSBRemoteHostBackingOption\"] = reflect.TypeOf((*VirtualUSBRemoteHostBackingOption)(nil)).Elem()\n}\n\ntype VirtualUSBUSBBackingInfo struct {\n\tVirtualDeviceDeviceBackingInfo\n}\n\nfunc init() {\n\tt[\"VirtualUSBUSBBackingInfo\"] = reflect.TypeOf((*VirtualUSBUSBBackingInfo)(nil)).Elem()\n}\n\ntype VirtualUSBUSBBackingOption struct {\n\tVirtualDeviceDeviceBackingOption\n}\n\nfunc init() {\n\tt[\"VirtualUSBUSBBackingOption\"] = reflect.TypeOf((*VirtualUSBUSBBackingOption)(nil)).Elem()\n}\n\ntype VirtualUSBXHCIController struct {\n\tVirtualController\n\n\tAutoConnectDevices *bool `xml:\"autoConnectDevices\"`\n}\n\nfunc init() {\n\tt[\"VirtualUSBXHCIController\"] = reflect.TypeOf((*VirtualUSBXHCIController)(nil)).Elem()\n}\n\ntype VirtualUSBXHCIControllerOption struct {\n\tVirtualControllerOption\n\n\tAutoConnectDevices BoolOption `xml:\"autoConnectDevices\"`\n\tSupportedSpeeds    []string   `xml:\"supportedSpeeds\"`\n}\n\nfunc init() {\n\tt[\"VirtualUSBXHCIControllerOption\"] = reflect.TypeOf((*VirtualUSBXHCIControllerOption)(nil)).Elem()\n}\n\ntype VirtualVMIROMOption struct {\n\tVirtualDeviceOption\n}\n\nfunc init() {\n\tt[\"VirtualVMIROMOption\"] = reflect.TypeOf((*VirtualVMIROMOption)(nil)).Elem()\n}\n\ntype VirtualVideoCardOption struct {\n\tVirtualDeviceOption\n\n\tVideoRamSizeInKB            *LongOption `xml:\"videoRamSizeInKB,omitempty\"`\n\tNumDisplays                 *IntOption  `xml:\"numDisplays,omitempty\"`\n\tUseAutoDetect               *BoolOption `xml:\"useAutoDetect,omitempty\"`\n\tSupport3D                   *BoolOption `xml:\"support3D,omitempty\"`\n\tUse3dRendererSupported      *BoolOption `xml:\"use3dRendererSupported,omitempty\"`\n\tGraphicsMemorySizeInKB      *LongOption `xml:\"graphicsMemorySizeInKB,omitempty\"`\n\tGraphicsMemorySizeSupported *BoolOption `xml:\"graphicsMemorySizeSupported,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VirtualVideoCardOption\"] = 
reflect.TypeOf((*VirtualVideoCardOption)(nil)).Elem()\n}\n\ntype VirtualVmxnet struct {\n\tVirtualEthernetCard\n}\n\nfunc init() {\n\tt[\"VirtualVmxnet\"] = reflect.TypeOf((*VirtualVmxnet)(nil)).Elem()\n}\n\ntype VirtualVmxnet2 struct {\n\tVirtualVmxnet\n}\n\nfunc init() {\n\tt[\"VirtualVmxnet2\"] = reflect.TypeOf((*VirtualVmxnet2)(nil)).Elem()\n}\n\ntype VirtualVmxnet2Option struct {\n\tVirtualVmxnetOption\n}\n\nfunc init() {\n\tt[\"VirtualVmxnet2Option\"] = reflect.TypeOf((*VirtualVmxnet2Option)(nil)).Elem()\n}\n\ntype VirtualVmxnet3 struct {\n\tVirtualVmxnet\n}\n\nfunc init() {\n\tt[\"VirtualVmxnet3\"] = reflect.TypeOf((*VirtualVmxnet3)(nil)).Elem()\n}\n\ntype VirtualVmxnet3Option struct {\n\tVirtualVmxnetOption\n}\n\nfunc init() {\n\tt[\"VirtualVmxnet3Option\"] = reflect.TypeOf((*VirtualVmxnet3Option)(nil)).Elem()\n}\n\ntype VirtualVmxnet3Vrdma struct {\n\tVirtualVmxnet3\n}\n\nfunc init() {\n\tt[\"VirtualVmxnet3Vrdma\"] = reflect.TypeOf((*VirtualVmxnet3Vrdma)(nil)).Elem()\n}\n\ntype VirtualVmxnet3VrdmaOption struct {\n\tVirtualVmxnet3Option\n}\n\nfunc init() {\n\tt[\"VirtualVmxnet3VrdmaOption\"] = reflect.TypeOf((*VirtualVmxnet3VrdmaOption)(nil)).Elem()\n}\n\ntype VirtualVmxnetOption struct {\n\tVirtualEthernetCardOption\n}\n\nfunc init() {\n\tt[\"VirtualVmxnetOption\"] = reflect.TypeOf((*VirtualVmxnetOption)(nil)).Elem()\n}\n\ntype VlanProfile struct {\n\tApplyProfile\n}\n\nfunc init() {\n\tt[\"VlanProfile\"] = reflect.TypeOf((*VlanProfile)(nil)).Elem()\n}\n\ntype VmAcquiredMksTicketEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmAcquiredMksTicketEvent\"] = reflect.TypeOf((*VmAcquiredMksTicketEvent)(nil)).Elem()\n}\n\ntype VmAcquiredTicketEvent struct {\n\tVmEvent\n\n\tTicketType string `xml:\"ticketType\"`\n}\n\nfunc init() {\n\tt[\"VmAcquiredTicketEvent\"] = reflect.TypeOf((*VmAcquiredTicketEvent)(nil)).Elem()\n}\n\ntype VmAlreadyExistsInDatacenter struct {\n\tInvalidFolder\n\n\tHost     ManagedObjectReference   `xml:\"host\"`\n\tHostname string       
            `xml:\"hostname\"`\n\tVm       []ManagedObjectReference `xml:\"vm\"`\n}\n\nfunc init() {\n\tt[\"VmAlreadyExistsInDatacenter\"] = reflect.TypeOf((*VmAlreadyExistsInDatacenter)(nil)).Elem()\n}\n\ntype VmAlreadyExistsInDatacenterFault VmAlreadyExistsInDatacenter\n\nfunc init() {\n\tt[\"VmAlreadyExistsInDatacenterFault\"] = reflect.TypeOf((*VmAlreadyExistsInDatacenterFault)(nil)).Elem()\n}\n\ntype VmAutoRenameEvent struct {\n\tVmEvent\n\n\tOldName string `xml:\"oldName\"`\n\tNewName string `xml:\"newName\"`\n}\n\nfunc init() {\n\tt[\"VmAutoRenameEvent\"] = reflect.TypeOf((*VmAutoRenameEvent)(nil)).Elem()\n}\n\ntype VmBeingClonedEvent struct {\n\tVmCloneEvent\n\n\tDestFolder FolderEventArgument `xml:\"destFolder\"`\n\tDestName   string              `xml:\"destName\"`\n\tDestHost   HostEventArgument   `xml:\"destHost\"`\n}\n\nfunc init() {\n\tt[\"VmBeingClonedEvent\"] = reflect.TypeOf((*VmBeingClonedEvent)(nil)).Elem()\n}\n\ntype VmBeingClonedNoFolderEvent struct {\n\tVmCloneEvent\n\n\tDestName string            `xml:\"destName\"`\n\tDestHost HostEventArgument `xml:\"destHost\"`\n}\n\nfunc init() {\n\tt[\"VmBeingClonedNoFolderEvent\"] = reflect.TypeOf((*VmBeingClonedNoFolderEvent)(nil)).Elem()\n}\n\ntype VmBeingCreatedEvent struct {\n\tVmEvent\n\n\tConfigSpec *VirtualMachineConfigSpec `xml:\"configSpec,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmBeingCreatedEvent\"] = reflect.TypeOf((*VmBeingCreatedEvent)(nil)).Elem()\n}\n\ntype VmBeingDeployedEvent struct {\n\tVmEvent\n\n\tSrcTemplate VmEventArgument `xml:\"srcTemplate\"`\n}\n\nfunc init() {\n\tt[\"VmBeingDeployedEvent\"] = reflect.TypeOf((*VmBeingDeployedEvent)(nil)).Elem()\n}\n\ntype VmBeingHotMigratedEvent struct {\n\tVmEvent\n\n\tDestHost       HostEventArgument        `xml:\"destHost\"`\n\tDestDatacenter *DatacenterEventArgument `xml:\"destDatacenter,omitempty\"`\n\tDestDatastore  *DatastoreEventArgument  `xml:\"destDatastore,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmBeingHotMigratedEvent\"] = 
reflect.TypeOf((*VmBeingHotMigratedEvent)(nil)).Elem()
}

// VmBeingMigratedEvent carries the migration destination: the target host is
// required, while the destination datacenter/datastore are optional
// (pointer + omitempty).
type VmBeingMigratedEvent struct {
	VmEvent

	DestHost       HostEventArgument        `xml:"destHost"`
	DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty"`
	DestDatastore  *DatastoreEventArgument  `xml:"destDatastore,omitempty"`
}

// init registers the type under its vSphere API name in the package-level
// type map t — presumably consumed by reflection-based XML decoding; confirm
// against the map's reader, which is not visible in this chunk.
func init() {
	t["VmBeingMigratedEvent"] = reflect.TypeOf((*VmBeingMigratedEvent)(nil)).Elem()
}

// VmBeingRelocatedEvent mirrors VmBeingMigratedEvent's destination fields but
// embeds VmRelocateSpecEvent instead of VmEvent.
type VmBeingRelocatedEvent struct {
	VmRelocateSpecEvent

	DestHost       HostEventArgument        `xml:"destHost"`
	DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty"`
	DestDatastore  *DatastoreEventArgument  `xml:"destDatastore,omitempty"`
}

func init() {
	t["VmBeingRelocatedEvent"] = reflect.TypeOf((*VmBeingRelocatedEvent)(nil)).Elem()
}

// VmCloneEvent is a marker base for clone-related VM events; it adds no
// fields beyond the embedded VmEvent.
type VmCloneEvent struct {
	VmEvent
}

func init() {
	t["VmCloneEvent"] = reflect.TypeOf((*VmCloneEvent)(nil)).Elem()
}

// VmCloneFailedEvent records the intended clone destination plus the fault
// (Reason) that caused the failure.
type VmCloneFailedEvent struct {
	VmCloneEvent

	DestFolder FolderEventArgument  `xml:"destFolder"`
	DestName   string               `xml:"destName"`
	DestHost   HostEventArgument    `xml:"destHost"`
	Reason     LocalizedMethodFault `xml:"reason"`
}

func init() {
	t["VmCloneFailedEvent"] = reflect.TypeOf((*VmCloneFailedEvent)(nil)).Elem()
}

// VmClonedEvent identifies the source VM a successful clone was taken from.
type VmClonedEvent struct {
	VmCloneEvent

	SourceVm VmEventArgument `xml:"sourceVm"`
}

func init() {
	t["VmClonedEvent"] = reflect.TypeOf((*VmClonedEvent)(nil)).Elem()
}

// VmConfigFault is a marker fault type embedding VimFault.
type VmConfigFault struct {
	VimFault
}

func init() {
	t["VmConfigFault"] = reflect.TypeOf((*VmConfigFault)(nil)).Elem()
}

// VmConfigFaultFault is the fault-wrapper alias of BaseVmConfigFault,
// registered separately so it resolves under its own API name.
type VmConfigFaultFault BaseVmConfigFault

func init() {
	t["VmConfigFaultFault"] = reflect.TypeOf((*VmConfigFaultFault)(nil)).Elem()
}

// VmConfigFileEncryptionInfo holds the optional encryption key identifier of
// a VM config file; KeyId is nil for unencrypted files (omitempty).
type VmConfigFileEncryptionInfo struct {
	DynamicData

	KeyId *CryptoKeyId `xml:"keyId,omitempty"`
}

func init() {
	t["VmConfigFileEncryptionInfo"] = 
reflect.TypeOf((*VmConfigFileEncryptionInfo)(nil)).Elem()\n}\n\ntype VmConfigFileInfo struct {\n\tFileInfo\n\n\tConfigVersion int32                       `xml:\"configVersion,omitempty\"`\n\tEncryption    *VmConfigFileEncryptionInfo `xml:\"encryption,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmConfigFileInfo\"] = reflect.TypeOf((*VmConfigFileInfo)(nil)).Elem()\n}\n\ntype VmConfigFileQuery struct {\n\tFileQuery\n\n\tFilter  *VmConfigFileQueryFilter `xml:\"filter,omitempty\"`\n\tDetails *VmConfigFileQueryFlags  `xml:\"details,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmConfigFileQuery\"] = reflect.TypeOf((*VmConfigFileQuery)(nil)).Elem()\n}\n\ntype VmConfigFileQueryFilter struct {\n\tDynamicData\n\n\tMatchConfigVersion []int32 `xml:\"matchConfigVersion,omitempty\"`\n\tEncrypted          *bool   `xml:\"encrypted\"`\n}\n\nfunc init() {\n\tt[\"VmConfigFileQueryFilter\"] = reflect.TypeOf((*VmConfigFileQueryFilter)(nil)).Elem()\n}\n\ntype VmConfigFileQueryFlags struct {\n\tDynamicData\n\n\tConfigVersion bool  `xml:\"configVersion\"`\n\tEncryption    *bool `xml:\"encryption\"`\n}\n\nfunc init() {\n\tt[\"VmConfigFileQueryFlags\"] = reflect.TypeOf((*VmConfigFileQueryFlags)(nil)).Elem()\n}\n\ntype VmConfigIncompatibleForFaultTolerance struct {\n\tVmConfigFault\n\n\tFault *LocalizedMethodFault `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmConfigIncompatibleForFaultTolerance\"] = reflect.TypeOf((*VmConfigIncompatibleForFaultTolerance)(nil)).Elem()\n}\n\ntype VmConfigIncompatibleForFaultToleranceFault VmConfigIncompatibleForFaultTolerance\n\nfunc init() {\n\tt[\"VmConfigIncompatibleForFaultToleranceFault\"] = reflect.TypeOf((*VmConfigIncompatibleForFaultToleranceFault)(nil)).Elem()\n}\n\ntype VmConfigIncompatibleForRecordReplay struct {\n\tVmConfigFault\n\n\tFault *LocalizedMethodFault `xml:\"fault,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmConfigIncompatibleForRecordReplay\"] = reflect.TypeOf((*VmConfigIncompatibleForRecordReplay)(nil)).Elem()\n}\n\ntype 
VmConfigIncompatibleForRecordReplayFault VmConfigIncompatibleForRecordReplay\n\nfunc init() {\n\tt[\"VmConfigIncompatibleForRecordReplayFault\"] = reflect.TypeOf((*VmConfigIncompatibleForRecordReplayFault)(nil)).Elem()\n}\n\ntype VmConfigInfo struct {\n\tDynamicData\n\n\tProduct                 []VAppProductInfo    `xml:\"product,omitempty\"`\n\tProperty                []VAppPropertyInfo   `xml:\"property,omitempty\"`\n\tIpAssignment            VAppIPAssignmentInfo `xml:\"ipAssignment\"`\n\tEula                    []string             `xml:\"eula,omitempty\"`\n\tOvfSection              []VAppOvfSectionInfo `xml:\"ovfSection,omitempty\"`\n\tOvfEnvironmentTransport []string             `xml:\"ovfEnvironmentTransport,omitempty\"`\n\tInstallBootRequired     bool                 `xml:\"installBootRequired\"`\n\tInstallBootStopDelay    int32                `xml:\"installBootStopDelay\"`\n}\n\nfunc init() {\n\tt[\"VmConfigInfo\"] = reflect.TypeOf((*VmConfigInfo)(nil)).Elem()\n}\n\ntype VmConfigMissingEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmConfigMissingEvent\"] = reflect.TypeOf((*VmConfigMissingEvent)(nil)).Elem()\n}\n\ntype VmConfigSpec struct {\n\tDynamicData\n\n\tProduct                 []VAppProductSpec     `xml:\"product,omitempty\"`\n\tProperty                []VAppPropertySpec    `xml:\"property,omitempty\"`\n\tIpAssignment            *VAppIPAssignmentInfo `xml:\"ipAssignment,omitempty\"`\n\tEula                    []string              `xml:\"eula,omitempty\"`\n\tOvfSection              []VAppOvfSectionSpec  `xml:\"ovfSection,omitempty\"`\n\tOvfEnvironmentTransport []string              `xml:\"ovfEnvironmentTransport,omitempty\"`\n\tInstallBootRequired     *bool                 `xml:\"installBootRequired\"`\n\tInstallBootStopDelay    int32                 `xml:\"installBootStopDelay,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmConfigSpec\"] = reflect.TypeOf((*VmConfigSpec)(nil)).Elem()\n}\n\ntype VmConnectedEvent struct {\n\tVmEvent\n}\n\nfunc init() 
{\n\tt[\"VmConnectedEvent\"] = reflect.TypeOf((*VmConnectedEvent)(nil)).Elem()\n}\n\ntype VmCreatedEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmCreatedEvent\"] = reflect.TypeOf((*VmCreatedEvent)(nil)).Elem()\n}\n\ntype VmDasBeingResetEvent struct {\n\tVmEvent\n\n\tReason string `xml:\"reason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmDasBeingResetEvent\"] = reflect.TypeOf((*VmDasBeingResetEvent)(nil)).Elem()\n}\n\ntype VmDasBeingResetWithScreenshotEvent struct {\n\tVmDasBeingResetEvent\n\n\tScreenshotFilePath string `xml:\"screenshotFilePath\"`\n}\n\nfunc init() {\n\tt[\"VmDasBeingResetWithScreenshotEvent\"] = reflect.TypeOf((*VmDasBeingResetWithScreenshotEvent)(nil)).Elem()\n}\n\ntype VmDasResetFailedEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmDasResetFailedEvent\"] = reflect.TypeOf((*VmDasResetFailedEvent)(nil)).Elem()\n}\n\ntype VmDasUpdateErrorEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmDasUpdateErrorEvent\"] = reflect.TypeOf((*VmDasUpdateErrorEvent)(nil)).Elem()\n}\n\ntype VmDasUpdateOkEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmDasUpdateOkEvent\"] = reflect.TypeOf((*VmDasUpdateOkEvent)(nil)).Elem()\n}\n\ntype VmDateRolledBackEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmDateRolledBackEvent\"] = reflect.TypeOf((*VmDateRolledBackEvent)(nil)).Elem()\n}\n\ntype VmDeployFailedEvent struct {\n\tVmEvent\n\n\tDestDatastore BaseEntityEventArgument `xml:\"destDatastore,typeattr\"`\n\tReason        LocalizedMethodFault    `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"VmDeployFailedEvent\"] = reflect.TypeOf((*VmDeployFailedEvent)(nil)).Elem()\n}\n\ntype VmDeployedEvent struct {\n\tVmEvent\n\n\tSrcTemplate VmEventArgument `xml:\"srcTemplate\"`\n}\n\nfunc init() {\n\tt[\"VmDeployedEvent\"] = reflect.TypeOf((*VmDeployedEvent)(nil)).Elem()\n}\n\ntype VmDisconnectedEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmDisconnectedEvent\"] = reflect.TypeOf((*VmDisconnectedEvent)(nil)).Elem()\n}\n\ntype VmDiscoveredEvent struct 
{\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmDiscoveredEvent\"] = reflect.TypeOf((*VmDiscoveredEvent)(nil)).Elem()\n}\n\ntype VmDiskFailedEvent struct {\n\tVmEvent\n\n\tDisk   string               `xml:\"disk\"`\n\tReason LocalizedMethodFault `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"VmDiskFailedEvent\"] = reflect.TypeOf((*VmDiskFailedEvent)(nil)).Elem()\n}\n\ntype VmDiskFileEncryptionInfo struct {\n\tDynamicData\n\n\tKeyId *CryptoKeyId `xml:\"keyId,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmDiskFileEncryptionInfo\"] = reflect.TypeOf((*VmDiskFileEncryptionInfo)(nil)).Elem()\n}\n\ntype VmDiskFileInfo struct {\n\tFileInfo\n\n\tDiskType        string                    `xml:\"diskType,omitempty\"`\n\tCapacityKb      int64                     `xml:\"capacityKb,omitempty\"`\n\tHardwareVersion int32                     `xml:\"hardwareVersion,omitempty\"`\n\tControllerType  string                    `xml:\"controllerType,omitempty\"`\n\tDiskExtents     []string                  `xml:\"diskExtents,omitempty\"`\n\tThin            *bool                     `xml:\"thin\"`\n\tEncryption      *VmDiskFileEncryptionInfo `xml:\"encryption,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmDiskFileInfo\"] = reflect.TypeOf((*VmDiskFileInfo)(nil)).Elem()\n}\n\ntype VmDiskFileQuery struct {\n\tFileQuery\n\n\tFilter  *VmDiskFileQueryFilter `xml:\"filter,omitempty\"`\n\tDetails *VmDiskFileQueryFlags  `xml:\"details,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmDiskFileQuery\"] = reflect.TypeOf((*VmDiskFileQuery)(nil)).Elem()\n}\n\ntype VmDiskFileQueryFilter struct {\n\tDynamicData\n\n\tDiskType             []string `xml:\"diskType,omitempty\"`\n\tMatchHardwareVersion []int32  `xml:\"matchHardwareVersion,omitempty\"`\n\tControllerType       []string `xml:\"controllerType,omitempty\"`\n\tThin                 *bool    `xml:\"thin\"`\n\tEncrypted            *bool    `xml:\"encrypted\"`\n}\n\nfunc init() {\n\tt[\"VmDiskFileQueryFilter\"] = reflect.TypeOf((*VmDiskFileQueryFilter)(nil)).Elem()\n}\n\ntype 
VmDiskFileQueryFlags struct {\n\tDynamicData\n\n\tDiskType        bool  `xml:\"diskType\"`\n\tCapacityKb      bool  `xml:\"capacityKb\"`\n\tHardwareVersion bool  `xml:\"hardwareVersion\"`\n\tControllerType  *bool `xml:\"controllerType\"`\n\tDiskExtents     *bool `xml:\"diskExtents\"`\n\tThin            *bool `xml:\"thin\"`\n\tEncryption      *bool `xml:\"encryption\"`\n}\n\nfunc init() {\n\tt[\"VmDiskFileQueryFlags\"] = reflect.TypeOf((*VmDiskFileQueryFlags)(nil)).Elem()\n}\n\ntype VmEmigratingEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmEmigratingEvent\"] = reflect.TypeOf((*VmEmigratingEvent)(nil)).Elem()\n}\n\ntype VmEndRecordingEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmEndRecordingEvent\"] = reflect.TypeOf((*VmEndRecordingEvent)(nil)).Elem()\n}\n\ntype VmEndReplayingEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmEndReplayingEvent\"] = reflect.TypeOf((*VmEndReplayingEvent)(nil)).Elem()\n}\n\ntype VmEvent struct {\n\tEvent\n\n\tTemplate bool `xml:\"template\"`\n}\n\nfunc init() {\n\tt[\"VmEvent\"] = reflect.TypeOf((*VmEvent)(nil)).Elem()\n}\n\ntype VmEventArgument struct {\n\tEntityEventArgument\n\n\tVm ManagedObjectReference `xml:\"vm\"`\n}\n\nfunc init() {\n\tt[\"VmEventArgument\"] = reflect.TypeOf((*VmEventArgument)(nil)).Elem()\n}\n\ntype VmFailedMigrateEvent struct {\n\tVmEvent\n\n\tDestHost       HostEventArgument        `xml:\"destHost\"`\n\tReason         LocalizedMethodFault     `xml:\"reason\"`\n\tDestDatacenter *DatacenterEventArgument `xml:\"destDatacenter,omitempty\"`\n\tDestDatastore  *DatastoreEventArgument  `xml:\"destDatastore,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmFailedMigrateEvent\"] = reflect.TypeOf((*VmFailedMigrateEvent)(nil)).Elem()\n}\n\ntype VmFailedRelayoutEvent struct {\n\tVmEvent\n\n\tReason LocalizedMethodFault `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"VmFailedRelayoutEvent\"] = reflect.TypeOf((*VmFailedRelayoutEvent)(nil)).Elem()\n}\n\ntype VmFailedRelayoutOnVmfs2DatastoreEvent struct 
{\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmFailedRelayoutOnVmfs2DatastoreEvent\"] = reflect.TypeOf((*VmFailedRelayoutOnVmfs2DatastoreEvent)(nil)).Elem()\n}\n\ntype VmFailedStartingSecondaryEvent struct {\n\tVmEvent\n\n\tReason string `xml:\"reason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmFailedStartingSecondaryEvent\"] = reflect.TypeOf((*VmFailedStartingSecondaryEvent)(nil)).Elem()\n}\n\ntype VmFailedToPowerOffEvent struct {\n\tVmEvent\n\n\tReason LocalizedMethodFault `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"VmFailedToPowerOffEvent\"] = reflect.TypeOf((*VmFailedToPowerOffEvent)(nil)).Elem()\n}\n\ntype VmFailedToPowerOnEvent struct {\n\tVmEvent\n\n\tReason LocalizedMethodFault `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"VmFailedToPowerOnEvent\"] = reflect.TypeOf((*VmFailedToPowerOnEvent)(nil)).Elem()\n}\n\ntype VmFailedToRebootGuestEvent struct {\n\tVmEvent\n\n\tReason LocalizedMethodFault `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"VmFailedToRebootGuestEvent\"] = reflect.TypeOf((*VmFailedToRebootGuestEvent)(nil)).Elem()\n}\n\ntype VmFailedToResetEvent struct {\n\tVmEvent\n\n\tReason LocalizedMethodFault `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"VmFailedToResetEvent\"] = reflect.TypeOf((*VmFailedToResetEvent)(nil)).Elem()\n}\n\ntype VmFailedToShutdownGuestEvent struct {\n\tVmEvent\n\n\tReason LocalizedMethodFault `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"VmFailedToShutdownGuestEvent\"] = reflect.TypeOf((*VmFailedToShutdownGuestEvent)(nil)).Elem()\n}\n\ntype VmFailedToStandbyGuestEvent struct {\n\tVmEvent\n\n\tReason LocalizedMethodFault `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"VmFailedToStandbyGuestEvent\"] = reflect.TypeOf((*VmFailedToStandbyGuestEvent)(nil)).Elem()\n}\n\ntype VmFailedToSuspendEvent struct {\n\tVmEvent\n\n\tReason LocalizedMethodFault `xml:\"reason\"`\n}\n\nfunc init() {\n\tt[\"VmFailedToSuspendEvent\"] = reflect.TypeOf((*VmFailedToSuspendEvent)(nil)).Elem()\n}\n\ntype VmFailedUpdatingSecondaryConfig struct {\n\tVmEvent\n}\n\nfunc init() 
{\n\tt[\"VmFailedUpdatingSecondaryConfig\"] = reflect.TypeOf((*VmFailedUpdatingSecondaryConfig)(nil)).Elem()\n}\n\ntype VmFailoverFailed struct {\n\tVmEvent\n\n\tReason *LocalizedMethodFault `xml:\"reason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmFailoverFailed\"] = reflect.TypeOf((*VmFailoverFailed)(nil)).Elem()\n}\n\ntype VmFaultToleranceConfigIssue struct {\n\tVmFaultToleranceIssue\n\n\tReason     string                  `xml:\"reason,omitempty\"`\n\tEntityName string                  `xml:\"entityName,omitempty\"`\n\tEntity     *ManagedObjectReference `xml:\"entity,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmFaultToleranceConfigIssue\"] = reflect.TypeOf((*VmFaultToleranceConfigIssue)(nil)).Elem()\n}\n\ntype VmFaultToleranceConfigIssueFault VmFaultToleranceConfigIssue\n\nfunc init() {\n\tt[\"VmFaultToleranceConfigIssueFault\"] = reflect.TypeOf((*VmFaultToleranceConfigIssueFault)(nil)).Elem()\n}\n\ntype VmFaultToleranceConfigIssueWrapper struct {\n\tVmFaultToleranceIssue\n\n\tEntityName string                  `xml:\"entityName,omitempty\"`\n\tEntity     *ManagedObjectReference `xml:\"entity,omitempty\"`\n\tError      *LocalizedMethodFault   `xml:\"error,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmFaultToleranceConfigIssueWrapper\"] = reflect.TypeOf((*VmFaultToleranceConfigIssueWrapper)(nil)).Elem()\n}\n\ntype VmFaultToleranceConfigIssueWrapperFault VmFaultToleranceConfigIssueWrapper\n\nfunc init() {\n\tt[\"VmFaultToleranceConfigIssueWrapperFault\"] = reflect.TypeOf((*VmFaultToleranceConfigIssueWrapperFault)(nil)).Elem()\n}\n\ntype VmFaultToleranceInvalidFileBacking struct {\n\tVmFaultToleranceIssue\n\n\tBackingType     string `xml:\"backingType,omitempty\"`\n\tBackingFilename string `xml:\"backingFilename,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmFaultToleranceInvalidFileBacking\"] = reflect.TypeOf((*VmFaultToleranceInvalidFileBacking)(nil)).Elem()\n}\n\ntype VmFaultToleranceInvalidFileBackingFault VmFaultToleranceInvalidFileBacking\n\nfunc init() 
{\n\tt[\"VmFaultToleranceInvalidFileBackingFault\"] = reflect.TypeOf((*VmFaultToleranceInvalidFileBackingFault)(nil)).Elem()\n}\n\ntype VmFaultToleranceIssue struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"VmFaultToleranceIssue\"] = reflect.TypeOf((*VmFaultToleranceIssue)(nil)).Elem()\n}\n\ntype VmFaultToleranceIssueFault BaseVmFaultToleranceIssue\n\nfunc init() {\n\tt[\"VmFaultToleranceIssueFault\"] = reflect.TypeOf((*VmFaultToleranceIssueFault)(nil)).Elem()\n}\n\ntype VmFaultToleranceOpIssuesList struct {\n\tVmFaultToleranceIssue\n\n\tErrors   []LocalizedMethodFault `xml:\"errors,omitempty\"`\n\tWarnings []LocalizedMethodFault `xml:\"warnings,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmFaultToleranceOpIssuesList\"] = reflect.TypeOf((*VmFaultToleranceOpIssuesList)(nil)).Elem()\n}\n\ntype VmFaultToleranceOpIssuesListFault VmFaultToleranceOpIssuesList\n\nfunc init() {\n\tt[\"VmFaultToleranceOpIssuesListFault\"] = reflect.TypeOf((*VmFaultToleranceOpIssuesListFault)(nil)).Elem()\n}\n\ntype VmFaultToleranceStateChangedEvent struct {\n\tVmEvent\n\n\tOldState VirtualMachineFaultToleranceState `xml:\"oldState\"`\n\tNewState VirtualMachineFaultToleranceState `xml:\"newState\"`\n}\n\nfunc init() {\n\tt[\"VmFaultToleranceStateChangedEvent\"] = reflect.TypeOf((*VmFaultToleranceStateChangedEvent)(nil)).Elem()\n}\n\ntype VmFaultToleranceTooManyFtVcpusOnHost struct {\n\tInsufficientResourcesFault\n\n\tHostName      string `xml:\"hostName,omitempty\"`\n\tMaxNumFtVcpus int32  `xml:\"maxNumFtVcpus\"`\n}\n\nfunc init() {\n\tt[\"VmFaultToleranceTooManyFtVcpusOnHost\"] = reflect.TypeOf((*VmFaultToleranceTooManyFtVcpusOnHost)(nil)).Elem()\n}\n\ntype VmFaultToleranceTooManyFtVcpusOnHostFault VmFaultToleranceTooManyFtVcpusOnHost\n\nfunc init() {\n\tt[\"VmFaultToleranceTooManyFtVcpusOnHostFault\"] = reflect.TypeOf((*VmFaultToleranceTooManyFtVcpusOnHostFault)(nil)).Elem()\n}\n\ntype VmFaultToleranceTooManyVMsOnHost struct {\n\tInsufficientResourcesFault\n\n\tHostName    string 
`xml:\"hostName,omitempty\"`\n\tMaxNumFtVms int32  `xml:\"maxNumFtVms\"`\n}\n\nfunc init() {\n\tt[\"VmFaultToleranceTooManyVMsOnHost\"] = reflect.TypeOf((*VmFaultToleranceTooManyVMsOnHost)(nil)).Elem()\n}\n\ntype VmFaultToleranceTooManyVMsOnHostFault VmFaultToleranceTooManyVMsOnHost\n\nfunc init() {\n\tt[\"VmFaultToleranceTooManyVMsOnHostFault\"] = reflect.TypeOf((*VmFaultToleranceTooManyVMsOnHostFault)(nil)).Elem()\n}\n\ntype VmFaultToleranceTurnedOffEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmFaultToleranceTurnedOffEvent\"] = reflect.TypeOf((*VmFaultToleranceTurnedOffEvent)(nil)).Elem()\n}\n\ntype VmFaultToleranceVmTerminatedEvent struct {\n\tVmEvent\n\n\tReason string `xml:\"reason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmFaultToleranceVmTerminatedEvent\"] = reflect.TypeOf((*VmFaultToleranceVmTerminatedEvent)(nil)).Elem()\n}\n\ntype VmGuestOSCrashedEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmGuestOSCrashedEvent\"] = reflect.TypeOf((*VmGuestOSCrashedEvent)(nil)).Elem()\n}\n\ntype VmGuestRebootEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmGuestRebootEvent\"] = reflect.TypeOf((*VmGuestRebootEvent)(nil)).Elem()\n}\n\ntype VmGuestShutdownEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmGuestShutdownEvent\"] = reflect.TypeOf((*VmGuestShutdownEvent)(nil)).Elem()\n}\n\ntype VmGuestStandbyEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmGuestStandbyEvent\"] = reflect.TypeOf((*VmGuestStandbyEvent)(nil)).Elem()\n}\n\ntype VmHealthMonitoringStateChangedEvent struct {\n\tClusterEvent\n\n\tState     string `xml:\"state\"`\n\tPrevState string `xml:\"prevState,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmHealthMonitoringStateChangedEvent\"] = reflect.TypeOf((*VmHealthMonitoringStateChangedEvent)(nil)).Elem()\n}\n\ntype VmHostAffinityRuleViolation struct {\n\tVmConfigFault\n\n\tVmName   string `xml:\"vmName\"`\n\tHostName string `xml:\"hostName\"`\n}\n\nfunc init() {\n\tt[\"VmHostAffinityRuleViolation\"] = 
reflect.TypeOf((*VmHostAffinityRuleViolation)(nil)).Elem()\n}\n\ntype VmHostAffinityRuleViolationFault VmHostAffinityRuleViolation\n\nfunc init() {\n\tt[\"VmHostAffinityRuleViolationFault\"] = reflect.TypeOf((*VmHostAffinityRuleViolationFault)(nil)).Elem()\n}\n\ntype VmInstanceUuidAssignedEvent struct {\n\tVmEvent\n\n\tInstanceUuid string `xml:\"instanceUuid\"`\n}\n\nfunc init() {\n\tt[\"VmInstanceUuidAssignedEvent\"] = reflect.TypeOf((*VmInstanceUuidAssignedEvent)(nil)).Elem()\n}\n\ntype VmInstanceUuidChangedEvent struct {\n\tVmEvent\n\n\tOldInstanceUuid string `xml:\"oldInstanceUuid\"`\n\tNewInstanceUuid string `xml:\"newInstanceUuid\"`\n}\n\nfunc init() {\n\tt[\"VmInstanceUuidChangedEvent\"] = reflect.TypeOf((*VmInstanceUuidChangedEvent)(nil)).Elem()\n}\n\ntype VmInstanceUuidConflictEvent struct {\n\tVmEvent\n\n\tConflictedVm VmEventArgument `xml:\"conflictedVm\"`\n\tInstanceUuid string          `xml:\"instanceUuid\"`\n}\n\nfunc init() {\n\tt[\"VmInstanceUuidConflictEvent\"] = reflect.TypeOf((*VmInstanceUuidConflictEvent)(nil)).Elem()\n}\n\ntype VmLimitLicense struct {\n\tNotEnoughLicenses\n\n\tLimit int32 `xml:\"limit\"`\n}\n\nfunc init() {\n\tt[\"VmLimitLicense\"] = reflect.TypeOf((*VmLimitLicense)(nil)).Elem()\n}\n\ntype VmLimitLicenseFault VmLimitLicense\n\nfunc init() {\n\tt[\"VmLimitLicenseFault\"] = reflect.TypeOf((*VmLimitLicenseFault)(nil)).Elem()\n}\n\ntype VmLogFileInfo struct {\n\tFileInfo\n}\n\nfunc init() {\n\tt[\"VmLogFileInfo\"] = reflect.TypeOf((*VmLogFileInfo)(nil)).Elem()\n}\n\ntype VmLogFileQuery struct {\n\tFileQuery\n}\n\nfunc init() {\n\tt[\"VmLogFileQuery\"] = reflect.TypeOf((*VmLogFileQuery)(nil)).Elem()\n}\n\ntype VmMacAssignedEvent struct {\n\tVmEvent\n\n\tAdapter string `xml:\"adapter\"`\n\tMac     string `xml:\"mac\"`\n}\n\nfunc init() {\n\tt[\"VmMacAssignedEvent\"] = reflect.TypeOf((*VmMacAssignedEvent)(nil)).Elem()\n}\n\ntype VmMacChangedEvent struct {\n\tVmEvent\n\n\tAdapter string `xml:\"adapter\"`\n\tOldMac  string 
`xml:\"oldMac\"`\n\tNewMac  string `xml:\"newMac\"`\n}\n\nfunc init() {\n\tt[\"VmMacChangedEvent\"] = reflect.TypeOf((*VmMacChangedEvent)(nil)).Elem()\n}\n\ntype VmMacConflictEvent struct {\n\tVmEvent\n\n\tConflictedVm VmEventArgument `xml:\"conflictedVm\"`\n\tMac          string          `xml:\"mac\"`\n}\n\nfunc init() {\n\tt[\"VmMacConflictEvent\"] = reflect.TypeOf((*VmMacConflictEvent)(nil)).Elem()\n}\n\ntype VmMaxFTRestartCountReached struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmMaxFTRestartCountReached\"] = reflect.TypeOf((*VmMaxFTRestartCountReached)(nil)).Elem()\n}\n\ntype VmMaxRestartCountReached struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmMaxRestartCountReached\"] = reflect.TypeOf((*VmMaxRestartCountReached)(nil)).Elem()\n}\n\ntype VmMessageErrorEvent struct {\n\tVmEvent\n\n\tMessage     string                  `xml:\"message\"`\n\tMessageInfo []VirtualMachineMessage `xml:\"messageInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmMessageErrorEvent\"] = reflect.TypeOf((*VmMessageErrorEvent)(nil)).Elem()\n}\n\ntype VmMessageEvent struct {\n\tVmEvent\n\n\tMessage     string                  `xml:\"message\"`\n\tMessageInfo []VirtualMachineMessage `xml:\"messageInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmMessageEvent\"] = reflect.TypeOf((*VmMessageEvent)(nil)).Elem()\n}\n\ntype VmMessageWarningEvent struct {\n\tVmEvent\n\n\tMessage     string                  `xml:\"message\"`\n\tMessageInfo []VirtualMachineMessage `xml:\"messageInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmMessageWarningEvent\"] = reflect.TypeOf((*VmMessageWarningEvent)(nil)).Elem()\n}\n\ntype VmMetadataManagerFault struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"VmMetadataManagerFault\"] = reflect.TypeOf((*VmMetadataManagerFault)(nil)).Elem()\n}\n\ntype VmMetadataManagerFaultFault VmMetadataManagerFault\n\nfunc init() {\n\tt[\"VmMetadataManagerFaultFault\"] = reflect.TypeOf((*VmMetadataManagerFaultFault)(nil)).Elem()\n}\n\ntype VmMigratedEvent struct {\n\tVmEvent\n\n\tSourceHost    
   HostEventArgument        `xml:\"sourceHost\"`\n\tSourceDatacenter *DatacenterEventArgument `xml:\"sourceDatacenter,omitempty\"`\n\tSourceDatastore  *DatastoreEventArgument  `xml:\"sourceDatastore,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmMigratedEvent\"] = reflect.TypeOf((*VmMigratedEvent)(nil)).Elem()\n}\n\ntype VmMonitorIncompatibleForFaultTolerance struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"VmMonitorIncompatibleForFaultTolerance\"] = reflect.TypeOf((*VmMonitorIncompatibleForFaultTolerance)(nil)).Elem()\n}\n\ntype VmMonitorIncompatibleForFaultToleranceFault VmMonitorIncompatibleForFaultTolerance\n\nfunc init() {\n\tt[\"VmMonitorIncompatibleForFaultToleranceFault\"] = reflect.TypeOf((*VmMonitorIncompatibleForFaultToleranceFault)(nil)).Elem()\n}\n\ntype VmNoCompatibleHostForSecondaryEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmNoCompatibleHostForSecondaryEvent\"] = reflect.TypeOf((*VmNoCompatibleHostForSecondaryEvent)(nil)).Elem()\n}\n\ntype VmNoNetworkAccessEvent struct {\n\tVmEvent\n\n\tDestHost HostEventArgument `xml:\"destHost\"`\n}\n\nfunc init() {\n\tt[\"VmNoNetworkAccessEvent\"] = reflect.TypeOf((*VmNoNetworkAccessEvent)(nil)).Elem()\n}\n\ntype VmNvramFileInfo struct {\n\tFileInfo\n}\n\nfunc init() {\n\tt[\"VmNvramFileInfo\"] = reflect.TypeOf((*VmNvramFileInfo)(nil)).Elem()\n}\n\ntype VmNvramFileQuery struct {\n\tFileQuery\n}\n\nfunc init() {\n\tt[\"VmNvramFileQuery\"] = reflect.TypeOf((*VmNvramFileQuery)(nil)).Elem()\n}\n\ntype VmOrphanedEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmOrphanedEvent\"] = reflect.TypeOf((*VmOrphanedEvent)(nil)).Elem()\n}\n\ntype VmPodConfigForPlacement struct {\n\tDynamicData\n\n\tStoragePod  ManagedObjectReference  `xml:\"storagePod\"`\n\tDisk        []PodDiskLocator        `xml:\"disk,omitempty\"`\n\tVmConfig    *StorageDrsVmConfigInfo `xml:\"vmConfig,omitempty\"`\n\tInterVmRule []BaseClusterRuleInfo   `xml:\"interVmRule,omitempty,typeattr\"`\n}\n\nfunc init() {\n\tt[\"VmPodConfigForPlacement\"] = 
reflect.TypeOf((*VmPodConfigForPlacement)(nil)).Elem()\n}\n\ntype VmPortGroupProfile struct {\n\tPortGroupProfile\n}\n\nfunc init() {\n\tt[\"VmPortGroupProfile\"] = reflect.TypeOf((*VmPortGroupProfile)(nil)).Elem()\n}\n\ntype VmPowerOffOnIsolationEvent struct {\n\tVmPoweredOffEvent\n\n\tIsolatedHost HostEventArgument `xml:\"isolatedHost\"`\n}\n\nfunc init() {\n\tt[\"VmPowerOffOnIsolationEvent\"] = reflect.TypeOf((*VmPowerOffOnIsolationEvent)(nil)).Elem()\n}\n\ntype VmPowerOnDisabled struct {\n\tInvalidState\n}\n\nfunc init() {\n\tt[\"VmPowerOnDisabled\"] = reflect.TypeOf((*VmPowerOnDisabled)(nil)).Elem()\n}\n\ntype VmPowerOnDisabledFault VmPowerOnDisabled\n\nfunc init() {\n\tt[\"VmPowerOnDisabledFault\"] = reflect.TypeOf((*VmPowerOnDisabledFault)(nil)).Elem()\n}\n\ntype VmPoweredOffEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmPoweredOffEvent\"] = reflect.TypeOf((*VmPoweredOffEvent)(nil)).Elem()\n}\n\ntype VmPoweredOnEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmPoweredOnEvent\"] = reflect.TypeOf((*VmPoweredOnEvent)(nil)).Elem()\n}\n\ntype VmPoweringOnWithCustomizedDVPortEvent struct {\n\tVmEvent\n\n\tVnic []VnicPortArgument `xml:\"vnic\"`\n}\n\nfunc init() {\n\tt[\"VmPoweringOnWithCustomizedDVPortEvent\"] = reflect.TypeOf((*VmPoweringOnWithCustomizedDVPortEvent)(nil)).Elem()\n}\n\ntype VmPrimaryFailoverEvent struct {\n\tVmEvent\n\n\tReason string `xml:\"reason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmPrimaryFailoverEvent\"] = reflect.TypeOf((*VmPrimaryFailoverEvent)(nil)).Elem()\n}\n\ntype VmReconfiguredEvent struct {\n\tVmEvent\n\n\tConfigSpec    VirtualMachineConfigSpec  `xml:\"configSpec\"`\n\tConfigChanges *ChangesInfoEventArgument `xml:\"configChanges,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmReconfiguredEvent\"] = reflect.TypeOf((*VmReconfiguredEvent)(nil)).Elem()\n}\n\ntype VmRegisteredEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmRegisteredEvent\"] = reflect.TypeOf((*VmRegisteredEvent)(nil)).Elem()\n}\n\ntype 
VmRelayoutSuccessfulEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmRelayoutSuccessfulEvent\"] = reflect.TypeOf((*VmRelayoutSuccessfulEvent)(nil)).Elem()\n}\n\ntype VmRelayoutUpToDateEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmRelayoutUpToDateEvent\"] = reflect.TypeOf((*VmRelayoutUpToDateEvent)(nil)).Elem()\n}\n\ntype VmReloadFromPathEvent struct {\n\tVmEvent\n\n\tConfigPath string `xml:\"configPath\"`\n}\n\nfunc init() {\n\tt[\"VmReloadFromPathEvent\"] = reflect.TypeOf((*VmReloadFromPathEvent)(nil)).Elem()\n}\n\ntype VmReloadFromPathFailedEvent struct {\n\tVmEvent\n\n\tConfigPath string `xml:\"configPath\"`\n}\n\nfunc init() {\n\tt[\"VmReloadFromPathFailedEvent\"] = reflect.TypeOf((*VmReloadFromPathFailedEvent)(nil)).Elem()\n}\n\ntype VmRelocateFailedEvent struct {\n\tVmRelocateSpecEvent\n\n\tDestHost       HostEventArgument        `xml:\"destHost\"`\n\tReason         LocalizedMethodFault     `xml:\"reason\"`\n\tDestDatacenter *DatacenterEventArgument `xml:\"destDatacenter,omitempty\"`\n\tDestDatastore  *DatastoreEventArgument  `xml:\"destDatastore,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmRelocateFailedEvent\"] = reflect.TypeOf((*VmRelocateFailedEvent)(nil)).Elem()\n}\n\ntype VmRelocateSpecEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmRelocateSpecEvent\"] = reflect.TypeOf((*VmRelocateSpecEvent)(nil)).Elem()\n}\n\ntype VmRelocatedEvent struct {\n\tVmRelocateSpecEvent\n\n\tSourceHost       HostEventArgument        `xml:\"sourceHost\"`\n\tSourceDatacenter *DatacenterEventArgument `xml:\"sourceDatacenter,omitempty\"`\n\tSourceDatastore  *DatastoreEventArgument  `xml:\"sourceDatastore,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmRelocatedEvent\"] = reflect.TypeOf((*VmRelocatedEvent)(nil)).Elem()\n}\n\ntype VmRemoteConsoleConnectedEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmRemoteConsoleConnectedEvent\"] = reflect.TypeOf((*VmRemoteConsoleConnectedEvent)(nil)).Elem()\n}\n\ntype VmRemoteConsoleDisconnectedEvent struct 
{\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmRemoteConsoleDisconnectedEvent\"] = reflect.TypeOf((*VmRemoteConsoleDisconnectedEvent)(nil)).Elem()\n}\n\ntype VmRemovedEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmRemovedEvent\"] = reflect.TypeOf((*VmRemovedEvent)(nil)).Elem()\n}\n\ntype VmRenamedEvent struct {\n\tVmEvent\n\n\tOldName string `xml:\"oldName\"`\n\tNewName string `xml:\"newName\"`\n}\n\nfunc init() {\n\tt[\"VmRenamedEvent\"] = reflect.TypeOf((*VmRenamedEvent)(nil)).Elem()\n}\n\ntype VmRequirementsExceedCurrentEVCModeEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmRequirementsExceedCurrentEVCModeEvent\"] = reflect.TypeOf((*VmRequirementsExceedCurrentEVCModeEvent)(nil)).Elem()\n}\n\ntype VmResettingEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmResettingEvent\"] = reflect.TypeOf((*VmResettingEvent)(nil)).Elem()\n}\n\ntype VmResourcePoolMovedEvent struct {\n\tVmEvent\n\n\tOldParent ResourcePoolEventArgument `xml:\"oldParent\"`\n\tNewParent ResourcePoolEventArgument `xml:\"newParent\"`\n}\n\nfunc init() {\n\tt[\"VmResourcePoolMovedEvent\"] = reflect.TypeOf((*VmResourcePoolMovedEvent)(nil)).Elem()\n}\n\ntype VmResourceReallocatedEvent struct {\n\tVmEvent\n\n\tConfigChanges *ChangesInfoEventArgument `xml:\"configChanges,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmResourceReallocatedEvent\"] = reflect.TypeOf((*VmResourceReallocatedEvent)(nil)).Elem()\n}\n\ntype VmRestartedOnAlternateHostEvent struct {\n\tVmPoweredOnEvent\n\n\tSourceHost HostEventArgument `xml:\"sourceHost\"`\n}\n\nfunc init() {\n\tt[\"VmRestartedOnAlternateHostEvent\"] = reflect.TypeOf((*VmRestartedOnAlternateHostEvent)(nil)).Elem()\n}\n\ntype VmResumingEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmResumingEvent\"] = reflect.TypeOf((*VmResumingEvent)(nil)).Elem()\n}\n\ntype VmSecondaryAddedEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmSecondaryAddedEvent\"] = reflect.TypeOf((*VmSecondaryAddedEvent)(nil)).Elem()\n}\n\ntype VmSecondaryDisabledBySystemEvent 
struct {\n\tVmEvent\n\n\tReason *LocalizedMethodFault `xml:\"reason,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmSecondaryDisabledBySystemEvent\"] = reflect.TypeOf((*VmSecondaryDisabledBySystemEvent)(nil)).Elem()\n}\n\ntype VmSecondaryDisabledEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmSecondaryDisabledEvent\"] = reflect.TypeOf((*VmSecondaryDisabledEvent)(nil)).Elem()\n}\n\ntype VmSecondaryEnabledEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmSecondaryEnabledEvent\"] = reflect.TypeOf((*VmSecondaryEnabledEvent)(nil)).Elem()\n}\n\ntype VmSecondaryStartedEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmSecondaryStartedEvent\"] = reflect.TypeOf((*VmSecondaryStartedEvent)(nil)).Elem()\n}\n\ntype VmShutdownOnIsolationEvent struct {\n\tVmPoweredOffEvent\n\n\tIsolatedHost   HostEventArgument `xml:\"isolatedHost\"`\n\tShutdownResult string            `xml:\"shutdownResult,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmShutdownOnIsolationEvent\"] = reflect.TypeOf((*VmShutdownOnIsolationEvent)(nil)).Elem()\n}\n\ntype VmSmpFaultToleranceTooManyVMsOnHost struct {\n\tInsufficientResourcesFault\n\n\tHostName       string `xml:\"hostName,omitempty\"`\n\tMaxNumSmpFtVms int32  `xml:\"maxNumSmpFtVms\"`\n}\n\nfunc init() {\n\tt[\"VmSmpFaultToleranceTooManyVMsOnHost\"] = reflect.TypeOf((*VmSmpFaultToleranceTooManyVMsOnHost)(nil)).Elem()\n}\n\ntype VmSmpFaultToleranceTooManyVMsOnHostFault VmSmpFaultToleranceTooManyVMsOnHost\n\nfunc init() {\n\tt[\"VmSmpFaultToleranceTooManyVMsOnHostFault\"] = reflect.TypeOf((*VmSmpFaultToleranceTooManyVMsOnHostFault)(nil)).Elem()\n}\n\ntype VmSnapshotFileInfo struct {\n\tFileInfo\n}\n\nfunc init() {\n\tt[\"VmSnapshotFileInfo\"] = reflect.TypeOf((*VmSnapshotFileInfo)(nil)).Elem()\n}\n\ntype VmSnapshotFileQuery struct {\n\tFileQuery\n}\n\nfunc init() {\n\tt[\"VmSnapshotFileQuery\"] = reflect.TypeOf((*VmSnapshotFileQuery)(nil)).Elem()\n}\n\ntype VmStartRecordingEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmStartRecordingEvent\"] = 
reflect.TypeOf((*VmStartRecordingEvent)(nil)).Elem()\n}\n\ntype VmStartReplayingEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmStartReplayingEvent\"] = reflect.TypeOf((*VmStartReplayingEvent)(nil)).Elem()\n}\n\ntype VmStartingEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmStartingEvent\"] = reflect.TypeOf((*VmStartingEvent)(nil)).Elem()\n}\n\ntype VmStartingSecondaryEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmStartingSecondaryEvent\"] = reflect.TypeOf((*VmStartingSecondaryEvent)(nil)).Elem()\n}\n\ntype VmStaticMacConflictEvent struct {\n\tVmEvent\n\n\tConflictedVm VmEventArgument `xml:\"conflictedVm\"`\n\tMac          string          `xml:\"mac\"`\n}\n\nfunc init() {\n\tt[\"VmStaticMacConflictEvent\"] = reflect.TypeOf((*VmStaticMacConflictEvent)(nil)).Elem()\n}\n\ntype VmStoppingEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmStoppingEvent\"] = reflect.TypeOf((*VmStoppingEvent)(nil)).Elem()\n}\n\ntype VmSuspendedEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmSuspendedEvent\"] = reflect.TypeOf((*VmSuspendedEvent)(nil)).Elem()\n}\n\ntype VmSuspendingEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmSuspendingEvent\"] = reflect.TypeOf((*VmSuspendingEvent)(nil)).Elem()\n}\n\ntype VmTimedoutStartingSecondaryEvent struct {\n\tVmEvent\n\n\tTimeout int64 `xml:\"timeout,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmTimedoutStartingSecondaryEvent\"] = reflect.TypeOf((*VmTimedoutStartingSecondaryEvent)(nil)).Elem()\n}\n\ntype VmToolsUpgradeFault struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"VmToolsUpgradeFault\"] = reflect.TypeOf((*VmToolsUpgradeFault)(nil)).Elem()\n}\n\ntype VmToolsUpgradeFaultFault BaseVmToolsUpgradeFault\n\nfunc init() {\n\tt[\"VmToolsUpgradeFaultFault\"] = reflect.TypeOf((*VmToolsUpgradeFaultFault)(nil)).Elem()\n}\n\ntype VmUnsupportedStartingEvent struct {\n\tVmStartingEvent\n\n\tGuestId string `xml:\"guestId\"`\n}\n\nfunc init() {\n\tt[\"VmUnsupportedStartingEvent\"] = 
reflect.TypeOf((*VmUnsupportedStartingEvent)(nil)).Elem()\n}\n\ntype VmUpgradeCompleteEvent struct {\n\tVmEvent\n\n\tVersion string `xml:\"version\"`\n}\n\nfunc init() {\n\tt[\"VmUpgradeCompleteEvent\"] = reflect.TypeOf((*VmUpgradeCompleteEvent)(nil)).Elem()\n}\n\ntype VmUpgradeFailedEvent struct {\n\tVmEvent\n}\n\nfunc init() {\n\tt[\"VmUpgradeFailedEvent\"] = reflect.TypeOf((*VmUpgradeFailedEvent)(nil)).Elem()\n}\n\ntype VmUpgradingEvent struct {\n\tVmEvent\n\n\tVersion string `xml:\"version\"`\n}\n\nfunc init() {\n\tt[\"VmUpgradingEvent\"] = reflect.TypeOf((*VmUpgradingEvent)(nil)).Elem()\n}\n\ntype VmUuidAssignedEvent struct {\n\tVmEvent\n\n\tUuid string `xml:\"uuid\"`\n}\n\nfunc init() {\n\tt[\"VmUuidAssignedEvent\"] = reflect.TypeOf((*VmUuidAssignedEvent)(nil)).Elem()\n}\n\ntype VmUuidChangedEvent struct {\n\tVmEvent\n\n\tOldUuid string `xml:\"oldUuid\"`\n\tNewUuid string `xml:\"newUuid\"`\n}\n\nfunc init() {\n\tt[\"VmUuidChangedEvent\"] = reflect.TypeOf((*VmUuidChangedEvent)(nil)).Elem()\n}\n\ntype VmUuidConflictEvent struct {\n\tVmEvent\n\n\tConflictedVm VmEventArgument `xml:\"conflictedVm\"`\n\tUuid         string          `xml:\"uuid\"`\n}\n\nfunc init() {\n\tt[\"VmUuidConflictEvent\"] = reflect.TypeOf((*VmUuidConflictEvent)(nil)).Elem()\n}\n\ntype VmValidateMaxDevice struct {\n\tVimFault\n\n\tDevice string `xml:\"device\"`\n\tMax    int32  `xml:\"max\"`\n\tCount  int32  `xml:\"count\"`\n}\n\nfunc init() {\n\tt[\"VmValidateMaxDevice\"] = reflect.TypeOf((*VmValidateMaxDevice)(nil)).Elem()\n}\n\ntype VmValidateMaxDeviceFault VmValidateMaxDevice\n\nfunc init() {\n\tt[\"VmValidateMaxDeviceFault\"] = reflect.TypeOf((*VmValidateMaxDeviceFault)(nil)).Elem()\n}\n\ntype VmVnicPoolReservationViolationClearEvent struct {\n\tDvsEvent\n\n\tVmVnicResourcePoolKey  string `xml:\"vmVnicResourcePoolKey\"`\n\tVmVnicResourcePoolName string `xml:\"vmVnicResourcePoolName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmVnicPoolReservationViolationClearEvent\"] = 
reflect.TypeOf((*VmVnicPoolReservationViolationClearEvent)(nil)).Elem()\n}\n\ntype VmVnicPoolReservationViolationRaiseEvent struct {\n\tDvsEvent\n\n\tVmVnicResourcePoolKey  string `xml:\"vmVnicResourcePoolKey\"`\n\tVmVnicResourcePoolName string `xml:\"vmVnicResourcePoolName,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmVnicPoolReservationViolationRaiseEvent\"] = reflect.TypeOf((*VmVnicPoolReservationViolationRaiseEvent)(nil)).Elem()\n}\n\ntype VmWwnAssignedEvent struct {\n\tVmEvent\n\n\tNodeWwns []int64 `xml:\"nodeWwns\"`\n\tPortWwns []int64 `xml:\"portWwns\"`\n}\n\nfunc init() {\n\tt[\"VmWwnAssignedEvent\"] = reflect.TypeOf((*VmWwnAssignedEvent)(nil)).Elem()\n}\n\ntype VmWwnChangedEvent struct {\n\tVmEvent\n\n\tOldNodeWwns []int64 `xml:\"oldNodeWwns,omitempty\"`\n\tOldPortWwns []int64 `xml:\"oldPortWwns,omitempty\"`\n\tNewNodeWwns []int64 `xml:\"newNodeWwns,omitempty\"`\n\tNewPortWwns []int64 `xml:\"newPortWwns,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmWwnChangedEvent\"] = reflect.TypeOf((*VmWwnChangedEvent)(nil)).Elem()\n}\n\ntype VmWwnConflict struct {\n\tInvalidVmConfig\n\n\tVm   *ManagedObjectReference `xml:\"vm,omitempty\"`\n\tHost *ManagedObjectReference `xml:\"host,omitempty\"`\n\tName string                  `xml:\"name,omitempty\"`\n\tWwn  int64                   `xml:\"wwn,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmWwnConflict\"] = reflect.TypeOf((*VmWwnConflict)(nil)).Elem()\n}\n\ntype VmWwnConflictEvent struct {\n\tVmEvent\n\n\tConflictedVms   []VmEventArgument   `xml:\"conflictedVms,omitempty\"`\n\tConflictedHosts []HostEventArgument `xml:\"conflictedHosts,omitempty\"`\n\tWwn             int64               `xml:\"wwn\"`\n}\n\nfunc init() {\n\tt[\"VmWwnConflictEvent\"] = reflect.TypeOf((*VmWwnConflictEvent)(nil)).Elem()\n}\n\ntype VmWwnConflictFault VmWwnConflict\n\nfunc init() {\n\tt[\"VmWwnConflictFault\"] = reflect.TypeOf((*VmWwnConflictFault)(nil)).Elem()\n}\n\ntype VmfsAlreadyMounted struct {\n\tVmfsMountFault\n}\n\nfunc init() 
{\n\tt[\"VmfsAlreadyMounted\"] = reflect.TypeOf((*VmfsAlreadyMounted)(nil)).Elem()\n}\n\ntype VmfsAlreadyMountedFault VmfsAlreadyMounted\n\nfunc init() {\n\tt[\"VmfsAlreadyMountedFault\"] = reflect.TypeOf((*VmfsAlreadyMountedFault)(nil)).Elem()\n}\n\ntype VmfsAmbiguousMount struct {\n\tVmfsMountFault\n}\n\nfunc init() {\n\tt[\"VmfsAmbiguousMount\"] = reflect.TypeOf((*VmfsAmbiguousMount)(nil)).Elem()\n}\n\ntype VmfsAmbiguousMountFault VmfsAmbiguousMount\n\nfunc init() {\n\tt[\"VmfsAmbiguousMountFault\"] = reflect.TypeOf((*VmfsAmbiguousMountFault)(nil)).Elem()\n}\n\ntype VmfsConfigOption struct {\n\tDynamicData\n\n\tBlockSizeOption        int32   `xml:\"blockSizeOption\"`\n\tUnmapGranularityOption []int32 `xml:\"unmapGranularityOption,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmfsConfigOption\"] = reflect.TypeOf((*VmfsConfigOption)(nil)).Elem()\n}\n\ntype VmfsDatastoreAllExtentOption struct {\n\tVmfsDatastoreSingleExtentOption\n}\n\nfunc init() {\n\tt[\"VmfsDatastoreAllExtentOption\"] = reflect.TypeOf((*VmfsDatastoreAllExtentOption)(nil)).Elem()\n}\n\ntype VmfsDatastoreBaseOption struct {\n\tDynamicData\n\n\tLayout                HostDiskPartitionLayout `xml:\"layout\"`\n\tPartitionFormatChange *bool                   `xml:\"partitionFormatChange\"`\n}\n\nfunc init() {\n\tt[\"VmfsDatastoreBaseOption\"] = reflect.TypeOf((*VmfsDatastoreBaseOption)(nil)).Elem()\n}\n\ntype VmfsDatastoreCreateSpec struct {\n\tVmfsDatastoreSpec\n\n\tPartition HostDiskPartitionSpec   `xml:\"partition\"`\n\tVmfs      HostVmfsSpec            `xml:\"vmfs\"`\n\tExtent    []HostScsiDiskPartition `xml:\"extent,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmfsDatastoreCreateSpec\"] = reflect.TypeOf((*VmfsDatastoreCreateSpec)(nil)).Elem()\n}\n\ntype VmfsDatastoreExpandSpec struct {\n\tVmfsDatastoreSpec\n\n\tPartition HostDiskPartitionSpec `xml:\"partition\"`\n\tExtent    HostScsiDiskPartition `xml:\"extent\"`\n}\n\nfunc init() {\n\tt[\"VmfsDatastoreExpandSpec\"] = 
reflect.TypeOf((*VmfsDatastoreExpandSpec)(nil)).Elem()\n}\n\ntype VmfsDatastoreExtendSpec struct {\n\tVmfsDatastoreSpec\n\n\tPartition HostDiskPartitionSpec   `xml:\"partition\"`\n\tExtent    []HostScsiDiskPartition `xml:\"extent\"`\n}\n\nfunc init() {\n\tt[\"VmfsDatastoreExtendSpec\"] = reflect.TypeOf((*VmfsDatastoreExtendSpec)(nil)).Elem()\n}\n\ntype VmfsDatastoreInfo struct {\n\tDatastoreInfo\n\n\tMaxPhysicalRDMFileSize int64           `xml:\"maxPhysicalRDMFileSize,omitempty\"`\n\tMaxVirtualRDMFileSize  int64           `xml:\"maxVirtualRDMFileSize,omitempty\"`\n\tVmfs                   *HostVmfsVolume `xml:\"vmfs,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmfsDatastoreInfo\"] = reflect.TypeOf((*VmfsDatastoreInfo)(nil)).Elem()\n}\n\ntype VmfsDatastoreMultipleExtentOption struct {\n\tVmfsDatastoreBaseOption\n\n\tVmfsExtent []HostDiskPartitionBlockRange `xml:\"vmfsExtent\"`\n}\n\nfunc init() {\n\tt[\"VmfsDatastoreMultipleExtentOption\"] = reflect.TypeOf((*VmfsDatastoreMultipleExtentOption)(nil)).Elem()\n}\n\ntype VmfsDatastoreOption struct {\n\tDynamicData\n\n\tInfo BaseVmfsDatastoreBaseOption `xml:\"info,typeattr\"`\n\tSpec BaseVmfsDatastoreSpec       `xml:\"spec,typeattr\"`\n}\n\nfunc init() {\n\tt[\"VmfsDatastoreOption\"] = reflect.TypeOf((*VmfsDatastoreOption)(nil)).Elem()\n}\n\ntype VmfsDatastoreSingleExtentOption struct {\n\tVmfsDatastoreBaseOption\n\n\tVmfsExtent HostDiskPartitionBlockRange `xml:\"vmfsExtent\"`\n}\n\nfunc init() {\n\tt[\"VmfsDatastoreSingleExtentOption\"] = reflect.TypeOf((*VmfsDatastoreSingleExtentOption)(nil)).Elem()\n}\n\ntype VmfsDatastoreSpec struct {\n\tDynamicData\n\n\tDiskUuid string `xml:\"diskUuid\"`\n}\n\nfunc init() {\n\tt[\"VmfsDatastoreSpec\"] = reflect.TypeOf((*VmfsDatastoreSpec)(nil)).Elem()\n}\n\ntype VmfsMountFault struct {\n\tHostConfigFault\n\n\tUuid string `xml:\"uuid\"`\n}\n\nfunc init() {\n\tt[\"VmfsMountFault\"] = reflect.TypeOf((*VmfsMountFault)(nil)).Elem()\n}\n\ntype VmfsMountFaultFault BaseVmfsMountFault\n\nfunc 
init() {\n\tt[\"VmfsMountFaultFault\"] = reflect.TypeOf((*VmfsMountFaultFault)(nil)).Elem()\n}\n\ntype VmotionInterfaceNotEnabled struct {\n\tHostPowerOpFailed\n}\n\nfunc init() {\n\tt[\"VmotionInterfaceNotEnabled\"] = reflect.TypeOf((*VmotionInterfaceNotEnabled)(nil)).Elem()\n}\n\ntype VmotionInterfaceNotEnabledFault VmotionInterfaceNotEnabled\n\nfunc init() {\n\tt[\"VmotionInterfaceNotEnabledFault\"] = reflect.TypeOf((*VmotionInterfaceNotEnabledFault)(nil)).Elem()\n}\n\ntype VmwareDistributedVirtualSwitchPvlanSpec struct {\n\tVmwareDistributedVirtualSwitchVlanSpec\n\n\tPvlanId int32 `xml:\"pvlanId\"`\n}\n\nfunc init() {\n\tt[\"VmwareDistributedVirtualSwitchPvlanSpec\"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchPvlanSpec)(nil)).Elem()\n}\n\ntype VmwareDistributedVirtualSwitchTrunkVlanSpec struct {\n\tVmwareDistributedVirtualSwitchVlanSpec\n\n\tVlanId []NumericRange `xml:\"vlanId\"`\n}\n\nfunc init() {\n\tt[\"VmwareDistributedVirtualSwitchTrunkVlanSpec\"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchTrunkVlanSpec)(nil)).Elem()\n}\n\ntype VmwareDistributedVirtualSwitchVlanIdSpec struct {\n\tVmwareDistributedVirtualSwitchVlanSpec\n\n\tVlanId int32 `xml:\"vlanId\"`\n}\n\nfunc init() {\n\tt[\"VmwareDistributedVirtualSwitchVlanIdSpec\"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchVlanIdSpec)(nil)).Elem()\n}\n\ntype VmwareDistributedVirtualSwitchVlanSpec struct {\n\tInheritablePolicy\n}\n\nfunc init() {\n\tt[\"VmwareDistributedVirtualSwitchVlanSpec\"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchVlanSpec)(nil)).Elem()\n}\n\ntype VmwareUplinkPortTeamingPolicy struct {\n\tInheritablePolicy\n\n\tPolicy          *StringPolicy                `xml:\"policy,omitempty\"`\n\tReversePolicy   *BoolPolicy                  `xml:\"reversePolicy,omitempty\"`\n\tNotifySwitches  *BoolPolicy                  `xml:\"notifySwitches,omitempty\"`\n\tRollingOrder    *BoolPolicy                  `xml:\"rollingOrder,omitempty\"`\n\tFailureCriteria *DVSFailureCriteria          
`xml:\"failureCriteria,omitempty\"`\n\tUplinkPortOrder *VMwareUplinkPortOrderPolicy `xml:\"uplinkPortOrder,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VmwareUplinkPortTeamingPolicy\"] = reflect.TypeOf((*VmwareUplinkPortTeamingPolicy)(nil)).Elem()\n}\n\ntype VnicPortArgument struct {\n\tDynamicData\n\n\tVnic string                                 `xml:\"vnic\"`\n\tPort DistributedVirtualSwitchPortConnection `xml:\"port\"`\n}\n\nfunc init() {\n\tt[\"VnicPortArgument\"] = reflect.TypeOf((*VnicPortArgument)(nil)).Elem()\n}\n\ntype VolumeEditorError struct {\n\tCustomizationFault\n}\n\nfunc init() {\n\tt[\"VolumeEditorError\"] = reflect.TypeOf((*VolumeEditorError)(nil)).Elem()\n}\n\ntype VolumeEditorErrorFault VolumeEditorError\n\nfunc init() {\n\tt[\"VolumeEditorErrorFault\"] = reflect.TypeOf((*VolumeEditorErrorFault)(nil)).Elem()\n}\n\ntype VramLimitLicense struct {\n\tNotEnoughLicenses\n\n\tLimit int32 `xml:\"limit\"`\n}\n\nfunc init() {\n\tt[\"VramLimitLicense\"] = reflect.TypeOf((*VramLimitLicense)(nil)).Elem()\n}\n\ntype VramLimitLicenseFault VramLimitLicense\n\nfunc init() {\n\tt[\"VramLimitLicenseFault\"] = reflect.TypeOf((*VramLimitLicenseFault)(nil)).Elem()\n}\n\ntype VrpResourceAllocationInfo struct {\n\tResourceAllocationInfo\n\n\tReservationLimit int64 `xml:\"reservationLimit,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VrpResourceAllocationInfo\"] = reflect.TypeOf((*VrpResourceAllocationInfo)(nil)).Elem()\n}\n\ntype VsanClusterConfigInfo struct {\n\tDynamicData\n\n\tEnabled       *bool                                 `xml:\"enabled\"`\n\tDefaultConfig *VsanClusterConfigInfoHostDefaultInfo `xml:\"defaultConfig,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VsanClusterConfigInfo\"] = reflect.TypeOf((*VsanClusterConfigInfo)(nil)).Elem()\n}\n\ntype VsanClusterConfigInfoHostDefaultInfo struct {\n\tDynamicData\n\n\tUuid             string `xml:\"uuid,omitempty\"`\n\tAutoClaimStorage *bool  `xml:\"autoClaimStorage\"`\n\tChecksumEnabled  *bool  
`xml:\"checksumEnabled\"`\n}\n\nfunc init() {\n\tt[\"VsanClusterConfigInfoHostDefaultInfo\"] = reflect.TypeOf((*VsanClusterConfigInfoHostDefaultInfo)(nil)).Elem()\n}\n\ntype VsanClusterUuidMismatch struct {\n\tCannotMoveVsanEnabledHost\n\n\tHostClusterUuid        string `xml:\"hostClusterUuid\"`\n\tDestinationClusterUuid string `xml:\"destinationClusterUuid\"`\n}\n\nfunc init() {\n\tt[\"VsanClusterUuidMismatch\"] = reflect.TypeOf((*VsanClusterUuidMismatch)(nil)).Elem()\n}\n\ntype VsanClusterUuidMismatchFault VsanClusterUuidMismatch\n\nfunc init() {\n\tt[\"VsanClusterUuidMismatchFault\"] = reflect.TypeOf((*VsanClusterUuidMismatchFault)(nil)).Elem()\n}\n\ntype VsanDiskFault struct {\n\tVsanFault\n\n\tDevice string `xml:\"device,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VsanDiskFault\"] = reflect.TypeOf((*VsanDiskFault)(nil)).Elem()\n}\n\ntype VsanDiskFaultFault BaseVsanDiskFault\n\nfunc init() {\n\tt[\"VsanDiskFaultFault\"] = reflect.TypeOf((*VsanDiskFaultFault)(nil)).Elem()\n}\n\ntype VsanFault struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"VsanFault\"] = reflect.TypeOf((*VsanFault)(nil)).Elem()\n}\n\ntype VsanFaultFault BaseVsanFault\n\nfunc init() {\n\tt[\"VsanFaultFault\"] = reflect.TypeOf((*VsanFaultFault)(nil)).Elem()\n}\n\ntype VsanHostClusterStatus struct {\n\tDynamicData\n\n\tUuid       string                     `xml:\"uuid,omitempty\"`\n\tNodeUuid   string                     `xml:\"nodeUuid,omitempty\"`\n\tHealth     string                     `xml:\"health\"`\n\tNodeState  VsanHostClusterStatusState `xml:\"nodeState\"`\n\tMemberUuid []string                   `xml:\"memberUuid,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VsanHostClusterStatus\"] = reflect.TypeOf((*VsanHostClusterStatus)(nil)).Elem()\n}\n\ntype VsanHostClusterStatusState struct {\n\tDynamicData\n\n\tState      string                                        `xml:\"state\"`\n\tCompletion *VsanHostClusterStatusStateCompletionEstimate `xml:\"completion,omitempty\"`\n}\n\nfunc init() 
{\n\tt[\"VsanHostClusterStatusState\"] = reflect.TypeOf((*VsanHostClusterStatusState)(nil)).Elem()\n}\n\ntype VsanHostClusterStatusStateCompletionEstimate struct {\n\tDynamicData\n\n\tCompleteTime    *time.Time `xml:\"completeTime\"`\n\tPercentComplete int32      `xml:\"percentComplete,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VsanHostClusterStatusStateCompletionEstimate\"] = reflect.TypeOf((*VsanHostClusterStatusStateCompletionEstimate)(nil)).Elem()\n}\n\ntype VsanHostConfigInfo struct {\n\tDynamicData\n\n\tEnabled         *bool                          `xml:\"enabled\"`\n\tHostSystem      *ManagedObjectReference        `xml:\"hostSystem,omitempty\"`\n\tClusterInfo     *VsanHostConfigInfoClusterInfo `xml:\"clusterInfo,omitempty\"`\n\tStorageInfo     *VsanHostConfigInfoStorageInfo `xml:\"storageInfo,omitempty\"`\n\tNetworkInfo     *VsanHostConfigInfoNetworkInfo `xml:\"networkInfo,omitempty\"`\n\tFaultDomainInfo *VsanHostFaultDomainInfo       `xml:\"faultDomainInfo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VsanHostConfigInfo\"] = reflect.TypeOf((*VsanHostConfigInfo)(nil)).Elem()\n}\n\ntype VsanHostConfigInfoClusterInfo struct {\n\tDynamicData\n\n\tUuid     string `xml:\"uuid,omitempty\"`\n\tNodeUuid string `xml:\"nodeUuid,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VsanHostConfigInfoClusterInfo\"] = reflect.TypeOf((*VsanHostConfigInfoClusterInfo)(nil)).Elem()\n}\n\ntype VsanHostConfigInfoNetworkInfo struct {\n\tDynamicData\n\n\tPort []VsanHostConfigInfoNetworkInfoPortConfig `xml:\"port,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VsanHostConfigInfoNetworkInfo\"] = reflect.TypeOf((*VsanHostConfigInfoNetworkInfo)(nil)).Elem()\n}\n\ntype VsanHostConfigInfoNetworkInfoPortConfig struct {\n\tDynamicData\n\n\tIpConfig *VsanHostIpConfig `xml:\"ipConfig,omitempty\"`\n\tDevice   string            `xml:\"device\"`\n}\n\nfunc init() {\n\tt[\"VsanHostConfigInfoNetworkInfoPortConfig\"] = reflect.TypeOf((*VsanHostConfigInfoNetworkInfoPortConfig)(nil)).Elem()\n}\n\ntype 
VsanHostConfigInfoStorageInfo struct {\n\tDynamicData\n\n\tAutoClaimStorage *bool                 `xml:\"autoClaimStorage\"`\n\tDiskMapping      []VsanHostDiskMapping `xml:\"diskMapping,omitempty\"`\n\tDiskMapInfo      []VsanHostDiskMapInfo `xml:\"diskMapInfo,omitempty\"`\n\tChecksumEnabled  *bool                 `xml:\"checksumEnabled\"`\n}\n\nfunc init() {\n\tt[\"VsanHostConfigInfoStorageInfo\"] = reflect.TypeOf((*VsanHostConfigInfoStorageInfo)(nil)).Elem()\n}\n\ntype VsanHostDecommissionMode struct {\n\tDynamicData\n\n\tObjectAction string `xml:\"objectAction\"`\n}\n\nfunc init() {\n\tt[\"VsanHostDecommissionMode\"] = reflect.TypeOf((*VsanHostDecommissionMode)(nil)).Elem()\n}\n\ntype VsanHostDiskMapInfo struct {\n\tDynamicData\n\n\tMapping VsanHostDiskMapping `xml:\"mapping\"`\n\tMounted bool                `xml:\"mounted\"`\n}\n\nfunc init() {\n\tt[\"VsanHostDiskMapInfo\"] = reflect.TypeOf((*VsanHostDiskMapInfo)(nil)).Elem()\n}\n\ntype VsanHostDiskMapResult struct {\n\tDynamicData\n\n\tMapping    VsanHostDiskMapping   `xml:\"mapping\"`\n\tDiskResult []VsanHostDiskResult  `xml:\"diskResult,omitempty\"`\n\tError      *LocalizedMethodFault `xml:\"error,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VsanHostDiskMapResult\"] = reflect.TypeOf((*VsanHostDiskMapResult)(nil)).Elem()\n}\n\ntype VsanHostDiskMapping struct {\n\tDynamicData\n\n\tSsd    HostScsiDisk   `xml:\"ssd\"`\n\tNonSsd []HostScsiDisk `xml:\"nonSsd\"`\n}\n\nfunc init() {\n\tt[\"VsanHostDiskMapping\"] = reflect.TypeOf((*VsanHostDiskMapping)(nil)).Elem()\n}\n\ntype VsanHostDiskResult struct {\n\tDynamicData\n\n\tDisk     HostScsiDisk          `xml:\"disk\"`\n\tState    string                `xml:\"state\"`\n\tVsanUuid string                `xml:\"vsanUuid,omitempty\"`\n\tError    *LocalizedMethodFault `xml:\"error,omitempty\"`\n\tDegraded *bool                 `xml:\"degraded\"`\n}\n\nfunc init() {\n\tt[\"VsanHostDiskResult\"] = reflect.TypeOf((*VsanHostDiskResult)(nil)).Elem()\n}\n\ntype VsanHostFaultDomainInfo 
struct {\n\tDynamicData\n\n\tName string `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"VsanHostFaultDomainInfo\"] = reflect.TypeOf((*VsanHostFaultDomainInfo)(nil)).Elem()\n}\n\ntype VsanHostIpConfig struct {\n\tDynamicData\n\n\tUpstreamIpAddress   string `xml:\"upstreamIpAddress\"`\n\tDownstreamIpAddress string `xml:\"downstreamIpAddress\"`\n}\n\nfunc init() {\n\tt[\"VsanHostIpConfig\"] = reflect.TypeOf((*VsanHostIpConfig)(nil)).Elem()\n}\n\ntype VsanHostMembershipInfo struct {\n\tDynamicData\n\n\tNodeUuid string `xml:\"nodeUuid\"`\n\tHostname string `xml:\"hostname\"`\n}\n\nfunc init() {\n\tt[\"VsanHostMembershipInfo\"] = reflect.TypeOf((*VsanHostMembershipInfo)(nil)).Elem()\n}\n\ntype VsanHostRuntimeInfo struct {\n\tDynamicData\n\n\tMembershipList []VsanHostMembershipInfo       `xml:\"membershipList,omitempty\"`\n\tDiskIssues     []VsanHostRuntimeInfoDiskIssue `xml:\"diskIssues,omitempty\"`\n\tAccessGenNo    int32                          `xml:\"accessGenNo,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VsanHostRuntimeInfo\"] = reflect.TypeOf((*VsanHostRuntimeInfo)(nil)).Elem()\n}\n\ntype VsanHostRuntimeInfoDiskIssue struct {\n\tDynamicData\n\n\tDiskId string `xml:\"diskId\"`\n\tIssue  string `xml:\"issue\"`\n}\n\nfunc init() {\n\tt[\"VsanHostRuntimeInfoDiskIssue\"] = reflect.TypeOf((*VsanHostRuntimeInfoDiskIssue)(nil)).Elem()\n}\n\ntype VsanHostVsanDiskInfo struct {\n\tDynamicData\n\n\tVsanUuid      string `xml:\"vsanUuid\"`\n\tFormatVersion int32  `xml:\"formatVersion\"`\n}\n\nfunc init() {\n\tt[\"VsanHostVsanDiskInfo\"] = reflect.TypeOf((*VsanHostVsanDiskInfo)(nil)).Elem()\n}\n\ntype VsanIncompatibleDiskMapping struct {\n\tVsanDiskFault\n}\n\nfunc init() {\n\tt[\"VsanIncompatibleDiskMapping\"] = reflect.TypeOf((*VsanIncompatibleDiskMapping)(nil)).Elem()\n}\n\ntype VsanIncompatibleDiskMappingFault VsanIncompatibleDiskMapping\n\nfunc init() {\n\tt[\"VsanIncompatibleDiskMappingFault\"] = reflect.TypeOf((*VsanIncompatibleDiskMappingFault)(nil)).Elem()\n}\n\ntype 
VsanNewPolicyBatch struct {\n\tDynamicData\n\n\tSize   []int64 `xml:\"size,omitempty\"`\n\tPolicy string  `xml:\"policy,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VsanNewPolicyBatch\"] = reflect.TypeOf((*VsanNewPolicyBatch)(nil)).Elem()\n}\n\ntype VsanPolicyChangeBatch struct {\n\tDynamicData\n\n\tUuid   []string `xml:\"uuid,omitempty\"`\n\tPolicy string   `xml:\"policy,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VsanPolicyChangeBatch\"] = reflect.TypeOf((*VsanPolicyChangeBatch)(nil)).Elem()\n}\n\ntype VsanPolicyCost struct {\n\tDynamicData\n\n\tChangeDataSize                      int64   `xml:\"changeDataSize,omitempty\"`\n\tCurrentDataSize                     int64   `xml:\"currentDataSize,omitempty\"`\n\tTempDataSize                        int64   `xml:\"tempDataSize,omitempty\"`\n\tCopyDataSize                        int64   `xml:\"copyDataSize,omitempty\"`\n\tChangeFlashReadCacheSize            int64   `xml:\"changeFlashReadCacheSize,omitempty\"`\n\tCurrentFlashReadCacheSize           int64   `xml:\"currentFlashReadCacheSize,omitempty\"`\n\tCurrentDiskSpaceToAddressSpaceRatio float32 `xml:\"currentDiskSpaceToAddressSpaceRatio,omitempty\"`\n\tDiskSpaceToAddressSpaceRatio        float32 `xml:\"diskSpaceToAddressSpaceRatio,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VsanPolicyCost\"] = reflect.TypeOf((*VsanPolicyCost)(nil)).Elem()\n}\n\ntype VsanPolicySatisfiability struct {\n\tDynamicData\n\n\tUuid          string              `xml:\"uuid,omitempty\"`\n\tIsSatisfiable bool                `xml:\"isSatisfiable\"`\n\tReason        *LocalizableMessage `xml:\"reason,omitempty\"`\n\tCost          *VsanPolicyCost     `xml:\"cost,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VsanPolicySatisfiability\"] = reflect.TypeOf((*VsanPolicySatisfiability)(nil)).Elem()\n}\n\ntype VsanUpgradeSystemAPIBrokenIssue struct {\n\tVsanUpgradeSystemPreflightCheckIssue\n\n\tHosts []ManagedObjectReference `xml:\"hosts\"`\n}\n\nfunc init() {\n\tt[\"VsanUpgradeSystemAPIBrokenIssue\"] = 
reflect.TypeOf((*VsanUpgradeSystemAPIBrokenIssue)(nil)).Elem()\n}\n\ntype VsanUpgradeSystemAutoClaimEnabledOnHostsIssue struct {\n\tVsanUpgradeSystemPreflightCheckIssue\n\n\tHosts []ManagedObjectReference `xml:\"hosts\"`\n}\n\nfunc init() {\n\tt[\"VsanUpgradeSystemAutoClaimEnabledOnHostsIssue\"] = reflect.TypeOf((*VsanUpgradeSystemAutoClaimEnabledOnHostsIssue)(nil)).Elem()\n}\n\ntype VsanUpgradeSystemHostsDisconnectedIssue struct {\n\tVsanUpgradeSystemPreflightCheckIssue\n\n\tHosts []ManagedObjectReference `xml:\"hosts\"`\n}\n\nfunc init() {\n\tt[\"VsanUpgradeSystemHostsDisconnectedIssue\"] = reflect.TypeOf((*VsanUpgradeSystemHostsDisconnectedIssue)(nil)).Elem()\n}\n\ntype VsanUpgradeSystemMissingHostsInClusterIssue struct {\n\tVsanUpgradeSystemPreflightCheckIssue\n\n\tHosts []ManagedObjectReference `xml:\"hosts\"`\n}\n\nfunc init() {\n\tt[\"VsanUpgradeSystemMissingHostsInClusterIssue\"] = reflect.TypeOf((*VsanUpgradeSystemMissingHostsInClusterIssue)(nil)).Elem()\n}\n\ntype VsanUpgradeSystemNetworkPartitionInfo struct {\n\tDynamicData\n\n\tHosts []ManagedObjectReference `xml:\"hosts\"`\n}\n\nfunc init() {\n\tt[\"VsanUpgradeSystemNetworkPartitionInfo\"] = reflect.TypeOf((*VsanUpgradeSystemNetworkPartitionInfo)(nil)).Elem()\n}\n\ntype VsanUpgradeSystemNetworkPartitionIssue struct {\n\tVsanUpgradeSystemPreflightCheckIssue\n\n\tPartitions []VsanUpgradeSystemNetworkPartitionInfo `xml:\"partitions\"`\n}\n\nfunc init() {\n\tt[\"VsanUpgradeSystemNetworkPartitionIssue\"] = reflect.TypeOf((*VsanUpgradeSystemNetworkPartitionIssue)(nil)).Elem()\n}\n\ntype VsanUpgradeSystemNotEnoughFreeCapacityIssue struct {\n\tVsanUpgradeSystemPreflightCheckIssue\n\n\tReducedRedundancyUpgradePossible bool `xml:\"reducedRedundancyUpgradePossible\"`\n}\n\nfunc init() {\n\tt[\"VsanUpgradeSystemNotEnoughFreeCapacityIssue\"] = reflect.TypeOf((*VsanUpgradeSystemNotEnoughFreeCapacityIssue)(nil)).Elem()\n}\n\ntype VsanUpgradeSystemPreflightCheckIssue struct {\n\tDynamicData\n\n\tMsg string 
`xml:\"msg\"`\n}\n\nfunc init() {\n\tt[\"VsanUpgradeSystemPreflightCheckIssue\"] = reflect.TypeOf((*VsanUpgradeSystemPreflightCheckIssue)(nil)).Elem()\n}\n\ntype VsanUpgradeSystemPreflightCheckResult struct {\n\tDynamicData\n\n\tIssues               []BaseVsanUpgradeSystemPreflightCheckIssue `xml:\"issues,omitempty,typeattr\"`\n\tDiskMappingToRestore *VsanHostDiskMapping                       `xml:\"diskMappingToRestore,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VsanUpgradeSystemPreflightCheckResult\"] = reflect.TypeOf((*VsanUpgradeSystemPreflightCheckResult)(nil)).Elem()\n}\n\ntype VsanUpgradeSystemRogueHostsInClusterIssue struct {\n\tVsanUpgradeSystemPreflightCheckIssue\n\n\tUuids []string `xml:\"uuids\"`\n}\n\nfunc init() {\n\tt[\"VsanUpgradeSystemRogueHostsInClusterIssue\"] = reflect.TypeOf((*VsanUpgradeSystemRogueHostsInClusterIssue)(nil)).Elem()\n}\n\ntype VsanUpgradeSystemUpgradeHistoryDiskGroupOp struct {\n\tVsanUpgradeSystemUpgradeHistoryItem\n\n\tOperation   string              `xml:\"operation\"`\n\tDiskMapping VsanHostDiskMapping `xml:\"diskMapping\"`\n}\n\nfunc init() {\n\tt[\"VsanUpgradeSystemUpgradeHistoryDiskGroupOp\"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryDiskGroupOp)(nil)).Elem()\n}\n\ntype VsanUpgradeSystemUpgradeHistoryItem struct {\n\tDynamicData\n\n\tTimestamp time.Time               `xml:\"timestamp\"`\n\tHost      *ManagedObjectReference `xml:\"host,omitempty\"`\n\tMessage   string                  `xml:\"message\"`\n\tTask      *ManagedObjectReference `xml:\"task,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VsanUpgradeSystemUpgradeHistoryItem\"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryItem)(nil)).Elem()\n}\n\ntype VsanUpgradeSystemUpgradeHistoryPreflightFail struct {\n\tVsanUpgradeSystemUpgradeHistoryItem\n\n\tPreflightResult VsanUpgradeSystemPreflightCheckResult `xml:\"preflightResult\"`\n}\n\nfunc init() {\n\tt[\"VsanUpgradeSystemUpgradeHistoryPreflightFail\"] = 
reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryPreflightFail)(nil)).Elem()\n}\n\ntype VsanUpgradeSystemUpgradeStatus struct {\n\tDynamicData\n\n\tInProgress bool                                      `xml:\"inProgress\"`\n\tHistory    []BaseVsanUpgradeSystemUpgradeHistoryItem `xml:\"history,omitempty,typeattr\"`\n\tAborted    *bool                                     `xml:\"aborted\"`\n\tCompleted  *bool                                     `xml:\"completed\"`\n\tProgress   int32                                     `xml:\"progress,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VsanUpgradeSystemUpgradeStatus\"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeStatus)(nil)).Elem()\n}\n\ntype VsanUpgradeSystemV2ObjectsPresentDuringDowngradeIssue struct {\n\tVsanUpgradeSystemPreflightCheckIssue\n\n\tUuids []string `xml:\"uuids\"`\n}\n\nfunc init() {\n\tt[\"VsanUpgradeSystemV2ObjectsPresentDuringDowngradeIssue\"] = reflect.TypeOf((*VsanUpgradeSystemV2ObjectsPresentDuringDowngradeIssue)(nil)).Elem()\n}\n\ntype VsanUpgradeSystemWrongEsxVersionIssue struct {\n\tVsanUpgradeSystemPreflightCheckIssue\n\n\tHosts []ManagedObjectReference `xml:\"hosts\"`\n}\n\nfunc init() {\n\tt[\"VsanUpgradeSystemWrongEsxVersionIssue\"] = reflect.TypeOf((*VsanUpgradeSystemWrongEsxVersionIssue)(nil)).Elem()\n}\n\ntype VslmCloneSpec struct {\n\tVslmMigrateSpec\n\n\tName string `xml:\"name\"`\n}\n\nfunc init() {\n\tt[\"VslmCloneSpec\"] = reflect.TypeOf((*VslmCloneSpec)(nil)).Elem()\n}\n\ntype VslmCreateSpec struct {\n\tDynamicData\n\n\tName         string                        `xml:\"name\"`\n\tBackingSpec  BaseVslmCreateSpecBackingSpec `xml:\"backingSpec,typeattr\"`\n\tCapacityInMB int64                         `xml:\"capacityInMB\"`\n}\n\nfunc init() {\n\tt[\"VslmCreateSpec\"] = reflect.TypeOf((*VslmCreateSpec)(nil)).Elem()\n}\n\ntype VslmCreateSpecBackingSpec struct {\n\tDynamicData\n\n\tDatastore ManagedObjectReference `xml:\"datastore\"`\n}\n\nfunc init() {\n\tt[\"VslmCreateSpecBackingSpec\"] = 
reflect.TypeOf((*VslmCreateSpecBackingSpec)(nil)).Elem()\n}\n\ntype VslmCreateSpecDiskFileBackingSpec struct {\n\tVslmCreateSpecBackingSpec\n\n\tProvisioningType string `xml:\"provisioningType,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VslmCreateSpecDiskFileBackingSpec\"] = reflect.TypeOf((*VslmCreateSpecDiskFileBackingSpec)(nil)).Elem()\n}\n\ntype VslmCreateSpecRawDiskMappingBackingSpec struct {\n\tVslmCreateSpecBackingSpec\n\n\tLunUuid           string `xml:\"lunUuid\"`\n\tCompatibilityMode string `xml:\"compatibilityMode\"`\n}\n\nfunc init() {\n\tt[\"VslmCreateSpecRawDiskMappingBackingSpec\"] = reflect.TypeOf((*VslmCreateSpecRawDiskMappingBackingSpec)(nil)).Elem()\n}\n\ntype VslmMigrateSpec struct {\n\tDynamicData\n\n\tBackingSpec BaseVslmCreateSpecBackingSpec `xml:\"backingSpec,typeattr\"`\n\tConsolidate *bool                         `xml:\"consolidate\"`\n}\n\nfunc init() {\n\tt[\"VslmMigrateSpec\"] = reflect.TypeOf((*VslmMigrateSpec)(nil)).Elem()\n}\n\ntype VslmRelocateSpec struct {\n\tVslmMigrateSpec\n}\n\nfunc init() {\n\tt[\"VslmRelocateSpec\"] = reflect.TypeOf((*VslmRelocateSpec)(nil)).Elem()\n}\n\ntype VslmTagEntry struct {\n\tDynamicData\n\n\tTagName            string `xml:\"tagName\"`\n\tParentCategoryName string `xml:\"parentCategoryName\"`\n}\n\nfunc init() {\n\tt[\"VslmTagEntry\"] = reflect.TypeOf((*VslmTagEntry)(nil)).Elem()\n}\n\ntype VspanDestPortConflict struct {\n\tDvsFault\n\n\tVspanSessionKey1 string `xml:\"vspanSessionKey1\"`\n\tVspanSessionKey2 string `xml:\"vspanSessionKey2\"`\n\tPortKey          string `xml:\"portKey\"`\n}\n\nfunc init() {\n\tt[\"VspanDestPortConflict\"] = reflect.TypeOf((*VspanDestPortConflict)(nil)).Elem()\n}\n\ntype VspanDestPortConflictFault VspanDestPortConflict\n\nfunc init() {\n\tt[\"VspanDestPortConflictFault\"] = reflect.TypeOf((*VspanDestPortConflictFault)(nil)).Elem()\n}\n\ntype VspanPortConflict struct {\n\tDvsFault\n\n\tVspanSessionKey1 string `xml:\"vspanSessionKey1\"`\n\tVspanSessionKey2 string 
`xml:\"vspanSessionKey2\"`\n\tPortKey          string `xml:\"portKey\"`\n}\n\nfunc init() {\n\tt[\"VspanPortConflict\"] = reflect.TypeOf((*VspanPortConflict)(nil)).Elem()\n}\n\ntype VspanPortConflictFault VspanPortConflict\n\nfunc init() {\n\tt[\"VspanPortConflictFault\"] = reflect.TypeOf((*VspanPortConflictFault)(nil)).Elem()\n}\n\ntype VspanPortMoveFault struct {\n\tDvsFault\n\n\tSrcPortgroupName  string `xml:\"srcPortgroupName\"`\n\tDestPortgroupName string `xml:\"destPortgroupName\"`\n\tPortKey           string `xml:\"portKey\"`\n}\n\nfunc init() {\n\tt[\"VspanPortMoveFault\"] = reflect.TypeOf((*VspanPortMoveFault)(nil)).Elem()\n}\n\ntype VspanPortMoveFaultFault VspanPortMoveFault\n\nfunc init() {\n\tt[\"VspanPortMoveFaultFault\"] = reflect.TypeOf((*VspanPortMoveFaultFault)(nil)).Elem()\n}\n\ntype VspanPortPromiscChangeFault struct {\n\tDvsFault\n\n\tPortKey string `xml:\"portKey\"`\n}\n\nfunc init() {\n\tt[\"VspanPortPromiscChangeFault\"] = reflect.TypeOf((*VspanPortPromiscChangeFault)(nil)).Elem()\n}\n\ntype VspanPortPromiscChangeFaultFault VspanPortPromiscChangeFault\n\nfunc init() {\n\tt[\"VspanPortPromiscChangeFaultFault\"] = reflect.TypeOf((*VspanPortPromiscChangeFaultFault)(nil)).Elem()\n}\n\ntype VspanPortgroupPromiscChangeFault struct {\n\tDvsFault\n\n\tPortgroupName string `xml:\"portgroupName\"`\n}\n\nfunc init() {\n\tt[\"VspanPortgroupPromiscChangeFault\"] = reflect.TypeOf((*VspanPortgroupPromiscChangeFault)(nil)).Elem()\n}\n\ntype VspanPortgroupPromiscChangeFaultFault VspanPortgroupPromiscChangeFault\n\nfunc init() {\n\tt[\"VspanPortgroupPromiscChangeFaultFault\"] = reflect.TypeOf((*VspanPortgroupPromiscChangeFaultFault)(nil)).Elem()\n}\n\ntype VspanPortgroupTypeChangeFault struct {\n\tDvsFault\n\n\tPortgroupName string `xml:\"portgroupName\"`\n}\n\nfunc init() {\n\tt[\"VspanPortgroupTypeChangeFault\"] = reflect.TypeOf((*VspanPortgroupTypeChangeFault)(nil)).Elem()\n}\n\ntype VspanPortgroupTypeChangeFaultFault VspanPortgroupTypeChangeFault\n\nfunc 
init() {\n\tt[\"VspanPortgroupTypeChangeFaultFault\"] = reflect.TypeOf((*VspanPortgroupTypeChangeFaultFault)(nil)).Elem()\n}\n\ntype VspanPromiscuousPortNotSupported struct {\n\tDvsFault\n\n\tVspanSessionKey string `xml:\"vspanSessionKey\"`\n\tPortKey         string `xml:\"portKey\"`\n}\n\nfunc init() {\n\tt[\"VspanPromiscuousPortNotSupported\"] = reflect.TypeOf((*VspanPromiscuousPortNotSupported)(nil)).Elem()\n}\n\ntype VspanPromiscuousPortNotSupportedFault VspanPromiscuousPortNotSupported\n\nfunc init() {\n\tt[\"VspanPromiscuousPortNotSupportedFault\"] = reflect.TypeOf((*VspanPromiscuousPortNotSupportedFault)(nil)).Elem()\n}\n\ntype VspanSameSessionPortConflict struct {\n\tDvsFault\n\n\tVspanSessionKey string `xml:\"vspanSessionKey\"`\n\tPortKey         string `xml:\"portKey\"`\n}\n\nfunc init() {\n\tt[\"VspanSameSessionPortConflict\"] = reflect.TypeOf((*VspanSameSessionPortConflict)(nil)).Elem()\n}\n\ntype VspanSameSessionPortConflictFault VspanSameSessionPortConflict\n\nfunc init() {\n\tt[\"VspanSameSessionPortConflictFault\"] = reflect.TypeOf((*VspanSameSessionPortConflictFault)(nil)).Elem()\n}\n\ntype VvolDatastoreInfo struct {\n\tDatastoreInfo\n\n\tVvolDS *HostVvolVolume `xml:\"vvolDS,omitempty\"`\n}\n\nfunc init() {\n\tt[\"VvolDatastoreInfo\"] = reflect.TypeOf((*VvolDatastoreInfo)(nil)).Elem()\n}\n\ntype WaitForUpdates WaitForUpdatesRequestType\n\nfunc init() {\n\tt[\"WaitForUpdates\"] = reflect.TypeOf((*WaitForUpdates)(nil)).Elem()\n}\n\ntype WaitForUpdatesEx WaitForUpdatesExRequestType\n\nfunc init() {\n\tt[\"WaitForUpdatesEx\"] = reflect.TypeOf((*WaitForUpdatesEx)(nil)).Elem()\n}\n\ntype WaitForUpdatesExRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tVersion string                 `xml:\"version,omitempty\"`\n\tOptions *WaitOptions           `xml:\"options,omitempty\"`\n}\n\nfunc init() {\n\tt[\"WaitForUpdatesExRequestType\"] = reflect.TypeOf((*WaitForUpdatesExRequestType)(nil)).Elem()\n}\n\ntype WaitForUpdatesExResponse struct 
{\n\tReturnval *UpdateSet `xml:\"returnval,omitempty\"`\n}\n\ntype WaitForUpdatesRequestType struct {\n\tThis    ManagedObjectReference `xml:\"_this\"`\n\tVersion string                 `xml:\"version,omitempty\"`\n}\n\nfunc init() {\n\tt[\"WaitForUpdatesRequestType\"] = reflect.TypeOf((*WaitForUpdatesRequestType)(nil)).Elem()\n}\n\ntype WaitForUpdatesResponse struct {\n\tReturnval UpdateSet `xml:\"returnval\"`\n}\n\ntype WaitOptions struct {\n\tDynamicData\n\n\tMaxWaitSeconds   *int32 `xml:\"maxWaitSeconds\"`\n\tMaxObjectUpdates int32  `xml:\"maxObjectUpdates,omitempty\"`\n}\n\nfunc init() {\n\tt[\"WaitOptions\"] = reflect.TypeOf((*WaitOptions)(nil)).Elem()\n}\n\ntype WakeOnLanNotSupported struct {\n\tVirtualHardwareCompatibilityIssue\n}\n\nfunc init() {\n\tt[\"WakeOnLanNotSupported\"] = reflect.TypeOf((*WakeOnLanNotSupported)(nil)).Elem()\n}\n\ntype WakeOnLanNotSupportedByVmotionNIC struct {\n\tHostPowerOpFailed\n}\n\nfunc init() {\n\tt[\"WakeOnLanNotSupportedByVmotionNIC\"] = reflect.TypeOf((*WakeOnLanNotSupportedByVmotionNIC)(nil)).Elem()\n}\n\ntype WakeOnLanNotSupportedByVmotionNICFault WakeOnLanNotSupportedByVmotionNIC\n\nfunc init() {\n\tt[\"WakeOnLanNotSupportedByVmotionNICFault\"] = reflect.TypeOf((*WakeOnLanNotSupportedByVmotionNICFault)(nil)).Elem()\n}\n\ntype WakeOnLanNotSupportedFault WakeOnLanNotSupported\n\nfunc init() {\n\tt[\"WakeOnLanNotSupportedFault\"] = reflect.TypeOf((*WakeOnLanNotSupportedFault)(nil)).Elem()\n}\n\ntype WarningUpgradeEvent struct {\n\tUpgradeEvent\n}\n\nfunc init() {\n\tt[\"WarningUpgradeEvent\"] = reflect.TypeOf((*WarningUpgradeEvent)(nil)).Elem()\n}\n\ntype WeeklyTaskScheduler struct {\n\tDailyTaskScheduler\n\n\tSunday    bool `xml:\"sunday\"`\n\tMonday    bool `xml:\"monday\"`\n\tTuesday   bool `xml:\"tuesday\"`\n\tWednesday bool `xml:\"wednesday\"`\n\tThursday  bool `xml:\"thursday\"`\n\tFriday    bool `xml:\"friday\"`\n\tSaturday  bool `xml:\"saturday\"`\n}\n\nfunc init() {\n\tt[\"WeeklyTaskScheduler\"] = 
reflect.TypeOf((*WeeklyTaskScheduler)(nil)).Elem()\n}\n\ntype WillLoseHAProtection struct {\n\tMigrationFault\n\n\tResolution string `xml:\"resolution\"`\n}\n\nfunc init() {\n\tt[\"WillLoseHAProtection\"] = reflect.TypeOf((*WillLoseHAProtection)(nil)).Elem()\n}\n\ntype WillLoseHAProtectionFault WillLoseHAProtection\n\nfunc init() {\n\tt[\"WillLoseHAProtectionFault\"] = reflect.TypeOf((*WillLoseHAProtectionFault)(nil)).Elem()\n}\n\ntype WillModifyConfigCpuRequirements struct {\n\tMigrationFault\n}\n\nfunc init() {\n\tt[\"WillModifyConfigCpuRequirements\"] = reflect.TypeOf((*WillModifyConfigCpuRequirements)(nil)).Elem()\n}\n\ntype WillModifyConfigCpuRequirementsFault WillModifyConfigCpuRequirements\n\nfunc init() {\n\tt[\"WillModifyConfigCpuRequirementsFault\"] = reflect.TypeOf((*WillModifyConfigCpuRequirementsFault)(nil)).Elem()\n}\n\ntype WillResetSnapshotDirectory struct {\n\tMigrationFault\n}\n\nfunc init() {\n\tt[\"WillResetSnapshotDirectory\"] = reflect.TypeOf((*WillResetSnapshotDirectory)(nil)).Elem()\n}\n\ntype WillResetSnapshotDirectoryFault WillResetSnapshotDirectory\n\nfunc init() {\n\tt[\"WillResetSnapshotDirectoryFault\"] = reflect.TypeOf((*WillResetSnapshotDirectoryFault)(nil)).Elem()\n}\n\ntype WinNetBIOSConfigInfo struct {\n\tNetBIOSConfigInfo\n\n\tPrimaryWINS   string `xml:\"primaryWINS\"`\n\tSecondaryWINS string `xml:\"secondaryWINS,omitempty\"`\n}\n\nfunc init() {\n\tt[\"WinNetBIOSConfigInfo\"] = reflect.TypeOf((*WinNetBIOSConfigInfo)(nil)).Elem()\n}\n\ntype WipeDiskFault struct {\n\tVimFault\n}\n\nfunc init() {\n\tt[\"WipeDiskFault\"] = reflect.TypeOf((*WipeDiskFault)(nil)).Elem()\n}\n\ntype WipeDiskFaultFault WipeDiskFault\n\nfunc init() {\n\tt[\"WipeDiskFaultFault\"] = reflect.TypeOf((*WipeDiskFaultFault)(nil)).Elem()\n}\n\ntype WitnessNodeInfo struct {\n\tDynamicData\n\n\tIpSettings CustomizationIPSettings `xml:\"ipSettings\"`\n\tBiosUuid   string                  `xml:\"biosUuid,omitempty\"`\n}\n\nfunc init() {\n\tt[\"WitnessNodeInfo\"] = 
reflect.TypeOf((*WitnessNodeInfo)(nil)).Elem()\n}\n\ntype XmlToCustomizationSpecItem XmlToCustomizationSpecItemRequestType\n\nfunc init() {\n\tt[\"XmlToCustomizationSpecItem\"] = reflect.TypeOf((*XmlToCustomizationSpecItem)(nil)).Elem()\n}\n\ntype XmlToCustomizationSpecItemRequestType struct {\n\tThis        ManagedObjectReference `xml:\"_this\"`\n\tSpecItemXml string                 `xml:\"specItemXml\"`\n}\n\nfunc init() {\n\tt[\"XmlToCustomizationSpecItemRequestType\"] = reflect.TypeOf((*XmlToCustomizationSpecItemRequestType)(nil)).Elem()\n}\n\ntype XmlToCustomizationSpecItemResponse struct {\n\tReturnval CustomizationSpecItem `xml:\"returnval\"`\n}\n\ntype ZeroFillVirtualDiskRequestType struct {\n\tThis       ManagedObjectReference  `xml:\"_this\"`\n\tName       string                  `xml:\"name\"`\n\tDatacenter *ManagedObjectReference `xml:\"datacenter,omitempty\"`\n}\n\nfunc init() {\n\tt[\"ZeroFillVirtualDiskRequestType\"] = reflect.TypeOf((*ZeroFillVirtualDiskRequestType)(nil)).Elem()\n}\n\ntype ZeroFillVirtualDisk_Task ZeroFillVirtualDiskRequestType\n\nfunc init() {\n\tt[\"ZeroFillVirtualDisk_Task\"] = reflect.TypeOf((*ZeroFillVirtualDisk_Task)(nil)).Elem()\n}\n\ntype ZeroFillVirtualDisk_TaskResponse struct {\n\tReturnval ManagedObjectReference `xml:\"returnval\"`\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/types/types_test.go",
    "content": "/*\nCopyright (c) 2014-2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage types\n\nimport (\n\t\"testing\"\n\n\t\"github.com/vmware/govmomi/vim25/xml\"\n)\n\nfunc TestVirtualMachineConfigSpec(t *testing.T) {\n\tspec := VirtualMachineConfigSpec{\n\t\tName:     \"vm-001\",\n\t\tGuestId:  \"otherGuest\",\n\t\tFiles:    &VirtualMachineFileInfo{VmPathName: \"[datastore1]\"},\n\t\tNumCPUs:  1,\n\t\tMemoryMB: 128,\n\t\tDeviceChange: []BaseVirtualDeviceConfigSpec{\n\t\t\t&VirtualDeviceConfigSpec{\n\t\t\t\tOperation: VirtualDeviceConfigSpecOperationAdd,\n\t\t\t\tDevice: &VirtualLsiLogicController{VirtualSCSIController{\n\t\t\t\t\tSharedBus: VirtualSCSISharingNoSharing,\n\t\t\t\t\tVirtualController: VirtualController{\n\t\t\t\t\t\tBusNumber: 0,\n\t\t\t\t\t\tVirtualDevice: VirtualDevice{\n\t\t\t\t\t\t\tKey: 1000,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t},\n\t\t\t&VirtualDeviceConfigSpec{\n\t\t\t\tOperation:     VirtualDeviceConfigSpecOperationAdd,\n\t\t\t\tFileOperation: VirtualDeviceConfigSpecFileOperationCreate,\n\t\t\t\tDevice: &VirtualDisk{\n\t\t\t\t\tVirtualDevice: VirtualDevice{\n\t\t\t\t\t\tKey:           0,\n\t\t\t\t\t\tControllerKey: 1000,\n\t\t\t\t\t\tUnitNumber:    new(int32), // zero default value\n\t\t\t\t\t\tBacking: &VirtualDiskFlatVer2BackingInfo{\n\t\t\t\t\t\t\tDiskMode:        string(VirtualDiskModePersistent),\n\t\t\t\t\t\t\tThinProvisioned: 
NewBool(true),\n\t\t\t\t\t\t\tVirtualDeviceFileBackingInfo: VirtualDeviceFileBackingInfo{\n\t\t\t\t\t\t\t\tFileName: \"[datastore1]\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tCapacityInKB: 4000000,\n\t\t\t\t},\n\t\t\t},\n\t\t\t&VirtualDeviceConfigSpec{\n\t\t\t\tOperation: VirtualDeviceConfigSpecOperationAdd,\n\t\t\t\tDevice: &VirtualE1000{VirtualEthernetCard{\n\t\t\t\t\tVirtualDevice: VirtualDevice{\n\t\t\t\t\t\tKey: 0,\n\t\t\t\t\t\tDeviceInfo: &Description{\n\t\t\t\t\t\t\tLabel:   \"Network Adapter 1\",\n\t\t\t\t\t\t\tSummary: \"VM Network\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tBacking: &VirtualEthernetCardNetworkBackingInfo{\n\t\t\t\t\t\t\tVirtualDeviceDeviceBackingInfo: VirtualDeviceDeviceBackingInfo{\n\t\t\t\t\t\t\t\tDeviceName: \"VM Network\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tAddressType: string(VirtualEthernetCardMacTypeGenerated),\n\t\t\t\t}},\n\t\t\t},\n\t\t},\n\t\tExtraConfig: []BaseOptionValue{\n\t\t\t&OptionValue{Key: \"bios.bootOrder\", Value: \"ethernet0\"},\n\t\t},\n\t}\n\n\t_, err := xml.MarshalIndent(spec, \"\", \" \")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/xml/LICENSE",
    "content": "Copyright (c) 2012 The Go Authors. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n   * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n   * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n   * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/xml/atom_test.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage xml\n\nimport \"time\"\n\nvar atomValue = &Feed{\n\tXMLName: Name{\"http://www.w3.org/2005/Atom\", \"feed\"},\n\tTitle:   \"Example Feed\",\n\tLink:    []Link{{Href: \"http://example.org/\"}},\n\tUpdated: ParseTime(\"2003-12-13T18:30:02Z\"),\n\tAuthor:  Person{Name: \"John Doe\"},\n\tId:      \"urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6\",\n\n\tEntry: []Entry{\n\t\t{\n\t\t\tTitle:   \"Atom-Powered Robots Run Amok\",\n\t\t\tLink:    []Link{{Href: \"http://example.org/2003/12/13/atom03\"}},\n\t\t\tId:      \"urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a\",\n\t\t\tUpdated: ParseTime(\"2003-12-13T18:30:02Z\"),\n\t\t\tSummary: NewText(\"Some text.\"),\n\t\t},\n\t},\n}\n\nvar atomXml = `` +\n\t`<feed xmlns=\"http://www.w3.org/2005/Atom\" updated=\"2003-12-13T18:30:02Z\">` +\n\t`<title>Example Feed</title>` +\n\t`<id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>` +\n\t`<link href=\"http://example.org/\"></link>` +\n\t`<author><name>John Doe</name><uri></uri><email></email></author>` +\n\t`<entry>` +\n\t`<title>Atom-Powered Robots Run Amok</title>` +\n\t`<id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>` +\n\t`<link href=\"http://example.org/2003/12/13/atom03\"></link>` +\n\t`<updated>2003-12-13T18:30:02Z</updated>` +\n\t`<author><name></name><uri></uri><email></email></author>` +\n\t`<summary>Some text.</summary>` +\n\t`</entry>` +\n\t`</feed>`\n\nfunc ParseTime(str string) time.Time {\n\tt, err := time.Parse(time.RFC3339, str)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn t\n}\n\nfunc NewText(text string) Text {\n\treturn Text{\n\t\tBody: text,\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/xml/example_test.go",
    "content": "// Copyright 2012 The Go Authors.  All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage xml_test\n\nimport (\n\t\"encoding/xml\"\n\t\"fmt\"\n\t\"os\"\n)\n\nfunc ExampleMarshalIndent() {\n\ttype Address struct {\n\t\tCity, State string\n\t}\n\ttype Person struct {\n\t\tXMLName   xml.Name `xml:\"person\"`\n\t\tId        int      `xml:\"id,attr\"`\n\t\tFirstName string   `xml:\"name>first\"`\n\t\tLastName  string   `xml:\"name>last\"`\n\t\tAge       int      `xml:\"age\"`\n\t\tHeight    float32  `xml:\"height,omitempty\"`\n\t\tMarried   bool\n\t\tAddress\n\t\tComment string `xml:\",comment\"`\n\t}\n\n\tv := &Person{Id: 13, FirstName: \"John\", LastName: \"Doe\", Age: 42}\n\tv.Comment = \" Need more details. \"\n\tv.Address = Address{\"Hanga Roa\", \"Easter Island\"}\n\n\toutput, err := xml.MarshalIndent(v, \"  \", \"    \")\n\tif err != nil {\n\t\tfmt.Printf(\"error: %v\\n\", err)\n\t}\n\n\tos.Stdout.Write(output)\n\t// Output:\n\t//   <person id=\"13\">\n\t//       <name>\n\t//           <first>John</first>\n\t//           <last>Doe</last>\n\t//       </name>\n\t//       <age>42</age>\n\t//       <Married>false</Married>\n\t//       <City>Hanga Roa</City>\n\t//       <State>Easter Island</State>\n\t//       <!-- Need more details. -->\n\t//   </person>\n}\n\nfunc ExampleEncoder() {\n\ttype Address struct {\n\t\tCity, State string\n\t}\n\ttype Person struct {\n\t\tXMLName   xml.Name `xml:\"person\"`\n\t\tId        int      `xml:\"id,attr\"`\n\t\tFirstName string   `xml:\"name>first\"`\n\t\tLastName  string   `xml:\"name>last\"`\n\t\tAge       int      `xml:\"age\"`\n\t\tHeight    float32  `xml:\"height,omitempty\"`\n\t\tMarried   bool\n\t\tAddress\n\t\tComment string `xml:\",comment\"`\n\t}\n\n\tv := &Person{Id: 13, FirstName: \"John\", LastName: \"Doe\", Age: 42}\n\tv.Comment = \" Need more details. 
\"\n\tv.Address = Address{\"Hanga Roa\", \"Easter Island\"}\n\n\tenc := xml.NewEncoder(os.Stdout)\n\tenc.Indent(\"  \", \"    \")\n\tif err := enc.Encode(v); err != nil {\n\t\tfmt.Printf(\"error: %v\\n\", err)\n\t}\n\n\t// Output:\n\t//   <person id=\"13\">\n\t//       <name>\n\t//           <first>John</first>\n\t//           <last>Doe</last>\n\t//       </name>\n\t//       <age>42</age>\n\t//       <Married>false</Married>\n\t//       <City>Hanga Roa</City>\n\t//       <State>Easter Island</State>\n\t//       <!-- Need more details. -->\n\t//   </person>\n}\n\n// This example demonstrates unmarshaling an XML excerpt into a value with\n// some preset fields. Note that the Phone field isn't modified and that\n// the XML <Company> element is ignored. Also, the Groups field is assigned\n// considering the element path provided in its tag.\nfunc ExampleUnmarshal() {\n\ttype Email struct {\n\t\tWhere string `xml:\"where,attr\"`\n\t\tAddr  string\n\t}\n\ttype Address struct {\n\t\tCity, State string\n\t}\n\ttype Result struct {\n\t\tXMLName xml.Name `xml:\"Person\"`\n\t\tName    string   `xml:\"FullName\"`\n\t\tPhone   string\n\t\tEmail   []Email\n\t\tGroups  []string `xml:\"Group>Value\"`\n\t\tAddress\n\t}\n\tv := Result{Name: \"none\", Phone: \"none\"}\n\n\tdata := `\n\t\t<Person>\n\t\t\t<FullName>Grace R. 
Emlin</FullName>\n\t\t\t<Company>Example Inc.</Company>\n\t\t\t<Email where=\"home\">\n\t\t\t\t<Addr>gre@example.com</Addr>\n\t\t\t</Email>\n\t\t\t<Email where='work'>\n\t\t\t\t<Addr>gre@work.com</Addr>\n\t\t\t</Email>\n\t\t\t<Group>\n\t\t\t\t<Value>Friends</Value>\n\t\t\t\t<Value>Squash</Value>\n\t\t\t</Group>\n\t\t\t<City>Hanga Roa</City>\n\t\t\t<State>Easter Island</State>\n\t\t</Person>\n\t`\n\terr := xml.Unmarshal([]byte(data), &v)\n\tif err != nil {\n\t\tfmt.Printf(\"error: %v\", err)\n\t\treturn\n\t}\n\tfmt.Printf(\"XMLName: %#v\\n\", v.XMLName)\n\tfmt.Printf(\"Name: %q\\n\", v.Name)\n\tfmt.Printf(\"Phone: %q\\n\", v.Phone)\n\tfmt.Printf(\"Email: %v\\n\", v.Email)\n\tfmt.Printf(\"Groups: %v\\n\", v.Groups)\n\tfmt.Printf(\"Address: %v\\n\", v.Address)\n\t// Output:\n\t// XMLName: xml.Name{Space:\"\", Local:\"Person\"}\n\t// Name: \"Grace R. Emlin\"\n\t// Phone: \"none\"\n\t// Email: [{home gre@example.com} {work gre@work.com}]\n\t// Groups: [Friends Squash]\n\t// Address: {Hanga Roa Easter Island}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/xml/extras.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage xml\n\nimport (\n\t\"reflect\"\n\t\"time\"\n)\n\nvar xmlSchemaInstance = Name{Space: \"http://www.w3.org/2001/XMLSchema-instance\", Local: \"type\"}\n\nvar xsiType = Name{Space: \"xsi\", Local: \"type\"}\n\nvar stringToTypeMap = map[string]reflect.Type{\n\t\"xsd:boolean\":       reflect.TypeOf((*bool)(nil)).Elem(),\n\t\"xsd:byte\":          reflect.TypeOf((*int8)(nil)).Elem(),\n\t\"xsd:short\":         reflect.TypeOf((*int16)(nil)).Elem(),\n\t\"xsd:int\":           reflect.TypeOf((*int32)(nil)).Elem(),\n\t\"xsd:long\":          reflect.TypeOf((*int64)(nil)).Elem(),\n\t\"xsd:unsignedByte\":  reflect.TypeOf((*uint8)(nil)).Elem(),\n\t\"xsd:unsignedShort\": reflect.TypeOf((*uint16)(nil)).Elem(),\n\t\"xsd:unsignedInt\":   reflect.TypeOf((*uint32)(nil)).Elem(),\n\t\"xsd:unsignedLong\":  reflect.TypeOf((*uint64)(nil)).Elem(),\n\t\"xsd:float\":         reflect.TypeOf((*float32)(nil)).Elem(),\n\t\"xsd:double\":        reflect.TypeOf((*float64)(nil)).Elem(),\n\t\"xsd:string\":        reflect.TypeOf((*string)(nil)).Elem(),\n\t\"xsd:dateTime\":      reflect.TypeOf((*time.Time)(nil)).Elem(),\n\t\"xsd:base64Binary\":  reflect.TypeOf((*[]byte)(nil)).Elem(),\n}\n\n// Return a reflect.Type for the specified type. Nil if unknown.\nfunc stringToType(s string) reflect.Type {\n\treturn stringToTypeMap[s]\n}\n\n// Return a string for the specified reflect.Type. 
Panic if unknown.\nfunc typeToString(typ reflect.Type) string {\n\tswitch typ.Kind() {\n\tcase reflect.Bool:\n\t\treturn \"xsd:boolean\"\n\tcase reflect.Int8:\n\t\treturn \"xsd:byte\"\n\tcase reflect.Int16:\n\t\treturn \"xsd:short\"\n\tcase reflect.Int32:\n\t\treturn \"xsd:int\"\n\tcase reflect.Int, reflect.Int64:\n\t\treturn \"xsd:long\"\n\tcase reflect.Uint8:\n\t\treturn \"xsd:unsignedByte\"\n\tcase reflect.Uint16:\n\t\treturn \"xsd:unsignedShort\"\n\tcase reflect.Uint32:\n\t\treturn \"xsd:unsignedInt\"\n\tcase reflect.Uint, reflect.Uint64:\n\t\treturn \"xsd:unsignedLong\"\n\tcase reflect.Float32:\n\t\treturn \"xsd:float\"\n\tcase reflect.Float64:\n\t\treturn \"xsd:double\"\n\tcase reflect.String:\n\t\tname := typ.Name()\n\t\tif name == \"string\" {\n\t\t\treturn \"xsd:string\"\n\t\t}\n\t\treturn name\n\tcase reflect.Struct:\n\t\tif typ == stringToTypeMap[\"xsd:dateTime\"] {\n\t\t\treturn \"xsd:dateTime\"\n\t\t}\n\n\t\t// Expect any other struct to be handled...\n\t\treturn typ.Name()\n\tcase reflect.Slice:\n\t\tif typ.Elem().Kind() == reflect.Uint8 {\n\t\t\treturn \"xsd:base64Binary\"\n\t\t}\n\tcase reflect.Array:\n\t\tif typ.Elem().Kind() == reflect.Uint8 {\n\t\t\treturn \"xsd:base64Binary\"\n\t\t}\n\t}\n\n\tpanic(\"don't know what to do for type: \" + typ.String())\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/xml/extras_test.go",
    "content": "/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage xml\n\nimport (\n\t\"bytes\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n)\n\ntype MyType struct {\n\tValue string\n}\n\nvar myTypes = map[string]reflect.Type{\n\t\"MyType\":      reflect.TypeOf(MyType{}),\n\t\"ValueType\":   reflect.TypeOf(ValueType{}),\n\t\"PointerType\": reflect.TypeOf(PointerType{}),\n}\n\nfunc MyTypes(name string) (reflect.Type, bool) {\n\tt, ok := myTypes[name]\n\treturn t, ok\n}\n\nfunc TestMarshalWithEmptyInterface(t *testing.T) {\n\tvar r1, r2 struct {\n\t\tXMLName Name          `xml:\"root\"`\n\t\tValues  []interface{} `xml:\"value,typeattr\"`\n\t}\n\n\tvar tests = []struct {\n\t\tValue interface{}\n\t}{\n\t\t{Value: bool(true)},\n\t\t{Value: int8(-8)},\n\t\t{Value: int16(-16)},\n\t\t{Value: int32(-32)},\n\t\t{Value: int64(-64)},\n\t\t{Value: uint8(8)},\n\t\t{Value: uint16(16)},\n\t\t{Value: uint32(32)},\n\t\t{Value: uint64(64)},\n\t\t{Value: float32(32.0)},\n\t\t{Value: float64(64.0)},\n\t\t{Value: string(\"string\")},\n\t\t{Value: time.Now()},\n\t\t{Value: ParseTime(\"2009-10-04T01:35:58+00:00\")},\n\t\t{Value: []byte(\"bytes\")},\n\t\t{Value: MyType{Value: \"v\"}},\n\t}\n\n\tfor _, test := range tests {\n\t\tr1.XMLName.Local = \"root\"\n\t\tr1.Values = []interface{}{test.Value}\n\t\tr2.XMLName = Name{}\n\t\tr2.Values = nil\n\n\t\tb, err := Marshal(r1)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Marshal: %s\", 
err)\n\t\t}\n\n\t\tdec := NewDecoder(bytes.NewReader(b))\n\t\tdec.TypeFunc = MyTypes\n\t\terr = dec.Decode(&r2)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Unmarshal: %s\", err)\n\t\t}\n\n\t\tswitch r1.Values[0].(type) {\n\t\tcase time.Time:\n\t\t\tif !r1.Values[0].(time.Time).Equal(r2.Values[0].(time.Time)) {\n\t\t\t\tt.Errorf(\"Expected: %#v, actual: %#v\", r1, r2)\n\t\t\t}\n\t\tdefault:\n\t\t\tif !reflect.DeepEqual(r1, r2) {\n\t\t\t\tt.Errorf(\"Expected: %#v, actual: %#v\", r1, r2)\n\t\t\t}\n\t\t}\n\t}\n}\n\ntype VIntf interface {\n\tV() string\n}\n\ntype ValueType struct {\n\tValue string `xml:\",chardata\"`\n}\n\ntype PointerType struct {\n\tValue string `xml:\",chardata\"`\n}\n\nfunc (t ValueType) V() string {\n\treturn t.Value\n}\n\nfunc (t *PointerType) V() string {\n\treturn t.Value\n}\n\nfunc TestMarshalWithInterface(t *testing.T) {\n\tvar r1, r2 struct {\n\t\tXMLName Name    `xml:\"root\"`\n\t\tValues  []VIntf `xml:\"value,typeattr\"`\n\t}\n\n\tr1.XMLName.Local = \"root\"\n\tr1.Values = []VIntf{\n\t\tValueType{\"v1\"},\n\t\t&PointerType{\"v2\"},\n\t}\n\n\tb, err := Marshal(r1)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal: %s\", err)\n\t}\n\n\tdec := NewDecoder(bytes.NewReader(b))\n\tdec.TypeFunc = MyTypes\n\terr = dec.Decode(&r2)\n\tif err != nil {\n\t\tt.Fatalf(\"Unmarshal: %s\", err)\n\t}\n\n\tif !reflect.DeepEqual(r1, r2) {\n\t\tt.Errorf(\"expected: %#v, actual: %#v\", r1, r2)\n\t}\n}\n\ntype test3iface interface {\n\tValue() string\n}\n\ntype test3a struct {\n\tV string `xml:\",chardata\"`\n}\n\nfunc (t test3a) Value() string { return t.V }\n\ntype test3b struct {\n\tV string `xml:\",chardata\"`\n}\n\nfunc (t test3b) Value() string { return t.V }\n\nfunc TestUnmarshalInterfaceWithoutTypeAttr(t *testing.T) {\n\tvar r struct {\n\t\tXMLName Name         `xml:\"root\"`\n\t\tValues  []test3iface `xml:\"value,typeattr\"`\n\t}\n\n\tb := `\n\t<root xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n\t<value 
xsi:type=\"test3a\">A</value>\n\t<value>B</value>\n\t</root>\n\t`\n\n\tfn := func(name string) (reflect.Type, bool) {\n\t\tswitch name {\n\t\tcase \"test3a\":\n\t\t\treturn reflect.TypeOf(test3a{}), true\n\t\tcase \"test3iface\":\n\t\t\treturn reflect.TypeOf(test3b{}), true\n\t\tdefault:\n\t\t\treturn nil, false\n\t\t}\n\t}\n\n\tdec := NewDecoder(bytes.NewReader([]byte(b)))\n\tdec.TypeFunc = fn\n\terr := dec.Decode(&r)\n\tif err != nil {\n\t\tt.Fatalf(\"Unmarshal: %s\", err)\n\t}\n\n\tif len(r.Values) != 2 {\n\t\tt.Errorf(\"Expected 2 values\")\n\t}\n\n\texps := []struct {\n\t\tTyp reflect.Type\n\t\tVal string\n\t}{\n\t\t{\n\t\t\tTyp: reflect.TypeOf(test3a{}),\n\t\t\tVal: \"A\",\n\t\t},\n\t\t{\n\t\t\tTyp: reflect.TypeOf(test3b{}),\n\t\t\tVal: \"B\",\n\t\t},\n\t}\n\n\tfor i, e := range exps {\n\t\tif val := r.Values[i].Value(); val != e.Val {\n\t\t\tt.Errorf(\"Expected: %s, got: %s\", e.Val, val)\n\t\t}\n\n\t\tif typ := reflect.TypeOf(r.Values[i]); typ.Name() != e.Typ.Name() {\n\t\t\tt.Errorf(\"Expected: %s, got: %s\", e.Typ.Name(), typ.Name())\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/xml/marshal.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage xml\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\t// A generic XML header suitable for use with the output of Marshal.\n\t// This is not automatically added to any output of this package,\n\t// it is provided as a convenience.\n\tHeader = `<?xml version=\"1.0\" encoding=\"UTF-8\"?>` + \"\\n\"\n)\n\n// Marshal returns the XML encoding of v.\n//\n// Marshal handles an array or slice by marshalling each of the elements.\n// Marshal handles a pointer by marshalling the value it points at or, if the\n// pointer is nil, by writing nothing.  Marshal handles an interface value by\n// marshalling the value it contains or, if the interface value is nil, by\n// writing nothing.  Marshal handles all other data by writing one or more XML\n// elements containing the data.\n//\n// The name for the XML elements is taken from, in order of preference:\n//     - the tag on the XMLName field, if the data is a struct\n//     - the value of the XMLName field of type xml.Name\n//     - the tag of the struct field used to obtain the data\n//     - the name of the struct field used to obtain the data\n//     - the name of the marshalled type\n//\n// The XML element for a struct contains marshalled elements for each of the\n// exported fields of the struct, with these exceptions:\n//     - the XMLName field, described above, is omitted.\n//     - a field with tag \"-\" is omitted.\n//     - a field with tag \"name,attr\" becomes an attribute with\n//       the given name in the XML element.\n//     - a field with tag \",attr\" becomes an attribute with the\n//       field name in the XML element.\n//     - a field with tag \",chardata\" is written as character data,\n//       not as an XML element.\n//     - a field 
with tag \",innerxml\" is written verbatim, not subject\n//       to the usual marshalling procedure.\n//     - a field with tag \",comment\" is written as an XML comment, not\n//       subject to the usual marshalling procedure. It must not contain\n//       the \"--\" string within it.\n//     - a field with a tag including the \"omitempty\" option is omitted\n//       if the field value is empty. The empty values are false, 0, any\n//       nil pointer or interface value, and any array, slice, map, or\n//       string of length zero.\n//     - an anonymous struct field is handled as if the fields of its\n//       value were part of the outer struct.\n//\n// If a field uses a tag \"a>b>c\", then the element c will be nested inside\n// parent elements a and b.  Fields that appear next to each other that name\n// the same parent will be enclosed in one XML element.\n//\n// See MarshalIndent for an example.\n//\n// Marshal will return an error if asked to marshal a channel, function, or map.\nfunc Marshal(v interface{}) ([]byte, error) {\n\tvar b bytes.Buffer\n\tif err := NewEncoder(&b).Encode(v); err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.Bytes(), nil\n}\n\n// Marshaler is the interface implemented by objects that can marshal\n// themselves into valid XML elements.\n//\n// MarshalXML encodes the receiver as zero or more XML elements.\n// By convention, arrays or slices are typically encoded as a sequence\n// of elements, one per entry.\n// Using start as the element tag is not required, but doing so\n// will enable Unmarshal to match the XML elements to the correct\n// struct field.\n// One common implementation strategy is to construct a separate\n// value with a layout corresponding to the desired XML and then\n// to encode it using e.EncodeElement.\n// Another common strategy is to use repeated calls to e.EncodeToken\n// to generate the XML output one token at a time.\n// The sequence of encoded tokens must make up zero or more valid\n// XML 
elements.\ntype Marshaler interface {\n\tMarshalXML(e *Encoder, start StartElement) error\n}\n\n// MarshalerAttr is the interface implemented by objects that can marshal\n// themselves into valid XML attributes.\n//\n// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver.\n// Using name as the attribute name is not required, but doing so\n// will enable Unmarshal to match the attribute to the correct\n// struct field.\n// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute\n// will be generated in the output.\n// MarshalXMLAttr is used only for struct fields with the\n// \"attr\" option in the field tag.\ntype MarshalerAttr interface {\n\tMarshalXMLAttr(name Name) (Attr, error)\n}\n\n// MarshalIndent works like Marshal, but each XML element begins on a new\n// indented line that starts with prefix and is followed by one or more\n// copies of indent according to the nesting depth.\nfunc MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {\n\tvar b bytes.Buffer\n\tenc := NewEncoder(&b)\n\tenc.Indent(prefix, indent)\n\tif err := enc.Encode(v); err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.Bytes(), nil\n}\n\n// An Encoder writes XML data to an output stream.\ntype Encoder struct {\n\tp printer\n}\n\n// NewEncoder returns a new encoder that writes to w.\nfunc NewEncoder(w io.Writer) *Encoder {\n\te := &Encoder{printer{Writer: bufio.NewWriter(w)}}\n\te.p.encoder = e\n\treturn e\n}\n\n// Indent sets the encoder to generate XML in which each element\n// begins on a new indented line that starts with prefix and is followed by\n// one or more copies of indent according to the nesting depth.\nfunc (enc *Encoder) Indent(prefix, indent string) {\n\tenc.p.prefix = prefix\n\tenc.p.indent = indent\n}\n\n// Encode writes the XML encoding of v to the stream.\n//\n// See the documentation for Marshal for details about the conversion\n// of Go values to XML.\n//\n// Encode calls Flush before returning.\nfunc (enc *Encoder) 
Encode(v interface{}) error {\n\terr := enc.p.marshalValue(reflect.ValueOf(v), nil, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn enc.p.Flush()\n}\n\n// EncodeElement writes the XML encoding of v to the stream,\n// using start as the outermost tag in the encoding.\n//\n// See the documentation for Marshal for details about the conversion\n// of Go values to XML.\n//\n// EncodeElement calls Flush before returning.\nfunc (enc *Encoder) EncodeElement(v interface{}, start StartElement) error {\n\terr := enc.p.marshalValue(reflect.ValueOf(v), nil, &start)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn enc.p.Flush()\n}\n\nvar (\n\tendComment   = []byte(\"-->\")\n\tendProcInst  = []byte(\"?>\")\n\tendDirective = []byte(\">\")\n)\n\n// EncodeToken writes the given XML token to the stream.\n// It returns an error if StartElement and EndElement tokens are not properly matched.\n//\n// EncodeToken does not call Flush, because usually it is part of a larger operation\n// such as Encode or EncodeElement (or a custom Marshaler's MarshalXML invoked\n// during those), and those will call Flush when finished.\n// Callers that create an Encoder and then invoke EncodeToken directly, without\n// using Encode or EncodeElement, need to call Flush when finished to ensure\n// that the XML is written to the underlying writer.\n//\n// EncodeToken allows writing a ProcInst with Target set to \"xml\" only as the first token\n// in the stream.\nfunc (enc *Encoder) EncodeToken(t Token) error {\n\tp := &enc.p\n\tswitch t := t.(type) {\n\tcase StartElement:\n\t\tif err := p.writeStart(&t); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase EndElement:\n\t\tif err := p.writeEnd(t.Name); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase CharData:\n\t\tEscapeText(p, t)\n\tcase Comment:\n\t\tif bytes.Contains(t, endComment) {\n\t\t\treturn fmt.Errorf(\"xml: EncodeToken of Comment containing --> marker\")\n\t\t}\n\t\tp.WriteString(\"<!--\")\n\t\tp.Write(t)\n\t\tp.WriteString(\"-->\")\n\t\treturn 
p.cachedWriteError()\n\tcase ProcInst:\n\t\t// First token to be encoded which is also a ProcInst with target of xml\n\t\t// is the xml declaration.  The only ProcInst where target of xml is allowed.\n\t\tif t.Target == \"xml\" && p.Buffered() != 0 {\n\t\t\treturn fmt.Errorf(\"xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded\")\n\t\t}\n\t\tif !isNameString(t.Target) {\n\t\t\treturn fmt.Errorf(\"xml: EncodeToken of ProcInst with invalid Target\")\n\t\t}\n\t\tif bytes.Contains(t.Inst, endProcInst) {\n\t\t\treturn fmt.Errorf(\"xml: EncodeToken of ProcInst containing ?> marker\")\n\t\t}\n\t\tp.WriteString(\"<?\")\n\t\tp.WriteString(t.Target)\n\t\tif len(t.Inst) > 0 {\n\t\t\tp.WriteByte(' ')\n\t\t\tp.Write(t.Inst)\n\t\t}\n\t\tp.WriteString(\"?>\")\n\tcase Directive:\n\t\tif bytes.Contains(t, endDirective) {\n\t\t\treturn fmt.Errorf(\"xml: EncodeToken of Directive containing > marker\")\n\t\t}\n\t\tp.WriteString(\"<!\")\n\t\tp.Write(t)\n\t\tp.WriteString(\">\")\n\t}\n\treturn p.cachedWriteError()\n}\n\n// Flush flushes any buffered XML to the underlying writer.\n// See the EncodeToken documentation for details about when it is necessary.\nfunc (enc *Encoder) Flush() error {\n\treturn enc.p.Flush()\n}\n\ntype printer struct {\n\t*bufio.Writer\n\tencoder    *Encoder\n\tseq        int\n\tindent     string\n\tprefix     string\n\tdepth      int\n\tindentedIn bool\n\tputNewline bool\n\tattrNS     map[string]string // map prefix -> name space\n\tattrPrefix map[string]string // map name space -> prefix\n\tprefixes   []string\n\ttags       []Name\n}\n\n// createAttrPrefix finds the name space prefix attribute to use for the given name space,\n// defining a new prefix if necessary. 
It returns the prefix.\nfunc (p *printer) createAttrPrefix(url string) string {\n\tif prefix := p.attrPrefix[url]; prefix != \"\" {\n\t\treturn prefix\n\t}\n\n\t// The \"http://www.w3.org/XML/1998/namespace\" name space is predefined as \"xml\"\n\t// and must be referred to that way.\n\t// (The \"http://www.w3.org/2000/xmlns/\" name space is also predefined as \"xmlns\",\n\t// but users should not be trying to use that one directly - that's our job.)\n\tif url == xmlURL {\n\t\treturn \"xml\"\n\t}\n\n\t// Need to define a new name space.\n\tif p.attrPrefix == nil {\n\t\tp.attrPrefix = make(map[string]string)\n\t\tp.attrNS = make(map[string]string)\n\t}\n\n\t// Pick a name. We try to use the final element of the path\n\t// but fall back to _.\n\tprefix := strings.TrimRight(url, \"/\")\n\tif i := strings.LastIndex(prefix, \"/\"); i >= 0 {\n\t\tprefix = prefix[i+1:]\n\t}\n\tif prefix == \"\" || !isName([]byte(prefix)) || strings.Contains(prefix, \":\") {\n\t\tprefix = \"_\"\n\t}\n\tif strings.HasPrefix(prefix, \"xml\") {\n\t\t// xmlanything is reserved.\n\t\tprefix = \"_\" + prefix\n\t}\n\tif p.attrNS[prefix] != \"\" {\n\t\t// Name is taken. 
Find a better one.\n\t\tfor p.seq++; ; p.seq++ {\n\t\t\tif id := prefix + \"_\" + strconv.Itoa(p.seq); p.attrNS[id] == \"\" {\n\t\t\t\tprefix = id\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tp.attrPrefix[url] = prefix\n\tp.attrNS[prefix] = url\n\n\tp.WriteString(`xmlns:`)\n\tp.WriteString(prefix)\n\tp.WriteString(`=\"`)\n\tEscapeText(p, []byte(url))\n\tp.WriteString(`\" `)\n\n\tp.prefixes = append(p.prefixes, prefix)\n\n\treturn prefix\n}\n\n// deleteAttrPrefix removes an attribute name space prefix.\nfunc (p *printer) deleteAttrPrefix(prefix string) {\n\tdelete(p.attrPrefix, p.attrNS[prefix])\n\tdelete(p.attrNS, prefix)\n}\n\nfunc (p *printer) markPrefix() {\n\tp.prefixes = append(p.prefixes, \"\")\n}\n\nfunc (p *printer) popPrefix() {\n\tfor len(p.prefixes) > 0 {\n\t\tprefix := p.prefixes[len(p.prefixes)-1]\n\t\tp.prefixes = p.prefixes[:len(p.prefixes)-1]\n\t\tif prefix == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tp.deleteAttrPrefix(prefix)\n\t}\n}\n\nvar (\n\tmarshalerType     = reflect.TypeOf((*Marshaler)(nil)).Elem()\n\tmarshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem()\n\ttextMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()\n)\n\n// marshalValue writes one or more XML elements representing val.\n// If val was obtained from a struct field, finfo must have its details.\nfunc (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error {\n\tif startTemplate != nil && startTemplate.Name.Local == \"\" {\n\t\treturn fmt.Errorf(\"xml: EncodeElement of StartElement with missing name\")\n\t}\n\n\tif !val.IsValid() {\n\t\treturn nil\n\t}\n\tif finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) {\n\t\treturn nil\n\t}\n\n\t// Drill into interfaces and pointers.\n\t// This can turn into an infinite loop given a cyclic chain,\n\t// but it matches the Go 1 behavior.\n\tfor val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {\n\t\tif val.IsNil() {\n\t\t\treturn nil\n\t\t}\n\t\tval = 
val.Elem()\n\t}\n\n\tkind := val.Kind()\n\ttyp := val.Type()\n\n\t// Check for marshaler.\n\tif val.CanInterface() && typ.Implements(marshalerType) {\n\t\treturn p.marshalInterface(val.Interface().(Marshaler), defaultStart(typ, finfo, startTemplate))\n\t}\n\tif val.CanAddr() {\n\t\tpv := val.Addr()\n\t\tif pv.CanInterface() && pv.Type().Implements(marshalerType) {\n\t\t\treturn p.marshalInterface(pv.Interface().(Marshaler), defaultStart(pv.Type(), finfo, startTemplate))\n\t\t}\n\t}\n\n\t// Check for text marshaler.\n\tif val.CanInterface() && typ.Implements(textMarshalerType) {\n\t\treturn p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), defaultStart(typ, finfo, startTemplate))\n\t}\n\tif val.CanAddr() {\n\t\tpv := val.Addr()\n\t\tif pv.CanInterface() && pv.Type().Implements(textMarshalerType) {\n\t\t\treturn p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), defaultStart(pv.Type(), finfo, startTemplate))\n\t\t}\n\t}\n\n\t// Slices and arrays iterate over the elements. They do not have an enclosing tag.\n\tif (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 {\n\t\tfor i, n := 0, val.Len(); i < n; i++ {\n\t\t\tif err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\ttinfo, err := getTypeInfo(typ)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create start element.\n\t// Precedence for the XML element name is:\n\t// 0. startTemplate\n\t// 1. XMLName field in underlying struct;\n\t// 2. field name/tag in the struct field; and\n\t// 3. 
type name\n\tvar start StartElement\n\n\tif startTemplate != nil {\n\t\tstart.Name = startTemplate.Name\n\t\tstart.Attr = append(start.Attr, startTemplate.Attr...)\n\t} else if tinfo.xmlname != nil {\n\t\txmlname := tinfo.xmlname\n\t\tif xmlname.name != \"\" {\n\t\t\tstart.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name\n\t\t} else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != \"\" {\n\t\t\tstart.Name = v\n\t\t}\n\t}\n\tif start.Name.Local == \"\" && finfo != nil {\n\t\tstart.Name.Space, start.Name.Local = finfo.xmlns, finfo.name\n\t}\n\tif start.Name.Local == \"\" {\n\t\tname := typ.Name()\n\t\tif name == \"\" {\n\t\t\treturn &UnsupportedTypeError{typ}\n\t\t}\n\t\tstart.Name.Local = name\n\t}\n\n\t// Add type attribute if necessary\n\tif finfo != nil && finfo.flags&fTypeAttr != 0 {\n\t\tstart.Attr = append(start.Attr, Attr{xmlSchemaInstance, typeToString(typ)})\n\t}\n\n\t// Attributes\n\tfor i := range tinfo.fields {\n\t\tfinfo := &tinfo.fields[i]\n\t\tif finfo.flags&fAttr == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tfv := finfo.value(val)\n\t\tname := Name{Space: finfo.xmlns, Local: finfo.name}\n\n\t\tif finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif fv.Kind() == reflect.Interface && fv.IsNil() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif fv.CanInterface() && fv.Type().Implements(marshalerAttrType) {\n\t\t\tattr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif attr.Name.Local != \"\" {\n\t\t\t\tstart.Attr = append(start.Attr, attr)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif fv.CanAddr() {\n\t\t\tpv := fv.Addr()\n\t\t\tif pv.CanInterface() && pv.Type().Implements(marshalerAttrType) {\n\t\t\t\tattr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif attr.Name.Local != \"\" {\n\t\t\t\t\tstart.Attr = append(start.Attr, 
attr)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif fv.CanInterface() && fv.Type().Implements(textMarshalerType) {\n\t\t\ttext, err := fv.Interface().(encoding.TextMarshaler).MarshalText()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tstart.Attr = append(start.Attr, Attr{name, string(text)})\n\t\t\tcontinue\n\t\t}\n\n\t\tif fv.CanAddr() {\n\t\t\tpv := fv.Addr()\n\t\t\tif pv.CanInterface() && pv.Type().Implements(textMarshalerType) {\n\t\t\t\ttext, err := pv.Interface().(encoding.TextMarshaler).MarshalText()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tstart.Attr = append(start.Attr, Attr{name, string(text)})\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// Dereference or skip nil pointer, interface values.\n\t\tswitch fv.Kind() {\n\t\tcase reflect.Ptr, reflect.Interface:\n\t\t\tif fv.IsNil() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfv = fv.Elem()\n\t\t}\n\n\t\ts, b, err := p.marshalSimple(fv.Type(), fv)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif b != nil {\n\t\t\ts = string(b)\n\t\t}\n\t\tstart.Attr = append(start.Attr, Attr{name, s})\n\t}\n\n\tif err := p.writeStart(&start); err != nil {\n\t\treturn err\n\t}\n\n\tif val.Kind() == reflect.Struct {\n\t\terr = p.marshalStruct(tinfo, val)\n\t} else {\n\t\ts, b, err1 := p.marshalSimple(typ, val)\n\t\tif err1 != nil {\n\t\t\terr = err1\n\t\t} else if b != nil {\n\t\t\tEscapeText(p, b)\n\t\t} else {\n\t\t\tp.EscapeString(s)\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := p.writeEnd(start.Name); err != nil {\n\t\treturn err\n\t}\n\n\treturn p.cachedWriteError()\n}\n\n// defaultStart returns the default start element to use,\n// given the reflect type, field info, and start template.\nfunc defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement {\n\tvar start StartElement\n\t// Precedence for the XML element name is as above,\n\t// except that we do not look inside structs for the first field.\n\tif startTemplate != nil 
{\n\t\tstart.Name = startTemplate.Name\n\t\tstart.Attr = append(start.Attr, startTemplate.Attr...)\n\t} else if finfo != nil && finfo.name != \"\" {\n\t\tstart.Name.Local = finfo.name\n\t\tstart.Name.Space = finfo.xmlns\n\t} else if typ.Name() != \"\" {\n\t\tstart.Name.Local = typ.Name()\n\t} else {\n\t\t// Must be a pointer to a named type,\n\t\t// since it has the Marshaler methods.\n\t\tstart.Name.Local = typ.Elem().Name()\n\t}\n\n\t// Add type attribute if necessary\n\tif finfo != nil && finfo.flags&fTypeAttr != 0 {\n\t\tstart.Attr = append(start.Attr, Attr{xmlSchemaInstance, typeToString(typ)})\n\t}\n\n\treturn start\n}\n\n// marshalInterface marshals a Marshaler interface value.\nfunc (p *printer) marshalInterface(val Marshaler, start StartElement) error {\n\t// Push a marker onto the tag stack so that MarshalXML\n\t// cannot close the XML tags that it did not open.\n\tp.tags = append(p.tags, Name{})\n\tn := len(p.tags)\n\n\terr := val.MarshalXML(p.encoder, start)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Make sure MarshalXML closed all its tags. 
p.tags[n-1] is the mark.\n\tif len(p.tags) > n {\n\t\treturn fmt.Errorf(\"xml: %s.MarshalXML wrote invalid XML: <%s> not closed\", receiverType(val), p.tags[len(p.tags)-1].Local)\n\t}\n\tp.tags = p.tags[:n-1]\n\treturn nil\n}\n\n// marshalTextInterface marshals a TextMarshaler interface value.\nfunc (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error {\n\tif err := p.writeStart(&start); err != nil {\n\t\treturn err\n\t}\n\ttext, err := val.MarshalText()\n\tif err != nil {\n\t\treturn err\n\t}\n\tEscapeText(p, text)\n\treturn p.writeEnd(start.Name)\n}\n\n// writeStart writes the given start element.\nfunc (p *printer) writeStart(start *StartElement) error {\n\tif start.Name.Local == \"\" {\n\t\treturn fmt.Errorf(\"xml: start tag with no name\")\n\t}\n\n\tp.tags = append(p.tags, start.Name)\n\tp.markPrefix()\n\n\tp.writeIndent(1)\n\tp.WriteByte('<')\n\tp.WriteString(start.Name.Local)\n\n\tif start.Name.Space != \"\" {\n\t\tp.WriteString(` xmlns=\"`)\n\t\tp.EscapeString(start.Name.Space)\n\t\tp.WriteByte('\"')\n\t}\n\n\t// Attributes\n\tfor _, attr := range start.Attr {\n\t\tname := attr.Name\n\t\tif name.Local == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tp.WriteByte(' ')\n\t\tif name.Space != \"\" {\n\t\t\tp.WriteString(p.createAttrPrefix(name.Space))\n\t\t\tp.WriteByte(':')\n\t\t}\n\t\tp.WriteString(name.Local)\n\t\tp.WriteString(`=\"`)\n\t\tp.EscapeString(attr.Value)\n\t\tp.WriteByte('\"')\n\t}\n\tp.WriteByte('>')\n\treturn nil\n}\n\nfunc (p *printer) writeEnd(name Name) error {\n\tif name.Local == \"\" {\n\t\treturn fmt.Errorf(\"xml: end tag with no name\")\n\t}\n\tif len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == \"\" {\n\t\treturn fmt.Errorf(\"xml: end tag </%s> without start tag\", name.Local)\n\t}\n\tif top := p.tags[len(p.tags)-1]; top != name {\n\t\tif top.Local != name.Local {\n\t\t\treturn fmt.Errorf(\"xml: end tag </%s> does not match start tag <%s>\", name.Local, top.Local)\n\t\t}\n\t\treturn fmt.Errorf(\"xml: end tag 
</%s> in namespace %s does not match start tag <%s> in namespace %s\", name.Local, name.Space, top.Local, top.Space)\n\t}\n\tp.tags = p.tags[:len(p.tags)-1]\n\n\tp.writeIndent(-1)\n\tp.WriteByte('<')\n\tp.WriteByte('/')\n\tp.WriteString(name.Local)\n\tp.WriteByte('>')\n\tp.popPrefix()\n\treturn nil\n}\n\nfunc (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) {\n\tswitch val.Kind() {\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn strconv.FormatInt(val.Int(), 10), nil, nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\treturn strconv.FormatUint(val.Uint(), 10), nil, nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil\n\tcase reflect.String:\n\t\treturn val.String(), nil, nil\n\tcase reflect.Bool:\n\t\treturn strconv.FormatBool(val.Bool()), nil, nil\n\tcase reflect.Array:\n\t\tif typ.Elem().Kind() != reflect.Uint8 {\n\t\t\tbreak\n\t\t}\n\t\t// [...]byte\n\t\tvar bytes []byte\n\t\tif val.CanAddr() {\n\t\t\tbytes = val.Slice(0, val.Len()).Bytes()\n\t\t} else {\n\t\t\tbytes = make([]byte, val.Len())\n\t\t\treflect.Copy(reflect.ValueOf(bytes), val)\n\t\t}\n\t\treturn \"\", bytes, nil\n\tcase reflect.Slice:\n\t\tif typ.Elem().Kind() != reflect.Uint8 {\n\t\t\tbreak\n\t\t}\n\t\t// []byte\n\t\treturn \"\", val.Bytes(), nil\n\t}\n\treturn \"\", nil, &UnsupportedTypeError{typ}\n}\n\nvar ddBytes = []byte(\"--\")\n\nfunc (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error {\n\ts := parentStack{p: p}\n\tfor i := range tinfo.fields {\n\t\tfinfo := &tinfo.fields[i]\n\t\tif finfo.flags&fAttr != 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvf := finfo.value(val)\n\n\t\t// Dereference or skip nil pointer, interface values.\n\t\tswitch vf.Kind() {\n\t\tcase reflect.Ptr, reflect.Interface:\n\t\t\tif !vf.IsNil() {\n\t\t\t\tvf = vf.Elem()\n\t\t\t}\n\t\t}\n\n\t\tswitch 
finfo.flags & fMode {\n\t\tcase fCharData:\n\t\t\tif vf.CanInterface() && vf.Type().Implements(textMarshalerType) {\n\t\t\t\tdata, err := vf.Interface().(encoding.TextMarshaler).MarshalText()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tEscape(p, data)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif vf.CanAddr() {\n\t\t\t\tpv := vf.Addr()\n\t\t\t\tif pv.CanInterface() && pv.Type().Implements(textMarshalerType) {\n\t\t\t\t\tdata, err := pv.Interface().(encoding.TextMarshaler).MarshalText()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tEscape(p, data)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tvar scratch [64]byte\n\t\t\tswitch vf.Kind() {\n\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\tEscape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10))\n\t\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\t\t\tEscape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10))\n\t\t\tcase reflect.Float32, reflect.Float64:\n\t\t\t\tEscape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits()))\n\t\t\tcase reflect.Bool:\n\t\t\t\tEscape(p, strconv.AppendBool(scratch[:0], vf.Bool()))\n\t\t\tcase reflect.String:\n\t\t\t\tif err := EscapeText(p, []byte(vf.String())); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tcase reflect.Slice:\n\t\t\t\tif elem, ok := vf.Interface().([]byte); ok {\n\t\t\t\t\tif err := EscapeText(p, elem); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\n\t\tcase fComment:\n\t\t\tk := vf.Kind()\n\t\t\tif !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) {\n\t\t\t\treturn fmt.Errorf(\"xml: bad type for comment field of %s\", val.Type())\n\t\t\t}\n\t\t\tif vf.Len() == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tp.writeIndent(0)\n\t\t\tp.WriteString(\"<!--\")\n\t\t\tdashDash := false\n\t\t\tdashLast := false\n\t\t\tswitch k 
{\n\t\t\tcase reflect.String:\n\t\t\t\ts := vf.String()\n\t\t\t\tdashDash = strings.Index(s, \"--\") >= 0\n\t\t\t\tdashLast = s[len(s)-1] == '-'\n\t\t\t\tif !dashDash {\n\t\t\t\t\tp.WriteString(s)\n\t\t\t\t}\n\t\t\tcase reflect.Slice:\n\t\t\t\tb := vf.Bytes()\n\t\t\t\tdashDash = bytes.Index(b, ddBytes) >= 0\n\t\t\t\tdashLast = b[len(b)-1] == '-'\n\t\t\t\tif !dashDash {\n\t\t\t\t\tp.Write(b)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tpanic(\"can't happen\")\n\t\t\t}\n\t\t\tif dashDash {\n\t\t\t\treturn fmt.Errorf(`xml: comments must not contain \"--\"`)\n\t\t\t}\n\t\t\tif dashLast {\n\t\t\t\t// \"--->\" is invalid grammar. Make it \"- -->\"\n\t\t\t\tp.WriteByte(' ')\n\t\t\t}\n\t\t\tp.WriteString(\"-->\")\n\t\t\tcontinue\n\n\t\tcase fInnerXml:\n\t\t\tiface := vf.Interface()\n\t\t\tswitch raw := iface.(type) {\n\t\t\tcase []byte:\n\t\t\t\tp.Write(raw)\n\t\t\t\tcontinue\n\t\t\tcase string:\n\t\t\t\tp.WriteString(raw)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\tcase fElement, fElement | fAny:\n\t\t\tif err := s.trim(finfo.parents); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif len(finfo.parents) > len(s.stack) {\n\t\t\t\tif vf.Kind() != reflect.Ptr && vf.Kind() != reflect.Interface || !vf.IsNil() {\n\t\t\t\t\tif err := s.push(finfo.parents[len(s.stack):]); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif err := p.marshalValue(vf, finfo, nil); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\ts.trim(nil)\n\treturn p.cachedWriteError()\n}\n\n// return the bufio Writer's cached write error\nfunc (p *printer) cachedWriteError() error {\n\t_, err := p.Write(nil)\n\treturn err\n}\n\nfunc (p *printer) writeIndent(depthDelta int) {\n\tif len(p.prefix) == 0 && len(p.indent) == 0 {\n\t\treturn\n\t}\n\tif depthDelta < 0 {\n\t\tp.depth--\n\t\tif p.indentedIn {\n\t\t\tp.indentedIn = false\n\t\t\treturn\n\t\t}\n\t\tp.indentedIn = false\n\t}\n\tif p.putNewline {\n\t\tp.WriteByte('\\n')\n\t} else {\n\t\tp.putNewline = true\n\t}\n\tif len(p.prefix) > 0 
{\n\t\tp.WriteString(p.prefix)\n\t}\n\tif len(p.indent) > 0 {\n\t\tfor i := 0; i < p.depth; i++ {\n\t\t\tp.WriteString(p.indent)\n\t\t}\n\t}\n\tif depthDelta > 0 {\n\t\tp.depth++\n\t\tp.indentedIn = true\n\t}\n}\n\ntype parentStack struct {\n\tp     *printer\n\tstack []string\n}\n\n// trim updates the XML context to match the longest common prefix of the stack\n// and the given parents.  A closing tag will be written for every parent\n// popped.  Passing a zero slice or nil will close all the elements.\nfunc (s *parentStack) trim(parents []string) error {\n\tsplit := 0\n\tfor ; split < len(parents) && split < len(s.stack); split++ {\n\t\tif parents[split] != s.stack[split] {\n\t\t\tbreak\n\t\t}\n\t}\n\tfor i := len(s.stack) - 1; i >= split; i-- {\n\t\tif err := s.p.writeEnd(Name{Local: s.stack[i]}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\ts.stack = parents[:split]\n\treturn nil\n}\n\n// push adds parent elements to the stack and writes open tags.\nfunc (s *parentStack) push(parents []string) error {\n\tfor i := 0; i < len(parents); i++ {\n\t\tif err := s.p.writeStart(&StartElement{Name: Name{Local: parents[i]}}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\ts.stack = append(s.stack, parents...)\n\treturn nil\n}\n\n// A MarshalXMLError is returned when Marshal encounters a type\n// that cannot be converted into XML.\ntype UnsupportedTypeError struct {\n\tType reflect.Type\n}\n\nfunc (e *UnsupportedTypeError) Error() string {\n\treturn \"xml: unsupported type: \" + e.Type.String()\n}\n\nfunc isEmptyValue(v reflect.Value) bool {\n\tswitch v.Kind() {\n\tcase reflect.Array, reflect.Map, reflect.Slice, reflect.String:\n\t\treturn v.Len() == 0\n\tcase reflect.Bool:\n\t\treturn !v.Bool()\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn v.Int() == 0\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\treturn v.Uint() == 0\n\tcase reflect.Float32, 
reflect.Float64:\n\t\treturn v.Float() == 0\n\tcase reflect.Interface, reflect.Ptr:\n\t\treturn v.IsNil()\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/xml/marshal_test.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage xml\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\ntype DriveType int\n\nconst (\n\tHyperDrive DriveType = iota\n\tImprobabilityDrive\n)\n\ntype Passenger struct {\n\tName   []string `xml:\"name\"`\n\tWeight float32  `xml:\"weight\"`\n}\n\ntype Ship struct {\n\tXMLName struct{} `xml:\"spaceship\"`\n\n\tName      string       `xml:\"name,attr\"`\n\tPilot     string       `xml:\"pilot,attr\"`\n\tDrive     DriveType    `xml:\"drive\"`\n\tAge       uint         `xml:\"age\"`\n\tPassenger []*Passenger `xml:\"passenger\"`\n\tsecret    string\n}\n\ntype NamedType string\n\ntype Port struct {\n\tXMLName struct{} `xml:\"port\"`\n\tType    string   `xml:\"type,attr,omitempty\"`\n\tComment string   `xml:\",comment\"`\n\tNumber  string   `xml:\",chardata\"`\n}\n\ntype Domain struct {\n\tXMLName struct{} `xml:\"domain\"`\n\tCountry string   `xml:\",attr,omitempty\"`\n\tName    []byte   `xml:\",chardata\"`\n\tComment []byte   `xml:\",comment\"`\n}\n\ntype Book struct {\n\tXMLName struct{} `xml:\"book\"`\n\tTitle   string   `xml:\",chardata\"`\n}\n\ntype Event struct {\n\tXMLName struct{} `xml:\"event\"`\n\tYear    int      `xml:\",chardata\"`\n}\n\ntype Movie struct {\n\tXMLName struct{} `xml:\"movie\"`\n\tLength  uint     `xml:\",chardata\"`\n}\n\ntype Pi struct {\n\tXMLName       struct{} `xml:\"pi\"`\n\tApproximation float32  `xml:\",chardata\"`\n}\n\ntype Universe struct {\n\tXMLName struct{} `xml:\"universe\"`\n\tVisible float64  `xml:\",chardata\"`\n}\n\ntype Particle struct {\n\tXMLName struct{} `xml:\"particle\"`\n\tHasMass bool     `xml:\",chardata\"`\n}\n\ntype Departure struct {\n\tXMLName struct{}  `xml:\"departure\"`\n\tWhen    time.Time `xml:\",chardata\"`\n}\n\ntype SecretAgent struct 
{\n\tXMLName   struct{} `xml:\"agent\"`\n\tHandle    string   `xml:\"handle,attr\"`\n\tIdentity  string\n\tObfuscate string `xml:\",innerxml\"`\n}\n\ntype NestedItems struct {\n\tXMLName struct{} `xml:\"result\"`\n\tItems   []string `xml:\">item\"`\n\tItem1   []string `xml:\"Items>item1\"`\n}\n\ntype NestedOrder struct {\n\tXMLName struct{} `xml:\"result\"`\n\tField1  string   `xml:\"parent>c\"`\n\tField2  string   `xml:\"parent>b\"`\n\tField3  string   `xml:\"parent>a\"`\n}\n\ntype MixedNested struct {\n\tXMLName struct{} `xml:\"result\"`\n\tA       string   `xml:\"parent1>a\"`\n\tB       string   `xml:\"b\"`\n\tC       string   `xml:\"parent1>parent2>c\"`\n\tD       string   `xml:\"parent1>d\"`\n}\n\ntype NilTest struct {\n\tA interface{} `xml:\"parent1>parent2>a\"`\n\tB interface{} `xml:\"parent1>b\"`\n\tC interface{} `xml:\"parent1>parent2>c\"`\n}\n\ntype Service struct {\n\tXMLName struct{} `xml:\"service\"`\n\tDomain  *Domain  `xml:\"host>domain\"`\n\tPort    *Port    `xml:\"host>port\"`\n\tExtra1  interface{}\n\tExtra2  interface{} `xml:\"host>extra2\"`\n}\n\nvar nilStruct *Ship\n\ntype EmbedA struct {\n\tEmbedC\n\tEmbedB EmbedB\n\tFieldA string\n}\n\ntype EmbedB struct {\n\tFieldB string\n\t*EmbedC\n}\n\ntype EmbedC struct {\n\tFieldA1 string `xml:\"FieldA>A1\"`\n\tFieldA2 string `xml:\"FieldA>A2\"`\n\tFieldB  string\n\tFieldC  string\n}\n\ntype NameCasing struct {\n\tXMLName struct{} `xml:\"casing\"`\n\tXy      string\n\tXY      string\n\tXyA     string `xml:\"Xy,attr\"`\n\tXYA     string `xml:\"XY,attr\"`\n}\n\ntype NamePrecedence struct {\n\tXMLName     Name              `xml:\"Parent\"`\n\tFromTag     XMLNameWithoutTag `xml:\"InTag\"`\n\tFromNameVal XMLNameWithoutTag\n\tFromNameTag XMLNameWithTag\n\tInFieldName string\n}\n\ntype XMLNameWithTag struct {\n\tXMLName Name   `xml:\"InXMLNameTag\"`\n\tValue   string `xml:\",chardata\"`\n}\n\ntype XMLNameWithoutTag struct {\n\tXMLName Name\n\tValue   string `xml:\",chardata\"`\n}\n\ntype NameInField struct 
{\n\tFoo Name `xml:\"ns foo\"`\n}\n\ntype AttrTest struct {\n\tInt   int     `xml:\",attr\"`\n\tNamed int     `xml:\"int,attr\"`\n\tFloat float64 `xml:\",attr\"`\n\tUint8 uint8   `xml:\",attr\"`\n\tBool  bool    `xml:\",attr\"`\n\tStr   string  `xml:\",attr\"`\n\tBytes []byte  `xml:\",attr\"`\n}\n\ntype OmitAttrTest struct {\n\tInt   int     `xml:\",attr,omitempty\"`\n\tNamed int     `xml:\"int,attr,omitempty\"`\n\tFloat float64 `xml:\",attr,omitempty\"`\n\tUint8 uint8   `xml:\",attr,omitempty\"`\n\tBool  bool    `xml:\",attr,omitempty\"`\n\tStr   string  `xml:\",attr,omitempty\"`\n\tBytes []byte  `xml:\",attr,omitempty\"`\n}\n\ntype OmitFieldTest struct {\n\tInt   int           `xml:\",omitempty\"`\n\tNamed int           `xml:\"int,omitempty\"`\n\tFloat float64       `xml:\",omitempty\"`\n\tUint8 uint8         `xml:\",omitempty\"`\n\tBool  bool          `xml:\",omitempty\"`\n\tStr   string        `xml:\",omitempty\"`\n\tBytes []byte        `xml:\",omitempty\"`\n\tPtr   *PresenceTest `xml:\",omitempty\"`\n}\n\ntype AnyTest struct {\n\tXMLName  struct{}  `xml:\"a\"`\n\tNested   string    `xml:\"nested>value\"`\n\tAnyField AnyHolder `xml:\",any\"`\n}\n\ntype AnyOmitTest struct {\n\tXMLName  struct{}   `xml:\"a\"`\n\tNested   string     `xml:\"nested>value\"`\n\tAnyField *AnyHolder `xml:\",any,omitempty\"`\n}\n\ntype AnySliceTest struct {\n\tXMLName  struct{}    `xml:\"a\"`\n\tNested   string      `xml:\"nested>value\"`\n\tAnyField []AnyHolder `xml:\",any\"`\n}\n\ntype AnyHolder struct {\n\tXMLName Name\n\tXML     string `xml:\",innerxml\"`\n}\n\ntype RecurseA struct {\n\tA string\n\tB *RecurseB\n}\n\ntype RecurseB struct {\n\tA *RecurseA\n\tB string\n}\n\ntype PresenceTest struct {\n\tExists *struct{}\n}\n\ntype IgnoreTest struct {\n\tPublicSecret string `xml:\"-\"`\n}\n\ntype MyBytes []byte\n\ntype Data struct {\n\tBytes  []byte\n\tAttr   []byte `xml:\",attr\"`\n\tCustom MyBytes\n}\n\ntype Plain struct {\n\tV interface{}\n}\n\ntype MyInt int\n\ntype EmbedInt struct 
{\n\tMyInt\n}\n\ntype Strings struct {\n\tX []string `xml:\"A>B,omitempty\"`\n}\n\ntype PointerFieldsTest struct {\n\tXMLName  Name    `xml:\"dummy\"`\n\tName     *string `xml:\"name,attr\"`\n\tAge      *uint   `xml:\"age,attr\"`\n\tEmpty    *string `xml:\"empty,attr\"`\n\tContents *string `xml:\",chardata\"`\n}\n\ntype ChardataEmptyTest struct {\n\tXMLName  Name    `xml:\"test\"`\n\tContents *string `xml:\",chardata\"`\n}\n\ntype MyMarshalerTest struct {\n}\n\nvar _ Marshaler = (*MyMarshalerTest)(nil)\n\nfunc (m *MyMarshalerTest) MarshalXML(e *Encoder, start StartElement) error {\n\te.EncodeToken(start)\n\te.EncodeToken(CharData([]byte(\"hello world\")))\n\te.EncodeToken(EndElement{start.Name})\n\treturn nil\n}\n\ntype MyMarshalerAttrTest struct {\n}\n\nvar _ MarshalerAttr = (*MyMarshalerAttrTest)(nil)\n\nfunc (m *MyMarshalerAttrTest) MarshalXMLAttr(name Name) (Attr, error) {\n\treturn Attr{name, \"hello world\"}, nil\n}\n\ntype MarshalerStruct struct {\n\tFoo MyMarshalerAttrTest `xml:\",attr\"`\n}\n\ntype InnerStruct struct {\n\tXMLName Name `xml:\"testns outer\"`\n}\n\ntype OuterStruct struct {\n\tInnerStruct\n\tIntAttr int `xml:\"int,attr\"`\n}\n\ntype OuterNamedStruct struct {\n\tInnerStruct\n\tXMLName Name `xml:\"outerns test\"`\n\tIntAttr int  `xml:\"int,attr\"`\n}\n\ntype OuterNamedOrderedStruct struct {\n\tXMLName Name `xml:\"outerns test\"`\n\tInnerStruct\n\tIntAttr int `xml:\"int,attr\"`\n}\n\ntype OuterOuterStruct struct {\n\tOuterStruct\n}\n\nfunc ifaceptr(x interface{}) interface{} {\n\treturn &x\n}\n\nvar (\n\tnameAttr     = \"Sarah\"\n\tageAttr      = uint(12)\n\tcontentsAttr = \"lorem ipsum\"\n)\n\n// Unless explicitly stated as such (or *Plain), all of the\n// tests below are two-way tests. 
When introducing new tests,\n// please try to make them two-way as well to ensure that\n// marshalling and unmarshalling are as symmetrical as feasible.\nvar marshalTests = []struct {\n\tValue         interface{}\n\tExpectXML     string\n\tMarshalOnly   bool\n\tUnmarshalOnly bool\n}{\n\t// Test nil marshals to nothing\n\t{Value: nil, ExpectXML: ``, MarshalOnly: true},\n\t{Value: nilStruct, ExpectXML: ``, MarshalOnly: true},\n\n\t// Test value types\n\t{Value: &Plain{true}, ExpectXML: `<Plain><V>true</V></Plain>`},\n\t{Value: &Plain{false}, ExpectXML: `<Plain><V>false</V></Plain>`},\n\t{Value: &Plain{int(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},\n\t{Value: &Plain{int8(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},\n\t{Value: &Plain{int16(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},\n\t{Value: &Plain{int32(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},\n\t{Value: &Plain{uint(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},\n\t{Value: &Plain{uint8(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},\n\t{Value: &Plain{uint16(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},\n\t{Value: &Plain{uint32(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},\n\t{Value: &Plain{float32(1.25)}, ExpectXML: `<Plain><V>1.25</V></Plain>`},\n\t{Value: &Plain{float64(1.25)}, ExpectXML: `<Plain><V>1.25</V></Plain>`},\n\t{Value: &Plain{uintptr(0xFFDD)}, ExpectXML: `<Plain><V>65501</V></Plain>`},\n\t{Value: &Plain{\"gopher\"}, ExpectXML: `<Plain><V>gopher</V></Plain>`},\n\t{Value: &Plain{[]byte(\"gopher\")}, ExpectXML: `<Plain><V>gopher</V></Plain>`},\n\t{Value: &Plain{\"</>\"}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`},\n\t{Value: &Plain{[]byte(\"</>\")}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`},\n\t{Value: &Plain{[3]byte{'<', '/', '>'}}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`},\n\t{Value: &Plain{NamedType(\"potato\")}, ExpectXML: `<Plain><V>potato</V></Plain>`},\n\t{Value: &Plain{[]int{1, 2, 3}}, ExpectXML: `<Plain><V>1</V><V>2</V><V>3</V></Plain>`},\n\t{Value: &Plain{[3]int{1, 2, 3}}, 
ExpectXML: `<Plain><V>1</V><V>2</V><V>3</V></Plain>`},\n\t{Value: ifaceptr(true), MarshalOnly: true, ExpectXML: `<bool>true</bool>`},\n\n\t// Test time.\n\t{\n\t\tValue:     &Plain{time.Unix(1e9, 123456789).UTC()},\n\t\tExpectXML: `<Plain><V>2001-09-09T01:46:40.123456789Z</V></Plain>`,\n\t},\n\n\t// A pointer to struct{} may be used to test for an element's presence.\n\t{\n\t\tValue:     &PresenceTest{new(struct{})},\n\t\tExpectXML: `<PresenceTest><Exists></Exists></PresenceTest>`,\n\t},\n\t{\n\t\tValue:     &PresenceTest{},\n\t\tExpectXML: `<PresenceTest></PresenceTest>`,\n\t},\n\n\t// A pointer to struct{} may be used to test for an element's presence.\n\t{\n\t\tValue:     &PresenceTest{new(struct{})},\n\t\tExpectXML: `<PresenceTest><Exists></Exists></PresenceTest>`,\n\t},\n\t{\n\t\tValue:     &PresenceTest{},\n\t\tExpectXML: `<PresenceTest></PresenceTest>`,\n\t},\n\n\t// A []byte field is only nil if the element was not found.\n\t{\n\t\tValue:         &Data{},\n\t\tExpectXML:     `<Data></Data>`,\n\t\tUnmarshalOnly: true,\n\t},\n\t{\n\t\tValue:         &Data{Bytes: []byte{}, Custom: MyBytes{}, Attr: []byte{}},\n\t\tExpectXML:     `<Data Attr=\"\"><Bytes></Bytes><Custom></Custom></Data>`,\n\t\tUnmarshalOnly: true,\n\t},\n\n\t// Check that []byte works, including named []byte types.\n\t{\n\t\tValue:     &Data{Bytes: []byte(\"ab\"), Custom: MyBytes(\"cd\"), Attr: []byte{'v'}},\n\t\tExpectXML: `<Data Attr=\"v\"><Bytes>ab</Bytes><Custom>cd</Custom></Data>`,\n\t},\n\n\t// Test innerxml\n\t{\n\t\tValue: &SecretAgent{\n\t\t\tHandle:    \"007\",\n\t\t\tIdentity:  \"James Bond\",\n\t\t\tObfuscate: \"<redacted/>\",\n\t\t},\n\t\tExpectXML:   `<agent handle=\"007\"><Identity>James Bond</Identity><redacted/></agent>`,\n\t\tMarshalOnly: true,\n\t},\n\t{\n\t\tValue: &SecretAgent{\n\t\t\tHandle:    \"007\",\n\t\t\tIdentity:  \"James Bond\",\n\t\t\tObfuscate: \"<Identity>James Bond</Identity><redacted/>\",\n\t\t},\n\t\tExpectXML:     `<agent handle=\"007\"><Identity>James 
Bond</Identity><redacted/></agent>`,\n\t\tUnmarshalOnly: true,\n\t},\n\n\t// Test structs\n\t{Value: &Port{Type: \"ssl\", Number: \"443\"}, ExpectXML: `<port type=\"ssl\">443</port>`},\n\t{Value: &Port{Number: \"443\"}, ExpectXML: `<port>443</port>`},\n\t{Value: &Port{Type: \"<unix>\"}, ExpectXML: `<port type=\"&lt;unix&gt;\"></port>`},\n\t{Value: &Port{Number: \"443\", Comment: \"https\"}, ExpectXML: `<port><!--https-->443</port>`},\n\t{Value: &Port{Number: \"443\", Comment: \"add space-\"}, ExpectXML: `<port><!--add space- -->443</port>`, MarshalOnly: true},\n\t{Value: &Domain{Name: []byte(\"google.com&friends\")}, ExpectXML: `<domain>google.com&amp;friends</domain>`},\n\t{Value: &Domain{Name: []byte(\"google.com\"), Comment: []byte(\" &friends \")}, ExpectXML: `<domain>google.com<!-- &friends --></domain>`},\n\t{Value: &Book{Title: \"Pride & Prejudice\"}, ExpectXML: `<book>Pride &amp; Prejudice</book>`},\n\t{Value: &Event{Year: -3114}, ExpectXML: `<event>-3114</event>`},\n\t{Value: &Movie{Length: 13440}, ExpectXML: `<movie>13440</movie>`},\n\t{Value: &Pi{Approximation: 3.14159265}, ExpectXML: `<pi>3.1415927</pi>`},\n\t{Value: &Universe{Visible: 9.3e13}, ExpectXML: `<universe>9.3e+13</universe>`},\n\t{Value: &Particle{HasMass: true}, ExpectXML: `<particle>true</particle>`},\n\t{Value: &Departure{When: ParseTime(\"2013-01-09T00:15:00-09:00\")}, ExpectXML: `<departure>2013-01-09T00:15:00-09:00</departure>`},\n\t{Value: atomValue, ExpectXML: atomXml},\n\t{\n\t\tValue: &Ship{\n\t\t\tName:  \"Heart of Gold\",\n\t\t\tPilot: \"Computer\",\n\t\t\tAge:   1,\n\t\t\tDrive: ImprobabilityDrive,\n\t\t\tPassenger: []*Passenger{\n\t\t\t\t{\n\t\t\t\t\tName:   []string{\"Zaphod\", \"Beeblebrox\"},\n\t\t\t\t\tWeight: 7.25,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   []string{\"Trisha\", \"McMillen\"},\n\t\t\t\t\tWeight: 5.5,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   []string{\"Ford\", \"Prefect\"},\n\t\t\t\t\tWeight: 7,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   []string{\"Arthur\", 
\"Dent\"},\n\t\t\t\t\tWeight: 6.75,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExpectXML: `<spaceship name=\"Heart of Gold\" pilot=\"Computer\">` +\n\t\t\t`<drive>` + strconv.Itoa(int(ImprobabilityDrive)) + `</drive>` +\n\t\t\t`<age>1</age>` +\n\t\t\t`<passenger>` +\n\t\t\t`<name>Zaphod</name>` +\n\t\t\t`<name>Beeblebrox</name>` +\n\t\t\t`<weight>7.25</weight>` +\n\t\t\t`</passenger>` +\n\t\t\t`<passenger>` +\n\t\t\t`<name>Trisha</name>` +\n\t\t\t`<name>McMillen</name>` +\n\t\t\t`<weight>5.5</weight>` +\n\t\t\t`</passenger>` +\n\t\t\t`<passenger>` +\n\t\t\t`<name>Ford</name>` +\n\t\t\t`<name>Prefect</name>` +\n\t\t\t`<weight>7</weight>` +\n\t\t\t`</passenger>` +\n\t\t\t`<passenger>` +\n\t\t\t`<name>Arthur</name>` +\n\t\t\t`<name>Dent</name>` +\n\t\t\t`<weight>6.75</weight>` +\n\t\t\t`</passenger>` +\n\t\t\t`</spaceship>`,\n\t},\n\n\t// Test a>b\n\t{\n\t\tValue: &NestedItems{Items: nil, Item1: nil},\n\t\tExpectXML: `<result>` +\n\t\t\t`<Items>` +\n\t\t\t`</Items>` +\n\t\t\t`</result>`,\n\t},\n\t{\n\t\tValue: &NestedItems{Items: []string{}, Item1: []string{}},\n\t\tExpectXML: `<result>` +\n\t\t\t`<Items>` +\n\t\t\t`</Items>` +\n\t\t\t`</result>`,\n\t\tMarshalOnly: true,\n\t},\n\t{\n\t\tValue: &NestedItems{Items: nil, Item1: []string{\"A\"}},\n\t\tExpectXML: `<result>` +\n\t\t\t`<Items>` +\n\t\t\t`<item1>A</item1>` +\n\t\t\t`</Items>` +\n\t\t\t`</result>`,\n\t},\n\t{\n\t\tValue: &NestedItems{Items: []string{\"A\", \"B\"}, Item1: nil},\n\t\tExpectXML: `<result>` +\n\t\t\t`<Items>` +\n\t\t\t`<item>A</item>` +\n\t\t\t`<item>B</item>` +\n\t\t\t`</Items>` +\n\t\t\t`</result>`,\n\t},\n\t{\n\t\tValue: &NestedItems{Items: []string{\"A\", \"B\"}, Item1: []string{\"C\"}},\n\t\tExpectXML: `<result>` +\n\t\t\t`<Items>` +\n\t\t\t`<item>A</item>` +\n\t\t\t`<item>B</item>` +\n\t\t\t`<item1>C</item1>` +\n\t\t\t`</Items>` +\n\t\t\t`</result>`,\n\t},\n\t{\n\t\tValue: &NestedOrder{Field1: \"C\", Field2: \"B\", Field3: \"A\"},\n\t\tExpectXML: `<result>` +\n\t\t\t`<parent>` +\n\t\t\t`<c>C</c>` 
+\n\t\t\t`<b>B</b>` +\n\t\t\t`<a>A</a>` +\n\t\t\t`</parent>` +\n\t\t\t`</result>`,\n\t},\n\t{\n\t\tValue: &NilTest{A: \"A\", B: nil, C: \"C\"},\n\t\tExpectXML: `<NilTest>` +\n\t\t\t`<parent1>` +\n\t\t\t`<parent2><a>A</a></parent2>` +\n\t\t\t`<parent2><c>C</c></parent2>` +\n\t\t\t`</parent1>` +\n\t\t\t`</NilTest>`,\n\t\tMarshalOnly: true, // Uses interface{}\n\t},\n\t{\n\t\tValue: &MixedNested{A: \"A\", B: \"B\", C: \"C\", D: \"D\"},\n\t\tExpectXML: `<result>` +\n\t\t\t`<parent1><a>A</a></parent1>` +\n\t\t\t`<b>B</b>` +\n\t\t\t`<parent1>` +\n\t\t\t`<parent2><c>C</c></parent2>` +\n\t\t\t`<d>D</d>` +\n\t\t\t`</parent1>` +\n\t\t\t`</result>`,\n\t},\n\t{\n\t\tValue:     &Service{Port: &Port{Number: \"80\"}},\n\t\tExpectXML: `<service><host><port>80</port></host></service>`,\n\t},\n\t{\n\t\tValue:     &Service{},\n\t\tExpectXML: `<service></service>`,\n\t},\n\t{\n\t\tValue: &Service{Port: &Port{Number: \"80\"}, Extra1: \"A\", Extra2: \"B\"},\n\t\tExpectXML: `<service>` +\n\t\t\t`<host><port>80</port></host>` +\n\t\t\t`<Extra1>A</Extra1>` +\n\t\t\t`<host><extra2>B</extra2></host>` +\n\t\t\t`</service>`,\n\t\tMarshalOnly: true,\n\t},\n\t{\n\t\tValue: &Service{Port: &Port{Number: \"80\"}, Extra2: \"example\"},\n\t\tExpectXML: `<service>` +\n\t\t\t`<host><port>80</port></host>` +\n\t\t\t`<host><extra2>example</extra2></host>` +\n\t\t\t`</service>`,\n\t\tMarshalOnly: true,\n\t},\n\n\t// Test struct embedding\n\t{\n\t\tValue: &EmbedA{\n\t\t\tEmbedC: EmbedC{\n\t\t\t\tFieldA1: \"\", // Shadowed by A.A\n\t\t\t\tFieldA2: \"\", // Shadowed by A.A\n\t\t\t\tFieldB:  \"A.C.B\",\n\t\t\t\tFieldC:  \"A.C.C\",\n\t\t\t},\n\t\t\tEmbedB: EmbedB{\n\t\t\t\tFieldB: \"A.B.B\",\n\t\t\t\tEmbedC: &EmbedC{\n\t\t\t\t\tFieldA1: \"A.B.C.A1\",\n\t\t\t\t\tFieldA2: \"A.B.C.A2\",\n\t\t\t\t\tFieldB:  \"\", // Shadowed by A.B.B\n\t\t\t\t\tFieldC:  \"A.B.C.C\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tFieldA: \"A.A\",\n\t\t},\n\t\tExpectXML: `<EmbedA>` +\n\t\t\t`<FieldB>A.C.B</FieldB>` +\n\t\t\t`<FieldC>A.C.C</FieldC>` 
+\n\t\t\t`<EmbedB>` +\n\t\t\t`<FieldB>A.B.B</FieldB>` +\n\t\t\t`<FieldA>` +\n\t\t\t`<A1>A.B.C.A1</A1>` +\n\t\t\t`<A2>A.B.C.A2</A2>` +\n\t\t\t`</FieldA>` +\n\t\t\t`<FieldC>A.B.C.C</FieldC>` +\n\t\t\t`</EmbedB>` +\n\t\t\t`<FieldA>A.A</FieldA>` +\n\t\t\t`</EmbedA>`,\n\t},\n\n\t// Test that name casing matters\n\t{\n\t\tValue:     &NameCasing{Xy: \"mixed\", XY: \"upper\", XyA: \"mixedA\", XYA: \"upperA\"},\n\t\tExpectXML: `<casing Xy=\"mixedA\" XY=\"upperA\"><Xy>mixed</Xy><XY>upper</XY></casing>`,\n\t},\n\n\t// Test the order in which the XML element name is chosen\n\t{\n\t\tValue: &NamePrecedence{\n\t\t\tFromTag:     XMLNameWithoutTag{Value: \"A\"},\n\t\t\tFromNameVal: XMLNameWithoutTag{XMLName: Name{Local: \"InXMLName\"}, Value: \"B\"},\n\t\t\tFromNameTag: XMLNameWithTag{Value: \"C\"},\n\t\t\tInFieldName: \"D\",\n\t\t},\n\t\tExpectXML: `<Parent>` +\n\t\t\t`<InTag>A</InTag>` +\n\t\t\t`<InXMLName>B</InXMLName>` +\n\t\t\t`<InXMLNameTag>C</InXMLNameTag>` +\n\t\t\t`<InFieldName>D</InFieldName>` +\n\t\t\t`</Parent>`,\n\t\tMarshalOnly: true,\n\t},\n\t{\n\t\tValue: &NamePrecedence{\n\t\t\tXMLName:     Name{Local: \"Parent\"},\n\t\t\tFromTag:     XMLNameWithoutTag{XMLName: Name{Local: \"InTag\"}, Value: \"A\"},\n\t\t\tFromNameVal: XMLNameWithoutTag{XMLName: Name{Local: \"FromNameVal\"}, Value: \"B\"},\n\t\t\tFromNameTag: XMLNameWithTag{XMLName: Name{Local: \"InXMLNameTag\"}, Value: \"C\"},\n\t\t\tInFieldName: \"D\",\n\t\t},\n\t\tExpectXML: `<Parent>` +\n\t\t\t`<InTag>A</InTag>` +\n\t\t\t`<FromNameVal>B</FromNameVal>` +\n\t\t\t`<InXMLNameTag>C</InXMLNameTag>` +\n\t\t\t`<InFieldName>D</InFieldName>` +\n\t\t\t`</Parent>`,\n\t\tUnmarshalOnly: true,\n\t},\n\n\t// xml.Name works in a plain field as well.\n\t{\n\t\tValue:     &NameInField{Name{Space: \"ns\", Local: \"foo\"}},\n\t\tExpectXML: `<NameInField><foo xmlns=\"ns\"></foo></NameInField>`,\n\t},\n\t{\n\t\tValue:         &NameInField{Name{Space: \"ns\", Local: \"foo\"}},\n\t\tExpectXML:     `<NameInField><foo 
xmlns=\"ns\"><ignore></ignore></foo></NameInField>`,\n\t\tUnmarshalOnly: true,\n\t},\n\n\t// Marshaling zero xml.Name uses the tag or field name.\n\t{\n\t\tValue:       &NameInField{},\n\t\tExpectXML:   `<NameInField><foo xmlns=\"ns\"></foo></NameInField>`,\n\t\tMarshalOnly: true,\n\t},\n\n\t// Test attributes\n\t{\n\t\tValue: &AttrTest{\n\t\t\tInt:   8,\n\t\t\tNamed: 9,\n\t\t\tFloat: 23.5,\n\t\t\tUint8: 255,\n\t\t\tBool:  true,\n\t\t\tStr:   \"str\",\n\t\t\tBytes: []byte(\"byt\"),\n\t\t},\n\t\tExpectXML: `<AttrTest Int=\"8\" int=\"9\" Float=\"23.5\" Uint8=\"255\"` +\n\t\t\t` Bool=\"true\" Str=\"str\" Bytes=\"byt\"></AttrTest>`,\n\t},\n\t{\n\t\tValue: &AttrTest{Bytes: []byte{}},\n\t\tExpectXML: `<AttrTest Int=\"0\" int=\"0\" Float=\"0\" Uint8=\"0\"` +\n\t\t\t` Bool=\"false\" Str=\"\" Bytes=\"\"></AttrTest>`,\n\t},\n\t{\n\t\tValue: &OmitAttrTest{\n\t\t\tInt:   8,\n\t\t\tNamed: 9,\n\t\t\tFloat: 23.5,\n\t\t\tUint8: 255,\n\t\t\tBool:  true,\n\t\t\tStr:   \"str\",\n\t\t\tBytes: []byte(\"byt\"),\n\t\t},\n\t\tExpectXML: `<OmitAttrTest Int=\"8\" int=\"9\" Float=\"23.5\" Uint8=\"255\"` +\n\t\t\t` Bool=\"true\" Str=\"str\" Bytes=\"byt\"></OmitAttrTest>`,\n\t},\n\t{\n\t\tValue:     &OmitAttrTest{},\n\t\tExpectXML: `<OmitAttrTest></OmitAttrTest>`,\n\t},\n\n\t// pointer fields\n\t{\n\t\tValue:       &PointerFieldsTest{Name: &nameAttr, Age: &ageAttr, Contents: &contentsAttr},\n\t\tExpectXML:   `<dummy name=\"Sarah\" age=\"12\">lorem ipsum</dummy>`,\n\t\tMarshalOnly: true,\n\t},\n\n\t// empty chardata pointer field\n\t{\n\t\tValue:       &ChardataEmptyTest{},\n\t\tExpectXML:   `<test></test>`,\n\t\tMarshalOnly: true,\n\t},\n\n\t// omitempty on fields\n\t{\n\t\tValue: &OmitFieldTest{\n\t\t\tInt:   8,\n\t\t\tNamed: 9,\n\t\t\tFloat: 23.5,\n\t\t\tUint8: 255,\n\t\t\tBool:  true,\n\t\t\tStr:   \"str\",\n\t\t\tBytes: []byte(\"byt\"),\n\t\t\tPtr:   &PresenceTest{},\n\t\t},\n\t\tExpectXML: `<OmitFieldTest>` +\n\t\t\t`<Int>8</Int>` +\n\t\t\t`<int>9</int>` +\n\t\t\t`<Float>23.5</Float>` 
+\n\t\t\t`<Uint8>255</Uint8>` +\n\t\t\t`<Bool>true</Bool>` +\n\t\t\t`<Str>str</Str>` +\n\t\t\t`<Bytes>byt</Bytes>` +\n\t\t\t`<Ptr></Ptr>` +\n\t\t\t`</OmitFieldTest>`,\n\t},\n\t{\n\t\tValue:     &OmitFieldTest{},\n\t\tExpectXML: `<OmitFieldTest></OmitFieldTest>`,\n\t},\n\n\t// Test \",any\"\n\t{\n\t\tExpectXML: `<a><nested><value>known</value></nested><other><sub>unknown</sub></other></a>`,\n\t\tValue: &AnyTest{\n\t\t\tNested: \"known\",\n\t\t\tAnyField: AnyHolder{\n\t\t\t\tXMLName: Name{Local: \"other\"},\n\t\t\t\tXML:     \"<sub>unknown</sub>\",\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tValue: &AnyTest{Nested: \"known\",\n\t\t\tAnyField: AnyHolder{\n\t\t\t\tXML:     \"<unknown/>\",\n\t\t\t\tXMLName: Name{Local: \"AnyField\"},\n\t\t\t},\n\t\t},\n\t\tExpectXML: `<a><nested><value>known</value></nested><AnyField><unknown/></AnyField></a>`,\n\t},\n\t{\n\t\tExpectXML: `<a><nested><value>b</value></nested></a>`,\n\t\tValue: &AnyOmitTest{\n\t\t\tNested: \"b\",\n\t\t},\n\t},\n\t{\n\t\tExpectXML: `<a><nested><value>b</value></nested><c><d>e</d></c><g xmlns=\"f\"><h>i</h></g></a>`,\n\t\tValue: &AnySliceTest{\n\t\t\tNested: \"b\",\n\t\t\tAnyField: []AnyHolder{\n\t\t\t\t{\n\t\t\t\t\tXMLName: Name{Local: \"c\"},\n\t\t\t\t\tXML:     \"<d>e</d>\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tXMLName: Name{Space: \"f\", Local: \"g\"},\n\t\t\t\t\tXML:     \"<h>i</h>\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tExpectXML: `<a><nested><value>b</value></nested></a>`,\n\t\tValue: &AnySliceTest{\n\t\t\tNested: \"b\",\n\t\t},\n\t},\n\n\t// Test recursive types.\n\t{\n\t\tValue: &RecurseA{\n\t\t\tA: \"a1\",\n\t\t\tB: &RecurseB{\n\t\t\t\tA: &RecurseA{\"a2\", nil},\n\t\t\t\tB: \"b1\",\n\t\t\t},\n\t\t},\n\t\tExpectXML: `<RecurseA><A>a1</A><B><A><A>a2</A></A><B>b1</B></B></RecurseA>`,\n\t},\n\n\t// Test ignoring fields via \"-\" tag\n\t{\n\t\tExpectXML: `<IgnoreTest></IgnoreTest>`,\n\t\tValue:     &IgnoreTest{},\n\t},\n\t{\n\t\tExpectXML:   `<IgnoreTest></IgnoreTest>`,\n\t\tValue:       
&IgnoreTest{PublicSecret: \"can't tell\"},\n\t\tMarshalOnly: true,\n\t},\n\t{\n\t\tExpectXML:     `<IgnoreTest><PublicSecret>ignore me</PublicSecret></IgnoreTest>`,\n\t\tValue:         &IgnoreTest{},\n\t\tUnmarshalOnly: true,\n\t},\n\n\t// Test escaping.\n\t{\n\t\tExpectXML: `<a><nested><value>dquote: &#34;; squote: &#39;; ampersand: &amp;; less: &lt;; greater: &gt;;</value></nested><empty></empty></a>`,\n\t\tValue: &AnyTest{\n\t\t\tNested:   `dquote: \"; squote: '; ampersand: &; less: <; greater: >;`,\n\t\t\tAnyField: AnyHolder{XMLName: Name{Local: \"empty\"}},\n\t\t},\n\t},\n\t{\n\t\tExpectXML: `<a><nested><value>newline: &#xA;; cr: &#xD;; tab: &#x9;;</value></nested><AnyField></AnyField></a>`,\n\t\tValue: &AnyTest{\n\t\t\tNested:   \"newline: \\n; cr: \\r; tab: \\t;\",\n\t\t\tAnyField: AnyHolder{XMLName: Name{Local: \"AnyField\"}},\n\t\t},\n\t},\n\t{\n\t\tExpectXML: \"<a><nested><value>1\\r2\\r\\n3\\n\\r4\\n5</value></nested></a>\",\n\t\tValue: &AnyTest{\n\t\t\tNested: \"1\\n2\\n3\\n\\n4\\n5\",\n\t\t},\n\t\tUnmarshalOnly: true,\n\t},\n\t{\n\t\tExpectXML: `<EmbedInt><MyInt>42</MyInt></EmbedInt>`,\n\t\tValue: &EmbedInt{\n\t\t\tMyInt: 42,\n\t\t},\n\t},\n\t// Test omitempty with parent chain; see golang.org/issue/4168.\n\t{\n\t\tExpectXML: `<Strings><A></A></Strings>`,\n\t\tValue:     &Strings{},\n\t},\n\t// Custom marshalers.\n\t{\n\t\tExpectXML: `<MyMarshalerTest>hello world</MyMarshalerTest>`,\n\t\tValue:     &MyMarshalerTest{},\n\t},\n\t{\n\t\tExpectXML: `<MarshalerStruct Foo=\"hello world\"></MarshalerStruct>`,\n\t\tValue:     &MarshalerStruct{},\n\t},\n\t{\n\t\tExpectXML: `<outer xmlns=\"testns\" int=\"10\"></outer>`,\n\t\tValue:     &OuterStruct{IntAttr: 10},\n\t},\n\t{\n\t\tExpectXML: `<test xmlns=\"outerns\" int=\"10\"></test>`,\n\t\tValue:     &OuterNamedStruct{XMLName: Name{Space: \"outerns\", Local: \"test\"}, IntAttr: 10},\n\t},\n\t{\n\t\tExpectXML: `<test xmlns=\"outerns\" int=\"10\"></test>`,\n\t\tValue:     &OuterNamedOrderedStruct{XMLName: 
Name{Space: \"outerns\", Local: \"test\"}, IntAttr: 10},\n\t},\n\t{\n\t\tExpectXML: `<outer xmlns=\"testns\" int=\"10\"></outer>`,\n\t\tValue:     &OuterOuterStruct{OuterStruct{IntAttr: 10}},\n\t},\n}\n\nfunc TestMarshal(t *testing.T) {\n\tfor idx, test := range marshalTests {\n\t\tif test.UnmarshalOnly {\n\t\t\tcontinue\n\t\t}\n\t\tdata, err := Marshal(test.Value)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"#%d: Error: %s\", idx, err)\n\t\t\tcontinue\n\t\t}\n\t\tif got, want := string(data), test.ExpectXML; got != want {\n\t\t\tif strings.Contains(want, \"\\n\") {\n\t\t\t\tt.Errorf(\"#%d: marshal(%#v):\\nHAVE:\\n%s\\nWANT:\\n%s\", idx, test.Value, got, want)\n\t\t\t} else {\n\t\t\t\tt.Errorf(\"#%d: marshal(%#v):\\nhave %#q\\nwant %#q\", idx, test.Value, got, want)\n\t\t\t}\n\t\t}\n\t}\n}\n\ntype AttrParent struct {\n\tX string `xml:\"X>Y,attr\"`\n}\n\ntype BadAttr struct {\n\tName []string `xml:\"name,attr\"`\n}\n\nvar marshalErrorTests = []struct {\n\tValue interface{}\n\tErr   string\n\tKind  reflect.Kind\n}{\n\t{\n\t\tValue: make(chan bool),\n\t\tErr:   \"xml: unsupported type: chan bool\",\n\t\tKind:  reflect.Chan,\n\t},\n\t{\n\t\tValue: map[string]string{\n\t\t\t\"question\": \"What do you get when you multiply six by nine?\",\n\t\t\t\"answer\":   \"42\",\n\t\t},\n\t\tErr:  \"xml: unsupported type: map[string]string\",\n\t\tKind: reflect.Map,\n\t},\n\t{\n\t\tValue: map[*Ship]bool{nil: false},\n\t\tErr:   \"xml: unsupported type: map[*xml.Ship]bool\",\n\t\tKind:  reflect.Map,\n\t},\n\t{\n\t\tValue: &Domain{Comment: []byte(\"f--bar\")},\n\t\tErr:   `xml: comments must not contain \"--\"`,\n\t},\n\t// Reject parent chain with attr, never worked; see golang.org/issue/5033.\n\t{\n\t\tValue: &AttrParent{},\n\t\tErr:   `xml: X>Y chain not valid with attr flag`,\n\t},\n\t{\n\t\tValue: BadAttr{[]string{\"X\", \"Y\"}},\n\t\tErr:   `xml: unsupported type: []string`,\n\t},\n}\n\nvar marshalIndentTests = []struct {\n\tValue     interface{}\n\tPrefix    string\n\tIndent    
string\n\tExpectXML string\n}{\n\t{\n\t\tValue: &SecretAgent{\n\t\t\tHandle:    \"007\",\n\t\t\tIdentity:  \"James Bond\",\n\t\t\tObfuscate: \"<redacted/>\",\n\t\t},\n\t\tPrefix:    \"\",\n\t\tIndent:    \"\\t\",\n\t\tExpectXML: fmt.Sprintf(\"<agent handle=\\\"007\\\">\\n\\t<Identity>James Bond</Identity><redacted/>\\n</agent>\"),\n\t},\n}\n\nfunc TestMarshalErrors(t *testing.T) {\n\tfor idx, test := range marshalErrorTests {\n\t\tdata, err := Marshal(test.Value)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"#%d: marshal(%#v) = [success] %q, want error %v\", idx, test.Value, data, test.Err)\n\t\t\tcontinue\n\t\t}\n\t\tif err.Error() != test.Err {\n\t\t\tt.Errorf(\"#%d: marshal(%#v) = [error] %v, want %v\", idx, test.Value, err, test.Err)\n\t\t}\n\t\tif test.Kind != reflect.Invalid {\n\t\t\tif kind := err.(*UnsupportedTypeError).Type.Kind(); kind != test.Kind {\n\t\t\t\tt.Errorf(\"#%d: marshal(%#v) = [error kind] %s, want %s\", idx, test.Value, kind, test.Kind)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Do invertibility testing on the various structures that we test\nfunc TestUnmarshal(t *testing.T) {\n\tfor i, test := range marshalTests {\n\t\tif test.MarshalOnly {\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := test.Value.(*Plain); ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tvt := reflect.TypeOf(test.Value)\n\t\tdest := reflect.New(vt.Elem()).Interface()\n\t\terr := Unmarshal([]byte(test.ExpectXML), dest)\n\n\t\tswitch fix := dest.(type) {\n\t\tcase *Feed:\n\t\t\tfix.Author.InnerXML = \"\"\n\t\t\tfor i := range fix.Entry {\n\t\t\t\tfix.Entry[i].Author.InnerXML = \"\"\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"#%d: unexpected error: %#v\", i, err)\n\t\t} else if got, want := dest, test.Value; !reflect.DeepEqual(got, want) {\n\t\t\tt.Errorf(\"#%d: unmarshal(%q):\\nhave %#v\\nwant %#v\", i, test.ExpectXML, got, want)\n\t\t}\n\t}\n}\n\nfunc TestMarshalIndent(t *testing.T) {\n\tfor i, test := range marshalIndentTests {\n\t\tdata, err := MarshalIndent(test.Value, test.Prefix, 
test.Indent)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"#%d: Error: %s\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif got, want := string(data), test.ExpectXML; got != want {\n\t\t\tt.Errorf(\"#%d: MarshalIndent:\\nGot:%s\\nWant:\\n%s\", i, got, want)\n\t\t}\n\t}\n}\n\ntype limitedBytesWriter struct {\n\tw      io.Writer\n\tremain int // until writes fail\n}\n\nfunc (lw *limitedBytesWriter) Write(p []byte) (n int, err error) {\n\tif lw.remain <= 0 {\n\t\tprintln(\"error\")\n\t\treturn 0, errors.New(\"write limit hit\")\n\t}\n\tif len(p) > lw.remain {\n\t\tp = p[:lw.remain]\n\t\tn, _ = lw.w.Write(p)\n\t\tlw.remain = 0\n\t\treturn n, errors.New(\"write limit hit\")\n\t}\n\tn, err = lw.w.Write(p)\n\tlw.remain -= n\n\treturn n, err\n}\n\nfunc TestMarshalWriteErrors(t *testing.T) {\n\tvar buf bytes.Buffer\n\tconst writeCap = 1024\n\tw := &limitedBytesWriter{&buf, writeCap}\n\tenc := NewEncoder(w)\n\tvar err error\n\tvar i int\n\tconst n = 4000\n\tfor i = 1; i <= n; i++ {\n\t\terr = enc.Encode(&Passenger{\n\t\t\tName:   []string{\"Alice\", \"Bob\"},\n\t\t\tWeight: 5,\n\t\t})\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err == nil {\n\t\tt.Error(\"expected an error\")\n\t}\n\tif i == n {\n\t\tt.Errorf(\"expected to fail before the end\")\n\t}\n\tif buf.Len() != writeCap {\n\t\tt.Errorf(\"buf.Len() = %d; want %d\", buf.Len(), writeCap)\n\t}\n}\n\nfunc TestMarshalWriteIOErrors(t *testing.T) {\n\tenc := NewEncoder(errWriter{})\n\n\texpectErr := \"unwritable\"\n\terr := enc.Encode(&Passenger{})\n\tif err == nil || err.Error() != expectErr {\n\t\tt.Errorf(\"EscapeTest = [error] %v, want %v\", err, expectErr)\n\t}\n}\n\nfunc TestMarshalFlush(t *testing.T) {\n\tvar buf bytes.Buffer\n\tenc := NewEncoder(&buf)\n\tif err := enc.EncodeToken(CharData(\"hello world\")); err != nil {\n\t\tt.Fatalf(\"enc.EncodeToken: %v\", err)\n\t}\n\tif buf.Len() > 0 {\n\t\tt.Fatalf(\"enc.EncodeToken caused actual write: %q\", buf.Bytes())\n\t}\n\tif err := enc.Flush(); err != nil 
{\n\t\tt.Fatalf(\"enc.Flush: %v\", err)\n\t}\n\tif buf.String() != \"hello world\" {\n\t\tt.Fatalf(\"after enc.Flush, buf.String() = %q, want %q\", buf.String(), \"hello world\")\n\t}\n}\n\nfunc BenchmarkMarshal(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tMarshal(atomValue)\n\t}\n}\n\nfunc BenchmarkUnmarshal(b *testing.B) {\n\txml := []byte(atomXml)\n\tfor i := 0; i < b.N; i++ {\n\t\tUnmarshal(xml, &Feed{})\n\t}\n}\n\n// golang.org/issue/6556\nfunc TestStructPointerMarshal(t *testing.T) {\n\ttype A struct {\n\t\tXMLName string `xml:\"a\"`\n\t\tB       []interface{}\n\t}\n\ttype C struct {\n\t\tXMLName Name\n\t\tValue   string `xml:\"value\"`\n\t}\n\n\ta := new(A)\n\ta.B = append(a.B, &C{\n\t\tXMLName: Name{Local: \"c\"},\n\t\tValue:   \"x\",\n\t})\n\n\tb, err := Marshal(a)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif x := string(b); x != \"<a><c><value>x</value></c></a>\" {\n\t\tt.Fatal(x)\n\t}\n\tvar v A\n\terr = Unmarshal(b, &v)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nvar encodeTokenTests = []struct {\n\ttok  Token\n\twant string\n\tok   bool\n}{\n\t{StartElement{Name{\"space\", \"local\"}, nil}, \"<local xmlns=\\\"space\\\">\", true},\n\t{StartElement{Name{\"space\", \"\"}, nil}, \"\", false},\n\t{EndElement{Name{\"space\", \"\"}}, \"\", false},\n\t{CharData(\"foo\"), \"foo\", true},\n\t{Comment(\"foo\"), \"<!--foo-->\", true},\n\t{Comment(\"foo-->\"), \"\", false},\n\t{ProcInst{\"Target\", []byte(\"Instruction\")}, \"<?Target Instruction?>\", true},\n\t{ProcInst{\"\", []byte(\"Instruction\")}, \"\", false},\n\t{ProcInst{\"Target\", []byte(\"Instruction?>\")}, \"\", false},\n\t{Directive(\"foo\"), \"<!foo>\", true},\n\t{Directive(\"foo>\"), \"\", false},\n}\n\nfunc TestEncodeToken(t *testing.T) {\n\tfor _, tt := range encodeTokenTests {\n\t\tvar buf bytes.Buffer\n\t\tenc := NewEncoder(&buf)\n\t\terr := enc.EncodeToken(tt.tok)\n\t\tswitch {\n\t\tcase !tt.ok && err == nil:\n\t\t\tt.Errorf(\"enc.EncodeToken(%#v): expected error; got none\", 
tt.tok)\n\t\tcase tt.ok && err != nil:\n\t\t\tt.Fatalf(\"enc.EncodeToken: %v\", err)\n\t\tcase !tt.ok && err != nil:\n\t\t\t// expected error, got one\n\t\t}\n\t\tif err := enc.Flush(); err != nil {\n\t\t\tt.Fatalf(\"enc.EncodeToken: %v\", err)\n\t\t}\n\t\tif got := buf.String(); got != tt.want {\n\t\t\tt.Errorf(\"enc.EncodeToken = %s; want: %s\", got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestProcInstEncodeToken(t *testing.T) {\n\tvar buf bytes.Buffer\n\tenc := NewEncoder(&buf)\n\n\tif err := enc.EncodeToken(ProcInst{\"xml\", []byte(\"Instruction\")}); err != nil {\n\t\tt.Fatalf(\"enc.EncodeToken: expected to be able to encode xml target ProcInst as first token, %s\", err)\n\t}\n\n\tif err := enc.EncodeToken(ProcInst{\"Target\", []byte(\"Instruction\")}); err != nil {\n\t\tt.Fatalf(\"enc.EncodeToken: expected to be able to add non-xml target ProcInst\")\n\t}\n\n\tif err := enc.EncodeToken(ProcInst{\"xml\", []byte(\"Instruction\")}); err == nil {\n\t\tt.Fatalf(\"enc.EncodeToken: expected to not be allowed to encode xml target ProcInst when not first token\")\n\t}\n}\n\nfunc TestDecodeEncode(t *testing.T) {\n\tvar in, out bytes.Buffer\n\tin.WriteString(`<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<?Target Instruction?>\n<root>\n</root>\t\n`)\n\tdec := NewDecoder(&in)\n\tenc := NewEncoder(&out)\n\tfor tok, err := dec.Token(); err == nil; tok, err = dec.Token() {\n\t\terr = enc.EncodeToken(tok)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"enc.EncodeToken: Unable to encode token (%#v), %v\", tok, err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/xml/read.go",
    "content": "// Copyright 2009 The Go Authors.  All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage xml\n\nimport (\n\t\"bytes\"\n\t\"encoding\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// BUG(rsc): Mapping between XML elements and data structures is inherently flawed:\n// an XML element is an order-dependent collection of anonymous\n// values, while a data structure is an order-independent collection\n// of named values.\n// See package json for a textual representation more suitable\n// to data structures.\n\n// Unmarshal parses the XML-encoded data and stores the result in\n// the value pointed to by v, which must be an arbitrary struct,\n// slice, or string. Well-formed data that does not fit into v is\n// discarded.\n//\n// Because Unmarshal uses the reflect package, it can only assign\n// to exported (upper case) fields.  Unmarshal uses a case-sensitive\n// comparison to match XML element names to tag values and struct\n// field names.\n//\n// Unmarshal maps an XML element to a struct using the following rules.\n// In the rules, the tag of a field refers to the value associated with the\n// key 'xml' in the struct field's tag (see the example above).\n//\n//   * If the struct has a field of type []byte or string with tag\n//      \",innerxml\", Unmarshal accumulates the raw XML nested inside the\n//      element in that field.  
The rest of the rules still apply.\n//\n//   * If the struct has a field named XMLName of type xml.Name,\n//      Unmarshal records the element name in that field.\n//\n//   * If the XMLName field has an associated tag of the form\n//      \"name\" or \"namespace-URL name\", the XML element must have\n//      the given name (and, optionally, name space) or else Unmarshal\n//      returns an error.\n//\n//   * If the XML element has an attribute whose name matches a\n//      struct field name with an associated tag containing \",attr\" or\n//      the explicit name in a struct field tag of the form \"name,attr\",\n//      Unmarshal records the attribute value in that field.\n//\n//   * If the XML element contains character data, that data is\n//      accumulated in the first struct field that has tag \",chardata\".\n//      The struct field may have type []byte or string.\n//      If there is no such field, the character data is discarded.\n//\n//   * If the XML element contains comments, they are accumulated in\n//      the first struct field that has tag \",comment\".  The struct\n//      field may have type []byte or string.  If there is no such\n//      field, the comments are discarded.\n//\n//   * If the XML element contains a sub-element whose name matches\n//      the prefix of a tag formatted as \"a\" or \"a>b>c\", unmarshal\n//      will descend into the XML structure looking for elements with the\n//      given names, and will map the innermost elements to that struct\n//      field. 
A tag starting with \">\" is equivalent to one starting\n//      with the field name followed by \">\".\n//\n//   * If the XML element contains a sub-element whose name matches\n//      a struct field's XMLName tag and the struct field has no\n//      explicit name tag as per the previous rule, unmarshal maps\n//      the sub-element to that struct field.\n//\n//   * If the XML element contains a sub-element whose name matches a\n//      field without any mode flags (\",attr\", \",chardata\", etc), Unmarshal\n//      maps the sub-element to that struct field.\n//\n//   * If the XML element contains a sub-element that hasn't matched any\n//      of the above rules and the struct has a field with tag \",any\",\n//      unmarshal maps the sub-element to that struct field.\n//\n//   * An anonymous struct field is handled as if the fields of its\n//      value were part of the outer struct.\n//\n//   * A struct field with tag \"-\" is never unmarshalled into.\n//\n// Unmarshal maps an XML element to a string or []byte by saving the\n// concatenation of that element's character data in the string or\n// []byte. The saved []byte is never nil.\n//\n// Unmarshal maps an attribute value to a string or []byte by saving\n// the value in the string or slice.\n//\n// Unmarshal maps an XML element to a slice by extending the length of\n// the slice and mapping the element to the newly created value.\n//\n// Unmarshal maps an XML element or attribute value to a bool by\n// setting it to the boolean value represented by the string.\n//\n// Unmarshal maps an XML element or attribute value to an integer or\n// floating-point field by setting the field to the result of\n// interpreting the string value in decimal.  
There is no check for\n// overflow.\n//\n// Unmarshal maps an XML element to an xml.Name by recording the\n// element name.\n//\n// Unmarshal maps an XML element to a pointer by setting the pointer\n// to a freshly allocated value and then mapping the element to that value.\n//\nfunc Unmarshal(data []byte, v interface{}) error {\n\treturn NewDecoder(bytes.NewReader(data)).Decode(v)\n}\n\n// Decode works like xml.Unmarshal, except it reads the decoder\n// stream to find the start element.\nfunc (d *Decoder) Decode(v interface{}) error {\n\treturn d.DecodeElement(v, nil)\n}\n\n// DecodeElement works like xml.Unmarshal except that it takes\n// a pointer to the start XML element to decode into v.\n// It is useful when a client reads some raw XML tokens itself\n// but also wants to defer to Unmarshal for some elements.\nfunc (d *Decoder) DecodeElement(v interface{}, start *StartElement) error {\n\tval := reflect.ValueOf(v)\n\tif val.Kind() != reflect.Ptr {\n\t\treturn errors.New(\"non-pointer passed to Unmarshal\")\n\t}\n\treturn d.unmarshal(val.Elem(), start)\n}\n\n// An UnmarshalError represents an error in the unmarshalling process.\ntype UnmarshalError string\n\nfunc (e UnmarshalError) Error() string { return string(e) }\n\n// Unmarshaler is the interface implemented by objects that can unmarshal\n// an XML element description of themselves.\n//\n// UnmarshalXML decodes a single XML element\n// beginning with the given start element.\n// If it returns an error, the outer call to Unmarshal stops and\n// returns that error.\n// UnmarshalXML must consume exactly one XML element.\n// One common implementation strategy is to unmarshal into\n// a separate value with a layout matching the expected XML\n// using d.DecodeElement,  and then to copy the data from\n// that value into the receiver.\n// Another common strategy is to use d.Token to process the\n// XML object one token at a time.\n// UnmarshalXML may not use d.RawToken.\ntype Unmarshaler interface 
{\n\tUnmarshalXML(d *Decoder, start StartElement) error\n}\n\n// UnmarshalerAttr is the interface implemented by objects that can unmarshal\n// an XML attribute description of themselves.\n//\n// UnmarshalXMLAttr decodes a single XML attribute.\n// If it returns an error, the outer call to Unmarshal stops and\n// returns that error.\n// UnmarshalXMLAttr is used only for struct fields with the\n// \"attr\" option in the field tag.\ntype UnmarshalerAttr interface {\n\tUnmarshalXMLAttr(attr Attr) error\n}\n\n// receiverType returns the receiver type to use in an expression like \"%s.MethodName\".\nfunc receiverType(val interface{}) string {\n\tt := reflect.TypeOf(val)\n\tif t.Name() != \"\" {\n\t\treturn t.String()\n\t}\n\treturn \"(\" + t.String() + \")\"\n}\n\n// unmarshalInterface unmarshals a single XML element into val.\n// start is the opening tag of the element.\nfunc (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error {\n\t// Record that decoder must stop at end tag corresponding to start.\n\tp.pushEOF()\n\n\tp.unmarshalDepth++\n\terr := val.UnmarshalXML(p, *start)\n\tp.unmarshalDepth--\n\tif err != nil {\n\t\tp.popEOF()\n\t\treturn err\n\t}\n\n\tif !p.popEOF() {\n\t\treturn fmt.Errorf(\"xml: %s.UnmarshalXML did not consume entire <%s> element\", receiverType(val), start.Name.Local)\n\t}\n\n\treturn nil\n}\n\n// unmarshalTextInterface unmarshals a single XML element into val.\n// The chardata contained in the element (but not its children)\n// is passed to the text unmarshaler.\nfunc (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error {\n\tvar buf []byte\n\tdepth := 1\n\tfor depth > 0 {\n\t\tt, err := p.Token()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tswitch t := t.(type) {\n\t\tcase CharData:\n\t\t\tif depth == 1 {\n\t\t\t\tbuf = append(buf, t...)\n\t\t\t}\n\t\tcase StartElement:\n\t\t\tdepth++\n\t\tcase EndElement:\n\t\t\tdepth--\n\t\t}\n\t}\n\treturn val.UnmarshalText(buf)\n}\n\n// 
unmarshalAttr unmarshals a single XML attribute into val.\nfunc (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error {\n\tif val.Kind() == reflect.Ptr {\n\t\tif val.IsNil() {\n\t\t\tval.Set(reflect.New(val.Type().Elem()))\n\t\t}\n\t\tval = val.Elem()\n\t}\n\n\tif val.CanInterface() && val.Type().Implements(unmarshalerAttrType) {\n\t\t// This is an unmarshaler with a non-pointer receiver,\n\t\t// so it's likely to be incorrect, but we do what we're told.\n\t\treturn val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)\n\t}\n\tif val.CanAddr() {\n\t\tpv := val.Addr()\n\t\tif pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) {\n\t\t\treturn pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)\n\t\t}\n\t}\n\n\t// Not an UnmarshalerAttr; try encoding.TextUnmarshaler.\n\tif val.CanInterface() && val.Type().Implements(textUnmarshalerType) {\n\t\t// This is an unmarshaler with a non-pointer receiver,\n\t\t// so it's likely to be incorrect, but we do what we're told.\n\t\treturn val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))\n\t}\n\tif val.CanAddr() {\n\t\tpv := val.Addr()\n\t\tif pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {\n\t\t\treturn pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))\n\t\t}\n\t}\n\n\tcopyValue(val, []byte(attr.Value))\n\treturn nil\n}\n\nvar (\n\tunmarshalerType     = reflect.TypeOf((*Unmarshaler)(nil)).Elem()\n\tunmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem()\n\ttextUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()\n)\n\n// Find reflect.Type for an element's type attribute.\nfunc (p *Decoder) typeForElement(val reflect.Value, start *StartElement) reflect.Type {\n\tt := \"\"\n\tfor i, a := range start.Attr {\n\t\tif a.Name == xmlSchemaInstance || a.Name == xsiType {\n\t\t\tt = a.Value\n\t\t\t// HACK: ensure xsi:type is last in the list to avoid using that value for\n\t\t\t// a \"type\" attribute, such as 
ManagedObjectReference.Type for example.\n\t\t\t// Note that xsi:type is already the last attribute in VC/ESX responses.\n\t\t\t// This is only an issue with govmomi simulator generated responses.\n\t\t\t// Proper fix will require finding a few needles in this xml package haystack.\n\t\t\t// Note: govmomi uses xmlSchemaInstance, other clients (e.g. rbvmomi) use xsiType.\n\t\t\t// They are the same thing to XML parsers, but not to this hack here.\n\t\t\tx := len(start.Attr) - 1\n\t\t\tif i != x {\n\t\t\t\tstart.Attr[i] = start.Attr[x]\n\t\t\t\tstart.Attr[x] = a\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif t == \"\" {\n\t\t// No type attribute; fall back to looking up type by interface name.\n\t\tt = val.Type().Name()\n\t}\n\n\t// Maybe the type is a basic xsd:* type.\n\ttyp := stringToType(t)\n\tif typ != nil {\n\t\treturn typ\n\t}\n\n\t// Maybe the type is a custom type.\n\tif p.TypeFunc != nil {\n\t\tif typ, ok := p.TypeFunc(t); ok {\n\t\t\treturn typ\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// Unmarshal a single XML element into val.\nfunc (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error {\n\t// Find start element if we need it.\n\tif start == nil {\n\t\tfor {\n\t\t\ttok, err := p.Token()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif t, ok := tok.(StartElement); ok {\n\t\t\t\tstart = &t\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// Try to figure out type for empty interface values.\n\tif val.Kind() == reflect.Interface && val.IsNil() {\n\t\ttyp := p.typeForElement(val, start)\n\t\tif typ != nil {\n\t\t\tpval := reflect.New(typ).Elem()\n\t\t\terr := p.unmarshal(pval, start)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor i := 0; i < 2; i++ {\n\t\t\t\tif typ.Implements(val.Type()) {\n\t\t\t\t\tval.Set(pval)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\ttyp = reflect.PtrTo(typ)\n\t\t\t\tpval = pval.Addr()\n\t\t\t}\n\n\t\t\tval.Set(pval)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// Load value from interface, but only if the result will 
be\n\t// usefully addressable.\n\tif val.Kind() == reflect.Interface && !val.IsNil() {\n\t\te := val.Elem()\n\t\tif e.Kind() == reflect.Ptr && !e.IsNil() {\n\t\t\tval = e\n\t\t}\n\t}\n\n\tif val.Kind() == reflect.Ptr {\n\t\tif val.IsNil() {\n\t\t\tval.Set(reflect.New(val.Type().Elem()))\n\t\t}\n\t\tval = val.Elem()\n\t}\n\n\tif val.CanInterface() && val.Type().Implements(unmarshalerType) {\n\t\t// This is an unmarshaler with a non-pointer receiver,\n\t\t// so it's likely to be incorrect, but we do what we're told.\n\t\treturn p.unmarshalInterface(val.Interface().(Unmarshaler), start)\n\t}\n\n\tif val.CanAddr() {\n\t\tpv := val.Addr()\n\t\tif pv.CanInterface() && pv.Type().Implements(unmarshalerType) {\n\t\t\treturn p.unmarshalInterface(pv.Interface().(Unmarshaler), start)\n\t\t}\n\t}\n\n\tif val.CanInterface() && val.Type().Implements(textUnmarshalerType) {\n\t\treturn p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start)\n\t}\n\n\tif val.CanAddr() {\n\t\tpv := val.Addr()\n\t\tif pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {\n\t\t\treturn p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start)\n\t\t}\n\t}\n\n\tvar (\n\t\tdata         []byte\n\t\tsaveData     reflect.Value\n\t\tcomment      []byte\n\t\tsaveComment  reflect.Value\n\t\tsaveXML      reflect.Value\n\t\tsaveXMLIndex int\n\t\tsaveXMLData  []byte\n\t\tsaveAny      reflect.Value\n\t\tsv           reflect.Value\n\t\ttinfo        *typeInfo\n\t\terr          error\n\t)\n\n\tswitch v := val; v.Kind() {\n\tdefault:\n\t\treturn errors.New(\"unknown type \" + v.Type().String())\n\n\tcase reflect.Interface:\n\t\t// TODO: For now, simply ignore the field. 
In the near\n\t\t//       future we may choose to unmarshal the start\n\t\t//       element on it, if not nil.\n\t\treturn p.Skip()\n\n\tcase reflect.Slice:\n\t\ttyp := v.Type()\n\t\tif typ.Elem().Kind() == reflect.Uint8 {\n\t\t\t// []byte\n\t\t\tsaveData = v\n\t\t\tbreak\n\t\t}\n\n\t\t// Slice of element values.\n\t\t// Grow slice.\n\t\tn := v.Len()\n\t\tif n >= v.Cap() {\n\t\t\tncap := 2 * n\n\t\t\tif ncap < 4 {\n\t\t\t\tncap = 4\n\t\t\t}\n\t\t\tnew := reflect.MakeSlice(typ, n, ncap)\n\t\t\treflect.Copy(new, v)\n\t\t\tv.Set(new)\n\t\t}\n\t\tv.SetLen(n + 1)\n\n\t\t// Recur to read element into slice.\n\t\tif err := p.unmarshal(v.Index(n), start); err != nil {\n\t\t\tv.SetLen(n)\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\n\tcase reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String:\n\t\tsaveData = v\n\n\tcase reflect.Struct:\n\t\ttyp := v.Type()\n\t\tif typ == nameType {\n\t\t\tv.Set(reflect.ValueOf(start.Name))\n\t\t\tbreak\n\t\t}\n\n\t\tsv = v\n\t\ttinfo, err = getTypeInfo(typ)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Validate and assign element name.\n\t\tif tinfo.xmlname != nil {\n\t\t\tfinfo := tinfo.xmlname\n\t\t\tif finfo.name != \"\" && finfo.name != start.Name.Local {\n\t\t\t\treturn UnmarshalError(\"expected element type <\" + finfo.name + \"> but have <\" + start.Name.Local + \">\")\n\t\t\t}\n\t\t\tif finfo.xmlns != \"\" && finfo.xmlns != start.Name.Space {\n\t\t\t\te := \"expected element <\" + finfo.name + \"> in name space \" + finfo.xmlns + \" but have \"\n\t\t\t\tif start.Name.Space == \"\" {\n\t\t\t\t\te += \"no name space\"\n\t\t\t\t} else {\n\t\t\t\t\te += start.Name.Space\n\t\t\t\t}\n\t\t\t\treturn UnmarshalError(e)\n\t\t\t}\n\t\t\tfv := finfo.value(sv)\n\t\t\tif _, ok := fv.Interface().(Name); ok 
{\n\t\t\t\tfv.Set(reflect.ValueOf(start.Name))\n\t\t\t}\n\t\t}\n\n\t\t// Assign attributes.\n\t\t// Also, determine whether we need to save character data or comments.\n\t\tfor i := range tinfo.fields {\n\t\t\tfinfo := &tinfo.fields[i]\n\t\t\tswitch finfo.flags & fMode {\n\t\t\tcase fAttr:\n\t\t\t\tstrv := finfo.value(sv)\n\t\t\t\t// Look for attribute.\n\t\t\t\tfor _, a := range start.Attr {\n\t\t\t\t\tif a.Name.Local == finfo.name && (finfo.xmlns == \"\" || finfo.xmlns == a.Name.Space) {\n\t\t\t\t\t\tif err := p.unmarshalAttr(strv, a); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase fCharData:\n\t\t\t\tif !saveData.IsValid() {\n\t\t\t\t\tsaveData = finfo.value(sv)\n\t\t\t\t}\n\n\t\t\tcase fComment:\n\t\t\t\tif !saveComment.IsValid() {\n\t\t\t\t\tsaveComment = finfo.value(sv)\n\t\t\t\t}\n\n\t\t\tcase fAny, fAny | fElement:\n\t\t\t\tif !saveAny.IsValid() {\n\t\t\t\t\tsaveAny = finfo.value(sv)\n\t\t\t\t}\n\n\t\t\tcase fInnerXml:\n\t\t\t\tif !saveXML.IsValid() {\n\t\t\t\t\tsaveXML = finfo.value(sv)\n\t\t\t\t\tif p.saved == nil {\n\t\t\t\t\t\tsaveXMLIndex = 0\n\t\t\t\t\t\tp.saved = new(bytes.Buffer)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tsaveXMLIndex = p.savedOffset()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Find end element.\n\t// Process sub-elements along the way.\nLoop:\n\tfor {\n\t\tvar savedOffset int\n\t\tif saveXML.IsValid() {\n\t\t\tsavedOffset = p.savedOffset()\n\t\t}\n\t\ttok, err := p.Token()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tswitch t := tok.(type) {\n\t\tcase StartElement:\n\t\t\tconsumed := false\n\t\t\tif sv.IsValid() {\n\t\t\t\tconsumed, err = p.unmarshalPath(tinfo, sv, nil, &t)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !consumed && saveAny.IsValid() {\n\t\t\t\t\tconsumed = true\n\t\t\t\t\tif err := p.unmarshal(saveAny, &t); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !consumed {\n\t\t\t\tif err := 
p.Skip(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase EndElement:\n\t\t\tif saveXML.IsValid() {\n\t\t\t\tsaveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset]\n\t\t\t\tif saveXMLIndex == 0 {\n\t\t\t\t\tp.saved = nil\n\t\t\t\t}\n\t\t\t}\n\t\t\tbreak Loop\n\n\t\tcase CharData:\n\t\t\tif saveData.IsValid() {\n\t\t\t\tdata = append(data, t...)\n\t\t\t}\n\n\t\tcase Comment:\n\t\t\tif saveComment.IsValid() {\n\t\t\t\tcomment = append(comment, t...)\n\t\t\t}\n\t\t}\n\t}\n\n\tif saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) {\n\t\tif err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsaveData = reflect.Value{}\n\t}\n\n\tif saveData.IsValid() && saveData.CanAddr() {\n\t\tpv := saveData.Addr()\n\t\tif pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {\n\t\t\tif err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsaveData = reflect.Value{}\n\t\t}\n\t}\n\n\tif err := copyValue(saveData, data); err != nil {\n\t\treturn err\n\t}\n\n\tswitch t := saveComment; t.Kind() {\n\tcase reflect.String:\n\t\tt.SetString(string(comment))\n\tcase reflect.Slice:\n\t\tt.Set(reflect.ValueOf(comment))\n\t}\n\n\tswitch t := saveXML; t.Kind() {\n\tcase reflect.String:\n\t\tt.SetString(string(saveXMLData))\n\tcase reflect.Slice:\n\t\tt.Set(reflect.ValueOf(saveXMLData))\n\t}\n\n\treturn nil\n}\n\nfunc copyValue(dst reflect.Value, src []byte) (err error) {\n\tdst0 := dst\n\n\tif dst.Kind() == reflect.Ptr {\n\t\tif dst.IsNil() {\n\t\t\tdst.Set(reflect.New(dst.Type().Elem()))\n\t\t}\n\t\tdst = dst.Elem()\n\t}\n\n\t// Save accumulated data.\n\tswitch dst.Kind() {\n\tcase reflect.Invalid:\n\t\t// Probably a comment.\n\tdefault:\n\t\treturn errors.New(\"cannot unmarshal into \" + dst0.Type().String())\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, 
reflect.Int64:\n\t\titmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdst.SetInt(itmp)\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\tvar utmp uint64\n\t\tif len(src) > 0 && src[0] == '-' {\n\t\t\t// Negative value for unsigned field.\n\t\t\t// Assume it was serialized following two's complement.\n\t\t\titmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// Reinterpret value based on type width.\n\t\t\tswitch dst.Type().Bits() {\n\t\t\tcase 8:\n\t\t\t\tutmp = uint64(uint8(itmp))\n\t\t\tcase 16:\n\t\t\t\tutmp = uint64(uint16(itmp))\n\t\t\tcase 32:\n\t\t\t\tutmp = uint64(uint32(itmp))\n\t\t\tcase 64:\n\t\t\t\tutmp = uint64(uint64(itmp))\n\t\t\t}\n\t\t} else {\n\t\t\tutmp, err = strconv.ParseUint(string(src), 10, dst.Type().Bits())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tdst.SetUint(utmp)\n\tcase reflect.Float32, reflect.Float64:\n\t\tftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdst.SetFloat(ftmp)\n\tcase reflect.Bool:\n\t\tvalue, err := strconv.ParseBool(strings.TrimSpace(string(src)))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdst.SetBool(value)\n\tcase reflect.String:\n\t\tdst.SetString(string(src))\n\tcase reflect.Slice:\n\t\tif len(src) == 0 {\n\t\t\t// non-nil to flag presence\n\t\t\tsrc = []byte{}\n\t\t}\n\t\tdst.SetBytes(src)\n\t}\n\treturn nil\n}\n\n// unmarshalPath walks down an XML structure looking for wanted\n// paths, and calls unmarshal on them.\n// The consumed result tells whether XML elements have been consumed\n// from the Decoder until start's matching end element, or if it's\n// still untouched because start is uninteresting for sv's fields.\nfunc (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) 
(consumed bool, err error) {\n\trecurse := false\nLoop:\n\tfor i := range tinfo.fields {\n\t\tfinfo := &tinfo.fields[i]\n\t\tif finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != \"\" && finfo.xmlns != start.Name.Space {\n\t\t\tcontinue\n\t\t}\n\t\tfor j := range parents {\n\t\t\tif parents[j] != finfo.parents[j] {\n\t\t\t\tcontinue Loop\n\t\t\t}\n\t\t}\n\t\tif len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {\n\t\t\t// It's a perfect match, unmarshal the field.\n\t\t\treturn true, p.unmarshal(finfo.value(sv), start)\n\t\t}\n\t\tif len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {\n\t\t\t// It's a prefix for the field. Break and recurse\n\t\t\t// since it's not ok for one field path to be itself\n\t\t\t// the prefix for another field path.\n\t\t\trecurse = true\n\n\t\t\t// We can reuse the same slice as long as we\n\t\t\t// don't try to append to it.\n\t\t\tparents = finfo.parents[:len(parents)+1]\n\t\t\tbreak\n\t\t}\n\t}\n\tif !recurse {\n\t\t// We have no business with this element.\n\t\treturn false, nil\n\t}\n\t// The element is not a perfect match for any field, but one\n\t// or more fields have the path to this element as a parent\n\t// prefix. 
Recurse and attempt to match these.\n\tfor {\n\t\tvar tok Token\n\t\ttok, err = p.Token()\n\t\tif err != nil {\n\t\t\treturn true, err\n\t\t}\n\t\tswitch t := tok.(type) {\n\t\tcase StartElement:\n\t\t\tconsumed2, err := p.unmarshalPath(tinfo, sv, parents, &t)\n\t\t\tif err != nil {\n\t\t\t\treturn true, err\n\t\t\t}\n\t\t\tif !consumed2 {\n\t\t\t\tif err := p.Skip(); err != nil {\n\t\t\t\t\treturn true, err\n\t\t\t\t}\n\t\t\t}\n\t\tcase EndElement:\n\t\t\treturn true, nil\n\t\t}\n\t}\n}\n\n// Skip reads tokens until it has consumed the end element\n// matching the most recent start element already consumed.\n// It recurs if it encounters a start element, so it can be used to\n// skip nested structures.\n// It returns nil if it finds an end element matching the start\n// element; otherwise it returns an error describing the problem.\nfunc (d *Decoder) Skip() error {\n\tfor {\n\t\ttok, err := d.Token()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tswitch tok.(type) {\n\t\tcase StartElement:\n\t\t\tif err := d.Skip(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase EndElement:\n\t\t\treturn nil\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/xml/read_test.go",
    "content": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage xml\n\nimport (\n\t\"io\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\n// Stripped down Atom feed data structures.\n\nfunc TestUnmarshalFeed(t *testing.T) {\n\tvar f Feed\n\tif err := Unmarshal([]byte(atomFeedString), &f); err != nil {\n\t\tt.Fatalf(\"Unmarshal: %s\", err)\n\t}\n\tif !reflect.DeepEqual(f, atomFeed) {\n\t\tt.Fatalf(\"have %#v\\nwant %#v\", f, atomFeed)\n\t}\n}\n\n// hget http://codereview.appspot.com/rss/mine/rsc\nconst atomFeedString = `\n<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<feed xmlns=\"http://www.w3.org/2005/Atom\" xml:lang=\"en-us\" updated=\"2009-10-04T01:35:58+00:00\"><title>Code Review - My issues</title><link href=\"http://codereview.appspot.com/\" rel=\"alternate\"></link><link href=\"http://codereview.appspot.com/rss/mine/rsc\" rel=\"self\"></link><id>http://codereview.appspot.com/</id><author><name>rietveld&lt;&gt;</name></author><entry><title>rietveld: an attempt at pubsubhubbub\n</title><link href=\"http://codereview.appspot.com/126085\" rel=\"alternate\"></link><updated>2009-10-04T01:35:58+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:134d9179c41f806be79b3a5f7877d19a</id><summary type=\"html\">\n  An attempt at adding pubsubhubbub support to Rietveld.\nhttp://code.google.com/p/pubsubhubbub\nhttp://code.google.com/p/rietveld/issues/detail?id=155\n\nThe server side of the protocol is trivial:\n  1. add a &amp;lt;link rel=&amp;quot;hub&amp;quot; href=&amp;quot;hub-server&amp;quot;&amp;gt; tag to all\n     feeds that will be pubsubhubbubbed.\n  2. 
every time one of those feeds changes, tell the hub\n     with a simple POST request.\n\nI have tested this by adding debug prints to a local hub\nserver and checking that the server got the right publish\nrequests.\n\nI can&amp;#39;t quite get the server to work, but I think the bug\nis not in my code.  I think that the server expects to be\nable to grab the feed and see the feed&amp;#39;s actual URL in\nthe link rel=&amp;quot;self&amp;quot;, but the default value for that drops\nthe :port from the URL, and I cannot for the life of me\nfigure out how to get the Atom generator deep inside\ndjango not to do that, or even where it is doing that,\nor even what code is running to generate the Atom feed.\n(I thought I knew but I added some assert False statements\nand it kept running!)\n\nIgnoring that particular problem, I would appreciate\nfeedback on the right way to get the two values at\nthe top of feeds.py marked NOTE(rsc).\n\n\n</summary></entry><entry><title>rietveld: correct tab handling\n</title><link href=\"http://codereview.appspot.com/124106\" rel=\"alternate\"></link><updated>2009-10-03T23:02:17+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:0a2a4f19bb815101f0ba2904aed7c35a</id><summary type=\"html\">\n  This fixes the buggy tab rendering that can be seen at\nhttp://codereview.appspot.com/116075/diff/1/2\n\nThe fundamental problem was that the tab code was\nnot being told what column the text began in, so it\ndidn&amp;#39;t know where to put the tab stops.  Another problem\nwas that some of the code assumed that string byte\noffsets were the same as column offsets, which is only\ntrue if there are no tabs.\n\nIn the process of fixing this, I cleaned up the arguments\nto Fold and ExpandTabs and renamed them Break and\n_ExpandTabs so that I could be sure that I found all the\ncall sites.  
I also wanted to verify that ExpandTabs was\nnot being used from outside intra_region_diff.py.\n\n\n</summary></entry></feed> \t   `\n\ntype Feed struct {\n\tXMLName Name      `xml:\"http://www.w3.org/2005/Atom feed\"`\n\tTitle   string    `xml:\"title\"`\n\tId      string    `xml:\"id\"`\n\tLink    []Link    `xml:\"link\"`\n\tUpdated time.Time `xml:\"updated,attr\"`\n\tAuthor  Person    `xml:\"author\"`\n\tEntry   []Entry   `xml:\"entry\"`\n}\n\ntype Entry struct {\n\tTitle   string    `xml:\"title\"`\n\tId      string    `xml:\"id\"`\n\tLink    []Link    `xml:\"link\"`\n\tUpdated time.Time `xml:\"updated\"`\n\tAuthor  Person    `xml:\"author\"`\n\tSummary Text      `xml:\"summary\"`\n}\n\ntype Link struct {\n\tRel  string `xml:\"rel,attr,omitempty\"`\n\tHref string `xml:\"href,attr\"`\n}\n\ntype Person struct {\n\tName     string `xml:\"name\"`\n\tURI      string `xml:\"uri\"`\n\tEmail    string `xml:\"email\"`\n\tInnerXML string `xml:\",innerxml\"`\n}\n\ntype Text struct {\n\tType string `xml:\"type,attr,omitempty\"`\n\tBody string `xml:\",chardata\"`\n}\n\nvar atomFeed = Feed{\n\tXMLName: Name{\"http://www.w3.org/2005/Atom\", \"feed\"},\n\tTitle:   \"Code Review - My issues\",\n\tLink: []Link{\n\t\t{Rel: \"alternate\", Href: \"http://codereview.appspot.com/\"},\n\t\t{Rel: \"self\", Href: \"http://codereview.appspot.com/rss/mine/rsc\"},\n\t},\n\tId:      \"http://codereview.appspot.com/\",\n\tUpdated: ParseTime(\"2009-10-04T01:35:58+00:00\"),\n\tAuthor: Person{\n\t\tName:     \"rietveld<>\",\n\t\tInnerXML: \"<name>rietveld&lt;&gt;</name>\",\n\t},\n\tEntry: []Entry{\n\t\t{\n\t\t\tTitle: \"rietveld: an attempt at pubsubhubbub\\n\",\n\t\t\tLink: []Link{\n\t\t\t\t{Rel: \"alternate\", Href: \"http://codereview.appspot.com/126085\"},\n\t\t\t},\n\t\t\tUpdated: ParseTime(\"2009-10-04T01:35:58+00:00\"),\n\t\t\tAuthor: Person{\n\t\t\t\tName:     \"email-address-removed\",\n\t\t\t\tInnerXML: \"<name>email-address-removed</name>\",\n\t\t\t},\n\t\t\tId: 
\"urn:md5:134d9179c41f806be79b3a5f7877d19a\",\n\t\t\tSummary: Text{\n\t\t\t\tType: \"html\",\n\t\t\t\tBody: `\n  An attempt at adding pubsubhubbub support to Rietveld.\nhttp://code.google.com/p/pubsubhubbub\nhttp://code.google.com/p/rietveld/issues/detail?id=155\n\nThe server side of the protocol is trivial:\n  1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all\n     feeds that will be pubsubhubbubbed.\n  2. every time one of those feeds changes, tell the hub\n     with a simple POST request.\n\nI have tested this by adding debug prints to a local hub\nserver and checking that the server got the right publish\nrequests.\n\nI can&#39;t quite get the server to work, but I think the bug\nis not in my code.  I think that the server expects to be\nable to grab the feed and see the feed&#39;s actual URL in\nthe link rel=&quot;self&quot;, but the default value for that drops\nthe :port from the URL, and I cannot for the life of me\nfigure out how to get the Atom generator deep inside\ndjango not to do that, or even where it is doing that,\nor even what code is running to generate the Atom feed.\n(I thought I knew but I added some assert False statements\nand it kept running!)\n\nIgnoring that particular problem, I would appreciate\nfeedback on the right way to get the two values at\nthe top of feeds.py marked NOTE(rsc).\n\n\n`,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tTitle: \"rietveld: correct tab handling\\n\",\n\t\t\tLink: []Link{\n\t\t\t\t{Rel: \"alternate\", Href: \"http://codereview.appspot.com/124106\"},\n\t\t\t},\n\t\t\tUpdated: ParseTime(\"2009-10-03T23:02:17+00:00\"),\n\t\t\tAuthor: Person{\n\t\t\t\tName:     \"email-address-removed\",\n\t\t\t\tInnerXML: \"<name>email-address-removed</name>\",\n\t\t\t},\n\t\t\tId: \"urn:md5:0a2a4f19bb815101f0ba2904aed7c35a\",\n\t\t\tSummary: Text{\n\t\t\t\tType: \"html\",\n\t\t\t\tBody: `\n  This fixes the buggy tab rendering that can be seen at\nhttp://codereview.appspot.com/116075/diff/1/2\n\nThe 
fundamental problem was that the tab code was\nnot being told what column the text began in, so it\ndidn&#39;t know where to put the tab stops.  Another problem\nwas that some of the code assumed that string byte\noffsets were the same as column offsets, which is only\ntrue if there are no tabs.\n\nIn the process of fixing this, I cleaned up the arguments\nto Fold and ExpandTabs and renamed them Break and\n_ExpandTabs so that I could be sure that I found all the\ncall sites.  I also wanted to verify that ExpandTabs was\nnot being used from outside intra_region_diff.py.\n\n\n`,\n\t\t\t},\n\t\t},\n\t},\n}\n\nconst pathTestString = `\n<Result>\n    <Before>1</Before>\n    <Items>\n        <Item1>\n            <Value>A</Value>\n        </Item1>\n        <Item2>\n            <Value>B</Value>\n        </Item2>\n        <Item1>\n            <Value>C</Value>\n            <Value>D</Value>\n        </Item1>\n        <_>\n            <Value>E</Value>\n        </_>\n    </Items>\n    <After>2</After>\n</Result>\n`\n\ntype PathTestItem struct {\n\tValue string\n}\n\ntype PathTestA struct {\n\tItems         []PathTestItem `xml:\">Item1\"`\n\tBefore, After string\n}\n\ntype PathTestB struct {\n\tOther         []PathTestItem `xml:\"Items>Item1\"`\n\tBefore, After string\n}\n\ntype PathTestC struct {\n\tValues1       []string `xml:\"Items>Item1>Value\"`\n\tValues2       []string `xml:\"Items>Item2>Value\"`\n\tBefore, After string\n}\n\ntype PathTestSet struct {\n\tItem1 []PathTestItem\n}\n\ntype PathTestD struct {\n\tOther         PathTestSet `xml:\"Items\"`\n\tBefore, After string\n}\n\ntype PathTestE struct {\n\tUnderline     string `xml:\"Items>_>Value\"`\n\tBefore, After string\n}\n\nvar pathTests = []interface{}{\n\t&PathTestA{Items: []PathTestItem{{\"A\"}, {\"D\"}}, Before: \"1\", After: \"2\"},\n\t&PathTestB{Other: []PathTestItem{{\"A\"}, {\"D\"}}, Before: \"1\", After: \"2\"},\n\t&PathTestC{Values1: []string{\"A\", \"C\", \"D\"}, Values2: []string{\"B\"}, Before: \"1\", 
After: \"2\"},\n\t&PathTestD{Other: PathTestSet{Item1: []PathTestItem{{\"A\"}, {\"D\"}}}, Before: \"1\", After: \"2\"},\n\t&PathTestE{Underline: \"E\", Before: \"1\", After: \"2\"},\n}\n\nfunc TestUnmarshalPaths(t *testing.T) {\n\tfor _, pt := range pathTests {\n\t\tv := reflect.New(reflect.TypeOf(pt).Elem()).Interface()\n\t\tif err := Unmarshal([]byte(pathTestString), v); err != nil {\n\t\t\tt.Fatalf(\"Unmarshal: %s\", err)\n\t\t}\n\t\tif !reflect.DeepEqual(v, pt) {\n\t\t\tt.Fatalf(\"have %#v\\nwant %#v\", v, pt)\n\t\t}\n\t}\n}\n\ntype BadPathTestA struct {\n\tFirst  string `xml:\"items>item1\"`\n\tOther  string `xml:\"items>item2\"`\n\tSecond string `xml:\"items\"`\n}\n\ntype BadPathTestB struct {\n\tOther  string `xml:\"items>item2>value\"`\n\tFirst  string `xml:\"items>item1\"`\n\tSecond string `xml:\"items>item1>value\"`\n}\n\ntype BadPathTestC struct {\n\tFirst  string\n\tSecond string `xml:\"First\"`\n}\n\ntype BadPathTestD struct {\n\tBadPathEmbeddedA\n\tBadPathEmbeddedB\n}\n\ntype BadPathEmbeddedA struct {\n\tFirst string\n}\n\ntype BadPathEmbeddedB struct {\n\tSecond string `xml:\"First\"`\n}\n\nvar badPathTests = []struct {\n\tv, e interface{}\n}{\n\t{&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), \"First\", \"items>item1\", \"Second\", \"items\"}},\n\t{&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), \"First\", \"items>item1\", \"Second\", \"items>item1>value\"}},\n\t{&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), \"First\", \"\", \"Second\", \"First\"}},\n\t{&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), \"First\", \"\", \"Second\", \"First\"}},\n}\n\nfunc TestUnmarshalBadPaths(t *testing.T) {\n\tfor _, tt := range badPathTests {\n\t\terr := Unmarshal([]byte(pathTestString), tt.v)\n\t\tif !reflect.DeepEqual(err, tt.e) {\n\t\t\tt.Fatalf(\"Unmarshal with %#v didn't fail properly:\\nhave %#v,\\nwant %#v\", tt.v, err, tt.e)\n\t\t}\n\t}\n}\n\nconst OK = \"OK\"\nconst withoutNameTypeData = 
`\n<?xml version=\"1.0\" charset=\"utf-8\"?>\n<Test3 Attr=\"OK\" />`\n\ntype TestThree struct {\n\tXMLName Name   `xml:\"Test3\"`\n\tAttr    string `xml:\",attr\"`\n}\n\nfunc TestUnmarshalWithoutNameType(t *testing.T) {\n\tvar x TestThree\n\tif err := Unmarshal([]byte(withoutNameTypeData), &x); err != nil {\n\t\tt.Fatalf(\"Unmarshal: %s\", err)\n\t}\n\tif x.Attr != OK {\n\t\tt.Fatalf(\"have %v\\nwant %v\", x.Attr, OK)\n\t}\n}\n\nfunc TestUnmarshalAttr(t *testing.T) {\n\ttype ParamVal struct {\n\t\tInt int `xml:\"int,attr\"`\n\t}\n\n\ttype ParamPtr struct {\n\t\tInt *int `xml:\"int,attr\"`\n\t}\n\n\ttype ParamStringPtr struct {\n\t\tInt *string `xml:\"int,attr\"`\n\t}\n\n\tx := []byte(`<Param int=\"1\" />`)\n\n\tp1 := &ParamPtr{}\n\tif err := Unmarshal(x, p1); err != nil {\n\t\tt.Fatalf(\"Unmarshal: %s\", err)\n\t}\n\tif p1.Int == nil {\n\t\tt.Fatalf(\"Unmarshal failed in to *int field\")\n\t} else if *p1.Int != 1 {\n\t\tt.Fatalf(\"Unmarshal with %s failed:\\nhave %#v,\\n want %#v\", x, p1.Int, 1)\n\t}\n\n\tp2 := &ParamVal{}\n\tif err := Unmarshal(x, p2); err != nil {\n\t\tt.Fatalf(\"Unmarshal: %s\", err)\n\t}\n\tif p2.Int != 1 {\n\t\tt.Fatalf(\"Unmarshal with %s failed:\\nhave %#v,\\n want %#v\", x, p2.Int, 1)\n\t}\n\n\tp3 := &ParamStringPtr{}\n\tif err := Unmarshal(x, p3); err != nil {\n\t\tt.Fatalf(\"Unmarshal: %s\", err)\n\t}\n\tif p3.Int == nil {\n\t\tt.Fatalf(\"Unmarshal failed in to *string field\")\n\t} else if *p3.Int != \"1\" {\n\t\tt.Fatalf(\"Unmarshal with %s failed:\\nhave %#v,\\n want %#v\", x, p3.Int, 1)\n\t}\n}\n\ntype Tables struct {\n\tHTable string `xml:\"http://www.w3.org/TR/html4/ table\"`\n\tFTable string `xml:\"http://www.w3schools.com/furniture table\"`\n}\n\nvar tables = []struct {\n\txml string\n\ttab Tables\n\tns  string\n}{\n\t{\n\t\txml: `<Tables>` +\n\t\t\t`<table xmlns=\"http://www.w3.org/TR/html4/\">hello</table>` +\n\t\t\t`<table xmlns=\"http://www.w3schools.com/furniture\">world</table>` +\n\t\t\t`</Tables>`,\n\t\ttab: 
Tables{\"hello\", \"world\"},\n\t},\n\t{\n\t\txml: `<Tables>` +\n\t\t\t`<table xmlns=\"http://www.w3schools.com/furniture\">world</table>` +\n\t\t\t`<table xmlns=\"http://www.w3.org/TR/html4/\">hello</table>` +\n\t\t\t`</Tables>`,\n\t\ttab: Tables{\"hello\", \"world\"},\n\t},\n\t{\n\t\txml: `<Tables xmlns:f=\"http://www.w3schools.com/furniture\" xmlns:h=\"http://www.w3.org/TR/html4/\">` +\n\t\t\t`<f:table>world</f:table>` +\n\t\t\t`<h:table>hello</h:table>` +\n\t\t\t`</Tables>`,\n\t\ttab: Tables{\"hello\", \"world\"},\n\t},\n\t{\n\t\txml: `<Tables>` +\n\t\t\t`<table>bogus</table>` +\n\t\t\t`</Tables>`,\n\t\ttab: Tables{},\n\t},\n\t{\n\t\txml: `<Tables>` +\n\t\t\t`<table>only</table>` +\n\t\t\t`</Tables>`,\n\t\ttab: Tables{HTable: \"only\"},\n\t\tns:  \"http://www.w3.org/TR/html4/\",\n\t},\n\t{\n\t\txml: `<Tables>` +\n\t\t\t`<table>only</table>` +\n\t\t\t`</Tables>`,\n\t\ttab: Tables{FTable: \"only\"},\n\t\tns:  \"http://www.w3schools.com/furniture\",\n\t},\n\t{\n\t\txml: `<Tables>` +\n\t\t\t`<table>only</table>` +\n\t\t\t`</Tables>`,\n\t\ttab: Tables{},\n\t\tns:  \"something else entirely\",\n\t},\n}\n\nfunc TestUnmarshalNS(t *testing.T) {\n\tfor i, tt := range tables {\n\t\tvar dst Tables\n\t\tvar err error\n\t\tif tt.ns != \"\" {\n\t\t\td := NewDecoder(strings.NewReader(tt.xml))\n\t\t\td.DefaultSpace = tt.ns\n\t\t\terr = d.Decode(&dst)\n\t\t} else {\n\t\t\terr = Unmarshal([]byte(tt.xml), &dst)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Errorf(\"#%d: Unmarshal: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\twant := tt.tab\n\t\tif dst != want {\n\t\t\tt.Errorf(\"#%d: dst=%+v, want %+v\", i, dst, want)\n\t\t}\n\t}\n}\n\nfunc TestMarshalNS(t *testing.T) {\n\tdst := Tables{\"hello\", \"world\"}\n\tdata, err := Marshal(&dst)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal: %v\", err)\n\t}\n\twant := `<Tables><table xmlns=\"http://www.w3.org/TR/html4/\">hello</table><table xmlns=\"http://www.w3schools.com/furniture\">world</table></Tables>`\n\tstr := string(data)\n\tif str != want 
{\n\t\tt.Errorf(\"have: %q\\nwant: %q\\n\", str, want)\n\t}\n}\n\ntype TableAttrs struct {\n\tTAttr TAttr\n}\n\ntype TAttr struct {\n\tHTable string `xml:\"http://www.w3.org/TR/html4/ table,attr\"`\n\tFTable string `xml:\"http://www.w3schools.com/furniture table,attr\"`\n\tLang   string `xml:\"http://www.w3.org/XML/1998/namespace lang,attr,omitempty\"`\n\tOther1 string `xml:\"http://golang.org/xml/ other,attr,omitempty\"`\n\tOther2 string `xml:\"http://golang.org/xmlfoo/ other,attr,omitempty\"`\n\tOther3 string `xml:\"http://golang.org/json/ other,attr,omitempty\"`\n\tOther4 string `xml:\"http://golang.org/2/json/ other,attr,omitempty\"`\n}\n\nvar tableAttrs = []struct {\n\txml string\n\ttab TableAttrs\n\tns  string\n}{\n\t{\n\t\txml: `<TableAttrs xmlns:f=\"http://www.w3schools.com/furniture\" xmlns:h=\"http://www.w3.org/TR/html4/\"><TAttr ` +\n\t\t\t`h:table=\"hello\" f:table=\"world\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{TAttr{HTable: \"hello\", FTable: \"world\"}},\n\t},\n\t{\n\t\txml: `<TableAttrs><TAttr xmlns:f=\"http://www.w3schools.com/furniture\" xmlns:h=\"http://www.w3.org/TR/html4/\" ` +\n\t\t\t`h:table=\"hello\" f:table=\"world\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{TAttr{HTable: \"hello\", FTable: \"world\"}},\n\t},\n\t{\n\t\txml: `<TableAttrs><TAttr ` +\n\t\t\t`h:table=\"hello\" f:table=\"world\" xmlns:f=\"http://www.w3schools.com/furniture\" xmlns:h=\"http://www.w3.org/TR/html4/\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{TAttr{HTable: \"hello\", FTable: \"world\"}},\n\t},\n\t{\n\t\t// Default space does not apply to attribute names.\n\t\txml: `<TableAttrs xmlns=\"http://www.w3schools.com/furniture\" xmlns:h=\"http://www.w3.org/TR/html4/\"><TAttr ` +\n\t\t\t`h:table=\"hello\" table=\"world\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{TAttr{HTable: \"hello\", FTable: \"\"}},\n\t},\n\t{\n\t\t// Default space does not apply to attribute names.\n\t\txml: `<TableAttrs 
xmlns:f=\"http://www.w3schools.com/furniture\"><TAttr xmlns=\"http://www.w3.org/TR/html4/\" ` +\n\t\t\t`table=\"hello\" f:table=\"world\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{TAttr{HTable: \"\", FTable: \"world\"}},\n\t},\n\t{\n\t\txml: `<TableAttrs><TAttr ` +\n\t\t\t`table=\"bogus\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{},\n\t},\n\t{\n\t\t// Default space does not apply to attribute names.\n\t\txml: `<TableAttrs xmlns:h=\"http://www.w3.org/TR/html4/\"><TAttr ` +\n\t\t\t`h:table=\"hello\" table=\"world\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{TAttr{HTable: \"hello\", FTable: \"\"}},\n\t\tns:  \"http://www.w3schools.com/furniture\",\n\t},\n\t{\n\t\t// Default space does not apply to attribute names.\n\t\txml: `<TableAttrs xmlns:f=\"http://www.w3schools.com/furniture\"><TAttr ` +\n\t\t\t`table=\"hello\" f:table=\"world\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{TAttr{HTable: \"\", FTable: \"world\"}},\n\t\tns:  \"http://www.w3.org/TR/html4/\",\n\t},\n\t{\n\t\txml: `<TableAttrs><TAttr ` +\n\t\t\t`table=\"bogus\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{},\n\t\tns:  \"something else entirely\",\n\t},\n}\n\nfunc TestUnmarshalNSAttr(t *testing.T) {\n\tfor i, tt := range tableAttrs {\n\t\tvar dst TableAttrs\n\t\tvar err error\n\t\tif tt.ns != \"\" {\n\t\t\td := NewDecoder(strings.NewReader(tt.xml))\n\t\t\td.DefaultSpace = tt.ns\n\t\t\terr = d.Decode(&dst)\n\t\t} else {\n\t\t\terr = Unmarshal([]byte(tt.xml), &dst)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Errorf(\"#%d: Unmarshal: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\twant := tt.tab\n\t\tif dst != want {\n\t\t\tt.Errorf(\"#%d: dst=%+v, want %+v\", i, dst, want)\n\t\t}\n\t}\n}\n\nfunc TestMarshalNSAttr(t *testing.T) {\n\tsrc := TableAttrs{TAttr{\"hello\", \"world\", \"en_US\", \"other1\", \"other2\", \"other3\", \"other4\"}}\n\tdata, err := Marshal(&src)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal: %v\", err)\n\t}\n\twant := `<TableAttrs><TAttr 
xmlns:html4=\"http://www.w3.org/TR/html4/\" html4:table=\"hello\" xmlns:furniture=\"http://www.w3schools.com/furniture\" furniture:table=\"world\" xml:lang=\"en_US\" xmlns:_xml=\"http://golang.org/xml/\" _xml:other=\"other1\" xmlns:_xmlfoo=\"http://golang.org/xmlfoo/\" _xmlfoo:other=\"other2\" xmlns:json=\"http://golang.org/json/\" json:other=\"other3\" xmlns:json_1=\"http://golang.org/2/json/\" json_1:other=\"other4\"></TAttr></TableAttrs>`\n\tstr := string(data)\n\tif str != want {\n\t\tt.Errorf(\"Marshal:\\nhave: %#q\\nwant: %#q\\n\", str, want)\n\t}\n\n\tvar dst TableAttrs\n\tif err := Unmarshal(data, &dst); err != nil {\n\t\tt.Errorf(\"Unmarshal: %v\", err)\n\t}\n\n\tif dst != src {\n\t\tt.Errorf(\"Unmarshal = %q, want %q\", dst, src)\n\t}\n}\n\ntype MyCharData struct {\n\tbody string\n}\n\nfunc (m *MyCharData) UnmarshalXML(d *Decoder, start StartElement) error {\n\tfor {\n\t\tt, err := d.Token()\n\t\tif err == io.EOF { // found end of element\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif char, ok := t.(CharData); ok {\n\t\t\tm.body += string(char)\n\t\t}\n\t}\n\treturn nil\n}\n\nvar _ Unmarshaler = (*MyCharData)(nil)\n\nfunc (m *MyCharData) UnmarshalXMLAttr(attr Attr) error {\n\tpanic(\"must not call\")\n}\n\ntype MyAttr struct {\n\tattr string\n}\n\nfunc (m *MyAttr) UnmarshalXMLAttr(attr Attr) error {\n\tm.attr = attr.Value\n\treturn nil\n}\n\nvar _ UnmarshalerAttr = (*MyAttr)(nil)\n\ntype MyStruct struct {\n\tData *MyCharData\n\tAttr *MyAttr `xml:\",attr\"`\n\n\tData2 MyCharData\n\tAttr2 MyAttr `xml:\",attr\"`\n}\n\nfunc TestUnmarshaler(t *testing.T) {\n\txml := `<?xml version=\"1.0\" encoding=\"utf-8\"?>\n\t\t<MyStruct Attr=\"attr1\" Attr2=\"attr2\">\n\t\t<Data>hello <!-- comment -->world</Data>\n\t\t<Data2>howdy <!-- comment -->world</Data2>\n\t\t</MyStruct>\n\t`\n\n\tvar m MyStruct\n\tif err := Unmarshal([]byte(xml), &m); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif m.Data == nil || m.Attr == nil || m.Data.body != \"hello 
world\" || m.Attr.attr != \"attr1\" || m.Data2.body != \"howdy world\" || m.Attr2.attr != \"attr2\" {\n\t\tt.Errorf(\"m=%#+v\\n\", m)\n\t}\n}\n\ntype Pea struct {\n\tCotelydon string\n}\n\ntype Pod struct {\n\tPea interface{} `xml:\"Pea\"`\n}\n\n// https://code.google.com/p/go/issues/detail?id=6836\nfunc TestUnmarshalIntoInterface(t *testing.T) {\n\tpod := new(Pod)\n\tpod.Pea = new(Pea)\n\txml := `<Pod><Pea><Cotelydon>Green stuff</Cotelydon></Pea></Pod>`\n\terr := Unmarshal([]byte(xml), pod)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to unmarshal %q: %v\", xml, err)\n\t}\n\tpea, ok := pod.Pea.(*Pea)\n\tif !ok {\n\t\tt.Fatalf(\"unmarshalled into wrong type: have %T want *Pea\", pod.Pea)\n\t}\n\thave, want := pea.Cotelydon, \"Green stuff\"\n\tif have != want {\n\t\tt.Errorf(\"failed to unmarshal into interface, have %q want %q\", have, want)\n\t}\n}\n\n// https://github.com/vmware/govmomi/issues/246\nfunc TestNegativeValuesUnsignedFields(t *testing.T) {\n\ttype T struct {\n\t\tI   string\n\t\tO   interface{}\n\t\tU8  uint8  `xml:\"u8\"`\n\t\tU16 uint16 `xml:\"u16\"`\n\t\tU32 uint32 `xml:\"u32\"`\n\t\tU64 uint64 `xml:\"u64\"`\n\t}\n\n\tvar tests = []T{\n\t\t{I: \"<T><u8>-128</u8></T>\", O: uint8(0x80)},\n\t\t{I: \"<T><u8>-1</u8></T>\", O: uint8(0xff)},\n\t\t{I: \"<T><u16>-32768</u16></T>\", O: uint16(0x8000)},\n\t\t{I: \"<T><u16>-1</u16></T>\", O: uint16(0xffff)},\n\t\t{I: \"<T><u32>-2147483648</u32></T>\", O: uint32(0x80000000)},\n\t\t{I: \"<T><u32>-1</u32></T>\", O: uint32(0xffffffff)},\n\t\t{I: \"<T><u64>-9223372036854775808</u64></T>\", O: uint64(0x8000000000000000)},\n\t\t{I: \"<T><u64>-1</u64></T>\", O: uint64(0xffffffffffffffff)},\n\t}\n\n\tfor _, test := range tests {\n\t\terr := Unmarshal([]byte(test.I), &test)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unmarshal error: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar expected = test.O\n\t\tvar actual interface{}\n\t\tswitch reflect.ValueOf(test.O).Type().Kind() {\n\t\tcase reflect.Uint8:\n\t\t\tactual = 
test.U8\n\t\tcase reflect.Uint16:\n\t\t\tactual = test.U16\n\t\tcase reflect.Uint32:\n\t\t\tactual = test.U32\n\t\tcase reflect.Uint64:\n\t\t\tactual = test.U64\n\t\t}\n\n\t\tif !reflect.DeepEqual(actual, expected) {\n\t\t\tt.Errorf(\"Actual: %v, expected: %v\", actual, expected)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go",
    "content": "// Copyright 2011 The Go Authors.  All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage xml\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n)\n\n// typeInfo holds details for the xml representation of a type.\ntype typeInfo struct {\n\txmlname *fieldInfo\n\tfields  []fieldInfo\n}\n\n// fieldInfo holds details for the xml representation of a single field.\ntype fieldInfo struct {\n\tidx     []int\n\tname    string\n\txmlns   string\n\tflags   fieldFlags\n\tparents []string\n}\n\ntype fieldFlags int\n\nconst (\n\tfElement fieldFlags = 1 << iota\n\tfAttr\n\tfCharData\n\tfInnerXml\n\tfComment\n\tfAny\n\n\tfOmitEmpty\n\tfTypeAttr\n\n\tfMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny\n)\n\nvar tinfoMap = make(map[reflect.Type]*typeInfo)\nvar tinfoLock sync.RWMutex\n\nvar nameType = reflect.TypeOf(Name{})\n\n// getTypeInfo returns the typeInfo structure with details necessary\n// for marshalling and unmarshalling typ.\nfunc getTypeInfo(typ reflect.Type) (*typeInfo, error) {\n\ttinfoLock.RLock()\n\ttinfo, ok := tinfoMap[typ]\n\ttinfoLock.RUnlock()\n\tif ok {\n\t\treturn tinfo, nil\n\t}\n\ttinfo = &typeInfo{}\n\tif typ.Kind() == reflect.Struct && typ != nameType {\n\t\tn := typ.NumField()\n\t\tfor i := 0; i < n; i++ {\n\t\t\tf := typ.Field(i)\n\t\t\tif f.PkgPath != \"\" || f.Tag.Get(\"xml\") == \"-\" {\n\t\t\t\tcontinue // Private field\n\t\t\t}\n\n\t\t\t// For embedded structs, embed its fields.\n\t\t\tif f.Anonymous {\n\t\t\t\tt := f.Type\n\t\t\t\tif t.Kind() == reflect.Ptr {\n\t\t\t\t\tt = t.Elem()\n\t\t\t\t}\n\t\t\t\tif t.Kind() == reflect.Struct {\n\t\t\t\t\tinner, err := getTypeInfo(t)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\tif tinfo.xmlname == nil {\n\t\t\t\t\t\ttinfo.xmlname = inner.xmlname\n\t\t\t\t\t}\n\t\t\t\t\tfor _, finfo := range inner.fields {\n\t\t\t\t\t\tfinfo.idx = append([]int{i}, 
finfo.idx...)\n\t\t\t\t\t\tif err := addFieldInfo(typ, tinfo, &finfo); err != nil {\n\t\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfinfo, err := structFieldInfo(typ, &f)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif f.Name == \"XMLName\" {\n\t\t\t\ttinfo.xmlname = finfo\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Add the field if it doesn't conflict with other fields.\n\t\t\tif err := addFieldInfo(typ, tinfo, finfo); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\ttinfoLock.Lock()\n\ttinfoMap[typ] = tinfo\n\ttinfoLock.Unlock()\n\treturn tinfo, nil\n}\n\n// structFieldInfo builds and returns a fieldInfo for f.\nfunc structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) {\n\tfinfo := &fieldInfo{idx: f.Index}\n\n\t// Split the tag from the xml namespace if necessary.\n\ttag := f.Tag.Get(\"xml\")\n\tif i := strings.Index(tag, \" \"); i >= 0 {\n\t\tfinfo.xmlns, tag = tag[:i], tag[i+1:]\n\t}\n\n\t// Parse flags.\n\ttokens := strings.Split(tag, \",\")\n\tif len(tokens) == 1 {\n\t\tfinfo.flags = fElement\n\t} else {\n\t\ttag = tokens[0]\n\t\tfor _, flag := range tokens[1:] {\n\t\t\tswitch flag {\n\t\t\tcase \"attr\":\n\t\t\t\tfinfo.flags |= fAttr\n\t\t\tcase \"chardata\":\n\t\t\t\tfinfo.flags |= fCharData\n\t\t\tcase \"innerxml\":\n\t\t\t\tfinfo.flags |= fInnerXml\n\t\t\tcase \"comment\":\n\t\t\t\tfinfo.flags |= fComment\n\t\t\tcase \"any\":\n\t\t\t\tfinfo.flags |= fAny\n\t\t\tcase \"omitempty\":\n\t\t\t\tfinfo.flags |= fOmitEmpty\n\t\t\tcase \"typeattr\":\n\t\t\t\tfinfo.flags |= fTypeAttr\n\t\t\t}\n\t\t}\n\n\t\t// Validate the flags used.\n\t\tvalid := true\n\t\tswitch mode := finfo.flags & fMode; mode {\n\t\tcase 0:\n\t\t\tfinfo.flags |= fElement\n\t\tcase fAttr, fCharData, fInnerXml, fComment, fAny:\n\t\t\tif f.Name == \"XMLName\" || tag != \"\" && mode != fAttr {\n\t\t\t\tvalid = false\n\t\t\t}\n\t\tdefault:\n\t\t\t// This will also catch multiple modes 
in a single field.\n\t\t\tvalid = false\n\t\t}\n\t\tif finfo.flags&fMode == fAny {\n\t\t\tfinfo.flags |= fElement\n\t\t}\n\t\tif finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 {\n\t\t\tvalid = false\n\t\t}\n\t\tif !valid {\n\t\t\treturn nil, fmt.Errorf(\"xml: invalid tag in field %s of type %s: %q\",\n\t\t\t\tf.Name, typ, f.Tag.Get(\"xml\"))\n\t\t}\n\t}\n\n\t// Use of xmlns without a name is not allowed.\n\tif finfo.xmlns != \"\" && tag == \"\" {\n\t\treturn nil, fmt.Errorf(\"xml: namespace without name in field %s of type %s: %q\",\n\t\t\tf.Name, typ, f.Tag.Get(\"xml\"))\n\t}\n\n\tif f.Name == \"XMLName\" {\n\t\t// The XMLName field records the XML element name. Don't\n\t\t// process it as usual because its name should default to\n\t\t// empty rather than to the field name.\n\t\tfinfo.name = tag\n\t\treturn finfo, nil\n\t}\n\n\tif tag == \"\" {\n\t\t// If the name part of the tag is completely empty, get\n\t\t// default from XMLName of underlying struct if feasible,\n\t\t// or field name otherwise.\n\t\tif xmlname := lookupXMLName(f.Type); xmlname != nil {\n\t\t\tfinfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name\n\t\t} else {\n\t\t\tfinfo.name = f.Name\n\t\t}\n\t\treturn finfo, nil\n\t}\n\n\t// Prepare field name and parents.\n\tparents := strings.Split(tag, \">\")\n\tif parents[0] == \"\" {\n\t\tparents[0] = f.Name\n\t}\n\tif parents[len(parents)-1] == \"\" {\n\t\treturn nil, fmt.Errorf(\"xml: trailing '>' in field %s of type %s\", f.Name, typ)\n\t}\n\tfinfo.name = parents[len(parents)-1]\n\tif len(parents) > 1 {\n\t\tif (finfo.flags & fElement) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"xml: %s chain not valid with %s flag\", tag, strings.Join(tokens[1:], \",\"))\n\t\t}\n\t\tfinfo.parents = parents[:len(parents)-1]\n\t}\n\n\t// If the field type has an XMLName field, the names must match\n\t// so that the behavior of both marshalling and unmarshalling\n\t// is straightforward and unambiguous.\n\tif finfo.flags&fElement != 0 {\n\t\tftyp := 
f.Type\n\t\txmlname := lookupXMLName(ftyp)\n\t\tif xmlname != nil && xmlname.name != finfo.name {\n\t\t\treturn nil, fmt.Errorf(\"xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName\",\n\t\t\t\tfinfo.name, typ, f.Name, xmlname.name, ftyp)\n\t\t}\n\t}\n\treturn finfo, nil\n}\n\n// lookupXMLName returns the fieldInfo for typ's XMLName field\n// in case it exists and has a valid xml field tag, otherwise\n// it returns nil.\nfunc lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) {\n\tfor typ.Kind() == reflect.Ptr {\n\t\ttyp = typ.Elem()\n\t}\n\tif typ.Kind() != reflect.Struct {\n\t\treturn nil\n\t}\n\tfor i, n := 0, typ.NumField(); i < n; i++ {\n\t\tf := typ.Field(i)\n\t\tif f.Name != \"XMLName\" {\n\t\t\tcontinue\n\t\t}\n\t\tfinfo, err := structFieldInfo(typ, &f)\n\t\tif finfo.name != \"\" && err == nil {\n\t\t\treturn finfo\n\t\t}\n\t\t// Also consider errors as a non-existent field tag\n\t\t// and let getTypeInfo itself report the error.\n\t\tbreak\n\t}\n\treturn nil\n}\n\nfunc min(a, b int) int {\n\tif a <= b {\n\t\treturn a\n\t}\n\treturn b\n}\n\n// addFieldInfo adds finfo to tinfo.fields if there are no\n// conflicts, or if conflicts arise from previous fields that were\n// obtained from deeper embedded structures than finfo. In the latter\n// case, the conflicting entries are dropped.\n// A conflict occurs when the path (parent + name) to a field is\n// itself a prefix of another path, or when two paths match exactly.\n// It is okay for field paths to share a common, shorter prefix.\nfunc addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error {\n\tvar conflicts []int\nLoop:\n\t// First, figure all conflicts. 
Most working code will have none.\n\tfor i := range tinfo.fields {\n\t\toldf := &tinfo.fields[i]\n\t\tif oldf.flags&fMode != newf.flags&fMode {\n\t\t\tcontinue\n\t\t}\n\t\tif oldf.xmlns != \"\" && newf.xmlns != \"\" && oldf.xmlns != newf.xmlns {\n\t\t\tcontinue\n\t\t}\n\t\tminl := min(len(newf.parents), len(oldf.parents))\n\t\tfor p := 0; p < minl; p++ {\n\t\t\tif oldf.parents[p] != newf.parents[p] {\n\t\t\t\tcontinue Loop\n\t\t\t}\n\t\t}\n\t\tif len(oldf.parents) > len(newf.parents) {\n\t\t\tif oldf.parents[len(newf.parents)] == newf.name {\n\t\t\t\tconflicts = append(conflicts, i)\n\t\t\t}\n\t\t} else if len(oldf.parents) < len(newf.parents) {\n\t\t\tif newf.parents[len(oldf.parents)] == oldf.name {\n\t\t\t\tconflicts = append(conflicts, i)\n\t\t\t}\n\t\t} else {\n\t\t\tif newf.name == oldf.name {\n\t\t\t\tconflicts = append(conflicts, i)\n\t\t\t}\n\t\t}\n\t}\n\t// Without conflicts, add the new field and return.\n\tif conflicts == nil {\n\t\ttinfo.fields = append(tinfo.fields, *newf)\n\t\treturn nil\n\t}\n\n\t// If any conflict is shallower, ignore the new field.\n\t// This matches the Go field resolution on embedding.\n\tfor _, i := range conflicts {\n\t\tif len(tinfo.fields[i].idx) < len(newf.idx) {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// Otherwise, if any of them is at the same depth level, it's an error.\n\tfor _, i := range conflicts {\n\t\toldf := &tinfo.fields[i]\n\t\tif len(oldf.idx) == len(newf.idx) {\n\t\t\tf1 := typ.FieldByIndex(oldf.idx)\n\t\t\tf2 := typ.FieldByIndex(newf.idx)\n\t\t\treturn &TagPathError{typ, f1.Name, f1.Tag.Get(\"xml\"), f2.Name, f2.Tag.Get(\"xml\")}\n\t\t}\n\t}\n\n\t// Otherwise, the new field is shallower, and thus takes precedence,\n\t// so drop the conflicting fields from tinfo and append the new one.\n\tfor c := len(conflicts) - 1; c >= 0; c-- {\n\t\ti := conflicts[c]\n\t\tcopy(tinfo.fields[i:], tinfo.fields[i+1:])\n\t\ttinfo.fields = tinfo.fields[:len(tinfo.fields)-1]\n\t}\n\ttinfo.fields = append(tinfo.fields, *newf)\n\treturn 
nil\n}\n\n// A TagPathError represents an error in the unmarshalling process\n// caused by the use of field tags with conflicting paths.\ntype TagPathError struct {\n\tStruct       reflect.Type\n\tField1, Tag1 string\n\tField2, Tag2 string\n}\n\nfunc (e *TagPathError) Error() string {\n\treturn fmt.Sprintf(\"%s field %q with tag %q conflicts with field %q with tag %q\", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2)\n}\n\n// value returns v's field value corresponding to finfo.\n// It's equivalent to v.FieldByIndex(finfo.idx), but initializes\n// and dereferences pointers as necessary.\nfunc (finfo *fieldInfo) value(v reflect.Value) reflect.Value {\n\tfor i, x := range finfo.idx {\n\t\tif i > 0 {\n\t\t\tt := v.Type()\n\t\t\tif t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {\n\t\t\t\tif v.IsNil() {\n\t\t\t\t\tv.Set(reflect.New(v.Type().Elem()))\n\t\t\t\t}\n\t\t\t\tv = v.Elem()\n\t\t\t}\n\t\t}\n\t\tv = v.Field(x)\n\t}\n\treturn v\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/xml/xml.go",
    "content": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package xml implements a simple XML 1.0 parser that\n// understands XML name spaces.\npackage xml\n\n// References:\n//    Annotated XML spec: http://www.xml.com/axml/testaxml.htm\n//    XML name spaces: http://www.w3.org/TR/REC-xml-names/\n\n// TODO(rsc):\n//\tTest error handling.\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n)\n\n// A SyntaxError represents a syntax error in the XML input stream.\ntype SyntaxError struct {\n\tMsg  string\n\tLine int\n}\n\nfunc (e *SyntaxError) Error() string {\n\treturn \"XML syntax error on line \" + strconv.Itoa(e.Line) + \": \" + e.Msg\n}\n\n// A Name represents an XML name (Local) annotated\n// with a name space identifier (Space).\n// In tokens returned by Decoder.Token, the Space identifier\n// is given as a canonical URL, not the short prefix used\n// in the document being parsed.\ntype Name struct {\n\tSpace, Local string\n}\n\n// An Attr represents an attribute in an XML element (Name=Value).\ntype Attr struct {\n\tName  Name\n\tValue string\n}\n\n// A Token is an interface holding one of the token types:\n// StartElement, EndElement, CharData, Comment, ProcInst, or Directive.\ntype Token interface{}\n\n// A StartElement represents an XML start element.\ntype StartElement struct {\n\tName Name\n\tAttr []Attr\n}\n\nfunc (e StartElement) Copy() StartElement {\n\tattrs := make([]Attr, len(e.Attr))\n\tcopy(attrs, e.Attr)\n\te.Attr = attrs\n\treturn e\n}\n\n// End returns the corresponding XML end element.\nfunc (e StartElement) End() EndElement {\n\treturn EndElement{e.Name}\n}\n\n// An EndElement represents an XML end element.\ntype EndElement struct {\n\tName Name\n}\n\n// A CharData represents XML character data (raw text),\n// in which 
XML escape sequences have been replaced by\n// the characters they represent.\ntype CharData []byte\n\nfunc makeCopy(b []byte) []byte {\n\tb1 := make([]byte, len(b))\n\tcopy(b1, b)\n\treturn b1\n}\n\nfunc (c CharData) Copy() CharData { return CharData(makeCopy(c)) }\n\n// A Comment represents an XML comment of the form <!--comment-->.\n// The bytes do not include the <!-- and --> comment markers.\ntype Comment []byte\n\nfunc (c Comment) Copy() Comment { return Comment(makeCopy(c)) }\n\n// A ProcInst represents an XML processing instruction of the form <?target inst?>\ntype ProcInst struct {\n\tTarget string\n\tInst   []byte\n}\n\nfunc (p ProcInst) Copy() ProcInst {\n\tp.Inst = makeCopy(p.Inst)\n\treturn p\n}\n\n// A Directive represents an XML directive of the form <!text>.\n// The bytes do not include the <! and > markers.\ntype Directive []byte\n\nfunc (d Directive) Copy() Directive { return Directive(makeCopy(d)) }\n\n// CopyToken returns a copy of a Token.\nfunc CopyToken(t Token) Token {\n\tswitch v := t.(type) {\n\tcase CharData:\n\t\treturn v.Copy()\n\tcase Comment:\n\t\treturn v.Copy()\n\tcase Directive:\n\t\treturn v.Copy()\n\tcase ProcInst:\n\t\treturn v.Copy()\n\tcase StartElement:\n\t\treturn v.Copy()\n\t}\n\treturn t\n}\n\n// A Decoder represents an XML parser reading a particular input stream.\n// The parser assumes that its input is encoded in UTF-8.\ntype Decoder struct {\n\t// Strict defaults to true, enforcing the requirements\n\t// of the XML specification.\n\t// If set to false, the parser allows input containing common\n\t// mistakes:\n\t//\t* If an element is missing an end tag, the parser invents\n\t//\t  end tags as necessary to keep the return values from Token\n\t//\t  properly balanced.\n\t//\t* In attribute values and character data, unknown or malformed\n\t//\t  character entities (sequences beginning with &) are left alone.\n\t//\n\t// Setting:\n\t//\n\t//\td.Strict = false;\n\t//\td.AutoClose = HTMLAutoClose;\n\t//\td.Entity = 
HTMLEntity\n\t//\n\t// creates a parser that can handle typical HTML.\n\t//\n\t// Strict mode does not enforce the requirements of the XML name spaces TR.\n\t// In particular it does not reject name space tags using undefined prefixes.\n\t// Such tags are recorded with the unknown prefix as the name space URL.\n\tStrict bool\n\n\t// When Strict == false, AutoClose indicates a set of elements to\n\t// consider closed immediately after they are opened, regardless\n\t// of whether an end element is present.\n\tAutoClose []string\n\n\t// Entity can be used to map non-standard entity names to string replacements.\n\t// The parser behaves as if these standard mappings are present in the map,\n\t// regardless of the actual map content:\n\t//\n\t//\t\"lt\": \"<\",\n\t//\t\"gt\": \">\",\n\t//\t\"amp\": \"&\",\n\t//\t\"apos\": \"'\",\n\t//\t\"quot\": `\"`,\n\tEntity map[string]string\n\n\t// CharsetReader, if non-nil, defines a function to generate\n\t// charset-conversion readers, converting from the provided\n\t// non-UTF-8 charset into UTF-8. If CharsetReader is nil or\n\t// returns an error, parsing stops with an error. 
One of the\n\t// CharsetReader's result values must be non-nil.\n\tCharsetReader func(charset string, input io.Reader) (io.Reader, error)\n\n\t// DefaultSpace sets the default name space used for unadorned tags,\n\t// as if the entire XML stream were wrapped in an element containing\n\t// the attribute xmlns=\"DefaultSpace\".\n\tDefaultSpace string\n\n\t// TypeFunc is used to map type names to actual types.\n\tTypeFunc func(string) (reflect.Type, bool)\n\n\tr              io.ByteReader\n\tbuf            bytes.Buffer\n\tsaved          *bytes.Buffer\n\tstk            *stack\n\tfree           *stack\n\tneedClose      bool\n\ttoClose        Name\n\tnextToken      Token\n\tnextByte       int\n\tns             map[string]string\n\terr            error\n\tline           int\n\tunmarshalDepth int\n}\n\n// NewDecoder creates a new XML parser reading from r.\n// If r does not implement io.ByteReader, NewDecoder will\n// do its own buffering.\nfunc NewDecoder(r io.Reader) *Decoder {\n\td := &Decoder{\n\t\tns:       make(map[string]string),\n\t\tnextByte: -1,\n\t\tline:     1,\n\t\tStrict:   true,\n\t}\n\td.switchToReader(r)\n\treturn d\n}\n\n// Token returns the next XML token in the input stream.\n// At the end of the input stream, Token returns nil, io.EOF.\n//\n// Slices of bytes in the returned token data refer to the\n// parser's internal buffer and remain valid only until the next\n// call to Token.  To acquire a copy of the bytes, call CopyToken\n// or the token's Copy method.\n//\n// Token expands self-closing elements such as <br/>\n// into separate start and end elements returned by successive calls.\n//\n// Token guarantees that the StartElement and EndElement\n// tokens it returns are properly nested and matched:\n// if Token encounters an unexpected end element,\n// it will return an error.\n//\n// Token implements XML name spaces as described by\n// http://www.w3.org/TR/REC-xml-names/.  
Each of the\n// Name structures contained in the Token has the Space\n// set to the URL identifying its name space when known.\n// If Token encounters an unrecognized name space prefix,\n// it uses the prefix as the Space rather than report an error.\nfunc (d *Decoder) Token() (t Token, err error) {\n\tif d.stk != nil && d.stk.kind == stkEOF {\n\t\terr = io.EOF\n\t\treturn\n\t}\n\tif d.nextToken != nil {\n\t\tt = d.nextToken\n\t\td.nextToken = nil\n\t} else if t, err = d.rawToken(); err != nil {\n\t\treturn\n\t}\n\n\tif !d.Strict {\n\t\tif t1, ok := d.autoClose(t); ok {\n\t\t\td.nextToken = t\n\t\t\tt = t1\n\t\t}\n\t}\n\tswitch t1 := t.(type) {\n\tcase StartElement:\n\t\t// In XML name spaces, the translations listed in the\n\t\t// attributes apply to the element name and\n\t\t// to the other attribute names, so process\n\t\t// the translations first.\n\t\tfor _, a := range t1.Attr {\n\t\t\tif a.Name.Space == \"xmlns\" {\n\t\t\t\tv, ok := d.ns[a.Name.Local]\n\t\t\t\td.pushNs(a.Name.Local, v, ok)\n\t\t\t\td.ns[a.Name.Local] = a.Value\n\t\t\t}\n\t\t\tif a.Name.Space == \"\" && a.Name.Local == \"xmlns\" {\n\t\t\t\t// Default space for untagged names\n\t\t\t\tv, ok := d.ns[\"\"]\n\t\t\t\td.pushNs(\"\", v, ok)\n\t\t\t\td.ns[\"\"] = a.Value\n\t\t\t}\n\t\t}\n\n\t\td.translate(&t1.Name, true)\n\t\tfor i := range t1.Attr {\n\t\t\td.translate(&t1.Attr[i].Name, false)\n\t\t}\n\t\td.pushElement(t1.Name)\n\t\tt = t1\n\n\tcase EndElement:\n\t\td.translate(&t1.Name, true)\n\t\tif !d.popElement(&t1) {\n\t\t\treturn nil, d.err\n\t\t}\n\t\tt = t1\n\t}\n\treturn\n}\n\nconst xmlURL = \"http://www.w3.org/XML/1998/namespace\"\n\n// Apply name space translation to name n.\n// The default name space (for Space==\"\")\n// applies only to element names, not to attribute names.\nfunc (d *Decoder) translate(n *Name, isElementName bool) {\n\tswitch {\n\tcase n.Space == \"xmlns\":\n\t\treturn\n\tcase n.Space == \"\" && !isElementName:\n\t\treturn\n\tcase n.Space == \"xml\":\n\t\tn.Space = 
xmlURL\n\tcase n.Space == \"\" && n.Local == \"xmlns\":\n\t\treturn\n\t}\n\tif v, ok := d.ns[n.Space]; ok {\n\t\tn.Space = v\n\t} else if n.Space == \"\" {\n\t\tn.Space = d.DefaultSpace\n\t}\n}\n\nfunc (d *Decoder) switchToReader(r io.Reader) {\n\t// Get efficient byte at a time reader.\n\t// Assume that if reader has its own\n\t// ReadByte, it's efficient enough.\n\t// Otherwise, use bufio.\n\tif rb, ok := r.(io.ByteReader); ok {\n\t\td.r = rb\n\t} else {\n\t\td.r = bufio.NewReader(r)\n\t}\n}\n\n// Parsing state - stack holds old name space translations\n// and the current set of open elements.  The translations to pop when\n// ending a given tag are *below* it on the stack, which is\n// more work but forced on us by XML.\ntype stack struct {\n\tnext *stack\n\tkind int\n\tname Name\n\tok   bool\n}\n\nconst (\n\tstkStart = iota\n\tstkNs\n\tstkEOF\n)\n\nfunc (d *Decoder) push(kind int) *stack {\n\ts := d.free\n\tif s != nil {\n\t\td.free = s.next\n\t} else {\n\t\ts = new(stack)\n\t}\n\ts.next = d.stk\n\ts.kind = kind\n\td.stk = s\n\treturn s\n}\n\nfunc (d *Decoder) pop() *stack {\n\ts := d.stk\n\tif s != nil {\n\t\td.stk = s.next\n\t\ts.next = d.free\n\t\td.free = s\n\t}\n\treturn s\n}\n\n// Record that after the current element is finished\n// (that element is already pushed on the stack)\n// Token should return EOF until popEOF is called.\nfunc (d *Decoder) pushEOF() {\n\t// Walk down stack to find Start.\n\t// It might not be the top, because there might be stkNs\n\t// entries above it.\n\tstart := d.stk\n\tfor start.kind != stkStart {\n\t\tstart = start.next\n\t}\n\t// The stkNs entries below a start are associated with that\n\t// element too; skip over them.\n\tfor start.next != nil && start.next.kind == stkNs {\n\t\tstart = start.next\n\t}\n\ts := d.free\n\tif s != nil {\n\t\td.free = s.next\n\t} else {\n\t\ts = new(stack)\n\t}\n\ts.kind = stkEOF\n\ts.next = start.next\n\tstart.next = s\n}\n\n// Undo a pushEOF.\n// The element must have been finished, so the 
EOF should be at the top of the stack.\nfunc (d *Decoder) popEOF() bool {\n\tif d.stk == nil || d.stk.kind != stkEOF {\n\t\treturn false\n\t}\n\td.pop()\n\treturn true\n}\n\n// Record that we are starting an element with the given name.\nfunc (d *Decoder) pushElement(name Name) {\n\ts := d.push(stkStart)\n\ts.name = name\n}\n\n// Record that we are changing the value of ns[local].\n// The old value is url, ok.\nfunc (d *Decoder) pushNs(local string, url string, ok bool) {\n\ts := d.push(stkNs)\n\ts.name.Local = local\n\ts.name.Space = url\n\ts.ok = ok\n}\n\n// Creates a SyntaxError with the current line number.\nfunc (d *Decoder) syntaxError(msg string) error {\n\treturn &SyntaxError{Msg: msg, Line: d.line}\n}\n\n// Record that we are ending an element with the given name.\n// The name must match the record at the top of the stack,\n// which must be a pushElement record.\n// After popping the element, apply any undo records from\n// the stack to restore the name translations that existed\n// before we saw this element.\nfunc (d *Decoder) popElement(t *EndElement) bool {\n\ts := d.pop()\n\tname := t.Name\n\tswitch {\n\tcase s == nil || s.kind != stkStart:\n\t\td.err = d.syntaxError(\"unexpected end element </\" + name.Local + \">\")\n\t\treturn false\n\tcase s.name.Local != name.Local:\n\t\tif !d.Strict {\n\t\t\td.needClose = true\n\t\t\td.toClose = t.Name\n\t\t\tt.Name = s.name\n\t\t\treturn true\n\t\t}\n\t\td.err = d.syntaxError(\"element <\" + s.name.Local + \"> closed by </\" + name.Local + \">\")\n\t\treturn false\n\tcase s.name.Space != name.Space:\n\t\td.err = d.syntaxError(\"element <\" + s.name.Local + \"> in space \" + s.name.Space +\n\t\t\t\"closed by </\" + name.Local + \"> in space \" + name.Space)\n\t\treturn false\n\t}\n\n\t// Pop stack until a Start or EOF is on the top, undoing the\n\t// translations that were associated with the element we just closed.\n\tfor d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF {\n\t\ts := d.pop()\n\t\tif 
s.ok {\n\t\t\td.ns[s.name.Local] = s.name.Space\n\t\t} else {\n\t\t\tdelete(d.ns, s.name.Local)\n\t\t}\n\t}\n\n\treturn true\n}\n\n// If the top element on the stack is autoclosing and\n// t is not the end tag, invent the end tag.\nfunc (d *Decoder) autoClose(t Token) (Token, bool) {\n\tif d.stk == nil || d.stk.kind != stkStart {\n\t\treturn nil, false\n\t}\n\tname := strings.ToLower(d.stk.name.Local)\n\tfor _, s := range d.AutoClose {\n\t\tif strings.ToLower(s) == name {\n\t\t\t// This one should be auto closed if t doesn't close it.\n\t\t\tet, ok := t.(EndElement)\n\t\t\tif !ok || et.Name.Local != name {\n\t\t\t\treturn EndElement{d.stk.name}, true\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil, false\n}\n\nvar errRawToken = errors.New(\"xml: cannot use RawToken from UnmarshalXML method\")\n\n// RawToken is like Token but does not verify that\n// start and end elements match and does not translate\n// name space prefixes to their corresponding URLs.\nfunc (d *Decoder) RawToken() (Token, error) {\n\tif d.unmarshalDepth > 0 {\n\t\treturn nil, errRawToken\n\t}\n\treturn d.rawToken()\n}\n\nfunc (d *Decoder) rawToken() (Token, error) {\n\tif d.err != nil {\n\t\treturn nil, d.err\n\t}\n\tif d.needClose {\n\t\t// The last element we read was self-closing and\n\t\t// we returned just the StartElement half.\n\t\t// Return the EndElement half now.\n\t\td.needClose = false\n\t\treturn EndElement{d.toClose}, nil\n\t}\n\n\tb, ok := d.getc()\n\tif !ok {\n\t\treturn nil, d.err\n\t}\n\n\tif b != '<' {\n\t\t// Text section.\n\t\td.ungetc(b)\n\t\tdata := d.text(-1, false)\n\t\tif data == nil {\n\t\t\treturn nil, d.err\n\t\t}\n\t\treturn CharData(data), nil\n\t}\n\n\tif b, ok = d.mustgetc(); !ok {\n\t\treturn nil, d.err\n\t}\n\tswitch b {\n\tcase '/':\n\t\t// </: End element\n\t\tvar name Name\n\t\tif name, ok = d.nsname(); !ok {\n\t\t\tif d.err == nil {\n\t\t\t\td.err = d.syntaxError(\"expected element name after </\")\n\t\t\t}\n\t\t\treturn nil, 
d.err\n\t\t}\n\t\td.space()\n\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\treturn nil, d.err\n\t\t}\n\t\tif b != '>' {\n\t\t\td.err = d.syntaxError(\"invalid characters between </\" + name.Local + \" and >\")\n\t\t\treturn nil, d.err\n\t\t}\n\t\treturn EndElement{name}, nil\n\n\tcase '?':\n\t\t// <?: Processing instruction.\n\t\t// TODO(rsc): Should parse the <?xml declaration to make sure the version is 1.0.\n\t\tvar target string\n\t\tif target, ok = d.name(); !ok {\n\t\t\tif d.err == nil {\n\t\t\t\td.err = d.syntaxError(\"expected target name after <?\")\n\t\t\t}\n\t\t\treturn nil, d.err\n\t\t}\n\t\td.space()\n\t\td.buf.Reset()\n\t\tvar b0 byte\n\t\tfor {\n\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\treturn nil, d.err\n\t\t\t}\n\t\t\td.buf.WriteByte(b)\n\t\t\tif b0 == '?' && b == '>' {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tb0 = b\n\t\t}\n\t\tdata := d.buf.Bytes()\n\t\tdata = data[0 : len(data)-2] // chop ?>\n\n\t\tif target == \"xml\" {\n\t\t\tenc := procInstEncoding(string(data))\n\t\t\tif enc != \"\" && enc != \"utf-8\" && enc != \"UTF-8\" {\n\t\t\t\tif d.CharsetReader == nil {\n\t\t\t\t\td.err = fmt.Errorf(\"xml: encoding %q declared but Decoder.CharsetReader is nil\", enc)\n\t\t\t\t\treturn nil, d.err\n\t\t\t\t}\n\t\t\t\tnewr, err := d.CharsetReader(enc, d.r.(io.Reader))\n\t\t\t\tif err != nil {\n\t\t\t\t\td.err = fmt.Errorf(\"xml: opening charset %q: %v\", enc, err)\n\t\t\t\t\treturn nil, d.err\n\t\t\t\t}\n\t\t\t\tif newr == nil {\n\t\t\t\t\tpanic(\"CharsetReader returned a nil Reader for charset \" + enc)\n\t\t\t\t}\n\t\t\t\td.switchToReader(newr)\n\t\t\t}\n\t\t}\n\t\treturn ProcInst{target, data}, nil\n\n\tcase '!':\n\t\t// <!: Maybe comment, maybe CDATA.\n\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\treturn nil, d.err\n\t\t}\n\t\tswitch b {\n\t\tcase '-': // <!-\n\t\t\t// Probably <!-- for a comment.\n\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\treturn nil, d.err\n\t\t\t}\n\t\t\tif b != '-' {\n\t\t\t\td.err = d.syntaxError(\"invalid sequence <!- not part of 
<!--\")\n\t\t\t\treturn nil, d.err\n\t\t\t}\n\t\t\t// Look for terminator.\n\t\t\td.buf.Reset()\n\t\t\tvar b0, b1 byte\n\t\t\tfor {\n\t\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\t\treturn nil, d.err\n\t\t\t\t}\n\t\t\t\td.buf.WriteByte(b)\n\t\t\t\tif b0 == '-' && b1 == '-' && b == '>' {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tb0, b1 = b1, b\n\t\t\t}\n\t\t\tdata := d.buf.Bytes()\n\t\t\tdata = data[0 : len(data)-3] // chop -->\n\t\t\treturn Comment(data), nil\n\n\t\tcase '[': // <![\n\t\t\t// Probably <![CDATA[.\n\t\t\tfor i := 0; i < 6; i++ {\n\t\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\t\treturn nil, d.err\n\t\t\t\t}\n\t\t\t\tif b != \"CDATA[\"[i] {\n\t\t\t\t\td.err = d.syntaxError(\"invalid <![ sequence\")\n\t\t\t\t\treturn nil, d.err\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Have <![CDATA[.  Read text until ]]>.\n\t\t\tdata := d.text(-1, true)\n\t\t\tif data == nil {\n\t\t\t\treturn nil, d.err\n\t\t\t}\n\t\t\treturn CharData(data), nil\n\t\t}\n\n\t\t// Probably a directive: <!DOCTYPE ...>, <!ENTITY ...>, etc.\n\t\t// We don't care, but accumulate for caller. 
Quoted angle\n\t\t// brackets do not count for nesting.\n\t\td.buf.Reset()\n\t\td.buf.WriteByte(b)\n\t\tinquote := uint8(0)\n\t\tdepth := 0\n\t\tfor {\n\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\treturn nil, d.err\n\t\t\t}\n\t\t\tif inquote == 0 && b == '>' && depth == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\tHandleB:\n\t\t\td.buf.WriteByte(b)\n\t\t\tswitch {\n\t\t\tcase b == inquote:\n\t\t\t\tinquote = 0\n\n\t\t\tcase inquote != 0:\n\t\t\t\t// in quotes, no special action\n\n\t\t\tcase b == '\\'' || b == '\"':\n\t\t\t\tinquote = b\n\n\t\t\tcase b == '>' && inquote == 0:\n\t\t\t\tdepth--\n\n\t\t\tcase b == '<' && inquote == 0:\n\t\t\t\t// Look for <!-- to begin comment.\n\t\t\t\ts := \"!--\"\n\t\t\t\tfor i := 0; i < len(s); i++ {\n\t\t\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\t\t\treturn nil, d.err\n\t\t\t\t\t}\n\t\t\t\t\tif b != s[i] {\n\t\t\t\t\t\tfor j := 0; j < i; j++ {\n\t\t\t\t\t\t\td.buf.WriteByte(s[j])\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdepth++\n\t\t\t\t\t\tgoto HandleB\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Remove < that was written above.\n\t\t\t\td.buf.Truncate(d.buf.Len() - 1)\n\n\t\t\t\t// Look for terminator.\n\t\t\t\tvar b0, b1 byte\n\t\t\t\tfor {\n\t\t\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\t\t\treturn nil, d.err\n\t\t\t\t\t}\n\t\t\t\t\tif b0 == '-' && b1 == '-' && b == '>' {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tb0, b1 = b1, b\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn Directive(d.buf.Bytes()), nil\n\t}\n\n\t// Must be an open element like <a href=\"foo\">\n\td.ungetc(b)\n\n\tvar (\n\t\tname  Name\n\t\tempty bool\n\t\tattr  []Attr\n\t)\n\tif name, ok = d.nsname(); !ok {\n\t\tif d.err == nil {\n\t\t\td.err = d.syntaxError(\"expected element name after <\")\n\t\t}\n\t\treturn nil, d.err\n\t}\n\n\tattr = make([]Attr, 0, 4)\n\tfor {\n\t\td.space()\n\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\treturn nil, d.err\n\t\t}\n\t\tif b == '/' {\n\t\t\tempty = true\n\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\treturn nil, d.err\n\t\t\t}\n\t\t\tif b != '>' 
{\n\t\t\t\td.err = d.syntaxError(\"expected /> in element\")\n\t\t\t\treturn nil, d.err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif b == '>' {\n\t\t\tbreak\n\t\t}\n\t\td.ungetc(b)\n\n\t\tn := len(attr)\n\t\tif n >= cap(attr) {\n\t\t\tnattr := make([]Attr, n, 2*cap(attr))\n\t\t\tcopy(nattr, attr)\n\t\t\tattr = nattr\n\t\t}\n\t\tattr = attr[0 : n+1]\n\t\ta := &attr[n]\n\t\tif a.Name, ok = d.nsname(); !ok {\n\t\t\tif d.err == nil {\n\t\t\t\td.err = d.syntaxError(\"expected attribute name in element\")\n\t\t\t}\n\t\t\treturn nil, d.err\n\t\t}\n\t\td.space()\n\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\treturn nil, d.err\n\t\t}\n\t\tif b != '=' {\n\t\t\tif d.Strict {\n\t\t\t\td.err = d.syntaxError(\"attribute name without = in element\")\n\t\t\t\treturn nil, d.err\n\t\t\t} else {\n\t\t\t\td.ungetc(b)\n\t\t\t\ta.Value = a.Name.Local\n\t\t\t}\n\t\t} else {\n\t\t\td.space()\n\t\t\tdata := d.attrval()\n\t\t\tif data == nil {\n\t\t\t\treturn nil, d.err\n\t\t\t}\n\t\t\ta.Value = string(data)\n\t\t}\n\t}\n\tif empty {\n\t\td.needClose = true\n\t\td.toClose = name\n\t}\n\treturn StartElement{name, attr}, nil\n}\n\nfunc (d *Decoder) attrval() []byte {\n\tb, ok := d.mustgetc()\n\tif !ok {\n\t\treturn nil\n\t}\n\t// Handle quoted attribute values\n\tif b == '\"' || b == '\\'' {\n\t\treturn d.text(int(b), false)\n\t}\n\t// Handle unquoted attribute values for strict parsers\n\tif d.Strict {\n\t\td.err = d.syntaxError(\"unquoted or missing attribute value in element\")\n\t\treturn nil\n\t}\n\t// Handle unquoted attribute values for unstrict parsers\n\td.ungetc(b)\n\td.buf.Reset()\n\tfor {\n\t\tb, ok = d.mustgetc()\n\t\tif !ok {\n\t\t\treturn nil\n\t\t}\n\t\t// http://www.w3.org/TR/REC-html40/intro/sgmltut.html#h-3.2.2\n\t\tif 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' ||\n\t\t\t'0' <= b && b <= '9' || b == '_' || b == ':' || b == '-' {\n\t\t\td.buf.WriteByte(b)\n\t\t} else {\n\t\t\td.ungetc(b)\n\t\t\tbreak\n\t\t}\n\t}\n\treturn d.buf.Bytes()\n}\n\n// Skip spaces if any\nfunc (d *Decoder) 
space() {\n\tfor {\n\t\tb, ok := d.getc()\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tswitch b {\n\t\tcase ' ', '\\r', '\\n', '\\t':\n\t\tdefault:\n\t\t\td.ungetc(b)\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// Read a single byte.\n// If there is no byte to read, return ok==false\n// and leave the error in d.err.\n// Maintain line number.\nfunc (d *Decoder) getc() (b byte, ok bool) {\n\tif d.err != nil {\n\t\treturn 0, false\n\t}\n\tif d.nextByte >= 0 {\n\t\tb = byte(d.nextByte)\n\t\td.nextByte = -1\n\t} else {\n\t\tb, d.err = d.r.ReadByte()\n\t\tif d.err != nil {\n\t\t\treturn 0, false\n\t\t}\n\t\tif d.saved != nil {\n\t\t\td.saved.WriteByte(b)\n\t\t}\n\t}\n\tif b == '\\n' {\n\t\td.line++\n\t}\n\treturn b, true\n}\n\n// Return saved offset.\n// If we did ungetc (nextByte >= 0), have to back up one.\nfunc (d *Decoder) savedOffset() int {\n\tn := d.saved.Len()\n\tif d.nextByte >= 0 {\n\t\tn--\n\t}\n\treturn n\n}\n\n// Must read a single byte.\n// If there is no byte to read,\n// set d.err to SyntaxError(\"unexpected EOF\")\n// and return ok==false\nfunc (d *Decoder) mustgetc() (b byte, ok bool) {\n\tif b, ok = d.getc(); !ok {\n\t\tif d.err == io.EOF {\n\t\t\td.err = d.syntaxError(\"unexpected EOF\")\n\t\t}\n\t}\n\treturn\n}\n\n// Unread a single byte.\nfunc (d *Decoder) ungetc(b byte) {\n\tif b == '\\n' {\n\t\td.line--\n\t}\n\td.nextByte = int(b)\n}\n\nvar entity = map[string]int{\n\t\"lt\":   '<',\n\t\"gt\":   '>',\n\t\"amp\":  '&',\n\t\"apos\": '\\'',\n\t\"quot\": '\"',\n}\n\n// Read plain text section (XML calls it character data).\n// If quote >= 0, we are in a quoted string and need to find the matching quote.\n// If cdata == true, we are in a <![CDATA[ section and need to find ]]>.\n// On failure return nil and leave the error in d.err.\nfunc (d *Decoder) text(quote int, cdata bool) []byte {\n\tvar b0, b1 byte\n\tvar trunc int\n\td.buf.Reset()\nInput:\n\tfor {\n\t\tb, ok := d.getc()\n\t\tif !ok {\n\t\t\tif cdata {\n\t\t\t\tif d.err == io.EOF {\n\t\t\t\t\td.err = 
d.syntaxError(\"unexpected EOF in CDATA section\")\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tbreak Input\n\t\t}\n\n\t\t// <![CDATA[ section ends with ]]>.\n\t\t// It is an error for ]]> to appear in ordinary text.\n\t\tif b0 == ']' && b1 == ']' && b == '>' {\n\t\t\tif cdata {\n\t\t\t\ttrunc = 2\n\t\t\t\tbreak Input\n\t\t\t}\n\t\t\td.err = d.syntaxError(\"unescaped ]]> not in CDATA section\")\n\t\t\treturn nil\n\t\t}\n\n\t\t// Stop reading text if we see a <.\n\t\tif b == '<' && !cdata {\n\t\t\tif quote >= 0 {\n\t\t\t\td.err = d.syntaxError(\"unescaped < inside quoted string\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\td.ungetc('<')\n\t\t\tbreak Input\n\t\t}\n\t\tif quote >= 0 && b == byte(quote) {\n\t\t\tbreak Input\n\t\t}\n\t\tif b == '&' && !cdata {\n\t\t\t// Read escaped character expression up to semicolon.\n\t\t\t// XML in all its glory allows a document to define and use\n\t\t\t// its own character names with <!ENTITY ...> directives.\n\t\t\t// Parsers are required to recognize lt, gt, amp, apos, and quot\n\t\t\t// even if they have not been declared.\n\t\t\tbefore := d.buf.Len()\n\t\t\td.buf.WriteByte('&')\n\t\t\tvar ok bool\n\t\t\tvar text string\n\t\t\tvar haveText bool\n\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif b == '#' {\n\t\t\t\td.buf.WriteByte(b)\n\t\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tbase := 10\n\t\t\t\tif b == 'x' {\n\t\t\t\t\tbase = 16\n\t\t\t\t\td.buf.WriteByte(b)\n\t\t\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tstart := d.buf.Len()\n\t\t\t\tfor '0' <= b && b <= '9' ||\n\t\t\t\t\tbase == 16 && 'a' <= b && b <= 'f' ||\n\t\t\t\t\tbase == 16 && 'A' <= b && b <= 'F' {\n\t\t\t\t\td.buf.WriteByte(b)\n\t\t\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif b != ';' {\n\t\t\t\t\td.ungetc(b)\n\t\t\t\t} else {\n\t\t\t\t\ts := 
string(d.buf.Bytes()[start:])\n\t\t\t\t\td.buf.WriteByte(';')\n\t\t\t\t\tn, err := strconv.ParseUint(s, base, 64)\n\t\t\t\t\tif err == nil && n <= unicode.MaxRune {\n\t\t\t\t\t\ttext = string(n)\n\t\t\t\t\t\thaveText = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\td.ungetc(b)\n\t\t\t\tif !d.readName() {\n\t\t\t\t\tif d.err != nil {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\tok = false\n\t\t\t\t}\n\t\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif b != ';' {\n\t\t\t\t\td.ungetc(b)\n\t\t\t\t} else {\n\t\t\t\t\tname := d.buf.Bytes()[before+1:]\n\t\t\t\t\td.buf.WriteByte(';')\n\t\t\t\t\tif isName(name) {\n\t\t\t\t\t\ts := string(name)\n\t\t\t\t\t\tif r, ok := entity[s]; ok {\n\t\t\t\t\t\t\ttext = string(r)\n\t\t\t\t\t\t\thaveText = true\n\t\t\t\t\t\t} else if d.Entity != nil {\n\t\t\t\t\t\t\ttext, haveText = d.Entity[s]\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif haveText {\n\t\t\t\td.buf.Truncate(before)\n\t\t\t\td.buf.Write([]byte(text))\n\t\t\t\tb0, b1 = 0, 0\n\t\t\t\tcontinue Input\n\t\t\t}\n\t\t\tif !d.Strict {\n\t\t\t\tb0, b1 = 0, 0\n\t\t\t\tcontinue Input\n\t\t\t}\n\t\t\tent := string(d.buf.Bytes()[before:])\n\t\t\tif ent[len(ent)-1] != ';' {\n\t\t\t\tent += \" (no semicolon)\"\n\t\t\t}\n\t\t\td.err = d.syntaxError(\"invalid character entity \" + ent)\n\t\t\treturn nil\n\t\t}\n\n\t\t// We must rewrite unescaped \\r and \\r\\n into \\n.\n\t\tif b == '\\r' {\n\t\t\td.buf.WriteByte('\\n')\n\t\t} else if b1 == '\\r' && b == '\\n' {\n\t\t\t// Skip \\r\\n--we already wrote \\n.\n\t\t} else {\n\t\t\td.buf.WriteByte(b)\n\t\t}\n\n\t\tb0, b1 = b1, b\n\t}\n\tdata := d.buf.Bytes()\n\tdata = data[0 : len(data)-trunc]\n\n\t// Inspect each rune for being a disallowed character.\n\tbuf := data\n\tfor len(buf) > 0 {\n\t\tr, size := utf8.DecodeRune(buf)\n\t\tif r == utf8.RuneError && size == 1 {\n\t\t\td.err = d.syntaxError(\"invalid UTF-8\")\n\t\t\treturn nil\n\t\t}\n\t\tbuf = buf[size:]\n\t\tif !isInCharacterRange(r) 
{\n\t\t\td.err = d.syntaxError(fmt.Sprintf(\"illegal character code %U\", r))\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn data\n}\n\n// Decide whether the given rune is in the XML Character Range, per\n// the Char production of http://www.xml.com/axml/testaxml.htm,\n// Section 2.2 Characters.\nfunc isInCharacterRange(r rune) (inrange bool) {\n\treturn r == 0x09 ||\n\t\tr == 0x0A ||\n\t\tr == 0x0D ||\n\t\tr >= 0x20 && r <= 0xDF77 ||\n\t\tr >= 0xE000 && r <= 0xFFFD ||\n\t\tr >= 0x10000 && r <= 0x10FFFF\n}\n\n// Get name space name: name with a : stuck in the middle.\n// The part before the : is the name space identifier.\nfunc (d *Decoder) nsname() (name Name, ok bool) {\n\ts, ok := d.name()\n\tif !ok {\n\t\treturn\n\t}\n\ti := strings.Index(s, \":\")\n\tif i < 0 {\n\t\tname.Local = s\n\t} else {\n\t\tname.Space = s[0:i]\n\t\tname.Local = s[i+1:]\n\t}\n\treturn name, true\n}\n\n// Get name: /first(first|second)*/\n// Do not set d.err if the name is missing (unless unexpected EOF is received):\n// let the caller provide better context.\nfunc (d *Decoder) name() (s string, ok bool) {\n\td.buf.Reset()\n\tif !d.readName() {\n\t\treturn \"\", false\n\t}\n\n\t// Now we check the characters.\n\ts = d.buf.String()\n\tif !isName([]byte(s)) {\n\t\td.err = d.syntaxError(\"invalid XML name: \" + s)\n\t\treturn \"\", false\n\t}\n\treturn s, true\n}\n\n// Read a name and append its bytes to d.buf.\n// The name is delimited by any single-byte character not valid in names.\n// All multi-byte characters are accepted; the caller must check their validity.\nfunc (d *Decoder) readName() (ok bool) {\n\tvar b byte\n\tif b, ok = d.mustgetc(); !ok {\n\t\treturn\n\t}\n\tif b < utf8.RuneSelf && !isNameByte(b) {\n\t\td.ungetc(b)\n\t\treturn false\n\t}\n\td.buf.WriteByte(b)\n\n\tfor {\n\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\treturn\n\t\t}\n\t\tif b < utf8.RuneSelf && !isNameByte(b) {\n\t\t\td.ungetc(b)\n\t\t\tbreak\n\t\t}\n\t\td.buf.WriteByte(b)\n\t}\n\treturn true\n}\n\nfunc isNameByte(c byte) 
bool {\n\treturn 'A' <= c && c <= 'Z' ||\n\t\t'a' <= c && c <= 'z' ||\n\t\t'0' <= c && c <= '9' ||\n\t\tc == '_' || c == ':' || c == '.' || c == '-'\n}\n\nfunc isName(s []byte) bool {\n\tif len(s) == 0 {\n\t\treturn false\n\t}\n\tc, n := utf8.DecodeRune(s)\n\tif c == utf8.RuneError && n == 1 {\n\t\treturn false\n\t}\n\tif !unicode.Is(first, c) {\n\t\treturn false\n\t}\n\tfor n < len(s) {\n\t\ts = s[n:]\n\t\tc, n = utf8.DecodeRune(s)\n\t\tif c == utf8.RuneError && n == 1 {\n\t\t\treturn false\n\t\t}\n\t\tif !unicode.Is(first, c) && !unicode.Is(second, c) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc isNameString(s string) bool {\n\tif len(s) == 0 {\n\t\treturn false\n\t}\n\tc, n := utf8.DecodeRuneInString(s)\n\tif c == utf8.RuneError && n == 1 {\n\t\treturn false\n\t}\n\tif !unicode.Is(first, c) {\n\t\treturn false\n\t}\n\tfor n < len(s) {\n\t\ts = s[n:]\n\t\tc, n = utf8.DecodeRuneInString(s)\n\t\tif c == utf8.RuneError && n == 1 {\n\t\t\treturn false\n\t\t}\n\t\tif !unicode.Is(first, c) && !unicode.Is(second, c) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// These tables were generated by cut and paste from Appendix B of\n// the XML spec at http://www.xml.com/axml/testaxml.htm\n// and then reformatting.  
First corresponds to (Letter | '_' | ':')\n// and second corresponds to NameChar.\n\nvar first = &unicode.RangeTable{\n\tR16: []unicode.Range16{\n\t\t{0x003A, 0x003A, 1},\n\t\t{0x0041, 0x005A, 1},\n\t\t{0x005F, 0x005F, 1},\n\t\t{0x0061, 0x007A, 1},\n\t\t{0x00C0, 0x00D6, 1},\n\t\t{0x00D8, 0x00F6, 1},\n\t\t{0x00F8, 0x00FF, 1},\n\t\t{0x0100, 0x0131, 1},\n\t\t{0x0134, 0x013E, 1},\n\t\t{0x0141, 0x0148, 1},\n\t\t{0x014A, 0x017E, 1},\n\t\t{0x0180, 0x01C3, 1},\n\t\t{0x01CD, 0x01F0, 1},\n\t\t{0x01F4, 0x01F5, 1},\n\t\t{0x01FA, 0x0217, 1},\n\t\t{0x0250, 0x02A8, 1},\n\t\t{0x02BB, 0x02C1, 1},\n\t\t{0x0386, 0x0386, 1},\n\t\t{0x0388, 0x038A, 1},\n\t\t{0x038C, 0x038C, 1},\n\t\t{0x038E, 0x03A1, 1},\n\t\t{0x03A3, 0x03CE, 1},\n\t\t{0x03D0, 0x03D6, 1},\n\t\t{0x03DA, 0x03E0, 2},\n\t\t{0x03E2, 0x03F3, 1},\n\t\t{0x0401, 0x040C, 1},\n\t\t{0x040E, 0x044F, 1},\n\t\t{0x0451, 0x045C, 1},\n\t\t{0x045E, 0x0481, 1},\n\t\t{0x0490, 0x04C4, 1},\n\t\t{0x04C7, 0x04C8, 1},\n\t\t{0x04CB, 0x04CC, 1},\n\t\t{0x04D0, 0x04EB, 1},\n\t\t{0x04EE, 0x04F5, 1},\n\t\t{0x04F8, 0x04F9, 1},\n\t\t{0x0531, 0x0556, 1},\n\t\t{0x0559, 0x0559, 1},\n\t\t{0x0561, 0x0586, 1},\n\t\t{0x05D0, 0x05EA, 1},\n\t\t{0x05F0, 0x05F2, 1},\n\t\t{0x0621, 0x063A, 1},\n\t\t{0x0641, 0x064A, 1},\n\t\t{0x0671, 0x06B7, 1},\n\t\t{0x06BA, 0x06BE, 1},\n\t\t{0x06C0, 0x06CE, 1},\n\t\t{0x06D0, 0x06D3, 1},\n\t\t{0x06D5, 0x06D5, 1},\n\t\t{0x06E5, 0x06E6, 1},\n\t\t{0x0905, 0x0939, 1},\n\t\t{0x093D, 0x093D, 1},\n\t\t{0x0958, 0x0961, 1},\n\t\t{0x0985, 0x098C, 1},\n\t\t{0x098F, 0x0990, 1},\n\t\t{0x0993, 0x09A8, 1},\n\t\t{0x09AA, 0x09B0, 1},\n\t\t{0x09B2, 0x09B2, 1},\n\t\t{0x09B6, 0x09B9, 1},\n\t\t{0x09DC, 0x09DD, 1},\n\t\t{0x09DF, 0x09E1, 1},\n\t\t{0x09F0, 0x09F1, 1},\n\t\t{0x0A05, 0x0A0A, 1},\n\t\t{0x0A0F, 0x0A10, 1},\n\t\t{0x0A13, 0x0A28, 1},\n\t\t{0x0A2A, 0x0A30, 1},\n\t\t{0x0A32, 0x0A33, 1},\n\t\t{0x0A35, 0x0A36, 1},\n\t\t{0x0A38, 0x0A39, 1},\n\t\t{0x0A59, 0x0A5C, 1},\n\t\t{0x0A5E, 0x0A5E, 1},\n\t\t{0x0A72, 0x0A74, 1},\n\t\t{0x0A85, 0x0A8B, 
1},\n\t\t{0x0A8D, 0x0A8D, 1},\n\t\t{0x0A8F, 0x0A91, 1},\n\t\t{0x0A93, 0x0AA8, 1},\n\t\t{0x0AAA, 0x0AB0, 1},\n\t\t{0x0AB2, 0x0AB3, 1},\n\t\t{0x0AB5, 0x0AB9, 1},\n\t\t{0x0ABD, 0x0AE0, 0x23},\n\t\t{0x0B05, 0x0B0C, 1},\n\t\t{0x0B0F, 0x0B10, 1},\n\t\t{0x0B13, 0x0B28, 1},\n\t\t{0x0B2A, 0x0B30, 1},\n\t\t{0x0B32, 0x0B33, 1},\n\t\t{0x0B36, 0x0B39, 1},\n\t\t{0x0B3D, 0x0B3D, 1},\n\t\t{0x0B5C, 0x0B5D, 1},\n\t\t{0x0B5F, 0x0B61, 1},\n\t\t{0x0B85, 0x0B8A, 1},\n\t\t{0x0B8E, 0x0B90, 1},\n\t\t{0x0B92, 0x0B95, 1},\n\t\t{0x0B99, 0x0B9A, 1},\n\t\t{0x0B9C, 0x0B9C, 1},\n\t\t{0x0B9E, 0x0B9F, 1},\n\t\t{0x0BA3, 0x0BA4, 1},\n\t\t{0x0BA8, 0x0BAA, 1},\n\t\t{0x0BAE, 0x0BB5, 1},\n\t\t{0x0BB7, 0x0BB9, 1},\n\t\t{0x0C05, 0x0C0C, 1},\n\t\t{0x0C0E, 0x0C10, 1},\n\t\t{0x0C12, 0x0C28, 1},\n\t\t{0x0C2A, 0x0C33, 1},\n\t\t{0x0C35, 0x0C39, 1},\n\t\t{0x0C60, 0x0C61, 1},\n\t\t{0x0C85, 0x0C8C, 1},\n\t\t{0x0C8E, 0x0C90, 1},\n\t\t{0x0C92, 0x0CA8, 1},\n\t\t{0x0CAA, 0x0CB3, 1},\n\t\t{0x0CB5, 0x0CB9, 1},\n\t\t{0x0CDE, 0x0CDE, 1},\n\t\t{0x0CE0, 0x0CE1, 1},\n\t\t{0x0D05, 0x0D0C, 1},\n\t\t{0x0D0E, 0x0D10, 1},\n\t\t{0x0D12, 0x0D28, 1},\n\t\t{0x0D2A, 0x0D39, 1},\n\t\t{0x0D60, 0x0D61, 1},\n\t\t{0x0E01, 0x0E2E, 1},\n\t\t{0x0E30, 0x0E30, 1},\n\t\t{0x0E32, 0x0E33, 1},\n\t\t{0x0E40, 0x0E45, 1},\n\t\t{0x0E81, 0x0E82, 1},\n\t\t{0x0E84, 0x0E84, 1},\n\t\t{0x0E87, 0x0E88, 1},\n\t\t{0x0E8A, 0x0E8D, 3},\n\t\t{0x0E94, 0x0E97, 1},\n\t\t{0x0E99, 0x0E9F, 1},\n\t\t{0x0EA1, 0x0EA3, 1},\n\t\t{0x0EA5, 0x0EA7, 2},\n\t\t{0x0EAA, 0x0EAB, 1},\n\t\t{0x0EAD, 0x0EAE, 1},\n\t\t{0x0EB0, 0x0EB0, 1},\n\t\t{0x0EB2, 0x0EB3, 1},\n\t\t{0x0EBD, 0x0EBD, 1},\n\t\t{0x0EC0, 0x0EC4, 1},\n\t\t{0x0F40, 0x0F47, 1},\n\t\t{0x0F49, 0x0F69, 1},\n\t\t{0x10A0, 0x10C5, 1},\n\t\t{0x10D0, 0x10F6, 1},\n\t\t{0x1100, 0x1100, 1},\n\t\t{0x1102, 0x1103, 1},\n\t\t{0x1105, 0x1107, 1},\n\t\t{0x1109, 0x1109, 1},\n\t\t{0x110B, 0x110C, 1},\n\t\t{0x110E, 0x1112, 1},\n\t\t{0x113C, 0x1140, 2},\n\t\t{0x114C, 0x1150, 2},\n\t\t{0x1154, 0x1155, 1},\n\t\t{0x1159, 0x1159, 1},\n\t\t{0x115F, 
0x1161, 1},\n\t\t{0x1163, 0x1169, 2},\n\t\t{0x116D, 0x116E, 1},\n\t\t{0x1172, 0x1173, 1},\n\t\t{0x1175, 0x119E, 0x119E - 0x1175},\n\t\t{0x11A8, 0x11AB, 0x11AB - 0x11A8},\n\t\t{0x11AE, 0x11AF, 1},\n\t\t{0x11B7, 0x11B8, 1},\n\t\t{0x11BA, 0x11BA, 1},\n\t\t{0x11BC, 0x11C2, 1},\n\t\t{0x11EB, 0x11F0, 0x11F0 - 0x11EB},\n\t\t{0x11F9, 0x11F9, 1},\n\t\t{0x1E00, 0x1E9B, 1},\n\t\t{0x1EA0, 0x1EF9, 1},\n\t\t{0x1F00, 0x1F15, 1},\n\t\t{0x1F18, 0x1F1D, 1},\n\t\t{0x1F20, 0x1F45, 1},\n\t\t{0x1F48, 0x1F4D, 1},\n\t\t{0x1F50, 0x1F57, 1},\n\t\t{0x1F59, 0x1F5B, 0x1F5B - 0x1F59},\n\t\t{0x1F5D, 0x1F5D, 1},\n\t\t{0x1F5F, 0x1F7D, 1},\n\t\t{0x1F80, 0x1FB4, 1},\n\t\t{0x1FB6, 0x1FBC, 1},\n\t\t{0x1FBE, 0x1FBE, 1},\n\t\t{0x1FC2, 0x1FC4, 1},\n\t\t{0x1FC6, 0x1FCC, 1},\n\t\t{0x1FD0, 0x1FD3, 1},\n\t\t{0x1FD6, 0x1FDB, 1},\n\t\t{0x1FE0, 0x1FEC, 1},\n\t\t{0x1FF2, 0x1FF4, 1},\n\t\t{0x1FF6, 0x1FFC, 1},\n\t\t{0x2126, 0x2126, 1},\n\t\t{0x212A, 0x212B, 1},\n\t\t{0x212E, 0x212E, 1},\n\t\t{0x2180, 0x2182, 1},\n\t\t{0x3007, 0x3007, 1},\n\t\t{0x3021, 0x3029, 1},\n\t\t{0x3041, 0x3094, 1},\n\t\t{0x30A1, 0x30FA, 1},\n\t\t{0x3105, 0x312C, 1},\n\t\t{0x4E00, 0x9FA5, 1},\n\t\t{0xAC00, 0xD7A3, 1},\n\t},\n}\n\nvar second = &unicode.RangeTable{\n\tR16: []unicode.Range16{\n\t\t{0x002D, 0x002E, 1},\n\t\t{0x0030, 0x0039, 1},\n\t\t{0x00B7, 0x00B7, 1},\n\t\t{0x02D0, 0x02D1, 1},\n\t\t{0x0300, 0x0345, 1},\n\t\t{0x0360, 0x0361, 1},\n\t\t{0x0387, 0x0387, 1},\n\t\t{0x0483, 0x0486, 1},\n\t\t{0x0591, 0x05A1, 1},\n\t\t{0x05A3, 0x05B9, 1},\n\t\t{0x05BB, 0x05BD, 1},\n\t\t{0x05BF, 0x05BF, 1},\n\t\t{0x05C1, 0x05C2, 1},\n\t\t{0x05C4, 0x0640, 0x0640 - 0x05C4},\n\t\t{0x064B, 0x0652, 1},\n\t\t{0x0660, 0x0669, 1},\n\t\t{0x0670, 0x0670, 1},\n\t\t{0x06D6, 0x06DC, 1},\n\t\t{0x06DD, 0x06DF, 1},\n\t\t{0x06E0, 0x06E4, 1},\n\t\t{0x06E7, 0x06E8, 1},\n\t\t{0x06EA, 0x06ED, 1},\n\t\t{0x06F0, 0x06F9, 1},\n\t\t{0x0901, 0x0903, 1},\n\t\t{0x093C, 0x093C, 1},\n\t\t{0x093E, 0x094C, 1},\n\t\t{0x094D, 0x094D, 1},\n\t\t{0x0951, 0x0954, 1},\n\t\t{0x0962, 0x0963, 
1},\n\t\t{0x0966, 0x096F, 1},\n\t\t{0x0981, 0x0983, 1},\n\t\t{0x09BC, 0x09BC, 1},\n\t\t{0x09BE, 0x09BF, 1},\n\t\t{0x09C0, 0x09C4, 1},\n\t\t{0x09C7, 0x09C8, 1},\n\t\t{0x09CB, 0x09CD, 1},\n\t\t{0x09D7, 0x09D7, 1},\n\t\t{0x09E2, 0x09E3, 1},\n\t\t{0x09E6, 0x09EF, 1},\n\t\t{0x0A02, 0x0A3C, 0x3A},\n\t\t{0x0A3E, 0x0A3F, 1},\n\t\t{0x0A40, 0x0A42, 1},\n\t\t{0x0A47, 0x0A48, 1},\n\t\t{0x0A4B, 0x0A4D, 1},\n\t\t{0x0A66, 0x0A6F, 1},\n\t\t{0x0A70, 0x0A71, 1},\n\t\t{0x0A81, 0x0A83, 1},\n\t\t{0x0ABC, 0x0ABC, 1},\n\t\t{0x0ABE, 0x0AC5, 1},\n\t\t{0x0AC7, 0x0AC9, 1},\n\t\t{0x0ACB, 0x0ACD, 1},\n\t\t{0x0AE6, 0x0AEF, 1},\n\t\t{0x0B01, 0x0B03, 1},\n\t\t{0x0B3C, 0x0B3C, 1},\n\t\t{0x0B3E, 0x0B43, 1},\n\t\t{0x0B47, 0x0B48, 1},\n\t\t{0x0B4B, 0x0B4D, 1},\n\t\t{0x0B56, 0x0B57, 1},\n\t\t{0x0B66, 0x0B6F, 1},\n\t\t{0x0B82, 0x0B83, 1},\n\t\t{0x0BBE, 0x0BC2, 1},\n\t\t{0x0BC6, 0x0BC8, 1},\n\t\t{0x0BCA, 0x0BCD, 1},\n\t\t{0x0BD7, 0x0BD7, 1},\n\t\t{0x0BE7, 0x0BEF, 1},\n\t\t{0x0C01, 0x0C03, 1},\n\t\t{0x0C3E, 0x0C44, 1},\n\t\t{0x0C46, 0x0C48, 1},\n\t\t{0x0C4A, 0x0C4D, 1},\n\t\t{0x0C55, 0x0C56, 1},\n\t\t{0x0C66, 0x0C6F, 1},\n\t\t{0x0C82, 0x0C83, 1},\n\t\t{0x0CBE, 0x0CC4, 1},\n\t\t{0x0CC6, 0x0CC8, 1},\n\t\t{0x0CCA, 0x0CCD, 1},\n\t\t{0x0CD5, 0x0CD6, 1},\n\t\t{0x0CE6, 0x0CEF, 1},\n\t\t{0x0D02, 0x0D03, 1},\n\t\t{0x0D3E, 0x0D43, 1},\n\t\t{0x0D46, 0x0D48, 1},\n\t\t{0x0D4A, 0x0D4D, 1},\n\t\t{0x0D57, 0x0D57, 1},\n\t\t{0x0D66, 0x0D6F, 1},\n\t\t{0x0E31, 0x0E31, 1},\n\t\t{0x0E34, 0x0E3A, 1},\n\t\t{0x0E46, 0x0E46, 1},\n\t\t{0x0E47, 0x0E4E, 1},\n\t\t{0x0E50, 0x0E59, 1},\n\t\t{0x0EB1, 0x0EB1, 1},\n\t\t{0x0EB4, 0x0EB9, 1},\n\t\t{0x0EBB, 0x0EBC, 1},\n\t\t{0x0EC6, 0x0EC6, 1},\n\t\t{0x0EC8, 0x0ECD, 1},\n\t\t{0x0ED0, 0x0ED9, 1},\n\t\t{0x0F18, 0x0F19, 1},\n\t\t{0x0F20, 0x0F29, 1},\n\t\t{0x0F35, 0x0F39, 2},\n\t\t{0x0F3E, 0x0F3F, 1},\n\t\t{0x0F71, 0x0F84, 1},\n\t\t{0x0F86, 0x0F8B, 1},\n\t\t{0x0F90, 0x0F95, 1},\n\t\t{0x0F97, 0x0F97, 1},\n\t\t{0x0F99, 0x0FAD, 1},\n\t\t{0x0FB1, 0x0FB7, 1},\n\t\t{0x0FB9, 0x0FB9, 1},\n\t\t{0x20D0, 
0x20DC, 1},\n\t\t{0x20E1, 0x3005, 0x3005 - 0x20E1},\n\t\t{0x302A, 0x302F, 1},\n\t\t{0x3031, 0x3035, 1},\n\t\t{0x3099, 0x309A, 1},\n\t\t{0x309D, 0x309E, 1},\n\t\t{0x30FC, 0x30FE, 1},\n\t},\n}\n\n// HTMLEntity is an entity map containing translations for the\n// standard HTML entity characters.\nvar HTMLEntity = htmlEntity\n\nvar htmlEntity = map[string]string{\n\t/*\n\t\thget http://www.w3.org/TR/html4/sgml/entities.html |\n\t\tssam '\n\t\t\t,y /\\&gt;/ x/\\&lt;(.|\\n)+/ s/\\n/ /g\n\t\t\t,x v/^\\&lt;!ENTITY/d\n\t\t\t,s/\\&lt;!ENTITY ([^ ]+) .*U\\+([0-9A-F][0-9A-F][0-9A-F][0-9A-F]) .+/\t\"\\1\": \"\\\\u\\2\",/g\n\t\t'\n\t*/\n\t\"nbsp\":     \"\\u00A0\",\n\t\"iexcl\":    \"\\u00A1\",\n\t\"cent\":     \"\\u00A2\",\n\t\"pound\":    \"\\u00A3\",\n\t\"curren\":   \"\\u00A4\",\n\t\"yen\":      \"\\u00A5\",\n\t\"brvbar\":   \"\\u00A6\",\n\t\"sect\":     \"\\u00A7\",\n\t\"uml\":      \"\\u00A8\",\n\t\"copy\":     \"\\u00A9\",\n\t\"ordf\":     \"\\u00AA\",\n\t\"laquo\":    \"\\u00AB\",\n\t\"not\":      \"\\u00AC\",\n\t\"shy\":      \"\\u00AD\",\n\t\"reg\":      \"\\u00AE\",\n\t\"macr\":     \"\\u00AF\",\n\t\"deg\":      \"\\u00B0\",\n\t\"plusmn\":   \"\\u00B1\",\n\t\"sup2\":     \"\\u00B2\",\n\t\"sup3\":     \"\\u00B3\",\n\t\"acute\":    \"\\u00B4\",\n\t\"micro\":    \"\\u00B5\",\n\t\"para\":     \"\\u00B6\",\n\t\"middot\":   \"\\u00B7\",\n\t\"cedil\":    \"\\u00B8\",\n\t\"sup1\":     \"\\u00B9\",\n\t\"ordm\":     \"\\u00BA\",\n\t\"raquo\":    \"\\u00BB\",\n\t\"frac14\":   \"\\u00BC\",\n\t\"frac12\":   \"\\u00BD\",\n\t\"frac34\":   \"\\u00BE\",\n\t\"iquest\":   \"\\u00BF\",\n\t\"Agrave\":   \"\\u00C0\",\n\t\"Aacute\":   \"\\u00C1\",\n\t\"Acirc\":    \"\\u00C2\",\n\t\"Atilde\":   \"\\u00C3\",\n\t\"Auml\":     \"\\u00C4\",\n\t\"Aring\":    \"\\u00C5\",\n\t\"AElig\":    \"\\u00C6\",\n\t\"Ccedil\":   \"\\u00C7\",\n\t\"Egrave\":   \"\\u00C8\",\n\t\"Eacute\":   \"\\u00C9\",\n\t\"Ecirc\":    \"\\u00CA\",\n\t\"Euml\":     \"\\u00CB\",\n\t\"Igrave\":   \"\\u00CC\",\n\t\"Iacute\":   
\"\\u00CD\",\n\t\"Icirc\":    \"\\u00CE\",\n\t\"Iuml\":     \"\\u00CF\",\n\t\"ETH\":      \"\\u00D0\",\n\t\"Ntilde\":   \"\\u00D1\",\n\t\"Ograve\":   \"\\u00D2\",\n\t\"Oacute\":   \"\\u00D3\",\n\t\"Ocirc\":    \"\\u00D4\",\n\t\"Otilde\":   \"\\u00D5\",\n\t\"Ouml\":     \"\\u00D6\",\n\t\"times\":    \"\\u00D7\",\n\t\"Oslash\":   \"\\u00D8\",\n\t\"Ugrave\":   \"\\u00D9\",\n\t\"Uacute\":   \"\\u00DA\",\n\t\"Ucirc\":    \"\\u00DB\",\n\t\"Uuml\":     \"\\u00DC\",\n\t\"Yacute\":   \"\\u00DD\",\n\t\"THORN\":    \"\\u00DE\",\n\t\"szlig\":    \"\\u00DF\",\n\t\"agrave\":   \"\\u00E0\",\n\t\"aacute\":   \"\\u00E1\",\n\t\"acirc\":    \"\\u00E2\",\n\t\"atilde\":   \"\\u00E3\",\n\t\"auml\":     \"\\u00E4\",\n\t\"aring\":    \"\\u00E5\",\n\t\"aelig\":    \"\\u00E6\",\n\t\"ccedil\":   \"\\u00E7\",\n\t\"egrave\":   \"\\u00E8\",\n\t\"eacute\":   \"\\u00E9\",\n\t\"ecirc\":    \"\\u00EA\",\n\t\"euml\":     \"\\u00EB\",\n\t\"igrave\":   \"\\u00EC\",\n\t\"iacute\":   \"\\u00ED\",\n\t\"icirc\":    \"\\u00EE\",\n\t\"iuml\":     \"\\u00EF\",\n\t\"eth\":      \"\\u00F0\",\n\t\"ntilde\":   \"\\u00F1\",\n\t\"ograve\":   \"\\u00F2\",\n\t\"oacute\":   \"\\u00F3\",\n\t\"ocirc\":    \"\\u00F4\",\n\t\"otilde\":   \"\\u00F5\",\n\t\"ouml\":     \"\\u00F6\",\n\t\"divide\":   \"\\u00F7\",\n\t\"oslash\":   \"\\u00F8\",\n\t\"ugrave\":   \"\\u00F9\",\n\t\"uacute\":   \"\\u00FA\",\n\t\"ucirc\":    \"\\u00FB\",\n\t\"uuml\":     \"\\u00FC\",\n\t\"yacute\":   \"\\u00FD\",\n\t\"thorn\":    \"\\u00FE\",\n\t\"yuml\":     \"\\u00FF\",\n\t\"fnof\":     \"\\u0192\",\n\t\"Alpha\":    \"\\u0391\",\n\t\"Beta\":     \"\\u0392\",\n\t\"Gamma\":    \"\\u0393\",\n\t\"Delta\":    \"\\u0394\",\n\t\"Epsilon\":  \"\\u0395\",\n\t\"Zeta\":     \"\\u0396\",\n\t\"Eta\":      \"\\u0397\",\n\t\"Theta\":    \"\\u0398\",\n\t\"Iota\":     \"\\u0399\",\n\t\"Kappa\":    \"\\u039A\",\n\t\"Lambda\":   \"\\u039B\",\n\t\"Mu\":       \"\\u039C\",\n\t\"Nu\":       \"\\u039D\",\n\t\"Xi\":       \"\\u039E\",\n\t\"Omicron\":  
\"\\u039F\",\n\t\"Pi\":       \"\\u03A0\",\n\t\"Rho\":      \"\\u03A1\",\n\t\"Sigma\":    \"\\u03A3\",\n\t\"Tau\":      \"\\u03A4\",\n\t\"Upsilon\":  \"\\u03A5\",\n\t\"Phi\":      \"\\u03A6\",\n\t\"Chi\":      \"\\u03A7\",\n\t\"Psi\":      \"\\u03A8\",\n\t\"Omega\":    \"\\u03A9\",\n\t\"alpha\":    \"\\u03B1\",\n\t\"beta\":     \"\\u03B2\",\n\t\"gamma\":    \"\\u03B3\",\n\t\"delta\":    \"\\u03B4\",\n\t\"epsilon\":  \"\\u03B5\",\n\t\"zeta\":     \"\\u03B6\",\n\t\"eta\":      \"\\u03B7\",\n\t\"theta\":    \"\\u03B8\",\n\t\"iota\":     \"\\u03B9\",\n\t\"kappa\":    \"\\u03BA\",\n\t\"lambda\":   \"\\u03BB\",\n\t\"mu\":       \"\\u03BC\",\n\t\"nu\":       \"\\u03BD\",\n\t\"xi\":       \"\\u03BE\",\n\t\"omicron\":  \"\\u03BF\",\n\t\"pi\":       \"\\u03C0\",\n\t\"rho\":      \"\\u03C1\",\n\t\"sigmaf\":   \"\\u03C2\",\n\t\"sigma\":    \"\\u03C3\",\n\t\"tau\":      \"\\u03C4\",\n\t\"upsilon\":  \"\\u03C5\",\n\t\"phi\":      \"\\u03C6\",\n\t\"chi\":      \"\\u03C7\",\n\t\"psi\":      \"\\u03C8\",\n\t\"omega\":    \"\\u03C9\",\n\t\"thetasym\": \"\\u03D1\",\n\t\"upsih\":    \"\\u03D2\",\n\t\"piv\":      \"\\u03D6\",\n\t\"bull\":     \"\\u2022\",\n\t\"hellip\":   \"\\u2026\",\n\t\"prime\":    \"\\u2032\",\n\t\"Prime\":    \"\\u2033\",\n\t\"oline\":    \"\\u203E\",\n\t\"frasl\":    \"\\u2044\",\n\t\"weierp\":   \"\\u2118\",\n\t\"image\":    \"\\u2111\",\n\t\"real\":     \"\\u211C\",\n\t\"trade\":    \"\\u2122\",\n\t\"alefsym\":  \"\\u2135\",\n\t\"larr\":     \"\\u2190\",\n\t\"uarr\":     \"\\u2191\",\n\t\"rarr\":     \"\\u2192\",\n\t\"darr\":     \"\\u2193\",\n\t\"harr\":     \"\\u2194\",\n\t\"crarr\":    \"\\u21B5\",\n\t\"lArr\":     \"\\u21D0\",\n\t\"uArr\":     \"\\u21D1\",\n\t\"rArr\":     \"\\u21D2\",\n\t\"dArr\":     \"\\u21D3\",\n\t\"hArr\":     \"\\u21D4\",\n\t\"forall\":   \"\\u2200\",\n\t\"part\":     \"\\u2202\",\n\t\"exist\":    \"\\u2203\",\n\t\"empty\":    \"\\u2205\",\n\t\"nabla\":    \"\\u2207\",\n\t\"isin\":     \"\\u2208\",\n\t\"notin\":    
\"\\u2209\",\n\t\"ni\":       \"\\u220B\",\n\t\"prod\":     \"\\u220F\",\n\t\"sum\":      \"\\u2211\",\n\t\"minus\":    \"\\u2212\",\n\t\"lowast\":   \"\\u2217\",\n\t\"radic\":    \"\\u221A\",\n\t\"prop\":     \"\\u221D\",\n\t\"infin\":    \"\\u221E\",\n\t\"ang\":      \"\\u2220\",\n\t\"and\":      \"\\u2227\",\n\t\"or\":       \"\\u2228\",\n\t\"cap\":      \"\\u2229\",\n\t\"cup\":      \"\\u222A\",\n\t\"int\":      \"\\u222B\",\n\t\"there4\":   \"\\u2234\",\n\t\"sim\":      \"\\u223C\",\n\t\"cong\":     \"\\u2245\",\n\t\"asymp\":    \"\\u2248\",\n\t\"ne\":       \"\\u2260\",\n\t\"equiv\":    \"\\u2261\",\n\t\"le\":       \"\\u2264\",\n\t\"ge\":       \"\\u2265\",\n\t\"sub\":      \"\\u2282\",\n\t\"sup\":      \"\\u2283\",\n\t\"nsub\":     \"\\u2284\",\n\t\"sube\":     \"\\u2286\",\n\t\"supe\":     \"\\u2287\",\n\t\"oplus\":    \"\\u2295\",\n\t\"otimes\":   \"\\u2297\",\n\t\"perp\":     \"\\u22A5\",\n\t\"sdot\":     \"\\u22C5\",\n\t\"lceil\":    \"\\u2308\",\n\t\"rceil\":    \"\\u2309\",\n\t\"lfloor\":   \"\\u230A\",\n\t\"rfloor\":   \"\\u230B\",\n\t\"lang\":     \"\\u2329\",\n\t\"rang\":     \"\\u232A\",\n\t\"loz\":      \"\\u25CA\",\n\t\"spades\":   \"\\u2660\",\n\t\"clubs\":    \"\\u2663\",\n\t\"hearts\":   \"\\u2665\",\n\t\"diams\":    \"\\u2666\",\n\t\"quot\":     \"\\u0022\",\n\t\"amp\":      \"\\u0026\",\n\t\"lt\":       \"\\u003C\",\n\t\"gt\":       \"\\u003E\",\n\t\"OElig\":    \"\\u0152\",\n\t\"oelig\":    \"\\u0153\",\n\t\"Scaron\":   \"\\u0160\",\n\t\"scaron\":   \"\\u0161\",\n\t\"Yuml\":     \"\\u0178\",\n\t\"circ\":     \"\\u02C6\",\n\t\"tilde\":    \"\\u02DC\",\n\t\"ensp\":     \"\\u2002\",\n\t\"emsp\":     \"\\u2003\",\n\t\"thinsp\":   \"\\u2009\",\n\t\"zwnj\":     \"\\u200C\",\n\t\"zwj\":      \"\\u200D\",\n\t\"lrm\":      \"\\u200E\",\n\t\"rlm\":      \"\\u200F\",\n\t\"ndash\":    \"\\u2013\",\n\t\"mdash\":    \"\\u2014\",\n\t\"lsquo\":    \"\\u2018\",\n\t\"rsquo\":    \"\\u2019\",\n\t\"sbquo\":    \"\\u201A\",\n\t\"ldquo\":    
\"\\u201C\",\n\t\"rdquo\":    \"\\u201D\",\n\t\"bdquo\":    \"\\u201E\",\n\t\"dagger\":   \"\\u2020\",\n\t\"Dagger\":   \"\\u2021\",\n\t\"permil\":   \"\\u2030\",\n\t\"lsaquo\":   \"\\u2039\",\n\t\"rsaquo\":   \"\\u203A\",\n\t\"euro\":     \"\\u20AC\",\n}\n\n// HTMLAutoClose is the set of HTML elements that\n// should be considered to close automatically.\nvar HTMLAutoClose = htmlAutoClose\n\nvar htmlAutoClose = []string{\n\t/*\n\t\thget http://www.w3.org/TR/html4/loose.dtd |\n\t\t9 sed -n 's/<!ELEMENT ([^ ]*) +- O EMPTY.+/\t\"\\1\",/p' | tr A-Z a-z\n\t*/\n\t\"basefont\",\n\t\"br\",\n\t\"area\",\n\t\"link\",\n\t\"img\",\n\t\"param\",\n\t\"hr\",\n\t\"input\",\n\t\"col\",\n\t\"frame\",\n\t\"isindex\",\n\t\"base\",\n\t\"meta\",\n}\n\nvar (\n\tesc_quot = []byte(\"&#34;\") // shorter than \"&quot;\"\n\tesc_apos = []byte(\"&#39;\") // shorter than \"&apos;\"\n\tesc_amp  = []byte(\"&amp;\")\n\tesc_lt   = []byte(\"&lt;\")\n\tesc_gt   = []byte(\"&gt;\")\n\tesc_tab  = []byte(\"&#x9;\")\n\tesc_nl   = []byte(\"&#xA;\")\n\tesc_cr   = []byte(\"&#xD;\")\n\tesc_fffd = []byte(\"\\uFFFD\") // Unicode replacement character\n)\n\n// EscapeText writes to w the properly escaped XML equivalent\n// of the plain text data s.\nfunc EscapeText(w io.Writer, s []byte) error {\n\tvar esc []byte\n\tlast := 0\n\tfor i := 0; i < len(s); {\n\t\tr, width := utf8.DecodeRune(s[i:])\n\t\ti += width\n\t\tswitch r {\n\t\tcase '\"':\n\t\t\tesc = esc_quot\n\t\tcase '\\'':\n\t\t\tesc = esc_apos\n\t\tcase '&':\n\t\t\tesc = esc_amp\n\t\tcase '<':\n\t\t\tesc = esc_lt\n\t\tcase '>':\n\t\t\tesc = esc_gt\n\t\tcase '\\t':\n\t\t\tesc = esc_tab\n\t\tcase '\\n':\n\t\t\tesc = esc_nl\n\t\tcase '\\r':\n\t\t\tesc = esc_cr\n\t\tdefault:\n\t\t\tif !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {\n\t\t\t\tesc = esc_fffd\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif _, err := w.Write(s[last : i-width]); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := w.Write(esc); err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\tlast = i\n\t}\n\tif _, err := w.Write(s[last:]); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// EscapeString writes to p the properly escaped XML equivalent\n// of the plain text data s.\nfunc (p *printer) EscapeString(s string) {\n\tvar esc []byte\n\tlast := 0\n\tfor i := 0; i < len(s); {\n\t\tr, width := utf8.DecodeRuneInString(s[i:])\n\t\ti += width\n\t\tswitch r {\n\t\tcase '\"':\n\t\t\tesc = esc_quot\n\t\tcase '\\'':\n\t\t\tesc = esc_apos\n\t\tcase '&':\n\t\t\tesc = esc_amp\n\t\tcase '<':\n\t\t\tesc = esc_lt\n\t\tcase '>':\n\t\t\tesc = esc_gt\n\t\tcase '\\t':\n\t\t\tesc = esc_tab\n\t\tcase '\\n':\n\t\t\tesc = esc_nl\n\t\tcase '\\r':\n\t\t\tesc = esc_cr\n\t\tdefault:\n\t\t\tif !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {\n\t\t\t\tesc = esc_fffd\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tp.WriteString(s[last : i-width])\n\t\tp.Write(esc)\n\t\tlast = i\n\t}\n\tp.WriteString(s[last:])\n}\n\n// Escape is like EscapeText but omits the error return value.\n// It is provided for backwards compatibility with Go 1.0.\n// Code targeting Go 1.1 or later should use EscapeText.\nfunc Escape(w io.Writer, s []byte) {\n\tEscapeText(w, s)\n}\n\n// procInstEncoding parses the `encoding=\"...\"` or `encoding='...'`\n// value out of the provided string, returning \"\" if not found.\nfunc procInstEncoding(s string) string {\n\t// TODO: this parsing is somewhat lame and not exact.\n\t// It works for all actual cases, though.\n\tidx := strings.Index(s, \"encoding=\")\n\tif idx == -1 {\n\t\treturn \"\"\n\t}\n\tv := s[idx+len(\"encoding=\"):]\n\tif v == \"\" {\n\t\treturn \"\"\n\t}\n\tif v[0] != '\\'' && v[0] != '\"' {\n\t\treturn \"\"\n\t}\n\tidx = strings.IndexRune(v[1:], rune(v[0]))\n\tif idx == -1 {\n\t\treturn \"\"\n\t}\n\treturn v[1 : idx+1]\n}\n"
  },
  {
    "path": "vendor/github.com/vmware/govmomi/vim25/xml/xml_test.go",
    "content": "// Copyright 2009 The Go Authors.  All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage xml\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"unicode/utf8\"\n)\n\nconst testInput = `\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n  \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n<body xmlns:foo=\"ns1\" xmlns=\"ns2\" xmlns:tag=\"ns3\" ` +\n\t\"\\r\\n\\t\" + `  >\n  <hello lang=\"en\">World &lt;&gt;&apos;&quot; &#x767d;&#40300;翔</hello>\n  <query>&何; &is-it;</query>\n  <goodbye />\n  <outer foo:attr=\"value\" xmlns:tag=\"ns4\">\n    <inner/>\n  </outer>\n  <tag:name>\n    <![CDATA[Some text here.]]>\n  </tag:name>\n</body><!-- missing final newline -->`\n\nvar testEntity = map[string]string{\"何\": \"What\", \"is-it\": \"is it?\"}\n\nvar rawTokens = []Token{\n\tCharData(\"\\n\"),\n\tProcInst{\"xml\", []byte(`version=\"1.0\" encoding=\"UTF-8\"`)},\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n  \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\"`),\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"body\"}, []Attr{{Name{\"xmlns\", \"foo\"}, \"ns1\"}, {Name{\"\", \"xmlns\"}, \"ns2\"}, {Name{\"xmlns\", \"tag\"}, \"ns3\"}}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"\", \"hello\"}, []Attr{{Name{\"\", \"lang\"}, \"en\"}}},\n\tCharData(\"World <>'\\\" 白鵬翔\"),\n\tEndElement{Name{\"\", \"hello\"}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"\", \"query\"}, []Attr{}},\n\tCharData(\"What is it?\"),\n\tEndElement{Name{\"\", \"query\"}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"\", \"goodbye\"}, []Attr{}},\n\tEndElement{Name{\"\", \"goodbye\"}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"\", \"outer\"}, []Attr{{Name{\"foo\", \"attr\"}, \"value\"}, {Name{\"xmlns\", \"tag\"}, 
\"ns4\"}}},\n\tCharData(\"\\n    \"),\n\tStartElement{Name{\"\", \"inner\"}, []Attr{}},\n\tEndElement{Name{\"\", \"inner\"}},\n\tCharData(\"\\n  \"),\n\tEndElement{Name{\"\", \"outer\"}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"tag\", \"name\"}, []Attr{}},\n\tCharData(\"\\n    \"),\n\tCharData(\"Some text here.\"),\n\tCharData(\"\\n  \"),\n\tEndElement{Name{\"tag\", \"name\"}},\n\tCharData(\"\\n\"),\n\tEndElement{Name{\"\", \"body\"}},\n\tComment(\" missing final newline \"),\n}\n\nvar cookedTokens = []Token{\n\tCharData(\"\\n\"),\n\tProcInst{\"xml\", []byte(`version=\"1.0\" encoding=\"UTF-8\"`)},\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n  \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\"`),\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"ns2\", \"body\"}, []Attr{{Name{\"xmlns\", \"foo\"}, \"ns1\"}, {Name{\"\", \"xmlns\"}, \"ns2\"}, {Name{\"xmlns\", \"tag\"}, \"ns3\"}}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"ns2\", \"hello\"}, []Attr{{Name{\"\", \"lang\"}, \"en\"}}},\n\tCharData(\"World <>'\\\" 白鵬翔\"),\n\tEndElement{Name{\"ns2\", \"hello\"}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"ns2\", \"query\"}, []Attr{}},\n\tCharData(\"What is it?\"),\n\tEndElement{Name{\"ns2\", \"query\"}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"ns2\", \"goodbye\"}, []Attr{}},\n\tEndElement{Name{\"ns2\", \"goodbye\"}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"ns2\", \"outer\"}, []Attr{{Name{\"ns1\", \"attr\"}, \"value\"}, {Name{\"xmlns\", \"tag\"}, \"ns4\"}}},\n\tCharData(\"\\n    \"),\n\tStartElement{Name{\"ns2\", \"inner\"}, []Attr{}},\n\tEndElement{Name{\"ns2\", \"inner\"}},\n\tCharData(\"\\n  \"),\n\tEndElement{Name{\"ns2\", \"outer\"}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"ns3\", \"name\"}, []Attr{}},\n\tCharData(\"\\n    \"),\n\tCharData(\"Some text here.\"),\n\tCharData(\"\\n  \"),\n\tEndElement{Name{\"ns3\", \"name\"}},\n\tCharData(\"\\n\"),\n\tEndElement{Name{\"ns2\", 
\"body\"}},\n\tComment(\" missing final newline \"),\n}\n\nconst testInputAltEncoding = `\n<?xml version=\"1.0\" encoding=\"x-testing-uppercase\"?>\n<TAG>VALUE</TAG>`\n\nvar rawTokensAltEncoding = []Token{\n\tCharData(\"\\n\"),\n\tProcInst{\"xml\", []byte(`version=\"1.0\" encoding=\"x-testing-uppercase\"`)},\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"value\"),\n\tEndElement{Name{\"\", \"tag\"}},\n}\n\nvar xmlInput = []string{\n\t// unexpected EOF cases\n\t\"<\",\n\t\"<t\",\n\t\"<t \",\n\t\"<t/\",\n\t\"<!\",\n\t\"<!-\",\n\t\"<!--\",\n\t\"<!--c-\",\n\t\"<!--c--\",\n\t\"<!d\",\n\t\"<t></\",\n\t\"<t></t\",\n\t\"<?\",\n\t\"<?p\",\n\t\"<t a\",\n\t\"<t a=\",\n\t\"<t a='\",\n\t\"<t a=''\",\n\t\"<t/><![\",\n\t\"<t/><![C\",\n\t\"<t/><![CDATA[d\",\n\t\"<t/><![CDATA[d]\",\n\t\"<t/><![CDATA[d]]\",\n\n\t// other Syntax errors\n\t\"<>\",\n\t\"<t/a\",\n\t\"<0 />\",\n\t\"<?0 >\",\n\t//\t\"<!0 >\",\t// let the Token() caller handle\n\t\"</0>\",\n\t\"<t 0=''>\",\n\t\"<t a='&'>\",\n\t\"<t a='<'>\",\n\t\"<t>&nbspc;</t>\",\n\t\"<t a>\",\n\t\"<t a=>\",\n\t\"<t a=v>\",\n\t//\t\"<![CDATA[d]]>\",\t// let the Token() caller handle\n\t\"<t></e>\",\n\t\"<t></>\",\n\t\"<t></t!\",\n\t\"<t>cdata]]></t>\",\n}\n\nfunc TestRawToken(t *testing.T) {\n\td := NewDecoder(strings.NewReader(testInput))\n\td.Entity = testEntity\n\ttestRawToken(t, d, rawTokens)\n}\n\nconst nonStrictInput = `\n<tag>non&entity</tag>\n<tag>&unknown;entity</tag>\n<tag>&#123</tag>\n<tag>&#zzz;</tag>\n<tag>&なまえ3;</tag>\n<tag>&lt-gt;</tag>\n<tag>&;</tag>\n<tag>&0a;</tag>\n`\n\nvar nonStringEntity = map[string]string{\"\": \"oops!\", \"0a\": \"oops!\"}\n\nvar nonStrictTokens = []Token{\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"non&entity\"),\n\tEndElement{Name{\"\", \"tag\"}},\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"&unknown;entity\"),\n\tEndElement{Name{\"\", 
\"tag\"}},\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"&#123\"),\n\tEndElement{Name{\"\", \"tag\"}},\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"&#zzz;\"),\n\tEndElement{Name{\"\", \"tag\"}},\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"&なまえ3;\"),\n\tEndElement{Name{\"\", \"tag\"}},\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"&lt-gt;\"),\n\tEndElement{Name{\"\", \"tag\"}},\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"&;\"),\n\tEndElement{Name{\"\", \"tag\"}},\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"&0a;\"),\n\tEndElement{Name{\"\", \"tag\"}},\n\tCharData(\"\\n\"),\n}\n\nfunc TestNonStrictRawToken(t *testing.T) {\n\td := NewDecoder(strings.NewReader(nonStrictInput))\n\td.Strict = false\n\ttestRawToken(t, d, nonStrictTokens)\n}\n\ntype downCaser struct {\n\tt *testing.T\n\tr io.ByteReader\n}\n\nfunc (d *downCaser) ReadByte() (c byte, err error) {\n\tc, err = d.r.ReadByte()\n\tif c >= 'A' && c <= 'Z' {\n\t\tc += 'a' - 'A'\n\t}\n\treturn\n}\n\nfunc (d *downCaser) Read(p []byte) (int, error) {\n\td.t.Fatalf(\"unexpected Read call on downCaser reader\")\n\tpanic(\"unreachable\")\n}\n\nfunc TestRawTokenAltEncoding(t *testing.T) {\n\td := NewDecoder(strings.NewReader(testInputAltEncoding))\n\td.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {\n\t\tif charset != \"x-testing-uppercase\" {\n\t\t\tt.Fatalf(\"unexpected charset %q\", charset)\n\t\t}\n\t\treturn &downCaser{t, input.(io.ByteReader)}, nil\n\t}\n\ttestRawToken(t, d, rawTokensAltEncoding)\n}\n\nfunc TestRawTokenAltEncodingNoConverter(t *testing.T) {\n\td := NewDecoder(strings.NewReader(testInputAltEncoding))\n\ttoken, err := d.RawToken()\n\tif token == nil {\n\t\tt.Fatalf(\"expected a token on first RawToken call\")\n\t}\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\ttoken, err = d.RawToken()\n\tif token != nil {\n\t\tt.Errorf(\"expected a nil token; got %#v\", token)\n\t}\n\tif err == nil {\n\t\tt.Fatalf(\"expected an error on second RawToken call\")\n\t}\n\tconst encoding = \"x-testing-uppercase\"\n\tif !strings.Contains(err.Error(), encoding) {\n\t\tt.Errorf(\"expected error to contain %q; got error: %v\",\n\t\t\tencoding, err)\n\t}\n}\n\nfunc testRawToken(t *testing.T, d *Decoder, rawTokens []Token) {\n\tfor i, want := range rawTokens {\n\t\thave, err := d.RawToken()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"token %d: unexpected error: %s\", i, err)\n\t\t}\n\t\tif !reflect.DeepEqual(have, want) {\n\t\t\tvar shave, swant string\n\t\t\tif _, ok := have.(CharData); ok {\n\t\t\t\tshave = fmt.Sprintf(\"CharData(%q)\", have)\n\t\t\t} else {\n\t\t\t\tshave = fmt.Sprintf(\"%#v\", have)\n\t\t\t}\n\t\t\tif _, ok := want.(CharData); ok {\n\t\t\t\tswant = fmt.Sprintf(\"CharData(%q)\", want)\n\t\t\t} else {\n\t\t\t\tswant = fmt.Sprintf(\"%#v\", want)\n\t\t\t}\n\t\t\tt.Errorf(\"token %d = %s, want %s\", i, shave, swant)\n\t\t}\n\t}\n}\n\n// Ensure that directives (specifically !DOCTYPE) include the complete\n// text of any nested directives, noting that < and > do not change\n// nesting depth if they are in single or double quotes.\n\nvar nestedDirectivesInput = `\n<!DOCTYPE [<!ENTITY rdf \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">]>\n<!DOCTYPE [<!ENTITY xlt \">\">]>\n<!DOCTYPE [<!ENTITY xlt \"<\">]>\n<!DOCTYPE [<!ENTITY xlt '>'>]>\n<!DOCTYPE [<!ENTITY xlt '<'>]>\n<!DOCTYPE [<!ENTITY xlt '\">'>]>\n<!DOCTYPE [<!ENTITY xlt \"'<\">]>\n`\n\nvar nestedDirectivesTokens = []Token{\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY rdf \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">]`),\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY xlt \">\">]`),\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY xlt \"<\">]`),\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY xlt 
'>'>]`),\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY xlt '<'>]`),\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY xlt '\">'>]`),\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY xlt \"'<\">]`),\n\tCharData(\"\\n\"),\n}\n\nfunc TestNestedDirectives(t *testing.T) {\n\td := NewDecoder(strings.NewReader(nestedDirectivesInput))\n\n\tfor i, want := range nestedDirectivesTokens {\n\t\thave, err := d.Token()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"token %d: unexpected error: %s\", i, err)\n\t\t}\n\t\tif !reflect.DeepEqual(have, want) {\n\t\t\tt.Errorf(\"token %d = %#v want %#v\", i, have, want)\n\t\t}\n\t}\n}\n\nfunc TestToken(t *testing.T) {\n\td := NewDecoder(strings.NewReader(testInput))\n\td.Entity = testEntity\n\n\tfor i, want := range cookedTokens {\n\t\thave, err := d.Token()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"token %d: unexpected error: %s\", i, err)\n\t\t}\n\t\tif !reflect.DeepEqual(have, want) {\n\t\t\tt.Errorf(\"token %d = %#v want %#v\", i, have, want)\n\t\t}\n\t}\n}\n\nfunc TestSyntax(t *testing.T) {\n\tfor i := range xmlInput {\n\t\td := NewDecoder(strings.NewReader(xmlInput[i]))\n\t\tvar err error\n\t\tfor _, err = d.Token(); err == nil; _, err = d.Token() {\n\t\t}\n\t\tif _, ok := err.(*SyntaxError); !ok {\n\t\t\tt.Fatalf(`xmlInput \"%s\": expected SyntaxError not received`, xmlInput[i])\n\t\t}\n\t}\n}\n\ntype allScalars struct {\n\tTrue1     bool\n\tTrue2     bool\n\tFalse1    bool\n\tFalse2    bool\n\tInt       int\n\tInt8      int8\n\tInt16     int16\n\tInt32     int32\n\tInt64     int64\n\tUint      int\n\tUint8     uint8\n\tUint16    uint16\n\tUint32    uint32\n\tUint64    uint64\n\tUintptr   uintptr\n\tFloat32   float32\n\tFloat64   float64\n\tString    string\n\tPtrString *string\n}\n\nvar all = allScalars{\n\tTrue1:     true,\n\tTrue2:     true,\n\tFalse1:    false,\n\tFalse2:    false,\n\tInt:       1,\n\tInt8:      -2,\n\tInt16:     3,\n\tInt32:     -4,\n\tInt64:     5,\n\tUint:      6,\n\tUint8:     7,\n\tUint16:    
8,\n\tUint32:    9,\n\tUint64:    10,\n\tUintptr:   11,\n\tFloat32:   13.0,\n\tFloat64:   14.0,\n\tString:    \"15\",\n\tPtrString: &sixteen,\n}\n\nvar sixteen = \"16\"\n\nconst testScalarsInput = `<allscalars>\n\t<True1>true</True1>\n\t<True2>1</True2>\n\t<False1>false</False1>\n\t<False2>0</False2>\n\t<Int>1</Int>\n\t<Int8>-2</Int8>\n\t<Int16>3</Int16>\n\t<Int32>-4</Int32>\n\t<Int64>5</Int64>\n\t<Uint>6</Uint>\n\t<Uint8>7</Uint8>\n\t<Uint16>8</Uint16>\n\t<Uint32>9</Uint32>\n\t<Uint64>10</Uint64>\n\t<Uintptr>11</Uintptr>\n\t<Float>12.0</Float>\n\t<Float32>13.0</Float32>\n\t<Float64>14.0</Float64>\n\t<String>15</String>\n\t<PtrString>16</PtrString>\n</allscalars>`\n\nfunc TestAllScalars(t *testing.T) {\n\tvar a allScalars\n\terr := Unmarshal([]byte(testScalarsInput), &a)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !reflect.DeepEqual(a, all) {\n\t\tt.Errorf(\"have %+v want %+v\", a, all)\n\t}\n}\n\ntype item struct {\n\tField_a string\n}\n\nfunc TestIssue569(t *testing.T) {\n\tdata := `<item><Field_a>abcd</Field_a></item>`\n\tvar i item\n\terr := Unmarshal([]byte(data), &i)\n\n\tif err != nil || i.Field_a != \"abcd\" {\n\t\tt.Fatal(\"Expecting abcd\")\n\t}\n}\n\nfunc TestUnquotedAttrs(t *testing.T) {\n\tdata := \"<tag attr=azAZ09:-_\\t>\"\n\td := NewDecoder(strings.NewReader(data))\n\td.Strict = false\n\ttoken, err := d.Token()\n\tif _, ok := err.(*SyntaxError); ok {\n\t\tt.Errorf(\"Unexpected error: %v\", err)\n\t}\n\tif token.(StartElement).Name.Local != \"tag\" {\n\t\tt.Errorf(\"Unexpected tag name: %v\", token.(StartElement).Name.Local)\n\t}\n\tattr := token.(StartElement).Attr[0]\n\tif attr.Value != \"azAZ09:-_\" {\n\t\tt.Errorf(\"Unexpected attribute value: %v\", attr.Value)\n\t}\n\tif attr.Name.Local != \"attr\" {\n\t\tt.Errorf(\"Unexpected attribute name: %v\", attr.Name.Local)\n\t}\n}\n\nfunc TestValuelessAttrs(t *testing.T) {\n\ttests := [][3]string{\n\t\t{\"<p nowrap>\", \"p\", \"nowrap\"},\n\t\t{\"<p nowrap >\", \"p\", \"nowrap\"},\n\t\t{\"<input 
checked/>\", \"input\", \"checked\"},\n\t\t{\"<input checked />\", \"input\", \"checked\"},\n\t}\n\tfor _, test := range tests {\n\t\td := NewDecoder(strings.NewReader(test[0]))\n\t\td.Strict = false\n\t\ttoken, err := d.Token()\n\t\tif _, ok := err.(*SyntaxError); ok {\n\t\t\tt.Errorf(\"Unexpected error: %v\", err)\n\t\t}\n\t\tif token.(StartElement).Name.Local != test[1] {\n\t\t\tt.Errorf(\"Unexpected tag name: %v\", token.(StartElement).Name.Local)\n\t\t}\n\t\tattr := token.(StartElement).Attr[0]\n\t\tif attr.Value != test[2] {\n\t\t\tt.Errorf(\"Unexpected attribute value: %v\", attr.Value)\n\t\t}\n\t\tif attr.Name.Local != test[2] {\n\t\t\tt.Errorf(\"Unexpected attribute name: %v\", attr.Name.Local)\n\t\t}\n\t}\n}\n\nfunc TestCopyTokenCharData(t *testing.T) {\n\tdata := []byte(\"same data\")\n\tvar tok1 Token = CharData(data)\n\ttok2 := CopyToken(tok1)\n\tif !reflect.DeepEqual(tok1, tok2) {\n\t\tt.Error(\"CopyToken(CharData) != CharData\")\n\t}\n\tdata[1] = 'o'\n\tif reflect.DeepEqual(tok1, tok2) {\n\t\tt.Error(\"CopyToken(CharData) uses same buffer.\")\n\t}\n}\n\nfunc TestCopyTokenStartElement(t *testing.T) {\n\telt := StartElement{Name{\"\", \"hello\"}, []Attr{{Name{\"\", \"lang\"}, \"en\"}}}\n\tvar tok1 Token = elt\n\ttok2 := CopyToken(tok1)\n\tif tok1.(StartElement).Attr[0].Value != \"en\" {\n\t\tt.Error(\"CopyToken overwrote Attr[0]\")\n\t}\n\tif !reflect.DeepEqual(tok1, tok2) {\n\t\tt.Error(\"CopyToken(StartElement) != StartElement\")\n\t}\n\ttok1.(StartElement).Attr[0] = Attr{Name{\"\", \"lang\"}, \"de\"}\n\tif reflect.DeepEqual(tok1, tok2) {\n\t\tt.Error(\"CopyToken(CharData) uses same buffer.\")\n\t}\n}\n\nfunc TestSyntaxErrorLineNum(t *testing.T) {\n\ttestInput := \"<P>Foo<P>\\n\\n<P>Bar</>\\n\"\n\td := NewDecoder(strings.NewReader(testInput))\n\tvar err error\n\tfor _, err = d.Token(); err == nil; _, err = d.Token() {\n\t}\n\tsynerr, ok := err.(*SyntaxError)\n\tif !ok {\n\t\tt.Error(\"Expected SyntaxError.\")\n\t}\n\tif synerr.Line != 3 
{\n\t\tt.Error(\"SyntaxError didn't have correct line number.\")\n\t}\n}\n\nfunc TestTrailingRawToken(t *testing.T) {\n\tinput := `<FOO></FOO>  `\n\td := NewDecoder(strings.NewReader(input))\n\tvar err error\n\tfor _, err = d.RawToken(); err == nil; _, err = d.RawToken() {\n\t}\n\tif err != io.EOF {\n\t\tt.Fatalf(\"d.RawToken() = _, %v, want _, io.EOF\", err)\n\t}\n}\n\nfunc TestTrailingToken(t *testing.T) {\n\tinput := `<FOO></FOO>  `\n\td := NewDecoder(strings.NewReader(input))\n\tvar err error\n\tfor _, err = d.Token(); err == nil; _, err = d.Token() {\n\t}\n\tif err != io.EOF {\n\t\tt.Fatalf(\"d.Token() = _, %v, want _, io.EOF\", err)\n\t}\n}\n\nfunc TestEntityInsideCDATA(t *testing.T) {\n\tinput := `<test><![CDATA[ &val=foo ]]></test>`\n\td := NewDecoder(strings.NewReader(input))\n\tvar err error\n\tfor _, err = d.Token(); err == nil; _, err = d.Token() {\n\t}\n\tif err != io.EOF {\n\t\tt.Fatalf(\"d.Token() = _, %v, want _, io.EOF\", err)\n\t}\n}\n\nvar characterTests = []struct {\n\tin  string\n\terr string\n}{\n\t{\"\\x12<doc/>\", \"illegal character code U+0012\"},\n\t{\"<?xml version=\\\"1.0\\\"?>\\x0b<doc/>\", \"illegal character code U+000B\"},\n\t{\"\\xef\\xbf\\xbe<doc/>\", \"illegal character code U+FFFE\"},\n\t{\"<?xml version=\\\"1.0\\\"?><doc>\\r\\n<hiya/>\\x07<toots/></doc>\", \"illegal character code U+0007\"},\n\t{\"<?xml version=\\\"1.0\\\"?><doc \\x12='value'>what's up</doc>\", \"expected attribute name in element\"},\n\t{\"<doc>&abc\\x01;</doc>\", \"invalid character entity &abc (no semicolon)\"},\n\t{\"<doc>&\\x01;</doc>\", \"invalid character entity & (no semicolon)\"},\n\t{\"<doc>&\\xef\\xbf\\xbe;</doc>\", \"invalid character entity &\\uFFFE;\"},\n\t{\"<doc>&hello;</doc>\", \"invalid character entity &hello;\"},\n}\n\nfunc TestDisallowedCharacters(t *testing.T) {\n\n\tfor i, tt := range characterTests {\n\t\td := NewDecoder(strings.NewReader(tt.in))\n\t\tvar err error\n\n\t\tfor err == nil {\n\t\t\t_, err = d.Token()\n\t\t}\n\t\tsynerr, ok 
:= err.(*SyntaxError)\n\t\tif !ok {\n\t\t\tt.Fatalf(\"input %d d.Token() = _, %v, want _, *SyntaxError\", i, err)\n\t\t}\n\t\tif synerr.Msg != tt.err {\n\t\t\tt.Fatalf(\"input %d synerr.Msg wrong: want %q, got %q\", i, tt.err, synerr.Msg)\n\t\t}\n\t}\n}\n\ntype procInstEncodingTest struct {\n\texpect, got string\n}\n\nvar procInstTests = []struct {\n\tinput, expect string\n}{\n\t{`version=\"1.0\" encoding=\"utf-8\"`, \"utf-8\"},\n\t{`version=\"1.0\" encoding='utf-8'`, \"utf-8\"},\n\t{`version=\"1.0\" encoding='utf-8' `, \"utf-8\"},\n\t{`version=\"1.0\" encoding=utf-8`, \"\"},\n\t{`encoding=\"FOO\" `, \"FOO\"},\n}\n\nfunc TestProcInstEncoding(t *testing.T) {\n\tfor _, test := range procInstTests {\n\t\tgot := procInstEncoding(test.input)\n\t\tif got != test.expect {\n\t\t\tt.Errorf(\"procInstEncoding(%q) = %q; want %q\", test.input, got, test.expect)\n\t\t}\n\t}\n}\n\n// Ensure that directives with comments include the complete\n// text of any nested directives.\n\nvar directivesWithCommentsInput = `\n<!DOCTYPE [<!-- a comment --><!ENTITY rdf \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">]>\n<!DOCTYPE [<!ENTITY go \"Golang\"><!-- a comment-->]>\n<!DOCTYPE <!-> <!> <!----> <!-->--> <!--->--> [<!ENTITY go \"Golang\"><!-- a comment-->]>\n`\n\nvar directivesWithCommentsTokens = []Token{\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY rdf \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">]`),\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY go \"Golang\">]`),\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE <!-> <!>    [<!ENTITY go \"Golang\">]`),\n\tCharData(\"\\n\"),\n}\n\nfunc TestDirectivesWithComments(t *testing.T) {\n\td := NewDecoder(strings.NewReader(directivesWithCommentsInput))\n\n\tfor i, want := range directivesWithCommentsTokens {\n\t\thave, err := d.Token()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"token %d: unexpected error: %s\", i, err)\n\t\t}\n\t\tif !reflect.DeepEqual(have, want) {\n\t\t\tt.Errorf(\"token %d = %#v want %#v\", i, have, 
want)\n\t\t}\n\t}\n}\n\n// Writer whose Write method always returns an error.\ntype errWriter struct{}\n\nfunc (errWriter) Write(p []byte) (n int, err error) { return 0, fmt.Errorf(\"unwritable\") }\n\nfunc TestEscapeTextIOErrors(t *testing.T) {\n\texpectErr := \"unwritable\"\n\terr := EscapeText(errWriter{}, []byte{'A'})\n\n\tif err == nil || err.Error() != expectErr {\n\t\tt.Errorf(\"have %v, want %v\", err, expectErr)\n\t}\n}\n\nfunc TestEscapeTextInvalidChar(t *testing.T) {\n\tinput := []byte(\"A \\x00 terminated string.\")\n\texpected := \"A \\uFFFD terminated string.\"\n\n\tbuff := new(bytes.Buffer)\n\tif err := EscapeText(buff, input); err != nil {\n\t\tt.Fatalf(\"have %v, want nil\", err)\n\t}\n\ttext := buff.String()\n\n\tif text != expected {\n\t\tt.Errorf(\"have %v, want %v\", text, expected)\n\t}\n}\n\nfunc TestIssue5880(t *testing.T) {\n\ttype T []byte\n\tdata, err := Marshal(T{192, 168, 0, 1})\n\tif err != nil {\n\t\tt.Errorf(\"Marshal error: %v\", err)\n\t}\n\tif !utf8.Valid(data) {\n\t\tt.Errorf(\"Marshal generated invalid UTF-8: %x\", data)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/.gitattributes",
    "content": "# Treat all files in this repo as binary, with no git magic updating\n# line endings. Windows users contributing to Go will need to use a\n# modern version of git and editors capable of LF line endings.\n#\n# We'll prevent accidental CRLF line endings from entering the repo\n# via the git-review gofmt checks.\n#\n# See golang.org/issue/9281\n\n* -text\n"
  },
  {
    "path": "vendor/golang.org/x/net/.gitignore",
    "content": "# Add no patterns to .hgignore except for files generated by the build.\nlast-change\n"
  },
  {
    "path": "vendor/golang.org/x/net/AUTHORS",
    "content": "# This source code refers to The Go Authors for copyright purposes.\n# The master list of authors is in the main Go distribution,\n# visible at http://tip.golang.org/AUTHORS.\n"
  },
  {
    "path": "vendor/golang.org/x/net/CONTRIBUTING.md",
    "content": "# Contributing to Go\n\nGo is an open source project.\n\nIt is the work of hundreds of contributors. We appreciate your help!\n\n\n## Filing issues\n\nWhen [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:\n\n1. What version of Go are you using (`go version`)?\n2. What operating system and processor architecture are you using?\n3. What did you do?\n4. What did you expect to see?\n5. What did you see instead?\n\nGeneral questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.\nThe gophers there will answer or ask you to file an issue if you've tripped over a bug.\n\n## Contributing code\n\nPlease read the [Contribution Guidelines](https://golang.org/doc/contribute.html)\nbefore sending patches.\n\n**We do not accept GitHub pull requests**\n(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).\n\nUnless otherwise noted, the Go source files are distributed under\nthe BSD-style license found in the LICENSE file.\n\n"
  },
  {
    "path": "vendor/golang.org/x/net/CONTRIBUTORS",
    "content": "# This source code was written by the Go contributors.\n# The master list of contributors is in the main Go distribution,\n# visible at http://tip.golang.org/CONTRIBUTORS.\n"
  },
  {
    "path": "vendor/golang.org/x/net/LICENSE",
    "content": "Copyright (c) 2009 The Go Authors. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n   * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n   * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n   * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "vendor/golang.org/x/net/PATENTS",
    "content": "Additional IP Rights Grant (Patents)\n\n\"This implementation\" means the copyrightable works distributed by\nGoogle as part of the Go project.\n\nGoogle hereby grants to You a perpetual, worldwide, non-exclusive,\nno-charge, royalty-free, irrevocable (except as stated in this section)\npatent license to make, have made, use, offer to sell, sell, import,\ntransfer and otherwise run, modify and propagate the contents of this\nimplementation of Go, where such license applies only to those patent\nclaims, both currently owned or controlled by Google and acquired in\nthe future, licensable by Google that are necessarily infringed by this\nimplementation of Go.  This grant does not include claims that would be\ninfringed only as a consequence of further modification of this\nimplementation.  If you or your agent or exclusive licensee institute or\norder or agree to the institution of patent litigation against any\nentity (including a cross-claim or counterclaim in a lawsuit) alleging\nthat this implementation of Go or any code incorporated within this\nimplementation of Go constitutes direct or contributory patent\ninfringement, or inducement of patent infringement, then any patent\nrights granted to you under this License for this implementation of Go\nshall terminate as of the date such litigation is filed.\n"
  },
  {
    "path": "vendor/golang.org/x/net/README.md",
    "content": "# Go Networking\n\nThis repository holds supplementary Go networking libraries.\n\n## Download/Install\n\nThe easiest way to install is to run `go get -u golang.org/x/net`. You can\nalso manually git clone the repository to `$GOPATH/src/golang.org/x/net`.\n\n## Report Issues / Send Patches\n\nThis repository uses Gerrit for code changes. To learn how to submit\nchanges to this repository, see https://golang.org/doc/contribute.html.\nThe main issue tracker for the net repository is located at\nhttps://github.com/golang/go/issues. Prefix your issue with \"x/net:\" in the\nsubject line, so it is easy to find.\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/asm.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage bpf\n\nimport \"fmt\"\n\n// Assemble converts insts into raw instructions suitable for loading\n// into a BPF virtual machine.\n//\n// Currently, no optimization is attempted, the assembled program flow\n// is exactly as provided.\nfunc Assemble(insts []Instruction) ([]RawInstruction, error) {\n\tret := make([]RawInstruction, len(insts))\n\tvar err error\n\tfor i, inst := range insts {\n\t\tret[i], err = inst.Assemble()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"assembling instruction %d: %s\", i+1, err)\n\t\t}\n\t}\n\treturn ret, nil\n}\n\n// Disassemble attempts to parse raw back into\n// Instructions. Unrecognized RawInstructions are assumed to be an\n// extension not implemented by this package, and are passed through\n// unchanged to the output. The allDecoded value reports whether insts\n// contains no RawInstructions.\nfunc Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) {\n\tinsts = make([]Instruction, len(raw))\n\tallDecoded = true\n\tfor i, r := range raw {\n\t\tinsts[i] = r.Disassemble()\n\t\tif _, ok := insts[i].(RawInstruction); ok {\n\t\t\tallDecoded = false\n\t\t}\n\t}\n\treturn insts, allDecoded\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/constants.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage bpf\n\n// A Register is a register of the BPF virtual machine.\ntype Register uint16\n\nconst (\n\t// RegA is the accumulator register. RegA is always the\n\t// destination register of ALU operations.\n\tRegA Register = iota\n\t// RegX is the indirection register, used by LoadIndirect\n\t// operations.\n\tRegX\n)\n\n// An ALUOp is an arithmetic or logic operation.\ntype ALUOp uint16\n\n// ALU binary operation types.\nconst (\n\tALUOpAdd ALUOp = iota << 4\n\tALUOpSub\n\tALUOpMul\n\tALUOpDiv\n\tALUOpOr\n\tALUOpAnd\n\tALUOpShiftLeft\n\tALUOpShiftRight\n\taluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type.\n\tALUOpMod\n\tALUOpXor\n)\n\n// A JumpTest is a comparison operator used in conditional jumps.\ntype JumpTest uint16\n\n// Supported operators for conditional jumps.\nconst (\n\t// K == A\n\tJumpEqual JumpTest = iota\n\t// K != A\n\tJumpNotEqual\n\t// K > A\n\tJumpGreaterThan\n\t// K < A\n\tJumpLessThan\n\t// K >= A\n\tJumpGreaterOrEqual\n\t// K <= A\n\tJumpLessOrEqual\n\t// K & A != 0\n\tJumpBitsSet\n\t// K & A == 0\n\tJumpBitsNotSet\n)\n\n// An Extension is a function call provided by the kernel that\n// performs advanced operations that are expensive or impossible\n// within the BPF virtual machine.\n//\n// Extensions are only implemented by the Linux kernel.\n//\n// TODO: should we prune this list? 
Some of these extensions seem\n// either broken or near-impossible to use correctly, whereas other\n// (len, random, ifindex) are quite useful.\ntype Extension int\n\n// Extension functions available in the Linux kernel.\nconst (\n\t// extOffset is the negative maximum number of instructions used\n\t// to load instructions by overloading the K argument.\n\textOffset = -0x1000\n\t// ExtLen returns the length of the packet.\n\tExtLen Extension = 1\n\t// ExtProto returns the packet's L3 protocol type.\n\tExtProto Extension = 0\n\t// ExtType returns the packet's type (skb->pkt_type in the kernel)\n\t//\n\t// TODO: better documentation. How nice an API do we want to\n\t// provide for these esoteric extensions?\n\tExtType Extension = 4\n\t// ExtPayloadOffset returns the offset of the packet payload, or\n\t// the first protocol header that the kernel does not know how to\n\t// parse.\n\tExtPayloadOffset Extension = 52\n\t// ExtInterfaceIndex returns the index of the interface on which\n\t// the packet was received.\n\tExtInterfaceIndex Extension = 8\n\t// ExtNetlinkAttr returns the netlink attribute of type X at\n\t// offset A.\n\tExtNetlinkAttr Extension = 12\n\t// ExtNetlinkAttrNested returns the nested netlink attribute of\n\t// type X at offset A.\n\tExtNetlinkAttrNested Extension = 16\n\t// ExtMark returns the packet's mark value.\n\tExtMark Extension = 20\n\t// ExtQueue returns the packet's assigned hardware queue.\n\tExtQueue Extension = 24\n\t// ExtLinkLayerType returns the packet's hardware address type\n\t// (e.g. 
Ethernet, Infiniband).\n\tExtLinkLayerType Extension = 28\n\t// ExtRXHash returns the packets receive hash.\n\t//\n\t// TODO: figure out what this rxhash actually is.\n\tExtRXHash Extension = 32\n\t// ExtCPUID returns the ID of the CPU processing the current\n\t// packet.\n\tExtCPUID Extension = 36\n\t// ExtVLANTag returns the packet's VLAN tag.\n\tExtVLANTag Extension = 44\n\t// ExtVLANTagPresent returns non-zero if the packet has a VLAN\n\t// tag.\n\t//\n\t// TODO: I think this might be a lie: it reads bit 0x1000 of the\n\t// VLAN header, which changed meaning in recent revisions of the\n\t// spec - this extension may now return meaningless information.\n\tExtVLANTagPresent Extension = 48\n\t// ExtVLANProto returns 0x8100 if the frame has a VLAN header,\n\t// 0x88a8 if the frame has a \"Q-in-Q\" double VLAN header, or some\n\t// other value if no VLAN information is present.\n\tExtVLANProto Extension = 60\n\t// ExtRand returns a uniformly random uint32.\n\tExtRand Extension = 56\n)\n\n// The following gives names to various bit patterns used in opcode construction.\n\nconst (\n\topMaskCls uint16 = 0x7\n\t// opClsLoad masks\n\topMaskLoadDest  = 0x01\n\topMaskLoadWidth = 0x18\n\topMaskLoadMode  = 0xe0\n\t// opClsALU\n\topMaskOperandSrc = 0x08\n\topMaskOperator   = 0xf0\n\t// opClsJump\n\topMaskJumpConst = 0x0f\n\topMaskJumpCond  = 0xf0\n)\n\nconst (\n\t// +---------------+-----------------+---+---+---+\n\t// | AddrMode (3b) | LoadWidth (2b)  | 0 | 0 | 0 |\n\t// +---------------+-----------------+---+---+---+\n\topClsLoadA uint16 = iota\n\t// +---------------+-----------------+---+---+---+\n\t// | AddrMode (3b) | LoadWidth (2b)  | 0 | 0 | 1 |\n\t// +---------------+-----------------+---+---+---+\n\topClsLoadX\n\t// +---+---+---+---+---+---+---+---+\n\t// | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |\n\t// +---+---+---+---+---+---+---+---+\n\topClsStoreA\n\t// +---+---+---+---+---+---+---+---+\n\t// | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |\n\t// 
+---+---+---+---+---+---+---+---+\n\topClsStoreX\n\t// +---------------+-----------------+---+---+---+\n\t// | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 |\n\t// +---------------+-----------------+---+---+---+\n\topClsALU\n\t// +-----------------------------+---+---+---+---+\n\t// |      TestOperator (4b)      | 0 | 1 | 0 | 1 |\n\t// +-----------------------------+---+---+---+---+\n\topClsJump\n\t// +---+-------------------------+---+---+---+---+\n\t// | 0 | 0 | 0 |   RetSrc (1b)   | 0 | 1 | 1 | 0 |\n\t// +---+-------------------------+---+---+---+---+\n\topClsReturn\n\t// +---+-------------------------+---+---+---+---+\n\t// | 0 | 0 | 0 |  TXAorTAX (1b)  | 0 | 1 | 1 | 1 |\n\t// +---+-------------------------+---+---+---+---+\n\topClsMisc\n)\n\nconst (\n\topAddrModeImmediate uint16 = iota << 5\n\topAddrModeAbsolute\n\topAddrModeIndirect\n\topAddrModeScratch\n\topAddrModePacketLen // actually an extension, not an addressing mode.\n\topAddrModeMemShift\n)\n\nconst (\n\topLoadWidth4 uint16 = iota << 3\n\topLoadWidth2\n\topLoadWidth1\n)\n\n// Operator defined by ALUOp*\n\nconst (\n\topALUSrcConstant uint16 = iota << 3\n\topALUSrcX\n)\n\nconst (\n\topJumpAlways = iota << 4\n\topJumpEqual\n\topJumpGT\n\topJumpGE\n\topJumpSet\n)\n\nconst (\n\topRetSrcConstant uint16 = iota << 4\n\topRetSrcA\n)\n\nconst (\n\topMiscTAX = 0x00\n\topMiscTXA = 0x80\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/doc.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n/*\n\nPackage bpf implements marshaling and unmarshaling of programs for the\nBerkeley Packet Filter virtual machine, and provides a Go implementation\nof the virtual machine.\n\nBPF's main use is to specify a packet filter for network taps, so that\nthe kernel doesn't have to expensively copy every packet it sees to\nuserspace. However, it's been repurposed to other areas where running\nuser code in-kernel is needed. For example, Linux's seccomp uses BPF\nto apply security policies to system calls. For simplicity, this\ndocumentation refers only to packets, but other uses of BPF have their\nown data payloads.\n\nBPF programs run in a restricted virtual machine. It has almost no\naccess to kernel functions, and while conditional branches are\nallowed, they can only jump forwards, to guarantee that there are no\ninfinite loops.\n\nThe virtual machine\n\nThe BPF VM is an accumulator machine. Its main register, called\nregister A, is an implicit source and destination in all arithmetic\nand logic operations. The machine also has 16 scratch registers for\ntemporary storage, and an indirection register (register X) for\nindirect memory access. All registers are 32 bits wide.\n\nEach run of a BPF program is given one packet, which is placed in the\nVM's read-only \"main memory\". LoadAbsolute and LoadIndirect\ninstructions can fetch up to 32 bits at a time into register A for\nexamination.\n\nThe goal of a BPF program is to produce and return a verdict (uint32),\nwhich tells the kernel what to do with the packet. In the context of\npacket filtering, the returned value is the number of bytes of the\npacket to forward to userspace, or 0 to ignore the packet. 
Other\ncontexts like seccomp define their own return values.\n\nIn order to simplify programs, attempts to read past the end of the\npacket terminate the program execution with a verdict of 0 (ignore\npacket). This means that the vast majority of BPF programs don't need\nto do any explicit bounds checking.\n\nIn addition to the bytes of the packet, some BPF programs have access\nto extensions, which are essentially calls to kernel utility\nfunctions. Currently, the only extensions supported by this package\nare the Linux packet filter extensions.\n\nExamples\n\nThis packet filter selects all ARP packets.\n\n\tbpf.Assemble([]bpf.Instruction{\n\t\t// Load \"EtherType\" field from the ethernet header.\n\t\tbpf.LoadAbsolute{Off: 12, Size: 2},\n\t\t// Skip over the next instruction if EtherType is not ARP.\n\t\tbpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},\n\t\t// Verdict is \"send up to 4k of the packet to userspace.\"\n\t\tbpf.RetConstant{Val: 4096},\n\t\t// Verdict is \"ignore packet.\"\n\t\tbpf.RetConstant{Val: 0},\n\t})\n\nThis packet filter captures a random 1% sample of traffic.\n\n\tbpf.Assemble([]bpf.Instruction{\n\t\t// Get a 32-bit random number from the Linux kernel.\n\t\tbpf.LoadExtension{Num: bpf.ExtRand},\n\t\t// 1% dice roll?\n\t\tbpf.JumpIf{Cond: bpf.JumpLessThan, Val: 2^32/100, SkipFalse: 1},\n\t\t// Capture.\n\t\tbpf.RetConstant{Val: 4096},\n\t\t// Ignore.\n\t\tbpf.RetConstant{Val: 0},\n\t})\n\n*/\npackage bpf // import \"golang.org/x/net/bpf\"\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/instructions.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage bpf\n\nimport \"fmt\"\n\n// An Instruction is one instruction executed by the BPF virtual\n// machine.\ntype Instruction interface {\n\t// Assemble assembles the Instruction into a RawInstruction.\n\tAssemble() (RawInstruction, error)\n}\n\n// A RawInstruction is a raw BPF virtual machine instruction.\ntype RawInstruction struct {\n\t// Operation to execute.\n\tOp uint16\n\t// For conditional jump instructions, the number of instructions\n\t// to skip if the condition is true/false.\n\tJt uint8\n\tJf uint8\n\t// Constant parameter. The meaning depends on the Op.\n\tK uint32\n}\n\n// Assemble implements the Instruction Assemble method.\nfunc (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil }\n\n// Disassemble parses ri into an Instruction and returns it. If ri is\n// not recognized by this package, ri itself is returned.\nfunc (ri RawInstruction) Disassemble() Instruction {\n\tswitch ri.Op & opMaskCls {\n\tcase opClsLoadA, opClsLoadX:\n\t\treg := Register(ri.Op & opMaskLoadDest)\n\t\tsz := 0\n\t\tswitch ri.Op & opMaskLoadWidth {\n\t\tcase opLoadWidth4:\n\t\t\tsz = 4\n\t\tcase opLoadWidth2:\n\t\t\tsz = 2\n\t\tcase opLoadWidth1:\n\t\t\tsz = 1\n\t\tdefault:\n\t\t\treturn ri\n\t\t}\n\t\tswitch ri.Op & opMaskLoadMode {\n\t\tcase opAddrModeImmediate:\n\t\t\tif sz != 4 {\n\t\t\t\treturn ri\n\t\t\t}\n\t\t\treturn LoadConstant{Dst: reg, Val: ri.K}\n\t\tcase opAddrModeScratch:\n\t\t\tif sz != 4 || ri.K > 15 {\n\t\t\t\treturn ri\n\t\t\t}\n\t\t\treturn LoadScratch{Dst: reg, N: int(ri.K)}\n\t\tcase opAddrModeAbsolute:\n\t\t\tif ri.K > extOffset+0xffffffff {\n\t\t\t\treturn LoadExtension{Num: Extension(-extOffset + ri.K)}\n\t\t\t}\n\t\t\treturn LoadAbsolute{Size: sz, Off: ri.K}\n\t\tcase opAddrModeIndirect:\n\t\t\treturn LoadIndirect{Size: sz, Off: ri.K}\n\t\tcase 
opAddrModePacketLen:\n\t\t\tif sz != 4 {\n\t\t\t\treturn ri\n\t\t\t}\n\t\t\treturn LoadExtension{Num: ExtLen}\n\t\tcase opAddrModeMemShift:\n\t\t\treturn LoadMemShift{Off: ri.K}\n\t\tdefault:\n\t\t\treturn ri\n\t\t}\n\n\tcase opClsStoreA:\n\t\tif ri.Op != opClsStoreA || ri.K > 15 {\n\t\t\treturn ri\n\t\t}\n\t\treturn StoreScratch{Src: RegA, N: int(ri.K)}\n\n\tcase opClsStoreX:\n\t\tif ri.Op != opClsStoreX || ri.K > 15 {\n\t\t\treturn ri\n\t\t}\n\t\treturn StoreScratch{Src: RegX, N: int(ri.K)}\n\n\tcase opClsALU:\n\t\tswitch op := ALUOp(ri.Op & opMaskOperator); op {\n\t\tcase ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor:\n\t\t\tif ri.Op&opMaskOperandSrc != 0 {\n\t\t\t\treturn ALUOpX{Op: op}\n\t\t\t}\n\t\t\treturn ALUOpConstant{Op: op, Val: ri.K}\n\t\tcase aluOpNeg:\n\t\t\treturn NegateA{}\n\t\tdefault:\n\t\t\treturn ri\n\t\t}\n\n\tcase opClsJump:\n\t\tif ri.Op&opMaskJumpConst != opClsJump {\n\t\t\treturn ri\n\t\t}\n\t\tswitch ri.Op & opMaskJumpCond {\n\t\tcase opJumpAlways:\n\t\t\treturn Jump{Skip: ri.K}\n\t\tcase opJumpEqual:\n\t\t\tif ri.Jt == 0 {\n\t\t\t\treturn JumpIf{\n\t\t\t\t\tCond:      JumpNotEqual,\n\t\t\t\t\tVal:       ri.K,\n\t\t\t\t\tSkipTrue:  ri.Jf,\n\t\t\t\t\tSkipFalse: 0,\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn JumpIf{\n\t\t\t\tCond:      JumpEqual,\n\t\t\t\tVal:       ri.K,\n\t\t\t\tSkipTrue:  ri.Jt,\n\t\t\t\tSkipFalse: ri.Jf,\n\t\t\t}\n\t\tcase opJumpGT:\n\t\t\tif ri.Jt == 0 {\n\t\t\t\treturn JumpIf{\n\t\t\t\t\tCond:      JumpLessOrEqual,\n\t\t\t\t\tVal:       ri.K,\n\t\t\t\t\tSkipTrue:  ri.Jf,\n\t\t\t\t\tSkipFalse: 0,\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn JumpIf{\n\t\t\t\tCond:      JumpGreaterThan,\n\t\t\t\tVal:       ri.K,\n\t\t\t\tSkipTrue:  ri.Jt,\n\t\t\t\tSkipFalse: ri.Jf,\n\t\t\t}\n\t\tcase opJumpGE:\n\t\t\tif ri.Jt == 0 {\n\t\t\t\treturn JumpIf{\n\t\t\t\t\tCond:      JumpLessThan,\n\t\t\t\t\tVal:       ri.K,\n\t\t\t\t\tSkipTrue:  ri.Jf,\n\t\t\t\t\tSkipFalse: 
0,\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn JumpIf{\n\t\t\t\tCond:      JumpGreaterOrEqual,\n\t\t\t\tVal:       ri.K,\n\t\t\t\tSkipTrue:  ri.Jt,\n\t\t\t\tSkipFalse: ri.Jf,\n\t\t\t}\n\t\tcase opJumpSet:\n\t\t\treturn JumpIf{\n\t\t\t\tCond:      JumpBitsSet,\n\t\t\t\tVal:       ri.K,\n\t\t\t\tSkipTrue:  ri.Jt,\n\t\t\t\tSkipFalse: ri.Jf,\n\t\t\t}\n\t\tdefault:\n\t\t\treturn ri\n\t\t}\n\n\tcase opClsReturn:\n\t\tswitch ri.Op {\n\t\tcase opClsReturn | opRetSrcA:\n\t\t\treturn RetA{}\n\t\tcase opClsReturn | opRetSrcConstant:\n\t\t\treturn RetConstant{Val: ri.K}\n\t\tdefault:\n\t\t\treturn ri\n\t\t}\n\n\tcase opClsMisc:\n\t\tswitch ri.Op {\n\t\tcase opClsMisc | opMiscTAX:\n\t\t\treturn TAX{}\n\t\tcase opClsMisc | opMiscTXA:\n\t\t\treturn TXA{}\n\t\tdefault:\n\t\t\treturn ri\n\t\t}\n\n\tdefault:\n\t\tpanic(\"unreachable\") // switch is exhaustive on the bit pattern\n\t}\n}\n\n// LoadConstant loads Val into register Dst.\ntype LoadConstant struct {\n\tDst Register\n\tVal uint32\n}\n\n// Assemble implements the Instruction Assemble method.\nfunc (a LoadConstant) Assemble() (RawInstruction, error) {\n\treturn assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val)\n}\n\n// String returns the the instruction in assembler notation.\nfunc (a LoadConstant) String() string {\n\tswitch a.Dst {\n\tcase RegA:\n\t\treturn fmt.Sprintf(\"ld #%d\", a.Val)\n\tcase RegX:\n\t\treturn fmt.Sprintf(\"ldx #%d\", a.Val)\n\tdefault:\n\t\treturn fmt.Sprintf(\"unknown instruction: %#v\", a)\n\t}\n}\n\n// LoadScratch loads scratch[N] into register Dst.\ntype LoadScratch struct {\n\tDst Register\n\tN   int // 0-15\n}\n\n// Assemble implements the Instruction Assemble method.\nfunc (a LoadScratch) Assemble() (RawInstruction, error) {\n\tif a.N < 0 || a.N > 15 {\n\t\treturn RawInstruction{}, fmt.Errorf(\"invalid scratch slot %d\", a.N)\n\t}\n\treturn assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N))\n}\n\n// String returns the the instruction in assembler notation.\nfunc (a LoadScratch) String() string 
{\n\tswitch a.Dst {\n\tcase RegA:\n\t\treturn fmt.Sprintf(\"ld M[%d]\", a.N)\n\tcase RegX:\n\t\treturn fmt.Sprintf(\"ldx M[%d]\", a.N)\n\tdefault:\n\t\treturn fmt.Sprintf(\"unknown instruction: %#v\", a)\n\t}\n}\n\n// LoadAbsolute loads packet[Off:Off+Size] as an integer value into\n// register A.\ntype LoadAbsolute struct {\n\tOff  uint32\n\tSize int // 1, 2 or 4\n}\n\n// Assemble implements the Instruction Assemble method.\nfunc (a LoadAbsolute) Assemble() (RawInstruction, error) {\n\treturn assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off)\n}\n\n// String returns the the instruction in assembler notation.\nfunc (a LoadAbsolute) String() string {\n\tswitch a.Size {\n\tcase 1: // byte\n\t\treturn fmt.Sprintf(\"ldb [%d]\", a.Off)\n\tcase 2: // half word\n\t\treturn fmt.Sprintf(\"ldh [%d]\", a.Off)\n\tcase 4: // word\n\t\tif a.Off > extOffset+0xffffffff {\n\t\t\treturn LoadExtension{Num: Extension(a.Off + 0x1000)}.String()\n\t\t}\n\t\treturn fmt.Sprintf(\"ld [%d]\", a.Off)\n\tdefault:\n\t\treturn fmt.Sprintf(\"unknown instruction: %#v\", a)\n\t}\n}\n\n// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value\n// into register A.\ntype LoadIndirect struct {\n\tOff  uint32\n\tSize int // 1, 2 or 4\n}\n\n// Assemble implements the Instruction Assemble method.\nfunc (a LoadIndirect) Assemble() (RawInstruction, error) {\n\treturn assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off)\n}\n\n// String returns the the instruction in assembler notation.\nfunc (a LoadIndirect) String() string {\n\tswitch a.Size {\n\tcase 1: // byte\n\t\treturn fmt.Sprintf(\"ldb [x + %d]\", a.Off)\n\tcase 2: // half word\n\t\treturn fmt.Sprintf(\"ldh [x + %d]\", a.Off)\n\tcase 4: // word\n\t\treturn fmt.Sprintf(\"ld [x + %d]\", a.Off)\n\tdefault:\n\t\treturn fmt.Sprintf(\"unknown instruction: %#v\", a)\n\t}\n}\n\n// LoadMemShift multiplies the first 4 bits of the byte at packet[Off]\n// by 4 and stores the result in register X.\n//\n// This instruction is mainly useful to load into 
X the length of an\n// IPv4 packet header in a single instruction, rather than have to do\n// the arithmetic on the header's first byte by hand.\ntype LoadMemShift struct {\n\tOff uint32\n}\n\n// Assemble implements the Instruction Assemble method.\nfunc (a LoadMemShift) Assemble() (RawInstruction, error) {\n\treturn assembleLoad(RegX, 1, opAddrModeMemShift, a.Off)\n}\n\n// String returns the the instruction in assembler notation.\nfunc (a LoadMemShift) String() string {\n\treturn fmt.Sprintf(\"ldx 4*([%d]&0xf)\", a.Off)\n}\n\n// LoadExtension invokes a linux-specific extension and stores the\n// result in register A.\ntype LoadExtension struct {\n\tNum Extension\n}\n\n// Assemble implements the Instruction Assemble method.\nfunc (a LoadExtension) Assemble() (RawInstruction, error) {\n\tif a.Num == ExtLen {\n\t\treturn assembleLoad(RegA, 4, opAddrModePacketLen, 0)\n\t}\n\treturn assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num))\n}\n\n// String returns the the instruction in assembler notation.\nfunc (a LoadExtension) String() string {\n\tswitch a.Num {\n\tcase ExtLen:\n\t\treturn \"ld #len\"\n\tcase ExtProto:\n\t\treturn \"ld #proto\"\n\tcase ExtType:\n\t\treturn \"ld #type\"\n\tcase ExtPayloadOffset:\n\t\treturn \"ld #poff\"\n\tcase ExtInterfaceIndex:\n\t\treturn \"ld #ifidx\"\n\tcase ExtNetlinkAttr:\n\t\treturn \"ld #nla\"\n\tcase ExtNetlinkAttrNested:\n\t\treturn \"ld #nlan\"\n\tcase ExtMark:\n\t\treturn \"ld #mark\"\n\tcase ExtQueue:\n\t\treturn \"ld #queue\"\n\tcase ExtLinkLayerType:\n\t\treturn \"ld #hatype\"\n\tcase ExtRXHash:\n\t\treturn \"ld #rxhash\"\n\tcase ExtCPUID:\n\t\treturn \"ld #cpu\"\n\tcase ExtVLANTag:\n\t\treturn \"ld #vlan_tci\"\n\tcase ExtVLANTagPresent:\n\t\treturn \"ld #vlan_avail\"\n\tcase ExtVLANProto:\n\t\treturn \"ld #vlan_tpid\"\n\tcase ExtRand:\n\t\treturn \"ld #rand\"\n\tdefault:\n\t\treturn fmt.Sprintf(\"unknown instruction: %#v\", a)\n\t}\n}\n\n// StoreScratch stores register Src into scratch[N].\ntype StoreScratch 
struct {\n\tSrc Register\n\tN   int // 0-15\n}\n\n// Assemble implements the Instruction Assemble method.\nfunc (a StoreScratch) Assemble() (RawInstruction, error) {\n\tif a.N < 0 || a.N > 15 {\n\t\treturn RawInstruction{}, fmt.Errorf(\"invalid scratch slot %d\", a.N)\n\t}\n\tvar op uint16\n\tswitch a.Src {\n\tcase RegA:\n\t\top = opClsStoreA\n\tcase RegX:\n\t\top = opClsStoreX\n\tdefault:\n\t\treturn RawInstruction{}, fmt.Errorf(\"invalid source register %v\", a.Src)\n\t}\n\n\treturn RawInstruction{\n\t\tOp: op,\n\t\tK:  uint32(a.N),\n\t}, nil\n}\n\n// String returns the the instruction in assembler notation.\nfunc (a StoreScratch) String() string {\n\tswitch a.Src {\n\tcase RegA:\n\t\treturn fmt.Sprintf(\"st M[%d]\", a.N)\n\tcase RegX:\n\t\treturn fmt.Sprintf(\"stx M[%d]\", a.N)\n\tdefault:\n\t\treturn fmt.Sprintf(\"unknown instruction: %#v\", a)\n\t}\n}\n\n// ALUOpConstant executes A = A <Op> Val.\ntype ALUOpConstant struct {\n\tOp  ALUOp\n\tVal uint32\n}\n\n// Assemble implements the Instruction Assemble method.\nfunc (a ALUOpConstant) Assemble() (RawInstruction, error) {\n\treturn RawInstruction{\n\t\tOp: opClsALU | opALUSrcConstant | uint16(a.Op),\n\t\tK:  a.Val,\n\t}, nil\n}\n\n// String returns the the instruction in assembler notation.\nfunc (a ALUOpConstant) String() string {\n\tswitch a.Op {\n\tcase ALUOpAdd:\n\t\treturn fmt.Sprintf(\"add #%d\", a.Val)\n\tcase ALUOpSub:\n\t\treturn fmt.Sprintf(\"sub #%d\", a.Val)\n\tcase ALUOpMul:\n\t\treturn fmt.Sprintf(\"mul #%d\", a.Val)\n\tcase ALUOpDiv:\n\t\treturn fmt.Sprintf(\"div #%d\", a.Val)\n\tcase ALUOpMod:\n\t\treturn fmt.Sprintf(\"mod #%d\", a.Val)\n\tcase ALUOpAnd:\n\t\treturn fmt.Sprintf(\"and #%d\", a.Val)\n\tcase ALUOpOr:\n\t\treturn fmt.Sprintf(\"or #%d\", a.Val)\n\tcase ALUOpXor:\n\t\treturn fmt.Sprintf(\"xor #%d\", a.Val)\n\tcase ALUOpShiftLeft:\n\t\treturn fmt.Sprintf(\"lsh #%d\", a.Val)\n\tcase ALUOpShiftRight:\n\t\treturn fmt.Sprintf(\"rsh #%d\", a.Val)\n\tdefault:\n\t\treturn 
fmt.Sprintf(\"unknown instruction: %#v\", a)\n\t}\n}\n\n// ALUOpX executes A = A <Op> X\ntype ALUOpX struct {\n\tOp ALUOp\n}\n\n// Assemble implements the Instruction Assemble method.\nfunc (a ALUOpX) Assemble() (RawInstruction, error) {\n\treturn RawInstruction{\n\t\tOp: opClsALU | opALUSrcX | uint16(a.Op),\n\t}, nil\n}\n\n// String returns the the instruction in assembler notation.\nfunc (a ALUOpX) String() string {\n\tswitch a.Op {\n\tcase ALUOpAdd:\n\t\treturn \"add x\"\n\tcase ALUOpSub:\n\t\treturn \"sub x\"\n\tcase ALUOpMul:\n\t\treturn \"mul x\"\n\tcase ALUOpDiv:\n\t\treturn \"div x\"\n\tcase ALUOpMod:\n\t\treturn \"mod x\"\n\tcase ALUOpAnd:\n\t\treturn \"and x\"\n\tcase ALUOpOr:\n\t\treturn \"or x\"\n\tcase ALUOpXor:\n\t\treturn \"xor x\"\n\tcase ALUOpShiftLeft:\n\t\treturn \"lsh x\"\n\tcase ALUOpShiftRight:\n\t\treturn \"rsh x\"\n\tdefault:\n\t\treturn fmt.Sprintf(\"unknown instruction: %#v\", a)\n\t}\n}\n\n// NegateA executes A = -A.\ntype NegateA struct{}\n\n// Assemble implements the Instruction Assemble method.\nfunc (a NegateA) Assemble() (RawInstruction, error) {\n\treturn RawInstruction{\n\t\tOp: opClsALU | uint16(aluOpNeg),\n\t}, nil\n}\n\n// String returns the the instruction in assembler notation.\nfunc (a NegateA) String() string {\n\treturn fmt.Sprintf(\"neg\")\n}\n\n// Jump skips the following Skip instructions in the program.\ntype Jump struct {\n\tSkip uint32\n}\n\n// Assemble implements the Instruction Assemble method.\nfunc (a Jump) Assemble() (RawInstruction, error) {\n\treturn RawInstruction{\n\t\tOp: opClsJump | opJumpAlways,\n\t\tK:  a.Skip,\n\t}, nil\n}\n\n// String returns the the instruction in assembler notation.\nfunc (a Jump) String() string {\n\treturn fmt.Sprintf(\"ja %d\", a.Skip)\n}\n\n// JumpIf skips the following Skip instructions in the program if A\n// <Cond> Val is true.\ntype JumpIf struct {\n\tCond      JumpTest\n\tVal       uint32\n\tSkipTrue  uint8\n\tSkipFalse uint8\n}\n\n// Assemble implements the Instruction 
Assemble method.\nfunc (a JumpIf) Assemble() (RawInstruction, error) {\n\tvar (\n\t\tcond uint16\n\t\tflip bool\n\t)\n\tswitch a.Cond {\n\tcase JumpEqual:\n\t\tcond = opJumpEqual\n\tcase JumpNotEqual:\n\t\tcond, flip = opJumpEqual, true\n\tcase JumpGreaterThan:\n\t\tcond = opJumpGT\n\tcase JumpLessThan:\n\t\tcond, flip = opJumpGE, true\n\tcase JumpGreaterOrEqual:\n\t\tcond = opJumpGE\n\tcase JumpLessOrEqual:\n\t\tcond, flip = opJumpGT, true\n\tcase JumpBitsSet:\n\t\tcond = opJumpSet\n\tcase JumpBitsNotSet:\n\t\tcond, flip = opJumpSet, true\n\tdefault:\n\t\treturn RawInstruction{}, fmt.Errorf(\"unknown JumpTest %v\", a.Cond)\n\t}\n\tjt, jf := a.SkipTrue, a.SkipFalse\n\tif flip {\n\t\tjt, jf = jf, jt\n\t}\n\treturn RawInstruction{\n\t\tOp: opClsJump | cond,\n\t\tJt: jt,\n\t\tJf: jf,\n\t\tK:  a.Val,\n\t}, nil\n}\n\n// String returns the the instruction in assembler notation.\nfunc (a JumpIf) String() string {\n\tswitch a.Cond {\n\t// K == A\n\tcase JumpEqual:\n\t\treturn conditionalJump(a, \"jeq\", \"jneq\")\n\t// K != A\n\tcase JumpNotEqual:\n\t\treturn fmt.Sprintf(\"jneq #%d,%d\", a.Val, a.SkipTrue)\n\t// K > A\n\tcase JumpGreaterThan:\n\t\treturn conditionalJump(a, \"jgt\", \"jle\")\n\t// K < A\n\tcase JumpLessThan:\n\t\treturn fmt.Sprintf(\"jlt #%d,%d\", a.Val, a.SkipTrue)\n\t// K >= A\n\tcase JumpGreaterOrEqual:\n\t\treturn conditionalJump(a, \"jge\", \"jlt\")\n\t// K <= A\n\tcase JumpLessOrEqual:\n\t\treturn fmt.Sprintf(\"jle #%d,%d\", a.Val, a.SkipTrue)\n\t// K & A != 0\n\tcase JumpBitsSet:\n\t\tif a.SkipFalse > 0 {\n\t\t\treturn fmt.Sprintf(\"jset #%d,%d,%d\", a.Val, a.SkipTrue, a.SkipFalse)\n\t\t}\n\t\treturn fmt.Sprintf(\"jset #%d,%d\", a.Val, a.SkipTrue)\n\t// K & A == 0, there is no assembler instruction for JumpBitNotSet, use JumpBitSet and invert skips\n\tcase JumpBitsNotSet:\n\t\treturn JumpIf{Cond: JumpBitsSet, SkipTrue: a.SkipFalse, SkipFalse: a.SkipTrue, Val: a.Val}.String()\n\tdefault:\n\t\treturn fmt.Sprintf(\"unknown instruction: %#v\", 
a)\n\t}\n}\n\nfunc conditionalJump(inst JumpIf, positiveJump, negativeJump string) string {\n\tif inst.SkipTrue > 0 {\n\t\tif inst.SkipFalse > 0 {\n\t\t\treturn fmt.Sprintf(\"%s #%d,%d,%d\", positiveJump, inst.Val, inst.SkipTrue, inst.SkipFalse)\n\t\t}\n\t\treturn fmt.Sprintf(\"%s #%d,%d\", positiveJump, inst.Val, inst.SkipTrue)\n\t}\n\treturn fmt.Sprintf(\"%s #%d,%d\", negativeJump, inst.Val, inst.SkipFalse)\n}\n\n// RetA exits the BPF program, returning the value of register A.\ntype RetA struct{}\n\n// Assemble implements the Instruction Assemble method.\nfunc (a RetA) Assemble() (RawInstruction, error) {\n\treturn RawInstruction{\n\t\tOp: opClsReturn | opRetSrcA,\n\t}, nil\n}\n\n// String returns the the instruction in assembler notation.\nfunc (a RetA) String() string {\n\treturn fmt.Sprintf(\"ret a\")\n}\n\n// RetConstant exits the BPF program, returning a constant value.\ntype RetConstant struct {\n\tVal uint32\n}\n\n// Assemble implements the Instruction Assemble method.\nfunc (a RetConstant) Assemble() (RawInstruction, error) {\n\treturn RawInstruction{\n\t\tOp: opClsReturn | opRetSrcConstant,\n\t\tK:  a.Val,\n\t}, nil\n}\n\n// String returns the the instruction in assembler notation.\nfunc (a RetConstant) String() string {\n\treturn fmt.Sprintf(\"ret #%d\", a.Val)\n}\n\n// TXA copies the value of register X to register A.\ntype TXA struct{}\n\n// Assemble implements the Instruction Assemble method.\nfunc (a TXA) Assemble() (RawInstruction, error) {\n\treturn RawInstruction{\n\t\tOp: opClsMisc | opMiscTXA,\n\t}, nil\n}\n\n// String returns the the instruction in assembler notation.\nfunc (a TXA) String() string {\n\treturn fmt.Sprintf(\"txa\")\n}\n\n// TAX copies the value of register A to register X.\ntype TAX struct{}\n\n// Assemble implements the Instruction Assemble method.\nfunc (a TAX) Assemble() (RawInstruction, error) {\n\treturn RawInstruction{\n\t\tOp: opClsMisc | opMiscTAX,\n\t}, nil\n}\n\n// String returns the the instruction in assembler 
notation.\nfunc (a TAX) String() string {\n\treturn fmt.Sprintf(\"tax\")\n}\n\nfunc assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) {\n\tvar (\n\t\tcls uint16\n\t\tsz  uint16\n\t)\n\tswitch dst {\n\tcase RegA:\n\t\tcls = opClsLoadA\n\tcase RegX:\n\t\tcls = opClsLoadX\n\tdefault:\n\t\treturn RawInstruction{}, fmt.Errorf(\"invalid target register %v\", dst)\n\t}\n\tswitch loadSize {\n\tcase 1:\n\t\tsz = opLoadWidth1\n\tcase 2:\n\t\tsz = opLoadWidth2\n\tcase 4:\n\t\tsz = opLoadWidth4\n\tdefault:\n\t\treturn RawInstruction{}, fmt.Errorf(\"invalid load byte length %d\", sz)\n\t}\n\treturn RawInstruction{\n\t\tOp: cls | sz | mode,\n\t\tK:  k,\n\t}, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/instructions_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage bpf\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n)\n\n// This is a direct translation of the program in\n// testdata/all_instructions.txt.\nvar allInstructions = []Instruction{\n\tLoadConstant{Dst: RegA, Val: 42},\n\tLoadConstant{Dst: RegX, Val: 42},\n\n\tLoadScratch{Dst: RegA, N: 3},\n\tLoadScratch{Dst: RegX, N: 3},\n\n\tLoadAbsolute{Off: 42, Size: 1},\n\tLoadAbsolute{Off: 42, Size: 2},\n\tLoadAbsolute{Off: 42, Size: 4},\n\n\tLoadIndirect{Off: 42, Size: 1},\n\tLoadIndirect{Off: 42, Size: 2},\n\tLoadIndirect{Off: 42, Size: 4},\n\n\tLoadMemShift{Off: 42},\n\n\tLoadExtension{Num: ExtLen},\n\tLoadExtension{Num: ExtProto},\n\tLoadExtension{Num: ExtType},\n\tLoadExtension{Num: ExtRand},\n\n\tStoreScratch{Src: RegA, N: 3},\n\tStoreScratch{Src: RegX, N: 3},\n\n\tALUOpConstant{Op: ALUOpAdd, Val: 42},\n\tALUOpConstant{Op: ALUOpSub, Val: 42},\n\tALUOpConstant{Op: ALUOpMul, Val: 42},\n\tALUOpConstant{Op: ALUOpDiv, Val: 42},\n\tALUOpConstant{Op: ALUOpOr, Val: 42},\n\tALUOpConstant{Op: ALUOpAnd, Val: 42},\n\tALUOpConstant{Op: ALUOpShiftLeft, Val: 42},\n\tALUOpConstant{Op: ALUOpShiftRight, Val: 42},\n\tALUOpConstant{Op: ALUOpMod, Val: 42},\n\tALUOpConstant{Op: ALUOpXor, Val: 42},\n\n\tALUOpX{Op: ALUOpAdd},\n\tALUOpX{Op: ALUOpSub},\n\tALUOpX{Op: ALUOpMul},\n\tALUOpX{Op: ALUOpDiv},\n\tALUOpX{Op: ALUOpOr},\n\tALUOpX{Op: ALUOpAnd},\n\tALUOpX{Op: ALUOpShiftLeft},\n\tALUOpX{Op: ALUOpShiftRight},\n\tALUOpX{Op: ALUOpMod},\n\tALUOpX{Op: ALUOpXor},\n\n\tNegateA{},\n\n\tJump{Skip: 10},\n\tJumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8, SkipFalse: 9},\n\tJumpIf{Cond: JumpNotEqual, Val: 42, SkipTrue: 8},\n\tJumpIf{Cond: JumpLessThan, Val: 42, SkipTrue: 7},\n\tJumpIf{Cond: JumpLessOrEqual, Val: 42, SkipTrue: 6},\n\tJumpIf{Cond: JumpGreaterThan, Val: 42, 
SkipTrue: 4, SkipFalse: 5},\n\tJumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3, SkipFalse: 4},\n\tJumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2, SkipFalse: 3},\n\n\tTAX{},\n\tTXA{},\n\n\tRetA{},\n\tRetConstant{Val: 42},\n}\nvar allInstructionsExpected = \"testdata/all_instructions.bpf\"\n\n// Check that we produce the same output as the canonical bpf_asm\n// linux kernel tool.\nfunc TestInterop(t *testing.T) {\n\tout, err := Assemble(allInstructions)\n\tif err != nil {\n\t\tt.Fatalf(\"assembly of allInstructions program failed: %s\", err)\n\t}\n\tt.Logf(\"Assembled program is %d instructions long\", len(out))\n\n\tbs, err := ioutil.ReadFile(allInstructionsExpected)\n\tif err != nil {\n\t\tt.Fatalf(\"reading %s: %s\", allInstructionsExpected, err)\n\t}\n\t// First statement is the number of statements, last statement is\n\t// empty. We just ignore both and rely on slice length.\n\tstmts := strings.Split(string(bs), \",\")\n\tif len(stmts)-2 != len(out) {\n\t\tt.Fatalf(\"test program lengths don't match: %s has %d, Go implementation has %d\", allInstructionsExpected, len(stmts)-2, len(allInstructions))\n\t}\n\n\tfor i, stmt := range stmts[1 : len(stmts)-2] {\n\t\tnums := strings.Split(stmt, \" \")\n\t\tif len(nums) != 4 {\n\t\t\tt.Fatalf(\"malformed instruction %d in %s: %s\", i+1, allInstructionsExpected, stmt)\n\t\t}\n\n\t\tactual := out[i]\n\n\t\top, err := strconv.ParseUint(nums[0], 10, 16)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"malformed opcode %s in instruction %d of %s\", nums[0], i+1, allInstructionsExpected)\n\t\t}\n\t\tif actual.Op != uint16(op) {\n\t\t\tt.Errorf(\"opcode mismatch on instruction %d (%#v): got 0x%02x, want 0x%02x\", i+1, allInstructions[i], actual.Op, op)\n\t\t}\n\n\t\tjt, err := strconv.ParseUint(nums[1], 10, 8)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"malformed jt offset %s in instruction %d of %s\", nums[1], i+1, allInstructionsExpected)\n\t\t}\n\t\tif actual.Jt != uint8(jt) {\n\t\t\tt.Errorf(\"jt mismatch on instruction %d (%#v): 
got %d, want %d\", i+1, allInstructions[i], actual.Jt, jt)\n\t\t}\n\n\t\tjf, err := strconv.ParseUint(nums[2], 10, 8)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"malformed jf offset %s in instruction %d of %s\", nums[2], i+1, allInstructionsExpected)\n\t\t}\n\t\tif actual.Jf != uint8(jf) {\n\t\t\tt.Errorf(\"jf mismatch on instruction %d (%#v): got %d, want %d\", i+1, allInstructions[i], actual.Jf, jf)\n\t\t}\n\n\t\tk, err := strconv.ParseUint(nums[3], 10, 32)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"malformed constant %s in instruction %d of %s\", nums[3], i+1, allInstructionsExpected)\n\t\t}\n\t\tif actual.K != uint32(k) {\n\t\t\tt.Errorf(\"constant mismatch on instruction %d (%#v): got %d, want %d\", i+1, allInstructions[i], actual.K, k)\n\t\t}\n\t}\n}\n\n// Check that assembly and disassembly match each other.\nfunc TestAsmDisasm(t *testing.T) {\n\tprog1, err := Assemble(allInstructions)\n\tif err != nil {\n\t\tt.Fatalf(\"assembly of allInstructions program failed: %s\", err)\n\t}\n\tt.Logf(\"Assembled program is %d instructions long\", len(prog1))\n\n\tgot, allDecoded := Disassemble(prog1)\n\tif !allDecoded {\n\t\tt.Errorf(\"Disassemble(Assemble(allInstructions)) produced unrecognized instructions:\")\n\t\tfor i, inst := range got {\n\t\t\tif r, ok := inst.(RawInstruction); ok {\n\t\t\t\tt.Logf(\"  insn %d, %#v --> %#v\", i+1, allInstructions[i], r)\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(allInstructions) != len(got) {\n\t\tt.Fatalf(\"disassembly changed program size: %d insns before, %d insns after\", len(allInstructions), len(got))\n\t}\n\tif !reflect.DeepEqual(allInstructions, got) {\n\t\tt.Errorf(\"program mutated by disassembly:\")\n\t\tfor i := range got {\n\t\t\tif !reflect.DeepEqual(allInstructions[i], got[i]) {\n\t\t\t\tt.Logf(\"  insn %d, s: %#v, p1: %#v, got: %#v\", i+1, allInstructions[i], prog1[i], got[i])\n\t\t\t}\n\t\t}\n\t}\n}\n\ntype InvalidInstruction struct{}\n\nfunc (a InvalidInstruction) Assemble() (RawInstruction, error) {\n\treturn RawInstruction{}, 
fmt.Errorf(\"Invalid Instruction\")\n}\n\nfunc (a InvalidInstruction) String() string {\n\treturn fmt.Sprintf(\"unknown instruction: %#v\", a)\n}\n\nfunc TestString(t *testing.T) {\n\ttestCases := []struct {\n\t\tinstruction Instruction\n\t\tassembler   string\n\t}{\n\t\t{\n\t\t\tinstruction: LoadConstant{Dst: RegA, Val: 42},\n\t\t\tassembler:   \"ld #42\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadConstant{Dst: RegX, Val: 42},\n\t\t\tassembler:   \"ldx #42\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadConstant{Dst: 0xffff, Val: 42},\n\t\t\tassembler:   \"unknown instruction: bpf.LoadConstant{Dst:0xffff, Val:0x2a}\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadScratch{Dst: RegA, N: 3},\n\t\t\tassembler:   \"ld M[3]\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadScratch{Dst: RegX, N: 3},\n\t\t\tassembler:   \"ldx M[3]\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadScratch{Dst: 0xffff, N: 3},\n\t\t\tassembler:   \"unknown instruction: bpf.LoadScratch{Dst:0xffff, N:3}\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadAbsolute{Off: 42, Size: 1},\n\t\t\tassembler:   \"ldb [42]\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadAbsolute{Off: 42, Size: 2},\n\t\t\tassembler:   \"ldh [42]\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadAbsolute{Off: 42, Size: 4},\n\t\t\tassembler:   \"ld [42]\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadAbsolute{Off: 42, Size: -1},\n\t\t\tassembler:   \"unknown instruction: bpf.LoadAbsolute{Off:0x2a, Size:-1}\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadIndirect{Off: 42, Size: 1},\n\t\t\tassembler:   \"ldb [x + 42]\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadIndirect{Off: 42, Size: 2},\n\t\t\tassembler:   \"ldh [x + 42]\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadIndirect{Off: 42, Size: 4},\n\t\t\tassembler:   \"ld [x + 42]\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadIndirect{Off: 42, Size: -1},\n\t\t\tassembler:   \"unknown instruction: bpf.LoadIndirect{Off:0x2a, Size:-1}\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadMemShift{Off: 42},\n\t\t\tassembler:   \"ldx 
4*([42]&0xf)\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadExtension{Num: ExtLen},\n\t\t\tassembler:   \"ld #len\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadExtension{Num: ExtProto},\n\t\t\tassembler:   \"ld #proto\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadExtension{Num: ExtType},\n\t\t\tassembler:   \"ld #type\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadExtension{Num: ExtPayloadOffset},\n\t\t\tassembler:   \"ld #poff\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadExtension{Num: ExtInterfaceIndex},\n\t\t\tassembler:   \"ld #ifidx\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadExtension{Num: ExtNetlinkAttr},\n\t\t\tassembler:   \"ld #nla\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadExtension{Num: ExtNetlinkAttrNested},\n\t\t\tassembler:   \"ld #nlan\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadExtension{Num: ExtMark},\n\t\t\tassembler:   \"ld #mark\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadExtension{Num: ExtQueue},\n\t\t\tassembler:   \"ld #queue\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadExtension{Num: ExtLinkLayerType},\n\t\t\tassembler:   \"ld #hatype\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadExtension{Num: ExtRXHash},\n\t\t\tassembler:   \"ld #rxhash\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadExtension{Num: ExtCPUID},\n\t\t\tassembler:   \"ld #cpu\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadExtension{Num: ExtVLANTag},\n\t\t\tassembler:   \"ld #vlan_tci\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadExtension{Num: ExtVLANTagPresent},\n\t\t\tassembler:   \"ld #vlan_avail\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadExtension{Num: ExtVLANProto},\n\t\t\tassembler:   \"ld #vlan_tpid\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadExtension{Num: ExtRand},\n\t\t\tassembler:   \"ld #rand\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadAbsolute{Off: 0xfffff038, Size: 4},\n\t\t\tassembler:   \"ld #rand\",\n\t\t},\n\t\t{\n\t\t\tinstruction: LoadExtension{Num: 0xfff},\n\t\t\tassembler:   \"unknown instruction: bpf.LoadExtension{Num:4095}\",\n\t\t},\n\t\t{\n\t\t\tinstruction: StoreScratch{Src: RegA, N: 
3},\n\t\t\tassembler:   \"st M[3]\",\n\t\t},\n\t\t{\n\t\t\tinstruction: StoreScratch{Src: RegX, N: 3},\n\t\t\tassembler:   \"stx M[3]\",\n\t\t},\n\t\t{\n\t\t\tinstruction: StoreScratch{Src: 0xffff, N: 3},\n\t\t\tassembler:   \"unknown instruction: bpf.StoreScratch{Src:0xffff, N:3}\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpConstant{Op: ALUOpAdd, Val: 42},\n\t\t\tassembler:   \"add #42\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpConstant{Op: ALUOpSub, Val: 42},\n\t\t\tassembler:   \"sub #42\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpConstant{Op: ALUOpMul, Val: 42},\n\t\t\tassembler:   \"mul #42\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpConstant{Op: ALUOpDiv, Val: 42},\n\t\t\tassembler:   \"div #42\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpConstant{Op: ALUOpOr, Val: 42},\n\t\t\tassembler:   \"or #42\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpConstant{Op: ALUOpAnd, Val: 42},\n\t\t\tassembler:   \"and #42\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpConstant{Op: ALUOpShiftLeft, Val: 42},\n\t\t\tassembler:   \"lsh #42\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpConstant{Op: ALUOpShiftRight, Val: 42},\n\t\t\tassembler:   \"rsh #42\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpConstant{Op: ALUOpMod, Val: 42},\n\t\t\tassembler:   \"mod #42\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpConstant{Op: ALUOpXor, Val: 42},\n\t\t\tassembler:   \"xor #42\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpConstant{Op: 0xffff, Val: 42},\n\t\t\tassembler:   \"unknown instruction: bpf.ALUOpConstant{Op:0xffff, Val:0x2a}\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpX{Op: ALUOpAdd},\n\t\t\tassembler:   \"add x\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpX{Op: ALUOpSub},\n\t\t\tassembler:   \"sub x\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpX{Op: ALUOpMul},\n\t\t\tassembler:   \"mul x\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpX{Op: ALUOpDiv},\n\t\t\tassembler:   \"div x\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpX{Op: ALUOpOr},\n\t\t\tassembler:   \"or x\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpX{Op: 
ALUOpAnd},\n\t\t\tassembler:   \"and x\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpX{Op: ALUOpShiftLeft},\n\t\t\tassembler:   \"lsh x\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpX{Op: ALUOpShiftRight},\n\t\t\tassembler:   \"rsh x\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpX{Op: ALUOpMod},\n\t\t\tassembler:   \"mod x\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpX{Op: ALUOpXor},\n\t\t\tassembler:   \"xor x\",\n\t\t},\n\t\t{\n\t\t\tinstruction: ALUOpX{Op: 0xffff},\n\t\t\tassembler:   \"unknown instruction: bpf.ALUOpX{Op:0xffff}\",\n\t\t},\n\t\t{\n\t\t\tinstruction: NegateA{},\n\t\t\tassembler:   \"neg\",\n\t\t},\n\t\t{\n\t\t\tinstruction: Jump{Skip: 10},\n\t\t\tassembler:   \"ja 10\",\n\t\t},\n\t\t{\n\t\t\tinstruction: JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8, SkipFalse: 9},\n\t\t\tassembler:   \"jeq #42,8,9\",\n\t\t},\n\t\t{\n\t\t\tinstruction: JumpIf{Cond: JumpEqual, Val: 42, SkipTrue: 8},\n\t\t\tassembler:   \"jeq #42,8\",\n\t\t},\n\t\t{\n\t\t\tinstruction: JumpIf{Cond: JumpEqual, Val: 42, SkipFalse: 8},\n\t\t\tassembler:   \"jneq #42,8\",\n\t\t},\n\t\t{\n\t\t\tinstruction: JumpIf{Cond: JumpNotEqual, Val: 42, SkipTrue: 8},\n\t\t\tassembler:   \"jneq #42,8\",\n\t\t},\n\t\t{\n\t\t\tinstruction: JumpIf{Cond: JumpLessThan, Val: 42, SkipTrue: 7},\n\t\t\tassembler:   \"jlt #42,7\",\n\t\t},\n\t\t{\n\t\t\tinstruction: JumpIf{Cond: JumpLessOrEqual, Val: 42, SkipTrue: 6},\n\t\t\tassembler:   \"jle #42,6\",\n\t\t},\n\t\t{\n\t\t\tinstruction: JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4, SkipFalse: 5},\n\t\t\tassembler:   \"jgt #42,4,5\",\n\t\t},\n\t\t{\n\t\t\tinstruction: JumpIf{Cond: JumpGreaterThan, Val: 42, SkipTrue: 4},\n\t\t\tassembler:   \"jgt #42,4\",\n\t\t},\n\t\t{\n\t\t\tinstruction: JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3, SkipFalse: 4},\n\t\t\tassembler:   \"jge #42,3,4\",\n\t\t},\n\t\t{\n\t\t\tinstruction: JumpIf{Cond: JumpGreaterOrEqual, Val: 42, SkipTrue: 3},\n\t\t\tassembler:   \"jge #42,3\",\n\t\t},\n\t\t{\n\t\t\tinstruction: JumpIf{Cond: 
JumpBitsSet, Val: 42, SkipTrue: 2, SkipFalse: 3},\n\t\t\tassembler:   \"jset #42,2,3\",\n\t\t},\n\t\t{\n\t\t\tinstruction: JumpIf{Cond: JumpBitsSet, Val: 42, SkipTrue: 2},\n\t\t\tassembler:   \"jset #42,2\",\n\t\t},\n\t\t{\n\t\t\tinstruction: JumpIf{Cond: JumpBitsNotSet, Val: 42, SkipTrue: 2, SkipFalse: 3},\n\t\t\tassembler:   \"jset #42,3,2\",\n\t\t},\n\t\t{\n\t\t\tinstruction: JumpIf{Cond: JumpBitsNotSet, Val: 42, SkipTrue: 2},\n\t\t\tassembler:   \"jset #42,0,2\",\n\t\t},\n\t\t{\n\t\t\tinstruction: JumpIf{Cond: 0xffff, Val: 42, SkipTrue: 1, SkipFalse: 2},\n\t\t\tassembler:   \"unknown instruction: bpf.JumpIf{Cond:0xffff, Val:0x2a, SkipTrue:0x1, SkipFalse:0x2}\",\n\t\t},\n\t\t{\n\t\t\tinstruction: TAX{},\n\t\t\tassembler:   \"tax\",\n\t\t},\n\t\t{\n\t\t\tinstruction: TXA{},\n\t\t\tassembler:   \"txa\",\n\t\t},\n\t\t{\n\t\t\tinstruction: RetA{},\n\t\t\tassembler:   \"ret a\",\n\t\t},\n\t\t{\n\t\t\tinstruction: RetConstant{Val: 42},\n\t\t\tassembler:   \"ret #42\",\n\t\t},\n\t\t// Invalid instruction\n\t\t{\n\t\t\tinstruction: InvalidInstruction{},\n\t\t\tassembler:   \"unknown instruction: bpf.InvalidInstruction{}\",\n\t\t},\n\t}\n\n\tfor _, testCase := range testCases {\n\t\tif input, ok := testCase.instruction.(fmt.Stringer); ok {\n\t\t\tgot := input.String()\n\t\t\tif got != testCase.assembler {\n\t\t\t\tt.Errorf(\"String did not return expected assembler notation, expected: %s, got: %s\", testCase.assembler, got)\n\t\t\t}\n\t\t} else {\n\t\t\tt.Errorf(\"Instruction %#v is not a fmt.Stringer\", testCase.instruction)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/setter.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage bpf\n\n// A Setter is a type which can attach a compiled BPF filter to itself.\ntype Setter interface {\n\tSetBPF(filter []RawInstruction) error\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/testdata/all_instructions.bpf",
    "content": "50,0 0 0 42,1 0 0 42,96 0 0 3,97 0 0 3,48 0 0 42,40 0 0 42,32 0 0 42,80 0 0 42,72 0 0 42,64 0 0 42,177 0 0 42,128 0 0 0,32 0 0 4294963200,32 0 0 4294963204,32 0 0 4294963256,2 0 0 3,3 0 0 3,4 0 0 42,20 0 0 42,36 0 0 42,52 0 0 42,68 0 0 42,84 0 0 42,100 0 0 42,116 0 0 42,148 0 0 42,164 0 0 42,12 0 0 0,28 0 0 0,44 0 0 0,60 0 0 0,76 0 0 0,92 0 0 0,108 0 0 0,124 0 0 0,156 0 0 0,172 0 0 0,132 0 0 0,5 0 0 10,21 8 9 42,21 0 8 42,53 0 7 42,37 0 6 42,37 4 5 42,53 3 4 42,69 2 3 42,7 0 0 0,135 0 0 0,22 0 0 0,6 0 0 0,\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/testdata/all_instructions.txt",
    "content": "# This filter is compiled to all_instructions.bpf by the `bpf_asm`\n# tool, which can be found in the linux kernel source tree under\n# tools/net.\n\n# Load immediate\nld #42\nldx #42\n\n# Load scratch\nld M[3]\nldx M[3]\n\n# Load absolute\nldb [42]\nldh [42]\nld [42]\n\n# Load indirect\nldb [x + 42]\nldh [x + 42]\nld [x + 42]\n\n# Load IPv4 header length\nldx 4*([42]&0xf)\n\n# Run extension function\nld #len\nld #proto\nld #type\nld #rand\n\n# Store scratch\nst M[3]\nstx M[3]\n\n# A <op> constant\nadd #42\nsub #42\nmul #42\ndiv #42\nor #42\nand #42\nlsh #42\nrsh #42\nmod #42\nxor #42\n\n# A <op> X\nadd x\nsub x\nmul x\ndiv x\nor x\nand x\nlsh x\nrsh x\nmod x\nxor x\n\n# !A\nneg\n\n# Jumps\nja end\njeq #42,prev,end\njne #42,end\njlt #42,end\njle #42,end\njgt #42,prev,end\njge #42,prev,end\njset #42,prev,end\n\n# Register transfers\ntax\ntxa\n\n# Returns\nprev: ret a\nend: ret #42\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/vm.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage bpf\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\n// A VM is an emulated BPF virtual machine.\ntype VM struct {\n\tfilter []Instruction\n}\n\n// NewVM returns a new VM using the input BPF program.\nfunc NewVM(filter []Instruction) (*VM, error) {\n\tif len(filter) == 0 {\n\t\treturn nil, errors.New(\"one or more Instructions must be specified\")\n\t}\n\n\tfor i, ins := range filter {\n\t\tcheck := len(filter) - (i + 1)\n\t\tswitch ins := ins.(type) {\n\t\t// Check for out-of-bounds jumps in instructions\n\t\tcase Jump:\n\t\t\tif check <= int(ins.Skip) {\n\t\t\t\treturn nil, fmt.Errorf(\"cannot jump %d instructions; jumping past program bounds\", ins.Skip)\n\t\t\t}\n\t\tcase JumpIf:\n\t\t\tif check <= int(ins.SkipTrue) {\n\t\t\t\treturn nil, fmt.Errorf(\"cannot jump %d instructions in true case; jumping past program bounds\", ins.SkipTrue)\n\t\t\t}\n\t\t\tif check <= int(ins.SkipFalse) {\n\t\t\t\treturn nil, fmt.Errorf(\"cannot jump %d instructions in false case; jumping past program bounds\", ins.SkipFalse)\n\t\t\t}\n\t\t// Check for division or modulus by zero\n\t\tcase ALUOpConstant:\n\t\t\tif ins.Val != 0 {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tswitch ins.Op {\n\t\t\tcase ALUOpDiv, ALUOpMod:\n\t\t\t\treturn nil, errors.New(\"cannot divide by zero using ALUOpConstant\")\n\t\t\t}\n\t\t// Check for unknown extensions\n\t\tcase LoadExtension:\n\t\t\tswitch ins.Num {\n\t\t\tcase ExtLen:\n\t\t\tdefault:\n\t\t\t\treturn nil, fmt.Errorf(\"extension %d not implemented\", ins.Num)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Make sure last instruction is a return instruction\n\tswitch filter[len(filter)-1].(type) {\n\tcase RetA, RetConstant:\n\tdefault:\n\t\treturn nil, errors.New(\"BPF program must end with RetA or RetConstant\")\n\t}\n\n\t// Though our VM works using disassembled instructions, we\n\t// attempt 
to assemble the input filter anyway to ensure it is compatible\n\t// with an operating system VM.\n\t_, err := Assemble(filter)\n\n\treturn &VM{\n\t\tfilter: filter,\n\t}, err\n}\n\n// Run runs the VM's BPF program against the input bytes.\n// Run returns the number of bytes accepted by the BPF program, and any errors\n// which occurred while processing the program.\nfunc (v *VM) Run(in []byte) (int, error) {\n\tvar (\n\t\t// Registers of the virtual machine\n\t\tregA       uint32\n\t\tregX       uint32\n\t\tregScratch [16]uint32\n\n\t\t// OK is true if the program should continue processing the next\n\t\t// instruction, or false if not, causing the loop to break\n\t\tok = true\n\t)\n\n\t// TODO(mdlayher): implement:\n\t// - NegateA:\n\t//   - would require a change from uint32 registers to int32\n\t//     registers\n\n\t// TODO(mdlayher): add interop tests that check signedness of ALU\n\t// operations against kernel implementation, and make sure Go\n\t// implementation matches behavior\n\n\tfor i := 0; i < len(v.filter) && ok; i++ {\n\t\tins := v.filter[i]\n\n\t\tswitch ins := ins.(type) {\n\t\tcase ALUOpConstant:\n\t\t\tregA = aluOpConstant(ins, regA)\n\t\tcase ALUOpX:\n\t\t\tregA, ok = aluOpX(ins, regA, regX)\n\t\tcase Jump:\n\t\t\ti += int(ins.Skip)\n\t\tcase JumpIf:\n\t\t\tjump := jumpIf(ins, regA)\n\t\t\ti += jump\n\t\tcase LoadAbsolute:\n\t\t\tregA, ok = loadAbsolute(ins, in)\n\t\tcase LoadConstant:\n\t\t\tregA, regX = loadConstant(ins, regA, regX)\n\t\tcase LoadExtension:\n\t\t\tregA = loadExtension(ins, in)\n\t\tcase LoadIndirect:\n\t\t\tregA, ok = loadIndirect(ins, in, regX)\n\t\tcase LoadMemShift:\n\t\t\tregX, ok = loadMemShift(ins, in)\n\t\tcase LoadScratch:\n\t\t\tregA, regX = loadScratch(ins, regScratch, regA, regX)\n\t\tcase RetA:\n\t\t\treturn int(regA), nil\n\t\tcase RetConstant:\n\t\t\treturn int(ins.Val), nil\n\t\tcase StoreScratch:\n\t\t\tregScratch = storeScratch(ins, regScratch, regA, regX)\n\t\tcase TAX:\n\t\t\tregX = regA\n\t\tcase 
TXA:\n\t\t\tregA = regX\n\t\tdefault:\n\t\t\treturn 0, fmt.Errorf(\"unknown Instruction at index %d: %T\", i, ins)\n\t\t}\n\t}\n\n\treturn 0, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/vm_aluop_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage bpf_test\n\nimport (\n\t\"testing\"\n\n\t\"golang.org/x/net/bpf\"\n)\n\nfunc TestVMALUOpAdd(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\tbpf.ALUOpConstant{\n\t\t\tOp:  bpf.ALUOpAdd,\n\t\t\tVal: 3,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t8, 2, 3,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 3, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMALUOpSub(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\tbpf.TAX{},\n\t\tbpf.ALUOpX{\n\t\t\tOp: bpf.ALUOpSub,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t1, 2, 3,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 0, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMALUOpMul(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\tbpf.ALUOpConstant{\n\t\t\tOp:  bpf.ALUOpMul,\n\t\t\tVal: 2,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := 
vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t6, 2, 3, 4,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 4, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMALUOpDiv(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\tbpf.ALUOpConstant{\n\t\t\tOp:  bpf.ALUOpDiv,\n\t\t\tVal: 2,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t20, 2, 3, 4,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 2, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMALUOpDivByZeroALUOpConstant(t *testing.T) {\n\t_, _, err := testVM(t, []bpf.Instruction{\n\t\tbpf.ALUOpConstant{\n\t\t\tOp:  bpf.ALUOpDiv,\n\t\t\tVal: 0,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif errStr(err) != \"cannot divide by zero using ALUOpConstant\" {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}\n\nfunc TestVMALUOpDivByZeroALUOpX(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\t// Load byte 0 into X\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\tbpf.TAX{},\n\t\t// Load byte 1 into A\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  9,\n\t\t\tSize: 1,\n\t\t},\n\t\t// Attempt to perform 1/0\n\t\tbpf.ALUOpX{\n\t\t\tOp: bpf.ALUOpDiv,\n\t\t},\n\t\t// Return 4 bytes if program does not terminate\n\t\tbpf.LoadConstant{\n\t\t\tVal: 12,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 
0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0, 1, 3, 4,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 0, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMALUOpOr(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 2,\n\t\t},\n\t\tbpf.ALUOpConstant{\n\t\t\tOp:  bpf.ALUOpOr,\n\t\t\tVal: 0x01,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0x00, 0x10, 0x03, 0x04,\n\t\t0x05, 0x06, 0x07, 0x08,\n\t\t0x09, 0xff,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 9, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMALUOpAnd(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 2,\n\t\t},\n\t\tbpf.ALUOpConstant{\n\t\t\tOp:  bpf.ALUOpAnd,\n\t\t\tVal: 0x0019,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xaa, 0x09,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 1, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMALUOpShiftLeft(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\tbpf.ALUOpConstant{\n\t\t\tOp:  bpf.ALUOpShiftLeft,\n\t\t\tVal: 
0x01,\n\t\t},\n\t\tbpf.JumpIf{\n\t\t\tCond:     bpf.JumpEqual,\n\t\t\tVal:      0x02,\n\t\t\tSkipTrue: 1,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 0,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 9,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0x01, 0xaa,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 1, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMALUOpShiftRight(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\tbpf.ALUOpConstant{\n\t\t\tOp:  bpf.ALUOpShiftRight,\n\t\t\tVal: 0x01,\n\t\t},\n\t\tbpf.JumpIf{\n\t\t\tCond:     bpf.JumpEqual,\n\t\t\tVal:      0x04,\n\t\t\tSkipTrue: 1,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 0,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 9,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0x08, 0xff, 0xff,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 1, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMALUOpMod(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\tbpf.ALUOpConstant{\n\t\t\tOp:  bpf.ALUOpMod,\n\t\t\tVal: 20,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t30, 0, 
0,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 2, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMALUOpModByZeroALUOpConstant(t *testing.T) {\n\t_, _, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\tbpf.ALUOpConstant{\n\t\t\tOp:  bpf.ALUOpMod,\n\t\t\tVal: 0,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif errStr(err) != \"cannot divide by zero using ALUOpConstant\" {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}\n\nfunc TestVMALUOpModByZeroALUOpX(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\t// Load byte 0 into X\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\tbpf.TAX{},\n\t\t// Load byte 1 into A\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  9,\n\t\t\tSize: 1,\n\t\t},\n\t\t// Attempt to perform 1%0\n\t\tbpf.ALUOpX{\n\t\t\tOp: bpf.ALUOpMod,\n\t\t},\n\t\t// Return 4 bytes if program does not terminate\n\t\tbpf.LoadConstant{\n\t\t\tVal: 12,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0, 1, 3, 4,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 0, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMALUOpXor(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\tbpf.ALUOpConstant{\n\t\t\tOp:  bpf.ALUOpXor,\n\t\t\tVal: 0x0a,\n\t\t},\n\t\tbpf.JumpIf{\n\t\t\tCond:     bpf.JumpEqual,\n\t\t\tVal:      0x01,\n\t\t\tSkipTrue: 1,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 0,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 
9,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0x0b, 0x00, 0x00, 0x00,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 1, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMALUOpUnknown(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\tbpf.ALUOpConstant{\n\t\t\tOp:  bpf.ALUOpAdd,\n\t\t\tVal: 1,\n\t\t},\n\t\t// Verify that an unknown operation is a no-op\n\t\tbpf.ALUOpConstant{\n\t\t\tOp: 100,\n\t\t},\n\t\tbpf.JumpIf{\n\t\t\tCond:     bpf.JumpEqual,\n\t\t\tVal:      0x02,\n\t\t\tSkipTrue: 1,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 0,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 9,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t1,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 1, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/vm_bpf_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage bpf_test\n\nimport (\n\t\"net\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org/x/net/bpf\"\n\t\"golang.org/x/net/ipv4\"\n)\n\n// A virtualMachine is a BPF virtual machine which can process an\n// input packet against a BPF program and render a verdict.\ntype virtualMachine interface {\n\tRun(in []byte) (int, error)\n}\n\n// canUseOSVM indicates if the OS BPF VM is available on this platform.\nfunc canUseOSVM() bool {\n\t// OS BPF VM can only be used on platforms where x/net/ipv4 supports\n\t// attaching a BPF program to a socket.\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n// All BPF tests against both the Go VM and OS VM are assumed to\n// be used with a UDP socket. As a result, the entire contents\n// of a UDP datagram is sent through the BPF program, but only\n// the body after the UDP header will ever be returned in output.\n\n// testVM sets up a Go BPF VM, and if available, a native OS BPF VM\n// for integration testing.\nfunc testVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func(), error) {\n\tgoVM, err := bpf.NewVM(filter)\n\tif err != nil {\n\t\t// Some tests expect an error, so this error must be returned\n\t\t// instead of fatally exiting the test\n\t\treturn nil, nil, err\n\t}\n\n\tmvm := &multiVirtualMachine{\n\t\tgoVM: goVM,\n\n\t\tt: t,\n\t}\n\n\t// If available, add the OS VM for tests which verify that both the Go\n\t// VM and OS VM have exactly the same output for the same input program\n\t// and packet.\n\tdone := func() {}\n\tif canUseOSVM() {\n\t\tosVM, osVMDone := testOSVM(t, filter)\n\t\tdone = func() { osVMDone() }\n\t\tmvm.osVM = osVM\n\t}\n\n\treturn mvm, done, nil\n}\n\n// udpHeaderLen is the length of a UDP header.\nconst udpHeaderLen = 8\n\n// A multiVirtualMachine is a 
virtualMachine which can call out to both the Go VM\n// and the native OS VM, if the OS VM is available.\ntype multiVirtualMachine struct {\n\tgoVM virtualMachine\n\tosVM virtualMachine\n\n\tt *testing.T\n}\n\nfunc (mvm *multiVirtualMachine) Run(in []byte) (int, error) {\n\tif len(in) < udpHeaderLen {\n\t\tmvm.t.Fatalf(\"input must be at least length of UDP header (%d), got: %d\",\n\t\t\tudpHeaderLen, len(in))\n\t}\n\n\t// All tests have a UDP header as part of input, because the OS VM\n\t// packets always will. For the Go VM, this output is trimmed before\n\t// being sent back to tests.\n\tgoOut, goErr := mvm.goVM.Run(in)\n\tif goOut >= udpHeaderLen {\n\t\tgoOut -= udpHeaderLen\n\t}\n\n\t// If Go output is larger than the size of the packet, packet filtering\n\t// interop tests must trim the output bytes to the length of the packet.\n\t// The BPF VM should not do this on its own, as other uses of it do\n\t// not trim the output byte count.\n\ttrim := len(in) - udpHeaderLen\n\tif goOut > trim {\n\t\tgoOut = trim\n\t}\n\n\t// When the OS VM is not available, process using the Go VM alone\n\tif mvm.osVM == nil {\n\t\treturn goOut, goErr\n\t}\n\n\t// The OS VM will apply its own UDP header, so remove the pseudo header\n\t// that the Go VM needs.\n\tosOut, err := mvm.osVM.Run(in[udpHeaderLen:])\n\tif err != nil {\n\t\tmvm.t.Fatalf(\"error while running OS VM: %v\", err)\n\t}\n\n\t// Verify both VMs return same number of bytes\n\tvar mismatch bool\n\tif goOut != osOut {\n\t\tmismatch = true\n\t\tmvm.t.Logf(\"output byte count does not match:\\n- go: %v\\n- os: %v\", goOut, osOut)\n\t}\n\n\tif mismatch {\n\t\tmvm.t.Fatal(\"Go BPF and OS BPF packet outputs do not match\")\n\t}\n\n\treturn goOut, goErr\n}\n\n// An osVirtualMachine is a virtualMachine which uses the OS's BPF VM for\n// processing BPF programs.\ntype osVirtualMachine struct {\n\tl net.PacketConn\n\ts net.Conn\n}\n\n// testOSVM creates a virtualMachine which uses the OS's BPF VM by injecting\n// packets into 
a UDP listener with a BPF program attached to it.\nfunc testOSVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func()) {\n\tl, err := net.ListenPacket(\"udp4\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to open OS VM UDP listener: %v\", err)\n\t}\n\n\tprog, err := bpf.Assemble(filter)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to compile BPF program: %v\", err)\n\t}\n\n\tp := ipv4.NewPacketConn(l)\n\tif err = p.SetBPF(prog); err != nil {\n\t\tt.Fatalf(\"failed to attach BPF program to listener: %v\", err)\n\t}\n\n\ts, err := net.Dial(\"udp4\", l.LocalAddr().String())\n\tif err != nil {\n\t\tt.Fatalf(\"failed to dial connection to listener: %v\", err)\n\t}\n\n\tdone := func() {\n\t\t_ = s.Close()\n\t\t_ = l.Close()\n\t}\n\n\treturn &osVirtualMachine{\n\t\tl: l,\n\t\ts: s,\n\t}, done\n}\n\n// Run sends the input bytes into the OS's BPF VM and returns its verdict.\nfunc (vm *osVirtualMachine) Run(in []byte) (int, error) {\n\tgo func() {\n\t\t_, _ = vm.s.Write(in)\n\t}()\n\n\tvm.l.SetDeadline(time.Now().Add(50 * time.Millisecond))\n\n\tvar b [512]byte\n\tn, _, err := vm.l.ReadFrom(b[:])\n\tif err != nil {\n\t\t// A timeout indicates that BPF filtered out the packet, and thus,\n\t\t// no input should be returned.\n\t\tif nerr, ok := err.(net.Error); ok && nerr.Timeout() {\n\t\t\treturn n, nil\n\t\t}\n\n\t\treturn n, err\n\t}\n\n\treturn n, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/vm_extension_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage bpf_test\n\nimport (\n\t\"testing\"\n\n\t\"golang.org/x/net/bpf\"\n)\n\nfunc TestVMLoadExtensionNotImplemented(t *testing.T) {\n\t_, _, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadExtension{\n\t\t\tNum: 100,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif errStr(err) != \"extension 100 not implemented\" {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}\n\nfunc TestVMLoadExtensionExtLen(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadExtension{\n\t\t\tNum: bpf.ExtLen,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0, 1, 2, 3,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 4, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/vm_instructions.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage bpf\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n)\n\nfunc aluOpConstant(ins ALUOpConstant, regA uint32) uint32 {\n\treturn aluOpCommon(ins.Op, regA, ins.Val)\n}\n\nfunc aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) {\n\t// Guard against division or modulus by zero by terminating\n\t// the program, as the OS BPF VM does\n\tif regX == 0 {\n\t\tswitch ins.Op {\n\t\tcase ALUOpDiv, ALUOpMod:\n\t\t\treturn 0, false\n\t\t}\n\t}\n\n\treturn aluOpCommon(ins.Op, regA, regX), true\n}\n\nfunc aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 {\n\tswitch op {\n\tcase ALUOpAdd:\n\t\treturn regA + value\n\tcase ALUOpSub:\n\t\treturn regA - value\n\tcase ALUOpMul:\n\t\treturn regA * value\n\tcase ALUOpDiv:\n\t\t// Division by zero not permitted by NewVM and aluOpX checks\n\t\treturn regA / value\n\tcase ALUOpOr:\n\t\treturn regA | value\n\tcase ALUOpAnd:\n\t\treturn regA & value\n\tcase ALUOpShiftLeft:\n\t\treturn regA << value\n\tcase ALUOpShiftRight:\n\t\treturn regA >> value\n\tcase ALUOpMod:\n\t\t// Modulus by zero not permitted by NewVM and aluOpX checks\n\t\treturn regA % value\n\tcase ALUOpXor:\n\t\treturn regA ^ value\n\tdefault:\n\t\treturn regA\n\t}\n}\n\nfunc jumpIf(ins JumpIf, value uint32) int {\n\tvar ok bool\n\tinV := uint32(ins.Val)\n\n\tswitch ins.Cond {\n\tcase JumpEqual:\n\t\tok = value == inV\n\tcase JumpNotEqual:\n\t\tok = value != inV\n\tcase JumpGreaterThan:\n\t\tok = value > inV\n\tcase JumpLessThan:\n\t\tok = value < inV\n\tcase JumpGreaterOrEqual:\n\t\tok = value >= inV\n\tcase JumpLessOrEqual:\n\t\tok = value <= inV\n\tcase JumpBitsSet:\n\t\tok = (value & inV) != 0\n\tcase JumpBitsNotSet:\n\t\tok = (value & inV) == 0\n\t}\n\n\tif ok {\n\t\treturn int(ins.SkipTrue)\n\t}\n\n\treturn int(ins.SkipFalse)\n}\n\nfunc loadAbsolute(ins LoadAbsolute, in 
[]byte) (uint32, bool) {\n\toffset := int(ins.Off)\n\tsize := int(ins.Size)\n\n\treturn loadCommon(in, offset, size)\n}\n\nfunc loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) {\n\tswitch ins.Dst {\n\tcase RegA:\n\t\tregA = ins.Val\n\tcase RegX:\n\t\tregX = ins.Val\n\t}\n\n\treturn regA, regX\n}\n\nfunc loadExtension(ins LoadExtension, in []byte) uint32 {\n\tswitch ins.Num {\n\tcase ExtLen:\n\t\treturn uint32(len(in))\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unimplemented extension: %d\", ins.Num))\n\t}\n}\n\nfunc loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) {\n\toffset := int(ins.Off) + int(regX)\n\tsize := int(ins.Size)\n\n\treturn loadCommon(in, offset, size)\n}\n\nfunc loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) {\n\toffset := int(ins.Off)\n\n\tif !inBounds(len(in), offset, 0) {\n\t\treturn 0, false\n\t}\n\n\t// Mask off high 4 bits and multiply low 4 bits by 4\n\treturn uint32(in[offset]&0x0f) * 4, true\n}\n\nfunc inBounds(inLen int, offset int, size int) bool {\n\treturn offset+size <= inLen\n}\n\nfunc loadCommon(in []byte, offset int, size int) (uint32, bool) {\n\tif !inBounds(len(in), offset, size) {\n\t\treturn 0, false\n\t}\n\n\tswitch size {\n\tcase 1:\n\t\treturn uint32(in[offset]), true\n\tcase 2:\n\t\treturn uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true\n\tcase 4:\n\t\treturn uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"invalid load size: %d\", size))\n\t}\n}\n\nfunc loadScratch(ins LoadScratch, regScratch [16]uint32, regA uint32, regX uint32) (uint32, uint32) {\n\tswitch ins.Dst {\n\tcase RegA:\n\t\tregA = regScratch[ins.N]\n\tcase RegX:\n\t\tregX = regScratch[ins.N]\n\t}\n\n\treturn regA, regX\n}\n\nfunc storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 {\n\tswitch ins.Src {\n\tcase RegA:\n\t\tregScratch[ins.N] = regA\n\tcase RegX:\n\t\tregScratch[ins.N] = regX\n\t}\n\n\treturn 
regScratch\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/vm_jump_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage bpf_test\n\nimport (\n\t\"testing\"\n\n\t\"golang.org/x/net/bpf\"\n)\n\nfunc TestVMJumpOne(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\tbpf.Jump{\n\t\t\tSkip: 1,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 0,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 9,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t1,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 1, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMJumpOutOfProgram(t *testing.T) {\n\t_, _, err := testVM(t, []bpf.Instruction{\n\t\tbpf.Jump{\n\t\t\tSkip: 1,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif errStr(err) != \"cannot jump 1 instructions; jumping past program bounds\" {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}\n\nfunc TestVMJumpIfTrueOutOfProgram(t *testing.T) {\n\t_, _, err := testVM(t, []bpf.Instruction{\n\t\tbpf.JumpIf{\n\t\t\tCond:     bpf.JumpEqual,\n\t\t\tSkipTrue: 2,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif errStr(err) != \"cannot jump 2 instructions in true case; jumping past program bounds\" {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}\n\nfunc TestVMJumpIfFalseOutOfProgram(t *testing.T) {\n\t_, _, err := testVM(t, []bpf.Instruction{\n\t\tbpf.JumpIf{\n\t\t\tCond:      bpf.JumpEqual,\n\t\t\tSkipFalse: 3,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif errStr(err) != \"cannot jump 3 instructions in false case; jumping past program bounds\" {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}\n\nfunc TestVMJumpIfEqual(t 
*testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\tbpf.JumpIf{\n\t\t\tCond:     bpf.JumpEqual,\n\t\t\tVal:      1,\n\t\t\tSkipTrue: 1,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 0,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 9,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t1,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 1, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMJumpIfNotEqual(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\tbpf.JumpIf{\n\t\t\tCond:      bpf.JumpNotEqual,\n\t\t\tVal:       1,\n\t\t\tSkipFalse: 1,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 0,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 9,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t1,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 1, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMJumpIfGreaterThan(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 4,\n\t\t},\n\t\tbpf.JumpIf{\n\t\t\tCond:     bpf.JumpGreaterThan,\n\t\t\tVal:      0x00010202,\n\t\t\tSkipTrue: 1,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 0,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 12,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", 
err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0, 1, 2, 3,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 4, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMJumpIfLessThan(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 4,\n\t\t},\n\t\tbpf.JumpIf{\n\t\t\tCond:     bpf.JumpLessThan,\n\t\t\tVal:      0xff010203,\n\t\t\tSkipTrue: 1,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 0,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 12,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0, 1, 2, 3,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 4, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMJumpIfGreaterOrEqual(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 4,\n\t\t},\n\t\tbpf.JumpIf{\n\t\t\tCond:     bpf.JumpGreaterOrEqual,\n\t\t\tVal:      0x00010203,\n\t\t\tSkipTrue: 1,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 0,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 12,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0, 1, 2, 3,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 4, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, 
got)\n\t}\n}\n\nfunc TestVMJumpIfLessOrEqual(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 4,\n\t\t},\n\t\tbpf.JumpIf{\n\t\t\tCond:     bpf.JumpLessOrEqual,\n\t\t\tVal:      0xff010203,\n\t\t\tSkipTrue: 1,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 0,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 12,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0, 1, 2, 3,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 4, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMJumpIfBitsSet(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 2,\n\t\t},\n\t\tbpf.JumpIf{\n\t\t\tCond:     bpf.JumpBitsSet,\n\t\t\tVal:      0x1122,\n\t\t\tSkipTrue: 1,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 0,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 10,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0x01, 0x02,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 2, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMJumpIfBitsNotSet(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 2,\n\t\t},\n\t\tbpf.JumpIf{\n\t\t\tCond:     bpf.JumpBitsNotSet,\n\t\t\tVal:      0x1221,\n\t\t\tSkipTrue: 1,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 0,\n\t\t},\n\t\tbpf.RetConstant{\n\t\t\tVal: 10,\n\t\t},\n\t})\n\tif 
err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0x01, 0x02,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 2, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/vm_load_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage bpf_test\n\nimport (\n\t\"net\"\n\t\"testing\"\n\n\t\"golang.org/x/net/bpf\"\n\t\"golang.org/x/net/ipv4\"\n)\n\nfunc TestVMLoadAbsoluteOffsetOutOfBounds(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  100,\n\t\t\tSize: 2,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0, 1, 2, 3,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 0, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMLoadAbsoluteOffsetPlusSizeOutOfBounds(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 2,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 0, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMLoadAbsoluteBadInstructionSize(t *testing.T) {\n\t_, _, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tSize: 5,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif errStr(err) != \"assembling instruction 1: invalid load byte length 0\" {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}\n\nfunc TestVMLoadConstantOK(t *testing.T) {\n\tvm, done, err := testVM(t, 
[]bpf.Instruction{\n\t\tbpf.LoadConstant{\n\t\t\tDst: bpf.RegX,\n\t\t\tVal: 9,\n\t\t},\n\t\tbpf.TXA{},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 1, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMLoadIndirectOutOfBounds(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadIndirect{\n\t\t\tOff:  100,\n\t\t\tSize: 1,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 0, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMLoadMemShiftOutOfBounds(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadMemShift{\n\t\t\tOff: 100,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 0, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nconst (\n\tdhcp4Port = 53\n)\n\nfunc TestVMLoadMemShiftLoadIndirectNoResult(t *testing.T) {\n\tvm, in, done := testDHCPv4(t)\n\tdefer done()\n\n\t// Append mostly empty UDP header with incorrect 
DHCPv4 port\n\tin = append(in, []byte{\n\t\t0, 0,\n\t\t0, dhcp4Port + 1,\n\t\t0, 0,\n\t\t0, 0,\n\t}...)\n\n\tout, err := vm.Run(in)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 0, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMLoadMemShiftLoadIndirectOK(t *testing.T) {\n\tvm, in, done := testDHCPv4(t)\n\tdefer done()\n\n\t// Append mostly empty UDP header with correct DHCPv4 port\n\tin = append(in, []byte{\n\t\t0, 0,\n\t\t0, dhcp4Port,\n\t\t0, 0,\n\t\t0, 0,\n\t}...)\n\n\tout, err := vm.Run(in)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := len(in)-8, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc testDHCPv4(t *testing.T) (virtualMachine, []byte, func()) {\n\t// DHCPv4 test data courtesy of David Anderson:\n\t// https://github.com/google/netboot/blob/master/dhcp4/conn_linux.go#L59-L70\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\t// Load IPv4 packet length\n\t\tbpf.LoadMemShift{Off: 8},\n\t\t// Get UDP dport\n\t\tbpf.LoadIndirect{Off: 8 + 2, Size: 2},\n\t\t// Correct dport?\n\t\tbpf.JumpIf{Cond: bpf.JumpEqual, Val: dhcp4Port, SkipFalse: 1},\n\t\t// Accept\n\t\tbpf.RetConstant{Val: 1500},\n\t\t// Ignore\n\t\tbpf.RetConstant{Val: 0},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\n\t// Minimal requirements to make a valid IPv4 header\n\th := &ipv4.Header{\n\t\tLen: ipv4.HeaderLen,\n\t\tSrc: net.IPv4(192, 168, 1, 1),\n\t\tDst: net.IPv4(192, 168, 1, 2),\n\t}\n\thb, err := h.Marshal()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to marshal IPv4 header: %v\", err)\n\t}\n\n\thb = append([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t}, hb...)\n\n\treturn vm, hb, done\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/vm_ret_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage bpf_test\n\nimport (\n\t\"testing\"\n\n\t\"golang.org/x/net/bpf\"\n)\n\nfunc TestVMRetA(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t9,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 1, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMRetALargerThanInput(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 2,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0, 255,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 2, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMRetConstant(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.RetConstant{\n\t\t\tVal: 9,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0, 1,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 1, out; want != got {\n\t\tt.Fatalf(\"unexpected 
number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMRetConstantLargerThanInput(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.RetConstant{\n\t\t\tVal: 16,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0, 1,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 2, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/vm_scratch_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage bpf_test\n\nimport (\n\t\"testing\"\n\n\t\"golang.org/x/net/bpf\"\n)\n\nfunc TestVMStoreScratchInvalidScratchRegisterTooSmall(t *testing.T) {\n\t_, _, err := testVM(t, []bpf.Instruction{\n\t\tbpf.StoreScratch{\n\t\t\tSrc: bpf.RegA,\n\t\t\tN:   -1,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif errStr(err) != \"assembling instruction 1: invalid scratch slot -1\" {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}\n\nfunc TestVMStoreScratchInvalidScratchRegisterTooLarge(t *testing.T) {\n\t_, _, err := testVM(t, []bpf.Instruction{\n\t\tbpf.StoreScratch{\n\t\t\tSrc: bpf.RegA,\n\t\t\tN:   16,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif errStr(err) != \"assembling instruction 1: invalid scratch slot 16\" {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}\n\nfunc TestVMStoreScratchUnknownSourceRegister(t *testing.T) {\n\t_, _, err := testVM(t, []bpf.Instruction{\n\t\tbpf.StoreScratch{\n\t\t\tSrc: 100,\n\t\t\tN:   0,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif errStr(err) != \"assembling instruction 1: invalid source register 100\" {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}\n\nfunc TestVMLoadScratchInvalidScratchRegisterTooSmall(t *testing.T) {\n\t_, _, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadScratch{\n\t\t\tDst: bpf.RegX,\n\t\t\tN:   -1,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif errStr(err) != \"assembling instruction 1: invalid scratch slot -1\" {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}\n\nfunc TestVMLoadScratchInvalidScratchRegisterTooLarge(t *testing.T) {\n\t_, _, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadScratch{\n\t\t\tDst: bpf.RegX,\n\t\t\tN:   16,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif errStr(err) != \"assembling instruction 1: invalid scratch slot 16\" {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}\n\nfunc TestVMLoadScratchUnknownDestinationRegister(t 
*testing.T) {\n\t_, _, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadScratch{\n\t\t\tDst: 100,\n\t\t\tN:   0,\n\t\t},\n\t\tbpf.RetA{},\n\t})\n\tif errStr(err) != \"assembling instruction 1: invalid target register 100\" {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}\n\nfunc TestVMStoreScratchLoadScratchOneValue(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\t// Load byte 255\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\t// Copy to X and store in scratch[0]\n\t\tbpf.TAX{},\n\t\tbpf.StoreScratch{\n\t\t\tSrc: bpf.RegX,\n\t\t\tN:   0,\n\t\t},\n\t\t// Load byte 1\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  9,\n\t\t\tSize: 1,\n\t\t},\n\t\t// Overwrite 1 with 255 from scratch[0]\n\t\tbpf.LoadScratch{\n\t\t\tDst: bpf.RegA,\n\t\t\tN:   0,\n\t\t},\n\t\t// Return 255\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t255, 1, 2,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 3, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n\nfunc TestVMStoreScratchLoadScratchMultipleValues(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\t// Load byte 10\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  8,\n\t\t\tSize: 1,\n\t\t},\n\t\t// Store in scratch[0]\n\t\tbpf.StoreScratch{\n\t\t\tSrc: bpf.RegA,\n\t\t\tN:   0,\n\t\t},\n\t\t// Load byte 20\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  9,\n\t\t\tSize: 1,\n\t\t},\n\t\t// Store in scratch[1]\n\t\tbpf.StoreScratch{\n\t\t\tSrc: bpf.RegA,\n\t\t\tN:   1,\n\t\t},\n\t\t// Load byte 30\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  10,\n\t\t\tSize: 1,\n\t\t},\n\t\t// Store in scratch[2]\n\t\tbpf.StoreScratch{\n\t\t\tSrc: bpf.RegA,\n\t\t\tN:   2,\n\t\t},\n\t\t// Load byte 1\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  
11,\n\t\t\tSize: 1,\n\t\t},\n\t\t// Store in scratch[3]\n\t\tbpf.StoreScratch{\n\t\t\tSrc: bpf.RegA,\n\t\t\tN:   3,\n\t\t},\n\t\t// Load in byte 10 to X\n\t\tbpf.LoadScratch{\n\t\t\tDst: bpf.RegX,\n\t\t\tN:   0,\n\t\t},\n\t\t// Copy X -> A\n\t\tbpf.TXA{},\n\t\t// Verify value is 10\n\t\tbpf.JumpIf{\n\t\t\tCond:     bpf.JumpEqual,\n\t\t\tVal:      10,\n\t\t\tSkipTrue: 1,\n\t\t},\n\t\t// Fail test if incorrect\n\t\tbpf.RetConstant{\n\t\t\tVal: 0,\n\t\t},\n\t\t// Load in byte 20 to A\n\t\tbpf.LoadScratch{\n\t\t\tDst: bpf.RegA,\n\t\t\tN:   1,\n\t\t},\n\t\t// Verify value is 20\n\t\tbpf.JumpIf{\n\t\t\tCond:     bpf.JumpEqual,\n\t\t\tVal:      20,\n\t\t\tSkipTrue: 1,\n\t\t},\n\t\t// Fail test if incorrect\n\t\tbpf.RetConstant{\n\t\t\tVal: 0,\n\t\t},\n\t\t// Load in byte 30 to A\n\t\tbpf.LoadScratch{\n\t\t\tDst: bpf.RegA,\n\t\t\tN:   2,\n\t\t},\n\t\t// Verify value is 30\n\t\tbpf.JumpIf{\n\t\t\tCond:     bpf.JumpEqual,\n\t\t\tVal:      30,\n\t\t\tSkipTrue: 1,\n\t\t},\n\t\t// Fail test if incorrect\n\t\tbpf.RetConstant{\n\t\t\tVal: 0,\n\t\t},\n\t\t// Return first two bytes on success\n\t\tbpf.RetConstant{\n\t\t\tVal: 10,\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to load BPF program: %v\", err)\n\t}\n\tdefer done()\n\n\tout, err := vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t10, 20, 30, 1,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n\tif want, got := 2, out; want != got {\n\t\tt.Fatalf(\"unexpected number of output bytes:\\n- want: %d\\n-  got: %d\",\n\t\t\twant, got)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/bpf/vm_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage bpf_test\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"golang.org/x/net/bpf\"\n)\n\nvar _ bpf.Instruction = unknown{}\n\ntype unknown struct{}\n\nfunc (unknown) Assemble() (bpf.RawInstruction, error) {\n\treturn bpf.RawInstruction{}, nil\n}\n\nfunc TestVMUnknownInstruction(t *testing.T) {\n\tvm, done, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadConstant{\n\t\t\tDst: bpf.RegA,\n\t\t\tVal: 100,\n\t\t},\n\t\t// Should terminate the program with an error immediately\n\t\tunknown{},\n\t\tbpf.RetA{},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tdefer done()\n\n\t_, err = vm.Run([]byte{\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0xff, 0xff, 0xff, 0xff,\n\t\t0x00, 0x00,\n\t})\n\tif errStr(err) != \"unknown Instruction at index 1: bpf_test.unknown\" {\n\t\tt.Fatalf(\"unexpected error while running program: %v\", err)\n\t}\n}\n\nfunc TestVMNoReturnInstruction(t *testing.T) {\n\t_, _, err := testVM(t, []bpf.Instruction{\n\t\tbpf.LoadConstant{\n\t\t\tDst: bpf.RegA,\n\t\t\tVal: 1,\n\t\t},\n\t})\n\tif errStr(err) != \"BPF program must end with RetA or RetConstant\" {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}\n\nfunc TestVMNoInputInstructions(t *testing.T) {\n\t_, _, err := testVM(t, []bpf.Instruction{})\n\tif errStr(err) != \"one or more Instructions must be specified\" {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}\n\n// ExampleNewVM demonstrates usage of a VM, using an Ethernet frame\n// as input and checking its EtherType to determine if it should be accepted.\nfunc ExampleNewVM() {\n\t// Offset | Length | Comment\n\t// -------------------------\n\t//   00   |   06   | Ethernet destination MAC address\n\t//   06   |   06   | Ethernet source MAC address\n\t//   12   |   02   | Ethernet EtherType\n\tconst (\n\t\tetOff = 12\n\t\tetLen = 
2\n\n\t\tetARP = 0x0806\n\t)\n\n\t// Set up a VM to filter traffic based on if its EtherType\n\t// matches the ARP EtherType.\n\tvm, err := bpf.NewVM([]bpf.Instruction{\n\t\t// Load EtherType value from Ethernet header\n\t\tbpf.LoadAbsolute{\n\t\t\tOff:  etOff,\n\t\t\tSize: etLen,\n\t\t},\n\t\t// If EtherType is equal to the ARP EtherType, jump to allow\n\t\t// packet to be accepted\n\t\tbpf.JumpIf{\n\t\t\tCond:     bpf.JumpEqual,\n\t\t\tVal:      etARP,\n\t\t\tSkipTrue: 1,\n\t\t},\n\t\t// EtherType does not match the ARP EtherType\n\t\tbpf.RetConstant{\n\t\t\tVal: 0,\n\t\t},\n\t\t// EtherType matches the ARP EtherType, accept up to 1500\n\t\t// bytes of packet\n\t\tbpf.RetConstant{\n\t\t\tVal: 1500,\n\t\t},\n\t})\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to load BPF program: %v\", err))\n\t}\n\n\t// Create an Ethernet frame with the ARP EtherType for testing\n\tframe := []byte{\n\t\t0xff, 0xff, 0xff, 0xff, 0xff, 0xff,\n\t\t0x00, 0x11, 0x22, 0x33, 0x44, 0x55,\n\t\t0x08, 0x06,\n\t\t// Payload omitted for brevity\n\t}\n\n\t// Run our VM's BPF program using the Ethernet frame as input\n\tout, err := vm.Run(frame)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to accept Ethernet frame: %v\", err))\n\t}\n\n\t// BPF VM can return a byte count greater than the number of input\n\t// bytes, so trim the output to match the input byte length\n\tif out > len(frame) {\n\t\tout = len(frame)\n\t}\n\n\tfmt.Printf(\"out: %d bytes\", out)\n\n\t// Output:\n\t// out: 14 bytes\n}\n\n// errStr returns the string representation of an error, or\n// \"<nil>\" if it is nil.\nfunc errStr(err error) string {\n\tif err == nil {\n\t\treturn \"<nil>\"\n\t}\n\n\treturn err.Error()\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/codereview.cfg",
    "content": "issuerepo: golang/go\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/context.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package context defines the Context type, which carries deadlines,\n// cancelation signals, and other request-scoped values across API boundaries\n// and between processes.\n// As of Go 1.7 this package is available in the standard library under the\n// name context.  https://golang.org/pkg/context.\n//\n// Incoming requests to a server should create a Context, and outgoing calls to\n// servers should accept a Context. The chain of function calls between must\n// propagate the Context, optionally replacing it with a modified copy created\n// using WithDeadline, WithTimeout, WithCancel, or WithValue.\n//\n// Programs that use Contexts should follow these rules to keep interfaces\n// consistent across packages and enable static analysis tools to check context\n// propagation:\n//\n// Do not store Contexts inside a struct type; instead, pass a Context\n// explicitly to each function that needs it. The Context should be the first\n// parameter, typically named ctx:\n//\n// \tfunc DoSomething(ctx context.Context, arg Arg) error {\n// \t\t// ... use ctx ...\n// \t}\n//\n// Do not pass a nil Context, even if a function permits it. Pass context.TODO\n// if you are unsure about which Context to use.\n//\n// Use context Values only for request-scoped data that transits processes and\n// APIs, not for passing optional parameters to functions.\n//\n// The same Context may be passed to functions running in different goroutines;\n// Contexts are safe for simultaneous use by multiple goroutines.\n//\n// See http://blog.golang.org/context for example code for a server that uses\n// Contexts.\npackage context // import \"golang.org/x/net/context\"\n\n// Background returns a non-nil, empty Context. It is never canceled, has no\n// values, and has no deadline. 
It is typically used by the main function,\n// initialization, and tests, and as the top-level Context for incoming\n// requests.\nfunc Background() Context {\n\treturn background\n}\n\n// TODO returns a non-nil, empty Context. Code should use context.TODO when\n// it's unclear which Context to use or it is not yet available (because the\n// surrounding function has not yet been extended to accept a Context\n// parameter).  TODO is recognized by static analysis tools that determine\n// whether Contexts are propagated correctly in a program.\nfunc TODO() Context {\n\treturn todo\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/context_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.7\n\npackage context\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\n// otherContext is a Context that's not one of the types defined in context.go.\n// This lets us test code paths that differ based on the underlying type of the\n// Context.\ntype otherContext struct {\n\tContext\n}\n\nfunc TestBackground(t *testing.T) {\n\tc := Background()\n\tif c == nil {\n\t\tt.Fatalf(\"Background returned nil\")\n\t}\n\tselect {\n\tcase x := <-c.Done():\n\t\tt.Errorf(\"<-c.Done() == %v want nothing (it should block)\", x)\n\tdefault:\n\t}\n\tif got, want := fmt.Sprint(c), \"context.Background\"; got != want {\n\t\tt.Errorf(\"Background().String() = %q want %q\", got, want)\n\t}\n}\n\nfunc TestTODO(t *testing.T) {\n\tc := TODO()\n\tif c == nil {\n\t\tt.Fatalf(\"TODO returned nil\")\n\t}\n\tselect {\n\tcase x := <-c.Done():\n\t\tt.Errorf(\"<-c.Done() == %v want nothing (it should block)\", x)\n\tdefault:\n\t}\n\tif got, want := fmt.Sprint(c), \"context.TODO\"; got != want {\n\t\tt.Errorf(\"TODO().String() = %q want %q\", got, want)\n\t}\n}\n\nfunc TestWithCancel(t *testing.T) {\n\tc1, cancel := WithCancel(Background())\n\n\tif got, want := fmt.Sprint(c1), \"context.Background.WithCancel\"; got != want {\n\t\tt.Errorf(\"c1.String() = %q want %q\", got, want)\n\t}\n\n\to := otherContext{c1}\n\tc2, _ := WithCancel(o)\n\tcontexts := []Context{c1, o, c2}\n\n\tfor i, c := range contexts {\n\t\tif d := c.Done(); d == nil {\n\t\t\tt.Errorf(\"c[%d].Done() == %v want non-nil\", i, d)\n\t\t}\n\t\tif e := c.Err(); e != nil {\n\t\t\tt.Errorf(\"c[%d].Err() == %v want nil\", i, e)\n\t\t}\n\n\t\tselect {\n\t\tcase x := <-c.Done():\n\t\t\tt.Errorf(\"<-c.Done() == %v want nothing (it should block)\", 
x)\n\t\tdefault:\n\t\t}\n\t}\n\n\tcancel()\n\ttime.Sleep(100 * time.Millisecond) // let cancelation propagate\n\n\tfor i, c := range contexts {\n\t\tselect {\n\t\tcase <-c.Done():\n\t\tdefault:\n\t\t\tt.Errorf(\"<-c[%d].Done() blocked, but shouldn't have\", i)\n\t\t}\n\t\tif e := c.Err(); e != Canceled {\n\t\t\tt.Errorf(\"c[%d].Err() == %v want %v\", i, e, Canceled)\n\t\t}\n\t}\n}\n\nfunc TestParentFinishesChild(t *testing.T) {\n\t// Context tree:\n\t// parent -> cancelChild\n\t// parent -> valueChild -> timerChild\n\tparent, cancel := WithCancel(Background())\n\tcancelChild, stop := WithCancel(parent)\n\tdefer stop()\n\tvalueChild := WithValue(parent, \"key\", \"value\")\n\ttimerChild, stop := WithTimeout(valueChild, 10000*time.Hour)\n\tdefer stop()\n\n\tselect {\n\tcase x := <-parent.Done():\n\t\tt.Errorf(\"<-parent.Done() == %v want nothing (it should block)\", x)\n\tcase x := <-cancelChild.Done():\n\t\tt.Errorf(\"<-cancelChild.Done() == %v want nothing (it should block)\", x)\n\tcase x := <-timerChild.Done():\n\t\tt.Errorf(\"<-timerChild.Done() == %v want nothing (it should block)\", x)\n\tcase x := <-valueChild.Done():\n\t\tt.Errorf(\"<-valueChild.Done() == %v want nothing (it should block)\", x)\n\tdefault:\n\t}\n\n\t// The parent's children should contain the two cancelable children.\n\tpc := parent.(*cancelCtx)\n\tcc := cancelChild.(*cancelCtx)\n\ttc := timerChild.(*timerCtx)\n\tpc.mu.Lock()\n\tif len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] {\n\t\tt.Errorf(\"bad linkage: pc.children = %v, want %v and %v\",\n\t\t\tpc.children, cc, tc)\n\t}\n\tpc.mu.Unlock()\n\n\tif p, ok := parentCancelCtx(cc.Context); !ok || p != pc {\n\t\tt.Errorf(\"bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true\", p, ok, pc)\n\t}\n\tif p, ok := parentCancelCtx(tc.Context); !ok || p != pc {\n\t\tt.Errorf(\"bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true\", p, ok, pc)\n\t}\n\n\tcancel()\n\n\tpc.mu.Lock()\n\tif 
len(pc.children) != 0 {\n\t\tt.Errorf(\"pc.cancel didn't clear pc.children = %v\", pc.children)\n\t}\n\tpc.mu.Unlock()\n\n\t// parent and children should all be finished.\n\tcheck := func(ctx Context, name string) {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\tdefault:\n\t\t\tt.Errorf(\"<-%s.Done() blocked, but shouldn't have\", name)\n\t\t}\n\t\tif e := ctx.Err(); e != Canceled {\n\t\t\tt.Errorf(\"%s.Err() == %v want %v\", name, e, Canceled)\n\t\t}\n\t}\n\tcheck(parent, \"parent\")\n\tcheck(cancelChild, \"cancelChild\")\n\tcheck(valueChild, \"valueChild\")\n\tcheck(timerChild, \"timerChild\")\n\n\t// WithCancel should return a canceled context on a canceled parent.\n\tprecanceledChild := WithValue(parent, \"key\", \"value\")\n\tselect {\n\tcase <-precanceledChild.Done():\n\tdefault:\n\t\tt.Errorf(\"<-precanceledChild.Done() blocked, but shouldn't have\")\n\t}\n\tif e := precanceledChild.Err(); e != Canceled {\n\t\tt.Errorf(\"precanceledChild.Err() == %v want %v\", e, Canceled)\n\t}\n}\n\nfunc TestChildFinishesFirst(t *testing.T) {\n\tcancelable, stop := WithCancel(Background())\n\tdefer stop()\n\tfor _, parent := range []Context{Background(), cancelable} {\n\t\tchild, cancel := WithCancel(parent)\n\n\t\tselect {\n\t\tcase x := <-parent.Done():\n\t\t\tt.Errorf(\"<-parent.Done() == %v want nothing (it should block)\", x)\n\t\tcase x := <-child.Done():\n\t\t\tt.Errorf(\"<-child.Done() == %v want nothing (it should block)\", x)\n\t\tdefault:\n\t\t}\n\n\t\tcc := child.(*cancelCtx)\n\t\tpc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background()\n\t\tif p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) {\n\t\t\tt.Errorf(\"bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v\", p, ok, pc, pcok)\n\t\t}\n\n\t\tif pcok {\n\t\t\tpc.mu.Lock()\n\t\t\tif len(pc.children) != 1 || !pc.children[cc] {\n\t\t\t\tt.Errorf(\"bad linkage: pc.children = %v, cc = %v\", pc.children, 
cc)\n\t\t\t}\n\t\t\tpc.mu.Unlock()\n\t\t}\n\n\t\tcancel()\n\n\t\tif pcok {\n\t\t\tpc.mu.Lock()\n\t\t\tif len(pc.children) != 0 {\n\t\t\t\tt.Errorf(\"child's cancel didn't remove self from pc.children = %v\", pc.children)\n\t\t\t}\n\t\t\tpc.mu.Unlock()\n\t\t}\n\n\t\t// child should be finished.\n\t\tselect {\n\t\tcase <-child.Done():\n\t\tdefault:\n\t\t\tt.Errorf(\"<-child.Done() blocked, but shouldn't have\")\n\t\t}\n\t\tif e := child.Err(); e != Canceled {\n\t\t\tt.Errorf(\"child.Err() == %v want %v\", e, Canceled)\n\t\t}\n\n\t\t// parent should not be finished.\n\t\tselect {\n\t\tcase x := <-parent.Done():\n\t\t\tt.Errorf(\"<-parent.Done() == %v want nothing (it should block)\", x)\n\t\tdefault:\n\t\t}\n\t\tif e := parent.Err(); e != nil {\n\t\t\tt.Errorf(\"parent.Err() == %v want nil\", e)\n\t\t}\n\t}\n}\n\nfunc testDeadline(c Context, wait time.Duration, t *testing.T) {\n\tselect {\n\tcase <-time.After(wait):\n\t\tt.Fatalf(\"context should have timed out\")\n\tcase <-c.Done():\n\t}\n\tif e := c.Err(); e != DeadlineExceeded {\n\t\tt.Errorf(\"c.Err() == %v want %v\", e, DeadlineExceeded)\n\t}\n}\n\nfunc TestDeadline(t *testing.T) {\n\tt.Parallel()\n\tconst timeUnit = 500 * time.Millisecond\n\tc, _ := WithDeadline(Background(), time.Now().Add(1*timeUnit))\n\tif got, prefix := fmt.Sprint(c), \"context.Background.WithDeadline(\"; !strings.HasPrefix(got, prefix) {\n\t\tt.Errorf(\"c.String() = %q want prefix %q\", got, prefix)\n\t}\n\ttestDeadline(c, 2*timeUnit, t)\n\n\tc, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit))\n\to := otherContext{c}\n\ttestDeadline(o, 2*timeUnit, t)\n\n\tc, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit))\n\to = otherContext{c}\n\tc, _ = WithDeadline(o, time.Now().Add(3*timeUnit))\n\ttestDeadline(c, 2*timeUnit, t)\n}\n\nfunc TestTimeout(t *testing.T) {\n\tt.Parallel()\n\tconst timeUnit = 500 * time.Millisecond\n\tc, _ := WithTimeout(Background(), 1*timeUnit)\n\tif got, prefix := fmt.Sprint(c), 
\"context.Background.WithDeadline(\"; !strings.HasPrefix(got, prefix) {\n\t\tt.Errorf(\"c.String() = %q want prefix %q\", got, prefix)\n\t}\n\ttestDeadline(c, 2*timeUnit, t)\n\n\tc, _ = WithTimeout(Background(), 1*timeUnit)\n\to := otherContext{c}\n\ttestDeadline(o, 2*timeUnit, t)\n\n\tc, _ = WithTimeout(Background(), 1*timeUnit)\n\to = otherContext{c}\n\tc, _ = WithTimeout(o, 3*timeUnit)\n\ttestDeadline(c, 2*timeUnit, t)\n}\n\nfunc TestCanceledTimeout(t *testing.T) {\n\tt.Parallel()\n\tconst timeUnit = 500 * time.Millisecond\n\tc, _ := WithTimeout(Background(), 2*timeUnit)\n\to := otherContext{c}\n\tc, cancel := WithTimeout(o, 4*timeUnit)\n\tcancel()\n\ttime.Sleep(1 * timeUnit) // let cancelation propagate\n\tselect {\n\tcase <-c.Done():\n\tdefault:\n\t\tt.Errorf(\"<-c.Done() blocked, but shouldn't have\")\n\t}\n\tif e := c.Err(); e != Canceled {\n\t\tt.Errorf(\"c.Err() == %v want %v\", e, Canceled)\n\t}\n}\n\ntype key1 int\ntype key2 int\n\nvar k1 = key1(1)\nvar k2 = key2(1) // same int as k1, different type\nvar k3 = key2(3) // same type as k2, different int\n\nfunc TestValues(t *testing.T) {\n\tcheck := func(c Context, nm, v1, v2, v3 string) {\n\t\tif v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 {\n\t\t\tt.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0)\n\t\t}\n\t\tif v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 {\n\t\t\tt.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0)\n\t\t}\n\t\tif v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 {\n\t\t\tt.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0)\n\t\t}\n\t}\n\n\tc0 := Background()\n\tcheck(c0, \"c0\", \"\", \"\", \"\")\n\n\tc1 := WithValue(Background(), k1, \"c1k1\")\n\tcheck(c1, \"c1\", \"c1k1\", \"\", \"\")\n\n\tif got, want := fmt.Sprint(c1), `context.Background.WithValue(1, \"c1k1\")`; got != want {\n\t\tt.Errorf(\"c.String() = %q want %q\", got, 
want)\n\t}\n\n\tc2 := WithValue(c1, k2, \"c2k2\")\n\tcheck(c2, \"c2\", \"c1k1\", \"c2k2\", \"\")\n\n\tc3 := WithValue(c2, k3, \"c3k3\")\n\tcheck(c3, \"c2\", \"c1k1\", \"c2k2\", \"c3k3\")\n\n\tc4 := WithValue(c3, k1, nil)\n\tcheck(c4, \"c4\", \"\", \"c2k2\", \"c3k3\")\n\n\to0 := otherContext{Background()}\n\tcheck(o0, \"o0\", \"\", \"\", \"\")\n\n\to1 := otherContext{WithValue(Background(), k1, \"c1k1\")}\n\tcheck(o1, \"o1\", \"c1k1\", \"\", \"\")\n\n\to2 := WithValue(o1, k2, \"o2k2\")\n\tcheck(o2, \"o2\", \"c1k1\", \"o2k2\", \"\")\n\n\to3 := otherContext{c4}\n\tcheck(o3, \"o3\", \"\", \"c2k2\", \"c3k3\")\n\n\to4 := WithValue(o3, k3, nil)\n\tcheck(o4, \"o4\", \"\", \"c2k2\", \"\")\n}\n\nfunc TestAllocs(t *testing.T) {\n\tbg := Background()\n\tfor _, test := range []struct {\n\t\tdesc       string\n\t\tf          func()\n\t\tlimit      float64\n\t\tgccgoLimit float64\n\t}{\n\t\t{\n\t\t\tdesc:       \"Background()\",\n\t\t\tf:          func() { Background() },\n\t\t\tlimit:      0,\n\t\t\tgccgoLimit: 0,\n\t\t},\n\t\t{\n\t\t\tdesc: fmt.Sprintf(\"WithValue(bg, %v, nil)\", k1),\n\t\t\tf: func() {\n\t\t\t\tc := WithValue(bg, k1, nil)\n\t\t\t\tc.Value(k1)\n\t\t\t},\n\t\t\tlimit:      3,\n\t\t\tgccgoLimit: 3,\n\t\t},\n\t\t{\n\t\t\tdesc: \"WithTimeout(bg, 15*time.Millisecond)\",\n\t\t\tf: func() {\n\t\t\t\tc, _ := WithTimeout(bg, 15*time.Millisecond)\n\t\t\t\t<-c.Done()\n\t\t\t},\n\t\t\tlimit:      8,\n\t\t\tgccgoLimit: 16,\n\t\t},\n\t\t{\n\t\t\tdesc: \"WithCancel(bg)\",\n\t\t\tf: func() {\n\t\t\t\tc, cancel := WithCancel(bg)\n\t\t\t\tcancel()\n\t\t\t\t<-c.Done()\n\t\t\t},\n\t\t\tlimit:      5,\n\t\t\tgccgoLimit: 8,\n\t\t},\n\t\t{\n\t\t\tdesc: \"WithTimeout(bg, 100*time.Millisecond)\",\n\t\t\tf: func() {\n\t\t\t\tc, cancel := WithTimeout(bg, 100*time.Millisecond)\n\t\t\t\tcancel()\n\t\t\t\t<-c.Done()\n\t\t\t},\n\t\t\tlimit:      8,\n\t\t\tgccgoLimit: 25,\n\t\t},\n\t} {\n\t\tlimit := test.limit\n\t\tif runtime.Compiler == \"gccgo\" {\n\t\t\t// gccgo does not yet do escape 
analysis.\n\t\t\t// TODO(iant): Remove this when gccgo does do escape analysis.\n\t\t\tlimit = test.gccgoLimit\n\t\t}\n\t\tif n := testing.AllocsPerRun(100, test.f); n > limit {\n\t\t\tt.Errorf(\"%s allocs = %f want %d\", test.desc, n, int(limit))\n\t\t}\n\t}\n}\n\nfunc TestSimultaneousCancels(t *testing.T) {\n\troot, cancel := WithCancel(Background())\n\tm := map[Context]CancelFunc{root: cancel}\n\tq := []Context{root}\n\t// Create a tree of contexts.\n\tfor len(q) != 0 && len(m) < 100 {\n\t\tparent := q[0]\n\t\tq = q[1:]\n\t\tfor i := 0; i < 4; i++ {\n\t\t\tctx, cancel := WithCancel(parent)\n\t\t\tm[ctx] = cancel\n\t\t\tq = append(q, ctx)\n\t\t}\n\t}\n\t// Start all the cancels in a random order.\n\tvar wg sync.WaitGroup\n\twg.Add(len(m))\n\tfor _, cancel := range m {\n\t\tgo func(cancel CancelFunc) {\n\t\t\tcancel()\n\t\t\twg.Done()\n\t\t}(cancel)\n\t}\n\t// Wait on all the contexts in a random order.\n\tfor ctx := range m {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\tcase <-time.After(1 * time.Second):\n\t\t\tbuf := make([]byte, 10<<10)\n\t\t\tn := runtime.Stack(buf, true)\n\t\t\tt.Fatalf(\"timed out waiting for <-ctx.Done(); stacks:\\n%s\", buf[:n])\n\t\t}\n\t}\n\t// Wait for all the cancel functions to return.\n\tdone := make(chan struct{})\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(done)\n\t}()\n\tselect {\n\tcase <-done:\n\tcase <-time.After(1 * time.Second):\n\t\tbuf := make([]byte, 10<<10)\n\t\tn := runtime.Stack(buf, true)\n\t\tt.Fatalf(\"timed out waiting for cancel functions; stacks:\\n%s\", buf[:n])\n\t}\n}\n\nfunc TestInterlockedCancels(t *testing.T) {\n\tparent, cancelParent := WithCancel(Background())\n\tchild, cancelChild := WithCancel(parent)\n\tgo func() {\n\t\tparent.Done()\n\t\tcancelChild()\n\t}()\n\tcancelParent()\n\tselect {\n\tcase <-child.Done():\n\tcase <-time.After(1 * time.Second):\n\t\tbuf := make([]byte, 10<<10)\n\t\tn := runtime.Stack(buf, true)\n\t\tt.Fatalf(\"timed out waiting for child.Done(); stacks:\\n%s\", 
buf[:n])\n\t}\n}\n\nfunc TestLayersCancel(t *testing.T) {\n\ttestLayers(t, time.Now().UnixNano(), false)\n}\n\nfunc TestLayersTimeout(t *testing.T) {\n\ttestLayers(t, time.Now().UnixNano(), true)\n}\n\nfunc testLayers(t *testing.T, seed int64, testTimeout bool) {\n\trand.Seed(seed)\n\terrorf := func(format string, a ...interface{}) {\n\t\tt.Errorf(fmt.Sprintf(\"seed=%d: %s\", seed, format), a...)\n\t}\n\tconst (\n\t\ttimeout   = 200 * time.Millisecond\n\t\tminLayers = 30\n\t)\n\ttype value int\n\tvar (\n\t\tvals      []*value\n\t\tcancels   []CancelFunc\n\t\tnumTimers int\n\t\tctx       = Background()\n\t)\n\tfor i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ {\n\t\tswitch rand.Intn(3) {\n\t\tcase 0:\n\t\t\tv := new(value)\n\t\t\tctx = WithValue(ctx, v, v)\n\t\t\tvals = append(vals, v)\n\t\tcase 1:\n\t\t\tvar cancel CancelFunc\n\t\t\tctx, cancel = WithCancel(ctx)\n\t\t\tcancels = append(cancels, cancel)\n\t\tcase 2:\n\t\t\tvar cancel CancelFunc\n\t\t\tctx, cancel = WithTimeout(ctx, timeout)\n\t\t\tcancels = append(cancels, cancel)\n\t\t\tnumTimers++\n\t\t}\n\t}\n\tcheckValues := func(when string) {\n\t\tfor _, key := range vals {\n\t\t\tif val := ctx.Value(key).(*value); key != val {\n\t\t\t\terrorf(\"%s: ctx.Value(%p) = %p want %p\", when, key, val, key)\n\t\t\t}\n\t\t}\n\t}\n\tselect {\n\tcase <-ctx.Done():\n\t\terrorf(\"ctx should not be canceled yet\")\n\tdefault:\n\t}\n\tif s, prefix := fmt.Sprint(ctx), \"context.Background.\"; !strings.HasPrefix(s, prefix) {\n\t\tt.Errorf(\"ctx.String() = %q want prefix %q\", s, prefix)\n\t}\n\tt.Log(ctx)\n\tcheckValues(\"before cancel\")\n\tif testTimeout {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\tcase <-time.After(timeout + 100*time.Millisecond):\n\t\t\terrorf(\"ctx should have timed out\")\n\t\t}\n\t\tcheckValues(\"after timeout\")\n\t} else {\n\t\tcancel := cancels[rand.Intn(len(cancels))]\n\t\tcancel()\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\tdefault:\n\t\t\terrorf(\"ctx should be 
canceled\")\n\t\t}\n\t\tcheckValues(\"after cancel\")\n\t}\n}\n\nfunc TestCancelRemoves(t *testing.T) {\n\tcheckChildren := func(when string, ctx Context, want int) {\n\t\tif got := len(ctx.(*cancelCtx).children); got != want {\n\t\t\tt.Errorf(\"%s: context has %d children, want %d\", when, got, want)\n\t\t}\n\t}\n\n\tctx, _ := WithCancel(Background())\n\tcheckChildren(\"after creation\", ctx, 0)\n\t_, cancel := WithCancel(ctx)\n\tcheckChildren(\"with WithCancel child \", ctx, 1)\n\tcancel()\n\tcheckChildren(\"after cancelling WithCancel child\", ctx, 0)\n\n\tctx, _ = WithCancel(Background())\n\tcheckChildren(\"after creation\", ctx, 0)\n\t_, cancel = WithTimeout(ctx, 60*time.Minute)\n\tcheckChildren(\"with WithTimeout child \", ctx, 1)\n\tcancel()\n\tcheckChildren(\"after cancelling WithTimeout child\", ctx, 0)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.7\n\n// Package ctxhttp provides helper functions for performing context-aware HTTP requests.\npackage ctxhttp // import \"golang.org/x/net/context/ctxhttp\"\n\nimport (\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\n\t\"golang.org/x/net/context\"\n)\n\n// Do sends an HTTP request with the provided http.Client and returns\n// an HTTP response.\n//\n// If the client is nil, http.DefaultClient is used.\n//\n// The provided ctx must be non-nil. If it is canceled or times out,\n// ctx.Err() will be returned.\nfunc Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {\n\tif client == nil {\n\t\tclient = http.DefaultClient\n\t}\n\tresp, err := client.Do(req.WithContext(ctx))\n\t// If we got an error, and the context has been canceled,\n\t// the context's error is probably more useful.\n\tif err != nil {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\terr = ctx.Err()\n\t\tdefault:\n\t\t}\n\t}\n\treturn resp, err\n}\n\n// Get issues a GET request via the Do function.\nfunc Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Do(ctx, client, req)\n}\n\n// Head issues a HEAD request via the Do function.\nfunc Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {\n\treq, err := http.NewRequest(\"HEAD\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Do(ctx, client, req)\n}\n\n// Post issues a POST request via the Do function.\nfunc Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {\n\treq, err := http.NewRequest(\"POST\", url, body)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\treq.Header.Set(\"Content-Type\", bodyType)\n\treturn Do(ctx, client, req)\n}\n\n// PostForm issues a POST request via the Do function.\nfunc PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {\n\treturn Post(ctx, client, url, \"application/x-www-form-urlencoded\", strings.NewReader(data.Encode()))\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !plan9,go1.7\n\npackage ctxhttp\n\nimport (\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\n\t\"context\"\n)\n\nfunc TestGo17Context(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tio.WriteString(w, \"ok\")\n\t}))\n\tdefer ts.Close()\n\tctx := context.Background()\n\tresp, err := Get(ctx, http.DefaultClient, ts.URL)\n\tif resp == nil || err != nil {\n\t\tt.Fatalf(\"error received from client: %v %v\", err, resp)\n\t}\n\tresp.Body.Close()\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.7\n\npackage ctxhttp // import \"golang.org/x/net/context/ctxhttp\"\n\nimport (\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\n\t\"golang.org/x/net/context\"\n)\n\nfunc nop() {}\n\nvar (\n\ttestHookContextDoneBeforeHeaders = nop\n\ttestHookDoReturned               = nop\n\ttestHookDidBodyClose             = nop\n)\n\n// Do sends an HTTP request with the provided http.Client and returns an HTTP response.\n// If the client is nil, http.DefaultClient is used.\n// If the context is canceled or times out, ctx.Err() will be returned.\nfunc Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {\n\tif client == nil {\n\t\tclient = http.DefaultClient\n\t}\n\n\t// TODO(djd): Respect any existing value of req.Cancel.\n\tcancel := make(chan struct{})\n\treq.Cancel = cancel\n\n\ttype responseAndError struct {\n\t\tresp *http.Response\n\t\terr  error\n\t}\n\tresult := make(chan responseAndError, 1)\n\n\t// Make local copies of test hooks closed over by goroutines below.\n\t// Prevents data races in tests.\n\ttestHookDoReturned := testHookDoReturned\n\ttestHookDidBodyClose := testHookDidBodyClose\n\n\tgo func() {\n\t\tresp, err := client.Do(req)\n\t\ttestHookDoReturned()\n\t\tresult <- responseAndError{resp, err}\n\t}()\n\n\tvar resp *http.Response\n\n\tselect {\n\tcase <-ctx.Done():\n\t\ttestHookContextDoneBeforeHeaders()\n\t\tclose(cancel)\n\t\t// Clean up after the goroutine calling client.Do:\n\t\tgo func() {\n\t\t\tif r := <-result; r.resp != nil {\n\t\t\t\ttestHookDidBodyClose()\n\t\t\t\tr.resp.Body.Close()\n\t\t\t}\n\t\t}()\n\t\treturn nil, ctx.Err()\n\tcase r := <-result:\n\t\tvar err error\n\t\tresp, err = r.resp, r.err\n\t\tif err != nil {\n\t\t\treturn resp, err\n\t\t}\n\t}\n\n\tc := make(chan struct{})\n\tgo func() {\n\t\tselect 
{\n\t\tcase <-ctx.Done():\n\t\t\tclose(cancel)\n\t\tcase <-c:\n\t\t\t// The response's Body is closed.\n\t\t}\n\t}()\n\tresp.Body = &notifyingReader{resp.Body, c}\n\n\treturn resp, nil\n}\n\n// Get issues a GET request via the Do function.\nfunc Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Do(ctx, client, req)\n}\n\n// Head issues a HEAD request via the Do function.\nfunc Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {\n\treq, err := http.NewRequest(\"HEAD\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Do(ctx, client, req)\n}\n\n// Post issues a POST request via the Do function.\nfunc Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {\n\treq, err := http.NewRequest(\"POST\", url, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Content-Type\", bodyType)\n\treturn Do(ctx, client, req)\n}\n\n// PostForm issues a POST request via the Do function.\nfunc PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {\n\treturn Post(ctx, client, url, \"application/x-www-form-urlencoded\", strings.NewReader(data.Encode()))\n}\n\n// notifyingReader is an io.ReadCloser that closes the notify channel after\n// Close is called or a Read fails on the underlying ReadCloser.\ntype notifyingReader struct {\n\tio.ReadCloser\n\tnotify chan<- struct{}\n}\n\nfunc (r *notifyingReader) Read(p []byte) (int, error) {\n\tn, err := r.ReadCloser.Read(p)\n\tif err != nil && r.notify != nil {\n\t\tclose(r.notify)\n\t\tr.notify = nil\n\t}\n\treturn n, err\n}\n\nfunc (r *notifyingReader) Close() error {\n\terr := r.ReadCloser.Close()\n\tif r.notify != nil {\n\t\tclose(r.notify)\n\t\tr.notify = nil\n\t}\n\treturn err\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !plan9,!go1.7\n\npackage ctxhttp\n\nimport (\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n)\n\n// golang.org/issue/14065\nfunc TestClosesResponseBodyOnCancel(t *testing.T) {\n\tdefer func() { testHookContextDoneBeforeHeaders = nop }()\n\tdefer func() { testHookDoReturned = nop }()\n\tdefer func() { testHookDidBodyClose = nop }()\n\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))\n\tdefer ts.Close()\n\n\tctx, cancel := context.WithCancel(context.Background())\n\n\t// closed when Do enters select case <-ctx.Done()\n\tenteredDonePath := make(chan struct{})\n\n\ttestHookContextDoneBeforeHeaders = func() {\n\t\tclose(enteredDonePath)\n\t}\n\n\ttestHookDoReturned = func() {\n\t\t// We now have the result (the Flush'd headers) at least,\n\t\t// so we can cancel the request.\n\t\tcancel()\n\n\t\t// But block the client.Do goroutine from sending\n\t\t// until Do enters into the <-ctx.Done() path, since\n\t\t// otherwise if both channels are readable, select\n\t\t// picks a random one.\n\t\t<-enteredDonePath\n\t}\n\n\tsawBodyClose := make(chan struct{})\n\ttestHookDidBodyClose = func() { close(sawBodyClose) }\n\n\ttr := &http.Transport{}\n\tdefer tr.CloseIdleConnections()\n\tc := &http.Client{Transport: tr}\n\treq, _ := http.NewRequest(\"GET\", ts.URL, nil)\n\t_, doErr := Do(ctx, c, req)\n\n\tselect {\n\tcase <-sawBodyClose:\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"timeout waiting for body to close\")\n\t}\n\n\tif doErr != ctx.Err() {\n\t\tt.Errorf(\"Do error = %v; want %v\", doErr, ctx.Err())\n\t}\n}\n\ntype noteCloseConn struct {\n\tnet.Conn\n\tonceClose sync.Once\n\tclosefn   func()\n}\n\nfunc (c *noteCloseConn) Close() error 
{\n\tc.onceClose.Do(c.closefn)\n\treturn c.Conn.Close()\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !plan9\n\npackage ctxhttp\n\nimport (\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n)\n\nconst (\n\trequestDuration = 100 * time.Millisecond\n\trequestBody     = \"ok\"\n)\n\nfunc okHandler(w http.ResponseWriter, r *http.Request) {\n\ttime.Sleep(requestDuration)\n\tio.WriteString(w, requestBody)\n}\n\nfunc TestNoTimeout(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(okHandler))\n\tdefer ts.Close()\n\n\tctx := context.Background()\n\tres, err := Get(ctx, nil, ts.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer res.Body.Close()\n\tslurp, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif string(slurp) != requestBody {\n\t\tt.Errorf(\"body = %q; want %q\", slurp, requestBody)\n\t}\n}\n\nfunc TestCancelBeforeHeaders(t *testing.T) {\n\tctx, cancel := context.WithCancel(context.Background())\n\n\tblockServer := make(chan struct{})\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tcancel()\n\t\t<-blockServer\n\t\tio.WriteString(w, requestBody)\n\t}))\n\tdefer ts.Close()\n\tdefer close(blockServer)\n\n\tres, err := Get(ctx, nil, ts.URL)\n\tif err == nil {\n\t\tres.Body.Close()\n\t\tt.Fatal(\"Get returned unexpected nil error\")\n\t}\n\tif err != context.Canceled {\n\t\tt.Errorf(\"err = %v; want %v\", err, context.Canceled)\n\t}\n}\n\nfunc TestCancelAfterHangingRequest(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.(http.Flusher).Flush()\n\t\t<-w.(http.CloseNotifier).CloseNotify()\n\t}))\n\tdefer ts.Close()\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tresp, err := Get(ctx, nil, ts.URL)\n\tif 
err != nil {\n\t\tt.Fatalf(\"unexpected error in Get: %v\", err)\n\t}\n\n\t// Cancel befer reading the body.\n\t// Reading Request.Body should fail, since the request was\n\t// canceled before anything was written.\n\tcancel()\n\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\tif len(b) != 0 || err == nil {\n\t\t\tt.Errorf(`Read got (%q, %v); want (\"\", error)`, b, err)\n\t\t}\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-time.After(1 * time.Second):\n\t\tt.Errorf(\"Test timed out\")\n\tcase <-done:\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/go17.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.7\n\npackage context\n\nimport (\n\t\"context\" // standard library's context, as of Go 1.7\n\t\"time\"\n)\n\nvar (\n\ttodo       = context.TODO()\n\tbackground = context.Background()\n)\n\n// Canceled is the error returned by Context.Err when the context is canceled.\nvar Canceled = context.Canceled\n\n// DeadlineExceeded is the error returned by Context.Err when the context's\n// deadline passes.\nvar DeadlineExceeded = context.DeadlineExceeded\n\n// WithCancel returns a copy of parent with a new Done channel. The returned\n// context's Done channel is closed when the returned cancel function is called\n// or when the parent context's Done channel is closed, whichever happens first.\n//\n// Canceling this context releases resources associated with it, so code should\n// call cancel as soon as the operations running in this Context complete.\nfunc WithCancel(parent Context) (ctx Context, cancel CancelFunc) {\n\tctx, f := context.WithCancel(parent)\n\treturn ctx, CancelFunc(f)\n}\n\n// WithDeadline returns a copy of the parent context with the deadline adjusted\n// to be no later than d. If the parent's deadline is already earlier than d,\n// WithDeadline(parent, d) is semantically equivalent to parent. 
The returned\n// context's Done channel is closed when the deadline expires, when the returned\n// cancel function is called, or when the parent context's Done channel is\n// closed, whichever happens first.\n//\n// Canceling this context releases resources associated with it, so code should\n// call cancel as soon as the operations running in this Context complete.\nfunc WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {\n\tctx, f := context.WithDeadline(parent, deadline)\n\treturn ctx, CancelFunc(f)\n}\n\n// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).\n//\n// Canceling this context releases resources associated with it, so code should\n// call cancel as soon as the operations running in this Context complete:\n//\n// \tfunc slowOperationWithTimeout(ctx context.Context) (Result, error) {\n// \t\tctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)\n// \t\tdefer cancel()  // releases resources if slowOperation completes before timeout elapses\n// \t\treturn slowOperation(ctx)\n// \t}\nfunc WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {\n\treturn WithDeadline(parent, time.Now().Add(timeout))\n}\n\n// WithValue returns a copy of parent in which the value associated with key is\n// val.\n//\n// Use context Values only for request-scoped data that transits processes and\n// APIs, not for passing optional parameters to functions.\nfunc WithValue(parent Context, key interface{}, val interface{}) Context {\n\treturn context.WithValue(parent, key, val)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/go19.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.9\n\npackage context\n\nimport \"context\" // standard library's context, as of Go 1.7\n\n// A Context carries a deadline, a cancelation signal, and other values across\n// API boundaries.\n//\n// Context's methods may be called by multiple goroutines simultaneously.\ntype Context = context.Context\n\n// A CancelFunc tells an operation to abandon its work.\n// A CancelFunc does not wait for the work to stop.\n// After the first call, subsequent calls to a CancelFunc do nothing.\ntype CancelFunc = context.CancelFunc\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/pre_go17.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.7\n\npackage context\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n)\n\n// An emptyCtx is never canceled, has no values, and has no deadline. It is not\n// struct{}, since vars of this type must have distinct addresses.\ntype emptyCtx int\n\nfunc (*emptyCtx) Deadline() (deadline time.Time, ok bool) {\n\treturn\n}\n\nfunc (*emptyCtx) Done() <-chan struct{} {\n\treturn nil\n}\n\nfunc (*emptyCtx) Err() error {\n\treturn nil\n}\n\nfunc (*emptyCtx) Value(key interface{}) interface{} {\n\treturn nil\n}\n\nfunc (e *emptyCtx) String() string {\n\tswitch e {\n\tcase background:\n\t\treturn \"context.Background\"\n\tcase todo:\n\t\treturn \"context.TODO\"\n\t}\n\treturn \"unknown empty Context\"\n}\n\nvar (\n\tbackground = new(emptyCtx)\n\ttodo       = new(emptyCtx)\n)\n\n// Canceled is the error returned by Context.Err when the context is canceled.\nvar Canceled = errors.New(\"context canceled\")\n\n// DeadlineExceeded is the error returned by Context.Err when the context's\n// deadline passes.\nvar DeadlineExceeded = errors.New(\"context deadline exceeded\")\n\n// WithCancel returns a copy of parent with a new Done channel. 
The returned\n// context's Done channel is closed when the returned cancel function is called\n// or when the parent context's Done channel is closed, whichever happens first.\n//\n// Canceling this context releases resources associated with it, so code should\n// call cancel as soon as the operations running in this Context complete.\nfunc WithCancel(parent Context) (ctx Context, cancel CancelFunc) {\n\tc := newCancelCtx(parent)\n\tpropagateCancel(parent, c)\n\treturn c, func() { c.cancel(true, Canceled) }\n}\n\n// newCancelCtx returns an initialized cancelCtx.\nfunc newCancelCtx(parent Context) *cancelCtx {\n\treturn &cancelCtx{\n\t\tContext: parent,\n\t\tdone:    make(chan struct{}),\n\t}\n}\n\n// propagateCancel arranges for child to be canceled when parent is.\nfunc propagateCancel(parent Context, child canceler) {\n\tif parent.Done() == nil {\n\t\treturn // parent is never canceled\n\t}\n\tif p, ok := parentCancelCtx(parent); ok {\n\t\tp.mu.Lock()\n\t\tif p.err != nil {\n\t\t\t// parent has already been canceled\n\t\t\tchild.cancel(false, p.err)\n\t\t} else {\n\t\t\tif p.children == nil {\n\t\t\t\tp.children = make(map[canceler]bool)\n\t\t\t}\n\t\t\tp.children[child] = true\n\t\t}\n\t\tp.mu.Unlock()\n\t} else {\n\t\tgo func() {\n\t\t\tselect {\n\t\t\tcase <-parent.Done():\n\t\t\t\tchild.cancel(false, parent.Err())\n\t\t\tcase <-child.Done():\n\t\t\t}\n\t\t}()\n\t}\n}\n\n// parentCancelCtx follows a chain of parent references until it finds a\n// *cancelCtx. 
This function understands how each of the concrete types in this\n// package represents its parent.\nfunc parentCancelCtx(parent Context) (*cancelCtx, bool) {\n\tfor {\n\t\tswitch c := parent.(type) {\n\t\tcase *cancelCtx:\n\t\t\treturn c, true\n\t\tcase *timerCtx:\n\t\t\treturn c.cancelCtx, true\n\t\tcase *valueCtx:\n\t\t\tparent = c.Context\n\t\tdefault:\n\t\t\treturn nil, false\n\t\t}\n\t}\n}\n\n// removeChild removes a context from its parent.\nfunc removeChild(parent Context, child canceler) {\n\tp, ok := parentCancelCtx(parent)\n\tif !ok {\n\t\treturn\n\t}\n\tp.mu.Lock()\n\tif p.children != nil {\n\t\tdelete(p.children, child)\n\t}\n\tp.mu.Unlock()\n}\n\n// A canceler is a context type that can be canceled directly. The\n// implementations are *cancelCtx and *timerCtx.\ntype canceler interface {\n\tcancel(removeFromParent bool, err error)\n\tDone() <-chan struct{}\n}\n\n// A cancelCtx can be canceled. When canceled, it also cancels any children\n// that implement canceler.\ntype cancelCtx struct {\n\tContext\n\n\tdone chan struct{} // closed by the first cancel call.\n\n\tmu       sync.Mutex\n\tchildren map[canceler]bool // set to nil by the first cancel call\n\terr      error             // set to non-nil by the first cancel call\n}\n\nfunc (c *cancelCtx) Done() <-chan struct{} {\n\treturn c.done\n}\n\nfunc (c *cancelCtx) Err() error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\treturn c.err\n}\n\nfunc (c *cancelCtx) String() string {\n\treturn fmt.Sprintf(\"%v.WithCancel\", c.Context)\n}\n\n// cancel closes c.done, cancels each of c's children, and, if\n// removeFromParent is true, removes c from its parent's children.\nfunc (c *cancelCtx) cancel(removeFromParent bool, err error) {\n\tif err == nil {\n\t\tpanic(\"context: internal error: missing cancel error\")\n\t}\n\tc.mu.Lock()\n\tif c.err != nil {\n\t\tc.mu.Unlock()\n\t\treturn // already canceled\n\t}\n\tc.err = err\n\tclose(c.done)\n\tfor child := range c.children {\n\t\t// NOTE: acquiring the child's 
lock while holding parent's lock.\n\t\tchild.cancel(false, err)\n\t}\n\tc.children = nil\n\tc.mu.Unlock()\n\n\tif removeFromParent {\n\t\tremoveChild(c.Context, c)\n\t}\n}\n\n// WithDeadline returns a copy of the parent context with the deadline adjusted\n// to be no later than d. If the parent's deadline is already earlier than d,\n// WithDeadline(parent, d) is semantically equivalent to parent. The returned\n// context's Done channel is closed when the deadline expires, when the returned\n// cancel function is called, or when the parent context's Done channel is\n// closed, whichever happens first.\n//\n// Canceling this context releases resources associated with it, so code should\n// call cancel as soon as the operations running in this Context complete.\nfunc WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {\n\tif cur, ok := parent.Deadline(); ok && cur.Before(deadline) {\n\t\t// The current deadline is already sooner than the new one.\n\t\treturn WithCancel(parent)\n\t}\n\tc := &timerCtx{\n\t\tcancelCtx: newCancelCtx(parent),\n\t\tdeadline:  deadline,\n\t}\n\tpropagateCancel(parent, c)\n\td := deadline.Sub(time.Now())\n\tif d <= 0 {\n\t\tc.cancel(true, DeadlineExceeded) // deadline has already passed\n\t\treturn c, func() { c.cancel(true, Canceled) }\n\t}\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tif c.err == nil {\n\t\tc.timer = time.AfterFunc(d, func() {\n\t\t\tc.cancel(true, DeadlineExceeded)\n\t\t})\n\t}\n\treturn c, func() { c.cancel(true, Canceled) }\n}\n\n// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to\n// implement Done and Err. 
It implements cancel by stopping its timer then\n// delegating to cancelCtx.cancel.\ntype timerCtx struct {\n\t*cancelCtx\n\ttimer *time.Timer // Under cancelCtx.mu.\n\n\tdeadline time.Time\n}\n\nfunc (c *timerCtx) Deadline() (deadline time.Time, ok bool) {\n\treturn c.deadline, true\n}\n\nfunc (c *timerCtx) String() string {\n\treturn fmt.Sprintf(\"%v.WithDeadline(%s [%s])\", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))\n}\n\nfunc (c *timerCtx) cancel(removeFromParent bool, err error) {\n\tc.cancelCtx.cancel(false, err)\n\tif removeFromParent {\n\t\t// Remove this timerCtx from its parent cancelCtx's children.\n\t\tremoveChild(c.cancelCtx.Context, c)\n\t}\n\tc.mu.Lock()\n\tif c.timer != nil {\n\t\tc.timer.Stop()\n\t\tc.timer = nil\n\t}\n\tc.mu.Unlock()\n}\n\n// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).\n//\n// Canceling this context releases resources associated with it, so code should\n// call cancel as soon as the operations running in this Context complete:\n//\n// \tfunc slowOperationWithTimeout(ctx context.Context) (Result, error) {\n// \t\tctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)\n// \t\tdefer cancel()  // releases resources if slowOperation completes before timeout elapses\n// \t\treturn slowOperation(ctx)\n// \t}\nfunc WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {\n\treturn WithDeadline(parent, time.Now().Add(timeout))\n}\n\n// WithValue returns a copy of parent in which the value associated with key is\n// val.\n//\n// Use context Values only for request-scoped data that transits processes and\n// APIs, not for passing optional parameters to functions.\nfunc WithValue(parent Context, key interface{}, val interface{}) Context {\n\treturn &valueCtx{parent, key, val}\n}\n\n// A valueCtx carries a key-value pair. 
It implements Value for that key and\n// delegates all other calls to the embedded Context.\ntype valueCtx struct {\n\tContext\n\tkey, val interface{}\n}\n\nfunc (c *valueCtx) String() string {\n\treturn fmt.Sprintf(\"%v.WithValue(%#v, %#v)\", c.Context, c.key, c.val)\n}\n\nfunc (c *valueCtx) Value(key interface{}) interface{} {\n\tif c.key == key {\n\t\treturn c.val\n\t}\n\treturn c.Context.Value(key)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/pre_go19.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.9\n\npackage context\n\nimport \"time\"\n\n// A Context carries a deadline, a cancelation signal, and other values across\n// API boundaries.\n//\n// Context's methods may be called by multiple goroutines simultaneously.\ntype Context interface {\n\t// Deadline returns the time when work done on behalf of this context\n\t// should be canceled. Deadline returns ok==false when no deadline is\n\t// set. Successive calls to Deadline return the same results.\n\tDeadline() (deadline time.Time, ok bool)\n\n\t// Done returns a channel that's closed when work done on behalf of this\n\t// context should be canceled. Done may return nil if this context can\n\t// never be canceled. Successive calls to Done return the same value.\n\t//\n\t// WithCancel arranges for Done to be closed when cancel is called;\n\t// WithDeadline arranges for Done to be closed when the deadline\n\t// expires; WithTimeout arranges for Done to be closed when the timeout\n\t// elapses.\n\t//\n\t// Done is provided for use in select statements:\n\t//\n\t//  // Stream generates values with DoSomething and sends them to out\n\t//  // until DoSomething returns an error or ctx.Done is closed.\n\t//  func Stream(ctx context.Context, out chan<- Value) error {\n\t//  \tfor {\n\t//  \t\tv, err := DoSomething(ctx)\n\t//  \t\tif err != nil {\n\t//  \t\t\treturn err\n\t//  \t\t}\n\t//  \t\tselect {\n\t//  \t\tcase <-ctx.Done():\n\t//  \t\t\treturn ctx.Err()\n\t//  \t\tcase out <- v:\n\t//  \t\t}\n\t//  \t}\n\t//  }\n\t//\n\t// See http://blog.golang.org/pipelines for more examples of how to use\n\t// a Done channel for cancelation.\n\tDone() <-chan struct{}\n\n\t// Err returns a non-nil error value after Done is closed. 
Err returns\n\t// Canceled if the context was canceled or DeadlineExceeded if the\n\t// context's deadline passed. No other values for Err are defined.\n\t// After Done is closed, successive calls to Err return the same value.\n\tErr() error\n\n\t// Value returns the value associated with this context for key, or nil\n\t// if no value is associated with key. Successive calls to Value with\n\t// the same key returns the same result.\n\t//\n\t// Use context values only for request-scoped data that transits\n\t// processes and API boundaries, not for passing optional parameters to\n\t// functions.\n\t//\n\t// A key identifies a specific value in a Context. Functions that wish\n\t// to store values in Context typically allocate a key in a global\n\t// variable then use that key as the argument to context.WithValue and\n\t// Context.Value. A key can be any type that supports equality;\n\t// packages should define keys as an unexported type to avoid\n\t// collisions.\n\t//\n\t// Packages that define a Context key should provide type-safe accessors\n\t// for the values stores using that key:\n\t//\n\t// \t// Package user defines a User type that's stored in Contexts.\n\t// \tpackage user\n\t//\n\t// \timport \"golang.org/x/net/context\"\n\t//\n\t// \t// User is the type of value stored in the Contexts.\n\t// \ttype User struct {...}\n\t//\n\t// \t// key is an unexported type for keys defined in this package.\n\t// \t// This prevents collisions with keys defined in other packages.\n\t// \ttype key int\n\t//\n\t// \t// userKey is the key for user.User values in Contexts. 
It is\n\t// \t// unexported; clients use user.NewContext and user.FromContext\n\t// \t// instead of using this key directly.\n\t// \tvar userKey key = 0\n\t//\n\t// \t// NewContext returns a new Context that carries value u.\n\t// \tfunc NewContext(ctx context.Context, u *User) context.Context {\n\t// \t\treturn context.WithValue(ctx, userKey, u)\n\t// \t}\n\t//\n\t// \t// FromContext returns the User value stored in ctx, if any.\n\t// \tfunc FromContext(ctx context.Context) (*User, bool) {\n\t// \t\tu, ok := ctx.Value(userKey).(*User)\n\t// \t\treturn u, ok\n\t// \t}\n\tValue(key interface{}) interface{}\n}\n\n// A CancelFunc tells an operation to abandon its work.\n// A CancelFunc does not wait for the work to stop.\n// After the first call, subsequent calls to a CancelFunc do nothing.\ntype CancelFunc func()\n"
  },
  {
    "path": "vendor/golang.org/x/net/context/withtimeout_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage context_test\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n)\n\n// This example passes a context with a timeout to tell a blocking function that\n// it should abandon its work after the timeout elapses.\nfunc ExampleWithTimeout() {\n\t// Pass a context with a timeout to tell a blocking function that it\n\t// should abandon its work after the timeout elapses.\n\tctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)\n\tdefer cancel()\n\n\tselect {\n\tcase <-time.After(1 * time.Second):\n\t\tfmt.Println(\"overslept\")\n\tcase <-ctx.Done():\n\t\tfmt.Println(ctx.Err()) // prints \"context deadline exceeded\"\n\t}\n\n\t// Output:\n\t// context deadline exceeded\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/dict/dict.go",
    "content": "// Copyright 2010 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package dict implements the Dictionary Server Protocol\n// as defined in RFC 2229.\npackage dict // import \"golang.org/x/net/dict\"\n\nimport (\n\t\"net/textproto\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// A Client represents a client connection to a dictionary server.\ntype Client struct {\n\ttext *textproto.Conn\n}\n\n// Dial returns a new client connected to a dictionary server at\n// addr on the given network.\nfunc Dial(network, addr string) (*Client, error) {\n\ttext, err := textproto.Dial(network, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, _, err = text.ReadCodeLine(220)\n\tif err != nil {\n\t\ttext.Close()\n\t\treturn nil, err\n\t}\n\treturn &Client{text: text}, nil\n}\n\n// Close closes the connection to the dictionary server.\nfunc (c *Client) Close() error {\n\treturn c.text.Close()\n}\n\n// A Dict represents a dictionary available on the server.\ntype Dict struct {\n\tName string // short name of dictionary\n\tDesc string // long description\n}\n\n// Dicts returns a list of the dictionaries available on the server.\nfunc (c *Client) Dicts() ([]Dict, error) {\n\tid, err := c.text.Cmd(\"SHOW DB\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.text.StartResponse(id)\n\tdefer c.text.EndResponse(id)\n\n\t_, _, err = c.text.ReadCodeLine(110)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlines, err := c.text.ReadDotLines()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, _, err = c.text.ReadCodeLine(250)\n\n\tdicts := make([]Dict, len(lines))\n\tfor i := range dicts {\n\t\td := &dicts[i]\n\t\ta, _ := fields(lines[i])\n\t\tif len(a) < 2 {\n\t\t\treturn nil, textproto.ProtocolError(\"invalid dictionary: \" + lines[i])\n\t\t}\n\t\td.Name = a[0]\n\t\td.Desc = a[1]\n\t}\n\treturn dicts, err\n}\n\n// A Defn represents a definition.\ntype Defn struct {\n\tDict Dict 
  // Dict where definition was found\n\tWord string // Word being defined\n\tText []byte // Definition text, typically multiple lines\n}\n\n// Define requests the definition of the given word.\n// The argument dict names the dictionary to use,\n// the Name field of a Dict returned by Dicts.\n//\n// The special dictionary name \"*\" means to look in all the\n// server's dictionaries.\n// The special dictionary name \"!\" means to look in all the\n// server's dictionaries in turn, stopping after finding the word\n// in one of them.\nfunc (c *Client) Define(dict, word string) ([]*Defn, error) {\n\tid, err := c.text.Cmd(\"DEFINE %s %q\", dict, word)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.text.StartResponse(id)\n\tdefer c.text.EndResponse(id)\n\n\t_, line, err := c.text.ReadCodeLine(150)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ta, _ := fields(line)\n\tif len(a) < 1 {\n\t\treturn nil, textproto.ProtocolError(\"malformed response: \" + line)\n\t}\n\tn, err := strconv.Atoi(a[0])\n\tif err != nil {\n\t\treturn nil, textproto.ProtocolError(\"invalid definition count: \" + a[0])\n\t}\n\tdef := make([]*Defn, n)\n\tfor i := 0; i < n; i++ {\n\t\t_, line, err = c.text.ReadCodeLine(151)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ta, _ := fields(line)\n\t\tif len(a) < 3 {\n\t\t\t// skip it, to keep protocol in sync\n\t\t\ti--\n\t\t\tn--\n\t\t\tdef = def[0:n]\n\t\t\tcontinue\n\t\t}\n\t\td := &Defn{Word: a[0], Dict: Dict{a[1], a[2]}}\n\t\td.Text, err = c.text.ReadDotBytes()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdef[i] = d\n\t}\n\t_, _, err = c.text.ReadCodeLine(250)\n\treturn def, err\n}\n\n// Fields returns the fields in s.\n// Fields are space separated unquoted words\n// or quoted with single or double quote.\nfunc fields(s string) ([]string, error) {\n\tvar v []string\n\ti := 0\n\tfor {\n\t\tfor i < len(s) && (s[i] == ' ' || s[i] == '\\t') {\n\t\t\ti++\n\t\t}\n\t\tif i >= len(s) {\n\t\t\tbreak\n\t\t}\n\t\tif s[i] == '\"' || s[i] 
== '\\'' {\n\t\t\tq := s[i]\n\t\t\t// quoted string\n\t\t\tvar j int\n\t\t\tfor j = i + 1; ; j++ {\n\t\t\t\tif j >= len(s) {\n\t\t\t\t\treturn nil, textproto.ProtocolError(\"malformed quoted string\")\n\t\t\t\t}\n\t\t\t\tif s[j] == '\\\\' {\n\t\t\t\t\tj++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif s[j] == q {\n\t\t\t\t\tj++\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tv = append(v, unquote(s[i+1:j-1]))\n\t\t\ti = j\n\t\t} else {\n\t\t\t// atom\n\t\t\tvar j int\n\t\t\tfor j = i; j < len(s); j++ {\n\t\t\t\tif s[j] == ' ' || s[j] == '\\t' || s[j] == '\\\\' || s[j] == '\"' || s[j] == '\\'' {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tv = append(v, s[i:j])\n\t\t\ti = j\n\t\t}\n\t\tif i < len(s) {\n\t\t\tc := s[i]\n\t\t\tif c != ' ' && c != '\\t' {\n\t\t\t\treturn nil, textproto.ProtocolError(\"quotes not on word boundaries\")\n\t\t\t}\n\t\t}\n\t}\n\treturn v, nil\n}\n\nfunc unquote(s string) string {\n\tif strings.Index(s, \"\\\\\") < 0 {\n\t\treturn s\n\t}\n\tb := []byte(s)\n\tw := 0\n\tfor r := 0; r < len(b); r++ {\n\t\tc := b[r]\n\t\tif c == '\\\\' {\n\t\t\tr++\n\t\t\tc = b[r]\n\t\t}\n\t\tb[w] = c\n\t\tw++\n\t}\n\treturn string(b[0:w])\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/dns/dnsmessage/example_test.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage dnsmessage_test\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\n\t\"golang.org/x/net/dns/dnsmessage\"\n)\n\nfunc mustNewName(name string) dnsmessage.Name {\n\tn, err := dnsmessage.NewName(name)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn n\n}\n\nfunc ExampleParser() {\n\tmsg := dnsmessage.Message{\n\t\tHeader: dnsmessage.Header{Response: true, Authoritative: true},\n\t\tQuestions: []dnsmessage.Question{\n\t\t\t{\n\t\t\t\tName:  mustNewName(\"foo.bar.example.com.\"),\n\t\t\t\tType:  dnsmessage.TypeA,\n\t\t\t\tClass: dnsmessage.ClassINET,\n\t\t\t},\n\t\t\t{\n\t\t\t\tName:  mustNewName(\"bar.example.com.\"),\n\t\t\t\tType:  dnsmessage.TypeA,\n\t\t\t\tClass: dnsmessage.ClassINET,\n\t\t\t},\n\t\t},\n\t\tAnswers: []dnsmessage.Resource{\n\t\t\t{\n\t\t\t\tdnsmessage.ResourceHeader{\n\t\t\t\t\tName:  mustNewName(\"foo.bar.example.com.\"),\n\t\t\t\t\tType:  dnsmessage.TypeA,\n\t\t\t\t\tClass: dnsmessage.ClassINET,\n\t\t\t\t},\n\t\t\t\t&dnsmessage.AResource{[4]byte{127, 0, 0, 1}},\n\t\t\t},\n\t\t\t{\n\t\t\t\tdnsmessage.ResourceHeader{\n\t\t\t\t\tName:  mustNewName(\"bar.example.com.\"),\n\t\t\t\t\tType:  dnsmessage.TypeA,\n\t\t\t\t\tClass: dnsmessage.ClassINET,\n\t\t\t\t},\n\t\t\t\t&dnsmessage.AResource{[4]byte{127, 0, 0, 2}},\n\t\t\t},\n\t\t},\n\t}\n\n\tbuf, err := msg.Pack()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\twantName := \"bar.example.com.\"\n\n\tvar p dnsmessage.Parser\n\tif _, err := p.Start(buf); err != nil {\n\t\tpanic(err)\n\t}\n\n\tfor {\n\t\tq, err := p.Question()\n\t\tif err == dnsmessage.ErrSectionDone {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tif q.Name.String() != wantName {\n\t\t\tcontinue\n\t\t}\n\n\t\tfmt.Println(\"Found question for name\", wantName)\n\t\tif err := p.SkipAllQuestions(); err != nil 
{\n\t\t\tpanic(err)\n\t\t}\n\t\tbreak\n\t}\n\n\tvar gotIPs []net.IP\n\tfor {\n\t\th, err := p.AnswerHeader()\n\t\tif err == dnsmessage.ErrSectionDone {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tif (h.Type != dnsmessage.TypeA && h.Type != dnsmessage.TypeAAAA) || h.Class != dnsmessage.ClassINET {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !strings.EqualFold(h.Name.String(), wantName) {\n\t\t\tif err := p.SkipAnswer(); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch h.Type {\n\t\tcase dnsmessage.TypeA:\n\t\t\tr, err := p.AResource()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tgotIPs = append(gotIPs, r.A[:])\n\t\tcase dnsmessage.TypeAAAA:\n\t\t\tr, err := p.AAAAResource()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tgotIPs = append(gotIPs, r.AAAA[:])\n\t\t}\n\t}\n\n\tfmt.Printf(\"Found A/AAAA records for name %s: %v\\n\", wantName, gotIPs)\n\n\t// Output:\n\t// Found question for name bar.example.com.\n\t// Found A/AAAA records for name bar.example.com.: [127.0.0.2]\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/dns/dnsmessage/message.go",
    "content": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package dnsmessage provides a mostly RFC 1035 compliant implementation of\n// DNS message packing and unpacking.\n//\n// This implementation is designed to minimize heap allocations and avoid\n// unnecessary packing and unpacking as much as possible.\npackage dnsmessage\n\nimport (\n\t\"errors\"\n)\n\n// Packet formats\n\n// A Type is a type of DNS request and response.\ntype Type uint16\n\n// A Class is a type of network.\ntype Class uint16\n\n// An OpCode is a DNS operation code.\ntype OpCode uint16\n\n// An RCode is a DNS response status code.\ntype RCode uint16\n\n// Wire constants.\nconst (\n\t// ResourceHeader.Type and Question.Type\n\tTypeA     Type = 1\n\tTypeNS    Type = 2\n\tTypeCNAME Type = 5\n\tTypeSOA   Type = 6\n\tTypePTR   Type = 12\n\tTypeMX    Type = 15\n\tTypeTXT   Type = 16\n\tTypeAAAA  Type = 28\n\tTypeSRV   Type = 33\n\n\t// Question.Type\n\tTypeWKS   Type = 11\n\tTypeHINFO Type = 13\n\tTypeMINFO Type = 14\n\tTypeAXFR  Type = 252\n\tTypeALL   Type = 255\n\n\t// ResourceHeader.Class and Question.Class\n\tClassINET   Class = 1\n\tClassCSNET  Class = 2\n\tClassCHAOS  Class = 3\n\tClassHESIOD Class = 4\n\n\t// Question.Class\n\tClassANY Class = 255\n\n\t// Message.Rcode\n\tRCodeSuccess        RCode = 0\n\tRCodeFormatError    RCode = 1\n\tRCodeServerFailure  RCode = 2\n\tRCodeNameError      RCode = 3\n\tRCodeNotImplemented RCode = 4\n\tRCodeRefused        RCode = 5\n)\n\nvar (\n\t// ErrNotStarted indicates that the prerequisite information isn't\n\t// available yet because the previous records haven't been appropriately\n\t// parsed, skipped or finished.\n\tErrNotStarted = errors.New(\"parsing/packing of this type isn't available yet\")\n\n\t// ErrSectionDone indicated that all records in the section have been\n\t// parsed or finished.\n\tErrSectionDone = 
errors.New(\"parsing/packing of this section has completed\")\n\n\terrBaseLen            = errors.New(\"insufficient data for base length type\")\n\terrCalcLen            = errors.New(\"insufficient data for calculated length type\")\n\terrReserved           = errors.New(\"segment prefix is reserved\")\n\terrTooManyPtr         = errors.New(\"too many pointers (>10)\")\n\terrInvalidPtr         = errors.New(\"invalid pointer\")\n\terrNilResouceBody     = errors.New(\"nil resource body\")\n\terrResourceLen        = errors.New(\"insufficient data for resource body length\")\n\terrSegTooLong         = errors.New(\"segment length too long\")\n\terrZeroSegLen         = errors.New(\"zero length segment\")\n\terrResTooLong         = errors.New(\"resource length too long\")\n\terrTooManyQuestions   = errors.New(\"too many Questions to pack (>65535)\")\n\terrTooManyAnswers     = errors.New(\"too many Answers to pack (>65535)\")\n\terrTooManyAuthorities = errors.New(\"too many Authorities to pack (>65535)\")\n\terrTooManyAdditionals = errors.New(\"too many Additionals to pack (>65535)\")\n\terrNonCanonicalName   = errors.New(\"name is not in canonical format (it must end with a .)\")\n)\n\n// Internal constants.\nconst (\n\t// packStartingCap is the default initial buffer size allocated during\n\t// packing.\n\t//\n\t// The starting capacity doesn't matter too much, but most DNS responses\n\t// Will be <= 512 bytes as it is the limit for DNS over UDP.\n\tpackStartingCap = 512\n\n\t// uint16Len is the length (in bytes) of a uint16.\n\tuint16Len = 2\n\n\t// uint32Len is the length (in bytes) of a uint32.\n\tuint32Len = 4\n\n\t// headerLen is the length (in bytes) of a DNS header.\n\t//\n\t// A header is comprised of 6 uint16s and no padding.\n\theaderLen = 6 * uint16Len\n)\n\ntype nestedError struct {\n\t// s is the current level's error message.\n\ts string\n\n\t// err is the nested error.\n\terr error\n}\n\n// nestedError implements error.Error.\nfunc (e *nestedError) Error() 
string {\n\treturn e.s + \": \" + e.err.Error()\n}\n\n// Header is a representation of a DNS message header.\ntype Header struct {\n\tID                 uint16\n\tResponse           bool\n\tOpCode             OpCode\n\tAuthoritative      bool\n\tTruncated          bool\n\tRecursionDesired   bool\n\tRecursionAvailable bool\n\tRCode              RCode\n}\n\nfunc (m *Header) pack() (id uint16, bits uint16) {\n\tid = m.ID\n\tbits = uint16(m.OpCode)<<11 | uint16(m.RCode)\n\tif m.RecursionAvailable {\n\t\tbits |= headerBitRA\n\t}\n\tif m.RecursionDesired {\n\t\tbits |= headerBitRD\n\t}\n\tif m.Truncated {\n\t\tbits |= headerBitTC\n\t}\n\tif m.Authoritative {\n\t\tbits |= headerBitAA\n\t}\n\tif m.Response {\n\t\tbits |= headerBitQR\n\t}\n\treturn\n}\n\n// Message is a representation of a DNS message.\ntype Message struct {\n\tHeader\n\tQuestions   []Question\n\tAnswers     []Resource\n\tAuthorities []Resource\n\tAdditionals []Resource\n}\n\ntype section uint8\n\nconst (\n\tsectionNotStarted section = iota\n\tsectionHeader\n\tsectionQuestions\n\tsectionAnswers\n\tsectionAuthorities\n\tsectionAdditionals\n\tsectionDone\n\n\theaderBitQR = 1 << 15 // query/response (response=1)\n\theaderBitAA = 1 << 10 // authoritative\n\theaderBitTC = 1 << 9  // truncated\n\theaderBitRD = 1 << 8  // recursion desired\n\theaderBitRA = 1 << 7  // recursion available\n)\n\nvar sectionNames = map[section]string{\n\tsectionHeader:      \"header\",\n\tsectionQuestions:   \"Question\",\n\tsectionAnswers:     \"Answer\",\n\tsectionAuthorities: \"Authority\",\n\tsectionAdditionals: \"Additional\",\n}\n\n// header is the wire format for a DNS message header.\ntype header struct {\n\tid          uint16\n\tbits        uint16\n\tquestions   uint16\n\tanswers     uint16\n\tauthorities uint16\n\tadditionals uint16\n}\n\nfunc (h *header) count(sec section) uint16 {\n\tswitch sec {\n\tcase sectionQuestions:\n\t\treturn h.questions\n\tcase sectionAnswers:\n\t\treturn h.answers\n\tcase 
sectionAuthorities:\n\t\treturn h.authorities\n\tcase sectionAdditionals:\n\t\treturn h.additionals\n\t}\n\treturn 0\n}\n\nfunc (h *header) pack(msg []byte) []byte {\n\tmsg = packUint16(msg, h.id)\n\tmsg = packUint16(msg, h.bits)\n\tmsg = packUint16(msg, h.questions)\n\tmsg = packUint16(msg, h.answers)\n\tmsg = packUint16(msg, h.authorities)\n\treturn packUint16(msg, h.additionals)\n}\n\nfunc (h *header) unpack(msg []byte, off int) (int, error) {\n\tnewOff := off\n\tvar err error\n\tif h.id, newOff, err = unpackUint16(msg, newOff); err != nil {\n\t\treturn off, &nestedError{\"id\", err}\n\t}\n\tif h.bits, newOff, err = unpackUint16(msg, newOff); err != nil {\n\t\treturn off, &nestedError{\"bits\", err}\n\t}\n\tif h.questions, newOff, err = unpackUint16(msg, newOff); err != nil {\n\t\treturn off, &nestedError{\"questions\", err}\n\t}\n\tif h.answers, newOff, err = unpackUint16(msg, newOff); err != nil {\n\t\treturn off, &nestedError{\"answers\", err}\n\t}\n\tif h.authorities, newOff, err = unpackUint16(msg, newOff); err != nil {\n\t\treturn off, &nestedError{\"authorities\", err}\n\t}\n\tif h.additionals, newOff, err = unpackUint16(msg, newOff); err != nil {\n\t\treturn off, &nestedError{\"additionals\", err}\n\t}\n\treturn newOff, nil\n}\n\nfunc (h *header) header() Header {\n\treturn Header{\n\t\tID:                 h.id,\n\t\tResponse:           (h.bits & headerBitQR) != 0,\n\t\tOpCode:             OpCode(h.bits>>11) & 0xF,\n\t\tAuthoritative:      (h.bits & headerBitAA) != 0,\n\t\tTruncated:          (h.bits & headerBitTC) != 0,\n\t\tRecursionDesired:   (h.bits & headerBitRD) != 0,\n\t\tRecursionAvailable: (h.bits & headerBitRA) != 0,\n\t\tRCode:              RCode(h.bits & 0xF),\n\t}\n}\n\n// A Resource is a DNS resource record.\ntype Resource struct {\n\tHeader ResourceHeader\n\tBody   ResourceBody\n}\n\n// A ResourceBody is a DNS resource record minus the header.\ntype ResourceBody interface {\n\t// pack packs a Resource except for its header.\n\tpack(msg 
[]byte, compression map[string]int) ([]byte, error)\n\n\t// realType returns the actual type of the Resource. This is used to\n\t// fill in the header Type field.\n\trealType() Type\n}\n\nfunc (r *Resource) pack(msg []byte, compression map[string]int) ([]byte, error) {\n\tif r.Body == nil {\n\t\treturn msg, errNilResouceBody\n\t}\n\toldMsg := msg\n\tr.Header.Type = r.Body.realType()\n\tmsg, length, err := r.Header.pack(msg, compression)\n\tif err != nil {\n\t\treturn msg, &nestedError{\"ResourceHeader\", err}\n\t}\n\tpreLen := len(msg)\n\tmsg, err = r.Body.pack(msg, compression)\n\tif err != nil {\n\t\treturn msg, &nestedError{\"content\", err}\n\t}\n\tif err := r.Header.fixLen(msg, length, preLen); err != nil {\n\t\treturn oldMsg, err\n\t}\n\treturn msg, nil\n}\n\n// A Parser allows incrementally parsing a DNS message.\n//\n// When parsing is started, the Header is parsed. Next, each Question can be\n// either parsed or skipped. Alternatively, all Questions can be skipped at\n// once. When all Questions have been parsed, attempting to parse Questions\n// will return (nil, nil) and attempting to skip Questions will return\n// (true, nil). 
After all Questions have been either parsed or skipped, all\n// Answers, Authorities and Additionals can be either parsed or skipped in the\n// same way, and each type of Resource must be fully parsed or skipped before\n// proceeding to the next type of Resource.\n//\n// Note that there is no requirement to fully skip or parse the message.\ntype Parser struct {\n\tmsg    []byte\n\theader header\n\n\tsection        section\n\toff            int\n\tindex          int\n\tresHeaderValid bool\n\tresHeader      ResourceHeader\n}\n\n// Start parses the header and enables the parsing of Questions.\nfunc (p *Parser) Start(msg []byte) (Header, error) {\n\tif p.msg != nil {\n\t\t*p = Parser{}\n\t}\n\tp.msg = msg\n\tvar err error\n\tif p.off, err = p.header.unpack(msg, 0); err != nil {\n\t\treturn Header{}, &nestedError{\"unpacking header\", err}\n\t}\n\tp.section = sectionQuestions\n\treturn p.header.header(), nil\n}\n\nfunc (p *Parser) checkAdvance(sec section) error {\n\tif p.section < sec {\n\t\treturn ErrNotStarted\n\t}\n\tif p.section > sec {\n\t\treturn ErrSectionDone\n\t}\n\tp.resHeaderValid = false\n\tif p.index == int(p.header.count(sec)) {\n\t\tp.index = 0\n\t\tp.section++\n\t\treturn ErrSectionDone\n\t}\n\treturn nil\n}\n\nfunc (p *Parser) resource(sec section) (Resource, error) {\n\tvar r Resource\n\tvar err error\n\tr.Header, err = p.resourceHeader(sec)\n\tif err != nil {\n\t\treturn r, err\n\t}\n\tp.resHeaderValid = false\n\tr.Body, p.off, err = unpackResourceBody(p.msg, p.off, r.Header)\n\tif err != nil {\n\t\treturn Resource{}, &nestedError{\"unpacking \" + sectionNames[sec], err}\n\t}\n\tp.index++\n\treturn r, nil\n}\n\nfunc (p *Parser) resourceHeader(sec section) (ResourceHeader, error) {\n\tif p.resHeaderValid {\n\t\treturn p.resHeader, nil\n\t}\n\tif err := p.checkAdvance(sec); err != nil {\n\t\treturn ResourceHeader{}, err\n\t}\n\tvar hdr ResourceHeader\n\toff, err := hdr.unpack(p.msg, p.off)\n\tif err != nil {\n\t\treturn ResourceHeader{}, 
err\n\t}\n\tp.resHeaderValid = true\n\tp.resHeader = hdr\n\tp.off = off\n\treturn hdr, nil\n}\n\nfunc (p *Parser) skipResource(sec section) error {\n\tif p.resHeaderValid {\n\t\tnewOff := p.off + int(p.resHeader.Length)\n\t\tif newOff > len(p.msg) {\n\t\t\treturn errResourceLen\n\t\t}\n\t\tp.off = newOff\n\t\tp.resHeaderValid = false\n\t\tp.index++\n\t\treturn nil\n\t}\n\tif err := p.checkAdvance(sec); err != nil {\n\t\treturn err\n\t}\n\tvar err error\n\tp.off, err = skipResource(p.msg, p.off)\n\tif err != nil {\n\t\treturn &nestedError{\"skipping: \" + sectionNames[sec], err}\n\t}\n\tp.index++\n\treturn nil\n}\n\n// Question parses a single Question.\nfunc (p *Parser) Question() (Question, error) {\n\tif err := p.checkAdvance(sectionQuestions); err != nil {\n\t\treturn Question{}, err\n\t}\n\tvar name Name\n\toff, err := name.unpack(p.msg, p.off)\n\tif err != nil {\n\t\treturn Question{}, &nestedError{\"unpacking Question.Name\", err}\n\t}\n\ttyp, off, err := unpackType(p.msg, off)\n\tif err != nil {\n\t\treturn Question{}, &nestedError{\"unpacking Question.Type\", err}\n\t}\n\tclass, off, err := unpackClass(p.msg, off)\n\tif err != nil {\n\t\treturn Question{}, &nestedError{\"unpacking Question.Class\", err}\n\t}\n\tp.off = off\n\tp.index++\n\treturn Question{name, typ, class}, nil\n}\n\n// AllQuestions parses all Questions.\nfunc (p *Parser) AllQuestions() ([]Question, error) {\n\tqs := make([]Question, 0, p.header.questions)\n\tfor {\n\t\tq, err := p.Question()\n\t\tif err == ErrSectionDone {\n\t\t\treturn qs, nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tqs = append(qs, q)\n\t}\n}\n\n// SkipQuestion skips a single Question.\nfunc (p *Parser) SkipQuestion() error {\n\tif err := p.checkAdvance(sectionQuestions); err != nil {\n\t\treturn err\n\t}\n\toff, err := skipName(p.msg, p.off)\n\tif err != nil {\n\t\treturn &nestedError{\"skipping Question Name\", err}\n\t}\n\tif off, err = skipType(p.msg, off); err != nil {\n\t\treturn 
&nestedError{\"skipping Question Type\", err}\n\t}\n\tif off, err = skipClass(p.msg, off); err != nil {\n\t\treturn &nestedError{\"skipping Question Class\", err}\n\t}\n\tp.off = off\n\tp.index++\n\treturn nil\n}\n\n// SkipAllQuestions skips all Questions.\nfunc (p *Parser) SkipAllQuestions() error {\n\tfor {\n\t\tif err := p.SkipQuestion(); err == ErrSectionDone {\n\t\t\treturn nil\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n// AnswerHeader parses a single Answer ResourceHeader.\nfunc (p *Parser) AnswerHeader() (ResourceHeader, error) {\n\treturn p.resourceHeader(sectionAnswers)\n}\n\n// Answer parses a single Answer Resource.\nfunc (p *Parser) Answer() (Resource, error) {\n\treturn p.resource(sectionAnswers)\n}\n\n// AllAnswers parses all Answer Resources.\nfunc (p *Parser) AllAnswers() ([]Resource, error) {\n\tas := make([]Resource, 0, p.header.answers)\n\tfor {\n\t\ta, err := p.Answer()\n\t\tif err == ErrSectionDone {\n\t\t\treturn as, nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tas = append(as, a)\n\t}\n}\n\n// SkipAnswer skips a single Answer Resource.\nfunc (p *Parser) SkipAnswer() error {\n\treturn p.skipResource(sectionAnswers)\n}\n\n// SkipAllAnswers skips all Answer Resources.\nfunc (p *Parser) SkipAllAnswers() error {\n\tfor {\n\t\tif err := p.SkipAnswer(); err == ErrSectionDone {\n\t\t\treturn nil\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n// AuthorityHeader parses a single Authority ResourceHeader.\nfunc (p *Parser) AuthorityHeader() (ResourceHeader, error) {\n\treturn p.resourceHeader(sectionAuthorities)\n}\n\n// Authority parses a single Authority Resource.\nfunc (p *Parser) Authority() (Resource, error) {\n\treturn p.resource(sectionAuthorities)\n}\n\n// AllAuthorities parses all Authority Resources.\nfunc (p *Parser) AllAuthorities() ([]Resource, error) {\n\tas := make([]Resource, 0, p.header.authorities)\n\tfor {\n\t\ta, err := p.Authority()\n\t\tif err == ErrSectionDone 
{\n\t\t\treturn as, nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tas = append(as, a)\n\t}\n}\n\n// SkipAuthority skips a single Authority Resource.\nfunc (p *Parser) SkipAuthority() error {\n\treturn p.skipResource(sectionAuthorities)\n}\n\n// SkipAllAuthorities skips all Authority Resources.\nfunc (p *Parser) SkipAllAuthorities() error {\n\tfor {\n\t\tif err := p.SkipAuthority(); err == ErrSectionDone {\n\t\t\treturn nil\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n// AdditionalHeader parses a single Additional ResourceHeader.\nfunc (p *Parser) AdditionalHeader() (ResourceHeader, error) {\n\treturn p.resourceHeader(sectionAdditionals)\n}\n\n// Additional parses a single Additional Resource.\nfunc (p *Parser) Additional() (Resource, error) {\n\treturn p.resource(sectionAdditionals)\n}\n\n// AllAdditionals parses all Additional Resources.\nfunc (p *Parser) AllAdditionals() ([]Resource, error) {\n\tas := make([]Resource, 0, p.header.additionals)\n\tfor {\n\t\ta, err := p.Additional()\n\t\tif err == ErrSectionDone {\n\t\t\treturn as, nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tas = append(as, a)\n\t}\n}\n\n// SkipAdditional skips a single Additional Resource.\nfunc (p *Parser) SkipAdditional() error {\n\treturn p.skipResource(sectionAdditionals)\n}\n\n// SkipAllAdditionals skips all Additional Resources.\nfunc (p *Parser) SkipAllAdditionals() error {\n\tfor {\n\t\tif err := p.SkipAdditional(); err == ErrSectionDone {\n\t\t\treturn nil\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n// CNAMEResource parses a single CNAMEResource.\n//\n// One of the XXXHeader methods must have been called before calling this\n// method.\nfunc (p *Parser) CNAMEResource() (CNAMEResource, error) {\n\tif !p.resHeaderValid || p.resHeader.Type != TypeCNAME {\n\t\treturn CNAMEResource{}, ErrNotStarted\n\t}\n\tr, err := unpackCNAMEResource(p.msg, p.off)\n\tif err != nil {\n\t\treturn CNAMEResource{}, 
err\n\t}\n\tp.off += int(p.resHeader.Length)\n\tp.resHeaderValid = false\n\tp.index++\n\treturn r, nil\n}\n\n// MXResource parses a single MXResource.\n//\n// One of the XXXHeader methods must have been called before calling this\n// method.\nfunc (p *Parser) MXResource() (MXResource, error) {\n\tif !p.resHeaderValid || p.resHeader.Type != TypeMX {\n\t\treturn MXResource{}, ErrNotStarted\n\t}\n\tr, err := unpackMXResource(p.msg, p.off)\n\tif err != nil {\n\t\treturn MXResource{}, err\n\t}\n\tp.off += int(p.resHeader.Length)\n\tp.resHeaderValid = false\n\tp.index++\n\treturn r, nil\n}\n\n// NSResource parses a single NSResource.\n//\n// One of the XXXHeader methods must have been called before calling this\n// method.\nfunc (p *Parser) NSResource() (NSResource, error) {\n\tif !p.resHeaderValid || p.resHeader.Type != TypeNS {\n\t\treturn NSResource{}, ErrNotStarted\n\t}\n\tr, err := unpackNSResource(p.msg, p.off)\n\tif err != nil {\n\t\treturn NSResource{}, err\n\t}\n\tp.off += int(p.resHeader.Length)\n\tp.resHeaderValid = false\n\tp.index++\n\treturn r, nil\n}\n\n// PTRResource parses a single PTRResource.\n//\n// One of the XXXHeader methods must have been called before calling this\n// method.\nfunc (p *Parser) PTRResource() (PTRResource, error) {\n\tif !p.resHeaderValid || p.resHeader.Type != TypePTR {\n\t\treturn PTRResource{}, ErrNotStarted\n\t}\n\tr, err := unpackPTRResource(p.msg, p.off)\n\tif err != nil {\n\t\treturn PTRResource{}, err\n\t}\n\tp.off += int(p.resHeader.Length)\n\tp.resHeaderValid = false\n\tp.index++\n\treturn r, nil\n}\n\n// SOAResource parses a single SOAResource.\n//\n// One of the XXXHeader methods must have been called before calling this\n// method.\nfunc (p *Parser) SOAResource() (SOAResource, error) {\n\tif !p.resHeaderValid || p.resHeader.Type != TypeSOA {\n\t\treturn SOAResource{}, ErrNotStarted\n\t}\n\tr, err := unpackSOAResource(p.msg, p.off)\n\tif err != nil {\n\t\treturn SOAResource{}, err\n\t}\n\tp.off += 
int(p.resHeader.Length)\n\tp.resHeaderValid = false\n\tp.index++\n\treturn r, nil\n}\n\n// TXTResource parses a single TXTResource.\n//\n// One of the XXXHeader methods must have been called before calling this\n// method.\nfunc (p *Parser) TXTResource() (TXTResource, error) {\n\tif !p.resHeaderValid || p.resHeader.Type != TypeTXT {\n\t\treturn TXTResource{}, ErrNotStarted\n\t}\n\tr, err := unpackTXTResource(p.msg, p.off, p.resHeader.Length)\n\tif err != nil {\n\t\treturn TXTResource{}, err\n\t}\n\tp.off += int(p.resHeader.Length)\n\tp.resHeaderValid = false\n\tp.index++\n\treturn r, nil\n}\n\n// SRVResource parses a single SRVResource.\n//\n// One of the XXXHeader methods must have been called before calling this\n// method.\nfunc (p *Parser) SRVResource() (SRVResource, error) {\n\tif !p.resHeaderValid || p.resHeader.Type != TypeSRV {\n\t\treturn SRVResource{}, ErrNotStarted\n\t}\n\tr, err := unpackSRVResource(p.msg, p.off)\n\tif err != nil {\n\t\treturn SRVResource{}, err\n\t}\n\tp.off += int(p.resHeader.Length)\n\tp.resHeaderValid = false\n\tp.index++\n\treturn r, nil\n}\n\n// AResource parses a single AResource.\n//\n// One of the XXXHeader methods must have been called before calling this\n// method.\nfunc (p *Parser) AResource() (AResource, error) {\n\tif !p.resHeaderValid || p.resHeader.Type != TypeA {\n\t\treturn AResource{}, ErrNotStarted\n\t}\n\tr, err := unpackAResource(p.msg, p.off)\n\tif err != nil {\n\t\treturn AResource{}, err\n\t}\n\tp.off += int(p.resHeader.Length)\n\tp.resHeaderValid = false\n\tp.index++\n\treturn r, nil\n}\n\n// AAAAResource parses a single AAAAResource.\n//\n// One of the XXXHeader methods must have been called before calling this\n// method.\nfunc (p *Parser) AAAAResource() (AAAAResource, error) {\n\tif !p.resHeaderValid || p.resHeader.Type != TypeAAAA {\n\t\treturn AAAAResource{}, ErrNotStarted\n\t}\n\tr, err := unpackAAAAResource(p.msg, p.off)\n\tif err != nil {\n\t\treturn AAAAResource{}, err\n\t}\n\tp.off += 
int(p.resHeader.Length)\n\tp.resHeaderValid = false\n\tp.index++\n\treturn r, nil\n}\n\n// Unpack parses a full Message.\nfunc (m *Message) Unpack(msg []byte) error {\n\tvar p Parser\n\tvar err error\n\tif m.Header, err = p.Start(msg); err != nil {\n\t\treturn err\n\t}\n\tif m.Questions, err = p.AllQuestions(); err != nil {\n\t\treturn err\n\t}\n\tif m.Answers, err = p.AllAnswers(); err != nil {\n\t\treturn err\n\t}\n\tif m.Authorities, err = p.AllAuthorities(); err != nil {\n\t\treturn err\n\t}\n\tif m.Additionals, err = p.AllAdditionals(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// Pack packs a full Message.\nfunc (m *Message) Pack() ([]byte, error) {\n\t// Validate the lengths. It is very unlikely that anyone will try to\n\t// pack more than 65535 of any particular type, but it is possible and\n\t// we should fail gracefully.\n\tif len(m.Questions) > int(^uint16(0)) {\n\t\treturn nil, errTooManyQuestions\n\t}\n\tif len(m.Answers) > int(^uint16(0)) {\n\t\treturn nil, errTooManyAnswers\n\t}\n\tif len(m.Authorities) > int(^uint16(0)) {\n\t\treturn nil, errTooManyAuthorities\n\t}\n\tif len(m.Additionals) > int(^uint16(0)) {\n\t\treturn nil, errTooManyAdditionals\n\t}\n\n\tvar h header\n\th.id, h.bits = m.Header.pack()\n\n\th.questions = uint16(len(m.Questions))\n\th.answers = uint16(len(m.Answers))\n\th.authorities = uint16(len(m.Authorities))\n\th.additionals = uint16(len(m.Additionals))\n\n\tmsg := make([]byte, 0, packStartingCap)\n\n\tmsg = h.pack(msg)\n\n\t// RFC 1035 allows (but does not require) compression for packing. RFC\n\t// 1035 requires unpacking implementations to support compression, so\n\t// unconditionally enabling it is fine.\n\t//\n\t// DNS lookups are typically done over UDP, and RFC 1035 states that UDP\n\t// DNS packets can be a maximum of 512 bytes long. 
Without compression,\n\t// many DNS response packets are over this limit, so enabling\n\t// compression will help ensure compliance.\n\tcompression := map[string]int{}\n\n\tfor i := range m.Questions {\n\t\tvar err error\n\t\tif msg, err = m.Questions[i].pack(msg, compression); err != nil {\n\t\t\treturn nil, &nestedError{\"packing Question\", err}\n\t\t}\n\t}\n\tfor i := range m.Answers {\n\t\tvar err error\n\t\tif msg, err = m.Answers[i].pack(msg, compression); err != nil {\n\t\t\treturn nil, &nestedError{\"packing Answer\", err}\n\t\t}\n\t}\n\tfor i := range m.Authorities {\n\t\tvar err error\n\t\tif msg, err = m.Authorities[i].pack(msg, compression); err != nil {\n\t\t\treturn nil, &nestedError{\"packing Authority\", err}\n\t\t}\n\t}\n\tfor i := range m.Additionals {\n\t\tvar err error\n\t\tif msg, err = m.Additionals[i].pack(msg, compression); err != nil {\n\t\t\treturn nil, &nestedError{\"packing Additional\", err}\n\t\t}\n\t}\n\n\treturn msg, nil\n}\n\n// A Builder allows incrementally packing a DNS message.\ntype Builder struct {\n\tmsg         []byte\n\theader      header\n\tsection     section\n\tcompression map[string]int\n}\n\n// Start initializes the builder.\n//\n// buf is optional (nil is fine), but if provided, Start takes ownership of buf.\nfunc (b *Builder) Start(buf []byte, h Header) {\n\tb.StartWithoutCompression(buf, h)\n\tb.compression = map[string]int{}\n}\n\n// StartWithoutCompression initializes the builder with compression disabled.\n//\n// This avoids compression related allocations, but can result in larger message\n// sizes. 
Be careful with this mode as it can cause messages to exceed the UDP\n// size limit.\n//\n// buf is optional (nil is fine), but if provided, Start takes ownership of buf.\nfunc (b *Builder) StartWithoutCompression(buf []byte, h Header) {\n\t*b = Builder{msg: buf}\n\tb.header.id, b.header.bits = h.pack()\n\tif cap(b.msg) < headerLen {\n\t\tb.msg = make([]byte, 0, packStartingCap)\n\t}\n\tb.msg = b.msg[:headerLen]\n\tb.section = sectionHeader\n}\n\nfunc (b *Builder) startCheck(s section) error {\n\tif b.section <= sectionNotStarted {\n\t\treturn ErrNotStarted\n\t}\n\tif b.section > s {\n\t\treturn ErrSectionDone\n\t}\n\treturn nil\n}\n\n// StartQuestions prepares the builder for packing Questions.\nfunc (b *Builder) StartQuestions() error {\n\tif err := b.startCheck(sectionQuestions); err != nil {\n\t\treturn err\n\t}\n\tb.section = sectionQuestions\n\treturn nil\n}\n\n// StartAnswers prepares the builder for packing Answers.\nfunc (b *Builder) StartAnswers() error {\n\tif err := b.startCheck(sectionAnswers); err != nil {\n\t\treturn err\n\t}\n\tb.section = sectionAnswers\n\treturn nil\n}\n\n// StartAuthorities prepares the builder for packing Authorities.\nfunc (b *Builder) StartAuthorities() error {\n\tif err := b.startCheck(sectionAuthorities); err != nil {\n\t\treturn err\n\t}\n\tb.section = sectionAuthorities\n\treturn nil\n}\n\n// StartAdditionals prepares the builder for packing Additionals.\nfunc (b *Builder) StartAdditionals() error {\n\tif err := b.startCheck(sectionAdditionals); err != nil {\n\t\treturn err\n\t}\n\tb.section = sectionAdditionals\n\treturn nil\n}\n\nfunc (b *Builder) incrementSectionCount() error {\n\tvar count *uint16\n\tvar err error\n\tswitch b.section {\n\tcase sectionQuestions:\n\t\tcount = &b.header.questions\n\t\terr = errTooManyQuestions\n\tcase sectionAnswers:\n\t\tcount = &b.header.answers\n\t\terr = errTooManyAnswers\n\tcase sectionAuthorities:\n\t\tcount = &b.header.authorities\n\t\terr = errTooManyAuthorities\n\tcase 
sectionAdditionals:\n\t\tcount = &b.header.additionals\n\t\terr = errTooManyAdditionals\n\t}\n\tif *count == ^uint16(0) {\n\t\treturn err\n\t}\n\t*count++\n\treturn nil\n}\n\n// Question adds a single Question.\nfunc (b *Builder) Question(q Question) error {\n\tif b.section < sectionQuestions {\n\t\treturn ErrNotStarted\n\t}\n\tif b.section > sectionQuestions {\n\t\treturn ErrSectionDone\n\t}\n\tmsg, err := q.pack(b.msg, b.compression)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := b.incrementSectionCount(); err != nil {\n\t\treturn err\n\t}\n\tb.msg = msg\n\treturn nil\n}\n\nfunc (b *Builder) checkResourceSection() error {\n\tif b.section < sectionAnswers {\n\t\treturn ErrNotStarted\n\t}\n\tif b.section > sectionAdditionals {\n\t\treturn ErrSectionDone\n\t}\n\treturn nil\n}\n\n// CNAMEResource adds a single CNAMEResource.\nfunc (b *Builder) CNAMEResource(h ResourceHeader, r CNAMEResource) error {\n\tif err := b.checkResourceSection(); err != nil {\n\t\treturn err\n\t}\n\th.Type = r.realType()\n\tmsg, length, err := h.pack(b.msg, b.compression)\n\tif err != nil {\n\t\treturn &nestedError{\"ResourceHeader\", err}\n\t}\n\tpreLen := len(msg)\n\tif msg, err = r.pack(msg, b.compression); err != nil {\n\t\treturn &nestedError{\"CNAMEResource body\", err}\n\t}\n\tif err := h.fixLen(msg, length, preLen); err != nil {\n\t\treturn err\n\t}\n\tif err := b.incrementSectionCount(); err != nil {\n\t\treturn err\n\t}\n\tb.msg = msg\n\treturn nil\n}\n\n// MXResource adds a single MXResource.\nfunc (b *Builder) MXResource(h ResourceHeader, r MXResource) error {\n\tif err := b.checkResourceSection(); err != nil {\n\t\treturn err\n\t}\n\th.Type = r.realType()\n\tmsg, length, err := h.pack(b.msg, b.compression)\n\tif err != nil {\n\t\treturn &nestedError{\"ResourceHeader\", err}\n\t}\n\tpreLen := len(msg)\n\tif msg, err = r.pack(msg, b.compression); err != nil {\n\t\treturn &nestedError{\"MXResource body\", err}\n\t}\n\tif err := h.fixLen(msg, length, preLen); err != nil 
{\n\t\treturn err\n\t}\n\tif err := b.incrementSectionCount(); err != nil {\n\t\treturn err\n\t}\n\tb.msg = msg\n\treturn nil\n}\n\n// NSResource adds a single NSResource.\nfunc (b *Builder) NSResource(h ResourceHeader, r NSResource) error {\n\tif err := b.checkResourceSection(); err != nil {\n\t\treturn err\n\t}\n\th.Type = r.realType()\n\tmsg, length, err := h.pack(b.msg, b.compression)\n\tif err != nil {\n\t\treturn &nestedError{\"ResourceHeader\", err}\n\t}\n\tpreLen := len(msg)\n\tif msg, err = r.pack(msg, b.compression); err != nil {\n\t\treturn &nestedError{\"NSResource body\", err}\n\t}\n\tif err := h.fixLen(msg, length, preLen); err != nil {\n\t\treturn err\n\t}\n\tif err := b.incrementSectionCount(); err != nil {\n\t\treturn err\n\t}\n\tb.msg = msg\n\treturn nil\n}\n\n// PTRResource adds a single PTRResource.\nfunc (b *Builder) PTRResource(h ResourceHeader, r PTRResource) error {\n\tif err := b.checkResourceSection(); err != nil {\n\t\treturn err\n\t}\n\th.Type = r.realType()\n\tmsg, length, err := h.pack(b.msg, b.compression)\n\tif err != nil {\n\t\treturn &nestedError{\"ResourceHeader\", err}\n\t}\n\tpreLen := len(msg)\n\tif msg, err = r.pack(msg, b.compression); err != nil {\n\t\treturn &nestedError{\"PTRResource body\", err}\n\t}\n\tif err := h.fixLen(msg, length, preLen); err != nil {\n\t\treturn err\n\t}\n\tif err := b.incrementSectionCount(); err != nil {\n\t\treturn err\n\t}\n\tb.msg = msg\n\treturn nil\n}\n\n// SOAResource adds a single SOAResource.\nfunc (b *Builder) SOAResource(h ResourceHeader, r SOAResource) error {\n\tif err := b.checkResourceSection(); err != nil {\n\t\treturn err\n\t}\n\th.Type = r.realType()\n\tmsg, length, err := h.pack(b.msg, b.compression)\n\tif err != nil {\n\t\treturn &nestedError{\"ResourceHeader\", err}\n\t}\n\tpreLen := len(msg)\n\tif msg, err = r.pack(msg, b.compression); err != nil {\n\t\treturn &nestedError{\"SOAResource body\", err}\n\t}\n\tif err := h.fixLen(msg, length, preLen); err != nil {\n\t\treturn 
err\n\t}\n\tif err := b.incrementSectionCount(); err != nil {\n\t\treturn err\n\t}\n\tb.msg = msg\n\treturn nil\n}\n\n// TXTResource adds a single TXTResource.\nfunc (b *Builder) TXTResource(h ResourceHeader, r TXTResource) error {\n\tif err := b.checkResourceSection(); err != nil {\n\t\treturn err\n\t}\n\th.Type = r.realType()\n\tmsg, length, err := h.pack(b.msg, b.compression)\n\tif err != nil {\n\t\treturn &nestedError{\"ResourceHeader\", err}\n\t}\n\tpreLen := len(msg)\n\tif msg, err = r.pack(msg, b.compression); err != nil {\n\t\treturn &nestedError{\"TXTResource body\", err}\n\t}\n\tif err := h.fixLen(msg, length, preLen); err != nil {\n\t\treturn err\n\t}\n\tif err := b.incrementSectionCount(); err != nil {\n\t\treturn err\n\t}\n\tb.msg = msg\n\treturn nil\n}\n\n// SRVResource adds a single SRVResource.\nfunc (b *Builder) SRVResource(h ResourceHeader, r SRVResource) error {\n\tif err := b.checkResourceSection(); err != nil {\n\t\treturn err\n\t}\n\th.Type = r.realType()\n\tmsg, length, err := h.pack(b.msg, b.compression)\n\tif err != nil {\n\t\treturn &nestedError{\"ResourceHeader\", err}\n\t}\n\tpreLen := len(msg)\n\tif msg, err = r.pack(msg, b.compression); err != nil {\n\t\treturn &nestedError{\"SRVResource body\", err}\n\t}\n\tif err := h.fixLen(msg, length, preLen); err != nil {\n\t\treturn err\n\t}\n\tif err := b.incrementSectionCount(); err != nil {\n\t\treturn err\n\t}\n\tb.msg = msg\n\treturn nil\n}\n\n// AResource adds a single AResource.\nfunc (b *Builder) AResource(h ResourceHeader, r AResource) error {\n\tif err := b.checkResourceSection(); err != nil {\n\t\treturn err\n\t}\n\th.Type = r.realType()\n\tmsg, length, err := h.pack(b.msg, b.compression)\n\tif err != nil {\n\t\treturn &nestedError{\"ResourceHeader\", err}\n\t}\n\tpreLen := len(msg)\n\tif msg, err = r.pack(msg, b.compression); err != nil {\n\t\treturn &nestedError{\"AResource body\", err}\n\t}\n\tif err := h.fixLen(msg, length, preLen); err != nil {\n\t\treturn err\n\t}\n\tif err := 
b.incrementSectionCount(); err != nil {\n\t\treturn err\n\t}\n\tb.msg = msg\n\treturn nil\n}\n\n// AAAAResource adds a single AAAAResource.\nfunc (b *Builder) AAAAResource(h ResourceHeader, r AAAAResource) error {\n\tif err := b.checkResourceSection(); err != nil {\n\t\treturn err\n\t}\n\th.Type = r.realType()\n\tmsg, length, err := h.pack(b.msg, b.compression)\n\tif err != nil {\n\t\treturn &nestedError{\"ResourceHeader\", err}\n\t}\n\tpreLen := len(msg)\n\tif msg, err = r.pack(msg, b.compression); err != nil {\n\t\treturn &nestedError{\"AAAAResource body\", err}\n\t}\n\tif err := h.fixLen(msg, length, preLen); err != nil {\n\t\treturn err\n\t}\n\tif err := b.incrementSectionCount(); err != nil {\n\t\treturn err\n\t}\n\tb.msg = msg\n\treturn nil\n}\n\n// Finish ends message building and generates a binary packet.\nfunc (b *Builder) Finish() ([]byte, error) {\n\tif b.section < sectionHeader {\n\t\treturn nil, ErrNotStarted\n\t}\n\tb.section = sectionDone\n\tb.header.pack(b.msg[:0])\n\treturn b.msg, nil\n}\n\n// A ResourceHeader is the header of a DNS resource record. There are\n// many types of DNS resource records, but they all share the same header.\ntype ResourceHeader struct {\n\t// Name is the domain name for which this resource record pertains.\n\tName Name\n\n\t// Type is the type of DNS resource record.\n\t//\n\t// This field will be set automatically during packing.\n\tType Type\n\n\t// Class is the class of network to which this DNS resource record\n\t// pertains.\n\tClass Class\n\n\t// TTL is the length of time (measured in seconds) which this resource\n\t// record is valid for (time to live). All Resources in a set should\n\t// have the same TTL (RFC 2181 Section 5.2).\n\tTTL uint32\n\n\t// Length is the length of data in the resource record after the header.\n\t//\n\t// This field will be set automatically during packing.\n\tLength uint16\n}\n\n// pack packs all of the fields in a ResourceHeader except for the length. 
The\n// length bytes are returned as a slice so they can be filled in after the rest\n// of the Resource has been packed.\nfunc (h *ResourceHeader) pack(oldMsg []byte, compression map[string]int) (msg []byte, length []byte, err error) {\n\tmsg = oldMsg\n\tif msg, err = h.Name.pack(msg, compression); err != nil {\n\t\treturn oldMsg, nil, &nestedError{\"Name\", err}\n\t}\n\tmsg = packType(msg, h.Type)\n\tmsg = packClass(msg, h.Class)\n\tmsg = packUint32(msg, h.TTL)\n\tlenBegin := len(msg)\n\tmsg = packUint16(msg, h.Length)\n\treturn msg, msg[lenBegin : lenBegin+uint16Len], nil\n}\n\nfunc (h *ResourceHeader) unpack(msg []byte, off int) (int, error) {\n\tnewOff := off\n\tvar err error\n\tif newOff, err = h.Name.unpack(msg, newOff); err != nil {\n\t\treturn off, &nestedError{\"Name\", err}\n\t}\n\tif h.Type, newOff, err = unpackType(msg, newOff); err != nil {\n\t\treturn off, &nestedError{\"Type\", err}\n\t}\n\tif h.Class, newOff, err = unpackClass(msg, newOff); err != nil {\n\t\treturn off, &nestedError{\"Class\", err}\n\t}\n\tif h.TTL, newOff, err = unpackUint32(msg, newOff); err != nil {\n\t\treturn off, &nestedError{\"TTL\", err}\n\t}\n\tif h.Length, newOff, err = unpackUint16(msg, newOff); err != nil {\n\t\treturn off, &nestedError{\"Length\", err}\n\t}\n\treturn newOff, nil\n}\n\nfunc (h *ResourceHeader) fixLen(msg []byte, length []byte, preLen int) error {\n\tconLen := len(msg) - preLen\n\tif conLen > int(^uint16(0)) {\n\t\treturn errResTooLong\n\t}\n\n\t// Fill in the length now that we know how long the content is.\n\tpackUint16(length[:0], uint16(conLen))\n\th.Length = uint16(conLen)\n\n\treturn nil\n}\n\nfunc skipResource(msg []byte, off int) (int, error) {\n\tnewOff, err := skipName(msg, off)\n\tif err != nil {\n\t\treturn off, &nestedError{\"Name\", err}\n\t}\n\tif newOff, err = skipType(msg, newOff); err != nil {\n\t\treturn off, &nestedError{\"Type\", err}\n\t}\n\tif newOff, err = skipClass(msg, newOff); err != nil {\n\t\treturn off, 
&nestedError{\"Class\", err}\n\t}\n\tif newOff, err = skipUint32(msg, newOff); err != nil {\n\t\treturn off, &nestedError{\"TTL\", err}\n\t}\n\tlength, newOff, err := unpackUint16(msg, newOff)\n\tif err != nil {\n\t\treturn off, &nestedError{\"Length\", err}\n\t}\n\tif newOff += int(length); newOff > len(msg) {\n\t\treturn off, errResourceLen\n\t}\n\treturn newOff, nil\n}\n\nfunc packUint16(msg []byte, field uint16) []byte {\n\treturn append(msg, byte(field>>8), byte(field))\n}\n\nfunc unpackUint16(msg []byte, off int) (uint16, int, error) {\n\tif off+uint16Len > len(msg) {\n\t\treturn 0, off, errBaseLen\n\t}\n\treturn uint16(msg[off])<<8 | uint16(msg[off+1]), off + uint16Len, nil\n}\n\nfunc skipUint16(msg []byte, off int) (int, error) {\n\tif off+uint16Len > len(msg) {\n\t\treturn off, errBaseLen\n\t}\n\treturn off + uint16Len, nil\n}\n\nfunc packType(msg []byte, field Type) []byte {\n\treturn packUint16(msg, uint16(field))\n}\n\nfunc unpackType(msg []byte, off int) (Type, int, error) {\n\tt, o, err := unpackUint16(msg, off)\n\treturn Type(t), o, err\n}\n\nfunc skipType(msg []byte, off int) (int, error) {\n\treturn skipUint16(msg, off)\n}\n\nfunc packClass(msg []byte, field Class) []byte {\n\treturn packUint16(msg, uint16(field))\n}\n\nfunc unpackClass(msg []byte, off int) (Class, int, error) {\n\tc, o, err := unpackUint16(msg, off)\n\treturn Class(c), o, err\n}\n\nfunc skipClass(msg []byte, off int) (int, error) {\n\treturn skipUint16(msg, off)\n}\n\nfunc packUint32(msg []byte, field uint32) []byte {\n\treturn append(\n\t\tmsg,\n\t\tbyte(field>>24),\n\t\tbyte(field>>16),\n\t\tbyte(field>>8),\n\t\tbyte(field),\n\t)\n}\n\nfunc unpackUint32(msg []byte, off int) (uint32, int, error) {\n\tif off+uint32Len > len(msg) {\n\t\treturn 0, off, errBaseLen\n\t}\n\tv := uint32(msg[off])<<24 | uint32(msg[off+1])<<16 | uint32(msg[off+2])<<8 | uint32(msg[off+3])\n\treturn v, off + uint32Len, nil\n}\n\nfunc skipUint32(msg []byte, off int) (int, error) {\n\tif off+uint32Len > 
len(msg) {\n\t\treturn off, errBaseLen\n\t}\n\treturn off + uint32Len, nil\n}\n\nfunc packText(msg []byte, field string) []byte {\n\tfor len(field) > 0 {\n\t\tl := len(field)\n\t\tif l > 255 {\n\t\t\tl = 255\n\t\t}\n\t\tmsg = append(msg, byte(l))\n\t\tmsg = append(msg, field[:l]...)\n\t\tfield = field[l:]\n\t}\n\treturn msg\n}\n\nfunc unpackText(msg []byte, off int) (string, int, error) {\n\tif off >= len(msg) {\n\t\treturn \"\", off, errBaseLen\n\t}\n\tbeginOff := off + 1\n\tendOff := beginOff + int(msg[off])\n\tif endOff > len(msg) {\n\t\treturn \"\", off, errCalcLen\n\t}\n\treturn string(msg[beginOff:endOff]), endOff, nil\n}\n\nfunc skipText(msg []byte, off int) (int, error) {\n\tif off >= len(msg) {\n\t\treturn off, errBaseLen\n\t}\n\tendOff := off + 1 + int(msg[off])\n\tif endOff > len(msg) {\n\t\treturn off, errCalcLen\n\t}\n\treturn endOff, nil\n}\n\nfunc packBytes(msg []byte, field []byte) []byte {\n\treturn append(msg, field...)\n}\n\nfunc unpackBytes(msg []byte, off int, field []byte) (int, error) {\n\tnewOff := off + len(field)\n\tif newOff > len(msg) {\n\t\treturn off, errBaseLen\n\t}\n\tcopy(field, msg[off:newOff])\n\treturn newOff, nil\n}\n\nfunc skipBytes(msg []byte, off int, field []byte) (int, error) {\n\tnewOff := off + len(field)\n\tif newOff > len(msg) {\n\t\treturn off, errBaseLen\n\t}\n\treturn newOff, nil\n}\n\nconst nameLen = 255\n\n// A Name is a non-encoded domain name. It is used instead of strings to avoid\n// allocations.\ntype Name struct {\n\tData   [nameLen]byte\n\tLength uint8\n}\n\n// NewName creates a new Name from a string.\nfunc NewName(name string) (Name, error) {\n\tif len([]byte(name)) > nameLen {\n\t\treturn Name{}, errCalcLen\n\t}\n\tn := Name{Length: uint8(len(name))}\n\tcopy(n.Data[:], []byte(name))\n\treturn n, nil\n}\n\nfunc (n Name) String() string {\n\treturn string(n.Data[:n.Length])\n}\n\n// pack packs a domain name.\n//\n// Domain names are a sequence of counted strings split at the dots. 
They end\n// with a zero-length string. Compression can be used to reuse domain suffixes.\n//\n// The compression map will be updated with new domain suffixes. If compression\n// is nil, compression will not be used.\nfunc (n *Name) pack(msg []byte, compression map[string]int) ([]byte, error) {\n\toldMsg := msg\n\n\t// Add a trailing dot to canonicalize name.\n\tif n.Length == 0 || n.Data[n.Length-1] != '.' {\n\t\treturn oldMsg, errNonCanonicalName\n\t}\n\n\t// Allow root domain.\n\tif n.Data[0] == '.' && n.Length == 1 {\n\t\treturn append(msg, 0), nil\n\t}\n\n\t// Emit sequence of counted strings, chopping at dots.\n\tfor i, begin := 0, 0; i < int(n.Length); i++ {\n\t\t// Check for the end of the segment.\n\t\tif n.Data[i] == '.' {\n\t\t\t// The two most significant bits have special meaning.\n\t\t\t// It isn't allowed for segments to be long enough to\n\t\t\t// need them.\n\t\t\tif i-begin >= 1<<6 {\n\t\t\t\treturn oldMsg, errSegTooLong\n\t\t\t}\n\n\t\t\t// Segments must have a non-zero length.\n\t\t\tif i-begin == 0 {\n\t\t\t\treturn oldMsg, errZeroSegLen\n\t\t\t}\n\n\t\t\tmsg = append(msg, byte(i-begin))\n\n\t\t\tfor j := begin; j < i; j++ {\n\t\t\t\tmsg = append(msg, n.Data[j])\n\t\t\t}\n\n\t\t\tbegin = i + 1\n\t\t\tcontinue\n\t\t}\n\n\t\t// We can only compress domain suffixes starting with a new\n\t\t// segment. A pointer is two bytes with the two most significant\n\t\t// bits set to 1 to indicate that it is a pointer.\n\t\tif (i == 0 || n.Data[i-1] == '.') && compression != nil {\n\t\t\tif ptr, ok := compression[string(n.Data[i:])]; ok {\n\t\t\t\t// Hit. Emit a pointer instead of the rest of\n\t\t\t\t// the domain.\n\t\t\t\treturn append(msg, byte(ptr>>8|0xC0), byte(ptr)), nil\n\t\t\t}\n\n\t\t\t// Miss. 
Add the suffix to the compression table if the\n\t\t\t// offset can be stored in the available 14 bytes.\n\t\t\tif len(msg) <= int(^uint16(0)>>2) {\n\t\t\t\tcompression[string(n.Data[i:])] = len(msg)\n\t\t\t}\n\t\t}\n\t}\n\treturn append(msg, 0), nil\n}\n\n// unpack unpacks a domain name.\nfunc (n *Name) unpack(msg []byte, off int) (int, error) {\n\t// currOff is the current working offset.\n\tcurrOff := off\n\n\t// newOff is the offset where the next record will start. Pointers lead\n\t// to data that belongs to other names and thus doesn't count towards to\n\t// the usage of this name.\n\tnewOff := off\n\n\t// ptr is the number of pointers followed.\n\tvar ptr int\n\n\t// Name is a slice representation of the name data.\n\tname := n.Data[:0]\n\nLoop:\n\tfor {\n\t\tif currOff >= len(msg) {\n\t\t\treturn off, errBaseLen\n\t\t}\n\t\tc := int(msg[currOff])\n\t\tcurrOff++\n\t\tswitch c & 0xC0 {\n\t\tcase 0x00: // String segment\n\t\t\tif c == 0x00 {\n\t\t\t\t// A zero length signals the end of the name.\n\t\t\t\tbreak Loop\n\t\t\t}\n\t\t\tendOff := currOff + c\n\t\t\tif endOff > len(msg) {\n\t\t\t\treturn off, errCalcLen\n\t\t\t}\n\t\t\tname = append(name, msg[currOff:endOff]...)\n\t\t\tname = append(name, '.')\n\t\t\tcurrOff = endOff\n\t\tcase 0xC0: // Pointer\n\t\t\tif currOff >= len(msg) {\n\t\t\t\treturn off, errInvalidPtr\n\t\t\t}\n\t\t\tc1 := msg[currOff]\n\t\t\tcurrOff++\n\t\t\tif ptr == 0 {\n\t\t\t\tnewOff = currOff\n\t\t\t}\n\t\t\t// Don't follow too many pointers, maybe there's a loop.\n\t\t\tif ptr++; ptr > 10 {\n\t\t\t\treturn off, errTooManyPtr\n\t\t\t}\n\t\t\tcurrOff = (c^0xC0)<<8 | int(c1)\n\t\tdefault:\n\t\t\t// Prefixes 0x80 and 0x40 are reserved.\n\t\t\treturn off, errReserved\n\t\t}\n\t}\n\tif len(name) == 0 {\n\t\tname = append(name, '.')\n\t}\n\tif len(name) > len(n.Data) {\n\t\treturn off, errCalcLen\n\t}\n\tn.Length = uint8(len(name))\n\tif ptr == 0 {\n\t\tnewOff = currOff\n\t}\n\treturn newOff, nil\n}\n\nfunc skipName(msg []byte, off int) (int, 
error) {\n\t// newOff is the offset where the next record will start. Pointers lead\n\t// to data that belongs to other names and thus doesn't count towards to\n\t// the usage of this name.\n\tnewOff := off\n\nLoop:\n\tfor {\n\t\tif newOff >= len(msg) {\n\t\t\treturn off, errBaseLen\n\t\t}\n\t\tc := int(msg[newOff])\n\t\tnewOff++\n\t\tswitch c & 0xC0 {\n\t\tcase 0x00:\n\t\t\tif c == 0x00 {\n\t\t\t\t// A zero length signals the end of the name.\n\t\t\t\tbreak Loop\n\t\t\t}\n\t\t\t// literal string\n\t\t\tnewOff += c\n\t\t\tif newOff > len(msg) {\n\t\t\t\treturn off, errCalcLen\n\t\t\t}\n\t\tcase 0xC0:\n\t\t\t// Pointer to somewhere else in msg.\n\n\t\t\t// Pointers are two bytes.\n\t\t\tnewOff++\n\n\t\t\t// Don't follow the pointer as the data here has ended.\n\t\t\tbreak Loop\n\t\tdefault:\n\t\t\t// Prefixes 0x80 and 0x40 are reserved.\n\t\t\treturn off, errReserved\n\t\t}\n\t}\n\n\treturn newOff, nil\n}\n\n// A Question is a DNS query.\ntype Question struct {\n\tName  Name\n\tType  Type\n\tClass Class\n}\n\nfunc (q *Question) pack(msg []byte, compression map[string]int) ([]byte, error) {\n\tmsg, err := q.Name.pack(msg, compression)\n\tif err != nil {\n\t\treturn msg, &nestedError{\"Name\", err}\n\t}\n\tmsg = packType(msg, q.Type)\n\treturn packClass(msg, q.Class), nil\n}\n\nfunc unpackResourceBody(msg []byte, off int, hdr ResourceHeader) (ResourceBody, int, error) {\n\tvar (\n\t\tr    ResourceBody\n\t\terr  error\n\t\tname string\n\t)\n\tswitch hdr.Type {\n\tcase TypeA:\n\t\tvar rb AResource\n\t\trb, err = unpackAResource(msg, off)\n\t\tr = &rb\n\t\tname = \"A\"\n\tcase TypeNS:\n\t\tvar rb NSResource\n\t\trb, err = unpackNSResource(msg, off)\n\t\tr = &rb\n\t\tname = \"NS\"\n\tcase TypeCNAME:\n\t\tvar rb CNAMEResource\n\t\trb, err = unpackCNAMEResource(msg, off)\n\t\tr = &rb\n\t\tname = \"CNAME\"\n\tcase TypeSOA:\n\t\tvar rb SOAResource\n\t\trb, err = unpackSOAResource(msg, off)\n\t\tr = &rb\n\t\tname = \"SOA\"\n\tcase TypePTR:\n\t\tvar rb PTRResource\n\t\trb, err 
= unpackPTRResource(msg, off)\n\t\tr = &rb\n\t\tname = \"PTR\"\n\tcase TypeMX:\n\t\tvar rb MXResource\n\t\trb, err = unpackMXResource(msg, off)\n\t\tr = &rb\n\t\tname = \"MX\"\n\tcase TypeTXT:\n\t\tvar rb TXTResource\n\t\trb, err = unpackTXTResource(msg, off, hdr.Length)\n\t\tr = &rb\n\t\tname = \"TXT\"\n\tcase TypeAAAA:\n\t\tvar rb AAAAResource\n\t\trb, err = unpackAAAAResource(msg, off)\n\t\tr = &rb\n\t\tname = \"AAAA\"\n\tcase TypeSRV:\n\t\tvar rb SRVResource\n\t\trb, err = unpackSRVResource(msg, off)\n\t\tr = &rb\n\t\tname = \"SRV\"\n\t}\n\tif err != nil {\n\t\treturn nil, off, &nestedError{name + \" record\", err}\n\t}\n\tif r == nil {\n\t\treturn nil, off, errors.New(\"invalid resource type: \" + string(hdr.Type+'0'))\n\t}\n\treturn r, off + int(hdr.Length), nil\n}\n\n// A CNAMEResource is a CNAME Resource record.\ntype CNAMEResource struct {\n\tCNAME Name\n}\n\nfunc (r *CNAMEResource) realType() Type {\n\treturn TypeCNAME\n}\n\nfunc (r *CNAMEResource) pack(msg []byte, compression map[string]int) ([]byte, error) {\n\treturn r.CNAME.pack(msg, compression)\n}\n\nfunc unpackCNAMEResource(msg []byte, off int) (CNAMEResource, error) {\n\tvar cname Name\n\tif _, err := cname.unpack(msg, off); err != nil {\n\t\treturn CNAMEResource{}, err\n\t}\n\treturn CNAMEResource{cname}, nil\n}\n\n// An MXResource is an MX Resource record.\ntype MXResource struct {\n\tPref uint16\n\tMX   Name\n}\n\nfunc (r *MXResource) realType() Type {\n\treturn TypeMX\n}\n\nfunc (r *MXResource) pack(msg []byte, compression map[string]int) ([]byte, error) {\n\toldMsg := msg\n\tmsg = packUint16(msg, r.Pref)\n\tmsg, err := r.MX.pack(msg, compression)\n\tif err != nil {\n\t\treturn oldMsg, &nestedError{\"MXResource.MX\", err}\n\t}\n\treturn msg, nil\n}\n\nfunc unpackMXResource(msg []byte, off int) (MXResource, error) {\n\tpref, off, err := unpackUint16(msg, off)\n\tif err != nil {\n\t\treturn MXResource{}, &nestedError{\"Pref\", err}\n\t}\n\tvar mx Name\n\tif _, err := mx.unpack(msg, off); err != 
nil {\n\t\treturn MXResource{}, &nestedError{\"MX\", err}\n\t}\n\treturn MXResource{pref, mx}, nil\n}\n\n// An NSResource is an NS Resource record.\ntype NSResource struct {\n\tNS Name\n}\n\nfunc (r *NSResource) realType() Type {\n\treturn TypeNS\n}\n\nfunc (r *NSResource) pack(msg []byte, compression map[string]int) ([]byte, error) {\n\treturn r.NS.pack(msg, compression)\n}\n\nfunc unpackNSResource(msg []byte, off int) (NSResource, error) {\n\tvar ns Name\n\tif _, err := ns.unpack(msg, off); err != nil {\n\t\treturn NSResource{}, err\n\t}\n\treturn NSResource{ns}, nil\n}\n\n// A PTRResource is a PTR Resource record.\ntype PTRResource struct {\n\tPTR Name\n}\n\nfunc (r *PTRResource) realType() Type {\n\treturn TypePTR\n}\n\nfunc (r *PTRResource) pack(msg []byte, compression map[string]int) ([]byte, error) {\n\treturn r.PTR.pack(msg, compression)\n}\n\nfunc unpackPTRResource(msg []byte, off int) (PTRResource, error) {\n\tvar ptr Name\n\tif _, err := ptr.unpack(msg, off); err != nil {\n\t\treturn PTRResource{}, err\n\t}\n\treturn PTRResource{ptr}, nil\n}\n\n// An SOAResource is an SOA Resource record.\ntype SOAResource struct {\n\tNS      Name\n\tMBox    Name\n\tSerial  uint32\n\tRefresh uint32\n\tRetry   uint32\n\tExpire  uint32\n\n\t// MinTTL the is the default TTL of Resources records which did not\n\t// contain a TTL value and the TTL of negative responses. 
(RFC 2308\n\t// Section 4)\n\tMinTTL uint32\n}\n\nfunc (r *SOAResource) realType() Type {\n\treturn TypeSOA\n}\n\nfunc (r *SOAResource) pack(msg []byte, compression map[string]int) ([]byte, error) {\n\toldMsg := msg\n\tmsg, err := r.NS.pack(msg, compression)\n\tif err != nil {\n\t\treturn oldMsg, &nestedError{\"SOAResource.NS\", err}\n\t}\n\tmsg, err = r.MBox.pack(msg, compression)\n\tif err != nil {\n\t\treturn oldMsg, &nestedError{\"SOAResource.MBox\", err}\n\t}\n\tmsg = packUint32(msg, r.Serial)\n\tmsg = packUint32(msg, r.Refresh)\n\tmsg = packUint32(msg, r.Retry)\n\tmsg = packUint32(msg, r.Expire)\n\treturn packUint32(msg, r.MinTTL), nil\n}\n\nfunc unpackSOAResource(msg []byte, off int) (SOAResource, error) {\n\tvar ns Name\n\toff, err := ns.unpack(msg, off)\n\tif err != nil {\n\t\treturn SOAResource{}, &nestedError{\"NS\", err}\n\t}\n\tvar mbox Name\n\tif off, err = mbox.unpack(msg, off); err != nil {\n\t\treturn SOAResource{}, &nestedError{\"MBox\", err}\n\t}\n\tserial, off, err := unpackUint32(msg, off)\n\tif err != nil {\n\t\treturn SOAResource{}, &nestedError{\"Serial\", err}\n\t}\n\trefresh, off, err := unpackUint32(msg, off)\n\tif err != nil {\n\t\treturn SOAResource{}, &nestedError{\"Refresh\", err}\n\t}\n\tretry, off, err := unpackUint32(msg, off)\n\tif err != nil {\n\t\treturn SOAResource{}, &nestedError{\"Retry\", err}\n\t}\n\texpire, off, err := unpackUint32(msg, off)\n\tif err != nil {\n\t\treturn SOAResource{}, &nestedError{\"Expire\", err}\n\t}\n\tminTTL, _, err := unpackUint32(msg, off)\n\tif err != nil {\n\t\treturn SOAResource{}, &nestedError{\"MinTTL\", err}\n\t}\n\treturn SOAResource{ns, mbox, serial, refresh, retry, expire, minTTL}, nil\n}\n\n// A TXTResource is a TXT Resource record.\ntype TXTResource struct {\n\tTxt string // Not a domain name.\n}\n\nfunc (r *TXTResource) realType() Type {\n\treturn TypeTXT\n}\n\nfunc (r *TXTResource) pack(msg []byte, compression map[string]int) ([]byte, error) {\n\treturn packText(msg, r.Txt), 
nil\n}\n\nfunc unpackTXTResource(msg []byte, off int, length uint16) (TXTResource, error) {\n\tvar txt string\n\tfor n := uint16(0); n < length; {\n\t\tvar t string\n\t\tvar err error\n\t\tif t, off, err = unpackText(msg, off); err != nil {\n\t\t\treturn TXTResource{}, &nestedError{\"text\", err}\n\t\t}\n\t\t// Check if we got too many bytes.\n\t\tif length-n < uint16(len(t))+1 {\n\t\t\treturn TXTResource{}, errCalcLen\n\t\t}\n\t\tn += uint16(len(t)) + 1\n\t\ttxt += t\n\t}\n\treturn TXTResource{txt}, nil\n}\n\n// An SRVResource is an SRV Resource record.\ntype SRVResource struct {\n\tPriority uint16\n\tWeight   uint16\n\tPort     uint16\n\tTarget   Name // Not compressed as per RFC 2782.\n}\n\nfunc (r *SRVResource) realType() Type {\n\treturn TypeSRV\n}\n\nfunc (r *SRVResource) pack(msg []byte, compression map[string]int) ([]byte, error) {\n\toldMsg := msg\n\tmsg = packUint16(msg, r.Priority)\n\tmsg = packUint16(msg, r.Weight)\n\tmsg = packUint16(msg, r.Port)\n\tmsg, err := r.Target.pack(msg, nil)\n\tif err != nil {\n\t\treturn oldMsg, &nestedError{\"SRVResource.Target\", err}\n\t}\n\treturn msg, nil\n}\n\nfunc unpackSRVResource(msg []byte, off int) (SRVResource, error) {\n\tpriority, off, err := unpackUint16(msg, off)\n\tif err != nil {\n\t\treturn SRVResource{}, &nestedError{\"Priority\", err}\n\t}\n\tweight, off, err := unpackUint16(msg, off)\n\tif err != nil {\n\t\treturn SRVResource{}, &nestedError{\"Weight\", err}\n\t}\n\tport, off, err := unpackUint16(msg, off)\n\tif err != nil {\n\t\treturn SRVResource{}, &nestedError{\"Port\", err}\n\t}\n\tvar target Name\n\tif _, err := target.unpack(msg, off); err != nil {\n\t\treturn SRVResource{}, &nestedError{\"Target\", err}\n\t}\n\treturn SRVResource{priority, weight, port, target}, nil\n}\n\n// An AResource is an A Resource record.\ntype AResource struct {\n\tA [4]byte\n}\n\nfunc (r *AResource) realType() Type {\n\treturn TypeA\n}\n\nfunc (r *AResource) pack(msg []byte, compression map[string]int) ([]byte, error) 
{\n\treturn packBytes(msg, r.A[:]), nil\n}\n\nfunc unpackAResource(msg []byte, off int) (AResource, error) {\n\tvar a [4]byte\n\tif _, err := unpackBytes(msg, off, a[:]); err != nil {\n\t\treturn AResource{}, err\n\t}\n\treturn AResource{a}, nil\n}\n\n// An AAAAResource is an AAAA Resource record.\ntype AAAAResource struct {\n\tAAAA [16]byte\n}\n\nfunc (r *AAAAResource) realType() Type {\n\treturn TypeAAAA\n}\n\nfunc (r *AAAAResource) pack(msg []byte, compression map[string]int) ([]byte, error) {\n\treturn packBytes(msg, r.AAAA[:]), nil\n}\n\nfunc unpackAAAAResource(msg []byte, off int) (AAAAResource, error) {\n\tvar aaaa [16]byte\n\tif _, err := unpackBytes(msg, off, aaaa[:]); err != nil {\n\t\treturn AAAAResource{}, err\n\t}\n\treturn AAAAResource{aaaa}, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/dns/dnsmessage/message_test.go",
    "content": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage dnsmessage\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc mustNewName(name string) Name {\n\tn, err := NewName(name)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn n\n}\n\nfunc (m *Message) String() string {\n\ts := fmt.Sprintf(\"Message: %#v\\n\", &m.Header)\n\tif len(m.Questions) > 0 {\n\t\ts += \"-- Questions\\n\"\n\t\tfor _, q := range m.Questions {\n\t\t\ts += fmt.Sprintf(\"%#v\\n\", q)\n\t\t}\n\t}\n\tif len(m.Answers) > 0 {\n\t\ts += \"-- Answers\\n\"\n\t\tfor _, a := range m.Answers {\n\t\t\ts += fmt.Sprintf(\"%#v\\n\", a)\n\t\t}\n\t}\n\tif len(m.Authorities) > 0 {\n\t\ts += \"-- Authorities\\n\"\n\t\tfor _, ns := range m.Authorities {\n\t\t\ts += fmt.Sprintf(\"%#v\\n\", ns)\n\t\t}\n\t}\n\tif len(m.Additionals) > 0 {\n\t\ts += \"-- Additionals\\n\"\n\t\tfor _, e := range m.Additionals {\n\t\t\ts += fmt.Sprintf(\"%#v\\n\", e)\n\t\t}\n\t}\n\treturn s\n}\n\nfunc TestNameString(t *testing.T) {\n\twant := \"foo\"\n\tname := mustNewName(want)\n\tif got := fmt.Sprint(name); got != want {\n\t\tt.Errorf(\"got fmt.Sprint(%#v) = %s, want = %s\", name, got, want)\n\t}\n}\n\nfunc TestQuestionPackUnpack(t *testing.T) {\n\twant := Question{\n\t\tName:  mustNewName(\".\"),\n\t\tType:  TypeA,\n\t\tClass: ClassINET,\n\t}\n\tbuf, err := want.pack(make([]byte, 1, 50), map[string]int{})\n\tif err != nil {\n\t\tt.Fatal(\"Packing failed:\", err)\n\t}\n\tvar p Parser\n\tp.msg = buf\n\tp.header.questions = 1\n\tp.section = sectionQuestions\n\tp.off = 1\n\tgot, err := p.Question()\n\tif err != nil {\n\t\tt.Fatalf(\"Unpacking failed: %v\\n%s\", err, string(buf[1:]))\n\t}\n\tif p.off != len(buf) {\n\t\tt.Errorf(\"Unpacked different amount than packed: got n = %d, want = %d\", p.off, len(buf))\n\t}\n\tif !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"Got = %+v, want = 
%+v\", got, want)\n\t}\n}\n\nfunc TestName(t *testing.T) {\n\ttests := []string{\n\t\t\"\",\n\t\t\".\",\n\t\t\"google..com\",\n\t\t\"google.com\",\n\t\t\"google..com.\",\n\t\t\"google.com.\",\n\t\t\".google.com.\",\n\t\t\"www..google.com.\",\n\t\t\"www.google.com.\",\n\t}\n\n\tfor _, test := range tests {\n\t\tn, err := NewName(test)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Creating name for %q: %v\", test, err)\n\t\t\tcontinue\n\t\t}\n\t\tif ns := n.String(); ns != test {\n\t\t\tt.Errorf(\"Got %#v.String() = %q, want = %q\", n, ns, test)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc TestNamePackUnpack(t *testing.T) {\n\ttests := []struct {\n\t\tin   string\n\t\twant string\n\t\terr  error\n\t}{\n\t\t{\"\", \"\", errNonCanonicalName},\n\t\t{\".\", \".\", nil},\n\t\t{\"google..com\", \"\", errNonCanonicalName},\n\t\t{\"google.com\", \"\", errNonCanonicalName},\n\t\t{\"google..com.\", \"\", errZeroSegLen},\n\t\t{\"google.com.\", \"google.com.\", nil},\n\t\t{\".google.com.\", \"\", errZeroSegLen},\n\t\t{\"www..google.com.\", \"\", errZeroSegLen},\n\t\t{\"www.google.com.\", \"www.google.com.\", nil},\n\t}\n\n\tfor _, test := range tests {\n\t\tin := mustNewName(test.in)\n\t\twant := mustNewName(test.want)\n\t\tbuf, err := in.pack(make([]byte, 0, 30), map[string]int{})\n\t\tif err != test.err {\n\t\t\tt.Errorf(\"Packing of %q: got err = %v, want err = %v\", test.in, err, test.err)\n\t\t\tcontinue\n\t\t}\n\t\tif test.err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tvar got Name\n\t\tn, err := got.unpack(buf, 0)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unpacking for %q failed: %v\", test.in, err)\n\t\t\tcontinue\n\t\t}\n\t\tif n != len(buf) {\n\t\t\tt.Errorf(\n\t\t\t\t\"Unpacked different amount than packed for %q: got n = %d, want = %d\",\n\t\t\t\ttest.in,\n\t\t\t\tn,\n\t\t\t\tlen(buf),\n\t\t\t)\n\t\t}\n\t\tif got != want {\n\t\t\tt.Errorf(\"Unpacking packing of %q: got = %#v, want = %#v\", test.in, got, want)\n\t\t}\n\t}\n}\n\nfunc checkErrorPrefix(err error, prefix string) bool {\n\te, 
ok := err.(*nestedError)\n\treturn ok && e.s == prefix\n}\n\nfunc TestHeaderUnpackError(t *testing.T) {\n\twants := []string{\n\t\t\"id\",\n\t\t\"bits\",\n\t\t\"questions\",\n\t\t\"answers\",\n\t\t\"authorities\",\n\t\t\"additionals\",\n\t}\n\tvar buf []byte\n\tvar h header\n\tfor _, want := range wants {\n\t\tn, err := h.unpack(buf, 0)\n\t\tif n != 0 || !checkErrorPrefix(err, want) {\n\t\t\tt.Errorf(\"got h.unpack([%d]byte, 0) = %d, %v, want = 0, %s\", len(buf), n, err, want)\n\t\t}\n\t\tbuf = append(buf, 0, 0)\n\t}\n}\n\nfunc TestParserStart(t *testing.T) {\n\tconst want = \"unpacking header\"\n\tvar p Parser\n\tfor i := 0; i <= 1; i++ {\n\t\t_, err := p.Start([]byte{})\n\t\tif !checkErrorPrefix(err, want) {\n\t\t\tt.Errorf(\"got p.Start(nil) = _, %v, want = _, %s\", err, want)\n\t\t}\n\t}\n}\n\nfunc TestResourceNotStarted(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tfn   func(*Parser) error\n\t}{\n\t\t{\"CNAMEResource\", func(p *Parser) error { _, err := p.CNAMEResource(); return err }},\n\t\t{\"MXResource\", func(p *Parser) error { _, err := p.MXResource(); return err }},\n\t\t{\"NSResource\", func(p *Parser) error { _, err := p.NSResource(); return err }},\n\t\t{\"PTRResource\", func(p *Parser) error { _, err := p.PTRResource(); return err }},\n\t\t{\"SOAResource\", func(p *Parser) error { _, err := p.SOAResource(); return err }},\n\t\t{\"TXTResource\", func(p *Parser) error { _, err := p.TXTResource(); return err }},\n\t\t{\"SRVResource\", func(p *Parser) error { _, err := p.SRVResource(); return err }},\n\t\t{\"AResource\", func(p *Parser) error { _, err := p.AResource(); return err }},\n\t\t{\"AAAAResource\", func(p *Parser) error { _, err := p.AAAAResource(); return err }},\n\t}\n\n\tfor _, test := range tests {\n\t\tif err := test.fn(&Parser{}); err != ErrNotStarted {\n\t\t\tt.Errorf(\"got _, %v = p.%s(), want = _, %v\", err, test.name, ErrNotStarted)\n\t\t}\n\t}\n}\n\nfunc TestDNSPackUnpack(t *testing.T) {\n\twants := 
[]Message{\n\t\t{\n\t\t\tQuestions: []Question{\n\t\t\t\t{\n\t\t\t\t\tName:  mustNewName(\".\"),\n\t\t\t\t\tType:  TypeAAAA,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t},\n\t\t\tAnswers:     []Resource{},\n\t\t\tAuthorities: []Resource{},\n\t\t\tAdditionals: []Resource{},\n\t\t},\n\t\tlargeTestMsg(),\n\t}\n\tfor i, want := range wants {\n\t\tb, err := want.Pack()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%d: packing failed: %v\", i, err)\n\t\t}\n\t\tvar got Message\n\t\terr = got.Unpack(b)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%d: unpacking failed: %v\", i, err)\n\t\t}\n\t\tif !reflect.DeepEqual(got, want) {\n\t\t\tt.Errorf(\"%d: got = %+v, want = %+v\", i, &got, &want)\n\t\t}\n\t}\n}\n\nfunc TestSkipAll(t *testing.T) {\n\tmsg := largeTestMsg()\n\tbuf, err := msg.Pack()\n\tif err != nil {\n\t\tt.Fatal(\"Packing large test message:\", err)\n\t}\n\tvar p Parser\n\tif _, err := p.Start(buf); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tf    func() error\n\t}{\n\t\t{\"SkipAllQuestions\", p.SkipAllQuestions},\n\t\t{\"SkipAllAnswers\", p.SkipAllAnswers},\n\t\t{\"SkipAllAuthorities\", p.SkipAllAuthorities},\n\t\t{\"SkipAllAdditionals\", p.SkipAllAdditionals},\n\t}\n\tfor _, test := range tests {\n\t\tfor i := 1; i <= 3; i++ {\n\t\t\tif err := test.f(); err != nil {\n\t\t\t\tt.Errorf(\"Call #%d to %s(): %v\", i, test.name, err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestSkipEach(t *testing.T) {\n\tmsg := smallTestMsg()\n\n\tbuf, err := msg.Pack()\n\tif err != nil {\n\t\tt.Fatal(\"Packing test message:\", err)\n\t}\n\tvar p Parser\n\tif _, err := p.Start(buf); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tf    func() error\n\t}{\n\t\t{\"SkipQuestion\", p.SkipQuestion},\n\t\t{\"SkipAnswer\", p.SkipAnswer},\n\t\t{\"SkipAuthority\", p.SkipAuthority},\n\t\t{\"SkipAdditional\", p.SkipAdditional},\n\t}\n\tfor _, test := range tests {\n\t\tif err := test.f(); err != nil {\n\t\t\tt.Errorf(\"First call: got %s() 
= %v, want = %v\", test.name, err, nil)\n\t\t}\n\t\tif err := test.f(); err != ErrSectionDone {\n\t\t\tt.Errorf(\"Second call: got %s() = %v, want = %v\", test.name, err, ErrSectionDone)\n\t\t}\n\t}\n}\n\nfunc TestSkipAfterRead(t *testing.T) {\n\tmsg := smallTestMsg()\n\n\tbuf, err := msg.Pack()\n\tif err != nil {\n\t\tt.Fatal(\"Packing test message:\", err)\n\t}\n\tvar p Parser\n\tif _, err := p.Start(buf); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tskip func() error\n\t\tread func() error\n\t}{\n\t\t{\"Question\", p.SkipQuestion, func() error { _, err := p.Question(); return err }},\n\t\t{\"Answer\", p.SkipAnswer, func() error { _, err := p.Answer(); return err }},\n\t\t{\"Authority\", p.SkipAuthority, func() error { _, err := p.Authority(); return err }},\n\t\t{\"Additional\", p.SkipAdditional, func() error { _, err := p.Additional(); return err }},\n\t}\n\tfor _, test := range tests {\n\t\tif err := test.read(); err != nil {\n\t\t\tt.Errorf(\"Got %s() = _, %v, want = _, %v\", test.name, err, nil)\n\t\t}\n\t\tif err := test.skip(); err != ErrSectionDone {\n\t\t\tt.Errorf(\"Got Skip%s() = %v, want = %v\", test.name, err, ErrSectionDone)\n\t\t}\n\t}\n}\n\nfunc TestSkipNotStarted(t *testing.T) {\n\tvar p Parser\n\n\ttests := []struct {\n\t\tname string\n\t\tf    func() error\n\t}{\n\t\t{\"SkipAllQuestions\", p.SkipAllQuestions},\n\t\t{\"SkipAllAnswers\", p.SkipAllAnswers},\n\t\t{\"SkipAllAuthorities\", p.SkipAllAuthorities},\n\t\t{\"SkipAllAdditionals\", p.SkipAllAdditionals},\n\t}\n\tfor _, test := range tests {\n\t\tif err := test.f(); err != ErrNotStarted {\n\t\t\tt.Errorf(\"Got %s() = %v, want = %v\", test.name, err, ErrNotStarted)\n\t\t}\n\t}\n}\n\nfunc TestTooManyRecords(t *testing.T) {\n\tconst recs = int(^uint16(0)) + 1\n\ttests := []struct {\n\t\tname string\n\t\tmsg  Message\n\t\twant error\n\t}{\n\t\t{\n\t\t\t\"Questions\",\n\t\t\tMessage{\n\t\t\t\tQuestions: make([]Question, 
recs),\n\t\t\t},\n\t\t\terrTooManyQuestions,\n\t\t},\n\t\t{\n\t\t\t\"Answers\",\n\t\t\tMessage{\n\t\t\t\tAnswers: make([]Resource, recs),\n\t\t\t},\n\t\t\terrTooManyAnswers,\n\t\t},\n\t\t{\n\t\t\t\"Authorities\",\n\t\t\tMessage{\n\t\t\t\tAuthorities: make([]Resource, recs),\n\t\t\t},\n\t\t\terrTooManyAuthorities,\n\t\t},\n\t\t{\n\t\t\t\"Additionals\",\n\t\t\tMessage{\n\t\t\t\tAdditionals: make([]Resource, recs),\n\t\t\t},\n\t\t\terrTooManyAdditionals,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tif _, got := test.msg.Pack(); got != test.want {\n\t\t\tt.Errorf(\"Packing %d %s: got = %v, want = %v\", recs, test.name, got, test.want)\n\t\t}\n\t}\n}\n\nfunc TestVeryLongTxt(t *testing.T) {\n\twant := Resource{\n\t\tResourceHeader{\n\t\t\tName:  mustNewName(\"foo.bar.example.com.\"),\n\t\t\tType:  TypeTXT,\n\t\t\tClass: ClassINET,\n\t\t},\n\t\t&TXTResource{loremIpsum},\n\t}\n\tbuf, err := want.pack(make([]byte, 0, 8000), map[string]int{})\n\tif err != nil {\n\t\tt.Fatal(\"Packing failed:\", err)\n\t}\n\tvar got Resource\n\toff, err := got.Header.unpack(buf, 0)\n\tif err != nil {\n\t\tt.Fatal(\"Unpacking ResourceHeader failed:\", err)\n\t}\n\tbody, n, err := unpackResourceBody(buf, off, got.Header)\n\tif err != nil {\n\t\tt.Fatal(\"Unpacking failed:\", err)\n\t}\n\tgot.Body = body\n\tif n != len(buf) {\n\t\tt.Errorf(\"Unpacked different amount than packed: got n = %d, want = %d\", n, len(buf))\n\t}\n\tif !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"Got = %#v, want = %#v\", got, want)\n\t}\n}\n\nfunc TestStartError(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tfn   func(*Builder) error\n\t}{\n\t\t{\"Questions\", func(b *Builder) error { return b.StartQuestions() }},\n\t\t{\"Answers\", func(b *Builder) error { return b.StartAnswers() }},\n\t\t{\"Authorities\", func(b *Builder) error { return b.StartAuthorities() }},\n\t\t{\"Additionals\", func(b *Builder) error { return b.StartAdditionals() }},\n\t}\n\n\tenvs := []struct {\n\t\tname string\n\t\tfn   
func() *Builder\n\t\twant error\n\t}{\n\t\t{\"sectionNotStarted\", func() *Builder { return &Builder{section: sectionNotStarted} }, ErrNotStarted},\n\t\t{\"sectionDone\", func() *Builder { return &Builder{section: sectionDone} }, ErrSectionDone},\n\t}\n\n\tfor _, env := range envs {\n\t\tfor _, test := range tests {\n\t\t\tif got := test.fn(env.fn()); got != env.want {\n\t\t\t\tt.Errorf(\"got Builder{%s}.Start%s = %v, want = %v\", env.name, test.name, got, env.want)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestBuilderResourceError(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tfn   func(*Builder) error\n\t}{\n\t\t{\"CNAMEResource\", func(b *Builder) error { return b.CNAMEResource(ResourceHeader{}, CNAMEResource{}) }},\n\t\t{\"MXResource\", func(b *Builder) error { return b.MXResource(ResourceHeader{}, MXResource{}) }},\n\t\t{\"NSResource\", func(b *Builder) error { return b.NSResource(ResourceHeader{}, NSResource{}) }},\n\t\t{\"PTRResource\", func(b *Builder) error { return b.PTRResource(ResourceHeader{}, PTRResource{}) }},\n\t\t{\"SOAResource\", func(b *Builder) error { return b.SOAResource(ResourceHeader{}, SOAResource{}) }},\n\t\t{\"TXTResource\", func(b *Builder) error { return b.TXTResource(ResourceHeader{}, TXTResource{}) }},\n\t\t{\"SRVResource\", func(b *Builder) error { return b.SRVResource(ResourceHeader{}, SRVResource{}) }},\n\t\t{\"AResource\", func(b *Builder) error { return b.AResource(ResourceHeader{}, AResource{}) }},\n\t\t{\"AAAAResource\", func(b *Builder) error { return b.AAAAResource(ResourceHeader{}, AAAAResource{}) }},\n\t}\n\n\tenvs := []struct {\n\t\tname string\n\t\tfn   func() *Builder\n\t\twant error\n\t}{\n\t\t{\"sectionNotStarted\", func() *Builder { return &Builder{section: sectionNotStarted} }, ErrNotStarted},\n\t\t{\"sectionHeader\", func() *Builder { return &Builder{section: sectionHeader} }, ErrNotStarted},\n\t\t{\"sectionQuestions\", func() *Builder { return &Builder{section: sectionQuestions} }, 
ErrNotStarted},\n\t\t{\"sectionDone\", func() *Builder { return &Builder{section: sectionDone} }, ErrSectionDone},\n\t}\n\n\tfor _, env := range envs {\n\t\tfor _, test := range tests {\n\t\t\tif got := test.fn(env.fn()); got != env.want {\n\t\t\t\tt.Errorf(\"got Builder{%s}.%s = %v, want = %v\", env.name, test.name, got, env.want)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestFinishError(t *testing.T) {\n\tvar b Builder\n\twant := ErrNotStarted\n\tif _, got := b.Finish(); got != want {\n\t\tt.Errorf(\"got Builder{}.Finish() = %v, want = %v\", got, want)\n\t}\n}\n\nfunc TestBuilder(t *testing.T) {\n\tmsg := largeTestMsg()\n\twant, err := msg.Pack()\n\tif err != nil {\n\t\tt.Fatal(\"Packing without builder:\", err)\n\t}\n\n\tvar b Builder\n\tb.Start(nil, msg.Header)\n\n\tif err := b.StartQuestions(); err != nil {\n\t\tt.Fatal(\"b.StartQuestions():\", err)\n\t}\n\tfor _, q := range msg.Questions {\n\t\tif err := b.Question(q); err != nil {\n\t\t\tt.Fatalf(\"b.Question(%#v): %v\", q, err)\n\t\t}\n\t}\n\n\tif err := b.StartAnswers(); err != nil {\n\t\tt.Fatal(\"b.StartAnswers():\", err)\n\t}\n\tfor _, a := range msg.Answers {\n\t\tswitch a.Header.Type {\n\t\tcase TypeA:\n\t\t\tif err := b.AResource(a.Header, *a.Body.(*AResource)); err != nil {\n\t\t\t\tt.Fatalf(\"b.AResource(%#v): %v\", a, err)\n\t\t\t}\n\t\tcase TypeNS:\n\t\t\tif err := b.NSResource(a.Header, *a.Body.(*NSResource)); err != nil {\n\t\t\t\tt.Fatalf(\"b.NSResource(%#v): %v\", a, err)\n\t\t\t}\n\t\tcase TypeCNAME:\n\t\t\tif err := b.CNAMEResource(a.Header, *a.Body.(*CNAMEResource)); err != nil {\n\t\t\t\tt.Fatalf(\"b.CNAMEResource(%#v): %v\", a, err)\n\t\t\t}\n\t\tcase TypeSOA:\n\t\t\tif err := b.SOAResource(a.Header, *a.Body.(*SOAResource)); err != nil {\n\t\t\t\tt.Fatalf(\"b.SOAResource(%#v): %v\", a, err)\n\t\t\t}\n\t\tcase TypePTR:\n\t\t\tif err := b.PTRResource(a.Header, *a.Body.(*PTRResource)); err != nil {\n\t\t\t\tt.Fatalf(\"b.PTRResource(%#v): %v\", a, err)\n\t\t\t}\n\t\tcase TypeMX:\n\t\t\tif err := 
b.MXResource(a.Header, *a.Body.(*MXResource)); err != nil {\n\t\t\t\tt.Fatalf(\"b.MXResource(%#v): %v\", a, err)\n\t\t\t}\n\t\tcase TypeTXT:\n\t\t\tif err := b.TXTResource(a.Header, *a.Body.(*TXTResource)); err != nil {\n\t\t\t\tt.Fatalf(\"b.TXTResource(%#v): %v\", a, err)\n\t\t\t}\n\t\tcase TypeAAAA:\n\t\t\tif err := b.AAAAResource(a.Header, *a.Body.(*AAAAResource)); err != nil {\n\t\t\t\tt.Fatalf(\"b.AAAAResource(%#v): %v\", a, err)\n\t\t\t}\n\t\tcase TypeSRV:\n\t\t\tif err := b.SRVResource(a.Header, *a.Body.(*SRVResource)); err != nil {\n\t\t\t\tt.Fatalf(\"b.SRVResource(%#v): %v\", a, err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := b.StartAuthorities(); err != nil {\n\t\tt.Fatal(\"b.StartAuthorities():\", err)\n\t}\n\tfor _, a := range msg.Authorities {\n\t\tif err := b.NSResource(a.Header, *a.Body.(*NSResource)); err != nil {\n\t\t\tt.Fatalf(\"b.NSResource(%#v): %v\", a, err)\n\t\t}\n\t}\n\n\tif err := b.StartAdditionals(); err != nil {\n\t\tt.Fatal(\"b.StartAdditionals():\", err)\n\t}\n\tfor _, a := range msg.Additionals {\n\t\tif err := b.TXTResource(a.Header, *a.Body.(*TXTResource)); err != nil {\n\t\t\tt.Fatalf(\"b.TXTResource(%#v): %v\", a, err)\n\t\t}\n\t}\n\n\tgot, err := b.Finish()\n\tif err != nil {\n\t\tt.Fatal(\"b.Finish():\", err)\n\t}\n\tif !bytes.Equal(got, want) {\n\t\tt.Fatalf(\"Got from Builder: %#v\\nwant = %#v\", got, want)\n\t}\n}\n\nfunc TestResourcePack(t *testing.T) {\n\tfor _, tt := range []struct {\n\t\tm   Message\n\t\terr error\n\t}{\n\t\t{\n\t\t\tMessage{\n\t\t\t\tQuestions: []Question{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  mustNewName(\".\"),\n\t\t\t\t\t\tType:  TypeAAAA,\n\t\t\t\t\t\tClass: ClassINET,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAnswers: []Resource{{ResourceHeader{}, nil}},\n\t\t\t},\n\t\t\t&nestedError{\"packing Answer\", errNilResouceBody},\n\t\t},\n\t\t{\n\t\t\tMessage{\n\t\t\t\tQuestions: []Question{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  mustNewName(\".\"),\n\t\t\t\t\t\tType:  TypeAAAA,\n\t\t\t\t\t\tClass: 
ClassINET,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAuthorities: []Resource{{ResourceHeader{}, (*NSResource)(nil)}},\n\t\t\t},\n\t\t\t&nestedError{\"packing Authority\",\n\t\t\t\t&nestedError{\"ResourceHeader\",\n\t\t\t\t\t&nestedError{\"Name\", errNonCanonicalName},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tMessage{\n\t\t\t\tQuestions: []Question{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  mustNewName(\".\"),\n\t\t\t\t\t\tType:  TypeA,\n\t\t\t\t\t\tClass: ClassINET,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAdditionals: []Resource{{ResourceHeader{}, nil}},\n\t\t\t},\n\t\t\t&nestedError{\"packing Additional\", errNilResouceBody},\n\t\t},\n\t} {\n\t\t_, err := tt.m.Pack()\n\t\tif !reflect.DeepEqual(err, tt.err) {\n\t\t\tt.Errorf(\"got %v for %v; want %v\", err, tt.m, tt.err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkParsing(b *testing.B) {\n\tb.ReportAllocs()\n\n\tname := mustNewName(\"foo.bar.example.com.\")\n\tmsg := Message{\n\t\tHeader: Header{Response: true, Authoritative: true},\n\t\tQuestions: []Question{\n\t\t\t{\n\t\t\t\tName:  name,\n\t\t\t\tType:  TypeA,\n\t\t\t\tClass: ClassINET,\n\t\t\t},\n\t\t},\n\t\tAnswers: []Resource{\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t\t&AResource{[4]byte{}},\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t\t&AAAAResource{[16]byte{}},\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t\t&CNAMEResource{name},\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t\t&NSResource{name},\n\t\t\t},\n\t\t},\n\t}\n\n\tbuf, err := msg.Pack()\n\tif err != nil {\n\t\tb.Fatal(\"msg.Pack():\", err)\n\t}\n\n\tfor i := 0; i < b.N; i++ {\n\t\tvar p Parser\n\t\tif _, err := p.Start(buf); err != nil {\n\t\t\tb.Fatal(\"p.Start(buf):\", err)\n\t\t}\n\n\t\tfor {\n\t\t\t_, err := p.Question()\n\t\t\tif err == ErrSectionDone 
{\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tb.Fatal(\"p.Question():\", err)\n\t\t\t}\n\t\t}\n\n\t\tfor {\n\t\t\th, err := p.AnswerHeader()\n\t\t\tif err == ErrSectionDone {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tswitch h.Type {\n\t\t\tcase TypeA:\n\t\t\t\tif _, err := p.AResource(); err != nil {\n\t\t\t\t\tb.Fatal(\"p.AResource():\", err)\n\t\t\t\t}\n\t\t\tcase TypeAAAA:\n\t\t\t\tif _, err := p.AAAAResource(); err != nil {\n\t\t\t\t\tb.Fatal(\"p.AAAAResource():\", err)\n\t\t\t\t}\n\t\t\tcase TypeCNAME:\n\t\t\t\tif _, err := p.CNAMEResource(); err != nil {\n\t\t\t\t\tb.Fatal(\"p.CNAMEResource():\", err)\n\t\t\t\t}\n\t\t\tcase TypeNS:\n\t\t\t\tif _, err := p.NSResource(); err != nil {\n\t\t\t\t\tb.Fatal(\"p.NSResource():\", err)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tb.Fatalf(\"unknown type: %T\", h)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc BenchmarkBuilding(b *testing.B) {\n\tb.ReportAllocs()\n\n\tname := mustNewName(\"foo.bar.example.com.\")\n\tbuf := make([]byte, 0, packStartingCap)\n\n\tfor i := 0; i < b.N; i++ {\n\t\tvar bld Builder\n\t\tbld.StartWithoutCompression(buf, Header{Response: true, Authoritative: true})\n\n\t\tif err := bld.StartQuestions(); err != nil {\n\t\t\tb.Fatal(\"bld.StartQuestions():\", err)\n\t\t}\n\t\tq := Question{\n\t\t\tName:  name,\n\t\t\tType:  TypeA,\n\t\t\tClass: ClassINET,\n\t\t}\n\t\tif err := bld.Question(q); err != nil {\n\t\t\tb.Fatalf(\"bld.Question(%+v): %v\", q, err)\n\t\t}\n\n\t\thdr := ResourceHeader{\n\t\t\tName:  name,\n\t\t\tClass: ClassINET,\n\t\t}\n\t\tif err := bld.StartAnswers(); err != nil {\n\t\t\tb.Fatal(\"bld.StartQuestions():\", err)\n\t\t}\n\n\t\tar := AResource{[4]byte{}}\n\t\tif err := bld.AResource(hdr, ar); err != nil {\n\t\t\tb.Fatalf(\"bld.AResource(%+v, %+v): %v\", hdr, ar, err)\n\t\t}\n\n\t\taaar := AAAAResource{[16]byte{}}\n\t\tif err := bld.AAAAResource(hdr, aaar); err != nil {\n\t\t\tb.Fatalf(\"bld.AAAAResource(%+v, %+v): %v\", hdr, aaar, 
err)\n\t\t}\n\n\t\tcnr := CNAMEResource{name}\n\t\tif err := bld.CNAMEResource(hdr, cnr); err != nil {\n\t\t\tb.Fatalf(\"bld.CNAMEResource(%+v, %+v): %v\", hdr, cnr, err)\n\t\t}\n\n\t\tnsr := NSResource{name}\n\t\tif err := bld.NSResource(hdr, nsr); err != nil {\n\t\t\tb.Fatalf(\"bld.NSResource(%+v, %+v): %v\", hdr, nsr, err)\n\t\t}\n\n\t\tif _, err := bld.Finish(); err != nil {\n\t\t\tb.Fatal(\"bld.Finish():\", err)\n\t\t}\n\t}\n}\n\nfunc smallTestMsg() Message {\n\tname := mustNewName(\"example.com.\")\n\treturn Message{\n\t\tHeader: Header{Response: true, Authoritative: true},\n\t\tQuestions: []Question{\n\t\t\t{\n\t\t\t\tName:  name,\n\t\t\t\tType:  TypeA,\n\t\t\t\tClass: ClassINET,\n\t\t\t},\n\t\t},\n\t\tAnswers: []Resource{\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tType:  TypeA,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t\t&AResource{[4]byte{127, 0, 0, 1}},\n\t\t\t},\n\t\t},\n\t\tAuthorities: []Resource{\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tType:  TypeA,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t\t&AResource{[4]byte{127, 0, 0, 1}},\n\t\t\t},\n\t\t},\n\t\tAdditionals: []Resource{\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tType:  TypeA,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t\t&AResource{[4]byte{127, 0, 0, 1}},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc largeTestMsg() Message {\n\tname := mustNewName(\"foo.bar.example.com.\")\n\treturn Message{\n\t\tHeader: Header{Response: true, Authoritative: true},\n\t\tQuestions: []Question{\n\t\t\t{\n\t\t\t\tName:  name,\n\t\t\t\tType:  TypeA,\n\t\t\t\tClass: ClassINET,\n\t\t\t},\n\t\t},\n\t\tAnswers: []Resource{\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tType:  TypeA,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t\t&AResource{[4]byte{127, 0, 0, 1}},\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tType:  TypeA,\n\t\t\t\t\tClass: 
ClassINET,\n\t\t\t\t},\n\t\t\t\t&AResource{[4]byte{127, 0, 0, 2}},\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tType:  TypeAAAA,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t\t&AAAAResource{[16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}},\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tType:  TypeCNAME,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t\t&CNAMEResource{mustNewName(\"alias.example.com.\")},\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tType:  TypeSOA,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t\t&SOAResource{\n\t\t\t\t\tNS:      mustNewName(\"ns1.example.com.\"),\n\t\t\t\t\tMBox:    mustNewName(\"mb.example.com.\"),\n\t\t\t\t\tSerial:  1,\n\t\t\t\t\tRefresh: 2,\n\t\t\t\t\tRetry:   3,\n\t\t\t\t\tExpire:  4,\n\t\t\t\t\tMinTTL:  5,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tType:  TypePTR,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t\t&PTRResource{mustNewName(\"ptr.example.com.\")},\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tType:  TypeMX,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t\t&MXResource{\n\t\t\t\t\t7,\n\t\t\t\t\tmustNewName(\"mx.example.com.\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tType:  TypeSRV,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t\t&SRVResource{\n\t\t\t\t\t8,\n\t\t\t\t\t9,\n\t\t\t\t\t11,\n\t\t\t\t\tmustNewName(\"srv.example.com.\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tAuthorities: []Resource{\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tType:  TypeNS,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t\t&NSResource{mustNewName(\"ns1.example.com.\")},\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tType:  TypeNS,\n\t\t\t\t\tClass: 
ClassINET,\n\t\t\t\t},\n\t\t\t\t&NSResource{mustNewName(\"ns2.example.com.\")},\n\t\t\t},\n\t\t},\n\t\tAdditionals: []Resource{\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tType:  TypeTXT,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t\t&TXTResource{\"So Long, and Thanks for All the Fish\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceHeader{\n\t\t\t\t\tName:  name,\n\t\t\t\t\tType:  TypeTXT,\n\t\t\t\t\tClass: ClassINET,\n\t\t\t\t},\n\t\t\t\t&TXTResource{\"Hamster Huey and the Gooey Kablooie\"},\n\t\t\t},\n\t\t},\n\t}\n}\n\nconst loremIpsum = `\nLorem ipsum dolor sit amet, nec enim antiopam id, an ullum choro\nnonumes qui, pro eu debet honestatis mediocritatem. No alia enim eos,\nmagna signiferumque ex vis. Mei no aperiri dissentias, cu vel quas\nregione. Malorum quaeque vim ut, eum cu semper aliquid invidunt, ei\nnam ipsum assentior.\n\nNostrum appellantur usu no, vis ex probatus adipiscing. Cu usu illum\nfacilis eleifend. Iusto conceptam complectitur vim id. Tale omnesque\nno usu, ei oblique sadipscing vim. At nullam voluptua usu, mei laudem\nreformidans et. Qui ei eros porro reformidans, ius suas veritus\ntorquatos ex. Mea te facer alterum consequat.\n\nSoleat torquatos democritum sed et, no mea congue appareat, facer\naliquam nec in. Has te ipsum tritani. At justo dicta option nec, movet\nphaedrum ad nam. Ea detracto verterem liberavisse has, delectus\nsuscipiantur in mei. Ex nam meliore complectitur. Ut nam omnis\nhonestatis quaerendum, ea mea nihil affert detracto, ad vix rebum\nmollis.\n\nUt epicurei praesent neglegentur pri, prima fuisset intellegebat ad\nvim. An habemus comprehensam usu, at enim dignissim pro. Eam reque\nvivendum adipisci ea. Vel ne odio choro minimum. Sea admodum\ndissentiet ex. Mundi tamquam evertitur ius cu. Homero postea iisque ut\npro, vel ne saepe senserit consetetur.\n\nNulla utamur facilisis ius ea, in viderer diceret pertinax eum. 
Mei no\nenim quodsi facilisi, ex sed aeterno appareat mediocritatem, eum\nsententiae deterruisset ut. At suas timeam euismod cum, offendit\nappareat interpretaris ne vix. Vel ea civibus albucius, ex vim quidam\naccusata intellegebat, noluisse instructior sea id. Nec te nonumes\nhabemus appellantur, quis dignissim vituperata eu nam.\n\nAt vix apeirian patrioque vituperatoribus, an usu agam assum. Debet\niisque an mea. Per eu dicant ponderum accommodare. Pri alienum\nplacerat senserit an, ne eum ferri abhorreant vituperatoribus. Ut mea\neligendi disputationi. Ius no tation everti impedit, ei magna quidam\nmediocritatem pri.\n\nLegendos perpetua iracundia ne usu, no ius ullum epicurei intellegam,\nad modus epicuri lucilius eam. In unum quaerendum usu. Ne diam paulo\nhas, ea veri virtute sed. Alia honestatis conclusionemque mea eu, ut\niudico albucius his.\n\nUsu essent probatus eu, sed omnis dolor delicatissimi ex. No qui augue\ndissentias dissentiet. Laudem recteque no usu, vel an velit noluisse,\nan sed utinam eirmod appetere. Ne mea fuisset inimicus ocurreret. At\nvis dicant abhorreant, utinam forensibus nec ne, mei te docendi\nconsequat. Brute inermis persecuti cum id. Ut ipsum munere propriae\nusu, dicit graeco disputando id has.\n\nEros dolore quaerendum nam ei. Timeam ornatus inciderint pro id. Nec\ntorquatos sadipscing ei, ancillae molestie per in. Malis principes duo\nea, usu liber postulant ei.\n\nGraece timeam voluptatibus eu eam. Alia probatus quo no, ea scripta\nfeugiat duo. Congue option meliore ex qui, noster invenire appellantur\nea vel. Eu exerci legendos vel. Consetetur repudiandae vim ut. Vix an\nprobo minimum, et nam illud falli tempor.\n\nCum dico signiferumque eu. Sed ut regione maiorum, id veritus insolens\ntacimates vix. Eu mel sint tamquam lucilius, duo no oporteat\ntacimates. Atqui augue concludaturque vix ei, id mel utroque menandri.\n\nAd oratio blandit aliquando pro. Vis et dolorum rationibus\nphilosophia, ad cum nulla molestie. 
Hinc fuisset adversarium eum et,\nne qui nisl verear saperet, vel te quaestio forensibus. Per odio\noption delenit an. Alii placerat has no, in pri nihil platonem\ncotidieque. Est ut elit copiosae scaevola, debet tollit maluisset sea\nan.\n\nTe sea hinc debet pericula, liber ridens fabulas cu sed, quem mutat\naccusam mea et. Elitr labitur albucius et pri, an labore feugait mel.\nVelit zril melius usu ea. Ad stet putent interpretaris qui. Mel no\nerror volumus scripserit. In pro paulo iudico, quo ei dolorem\nverterem, affert fabellas dissentiet ea vix.\n\nVis quot deserunt te. Error aliquid detraxit eu usu, vis alia eruditi\nsalutatus cu. Est nostrud bonorum an, ei usu alii salutatus. Vel at\nnisl primis, eum ex aperiri noluisse reformidans. Ad veri velit\nutroque vis, ex equidem detraxit temporibus has.\n\nInermis appareat usu ne. Eros placerat periculis mea ad, in dictas\npericula pro. Errem postulant at usu, ea nec amet ornatus mentitum. Ad\nmazim graeco eum, vel ex percipit volutpat iudicabit, sit ne delicata\ninteresset. Mel sapientem prodesset abhorreant et, oblique suscipit\neam id.\n\nAn maluisset disputando mea, vidit mnesarchum pri et. Malis insolens\ninciderint no sea. Ea persius maluisset vix, ne vim appellantur\ninstructior, consul quidam definiebas pri id. Cum integre feugiat\npericula in, ex sed persius similique, mel ne natum dicit percipitur.\n\nPrimis discere ne pri, errem putent definitionem at vis. Ei mel dolore\nneglegentur, mei tincidunt percipitur ei. Pro ad simul integre\nrationibus. Eu vel alii honestatis definitiones, mea no nonumy\nreprehendunt.\n\nDicta appareat legendos est cu. Eu vel congue dicunt omittam, no vix\nadhuc minimum constituam, quot noluisse id mel. Eu quot sale mutat\nduo, ex nisl munere invenire duo. Ne nec ullum utamur. Pro alterum\ndebitis nostrum no, ut vel aliquid vivendo.\n\nAliquip fierent praesent quo ne, id sit audiam recusabo delicatissimi.\nUsu postulant incorrupte cu. 
At pro dicit tibique intellegam, cibo\ndolore impedit id eam, et aeque feugait assentior has. Quando sensibus\nnec ex. Possit sensibus pri ad, unum mutat periculis cu vix.\n\nMundi tibique vix te, duo simul partiendo qualisque id, est at vidit\nsonet tempor. No per solet aeterno deseruisse. Petentium salutandi\ndefiniebas pri cu. Munere vivendum est in. Ei justo congue eligendi\nvis, modus offendit omittantur te mel.\n\nIntegre voluptaria in qui, sit habemus tractatos constituam no. Utinam\nmelius conceptam est ne, quo in minimum apeirian delicata, ut ius\nporro recusabo. Dicant expetenda vix no, ludus scripserit sed ex, eu\nhis modo nostro. Ut etiam sonet his, quodsi inciderint philosophia te\nper. Nullam lobortis eu cum, vix an sonet efficiendi repudiandae. Vis\nad idque fabellas intellegebat.\n\nEum commodo senserit conclusionemque ex. Sed forensibus sadipscing ut,\nmei in facer delicata periculis, sea ne hinc putent cetero. Nec ne\nalia corpora invenire, alia prima soleat te cum. Eleifend posidonium\nnam at.\n\nDolorum indoctum cu quo, ex dolor legendos recteque eam, cu pri zril\ndiscere. Nec civibus officiis dissentiunt ex, est te liber ludus\nelaboraret. Cum ea fabellas invenire. Ex vim nostrud eripuit\ncomprehensam, nam te inermis delectus, saepe inermis senserit.\n`\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/atom/atom.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package atom provides integer codes (also known as atoms) for a fixed set of\n// frequently occurring HTML strings: tag names and attribute keys such as \"p\"\n// and \"id\".\n//\n// Sharing an atom's name between all elements with the same tag can result in\n// fewer string allocations when tokenizing and parsing HTML. Integer\n// comparisons are also generally faster than string comparisons.\n//\n// The value of an atom's particular code is not guaranteed to stay the same\n// between versions of this package. Neither is any ordering guaranteed:\n// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to\n// be dense. The only guarantees are that e.g. looking up \"div\" will yield\n// atom.Div, calling atom.Div.String will return \"div\", and atom.Div != 0.\npackage atom // import \"golang.org/x/net/html/atom\"\n\n// Atom is an integer code for a string. The zero value maps to \"\".\ntype Atom uint32\n\n// String returns the atom's name.\nfunc (a Atom) String() string {\n\tstart := uint32(a >> 8)\n\tn := uint32(a & 0xff)\n\tif start+n > uint32(len(atomText)) {\n\t\treturn \"\"\n\t}\n\treturn atomText[start : start+n]\n}\n\nfunc (a Atom) string() string {\n\treturn atomText[a>>8 : a>>8+a&0xff]\n}\n\n// fnv computes the FNV hash with an arbitrary starting value h.\nfunc fnv(h uint32, s []byte) uint32 {\n\tfor i := range s {\n\t\th ^= uint32(s[i])\n\t\th *= 16777619\n\t}\n\treturn h\n}\n\nfunc match(s string, t []byte) bool {\n\tfor i, c := range t {\n\t\tif s[i] != c {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// Lookup returns the atom whose name is s. It returns zero if there is no\n// such atom. 
The lookup is case sensitive.\nfunc Lookup(s []byte) Atom {\n\tif len(s) == 0 || len(s) > maxAtomLen {\n\t\treturn 0\n\t}\n\th := fnv(hash0, s)\n\tif a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {\n\t\treturn a\n\t}\n\tif a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {\n\t\treturn a\n\t}\n\treturn 0\n}\n\n// String returns a string whose contents are equal to s. In that sense, it is\n// equivalent to string(s) but may be more efficient.\nfunc String(s []byte) string {\n\tif a := Lookup(s); a != 0 {\n\t\treturn a.String()\n\t}\n\treturn string(s)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/atom/atom_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage atom\n\nimport (\n\t\"sort\"\n\t\"testing\"\n)\n\nfunc TestKnown(t *testing.T) {\n\tfor _, s := range testAtomList {\n\t\tif atom := Lookup([]byte(s)); atom.String() != s {\n\t\t\tt.Errorf(\"Lookup(%q) = %#x (%q)\", s, uint32(atom), atom.String())\n\t\t}\n\t}\n}\n\nfunc TestHits(t *testing.T) {\n\tfor _, a := range table {\n\t\tif a == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tgot := Lookup([]byte(a.String()))\n\t\tif got != a {\n\t\t\tt.Errorf(\"Lookup(%q) = %#x, want %#x\", a.String(), uint32(got), uint32(a))\n\t\t}\n\t}\n}\n\nfunc TestMisses(t *testing.T) {\n\ttestCases := []string{\n\t\t\"\",\n\t\t\"\\x00\",\n\t\t\"\\xff\",\n\t\t\"A\",\n\t\t\"DIV\",\n\t\t\"Div\",\n\t\t\"dIV\",\n\t\t\"aa\",\n\t\t\"a\\x00\",\n\t\t\"ab\",\n\t\t\"abb\",\n\t\t\"abbr0\",\n\t\t\"abbr \",\n\t\t\" abbr\",\n\t\t\" a\",\n\t\t\"acceptcharset\",\n\t\t\"acceptCharset\",\n\t\t\"accept_charset\",\n\t\t\"h0\",\n\t\t\"h1h2\",\n\t\t\"h7\",\n\t\t\"onClick\",\n\t\t\"λ\",\n\t\t// The following string has the same hash (0xa1d7fab7) as \"onmouseover\".\n\t\t\"\\x00\\x00\\x00\\x00\\x00\\x50\\x18\\xae\\x38\\xd0\\xb7\",\n\t}\n\tfor _, tc := range testCases {\n\t\tgot := Lookup([]byte(tc))\n\t\tif got != 0 {\n\t\t\tt.Errorf(\"Lookup(%q): got %d, want 0\", tc, got)\n\t\t}\n\t}\n}\n\nfunc TestForeignObject(t *testing.T) {\n\tconst (\n\t\tafo = Foreignobject\n\t\tafO = ForeignObject\n\t\tsfo = \"foreignobject\"\n\t\tsfO = \"foreignObject\"\n\t)\n\tif got := Lookup([]byte(sfo)); got != afo {\n\t\tt.Errorf(\"Lookup(%q): got %#v, want %#v\", sfo, got, afo)\n\t}\n\tif got := Lookup([]byte(sfO)); got != afO {\n\t\tt.Errorf(\"Lookup(%q): got %#v, want %#v\", sfO, got, afO)\n\t}\n\tif got := afo.String(); got != sfo {\n\t\tt.Errorf(\"Atom(%#v).String(): got %q, want %q\", afo, got, sfo)\n\t}\n\tif got := afO.String(); got != sfO 
{\n\t\tt.Errorf(\"Atom(%#v).String(): got %q, want %q\", afO, got, sfO)\n\t}\n}\n\nfunc BenchmarkLookup(b *testing.B) {\n\tsortedTable := make([]string, 0, len(table))\n\tfor _, a := range table {\n\t\tif a != 0 {\n\t\t\tsortedTable = append(sortedTable, a.String())\n\t\t}\n\t}\n\tsort.Strings(sortedTable)\n\n\tx := make([][]byte, 1000)\n\tfor i := range x {\n\t\tx[i] = []byte(sortedTable[i%len(sortedTable)])\n\t}\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tfor _, s := range x {\n\t\t\tLookup(s)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/atom/gen.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n//go:generate go run gen.go\n//go:generate go run gen.go -test\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"go/format\"\n\t\"io/ioutil\"\n\t\"math/rand\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n)\n\n// identifier converts s to a Go exported identifier.\n// It converts \"div\" to \"Div\" and \"accept-charset\" to \"AcceptCharset\".\nfunc identifier(s string) string {\n\tb := make([]byte, 0, len(s))\n\tcap := true\n\tfor _, c := range s {\n\t\tif c == '-' {\n\t\t\tcap = true\n\t\t\tcontinue\n\t\t}\n\t\tif cap && 'a' <= c && c <= 'z' {\n\t\t\tc -= 'a' - 'A'\n\t\t}\n\t\tcap = false\n\t\tb = append(b, byte(c))\n\t}\n\treturn string(b)\n}\n\nvar test = flag.Bool(\"test\", false, \"generate table_test.go\")\n\nfunc genFile(name string, buf *bytes.Buffer) {\n\tb, err := format.Source(buf.Bytes())\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\tif err := ioutil.WriteFile(name, b, 0644); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tvar all []string\n\tall = append(all, elements...)\n\tall = append(all, attributes...)\n\tall = append(all, eventHandlers...)\n\tall = append(all, extra...)\n\tsort.Strings(all)\n\n\t// uniq - lists have dups\n\tw := 0\n\tfor _, s := range all {\n\t\tif w == 0 || all[w-1] != s {\n\t\t\tall[w] = s\n\t\t\tw++\n\t\t}\n\t}\n\tall = all[:w]\n\n\tif *test {\n\t\tvar buf bytes.Buffer\n\t\tfmt.Fprintln(&buf, \"// Code generated by go generate gen.go; DO NOT EDIT.\\n\")\n\t\tfmt.Fprintln(&buf, \"//go:generate go run gen.go -test\\n\")\n\t\tfmt.Fprintln(&buf, \"package atom\\n\")\n\t\tfmt.Fprintln(&buf, \"var testAtomList = []string{\")\n\t\tfor _, s := range all {\n\t\t\tfmt.Fprintf(&buf, \"\\t%q,\\n\", s)\n\t\t}\n\t\tfmt.Fprintln(&buf, 
\"}\")\n\n\t\tgenFile(\"table_test.go\", &buf)\n\t\treturn\n\t}\n\n\t// Find hash that minimizes table size.\n\tvar best *table\n\tfor i := 0; i < 1000000; i++ {\n\t\tif best != nil && 1<<(best.k-1) < len(all) {\n\t\t\tbreak\n\t\t}\n\t\th := rand.Uint32()\n\t\tfor k := uint(0); k <= 16; k++ {\n\t\t\tif best != nil && k >= best.k {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvar t table\n\t\t\tif t.init(h, k, all) {\n\t\t\t\tbest = &t\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif best == nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to construct string table\\n\")\n\t\tos.Exit(1)\n\t}\n\n\t// Lay out strings, using overlaps when possible.\n\tlayout := append([]string{}, all...)\n\n\t// Remove strings that are substrings of other strings\n\tfor changed := true; changed; {\n\t\tchanged = false\n\t\tfor i, s := range layout {\n\t\t\tif s == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor j, t := range layout {\n\t\t\t\tif i != j && t != \"\" && strings.Contains(s, t) {\n\t\t\t\t\tchanged = true\n\t\t\t\t\tlayout[j] = \"\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Join strings where one suffix matches another prefix.\n\tfor {\n\t\t// Find best i, j, k such that layout[i][len-k:] == layout[j][:k],\n\t\t// maximizing overlap length k.\n\t\tbesti := -1\n\t\tbestj := -1\n\t\tbestk := 0\n\t\tfor i, s := range layout {\n\t\t\tif s == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor j, t := range layout {\n\t\t\t\tif i == j {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor k := bestk + 1; k <= len(s) && k <= len(t); k++ {\n\t\t\t\t\tif s[len(s)-k:] == t[:k] {\n\t\t\t\t\t\tbesti = i\n\t\t\t\t\t\tbestj = j\n\t\t\t\t\t\tbestk = k\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif bestk > 0 {\n\t\t\tlayout[besti] += layout[bestj][bestk:]\n\t\t\tlayout[bestj] = \"\"\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\n\ttext := strings.Join(layout, \"\")\n\n\tatom := map[string]uint32{}\n\tfor _, s := range all {\n\t\toff := strings.Index(text, s)\n\t\tif off < 0 {\n\t\t\tpanic(\"lost string \" + s)\n\t\t}\n\t\tatom[s] = 
uint32(off<<8 | len(s))\n\t}\n\n\tvar buf bytes.Buffer\n\t// Generate the Go code.\n\tfmt.Fprintln(&buf, \"// Code generated by go generate gen.go; DO NOT EDIT.\\n\")\n\tfmt.Fprintln(&buf, \"//go:generate go run gen.go\\n\")\n\tfmt.Fprintln(&buf, \"package atom\\n\\nconst (\")\n\n\t// compute max len\n\tmaxLen := 0\n\tfor _, s := range all {\n\t\tif maxLen < len(s) {\n\t\t\tmaxLen = len(s)\n\t\t}\n\t\tfmt.Fprintf(&buf, \"\\t%s Atom = %#x\\n\", identifier(s), atom[s])\n\t}\n\tfmt.Fprintln(&buf, \")\\n\")\n\n\tfmt.Fprintf(&buf, \"const hash0 = %#x\\n\\n\", best.h0)\n\tfmt.Fprintf(&buf, \"const maxAtomLen = %d\\n\\n\", maxLen)\n\n\tfmt.Fprintf(&buf, \"var table = [1<<%d]Atom{\\n\", best.k)\n\tfor i, s := range best.tab {\n\t\tif s == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(&buf, \"\\t%#x: %#x, // %s\\n\", i, atom[s], s)\n\t}\n\tfmt.Fprintf(&buf, \"}\\n\")\n\tdatasize := (1 << best.k) * 4\n\n\tfmt.Fprintln(&buf, \"const atomText =\")\n\ttextsize := len(text)\n\tfor len(text) > 60 {\n\t\tfmt.Fprintf(&buf, \"\\t%q +\\n\", text[:60])\n\t\ttext = text[60:]\n\t}\n\tfmt.Fprintf(&buf, \"\\t%q\\n\\n\", text)\n\n\tgenFile(\"table.go\", &buf)\n\n\tfmt.Fprintf(os.Stdout, \"%d atoms; %d string bytes + %d tables = %d total data\\n\", len(all), textsize, datasize, textsize+datasize)\n}\n\ntype byLen []string\n\nfunc (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }\nfunc (x byLen) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }\nfunc (x byLen) Len() int           { return len(x) }\n\n// fnv computes the FNV hash with an arbitrary starting value h.\nfunc fnv(h uint32, s string) uint32 {\n\tfor i := 0; i < len(s); i++ {\n\t\th ^= uint32(s[i])\n\t\th *= 16777619\n\t}\n\treturn h\n}\n\n// A table represents an attempt at constructing the lookup table.\n// The lookup table uses cuckoo hashing, meaning that each string\n// can be found in one of two positions.\ntype table struct {\n\th0   uint32\n\tk    uint\n\tmask uint32\n\ttab  []string\n}\n\n// hash returns the two 
hashes for s.\nfunc (t *table) hash(s string) (h1, h2 uint32) {\n\th := fnv(t.h0, s)\n\th1 = h & t.mask\n\th2 = (h >> 16) & t.mask\n\treturn\n}\n\n// init initializes the table with the given parameters.\n// h0 is the initial hash value,\n// k is the number of bits of hash value to use, and\n// x is the list of strings to store in the table.\n// init returns false if the table cannot be constructed.\nfunc (t *table) init(h0 uint32, k uint, x []string) bool {\n\tt.h0 = h0\n\tt.k = k\n\tt.tab = make([]string, 1<<k)\n\tt.mask = 1<<k - 1\n\tfor _, s := range x {\n\t\tif !t.insert(s) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// insert inserts s in the table.\nfunc (t *table) insert(s string) bool {\n\th1, h2 := t.hash(s)\n\tif t.tab[h1] == \"\" {\n\t\tt.tab[h1] = s\n\t\treturn true\n\t}\n\tif t.tab[h2] == \"\" {\n\t\tt.tab[h2] = s\n\t\treturn true\n\t}\n\tif t.push(h1, 0) {\n\t\tt.tab[h1] = s\n\t\treturn true\n\t}\n\tif t.push(h2, 0) {\n\t\tt.tab[h2] = s\n\t\treturn true\n\t}\n\treturn false\n}\n\n// push attempts to push aside the entry in slot i.\nfunc (t *table) push(i uint32, depth int) bool {\n\tif depth > len(t.tab) {\n\t\treturn false\n\t}\n\ts := t.tab[i]\n\th1, h2 := t.hash(s)\n\tj := h1 + h2 - i\n\tif t.tab[j] != \"\" && !t.push(j, depth+1) {\n\t\treturn false\n\t}\n\tt.tab[j] = s\n\treturn true\n}\n\n// The lists of element names and attribute keys were taken from\n// https://html.spec.whatwg.org/multipage/indices.html#index\n// as of the \"HTML Living Standard - Last Updated 18 September 2017\" version.\n\n// \"command\", \"keygen\" and \"menuitem\" have been removed from the spec,\n// but are kept here for backwards compatibility.\nvar elements = 
[]string{\n\t\"a\",\n\t\"abbr\",\n\t\"address\",\n\t\"area\",\n\t\"article\",\n\t\"aside\",\n\t\"audio\",\n\t\"b\",\n\t\"base\",\n\t\"bdi\",\n\t\"bdo\",\n\t\"blockquote\",\n\t\"body\",\n\t\"br\",\n\t\"button\",\n\t\"canvas\",\n\t\"caption\",\n\t\"cite\",\n\t\"code\",\n\t\"col\",\n\t\"colgroup\",\n\t\"command\",\n\t\"data\",\n\t\"datalist\",\n\t\"dd\",\n\t\"del\",\n\t\"details\",\n\t\"dfn\",\n\t\"dialog\",\n\t\"div\",\n\t\"dl\",\n\t\"dt\",\n\t\"em\",\n\t\"embed\",\n\t\"fieldset\",\n\t\"figcaption\",\n\t\"figure\",\n\t\"footer\",\n\t\"form\",\n\t\"h1\",\n\t\"h2\",\n\t\"h3\",\n\t\"h4\",\n\t\"h5\",\n\t\"h6\",\n\t\"head\",\n\t\"header\",\n\t\"hgroup\",\n\t\"hr\",\n\t\"html\",\n\t\"i\",\n\t\"iframe\",\n\t\"img\",\n\t\"input\",\n\t\"ins\",\n\t\"kbd\",\n\t\"keygen\",\n\t\"label\",\n\t\"legend\",\n\t\"li\",\n\t\"link\",\n\t\"main\",\n\t\"map\",\n\t\"mark\",\n\t\"menu\",\n\t\"menuitem\",\n\t\"meta\",\n\t\"meter\",\n\t\"nav\",\n\t\"noscript\",\n\t\"object\",\n\t\"ol\",\n\t\"optgroup\",\n\t\"option\",\n\t\"output\",\n\t\"p\",\n\t\"param\",\n\t\"picture\",\n\t\"pre\",\n\t\"progress\",\n\t\"q\",\n\t\"rp\",\n\t\"rt\",\n\t\"ruby\",\n\t\"s\",\n\t\"samp\",\n\t\"script\",\n\t\"section\",\n\t\"select\",\n\t\"slot\",\n\t\"small\",\n\t\"source\",\n\t\"span\",\n\t\"strong\",\n\t\"style\",\n\t\"sub\",\n\t\"summary\",\n\t\"sup\",\n\t\"table\",\n\t\"tbody\",\n\t\"td\",\n\t\"template\",\n\t\"textarea\",\n\t\"tfoot\",\n\t\"th\",\n\t\"thead\",\n\t\"time\",\n\t\"title\",\n\t\"tr\",\n\t\"track\",\n\t\"u\",\n\t\"ul\",\n\t\"var\",\n\t\"video\",\n\t\"wbr\",\n}\n\n// https://html.spec.whatwg.org/multipage/indices.html#attributes-3\n//\n// \"challenge\", \"command\", \"contextmenu\", \"dropzone\", \"icon\", \"keytype\", \"mediagroup\",\n// \"radiogroup\", \"spellcheck\", \"scoped\", \"seamless\", \"sortable\" and \"sorted\" have been removed from the spec,\n// but are kept here for backwards compatibility.\nvar attributes = 
[]string{\n\t\"abbr\",\n\t\"accept\",\n\t\"accept-charset\",\n\t\"accesskey\",\n\t\"action\",\n\t\"allowfullscreen\",\n\t\"allowpaymentrequest\",\n\t\"allowusermedia\",\n\t\"alt\",\n\t\"as\",\n\t\"async\",\n\t\"autocomplete\",\n\t\"autofocus\",\n\t\"autoplay\",\n\t\"challenge\",\n\t\"charset\",\n\t\"checked\",\n\t\"cite\",\n\t\"class\",\n\t\"color\",\n\t\"cols\",\n\t\"colspan\",\n\t\"command\",\n\t\"content\",\n\t\"contenteditable\",\n\t\"contextmenu\",\n\t\"controls\",\n\t\"coords\",\n\t\"crossorigin\",\n\t\"data\",\n\t\"datetime\",\n\t\"default\",\n\t\"defer\",\n\t\"dir\",\n\t\"dirname\",\n\t\"disabled\",\n\t\"download\",\n\t\"draggable\",\n\t\"dropzone\",\n\t\"enctype\",\n\t\"for\",\n\t\"form\",\n\t\"formaction\",\n\t\"formenctype\",\n\t\"formmethod\",\n\t\"formnovalidate\",\n\t\"formtarget\",\n\t\"headers\",\n\t\"height\",\n\t\"hidden\",\n\t\"high\",\n\t\"href\",\n\t\"hreflang\",\n\t\"http-equiv\",\n\t\"icon\",\n\t\"id\",\n\t\"inputmode\",\n\t\"integrity\",\n\t\"is\",\n\t\"ismap\",\n\t\"itemid\",\n\t\"itemprop\",\n\t\"itemref\",\n\t\"itemscope\",\n\t\"itemtype\",\n\t\"keytype\",\n\t\"kind\",\n\t\"label\",\n\t\"lang\",\n\t\"list\",\n\t\"loop\",\n\t\"low\",\n\t\"manifest\",\n\t\"max\",\n\t\"maxlength\",\n\t\"media\",\n\t\"mediagroup\",\n\t\"method\",\n\t\"min\",\n\t\"minlength\",\n\t\"multiple\",\n\t\"muted\",\n\t\"name\",\n\t\"nomodule\",\n\t\"nonce\",\n\t\"novalidate\",\n\t\"open\",\n\t\"optimum\",\n\t\"pattern\",\n\t\"ping\",\n\t\"placeholder\",\n\t\"playsinline\",\n\t\"poster\",\n\t\"preload\",\n\t\"radiogroup\",\n\t\"readonly\",\n\t\"referrerpolicy\",\n\t\"rel\",\n\t\"required\",\n\t\"reversed\",\n\t\"rows\",\n\t\"rowspan\",\n\t\"sandbox\",\n\t\"spellcheck\",\n\t\"scope\",\n\t\"scoped\",\n\t\"seamless\",\n\t\"selected\",\n\t\"shape\",\n\t\"size\",\n\t\"sizes\",\n\t\"sortable\",\n\t\"sorted\",\n\t\"slot\",\n\t\"span\",\n\t\"spellcheck\",\n\t\"src\",\n\t\"srcdoc\",\n\t\"srclang\",\n\t\"srcset\",\n\t\"start\",\n\t\"step\",\n\t\"style\",\n\t\"tabindex\",\n\t\"tar
get\",\n\t\"title\",\n\t\"translate\",\n\t\"type\",\n\t\"typemustmatch\",\n\t\"updateviacache\",\n\t\"usemap\",\n\t\"value\",\n\t\"width\",\n\t\"workertype\",\n\t\"wrap\",\n}\n\n// \"onautocomplete\", \"onautocompleteerror\", \"onmousewheel\",\n// \"onshow\" and \"onsort\" have been removed from the spec,\n// but are kept here for backwards compatibility.\nvar eventHandlers = []string{\n\t\"onabort\",\n\t\"onautocomplete\",\n\t\"onautocompleteerror\",\n\t\"onauxclick\",\n\t\"onafterprint\",\n\t\"onbeforeprint\",\n\t\"onbeforeunload\",\n\t\"onblur\",\n\t\"oncancel\",\n\t\"oncanplay\",\n\t\"oncanplaythrough\",\n\t\"onchange\",\n\t\"onclick\",\n\t\"onclose\",\n\t\"oncontextmenu\",\n\t\"oncopy\",\n\t\"oncuechange\",\n\t\"oncut\",\n\t\"ondblclick\",\n\t\"ondrag\",\n\t\"ondragend\",\n\t\"ondragenter\",\n\t\"ondragexit\",\n\t\"ondragleave\",\n\t\"ondragover\",\n\t\"ondragstart\",\n\t\"ondrop\",\n\t\"ondurationchange\",\n\t\"onemptied\",\n\t\"onended\",\n\t\"onerror\",\n\t\"onfocus\",\n\t\"onhashchange\",\n\t\"oninput\",\n\t\"oninvalid\",\n\t\"onkeydown\",\n\t\"onkeypress\",\n\t\"onkeyup\",\n\t\"onlanguagechange\",\n\t\"onload\",\n\t\"onloadeddata\",\n\t\"onloadedmetadata\",\n\t\"onloadend\",\n\t\"onloadstart\",\n\t\"onmessage\",\n\t\"onmessageerror\",\n\t\"onmousedown\",\n\t\"onmouseenter\",\n\t\"onmouseleave\",\n\t\"onmousemove\",\n\t\"onmouseout\",\n\t\"onmouseover\",\n\t\"onmouseup\",\n\t\"onmousewheel\",\n\t\"onwheel\",\n\t\"onoffline\",\n\t\"ononline\",\n\t\"onpagehide\",\n\t\"onpageshow\",\n\t\"onpaste\",\n\t\"onpause\",\n\t\"onplay\",\n\t\"onplaying\",\n\t\"onpopstate\",\n\t\"onprogress\",\n\t\"onratechange\",\n\t\"onreset\",\n\t\"onresize\",\n\t\"onrejectionhandled\",\n\t\"onscroll\",\n\t\"onsecuritypolicyviolation\",\n\t\"onseeked\",\n\t\"onseeking\",\n\t\"onselect\",\n\t\"onshow\",\n\t\"onsort\",\n\t\"onstalled\",\n\t\"onstorage\",\n\t\"onsubmit\",\n\t\"onsuspend\",\n\t\"ontimeupdate\",\n\t\"ontoggle\",\n\t\"onunhandledrejection\",\n\t\"onunload\",\n\t\"onvolumec
hange\",\n\t\"onwaiting\",\n}\n\n// extra are ad-hoc values not covered by any of the lists above.\nvar extra = []string{\n\t\"align\",\n\t\"annotation\",\n\t\"annotation-xml\",\n\t\"applet\",\n\t\"basefont\",\n\t\"bgsound\",\n\t\"big\",\n\t\"blink\",\n\t\"center\",\n\t\"color\",\n\t\"desc\",\n\t\"face\",\n\t\"font\",\n\t\"foreignObject\", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.\n\t\"foreignobject\",\n\t\"frame\",\n\t\"frameset\",\n\t\"image\",\n\t\"isindex\",\n\t\"listing\",\n\t\"malignmark\",\n\t\"marquee\",\n\t\"math\",\n\t\"mglyph\",\n\t\"mi\",\n\t\"mn\",\n\t\"mo\",\n\t\"ms\",\n\t\"mtext\",\n\t\"nobr\",\n\t\"noembed\",\n\t\"noframes\",\n\t\"plaintext\",\n\t\"prompt\",\n\t\"public\",\n\t\"spacer\",\n\t\"strike\",\n\t\"svg\",\n\t\"system\",\n\t\"tt\",\n\t\"xmp\",\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/atom/table.go",
    "content": "// Code generated by go generate gen.go; DO NOT EDIT.\n\n//go:generate go run gen.go\n\npackage atom\n\nconst (\n\tA                         Atom = 0x1\n\tAbbr                      Atom = 0x4\n\tAccept                    Atom = 0x1a06\n\tAcceptCharset             Atom = 0x1a0e\n\tAccesskey                 Atom = 0x2c09\n\tAction                    Atom = 0x25a06\n\tAddress                   Atom = 0x6ed07\n\tAlign                     Atom = 0x6d405\n\tAllowfullscreen           Atom = 0x1f00f\n\tAllowpaymentrequest       Atom = 0x6913\n\tAllowusermedia            Atom = 0x850e\n\tAlt                       Atom = 0xb003\n\tAnnotation                Atom = 0x1b90a\n\tAnnotationXml             Atom = 0x1b90e\n\tApplet                    Atom = 0x30106\n\tArea                      Atom = 0x34a04\n\tArticle                   Atom = 0x3f007\n\tAs                        Atom = 0xb902\n\tAside                     Atom = 0xc105\n\tAsync                     Atom = 0xb905\n\tAudio                     Atom = 0xcf05\n\tAutocomplete              Atom = 0x2600c\n\tAutofocus                 Atom = 0xeb09\n\tAutoplay                  Atom = 0x10608\n\tB                         Atom = 0x101\n\tBase                      Atom = 0x11504\n\tBasefont                  Atom = 0x11508\n\tBdi                       Atom = 0x16103\n\tBdo                       Atom = 0x13403\n\tBgsound                   Atom = 0x14707\n\tBig                       Atom = 0x15903\n\tBlink                     Atom = 0x15c05\n\tBlockquote                Atom = 0x1680a\n\tBody                      Atom = 0x2804\n\tBr                        Atom = 0x202\n\tButton                    Atom = 0x17206\n\tCanvas                    Atom = 0xbd06\n\tCaption                   Atom = 0x21907\n\tCenter                    Atom = 0x20806\n\tChallenge                 Atom = 0x28309\n\tCharset                   Atom = 0x2107\n\tChecked                   Atom = 0x46d07\n\tCite                      Atom = 
0x55804\n\tClass                     Atom = 0x5b905\n\tCode                      Atom = 0x19004\n\tCol                       Atom = 0x19703\n\tColgroup                  Atom = 0x19708\n\tColor                     Atom = 0x1af05\n\tCols                      Atom = 0x1b404\n\tColspan                   Atom = 0x1b407\n\tCommand                   Atom = 0x1c707\n\tContent                   Atom = 0x57f07\n\tContenteditable           Atom = 0x57f0f\n\tContextmenu               Atom = 0x3740b\n\tControls                  Atom = 0x1ce08\n\tCoords                    Atom = 0x1da06\n\tCrossorigin               Atom = 0x1e30b\n\tData                      Atom = 0x49904\n\tDatalist                  Atom = 0x49908\n\tDatetime                  Atom = 0x2a008\n\tDd                        Atom = 0x2bf02\n\tDefault                   Atom = 0xc407\n\tDefer                     Atom = 0x19205\n\tDel                       Atom = 0x44603\n\tDesc                      Atom = 0x55504\n\tDetails                   Atom = 0x4607\n\tDfn                       Atom = 0x5f03\n\tDialog                    Atom = 0x16206\n\tDir                       Atom = 0xa303\n\tDirname                   Atom = 0xa307\n\tDisabled                  Atom = 0x14d08\n\tDiv                       Atom = 0x15403\n\tDl                        Atom = 0x5e202\n\tDownload                  Atom = 0x45708\n\tDraggable                 Atom = 0x18309\n\tDropzone                  Atom = 0x3f908\n\tDt                        Atom = 0x64702\n\tEm                        Atom = 0x4202\n\tEmbed                     Atom = 0x4205\n\tEnctype                   Atom = 0x27507\n\tFace                      Atom = 0x20604\n\tFieldset                  Atom = 0x20e08\n\tFigcaption                Atom = 0x2160a\n\tFigure                    Atom = 0x23006\n\tFont                      Atom = 0x11904\n\tFooter                    Atom = 0xb306\n\tFor                       Atom = 0x23c03\n\tForeignObject             Atom = 0x23c0d\n\tForeignobject    
         Atom = 0x2490d\n\tForm                      Atom = 0x25604\n\tFormaction                Atom = 0x2560a\n\tFormenctype               Atom = 0x2710b\n\tFormmethod                Atom = 0x28c0a\n\tFormnovalidate            Atom = 0x2960e\n\tFormtarget                Atom = 0x2a80a\n\tFrame                     Atom = 0x5705\n\tFrameset                  Atom = 0x5708\n\tH1                        Atom = 0x14502\n\tH2                        Atom = 0x2c602\n\tH3                        Atom = 0x2f502\n\tH4                        Atom = 0x33902\n\tH5                        Atom = 0x34302\n\tH6                        Atom = 0x64902\n\tHead                      Atom = 0x32504\n\tHeader                    Atom = 0x32506\n\tHeaders                   Atom = 0x32507\n\tHeight                    Atom = 0x12c06\n\tHgroup                    Atom = 0x2b206\n\tHidden                    Atom = 0x2bd06\n\tHigh                      Atom = 0x2c304\n\tHr                        Atom = 0x14002\n\tHref                      Atom = 0x2c804\n\tHreflang                  Atom = 0x2c808\n\tHtml                      Atom = 0x13004\n\tHttpEquiv                 Atom = 0x2d00a\n\tI                         Atom = 0x601\n\tIcon                      Atom = 0x57e04\n\tId                        Atom = 0xc302\n\tIframe                    Atom = 0x2e406\n\tImage                     Atom = 0x2ea05\n\tImg                       Atom = 0x2ef03\n\tInput                     Atom = 0x43f05\n\tInputmode                 Atom = 0x43f09\n\tIns                       Atom = 0x1ec03\n\tIntegrity                 Atom = 0x22709\n\tIs                        Atom = 0x14e02\n\tIsindex                   Atom = 0x2f707\n\tIsmap                     Atom = 0x2fe05\n\tItemid                    Atom = 0x37f06\n\tItemprop                  Atom = 0x55908\n\tItemref                   Atom = 0x3c107\n\tItemscope                 Atom = 0x66d09\n\tItemtype                  Atom = 0x30708\n\tKbd                       Atom = 
0x16003\n\tKeygen                    Atom = 0x3206\n\tKeytype                   Atom = 0x7e07\n\tKind                      Atom = 0x18004\n\tLabel                     Atom = 0xda05\n\tLang                      Atom = 0x2cc04\n\tLegend                    Atom = 0x18a06\n\tLi                        Atom = 0x11102\n\tLink                      Atom = 0x15d04\n\tList                      Atom = 0x49d04\n\tListing                   Atom = 0x49d07\n\tLoop                      Atom = 0xde04\n\tLow                       Atom = 0x6b03\n\tMain                      Atom = 0x1004\n\tMalignmark                Atom = 0x6d30a\n\tManifest                  Atom = 0x30f08\n\tMap                       Atom = 0x30003\n\tMark                      Atom = 0x6d904\n\tMarquee                   Atom = 0x31b07\n\tMath                      Atom = 0x32204\n\tMax                       Atom = 0x33103\n\tMaxlength                 Atom = 0x33109\n\tMedia                     Atom = 0x8e05\n\tMediagroup                Atom = 0x8e0a\n\tMenu                      Atom = 0x37b04\n\tMenuitem                  Atom = 0x37b08\n\tMeta                      Atom = 0x4ac04\n\tMeter                     Atom = 0xa805\n\tMethod                    Atom = 0x29006\n\tMglyph                    Atom = 0x2f006\n\tMi                        Atom = 0x33b02\n\tMin                       Atom = 0x33b03\n\tMinlength                 Atom = 0x33b09\n\tMn                        Atom = 0x29902\n\tMo                        Atom = 0x6302\n\tMs                        Atom = 0x67002\n\tMtext                     Atom = 0x34505\n\tMultiple                  Atom = 0x35308\n\tMuted                     Atom = 0x35b05\n\tName                      Atom = 0xa604\n\tNav                       Atom = 0x1303\n\tNobr                      Atom = 0x3704\n\tNoembed                   Atom = 0x4007\n\tNoframes                  Atom = 0x5508\n\tNomodule                  Atom = 0x6108\n\tNonce                     Atom = 0x56205\n\tNoscript                 
 Atom = 0x1fe08\n\tNovalidate                Atom = 0x29a0a\n\tObject                    Atom = 0x25006\n\tOl                        Atom = 0x10102\n\tOnabort                   Atom = 0x17607\n\tOnafterprint              Atom = 0x21e0c\n\tOnautocomplete            Atom = 0x25e0e\n\tOnautocompleteerror       Atom = 0x25e13\n\tOnauxclick                Atom = 0x61b0a\n\tOnbeforeprint             Atom = 0x69a0d\n\tOnbeforeunload            Atom = 0x6e10e\n\tOnblur                    Atom = 0x5c206\n\tOncancel                  Atom = 0xd308\n\tOncanplay                 Atom = 0x13609\n\tOncanplaythrough          Atom = 0x13610\n\tOnchange                  Atom = 0x40f08\n\tOnclick                   Atom = 0x2dd07\n\tOnclose                   Atom = 0x36007\n\tOncontextmenu             Atom = 0x3720d\n\tOncopy                    Atom = 0x38506\n\tOncuechange               Atom = 0x38b0b\n\tOncut                     Atom = 0x39605\n\tOndblclick                Atom = 0x39b0a\n\tOndrag                    Atom = 0x3a506\n\tOndragend                 Atom = 0x3a509\n\tOndragenter               Atom = 0x3ae0b\n\tOndragexit                Atom = 0x3b90a\n\tOndragleave               Atom = 0x3d30b\n\tOndragover                Atom = 0x3de0a\n\tOndragstart               Atom = 0x3e80b\n\tOndrop                    Atom = 0x3f706\n\tOndurationchange          Atom = 0x40710\n\tOnemptied                 Atom = 0x3fe09\n\tOnended                   Atom = 0x41707\n\tOnerror                   Atom = 0x41e07\n\tOnfocus                   Atom = 0x42507\n\tOnhashchange              Atom = 0x4310c\n\tOninput                   Atom = 0x43d07\n\tOninvalid                 Atom = 0x44909\n\tOnkeydown                 Atom = 0x45209\n\tOnkeypress                Atom = 0x45f0a\n\tOnkeyup                   Atom = 0x47407\n\tOnlanguagechange          Atom = 0x48110\n\tOnload                    Atom = 0x49106\n\tOnloadeddata              Atom = 0x4910c\n\tOnloadedmetadata          Atom = 
0x4a410\n\tOnloadend                 Atom = 0x4ba09\n\tOnloadstart               Atom = 0x4c30b\n\tOnmessage                 Atom = 0x4ce09\n\tOnmessageerror            Atom = 0x4ce0e\n\tOnmousedown               Atom = 0x4dc0b\n\tOnmouseenter              Atom = 0x4e70c\n\tOnmouseleave              Atom = 0x4f30c\n\tOnmousemove               Atom = 0x4ff0b\n\tOnmouseout                Atom = 0x50a0a\n\tOnmouseover               Atom = 0x5170b\n\tOnmouseup                 Atom = 0x52209\n\tOnmousewheel              Atom = 0x5300c\n\tOnoffline                 Atom = 0x53c09\n\tOnonline                  Atom = 0x54508\n\tOnpagehide                Atom = 0x54d0a\n\tOnpageshow                Atom = 0x5670a\n\tOnpaste                   Atom = 0x57307\n\tOnpause                   Atom = 0x58e07\n\tOnplay                    Atom = 0x59806\n\tOnplaying                 Atom = 0x59809\n\tOnpopstate                Atom = 0x5a10a\n\tOnprogress                Atom = 0x5ab0a\n\tOnratechange              Atom = 0x5c80c\n\tOnrejectionhandled        Atom = 0x5d412\n\tOnreset                   Atom = 0x5e607\n\tOnresize                  Atom = 0x5ed08\n\tOnscroll                  Atom = 0x5fc08\n\tOnsecuritypolicyviolation Atom = 0x60419\n\tOnseeked                  Atom = 0x62508\n\tOnseeking                 Atom = 0x62d09\n\tOnselect                  Atom = 0x63608\n\tOnshow                    Atom = 0x64006\n\tOnsort                    Atom = 0x64b06\n\tOnstalled                 Atom = 0x65509\n\tOnstorage                 Atom = 0x65e09\n\tOnsubmit                  Atom = 0x66708\n\tOnsuspend                 Atom = 0x67709\n\tOntimeupdate              Atom = 0x11a0c\n\tOntoggle                  Atom = 0x68008\n\tOnunhandledrejection      Atom = 0x68814\n\tOnunload                  Atom = 0x6a708\n\tOnvolumechange            Atom = 0x6af0e\n\tOnwaiting                 Atom = 0x6bd09\n\tOnwheel                   Atom = 0x6c607\n\tOpen                      Atom = 0x55f04\n\tOptgroup 
                 Atom = 0xe008\n\tOptimum                   Atom = 0x6cd07\n\tOption                    Atom = 0x6dd06\n\tOutput                    Atom = 0x51106\n\tP                         Atom = 0xc01\n\tParam                     Atom = 0xc05\n\tPattern                   Atom = 0x4f07\n\tPicture                   Atom = 0x9707\n\tPing                      Atom = 0xe704\n\tPlaceholder               Atom = 0xfb0b\n\tPlaintext                 Atom = 0x19e09\n\tPlaysinline               Atom = 0x10a0b\n\tPoster                    Atom = 0x2b706\n\tPre                       Atom = 0x46403\n\tPreload                   Atom = 0x47a07\n\tProgress                  Atom = 0x5ad08\n\tPrompt                    Atom = 0x52a06\n\tPublic                    Atom = 0x57a06\n\tQ                         Atom = 0x7701\n\tRadiogroup                Atom = 0x30a\n\tReadonly                  Atom = 0x34b08\n\tReferrerpolicy            Atom = 0x3c50e\n\tRel                       Atom = 0x47b03\n\tRequired                  Atom = 0x23408\n\tReversed                  Atom = 0x9c08\n\tRows                      Atom = 0x3a04\n\tRowspan                   Atom = 0x3a07\n\tRp                        Atom = 0x22402\n\tRt                        Atom = 0x17b02\n\tRuby                      Atom = 0xac04\n\tS                         Atom = 0x2501\n\tSamp                      Atom = 0x4c04\n\tSandbox                   Atom = 0xf307\n\tScope                     Atom = 0x67105\n\tScoped                    Atom = 0x67106\n\tScript                    Atom = 0x20006\n\tSeamless                  Atom = 0x36508\n\tSection                   Atom = 0x5bd07\n\tSelect                    Atom = 0x63806\n\tSelected                  Atom = 0x63808\n\tShape                     Atom = 0x1d505\n\tSize                      Atom = 0x5f104\n\tSizes                     Atom = 0x5f105\n\tSlot                      Atom = 0x1df04\n\tSmall                     Atom = 0x1ee05\n\tSortable                  Atom = 
0x64d08\n\tSorted                    Atom = 0x32b06\n\tSource                    Atom = 0x36c06\n\tSpacer                    Atom = 0x42b06\n\tSpan                      Atom = 0x3d04\n\tSpellcheck                Atom = 0x4680a\n\tSrc                       Atom = 0x5b403\n\tSrcdoc                    Atom = 0x5b406\n\tSrclang                   Atom = 0x5f507\n\tSrcset                    Atom = 0x6f306\n\tStart                     Atom = 0x3ee05\n\tStep                      Atom = 0x57704\n\tStrike                    Atom = 0x7a06\n\tStrong                    Atom = 0x31506\n\tStyle                     Atom = 0x6f905\n\tSub                       Atom = 0x66903\n\tSummary                   Atom = 0x6fe07\n\tSup                       Atom = 0x70503\n\tSvg                       Atom = 0x70803\n\tSystem                    Atom = 0x70b06\n\tTabindex                  Atom = 0x4b208\n\tTable                     Atom = 0x58905\n\tTarget                    Atom = 0x2ac06\n\tTbody                     Atom = 0x2705\n\tTd                        Atom = 0x5e02\n\tTemplate                  Atom = 0x70e08\n\tTextarea                  Atom = 0x34608\n\tTfoot                     Atom = 0xb205\n\tTh                        Atom = 0x13f02\n\tThead                     Atom = 0x32405\n\tTime                      Atom = 0x11c04\n\tTitle                     Atom = 0xca05\n\tTr                        Atom = 0x7402\n\tTrack                     Atom = 0x17c05\n\tTranslate                 Atom = 0x1a609\n\tTt                        Atom = 0x5102\n\tType                      Atom = 0x8104\n\tTypemustmatch             Atom = 0x2780d\n\tU                         Atom = 0xb01\n\tUl                        Atom = 0x6602\n\tUpdateviacache            Atom = 0x1200e\n\tUsemap                    Atom = 0x59206\n\tValue                     Atom = 0x1505\n\tVar                       Atom = 0x15603\n\tVideo                     Atom = 0x2d905\n\tWbr                       Atom = 0x57003\n\tWidth                 
    Atom = 0x64505\n\tWorkertype                Atom = 0x7160a\n\tWrap                      Atom = 0x72004\n\tXmp                       Atom = 0xf903\n)\n\nconst hash0 = 0x81cdf10e\n\nconst maxAtomLen = 25\n\nvar table = [1 << 9]Atom{\n\t0x1:   0x8e0a,  // mediagroup\n\t0x2:   0x2cc04, // lang\n\t0x4:   0x2c09,  // accesskey\n\t0x5:   0x5708,  // frameset\n\t0x7:   0x63608, // onselect\n\t0x8:   0x70b06, // system\n\t0xa:   0x64505, // width\n\t0xc:   0x2710b, // formenctype\n\t0xd:   0x10102, // ol\n\t0xe:   0x38b0b, // oncuechange\n\t0x10:  0x13403, // bdo\n\t0x11:  0xcf05,  // audio\n\t0x12:  0x18309, // draggable\n\t0x14:  0x2d905, // video\n\t0x15:  0x29902, // mn\n\t0x16:  0x37b04, // menu\n\t0x17:  0x2b706, // poster\n\t0x19:  0xb306,  // footer\n\t0x1a:  0x29006, // method\n\t0x1b:  0x2a008, // datetime\n\t0x1c:  0x17607, // onabort\n\t0x1d:  0x1200e, // updateviacache\n\t0x1e:  0xb905,  // async\n\t0x1f:  0x49106, // onload\n\t0x21:  0xd308,  // oncancel\n\t0x22:  0x62508, // onseeked\n\t0x23:  0x2ea05, // image\n\t0x24:  0x5d412, // onrejectionhandled\n\t0x26:  0x15d04, // link\n\t0x27:  0x51106, // output\n\t0x28:  0x32504, // head\n\t0x29:  0x4f30c, // onmouseleave\n\t0x2a:  0x57307, // onpaste\n\t0x2b:  0x59809, // onplaying\n\t0x2c:  0x1b407, // colspan\n\t0x2f:  0x1af05, // color\n\t0x30:  0x5f104, // size\n\t0x31:  0x2d00a, // http-equiv\n\t0x33:  0x601,   // i\n\t0x34:  0x54d0a, // onpagehide\n\t0x35:  0x68814, // onunhandledrejection\n\t0x37:  0x41e07, // onerror\n\t0x3a:  0x11508, // basefont\n\t0x3f:  0x1303,  // nav\n\t0x40:  0x18004, // kind\n\t0x41:  0x34b08, // readonly\n\t0x42:  0x2f006, // mglyph\n\t0x44:  0x11102, // li\n\t0x46:  0x2bd06, // hidden\n\t0x47:  0x70803, // svg\n\t0x48:  0x57704, // step\n\t0x49:  0x22709, // integrity\n\t0x4a:  0x57a06, // public\n\t0x4c:  0x19703, // col\n\t0x4d:  0x1680a, // blockquote\n\t0x4e:  0x34302, // h5\n\t0x50:  0x5ad08, // progress\n\t0x51:  0x5f105, // sizes\n\t0x52:  0x33902, // h4\n\t0x56:  
0x32405, // thead\n\t0x57:  0x7e07,  // keytype\n\t0x58:  0x5ab0a, // onprogress\n\t0x59:  0x43f09, // inputmode\n\t0x5a:  0x3a509, // ondragend\n\t0x5d:  0x39605, // oncut\n\t0x5e:  0x42b06, // spacer\n\t0x5f:  0x19708, // colgroup\n\t0x62:  0x14e02, // is\n\t0x65:  0xb902,  // as\n\t0x66:  0x53c09, // onoffline\n\t0x67:  0x32b06, // sorted\n\t0x69:  0x48110, // onlanguagechange\n\t0x6c:  0x4310c, // onhashchange\n\t0x6d:  0xa604,  // name\n\t0x6e:  0xb205,  // tfoot\n\t0x6f:  0x55504, // desc\n\t0x70:  0x33103, // max\n\t0x72:  0x1da06, // coords\n\t0x73:  0x2f502, // h3\n\t0x74:  0x6e10e, // onbeforeunload\n\t0x75:  0x3a04,  // rows\n\t0x76:  0x63806, // select\n\t0x77:  0xa805,  // meter\n\t0x78:  0x37f06, // itemid\n\t0x79:  0x5300c, // onmousewheel\n\t0x7a:  0x5b406, // srcdoc\n\t0x7d:  0x17c05, // track\n\t0x7f:  0x30708, // itemtype\n\t0x82:  0x6302,  // mo\n\t0x83:  0x40f08, // onchange\n\t0x84:  0x32507, // headers\n\t0x85:  0x5c80c, // onratechange\n\t0x86:  0x60419, // onsecuritypolicyviolation\n\t0x88:  0x49908, // datalist\n\t0x89:  0x4dc0b, // onmousedown\n\t0x8a:  0x1df04, // slot\n\t0x8b:  0x4a410, // onloadedmetadata\n\t0x8c:  0x1a06,  // accept\n\t0x8d:  0x25006, // object\n\t0x91:  0x6af0e, // onvolumechange\n\t0x92:  0x2107,  // charset\n\t0x93:  0x25e13, // onautocompleteerror\n\t0x94:  0x6913,  // allowpaymentrequest\n\t0x95:  0x2804,  // body\n\t0x96:  0xc407,  // default\n\t0x97:  0x63808, // selected\n\t0x98:  0x20604, // face\n\t0x99:  0x1d505, // shape\n\t0x9b:  0x68008, // ontoggle\n\t0x9e:  0x64702, // dt\n\t0x9f:  0x6d904, // mark\n\t0xa1:  0xb01,   // u\n\t0xa4:  0x6a708, // onunload\n\t0xa5:  0xde04,  // loop\n\t0xa6:  0x14d08, // disabled\n\t0xaa:  0x41707, // onended\n\t0xab:  0x6d30a, // malignmark\n\t0xad:  0x67709, // onsuspend\n\t0xae:  0x34505, // mtext\n\t0xaf:  0x64b06, // onsort\n\t0xb0:  0x55908, // itemprop\n\t0xb3:  0x66d09, // itemscope\n\t0xb4:  0x15c05, // blink\n\t0xb6:  0x3a506, // ondrag\n\t0xb7:  0x6602,  // 
ul\n\t0xb8:  0x25604, // form\n\t0xb9:  0xf307,  // sandbox\n\t0xba:  0x5705,  // frame\n\t0xbb:  0x1505,  // value\n\t0xbc:  0x65e09, // onstorage\n\t0xc0:  0x17b02, // rt\n\t0xc2:  0x202,   // br\n\t0xc3:  0x20e08, // fieldset\n\t0xc4:  0x2780d, // typemustmatch\n\t0xc5:  0x6108,  // nomodule\n\t0xc6:  0x4007,  // noembed\n\t0xc7:  0x69a0d, // onbeforeprint\n\t0xc8:  0x17206, // button\n\t0xc9:  0x2dd07, // onclick\n\t0xca:  0x6fe07, // summary\n\t0xcd:  0xac04,  // ruby\n\t0xce:  0x5b905, // class\n\t0xcf:  0x3e80b, // ondragstart\n\t0xd0:  0x21907, // caption\n\t0xd4:  0x850e,  // allowusermedia\n\t0xd5:  0x4c30b, // onloadstart\n\t0xd9:  0x15403, // div\n\t0xda:  0x49d04, // list\n\t0xdb:  0x32204, // math\n\t0xdc:  0x43f05, // input\n\t0xdf:  0x3de0a, // ondragover\n\t0xe0:  0x2c602, // h2\n\t0xe2:  0x19e09, // plaintext\n\t0xe4:  0x4e70c, // onmouseenter\n\t0xe7:  0x46d07, // checked\n\t0xe8:  0x46403, // pre\n\t0xea:  0x35308, // multiple\n\t0xeb:  0x16103, // bdi\n\t0xec:  0x33109, // maxlength\n\t0xed:  0x7701,  // q\n\t0xee:  0x61b0a, // onauxclick\n\t0xf0:  0x57003, // wbr\n\t0xf2:  0x11504, // base\n\t0xf3:  0x6dd06, // option\n\t0xf5:  0x40710, // ondurationchange\n\t0xf7:  0x5508,  // noframes\n\t0xf9:  0x3f908, // dropzone\n\t0xfb:  0x67105, // scope\n\t0xfc:  0x9c08,  // reversed\n\t0xfd:  0x3ae0b, // ondragenter\n\t0xfe:  0x3ee05, // start\n\t0xff:  0xf903,  // xmp\n\t0x100: 0x5f507, // srclang\n\t0x101: 0x2ef03, // img\n\t0x104: 0x101,   // b\n\t0x105: 0x23c03, // for\n\t0x106: 0xc105,  // aside\n\t0x107: 0x43d07, // oninput\n\t0x108: 0x34a04, // area\n\t0x109: 0x28c0a, // formmethod\n\t0x10a: 0x72004, // wrap\n\t0x10c: 0x22402, // rp\n\t0x10d: 0x45f0a, // onkeypress\n\t0x10e: 0x5102,  // tt\n\t0x110: 0x33b02, // mi\n\t0x111: 0x35b05, // muted\n\t0x112: 0xb003,  // alt\n\t0x113: 0x19004, // code\n\t0x114: 0x4202,  // em\n\t0x115: 0x3b90a, // ondragexit\n\t0x117: 0x3d04,  // span\n\t0x119: 0x30f08, // manifest\n\t0x11a: 0x37b08, // 
menuitem\n\t0x11b: 0x57f07, // content\n\t0x11d: 0x6bd09, // onwaiting\n\t0x11f: 0x4ba09, // onloadend\n\t0x121: 0x3720d, // oncontextmenu\n\t0x123: 0x5c206, // onblur\n\t0x124: 0x3f007, // article\n\t0x125: 0xa303,  // dir\n\t0x126: 0xe704,  // ping\n\t0x127: 0x23408, // required\n\t0x128: 0x44909, // oninvalid\n\t0x129: 0x6d405, // align\n\t0x12b: 0x57e04, // icon\n\t0x12c: 0x64902, // h6\n\t0x12d: 0x1b404, // cols\n\t0x12e: 0x2160a, // figcaption\n\t0x12f: 0x45209, // onkeydown\n\t0x130: 0x66708, // onsubmit\n\t0x131: 0x13609, // oncanplay\n\t0x132: 0x70503, // sup\n\t0x133: 0xc01,   // p\n\t0x135: 0x3fe09, // onemptied\n\t0x136: 0x38506, // oncopy\n\t0x137: 0x55804, // cite\n\t0x138: 0x39b0a, // ondblclick\n\t0x13a: 0x4ff0b, // onmousemove\n\t0x13c: 0x66903, // sub\n\t0x13d: 0x47b03, // rel\n\t0x13e: 0xe008,  // optgroup\n\t0x142: 0x3a07,  // rowspan\n\t0x143: 0x36c06, // source\n\t0x144: 0x1fe08, // noscript\n\t0x145: 0x55f04, // open\n\t0x146: 0x1ec03, // ins\n\t0x147: 0x23c0d, // foreignObject\n\t0x148: 0x5a10a, // onpopstate\n\t0x14a: 0x27507, // enctype\n\t0x14b: 0x25e0e, // onautocomplete\n\t0x14c: 0x34608, // textarea\n\t0x14e: 0x2600c, // autocomplete\n\t0x14f: 0x14002, // hr\n\t0x150: 0x1ce08, // controls\n\t0x151: 0xc302,  // id\n\t0x153: 0x21e0c, // onafterprint\n\t0x155: 0x2490d, // foreignobject\n\t0x156: 0x31b07, // marquee\n\t0x157: 0x58e07, // onpause\n\t0x158: 0x5e202, // dl\n\t0x159: 0x12c06, // height\n\t0x15a: 0x33b03, // min\n\t0x15b: 0xa307,  // dirname\n\t0x15c: 0x1a609, // translate\n\t0x15d: 0x13004, // html\n\t0x15e: 0x33b09, // minlength\n\t0x15f: 0x47a07, // preload\n\t0x160: 0x70e08, // template\n\t0x161: 0x3d30b, // ondragleave\n\t0x164: 0x5b403, // src\n\t0x165: 0x31506, // strong\n\t0x167: 0x4c04,  // samp\n\t0x168: 0x6ed07, // address\n\t0x169: 0x54508, // ononline\n\t0x16b: 0xfb0b,  // placeholder\n\t0x16c: 0x2ac06, // target\n\t0x16d: 0x1ee05, // small\n\t0x16e: 0x6c607, // onwheel\n\t0x16f: 0x1b90a, // annotation\n\t0x170: 
0x4680a, // spellcheck\n\t0x171: 0x4607,  // details\n\t0x172: 0xbd06,  // canvas\n\t0x173: 0xeb09,  // autofocus\n\t0x174: 0xc05,   // param\n\t0x176: 0x45708, // download\n\t0x177: 0x44603, // del\n\t0x178: 0x36007, // onclose\n\t0x179: 0x16003, // kbd\n\t0x17a: 0x30106, // applet\n\t0x17b: 0x2c804, // href\n\t0x17c: 0x5ed08, // onresize\n\t0x17e: 0x4910c, // onloadeddata\n\t0x180: 0x7402,  // tr\n\t0x181: 0x2a80a, // formtarget\n\t0x182: 0xca05,  // title\n\t0x183: 0x6f905, // style\n\t0x184: 0x7a06,  // strike\n\t0x185: 0x59206, // usemap\n\t0x186: 0x2e406, // iframe\n\t0x187: 0x1004,  // main\n\t0x189: 0x9707,  // picture\n\t0x18c: 0x2fe05, // ismap\n\t0x18e: 0x49904, // data\n\t0x18f: 0xda05,  // label\n\t0x191: 0x3c50e, // referrerpolicy\n\t0x192: 0x13f02, // th\n\t0x194: 0x52a06, // prompt\n\t0x195: 0x5bd07, // section\n\t0x197: 0x6cd07, // optimum\n\t0x198: 0x2c304, // high\n\t0x199: 0x14502, // h1\n\t0x19a: 0x65509, // onstalled\n\t0x19b: 0x15603, // var\n\t0x19c: 0x11c04, // time\n\t0x19e: 0x67002, // ms\n\t0x19f: 0x32506, // header\n\t0x1a0: 0x4ce09, // onmessage\n\t0x1a1: 0x56205, // nonce\n\t0x1a2: 0x2560a, // formaction\n\t0x1a3: 0x20806, // center\n\t0x1a4: 0x3704,  // nobr\n\t0x1a5: 0x58905, // table\n\t0x1a6: 0x49d07, // listing\n\t0x1a7: 0x18a06, // legend\n\t0x1a9: 0x28309, // challenge\n\t0x1aa: 0x23006, // figure\n\t0x1ab: 0x8e05,  // media\n\t0x1ae: 0x8104,  // type\n\t0x1af: 0x11904, // font\n\t0x1b0: 0x4ce0e, // onmessageerror\n\t0x1b1: 0x36508, // seamless\n\t0x1b2: 0x5f03,  // dfn\n\t0x1b3: 0x19205, // defer\n\t0x1b4: 0x6b03,  // low\n\t0x1b5: 0x62d09, // onseeking\n\t0x1b6: 0x5170b, // onmouseover\n\t0x1b7: 0x29a0a, // novalidate\n\t0x1b8: 0x7160a, // workertype\n\t0x1ba: 0x3c107, // itemref\n\t0x1bd: 0x1,     // a\n\t0x1be: 0x30003, // map\n\t0x1bf: 0x11a0c, // ontimeupdate\n\t0x1c0: 0x14707, // bgsound\n\t0x1c1: 0x3206,  // keygen\n\t0x1c2: 0x2705,  // tbody\n\t0x1c5: 0x64006, // onshow\n\t0x1c7: 0x2501,  // s\n\t0x1c8: 0x4f07,  // 
pattern\n\t0x1cc: 0x13610, // oncanplaythrough\n\t0x1ce: 0x2bf02, // dd\n\t0x1cf: 0x6f306, // srcset\n\t0x1d0: 0x15903, // big\n\t0x1d2: 0x64d08, // sortable\n\t0x1d3: 0x47407, // onkeyup\n\t0x1d5: 0x59806, // onplay\n\t0x1d7: 0x4ac04, // meta\n\t0x1d8: 0x3f706, // ondrop\n\t0x1da: 0x5fc08, // onscroll\n\t0x1db: 0x1e30b, // crossorigin\n\t0x1dc: 0x5670a, // onpageshow\n\t0x1dd: 0x4,     // abbr\n\t0x1de: 0x5e02,  // td\n\t0x1df: 0x57f0f, // contenteditable\n\t0x1e0: 0x25a06, // action\n\t0x1e1: 0x10a0b, // playsinline\n\t0x1e2: 0x42507, // onfocus\n\t0x1e3: 0x2c808, // hreflang\n\t0x1e5: 0x50a0a, // onmouseout\n\t0x1e6: 0x5e607, // onreset\n\t0x1e7: 0x10608, // autoplay\n\t0x1ea: 0x67106, // scoped\n\t0x1ec: 0x30a,   // radiogroup\n\t0x1ee: 0x3740b, // contextmenu\n\t0x1ef: 0x52209, // onmouseup\n\t0x1f1: 0x2b206, // hgroup\n\t0x1f2: 0x1f00f, // allowfullscreen\n\t0x1f3: 0x4b208, // tabindex\n\t0x1f6: 0x2f707, // isindex\n\t0x1f7: 0x1a0e,  // accept-charset\n\t0x1f8: 0x2960e, // formnovalidate\n\t0x1fb: 0x1b90e, // annotation-xml\n\t0x1fc: 0x4205,  // embed\n\t0x1fd: 0x20006, // script\n\t0x1fe: 0x16206, // dialog\n\t0x1ff: 0x1c707, // command\n}\n\nconst atomText = \"abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobro\" +\n\t\"wspanoembedetailsampatternoframesetdfnomoduleallowpaymentreq\" +\n\t\"uestrikeytypeallowusermediagroupictureversedirnameterubyaltf\" +\n\t\"ooterasyncanvasidefaultitleaudioncancelabelooptgroupingautof\" +\n\t\"ocusandboxmplaceholderautoplaysinlinebasefontimeupdateviacac\" +\n\t\"heightmlbdoncanplaythrough1bgsoundisabledivarbigblinkbdialog\" +\n\t\"blockquotebuttonabortrackindraggablegendcodefercolgrouplaint\" +\n\t\"extranslatecolorcolspannotation-xmlcommandcontrolshapecoords\" +\n\t\"lotcrossoriginsmallowfullscreenoscriptfacenterfieldsetfigcap\" +\n\t\"tionafterprintegrityfigurequiredforeignObjectforeignobjectfo\" +\n\t\"rmactionautocompleteerrorformenctypemustmatchallengeformmeth\" 
+\n\t\"odformnovalidatetimeformtargethgrouposterhiddenhigh2hreflang\" +\n\t\"http-equivideonclickiframeimageimglyph3isindexismappletitemt\" +\n\t\"ypemanifestrongmarqueematheadersortedmaxlength4minlength5mte\" +\n\t\"xtareadonlymultiplemutedoncloseamlessourceoncontextmenuitemi\" +\n\t\"doncopyoncuechangeoncutondblclickondragendondragenterondrage\" +\n\t\"xitemreferrerpolicyondragleaveondragoverondragstarticleondro\" +\n\t\"pzonemptiedondurationchangeonendedonerroronfocuspaceronhashc\" +\n\t\"hangeoninputmodeloninvalidonkeydownloadonkeypresspellchecked\" +\n\t\"onkeyupreloadonlanguagechangeonloadeddatalistingonloadedmeta\" +\n\t\"databindexonloadendonloadstartonmessageerroronmousedownonmou\" +\n\t\"seenteronmouseleaveonmousemoveonmouseoutputonmouseoveronmous\" +\n\t\"eupromptonmousewheelonofflineononlineonpagehidescitempropeno\" +\n\t\"nceonpageshowbronpastepublicontenteditableonpausemaponplayin\" +\n\t\"gonpopstateonprogressrcdoclassectionbluronratechangeonreject\" +\n\t\"ionhandledonresetonresizesrclangonscrollonsecuritypolicyviol\" +\n\t\"ationauxclickonseekedonseekingonselectedonshowidth6onsortabl\" +\n\t\"eonstalledonstorageonsubmitemscopedonsuspendontoggleonunhand\" +\n\t\"ledrejectionbeforeprintonunloadonvolumechangeonwaitingonwhee\" +\n\t\"loptimumalignmarkoptionbeforeunloaddressrcsetstylesummarysup\" +\n\t\"svgsystemplateworkertypewrap\"\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/atom/table_test.go",
    "content": "// Code generated by go generate gen.go; DO NOT EDIT.\n\n//go:generate go run gen.go -test\n\npackage atom\n\nvar testAtomList = []string{\n\t\"a\",\n\t\"abbr\",\n\t\"accept\",\n\t\"accept-charset\",\n\t\"accesskey\",\n\t\"action\",\n\t\"address\",\n\t\"align\",\n\t\"allowfullscreen\",\n\t\"allowpaymentrequest\",\n\t\"allowusermedia\",\n\t\"alt\",\n\t\"annotation\",\n\t\"annotation-xml\",\n\t\"applet\",\n\t\"area\",\n\t\"article\",\n\t\"as\",\n\t\"aside\",\n\t\"async\",\n\t\"audio\",\n\t\"autocomplete\",\n\t\"autofocus\",\n\t\"autoplay\",\n\t\"b\",\n\t\"base\",\n\t\"basefont\",\n\t\"bdi\",\n\t\"bdo\",\n\t\"bgsound\",\n\t\"big\",\n\t\"blink\",\n\t\"blockquote\",\n\t\"body\",\n\t\"br\",\n\t\"button\",\n\t\"canvas\",\n\t\"caption\",\n\t\"center\",\n\t\"challenge\",\n\t\"charset\",\n\t\"checked\",\n\t\"cite\",\n\t\"class\",\n\t\"code\",\n\t\"col\",\n\t\"colgroup\",\n\t\"color\",\n\t\"cols\",\n\t\"colspan\",\n\t\"command\",\n\t\"content\",\n\t\"contenteditable\",\n\t\"contextmenu\",\n\t\"controls\",\n\t\"coords\",\n\t\"crossorigin\",\n\t\"data\",\n\t\"datalist\",\n\t\"datetime\",\n\t\"dd\",\n\t\"default\",\n\t\"defer\",\n\t\"del\",\n\t\"desc\",\n\t\"details\",\n\t\"dfn\",\n\t\"dialog\",\n\t\"dir\",\n\t\"dirname\",\n\t\"disabled\",\n\t\"div\",\n\t\"dl\",\n\t\"download\",\n\t\"draggable\",\n\t\"dropzone\",\n\t\"dt\",\n\t\"em\",\n\t\"embed\",\n\t\"enctype\",\n\t\"face\",\n\t\"fieldset\",\n\t\"figcaption\",\n\t\"figure\",\n\t\"font\",\n\t\"footer\",\n\t\"for\",\n\t\"foreignObject\",\n\t\"foreignobject\",\n\t\"form\",\n\t\"formaction\",\n\t\"formenctype\",\n\t\"formmethod\",\n\t\"formnovalidate\",\n\t\"formtarget\",\n\t\"frame\",\n\t\"frameset\",\n\t\"h1\",\n\t\"h2\",\n\t\"h3\",\n\t\"h4\",\n\t\"h5\",\n\t\"h6\",\n\t\"head\",\n\t\"header\",\n\t\"headers\",\n\t\"height\",\n\t\"hgroup\",\n\t\"hidden\",\n\t\"high\",\n\t\"hr\",\n\t\"href\",\n\t\"hreflang\",\n\t\"html\",\n\t\"http-equiv\",\n\t\"i\",\n\t\"icon\",\n\t\"id\",\n\t\"iframe\",\n\t\"image\",\n\t\"img\",\n\t
\"input\",\n\t\"inputmode\",\n\t\"ins\",\n\t\"integrity\",\n\t\"is\",\n\t\"isindex\",\n\t\"ismap\",\n\t\"itemid\",\n\t\"itemprop\",\n\t\"itemref\",\n\t\"itemscope\",\n\t\"itemtype\",\n\t\"kbd\",\n\t\"keygen\",\n\t\"keytype\",\n\t\"kind\",\n\t\"label\",\n\t\"lang\",\n\t\"legend\",\n\t\"li\",\n\t\"link\",\n\t\"list\",\n\t\"listing\",\n\t\"loop\",\n\t\"low\",\n\t\"main\",\n\t\"malignmark\",\n\t\"manifest\",\n\t\"map\",\n\t\"mark\",\n\t\"marquee\",\n\t\"math\",\n\t\"max\",\n\t\"maxlength\",\n\t\"media\",\n\t\"mediagroup\",\n\t\"menu\",\n\t\"menuitem\",\n\t\"meta\",\n\t\"meter\",\n\t\"method\",\n\t\"mglyph\",\n\t\"mi\",\n\t\"min\",\n\t\"minlength\",\n\t\"mn\",\n\t\"mo\",\n\t\"ms\",\n\t\"mtext\",\n\t\"multiple\",\n\t\"muted\",\n\t\"name\",\n\t\"nav\",\n\t\"nobr\",\n\t\"noembed\",\n\t\"noframes\",\n\t\"nomodule\",\n\t\"nonce\",\n\t\"noscript\",\n\t\"novalidate\",\n\t\"object\",\n\t\"ol\",\n\t\"onabort\",\n\t\"onafterprint\",\n\t\"onautocomplete\",\n\t\"onautocompleteerror\",\n\t\"onauxclick\",\n\t\"onbeforeprint\",\n\t\"onbeforeunload\",\n\t\"onblur\",\n\t\"oncancel\",\n\t\"oncanplay\",\n\t\"oncanplaythrough\",\n\t\"onchange\",\n\t\"onclick\",\n\t\"onclose\",\n\t\"oncontextmenu\",\n\t\"oncopy\",\n\t\"oncuechange\",\n\t\"oncut\",\n\t\"ondblclick\",\n\t\"ondrag\",\n\t\"ondragend\",\n\t\"ondragenter\",\n\t\"ondragexit\",\n\t\"ondragleave\",\n\t\"ondragover\",\n\t\"ondragstart\",\n\t\"ondrop\",\n\t\"ondurationchange\",\n\t\"onemptied\",\n\t\"onended\",\n\t\"onerror\",\n\t\"onfocus\",\n\t\"onhashchange\",\n\t\"oninput\",\n\t\"oninvalid\",\n\t\"onkeydown\",\n\t\"onkeypress\",\n\t\"onkeyup\",\n\t\"onlanguagechange\",\n\t\"onload\",\n\t\"onloadeddata\",\n\t\"onloadedmetadata\",\n\t\"onloadend\",\n\t\"onloadstart\",\n\t\"onmessage\",\n\t\"onmessageerror\",\n\t\"onmousedown\",\n\t\"onmouseenter\",\n\t\"onmouseleave\",\n\t\"onmousemove\",\n\t\"onmouseout\",\n\t\"onmouseover\",\n\t\"onmouseup\",\n\t\"onmousewheel\",\n\t\"onoffline\",\n\t\"ononline\",\n\t\"onpagehide\",\n\t\"onpageshow
\",\n\t\"onpaste\",\n\t\"onpause\",\n\t\"onplay\",\n\t\"onplaying\",\n\t\"onpopstate\",\n\t\"onprogress\",\n\t\"onratechange\",\n\t\"onrejectionhandled\",\n\t\"onreset\",\n\t\"onresize\",\n\t\"onscroll\",\n\t\"onsecuritypolicyviolation\",\n\t\"onseeked\",\n\t\"onseeking\",\n\t\"onselect\",\n\t\"onshow\",\n\t\"onsort\",\n\t\"onstalled\",\n\t\"onstorage\",\n\t\"onsubmit\",\n\t\"onsuspend\",\n\t\"ontimeupdate\",\n\t\"ontoggle\",\n\t\"onunhandledrejection\",\n\t\"onunload\",\n\t\"onvolumechange\",\n\t\"onwaiting\",\n\t\"onwheel\",\n\t\"open\",\n\t\"optgroup\",\n\t\"optimum\",\n\t\"option\",\n\t\"output\",\n\t\"p\",\n\t\"param\",\n\t\"pattern\",\n\t\"picture\",\n\t\"ping\",\n\t\"placeholder\",\n\t\"plaintext\",\n\t\"playsinline\",\n\t\"poster\",\n\t\"pre\",\n\t\"preload\",\n\t\"progress\",\n\t\"prompt\",\n\t\"public\",\n\t\"q\",\n\t\"radiogroup\",\n\t\"readonly\",\n\t\"referrerpolicy\",\n\t\"rel\",\n\t\"required\",\n\t\"reversed\",\n\t\"rows\",\n\t\"rowspan\",\n\t\"rp\",\n\t\"rt\",\n\t\"ruby\",\n\t\"s\",\n\t\"samp\",\n\t\"sandbox\",\n\t\"scope\",\n\t\"scoped\",\n\t\"script\",\n\t\"seamless\",\n\t\"section\",\n\t\"select\",\n\t\"selected\",\n\t\"shape\",\n\t\"size\",\n\t\"sizes\",\n\t\"slot\",\n\t\"small\",\n\t\"sortable\",\n\t\"sorted\",\n\t\"source\",\n\t\"spacer\",\n\t\"span\",\n\t\"spellcheck\",\n\t\"src\",\n\t\"srcdoc\",\n\t\"srclang\",\n\t\"srcset\",\n\t\"start\",\n\t\"step\",\n\t\"strike\",\n\t\"strong\",\n\t\"style\",\n\t\"sub\",\n\t\"summary\",\n\t\"sup\",\n\t\"svg\",\n\t\"system\",\n\t\"tabindex\",\n\t\"table\",\n\t\"target\",\n\t\"tbody\",\n\t\"td\",\n\t\"template\",\n\t\"textarea\",\n\t\"tfoot\",\n\t\"th\",\n\t\"thead\",\n\t\"time\",\n\t\"title\",\n\t\"tr\",\n\t\"track\",\n\t\"translate\",\n\t\"tt\",\n\t\"type\",\n\t\"typemustmatch\",\n\t\"u\",\n\t\"ul\",\n\t\"updateviacache\",\n\t\"usemap\",\n\t\"value\",\n\t\"var\",\n\t\"video\",\n\t\"wbr\",\n\t\"width\",\n\t\"workertype\",\n\t\"wrap\",\n\t\"xmp\",\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/charset/charset.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package charset provides common text encodings for HTML documents.\n//\n// The mapping from encoding labels to encodings is defined at\n// https://encoding.spec.whatwg.org/.\npackage charset // import \"golang.org/x/net/html/charset\"\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"mime\"\n\t\"strings\"\n\t\"unicode/utf8\"\n\n\t\"golang.org/x/net/html\"\n\t\"golang.org/x/text/encoding\"\n\t\"golang.org/x/text/encoding/charmap\"\n\t\"golang.org/x/text/encoding/htmlindex\"\n\t\"golang.org/x/text/transform\"\n)\n\n// Lookup returns the encoding with the specified label, and its canonical\n// name. It returns nil and the empty string if label is not one of the\n// standard encodings for HTML. Matching is case-insensitive and ignores\n// leading and trailing whitespace. Encoders will use HTML escape sequences for\n// runes that are not supported by the character set.\nfunc Lookup(label string) (e encoding.Encoding, name string) {\n\te, err := htmlindex.Get(label)\n\tif err != nil {\n\t\treturn nil, \"\"\n\t}\n\tname, _ = htmlindex.Name(e)\n\treturn &htmlEncoding{e}, name\n}\n\ntype htmlEncoding struct{ encoding.Encoding }\n\nfunc (h *htmlEncoding) NewEncoder() *encoding.Encoder {\n\t// HTML requires a non-terminating legacy encoder. 
We use HTML escapes to\n\t// substitute unsupported code points.\n\treturn encoding.HTMLEscapeUnsupported(h.Encoding.NewEncoder())\n}\n\n// DetermineEncoding determines the encoding of an HTML document by examining\n// up to the first 1024 bytes of content and the declared Content-Type.\n//\n// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding\nfunc DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) {\n\tif len(content) > 1024 {\n\t\tcontent = content[:1024]\n\t}\n\n\tfor _, b := range boms {\n\t\tif bytes.HasPrefix(content, b.bom) {\n\t\t\te, name = Lookup(b.enc)\n\t\t\treturn e, name, true\n\t\t}\n\t}\n\n\tif _, params, err := mime.ParseMediaType(contentType); err == nil {\n\t\tif cs, ok := params[\"charset\"]; ok {\n\t\t\tif e, name = Lookup(cs); e != nil {\n\t\t\t\treturn e, name, true\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(content) > 0 {\n\t\te, name = prescan(content)\n\t\tif e != nil {\n\t\t\treturn e, name, false\n\t\t}\n\t}\n\n\t// Try to detect UTF-8.\n\t// First eliminate any partial rune at the end.\n\tfor i := len(content) - 1; i >= 0 && i > len(content)-4; i-- {\n\t\tb := content[i]\n\t\tif b < 0x80 {\n\t\t\tbreak\n\t\t}\n\t\tif utf8.RuneStart(b) {\n\t\t\tcontent = content[:i]\n\t\t\tbreak\n\t\t}\n\t}\n\thasHighBit := false\n\tfor _, c := range content {\n\t\tif c >= 0x80 {\n\t\t\thasHighBit = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif hasHighBit && utf8.Valid(content) {\n\t\treturn encoding.Nop, \"utf-8\", false\n\t}\n\n\t// TODO: change default depending on user's locale?\n\treturn charmap.Windows1252, \"windows-1252\", false\n}\n\n// NewReader returns an io.Reader that converts the content of r to UTF-8.\n// It calls DetermineEncoding to find out what r's encoding is.\nfunc NewReader(r io.Reader, contentType string) (io.Reader, error) {\n\tpreview := make([]byte, 1024)\n\tn, err := io.ReadFull(r, preview)\n\tswitch {\n\tcase err == 
io.ErrUnexpectedEOF:\n\t\tpreview = preview[:n]\n\t\tr = bytes.NewReader(preview)\n\tcase err != nil:\n\t\treturn nil, err\n\tdefault:\n\t\tr = io.MultiReader(bytes.NewReader(preview), r)\n\t}\n\n\tif e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop {\n\t\tr = transform.NewReader(r, e.NewDecoder())\n\t}\n\treturn r, nil\n}\n\n// NewReaderLabel returns a reader that converts from the specified charset to\n// UTF-8. It uses Lookup to find the encoding that corresponds to label, and\n// returns an error if Lookup returns nil. It is suitable for use as\n// encoding/xml.Decoder's CharsetReader function.\nfunc NewReaderLabel(label string, input io.Reader) (io.Reader, error) {\n\te, _ := Lookup(label)\n\tif e == nil {\n\t\treturn nil, fmt.Errorf(\"unsupported charset: %q\", label)\n\t}\n\treturn transform.NewReader(input, e.NewDecoder()), nil\n}\n\nfunc prescan(content []byte) (e encoding.Encoding, name string) {\n\tz := html.NewTokenizer(bytes.NewReader(content))\n\tfor {\n\t\tswitch z.Next() {\n\t\tcase html.ErrorToken:\n\t\t\treturn nil, \"\"\n\n\t\tcase html.StartTagToken, html.SelfClosingTagToken:\n\t\t\ttagName, hasAttr := z.TagName()\n\t\t\tif !bytes.Equal(tagName, []byte(\"meta\")) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tattrList := make(map[string]bool)\n\t\t\tgotPragma := false\n\n\t\t\tconst (\n\t\t\t\tdontKnow = iota\n\t\t\t\tdoNeedPragma\n\t\t\t\tdoNotNeedPragma\n\t\t\t)\n\t\t\tneedPragma := dontKnow\n\n\t\t\tname = \"\"\n\t\t\te = nil\n\t\t\tfor hasAttr {\n\t\t\t\tvar key, val []byte\n\t\t\t\tkey, val, hasAttr = z.TagAttr()\n\t\t\t\tks := string(key)\n\t\t\t\tif attrList[ks] {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tattrList[ks] = true\n\t\t\t\tfor i, c := range val {\n\t\t\t\t\tif 'A' <= c && c <= 'Z' {\n\t\t\t\t\t\tval[i] = c + 0x20\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tswitch ks {\n\t\t\t\tcase \"http-equiv\":\n\t\t\t\t\tif bytes.Equal(val, []byte(\"content-type\")) {\n\t\t\t\t\t\tgotPragma = true\n\t\t\t\t\t}\n\n\t\t\t\tcase 
\"content\":\n\t\t\t\t\tif e == nil {\n\t\t\t\t\t\tname = fromMetaElement(string(val))\n\t\t\t\t\t\tif name != \"\" {\n\t\t\t\t\t\t\te, name = Lookup(name)\n\t\t\t\t\t\t\tif e != nil {\n\t\t\t\t\t\t\t\tneedPragma = doNeedPragma\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\tcase \"charset\":\n\t\t\t\t\te, name = Lookup(string(val))\n\t\t\t\t\tneedPragma = doNotNeedPragma\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif strings.HasPrefix(name, \"utf-16\") {\n\t\t\t\tname = \"utf-8\"\n\t\t\t\te = encoding.Nop\n\t\t\t}\n\n\t\t\tif e != nil {\n\t\t\t\treturn e, name\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc fromMetaElement(s string) string {\n\tfor s != \"\" {\n\t\tcsLoc := strings.Index(s, \"charset\")\n\t\tif csLoc == -1 {\n\t\t\treturn \"\"\n\t\t}\n\t\ts = s[csLoc+len(\"charset\"):]\n\t\ts = strings.TrimLeft(s, \" \\t\\n\\f\\r\")\n\t\tif !strings.HasPrefix(s, \"=\") {\n\t\t\tcontinue\n\t\t}\n\t\ts = s[1:]\n\t\ts = strings.TrimLeft(s, \" \\t\\n\\f\\r\")\n\t\tif s == \"\" {\n\t\t\treturn \"\"\n\t\t}\n\t\tif q := s[0]; q == '\"' || q == '\\'' {\n\t\t\ts = s[1:]\n\t\t\tcloseQuote := strings.IndexRune(s, rune(q))\n\t\t\tif closeQuote == -1 {\n\t\t\t\treturn \"\"\n\t\t\t}\n\t\t\treturn s[:closeQuote]\n\t\t}\n\n\t\tend := strings.IndexAny(s, \"; \\t\\n\\f\\r\")\n\t\tif end == -1 {\n\t\t\tend = len(s)\n\t\t}\n\t\treturn s[:end]\n\t}\n\treturn \"\"\n}\n\nvar boms = []struct {\n\tbom []byte\n\tenc string\n}{\n\t{[]byte{0xfe, 0xff}, \"utf-16be\"},\n\t{[]byte{0xff, 0xfe}, \"utf-16le\"},\n\t{[]byte{0xef, 0xbb, 0xbf}, \"utf-8\"},\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/charset/charset_test.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage charset\n\nimport (\n\t\"bytes\"\n\t\"encoding/xml\"\n\t\"io/ioutil\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang.org/x/text/transform\"\n)\n\nfunc transformString(t transform.Transformer, s string) (string, error) {\n\tr := transform.NewReader(strings.NewReader(s), t)\n\tb, err := ioutil.ReadAll(r)\n\treturn string(b), err\n}\n\ntype testCase struct {\n\tutf8, other, otherEncoding string\n}\n\n// testCases for encoding and decoding.\nvar testCases = []testCase{\n\t{\"Résumé\", \"Résumé\", \"utf8\"},\n\t{\"Résumé\", \"R\\xe9sum\\xe9\", \"latin1\"},\n\t{\"これは漢字です。\", \"S0\\x8c0o0\\\"oW[g0Y0\\x020\", \"UTF-16LE\"},\n\t{\"これは漢字です。\", \"0S0\\x8c0oo\\\"[W0g0Y0\\x02\", \"UTF-16BE\"},\n\t{\"Hello, world\", \"Hello, world\", \"ASCII\"},\n\t{\"Gdańsk\", \"Gda\\xf1sk\", \"ISO-8859-2\"},\n\t{\"Ââ Čč Đđ Ŋŋ Õõ Šš Žž Åå Ää\", \"\\xc2\\xe2 \\xc8\\xe8 \\xa9\\xb9 \\xaf\\xbf \\xd5\\xf5 \\xaa\\xba \\xac\\xbc \\xc5\\xe5 \\xc4\\xe4\", \"ISO-8859-10\"},\n\t{\"สำหรับ\", \"\\xca\\xd3\\xcb\\xc3\\u047a\", \"ISO-8859-11\"},\n\t{\"latviešu\", \"latvie\\xf0u\", \"ISO-8859-13\"},\n\t{\"Seònaid\", \"Se\\xf2naid\", \"ISO-8859-14\"},\n\t{\"€1 is cheap\", \"\\xa41 is cheap\", \"ISO-8859-15\"},\n\t{\"românește\", \"rom\\xe2ne\\xbate\", \"ISO-8859-16\"},\n\t{\"nutraĵo\", \"nutra\\xbco\", \"ISO-8859-3\"},\n\t{\"Kalâdlit\", \"Kal\\xe2dlit\", \"ISO-8859-4\"},\n\t{\"русский\", \"\\xe0\\xe3\\xe1\\xe1\\xda\\xd8\\xd9\", \"ISO-8859-5\"},\n\t{\"ελληνικά\", \"\\xe5\\xeb\\xeb\\xe7\\xed\\xe9\\xea\\xdc\", \"ISO-8859-7\"},\n\t{\"Kağan\", \"Ka\\xf0an\", \"ISO-8859-9\"},\n\t{\"Résumé\", \"R\\x8esum\\x8e\", \"macintosh\"},\n\t{\"Gdańsk\", \"Gda\\xf1sk\", \"windows-1250\"},\n\t{\"русский\", \"\\xf0\\xf3\\xf1\\xf1\\xea\\xe8\\xe9\", \"windows-1251\"},\n\t{\"Résumé\", \"R\\xe9sum\\xe9\", 
\"windows-1252\"},\n\t{\"ελληνικά\", \"\\xe5\\xeb\\xeb\\xe7\\xed\\xe9\\xea\\xdc\", \"windows-1253\"},\n\t{\"Kağan\", \"Ka\\xf0an\", \"windows-1254\"},\n\t{\"עִבְרִית\", \"\\xf2\\xc4\\xe1\\xc0\\xf8\\xc4\\xe9\\xfa\", \"windows-1255\"},\n\t{\"العربية\", \"\\xc7\\xe1\\xda\\xd1\\xc8\\xed\\xc9\", \"windows-1256\"},\n\t{\"latviešu\", \"latvie\\xf0u\", \"windows-1257\"},\n\t{\"Việt\", \"Vi\\xea\\xf2t\", \"windows-1258\"},\n\t{\"สำหรับ\", \"\\xca\\xd3\\xcb\\xc3\\u047a\", \"windows-874\"},\n\t{\"русский\", \"\\xd2\\xd5\\xd3\\xd3\\xcb\\xc9\\xca\", \"KOI8-R\"},\n\t{\"українська\", \"\\xd5\\xcb\\xd2\\xc1\\xa7\\xce\\xd3\\xd8\\xcb\\xc1\", \"KOI8-U\"},\n\t{\"Hello 常用國字標準字體表\", \"Hello \\xb1`\\xa5\\u03b0\\xea\\xa6r\\xbc\\u0437\\u01e6r\\xc5\\xe9\\xaa\\xed\", \"big5\"},\n\t{\"Hello 常用國字標準字體表\", \"Hello \\xb3\\xa3\\xd3\\xc3\\x87\\xf8\\xd7\\xd6\\x98\\xcb\\x9c\\xca\\xd7\\xd6\\xf3\\x77\\xb1\\xed\", \"gbk\"},\n\t{\"Hello 常用國字標準字體表\", \"Hello \\xb3\\xa3\\xd3\\xc3\\x87\\xf8\\xd7\\xd6\\x98\\xcb\\x9c\\xca\\xd7\\xd6\\xf3\\x77\\xb1\\xed\", \"gb18030\"},\n\t{\"עִבְרִית\", \"\\x81\\x30\\xfb\\x30\\x81\\x30\\xf6\\x34\\x81\\x30\\xf9\\x33\\x81\\x30\\xf6\\x30\\x81\\x30\\xfb\\x36\\x81\\x30\\xf6\\x34\\x81\\x30\\xfa\\x31\\x81\\x30\\xfb\\x38\", \"gb18030\"},\n\t{\"㧯\", \"\\x82\\x31\\x89\\x38\", \"gb18030\"},\n\t{\"これは漢字です。\", \"\\x82\\xb1\\x82\\xea\\x82\\xcd\\x8a\\xbf\\x8e\\x9a\\x82\\xc5\\x82\\xb7\\x81B\", \"SJIS\"},\n\t{\"Hello, 世界!\", \"Hello, \\x90\\xa2\\x8aE!\", \"SJIS\"},\n\t{\"ｲｳｴｵｶ\", \"\\xb2\\xb3\\xb4\\xb5\\xb6\", \"SJIS\"},\n\t{\"これは漢字です。\", \"\\xa4\\xb3\\xa4\\xec\\xa4\\u03f4\\xc1\\xbb\\xfa\\xa4\\u01e4\\xb9\\xa1\\xa3\", \"EUC-JP\"},\n\t{\"Hello, 世界!\", \"Hello, \\x1b$B@$3&\\x1b(B!\", \"ISO-2022-JP\"},\n\t{\"다음과 같은 조건을 따라야 합니다: 저작자표시\", \"\\xb4\\xd9\\xc0\\xbd\\xb0\\xfa \\xb0\\xb0\\xc0\\xba \\xc1\\xb6\\xb0\\xc7\\xc0\\xbb \\xb5\\xfb\\xb6\\xf3\\xbe\\xdf \\xc7մϴ\\xd9: \\xc0\\xfa\\xc0\\xdb\\xc0\\xdaǥ\\xbd\\xc3\", \"EUC-KR\"},\n}\n\nfunc TestDecode(t *testing.T) {\n\ttestCases := append(testCases, 
[]testCase{\n\t\t// Replace multi-byte maximum subpart of ill-formed subsequence with\n\t\t// single replacement character (WhatWG requirement).\n\t\t{\"Rés\\ufffdumé\", \"Rés\\xe1\\x80umé\", \"utf8\"},\n\t}...)\n\tfor _, tc := range testCases {\n\t\te, _ := Lookup(tc.otherEncoding)\n\t\tif e == nil {\n\t\t\tt.Errorf(\"%s: not found\", tc.otherEncoding)\n\t\t\tcontinue\n\t\t}\n\t\ts, err := transformString(e.NewDecoder(), tc.other)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: decode %q: %v\", tc.otherEncoding, tc.other, err)\n\t\t\tcontinue\n\t\t}\n\t\tif s != tc.utf8 {\n\t\t\tt.Errorf(\"%s: got %q, want %q\", tc.otherEncoding, s, tc.utf8)\n\t\t}\n\t}\n}\n\nfunc TestEncode(t *testing.T) {\n\ttestCases := append(testCases, []testCase{\n\t\t// Use Go-style replacement.\n\t\t{\"Rés\\xe1\\x80umé\", \"Rés\\ufffd\\ufffdumé\", \"utf8\"},\n\t\t// U+0144 LATIN SMALL LETTER N WITH ACUTE not supported by encoding.\n\t\t{\"Gdańsk\", \"Gda&#324;sk\", \"ISO-8859-11\"},\n\t\t{\"\\ufffd\", \"&#65533;\", \"ISO-8859-11\"},\n\t\t{\"a\\xe1\\x80b\", \"a&#65533;&#65533;b\", \"ISO-8859-11\"},\n\t}...)\n\tfor _, tc := range testCases {\n\t\te, _ := Lookup(tc.otherEncoding)\n\t\tif e == nil {\n\t\t\tt.Errorf(\"%s: not found\", tc.otherEncoding)\n\t\t\tcontinue\n\t\t}\n\t\ts, err := transformString(e.NewEncoder(), tc.utf8)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: encode %q: %s\", tc.otherEncoding, tc.utf8, err)\n\t\t\tcontinue\n\t\t}\n\t\tif s != tc.other {\n\t\t\tt.Errorf(\"%s: got %q, want %q\", tc.otherEncoding, s, tc.other)\n\t\t}\n\t}\n}\n\nvar sniffTestCases = []struct {\n\tfilename, declared, want string\n}{\n\t{\"HTTP-charset.html\", \"text/html; charset=iso-8859-15\", \"iso-8859-15\"},\n\t{\"UTF-16LE-BOM.html\", \"\", \"utf-16le\"},\n\t{\"UTF-16BE-BOM.html\", \"\", \"utf-16be\"},\n\t{\"meta-content-attribute.html\", \"text/html\", \"iso-8859-15\"},\n\t{\"meta-charset-attribute.html\", \"text/html\", \"iso-8859-15\"},\n\t{\"No-encoding-declaration.html\", \"text/html\", 
\"utf-8\"},\n\t{\"HTTP-vs-UTF-8-BOM.html\", \"text/html; charset=iso-8859-15\", \"utf-8\"},\n\t{\"HTTP-vs-meta-content.html\", \"text/html; charset=iso-8859-15\", \"iso-8859-15\"},\n\t{\"HTTP-vs-meta-charset.html\", \"text/html; charset=iso-8859-15\", \"iso-8859-15\"},\n\t{\"UTF-8-BOM-vs-meta-content.html\", \"text/html\", \"utf-8\"},\n\t{\"UTF-8-BOM-vs-meta-charset.html\", \"text/html\", \"utf-8\"},\n}\n\nfunc TestSniff(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\": // platforms that don't permit direct file system access\n\t\tt.Skipf(\"not supported on %q\", runtime.GOOS)\n\t}\n\n\tfor _, tc := range sniffTestCases {\n\t\tcontent, err := ioutil.ReadFile(\"testdata/\" + tc.filename)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: error reading file: %v\", tc.filename, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t_, name, _ := DetermineEncoding(content, tc.declared)\n\t\tif name != tc.want {\n\t\t\tt.Errorf(\"%s: got %q, want %q\", tc.filename, name, tc.want)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc TestReader(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\": // platforms that don't permit direct file system access\n\t\tt.Skipf(\"not supported on %q\", runtime.GOOS)\n\t}\n\n\tfor _, tc := range sniffTestCases {\n\t\tcontent, err := ioutil.ReadFile(\"testdata/\" + tc.filename)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: error reading file: %v\", tc.filename, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tr, err := NewReader(bytes.NewReader(content), tc.declared)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: error creating reader: %v\", tc.filename, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tgot, err := ioutil.ReadAll(r)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: error reading from charset.NewReader: %v\", tc.filename, err)\n\t\t\tcontinue\n\t\t}\n\n\t\te, _ := Lookup(tc.want)\n\t\twant, err := ioutil.ReadAll(transform.NewReader(bytes.NewReader(content), e.NewDecoder()))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: error decoding with hard-coded charset name: %v\", tc.filename, 
err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif !bytes.Equal(got, want) {\n\t\t\tt.Errorf(\"%s: got %q, want %q\", tc.filename, got, want)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nvar metaTestCases = []struct {\n\tmeta, want string\n}{\n\t{\"\", \"\"},\n\t{\"text/html\", \"\"},\n\t{\"text/html; charset utf-8\", \"\"},\n\t{\"text/html; charset=latin-2\", \"latin-2\"},\n\t{\"text/html; charset; charset = utf-8\", \"utf-8\"},\n\t{`charset=\"big5\"`, \"big5\"},\n\t{\"charset='shift_jis'\", \"shift_jis\"},\n}\n\nfunc TestFromMeta(t *testing.T) {\n\tfor _, tc := range metaTestCases {\n\t\tgot := fromMetaElement(tc.meta)\n\t\tif got != tc.want {\n\t\t\tt.Errorf(\"%q: got %q, want %q\", tc.meta, got, tc.want)\n\t\t}\n\t}\n}\n\nfunc TestXML(t *testing.T) {\n\tconst s = \"<?xml version=\\\"1.0\\\" encoding=\\\"windows-1252\\\"?><a><Word>r\\xe9sum\\xe9</Word></a>\"\n\n\td := xml.NewDecoder(strings.NewReader(s))\n\td.CharsetReader = NewReaderLabel\n\n\tvar a struct {\n\t\tWord string\n\t}\n\terr := d.Decode(&a)\n\tif err != nil {\n\t\tt.Fatalf(\"Decode: %v\", err)\n\t}\n\n\twant := \"résumé\"\n\tif a.Word != want {\n\t\tt.Errorf(\"got %q, want %q\", a.Word, want)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/charset/testdata/HTTP-charset.html",
    "content": "<!DOCTYPE html>\n<html  lang=\"en\" >\n<head>\n  <title>HTTP charset</title>\n<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>\n<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"./generatedtests.css\">\n<script src=\"http://w3c-test.org/resources/testharness.js\"></script>\n<script src=\"http://w3c-test.org/resources/testharnessreport.js\"></script>\n<meta name='flags' content='http'>\n<meta name=\"assert\" content=\"The character encoding of a page can be set using the HTTP header charset declaration.\">\n<style type='text/css'>\n.test div { width: 50px; }</style>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"the-input-byte-stream/support/encodingtests-15.css\">\n</head>\n<body>\n<p class='title'>HTTP charset</p>\n\n\n<div id='log'></div>\n\n\n<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>\n\n\n\n\n\n<div class='description'>\n<p class=\"assertion\" title=\"Assertion\">The character encoding of a page can be set using the HTTP header charset declaration.</p>\n<div class=\"notes\"><p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. 
If the class name matches the selector then the test will pass.</p><p>The only character encoding declaration for this HTML file is in the HTTP header, which sets the encoding to ISO 8859-15.</p></p>\n</div>\n</div>\n<div class=\"nexttest\"><div><a href=\"generate?test=the-input-byte-stream-003\">Next test</a></div><div class=\"doctype\">HTML5</div>\n<p class=\"jump\">the-input-byte-stream-001<br /><a href=\"/International/tests/html5/the-input-byte-stream/results-basics#basics\" target=\"_blank\">Result summary &amp; related tests</a><br /><a href=\"http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-001\" target=\"_blank\">Detailed results for this test</a><br/>\t<a href=\"http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream\" target=\"_blank\">Link to spec</a></p>\n<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>\n\t\t\t\t<li>The test is read from a server that supports HTTP.</li></ul></div>\n</div>\n<script>\ntest(function() {\nassert_equals(document.getElementById('box').offsetWidth, 100);\n}, \" \");\n</script>\n\n</body>\n</html>\n\n\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html",
    "content": "﻿<!DOCTYPE html>\n<html  lang=\"en\" >\n<head>\n  <title>HTTP vs UTF-8 BOM</title>\n<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>\n<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"./generatedtests.css\">\n<script src=\"http://w3c-test.org/resources/testharness.js\"></script>\n<script src=\"http://w3c-test.org/resources/testharnessreport.js\"></script>\n<meta name='flags' content='http'>\n<meta name=\"assert\" content=\"A character encoding set in the HTTP header has lower precedence than the UTF-8 signature.\">\n<style type='text/css'>\n.test div { width: 50px; }</style>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"the-input-byte-stream/support/encodingtests-utf8.css\">\n</head>\n<body>\n<p class='title'>HTTP vs UTF-8 BOM</p>\n\n\n<div id='log'></div>\n\n\n<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>\n\n\n\n\n\n<div class='description'>\n<p class=\"assertion\" title=\"Assertion\">A character encoding set in the HTTP header has lower precedence than the UTF-8 signature.</p>\n<div class=\"notes\"><p><p>The HTTP header attempts to set the character encoding to ISO 8859-15. The page starts with a UTF-8 signature.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00FD;&#x00E4;&#x00E8;</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.</p><p>If the test is unsuccessful, the characters &#x00EF;&#x00BB;&#x00BF; should appear at the top of the page.  
These represent the bytes that make up the UTF-8 signature when encountered in the ISO 8859-15 encoding.</p></p>\n</div>\n</div>\n<div class=\"nexttest\"><div><a href=\"generate?test=the-input-byte-stream-022\">Next test</a></div><div class=\"doctype\">HTML5</div>\n<p class=\"jump\">the-input-byte-stream-034<br /><a href=\"/International/tests/html5/the-input-byte-stream/results-basics#precedence\" target=\"_blank\">Result summary &amp; related tests</a><br /><a href=\"http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-034\" target=\"_blank\">Detailed results for this test</a><br/>\t<a href=\"http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream\" target=\"_blank\">Link to spec</a></p>\n<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>\n\t\t\t\t<li>The test is read from a server that supports HTTP.</li></ul></div>\n</div>\n<script>\ntest(function() {\nassert_equals(document.getElementById('box').offsetWidth, 100);\n}, \" \");\n</script>\n\n</body>\n</html>\n\n\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html",
    "content": "<!DOCTYPE html>\n<html  lang=\"en\" >\n<head>\n <meta charset=\"iso-8859-1\" > <title>HTTP vs meta charset</title>\n<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>\n<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"./generatedtests.css\">\n<script src=\"http://w3c-test.org/resources/testharness.js\"></script>\n<script src=\"http://w3c-test.org/resources/testharnessreport.js\"></script>\n<meta name='flags' content='http'>\n<meta name=\"assert\" content=\"The HTTP header has a higher precedence than an encoding declaration in a meta charset attribute.\">\n<style type='text/css'>\n.test div { width: 50px; }.test div { width: 90px; }\n</style>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"the-input-byte-stream/support/encodingtests-15.css\">\n</head>\n<body>\n<p class='title'>HTTP vs meta charset</p>\n\n\n<div id='log'></div>\n\n\n<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>\n\n\n\n\n\n<div class='description'>\n<p class=\"assertion\" title=\"Assertion\">The HTTP header has a higher precedence than an encoding declaration in a meta charset attribute.</p>\n<div class=\"notes\"><p><p>The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-1.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. 
If the class name matches the selector then the test will pass.</p></p>\n</div>\n</div>\n<div class=\"nexttest\"><div><a href=\"generate?test=the-input-byte-stream-037\">Next test</a></div><div class=\"doctype\">HTML5</div>\n<p class=\"jump\">the-input-byte-stream-018<br /><a href=\"/International/tests/html5/the-input-byte-stream/results-basics#precedence\" target=\"_blank\">Result summary &amp; related tests</a><br /><a href=\"http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-018\" target=\"_blank\">Detailed results for this test</a><br/>\t<a href=\"http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream\" target=\"_blank\">Link to spec</a></p>\n<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>\n\t\t\t\t<li>The test is read from a server that supports HTTP.</li></ul></div>\n</div>\n<script>\ntest(function() {\nassert_equals(document.getElementById('box').offsetWidth, 100);\n}, \" \");\n</script>\n\n</body>\n</html>\n\n\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html",
    "content": "<!DOCTYPE html>\n<html  lang=\"en\" >\n<head>\n <meta http-equiv=\"content-type\" content=\"text/html;charset=iso-8859-1\" > <title>HTTP vs meta content</title>\n<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>\n<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"./generatedtests.css\">\n<script src=\"http://w3c-test.org/resources/testharness.js\"></script>\n<script src=\"http://w3c-test.org/resources/testharnessreport.js\"></script>\n<meta name='flags' content='http'>\n<meta name=\"assert\" content=\"The HTTP header has a higher precedence than an encoding declaration in a meta content attribute.\">\n<style type='text/css'>\n.test div { width: 50px; }.test div { width: 90px; }\n</style>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"the-input-byte-stream/support/encodingtests-15.css\">\n</head>\n<body>\n<p class='title'>HTTP vs meta content</p>\n\n\n<div id='log'></div>\n\n\n<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>\n\n\n\n\n\n<div class='description'>\n<p class=\"assertion\" title=\"Assertion\">The HTTP header has a higher precedence than an encoding declaration in a meta content attribute.</p>\n<div class=\"notes\"><p><p>The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-1.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. 
If the class name matches the selector then the test will pass.</p></p>\n</div>\n</div>\n<div class=\"nexttest\"><div><a href=\"generate?test=the-input-byte-stream-018\">Next test</a></div><div class=\"doctype\">HTML5</div>\n<p class=\"jump\">the-input-byte-stream-016<br /><a href=\"/International/tests/html5/the-input-byte-stream/results-basics#precedence\" target=\"_blank\">Result summary &amp; related tests</a><br /><a href=\"http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-016\" target=\"_blank\">Detailed results for this test</a><br/>\t<a href=\"http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream\" target=\"_blank\">Link to spec</a></p>\n<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>\n\t\t\t\t<li>The test is read from a server that supports HTTP.</li></ul></div>\n</div>\n<script>\ntest(function() {\nassert_equals(document.getElementById('box').offsetWidth, 100);\n}, \" \");\n</script>\n\n</body>\n</html>\n\n\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/charset/testdata/No-encoding-declaration.html",
    "content": "<!DOCTYPE html>\n<html  lang=\"en\" >\n<head>\n  <title>No encoding declaration</title>\n<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>\n<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"./generatedtests.css\">\n<script src=\"http://w3c-test.org/resources/testharness.js\"></script>\n<script src=\"http://w3c-test.org/resources/testharnessreport.js\"></script>\n<meta name='flags' content='http'>\n<meta name=\"assert\" content=\"A page with no encoding information in HTTP, BOM, XML declaration or meta element will be treated as UTF-8.\">\n<style type='text/css'>\n.test div { width: 50px; }</style>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"the-input-byte-stream/support/encodingtests-utf8.css\">\n</head>\n<body>\n<p class='title'>No encoding declaration</p>\n\n\n<div id='log'></div>\n\n\n<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>\n\n\n\n\n\n<div class='description'>\n<p class=\"assertion\" title=\"Assertion\">A page with no encoding information in HTTP, BOM, XML declaration or meta element will be treated as UTF-8.</p>\n<div class=\"notes\"><p><p>The test on this page contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00FD;&#x00E4;&#x00E8;</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. 
If the class name matches the selector then the test will pass.</p></p>\n</div>\n</div>\n<div class=\"nexttest\"><div><a href=\"generate?test=the-input-byte-stream-034\">Next test</a></div><div class=\"doctype\">HTML5</div>\n<p class=\"jump\">the-input-byte-stream-015<br /><a href=\"/International/tests/html5/the-input-byte-stream/results-basics#basics\" target=\"_blank\">Result summary &amp; related tests</a><br /><a href=\"http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-015\" target=\"_blank\">Detailed results for this test</a><br/>\t<a href=\"http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream\" target=\"_blank\">Link to spec</a></p>\n<div class='prereq'>Assumptions: <ul><li>The test is read from a server that supports HTTP.</li></ul></div>\n</div>\n<script>\ntest(function() {\nassert_equals(document.getElementById('box').offsetWidth, 100);\n}, \" \");\n</script>\n\n</body>\n</html>\n\n\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/charset/testdata/README",
    "content": "These test cases come from\nhttp://www.w3.org/International/tests/repository/html5/the-input-byte-stream/results-basics\n\nDistributed under both the W3C Test Suite License\n(http://www.w3.org/Consortium/Legal/2008/04-testsuite-license)\nand the W3C 3-clause BSD License\n(http://www.w3.org/Consortium/Legal/2008/03-bsd-license).\nTo contribute to a W3C Test Suite, see the policies and contribution\nforms (http://www.w3.org/2004/10/27-testcases).\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-charset.html",
    "content": "﻿<!DOCTYPE html>\n<html  lang=\"en\" >\n<head>\n <meta charset=\"iso-8859-15\"> <title>UTF-8 BOM vs meta charset</title>\n<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>\n<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"./generatedtests.css\">\n<script src=\"http://w3c-test.org/resources/testharness.js\"></script>\n<script src=\"http://w3c-test.org/resources/testharnessreport.js\"></script>\n<meta name='flags' content='http'>\n<meta name=\"assert\" content=\"A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta charset attribute declares a different encoding.\">\n<style type='text/css'>\n.test div { width: 50px; }.test div { width: 90px; }\n</style>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"the-input-byte-stream/support/encodingtests-utf8.css\">\n</head>\n<body>\n<p class='title'>UTF-8 BOM vs meta charset</p>\n\n\n<div id='log'></div>\n\n\n<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>\n\n\n\n\n\n<div class='description'>\n<p class=\"assertion\" title=\"Assertion\">A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta charset attribute declares a different encoding.</p>\n<div class=\"notes\"><p><p>The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00FD;&#x00E4;&#x00E8;</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. 
If the class name matches the selector then the test will pass.</p></p>\n</div>\n</div>\n<div class=\"nexttest\"><div><a href=\"generate?test=the-input-byte-stream-024\">Next test</a></div><div class=\"doctype\">HTML5</div>\n<p class=\"jump\">the-input-byte-stream-038<br /><a href=\"/International/tests/html5/the-input-byte-stream/results-basics#precedence\" target=\"_blank\">Result summary &amp; related tests</a><br /><a href=\"http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-038\" target=\"_blank\">Detailed results for this test</a><br/>\t<a href=\"http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream\" target=\"_blank\">Link to spec</a></p>\n<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>\n\t\t\t\t<li>The test is read from a server that supports HTTP.</li></ul></div>\n</div>\n<script>\ntest(function() {\nassert_equals(document.getElementById('box').offsetWidth, 100);\n}, \" \");\n</script>\n\n</body>\n</html>\n\n\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/charset/testdata/UTF-8-BOM-vs-meta-content.html",
    "content": "﻿<!DOCTYPE html>\n<html  lang=\"en\" >\n<head>\n <meta http-equiv=\"content-type\" content=\"text/html; charset=iso-8859-15\"> <title>UTF-8 BOM vs meta content</title>\n<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>\n<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"./generatedtests.css\">\n<script src=\"http://w3c-test.org/resources/testharness.js\"></script>\n<script src=\"http://w3c-test.org/resources/testharnessreport.js\"></script>\n<meta name='flags' content='http'>\n<meta name=\"assert\" content=\"A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta content attribute declares a different encoding.\">\n<style type='text/css'>\n.test div { width: 50px; }</style>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"the-input-byte-stream/support/encodingtests-utf8.css\">\n</head>\n<body>\n<p class='title'>UTF-8 BOM vs meta content</p>\n\n\n<div id='log'></div>\n\n\n<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>\n\n\n\n\n\n<div class='description'>\n<p class=\"assertion\" title=\"Assertion\">A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta content attribute declares a different encoding.</p>\n<div class=\"notes\"><p><p>The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00FD;&#x00E4;&#x00E8;</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. 
If the class name matches the selector then the test will pass.</p></p>\n</div>\n</div>\n<div class=\"nexttest\"><div><a href=\"generate?test=the-input-byte-stream-038\">Next test</a></div><div class=\"doctype\">HTML5</div>\n<p class=\"jump\">the-input-byte-stream-037<br /><a href=\"/International/tests/html5/the-input-byte-stream/results-basics#precedence\" target=\"_blank\">Result summary &amp; related tests</a><br /><a href=\"http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-037\" target=\"_blank\">Detailed results for this test</a><br/>\t<a href=\"http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream\" target=\"_blank\">Link to spec</a></p>\n<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>\n\t\t\t\t<li>The test is read from a server that supports HTTP.</li></ul></div>\n</div>\n<script>\ntest(function() {\nassert_equals(document.getElementById('box').offsetWidth, 100);\n}, \" \");\n</script>\n\n</body>\n</html>\n\n\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html",
    "content": "<!DOCTYPE html>\n<html  lang=\"en\" >\n<head>\n <meta charset=\"iso-8859-15\"> <title>meta charset attribute</title>\n<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>\n<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"./generatedtests.css\">\n<script src=\"http://w3c-test.org/resources/testharness.js\"></script>\n<script src=\"http://w3c-test.org/resources/testharnessreport.js\"></script>\n<meta name='flags' content='http'>\n<meta name=\"assert\" content=\"The character encoding of the page can be set by a meta element with charset attribute.\">\n<style type='text/css'>\n.test div { width: 50px; }</style>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"the-input-byte-stream/support/encodingtests-15.css\">\n</head>\n<body>\n<p class='title'>meta charset attribute</p>\n\n\n<div id='log'></div>\n\n\n<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>\n\n\n\n\n\n<div class='description'>\n<p class=\"assertion\" title=\"Assertion\">The character encoding of the page can be set by a meta element with charset attribute.</p>\n<div class=\"notes\"><p><p>The only character encoding declaration for this HTML file is in the charset attribute of the meta element, which declares the encoding to be ISO 8859-15.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. 
If the class name matches the selector then the test will pass.</p></p>\n</div>\n</div>\n<div class=\"nexttest\"><div><a href=\"generate?test=the-input-byte-stream-015\">Next test</a></div><div class=\"doctype\">HTML5</div>\n<p class=\"jump\">the-input-byte-stream-009<br /><a href=\"/International/tests/html5/the-input-byte-stream/results-basics#basics\" target=\"_blank\">Result summary &amp; related tests</a><br /><a href=\"http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-009\" target=\"_blank\">Detailed results for this test</a><br/>\t<a href=\"http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream\" target=\"_blank\">Link to spec</a></p>\n<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>\n\t\t\t\t<li>The test is read from a server that supports HTTP.</li></ul></div>\n</div>\n<script>\ntest(function() {\nassert_equals(document.getElementById('box').offsetWidth, 100);\n}, \" \");\n</script>\n\n</body>\n</html>\n\n\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/charset/testdata/meta-content-attribute.html",
    "content": "<!DOCTYPE html>\n<html  lang=\"en\" >\n<head>\n <meta http-equiv=\"content-type\" content=\"text/html; charset=iso-8859-15\"> <title>meta content attribute</title>\n<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>\n<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"./generatedtests.css\">\n<script src=\"http://w3c-test.org/resources/testharness.js\"></script>\n<script src=\"http://w3c-test.org/resources/testharnessreport.js\"></script>\n<meta name='flags' content='http'>\n<meta name=\"assert\" content=\"The character encoding of the page can be set by a meta element with http-equiv and content attributes.\">\n<style type='text/css'>\n.test div { width: 50px; }</style>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"the-input-byte-stream/support/encodingtests-15.css\">\n</head>\n<body>\n<p class='title'>meta content attribute</p>\n\n\n<div id='log'></div>\n\n\n<div class='test'><div id='box' class='ýäè'>&#xA0;</div></div>\n\n\n\n\n\n<div class='description'>\n<p class=\"assertion\" title=\"Assertion\">The character encoding of the page can be set by a meta element with http-equiv and content attributes.</p>\n<div class=\"notes\"><p><p>The only character encoding declaration for this HTML file is in the content attribute of the meta element, which declares the encoding to be ISO 8859-15.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.&#x00C3;&#x0153;&#x00C3;&#x20AC;&#x00C3;&#x0161;</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. 
If the class name matches the selector then the test will pass.</p></p>\n</div>\n</div>\n<div class=\"nexttest\"><div><a href=\"generate?test=the-input-byte-stream-009\">Next test</a></div><div class=\"doctype\">HTML5</div>\n<p class=\"jump\">the-input-byte-stream-007<br /><a href=\"/International/tests/html5/the-input-byte-stream/results-basics#basics\" target=\"_blank\">Result summary &amp; related tests</a><br /><a href=\"http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-007\" target=\"_blank\">Detailed results for this test</a><br/>\t<a href=\"http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream\" target=\"_blank\">Link to spec</a></p>\n<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>\n\t\t\t\t<li>The test is read from a server that supports HTTP.</li></ul></div>\n</div>\n<script>\ntest(function() {\nassert_equals(document.getElementById('box').offsetWidth, 100);\n}, \" \");\n</script>\n\n</body>\n</html>\n\n\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/const.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage html\n\n// Section 12.2.3.2 of the HTML5 specification says \"The following elements\n// have varying levels of special parsing rules\".\n// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements\nvar isSpecialElementMap = map[string]bool{\n\t\"address\":    true,\n\t\"applet\":     true,\n\t\"area\":       true,\n\t\"article\":    true,\n\t\"aside\":      true,\n\t\"base\":       true,\n\t\"basefont\":   true,\n\t\"bgsound\":    true,\n\t\"blockquote\": true,\n\t\"body\":       true,\n\t\"br\":         true,\n\t\"button\":     true,\n\t\"caption\":    true,\n\t\"center\":     true,\n\t\"col\":        true,\n\t\"colgroup\":   true,\n\t\"dd\":         true,\n\t\"details\":    true,\n\t\"dir\":        true,\n\t\"div\":        true,\n\t\"dl\":         true,\n\t\"dt\":         true,\n\t\"embed\":      true,\n\t\"fieldset\":   true,\n\t\"figcaption\": true,\n\t\"figure\":     true,\n\t\"footer\":     true,\n\t\"form\":       true,\n\t\"frame\":      true,\n\t\"frameset\":   true,\n\t\"h1\":         true,\n\t\"h2\":         true,\n\t\"h3\":         true,\n\t\"h4\":         true,\n\t\"h5\":         true,\n\t\"h6\":         true,\n\t\"head\":       true,\n\t\"header\":     true,\n\t\"hgroup\":     true,\n\t\"hr\":         true,\n\t\"html\":       true,\n\t\"iframe\":     true,\n\t\"img\":        true,\n\t\"input\":      true,\n\t\"isindex\":    true, // The 'isindex' element has been removed, but keep it for backwards compatibility.\n\t\"keygen\":     true,\n\t\"li\":         true,\n\t\"link\":       true,\n\t\"listing\":    true,\n\t\"main\":       true,\n\t\"marquee\":    true,\n\t\"menu\":       true,\n\t\"meta\":       true,\n\t\"nav\":        true,\n\t\"noembed\":    true,\n\t\"noframes\":   true,\n\t\"noscript\":   true,\n\t\"object\":     true,\n\t\"ol\":     
    true,\n\t\"p\":          true,\n\t\"param\":      true,\n\t\"plaintext\":  true,\n\t\"pre\":        true,\n\t\"script\":     true,\n\t\"section\":    true,\n\t\"select\":     true,\n\t\"source\":     true,\n\t\"style\":      true,\n\t\"summary\":    true,\n\t\"table\":      true,\n\t\"tbody\":      true,\n\t\"td\":         true,\n\t\"template\":   true,\n\t\"textarea\":   true,\n\t\"tfoot\":      true,\n\t\"th\":         true,\n\t\"thead\":      true,\n\t\"title\":      true,\n\t\"tr\":         true,\n\t\"track\":      true,\n\t\"ul\":         true,\n\t\"wbr\":        true,\n\t\"xmp\":        true,\n}\n\nfunc isSpecialElement(element *Node) bool {\n\tswitch element.Namespace {\n\tcase \"\", \"html\":\n\t\treturn isSpecialElementMap[element.Data]\n\tcase \"svg\":\n\t\treturn element.Data == \"foreignObject\"\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/doc.go",
    "content": "// Copyright 2010 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n/*\nPackage html implements an HTML5-compliant tokenizer and parser.\n\nTokenization is done by creating a Tokenizer for an io.Reader r. It is the\ncaller's responsibility to ensure that r provides UTF-8 encoded HTML.\n\n\tz := html.NewTokenizer(r)\n\nGiven a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),\nwhich parses the next token and returns its type, or an error:\n\n\tfor {\n\t\ttt := z.Next()\n\t\tif tt == html.ErrorToken {\n\t\t\t// ...\n\t\t\treturn ...\n\t\t}\n\t\t// Process the current token.\n\t}\n\nThere are two APIs for retrieving the current token. The high-level API is to\ncall Token; the low-level API is to call Text or TagName / TagAttr. Both APIs\nallow optionally calling Raw after Next but before Token, Text, TagName, or\nTagAttr. In EBNF notation, the valid call sequence per token is:\n\n\tNext {Raw} [ Token | Text | TagName {TagAttr} ]\n\nToken returns an independent data structure that completely describes a token.\nEntities (such as \"&lt;\") are unescaped, tag names and attribute keys are\nlower-cased, and attributes are collected into a []Attribute. For example:\n\n\tfor {\n\t\tif z.Next() == html.ErrorToken {\n\t\t\t// Returning io.EOF indicates success.\n\t\t\treturn z.Err()\n\t\t}\n\t\temitToken(z.Token())\n\t}\n\nThe low-level API performs fewer allocations and copies, but the contents of\nthe []byte values returned by Text, TagName and TagAttr may change on the next\ncall to Next. 
For example, to extract an HTML page's anchor text:\n\n\tdepth := 0\n\tfor {\n\t\ttt := z.Next()\n\t\tswitch tt {\n\t\tcase ErrorToken:\n\t\t\treturn z.Err()\n\t\tcase TextToken:\n\t\t\tif depth > 0 {\n\t\t\t\t// emitBytes should copy the []byte it receives,\n\t\t\t\t// if it doesn't process it immediately.\n\t\t\t\temitBytes(z.Text())\n\t\t\t}\n\t\tcase StartTagToken, EndTagToken:\n\t\t\ttn, _ := z.TagName()\n\t\t\tif len(tn) == 1 && tn[0] == 'a' {\n\t\t\t\tif tt == StartTagToken {\n\t\t\t\t\tdepth++\n\t\t\t\t} else {\n\t\t\t\t\tdepth--\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\nParsing is done by calling Parse with an io.Reader, which returns the root of\nthe parse tree (the document element) as a *Node. It is the caller's\nresponsibility to ensure that the Reader provides UTF-8 encoded HTML. For\nexample, to process each anchor node in depth-first order:\n\n\tdoc, err := html.Parse(r)\n\tif err != nil {\n\t\t// ...\n\t}\n\tvar f func(*html.Node)\n\tf = func(n *html.Node) {\n\t\tif n.Type == html.ElementNode && n.Data == \"a\" {\n\t\t\t// Do something with n...\n\t\t}\n\t\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\t\tf(c)\n\t\t}\n\t}\n\tf(doc)\n\nThe relevant specifications include:\nhttps://html.spec.whatwg.org/multipage/syntax.html and\nhttps://html.spec.whatwg.org/multipage/syntax.html#tokenization\n*/\npackage html // import \"golang.org/x/net/html\"\n\n// The tokenization algorithm implemented by this package is not a line-by-line\n// transliteration of the relatively verbose state-machine in the WHATWG\n// specification. A more direct approach is used instead, where the program\n// counter implies the state, such as whether it is tokenizing a tag or a text\n// node. 
Specification compliance is verified by checking expected and actual\n// outputs over a test suite rather than aiming for algorithmic fidelity.\n\n// TODO(nigeltao): Does a DOM API belong in this package or a separate one?\n// TODO(nigeltao): How does parsing interact with a JavaScript engine?\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/doctype.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage html\n\nimport (\n\t\"strings\"\n)\n\n// parseDoctype parses the data from a DoctypeToken into a name,\n// public identifier, and system identifier. It returns a Node whose Type\n// is DoctypeNode, whose Data is the name, and which has attributes\n// named \"system\" and \"public\" for the two identifiers if they were present.\n// quirks is whether the document should be parsed in \"quirks mode\".\nfunc parseDoctype(s string) (n *Node, quirks bool) {\n\tn = &Node{Type: DoctypeNode}\n\n\t// Find the name.\n\tspace := strings.IndexAny(s, whitespace)\n\tif space == -1 {\n\t\tspace = len(s)\n\t}\n\tn.Data = s[:space]\n\t// The comparison to \"html\" is case-sensitive.\n\tif n.Data != \"html\" {\n\t\tquirks = true\n\t}\n\tn.Data = strings.ToLower(n.Data)\n\ts = strings.TrimLeft(s[space:], whitespace)\n\n\tif len(s) < 6 {\n\t\t// It can't start with \"PUBLIC\" or \"SYSTEM\".\n\t\t// Ignore the rest of the string.\n\t\treturn n, quirks || s != \"\"\n\t}\n\n\tkey := strings.ToLower(s[:6])\n\ts = s[6:]\n\tfor key == \"public\" || key == \"system\" {\n\t\ts = strings.TrimLeft(s, whitespace)\n\t\tif s == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tquote := s[0]\n\t\tif quote != '\"' && quote != '\\'' {\n\t\t\tbreak\n\t\t}\n\t\ts = s[1:]\n\t\tq := strings.IndexRune(s, rune(quote))\n\t\tvar id string\n\t\tif q == -1 {\n\t\t\tid = s\n\t\t\ts = \"\"\n\t\t} else {\n\t\t\tid = s[:q]\n\t\t\ts = s[q+1:]\n\t\t}\n\t\tn.Attr = append(n.Attr, Attribute{Key: key, Val: id})\n\t\tif key == \"public\" {\n\t\t\tkey = \"system\"\n\t\t} else {\n\t\t\tkey = \"\"\n\t\t}\n\t}\n\n\tif key != \"\" || s != \"\" {\n\t\tquirks = true\n\t} else if len(n.Attr) > 0 {\n\t\tif n.Attr[0].Key == \"public\" {\n\t\t\tpublic := strings.ToLower(n.Attr[0].Val)\n\t\t\tswitch public {\n\t\t\tcase \"-//w3o//dtd w3 html strict 3.0//en//\", 
\"-/w3d/dtd html 4.0 transitional/en\", \"html\":\n\t\t\t\tquirks = true\n\t\t\tdefault:\n\t\t\t\tfor _, q := range quirkyIDs {\n\t\t\t\t\tif strings.HasPrefix(public, q) {\n\t\t\t\t\t\tquirks = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// The following two public IDs only cause quirks mode if there is no system ID.\n\t\t\tif len(n.Attr) == 1 && (strings.HasPrefix(public, \"-//w3c//dtd html 4.01 frameset//\") ||\n\t\t\t\tstrings.HasPrefix(public, \"-//w3c//dtd html 4.01 transitional//\")) {\n\t\t\t\tquirks = true\n\t\t\t}\n\t\t}\n\t\tif lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == \"system\" &&\n\t\t\tstrings.ToLower(lastAttr.Val) == \"http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd\" {\n\t\t\tquirks = true\n\t\t}\n\t}\n\n\treturn n, quirks\n}\n\n// quirkyIDs is a list of public doctype identifiers that cause a document\n// to be interpreted in quirks mode. The identifiers should be in lower case.\nvar quirkyIDs = []string{\n\t\"+//silmaril//dtd html pro v0r11 19970101//\",\n\t\"-//advasoft ltd//dtd html 3.0 aswedit + extensions//\",\n\t\"-//as//dtd html 3.0 aswedit + extensions//\",\n\t\"-//ietf//dtd html 2.0 level 1//\",\n\t\"-//ietf//dtd html 2.0 level 2//\",\n\t\"-//ietf//dtd html 2.0 strict level 1//\",\n\t\"-//ietf//dtd html 2.0 strict level 2//\",\n\t\"-//ietf//dtd html 2.0 strict//\",\n\t\"-//ietf//dtd html 2.0//\",\n\t\"-//ietf//dtd html 2.1e//\",\n\t\"-//ietf//dtd html 3.0//\",\n\t\"-//ietf//dtd html 3.2 final//\",\n\t\"-//ietf//dtd html 3.2//\",\n\t\"-//ietf//dtd html 3//\",\n\t\"-//ietf//dtd html level 0//\",\n\t\"-//ietf//dtd html level 1//\",\n\t\"-//ietf//dtd html level 2//\",\n\t\"-//ietf//dtd html level 3//\",\n\t\"-//ietf//dtd html strict level 0//\",\n\t\"-//ietf//dtd html strict level 1//\",\n\t\"-//ietf//dtd html strict level 2//\",\n\t\"-//ietf//dtd html strict level 3//\",\n\t\"-//ietf//dtd html strict//\",\n\t\"-//ietf//dtd html//\",\n\t\"-//metrius//dtd metrius 
presentational//\",\n\t\"-//microsoft//dtd internet explorer 2.0 html strict//\",\n\t\"-//microsoft//dtd internet explorer 2.0 html//\",\n\t\"-//microsoft//dtd internet explorer 2.0 tables//\",\n\t\"-//microsoft//dtd internet explorer 3.0 html strict//\",\n\t\"-//microsoft//dtd internet explorer 3.0 html//\",\n\t\"-//microsoft//dtd internet explorer 3.0 tables//\",\n\t\"-//netscape comm. corp.//dtd html//\",\n\t\"-//netscape comm. corp.//dtd strict html//\",\n\t\"-//o'reilly and associates//dtd html 2.0//\",\n\t\"-//o'reilly and associates//dtd html extended 1.0//\",\n\t\"-//o'reilly and associates//dtd html extended relaxed 1.0//\",\n\t\"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//\",\n\t\"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//\",\n\t\"-//spyglass//dtd html 2.0 extended//\",\n\t\"-//sq//dtd html 2.0 hotmetal + extensions//\",\n\t\"-//sun microsystems corp.//dtd hotjava html//\",\n\t\"-//sun microsystems corp.//dtd hotjava strict html//\",\n\t\"-//w3c//dtd html 3 1995-03-24//\",\n\t\"-//w3c//dtd html 3.2 draft//\",\n\t\"-//w3c//dtd html 3.2 final//\",\n\t\"-//w3c//dtd html 3.2//\",\n\t\"-//w3c//dtd html 3.2s draft//\",\n\t\"-//w3c//dtd html 4.0 frameset//\",\n\t\"-//w3c//dtd html 4.0 transitional//\",\n\t\"-//w3c//dtd html experimental 19960712//\",\n\t\"-//w3c//dtd html experimental 970421//\",\n\t\"-//w3c//dtd w3 html//\",\n\t\"-//w3o//dtd w3 html 3.0//\",\n\t\"-//webtechs//dtd mozilla html 2.0//\",\n\t\"-//webtechs//dtd mozilla html//\",\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/entity.go",
    "content": "// Copyright 2010 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage html\n\n// All entities that do not end with ';' are 6 or fewer bytes long.\nconst longestEntityWithoutSemicolon = 6\n\n// entity is a map from HTML entity names to their values. The semicolon matters:\n// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references\n// lists both \"amp\" and \"amp;\" as two separate entries.\n//\n// Note that the HTML5 list is larger than the HTML4 list at\n// http://www.w3.org/TR/html4/sgml/entities.html\nvar entity = map[string]rune{\n\t\"AElig;\":                           '\\U000000C6',\n\t\"AMP;\":                             '\\U00000026',\n\t\"Aacute;\":                          '\\U000000C1',\n\t\"Abreve;\":                          '\\U00000102',\n\t\"Acirc;\":                           '\\U000000C2',\n\t\"Acy;\":                             '\\U00000410',\n\t\"Afr;\":                             '\\U0001D504',\n\t\"Agrave;\":                          '\\U000000C0',\n\t\"Alpha;\":                           '\\U00000391',\n\t\"Amacr;\":                           '\\U00000100',\n\t\"And;\":                             '\\U00002A53',\n\t\"Aogon;\":                           '\\U00000104',\n\t\"Aopf;\":                            '\\U0001D538',\n\t\"ApplyFunction;\":                   '\\U00002061',\n\t\"Aring;\":                           '\\U000000C5',\n\t\"Ascr;\":                            '\\U0001D49C',\n\t\"Assign;\":                          '\\U00002254',\n\t\"Atilde;\":                          '\\U000000C3',\n\t\"Auml;\":                            '\\U000000C4',\n\t\"Backslash;\":                       '\\U00002216',\n\t\"Barv;\":                            '\\U00002AE7',\n\t\"Barwed;\":                          '\\U00002306',\n\t\"Bcy;\":                             '\\U00000411',\n\t\"Because;\":                
         '\\U00002235',\n\t\"Bernoullis;\":                      '\\U0000212C',\n\t\"Beta;\":                            '\\U00000392',\n\t\"Bfr;\":                             '\\U0001D505',\n\t\"Bopf;\":                            '\\U0001D539',\n\t\"Breve;\":                           '\\U000002D8',\n\t\"Bscr;\":                            '\\U0000212C',\n\t\"Bumpeq;\":                          '\\U0000224E',\n\t\"CHcy;\":                            '\\U00000427',\n\t\"COPY;\":                            '\\U000000A9',\n\t\"Cacute;\":                          '\\U00000106',\n\t\"Cap;\":                             '\\U000022D2',\n\t\"CapitalDifferentialD;\":            '\\U00002145',\n\t\"Cayleys;\":                         '\\U0000212D',\n\t\"Ccaron;\":                          '\\U0000010C',\n\t\"Ccedil;\":                          '\\U000000C7',\n\t\"Ccirc;\":                           '\\U00000108',\n\t\"Cconint;\":                         '\\U00002230',\n\t\"Cdot;\":                            '\\U0000010A',\n\t\"Cedilla;\":                         '\\U000000B8',\n\t\"CenterDot;\":                       '\\U000000B7',\n\t\"Cfr;\":                             '\\U0000212D',\n\t\"Chi;\":                             '\\U000003A7',\n\t\"CircleDot;\":                       '\\U00002299',\n\t\"CircleMinus;\":                     '\\U00002296',\n\t\"CirclePlus;\":                      '\\U00002295',\n\t\"CircleTimes;\":                     '\\U00002297',\n\t\"ClockwiseContourIntegral;\":        '\\U00002232',\n\t\"CloseCurlyDoubleQuote;\":           '\\U0000201D',\n\t\"CloseCurlyQuote;\":                 '\\U00002019',\n\t\"Colon;\":                           '\\U00002237',\n\t\"Colone;\":                          '\\U00002A74',\n\t\"Congruent;\":                       '\\U00002261',\n\t\"Conint;\":                          '\\U0000222F',\n\t\"ContourIntegral;\":                 '\\U0000222E',\n\t\"Copf;\":                            
'\\U00002102',\n\t\"Coproduct;\":                       '\\U00002210',\n\t\"CounterClockwiseContourIntegral;\": '\\U00002233',\n\t\"Cross;\":                    '\\U00002A2F',\n\t\"Cscr;\":                     '\\U0001D49E',\n\t\"Cup;\":                      '\\U000022D3',\n\t\"CupCap;\":                   '\\U0000224D',\n\t\"DD;\":                       '\\U00002145',\n\t\"DDotrahd;\":                 '\\U00002911',\n\t\"DJcy;\":                     '\\U00000402',\n\t\"DScy;\":                     '\\U00000405',\n\t\"DZcy;\":                     '\\U0000040F',\n\t\"Dagger;\":                   '\\U00002021',\n\t\"Darr;\":                     '\\U000021A1',\n\t\"Dashv;\":                    '\\U00002AE4',\n\t\"Dcaron;\":                   '\\U0000010E',\n\t\"Dcy;\":                      '\\U00000414',\n\t\"Del;\":                      '\\U00002207',\n\t\"Delta;\":                    '\\U00000394',\n\t\"Dfr;\":                      '\\U0001D507',\n\t\"DiacriticalAcute;\":         '\\U000000B4',\n\t\"DiacriticalDot;\":           '\\U000002D9',\n\t\"DiacriticalDoubleAcute;\":   '\\U000002DD',\n\t\"DiacriticalGrave;\":         '\\U00000060',\n\t\"DiacriticalTilde;\":         '\\U000002DC',\n\t\"Diamond;\":                  '\\U000022C4',\n\t\"DifferentialD;\":            '\\U00002146',\n\t\"Dopf;\":                     '\\U0001D53B',\n\t\"Dot;\":                      '\\U000000A8',\n\t\"DotDot;\":                   '\\U000020DC',\n\t\"DotEqual;\":                 '\\U00002250',\n\t\"DoubleContourIntegral;\":    '\\U0000222F',\n\t\"DoubleDot;\":                '\\U000000A8',\n\t\"DoubleDownArrow;\":          '\\U000021D3',\n\t\"DoubleLeftArrow;\":          '\\U000021D0',\n\t\"DoubleLeftRightArrow;\":     '\\U000021D4',\n\t\"DoubleLeftTee;\":            '\\U00002AE4',\n\t\"DoubleLongLeftArrow;\":      '\\U000027F8',\n\t\"DoubleLongLeftRightArrow;\": '\\U000027FA',\n\t\"DoubleLongRightArrow;\":     '\\U000027F9',\n\t\"DoubleRightArrow;\":         
'\\U000021D2',\n\t\"DoubleRightTee;\":           '\\U000022A8',\n\t\"DoubleUpArrow;\":            '\\U000021D1',\n\t\"DoubleUpDownArrow;\":        '\\U000021D5',\n\t\"DoubleVerticalBar;\":        '\\U00002225',\n\t\"DownArrow;\":                '\\U00002193',\n\t\"DownArrowBar;\":             '\\U00002913',\n\t\"DownArrowUpArrow;\":         '\\U000021F5',\n\t\"DownBreve;\":                '\\U00000311',\n\t\"DownLeftRightVector;\":      '\\U00002950',\n\t\"DownLeftTeeVector;\":        '\\U0000295E',\n\t\"DownLeftVector;\":           '\\U000021BD',\n\t\"DownLeftVectorBar;\":        '\\U00002956',\n\t\"DownRightTeeVector;\":       '\\U0000295F',\n\t\"DownRightVector;\":          '\\U000021C1',\n\t\"DownRightVectorBar;\":       '\\U00002957',\n\t\"DownTee;\":                  '\\U000022A4',\n\t\"DownTeeArrow;\":             '\\U000021A7',\n\t\"Downarrow;\":                '\\U000021D3',\n\t\"Dscr;\":                     '\\U0001D49F',\n\t\"Dstrok;\":                   '\\U00000110',\n\t\"ENG;\":                      '\\U0000014A',\n\t\"ETH;\":                      '\\U000000D0',\n\t\"Eacute;\":                   '\\U000000C9',\n\t\"Ecaron;\":                   '\\U0000011A',\n\t\"Ecirc;\":                    '\\U000000CA',\n\t\"Ecy;\":                      '\\U0000042D',\n\t\"Edot;\":                     '\\U00000116',\n\t\"Efr;\":                      '\\U0001D508',\n\t\"Egrave;\":                   '\\U000000C8',\n\t\"Element;\":                  '\\U00002208',\n\t\"Emacr;\":                    '\\U00000112',\n\t\"EmptySmallSquare;\":         '\\U000025FB',\n\t\"EmptyVerySmallSquare;\":     '\\U000025AB',\n\t\"Eogon;\":                    '\\U00000118',\n\t\"Eopf;\":                     '\\U0001D53C',\n\t\"Epsilon;\":                  '\\U00000395',\n\t\"Equal;\":                    '\\U00002A75',\n\t\"EqualTilde;\":               '\\U00002242',\n\t\"Equilibrium;\":              '\\U000021CC',\n\t\"Escr;\":                     '\\U00002130',\n\t\"Esim;\":            
         '\\U00002A73',\n\t\"Eta;\":                      '\\U00000397',\n\t\"Euml;\":                     '\\U000000CB',\n\t\"Exists;\":                   '\\U00002203',\n\t\"ExponentialE;\":             '\\U00002147',\n\t\"Fcy;\":                      '\\U00000424',\n\t\"Ffr;\":                      '\\U0001D509',\n\t\"FilledSmallSquare;\":        '\\U000025FC',\n\t\"FilledVerySmallSquare;\":    '\\U000025AA',\n\t\"Fopf;\":                     '\\U0001D53D',\n\t\"ForAll;\":                   '\\U00002200',\n\t\"Fouriertrf;\":               '\\U00002131',\n\t\"Fscr;\":                     '\\U00002131',\n\t\"GJcy;\":                     '\\U00000403',\n\t\"GT;\":                       '\\U0000003E',\n\t\"Gamma;\":                    '\\U00000393',\n\t\"Gammad;\":                   '\\U000003DC',\n\t\"Gbreve;\":                   '\\U0000011E',\n\t\"Gcedil;\":                   '\\U00000122',\n\t\"Gcirc;\":                    '\\U0000011C',\n\t\"Gcy;\":                      '\\U00000413',\n\t\"Gdot;\":                     '\\U00000120',\n\t\"Gfr;\":                      '\\U0001D50A',\n\t\"Gg;\":                       '\\U000022D9',\n\t\"Gopf;\":                     '\\U0001D53E',\n\t\"GreaterEqual;\":             '\\U00002265',\n\t\"GreaterEqualLess;\":         '\\U000022DB',\n\t\"GreaterFullEqual;\":         '\\U00002267',\n\t\"GreaterGreater;\":           '\\U00002AA2',\n\t\"GreaterLess;\":              '\\U00002277',\n\t\"GreaterSlantEqual;\":        '\\U00002A7E',\n\t\"GreaterTilde;\":             '\\U00002273',\n\t\"Gscr;\":                     '\\U0001D4A2',\n\t\"Gt;\":                       '\\U0000226B',\n\t\"HARDcy;\":                   '\\U0000042A',\n\t\"Hacek;\":                    '\\U000002C7',\n\t\"Hat;\":                      '\\U0000005E',\n\t\"Hcirc;\":                    '\\U00000124',\n\t\"Hfr;\":                      '\\U0000210C',\n\t\"HilbertSpace;\":             '\\U0000210B',\n\t\"Hopf;\":                     
'\\U0000210D',\n\t\"HorizontalLine;\":           '\\U00002500',\n\t\"Hscr;\":                     '\\U0000210B',\n\t\"Hstrok;\":                   '\\U00000126',\n\t\"HumpDownHump;\":             '\\U0000224E',\n\t\"HumpEqual;\":                '\\U0000224F',\n\t\"IEcy;\":                     '\\U00000415',\n\t\"IJlig;\":                    '\\U00000132',\n\t\"IOcy;\":                     '\\U00000401',\n\t\"Iacute;\":                   '\\U000000CD',\n\t\"Icirc;\":                    '\\U000000CE',\n\t\"Icy;\":                      '\\U00000418',\n\t\"Idot;\":                     '\\U00000130',\n\t\"Ifr;\":                      '\\U00002111',\n\t\"Igrave;\":                   '\\U000000CC',\n\t\"Im;\":                       '\\U00002111',\n\t\"Imacr;\":                    '\\U0000012A',\n\t\"ImaginaryI;\":               '\\U00002148',\n\t\"Implies;\":                  '\\U000021D2',\n\t\"Int;\":                      '\\U0000222C',\n\t\"Integral;\":                 '\\U0000222B',\n\t\"Intersection;\":             '\\U000022C2',\n\t\"InvisibleComma;\":           '\\U00002063',\n\t\"InvisibleTimes;\":           '\\U00002062',\n\t\"Iogon;\":                    '\\U0000012E',\n\t\"Iopf;\":                     '\\U0001D540',\n\t\"Iota;\":                     '\\U00000399',\n\t\"Iscr;\":                     '\\U00002110',\n\t\"Itilde;\":                   '\\U00000128',\n\t\"Iukcy;\":                    '\\U00000406',\n\t\"Iuml;\":                     '\\U000000CF',\n\t\"Jcirc;\":                    '\\U00000134',\n\t\"Jcy;\":                      '\\U00000419',\n\t\"Jfr;\":                      '\\U0001D50D',\n\t\"Jopf;\":                     '\\U0001D541',\n\t\"Jscr;\":                     '\\U0001D4A5',\n\t\"Jsercy;\":                   '\\U00000408',\n\t\"Jukcy;\":                    '\\U00000404',\n\t\"KHcy;\":                     '\\U00000425',\n\t\"KJcy;\":                     '\\U0000040C',\n\t\"Kappa;\":                    '\\U0000039A',\n\t\"Kcedil;\":          
         '\\U00000136',\n\t\"Kcy;\":                      '\\U0000041A',\n\t\"Kfr;\":                      '\\U0001D50E',\n\t\"Kopf;\":                     '\\U0001D542',\n\t\"Kscr;\":                     '\\U0001D4A6',\n\t\"LJcy;\":                     '\\U00000409',\n\t\"LT;\":                       '\\U0000003C',\n\t\"Lacute;\":                   '\\U00000139',\n\t\"Lambda;\":                   '\\U0000039B',\n\t\"Lang;\":                     '\\U000027EA',\n\t\"Laplacetrf;\":               '\\U00002112',\n\t\"Larr;\":                     '\\U0000219E',\n\t\"Lcaron;\":                   '\\U0000013D',\n\t\"Lcedil;\":                   '\\U0000013B',\n\t\"Lcy;\":                      '\\U0000041B',\n\t\"LeftAngleBracket;\":         '\\U000027E8',\n\t\"LeftArrow;\":                '\\U00002190',\n\t\"LeftArrowBar;\":             '\\U000021E4',\n\t\"LeftArrowRightArrow;\":      '\\U000021C6',\n\t\"LeftCeiling;\":              '\\U00002308',\n\t\"LeftDoubleBracket;\":        '\\U000027E6',\n\t\"LeftDownTeeVector;\":        '\\U00002961',\n\t\"LeftDownVector;\":           '\\U000021C3',\n\t\"LeftDownVectorBar;\":        '\\U00002959',\n\t\"LeftFloor;\":                '\\U0000230A',\n\t\"LeftRightArrow;\":           '\\U00002194',\n\t\"LeftRightVector;\":          '\\U0000294E',\n\t\"LeftTee;\":                  '\\U000022A3',\n\t\"LeftTeeArrow;\":             '\\U000021A4',\n\t\"LeftTeeVector;\":            '\\U0000295A',\n\t\"LeftTriangle;\":             '\\U000022B2',\n\t\"LeftTriangleBar;\":          '\\U000029CF',\n\t\"LeftTriangleEqual;\":        '\\U000022B4',\n\t\"LeftUpDownVector;\":         '\\U00002951',\n\t\"LeftUpTeeVector;\":          '\\U00002960',\n\t\"LeftUpVector;\":             '\\U000021BF',\n\t\"LeftUpVectorBar;\":          '\\U00002958',\n\t\"LeftVector;\":               '\\U000021BC',\n\t\"LeftVectorBar;\":            '\\U00002952',\n\t\"Leftarrow;\":                '\\U000021D0',\n\t\"Leftrightarrow;\":           
'\\U000021D4',\n\t\"LessEqualGreater;\":         '\\U000022DA',\n\t\"LessFullEqual;\":            '\\U00002266',\n\t\"LessGreater;\":              '\\U00002276',\n\t\"LessLess;\":                 '\\U00002AA1',\n\t\"LessSlantEqual;\":           '\\U00002A7D',\n\t\"LessTilde;\":                '\\U00002272',\n\t\"Lfr;\":                      '\\U0001D50F',\n\t\"Ll;\":                       '\\U000022D8',\n\t\"Lleftarrow;\":               '\\U000021DA',\n\t\"Lmidot;\":                   '\\U0000013F',\n\t\"LongLeftArrow;\":            '\\U000027F5',\n\t\"LongLeftRightArrow;\":       '\\U000027F7',\n\t\"LongRightArrow;\":           '\\U000027F6',\n\t\"Longleftarrow;\":            '\\U000027F8',\n\t\"Longleftrightarrow;\":       '\\U000027FA',\n\t\"Longrightarrow;\":           '\\U000027F9',\n\t\"Lopf;\":                     '\\U0001D543',\n\t\"LowerLeftArrow;\":           '\\U00002199',\n\t\"LowerRightArrow;\":          '\\U00002198',\n\t\"Lscr;\":                     '\\U00002112',\n\t\"Lsh;\":                      '\\U000021B0',\n\t\"Lstrok;\":                   '\\U00000141',\n\t\"Lt;\":                       '\\U0000226A',\n\t\"Map;\":                      '\\U00002905',\n\t\"Mcy;\":                      '\\U0000041C',\n\t\"MediumSpace;\":              '\\U0000205F',\n\t\"Mellintrf;\":                '\\U00002133',\n\t\"Mfr;\":                      '\\U0001D510',\n\t\"MinusPlus;\":                '\\U00002213',\n\t\"Mopf;\":                     '\\U0001D544',\n\t\"Mscr;\":                     '\\U00002133',\n\t\"Mu;\":                       '\\U0000039C',\n\t\"NJcy;\":                     '\\U0000040A',\n\t\"Nacute;\":                   '\\U00000143',\n\t\"Ncaron;\":                   '\\U00000147',\n\t\"Ncedil;\":                   '\\U00000145',\n\t\"Ncy;\":                      '\\U0000041D',\n\t\"NegativeMediumSpace;\":      '\\U0000200B',\n\t\"NegativeThickSpace;\":       '\\U0000200B',\n\t\"NegativeThinSpace;\":        
'\\U0000200B',\n\t\"NegativeVeryThinSpace;\":    '\\U0000200B',\n\t\"NestedGreaterGreater;\":     '\\U0000226B',\n\t\"NestedLessLess;\":           '\\U0000226A',\n\t\"NewLine;\":                  '\\U0000000A',\n\t\"Nfr;\":                      '\\U0001D511',\n\t\"NoBreak;\":                  '\\U00002060',\n\t\"NonBreakingSpace;\":         '\\U000000A0',\n\t\"Nopf;\":                     '\\U00002115',\n\t\"Not;\":                      '\\U00002AEC',\n\t\"NotCongruent;\":             '\\U00002262',\n\t\"NotCupCap;\":                '\\U0000226D',\n\t\"NotDoubleVerticalBar;\":     '\\U00002226',\n\t\"NotElement;\":               '\\U00002209',\n\t\"NotEqual;\":                 '\\U00002260',\n\t\"NotExists;\":                '\\U00002204',\n\t\"NotGreater;\":               '\\U0000226F',\n\t\"NotGreaterEqual;\":          '\\U00002271',\n\t\"NotGreaterLess;\":           '\\U00002279',\n\t\"NotGreaterTilde;\":          '\\U00002275',\n\t\"NotLeftTriangle;\":          '\\U000022EA',\n\t\"NotLeftTriangleEqual;\":     '\\U000022EC',\n\t\"NotLess;\":                  '\\U0000226E',\n\t\"NotLessEqual;\":             '\\U00002270',\n\t\"NotLessGreater;\":           '\\U00002278',\n\t\"NotLessTilde;\":             '\\U00002274',\n\t\"NotPrecedes;\":              '\\U00002280',\n\t\"NotPrecedesSlantEqual;\":    '\\U000022E0',\n\t\"NotReverseElement;\":        '\\U0000220C',\n\t\"NotRightTriangle;\":         '\\U000022EB',\n\t\"NotRightTriangleEqual;\":    '\\U000022ED',\n\t\"NotSquareSubsetEqual;\":     '\\U000022E2',\n\t\"NotSquareSupersetEqual;\":   '\\U000022E3',\n\t\"NotSubsetEqual;\":           '\\U00002288',\n\t\"NotSucceeds;\":              '\\U00002281',\n\t\"NotSucceedsSlantEqual;\":    '\\U000022E1',\n\t\"NotSupersetEqual;\":         '\\U00002289',\n\t\"NotTilde;\":                 '\\U00002241',\n\t\"NotTildeEqual;\":            '\\U00002244',\n\t\"NotTildeFullEqual;\":        '\\U00002247',\n\t\"NotTildeTilde;\":            '\\U00002249',\n\t\"NotVerticalBar;\":  
         '\\U00002224',\n\t\"Nscr;\":                     '\\U0001D4A9',\n\t\"Ntilde;\":                   '\\U000000D1',\n\t\"Nu;\":                       '\\U0000039D',\n\t\"OElig;\":                    '\\U00000152',\n\t\"Oacute;\":                   '\\U000000D3',\n\t\"Ocirc;\":                    '\\U000000D4',\n\t\"Ocy;\":                      '\\U0000041E',\n\t\"Odblac;\":                   '\\U00000150',\n\t\"Ofr;\":                      '\\U0001D512',\n\t\"Ograve;\":                   '\\U000000D2',\n\t\"Omacr;\":                    '\\U0000014C',\n\t\"Omega;\":                    '\\U000003A9',\n\t\"Omicron;\":                  '\\U0000039F',\n\t\"Oopf;\":                     '\\U0001D546',\n\t\"OpenCurlyDoubleQuote;\":     '\\U0000201C',\n\t\"OpenCurlyQuote;\":           '\\U00002018',\n\t\"Or;\":                       '\\U00002A54',\n\t\"Oscr;\":                     '\\U0001D4AA',\n\t\"Oslash;\":                   '\\U000000D8',\n\t\"Otilde;\":                   '\\U000000D5',\n\t\"Otimes;\":                   '\\U00002A37',\n\t\"Ouml;\":                     '\\U000000D6',\n\t\"OverBar;\":                  '\\U0000203E',\n\t\"OverBrace;\":                '\\U000023DE',\n\t\"OverBracket;\":              '\\U000023B4',\n\t\"OverParenthesis;\":          '\\U000023DC',\n\t\"PartialD;\":                 '\\U00002202',\n\t\"Pcy;\":                      '\\U0000041F',\n\t\"Pfr;\":                      '\\U0001D513',\n\t\"Phi;\":                      '\\U000003A6',\n\t\"Pi;\":                       '\\U000003A0',\n\t\"PlusMinus;\":                '\\U000000B1',\n\t\"Poincareplane;\":            '\\U0000210C',\n\t\"Popf;\":                     '\\U00002119',\n\t\"Pr;\":                       '\\U00002ABB',\n\t\"Precedes;\":                 '\\U0000227A',\n\t\"PrecedesEqual;\":            '\\U00002AAF',\n\t\"PrecedesSlantEqual;\":       '\\U0000227C',\n\t\"PrecedesTilde;\":            '\\U0000227E',\n\t\"Prime;\":                    
'\\U00002033',\n\t\"Product;\":                  '\\U0000220F',\n\t\"Proportion;\":               '\\U00002237',\n\t\"Proportional;\":             '\\U0000221D',\n\t\"Pscr;\":                     '\\U0001D4AB',\n\t\"Psi;\":                      '\\U000003A8',\n\t\"QUOT;\":                     '\\U00000022',\n\t\"Qfr;\":                      '\\U0001D514',\n\t\"Qopf;\":                     '\\U0000211A',\n\t\"Qscr;\":                     '\\U0001D4AC',\n\t\"RBarr;\":                    '\\U00002910',\n\t\"REG;\":                      '\\U000000AE',\n\t\"Racute;\":                   '\\U00000154',\n\t\"Rang;\":                     '\\U000027EB',\n\t\"Rarr;\":                     '\\U000021A0',\n\t\"Rarrtl;\":                   '\\U00002916',\n\t\"Rcaron;\":                   '\\U00000158',\n\t\"Rcedil;\":                   '\\U00000156',\n\t\"Rcy;\":                      '\\U00000420',\n\t\"Re;\":                       '\\U0000211C',\n\t\"ReverseElement;\":           '\\U0000220B',\n\t\"ReverseEquilibrium;\":       '\\U000021CB',\n\t\"ReverseUpEquilibrium;\":     '\\U0000296F',\n\t\"Rfr;\":                      '\\U0000211C',\n\t\"Rho;\":                      '\\U000003A1',\n\t\"RightAngleBracket;\":        '\\U000027E9',\n\t\"RightArrow;\":               '\\U00002192',\n\t\"RightArrowBar;\":            '\\U000021E5',\n\t\"RightArrowLeftArrow;\":      '\\U000021C4',\n\t\"RightCeiling;\":             '\\U00002309',\n\t\"RightDoubleBracket;\":       '\\U000027E7',\n\t\"RightDownTeeVector;\":       '\\U0000295D',\n\t\"RightDownVector;\":          '\\U000021C2',\n\t\"RightDownVectorBar;\":       '\\U00002955',\n\t\"RightFloor;\":               '\\U0000230B',\n\t\"RightTee;\":                 '\\U000022A2',\n\t\"RightTeeArrow;\":            '\\U000021A6',\n\t\"RightTeeVector;\":           '\\U0000295B',\n\t\"RightTriangle;\":            '\\U000022B3',\n\t\"RightTriangleBar;\":         '\\U000029D0',\n\t\"RightTriangleEqual;\":       
'\\U000022B5',\n\t\"RightUpDownVector;\":        '\\U0000294F',\n\t\"RightUpTeeVector;\":         '\\U0000295C',\n\t\"RightUpVector;\":            '\\U000021BE',\n\t\"RightUpVectorBar;\":         '\\U00002954',\n\t\"RightVector;\":              '\\U000021C0',\n\t\"RightVectorBar;\":           '\\U00002953',\n\t\"Rightarrow;\":               '\\U000021D2',\n\t\"Ropf;\":                     '\\U0000211D',\n\t\"RoundImplies;\":             '\\U00002970',\n\t\"Rrightarrow;\":              '\\U000021DB',\n\t\"Rscr;\":                     '\\U0000211B',\n\t\"Rsh;\":                      '\\U000021B1',\n\t\"RuleDelayed;\":              '\\U000029F4',\n\t\"SHCHcy;\":                   '\\U00000429',\n\t\"SHcy;\":                     '\\U00000428',\n\t\"SOFTcy;\":                   '\\U0000042C',\n\t\"Sacute;\":                   '\\U0000015A',\n\t\"Sc;\":                       '\\U00002ABC',\n\t\"Scaron;\":                   '\\U00000160',\n\t\"Scedil;\":                   '\\U0000015E',\n\t\"Scirc;\":                    '\\U0000015C',\n\t\"Scy;\":                      '\\U00000421',\n\t\"Sfr;\":                      '\\U0001D516',\n\t\"ShortDownArrow;\":           '\\U00002193',\n\t\"ShortLeftArrow;\":           '\\U00002190',\n\t\"ShortRightArrow;\":          '\\U00002192',\n\t\"ShortUpArrow;\":             '\\U00002191',\n\t\"Sigma;\":                    '\\U000003A3',\n\t\"SmallCircle;\":              '\\U00002218',\n\t\"Sopf;\":                     '\\U0001D54A',\n\t\"Sqrt;\":                     '\\U0000221A',\n\t\"Square;\":                   '\\U000025A1',\n\t\"SquareIntersection;\":       '\\U00002293',\n\t\"SquareSubset;\":             '\\U0000228F',\n\t\"SquareSubsetEqual;\":        '\\U00002291',\n\t\"SquareSuperset;\":           '\\U00002290',\n\t\"SquareSupersetEqual;\":      '\\U00002292',\n\t\"SquareUnion;\":              '\\U00002294',\n\t\"Sscr;\":                     '\\U0001D4AE',\n\t\"Star;\":                     '\\U000022C6',\n\t\"Sub;\":             
         '\\U000022D0',\n\t\"Subset;\":                   '\\U000022D0',\n\t\"SubsetEqual;\":              '\\U00002286',\n\t\"Succeeds;\":                 '\\U0000227B',\n\t\"SucceedsEqual;\":            '\\U00002AB0',\n\t\"SucceedsSlantEqual;\":       '\\U0000227D',\n\t\"SucceedsTilde;\":            '\\U0000227F',\n\t\"SuchThat;\":                 '\\U0000220B',\n\t\"Sum;\":                      '\\U00002211',\n\t\"Sup;\":                      '\\U000022D1',\n\t\"Superset;\":                 '\\U00002283',\n\t\"SupersetEqual;\":            '\\U00002287',\n\t\"Supset;\":                   '\\U000022D1',\n\t\"THORN;\":                    '\\U000000DE',\n\t\"TRADE;\":                    '\\U00002122',\n\t\"TSHcy;\":                    '\\U0000040B',\n\t\"TScy;\":                     '\\U00000426',\n\t\"Tab;\":                      '\\U00000009',\n\t\"Tau;\":                      '\\U000003A4',\n\t\"Tcaron;\":                   '\\U00000164',\n\t\"Tcedil;\":                   '\\U00000162',\n\t\"Tcy;\":                      '\\U00000422',\n\t\"Tfr;\":                      '\\U0001D517',\n\t\"Therefore;\":                '\\U00002234',\n\t\"Theta;\":                    '\\U00000398',\n\t\"ThinSpace;\":                '\\U00002009',\n\t\"Tilde;\":                    '\\U0000223C',\n\t\"TildeEqual;\":               '\\U00002243',\n\t\"TildeFullEqual;\":           '\\U00002245',\n\t\"TildeTilde;\":               '\\U00002248',\n\t\"Topf;\":                     '\\U0001D54B',\n\t\"TripleDot;\":                '\\U000020DB',\n\t\"Tscr;\":                     '\\U0001D4AF',\n\t\"Tstrok;\":                   '\\U00000166',\n\t\"Uacute;\":                   '\\U000000DA',\n\t\"Uarr;\":                     '\\U0000219F',\n\t\"Uarrocir;\":                 '\\U00002949',\n\t\"Ubrcy;\":                    '\\U0000040E',\n\t\"Ubreve;\":                   '\\U0000016C',\n\t\"Ucirc;\":                    '\\U000000DB',\n\t\"Ucy;\":                      '\\U00000423',\n\t\"Udblac;\": 
                  '\\U00000170',\n\t\"Ufr;\":                      '\\U0001D518',\n\t\"Ugrave;\":                   '\\U000000D9',\n\t\"Umacr;\":                    '\\U0000016A',\n\t\"UnderBar;\":                 '\\U0000005F',\n\t\"UnderBrace;\":               '\\U000023DF',\n\t\"UnderBracket;\":             '\\U000023B5',\n\t\"UnderParenthesis;\":         '\\U000023DD',\n\t\"Union;\":                    '\\U000022C3',\n\t\"UnionPlus;\":                '\\U0000228E',\n\t\"Uogon;\":                    '\\U00000172',\n\t\"Uopf;\":                     '\\U0001D54C',\n\t\"UpArrow;\":                  '\\U00002191',\n\t\"UpArrowBar;\":               '\\U00002912',\n\t\"UpArrowDownArrow;\":         '\\U000021C5',\n\t\"UpDownArrow;\":              '\\U00002195',\n\t\"UpEquilibrium;\":            '\\U0000296E',\n\t\"UpTee;\":                    '\\U000022A5',\n\t\"UpTeeArrow;\":               '\\U000021A5',\n\t\"Uparrow;\":                  '\\U000021D1',\n\t\"Updownarrow;\":              '\\U000021D5',\n\t\"UpperLeftArrow;\":           '\\U00002196',\n\t\"UpperRightArrow;\":          '\\U00002197',\n\t\"Upsi;\":                     '\\U000003D2',\n\t\"Upsilon;\":                  '\\U000003A5',\n\t\"Uring;\":                    '\\U0000016E',\n\t\"Uscr;\":                     '\\U0001D4B0',\n\t\"Utilde;\":                   '\\U00000168',\n\t\"Uuml;\":                     '\\U000000DC',\n\t\"VDash;\":                    '\\U000022AB',\n\t\"Vbar;\":                     '\\U00002AEB',\n\t\"Vcy;\":                      '\\U00000412',\n\t\"Vdash;\":                    '\\U000022A9',\n\t\"Vdashl;\":                   '\\U00002AE6',\n\t\"Vee;\":                      '\\U000022C1',\n\t\"Verbar;\":                   '\\U00002016',\n\t\"Vert;\":                     '\\U00002016',\n\t\"VerticalBar;\":              '\\U00002223',\n\t\"VerticalLine;\":             '\\U0000007C',\n\t\"VerticalSeparator;\":        '\\U00002758',\n\t\"VerticalTilde;\":            
'\\U00002240',\n\t\"VeryThinSpace;\":            '\\U0000200A',\n\t\"Vfr;\":                      '\\U0001D519',\n\t\"Vopf;\":                     '\\U0001D54D',\n\t\"Vscr;\":                     '\\U0001D4B1',\n\t\"Vvdash;\":                   '\\U000022AA',\n\t\"Wcirc;\":                    '\\U00000174',\n\t\"Wedge;\":                    '\\U000022C0',\n\t\"Wfr;\":                      '\\U0001D51A',\n\t\"Wopf;\":                     '\\U0001D54E',\n\t\"Wscr;\":                     '\\U0001D4B2',\n\t\"Xfr;\":                      '\\U0001D51B',\n\t\"Xi;\":                       '\\U0000039E',\n\t\"Xopf;\":                     '\\U0001D54F',\n\t\"Xscr;\":                     '\\U0001D4B3',\n\t\"YAcy;\":                     '\\U0000042F',\n\t\"YIcy;\":                     '\\U00000407',\n\t\"YUcy;\":                     '\\U0000042E',\n\t\"Yacute;\":                   '\\U000000DD',\n\t\"Ycirc;\":                    '\\U00000176',\n\t\"Ycy;\":                      '\\U0000042B',\n\t\"Yfr;\":                      '\\U0001D51C',\n\t\"Yopf;\":                     '\\U0001D550',\n\t\"Yscr;\":                     '\\U0001D4B4',\n\t\"Yuml;\":                     '\\U00000178',\n\t\"ZHcy;\":                     '\\U00000416',\n\t\"Zacute;\":                   '\\U00000179',\n\t\"Zcaron;\":                   '\\U0000017D',\n\t\"Zcy;\":                      '\\U00000417',\n\t\"Zdot;\":                     '\\U0000017B',\n\t\"ZeroWidthSpace;\":           '\\U0000200B',\n\t\"Zeta;\":                     '\\U00000396',\n\t\"Zfr;\":                      '\\U00002128',\n\t\"Zopf;\":                     '\\U00002124',\n\t\"Zscr;\":                     '\\U0001D4B5',\n\t\"aacute;\":                   '\\U000000E1',\n\t\"abreve;\":                   '\\U00000103',\n\t\"ac;\":                       '\\U0000223E',\n\t\"acd;\":                      '\\U0000223F',\n\t\"acirc;\":                    '\\U000000E2',\n\t\"acute;\":                    '\\U000000B4',\n\t\"acy;\":             
         '\\U00000430',\n\t\"aelig;\":                    '\\U000000E6',\n\t\"af;\":                       '\\U00002061',\n\t\"afr;\":                      '\\U0001D51E',\n\t\"agrave;\":                   '\\U000000E0',\n\t\"alefsym;\":                  '\\U00002135',\n\t\"aleph;\":                    '\\U00002135',\n\t\"alpha;\":                    '\\U000003B1',\n\t\"amacr;\":                    '\\U00000101',\n\t\"amalg;\":                    '\\U00002A3F',\n\t\"amp;\":                      '\\U00000026',\n\t\"and;\":                      '\\U00002227',\n\t\"andand;\":                   '\\U00002A55',\n\t\"andd;\":                     '\\U00002A5C',\n\t\"andslope;\":                 '\\U00002A58',\n\t\"andv;\":                     '\\U00002A5A',\n\t\"ang;\":                      '\\U00002220',\n\t\"ange;\":                     '\\U000029A4',\n\t\"angle;\":                    '\\U00002220',\n\t\"angmsd;\":                   '\\U00002221',\n\t\"angmsdaa;\":                 '\\U000029A8',\n\t\"angmsdab;\":                 '\\U000029A9',\n\t\"angmsdac;\":                 '\\U000029AA',\n\t\"angmsdad;\":                 '\\U000029AB',\n\t\"angmsdae;\":                 '\\U000029AC',\n\t\"angmsdaf;\":                 '\\U000029AD',\n\t\"angmsdag;\":                 '\\U000029AE',\n\t\"angmsdah;\":                 '\\U000029AF',\n\t\"angrt;\":                    '\\U0000221F',\n\t\"angrtvb;\":                  '\\U000022BE',\n\t\"angrtvbd;\":                 '\\U0000299D',\n\t\"angsph;\":                   '\\U00002222',\n\t\"angst;\":                    '\\U000000C5',\n\t\"angzarr;\":                  '\\U0000237C',\n\t\"aogon;\":                    '\\U00000105',\n\t\"aopf;\":                     '\\U0001D552',\n\t\"ap;\":                       '\\U00002248',\n\t\"apE;\":                      '\\U00002A70',\n\t\"apacir;\":                   '\\U00002A6F',\n\t\"ape;\":                      '\\U0000224A',\n\t\"apid;\":                     '\\U0000224B',\n\t\"apos;\":   
                  '\\U00000027',\n\t\"approx;\":                   '\\U00002248',\n\t\"approxeq;\":                 '\\U0000224A',\n\t\"aring;\":                    '\\U000000E5',\n\t\"ascr;\":                     '\\U0001D4B6',\n\t\"ast;\":                      '\\U0000002A',\n\t\"asymp;\":                    '\\U00002248',\n\t\"asympeq;\":                  '\\U0000224D',\n\t\"atilde;\":                   '\\U000000E3',\n\t\"auml;\":                     '\\U000000E4',\n\t\"awconint;\":                 '\\U00002233',\n\t\"awint;\":                    '\\U00002A11',\n\t\"bNot;\":                     '\\U00002AED',\n\t\"backcong;\":                 '\\U0000224C',\n\t\"backepsilon;\":              '\\U000003F6',\n\t\"backprime;\":                '\\U00002035',\n\t\"backsim;\":                  '\\U0000223D',\n\t\"backsimeq;\":                '\\U000022CD',\n\t\"barvee;\":                   '\\U000022BD',\n\t\"barwed;\":                   '\\U00002305',\n\t\"barwedge;\":                 '\\U00002305',\n\t\"bbrk;\":                     '\\U000023B5',\n\t\"bbrktbrk;\":                 '\\U000023B6',\n\t\"bcong;\":                    '\\U0000224C',\n\t\"bcy;\":                      '\\U00000431',\n\t\"bdquo;\":                    '\\U0000201E',\n\t\"becaus;\":                   '\\U00002235',\n\t\"because;\":                  '\\U00002235',\n\t\"bemptyv;\":                  '\\U000029B0',\n\t\"bepsi;\":                    '\\U000003F6',\n\t\"bernou;\":                   '\\U0000212C',\n\t\"beta;\":                     '\\U000003B2',\n\t\"beth;\":                     '\\U00002136',\n\t\"between;\":                  '\\U0000226C',\n\t\"bfr;\":                      '\\U0001D51F',\n\t\"bigcap;\":                   '\\U000022C2',\n\t\"bigcirc;\":                  '\\U000025EF',\n\t\"bigcup;\":                   '\\U000022C3',\n\t\"bigodot;\":                  '\\U00002A00',\n\t\"bigoplus;\":                 '\\U00002A01',\n\t\"bigotimes;\":                
'\\U00002A02',\n\t\"bigsqcup;\":                 '\\U00002A06',\n\t\"bigstar;\":                  '\\U00002605',\n\t\"bigtriangledown;\":          '\\U000025BD',\n\t\"bigtriangleup;\":            '\\U000025B3',\n\t\"biguplus;\":                 '\\U00002A04',\n\t\"bigvee;\":                   '\\U000022C1',\n\t\"bigwedge;\":                 '\\U000022C0',\n\t\"bkarow;\":                   '\\U0000290D',\n\t\"blacklozenge;\":             '\\U000029EB',\n\t\"blacksquare;\":              '\\U000025AA',\n\t\"blacktriangle;\":            '\\U000025B4',\n\t\"blacktriangledown;\":        '\\U000025BE',\n\t\"blacktriangleleft;\":        '\\U000025C2',\n\t\"blacktriangleright;\":       '\\U000025B8',\n\t\"blank;\":                    '\\U00002423',\n\t\"blk12;\":                    '\\U00002592',\n\t\"blk14;\":                    '\\U00002591',\n\t\"blk34;\":                    '\\U00002593',\n\t\"block;\":                    '\\U00002588',\n\t\"bnot;\":                     '\\U00002310',\n\t\"bopf;\":                     '\\U0001D553',\n\t\"bot;\":                      '\\U000022A5',\n\t\"bottom;\":                   '\\U000022A5',\n\t\"bowtie;\":                   '\\U000022C8',\n\t\"boxDL;\":                    '\\U00002557',\n\t\"boxDR;\":                    '\\U00002554',\n\t\"boxDl;\":                    '\\U00002556',\n\t\"boxDr;\":                    '\\U00002553',\n\t\"boxH;\":                     '\\U00002550',\n\t\"boxHD;\":                    '\\U00002566',\n\t\"boxHU;\":                    '\\U00002569',\n\t\"boxHd;\":                    '\\U00002564',\n\t\"boxHu;\":                    '\\U00002567',\n\t\"boxUL;\":                    '\\U0000255D',\n\t\"boxUR;\":                    '\\U0000255A',\n\t\"boxUl;\":                    '\\U0000255C',\n\t\"boxUr;\":                    '\\U00002559',\n\t\"boxV;\":                     '\\U00002551',\n\t\"boxVH;\":                    '\\U0000256C',\n\t\"boxVL;\":                    '\\U00002563',\n\t\"boxVR;\":           
         '\\U00002560',\n\t\"boxVh;\":                    '\\U0000256B',\n\t\"boxVl;\":                    '\\U00002562',\n\t\"boxVr;\":                    '\\U0000255F',\n\t\"boxbox;\":                   '\\U000029C9',\n\t\"boxdL;\":                    '\\U00002555',\n\t\"boxdR;\":                    '\\U00002552',\n\t\"boxdl;\":                    '\\U00002510',\n\t\"boxdr;\":                    '\\U0000250C',\n\t\"boxh;\":                     '\\U00002500',\n\t\"boxhD;\":                    '\\U00002565',\n\t\"boxhU;\":                    '\\U00002568',\n\t\"boxhd;\":                    '\\U0000252C',\n\t\"boxhu;\":                    '\\U00002534',\n\t\"boxminus;\":                 '\\U0000229F',\n\t\"boxplus;\":                  '\\U0000229E',\n\t\"boxtimes;\":                 '\\U000022A0',\n\t\"boxuL;\":                    '\\U0000255B',\n\t\"boxuR;\":                    '\\U00002558',\n\t\"boxul;\":                    '\\U00002518',\n\t\"boxur;\":                    '\\U00002514',\n\t\"boxv;\":                     '\\U00002502',\n\t\"boxvH;\":                    '\\U0000256A',\n\t\"boxvL;\":                    '\\U00002561',\n\t\"boxvR;\":                    '\\U0000255E',\n\t\"boxvh;\":                    '\\U0000253C',\n\t\"boxvl;\":                    '\\U00002524',\n\t\"boxvr;\":                    '\\U0000251C',\n\t\"bprime;\":                   '\\U00002035',\n\t\"breve;\":                    '\\U000002D8',\n\t\"brvbar;\":                   '\\U000000A6',\n\t\"bscr;\":                     '\\U0001D4B7',\n\t\"bsemi;\":                    '\\U0000204F',\n\t\"bsim;\":                     '\\U0000223D',\n\t\"bsime;\":                    '\\U000022CD',\n\t\"bsol;\":                     '\\U0000005C',\n\t\"bsolb;\":                    '\\U000029C5',\n\t\"bsolhsub;\":                 '\\U000027C8',\n\t\"bull;\":                     '\\U00002022',\n\t\"bullet;\":                   '\\U00002022',\n\t\"bump;\":                     '\\U0000224E',\n\t\"bumpE;\":  
                  '\\U00002AAE',\n\t\"bumpe;\":                    '\\U0000224F',\n\t\"bumpeq;\":                   '\\U0000224F',\n\t\"cacute;\":                   '\\U00000107',\n\t\"cap;\":                      '\\U00002229',\n\t\"capand;\":                   '\\U00002A44',\n\t\"capbrcup;\":                 '\\U00002A49',\n\t\"capcap;\":                   '\\U00002A4B',\n\t\"capcup;\":                   '\\U00002A47',\n\t\"capdot;\":                   '\\U00002A40',\n\t\"caret;\":                    '\\U00002041',\n\t\"caron;\":                    '\\U000002C7',\n\t\"ccaps;\":                    '\\U00002A4D',\n\t\"ccaron;\":                   '\\U0000010D',\n\t\"ccedil;\":                   '\\U000000E7',\n\t\"ccirc;\":                    '\\U00000109',\n\t\"ccups;\":                    '\\U00002A4C',\n\t\"ccupssm;\":                  '\\U00002A50',\n\t\"cdot;\":                     '\\U0000010B',\n\t\"cedil;\":                    '\\U000000B8',\n\t\"cemptyv;\":                  '\\U000029B2',\n\t\"cent;\":                     '\\U000000A2',\n\t\"centerdot;\":                '\\U000000B7',\n\t\"cfr;\":                      '\\U0001D520',\n\t\"chcy;\":                     '\\U00000447',\n\t\"check;\":                    '\\U00002713',\n\t\"checkmark;\":                '\\U00002713',\n\t\"chi;\":                      '\\U000003C7',\n\t\"cir;\":                      '\\U000025CB',\n\t\"cirE;\":                     '\\U000029C3',\n\t\"circ;\":                     '\\U000002C6',\n\t\"circeq;\":                   '\\U00002257',\n\t\"circlearrowleft;\":          '\\U000021BA',\n\t\"circlearrowright;\":         '\\U000021BB',\n\t\"circledR;\":                 '\\U000000AE',\n\t\"circledS;\":                 '\\U000024C8',\n\t\"circledast;\":               '\\U0000229B',\n\t\"circledcirc;\":              '\\U0000229A',\n\t\"circleddash;\":              '\\U0000229D',\n\t\"cire;\":                     '\\U00002257',\n\t\"cirfnint;\":                 
'\\U00002A10',\n\t\"cirmid;\":                   '\\U00002AEF',\n\t\"cirscir;\":                  '\\U000029C2',\n\t\"clubs;\":                    '\\U00002663',\n\t\"clubsuit;\":                 '\\U00002663',\n\t\"colon;\":                    '\\U0000003A',\n\t\"colone;\":                   '\\U00002254',\n\t\"coloneq;\":                  '\\U00002254',\n\t\"comma;\":                    '\\U0000002C',\n\t\"commat;\":                   '\\U00000040',\n\t\"comp;\":                     '\\U00002201',\n\t\"compfn;\":                   '\\U00002218',\n\t\"complement;\":               '\\U00002201',\n\t\"complexes;\":                '\\U00002102',\n\t\"cong;\":                     '\\U00002245',\n\t\"congdot;\":                  '\\U00002A6D',\n\t\"conint;\":                   '\\U0000222E',\n\t\"copf;\":                     '\\U0001D554',\n\t\"coprod;\":                   '\\U00002210',\n\t\"copy;\":                     '\\U000000A9',\n\t\"copysr;\":                   '\\U00002117',\n\t\"crarr;\":                    '\\U000021B5',\n\t\"cross;\":                    '\\U00002717',\n\t\"cscr;\":                     '\\U0001D4B8',\n\t\"csub;\":                     '\\U00002ACF',\n\t\"csube;\":                    '\\U00002AD1',\n\t\"csup;\":                     '\\U00002AD0',\n\t\"csupe;\":                    '\\U00002AD2',\n\t\"ctdot;\":                    '\\U000022EF',\n\t\"cudarrl;\":                  '\\U00002938',\n\t\"cudarrr;\":                  '\\U00002935',\n\t\"cuepr;\":                    '\\U000022DE',\n\t\"cuesc;\":                    '\\U000022DF',\n\t\"cularr;\":                   '\\U000021B6',\n\t\"cularrp;\":                  '\\U0000293D',\n\t\"cup;\":                      '\\U0000222A',\n\t\"cupbrcap;\":                 '\\U00002A48',\n\t\"cupcap;\":                   '\\U00002A46',\n\t\"cupcup;\":                   '\\U00002A4A',\n\t\"cupdot;\":                   '\\U0000228D',\n\t\"cupor;\":                    '\\U00002A45',\n\t\"curarr;\":          
         '\\U000021B7',\n\t\"curarrm;\":                  '\\U0000293C',\n\t\"curlyeqprec;\":              '\\U000022DE',\n\t\"curlyeqsucc;\":              '\\U000022DF',\n\t\"curlyvee;\":                 '\\U000022CE',\n\t\"curlywedge;\":               '\\U000022CF',\n\t\"curren;\":                   '\\U000000A4',\n\t\"curvearrowleft;\":           '\\U000021B6',\n\t\"curvearrowright;\":          '\\U000021B7',\n\t\"cuvee;\":                    '\\U000022CE',\n\t\"cuwed;\":                    '\\U000022CF',\n\t\"cwconint;\":                 '\\U00002232',\n\t\"cwint;\":                    '\\U00002231',\n\t\"cylcty;\":                   '\\U0000232D',\n\t\"dArr;\":                     '\\U000021D3',\n\t\"dHar;\":                     '\\U00002965',\n\t\"dagger;\":                   '\\U00002020',\n\t\"daleth;\":                   '\\U00002138',\n\t\"darr;\":                     '\\U00002193',\n\t\"dash;\":                     '\\U00002010',\n\t\"dashv;\":                    '\\U000022A3',\n\t\"dbkarow;\":                  '\\U0000290F',\n\t\"dblac;\":                    '\\U000002DD',\n\t\"dcaron;\":                   '\\U0000010F',\n\t\"dcy;\":                      '\\U00000434',\n\t\"dd;\":                       '\\U00002146',\n\t\"ddagger;\":                  '\\U00002021',\n\t\"ddarr;\":                    '\\U000021CA',\n\t\"ddotseq;\":                  '\\U00002A77',\n\t\"deg;\":                      '\\U000000B0',\n\t\"delta;\":                    '\\U000003B4',\n\t\"demptyv;\":                  '\\U000029B1',\n\t\"dfisht;\":                   '\\U0000297F',\n\t\"dfr;\":                      '\\U0001D521',\n\t\"dharl;\":                    '\\U000021C3',\n\t\"dharr;\":                    '\\U000021C2',\n\t\"diam;\":                     '\\U000022C4',\n\t\"diamond;\":                  '\\U000022C4',\n\t\"diamondsuit;\":              '\\U00002666',\n\t\"diams;\":                    '\\U00002666',\n\t\"die;\":                      
'\\U000000A8',\n\t\"digamma;\":                  '\\U000003DD',\n\t\"disin;\":                    '\\U000022F2',\n\t\"div;\":                      '\\U000000F7',\n\t\"divide;\":                   '\\U000000F7',\n\t\"divideontimes;\":            '\\U000022C7',\n\t\"divonx;\":                   '\\U000022C7',\n\t\"djcy;\":                     '\\U00000452',\n\t\"dlcorn;\":                   '\\U0000231E',\n\t\"dlcrop;\":                   '\\U0000230D',\n\t\"dollar;\":                   '\\U00000024',\n\t\"dopf;\":                     '\\U0001D555',\n\t\"dot;\":                      '\\U000002D9',\n\t\"doteq;\":                    '\\U00002250',\n\t\"doteqdot;\":                 '\\U00002251',\n\t\"dotminus;\":                 '\\U00002238',\n\t\"dotplus;\":                  '\\U00002214',\n\t\"dotsquare;\":                '\\U000022A1',\n\t\"doublebarwedge;\":           '\\U00002306',\n\t\"downarrow;\":                '\\U00002193',\n\t\"downdownarrows;\":           '\\U000021CA',\n\t\"downharpoonleft;\":          '\\U000021C3',\n\t\"downharpoonright;\":         '\\U000021C2',\n\t\"drbkarow;\":                 '\\U00002910',\n\t\"drcorn;\":                   '\\U0000231F',\n\t\"drcrop;\":                   '\\U0000230C',\n\t\"dscr;\":                     '\\U0001D4B9',\n\t\"dscy;\":                     '\\U00000455',\n\t\"dsol;\":                     '\\U000029F6',\n\t\"dstrok;\":                   '\\U00000111',\n\t\"dtdot;\":                    '\\U000022F1',\n\t\"dtri;\":                     '\\U000025BF',\n\t\"dtrif;\":                    '\\U000025BE',\n\t\"duarr;\":                    '\\U000021F5',\n\t\"duhar;\":                    '\\U0000296F',\n\t\"dwangle;\":                  '\\U000029A6',\n\t\"dzcy;\":                     '\\U0000045F',\n\t\"dzigrarr;\":                 '\\U000027FF',\n\t\"eDDot;\":                    '\\U00002A77',\n\t\"eDot;\":                     '\\U00002251',\n\t\"eacute;\":                   '\\U000000E9',\n\t\"easter;\":          
         '\\U00002A6E',\n\t\"ecaron;\":                   '\\U0000011B',\n\t\"ecir;\":                     '\\U00002256',\n\t\"ecirc;\":                    '\\U000000EA',\n\t\"ecolon;\":                   '\\U00002255',\n\t\"ecy;\":                      '\\U0000044D',\n\t\"edot;\":                     '\\U00000117',\n\t\"ee;\":                       '\\U00002147',\n\t\"efDot;\":                    '\\U00002252',\n\t\"efr;\":                      '\\U0001D522',\n\t\"eg;\":                       '\\U00002A9A',\n\t\"egrave;\":                   '\\U000000E8',\n\t\"egs;\":                      '\\U00002A96',\n\t\"egsdot;\":                   '\\U00002A98',\n\t\"el;\":                       '\\U00002A99',\n\t\"elinters;\":                 '\\U000023E7',\n\t\"ell;\":                      '\\U00002113',\n\t\"els;\":                      '\\U00002A95',\n\t\"elsdot;\":                   '\\U00002A97',\n\t\"emacr;\":                    '\\U00000113',\n\t\"empty;\":                    '\\U00002205',\n\t\"emptyset;\":                 '\\U00002205',\n\t\"emptyv;\":                   '\\U00002205',\n\t\"emsp;\":                     '\\U00002003',\n\t\"emsp13;\":                   '\\U00002004',\n\t\"emsp14;\":                   '\\U00002005',\n\t\"eng;\":                      '\\U0000014B',\n\t\"ensp;\":                     '\\U00002002',\n\t\"eogon;\":                    '\\U00000119',\n\t\"eopf;\":                     '\\U0001D556',\n\t\"epar;\":                     '\\U000022D5',\n\t\"eparsl;\":                   '\\U000029E3',\n\t\"eplus;\":                    '\\U00002A71',\n\t\"epsi;\":                     '\\U000003B5',\n\t\"epsilon;\":                  '\\U000003B5',\n\t\"epsiv;\":                    '\\U000003F5',\n\t\"eqcirc;\":                   '\\U00002256',\n\t\"eqcolon;\":                  '\\U00002255',\n\t\"eqsim;\":                    '\\U00002242',\n\t\"eqslantgtr;\":               '\\U00002A96',\n\t\"eqslantless;\":              '\\U00002A95',\n\t\"equals;\": 
                  '\\U0000003D',\n\t\"equest;\":                   '\\U0000225F',\n\t\"equiv;\":                    '\\U00002261',\n\t\"equivDD;\":                  '\\U00002A78',\n\t\"eqvparsl;\":                 '\\U000029E5',\n\t\"erDot;\":                    '\\U00002253',\n\t\"erarr;\":                    '\\U00002971',\n\t\"escr;\":                     '\\U0000212F',\n\t\"esdot;\":                    '\\U00002250',\n\t\"esim;\":                     '\\U00002242',\n\t\"eta;\":                      '\\U000003B7',\n\t\"eth;\":                      '\\U000000F0',\n\t\"euml;\":                     '\\U000000EB',\n\t\"euro;\":                     '\\U000020AC',\n\t\"excl;\":                     '\\U00000021',\n\t\"exist;\":                    '\\U00002203',\n\t\"expectation;\":              '\\U00002130',\n\t\"exponentiale;\":             '\\U00002147',\n\t\"fallingdotseq;\":            '\\U00002252',\n\t\"fcy;\":                      '\\U00000444',\n\t\"female;\":                   '\\U00002640',\n\t\"ffilig;\":                   '\\U0000FB03',\n\t\"fflig;\":                    '\\U0000FB00',\n\t\"ffllig;\":                   '\\U0000FB04',\n\t\"ffr;\":                      '\\U0001D523',\n\t\"filig;\":                    '\\U0000FB01',\n\t\"flat;\":                     '\\U0000266D',\n\t\"fllig;\":                    '\\U0000FB02',\n\t\"fltns;\":                    '\\U000025B1',\n\t\"fnof;\":                     '\\U00000192',\n\t\"fopf;\":                     '\\U0001D557',\n\t\"forall;\":                   '\\U00002200',\n\t\"fork;\":                     '\\U000022D4',\n\t\"forkv;\":                    '\\U00002AD9',\n\t\"fpartint;\":                 '\\U00002A0D',\n\t\"frac12;\":                   '\\U000000BD',\n\t\"frac13;\":                   '\\U00002153',\n\t\"frac14;\":                   '\\U000000BC',\n\t\"frac15;\":                   '\\U00002155',\n\t\"frac16;\":                   '\\U00002159',\n\t\"frac18;\":                   
'\\U0000215B',\n\t\"frac23;\":                   '\\U00002154',\n\t\"frac25;\":                   '\\U00002156',\n\t\"frac34;\":                   '\\U000000BE',\n\t\"frac35;\":                   '\\U00002157',\n\t\"frac38;\":                   '\\U0000215C',\n\t\"frac45;\":                   '\\U00002158',\n\t\"frac56;\":                   '\\U0000215A',\n\t\"frac58;\":                   '\\U0000215D',\n\t\"frac78;\":                   '\\U0000215E',\n\t\"frasl;\":                    '\\U00002044',\n\t\"frown;\":                    '\\U00002322',\n\t\"fscr;\":                     '\\U0001D4BB',\n\t\"gE;\":                       '\\U00002267',\n\t\"gEl;\":                      '\\U00002A8C',\n\t\"gacute;\":                   '\\U000001F5',\n\t\"gamma;\":                    '\\U000003B3',\n\t\"gammad;\":                   '\\U000003DD',\n\t\"gap;\":                      '\\U00002A86',\n\t\"gbreve;\":                   '\\U0000011F',\n\t\"gcirc;\":                    '\\U0000011D',\n\t\"gcy;\":                      '\\U00000433',\n\t\"gdot;\":                     '\\U00000121',\n\t\"ge;\":                       '\\U00002265',\n\t\"gel;\":                      '\\U000022DB',\n\t\"geq;\":                      '\\U00002265',\n\t\"geqq;\":                     '\\U00002267',\n\t\"geqslant;\":                 '\\U00002A7E',\n\t\"ges;\":                      '\\U00002A7E',\n\t\"gescc;\":                    '\\U00002AA9',\n\t\"gesdot;\":                   '\\U00002A80',\n\t\"gesdoto;\":                  '\\U00002A82',\n\t\"gesdotol;\":                 '\\U00002A84',\n\t\"gesles;\":                   '\\U00002A94',\n\t\"gfr;\":                      '\\U0001D524',\n\t\"gg;\":                       '\\U0000226B',\n\t\"ggg;\":                      '\\U000022D9',\n\t\"gimel;\":                    '\\U00002137',\n\t\"gjcy;\":                     '\\U00000453',\n\t\"gl;\":                       '\\U00002277',\n\t\"glE;\":                      '\\U00002A92',\n\t\"gla;\":             
         '\\U00002AA5',\n\t\"glj;\":                      '\\U00002AA4',\n\t\"gnE;\":                      '\\U00002269',\n\t\"gnap;\":                     '\\U00002A8A',\n\t\"gnapprox;\":                 '\\U00002A8A',\n\t\"gne;\":                      '\\U00002A88',\n\t\"gneq;\":                     '\\U00002A88',\n\t\"gneqq;\":                    '\\U00002269',\n\t\"gnsim;\":                    '\\U000022E7',\n\t\"gopf;\":                     '\\U0001D558',\n\t\"grave;\":                    '\\U00000060',\n\t\"gscr;\":                     '\\U0000210A',\n\t\"gsim;\":                     '\\U00002273',\n\t\"gsime;\":                    '\\U00002A8E',\n\t\"gsiml;\":                    '\\U00002A90',\n\t\"gt;\":                       '\\U0000003E',\n\t\"gtcc;\":                     '\\U00002AA7',\n\t\"gtcir;\":                    '\\U00002A7A',\n\t\"gtdot;\":                    '\\U000022D7',\n\t\"gtlPar;\":                   '\\U00002995',\n\t\"gtquest;\":                  '\\U00002A7C',\n\t\"gtrapprox;\":                '\\U00002A86',\n\t\"gtrarr;\":                   '\\U00002978',\n\t\"gtrdot;\":                   '\\U000022D7',\n\t\"gtreqless;\":                '\\U000022DB',\n\t\"gtreqqless;\":               '\\U00002A8C',\n\t\"gtrless;\":                  '\\U00002277',\n\t\"gtrsim;\":                   '\\U00002273',\n\t\"hArr;\":                     '\\U000021D4',\n\t\"hairsp;\":                   '\\U0000200A',\n\t\"half;\":                     '\\U000000BD',\n\t\"hamilt;\":                   '\\U0000210B',\n\t\"hardcy;\":                   '\\U0000044A',\n\t\"harr;\":                     '\\U00002194',\n\t\"harrcir;\":                  '\\U00002948',\n\t\"harrw;\":                    '\\U000021AD',\n\t\"hbar;\":                     '\\U0000210F',\n\t\"hcirc;\":                    '\\U00000125',\n\t\"hearts;\":                   '\\U00002665',\n\t\"heartsuit;\":                '\\U00002665',\n\t\"hellip;\":                   '\\U00002026',\n\t\"hercon;\": 
                  '\\U000022B9',\n\t\"hfr;\":                      '\\U0001D525',\n\t\"hksearow;\":                 '\\U00002925',\n\t\"hkswarow;\":                 '\\U00002926',\n\t\"hoarr;\":                    '\\U000021FF',\n\t\"homtht;\":                   '\\U0000223B',\n\t\"hookleftarrow;\":            '\\U000021A9',\n\t\"hookrightarrow;\":           '\\U000021AA',\n\t\"hopf;\":                     '\\U0001D559',\n\t\"horbar;\":                   '\\U00002015',\n\t\"hscr;\":                     '\\U0001D4BD',\n\t\"hslash;\":                   '\\U0000210F',\n\t\"hstrok;\":                   '\\U00000127',\n\t\"hybull;\":                   '\\U00002043',\n\t\"hyphen;\":                   '\\U00002010',\n\t\"iacute;\":                   '\\U000000ED',\n\t\"ic;\":                       '\\U00002063',\n\t\"icirc;\":                    '\\U000000EE',\n\t\"icy;\":                      '\\U00000438',\n\t\"iecy;\":                     '\\U00000435',\n\t\"iexcl;\":                    '\\U000000A1',\n\t\"iff;\":                      '\\U000021D4',\n\t\"ifr;\":                      '\\U0001D526',\n\t\"igrave;\":                   '\\U000000EC',\n\t\"ii;\":                       '\\U00002148',\n\t\"iiiint;\":                   '\\U00002A0C',\n\t\"iiint;\":                    '\\U0000222D',\n\t\"iinfin;\":                   '\\U000029DC',\n\t\"iiota;\":                    '\\U00002129',\n\t\"ijlig;\":                    '\\U00000133',\n\t\"imacr;\":                    '\\U0000012B',\n\t\"image;\":                    '\\U00002111',\n\t\"imagline;\":                 '\\U00002110',\n\t\"imagpart;\":                 '\\U00002111',\n\t\"imath;\":                    '\\U00000131',\n\t\"imof;\":                     '\\U000022B7',\n\t\"imped;\":                    '\\U000001B5',\n\t\"in;\":                       '\\U00002208',\n\t\"incare;\":                   '\\U00002105',\n\t\"infin;\":                    '\\U0000221E',\n\t\"infintie;\":                 
'\\U000029DD',\n\t\"inodot;\":                   '\\U00000131',\n\t\"int;\":                      '\\U0000222B',\n\t\"intcal;\":                   '\\U000022BA',\n\t\"integers;\":                 '\\U00002124',\n\t\"intercal;\":                 '\\U000022BA',\n\t\"intlarhk;\":                 '\\U00002A17',\n\t\"intprod;\":                  '\\U00002A3C',\n\t\"iocy;\":                     '\\U00000451',\n\t\"iogon;\":                    '\\U0000012F',\n\t\"iopf;\":                     '\\U0001D55A',\n\t\"iota;\":                     '\\U000003B9',\n\t\"iprod;\":                    '\\U00002A3C',\n\t\"iquest;\":                   '\\U000000BF',\n\t\"iscr;\":                     '\\U0001D4BE',\n\t\"isin;\":                     '\\U00002208',\n\t\"isinE;\":                    '\\U000022F9',\n\t\"isindot;\":                  '\\U000022F5',\n\t\"isins;\":                    '\\U000022F4',\n\t\"isinsv;\":                   '\\U000022F3',\n\t\"isinv;\":                    '\\U00002208',\n\t\"it;\":                       '\\U00002062',\n\t\"itilde;\":                   '\\U00000129',\n\t\"iukcy;\":                    '\\U00000456',\n\t\"iuml;\":                     '\\U000000EF',\n\t\"jcirc;\":                    '\\U00000135',\n\t\"jcy;\":                      '\\U00000439',\n\t\"jfr;\":                      '\\U0001D527',\n\t\"jmath;\":                    '\\U00000237',\n\t\"jopf;\":                     '\\U0001D55B',\n\t\"jscr;\":                     '\\U0001D4BF',\n\t\"jsercy;\":                   '\\U00000458',\n\t\"jukcy;\":                    '\\U00000454',\n\t\"kappa;\":                    '\\U000003BA',\n\t\"kappav;\":                   '\\U000003F0',\n\t\"kcedil;\":                   '\\U00000137',\n\t\"kcy;\":                      '\\U0000043A',\n\t\"kfr;\":                      '\\U0001D528',\n\t\"kgreen;\":                   '\\U00000138',\n\t\"khcy;\":                     '\\U00000445',\n\t\"kjcy;\":                     '\\U0000045C',\n\t\"kopf;\":            
         '\\U0001D55C',\n\t\"kscr;\":                     '\\U0001D4C0',\n\t\"lAarr;\":                    '\\U000021DA',\n\t\"lArr;\":                     '\\U000021D0',\n\t\"lAtail;\":                   '\\U0000291B',\n\t\"lBarr;\":                    '\\U0000290E',\n\t\"lE;\":                       '\\U00002266',\n\t\"lEg;\":                      '\\U00002A8B',\n\t\"lHar;\":                     '\\U00002962',\n\t\"lacute;\":                   '\\U0000013A',\n\t\"laemptyv;\":                 '\\U000029B4',\n\t\"lagran;\":                   '\\U00002112',\n\t\"lambda;\":                   '\\U000003BB',\n\t\"lang;\":                     '\\U000027E8',\n\t\"langd;\":                    '\\U00002991',\n\t\"langle;\":                   '\\U000027E8',\n\t\"lap;\":                      '\\U00002A85',\n\t\"laquo;\":                    '\\U000000AB',\n\t\"larr;\":                     '\\U00002190',\n\t\"larrb;\":                    '\\U000021E4',\n\t\"larrbfs;\":                  '\\U0000291F',\n\t\"larrfs;\":                   '\\U0000291D',\n\t\"larrhk;\":                   '\\U000021A9',\n\t\"larrlp;\":                   '\\U000021AB',\n\t\"larrpl;\":                   '\\U00002939',\n\t\"larrsim;\":                  '\\U00002973',\n\t\"larrtl;\":                   '\\U000021A2',\n\t\"lat;\":                      '\\U00002AAB',\n\t\"latail;\":                   '\\U00002919',\n\t\"late;\":                     '\\U00002AAD',\n\t\"lbarr;\":                    '\\U0000290C',\n\t\"lbbrk;\":                    '\\U00002772',\n\t\"lbrace;\":                   '\\U0000007B',\n\t\"lbrack;\":                   '\\U0000005B',\n\t\"lbrke;\":                    '\\U0000298B',\n\t\"lbrksld;\":                  '\\U0000298F',\n\t\"lbrkslu;\":                  '\\U0000298D',\n\t\"lcaron;\":                   '\\U0000013E',\n\t\"lcedil;\":                   '\\U0000013C',\n\t\"lceil;\":                    '\\U00002308',\n\t\"lcub;\":                     '\\U0000007B',\n\t\"lcy;\":    
                  '\\U0000043B',\n\t\"ldca;\":                     '\\U00002936',\n\t\"ldquo;\":                    '\\U0000201C',\n\t\"ldquor;\":                   '\\U0000201E',\n\t\"ldrdhar;\":                  '\\U00002967',\n\t\"ldrushar;\":                 '\\U0000294B',\n\t\"ldsh;\":                     '\\U000021B2',\n\t\"le;\":                       '\\U00002264',\n\t\"leftarrow;\":                '\\U00002190',\n\t\"leftarrowtail;\":            '\\U000021A2',\n\t\"leftharpoondown;\":          '\\U000021BD',\n\t\"leftharpoonup;\":            '\\U000021BC',\n\t\"leftleftarrows;\":           '\\U000021C7',\n\t\"leftrightarrow;\":           '\\U00002194',\n\t\"leftrightarrows;\":          '\\U000021C6',\n\t\"leftrightharpoons;\":        '\\U000021CB',\n\t\"leftrightsquigarrow;\":      '\\U000021AD',\n\t\"leftthreetimes;\":           '\\U000022CB',\n\t\"leg;\":                      '\\U000022DA',\n\t\"leq;\":                      '\\U00002264',\n\t\"leqq;\":                     '\\U00002266',\n\t\"leqslant;\":                 '\\U00002A7D',\n\t\"les;\":                      '\\U00002A7D',\n\t\"lescc;\":                    '\\U00002AA8',\n\t\"lesdot;\":                   '\\U00002A7F',\n\t\"lesdoto;\":                  '\\U00002A81',\n\t\"lesdotor;\":                 '\\U00002A83',\n\t\"lesges;\":                   '\\U00002A93',\n\t\"lessapprox;\":               '\\U00002A85',\n\t\"lessdot;\":                  '\\U000022D6',\n\t\"lesseqgtr;\":                '\\U000022DA',\n\t\"lesseqqgtr;\":               '\\U00002A8B',\n\t\"lessgtr;\":                  '\\U00002276',\n\t\"lesssim;\":                  '\\U00002272',\n\t\"lfisht;\":                   '\\U0000297C',\n\t\"lfloor;\":                   '\\U0000230A',\n\t\"lfr;\":                      '\\U0001D529',\n\t\"lg;\":                       '\\U00002276',\n\t\"lgE;\":                      '\\U00002A91',\n\t\"lhard;\":                    '\\U000021BD',\n\t\"lharu;\":                    
'\\U000021BC',\n\t\"lharul;\":                   '\\U0000296A',\n\t\"lhblk;\":                    '\\U00002584',\n\t\"ljcy;\":                     '\\U00000459',\n\t\"ll;\":                       '\\U0000226A',\n\t\"llarr;\":                    '\\U000021C7',\n\t\"llcorner;\":                 '\\U0000231E',\n\t\"llhard;\":                   '\\U0000296B',\n\t\"lltri;\":                    '\\U000025FA',\n\t\"lmidot;\":                   '\\U00000140',\n\t\"lmoust;\":                   '\\U000023B0',\n\t\"lmoustache;\":               '\\U000023B0',\n\t\"lnE;\":                      '\\U00002268',\n\t\"lnap;\":                     '\\U00002A89',\n\t\"lnapprox;\":                 '\\U00002A89',\n\t\"lne;\":                      '\\U00002A87',\n\t\"lneq;\":                     '\\U00002A87',\n\t\"lneqq;\":                    '\\U00002268',\n\t\"lnsim;\":                    '\\U000022E6',\n\t\"loang;\":                    '\\U000027EC',\n\t\"loarr;\":                    '\\U000021FD',\n\t\"lobrk;\":                    '\\U000027E6',\n\t\"longleftarrow;\":            '\\U000027F5',\n\t\"longleftrightarrow;\":       '\\U000027F7',\n\t\"longmapsto;\":               '\\U000027FC',\n\t\"longrightarrow;\":           '\\U000027F6',\n\t\"looparrowleft;\":            '\\U000021AB',\n\t\"looparrowright;\":           '\\U000021AC',\n\t\"lopar;\":                    '\\U00002985',\n\t\"lopf;\":                     '\\U0001D55D',\n\t\"loplus;\":                   '\\U00002A2D',\n\t\"lotimes;\":                  '\\U00002A34',\n\t\"lowast;\":                   '\\U00002217',\n\t\"lowbar;\":                   '\\U0000005F',\n\t\"loz;\":                      '\\U000025CA',\n\t\"lozenge;\":                  '\\U000025CA',\n\t\"lozf;\":                     '\\U000029EB',\n\t\"lpar;\":                     '\\U00000028',\n\t\"lparlt;\":                   '\\U00002993',\n\t\"lrarr;\":                    '\\U000021C6',\n\t\"lrcorner;\":                 '\\U0000231F',\n\t\"lrhar;\":           
         '\\U000021CB',\n\t\"lrhard;\":                   '\\U0000296D',\n\t\"lrm;\":                      '\\U0000200E',\n\t\"lrtri;\":                    '\\U000022BF',\n\t\"lsaquo;\":                   '\\U00002039',\n\t\"lscr;\":                     '\\U0001D4C1',\n\t\"lsh;\":                      '\\U000021B0',\n\t\"lsim;\":                     '\\U00002272',\n\t\"lsime;\":                    '\\U00002A8D',\n\t\"lsimg;\":                    '\\U00002A8F',\n\t\"lsqb;\":                     '\\U0000005B',\n\t\"lsquo;\":                    '\\U00002018',\n\t\"lsquor;\":                   '\\U0000201A',\n\t\"lstrok;\":                   '\\U00000142',\n\t\"lt;\":                       '\\U0000003C',\n\t\"ltcc;\":                     '\\U00002AA6',\n\t\"ltcir;\":                    '\\U00002A79',\n\t\"ltdot;\":                    '\\U000022D6',\n\t\"lthree;\":                   '\\U000022CB',\n\t\"ltimes;\":                   '\\U000022C9',\n\t\"ltlarr;\":                   '\\U00002976',\n\t\"ltquest;\":                  '\\U00002A7B',\n\t\"ltrPar;\":                   '\\U00002996',\n\t\"ltri;\":                     '\\U000025C3',\n\t\"ltrie;\":                    '\\U000022B4',\n\t\"ltrif;\":                    '\\U000025C2',\n\t\"lurdshar;\":                 '\\U0000294A',\n\t\"luruhar;\":                  '\\U00002966',\n\t\"mDDot;\":                    '\\U0000223A',\n\t\"macr;\":                     '\\U000000AF',\n\t\"male;\":                     '\\U00002642',\n\t\"malt;\":                     '\\U00002720',\n\t\"maltese;\":                  '\\U00002720',\n\t\"map;\":                      '\\U000021A6',\n\t\"mapsto;\":                   '\\U000021A6',\n\t\"mapstodown;\":               '\\U000021A7',\n\t\"mapstoleft;\":               '\\U000021A4',\n\t\"mapstoup;\":                 '\\U000021A5',\n\t\"marker;\":                   '\\U000025AE',\n\t\"mcomma;\":                   '\\U00002A29',\n\t\"mcy;\":                      '\\U0000043C',\n\t\"mdash;\":  
                  '\\U00002014',\n\t\"measuredangle;\":            '\\U00002221',\n\t\"mfr;\":                      '\\U0001D52A',\n\t\"mho;\":                      '\\U00002127',\n\t\"micro;\":                    '\\U000000B5',\n\t\"mid;\":                      '\\U00002223',\n\t\"midast;\":                   '\\U0000002A',\n\t\"midcir;\":                   '\\U00002AF0',\n\t\"middot;\":                   '\\U000000B7',\n\t\"minus;\":                    '\\U00002212',\n\t\"minusb;\":                   '\\U0000229F',\n\t\"minusd;\":                   '\\U00002238',\n\t\"minusdu;\":                  '\\U00002A2A',\n\t\"mlcp;\":                     '\\U00002ADB',\n\t\"mldr;\":                     '\\U00002026',\n\t\"mnplus;\":                   '\\U00002213',\n\t\"models;\":                   '\\U000022A7',\n\t\"mopf;\":                     '\\U0001D55E',\n\t\"mp;\":                       '\\U00002213',\n\t\"mscr;\":                     '\\U0001D4C2',\n\t\"mstpos;\":                   '\\U0000223E',\n\t\"mu;\":                       '\\U000003BC',\n\t\"multimap;\":                 '\\U000022B8',\n\t\"mumap;\":                    '\\U000022B8',\n\t\"nLeftarrow;\":               '\\U000021CD',\n\t\"nLeftrightarrow;\":          '\\U000021CE',\n\t\"nRightarrow;\":              '\\U000021CF',\n\t\"nVDash;\":                   '\\U000022AF',\n\t\"nVdash;\":                   '\\U000022AE',\n\t\"nabla;\":                    '\\U00002207',\n\t\"nacute;\":                   '\\U00000144',\n\t\"nap;\":                      '\\U00002249',\n\t\"napos;\":                    '\\U00000149',\n\t\"napprox;\":                  '\\U00002249',\n\t\"natur;\":                    '\\U0000266E',\n\t\"natural;\":                  '\\U0000266E',\n\t\"naturals;\":                 '\\U00002115',\n\t\"nbsp;\":                     '\\U000000A0',\n\t\"ncap;\":                     '\\U00002A43',\n\t\"ncaron;\":                   '\\U00000148',\n\t\"ncedil;\":                   
'\\U00000146',\n\t\"ncong;\":                    '\\U00002247',\n\t\"ncup;\":                     '\\U00002A42',\n\t\"ncy;\":                      '\\U0000043D',\n\t\"ndash;\":                    '\\U00002013',\n\t\"ne;\":                       '\\U00002260',\n\t\"neArr;\":                    '\\U000021D7',\n\t\"nearhk;\":                   '\\U00002924',\n\t\"nearr;\":                    '\\U00002197',\n\t\"nearrow;\":                  '\\U00002197',\n\t\"nequiv;\":                   '\\U00002262',\n\t\"nesear;\":                   '\\U00002928',\n\t\"nexist;\":                   '\\U00002204',\n\t\"nexists;\":                  '\\U00002204',\n\t\"nfr;\":                      '\\U0001D52B',\n\t\"nge;\":                      '\\U00002271',\n\t\"ngeq;\":                     '\\U00002271',\n\t\"ngsim;\":                    '\\U00002275',\n\t\"ngt;\":                      '\\U0000226F',\n\t\"ngtr;\":                     '\\U0000226F',\n\t\"nhArr;\":                    '\\U000021CE',\n\t\"nharr;\":                    '\\U000021AE',\n\t\"nhpar;\":                    '\\U00002AF2',\n\t\"ni;\":                       '\\U0000220B',\n\t\"nis;\":                      '\\U000022FC',\n\t\"nisd;\":                     '\\U000022FA',\n\t\"niv;\":                      '\\U0000220B',\n\t\"njcy;\":                     '\\U0000045A',\n\t\"nlArr;\":                    '\\U000021CD',\n\t\"nlarr;\":                    '\\U0000219A',\n\t\"nldr;\":                     '\\U00002025',\n\t\"nle;\":                      '\\U00002270',\n\t\"nleftarrow;\":               '\\U0000219A',\n\t\"nleftrightarrow;\":          '\\U000021AE',\n\t\"nleq;\":                     '\\U00002270',\n\t\"nless;\":                    '\\U0000226E',\n\t\"nlsim;\":                    '\\U00002274',\n\t\"nlt;\":                      '\\U0000226E',\n\t\"nltri;\":                    '\\U000022EA',\n\t\"nltrie;\":                   '\\U000022EC',\n\t\"nmid;\":                     '\\U00002224',\n\t\"nopf;\":            
         '\\U0001D55F',\n\t\"not;\":                      '\\U000000AC',\n\t\"notin;\":                    '\\U00002209',\n\t\"notinva;\":                  '\\U00002209',\n\t\"notinvb;\":                  '\\U000022F7',\n\t\"notinvc;\":                  '\\U000022F6',\n\t\"notni;\":                    '\\U0000220C',\n\t\"notniva;\":                  '\\U0000220C',\n\t\"notnivb;\":                  '\\U000022FE',\n\t\"notnivc;\":                  '\\U000022FD',\n\t\"npar;\":                     '\\U00002226',\n\t\"nparallel;\":                '\\U00002226',\n\t\"npolint;\":                  '\\U00002A14',\n\t\"npr;\":                      '\\U00002280',\n\t\"nprcue;\":                   '\\U000022E0',\n\t\"nprec;\":                    '\\U00002280',\n\t\"nrArr;\":                    '\\U000021CF',\n\t\"nrarr;\":                    '\\U0000219B',\n\t\"nrightarrow;\":              '\\U0000219B',\n\t\"nrtri;\":                    '\\U000022EB',\n\t\"nrtrie;\":                   '\\U000022ED',\n\t\"nsc;\":                      '\\U00002281',\n\t\"nsccue;\":                   '\\U000022E1',\n\t\"nscr;\":                     '\\U0001D4C3',\n\t\"nshortmid;\":                '\\U00002224',\n\t\"nshortparallel;\":           '\\U00002226',\n\t\"nsim;\":                     '\\U00002241',\n\t\"nsime;\":                    '\\U00002244',\n\t\"nsimeq;\":                   '\\U00002244',\n\t\"nsmid;\":                    '\\U00002224',\n\t\"nspar;\":                    '\\U00002226',\n\t\"nsqsube;\":                  '\\U000022E2',\n\t\"nsqsupe;\":                  '\\U000022E3',\n\t\"nsub;\":                     '\\U00002284',\n\t\"nsube;\":                    '\\U00002288',\n\t\"nsubseteq;\":                '\\U00002288',\n\t\"nsucc;\":                    '\\U00002281',\n\t\"nsup;\":                     '\\U00002285',\n\t\"nsupe;\":                    '\\U00002289',\n\t\"nsupseteq;\":                '\\U00002289',\n\t\"ntgl;\":                     '\\U00002279',\n\t\"ntilde;\": 
                  '\\U000000F1',\n\t\"ntlg;\":                     '\\U00002278',\n\t\"ntriangleleft;\":            '\\U000022EA',\n\t\"ntrianglelefteq;\":          '\\U000022EC',\n\t\"ntriangleright;\":           '\\U000022EB',\n\t\"ntrianglerighteq;\":         '\\U000022ED',\n\t\"nu;\":                       '\\U000003BD',\n\t\"num;\":                      '\\U00000023',\n\t\"numero;\":                   '\\U00002116',\n\t\"numsp;\":                    '\\U00002007',\n\t\"nvDash;\":                   '\\U000022AD',\n\t\"nvHarr;\":                   '\\U00002904',\n\t\"nvdash;\":                   '\\U000022AC',\n\t\"nvinfin;\":                  '\\U000029DE',\n\t\"nvlArr;\":                   '\\U00002902',\n\t\"nvrArr;\":                   '\\U00002903',\n\t\"nwArr;\":                    '\\U000021D6',\n\t\"nwarhk;\":                   '\\U00002923',\n\t\"nwarr;\":                    '\\U00002196',\n\t\"nwarrow;\":                  '\\U00002196',\n\t\"nwnear;\":                   '\\U00002927',\n\t\"oS;\":                       '\\U000024C8',\n\t\"oacute;\":                   '\\U000000F3',\n\t\"oast;\":                     '\\U0000229B',\n\t\"ocir;\":                     '\\U0000229A',\n\t\"ocirc;\":                    '\\U000000F4',\n\t\"ocy;\":                      '\\U0000043E',\n\t\"odash;\":                    '\\U0000229D',\n\t\"odblac;\":                   '\\U00000151',\n\t\"odiv;\":                     '\\U00002A38',\n\t\"odot;\":                     '\\U00002299',\n\t\"odsold;\":                   '\\U000029BC',\n\t\"oelig;\":                    '\\U00000153',\n\t\"ofcir;\":                    '\\U000029BF',\n\t\"ofr;\":                      '\\U0001D52C',\n\t\"ogon;\":                     '\\U000002DB',\n\t\"ograve;\":                   '\\U000000F2',\n\t\"ogt;\":                      '\\U000029C1',\n\t\"ohbar;\":                    '\\U000029B5',\n\t\"ohm;\":                      '\\U000003A9',\n\t\"oint;\":                     
'\\U0000222E',\n\t\"olarr;\":                    '\\U000021BA',\n\t\"olcir;\":                    '\\U000029BE',\n\t\"olcross;\":                  '\\U000029BB',\n\t\"oline;\":                    '\\U0000203E',\n\t\"olt;\":                      '\\U000029C0',\n\t\"omacr;\":                    '\\U0000014D',\n\t\"omega;\":                    '\\U000003C9',\n\t\"omicron;\":                  '\\U000003BF',\n\t\"omid;\":                     '\\U000029B6',\n\t\"ominus;\":                   '\\U00002296',\n\t\"oopf;\":                     '\\U0001D560',\n\t\"opar;\":                     '\\U000029B7',\n\t\"operp;\":                    '\\U000029B9',\n\t\"oplus;\":                    '\\U00002295',\n\t\"or;\":                       '\\U00002228',\n\t\"orarr;\":                    '\\U000021BB',\n\t\"ord;\":                      '\\U00002A5D',\n\t\"order;\":                    '\\U00002134',\n\t\"orderof;\":                  '\\U00002134',\n\t\"ordf;\":                     '\\U000000AA',\n\t\"ordm;\":                     '\\U000000BA',\n\t\"origof;\":                   '\\U000022B6',\n\t\"oror;\":                     '\\U00002A56',\n\t\"orslope;\":                  '\\U00002A57',\n\t\"orv;\":                      '\\U00002A5B',\n\t\"oscr;\":                     '\\U00002134',\n\t\"oslash;\":                   '\\U000000F8',\n\t\"osol;\":                     '\\U00002298',\n\t\"otilde;\":                   '\\U000000F5',\n\t\"otimes;\":                   '\\U00002297',\n\t\"otimesas;\":                 '\\U00002A36',\n\t\"ouml;\":                     '\\U000000F6',\n\t\"ovbar;\":                    '\\U0000233D',\n\t\"par;\":                      '\\U00002225',\n\t\"para;\":                     '\\U000000B6',\n\t\"parallel;\":                 '\\U00002225',\n\t\"parsim;\":                   '\\U00002AF3',\n\t\"parsl;\":                    '\\U00002AFD',\n\t\"part;\":                     '\\U00002202',\n\t\"pcy;\":                      '\\U0000043F',\n\t\"percnt;\":          
         '\\U00000025',\n\t\"period;\":                   '\\U0000002E',\n\t\"permil;\":                   '\\U00002030',\n\t\"perp;\":                     '\\U000022A5',\n\t\"pertenk;\":                  '\\U00002031',\n\t\"pfr;\":                      '\\U0001D52D',\n\t\"phi;\":                      '\\U000003C6',\n\t\"phiv;\":                     '\\U000003D5',\n\t\"phmmat;\":                   '\\U00002133',\n\t\"phone;\":                    '\\U0000260E',\n\t\"pi;\":                       '\\U000003C0',\n\t\"pitchfork;\":                '\\U000022D4',\n\t\"piv;\":                      '\\U000003D6',\n\t\"planck;\":                   '\\U0000210F',\n\t\"planckh;\":                  '\\U0000210E',\n\t\"plankv;\":                   '\\U0000210F',\n\t\"plus;\":                     '\\U0000002B',\n\t\"plusacir;\":                 '\\U00002A23',\n\t\"plusb;\":                    '\\U0000229E',\n\t\"pluscir;\":                  '\\U00002A22',\n\t\"plusdo;\":                   '\\U00002214',\n\t\"plusdu;\":                   '\\U00002A25',\n\t\"pluse;\":                    '\\U00002A72',\n\t\"plusmn;\":                   '\\U000000B1',\n\t\"plussim;\":                  '\\U00002A26',\n\t\"plustwo;\":                  '\\U00002A27',\n\t\"pm;\":                       '\\U000000B1',\n\t\"pointint;\":                 '\\U00002A15',\n\t\"popf;\":                     '\\U0001D561',\n\t\"pound;\":                    '\\U000000A3',\n\t\"pr;\":                       '\\U0000227A',\n\t\"prE;\":                      '\\U00002AB3',\n\t\"prap;\":                     '\\U00002AB7',\n\t\"prcue;\":                    '\\U0000227C',\n\t\"pre;\":                      '\\U00002AAF',\n\t\"prec;\":                     '\\U0000227A',\n\t\"precapprox;\":               '\\U00002AB7',\n\t\"preccurlyeq;\":              '\\U0000227C',\n\t\"preceq;\":                   '\\U00002AAF',\n\t\"precnapprox;\":              '\\U00002AB9',\n\t\"precneqq;\":                 
'\\U00002AB5',\n\t\"precnsim;\":                 '\\U000022E8',\n\t\"precsim;\":                  '\\U0000227E',\n\t\"prime;\":                    '\\U00002032',\n\t\"primes;\":                   '\\U00002119',\n\t\"prnE;\":                     '\\U00002AB5',\n\t\"prnap;\":                    '\\U00002AB9',\n\t\"prnsim;\":                   '\\U000022E8',\n\t\"prod;\":                     '\\U0000220F',\n\t\"profalar;\":                 '\\U0000232E',\n\t\"profline;\":                 '\\U00002312',\n\t\"profsurf;\":                 '\\U00002313',\n\t\"prop;\":                     '\\U0000221D',\n\t\"propto;\":                   '\\U0000221D',\n\t\"prsim;\":                    '\\U0000227E',\n\t\"prurel;\":                   '\\U000022B0',\n\t\"pscr;\":                     '\\U0001D4C5',\n\t\"psi;\":                      '\\U000003C8',\n\t\"puncsp;\":                   '\\U00002008',\n\t\"qfr;\":                      '\\U0001D52E',\n\t\"qint;\":                     '\\U00002A0C',\n\t\"qopf;\":                     '\\U0001D562',\n\t\"qprime;\":                   '\\U00002057',\n\t\"qscr;\":                     '\\U0001D4C6',\n\t\"quaternions;\":              '\\U0000210D',\n\t\"quatint;\":                  '\\U00002A16',\n\t\"quest;\":                    '\\U0000003F',\n\t\"questeq;\":                  '\\U0000225F',\n\t\"quot;\":                     '\\U00000022',\n\t\"rAarr;\":                    '\\U000021DB',\n\t\"rArr;\":                     '\\U000021D2',\n\t\"rAtail;\":                   '\\U0000291C',\n\t\"rBarr;\":                    '\\U0000290F',\n\t\"rHar;\":                     '\\U00002964',\n\t\"racute;\":                   '\\U00000155',\n\t\"radic;\":                    '\\U0000221A',\n\t\"raemptyv;\":                 '\\U000029B3',\n\t\"rang;\":                     '\\U000027E9',\n\t\"rangd;\":                    '\\U00002992',\n\t\"range;\":                    '\\U000029A5',\n\t\"rangle;\":                   '\\U000027E9',\n\t\"raquo;\":           
         '\\U000000BB',\n\t\"rarr;\":                     '\\U00002192',\n\t\"rarrap;\":                   '\\U00002975',\n\t\"rarrb;\":                    '\\U000021E5',\n\t\"rarrbfs;\":                  '\\U00002920',\n\t\"rarrc;\":                    '\\U00002933',\n\t\"rarrfs;\":                   '\\U0000291E',\n\t\"rarrhk;\":                   '\\U000021AA',\n\t\"rarrlp;\":                   '\\U000021AC',\n\t\"rarrpl;\":                   '\\U00002945',\n\t\"rarrsim;\":                  '\\U00002974',\n\t\"rarrtl;\":                   '\\U000021A3',\n\t\"rarrw;\":                    '\\U0000219D',\n\t\"ratail;\":                   '\\U0000291A',\n\t\"ratio;\":                    '\\U00002236',\n\t\"rationals;\":                '\\U0000211A',\n\t\"rbarr;\":                    '\\U0000290D',\n\t\"rbbrk;\":                    '\\U00002773',\n\t\"rbrace;\":                   '\\U0000007D',\n\t\"rbrack;\":                   '\\U0000005D',\n\t\"rbrke;\":                    '\\U0000298C',\n\t\"rbrksld;\":                  '\\U0000298E',\n\t\"rbrkslu;\":                  '\\U00002990',\n\t\"rcaron;\":                   '\\U00000159',\n\t\"rcedil;\":                   '\\U00000157',\n\t\"rceil;\":                    '\\U00002309',\n\t\"rcub;\":                     '\\U0000007D',\n\t\"rcy;\":                      '\\U00000440',\n\t\"rdca;\":                     '\\U00002937',\n\t\"rdldhar;\":                  '\\U00002969',\n\t\"rdquo;\":                    '\\U0000201D',\n\t\"rdquor;\":                   '\\U0000201D',\n\t\"rdsh;\":                     '\\U000021B3',\n\t\"real;\":                     '\\U0000211C',\n\t\"realine;\":                  '\\U0000211B',\n\t\"realpart;\":                 '\\U0000211C',\n\t\"reals;\":                    '\\U0000211D',\n\t\"rect;\":                     '\\U000025AD',\n\t\"reg;\":                      '\\U000000AE',\n\t\"rfisht;\":                   '\\U0000297D',\n\t\"rfloor;\":                   '\\U0000230B',\n\t\"rfr;\":    
                  '\\U0001D52F',\n\t\"rhard;\":                    '\\U000021C1',\n\t\"rharu;\":                    '\\U000021C0',\n\t\"rharul;\":                   '\\U0000296C',\n\t\"rho;\":                      '\\U000003C1',\n\t\"rhov;\":                     '\\U000003F1',\n\t\"rightarrow;\":               '\\U00002192',\n\t\"rightarrowtail;\":           '\\U000021A3',\n\t\"rightharpoondown;\":         '\\U000021C1',\n\t\"rightharpoonup;\":           '\\U000021C0',\n\t\"rightleftarrows;\":          '\\U000021C4',\n\t\"rightleftharpoons;\":        '\\U000021CC',\n\t\"rightrightarrows;\":         '\\U000021C9',\n\t\"rightsquigarrow;\":          '\\U0000219D',\n\t\"rightthreetimes;\":          '\\U000022CC',\n\t\"ring;\":                     '\\U000002DA',\n\t\"risingdotseq;\":             '\\U00002253',\n\t\"rlarr;\":                    '\\U000021C4',\n\t\"rlhar;\":                    '\\U000021CC',\n\t\"rlm;\":                      '\\U0000200F',\n\t\"rmoust;\":                   '\\U000023B1',\n\t\"rmoustache;\":               '\\U000023B1',\n\t\"rnmid;\":                    '\\U00002AEE',\n\t\"roang;\":                    '\\U000027ED',\n\t\"roarr;\":                    '\\U000021FE',\n\t\"robrk;\":                    '\\U000027E7',\n\t\"ropar;\":                    '\\U00002986',\n\t\"ropf;\":                     '\\U0001D563',\n\t\"roplus;\":                   '\\U00002A2E',\n\t\"rotimes;\":                  '\\U00002A35',\n\t\"rpar;\":                     '\\U00000029',\n\t\"rpargt;\":                   '\\U00002994',\n\t\"rppolint;\":                 '\\U00002A12',\n\t\"rrarr;\":                    '\\U000021C9',\n\t\"rsaquo;\":                   '\\U0000203A',\n\t\"rscr;\":                     '\\U0001D4C7',\n\t\"rsh;\":                      '\\U000021B1',\n\t\"rsqb;\":                     '\\U0000005D',\n\t\"rsquo;\":                    '\\U00002019',\n\t\"rsquor;\":                   '\\U00002019',\n\t\"rthree;\":                   
'\\U000022CC',\n\t\"rtimes;\":                   '\\U000022CA',\n\t\"rtri;\":                     '\\U000025B9',\n\t\"rtrie;\":                    '\\U000022B5',\n\t\"rtrif;\":                    '\\U000025B8',\n\t\"rtriltri;\":                 '\\U000029CE',\n\t\"ruluhar;\":                  '\\U00002968',\n\t\"rx;\":                       '\\U0000211E',\n\t\"sacute;\":                   '\\U0000015B',\n\t\"sbquo;\":                    '\\U0000201A',\n\t\"sc;\":                       '\\U0000227B',\n\t\"scE;\":                      '\\U00002AB4',\n\t\"scap;\":                     '\\U00002AB8',\n\t\"scaron;\":                   '\\U00000161',\n\t\"sccue;\":                    '\\U0000227D',\n\t\"sce;\":                      '\\U00002AB0',\n\t\"scedil;\":                   '\\U0000015F',\n\t\"scirc;\":                    '\\U0000015D',\n\t\"scnE;\":                     '\\U00002AB6',\n\t\"scnap;\":                    '\\U00002ABA',\n\t\"scnsim;\":                   '\\U000022E9',\n\t\"scpolint;\":                 '\\U00002A13',\n\t\"scsim;\":                    '\\U0000227F',\n\t\"scy;\":                      '\\U00000441',\n\t\"sdot;\":                     '\\U000022C5',\n\t\"sdotb;\":                    '\\U000022A1',\n\t\"sdote;\":                    '\\U00002A66',\n\t\"seArr;\":                    '\\U000021D8',\n\t\"searhk;\":                   '\\U00002925',\n\t\"searr;\":                    '\\U00002198',\n\t\"searrow;\":                  '\\U00002198',\n\t\"sect;\":                     '\\U000000A7',\n\t\"semi;\":                     '\\U0000003B',\n\t\"seswar;\":                   '\\U00002929',\n\t\"setminus;\":                 '\\U00002216',\n\t\"setmn;\":                    '\\U00002216',\n\t\"sext;\":                     '\\U00002736',\n\t\"sfr;\":                      '\\U0001D530',\n\t\"sfrown;\":                   '\\U00002322',\n\t\"sharp;\":                    '\\U0000266F',\n\t\"shchcy;\":                   '\\U00000449',\n\t\"shcy;\":            
         '\\U00000448',\n\t\"shortmid;\":                 '\\U00002223',\n\t\"shortparallel;\":            '\\U00002225',\n\t\"shy;\":                      '\\U000000AD',\n\t\"sigma;\":                    '\\U000003C3',\n\t\"sigmaf;\":                   '\\U000003C2',\n\t\"sigmav;\":                   '\\U000003C2',\n\t\"sim;\":                      '\\U0000223C',\n\t\"simdot;\":                   '\\U00002A6A',\n\t\"sime;\":                     '\\U00002243',\n\t\"simeq;\":                    '\\U00002243',\n\t\"simg;\":                     '\\U00002A9E',\n\t\"simgE;\":                    '\\U00002AA0',\n\t\"siml;\":                     '\\U00002A9D',\n\t\"simlE;\":                    '\\U00002A9F',\n\t\"simne;\":                    '\\U00002246',\n\t\"simplus;\":                  '\\U00002A24',\n\t\"simrarr;\":                  '\\U00002972',\n\t\"slarr;\":                    '\\U00002190',\n\t\"smallsetminus;\":            '\\U00002216',\n\t\"smashp;\":                   '\\U00002A33',\n\t\"smeparsl;\":                 '\\U000029E4',\n\t\"smid;\":                     '\\U00002223',\n\t\"smile;\":                    '\\U00002323',\n\t\"smt;\":                      '\\U00002AAA',\n\t\"smte;\":                     '\\U00002AAC',\n\t\"softcy;\":                   '\\U0000044C',\n\t\"sol;\":                      '\\U0000002F',\n\t\"solb;\":                     '\\U000029C4',\n\t\"solbar;\":                   '\\U0000233F',\n\t\"sopf;\":                     '\\U0001D564',\n\t\"spades;\":                   '\\U00002660',\n\t\"spadesuit;\":                '\\U00002660',\n\t\"spar;\":                     '\\U00002225',\n\t\"sqcap;\":                    '\\U00002293',\n\t\"sqcup;\":                    '\\U00002294',\n\t\"sqsub;\":                    '\\U0000228F',\n\t\"sqsube;\":                   '\\U00002291',\n\t\"sqsubset;\":                 '\\U0000228F',\n\t\"sqsubseteq;\":               '\\U00002291',\n\t\"sqsup;\":                    '\\U00002290',\n\t\"sqsupe;\": 
                  '\\U00002292',\n\t\"sqsupset;\":                 '\\U00002290',\n\t\"sqsupseteq;\":               '\\U00002292',\n\t\"squ;\":                      '\\U000025A1',\n\t\"square;\":                   '\\U000025A1',\n\t\"squarf;\":                   '\\U000025AA',\n\t\"squf;\":                     '\\U000025AA',\n\t\"srarr;\":                    '\\U00002192',\n\t\"sscr;\":                     '\\U0001D4C8',\n\t\"ssetmn;\":                   '\\U00002216',\n\t\"ssmile;\":                   '\\U00002323',\n\t\"sstarf;\":                   '\\U000022C6',\n\t\"star;\":                     '\\U00002606',\n\t\"starf;\":                    '\\U00002605',\n\t\"straightepsilon;\":          '\\U000003F5',\n\t\"straightphi;\":              '\\U000003D5',\n\t\"strns;\":                    '\\U000000AF',\n\t\"sub;\":                      '\\U00002282',\n\t\"subE;\":                     '\\U00002AC5',\n\t\"subdot;\":                   '\\U00002ABD',\n\t\"sube;\":                     '\\U00002286',\n\t\"subedot;\":                  '\\U00002AC3',\n\t\"submult;\":                  '\\U00002AC1',\n\t\"subnE;\":                    '\\U00002ACB',\n\t\"subne;\":                    '\\U0000228A',\n\t\"subplus;\":                  '\\U00002ABF',\n\t\"subrarr;\":                  '\\U00002979',\n\t\"subset;\":                   '\\U00002282',\n\t\"subseteq;\":                 '\\U00002286',\n\t\"subseteqq;\":                '\\U00002AC5',\n\t\"subsetneq;\":                '\\U0000228A',\n\t\"subsetneqq;\":               '\\U00002ACB',\n\t\"subsim;\":                   '\\U00002AC7',\n\t\"subsub;\":                   '\\U00002AD5',\n\t\"subsup;\":                   '\\U00002AD3',\n\t\"succ;\":                     '\\U0000227B',\n\t\"succapprox;\":               '\\U00002AB8',\n\t\"succcurlyeq;\":              '\\U0000227D',\n\t\"succeq;\":                   '\\U00002AB0',\n\t\"succnapprox;\":              '\\U00002ABA',\n\t\"succneqq;\":                 
'\\U00002AB6',\n\t\"succnsim;\":                 '\\U000022E9',\n\t\"succsim;\":                  '\\U0000227F',\n\t\"sum;\":                      '\\U00002211',\n\t\"sung;\":                     '\\U0000266A',\n\t\"sup;\":                      '\\U00002283',\n\t\"sup1;\":                     '\\U000000B9',\n\t\"sup2;\":                     '\\U000000B2',\n\t\"sup3;\":                     '\\U000000B3',\n\t\"supE;\":                     '\\U00002AC6',\n\t\"supdot;\":                   '\\U00002ABE',\n\t\"supdsub;\":                  '\\U00002AD8',\n\t\"supe;\":                     '\\U00002287',\n\t\"supedot;\":                  '\\U00002AC4',\n\t\"suphsol;\":                  '\\U000027C9',\n\t\"suphsub;\":                  '\\U00002AD7',\n\t\"suplarr;\":                  '\\U0000297B',\n\t\"supmult;\":                  '\\U00002AC2',\n\t\"supnE;\":                    '\\U00002ACC',\n\t\"supne;\":                    '\\U0000228B',\n\t\"supplus;\":                  '\\U00002AC0',\n\t\"supset;\":                   '\\U00002283',\n\t\"supseteq;\":                 '\\U00002287',\n\t\"supseteqq;\":                '\\U00002AC6',\n\t\"supsetneq;\":                '\\U0000228B',\n\t\"supsetneqq;\":               '\\U00002ACC',\n\t\"supsim;\":                   '\\U00002AC8',\n\t\"supsub;\":                   '\\U00002AD4',\n\t\"supsup;\":                   '\\U00002AD6',\n\t\"swArr;\":                    '\\U000021D9',\n\t\"swarhk;\":                   '\\U00002926',\n\t\"swarr;\":                    '\\U00002199',\n\t\"swarrow;\":                  '\\U00002199',\n\t\"swnwar;\":                   '\\U0000292A',\n\t\"szlig;\":                    '\\U000000DF',\n\t\"target;\":                   '\\U00002316',\n\t\"tau;\":                      '\\U000003C4',\n\t\"tbrk;\":                     '\\U000023B4',\n\t\"tcaron;\":                   '\\U00000165',\n\t\"tcedil;\":                   '\\U00000163',\n\t\"tcy;\":                      '\\U00000442',\n\t\"tdot;\":            
         '\\U000020DB',\n\t\"telrec;\":                   '\\U00002315',\n\t\"tfr;\":                      '\\U0001D531',\n\t\"there4;\":                   '\\U00002234',\n\t\"therefore;\":                '\\U00002234',\n\t\"theta;\":                    '\\U000003B8',\n\t\"thetasym;\":                 '\\U000003D1',\n\t\"thetav;\":                   '\\U000003D1',\n\t\"thickapprox;\":              '\\U00002248',\n\t\"thicksim;\":                 '\\U0000223C',\n\t\"thinsp;\":                   '\\U00002009',\n\t\"thkap;\":                    '\\U00002248',\n\t\"thksim;\":                   '\\U0000223C',\n\t\"thorn;\":                    '\\U000000FE',\n\t\"tilde;\":                    '\\U000002DC',\n\t\"times;\":                    '\\U000000D7',\n\t\"timesb;\":                   '\\U000022A0',\n\t\"timesbar;\":                 '\\U00002A31',\n\t\"timesd;\":                   '\\U00002A30',\n\t\"tint;\":                     '\\U0000222D',\n\t\"toea;\":                     '\\U00002928',\n\t\"top;\":                      '\\U000022A4',\n\t\"topbot;\":                   '\\U00002336',\n\t\"topcir;\":                   '\\U00002AF1',\n\t\"topf;\":                     '\\U0001D565',\n\t\"topfork;\":                  '\\U00002ADA',\n\t\"tosa;\":                     '\\U00002929',\n\t\"tprime;\":                   '\\U00002034',\n\t\"trade;\":                    '\\U00002122',\n\t\"triangle;\":                 '\\U000025B5',\n\t\"triangledown;\":             '\\U000025BF',\n\t\"triangleleft;\":             '\\U000025C3',\n\t\"trianglelefteq;\":           '\\U000022B4',\n\t\"triangleq;\":                '\\U0000225C',\n\t\"triangleright;\":            '\\U000025B9',\n\t\"trianglerighteq;\":          '\\U000022B5',\n\t\"tridot;\":                   '\\U000025EC',\n\t\"trie;\":                     '\\U0000225C',\n\t\"triminus;\":                 '\\U00002A3A',\n\t\"triplus;\":                  '\\U00002A39',\n\t\"trisb;\":                    
'\\U000029CD',\n\t\"tritime;\":                  '\\U00002A3B',\n\t\"trpezium;\":                 '\\U000023E2',\n\t\"tscr;\":                     '\\U0001D4C9',\n\t\"tscy;\":                     '\\U00000446',\n\t\"tshcy;\":                    '\\U0000045B',\n\t\"tstrok;\":                   '\\U00000167',\n\t\"twixt;\":                    '\\U0000226C',\n\t\"twoheadleftarrow;\":         '\\U0000219E',\n\t\"twoheadrightarrow;\":        '\\U000021A0',\n\t\"uArr;\":                     '\\U000021D1',\n\t\"uHar;\":                     '\\U00002963',\n\t\"uacute;\":                   '\\U000000FA',\n\t\"uarr;\":                     '\\U00002191',\n\t\"ubrcy;\":                    '\\U0000045E',\n\t\"ubreve;\":                   '\\U0000016D',\n\t\"ucirc;\":                    '\\U000000FB',\n\t\"ucy;\":                      '\\U00000443',\n\t\"udarr;\":                    '\\U000021C5',\n\t\"udblac;\":                   '\\U00000171',\n\t\"udhar;\":                    '\\U0000296E',\n\t\"ufisht;\":                   '\\U0000297E',\n\t\"ufr;\":                      '\\U0001D532',\n\t\"ugrave;\":                   '\\U000000F9',\n\t\"uharl;\":                    '\\U000021BF',\n\t\"uharr;\":                    '\\U000021BE',\n\t\"uhblk;\":                    '\\U00002580',\n\t\"ulcorn;\":                   '\\U0000231C',\n\t\"ulcorner;\":                 '\\U0000231C',\n\t\"ulcrop;\":                   '\\U0000230F',\n\t\"ultri;\":                    '\\U000025F8',\n\t\"umacr;\":                    '\\U0000016B',\n\t\"uml;\":                      '\\U000000A8',\n\t\"uogon;\":                    '\\U00000173',\n\t\"uopf;\":                     '\\U0001D566',\n\t\"uparrow;\":                  '\\U00002191',\n\t\"updownarrow;\":              '\\U00002195',\n\t\"upharpoonleft;\":            '\\U000021BF',\n\t\"upharpoonright;\":           '\\U000021BE',\n\t\"uplus;\":                    '\\U0000228E',\n\t\"upsi;\":                     '\\U000003C5',\n\t\"upsih;\":           
         '\\U000003D2',\n\t\"upsilon;\":                  '\\U000003C5',\n\t\"upuparrows;\":               '\\U000021C8',\n\t\"urcorn;\":                   '\\U0000231D',\n\t\"urcorner;\":                 '\\U0000231D',\n\t\"urcrop;\":                   '\\U0000230E',\n\t\"uring;\":                    '\\U0000016F',\n\t\"urtri;\":                    '\\U000025F9',\n\t\"uscr;\":                     '\\U0001D4CA',\n\t\"utdot;\":                    '\\U000022F0',\n\t\"utilde;\":                   '\\U00000169',\n\t\"utri;\":                     '\\U000025B5',\n\t\"utrif;\":                    '\\U000025B4',\n\t\"uuarr;\":                    '\\U000021C8',\n\t\"uuml;\":                     '\\U000000FC',\n\t\"uwangle;\":                  '\\U000029A7',\n\t\"vArr;\":                     '\\U000021D5',\n\t\"vBar;\":                     '\\U00002AE8',\n\t\"vBarv;\":                    '\\U00002AE9',\n\t\"vDash;\":                    '\\U000022A8',\n\t\"vangrt;\":                   '\\U0000299C',\n\t\"varepsilon;\":               '\\U000003F5',\n\t\"varkappa;\":                 '\\U000003F0',\n\t\"varnothing;\":               '\\U00002205',\n\t\"varphi;\":                   '\\U000003D5',\n\t\"varpi;\":                    '\\U000003D6',\n\t\"varpropto;\":                '\\U0000221D',\n\t\"varr;\":                     '\\U00002195',\n\t\"varrho;\":                   '\\U000003F1',\n\t\"varsigma;\":                 '\\U000003C2',\n\t\"vartheta;\":                 '\\U000003D1',\n\t\"vartriangleleft;\":          '\\U000022B2',\n\t\"vartriangleright;\":         '\\U000022B3',\n\t\"vcy;\":                      '\\U00000432',\n\t\"vdash;\":                    '\\U000022A2',\n\t\"vee;\":                      '\\U00002228',\n\t\"veebar;\":                   '\\U000022BB',\n\t\"veeeq;\":                    '\\U0000225A',\n\t\"vellip;\":                   '\\U000022EE',\n\t\"verbar;\":                   '\\U0000007C',\n\t\"vert;\":                     '\\U0000007C',\n\t\"vfr;\":    
                  '\\U0001D533',\n\t\"vltri;\":                    '\\U000022B2',\n\t\"vopf;\":                     '\\U0001D567',\n\t\"vprop;\":                    '\\U0000221D',\n\t\"vrtri;\":                    '\\U000022B3',\n\t\"vscr;\":                     '\\U0001D4CB',\n\t\"vzigzag;\":                  '\\U0000299A',\n\t\"wcirc;\":                    '\\U00000175',\n\t\"wedbar;\":                   '\\U00002A5F',\n\t\"wedge;\":                    '\\U00002227',\n\t\"wedgeq;\":                   '\\U00002259',\n\t\"weierp;\":                   '\\U00002118',\n\t\"wfr;\":                      '\\U0001D534',\n\t\"wopf;\":                     '\\U0001D568',\n\t\"wp;\":                       '\\U00002118',\n\t\"wr;\":                       '\\U00002240',\n\t\"wreath;\":                   '\\U00002240',\n\t\"wscr;\":                     '\\U0001D4CC',\n\t\"xcap;\":                     '\\U000022C2',\n\t\"xcirc;\":                    '\\U000025EF',\n\t\"xcup;\":                     '\\U000022C3',\n\t\"xdtri;\":                    '\\U000025BD',\n\t\"xfr;\":                      '\\U0001D535',\n\t\"xhArr;\":                    '\\U000027FA',\n\t\"xharr;\":                    '\\U000027F7',\n\t\"xi;\":                       '\\U000003BE',\n\t\"xlArr;\":                    '\\U000027F8',\n\t\"xlarr;\":                    '\\U000027F5',\n\t\"xmap;\":                     '\\U000027FC',\n\t\"xnis;\":                     '\\U000022FB',\n\t\"xodot;\":                    '\\U00002A00',\n\t\"xopf;\":                     '\\U0001D569',\n\t\"xoplus;\":                   '\\U00002A01',\n\t\"xotime;\":                   '\\U00002A02',\n\t\"xrArr;\":                    '\\U000027F9',\n\t\"xrarr;\":                    '\\U000027F6',\n\t\"xscr;\":                     '\\U0001D4CD',\n\t\"xsqcup;\":                   '\\U00002A06',\n\t\"xuplus;\":                   '\\U00002A04',\n\t\"xutri;\":                    '\\U000025B3',\n\t\"xvee;\":                     
'\\U000022C1',\n\t\"xwedge;\":                   '\\U000022C0',\n\t\"yacute;\":                   '\\U000000FD',\n\t\"yacy;\":                     '\\U0000044F',\n\t\"ycirc;\":                    '\\U00000177',\n\t\"ycy;\":                      '\\U0000044B',\n\t\"yen;\":                      '\\U000000A5',\n\t\"yfr;\":                      '\\U0001D536',\n\t\"yicy;\":                     '\\U00000457',\n\t\"yopf;\":                     '\\U0001D56A',\n\t\"yscr;\":                     '\\U0001D4CE',\n\t\"yucy;\":                     '\\U0000044E',\n\t\"yuml;\":                     '\\U000000FF',\n\t\"zacute;\":                   '\\U0000017A',\n\t\"zcaron;\":                   '\\U0000017E',\n\t\"zcy;\":                      '\\U00000437',\n\t\"zdot;\":                     '\\U0000017C',\n\t\"zeetrf;\":                   '\\U00002128',\n\t\"zeta;\":                     '\\U000003B6',\n\t\"zfr;\":                      '\\U0001D537',\n\t\"zhcy;\":                     '\\U00000436',\n\t\"zigrarr;\":                  '\\U000021DD',\n\t\"zopf;\":                     '\\U0001D56B',\n\t\"zscr;\":                     '\\U0001D4CF',\n\t\"zwj;\":                      '\\U0000200D',\n\t\"zwnj;\":                     '\\U0000200C',\n\t\"AElig\":                     '\\U000000C6',\n\t\"AMP\":                       '\\U00000026',\n\t\"Aacute\":                    '\\U000000C1',\n\t\"Acirc\":                     '\\U000000C2',\n\t\"Agrave\":                    '\\U000000C0',\n\t\"Aring\":                     '\\U000000C5',\n\t\"Atilde\":                    '\\U000000C3',\n\t\"Auml\":                      '\\U000000C4',\n\t\"COPY\":                      '\\U000000A9',\n\t\"Ccedil\":                    '\\U000000C7',\n\t\"ETH\":                       '\\U000000D0',\n\t\"Eacute\":                    '\\U000000C9',\n\t\"Ecirc\":                     '\\U000000CA',\n\t\"Egrave\":                    '\\U000000C8',\n\t\"Euml\":                      '\\U000000CB',\n\t\"GT\":               
         '\\U0000003E',\n\t\"Iacute\":                    '\\U000000CD',\n\t\"Icirc\":                     '\\U000000CE',\n\t\"Igrave\":                    '\\U000000CC',\n\t\"Iuml\":                      '\\U000000CF',\n\t\"LT\":                        '\\U0000003C',\n\t\"Ntilde\":                    '\\U000000D1',\n\t\"Oacute\":                    '\\U000000D3',\n\t\"Ocirc\":                     '\\U000000D4',\n\t\"Ograve\":                    '\\U000000D2',\n\t\"Oslash\":                    '\\U000000D8',\n\t\"Otilde\":                    '\\U000000D5',\n\t\"Ouml\":                      '\\U000000D6',\n\t\"QUOT\":                      '\\U00000022',\n\t\"REG\":                       '\\U000000AE',\n\t\"THORN\":                     '\\U000000DE',\n\t\"Uacute\":                    '\\U000000DA',\n\t\"Ucirc\":                     '\\U000000DB',\n\t\"Ugrave\":                    '\\U000000D9',\n\t\"Uuml\":                      '\\U000000DC',\n\t\"Yacute\":                    '\\U000000DD',\n\t\"aacute\":                    '\\U000000E1',\n\t\"acirc\":                     '\\U000000E2',\n\t\"acute\":                     '\\U000000B4',\n\t\"aelig\":                     '\\U000000E6',\n\t\"agrave\":                    '\\U000000E0',\n\t\"amp\":                       '\\U00000026',\n\t\"aring\":                     '\\U000000E5',\n\t\"atilde\":                    '\\U000000E3',\n\t\"auml\":                      '\\U000000E4',\n\t\"brvbar\":                    '\\U000000A6',\n\t\"ccedil\":                    '\\U000000E7',\n\t\"cedil\":                     '\\U000000B8',\n\t\"cent\":                      '\\U000000A2',\n\t\"copy\":                      '\\U000000A9',\n\t\"curren\":                    '\\U000000A4',\n\t\"deg\":                       '\\U000000B0',\n\t\"divide\":                    '\\U000000F7',\n\t\"eacute\":                    '\\U000000E9',\n\t\"ecirc\":                     '\\U000000EA',\n\t\"egrave\":                    '\\U000000E8',\n\t\"eth\":     
                  '\\U000000F0',\n\t\"euml\":                      '\\U000000EB',\n\t\"frac12\":                    '\\U000000BD',\n\t\"frac14\":                    '\\U000000BC',\n\t\"frac34\":                    '\\U000000BE',\n\t\"gt\":                        '\\U0000003E',\n\t\"iacute\":                    '\\U000000ED',\n\t\"icirc\":                     '\\U000000EE',\n\t\"iexcl\":                     '\\U000000A1',\n\t\"igrave\":                    '\\U000000EC',\n\t\"iquest\":                    '\\U000000BF',\n\t\"iuml\":                      '\\U000000EF',\n\t\"laquo\":                     '\\U000000AB',\n\t\"lt\":                        '\\U0000003C',\n\t\"macr\":                      '\\U000000AF',\n\t\"micro\":                     '\\U000000B5',\n\t\"middot\":                    '\\U000000B7',\n\t\"nbsp\":                      '\\U000000A0',\n\t\"not\":                       '\\U000000AC',\n\t\"ntilde\":                    '\\U000000F1',\n\t\"oacute\":                    '\\U000000F3',\n\t\"ocirc\":                     '\\U000000F4',\n\t\"ograve\":                    '\\U000000F2',\n\t\"ordf\":                      '\\U000000AA',\n\t\"ordm\":                      '\\U000000BA',\n\t\"oslash\":                    '\\U000000F8',\n\t\"otilde\":                    '\\U000000F5',\n\t\"ouml\":                      '\\U000000F6',\n\t\"para\":                      '\\U000000B6',\n\t\"plusmn\":                    '\\U000000B1',\n\t\"pound\":                     '\\U000000A3',\n\t\"quot\":                      '\\U00000022',\n\t\"raquo\":                     '\\U000000BB',\n\t\"reg\":                       '\\U000000AE',\n\t\"sect\":                      '\\U000000A7',\n\t\"shy\":                       '\\U000000AD',\n\t\"sup1\":                      '\\U000000B9',\n\t\"sup2\":                      '\\U000000B2',\n\t\"sup3\":                      '\\U000000B3',\n\t\"szlig\":                     '\\U000000DF',\n\t\"thorn\":                     
'\\U000000FE',\n\t\"times\":                     '\\U000000D7',\n\t\"uacute\":                    '\\U000000FA',\n\t\"ucirc\":                     '\\U000000FB',\n\t\"ugrave\":                    '\\U000000F9',\n\t\"uml\":                       '\\U000000A8',\n\t\"uuml\":                      '\\U000000FC',\n\t\"yacute\":                    '\\U000000FD',\n\t\"yen\":                       '\\U000000A5',\n\t\"yuml\":                      '\\U000000FF',\n}\n\n// HTML entities that are two unicode codepoints.\nvar entity2 = map[string][2]rune{\n\t// TODO(nigeltao): Handle replacements that are wider than their names.\n\t// \"nLt;\":                     {'\\u226A', '\\u20D2'},\n\t// \"nGt;\":                     {'\\u226B', '\\u20D2'},\n\t\"NotEqualTilde;\":           {'\\u2242', '\\u0338'},\n\t\"NotGreaterFullEqual;\":     {'\\u2267', '\\u0338'},\n\t\"NotGreaterGreater;\":       {'\\u226B', '\\u0338'},\n\t\"NotGreaterSlantEqual;\":    {'\\u2A7E', '\\u0338'},\n\t\"NotHumpDownHump;\":         {'\\u224E', '\\u0338'},\n\t\"NotHumpEqual;\":            {'\\u224F', '\\u0338'},\n\t\"NotLeftTriangleBar;\":      {'\\u29CF', '\\u0338'},\n\t\"NotLessLess;\":             {'\\u226A', '\\u0338'},\n\t\"NotLessSlantEqual;\":       {'\\u2A7D', '\\u0338'},\n\t\"NotNestedGreaterGreater;\": {'\\u2AA2', '\\u0338'},\n\t\"NotNestedLessLess;\":       {'\\u2AA1', '\\u0338'},\n\t\"NotPrecedesEqual;\":        {'\\u2AAF', '\\u0338'},\n\t\"NotRightTriangleBar;\":     {'\\u29D0', '\\u0338'},\n\t\"NotSquareSubset;\":         {'\\u228F', '\\u0338'},\n\t\"NotSquareSuperset;\":       {'\\u2290', '\\u0338'},\n\t\"NotSubset;\":               {'\\u2282', '\\u20D2'},\n\t\"NotSucceedsEqual;\":        {'\\u2AB0', '\\u0338'},\n\t\"NotSucceedsTilde;\":        {'\\u227F', '\\u0338'},\n\t\"NotSuperset;\":             {'\\u2283', '\\u20D2'},\n\t\"ThickSpace;\":              {'\\u205F', '\\u200A'},\n\t\"acE;\":                     {'\\u223E', '\\u0333'},\n\t\"bne;\":                     {'\\u003D', 
'\\u20E5'},\n\t\"bnequiv;\":                 {'\\u2261', '\\u20E5'},\n\t\"caps;\":                    {'\\u2229', '\\uFE00'},\n\t\"cups;\":                    {'\\u222A', '\\uFE00'},\n\t\"fjlig;\":                   {'\\u0066', '\\u006A'},\n\t\"gesl;\":                    {'\\u22DB', '\\uFE00'},\n\t\"gvertneqq;\":               {'\\u2269', '\\uFE00'},\n\t\"gvnE;\":                    {'\\u2269', '\\uFE00'},\n\t\"lates;\":                   {'\\u2AAD', '\\uFE00'},\n\t\"lesg;\":                    {'\\u22DA', '\\uFE00'},\n\t\"lvertneqq;\":               {'\\u2268', '\\uFE00'},\n\t\"lvnE;\":                    {'\\u2268', '\\uFE00'},\n\t\"nGg;\":                     {'\\u22D9', '\\u0338'},\n\t\"nGtv;\":                    {'\\u226B', '\\u0338'},\n\t\"nLl;\":                     {'\\u22D8', '\\u0338'},\n\t\"nLtv;\":                    {'\\u226A', '\\u0338'},\n\t\"nang;\":                    {'\\u2220', '\\u20D2'},\n\t\"napE;\":                    {'\\u2A70', '\\u0338'},\n\t\"napid;\":                   {'\\u224B', '\\u0338'},\n\t\"nbump;\":                   {'\\u224E', '\\u0338'},\n\t\"nbumpe;\":                  {'\\u224F', '\\u0338'},\n\t\"ncongdot;\":                {'\\u2A6D', '\\u0338'},\n\t\"nedot;\":                   {'\\u2250', '\\u0338'},\n\t\"nesim;\":                   {'\\u2242', '\\u0338'},\n\t\"ngE;\":                     {'\\u2267', '\\u0338'},\n\t\"ngeqq;\":                   {'\\u2267', '\\u0338'},\n\t\"ngeqslant;\":               {'\\u2A7E', '\\u0338'},\n\t\"nges;\":                    {'\\u2A7E', '\\u0338'},\n\t\"nlE;\":                     {'\\u2266', '\\u0338'},\n\t\"nleqq;\":                   {'\\u2266', '\\u0338'},\n\t\"nleqslant;\":               {'\\u2A7D', '\\u0338'},\n\t\"nles;\":                    {'\\u2A7D', '\\u0338'},\n\t\"notinE;\":                  {'\\u22F9', '\\u0338'},\n\t\"notindot;\":                {'\\u22F5', '\\u0338'},\n\t\"nparsl;\":                  {'\\u2AFD', '\\u20E5'},\n\t\"npart;\":                   {'\\u2202', 
'\\u0338'},\n\t\"npre;\":                    {'\\u2AAF', '\\u0338'},\n\t\"npreceq;\":                 {'\\u2AAF', '\\u0338'},\n\t\"nrarrc;\":                  {'\\u2933', '\\u0338'},\n\t\"nrarrw;\":                  {'\\u219D', '\\u0338'},\n\t\"nsce;\":                    {'\\u2AB0', '\\u0338'},\n\t\"nsubE;\":                   {'\\u2AC5', '\\u0338'},\n\t\"nsubset;\":                 {'\\u2282', '\\u20D2'},\n\t\"nsubseteqq;\":              {'\\u2AC5', '\\u0338'},\n\t\"nsucceq;\":                 {'\\u2AB0', '\\u0338'},\n\t\"nsupE;\":                   {'\\u2AC6', '\\u0338'},\n\t\"nsupset;\":                 {'\\u2283', '\\u20D2'},\n\t\"nsupseteqq;\":              {'\\u2AC6', '\\u0338'},\n\t\"nvap;\":                    {'\\u224D', '\\u20D2'},\n\t\"nvge;\":                    {'\\u2265', '\\u20D2'},\n\t\"nvgt;\":                    {'\\u003E', '\\u20D2'},\n\t\"nvle;\":                    {'\\u2264', '\\u20D2'},\n\t\"nvlt;\":                    {'\\u003C', '\\u20D2'},\n\t\"nvltrie;\":                 {'\\u22B4', '\\u20D2'},\n\t\"nvrtrie;\":                 {'\\u22B5', '\\u20D2'},\n\t\"nvsim;\":                   {'\\u223C', '\\u20D2'},\n\t\"race;\":                    {'\\u223D', '\\u0331'},\n\t\"smtes;\":                   {'\\u2AAC', '\\uFE00'},\n\t\"sqcaps;\":                  {'\\u2293', '\\uFE00'},\n\t\"sqcups;\":                  {'\\u2294', '\\uFE00'},\n\t\"varsubsetneq;\":            {'\\u228A', '\\uFE00'},\n\t\"varsubsetneqq;\":           {'\\u2ACB', '\\uFE00'},\n\t\"varsupsetneq;\":            {'\\u228B', '\\uFE00'},\n\t\"varsupsetneqq;\":           {'\\u2ACC', '\\uFE00'},\n\t\"vnsub;\":                   {'\\u2282', '\\u20D2'},\n\t\"vnsup;\":                   {'\\u2283', '\\u20D2'},\n\t\"vsubnE;\":                  {'\\u2ACB', '\\uFE00'},\n\t\"vsubne;\":                  {'\\u228A', '\\uFE00'},\n\t\"vsupnE;\":                  {'\\u2ACC', '\\uFE00'},\n\t\"vsupne;\":                  {'\\u228B', '\\uFE00'},\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/entity_test.go",
    "content": "// Copyright 2010 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage html\n\nimport (\n\t\"testing\"\n\t\"unicode/utf8\"\n)\n\nfunc TestEntityLength(t *testing.T) {\n\t// We verify that the length of UTF-8 encoding of each value is <= 1 + len(key).\n\t// The +1 comes from the leading \"&\". This property implies that the length of\n\t// unescaped text is <= the length of escaped text.\n\tfor k, v := range entity {\n\t\tif 1+len(k) < utf8.RuneLen(v) {\n\t\t\tt.Error(\"escaped entity &\" + k + \" is shorter than its UTF-8 encoding \" + string(v))\n\t\t}\n\t\tif len(k) > longestEntityWithoutSemicolon && k[len(k)-1] != ';' {\n\t\t\tt.Errorf(\"entity name %s is %d characters, but longestEntityWithoutSemicolon=%d\", k, len(k), longestEntityWithoutSemicolon)\n\t\t}\n\t}\n\tfor k, v := range entity2 {\n\t\tif 1+len(k) < utf8.RuneLen(v[0])+utf8.RuneLen(v[1]) {\n\t\t\tt.Error(\"escaped entity &\" + k + \" is shorter than its UTF-8 encoding \" + string(v[0]) + string(v[1]))\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/escape.go",
    "content": "// Copyright 2010 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage html\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n\t\"unicode/utf8\"\n)\n\n// These replacements permit compatibility with old numeric entities that\n// assumed Windows-1252 encoding.\n// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference\nvar replacementTable = [...]rune{\n\t'\\u20AC', // First entry is what 0x80 should be replaced with.\n\t'\\u0081',\n\t'\\u201A',\n\t'\\u0192',\n\t'\\u201E',\n\t'\\u2026',\n\t'\\u2020',\n\t'\\u2021',\n\t'\\u02C6',\n\t'\\u2030',\n\t'\\u0160',\n\t'\\u2039',\n\t'\\u0152',\n\t'\\u008D',\n\t'\\u017D',\n\t'\\u008F',\n\t'\\u0090',\n\t'\\u2018',\n\t'\\u2019',\n\t'\\u201C',\n\t'\\u201D',\n\t'\\u2022',\n\t'\\u2013',\n\t'\\u2014',\n\t'\\u02DC',\n\t'\\u2122',\n\t'\\u0161',\n\t'\\u203A',\n\t'\\u0153',\n\t'\\u009D',\n\t'\\u017E',\n\t'\\u0178', // Last entry is 0x9F.\n\t// 0x00->'\\uFFFD' is handled programmatically.\n\t// 0x0D->'\\u000D' is a no-op.\n}\n\n// unescapeEntity reads an entity like \"&lt;\" from b[src:] and writes the\n// corresponding \"<\" to b[dst:], returning the incremented dst and src cursors.\n// Precondition: b[src] == '&' && dst <= src.\n// attribute should be true if parsing an attribute value.\nfunc unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {\n\t// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference\n\n\t// i starts at 1 because we already know that s[0] == '&'.\n\ti, s := 1, b[src:]\n\n\tif len(s) <= 1 {\n\t\tb[dst] = b[src]\n\t\treturn dst + 1, src + 1\n\t}\n\n\tif s[i] == '#' {\n\t\tif len(s) <= 3 { // We need to have at least \"&#.\".\n\t\t\tb[dst] = b[src]\n\t\t\treturn dst + 1, src + 1\n\t\t}\n\t\ti++\n\t\tc := s[i]\n\t\thex := false\n\t\tif c == 'x' || c == 'X' {\n\t\t\thex = true\n\t\t\ti++\n\t\t}\n\n\t\tx := '\\x00'\n\t\tfor i < len(s) 
{\n\t\t\tc = s[i]\n\t\t\ti++\n\t\t\tif hex {\n\t\t\t\tif '0' <= c && c <= '9' {\n\t\t\t\t\tx = 16*x + rune(c) - '0'\n\t\t\t\t\tcontinue\n\t\t\t\t} else if 'a' <= c && c <= 'f' {\n\t\t\t\t\tx = 16*x + rune(c) - 'a' + 10\n\t\t\t\t\tcontinue\n\t\t\t\t} else if 'A' <= c && c <= 'F' {\n\t\t\t\t\tx = 16*x + rune(c) - 'A' + 10\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t} else if '0' <= c && c <= '9' {\n\t\t\t\tx = 10*x + rune(c) - '0'\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif c != ';' {\n\t\t\t\ti--\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\tif i <= 3 { // No characters matched.\n\t\t\tb[dst] = b[src]\n\t\t\treturn dst + 1, src + 1\n\t\t}\n\n\t\tif 0x80 <= x && x <= 0x9F {\n\t\t\t// Replace characters from Windows-1252 with UTF-8 equivalents.\n\t\t\tx = replacementTable[x-0x80]\n\t\t} else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {\n\t\t\t// Replace invalid characters with the replacement character.\n\t\t\tx = '\\uFFFD'\n\t\t}\n\n\t\treturn dst + utf8.EncodeRune(b[dst:], x), src + i\n\t}\n\n\t// Consume the maximum number of characters possible, with the\n\t// consumed characters matching one of the named references.\n\n\tfor i < len(s) {\n\t\tc := s[i]\n\t\ti++\n\t\t// Lower-cased characters are more common in entities, so we check for them first.\n\t\tif 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {\n\t\t\tcontinue\n\t\t}\n\t\tif c != ';' {\n\t\t\ti--\n\t\t}\n\t\tbreak\n\t}\n\n\tentityName := string(s[1:i])\n\tif entityName == \"\" {\n\t\t// No-op.\n\t} else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {\n\t\t// No-op.\n\t} else if x := entity[entityName]; x != 0 {\n\t\treturn dst + utf8.EncodeRune(b[dst:], x), src + i\n\t} else if x := entity2[entityName]; x[0] != 0 {\n\t\tdst1 := dst + utf8.EncodeRune(b[dst:], x[0])\n\t\treturn dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i\n\t} else if !attribute {\n\t\tmaxLen := len(entityName) - 1\n\t\tif maxLen > longestEntityWithoutSemicolon {\n\t\t\tmaxLen = 
longestEntityWithoutSemicolon\n\t\t}\n\t\tfor j := maxLen; j > 1; j-- {\n\t\t\tif x := entity[entityName[:j]]; x != 0 {\n\t\t\t\treturn dst + utf8.EncodeRune(b[dst:], x), src + j + 1\n\t\t\t}\n\t\t}\n\t}\n\n\tdst1, src1 = dst+i, src+i\n\tcopy(b[dst:dst1], b[src:src1])\n\treturn dst1, src1\n}\n\n// unescape unescapes b's entities in-place, so that \"a&lt;b\" becomes \"a<b\".\n// attribute should be true if parsing an attribute value.\nfunc unescape(b []byte, attribute bool) []byte {\n\tfor i, c := range b {\n\t\tif c == '&' {\n\t\t\tdst, src := unescapeEntity(b, i, i, attribute)\n\t\t\tfor src < len(b) {\n\t\t\t\tc := b[src]\n\t\t\t\tif c == '&' {\n\t\t\t\t\tdst, src = unescapeEntity(b, dst, src, attribute)\n\t\t\t\t} else {\n\t\t\t\t\tb[dst] = c\n\t\t\t\t\tdst, src = dst+1, src+1\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn b[0:dst]\n\t\t}\n\t}\n\treturn b\n}\n\n// lower lower-cases the A-Z bytes in b in-place, so that \"aBc\" becomes \"abc\".\nfunc lower(b []byte) []byte {\n\tfor i, c := range b {\n\t\tif 'A' <= c && c <= 'Z' {\n\t\t\tb[i] = c + 'a' - 'A'\n\t\t}\n\t}\n\treturn b\n}\n\nconst escapedChars = \"&'<>\\\"\\r\"\n\nfunc escape(w writer, s string) error {\n\ti := strings.IndexAny(s, escapedChars)\n\tfor i != -1 {\n\t\tif _, err := w.WriteString(s[:i]); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar esc string\n\t\tswitch s[i] {\n\t\tcase '&':\n\t\t\tesc = \"&amp;\"\n\t\tcase '\\'':\n\t\t\t// \"&#39;\" is shorter than \"&apos;\" and apos was not in HTML until HTML5.\n\t\t\tesc = \"&#39;\"\n\t\tcase '<':\n\t\t\tesc = \"&lt;\"\n\t\tcase '>':\n\t\t\tesc = \"&gt;\"\n\t\tcase '\"':\n\t\t\t// \"&#34;\" is shorter than \"&quot;\".\n\t\t\tesc = \"&#34;\"\n\t\tcase '\\r':\n\t\t\tesc = \"&#13;\"\n\t\tdefault:\n\t\t\tpanic(\"unrecognized escape character\")\n\t\t}\n\t\ts = s[i+1:]\n\t\tif _, err := w.WriteString(esc); err != nil {\n\t\t\treturn err\n\t\t}\n\t\ti = strings.IndexAny(s, escapedChars)\n\t}\n\t_, err := w.WriteString(s)\n\treturn err\n}\n\n// EscapeString escapes 
special characters like \"<\" to become \"&lt;\". It\n// escapes only five such characters: <, >, &, ' and \".\n// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't\n// always true.\nfunc EscapeString(s string) string {\n\tif strings.IndexAny(s, escapedChars) == -1 {\n\t\treturn s\n\t}\n\tvar buf bytes.Buffer\n\tescape(&buf, s)\n\treturn buf.String()\n}\n\n// UnescapeString unescapes entities like \"&lt;\" to become \"<\". It unescapes a\n// larger range of entities than EscapeString escapes. For example, \"&aacute;\"\n// unescapes to \"á\", as does \"&#225;\" and \"&xE1;\".\n// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't\n// always true.\nfunc UnescapeString(s string) string {\n\tfor _, c := range s {\n\t\tif c == '&' {\n\t\t\treturn string(unescape([]byte(s), false))\n\t\t}\n\t}\n\treturn s\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/escape_test.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage html\n\nimport \"testing\"\n\ntype unescapeTest struct {\n\t// A short description of the test case.\n\tdesc string\n\t// The HTML text.\n\thtml string\n\t// The unescaped text.\n\tunescaped string\n}\n\nvar unescapeTests = []unescapeTest{\n\t// Handle no entities.\n\t{\n\t\t\"copy\",\n\t\t\"A\\ttext\\nstring\",\n\t\t\"A\\ttext\\nstring\",\n\t},\n\t// Handle simple named entities.\n\t{\n\t\t\"simple\",\n\t\t\"&amp; &gt; &lt;\",\n\t\t\"& > <\",\n\t},\n\t// Handle hitting the end of the string.\n\t{\n\t\t\"stringEnd\",\n\t\t\"&amp &amp\",\n\t\t\"& &\",\n\t},\n\t// Handle entities with two codepoints.\n\t{\n\t\t\"multiCodepoint\",\n\t\t\"text &gesl; blah\",\n\t\t\"text \\u22db\\ufe00 blah\",\n\t},\n\t// Handle decimal numeric entities.\n\t{\n\t\t\"decimalEntity\",\n\t\t\"Delta = &#916; \",\n\t\t\"Delta = Δ \",\n\t},\n\t// Handle hexadecimal numeric entities.\n\t{\n\t\t\"hexadecimalEntity\",\n\t\t\"Lambda = &#x3bb; = &#X3Bb \",\n\t\t\"Lambda = λ = λ \",\n\t},\n\t// Handle numeric early termination.\n\t{\n\t\t\"numericEnds\",\n\t\t\"&# &#x &#128;43 &copy = &#169f = &#xa9\",\n\t\t\"&# &#x €43 © = ©f = ©\",\n\t},\n\t// Handle numeric ISO-8859-1 entity replacements.\n\t{\n\t\t\"numericReplacements\",\n\t\t\"Footnote&#x87;\",\n\t\t\"Footnote‡\",\n\t},\n}\n\nfunc TestUnescape(t *testing.T) {\n\tfor _, tt := range unescapeTests {\n\t\tunescaped := UnescapeString(tt.html)\n\t\tif unescaped != tt.unescaped {\n\t\t\tt.Errorf(\"TestUnescape %s: want %q, got %q\", tt.desc, tt.unescaped, unescaped)\n\t\t}\n\t}\n}\n\nfunc TestUnescapeEscape(t *testing.T) {\n\tss := []string{\n\t\t``,\n\t\t`abc def`,\n\t\t`a & b`,\n\t\t`a&amp;b`,\n\t\t`a &amp b`,\n\t\t`&quot;`,\n\t\t`\"`,\n\t\t`\"<&>\"`,\n\t\t`&quot;&lt;&amp;&gt;&quot;`,\n\t\t`3&5==1 && 0<1, \"0&lt;1\", a+acute=&aacute;`,\n\t\t`The special 
characters are: <, >, &, ' and \"`,\n\t}\n\tfor _, s := range ss {\n\t\tif got := UnescapeString(EscapeString(s)); got != s {\n\t\t\tt.Errorf(\"got %q want %q\", got, s)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/example_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// This example demonstrates parsing HTML data and walking the resulting tree.\npackage html_test\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\n\t\"golang.org/x/net/html\"\n)\n\nfunc ExampleParse() {\n\ts := `<p>Links:</p><ul><li><a href=\"foo\">Foo</a><li><a href=\"/bar/baz\">BarBaz</a></ul>`\n\tdoc, err := html.Parse(strings.NewReader(s))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar f func(*html.Node)\n\tf = func(n *html.Node) {\n\t\tif n.Type == html.ElementNode && n.Data == \"a\" {\n\t\t\tfor _, a := range n.Attr {\n\t\t\t\tif a.Key == \"href\" {\n\t\t\t\t\tfmt.Println(a.Val)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\t\tf(c)\n\t\t}\n\t}\n\tf(doc)\n\t// Output:\n\t// foo\n\t// /bar/baz\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/foreign.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage html\n\nimport (\n\t\"strings\"\n)\n\nfunc adjustAttributeNames(aa []Attribute, nameMap map[string]string) {\n\tfor i := range aa {\n\t\tif newName, ok := nameMap[aa[i].Key]; ok {\n\t\t\taa[i].Key = newName\n\t\t}\n\t}\n}\n\nfunc adjustForeignAttributes(aa []Attribute) {\n\tfor i, a := range aa {\n\t\tif a.Key == \"\" || a.Key[0] != 'x' {\n\t\t\tcontinue\n\t\t}\n\t\tswitch a.Key {\n\t\tcase \"xlink:actuate\", \"xlink:arcrole\", \"xlink:href\", \"xlink:role\", \"xlink:show\",\n\t\t\t\"xlink:title\", \"xlink:type\", \"xml:base\", \"xml:lang\", \"xml:space\", \"xmlns:xlink\":\n\t\t\tj := strings.Index(a.Key, \":\")\n\t\t\taa[i].Namespace = a.Key[:j]\n\t\t\taa[i].Key = a.Key[j+1:]\n\t\t}\n\t}\n}\n\nfunc htmlIntegrationPoint(n *Node) bool {\n\tif n.Type != ElementNode {\n\t\treturn false\n\t}\n\tswitch n.Namespace {\n\tcase \"math\":\n\t\tif n.Data == \"annotation-xml\" {\n\t\t\tfor _, a := range n.Attr {\n\t\t\t\tif a.Key == \"encoding\" {\n\t\t\t\t\tval := strings.ToLower(a.Val)\n\t\t\t\t\tif val == \"text/html\" || val == \"application/xhtml+xml\" {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tcase \"svg\":\n\t\tswitch n.Data {\n\t\tcase \"desc\", \"foreignObject\", \"title\":\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc mathMLTextIntegrationPoint(n *Node) bool {\n\tif n.Namespace != \"math\" {\n\t\treturn false\n\t}\n\tswitch n.Data {\n\tcase \"mi\", \"mo\", \"mn\", \"ms\", \"mtext\":\n\t\treturn true\n\t}\n\treturn false\n}\n\n// Section 12.2.5.5.\nvar breakout = map[string]bool{\n\t\"b\":          true,\n\t\"big\":        true,\n\t\"blockquote\": true,\n\t\"body\":       true,\n\t\"br\":         true,\n\t\"center\":     true,\n\t\"code\":       true,\n\t\"dd\":         true,\n\t\"div\":        true,\n\t\"dl\":         true,\n\t\"dt\":         
true,\n\t\"em\":         true,\n\t\"embed\":      true,\n\t\"h1\":         true,\n\t\"h2\":         true,\n\t\"h3\":         true,\n\t\"h4\":         true,\n\t\"h5\":         true,\n\t\"h6\":         true,\n\t\"head\":       true,\n\t\"hr\":         true,\n\t\"i\":          true,\n\t\"img\":        true,\n\t\"li\":         true,\n\t\"listing\":    true,\n\t\"menu\":       true,\n\t\"meta\":       true,\n\t\"nobr\":       true,\n\t\"ol\":         true,\n\t\"p\":          true,\n\t\"pre\":        true,\n\t\"ruby\":       true,\n\t\"s\":          true,\n\t\"small\":      true,\n\t\"span\":       true,\n\t\"strong\":     true,\n\t\"strike\":     true,\n\t\"sub\":        true,\n\t\"sup\":        true,\n\t\"table\":      true,\n\t\"tt\":         true,\n\t\"u\":          true,\n\t\"ul\":         true,\n\t\"var\":        true,\n}\n\n// Section 12.2.5.5.\nvar svgTagNameAdjustments = map[string]string{\n\t\"altglyph\":            \"altGlyph\",\n\t\"altglyphdef\":         \"altGlyphDef\",\n\t\"altglyphitem\":        \"altGlyphItem\",\n\t\"animatecolor\":        \"animateColor\",\n\t\"animatemotion\":       \"animateMotion\",\n\t\"animatetransform\":    \"animateTransform\",\n\t\"clippath\":            \"clipPath\",\n\t\"feblend\":             \"feBlend\",\n\t\"fecolormatrix\":       \"feColorMatrix\",\n\t\"fecomponenttransfer\": \"feComponentTransfer\",\n\t\"fecomposite\":         \"feComposite\",\n\t\"feconvolvematrix\":    \"feConvolveMatrix\",\n\t\"fediffuselighting\":   \"feDiffuseLighting\",\n\t\"fedisplacementmap\":   \"feDisplacementMap\",\n\t\"fedistantlight\":      \"feDistantLight\",\n\t\"feflood\":             \"feFlood\",\n\t\"fefunca\":             \"feFuncA\",\n\t\"fefuncb\":             \"feFuncB\",\n\t\"fefuncg\":             \"feFuncG\",\n\t\"fefuncr\":             \"feFuncR\",\n\t\"fegaussianblur\":      \"feGaussianBlur\",\n\t\"feimage\":             \"feImage\",\n\t\"femerge\":             \"feMerge\",\n\t\"femergenode\":         
\"feMergeNode\",\n\t\"femorphology\":        \"feMorphology\",\n\t\"feoffset\":            \"feOffset\",\n\t\"fepointlight\":        \"fePointLight\",\n\t\"fespecularlighting\":  \"feSpecularLighting\",\n\t\"fespotlight\":         \"feSpotLight\",\n\t\"fetile\":              \"feTile\",\n\t\"feturbulence\":        \"feTurbulence\",\n\t\"foreignobject\":       \"foreignObject\",\n\t\"glyphref\":            \"glyphRef\",\n\t\"lineargradient\":      \"linearGradient\",\n\t\"radialgradient\":      \"radialGradient\",\n\t\"textpath\":            \"textPath\",\n}\n\n// Section 12.2.5.1\nvar mathMLAttributeAdjustments = map[string]string{\n\t\"definitionurl\": \"definitionURL\",\n}\n\nvar svgAttributeAdjustments = map[string]string{\n\t\"attributename\":             \"attributeName\",\n\t\"attributetype\":             \"attributeType\",\n\t\"basefrequency\":             \"baseFrequency\",\n\t\"baseprofile\":               \"baseProfile\",\n\t\"calcmode\":                  \"calcMode\",\n\t\"clippathunits\":             \"clipPathUnits\",\n\t\"contentscripttype\":         \"contentScriptType\",\n\t\"contentstyletype\":          \"contentStyleType\",\n\t\"diffuseconstant\":           \"diffuseConstant\",\n\t\"edgemode\":                  \"edgeMode\",\n\t\"externalresourcesrequired\": \"externalResourcesRequired\",\n\t\"filterres\":                 \"filterRes\",\n\t\"filterunits\":               \"filterUnits\",\n\t\"glyphref\":                  \"glyphRef\",\n\t\"gradienttransform\":         \"gradientTransform\",\n\t\"gradientunits\":             \"gradientUnits\",\n\t\"kernelmatrix\":              \"kernelMatrix\",\n\t\"kernelunitlength\":          \"kernelUnitLength\",\n\t\"keypoints\":                 \"keyPoints\",\n\t\"keysplines\":                \"keySplines\",\n\t\"keytimes\":                  \"keyTimes\",\n\t\"lengthadjust\":              \"lengthAdjust\",\n\t\"limitingconeangle\":         \"limitingConeAngle\",\n\t\"markerheight\":              
\"markerHeight\",\n\t\"markerunits\":               \"markerUnits\",\n\t\"markerwidth\":               \"markerWidth\",\n\t\"maskcontentunits\":          \"maskContentUnits\",\n\t\"maskunits\":                 \"maskUnits\",\n\t\"numoctaves\":                \"numOctaves\",\n\t\"pathlength\":                \"pathLength\",\n\t\"patterncontentunits\":       \"patternContentUnits\",\n\t\"patterntransform\":          \"patternTransform\",\n\t\"patternunits\":              \"patternUnits\",\n\t\"pointsatx\":                 \"pointsAtX\",\n\t\"pointsaty\":                 \"pointsAtY\",\n\t\"pointsatz\":                 \"pointsAtZ\",\n\t\"preservealpha\":             \"preserveAlpha\",\n\t\"preserveaspectratio\":       \"preserveAspectRatio\",\n\t\"primitiveunits\":            \"primitiveUnits\",\n\t\"refx\":                      \"refX\",\n\t\"refy\":                      \"refY\",\n\t\"repeatcount\":               \"repeatCount\",\n\t\"repeatdur\":                 \"repeatDur\",\n\t\"requiredextensions\":        \"requiredExtensions\",\n\t\"requiredfeatures\":          \"requiredFeatures\",\n\t\"specularconstant\":          \"specularConstant\",\n\t\"specularexponent\":          \"specularExponent\",\n\t\"spreadmethod\":              \"spreadMethod\",\n\t\"startoffset\":               \"startOffset\",\n\t\"stddeviation\":              \"stdDeviation\",\n\t\"stitchtiles\":               \"stitchTiles\",\n\t\"surfacescale\":              \"surfaceScale\",\n\t\"systemlanguage\":            \"systemLanguage\",\n\t\"tablevalues\":               \"tableValues\",\n\t\"targetx\":                   \"targetX\",\n\t\"targety\":                   \"targetY\",\n\t\"textlength\":                \"textLength\",\n\t\"viewbox\":                   \"viewBox\",\n\t\"viewtarget\":                \"viewTarget\",\n\t\"xchannelselector\":          \"xChannelSelector\",\n\t\"ychannelselector\":          \"yChannelSelector\",\n\t\"zoomandpan\":                \"zoomAndPan\",\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/node.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage html\n\nimport (\n\t\"golang.org/x/net/html/atom\"\n)\n\n// A NodeType is the type of a Node.\ntype NodeType uint32\n\nconst (\n\tErrorNode NodeType = iota\n\tTextNode\n\tDocumentNode\n\tElementNode\n\tCommentNode\n\tDoctypeNode\n\tscopeMarkerNode\n)\n\n// Section 12.2.3.3 says \"scope markers are inserted when entering applet\n// elements, buttons, object elements, marquees, table cells, and table\n// captions, and are used to prevent formatting from 'leaking'\".\nvar scopeMarker = Node{Type: scopeMarkerNode}\n\n// A Node consists of a NodeType and some Data (tag name for element nodes,\n// content for text) and are part of a tree of Nodes. Element nodes may also\n// have a Namespace and contain a slice of Attributes. Data is unescaped, so\n// that it looks like \"a<b\" rather than \"a&lt;b\". For element nodes, DataAtom\n// is the atom for Data, or zero if Data is not a known tag name.\n//\n// An empty Namespace implies a \"http://www.w3.org/1999/xhtml\" namespace.\n// Similarly, \"math\" is short for \"http://www.w3.org/1998/Math/MathML\", and\n// \"svg\" is short for \"http://www.w3.org/2000/svg\".\ntype Node struct {\n\tParent, FirstChild, LastChild, PrevSibling, NextSibling *Node\n\n\tType      NodeType\n\tDataAtom  atom.Atom\n\tData      string\n\tNamespace string\n\tAttr      []Attribute\n}\n\n// InsertBefore inserts newChild as a child of n, immediately before oldChild\n// in the sequence of n's children. 
oldChild may be nil, in which case newChild\n// is appended to the end of n's children.\n//\n// It will panic if newChild already has a parent or siblings.\nfunc (n *Node) InsertBefore(newChild, oldChild *Node) {\n\tif newChild.Parent != nil || newChild.PrevSibling != nil || newChild.NextSibling != nil {\n\t\tpanic(\"html: InsertBefore called for an attached child Node\")\n\t}\n\tvar prev, next *Node\n\tif oldChild != nil {\n\t\tprev, next = oldChild.PrevSibling, oldChild\n\t} else {\n\t\tprev = n.LastChild\n\t}\n\tif prev != nil {\n\t\tprev.NextSibling = newChild\n\t} else {\n\t\tn.FirstChild = newChild\n\t}\n\tif next != nil {\n\t\tnext.PrevSibling = newChild\n\t} else {\n\t\tn.LastChild = newChild\n\t}\n\tnewChild.Parent = n\n\tnewChild.PrevSibling = prev\n\tnewChild.NextSibling = next\n}\n\n// AppendChild adds a node c as a child of n.\n//\n// It will panic if c already has a parent or siblings.\nfunc (n *Node) AppendChild(c *Node) {\n\tif c.Parent != nil || c.PrevSibling != nil || c.NextSibling != nil {\n\t\tpanic(\"html: AppendChild called for an attached child Node\")\n\t}\n\tlast := n.LastChild\n\tif last != nil {\n\t\tlast.NextSibling = c\n\t} else {\n\t\tn.FirstChild = c\n\t}\n\tn.LastChild = c\n\tc.Parent = n\n\tc.PrevSibling = last\n}\n\n// RemoveChild removes a node c that is a child of n. 
Afterwards, c will have\n// no parent and no siblings.\n//\n// It will panic if c's parent is not n.\nfunc (n *Node) RemoveChild(c *Node) {\n\tif c.Parent != n {\n\t\tpanic(\"html: RemoveChild called for a non-child Node\")\n\t}\n\tif n.FirstChild == c {\n\t\tn.FirstChild = c.NextSibling\n\t}\n\tif c.NextSibling != nil {\n\t\tc.NextSibling.PrevSibling = c.PrevSibling\n\t}\n\tif n.LastChild == c {\n\t\tn.LastChild = c.PrevSibling\n\t}\n\tif c.PrevSibling != nil {\n\t\tc.PrevSibling.NextSibling = c.NextSibling\n\t}\n\tc.Parent = nil\n\tc.PrevSibling = nil\n\tc.NextSibling = nil\n}\n\n// reparentChildren reparents all of src's child nodes to dst.\nfunc reparentChildren(dst, src *Node) {\n\tfor {\n\t\tchild := src.FirstChild\n\t\tif child == nil {\n\t\t\tbreak\n\t\t}\n\t\tsrc.RemoveChild(child)\n\t\tdst.AppendChild(child)\n\t}\n}\n\n// clone returns a new node with the same type, data and attributes.\n// The clone has no parent, no siblings and no children.\nfunc (n *Node) clone() *Node {\n\tm := &Node{\n\t\tType:     n.Type,\n\t\tDataAtom: n.DataAtom,\n\t\tData:     n.Data,\n\t\tAttr:     make([]Attribute, len(n.Attr)),\n\t}\n\tcopy(m.Attr, n.Attr)\n\treturn m\n}\n\n// nodeStack is a stack of nodes.\ntype nodeStack []*Node\n\n// pop pops the stack. 
It will panic if s is empty.\nfunc (s *nodeStack) pop() *Node {\n\ti := len(*s)\n\tn := (*s)[i-1]\n\t*s = (*s)[:i-1]\n\treturn n\n}\n\n// top returns the most recently pushed node, or nil if s is empty.\nfunc (s *nodeStack) top() *Node {\n\tif i := len(*s); i > 0 {\n\t\treturn (*s)[i-1]\n\t}\n\treturn nil\n}\n\n// index returns the index of the top-most occurrence of n in the stack, or -1\n// if n is not present.\nfunc (s *nodeStack) index(n *Node) int {\n\tfor i := len(*s) - 1; i >= 0; i-- {\n\t\tif (*s)[i] == n {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n\n// insert inserts a node at the given index.\nfunc (s *nodeStack) insert(i int, n *Node) {\n\t(*s) = append(*s, nil)\n\tcopy((*s)[i+1:], (*s)[i:])\n\t(*s)[i] = n\n}\n\n// remove removes a node from the stack. It is a no-op if n is not present.\nfunc (s *nodeStack) remove(n *Node) {\n\ti := s.index(n)\n\tif i == -1 {\n\t\treturn\n\t}\n\tcopy((*s)[i:], (*s)[i+1:])\n\tj := len(*s) - 1\n\t(*s)[j] = nil\n\t*s = (*s)[:j]\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/node_test.go",
    "content": "// Copyright 2010 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage html\n\nimport (\n\t\"fmt\"\n)\n\n// checkTreeConsistency checks that a node and its descendants are all\n// consistent in their parent/child/sibling relationships.\nfunc checkTreeConsistency(n *Node) error {\n\treturn checkTreeConsistency1(n, 0)\n}\n\nfunc checkTreeConsistency1(n *Node, depth int) error {\n\tif depth == 1e4 {\n\t\treturn fmt.Errorf(\"html: tree looks like it contains a cycle\")\n\t}\n\tif err := checkNodeConsistency(n); err != nil {\n\t\treturn err\n\t}\n\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\tif err := checkTreeConsistency1(c, depth+1); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// checkNodeConsistency checks that a node's parent/child/sibling relationships\n// are consistent.\nfunc checkNodeConsistency(n *Node) error {\n\tif n == nil {\n\t\treturn nil\n\t}\n\n\tnParent := 0\n\tfor p := n.Parent; p != nil; p = p.Parent {\n\t\tnParent++\n\t\tif nParent == 1e4 {\n\t\t\treturn fmt.Errorf(\"html: parent list looks like an infinite loop\")\n\t\t}\n\t}\n\n\tnForward := 0\n\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\tnForward++\n\t\tif nForward == 1e6 {\n\t\t\treturn fmt.Errorf(\"html: forward list of children looks like an infinite loop\")\n\t\t}\n\t\tif c.Parent != n {\n\t\t\treturn fmt.Errorf(\"html: inconsistent child/parent relationship\")\n\t\t}\n\t}\n\n\tnBackward := 0\n\tfor c := n.LastChild; c != nil; c = c.PrevSibling {\n\t\tnBackward++\n\t\tif nBackward == 1e6 {\n\t\t\treturn fmt.Errorf(\"html: backward list of children looks like an infinite loop\")\n\t\t}\n\t\tif c.Parent != n {\n\t\t\treturn fmt.Errorf(\"html: inconsistent child/parent relationship\")\n\t\t}\n\t}\n\n\tif n.Parent != nil {\n\t\tif n.Parent == n {\n\t\t\treturn fmt.Errorf(\"html: inconsistent parent relationship\")\n\t\t}\n\t\tif 
n.Parent == n.FirstChild {\n\t\t\treturn fmt.Errorf(\"html: inconsistent parent/first relationship\")\n\t\t}\n\t\tif n.Parent == n.LastChild {\n\t\t\treturn fmt.Errorf(\"html: inconsistent parent/last relationship\")\n\t\t}\n\t\tif n.Parent == n.PrevSibling {\n\t\t\treturn fmt.Errorf(\"html: inconsistent parent/prev relationship\")\n\t\t}\n\t\tif n.Parent == n.NextSibling {\n\t\t\treturn fmt.Errorf(\"html: inconsistent parent/next relationship\")\n\t\t}\n\n\t\tparentHasNAsAChild := false\n\t\tfor c := n.Parent.FirstChild; c != nil; c = c.NextSibling {\n\t\t\tif c == n {\n\t\t\t\tparentHasNAsAChild = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !parentHasNAsAChild {\n\t\t\treturn fmt.Errorf(\"html: inconsistent parent/child relationship\")\n\t\t}\n\t}\n\n\tif n.PrevSibling != nil && n.PrevSibling.NextSibling != n {\n\t\treturn fmt.Errorf(\"html: inconsistent prev/next relationship\")\n\t}\n\tif n.NextSibling != nil && n.NextSibling.PrevSibling != n {\n\t\treturn fmt.Errorf(\"html: inconsistent next/prev relationship\")\n\t}\n\n\tif (n.FirstChild == nil) != (n.LastChild == nil) {\n\t\treturn fmt.Errorf(\"html: inconsistent first/last relationship\")\n\t}\n\tif n.FirstChild != nil && n.FirstChild == n.LastChild {\n\t\t// We have a sole child.\n\t\tif n.FirstChild.PrevSibling != nil || n.FirstChild.NextSibling != nil {\n\t\t\treturn fmt.Errorf(\"html: inconsistent sole child's sibling relationship\")\n\t\t}\n\t}\n\n\tseen := map[*Node]bool{}\n\n\tvar last *Node\n\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\tif seen[c] {\n\t\t\treturn fmt.Errorf(\"html: inconsistent repeated child\")\n\t\t}\n\t\tseen[c] = true\n\t\tlast = c\n\t}\n\tif last != n.LastChild {\n\t\treturn fmt.Errorf(\"html: inconsistent last relationship\")\n\t}\n\n\tvar first *Node\n\tfor c := n.LastChild; c != nil; c = c.PrevSibling {\n\t\tif !seen[c] {\n\t\t\treturn fmt.Errorf(\"html: inconsistent missing child\")\n\t\t}\n\t\tdelete(seen, c)\n\t\tfirst = c\n\t}\n\tif first != n.FirstChild 
{\n\t\treturn fmt.Errorf(\"html: inconsistent first relationship\")\n\t}\n\n\tif len(seen) != 0 {\n\t\treturn fmt.Errorf(\"html: inconsistent forwards/backwards child list\")\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/parse.go",
    "content": "// Copyright 2010 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage html\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\ta \"golang.org/x/net/html/atom\"\n)\n\n// A parser implements the HTML5 parsing algorithm:\n// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction\ntype parser struct {\n\t// tokenizer provides the tokens for the parser.\n\ttokenizer *Tokenizer\n\t// tok is the most recently read token.\n\ttok Token\n\t// Self-closing tags like <hr/> are treated as start tags, except that\n\t// hasSelfClosingToken is set while they are being processed.\n\thasSelfClosingToken bool\n\t// doc is the document root element.\n\tdoc *Node\n\t// The stack of open elements (section 12.2.3.2) and active formatting\n\t// elements (section 12.2.3.3).\n\toe, afe nodeStack\n\t// Element pointers (section 12.2.3.4).\n\thead, form *Node\n\t// Other parsing state flags (section 12.2.3.5).\n\tscripting, framesetOK bool\n\t// im is the current insertion mode.\n\tim insertionMode\n\t// originalIM is the insertion mode to go back to after completing a text\n\t// or inTableText insertion mode.\n\toriginalIM insertionMode\n\t// fosterParenting is whether new elements should be inserted according to\n\t// the foster parenting rules (section 12.2.5.3).\n\tfosterParenting bool\n\t// quirks is whether the parser is operating in \"quirks mode.\"\n\tquirks bool\n\t// fragment is whether the parser is parsing an HTML fragment.\n\tfragment bool\n\t// context is the context element when parsing an HTML fragment\n\t// (section 12.4).\n\tcontext *Node\n}\n\nfunc (p *parser) top() *Node {\n\tif n := p.oe.top(); n != nil {\n\t\treturn n\n\t}\n\treturn p.doc\n}\n\n// Stop tags for use in popUntil. 
These come from section 12.2.3.2.\nvar (\n\tdefaultScopeStopTags = map[string][]a.Atom{\n\t\t\"\":     {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template},\n\t\t\"math\": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext},\n\t\t\"svg\":  {a.Desc, a.ForeignObject, a.Title},\n\t}\n)\n\ntype scope int\n\nconst (\n\tdefaultScope scope = iota\n\tlistItemScope\n\tbuttonScope\n\ttableScope\n\ttableRowScope\n\ttableBodyScope\n\tselectScope\n)\n\n// popUntil pops the stack of open elements at the highest element whose tag\n// is in matchTags, provided there is no higher element in the scope's stop\n// tags (as defined in section 12.2.3.2). It returns whether or not there was\n// such an element. If there was not, popUntil leaves the stack unchanged.\n//\n// For example, the set of stop tags for table scope is: \"html\", \"table\". If\n// the stack was:\n// [\"html\", \"body\", \"font\", \"table\", \"b\", \"i\", \"u\"]\n// then popUntil(tableScope, \"font\") would return false, but\n// popUntil(tableScope, \"i\") would return true and the stack would become:\n// [\"html\", \"body\", \"font\", \"table\", \"b\"]\n//\n// If an element's tag is in both the stop tags and matchTags, then the stack\n// will be popped and the function returns true (provided, of course, there was\n// no higher element in the stack that was also in the stop tags). For example,\n// popUntil(tableScope, \"table\") returns true and leaves:\n// [\"html\", \"body\", \"font\"]\nfunc (p *parser) popUntil(s scope, matchTags ...a.Atom) bool {\n\tif i := p.indexOfElementInScope(s, matchTags...); i != -1 {\n\t\tp.oe = p.oe[:i]\n\t\treturn true\n\t}\n\treturn false\n}\n\n// indexOfElementInScope returns the index in p.oe of the highest element whose\n// tag is in matchTags that is in scope. 
If no matching element is in scope, it\n// returns -1.\nfunc (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int {\n\tfor i := len(p.oe) - 1; i >= 0; i-- {\n\t\ttagAtom := p.oe[i].DataAtom\n\t\tif p.oe[i].Namespace == \"\" {\n\t\t\tfor _, t := range matchTags {\n\t\t\t\tif t == tagAtom {\n\t\t\t\t\treturn i\n\t\t\t\t}\n\t\t\t}\n\t\t\tswitch s {\n\t\t\tcase defaultScope:\n\t\t\t\t// No-op.\n\t\t\tcase listItemScope:\n\t\t\t\tif tagAtom == a.Ol || tagAtom == a.Ul {\n\t\t\t\t\treturn -1\n\t\t\t\t}\n\t\t\tcase buttonScope:\n\t\t\t\tif tagAtom == a.Button {\n\t\t\t\t\treturn -1\n\t\t\t\t}\n\t\t\tcase tableScope:\n\t\t\t\tif tagAtom == a.Html || tagAtom == a.Table {\n\t\t\t\t\treturn -1\n\t\t\t\t}\n\t\t\tcase selectScope:\n\t\t\t\tif tagAtom != a.Optgroup && tagAtom != a.Option {\n\t\t\t\t\treturn -1\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tpanic(\"unreachable\")\n\t\t\t}\n\t\t}\n\t\tswitch s {\n\t\tcase defaultScope, listItemScope, buttonScope:\n\t\t\tfor _, t := range defaultScopeStopTags[p.oe[i].Namespace] {\n\t\t\t\tif t == tagAtom {\n\t\t\t\t\treturn -1\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn -1\n}\n\n// elementInScope is like popUntil, except that it doesn't modify the stack of\n// open elements.\nfunc (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool {\n\treturn p.indexOfElementInScope(s, matchTags...) 
!= -1\n}\n\n// clearStackToContext pops elements off the stack of open elements until a\n// scope-defined element is found.\nfunc (p *parser) clearStackToContext(s scope) {\n\tfor i := len(p.oe) - 1; i >= 0; i-- {\n\t\ttagAtom := p.oe[i].DataAtom\n\t\tswitch s {\n\t\tcase tableScope:\n\t\t\tif tagAtom == a.Html || tagAtom == a.Table {\n\t\t\t\tp.oe = p.oe[:i+1]\n\t\t\t\treturn\n\t\t\t}\n\t\tcase tableRowScope:\n\t\t\tif tagAtom == a.Html || tagAtom == a.Tr {\n\t\t\t\tp.oe = p.oe[:i+1]\n\t\t\t\treturn\n\t\t\t}\n\t\tcase tableBodyScope:\n\t\t\tif tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead {\n\t\t\t\tp.oe = p.oe[:i+1]\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(\"unreachable\")\n\t\t}\n\t}\n}\n\n// generateImpliedEndTags pops nodes off the stack of open elements as long as\n// the top node has a tag name of dd, dt, li, option, optgroup, p, rp, or rt.\n// If exceptions are specified, nodes with that name will not be popped off.\nfunc (p *parser) generateImpliedEndTags(exceptions ...string) {\n\tvar i int\nloop:\n\tfor i = len(p.oe) - 1; i >= 0; i-- {\n\t\tn := p.oe[i]\n\t\tif n.Type == ElementNode {\n\t\t\tswitch n.DataAtom {\n\t\t\tcase a.Dd, a.Dt, a.Li, a.Option, a.Optgroup, a.P, a.Rp, a.Rt:\n\t\t\t\tfor _, except := range exceptions {\n\t\t\t\t\tif n.Data == except {\n\t\t\t\t\t\tbreak loop\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tbreak\n\t}\n\n\tp.oe = p.oe[:i+1]\n}\n\n// addChild adds a child node n to the top element, and pushes n onto the stack\n// of open elements if it is an element node.\nfunc (p *parser) addChild(n *Node) {\n\tif p.shouldFosterParent() {\n\t\tp.fosterParent(n)\n\t} else {\n\t\tp.top().AppendChild(n)\n\t}\n\n\tif n.Type == ElementNode {\n\t\tp.oe = append(p.oe, n)\n\t}\n}\n\n// shouldFosterParent returns whether the next node to be added should be\n// foster parented.\nfunc (p *parser) shouldFosterParent() bool {\n\tif p.fosterParenting {\n\t\tswitch p.top().DataAtom 
{\n\t\tcase a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// fosterParent adds a child node according to the foster parenting rules.\n// Section 12.2.5.3, \"foster parenting\".\nfunc (p *parser) fosterParent(n *Node) {\n\tvar table, parent, prev *Node\n\tvar i int\n\tfor i = len(p.oe) - 1; i >= 0; i-- {\n\t\tif p.oe[i].DataAtom == a.Table {\n\t\t\ttable = p.oe[i]\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif table == nil {\n\t\t// The foster parent is the html element.\n\t\tparent = p.oe[0]\n\t} else {\n\t\tparent = table.Parent\n\t}\n\tif parent == nil {\n\t\tparent = p.oe[i-1]\n\t}\n\n\tif table != nil {\n\t\tprev = table.PrevSibling\n\t} else {\n\t\tprev = parent.LastChild\n\t}\n\tif prev != nil && prev.Type == TextNode && n.Type == TextNode {\n\t\tprev.Data += n.Data\n\t\treturn\n\t}\n\n\tparent.InsertBefore(n, table)\n}\n\n// addText adds text to the preceding node if it is a text node, or else it\n// calls addChild with a new text node.\nfunc (p *parser) addText(text string) {\n\tif text == \"\" {\n\t\treturn\n\t}\n\n\tif p.shouldFosterParent() {\n\t\tp.fosterParent(&Node{\n\t\t\tType: TextNode,\n\t\t\tData: text,\n\t\t})\n\t\treturn\n\t}\n\n\tt := p.top()\n\tif n := t.LastChild; n != nil && n.Type == TextNode {\n\t\tn.Data += text\n\t\treturn\n\t}\n\tp.addChild(&Node{\n\t\tType: TextNode,\n\t\tData: text,\n\t})\n}\n\n// addElement adds a child element based on the current token.\nfunc (p *parser) addElement() {\n\tp.addChild(&Node{\n\t\tType:     ElementNode,\n\t\tDataAtom: p.tok.DataAtom,\n\t\tData:     p.tok.Data,\n\t\tAttr:     p.tok.Attr,\n\t})\n}\n\n// Section 12.2.3.3.\nfunc (p *parser) addFormattingElement() {\n\ttagAtom, attr := p.tok.DataAtom, p.tok.Attr\n\tp.addElement()\n\n\t// Implement the Noah's Ark clause, but with three per family instead of two.\n\tidenticalElements := 0\nfindIdenticalElements:\n\tfor i := len(p.afe) - 1; i >= 0; i-- {\n\t\tn := p.afe[i]\n\t\tif n.Type == scopeMarkerNode 
{\n\t\t\tbreak\n\t\t}\n\t\tif n.Type != ElementNode {\n\t\t\tcontinue\n\t\t}\n\t\tif n.Namespace != \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif n.DataAtom != tagAtom {\n\t\t\tcontinue\n\t\t}\n\t\tif len(n.Attr) != len(attr) {\n\t\t\tcontinue\n\t\t}\n\tcompareAttributes:\n\t\tfor _, t0 := range n.Attr {\n\t\t\tfor _, t1 := range attr {\n\t\t\t\tif t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val {\n\t\t\t\t\t// Found a match for this attribute, continue with the next attribute.\n\t\t\t\t\tcontinue compareAttributes\n\t\t\t\t}\n\t\t\t}\n\t\t\t// If we get here, there is no attribute that matches a.\n\t\t\t// Therefore the element is not identical to the new one.\n\t\t\tcontinue findIdenticalElements\n\t\t}\n\n\t\tidenticalElements++\n\t\tif identicalElements >= 3 {\n\t\t\tp.afe.remove(n)\n\t\t}\n\t}\n\n\tp.afe = append(p.afe, p.top())\n}\n\n// Section 12.2.3.3.\nfunc (p *parser) clearActiveFormattingElements() {\n\tfor {\n\t\tn := p.afe.pop()\n\t\tif len(p.afe) == 0 || n.Type == scopeMarkerNode {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// Section 12.2.3.3.\nfunc (p *parser) reconstructActiveFormattingElements() {\n\tn := p.afe.top()\n\tif n == nil {\n\t\treturn\n\t}\n\tif n.Type == scopeMarkerNode || p.oe.index(n) != -1 {\n\t\treturn\n\t}\n\ti := len(p.afe) - 1\n\tfor n.Type != scopeMarkerNode && p.oe.index(n) == -1 {\n\t\tif i == 0 {\n\t\t\ti = -1\n\t\t\tbreak\n\t\t}\n\t\ti--\n\t\tn = p.afe[i]\n\t}\n\tfor {\n\t\ti++\n\t\tclone := p.afe[i].clone()\n\t\tp.addChild(clone)\n\t\tp.afe[i] = clone\n\t\tif i == len(p.afe)-1 {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\n// Section 12.2.4.\nfunc (p *parser) acknowledgeSelfClosingTag() {\n\tp.hasSelfClosingToken = false\n}\n\n// An insertion mode (section 12.2.3.1) is the state transition function from\n// a particular state in the HTML5 parser's state machine. 
It updates the\n// parser's fields depending on parser.tok (where ErrorToken means EOF).\n// It returns whether the token was consumed.\ntype insertionMode func(*parser) bool\n\n// setOriginalIM sets the insertion mode to return to after completing a text or\n// inTableText insertion mode.\n// Section 12.2.3.1, \"using the rules for\".\nfunc (p *parser) setOriginalIM() {\n\tif p.originalIM != nil {\n\t\tpanic(\"html: bad parser state: originalIM was set twice\")\n\t}\n\tp.originalIM = p.im\n}\n\n// Section 12.2.3.1, \"reset the insertion mode\".\nfunc (p *parser) resetInsertionMode() {\n\tfor i := len(p.oe) - 1; i >= 0; i-- {\n\t\tn := p.oe[i]\n\t\tif i == 0 && p.context != nil {\n\t\t\tn = p.context\n\t\t}\n\n\t\tswitch n.DataAtom {\n\t\tcase a.Select:\n\t\t\tp.im = inSelectIM\n\t\tcase a.Td, a.Th:\n\t\t\tp.im = inCellIM\n\t\tcase a.Tr:\n\t\t\tp.im = inRowIM\n\t\tcase a.Tbody, a.Thead, a.Tfoot:\n\t\t\tp.im = inTableBodyIM\n\t\tcase a.Caption:\n\t\t\tp.im = inCaptionIM\n\t\tcase a.Colgroup:\n\t\t\tp.im = inColumnGroupIM\n\t\tcase a.Table:\n\t\t\tp.im = inTableIM\n\t\tcase a.Head:\n\t\t\tp.im = inBodyIM\n\t\tcase a.Body:\n\t\t\tp.im = inBodyIM\n\t\tcase a.Frameset:\n\t\t\tp.im = inFramesetIM\n\t\tcase a.Html:\n\t\t\tp.im = beforeHeadIM\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t\treturn\n\t}\n\tp.im = inBodyIM\n}\n\nconst whitespace = \" \\t\\r\\n\\f\"\n\n// Section 12.2.5.4.1.\nfunc initialIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase TextToken:\n\t\tp.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)\n\t\tif len(p.tok.Data) == 0 {\n\t\t\t// It was all whitespace, so ignore it.\n\t\t\treturn true\n\t\t}\n\tcase CommentToken:\n\t\tp.doc.AppendChild(&Node{\n\t\t\tType: CommentNode,\n\t\t\tData: p.tok.Data,\n\t\t})\n\t\treturn true\n\tcase DoctypeToken:\n\t\tn, quirks := parseDoctype(p.tok.Data)\n\t\tp.doc.AppendChild(n)\n\t\tp.quirks = quirks\n\t\tp.im = beforeHTMLIM\n\t\treturn true\n\t}\n\tp.quirks = true\n\tp.im = beforeHTMLIM\n\treturn false\n}\n\n// Section 
12.2.5.4.2.\nfunc beforeHTMLIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase DoctypeToken:\n\t\t// Ignore the token.\n\t\treturn true\n\tcase TextToken:\n\t\tp.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)\n\t\tif len(p.tok.Data) == 0 {\n\t\t\t// It was all whitespace, so ignore it.\n\t\t\treturn true\n\t\t}\n\tcase StartTagToken:\n\t\tif p.tok.DataAtom == a.Html {\n\t\t\tp.addElement()\n\t\t\tp.im = beforeHeadIM\n\t\t\treturn true\n\t\t}\n\tcase EndTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Head, a.Body, a.Html, a.Br:\n\t\t\tp.parseImpliedToken(StartTagToken, a.Html, a.Html.String())\n\t\t\treturn false\n\t\tdefault:\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\t}\n\tcase CommentToken:\n\t\tp.doc.AppendChild(&Node{\n\t\t\tType: CommentNode,\n\t\t\tData: p.tok.Data,\n\t\t})\n\t\treturn true\n\t}\n\tp.parseImpliedToken(StartTagToken, a.Html, a.Html.String())\n\treturn false\n}\n\n// Section 12.2.5.4.3.\nfunc beforeHeadIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase TextToken:\n\t\tp.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)\n\t\tif len(p.tok.Data) == 0 {\n\t\t\t// It was all whitespace, so ignore it.\n\t\t\treturn true\n\t\t}\n\tcase StartTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Head:\n\t\t\tp.addElement()\n\t\t\tp.head = p.top()\n\t\t\tp.im = inHeadIM\n\t\t\treturn true\n\t\tcase a.Html:\n\t\t\treturn inBodyIM(p)\n\t\t}\n\tcase EndTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Head, a.Body, a.Html, a.Br:\n\t\t\tp.parseImpliedToken(StartTagToken, a.Head, a.Head.String())\n\t\t\treturn false\n\t\tdefault:\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\t}\n\tcase CommentToken:\n\t\tp.addChild(&Node{\n\t\t\tType: CommentNode,\n\t\t\tData: p.tok.Data,\n\t\t})\n\t\treturn true\n\tcase DoctypeToken:\n\t\t// Ignore the token.\n\t\treturn true\n\t}\n\n\tp.parseImpliedToken(StartTagToken, a.Head, a.Head.String())\n\treturn false\n}\n\n// Section 12.2.5.4.4.\nfunc inHeadIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase 
TextToken:\n\t\ts := strings.TrimLeft(p.tok.Data, whitespace)\n\t\tif len(s) < len(p.tok.Data) {\n\t\t\t// Add the initial whitespace to the current node.\n\t\t\tp.addText(p.tok.Data[:len(p.tok.Data)-len(s)])\n\t\t\tif s == \"\" {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tp.tok.Data = s\n\t\t}\n\tcase StartTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Html:\n\t\t\treturn inBodyIM(p)\n\t\tcase a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta:\n\t\t\tp.addElement()\n\t\t\tp.oe.pop()\n\t\t\tp.acknowledgeSelfClosingTag()\n\t\t\treturn true\n\t\tcase a.Script, a.Title, a.Noscript, a.Noframes, a.Style:\n\t\t\tp.addElement()\n\t\t\tp.setOriginalIM()\n\t\t\tp.im = textIM\n\t\t\treturn true\n\t\tcase a.Head:\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\t}\n\tcase EndTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Head:\n\t\t\tn := p.oe.pop()\n\t\t\tif n.DataAtom != a.Head {\n\t\t\t\tpanic(\"html: bad parser state: <head> element not found, in the in-head insertion mode\")\n\t\t\t}\n\t\t\tp.im = afterHeadIM\n\t\t\treturn true\n\t\tcase a.Body, a.Html, a.Br:\n\t\t\tp.parseImpliedToken(EndTagToken, a.Head, a.Head.String())\n\t\t\treturn false\n\t\tdefault:\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\t}\n\tcase CommentToken:\n\t\tp.addChild(&Node{\n\t\t\tType: CommentNode,\n\t\t\tData: p.tok.Data,\n\t\t})\n\t\treturn true\n\tcase DoctypeToken:\n\t\t// Ignore the token.\n\t\treturn true\n\t}\n\n\tp.parseImpliedToken(EndTagToken, a.Head, a.Head.String())\n\treturn false\n}\n\n// Section 12.2.5.4.6.\nfunc afterHeadIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase TextToken:\n\t\ts := strings.TrimLeft(p.tok.Data, whitespace)\n\t\tif len(s) < len(p.tok.Data) {\n\t\t\t// Add the initial whitespace to the current node.\n\t\t\tp.addText(p.tok.Data[:len(p.tok.Data)-len(s)])\n\t\t\tif s == \"\" {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tp.tok.Data = s\n\t\t}\n\tcase StartTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Html:\n\t\t\treturn inBodyIM(p)\n\t\tcase 
a.Body:\n\t\t\tp.addElement()\n\t\t\tp.framesetOK = false\n\t\t\tp.im = inBodyIM\n\t\t\treturn true\n\t\tcase a.Frameset:\n\t\t\tp.addElement()\n\t\t\tp.im = inFramesetIM\n\t\t\treturn true\n\t\tcase a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:\n\t\t\tp.oe = append(p.oe, p.head)\n\t\t\tdefer p.oe.remove(p.head)\n\t\t\treturn inHeadIM(p)\n\t\tcase a.Head:\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\t}\n\tcase EndTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Body, a.Html, a.Br:\n\t\t\t// Drop down to creating an implied <body> tag.\n\t\tdefault:\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\t}\n\tcase CommentToken:\n\t\tp.addChild(&Node{\n\t\t\tType: CommentNode,\n\t\t\tData: p.tok.Data,\n\t\t})\n\t\treturn true\n\tcase DoctypeToken:\n\t\t// Ignore the token.\n\t\treturn true\n\t}\n\n\tp.parseImpliedToken(StartTagToken, a.Body, a.Body.String())\n\tp.framesetOK = true\n\treturn false\n}\n\n// copyAttributes copies attributes of src not found on dst to dst.\nfunc copyAttributes(dst *Node, src Token) {\n\tif len(src.Attr) == 0 {\n\t\treturn\n\t}\n\tattr := map[string]string{}\n\tfor _, t := range dst.Attr {\n\t\tattr[t.Key] = t.Val\n\t}\n\tfor _, t := range src.Attr {\n\t\tif _, ok := attr[t.Key]; !ok {\n\t\t\tdst.Attr = append(dst.Attr, t)\n\t\t\tattr[t.Key] = t.Val\n\t\t}\n\t}\n}\n\n// Section 12.2.5.4.7.\nfunc inBodyIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase TextToken:\n\t\td := p.tok.Data\n\t\tswitch n := p.oe.top(); n.DataAtom {\n\t\tcase a.Pre, a.Listing:\n\t\t\tif n.FirstChild == nil {\n\t\t\t\t// Ignore a newline at the start of a <pre> block.\n\t\t\t\tif d != \"\" && d[0] == '\\r' {\n\t\t\t\t\td = d[1:]\n\t\t\t\t}\n\t\t\t\tif d != \"\" && d[0] == '\\n' {\n\t\t\t\t\td = d[1:]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\td = strings.Replace(d, \"\\x00\", \"\", -1)\n\t\tif d == \"\" {\n\t\t\treturn true\n\t\t}\n\t\tp.reconstructActiveFormattingElements()\n\t\tp.addText(d)\n\t\tif p.framesetOK && 
strings.TrimLeft(d, whitespace) != \"\" {\n\t\t\t// There were non-whitespace characters inserted.\n\t\t\tp.framesetOK = false\n\t\t}\n\tcase StartTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Html:\n\t\t\tcopyAttributes(p.oe[0], p.tok)\n\t\tcase a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:\n\t\t\treturn inHeadIM(p)\n\t\tcase a.Body:\n\t\t\tif len(p.oe) >= 2 {\n\t\t\t\tbody := p.oe[1]\n\t\t\t\tif body.Type == ElementNode && body.DataAtom == a.Body {\n\t\t\t\t\tp.framesetOK = false\n\t\t\t\t\tcopyAttributes(body, p.tok)\n\t\t\t\t}\n\t\t\t}\n\t\tcase a.Frameset:\n\t\t\tif !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {\n\t\t\t\t// Ignore the token.\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tbody := p.oe[1]\n\t\t\tif body.Parent != nil {\n\t\t\t\tbody.Parent.RemoveChild(body)\n\t\t\t}\n\t\t\tp.oe = p.oe[:1]\n\t\t\tp.addElement()\n\t\t\tp.im = inFramesetIM\n\t\t\treturn true\n\t\tcase a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:\n\t\t\tp.popUntil(buttonScope, a.P)\n\t\t\tp.addElement()\n\t\tcase a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:\n\t\t\tp.popUntil(buttonScope, a.P)\n\t\t\tswitch n := p.top(); n.DataAtom {\n\t\t\tcase a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:\n\t\t\t\tp.oe.pop()\n\t\t\t}\n\t\t\tp.addElement()\n\t\tcase a.Pre, a.Listing:\n\t\t\tp.popUntil(buttonScope, a.P)\n\t\t\tp.addElement()\n\t\t\t// The newline, if any, will be dealt with by the TextToken case.\n\t\t\tp.framesetOK = false\n\t\tcase a.Form:\n\t\t\tif p.form == nil {\n\t\t\t\tp.popUntil(buttonScope, a.P)\n\t\t\t\tp.addElement()\n\t\t\t\tp.form = p.top()\n\t\t\t}\n\t\tcase a.Li:\n\t\t\tp.framesetOK = false\n\t\t\tfor i := len(p.oe) - 1; i >= 0; i-- {\n\t\t\t\tnode := p.oe[i]\n\t\t\t\tswitch node.DataAtom {\n\t\t\t\tcase a.Li:\n\t\t\t\t\tp.oe = p.oe[:i]\n\t\t\t\tcase a.Address, a.Div, 
a.P:\n\t\t\t\t\tcontinue\n\t\t\t\tdefault:\n\t\t\t\t\tif !isSpecialElement(node) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tp.popUntil(buttonScope, a.P)\n\t\t\tp.addElement()\n\t\tcase a.Dd, a.Dt:\n\t\t\tp.framesetOK = false\n\t\t\tfor i := len(p.oe) - 1; i >= 0; i-- {\n\t\t\t\tnode := p.oe[i]\n\t\t\t\tswitch node.DataAtom {\n\t\t\t\tcase a.Dd, a.Dt:\n\t\t\t\t\tp.oe = p.oe[:i]\n\t\t\t\tcase a.Address, a.Div, a.P:\n\t\t\t\t\tcontinue\n\t\t\t\tdefault:\n\t\t\t\t\tif !isSpecialElement(node) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tp.popUntil(buttonScope, a.P)\n\t\t\tp.addElement()\n\t\tcase a.Plaintext:\n\t\t\tp.popUntil(buttonScope, a.P)\n\t\t\tp.addElement()\n\t\tcase a.Button:\n\t\t\tp.popUntil(defaultScope, a.Button)\n\t\t\tp.reconstructActiveFormattingElements()\n\t\t\tp.addElement()\n\t\t\tp.framesetOK = false\n\t\tcase a.A:\n\t\t\tfor i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {\n\t\t\t\tif n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {\n\t\t\t\t\tp.inBodyEndTagFormatting(a.A)\n\t\t\t\t\tp.oe.remove(n)\n\t\t\t\t\tp.afe.remove(n)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tp.reconstructActiveFormattingElements()\n\t\t\tp.addFormattingElement()\n\t\tcase a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:\n\t\t\tp.reconstructActiveFormattingElements()\n\t\t\tp.addFormattingElement()\n\t\tcase a.Nobr:\n\t\t\tp.reconstructActiveFormattingElements()\n\t\t\tif p.elementInScope(defaultScope, a.Nobr) {\n\t\t\t\tp.inBodyEndTagFormatting(a.Nobr)\n\t\t\t\tp.reconstructActiveFormattingElements()\n\t\t\t}\n\t\t\tp.addFormattingElement()\n\t\tcase a.Applet, a.Marquee, a.Object:\n\t\t\tp.reconstructActiveFormattingElements()\n\t\t\tp.addElement()\n\t\t\tp.afe = append(p.afe, &scopeMarker)\n\t\t\tp.framesetOK = false\n\t\tcase a.Table:\n\t\t\tif !p.quirks {\n\t\t\t\tp.popUntil(buttonScope, 
a.P)\n\t\t\t}\n\t\t\tp.addElement()\n\t\t\tp.framesetOK = false\n\t\t\tp.im = inTableIM\n\t\t\treturn true\n\t\tcase a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:\n\t\t\tp.reconstructActiveFormattingElements()\n\t\t\tp.addElement()\n\t\t\tp.oe.pop()\n\t\t\tp.acknowledgeSelfClosingTag()\n\t\t\tif p.tok.DataAtom == a.Input {\n\t\t\t\tfor _, t := range p.tok.Attr {\n\t\t\t\t\tif t.Key == \"type\" {\n\t\t\t\t\t\tif strings.ToLower(t.Val) == \"hidden\" {\n\t\t\t\t\t\t\t// Skip setting framesetOK = false\n\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tp.framesetOK = false\n\t\tcase a.Param, a.Source, a.Track:\n\t\t\tp.addElement()\n\t\t\tp.oe.pop()\n\t\t\tp.acknowledgeSelfClosingTag()\n\t\tcase a.Hr:\n\t\t\tp.popUntil(buttonScope, a.P)\n\t\t\tp.addElement()\n\t\t\tp.oe.pop()\n\t\t\tp.acknowledgeSelfClosingTag()\n\t\t\tp.framesetOK = false\n\t\tcase a.Image:\n\t\t\tp.tok.DataAtom = a.Img\n\t\t\tp.tok.Data = a.Img.String()\n\t\t\treturn false\n\t\tcase a.Isindex:\n\t\t\tif p.form != nil {\n\t\t\t\t// Ignore the token.\n\t\t\t\treturn true\n\t\t\t}\n\t\t\taction := \"\"\n\t\t\tprompt := \"This is a searchable index. 
Enter search keywords: \"\n\t\t\tattr := []Attribute{{Key: \"name\", Val: \"isindex\"}}\n\t\t\tfor _, t := range p.tok.Attr {\n\t\t\t\tswitch t.Key {\n\t\t\t\tcase \"action\":\n\t\t\t\t\taction = t.Val\n\t\t\t\tcase \"name\":\n\t\t\t\t\t// Ignore the attribute.\n\t\t\t\tcase \"prompt\":\n\t\t\t\t\tprompt = t.Val\n\t\t\t\tdefault:\n\t\t\t\t\tattr = append(attr, t)\n\t\t\t\t}\n\t\t\t}\n\t\t\tp.acknowledgeSelfClosingTag()\n\t\t\tp.popUntil(buttonScope, a.P)\n\t\t\tp.parseImpliedToken(StartTagToken, a.Form, a.Form.String())\n\t\t\tif action != \"\" {\n\t\t\t\tp.form.Attr = []Attribute{{Key: \"action\", Val: action}}\n\t\t\t}\n\t\t\tp.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())\n\t\t\tp.parseImpliedToken(StartTagToken, a.Label, a.Label.String())\n\t\t\tp.addText(prompt)\n\t\t\tp.addChild(&Node{\n\t\t\t\tType:     ElementNode,\n\t\t\t\tDataAtom: a.Input,\n\t\t\t\tData:     a.Input.String(),\n\t\t\t\tAttr:     attr,\n\t\t\t})\n\t\t\tp.oe.pop()\n\t\t\tp.parseImpliedToken(EndTagToken, a.Label, a.Label.String())\n\t\t\tp.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())\n\t\t\tp.parseImpliedToken(EndTagToken, a.Form, a.Form.String())\n\t\tcase a.Textarea:\n\t\t\tp.addElement()\n\t\t\tp.setOriginalIM()\n\t\t\tp.framesetOK = false\n\t\t\tp.im = textIM\n\t\tcase a.Xmp:\n\t\t\tp.popUntil(buttonScope, a.P)\n\t\t\tp.reconstructActiveFormattingElements()\n\t\t\tp.framesetOK = false\n\t\t\tp.addElement()\n\t\t\tp.setOriginalIM()\n\t\t\tp.im = textIM\n\t\tcase a.Iframe:\n\t\t\tp.framesetOK = false\n\t\t\tp.addElement()\n\t\t\tp.setOriginalIM()\n\t\t\tp.im = textIM\n\t\tcase a.Noembed, a.Noscript:\n\t\t\tp.addElement()\n\t\t\tp.setOriginalIM()\n\t\t\tp.im = textIM\n\t\tcase a.Select:\n\t\t\tp.reconstructActiveFormattingElements()\n\t\t\tp.addElement()\n\t\t\tp.framesetOK = false\n\t\t\tp.im = inSelectIM\n\t\t\treturn true\n\t\tcase a.Optgroup, a.Option:\n\t\t\tif p.top().DataAtom == a.Option 
{\n\t\t\t\tp.oe.pop()\n\t\t\t}\n\t\t\tp.reconstructActiveFormattingElements()\n\t\t\tp.addElement()\n\t\tcase a.Rp, a.Rt:\n\t\t\tif p.elementInScope(defaultScope, a.Ruby) {\n\t\t\t\tp.generateImpliedEndTags()\n\t\t\t}\n\t\t\tp.addElement()\n\t\tcase a.Math, a.Svg:\n\t\t\tp.reconstructActiveFormattingElements()\n\t\t\tif p.tok.DataAtom == a.Math {\n\t\t\t\tadjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)\n\t\t\t} else {\n\t\t\t\tadjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)\n\t\t\t}\n\t\t\tadjustForeignAttributes(p.tok.Attr)\n\t\t\tp.addElement()\n\t\t\tp.top().Namespace = p.tok.Data\n\t\t\tif p.hasSelfClosingToken {\n\t\t\t\tp.oe.pop()\n\t\t\t\tp.acknowledgeSelfClosingTag()\n\t\t\t}\n\t\t\treturn true\n\t\tcase a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:\n\t\t\t// Ignore the token.\n\t\tdefault:\n\t\t\tp.reconstructActiveFormattingElements()\n\t\t\tp.addElement()\n\t\t}\n\tcase EndTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Body:\n\t\t\tif p.elementInScope(defaultScope, a.Body) {\n\t\t\t\tp.im = afterBodyIM\n\t\t\t}\n\t\tcase a.Html:\n\t\t\tif p.elementInScope(defaultScope, a.Body) {\n\t\t\t\tp.parseImpliedToken(EndTagToken, a.Body, a.Body.String())\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn true\n\t\tcase a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:\n\t\t\tp.popUntil(defaultScope, p.tok.DataAtom)\n\t\tcase a.Form:\n\t\t\tnode := p.form\n\t\t\tp.form = nil\n\t\t\ti := p.indexOfElementInScope(defaultScope, a.Form)\n\t\t\tif node == nil || i == -1 || p.oe[i] != node {\n\t\t\t\t// Ignore the token.\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tp.generateImpliedEndTags()\n\t\t\tp.oe.remove(node)\n\t\tcase a.P:\n\t\t\tif !p.elementInScope(buttonScope, a.P) {\n\t\t\t\tp.parseImpliedToken(StartTagToken, a.P, 
a.P.String())\n\t\t\t}\n\t\t\tp.popUntil(buttonScope, a.P)\n\t\tcase a.Li:\n\t\t\tp.popUntil(listItemScope, a.Li)\n\t\tcase a.Dd, a.Dt:\n\t\t\tp.popUntil(defaultScope, p.tok.DataAtom)\n\t\tcase a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:\n\t\t\tp.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)\n\t\tcase a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:\n\t\t\tp.inBodyEndTagFormatting(p.tok.DataAtom)\n\t\tcase a.Applet, a.Marquee, a.Object:\n\t\t\tif p.popUntil(defaultScope, p.tok.DataAtom) {\n\t\t\t\tp.clearActiveFormattingElements()\n\t\t\t}\n\t\tcase a.Br:\n\t\t\tp.tok.Type = StartTagToken\n\t\t\treturn false\n\t\tdefault:\n\t\t\tp.inBodyEndTagOther(p.tok.DataAtom)\n\t\t}\n\tcase CommentToken:\n\t\tp.addChild(&Node{\n\t\t\tType: CommentNode,\n\t\t\tData: p.tok.Data,\n\t\t})\n\t}\n\n\treturn true\n}\n\nfunc (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {\n\t// This is the \"adoption agency\" algorithm, described at\n\t// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency\n\n\t// TODO: this is a fairly literal line-by-line translation of that algorithm.\n\t// Once the code successfully parses the comprehensive test suite, we should\n\t// refactor this code to be more idiomatic.\n\n\t// Steps 1-4. The outer loop.\n\tfor i := 0; i < 8; i++ {\n\t\t// Step 5. Find the formatting element.\n\t\tvar formattingElement *Node\n\t\tfor j := len(p.afe) - 1; j >= 0; j-- {\n\t\t\tif p.afe[j].Type == scopeMarkerNode {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif p.afe[j].DataAtom == tagAtom {\n\t\t\t\tformattingElement = p.afe[j]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif formattingElement == nil {\n\t\t\tp.inBodyEndTagOther(tagAtom)\n\t\t\treturn\n\t\t}\n\t\tfeIndex := p.oe.index(formattingElement)\n\t\tif feIndex == -1 {\n\t\t\tp.afe.remove(formattingElement)\n\t\t\treturn\n\t\t}\n\t\tif !p.elementInScope(defaultScope, tagAtom) {\n\t\t\t// Ignore the tag.\n\t\t\treturn\n\t\t}\n\n\t\t// Steps 9-10. 
Find the furthest block.\n\t\tvar furthestBlock *Node\n\t\tfor _, e := range p.oe[feIndex:] {\n\t\t\tif isSpecialElement(e) {\n\t\t\t\tfurthestBlock = e\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif furthestBlock == nil {\n\t\t\te := p.oe.pop()\n\t\t\tfor e != formattingElement {\n\t\t\t\te = p.oe.pop()\n\t\t\t}\n\t\t\tp.afe.remove(e)\n\t\t\treturn\n\t\t}\n\n\t\t// Steps 11-12. Find the common ancestor and bookmark node.\n\t\tcommonAncestor := p.oe[feIndex-1]\n\t\tbookmark := p.afe.index(formattingElement)\n\n\t\t// Step 13. The inner loop. Find the lastNode to reparent.\n\t\tlastNode := furthestBlock\n\t\tnode := furthestBlock\n\t\tx := p.oe.index(node)\n\t\t// Steps 13.1-13.2\n\t\tfor j := 0; j < 3; j++ {\n\t\t\t// Step 13.3.\n\t\t\tx--\n\t\t\tnode = p.oe[x]\n\t\t\t// Step 13.4 - 13.5.\n\t\t\tif p.afe.index(node) == -1 {\n\t\t\t\tp.oe.remove(node)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Step 13.6.\n\t\t\tif node == formattingElement {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// Step 13.7.\n\t\t\tclone := node.clone()\n\t\t\tp.afe[p.afe.index(node)] = clone\n\t\t\tp.oe[p.oe.index(node)] = clone\n\t\t\tnode = clone\n\t\t\t// Step 13.8.\n\t\t\tif lastNode == furthestBlock {\n\t\t\t\tbookmark = p.afe.index(node) + 1\n\t\t\t}\n\t\t\t// Step 13.9.\n\t\t\tif lastNode.Parent != nil {\n\t\t\t\tlastNode.Parent.RemoveChild(lastNode)\n\t\t\t}\n\t\t\tnode.AppendChild(lastNode)\n\t\t\t// Step 13.10.\n\t\t\tlastNode = node\n\t\t}\n\n\t\t// Step 14. Reparent lastNode to the common ancestor,\n\t\t// or for misnested table nodes, to the foster parent.\n\t\tif lastNode.Parent != nil {\n\t\t\tlastNode.Parent.RemoveChild(lastNode)\n\t\t}\n\t\tswitch commonAncestor.DataAtom {\n\t\tcase a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:\n\t\t\tp.fosterParent(lastNode)\n\t\tdefault:\n\t\t\tcommonAncestor.AppendChild(lastNode)\n\t\t}\n\n\t\t// Steps 15-17. 
Reparent nodes from the furthest block's children\n\t\t// to a clone of the formatting element.\n\t\tclone := formattingElement.clone()\n\t\treparentChildren(clone, furthestBlock)\n\t\tfurthestBlock.AppendChild(clone)\n\n\t\t// Step 18. Fix up the list of active formatting elements.\n\t\tif oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {\n\t\t\t// Move the bookmark with the rest of the list.\n\t\t\tbookmark--\n\t\t}\n\t\tp.afe.remove(formattingElement)\n\t\tp.afe.insert(bookmark, clone)\n\n\t\t// Step 19. Fix up the stack of open elements.\n\t\tp.oe.remove(formattingElement)\n\t\tp.oe.insert(p.oe.index(furthestBlock)+1, clone)\n\t}\n}\n\n// inBodyEndTagOther performs the \"any other end tag\" algorithm for inBodyIM.\n// \"Any other end tag\" handling from 12.2.5.5 The rules for parsing tokens in foreign content\n// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign\nfunc (p *parser) inBodyEndTagOther(tagAtom a.Atom) {\n\tfor i := len(p.oe) - 1; i >= 0; i-- {\n\t\tif p.oe[i].DataAtom == tagAtom {\n\t\t\tp.oe = p.oe[:i]\n\t\t\tbreak\n\t\t}\n\t\tif isSpecialElement(p.oe[i]) {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\n// Section 12.2.5.4.8.\nfunc textIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase ErrorToken:\n\t\tp.oe.pop()\n\tcase TextToken:\n\t\td := p.tok.Data\n\t\tif n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {\n\t\t\t// Ignore a newline at the start of a <textarea> block.\n\t\t\tif d != \"\" && d[0] == '\\r' {\n\t\t\t\td = d[1:]\n\t\t\t}\n\t\t\tif d != \"\" && d[0] == '\\n' {\n\t\t\t\td = d[1:]\n\t\t\t}\n\t\t}\n\t\tif d == \"\" {\n\t\t\treturn true\n\t\t}\n\t\tp.addText(d)\n\t\treturn true\n\tcase EndTagToken:\n\t\tp.oe.pop()\n\t}\n\tp.im = p.originalIM\n\tp.originalIM = nil\n\treturn p.tok.Type == EndTagToken\n}\n\n// Section 12.2.5.4.9.\nfunc inTableIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase ErrorToken:\n\t\t// Stop parsing.\n\t\treturn true\n\tcase TextToken:\n\t\tp.tok.Data = 
strings.Replace(p.tok.Data, \"\\x00\", \"\", -1)\n\t\tswitch p.oe.top().DataAtom {\n\t\tcase a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:\n\t\t\tif strings.Trim(p.tok.Data, whitespace) == \"\" {\n\t\t\t\tp.addText(p.tok.Data)\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\tcase StartTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Caption:\n\t\t\tp.clearStackToContext(tableScope)\n\t\t\tp.afe = append(p.afe, &scopeMarker)\n\t\t\tp.addElement()\n\t\t\tp.im = inCaptionIM\n\t\t\treturn true\n\t\tcase a.Colgroup:\n\t\t\tp.clearStackToContext(tableScope)\n\t\t\tp.addElement()\n\t\t\tp.im = inColumnGroupIM\n\t\t\treturn true\n\t\tcase a.Col:\n\t\t\tp.parseImpliedToken(StartTagToken, a.Colgroup, a.Colgroup.String())\n\t\t\treturn false\n\t\tcase a.Tbody, a.Tfoot, a.Thead:\n\t\t\tp.clearStackToContext(tableScope)\n\t\t\tp.addElement()\n\t\t\tp.im = inTableBodyIM\n\t\t\treturn true\n\t\tcase a.Td, a.Th, a.Tr:\n\t\t\tp.parseImpliedToken(StartTagToken, a.Tbody, a.Tbody.String())\n\t\t\treturn false\n\t\tcase a.Table:\n\t\t\tif p.popUntil(tableScope, a.Table) {\n\t\t\t\tp.resetInsertionMode()\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\tcase a.Style, a.Script:\n\t\t\treturn inHeadIM(p)\n\t\tcase a.Input:\n\t\t\tfor _, t := range p.tok.Attr {\n\t\t\t\tif t.Key == \"type\" && strings.ToLower(t.Val) == \"hidden\" {\n\t\t\t\t\tp.addElement()\n\t\t\t\t\tp.oe.pop()\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Otherwise drop down to the default action.\n\t\tcase a.Form:\n\t\t\tif p.form != nil {\n\t\t\t\t// Ignore the token.\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tp.addElement()\n\t\t\tp.form = p.oe.pop()\n\t\tcase a.Select:\n\t\t\tp.reconstructActiveFormattingElements()\n\t\t\tswitch p.top().DataAtom {\n\t\t\tcase a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:\n\t\t\t\tp.fosterParenting = true\n\t\t\t}\n\t\t\tp.addElement()\n\t\t\tp.fosterParenting = false\n\t\t\tp.framesetOK = false\n\t\t\tp.im = inSelectInTableIM\n\t\t\treturn true\n\t\t}\n\tcase 
EndTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Table:\n\t\t\tif p.popUntil(tableScope, a.Table) {\n\t\t\t\tp.resetInsertionMode()\n\t\t\t\treturn true\n\t\t\t}\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\tcase a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\t}\n\tcase CommentToken:\n\t\tp.addChild(&Node{\n\t\t\tType: CommentNode,\n\t\t\tData: p.tok.Data,\n\t\t})\n\t\treturn true\n\tcase DoctypeToken:\n\t\t// Ignore the token.\n\t\treturn true\n\t}\n\n\tp.fosterParenting = true\n\tdefer func() { p.fosterParenting = false }()\n\n\treturn inBodyIM(p)\n}\n\n// Section 12.2.5.4.11.\nfunc inCaptionIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase StartTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Caption, a.Col, a.Colgroup, a.Tbody, a.Td, a.Tfoot, a.Thead, a.Tr:\n\t\t\tif p.popUntil(tableScope, a.Caption) {\n\t\t\t\tp.clearActiveFormattingElements()\n\t\t\t\tp.im = inTableIM\n\t\t\t\treturn false\n\t\t\t} else {\n\t\t\t\t// Ignore the token.\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase a.Select:\n\t\t\tp.reconstructActiveFormattingElements()\n\t\t\tp.addElement()\n\t\t\tp.framesetOK = false\n\t\t\tp.im = inSelectInTableIM\n\t\t\treturn true\n\t\t}\n\tcase EndTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Caption:\n\t\t\tif p.popUntil(tableScope, a.Caption) {\n\t\t\t\tp.clearActiveFormattingElements()\n\t\t\t\tp.im = inTableIM\n\t\t\t}\n\t\t\treturn true\n\t\tcase a.Table:\n\t\t\tif p.popUntil(tableScope, a.Caption) {\n\t\t\t\tp.clearActiveFormattingElements()\n\t\t\t\tp.im = inTableIM\n\t\t\t\treturn false\n\t\t\t} else {\n\t\t\t\t// Ignore the token.\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase a.Body, a.Col, a.Colgroup, a.Html, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\t}\n\t}\n\treturn inBodyIM(p)\n}\n\n// Section 12.2.5.4.12.\nfunc inColumnGroupIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase 
TextToken:\n\t\ts := strings.TrimLeft(p.tok.Data, whitespace)\n\t\tif len(s) < len(p.tok.Data) {\n\t\t\t// Add the initial whitespace to the current node.\n\t\t\tp.addText(p.tok.Data[:len(p.tok.Data)-len(s)])\n\t\t\tif s == \"\" {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tp.tok.Data = s\n\t\t}\n\tcase CommentToken:\n\t\tp.addChild(&Node{\n\t\t\tType: CommentNode,\n\t\t\tData: p.tok.Data,\n\t\t})\n\t\treturn true\n\tcase DoctypeToken:\n\t\t// Ignore the token.\n\t\treturn true\n\tcase StartTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Html:\n\t\t\treturn inBodyIM(p)\n\t\tcase a.Col:\n\t\t\tp.addElement()\n\t\t\tp.oe.pop()\n\t\t\tp.acknowledgeSelfClosingTag()\n\t\t\treturn true\n\t\t}\n\tcase EndTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Colgroup:\n\t\t\tif p.oe.top().DataAtom != a.Html {\n\t\t\t\tp.oe.pop()\n\t\t\t\tp.im = inTableIM\n\t\t\t}\n\t\t\treturn true\n\t\tcase a.Col:\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\t}\n\t}\n\tif p.oe.top().DataAtom != a.Html {\n\t\tp.oe.pop()\n\t\tp.im = inTableIM\n\t\treturn false\n\t}\n\treturn true\n}\n\n// Section 12.2.5.4.13.\nfunc inTableBodyIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase StartTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Tr:\n\t\t\tp.clearStackToContext(tableBodyScope)\n\t\t\tp.addElement()\n\t\t\tp.im = inRowIM\n\t\t\treturn true\n\t\tcase a.Td, a.Th:\n\t\t\tp.parseImpliedToken(StartTagToken, a.Tr, a.Tr.String())\n\t\t\treturn false\n\t\tcase a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead:\n\t\t\tif p.popUntil(tableScope, a.Tbody, a.Thead, a.Tfoot) {\n\t\t\t\tp.im = inTableIM\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\t}\n\tcase EndTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Tbody, a.Tfoot, a.Thead:\n\t\t\tif p.elementInScope(tableScope, p.tok.DataAtom) {\n\t\t\t\tp.clearStackToContext(tableBodyScope)\n\t\t\t\tp.oe.pop()\n\t\t\t\tp.im = inTableIM\n\t\t\t}\n\t\t\treturn true\n\t\tcase a.Table:\n\t\t\tif p.popUntil(tableScope, 
a.Tbody, a.Thead, a.Tfoot) {\n\t\t\t\tp.im = inTableIM\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\tcase a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Td, a.Th, a.Tr:\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\t}\n\tcase CommentToken:\n\t\tp.addChild(&Node{\n\t\t\tType: CommentNode,\n\t\t\tData: p.tok.Data,\n\t\t})\n\t\treturn true\n\t}\n\n\treturn inTableIM(p)\n}\n\n// Section 12.2.5.4.14.\nfunc inRowIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase StartTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Td, a.Th:\n\t\t\tp.clearStackToContext(tableRowScope)\n\t\t\tp.addElement()\n\t\t\tp.afe = append(p.afe, &scopeMarker)\n\t\t\tp.im = inCellIM\n\t\t\treturn true\n\t\tcase a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead, a.Tr:\n\t\t\tif p.popUntil(tableScope, a.Tr) {\n\t\t\t\tp.im = inTableBodyIM\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\t}\n\tcase EndTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Tr:\n\t\t\tif p.popUntil(tableScope, a.Tr) {\n\t\t\t\tp.im = inTableBodyIM\n\t\t\t\treturn true\n\t\t\t}\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\tcase a.Table:\n\t\t\tif p.popUntil(tableScope, a.Tr) {\n\t\t\t\tp.im = inTableBodyIM\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\tcase a.Tbody, a.Tfoot, a.Thead:\n\t\t\tif p.elementInScope(tableScope, p.tok.DataAtom) {\n\t\t\t\tp.parseImpliedToken(EndTagToken, a.Tr, a.Tr.String())\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\tcase a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Td, a.Th:\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn inTableIM(p)\n}\n\n// Section 12.2.5.4.15.\nfunc inCellIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase StartTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Caption, a.Col, a.Colgroup, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:\n\t\t\tif p.popUntil(tableScope, a.Td, a.Th) 
{\n\t\t\t\t// Close the cell and reprocess.\n\t\t\t\tp.clearActiveFormattingElements()\n\t\t\t\tp.im = inRowIM\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\tcase a.Select:\n\t\t\tp.reconstructActiveFormattingElements()\n\t\t\tp.addElement()\n\t\t\tp.framesetOK = false\n\t\t\tp.im = inSelectInTableIM\n\t\t\treturn true\n\t\t}\n\tcase EndTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Td, a.Th:\n\t\t\tif !p.popUntil(tableScope, p.tok.DataAtom) {\n\t\t\t\t// Ignore the token.\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tp.clearActiveFormattingElements()\n\t\t\tp.im = inRowIM\n\t\t\treturn true\n\t\tcase a.Body, a.Caption, a.Col, a.Colgroup, a.Html:\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\tcase a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:\n\t\t\tif !p.elementInScope(tableScope, p.tok.DataAtom) {\n\t\t\t\t// Ignore the token.\n\t\t\t\treturn true\n\t\t\t}\n\t\t\t// Close the cell and reprocess.\n\t\t\tp.popUntil(tableScope, a.Td, a.Th)\n\t\t\tp.clearActiveFormattingElements()\n\t\t\tp.im = inRowIM\n\t\t\treturn false\n\t\t}\n\t}\n\treturn inBodyIM(p)\n}\n\n// Section 12.2.5.4.16.\nfunc inSelectIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase ErrorToken:\n\t\t// Stop parsing.\n\t\treturn true\n\tcase TextToken:\n\t\tp.addText(strings.Replace(p.tok.Data, \"\\x00\", \"\", -1))\n\tcase StartTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Html:\n\t\t\treturn inBodyIM(p)\n\t\tcase a.Option:\n\t\t\tif p.top().DataAtom == a.Option {\n\t\t\t\tp.oe.pop()\n\t\t\t}\n\t\t\tp.addElement()\n\t\tcase a.Optgroup:\n\t\t\tif p.top().DataAtom == a.Option {\n\t\t\t\tp.oe.pop()\n\t\t\t}\n\t\t\tif p.top().DataAtom == a.Optgroup {\n\t\t\t\tp.oe.pop()\n\t\t\t}\n\t\t\tp.addElement()\n\t\tcase a.Select:\n\t\t\tp.tok.Type = EndTagToken\n\t\t\treturn false\n\t\tcase a.Input, a.Keygen, a.Textarea:\n\t\t\tif p.elementInScope(selectScope, a.Select) {\n\t\t\t\tp.parseImpliedToken(EndTagToken, a.Select, a.Select.String())\n\t\t\t\treturn 
false\n\t\t\t}\n\t\t\t// In order to properly ignore <textarea>, we need to change the tokenizer mode.\n\t\t\tp.tokenizer.NextIsNotRawText()\n\t\t\t// Ignore the token.\n\t\t\treturn true\n\t\tcase a.Script:\n\t\t\treturn inHeadIM(p)\n\t\t}\n\tcase EndTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Option:\n\t\t\tif p.top().DataAtom == a.Option {\n\t\t\t\tp.oe.pop()\n\t\t\t}\n\t\tcase a.Optgroup:\n\t\t\ti := len(p.oe) - 1\n\t\t\tif p.oe[i].DataAtom == a.Option {\n\t\t\t\ti--\n\t\t\t}\n\t\t\tif p.oe[i].DataAtom == a.Optgroup {\n\t\t\t\tp.oe = p.oe[:i]\n\t\t\t}\n\t\tcase a.Select:\n\t\t\tif p.popUntil(selectScope, a.Select) {\n\t\t\t\tp.resetInsertionMode()\n\t\t\t}\n\t\t}\n\tcase CommentToken:\n\t\tp.addChild(&Node{\n\t\t\tType: CommentNode,\n\t\t\tData: p.tok.Data,\n\t\t})\n\tcase DoctypeToken:\n\t\t// Ignore the token.\n\t\treturn true\n\t}\n\n\treturn true\n}\n\n// Section 12.2.5.4.17.\nfunc inSelectInTableIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase StartTagToken, EndTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Caption, a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr, a.Td, a.Th:\n\t\t\tif p.tok.Type == StartTagToken || p.elementInScope(tableScope, p.tok.DataAtom) {\n\t\t\t\tp.parseImpliedToken(EndTagToken, a.Select, a.Select.String())\n\t\t\t\treturn false\n\t\t\t} else {\n\t\t\t\t// Ignore the token.\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn inSelectIM(p)\n}\n\n// Section 12.2.5.4.18.\nfunc afterBodyIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase ErrorToken:\n\t\t// Stop parsing.\n\t\treturn true\n\tcase TextToken:\n\t\ts := strings.TrimLeft(p.tok.Data, whitespace)\n\t\tif len(s) == 0 {\n\t\t\t// It was all whitespace.\n\t\t\treturn inBodyIM(p)\n\t\t}\n\tcase StartTagToken:\n\t\tif p.tok.DataAtom == a.Html {\n\t\t\treturn inBodyIM(p)\n\t\t}\n\tcase EndTagToken:\n\t\tif p.tok.DataAtom == a.Html {\n\t\t\tif !p.fragment {\n\t\t\t\tp.im = afterAfterBodyIM\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\tcase CommentToken:\n\t\t// The comment is 
attached to the <html> element.\n\t\tif len(p.oe) < 1 || p.oe[0].DataAtom != a.Html {\n\t\t\tpanic(\"html: bad parser state: <html> element not found, in the after-body insertion mode\")\n\t\t}\n\t\tp.oe[0].AppendChild(&Node{\n\t\t\tType: CommentNode,\n\t\t\tData: p.tok.Data,\n\t\t})\n\t\treturn true\n\t}\n\tp.im = inBodyIM\n\treturn false\n}\n\n// Section 12.2.5.4.19.\nfunc inFramesetIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase CommentToken:\n\t\tp.addChild(&Node{\n\t\t\tType: CommentNode,\n\t\t\tData: p.tok.Data,\n\t\t})\n\tcase TextToken:\n\t\t// Ignore all text but whitespace.\n\t\ts := strings.Map(func(c rune) rune {\n\t\t\tswitch c {\n\t\t\tcase ' ', '\\t', '\\n', '\\f', '\\r':\n\t\t\t\treturn c\n\t\t\t}\n\t\t\treturn -1\n\t\t}, p.tok.Data)\n\t\tif s != \"\" {\n\t\t\tp.addText(s)\n\t\t}\n\tcase StartTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Html:\n\t\t\treturn inBodyIM(p)\n\t\tcase a.Frameset:\n\t\t\tp.addElement()\n\t\tcase a.Frame:\n\t\t\tp.addElement()\n\t\t\tp.oe.pop()\n\t\t\tp.acknowledgeSelfClosingTag()\n\t\tcase a.Noframes:\n\t\t\treturn inHeadIM(p)\n\t\t}\n\tcase EndTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Frameset:\n\t\t\tif p.oe.top().DataAtom != a.Html {\n\t\t\t\tp.oe.pop()\n\t\t\t\tif p.oe.top().DataAtom != a.Frameset {\n\t\t\t\t\tp.im = afterFramesetIM\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tdefault:\n\t\t// Ignore the token.\n\t}\n\treturn true\n}\n\n// Section 12.2.5.4.20.\nfunc afterFramesetIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase CommentToken:\n\t\tp.addChild(&Node{\n\t\t\tType: CommentNode,\n\t\t\tData: p.tok.Data,\n\t\t})\n\tcase TextToken:\n\t\t// Ignore all text but whitespace.\n\t\ts := strings.Map(func(c rune) rune {\n\t\t\tswitch c {\n\t\t\tcase ' ', '\\t', '\\n', '\\f', '\\r':\n\t\t\t\treturn c\n\t\t\t}\n\t\t\treturn -1\n\t\t}, p.tok.Data)\n\t\tif s != \"\" {\n\t\t\tp.addText(s)\n\t\t}\n\tcase StartTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Html:\n\t\t\treturn 
inBodyIM(p)\n\t\tcase a.Noframes:\n\t\t\treturn inHeadIM(p)\n\t\t}\n\tcase EndTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Html:\n\t\t\tp.im = afterAfterFramesetIM\n\t\t\treturn true\n\t\t}\n\tdefault:\n\t\t// Ignore the token.\n\t}\n\treturn true\n}\n\n// Section 12.2.5.4.21.\nfunc afterAfterBodyIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase ErrorToken:\n\t\t// Stop parsing.\n\t\treturn true\n\tcase TextToken:\n\t\ts := strings.TrimLeft(p.tok.Data, whitespace)\n\t\tif len(s) == 0 {\n\t\t\t// It was all whitespace.\n\t\t\treturn inBodyIM(p)\n\t\t}\n\tcase StartTagToken:\n\t\tif p.tok.DataAtom == a.Html {\n\t\t\treturn inBodyIM(p)\n\t\t}\n\tcase CommentToken:\n\t\tp.doc.AppendChild(&Node{\n\t\t\tType: CommentNode,\n\t\t\tData: p.tok.Data,\n\t\t})\n\t\treturn true\n\tcase DoctypeToken:\n\t\treturn inBodyIM(p)\n\t}\n\tp.im = inBodyIM\n\treturn false\n}\n\n// Section 12.2.5.4.22.\nfunc afterAfterFramesetIM(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase CommentToken:\n\t\tp.doc.AppendChild(&Node{\n\t\t\tType: CommentNode,\n\t\t\tData: p.tok.Data,\n\t\t})\n\tcase TextToken:\n\t\t// Ignore all text but whitespace.\n\t\ts := strings.Map(func(c rune) rune {\n\t\t\tswitch c {\n\t\t\tcase ' ', '\\t', '\\n', '\\f', '\\r':\n\t\t\t\treturn c\n\t\t\t}\n\t\t\treturn -1\n\t\t}, p.tok.Data)\n\t\tif s != \"\" {\n\t\t\tp.tok.Data = s\n\t\t\treturn inBodyIM(p)\n\t\t}\n\tcase StartTagToken:\n\t\tswitch p.tok.DataAtom {\n\t\tcase a.Html:\n\t\t\treturn inBodyIM(p)\n\t\tcase a.Noframes:\n\t\t\treturn inHeadIM(p)\n\t\t}\n\tcase DoctypeToken:\n\t\treturn inBodyIM(p)\n\tdefault:\n\t\t// Ignore the token.\n\t}\n\treturn true\n}\n\nconst whitespaceOrNUL = whitespace + \"\\x00\"\n\n// Section 12.2.5.5.\nfunc parseForeignContent(p *parser) bool {\n\tswitch p.tok.Type {\n\tcase TextToken:\n\t\tif p.framesetOK {\n\t\t\tp.framesetOK = strings.TrimLeft(p.tok.Data, whitespaceOrNUL) == \"\"\n\t\t}\n\t\tp.tok.Data = strings.Replace(p.tok.Data, \"\\x00\", \"\\ufffd\", 
-1)\n\t\tp.addText(p.tok.Data)\n\tcase CommentToken:\n\t\tp.addChild(&Node{\n\t\t\tType: CommentNode,\n\t\t\tData: p.tok.Data,\n\t\t})\n\tcase StartTagToken:\n\t\tb := breakout[p.tok.Data]\n\t\tif p.tok.DataAtom == a.Font {\n\t\tloop:\n\t\t\tfor _, attr := range p.tok.Attr {\n\t\t\t\tswitch attr.Key {\n\t\t\t\tcase \"color\", \"face\", \"size\":\n\t\t\t\t\tb = true\n\t\t\t\t\tbreak loop\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif b {\n\t\t\tfor i := len(p.oe) - 1; i >= 0; i-- {\n\t\t\t\tn := p.oe[i]\n\t\t\t\tif n.Namespace == \"\" || htmlIntegrationPoint(n) || mathMLTextIntegrationPoint(n) {\n\t\t\t\t\tp.oe = p.oe[:i+1]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t\tswitch p.top().Namespace {\n\t\tcase \"math\":\n\t\t\tadjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)\n\t\tcase \"svg\":\n\t\t\t// Adjust SVG tag names. The tokenizer lower-cases tag names, but\n\t\t\t// SVG wants e.g. \"foreignObject\" with a capital second \"O\".\n\t\t\tif x := svgTagNameAdjustments[p.tok.Data]; x != \"\" {\n\t\t\t\tp.tok.DataAtom = a.Lookup([]byte(x))\n\t\t\t\tp.tok.Data = x\n\t\t\t}\n\t\t\tadjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)\n\t\tdefault:\n\t\t\tpanic(\"html: bad parser state: unexpected namespace\")\n\t\t}\n\t\tadjustForeignAttributes(p.tok.Attr)\n\t\tnamespace := p.top().Namespace\n\t\tp.addElement()\n\t\tp.top().Namespace = namespace\n\t\tif namespace != \"\" {\n\t\t\t// Don't let the tokenizer go into raw text mode in foreign content\n\t\t\t// (e.g. 
in an SVG <title> tag).\n\t\t\tp.tokenizer.NextIsNotRawText()\n\t\t}\n\t\tif p.hasSelfClosingToken {\n\t\t\tp.oe.pop()\n\t\t\tp.acknowledgeSelfClosingTag()\n\t\t}\n\tcase EndTagToken:\n\t\tfor i := len(p.oe) - 1; i >= 0; i-- {\n\t\t\tif p.oe[i].Namespace == \"\" {\n\t\t\t\treturn p.im(p)\n\t\t\t}\n\t\t\tif strings.EqualFold(p.oe[i].Data, p.tok.Data) {\n\t\t\t\tp.oe = p.oe[:i]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treturn true\n\tdefault:\n\t\t// Ignore the token.\n\t}\n\treturn true\n}\n\n// Section 12.2.5.\nfunc (p *parser) inForeignContent() bool {\n\tif len(p.oe) == 0 {\n\t\treturn false\n\t}\n\tn := p.oe[len(p.oe)-1]\n\tif n.Namespace == \"\" {\n\t\treturn false\n\t}\n\tif mathMLTextIntegrationPoint(n) {\n\t\tif p.tok.Type == StartTagToken && p.tok.DataAtom != a.Mglyph && p.tok.DataAtom != a.Malignmark {\n\t\t\treturn false\n\t\t}\n\t\tif p.tok.Type == TextToken {\n\t\t\treturn false\n\t\t}\n\t}\n\tif n.Namespace == \"math\" && n.DataAtom == a.AnnotationXml && p.tok.Type == StartTagToken && p.tok.DataAtom == a.Svg {\n\t\treturn false\n\t}\n\tif htmlIntegrationPoint(n) && (p.tok.Type == StartTagToken || p.tok.Type == TextToken) {\n\t\treturn false\n\t}\n\tif p.tok.Type == ErrorToken {\n\t\treturn false\n\t}\n\treturn true\n}\n\n// parseImpliedToken parses a token as though it had appeared in the parser's\n// input.\nfunc (p *parser) parseImpliedToken(t TokenType, dataAtom a.Atom, data string) {\n\trealToken, selfClosing := p.tok, p.hasSelfClosingToken\n\tp.tok = Token{\n\t\tType:     t,\n\t\tDataAtom: dataAtom,\n\t\tData:     data,\n\t}\n\tp.hasSelfClosingToken = false\n\tp.parseCurrentToken()\n\tp.tok, p.hasSelfClosingToken = realToken, selfClosing\n}\n\n// parseCurrentToken runs the current token through the parsing routines\n// until it is consumed.\nfunc (p *parser) parseCurrentToken() {\n\tif p.tok.Type == SelfClosingTagToken {\n\t\tp.hasSelfClosingToken = true\n\t\tp.tok.Type = StartTagToken\n\t}\n\n\tconsumed := false\n\tfor !consumed {\n\t\tif 
p.inForeignContent() {\n\t\t\tconsumed = parseForeignContent(p)\n\t\t} else {\n\t\t\tconsumed = p.im(p)\n\t\t}\n\t}\n\n\tif p.hasSelfClosingToken {\n\t\t// This is a parse error, but ignore it.\n\t\tp.hasSelfClosingToken = false\n\t}\n}\n\nfunc (p *parser) parse() error {\n\t// Iterate until EOF. Any other error will cause an early return.\n\tvar err error\n\tfor err != io.EOF {\n\t\t// CDATA sections are allowed only in foreign content.\n\t\tn := p.oe.top()\n\t\tp.tokenizer.AllowCDATA(n != nil && n.Namespace != \"\")\n\t\t// Read and parse the next token.\n\t\tp.tokenizer.Next()\n\t\tp.tok = p.tokenizer.Token()\n\t\tif p.tok.Type == ErrorToken {\n\t\t\terr = p.tokenizer.Err()\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tp.parseCurrentToken()\n\t}\n\treturn nil\n}\n\n// Parse returns the parse tree for the HTML from the given Reader.\n// The input is assumed to be UTF-8 encoded.\nfunc Parse(r io.Reader) (*Node, error) {\n\tp := &parser{\n\t\ttokenizer: NewTokenizer(r),\n\t\tdoc: &Node{\n\t\t\tType: DocumentNode,\n\t\t},\n\t\tscripting:  true,\n\t\tframesetOK: true,\n\t\tim:         initialIM,\n\t}\n\terr := p.parse()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p.doc, nil\n}\n\n// ParseFragment parses a fragment of HTML and returns the nodes that were\n// found. If the fragment is the InnerHTML for an existing element, pass that\n// element in context.\nfunc ParseFragment(r io.Reader, context *Node) ([]*Node, error) {\n\tcontextTag := \"\"\n\tif context != nil {\n\t\tif context.Type != ElementNode {\n\t\t\treturn nil, errors.New(\"html: ParseFragment of non-element Node\")\n\t\t}\n\t\t// The next check isn't just context.DataAtom.String() == context.Data because\n\t\t// it is valid to pass an element whose tag isn't a known atom. 
For example,\n\t\t// DataAtom == 0 and Data = \"tagfromthefuture\" is perfectly consistent.\n\t\tif context.DataAtom != a.Lookup([]byte(context.Data)) {\n\t\t\treturn nil, fmt.Errorf(\"html: inconsistent Node: DataAtom=%q, Data=%q\", context.DataAtom, context.Data)\n\t\t}\n\t\tcontextTag = context.DataAtom.String()\n\t}\n\tp := &parser{\n\t\ttokenizer: NewTokenizerFragment(r, contextTag),\n\t\tdoc: &Node{\n\t\t\tType: DocumentNode,\n\t\t},\n\t\tscripting: true,\n\t\tfragment:  true,\n\t\tcontext:   context,\n\t}\n\n\troot := &Node{\n\t\tType:     ElementNode,\n\t\tDataAtom: a.Html,\n\t\tData:     a.Html.String(),\n\t}\n\tp.doc.AppendChild(root)\n\tp.oe = nodeStack{root}\n\tp.resetInsertionMode()\n\n\tfor n := context; n != nil; n = n.Parent {\n\t\tif n.Type == ElementNode && n.DataAtom == a.Form {\n\t\t\tp.form = n\n\t\t\tbreak\n\t\t}\n\t}\n\n\terr := p.parse()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparent := p.doc\n\tif context != nil {\n\t\tparent = root\n\t}\n\n\tvar result []*Node\n\tfor c := parent.FirstChild; c != nil; {\n\t\tnext := c.NextSibling\n\t\tparent.RemoveChild(c)\n\t\tresult = append(result, c)\n\t\tc = next\n\t}\n\treturn result, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/parse_test.go",
    "content": "// Copyright 2010 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage html\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang.org/x/net/html/atom\"\n)\n\n// readParseTest reads a single test case from r.\nfunc readParseTest(r *bufio.Reader) (text, want, context string, err error) {\n\tline, err := r.ReadSlice('\\n')\n\tif err != nil {\n\t\treturn \"\", \"\", \"\", err\n\t}\n\tvar b []byte\n\n\t// Read the HTML.\n\tif string(line) != \"#data\\n\" {\n\t\treturn \"\", \"\", \"\", fmt.Errorf(`got %q want \"#data\\n\"`, line)\n\t}\n\tfor {\n\t\tline, err = r.ReadSlice('\\n')\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", \"\", err\n\t\t}\n\t\tif line[0] == '#' {\n\t\t\tbreak\n\t\t}\n\t\tb = append(b, line...)\n\t}\n\ttext = strings.TrimSuffix(string(b), \"\\n\")\n\tb = b[:0]\n\n\t// Skip the error list.\n\tif string(line) != \"#errors\\n\" {\n\t\treturn \"\", \"\", \"\", fmt.Errorf(`got %q want \"#errors\\n\"`, line)\n\t}\n\tfor {\n\t\tline, err = r.ReadSlice('\\n')\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", \"\", err\n\t\t}\n\t\tif line[0] == '#' {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif string(line) == \"#document-fragment\\n\" {\n\t\tline, err = r.ReadSlice('\\n')\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", \"\", err\n\t\t}\n\t\tcontext = strings.TrimSpace(string(line))\n\t\tline, err = r.ReadSlice('\\n')\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", \"\", err\n\t\t}\n\t}\n\n\t// Read the dump of what the parse tree should be.\n\tif string(line) != \"#document\\n\" {\n\t\treturn \"\", \"\", \"\", fmt.Errorf(`got %q want \"#document\\n\"`, line)\n\t}\n\tinQuote := false\n\tfor {\n\t\tline, err = r.ReadSlice('\\n')\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn \"\", \"\", \"\", err\n\t\t}\n\t\ttrimmed := 
bytes.Trim(line, \"| \\n\")\n\t\tif len(trimmed) > 0 {\n\t\t\tif line[0] == '|' && trimmed[0] == '\"' {\n\t\t\t\tinQuote = true\n\t\t\t}\n\t\t\tif trimmed[len(trimmed)-1] == '\"' && !(line[0] == '|' && len(trimmed) == 1) {\n\t\t\t\tinQuote = false\n\t\t\t}\n\t\t}\n\t\tif len(line) == 0 || len(line) == 1 && line[0] == '\\n' && !inQuote {\n\t\t\tbreak\n\t\t}\n\t\tb = append(b, line...)\n\t}\n\treturn text, string(b), context, nil\n}\n\nfunc dumpIndent(w io.Writer, level int) {\n\tio.WriteString(w, \"| \")\n\tfor i := 0; i < level; i++ {\n\t\tio.WriteString(w, \"  \")\n\t}\n}\n\ntype sortedAttributes []Attribute\n\nfunc (a sortedAttributes) Len() int {\n\treturn len(a)\n}\n\nfunc (a sortedAttributes) Less(i, j int) bool {\n\tif a[i].Namespace != a[j].Namespace {\n\t\treturn a[i].Namespace < a[j].Namespace\n\t}\n\treturn a[i].Key < a[j].Key\n}\n\nfunc (a sortedAttributes) Swap(i, j int) {\n\ta[i], a[j] = a[j], a[i]\n}\n\nfunc dumpLevel(w io.Writer, n *Node, level int) error {\n\tdumpIndent(w, level)\n\tswitch n.Type {\n\tcase ErrorNode:\n\t\treturn errors.New(\"unexpected ErrorNode\")\n\tcase DocumentNode:\n\t\treturn errors.New(\"unexpected DocumentNode\")\n\tcase ElementNode:\n\t\tif n.Namespace != \"\" {\n\t\t\tfmt.Fprintf(w, \"<%s %s>\", n.Namespace, n.Data)\n\t\t} else {\n\t\t\tfmt.Fprintf(w, \"<%s>\", n.Data)\n\t\t}\n\t\tattr := sortedAttributes(n.Attr)\n\t\tsort.Sort(attr)\n\t\tfor _, a := range attr {\n\t\t\tio.WriteString(w, \"\\n\")\n\t\t\tdumpIndent(w, level+1)\n\t\t\tif a.Namespace != \"\" {\n\t\t\t\tfmt.Fprintf(w, `%s %s=\"%s\"`, a.Namespace, a.Key, a.Val)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(w, `%s=\"%s\"`, a.Key, a.Val)\n\t\t\t}\n\t\t}\n\tcase TextNode:\n\t\tfmt.Fprintf(w, `\"%s\"`, n.Data)\n\tcase CommentNode:\n\t\tfmt.Fprintf(w, \"<!-- %s -->\", n.Data)\n\tcase DoctypeNode:\n\t\tfmt.Fprintf(w, \"<!DOCTYPE %s\", n.Data)\n\t\tif n.Attr != nil {\n\t\t\tvar p, s string\n\t\t\tfor _, a := range n.Attr {\n\t\t\t\tswitch a.Key {\n\t\t\t\tcase 
\"public\":\n\t\t\t\t\tp = a.Val\n\t\t\t\tcase \"system\":\n\t\t\t\t\ts = a.Val\n\t\t\t\t}\n\t\t\t}\n\t\t\tif p != \"\" || s != \"\" {\n\t\t\t\tfmt.Fprintf(w, ` \"%s\"`, p)\n\t\t\t\tfmt.Fprintf(w, ` \"%s\"`, s)\n\t\t\t}\n\t\t}\n\t\tio.WriteString(w, \">\")\n\tcase scopeMarkerNode:\n\t\treturn errors.New(\"unexpected scopeMarkerNode\")\n\tdefault:\n\t\treturn errors.New(\"unknown node type\")\n\t}\n\tio.WriteString(w, \"\\n\")\n\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\tif err := dumpLevel(w, c, level+1); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc dump(n *Node) (string, error) {\n\tif n == nil || n.FirstChild == nil {\n\t\treturn \"\", nil\n\t}\n\tvar b bytes.Buffer\n\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\tif err := dumpLevel(&b, c, 0); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn b.String(), nil\n}\n\nconst testDataDir = \"testdata/webkit/\"\n\nfunc TestParser(t *testing.T) {\n\ttestFiles, err := filepath.Glob(testDataDir + \"*.dat\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, tf := range testFiles {\n\t\tf, err := os.Open(tf)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer f.Close()\n\t\tr := bufio.NewReader(f)\n\n\t\tfor i := 0; ; i++ {\n\t\t\ttext, want, context, err := readParseTest(r)\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\terr = testParseCase(text, want, context)\n\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"%s test #%d %q, %s\", tf, i, text, err)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// testParseCase tests one test case from the test files. 
If the test does not\n// pass, it returns an error that explains the failure.\n// text is the HTML to be parsed, want is a dump of the correct parse tree,\n// and context is the name of the context node, if any.\nfunc testParseCase(text, want, context string) (err error) {\n\tdefer func() {\n\t\tif x := recover(); x != nil {\n\t\t\tswitch e := x.(type) {\n\t\t\tcase error:\n\t\t\t\terr = e\n\t\t\tdefault:\n\t\t\t\terr = fmt.Errorf(\"%v\", e)\n\t\t\t}\n\t\t}\n\t}()\n\n\tvar doc *Node\n\tif context == \"\" {\n\t\tdoc, err = Parse(strings.NewReader(text))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tcontextNode := &Node{\n\t\t\tType:     ElementNode,\n\t\t\tDataAtom: atom.Lookup([]byte(context)),\n\t\t\tData:     context,\n\t\t}\n\t\tnodes, err := ParseFragment(strings.NewReader(text), contextNode)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdoc = &Node{\n\t\t\tType: DocumentNode,\n\t\t}\n\t\tfor _, n := range nodes {\n\t\t\tdoc.AppendChild(n)\n\t\t}\n\t}\n\n\tif err := checkTreeConsistency(doc); err != nil {\n\t\treturn err\n\t}\n\n\tgot, err := dump(doc)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Compare the parsed tree to the #document section.\n\tif got != want {\n\t\treturn fmt.Errorf(\"got vs want:\\n----\\n%s----\\n%s----\", got, want)\n\t}\n\n\tif renderTestBlacklist[text] || context != \"\" {\n\t\treturn nil\n\t}\n\n\t// Check that rendering and re-parsing results in an identical tree.\n\tpr, pw := io.Pipe()\n\tgo func() {\n\t\tpw.CloseWithError(Render(pw, doc))\n\t}()\n\tdoc1, err := Parse(pr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgot1, err := dump(doc1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif got != got1 {\n\t\treturn fmt.Errorf(\"got vs got1:\\n----\\n%s----\\n%s----\", got, got1)\n\t}\n\n\treturn nil\n}\n\n// Some test input result in parse trees are not 'well-formed' despite\n// following the HTML5 recovery algorithms. Rendering and re-parsing such a\n// tree will not result in an exact clone of that tree. 
We blacklist such\n// inputs from the render test.\nvar renderTestBlacklist = map[string]bool{\n\t// The second <a> will be reparented to the first <table>'s parent. This\n\t// results in an <a> whose parent is an <a>, which is not 'well-formed'.\n\t`<a><table><td><a><table></table><a></tr><a></table><b>X</b>C<a>Y`: true,\n\t// The same thing with a <p>:\n\t`<p><table></p>`: true,\n\t// More cases of <a> being reparented:\n\t`<a href=\"blah\">aba<table><a href=\"foo\">br<tr><td></td></tr>x</table>aoe`: true,\n\t`<a><table><a></table><p><a><div><a>`:                                     true,\n\t`<a><table><td><a><table></table><a></tr><a></table><a>`:                  true,\n\t// A similar reparenting situation involving <nobr>:\n\t`<!DOCTYPE html><body><b><nobr>1<table><nobr></b><i><nobr>2<nobr></i>3`: true,\n\t// A <plaintext> element is reparented, putting it before a table.\n\t// A <plaintext> element can't have anything after it in HTML.\n\t`<table><plaintext><td>`:                                   true,\n\t`<!doctype html><table><plaintext></plaintext>`:            true,\n\t`<!doctype html><table><tbody><plaintext></plaintext>`:     true,\n\t`<!doctype html><table><tbody><tr><plaintext></plaintext>`: true,\n\t// A form inside a table inside a form doesn't work either.\n\t`<!doctype html><form><table></form><form></table></form>`: true,\n\t// A script that ends at EOF may escape its own closing tag when rendered.\n\t`<!doctype html><script><!--<script `:          true,\n\t`<!doctype html><script><!--<script <`:         true,\n\t`<!doctype html><script><!--<script <a`:        true,\n\t`<!doctype html><script><!--<script </`:        true,\n\t`<!doctype html><script><!--<script </s`:       true,\n\t`<!doctype html><script><!--<script </script`:  true,\n\t`<!doctype html><script><!--<script </scripta`: true,\n\t`<!doctype html><script><!--<script -`:         true,\n\t`<!doctype html><script><!--<script -a`:        true,\n\t`<!doctype html><script><!--<script -<`:  
      true,\n\t`<!doctype html><script><!--<script --`:        true,\n\t`<!doctype html><script><!--<script --a`:       true,\n\t`<!doctype html><script><!--<script --<`:       true,\n\t`<script><!--<script `:                         true,\n\t`<script><!--<script <a`:                       true,\n\t`<script><!--<script </script`:                 true,\n\t`<script><!--<script </scripta`:                true,\n\t`<script><!--<script -`:                        true,\n\t`<script><!--<script -a`:                       true,\n\t`<script><!--<script --`:                       true,\n\t`<script><!--<script --a`:                      true,\n\t`<script><!--<script <`:                        true,\n\t`<script><!--<script </`:                       true,\n\t`<script><!--<script </s`:                      true,\n\t// Reconstructing the active formatting elements results in a <plaintext>\n\t// element that contains an <a> element.\n\t`<!doctype html><p><a><plaintext>b`: true,\n}\n\nfunc TestNodeConsistency(t *testing.T) {\n\t// inconsistentNode is a Node whose DataAtom and Data do not agree.\n\tinconsistentNode := &Node{\n\t\tType:     ElementNode,\n\t\tDataAtom: atom.Frameset,\n\t\tData:     \"table\",\n\t}\n\t_, err := ParseFragment(strings.NewReader(\"<p>hello</p>\"), inconsistentNode)\n\tif err == nil {\n\t\tt.Errorf(\"got nil error, want non-nil\")\n\t}\n}\n\nfunc BenchmarkParser(b *testing.B) {\n\tbuf, err := ioutil.ReadFile(\"testdata/go1.html\")\n\tif err != nil {\n\t\tb.Fatalf(\"could not read testdata/go1.html: %v\", err)\n\t}\n\tb.SetBytes(int64(len(buf)))\n\truntime.GC()\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tParse(bytes.NewBuffer(buf))\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/render.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage html\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n)\n\ntype writer interface {\n\tio.Writer\n\tio.ByteWriter\n\tWriteString(string) (int, error)\n}\n\n// Render renders the parse tree n to the given writer.\n//\n// Rendering is done on a 'best effort' basis: calling Parse on the output of\n// Render will always result in something similar to the original tree, but it\n// is not necessarily an exact clone unless the original tree was 'well-formed'.\n// 'Well-formed' is not easily specified; the HTML5 specification is\n// complicated.\n//\n// Calling Parse on arbitrary input typically results in a 'well-formed' parse\n// tree. However, it is possible for Parse to yield a 'badly-formed' parse tree.\n// For example, in a 'well-formed' parse tree, no <a> element is a child of\n// another <a> element: parsing \"<a><a>\" results in two sibling elements.\n// Similarly, in a 'well-formed' parse tree, no <a> element is a child of a\n// <table> element: parsing \"<p><table><a>\" results in a <p> with two sibling\n// children; the <a> is reparented to the <table>'s parent. However, calling\n// Parse on \"<a><table><a>\" does not return an error, but the result has an <a>\n// element with an <a> child, and is therefore not 'well-formed'.\n//\n// Programmatically constructed trees are typically also 'well-formed', but it\n// is possible to construct a tree that looks innocuous but, when rendered and\n// re-parsed, results in a different tree. 
A simple example is that a solitary\n// text node would become a tree containing <html>, <head> and <body> elements.\n// Another example is that the programmatic equivalent of \"a<head>b</head>c\"\n// becomes \"<html><head><head/><body>abc</body></html>\".\nfunc Render(w io.Writer, n *Node) error {\n\tif x, ok := w.(writer); ok {\n\t\treturn render(x, n)\n\t}\n\tbuf := bufio.NewWriter(w)\n\tif err := render(buf, n); err != nil {\n\t\treturn err\n\t}\n\treturn buf.Flush()\n}\n\n// plaintextAbort is returned from render1 when a <plaintext> element\n// has been rendered. No more end tags should be rendered after that.\nvar plaintextAbort = errors.New(\"html: internal error (plaintext abort)\")\n\nfunc render(w writer, n *Node) error {\n\terr := render1(w, n)\n\tif err == plaintextAbort {\n\t\terr = nil\n\t}\n\treturn err\n}\n\nfunc render1(w writer, n *Node) error {\n\t// Render non-element nodes; these are the easy cases.\n\tswitch n.Type {\n\tcase ErrorNode:\n\t\treturn errors.New(\"html: cannot render an ErrorNode node\")\n\tcase TextNode:\n\t\treturn escape(w, n.Data)\n\tcase DocumentNode:\n\t\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\t\tif err := render1(w, c); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\tcase ElementNode:\n\t\t// No-op.\n\tcase CommentNode:\n\t\tif _, err := w.WriteString(\"<!--\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := w.WriteString(n.Data); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := w.WriteString(\"-->\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase DoctypeNode:\n\t\tif _, err := w.WriteString(\"<!DOCTYPE \"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := w.WriteString(n.Data); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif n.Attr != nil {\n\t\t\tvar p, s string\n\t\t\tfor _, a := range n.Attr {\n\t\t\t\tswitch a.Key {\n\t\t\t\tcase \"public\":\n\t\t\t\t\tp = a.Val\n\t\t\t\tcase \"system\":\n\t\t\t\t\ts = a.Val\n\t\t\t\t}\n\t\t\t}\n\t\t\tif p != 
\"\" {\n\t\t\t\tif _, err := w.WriteString(\" PUBLIC \"); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := writeQuoted(w, p); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif s != \"\" {\n\t\t\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif err := writeQuoted(w, s); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if s != \"\" {\n\t\t\t\tif _, err := w.WriteString(\" SYSTEM \"); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := writeQuoted(w, s); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn w.WriteByte('>')\n\tdefault:\n\t\treturn errors.New(\"html: unknown node type\")\n\t}\n\n\t// Render the <xxx> opening tag.\n\tif err := w.WriteByte('<'); err != nil {\n\t\treturn err\n\t}\n\tif _, err := w.WriteString(n.Data); err != nil {\n\t\treturn err\n\t}\n\tfor _, a := range n.Attr {\n\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif a.Namespace != \"\" {\n\t\t\tif _, err := w.WriteString(a.Namespace); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := w.WriteByte(':'); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif _, err := w.WriteString(a.Key); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := w.WriteString(`=\"`); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := escape(w, a.Val); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := w.WriteByte('\"'); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif voidElements[n.Data] {\n\t\tif n.FirstChild != nil {\n\t\t\treturn fmt.Errorf(\"html: void element <%s> has child nodes\", n.Data)\n\t\t}\n\t\t_, err := w.WriteString(\"/>\")\n\t\treturn err\n\t}\n\tif err := w.WriteByte('>'); err != nil {\n\t\treturn err\n\t}\n\n\t// Add initial newline where there is danger of a newline beging ignored.\n\tif c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, \"\\n\") {\n\t\tswitch n.Data {\n\t\tcase \"pre\", 
\"listing\", \"textarea\":\n\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Render any child nodes.\n\tswitch n.Data {\n\tcase \"iframe\", \"noembed\", \"noframes\", \"noscript\", \"plaintext\", \"script\", \"style\", \"xmp\":\n\t\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\t\tif c.Type == TextNode {\n\t\t\t\tif _, err := w.WriteString(c.Data); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err := render1(w, c); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif n.Data == \"plaintext\" {\n\t\t\t// Don't render anything else. <plaintext> must be the\n\t\t\t// last element in the file, with no closing tag.\n\t\t\treturn plaintextAbort\n\t\t}\n\tdefault:\n\t\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\t\tif err := render1(w, c); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Render the </xxx> closing tag.\n\tif _, err := w.WriteString(\"</\"); err != nil {\n\t\treturn err\n\t}\n\tif _, err := w.WriteString(n.Data); err != nil {\n\t\treturn err\n\t}\n\treturn w.WriteByte('>')\n}\n\n// writeQuoted writes s to w surrounded by quotes. Normally it will use double\n// quotes, but if s contains a double quote, it will use single quotes.\n// It is used for writing the identifiers in a doctype declaration.\n// In valid HTML, they can't contain both types of quotes.\nfunc writeQuoted(w writer, s string) error {\n\tvar q byte = '\"'\n\tif strings.Contains(s, `\"`) {\n\t\tq = '\\''\n\t}\n\tif err := w.WriteByte(q); err != nil {\n\t\treturn err\n\t}\n\tif _, err := w.WriteString(s); err != nil {\n\t\treturn err\n\t}\n\tif err := w.WriteByte(q); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// Section 12.1.2, \"Elements\", gives this list of void elements. 
Void elements\n// are those that can't have any contents.\nvar voidElements = map[string]bool{\n\t\"area\":    true,\n\t\"base\":    true,\n\t\"br\":      true,\n\t\"col\":     true,\n\t\"command\": true,\n\t\"embed\":   true,\n\t\"hr\":      true,\n\t\"img\":     true,\n\t\"input\":   true,\n\t\"keygen\":  true,\n\t\"link\":    true,\n\t\"meta\":    true,\n\t\"param\":   true,\n\t\"source\":  true,\n\t\"track\":   true,\n\t\"wbr\":     true,\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/render_test.go",
    "content": "// Copyright 2010 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage html\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n)\n\nfunc TestRenderer(t *testing.T) {\n\tnodes := [...]*Node{\n\t\t0: {\n\t\t\tType: ElementNode,\n\t\t\tData: \"html\",\n\t\t},\n\t\t1: {\n\t\t\tType: ElementNode,\n\t\t\tData: \"head\",\n\t\t},\n\t\t2: {\n\t\t\tType: ElementNode,\n\t\t\tData: \"body\",\n\t\t},\n\t\t3: {\n\t\t\tType: TextNode,\n\t\t\tData: \"0<1\",\n\t\t},\n\t\t4: {\n\t\t\tType: ElementNode,\n\t\t\tData: \"p\",\n\t\t\tAttr: []Attribute{\n\t\t\t\t{\n\t\t\t\t\tKey: \"id\",\n\t\t\t\t\tVal: \"A\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tKey: \"foo\",\n\t\t\t\t\tVal: `abc\"def`,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t5: {\n\t\t\tType: TextNode,\n\t\t\tData: \"2\",\n\t\t},\n\t\t6: {\n\t\t\tType: ElementNode,\n\t\t\tData: \"b\",\n\t\t\tAttr: []Attribute{\n\t\t\t\t{\n\t\t\t\t\tKey: \"empty\",\n\t\t\t\t\tVal: \"\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t7: {\n\t\t\tType: TextNode,\n\t\t\tData: \"3\",\n\t\t},\n\t\t8: {\n\t\t\tType: ElementNode,\n\t\t\tData: \"i\",\n\t\t\tAttr: []Attribute{\n\t\t\t\t{\n\t\t\t\t\tKey: \"backslash\",\n\t\t\t\t\tVal: `\\`,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t9: {\n\t\t\tType: TextNode,\n\t\t\tData: \"&4\",\n\t\t},\n\t\t10: {\n\t\t\tType: TextNode,\n\t\t\tData: \"5\",\n\t\t},\n\t\t11: {\n\t\t\tType: ElementNode,\n\t\t\tData: \"blockquote\",\n\t\t},\n\t\t12: {\n\t\t\tType: ElementNode,\n\t\t\tData: \"br\",\n\t\t},\n\t\t13: {\n\t\t\tType: TextNode,\n\t\t\tData: \"6\",\n\t\t},\n\t}\n\n\t// Build a tree out of those nodes, based on a textual representation.\n\t// Only the \".\\t\"s are significant. The trailing HTML-like text is\n\t// just commentary. 
The \"0:\" prefixes are for easy cross-reference with\n\t// the nodes array.\n\ttreeAsText := [...]string{\n\t\t0: `<html>`,\n\t\t1: `.\t<head>`,\n\t\t2: `.\t<body>`,\n\t\t3: `.\t.\t\"0&lt;1\"`,\n\t\t4: `.\t.\t<p id=\"A\" foo=\"abc&#34;def\">`,\n\t\t5: `.\t.\t.\t\"2\"`,\n\t\t6: `.\t.\t.\t<b empty=\"\">`,\n\t\t7: `.\t.\t.\t.\t\"3\"`,\n\t\t8: `.\t.\t.\t<i backslash=\"\\\">`,\n\t\t9: `.\t.\t.\t.\t\"&amp;4\"`,\n\t\t10: `.\t.\t\"5\"`,\n\t\t11: `.\t.\t<blockquote>`,\n\t\t12: `.\t.\t<br>`,\n\t\t13: `.\t.\t\"6\"`,\n\t}\n\tif len(nodes) != len(treeAsText) {\n\t\tt.Fatal(\"len(nodes) != len(treeAsText)\")\n\t}\n\tvar stack [8]*Node\n\tfor i, line := range treeAsText {\n\t\tlevel := 0\n\t\tfor line[0] == '.' {\n\t\t\t// Strip a leading \".\\t\".\n\t\t\tline = line[2:]\n\t\t\tlevel++\n\t\t}\n\t\tn := nodes[i]\n\t\tif level == 0 {\n\t\t\tif stack[0] != nil {\n\t\t\t\tt.Fatal(\"multiple root nodes\")\n\t\t\t}\n\t\t\tstack[0] = n\n\t\t} else {\n\t\t\tstack[level-1].AppendChild(n)\n\t\t\tstack[level] = n\n\t\t\tfor i := level + 1; i < len(stack); i++ {\n\t\t\t\tstack[i] = nil\n\t\t\t}\n\t\t}\n\t\t// At each stage of tree construction, we check all nodes for consistency.\n\t\tfor j, m := range nodes {\n\t\t\tif err := checkNodeConsistency(m); err != nil {\n\t\t\t\tt.Fatalf(\"i=%d, j=%d: %v\", i, j, err)\n\t\t\t}\n\t\t}\n\t}\n\n\twant := `<html><head></head><body>0&lt;1<p id=\"A\" foo=\"abc&#34;def\">` +\n\t\t`2<b empty=\"\">3</b><i backslash=\"\\\">&amp;4</i></p>` +\n\t\t`5<blockquote></blockquote><br/>6</body></html>`\n\tb := new(bytes.Buffer)\n\tif err := Render(b, nodes[0]); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got := b.String(); got != want {\n\t\tt.Errorf(\"got vs want:\\n%s\\n%s\\n\", got, want)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/testdata/go1.html",
    "content": "<!DOCTYPE html>\n<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n\n  <title>Go 1 Release Notes - The Go Programming Language</title>\n\n<link type=\"text/css\" rel=\"stylesheet\" href=\"/doc/style.css\">\n<script type=\"text/javascript\" src=\"/doc/godocs.js\"></script>\n\n<link rel=\"search\" type=\"application/opensearchdescription+xml\" title=\"godoc\" href=\"/opensearch.xml\" />\n\n<script type=\"text/javascript\">\nvar _gaq = _gaq || [];\n_gaq.push([\"_setAccount\", \"UA-11222381-2\"]);\n_gaq.push([\"_trackPageview\"]);\n</script>\n</head>\n<body>\n\n<div id=\"topbar\"><div class=\"container wide\">\n\n<form method=\"GET\" action=\"/search\">\n<div id=\"menu\">\n<a href=\"/doc/\">Documents</a>\n<a href=\"/ref/\">References</a>\n<a href=\"/pkg/\">Packages</a>\n<a href=\"/project/\">The Project</a>\n<a href=\"/help/\">Help</a>\n<input type=\"text\" id=\"search\" name=\"q\" class=\"inactive\" value=\"Search\">\n</div>\n<div id=\"heading\"><a href=\"/\">The Go Programming Language</a></div>\n</form>\n\n</div></div>\n\n<div id=\"page\" class=\"wide\">\n\n\n  <div id=\"plusone\"><g:plusone size=\"small\" annotation=\"none\"></g:plusone></div>\n  <h1>Go 1 Release Notes</h1>\n\n\n\n\n<div id=\"nav\"></div>\n\n\n\n\n<h2 id=\"introduction\">Introduction to Go 1</h2>\n\n<p>\nGo version 1, Go 1 for short, defines a language and a set of core libraries\nthat provide a stable foundation for creating reliable products, projects, and\npublications.\n</p>\n\n<p>\nThe driving motivation for Go 1 is stability for its users. People should be able to\nwrite Go programs and expect that they will continue to compile and run without\nchange, on a time scale of years, including in production environments such as\nGoogle App Engine. 
Similarly, people should be able to write books about Go, be\nable to say which version of Go the book is describing, and have that version\nnumber still be meaningful much later.\n</p>\n\n<p>\nCode that compiles in Go 1 should, with few exceptions, continue to compile and\nrun throughout the lifetime of that version, even as we issue updates and bug\nfixes such as Go version 1.1, 1.2, and so on. Other than critical fixes, changes\nmade to the language and library for subsequent releases of Go 1 may\nadd functionality but will not break existing Go 1 programs.\n<a href=\"go1compat.html\">The Go 1 compatibility document</a>\nexplains the compatibility guidelines in more detail.\n</p>\n\n<p>\nGo 1 is a representation of Go as it used today, not a wholesale rethinking of\nthe language. We avoided designing new features and instead focused on cleaning\nup problems and inconsistencies and improving portability. There are a number\nchanges to the Go language and packages that we had considered for some time and\nprototyped but not released primarily because they are significant and\nbackwards-incompatible. Go 1 was an opportunity to get them out, which is\nhelpful for the long term, but also means that Go 1 introduces incompatibilities\nfor old programs. Fortunately, the <code>go</code> <code>fix</code> tool can\nautomate much of the work needed to bring programs up to the Go 1 standard.\n</p>\n\n<p>\nThis document outlines the major changes in Go 1 that will affect programmers\nupdating existing code; its reference point is the prior release, r60 (tagged as\nr60.3). 
It also explains how to update code from r60 to run under Go 1.\n</p>\n\n<h2 id=\"language\">Changes to the language</h2>\n\n<h3 id=\"append\">Append</h3>\n\n<p>\nThe <code>append</code> predeclared variadic function makes it easy to grow a slice\nby adding elements to the end.\nA common use is to add bytes to the end of a byte slice when generating output.\nHowever, <code>append</code> did not provide a way to append a string to a <code>[]byte</code>,\nwhich is another common case.\n</p>\n\n<pre><!--{{code \"/doc/progs/go1.go\" `/greeting := ..byte/` `/append.*hello/`}}\n-->    greeting := []byte{}\n    greeting = append(greeting, []byte(&#34;hello &#34;)...)</pre>\n\n<p>\nBy analogy with the similar property of <code>copy</code>, Go 1\npermits a string to be appended (byte-wise) directly to a byte\nslice, reducing the friction between strings and byte slices.\nThe conversion is no longer necessary:\n</p>\n\n<pre><!--{{code \"/doc/progs/go1.go\" `/append.*world/`}}\n-->    greeting = append(greeting, &#34;world&#34;...)</pre>\n\n<p>\n<em>Updating</em>:\nThis is a new feature, so existing code needs no changes.\n</p>\n\n<h3 id=\"close\">Close</h3>\n\n<p>\nThe <code>close</code> predeclared function provides a mechanism\nfor a sender to signal that no more values will be sent.\nIt is important to the implementation of <code>for</code> <code>range</code>\nloops over channels and is helpful in other situations.\nPartly by design and partly because of race conditions that can occur otherwise,\nit is intended for use only by the goroutine sending on the channel,\nnot by the goroutine receiving data.\nHowever, before Go 1 there was no compile-time checking that <code>close</code>\nwas being used correctly.\n</p>\n\n<p>\nTo close this gap, at least in part, Go 1 disallows <code>close</code> on receive-only channels.\nAttempting to close such a channel is a compile-time error.\n</p>\n\n<pre>\n    var c chan int\n    var csend chan&lt;- int = c\n    var crecv &lt;-chan int 
= c\n    close(c)     // legal\n    close(csend) // legal\n    close(crecv) // illegal\n</pre>\n\n<p>\n<em>Updating</em>:\nExisting code that attempts to close a receive-only channel was\nerroneous even before Go 1 and should be fixed.  The compiler will\nnow reject such code.\n</p>\n\n<h3 id=\"literals\">Composite literals</h3>\n\n<p>\nIn Go 1, a composite literal of array, slice, or map type can elide the\ntype specification for the elements' initializers if they are of pointer type.\nAll four of the initializations in this example are legal; the last one was illegal before Go 1.\n</p>\n\n<pre><!--{{code \"/doc/progs/go1.go\" `/type Date struct/` `/STOP/`}}\n-->    type Date struct {\n        month string\n        day   int\n    }\n    <span class=\"comment\">// Struct values, fully qualified; always legal.</span>\n    holiday1 := []Date{\n        Date{&#34;Feb&#34;, 14},\n        Date{&#34;Nov&#34;, 11},\n        Date{&#34;Dec&#34;, 25},\n    }\n    <span class=\"comment\">// Struct values, type name elided; always legal.</span>\n    holiday2 := []Date{\n        {&#34;Feb&#34;, 14},\n        {&#34;Nov&#34;, 11},\n        {&#34;Dec&#34;, 25},\n    }\n    <span class=\"comment\">// Pointers, fully qualified, always legal.</span>\n    holiday3 := []*Date{\n        &amp;Date{&#34;Feb&#34;, 14},\n        &amp;Date{&#34;Nov&#34;, 11},\n        &amp;Date{&#34;Dec&#34;, 25},\n    }\n    <span class=\"comment\">// Pointers, type name elided; legal in Go 1.</span>\n    holiday4 := []*Date{\n        {&#34;Feb&#34;, 14},\n        {&#34;Nov&#34;, 11},\n        {&#34;Dec&#34;, 25},\n    }</pre>\n\n<p>\n<em>Updating</em>:\nThis change has no effect on existing code, but the command\n<code>gofmt</code> <code>-s</code> applied to existing source\nwill, among other things, elide explicit element types wherever permitted.\n</p>\n\n\n<h3 id=\"init\">Goroutines during init</h3>\n\n<p>\nThe old language defined that <code>go</code> statements executed during initialization created 
goroutines but that they did not begin to run until initialization of the entire program was complete.\nThis introduced clumsiness in many places and, in effect, limited the utility\nof the <code>init</code> construct:\nif it was possible for another package to use the library during initialization, the library\nwas forced to avoid goroutines.\nThis design was done for reasons of simplicity and safety but,\nas our confidence in the language grew, it seemed unnecessary.\nRunning goroutines during initialization is no more complex or unsafe than running them during normal execution.\n</p>\n\n<p>\nIn Go 1, code that uses goroutines can be called from\n<code>init</code> routines and global initialization expressions\nwithout introducing a deadlock.\n</p>\n\n<pre><!--{{code \"/doc/progs/go1.go\" `/PackageGlobal/` `/^}/`}}\n-->var PackageGlobal int\n\nfunc init() {\n    c := make(chan int)\n    go initializationFunction(c)\n    PackageGlobal = &lt;-c\n}</pre>\n\n<p>\n<em>Updating</em>:\nThis is a new feature, so existing code needs no changes,\nalthough it's possible that code that depends on goroutines not starting before <code>main</code> will break.\nThere was no such code in the standard repository.\n</p>\n\n<h3 id=\"rune\">The rune type</h3>\n\n<p>\nThe language spec allows the <code>int</code> type to be 32 or 64 bits wide, but current implementations set <code>int</code> to 32 bits even on 64-bit platforms.\nIt would be preferable to have <code>int</code> be 64 bits on 64-bit platforms.\n(There are important consequences for indexing large slices.)\nHowever, this change would waste space when processing Unicode characters with\nthe old language because the <code>int</code> type was also used to hold Unicode code points: each code point would waste an extra 32 bits of storage if <code>int</code> grew from 32 bits to 64.\n</p>\n\n<p>\nTo make changing to 64-bit <code>int</code> feasible,\nGo 1 introduces a new basic type, <code>rune</code>, to represent\nindividual 
Unicode code points.\nIt is an alias for <code>int32</code>, analogous to <code>byte</code>\nas an alias for <code>uint8</code>.\n</p>\n\n<p>\nCharacter literals such as <code>'a'</code>, <code>'語'</code>, and <code>'\\u0345'</code>\nnow have default type <code>rune</code>,\nanalogous to <code>1.0</code> having default type <code>float64</code>.\nA variable initialized to a character constant will therefore\nhave type <code>rune</code> unless otherwise specified.\n</p>\n\n<p>\nLibraries have been updated to use <code>rune</code> rather than <code>int</code>\nwhen appropriate. For instance, the functions <code>unicode.ToLower</code> and\nrelatives now take and return a <code>rune</code>.\n</p>\n\n<pre><!--{{code \"/doc/progs/go1.go\" `/STARTRUNE/` `/ENDRUNE/`}}\n-->    delta := &#39;δ&#39; <span class=\"comment\">// delta has type rune.</span>\n    var DELTA rune\n    DELTA = unicode.ToUpper(delta)\n    epsilon := unicode.ToLower(DELTA + 1)\n    if epsilon != &#39;δ&#39;+1 {\n        log.Fatal(&#34;inconsistent casing for Greek&#34;)\n    }</pre>\n\n<p>\n<em>Updating</em>:\nMost source code will be unaffected by this because the type inference from\n<code>:=</code> initializers introduces the new type silently, and it propagates\nfrom there.\nSome code may get type errors that a trivial conversion will resolve.\n</p>\n\n<h3 id=\"error\">The error type</h3>\n\n<p>\nGo 1 introduces a new built-in type, <code>error</code>, which has the following definition:\n</p>\n\n<pre>\n    type error interface {\n        Error() string\n    }\n</pre>\n\n<p>\nSince the consequences of this type are all in the package library,\nit is discussed <a href=\"#errors\">below</a>.\n</p>\n\n<h3 id=\"delete\">Deleting from maps</h3>\n\n<p>\nIn the old language, to delete the entry with key <code>k</code> from map <code>m</code>, one wrote the statement,\n</p>\n\n<pre>\n    m[k] = value, false\n</pre>\n\n<p>\nThis syntax was a peculiar special case, the only two-to-one assignment.\nIt 
required passing a value (usually ignored) that is evaluated but discarded,\nplus a boolean that was nearly always the constant <code>false</code>.\nIt did the job but was odd and a point of contention.\n</p>\n\n<p>\nIn Go 1, that syntax has gone; instead there is a new built-in\nfunction, <code>delete</code>.  The call\n</p>\n\n<pre><!--{{code \"/doc/progs/go1.go\" `/delete\\(m, k\\)/`}}\n-->    delete(m, k)</pre>\n\n<p>\nwill delete the map entry retrieved by the expression <code>m[k]</code>.\nThere is no return value. Deleting a non-existent entry is a no-op.\n</p>\n\n<p>\n<em>Updating</em>:\nRunning <code>go</code> <code>fix</code> will convert expressions of the form <code>m[k] = value,\nfalse</code> into <code>delete(m, k)</code> when it is clear that\nthe ignored value can be safely discarded from the program and\n<code>false</code> refers to the predefined boolean constant.\nThe fix tool\nwill flag other uses of the syntax for inspection by the programmer.\n</p>\n\n<h3 id=\"iteration\">Iterating in maps</h3>\n\n<p>\nThe old language specification did not define the order of iteration for maps,\nand in practice it differed across hardware platforms.\nThis caused tests that iterated over maps to be fragile and non-portable, with the\nunpleasant property that a test might always pass on one machine but break on another.\n</p>\n\n<p>\nIn Go 1, the order in which elements are visited when iterating\nover a map using a <code>for</code> <code>range</code> statement\nis defined to be unpredictable, even if the same loop is run multiple\ntimes with the same map.\nCode should not assume that the elements are visited in any particular order.\n</p>\n\n<p>\nThis change means that code that depends on iteration order is very likely to break early and be fixed long before it becomes a problem.\nJust as important, it allows the map implementation to ensure better map balancing even when programs are using range loops to select an element from a 
map.\n</p>\n\n<pre><!--{{code \"/doc/progs/go1.go\" `/Sunday/` `/^\t}/`}}\n-->    m := map[string]int{&#34;Sunday&#34;: 0, &#34;Monday&#34;: 1}\n    for name, value := range m {\n        <span class=\"comment\">// This loop should not assume Sunday will be visited first.</span>\n        f(name, value)\n    }</pre>\n\n<p>\n<em>Updating</em>:\nThis is one change where tools cannot help.  Most existing code\nwill be unaffected, but some programs may break or misbehave; we\nrecommend manual checking of all range statements over maps to\nverify they do not depend on iteration order. There were a few such\nexamples in the standard repository; they have been fixed.\nNote that it was already incorrect to depend on the iteration order, which\nwas unspecified. This change codifies the unpredictability.\n</p>\n\n<h3 id=\"multiple_assignment\">Multiple assignment</h3>\n\n<p>\nThe language specification has long guaranteed that in assignments\nthe right-hand-side expressions are all evaluated before any left-hand-side expressions are assigned.\nTo guarantee predictable behavior,\nGo 1 refines the specification further.\n</p>\n\n<p>\nIf the left-hand side of the assignment\nstatement contains expressions that require evaluation, such as\nfunction calls or array indexing operations, these will all be done\nusing the usual left-to-right rule before any variables are assigned\ntheir value.  Once everything is evaluated, the actual assignments\nproceed in left-to-right order.\n</p>\n\n<p>\nThese examples illustrate the behavior.\n</p>\n\n<pre><!--{{code \"/doc/progs/go1.go\" `/sa :=/` `/then sc.0. 
= 2/`}}\n-->    sa := []int{1, 2, 3}\n    i := 0\n    i, sa[i] = 1, 2 <span class=\"comment\">// sets i = 1, sa[0] = 2</span>\n\n    sb := []int{1, 2, 3}\n    j := 0\n    sb[j], j = 2, 1 <span class=\"comment\">// sets sb[0] = 2, j = 1</span>\n\n    sc := []int{1, 2, 3}\n    sc[0], sc[0] = 1, 2 <span class=\"comment\">// sets sc[0] = 1, then sc[0] = 2 (so sc[0] = 2 at end)</span></pre>\n\n<p>\n<em>Updating</em>:\nThis is one change where tools cannot help, but breakage is unlikely.\nNo code in the standard repository was broken by this change, and code\nthat depended on the previous unspecified behavior was already incorrect.\n</p>\n\n<h3 id=\"shadowing\">Returns and shadowed variables</h3>\n\n<p>\nA common mistake is to use <code>return</code> (without arguments) after an assignment to a variable that has the same name as a result variable but is not the same variable.\nThis situation is called <em>shadowing</em>: the result variable has been shadowed by another variable with the same name declared in an inner scope.\n</p>\n\n<p>\nIn functions with named return values,\nthe Go 1 compilers disallow return statements without arguments if any of the named return values is shadowed at the point of the return statement.\n(It isn't part of the specification, because this is one area we are still exploring;\nthe situation is analogous to the compilers rejecting functions that do not end with an explicit return statement.)\n</p>\n\n<p>\nThis function implicitly returns a shadowed return value and will be rejected by the compiler:\n</p>\n\n<pre>\n    func Bug() (i, j, k int) {\n        for i = 0; i &lt; 5; i++ {\n            for j := 0; j &lt; 5; j++ { // Redeclares j.\n                k += i*j\n                if k > 100 {\n                    return // Rejected: j is shadowed here.\n                }\n            }\n        }\n        return // OK: j is not shadowed here.\n    }\n</pre>\n\n<p>\n<em>Updating</em>:\nCode that shadows return values in this way will be 
rejected by the compiler and will need to be fixed by hand.\nThe few cases that arose in the standard repository were mostly bugs.\n</p>\n\n<h3 id=\"unexported\">Copying structs with unexported fields</h3>\n\n<p>\nThe old language did not allow a package to make a copy of a struct value containing unexported fields belonging to a different package.\nThere was, however, a required exception for a method receiver;\nalso, the implementations of <code>copy</code> and <code>append</code> have never honored the restriction.\n</p>\n\n<p>\nGo 1 will allow packages to copy struct values containing unexported fields from other packages.\nBesides resolving the inconsistency,\nthis change admits a new kind of API: a package can return an opaque value without resorting to a pointer or interface.\nThe new implementations of <code>time.Time</code> and\n<code>reflect.Value</code> are examples of types taking advantage of this new property.\n</p>\n\n<p>\nAs an example, if package <code>p</code> includes the definitions,\n</p>\n\n<pre>\n    type Struct struct {\n        Public int\n        secret int\n    }\n    func NewStruct(a int) Struct {  // Note: not a pointer.\n        return Struct{a, f(a)}\n    }\n    func (s Struct) String() string {\n        return fmt.Sprintf(\"{%d (secret %d)}\", s.Public, s.secret)\n    }\n</pre>\n\n<p>\na package that imports <code>p</code> can assign and copy values of type\n<code>p.Struct</code> at will.\nBehind the scenes the unexported fields will be assigned and copied just\nas if they were exported,\nbut the client code will never be aware of them. 
The code\n</p>\n\n<pre>\n    import \"p\"\n\n    myStruct := p.NewStruct(23)\n    copyOfMyStruct := myStruct\n    fmt.Println(myStruct, copyOfMyStruct)\n</pre>\n\n<p>\nwill show that the secret field of the struct has been copied to the new value.\n</p>\n\n<p>\n<em>Updating</em>:\nThis is a new feature, so existing code needs no changes.\n</p>\n\n<h3 id=\"equality\">Equality</h3>\n\n<p>\nBefore Go 1, the language did not define equality on struct and array values.\nThis meant,\namong other things, that structs and arrays could not be used as map keys.\nOn the other hand, Go did define equality on function and map values.\nFunction equality was problematic in the presence of closures\n(when are two closures equal?)\nwhile map equality compared pointers, not the maps' content, which was usually\nnot what the user would want.\n</p>\n\n<p>\nGo 1 addressed these issues.\nFirst, structs and arrays can be compared for equality and inequality\n(<code>==</code> and <code>!=</code>),\nand therefore be used as map keys,\nprovided they are composed from elements for which equality is also defined,\nusing element-wise comparison.\n</p>\n\n<pre><!--{{code \"/doc/progs/go1.go\" `/type Day struct/` `/Printf/`}}\n-->    type Day struct {\n        long  string\n        short string\n    }\n    Christmas := Day{&#34;Christmas&#34;, &#34;XMas&#34;}\n    Thanksgiving := Day{&#34;Thanksgiving&#34;, &#34;Turkey&#34;}\n    holiday := map[Day]bool{\n        Christmas:    true,\n        Thanksgiving: true,\n    }\n    fmt.Printf(&#34;Christmas is a holiday: %t\\n&#34;, holiday[Christmas])</pre>\n\n<p>\nSecond, Go 1 removes the definition of equality for function values,\nexcept for comparison with <code>nil</code>.\nFinally, map equality is gone too, also except for comparison with <code>nil</code>.\n</p>\n\n<p>\nNote that equality is still undefined for slices, for which the\ncalculation is in general infeasible.  
Also note that the ordered\ncomparison operators (<code>&lt;</code> <code>&lt;=</code>\n<code>&gt;</code> <code>&gt;=</code>) are still undefined for\nstructs and arrays.\n\n<p>\n<em>Updating</em>:\nStruct and array equality is a new feature, so existing code needs no changes.\nExisting code that depends on function or map equality will be\nrejected by the compiler and will need to be fixed by hand.\nFew programs will be affected, but the fix may require some\nredesign.\n</p>\n\n<h2 id=\"packages\">The package hierarchy</h2>\n\n<p>\nGo 1 addresses many deficiencies in the old standard library and\ncleans up a number of packages, making them more internally consistent\nand portable.\n</p>\n\n<p>\nThis section describes how the packages have been rearranged in Go 1.\nSome have moved, some have been renamed, some have been deleted.\nNew packages are described in later sections.\n</p>\n\n<h3 id=\"hierarchy\">The package hierarchy</h3>\n\n<p>\nGo 1 has a rearranged package hierarchy that groups related items\ninto subdirectories. 
For instance, <code>utf8</code> and\n<code>utf16</code> now occupy subdirectories of <code>unicode</code>.\nAlso, <a href=\"#subrepo\">some packages</a> have moved into\nsubrepositories of\n<a href=\"http://code.google.com/p/go\"><code>code.google.com/p/go</code></a>\nwhile <a href=\"#deleted\">others</a> have been deleted outright.\n</p>\n\n<table class=\"codetable\" frame=\"border\" summary=\"Moved packages\">\n<colgroup align=\"left\" width=\"60%\"></colgroup>\n<colgroup align=\"left\" width=\"40%\"></colgroup>\n<tr>\n<th align=\"left\">Old path</th>\n<th align=\"left\">New path</th>\n</tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>asn1</td> <td>encoding/asn1</td></tr>\n<tr><td>csv</td> <td>encoding/csv</td></tr>\n<tr><td>gob</td> <td>encoding/gob</td></tr>\n<tr><td>json</td> <td>encoding/json</td></tr>\n<tr><td>xml</td> <td>encoding/xml</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>exp/template/html</td> <td>html/template</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>big</td> <td>math/big</td></tr>\n<tr><td>cmath</td> <td>math/cmplx</td></tr>\n<tr><td>rand</td> <td>math/rand</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>http</td> <td>net/http</td></tr>\n<tr><td>http/cgi</td> <td>net/http/cgi</td></tr>\n<tr><td>http/fcgi</td> <td>net/http/fcgi</td></tr>\n<tr><td>http/httptest</td> <td>net/http/httptest</td></tr>\n<tr><td>http/pprof</td> <td>net/http/pprof</td></tr>\n<tr><td>mail</td> <td>net/mail</td></tr>\n<tr><td>rpc</td> <td>net/rpc</td></tr>\n<tr><td>rpc/jsonrpc</td> <td>net/rpc/jsonrpc</td></tr>\n<tr><td>smtp</td> <td>net/smtp</td></tr>\n<tr><td>url</td> <td>net/url</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>exec</td> <td>os/exec</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>scanner</td> <td>text/scanner</td></tr>\n<tr><td>tabwriter</td> <td>text/tabwriter</td></tr>\n<tr><td>template</td> <td>text/template</td></tr>\n<tr><td>template/parse</td> 
<td>text/template/parse</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>utf8</td> <td>unicode/utf8</td></tr>\n<tr><td>utf16</td> <td>unicode/utf16</td></tr>\n</table>\n\n<p>\nNote that the package names for the old <code>cmath</code> and\n<code>exp/template/html</code> packages have changed to <code>cmplx</code>\nand <code>template</code>.\n</p>\n\n<p>\n<em>Updating</em>:\nRunning <code>go</code> <code>fix</code> will update all imports and package renames for packages that\nremain inside the standard repository.  Programs that import packages\nthat are no longer in the standard repository will need to be edited\nby hand.\n</p>\n\n<h3 id=\"exp\">The package tree exp</h3>\n\n<p>\nBecause they are not standardized, the packages under the <code>exp</code> directory will not be available in the\nstandard Go 1 release distributions, although they will be available in source code form\nin <a href=\"http://code.google.com/p/go/\">the repository</a> for\ndevelopers who wish to use them.\n</p>\n\n<p>\nSeveral packages have moved under <code>exp</code> at the time of Go 1's release:\n</p>\n\n<ul>\n<li><code>ebnf</code></li>\n<li><code>html</code><sup>&#8224;</sup></li>\n<li><code>go/types</code></li>\n</ul>\n\n<p>\n(<sup>&#8224;</sup>The <code>EscapeString</code> and <code>UnescapeString</code> types remain\nin package <code>html</code>.)\n</p>\n\n<p>\nAll these packages are available under the same names, with the prefix <code>exp/</code>: <code>exp/ebnf</code> etc.\n</p>\n\n<p>\nAlso, the <code>utf8.String</code> type has been moved to its own package, <code>exp/utf8string</code>.\n</p>\n\n<p>\nFinally, the <code>gotype</code> command now resides in <code>exp/gotype</code>, while\n<code>ebnflint</code> is now in <code>exp/ebnflint</code>.\nIf they are installed, they now reside in <code>$GOROOT/bin/tool</code>.\n</p>\n\n<p>\n<em>Updating</em>:\nCode that uses packages in <code>exp</code> will need to be updated by hand,\nor else compiled from an installation 
that has <code>exp</code> available.\nThe <code>go</code> <code>fix</code> tool or the compiler will complain about such uses.\n</p>\n\n<h3 id=\"old\">The package tree old</h3>\n\n<p>\nBecause they are deprecated, the packages under the <code>old</code> directory will not be available in the\nstandard Go 1 release distributions, although they will be available in source code form for\ndevelopers who wish to use them.\n</p>\n\n<p>\nThe packages in their new locations are:\n</p>\n\n<ul>\n<li><code>old/netchan</code></li>\n<li><code>old/regexp</code></li>\n<li><code>old/template</code></li>\n</ul>\n\n<p>\n<em>Updating</em>:\nCode that uses packages now in <code>old</code> will need to be updated by hand,\nor else compiled from an installation that has <code>old</code> available.\nThe <code>go</code> <code>fix</code> tool will warn about such uses.\n</p>\n\n<h3 id=\"deleted\">Deleted packages</h3>\n\n<p>\nGo 1 deletes several packages outright:\n</p>\n\n<ul>\n<li><code>container/vector</code></li>\n<li><code>exp/datafmt</code></li>\n<li><code>go/typechecker</code></li>\n<li><code>try</code></li>\n</ul>\n\n<p>\nand also the command <code>gotry</code>.\n</p>\n\n<p>\n<em>Updating</em>:\nCode that uses <code>container/vector</code> should be updated to use\nslices directly.  
See\n<a href=\"http://code.google.com/p/go-wiki/wiki/SliceTricks\">the Go\nLanguage Community Wiki</a> for some suggestions.\nCode that uses the other packages (there should be almost zero) will need to be rethought.\n</p>\n\n<h3 id=\"subrepo\">Packages moving to subrepositories</h3>\n\n<p>\nGo 1 has moved a number of packages into other repositories, usually sub-repositories of\n<a href=\"http://code.google.com/p/go/\">the main Go repository</a>.\nThis table lists the old and new import paths:\n\n<table class=\"codetable\" frame=\"border\" summary=\"Sub-repositories\">\n<colgroup align=\"left\" width=\"40%\"></colgroup>\n<colgroup align=\"left\" width=\"60%\"></colgroup>\n<tr>\n<th align=\"left\">Old</th>\n<th align=\"left\">New</th>\n</tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>crypto/bcrypt</td> <td>code.google.com/p/go.crypto/bcrypt</tr>\n<tr><td>crypto/blowfish</td> <td>code.google.com/p/go.crypto/blowfish</tr>\n<tr><td>crypto/cast5</td> <td>code.google.com/p/go.crypto/cast5</tr>\n<tr><td>crypto/md4</td> <td>code.google.com/p/go.crypto/md4</tr>\n<tr><td>crypto/ocsp</td> <td>code.google.com/p/go.crypto/ocsp</tr>\n<tr><td>crypto/openpgp</td> <td>code.google.com/p/go.crypto/openpgp</tr>\n<tr><td>crypto/openpgp/armor</td> <td>code.google.com/p/go.crypto/openpgp/armor</tr>\n<tr><td>crypto/openpgp/elgamal</td> <td>code.google.com/p/go.crypto/openpgp/elgamal</tr>\n<tr><td>crypto/openpgp/errors</td> <td>code.google.com/p/go.crypto/openpgp/errors</tr>\n<tr><td>crypto/openpgp/packet</td> <td>code.google.com/p/go.crypto/openpgp/packet</tr>\n<tr><td>crypto/openpgp/s2k</td> <td>code.google.com/p/go.crypto/openpgp/s2k</tr>\n<tr><td>crypto/ripemd160</td> <td>code.google.com/p/go.crypto/ripemd160</tr>\n<tr><td>crypto/twofish</td> <td>code.google.com/p/go.crypto/twofish</tr>\n<tr><td>crypto/xtea</td> <td>code.google.com/p/go.crypto/xtea</tr>\n<tr><td>exp/ssh</td> <td>code.google.com/p/go.crypto/ssh</tr>\n<tr>\n<td 
colspan=\"2\"><hr></td>\n</tr>\n<tr><td>image/bmp</td> <td>code.google.com/p/go.image/bmp</tr>\n<tr><td>image/tiff</td> <td>code.google.com/p/go.image/tiff</tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>net/dict</td> <td>code.google.com/p/go.net/dict</tr>\n<tr><td>net/websocket</td> <td>code.google.com/p/go.net/websocket</tr>\n<tr><td>exp/spdy</td> <td>code.google.com/p/go.net/spdy</tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>encoding/git85</td> <td>code.google.com/p/go.codereview/git85</tr>\n<tr><td>patch</td> <td>code.google.com/p/go.codereview/patch</tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>exp/wingui</td> <td>code.google.com/p/gowingui</tr>\n</table>\n\n<p>\n<em>Updating</em>:\nRunning <code>go</code> <code>fix</code> will update imports of these packages to use the new import paths.\nInstallations that depend on these packages will need to install them using\na <code>go get</code> command.\n</p>\n\n<h2 id=\"major\">Major changes to the library</h2>\n\n<p>\nThis section describes significant changes to the core libraries, the ones that\naffect the most programs.\n</p>\n\n<h3 id=\"errors\">The error type and errors package</h3>\n\n<p>\nThe placement of <code>os.Error</code> in package <code>os</code> is mostly historical: errors first came up when implementing package <code>os</code>, and they seemed system-related at the time.\nSince then it has become clear that errors are more fundamental than the operating system.  
For example, it would be nice to use <code>Errors</code> in packages that <code>os</code> depends on, like <code>syscall</code>.\nAlso, having <code>Error</code> in <code>os</code> introduces many dependencies on <code>os</code> that would otherwise not exist.\n</p>\n\n<p>\nGo 1 solves these problems by introducing a built-in <code>error</code> interface type and a separate <code>errors</code> package (analogous to <code>bytes</code> and <code>strings</code>) that contains utility functions.\nIt replaces <code>os.NewError</code> with\n<a href=\"/pkg/errors/#New\"><code>errors.New</code></a>,\ngiving errors a more central place in the environment.\n</p>\n\n<p>\nSo the widely-used <code>String</code> method does not cause accidental satisfaction\nof the <code>error</code> interface, the <code>error</code> interface uses instead\nthe name <code>Error</code> for that method:\n</p>\n\n<pre>\n    type error interface {\n        Error() string\n    }\n</pre>\n\n<p>\nThe <code>fmt</code> library automatically invokes <code>Error</code>, as it already\ndoes for <code>String</code>, for easy printing of error values.\n</p>\n\n<pre><!--{{code \"/doc/progs/go1.go\" `/START ERROR EXAMPLE/` `/END ERROR EXAMPLE/`}}\n-->type SyntaxError struct {\n    File    string\n    Line    int\n    Message string\n}\n\nfunc (se *SyntaxError) Error() string {\n    return fmt.Sprintf(&#34;%s:%d: %s&#34;, se.File, se.Line, se.Message)\n}</pre>\n\n<p>\nAll standard packages have been updated to use the new interface; the old <code>os.Error</code> is gone.\n</p>\n\n<p>\nA new package, <a href=\"/pkg/errors/\"><code>errors</code></a>, contains the function\n</p>\n\n<pre>\nfunc New(text string) error\n</pre>\n\n<p>\nto turn a string into an error. 
It replaces the old <code>os.NewError</code>.\n</p>\n\n<pre><!--{{code \"/doc/progs/go1.go\" `/ErrSyntax/`}}\n-->    var ErrSyntax = errors.New(&#34;syntax error&#34;)</pre>\n\t\t\n<p>\n<em>Updating</em>:\nRunning <code>go</code> <code>fix</code> will update almost all code affected by the change.\nCode that defines error types with a <code>String</code> method will need to be updated\nby hand to rename the methods to <code>Error</code>.\n</p>\n\n<h3 id=\"errno\">System call errors</h3>\n\n<p>\nThe old <code>syscall</code> package, which predated <code>os.Error</code>\n(and just about everything else),\nreturned errors as <code>int</code> values.\nIn turn, the <code>os</code> package forwarded many of these errors, such\nas <code>EINVAL</code>, but using a different set of errors on each platform.\nThis behavior was unpleasant and unportable.\n</p>\n\n<p>\nIn Go 1, the\n<a href=\"/pkg/syscall/\"><code>syscall</code></a>\npackage instead returns an <code>error</code> for system call errors.\nOn Unix, the implementation is done by a\n<a href=\"/pkg/syscall/#Errno\"><code>syscall.Errno</code></a> type\nthat satisfies <code>error</code> and replaces the old <code>os.Errno</code>.\n</p>\n\n<p>\nThe changes affecting <code>os.EINVAL</code> and relatives are\ndescribed <a href=\"#os\">elsewhere</a>.\n\n<p>\n<em>Updating</em>:\nRunning <code>go</code> <code>fix</code> will update almost all code affected by the change.\nRegardless, most code should use the <code>os</code> package\nrather than <code>syscall</code> and so will be unaffected.\n</p>\n\n<h3 id=\"time\">Time</h3>\n\n<p>\nTime is always a challenge to support well in a programming language.\nThe old Go <code>time</code> package had <code>int64</code> units, no\nreal type safety,\nand no distinction between absolute times and durations.\n</p>\n\n<p>\nOne of the most sweeping changes in the Go 1 library is therefore a\ncomplete redesign of the\n<a href=\"/pkg/time/\"><code>time</code></a> package.\nInstead of an 
integer number of nanoseconds as an <code>int64</code>,\nand a separate <code>*time.Time</code> type to deal with human\nunits such as hours and years,\nthere are now two fundamental types:\n<a href=\"/pkg/time/#Time\"><code>time.Time</code></a>\n(a value, so the <code>*</code> is gone), which represents a moment in time;\nand <a href=\"/pkg/time/#Duration\"><code>time.Duration</code></a>,\nwhich represents an interval.\nBoth have nanosecond resolution.\nA <code>Time</code> can represent any time into the ancient\npast and remote future, while a <code>Duration</code> can\nspan plus or minus only about 290 years.\nThere are methods on these types, plus a number of helpful\npredefined constant durations such as <code>time.Second</code>.\n</p>\n\n<p>\nAmong the new methods are things like\n<a href=\"/pkg/time/#Time.Add\"><code>Time.Add</code></a>,\nwhich adds a <code>Duration</code> to a <code>Time</code>, and\n<a href=\"/pkg/time/#Time.Sub\"><code>Time.Sub</code></a>,\nwhich subtracts two <code>Times</code> to yield a <code>Duration</code>.\n</p>\n\n<p>\nThe most important semantic change is that the Unix epoch (Jan 1, 1970) is now\nrelevant only for those functions and methods that mention Unix:\n<a href=\"/pkg/time/#Unix\"><code>time.Unix</code></a>\nand the <a href=\"/pkg/time/#Time.Unix\"><code>Unix</code></a>\nand <a href=\"/pkg/time/#Time.UnixNano\"><code>UnixNano</code></a> methods\nof the <code>Time</code> type.\nIn particular,\n<a href=\"/pkg/time/#Now\"><code>time.Now</code></a>\nreturns a <code>time.Time</code> value rather than, in the old\nAPI, an integer nanosecond count since the Unix epoch.\n</p>\n\n<pre><!--{{code \"/doc/progs/go1.go\" `/sleepUntil/` `/^}/`}}\n--><span class=\"comment\">// sleepUntil sleeps until the specified time. 
It returns immediately if it&#39;s too late.</span>\nfunc sleepUntil(wakeup time.Time) {\n    now := time.Now() <span class=\"comment\">// A Time.</span>\n    if !wakeup.After(now) {\n        return\n    }\n    delta := wakeup.Sub(now) <span class=\"comment\">// A Duration.</span>\n    fmt.Printf(&#34;Sleeping for %.3fs\\n&#34;, delta.Seconds())\n    time.Sleep(delta)\n}</pre>\n\n<p>\nThe new types, methods, and constants have been propagated through\nall the standard packages that use time, such as <code>os</code> and\nits representation of file time stamps.\n</p>\n\n<p>\n<em>Updating</em>:\nThe <code>go</code> <code>fix</code> tool will update many uses of the old <code>time</code> package to use the new\ntypes and methods, although it does not replace values such as <code>1e9</code>\nrepresenting nanoseconds per second.\nAlso, because of type changes in some of the values that arise,\nsome of the expressions rewritten by the fix tool may require\nfurther hand editing; in such cases the rewrite will include\nthe correct function or method for the old functionality, but\nmay have the wrong type or require further analysis.\n</p>\n\n<h2 id=\"minor\">Minor changes to the library</h2>\n\n<p>\nThis section describes smaller changes, such as those to less commonly\nused packages or that affect\nfew programs beyond the need to run <code>go</code> <code>fix</code>.\nThis category includes packages that are new in Go 1.\nCollectively they improve portability, regularize behavior, and\nmake the interfaces more modern and Go-like.\n</p>\n\n<h3 id=\"archive_zip\">The archive/zip package</h3>\n\n<p>\nIn Go 1, <a href=\"/pkg/archive/zip/#Writer\"><code>*zip.Writer</code></a> no\nlonger has a <code>Write</code> method. 
Its presence was a mistake.\n</p>\n\n<p>\n<em>Updating</em>:\nWhat little code is affected will be caught by the compiler and must be updated by hand.\n</p>\n\n<h3 id=\"bufio\">The bufio package</h3>\n\n<p>\nIn Go 1, <a href=\"/pkg/bufio/#NewReaderSize\"><code>bufio.NewReaderSize</code></a>\nand\n<a href=\"/pkg/bufio/#NewWriterSize\"><code>bufio.NewWriterSize</code></a>\nfunctions no longer return an error for invalid sizes.\nIf the argument size is too small or invalid, it is adjusted.\n</p>\n\n<p>\n<em>Updating</em>:\nRunning <code>go</code> <code>fix</code> will update calls that assign the error to _.\nCalls that aren't fixed will be caught by the compiler and must be updated by hand.\n</p>\n\n<h3 id=\"compress\">The compress/flate, compress/gzip and compress/zlib packages</h3>\n\n<p>\nIn Go 1, the <code>NewWriterXxx</code> functions in\n<a href=\"/pkg/compress/flate\"><code>compress/flate</code></a>,\n<a href=\"/pkg/compress/gzip\"><code>compress/gzip</code></a> and\n<a href=\"/pkg/compress/zlib\"><code>compress/zlib</code></a>\nall return <code>(*Writer, error)</code> if they take a compression level,\nand <code>*Writer</code> otherwise. Package <code>gzip</code>'s\n<code>Compressor</code> and <code>Decompressor</code> types have been renamed\nto <code>Writer</code> and <code>Reader</code>. Package <code>flate</code>'s\n<code>WrongValueError</code> type has been removed.\n</p>\n\n<p>\n<em>Updating</em>\nRunning <code>go</code> <code>fix</code> will update old names and calls that assign the error to _.\nCalls that aren't fixed will be caught by the compiler and must be updated by hand.\n</p>\n\n<h3 id=\"crypto_aes_des\">The crypto/aes and crypto/des packages</h3>\n\n<p>\nIn Go 1, the <code>Reset</code> method has been removed. 
Go does not guarantee\nthat memory is not copied and therefore this method was misleading.\n</p>\n\n<p>\nThe cipher-specific types <code>*aes.Cipher</code>, <code>*des.Cipher</code>,\nand <code>*des.TripleDESCipher</code> have been removed in favor of\n<code>cipher.Block</code>.\n</p>\n\n<p>\n<em>Updating</em>:\nRemove the calls to Reset. Replace uses of the specific cipher types with\ncipher.Block.\n</p>\n\n<h3 id=\"crypto_elliptic\">The crypto/elliptic package</h3>\n\n<p>\nIn Go 1, <a href=\"/pkg/crypto/elliptic/#Curve\"><code>elliptic.Curve</code></a>\nhas been made an interface to permit alternative implementations. The curve\nparameters have been moved to the\n<a href=\"/pkg/crypto/elliptic/#CurveParams\"><code>elliptic.CurveParams</code></a>\nstructure.\n</p>\n\n<p>\n<em>Updating</em>:\nExisting users of <code>*elliptic.Curve</code> will need to change to\nsimply <code>elliptic.Curve</code>. Calls to <code>Marshal</code>,\n<code>Unmarshal</code> and <code>GenerateKey</code> are now functions\nin <code>crypto/elliptic</code> that take an <code>elliptic.Curve</code>\nas their first argument.\n</p>\n\n<h3 id=\"crypto_hmac\">The crypto/hmac package</h3>\n\n<p>\nIn Go 1, the hash-specific functions, such as <code>hmac.NewMD5</code>, have\nbeen removed from <code>crypto/hmac</code>. Instead, <code>hmac.New</code> takes\na function that returns a <code>hash.Hash</code>, such as <code>md5.New</code>.\n</p>\n\n<p>\n<em>Updating</em>:\nRunning <code>go</code> <code>fix</code> will perform the needed changes.\n</p>\n\n<h3 id=\"crypto_x509\">The crypto/x509 package</h3>\n\n<p>\nIn Go 1, the\n<a href=\"/pkg/crypto/x509/#CreateCertificate\"><code>CreateCertificate</code></a>\nand\n<a href=\"/pkg/crypto/x509/#CreateCRL\"><code>CreateCRL</code></a>\nfunctions in <code>crypto/x509</code> have been altered to take an\n<code>interface{}</code> where they previously took a <code>*rsa.PublicKey</code>\nor <code>*rsa.PrivateKey</code>. 
This will allow other public key algorithms\nto be implemented in the future.\n</p>\n\n<p>\n<em>Updating</em>:\nNo changes will be needed.\n</p>\n\n<h3 id=\"encoding_binary\">The encoding/binary package</h3>\n\n<p>\nIn Go 1, the <code>binary.TotalSize</code> function has been replaced by\n<a href=\"/pkg/encoding/binary/#Size\"><code>Size</code></a>,\nwhich takes an <code>interface{}</code> argument rather than\na <code>reflect.Value</code>.\n</p>\n\n<p>\n<em>Updating</em>:\nWhat little code is affected will be caught by the compiler and must be updated by hand.\n</p>\n\n<h3 id=\"encoding_xml\">The encoding/xml package</h3>\n\n<p>\nIn Go 1, the <a href=\"/pkg/encoding/xml/\"><code>xml</code></a> package\nhas been brought closer in design to the other marshaling packages such\nas <a href=\"/pkg/encoding/gob/\"><code>encoding/gob</code></a>.\n</p>\n\n<p>\nThe old <code>Parser</code> type is renamed\n<a href=\"/pkg/encoding/xml/#Decoder\"><code>Decoder</code></a> and has a new\n<a href=\"/pkg/encoding/xml/#Decoder.Decode\"><code>Decode</code></a> method. An\n<a href=\"/pkg/encoding/xml/#Encoder\"><code>Encoder</code></a> type was also introduced.\n</p>\n\n<p>\nThe functions <a href=\"/pkg/encoding/xml/#Marshal\"><code>Marshal</code></a>\nand <a href=\"/pkg/encoding/xml/#Unmarshal\"><code>Unmarshal</code></a>\nwork with <code>[]byte</code> values now. To work with streams,\nuse the new <a href=\"/pkg/encoding/xml/#Encoder\"><code>Encoder</code></a>\nand <a href=\"/pkg/encoding/xml/#Decoder\"><code>Decoder</code></a> types.\n</p>\n\n<p>\nWhen marshaling or unmarshaling values, the format of supported flags in\nfield tags has changed to be closer to the\n<a href=\"/pkg/encoding/json\"><code>json</code></a> package\n(<code>`xml:\"name,flag\"`</code>). 
The matching done between field tags, field\nnames, and the XML attribute and element names is now case-sensitive.\nThe <code>XMLName</code> field tag, if present, must also match the name\nof the XML element being marshaled.\n</p>\n\n<p>\n<em>Updating</em>:\nRunning <code>go</code> <code>fix</code> will update most uses of the package except for some calls to\n<code>Unmarshal</code>. Special care must be taken with field tags,\nsince the fix tool will not update them and if not fixed by hand they will\nmisbehave silently in some cases. For example, the old\n<code>\"attr\"</code> is now written <code>\",attr\"</code> while plain\n<code>\"attr\"</code> remains valid but with a different meaning.\n</p>\n\n<h3 id=\"expvar\">The expvar package</h3>\n\n<p>\nIn Go 1, the <code>RemoveAll</code> function has been removed.\nThe <code>Iter</code> function and Iter method on <code>*Map</code> have\nbeen replaced by\n<a href=\"/pkg/expvar/#Do\"><code>Do</code></a>\nand\n<a href=\"/pkg/expvar/#Map.Do\"><code>(*Map).Do</code></a>.\n</p>\n\n<p>\n<em>Updating</em>:\nMost code using <code>expvar</code> will not need changing. 
The rare code that used\n<code>Iter</code> can be updated to pass a closure to <code>Do</code> to achieve the same effect.\n</p>\n\n<h3 id=\"flag\">The flag package</h3>\n\n<p>\nIn Go 1, the interface <a href=\"/pkg/flag/#Value\"><code>flag.Value</code></a> has changed slightly.\nThe <code>Set</code> method now returns an <code>error</code> instead of\na <code>bool</code> to indicate success or failure.\n</p>\n\n<p>\nThere is also a new kind of flag, <code>Duration</code>, to support argument\nvalues specifying time intervals.\nValues for such flags must be given units, just as <code>time.Duration</code>\nformats them: <code>10s</code>, <code>1h30m</code>, etc.\n</p>\n\n<pre><!--{{code \"/doc/progs/go1.go\" `/timeout/`}}\n-->var timeout = flag.Duration(&#34;timeout&#34;, 30*time.Second, &#34;how long to wait for completion&#34;)</pre>\n\n<p>\n<em>Updating</em>:\nPrograms that implement their own flags will need minor manual fixes to update their\n<code>Set</code> methods.\nThe <code>Duration</code> flag is new and affects no existing code.\n</p>\n\n\n<h3 id=\"go\">The go/* packages</h3>\n\n<p>\nSeveral packages under <code>go</code> have slightly revised APIs.\n</p>\n\n<p>\nA concrete <code>Mode</code> type was introduced for configuration mode flags\nin the packages\n<a href=\"/pkg/go/scanner/\"><code>go/scanner</code></a>,\n<a href=\"/pkg/go/parser/\"><code>go/parser</code></a>,\n<a href=\"/pkg/go/printer/\"><code>go/printer</code></a>, and\n<a href=\"/pkg/go/doc/\"><code>go/doc</code></a>.\n</p>\n\n<p>\nThe modes <code>AllowIllegalChars</code> and <code>InsertSemis</code> have been removed\nfrom the <a href=\"/pkg/go/scanner/\"><code>go/scanner</code></a> package. They were mostly\nuseful for scanning text other then Go source files. 
Instead, the\n<a href=\"/pkg/text/scanner/\"><code>text/scanner</code></a> package should be used\nfor that purpose.\n</p>\n\n<p>\nThe <a href=\"/pkg/go/scanner/#ErrorHandler\"><code>ErrorHandler</code></a> provided\nto the scanner's <a href=\"/pkg/go/scanner/#Scanner.Init\"><code>Init</code></a> method is\nnow simply a function rather than an interface. The <code>ErrorVector</code> type has\nbeen removed in favor of the (existing) <a href=\"/pkg/go/scanner/#ErrorList\"><code>ErrorList</code></a>\ntype, and the <code>ErrorVector</code> methods have been migrated. Instead of embedding\nan <code>ErrorVector</code> in a client of the scanner, now a client should maintain\nan <code>ErrorList</code>.\n</p>\n\n<p>\nThe set of parse functions provided by the <a href=\"/pkg/go/parser/\"><code>go/parser</code></a>\npackage has been reduced to the primary parse function\n<a href=\"/pkg/go/parser/#ParseFile\"><code>ParseFile</code></a>, and a couple of\nconvenience functions <a href=\"/pkg/go/parser/#ParseDir\"><code>ParseDir</code></a>\nand <a href=\"/pkg/go/parser/#ParseExpr\"><code>ParseExpr</code></a>.\n</p>\n\n<p>\nThe <a href=\"/pkg/go/printer/\"><code>go/printer</code></a> package supports an additional\nconfiguration mode <a href=\"/pkg/go/printer/#Mode\"><code>SourcePos</code></a>;\nif set, the printer will emit <code>//line</code> comments such that the generated\noutput contains the original source code position information. 
The new type\n<a href=\"/pkg/go/printer/#CommentedNode\"><code>CommentedNode</code></a> can be\nused to provide comments associated with an arbitrary\n<a href=\"/pkg/go/ast/#Node\"><code>ast.Node</code></a> (until now only\n<a href=\"/pkg/go/ast/#File\"><code>ast.File</code></a> carried comment information).\n</p>\n\n<p>\nThe type names of the <a href=\"/pkg/go/doc/\"><code>go/doc</code></a> package have been\nstreamlined by removing the <code>Doc</code> suffix: <code>PackageDoc</code>\nis now <code>Package</code>, <code>ValueDoc</code> is <code>Value</code>, etc.\nAlso, all types now consistently have a <code>Name</code> field (or <code>Names</code>,\nin the case of type <code>Value</code>) and <code>Type.Factories</code> has become\n<code>Type.Funcs</code>.\nInstead of calling <code>doc.NewPackageDoc(pkg, importpath)</code>,\ndocumentation for a package is created with:\n</p>\n\n<pre>\n    doc.New(pkg, importpath, mode)\n</pre>\n\n<p>\nwhere the new <code>mode</code> parameter specifies the operation mode:\nif set to <a href=\"/pkg/go/doc/#AllDecls\"><code>AllDecls</code></a>, all declarations\n(not just exported ones) are considered.\nThe function <code>NewFileDoc</code> was removed, and the function\n<code>CommentText</code> has become the method\n<a href=\"/pkg/go/ast/#Text\"><code>Text</code></a> of\n<a href=\"/pkg/go/ast/#CommentGroup\"><code>ast.CommentGroup</code></a>.\n</p>\n\n<p>\nIn package <a href=\"/pkg/go/token/\"><code>go/token</code></a>, the\n<a href=\"/pkg/go/token/#FileSet\"><code>token.FileSet</code></a> method <code>Files</code>\n(which originally returned a channel of <code>*token.File</code>s) has been replaced\nwith the iterator <a href=\"/pkg/go/token/#FileSet.Iterate\"><code>Iterate</code></a> that\naccepts a function argument instead.\n</p>\n\n<p>\nIn package <a href=\"/pkg/go/build/\"><code>go/build</code></a>, the API\nhas been nearly completely replaced.\nThe package still computes Go package information\nbut it does not run the 
build: the <code>Cmd</code> and <code>Script</code>\ntypes are gone.\n(To build code, use the new\n<a href=\"/cmd/go/\"><code>go</code></a> command instead.)\nThe <code>DirInfo</code> type is now named\n<a href=\"/pkg/go/build/#Package\"><code>Package</code></a>.\n<code>FindTree</code> and <code>ScanDir</code> are replaced by\n<a href=\"/pkg/go/build/#Import\"><code>Import</code></a>\nand\n<a href=\"/pkg/go/build/#ImportDir\"><code>ImportDir</code></a>.\n</p>\n\n<p>\n<em>Updating</em>:\nCode that uses packages in <code>go</code> will have to be updated by hand; the\ncompiler will reject incorrect uses. Templates used in conjunction with any of the\n<code>go/doc</code> types may need manual fixes; the renamed fields will lead\nto run-time errors.\n</p>\n\n<h3 id=\"hash\">The hash package</h3>\n\n<p>\nIn Go 1, the definition of <a href=\"/pkg/hash/#Hash\"><code>hash.Hash</code></a> includes\na new method, <code>BlockSize</code>.  This new method is used primarily in the\ncryptographic libraries.\n</p>\n\n<p>\nThe <code>Sum</code> method of the\n<a href=\"/pkg/hash/#Hash\"><code>hash.Hash</code></a> interface now takes a\n<code>[]byte</code> argument, to which the hash value will be appended.\nThe previous behavior can be recreated by adding a <code>nil</code> argument to the call.\n</p>\n\n<p>\n<em>Updating</em>:\nExisting implementations of <code>hash.Hash</code> will need to add a\n<code>BlockSize</code> method.  
Hashes that process the input one byte at\na time can implement <code>BlockSize</code> to return 1.\nRunning <code>go</code> <code>fix</code> will update calls to the <code>Sum</code> methods of the various\nimplementations of <code>hash.Hash</code>.\n</p>\n\n<p>\n<em>Updating</em>:\nSince the package's functionality is new, no updating is necessary.\n</p>\n\n<h3 id=\"http\">The http package</h3>\n\n<p>\nIn Go 1 the <a href=\"/pkg/net/http/\"><code>http</code></a> package is refactored,\nputting some of the utilities into a\n<a href=\"/pkg/net/http/httputil/\"><code>httputil</code></a> subdirectory.\nThese pieces are only rarely needed by HTTP clients.\nThe affected items are:\n</p>\n\n<ul>\n<li>ClientConn</li>\n<li>DumpRequest</li>\n<li>DumpRequestOut</li>\n<li>DumpResponse</li>\n<li>NewChunkedReader</li>\n<li>NewChunkedWriter</li>\n<li>NewClientConn</li>\n<li>NewProxyClientConn</li>\n<li>NewServerConn</li>\n<li>NewSingleHostReverseProxy</li>\n<li>ReverseProxy</li>\n<li>ServerConn</li>\n</ul>\n\n<p>\nThe <code>Request.RawURL</code> field has been removed; it was a\nhistorical artifact.\n</p>\n\n<p>\nThe <code>Handle</code> and <code>HandleFunc</code>\nfunctions, and the similarly-named methods of <code>ServeMux</code>,\nnow panic if an attempt is made to register the same pattern twice.\n</p>\n\n<p>\n<em>Updating</em>:\nRunning <code>go</code> <code>fix</code> will update the few programs that are affected except for\nuses of <code>RawURL</code>, which must be fixed by hand.\n</p>\n\n<h3 id=\"image\">The image package</h3>\n\n<p>\nThe <a href=\"/pkg/image/\"><code>image</code></a> package has had a number of\nminor changes, rearrangements and renamings.\n</p>\n\n<p>\nMost of the color handling code has been moved into its own package,\n<a href=\"/pkg/image/color/\"><code>image/color</code></a>.\nFor the elements that moved, a symmetry arises; for instance,\neach pixel of an\n<a href=\"/pkg/image/#RGBA\"><code>image.RGBA</code></a>\nis a\n<a 
href=\"/pkg/image/color/#RGBA\"><code>color.RGBA</code></a>.\n</p>\n\n<p>\nThe old <code>image/ycbcr</code> package has been folded, with some\nrenamings, into the\n<a href=\"/pkg/image/\"><code>image</code></a>\nand\n<a href=\"/pkg/image/color/\"><code>image/color</code></a>\npackages.\n</p>\n\n<p>\nThe old <code>image.ColorImage</code> type is still in the <code>image</code>\npackage but has been renamed\n<a href=\"/pkg/image/#Uniform\"><code>image.Uniform</code></a>,\nwhile <code>image.Tiled</code> has been removed.\n</p>\n\n<p>\nThis table lists the renamings.\n</p>\n\n<table class=\"codetable\" frame=\"border\" summary=\"image renames\">\n<colgroup align=\"left\" width=\"50%\"></colgroup>\n<colgroup align=\"left\" width=\"50%\"></colgroup>\n<tr>\n<th align=\"left\">Old</th>\n<th align=\"left\">New</th>\n</tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>image.Color</td> <td>color.Color</td></tr>\n<tr><td>image.ColorModel</td> <td>color.Model</td></tr>\n<tr><td>image.ColorModelFunc</td> <td>color.ModelFunc</td></tr>\n<tr><td>image.PalettedColorModel</td> <td>color.Palette</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>image.RGBAColor</td> <td>color.RGBA</td></tr>\n<tr><td>image.RGBA64Color</td> <td>color.RGBA64</td></tr>\n<tr><td>image.NRGBAColor</td> <td>color.NRGBA</td></tr>\n<tr><td>image.NRGBA64Color</td> <td>color.NRGBA64</td></tr>\n<tr><td>image.AlphaColor</td> <td>color.Alpha</td></tr>\n<tr><td>image.Alpha16Color</td> <td>color.Alpha16</td></tr>\n<tr><td>image.GrayColor</td> <td>color.Gray</td></tr>\n<tr><td>image.Gray16Color</td> <td>color.Gray16</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>image.RGBAColorModel</td> <td>color.RGBAModel</td></tr>\n<tr><td>image.RGBA64ColorModel</td> <td>color.RGBA64Model</td></tr>\n<tr><td>image.NRGBAColorModel</td> <td>color.NRGBAModel</td></tr>\n<tr><td>image.NRGBA64ColorModel</td> <td>color.NRGBA64Model</td></tr>\n<tr><td>image.AlphaColorModel</td> 
<td>color.AlphaModel</td></tr>\n<tr><td>image.Alpha16ColorModel</td> <td>color.Alpha16Model</td></tr>\n<tr><td>image.GrayColorModel</td> <td>color.GrayModel</td></tr>\n<tr><td>image.Gray16ColorModel</td> <td>color.Gray16Model</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>ycbcr.RGBToYCbCr</td> <td>color.RGBToYCbCr</td></tr>\n<tr><td>ycbcr.YCbCrToRGB</td> <td>color.YCbCrToRGB</td></tr>\n<tr><td>ycbcr.YCbCrColorModel</td> <td>color.YCbCrModel</td></tr>\n<tr><td>ycbcr.YCbCrColor</td> <td>color.YCbCr</td></tr>\n<tr><td>ycbcr.YCbCr</td> <td>image.YCbCr</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>ycbcr.SubsampleRatio444</td> <td>image.YCbCrSubsampleRatio444</td></tr>\n<tr><td>ycbcr.SubsampleRatio422</td> <td>image.YCbCrSubsampleRatio422</td></tr>\n<tr><td>ycbcr.SubsampleRatio420</td> <td>image.YCbCrSubsampleRatio420</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>image.ColorImage</td> <td>image.Uniform</td></tr>\n</table>\n\n<p>\nThe image package's <code>New</code> functions\n(<a href=\"/pkg/image/#NewRGBA\"><code>NewRGBA</code></a>,\n<a href=\"/pkg/image/#NewRGBA64\"><code>NewRGBA64</code></a>, etc.)\ntake an <a href=\"/pkg/image/#Rectangle\"><code>image.Rectangle</code></a> as an argument\ninstead of four integers.\n</p>\n\n<p>\nFinally, there are new predefined <code>color.Color</code> variables\n<a href=\"/pkg/image/color/#Black\"><code>color.Black</code></a>,\n<a href=\"/pkg/image/color/#White\"><code>color.White</code></a>,\n<a href=\"/pkg/image/color/#Opaque\"><code>color.Opaque</code></a>\nand\n<a href=\"/pkg/image/color/#Transparent\"><code>color.Transparent</code></a>.\n</p>\n\n<p>\n<em>Updating</em>:\nRunning <code>go</code> <code>fix</code> will update almost all code affected by the change.\n</p>\n\n<h3 id=\"log_syslog\">The log/syslog package</h3>\n\n<p>\nIn Go 1, the <a href=\"/pkg/log/syslog/#NewLogger\"><code>syslog.NewLogger</code></a>\nfunction returns an error as well as a 
<code>log.Logger</code>.\n</p>\n\n<p>\n<em>Updating</em>:\nWhat little code is affected will be caught by the compiler and must be updated by hand.\n</p>\n\n<h3 id=\"mime\">The mime package</h3>\n\n<p>\nIn Go 1, the <a href=\"/pkg/mime/#FormatMediaType\"><code>FormatMediaType</code></a> function\nof the <code>mime</code> package has  been simplified to make it\nconsistent with\n<a href=\"/pkg/mime/#ParseMediaType\"><code>ParseMediaType</code></a>.\nIt now takes <code>\"text/html\"</code> rather than <code>\"text\"</code> and <code>\"html\"</code>.\n</p>\n\n<p>\n<em>Updating</em>:\nWhat little code is affected will be caught by the compiler and must be updated by hand.\n</p>\n\n<h3 id=\"net\">The net package</h3>\n\n<p>\nIn Go 1, the various <code>SetTimeout</code>,\n<code>SetReadTimeout</code>, and <code>SetWriteTimeout</code> methods\nhave been replaced with\n<a href=\"/pkg/net/#IPConn.SetDeadline\"><code>SetDeadline</code></a>,\n<a href=\"/pkg/net/#IPConn.SetReadDeadline\"><code>SetReadDeadline</code></a>, and\n<a href=\"/pkg/net/#IPConn.SetWriteDeadline\"><code>SetWriteDeadline</code></a>,\nrespectively.  
Rather than taking a timeout value in nanoseconds that\napply to any activity on the connection, the new methods set an\nabsolute deadline (as a <code>time.Time</code> value) after which\nreads and writes will time out and no longer block.\n</p>\n\n<p>\nThere are also new functions\n<a href=\"/pkg/net/#DialTimeout\"><code>net.DialTimeout</code></a>\nto simplify timing out dialing a network address and\n<a href=\"/pkg/net/#ListenMulticastUDP\"><code>net.ListenMulticastUDP</code></a>\nto allow multicast UDP to listen concurrently across multiple listeners.\nThe <code>net.ListenMulticastUDP</code> function replaces the old\n<code>JoinGroup</code> and <code>LeaveGroup</code> methods.\n</p>\n\n<p>\n<em>Updating</em>:\nCode that uses the old methods will fail to compile and must be updated by hand.\nThe semantic change makes it difficult for the fix tool to update automatically.\n</p>\n\n<h3 id=\"os\">The os package</h3>\n\n<p>\nThe <code>Time</code> function has been removed; callers should use\nthe <a href=\"/pkg/time/#Time\"><code>Time</code></a> type from the\n<code>time</code> package.\n</p>\n\n<p>\nThe <code>Exec</code> function has been removed; callers should use\n<code>Exec</code> from the <code>syscall</code> package, where available.\n</p>\n\n<p>\nThe <code>ShellExpand</code> function has been renamed to <a\nhref=\"/pkg/os/#ExpandEnv\"><code>ExpandEnv</code></a>.\n</p>\n\n<p>\nThe <a href=\"/pkg/os/#NewFile\"><code>NewFile</code></a> function\nnow takes a <code>uintptr</code> fd, instead of an <code>int</code>.\nThe <a href=\"/pkg/os/#File.Fd\"><code>Fd</code></a> method on files now\nalso returns a <code>uintptr</code>.\n</p>\n\n<p>\nThere are no longer error constants such as <code>EINVAL</code>\nin the <code>os</code> package, since the set of values varied with\nthe underlying operating system. 
There are new portable functions like\n<a href=\"/pkg/os/#IsPermission\"><code>IsPermission</code></a>\nto test common error properties, plus a few new error values\nwith more Go-like names, such as\n<a href=\"/pkg/os/#ErrPermission\"><code>ErrPermission</code></a>\nand\n<a href=\"/pkg/os/#ErrNoEnv\"><code>ErrNoEnv</code></a>.\n</p>\n\n<p>\nThe <code>Getenverror</code> function has been removed. To distinguish\nbetween a non-existent environment variable and an empty string,\nuse <a href=\"/pkg/os/#Environ\"><code>os.Environ</code></a> or\n<a href=\"/pkg/syscall/#Getenv\"><code>syscall.Getenv</code></a>.\n</p>\n\n\n<p>\nThe <a href=\"/pkg/os/#Process.Wait\"><code>Process.Wait</code></a> method has\ndropped its option argument and the associated constants are gone\nfrom the package.\nAlso, the function <code>Wait</code> is gone; only the method of\nthe <code>Process</code> type persists.\n</p>\n\n<p>\nThe <code>Waitmsg</code> type returned by\n<a href=\"/pkg/os/#Process.Wait\"><code>Process.Wait</code></a>\nhas been replaced with a more portable\n<a href=\"/pkg/os/#ProcessState\"><code>ProcessState</code></a>\ntype with accessor methods to recover information about the\nprocess.\nBecause of changes to <code>Wait</code>, the <code>ProcessState</code>\nvalue always describes an exited process.\nPortability concerns simplified the interface in other ways, but the values returned by the\n<a href=\"/pkg/os/#ProcessState.Sys\"><code>ProcessState.Sys</code></a> and\n<a href=\"/pkg/os/#ProcessState.SysUsage\"><code>ProcessState.SysUsage</code></a>\nmethods can be type-asserted to underlying system-specific data structures such as\n<a href=\"/pkg/syscall/#WaitStatus\"><code>syscall.WaitStatus</code></a> and\n<a href=\"/pkg/syscall/#Rusage\"><code>syscall.Rusage</code></a> on Unix.\n</p>\n\n<p>\n<em>Updating</em>:\nRunning <code>go</code> <code>fix</code> will drop a zero argument to <code>Process.Wait</code>.\nAll other changes will be caught by the compiler and must be 
updated by hand.\n</p>\n\n<h4 id=\"os_fileinfo\">The os.FileInfo type</h4>\n\n<p>\nGo 1 redefines the <a href=\"/pkg/os/#FileInfo\"><code>os.FileInfo</code></a> type,\nchanging it from a struct to an interface:\n</p>\n\n<pre>\n    type FileInfo interface {\n        Name() string       // base name of the file\n        Size() int64        // length in bytes\n        Mode() FileMode     // file mode bits\n        ModTime() time.Time // modification time\n        IsDir() bool        // abbreviation for Mode().IsDir()\n        Sys() interface{}   // underlying data source (can return nil)\n    }\n</pre>\n\n<p>\nThe file mode information has been moved into a subtype called\n<a href=\"/pkg/os/#FileMode\"><code>os.FileMode</code></a>,\na simple integer type with <code>IsDir</code>, <code>Perm</code>, and <code>String</code>\nmethods.\n</p>\n\n<p>\nThe system-specific details of file modes and properties such as (on Unix)\ni-number have been removed from <code>FileInfo</code> altogether.\nInstead, each operating system's <code>os</code> package provides an\nimplementation of the <code>FileInfo</code> interface, which\nhas a <code>Sys</code> method that returns the\nsystem-specific representation of file metadata.\nFor instance, to discover the i-number of a file on a Unix system, unpack\nthe <code>FileInfo</code> like this:\n</p>\n\n<pre>\n    fi, err := os.Stat(\"hello.go\")\n    if err != nil {\n        log.Fatal(err)\n    }\n    // Check that it's a Unix file.\n    unixStat, ok := fi.Sys().(*syscall.Stat_t)\n    if !ok {\n        log.Fatal(\"hello.go: not a Unix file\")\n    }\n    fmt.Printf(\"file i-number: %d\\n\", unixStat.Ino)\n</pre>\n\n<p>\nAssuming (which is unwise) that <code>\"hello.go\"</code> is a Unix file,\nthe i-number expression could be contracted to\n</p>\n\n<pre>\n    fi.Sys().(*syscall.Stat_t).Ino\n</pre>\n\n<p>\nThe vast majority of uses of <code>FileInfo</code> need only the methods\nof the standard interface.\n</p>\n\n<p>\nThe <code>os</code> 
package no longer contains wrappers for the POSIX errors\nsuch as <code>ENOENT</code>.\nFor the few programs that need to verify particular error conditions, there are\nnow the boolean functions\n<a href=\"/pkg/os/#IsExist\"><code>IsExist</code></a>,\n<a href=\"/pkg/os/#IsNotExist\"><code>IsNotExist</code></a>\nand\n<a href=\"/pkg/os/#IsPermission\"><code>IsPermission</code></a>.\n</p>\n\n<pre><!--{{code \"/doc/progs/go1.go\" `/os\\.Open/` `/}/`}}\n-->    f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)\n    if os.IsExist(err) {\n        log.Printf(&#34;%s already exists&#34;, name)\n    }</pre>\n\n<p>\n<em>Updating</em>:\nRunning <code>go</code> <code>fix</code> will update code that uses the old equivalent of the current <code>os.FileInfo</code>\nand <code>os.FileMode</code> API.\nCode that needs system-specific file details will need to be updated by hand.\nCode that uses the old POSIX error values from the <code>os</code> package\nwill fail to compile and will also need to be updated by hand.\n</p>\n\n<h3 id=\"os_signal\">The os/signal package</h3>\n\n<p>\nThe <code>os/signal</code> package in Go 1 replaces the\n<code>Incoming</code> function, which returned a channel\nthat received all incoming signals,\nwith the selective <code>Notify</code> function, which asks\nfor delivery of specific signals on an existing channel.\n</p>\n\n<p>\n<em>Updating</em>:\nCode must be updated by hand.\nA literal translation of\n</p>\n<pre>\nc := signal.Incoming()\n</pre>\n<p>\nis\n</p>\n<pre>\nc := make(chan os.Signal)\nsignal.Notify(c) // ask for all signals\n</pre>\n<p>\nbut most code should list the specific signals it wants to handle instead:\n</p>\n<pre>\nc := make(chan os.Signal)\nsignal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT)\n</pre>\n\n<h3 id=\"path_filepath\">The path/filepath package</h3>\n\n<p>\nIn Go 1, the <a href=\"/pkg/path/filepath/#Walk\"><code>Walk</code></a> function of the\n<code>path/filepath</code> package\nhas been changed to take a 
function value of type\n<a href=\"/pkg/path/filepath/#WalkFunc\"><code>WalkFunc</code></a>\ninstead of a <code>Visitor</code> interface value.\n<code>WalkFunc</code> unifies the handling of both files and directories.\n</p>\n\n<pre>\n    type WalkFunc func(path string, info os.FileInfo, err error) error\n</pre>\n\n<p>\nThe <code>WalkFunc</code> function will be called even for files or directories that could not be opened;\nin such cases the error argument will describe the failure.\nIf a directory's contents are to be skipped,\nthe function should return the value <a href=\"/pkg/path/filepath/#variables\"><code>filepath.SkipDir</code></a>\n</p>\n\n<pre><!--{{code \"/doc/progs/go1.go\" `/STARTWALK/` `/ENDWALK/`}}\n-->    markFn := func(path string, info os.FileInfo, err error) error {\n        if path == &#34;pictures&#34; { <span class=\"comment\">// Will skip walking of directory pictures and its contents.</span>\n            return filepath.SkipDir\n        }\n        if err != nil {\n            return err\n        }\n        log.Println(path)\n        return nil\n    }\n    err := filepath.Walk(&#34;.&#34;, markFn)\n    if err != nil {\n        log.Fatal(err)\n    }</pre>\n\n<p>\n<em>Updating</em>:\nThe change simplifies most code but has subtle consequences, so affected programs\nwill need to be updated by hand.\nThe compiler will catch code using the old interface.\n</p>\n\n<h3 id=\"regexp\">The regexp package</h3>\n\n<p>\nThe <a href=\"/pkg/regexp/\"><code>regexp</code></a> package has been rewritten.\nIt has the same interface but the specification of the regular expressions\nit supports has changed from the old \"egrep\" form to that of\n<a href=\"http://code.google.com/p/re2/\">RE2</a>.\n</p>\n\n<p>\n<em>Updating</em>:\nCode that uses the package should have its regular expressions checked by hand.\n</p>\n\n<h3 id=\"runtime\">The runtime package</h3>\n\n<p>\nIn Go 1, much of the API exported by package\n<code>runtime</code> has been removed in favor 
of\nfunctionality provided by other packages.\nCode using the <code>runtime.Type</code> interface\nor its specific concrete type implementations should\nnow use package <a href=\"/pkg/reflect/\"><code>reflect</code></a>.\nCode using <code>runtime.Semacquire</code> or <code>runtime.Semrelease</code>\nshould use channels or the abstractions in package <a href=\"/pkg/sync/\"><code>sync</code></a>.\nThe <code>runtime.Alloc</code>, <code>runtime.Free</code>,\nand <code>runtime.Lookup</code> functions, an unsafe API created for\ndebugging the memory allocator, have no replacement.\n</p>\n\n<p>\nBefore, <code>runtime.MemStats</code> was a global variable holding\nstatistics about memory allocation, and calls to <code>runtime.UpdateMemStats</code>\nensured that it was up to date.\nIn Go 1, <code>runtime.MemStats</code> is a struct type, and code should use\n<a href=\"/pkg/runtime/#ReadMemStats\"><code>runtime.ReadMemStats</code></a>\nto obtain the current statistics.\n</p>\n\n<p>\nThe package adds a new function,\n<a href=\"/pkg/runtime/#NumCPU\"><code>runtime.NumCPU</code></a>, that returns the number of CPUs available\nfor parallel execution, as reported by the operating system kernel.\nIts value can inform the setting of <code>GOMAXPROCS</code>.\nThe <code>runtime.Cgocalls</code> and <code>runtime.Goroutines</code> functions\nhave been renamed to <code>runtime.NumCgoCall</code> and <code>runtime.NumGoroutine</code>.\n</p>\n\n<p>\n<em>Updating</em>:\nRunning <code>go</code> <code>fix</code> will update code for the function renamings.\nOther code will need to be updated by hand.\n</p>\n\n<h3 id=\"strconv\">The strconv package</h3>\n\n<p>\nIn Go 1, the\n<a href=\"/pkg/strconv/\"><code>strconv</code></a>\npackage has been significantly reworked to make it more Go-like and less C-like,\nalthough <code>Atoi</code> lives on (it's similar to\n<code>int(ParseInt(x, 10, 0))</code>, as does\n<code>Itoa(x)</code> (<code>FormatInt(int64(x), 10)</code>).\nThere are also new variants 
of some of the functions that append to byte slices rather than\nreturn strings, to allow control over allocation.\n</p>\n\n<p>\nThis table summarizes the renamings; see the\n<a href=\"/pkg/strconv/\">package documentation</a>\nfor full details.\n</p>\n\n<table class=\"codetable\" frame=\"border\" summary=\"strconv renames\">\n<colgroup align=\"left\" width=\"50%\"></colgroup>\n<colgroup align=\"left\" width=\"50%\"></colgroup>\n<tr>\n<th align=\"left\">Old call</th>\n<th align=\"left\">New call</th>\n</tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>Atob(x)</td> <td>ParseBool(x)</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>Atof32(x)</td> <td>ParseFloat(x, 32)§</td></tr>\n<tr><td>Atof64(x)</td> <td>ParseFloat(x, 64)</td></tr>\n<tr><td>AtofN(x, n)</td> <td>ParseFloat(x, n)</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>Atoi(x)</td> <td>Atoi(x)</td></tr>\n<tr><td>Atoi(x)</td> <td>ParseInt(x, 10, 0)§</td></tr>\n<tr><td>Atoi64(x)</td> <td>ParseInt(x, 10, 64)</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>Atoui(x)</td> <td>ParseUint(x, 10, 0)§</td></tr>\n<tr><td>Atoui64(x)</td> <td>ParseUint(x, 10, 64)</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>Btoi64(x, b)</td> <td>ParseInt(x, b, 64)</td></tr>\n<tr><td>Btoui64(x, b)</td> <td>ParseUint(x, b, 64)</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>Btoa(x)</td> <td>FormatBool(x)</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>Ftoa32(x, f, p)</td> <td>FormatFloat(float64(x), f, p, 32)</td></tr>\n<tr><td>Ftoa64(x, f, p)</td> <td>FormatFloat(x, f, p, 64)</td></tr>\n<tr><td>FtoaN(x, f, p, n)</td> <td>FormatFloat(x, f, p, n)</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>Itoa(x)</td> <td>Itoa(x)</td></tr>\n<tr><td>Itoa(x)</td> <td>FormatInt(int64(x), 10)</td></tr>\n<tr><td>Itoa64(x)</td> <td>FormatInt(x, 10)</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>Itob(x, b)</td> <td>FormatInt(int64(x), 
b)</td></tr>\n<tr><td>Itob64(x, b)</td> <td>FormatInt(x, b)</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>Uitoa(x)</td> <td>FormatUint(uint64(x), 10)</td></tr>\n<tr><td>Uitoa64(x)</td> <td>FormatUint(x, 10)</td></tr>\n<tr>\n<td colspan=\"2\"><hr></td>\n</tr>\n<tr><td>Uitob(x, b)</td> <td>FormatUint(uint64(x), b)</td></tr>\n<tr><td>Uitob64(x, b)</td> <td>FormatUint(x, b)</td></tr>\n</table>\n\t\t\n<p>\n<em>Updating</em>:\nRunning <code>go</code> <code>fix</code> will update almost all code affected by the change.\n<br>\n§ <code>Atoi</code> persists but <code>Atoui</code> and <code>Atof32</code> do not, so\nthey may require\na cast that must be added by hand; the <code>go</code> <code>fix</code> tool will warn about it.\n</p>\n\n\n<h3 id=\"templates\">The template packages</h3>\n\n<p>\nThe <code>template</code> and <code>exp/template/html</code> packages have moved to \n<a href=\"/pkg/text/template/\"><code>text/template</code></a> and\n<a href=\"/pkg/html/template/\"><code>html/template</code></a>.\nMore significant, the interface to these packages has been simplified.\nThe template language is the same, but the concept of \"template set\" is gone\nand the functions and methods of the packages have changed accordingly,\noften by elimination.\n</p>\n\n<p>\nInstead of sets, a <code>Template</code> object\nmay contain multiple named template definitions,\nin effect constructing\nname spaces for template invocation.\nA template can invoke any other template associated with it, but only those\ntemplates associated with it.\nThe simplest way to associate templates is to parse them together, something\nmade easier with the new structure of the packages.\n</p>\n\n<p>\n<em>Updating</em>:\nThe imports will be updated by fix tool.\nSingle-template uses will be otherwise be largely unaffected.\nCode that uses multiple templates in concert will need to be updated by hand.\nThe <a href=\"/pkg/text/template/#examples\">examples</a> in\nthe documentation for 
<code>text/template</code> can provide guidance.\n</p>\n\n<h3 id=\"testing\">The testing package</h3>\n\n<p>\nThe testing package has a type, <code>B</code>, passed as an argument to benchmark functions.\nIn Go 1, <code>B</code> has new methods, analogous to those of <code>T</code>, enabling\nlogging and failure reporting.\n</p>\n\n<pre><!--{{code \"/doc/progs/go1.go\" `/func.*Benchmark/` `/^}/`}}\n-->func BenchmarkSprintf(b *testing.B) {\n    <span class=\"comment\">// Verify correctness before running benchmark.</span>\n    b.StopTimer()\n    got := fmt.Sprintf(&#34;%x&#34;, 23)\n    const expect = &#34;17&#34;\n    if expect != got {\n        b.Fatalf(&#34;expected %q; got %q&#34;, expect, got)\n    }\n    b.StartTimer()\n    for i := 0; i &lt; b.N; i++ {\n        fmt.Sprintf(&#34;%x&#34;, 23)\n    }\n}</pre>\n\n<p>\n<em>Updating</em>:\nExisting code is unaffected, although benchmarks that use <code>println</code>\nor <code>panic</code> should be updated to use the new methods.\n</p>\n\n<h3 id=\"testing_script\">The testing/script package</h3>\n\n<p>\nThe testing/script package has been deleted. 
It was a dreg.\n</p>\n\n<p>\n<em>Updating</em>:\nNo code is likely to be affected.\n</p>\n\n<h3 id=\"unsafe\">The unsafe package</h3>\n\n<p>\nIn Go 1, the functions\n<code>unsafe.Typeof</code>, <code>unsafe.Reflect</code>,\n<code>unsafe.Unreflect</code>, <code>unsafe.New</code>, and\n<code>unsafe.NewArray</code> have been removed;\nthey duplicated safer functionality provided by\npackage <a href=\"/pkg/reflect/\"><code>reflect</code></a>.\n</p>\n\n<p>\n<em>Updating</em>:\nCode using these functions must be rewritten to use\npackage <a href=\"/pkg/reflect/\"><code>reflect</code></a>.\nThe changes to <a href=\"http://code.google.com/p/go/source/detail?r=2646dc956207\">encoding/gob</a> and the <a href=\"http://code.google.com/p/goprotobuf/source/detail?r=5340ad310031\">protocol buffer library</a>\nmay be helpful as examples.\n</p>\n\n<h3 id=\"url\">The url package</h3>\n\n<p>\nIn Go 1 several fields from the <a href=\"/pkg/net/url/#URL\"><code>url.URL</code></a> type\nwere removed or replaced.\n</p>\n\n<p>\nThe <a href=\"/pkg/net/url/#URL.String\"><code>String</code></a> method now\npredictably rebuilds an encoded URL string using all of <code>URL</code>'s\nfields as necessary. The resulting string will also no longer have\npasswords escaped.\n</p>\n\n<p>\nThe <code>Raw</code> field has been removed. In most cases the <code>String</code>\nmethod may be used in its place.\n</p>\n\n<p>\nThe old <code>RawUserinfo</code> field is replaced by the <code>User</code>\nfield, of type <a href=\"/pkg/net/url/#Userinfo\"><code>*net.Userinfo</code></a>.\nValues of this type may be created using the new <a href=\"/pkg/net/url/#User\"><code>net.User</code></a>\nand <a href=\"/pkg/net/url/#UserPassword\"><code>net.UserPassword</code></a>\nfunctions. The <code>EscapeUserinfo</code> and <code>UnescapeUserinfo</code>\nfunctions are also gone.\n</p>\n\n<p>\nThe <code>RawAuthority</code> field has been removed. 
The same information is\navailable in the <code>Host</code> and <code>User</code> fields.\n</p>\n\n<p>\nThe <code>RawPath</code> field and the <code>EncodedPath</code> method have\nbeen removed. The path information in rooted URLs (with a slash following the\nschema) is now available only in decoded form in the <code>Path</code> field.\nOccasionally, the encoded data may be required to obtain information that\nwas lost in the decoding process. These cases must be handled by accessing\nthe data the URL was built from.\n</p>\n\n<p>\nURLs with non-rooted paths, such as <code>\"mailto:dev@golang.org?subject=Hi\"</code>,\nare also handled differently. The <code>OpaquePath</code> boolean field has been\nremoved and a new <code>Opaque</code> string field introduced to hold the encoded\npath for such URLs. In Go 1, the cited URL parses as:\n</p>\n\n<pre>\n    URL{\n        Scheme: \"mailto\",\n        Opaque: \"dev@golang.org\",\n        RawQuery: \"subject=Hi\",\n    }\n</pre>\n\n<p>\nA new <a href=\"/pkg/net/url/#URL.RequestURI\"><code>RequestURI</code></a> method was\nadded to <code>URL</code>.\n</p>\n\n<p>\nThe <code>ParseWithReference</code> function has been renamed to <code>ParseWithFragment</code>.\n</p>\n\n<p>\n<em>Updating</em>:\nCode that uses the old fields will fail to compile and must be updated by hand.\nThe semantic changes make it difficult for the fix tool to update automatically.\n</p>\n\n<h2 id=\"cmd_go\">The go command</h2>\n\n<p>\nGo 1 introduces the <a href=\"/cmd/go/\">go command</a>, a tool for fetching,\nbuilding, and installing Go packages and commands. The <code>go</code> command\ndoes away with makefiles, instead using Go source code to find dependencies and\ndetermine build conditions. 
Most existing Go programs will no longer require\nmakefiles to be built.\n</p>\n\n<p>\nSee <a href=\"/doc/code.html\">How to Write Go Code</a> for a primer on the\n<code>go</code> command and the <a href=\"/cmd/go/\">go command documentation</a>\nfor the full details.\n</p>\n\n<p>\n<em>Updating</em>:\nProjects that depend on the Go project's old makefile-based build\ninfrastructure (<code>Make.pkg</code>, <code>Make.cmd</code>, and so on) should\nswitch to using the <code>go</code> command for building Go code and, if\nnecessary, rewrite their makefiles to perform any auxiliary build tasks.\n</p>\n\n<h2 id=\"cmd_cgo\">The cgo command</h2>\n\n<p>\nIn Go 1, the <a href=\"/cmd/cgo\">cgo command</a>\nuses a different <code>_cgo_export.h</code>\nfile, which is generated for packages containing <code>//export</code> lines.\nThe <code>_cgo_export.h</code> file now begins with the C preamble comment,\nso that exported function definitions can use types defined there.\nThis has the effect of compiling the preamble multiple times, so a\npackage using <code>//export</code> must not put function definitions\nor variable initializations in the C preamble.\n</p>\n\n<h2 id=\"releases\">Packaged releases</h2>\n\n<p>\nOne of the most significant changes associated with Go 1 is the availability\nof prepackaged, downloadable distributions.\nThey are available for many combinations of architecture and operating system\n(including Windows) and the list will grow.\nInstallation details are described on the\n<a href=\"/doc/install\">Getting Started</a> page, while\nthe distributions themselves are listed on the\n<a href=\"http://code.google.com/p/go/downloads/list\">downloads page</a>.\n\n\n</div>\n\n<div id=\"footer\">\nBuild version go1.0.1.<br>\nA link <a href=\"http://code.google.com/policies.html#restrictions\">noted</a>,\nand then, coming up on the very next line, we will\nfind yet another link, link 3.0 if you will,\nafter a few more words <a href=\"/LINK\">link text</a>.<br>\n<a 
href=\"/doc/tos.html\">Terms of Service</a> | \n<a href=\"http://www.google.com/intl/en/privacy/privacy-policy.html\">Privacy Policy</a>\n</div>\n\n<script type=\"text/javascript\">\n(function() {\n  var ga = document.createElement(\"script\"); ga.type = \"text/javascript\"; ga.async = true;\n  ga.src = (\"https:\" == document.location.protocol ? \"https://ssl\" : \"http://www\") + \".google-analytics.com/ga.js\";\n  var s = document.getElementsByTagName(\"script\")[0]; s.parentNode.insertBefore(ga, s);\n})();\n</script>\n</body>\n<script type=\"text/javascript\">\n  (function() {\n    var po = document.createElement('script'); po.type = 'text/javascript'; po.async = true;\n    po.src = 'https://apis.google.com/js/plusone.js';\n    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(po, s);\n  })();\n</script>\n</html>\n\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/testdata/webkit/README",
    "content": "The *.dat files in this directory are copied from The WebKit Open Source\nProject, specifically $WEBKITROOT/LayoutTests/html5lib/resources.\nWebKit is licensed under a BSD style license.\nhttp://webkit.org/coding/bsd-license.html says:\n\nCopyright (C) 2009 Apple Inc. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice,\nthis list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\nthis list of conditions and the following disclaimer in the documentation\nand/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS \"AS IS\" AND ANY\nEXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/token.go",
    "content": "// Copyright 2010 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage html\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"golang.org/x/net/html/atom\"\n)\n\n// A TokenType is the type of a Token.\ntype TokenType uint32\n\nconst (\n\t// ErrorToken means that an error occurred during tokenization.\n\tErrorToken TokenType = iota\n\t// TextToken means a text node.\n\tTextToken\n\t// A StartTagToken looks like <a>.\n\tStartTagToken\n\t// An EndTagToken looks like </a>.\n\tEndTagToken\n\t// A SelfClosingTagToken tag looks like <br/>.\n\tSelfClosingTagToken\n\t// A CommentToken looks like <!--x-->.\n\tCommentToken\n\t// A DoctypeToken looks like <!DOCTYPE x>\n\tDoctypeToken\n)\n\n// ErrBufferExceeded means that the buffering limit was exceeded.\nvar ErrBufferExceeded = errors.New(\"max buffer exceeded\")\n\n// String returns a string representation of the TokenType.\nfunc (t TokenType) String() string {\n\tswitch t {\n\tcase ErrorToken:\n\t\treturn \"Error\"\n\tcase TextToken:\n\t\treturn \"Text\"\n\tcase StartTagToken:\n\t\treturn \"StartTag\"\n\tcase EndTagToken:\n\t\treturn \"EndTag\"\n\tcase SelfClosingTagToken:\n\t\treturn \"SelfClosingTag\"\n\tcase CommentToken:\n\t\treturn \"Comment\"\n\tcase DoctypeToken:\n\t\treturn \"Doctype\"\n\t}\n\treturn \"Invalid(\" + strconv.Itoa(int(t)) + \")\"\n}\n\n// An Attribute is an attribute namespace-key-value triple. 
Namespace is\n// non-empty for foreign attributes like xlink, Key is alphabetic (and hence\n// does not contain escapable characters like '&', '<' or '>'), and Val is\n// unescaped (it looks like \"a<b\" rather than \"a&lt;b\").\n//\n// Namespace is only used by the parser, not the tokenizer.\ntype Attribute struct {\n\tNamespace, Key, Val string\n}\n\n// A Token consists of a TokenType and some Data (tag name for start and end\n// tags, content for text, comments and doctypes). A tag Token may also contain\n// a slice of Attributes. Data is unescaped for all Tokens (it looks like \"a<b\"\n// rather than \"a&lt;b\"). For tag Tokens, DataAtom is the atom for Data, or\n// zero if Data is not a known tag name.\ntype Token struct {\n\tType     TokenType\n\tDataAtom atom.Atom\n\tData     string\n\tAttr     []Attribute\n}\n\n// tagString returns a string representation of a tag Token's Data and Attr.\nfunc (t Token) tagString() string {\n\tif len(t.Attr) == 0 {\n\t\treturn t.Data\n\t}\n\tbuf := bytes.NewBufferString(t.Data)\n\tfor _, a := range t.Attr {\n\t\tbuf.WriteByte(' ')\n\t\tbuf.WriteString(a.Key)\n\t\tbuf.WriteString(`=\"`)\n\t\tescape(buf, a.Val)\n\t\tbuf.WriteByte('\"')\n\t}\n\treturn buf.String()\n}\n\n// String returns a string representation of the Token.\nfunc (t Token) String() string {\n\tswitch t.Type {\n\tcase ErrorToken:\n\t\treturn \"\"\n\tcase TextToken:\n\t\treturn EscapeString(t.Data)\n\tcase StartTagToken:\n\t\treturn \"<\" + t.tagString() + \">\"\n\tcase EndTagToken:\n\t\treturn \"</\" + t.tagString() + \">\"\n\tcase SelfClosingTagToken:\n\t\treturn \"<\" + t.tagString() + \"/>\"\n\tcase CommentToken:\n\t\treturn \"<!--\" + t.Data + \"-->\"\n\tcase DoctypeToken:\n\t\treturn \"<!DOCTYPE \" + t.Data + \">\"\n\t}\n\treturn \"Invalid(\" + strconv.Itoa(int(t.Type)) + \")\"\n}\n\n// span is a range of bytes in a Tokenizer's buffer. 
The start is inclusive,\n// the end is exclusive.\ntype span struct {\n\tstart, end int\n}\n\n// A Tokenizer returns a stream of HTML Tokens.\ntype Tokenizer struct {\n\t// r is the source of the HTML text.\n\tr io.Reader\n\t// tt is the TokenType of the current token.\n\ttt TokenType\n\t// err is the first error encountered during tokenization. It is possible\n\t// for tt != Error && err != nil to hold: this means that Next returned a\n\t// valid token but the subsequent Next call will return an error token.\n\t// For example, if the HTML text input was just \"plain\", then the first\n\t// Next call would set z.err to io.EOF but return a TextToken, and all\n\t// subsequent Next calls would return an ErrorToken.\n\t// err is never reset. Once it becomes non-nil, it stays non-nil.\n\terr error\n\t// readErr is the error returned by the io.Reader r. It is separate from\n\t// err because it is valid for an io.Reader to return (n int, err1 error)\n\t// such that n > 0 && err1 != nil, and callers should always process the\n\t// n > 0 bytes before considering the error err1.\n\treadErr error\n\t// buf[raw.start:raw.end] holds the raw bytes of the current token.\n\t// buf[raw.end:] is buffered input that will yield future tokens.\n\traw span\n\tbuf []byte\n\t// maxBuf limits the data buffered in buf. A value of 0 means unlimited.\n\tmaxBuf int\n\t// buf[data.start:data.end] holds the raw bytes of the current token's data:\n\t// a text token's text, a tag token's tag name, etc.\n\tdata span\n\t// pendingAttr is the attribute key and value currently being tokenized.\n\t// When complete, pendingAttr is pushed onto attr. nAttrReturned is\n\t// incremented on each call to TagAttr.\n\tpendingAttr   [2]span\n\tattr          [][2]span\n\tnAttrReturned int\n\t// rawTag is the \"script\" in \"</script>\" that closes the next token. 
If\n\t// non-empty, the subsequent call to Next will return a raw or RCDATA text\n\t// token: one that treats \"<p>\" as text instead of an element.\n\t// rawTag's contents are lower-cased.\n\trawTag string\n\t// textIsRaw is whether the current text token's data is not escaped.\n\ttextIsRaw bool\n\t// convertNUL is whether NUL bytes in the current token's data should\n\t// be converted into \\ufffd replacement characters.\n\tconvertNUL bool\n\t// allowCDATA is whether CDATA sections are allowed in the current context.\n\tallowCDATA bool\n}\n\n// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as\n// the text \"foo\". The default value is false, which means to recognize it as\n// a bogus comment \"<!-- [CDATA[foo]] -->\" instead.\n//\n// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and\n// only if tokenizing foreign content, such as MathML and SVG. However,\n// tracking foreign-contentness is difficult to do purely in the tokenizer,\n// as opposed to the parser, due to HTML integration points: an <svg> element\n// can contain a <foreignObject> that is foreign-to-SVG but not foreign-to-\n// HTML. For strict compliance with the HTML5 tokenization algorithm, it is the\n// responsibility of the user of a tokenizer to call AllowCDATA as appropriate.\n// In practice, if using the tokenizer without caring whether MathML or SVG\n// CDATA is text or comments, such as tokenizing HTML to find all the anchor\n// text, it is acceptable to ignore this responsibility.\nfunc (z *Tokenizer) AllowCDATA(allowCDATA bool) {\n\tz.allowCDATA = allowCDATA\n}\n\n// NextIsNotRawText instructs the tokenizer that the next token should not be\n// considered as 'raw text'. Some elements, such as script and title elements,\n// normally require the next token after the opening tag to be 'raw text' that\n// has no child elements. 
For example, tokenizing \"<title>a<b>c</b>d</title>\"\n// yields a start tag token for \"<title>\", a text token for \"a<b>c</b>d\", and\n// an end tag token for \"</title>\". There are no distinct start tag or end tag\n// tokens for the \"<b>\" and \"</b>\".\n//\n// This tokenizer implementation will generally look for raw text at the right\n// times. Strictly speaking, an HTML5 compliant tokenizer should not look for\n// raw text if in foreign content: <title> generally needs raw text, but a\n// <title> inside an <svg> does not. Another example is that a <textarea>\n// generally needs raw text, but a <textarea> is not allowed as an immediate\n// child of a <select>; in normal parsing, a <textarea> implies </select>, but\n// one cannot close the implicit element when parsing a <select>'s InnerHTML.\n// Similarly to AllowCDATA, tracking the correct moment to override raw-text-\n// ness is difficult to do purely in the tokenizer, as opposed to the parser.\n// For strict compliance with the HTML5 tokenization algorithm, it is the\n// responsibility of the user of a tokenizer to call NextIsNotRawText as\n// appropriate. In practice, like AllowCDATA, it is acceptable to ignore this\n// responsibility for basic usage.\n//\n// Note that this 'raw text' concept is different from the one offered by the\n// Tokenizer.Raw method.\nfunc (z *Tokenizer) NextIsNotRawText() {\n\tz.rawTag = \"\"\n}\n\n// Err returns the error associated with the most recent ErrorToken token.\n// This is typically io.EOF, meaning the end of tokenization.\nfunc (z *Tokenizer) Err() error {\n\tif z.tt != ErrorToken {\n\t\treturn nil\n\t}\n\treturn z.err\n}\n\n// readByte returns the next byte from the input stream, doing a buffered read\n// from z.r into z.buf if necessary. 
z.buf[z.raw.start:z.raw.end] remains a contiguous byte\n// slice that holds all the bytes read so far for the current token.\n// It sets z.err if the underlying reader returns an error.\n// Pre-condition: z.err == nil.\nfunc (z *Tokenizer) readByte() byte {\n\tif z.raw.end >= len(z.buf) {\n\t\t// Our buffer is exhausted and we have to read from z.r. Check if the\n\t\t// previous read resulted in an error.\n\t\tif z.readErr != nil {\n\t\t\tz.err = z.readErr\n\t\t\treturn 0\n\t\t}\n\t\t// We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length\n\t\t// z.raw.end - z.raw.start is more than half the capacity of z.buf, then we\n\t\t// allocate a new buffer before the copy.\n\t\tc := cap(z.buf)\n\t\td := z.raw.end - z.raw.start\n\t\tvar buf1 []byte\n\t\tif 2*d > c {\n\t\t\tbuf1 = make([]byte, d, 2*c)\n\t\t} else {\n\t\t\tbuf1 = z.buf[:d]\n\t\t}\n\t\tcopy(buf1, z.buf[z.raw.start:z.raw.end])\n\t\tif x := z.raw.start; x != 0 {\n\t\t\t// Adjust the data/attr spans to refer to the same contents after the copy.\n\t\t\tz.data.start -= x\n\t\t\tz.data.end -= x\n\t\t\tz.pendingAttr[0].start -= x\n\t\t\tz.pendingAttr[0].end -= x\n\t\t\tz.pendingAttr[1].start -= x\n\t\t\tz.pendingAttr[1].end -= x\n\t\t\tfor i := range z.attr {\n\t\t\t\tz.attr[i][0].start -= x\n\t\t\t\tz.attr[i][0].end -= x\n\t\t\t\tz.attr[i][1].start -= x\n\t\t\t\tz.attr[i][1].end -= x\n\t\t\t}\n\t\t}\n\t\tz.raw.start, z.raw.end, z.buf = 0, d, buf1[:d]\n\t\t// Now that we have copied the live bytes to the start of the buffer,\n\t\t// we read from z.r into the remainder.\n\t\tvar n int\n\t\tn, z.readErr = readAtLeastOneByte(z.r, buf1[d:cap(buf1)])\n\t\tif n == 0 {\n\t\t\tz.err = z.readErr\n\t\t\treturn 0\n\t\t}\n\t\tz.buf = buf1[:d+n]\n\t}\n\tx := z.buf[z.raw.end]\n\tz.raw.end++\n\tif z.maxBuf > 0 && z.raw.end-z.raw.start >= z.maxBuf {\n\t\tz.err = ErrBufferExceeded\n\t\treturn 0\n\t}\n\treturn x\n}\n\n// Buffered returns a slice containing data buffered but not yet tokenized.\nfunc (z 
*Tokenizer) Buffered() []byte {\n\treturn z.buf[z.raw.end:]\n}\n\n// readAtLeastOneByte wraps an io.Reader so that reading cannot return (0, nil).\n// It returns io.ErrNoProgress if the underlying r.Read method returns (0, nil)\n// too many times in succession.\nfunc readAtLeastOneByte(r io.Reader, b []byte) (int, error) {\n\tfor i := 0; i < 100; i++ {\n\t\tn, err := r.Read(b)\n\t\tif n != 0 || err != nil {\n\t\t\treturn n, err\n\t\t}\n\t}\n\treturn 0, io.ErrNoProgress\n}\n\n// skipWhiteSpace skips past any white space.\nfunc (z *Tokenizer) skipWhiteSpace() {\n\tif z.err != nil {\n\t\treturn\n\t}\n\tfor {\n\t\tc := z.readByte()\n\t\tif z.err != nil {\n\t\t\treturn\n\t\t}\n\t\tswitch c {\n\t\tcase ' ', '\\n', '\\r', '\\t', '\\f':\n\t\t\t// No-op.\n\t\tdefault:\n\t\t\tz.raw.end--\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// readRawOrRCDATA reads until the next \"</foo>\", where \"foo\" is z.rawTag and\n// is typically something like \"script\" or \"textarea\".\nfunc (z *Tokenizer) readRawOrRCDATA() {\n\tif z.rawTag == \"script\" {\n\t\tz.readScript()\n\t\tz.textIsRaw = true\n\t\tz.rawTag = \"\"\n\t\treturn\n\t}\nloop:\n\tfor {\n\t\tc := z.readByte()\n\t\tif z.err != nil {\n\t\t\tbreak loop\n\t\t}\n\t\tif c != '<' {\n\t\t\tcontinue loop\n\t\t}\n\t\tc = z.readByte()\n\t\tif z.err != nil {\n\t\t\tbreak loop\n\t\t}\n\t\tif c != '/' {\n\t\t\tcontinue loop\n\t\t}\n\t\tif z.readRawEndTag() || z.err != nil {\n\t\t\tbreak loop\n\t\t}\n\t}\n\tz.data.end = z.raw.end\n\t// A textarea's or title's RCDATA can contain escaped entities.\n\tz.textIsRaw = z.rawTag != \"textarea\" && z.rawTag != \"title\"\n\tz.rawTag = \"\"\n}\n\n// readRawEndTag attempts to read a tag like \"</foo>\", where \"foo\" is z.rawTag.\n// If it succeeds, it backs up the input position to reconsume the tag and\n// returns true. Otherwise it returns false. 
The opening \"</\" has already been\n// consumed.\nfunc (z *Tokenizer) readRawEndTag() bool {\n\tfor i := 0; i < len(z.rawTag); i++ {\n\t\tc := z.readByte()\n\t\tif z.err != nil {\n\t\t\treturn false\n\t\t}\n\t\tif c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') {\n\t\t\tz.raw.end--\n\t\t\treturn false\n\t\t}\n\t}\n\tc := z.readByte()\n\tif z.err != nil {\n\t\treturn false\n\t}\n\tswitch c {\n\tcase ' ', '\\n', '\\r', '\\t', '\\f', '/', '>':\n\t\t// The 3 is 2 for the leading \"</\" plus 1 for the trailing character c.\n\t\tz.raw.end -= 3 + len(z.rawTag)\n\t\treturn true\n\t}\n\tz.raw.end--\n\treturn false\n}\n\n// readScript reads until the next </script> tag, following the byzantine\n// rules for escaping/hiding the closing tag.\nfunc (z *Tokenizer) readScript() {\n\tdefer func() {\n\t\tz.data.end = z.raw.end\n\t}()\n\tvar c byte\n\nscriptData:\n\tc = z.readByte()\n\tif z.err != nil {\n\t\treturn\n\t}\n\tif c == '<' {\n\t\tgoto scriptDataLessThanSign\n\t}\n\tgoto scriptData\n\nscriptDataLessThanSign:\n\tc = z.readByte()\n\tif z.err != nil {\n\t\treturn\n\t}\n\tswitch c {\n\tcase '/':\n\t\tgoto scriptDataEndTagOpen\n\tcase '!':\n\t\tgoto scriptDataEscapeStart\n\t}\n\tz.raw.end--\n\tgoto scriptData\n\nscriptDataEndTagOpen:\n\tif z.readRawEndTag() || z.err != nil {\n\t\treturn\n\t}\n\tgoto scriptData\n\nscriptDataEscapeStart:\n\tc = z.readByte()\n\tif z.err != nil {\n\t\treturn\n\t}\n\tif c == '-' {\n\t\tgoto scriptDataEscapeStartDash\n\t}\n\tz.raw.end--\n\tgoto scriptData\n\nscriptDataEscapeStartDash:\n\tc = z.readByte()\n\tif z.err != nil {\n\t\treturn\n\t}\n\tif c == '-' {\n\t\tgoto scriptDataEscapedDashDash\n\t}\n\tz.raw.end--\n\tgoto scriptData\n\nscriptDataEscaped:\n\tc = z.readByte()\n\tif z.err != nil {\n\t\treturn\n\t}\n\tswitch c {\n\tcase '-':\n\t\tgoto scriptDataEscapedDash\n\tcase '<':\n\t\tgoto scriptDataEscapedLessThanSign\n\t}\n\tgoto scriptDataEscaped\n\nscriptDataEscapedDash:\n\tc = z.readByte()\n\tif z.err != nil {\n\t\treturn\n\t}\n\tswitch c 
{\n\tcase '-':\n\t\tgoto scriptDataEscapedDashDash\n\tcase '<':\n\t\tgoto scriptDataEscapedLessThanSign\n\t}\n\tgoto scriptDataEscaped\n\nscriptDataEscapedDashDash:\n\tc = z.readByte()\n\tif z.err != nil {\n\t\treturn\n\t}\n\tswitch c {\n\tcase '-':\n\t\tgoto scriptDataEscapedDashDash\n\tcase '<':\n\t\tgoto scriptDataEscapedLessThanSign\n\tcase '>':\n\t\tgoto scriptData\n\t}\n\tgoto scriptDataEscaped\n\nscriptDataEscapedLessThanSign:\n\tc = z.readByte()\n\tif z.err != nil {\n\t\treturn\n\t}\n\tif c == '/' {\n\t\tgoto scriptDataEscapedEndTagOpen\n\t}\n\tif 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {\n\t\tgoto scriptDataDoubleEscapeStart\n\t}\n\tz.raw.end--\n\tgoto scriptData\n\nscriptDataEscapedEndTagOpen:\n\tif z.readRawEndTag() || z.err != nil {\n\t\treturn\n\t}\n\tgoto scriptDataEscaped\n\nscriptDataDoubleEscapeStart:\n\tz.raw.end--\n\tfor i := 0; i < len(\"script\"); i++ {\n\t\tc = z.readByte()\n\t\tif z.err != nil {\n\t\t\treturn\n\t\t}\n\t\tif c != \"script\"[i] && c != \"SCRIPT\"[i] {\n\t\t\tz.raw.end--\n\t\t\tgoto scriptDataEscaped\n\t\t}\n\t}\n\tc = z.readByte()\n\tif z.err != nil {\n\t\treturn\n\t}\n\tswitch c {\n\tcase ' ', '\\n', '\\r', '\\t', '\\f', '/', '>':\n\t\tgoto scriptDataDoubleEscaped\n\t}\n\tz.raw.end--\n\tgoto scriptDataEscaped\n\nscriptDataDoubleEscaped:\n\tc = z.readByte()\n\tif z.err != nil {\n\t\treturn\n\t}\n\tswitch c {\n\tcase '-':\n\t\tgoto scriptDataDoubleEscapedDash\n\tcase '<':\n\t\tgoto scriptDataDoubleEscapedLessThanSign\n\t}\n\tgoto scriptDataDoubleEscaped\n\nscriptDataDoubleEscapedDash:\n\tc = z.readByte()\n\tif z.err != nil {\n\t\treturn\n\t}\n\tswitch c {\n\tcase '-':\n\t\tgoto scriptDataDoubleEscapedDashDash\n\tcase '<':\n\t\tgoto scriptDataDoubleEscapedLessThanSign\n\t}\n\tgoto scriptDataDoubleEscaped\n\nscriptDataDoubleEscapedDashDash:\n\tc = z.readByte()\n\tif z.err != nil {\n\t\treturn\n\t}\n\tswitch c {\n\tcase '-':\n\t\tgoto scriptDataDoubleEscapedDashDash\n\tcase '<':\n\t\tgoto 
scriptDataDoubleEscapedLessThanSign\n\tcase '>':\n\t\tgoto scriptData\n\t}\n\tgoto scriptDataDoubleEscaped\n\nscriptDataDoubleEscapedLessThanSign:\n\tc = z.readByte()\n\tif z.err != nil {\n\t\treturn\n\t}\n\tif c == '/' {\n\t\tgoto scriptDataDoubleEscapeEnd\n\t}\n\tz.raw.end--\n\tgoto scriptDataDoubleEscaped\n\nscriptDataDoubleEscapeEnd:\n\tif z.readRawEndTag() {\n\t\tz.raw.end += len(\"</script>\")\n\t\tgoto scriptDataEscaped\n\t}\n\tif z.err != nil {\n\t\treturn\n\t}\n\tgoto scriptDataDoubleEscaped\n}\n\n// readComment reads the next comment token starting with \"<!--\". The opening\n// \"<!--\" has already been consumed.\nfunc (z *Tokenizer) readComment() {\n\tz.data.start = z.raw.end\n\tdefer func() {\n\t\tif z.data.end < z.data.start {\n\t\t\t// It's a comment with no data, like <!-->.\n\t\t\tz.data.end = z.data.start\n\t\t}\n\t}()\n\tfor dashCount := 2; ; {\n\t\tc := z.readByte()\n\t\tif z.err != nil {\n\t\t\t// Ignore up to two dashes at EOF.\n\t\t\tif dashCount > 2 {\n\t\t\t\tdashCount = 2\n\t\t\t}\n\t\t\tz.data.end = z.raw.end - dashCount\n\t\t\treturn\n\t\t}\n\t\tswitch c {\n\t\tcase '-':\n\t\t\tdashCount++\n\t\t\tcontinue\n\t\tcase '>':\n\t\t\tif dashCount >= 2 {\n\t\t\t\tz.data.end = z.raw.end - len(\"-->\")\n\t\t\t\treturn\n\t\t\t}\n\t\tcase '!':\n\t\t\tif dashCount >= 2 {\n\t\t\t\tc = z.readByte()\n\t\t\t\tif z.err != nil {\n\t\t\t\t\tz.data.end = z.raw.end\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif c == '>' {\n\t\t\t\t\tz.data.end = z.raw.end - len(\"--!>\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdashCount = 0\n\t}\n}\n\n// readUntilCloseAngle reads until the next \">\".\nfunc (z *Tokenizer) readUntilCloseAngle() {\n\tz.data.start = z.raw.end\n\tfor {\n\t\tc := z.readByte()\n\t\tif z.err != nil {\n\t\t\tz.data.end = z.raw.end\n\t\t\treturn\n\t\t}\n\t\tif c == '>' {\n\t\t\tz.data.end = z.raw.end - len(\">\")\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// readMarkupDeclaration reads the next token starting with \"<!\". 
It might be\n// a \"<!--comment-->\", a \"<!DOCTYPE foo>\", a \"<![CDATA[section]]>\" or\n// \"<!a bogus comment\". The opening \"<!\" has already been consumed.\nfunc (z *Tokenizer) readMarkupDeclaration() TokenType {\n\tz.data.start = z.raw.end\n\tvar c [2]byte\n\tfor i := 0; i < 2; i++ {\n\t\tc[i] = z.readByte()\n\t\tif z.err != nil {\n\t\t\tz.data.end = z.raw.end\n\t\t\treturn CommentToken\n\t\t}\n\t}\n\tif c[0] == '-' && c[1] == '-' {\n\t\tz.readComment()\n\t\treturn CommentToken\n\t}\n\tz.raw.end -= 2\n\tif z.readDoctype() {\n\t\treturn DoctypeToken\n\t}\n\tif z.allowCDATA && z.readCDATA() {\n\t\tz.convertNUL = true\n\t\treturn TextToken\n\t}\n\t// It's a bogus comment.\n\tz.readUntilCloseAngle()\n\treturn CommentToken\n}\n\n// readDoctype attempts to read a doctype declaration and returns true if\n// successful. The opening \"<!\" has already been consumed.\nfunc (z *Tokenizer) readDoctype() bool {\n\tconst s = \"DOCTYPE\"\n\tfor i := 0; i < len(s); i++ {\n\t\tc := z.readByte()\n\t\tif z.err != nil {\n\t\t\tz.data.end = z.raw.end\n\t\t\treturn false\n\t\t}\n\t\tif c != s[i] && c != s[i]+('a'-'A') {\n\t\t\t// Back up to read the fragment of \"DOCTYPE\" again.\n\t\t\tz.raw.end = z.data.start\n\t\t\treturn false\n\t\t}\n\t}\n\tif z.skipWhiteSpace(); z.err != nil {\n\t\tz.data.start = z.raw.end\n\t\tz.data.end = z.raw.end\n\t\treturn true\n\t}\n\tz.readUntilCloseAngle()\n\treturn true\n}\n\n// readCDATA attempts to read a CDATA section and returns true if\n// successful. 
The opening \"<!\" has already been consumed.\nfunc (z *Tokenizer) readCDATA() bool {\n\tconst s = \"[CDATA[\"\n\tfor i := 0; i < len(s); i++ {\n\t\tc := z.readByte()\n\t\tif z.err != nil {\n\t\t\tz.data.end = z.raw.end\n\t\t\treturn false\n\t\t}\n\t\tif c != s[i] {\n\t\t\t// Back up to read the fragment of \"[CDATA[\" again.\n\t\t\tz.raw.end = z.data.start\n\t\t\treturn false\n\t\t}\n\t}\n\tz.data.start = z.raw.end\n\tbrackets := 0\n\tfor {\n\t\tc := z.readByte()\n\t\tif z.err != nil {\n\t\t\tz.data.end = z.raw.end\n\t\t\treturn true\n\t\t}\n\t\tswitch c {\n\t\tcase ']':\n\t\t\tbrackets++\n\t\tcase '>':\n\t\t\tif brackets >= 2 {\n\t\t\t\tz.data.end = z.raw.end - len(\"]]>\")\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tbrackets = 0\n\t\tdefault:\n\t\t\tbrackets = 0\n\t\t}\n\t}\n}\n\n// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end]\n// case-insensitively matches any element of ss.\nfunc (z *Tokenizer) startTagIn(ss ...string) bool {\nloop:\n\tfor _, s := range ss {\n\t\tif z.data.end-z.data.start != len(s) {\n\t\t\tcontinue loop\n\t\t}\n\t\tfor i := 0; i < len(s); i++ {\n\t\t\tc := z.buf[z.data.start+i]\n\t\t\tif 'A' <= c && c <= 'Z' {\n\t\t\t\tc += 'a' - 'A'\n\t\t\t}\n\t\t\tif c != s[i] {\n\t\t\t\tcontinue loop\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}\n\n// readStartTag reads the next start tag token. 
The opening \"<a\" has already\n// been consumed, where 'a' means anything in [A-Za-z].\nfunc (z *Tokenizer) readStartTag() TokenType {\n\tz.readTag(true)\n\tif z.err != nil {\n\t\treturn ErrorToken\n\t}\n\t// Several tags flag the tokenizer's next token as raw.\n\tc, raw := z.buf[z.data.start], false\n\tif 'A' <= c && c <= 'Z' {\n\t\tc += 'a' - 'A'\n\t}\n\tswitch c {\n\tcase 'i':\n\t\traw = z.startTagIn(\"iframe\")\n\tcase 'n':\n\t\traw = z.startTagIn(\"noembed\", \"noframes\", \"noscript\")\n\tcase 'p':\n\t\traw = z.startTagIn(\"plaintext\")\n\tcase 's':\n\t\traw = z.startTagIn(\"script\", \"style\")\n\tcase 't':\n\t\traw = z.startTagIn(\"textarea\", \"title\")\n\tcase 'x':\n\t\traw = z.startTagIn(\"xmp\")\n\t}\n\tif raw {\n\t\tz.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end]))\n\t}\n\t// Look for a self-closing token like \"<br/>\".\n\tif z.err == nil && z.buf[z.raw.end-2] == '/' {\n\t\treturn SelfClosingTagToken\n\t}\n\treturn StartTagToken\n}\n\n// readTag reads the next tag token and its attributes. If saveAttr, those\n// attributes are saved in z.attr, otherwise z.attr is set to an empty slice.\n// The opening \"<a\" or \"</a\" has already been consumed, where 'a' means anything\n// in [A-Za-z].\nfunc (z *Tokenizer) readTag(saveAttr bool) {\n\tz.attr = z.attr[:0]\n\tz.nAttrReturned = 0\n\t// Read the tag name and attribute key/value pairs.\n\tz.readTagName()\n\tif z.skipWhiteSpace(); z.err != nil {\n\t\treturn\n\t}\n\tfor {\n\t\tc := z.readByte()\n\t\tif z.err != nil || c == '>' {\n\t\t\tbreak\n\t\t}\n\t\tz.raw.end--\n\t\tz.readTagAttrKey()\n\t\tz.readTagAttrVal()\n\t\t// Save pendingAttr if saveAttr and that attribute has a non-empty key.\n\t\tif saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end {\n\t\t\tz.attr = append(z.attr, z.pendingAttr)\n\t\t}\n\t\tif z.skipWhiteSpace(); z.err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\n// readTagName sets z.data to the \"div\" in \"<div k=v>\". 
The reader (z.raw.end)\n// is positioned such that the first byte of the tag name (the \"d\" in \"<div\")\n// has already been consumed.\nfunc (z *Tokenizer) readTagName() {\n\tz.data.start = z.raw.end - 1\n\tfor {\n\t\tc := z.readByte()\n\t\tif z.err != nil {\n\t\t\tz.data.end = z.raw.end\n\t\t\treturn\n\t\t}\n\t\tswitch c {\n\t\tcase ' ', '\\n', '\\r', '\\t', '\\f':\n\t\t\tz.data.end = z.raw.end - 1\n\t\t\treturn\n\t\tcase '/', '>':\n\t\t\tz.raw.end--\n\t\t\tz.data.end = z.raw.end\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// readTagAttrKey sets z.pendingAttr[0] to the \"k\" in \"<div k=v>\".\n// Precondition: z.err == nil.\nfunc (z *Tokenizer) readTagAttrKey() {\n\tz.pendingAttr[0].start = z.raw.end\n\tfor {\n\t\tc := z.readByte()\n\t\tif z.err != nil {\n\t\t\tz.pendingAttr[0].end = z.raw.end\n\t\t\treturn\n\t\t}\n\t\tswitch c {\n\t\tcase ' ', '\\n', '\\r', '\\t', '\\f', '/':\n\t\t\tz.pendingAttr[0].end = z.raw.end - 1\n\t\t\treturn\n\t\tcase '=', '>':\n\t\t\tz.raw.end--\n\t\t\tz.pendingAttr[0].end = z.raw.end\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// readTagAttrVal sets z.pendingAttr[1] to the \"v\" in \"<div k=v>\".\nfunc (z *Tokenizer) readTagAttrVal() {\n\tz.pendingAttr[1].start = z.raw.end\n\tz.pendingAttr[1].end = z.raw.end\n\tif z.skipWhiteSpace(); z.err != nil {\n\t\treturn\n\t}\n\tc := z.readByte()\n\tif z.err != nil {\n\t\treturn\n\t}\n\tif c != '=' {\n\t\tz.raw.end--\n\t\treturn\n\t}\n\tif z.skipWhiteSpace(); z.err != nil {\n\t\treturn\n\t}\n\tquote := z.readByte()\n\tif z.err != nil {\n\t\treturn\n\t}\n\tswitch quote {\n\tcase '>':\n\t\tz.raw.end--\n\t\treturn\n\n\tcase '\\'', '\"':\n\t\tz.pendingAttr[1].start = z.raw.end\n\t\tfor {\n\t\t\tc := z.readByte()\n\t\t\tif z.err != nil {\n\t\t\t\tz.pendingAttr[1].end = z.raw.end\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif c == quote {\n\t\t\t\tz.pendingAttr[1].end = z.raw.end - 1\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\tdefault:\n\t\tz.pendingAttr[1].start = z.raw.end - 1\n\t\tfor {\n\t\t\tc := z.readByte()\n\t\t\tif z.err != nil 
{\n\t\t\t\tz.pendingAttr[1].end = z.raw.end\n\t\t\t\treturn\n\t\t\t}\n\t\t\tswitch c {\n\t\t\tcase ' ', '\\n', '\\r', '\\t', '\\f':\n\t\t\t\tz.pendingAttr[1].end = z.raw.end - 1\n\t\t\t\treturn\n\t\t\tcase '>':\n\t\t\t\tz.raw.end--\n\t\t\t\tz.pendingAttr[1].end = z.raw.end\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Next scans the next token and returns its type.\nfunc (z *Tokenizer) Next() TokenType {\n\tz.raw.start = z.raw.end\n\tz.data.start = z.raw.end\n\tz.data.end = z.raw.end\n\tif z.err != nil {\n\t\tz.tt = ErrorToken\n\t\treturn z.tt\n\t}\n\tif z.rawTag != \"\" {\n\t\tif z.rawTag == \"plaintext\" {\n\t\t\t// Read everything up to EOF.\n\t\t\tfor z.err == nil {\n\t\t\t\tz.readByte()\n\t\t\t}\n\t\t\tz.data.end = z.raw.end\n\t\t\tz.textIsRaw = true\n\t\t} else {\n\t\t\tz.readRawOrRCDATA()\n\t\t}\n\t\tif z.data.end > z.data.start {\n\t\t\tz.tt = TextToken\n\t\t\tz.convertNUL = true\n\t\t\treturn z.tt\n\t\t}\n\t}\n\tz.textIsRaw = false\n\tz.convertNUL = false\n\nloop:\n\tfor {\n\t\tc := z.readByte()\n\t\tif z.err != nil {\n\t\t\tbreak loop\n\t\t}\n\t\tif c != '<' {\n\t\t\tcontinue loop\n\t\t}\n\n\t\t// Check if the '<' we have just read is part of a tag, comment\n\t\t// or doctype. If not, it's part of the accumulated text token.\n\t\tc = z.readByte()\n\t\tif z.err != nil {\n\t\t\tbreak loop\n\t\t}\n\t\tvar tokenType TokenType\n\t\tswitch {\n\t\tcase 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':\n\t\t\ttokenType = StartTagToken\n\t\tcase c == '/':\n\t\t\ttokenType = EndTagToken\n\t\tcase c == '!' || c == '?':\n\t\t\t// We use CommentToken to mean any of \"<!--actual comments-->\",\n\t\t\t// \"<!DOCTYPE declarations>\" and \"<?xml processing instructions?>\".\n\t\t\ttokenType = CommentToken\n\t\tdefault:\n\t\t\t// Reconsume the current character.\n\t\t\tz.raw.end--\n\t\t\tcontinue\n\t\t}\n\n\t\t// We have a non-text token, but we might have accumulated some text\n\t\t// before that. 
If so, we return the text first, and return the non-\n\t\t// text token on the subsequent call to Next.\n\t\tif x := z.raw.end - len(\"<a\"); z.raw.start < x {\n\t\t\tz.raw.end = x\n\t\t\tz.data.end = x\n\t\t\tz.tt = TextToken\n\t\t\treturn z.tt\n\t\t}\n\t\tswitch tokenType {\n\t\tcase StartTagToken:\n\t\t\tz.tt = z.readStartTag()\n\t\t\treturn z.tt\n\t\tcase EndTagToken:\n\t\t\tc = z.readByte()\n\t\t\tif z.err != nil {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tif c == '>' {\n\t\t\t\t// \"</>\" does not generate a token at all. Generate an empty comment\n\t\t\t\t// to allow passthrough clients to pick up the data using Raw.\n\t\t\t\t// Reset the tokenizer state and start again.\n\t\t\t\tz.tt = CommentToken\n\t\t\t\treturn z.tt\n\t\t\t}\n\t\t\tif 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {\n\t\t\t\tz.readTag(false)\n\t\t\t\tif z.err != nil {\n\t\t\t\t\tz.tt = ErrorToken\n\t\t\t\t} else {\n\t\t\t\t\tz.tt = EndTagToken\n\t\t\t\t}\n\t\t\t\treturn z.tt\n\t\t\t}\n\t\t\tz.raw.end--\n\t\t\tz.readUntilCloseAngle()\n\t\t\tz.tt = CommentToken\n\t\t\treturn z.tt\n\t\tcase CommentToken:\n\t\t\tif c == '!' {\n\t\t\t\tz.tt = z.readMarkupDeclaration()\n\t\t\t\treturn z.tt\n\t\t\t}\n\t\t\tz.raw.end--\n\t\t\tz.readUntilCloseAngle()\n\t\t\tz.tt = CommentToken\n\t\t\treturn z.tt\n\t\t}\n\t}\n\tif z.raw.start < z.raw.end {\n\t\tz.data.end = z.raw.end\n\t\tz.tt = TextToken\n\t\treturn z.tt\n\t}\n\tz.tt = ErrorToken\n\treturn z.tt\n}\n\n// Raw returns the unmodified text of the current token. 
Calling Next, Token,\n// Text, TagName or TagAttr may change the contents of the returned slice.\nfunc (z *Tokenizer) Raw() []byte {\n\treturn z.buf[z.raw.start:z.raw.end]\n}\n\n// convertNewlines converts \"\\r\" and \"\\r\\n\" in s to \"\\n\".\n// The conversion happens in place, but the resulting slice may be shorter.\nfunc convertNewlines(s []byte) []byte {\n\tfor i, c := range s {\n\t\tif c != '\\r' {\n\t\t\tcontinue\n\t\t}\n\n\t\tsrc := i + 1\n\t\tif src >= len(s) || s[src] != '\\n' {\n\t\t\ts[i] = '\\n'\n\t\t\tcontinue\n\t\t}\n\n\t\tdst := i\n\t\tfor src < len(s) {\n\t\t\tif s[src] == '\\r' {\n\t\t\t\tif src+1 < len(s) && s[src+1] == '\\n' {\n\t\t\t\t\tsrc++\n\t\t\t\t}\n\t\t\t\ts[dst] = '\\n'\n\t\t\t} else {\n\t\t\t\ts[dst] = s[src]\n\t\t\t}\n\t\t\tsrc++\n\t\t\tdst++\n\t\t}\n\t\treturn s[:dst]\n\t}\n\treturn s\n}\n\nvar (\n\tnul         = []byte(\"\\x00\")\n\treplacement = []byte(\"\\ufffd\")\n)\n\n// Text returns the unescaped text of a text, comment or doctype token. The\n// contents of the returned slice may change on the next call to Next.\nfunc (z *Tokenizer) Text() []byte {\n\tswitch z.tt {\n\tcase TextToken, CommentToken, DoctypeToken:\n\t\ts := z.buf[z.data.start:z.data.end]\n\t\tz.data.start = z.raw.end\n\t\tz.data.end = z.raw.end\n\t\ts = convertNewlines(s)\n\t\tif (z.convertNUL || z.tt == CommentToken) && bytes.Contains(s, nul) {\n\t\t\ts = bytes.Replace(s, nul, replacement, -1)\n\t\t}\n\t\tif !z.textIsRaw {\n\t\t\ts = unescape(s, false)\n\t\t}\n\t\treturn s\n\t}\n\treturn nil\n}\n\n// TagName returns the lower-cased name of a tag token (the `img` out of\n// `<IMG SRC=\"foo\">`) and whether the tag has attributes.\n// The contents of the returned slice may change on the next call to Next.\nfunc (z *Tokenizer) TagName() (name []byte, hasAttr bool) {\n\tif z.data.start < z.data.end {\n\t\tswitch z.tt {\n\t\tcase StartTagToken, EndTagToken, SelfClosingTagToken:\n\t\t\ts := z.buf[z.data.start:z.data.end]\n\t\t\tz.data.start = 
z.raw.end\n\t\t\tz.data.end = z.raw.end\n\t\t\treturn lower(s), z.nAttrReturned < len(z.attr)\n\t\t}\n\t}\n\treturn nil, false\n}\n\n// TagAttr returns the lower-cased key and unescaped value of the next unparsed\n// attribute for the current tag token and whether there are more attributes.\n// The contents of the returned slices may change on the next call to Next.\nfunc (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) {\n\tif z.nAttrReturned < len(z.attr) {\n\t\tswitch z.tt {\n\t\tcase StartTagToken, SelfClosingTagToken:\n\t\t\tx := z.attr[z.nAttrReturned]\n\t\t\tz.nAttrReturned++\n\t\t\tkey = z.buf[x[0].start:x[0].end]\n\t\t\tval = z.buf[x[1].start:x[1].end]\n\t\t\treturn lower(key), unescape(convertNewlines(val), true), z.nAttrReturned < len(z.attr)\n\t\t}\n\t}\n\treturn nil, nil, false\n}\n\n// Token returns the next Token. The result's Data and Attr values remain valid\n// after subsequent Next calls.\nfunc (z *Tokenizer) Token() Token {\n\tt := Token{Type: z.tt}\n\tswitch z.tt {\n\tcase TextToken, CommentToken, DoctypeToken:\n\t\tt.Data = string(z.Text())\n\tcase StartTagToken, SelfClosingTagToken, EndTagToken:\n\t\tname, moreAttr := z.TagName()\n\t\tfor moreAttr {\n\t\t\tvar key, val []byte\n\t\t\tkey, val, moreAttr = z.TagAttr()\n\t\t\tt.Attr = append(t.Attr, Attribute{\"\", atom.String(key), string(val)})\n\t\t}\n\t\tif a := atom.Lookup(name); a != 0 {\n\t\t\tt.DataAtom, t.Data = a, a.String()\n\t\t} else {\n\t\t\tt.DataAtom, t.Data = 0, string(name)\n\t\t}\n\t}\n\treturn t\n}\n\n// SetMaxBuf sets a limit on the amount of data buffered during tokenization.\n// A value of 0 means unlimited.\nfunc (z *Tokenizer) SetMaxBuf(n int) {\n\tz.maxBuf = n\n}\n\n// NewTokenizer returns a new HTML Tokenizer for the given Reader.\n// The input is assumed to be UTF-8 encoded.\nfunc NewTokenizer(r io.Reader) *Tokenizer {\n\treturn NewTokenizerFragment(r, \"\")\n}\n\n// NewTokenizerFragment returns a new HTML Tokenizer for the given Reader, for\n// tokenizing an 
existing element's InnerHTML fragment. contextTag is that\n// element's tag, such as \"div\" or \"iframe\".\n//\n// For example, how the InnerHTML \"a<b\" is tokenized depends on whether it is\n// for a <p> tag or a <script> tag.\n//\n// The input is assumed to be UTF-8 encoded.\nfunc NewTokenizerFragment(r io.Reader, contextTag string) *Tokenizer {\n\tz := &Tokenizer{\n\t\tr:   r,\n\t\tbuf: make([]byte, 0, 4096),\n\t}\n\tif contextTag != \"\" {\n\t\tswitch s := strings.ToLower(contextTag); s {\n\t\tcase \"iframe\", \"noembed\", \"noframes\", \"noscript\", \"plaintext\", \"script\", \"style\", \"title\", \"textarea\", \"xmp\":\n\t\t\tz.rawTag = s\n\t\t}\n\t}\n\treturn z\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/html/token_test.go",
    "content": "// Copyright 2010 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage html\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n)\n\ntype tokenTest struct {\n\t// A short description of the test case.\n\tdesc string\n\t// The HTML to parse.\n\thtml string\n\t// The string representations of the expected tokens, joined by '$'.\n\tgolden string\n}\n\nvar tokenTests = []tokenTest{\n\t{\n\t\t\"empty\",\n\t\t\"\",\n\t\t\"\",\n\t},\n\t// A single text node. The tokenizer should not break text nodes on whitespace,\n\t// nor should it normalize whitespace within a text node.\n\t{\n\t\t\"text\",\n\t\t\"foo  bar\",\n\t\t\"foo  bar\",\n\t},\n\t// An entity.\n\t{\n\t\t\"entity\",\n\t\t\"one &lt; two\",\n\t\t\"one &lt; two\",\n\t},\n\t// A start, self-closing and end tag. The tokenizer does not care if the start\n\t// and end tokens don't match; that is the job of the parser.\n\t{\n\t\t\"tags\",\n\t\t\"<a>b<c/>d</e>\",\n\t\t\"<a>$b$<c/>$d$</e>\",\n\t},\n\t// Angle brackets that aren't a tag.\n\t{\n\t\t\"not a tag #0\",\n\t\t\"<\",\n\t\t\"&lt;\",\n\t},\n\t{\n\t\t\"not a tag #1\",\n\t\t\"</\",\n\t\t\"&lt;/\",\n\t},\n\t{\n\t\t\"not a tag #2\",\n\t\t\"</>\",\n\t\t\"<!---->\",\n\t},\n\t{\n\t\t\"not a tag #3\",\n\t\t\"a</>b\",\n\t\t\"a$<!---->$b\",\n\t},\n\t{\n\t\t\"not a tag #4\",\n\t\t\"</ >\",\n\t\t\"<!-- -->\",\n\t},\n\t{\n\t\t\"not a tag #5\",\n\t\t\"</.\",\n\t\t\"<!--.-->\",\n\t},\n\t{\n\t\t\"not a tag #6\",\n\t\t\"</.>\",\n\t\t\"<!--.-->\",\n\t},\n\t{\n\t\t\"not a tag #7\",\n\t\t\"a < b\",\n\t\t\"a &lt; b\",\n\t},\n\t{\n\t\t\"not a tag #8\",\n\t\t\"<.>\",\n\t\t\"&lt;.&gt;\",\n\t},\n\t{\n\t\t\"not a tag #9\",\n\t\t\"a<<<b>>>c\",\n\t\t\"a&lt;&lt;$<b>$&gt;&gt;c\",\n\t},\n\t{\n\t\t\"not a tag #10\",\n\t\t\"if x<0 and y < 0 then x*y>0\",\n\t\t\"if x&lt;0 and y &lt; 0 then x*y&gt;0\",\n\t},\n\t{\n\t\t\"not a 
tag #11\",\n\t\t\"<<p>\",\n\t\t\"&lt;$<p>\",\n\t},\n\t// EOF in a tag name.\n\t{\n\t\t\"tag name eof #0\",\n\t\t\"<a\",\n\t\t\"\",\n\t},\n\t{\n\t\t\"tag name eof #1\",\n\t\t\"<a \",\n\t\t\"\",\n\t},\n\t{\n\t\t\"tag name eof #2\",\n\t\t\"a<b\",\n\t\t\"a\",\n\t},\n\t{\n\t\t\"tag name eof #3\",\n\t\t\"<a><b\",\n\t\t\"<a>\",\n\t},\n\t{\n\t\t\"tag name eof #4\",\n\t\t`<a x`,\n\t\t``,\n\t},\n\t// Some malformed tags that are missing a '>'.\n\t{\n\t\t\"malformed tag #0\",\n\t\t`<p</p>`,\n\t\t`<p< p=\"\">`,\n\t},\n\t{\n\t\t\"malformed tag #1\",\n\t\t`<p </p>`,\n\t\t`<p <=\"\" p=\"\">`,\n\t},\n\t{\n\t\t\"malformed tag #2\",\n\t\t`<p id`,\n\t\t``,\n\t},\n\t{\n\t\t\"malformed tag #3\",\n\t\t`<p id=`,\n\t\t``,\n\t},\n\t{\n\t\t\"malformed tag #4\",\n\t\t`<p id=>`,\n\t\t`<p id=\"\">`,\n\t},\n\t{\n\t\t\"malformed tag #5\",\n\t\t`<p id=0`,\n\t\t``,\n\t},\n\t{\n\t\t\"malformed tag #6\",\n\t\t`<p id=0</p>`,\n\t\t`<p id=\"0&lt;/p\">`,\n\t},\n\t{\n\t\t\"malformed tag #7\",\n\t\t`<p id=\"0</p>`,\n\t\t``,\n\t},\n\t{\n\t\t\"malformed tag #8\",\n\t\t`<p id=\"0\"</p>`,\n\t\t`<p id=\"0\" <=\"\" p=\"\">`,\n\t},\n\t{\n\t\t\"malformed tag #9\",\n\t\t`<p></p id`,\n\t\t`<p>`,\n\t},\n\t// Raw text and RCDATA.\n\t{\n\t\t\"basic raw text\",\n\t\t\"<script><a></b></script>\",\n\t\t\"<script>$&lt;a&gt;&lt;/b&gt;$</script>\",\n\t},\n\t{\n\t\t\"unfinished script end tag\",\n\t\t\"<SCRIPT>a</SCR\",\n\t\t\"<script>$a&lt;/SCR\",\n\t},\n\t{\n\t\t\"broken script end tag\",\n\t\t\"<SCRIPT>a</SCR ipt>\",\n\t\t\"<script>$a&lt;/SCR ipt&gt;\",\n\t},\n\t{\n\t\t\"EOF in script end tag\",\n\t\t\"<SCRIPT>a</SCRipt\",\n\t\t\"<script>$a&lt;/SCRipt\",\n\t},\n\t{\n\t\t\"scriptx end tag\",\n\t\t\"<SCRIPT>a</SCRiptx\",\n\t\t\"<script>$a&lt;/SCRiptx\",\n\t},\n\t{\n\t\t\"' ' completes script end tag\",\n\t\t\"<SCRIPT>a</SCRipt \",\n\t\t\"<script>$a\",\n\t},\n\t{\n\t\t\"'>' completes script end tag\",\n\t\t\"<SCRIPT>a</SCRipt>\",\n\t\t\"<script>$a$</script>\",\n\t},\n\t{\n\t\t\"self-closing script end 
tag\",\n\t\t\"<SCRIPT>a</SCRipt/>\",\n\t\t\"<script>$a$</script>\",\n\t},\n\t{\n\t\t\"nested script tag\",\n\t\t\"<SCRIPT>a</SCRipt<script>\",\n\t\t\"<script>$a&lt;/SCRipt&lt;script&gt;\",\n\t},\n\t{\n\t\t\"script end tag after unfinished\",\n\t\t\"<SCRIPT>a</SCRipt</script>\",\n\t\t\"<script>$a&lt;/SCRipt$</script>\",\n\t},\n\t{\n\t\t\"script/style mismatched tags\",\n\t\t\"<script>a</style>\",\n\t\t\"<script>$a&lt;/style&gt;\",\n\t},\n\t{\n\t\t\"style element with entity\",\n\t\t\"<style>&apos;\",\n\t\t\"<style>$&amp;apos;\",\n\t},\n\t{\n\t\t\"textarea with tag\",\n\t\t\"<textarea><div></textarea>\",\n\t\t\"<textarea>$&lt;div&gt;$</textarea>\",\n\t},\n\t{\n\t\t\"title with tag and entity\",\n\t\t\"<title><b>K&amp;R C</b></title>\",\n\t\t\"<title>$&lt;b&gt;K&amp;R C&lt;/b&gt;$</title>\",\n\t},\n\t// DOCTYPE tests.\n\t{\n\t\t\"Proper DOCTYPE\",\n\t\t\"<!DOCTYPE html>\",\n\t\t\"<!DOCTYPE html>\",\n\t},\n\t{\n\t\t\"DOCTYPE with no space\",\n\t\t\"<!doctypehtml>\",\n\t\t\"<!DOCTYPE html>\",\n\t},\n\t{\n\t\t\"DOCTYPE with two spaces\",\n\t\t\"<!doctype  html>\",\n\t\t\"<!DOCTYPE html>\",\n\t},\n\t{\n\t\t\"looks like DOCTYPE but isn't\",\n\t\t\"<!DOCUMENT html>\",\n\t\t\"<!--DOCUMENT html-->\",\n\t},\n\t{\n\t\t\"DOCTYPE at EOF\",\n\t\t\"<!DOCtype\",\n\t\t\"<!DOCTYPE >\",\n\t},\n\t// XML processing instructions.\n\t{\n\t\t\"XML processing instruction\",\n\t\t\"<?xml?>\",\n\t\t\"<!--?xml?-->\",\n\t},\n\t// Comments.\n\t{\n\t\t\"comment0\",\n\t\t\"abc<b><!-- skipme --></b>def\",\n\t\t\"abc$<b>$<!-- skipme 
-->$</b>$def\",\n\t},\n\t{\n\t\t\"comment1\",\n\t\t\"a<!-->z\",\n\t\t\"a$<!---->$z\",\n\t},\n\t{\n\t\t\"comment2\",\n\t\t\"a<!--->z\",\n\t\t\"a$<!---->$z\",\n\t},\n\t{\n\t\t\"comment3\",\n\t\t\"a<!--x>-->z\",\n\t\t\"a$<!--x>-->$z\",\n\t},\n\t{\n\t\t\"comment4\",\n\t\t\"a<!--x->-->z\",\n\t\t\"a$<!--x->-->$z\",\n\t},\n\t{\n\t\t\"comment5\",\n\t\t\"a<!>z\",\n\t\t\"a$<!---->$z\",\n\t},\n\t{\n\t\t\"comment6\",\n\t\t\"a<!->z\",\n\t\t\"a$<!----->$z\",\n\t},\n\t{\n\t\t\"comment7\",\n\t\t\"a<!---<>z\",\n\t\t\"a$<!---<>z-->\",\n\t},\n\t{\n\t\t\"comment8\",\n\t\t\"a<!--z\",\n\t\t\"a$<!--z-->\",\n\t},\n\t{\n\t\t\"comment9\",\n\t\t\"a<!--z-\",\n\t\t\"a$<!--z-->\",\n\t},\n\t{\n\t\t\"comment10\",\n\t\t\"a<!--z--\",\n\t\t\"a$<!--z-->\",\n\t},\n\t{\n\t\t\"comment11\",\n\t\t\"a<!--z---\",\n\t\t\"a$<!--z--->\",\n\t},\n\t{\n\t\t\"comment12\",\n\t\t\"a<!--z----\",\n\t\t\"a$<!--z---->\",\n\t},\n\t{\n\t\t\"comment13\",\n\t\t\"a<!--x--!>z\",\n\t\t\"a$<!--x-->$z\",\n\t},\n\t// An attribute with a backslash.\n\t{\n\t\t\"backslash\",\n\t\t`<p id=\"a\\\"b\">`,\n\t\t`<p id=\"a\\\" b\"=\"\">`,\n\t},\n\t// Entities, tag name and attribute key lower-casing, and whitespace\n\t// normalization within a tag.\n\t{\n\t\t\"tricky\",\n\t\t\"<p \\t\\n iD=\\\"a&quot;B\\\"  foo=\\\"bar\\\"><EM>te&lt;&amp;;xt</em></p>\",\n\t\t`<p id=\"a&#34;B\" foo=\"bar\">$<em>$te&lt;&amp;;xt$</em>$</p>`,\n\t},\n\t// A nonexistent entity. 
Tokenizing and converting back to a string should\n\t// escape the \"&\" to become \"&amp;\".\n\t{\n\t\t\"noSuchEntity\",\n\t\t`<a b=\"c&noSuchEntity;d\">&lt;&alsoDoesntExist;&`,\n\t\t`<a b=\"c&amp;noSuchEntity;d\">$&lt;&amp;alsoDoesntExist;&amp;`,\n\t},\n\t{\n\t\t\"entity without semicolon\",\n\t\t`&notit;&notin;<a b=\"q=z&amp=5&notice=hello&not;=world\">`,\n\t\t`¬it;∉$<a b=\"q=z&amp;amp=5&amp;notice=hello¬=world\">`,\n\t},\n\t{\n\t\t\"entity with digits\",\n\t\t\"&frac12;\",\n\t\t\"½\",\n\t},\n\t// Attribute tests:\n\t// http://dev.w3.org/html5/pf-summary/Overview.html#attributes\n\t{\n\t\t\"Empty attribute\",\n\t\t`<input disabled FOO>`,\n\t\t`<input disabled=\"\" foo=\"\">`,\n\t},\n\t{\n\t\t\"Empty attribute, whitespace\",\n\t\t`<input disabled FOO >`,\n\t\t`<input disabled=\"\" foo=\"\">`,\n\t},\n\t{\n\t\t\"Unquoted attribute value\",\n\t\t`<input value=yes FOO=BAR>`,\n\t\t`<input value=\"yes\" foo=\"BAR\">`,\n\t},\n\t{\n\t\t\"Unquoted attribute value, spaces\",\n\t\t`<input value = yes FOO = BAR>`,\n\t\t`<input value=\"yes\" foo=\"BAR\">`,\n\t},\n\t{\n\t\t\"Unquoted attribute value, trailing space\",\n\t\t`<input value=yes FOO=BAR >`,\n\t\t`<input value=\"yes\" foo=\"BAR\">`,\n\t},\n\t{\n\t\t\"Single-quoted attribute value\",\n\t\t`<input value='yes' FOO='BAR'>`,\n\t\t`<input value=\"yes\" foo=\"BAR\">`,\n\t},\n\t{\n\t\t\"Single-quoted attribute value, trailing space\",\n\t\t`<input value='yes' FOO='BAR' >`,\n\t\t`<input value=\"yes\" foo=\"BAR\">`,\n\t},\n\t{\n\t\t\"Double-quoted attribute value\",\n\t\t`<input value=\"I'm an attribute\" FOO=\"BAR\">`,\n\t\t`<input value=\"I&#39;m an attribute\" foo=\"BAR\">`,\n\t},\n\t{\n\t\t\"Attribute name characters\",\n\t\t`<meta http-equiv=\"content-type\">`,\n\t\t`<meta http-equiv=\"content-type\">`,\n\t},\n\t{\n\t\t\"Mixed attributes\",\n\t\t`a<P V=\"0 1\" w='2' X=3 y>z`,\n\t\t`a$<p v=\"0 1\" w=\"2\" x=\"3\" y=\"\">$z`,\n\t},\n\t{\n\t\t\"Attributes with a solitary single quote\",\n\t\t`<p id=can't><p 
id=won't>`,\n\t\t`<p id=\"can&#39;t\">$<p id=\"won&#39;t\">`,\n\t},\n}\n\nfunc TestTokenizer(t *testing.T) {\nloop:\n\tfor _, tt := range tokenTests {\n\t\tz := NewTokenizer(strings.NewReader(tt.html))\n\t\tif tt.golden != \"\" {\n\t\t\tfor i, s := range strings.Split(tt.golden, \"$\") {\n\t\t\t\tif z.Next() == ErrorToken {\n\t\t\t\t\tt.Errorf(\"%s token %d: want %q got error %v\", tt.desc, i, s, z.Err())\n\t\t\t\t\tcontinue loop\n\t\t\t\t}\n\t\t\t\tactual := z.Token().String()\n\t\t\t\tif s != actual {\n\t\t\t\t\tt.Errorf(\"%s token %d: want %q got %q\", tt.desc, i, s, actual)\n\t\t\t\t\tcontinue loop\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tz.Next()\n\t\tif z.Err() != io.EOF {\n\t\t\tt.Errorf(\"%s: want EOF got %q\", tt.desc, z.Err())\n\t\t}\n\t}\n}\n\nfunc TestMaxBuffer(t *testing.T) {\n\t// Exceeding the maximum buffer size generates ErrBufferExceeded.\n\tz := NewTokenizer(strings.NewReader(\"<\" + strings.Repeat(\"t\", 10)))\n\tz.SetMaxBuf(5)\n\ttt := z.Next()\n\tif got, want := tt, ErrorToken; got != want {\n\t\tt.Fatalf(\"token type: got: %v want: %v\", got, want)\n\t}\n\tif got, want := z.Err(), ErrBufferExceeded; got != want {\n\t\tt.Errorf(\"error type: got: %v want: %v\", got, want)\n\t}\n\tif got, want := string(z.Raw()), \"<tttt\"; got != want {\n\t\tt.Fatalf(\"buffered before overflow: got: %q want: %q\", got, want)\n\t}\n}\n\nfunc TestMaxBufferReconstruction(t *testing.T) {\n\t// Exceeding the maximum buffer size at any point while tokenizing permits\n\t// reconstructing the original input.\ntests:\n\tfor _, test := range tokenTests {\n\t\tfor maxBuf := 1; ; maxBuf++ {\n\t\t\tr := strings.NewReader(test.html)\n\t\t\tz := NewTokenizer(r)\n\t\t\tz.SetMaxBuf(maxBuf)\n\t\t\tvar tokenized bytes.Buffer\n\t\t\tfor {\n\t\t\t\ttt := z.Next()\n\t\t\t\ttokenized.Write(z.Raw())\n\t\t\t\tif tt == ErrorToken {\n\t\t\t\t\tif err := z.Err(); err != io.EOF && err != ErrBufferExceeded {\n\t\t\t\t\t\tt.Errorf(\"%s: unexpected error: %v\", test.desc, 
err)\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Anything tokenized along with untokenized input or data left in the reader.\n\t\t\tassembled, err := ioutil.ReadAll(io.MultiReader(&tokenized, bytes.NewReader(z.Buffered()), r))\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"%s: ReadAll: %v\", test.desc, err)\n\t\t\t\tcontinue tests\n\t\t\t}\n\t\t\tif got, want := string(assembled), test.html; got != want {\n\t\t\t\tt.Errorf(\"%s: reassembled html:\\n got: %q\\nwant: %q\", test.desc, got, want)\n\t\t\t\tcontinue tests\n\t\t\t}\n\t\t\t// EOF indicates that we completed tokenization and hence found the max\n\t\t\t// maxBuf that generates ErrBufferExceeded, so continue to the next test.\n\t\t\tif z.Err() == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t} // buffer sizes\n\t} // tests\n}\n\nfunc TestPassthrough(t *testing.T) {\n\t// Accumulating the raw output for each parse event should reconstruct the\n\t// original input.\n\tfor _, test := range tokenTests {\n\t\tz := NewTokenizer(strings.NewReader(test.html))\n\t\tvar parsed bytes.Buffer\n\t\tfor {\n\t\t\ttt := z.Next()\n\t\t\tparsed.Write(z.Raw())\n\t\t\tif tt == ErrorToken {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif got, want := parsed.String(), test.html; got != want {\n\t\t\tt.Errorf(\"%s: parsed output:\\n got: %q\\nwant: %q\", test.desc, got, want)\n\t\t}\n\t}\n}\n\nfunc TestBufAPI(t *testing.T) {\n\ts := \"0<a>1</a>2<b>3<a>4<a>5</a>6</b>7</a>8<a/>9\"\n\tz := NewTokenizer(bytes.NewBufferString(s))\n\tvar result bytes.Buffer\n\tdepth := 0\nloop:\n\tfor {\n\t\ttt := z.Next()\n\t\tswitch tt {\n\t\tcase ErrorToken:\n\t\t\tif z.Err() != io.EOF {\n\t\t\t\tt.Error(z.Err())\n\t\t\t}\n\t\t\tbreak loop\n\t\tcase TextToken:\n\t\t\tif depth > 0 {\n\t\t\t\tresult.Write(z.Text())\n\t\t\t}\n\t\tcase StartTagToken, EndTagToken:\n\t\t\ttn, _ := z.TagName()\n\t\t\tif len(tn) == 1 && tn[0] == 'a' {\n\t\t\t\tif tt == StartTagToken {\n\t\t\t\t\tdepth++\n\t\t\t\t} else {\n\t\t\t\t\tdepth--\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tu := 
\"14567\"\n\tv := string(result.Bytes())\n\tif u != v {\n\t\tt.Errorf(\"TestBufAPI: want %q got %q\", u, v)\n\t}\n}\n\nfunc TestConvertNewlines(t *testing.T) {\n\ttestCases := map[string]string{\n\t\t\"Mac\\rDOS\\r\\nUnix\\n\":    \"Mac\\nDOS\\nUnix\\n\",\n\t\t\"Unix\\nMac\\rDOS\\r\\n\":    \"Unix\\nMac\\nDOS\\n\",\n\t\t\"DOS\\r\\nDOS\\r\\nDOS\\r\\n\": \"DOS\\nDOS\\nDOS\\n\",\n\t\t\"\":         \"\",\n\t\t\"\\n\":       \"\\n\",\n\t\t\"\\n\\r\":     \"\\n\\n\",\n\t\t\"\\r\":       \"\\n\",\n\t\t\"\\r\\n\":     \"\\n\",\n\t\t\"\\r\\n\\n\":   \"\\n\\n\",\n\t\t\"\\r\\n\\r\":   \"\\n\\n\",\n\t\t\"\\r\\n\\r\\n\": \"\\n\\n\",\n\t\t\"\\r\\r\":     \"\\n\\n\",\n\t\t\"\\r\\r\\n\":   \"\\n\\n\",\n\t\t\"\\r\\r\\n\\n\": \"\\n\\n\\n\",\n\t\t\"\\r\\r\\r\\n\": \"\\n\\n\\n\",\n\t\t\"\\r \\n\":    \"\\n \\n\",\n\t\t\"xyz\":      \"xyz\",\n\t}\n\tfor in, want := range testCases {\n\t\tif got := string(convertNewlines([]byte(in))); got != want {\n\t\t\tt.Errorf(\"input %q: got %q, want %q\", in, got, want)\n\t\t}\n\t}\n}\n\nfunc TestReaderEdgeCases(t *testing.T) {\n\tconst s = \"<p>An io.Reader can return (0, nil) or (n, io.EOF).</p>\"\n\ttestCases := []io.Reader{\n\t\t&zeroOneByteReader{s: s},\n\t\t&eofStringsReader{s: s},\n\t\t&stuckReader{},\n\t}\n\tfor i, tc := range testCases {\n\t\tgot := []TokenType{}\n\t\tz := NewTokenizer(tc)\n\t\tfor {\n\t\t\ttt := z.Next()\n\t\t\tif tt == ErrorToken {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tgot = append(got, tt)\n\t\t}\n\t\tif err := z.Err(); err != nil && err != io.EOF {\n\t\t\tif err != io.ErrNoProgress {\n\t\t\t\tt.Errorf(\"i=%d: %v\", i, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\twant := []TokenType{\n\t\t\tStartTagToken,\n\t\t\tTextToken,\n\t\t\tEndTagToken,\n\t\t}\n\t\tif !reflect.DeepEqual(got, want) {\n\t\t\tt.Errorf(\"i=%d: got %v, want %v\", i, got, want)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\n// zeroOneByteReader is like a strings.Reader that alternates between\n// returning 0 bytes and 1 byte at a time.\ntype zeroOneByteReader struct {\n\ts 
string\n\tn int\n}\n\nfunc (r *zeroOneByteReader) Read(p []byte) (int, error) {\n\tif len(p) == 0 {\n\t\treturn 0, nil\n\t}\n\tif len(r.s) == 0 {\n\t\treturn 0, io.EOF\n\t}\n\tr.n++\n\tif r.n%2 != 0 {\n\t\treturn 0, nil\n\t}\n\tp[0], r.s = r.s[0], r.s[1:]\n\treturn 1, nil\n}\n\n// eofStringsReader is like a strings.Reader but can return an (n, err) where\n// n > 0 && err != nil.\ntype eofStringsReader struct {\n\ts string\n}\n\nfunc (r *eofStringsReader) Read(p []byte) (int, error) {\n\tn := copy(p, r.s)\n\tr.s = r.s[n:]\n\tif r.s != \"\" {\n\t\treturn n, nil\n\t}\n\treturn n, io.EOF\n}\n\n// stuckReader is an io.Reader that always returns no data and no error.\ntype stuckReader struct{}\n\nfunc (*stuckReader) Read(p []byte) (int, error) {\n\treturn 0, nil\n}\n\nconst (\n\trawLevel = iota\n\tlowLevel\n\thighLevel\n)\n\nfunc benchmarkTokenizer(b *testing.B, level int) {\n\tbuf, err := ioutil.ReadFile(\"testdata/go1.html\")\n\tif err != nil {\n\t\tb.Fatalf(\"could not read testdata/go1.html: %v\", err)\n\t}\n\tb.SetBytes(int64(len(buf)))\n\truntime.GC()\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tz := NewTokenizer(bytes.NewBuffer(buf))\n\t\tfor {\n\t\t\ttt := z.Next()\n\t\t\tif tt == ErrorToken {\n\t\t\t\tif err := z.Err(); err != nil && err != io.EOF {\n\t\t\t\t\tb.Fatalf(\"tokenizer error: %v\", err)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tswitch level {\n\t\t\tcase rawLevel:\n\t\t\t\t// Calling z.Raw just returns the raw bytes of the token. 
It does\n\t\t\t\t// not unescape &lt; to <, or lower-case tag names and attribute keys.\n\t\t\t\tz.Raw()\n\t\t\tcase lowLevel:\n\t\t\t\t// Caling z.Text, z.TagName and z.TagAttr returns []byte values\n\t\t\t\t// whose contents may change on the next call to z.Next.\n\t\t\t\tswitch tt {\n\t\t\t\tcase TextToken, CommentToken, DoctypeToken:\n\t\t\t\t\tz.Text()\n\t\t\t\tcase StartTagToken, SelfClosingTagToken:\n\t\t\t\t\t_, more := z.TagName()\n\t\t\t\t\tfor more {\n\t\t\t\t\t\t_, _, more = z.TagAttr()\n\t\t\t\t\t}\n\t\t\t\tcase EndTagToken:\n\t\t\t\t\tz.TagName()\n\t\t\t\t}\n\t\t\tcase highLevel:\n\t\t\t\t// Calling z.Token converts []byte values to strings whose validity\n\t\t\t\t// extend beyond the next call to z.Next.\n\t\t\t\tz.Token()\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc BenchmarkRawLevelTokenizer(b *testing.B)  { benchmarkTokenizer(b, rawLevel) }\nfunc BenchmarkLowLevelTokenizer(b *testing.B)  { benchmarkTokenizer(b, lowLevel) }\nfunc BenchmarkHighLevelTokenizer(b *testing.B) { benchmarkTokenizer(b, highLevel) }\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/.gitignore",
    "content": "*~\nh2i/h2i\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/Dockerfile",
    "content": "#\n# This Dockerfile builds a recent curl with HTTP/2 client support, using\n# a recent nghttp2 build.\n#\n# See the Makefile for how to tag it. If Docker and that image is found, the\n# Go tests use this curl binary for integration tests.\n#\n\nFROM ubuntu:trusty\n\nRUN apt-get update && \\\n    apt-get upgrade -y && \\\n    apt-get install -y git-core build-essential wget\n\nRUN apt-get install -y --no-install-recommends \\\n       autotools-dev libtool pkg-config zlib1g-dev \\\n       libcunit1-dev libssl-dev libxml2-dev libevent-dev \\\n       automake autoconf\n\n# The list of packages nghttp2 recommends for h2load:\nRUN apt-get install -y --no-install-recommends make binutils \\\n        autoconf automake autotools-dev \\\n        libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \\\n        libev-dev libevent-dev libjansson-dev libjemalloc-dev \\\n        cython python3.4-dev python-setuptools\n\n# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:\nENV NGHTTP2_VER 895da9a\nRUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git\n\nWORKDIR /root/nghttp2\nRUN git reset --hard $NGHTTP2_VER\nRUN autoreconf -i\nRUN automake\nRUN autoconf\nRUN ./configure\nRUN make\nRUN make install\n\nWORKDIR /root\nRUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz\nRUN tar -zxvf curl-7.45.0.tar.gz\nWORKDIR /root/curl-7.45.0\nRUN ./configure --with-ssl --with-nghttp2=/usr/local\nRUN make\nRUN make install\nRUN ldconfig\n\nCMD [\"-h\"]\nENTRYPOINT [\"/usr/local/bin/curl\"]\n\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/Makefile",
    "content": "curlimage:\n\tdocker build -t gohttp2/curl .\n\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/README",
    "content": "This is a work-in-progress HTTP/2 implementation for Go.\n\nIt will eventually live in the Go standard library and won't require\nany changes to your code to use.  It will just be automatic.\n\nStatus:\n\n* The server support is pretty good. A few things are missing\n  but are being worked on.\n* The client work has just started but shares a lot of code\n  is coming along much quicker.\n\nDocs are at https://godoc.org/golang.org/x/net/http2\n\nDemo test server at https://http2.golang.org/\n\nHelp & bug reports welcome!\n\nContributing: https://golang.org/doc/contribute.html\nBugs:         https://golang.org/issue/new?title=x/net/http2:+\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/ciphers.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\n// A list of the possible cipher suite ids. Taken from\n// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt\n\nconst (\n\tcipher_TLS_NULL_WITH_NULL_NULL               uint16 = 0x0000\n\tcipher_TLS_RSA_WITH_NULL_MD5                 uint16 = 0x0001\n\tcipher_TLS_RSA_WITH_NULL_SHA                 uint16 = 0x0002\n\tcipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5        uint16 = 0x0003\n\tcipher_TLS_RSA_WITH_RC4_128_MD5              uint16 = 0x0004\n\tcipher_TLS_RSA_WITH_RC4_128_SHA              uint16 = 0x0005\n\tcipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5    uint16 = 0x0006\n\tcipher_TLS_RSA_WITH_IDEA_CBC_SHA             uint16 = 0x0007\n\tcipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA     uint16 = 0x0008\n\tcipher_TLS_RSA_WITH_DES_CBC_SHA              uint16 = 0x0009\n\tcipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA         uint16 = 0x000A\n\tcipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA  uint16 = 0x000B\n\tcipher_TLS_DH_DSS_WITH_DES_CBC_SHA           uint16 = 0x000C\n\tcipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA      uint16 = 0x000D\n\tcipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA  uint16 = 0x000E\n\tcipher_TLS_DH_RSA_WITH_DES_CBC_SHA           uint16 = 0x000F\n\tcipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA      uint16 = 0x0010\n\tcipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011\n\tcipher_TLS_DHE_DSS_WITH_DES_CBC_SHA          uint16 = 0x0012\n\tcipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA     uint16 = 0x0013\n\tcipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014\n\tcipher_TLS_DHE_RSA_WITH_DES_CBC_SHA          uint16 = 0x0015\n\tcipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA     uint16 = 0x0016\n\tcipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5    uint16 = 0x0017\n\tcipher_TLS_DH_anon_WITH_RC4_128_MD5          uint16 = 0x0018\n\tcipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 
0x0019\n\tcipher_TLS_DH_anon_WITH_DES_CBC_SHA          uint16 = 0x001A\n\tcipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA     uint16 = 0x001B\n\t// Reserved uint16 =  0x001C-1D\n\tcipher_TLS_KRB5_WITH_DES_CBC_SHA             uint16 = 0x001E\n\tcipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA        uint16 = 0x001F\n\tcipher_TLS_KRB5_WITH_RC4_128_SHA             uint16 = 0x0020\n\tcipher_TLS_KRB5_WITH_IDEA_CBC_SHA            uint16 = 0x0021\n\tcipher_TLS_KRB5_WITH_DES_CBC_MD5             uint16 = 0x0022\n\tcipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5        uint16 = 0x0023\n\tcipher_TLS_KRB5_WITH_RC4_128_MD5             uint16 = 0x0024\n\tcipher_TLS_KRB5_WITH_IDEA_CBC_MD5            uint16 = 0x0025\n\tcipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA   uint16 = 0x0026\n\tcipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA   uint16 = 0x0027\n\tcipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA       uint16 = 0x0028\n\tcipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5   uint16 = 0x0029\n\tcipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5   uint16 = 0x002A\n\tcipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5       uint16 = 0x002B\n\tcipher_TLS_PSK_WITH_NULL_SHA                 uint16 = 0x002C\n\tcipher_TLS_DHE_PSK_WITH_NULL_SHA             uint16 = 0x002D\n\tcipher_TLS_RSA_PSK_WITH_NULL_SHA             uint16 = 0x002E\n\tcipher_TLS_RSA_WITH_AES_128_CBC_SHA          uint16 = 0x002F\n\tcipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA       uint16 = 0x0030\n\tcipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA       uint16 = 0x0031\n\tcipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA      uint16 = 0x0032\n\tcipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA      uint16 = 0x0033\n\tcipher_TLS_DH_anon_WITH_AES_128_CBC_SHA      uint16 = 0x0034\n\tcipher_TLS_RSA_WITH_AES_256_CBC_SHA          uint16 = 0x0035\n\tcipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA       uint16 = 0x0036\n\tcipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA       uint16 = 0x0037\n\tcipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA      uint16 = 0x0038\n\tcipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA      uint16 = 
0x0039\n\tcipher_TLS_DH_anon_WITH_AES_256_CBC_SHA      uint16 = 0x003A\n\tcipher_TLS_RSA_WITH_NULL_SHA256              uint16 = 0x003B\n\tcipher_TLS_RSA_WITH_AES_128_CBC_SHA256       uint16 = 0x003C\n\tcipher_TLS_RSA_WITH_AES_256_CBC_SHA256       uint16 = 0x003D\n\tcipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256    uint16 = 0x003E\n\tcipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256    uint16 = 0x003F\n\tcipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256   uint16 = 0x0040\n\tcipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA     uint16 = 0x0041\n\tcipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA  uint16 = 0x0042\n\tcipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA  uint16 = 0x0043\n\tcipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044\n\tcipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045\n\tcipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046\n\t// Reserved uint16 =  0x0047-4F\n\t// Reserved uint16 =  0x0050-58\n\t// Reserved uint16 =  0x0059-5C\n\t// Unassigned uint16 =  0x005D-5F\n\t// Reserved uint16 =  0x0060-66\n\tcipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067\n\tcipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256  uint16 = 0x0068\n\tcipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256  uint16 = 0x0069\n\tcipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A\n\tcipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B\n\tcipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C\n\tcipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D\n\t// Unassigned uint16 =  0x006E-83\n\tcipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA        uint16 = 0x0084\n\tcipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA     uint16 = 0x0085\n\tcipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA     uint16 = 0x0086\n\tcipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA    uint16 = 0x0087\n\tcipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA    uint16 = 0x0088\n\tcipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA    uint16 = 0x0089\n\tcipher_TLS_PSK_WITH_RC4_128_SHA                 uint16 = 0x008A\n\tcipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA     
       uint16 = 0x008B\n\tcipher_TLS_PSK_WITH_AES_128_CBC_SHA             uint16 = 0x008C\n\tcipher_TLS_PSK_WITH_AES_256_CBC_SHA             uint16 = 0x008D\n\tcipher_TLS_DHE_PSK_WITH_RC4_128_SHA             uint16 = 0x008E\n\tcipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA        uint16 = 0x008F\n\tcipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA         uint16 = 0x0090\n\tcipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA         uint16 = 0x0091\n\tcipher_TLS_RSA_PSK_WITH_RC4_128_SHA             uint16 = 0x0092\n\tcipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA        uint16 = 0x0093\n\tcipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA         uint16 = 0x0094\n\tcipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA         uint16 = 0x0095\n\tcipher_TLS_RSA_WITH_SEED_CBC_SHA                uint16 = 0x0096\n\tcipher_TLS_DH_DSS_WITH_SEED_CBC_SHA             uint16 = 0x0097\n\tcipher_TLS_DH_RSA_WITH_SEED_CBC_SHA             uint16 = 0x0098\n\tcipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA            uint16 = 0x0099\n\tcipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA            uint16 = 0x009A\n\tcipher_TLS_DH_anon_WITH_SEED_CBC_SHA            uint16 = 0x009B\n\tcipher_TLS_RSA_WITH_AES_128_GCM_SHA256          uint16 = 0x009C\n\tcipher_TLS_RSA_WITH_AES_256_GCM_SHA384          uint16 = 0x009D\n\tcipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256      uint16 = 0x009E\n\tcipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384      uint16 = 0x009F\n\tcipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256       uint16 = 0x00A0\n\tcipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384       uint16 = 0x00A1\n\tcipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256      uint16 = 0x00A2\n\tcipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384      uint16 = 0x00A3\n\tcipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256       uint16 = 0x00A4\n\tcipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384       uint16 = 0x00A5\n\tcipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256      uint16 = 0x00A6\n\tcipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384      uint16 = 0x00A7\n\tcipher_TLS_PSK_WITH_AES_128_GCM_SHA256          uint16 = 
0x00A8\n\tcipher_TLS_PSK_WITH_AES_256_GCM_SHA384          uint16 = 0x00A9\n\tcipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256      uint16 = 0x00AA\n\tcipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384      uint16 = 0x00AB\n\tcipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256      uint16 = 0x00AC\n\tcipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384      uint16 = 0x00AD\n\tcipher_TLS_PSK_WITH_AES_128_CBC_SHA256          uint16 = 0x00AE\n\tcipher_TLS_PSK_WITH_AES_256_CBC_SHA384          uint16 = 0x00AF\n\tcipher_TLS_PSK_WITH_NULL_SHA256                 uint16 = 0x00B0\n\tcipher_TLS_PSK_WITH_NULL_SHA384                 uint16 = 0x00B1\n\tcipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256      uint16 = 0x00B2\n\tcipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384      uint16 = 0x00B3\n\tcipher_TLS_DHE_PSK_WITH_NULL_SHA256             uint16 = 0x00B4\n\tcipher_TLS_DHE_PSK_WITH_NULL_SHA384             uint16 = 0x00B5\n\tcipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256      uint16 = 0x00B6\n\tcipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384      uint16 = 0x00B7\n\tcipher_TLS_RSA_PSK_WITH_NULL_SHA256             uint16 = 0x00B8\n\tcipher_TLS_RSA_PSK_WITH_NULL_SHA384             uint16 = 0x00B9\n\tcipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256     uint16 = 0x00BA\n\tcipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256  uint16 = 0x00BB\n\tcipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256  uint16 = 0x00BC\n\tcipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD\n\tcipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE\n\tcipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF\n\tcipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256     uint16 = 0x00C0\n\tcipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256  uint16 = 0x00C1\n\tcipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256  uint16 = 0x00C2\n\tcipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3\n\tcipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4\n\tcipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5\n\t// Unassigned uint16 =  
0x00C6-FE\n\tcipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF\n\t// Unassigned uint16 =  0x01-55,*\n\tcipher_TLS_FALLBACK_SCSV uint16 = 0x5600\n\t// Unassigned                                   uint16 = 0x5601 - 0xC000\n\tcipher_TLS_ECDH_ECDSA_WITH_NULL_SHA                 uint16 = 0xC001\n\tcipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA              uint16 = 0xC002\n\tcipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA         uint16 = 0xC003\n\tcipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA          uint16 = 0xC004\n\tcipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA          uint16 = 0xC005\n\tcipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA                uint16 = 0xC006\n\tcipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA             uint16 = 0xC007\n\tcipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA        uint16 = 0xC008\n\tcipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA         uint16 = 0xC009\n\tcipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA         uint16 = 0xC00A\n\tcipher_TLS_ECDH_RSA_WITH_NULL_SHA                   uint16 = 0xC00B\n\tcipher_TLS_ECDH_RSA_WITH_RC4_128_SHA                uint16 = 0xC00C\n\tcipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA           uint16 = 0xC00D\n\tcipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA            uint16 = 0xC00E\n\tcipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA            uint16 = 0xC00F\n\tcipher_TLS_ECDHE_RSA_WITH_NULL_SHA                  uint16 = 0xC010\n\tcipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA               uint16 = 0xC011\n\tcipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA          uint16 = 0xC012\n\tcipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA           uint16 = 0xC013\n\tcipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA           uint16 = 0xC014\n\tcipher_TLS_ECDH_anon_WITH_NULL_SHA                  uint16 = 0xC015\n\tcipher_TLS_ECDH_anon_WITH_RC4_128_SHA               uint16 = 0xC016\n\tcipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA          uint16 = 0xC017\n\tcipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA           uint16 = 0xC018\n\tcipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA           uint16 = 
0xC019\n\tcipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA            uint16 = 0xC01A\n\tcipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA        uint16 = 0xC01B\n\tcipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA        uint16 = 0xC01C\n\tcipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA             uint16 = 0xC01D\n\tcipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA         uint16 = 0xC01E\n\tcipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA         uint16 = 0xC01F\n\tcipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA             uint16 = 0xC020\n\tcipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA         uint16 = 0xC021\n\tcipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA         uint16 = 0xC022\n\tcipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256      uint16 = 0xC023\n\tcipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384      uint16 = 0xC024\n\tcipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256       uint16 = 0xC025\n\tcipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384       uint16 = 0xC026\n\tcipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256        uint16 = 0xC027\n\tcipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384        uint16 = 0xC028\n\tcipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256         uint16 = 0xC029\n\tcipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384         uint16 = 0xC02A\n\tcipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256      uint16 = 0xC02B\n\tcipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384      uint16 = 0xC02C\n\tcipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256       uint16 = 0xC02D\n\tcipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384       uint16 = 0xC02E\n\tcipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256        uint16 = 0xC02F\n\tcipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384        uint16 = 0xC030\n\tcipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256         uint16 = 0xC031\n\tcipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384         uint16 = 0xC032\n\tcipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA               uint16 = 0xC033\n\tcipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA          uint16 = 0xC034\n\tcipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA           uint16 = 
0xC035\n\tcipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA           uint16 = 0xC036\n\tcipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256        uint16 = 0xC037\n\tcipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384        uint16 = 0xC038\n\tcipher_TLS_ECDHE_PSK_WITH_NULL_SHA                  uint16 = 0xC039\n\tcipher_TLS_ECDHE_PSK_WITH_NULL_SHA256               uint16 = 0xC03A\n\tcipher_TLS_ECDHE_PSK_WITH_NULL_SHA384               uint16 = 0xC03B\n\tcipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256             uint16 = 0xC03C\n\tcipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384             uint16 = 0xC03D\n\tcipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256          uint16 = 0xC03E\n\tcipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384          uint16 = 0xC03F\n\tcipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256          uint16 = 0xC040\n\tcipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384          uint16 = 0xC041\n\tcipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256         uint16 = 0xC042\n\tcipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384         uint16 = 0xC043\n\tcipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256         uint16 = 0xC044\n\tcipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384         uint16 = 0xC045\n\tcipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256         uint16 = 0xC046\n\tcipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384         uint16 = 0xC047\n\tcipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256     uint16 = 0xC048\n\tcipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384     uint16 = 0xC049\n\tcipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256      uint16 = 0xC04A\n\tcipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384      uint16 = 0xC04B\n\tcipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256       uint16 = 0xC04C\n\tcipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384       uint16 = 0xC04D\n\tcipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256        uint16 = 0xC04E\n\tcipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384        uint16 = 0xC04F\n\tcipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256             uint16 = 0xC050\n\tcipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384             uint16 = 
0xC051\n\tcipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256         uint16 = 0xC052\n\tcipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384         uint16 = 0xC053\n\tcipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256          uint16 = 0xC054\n\tcipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384          uint16 = 0xC055\n\tcipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256         uint16 = 0xC056\n\tcipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384         uint16 = 0xC057\n\tcipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256          uint16 = 0xC058\n\tcipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384          uint16 = 0xC059\n\tcipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256         uint16 = 0xC05A\n\tcipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384         uint16 = 0xC05B\n\tcipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256     uint16 = 0xC05C\n\tcipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384     uint16 = 0xC05D\n\tcipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256      uint16 = 0xC05E\n\tcipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384      uint16 = 0xC05F\n\tcipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256       uint16 = 0xC060\n\tcipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384       uint16 = 0xC061\n\tcipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256        uint16 = 0xC062\n\tcipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384        uint16 = 0xC063\n\tcipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256             uint16 = 0xC064\n\tcipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384             uint16 = 0xC065\n\tcipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256         uint16 = 0xC066\n\tcipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384         uint16 = 0xC067\n\tcipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256         uint16 = 0xC068\n\tcipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384         uint16 = 0xC069\n\tcipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256             uint16 = 0xC06A\n\tcipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384             uint16 = 0xC06B\n\tcipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256         uint16 = 0xC06C\n\tcipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384         uint16 = 
0xC06D\n\tcipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256         uint16 = 0xC06E\n\tcipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384         uint16 = 0xC06F\n\tcipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256       uint16 = 0xC070\n\tcipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384       uint16 = 0xC071\n\tcipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072\n\tcipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073\n\tcipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256  uint16 = 0xC074\n\tcipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384  uint16 = 0xC075\n\tcipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256   uint16 = 0xC076\n\tcipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384   uint16 = 0xC077\n\tcipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256    uint16 = 0xC078\n\tcipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384    uint16 = 0xC079\n\tcipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256         uint16 = 0xC07A\n\tcipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384         uint16 = 0xC07B\n\tcipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256     uint16 = 0xC07C\n\tcipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384     uint16 = 0xC07D\n\tcipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256      uint16 = 0xC07E\n\tcipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384      uint16 = 0xC07F\n\tcipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256     uint16 = 0xC080\n\tcipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384     uint16 = 0xC081\n\tcipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256      uint16 = 0xC082\n\tcipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384      uint16 = 0xC083\n\tcipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256     uint16 = 0xC084\n\tcipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384     uint16 = 0xC085\n\tcipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086\n\tcipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087\n\tcipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256  uint16 = 0xC088\n\tcipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384  uint16 = 
0xC089\n\tcipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256   uint16 = 0xC08A\n\tcipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384   uint16 = 0xC08B\n\tcipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256    uint16 = 0xC08C\n\tcipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384    uint16 = 0xC08D\n\tcipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256         uint16 = 0xC08E\n\tcipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384         uint16 = 0xC08F\n\tcipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256     uint16 = 0xC090\n\tcipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384     uint16 = 0xC091\n\tcipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256     uint16 = 0xC092\n\tcipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384     uint16 = 0xC093\n\tcipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256         uint16 = 0xC094\n\tcipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384         uint16 = 0xC095\n\tcipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256     uint16 = 0xC096\n\tcipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384     uint16 = 0xC097\n\tcipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256     uint16 = 0xC098\n\tcipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384     uint16 = 0xC099\n\tcipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256   uint16 = 0xC09A\n\tcipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384   uint16 = 0xC09B\n\tcipher_TLS_RSA_WITH_AES_128_CCM                     uint16 = 0xC09C\n\tcipher_TLS_RSA_WITH_AES_256_CCM                     uint16 = 0xC09D\n\tcipher_TLS_DHE_RSA_WITH_AES_128_CCM                 uint16 = 0xC09E\n\tcipher_TLS_DHE_RSA_WITH_AES_256_CCM                 uint16 = 0xC09F\n\tcipher_TLS_RSA_WITH_AES_128_CCM_8                   uint16 = 0xC0A0\n\tcipher_TLS_RSA_WITH_AES_256_CCM_8                   uint16 = 0xC0A1\n\tcipher_TLS_DHE_RSA_WITH_AES_128_CCM_8               uint16 = 0xC0A2\n\tcipher_TLS_DHE_RSA_WITH_AES_256_CCM_8               uint16 = 0xC0A3\n\tcipher_TLS_PSK_WITH_AES_128_CCM                     uint16 = 0xC0A4\n\tcipher_TLS_PSK_WITH_AES_256_CCM                     uint16 = 
0xC0A5\n\tcipher_TLS_DHE_PSK_WITH_AES_128_CCM                 uint16 = 0xC0A6\n\tcipher_TLS_DHE_PSK_WITH_AES_256_CCM                 uint16 = 0xC0A7\n\tcipher_TLS_PSK_WITH_AES_128_CCM_8                   uint16 = 0xC0A8\n\tcipher_TLS_PSK_WITH_AES_256_CCM_8                   uint16 = 0xC0A9\n\tcipher_TLS_PSK_DHE_WITH_AES_128_CCM_8               uint16 = 0xC0AA\n\tcipher_TLS_PSK_DHE_WITH_AES_256_CCM_8               uint16 = 0xC0AB\n\tcipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM             uint16 = 0xC0AC\n\tcipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM             uint16 = 0xC0AD\n\tcipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8           uint16 = 0xC0AE\n\tcipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8           uint16 = 0xC0AF\n\t// Unassigned uint16 =  0xC0B0-FF\n\t// Unassigned uint16 =  0xC1-CB,*\n\t// Unassigned uint16 =  0xCC00-A7\n\tcipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256   uint16 = 0xCCA8\n\tcipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9\n\tcipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256     uint16 = 0xCCAA\n\tcipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256         uint16 = 0xCCAB\n\tcipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256   uint16 = 0xCCAC\n\tcipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256     uint16 = 0xCCAD\n\tcipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256     uint16 = 0xCCAE\n)\n\n// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.\n// References:\n// https://tools.ietf.org/html/rfc7540#appendix-A\n// Reject cipher suites from Appendix A.\n// \"This list includes those cipher suites that do not\n// offer an ephemeral key exchange and those that are\n// based on the TLS null, stream or block cipher type\"\nfunc isBadCipher(cipher uint16) bool {\n\tswitch cipher {\n\tcase 
cipher_TLS_NULL_WITH_NULL_NULL,\n\t\tcipher_TLS_RSA_WITH_NULL_MD5,\n\t\tcipher_TLS_RSA_WITH_NULL_SHA,\n\t\tcipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,\n\t\tcipher_TLS_RSA_WITH_RC4_128_MD5,\n\t\tcipher_TLS_RSA_WITH_RC4_128_SHA,\n\t\tcipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,\n\t\tcipher_TLS_RSA_WITH_IDEA_CBC_SHA,\n\t\tcipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,\n\t\tcipher_TLS_RSA_WITH_DES_CBC_SHA,\n\t\tcipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,\n\t\tcipher_TLS_DH_DSS_WITH_DES_CBC_SHA,\n\t\tcipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,\n\t\tcipher_TLS_DH_RSA_WITH_DES_CBC_SHA,\n\t\tcipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,\n\t\tcipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,\n\t\tcipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,\n\t\tcipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,\n\t\tcipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,\n\t\tcipher_TLS_DH_anon_WITH_RC4_128_MD5,\n\t\tcipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,\n\t\tcipher_TLS_DH_anon_WITH_DES_CBC_SHA,\n\t\tcipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_KRB5_WITH_DES_CBC_SHA,\n\t\tcipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_KRB5_WITH_RC4_128_SHA,\n\t\tcipher_TLS_KRB5_WITH_IDEA_CBC_SHA,\n\t\tcipher_TLS_KRB5_WITH_DES_CBC_MD5,\n\t\tcipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,\n\t\tcipher_TLS_KRB5_WITH_RC4_128_MD5,\n\t\tcipher_TLS_KRB5_WITH_IDEA_CBC_MD5,\n\t\tcipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,\n\t\tcipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,\n\t\tcipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,\n\t\tcipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,\n\t\tcipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,\n\t\tcipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,\n\t\tcipher_TLS_PSK_WITH_NULL_SHA,\n\t\tcipher_TLS_DHE_PSK_WITH_NULL_SHA,\n\t\tcipher_TLS_RSA_PSK_WITH_NULL_SHA,\n\t\tcipher_TLS_RSA_WITH_AES_128_CBC_SHA,\n\t\tcipher_TL
S_DH_DSS_WITH_AES_128_CBC_SHA,\n\t\tcipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,\n\t\tcipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,\n\t\tcipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,\n\t\tcipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,\n\t\tcipher_TLS_RSA_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_RSA_WITH_NULL_SHA256,\n\t\tcipher_TLS_RSA_WITH_AES_128_CBC_SHA256,\n\t\tcipher_TLS_RSA_WITH_AES_256_CBC_SHA256,\n\t\tcipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,\n\t\tcipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,\n\t\tcipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,\n\t\tcipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,\n\t\tcipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,\n\t\tcipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,\n\t\tcipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,\n\t\tcipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,\n\t\tcipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,\n\t\tcipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,\n\t\tcipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,\n\t\tcipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,\n\t\tcipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,\n\t\tcipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,\n\t\tcipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,\n\t\tcipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,\n\t\tcipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,\n\t\tcipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,\n\t\tcipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,\n\t\tcipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,\n\t\tcipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,\n\t\tcipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,\n\t\tcipher_TLS_PSK_WITH_RC4_128_SHA,\n\t\tcipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_PSK_WITH_AES_128_CBC_SHA,\n\t\tcipher_TLS_PSK_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_DHE_PSK_WITH_RC4_128_SHA,\n\t\tcipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,\n\
t\tcipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_RSA_PSK_WITH_RC4_128_SHA,\n\t\tcipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,\n\t\tcipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_RSA_WITH_SEED_CBC_SHA,\n\t\tcipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,\n\t\tcipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,\n\t\tcipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,\n\t\tcipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,\n\t\tcipher_TLS_DH_anon_WITH_SEED_CBC_SHA,\n\t\tcipher_TLS_RSA_WITH_AES_128_GCM_SHA256,\n\t\tcipher_TLS_RSA_WITH_AES_256_GCM_SHA384,\n\t\tcipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,\n\t\tcipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,\n\t\tcipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,\n\t\tcipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,\n\t\tcipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,\n\t\tcipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,\n\t\tcipher_TLS_PSK_WITH_AES_128_GCM_SHA256,\n\t\tcipher_TLS_PSK_WITH_AES_256_GCM_SHA384,\n\t\tcipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,\n\t\tcipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,\n\t\tcipher_TLS_PSK_WITH_AES_128_CBC_SHA256,\n\t\tcipher_TLS_PSK_WITH_AES_256_CBC_SHA384,\n\t\tcipher_TLS_PSK_WITH_NULL_SHA256,\n\t\tcipher_TLS_PSK_WITH_NULL_SHA384,\n\t\tcipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,\n\t\tcipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,\n\t\tcipher_TLS_DHE_PSK_WITH_NULL_SHA256,\n\t\tcipher_TLS_DHE_PSK_WITH_NULL_SHA384,\n\t\tcipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,\n\t\tcipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,\n\t\tcipher_TLS_RSA_PSK_WITH_NULL_SHA256,\n\t\tcipher_TLS_RSA_PSK_WITH_NULL_SHA384,\n\t\tcipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,\n\t\tcipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,\n\t\tcipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,\n\t\tcipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,\n\t\tcipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,\n\t\tcipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,\n\t\tcipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,\n\t\tcipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,\n\t\
tcipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,\n\t\tcipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,\n\t\tcipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,\n\t\tcipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,\n\t\tcipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,\n\t\tcipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,\n\t\tcipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,\n\t\tcipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,\n\t\tcipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,\n\t\tcipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,\n\t\tcipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,\n\t\tcipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_ECDH_RSA_WITH_NULL_SHA,\n\t\tcipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,\n\t\tcipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,\n\t\tcipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_ECDHE_RSA_WITH_NULL_SHA,\n\t\tcipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,\n\t\tcipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,\n\t\tcipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_ECDH_anon_WITH_NULL_SHA,\n\t\tcipher_TLS_ECDH_anon_WITH_RC4_128_SHA,\n\t\tcipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,\n\t\tcipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,\n\t\tcipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,\n\t\tcipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,\n\t\tcipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,\n\t\tcipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,\n\t\tcipher_TLS_ECDH_ECDSA_WITH_AES
_128_CBC_SHA256,\n\t\tcipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,\n\t\tcipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,\n\t\tcipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,\n\t\tcipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,\n\t\tcipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,\n\t\tcipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,\n\t\tcipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,\n\t\tcipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,\n\t\tcipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,\n\t\tcipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,\n\t\tcipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,\n\t\tcipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,\n\t\tcipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,\n\t\tcipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,\n\t\tcipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,\n\t\tcipher_TLS_ECDHE_PSK_WITH_NULL_SHA,\n\t\tcipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,\n\t\tcipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,\n\t\tcipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,\n\t\tcipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,\n\t\tcipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,\n\t\tcipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,\n\t\tcipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,\n\t\tcipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,\n\t\tcipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,\n\t\tcipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,\n\t\tcipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,\n\t\tcipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,\n\t\tcipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,\n\t\tcipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,\n\t\tcipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,\n\t\tcipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,\n\t\tcipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,\n\t\tcipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,\n\t\tcipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,\n\t\tcipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,\n\t\tcipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,\n\t\tcipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,\n\t\tcipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,\n\t\tcipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,\n\t\t
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,\n\t\tcipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,\n\t\tcipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,\n\t\tcipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,\n\t\tcipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,\n\t\tcipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,\n\t\tcipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,\n\t\tcipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,\n\t\tcipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,\n\t\tcipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,\n\t\tcipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,\n\t\tcipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,\n\t\tcipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,\n\t\tcipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,\n\t\tcipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,\n\t\tcipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,\n\t\tcipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,\n\t\tcipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,\n\t\tcipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,\n\t\tcipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,\n\t\tcipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,\n\t\tcipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,\n\t\tcipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,\n\t\tcipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,\n\t\tcipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,\n\t\tcipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,\n\t\tcipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,\n\t\tcipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,\n\t\tcipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,\n\t\tcipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,\n\t\tcipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,\n\t\tcipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,\n\t\tcipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,\n\t\tcipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,\n\t\tcipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,\n\t\tcipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,\n\t\tcipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,\n\t\tcipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,\n\t\tcipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_
128_GCM_SHA256,\n\t\tcipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,\n\t\tcipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,\n\t\tcipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,\n\t\tcipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,\n\t\tcipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,\n\t\tcipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,\n\t\tcipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,\n\t\tcipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,\n\t\tcipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,\n\t\tcipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,\n\t\tcipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,\n\t\tcipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,\n\t\tcipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,\n\t\tcipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,\n\t\tcipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,\n\t\tcipher_TLS_RSA_WITH_AES_128_CCM,\n\t\tcipher_TLS_RSA_WITH_AES_256_CCM,\n\t\tcipher_TLS_RSA_WITH_AES_128_CCM_8,\n\t\tcipher_TLS_RSA_WITH_AES_256_CCM_8,\n\t\tcipher_TLS_PSK_WITH_AES_128_CCM,\n\t\tcipher_TLS_PSK_WITH_AES_256_CCM,\n\t\tcipher_TLS_PSK_WITH_AES_128_CCM_8,\n\t\tcipher_TLS_PSK_WITH_AES_256_CCM_8:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/ciphers_test.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport \"testing\"\n\nfunc TestIsBadCipherBad(t *testing.T) {\n\tfor _, c := range badCiphers {\n\t\tif !isBadCipher(c) {\n\t\t\tt.Errorf(\"Wrong result for isBadCipher(%d), want true\", c)\n\t\t}\n\t}\n}\n\n// verify we don't give false positives on ciphers not on blacklist\nfunc TestIsBadCipherGood(t *testing.T) {\n\tgoodCiphers := map[uint16]string{\n\t\tcipher_TLS_DHE_RSA_WITH_AES_256_CCM:                \"cipher_TLS_DHE_RSA_WITH_AES_256_CCM\",\n\t\tcipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM:            \"cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM\",\n\t\tcipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256: \"cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256\",\n\t}\n\tfor c, name := range goodCiphers {\n\t\tif isBadCipher(c) {\n\t\t\tt.Errorf(\"Wrong result for isBadCipher(%d) %s, want false\", c, name)\n\t\t}\n\t}\n}\n\n// copied from https://http2.github.io/http2-spec/#BadCipherSuites,\nvar badCiphers = 
[]uint16{\n\tcipher_TLS_NULL_WITH_NULL_NULL,\n\tcipher_TLS_RSA_WITH_NULL_MD5,\n\tcipher_TLS_RSA_WITH_NULL_SHA,\n\tcipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,\n\tcipher_TLS_RSA_WITH_RC4_128_MD5,\n\tcipher_TLS_RSA_WITH_RC4_128_SHA,\n\tcipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,\n\tcipher_TLS_RSA_WITH_IDEA_CBC_SHA,\n\tcipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,\n\tcipher_TLS_RSA_WITH_DES_CBC_SHA,\n\tcipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,\n\tcipher_TLS_DH_DSS_WITH_DES_CBC_SHA,\n\tcipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,\n\tcipher_TLS_DH_RSA_WITH_DES_CBC_SHA,\n\tcipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,\n\tcipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,\n\tcipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,\n\tcipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,\n\tcipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,\n\tcipher_TLS_DH_anon_WITH_RC4_128_MD5,\n\tcipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,\n\tcipher_TLS_DH_anon_WITH_DES_CBC_SHA,\n\tcipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_KRB5_WITH_DES_CBC_SHA,\n\tcipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_KRB5_WITH_RC4_128_SHA,\n\tcipher_TLS_KRB5_WITH_IDEA_CBC_SHA,\n\tcipher_TLS_KRB5_WITH_DES_CBC_MD5,\n\tcipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,\n\tcipher_TLS_KRB5_WITH_RC4_128_MD5,\n\tcipher_TLS_KRB5_WITH_IDEA_CBC_MD5,\n\tcipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,\n\tcipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,\n\tcipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,\n\tcipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,\n\tcipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,\n\tcipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,\n\tcipher_TLS_PSK_WITH_NULL_SHA,\n\tcipher_TLS_DHE_PSK_WITH_NULL_SHA,\n\tcipher_TLS_RSA_PSK_WITH_NULL_SHA,\n\tcipher_TLS_RSA_WITH_AES_128_CBC_SHA,\n\tcipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,\n\tcipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,\n\tci
pher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,\n\tcipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,\n\tcipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,\n\tcipher_TLS_RSA_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_RSA_WITH_NULL_SHA256,\n\tcipher_TLS_RSA_WITH_AES_128_CBC_SHA256,\n\tcipher_TLS_RSA_WITH_AES_256_CBC_SHA256,\n\tcipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,\n\tcipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,\n\tcipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,\n\tcipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,\n\tcipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,\n\tcipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,\n\tcipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,\n\tcipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,\n\tcipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,\n\tcipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,\n\tcipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,\n\tcipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,\n\tcipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,\n\tcipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,\n\tcipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,\n\tcipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,\n\tcipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,\n\tcipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,\n\tcipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,\n\tcipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,\n\tcipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,\n\tcipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,\n\tcipher_TLS_PSK_WITH_RC4_128_SHA,\n\tcipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_PSK_WITH_AES_128_CBC_SHA,\n\tcipher_TLS_PSK_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_DHE_PSK_WITH_RC4_128_SHA,\n\tcipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,\n\tcipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_RSA_PSK_WITH_RC4_128_SHA,\n\tcipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_RSA_PSK_WITH_AES_128_C
BC_SHA,\n\tcipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_RSA_WITH_SEED_CBC_SHA,\n\tcipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,\n\tcipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,\n\tcipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,\n\tcipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,\n\tcipher_TLS_DH_anon_WITH_SEED_CBC_SHA,\n\tcipher_TLS_RSA_WITH_AES_128_GCM_SHA256,\n\tcipher_TLS_RSA_WITH_AES_256_GCM_SHA384,\n\tcipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,\n\tcipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,\n\tcipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,\n\tcipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,\n\tcipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,\n\tcipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,\n\tcipher_TLS_PSK_WITH_AES_128_GCM_SHA256,\n\tcipher_TLS_PSK_WITH_AES_256_GCM_SHA384,\n\tcipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,\n\tcipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,\n\tcipher_TLS_PSK_WITH_AES_128_CBC_SHA256,\n\tcipher_TLS_PSK_WITH_AES_256_CBC_SHA384,\n\tcipher_TLS_PSK_WITH_NULL_SHA256,\n\tcipher_TLS_PSK_WITH_NULL_SHA384,\n\tcipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,\n\tcipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,\n\tcipher_TLS_DHE_PSK_WITH_NULL_SHA256,\n\tcipher_TLS_DHE_PSK_WITH_NULL_SHA384,\n\tcipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,\n\tcipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,\n\tcipher_TLS_RSA_PSK_WITH_NULL_SHA256,\n\tcipher_TLS_RSA_PSK_WITH_NULL_SHA384,\n\tcipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,\n\tcipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,\n\tcipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,\n\tcipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,\n\tcipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,\n\tcipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,\n\tcipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,\n\tcipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,\n\tcipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,\n\tcipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,\n\tcipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,\n\tcipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,\n\tcipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,\n
\tcipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,\n\tcipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,\n\tcipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,\n\tcipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,\n\tcipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,\n\tcipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,\n\tcipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_ECDH_RSA_WITH_NULL_SHA,\n\tcipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,\n\tcipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,\n\tcipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_ECDHE_RSA_WITH_NULL_SHA,\n\tcipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,\n\tcipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,\n\tcipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_ECDH_anon_WITH_NULL_SHA,\n\tcipher_TLS_ECDH_anon_WITH_RC4_128_SHA,\n\tcipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,\n\tcipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,\n\tcipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,\n\tcipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,\n\tcipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,\n\tcipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,\n\tcipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,\n\tcipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,\n\tcipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,\n\tcipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,\n\tcipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,\n\tcipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,\n\tcipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,\n\tcipher_TLS_ECDH_ECD
SA_WITH_AES_256_GCM_SHA384,\n\tcipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,\n\tcipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,\n\tcipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,\n\tcipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,\n\tcipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,\n\tcipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,\n\tcipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,\n\tcipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,\n\tcipher_TLS_ECDHE_PSK_WITH_NULL_SHA,\n\tcipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,\n\tcipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,\n\tcipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,\n\tcipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,\n\tcipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,\n\tcipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,\n\tcipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,\n\tcipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,\n\tcipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,\n\tcipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,\n\tcipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,\n\tcipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,\n\tcipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,\n\tcipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,\n\tcipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,\n\tcipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,\n\tcipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,\n\tcipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,\n\tcipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,\n\tcipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,\n\tcipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,\n\tcipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,\n\tcipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,\n\tcipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,\n\tcipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,\n\tcipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,\n\tcipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,\n\tcipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,\n\tcipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,\n\tcipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,\n\tcipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,\n\tcipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,\n\tcipher_TLS_ECDH_RSA_WITH_ARIA
_128_GCM_SHA256,\n\tcipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,\n\tcipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,\n\tcipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,\n\tcipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,\n\tcipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,\n\tcipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,\n\tcipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,\n\tcipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,\n\tcipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,\n\tcipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,\n\tcipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,\n\tcipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,\n\tcipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,\n\tcipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,\n\tcipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,\n\tcipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,\n\tcipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,\n\tcipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,\n\tcipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,\n\tcipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,\n\tcipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,\n\tcipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,\n\tcipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,\n\tcipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,\n\tcipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,\n\tcipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,\n\tcipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,\n\tcipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,\n\tcipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,\n\tcipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,\n\tcipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,\n\tcipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,\n\tcipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,\n\tcipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,\n\tcipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,\n\tcipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,\n\tcipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,\n\tcipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,\n\tcipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,\n\tcipher_TLS_DHE_
PSK_WITH_CAMELLIA_128_CBC_SHA256,\n\tcipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,\n\tcipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,\n\tcipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,\n\tcipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,\n\tcipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,\n\tcipher_TLS_RSA_WITH_AES_128_CCM,\n\tcipher_TLS_RSA_WITH_AES_256_CCM,\n\tcipher_TLS_RSA_WITH_AES_128_CCM_8,\n\tcipher_TLS_RSA_WITH_AES_256_CCM_8,\n\tcipher_TLS_PSK_WITH_AES_128_CCM,\n\tcipher_TLS_PSK_WITH_AES_256_CCM,\n\tcipher_TLS_PSK_WITH_AES_128_CCM_8,\n\tcipher_TLS_PSK_WITH_AES_256_CCM_8,\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/client_conn_pool.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Transport code's client connection pooling.\n\npackage http2\n\nimport (\n\t\"crypto/tls\"\n\t\"net/http\"\n\t\"sync\"\n)\n\n// ClientConnPool manages a pool of HTTP/2 client connections.\ntype ClientConnPool interface {\n\tGetClientConn(req *http.Request, addr string) (*ClientConn, error)\n\tMarkDead(*ClientConn)\n}\n\n// clientConnPoolIdleCloser is the interface implemented by ClientConnPool\n// implementations which can close their idle connections.\ntype clientConnPoolIdleCloser interface {\n\tClientConnPool\n\tcloseIdleConnections()\n}\n\nvar (\n\t_ clientConnPoolIdleCloser = (*clientConnPool)(nil)\n\t_ clientConnPoolIdleCloser = noDialClientConnPool{}\n)\n\n// TODO: use singleflight for dialing and addConnCalls?\ntype clientConnPool struct {\n\tt *Transport\n\n\tmu sync.Mutex // TODO: maybe switch to RWMutex\n\t// TODO: add support for sharing conns based on cert names\n\t// (e.g. 
share conn for googleapis.com and appspot.com)\n\tconns        map[string][]*ClientConn // key is host:port\n\tdialing      map[string]*dialCall     // currently in-flight dials\n\tkeys         map[*ClientConn][]string\n\taddConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls\n}\n\nfunc (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {\n\treturn p.getClientConn(req, addr, dialOnMiss)\n}\n\nconst (\n\tdialOnMiss   = true\n\tnoDialOnMiss = false\n)\n\nfunc (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {\n\tif isConnectionCloseRequest(req) && dialOnMiss {\n\t\t// It gets its own connection.\n\t\tconst singleUse = true\n\t\tcc, err := p.t.dialClientConn(addr, singleUse)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn cc, nil\n\t}\n\tp.mu.Lock()\n\tfor _, cc := range p.conns[addr] {\n\t\tif cc.CanTakeNewRequest() {\n\t\t\tp.mu.Unlock()\n\t\t\treturn cc, nil\n\t\t}\n\t}\n\tif !dialOnMiss {\n\t\tp.mu.Unlock()\n\t\treturn nil, ErrNoCachedConn\n\t}\n\tcall := p.getStartDialLocked(addr)\n\tp.mu.Unlock()\n\t<-call.done\n\treturn call.res, call.err\n}\n\n// dialCall is an in-flight Transport dial call to a host.\ntype dialCall struct {\n\tp    *clientConnPool\n\tdone chan struct{} // closed when done\n\tres  *ClientConn   // valid after done is closed\n\terr  error         // valid after done is closed\n}\n\n// requires p.mu is held.\nfunc (p *clientConnPool) getStartDialLocked(addr string) *dialCall {\n\tif call, ok := p.dialing[addr]; ok {\n\t\t// A dial is already in-flight. 
Don't start another.\n\t\treturn call\n\t}\n\tcall := &dialCall{p: p, done: make(chan struct{})}\n\tif p.dialing == nil {\n\t\tp.dialing = make(map[string]*dialCall)\n\t}\n\tp.dialing[addr] = call\n\tgo call.dial(addr)\n\treturn call\n}\n\n// run in its own goroutine.\nfunc (c *dialCall) dial(addr string) {\n\tconst singleUse = false // shared conn\n\tc.res, c.err = c.p.t.dialClientConn(addr, singleUse)\n\tclose(c.done)\n\n\tc.p.mu.Lock()\n\tdelete(c.p.dialing, addr)\n\tif c.err == nil {\n\t\tc.p.addConnLocked(addr, c.res)\n\t}\n\tc.p.mu.Unlock()\n}\n\n// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't\n// already exist. It coalesces concurrent calls with the same key.\n// This is used by the http1 Transport code when it creates a new connection. Because\n// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know\n// the protocol), it can get into a situation where it has multiple TLS connections.\n// This code decides which ones live or die.\n// The return value used is whether c was used.\n// c is never closed.\nfunc (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {\n\tp.mu.Lock()\n\tfor _, cc := range p.conns[key] {\n\t\tif cc.CanTakeNewRequest() {\n\t\t\tp.mu.Unlock()\n\t\t\treturn false, nil\n\t\t}\n\t}\n\tcall, dup := p.addConnCalls[key]\n\tif !dup {\n\t\tif p.addConnCalls == nil {\n\t\t\tp.addConnCalls = make(map[string]*addConnCall)\n\t\t}\n\t\tcall = &addConnCall{\n\t\t\tp:    p,\n\t\t\tdone: make(chan struct{}),\n\t\t}\n\t\tp.addConnCalls[key] = call\n\t\tgo call.run(t, key, c)\n\t}\n\tp.mu.Unlock()\n\n\t<-call.done\n\tif call.err != nil {\n\t\treturn false, call.err\n\t}\n\treturn !dup, nil\n}\n\ntype addConnCall struct {\n\tp    *clientConnPool\n\tdone chan struct{} // closed when done\n\terr  error\n}\n\nfunc (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {\n\tcc, err := t.NewClientConn(tc)\n\n\tp := c.p\n\tp.mu.Lock()\n\tif 
err != nil {\n\t\tc.err = err\n\t} else {\n\t\tp.addConnLocked(key, cc)\n\t}\n\tdelete(p.addConnCalls, key)\n\tp.mu.Unlock()\n\tclose(c.done)\n}\n\nfunc (p *clientConnPool) addConn(key string, cc *ClientConn) {\n\tp.mu.Lock()\n\tp.addConnLocked(key, cc)\n\tp.mu.Unlock()\n}\n\n// p.mu must be held\nfunc (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {\n\tfor _, v := range p.conns[key] {\n\t\tif v == cc {\n\t\t\treturn\n\t\t}\n\t}\n\tif p.conns == nil {\n\t\tp.conns = make(map[string][]*ClientConn)\n\t}\n\tif p.keys == nil {\n\t\tp.keys = make(map[*ClientConn][]string)\n\t}\n\tp.conns[key] = append(p.conns[key], cc)\n\tp.keys[cc] = append(p.keys[cc], key)\n}\n\nfunc (p *clientConnPool) MarkDead(cc *ClientConn) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tfor _, key := range p.keys[cc] {\n\t\tvv, ok := p.conns[key]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tnewList := filterOutClientConn(vv, cc)\n\t\tif len(newList) > 0 {\n\t\t\tp.conns[key] = newList\n\t\t} else {\n\t\t\tdelete(p.conns, key)\n\t\t}\n\t}\n\tdelete(p.keys, cc)\n}\n\nfunc (p *clientConnPool) closeIdleConnections() {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\t// TODO: don't close a cc if it was just added to the pool\n\t// milliseconds ago and has never been used. 
There's currently\n\t// a small race window with the HTTP/1 Transport's integration\n\t// where it can add an idle conn just before using it, and\n\t// somebody else can concurrently call CloseIdleConns and\n\t// break some caller's RoundTrip.\n\tfor _, vv := range p.conns {\n\t\tfor _, cc := range vv {\n\t\t\tcc.closeIfIdle()\n\t\t}\n\t}\n}\n\nfunc filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {\n\tout := in[:0]\n\tfor _, v := range in {\n\t\tif v != exclude {\n\t\t\tout = append(out, v)\n\t\t}\n\t}\n\t// If we filtered it out, zero out the last item to prevent\n\t// the GC from seeing it.\n\tif len(in) != len(out) {\n\t\tin[len(in)-1] = nil\n\t}\n\treturn out\n}\n\n// noDialClientConnPool is an implementation of http2.ClientConnPool\n// which never dials. We let the HTTP/1.1 client dial and use its TLS\n// connection instead.\ntype noDialClientConnPool struct{ *clientConnPool }\n\nfunc (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {\n\treturn p.getClientConn(req, addr, noDialOnMiss)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/configure_transport.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.6\n\npackage http2\n\nimport (\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"net/http\"\n)\n\nfunc configureTransport(t1 *http.Transport) (*Transport, error) {\n\tconnPool := new(clientConnPool)\n\tt2 := &Transport{\n\t\tConnPool: noDialClientConnPool{connPool},\n\t\tt1:       t1,\n\t}\n\tconnPool.t = t2\n\tif err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {\n\t\treturn nil, err\n\t}\n\tif t1.TLSClientConfig == nil {\n\t\tt1.TLSClientConfig = new(tls.Config)\n\t}\n\tif !strSliceContains(t1.TLSClientConfig.NextProtos, \"h2\") {\n\t\tt1.TLSClientConfig.NextProtos = append([]string{\"h2\"}, t1.TLSClientConfig.NextProtos...)\n\t}\n\tif !strSliceContains(t1.TLSClientConfig.NextProtos, \"http/1.1\") {\n\t\tt1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, \"http/1.1\")\n\t}\n\tupgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {\n\t\taddr := authorityAddr(\"https\", authority)\n\t\tif used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {\n\t\t\tgo c.Close()\n\t\t\treturn erringRoundTripper{err}\n\t\t} else if !used {\n\t\t\t// Turns out we don't need this c.\n\t\t\t// For example, two goroutines made requests to the same host\n\t\t\t// at the same time, both kicking off TCP dials. 
(since protocol\n\t\t\t// was unknown)\n\t\t\tgo c.Close()\n\t\t}\n\t\treturn t2\n\t}\n\tif m := t1.TLSNextProto; len(m) == 0 {\n\t\tt1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{\n\t\t\t\"h2\": upgradeFn,\n\t\t}\n\t} else {\n\t\tm[\"h2\"] = upgradeFn\n\t}\n\treturn t2, nil\n}\n\n// registerHTTPSProtocol calls Transport.RegisterProtocol but\n// converts panics into errors.\nfunc registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = fmt.Errorf(\"%v\", e)\n\t\t}\n\t}()\n\tt.RegisterProtocol(\"https\", rt)\n\treturn nil\n}\n\n// noDialH2RoundTripper is a RoundTripper which only tries to complete the request\n// if there's already a cached connection to the host.\ntype noDialH2RoundTripper struct{ t *Transport }\n\nfunc (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {\n\tres, err := rt.t.RoundTrip(req)\n\tif err == ErrNoCachedConn {\n\t\treturn nil, http.ErrSkipAltProtocol\n\t}\n\treturn res, err\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/databuffer.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n)\n\n// Buffer chunks are allocated from a pool to reduce pressure on GC.\n// The maximum wasted space per dataBuffer is 2x the largest size class,\n// which happens when the dataBuffer has multiple chunks and there is\n// one unread byte in both the first and last chunks. We use a few size\n// classes to minimize overheads for servers that typically receive very\n// small request bodies.\n//\n// TODO: Benchmark to determine if the pools are necessary. The GC may have\n// improved enough that we can instead allocate chunks like this:\n// make([]byte, max(16<<10, expectedBytesRemaining))\nvar (\n\tdataChunkSizeClasses = []int{\n\t\t1 << 10,\n\t\t2 << 10,\n\t\t4 << 10,\n\t\t8 << 10,\n\t\t16 << 10,\n\t}\n\tdataChunkPools = [...]sync.Pool{\n\t\t{New: func() interface{} { return make([]byte, 1<<10) }},\n\t\t{New: func() interface{} { return make([]byte, 2<<10) }},\n\t\t{New: func() interface{} { return make([]byte, 4<<10) }},\n\t\t{New: func() interface{} { return make([]byte, 8<<10) }},\n\t\t{New: func() interface{} { return make([]byte, 16<<10) }},\n\t}\n)\n\nfunc getDataBufferChunk(size int64) []byte {\n\ti := 0\n\tfor ; i < len(dataChunkSizeClasses)-1; i++ {\n\t\tif size <= int64(dataChunkSizeClasses[i]) {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn dataChunkPools[i].Get().([]byte)\n}\n\nfunc putDataBufferChunk(p []byte) {\n\tfor i, n := range dataChunkSizeClasses {\n\t\tif len(p) == n {\n\t\t\tdataChunkPools[i].Put(p)\n\t\t\treturn\n\t\t}\n\t}\n\tpanic(fmt.Sprintf(\"unexpected buffer len=%v\", len(p)))\n}\n\n// dataBuffer is an io.ReadWriter backed by a list of data chunks.\n// Each dataBuffer is used to read DATA frames on a single stream.\n// The buffer is divided into chunks so the server can limit the\n// total memory used by a 
single connection without limiting the\n// request body size on any single stream.\ntype dataBuffer struct {\n\tchunks   [][]byte\n\tr        int   // next byte to read is chunks[0][r]\n\tw        int   // next byte to write is chunks[len(chunks)-1][w]\n\tsize     int   // total buffered bytes\n\texpected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)\n}\n\nvar errReadEmpty = errors.New(\"read from empty dataBuffer\")\n\n// Read copies bytes from the buffer into p.\n// It is an error to read when no data is available.\nfunc (b *dataBuffer) Read(p []byte) (int, error) {\n\tif b.size == 0 {\n\t\treturn 0, errReadEmpty\n\t}\n\tvar ntotal int\n\tfor len(p) > 0 && b.size > 0 {\n\t\treadFrom := b.bytesFromFirstChunk()\n\t\tn := copy(p, readFrom)\n\t\tp = p[n:]\n\t\tntotal += n\n\t\tb.r += n\n\t\tb.size -= n\n\t\t// If the first chunk has been consumed, advance to the next chunk.\n\t\tif b.r == len(b.chunks[0]) {\n\t\t\tputDataBufferChunk(b.chunks[0])\n\t\t\tend := len(b.chunks) - 1\n\t\t\tcopy(b.chunks[:end], b.chunks[1:])\n\t\t\tb.chunks[end] = nil\n\t\t\tb.chunks = b.chunks[:end]\n\t\t\tb.r = 0\n\t\t}\n\t}\n\treturn ntotal, nil\n}\n\nfunc (b *dataBuffer) bytesFromFirstChunk() []byte {\n\tif len(b.chunks) == 1 {\n\t\treturn b.chunks[0][b.r:b.w]\n\t}\n\treturn b.chunks[0][b.r:]\n}\n\n// Len returns the number of bytes of the unread portion of the buffer.\nfunc (b *dataBuffer) Len() int {\n\treturn b.size\n}\n\n// Write appends p to the buffer.\nfunc (b *dataBuffer) Write(p []byte) (int, error) {\n\tntotal := len(p)\n\tfor len(p) > 0 {\n\t\t// If the last chunk is empty, allocate a new chunk. Try to allocate\n\t\t// enough to fully copy p plus any additional bytes we expect to\n\t\t// receive. 
However, this may allocate less than len(p).\n\t\twant := int64(len(p))\n\t\tif b.expected > want {\n\t\t\twant = b.expected\n\t\t}\n\t\tchunk := b.lastChunkOrAlloc(want)\n\t\tn := copy(chunk[b.w:], p)\n\t\tp = p[n:]\n\t\tb.w += n\n\t\tb.size += n\n\t\tb.expected -= int64(n)\n\t}\n\treturn ntotal, nil\n}\n\nfunc (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {\n\tif len(b.chunks) != 0 {\n\t\tlast := b.chunks[len(b.chunks)-1]\n\t\tif b.w < len(last) {\n\t\t\treturn last\n\t\t}\n\t}\n\tchunk := getDataBufferChunk(want)\n\tb.chunks = append(b.chunks, chunk)\n\tb.w = 0\n\treturn chunk\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/databuffer_test.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.7\n\npackage http2\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc fmtDataChunk(chunk []byte) string {\n\tout := \"\"\n\tvar last byte\n\tvar count int\n\tfor _, c := range chunk {\n\t\tif c != last {\n\t\t\tif count > 0 {\n\t\t\t\tout += fmt.Sprintf(\" x %d \", count)\n\t\t\t\tcount = 0\n\t\t\t}\n\t\t\tout += string([]byte{c})\n\t\t\tlast = c\n\t\t}\n\t\tcount++\n\t}\n\tif count > 0 {\n\t\tout += fmt.Sprintf(\" x %d\", count)\n\t}\n\treturn out\n}\n\nfunc fmtDataChunks(chunks [][]byte) string {\n\tvar out string\n\tfor _, chunk := range chunks {\n\t\tout += fmt.Sprintf(\"{%q}\", fmtDataChunk(chunk))\n\t}\n\treturn out\n}\n\nfunc testDataBuffer(t *testing.T, wantBytes []byte, setup func(t *testing.T) *dataBuffer) {\n\t// Run setup, then read the remaining bytes from the dataBuffer and check\n\t// that they match wantBytes. 
We use different read sizes to check corner\n\t// cases in Read.\n\tfor _, readSize := range []int{1, 2, 1 * 1024, 32 * 1024} {\n\t\tt.Run(fmt.Sprintf(\"ReadSize=%d\", readSize), func(t *testing.T) {\n\t\t\tb := setup(t)\n\t\t\tbuf := make([]byte, readSize)\n\t\t\tvar gotRead bytes.Buffer\n\t\t\tfor {\n\t\t\t\tn, err := b.Read(buf)\n\t\t\t\tgotRead.Write(buf[:n])\n\t\t\t\tif err == errReadEmpty {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"error after %v bytes: %v\", gotRead.Len(), err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif got, want := gotRead.Bytes(), wantBytes; !bytes.Equal(got, want) {\n\t\t\t\tt.Errorf(\"FinalRead=%q, want %q\", fmtDataChunk(got), fmtDataChunk(want))\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestDataBufferAllocation(t *testing.T) {\n\twrites := [][]byte{\n\t\tbytes.Repeat([]byte(\"a\"), 1*1024-1),\n\t\t[]byte(\"a\"),\n\t\tbytes.Repeat([]byte(\"b\"), 4*1024-1),\n\t\t[]byte(\"b\"),\n\t\tbytes.Repeat([]byte(\"c\"), 8*1024-1),\n\t\t[]byte(\"c\"),\n\t\tbytes.Repeat([]byte(\"d\"), 16*1024-1),\n\t\t[]byte(\"d\"),\n\t\tbytes.Repeat([]byte(\"e\"), 32*1024),\n\t}\n\tvar wantRead bytes.Buffer\n\tfor _, p := range writes {\n\t\twantRead.Write(p)\n\t}\n\n\ttestDataBuffer(t, wantRead.Bytes(), func(t *testing.T) *dataBuffer {\n\t\tb := &dataBuffer{}\n\t\tfor _, p := range writes {\n\t\t\tif n, err := b.Write(p); n != len(p) || err != nil {\n\t\t\t\tt.Fatalf(\"Write(%q x %d)=%v,%v want %v,nil\", p[:1], len(p), n, err, len(p))\n\t\t\t}\n\t\t}\n\t\twant := [][]byte{\n\t\t\tbytes.Repeat([]byte(\"a\"), 1*1024),\n\t\t\tbytes.Repeat([]byte(\"b\"), 4*1024),\n\t\t\tbytes.Repeat([]byte(\"c\"), 8*1024),\n\t\t\tbytes.Repeat([]byte(\"d\"), 16*1024),\n\t\t\tbytes.Repeat([]byte(\"e\"), 16*1024),\n\t\t\tbytes.Repeat([]byte(\"e\"), 16*1024),\n\t\t}\n\t\tif !reflect.DeepEqual(b.chunks, want) {\n\t\t\tt.Errorf(\"dataBuffer.chunks\\ngot:  %s\\nwant: %s\", fmtDataChunks(b.chunks), fmtDataChunks(want))\n\t\t}\n\t\treturn b\n\t})\n}\n\nfunc 
TestDataBufferAllocationWithExpected(t *testing.T) {\n\twrites := [][]byte{\n\t\tbytes.Repeat([]byte(\"a\"), 1*1024), // allocates 16KB\n\t\tbytes.Repeat([]byte(\"b\"), 14*1024),\n\t\tbytes.Repeat([]byte(\"c\"), 15*1024), // allocates 16KB more\n\t\tbytes.Repeat([]byte(\"d\"), 2*1024),\n\t\tbytes.Repeat([]byte(\"e\"), 1*1024), // overflows 32KB expectation, allocates just 1KB\n\t}\n\tvar wantRead bytes.Buffer\n\tfor _, p := range writes {\n\t\twantRead.Write(p)\n\t}\n\n\ttestDataBuffer(t, wantRead.Bytes(), func(t *testing.T) *dataBuffer {\n\t\tb := &dataBuffer{expected: 32 * 1024}\n\t\tfor _, p := range writes {\n\t\t\tif n, err := b.Write(p); n != len(p) || err != nil {\n\t\t\t\tt.Fatalf(\"Write(%q x %d)=%v,%v want %v,nil\", p[:1], len(p), n, err, len(p))\n\t\t\t}\n\t\t}\n\t\twant := [][]byte{\n\t\t\tappend(bytes.Repeat([]byte(\"a\"), 1*1024), append(bytes.Repeat([]byte(\"b\"), 14*1024), bytes.Repeat([]byte(\"c\"), 1*1024)...)...),\n\t\t\tappend(bytes.Repeat([]byte(\"c\"), 14*1024), bytes.Repeat([]byte(\"d\"), 2*1024)...),\n\t\t\tbytes.Repeat([]byte(\"e\"), 1*1024),\n\t\t}\n\t\tif !reflect.DeepEqual(b.chunks, want) {\n\t\t\tt.Errorf(\"dataBuffer.chunks\\ngot:  %s\\nwant: %s\", fmtDataChunks(b.chunks), fmtDataChunks(want))\n\t\t}\n\t\treturn b\n\t})\n}\n\nfunc TestDataBufferWriteAfterPartialRead(t *testing.T) {\n\ttestDataBuffer(t, []byte(\"cdxyz\"), func(t *testing.T) *dataBuffer {\n\t\tb := &dataBuffer{}\n\t\tif n, err := b.Write([]byte(\"abcd\")); n != 4 || err != nil {\n\t\t\tt.Fatalf(\"Write(\\\"abcd\\\")=%v,%v want 4,nil\", n, err)\n\t\t}\n\t\tp := make([]byte, 2)\n\t\tif n, err := b.Read(p); n != 2 || err != nil || !bytes.Equal(p, []byte(\"ab\")) {\n\t\t\tt.Fatalf(\"Read()=%q,%v,%v want \\\"ab\\\",2,nil\", p, n, err)\n\t\t}\n\t\tif n, err := b.Write([]byte(\"xyz\")); n != 3 || err != nil {\n\t\t\tt.Fatalf(\"Write(\\\"xyz\\\")=%v,%v want 3,nil\", n, err)\n\t\t}\n\t\treturn b\n\t})\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/errors.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\n// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.\ntype ErrCode uint32\n\nconst (\n\tErrCodeNo                 ErrCode = 0x0\n\tErrCodeProtocol           ErrCode = 0x1\n\tErrCodeInternal           ErrCode = 0x2\n\tErrCodeFlowControl        ErrCode = 0x3\n\tErrCodeSettingsTimeout    ErrCode = 0x4\n\tErrCodeStreamClosed       ErrCode = 0x5\n\tErrCodeFrameSize          ErrCode = 0x6\n\tErrCodeRefusedStream      ErrCode = 0x7\n\tErrCodeCancel             ErrCode = 0x8\n\tErrCodeCompression        ErrCode = 0x9\n\tErrCodeConnect            ErrCode = 0xa\n\tErrCodeEnhanceYourCalm    ErrCode = 0xb\n\tErrCodeInadequateSecurity ErrCode = 0xc\n\tErrCodeHTTP11Required     ErrCode = 0xd\n)\n\nvar errCodeName = map[ErrCode]string{\n\tErrCodeNo:                 \"NO_ERROR\",\n\tErrCodeProtocol:           \"PROTOCOL_ERROR\",\n\tErrCodeInternal:           \"INTERNAL_ERROR\",\n\tErrCodeFlowControl:        \"FLOW_CONTROL_ERROR\",\n\tErrCodeSettingsTimeout:    \"SETTINGS_TIMEOUT\",\n\tErrCodeStreamClosed:       \"STREAM_CLOSED\",\n\tErrCodeFrameSize:          \"FRAME_SIZE_ERROR\",\n\tErrCodeRefusedStream:      \"REFUSED_STREAM\",\n\tErrCodeCancel:             \"CANCEL\",\n\tErrCodeCompression:        \"COMPRESSION_ERROR\",\n\tErrCodeConnect:            \"CONNECT_ERROR\",\n\tErrCodeEnhanceYourCalm:    \"ENHANCE_YOUR_CALM\",\n\tErrCodeInadequateSecurity: \"INADEQUATE_SECURITY\",\n\tErrCodeHTTP11Required:     \"HTTP_1_1_REQUIRED\",\n}\n\nfunc (e ErrCode) String() string {\n\tif s, ok := errCodeName[e]; ok {\n\t\treturn s\n\t}\n\treturn fmt.Sprintf(\"unknown error code 0x%x\", uint32(e))\n}\n\n// ConnectionError is an error that results in the termination of the\n// entire connection.\ntype ConnectionError ErrCode\n\nfunc (e 
ConnectionError) Error() string { return fmt.Sprintf(\"connection error: %s\", ErrCode(e)) }\n\n// StreamError is an error that only affects one stream within an\n// HTTP/2 connection.\ntype StreamError struct {\n\tStreamID uint32\n\tCode     ErrCode\n\tCause    error // optional additional detail\n}\n\nfunc streamError(id uint32, code ErrCode) StreamError {\n\treturn StreamError{StreamID: id, Code: code}\n}\n\nfunc (e StreamError) Error() string {\n\tif e.Cause != nil {\n\t\treturn fmt.Sprintf(\"stream error: stream ID %d; %v; %v\", e.StreamID, e.Code, e.Cause)\n\t}\n\treturn fmt.Sprintf(\"stream error: stream ID %d; %v\", e.StreamID, e.Code)\n}\n\n// 6.9.1 The Flow Control Window\n// \"If a sender receives a WINDOW_UPDATE that causes a flow control\n// window to exceed this maximum it MUST terminate either the stream\n// or the connection, as appropriate. For streams, [...]; for the\n// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code.\"\ntype goAwayFlowError struct{}\n\nfunc (goAwayFlowError) Error() string { return \"connection exceeded flow control window size\" }\n\n// connError represents an HTTP/2 ConnectionError error code, along\n// with a string (for debugging) explaining why.\n//\n// Errors of this type are only returned by the frame parser functions\n// and converted into ConnectionError(Code), after stashing away\n// the Reason into the Framer's errDetail field, accessible via\n// the (*Framer).ErrorDetail method.\ntype connError struct {\n\tCode   ErrCode // the ConnectionError error code\n\tReason string  // additional reason\n}\n\nfunc (e connError) Error() string {\n\treturn fmt.Sprintf(\"http2: connection error: %v: %v\", e.Code, e.Reason)\n}\n\ntype pseudoHeaderError string\n\nfunc (e pseudoHeaderError) Error() string {\n\treturn fmt.Sprintf(\"invalid pseudo-header %q\", string(e))\n}\n\ntype duplicatePseudoHeaderError string\n\nfunc (e duplicatePseudoHeaderError) Error() string {\n\treturn fmt.Sprintf(\"duplicate pseudo-header %q\", 
string(e))\n}\n\ntype headerFieldNameError string\n\nfunc (e headerFieldNameError) Error() string {\n\treturn fmt.Sprintf(\"invalid header field name %q\", string(e))\n}\n\ntype headerFieldValueError string\n\nfunc (e headerFieldValueError) Error() string {\n\treturn fmt.Sprintf(\"invalid header field value %q\", string(e))\n}\n\nvar (\n\terrMixPseudoHeaderTypes = errors.New(\"mix of request and response pseudo headers\")\n\terrPseudoAfterRegular   = errors.New(\"pseudo header field after regular\")\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/errors_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport \"testing\"\n\nfunc TestErrCodeString(t *testing.T) {\n\ttests := []struct {\n\t\terr  ErrCode\n\t\twant string\n\t}{\n\t\t{ErrCodeProtocol, \"PROTOCOL_ERROR\"},\n\t\t{0xd, \"HTTP_1_1_REQUIRED\"},\n\t\t{0xf, \"unknown error code 0xf\"},\n\t}\n\tfor i, tt := range tests {\n\t\tgot := tt.err.String()\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"%d. Error = %q; want %q\", i, got, tt.want)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/flow.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Flow control\n\npackage http2\n\n// flow is the flow control window's size.\ntype flow struct {\n\t// n is the number of DATA bytes we're allowed to send.\n\t// A flow is kept both on a conn and a per-stream.\n\tn int32\n\n\t// conn points to the shared connection-level flow that is\n\t// shared by all streams on that conn. It is nil for the flow\n\t// that's on the conn directly.\n\tconn *flow\n}\n\nfunc (f *flow) setConnFlow(cf *flow) { f.conn = cf }\n\nfunc (f *flow) available() int32 {\n\tn := f.n\n\tif f.conn != nil && f.conn.n < n {\n\t\tn = f.conn.n\n\t}\n\treturn n\n}\n\nfunc (f *flow) take(n int32) {\n\tif n > f.available() {\n\t\tpanic(\"internal error: took too much\")\n\t}\n\tf.n -= n\n\tif f.conn != nil {\n\t\tf.conn.n -= n\n\t}\n}\n\n// add adds n bytes (positive or negative) to the flow control window.\n// It returns false if the sum would exceed 2^31-1.\nfunc (f *flow) add(n int32) bool {\n\tremain := (1<<31 - 1) - f.n\n\tif n > remain {\n\t\treturn false\n\t}\n\tf.n += n\n\treturn true\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/flow_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport \"testing\"\n\nfunc TestFlow(t *testing.T) {\n\tvar st flow\n\tvar conn flow\n\tst.add(3)\n\tconn.add(2)\n\n\tif got, want := st.available(), int32(3); got != want {\n\t\tt.Errorf(\"available = %d; want %d\", got, want)\n\t}\n\tst.setConnFlow(&conn)\n\tif got, want := st.available(), int32(2); got != want {\n\t\tt.Errorf(\"after parent setup, available = %d; want %d\", got, want)\n\t}\n\n\tst.take(2)\n\tif got, want := conn.available(), int32(0); got != want {\n\t\tt.Errorf(\"after taking 2, conn = %d; want %d\", got, want)\n\t}\n\tif got, want := st.available(), int32(0); got != want {\n\t\tt.Errorf(\"after taking 2, stream = %d; want %d\", got, want)\n\t}\n}\n\nfunc TestFlowAdd(t *testing.T) {\n\tvar f flow\n\tif !f.add(1) {\n\t\tt.Fatal(\"failed to add 1\")\n\t}\n\tif !f.add(-1) {\n\t\tt.Fatal(\"failed to add -1\")\n\t}\n\tif got, want := f.available(), int32(0); got != want {\n\t\tt.Fatalf(\"size = %d; want %d\", got, want)\n\t}\n\tif !f.add(1<<31 - 1) {\n\t\tt.Fatal(\"failed to add 2^31-1\")\n\t}\n\tif got, want := f.available(), int32(1<<31-1); got != want {\n\t\tt.Fatalf(\"size = %d; want %d\", got, want)\n\t}\n\tif f.add(1) {\n\t\tt.Fatal(\"adding 1 to max shouldn't be allowed\")\n\t}\n\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/frame.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"golang.org/x/net/http2/hpack\"\n\t\"golang.org/x/net/lex/httplex\"\n)\n\nconst frameHeaderLen = 9\n\nvar padZeros = make([]byte, 255) // zeros for padding\n\n// A FrameType is a registered frame type as defined in\n// http://http2.github.io/http2-spec/#rfc.section.11.2\ntype FrameType uint8\n\nconst (\n\tFrameData         FrameType = 0x0\n\tFrameHeaders      FrameType = 0x1\n\tFramePriority     FrameType = 0x2\n\tFrameRSTStream    FrameType = 0x3\n\tFrameSettings     FrameType = 0x4\n\tFramePushPromise  FrameType = 0x5\n\tFramePing         FrameType = 0x6\n\tFrameGoAway       FrameType = 0x7\n\tFrameWindowUpdate FrameType = 0x8\n\tFrameContinuation FrameType = 0x9\n)\n\nvar frameName = map[FrameType]string{\n\tFrameData:         \"DATA\",\n\tFrameHeaders:      \"HEADERS\",\n\tFramePriority:     \"PRIORITY\",\n\tFrameRSTStream:    \"RST_STREAM\",\n\tFrameSettings:     \"SETTINGS\",\n\tFramePushPromise:  \"PUSH_PROMISE\",\n\tFramePing:         \"PING\",\n\tFrameGoAway:       \"GOAWAY\",\n\tFrameWindowUpdate: \"WINDOW_UPDATE\",\n\tFrameContinuation: \"CONTINUATION\",\n}\n\nfunc (t FrameType) String() string {\n\tif s, ok := frameName[t]; ok {\n\t\treturn s\n\t}\n\treturn fmt.Sprintf(\"UNKNOWN_FRAME_TYPE_%d\", uint8(t))\n}\n\n// Flags is a bitmask of HTTP/2 flags.\n// The meaning of flags varies depending on the frame type.\ntype Flags uint8\n\n// Has reports whether f contains all (0 or more) flags in v.\nfunc (f Flags) Has(v Flags) bool {\n\treturn (f & v) == v\n}\n\n// Frame-specific FrameHeader flag bits.\nconst (\n\t// Data Frame\n\tFlagDataEndStream Flags = 0x1\n\tFlagDataPadded    Flags = 0x8\n\n\t// Headers Frame\n\tFlagHeadersEndStream  Flags = 
0x1\n\tFlagHeadersEndHeaders Flags = 0x4\n\tFlagHeadersPadded     Flags = 0x8\n\tFlagHeadersPriority   Flags = 0x20\n\n\t// Settings Frame\n\tFlagSettingsAck Flags = 0x1\n\n\t// Ping Frame\n\tFlagPingAck Flags = 0x1\n\n\t// Continuation Frame\n\tFlagContinuationEndHeaders Flags = 0x4\n\n\tFlagPushPromiseEndHeaders Flags = 0x4\n\tFlagPushPromisePadded     Flags = 0x8\n)\n\nvar flagName = map[FrameType]map[Flags]string{\n\tFrameData: {\n\t\tFlagDataEndStream: \"END_STREAM\",\n\t\tFlagDataPadded:    \"PADDED\",\n\t},\n\tFrameHeaders: {\n\t\tFlagHeadersEndStream:  \"END_STREAM\",\n\t\tFlagHeadersEndHeaders: \"END_HEADERS\",\n\t\tFlagHeadersPadded:     \"PADDED\",\n\t\tFlagHeadersPriority:   \"PRIORITY\",\n\t},\n\tFrameSettings: {\n\t\tFlagSettingsAck: \"ACK\",\n\t},\n\tFramePing: {\n\t\tFlagPingAck: \"ACK\",\n\t},\n\tFrameContinuation: {\n\t\tFlagContinuationEndHeaders: \"END_HEADERS\",\n\t},\n\tFramePushPromise: {\n\t\tFlagPushPromiseEndHeaders: \"END_HEADERS\",\n\t\tFlagPushPromisePadded:     \"PADDED\",\n\t},\n}\n\n// a frameParser parses a frame given its FrameHeader and payload\n// bytes. 
The length of payload will always equal fh.Length (which\n// might be 0).\ntype frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error)\n\nvar frameParsers = map[FrameType]frameParser{\n\tFrameData:         parseDataFrame,\n\tFrameHeaders:      parseHeadersFrame,\n\tFramePriority:     parsePriorityFrame,\n\tFrameRSTStream:    parseRSTStreamFrame,\n\tFrameSettings:     parseSettingsFrame,\n\tFramePushPromise:  parsePushPromise,\n\tFramePing:         parsePingFrame,\n\tFrameGoAway:       parseGoAwayFrame,\n\tFrameWindowUpdate: parseWindowUpdateFrame,\n\tFrameContinuation: parseContinuationFrame,\n}\n\nfunc typeFrameParser(t FrameType) frameParser {\n\tif f := frameParsers[t]; f != nil {\n\t\treturn f\n\t}\n\treturn parseUnknownFrame\n}\n\n// A FrameHeader is the 9 byte header of all HTTP/2 frames.\n//\n// See http://http2.github.io/http2-spec/#FrameHeader\ntype FrameHeader struct {\n\tvalid bool // caller can access []byte fields in the Frame\n\n\t// Type is the 1 byte frame type. There are ten standard frame\n\t// types, but extension frame types may be written by WriteRawFrame\n\t// and will be returned by ReadFrame (as UnknownFrame).\n\tType FrameType\n\n\t// Flags are the 1 byte of 8 potential bit flags per frame.\n\t// They are specific to the frame type.\n\tFlags Flags\n\n\t// Length is the length of the frame, not including the 9 byte header.\n\t// The maximum size is one byte less than 16MB (uint24), but only\n\t// frames up to 16KB are allowed without peer agreement.\n\tLength uint32\n\n\t// StreamID is which stream this frame is for. Certain frames\n\t// are not stream-specific, in which case this field is 0.\n\tStreamID uint32\n}\n\n// Header returns h. 
It exists so FrameHeaders can be embedded in other\n// specific frame types and implement the Frame interface.\nfunc (h FrameHeader) Header() FrameHeader { return h }\n\nfunc (h FrameHeader) String() string {\n\tvar buf bytes.Buffer\n\tbuf.WriteString(\"[FrameHeader \")\n\th.writeDebug(&buf)\n\tbuf.WriteByte(']')\n\treturn buf.String()\n}\n\nfunc (h FrameHeader) writeDebug(buf *bytes.Buffer) {\n\tbuf.WriteString(h.Type.String())\n\tif h.Flags != 0 {\n\t\tbuf.WriteString(\" flags=\")\n\t\tset := 0\n\t\tfor i := uint8(0); i < 8; i++ {\n\t\t\tif h.Flags&(1<<i) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tset++\n\t\t\tif set > 1 {\n\t\t\t\tbuf.WriteByte('|')\n\t\t\t}\n\t\t\tname := flagName[h.Type][Flags(1<<i)]\n\t\t\tif name != \"\" {\n\t\t\t\tbuf.WriteString(name)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(buf, \"0x%x\", 1<<i)\n\t\t\t}\n\t\t}\n\t}\n\tif h.StreamID != 0 {\n\t\tfmt.Fprintf(buf, \" stream=%d\", h.StreamID)\n\t}\n\tfmt.Fprintf(buf, \" len=%d\", h.Length)\n}\n\nfunc (h *FrameHeader) checkValid() {\n\tif !h.valid {\n\t\tpanic(\"Frame accessor called on non-owned Frame\")\n\t}\n}\n\nfunc (h *FrameHeader) invalidate() { h.valid = false }\n\n// frame header bytes.\n// Used only by ReadFrameHeader.\nvar fhBytes = sync.Pool{\n\tNew: func() interface{} {\n\t\tbuf := make([]byte, frameHeaderLen)\n\t\treturn &buf\n\t},\n}\n\n// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.\n// Most users should use Framer.ReadFrame instead.\nfunc ReadFrameHeader(r io.Reader) (FrameHeader, error) {\n\tbufp := fhBytes.Get().(*[]byte)\n\tdefer fhBytes.Put(bufp)\n\treturn readFrameHeader(*bufp, r)\n}\n\nfunc readFrameHeader(buf []byte, r io.Reader) (FrameHeader, error) {\n\t_, err := io.ReadFull(r, buf[:frameHeaderLen])\n\tif err != nil {\n\t\treturn FrameHeader{}, err\n\t}\n\treturn FrameHeader{\n\t\tLength:   (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])),\n\t\tType:     FrameType(buf[3]),\n\t\tFlags:    Flags(buf[4]),\n\t\tStreamID: 
binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),\n\t\tvalid:    true,\n\t}, nil\n}\n\n// A Frame is the base interface implemented by all frame types.\n// Callers will generally type-assert the specific frame type:\n// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.\n//\n// Frames are only valid until the next call to Framer.ReadFrame.\ntype Frame interface {\n\tHeader() FrameHeader\n\n\t// invalidate is called by Framer.ReadFrame to make this\n\t// frame's buffers as being invalid, since the subsequent\n\t// frame will reuse them.\n\tinvalidate()\n}\n\n// A Framer reads and writes Frames.\ntype Framer struct {\n\tr         io.Reader\n\tlastFrame Frame\n\terrDetail error\n\n\t// lastHeaderStream is non-zero if the last frame was an\n\t// unfinished HEADERS/CONTINUATION.\n\tlastHeaderStream uint32\n\n\tmaxReadSize uint32\n\theaderBuf   [frameHeaderLen]byte\n\n\t// TODO: let getReadBuf be configurable, and use a less memory-pinning\n\t// allocator in server.go to minimize memory pinned for many idle conns.\n\t// Will probably also need to make frame invalidation have a hook too.\n\tgetReadBuf func(size uint32) []byte\n\treadBuf    []byte // cache for default getReadBuf\n\n\tmaxWriteSize uint32 // zero means unlimited; TODO: implement\n\n\tw    io.Writer\n\twbuf []byte\n\n\t// AllowIllegalWrites permits the Framer's Write methods to\n\t// write frames that do not conform to the HTTP/2 spec. 
This\n\t// permits using the Framer to test other HTTP/2\n\t// implementations' conformance to the spec.\n\t// If false, the Write methods will prefer to return an error\n\t// rather than comply.\n\tAllowIllegalWrites bool\n\n\t// AllowIllegalReads permits the Framer's ReadFrame method\n\t// to return non-compliant frames or frame orders.\n\t// This is for testing and permits using the Framer to test\n\t// other HTTP/2 implementations' conformance to the spec.\n\t// It is not compatible with ReadMetaHeaders.\n\tAllowIllegalReads bool\n\n\t// ReadMetaHeaders if non-nil causes ReadFrame to merge\n\t// HEADERS and CONTINUATION frames together and return\n\t// MetaHeadersFrame instead.\n\tReadMetaHeaders *hpack.Decoder\n\n\t// MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE.\n\t// It's used only if ReadMetaHeaders is set; 0 means a sane default\n\t// (currently 16MB)\n\t// If the limit is hit, MetaHeadersFrame.Truncated is set true.\n\tMaxHeaderListSize uint32\n\n\t// TODO: track which type of frame & with which flags was sent\n\t// last. 
Then return an error (unless AllowIllegalWrites) if\n\t// we're in the middle of a header block and a\n\t// non-Continuation or Continuation on a different stream is\n\t// attempted to be written.\n\n\tlogReads, logWrites bool\n\n\tdebugFramer       *Framer // only use for logging written writes\n\tdebugFramerBuf    *bytes.Buffer\n\tdebugReadLoggerf  func(string, ...interface{})\n\tdebugWriteLoggerf func(string, ...interface{})\n\n\tframeCache *frameCache // nil if frames aren't reused (default)\n}\n\nfunc (fr *Framer) maxHeaderListSize() uint32 {\n\tif fr.MaxHeaderListSize == 0 {\n\t\treturn 16 << 20 // sane default, per docs\n\t}\n\treturn fr.MaxHeaderListSize\n}\n\nfunc (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {\n\t// Write the FrameHeader.\n\tf.wbuf = append(f.wbuf[:0],\n\t\t0, // 3 bytes of length, filled in in endWrite\n\t\t0,\n\t\t0,\n\t\tbyte(ftype),\n\t\tbyte(flags),\n\t\tbyte(streamID>>24),\n\t\tbyte(streamID>>16),\n\t\tbyte(streamID>>8),\n\t\tbyte(streamID))\n}\n\nfunc (f *Framer) endWrite() error {\n\t// Now that we know the final size, fill in the FrameHeader in\n\t// the space previously reserved for it. 
Abuse append.\n\tlength := len(f.wbuf) - frameHeaderLen\n\tif length >= (1 << 24) {\n\t\treturn ErrFrameTooLarge\n\t}\n\t_ = append(f.wbuf[:0],\n\t\tbyte(length>>16),\n\t\tbyte(length>>8),\n\t\tbyte(length))\n\tif f.logWrites {\n\t\tf.logWrite()\n\t}\n\n\tn, err := f.w.Write(f.wbuf)\n\tif err == nil && n != len(f.wbuf) {\n\t\terr = io.ErrShortWrite\n\t}\n\treturn err\n}\n\nfunc (f *Framer) logWrite() {\n\tif f.debugFramer == nil {\n\t\tf.debugFramerBuf = new(bytes.Buffer)\n\t\tf.debugFramer = NewFramer(nil, f.debugFramerBuf)\n\t\tf.debugFramer.logReads = false // we log it ourselves, saying \"wrote\" below\n\t\t// Let us read anything, even if we accidentally wrote it\n\t\t// in the wrong order:\n\t\tf.debugFramer.AllowIllegalReads = true\n\t}\n\tf.debugFramerBuf.Write(f.wbuf)\n\tfr, err := f.debugFramer.ReadFrame()\n\tif err != nil {\n\t\tf.debugWriteLoggerf(\"http2: Framer %p: failed to decode just-written frame\", f)\n\t\treturn\n\t}\n\tf.debugWriteLoggerf(\"http2: Framer %p: wrote %v\", f, summarizeFrame(fr))\n}\n\nfunc (f *Framer) writeByte(v byte)     { f.wbuf = append(f.wbuf, v) }\nfunc (f *Framer) writeBytes(v []byte)  { f.wbuf = append(f.wbuf, v...) 
}\nfunc (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }\nfunc (f *Framer) writeUint32(v uint32) {\n\tf.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))\n}\n\nconst (\n\tminMaxFrameSize = 1 << 14\n\tmaxFrameSize    = 1<<24 - 1\n)\n\n// SetReuseFrames allows the Framer to reuse Frames.\n// If called on a Framer, Frames returned by calls to ReadFrame are only\n// valid until the next call to ReadFrame.\nfunc (fr *Framer) SetReuseFrames() {\n\tif fr.frameCache != nil {\n\t\treturn\n\t}\n\tfr.frameCache = &frameCache{}\n}\n\ntype frameCache struct {\n\tdataFrame DataFrame\n}\n\nfunc (fc *frameCache) getDataFrame() *DataFrame {\n\tif fc == nil {\n\t\treturn &DataFrame{}\n\t}\n\treturn &fc.dataFrame\n}\n\n// NewFramer returns a Framer that writes frames to w and reads them from r.\nfunc NewFramer(w io.Writer, r io.Reader) *Framer {\n\tfr := &Framer{\n\t\tw:                 w,\n\t\tr:                 r,\n\t\tlogReads:          logFrameReads,\n\t\tlogWrites:         logFrameWrites,\n\t\tdebugReadLoggerf:  log.Printf,\n\t\tdebugWriteLoggerf: log.Printf,\n\t}\n\tfr.getReadBuf = func(size uint32) []byte {\n\t\tif cap(fr.readBuf) >= int(size) {\n\t\t\treturn fr.readBuf[:size]\n\t\t}\n\t\tfr.readBuf = make([]byte, size)\n\t\treturn fr.readBuf\n\t}\n\tfr.SetMaxReadFrameSize(maxFrameSize)\n\treturn fr\n}\n\n// SetMaxReadFrameSize sets the maximum size of a frame\n// that will be read by a subsequent call to ReadFrame.\n// It is the caller's responsibility to advertise this\n// limit with a SETTINGS frame.\nfunc (fr *Framer) SetMaxReadFrameSize(v uint32) {\n\tif v > maxFrameSize {\n\t\tv = maxFrameSize\n\t}\n\tfr.maxReadSize = v\n}\n\n// ErrorDetail returns a more detailed error of the last error\n// returned by Framer.ReadFrame. For instance, if ReadFrame\n// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail\n// will say exactly what was invalid. 
ErrorDetail is not guaranteed\n// to return a non-nil value and like the rest of the http2 package,\n// its return value is not protected by an API compatibility promise.\n// ErrorDetail is reset after the next call to ReadFrame.\nfunc (fr *Framer) ErrorDetail() error {\n\treturn fr.errDetail\n}\n\n// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer\n// sends a frame that is larger than declared with SetMaxReadFrameSize.\nvar ErrFrameTooLarge = errors.New(\"http2: frame too large\")\n\n// terminalReadFrameError reports whether err is an unrecoverable\n// error from ReadFrame and no other frames should be read.\nfunc terminalReadFrameError(err error) bool {\n\tif _, ok := err.(StreamError); ok {\n\t\treturn false\n\t}\n\treturn err != nil\n}\n\n// ReadFrame reads a single frame. The returned Frame is only valid\n// until the next call to ReadFrame.\n//\n// If the frame is larger than previously set with SetMaxReadFrameSize, the\n// returned error is ErrFrameTooLarge. Other errors may be of type\n// ConnectionError, StreamError, or anything else from the underlying\n// reader.\nfunc (fr *Framer) ReadFrame() (Frame, error) {\n\tfr.errDetail = nil\n\tif fr.lastFrame != nil {\n\t\tfr.lastFrame.invalidate()\n\t}\n\tfh, err := readFrameHeader(fr.headerBuf[:], fr.r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif fh.Length > fr.maxReadSize {\n\t\treturn nil, ErrFrameTooLarge\n\t}\n\tpayload := fr.getReadBuf(fh.Length)\n\tif _, err := io.ReadFull(fr.r, payload); err != nil {\n\t\treturn nil, err\n\t}\n\tf, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload)\n\tif err != nil {\n\t\tif ce, ok := err.(connError); ok {\n\t\t\treturn nil, fr.connError(ce.Code, ce.Reason)\n\t\t}\n\t\treturn nil, err\n\t}\n\tif err := fr.checkFrameOrder(f); err != nil {\n\t\treturn nil, err\n\t}\n\tif fr.logReads {\n\t\tfr.debugReadLoggerf(\"http2: Framer %p: read %v\", fr, summarizeFrame(f))\n\t}\n\tif fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {\n\t\treturn 
fr.readMetaFrame(f.(*HeadersFrame))\n\t}\n\treturn f, nil\n}\n\n// connError returns ConnectionError(code) but first\n// stashes away a public reason to the caller can optionally relay it\n// to the peer before hanging up on them. This might help others debug\n// their implementations.\nfunc (fr *Framer) connError(code ErrCode, reason string) error {\n\tfr.errDetail = errors.New(reason)\n\treturn ConnectionError(code)\n}\n\n// checkFrameOrder reports an error if f is an invalid frame to return\n// next from ReadFrame. Mostly it checks whether HEADERS and\n// CONTINUATION frames are contiguous.\nfunc (fr *Framer) checkFrameOrder(f Frame) error {\n\tlast := fr.lastFrame\n\tfr.lastFrame = f\n\tif fr.AllowIllegalReads {\n\t\treturn nil\n\t}\n\n\tfh := f.Header()\n\tif fr.lastHeaderStream != 0 {\n\t\tif fh.Type != FrameContinuation {\n\t\t\treturn fr.connError(ErrCodeProtocol,\n\t\t\t\tfmt.Sprintf(\"got %s for stream %d; expected CONTINUATION following %s for stream %d\",\n\t\t\t\t\tfh.Type, fh.StreamID,\n\t\t\t\t\tlast.Header().Type, fr.lastHeaderStream))\n\t\t}\n\t\tif fh.StreamID != fr.lastHeaderStream {\n\t\t\treturn fr.connError(ErrCodeProtocol,\n\t\t\t\tfmt.Sprintf(\"got CONTINUATION for stream %d; expected stream %d\",\n\t\t\t\t\tfh.StreamID, fr.lastHeaderStream))\n\t\t}\n\t} else if fh.Type == FrameContinuation {\n\t\treturn fr.connError(ErrCodeProtocol, fmt.Sprintf(\"unexpected CONTINUATION for stream %d\", fh.StreamID))\n\t}\n\n\tswitch fh.Type {\n\tcase FrameHeaders, FrameContinuation:\n\t\tif fh.Flags.Has(FlagHeadersEndHeaders) {\n\t\t\tfr.lastHeaderStream = 0\n\t\t} else {\n\t\t\tfr.lastHeaderStream = fh.StreamID\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// A DataFrame conveys arbitrary, variable-length sequences of octets\n// associated with a stream.\n// See http://http2.github.io/http2-spec/#rfc.section.6.1\ntype DataFrame struct {\n\tFrameHeader\n\tdata []byte\n}\n\nfunc (f *DataFrame) StreamEnded() bool {\n\treturn 
f.FrameHeader.Flags.Has(FlagDataEndStream)\n}\n\n// Data returns the frame's data octets, not including any padding\n// size byte or padding suffix bytes.\n// The caller must not retain the returned memory past the next\n// call to ReadFrame.\nfunc (f *DataFrame) Data() []byte {\n\tf.checkValid()\n\treturn f.data\n}\n\nfunc parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) {\n\tif fh.StreamID == 0 {\n\t\t// DATA frames MUST be associated with a stream. If a\n\t\t// DATA frame is received whose stream identifier\n\t\t// field is 0x0, the recipient MUST respond with a\n\t\t// connection error (Section 5.4.1) of type\n\t\t// PROTOCOL_ERROR.\n\t\treturn nil, connError{ErrCodeProtocol, \"DATA frame with stream ID 0\"}\n\t}\n\tf := fc.getDataFrame()\n\tf.FrameHeader = fh\n\n\tvar padSize byte\n\tif fh.Flags.Has(FlagDataPadded) {\n\t\tvar err error\n\t\tpayload, padSize, err = readByte(payload)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif int(padSize) > len(payload) {\n\t\t// If the length of the padding is greater than the\n\t\t// length of the frame payload, the recipient MUST\n\t\t// treat this as a connection error.\n\t\t// Filed: https://github.com/http2/http2-spec/issues/610\n\t\treturn nil, connError{ErrCodeProtocol, \"pad size larger than data payload\"}\n\t}\n\tf.data = payload[:len(payload)-int(padSize)]\n\treturn f, nil\n}\n\nvar (\n\terrStreamID    = errors.New(\"invalid stream ID\")\n\terrDepStreamID = errors.New(\"invalid dependent stream ID\")\n\terrPadLength   = errors.New(\"pad length too large\")\n\terrPadBytes    = errors.New(\"padding bytes must all be zeros unless AllowIllegalWrites is enabled\")\n)\n\nfunc validStreamIDOrZero(streamID uint32) bool {\n\treturn streamID&(1<<31) == 0\n}\n\nfunc validStreamID(streamID uint32) bool {\n\treturn streamID != 0 && streamID&(1<<31) == 0\n}\n\n// WriteData writes a DATA frame.\n//\n// It will perform exactly one Write to the underlying Writer.\n// It is the caller's 
responsibility not to violate the maximum frame size\n// and to not call other Write methods concurrently.\nfunc (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {\n\treturn f.WriteDataPadded(streamID, endStream, data, nil)\n}\n\n// WriteData writes a DATA frame with optional padding.\n//\n// If pad is nil, the padding bit is not sent.\n// The length of pad must not exceed 255 bytes.\n// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.\n//\n// It will perform exactly one Write to the underlying Writer.\n// It is the caller's responsibility not to violate the maximum frame size\n// and to not call other Write methods concurrently.\nfunc (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {\n\tif !validStreamID(streamID) && !f.AllowIllegalWrites {\n\t\treturn errStreamID\n\t}\n\tif len(pad) > 0 {\n\t\tif len(pad) > 255 {\n\t\t\treturn errPadLength\n\t\t}\n\t\tif !f.AllowIllegalWrites {\n\t\t\tfor _, b := range pad {\n\t\t\t\tif b != 0 {\n\t\t\t\t\t// \"Padding octets MUST be set to zero when sending.\"\n\t\t\t\t\treturn errPadBytes\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tvar flags Flags\n\tif endStream {\n\t\tflags |= FlagDataEndStream\n\t}\n\tif pad != nil {\n\t\tflags |= FlagDataPadded\n\t}\n\tf.startWrite(FrameData, flags, streamID)\n\tif pad != nil {\n\t\tf.wbuf = append(f.wbuf, byte(len(pad)))\n\t}\n\tf.wbuf = append(f.wbuf, data...)\n\tf.wbuf = append(f.wbuf, pad...)\n\treturn f.endWrite()\n}\n\n// A SettingsFrame conveys configuration parameters that affect how\n// endpoints communicate, such as preferences and constraints on peer\n// behavior.\n//\n// See http://http2.github.io/http2-spec/#SETTINGS\ntype SettingsFrame struct {\n\tFrameHeader\n\tp []byte\n}\n\nfunc parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {\n\tif fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {\n\t\t// When this (ACK 0x1) bit is set, the payload of the\n\t\t// SETTINGS frame MUST be 
empty. Receipt of a\n\t\t// SETTINGS frame with the ACK flag set and a length\n\t\t// field value other than 0 MUST be treated as a\n\t\t// connection error (Section 5.4.1) of type\n\t\t// FRAME_SIZE_ERROR.\n\t\treturn nil, ConnectionError(ErrCodeFrameSize)\n\t}\n\tif fh.StreamID != 0 {\n\t\t// SETTINGS frames always apply to a connection,\n\t\t// never a single stream. The stream identifier for a\n\t\t// SETTINGS frame MUST be zero (0x0).  If an endpoint\n\t\t// receives a SETTINGS frame whose stream identifier\n\t\t// field is anything other than 0x0, the endpoint MUST\n\t\t// respond with a connection error (Section 5.4.1) of\n\t\t// type PROTOCOL_ERROR.\n\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t}\n\tif len(p)%6 != 0 {\n\t\t// Expecting even number of 6 byte settings.\n\t\treturn nil, ConnectionError(ErrCodeFrameSize)\n\t}\n\tf := &SettingsFrame{FrameHeader: fh, p: p}\n\tif v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {\n\t\t// Values above the maximum flow control window size of 2^31 - 1 MUST\n\t\t// be treated as a connection error (Section 5.4.1) of type\n\t\t// FLOW_CONTROL_ERROR.\n\t\treturn nil, ConnectionError(ErrCodeFlowControl)\n\t}\n\treturn f, nil\n}\n\nfunc (f *SettingsFrame) IsAck() bool {\n\treturn f.FrameHeader.Flags.Has(FlagSettingsAck)\n}\n\nfunc (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {\n\tf.checkValid()\n\tbuf := f.p\n\tfor len(buf) > 0 {\n\t\tsettingID := SettingID(binary.BigEndian.Uint16(buf[:2]))\n\t\tif settingID == s {\n\t\t\treturn binary.BigEndian.Uint32(buf[2:6]), true\n\t\t}\n\t\tbuf = buf[6:]\n\t}\n\treturn 0, false\n}\n\n// ForeachSetting runs fn for each setting.\n// It stops and returns the first error.\nfunc (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {\n\tf.checkValid()\n\tbuf := f.p\n\tfor len(buf) > 0 {\n\t\tif err := fn(Setting{\n\t\t\tSettingID(binary.BigEndian.Uint16(buf[:2])),\n\t\t\tbinary.BigEndian.Uint32(buf[2:6]),\n\t\t}); err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\tbuf = buf[6:]\n\t}\n\treturn nil\n}\n\n// WriteSettings writes a SETTINGS frame with zero or more settings\n// specified and the ACK bit not set.\n//\n// It will perform exactly one Write to the underlying Writer.\n// It is the caller's responsibility to not call other Write methods concurrently.\nfunc (f *Framer) WriteSettings(settings ...Setting) error {\n\tf.startWrite(FrameSettings, 0, 0)\n\tfor _, s := range settings {\n\t\tf.writeUint16(uint16(s.ID))\n\t\tf.writeUint32(s.Val)\n\t}\n\treturn f.endWrite()\n}\n\n// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.\n//\n// It will perform exactly one Write to the underlying Writer.\n// It is the caller's responsibility to not call other Write methods concurrently.\nfunc (f *Framer) WriteSettingsAck() error {\n\tf.startWrite(FrameSettings, FlagSettingsAck, 0)\n\treturn f.endWrite()\n}\n\n// A PingFrame is a mechanism for measuring a minimal round trip time\n// from the sender, as well as determining whether an idle connection\n// is still functional.\n// See http://http2.github.io/http2-spec/#rfc.section.6.7\ntype PingFrame struct {\n\tFrameHeader\n\tData [8]byte\n}\n\nfunc (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }\n\nfunc parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {\n\tif len(payload) != 8 {\n\t\treturn nil, ConnectionError(ErrCodeFrameSize)\n\t}\n\tif fh.StreamID != 0 {\n\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t}\n\tf := &PingFrame{FrameHeader: fh}\n\tcopy(f.Data[:], payload)\n\treturn f, nil\n}\n\nfunc (f *Framer) WritePing(ack bool, data [8]byte) error {\n\tvar flags Flags\n\tif ack {\n\t\tflags = FlagPingAck\n\t}\n\tf.startWrite(FramePing, flags, 0)\n\tf.writeBytes(data[:])\n\treturn f.endWrite()\n}\n\n// A GoAwayFrame informs the remote peer to stop creating streams on this connection.\n// See http://http2.github.io/http2-spec/#rfc.section.6.8\ntype GoAwayFrame struct {\n\tFrameHeader\n\tLastStreamID 
uint32\n\tErrCode      ErrCode\n\tdebugData    []byte\n}\n\n// DebugData returns any debug data in the GOAWAY frame. Its contents\n// are not defined.\n// The caller must not retain the returned memory past the next\n// call to ReadFrame.\nfunc (f *GoAwayFrame) DebugData() []byte {\n\tf.checkValid()\n\treturn f.debugData\n}\n\nfunc parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {\n\tif fh.StreamID != 0 {\n\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t}\n\tif len(p) < 8 {\n\t\treturn nil, ConnectionError(ErrCodeFrameSize)\n\t}\n\treturn &GoAwayFrame{\n\t\tFrameHeader:  fh,\n\t\tLastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1),\n\t\tErrCode:      ErrCode(binary.BigEndian.Uint32(p[4:8])),\n\t\tdebugData:    p[8:],\n\t}, nil\n}\n\nfunc (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error {\n\tf.startWrite(FrameGoAway, 0, 0)\n\tf.writeUint32(maxStreamID & (1<<31 - 1))\n\tf.writeUint32(uint32(code))\n\tf.writeBytes(debugData)\n\treturn f.endWrite()\n}\n\n// An UnknownFrame is the frame type returned when the frame type is unknown\n// or no specific frame type parser exists.\ntype UnknownFrame struct {\n\tFrameHeader\n\tp []byte\n}\n\n// Payload returns the frame's payload (after the header).  
It is not\n// valid to call this method after a subsequent call to\n// Framer.ReadFrame, nor is it valid to retain the returned slice.\n// The memory is owned by the Framer and is invalidated when the next\n// frame is read.\nfunc (f *UnknownFrame) Payload() []byte {\n\tf.checkValid()\n\treturn f.p\n}\n\nfunc parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {\n\treturn &UnknownFrame{fh, p}, nil\n}\n\n// A WindowUpdateFrame is used to implement flow control.\n// See http://http2.github.io/http2-spec/#rfc.section.6.9\ntype WindowUpdateFrame struct {\n\tFrameHeader\n\tIncrement uint32 // never read with high bit set\n}\n\nfunc parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {\n\tif len(p) != 4 {\n\t\treturn nil, ConnectionError(ErrCodeFrameSize)\n\t}\n\tinc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit\n\tif inc == 0 {\n\t\t// A receiver MUST treat the receipt of a\n\t\t// WINDOW_UPDATE frame with an flow control window\n\t\t// increment of 0 as a stream error (Section 5.4.2) of\n\t\t// type PROTOCOL_ERROR; errors on the connection flow\n\t\t// control window MUST be treated as a connection\n\t\t// error (Section 5.4.1).\n\t\tif fh.StreamID == 0 {\n\t\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t\t}\n\t\treturn nil, streamError(fh.StreamID, ErrCodeProtocol)\n\t}\n\treturn &WindowUpdateFrame{\n\t\tFrameHeader: fh,\n\t\tIncrement:   inc,\n\t}, nil\n}\n\n// WriteWindowUpdate writes a WINDOW_UPDATE frame.\n// The increment value must be between 1 and 2,147,483,647, inclusive.\n// If the Stream ID is zero, the window update applies to the\n// connection as a whole.\nfunc (f *Framer) WriteWindowUpdate(streamID, incr uint32) error {\n\t// \"The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets.\"\n\tif (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {\n\t\treturn errors.New(\"illegal window increment 
value\")\n\t}\n\tf.startWrite(FrameWindowUpdate, 0, streamID)\n\tf.writeUint32(incr)\n\treturn f.endWrite()\n}\n\n// A HeadersFrame is used to open a stream and additionally carries a\n// header block fragment.\ntype HeadersFrame struct {\n\tFrameHeader\n\n\t// Priority is set if FlagHeadersPriority is set in the FrameHeader.\n\tPriority PriorityParam\n\n\theaderFragBuf []byte // not owned\n}\n\nfunc (f *HeadersFrame) HeaderBlockFragment() []byte {\n\tf.checkValid()\n\treturn f.headerFragBuf\n}\n\nfunc (f *HeadersFrame) HeadersEnded() bool {\n\treturn f.FrameHeader.Flags.Has(FlagHeadersEndHeaders)\n}\n\nfunc (f *HeadersFrame) StreamEnded() bool {\n\treturn f.FrameHeader.Flags.Has(FlagHeadersEndStream)\n}\n\nfunc (f *HeadersFrame) HasPriority() bool {\n\treturn f.FrameHeader.Flags.Has(FlagHeadersPriority)\n}\n\nfunc parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {\n\thf := &HeadersFrame{\n\t\tFrameHeader: fh,\n\t}\n\tif fh.StreamID == 0 {\n\t\t// HEADERS frames MUST be associated with a stream. 
If a HEADERS frame\n\t\t// is received whose stream identifier field is 0x0, the recipient MUST\n\t\t// respond with a connection error (Section 5.4.1) of type\n\t\t// PROTOCOL_ERROR.\n\t\treturn nil, connError{ErrCodeProtocol, \"HEADERS frame with stream ID 0\"}\n\t}\n\tvar padLength uint8\n\tif fh.Flags.Has(FlagHeadersPadded) {\n\t\tif p, padLength, err = readByte(p); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif fh.Flags.Has(FlagHeadersPriority) {\n\t\tvar v uint32\n\t\tp, v, err = readUint32(p)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thf.Priority.StreamDep = v & 0x7fffffff\n\t\thf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set\n\t\tp, hf.Priority.Weight, err = readByte(p)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif len(p)-int(padLength) <= 0 {\n\t\treturn nil, streamError(fh.StreamID, ErrCodeProtocol)\n\t}\n\thf.headerFragBuf = p[:len(p)-int(padLength)]\n\treturn hf, nil\n}\n\n// HeadersFrameParam are the parameters for writing a HEADERS frame.\ntype HeadersFrameParam struct {\n\t// StreamID is the required Stream ID to initiate.\n\tStreamID uint32\n\t// BlockFragment is part (or all) of a Header Block.\n\tBlockFragment []byte\n\n\t// EndStream indicates that the header block is the last that\n\t// the endpoint will send for the identified stream. Setting\n\t// this flag causes the stream to enter one of \"half closed\"\n\t// states.\n\tEndStream bool\n\n\t// EndHeaders indicates that this frame contains an entire\n\t// header block and is not followed by any\n\t// CONTINUATION frames.\n\tEndHeaders bool\n\n\t// PadLength is the optional number of bytes of zeros to add\n\t// to this frame.\n\tPadLength uint8\n\n\t// Priority, if non-zero, includes stream priority information\n\t// in the HEADER frame.\n\tPriority PriorityParam\n}\n\n// WriteHeaders writes a single HEADERS frame.\n//\n// This is a low-level header writing method. 
Encoding headers and\n// splitting them into any necessary CONTINUATION frames is handled\n// elsewhere.\n//\n// It will perform exactly one Write to the underlying Writer.\n// It is the caller's responsibility to not call other Write methods concurrently.\nfunc (f *Framer) WriteHeaders(p HeadersFrameParam) error {\n\tif !validStreamID(p.StreamID) && !f.AllowIllegalWrites {\n\t\treturn errStreamID\n\t}\n\tvar flags Flags\n\tif p.PadLength != 0 {\n\t\tflags |= FlagHeadersPadded\n\t}\n\tif p.EndStream {\n\t\tflags |= FlagHeadersEndStream\n\t}\n\tif p.EndHeaders {\n\t\tflags |= FlagHeadersEndHeaders\n\t}\n\tif !p.Priority.IsZero() {\n\t\tflags |= FlagHeadersPriority\n\t}\n\tf.startWrite(FrameHeaders, flags, p.StreamID)\n\tif p.PadLength != 0 {\n\t\tf.writeByte(p.PadLength)\n\t}\n\tif !p.Priority.IsZero() {\n\t\tv := p.Priority.StreamDep\n\t\tif !validStreamIDOrZero(v) && !f.AllowIllegalWrites {\n\t\t\treturn errDepStreamID\n\t\t}\n\t\tif p.Priority.Exclusive {\n\t\t\tv |= 1 << 31\n\t\t}\n\t\tf.writeUint32(v)\n\t\tf.writeByte(p.Priority.Weight)\n\t}\n\tf.wbuf = append(f.wbuf, p.BlockFragment...)\n\tf.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)\n\treturn f.endWrite()\n}\n\n// A PriorityFrame specifies the sender-advised priority of a stream.\n// See http://http2.github.io/http2-spec/#rfc.section.6.3\ntype PriorityFrame struct {\n\tFrameHeader\n\tPriorityParam\n}\n\n// PriorityParam are the stream prioritzation parameters.\ntype PriorityParam struct {\n\t// StreamDep is a 31-bit stream identifier for the\n\t// stream that this stream depends on. Zero means no\n\t// dependency.\n\tStreamDep uint32\n\n\t// Exclusive is whether the dependency is exclusive.\n\tExclusive bool\n\n\t// Weight is the stream's zero-indexed weight. It should be\n\t// set together with StreamDep, or neither should be set. 
Per\n\t// the spec, \"Add one to the value to obtain a weight between\n\t// 1 and 256.\"\n\tWeight uint8\n}\n\nfunc (p PriorityParam) IsZero() bool {\n\treturn p == PriorityParam{}\n}\n\nfunc parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {\n\tif fh.StreamID == 0 {\n\t\treturn nil, connError{ErrCodeProtocol, \"PRIORITY frame with stream ID 0\"}\n\t}\n\tif len(payload) != 5 {\n\t\treturn nil, connError{ErrCodeFrameSize, fmt.Sprintf(\"PRIORITY frame payload size was %d; want 5\", len(payload))}\n\t}\n\tv := binary.BigEndian.Uint32(payload[:4])\n\tstreamID := v & 0x7fffffff // mask off high bit\n\treturn &PriorityFrame{\n\t\tFrameHeader: fh,\n\t\tPriorityParam: PriorityParam{\n\t\t\tWeight:    payload[4],\n\t\t\tStreamDep: streamID,\n\t\t\tExclusive: streamID != v, // was high bit set?\n\t\t},\n\t}, nil\n}\n\n// WritePriority writes a PRIORITY frame.\n//\n// It will perform exactly one Write to the underlying Writer.\n// It is the caller's responsibility to not call other Write methods concurrently.\nfunc (f *Framer) WritePriority(streamID uint32, p PriorityParam) error {\n\tif !validStreamID(streamID) && !f.AllowIllegalWrites {\n\t\treturn errStreamID\n\t}\n\tif !validStreamIDOrZero(p.StreamDep) {\n\t\treturn errDepStreamID\n\t}\n\tf.startWrite(FramePriority, 0, streamID)\n\tv := p.StreamDep\n\tif p.Exclusive {\n\t\tv |= 1 << 31\n\t}\n\tf.writeUint32(v)\n\tf.writeByte(p.Weight)\n\treturn f.endWrite()\n}\n\n// A RSTStreamFrame allows for abnormal termination of a stream.\n// See http://http2.github.io/http2-spec/#rfc.section.6.4\ntype RSTStreamFrame struct {\n\tFrameHeader\n\tErrCode ErrCode\n}\n\nfunc parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {\n\tif len(p) != 4 {\n\t\treturn nil, ConnectionError(ErrCodeFrameSize)\n\t}\n\tif fh.StreamID == 0 {\n\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t}\n\treturn &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil\n}\n\n// WriteRSTStream 
writes a RST_STREAM frame.\n//\n// It will perform exactly one Write to the underlying Writer.\n// It is the caller's responsibility to not call other Write methods concurrently.\nfunc (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error {\n\tif !validStreamID(streamID) && !f.AllowIllegalWrites {\n\t\treturn errStreamID\n\t}\n\tf.startWrite(FrameRSTStream, 0, streamID)\n\tf.writeUint32(uint32(code))\n\treturn f.endWrite()\n}\n\n// A ContinuationFrame is used to continue a sequence of header block fragments.\n// See http://http2.github.io/http2-spec/#rfc.section.6.10\ntype ContinuationFrame struct {\n\tFrameHeader\n\theaderFragBuf []byte\n}\n\nfunc parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {\n\tif fh.StreamID == 0 {\n\t\treturn nil, connError{ErrCodeProtocol, \"CONTINUATION frame with stream ID 0\"}\n\t}\n\treturn &ContinuationFrame{fh, p}, nil\n}\n\nfunc (f *ContinuationFrame) HeaderBlockFragment() []byte {\n\tf.checkValid()\n\treturn f.headerFragBuf\n}\n\nfunc (f *ContinuationFrame) HeadersEnded() bool {\n\treturn f.FrameHeader.Flags.Has(FlagContinuationEndHeaders)\n}\n\n// WriteContinuation writes a CONTINUATION frame.\n//\n// It will perform exactly one Write to the underlying Writer.\n// It is the caller's responsibility to not call other Write methods concurrently.\nfunc (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {\n\tif !validStreamID(streamID) && !f.AllowIllegalWrites {\n\t\treturn errStreamID\n\t}\n\tvar flags Flags\n\tif endHeaders {\n\t\tflags |= FlagContinuationEndHeaders\n\t}\n\tf.startWrite(FrameContinuation, flags, streamID)\n\tf.wbuf = append(f.wbuf, headerBlockFragment...)\n\treturn f.endWrite()\n}\n\n// A PushPromiseFrame is used to initiate a server stream.\n// See http://http2.github.io/http2-spec/#rfc.section.6.6\ntype PushPromiseFrame struct {\n\tFrameHeader\n\tPromiseID     uint32\n\theaderFragBuf []byte // not owned\n}\n\nfunc (f 
*PushPromiseFrame) HeaderBlockFragment() []byte {\n\tf.checkValid()\n\treturn f.headerFragBuf\n}\n\nfunc (f *PushPromiseFrame) HeadersEnded() bool {\n\treturn f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)\n}\n\nfunc parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {\n\tpp := &PushPromiseFrame{\n\t\tFrameHeader: fh,\n\t}\n\tif pp.StreamID == 0 {\n\t\t// PUSH_PROMISE frames MUST be associated with an existing,\n\t\t// peer-initiated stream. The stream identifier of a\n\t\t// PUSH_PROMISE frame indicates the stream it is associated\n\t\t// with. If the stream identifier field specifies the value\n\t\t// 0x0, a recipient MUST respond with a connection error\n\t\t// (Section 5.4.1) of type PROTOCOL_ERROR.\n\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t}\n\t// The PUSH_PROMISE frame includes optional padding.\n\t// Padding fields and flags are identical to those defined for DATA frames\n\tvar padLength uint8\n\tif fh.Flags.Has(FlagPushPromisePadded) {\n\t\tif p, padLength, err = readByte(p); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tp, pp.PromiseID, err = readUint32(p)\n\tif err != nil {\n\t\treturn\n\t}\n\tpp.PromiseID = pp.PromiseID & (1<<31 - 1)\n\n\tif int(padLength) > len(p) {\n\t\t// like the DATA frame, error out if padding is longer than the body.\n\t\treturn nil, ConnectionError(ErrCodeProtocol)\n\t}\n\tpp.headerFragBuf = p[:len(p)-int(padLength)]\n\treturn pp, nil\n}\n\n// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.\ntype PushPromiseParam struct {\n\t// StreamID is the required Stream ID to initiate.\n\tStreamID uint32\n\n\t// PromiseID is the required Stream ID which this\n\t// Push Promises\n\tPromiseID uint32\n\n\t// BlockFragment is part (or all) of a Header Block.\n\tBlockFragment []byte\n\n\t// EndHeaders indicates that this frame contains an entire\n\t// header block and is not followed by any\n\t// CONTINUATION frames.\n\tEndHeaders bool\n\n\t// PadLength is the optional number of 
bytes of zeros to add\n\t// to this frame.\n\tPadLength uint8\n}\n\n// WritePushPromise writes a single PushPromise Frame.\n//\n// As with Header Frames, This is the low level call for writing\n// individual frames. Continuation frames are handled elsewhere.\n//\n// It will perform exactly one Write to the underlying Writer.\n// It is the caller's responsibility to not call other Write methods concurrently.\nfunc (f *Framer) WritePushPromise(p PushPromiseParam) error {\n\tif !validStreamID(p.StreamID) && !f.AllowIllegalWrites {\n\t\treturn errStreamID\n\t}\n\tvar flags Flags\n\tif p.PadLength != 0 {\n\t\tflags |= FlagPushPromisePadded\n\t}\n\tif p.EndHeaders {\n\t\tflags |= FlagPushPromiseEndHeaders\n\t}\n\tf.startWrite(FramePushPromise, flags, p.StreamID)\n\tif p.PadLength != 0 {\n\t\tf.writeByte(p.PadLength)\n\t}\n\tif !validStreamID(p.PromiseID) && !f.AllowIllegalWrites {\n\t\treturn errStreamID\n\t}\n\tf.writeUint32(p.PromiseID)\n\tf.wbuf = append(f.wbuf, p.BlockFragment...)\n\tf.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)\n\treturn f.endWrite()\n}\n\n// WriteRawFrame writes a raw frame. 
This can be used to write\n// extension frames unknown to this package.\nfunc (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error {\n\tf.startWrite(t, flags, streamID)\n\tf.writeBytes(payload)\n\treturn f.endWrite()\n}\n\nfunc readByte(p []byte) (remain []byte, b byte, err error) {\n\tif len(p) == 0 {\n\t\treturn nil, 0, io.ErrUnexpectedEOF\n\t}\n\treturn p[1:], p[0], nil\n}\n\nfunc readUint32(p []byte) (remain []byte, v uint32, err error) {\n\tif len(p) < 4 {\n\t\treturn nil, 0, io.ErrUnexpectedEOF\n\t}\n\treturn p[4:], binary.BigEndian.Uint32(p[:4]), nil\n}\n\ntype streamEnder interface {\n\tStreamEnded() bool\n}\n\ntype headersEnder interface {\n\tHeadersEnded() bool\n}\n\ntype headersOrContinuation interface {\n\theadersEnder\n\tHeaderBlockFragment() []byte\n}\n\n// A MetaHeadersFrame is the representation of one HEADERS frame and\n// zero or more contiguous CONTINUATION frames and the decoding of\n// their HPACK-encoded contents.\n//\n// This type of frame does not appear on the wire and is only returned\n// by the Framer when Framer.ReadMetaHeaders is set.\ntype MetaHeadersFrame struct {\n\t*HeadersFrame\n\n\t// Fields are the fields contained in the HEADERS and\n\t// CONTINUATION frames. The underlying slice is owned by the\n\t// Framer and must not be retained after the next call to\n\t// ReadFrame.\n\t//\n\t// Fields are guaranteed to be in the correct http2 order and\n\t// not have unknown pseudo header fields or invalid header\n\t// field names or values. Required pseudo header fields may be\n\t// missing, however. Use the MetaHeadersFrame.Pseudo accessor\n\t// method access pseudo headers.\n\tFields []hpack.HeaderField\n\n\t// Truncated is whether the max header list size limit was hit\n\t// and Fields is incomplete. 
The hpack decoder state is still\n\t// valid, however.\n\tTruncated bool\n}\n\n// PseudoValue returns the given pseudo header field's value.\n// The provided pseudo field should not contain the leading colon.\nfunc (mh *MetaHeadersFrame) PseudoValue(pseudo string) string {\n\tfor _, hf := range mh.Fields {\n\t\tif !hf.IsPseudo() {\n\t\t\treturn \"\"\n\t\t}\n\t\tif hf.Name[1:] == pseudo {\n\t\t\treturn hf.Value\n\t\t}\n\t}\n\treturn \"\"\n}\n\n// RegularFields returns the regular (non-pseudo) header fields of mh.\n// The caller does not own the returned slice.\nfunc (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField {\n\tfor i, hf := range mh.Fields {\n\t\tif !hf.IsPseudo() {\n\t\t\treturn mh.Fields[i:]\n\t\t}\n\t}\n\treturn nil\n}\n\n// PseudoFields returns the pseudo header fields of mh.\n// The caller does not own the returned slice.\nfunc (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField {\n\tfor i, hf := range mh.Fields {\n\t\tif !hf.IsPseudo() {\n\t\t\treturn mh.Fields[:i]\n\t\t}\n\t}\n\treturn mh.Fields\n}\n\nfunc (mh *MetaHeadersFrame) checkPseudos() error {\n\tvar isRequest, isResponse bool\n\tpf := mh.PseudoFields()\n\tfor i, hf := range pf {\n\t\tswitch hf.Name {\n\t\tcase \":method\", \":path\", \":scheme\", \":authority\":\n\t\t\tisRequest = true\n\t\tcase \":status\":\n\t\t\tisResponse = true\n\t\tdefault:\n\t\t\treturn pseudoHeaderError(hf.Name)\n\t\t}\n\t\t// Check for duplicates.\n\t\t// This would be a bad algorithm, but N is 4.\n\t\t// And this doesn't allocate.\n\t\tfor _, hf2 := range pf[:i] {\n\t\t\tif hf.Name == hf2.Name {\n\t\t\t\treturn duplicatePseudoHeaderError(hf.Name)\n\t\t\t}\n\t\t}\n\t}\n\tif isRequest && isResponse {\n\t\treturn errMixPseudoHeaderTypes\n\t}\n\treturn nil\n}\n\nfunc (fr *Framer) maxHeaderStringLen() int {\n\tv := fr.maxHeaderListSize()\n\tif uint32(int(v)) == v {\n\t\treturn int(v)\n\t}\n\t// They had a crazy big number for MaxHeaderBytes anyway,\n\t// so give them unlimited header lengths:\n\treturn 
0\n}\n\n// readMetaFrame returns 0 or more CONTINUATION frames from fr and\n// merge them into into the provided hf and returns a MetaHeadersFrame\n// with the decoded hpack values.\nfunc (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {\n\tif fr.AllowIllegalReads {\n\t\treturn nil, errors.New(\"illegal use of AllowIllegalReads with ReadMetaHeaders\")\n\t}\n\tmh := &MetaHeadersFrame{\n\t\tHeadersFrame: hf,\n\t}\n\tvar remainSize = fr.maxHeaderListSize()\n\tvar sawRegular bool\n\n\tvar invalid error // pseudo header field errors\n\thdec := fr.ReadMetaHeaders\n\thdec.SetEmitEnabled(true)\n\thdec.SetMaxStringLength(fr.maxHeaderStringLen())\n\thdec.SetEmitFunc(func(hf hpack.HeaderField) {\n\t\tif VerboseLogs && fr.logReads {\n\t\t\tfr.debugReadLoggerf(\"http2: decoded hpack field %+v\", hf)\n\t\t}\n\t\tif !httplex.ValidHeaderFieldValue(hf.Value) {\n\t\t\tinvalid = headerFieldValueError(hf.Value)\n\t\t}\n\t\tisPseudo := strings.HasPrefix(hf.Name, \":\")\n\t\tif isPseudo {\n\t\t\tif sawRegular {\n\t\t\t\tinvalid = errPseudoAfterRegular\n\t\t\t}\n\t\t} else {\n\t\t\tsawRegular = true\n\t\t\tif !validWireHeaderFieldName(hf.Name) {\n\t\t\t\tinvalid = headerFieldNameError(hf.Name)\n\t\t\t}\n\t\t}\n\n\t\tif invalid != nil {\n\t\t\thdec.SetEmitEnabled(false)\n\t\t\treturn\n\t\t}\n\n\t\tsize := hf.Size()\n\t\tif size > remainSize {\n\t\t\thdec.SetEmitEnabled(false)\n\t\t\tmh.Truncated = true\n\t\t\treturn\n\t\t}\n\t\tremainSize -= size\n\n\t\tmh.Fields = append(mh.Fields, hf)\n\t})\n\t// Lose reference to MetaHeadersFrame:\n\tdefer hdec.SetEmitFunc(func(hf hpack.HeaderField) {})\n\n\tvar hc headersOrContinuation = hf\n\tfor {\n\t\tfrag := hc.HeaderBlockFragment()\n\t\tif _, err := hdec.Write(frag); err != nil {\n\t\t\treturn nil, ConnectionError(ErrCodeCompression)\n\t\t}\n\n\t\tif hc.HeadersEnded() {\n\t\t\tbreak\n\t\t}\n\t\tif f, err := fr.ReadFrame(); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\thc = f.(*ContinuationFrame) // guaranteed by 
checkFrameOrder\n\t\t}\n\t}\n\n\tmh.HeadersFrame.headerFragBuf = nil\n\tmh.HeadersFrame.invalidate()\n\n\tif err := hdec.Close(); err != nil {\n\t\treturn nil, ConnectionError(ErrCodeCompression)\n\t}\n\tif invalid != nil {\n\t\tfr.errDetail = invalid\n\t\tif VerboseLogs {\n\t\t\tlog.Printf(\"http2: invalid header: %v\", invalid)\n\t\t}\n\t\treturn nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid}\n\t}\n\tif err := mh.checkPseudos(); err != nil {\n\t\tfr.errDetail = err\n\t\tif VerboseLogs {\n\t\t\tlog.Printf(\"http2: invalid pseudo headers: %v\", err)\n\t\t}\n\t\treturn nil, StreamError{mh.StreamID, ErrCodeProtocol, err}\n\t}\n\treturn mh, nil\n}\n\nfunc summarizeFrame(f Frame) string {\n\tvar buf bytes.Buffer\n\tf.Header().writeDebug(&buf)\n\tswitch f := f.(type) {\n\tcase *SettingsFrame:\n\t\tn := 0\n\t\tf.ForeachSetting(func(s Setting) error {\n\t\t\tn++\n\t\t\tif n == 1 {\n\t\t\t\tbuf.WriteString(\", settings:\")\n\t\t\t}\n\t\t\tfmt.Fprintf(&buf, \" %v=%v,\", s.ID, s.Val)\n\t\t\treturn nil\n\t\t})\n\t\tif n > 0 {\n\t\t\tbuf.Truncate(buf.Len() - 1) // remove trailing comma\n\t\t}\n\tcase *DataFrame:\n\t\tdata := f.Data()\n\t\tconst max = 256\n\t\tif len(data) > max {\n\t\t\tdata = data[:max]\n\t\t}\n\t\tfmt.Fprintf(&buf, \" data=%q\", data)\n\t\tif len(f.Data()) > max {\n\t\t\tfmt.Fprintf(&buf, \" (%d bytes omitted)\", len(f.Data())-max)\n\t\t}\n\tcase *WindowUpdateFrame:\n\t\tif f.StreamID == 0 {\n\t\t\tbuf.WriteString(\" (conn)\")\n\t\t}\n\t\tfmt.Fprintf(&buf, \" incr=%v\", f.Increment)\n\tcase *PingFrame:\n\t\tfmt.Fprintf(&buf, \" ping=%q\", f.Data[:])\n\tcase *GoAwayFrame:\n\t\tfmt.Fprintf(&buf, \" LastStreamID=%v ErrCode=%v Debug=%q\",\n\t\t\tf.LastStreamID, f.ErrCode, f.debugData)\n\tcase *RSTStreamFrame:\n\t\tfmt.Fprintf(&buf, \" ErrCode=%v\", f.ErrCode)\n\t}\n\treturn buf.String()\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/frame_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/http2/hpack\"\n)\n\nfunc testFramer() (*Framer, *bytes.Buffer) {\n\tbuf := new(bytes.Buffer)\n\treturn NewFramer(buf, buf), buf\n}\n\nfunc TestFrameSizes(t *testing.T) {\n\t// Catch people rearranging the FrameHeader fields.\n\tif got, want := int(unsafe.Sizeof(FrameHeader{})), 12; got != want {\n\t\tt.Errorf(\"FrameHeader size = %d; want %d\", got, want)\n\t}\n}\n\nfunc TestFrameTypeString(t *testing.T) {\n\ttests := []struct {\n\t\tft   FrameType\n\t\twant string\n\t}{\n\t\t{FrameData, \"DATA\"},\n\t\t{FramePing, \"PING\"},\n\t\t{FrameGoAway, \"GOAWAY\"},\n\t\t{0xf, \"UNKNOWN_FRAME_TYPE_15\"},\n\t}\n\n\tfor i, tt := range tests {\n\t\tgot := tt.ft.String()\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"%d. 
String(FrameType %d) = %q; want %q\", i, int(tt.ft), got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestWriteRST(t *testing.T) {\n\tfr, buf := testFramer()\n\tvar streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4\n\tvar errCode uint32 = 7<<24 + 6<<16 + 5<<8 + 4\n\tfr.WriteRSTStream(streamID, ErrCode(errCode))\n\tconst wantEnc = \"\\x00\\x00\\x04\\x03\\x00\\x01\\x02\\x03\\x04\\x07\\x06\\x05\\x04\"\n\tif buf.String() != wantEnc {\n\t\tt.Errorf(\"encoded as %q; want %q\", buf.Bytes(), wantEnc)\n\t}\n\tf, err := fr.ReadFrame()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\twant := &RSTStreamFrame{\n\t\tFrameHeader: FrameHeader{\n\t\t\tvalid:    true,\n\t\t\tType:     0x3,\n\t\t\tFlags:    0x0,\n\t\t\tLength:   0x4,\n\t\t\tStreamID: 0x1020304,\n\t\t},\n\t\tErrCode: 0x7060504,\n\t}\n\tif !reflect.DeepEqual(f, want) {\n\t\tt.Errorf(\"parsed back %#v; want %#v\", f, want)\n\t}\n}\n\nfunc TestWriteData(t *testing.T) {\n\tfr, buf := testFramer()\n\tvar streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4\n\tdata := []byte(\"ABC\")\n\tfr.WriteData(streamID, true, data)\n\tconst wantEnc = \"\\x00\\x00\\x03\\x00\\x01\\x01\\x02\\x03\\x04ABC\"\n\tif buf.String() != wantEnc {\n\t\tt.Errorf(\"encoded as %q; want %q\", buf.Bytes(), wantEnc)\n\t}\n\tf, err := fr.ReadFrame()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdf, ok := f.(*DataFrame)\n\tif !ok {\n\t\tt.Fatalf(\"got %T; want *DataFrame\", f)\n\t}\n\tif !bytes.Equal(df.Data(), data) {\n\t\tt.Errorf(\"got %q; want %q\", df.Data(), data)\n\t}\n\tif f.Header().Flags&1 == 0 {\n\t\tt.Errorf(\"didn't see END_STREAM flag\")\n\t}\n}\n\nfunc TestWriteDataPadded(t *testing.T) {\n\ttests := [...]struct {\n\t\tstreamID   uint32\n\t\tendStream  bool\n\t\tdata       []byte\n\t\tpad        []byte\n\t\twantHeader FrameHeader\n\t}{\n\t\t// Unpadded:\n\t\t0: {\n\t\t\tstreamID:  1,\n\t\t\tendStream: true,\n\t\t\tdata:      []byte(\"foo\"),\n\t\t\tpad:       nil,\n\t\t\twantHeader: FrameHeader{\n\t\t\t\tType:     FrameData,\n\t\t\t\tFlags:    
FlagDataEndStream,\n\t\t\t\tLength:   3,\n\t\t\t\tStreamID: 1,\n\t\t\t},\n\t\t},\n\n\t\t// Padded bit set, but no padding:\n\t\t1: {\n\t\t\tstreamID:  1,\n\t\t\tendStream: true,\n\t\t\tdata:      []byte(\"foo\"),\n\t\t\tpad:       []byte{},\n\t\t\twantHeader: FrameHeader{\n\t\t\t\tType:     FrameData,\n\t\t\t\tFlags:    FlagDataEndStream | FlagDataPadded,\n\t\t\t\tLength:   4,\n\t\t\t\tStreamID: 1,\n\t\t\t},\n\t\t},\n\n\t\t// Padded bit set, with padding:\n\t\t2: {\n\t\t\tstreamID:  1,\n\t\t\tendStream: false,\n\t\t\tdata:      []byte(\"foo\"),\n\t\t\tpad:       []byte{0, 0, 0},\n\t\t\twantHeader: FrameHeader{\n\t\t\t\tType:     FrameData,\n\t\t\t\tFlags:    FlagDataPadded,\n\t\t\t\tLength:   7,\n\t\t\t\tStreamID: 1,\n\t\t\t},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tfr, _ := testFramer()\n\t\tfr.WriteDataPadded(tt.streamID, tt.endStream, tt.data, tt.pad)\n\t\tf, err := fr.ReadFrame()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%d. ReadFrame: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tgot := f.Header()\n\t\ttt.wantHeader.valid = true\n\t\tif got != tt.wantHeader {\n\t\t\tt.Errorf(\"%d. read %+v; want %+v\", i, got, tt.wantHeader)\n\t\t\tcontinue\n\t\t}\n\t\tdf := f.(*DataFrame)\n\t\tif !bytes.Equal(df.Data(), tt.data) {\n\t\t\tt.Errorf(\"%d. 
got %q; want %q\", i, df.Data(), tt.data)\n\t\t}\n\t}\n}\n\nfunc TestWriteHeaders(t *testing.T) {\n\ttests := []struct {\n\t\tname      string\n\t\tp         HeadersFrameParam\n\t\twantEnc   string\n\t\twantFrame *HeadersFrame\n\t}{\n\t\t{\n\t\t\t\"basic\",\n\t\t\tHeadersFrameParam{\n\t\t\t\tStreamID:      42,\n\t\t\t\tBlockFragment: []byte(\"abc\"),\n\t\t\t\tPriority:      PriorityParam{},\n\t\t\t},\n\t\t\t\"\\x00\\x00\\x03\\x01\\x00\\x00\\x00\\x00*abc\",\n\t\t\t&HeadersFrame{\n\t\t\t\tFrameHeader: FrameHeader{\n\t\t\t\t\tvalid:    true,\n\t\t\t\t\tStreamID: 42,\n\t\t\t\t\tType:     FrameHeaders,\n\t\t\t\t\tLength:   uint32(len(\"abc\")),\n\t\t\t\t},\n\t\t\t\tPriority:      PriorityParam{},\n\t\t\t\theaderFragBuf: []byte(\"abc\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"basic + end flags\",\n\t\t\tHeadersFrameParam{\n\t\t\t\tStreamID:      42,\n\t\t\t\tBlockFragment: []byte(\"abc\"),\n\t\t\t\tEndStream:     true,\n\t\t\t\tEndHeaders:    true,\n\t\t\t\tPriority:      PriorityParam{},\n\t\t\t},\n\t\t\t\"\\x00\\x00\\x03\\x01\\x05\\x00\\x00\\x00*abc\",\n\t\t\t&HeadersFrame{\n\t\t\t\tFrameHeader: FrameHeader{\n\t\t\t\t\tvalid:    true,\n\t\t\t\t\tStreamID: 42,\n\t\t\t\t\tType:     FrameHeaders,\n\t\t\t\t\tFlags:    FlagHeadersEndStream | FlagHeadersEndHeaders,\n\t\t\t\t\tLength:   uint32(len(\"abc\")),\n\t\t\t\t},\n\t\t\t\tPriority:      PriorityParam{},\n\t\t\t\theaderFragBuf: []byte(\"abc\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"with padding\",\n\t\t\tHeadersFrameParam{\n\t\t\t\tStreamID:      42,\n\t\t\t\tBlockFragment: []byte(\"abc\"),\n\t\t\t\tEndStream:     true,\n\t\t\t\tEndHeaders:    true,\n\t\t\t\tPadLength:     5,\n\t\t\t\tPriority:      PriorityParam{},\n\t\t\t},\n\t\t\t\"\\x00\\x00\\t\\x01\\r\\x00\\x00\\x00*\\x05abc\\x00\\x00\\x00\\x00\\x00\",\n\t\t\t&HeadersFrame{\n\t\t\t\tFrameHeader: FrameHeader{\n\t\t\t\t\tvalid:    true,\n\t\t\t\t\tStreamID: 42,\n\t\t\t\t\tType:     FrameHeaders,\n\t\t\t\t\tFlags:    FlagHeadersEndStream | FlagHeadersEndHeaders | 
FlagHeadersPadded,\n\t\t\t\t\tLength:   uint32(1 + len(\"abc\") + 5), // pad length + contents + padding\n\t\t\t\t},\n\t\t\t\tPriority:      PriorityParam{},\n\t\t\t\theaderFragBuf: []byte(\"abc\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"with priority\",\n\t\t\tHeadersFrameParam{\n\t\t\t\tStreamID:      42,\n\t\t\t\tBlockFragment: []byte(\"abc\"),\n\t\t\t\tEndStream:     true,\n\t\t\t\tEndHeaders:    true,\n\t\t\t\tPadLength:     2,\n\t\t\t\tPriority: PriorityParam{\n\t\t\t\t\tStreamDep: 15,\n\t\t\t\t\tExclusive: true,\n\t\t\t\t\tWeight:    127,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"\\x00\\x00\\v\\x01-\\x00\\x00\\x00*\\x02\\x80\\x00\\x00\\x0f\\u007fabc\\x00\\x00\",\n\t\t\t&HeadersFrame{\n\t\t\t\tFrameHeader: FrameHeader{\n\t\t\t\t\tvalid:    true,\n\t\t\t\t\tStreamID: 42,\n\t\t\t\t\tType:     FrameHeaders,\n\t\t\t\t\tFlags:    FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded | FlagHeadersPriority,\n\t\t\t\t\tLength:   uint32(1 + 5 + len(\"abc\") + 2), // pad length + priority + contents + padding\n\t\t\t\t},\n\t\t\t\tPriority: PriorityParam{\n\t\t\t\t\tStreamDep: 15,\n\t\t\t\t\tExclusive: true,\n\t\t\t\t\tWeight:    127,\n\t\t\t\t},\n\t\t\t\theaderFragBuf: []byte(\"abc\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"with priority stream dep zero\", // golang.org/issue/15444\n\t\t\tHeadersFrameParam{\n\t\t\t\tStreamID:      42,\n\t\t\t\tBlockFragment: []byte(\"abc\"),\n\t\t\t\tEndStream:     true,\n\t\t\t\tEndHeaders:    true,\n\t\t\t\tPadLength:     2,\n\t\t\t\tPriority: PriorityParam{\n\t\t\t\t\tStreamDep: 0,\n\t\t\t\t\tExclusive: true,\n\t\t\t\t\tWeight:    127,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"\\x00\\x00\\v\\x01-\\x00\\x00\\x00*\\x02\\x80\\x00\\x00\\x00\\u007fabc\\x00\\x00\",\n\t\t\t&HeadersFrame{\n\t\t\t\tFrameHeader: FrameHeader{\n\t\t\t\t\tvalid:    true,\n\t\t\t\t\tStreamID: 42,\n\t\t\t\t\tType:     FrameHeaders,\n\t\t\t\t\tFlags:    FlagHeadersEndStream | FlagHeadersEndHeaders | FlagHeadersPadded | FlagHeadersPriority,\n\t\t\t\t\tLength:   uint32(1 + 5 + 
len(\"abc\") + 2), // pad length + priority + contents + padding\n\t\t\t\t},\n\t\t\t\tPriority: PriorityParam{\n\t\t\t\t\tStreamDep: 0,\n\t\t\t\t\tExclusive: true,\n\t\t\t\t\tWeight:    127,\n\t\t\t\t},\n\t\t\t\theaderFragBuf: []byte(\"abc\"),\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tfr, buf := testFramer()\n\t\tif err := fr.WriteHeaders(tt.p); err != nil {\n\t\t\tt.Errorf(\"test %q: %v\", tt.name, err)\n\t\t\tcontinue\n\t\t}\n\t\tif buf.String() != tt.wantEnc {\n\t\t\tt.Errorf(\"test %q: encoded %q; want %q\", tt.name, buf.Bytes(), tt.wantEnc)\n\t\t}\n\t\tf, err := fr.ReadFrame()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"test %q: failed to read the frame back: %v\", tt.name, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(f, tt.wantFrame) {\n\t\t\tt.Errorf(\"test %q: mismatch.\\n got: %#v\\nwant: %#v\\n\", tt.name, f, tt.wantFrame)\n\t\t}\n\t}\n}\n\nfunc TestWriteInvalidStreamDep(t *testing.T) {\n\tfr, _ := testFramer()\n\terr := fr.WriteHeaders(HeadersFrameParam{\n\t\tStreamID: 42,\n\t\tPriority: PriorityParam{\n\t\t\tStreamDep: 1 << 31,\n\t\t},\n\t})\n\tif err != errDepStreamID {\n\t\tt.Errorf(\"header error = %v; want %q\", err, errDepStreamID)\n\t}\n\n\terr = fr.WritePriority(2, PriorityParam{StreamDep: 1 << 31})\n\tif err != errDepStreamID {\n\t\tt.Errorf(\"priority error = %v; want %q\", err, errDepStreamID)\n\t}\n}\n\nfunc TestWriteContinuation(t *testing.T) {\n\tconst streamID = 42\n\ttests := []struct {\n\t\tname string\n\t\tend  bool\n\t\tfrag []byte\n\n\t\twantFrame *ContinuationFrame\n\t}{\n\t\t{\n\t\t\t\"not end\",\n\t\t\tfalse,\n\t\t\t[]byte(\"abc\"),\n\t\t\t&ContinuationFrame{\n\t\t\t\tFrameHeader: FrameHeader{\n\t\t\t\t\tvalid:    true,\n\t\t\t\t\tStreamID: streamID,\n\t\t\t\t\tType:     FrameContinuation,\n\t\t\t\t\tLength:   uint32(len(\"abc\")),\n\t\t\t\t},\n\t\t\t\theaderFragBuf: []byte(\"abc\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"end\",\n\t\t\ttrue,\n\t\t\t[]byte(\"def\"),\n\t\t\t&ContinuationFrame{\n\t\t\t\tFrameHeader: 
FrameHeader{\n\t\t\t\t\tvalid:    true,\n\t\t\t\t\tStreamID: streamID,\n\t\t\t\t\tType:     FrameContinuation,\n\t\t\t\t\tFlags:    FlagContinuationEndHeaders,\n\t\t\t\t\tLength:   uint32(len(\"def\")),\n\t\t\t\t},\n\t\t\t\theaderFragBuf: []byte(\"def\"),\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tfr, _ := testFramer()\n\t\tif err := fr.WriteContinuation(streamID, tt.end, tt.frag); err != nil {\n\t\t\tt.Errorf(\"test %q: %v\", tt.name, err)\n\t\t\tcontinue\n\t\t}\n\t\tfr.AllowIllegalReads = true\n\t\tf, err := fr.ReadFrame()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"test %q: failed to read the frame back: %v\", tt.name, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(f, tt.wantFrame) {\n\t\t\tt.Errorf(\"test %q: mismatch.\\n got: %#v\\nwant: %#v\\n\", tt.name, f, tt.wantFrame)\n\t\t}\n\t}\n}\n\nfunc TestWritePriority(t *testing.T) {\n\tconst streamID = 42\n\ttests := []struct {\n\t\tname      string\n\t\tpriority  PriorityParam\n\t\twantFrame *PriorityFrame\n\t}{\n\t\t{\n\t\t\t\"not exclusive\",\n\t\t\tPriorityParam{\n\t\t\t\tStreamDep: 2,\n\t\t\t\tExclusive: false,\n\t\t\t\tWeight:    127,\n\t\t\t},\n\t\t\t&PriorityFrame{\n\t\t\t\tFrameHeader{\n\t\t\t\t\tvalid:    true,\n\t\t\t\t\tStreamID: streamID,\n\t\t\t\t\tType:     FramePriority,\n\t\t\t\t\tLength:   5,\n\t\t\t\t},\n\t\t\t\tPriorityParam{\n\t\t\t\t\tStreamDep: 2,\n\t\t\t\t\tExclusive: false,\n\t\t\t\t\tWeight:    127,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\t\"exclusive\",\n\t\t\tPriorityParam{\n\t\t\t\tStreamDep: 3,\n\t\t\t\tExclusive: true,\n\t\t\t\tWeight:    77,\n\t\t\t},\n\t\t\t&PriorityFrame{\n\t\t\t\tFrameHeader{\n\t\t\t\t\tvalid:    true,\n\t\t\t\t\tStreamID: streamID,\n\t\t\t\t\tType:     FramePriority,\n\t\t\t\t\tLength:   5,\n\t\t\t\t},\n\t\t\t\tPriorityParam{\n\t\t\t\t\tStreamDep: 3,\n\t\t\t\t\tExclusive: true,\n\t\t\t\t\tWeight:    77,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tfr, _ := testFramer()\n\t\tif err := fr.WritePriority(streamID, 
tt.priority); err != nil {\n\t\t\tt.Errorf(\"test %q: %v\", tt.name, err)\n\t\t\tcontinue\n\t\t}\n\t\tf, err := fr.ReadFrame()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"test %q: failed to read the frame back: %v\", tt.name, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(f, tt.wantFrame) {\n\t\t\tt.Errorf(\"test %q: mismatch.\\n got: %#v\\nwant: %#v\\n\", tt.name, f, tt.wantFrame)\n\t\t}\n\t}\n}\n\nfunc TestWriteSettings(t *testing.T) {\n\tfr, buf := testFramer()\n\tsettings := []Setting{{1, 2}, {3, 4}}\n\tfr.WriteSettings(settings...)\n\tconst wantEnc = \"\\x00\\x00\\f\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x03\\x00\\x00\\x00\\x04\"\n\tif buf.String() != wantEnc {\n\t\tt.Errorf(\"encoded as %q; want %q\", buf.Bytes(), wantEnc)\n\t}\n\tf, err := fr.ReadFrame()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsf, ok := f.(*SettingsFrame)\n\tif !ok {\n\t\tt.Fatalf(\"Got a %T; want a SettingsFrame\", f)\n\t}\n\tvar got []Setting\n\tsf.ForeachSetting(func(s Setting) error {\n\t\tgot = append(got, s)\n\t\tvalBack, ok := sf.Value(s.ID)\n\t\tif !ok || valBack != s.Val {\n\t\t\tt.Errorf(\"Value(%d) = %v, %v; want %v, true\", s.ID, valBack, ok, s.Val)\n\t\t}\n\t\treturn nil\n\t})\n\tif !reflect.DeepEqual(settings, got) {\n\t\tt.Errorf(\"Read settings %+v != written settings %+v\", got, settings)\n\t}\n}\n\nfunc TestWriteSettingsAck(t *testing.T) {\n\tfr, buf := testFramer()\n\tfr.WriteSettingsAck()\n\tconst wantEnc = \"\\x00\\x00\\x00\\x04\\x01\\x00\\x00\\x00\\x00\"\n\tif buf.String() != wantEnc {\n\t\tt.Errorf(\"encoded as %q; want %q\", buf.Bytes(), wantEnc)\n\t}\n}\n\nfunc TestWriteWindowUpdate(t *testing.T) {\n\tfr, buf := testFramer()\n\tconst streamID = 1<<24 + 2<<16 + 3<<8 + 4\n\tconst incr = 7<<24 + 6<<16 + 5<<8 + 4\n\tif err := fr.WriteWindowUpdate(streamID, incr); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tconst wantEnc = \"\\x00\\x00\\x04\\x08\\x00\\x01\\x02\\x03\\x04\\x07\\x06\\x05\\x04\"\n\tif buf.String() != wantEnc 
{\n\t\tt.Errorf(\"encoded as %q; want %q\", buf.Bytes(), wantEnc)\n\t}\n\tf, err := fr.ReadFrame()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\twant := &WindowUpdateFrame{\n\t\tFrameHeader: FrameHeader{\n\t\t\tvalid:    true,\n\t\t\tType:     0x8,\n\t\t\tFlags:    0x0,\n\t\t\tLength:   0x4,\n\t\t\tStreamID: 0x1020304,\n\t\t},\n\t\tIncrement: 0x7060504,\n\t}\n\tif !reflect.DeepEqual(f, want) {\n\t\tt.Errorf(\"parsed back %#v; want %#v\", f, want)\n\t}\n}\n\nfunc TestWritePing(t *testing.T)    { testWritePing(t, false) }\nfunc TestWritePingAck(t *testing.T) { testWritePing(t, true) }\n\nfunc testWritePing(t *testing.T, ack bool) {\n\tfr, buf := testFramer()\n\tif err := fr.WritePing(ack, [8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar wantFlags Flags\n\tif ack {\n\t\twantFlags = FlagPingAck\n\t}\n\tvar wantEnc = \"\\x00\\x00\\x08\\x06\" + string(wantFlags) + \"\\x00\\x00\\x00\\x00\" + \"\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\"\n\tif buf.String() != wantEnc {\n\t\tt.Errorf(\"encoded as %q; want %q\", buf.Bytes(), wantEnc)\n\t}\n\n\tf, err := fr.ReadFrame()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\twant := &PingFrame{\n\t\tFrameHeader: FrameHeader{\n\t\t\tvalid:    true,\n\t\t\tType:     0x6,\n\t\t\tFlags:    wantFlags,\n\t\t\tLength:   0x8,\n\t\t\tStreamID: 0,\n\t\t},\n\t\tData: [8]byte{1, 2, 3, 4, 5, 6, 7, 8},\n\t}\n\tif !reflect.DeepEqual(f, want) {\n\t\tt.Errorf(\"parsed back %#v; want %#v\", f, want)\n\t}\n}\n\nfunc TestReadFrameHeader(t *testing.T) {\n\ttests := []struct {\n\t\tin   string\n\t\twant FrameHeader\n\t}{\n\t\t{in: \"\\x00\\x00\\x00\" + \"\\x00\" + \"\\x00\" + \"\\x00\\x00\\x00\\x00\", want: FrameHeader{}},\n\t\t{in: \"\\x01\\x02\\x03\" + \"\\x04\" + \"\\x05\" + \"\\x06\\x07\\x08\\x09\", want: FrameHeader{\n\t\t\tLength: 66051, Type: 4, Flags: 5, StreamID: 101124105,\n\t\t}},\n\t\t// Ignore high bit:\n\t\t{in: \"\\xff\\xff\\xff\" + \"\\xff\" + \"\\xff\" + \"\\xff\\xff\\xff\\xff\", want: FrameHeader{\n\t\t\tLength: 
16777215, Type: 255, Flags: 255, StreamID: 2147483647}},\n\t\t{in: \"\\xff\\xff\\xff\" + \"\\xff\" + \"\\xff\" + \"\\x7f\\xff\\xff\\xff\", want: FrameHeader{\n\t\t\tLength: 16777215, Type: 255, Flags: 255, StreamID: 2147483647}},\n\t}\n\tfor i, tt := range tests {\n\t\tgot, err := readFrameHeader(make([]byte, 9), strings.NewReader(tt.in))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%d. readFrameHeader(%q) = %v\", i, tt.in, err)\n\t\t\tcontinue\n\t\t}\n\t\ttt.want.valid = true\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"%d. readFrameHeader(%q) = %+v; want %+v\", i, tt.in, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestReadWriteFrameHeader(t *testing.T) {\n\ttests := []struct {\n\t\tlen      uint32\n\t\ttyp      FrameType\n\t\tflags    Flags\n\t\tstreamID uint32\n\t}{\n\t\t{len: 0, typ: 255, flags: 1, streamID: 0},\n\t\t{len: 0, typ: 255, flags: 1, streamID: 1},\n\t\t{len: 0, typ: 255, flags: 1, streamID: 255},\n\t\t{len: 0, typ: 255, flags: 1, streamID: 256},\n\t\t{len: 0, typ: 255, flags: 1, streamID: 65535},\n\t\t{len: 0, typ: 255, flags: 1, streamID: 65536},\n\n\t\t{len: 0, typ: 1, flags: 255, streamID: 1},\n\t\t{len: 255, typ: 1, flags: 255, streamID: 1},\n\t\t{len: 256, typ: 1, flags: 255, streamID: 1},\n\t\t{len: 65535, typ: 1, flags: 255, streamID: 1},\n\t\t{len: 65536, typ: 1, flags: 255, streamID: 1},\n\t\t{len: 16777215, typ: 1, flags: 255, streamID: 1},\n\t}\n\tfor _, tt := range tests {\n\t\tfr, buf := testFramer()\n\t\tfr.startWrite(tt.typ, tt.flags, tt.streamID)\n\t\tfr.writeBytes(make([]byte, tt.len))\n\t\tfr.endWrite()\n\t\tfh, err := ReadFrameHeader(buf)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"ReadFrameHeader(%+v) = %v\", tt, err)\n\t\t\tcontinue\n\t\t}\n\t\tif fh.Type != tt.typ || fh.Flags != tt.flags || fh.Length != tt.len || fh.StreamID != tt.streamID {\n\t\t\tt.Errorf(\"ReadFrameHeader(%+v) = %+v; mismatch\", tt, fh)\n\t\t}\n\t}\n\n}\n\nfunc TestWriteTooLargeFrame(t *testing.T) {\n\tfr, _ := testFramer()\n\tfr.startWrite(0, 1, 
1)\n\tfr.writeBytes(make([]byte, 1<<24))\n\terr := fr.endWrite()\n\tif err != ErrFrameTooLarge {\n\t\tt.Errorf(\"endWrite = %v; want errFrameTooLarge\", err)\n\t}\n}\n\nfunc TestWriteGoAway(t *testing.T) {\n\tconst debug = \"foo\"\n\tfr, buf := testFramer()\n\tif err := fr.WriteGoAway(0x01020304, 0x05060708, []byte(debug)); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tconst wantEnc = \"\\x00\\x00\\v\\a\\x00\\x00\\x00\\x00\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\" + debug\n\tif buf.String() != wantEnc {\n\t\tt.Errorf(\"encoded as %q; want %q\", buf.Bytes(), wantEnc)\n\t}\n\tf, err := fr.ReadFrame()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\twant := &GoAwayFrame{\n\t\tFrameHeader: FrameHeader{\n\t\t\tvalid:    true,\n\t\t\tType:     0x7,\n\t\t\tFlags:    0,\n\t\t\tLength:   uint32(4 + 4 + len(debug)),\n\t\t\tStreamID: 0,\n\t\t},\n\t\tLastStreamID: 0x01020304,\n\t\tErrCode:      0x05060708,\n\t\tdebugData:    []byte(debug),\n\t}\n\tif !reflect.DeepEqual(f, want) {\n\t\tt.Fatalf(\"parsed back:\\n%#v\\nwant:\\n%#v\", f, want)\n\t}\n\tif got := string(f.(*GoAwayFrame).DebugData()); got != debug {\n\t\tt.Errorf(\"debug data = %q; want %q\", got, debug)\n\t}\n}\n\nfunc TestWritePushPromise(t *testing.T) {\n\tpp := PushPromiseParam{\n\t\tStreamID:      42,\n\t\tPromiseID:     42,\n\t\tBlockFragment: []byte(\"abc\"),\n\t}\n\tfr, buf := testFramer()\n\tif err := fr.WritePushPromise(pp); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tconst wantEnc = \"\\x00\\x00\\x07\\x05\\x00\\x00\\x00\\x00*\\x00\\x00\\x00*abc\"\n\tif buf.String() != wantEnc {\n\t\tt.Errorf(\"encoded as %q; want %q\", buf.Bytes(), wantEnc)\n\t}\n\tf, err := fr.ReadFrame()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t_, ok := f.(*PushPromiseFrame)\n\tif !ok {\n\t\tt.Fatalf(\"got %T; want *PushPromiseFrame\", f)\n\t}\n\twant := &PushPromiseFrame{\n\t\tFrameHeader: FrameHeader{\n\t\t\tvalid:    true,\n\t\t\tType:     0x5,\n\t\t\tFlags:    0x0,\n\t\t\tLength:   0x7,\n\t\t\tStreamID: 42,\n\t\t},\n\t\tPromiseID:     
42,\n\t\theaderFragBuf: []byte(\"abc\"),\n\t}\n\tif !reflect.DeepEqual(f, want) {\n\t\tt.Fatalf(\"parsed back:\\n%#v\\nwant:\\n%#v\", f, want)\n\t}\n}\n\n// test checkFrameOrder and that HEADERS and CONTINUATION frames can't be intermingled.\nfunc TestReadFrameOrder(t *testing.T) {\n\thead := func(f *Framer, id uint32, end bool) {\n\t\tf.WriteHeaders(HeadersFrameParam{\n\t\t\tStreamID:      id,\n\t\t\tBlockFragment: []byte(\"foo\"), // unused, but non-empty\n\t\t\tEndHeaders:    end,\n\t\t})\n\t}\n\tcont := func(f *Framer, id uint32, end bool) {\n\t\tf.WriteContinuation(id, end, []byte(\"foo\"))\n\t}\n\n\ttests := [...]struct {\n\t\tname    string\n\t\tw       func(*Framer)\n\t\tatLeast int\n\t\twantErr string\n\t}{\n\t\t0: {\n\t\t\tw: func(f *Framer) {\n\t\t\t\thead(f, 1, true)\n\t\t\t},\n\t\t},\n\t\t1: {\n\t\t\tw: func(f *Framer) {\n\t\t\t\thead(f, 1, true)\n\t\t\t\thead(f, 2, true)\n\t\t\t},\n\t\t},\n\t\t2: {\n\t\t\twantErr: \"got HEADERS for stream 2; expected CONTINUATION following HEADERS for stream 1\",\n\t\t\tw: func(f *Framer) {\n\t\t\t\thead(f, 1, false)\n\t\t\t\thead(f, 2, true)\n\t\t\t},\n\t\t},\n\t\t3: {\n\t\t\twantErr: \"got DATA for stream 1; expected CONTINUATION following HEADERS for stream 1\",\n\t\t\tw: func(f *Framer) {\n\t\t\t\thead(f, 1, false)\n\t\t\t},\n\t\t},\n\t\t4: {\n\t\t\tw: func(f *Framer) {\n\t\t\t\thead(f, 1, false)\n\t\t\t\tcont(f, 1, true)\n\t\t\t\thead(f, 2, true)\n\t\t\t},\n\t\t},\n\t\t5: {\n\t\t\twantErr: \"got CONTINUATION for stream 2; expected stream 1\",\n\t\t\tw: func(f *Framer) {\n\t\t\t\thead(f, 1, false)\n\t\t\t\tcont(f, 2, true)\n\t\t\t\thead(f, 2, true)\n\t\t\t},\n\t\t},\n\t\t6: {\n\t\t\twantErr: \"unexpected CONTINUATION for stream 1\",\n\t\t\tw: func(f *Framer) {\n\t\t\t\tcont(f, 1, true)\n\t\t\t},\n\t\t},\n\t\t7: {\n\t\t\twantErr: \"unexpected CONTINUATION for stream 1\",\n\t\t\tw: func(f *Framer) {\n\t\t\t\tcont(f, 1, false)\n\t\t\t},\n\t\t},\n\t\t8: {\n\t\t\twantErr: \"HEADERS frame with stream ID 0\",\n\t\t\tw: 
func(f *Framer) {\n\t\t\t\thead(f, 0, true)\n\t\t\t},\n\t\t},\n\t\t9: {\n\t\t\twantErr: \"CONTINUATION frame with stream ID 0\",\n\t\t\tw: func(f *Framer) {\n\t\t\t\tcont(f, 0, true)\n\t\t\t},\n\t\t},\n\t\t10: {\n\t\t\twantErr: \"unexpected CONTINUATION for stream 1\",\n\t\t\tatLeast: 5,\n\t\t\tw: func(f *Framer) {\n\t\t\t\thead(f, 1, false)\n\t\t\t\tcont(f, 1, false)\n\t\t\t\tcont(f, 1, false)\n\t\t\t\tcont(f, 1, false)\n\t\t\t\tcont(f, 1, true)\n\t\t\t\tcont(f, 1, false)\n\t\t\t},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tbuf := new(bytes.Buffer)\n\t\tf := NewFramer(buf, buf)\n\t\tf.AllowIllegalWrites = true\n\t\ttt.w(f)\n\t\tf.WriteData(1, true, nil) // to test transition away from last step\n\n\t\tvar err error\n\t\tn := 0\n\t\tvar log bytes.Buffer\n\t\tfor {\n\t\t\tvar got Frame\n\t\t\tgot, err = f.ReadFrame()\n\t\t\tfmt.Fprintf(&log, \"  read %v, %v\\n\", got, err)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tn++\n\t\t}\n\t\tif err == io.EOF {\n\t\t\terr = nil\n\t\t}\n\t\tok := tt.wantErr == \"\"\n\t\tif ok && err != nil {\n\t\t\tt.Errorf(\"%d. after %d good frames, ReadFrame = %v; want success\\n%s\", i, n, err, log.Bytes())\n\t\t\tcontinue\n\t\t}\n\t\tif !ok && err != ConnectionError(ErrCodeProtocol) {\n\t\t\tt.Errorf(\"%d. after %d good frames, ReadFrame = %v; want ConnectionError(ErrCodeProtocol)\\n%s\", i, n, err, log.Bytes())\n\t\t\tcontinue\n\t\t}\n\t\tif !((f.errDetail == nil && tt.wantErr == \"\") || (fmt.Sprint(f.errDetail) == tt.wantErr)) {\n\t\t\tt.Errorf(\"%d. framer eror = %q; want %q\\n%s\", i, f.errDetail, tt.wantErr, log.Bytes())\n\t\t}\n\t\tif n < tt.atLeast {\n\t\t\tt.Errorf(\"%d. 
framer only read %d frames; want at least %d\\n%s\", i, n, tt.atLeast, log.Bytes())\n\t\t}\n\t}\n}\n\nfunc TestMetaFrameHeader(t *testing.T) {\n\twrite := func(f *Framer, frags ...[]byte) {\n\t\tfor i, frag := range frags {\n\t\t\tend := (i == len(frags)-1)\n\t\t\tif i == 0 {\n\t\t\t\tf.WriteHeaders(HeadersFrameParam{\n\t\t\t\t\tStreamID:      1,\n\t\t\t\t\tBlockFragment: frag,\n\t\t\t\t\tEndHeaders:    end,\n\t\t\t\t})\n\t\t\t} else {\n\t\t\t\tf.WriteContinuation(1, end, frag)\n\t\t\t}\n\t\t}\n\t}\n\n\twant := func(flags Flags, length uint32, pairs ...string) *MetaHeadersFrame {\n\t\tmh := &MetaHeadersFrame{\n\t\t\tHeadersFrame: &HeadersFrame{\n\t\t\t\tFrameHeader: FrameHeader{\n\t\t\t\t\tType:     FrameHeaders,\n\t\t\t\t\tFlags:    flags,\n\t\t\t\t\tLength:   length,\n\t\t\t\t\tStreamID: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t\tFields: []hpack.HeaderField(nil),\n\t\t}\n\t\tfor len(pairs) > 0 {\n\t\t\tmh.Fields = append(mh.Fields, hpack.HeaderField{\n\t\t\t\tName:  pairs[0],\n\t\t\t\tValue: pairs[1],\n\t\t\t})\n\t\t\tpairs = pairs[2:]\n\t\t}\n\t\treturn mh\n\t}\n\ttruncated := func(mh *MetaHeadersFrame) *MetaHeadersFrame {\n\t\tmh.Truncated = true\n\t\treturn mh\n\t}\n\n\tconst noFlags Flags = 0\n\n\toneKBString := strings.Repeat(\"a\", 1<<10)\n\n\ttests := [...]struct {\n\t\tname              string\n\t\tw                 func(*Framer)\n\t\twant              interface{} // *MetaHeaderFrame or error\n\t\twantErrReason     string\n\t\tmaxHeaderListSize uint32\n\t}{\n\t\t0: {\n\t\t\tname: \"single_headers\",\n\t\t\tw: func(f *Framer) {\n\t\t\t\tvar he hpackEncoder\n\t\t\t\tall := he.encodeHeaderRaw(t, \":method\", \"GET\", \":path\", \"/\")\n\t\t\t\twrite(f, all)\n\t\t\t},\n\t\t\twant: want(FlagHeadersEndHeaders, 2, \":method\", \"GET\", \":path\", \"/\"),\n\t\t},\n\t\t1: {\n\t\t\tname: \"with_continuation\",\n\t\t\tw: func(f *Framer) {\n\t\t\t\tvar he hpackEncoder\n\t\t\t\tall := he.encodeHeaderRaw(t, \":method\", \"GET\", \":path\", \"/\", \"foo\", 
\"bar\")\n\t\t\t\twrite(f, all[:1], all[1:])\n\t\t\t},\n\t\t\twant: want(noFlags, 1, \":method\", \"GET\", \":path\", \"/\", \"foo\", \"bar\"),\n\t\t},\n\t\t2: {\n\t\t\tname: \"with_two_continuation\",\n\t\t\tw: func(f *Framer) {\n\t\t\t\tvar he hpackEncoder\n\t\t\t\tall := he.encodeHeaderRaw(t, \":method\", \"GET\", \":path\", \"/\", \"foo\", \"bar\")\n\t\t\t\twrite(f, all[:2], all[2:4], all[4:])\n\t\t\t},\n\t\t\twant: want(noFlags, 2, \":method\", \"GET\", \":path\", \"/\", \"foo\", \"bar\"),\n\t\t},\n\t\t3: {\n\t\t\tname: \"big_string_okay\",\n\t\t\tw: func(f *Framer) {\n\t\t\t\tvar he hpackEncoder\n\t\t\t\tall := he.encodeHeaderRaw(t, \":method\", \"GET\", \":path\", \"/\", \"foo\", oneKBString)\n\t\t\t\twrite(f, all[:2], all[2:])\n\t\t\t},\n\t\t\twant: want(noFlags, 2, \":method\", \"GET\", \":path\", \"/\", \"foo\", oneKBString),\n\t\t},\n\t\t4: {\n\t\t\tname: \"big_string_error\",\n\t\t\tw: func(f *Framer) {\n\t\t\t\tvar he hpackEncoder\n\t\t\t\tall := he.encodeHeaderRaw(t, \":method\", \"GET\", \":path\", \"/\", \"foo\", oneKBString)\n\t\t\t\twrite(f, all[:2], all[2:])\n\t\t\t},\n\t\t\tmaxHeaderListSize: (1 << 10) / 2,\n\t\t\twant:              ConnectionError(ErrCodeCompression),\n\t\t},\n\t\t5: {\n\t\t\tname: \"max_header_list_truncated\",\n\t\t\tw: func(f *Framer) {\n\t\t\t\tvar he hpackEncoder\n\t\t\t\tvar pairs = []string{\":method\", \"GET\", \":path\", \"/\"}\n\t\t\t\tfor i := 0; i < 100; i++ {\n\t\t\t\t\tpairs = append(pairs, \"foo\", \"bar\")\n\t\t\t\t}\n\t\t\t\tall := he.encodeHeaderRaw(t, pairs...)\n\t\t\t\twrite(f, all[:2], all[2:])\n\t\t\t},\n\t\t\tmaxHeaderListSize: (1 << 10) / 2,\n\t\t\twant: truncated(want(noFlags, 2,\n\t\t\t\t\":method\", \"GET\",\n\t\t\t\t\":path\", \"/\",\n\t\t\t\t\"foo\", \"bar\",\n\t\t\t\t\"foo\", \"bar\",\n\t\t\t\t\"foo\", \"bar\",\n\t\t\t\t\"foo\", \"bar\",\n\t\t\t\t\"foo\", \"bar\",\n\t\t\t\t\"foo\", \"bar\",\n\t\t\t\t\"foo\", \"bar\",\n\t\t\t\t\"foo\", \"bar\",\n\t\t\t\t\"foo\", \"bar\",\n\t\t\t\t\"foo\", 
\"bar\",\n\t\t\t\t\"foo\", \"bar\", // 11\n\t\t\t)),\n\t\t},\n\t\t6: {\n\t\t\tname: \"pseudo_order\",\n\t\t\tw: func(f *Framer) {\n\t\t\t\twrite(f, encodeHeaderRaw(t,\n\t\t\t\t\t\":method\", \"GET\",\n\t\t\t\t\t\"foo\", \"bar\",\n\t\t\t\t\t\":path\", \"/\", // bogus\n\t\t\t\t))\n\t\t\t},\n\t\t\twant:          streamError(1, ErrCodeProtocol),\n\t\t\twantErrReason: \"pseudo header field after regular\",\n\t\t},\n\t\t7: {\n\t\t\tname: \"pseudo_unknown\",\n\t\t\tw: func(f *Framer) {\n\t\t\t\twrite(f, encodeHeaderRaw(t,\n\t\t\t\t\t\":unknown\", \"foo\", // bogus\n\t\t\t\t\t\"foo\", \"bar\",\n\t\t\t\t))\n\t\t\t},\n\t\t\twant:          streamError(1, ErrCodeProtocol),\n\t\t\twantErrReason: \"invalid pseudo-header \\\":unknown\\\"\",\n\t\t},\n\t\t8: {\n\t\t\tname: \"pseudo_mix_request_response\",\n\t\t\tw: func(f *Framer) {\n\t\t\t\twrite(f, encodeHeaderRaw(t,\n\t\t\t\t\t\":method\", \"GET\",\n\t\t\t\t\t\":status\", \"100\",\n\t\t\t\t))\n\t\t\t},\n\t\t\twant:          streamError(1, ErrCodeProtocol),\n\t\t\twantErrReason: \"mix of request and response pseudo headers\",\n\t\t},\n\t\t9: {\n\t\t\tname: \"pseudo_dup\",\n\t\t\tw: func(f *Framer) {\n\t\t\t\twrite(f, encodeHeaderRaw(t,\n\t\t\t\t\t\":method\", \"GET\",\n\t\t\t\t\t\":method\", \"POST\",\n\t\t\t\t))\n\t\t\t},\n\t\t\twant:          streamError(1, ErrCodeProtocol),\n\t\t\twantErrReason: \"duplicate pseudo-header \\\":method\\\"\",\n\t\t},\n\t\t10: {\n\t\t\tname: \"trailer_okay_no_pseudo\",\n\t\t\tw:    func(f *Framer) { write(f, encodeHeaderRaw(t, \"foo\", \"bar\")) },\n\t\t\twant: want(FlagHeadersEndHeaders, 8, \"foo\", \"bar\"),\n\t\t},\n\t\t11: {\n\t\t\tname:          \"invalid_field_name\",\n\t\t\tw:             func(f *Framer) { write(f, encodeHeaderRaw(t, \"CapitalBad\", \"x\")) },\n\t\t\twant:          streamError(1, ErrCodeProtocol),\n\t\t\twantErrReason: \"invalid header field name \\\"CapitalBad\\\"\",\n\t\t},\n\t\t12: {\n\t\t\tname:          \"invalid_field_value\",\n\t\t\tw:             func(f *Framer) { 
write(f, encodeHeaderRaw(t, \"key\", \"bad_null\\x00\")) },\n\t\t\twant:          streamError(1, ErrCodeProtocol),\n\t\t\twantErrReason: \"invalid header field value \\\"bad_null\\\\x00\\\"\",\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tbuf := new(bytes.Buffer)\n\t\tf := NewFramer(buf, buf)\n\t\tf.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)\n\t\tf.MaxHeaderListSize = tt.maxHeaderListSize\n\t\ttt.w(f)\n\n\t\tname := tt.name\n\t\tif name == \"\" {\n\t\t\tname = fmt.Sprintf(\"test index %d\", i)\n\t\t}\n\n\t\tvar got interface{}\n\t\tvar err error\n\t\tgot, err = f.ReadFrame()\n\t\tif err != nil {\n\t\t\tgot = err\n\n\t\t\t// Ignore the StreamError.Cause field, if it matches the wantErrReason.\n\t\t\t// The test table above predates the Cause field.\n\t\t\tif se, ok := err.(StreamError); ok && se.Cause != nil && se.Cause.Error() == tt.wantErrReason {\n\t\t\t\tse.Cause = nil\n\t\t\t\tgot = se\n\t\t\t}\n\t\t}\n\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\tif mhg, ok := got.(*MetaHeadersFrame); ok {\n\t\t\t\tif mhw, ok := tt.want.(*MetaHeadersFrame); ok {\n\t\t\t\t\thg := mhg.HeadersFrame\n\t\t\t\t\thw := mhw.HeadersFrame\n\t\t\t\t\tif hg != nil && hw != nil && !reflect.DeepEqual(*hg, *hw) {\n\t\t\t\t\t\tt.Errorf(\"%s: headers differ:\\n got: %+v\\nwant: %+v\\n\", name, *hg, *hw)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tstr := func(v interface{}) string {\n\t\t\t\tif _, ok := v.(error); ok {\n\t\t\t\t\treturn fmt.Sprintf(\"error %v\", v)\n\t\t\t\t} else {\n\t\t\t\t\treturn fmt.Sprintf(\"value %#v\", v)\n\t\t\t\t}\n\t\t\t}\n\t\t\tt.Errorf(\"%s:\\n got: %v\\nwant: %s\", name, str(got), str(tt.want))\n\t\t}\n\t\tif tt.wantErrReason != \"\" && tt.wantErrReason != fmt.Sprint(f.errDetail) {\n\t\t\tt.Errorf(\"%s: got error reason %q; want %q\", name, f.errDetail, tt.wantErrReason)\n\t\t}\n\t}\n}\n\nfunc TestSetReuseFrames(t *testing.T) {\n\tfr, buf := testFramer()\n\tfr.SetReuseFrames()\n\n\t// Check that DataFrames are reused. 
Note that\n\t// SetReuseFrames only currently implements reuse of DataFrames.\n\tfirstDf := readAndVerifyDataFrame(\"ABC\", 3, fr, buf, t)\n\n\tfor i := 0; i < 10; i++ {\n\t\tdf := readAndVerifyDataFrame(\"XYZ\", 3, fr, buf, t)\n\t\tif df != firstDf {\n\t\t\tt.Errorf(\"Expected Framer to return references to the same DataFrame. Have %v and %v\", &df, &firstDf)\n\t\t}\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\tdf := readAndVerifyDataFrame(\"\", 0, fr, buf, t)\n\t\tif df != firstDf {\n\t\t\tt.Errorf(\"Expected Framer to return references to the same DataFrame. Have %v and %v\", &df, &firstDf)\n\t\t}\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\tdf := readAndVerifyDataFrame(\"HHH\", 3, fr, buf, t)\n\t\tif df != firstDf {\n\t\t\tt.Errorf(\"Expected Framer to return references to the same DataFrame. Have %v and %v\", &df, &firstDf)\n\t\t}\n\t}\n}\n\nfunc TestSetReuseFramesMoreThanOnce(t *testing.T) {\n\tfr, buf := testFramer()\n\tfr.SetReuseFrames()\n\n\tfirstDf := readAndVerifyDataFrame(\"ABC\", 3, fr, buf, t)\n\tfr.SetReuseFrames()\n\n\tfor i := 0; i < 10; i++ {\n\t\tdf := readAndVerifyDataFrame(\"XYZ\", 3, fr, buf, t)\n\t\t// SetReuseFrames should be idempotent\n\t\tfr.SetReuseFrames()\n\t\tif df != firstDf {\n\t\t\tt.Errorf(\"Expected Framer to return references to the same DataFrame. 
Have %v and %v\", &df, &firstDf)\n\t\t}\n\t}\n}\n\nfunc TestNoSetReuseFrames(t *testing.T) {\n\tfr, buf := testFramer()\n\tconst numNewDataFrames = 10\n\tdfSoFar := make([]interface{}, numNewDataFrames)\n\n\t// Check that DataFrames are not reused if SetReuseFrames wasn't called.\n\t// SetReuseFrames only currently implements reuse of DataFrames.\n\tfor i := 0; i < numNewDataFrames; i++ {\n\t\tdf := readAndVerifyDataFrame(\"XYZ\", 3, fr, buf, t)\n\t\tfor _, item := range dfSoFar {\n\t\t\tif df == item {\n\t\t\t\tt.Errorf(\"Expected Framer to return new DataFrames since SetNoReuseFrames not set.\")\n\t\t\t}\n\t\t}\n\t\tdfSoFar[i] = df\n\t}\n}\n\nfunc readAndVerifyDataFrame(data string, length byte, fr *Framer, buf *bytes.Buffer, t *testing.T) *DataFrame {\n\tvar streamID uint32 = 1<<24 + 2<<16 + 3<<8 + 4\n\tfr.WriteData(streamID, true, []byte(data))\n\twantEnc := \"\\x00\\x00\" + string(length) + \"\\x00\\x01\\x01\\x02\\x03\\x04\" + data\n\tif buf.String() != wantEnc {\n\t\tt.Errorf(\"encoded as %q; want %q\", buf.Bytes(), wantEnc)\n\t}\n\tf, err := fr.ReadFrame()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdf, ok := f.(*DataFrame)\n\tif !ok {\n\t\tt.Fatalf(\"got %T; want *DataFrame\", f)\n\t}\n\tif !bytes.Equal(df.Data(), []byte(data)) {\n\t\tt.Errorf(\"got %q; want %q\", df.Data(), []byte(data))\n\t}\n\tif f.Header().Flags&1 == 0 {\n\t\tt.Errorf(\"didn't see END_STREAM flag\")\n\t}\n\treturn df\n}\n\nfunc encodeHeaderRaw(t *testing.T, pairs ...string) []byte {\n\tvar he hpackEncoder\n\treturn he.encodeHeaderRaw(t, pairs...)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/go16.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.6\n\npackage http2\n\nimport (\n\t\"net/http\"\n\t\"time\"\n)\n\nfunc transportExpectContinueTimeout(t1 *http.Transport) time.Duration {\n\treturn t1.ExpectContinueTimeout\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/go17.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.7\n\npackage http2\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httptrace\"\n\t\"time\"\n)\n\ntype contextContext interface {\n\tcontext.Context\n}\n\nfunc serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {\n\tctx, cancel = context.WithCancel(context.Background())\n\tctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())\n\tif hs := opts.baseConfig(); hs != nil {\n\t\tctx = context.WithValue(ctx, http.ServerContextKey, hs)\n\t}\n\treturn\n}\n\nfunc contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {\n\treturn context.WithCancel(ctx)\n}\n\nfunc requestWithContext(req *http.Request, ctx contextContext) *http.Request {\n\treturn req.WithContext(ctx)\n}\n\ntype clientTrace httptrace.ClientTrace\n\nfunc reqContext(r *http.Request) context.Context { return r.Context() }\n\nfunc (t *Transport) idleConnTimeout() time.Duration {\n\tif t.t1 != nil {\n\t\treturn t.t1.IdleConnTimeout\n\t}\n\treturn 0\n}\n\nfunc setResponseUncompressed(res *http.Response) { res.Uncompressed = true }\n\nfunc traceGotConn(req *http.Request, cc *ClientConn) {\n\ttrace := httptrace.ContextClientTrace(req.Context())\n\tif trace == nil || trace.GotConn == nil {\n\t\treturn\n\t}\n\tci := httptrace.GotConnInfo{Conn: cc.tconn}\n\tcc.mu.Lock()\n\tci.Reused = cc.nextStreamID > 1\n\tci.WasIdle = len(cc.streams) == 0 && ci.Reused\n\tif ci.WasIdle && !cc.lastActive.IsZero() {\n\t\tci.IdleTime = time.Now().Sub(cc.lastActive)\n\t}\n\tcc.mu.Unlock()\n\n\ttrace.GotConn(ci)\n}\n\nfunc traceWroteHeaders(trace *clientTrace) {\n\tif trace != nil && trace.WroteHeaders != nil {\n\t\ttrace.WroteHeaders()\n\t}\n}\n\nfunc traceGot100Continue(trace *clientTrace) {\n\tif trace != nil && trace.Got100Continue != nil 
{\n\t\ttrace.Got100Continue()\n\t}\n}\n\nfunc traceWait100Continue(trace *clientTrace) {\n\tif trace != nil && trace.Wait100Continue != nil {\n\t\ttrace.Wait100Continue()\n\t}\n}\n\nfunc traceWroteRequest(trace *clientTrace, err error) {\n\tif trace != nil && trace.WroteRequest != nil {\n\t\ttrace.WroteRequest(httptrace.WroteRequestInfo{Err: err})\n\t}\n}\n\nfunc traceFirstResponseByte(trace *clientTrace) {\n\tif trace != nil && trace.GotFirstResponseByte != nil {\n\t\ttrace.GotFirstResponseByte()\n\t}\n}\n\nfunc requestTrace(req *http.Request) *clientTrace {\n\ttrace := httptrace.ContextClientTrace(req.Context())\n\treturn (*clientTrace)(trace)\n}\n\n// Ping sends a PING frame to the server and waits for the ack.\nfunc (cc *ClientConn) Ping(ctx context.Context) error {\n\treturn cc.ping(ctx)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/go17_not18.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.7,!go1.8\n\npackage http2\n\nimport \"crypto/tls\"\n\n// temporary copy of Go 1.7's private tls.Config.clone:\nfunc cloneTLSConfig(c *tls.Config) *tls.Config {\n\treturn &tls.Config{\n\t\tRand:                        c.Rand,\n\t\tTime:                        c.Time,\n\t\tCertificates:                c.Certificates,\n\t\tNameToCertificate:           c.NameToCertificate,\n\t\tGetCertificate:              c.GetCertificate,\n\t\tRootCAs:                     c.RootCAs,\n\t\tNextProtos:                  c.NextProtos,\n\t\tServerName:                  c.ServerName,\n\t\tClientAuth:                  c.ClientAuth,\n\t\tClientCAs:                   c.ClientCAs,\n\t\tInsecureSkipVerify:          c.InsecureSkipVerify,\n\t\tCipherSuites:                c.CipherSuites,\n\t\tPreferServerCipherSuites:    c.PreferServerCipherSuites,\n\t\tSessionTicketsDisabled:      c.SessionTicketsDisabled,\n\t\tSessionTicketKey:            c.SessionTicketKey,\n\t\tClientSessionCache:          c.ClientSessionCache,\n\t\tMinVersion:                  c.MinVersion,\n\t\tMaxVersion:                  c.MaxVersion,\n\t\tCurvePreferences:            c.CurvePreferences,\n\t\tDynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,\n\t\tRenegotiation:               c.Renegotiation,\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/go18.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.8\n\npackage http2\n\nimport (\n\t\"crypto/tls\"\n\t\"io\"\n\t\"net/http\"\n)\n\nfunc cloneTLSConfig(c *tls.Config) *tls.Config {\n\tc2 := c.Clone()\n\tc2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264\n\treturn c2\n}\n\nvar _ http.Pusher = (*responseWriter)(nil)\n\n// Push implements http.Pusher.\nfunc (w *responseWriter) Push(target string, opts *http.PushOptions) error {\n\tinternalOpts := pushOptions{}\n\tif opts != nil {\n\t\tinternalOpts.Method = opts.Method\n\t\tinternalOpts.Header = opts.Header\n\t}\n\treturn w.push(target, internalOpts)\n}\n\nfunc configureServer18(h1 *http.Server, h2 *Server) error {\n\tif h2.IdleTimeout == 0 {\n\t\tif h1.IdleTimeout != 0 {\n\t\t\th2.IdleTimeout = h1.IdleTimeout\n\t\t} else {\n\t\t\th2.IdleTimeout = h1.ReadTimeout\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc shouldLogPanic(panicValue interface{}) bool {\n\treturn panicValue != nil && panicValue != http.ErrAbortHandler\n}\n\nfunc reqGetBody(req *http.Request) func() (io.ReadCloser, error) {\n\treturn req.GetBody\n}\n\nfunc reqBodyIsNoBody(body io.ReadCloser) bool {\n\treturn body == http.NoBody\n}\n\nfunc go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/go18_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.8\n\npackage http2\n\nimport (\n\t\"crypto/tls\"\n\t\"net/http\"\n\t\"testing\"\n\t\"time\"\n)\n\n// Tests that http2.Server.IdleTimeout is initialized from\n// http.Server.{Idle,Read}Timeout. http.Server.IdleTimeout was\n// added in Go 1.8.\nfunc TestConfigureServerIdleTimeout_Go18(t *testing.T) {\n\tconst timeout = 5 * time.Second\n\tconst notThisOne = 1 * time.Second\n\n\t// With a zero http2.Server, verify that it copies IdleTimeout:\n\t{\n\t\ts1 := &http.Server{\n\t\t\tIdleTimeout: timeout,\n\t\t\tReadTimeout: notThisOne,\n\t\t}\n\t\ts2 := &Server{}\n\t\tif err := ConfigureServer(s1, s2); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif s2.IdleTimeout != timeout {\n\t\t\tt.Errorf(\"s2.IdleTimeout = %v; want %v\", s2.IdleTimeout, timeout)\n\t\t}\n\t}\n\n\t// And that it falls back to ReadTimeout:\n\t{\n\t\ts1 := &http.Server{\n\t\t\tReadTimeout: timeout,\n\t\t}\n\t\ts2 := &Server{}\n\t\tif err := ConfigureServer(s1, s2); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif s2.IdleTimeout != timeout {\n\t\t\tt.Errorf(\"s2.IdleTimeout = %v; want %v\", s2.IdleTimeout, timeout)\n\t\t}\n\t}\n\n\t// Verify that s1's IdleTimeout doesn't overwrite an existing setting:\n\t{\n\t\ts1 := &http.Server{\n\t\t\tIdleTimeout: notThisOne,\n\t\t}\n\t\ts2 := &Server{\n\t\t\tIdleTimeout: timeout,\n\t\t}\n\t\tif err := ConfigureServer(s1, s2); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif s2.IdleTimeout != timeout {\n\t\t\tt.Errorf(\"s2.IdleTimeout = %v; want %v\", s2.IdleTimeout, timeout)\n\t\t}\n\t}\n}\n\nfunc TestCertClone(t *testing.T) {\n\tc := &tls.Config{\n\t\tGetClientCertificate: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {\n\t\t\tpanic(\"shouldn't be called\")\n\t\t},\n\t}\n\tc2 := cloneTLSConfig(c)\n\tif c2.GetClientCertificate == nil 
{\n\t\tt.Error(\"GetClientCertificate is nil\")\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/go19.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.9\n\npackage http2\n\nimport (\n\t\"net/http\"\n)\n\nfunc configureServer19(s *http.Server, conf *Server) error {\n\ts.RegisterOnShutdown(conf.state.startGracefulShutdown)\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/go19_test.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.9\n\npackage http2\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestServerGracefulShutdown(t *testing.T) {\n\tvar st *serverTester\n\thandlerDone := make(chan struct{})\n\tst = newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer close(handlerDone)\n\t\tgo st.ts.Config.Shutdown(context.Background())\n\n\t\tga := st.wantGoAway()\n\t\tif ga.ErrCode != ErrCodeNo {\n\t\t\tt.Errorf(\"GOAWAY error = %v; want ErrCodeNo\", ga.ErrCode)\n\t\t}\n\t\tif ga.LastStreamID != 1 {\n\t\t\tt.Errorf(\"GOAWAY LastStreamID = %v; want 1\", ga.LastStreamID)\n\t\t}\n\n\t\tw.Header().Set(\"x-foo\", \"bar\")\n\t})\n\tdefer st.Close()\n\n\tst.greet()\n\tst.bodylessReq1()\n\n\tselect {\n\tcase <-handlerDone:\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatalf(\"server did not shutdown?\")\n\t}\n\thf := st.wantHeaders()\n\tgoth := st.decodeHeader(hf.HeaderBlockFragment())\n\twanth := [][2]string{\n\t\t{\":status\", \"200\"},\n\t\t{\"x-foo\", \"bar\"},\n\t\t{\"content-type\", \"text/plain; charset=utf-8\"},\n\t\t{\"content-length\", \"0\"},\n\t}\n\tif !reflect.DeepEqual(goth, wanth) {\n\t\tt.Errorf(\"Got headers %v; want %v\", goth, wanth)\n\t}\n\n\tn, err := st.cc.Read([]byte{0})\n\tif n != 0 || err == nil {\n\t\tt.Errorf(\"Read = %v, %v; want 0, non-nil\", n, err)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/gotrack.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Defensive debug-only utility to track that functions run on the\n// goroutine that they're supposed to.\n\npackage http2\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"sync\"\n)\n\nvar DebugGoroutines = os.Getenv(\"DEBUG_HTTP2_GOROUTINES\") == \"1\"\n\ntype goroutineLock uint64\n\nfunc newGoroutineLock() goroutineLock {\n\tif !DebugGoroutines {\n\t\treturn 0\n\t}\n\treturn goroutineLock(curGoroutineID())\n}\n\nfunc (g goroutineLock) check() {\n\tif !DebugGoroutines {\n\t\treturn\n\t}\n\tif curGoroutineID() != uint64(g) {\n\t\tpanic(\"running on the wrong goroutine\")\n\t}\n}\n\nfunc (g goroutineLock) checkNotOn() {\n\tif !DebugGoroutines {\n\t\treturn\n\t}\n\tif curGoroutineID() == uint64(g) {\n\t\tpanic(\"running on the wrong goroutine\")\n\t}\n}\n\nvar goroutineSpace = []byte(\"goroutine \")\n\nfunc curGoroutineID() uint64 {\n\tbp := littleBuf.Get().(*[]byte)\n\tdefer littleBuf.Put(bp)\n\tb := *bp\n\tb = b[:runtime.Stack(b, false)]\n\t// Parse the 4707 out of \"goroutine 4707 [\"\n\tb = bytes.TrimPrefix(b, goroutineSpace)\n\ti := bytes.IndexByte(b, ' ')\n\tif i < 0 {\n\t\tpanic(fmt.Sprintf(\"No space found in %q\", b))\n\t}\n\tb = b[:i]\n\tn, err := parseUintBytes(b, 10, 64)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to parse goroutine ID out of %q: %v\", b, err))\n\t}\n\treturn n\n}\n\nvar littleBuf = sync.Pool{\n\tNew: func() interface{} {\n\t\tbuf := make([]byte, 64)\n\t\treturn &buf\n\t},\n}\n\n// parseUintBytes is like strconv.ParseUint, but using a []byte.\nfunc parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {\n\tvar cutoff, maxVal uint64\n\n\tif bitSize == 0 {\n\t\tbitSize = int(strconv.IntSize)\n\t}\n\n\ts0 := s\n\tswitch {\n\tcase len(s) < 1:\n\t\terr = strconv.ErrSyntax\n\t\tgoto Error\n\n\tcase 
2 <= base && base <= 36:\n\t\t// valid base; nothing to do\n\n\tcase base == 0:\n\t\t// Look for octal, hex prefix.\n\t\tswitch {\n\t\tcase s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):\n\t\t\tbase = 16\n\t\t\ts = s[2:]\n\t\t\tif len(s) < 1 {\n\t\t\t\terr = strconv.ErrSyntax\n\t\t\t\tgoto Error\n\t\t\t}\n\t\tcase s[0] == '0':\n\t\t\tbase = 8\n\t\tdefault:\n\t\t\tbase = 10\n\t\t}\n\n\tdefault:\n\t\terr = errors.New(\"invalid base \" + strconv.Itoa(base))\n\t\tgoto Error\n\t}\n\n\tn = 0\n\tcutoff = cutoff64(base)\n\tmaxVal = 1<<uint(bitSize) - 1\n\n\tfor i := 0; i < len(s); i++ {\n\t\tvar v byte\n\t\td := s[i]\n\t\tswitch {\n\t\tcase '0' <= d && d <= '9':\n\t\t\tv = d - '0'\n\t\tcase 'a' <= d && d <= 'z':\n\t\t\tv = d - 'a' + 10\n\t\tcase 'A' <= d && d <= 'Z':\n\t\t\tv = d - 'A' + 10\n\t\tdefault:\n\t\t\tn = 0\n\t\t\terr = strconv.ErrSyntax\n\t\t\tgoto Error\n\t\t}\n\t\tif int(v) >= base {\n\t\t\tn = 0\n\t\t\terr = strconv.ErrSyntax\n\t\t\tgoto Error\n\t\t}\n\n\t\tif n >= cutoff {\n\t\t\t// n*base overflows\n\t\t\tn = 1<<64 - 1\n\t\t\terr = strconv.ErrRange\n\t\t\tgoto Error\n\t\t}\n\t\tn *= uint64(base)\n\n\t\tn1 := n + uint64(v)\n\t\tif n1 < n || n1 > maxVal {\n\t\t\t// n+v overflows\n\t\t\tn = 1<<64 - 1\n\t\t\terr = strconv.ErrRange\n\t\t\tgoto Error\n\t\t}\n\t\tn = n1\n\t}\n\n\treturn n, nil\n\nError:\n\treturn n, &strconv.NumError{Func: \"ParseUint\", Num: string(s0), Err: err}\n}\n\n// Return the first number n such that n*base >= 1<<64.\nfunc cutoff64(base int) uint64 {\n\tif base < 2 {\n\t\treturn 0\n\t}\n\treturn (1<<64-1)/uint64(base) + 1\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/gotrack_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestGoroutineLock(t *testing.T) {\n\toldDebug := DebugGoroutines\n\tDebugGoroutines = true\n\tdefer func() { DebugGoroutines = oldDebug }()\n\n\tg := newGoroutineLock()\n\tg.check()\n\n\tsawPanic := make(chan interface{})\n\tgo func() {\n\t\tdefer func() { sawPanic <- recover() }()\n\t\tg.check() // should panic\n\t}()\n\te := <-sawPanic\n\tif e == nil {\n\t\tt.Fatal(\"did not see panic from check in other goroutine\")\n\t}\n\tif !strings.Contains(fmt.Sprint(e), \"wrong goroutine\") {\n\t\tt.Errorf(\"expected on see panic about running on the wrong goroutine; got %v\", e)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/h2i/README.md",
    "content": "# h2i\n\n**h2i** is an interactive HTTP/2 (\"h2\") console debugger. Miss the good ol'\ndays of telnetting to your HTTP/1.n servers? We're bringing you\nback.\n\nFeatures:\n- send raw HTTP/2 frames\n - PING\n - SETTINGS\n - HEADERS\n - etc\n- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2\n- pretty print all received HTTP/2 frames from the peer (including HPACK decoding)\n- tab completion of commands, options\n\nNot yet features, but soon:\n- unnecessary CONTINUATION frames on short boundaries, to test peer implementations \n- request bodies (DATA frames)\n- send invalid frames for testing server implementations (supported by underlying Framer)\n\nLater:\n- act like a server\n\n## Installation\n\n```\n$ go get golang.org/x/net/http2/h2i\n$ h2i <host>\n```\n\n## Demo\n\n```\n$ h2i\nUsage: h2i <hostname>\n  \n  -insecure\n        Whether to skip TLS cert validation\n  -nextproto string\n        Comma-separated list of NPN/ALPN protocol names to negotiate. (default \"h2,h2-14\")\n\n$ h2i google.com\nConnecting to google.com:443 ...\nConnected to 74.125.224.41:443\nNegotiated protocol \"h2-14\"\n[FrameHeader SETTINGS len=18]\n  [MAX_CONCURRENT_STREAMS = 100]\n  [INITIAL_WINDOW_SIZE = 1048576]\n  [MAX_FRAME_SIZE = 16384]\n[FrameHeader WINDOW_UPDATE len=4]\n  Window-Increment = 983041\n  \nh2i> PING h2iSayHI\n[FrameHeader PING flags=ACK len=8]\n  Data = \"h2iSayHI\"\nh2i> headers\n(as HTTP/1.1)> GET / HTTP/1.1\n(as HTTP/1.1)> Host: ip.appspot.com\n(as HTTP/1.1)> User-Agent: h2i/brad-n-blake\n(as HTTP/1.1)>  \nOpening Stream-ID 1:\n :authority = ip.appspot.com\n :method = GET\n :path = /\n :scheme = https\n user-agent = h2i/brad-n-blake\n[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77]\n  :status = \"200\"\n  alternate-protocol = \"443:quic,p=1\"\n  content-length = \"15\"\n  content-type = \"text/html\"\n  date = \"Fri, 01 May 2015 23:06:56 GMT\"\n  server = \"Google Frontend\"\n[FrameHeader DATA flags=END_STREAM stream=1 len=15]\n 
 \"173.164.155.78\\n\"\n[FrameHeader PING len=8]\n  Data = \"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\nh2i> ping  \n[FrameHeader PING flags=ACK len=8]  \n  Data = \"h2i_ping\"  \nh2i> ping  \n[FrameHeader PING flags=ACK len=8]\n  Data = \"h2i_ping\"\nh2i> ping\n[FrameHeader GOAWAY len=22]\n  Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1)\n\nReadFrame: EOF\n```\n\n## Status\n\nQuick few hour hack. So much yet to do. Feel free to file issues for\nbugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/)\nand I aren't yet accepting pull requests until things settle down.\n\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/h2i/h2i.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !plan9,!solaris\n\n/*\nThe h2i command is an interactive HTTP/2 console.\n\nUsage:\n  $ h2i [flags] <hostname>\n\nInteractive commands in the console: (all parts case-insensitive)\n\n  ping [data]\n  settings ack\n  settings FOO=n BAR=z\n  headers      (open a new stream by typing HTTP/1.1)\n*/\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"golang.org/x/crypto/ssh/terminal\"\n\t\"golang.org/x/net/http2\"\n\t\"golang.org/x/net/http2/hpack\"\n)\n\n// Flags\nvar (\n\tflagNextProto = flag.String(\"nextproto\", \"h2,h2-14\", \"Comma-separated list of NPN/ALPN protocol names to negotiate.\")\n\tflagInsecure  = flag.Bool(\"insecure\", false, \"Whether to skip TLS cert validation\")\n\tflagSettings  = flag.String(\"settings\", \"empty\", \"comma-separated list of KEY=value settings for the initial SETTINGS frame. 
The magic value 'empty' sends an empty initial settings frame, and the magic value 'omit' causes no initial settings frame to be sent.\")\n\tflagDial      = flag.String(\"dial\", \"\", \"optional ip:port to dial, to connect to a host:port but use a different SNI name (including a SNI name without DNS)\")\n)\n\ntype command struct {\n\trun func(*h2i, []string) error // required\n\n\t// complete optionally specifies tokens (case-insensitive) which are\n\t// valid for this subcommand.\n\tcomplete func() []string\n}\n\nvar commands = map[string]command{\n\t\"ping\": {run: (*h2i).cmdPing},\n\t\"settings\": {\n\t\trun: (*h2i).cmdSettings,\n\t\tcomplete: func() []string {\n\t\t\treturn []string{\n\t\t\t\t\"ACK\",\n\t\t\t\thttp2.SettingHeaderTableSize.String(),\n\t\t\t\thttp2.SettingEnablePush.String(),\n\t\t\t\thttp2.SettingMaxConcurrentStreams.String(),\n\t\t\t\thttp2.SettingInitialWindowSize.String(),\n\t\t\t\thttp2.SettingMaxFrameSize.String(),\n\t\t\t\thttp2.SettingMaxHeaderListSize.String(),\n\t\t\t}\n\t\t},\n\t},\n\t\"quit\":    {run: (*h2i).cmdQuit},\n\t\"headers\": {run: (*h2i).cmdHeaders},\n}\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"Usage: h2i <hostname>\\n\\n\")\n\tflag.PrintDefaults()\n}\n\n// withPort adds \":443\" if another port isn't already present.\nfunc withPort(host string) string {\n\tif _, _, err := net.SplitHostPort(host); err != nil {\n\t\treturn net.JoinHostPort(host, \"443\")\n\t}\n\treturn host\n}\n\n// withoutPort strips the port from addr if present.\nfunc withoutPort(addr string) string {\n\tif h, _, err := net.SplitHostPort(addr); err == nil {\n\t\treturn h\n\t}\n\treturn addr\n}\n\n// h2i is the app's state.\ntype h2i struct {\n\thost   string\n\ttc     *tls.Conn\n\tframer *http2.Framer\n\tterm   *terminal.Terminal\n\n\t// owned by the command loop:\n\tstreamID uint32\n\thbuf     bytes.Buffer\n\thenc     *hpack.Encoder\n\n\t// owned by the readFrames loop:\n\tpeerSetting map[http2.SettingID]uint32\n\thdec        
*hpack.Decoder\n}\n\nfunc main() {\n\tflag.Usage = usage\n\tflag.Parse()\n\tif flag.NArg() != 1 {\n\t\tusage()\n\t\tos.Exit(2)\n\t}\n\tlog.SetFlags(0)\n\n\thost := flag.Arg(0)\n\tapp := &h2i{\n\t\thost:        host,\n\t\tpeerSetting: make(map[http2.SettingID]uint32),\n\t}\n\tapp.henc = hpack.NewEncoder(&app.hbuf)\n\n\tif err := app.Main(); err != nil {\n\t\tif app.term != nil {\n\t\t\tapp.logf(\"%v\\n\", err)\n\t\t} else {\n\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t}\n\t\tos.Exit(1)\n\t}\n\tfmt.Fprintf(os.Stdout, \"\\n\")\n}\n\nfunc (app *h2i) Main() error {\n\tcfg := &tls.Config{\n\t\tServerName:         withoutPort(app.host),\n\t\tNextProtos:         strings.Split(*flagNextProto, \",\"),\n\t\tInsecureSkipVerify: *flagInsecure,\n\t}\n\n\thostAndPort := *flagDial\n\tif hostAndPort == \"\" {\n\t\thostAndPort = withPort(app.host)\n\t}\n\tlog.Printf(\"Connecting to %s ...\", hostAndPort)\n\ttc, err := tls.Dial(\"tcp\", hostAndPort, cfg)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error dialing %s: %v\", hostAndPort, err)\n\t}\n\tlog.Printf(\"Connected to %v\", tc.RemoteAddr())\n\tdefer tc.Close()\n\n\tif err := tc.Handshake(); err != nil {\n\t\treturn fmt.Errorf(\"TLS handshake: %v\", err)\n\t}\n\tif !*flagInsecure {\n\t\tif err := tc.VerifyHostname(app.host); err != nil {\n\t\t\treturn fmt.Errorf(\"VerifyHostname: %v\", err)\n\t\t}\n\t}\n\tstate := tc.ConnectionState()\n\tlog.Printf(\"Negotiated protocol %q\", state.NegotiatedProtocol)\n\tif !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == \"\" {\n\t\treturn fmt.Errorf(\"Could not negotiate protocol mutually\")\n\t}\n\n\tif _, err := io.WriteString(tc, http2.ClientPreface); err != nil {\n\t\treturn err\n\t}\n\n\tapp.framer = http2.NewFramer(tc, tc)\n\n\toldState, err := terminal.MakeRaw(int(os.Stdin.Fd()))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer terminal.Restore(0, oldState)\n\n\tvar screen = struct {\n\t\tio.Reader\n\t\tio.Writer\n\t}{os.Stdin, os.Stdout}\n\n\tapp.term = 
terminal.NewTerminal(screen, \"h2i> \")\n\tlastWord := regexp.MustCompile(`.+\\W(\\w+)$`)\n\tapp.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) {\n\t\tif key != '\\t' {\n\t\t\treturn\n\t\t}\n\t\tif pos != len(line) {\n\t\t\t// TODO: we're being lazy for now, only supporting tab completion at the end.\n\t\t\treturn\n\t\t}\n\t\t// Auto-complete for the command itself.\n\t\tif !strings.Contains(line, \" \") {\n\t\t\tvar name string\n\t\t\tname, _, ok = lookupCommand(line)\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\treturn name, len(name), true\n\t\t}\n\t\t_, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')])\n\t\tif !ok || c.complete == nil {\n\t\t\treturn\n\t\t}\n\t\tif strings.HasSuffix(line, \" \") {\n\t\t\tapp.logf(\"%s\", strings.Join(c.complete(), \" \"))\n\t\t\treturn line, pos, true\n\t\t}\n\t\tm := lastWord.FindStringSubmatch(line)\n\t\tif m == nil {\n\t\t\treturn line, len(line), true\n\t\t}\n\t\tsoFar := m[1]\n\t\tvar match []string\n\t\tfor _, cand := range c.complete() {\n\t\t\tif len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmatch = append(match, cand)\n\t\t}\n\t\tif len(match) == 0 {\n\t\t\treturn\n\t\t}\n\t\tif len(match) > 1 {\n\t\t\t// TODO: auto-complete any common prefix\n\t\t\tapp.logf(\"%s\", strings.Join(match, \" \"))\n\t\t\treturn line, pos, true\n\t\t}\n\t\tnewLine = line[:len(line)-len(soFar)] + match[0]\n\t\treturn newLine, len(newLine), true\n\n\t}\n\n\terrc := make(chan error, 2)\n\tgo func() { errc <- app.readFrames() }()\n\tgo func() { errc <- app.readConsole() }()\n\treturn <-errc\n}\n\nfunc (app *h2i) logf(format string, args ...interface{}) {\n\tfmt.Fprintf(app.term, format+\"\\r\\n\", args...)\n}\n\nfunc (app *h2i) readConsole() error {\n\tif s := *flagSettings; s != \"omit\" {\n\t\tvar args []string\n\t\tif s != \"empty\" {\n\t\t\targs = strings.Split(s, \",\")\n\t\t}\n\t\t_, c, ok := 
lookupCommand(\"settings\")\n\t\tif !ok {\n\t\t\tpanic(\"settings command not found\")\n\t\t}\n\t\tc.run(app, args)\n\t}\n\n\tfor {\n\t\tline, err := app.term.ReadLine()\n\t\tif err == io.EOF {\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"terminal.ReadLine: %v\", err)\n\t\t}\n\t\tf := strings.Fields(line)\n\t\tif len(f) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tcmd, args := f[0], f[1:]\n\t\tif _, c, ok := lookupCommand(cmd); ok {\n\t\t\terr = c.run(app, args)\n\t\t} else {\n\t\t\tapp.logf(\"Unknown command %q\", line)\n\t\t}\n\t\tif err == errExitApp {\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\nfunc lookupCommand(prefix string) (name string, c command, ok bool) {\n\tprefix = strings.ToLower(prefix)\n\tif c, ok = commands[prefix]; ok {\n\t\treturn prefix, c, ok\n\t}\n\n\tfor full, candidate := range commands {\n\t\tif strings.HasPrefix(full, prefix) {\n\t\t\tif c.run != nil {\n\t\t\t\treturn \"\", command{}, false // ambiguous\n\t\t\t}\n\t\t\tc = candidate\n\t\t\tname = full\n\t\t}\n\t}\n\treturn name, c, c.run != nil\n}\n\nvar errExitApp = errors.New(\"internal sentinel error value to quit the console reading loop\")\n\nfunc (a *h2i) cmdQuit(args []string) error {\n\tif len(args) > 0 {\n\t\ta.logf(\"the QUIT command takes no argument\")\n\t\treturn nil\n\t}\n\treturn errExitApp\n}\n\nfunc (a *h2i) cmdSettings(args []string) error {\n\tif len(args) == 1 && strings.EqualFold(args[0], \"ACK\") {\n\t\treturn a.framer.WriteSettingsAck()\n\t}\n\tvar settings []http2.Setting\n\tfor _, arg := range args {\n\t\tif strings.EqualFold(arg, \"ACK\") {\n\t\t\ta.logf(\"Error: ACK must be only argument with the SETTINGS command\")\n\t\t\treturn nil\n\t\t}\n\t\teq := strings.Index(arg, \"=\")\n\t\tif eq == -1 {\n\t\t\ta.logf(\"Error: invalid argument %q (expected SETTING_NAME=nnnn)\", arg)\n\t\t\treturn nil\n\t\t}\n\t\tsid, ok := settingByName(arg[:eq])\n\t\tif !ok {\n\t\t\ta.logf(\"Error: unknown setting name %q\", 
arg[:eq])\n\t\t\treturn nil\n\t\t}\n\t\tval, err := strconv.ParseUint(arg[eq+1:], 10, 32)\n\t\tif err != nil {\n\t\t\ta.logf(\"Error: invalid argument %q (expected SETTING_NAME=nnnn)\", arg)\n\t\t\treturn nil\n\t\t}\n\t\tsettings = append(settings, http2.Setting{\n\t\t\tID:  sid,\n\t\t\tVal: uint32(val),\n\t\t})\n\t}\n\ta.logf(\"Sending: %v\", settings)\n\treturn a.framer.WriteSettings(settings...)\n}\n\nfunc settingByName(name string) (http2.SettingID, bool) {\n\tfor _, sid := range [...]http2.SettingID{\n\t\thttp2.SettingHeaderTableSize,\n\t\thttp2.SettingEnablePush,\n\t\thttp2.SettingMaxConcurrentStreams,\n\t\thttp2.SettingInitialWindowSize,\n\t\thttp2.SettingMaxFrameSize,\n\t\thttp2.SettingMaxHeaderListSize,\n\t} {\n\t\tif strings.EqualFold(sid.String(), name) {\n\t\t\treturn sid, true\n\t\t}\n\t}\n\treturn 0, false\n}\n\nfunc (app *h2i) cmdPing(args []string) error {\n\tif len(args) > 1 {\n\t\tapp.logf(\"invalid PING usage: only accepts 0 or 1 args\")\n\t\treturn nil // nil means don't end the program\n\t}\n\tvar data [8]byte\n\tif len(args) == 1 {\n\t\tcopy(data[:], args[0])\n\t} else {\n\t\tcopy(data[:], \"h2i_ping\")\n\t}\n\treturn app.framer.WritePing(false, data)\n}\n\nfunc (app *h2i) cmdHeaders(args []string) error {\n\tif len(args) > 0 {\n\t\tapp.logf(\"Error: HEADERS doesn't yet take arguments.\")\n\t\t// TODO: flags for restricting window size, to force CONTINUATION\n\t\t// frames.\n\t\treturn nil\n\t}\n\tvar h1req bytes.Buffer\n\tapp.term.SetPrompt(\"(as HTTP/1.1)> \")\n\tdefer app.term.SetPrompt(\"h2i> \")\n\tfor {\n\t\tline, err := app.term.ReadLine()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\th1req.WriteString(line)\n\t\th1req.WriteString(\"\\r\\n\")\n\t\tif line == \"\" {\n\t\t\tbreak\n\t\t}\n\t}\n\treq, err := http.ReadRequest(bufio.NewReader(&h1req))\n\tif err != nil {\n\t\tapp.logf(\"Invalid HTTP/1.1 request: %v\", err)\n\t\treturn nil\n\t}\n\tif app.streamID == 0 {\n\t\tapp.streamID = 1\n\t} else {\n\t\tapp.streamID += 
2\n\t}\n\tapp.logf(\"Opening Stream-ID %d:\", app.streamID)\n\thbf := app.encodeHeaders(req)\n\tif len(hbf) > 16<<10 {\n\t\tapp.logf(\"TODO: h2i doesn't yet write CONTINUATION frames. Copy it from transport.go\")\n\t\treturn nil\n\t}\n\treturn app.framer.WriteHeaders(http2.HeadersFrameParam{\n\t\tStreamID:      app.streamID,\n\t\tBlockFragment: hbf,\n\t\tEndStream:     req.Method == \"GET\" || req.Method == \"HEAD\", // good enough for now\n\t\tEndHeaders:    true,                                        // for now\n\t})\n}\n\nfunc (app *h2i) readFrames() error {\n\tfor {\n\t\tf, err := app.framer.ReadFrame()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"ReadFrame: %v\", err)\n\t\t}\n\t\tapp.logf(\"%v\", f)\n\t\tswitch f := f.(type) {\n\t\tcase *http2.PingFrame:\n\t\t\tapp.logf(\"  Data = %q\", f.Data)\n\t\tcase *http2.SettingsFrame:\n\t\t\tf.ForeachSetting(func(s http2.Setting) error {\n\t\t\t\tapp.logf(\"  %v\", s)\n\t\t\t\tapp.peerSetting[s.ID] = s.Val\n\t\t\t\treturn nil\n\t\t\t})\n\t\tcase *http2.WindowUpdateFrame:\n\t\t\tapp.logf(\"  Window-Increment = %v\", f.Increment)\n\t\tcase *http2.GoAwayFrame:\n\t\t\tapp.logf(\"  Last-Stream-ID = %d; Error-Code = %v (%d)\", f.LastStreamID, f.ErrCode, f.ErrCode)\n\t\tcase *http2.DataFrame:\n\t\t\tapp.logf(\"  %q\", f.Data())\n\t\tcase *http2.HeadersFrame:\n\t\t\tif f.HasPriority() {\n\t\t\t\tapp.logf(\"  PRIORITY = %v\", f.Priority)\n\t\t\t}\n\t\t\tif app.hdec == nil {\n\t\t\t\t// TODO: if the user uses h2i to send a SETTINGS frame advertising\n\t\t\t\t// something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE\n\t\t\t\t// and stuff here instead of using the 4k default. 
But for now:\n\t\t\t\ttableSize := uint32(4 << 10)\n\t\t\t\tapp.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField)\n\t\t\t}\n\t\t\tapp.hdec.Write(f.HeaderBlockFragment())\n\t\tcase *http2.PushPromiseFrame:\n\t\t\tif app.hdec == nil {\n\t\t\t\t// TODO: if the user uses h2i to send a SETTINGS frame advertising\n\t\t\t\t// something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE\n\t\t\t\t// and stuff here instead of using the 4k default. But for now:\n\t\t\t\ttableSize := uint32(4 << 10)\n\t\t\t\tapp.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField)\n\t\t\t}\n\t\t\tapp.hdec.Write(f.HeaderBlockFragment())\n\t\t}\n\t}\n}\n\n// called from readLoop\nfunc (app *h2i) onNewHeaderField(f hpack.HeaderField) {\n\tif f.Sensitive {\n\t\tapp.logf(\"  %s = %q (SENSITIVE)\", f.Name, f.Value)\n\t}\n\tapp.logf(\"  %s = %q\", f.Name, f.Value)\n}\n\nfunc (app *h2i) encodeHeaders(req *http.Request) []byte {\n\tapp.hbuf.Reset()\n\n\t// TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go\n\thost := req.Host\n\tif host == \"\" {\n\t\thost = req.URL.Host\n\t}\n\n\tpath := req.RequestURI\n\tif path == \"\" {\n\t\tpath = \"/\"\n\t}\n\n\tapp.writeHeader(\":authority\", host) // probably not right for all sites\n\tapp.writeHeader(\":method\", req.Method)\n\tapp.writeHeader(\":path\", path)\n\tapp.writeHeader(\":scheme\", \"https\")\n\n\tfor k, vv := range req.Header {\n\t\tlowKey := strings.ToLower(k)\n\t\tif lowKey == \"host\" {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, v := range vv {\n\t\t\tapp.writeHeader(lowKey, v)\n\t\t}\n\t}\n\treturn app.hbuf.Bytes()\n}\n\nfunc (app *h2i) writeHeader(name, value string) {\n\tapp.henc.WriteField(hpack.HeaderField{Name: name, Value: value})\n\tapp.logf(\" %s = %s\", name, value)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/headermap.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport (\n\t\"net/http\"\n\t\"strings\"\n)\n\nvar (\n\tcommonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case\n\tcommonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case\n)\n\nfunc init() {\n\tfor _, v := range []string{\n\t\t\"accept\",\n\t\t\"accept-charset\",\n\t\t\"accept-encoding\",\n\t\t\"accept-language\",\n\t\t\"accept-ranges\",\n\t\t\"age\",\n\t\t\"access-control-allow-origin\",\n\t\t\"allow\",\n\t\t\"authorization\",\n\t\t\"cache-control\",\n\t\t\"content-disposition\",\n\t\t\"content-encoding\",\n\t\t\"content-language\",\n\t\t\"content-length\",\n\t\t\"content-location\",\n\t\t\"content-range\",\n\t\t\"content-type\",\n\t\t\"cookie\",\n\t\t\"date\",\n\t\t\"etag\",\n\t\t\"expect\",\n\t\t\"expires\",\n\t\t\"from\",\n\t\t\"host\",\n\t\t\"if-match\",\n\t\t\"if-modified-since\",\n\t\t\"if-none-match\",\n\t\t\"if-unmodified-since\",\n\t\t\"last-modified\",\n\t\t\"link\",\n\t\t\"location\",\n\t\t\"max-forwards\",\n\t\t\"proxy-authenticate\",\n\t\t\"proxy-authorization\",\n\t\t\"range\",\n\t\t\"referer\",\n\t\t\"refresh\",\n\t\t\"retry-after\",\n\t\t\"server\",\n\t\t\"set-cookie\",\n\t\t\"strict-transport-security\",\n\t\t\"trailer\",\n\t\t\"transfer-encoding\",\n\t\t\"user-agent\",\n\t\t\"vary\",\n\t\t\"via\",\n\t\t\"www-authenticate\",\n\t} {\n\t\tchk := http.CanonicalHeaderKey(v)\n\t\tcommonLowerHeader[chk] = v\n\t\tcommonCanonHeader[v] = chk\n\t}\n}\n\nfunc lowerHeader(v string) string {\n\tif s, ok := commonLowerHeader[v]; ok {\n\t\treturn s\n\t}\n\treturn strings.ToLower(v)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/hpack/encode.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage hpack\n\nimport (\n\t\"io\"\n)\n\nconst (\n\tuint32Max              = ^uint32(0)\n\tinitialHeaderTableSize = 4096\n)\n\ntype Encoder struct {\n\tdynTab dynamicTable\n\t// minSize is the minimum table size set by\n\t// SetMaxDynamicTableSize after the previous Header Table Size\n\t// Update.\n\tminSize uint32\n\t// maxSizeLimit is the maximum table size this encoder\n\t// supports. This will protect the encoder from too large\n\t// size.\n\tmaxSizeLimit uint32\n\t// tableSizeUpdate indicates whether \"Header Table Size\n\t// Update\" is required.\n\ttableSizeUpdate bool\n\tw               io.Writer\n\tbuf             []byte\n}\n\n// NewEncoder returns a new Encoder which performs HPACK encoding. An\n// encoded data is written to w.\nfunc NewEncoder(w io.Writer) *Encoder {\n\te := &Encoder{\n\t\tminSize:         uint32Max,\n\t\tmaxSizeLimit:    initialHeaderTableSize,\n\t\ttableSizeUpdate: false,\n\t\tw:               w,\n\t}\n\te.dynTab.table.init()\n\te.dynTab.setMaxSize(initialHeaderTableSize)\n\treturn e\n}\n\n// WriteField encodes f into a single Write to e's underlying Writer.\n// This function may also produce bytes for \"Header Table Size Update\"\n// if necessary. 
If produced, it is done before encoding f.\nfunc (e *Encoder) WriteField(f HeaderField) error {\n\te.buf = e.buf[:0]\n\n\tif e.tableSizeUpdate {\n\t\te.tableSizeUpdate = false\n\t\tif e.minSize < e.dynTab.maxSize {\n\t\t\te.buf = appendTableSize(e.buf, e.minSize)\n\t\t}\n\t\te.minSize = uint32Max\n\t\te.buf = appendTableSize(e.buf, e.dynTab.maxSize)\n\t}\n\n\tidx, nameValueMatch := e.searchTable(f)\n\tif nameValueMatch {\n\t\te.buf = appendIndexed(e.buf, idx)\n\t} else {\n\t\tindexing := e.shouldIndex(f)\n\t\tif indexing {\n\t\t\te.dynTab.add(f)\n\t\t}\n\n\t\tif idx == 0 {\n\t\t\te.buf = appendNewName(e.buf, f, indexing)\n\t\t} else {\n\t\t\te.buf = appendIndexedName(e.buf, f, idx, indexing)\n\t\t}\n\t}\n\tn, err := e.w.Write(e.buf)\n\tif err == nil && n != len(e.buf) {\n\t\terr = io.ErrShortWrite\n\t}\n\treturn err\n}\n\n// searchTable searches f in both stable and dynamic header tables.\n// The static header table is searched first. Only when there is no\n// exact match for both name and value, the dynamic header table is\n// then searched. If there is no match, i is 0. If both name and value\n// match, i is the matched index and nameValueMatch becomes true. 
If\n// only name matches, i points to that index and nameValueMatch\n// becomes false.\nfunc (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {\n\ti, nameValueMatch = staticTable.search(f)\n\tif nameValueMatch {\n\t\treturn i, true\n\t}\n\n\tj, nameValueMatch := e.dynTab.table.search(f)\n\tif nameValueMatch || (i == 0 && j != 0) {\n\t\treturn j + uint64(staticTable.len()), nameValueMatch\n\t}\n\n\treturn i, false\n}\n\n// SetMaxDynamicTableSize changes the dynamic header table size to v.\n// The actual size is bounded by the value passed to\n// SetMaxDynamicTableSizeLimit.\nfunc (e *Encoder) SetMaxDynamicTableSize(v uint32) {\n\tif v > e.maxSizeLimit {\n\t\tv = e.maxSizeLimit\n\t}\n\tif v < e.minSize {\n\t\te.minSize = v\n\t}\n\te.tableSizeUpdate = true\n\te.dynTab.setMaxSize(v)\n}\n\n// SetMaxDynamicTableSizeLimit changes the maximum value that can be\n// specified in SetMaxDynamicTableSize to v. By default, it is set to\n// 4096, which is the same size of the default dynamic header table\n// size described in HPACK specification. 
If the current maximum\n// dynamic header table size is strictly greater than v, \"Header Table\n// Size Update\" will be done in the next WriteField call and the\n// maximum dynamic header table size is truncated to v.\nfunc (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {\n\te.maxSizeLimit = v\n\tif e.dynTab.maxSize > v {\n\t\te.tableSizeUpdate = true\n\t\te.dynTab.setMaxSize(v)\n\t}\n}\n\n// shouldIndex reports whether f should be indexed.\nfunc (e *Encoder) shouldIndex(f HeaderField) bool {\n\treturn !f.Sensitive && f.Size() <= e.dynTab.maxSize\n}\n\n// appendIndexed appends index i, as encoded in \"Indexed Header Field\"\n// representation, to dst and returns the extended buffer.\nfunc appendIndexed(dst []byte, i uint64) []byte {\n\tfirst := len(dst)\n\tdst = appendVarInt(dst, 7, i)\n\tdst[first] |= 0x80\n\treturn dst\n}\n\n// appendNewName appends f, as encoded in one of \"Literal Header field\n// - New Name\" representation variants, to dst and returns the\n// extended buffer.\n//\n// If f.Sensitive is true, \"Never Indexed\" representation is used. If\n// f.Sensitive is false and indexing is true, \"Inremental Indexing\"\n// representation is used.\nfunc appendNewName(dst []byte, f HeaderField, indexing bool) []byte {\n\tdst = append(dst, encodeTypeByte(indexing, f.Sensitive))\n\tdst = appendHpackString(dst, f.Name)\n\treturn appendHpackString(dst, f.Value)\n}\n\n// appendIndexedName appends f and index i referring indexed name\n// entry, as encoded in one of \"Literal Header field - Indexed Name\"\n// representation variants, to dst and returns the extended buffer.\n//\n// If f.Sensitive is true, \"Never Indexed\" representation is used. 
If\n// f.Sensitive is false and indexing is true, \"Incremental Indexing\"\n// representation is used.\nfunc appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {\n\tfirst := len(dst)\n\tvar n byte\n\tif indexing {\n\t\tn = 6\n\t} else {\n\t\tn = 4\n\t}\n\tdst = appendVarInt(dst, n, i)\n\tdst[first] |= encodeTypeByte(indexing, f.Sensitive)\n\treturn appendHpackString(dst, f.Value)\n}\n\n// appendTableSize appends v, as encoded in \"Header Table Size Update\"\n// representation, to dst and returns the extended buffer.\nfunc appendTableSize(dst []byte, v uint32) []byte {\n\tfirst := len(dst)\n\tdst = appendVarInt(dst, 5, uint64(v))\n\tdst[first] |= 0x20\n\treturn dst\n}\n\n// appendVarInt appends i, as encoded in variable integer form using n\n// bit prefix, to dst and returns the extended buffer.\n//\n// See\n// http://http2.github.io/http2-spec/compression.html#integer.representation\nfunc appendVarInt(dst []byte, n byte, i uint64) []byte {\n\tk := uint64((1 << n) - 1)\n\tif i < k {\n\t\treturn append(dst, byte(i))\n\t}\n\tdst = append(dst, byte(k))\n\ti -= k\n\tfor ; i >= 128; i >>= 7 {\n\t\tdst = append(dst, byte(0x80|(i&0x7f)))\n\t}\n\treturn append(dst, byte(i))\n}\n\n// appendHpackString appends s, as encoded in \"String Literal\"\n// representation, to dst and returns the the extended buffer.\n//\n// s will be encoded in Huffman codes only when it produces strictly\n// shorter byte string.\nfunc appendHpackString(dst []byte, s string) []byte {\n\thuffmanLength := HuffmanEncodeLength(s)\n\tif huffmanLength < uint64(len(s)) {\n\t\tfirst := len(dst)\n\t\tdst = appendVarInt(dst, 7, huffmanLength)\n\t\tdst = AppendHuffmanString(dst, s)\n\t\tdst[first] |= 0x80\n\t} else {\n\t\tdst = appendVarInt(dst, 7, uint64(len(s)))\n\t\tdst = append(dst, s...)\n\t}\n\treturn dst\n}\n\n// encodeTypeByte returns type byte. If sensitive is true, type byte\n// for \"Never Indexed\" representation is returned. 
If sensitive is\n// false and indexing is true, type byte for \"Incremental Indexing\"\n// representation is returned. Otherwise, type byte for \"Without\n// Indexing\" is returned.\nfunc encodeTypeByte(indexing, sensitive bool) byte {\n\tif sensitive {\n\t\treturn 0x10\n\t}\n\tif indexing {\n\t\treturn 0x40\n\t}\n\treturn 0\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/hpack/encode_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage hpack\n\nimport (\n\t\"bytes\"\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"math/rand\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestEncoderTableSizeUpdate(t *testing.T) {\n\ttests := []struct {\n\t\tsize1, size2 uint32\n\t\twantHex      string\n\t}{\n\t\t// Should emit 2 table size updates (2048 and 4096)\n\t\t{2048, 4096, \"3fe10f 3fe11f 82\"},\n\n\t\t// Should emit 1 table size update (2048)\n\t\t{16384, 2048, \"3fe10f 82\"},\n\t}\n\tfor _, tt := range tests {\n\t\tvar buf bytes.Buffer\n\t\te := NewEncoder(&buf)\n\t\te.SetMaxDynamicTableSize(tt.size1)\n\t\te.SetMaxDynamicTableSize(tt.size2)\n\t\tif err := e.WriteField(pair(\":method\", \"GET\")); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\twant := removeSpace(tt.wantHex)\n\t\tif got := hex.EncodeToString(buf.Bytes()); got != want {\n\t\t\tt.Errorf(\"e.SetDynamicTableSize %v, %v = %q; want %q\", tt.size1, tt.size2, got, want)\n\t\t}\n\t}\n}\n\nfunc TestEncoderWriteField(t *testing.T) {\n\tvar buf bytes.Buffer\n\te := NewEncoder(&buf)\n\tvar got []HeaderField\n\td := NewDecoder(4<<10, func(f HeaderField) {\n\t\tgot = append(got, f)\n\t})\n\n\ttests := []struct {\n\t\thdrs []HeaderField\n\t}{\n\t\t{[]HeaderField{\n\t\t\tpair(\":method\", \"GET\"),\n\t\t\tpair(\":scheme\", \"http\"),\n\t\t\tpair(\":path\", \"/\"),\n\t\t\tpair(\":authority\", \"www.example.com\"),\n\t\t}},\n\t\t{[]HeaderField{\n\t\t\tpair(\":method\", \"GET\"),\n\t\t\tpair(\":scheme\", \"http\"),\n\t\t\tpair(\":path\", \"/\"),\n\t\t\tpair(\":authority\", \"www.example.com\"),\n\t\t\tpair(\"cache-control\", \"no-cache\"),\n\t\t}},\n\t\t{[]HeaderField{\n\t\t\tpair(\":method\", \"GET\"),\n\t\t\tpair(\":scheme\", \"https\"),\n\t\t\tpair(\":path\", \"/index.html\"),\n\t\t\tpair(\":authority\", \"www.example.com\"),\n\t\t\tpair(\"custom-key\", 
\"custom-value\"),\n\t\t}},\n\t}\n\tfor i, tt := range tests {\n\t\tbuf.Reset()\n\t\tgot = got[:0]\n\t\tfor _, hf := range tt.hdrs {\n\t\t\tif err := e.WriteField(hf); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t\t_, err := d.Write(buf.Bytes())\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%d. Decoder Write = %v\", i, err)\n\t\t}\n\t\tif !reflect.DeepEqual(got, tt.hdrs) {\n\t\t\tt.Errorf(\"%d. Decoded %+v; want %+v\", i, got, tt.hdrs)\n\t\t}\n\t}\n}\n\nfunc TestEncoderSearchTable(t *testing.T) {\n\te := NewEncoder(nil)\n\n\te.dynTab.add(pair(\"foo\", \"bar\"))\n\te.dynTab.add(pair(\"blake\", \"miz\"))\n\te.dynTab.add(pair(\":method\", \"GET\"))\n\n\ttests := []struct {\n\t\thf        HeaderField\n\t\twantI     uint64\n\t\twantMatch bool\n\t}{\n\t\t// Name and Value match\n\t\t{pair(\"foo\", \"bar\"), uint64(staticTable.len()) + 3, true},\n\t\t{pair(\"blake\", \"miz\"), uint64(staticTable.len()) + 2, true},\n\t\t{pair(\":method\", \"GET\"), 2, true},\n\n\t\t// Only name match because Sensitive == true. This is allowed to match\n\t\t// any \":method\" entry. 
The current implementation uses the last entry\n\t\t// added in newStaticTable.\n\t\t{HeaderField{\":method\", \"GET\", true}, 3, false},\n\n\t\t// Only Name matches\n\t\t{pair(\"foo\", \"...\"), uint64(staticTable.len()) + 3, false},\n\t\t{pair(\"blake\", \"...\"), uint64(staticTable.len()) + 2, false},\n\t\t// As before, this is allowed to match any \":method\" entry.\n\t\t{pair(\":method\", \"...\"), 3, false},\n\n\t\t// None match\n\t\t{pair(\"foo-\", \"bar\"), 0, false},\n\t}\n\tfor _, tt := range tests {\n\t\tif gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch {\n\t\t\tt.Errorf(\"d.search(%+v) = %v, %v; want %v, %v\", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch)\n\t\t}\n\t}\n}\n\nfunc TestAppendVarInt(t *testing.T) {\n\ttests := []struct {\n\t\tn    byte\n\t\ti    uint64\n\t\twant []byte\n\t}{\n\t\t// Fits in a byte:\n\t\t{1, 0, []byte{0}},\n\t\t{2, 2, []byte{2}},\n\t\t{3, 6, []byte{6}},\n\t\t{4, 14, []byte{14}},\n\t\t{5, 30, []byte{30}},\n\t\t{6, 62, []byte{62}},\n\t\t{7, 126, []byte{126}},\n\t\t{8, 254, []byte{254}},\n\n\t\t// Multiple bytes:\n\t\t{5, 1337, []byte{31, 154, 10}},\n\t}\n\tfor _, tt := range tests {\n\t\tgot := appendVarInt(nil, tt.n, tt.i)\n\t\tif !bytes.Equal(got, tt.want) {\n\t\t\tt.Errorf(\"appendVarInt(nil, %v, %v) = %v; want %v\", tt.n, tt.i, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestAppendHpackString(t *testing.T) {\n\ttests := []struct {\n\t\ts, wantHex string\n\t}{\n\t\t// Huffman encoded\n\t\t{\"www.example.com\", \"8c f1e3 c2e5 f23a 6ba0 ab90 f4ff\"},\n\n\t\t// Not Huffman encoded\n\t\t{\"a\", \"01 61\"},\n\n\t\t// zero length\n\t\t{\"\", \"00\"},\n\t}\n\tfor _, tt := range tests {\n\t\twant := removeSpace(tt.wantHex)\n\t\tbuf := appendHpackString(nil, tt.s)\n\t\tif got := hex.EncodeToString(buf); want != got {\n\t\t\tt.Errorf(\"appendHpackString(nil, %q) = %q; want %q\", tt.s, got, want)\n\t\t}\n\t}\n}\n\nfunc TestAppendIndexed(t *testing.T) {\n\ttests := []struct {\n\t\ti       
uint64\n\t\twantHex string\n\t}{\n\t\t// 1 byte\n\t\t{1, \"81\"},\n\t\t{126, \"fe\"},\n\n\t\t// 2 bytes\n\t\t{127, \"ff00\"},\n\t\t{128, \"ff01\"},\n\t}\n\tfor _, tt := range tests {\n\t\twant := removeSpace(tt.wantHex)\n\t\tbuf := appendIndexed(nil, tt.i)\n\t\tif got := hex.EncodeToString(buf); want != got {\n\t\t\tt.Errorf(\"appendIndex(nil, %v) = %q; want %q\", tt.i, got, want)\n\t\t}\n\t}\n}\n\nfunc TestAppendNewName(t *testing.T) {\n\ttests := []struct {\n\t\tf        HeaderField\n\t\tindexing bool\n\t\twantHex  string\n\t}{\n\t\t// Incremental indexing\n\t\t{HeaderField{\"custom-key\", \"custom-value\", false}, true, \"40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf\"},\n\n\t\t// Without indexing\n\t\t{HeaderField{\"custom-key\", \"custom-value\", false}, false, \"00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf\"},\n\n\t\t// Never indexed\n\t\t{HeaderField{\"custom-key\", \"custom-value\", true}, true, \"10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf\"},\n\t\t{HeaderField{\"custom-key\", \"custom-value\", true}, false, \"10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf\"},\n\t}\n\tfor _, tt := range tests {\n\t\twant := removeSpace(tt.wantHex)\n\t\tbuf := appendNewName(nil, tt.f, tt.indexing)\n\t\tif got := hex.EncodeToString(buf); want != got {\n\t\t\tt.Errorf(\"appendNewName(nil, %+v, %v) = %q; want %q\", tt.f, tt.indexing, got, want)\n\t\t}\n\t}\n}\n\nfunc TestAppendIndexedName(t *testing.T) {\n\ttests := []struct {\n\t\tf        HeaderField\n\t\ti        uint64\n\t\tindexing bool\n\t\twantHex  string\n\t}{\n\t\t// Incremental indexing\n\t\t{HeaderField{\":status\", \"302\", false}, 8, true, \"48 82 6402\"},\n\n\t\t// Without indexing\n\t\t{HeaderField{\":status\", \"302\", false}, 8, false, \"08 82 6402\"},\n\n\t\t// Never indexed\n\t\t{HeaderField{\":status\", \"302\", true}, 8, true, \"18 82 6402\"},\n\t\t{HeaderField{\":status\", \"302\", true}, 8, false, \"18 82 6402\"},\n\t}\n\tfor _, tt := range tests {\n\t\twant := 
removeSpace(tt.wantHex)\n\t\tbuf := appendIndexedName(nil, tt.f, tt.i, tt.indexing)\n\t\tif got := hex.EncodeToString(buf); want != got {\n\t\t\tt.Errorf(\"appendIndexedName(nil, %+v, %v) = %q; want %q\", tt.f, tt.indexing, got, want)\n\t\t}\n\t}\n}\n\nfunc TestAppendTableSize(t *testing.T) {\n\ttests := []struct {\n\t\ti       uint32\n\t\twantHex string\n\t}{\n\t\t// Fits into 1 byte\n\t\t{30, \"3e\"},\n\n\t\t// Extra byte\n\t\t{31, \"3f00\"},\n\t\t{32, \"3f01\"},\n\t}\n\tfor _, tt := range tests {\n\t\twant := removeSpace(tt.wantHex)\n\t\tbuf := appendTableSize(nil, tt.i)\n\t\tif got := hex.EncodeToString(buf); want != got {\n\t\t\tt.Errorf(\"appendTableSize(nil, %v) = %q; want %q\", tt.i, got, want)\n\t\t}\n\t}\n}\n\nfunc TestEncoderSetMaxDynamicTableSize(t *testing.T) {\n\tvar buf bytes.Buffer\n\te := NewEncoder(&buf)\n\ttests := []struct {\n\t\tv           uint32\n\t\twantUpdate  bool\n\t\twantMinSize uint32\n\t\twantMaxSize uint32\n\t}{\n\t\t// Set new table size to 2048\n\t\t{2048, true, 2048, 2048},\n\n\t\t// Set new table size to 16384, but still limited to\n\t\t// 4096\n\t\t{16384, true, 2048, 4096},\n\t}\n\tfor _, tt := range tests {\n\t\te.SetMaxDynamicTableSize(tt.v)\n\t\tif got := e.tableSizeUpdate; tt.wantUpdate != got {\n\t\t\tt.Errorf(\"e.tableSizeUpdate = %v; want %v\", got, tt.wantUpdate)\n\t\t}\n\t\tif got := e.minSize; tt.wantMinSize != got {\n\t\t\tt.Errorf(\"e.minSize = %v; want %v\", got, tt.wantMinSize)\n\t\t}\n\t\tif got := e.dynTab.maxSize; tt.wantMaxSize != got {\n\t\t\tt.Errorf(\"e.maxSize = %v; want %v\", got, tt.wantMaxSize)\n\t\t}\n\t}\n}\n\nfunc TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) {\n\te := NewEncoder(nil)\n\t// 4095 < initialHeaderTableSize means maxSize is truncated to\n\t// 4095.\n\te.SetMaxDynamicTableSizeLimit(4095)\n\tif got, want := e.dynTab.maxSize, uint32(4095); got != want {\n\t\tt.Errorf(\"e.dynTab.maxSize = %v; want %v\", got, want)\n\t}\n\tif got, want := e.maxSizeLimit, uint32(4095); got != want 
{\n\t\tt.Errorf(\"e.maxSizeLimit = %v; want %v\", got, want)\n\t}\n\tif got, want := e.tableSizeUpdate, true; got != want {\n\t\tt.Errorf(\"e.tableSizeUpdate = %v; want %v\", got, want)\n\t}\n\t// maxSize will be truncated to maxSizeLimit\n\te.SetMaxDynamicTableSize(16384)\n\tif got, want := e.dynTab.maxSize, uint32(4095); got != want {\n\t\tt.Errorf(\"e.dynTab.maxSize = %v; want %v\", got, want)\n\t}\n\t// 8192 > current maxSizeLimit, so maxSize does not change.\n\te.SetMaxDynamicTableSizeLimit(8192)\n\tif got, want := e.dynTab.maxSize, uint32(4095); got != want {\n\t\tt.Errorf(\"e.dynTab.maxSize = %v; want %v\", got, want)\n\t}\n\tif got, want := e.maxSizeLimit, uint32(8192); got != want {\n\t\tt.Errorf(\"e.maxSizeLimit = %v; want %v\", got, want)\n\t}\n}\n\nfunc removeSpace(s string) string {\n\treturn strings.Replace(s, \" \", \"\", -1)\n}\n\nfunc BenchmarkEncoderSearchTable(b *testing.B) {\n\te := NewEncoder(nil)\n\n\t// A sample of possible header fields.\n\t// This is not based on any actual data from HTTP/2 traces.\n\tvar possible []HeaderField\n\tfor _, f := range staticTable.ents {\n\t\tif f.Value == \"\" {\n\t\t\tpossible = append(possible, f)\n\t\t\tcontinue\n\t\t}\n\t\t// Generate 5 random values, except for cookie and set-cookie,\n\t\t// which we know can have many values in practice.\n\t\tnum := 5\n\t\tif f.Name == \"cookie\" || f.Name == \"set-cookie\" {\n\t\t\tnum = 25\n\t\t}\n\t\tfor i := 0; i < num; i++ {\n\t\t\tf.Value = fmt.Sprintf(\"%s-%d\", f.Name, i)\n\t\t\tpossible = append(possible, f)\n\t\t}\n\t}\n\tfor k := 0; k < 10; k++ {\n\t\tf := HeaderField{\n\t\t\tName:      fmt.Sprintf(\"x-header-%d\", k),\n\t\t\tSensitive: rand.Int()%2 == 0,\n\t\t}\n\t\tfor i := 0; i < 5; i++ {\n\t\t\tf.Value = fmt.Sprintf(\"%s-%d\", f.Name, i)\n\t\t\tpossible = append(possible, f)\n\t\t}\n\t}\n\n\t// Add a random sample to the dynamic table. 
This very loosely simulates\n\t// a history of 100 requests with 20 header fields per request.\n\tfor r := 0; r < 100*20; r++ {\n\t\tf := possible[rand.Int31n(int32(len(possible)))]\n\t\t// Skip if this is in the staticTable verbatim.\n\t\tif _, has := staticTable.search(f); !has {\n\t\t\te.dynTab.add(f)\n\t\t}\n\t}\n\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tfor _, f := range possible {\n\t\t\te.searchTable(f)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/hpack/hpack.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package hpack implements HPACK, a compression format for\n// efficiently representing HTTP header fields in the context of HTTP/2.\n//\n// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09\npackage hpack\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n)\n\n// A DecodingError is something the spec defines as a decoding error.\ntype DecodingError struct {\n\tErr error\n}\n\nfunc (de DecodingError) Error() string {\n\treturn fmt.Sprintf(\"decoding error: %v\", de.Err)\n}\n\n// An InvalidIndexError is returned when an encoder references a table\n// entry before the static table or after the end of the dynamic table.\ntype InvalidIndexError int\n\nfunc (e InvalidIndexError) Error() string {\n\treturn fmt.Sprintf(\"invalid indexed representation index %d\", int(e))\n}\n\n// A HeaderField is a name-value pair. 
Both the name and value are\n// treated as opaque sequences of octets.\ntype HeaderField struct {\n\tName, Value string\n\n\t// Sensitive means that this header field should never be\n\t// indexed.\n\tSensitive bool\n}\n\n// IsPseudo reports whether the header field is an http2 pseudo header.\n// That is, it reports whether it starts with a colon.\n// It is not otherwise guaranteed to be a valid pseudo header field,\n// though.\nfunc (hf HeaderField) IsPseudo() bool {\n\treturn len(hf.Name) != 0 && hf.Name[0] == ':'\n}\n\nfunc (hf HeaderField) String() string {\n\tvar suffix string\n\tif hf.Sensitive {\n\t\tsuffix = \" (sensitive)\"\n\t}\n\treturn fmt.Sprintf(\"header field %q = %q%s\", hf.Name, hf.Value, suffix)\n}\n\n// Size returns the size of an entry per RFC 7541 section 4.1.\nfunc (hf HeaderField) Size() uint32 {\n\t// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1\n\t// \"The size of the dynamic table is the sum of the size of\n\t// its entries. The size of an entry is the sum of its name's\n\t// length in octets (as defined in Section 5.2), its value's\n\t// length in octets (see Section 5.2), plus 32.  The size of\n\t// an entry is calculated using the length of the name and\n\t// value without any Huffman encoding applied.\"\n\n\t// This can overflow if somebody makes a large HeaderField\n\t// Name and/or Value by hand, but we don't care, because that\n\t// won't happen on the wire because the encoding doesn't allow\n\t// it.\n\treturn uint32(len(hf.Name) + len(hf.Value) + 32)\n}\n\n// A Decoder is the decoding context for incremental processing of\n// header blocks.\ntype Decoder struct {\n\tdynTab dynamicTable\n\temit   func(f HeaderField)\n\n\temitEnabled bool // whether calls to emit are enabled\n\tmaxStrLen   int  // 0 means unlimited\n\n\t// buf is the unparsed buffer. It's only written to\n\t// saveBuf if it was truncated in the middle of a header\n\t// block. 
Because it's usually not owned, we can only\n\t// process it under Write.\n\tbuf []byte // not owned; only valid during Write\n\n\t// saveBuf is previous data passed to Write which we weren't able\n\t// to fully parse before. Unlike buf, we own this data.\n\tsaveBuf bytes.Buffer\n}\n\n// NewDecoder returns a new decoder with the provided maximum dynamic\n// table size. The emitFunc will be called for each valid field\n// parsed, in the same goroutine as calls to Write, before Write returns.\nfunc NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {\n\td := &Decoder{\n\t\temit:        emitFunc,\n\t\temitEnabled: true,\n\t}\n\td.dynTab.table.init()\n\td.dynTab.allowedMaxSize = maxDynamicTableSize\n\td.dynTab.setMaxSize(maxDynamicTableSize)\n\treturn d\n}\n\n// ErrStringLength is returned by Decoder.Write when the max string length\n// (as configured by Decoder.SetMaxStringLength) would be violated.\nvar ErrStringLength = errors.New(\"hpack: string too long\")\n\n// SetMaxStringLength sets the maximum size of a HeaderField name or\n// value string. If a string exceeds this length (even after any\n// decompression), Write will return ErrStringLength.\n// A value of 0 means unlimited and is the default from NewDecoder.\nfunc (d *Decoder) SetMaxStringLength(n int) {\n\td.maxStrLen = n\n}\n\n// SetEmitFunc changes the callback used when new header fields\n// are decoded.\n// It must be non-nil. It does not affect EmitEnabled.\nfunc (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {\n\td.emit = emitFunc\n}\n\n// SetEmitEnabled controls whether the emitFunc provided to NewDecoder\n// should be called. 
The default is true.\n//\n// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE\n// while still decoding and keeping in-sync with decoder state, but\n// without doing unnecessary decompression or generating unnecessary\n// garbage for header fields past the limit.\nfunc (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }\n\n// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder\n// are currently enabled. The default is true.\nfunc (d *Decoder) EmitEnabled() bool { return d.emitEnabled }\n\n// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their\n// underlying buffers for garbage reasons.\n\nfunc (d *Decoder) SetMaxDynamicTableSize(v uint32) {\n\td.dynTab.setMaxSize(v)\n}\n\n// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded\n// stream (via dynamic table size updates) may set the maximum size\n// to.\nfunc (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {\n\td.dynTab.allowedMaxSize = v\n}\n\ntype dynamicTable struct {\n\t// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2\n\ttable          headerFieldTable\n\tsize           uint32 // in bytes\n\tmaxSize        uint32 // current maxSize\n\tallowedMaxSize uint32 // maxSize may go up to this, inclusive\n}\n\nfunc (dt *dynamicTable) setMaxSize(v uint32) {\n\tdt.maxSize = v\n\tdt.evict()\n}\n\nfunc (dt *dynamicTable) add(f HeaderField) {\n\tdt.table.addEntry(f)\n\tdt.size += f.Size()\n\tdt.evict()\n}\n\n// If we're too big, evict old stuff.\nfunc (dt *dynamicTable) evict() {\n\tvar n int\n\tfor dt.size > dt.maxSize && n < dt.table.len() {\n\t\tdt.size -= dt.table.ents[n].Size()\n\t\tn++\n\t}\n\tdt.table.evictOldest(n)\n}\n\nfunc (d *Decoder) maxTableIndex() int {\n\t// This should never overflow. RFC 7540 Section 6.5.2 limits the size of\n\t// the dynamic table to 2^32 bytes, where each entry will occupy more than\n\t// one byte. 
Further, the staticTable has a fixed, small length.\n\treturn d.dynTab.table.len() + staticTable.len()\n}\n\nfunc (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {\n\t// See Section 2.3.3.\n\tif i == 0 {\n\t\treturn\n\t}\n\tif i <= uint64(staticTable.len()) {\n\t\treturn staticTable.ents[i-1], true\n\t}\n\tif i > uint64(d.maxTableIndex()) {\n\t\treturn\n\t}\n\t// In the dynamic table, newer entries have lower indices.\n\t// However, dt.ents[0] is the oldest entry. Hence, dt.ents is\n\t// the reversed dynamic table.\n\tdt := d.dynTab.table\n\treturn dt.ents[dt.len()-(int(i)-staticTable.len())], true\n}\n\n// Decode decodes an entire block.\n//\n// TODO: remove this method and make it incremental later? This is\n// easier for debugging now.\nfunc (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {\n\tvar hf []HeaderField\n\tsaveFunc := d.emit\n\tdefer func() { d.emit = saveFunc }()\n\td.emit = func(f HeaderField) { hf = append(hf, f) }\n\tif _, err := d.Write(p); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := d.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn hf, nil\n}\n\nfunc (d *Decoder) Close() error {\n\tif d.saveBuf.Len() > 0 {\n\t\td.saveBuf.Reset()\n\t\treturn DecodingError{errors.New(\"truncated headers\")}\n\t}\n\treturn nil\n}\n\nfunc (d *Decoder) Write(p []byte) (n int, err error) {\n\tif len(p) == 0 {\n\t\t// Prevent state machine CPU attacks (making us redo\n\t\t// work up to the point of finding out we don't have\n\t\t// enough data)\n\t\treturn\n\t}\n\t// Only copy the data if we have to. Optimistically assume\n\t// that p will contain a complete header block.\n\tif d.saveBuf.Len() == 0 {\n\t\td.buf = p\n\t} else {\n\t\td.saveBuf.Write(p)\n\t\td.buf = d.saveBuf.Bytes()\n\t\td.saveBuf.Reset()\n\t}\n\n\tfor len(d.buf) > 0 {\n\t\terr = d.parseHeaderFieldRepr()\n\t\tif err == errNeedMore {\n\t\t\t// Extra paranoia, making sure saveBuf won't\n\t\t\t// get too large. 
All the varint and string\n\t\t\t// reading code earlier should already catch\n\t\t\t// overlong things and return ErrStringLength,\n\t\t\t// but keep this as a last resort.\n\t\t\tconst varIntOverhead = 8 // conservative\n\t\t\tif d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {\n\t\t\t\treturn 0, ErrStringLength\n\t\t\t}\n\t\t\td.saveBuf.Write(d.buf)\n\t\t\treturn len(p), nil\n\t\t}\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn len(p), err\n}\n\n// errNeedMore is an internal sentinel error value that means the\n// buffer is truncated and we need to read more data before we can\n// continue parsing.\nvar errNeedMore = errors.New(\"need more data\")\n\ntype indexType int\n\nconst (\n\tindexedTrue indexType = iota\n\tindexedFalse\n\tindexedNever\n)\n\nfunc (v indexType) indexed() bool   { return v == indexedTrue }\nfunc (v indexType) sensitive() bool { return v == indexedNever }\n\n// returns errNeedMore if there isn't enough data available.\n// any other error is fatal.\n// consumes d.buf iff it returns nil.\n// precondition: must be called with len(d.buf) > 0\nfunc (d *Decoder) parseHeaderFieldRepr() error {\n\tb := d.buf[0]\n\tswitch {\n\tcase b&128 != 0:\n\t\t// Indexed representation.\n\t\t// High bit set?\n\t\t// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1\n\t\treturn d.parseFieldIndexed()\n\tcase b&192 == 64:\n\t\t// 6.2.1 Literal Header Field with Incremental Indexing\n\t\t// 0b10xxxxxx: top two bits are 10\n\t\t// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1\n\t\treturn d.parseFieldLiteral(6, indexedTrue)\n\tcase b&240 == 0:\n\t\t// 6.2.2 Literal Header Field without Indexing\n\t\t// 0b0000xxxx: top four bits are 0000\n\t\t// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2\n\t\treturn d.parseFieldLiteral(4, indexedFalse)\n\tcase b&240 == 16:\n\t\t// 6.2.3 Literal Header Field never Indexed\n\t\t// 0b0001xxxx: top four bits are 0001\n\t\t// 
http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3\n\t\treturn d.parseFieldLiteral(4, indexedNever)\n\tcase b&224 == 32:\n\t\t// 6.3 Dynamic Table Size Update\n\t\t// Top three bits are '001'.\n\t\t// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3\n\t\treturn d.parseDynamicTableSizeUpdate()\n\t}\n\n\treturn DecodingError{errors.New(\"invalid encoding\")}\n}\n\n// (same invariants and behavior as parseHeaderFieldRepr)\nfunc (d *Decoder) parseFieldIndexed() error {\n\tbuf := d.buf\n\tidx, buf, err := readVarInt(7, buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\thf, ok := d.at(idx)\n\tif !ok {\n\t\treturn DecodingError{InvalidIndexError(idx)}\n\t}\n\td.buf = buf\n\treturn d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})\n}\n\n// (same invariants and behavior as parseHeaderFieldRepr)\nfunc (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {\n\tbuf := d.buf\n\tnameIdx, buf, err := readVarInt(n, buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar hf HeaderField\n\twantStr := d.emitEnabled || it.indexed()\n\tif nameIdx > 0 {\n\t\tihf, ok := d.at(nameIdx)\n\t\tif !ok {\n\t\t\treturn DecodingError{InvalidIndexError(nameIdx)}\n\t\t}\n\t\thf.Name = ihf.Name\n\t} else {\n\t\thf.Name, buf, err = d.readString(buf, wantStr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\thf.Value, buf, err = d.readString(buf, wantStr)\n\tif err != nil {\n\t\treturn err\n\t}\n\td.buf = buf\n\tif it.indexed() {\n\t\td.dynTab.add(hf)\n\t}\n\thf.Sensitive = it.sensitive()\n\treturn d.callEmit(hf)\n}\n\nfunc (d *Decoder) callEmit(hf HeaderField) error {\n\tif d.maxStrLen != 0 {\n\t\tif len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {\n\t\t\treturn ErrStringLength\n\t\t}\n\t}\n\tif d.emitEnabled {\n\t\td.emit(hf)\n\t}\n\treturn nil\n}\n\n// (same invariants and behavior as parseHeaderFieldRepr)\nfunc (d *Decoder) parseDynamicTableSizeUpdate() error {\n\tbuf := d.buf\n\tsize, buf, err := readVarInt(5, buf)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\tif size > uint64(d.dynTab.allowedMaxSize) {\n\t\treturn DecodingError{errors.New(\"dynamic table size update too large\")}\n\t}\n\td.dynTab.setMaxSize(uint32(size))\n\td.buf = buf\n\treturn nil\n}\n\nvar errVarintOverflow = DecodingError{errors.New(\"varint integer overflow\")}\n\n// readVarInt reads an unsigned variable length integer off the\n// beginning of p. n is the parameter as described in\n// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.\n//\n// n must always be between 1 and 8.\n//\n// The returned remain buffer is either a smaller suffix of p, or err != nil.\n// The error is errNeedMore if p doesn't contain a complete integer.\nfunc readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {\n\tif n < 1 || n > 8 {\n\t\tpanic(\"bad n\")\n\t}\n\tif len(p) == 0 {\n\t\treturn 0, p, errNeedMore\n\t}\n\ti = uint64(p[0])\n\tif n < 8 {\n\t\ti &= (1 << uint64(n)) - 1\n\t}\n\tif i < (1<<uint64(n))-1 {\n\t\treturn i, p[1:], nil\n\t}\n\n\torigP := p\n\tp = p[1:]\n\tvar m uint64\n\tfor len(p) > 0 {\n\t\tb := p[0]\n\t\tp = p[1:]\n\t\ti += uint64(b&127) << m\n\t\tif b&128 == 0 {\n\t\t\treturn i, p, nil\n\t\t}\n\t\tm += 7\n\t\tif m >= 63 { // TODO: proper overflow check. making this up.\n\t\t\treturn 0, origP, errVarintOverflow\n\t\t}\n\t}\n\treturn 0, origP, errNeedMore\n}\n\n// readString decodes an hpack string from p.\n//\n// wantStr is whether s will be used. If false, decompression and\n// []byte->string garbage are skipped if s will be ignored\n// anyway. 
This does mean that huffman decoding errors for non-indexed\n// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server\n// is returning an error anyway, and because they're not indexed, the error\n// won't affect the decoding state.\nfunc (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {\n\tif len(p) == 0 {\n\t\treturn \"\", p, errNeedMore\n\t}\n\tisHuff := p[0]&128 != 0\n\tstrLen, p, err := readVarInt(7, p)\n\tif err != nil {\n\t\treturn \"\", p, err\n\t}\n\tif d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {\n\t\treturn \"\", nil, ErrStringLength\n\t}\n\tif uint64(len(p)) < strLen {\n\t\treturn \"\", p, errNeedMore\n\t}\n\tif !isHuff {\n\t\tif wantStr {\n\t\t\ts = string(p[:strLen])\n\t\t}\n\t\treturn s, p[strLen:], nil\n\t}\n\n\tif wantStr {\n\t\tbuf := bufPool.Get().(*bytes.Buffer)\n\t\tbuf.Reset() // don't trust others\n\t\tdefer bufPool.Put(buf)\n\t\tif err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {\n\t\t\tbuf.Reset()\n\t\t\treturn \"\", nil, err\n\t\t}\n\t\ts = buf.String()\n\t\tbuf.Reset() // be nice to GC\n\t}\n\treturn s, p[strLen:], nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/hpack/hpack_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage hpack\n\nimport (\n\t\"bytes\"\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"math/rand\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc (d *Decoder) mustAt(idx int) HeaderField {\n\tif hf, ok := d.at(uint64(idx)); !ok {\n\t\tpanic(fmt.Sprintf(\"bogus index %d\", idx))\n\t} else {\n\t\treturn hf\n\t}\n}\n\nfunc TestDynamicTableAt(t *testing.T) {\n\td := NewDecoder(4096, nil)\n\tat := d.mustAt\n\tif got, want := at(2), (pair(\":method\", \"GET\")); got != want {\n\t\tt.Errorf(\"at(2) = %v; want %v\", got, want)\n\t}\n\td.dynTab.add(pair(\"foo\", \"bar\"))\n\td.dynTab.add(pair(\"blake\", \"miz\"))\n\tif got, want := at(staticTable.len()+1), (pair(\"blake\", \"miz\")); got != want {\n\t\tt.Errorf(\"at(dyn 1) = %v; want %v\", got, want)\n\t}\n\tif got, want := at(staticTable.len()+2), (pair(\"foo\", \"bar\")); got != want {\n\t\tt.Errorf(\"at(dyn 2) = %v; want %v\", got, want)\n\t}\n\tif got, want := at(3), (pair(\":method\", \"POST\")); got != want {\n\t\tt.Errorf(\"at(3) = %v; want %v\", got, want)\n\t}\n}\n\nfunc TestDynamicTableSizeEvict(t *testing.T) {\n\td := NewDecoder(4096, nil)\n\tif want := uint32(0); d.dynTab.size != want {\n\t\tt.Fatalf(\"size = %d; want %d\", d.dynTab.size, want)\n\t}\n\tadd := d.dynTab.add\n\tadd(pair(\"blake\", \"eats pizza\"))\n\tif want := uint32(15 + 32); d.dynTab.size != want {\n\t\tt.Fatalf(\"after pizza, size = %d; want %d\", d.dynTab.size, want)\n\t}\n\tadd(pair(\"foo\", \"bar\"))\n\tif want := uint32(15 + 32 + 6 + 32); d.dynTab.size != want {\n\t\tt.Fatalf(\"after foo bar, size = %d; want %d\", d.dynTab.size, want)\n\t}\n\td.dynTab.setMaxSize(15 + 32 + 1 /* slop */)\n\tif want := uint32(6 + 32); d.dynTab.size != want {\n\t\tt.Fatalf(\"after setMaxSize, size = %d; want %d\", d.dynTab.size, want)\n\t}\n\tif got, want := 
d.mustAt(staticTable.len()+1), (pair(\"foo\", \"bar\")); got != want {\n\t\tt.Errorf(\"at(dyn 1) = %v; want %v\", got, want)\n\t}\n\tadd(pair(\"long\", strings.Repeat(\"x\", 500)))\n\tif want := uint32(0); d.dynTab.size != want {\n\t\tt.Fatalf(\"after big one, size = %d; want %d\", d.dynTab.size, want)\n\t}\n}\n\nfunc TestDecoderDecode(t *testing.T) {\n\ttests := []struct {\n\t\tname       string\n\t\tin         []byte\n\t\twant       []HeaderField\n\t\twantDynTab []HeaderField // newest entry first\n\t}{\n\t\t// C.2.1 Literal Header Field with Indexing\n\t\t// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.1\n\t\t{\"C.2.1\", dehex(\"400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572\"),\n\t\t\t[]HeaderField{pair(\"custom-key\", \"custom-header\")},\n\t\t\t[]HeaderField{pair(\"custom-key\", \"custom-header\")},\n\t\t},\n\n\t\t// C.2.2 Literal Header Field without Indexing\n\t\t// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.2\n\t\t{\"C.2.2\", dehex(\"040c 2f73 616d 706c 652f 7061 7468\"),\n\t\t\t[]HeaderField{pair(\":path\", \"/sample/path\")},\n\t\t\t[]HeaderField{}},\n\n\t\t// C.2.3 Literal Header Field never Indexed\n\t\t// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.3\n\t\t{\"C.2.3\", dehex(\"1008 7061 7373 776f 7264 0673 6563 7265 74\"),\n\t\t\t[]HeaderField{{\"password\", \"secret\", true}},\n\t\t\t[]HeaderField{}},\n\n\t\t// C.2.4 Indexed Header Field\n\t\t// http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.4\n\t\t{\"C.2.4\", []byte(\"\\x82\"),\n\t\t\t[]HeaderField{pair(\":method\", \"GET\")},\n\t\t\t[]HeaderField{}},\n\t}\n\tfor _, tt := range tests {\n\t\td := NewDecoder(4096, nil)\n\t\thf, err := d.DecodeFull(tt.in)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: %v\", tt.name, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(hf, tt.want) {\n\t\t\tt.Errorf(\"%s: Got %v; want %v\", tt.name, hf, tt.want)\n\t\t}\n\t\tgotDynTab := d.dynTab.reverseCopy()\n\t\tif 
!reflect.DeepEqual(gotDynTab, tt.wantDynTab) {\n\t\t\tt.Errorf(\"%s: dynamic table after = %v; want %v\", tt.name, gotDynTab, tt.wantDynTab)\n\t\t}\n\t}\n}\n\nfunc (dt *dynamicTable) reverseCopy() (hf []HeaderField) {\n\thf = make([]HeaderField, len(dt.table.ents))\n\tfor i := range hf {\n\t\thf[i] = dt.table.ents[len(dt.table.ents)-1-i]\n\t}\n\treturn\n}\n\ntype encAndWant struct {\n\tenc         []byte\n\twant        []HeaderField\n\twantDynTab  []HeaderField\n\twantDynSize uint32\n}\n\n// C.3 Request Examples without Huffman Coding\n// http://http2.github.io/http2-spec/compression.html#rfc.section.C.3\nfunc TestDecodeC3_NoHuffman(t *testing.T) {\n\ttestDecodeSeries(t, 4096, []encAndWant{\n\t\t{dehex(\"8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d\"),\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\":method\", \"GET\"),\n\t\t\t\tpair(\":scheme\", \"http\"),\n\t\t\t\tpair(\":path\", \"/\"),\n\t\t\t\tpair(\":authority\", \"www.example.com\"),\n\t\t\t},\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\":authority\", \"www.example.com\"),\n\t\t\t},\n\t\t\t57,\n\t\t},\n\t\t{dehex(\"8286 84be 5808 6e6f 2d63 6163 6865\"),\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\":method\", \"GET\"),\n\t\t\t\tpair(\":scheme\", \"http\"),\n\t\t\t\tpair(\":path\", \"/\"),\n\t\t\t\tpair(\":authority\", \"www.example.com\"),\n\t\t\t\tpair(\"cache-control\", \"no-cache\"),\n\t\t\t},\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\"cache-control\", \"no-cache\"),\n\t\t\t\tpair(\":authority\", \"www.example.com\"),\n\t\t\t},\n\t\t\t110,\n\t\t},\n\t\t{dehex(\"8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65\"),\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\":method\", \"GET\"),\n\t\t\t\tpair(\":scheme\", \"https\"),\n\t\t\t\tpair(\":path\", \"/index.html\"),\n\t\t\t\tpair(\":authority\", \"www.example.com\"),\n\t\t\t\tpair(\"custom-key\", \"custom-value\"),\n\t\t\t},\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\"custom-key\", \"custom-value\"),\n\t\t\t\tpair(\"cache-control\", \"no-cache\"),\n\t\t\t\tpair(\":authority\", 
\"www.example.com\"),\n\t\t\t},\n\t\t\t164,\n\t\t},\n\t})\n}\n\n// C.4 Request Examples with Huffman Coding\n// http://http2.github.io/http2-spec/compression.html#rfc.section.C.4\nfunc TestDecodeC4_Huffman(t *testing.T) {\n\ttestDecodeSeries(t, 4096, []encAndWant{\n\t\t{dehex(\"8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff\"),\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\":method\", \"GET\"),\n\t\t\t\tpair(\":scheme\", \"http\"),\n\t\t\t\tpair(\":path\", \"/\"),\n\t\t\t\tpair(\":authority\", \"www.example.com\"),\n\t\t\t},\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\":authority\", \"www.example.com\"),\n\t\t\t},\n\t\t\t57,\n\t\t},\n\t\t{dehex(\"8286 84be 5886 a8eb 1064 9cbf\"),\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\":method\", \"GET\"),\n\t\t\t\tpair(\":scheme\", \"http\"),\n\t\t\t\tpair(\":path\", \"/\"),\n\t\t\t\tpair(\":authority\", \"www.example.com\"),\n\t\t\t\tpair(\"cache-control\", \"no-cache\"),\n\t\t\t},\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\"cache-control\", \"no-cache\"),\n\t\t\t\tpair(\":authority\", \"www.example.com\"),\n\t\t\t},\n\t\t\t110,\n\t\t},\n\t\t{dehex(\"8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf\"),\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\":method\", \"GET\"),\n\t\t\t\tpair(\":scheme\", \"https\"),\n\t\t\t\tpair(\":path\", \"/index.html\"),\n\t\t\t\tpair(\":authority\", \"www.example.com\"),\n\t\t\t\tpair(\"custom-key\", \"custom-value\"),\n\t\t\t},\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\"custom-key\", \"custom-value\"),\n\t\t\t\tpair(\"cache-control\", \"no-cache\"),\n\t\t\t\tpair(\":authority\", \"www.example.com\"),\n\t\t\t},\n\t\t\t164,\n\t\t},\n\t})\n}\n\n// http://http2.github.io/http2-spec/compression.html#rfc.section.C.5\n// \"This section shows several consecutive header lists, corresponding\n// to HTTP responses, on the same connection. 
The HTTP/2 setting\n// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256\n// octets, causing some evictions to occur.\"\nfunc TestDecodeC5_ResponsesNoHuff(t *testing.T) {\n\ttestDecodeSeries(t, 256, []encAndWant{\n\t\t{dehex(`\n4803 3330 3258 0770 7269 7661 7465 611d\n4d6f 6e2c 2032 3120 4f63 7420 3230 3133\n2032 303a 3133 3a32 3120 474d 546e 1768\n7474 7073 3a2f 2f77 7777 2e65 7861 6d70\n6c65 2e63 6f6d\n`),\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\":status\", \"302\"),\n\t\t\t\tpair(\"cache-control\", \"private\"),\n\t\t\t\tpair(\"date\", \"Mon, 21 Oct 2013 20:13:21 GMT\"),\n\t\t\t\tpair(\"location\", \"https://www.example.com\"),\n\t\t\t},\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\"location\", \"https://www.example.com\"),\n\t\t\t\tpair(\"date\", \"Mon, 21 Oct 2013 20:13:21 GMT\"),\n\t\t\t\tpair(\"cache-control\", \"private\"),\n\t\t\t\tpair(\":status\", \"302\"),\n\t\t\t},\n\t\t\t222,\n\t\t},\n\t\t{dehex(\"4803 3330 37c1 c0bf\"),\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\":status\", \"307\"),\n\t\t\t\tpair(\"cache-control\", \"private\"),\n\t\t\t\tpair(\"date\", \"Mon, 21 Oct 2013 20:13:21 GMT\"),\n\t\t\t\tpair(\"location\", \"https://www.example.com\"),\n\t\t\t},\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\":status\", \"307\"),\n\t\t\t\tpair(\"location\", \"https://www.example.com\"),\n\t\t\t\tpair(\"date\", \"Mon, 21 Oct 2013 20:13:21 GMT\"),\n\t\t\t\tpair(\"cache-control\", \"private\"),\n\t\t\t},\n\t\t\t222,\n\t\t},\n\t\t{dehex(`\n88c1 611d 4d6f 6e2c 2032 3120 4f63 7420\n3230 3133 2032 303a 3133 3a32 3220 474d\n54c0 5a04 677a 6970 7738 666f 6f3d 4153\n444a 4b48 514b 425a 584f 5157 454f 5049\n5541 5851 5745 4f49 553b 206d 6178 2d61\n6765 3d33 3630 303b 2076 6572 7369 6f6e\n3d31\n`),\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\":status\", \"200\"),\n\t\t\t\tpair(\"cache-control\", \"private\"),\n\t\t\t\tpair(\"date\", \"Mon, 21 Oct 2013 20:13:22 GMT\"),\n\t\t\t\tpair(\"location\", \"https://www.example.com\"),\n\t\t\t\tpair(\"content-encoding\", 
\"gzip\"),\n\t\t\t\tpair(\"set-cookie\", \"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1\"),\n\t\t\t},\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\"set-cookie\", \"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1\"),\n\t\t\t\tpair(\"content-encoding\", \"gzip\"),\n\t\t\t\tpair(\"date\", \"Mon, 21 Oct 2013 20:13:22 GMT\"),\n\t\t\t},\n\t\t\t215,\n\t\t},\n\t})\n}\n\n// http://http2.github.io/http2-spec/compression.html#rfc.section.C.6\n// \"This section shows the same examples as the previous section, but\n// using Huffman encoding for the literal values. The HTTP/2 setting\n// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256\n// octets, causing some evictions to occur. The eviction mechanism\n// uses the length of the decoded literal values, so the same\n// evictions occurs as in the previous section.\"\nfunc TestDecodeC6_ResponsesHuffman(t *testing.T) {\n\ttestDecodeSeries(t, 256, []encAndWant{\n\t\t{dehex(`\n4882 6402 5885 aec3 771a 4b61 96d0 7abe\n9410 54d4 44a8 2005 9504 0b81 66e0 82a6\n2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8\ne9ae 82ae 43d3\n`),\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\":status\", \"302\"),\n\t\t\t\tpair(\"cache-control\", \"private\"),\n\t\t\t\tpair(\"date\", \"Mon, 21 Oct 2013 20:13:21 GMT\"),\n\t\t\t\tpair(\"location\", \"https://www.example.com\"),\n\t\t\t},\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\"location\", \"https://www.example.com\"),\n\t\t\t\tpair(\"date\", \"Mon, 21 Oct 2013 20:13:21 GMT\"),\n\t\t\t\tpair(\"cache-control\", \"private\"),\n\t\t\t\tpair(\":status\", \"302\"),\n\t\t\t},\n\t\t\t222,\n\t\t},\n\t\t{dehex(\"4883 640e ffc1 c0bf\"),\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\":status\", \"307\"),\n\t\t\t\tpair(\"cache-control\", \"private\"),\n\t\t\t\tpair(\"date\", \"Mon, 21 Oct 2013 20:13:21 GMT\"),\n\t\t\t\tpair(\"location\", \"https://www.example.com\"),\n\t\t\t},\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\":status\", \"307\"),\n\t\t\t\tpair(\"location\", \"https://www.example.com\"),\n\t\t\t\tpair(\"date\", \"Mon, 
21 Oct 2013 20:13:21 GMT\"),\n\t\t\t\tpair(\"cache-control\", \"private\"),\n\t\t\t},\n\t\t\t222,\n\t\t},\n\t\t{dehex(`\n88c1 6196 d07a be94 1054 d444 a820 0595\n040b 8166 e084 a62d 1bff c05a 839b d9ab\n77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b\n3960 d5af 2708 7f36 72c1 ab27 0fb5 291f\n9587 3160 65c0 03ed 4ee5 b106 3d50 07\n`),\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\":status\", \"200\"),\n\t\t\t\tpair(\"cache-control\", \"private\"),\n\t\t\t\tpair(\"date\", \"Mon, 21 Oct 2013 20:13:22 GMT\"),\n\t\t\t\tpair(\"location\", \"https://www.example.com\"),\n\t\t\t\tpair(\"content-encoding\", \"gzip\"),\n\t\t\t\tpair(\"set-cookie\", \"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1\"),\n\t\t\t},\n\t\t\t[]HeaderField{\n\t\t\t\tpair(\"set-cookie\", \"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1\"),\n\t\t\t\tpair(\"content-encoding\", \"gzip\"),\n\t\t\t\tpair(\"date\", \"Mon, 21 Oct 2013 20:13:22 GMT\"),\n\t\t\t},\n\t\t\t215,\n\t\t},\n\t})\n}\n\nfunc testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) {\n\td := NewDecoder(size, nil)\n\tfor i, step := range steps {\n\t\thf, err := d.DecodeFull(step.enc)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error at step index %d: %v\", i, err)\n\t\t}\n\t\tif !reflect.DeepEqual(hf, step.want) {\n\t\t\tt.Fatalf(\"At step index %d: Got headers %v; want %v\", i, hf, step.want)\n\t\t}\n\t\tgotDynTab := d.dynTab.reverseCopy()\n\t\tif !reflect.DeepEqual(gotDynTab, step.wantDynTab) {\n\t\t\tt.Errorf(\"After step index %d, dynamic table = %v; want %v\", i, gotDynTab, step.wantDynTab)\n\t\t}\n\t\tif d.dynTab.size != step.wantDynSize {\n\t\t\tt.Errorf(\"After step index %d, dynamic table size = %v; want %v\", i, d.dynTab.size, step.wantDynSize)\n\t\t}\n\t}\n}\n\nfunc TestHuffmanDecodeExcessPadding(t *testing.T) {\n\ttests := [][]byte{\n\t\t{0xff},                                   // Padding Exceeds 7 bits\n\t\t{0x1f, 0xff},                             // {\"a\", 1 byte excess padding}\n\t\t{0x1f, 0xff, 0xff},               
        // {\"a\", 2 byte excess padding}\n\t\t{0x1f, 0xff, 0xff, 0xff},                 // {\"a\", 3 byte excess padding}\n\t\t{0xff, 0x9f, 0xff, 0xff, 0xff},           // {\"a\", 29 bit excess padding}\n\t\t{'R', 0xbc, '0', 0xff, 0xff, 0xff, 0xff}, // Padding ends on partial symbol.\n\t}\n\tfor i, in := range tests {\n\t\tvar buf bytes.Buffer\n\t\tif _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {\n\t\t\tt.Errorf(\"test-%d: decode(%q) = %v; want ErrInvalidHuffman\", i, in, err)\n\t\t}\n\t}\n}\n\nfunc TestHuffmanDecodeEOS(t *testing.T) {\n\tin := []byte{0xff, 0xff, 0xff, 0xff, 0xfc} // {EOS, \"?\"}\n\tvar buf bytes.Buffer\n\tif _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {\n\t\tt.Errorf(\"error = %v; want ErrInvalidHuffman\", err)\n\t}\n}\n\nfunc TestHuffmanDecodeMaxLengthOnTrailingByte(t *testing.T) {\n\tin := []byte{0x00, 0x01} // {\"0\", \"0\", \"0\"}\n\tvar buf bytes.Buffer\n\tif err := huffmanDecode(&buf, 2, in); err != ErrStringLength {\n\t\tt.Errorf(\"error = %v; want ErrStringLength\", err)\n\t}\n}\n\nfunc TestHuffmanDecodeCorruptPadding(t *testing.T) {\n\tin := []byte{0x00}\n\tvar buf bytes.Buffer\n\tif _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman {\n\t\tt.Errorf(\"error = %v; want ErrInvalidHuffman\", err)\n\t}\n}\n\nfunc TestHuffmanDecode(t *testing.T) {\n\ttests := []struct {\n\t\tinHex, want string\n\t}{\n\t\t{\"f1e3 c2e5 f23a 6ba0 ab90 f4ff\", \"www.example.com\"},\n\t\t{\"a8eb 1064 9cbf\", \"no-cache\"},\n\t\t{\"25a8 49e9 5ba9 7d7f\", \"custom-key\"},\n\t\t{\"25a8 49e9 5bb8 e8b4 bf\", \"custom-value\"},\n\t\t{\"6402\", \"302\"},\n\t\t{\"aec3 771a 4b\", \"private\"},\n\t\t{\"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff\", \"Mon, 21 Oct 2013 20:13:21 GMT\"},\n\t\t{\"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3\", \"https://www.example.com\"},\n\t\t{\"9bd9 ab\", \"gzip\"},\n\t\t{\"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 
07\",\n\t\t\t\"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1\"},\n\t}\n\tfor i, tt := range tests {\n\t\tvar buf bytes.Buffer\n\t\tin, err := hex.DecodeString(strings.Replace(tt.inHex, \" \", \"\", -1))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%d. hex input error: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif _, err := HuffmanDecode(&buf, in); err != nil {\n\t\t\tt.Errorf(\"%d. decode error: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif got := buf.String(); tt.want != got {\n\t\t\tt.Errorf(\"%d. decode = %q; want %q\", i, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestAppendHuffmanString(t *testing.T) {\n\ttests := []struct {\n\t\tin, want string\n\t}{\n\t\t{\"www.example.com\", \"f1e3 c2e5 f23a 6ba0 ab90 f4ff\"},\n\t\t{\"no-cache\", \"a8eb 1064 9cbf\"},\n\t\t{\"custom-key\", \"25a8 49e9 5ba9 7d7f\"},\n\t\t{\"custom-value\", \"25a8 49e9 5bb8 e8b4 bf\"},\n\t\t{\"302\", \"6402\"},\n\t\t{\"private\", \"aec3 771a 4b\"},\n\t\t{\"Mon, 21 Oct 2013 20:13:21 GMT\", \"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff\"},\n\t\t{\"https://www.example.com\", \"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3\"},\n\t\t{\"gzip\", \"9bd9 ab\"},\n\t\t{\"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1\",\n\t\t\t\"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07\"},\n\t}\n\tfor i, tt := range tests {\n\t\tbuf := []byte{}\n\t\twant := strings.Replace(tt.want, \" \", \"\", -1)\n\t\tbuf = AppendHuffmanString(buf, tt.in)\n\t\tif got := hex.EncodeToString(buf); want != got {\n\t\t\tt.Errorf(\"%d. 
encode = %q; want %q\", i, got, want)\n\t\t}\n\t}\n}\n\nfunc TestHuffmanMaxStrLen(t *testing.T) {\n\tconst msg = \"Some string\"\n\thuff := AppendHuffmanString(nil, msg)\n\n\ttestGood := func(max int) {\n\t\tvar out bytes.Buffer\n\t\tif err := huffmanDecode(&out, max, huff); err != nil {\n\t\t\tt.Errorf(\"For maxLen=%d, unexpected error: %v\", max, err)\n\t\t}\n\t\tif out.String() != msg {\n\t\t\tt.Errorf(\"For maxLen=%d, out = %q; want %q\", max, out.String(), msg)\n\t\t}\n\t}\n\ttestGood(0)\n\ttestGood(len(msg))\n\ttestGood(len(msg) + 1)\n\n\tvar out bytes.Buffer\n\tif err := huffmanDecode(&out, len(msg)-1, huff); err != ErrStringLength {\n\t\tt.Errorf(\"err = %v; want ErrStringLength\", err)\n\t}\n}\n\nfunc TestHuffmanRoundtripStress(t *testing.T) {\n\tconst Len = 50 // of uncompressed string\n\tinput := make([]byte, Len)\n\tvar output bytes.Buffer\n\tvar huff []byte\n\n\tn := 5000\n\tif testing.Short() {\n\t\tn = 100\n\t}\n\tseed := time.Now().UnixNano()\n\tt.Logf(\"Seed = %v\", seed)\n\tsrc := rand.New(rand.NewSource(seed))\n\tvar encSize int64\n\tfor i := 0; i < n; i++ {\n\t\tfor l := range input {\n\t\t\tinput[l] = byte(src.Intn(256))\n\t\t}\n\t\thuff = AppendHuffmanString(huff[:0], string(input))\n\t\tencSize += int64(len(huff))\n\t\toutput.Reset()\n\t\tif err := huffmanDecode(&output, 0, huff); err != nil {\n\t\t\tt.Errorf(\"Failed to decode %q -> %q -> error %v\", input, huff, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !bytes.Equal(output.Bytes(), input) {\n\t\t\tt.Errorf(\"Roundtrip failure on %q -> %q -> %q\", input, huff, output.Bytes())\n\t\t}\n\t}\n\tt.Logf(\"Compressed size of original: %0.02f%% (%v -> %v)\", 100*(float64(encSize)/(Len*float64(n))), Len*n, encSize)\n}\n\nfunc TestHuffmanDecodeFuzz(t *testing.T) {\n\tconst Len = 50 // of compressed\n\tvar buf, zbuf bytes.Buffer\n\n\tn := 5000\n\tif testing.Short() {\n\t\tn = 100\n\t}\n\tseed := time.Now().UnixNano()\n\tt.Logf(\"Seed = %v\", seed)\n\tsrc := rand.New(rand.NewSource(seed))\n\tnumFail := 
0\n\tfor i := 0; i < n; i++ {\n\t\tzbuf.Reset()\n\t\tif i == 0 {\n\t\t\t// Start with at least one invalid one.\n\t\t\tzbuf.WriteString(\"00\\x91\\xff\\xff\\xff\\xff\\xc8\")\n\t\t} else {\n\t\t\tfor l := 0; l < Len; l++ {\n\t\t\t\tzbuf.WriteByte(byte(src.Intn(256)))\n\t\t\t}\n\t\t}\n\n\t\tbuf.Reset()\n\t\tif err := huffmanDecode(&buf, 0, zbuf.Bytes()); err != nil {\n\t\t\tif err == ErrInvalidHuffman {\n\t\t\t\tnumFail++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt.Errorf(\"Failed to decode %q: %v\", zbuf.Bytes(), err)\n\t\t\tcontinue\n\t\t}\n\t}\n\tt.Logf(\"%0.02f%% are invalid (%d / %d)\", 100*float64(numFail)/float64(n), numFail, n)\n\tif numFail < 1 {\n\t\tt.Error(\"expected at least one invalid huffman encoding (test starts with one)\")\n\t}\n}\n\nfunc TestReadVarInt(t *testing.T) {\n\ttype res struct {\n\t\ti        uint64\n\t\tconsumed int\n\t\terr      error\n\t}\n\ttests := []struct {\n\t\tn    byte\n\t\tp    []byte\n\t\twant res\n\t}{\n\t\t// Fits in a byte:\n\t\t{1, []byte{0}, res{0, 1, nil}},\n\t\t{2, []byte{2}, res{2, 1, nil}},\n\t\t{3, []byte{6}, res{6, 1, nil}},\n\t\t{4, []byte{14}, res{14, 1, nil}},\n\t\t{5, []byte{30}, res{30, 1, nil}},\n\t\t{6, []byte{62}, res{62, 1, nil}},\n\t\t{7, []byte{126}, res{126, 1, nil}},\n\t\t{8, []byte{254}, res{254, 1, nil}},\n\n\t\t// Doesn't fit in a byte:\n\t\t{1, []byte{1}, res{0, 0, errNeedMore}},\n\t\t{2, []byte{3}, res{0, 0, errNeedMore}},\n\t\t{3, []byte{7}, res{0, 0, errNeedMore}},\n\t\t{4, []byte{15}, res{0, 0, errNeedMore}},\n\t\t{5, []byte{31}, res{0, 0, errNeedMore}},\n\t\t{6, []byte{63}, res{0, 0, errNeedMore}},\n\t\t{7, []byte{127}, res{0, 0, errNeedMore}},\n\t\t{8, []byte{255}, res{0, 0, errNeedMore}},\n\n\t\t// Ignoring top bits:\n\t\t{5, []byte{255, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 111\n\t\t{5, []byte{159, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 100\n\t\t{5, []byte{191, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 101\n\n\t\t// Extra byte:\n\t\t{5, []byte{191, 
154, 10, 2}, res{1337, 3, nil}}, // extra byte\n\n\t\t// Short a byte:\n\t\t{5, []byte{191, 154}, res{0, 0, errNeedMore}},\n\n\t\t// integer overflow:\n\t\t{1, []byte{255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, res{0, 0, errVarintOverflow}},\n\t}\n\tfor _, tt := range tests {\n\t\ti, remain, err := readVarInt(tt.n, tt.p)\n\t\tconsumed := len(tt.p) - len(remain)\n\t\tgot := res{i, consumed, err}\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"readVarInt(%d, %v ~ %x) = %+v; want %+v\", tt.n, tt.p, tt.p, got, tt.want)\n\t\t}\n\t}\n}\n\n// Fuzz crash, originally reported at https://github.com/bradfitz/http2/issues/56\nfunc TestHuffmanFuzzCrash(t *testing.T) {\n\tgot, err := HuffmanDecodeToString([]byte(\"00\\x91\\xff\\xff\\xff\\xff\\xc8\"))\n\tif got != \"\" {\n\t\tt.Errorf(\"Got %q; want empty string\", got)\n\t}\n\tif err != ErrInvalidHuffman {\n\t\tt.Errorf(\"Err = %v; want ErrInvalidHuffman\", err)\n\t}\n}\n\nfunc pair(name, value string) HeaderField {\n\treturn HeaderField{Name: name, Value: value}\n}\n\nfunc dehex(s string) []byte {\n\ts = strings.Replace(s, \" \", \"\", -1)\n\ts = strings.Replace(s, \"\\n\", \"\", -1)\n\tb, err := hex.DecodeString(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn b\n}\n\nfunc TestEmitEnabled(t *testing.T) {\n\tvar buf bytes.Buffer\n\tenc := NewEncoder(&buf)\n\tenc.WriteField(HeaderField{Name: \"foo\", Value: \"bar\"})\n\tenc.WriteField(HeaderField{Name: \"foo\", Value: \"bar\"})\n\n\tnumCallback := 0\n\tvar dec *Decoder\n\tdec = NewDecoder(8<<20, func(HeaderField) {\n\t\tnumCallback++\n\t\tdec.SetEmitEnabled(false)\n\t})\n\tif !dec.EmitEnabled() {\n\t\tt.Errorf(\"initial emit enabled = false; want true\")\n\t}\n\tif _, err := dec.Write(buf.Bytes()); err != nil {\n\t\tt.Error(err)\n\t}\n\tif numCallback != 1 {\n\t\tt.Errorf(\"num callbacks = %d; want 1\", numCallback)\n\t}\n\tif dec.EmitEnabled() {\n\t\tt.Errorf(\"emit enabled = true; want false\")\n\t}\n}\n\nfunc TestSaveBufLimit(t *testing.T) 
{\n\tconst maxStr = 1 << 10\n\tvar got []HeaderField\n\tdec := NewDecoder(initialHeaderTableSize, func(hf HeaderField) {\n\t\tgot = append(got, hf)\n\t})\n\tdec.SetMaxStringLength(maxStr)\n\tvar frag []byte\n\tfrag = append(frag[:0], encodeTypeByte(false, false))\n\tfrag = appendVarInt(frag, 7, 3)\n\tfrag = append(frag, \"foo\"...)\n\tfrag = appendVarInt(frag, 7, 3)\n\tfrag = append(frag, \"bar\"...)\n\n\tif _, err := dec.Write(frag); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twant := []HeaderField{{Name: \"foo\", Value: \"bar\"}}\n\tif !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"After small writes, got %v; want %v\", got, want)\n\t}\n\n\tfrag = append(frag[:0], encodeTypeByte(false, false))\n\tfrag = appendVarInt(frag, 7, maxStr*3)\n\tfrag = append(frag, make([]byte, maxStr*3)...)\n\n\t_, err := dec.Write(frag)\n\tif err != ErrStringLength {\n\t\tt.Fatalf(\"Write error = %v; want ErrStringLength\", err)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/hpack/huffman.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage hpack\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"sync\"\n)\n\nvar bufPool = sync.Pool{\n\tNew: func() interface{} { return new(bytes.Buffer) },\n}\n\n// HuffmanDecode decodes the string in v and writes the expanded\n// result to w, returning the number of bytes written to w and the\n// Write call's return value. At most one Write call is made.\nfunc HuffmanDecode(w io.Writer, v []byte) (int, error) {\n\tbuf := bufPool.Get().(*bytes.Buffer)\n\tbuf.Reset()\n\tdefer bufPool.Put(buf)\n\tif err := huffmanDecode(buf, 0, v); err != nil {\n\t\treturn 0, err\n\t}\n\treturn w.Write(buf.Bytes())\n}\n\n// HuffmanDecodeToString decodes the string in v.\nfunc HuffmanDecodeToString(v []byte) (string, error) {\n\tbuf := bufPool.Get().(*bytes.Buffer)\n\tbuf.Reset()\n\tdefer bufPool.Put(buf)\n\tif err := huffmanDecode(buf, 0, v); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn buf.String(), nil\n}\n\n// ErrInvalidHuffman is returned for errors found decoding\n// Huffman-encoded strings.\nvar ErrInvalidHuffman = errors.New(\"hpack: invalid Huffman-encoded data\")\n\n// huffmanDecode decodes v to buf.\n// If maxLen is greater than 0, attempts to write more to buf than\n// maxLen bytes will return ErrStringLength.\nfunc huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {\n\tn := rootHuffmanNode\n\t// cur is the bit buffer that has not been fed into n.\n\t// cbits is the number of low order bits in cur that are valid.\n\t// sbits is the number of bits of the symbol prefix being decoded.\n\tcur, cbits, sbits := uint(0), uint8(0), uint8(0)\n\tfor _, b := range v {\n\t\tcur = cur<<8 | uint(b)\n\t\tcbits += 8\n\t\tsbits += 8\n\t\tfor cbits >= 8 {\n\t\t\tidx := byte(cur >> (cbits - 8))\n\t\t\tn = n.children[idx]\n\t\t\tif n == nil {\n\t\t\t\treturn 
ErrInvalidHuffman\n\t\t\t}\n\t\t\tif n.children == nil {\n\t\t\t\tif maxLen != 0 && buf.Len() == maxLen {\n\t\t\t\t\treturn ErrStringLength\n\t\t\t\t}\n\t\t\t\tbuf.WriteByte(n.sym)\n\t\t\t\tcbits -= n.codeLen\n\t\t\t\tn = rootHuffmanNode\n\t\t\t\tsbits = cbits\n\t\t\t} else {\n\t\t\t\tcbits -= 8\n\t\t\t}\n\t\t}\n\t}\n\tfor cbits > 0 {\n\t\tn = n.children[byte(cur<<(8-cbits))]\n\t\tif n == nil {\n\t\t\treturn ErrInvalidHuffman\n\t\t}\n\t\tif n.children != nil || n.codeLen > cbits {\n\t\t\tbreak\n\t\t}\n\t\tif maxLen != 0 && buf.Len() == maxLen {\n\t\t\treturn ErrStringLength\n\t\t}\n\t\tbuf.WriteByte(n.sym)\n\t\tcbits -= n.codeLen\n\t\tn = rootHuffmanNode\n\t\tsbits = cbits\n\t}\n\tif sbits > 7 {\n\t\t// Either there was an incomplete symbol, or overlong padding.\n\t\t// Both are decoding errors per RFC 7541 section 5.2.\n\t\treturn ErrInvalidHuffman\n\t}\n\tif mask := uint(1<<cbits - 1); cur&mask != mask {\n\t\t// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.\n\t\treturn ErrInvalidHuffman\n\t}\n\n\treturn nil\n}\n\ntype node struct {\n\t// children is non-nil for internal nodes\n\tchildren []*node\n\n\t// The following are only valid if children is nil:\n\tcodeLen uint8 // number of bits that led to the output of sym\n\tsym     byte  // output symbol\n}\n\nfunc newInternalNode() *node {\n\treturn &node{children: make([]*node, 256)}\n}\n\nvar rootHuffmanNode = newInternalNode()\n\nfunc init() {\n\tif len(huffmanCodes) != 256 {\n\t\tpanic(\"unexpected size\")\n\t}\n\tfor i, code := range huffmanCodes {\n\t\taddDecoderNode(byte(i), code, huffmanCodeLen[i])\n\t}\n}\n\nfunc addDecoderNode(sym byte, code uint32, codeLen uint8) {\n\tcur := rootHuffmanNode\n\tfor codeLen > 8 {\n\t\tcodeLen -= 8\n\t\ti := uint8(code >> codeLen)\n\t\tif cur.children[i] == nil {\n\t\t\tcur.children[i] = newInternalNode()\n\t\t}\n\t\tcur = cur.children[i]\n\t}\n\tshift := 8 - codeLen\n\tstart, end := int(uint8(code<<shift)), int(1<<shift)\n\tfor i := start; i < start+end; i++ 
{\n\t\tcur.children[i] = &node{sym: sym, codeLen: codeLen}\n\t}\n}\n\n// AppendHuffmanString appends s, as encoded in Huffman codes, to dst\n// and returns the extended buffer.\nfunc AppendHuffmanString(dst []byte, s string) []byte {\n\trembits := uint8(8)\n\n\tfor i := 0; i < len(s); i++ {\n\t\tif rembits == 8 {\n\t\t\tdst = append(dst, 0)\n\t\t}\n\t\tdst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])\n\t}\n\n\tif rembits < 8 {\n\t\t// special EOS symbol\n\t\tcode := uint32(0x3fffffff)\n\t\tnbits := uint8(30)\n\n\t\tt := uint8(code >> (nbits - rembits))\n\t\tdst[len(dst)-1] |= t\n\t}\n\n\treturn dst\n}\n\n// HuffmanEncodeLength returns the number of bytes required to encode\n// s in Huffman codes. The result is round up to byte boundary.\nfunc HuffmanEncodeLength(s string) uint64 {\n\tn := uint64(0)\n\tfor i := 0; i < len(s); i++ {\n\t\tn += uint64(huffmanCodeLen[s[i]])\n\t}\n\treturn (n + 7) / 8\n}\n\n// appendByteToHuffmanCode appends Huffman code for c to dst and\n// returns the extended buffer and the remaining bits in the last\n// element. The appending is not byte aligned and the remaining bits\n// in the last element of dst is given in rembits.\nfunc appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {\n\tcode := huffmanCodes[c]\n\tnbits := huffmanCodeLen[c]\n\n\tfor {\n\t\tif rembits > nbits {\n\t\t\tt := uint8(code << (rembits - nbits))\n\t\t\tdst[len(dst)-1] |= t\n\t\t\trembits -= nbits\n\t\t\tbreak\n\t\t}\n\n\t\tt := uint8(code >> (nbits - rembits))\n\t\tdst[len(dst)-1] |= t\n\n\t\tnbits -= rembits\n\t\trembits = 8\n\n\t\tif nbits == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tdst = append(dst, 0)\n\t}\n\n\treturn dst, rembits\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/hpack/tables.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage hpack\n\nimport (\n\t\"fmt\"\n)\n\n// headerFieldTable implements a list of HeaderFields.\n// This is used to implement the static and dynamic tables.\ntype headerFieldTable struct {\n\t// For static tables, entries are never evicted.\n\t//\n\t// For dynamic tables, entries are evicted from ents[0] and added to the end.\n\t// Each entry has a unique id that starts at one and increments for each\n\t// entry that is added. This unique id is stable across evictions, meaning\n\t// it can be used as a pointer to a specific entry. As in hpack, unique ids\n\t// are 1-based. The unique id for ents[k] is k + evictCount + 1.\n\t//\n\t// Zero is not a valid unique id.\n\t//\n\t// evictCount should not overflow in any remotely practical situation. In\n\t// practice, we will have one dynamic table per HTTP/2 connection. If we\n\t// assume a very powerful server that handles 1M QPS per connection and each\n\t// request adds (then evicts) 100 entries from the table, it would still take\n\t// 2M years for evictCount to overflow.\n\tents       []HeaderField\n\tevictCount uint64\n\n\t// byName maps a HeaderField name to the unique id of the newest entry with\n\t// the same name. See above for a definition of \"unique id\".\n\tbyName map[string]uint64\n\n\t// byNameValue maps a HeaderField name/value pair to the unique id of the newest\n\t// entry with the same name and value. 
See above for a definition of \"unique id\".\n\tbyNameValue map[pairNameValue]uint64\n}\n\ntype pairNameValue struct {\n\tname, value string\n}\n\nfunc (t *headerFieldTable) init() {\n\tt.byName = make(map[string]uint64)\n\tt.byNameValue = make(map[pairNameValue]uint64)\n}\n\n// len reports the number of entries in the table.\nfunc (t *headerFieldTable) len() int {\n\treturn len(t.ents)\n}\n\n// addEntry adds a new entry.\nfunc (t *headerFieldTable) addEntry(f HeaderField) {\n\tid := uint64(t.len()) + t.evictCount + 1\n\tt.byName[f.Name] = id\n\tt.byNameValue[pairNameValue{f.Name, f.Value}] = id\n\tt.ents = append(t.ents, f)\n}\n\n// evictOldest evicts the n oldest entries in the table.\nfunc (t *headerFieldTable) evictOldest(n int) {\n\tif n > t.len() {\n\t\tpanic(fmt.Sprintf(\"evictOldest(%v) on table with %v entries\", n, t.len()))\n\t}\n\tfor k := 0; k < n; k++ {\n\t\tf := t.ents[k]\n\t\tid := t.evictCount + uint64(k) + 1\n\t\tif t.byName[f.Name] == id {\n\t\t\tdelete(t.byName, f.Name)\n\t\t}\n\t\tif p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {\n\t\t\tdelete(t.byNameValue, p)\n\t\t}\n\t}\n\tcopy(t.ents, t.ents[n:])\n\tfor k := t.len() - n; k < t.len(); k++ {\n\t\tt.ents[k] = HeaderField{} // so strings can be garbage collected\n\t}\n\tt.ents = t.ents[:t.len()-n]\n\tif t.evictCount+uint64(n) < t.evictCount {\n\t\tpanic(\"evictCount overflow\")\n\t}\n\tt.evictCount += uint64(n)\n}\n\n// search finds f in the table. If there is no match, i is 0.\n// If both name and value match, i is the matched index and nameValueMatch\n// becomes true. If only name matches, i points to that index and\n// nameValueMatch becomes false.\n//\n// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says\n// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,\n// meaning t.ents is reversed for dynamic tables. 
Hence, when t is a dynamic\n// table, the return value i actually refers to the entry t.ents[t.len()-i].\n//\n// All tables are assumed to be a dynamic tables except for the global\n// staticTable pointer.\n//\n// See Section 2.3.3.\nfunc (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {\n\tif !f.Sensitive {\n\t\tif id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {\n\t\t\treturn t.idToIndex(id), true\n\t\t}\n\t}\n\tif id := t.byName[f.Name]; id != 0 {\n\t\treturn t.idToIndex(id), false\n\t}\n\treturn 0, false\n}\n\n// idToIndex converts a unique id to an HPACK index.\n// See Section 2.3.3.\nfunc (t *headerFieldTable) idToIndex(id uint64) uint64 {\n\tif id <= t.evictCount {\n\t\tpanic(fmt.Sprintf(\"id (%v) <= evictCount (%v)\", id, t.evictCount))\n\t}\n\tk := id - t.evictCount - 1 // convert id to an index t.ents[k]\n\tif t != staticTable {\n\t\treturn uint64(t.len()) - k // dynamic table\n\t}\n\treturn k + 1\n}\n\n// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B\nvar staticTable = newStaticTable()\nvar staticTableEntries = [...]HeaderField{\n\t{Name: \":authority\"},\n\t{Name: \":method\", Value: \"GET\"},\n\t{Name: \":method\", Value: \"POST\"},\n\t{Name: \":path\", Value: \"/\"},\n\t{Name: \":path\", Value: \"/index.html\"},\n\t{Name: \":scheme\", Value: \"http\"},\n\t{Name: \":scheme\", Value: \"https\"},\n\t{Name: \":status\", Value: \"200\"},\n\t{Name: \":status\", Value: \"204\"},\n\t{Name: \":status\", Value: \"206\"},\n\t{Name: \":status\", Value: \"304\"},\n\t{Name: \":status\", Value: \"400\"},\n\t{Name: \":status\", Value: \"404\"},\n\t{Name: \":status\", Value: \"500\"},\n\t{Name: \"accept-charset\"},\n\t{Name: \"accept-encoding\", Value: \"gzip, deflate\"},\n\t{Name: \"accept-language\"},\n\t{Name: \"accept-ranges\"},\n\t{Name: \"accept\"},\n\t{Name: \"access-control-allow-origin\"},\n\t{Name: \"age\"},\n\t{Name: \"allow\"},\n\t{Name: \"authorization\"},\n\t{Name: 
\"cache-control\"},\n\t{Name: \"content-disposition\"},\n\t{Name: \"content-encoding\"},\n\t{Name: \"content-language\"},\n\t{Name: \"content-length\"},\n\t{Name: \"content-location\"},\n\t{Name: \"content-range\"},\n\t{Name: \"content-type\"},\n\t{Name: \"cookie\"},\n\t{Name: \"date\"},\n\t{Name: \"etag\"},\n\t{Name: \"expect\"},\n\t{Name: \"expires\"},\n\t{Name: \"from\"},\n\t{Name: \"host\"},\n\t{Name: \"if-match\"},\n\t{Name: \"if-modified-since\"},\n\t{Name: \"if-none-match\"},\n\t{Name: \"if-range\"},\n\t{Name: \"if-unmodified-since\"},\n\t{Name: \"last-modified\"},\n\t{Name: \"link\"},\n\t{Name: \"location\"},\n\t{Name: \"max-forwards\"},\n\t{Name: \"proxy-authenticate\"},\n\t{Name: \"proxy-authorization\"},\n\t{Name: \"range\"},\n\t{Name: \"referer\"},\n\t{Name: \"refresh\"},\n\t{Name: \"retry-after\"},\n\t{Name: \"server\"},\n\t{Name: \"set-cookie\"},\n\t{Name: \"strict-transport-security\"},\n\t{Name: \"transfer-encoding\"},\n\t{Name: \"user-agent\"},\n\t{Name: \"vary\"},\n\t{Name: \"via\"},\n\t{Name: \"www-authenticate\"},\n}\n\nfunc newStaticTable() *headerFieldTable {\n\tt := &headerFieldTable{}\n\tt.init()\n\tfor _, e := range staticTableEntries[:] {\n\t\tt.addEntry(e)\n\t}\n\treturn t\n}\n\nvar huffmanCodes = 
[256]uint32{\n\t0x1ff8,\n\t0x7fffd8,\n\t0xfffffe2,\n\t0xfffffe3,\n\t0xfffffe4,\n\t0xfffffe5,\n\t0xfffffe6,\n\t0xfffffe7,\n\t0xfffffe8,\n\t0xffffea,\n\t0x3ffffffc,\n\t0xfffffe9,\n\t0xfffffea,\n\t0x3ffffffd,\n\t0xfffffeb,\n\t0xfffffec,\n\t0xfffffed,\n\t0xfffffee,\n\t0xfffffef,\n\t0xffffff0,\n\t0xffffff1,\n\t0xffffff2,\n\t0x3ffffffe,\n\t0xffffff3,\n\t0xffffff4,\n\t0xffffff5,\n\t0xffffff6,\n\t0xffffff7,\n\t0xffffff8,\n\t0xffffff9,\n\t0xffffffa,\n\t0xffffffb,\n\t0x14,\n\t0x3f8,\n\t0x3f9,\n\t0xffa,\n\t0x1ff9,\n\t0x15,\n\t0xf8,\n\t0x7fa,\n\t0x3fa,\n\t0x3fb,\n\t0xf9,\n\t0x7fb,\n\t0xfa,\n\t0x16,\n\t0x17,\n\t0x18,\n\t0x0,\n\t0x1,\n\t0x2,\n\t0x19,\n\t0x1a,\n\t0x1b,\n\t0x1c,\n\t0x1d,\n\t0x1e,\n\t0x1f,\n\t0x5c,\n\t0xfb,\n\t0x7ffc,\n\t0x20,\n\t0xffb,\n\t0x3fc,\n\t0x1ffa,\n\t0x21,\n\t0x5d,\n\t0x5e,\n\t0x5f,\n\t0x60,\n\t0x61,\n\t0x62,\n\t0x63,\n\t0x64,\n\t0x65,\n\t0x66,\n\t0x67,\n\t0x68,\n\t0x69,\n\t0x6a,\n\t0x6b,\n\t0x6c,\n\t0x6d,\n\t0x6e,\n\t0x6f,\n\t0x70,\n\t0x71,\n\t0x72,\n\t0xfc,\n\t0x73,\n\t0xfd,\n\t0x1ffb,\n\t0x7fff0,\n\t0x1ffc,\n\t0x3ffc,\n\t0x22,\n\t0x7ffd,\n\t0x3,\n\t0x23,\n\t0x4,\n\t0x24,\n\t0x5,\n\t0x25,\n\t0x26,\n\t0x27,\n\t0x6,\n\t0x74,\n\t0x75,\n\t0x28,\n\t0x29,\n\t0x2a,\n\t0x7,\n\t0x2b,\n\t0x76,\n\t0x2c,\n\t0x8,\n\t0x9,\n\t0x2d,\n\t0x77,\n\t0x78,\n\t0x79,\n\t0x7a,\n\t0x7b,\n\t0x7ffe,\n\t0x7fc,\n\t0x3ffd,\n\t0x1ffd,\n\t0xffffffc,\n\t0xfffe6,\n\t0x3fffd2,\n\t0xfffe7,\n\t0xfffe8,\n\t0x3fffd3,\n\t0x3fffd4,\n\t0x3fffd5,\n\t0x7fffd9,\n\t0x3fffd6,\n\t0x7fffda,\n\t0x7fffdb,\n\t0x7fffdc,\n\t0x7fffdd,\n\t0x7fffde,\n\t0xffffeb,\n\t0x7fffdf,\n\t0xffffec,\n\t0xffffed,\n\t0x3fffd7,\n\t0x7fffe0,\n\t0xffffee,\n\t0x7fffe1,\n\t0x7fffe2,\n\t0x7fffe3,\n\t0x7fffe4,\n\t0x1fffdc,\n\t0x3fffd8,\n\t0x7fffe5,\n\t0x3fffd9,\n\t0x7fffe6,\n\t0x7fffe7,\n\t0xffffef,\n\t0x3fffda,\n\t0x1fffdd,\n\t0xfffe9,\n\t0x3fffdb,\n\t0x3fffdc,\n\t0x7fffe8,\n\t0x7fffe9,\n\t0x1fffde,\n\t0x7fffea,\n\t0x3fffdd,\n\t0x3fffde,\n\t0xfffff0,\n\t0x1fffdf,\n\t0x3fffdf,\n\t0x7fffeb,\n\t0x7fffec,\n\t0x1fffe0,\n\t0x1fffe1,\n\t
0x3fffe0,\n\t0x1fffe2,\n\t0x7fffed,\n\t0x3fffe1,\n\t0x7fffee,\n\t0x7fffef,\n\t0xfffea,\n\t0x3fffe2,\n\t0x3fffe3,\n\t0x3fffe4,\n\t0x7ffff0,\n\t0x3fffe5,\n\t0x3fffe6,\n\t0x7ffff1,\n\t0x3ffffe0,\n\t0x3ffffe1,\n\t0xfffeb,\n\t0x7fff1,\n\t0x3fffe7,\n\t0x7ffff2,\n\t0x3fffe8,\n\t0x1ffffec,\n\t0x3ffffe2,\n\t0x3ffffe3,\n\t0x3ffffe4,\n\t0x7ffffde,\n\t0x7ffffdf,\n\t0x3ffffe5,\n\t0xfffff1,\n\t0x1ffffed,\n\t0x7fff2,\n\t0x1fffe3,\n\t0x3ffffe6,\n\t0x7ffffe0,\n\t0x7ffffe1,\n\t0x3ffffe7,\n\t0x7ffffe2,\n\t0xfffff2,\n\t0x1fffe4,\n\t0x1fffe5,\n\t0x3ffffe8,\n\t0x3ffffe9,\n\t0xffffffd,\n\t0x7ffffe3,\n\t0x7ffffe4,\n\t0x7ffffe5,\n\t0xfffec,\n\t0xfffff3,\n\t0xfffed,\n\t0x1fffe6,\n\t0x3fffe9,\n\t0x1fffe7,\n\t0x1fffe8,\n\t0x7ffff3,\n\t0x3fffea,\n\t0x3fffeb,\n\t0x1ffffee,\n\t0x1ffffef,\n\t0xfffff4,\n\t0xfffff5,\n\t0x3ffffea,\n\t0x7ffff4,\n\t0x3ffffeb,\n\t0x7ffffe6,\n\t0x3ffffec,\n\t0x3ffffed,\n\t0x7ffffe7,\n\t0x7ffffe8,\n\t0x7ffffe9,\n\t0x7ffffea,\n\t0x7ffffeb,\n\t0xffffffe,\n\t0x7ffffec,\n\t0x7ffffed,\n\t0x7ffffee,\n\t0x7ffffef,\n\t0x7fffff0,\n\t0x3ffffee,\n}\n\nvar huffmanCodeLen = [256]uint8{\n\t13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,\n\t28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,\n\t6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,\n\t5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,\n\t13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,\n\t7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,\n\t15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,\n\t6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,\n\t20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,\n\t24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,\n\t22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,\n\t21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,\n\t26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,\n\t19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,\n\t20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 
24, 24, 26, 23,\n\t26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/hpack/tables_test.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage hpack\n\nimport (\n\t\"bufio\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestHeaderFieldTable(t *testing.T) {\n\ttable := &headerFieldTable{}\n\ttable.init()\n\ttable.addEntry(pair(\"key1\", \"value1-1\"))\n\ttable.addEntry(pair(\"key2\", \"value2-1\"))\n\ttable.addEntry(pair(\"key1\", \"value1-2\"))\n\ttable.addEntry(pair(\"key3\", \"value3-1\"))\n\ttable.addEntry(pair(\"key4\", \"value4-1\"))\n\ttable.addEntry(pair(\"key2\", \"value2-2\"))\n\n\t// Tests will be run twice: once before evicting anything, and\n\t// again after evicting the three oldest entries.\n\ttests := []struct {\n\t\tf                 HeaderField\n\t\tbeforeWantStaticI uint64\n\t\tbeforeWantMatch   bool\n\t\tafterWantStaticI  uint64\n\t\tafterWantMatch    bool\n\t}{\n\t\t{HeaderField{\"key1\", \"value1-1\", false}, 1, true, 0, false},\n\t\t{HeaderField{\"key1\", \"value1-2\", false}, 3, true, 0, false},\n\t\t{HeaderField{\"key1\", \"value1-3\", false}, 3, false, 0, false},\n\t\t{HeaderField{\"key2\", \"value2-1\", false}, 2, true, 3, false},\n\t\t{HeaderField{\"key2\", \"value2-2\", false}, 6, true, 3, true},\n\t\t{HeaderField{\"key2\", \"value2-3\", false}, 6, false, 3, false},\n\t\t{HeaderField{\"key4\", \"value4-1\", false}, 5, true, 2, true},\n\t\t// Name match only, because sensitive.\n\t\t{HeaderField{\"key4\", \"value4-1\", true}, 5, false, 2, false},\n\t\t// Key not found.\n\t\t{HeaderField{\"key5\", \"value5-x\", false}, 0, false, 0, false},\n\t}\n\n\tstaticToDynamic := func(i uint64) uint64 {\n\t\tif i == 0 {\n\t\t\treturn 0\n\t\t}\n\t\treturn uint64(table.len()) - i + 1 // dynamic is the reversed table\n\t}\n\n\tsearchStatic := func(f HeaderField) (uint64, bool) {\n\t\told := staticTable\n\t\tstaticTable = table\n\t\tdefer func() { staticTable = old }()\n\t\treturn 
staticTable.search(f)\n\t}\n\n\tsearchDynamic := func(f HeaderField) (uint64, bool) {\n\t\treturn table.search(f)\n\t}\n\n\tfor _, test := range tests {\n\t\tgotI, gotMatch := searchStatic(test.f)\n\t\tif wantI, wantMatch := test.beforeWantStaticI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch {\n\t\t\tt.Errorf(\"before evictions: searchStatic(%+v)=%v,%v want %v,%v\", test.f, gotI, gotMatch, wantI, wantMatch)\n\t\t}\n\t\tgotI, gotMatch = searchDynamic(test.f)\n\t\twantDynamicI := staticToDynamic(test.beforeWantStaticI)\n\t\tif wantI, wantMatch := wantDynamicI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch {\n\t\t\tt.Errorf(\"before evictions: searchDynamic(%+v)=%v,%v want %v,%v\", test.f, gotI, gotMatch, wantI, wantMatch)\n\t\t}\n\t}\n\n\ttable.evictOldest(3)\n\n\tfor _, test := range tests {\n\t\tgotI, gotMatch := searchStatic(test.f)\n\t\tif wantI, wantMatch := test.afterWantStaticI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch {\n\t\t\tt.Errorf(\"after evictions: searchStatic(%+v)=%v,%v want %v,%v\", test.f, gotI, gotMatch, wantI, wantMatch)\n\t\t}\n\t\tgotI, gotMatch = searchDynamic(test.f)\n\t\twantDynamicI := staticToDynamic(test.afterWantStaticI)\n\t\tif wantI, wantMatch := wantDynamicI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch {\n\t\t\tt.Errorf(\"after evictions: searchDynamic(%+v)=%v,%v want %v,%v\", test.f, gotI, gotMatch, wantI, wantMatch)\n\t\t}\n\t}\n}\n\nfunc TestHeaderFieldTable_LookupMapEviction(t *testing.T) {\n\ttable := &headerFieldTable{}\n\ttable.init()\n\ttable.addEntry(pair(\"key1\", \"value1-1\"))\n\ttable.addEntry(pair(\"key2\", \"value2-1\"))\n\ttable.addEntry(pair(\"key1\", \"value1-2\"))\n\ttable.addEntry(pair(\"key3\", \"value3-1\"))\n\ttable.addEntry(pair(\"key4\", \"value4-1\"))\n\ttable.addEntry(pair(\"key2\", \"value2-2\"))\n\n\t// evict all pairs\n\ttable.evictOldest(table.len())\n\n\tif l := table.len(); l > 0 {\n\t\tt.Errorf(\"table.len() = %d, want 0\", 
l)\n\t}\n\n\tif l := len(table.byName); l > 0 {\n\t\tt.Errorf(\"len(table.byName) = %d, want 0\", l)\n\t}\n\n\tif l := len(table.byNameValue); l > 0 {\n\t\tt.Errorf(\"len(table.byNameValue) = %d, want 0\", l)\n\t}\n}\n\nfunc TestStaticTable(t *testing.T) {\n\tfromSpec := `\n          +-------+-----------------------------+---------------+\n          | 1     | :authority                  |               |\n          | 2     | :method                     | GET           |\n          | 3     | :method                     | POST          |\n          | 4     | :path                       | /             |\n          | 5     | :path                       | /index.html   |\n          | 6     | :scheme                     | http          |\n          | 7     | :scheme                     | https         |\n          | 8     | :status                     | 200           |\n          | 9     | :status                     | 204           |\n          | 10    | :status                     | 206           |\n          | 11    | :status                     | 304           |\n          | 12    | :status                     | 400           |\n          | 13    | :status                     | 404           |\n          | 14    | :status                     | 500           |\n          | 15    | accept-charset              |               |\n          | 16    | accept-encoding             | gzip, deflate |\n          | 17    | accept-language             |               |\n          | 18    | accept-ranges               |               |\n          | 19    | accept                      |               |\n          | 20    | access-control-allow-origin |               |\n          | 21    | age                         |               |\n          | 22    | allow                       |               |\n          | 23    | authorization               |               |\n          | 24    | cache-control               |               |\n          | 25    | content-disposition         | 
              |\n          | 26    | content-encoding            |               |\n          | 27    | content-language            |               |\n          | 28    | content-length              |               |\n          | 29    | content-location            |               |\n          | 30    | content-range               |               |\n          | 31    | content-type                |               |\n          | 32    | cookie                      |               |\n          | 33    | date                        |               |\n          | 34    | etag                        |               |\n          | 35    | expect                      |               |\n          | 36    | expires                     |               |\n          | 37    | from                        |               |\n          | 38    | host                        |               |\n          | 39    | if-match                    |               |\n          | 40    | if-modified-since           |               |\n          | 41    | if-none-match               |               |\n          | 42    | if-range                    |               |\n          | 43    | if-unmodified-since         |               |\n          | 44    | last-modified               |               |\n          | 45    | link                        |               |\n          | 46    | location                    |               |\n          | 47    | max-forwards                |               |\n          | 48    | proxy-authenticate          |               |\n          | 49    | proxy-authorization         |               |\n          | 50    | range                       |               |\n          | 51    | referer                     |               |\n          | 52    | refresh                     |               |\n          | 53    | retry-after                 |               |\n          | 54    | server                      |               |\n          | 55    | set-cookie          
        |               |\n          | 56    | strict-transport-security   |               |\n          | 57    | transfer-encoding           |               |\n          | 58    | user-agent                  |               |\n          | 59    | vary                        |               |\n          | 60    | via                         |               |\n          | 61    | www-authenticate            |               |\n          +-------+-----------------------------+---------------+\n`\n\tbs := bufio.NewScanner(strings.NewReader(fromSpec))\n\tre := regexp.MustCompile(`\\| (\\d+)\\s+\\| (\\S+)\\s*\\| (\\S(.*\\S)?)?\\s+\\|`)\n\tfor bs.Scan() {\n\t\tl := bs.Text()\n\t\tif !strings.Contains(l, \"|\") {\n\t\t\tcontinue\n\t\t}\n\t\tm := re.FindStringSubmatch(l)\n\t\tif m == nil {\n\t\t\tcontinue\n\t\t}\n\t\ti, err := strconv.Atoi(m[1])\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Bogus integer on line %q\", l)\n\t\t\tcontinue\n\t\t}\n\t\tif i < 1 || i > staticTable.len() {\n\t\t\tt.Errorf(\"Bogus index %d on line %q\", i, l)\n\t\t\tcontinue\n\t\t}\n\t\tif got, want := staticTable.ents[i-1].Name, m[2]; got != want {\n\t\t\tt.Errorf(\"header index %d name = %q; want %q\", i, got, want)\n\t\t}\n\t\tif got, want := staticTable.ents[i-1].Value, m[3]; got != want {\n\t\t\tt.Errorf(\"header index %d value = %q; want %q\", i, got, want)\n\t\t}\n\t}\n\tif err := bs.Err(); err != nil {\n\t\tt.Error(err)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/http2.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package http2 implements the HTTP/2 protocol.\n//\n// This package is low-level and intended to be used directly by very\n// few people. Most users will use it indirectly through the automatic\n// use by the net/http package (from Go 1.6 and later).\n// For use in earlier Go versions see ConfigureServer. (Transport support\n// requires Go 1.6 or later)\n//\n// See https://http2.github.io/ for more information on HTTP/2.\n//\n// See https://http2.golang.org/ for a test server running this code.\n//\npackage http2 // import \"golang.org/x/net/http2\"\n\nimport (\n\t\"bufio\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"golang.org/x/net/lex/httplex\"\n)\n\nvar (\n\tVerboseLogs    bool\n\tlogFrameWrites bool\n\tlogFrameReads  bool\n\tinTests        bool\n)\n\nfunc init() {\n\te := os.Getenv(\"GODEBUG\")\n\tif strings.Contains(e, \"http2debug=1\") {\n\t\tVerboseLogs = true\n\t}\n\tif strings.Contains(e, \"http2debug=2\") {\n\t\tVerboseLogs = true\n\t\tlogFrameWrites = true\n\t\tlogFrameReads = true\n\t}\n}\n\nconst (\n\t// ClientPreface is the string that must be sent by new\n\t// connections from clients.\n\tClientPreface = \"PRI * HTTP/2.0\\r\\n\\r\\nSM\\r\\n\\r\\n\"\n\n\t// SETTINGS_MAX_FRAME_SIZE default\n\t// http://http2.github.io/http2-spec/#rfc.section.6.5.2\n\tinitialMaxFrameSize = 16384\n\n\t// NextProtoTLS is the NPN/ALPN protocol negotiated during\n\t// HTTP/2's TLS setup.\n\tNextProtoTLS = \"h2\"\n\n\t// http://http2.github.io/http2-spec/#SettingValues\n\tinitialHeaderTableSize = 4096\n\n\tinitialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size\n\n\tdefaultMaxReadFrameSize = 1 << 20\n)\n\nvar (\n\tclientPreface = []byte(ClientPreface)\n)\n\ntype streamState int\n\n// 
HTTP/2 stream states.\n//\n// See http://tools.ietf.org/html/rfc7540#section-5.1.\n//\n// For simplicity, the server code merges \"reserved (local)\" into\n// \"half-closed (remote)\". This is one less state transition to track.\n// The only downside is that we send PUSH_PROMISEs slightly less\n// liberally than allowable. More discussion here:\n// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html\n//\n// \"reserved (remote)\" is omitted since the client code does not\n// support server push.\nconst (\n\tstateIdle streamState = iota\n\tstateOpen\n\tstateHalfClosedLocal\n\tstateHalfClosedRemote\n\tstateClosed\n)\n\nvar stateName = [...]string{\n\tstateIdle:             \"Idle\",\n\tstateOpen:             \"Open\",\n\tstateHalfClosedLocal:  \"HalfClosedLocal\",\n\tstateHalfClosedRemote: \"HalfClosedRemote\",\n\tstateClosed:           \"Closed\",\n}\n\nfunc (st streamState) String() string {\n\treturn stateName[st]\n}\n\n// Setting is a setting parameter: which setting it is, and its value.\ntype Setting struct {\n\t// ID is which setting is being set.\n\t// See http://http2.github.io/http2-spec/#SettingValues\n\tID SettingID\n\n\t// Val is the value.\n\tVal uint32\n}\n\nfunc (s Setting) String() string {\n\treturn fmt.Sprintf(\"[%v = %d]\", s.ID, s.Val)\n}\n\n// Valid reports whether the setting is valid.\nfunc (s Setting) Valid() error {\n\t// Limits and error codes from 6.5.2 Defined SETTINGS Parameters\n\tswitch s.ID {\n\tcase SettingEnablePush:\n\t\tif s.Val != 1 && s.Val != 0 {\n\t\t\treturn ConnectionError(ErrCodeProtocol)\n\t\t}\n\tcase SettingInitialWindowSize:\n\t\tif s.Val > 1<<31-1 {\n\t\t\treturn ConnectionError(ErrCodeFlowControl)\n\t\t}\n\tcase SettingMaxFrameSize:\n\t\tif s.Val < 16384 || s.Val > 1<<24-1 {\n\t\t\treturn ConnectionError(ErrCodeProtocol)\n\t\t}\n\t}\n\treturn nil\n}\n\n// A SettingID is an HTTP/2 setting as defined in\n// http://http2.github.io/http2-spec/#iana-settings\ntype SettingID uint16\n\nconst 
(\n\tSettingHeaderTableSize      SettingID = 0x1\n\tSettingEnablePush           SettingID = 0x2\n\tSettingMaxConcurrentStreams SettingID = 0x3\n\tSettingInitialWindowSize    SettingID = 0x4\n\tSettingMaxFrameSize         SettingID = 0x5\n\tSettingMaxHeaderListSize    SettingID = 0x6\n)\n\nvar settingName = map[SettingID]string{\n\tSettingHeaderTableSize:      \"HEADER_TABLE_SIZE\",\n\tSettingEnablePush:           \"ENABLE_PUSH\",\n\tSettingMaxConcurrentStreams: \"MAX_CONCURRENT_STREAMS\",\n\tSettingInitialWindowSize:    \"INITIAL_WINDOW_SIZE\",\n\tSettingMaxFrameSize:         \"MAX_FRAME_SIZE\",\n\tSettingMaxHeaderListSize:    \"MAX_HEADER_LIST_SIZE\",\n}\n\nfunc (s SettingID) String() string {\n\tif v, ok := settingName[s]; ok {\n\t\treturn v\n\t}\n\treturn fmt.Sprintf(\"UNKNOWN_SETTING_%d\", uint16(s))\n}\n\nvar (\n\terrInvalidHeaderFieldName  = errors.New(\"http2: invalid header field name\")\n\terrInvalidHeaderFieldValue = errors.New(\"http2: invalid header field value\")\n)\n\n// validWireHeaderFieldName reports whether v is a valid header field\n// name (key). See httplex.ValidHeaderName for the base rules.\n//\n// Further, http2 says:\n//   \"Just as in HTTP/1.x, header field names are strings of ASCII\n//   characters that are compared in a case-insensitive\n//   fashion. However, header field names MUST be converted to\n//   lowercase prior to their encoding in HTTP/2. 
\"\nfunc validWireHeaderFieldName(v string) bool {\n\tif len(v) == 0 {\n\t\treturn false\n\t}\n\tfor _, r := range v {\n\t\tif !httplex.IsTokenRune(r) {\n\t\t\treturn false\n\t\t}\n\t\tif 'A' <= r && r <= 'Z' {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nvar httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)\n\nfunc init() {\n\tfor i := 100; i <= 999; i++ {\n\t\tif v := http.StatusText(i); v != \"\" {\n\t\t\thttpCodeStringCommon[i] = strconv.Itoa(i)\n\t\t}\n\t}\n}\n\nfunc httpCodeString(code int) string {\n\tif s, ok := httpCodeStringCommon[code]; ok {\n\t\treturn s\n\t}\n\treturn strconv.Itoa(code)\n}\n\n// from pkg io\ntype stringWriter interface {\n\tWriteString(s string) (n int, err error)\n}\n\n// A gate lets two goroutines coordinate their activities.\ntype gate chan struct{}\n\nfunc (g gate) Done() { g <- struct{}{} }\nfunc (g gate) Wait() { <-g }\n\n// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).\ntype closeWaiter chan struct{}\n\n// Init makes a closeWaiter usable.\n// It exists because so a closeWaiter value can be placed inside a\n// larger struct and have the Mutex and Cond's memory in the same\n// allocation.\nfunc (cw *closeWaiter) Init() {\n\t*cw = make(chan struct{})\n}\n\n// Close marks the closeWaiter as closed and unblocks any waiters.\nfunc (cw closeWaiter) Close() {\n\tclose(cw)\n}\n\n// Wait waits for the closeWaiter to become closed.\nfunc (cw closeWaiter) Wait() {\n\t<-cw\n}\n\n// bufferedWriter is a buffered writer that writes to w.\n// Its buffered writer is lazily allocated as needed, to minimize\n// idle memory usage with many connections.\ntype bufferedWriter struct {\n\tw  io.Writer     // immutable\n\tbw *bufio.Writer // non-nil when data is buffered\n}\n\nfunc newBufferedWriter(w io.Writer) *bufferedWriter {\n\treturn &bufferedWriter{w: w}\n}\n\n// bufWriterPoolBufferSize is the size of bufio.Writer's\n// buffers created using bufWriterPool.\n//\n// TODO: pick a less arbitrary 
value? this is a bit under\n// (3 x typical 1500 byte MTU) at least. Other than that,\n// not much thought went into it.\nconst bufWriterPoolBufferSize = 4 << 10\n\nvar bufWriterPool = sync.Pool{\n\tNew: func() interface{} {\n\t\treturn bufio.NewWriterSize(nil, bufWriterPoolBufferSize)\n\t},\n}\n\nfunc (w *bufferedWriter) Available() int {\n\tif w.bw == nil {\n\t\treturn bufWriterPoolBufferSize\n\t}\n\treturn w.bw.Available()\n}\n\nfunc (w *bufferedWriter) Write(p []byte) (n int, err error) {\n\tif w.bw == nil {\n\t\tbw := bufWriterPool.Get().(*bufio.Writer)\n\t\tbw.Reset(w.w)\n\t\tw.bw = bw\n\t}\n\treturn w.bw.Write(p)\n}\n\nfunc (w *bufferedWriter) Flush() error {\n\tbw := w.bw\n\tif bw == nil {\n\t\treturn nil\n\t}\n\terr := bw.Flush()\n\tbw.Reset(nil)\n\tbufWriterPool.Put(bw)\n\tw.bw = nil\n\treturn err\n}\n\nfunc mustUint31(v int32) uint32 {\n\tif v < 0 || v > 2147483647 {\n\t\tpanic(\"out of range\")\n\t}\n\treturn uint32(v)\n}\n\n// bodyAllowedForStatus reports whether a given response status code\n// permits a body. 
See RFC 2616, section 4.4.\nfunc bodyAllowedForStatus(status int) bool {\n\tswitch {\n\tcase status >= 100 && status <= 199:\n\t\treturn false\n\tcase status == 204:\n\t\treturn false\n\tcase status == 304:\n\t\treturn false\n\t}\n\treturn true\n}\n\ntype httpError struct {\n\tmsg     string\n\ttimeout bool\n}\n\nfunc (e *httpError) Error() string   { return e.msg }\nfunc (e *httpError) Timeout() bool   { return e.timeout }\nfunc (e *httpError) Temporary() bool { return true }\n\nvar errTimeout error = &httpError{msg: \"http2: timeout awaiting response headers\", timeout: true}\n\ntype connectionStater interface {\n\tConnectionState() tls.ConnectionState\n}\n\nvar sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}\n\ntype sorter struct {\n\tv []string // owned by sorter\n}\n\nfunc (s *sorter) Len() int           { return len(s.v) }\nfunc (s *sorter) Swap(i, j int)      { s.v[i], s.v[j] = s.v[j], s.v[i] }\nfunc (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }\n\n// Keys returns the sorted keys of h.\n//\n// The returned slice is only valid until s used again or returned to\n// its pool.\nfunc (s *sorter) Keys(h http.Header) []string {\n\tkeys := s.v[:0]\n\tfor k := range h {\n\t\tkeys = append(keys, k)\n\t}\n\ts.v = keys\n\tsort.Sort(s)\n\treturn keys\n}\n\nfunc (s *sorter) SortStrings(ss []string) {\n\t// Our sorter works on s.v, which sorter owns, so\n\t// stash it away while we sort the user's buffer.\n\tsave := s.v\n\ts.v = ss\n\tsort.Sort(s)\n\ts.v = save\n}\n\n// validPseudoPath reports whether v is a valid :path pseudo-header\n// value. 
It must be either:\n//\n//     *) a non-empty string starting with '/'\n//     *) the string '*', for OPTIONS requests.\n//\n// For now this is only used a quick check for deciding when to clean\n// up Opaque URLs before sending requests from the Transport.\n// See golang.org/issue/16847\n//\n// We used to enforce that the path also didn't start with \"//\", but\n// Google's GFE accepts such paths and Chrome sends them, so ignore\n// that part of the spec. See golang.org/issue/19103.\nfunc validPseudoPath(v string) bool {\n\treturn (len(v) > 0 && v[0] == '/') || v == \"*\"\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/http2_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"os/exec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang.org/x/net/http2/hpack\"\n)\n\nvar knownFailing = flag.Bool(\"known_failing\", false, \"Run known-failing tests.\")\n\nfunc condSkipFailingTest(t *testing.T) {\n\tif !*knownFailing {\n\t\tt.Skip(\"Skipping known-failing test without --known_failing\")\n\t}\n}\n\nfunc init() {\n\tinTests = true\n\tDebugGoroutines = true\n\tflag.BoolVar(&VerboseLogs, \"verboseh2\", VerboseLogs, \"Verbose HTTP/2 debug logging\")\n}\n\nfunc TestSettingString(t *testing.T) {\n\ttests := []struct {\n\t\ts    Setting\n\t\twant string\n\t}{\n\t\t{Setting{SettingMaxFrameSize, 123}, \"[MAX_FRAME_SIZE = 123]\"},\n\t\t{Setting{1<<16 - 1, 123}, \"[UNKNOWN_SETTING_65535 = 123]\"},\n\t}\n\tfor i, tt := range tests {\n\t\tgot := fmt.Sprint(tt.s)\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"%d. 
for %#v, string = %q; want %q\", i, tt.s, got, tt.want)\n\t\t}\n\t}\n}\n\ntype twriter struct {\n\tt  testing.TB\n\tst *serverTester // optional\n}\n\nfunc (w twriter) Write(p []byte) (n int, err error) {\n\tif w.st != nil {\n\t\tps := string(p)\n\t\tfor _, phrase := range w.st.logFilter {\n\t\t\tif strings.Contains(ps, phrase) {\n\t\t\t\treturn len(p), nil // no logging\n\t\t\t}\n\t\t}\n\t}\n\tw.t.Logf(\"%s\", p)\n\treturn len(p), nil\n}\n\n// like encodeHeader, but don't add implicit pseudo headers.\nfunc encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte {\n\tvar buf bytes.Buffer\n\tenc := hpack.NewEncoder(&buf)\n\tfor len(headers) > 0 {\n\t\tk, v := headers[0], headers[1]\n\t\theaders = headers[2:]\n\t\tif err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil {\n\t\t\tt.Fatalf(\"HPACK encoding error for %q/%q: %v\", k, v, err)\n\t\t}\n\t}\n\treturn buf.Bytes()\n}\n\n// Verify that curl has http2.\nfunc requireCurl(t *testing.T) {\n\tout, err := dockerLogs(curl(t, \"--version\"))\n\tif err != nil {\n\t\tt.Skipf(\"failed to determine curl features; skipping test\")\n\t}\n\tif !strings.Contains(string(out), \"HTTP2\") {\n\t\tt.Skip(\"curl doesn't support HTTP2; skipping test\")\n\t}\n}\n\nfunc curl(t *testing.T, args ...string) (container string) {\n\tout, err := exec.Command(\"docker\", append([]string{\"run\", \"-d\", \"--net=host\", \"gohttp2/curl\"}, args...)...).Output()\n\tif err != nil {\n\t\tt.Skipf(\"Failed to run curl in docker: %v, %s\", err, out)\n\t}\n\treturn strings.TrimSpace(string(out))\n}\n\n// Verify that h2load exists.\nfunc requireH2load(t *testing.T) {\n\tout, err := dockerLogs(h2load(t, \"--version\"))\n\tif err != nil {\n\t\tt.Skipf(\"failed to probe h2load; skipping test: %s\", out)\n\t}\n\tif !strings.Contains(string(out), \"h2load nghttp2/\") {\n\t\tt.Skipf(\"h2load not present; skipping test. 
(Output=%q)\", out)\n\t}\n}\n\nfunc h2load(t *testing.T, args ...string) (container string) {\n\tout, err := exec.Command(\"docker\", append([]string{\"run\", \"-d\", \"--net=host\", \"--entrypoint=/usr/local/bin/h2load\", \"gohttp2/curl\"}, args...)...).Output()\n\tif err != nil {\n\t\tt.Skipf(\"Failed to run h2load in docker: %v, %s\", err, out)\n\t}\n\treturn strings.TrimSpace(string(out))\n}\n\ntype puppetCommand struct {\n\tfn   func(w http.ResponseWriter, r *http.Request)\n\tdone chan<- bool\n}\n\ntype handlerPuppet struct {\n\tch chan puppetCommand\n}\n\nfunc newHandlerPuppet() *handlerPuppet {\n\treturn &handlerPuppet{\n\t\tch: make(chan puppetCommand),\n\t}\n}\n\nfunc (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) {\n\tfor cmd := range p.ch {\n\t\tcmd.fn(w, r)\n\t\tcmd.done <- true\n\t}\n}\n\nfunc (p *handlerPuppet) done() { close(p.ch) }\nfunc (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) {\n\tdone := make(chan bool)\n\tp.ch <- puppetCommand{fn, done}\n\t<-done\n}\nfunc dockerLogs(container string) ([]byte, error) {\n\tout, err := exec.Command(\"docker\", \"wait\", container).CombinedOutput()\n\tif err != nil {\n\t\treturn out, err\n\t}\n\texitStatus, err := strconv.Atoi(strings.TrimSpace(string(out)))\n\tif err != nil {\n\t\treturn out, errors.New(\"unexpected exit status from docker wait\")\n\t}\n\tout, err = exec.Command(\"docker\", \"logs\", container).CombinedOutput()\n\texec.Command(\"docker\", \"rm\", container).Run()\n\tif err == nil && exitStatus != 0 {\n\t\terr = fmt.Errorf(\"exit status %d: %s\", exitStatus, out)\n\t}\n\treturn out, err\n}\n\nfunc kill(container string) {\n\texec.Command(\"docker\", \"kill\", container).Run()\n\texec.Command(\"docker\", \"rm\", container).Run()\n}\n\nfunc cleanDate(res *http.Response) {\n\tif d := res.Header[\"Date\"]; len(d) == 1 {\n\t\td[0] = \"XXX\"\n\t}\n}\n\nfunc TestSorterPoolAllocs(t *testing.T) {\n\tss := []string{\"a\", \"b\", \"c\"}\n\th := http.Header{\n\t\t\"a\": 
nil,\n\t\t\"b\": nil,\n\t\t\"c\": nil,\n\t}\n\tsorter := new(sorter)\n\n\tif allocs := testing.AllocsPerRun(100, func() {\n\t\tsorter.SortStrings(ss)\n\t}); allocs >= 1 {\n\t\tt.Logf(\"SortStrings allocs = %v; want <1\", allocs)\n\t}\n\n\tif allocs := testing.AllocsPerRun(5, func() {\n\t\tif len(sorter.Keys(h)) != 3 {\n\t\t\tt.Fatal(\"wrong result\")\n\t\t}\n\t}); allocs > 0 {\n\t\tt.Logf(\"Keys allocs = %v; want <1\", allocs)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/not_go16.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.6\n\npackage http2\n\nimport (\n\t\"net/http\"\n\t\"time\"\n)\n\nfunc configureTransport(t1 *http.Transport) (*Transport, error) {\n\treturn nil, errTransportVersion\n}\n\nfunc transportExpectContinueTimeout(t1 *http.Transport) time.Duration {\n\treturn 0\n\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/not_go17.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.7\n\npackage http2\n\nimport (\n\t\"crypto/tls\"\n\t\"net\"\n\t\"net/http\"\n\t\"time\"\n)\n\ntype contextContext interface {\n\tDone() <-chan struct{}\n\tErr() error\n}\n\ntype fakeContext struct{}\n\nfunc (fakeContext) Done() <-chan struct{} { return nil }\nfunc (fakeContext) Err() error            { panic(\"should not be called\") }\n\nfunc reqContext(r *http.Request) fakeContext {\n\treturn fakeContext{}\n}\n\nfunc setResponseUncompressed(res *http.Response) {\n\t// Nothing.\n}\n\ntype clientTrace struct{}\n\nfunc requestTrace(*http.Request) *clientTrace { return nil }\nfunc traceGotConn(*http.Request, *ClientConn) {}\nfunc traceFirstResponseByte(*clientTrace)     {}\nfunc traceWroteHeaders(*clientTrace)          {}\nfunc traceWroteRequest(*clientTrace, error)   {}\nfunc traceGot100Continue(trace *clientTrace)  {}\nfunc traceWait100Continue(trace *clientTrace) {}\n\nfunc nop() {}\n\nfunc serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {\n\treturn nil, nop\n}\n\nfunc contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {\n\treturn ctx, nop\n}\n\nfunc requestWithContext(req *http.Request, ctx contextContext) *http.Request {\n\treturn req\n}\n\n// temporary copy of Go 1.6's private tls.Config.clone:\nfunc cloneTLSConfig(c *tls.Config) *tls.Config {\n\treturn &tls.Config{\n\t\tRand:                     c.Rand,\n\t\tTime:                     c.Time,\n\t\tCertificates:             c.Certificates,\n\t\tNameToCertificate:        c.NameToCertificate,\n\t\tGetCertificate:           c.GetCertificate,\n\t\tRootCAs:                  c.RootCAs,\n\t\tNextProtos:               c.NextProtos,\n\t\tServerName:               c.ServerName,\n\t\tClientAuth:               c.ClientAuth,\n\t\tClientCAs:                
c.ClientCAs,\n\t\tInsecureSkipVerify:       c.InsecureSkipVerify,\n\t\tCipherSuites:             c.CipherSuites,\n\t\tPreferServerCipherSuites: c.PreferServerCipherSuites,\n\t\tSessionTicketsDisabled:   c.SessionTicketsDisabled,\n\t\tSessionTicketKey:         c.SessionTicketKey,\n\t\tClientSessionCache:       c.ClientSessionCache,\n\t\tMinVersion:               c.MinVersion,\n\t\tMaxVersion:               c.MaxVersion,\n\t\tCurvePreferences:         c.CurvePreferences,\n\t}\n}\n\nfunc (cc *ClientConn) Ping(ctx contextContext) error {\n\treturn cc.ping(ctx)\n}\n\nfunc (t *Transport) idleConnTimeout() time.Duration { return 0 }\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/not_go18.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.8\n\npackage http2\n\nimport (\n\t\"io\"\n\t\"net/http\"\n)\n\nfunc configureServer18(h1 *http.Server, h2 *Server) error {\n\t// No IdleTimeout to sync prior to Go 1.8.\n\treturn nil\n}\n\nfunc shouldLogPanic(panicValue interface{}) bool {\n\treturn panicValue != nil\n}\n\nfunc reqGetBody(req *http.Request) func() (io.ReadCloser, error) {\n\treturn nil\n}\n\nfunc reqBodyIsNoBody(io.ReadCloser) bool { return false }\n\nfunc go18httpNoBody() io.ReadCloser { return nil } // for tests only\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/not_go19.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.9\n\npackage http2\n\nimport (\n\t\"net/http\"\n)\n\nfunc configureServer19(s *http.Server, conf *Server) error {\n\t// not supported prior to go1.9\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/pipe.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"sync\"\n)\n\n// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like\n// io.Pipe except there are no PipeReader/PipeWriter halves, and the\n// underlying buffer is an interface. (io.Pipe is always unbuffered)\ntype pipe struct {\n\tmu       sync.Mutex\n\tc        sync.Cond     // c.L lazily initialized to &p.mu\n\tb        pipeBuffer    // nil when done reading\n\terr      error         // read error once empty. non-nil means closed.\n\tbreakErr error         // immediate read error (caller doesn't see rest of b)\n\tdonec    chan struct{} // closed on error\n\treadFn   func()        // optional code to run in Read before error\n}\n\ntype pipeBuffer interface {\n\tLen() int\n\tio.Writer\n\tio.Reader\n}\n\nfunc (p *pipe) Len() int {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tif p.b == nil {\n\t\treturn 0\n\t}\n\treturn p.b.Len()\n}\n\n// Read waits until data is available and copies bytes\n// from the buffer into p.\nfunc (p *pipe) Read(d []byte) (n int, err error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tif p.c.L == nil {\n\t\tp.c.L = &p.mu\n\t}\n\tfor {\n\t\tif p.breakErr != nil {\n\t\t\treturn 0, p.breakErr\n\t\t}\n\t\tif p.b != nil && p.b.Len() > 0 {\n\t\t\treturn p.b.Read(d)\n\t\t}\n\t\tif p.err != nil {\n\t\t\tif p.readFn != nil {\n\t\t\t\tp.readFn()     // e.g. 
copy trailers\n\t\t\t\tp.readFn = nil // not sticky like p.err\n\t\t\t}\n\t\t\tp.b = nil\n\t\t\treturn 0, p.err\n\t\t}\n\t\tp.c.Wait()\n\t}\n}\n\nvar errClosedPipeWrite = errors.New(\"write on closed buffer\")\n\n// Write copies bytes from p into the buffer and wakes a reader.\n// It is an error to write more data than the buffer can hold.\nfunc (p *pipe) Write(d []byte) (n int, err error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tif p.c.L == nil {\n\t\tp.c.L = &p.mu\n\t}\n\tdefer p.c.Signal()\n\tif p.err != nil {\n\t\treturn 0, errClosedPipeWrite\n\t}\n\tif p.breakErr != nil {\n\t\treturn len(d), nil // discard when there is no reader\n\t}\n\treturn p.b.Write(d)\n}\n\n// CloseWithError causes the next Read (waking up a current blocked\n// Read if needed) to return the provided err after all data has been\n// read.\n//\n// The error must be non-nil.\nfunc (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }\n\n// BreakWithError causes the next Read (waking up a current blocked\n// Read if needed) to return the provided err immediately, without\n// waiting for unread data.\nfunc (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }\n\n// closeWithErrorAndCode is like CloseWithError but also sets some code to run\n// in the caller's goroutine before returning the error.\nfunc (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }\n\nfunc (p *pipe) closeWithError(dst *error, err error, fn func()) {\n\tif err == nil {\n\t\tpanic(\"err must be non-nil\")\n\t}\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tif p.c.L == nil {\n\t\tp.c.L = &p.mu\n\t}\n\tdefer p.c.Signal()\n\tif *dst != nil {\n\t\t// Already been done.\n\t\treturn\n\t}\n\tp.readFn = fn\n\tif dst == &p.breakErr {\n\t\tp.b = nil\n\t}\n\t*dst = err\n\tp.closeDoneLocked()\n}\n\n// requires p.mu be held.\nfunc (p *pipe) closeDoneLocked() {\n\tif p.donec == nil {\n\t\treturn\n\t}\n\t// Close if unclosed. 
This isn't racy since we always\n\t// hold p.mu while closing.\n\tselect {\n\tcase <-p.donec:\n\tdefault:\n\t\tclose(p.donec)\n\t}\n}\n\n// Err returns the error (if any) first set by BreakWithError or CloseWithError.\nfunc (p *pipe) Err() error {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tif p.breakErr != nil {\n\t\treturn p.breakErr\n\t}\n\treturn p.err\n}\n\n// Done returns a channel which is closed if and when this pipe is closed\n// with CloseWithError.\nfunc (p *pipe) Done() <-chan struct{} {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tif p.donec == nil {\n\t\tp.donec = make(chan struct{})\n\t\tif p.err != nil || p.breakErr != nil {\n\t\t\t// Already hit an error.\n\t\t\tp.closeDoneLocked()\n\t\t}\n\t}\n\treturn p.donec\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/pipe_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"testing\"\n)\n\nfunc TestPipeClose(t *testing.T) {\n\tvar p pipe\n\tp.b = new(bytes.Buffer)\n\ta := errors.New(\"a\")\n\tb := errors.New(\"b\")\n\tp.CloseWithError(a)\n\tp.CloseWithError(b)\n\t_, err := p.Read(make([]byte, 1))\n\tif err != a {\n\t\tt.Errorf(\"err = %v want %v\", err, a)\n\t}\n}\n\nfunc TestPipeDoneChan(t *testing.T) {\n\tvar p pipe\n\tdone := p.Done()\n\tselect {\n\tcase <-done:\n\t\tt.Fatal(\"done too soon\")\n\tdefault:\n\t}\n\tp.CloseWithError(io.EOF)\n\tselect {\n\tcase <-done:\n\tdefault:\n\t\tt.Fatal(\"should be done\")\n\t}\n}\n\nfunc TestPipeDoneChan_ErrFirst(t *testing.T) {\n\tvar p pipe\n\tp.CloseWithError(io.EOF)\n\tdone := p.Done()\n\tselect {\n\tcase <-done:\n\tdefault:\n\t\tt.Fatal(\"should be done\")\n\t}\n}\n\nfunc TestPipeDoneChan_Break(t *testing.T) {\n\tvar p pipe\n\tdone := p.Done()\n\tselect {\n\tcase <-done:\n\t\tt.Fatal(\"done too soon\")\n\tdefault:\n\t}\n\tp.BreakWithError(io.EOF)\n\tselect {\n\tcase <-done:\n\tdefault:\n\t\tt.Fatal(\"should be done\")\n\t}\n}\n\nfunc TestPipeDoneChan_Break_ErrFirst(t *testing.T) {\n\tvar p pipe\n\tp.BreakWithError(io.EOF)\n\tdone := p.Done()\n\tselect {\n\tcase <-done:\n\tdefault:\n\t\tt.Fatal(\"should be done\")\n\t}\n}\n\nfunc TestPipeCloseWithError(t *testing.T) {\n\tp := &pipe{b: new(bytes.Buffer)}\n\tconst body = \"foo\"\n\tio.WriteString(p, body)\n\ta := errors.New(\"test error\")\n\tp.CloseWithError(a)\n\tall, err := ioutil.ReadAll(p)\n\tif string(all) != body {\n\t\tt.Errorf(\"read bytes = %q; want %q\", all, body)\n\t}\n\tif err != a {\n\t\tt.Logf(\"read error = %v, %v\", err, a)\n\t}\n\t// Read and Write should fail.\n\tif n, err := p.Write([]byte(\"abc\")); err != errClosedPipeWrite || n != 0 
{\n\t\tt.Errorf(\"Write(abc) after close\\ngot %v, %v\\nwant 0, %v\", n, err, errClosedPipeWrite)\n\t}\n\tif n, err := p.Read(make([]byte, 1)); err == nil || n != 0 {\n\t\tt.Errorf(\"Read() after close\\ngot %v, nil\\nwant 0, %v\", n, errClosedPipeWrite)\n\t}\n}\n\nfunc TestPipeBreakWithError(t *testing.T) {\n\tp := &pipe{b: new(bytes.Buffer)}\n\tio.WriteString(p, \"foo\")\n\ta := errors.New(\"test err\")\n\tp.BreakWithError(a)\n\tall, err := ioutil.ReadAll(p)\n\tif string(all) != \"\" {\n\t\tt.Errorf(\"read bytes = %q; want empty string\", all)\n\t}\n\tif err != a {\n\t\tt.Logf(\"read error = %v, %v\", err, a)\n\t}\n\tif p.b != nil {\n\t\tt.Errorf(\"buffer should be nil after BreakWithError\")\n\t}\n\t// Write should succeed silently.\n\tif n, err := p.Write([]byte(\"abc\")); err != nil || n != 3 {\n\t\tt.Errorf(\"Write(abc) after break\\ngot %v, %v\\nwant 0, nil\", n, err)\n\t}\n\tif p.b != nil {\n\t\tt.Errorf(\"buffer should be nil after Write\")\n\t}\n\t// Read should fail.\n\tif n, err := p.Read(make([]byte, 1)); err == nil || n != 0 {\n\t\tt.Errorf(\"Read() after close\\ngot %v, nil\\nwant 0, not nil\", n)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/server.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// TODO: turn off the serve goroutine when idle, so\n// an idle conn only has the readFrames goroutine active. (which could\n// also be optimized probably to pin less memory in crypto/tls). This\n// would involve tracking when the serve goroutine is active (atomic\n// int32 read/CAS probably?) and starting it up when frames arrive,\n// and shutting it down when all handlers exit. the occasional PING\n// packets could use time.AfterFunc to call sc.wakeStartServeLoop()\n// (which is a no-op if already running) and then queue the PING write\n// as normal. The serve loop would then exit in most cases (if no\n// Handlers running) and not be woken up again until the PING packet\n// returns.\n\n// TODO (maybe): add a mechanism for Handlers to going into\n// half-closed-local mode (rw.(io.Closer) test?) but not exit their\n// handler, and continue to be able to read from the\n// Request.Body. This would be a somewhat semantic change from HTTP/1\n// (or at least what we expose in net/http), so I'd probably want to\n// add it there too. 
For now, this package says that returning from\n// the Handler ServeHTTP function means you're both done reading and\n// done writing, without a way to stop just one or the other.\n\npackage http2\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/textproto\"\n\t\"net/url\"\n\t\"os\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org/x/net/http2/hpack\"\n)\n\nconst (\n\tprefaceTimeout        = 10 * time.Second\n\tfirstSettingsTimeout  = 2 * time.Second // should be in-flight with preface anyway\n\thandlerChunkWriteSize = 4 << 10\n\tdefaultMaxStreams     = 250 // TODO: make this 100 as the GFE seems to?\n)\n\nvar (\n\terrClientDisconnected = errors.New(\"client disconnected\")\n\terrClosedBody         = errors.New(\"body closed by handler\")\n\terrHandlerComplete    = errors.New(\"http2: request body closed due to handler exiting\")\n\terrStreamClosed       = errors.New(\"http2: stream closed\")\n)\n\nvar responseWriterStatePool = sync.Pool{\n\tNew: func() interface{} {\n\t\trws := &responseWriterState{}\n\t\trws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)\n\t\treturn rws\n\t},\n}\n\n// Test hooks.\nvar (\n\ttestHookOnConn        func()\n\ttestHookGetServerConn func(*serverConn)\n\ttestHookOnPanicMu     *sync.Mutex // nil except in tests\n\ttestHookOnPanic       func(sc *serverConn, panicVal interface{}) (rePanic bool)\n)\n\n// Server is an HTTP/2 server.\ntype Server struct {\n\t// MaxHandlers limits the number of http.Handler ServeHTTP goroutines\n\t// which may run at a time over all connections.\n\t// Negative or zero no limit.\n\t// TODO: implement\n\tMaxHandlers int\n\n\t// MaxConcurrentStreams optionally specifies the number of\n\t// concurrent streams that each client may have open at a\n\t// time. 
This is unrelated to the number of http.Handler goroutines\n\t// which may be active globally, which is MaxHandlers.\n\t// If zero, MaxConcurrentStreams defaults to at least 100, per\n\t// the HTTP/2 spec's recommendations.\n\tMaxConcurrentStreams uint32\n\n\t// MaxReadFrameSize optionally specifies the largest frame\n\t// this server is willing to read. A valid value is between\n\t// 16k and 16M, inclusive. If zero or otherwise invalid, a\n\t// default value is used.\n\tMaxReadFrameSize uint32\n\n\t// PermitProhibitedCipherSuites, if true, permits the use of\n\t// cipher suites prohibited by the HTTP/2 spec.\n\tPermitProhibitedCipherSuites bool\n\n\t// IdleTimeout specifies how long until idle clients should be\n\t// closed with a GOAWAY frame. PING frames are not considered\n\t// activity for the purposes of IdleTimeout.\n\tIdleTimeout time.Duration\n\n\t// MaxUploadBufferPerConnection is the size of the initial flow\n\t// control window for each connections. The HTTP/2 spec does not\n\t// allow this to be smaller than 65535 or larger than 2^32-1.\n\t// If the value is outside this range, a default value will be\n\t// used instead.\n\tMaxUploadBufferPerConnection int32\n\n\t// MaxUploadBufferPerStream is the size of the initial flow control\n\t// window for each stream. The HTTP/2 spec does not allow this to\n\t// be larger than 2^32-1. If the value is zero or larger than the\n\t// maximum, a default value will be used instead.\n\tMaxUploadBufferPerStream int32\n\n\t// NewWriteScheduler constructs a write scheduler for a connection.\n\t// If nil, a default scheduler is chosen.\n\tNewWriteScheduler func() WriteScheduler\n\n\t// Internal state. 
This is a pointer (rather than embedded directly)\n\t// so that we don't embed a Mutex in this struct, which will make the\n\t// struct non-copyable, which might break some callers.\n\tstate *serverInternalState\n}\n\nfunc (s *Server) initialConnRecvWindowSize() int32 {\n\tif s.MaxUploadBufferPerConnection > initialWindowSize {\n\t\treturn s.MaxUploadBufferPerConnection\n\t}\n\treturn 1 << 20\n}\n\nfunc (s *Server) initialStreamRecvWindowSize() int32 {\n\tif s.MaxUploadBufferPerStream > 0 {\n\t\treturn s.MaxUploadBufferPerStream\n\t}\n\treturn 1 << 20\n}\n\nfunc (s *Server) maxReadFrameSize() uint32 {\n\tif v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {\n\t\treturn v\n\t}\n\treturn defaultMaxReadFrameSize\n}\n\nfunc (s *Server) maxConcurrentStreams() uint32 {\n\tif v := s.MaxConcurrentStreams; v > 0 {\n\t\treturn v\n\t}\n\treturn defaultMaxStreams\n}\n\ntype serverInternalState struct {\n\tmu          sync.Mutex\n\tactiveConns map[*serverConn]struct{}\n}\n\nfunc (s *serverInternalState) registerConn(sc *serverConn) {\n\tif s == nil {\n\t\treturn // if the Server was used without calling ConfigureServer\n\t}\n\ts.mu.Lock()\n\ts.activeConns[sc] = struct{}{}\n\ts.mu.Unlock()\n}\n\nfunc (s *serverInternalState) unregisterConn(sc *serverConn) {\n\tif s == nil {\n\t\treturn // if the Server was used without calling ConfigureServer\n\t}\n\ts.mu.Lock()\n\tdelete(s.activeConns, sc)\n\ts.mu.Unlock()\n}\n\nfunc (s *serverInternalState) startGracefulShutdown() {\n\tif s == nil {\n\t\treturn // if the Server was used without calling ConfigureServer\n\t}\n\ts.mu.Lock()\n\tfor sc := range s.activeConns {\n\t\tsc.startGracefulShutdown()\n\t}\n\ts.mu.Unlock()\n}\n\n// ConfigureServer adds HTTP/2 support to a net/http Server.\n//\n// The configuration conf may be nil.\n//\n// ConfigureServer must be called before s begins serving.\nfunc ConfigureServer(s *http.Server, conf *Server) error {\n\tif s == nil {\n\t\tpanic(\"nil *http.Server\")\n\t}\n\tif conf == nil 
{\n\t\tconf = new(Server)\n\t}\n\tconf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}\n\tif err := configureServer18(s, conf); err != nil {\n\t\treturn err\n\t}\n\tif err := configureServer19(s, conf); err != nil {\n\t\treturn err\n\t}\n\n\tif s.TLSConfig == nil {\n\t\ts.TLSConfig = new(tls.Config)\n\t} else if s.TLSConfig.CipherSuites != nil {\n\t\t// If they already provided a CipherSuite list, return\n\t\t// an error if it has a bad order or is missing\n\t\t// ECDHE_RSA_WITH_AES_128_GCM_SHA256.\n\t\tconst requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n\t\thaveRequired := false\n\t\tsawBad := false\n\t\tfor i, cs := range s.TLSConfig.CipherSuites {\n\t\t\tif cs == requiredCipher {\n\t\t\t\thaveRequired = true\n\t\t\t}\n\t\t\tif isBadCipher(cs) {\n\t\t\t\tsawBad = true\n\t\t\t} else if sawBad {\n\t\t\t\treturn fmt.Errorf(\"http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.\", i, cs)\n\t\t\t}\n\t\t}\n\t\tif !haveRequired {\n\t\t\treturn fmt.Errorf(\"http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\")\n\t\t}\n\t}\n\n\t// Note: not setting MinVersion to tls.VersionTLS12,\n\t// as we don't want to interfere with HTTP/1.1 traffic\n\t// on the user's server. We enforce TLS 1.2 later once\n\t// we accept a connection. 
Ideally this should be done\n\t// during next-proto selection, but using TLS <1.2 with\n\t// HTTP/2 is still the client's bug.\n\n\ts.TLSConfig.PreferServerCipherSuites = true\n\n\thaveNPN := false\n\tfor _, p := range s.TLSConfig.NextProtos {\n\t\tif p == NextProtoTLS {\n\t\t\thaveNPN = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !haveNPN {\n\t\ts.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)\n\t}\n\n\tif s.TLSNextProto == nil {\n\t\ts.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}\n\t}\n\tprotoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {\n\t\tif testHookOnConn != nil {\n\t\t\ttestHookOnConn()\n\t\t}\n\t\tconf.ServeConn(c, &ServeConnOpts{\n\t\t\tHandler:    h,\n\t\t\tBaseConfig: hs,\n\t\t})\n\t}\n\ts.TLSNextProto[NextProtoTLS] = protoHandler\n\treturn nil\n}\n\n// ServeConnOpts are options for the Server.ServeConn method.\ntype ServeConnOpts struct {\n\t// BaseConfig optionally sets the base configuration\n\t// for values. If nil, defaults are used.\n\tBaseConfig *http.Server\n\n\t// Handler specifies which handler to use for processing\n\t// requests. If nil, BaseConfig.Handler is used. If BaseConfig\n\t// or BaseConfig.Handler is nil, http.DefaultServeMux is used.\n\tHandler http.Handler\n}\n\nfunc (o *ServeConnOpts) baseConfig() *http.Server {\n\tif o != nil && o.BaseConfig != nil {\n\t\treturn o.BaseConfig\n\t}\n\treturn new(http.Server)\n}\n\nfunc (o *ServeConnOpts) handler() http.Handler {\n\tif o != nil {\n\t\tif o.Handler != nil {\n\t\t\treturn o.Handler\n\t\t}\n\t\tif o.BaseConfig != nil && o.BaseConfig.Handler != nil {\n\t\t\treturn o.BaseConfig.Handler\n\t\t}\n\t}\n\treturn http.DefaultServeMux\n}\n\n// ServeConn serves HTTP/2 requests on the provided connection and\n// blocks until the connection is no longer readable.\n//\n// ServeConn starts speaking HTTP/2 assuming that c has not had any\n// reads or writes. 
It writes its initial settings frame and expects\n// to be able to read the preface and settings frame from the\n// client. If c has a ConnectionState method like a *tls.Conn, the\n// ConnectionState is used to verify the TLS ciphersuite and to set\n// the Request.TLS field in Handlers.\n//\n// ServeConn does not support h2c by itself. Any h2c support must be\n// implemented in terms of providing a suitably-behaving net.Conn.\n//\n// The opts parameter is optional. If nil, default values are used.\nfunc (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {\n\tbaseCtx, cancel := serverConnBaseContext(c, opts)\n\tdefer cancel()\n\n\tsc := &serverConn{\n\t\tsrv:                         s,\n\t\ths:                          opts.baseConfig(),\n\t\tconn:                        c,\n\t\tbaseCtx:                     baseCtx,\n\t\tremoteAddrStr:               c.RemoteAddr().String(),\n\t\tbw:                          newBufferedWriter(c),\n\t\thandler:                     opts.handler(),\n\t\tstreams:                     make(map[uint32]*stream),\n\t\treadFrameCh:                 make(chan readFrameResult),\n\t\twantWriteFrameCh:            make(chan FrameWriteRequest, 8),\n\t\tserveMsgCh:                  make(chan interface{}, 8),\n\t\twroteFrameCh:                make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync\n\t\tbodyReadCh:                  make(chan bodyReadMsg),         // buffering doesn't matter either way\n\t\tdoneServing:                 make(chan struct{}),\n\t\tclientMaxStreams:            math.MaxUint32, // Section 6.5.2: \"Initially, there is no limit to this value\"\n\t\tadvMaxStreams:               s.maxConcurrentStreams(),\n\t\tinitialStreamSendWindowSize: initialWindowSize,\n\t\tmaxFrameSize:                initialMaxFrameSize,\n\t\theaderTableSize:             initialHeaderTableSize,\n\t\tserveG:                      newGoroutineLock(),\n\t\tpushEnabled:                 true,\n\t}\n\n\ts.state.registerConn(sc)\n\tdefer 
s.state.unregisterConn(sc)\n\n\t// The net/http package sets the write deadline from the\n\t// http.Server.WriteTimeout during the TLS handshake, but then\n\t// passes the connection off to us with the deadline already set.\n\t// Write deadlines are set per stream in serverConn.newStream.\n\t// Disarm the net.Conn write deadline here.\n\tif sc.hs.WriteTimeout != 0 {\n\t\tsc.conn.SetWriteDeadline(time.Time{})\n\t}\n\n\tif s.NewWriteScheduler != nil {\n\t\tsc.writeSched = s.NewWriteScheduler()\n\t} else {\n\t\tsc.writeSched = NewRandomWriteScheduler()\n\t}\n\n\t// These start at the RFC-specified defaults. If there is a higher\n\t// configured value for inflow, that will be updated when we send a\n\t// WINDOW_UPDATE shortly after sending SETTINGS.\n\tsc.flow.add(initialWindowSize)\n\tsc.inflow.add(initialWindowSize)\n\tsc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)\n\n\tfr := NewFramer(sc.bw, c)\n\tfr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)\n\tfr.MaxHeaderListSize = sc.maxHeaderListSize()\n\tfr.SetMaxReadFrameSize(s.maxReadFrameSize())\n\tsc.framer = fr\n\n\tif tc, ok := c.(connectionStater); ok {\n\t\tsc.tlsState = new(tls.ConnectionState)\n\t\t*sc.tlsState = tc.ConnectionState()\n\t\t// 9.2 Use of TLS Features\n\t\t// An implementation of HTTP/2 over TLS MUST use TLS\n\t\t// 1.2 or higher with the restrictions on feature set\n\t\t// and cipher suite described in this section. Due to\n\t\t// implementation limitations, it might not be\n\t\t// possible to fail TLS negotiation. 
An endpoint MUST\n\t\t// immediately terminate an HTTP/2 connection that\n\t\t// does not meet the TLS requirements described in\n\t\t// this section with a connection error (Section\n\t\t// 5.4.1) of type INADEQUATE_SECURITY.\n\t\tif sc.tlsState.Version < tls.VersionTLS12 {\n\t\t\tsc.rejectConn(ErrCodeInadequateSecurity, \"TLS version too low\")\n\t\t\treturn\n\t\t}\n\n\t\tif sc.tlsState.ServerName == \"\" {\n\t\t\t// Client must use SNI, but we don't enforce that anymore,\n\t\t\t// since it was causing problems when connecting to bare IP\n\t\t\t// addresses during development.\n\t\t\t//\n\t\t\t// TODO: optionally enforce? Or enforce at the time we receive\n\t\t\t// a new request, and verify the the ServerName matches the :authority?\n\t\t\t// But that precludes proxy situations, perhaps.\n\t\t\t//\n\t\t\t// So for now, do nothing here again.\n\t\t}\n\n\t\tif !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {\n\t\t\t// \"Endpoints MAY choose to generate a connection error\n\t\t\t// (Section 5.4.1) of type INADEQUATE_SECURITY if one of\n\t\t\t// the prohibited cipher suites are negotiated.\"\n\t\t\t//\n\t\t\t// We choose that. In my opinion, the spec is weak\n\t\t\t// here. It also says both parties must support at least\n\t\t\t// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no\n\t\t\t// excuses here. If we really must, we could allow an\n\t\t\t// \"AllowInsecureWeakCiphers\" option on the server later.\n\t\t\t// Let's see how it plays out first.\n\t\t\tsc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf(\"Prohibited TLS 1.2 Cipher Suite: %x\", sc.tlsState.CipherSuite))\n\t\t\treturn\n\t\t}\n\t}\n\n\tif hook := testHookGetServerConn; hook != nil {\n\t\thook(sc)\n\t}\n\tsc.serve()\n}\n\nfunc (sc *serverConn) rejectConn(err ErrCode, debug string) {\n\tsc.vlogf(\"http2: server rejecting conn: %v, %s\", err, debug)\n\t// ignoring errors. 
hanging up anyway.\n\tsc.framer.WriteGoAway(0, err, []byte(debug))\n\tsc.bw.Flush()\n\tsc.conn.Close()\n}\n\ntype serverConn struct {\n\t// Immutable:\n\tsrv              *Server\n\ths               *http.Server\n\tconn             net.Conn\n\tbw               *bufferedWriter // writing to conn\n\thandler          http.Handler\n\tbaseCtx          contextContext\n\tframer           *Framer\n\tdoneServing      chan struct{}          // closed when serverConn.serve ends\n\treadFrameCh      chan readFrameResult   // written by serverConn.readFrames\n\twantWriteFrameCh chan FrameWriteRequest // from handlers -> serve\n\twroteFrameCh     chan frameWriteResult  // from writeFrameAsync -> serve, tickles more frame writes\n\tbodyReadCh       chan bodyReadMsg       // from handlers -> serve\n\tserveMsgCh       chan interface{}       // misc messages & code to send to / run on the serve loop\n\tflow             flow                   // conn-wide (not stream-specific) outbound flow control\n\tinflow           flow                   // conn-wide inbound flow control\n\ttlsState         *tls.ConnectionState   // shared by all handlers, like net/http\n\tremoteAddrStr    string\n\twriteSched       WriteScheduler\n\n\t// Everything following is owned by the serve loop; use serveG.check():\n\tserveG                      goroutineLock // used to verify funcs are on serve()\n\tpushEnabled                 bool\n\tsawFirstSettings            bool // got the initial SETTINGS frame after the preface\n\tneedToSendSettingsAck       bool\n\tunackedSettings             int    // how many SETTINGS have we sent without ACKs?\n\tclientMaxStreams            uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)\n\tadvMaxStreams               uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client\n\tcurClientStreams            uint32 // number of open streams initiated by the client\n\tcurPushedStreams            uint32 // number of open streams initiated by 
server push\n\tmaxClientStreamID           uint32 // max ever seen from client (odd), or 0 if there have been no client requests\n\tmaxPushPromiseID            uint32 // ID of the last push promise (even), or 0 if there have been no pushes\n\tstreams                     map[uint32]*stream\n\tinitialStreamSendWindowSize int32\n\tmaxFrameSize                int32\n\theaderTableSize             uint32\n\tpeerMaxHeaderListSize       uint32            // zero means unknown (default)\n\tcanonHeader                 map[string]string // http2-lower-case -> Go-Canonical-Case\n\twritingFrame                bool              // started writing a frame (on serve goroutine or separate)\n\twritingFrameAsync           bool              // started a frame on its own goroutine but haven't heard back on wroteFrameCh\n\tneedsFrameFlush             bool              // last frame write wasn't a flush\n\tinGoAway                    bool              // we've started to or sent GOAWAY\n\tinFrameScheduleLoop         bool              // whether we're in the scheduleFrameWrite loop\n\tneedToSendGoAway            bool              // we need to schedule a GOAWAY frame write\n\tgoAwayCode                  ErrCode\n\tshutdownTimer               *time.Timer // nil until used\n\tidleTimer                   *time.Timer // nil if unused\n\n\t// Owned by the writeFrameAsync goroutine:\n\theaderWriteBuf bytes.Buffer\n\thpackEncoder   *hpack.Encoder\n\n\t// Used by startGracefulShutdown.\n\tshutdownOnce sync.Once\n}\n\nfunc (sc *serverConn) maxHeaderListSize() uint32 {\n\tn := sc.hs.MaxHeaderBytes\n\tif n <= 0 {\n\t\tn = http.DefaultMaxHeaderBytes\n\t}\n\t// http2's count is in a slightly different unit and includes 32 bytes per pair.\n\t// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.\n\tconst perFieldOverhead = 32 // per http2 spec\n\tconst typicalHeaders = 10   // conservative\n\treturn uint32(n + typicalHeaders*perFieldOverhead)\n}\n\nfunc (sc *serverConn) 
curOpenStreams() uint32 {\n\tsc.serveG.check()\n\treturn sc.curClientStreams + sc.curPushedStreams\n}\n\n// stream represents a stream. This is the minimal metadata needed by\n// the serve goroutine. Most of the actual stream state is owned by\n// the http.Handler's goroutine in the responseWriter. Because the\n// responseWriter's responseWriterState is recycled at the end of a\n// handler, this struct intentionally has no pointer to the\n// *responseWriter{,State} itself, as the Handler ending nils out the\n// responseWriter's state field.\ntype stream struct {\n\t// immutable:\n\tsc        *serverConn\n\tid        uint32\n\tbody      *pipe       // non-nil if expecting DATA frames\n\tcw        closeWaiter // closed wait stream transitions to closed state\n\tctx       contextContext\n\tcancelCtx func()\n\n\t// owned by serverConn's serve loop:\n\tbodyBytes        int64   // body bytes seen so far\n\tdeclBodyBytes    int64   // or -1 if undeclared\n\tflow             flow    // limits writing from Handler to client\n\tinflow           flow    // what the client is allowed to POST/etc to us\n\tparent           *stream // or nil\n\tnumTrailerValues int64\n\tweight           uint8\n\tstate            streamState\n\tresetQueued      bool        // RST_STREAM queued for write; set by sc.resetStream\n\tgotTrailerHeader bool        // HEADER frame for trailers was seen\n\twroteHeaders     bool        // whether we wrote headers (not status 100)\n\twriteDeadline    *time.Timer // nil if unused\n\n\ttrailer    http.Header // accumulated trailers\n\treqTrailer http.Header // handler's Request.Trailer\n}\n\nfunc (sc *serverConn) Framer() *Framer  { return sc.framer }\nfunc (sc *serverConn) CloseConn() error { return sc.conn.Close() }\nfunc (sc *serverConn) Flush() error     { return sc.bw.Flush() }\nfunc (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {\n\treturn sc.hpackEncoder, &sc.headerWriteBuf\n}\n\nfunc (sc *serverConn) state(streamID uint32) 
(streamState, *stream) {\n\tsc.serveG.check()\n\t// http://tools.ietf.org/html/rfc7540#section-5.1\n\tif st, ok := sc.streams[streamID]; ok {\n\t\treturn st.state, st\n\t}\n\t// \"The first use of a new stream identifier implicitly closes all\n\t// streams in the \"idle\" state that might have been initiated by\n\t// that peer with a lower-valued stream identifier. For example, if\n\t// a client sends a HEADERS frame on stream 7 without ever sending a\n\t// frame on stream 5, then stream 5 transitions to the \"closed\"\n\t// state when the first frame for stream 7 is sent or received.\"\n\tif streamID%2 == 1 {\n\t\tif streamID <= sc.maxClientStreamID {\n\t\t\treturn stateClosed, nil\n\t\t}\n\t} else {\n\t\tif streamID <= sc.maxPushPromiseID {\n\t\t\treturn stateClosed, nil\n\t\t}\n\t}\n\treturn stateIdle, nil\n}\n\n// setConnState calls the net/http ConnState hook for this connection, if configured.\n// Note that the net/http package does StateNew and StateClosed for us.\n// There is currently no plan for StateHijacked or hijacking HTTP/2 connections.\nfunc (sc *serverConn) setConnState(state http.ConnState) {\n\tif sc.hs.ConnState != nil {\n\t\tsc.hs.ConnState(sc.conn, state)\n\t}\n}\n\nfunc (sc *serverConn) vlogf(format string, args ...interface{}) {\n\tif VerboseLogs {\n\t\tsc.logf(format, args...)\n\t}\n}\n\nfunc (sc *serverConn) logf(format string, args ...interface{}) {\n\tif lg := sc.hs.ErrorLog; lg != nil {\n\t\tlg.Printf(format, args...)\n\t} else {\n\t\tlog.Printf(format, args...)\n\t}\n}\n\n// errno returns v's underlying uintptr, else 0.\n//\n// TODO: remove this helper function once http2 can use build\n// tags. 
See comment in isClosedConnError.\nfunc errno(v error) uintptr {\n\tif rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {\n\t\treturn uintptr(rv.Uint())\n\t}\n\treturn 0\n}\n\n// isClosedConnError reports whether err is an error from use of a closed\n// network connection.\nfunc isClosedConnError(err error) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\n\t// TODO: remove this string search and be more like the Windows\n\t// case below. That might involve modifying the standard library\n\t// to return better error types.\n\tstr := err.Error()\n\tif strings.Contains(str, \"use of closed network connection\") {\n\t\treturn true\n\t}\n\n\t// TODO(bradfitz): x/tools/cmd/bundle doesn't really support\n\t// build tags, so I can't make an http2_windows.go file with\n\t// Windows-specific stuff. Fix that and move this, once we\n\t// have a way to bundle this into std's net/http somehow.\n\tif runtime.GOOS == \"windows\" {\n\t\tif oe, ok := err.(*net.OpError); ok && oe.Op == \"read\" {\n\t\t\tif se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == \"wsarecv\" {\n\t\t\t\tconst WSAECONNABORTED = 10053\n\t\t\t\tconst WSAECONNRESET = 10054\n\t\t\t\tif n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (sc *serverConn) condlogf(err error, format string, args ...interface{}) {\n\tif err == nil {\n\t\treturn\n\t}\n\tif err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) {\n\t\t// Boring, expected errors.\n\t\tsc.vlogf(format, args...)\n\t} else {\n\t\tsc.logf(format, args...)\n\t}\n}\n\nfunc (sc *serverConn) canonicalHeader(v string) string {\n\tsc.serveG.check()\n\tcv, ok := commonCanonHeader[v]\n\tif ok {\n\t\treturn cv\n\t}\n\tcv, ok = sc.canonHeader[v]\n\tif ok {\n\t\treturn cv\n\t}\n\tif sc.canonHeader == nil {\n\t\tsc.canonHeader = make(map[string]string)\n\t}\n\tcv = http.CanonicalHeaderKey(v)\n\tsc.canonHeader[v] = cv\n\treturn cv\n}\n\ntype 
readFrameResult struct {\n\tf   Frame // valid until readMore is called\n\terr error\n\n\t// readMore should be called once the consumer no longer needs or\n\t// retains f. After readMore, f is invalid and more frames can be\n\t// read.\n\treadMore func()\n}\n\n// readFrames is the loop that reads incoming frames.\n// It takes care to only read one frame at a time, blocking until the\n// consumer is done with the frame.\n// It's run on its own goroutine.\nfunc (sc *serverConn) readFrames() {\n\tgate := make(gate)\n\tgateDone := gate.Done\n\tfor {\n\t\tf, err := sc.framer.ReadFrame()\n\t\tselect {\n\t\tcase sc.readFrameCh <- readFrameResult{f, err, gateDone}:\n\t\tcase <-sc.doneServing:\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-gate:\n\t\tcase <-sc.doneServing:\n\t\t\treturn\n\t\t}\n\t\tif terminalReadFrameError(err) {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.\ntype frameWriteResult struct {\n\twr  FrameWriteRequest // what was written (or attempted)\n\terr error             // result of the writeFrame call\n}\n\n// writeFrameAsync runs in its own goroutine and writes a single frame\n// and then reports when it's done.\n// At most one goroutine can be running writeFrameAsync at a time per\n// serverConn.\nfunc (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) {\n\terr := wr.write.writeFrame(sc)\n\tsc.wroteFrameCh <- frameWriteResult{wr, err}\n}\n\nfunc (sc *serverConn) closeAllStreamsOnConnClose() {\n\tsc.serveG.check()\n\tfor _, st := range sc.streams {\n\t\tsc.closeStream(st, errClientDisconnected)\n\t}\n}\n\nfunc (sc *serverConn) stopShutdownTimer() {\n\tsc.serveG.check()\n\tif t := sc.shutdownTimer; t != nil {\n\t\tt.Stop()\n\t}\n}\n\nfunc (sc *serverConn) notePanic() {\n\t// Note: this is for serverConn.serve panicking, not http.Handler code.\n\tif testHookOnPanicMu != nil {\n\t\ttestHookOnPanicMu.Lock()\n\t\tdefer testHookOnPanicMu.Unlock()\n\t}\n\tif testHookOnPanic != nil 
{\n\t\tif e := recover(); e != nil {\n\t\t\tif testHookOnPanic(sc, e) {\n\t\t\t\tpanic(e)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (sc *serverConn) serve() {\n\tsc.serveG.check()\n\tdefer sc.notePanic()\n\tdefer sc.conn.Close()\n\tdefer sc.closeAllStreamsOnConnClose()\n\tdefer sc.stopShutdownTimer()\n\tdefer close(sc.doneServing) // unblocks handlers trying to send\n\n\tif VerboseLogs {\n\t\tsc.vlogf(\"http2: server connection from %v on %p\", sc.conn.RemoteAddr(), sc.hs)\n\t}\n\n\tsc.writeFrame(FrameWriteRequest{\n\t\twrite: writeSettings{\n\t\t\t{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},\n\t\t\t{SettingMaxConcurrentStreams, sc.advMaxStreams},\n\t\t\t{SettingMaxHeaderListSize, sc.maxHeaderListSize()},\n\t\t\t{SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},\n\t\t},\n\t})\n\tsc.unackedSettings++\n\n\t// Each connection starts with intialWindowSize inflow tokens.\n\t// If a higher value is configured, we add more tokens.\n\tif diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {\n\t\tsc.sendWindowUpdate(nil, int(diff))\n\t}\n\n\tif err := sc.readPreface(); err != nil {\n\t\tsc.condlogf(err, \"http2: server: error reading preface from client %v: %v\", sc.conn.RemoteAddr(), err)\n\t\treturn\n\t}\n\t// Now that we've got the preface, get us out of the\n\t// \"StateNew\" state. We can't go directly to idle, though.\n\t// Active means we read some data and anticipate a request. 
We'll\n\t// do another Active when we get a HEADERS frame.\n\tsc.setConnState(http.StateActive)\n\tsc.setConnState(http.StateIdle)\n\n\tif sc.srv.IdleTimeout != 0 {\n\t\tsc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)\n\t\tdefer sc.idleTimer.Stop()\n\t}\n\n\tgo sc.readFrames() // closed by defer sc.conn.Close above\n\n\tsettingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)\n\tdefer settingsTimer.Stop()\n\n\tloopNum := 0\n\tfor {\n\t\tloopNum++\n\t\tselect {\n\t\tcase wr := <-sc.wantWriteFrameCh:\n\t\t\tif se, ok := wr.write.(StreamError); ok {\n\t\t\t\tsc.resetStream(se)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsc.writeFrame(wr)\n\t\tcase res := <-sc.wroteFrameCh:\n\t\t\tsc.wroteFrame(res)\n\t\tcase res := <-sc.readFrameCh:\n\t\t\tif !sc.processFrameFromReader(res) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tres.readMore()\n\t\t\tif settingsTimer != nil {\n\t\t\t\tsettingsTimer.Stop()\n\t\t\t\tsettingsTimer = nil\n\t\t\t}\n\t\tcase m := <-sc.bodyReadCh:\n\t\t\tsc.noteBodyRead(m.st, m.n)\n\t\tcase msg := <-sc.serveMsgCh:\n\t\t\tswitch v := msg.(type) {\n\t\t\tcase func(int):\n\t\t\t\tv(loopNum) // for testing\n\t\t\tcase *serverMessage:\n\t\t\t\tswitch v {\n\t\t\t\tcase settingsTimerMsg:\n\t\t\t\t\tsc.logf(\"timeout waiting for SETTINGS frames from %v\", sc.conn.RemoteAddr())\n\t\t\t\t\treturn\n\t\t\t\tcase idleTimerMsg:\n\t\t\t\t\tsc.vlogf(\"connection is idle\")\n\t\t\t\t\tsc.goAway(ErrCodeNo)\n\t\t\t\tcase shutdownTimerMsg:\n\t\t\t\t\tsc.vlogf(\"GOAWAY close timer fired; closing conn from %v\", sc.conn.RemoteAddr())\n\t\t\t\t\treturn\n\t\t\t\tcase gracefulShutdownMsg:\n\t\t\t\t\tsc.startGracefulShutdownInternal()\n\t\t\t\tdefault:\n\t\t\t\t\tpanic(\"unknown timer\")\n\t\t\t\t}\n\t\t\tcase *startPushRequest:\n\t\t\t\tsc.startPush(v)\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"unexpected type %T\", v))\n\t\t\t}\n\t\t}\n\n\t\t// Start the shutdown timer after sending a GOAWAY. 
When sending GOAWAY\n\t\t// with no error code (graceful shutdown), don't start the timer until\n\t\t// all open streams have been completed.\n\t\tsentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame\n\t\tgracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0\n\t\tif sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) {\n\t\t\tsc.shutDownIn(goAwayTimeout)\n\t\t}\n\t}\n}\n\nfunc (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {\n\tselect {\n\tcase <-sc.doneServing:\n\tcase <-sharedCh:\n\t\tclose(privateCh)\n\t}\n}\n\ntype serverMessage int\n\n// Message values sent to serveMsgCh.\nvar (\n\tsettingsTimerMsg    = new(serverMessage)\n\tidleTimerMsg        = new(serverMessage)\n\tshutdownTimerMsg    = new(serverMessage)\n\tgracefulShutdownMsg = new(serverMessage)\n)\n\nfunc (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }\nfunc (sc *serverConn) onIdleTimer()     { sc.sendServeMsg(idleTimerMsg) }\nfunc (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }\n\nfunc (sc *serverConn) sendServeMsg(msg interface{}) {\n\tsc.serveG.checkNotOn() // NOT\n\tselect {\n\tcase sc.serveMsgCh <- msg:\n\tcase <-sc.doneServing:\n\t}\n}\n\n// readPreface reads the ClientPreface greeting from the peer\n// or returns an error on timeout or an invalid greeting.\nfunc (sc *serverConn) readPreface() error {\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\t// Read the client preface\n\t\tbuf := make([]byte, len(ClientPreface))\n\t\tif _, err := io.ReadFull(sc.conn, buf); err != nil {\n\t\t\terrc <- err\n\t\t} else if !bytes.Equal(buf, clientPreface) {\n\t\t\terrc <- fmt.Errorf(\"bogus greeting %q\", buf)\n\t\t} else {\n\t\t\terrc <- nil\n\t\t}\n\t}()\n\ttimer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?\n\tdefer timer.Stop()\n\tselect {\n\tcase <-timer.C:\n\t\treturn errors.New(\"timeout waiting for 
client preface\")\n\tcase err := <-errc:\n\t\tif err == nil {\n\t\t\tif VerboseLogs {\n\t\t\t\tsc.vlogf(\"http2: server: client %v said hello\", sc.conn.RemoteAddr())\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n}\n\nvar errChanPool = sync.Pool{\n\tNew: func() interface{} { return make(chan error, 1) },\n}\n\nvar writeDataPool = sync.Pool{\n\tNew: func() interface{} { return new(writeData) },\n}\n\n// writeDataFromHandler writes DATA response frames from a handler on\n// the given stream.\nfunc (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {\n\tch := errChanPool.Get().(chan error)\n\twriteArg := writeDataPool.Get().(*writeData)\n\t*writeArg = writeData{stream.id, data, endStream}\n\terr := sc.writeFrameFromHandler(FrameWriteRequest{\n\t\twrite:  writeArg,\n\t\tstream: stream,\n\t\tdone:   ch,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar frameWriteDone bool // the frame write is done (successfully or not)\n\tselect {\n\tcase err = <-ch:\n\t\tframeWriteDone = true\n\tcase <-sc.doneServing:\n\t\treturn errClientDisconnected\n\tcase <-stream.cw:\n\t\t// If both ch and stream.cw were ready (as might\n\t\t// happen on the final Write after an http.Handler\n\t\t// ends), prefer the write result. Otherwise this\n\t\t// might just be us successfully closing the stream.\n\t\t// The writeFrameAsync and serve goroutines guarantee\n\t\t// that the ch send will happen before the stream.cw\n\t\t// close.\n\t\tselect {\n\t\tcase err = <-ch:\n\t\t\tframeWriteDone = true\n\t\tdefault:\n\t\t\treturn errStreamClosed\n\t\t}\n\t}\n\terrChanPool.Put(ch)\n\tif frameWriteDone {\n\t\twriteDataPool.Put(writeArg)\n\t}\n\treturn err\n}\n\n// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts\n// if the connection has gone away.\n//\n// This must not be run from the serve goroutine itself, else it might\n// deadlock writing to sc.wantWriteFrameCh (which is only mildly\n// buffered and is read by serve itself). 
If you're on the serve\n// goroutine, call writeFrame instead.\nfunc (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error {\n\tsc.serveG.checkNotOn() // NOT\n\tselect {\n\tcase sc.wantWriteFrameCh <- wr:\n\t\treturn nil\n\tcase <-sc.doneServing:\n\t\t// Serve loop is gone.\n\t\t// Client has closed their connection to the server.\n\t\treturn errClientDisconnected\n\t}\n}\n\n// writeFrame schedules a frame to write and sends it if there's nothing\n// already being written.\n//\n// There is no pushback here (the serve goroutine never blocks). It's\n// the http.Handlers that block, waiting for their previous frames to\n// make it onto the wire\n//\n// If you're not on the serve goroutine, use writeFrameFromHandler instead.\nfunc (sc *serverConn) writeFrame(wr FrameWriteRequest) {\n\tsc.serveG.check()\n\n\t// If true, wr will not be written and wr.done will not be signaled.\n\tvar ignoreWrite bool\n\n\t// We are not allowed to write frames on closed streams. RFC 7540 Section\n\t// 5.1.1 says: \"An endpoint MUST NOT send frames other than PRIORITY on\n\t// a closed stream.\" Our server never sends PRIORITY, so that exception\n\t// does not apply.\n\t//\n\t// The serverConn might close an open stream while the stream's handler\n\t// is still running. For example, the server might close a stream when it\n\t// receives bad data from the client. If this happens, the handler might\n\t// attempt to write a frame after the stream has been closed (since the\n\t// handler hasn't yet been notified of the close). In this case, we simply\n\t// ignore the frame. The handler will notice that the stream is closed when\n\t// it waits for the frame to be written.\n\t//\n\t// As an exception to this rule, we allow sending RST_STREAM after close.\n\t// This allows us to immediately reject new streams without tracking any\n\t// state for those streams (except for the queued RST_STREAM frame). 
This\n\t// may result in duplicate RST_STREAMs in some cases, but the client should\n\t// ignore those.\n\tif wr.StreamID() != 0 {\n\t\t_, isReset := wr.write.(StreamError)\n\t\tif state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset {\n\t\t\tignoreWrite = true\n\t\t}\n\t}\n\n\t// Don't send a 100-continue response if we've already sent headers.\n\t// See golang.org/issue/14030.\n\tswitch wr.write.(type) {\n\tcase *writeResHeaders:\n\t\twr.stream.wroteHeaders = true\n\tcase write100ContinueHeadersFrame:\n\t\tif wr.stream.wroteHeaders {\n\t\t\t// We do not need to notify wr.done because this frame is\n\t\t\t// never written with wr.done != nil.\n\t\t\tif wr.done != nil {\n\t\t\t\tpanic(\"wr.done != nil for write100ContinueHeadersFrame\")\n\t\t\t}\n\t\t\tignoreWrite = true\n\t\t}\n\t}\n\n\tif !ignoreWrite {\n\t\tsc.writeSched.Push(wr)\n\t}\n\tsc.scheduleFrameWrite()\n}\n\n// startFrameWrite starts a goroutine to write wr (in a separate\n// goroutine since that might block on the network), and updates the\n// serve goroutine's state about the world, updated from info in wr.\nfunc (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {\n\tsc.serveG.check()\n\tif sc.writingFrame {\n\t\tpanic(\"internal error: can only be writing one frame at a time\")\n\t}\n\n\tst := wr.stream\n\tif st != nil {\n\t\tswitch st.state {\n\t\tcase stateHalfClosedLocal:\n\t\t\tswitch wr.write.(type) {\n\t\t\tcase StreamError, handlerPanicRST, writeWindowUpdate:\n\t\t\t\t// RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE\n\t\t\t\t// in this state. 
(We never send PRIORITY from the server, so that is not checked.)\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"internal error: attempt to send frame on a half-closed-local stream: %v\", wr))\n\t\t\t}\n\t\tcase stateClosed:\n\t\t\tpanic(fmt.Sprintf(\"internal error: attempt to send frame on a closed stream: %v\", wr))\n\t\t}\n\t}\n\tif wpp, ok := wr.write.(*writePushPromise); ok {\n\t\tvar err error\n\t\twpp.promisedID, err = wpp.allocatePromisedID()\n\t\tif err != nil {\n\t\t\tsc.writingFrameAsync = false\n\t\t\twr.replyToWriter(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tsc.writingFrame = true\n\tsc.needsFrameFlush = true\n\tif wr.write.staysWithinBuffer(sc.bw.Available()) {\n\t\tsc.writingFrameAsync = false\n\t\terr := wr.write.writeFrame(sc)\n\t\tsc.wroteFrame(frameWriteResult{wr, err})\n\t} else {\n\t\tsc.writingFrameAsync = true\n\t\tgo sc.writeFrameAsync(wr)\n\t}\n}\n\n// errHandlerPanicked is the error given to any callers blocked in a read from\n// Request.Body when the main goroutine panics. 
Since most handlers read in the\n// the main ServeHTTP goroutine, this will show up rarely.\nvar errHandlerPanicked = errors.New(\"http2: handler panicked\")\n\n// wroteFrame is called on the serve goroutine with the result of\n// whatever happened on writeFrameAsync.\nfunc (sc *serverConn) wroteFrame(res frameWriteResult) {\n\tsc.serveG.check()\n\tif !sc.writingFrame {\n\t\tpanic(\"internal error: expected to be already writing a frame\")\n\t}\n\tsc.writingFrame = false\n\tsc.writingFrameAsync = false\n\n\twr := res.wr\n\n\tif writeEndsStream(wr.write) {\n\t\tst := wr.stream\n\t\tif st == nil {\n\t\t\tpanic(\"internal error: expecting non-nil stream\")\n\t\t}\n\t\tswitch st.state {\n\t\tcase stateOpen:\n\t\t\t// Here we would go to stateHalfClosedLocal in\n\t\t\t// theory, but since our handler is done and\n\t\t\t// the net/http package provides no mechanism\n\t\t\t// for closing a ResponseWriter while still\n\t\t\t// reading data (see possible TODO at top of\n\t\t\t// this file), we go into closed state here\n\t\t\t// anyway, after telling the peer we're\n\t\t\t// hanging up on them. 
We'll transition to\n\t\t\t// stateClosed after the RST_STREAM frame is\n\t\t\t// written.\n\t\t\tst.state = stateHalfClosedLocal\n\t\t\t// Section 8.1: a server MAY request that the client abort\n\t\t\t// transmission of a request without error by sending a\n\t\t\t// RST_STREAM with an error code of NO_ERROR after sending\n\t\t\t// a complete response.\n\t\t\tsc.resetStream(streamError(st.id, ErrCodeNo))\n\t\tcase stateHalfClosedRemote:\n\t\t\tsc.closeStream(st, errHandlerComplete)\n\t\t}\n\t} else {\n\t\tswitch v := wr.write.(type) {\n\t\tcase StreamError:\n\t\t\t// st may be unknown if the RST_STREAM was generated to reject bad input.\n\t\t\tif st, ok := sc.streams[v.StreamID]; ok {\n\t\t\t\tsc.closeStream(st, v)\n\t\t\t}\n\t\tcase handlerPanicRST:\n\t\t\tsc.closeStream(wr.stream, errHandlerPanicked)\n\t\t}\n\t}\n\n\t// Reply (if requested) to unblock the ServeHTTP goroutine.\n\twr.replyToWriter(res.err)\n\n\tsc.scheduleFrameWrite()\n}\n\n// scheduleFrameWrite tickles the frame writing scheduler.\n//\n// If a frame is already being written, nothing happens. This will be called again\n// when the frame is done being written.\n//\n// If a frame isn't being written we need to send one, the best frame\n// to send is selected, preferring first things that aren't\n// stream-specific (e.g. 
ACKing settings), and then finding the\n// highest priority stream.\n//\n// If a frame isn't being written and there's nothing else to send, we\n// flush the write buffer.\nfunc (sc *serverConn) scheduleFrameWrite() {\n\tsc.serveG.check()\n\tif sc.writingFrame || sc.inFrameScheduleLoop {\n\t\treturn\n\t}\n\tsc.inFrameScheduleLoop = true\n\tfor !sc.writingFrameAsync {\n\t\tif sc.needToSendGoAway {\n\t\t\tsc.needToSendGoAway = false\n\t\t\tsc.startFrameWrite(FrameWriteRequest{\n\t\t\t\twrite: &writeGoAway{\n\t\t\t\t\tmaxStreamID: sc.maxClientStreamID,\n\t\t\t\t\tcode:        sc.goAwayCode,\n\t\t\t\t},\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t\tif sc.needToSendSettingsAck {\n\t\t\tsc.needToSendSettingsAck = false\n\t\t\tsc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}})\n\t\t\tcontinue\n\t\t}\n\t\tif !sc.inGoAway || sc.goAwayCode == ErrCodeNo {\n\t\t\tif wr, ok := sc.writeSched.Pop(); ok {\n\t\t\t\tsc.startFrameWrite(wr)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif sc.needsFrameFlush {\n\t\t\tsc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}})\n\t\t\tsc.needsFrameFlush = false // after startFrameWrite, since it sets this true\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tsc.inFrameScheduleLoop = false\n}\n\n// startGracefulShutdown gracefully shuts down a connection. This\n// sends GOAWAY with ErrCodeNo to tell the client we're gracefully\n// shutting down. 
The connection isn't closed until all current\n// streams are done.\n//\n// startGracefulShutdown returns immediately; it does not wait until\n// the connection has shut down.\nfunc (sc *serverConn) startGracefulShutdown() {\n\tsc.serveG.checkNotOn() // NOT\n\tsc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })\n}\n\n// After sending GOAWAY, the connection will close after goAwayTimeout.\n// If we close the connection immediately after sending GOAWAY, there may\n// be unsent data in our kernel receive buffer, which will cause the kernel\n// to send a TCP RST on close() instead of a FIN. This RST will abort the\n// connection immediately, whether or not the client had received the GOAWAY.\n//\n// Ideally we should delay for at least 1 RTT + epsilon so the client has\n// a chance to read the GOAWAY and stop sending messages. Measuring RTT\n// is hard, so we approximate with 1 second. See golang.org/issue/18701.\n//\n// This is a var so it can be shorter in tests, where all requests uses the\n// loopback interface making the expected RTT very small.\n//\n// TODO: configurable?\nvar goAwayTimeout = 1 * time.Second\n\nfunc (sc *serverConn) startGracefulShutdownInternal() {\n\tsc.goAway(ErrCodeNo)\n}\n\nfunc (sc *serverConn) goAway(code ErrCode) {\n\tsc.serveG.check()\n\tif sc.inGoAway {\n\t\treturn\n\t}\n\tsc.inGoAway = true\n\tsc.needToSendGoAway = true\n\tsc.goAwayCode = code\n\tsc.scheduleFrameWrite()\n}\n\nfunc (sc *serverConn) shutDownIn(d time.Duration) {\n\tsc.serveG.check()\n\tsc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)\n}\n\nfunc (sc *serverConn) resetStream(se StreamError) {\n\tsc.serveG.check()\n\tsc.writeFrame(FrameWriteRequest{write: se})\n\tif st, ok := sc.streams[se.StreamID]; ok {\n\t\tst.resetQueued = true\n\t}\n}\n\n// processFrameFromReader processes the serve loop's read from readFrameCh from the\n// frame-reading goroutine.\n// processFrameFromReader returns whether the connection should be kept open.\nfunc (sc 
*serverConn) processFrameFromReader(res readFrameResult) bool {\n\tsc.serveG.check()\n\terr := res.err\n\tif err != nil {\n\t\tif err == ErrFrameTooLarge {\n\t\t\tsc.goAway(ErrCodeFrameSize)\n\t\t\treturn true // goAway will close the loop\n\t\t}\n\t\tclientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)\n\t\tif clientGone {\n\t\t\t// TODO: could we also get into this state if\n\t\t\t// the peer does a half close\n\t\t\t// (e.g. CloseWrite) because they're done\n\t\t\t// sending frames but they're still wanting\n\t\t\t// our open replies?  Investigate.\n\t\t\t// TODO: add CloseWrite to crypto/tls.Conn first\n\t\t\t// so we have a way to test this? I suppose\n\t\t\t// just for testing we could have a non-TLS mode.\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\tf := res.f\n\t\tif VerboseLogs {\n\t\t\tsc.vlogf(\"http2: server read frame %v\", summarizeFrame(f))\n\t\t}\n\t\terr = sc.processFrame(f)\n\t\tif err == nil {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tswitch ev := err.(type) {\n\tcase StreamError:\n\t\tsc.resetStream(ev)\n\t\treturn true\n\tcase goAwayFlowError:\n\t\tsc.goAway(ErrCodeFlowControl)\n\t\treturn true\n\tcase ConnectionError:\n\t\tsc.logf(\"http2: server connection error from %v: %v\", sc.conn.RemoteAddr(), ev)\n\t\tsc.goAway(ErrCode(ev))\n\t\treturn true // goAway will handle shutdown\n\tdefault:\n\t\tif res.err != nil {\n\t\t\tsc.vlogf(\"http2: server closing client connection; error reading frame from client %s: %v\", sc.conn.RemoteAddr(), err)\n\t\t} else {\n\t\t\tsc.logf(\"http2: server closing client connection: %v\", err)\n\t\t}\n\t\treturn false\n\t}\n}\n\nfunc (sc *serverConn) processFrame(f Frame) error {\n\tsc.serveG.check()\n\n\t// First frame received must be SETTINGS.\n\tif !sc.sawFirstSettings {\n\t\tif _, ok := f.(*SettingsFrame); !ok {\n\t\t\treturn ConnectionError(ErrCodeProtocol)\n\t\t}\n\t\tsc.sawFirstSettings = true\n\t}\n\n\tswitch f := f.(type) {\n\tcase *SettingsFrame:\n\t\treturn 
sc.processSettings(f)\n\tcase *MetaHeadersFrame:\n\t\treturn sc.processHeaders(f)\n\tcase *WindowUpdateFrame:\n\t\treturn sc.processWindowUpdate(f)\n\tcase *PingFrame:\n\t\treturn sc.processPing(f)\n\tcase *DataFrame:\n\t\treturn sc.processData(f)\n\tcase *RSTStreamFrame:\n\t\treturn sc.processResetStream(f)\n\tcase *PriorityFrame:\n\t\treturn sc.processPriority(f)\n\tcase *GoAwayFrame:\n\t\treturn sc.processGoAway(f)\n\tcase *PushPromiseFrame:\n\t\t// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE\n\t\t// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.\n\t\treturn ConnectionError(ErrCodeProtocol)\n\tdefault:\n\t\tsc.vlogf(\"http2: server ignoring frame: %v\", f.Header())\n\t\treturn nil\n\t}\n}\n\nfunc (sc *serverConn) processPing(f *PingFrame) error {\n\tsc.serveG.check()\n\tif f.IsAck() {\n\t\t// 6.7 PING: \" An endpoint MUST NOT respond to PING frames\n\t\t// containing this flag.\"\n\t\treturn nil\n\t}\n\tif f.StreamID != 0 {\n\t\t// \"PING frames are not associated with any individual\n\t\t// stream. 
If a PING frame is received with a stream\n\t\t// identifier field value other than 0x0, the recipient MUST\n\t\t// respond with a connection error (Section 5.4.1) of type\n\t\t// PROTOCOL_ERROR.\"\n\t\treturn ConnectionError(ErrCodeProtocol)\n\t}\n\tif sc.inGoAway && sc.goAwayCode != ErrCodeNo {\n\t\treturn nil\n\t}\n\tsc.writeFrame(FrameWriteRequest{write: writePingAck{f}})\n\treturn nil\n}\n\nfunc (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {\n\tsc.serveG.check()\n\tswitch {\n\tcase f.StreamID != 0: // stream-level flow control\n\t\tstate, st := sc.state(f.StreamID)\n\t\tif state == stateIdle {\n\t\t\t// Section 5.1: \"Receiving any frame other than HEADERS\n\t\t\t// or PRIORITY on a stream in this state MUST be\n\t\t\t// treated as a connection error (Section 5.4.1) of\n\t\t\t// type PROTOCOL_ERROR.\"\n\t\t\treturn ConnectionError(ErrCodeProtocol)\n\t\t}\n\t\tif st == nil {\n\t\t\t// \"WINDOW_UPDATE can be sent by a peer that has sent a\n\t\t\t// frame bearing the END_STREAM flag. This means that a\n\t\t\t// receiver could receive a WINDOW_UPDATE frame on a \"half\n\t\t\t// closed (remote)\" or \"closed\" stream. A receiver MUST\n\t\t\t// NOT treat this as an error, see Section 5.1.\"\n\t\t\treturn nil\n\t\t}\n\t\tif !st.flow.add(int32(f.Increment)) {\n\t\t\treturn streamError(f.StreamID, ErrCodeFlowControl)\n\t\t}\n\tdefault: // connection-level flow control\n\t\tif !sc.flow.add(int32(f.Increment)) {\n\t\t\treturn goAwayFlowError{}\n\t\t}\n\t}\n\tsc.scheduleFrameWrite()\n\treturn nil\n}\n\nfunc (sc *serverConn) processResetStream(f *RSTStreamFrame) error {\n\tsc.serveG.check()\n\n\tstate, st := sc.state(f.StreamID)\n\tif state == stateIdle {\n\t\t// 6.4 \"RST_STREAM frames MUST NOT be sent for a\n\t\t// stream in the \"idle\" state. 
If a RST_STREAM frame\n\t\t// identifying an idle stream is received, the\n\t\t// recipient MUST treat this as a connection error\n\t\t// (Section 5.4.1) of type PROTOCOL_ERROR.\n\t\treturn ConnectionError(ErrCodeProtocol)\n\t}\n\tif st != nil {\n\t\tst.cancelCtx()\n\t\tsc.closeStream(st, streamError(f.StreamID, f.ErrCode))\n\t}\n\treturn nil\n}\n\nfunc (sc *serverConn) closeStream(st *stream, err error) {\n\tsc.serveG.check()\n\tif st.state == stateIdle || st.state == stateClosed {\n\t\tpanic(fmt.Sprintf(\"invariant; can't close stream in state %v\", st.state))\n\t}\n\tst.state = stateClosed\n\tif st.writeDeadline != nil {\n\t\tst.writeDeadline.Stop()\n\t}\n\tif st.isPushed() {\n\t\tsc.curPushedStreams--\n\t} else {\n\t\tsc.curClientStreams--\n\t}\n\tdelete(sc.streams, st.id)\n\tif len(sc.streams) == 0 {\n\t\tsc.setConnState(http.StateIdle)\n\t\tif sc.srv.IdleTimeout != 0 {\n\t\t\tsc.idleTimer.Reset(sc.srv.IdleTimeout)\n\t\t}\n\t\tif h1ServerKeepAlivesDisabled(sc.hs) {\n\t\t\tsc.startGracefulShutdownInternal()\n\t\t}\n\t}\n\tif p := st.body; p != nil {\n\t\t// Return any buffered unread bytes worth of conn-level flow control.\n\t\t// See golang.org/issue/16481\n\t\tsc.sendWindowUpdate(nil, p.Len())\n\n\t\tp.CloseWithError(err)\n\t}\n\tst.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc\n\tsc.writeSched.CloseStream(st.id)\n}\n\nfunc (sc *serverConn) processSettings(f *SettingsFrame) error {\n\tsc.serveG.check()\n\tif f.IsAck() {\n\t\tsc.unackedSettings--\n\t\tif sc.unackedSettings < 0 {\n\t\t\t// Why is the peer ACKing settings we never sent?\n\t\t\t// The spec doesn't mention this case, but\n\t\t\t// hang up on them anyway.\n\t\t\treturn ConnectionError(ErrCodeProtocol)\n\t\t}\n\t\treturn nil\n\t}\n\tif err := f.ForeachSetting(sc.processSetting); err != nil {\n\t\treturn err\n\t}\n\tsc.needToSendSettingsAck = true\n\tsc.scheduleFrameWrite()\n\treturn nil\n}\n\nfunc (sc *serverConn) processSetting(s Setting) error {\n\tsc.serveG.check()\n\tif err 
:= s.Valid(); err != nil {\n\t\treturn err\n\t}\n\tif VerboseLogs {\n\t\tsc.vlogf(\"http2: server processing setting %v\", s)\n\t}\n\tswitch s.ID {\n\tcase SettingHeaderTableSize:\n\t\tsc.headerTableSize = s.Val\n\t\tsc.hpackEncoder.SetMaxDynamicTableSize(s.Val)\n\tcase SettingEnablePush:\n\t\tsc.pushEnabled = s.Val != 0\n\tcase SettingMaxConcurrentStreams:\n\t\tsc.clientMaxStreams = s.Val\n\tcase SettingInitialWindowSize:\n\t\treturn sc.processSettingInitialWindowSize(s.Val)\n\tcase SettingMaxFrameSize:\n\t\tsc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31\n\tcase SettingMaxHeaderListSize:\n\t\tsc.peerMaxHeaderListSize = s.Val\n\tdefault:\n\t\t// Unknown setting: \"An endpoint that receives a SETTINGS\n\t\t// frame with any unknown or unsupported identifier MUST\n\t\t// ignore that setting.\"\n\t\tif VerboseLogs {\n\t\t\tsc.vlogf(\"http2: server ignoring unknown setting %v\", s)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (sc *serverConn) processSettingInitialWindowSize(val uint32) error {\n\tsc.serveG.check()\n\t// Note: val already validated to be within range by\n\t// processSetting's Valid call.\n\n\t// \"A SETTINGS frame can alter the initial flow control window\n\t// size for all current streams. 
When the value of\n\t// SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST\n\t// adjust the size of all stream flow control windows that it\n\t// maintains by the difference between the new value and the\n\t// old value.\"\n\told := sc.initialStreamSendWindowSize\n\tsc.initialStreamSendWindowSize = int32(val)\n\tgrowth := int32(val) - old // may be negative\n\tfor _, st := range sc.streams {\n\t\tif !st.flow.add(growth) {\n\t\t\t// 6.9.2 Initial Flow Control Window Size\n\t\t\t// \"An endpoint MUST treat a change to\n\t\t\t// SETTINGS_INITIAL_WINDOW_SIZE that causes any flow\n\t\t\t// control window to exceed the maximum size as a\n\t\t\t// connection error (Section 5.4.1) of type\n\t\t\t// FLOW_CONTROL_ERROR.\"\n\t\t\treturn ConnectionError(ErrCodeFlowControl)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (sc *serverConn) processData(f *DataFrame) error {\n\tsc.serveG.check()\n\tif sc.inGoAway && sc.goAwayCode != ErrCodeNo {\n\t\treturn nil\n\t}\n\tdata := f.Data()\n\n\t// \"If a DATA frame is received whose stream is not in \"open\"\n\t// or \"half closed (local)\" state, the recipient MUST respond\n\t// with a stream error (Section 5.4.2) of type STREAM_CLOSED.\"\n\tid := f.Header().StreamID\n\tstate, st := sc.state(id)\n\tif id == 0 || state == stateIdle {\n\t\t// Section 5.1: \"Receiving any frame other than HEADERS\n\t\t// or PRIORITY on a stream in this state MUST be\n\t\t// treated as a connection error (Section 5.4.1) of\n\t\t// type PROTOCOL_ERROR.\"\n\t\treturn ConnectionError(ErrCodeProtocol)\n\t}\n\tif st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued {\n\t\t// This includes sending a RST_STREAM if the stream is\n\t\t// in stateHalfClosedLocal (which currently means that\n\t\t// the http.Handler returned, so it's done reading &\n\t\t// done writing). 
Try to stop the client from sending\n\t\t// more DATA.\n\n\t\t// But still enforce their connection-level flow control,\n\t\t// and return any flow control bytes since we're not going\n\t\t// to consume them.\n\t\tif sc.inflow.available() < int32(f.Length) {\n\t\t\treturn streamError(id, ErrCodeFlowControl)\n\t\t}\n\t\t// Deduct the flow control from inflow, since we're\n\t\t// going to immediately add it back in\n\t\t// sendWindowUpdate, which also schedules sending the\n\t\t// frames.\n\t\tsc.inflow.take(int32(f.Length))\n\t\tsc.sendWindowUpdate(nil, int(f.Length)) // conn-level\n\n\t\tif st != nil && st.resetQueued {\n\t\t\t// Already have a stream error in flight. Don't send another.\n\t\t\treturn nil\n\t\t}\n\t\treturn streamError(id, ErrCodeStreamClosed)\n\t}\n\tif st.body == nil {\n\t\tpanic(\"internal error: should have a body in this state\")\n\t}\n\n\t// Sender sending more than they'd declared?\n\tif st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {\n\t\tst.body.CloseWithError(fmt.Errorf(\"sender tried to send more than declared Content-Length of %d bytes\", st.declBodyBytes))\n\t\treturn streamError(id, ErrCodeStreamClosed)\n\t}\n\tif f.Length > 0 {\n\t\t// Check whether the client has flow control quota.\n\t\tif st.inflow.available() < int32(f.Length) {\n\t\t\treturn streamError(id, ErrCodeFlowControl)\n\t\t}\n\t\tst.inflow.take(int32(f.Length))\n\n\t\tif len(data) > 0 {\n\t\t\twrote, err := st.body.Write(data)\n\t\t\tif err != nil {\n\t\t\t\treturn streamError(id, ErrCodeStreamClosed)\n\t\t\t}\n\t\t\tif wrote != len(data) {\n\t\t\t\tpanic(\"internal error: bad Writer\")\n\t\t\t}\n\t\t\tst.bodyBytes += int64(len(data))\n\t\t}\n\n\t\t// Return any padded flow control now, since we won't\n\t\t// refund it later on body reads.\n\t\tif pad := int32(f.Length) - int32(len(data)); pad > 0 {\n\t\t\tsc.sendWindowUpdate32(nil, pad)\n\t\t\tsc.sendWindowUpdate32(st, pad)\n\t\t}\n\t}\n\tif f.StreamEnded() 
{\n\t\tst.endStream()\n\t}\n\treturn nil\n}\n\nfunc (sc *serverConn) processGoAway(f *GoAwayFrame) error {\n\tsc.serveG.check()\n\tif f.ErrCode != ErrCodeNo {\n\t\tsc.logf(\"http2: received GOAWAY %+v, starting graceful shutdown\", f)\n\t} else {\n\t\tsc.vlogf(\"http2: received GOAWAY %+v, starting graceful shutdown\", f)\n\t}\n\tsc.startGracefulShutdownInternal()\n\t// http://tools.ietf.org/html/rfc7540#section-6.8\n\t// We should not create any new streams, which means we should disable push.\n\tsc.pushEnabled = false\n\treturn nil\n}\n\n// isPushed reports whether the stream is server-initiated.\nfunc (st *stream) isPushed() bool {\n\treturn st.id%2 == 0\n}\n\n// endStream closes a Request.Body's pipe. It is called when a DATA\n// frame says a request body is over (or after trailers).\nfunc (st *stream) endStream() {\n\tsc := st.sc\n\tsc.serveG.check()\n\n\tif st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {\n\t\tst.body.CloseWithError(fmt.Errorf(\"request declared a Content-Length of %d but only wrote %d bytes\",\n\t\t\tst.declBodyBytes, st.bodyBytes))\n\t} else {\n\t\tst.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)\n\t\tst.body.CloseWithError(io.EOF)\n\t}\n\tst.state = stateHalfClosedRemote\n}\n\n// copyTrailersToHandlerRequest is run in the Handler's goroutine in\n// its Request.Body.Read just before it gets io.EOF.\nfunc (st *stream) copyTrailersToHandlerRequest() {\n\tfor k, vv := range st.trailer {\n\t\tif _, ok := st.reqTrailer[k]; ok {\n\t\t\t// Only copy it over it was pre-declared.\n\t\t\tst.reqTrailer[k] = vv\n\t\t}\n\t}\n}\n\n// onWriteTimeout is run on its own goroutine (from time.AfterFunc)\n// when the stream's WriteTimeout has fired.\nfunc (st *stream) onWriteTimeout() {\n\tst.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)})\n}\n\nfunc (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {\n\tsc.serveG.check()\n\tid := f.StreamID\n\tif sc.inGoAway {\n\t\t// 
Ignore.\n\t\treturn nil\n\t}\n\t// http://tools.ietf.org/html/rfc7540#section-5.1.1\n\t// Streams initiated by a client MUST use odd-numbered stream\n\t// identifiers. [...] An endpoint that receives an unexpected\n\t// stream identifier MUST respond with a connection error\n\t// (Section 5.4.1) of type PROTOCOL_ERROR.\n\tif id%2 != 1 {\n\t\treturn ConnectionError(ErrCodeProtocol)\n\t}\n\t// A HEADERS frame can be used to create a new stream or\n\t// send a trailer for an open one. If we already have a stream\n\t// open, let it process its own HEADERS frame (trailers at this\n\t// point, if it's valid).\n\tif st := sc.streams[f.StreamID]; st != nil {\n\t\tif st.resetQueued {\n\t\t\t// We're sending RST_STREAM to close the stream, so don't bother\n\t\t\t// processing this frame.\n\t\t\treturn nil\n\t\t}\n\t\treturn st.processTrailerHeaders(f)\n\t}\n\n\t// [...] The identifier of a newly established stream MUST be\n\t// numerically greater than all streams that the initiating\n\t// endpoint has opened or reserved. [...]  An endpoint that\n\t// receives an unexpected stream identifier MUST respond with\n\t// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.\n\tif id <= sc.maxClientStreamID {\n\t\treturn ConnectionError(ErrCodeProtocol)\n\t}\n\tsc.maxClientStreamID = id\n\n\tif sc.idleTimer != nil {\n\t\tsc.idleTimer.Stop()\n\t}\n\n\t// http://tools.ietf.org/html/rfc7540#section-5.1.2\n\t// [...] Endpoints MUST NOT exceed the limit set by their peer. An\n\t// endpoint that receives a HEADERS frame that causes their\n\t// advertised concurrent stream limit to be exceeded MUST treat\n\t// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR\n\t// or REFUSED_STREAM.\n\tif sc.curClientStreams+1 > sc.advMaxStreams {\n\t\tif sc.unackedSettings == 0 {\n\t\t\t// They should know better.\n\t\t\treturn streamError(id, ErrCodeProtocol)\n\t\t}\n\t\t// Assume it's a network race, where they just haven't\n\t\t// received our last SETTINGS update. 
But actually\n\t\t// this can't happen yet, because we don't yet provide\n\t\t// a way for users to adjust server parameters at\n\t\t// runtime.\n\t\treturn streamError(id, ErrCodeRefusedStream)\n\t}\n\n\tinitialState := stateOpen\n\tif f.StreamEnded() {\n\t\tinitialState = stateHalfClosedRemote\n\t}\n\tst := sc.newStream(id, 0, initialState)\n\n\tif f.HasPriority() {\n\t\tif err := checkPriority(f.StreamID, f.Priority); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsc.writeSched.AdjustStream(st.id, f.Priority)\n\t}\n\n\trw, req, err := sc.newWriterAndRequest(st, f)\n\tif err != nil {\n\t\treturn err\n\t}\n\tst.reqTrailer = req.Trailer\n\tif st.reqTrailer != nil {\n\t\tst.trailer = make(http.Header)\n\t}\n\tst.body = req.Body.(*requestBody).pipe // may be nil\n\tst.declBodyBytes = req.ContentLength\n\n\thandler := sc.handler.ServeHTTP\n\tif f.Truncated {\n\t\t// Their header list was too long. Send a 431 error.\n\t\thandler = handleHeaderListTooLong\n\t} else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil {\n\t\thandler = new400Handler(err)\n\t}\n\n\t// The net/http package sets the read deadline from the\n\t// http.Server.ReadTimeout during the TLS handshake, but then\n\t// passes the connection off to us with the deadline already\n\t// set. Disarm it here after the request headers are read,\n\t// similar to how the http1 server works. Here it's\n\t// technically more like the http1 Server's ReadHeaderTimeout\n\t// (in Go 1.8), though. 
That's a more sane option anyway.\n\tif sc.hs.ReadTimeout != 0 {\n\t\tsc.conn.SetReadDeadline(time.Time{})\n\t}\n\n\tgo sc.runHandler(rw, req, handler)\n\treturn nil\n}\n\nfunc (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {\n\tsc := st.sc\n\tsc.serveG.check()\n\tif st.gotTrailerHeader {\n\t\treturn ConnectionError(ErrCodeProtocol)\n\t}\n\tst.gotTrailerHeader = true\n\tif !f.StreamEnded() {\n\t\treturn streamError(st.id, ErrCodeProtocol)\n\t}\n\n\tif len(f.PseudoFields()) > 0 {\n\t\treturn streamError(st.id, ErrCodeProtocol)\n\t}\n\tif st.trailer != nil {\n\t\tfor _, hf := range f.RegularFields() {\n\t\t\tkey := sc.canonicalHeader(hf.Name)\n\t\t\tif !ValidTrailerHeader(key) {\n\t\t\t\t// TODO: send more details to the peer somehow. But http2 has\n\t\t\t\t// no way to send debug data at a stream level. Discuss with\n\t\t\t\t// HTTP folk.\n\t\t\t\treturn streamError(st.id, ErrCodeProtocol)\n\t\t\t}\n\t\t\tst.trailer[key] = append(st.trailer[key], hf.Value)\n\t\t}\n\t}\n\tst.endStream()\n\treturn nil\n}\n\nfunc checkPriority(streamID uint32, p PriorityParam) error {\n\tif streamID == p.StreamDep {\n\t\t// Section 5.3.1: \"A stream cannot depend on itself. 
An endpoint MUST treat\n\t\t// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR.\"\n\t\t// Section 5.3.3 says that a stream can depend on one of its dependencies,\n\t\t// so it's only self-dependencies that are forbidden.\n\t\treturn streamError(streamID, ErrCodeProtocol)\n\t}\n\treturn nil\n}\n\nfunc (sc *serverConn) processPriority(f *PriorityFrame) error {\n\tif sc.inGoAway {\n\t\treturn nil\n\t}\n\tif err := checkPriority(f.StreamID, f.PriorityParam); err != nil {\n\t\treturn err\n\t}\n\tsc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)\n\treturn nil\n}\n\nfunc (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream {\n\tsc.serveG.check()\n\tif id == 0 {\n\t\tpanic(\"internal error: cannot create stream with id 0\")\n\t}\n\n\tctx, cancelCtx := contextWithCancel(sc.baseCtx)\n\tst := &stream{\n\t\tsc:        sc,\n\t\tid:        id,\n\t\tstate:     state,\n\t\tctx:       ctx,\n\t\tcancelCtx: cancelCtx,\n\t}\n\tst.cw.Init()\n\tst.flow.conn = &sc.flow // link to conn-level counter\n\tst.flow.add(sc.initialStreamSendWindowSize)\n\tst.inflow.conn = &sc.inflow // link to conn-level counter\n\tst.inflow.add(sc.srv.initialStreamRecvWindowSize())\n\tif sc.hs.WriteTimeout != 0 {\n\t\tst.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)\n\t}\n\n\tsc.streams[id] = st\n\tsc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})\n\tif st.isPushed() {\n\t\tsc.curPushedStreams++\n\t} else {\n\t\tsc.curClientStreams++\n\t}\n\tif sc.curOpenStreams() == 1 {\n\t\tsc.setConnState(http.StateActive)\n\t}\n\n\treturn st\n}\n\nfunc (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {\n\tsc.serveG.check()\n\n\trp := requestParam{\n\t\tmethod:    f.PseudoValue(\"method\"),\n\t\tscheme:    f.PseudoValue(\"scheme\"),\n\t\tauthority: f.PseudoValue(\"authority\"),\n\t\tpath:      f.PseudoValue(\"path\"),\n\t}\n\n\tisConnect := rp.method == \"CONNECT\"\n\tif 
isConnect {\n\t\tif rp.path != \"\" || rp.scheme != \"\" || rp.authority == \"\" {\n\t\t\treturn nil, nil, streamError(f.StreamID, ErrCodeProtocol)\n\t\t}\n\t} else if rp.method == \"\" || rp.path == \"\" || (rp.scheme != \"https\" && rp.scheme != \"http\") {\n\t\t// See 8.1.2.6 Malformed Requests and Responses:\n\t\t//\n\t\t// Malformed requests or responses that are detected\n\t\t// MUST be treated as a stream error (Section 5.4.2)\n\t\t// of type PROTOCOL_ERROR.\"\n\t\t//\n\t\t// 8.1.2.3 Request Pseudo-Header Fields\n\t\t// \"All HTTP/2 requests MUST include exactly one valid\n\t\t// value for the :method, :scheme, and :path\n\t\t// pseudo-header fields\"\n\t\treturn nil, nil, streamError(f.StreamID, ErrCodeProtocol)\n\t}\n\n\tbodyOpen := !f.StreamEnded()\n\tif rp.method == \"HEAD\" && bodyOpen {\n\t\t// HEAD requests can't have bodies\n\t\treturn nil, nil, streamError(f.StreamID, ErrCodeProtocol)\n\t}\n\n\trp.header = make(http.Header)\n\tfor _, hf := range f.RegularFields() {\n\t\trp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)\n\t}\n\tif rp.authority == \"\" {\n\t\trp.authority = rp.header.Get(\"Host\")\n\t}\n\n\trw, req, err := sc.newWriterAndRequestNoBody(st, rp)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif bodyOpen {\n\t\tif vv, ok := rp.header[\"Content-Length\"]; ok {\n\t\t\treq.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)\n\t\t} else {\n\t\t\treq.ContentLength = -1\n\t\t}\n\t\treq.Body.(*requestBody).pipe = &pipe{\n\t\t\tb: &dataBuffer{expected: req.ContentLength},\n\t\t}\n\t}\n\treturn rw, req, nil\n}\n\ntype requestParam struct {\n\tmethod                  string\n\tscheme, authority, path string\n\theader                  http.Header\n}\n\nfunc (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {\n\tsc.serveG.check()\n\n\tvar tlsState *tls.ConnectionState // nil if not scheme https\n\tif rp.scheme == \"https\" {\n\t\ttlsState = sc.tlsState\n\t}\n\n\tneedsContinue := 
rp.header.Get(\"Expect\") == \"100-continue\"\n\tif needsContinue {\n\t\trp.header.Del(\"Expect\")\n\t}\n\t// Merge Cookie headers into one \"; \"-delimited value.\n\tif cookies := rp.header[\"Cookie\"]; len(cookies) > 1 {\n\t\trp.header.Set(\"Cookie\", strings.Join(cookies, \"; \"))\n\t}\n\n\t// Setup Trailers\n\tvar trailer http.Header\n\tfor _, v := range rp.header[\"Trailer\"] {\n\t\tfor _, key := range strings.Split(v, \",\") {\n\t\t\tkey = http.CanonicalHeaderKey(strings.TrimSpace(key))\n\t\t\tswitch key {\n\t\t\tcase \"Transfer-Encoding\", \"Trailer\", \"Content-Length\":\n\t\t\t\t// Bogus. (copy of http1 rules)\n\t\t\t\t// Ignore.\n\t\t\tdefault:\n\t\t\t\tif trailer == nil {\n\t\t\t\t\ttrailer = make(http.Header)\n\t\t\t\t}\n\t\t\t\ttrailer[key] = nil\n\t\t\t}\n\t\t}\n\t}\n\tdelete(rp.header, \"Trailer\")\n\n\tvar url_ *url.URL\n\tvar requestURI string\n\tif rp.method == \"CONNECT\" {\n\t\turl_ = &url.URL{Host: rp.authority}\n\t\trequestURI = rp.authority // mimic HTTP/1 server behavior\n\t} else {\n\t\tvar err error\n\t\turl_, err = url.ParseRequestURI(rp.path)\n\t\tif err != nil {\n\t\t\treturn nil, nil, streamError(st.id, ErrCodeProtocol)\n\t\t}\n\t\trequestURI = rp.path\n\t}\n\n\tbody := &requestBody{\n\t\tconn:          sc,\n\t\tstream:        st,\n\t\tneedsContinue: needsContinue,\n\t}\n\treq := &http.Request{\n\t\tMethod:     rp.method,\n\t\tURL:        url_,\n\t\tRemoteAddr: sc.remoteAddrStr,\n\t\tHeader:     rp.header,\n\t\tRequestURI: requestURI,\n\t\tProto:      \"HTTP/2.0\",\n\t\tProtoMajor: 2,\n\t\tProtoMinor: 0,\n\t\tTLS:        tlsState,\n\t\tHost:       rp.authority,\n\t\tBody:       body,\n\t\tTrailer:    trailer,\n\t}\n\treq = requestWithContext(req, st.ctx)\n\n\trws := responseWriterStatePool.Get().(*responseWriterState)\n\tbwSave := rws.bw\n\t*rws = responseWriterState{} // zero all the fields\n\trws.conn = sc\n\trws.bw = bwSave\n\trws.bw.Reset(chunkWriter{rws})\n\trws.stream = st\n\trws.req = req\n\trws.body = body\n\n\trw := 
&responseWriter{rws: rws}\n\treturn rw, req, nil\n}\n\n// Run on its own goroutine.\nfunc (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {\n\tdidPanic := true\n\tdefer func() {\n\t\trw.rws.stream.cancelCtx()\n\t\tif didPanic {\n\t\t\te := recover()\n\t\t\tsc.writeFrameFromHandler(FrameWriteRequest{\n\t\t\t\twrite:  handlerPanicRST{rw.rws.stream.id},\n\t\t\t\tstream: rw.rws.stream,\n\t\t\t})\n\t\t\t// Same as net/http:\n\t\t\tif shouldLogPanic(e) {\n\t\t\t\tconst size = 64 << 10\n\t\t\t\tbuf := make([]byte, size)\n\t\t\t\tbuf = buf[:runtime.Stack(buf, false)]\n\t\t\t\tsc.logf(\"http2: panic serving %v: %v\\n%s\", sc.conn.RemoteAddr(), e, buf)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\trw.handlerDone()\n\t}()\n\thandler(rw, req)\n\tdidPanic = false\n}\n\nfunc handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {\n\t// 10.5.1 Limits on Header Block Size:\n\t// .. \"A server that receives a larger header block than it is\n\t// willing to handle can send an HTTP 431 (Request Header Fields Too\n\t// Large) status code\"\n\tconst statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+\n\tw.WriteHeader(statusRequestHeaderFieldsTooLarge)\n\tio.WriteString(w, \"<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>\")\n}\n\n// called from handler goroutines.\n// h may be nil.\nfunc (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {\n\tsc.serveG.checkNotOn() // NOT on\n\tvar errc chan error\n\tif headerData.h != nil {\n\t\t// If there's a header map (which we don't own), so we have to block on\n\t\t// waiting for this frame to be written, so an http.Flush mid-handler\n\t\t// writes out the correct value of keys, before a handler later potentially\n\t\t// mutates it.\n\t\terrc = errChanPool.Get().(chan error)\n\t}\n\tif err := sc.writeFrameFromHandler(FrameWriteRequest{\n\t\twrite:  headerData,\n\t\tstream: st,\n\t\tdone:   errc,\n\t}); err != nil {\n\t\treturn 
err\n\t}\n\tif errc != nil {\n\t\tselect {\n\t\tcase err := <-errc:\n\t\t\terrChanPool.Put(errc)\n\t\t\treturn err\n\t\tcase <-sc.doneServing:\n\t\t\treturn errClientDisconnected\n\t\tcase <-st.cw:\n\t\t\treturn errStreamClosed\n\t\t}\n\t}\n\treturn nil\n}\n\n// called from handler goroutines.\nfunc (sc *serverConn) write100ContinueHeaders(st *stream) {\n\tsc.writeFrameFromHandler(FrameWriteRequest{\n\t\twrite:  write100ContinueHeadersFrame{st.id},\n\t\tstream: st,\n\t})\n}\n\n// A bodyReadMsg tells the server loop that the http.Handler read n\n// bytes of the DATA from the client on the given stream.\ntype bodyReadMsg struct {\n\tst *stream\n\tn  int\n}\n\n// called from handler goroutines.\n// Notes that the handler for the given stream ID read n bytes of its body\n// and schedules flow control tokens to be sent.\nfunc (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {\n\tsc.serveG.checkNotOn() // NOT on\n\tif n > 0 {\n\t\tselect {\n\t\tcase sc.bodyReadCh <- bodyReadMsg{st, n}:\n\t\tcase <-sc.doneServing:\n\t\t}\n\t}\n}\n\nfunc (sc *serverConn) noteBodyRead(st *stream, n int) {\n\tsc.serveG.check()\n\tsc.sendWindowUpdate(nil, n) // conn-level\n\tif st.state != stateHalfClosedRemote && st.state != stateClosed {\n\t\t// Don't send this WINDOW_UPDATE if the stream is closed\n\t\t// remotely.\n\t\tsc.sendWindowUpdate(st, n)\n\t}\n}\n\n// st may be nil for conn-level\nfunc (sc *serverConn) sendWindowUpdate(st *stream, n int) {\n\tsc.serveG.check()\n\t// \"The legal range for the increment to the flow control\n\t// window is 1 to 2^31-1 (2,147,483,647) octets.\"\n\t// A Go Read call on 64-bit machines could in theory read\n\t// a larger Read than this. 
Very unlikely, but we handle it here\n\t// rather than elsewhere for now.\n\tconst maxUint31 = 1<<31 - 1\n\tfor n >= maxUint31 {\n\t\tsc.sendWindowUpdate32(st, maxUint31)\n\t\tn -= maxUint31\n\t}\n\tsc.sendWindowUpdate32(st, int32(n))\n}\n\n// st may be nil for conn-level\nfunc (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {\n\tsc.serveG.check()\n\tif n == 0 {\n\t\treturn\n\t}\n\tif n < 0 {\n\t\tpanic(\"negative update\")\n\t}\n\tvar streamID uint32\n\tif st != nil {\n\t\tstreamID = st.id\n\t}\n\tsc.writeFrame(FrameWriteRequest{\n\t\twrite:  writeWindowUpdate{streamID: streamID, n: uint32(n)},\n\t\tstream: st,\n\t})\n\tvar ok bool\n\tif st == nil {\n\t\tok = sc.inflow.add(n)\n\t} else {\n\t\tok = st.inflow.add(n)\n\t}\n\tif !ok {\n\t\tpanic(\"internal error; sent too many window updates without decrements?\")\n\t}\n}\n\n// requestBody is the Handler's Request.Body type.\n// Read and Close may be called concurrently.\ntype requestBody struct {\n\tstream        *stream\n\tconn          *serverConn\n\tclosed        bool  // for use by Close only\n\tsawEOF        bool  // for use by Read only\n\tpipe          *pipe // non-nil if we have a HTTP entity message body\n\tneedsContinue bool  // need to send a 100-continue\n}\n\nfunc (b *requestBody) Close() error {\n\tif b.pipe != nil && !b.closed {\n\t\tb.pipe.BreakWithError(errClosedBody)\n\t}\n\tb.closed = true\n\treturn nil\n}\n\nfunc (b *requestBody) Read(p []byte) (n int, err error) {\n\tif b.needsContinue {\n\t\tb.needsContinue = false\n\t\tb.conn.write100ContinueHeaders(b.stream)\n\t}\n\tif b.pipe == nil || b.sawEOF {\n\t\treturn 0, io.EOF\n\t}\n\tn, err = b.pipe.Read(p)\n\tif err == io.EOF {\n\t\tb.sawEOF = true\n\t}\n\tif b.conn == nil && inTests {\n\t\treturn\n\t}\n\tb.conn.noteBodyReadFromHandler(b.stream, n, err)\n\treturn\n}\n\n// responseWriter is the http.ResponseWriter implementation. It's\n// intentionally small (1 pointer wide) to minimize garbage. 
The\n// responseWriterState pointer inside is zeroed at the end of a\n// request (in handlerDone) and calls on the responseWriter thereafter\n// simply crash (caller's mistake), but the much larger responseWriterState\n// and buffers are reused between multiple requests.\ntype responseWriter struct {\n\trws *responseWriterState\n}\n\n// Optional http.ResponseWriter interfaces implemented.\nvar (\n\t_ http.CloseNotifier = (*responseWriter)(nil)\n\t_ http.Flusher       = (*responseWriter)(nil)\n\t_ stringWriter       = (*responseWriter)(nil)\n)\n\ntype responseWriterState struct {\n\t// immutable within a request:\n\tstream *stream\n\treq    *http.Request\n\tbody   *requestBody // to close at end of request, if DATA frames didn't\n\tconn   *serverConn\n\n\t// TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc\n\tbw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}\n\n\t// mutated by http.Handler goroutine:\n\thandlerHeader http.Header // nil until called\n\tsnapHeader    http.Header // snapshot of handlerHeader at WriteHeader time\n\ttrailers      []string    // set in writeChunk\n\tstatus        int         // status code passed to WriteHeader\n\twroteHeader   bool        // WriteHeader called (explicitly or implicitly). 
Not necessarily sent to user yet.\n\tsentHeader    bool        // have we sent the header frame?\n\thandlerDone   bool        // handler has finished\n\tdirty         bool        // a Write failed; don't reuse this responseWriterState\n\n\tsentContentLen int64 // non-zero if handler set a Content-Length header\n\twroteBytes     int64\n\n\tcloseNotifierMu sync.Mutex // guards closeNotifierCh\n\tcloseNotifierCh chan bool  // nil until first used\n}\n\ntype chunkWriter struct{ rws *responseWriterState }\n\nfunc (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }\n\nfunc (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 }\n\n// declareTrailer is called for each Trailer header when the\n// response header is written. It notes that a header will need to be\n// written in the trailers at the end of the response.\nfunc (rws *responseWriterState) declareTrailer(k string) {\n\tk = http.CanonicalHeaderKey(k)\n\tif !ValidTrailerHeader(k) {\n\t\t// Forbidden by RFC 2616 14.40.\n\t\trws.conn.logf(\"ignoring invalid trailer %q\", k)\n\t\treturn\n\t}\n\tif !strSliceContains(rws.trailers, k) {\n\t\trws.trailers = append(rws.trailers, k)\n\t}\n}\n\n// writeChunk writes chunks from the bufio.Writer. 
But because\n// bufio.Writer may bypass its chunking, sometimes p may be\n// arbitrarily large.\n//\n// writeChunk is also responsible (on the first chunk) for sending the\n// HEADER response.\nfunc (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {\n\tif !rws.wroteHeader {\n\t\trws.writeHeader(200)\n\t}\n\n\tisHeadResp := rws.req.Method == \"HEAD\"\n\tif !rws.sentHeader {\n\t\trws.sentHeader = true\n\t\tvar ctype, clen string\n\t\tif clen = rws.snapHeader.Get(\"Content-Length\"); clen != \"\" {\n\t\t\trws.snapHeader.Del(\"Content-Length\")\n\t\t\tclen64, err := strconv.ParseInt(clen, 10, 64)\n\t\t\tif err == nil && clen64 >= 0 {\n\t\t\t\trws.sentContentLen = clen64\n\t\t\t} else {\n\t\t\t\tclen = \"\"\n\t\t\t}\n\t\t}\n\t\tif clen == \"\" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {\n\t\t\tclen = strconv.Itoa(len(p))\n\t\t}\n\t\t_, hasContentType := rws.snapHeader[\"Content-Type\"]\n\t\tif !hasContentType && bodyAllowedForStatus(rws.status) {\n\t\t\tctype = http.DetectContentType(p)\n\t\t}\n\t\tvar date string\n\t\tif _, ok := rws.snapHeader[\"Date\"]; !ok {\n\t\t\t// TODO(bradfitz): be faster here, like net/http? 
measure.\n\t\t\tdate = time.Now().UTC().Format(http.TimeFormat)\n\t\t}\n\n\t\tfor _, v := range rws.snapHeader[\"Trailer\"] {\n\t\t\tforeachHeaderElement(v, rws.declareTrailer)\n\t\t}\n\n\t\tendStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp\n\t\terr = rws.conn.writeHeaders(rws.stream, &writeResHeaders{\n\t\t\tstreamID:      rws.stream.id,\n\t\t\thttpResCode:   rws.status,\n\t\t\th:             rws.snapHeader,\n\t\t\tendStream:     endStream,\n\t\t\tcontentType:   ctype,\n\t\t\tcontentLength: clen,\n\t\t\tdate:          date,\n\t\t})\n\t\tif err != nil {\n\t\t\trws.dirty = true\n\t\t\treturn 0, err\n\t\t}\n\t\tif endStream {\n\t\t\treturn 0, nil\n\t\t}\n\t}\n\tif isHeadResp {\n\t\treturn len(p), nil\n\t}\n\tif len(p) == 0 && !rws.handlerDone {\n\t\treturn 0, nil\n\t}\n\n\tif rws.handlerDone {\n\t\trws.promoteUndeclaredTrailers()\n\t}\n\n\tendStream := rws.handlerDone && !rws.hasTrailers()\n\tif len(p) > 0 || endStream {\n\t\t// only send a 0 byte DATA frame if we're ending the stream.\n\t\tif err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {\n\t\t\trws.dirty = true\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tif rws.handlerDone && rws.hasTrailers() {\n\t\terr = rws.conn.writeHeaders(rws.stream, &writeResHeaders{\n\t\t\tstreamID:  rws.stream.id,\n\t\t\th:         rws.handlerHeader,\n\t\t\ttrailers:  rws.trailers,\n\t\t\tendStream: true,\n\t\t})\n\t\tif err != nil {\n\t\t\trws.dirty = true\n\t\t}\n\t\treturn len(p), err\n\t}\n\treturn len(p), nil\n}\n\n// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys\n// that, if present, signals that the map entry is actually for\n// the response trailers, and not the response headers. The prefix\n// is stripped after the ServeHTTP call finishes and the values are\n// sent in the trailers.\n//\n// This mechanism is intended only for trailers that are not known\n// prior to the headers being written. 
If the set of trailers is fixed\n// or known before the header is written, the normal Go trailers mechanism\n// is preferred:\n//    https://golang.org/pkg/net/http/#ResponseWriter\n//    https://golang.org/pkg/net/http/#example_ResponseWriter_trailers\nconst TrailerPrefix = \"Trailer:\"\n\n// promoteUndeclaredTrailers permits http.Handlers to set trailers\n// after the header has already been flushed. Because the Go\n// ResponseWriter interface has no way to set Trailers (only the\n// Header), and because we didn't want to expand the ResponseWriter\n// interface, and because nobody used trailers, and because RFC 2616\n// says you SHOULD (but not must) predeclare any trailers in the\n// header, the official ResponseWriter rules said trailers in Go must\n// be predeclared, and then we reuse the same ResponseWriter.Header()\n// map to mean both Headers and Trailers. When it's time to write the\n// Trailers, we pick out the fields of Headers that were declared as\n// trailers. That worked for a while, until we found the first major\n// user of Trailers in the wild: gRPC (using them only over http2),\n// and gRPC libraries permit setting trailers mid-stream without\n// predeclarnig them. So: change of plans. We still permit the old\n// way, but we also permit this hack: if a Header() key begins with\n// \"Trailer:\", the suffix of that key is a Trailer. Because ':' is an\n// invalid token byte anyway, there is no ambiguity. 
(And it's already\n// filtered out) It's mildly hacky, but not terrible.\n//\n// This method runs after the Handler is done and promotes any Header\n// fields to be trailers.\nfunc (rws *responseWriterState) promoteUndeclaredTrailers() {\n\tfor k, vv := range rws.handlerHeader {\n\t\tif !strings.HasPrefix(k, TrailerPrefix) {\n\t\t\tcontinue\n\t\t}\n\t\ttrailerKey := strings.TrimPrefix(k, TrailerPrefix)\n\t\trws.declareTrailer(trailerKey)\n\t\trws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv\n\t}\n\n\tif len(rws.trailers) > 1 {\n\t\tsorter := sorterPool.Get().(*sorter)\n\t\tsorter.SortStrings(rws.trailers)\n\t\tsorterPool.Put(sorter)\n\t}\n}\n\nfunc (w *responseWriter) Flush() {\n\trws := w.rws\n\tif rws == nil {\n\t\tpanic(\"Header called after Handler finished\")\n\t}\n\tif rws.bw.Buffered() > 0 {\n\t\tif err := rws.bw.Flush(); err != nil {\n\t\t\t// Ignore the error. The frame writer already knows.\n\t\t\treturn\n\t\t}\n\t} else {\n\t\t// The bufio.Writer won't call chunkWriter.Write\n\t\t// (writeChunk with zero bytes, so we have to do it\n\t\t// ourselves to force the HTTP response header and/or\n\t\t// final DATA frame (with END_STREAM) to be sent.\n\t\trws.writeChunk(nil)\n\t}\n}\n\nfunc (w *responseWriter) CloseNotify() <-chan bool {\n\trws := w.rws\n\tif rws == nil {\n\t\tpanic(\"CloseNotify called after Handler finished\")\n\t}\n\trws.closeNotifierMu.Lock()\n\tch := rws.closeNotifierCh\n\tif ch == nil {\n\t\tch = make(chan bool, 1)\n\t\trws.closeNotifierCh = ch\n\t\tcw := rws.stream.cw\n\t\tgo func() {\n\t\t\tcw.Wait() // wait for close\n\t\t\tch <- true\n\t\t}()\n\t}\n\trws.closeNotifierMu.Unlock()\n\treturn ch\n}\n\nfunc (w *responseWriter) Header() http.Header {\n\trws := w.rws\n\tif rws == nil {\n\t\tpanic(\"Header called after Handler finished\")\n\t}\n\tif rws.handlerHeader == nil {\n\t\trws.handlerHeader = make(http.Header)\n\t}\n\treturn rws.handlerHeader\n}\n\nfunc (w *responseWriter) WriteHeader(code int) {\n\trws := w.rws\n\tif rws == 
nil {\n\t\tpanic(\"WriteHeader called after Handler finished\")\n\t}\n\trws.writeHeader(code)\n}\n\nfunc (rws *responseWriterState) writeHeader(code int) {\n\tif !rws.wroteHeader {\n\t\trws.wroteHeader = true\n\t\trws.status = code\n\t\tif len(rws.handlerHeader) > 0 {\n\t\t\trws.snapHeader = cloneHeader(rws.handlerHeader)\n\t\t}\n\t}\n}\n\nfunc cloneHeader(h http.Header) http.Header {\n\th2 := make(http.Header, len(h))\n\tfor k, vv := range h {\n\t\tvv2 := make([]string, len(vv))\n\t\tcopy(vv2, vv)\n\t\th2[k] = vv2\n\t}\n\treturn h2\n}\n\n// The Life Of A Write is like this:\n//\n// * Handler calls w.Write or w.WriteString ->\n// * -> rws.bw (*bufio.Writer) ->\n// * (Handler might call Flush)\n// * -> chunkWriter{rws}\n// * -> responseWriterState.writeChunk(p []byte)\n// * -> responseWriterState.writeChunk (most of the magic; see comment there)\nfunc (w *responseWriter) Write(p []byte) (n int, err error) {\n\treturn w.write(len(p), p, \"\")\n}\n\nfunc (w *responseWriter) WriteString(s string) (n int, err error) {\n\treturn w.write(len(s), nil, s)\n}\n\n// either dataB or dataS is non-zero.\nfunc (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {\n\trws := w.rws\n\tif rws == nil {\n\t\tpanic(\"Write called after Handler finished\")\n\t}\n\tif !rws.wroteHeader {\n\t\tw.WriteHeader(200)\n\t}\n\tif !bodyAllowedForStatus(rws.status) {\n\t\treturn 0, http.ErrBodyNotAllowed\n\t}\n\trws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set\n\tif rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {\n\t\t// TODO: send a RST_STREAM\n\t\treturn 0, errors.New(\"http2: handler wrote more than declared Content-Length\")\n\t}\n\n\tif dataB != nil {\n\t\treturn rws.bw.Write(dataB)\n\t} else {\n\t\treturn rws.bw.WriteString(dataS)\n\t}\n}\n\nfunc (w *responseWriter) handlerDone() {\n\trws := w.rws\n\tdirty := rws.dirty\n\trws.handlerDone = true\n\tw.Flush()\n\tw.rws = nil\n\tif !dirty {\n\t\t// Only recycle the 
pool if all prior Write calls to\n\t\t// the serverConn goroutine completed successfully. If\n\t\t// they returned earlier due to resets from the peer\n\t\t// there might still be write goroutines outstanding\n\t\t// from the serverConn referencing the rws memory. See\n\t\t// issue 20704.\n\t\tresponseWriterStatePool.Put(rws)\n\t}\n}\n\n// Push errors.\nvar (\n\tErrRecursivePush    = errors.New(\"http2: recursive push not allowed\")\n\tErrPushLimitReached = errors.New(\"http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS\")\n)\n\n// pushOptions is the internal version of http.PushOptions, which we\n// cannot include here because it's only defined in Go 1.8 and later.\ntype pushOptions struct {\n\tMethod string\n\tHeader http.Header\n}\n\nfunc (w *responseWriter) push(target string, opts pushOptions) error {\n\tst := w.rws.stream\n\tsc := st.sc\n\tsc.serveG.checkNotOn()\n\n\t// No recursive pushes: \"PUSH_PROMISE frames MUST only be sent on a peer-initiated stream.\"\n\t// http://tools.ietf.org/html/rfc7540#section-6.6\n\tif st.isPushed() {\n\t\treturn ErrRecursivePush\n\t}\n\n\t// Default options.\n\tif opts.Method == \"\" {\n\t\topts.Method = \"GET\"\n\t}\n\tif opts.Header == nil {\n\t\topts.Header = http.Header{}\n\t}\n\twantScheme := \"http\"\n\tif w.rws.req.TLS != nil {\n\t\twantScheme = \"https\"\n\t}\n\n\t// Validate the request.\n\tu, err := url.Parse(target)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif u.Scheme == \"\" {\n\t\tif !strings.HasPrefix(target, \"/\") {\n\t\t\treturn fmt.Errorf(\"target must be an absolute URL or an absolute path: %q\", target)\n\t\t}\n\t\tu.Scheme = wantScheme\n\t\tu.Host = w.rws.req.Host\n\t} else {\n\t\tif u.Scheme != wantScheme {\n\t\t\treturn fmt.Errorf(\"cannot push URL with scheme %q from request with scheme %q\", u.Scheme, wantScheme)\n\t\t}\n\t\tif u.Host == \"\" {\n\t\t\treturn errors.New(\"URL must have a host\")\n\t\t}\n\t}\n\tfor k := range opts.Header {\n\t\tif strings.HasPrefix(k, \":\") 
{\n\t\t\treturn fmt.Errorf(\"promised request headers cannot include pseudo header %q\", k)\n\t\t}\n\t\t// These headers are meaningful only if the request has a body,\n\t\t// but PUSH_PROMISE requests cannot have a body.\n\t\t// http://tools.ietf.org/html/rfc7540#section-8.2\n\t\t// Also disallow Host, since the promised URL must be absolute.\n\t\tswitch strings.ToLower(k) {\n\t\tcase \"content-length\", \"content-encoding\", \"trailer\", \"te\", \"expect\", \"host\":\n\t\t\treturn fmt.Errorf(\"promised request headers cannot include %q\", k)\n\t\t}\n\t}\n\tif err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {\n\t\treturn err\n\t}\n\n\t// The RFC effectively limits promised requests to GET and HEAD:\n\t// \"Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]\"\n\t// http://tools.ietf.org/html/rfc7540#section-8.2\n\tif opts.Method != \"GET\" && opts.Method != \"HEAD\" {\n\t\treturn fmt.Errorf(\"method %q must be GET or HEAD\", opts.Method)\n\t}\n\n\tmsg := &startPushRequest{\n\t\tparent: st,\n\t\tmethod: opts.Method,\n\t\turl:    u,\n\t\theader: cloneHeader(opts.Header),\n\t\tdone:   errChanPool.Get().(chan error),\n\t}\n\n\tselect {\n\tcase <-sc.doneServing:\n\t\treturn errClientDisconnected\n\tcase <-st.cw:\n\t\treturn errStreamClosed\n\tcase sc.serveMsgCh <- msg:\n\t}\n\n\tselect {\n\tcase <-sc.doneServing:\n\t\treturn errClientDisconnected\n\tcase <-st.cw:\n\t\treturn errStreamClosed\n\tcase err := <-msg.done:\n\t\terrChanPool.Put(msg.done)\n\t\treturn err\n\t}\n}\n\ntype startPushRequest struct {\n\tparent *stream\n\tmethod string\n\turl    *url.URL\n\theader http.Header\n\tdone   chan error\n}\n\nfunc (sc *serverConn) startPush(msg *startPushRequest) {\n\tsc.serveG.check()\n\n\t// http://tools.ietf.org/html/rfc7540#section-6.6.\n\t// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that\n\t// is in either the \"open\" or \"half-closed (remote)\" state.\n\tif msg.parent.state != stateOpen && 
msg.parent.state != stateHalfClosedRemote {\n\t\t// responseWriter.Push checks that the stream is peer-initiaed.\n\t\tmsg.done <- errStreamClosed\n\t\treturn\n\t}\n\n\t// http://tools.ietf.org/html/rfc7540#section-6.6.\n\tif !sc.pushEnabled {\n\t\tmsg.done <- http.ErrNotSupported\n\t\treturn\n\t}\n\n\t// PUSH_PROMISE frames must be sent in increasing order by stream ID, so\n\t// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE\n\t// is written. Once the ID is allocated, we start the request handler.\n\tallocatePromisedID := func() (uint32, error) {\n\t\tsc.serveG.check()\n\n\t\t// Check this again, just in case. Technically, we might have received\n\t\t// an updated SETTINGS by the time we got around to writing this frame.\n\t\tif !sc.pushEnabled {\n\t\t\treturn 0, http.ErrNotSupported\n\t\t}\n\t\t// http://tools.ietf.org/html/rfc7540#section-6.5.2.\n\t\tif sc.curPushedStreams+1 > sc.clientMaxStreams {\n\t\t\treturn 0, ErrPushLimitReached\n\t\t}\n\n\t\t// http://tools.ietf.org/html/rfc7540#section-5.1.1.\n\t\t// Streams initiated by the server MUST use even-numbered identifiers.\n\t\t// A server that is unable to establish a new stream identifier can send a GOAWAY\n\t\t// frame so that the client is forced to open a new connection for new streams.\n\t\tif sc.maxPushPromiseID+2 >= 1<<31 {\n\t\t\tsc.startGracefulShutdownInternal()\n\t\t\treturn 0, ErrPushLimitReached\n\t\t}\n\t\tsc.maxPushPromiseID += 2\n\t\tpromisedID := sc.maxPushPromiseID\n\n\t\t// http://tools.ietf.org/html/rfc7540#section-8.2.\n\t\t// Strictly speaking, the new stream should start in \"reserved (local)\", then\n\t\t// transition to \"half closed (remote)\" after sending the initial HEADERS, but\n\t\t// we start in \"half closed (remote)\" for simplicity.\n\t\t// See further comments at the definition of stateHalfClosedRemote.\n\t\tpromised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)\n\t\trw, req, err := sc.newWriterAndRequestNoBody(promised, 
requestParam{\n\t\t\tmethod:    msg.method,\n\t\t\tscheme:    msg.url.Scheme,\n\t\t\tauthority: msg.url.Host,\n\t\t\tpath:      msg.url.RequestURI(),\n\t\t\theader:    cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE\n\t\t})\n\t\tif err != nil {\n\t\t\t// Should not happen, since we've already validated msg.url.\n\t\t\tpanic(fmt.Sprintf(\"newWriterAndRequestNoBody(%+v): %v\", msg.url, err))\n\t\t}\n\n\t\tgo sc.runHandler(rw, req, sc.handler.ServeHTTP)\n\t\treturn promisedID, nil\n\t}\n\n\tsc.writeFrame(FrameWriteRequest{\n\t\twrite: &writePushPromise{\n\t\t\tstreamID:           msg.parent.id,\n\t\t\tmethod:             msg.method,\n\t\t\turl:                msg.url,\n\t\t\th:                  msg.header,\n\t\t\tallocatePromisedID: allocatePromisedID,\n\t\t},\n\t\tstream: msg.parent,\n\t\tdone:   msg.done,\n\t})\n}\n\n// foreachHeaderElement splits v according to the \"#rule\" construction\n// in RFC 2616 section 2.1 and calls fn for each non-empty element.\nfunc foreachHeaderElement(v string, fn func(string)) {\n\tv = textproto.TrimString(v)\n\tif v == \"\" {\n\t\treturn\n\t}\n\tif !strings.Contains(v, \",\") {\n\t\tfn(v)\n\t\treturn\n\t}\n\tfor _, f := range strings.Split(v, \",\") {\n\t\tif f = textproto.TrimString(f); f != \"\" {\n\t\t\tfn(f)\n\t\t}\n\t}\n}\n\n// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2\nvar connHeaders = []string{\n\t\"Connection\",\n\t\"Keep-Alive\",\n\t\"Proxy-Connection\",\n\t\"Transfer-Encoding\",\n\t\"Upgrade\",\n}\n\n// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request,\n// per RFC 7540 Section 8.1.2.2.\n// The returned error is reported to users.\nfunc checkValidHTTP2RequestHeaders(h http.Header) error {\n\tfor _, k := range connHeaders {\n\t\tif _, ok := h[k]; ok {\n\t\t\treturn fmt.Errorf(\"request header %q is not valid in HTTP/2\", k)\n\t\t}\n\t}\n\tte := h[\"Te\"]\n\tif len(te) > 0 && (len(te) > 1 || (te[0] != \"trailers\" && te[0] != \"\")) 
{\n\t\treturn errors.New(`request header \"TE\" may only be \"trailers\" in HTTP/2`)\n\t}\n\treturn nil\n}\n\nfunc new400Handler(err error) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t}\n}\n\n// ValidTrailerHeader reports whether name is a valid header field name to appear\n// in trailers.\n// See: http://tools.ietf.org/html/rfc7230#section-4.1.2\nfunc ValidTrailerHeader(name string) bool {\n\tname = http.CanonicalHeaderKey(name)\n\tif strings.HasPrefix(name, \"If-\") || badTrailer[name] {\n\t\treturn false\n\t}\n\treturn true\n}\n\nvar badTrailer = map[string]bool{\n\t\"Authorization\":       true,\n\t\"Cache-Control\":       true,\n\t\"Connection\":          true,\n\t\"Content-Encoding\":    true,\n\t\"Content-Length\":      true,\n\t\"Content-Range\":       true,\n\t\"Content-Type\":        true,\n\t\"Expect\":              true,\n\t\"Host\":                true,\n\t\"Keep-Alive\":          true,\n\t\"Max-Forwards\":        true,\n\t\"Pragma\":              true,\n\t\"Proxy-Authenticate\":  true,\n\t\"Proxy-Authorization\": true,\n\t\"Proxy-Connection\":    true,\n\t\"Range\":               true,\n\t\"Realm\":               true,\n\t\"Te\":                  true,\n\t\"Trailer\":             true,\n\t\"Transfer-Encoding\":   true,\n\t\"Www-Authenticate\":    true,\n}\n\n// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives\n// disabled. See comments on h1ServerShutdownChan above for why\n// the code is written this way.\nfunc h1ServerKeepAlivesDisabled(hs *http.Server) bool {\n\tvar x interface{} = hs\n\ttype I interface {\n\t\tdoKeepAlives() bool\n\t}\n\tif hs, ok := x.(I); ok {\n\t\treturn !hs.doKeepAlives()\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/server_push_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.8\n\npackage http2\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestServer_Push_Success(t *testing.T) {\n\tconst (\n\t\tmainBody   = \"<html>index page</html>\"\n\t\tpushedBody = \"<html>pushed page</html>\"\n\t\tuserAgent  = \"testagent\"\n\t\tcookie     = \"testcookie\"\n\t)\n\n\tvar stURL string\n\tcheckPromisedReq := func(r *http.Request, wantMethod string, wantH http.Header) error {\n\t\tif got, want := r.Method, wantMethod; got != want {\n\t\t\treturn fmt.Errorf(\"promised Req.Method=%q, want %q\", got, want)\n\t\t}\n\t\tif got, want := r.Header, wantH; !reflect.DeepEqual(got, want) {\n\t\t\treturn fmt.Errorf(\"promised Req.Header=%q, want %q\", got, want)\n\t\t}\n\t\tif got, want := \"https://\"+r.Host, stURL; got != want {\n\t\t\treturn fmt.Errorf(\"promised Req.Host=%q, want %q\", got, want)\n\t\t}\n\t\tif r.Body == nil {\n\t\t\treturn fmt.Errorf(\"nil Body\")\n\t\t}\n\t\tif buf, err := ioutil.ReadAll(r.Body); err != nil || len(buf) != 0 {\n\t\t\treturn fmt.Errorf(\"ReadAll(Body)=%q,%v, want '',nil\", buf, err)\n\t\t}\n\t\treturn nil\n\t}\n\n\terrc := make(chan error, 3)\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch r.URL.RequestURI() {\n\t\tcase \"/\":\n\t\t\t// Push \"/pushed?get\" as a GET request, using an absolute URL.\n\t\t\topt := &http.PushOptions{\n\t\t\t\tHeader: http.Header{\n\t\t\t\t\t\"User-Agent\": {userAgent},\n\t\t\t\t},\n\t\t\t}\n\t\t\tif err := w.(http.Pusher).Push(stURL+\"/pushed?get\", opt); err != nil {\n\t\t\t\terrc <- fmt.Errorf(\"error pushing /pushed?get: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// Push \"/pushed?head\" as a HEAD request, using a path.\n\t\t\topt = 
&http.PushOptions{\n\t\t\t\tMethod: \"HEAD\",\n\t\t\t\tHeader: http.Header{\n\t\t\t\t\t\"User-Agent\": {userAgent},\n\t\t\t\t\t\"Cookie\":     {cookie},\n\t\t\t\t},\n\t\t\t}\n\t\t\tif err := w.(http.Pusher).Push(\"/pushed?head\", opt); err != nil {\n\t\t\t\terrc <- fmt.Errorf(\"error pushing /pushed?head: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.Header().Set(\"Content-Type\", \"text/html\")\n\t\t\tw.Header().Set(\"Content-Length\", strconv.Itoa(len(mainBody)))\n\t\t\tw.WriteHeader(200)\n\t\t\tio.WriteString(w, mainBody)\n\t\t\terrc <- nil\n\n\t\tcase \"/pushed?get\":\n\t\t\twantH := http.Header{}\n\t\t\twantH.Set(\"User-Agent\", userAgent)\n\t\t\tif err := checkPromisedReq(r, \"GET\", wantH); err != nil {\n\t\t\t\terrc <- fmt.Errorf(\"/pushed?get: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.Header().Set(\"Content-Type\", \"text/html\")\n\t\t\tw.Header().Set(\"Content-Length\", strconv.Itoa(len(pushedBody)))\n\t\t\tw.WriteHeader(200)\n\t\t\tio.WriteString(w, pushedBody)\n\t\t\terrc <- nil\n\n\t\tcase \"/pushed?head\":\n\t\t\twantH := http.Header{}\n\t\t\twantH.Set(\"User-Agent\", userAgent)\n\t\t\twantH.Set(\"Cookie\", cookie)\n\t\t\tif err := checkPromisedReq(r, \"HEAD\", wantH); err != nil {\n\t\t\t\terrc <- fmt.Errorf(\"/pushed?head: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.WriteHeader(204)\n\t\t\terrc <- nil\n\n\t\tdefault:\n\t\t\terrc <- fmt.Errorf(\"unknown RequestURL %q\", r.URL.RequestURI())\n\t\t}\n\t})\n\tstURL = st.ts.URL\n\n\t// Send one request, which should push two responses.\n\tst.greet()\n\tgetSlash(st)\n\tfor k := 0; k < 3; k++ {\n\t\tselect {\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Errorf(\"timeout waiting for handler %d to finish\", k)\n\t\tcase err := <-errc:\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\n\tcheckPushPromise := func(f Frame, promiseID uint32, wantH [][2]string) error {\n\t\tpp, ok := f.(*PushPromiseFrame)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"got a %T; want *PushPromiseFrame\", 
f)\n\t\t}\n\t\tif !pp.HeadersEnded() {\n\t\t\treturn fmt.Errorf(\"want END_HEADERS flag in PushPromiseFrame\")\n\t\t}\n\t\tif got, want := pp.PromiseID, promiseID; got != want {\n\t\t\treturn fmt.Errorf(\"got PromiseID %v; want %v\", got, want)\n\t\t}\n\t\tgotH := st.decodeHeader(pp.HeaderBlockFragment())\n\t\tif !reflect.DeepEqual(gotH, wantH) {\n\t\t\treturn fmt.Errorf(\"got promised headers %v; want %v\", gotH, wantH)\n\t\t}\n\t\treturn nil\n\t}\n\tcheckHeaders := func(f Frame, wantH [][2]string) error {\n\t\thf, ok := f.(*HeadersFrame)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"got a %T; want *HeadersFrame\", f)\n\t\t}\n\t\tgotH := st.decodeHeader(hf.HeaderBlockFragment())\n\t\tif !reflect.DeepEqual(gotH, wantH) {\n\t\t\treturn fmt.Errorf(\"got response headers %v; want %v\", gotH, wantH)\n\t\t}\n\t\treturn nil\n\t}\n\tcheckData := func(f Frame, wantData string) error {\n\t\tdf, ok := f.(*DataFrame)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"got a %T; want *DataFrame\", f)\n\t\t}\n\t\tif gotData := string(df.Data()); gotData != wantData {\n\t\t\treturn fmt.Errorf(\"got response data %q; want %q\", gotData, wantData)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Stream 1 has 2 PUSH_PROMISE + HEADERS + DATA\n\t// Stream 2 has HEADERS + DATA\n\t// Stream 4 has HEADERS\n\texpected := map[uint32][]func(Frame) error{\n\t\t1: {\n\t\t\tfunc(f Frame) error {\n\t\t\t\treturn checkPushPromise(f, 2, [][2]string{\n\t\t\t\t\t{\":method\", \"GET\"},\n\t\t\t\t\t{\":scheme\", \"https\"},\n\t\t\t\t\t{\":authority\", st.ts.Listener.Addr().String()},\n\t\t\t\t\t{\":path\", \"/pushed?get\"},\n\t\t\t\t\t{\"user-agent\", userAgent},\n\t\t\t\t})\n\t\t\t},\n\t\t\tfunc(f Frame) error {\n\t\t\t\treturn checkPushPromise(f, 4, [][2]string{\n\t\t\t\t\t{\":method\", \"HEAD\"},\n\t\t\t\t\t{\":scheme\", \"https\"},\n\t\t\t\t\t{\":authority\", st.ts.Listener.Addr().String()},\n\t\t\t\t\t{\":path\", \"/pushed?head\"},\n\t\t\t\t\t{\"cookie\", cookie},\n\t\t\t\t\t{\"user-agent\", 
userAgent},\n\t\t\t\t})\n\t\t\t},\n\t\t\tfunc(f Frame) error {\n\t\t\t\treturn checkHeaders(f, [][2]string{\n\t\t\t\t\t{\":status\", \"200\"},\n\t\t\t\t\t{\"content-type\", \"text/html\"},\n\t\t\t\t\t{\"content-length\", strconv.Itoa(len(mainBody))},\n\t\t\t\t})\n\t\t\t},\n\t\t\tfunc(f Frame) error {\n\t\t\t\treturn checkData(f, mainBody)\n\t\t\t},\n\t\t},\n\t\t2: {\n\t\t\tfunc(f Frame) error {\n\t\t\t\treturn checkHeaders(f, [][2]string{\n\t\t\t\t\t{\":status\", \"200\"},\n\t\t\t\t\t{\"content-type\", \"text/html\"},\n\t\t\t\t\t{\"content-length\", strconv.Itoa(len(pushedBody))},\n\t\t\t\t})\n\t\t\t},\n\t\t\tfunc(f Frame) error {\n\t\t\t\treturn checkData(f, pushedBody)\n\t\t\t},\n\t\t},\n\t\t4: {\n\t\t\tfunc(f Frame) error {\n\t\t\t\treturn checkHeaders(f, [][2]string{\n\t\t\t\t\t{\":status\", \"204\"},\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t}\n\n\tconsumed := map[uint32]int{}\n\tfor k := 0; len(expected) > 0; k++ {\n\t\tf, err := st.readFrame()\n\t\tif err != nil {\n\t\t\tfor id, left := range expected {\n\t\t\t\tt.Errorf(\"stream %d: missing %d frames\", id, len(left))\n\t\t\t}\n\t\t\tt.Fatalf(\"readFrame %d: %v\", k, err)\n\t\t}\n\t\tid := f.Header().StreamID\n\t\tlabel := fmt.Sprintf(\"stream %d, frame %d\", id, consumed[id])\n\t\tif len(expected[id]) == 0 {\n\t\t\tt.Fatalf(\"%s: unexpected frame %#+v\", label, f)\n\t\t}\n\t\tcheck := expected[id][0]\n\t\texpected[id] = expected[id][1:]\n\t\tif len(expected[id]) == 0 {\n\t\t\tdelete(expected, id)\n\t\t}\n\t\tif err := check(f); err != nil {\n\t\t\tt.Fatalf(\"%s: %v\", label, err)\n\t\t}\n\t\tconsumed[id]++\n\t}\n}\n\nfunc TestServer_Push_SuccessNoRace(t *testing.T) {\n\t// Regression test for issue #18326. 
Ensure the request handler can mutate\n\t// pushed request headers without racing with the PUSH_PROMISE write.\n\terrc := make(chan error, 2)\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch r.URL.RequestURI() {\n\t\tcase \"/\":\n\t\t\topt := &http.PushOptions{\n\t\t\t\tHeader: http.Header{\"User-Agent\": {\"testagent\"}},\n\t\t\t}\n\t\t\tif err := w.(http.Pusher).Push(\"/pushed\", opt); err != nil {\n\t\t\t\terrc <- fmt.Errorf(\"error pushing: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.WriteHeader(200)\n\t\t\terrc <- nil\n\n\t\tcase \"/pushed\":\n\t\t\t// Update request header, ensure there is no race.\n\t\t\tr.Header.Set(\"User-Agent\", \"newagent\")\n\t\t\tr.Header.Set(\"Cookie\", \"cookie\")\n\t\t\tw.WriteHeader(200)\n\t\t\terrc <- nil\n\n\t\tdefault:\n\t\t\terrc <- fmt.Errorf(\"unknown RequestURL %q\", r.URL.RequestURI())\n\t\t}\n\t})\n\n\t// Send one request, which should push one response.\n\tst.greet()\n\tgetSlash(st)\n\tfor k := 0; k < 2; k++ {\n\t\tselect {\n\t\tcase <-time.After(2 * time.Second):\n\t\t\tt.Errorf(\"timeout waiting for handler %d to finish\", k)\n\t\tcase err := <-errc:\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestServer_Push_RejectRecursivePush(t *testing.T) {\n\t// Expect two requests, but might get three if there's a bug and the second push succeeds.\n\terrc := make(chan error, 3)\n\thandler := func(w http.ResponseWriter, r *http.Request) error {\n\t\tbaseURL := \"https://\" + r.Host\n\t\tswitch r.URL.Path {\n\t\tcase \"/\":\n\t\t\tif err := w.(http.Pusher).Push(baseURL+\"/push1\", nil); err != nil {\n\t\t\t\treturn fmt.Errorf(\"first Push()=%v, want nil\", err)\n\t\t\t}\n\t\t\treturn nil\n\n\t\tcase \"/push1\":\n\t\t\tif got, want := w.(http.Pusher).Push(baseURL+\"/push2\", nil), ErrRecursivePush; got != want {\n\t\t\t\treturn fmt.Errorf(\"Push()=%v, want %v\", got, want)\n\t\t\t}\n\t\t\treturn nil\n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unexpected path: 
%q\", r.URL.Path)\n\t\t}\n\t}\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\terrc <- handler(w, r)\n\t})\n\tdefer st.Close()\n\tst.greet()\n\tgetSlash(st)\n\tif err := <-errc; err != nil {\n\t\tt.Errorf(\"First request failed: %v\", err)\n\t}\n\tif err := <-errc; err != nil {\n\t\tt.Errorf(\"Second request failed: %v\", err)\n\t}\n}\n\nfunc testServer_Push_RejectSingleRequest(t *testing.T, doPush func(http.Pusher, *http.Request) error, settings ...Setting) {\n\t// Expect one request, but might get two if there's a bug and the push succeeds.\n\terrc := make(chan error, 2)\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\terrc <- doPush(w.(http.Pusher), r)\n\t})\n\tdefer st.Close()\n\tst.greet()\n\tif err := st.fr.WriteSettings(settings...); err != nil {\n\t\tst.t.Fatalf(\"WriteSettings: %v\", err)\n\t}\n\tst.wantSettingsAck()\n\tgetSlash(st)\n\tif err := <-errc; err != nil {\n\t\tt.Error(err)\n\t}\n\t// Should not get a PUSH_PROMISE frame.\n\thf := st.wantHeaders()\n\tif !hf.StreamEnded() {\n\t\tt.Error(\"stream should end after headers\")\n\t}\n}\n\nfunc TestServer_Push_RejectIfDisabled(t *testing.T) {\n\ttestServer_Push_RejectSingleRequest(t,\n\t\tfunc(p http.Pusher, r *http.Request) error {\n\t\t\tif got, want := p.Push(\"https://\"+r.Host+\"/pushed\", nil), http.ErrNotSupported; got != want {\n\t\t\t\treturn fmt.Errorf(\"Push()=%v, want %v\", got, want)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t\tSetting{SettingEnablePush, 0})\n}\n\nfunc TestServer_Push_RejectWhenNoConcurrentStreams(t *testing.T) {\n\ttestServer_Push_RejectSingleRequest(t,\n\t\tfunc(p http.Pusher, r *http.Request) error {\n\t\t\tif got, want := p.Push(\"https://\"+r.Host+\"/pushed\", nil), ErrPushLimitReached; got != want {\n\t\t\t\treturn fmt.Errorf(\"Push()=%v, want %v\", got, want)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t\tSetting{SettingMaxConcurrentStreams, 0})\n}\n\nfunc TestServer_Push_RejectWrongScheme(t *testing.T) 
{\n\ttestServer_Push_RejectSingleRequest(t,\n\t\tfunc(p http.Pusher, r *http.Request) error {\n\t\t\tif err := p.Push(\"http://\"+r.Host+\"/pushed\", nil); err == nil {\n\t\t\t\treturn errors.New(\"Push() should have failed (push target URL is http)\")\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n}\n\nfunc TestServer_Push_RejectMissingHost(t *testing.T) {\n\ttestServer_Push_RejectSingleRequest(t,\n\t\tfunc(p http.Pusher, r *http.Request) error {\n\t\t\tif err := p.Push(\"https:pushed\", nil); err == nil {\n\t\t\t\treturn errors.New(\"Push() should have failed (push target URL missing host)\")\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n}\n\nfunc TestServer_Push_RejectRelativePath(t *testing.T) {\n\ttestServer_Push_RejectSingleRequest(t,\n\t\tfunc(p http.Pusher, r *http.Request) error {\n\t\t\tif err := p.Push(\"../test\", nil); err == nil {\n\t\t\t\treturn errors.New(\"Push() should have failed (push target is a relative path)\")\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n}\n\nfunc TestServer_Push_RejectForbiddenMethod(t *testing.T) {\n\ttestServer_Push_RejectSingleRequest(t,\n\t\tfunc(p http.Pusher, r *http.Request) error {\n\t\t\tif err := p.Push(\"https://\"+r.Host+\"/pushed\", &http.PushOptions{Method: \"POST\"}); err == nil {\n\t\t\t\treturn errors.New(\"Push() should have failed (cannot promise a POST)\")\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n}\n\nfunc TestServer_Push_RejectForbiddenHeader(t *testing.T) {\n\ttestServer_Push_RejectSingleRequest(t,\n\t\tfunc(p http.Pusher, r *http.Request) error {\n\t\t\theader := http.Header{\n\t\t\t\t\"Content-Length\":   {\"10\"},\n\t\t\t\t\"Content-Encoding\": {\"gzip\"},\n\t\t\t\t\"Trailer\":          {\"Foo\"},\n\t\t\t\t\"Te\":               {\"trailers\"},\n\t\t\t\t\"Host\":             {\"test.com\"},\n\t\t\t\t\":authority\":       {\"test.com\"},\n\t\t\t}\n\t\t\tif err := p.Push(\"https://\"+r.Host+\"/pushed\", &http.PushOptions{Header: header}); err == nil {\n\t\t\t\treturn errors.New(\"Push() should have failed (forbidden 
headers)\")\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n}\n\nfunc TestServer_Push_StateTransitions(t *testing.T) {\n\tconst body = \"foo\"\n\n\tgotPromise := make(chan bool)\n\tfinishedPush := make(chan bool)\n\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch r.URL.RequestURI() {\n\t\tcase \"/\":\n\t\t\tif err := w.(http.Pusher).Push(\"/pushed\", nil); err != nil {\n\t\t\t\tt.Errorf(\"Push error: %v\", err)\n\t\t\t}\n\t\t\t// Don't finish this request until the push finishes so we don't\n\t\t\t// nondeterministically interleave output frames with the push.\n\t\t\t<-finishedPush\n\t\tcase \"/pushed\":\n\t\t\t<-gotPromise\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"text/html\")\n\t\tw.Header().Set(\"Content-Length\", strconv.Itoa(len(body)))\n\t\tw.WriteHeader(200)\n\t\tio.WriteString(w, body)\n\t})\n\tdefer st.Close()\n\n\tst.greet()\n\tif st.stream(2) != nil {\n\t\tt.Fatal(\"stream 2 should be empty\")\n\t}\n\tif got, want := st.streamState(2), stateIdle; got != want {\n\t\tt.Fatalf(\"streamState(2)=%v, want %v\", got, want)\n\t}\n\tgetSlash(st)\n\t// After the PUSH_PROMISE is sent, the stream should be stateHalfClosedRemote.\n\tst.wantPushPromise()\n\tif got, want := st.streamState(2), stateHalfClosedRemote; got != want {\n\t\tt.Fatalf(\"streamState(2)=%v, want %v\", got, want)\n\t}\n\t// We stall the HTTP handler for \"/pushed\" until the above check. 
If we don't\n\t// stall the handler, then the handler might write HEADERS and DATA and finish\n\t// the stream before we check st.streamState(2) -- should that happen, we'll\n\t// see stateClosed and fail the above check.\n\tclose(gotPromise)\n\tst.wantHeaders()\n\tif df := st.wantData(); !df.StreamEnded() {\n\t\tt.Fatal(\"expected END_STREAM flag on DATA\")\n\t}\n\tif got, want := st.streamState(2), stateClosed; got != want {\n\t\tt.Fatalf(\"streamState(2)=%v, want %v\", got, want)\n\t}\n\tclose(finishedPush)\n}\n\nfunc TestServer_Push_RejectAfterGoAway(t *testing.T) {\n\tvar readyOnce sync.Once\n\tready := make(chan struct{})\n\terrc := make(chan error, 2)\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tselect {\n\t\tcase <-ready:\n\t\tcase <-time.After(5 * time.Second):\n\t\t\terrc <- fmt.Errorf(\"timeout waiting for GOAWAY to be processed\")\n\t\t}\n\t\tif got, want := w.(http.Pusher).Push(\"https://\"+r.Host+\"/pushed\", nil), http.ErrNotSupported; got != want {\n\t\t\terrc <- fmt.Errorf(\"Push()=%v, want %v\", got, want)\n\t\t}\n\t\terrc <- nil\n\t})\n\tdefer st.Close()\n\tst.greet()\n\tgetSlash(st)\n\n\t// Send GOAWAY and wait for it to be processed.\n\tst.fr.WriteGoAway(1, ErrCodeNo, nil)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ready:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\t\t\tst.sc.serveMsgCh <- func(loopNum int) {\n\t\t\t\tif !st.sc.pushEnabled {\n\t\t\t\t\treadyOnce.Do(func() { close(ready) })\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\tif err := <-errc; err != nil {\n\t\tt.Error(err)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/server_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport (\n\t\"bytes\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"os/exec\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org/x/net/http2/hpack\"\n)\n\nvar stderrVerbose = flag.Bool(\"stderr_verbose\", false, \"Mirror verbosity to stderr, unbuffered\")\n\nfunc stderrv() io.Writer {\n\tif *stderrVerbose {\n\t\treturn os.Stderr\n\t}\n\n\treturn ioutil.Discard\n}\n\ntype serverTester struct {\n\tcc             net.Conn // client conn\n\tt              testing.TB\n\tts             *httptest.Server\n\tfr             *Framer\n\tserverLogBuf   bytes.Buffer // logger for httptest.Server\n\tlogFilter      []string     // substrings to filter out\n\tscMu           sync.Mutex   // guards sc\n\tsc             *serverConn\n\thpackDec       *hpack.Decoder\n\tdecodedHeaders [][2]string\n\n\t// If http2debug!=2, then we capture Frame debug logs that will be written\n\t// to t.Log after a test fails. 
The read and write logs use separate locks\n\t// and buffers so we don't accidentally introduce synchronization between\n\t// the read and write goroutines, which may hide data races.\n\tframeReadLogMu   sync.Mutex\n\tframeReadLogBuf  bytes.Buffer\n\tframeWriteLogMu  sync.Mutex\n\tframeWriteLogBuf bytes.Buffer\n\n\t// writing headers:\n\theaderBuf bytes.Buffer\n\thpackEnc  *hpack.Encoder\n}\n\nfunc init() {\n\ttestHookOnPanicMu = new(sync.Mutex)\n\tgoAwayTimeout = 25 * time.Millisecond\n}\n\nfunc resetHooks() {\n\ttestHookOnPanicMu.Lock()\n\ttestHookOnPanic = nil\n\ttestHookOnPanicMu.Unlock()\n}\n\ntype serverTesterOpt string\n\nvar optOnlyServer = serverTesterOpt(\"only_server\")\nvar optQuiet = serverTesterOpt(\"quiet_logging\")\nvar optFramerReuseFrames = serverTesterOpt(\"frame_reuse_frames\")\n\nfunc newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester {\n\tresetHooks()\n\n\tts := httptest.NewUnstartedServer(handler)\n\n\ttlsConfig := &tls.Config{\n\t\tInsecureSkipVerify: true,\n\t\tNextProtos:         []string{NextProtoTLS},\n\t}\n\n\tvar onlyServer, quiet, framerReuseFrames bool\n\th2server := new(Server)\n\tfor _, opt := range opts {\n\t\tswitch v := opt.(type) {\n\t\tcase func(*tls.Config):\n\t\t\tv(tlsConfig)\n\t\tcase func(*httptest.Server):\n\t\t\tv(ts)\n\t\tcase func(*Server):\n\t\t\tv(h2server)\n\t\tcase serverTesterOpt:\n\t\t\tswitch v {\n\t\t\tcase optOnlyServer:\n\t\t\t\tonlyServer = true\n\t\t\tcase optQuiet:\n\t\t\t\tquiet = true\n\t\t\tcase optFramerReuseFrames:\n\t\t\t\tframerReuseFrames = true\n\t\t\t}\n\t\tcase func(net.Conn, http.ConnState):\n\t\t\tts.Config.ConnState = v\n\t\tdefault:\n\t\t\tt.Fatalf(\"unknown newServerTester option type %T\", v)\n\t\t}\n\t}\n\n\tConfigureServer(ts.Config, h2server)\n\n\tst := &serverTester{\n\t\tt:  t,\n\t\tts: ts,\n\t}\n\tst.hpackEnc = hpack.NewEncoder(&st.headerBuf)\n\tst.hpackDec = hpack.NewDecoder(initialHeaderTableSize, st.onHeaderField)\n\n\tts.TLS = 
ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config\n\tif quiet {\n\t\tts.Config.ErrorLog = log.New(ioutil.Discard, \"\", 0)\n\t} else {\n\t\tts.Config.ErrorLog = log.New(io.MultiWriter(stderrv(), twriter{t: t, st: st}, &st.serverLogBuf), \"\", log.LstdFlags)\n\t}\n\tts.StartTLS()\n\n\tif VerboseLogs {\n\t\tt.Logf(\"Running test server at: %s\", ts.URL)\n\t}\n\ttestHookGetServerConn = func(v *serverConn) {\n\t\tst.scMu.Lock()\n\t\tdefer st.scMu.Unlock()\n\t\tst.sc = v\n\t}\n\tlog.SetOutput(io.MultiWriter(stderrv(), twriter{t: t, st: st}))\n\tif !onlyServer {\n\t\tcc, err := tls.Dial(\"tcp\", ts.Listener.Addr().String(), tlsConfig)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tst.cc = cc\n\t\tst.fr = NewFramer(cc, cc)\n\t\tif framerReuseFrames {\n\t\t\tst.fr.SetReuseFrames()\n\t\t}\n\t\tif !logFrameReads && !logFrameWrites {\n\t\t\tst.fr.debugReadLoggerf = func(m string, v ...interface{}) {\n\t\t\t\tm = time.Now().Format(\"2006-01-02 15:04:05.999999999 \") + strings.TrimPrefix(m, \"http2: \") + \"\\n\"\n\t\t\t\tst.frameReadLogMu.Lock()\n\t\t\t\tfmt.Fprintf(&st.frameReadLogBuf, m, v...)\n\t\t\t\tst.frameReadLogMu.Unlock()\n\t\t\t}\n\t\t\tst.fr.debugWriteLoggerf = func(m string, v ...interface{}) {\n\t\t\t\tm = time.Now().Format(\"2006-01-02 15:04:05.999999999 \") + strings.TrimPrefix(m, \"http2: \") + \"\\n\"\n\t\t\t\tst.frameWriteLogMu.Lock()\n\t\t\t\tfmt.Fprintf(&st.frameWriteLogBuf, m, v...)\n\t\t\t\tst.frameWriteLogMu.Unlock()\n\t\t\t}\n\t\t\tst.fr.logReads = true\n\t\t\tst.fr.logWrites = true\n\t\t}\n\t}\n\treturn st\n}\n\nfunc (st *serverTester) closeConn() {\n\tst.scMu.Lock()\n\tdefer st.scMu.Unlock()\n\tst.sc.conn.Close()\n}\n\nfunc (st *serverTester) addLogFilter(phrase string) {\n\tst.logFilter = append(st.logFilter, phrase)\n}\n\nfunc (st *serverTester) stream(id uint32) *stream {\n\tch := make(chan *stream, 1)\n\tst.sc.serveMsgCh <- func(int) {\n\t\tch <- st.sc.streams[id]\n\t}\n\treturn <-ch\n}\n\nfunc (st *serverTester) 
streamState(id uint32) streamState {\n\tch := make(chan streamState, 1)\n\tst.sc.serveMsgCh <- func(int) {\n\t\tstate, _ := st.sc.state(id)\n\t\tch <- state\n\t}\n\treturn <-ch\n}\n\n// loopNum reports how many times this conn's select loop has gone around.\nfunc (st *serverTester) loopNum() int {\n\tlastc := make(chan int, 1)\n\tst.sc.serveMsgCh <- func(loopNum int) {\n\t\tlastc <- loopNum\n\t}\n\treturn <-lastc\n}\n\n// awaitIdle heuristically awaits for the server conn's select loop to be idle.\n// The heuristic is that the server connection's serve loop must schedule\n// 50 times in a row without any channel sends or receives occurring.\nfunc (st *serverTester) awaitIdle() {\n\tremain := 50\n\tlast := st.loopNum()\n\tfor remain > 0 {\n\t\tn := st.loopNum()\n\t\tif n == last+1 {\n\t\t\tremain--\n\t\t} else {\n\t\t\tremain = 50\n\t\t}\n\t\tlast = n\n\t}\n}\n\nfunc (st *serverTester) Close() {\n\tif st.t.Failed() {\n\t\tst.frameReadLogMu.Lock()\n\t\tif st.frameReadLogBuf.Len() > 0 {\n\t\t\tst.t.Logf(\"Framer read log:\\n%s\", st.frameReadLogBuf.String())\n\t\t}\n\t\tst.frameReadLogMu.Unlock()\n\n\t\tst.frameWriteLogMu.Lock()\n\t\tif st.frameWriteLogBuf.Len() > 0 {\n\t\t\tst.t.Logf(\"Framer write log:\\n%s\", st.frameWriteLogBuf.String())\n\t\t}\n\t\tst.frameWriteLogMu.Unlock()\n\n\t\t// If we failed already (and are likely in a Fatal,\n\t\t// unwindowing), force close the connection, so the\n\t\t// httptest.Server doesn't wait forever for the conn\n\t\t// to close.\n\t\tif st.cc != nil {\n\t\t\tst.cc.Close()\n\t\t}\n\t}\n\tst.ts.Close()\n\tif st.cc != nil {\n\t\tst.cc.Close()\n\t}\n\tlog.SetOutput(os.Stderr)\n}\n\n// greet initiates the client's HTTP/2 connection into a state where\n// frames may be sent.\nfunc (st *serverTester) greet() {\n\tst.greetAndCheckSettings(func(Setting) error { return nil })\n}\n\nfunc (st *serverTester) greetAndCheckSettings(checkSetting func(s Setting) error) 
{\n\tst.writePreface()\n\tst.writeInitialSettings()\n\tst.wantSettings().ForeachSetting(checkSetting)\n\tst.writeSettingsAck()\n\n\t// The initial WINDOW_UPDATE and SETTINGS ACK can come in any order.\n\tvar gotSettingsAck bool\n\tvar gotWindowUpdate bool\n\n\tfor i := 0; i < 2; i++ {\n\t\tf, err := st.readFrame()\n\t\tif err != nil {\n\t\t\tst.t.Fatal(err)\n\t\t}\n\t\tswitch f := f.(type) {\n\t\tcase *SettingsFrame:\n\t\t\tif !f.Header().Flags.Has(FlagSettingsAck) {\n\t\t\t\tst.t.Fatal(\"Settings Frame didn't have ACK set\")\n\t\t\t}\n\t\t\tgotSettingsAck = true\n\n\t\tcase *WindowUpdateFrame:\n\t\t\tif f.FrameHeader.StreamID != 0 {\n\t\t\t\tst.t.Fatalf(\"WindowUpdate StreamID = %d; want 0\", f.FrameHeader.StreamID)\n\t\t\t}\n\t\t\tincr := uint32((&Server{}).initialConnRecvWindowSize() - initialWindowSize)\n\t\t\tif f.Increment != incr {\n\t\t\t\tst.t.Fatalf(\"WindowUpdate increment = %d; want %d\", f.Increment, incr)\n\t\t\t}\n\t\t\tgotWindowUpdate = true\n\n\t\tdefault:\n\t\t\tst.t.Fatalf(\"Wanting a settings ACK or window update, received a %T\", f)\n\t\t}\n\t}\n\n\tif !gotSettingsAck {\n\t\tst.t.Fatalf(\"Didn't get a settings ACK\")\n\t}\n\tif !gotWindowUpdate {\n\t\tst.t.Fatalf(\"Didn't get a window update\")\n\t}\n}\n\nfunc (st *serverTester) writePreface() {\n\tn, err := st.cc.Write(clientPreface)\n\tif err != nil {\n\t\tst.t.Fatalf(\"Error writing client preface: %v\", err)\n\t}\n\tif n != len(clientPreface) {\n\t\tst.t.Fatalf(\"Writing client preface, wrote %d bytes; want %d\", n, len(clientPreface))\n\t}\n}\n\nfunc (st *serverTester) writeInitialSettings() {\n\tif err := st.fr.WriteSettings(); err != nil {\n\t\tst.t.Fatalf(\"Error writing initial SETTINGS frame from client to server: %v\", err)\n\t}\n}\n\nfunc (st *serverTester) writeSettingsAck() {\n\tif err := st.fr.WriteSettingsAck(); err != nil {\n\t\tst.t.Fatalf(\"Error writing ACK of server's SETTINGS: %v\", err)\n\t}\n}\n\nfunc (st *serverTester) writeHeaders(p HeadersFrameParam) {\n\tif err := 
st.fr.WriteHeaders(p); err != nil {\n\t\tst.t.Fatalf(\"Error writing HEADERS: %v\", err)\n\t}\n}\n\nfunc (st *serverTester) writePriority(id uint32, p PriorityParam) {\n\tif err := st.fr.WritePriority(id, p); err != nil {\n\t\tst.t.Fatalf(\"Error writing PRIORITY: %v\", err)\n\t}\n}\n\nfunc (st *serverTester) encodeHeaderField(k, v string) {\n\terr := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v})\n\tif err != nil {\n\t\tst.t.Fatalf(\"HPACK encoding error for %q/%q: %v\", k, v, err)\n\t}\n}\n\n// encodeHeaderRaw is the magic-free version of encodeHeader.\n// It takes 0 or more (k, v) pairs and encodes them.\nfunc (st *serverTester) encodeHeaderRaw(headers ...string) []byte {\n\tif len(headers)%2 == 1 {\n\t\tpanic(\"odd number of kv args\")\n\t}\n\tst.headerBuf.Reset()\n\tfor len(headers) > 0 {\n\t\tk, v := headers[0], headers[1]\n\t\tst.encodeHeaderField(k, v)\n\t\theaders = headers[2:]\n\t}\n\treturn st.headerBuf.Bytes()\n}\n\n// encodeHeader encodes headers and returns their HPACK bytes. headers\n// must contain an even number of key/value pairs. There may be\n// multiple pairs for keys (e.g. \"cookie\").  The :method, :path, and\n// :scheme headers default to GET, / and https. 
The :authority header\n// defaults to st.ts.Listener.Addr().\nfunc (st *serverTester) encodeHeader(headers ...string) []byte {\n\tif len(headers)%2 == 1 {\n\t\tpanic(\"odd number of kv args\")\n\t}\n\n\tst.headerBuf.Reset()\n\tdefaultAuthority := st.ts.Listener.Addr().String()\n\n\tif len(headers) == 0 {\n\t\t// Fast path, mostly for benchmarks, so test code doesn't pollute\n\t\t// profiles when we're looking to improve server allocations.\n\t\tst.encodeHeaderField(\":method\", \"GET\")\n\t\tst.encodeHeaderField(\":scheme\", \"https\")\n\t\tst.encodeHeaderField(\":authority\", defaultAuthority)\n\t\tst.encodeHeaderField(\":path\", \"/\")\n\t\treturn st.headerBuf.Bytes()\n\t}\n\n\tif len(headers) == 2 && headers[0] == \":method\" {\n\t\t// Another fast path for benchmarks.\n\t\tst.encodeHeaderField(\":method\", headers[1])\n\t\tst.encodeHeaderField(\":scheme\", \"https\")\n\t\tst.encodeHeaderField(\":authority\", defaultAuthority)\n\t\tst.encodeHeaderField(\":path\", \"/\")\n\t\treturn st.headerBuf.Bytes()\n\t}\n\n\tpseudoCount := map[string]int{}\n\tkeys := []string{\":method\", \":scheme\", \":authority\", \":path\"}\n\tvals := map[string][]string{\n\t\t\":method\":    {\"GET\"},\n\t\t\":scheme\":    {\"https\"},\n\t\t\":authority\": {defaultAuthority},\n\t\t\":path\":      {\"/\"},\n\t}\n\tfor len(headers) > 0 {\n\t\tk, v := headers[0], headers[1]\n\t\theaders = headers[2:]\n\t\tif _, ok := vals[k]; !ok {\n\t\t\tkeys = append(keys, k)\n\t\t}\n\t\tif strings.HasPrefix(k, \":\") {\n\t\t\tpseudoCount[k]++\n\t\t\tif pseudoCount[k] == 1 {\n\t\t\t\tvals[k] = []string{v}\n\t\t\t} else {\n\t\t\t\t// Allows testing of invalid headers w/ dup pseudo fields.\n\t\t\t\tvals[k] = append(vals[k], v)\n\t\t\t}\n\t\t} else {\n\t\t\tvals[k] = append(vals[k], v)\n\t\t}\n\t}\n\tfor _, k := range keys {\n\t\tfor _, v := range vals[k] {\n\t\t\tst.encodeHeaderField(k, v)\n\t\t}\n\t}\n\treturn st.headerBuf.Bytes()\n}\n\n// bodylessReq1 writes a HEADERS frames with StreamID 1 and EndStream 
and EndHeaders set.\nfunc (st *serverTester) bodylessReq1(headers ...string) {\n\tst.writeHeaders(HeadersFrameParam{\n\t\tStreamID:      1, // clients send odd numbers\n\t\tBlockFragment: st.encodeHeader(headers...),\n\t\tEndStream:     true,\n\t\tEndHeaders:    true,\n\t})\n}\n\nfunc (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) {\n\tif err := st.fr.WriteData(streamID, endStream, data); err != nil {\n\t\tst.t.Fatalf(\"Error writing DATA: %v\", err)\n\t}\n}\n\nfunc (st *serverTester) writeDataPadded(streamID uint32, endStream bool, data, pad []byte) {\n\tif err := st.fr.WriteDataPadded(streamID, endStream, data, pad); err != nil {\n\t\tst.t.Fatalf(\"Error writing DATA: %v\", err)\n\t}\n}\n\nfunc readFrameTimeout(fr *Framer, wait time.Duration) (Frame, error) {\n\tch := make(chan interface{}, 1)\n\tgo func() {\n\t\tfr, err := fr.ReadFrame()\n\t\tif err != nil {\n\t\t\tch <- err\n\t\t} else {\n\t\t\tch <- fr\n\t\t}\n\t}()\n\tt := time.NewTimer(wait)\n\tselect {\n\tcase v := <-ch:\n\t\tt.Stop()\n\t\tif fr, ok := v.(Frame); ok {\n\t\t\treturn fr, nil\n\t\t}\n\t\treturn nil, v.(error)\n\tcase <-t.C:\n\t\treturn nil, errors.New(\"timeout waiting for frame\")\n\t}\n}\n\nfunc (st *serverTester) readFrame() (Frame, error) {\n\treturn readFrameTimeout(st.fr, 2*time.Second)\n}\n\nfunc (st *serverTester) wantHeaders() *HeadersFrame {\n\tf, err := st.readFrame()\n\tif err != nil {\n\t\tst.t.Fatalf(\"Error while expecting a HEADERS frame: %v\", err)\n\t}\n\thf, ok := f.(*HeadersFrame)\n\tif !ok {\n\t\tst.t.Fatalf(\"got a %T; want *HeadersFrame\", f)\n\t}\n\treturn hf\n}\n\nfunc (st *serverTester) wantContinuation() *ContinuationFrame {\n\tf, err := st.readFrame()\n\tif err != nil {\n\t\tst.t.Fatalf(\"Error while expecting a CONTINUATION frame: %v\", err)\n\t}\n\tcf, ok := f.(*ContinuationFrame)\n\tif !ok {\n\t\tst.t.Fatalf(\"got a %T; want *ContinuationFrame\", f)\n\t}\n\treturn cf\n}\n\nfunc (st *serverTester) wantData() *DataFrame {\n\tf, err := 
st.readFrame()\n\tif err != nil {\n\t\tst.t.Fatalf(\"Error while expecting a DATA frame: %v\", err)\n\t}\n\tdf, ok := f.(*DataFrame)\n\tif !ok {\n\t\tst.t.Fatalf(\"got a %T; want *DataFrame\", f)\n\t}\n\treturn df\n}\n\nfunc (st *serverTester) wantSettings() *SettingsFrame {\n\tf, err := st.readFrame()\n\tif err != nil {\n\t\tst.t.Fatalf(\"Error while expecting a SETTINGS frame: %v\", err)\n\t}\n\tsf, ok := f.(*SettingsFrame)\n\tif !ok {\n\t\tst.t.Fatalf(\"got a %T; want *SettingsFrame\", f)\n\t}\n\treturn sf\n}\n\nfunc (st *serverTester) wantPing() *PingFrame {\n\tf, err := st.readFrame()\n\tif err != nil {\n\t\tst.t.Fatalf(\"Error while expecting a PING frame: %v\", err)\n\t}\n\tpf, ok := f.(*PingFrame)\n\tif !ok {\n\t\tst.t.Fatalf(\"got a %T; want *PingFrame\", f)\n\t}\n\treturn pf\n}\n\nfunc (st *serverTester) wantGoAway() *GoAwayFrame {\n\tf, err := st.readFrame()\n\tif err != nil {\n\t\tst.t.Fatalf(\"Error while expecting a GOAWAY frame: %v\", err)\n\t}\n\tgf, ok := f.(*GoAwayFrame)\n\tif !ok {\n\t\tst.t.Fatalf(\"got a %T; want *GoAwayFrame\", f)\n\t}\n\treturn gf\n}\n\nfunc (st *serverTester) wantRSTStream(streamID uint32, errCode ErrCode) {\n\tf, err := st.readFrame()\n\tif err != nil {\n\t\tst.t.Fatalf(\"Error while expecting an RSTStream frame: %v\", err)\n\t}\n\trs, ok := f.(*RSTStreamFrame)\n\tif !ok {\n\t\tst.t.Fatalf(\"got a %T; want *RSTStreamFrame\", f)\n\t}\n\tif rs.FrameHeader.StreamID != streamID {\n\t\tst.t.Fatalf(\"RSTStream StreamID = %d; want %d\", rs.FrameHeader.StreamID, streamID)\n\t}\n\tif rs.ErrCode != errCode {\n\t\tst.t.Fatalf(\"RSTStream ErrCode = %d (%s); want %d (%s)\", rs.ErrCode, rs.ErrCode, errCode, errCode)\n\t}\n}\n\nfunc (st *serverTester) wantWindowUpdate(streamID, incr uint32) {\n\tf, err := st.readFrame()\n\tif err != nil {\n\t\tst.t.Fatalf(\"Error while expecting a WINDOW_UPDATE frame: %v\", err)\n\t}\n\twu, ok := f.(*WindowUpdateFrame)\n\tif !ok {\n\t\tst.t.Fatalf(\"got a %T; want *WindowUpdateFrame\", f)\n\t}\n\tif 
wu.FrameHeader.StreamID != streamID {\n\t\tst.t.Fatalf(\"WindowUpdate StreamID = %d; want %d\", wu.FrameHeader.StreamID, streamID)\n\t}\n\tif wu.Increment != incr {\n\t\tst.t.Fatalf(\"WindowUpdate increment = %d; want %d\", wu.Increment, incr)\n\t}\n}\n\nfunc (st *serverTester) wantSettingsAck() {\n\tf, err := st.readFrame()\n\tif err != nil {\n\t\tst.t.Fatal(err)\n\t}\n\tsf, ok := f.(*SettingsFrame)\n\tif !ok {\n\t\tst.t.Fatalf(\"Wanting a settings ACK, received a %T\", f)\n\t}\n\tif !sf.Header().Flags.Has(FlagSettingsAck) {\n\t\tst.t.Fatal(\"Settings Frame didn't have ACK set\")\n\t}\n}\n\nfunc (st *serverTester) wantPushPromise() *PushPromiseFrame {\n\tf, err := st.readFrame()\n\tif err != nil {\n\t\tst.t.Fatal(err)\n\t}\n\tppf, ok := f.(*PushPromiseFrame)\n\tif !ok {\n\t\tst.t.Fatalf(\"Wanted PushPromise, received %T\", ppf)\n\t}\n\treturn ppf\n}\n\nfunc TestServer(t *testing.T) {\n\tgotReq := make(chan bool, 1)\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Foo\", \"Bar\")\n\t\tgotReq <- true\n\t})\n\tdefer st.Close()\n\n\tcovers(\"3.5\", `\n\t\tThe server connection preface consists of a potentially empty\n\t\tSETTINGS frame ([SETTINGS]) that MUST be the first frame the\n\t\tserver sends in the HTTP/2 connection.\n\t`)\n\n\tst.greet()\n\tst.writeHeaders(HeadersFrameParam{\n\t\tStreamID:      1, // clients send odd numbers\n\t\tBlockFragment: st.encodeHeader(),\n\t\tEndStream:     true, // no DATA frames\n\t\tEndHeaders:    true,\n\t})\n\n\tselect {\n\tcase <-gotReq:\n\tcase <-time.After(2 * time.Second):\n\t\tt.Error(\"timeout waiting for request\")\n\t}\n}\n\nfunc TestServer_Request_Get(t *testing.T) {\n\ttestServerRequest(t, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1, // clients send odd numbers\n\t\t\tBlockFragment: st.encodeHeader(\"foo-bar\", \"some-value\"),\n\t\t\tEndStream:     true, // no DATA frames\n\t\t\tEndHeaders:    true,\n\t\t})\n\t}, func(r 
*http.Request) {\n\t\tif r.Method != \"GET\" {\n\t\t\tt.Errorf(\"Method = %q; want GET\", r.Method)\n\t\t}\n\t\tif r.URL.Path != \"/\" {\n\t\t\tt.Errorf(\"URL.Path = %q; want /\", r.URL.Path)\n\t\t}\n\t\tif r.ContentLength != 0 {\n\t\t\tt.Errorf(\"ContentLength = %v; want 0\", r.ContentLength)\n\t\t}\n\t\tif r.Close {\n\t\t\tt.Error(\"Close = true; want false\")\n\t\t}\n\t\tif !strings.Contains(r.RemoteAddr, \":\") {\n\t\t\tt.Errorf(\"RemoteAddr = %q; want something with a colon\", r.RemoteAddr)\n\t\t}\n\t\tif r.Proto != \"HTTP/2.0\" || r.ProtoMajor != 2 || r.ProtoMinor != 0 {\n\t\t\tt.Errorf(\"Proto = %q Major=%v,Minor=%v; want HTTP/2.0\", r.Proto, r.ProtoMajor, r.ProtoMinor)\n\t\t}\n\t\twantHeader := http.Header{\n\t\t\t\"Foo-Bar\": []string{\"some-value\"},\n\t\t}\n\t\tif !reflect.DeepEqual(r.Header, wantHeader) {\n\t\t\tt.Errorf(\"Header = %#v; want %#v\", r.Header, wantHeader)\n\t\t}\n\t\tif n, err := r.Body.Read([]byte(\" \")); err != io.EOF || n != 0 {\n\t\t\tt.Errorf(\"Read = %d, %v; want 0, EOF\", n, err)\n\t\t}\n\t})\n}\n\nfunc TestServer_Request_Get_PathSlashes(t *testing.T) {\n\ttestServerRequest(t, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1, // clients send odd numbers\n\t\t\tBlockFragment: st.encodeHeader(\":path\", \"/%2f/\"),\n\t\t\tEndStream:     true, // no DATA frames\n\t\t\tEndHeaders:    true,\n\t\t})\n\t}, func(r *http.Request) {\n\t\tif r.RequestURI != \"/%2f/\" {\n\t\t\tt.Errorf(\"RequestURI = %q; want /%%2f/\", r.RequestURI)\n\t\t}\n\t\tif r.URL.Path != \"///\" {\n\t\t\tt.Errorf(\"URL.Path = %q; want ///\", r.URL.Path)\n\t\t}\n\t})\n}\n\n// TODO: add a test with EndStream=true on the HEADERS but setting a\n// Content-Length anyway. 
Should we just omit it and force it to\n// zero?\n\nfunc TestServer_Request_Post_NoContentLength_EndStream(t *testing.T) {\n\ttestServerRequest(t, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1, // clients send odd numbers\n\t\t\tBlockFragment: st.encodeHeader(\":method\", \"POST\"),\n\t\t\tEndStream:     true,\n\t\t\tEndHeaders:    true,\n\t\t})\n\t}, func(r *http.Request) {\n\t\tif r.Method != \"POST\" {\n\t\t\tt.Errorf(\"Method = %q; want POST\", r.Method)\n\t\t}\n\t\tif r.ContentLength != 0 {\n\t\t\tt.Errorf(\"ContentLength = %v; want 0\", r.ContentLength)\n\t\t}\n\t\tif n, err := r.Body.Read([]byte(\" \")); err != io.EOF || n != 0 {\n\t\t\tt.Errorf(\"Read = %d, %v; want 0, EOF\", n, err)\n\t\t}\n\t})\n}\n\nfunc TestServer_Request_Post_Body_ImmediateEOF(t *testing.T) {\n\ttestBodyContents(t, -1, \"\", func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1, // clients send odd numbers\n\t\t\tBlockFragment: st.encodeHeader(\":method\", \"POST\"),\n\t\t\tEndStream:     false, // to say DATA frames are coming\n\t\t\tEndHeaders:    true,\n\t\t})\n\t\tst.writeData(1, true, nil) // just kidding. 
empty body.\n\t})\n}\n\nfunc TestServer_Request_Post_Body_OneData(t *testing.T) {\n\tconst content = \"Some content\"\n\ttestBodyContents(t, -1, content, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1, // clients send odd numbers\n\t\t\tBlockFragment: st.encodeHeader(\":method\", \"POST\"),\n\t\t\tEndStream:     false, // to say DATA frames are coming\n\t\t\tEndHeaders:    true,\n\t\t})\n\t\tst.writeData(1, true, []byte(content))\n\t})\n}\n\nfunc TestServer_Request_Post_Body_TwoData(t *testing.T) {\n\tconst content = \"Some content\"\n\ttestBodyContents(t, -1, content, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1, // clients send odd numbers\n\t\t\tBlockFragment: st.encodeHeader(\":method\", \"POST\"),\n\t\t\tEndStream:     false, // to say DATA frames are coming\n\t\t\tEndHeaders:    true,\n\t\t})\n\t\tst.writeData(1, false, []byte(content[:5]))\n\t\tst.writeData(1, true, []byte(content[5:]))\n\t})\n}\n\nfunc TestServer_Request_Post_Body_ContentLength_Correct(t *testing.T) {\n\tconst content = \"Some content\"\n\ttestBodyContents(t, int64(len(content)), content, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID: 1, // clients send odd numbers\n\t\t\tBlockFragment: st.encodeHeader(\n\t\t\t\t\":method\", \"POST\",\n\t\t\t\t\"content-length\", strconv.Itoa(len(content)),\n\t\t\t),\n\t\t\tEndStream:  false, // to say DATA frames are coming\n\t\t\tEndHeaders: true,\n\t\t})\n\t\tst.writeData(1, true, []byte(content))\n\t})\n}\n\nfunc TestServer_Request_Post_Body_ContentLength_TooLarge(t *testing.T) {\n\ttestBodyContentsFail(t, 3, \"request declared a Content-Length of 3 but only wrote 2 bytes\",\n\t\tfunc(st *serverTester) {\n\t\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\t\tStreamID: 1, // clients send odd numbers\n\t\t\t\tBlockFragment: st.encodeHeader(\n\t\t\t\t\t\":method\", \"POST\",\n\t\t\t\t\t\"content-length\", 
\"3\",\n\t\t\t\t),\n\t\t\t\tEndStream:  false, // to say DATA frames are coming\n\t\t\t\tEndHeaders: true,\n\t\t\t})\n\t\t\tst.writeData(1, true, []byte(\"12\"))\n\t\t})\n}\n\nfunc TestServer_Request_Post_Body_ContentLength_TooSmall(t *testing.T) {\n\ttestBodyContentsFail(t, 4, \"sender tried to send more than declared Content-Length of 4 bytes\",\n\t\tfunc(st *serverTester) {\n\t\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\t\tStreamID: 1, // clients send odd numbers\n\t\t\t\tBlockFragment: st.encodeHeader(\n\t\t\t\t\t\":method\", \"POST\",\n\t\t\t\t\t\"content-length\", \"4\",\n\t\t\t\t),\n\t\t\t\tEndStream:  false, // to say DATA frames are coming\n\t\t\t\tEndHeaders: true,\n\t\t\t})\n\t\t\tst.writeData(1, true, []byte(\"12345\"))\n\t\t})\n}\n\nfunc testBodyContents(t *testing.T, wantContentLength int64, wantBody string, write func(st *serverTester)) {\n\ttestServerRequest(t, write, func(r *http.Request) {\n\t\tif r.Method != \"POST\" {\n\t\t\tt.Errorf(\"Method = %q; want POST\", r.Method)\n\t\t}\n\t\tif r.ContentLength != wantContentLength {\n\t\t\tt.Errorf(\"ContentLength = %v; want %d\", r.ContentLength, wantContentLength)\n\t\t}\n\t\tall, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif string(all) != wantBody {\n\t\t\tt.Errorf(\"Read = %q; want %q\", all, wantBody)\n\t\t}\n\t\tif err := r.Body.Close(); err != nil {\n\t\t\tt.Fatalf(\"Close: %v\", err)\n\t\t}\n\t})\n}\n\nfunc testBodyContentsFail(t *testing.T, wantContentLength int64, wantReadError string, write func(st *serverTester)) {\n\ttestServerRequest(t, write, func(r *http.Request) {\n\t\tif r.Method != \"POST\" {\n\t\t\tt.Errorf(\"Method = %q; want POST\", r.Method)\n\t\t}\n\t\tif r.ContentLength != wantContentLength {\n\t\t\tt.Errorf(\"ContentLength = %v; want %d\", r.ContentLength, wantContentLength)\n\t\t}\n\t\tall, err := ioutil.ReadAll(r.Body)\n\t\tif err == nil {\n\t\t\tt.Fatalf(\"expected an error (%q) reading from the body. 
Successfully read %q instead.\",\n\t\t\t\twantReadError, all)\n\t\t}\n\t\tif !strings.Contains(err.Error(), wantReadError) {\n\t\t\tt.Fatalf(\"Body.Read = %v; want substring %q\", err, wantReadError)\n\t\t}\n\t\tif err := r.Body.Close(); err != nil {\n\t\t\tt.Fatalf(\"Close: %v\", err)\n\t\t}\n\t})\n}\n\n// Using a Host header, instead of :authority\nfunc TestServer_Request_Get_Host(t *testing.T) {\n\tconst host = \"example.com\"\n\ttestServerRequest(t, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1, // clients send odd numbers\n\t\t\tBlockFragment: st.encodeHeader(\":authority\", \"\", \"host\", host),\n\t\t\tEndStream:     true,\n\t\t\tEndHeaders:    true,\n\t\t})\n\t}, func(r *http.Request) {\n\t\tif r.Host != host {\n\t\t\tt.Errorf(\"Host = %q; want %q\", r.Host, host)\n\t\t}\n\t})\n}\n\n// Using an :authority pseudo-header, instead of Host\nfunc TestServer_Request_Get_Authority(t *testing.T) {\n\tconst host = \"example.com\"\n\ttestServerRequest(t, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1, // clients send odd numbers\n\t\t\tBlockFragment: st.encodeHeader(\":authority\", host),\n\t\t\tEndStream:     true,\n\t\t\tEndHeaders:    true,\n\t\t})\n\t}, func(r *http.Request) {\n\t\tif r.Host != host {\n\t\t\tt.Errorf(\"Host = %q; want %q\", r.Host, host)\n\t\t}\n\t})\n}\n\nfunc TestServer_Request_WithContinuation(t *testing.T) {\n\twantHeader := http.Header{\n\t\t\"Foo-One\":   []string{\"value-one\"},\n\t\t\"Foo-Two\":   []string{\"value-two\"},\n\t\t\"Foo-Three\": []string{\"value-three\"},\n\t}\n\ttestServerRequest(t, func(st *serverTester) {\n\t\tfullHeaders := st.encodeHeader(\n\t\t\t\"foo-one\", \"value-one\",\n\t\t\t\"foo-two\", \"value-two\",\n\t\t\t\"foo-three\", \"value-three\",\n\t\t)\n\t\tremain := fullHeaders\n\t\tchunks := 0\n\t\tfor len(remain) > 0 {\n\t\t\tconst maxChunkSize = 5\n\t\t\tchunk := remain\n\t\t\tif len(chunk) > maxChunkSize {\n\t\t\t\tchunk = 
chunk[:maxChunkSize]\n\t\t\t}\n\t\t\tremain = remain[len(chunk):]\n\n\t\t\tif chunks == 0 {\n\t\t\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\t\t\tStreamID:      1, // clients send odd numbers\n\t\t\t\t\tBlockFragment: chunk,\n\t\t\t\t\tEndStream:     true,  // no DATA frames\n\t\t\t\t\tEndHeaders:    false, // we'll have continuation frames\n\t\t\t\t})\n\t\t\t} else {\n\t\t\t\terr := st.fr.WriteContinuation(1, len(remain) == 0, chunk)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tchunks++\n\t\t}\n\t\tif chunks < 2 {\n\t\t\tt.Fatal(\"too few chunks\")\n\t\t}\n\t}, func(r *http.Request) {\n\t\tif !reflect.DeepEqual(r.Header, wantHeader) {\n\t\t\tt.Errorf(\"Header = %#v; want %#v\", r.Header, wantHeader)\n\t\t}\n\t})\n}\n\n// Concatenated cookie headers. (\"8.1.2.5 Compressing the Cookie Header Field\")\nfunc TestServer_Request_CookieConcat(t *testing.T) {\n\tconst host = \"example.com\"\n\ttestServerRequest(t, func(st *serverTester) {\n\t\tst.bodylessReq1(\n\t\t\t\":authority\", host,\n\t\t\t\"cookie\", \"a=b\",\n\t\t\t\"cookie\", \"c=d\",\n\t\t\t\"cookie\", \"e=f\",\n\t\t)\n\t}, func(r *http.Request) {\n\t\tconst want = \"a=b; c=d; e=f\"\n\t\tif got := r.Header.Get(\"Cookie\"); got != want {\n\t\t\tt.Errorf(\"Cookie = %q; want %q\", got, want)\n\t\t}\n\t})\n}\n\nfunc TestServer_Request_Reject_CapitalHeader(t *testing.T) {\n\ttestRejectRequest(t, func(st *serverTester) { st.bodylessReq1(\"UPPER\", \"v\") })\n}\n\nfunc TestServer_Request_Reject_HeaderFieldNameColon(t *testing.T) {\n\ttestRejectRequest(t, func(st *serverTester) { st.bodylessReq1(\"has:colon\", \"v\") })\n}\n\nfunc TestServer_Request_Reject_HeaderFieldNameNULL(t *testing.T) {\n\ttestRejectRequest(t, func(st *serverTester) { st.bodylessReq1(\"has\\x00null\", \"v\") })\n}\n\nfunc TestServer_Request_Reject_HeaderFieldNameEmpty(t *testing.T) {\n\ttestRejectRequest(t, func(st *serverTester) { st.bodylessReq1(\"\", \"v\") })\n}\n\nfunc 
TestServer_Request_Reject_HeaderFieldValueNewline(t *testing.T) {\n\ttestRejectRequest(t, func(st *serverTester) { st.bodylessReq1(\"foo\", \"has\\nnewline\") })\n}\n\nfunc TestServer_Request_Reject_HeaderFieldValueCR(t *testing.T) {\n\ttestRejectRequest(t, func(st *serverTester) { st.bodylessReq1(\"foo\", \"has\\rcarriage\") })\n}\n\nfunc TestServer_Request_Reject_HeaderFieldValueDEL(t *testing.T) {\n\ttestRejectRequest(t, func(st *serverTester) { st.bodylessReq1(\"foo\", \"has\\x7fdel\") })\n}\n\nfunc TestServer_Request_Reject_Pseudo_Missing_method(t *testing.T) {\n\ttestRejectRequest(t, func(st *serverTester) { st.bodylessReq1(\":method\", \"\") })\n}\n\nfunc TestServer_Request_Reject_Pseudo_ExactlyOne(t *testing.T) {\n\t// 8.1.2.3 Request Pseudo-Header Fields\n\t// \"All HTTP/2 requests MUST include exactly one valid value\" ...\n\ttestRejectRequest(t, func(st *serverTester) {\n\t\tst.addLogFilter(\"duplicate pseudo-header\")\n\t\tst.bodylessReq1(\":method\", \"GET\", \":method\", \"POST\")\n\t})\n}\n\nfunc TestServer_Request_Reject_Pseudo_AfterRegular(t *testing.T) {\n\t// 8.1.2.3 Request Pseudo-Header Fields\n\t// \"All pseudo-header fields MUST appear in the header block\n\t// before regular header fields. 
Any request or response that\n\t// contains a pseudo-header field that appears in a header\n\t// block after a regular header field MUST be treated as\n\t// malformed (Section 8.1.2.6).\"\n\ttestRejectRequest(t, func(st *serverTester) {\n\t\tst.addLogFilter(\"pseudo-header after regular header\")\n\t\tvar buf bytes.Buffer\n\t\tenc := hpack.NewEncoder(&buf)\n\t\tenc.WriteField(hpack.HeaderField{Name: \":method\", Value: \"GET\"})\n\t\tenc.WriteField(hpack.HeaderField{Name: \"regular\", Value: \"foobar\"})\n\t\tenc.WriteField(hpack.HeaderField{Name: \":path\", Value: \"/\"})\n\t\tenc.WriteField(hpack.HeaderField{Name: \":scheme\", Value: \"https\"})\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1, // clients send odd numbers\n\t\t\tBlockFragment: buf.Bytes(),\n\t\t\tEndStream:     true,\n\t\t\tEndHeaders:    true,\n\t\t})\n\t})\n}\n\nfunc TestServer_Request_Reject_Pseudo_Missing_path(t *testing.T) {\n\ttestRejectRequest(t, func(st *serverTester) { st.bodylessReq1(\":path\", \"\") })\n}\n\nfunc TestServer_Request_Reject_Pseudo_Missing_scheme(t *testing.T) {\n\ttestRejectRequest(t, func(st *serverTester) { st.bodylessReq1(\":scheme\", \"\") })\n}\n\nfunc TestServer_Request_Reject_Pseudo_scheme_invalid(t *testing.T) {\n\ttestRejectRequest(t, func(st *serverTester) { st.bodylessReq1(\":scheme\", \"bogus\") })\n}\n\nfunc TestServer_Request_Reject_Pseudo_Unknown(t *testing.T) {\n\ttestRejectRequest(t, func(st *serverTester) {\n\t\tst.addLogFilter(`invalid pseudo-header \":unknown_thing\"`)\n\t\tst.bodylessReq1(\":unknown_thing\", \"\")\n\t})\n}\n\nfunc testRejectRequest(t *testing.T, send func(*serverTester)) {\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tt.Error(\"server request made it to handler; should've been rejected\")\n\t})\n\tdefer st.Close()\n\n\tst.greet()\n\tsend(st)\n\tst.wantRSTStream(1, ErrCodeProtocol)\n}\n\nfunc testRejectRequestWithProtocolError(t *testing.T, send func(*serverTester)) {\n\tst := 
newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tt.Error(\"server request made it to handler; should've been rejected\")\n\t}, optQuiet)\n\tdefer st.Close()\n\n\tst.greet()\n\tsend(st)\n\tgf := st.wantGoAway()\n\tif gf.ErrCode != ErrCodeProtocol {\n\t\tt.Errorf(\"err code = %v; want %v\", gf.ErrCode, ErrCodeProtocol)\n\t}\n}\n\n// Section 5.1, on idle connections: \"Receiving any frame other than\n// HEADERS or PRIORITY on a stream in this state MUST be treated as a\n// connection error (Section 5.4.1) of type PROTOCOL_ERROR.\"\nfunc TestRejectFrameOnIdle_WindowUpdate(t *testing.T) {\n\ttestRejectRequestWithProtocolError(t, func(st *serverTester) {\n\t\tst.fr.WriteWindowUpdate(123, 456)\n\t})\n}\nfunc TestRejectFrameOnIdle_Data(t *testing.T) {\n\ttestRejectRequestWithProtocolError(t, func(st *serverTester) {\n\t\tst.fr.WriteData(123, true, nil)\n\t})\n}\nfunc TestRejectFrameOnIdle_RSTStream(t *testing.T) {\n\ttestRejectRequestWithProtocolError(t, func(st *serverTester) {\n\t\tst.fr.WriteRSTStream(123, ErrCodeCancel)\n\t})\n}\n\nfunc TestServer_Request_Connect(t *testing.T) {\n\ttestServerRequest(t, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID: 1,\n\t\t\tBlockFragment: st.encodeHeaderRaw(\n\t\t\t\t\":method\", \"CONNECT\",\n\t\t\t\t\":authority\", \"example.com:123\",\n\t\t\t),\n\t\t\tEndStream:  true,\n\t\t\tEndHeaders: true,\n\t\t})\n\t}, func(r *http.Request) {\n\t\tif g, w := r.Method, \"CONNECT\"; g != w {\n\t\t\tt.Errorf(\"Method = %q; want %q\", g, w)\n\t\t}\n\t\tif g, w := r.RequestURI, \"example.com:123\"; g != w {\n\t\t\tt.Errorf(\"RequestURI = %q; want %q\", g, w)\n\t\t}\n\t\tif g, w := r.URL.Host, \"example.com:123\"; g != w {\n\t\t\tt.Errorf(\"URL.Host = %q; want %q\", g, w)\n\t\t}\n\t})\n}\n\nfunc TestServer_Request_Connect_InvalidPath(t *testing.T) {\n\ttestServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID: 
1,\n\t\t\tBlockFragment: st.encodeHeaderRaw(\n\t\t\t\t\":method\", \"CONNECT\",\n\t\t\t\t\":authority\", \"example.com:123\",\n\t\t\t\t\":path\", \"/bogus\",\n\t\t\t),\n\t\t\tEndStream:  true,\n\t\t\tEndHeaders: true,\n\t\t})\n\t})\n}\n\nfunc TestServer_Request_Connect_InvalidScheme(t *testing.T) {\n\ttestServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID: 1,\n\t\t\tBlockFragment: st.encodeHeaderRaw(\n\t\t\t\t\":method\", \"CONNECT\",\n\t\t\t\t\":authority\", \"example.com:123\",\n\t\t\t\t\":scheme\", \"https\",\n\t\t\t),\n\t\t\tEndStream:  true,\n\t\t\tEndHeaders: true,\n\t\t})\n\t})\n}\n\nfunc TestServer_Ping(t *testing.T) {\n\tst := newServerTester(t, nil)\n\tdefer st.Close()\n\tst.greet()\n\n\t// Server should ignore this one, since it has ACK set.\n\tackPingData := [8]byte{1, 2, 4, 8, 16, 32, 64, 128}\n\tif err := st.fr.WritePing(true, ackPingData); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// But the server should reply to this one, since ACK is false.\n\tpingData := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}\n\tif err := st.fr.WritePing(false, pingData); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpf := st.wantPing()\n\tif !pf.Flags.Has(FlagPingAck) {\n\t\tt.Error(\"response ping doesn't have ACK set\")\n\t}\n\tif pf.Data != pingData {\n\t\tt.Errorf(\"response ping has data %q; want %q\", pf.Data, pingData)\n\t}\n}\n\nfunc TestServer_RejectsLargeFrames(t *testing.T) {\n\tif runtime.GOOS == \"windows\" {\n\t\tt.Skip(\"see golang.org/issue/13434\")\n\t}\n\n\tst := newServerTester(t, nil)\n\tdefer st.Close()\n\tst.greet()\n\n\t// Write too large of a frame (too large by one byte)\n\t// We ignore the return value because it's expected that the server\n\t// will only read the first 9 bytes (the headre) and then disconnect.\n\tst.fr.WriteRawFrame(0xff, 0, 0, make([]byte, defaultMaxReadFrameSize+1))\n\n\tgf := st.wantGoAway()\n\tif gf.ErrCode != ErrCodeFrameSize {\n\t\tt.Errorf(\"GOAWAY err = %v; want %v\", 
gf.ErrCode, ErrCodeFrameSize)\n\t}\n\tif st.serverLogBuf.Len() != 0 {\n\t\t// Previously we spun here for a bit until the GOAWAY disconnect\n\t\t// timer fired, logging while we fired.\n\t\tt.Errorf(\"unexpected server output: %.500s\\n\", st.serverLogBuf.Bytes())\n\t}\n}\n\nfunc TestServer_Handler_Sends_WindowUpdate(t *testing.T) {\n\tpuppet := newHandlerPuppet()\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tpuppet.act(w, r)\n\t})\n\tdefer st.Close()\n\tdefer puppet.done()\n\n\tst.greet()\n\n\tst.writeHeaders(HeadersFrameParam{\n\t\tStreamID:      1, // clients send odd numbers\n\t\tBlockFragment: st.encodeHeader(\":method\", \"POST\"),\n\t\tEndStream:     false, // data coming\n\t\tEndHeaders:    true,\n\t})\n\tst.writeData(1, false, []byte(\"abcdef\"))\n\tpuppet.do(readBodyHandler(t, \"abc\"))\n\tst.wantWindowUpdate(0, 3)\n\tst.wantWindowUpdate(1, 3)\n\n\tpuppet.do(readBodyHandler(t, \"def\"))\n\tst.wantWindowUpdate(0, 3)\n\tst.wantWindowUpdate(1, 3)\n\n\tst.writeData(1, true, []byte(\"ghijkl\")) // END_STREAM here\n\tpuppet.do(readBodyHandler(t, \"ghi\"))\n\tpuppet.do(readBodyHandler(t, \"jkl\"))\n\tst.wantWindowUpdate(0, 3)\n\tst.wantWindowUpdate(0, 3) // no more stream-level, since END_STREAM\n}\n\n// the version of the TestServer_Handler_Sends_WindowUpdate with padding.\n// See golang.org/issue/16556\nfunc TestServer_Handler_Sends_WindowUpdate_Padding(t *testing.T) {\n\tpuppet := newHandlerPuppet()\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tpuppet.act(w, r)\n\t})\n\tdefer st.Close()\n\tdefer puppet.done()\n\n\tst.greet()\n\n\tst.writeHeaders(HeadersFrameParam{\n\t\tStreamID:      1,\n\t\tBlockFragment: st.encodeHeader(\":method\", \"POST\"),\n\t\tEndStream:     false,\n\t\tEndHeaders:    true,\n\t})\n\tst.writeDataPadded(1, false, []byte(\"abcdef\"), []byte{0, 0, 0, 0})\n\n\t// Expect to immediately get our 5 bytes of padding back for\n\t// both the connection and stream (4 bytes of 
padding + 1 byte of length)\n\tst.wantWindowUpdate(0, 5)\n\tst.wantWindowUpdate(1, 5)\n\n\tpuppet.do(readBodyHandler(t, \"abc\"))\n\tst.wantWindowUpdate(0, 3)\n\tst.wantWindowUpdate(1, 3)\n\n\tpuppet.do(readBodyHandler(t, \"def\"))\n\tst.wantWindowUpdate(0, 3)\n\tst.wantWindowUpdate(1, 3)\n}\n\nfunc TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) {\n\tst := newServerTester(t, nil)\n\tdefer st.Close()\n\tst.greet()\n\tif err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgf := st.wantGoAway()\n\tif gf.ErrCode != ErrCodeFlowControl {\n\t\tt.Errorf(\"GOAWAY err = %v; want %v\", gf.ErrCode, ErrCodeFlowControl)\n\t}\n\tif gf.LastStreamID != 0 {\n\t\tt.Errorf(\"GOAWAY last stream ID = %v; want %v\", gf.LastStreamID, 0)\n\t}\n}\n\nfunc TestServer_Send_RstStream_After_Bogus_WindowUpdate(t *testing.T) {\n\tinHandler := make(chan bool)\n\tblockHandler := make(chan bool)\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tinHandler <- true\n\t\t<-blockHandler\n\t})\n\tdefer st.Close()\n\tdefer close(blockHandler)\n\tst.greet()\n\tst.writeHeaders(HeadersFrameParam{\n\t\tStreamID:      1,\n\t\tBlockFragment: st.encodeHeader(\":method\", \"POST\"),\n\t\tEndStream:     false, // keep it open\n\t\tEndHeaders:    true,\n\t})\n\t<-inHandler\n\t// Send a bogus window update:\n\tif err := st.fr.WriteWindowUpdate(1, 1<<31-1); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tst.wantRSTStream(1, ErrCodeFlowControl)\n}\n\n// testServerPostUnblock sends a hanging POST with unsent data to handler,\n// then runs fn once in the handler, and verifies that the error returned from\n// handler is acceptable. 
It fails if takes over 5 seconds for handler to exit.\nfunc testServerPostUnblock(t *testing.T,\n\thandler func(http.ResponseWriter, *http.Request) error,\n\tfn func(*serverTester),\n\tcheckErr func(error),\n\totherHeaders ...string) {\n\tinHandler := make(chan bool)\n\terrc := make(chan error, 1)\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tinHandler <- true\n\t\terrc <- handler(w, r)\n\t})\n\tdefer st.Close()\n\tst.greet()\n\tst.writeHeaders(HeadersFrameParam{\n\t\tStreamID:      1,\n\t\tBlockFragment: st.encodeHeader(append([]string{\":method\", \"POST\"}, otherHeaders...)...),\n\t\tEndStream:     false, // keep it open\n\t\tEndHeaders:    true,\n\t})\n\t<-inHandler\n\tfn(st)\n\tselect {\n\tcase err := <-errc:\n\t\tif checkErr != nil {\n\t\t\tcheckErr(err)\n\t\t}\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"timeout waiting for Handler to return\")\n\t}\n}\n\nfunc TestServer_RSTStream_Unblocks_Read(t *testing.T) {\n\ttestServerPostUnblock(t,\n\t\tfunc(w http.ResponseWriter, r *http.Request) (err error) {\n\t\t\t_, err = r.Body.Read(make([]byte, 1))\n\t\t\treturn\n\t\t},\n\t\tfunc(st *serverTester) {\n\t\t\tif err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t},\n\t\tfunc(err error) {\n\t\t\twant := StreamError{StreamID: 0x1, Code: 0x8}\n\t\t\tif !reflect.DeepEqual(err, want) {\n\t\t\t\tt.Errorf(\"Read error = %v; want %v\", err, want)\n\t\t\t}\n\t\t},\n\t)\n}\n\nfunc TestServer_RSTStream_Unblocks_Header_Write(t *testing.T) {\n\t// Run this test a bunch, because it doesn't always\n\t// deadlock. 
But with a bunch, it did.\n\tn := 50\n\tif testing.Short() {\n\t\tn = 5\n\t}\n\tfor i := 0; i < n; i++ {\n\t\ttestServer_RSTStream_Unblocks_Header_Write(t)\n\t}\n}\n\nfunc testServer_RSTStream_Unblocks_Header_Write(t *testing.T) {\n\tinHandler := make(chan bool, 1)\n\tunblockHandler := make(chan bool, 1)\n\theaderWritten := make(chan bool, 1)\n\twroteRST := make(chan bool, 1)\n\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tinHandler <- true\n\t\t<-wroteRST\n\t\tw.Header().Set(\"foo\", \"bar\")\n\t\tw.WriteHeader(200)\n\t\tw.(http.Flusher).Flush()\n\t\theaderWritten <- true\n\t\t<-unblockHandler\n\t})\n\tdefer st.Close()\n\n\tst.greet()\n\tst.writeHeaders(HeadersFrameParam{\n\t\tStreamID:      1,\n\t\tBlockFragment: st.encodeHeader(\":method\", \"POST\"),\n\t\tEndStream:     false, // keep it open\n\t\tEndHeaders:    true,\n\t})\n\t<-inHandler\n\tif err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {\n\t\tt.Fatal(err)\n\t}\n\twroteRST <- true\n\tst.awaitIdle()\n\tselect {\n\tcase <-headerWritten:\n\tcase <-time.After(2 * time.Second):\n\t\tt.Error(\"timeout waiting for header write\")\n\t}\n\tunblockHandler <- true\n}\n\nfunc TestServer_DeadConn_Unblocks_Read(t *testing.T) {\n\ttestServerPostUnblock(t,\n\t\tfunc(w http.ResponseWriter, r *http.Request) (err error) {\n\t\t\t_, err = r.Body.Read(make([]byte, 1))\n\t\t\treturn\n\t\t},\n\t\tfunc(st *serverTester) { st.cc.Close() },\n\t\tfunc(err error) {\n\t\t\tif err == nil {\n\t\t\t\tt.Error(\"unexpected nil error from Request.Body.Read\")\n\t\t\t}\n\t\t},\n\t)\n}\n\nvar blockUntilClosed = func(w http.ResponseWriter, r *http.Request) error {\n\t<-w.(http.CloseNotifier).CloseNotify()\n\treturn nil\n}\n\nfunc TestServer_CloseNotify_After_RSTStream(t *testing.T) {\n\ttestServerPostUnblock(t, blockUntilClosed, func(st *serverTester) {\n\t\tif err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}, nil)\n}\n\nfunc 
TestServer_CloseNotify_After_ConnClose(t *testing.T) {\n\ttestServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { st.cc.Close() }, nil)\n}\n\n// that CloseNotify unblocks after a stream error due to the client's\n// problem that's unrelated to them explicitly canceling it (which is\n// TestServer_CloseNotify_After_RSTStream above)\nfunc TestServer_CloseNotify_After_StreamError(t *testing.T) {\n\ttestServerPostUnblock(t, blockUntilClosed, func(st *serverTester) {\n\t\t// data longer than declared Content-Length => stream error\n\t\tst.writeData(1, true, []byte(\"1234\"))\n\t}, nil, \"content-length\", \"3\")\n}\n\nfunc TestServer_StateTransitions(t *testing.T) {\n\tvar st *serverTester\n\tinHandler := make(chan bool)\n\twriteData := make(chan bool)\n\tleaveHandler := make(chan bool)\n\tst = newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tinHandler <- true\n\t\tif st.stream(1) == nil {\n\t\t\tt.Errorf(\"nil stream 1 in handler\")\n\t\t}\n\t\tif got, want := st.streamState(1), stateOpen; got != want {\n\t\t\tt.Errorf(\"in handler, state is %v; want %v\", got, want)\n\t\t}\n\t\twriteData <- true\n\t\tif n, err := r.Body.Read(make([]byte, 1)); n != 0 || err != io.EOF {\n\t\t\tt.Errorf(\"body read = %d, %v; want 0, EOF\", n, err)\n\t\t}\n\t\tif got, want := st.streamState(1), stateHalfClosedRemote; got != want {\n\t\t\tt.Errorf(\"in handler, state is %v; want %v\", got, want)\n\t\t}\n\n\t\t<-leaveHandler\n\t})\n\tst.greet()\n\tif st.stream(1) != nil {\n\t\tt.Fatal(\"stream 1 should be empty\")\n\t}\n\tif got := st.streamState(1); got != stateIdle {\n\t\tt.Fatalf(\"stream 1 should be idle; got %v\", got)\n\t}\n\n\tst.writeHeaders(HeadersFrameParam{\n\t\tStreamID:      1,\n\t\tBlockFragment: st.encodeHeader(\":method\", \"POST\"),\n\t\tEndStream:     false, // keep it open\n\t\tEndHeaders:    true,\n\t})\n\t<-inHandler\n\t<-writeData\n\tst.writeData(1, true, nil)\n\n\tleaveHandler <- true\n\thf := st.wantHeaders()\n\tif !hf.StreamEnded() 
{\n\t\tt.Fatal(\"expected END_STREAM flag\")\n\t}\n\n\tif got, want := st.streamState(1), stateClosed; got != want {\n\t\tt.Errorf(\"at end, state is %v; want %v\", got, want)\n\t}\n\tif st.stream(1) != nil {\n\t\tt.Fatal(\"at end, stream 1 should be gone\")\n\t}\n}\n\n// test HEADERS w/o EndHeaders + another HEADERS (should get rejected)\nfunc TestServer_Rejects_HeadersNoEnd_Then_Headers(t *testing.T) {\n\ttestServerRejectsConn(t, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1,\n\t\t\tBlockFragment: st.encodeHeader(),\n\t\t\tEndStream:     true,\n\t\t\tEndHeaders:    false,\n\t\t})\n\t\tst.writeHeaders(HeadersFrameParam{ // Not a continuation.\n\t\t\tStreamID:      3, // different stream.\n\t\t\tBlockFragment: st.encodeHeader(),\n\t\t\tEndStream:     true,\n\t\t\tEndHeaders:    true,\n\t\t})\n\t})\n}\n\n// test HEADERS w/o EndHeaders + PING (should get rejected)\nfunc TestServer_Rejects_HeadersNoEnd_Then_Ping(t *testing.T) {\n\ttestServerRejectsConn(t, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1,\n\t\t\tBlockFragment: st.encodeHeader(),\n\t\t\tEndStream:     true,\n\t\t\tEndHeaders:    false,\n\t\t})\n\t\tif err := st.fr.WritePing(false, [8]byte{}); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t})\n}\n\n// test HEADERS w/ EndHeaders + a continuation HEADERS (should get rejected)\nfunc TestServer_Rejects_HeadersEnd_Then_Continuation(t *testing.T) {\n\ttestServerRejectsConn(t, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1,\n\t\t\tBlockFragment: st.encodeHeader(),\n\t\t\tEndStream:     true,\n\t\t\tEndHeaders:    true,\n\t\t})\n\t\tst.wantHeaders()\n\t\tif err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, \"foo\", \"bar\")); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t})\n}\n\n// test HEADERS w/o EndHeaders + a continuation HEADERS on wrong stream ID\nfunc TestServer_Rejects_HeadersNoEnd_Then_ContinuationWrongStream(t 
*testing.T) {\n\ttestServerRejectsConn(t, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1,\n\t\t\tBlockFragment: st.encodeHeader(),\n\t\t\tEndStream:     true,\n\t\t\tEndHeaders:    false,\n\t\t})\n\t\tif err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, \"foo\", \"bar\")); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t})\n}\n\n// No HEADERS on stream 0.\nfunc TestServer_Rejects_Headers0(t *testing.T) {\n\ttestServerRejectsConn(t, func(st *serverTester) {\n\t\tst.fr.AllowIllegalWrites = true\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      0,\n\t\t\tBlockFragment: st.encodeHeader(),\n\t\t\tEndStream:     true,\n\t\t\tEndHeaders:    true,\n\t\t})\n\t})\n}\n\n// No CONTINUATION on stream 0.\nfunc TestServer_Rejects_Continuation0(t *testing.T) {\n\ttestServerRejectsConn(t, func(st *serverTester) {\n\t\tst.fr.AllowIllegalWrites = true\n\t\tif err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t})\n}\n\n// No PRIORITY on stream 0.\nfunc TestServer_Rejects_Priority0(t *testing.T) {\n\ttestServerRejectsConn(t, func(st *serverTester) {\n\t\tst.fr.AllowIllegalWrites = true\n\t\tst.writePriority(0, PriorityParam{StreamDep: 1})\n\t})\n}\n\n// No HEADERS frame with a self-dependence.\nfunc TestServer_Rejects_HeadersSelfDependence(t *testing.T) {\n\ttestServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {\n\t\tst.fr.AllowIllegalWrites = true\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1,\n\t\t\tBlockFragment: st.encodeHeader(),\n\t\t\tEndStream:     true,\n\t\t\tEndHeaders:    true,\n\t\t\tPriority:      PriorityParam{StreamDep: 1},\n\t\t})\n\t})\n}\n\n// No PRIORTY frame with a self-dependence.\nfunc TestServer_Rejects_PrioritySelfDependence(t *testing.T) {\n\ttestServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {\n\t\tst.fr.AllowIllegalWrites = true\n\t\tst.writePriority(1, PriorityParam{StreamDep: 
1})\n\t})\n}\n\nfunc TestServer_Rejects_PushPromise(t *testing.T) {\n\ttestServerRejectsConn(t, func(st *serverTester) {\n\t\tpp := PushPromiseParam{\n\t\t\tStreamID:  1,\n\t\t\tPromiseID: 3,\n\t\t}\n\t\tif err := st.fr.WritePushPromise(pp); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t})\n}\n\n// testServerRejectsConn tests that the server hangs up with a GOAWAY\n// frame and a server close after the client does something\n// deserving a CONNECTION_ERROR.\nfunc testServerRejectsConn(t *testing.T, writeReq func(*serverTester)) {\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})\n\tst.addLogFilter(\"connection error: PROTOCOL_ERROR\")\n\tdefer st.Close()\n\tst.greet()\n\twriteReq(st)\n\n\tst.wantGoAway()\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\tfr, err := st.fr.ReadFrame()\n\t\tif err == nil {\n\t\t\terr = fmt.Errorf(\"got frame of type %T\", fr)\n\t\t}\n\t\terrc <- err\n\t}()\n\tselect {\n\tcase err := <-errc:\n\t\tif err != io.EOF {\n\t\t\tt.Errorf(\"ReadFrame = %v; want io.EOF\", err)\n\t\t}\n\tcase <-time.After(2 * time.Second):\n\t\tt.Error(\"timeout waiting for disconnect\")\n\t}\n}\n\n// testServerRejectsStream tests that the server sends a RST_STREAM with the provided\n// error code after a client sends a bogus request.\nfunc testServerRejectsStream(t *testing.T, code ErrCode, writeReq func(*serverTester)) {\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})\n\tdefer st.Close()\n\tst.greet()\n\twriteReq(st)\n\tst.wantRSTStream(1, code)\n}\n\n// testServerRequest sets up an idle HTTP/2 connection and lets you\n// write a single request with writeReq, and then verify that the\n// *http.Request is built correctly in checkReq.\nfunc testServerRequest(t *testing.T, writeReq func(*serverTester), checkReq func(*http.Request)) {\n\tgotReq := make(chan bool, 1)\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Body == nil {\n\t\t\tt.Fatal(\"nil 
Body\")\n\t\t}\n\t\tcheckReq(r)\n\t\tgotReq <- true\n\t})\n\tdefer st.Close()\n\n\tst.greet()\n\twriteReq(st)\n\n\tselect {\n\tcase <-gotReq:\n\tcase <-time.After(2 * time.Second):\n\t\tt.Error(\"timeout waiting for request\")\n\t}\n}\n\nfunc getSlash(st *serverTester) { st.bodylessReq1() }\n\nfunc TestServer_Response_NoData(t *testing.T) {\n\ttestServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {\n\t\t// Nothing.\n\t\treturn nil\n\t}, func(st *serverTester) {\n\t\tgetSlash(st)\n\t\thf := st.wantHeaders()\n\t\tif !hf.StreamEnded() {\n\t\t\tt.Fatal(\"want END_STREAM flag\")\n\t\t}\n\t\tif !hf.HeadersEnded() {\n\t\t\tt.Fatal(\"want END_HEADERS flag\")\n\t\t}\n\t})\n}\n\nfunc TestServer_Response_NoData_Header_FooBar(t *testing.T) {\n\ttestServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {\n\t\tw.Header().Set(\"Foo-Bar\", \"some-value\")\n\t\treturn nil\n\t}, func(st *serverTester) {\n\t\tgetSlash(st)\n\t\thf := st.wantHeaders()\n\t\tif !hf.StreamEnded() {\n\t\t\tt.Fatal(\"want END_STREAM flag\")\n\t\t}\n\t\tif !hf.HeadersEnded() {\n\t\t\tt.Fatal(\"want END_HEADERS flag\")\n\t\t}\n\t\tgoth := st.decodeHeader(hf.HeaderBlockFragment())\n\t\twanth := [][2]string{\n\t\t\t{\":status\", \"200\"},\n\t\t\t{\"foo-bar\", \"some-value\"},\n\t\t\t{\"content-type\", \"text/plain; charset=utf-8\"},\n\t\t\t{\"content-length\", \"0\"},\n\t\t}\n\t\tif !reflect.DeepEqual(goth, wanth) {\n\t\t\tt.Errorf(\"Got headers %v; want %v\", goth, wanth)\n\t\t}\n\t})\n}\n\nfunc TestServer_Response_Data_Sniff_DoesntOverride(t *testing.T) {\n\tconst msg = \"<html>this is HTML.\"\n\ttestServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {\n\t\tw.Header().Set(\"Content-Type\", \"foo/bar\")\n\t\tio.WriteString(w, msg)\n\t\treturn nil\n\t}, func(st *serverTester) {\n\t\tgetSlash(st)\n\t\thf := st.wantHeaders()\n\t\tif hf.StreamEnded() {\n\t\t\tt.Fatal(\"don't want END_STREAM, expecting data\")\n\t\t}\n\t\tif !hf.HeadersEnded() 
{\n\t\t\tt.Fatal(\"want END_HEADERS flag\")\n\t\t}\n\t\tgoth := st.decodeHeader(hf.HeaderBlockFragment())\n\t\twanth := [][2]string{\n\t\t\t{\":status\", \"200\"},\n\t\t\t{\"content-type\", \"foo/bar\"},\n\t\t\t{\"content-length\", strconv.Itoa(len(msg))},\n\t\t}\n\t\tif !reflect.DeepEqual(goth, wanth) {\n\t\t\tt.Errorf(\"Got headers %v; want %v\", goth, wanth)\n\t\t}\n\t\tdf := st.wantData()\n\t\tif !df.StreamEnded() {\n\t\t\tt.Error(\"expected DATA to have END_STREAM flag\")\n\t\t}\n\t\tif got := string(df.Data()); got != msg {\n\t\t\tt.Errorf(\"got DATA %q; want %q\", got, msg)\n\t\t}\n\t})\n}\n\nfunc TestServer_Response_TransferEncoding_chunked(t *testing.T) {\n\tconst msg = \"hi\"\n\ttestServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {\n\t\tw.Header().Set(\"Transfer-Encoding\", \"chunked\") // should be stripped\n\t\tio.WriteString(w, msg)\n\t\treturn nil\n\t}, func(st *serverTester) {\n\t\tgetSlash(st)\n\t\thf := st.wantHeaders()\n\t\tgoth := st.decodeHeader(hf.HeaderBlockFragment())\n\t\twanth := [][2]string{\n\t\t\t{\":status\", \"200\"},\n\t\t\t{\"content-type\", \"text/plain; charset=utf-8\"},\n\t\t\t{\"content-length\", strconv.Itoa(len(msg))},\n\t\t}\n\t\tif !reflect.DeepEqual(goth, wanth) {\n\t\t\tt.Errorf(\"Got headers %v; want %v\", goth, wanth)\n\t\t}\n\t})\n}\n\n// Header accessed only after the initial write.\nfunc TestServer_Response_Data_IgnoreHeaderAfterWrite_After(t *testing.T) {\n\tconst msg = \"<html>this is HTML.\"\n\ttestServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {\n\t\tio.WriteString(w, msg)\n\t\tw.Header().Set(\"foo\", \"should be ignored\")\n\t\treturn nil\n\t}, func(st *serverTester) {\n\t\tgetSlash(st)\n\t\thf := st.wantHeaders()\n\t\tif hf.StreamEnded() {\n\t\t\tt.Fatal(\"unexpected END_STREAM\")\n\t\t}\n\t\tif !hf.HeadersEnded() {\n\t\t\tt.Fatal(\"want END_HEADERS flag\")\n\t\t}\n\t\tgoth := st.decodeHeader(hf.HeaderBlockFragment())\n\t\twanth := [][2]string{\n\t\t\t{\":status\", 
\"200\"},\n\t\t\t{\"content-type\", \"text/html; charset=utf-8\"},\n\t\t\t{\"content-length\", strconv.Itoa(len(msg))},\n\t\t}\n\t\tif !reflect.DeepEqual(goth, wanth) {\n\t\t\tt.Errorf(\"Got headers %v; want %v\", goth, wanth)\n\t\t}\n\t})\n}\n\n// Header accessed before the initial write and later mutated.\nfunc TestServer_Response_Data_IgnoreHeaderAfterWrite_Overwrite(t *testing.T) {\n\tconst msg = \"<html>this is HTML.\"\n\ttestServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {\n\t\tw.Header().Set(\"foo\", \"proper value\")\n\t\tio.WriteString(w, msg)\n\t\tw.Header().Set(\"foo\", \"should be ignored\")\n\t\treturn nil\n\t}, func(st *serverTester) {\n\t\tgetSlash(st)\n\t\thf := st.wantHeaders()\n\t\tif hf.StreamEnded() {\n\t\t\tt.Fatal(\"unexpected END_STREAM\")\n\t\t}\n\t\tif !hf.HeadersEnded() {\n\t\t\tt.Fatal(\"want END_HEADERS flag\")\n\t\t}\n\t\tgoth := st.decodeHeader(hf.HeaderBlockFragment())\n\t\twanth := [][2]string{\n\t\t\t{\":status\", \"200\"},\n\t\t\t{\"foo\", \"proper value\"},\n\t\t\t{\"content-type\", \"text/html; charset=utf-8\"},\n\t\t\t{\"content-length\", strconv.Itoa(len(msg))},\n\t\t}\n\t\tif !reflect.DeepEqual(goth, wanth) {\n\t\t\tt.Errorf(\"Got headers %v; want %v\", goth, wanth)\n\t\t}\n\t})\n}\n\nfunc TestServer_Response_Data_SniffLenType(t *testing.T) {\n\tconst msg = \"<html>this is HTML.\"\n\ttestServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {\n\t\tio.WriteString(w, msg)\n\t\treturn nil\n\t}, func(st *serverTester) {\n\t\tgetSlash(st)\n\t\thf := st.wantHeaders()\n\t\tif hf.StreamEnded() {\n\t\t\tt.Fatal(\"don't want END_STREAM, expecting data\")\n\t\t}\n\t\tif !hf.HeadersEnded() {\n\t\t\tt.Fatal(\"want END_HEADERS flag\")\n\t\t}\n\t\tgoth := st.decodeHeader(hf.HeaderBlockFragment())\n\t\twanth := [][2]string{\n\t\t\t{\":status\", \"200\"},\n\t\t\t{\"content-type\", \"text/html; charset=utf-8\"},\n\t\t\t{\"content-length\", strconv.Itoa(len(msg))},\n\t\t}\n\t\tif !reflect.DeepEqual(goth, 
wanth) {\n\t\t\tt.Errorf(\"Got headers %v; want %v\", goth, wanth)\n\t\t}\n\t\tdf := st.wantData()\n\t\tif !df.StreamEnded() {\n\t\t\tt.Error(\"expected DATA to have END_STREAM flag\")\n\t\t}\n\t\tif got := string(df.Data()); got != msg {\n\t\t\tt.Errorf(\"got DATA %q; want %q\", got, msg)\n\t\t}\n\t})\n}\n\nfunc TestServer_Response_Header_Flush_MidWrite(t *testing.T) {\n\tconst msg = \"<html>this is HTML\"\n\tconst msg2 = \", and this is the next chunk\"\n\ttestServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {\n\t\tio.WriteString(w, msg)\n\t\tw.(http.Flusher).Flush()\n\t\tio.WriteString(w, msg2)\n\t\treturn nil\n\t}, func(st *serverTester) {\n\t\tgetSlash(st)\n\t\thf := st.wantHeaders()\n\t\tif hf.StreamEnded() {\n\t\t\tt.Fatal(\"unexpected END_STREAM flag\")\n\t\t}\n\t\tif !hf.HeadersEnded() {\n\t\t\tt.Fatal(\"want END_HEADERS flag\")\n\t\t}\n\t\tgoth := st.decodeHeader(hf.HeaderBlockFragment())\n\t\twanth := [][2]string{\n\t\t\t{\":status\", \"200\"},\n\t\t\t{\"content-type\", \"text/html; charset=utf-8\"}, // sniffed\n\t\t\t// and no content-length\n\t\t}\n\t\tif !reflect.DeepEqual(goth, wanth) {\n\t\t\tt.Errorf(\"Got headers %v; want %v\", goth, wanth)\n\t\t}\n\t\t{\n\t\t\tdf := st.wantData()\n\t\t\tif df.StreamEnded() {\n\t\t\t\tt.Error(\"unexpected END_STREAM flag\")\n\t\t\t}\n\t\t\tif got := string(df.Data()); got != msg {\n\t\t\t\tt.Errorf(\"got DATA %q; want %q\", got, msg)\n\t\t\t}\n\t\t}\n\t\t{\n\t\t\tdf := st.wantData()\n\t\t\tif !df.StreamEnded() {\n\t\t\t\tt.Error(\"wanted END_STREAM flag on last data chunk\")\n\t\t\t}\n\t\t\tif got := string(df.Data()); got != msg2 {\n\t\t\t\tt.Errorf(\"got DATA %q; want %q\", got, msg2)\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc TestServer_Response_LargeWrite(t *testing.T) {\n\tconst size = 1 << 20\n\tconst maxFrameSize = 16 << 10\n\ttestServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {\n\t\tn, err := w.Write(bytes.Repeat([]byte(\"a\"), size))\n\t\tif err != nil {\n\t\t\treturn 
fmt.Errorf(\"Write error: %v\", err)\n\t\t}\n\t\tif n != size {\n\t\t\treturn fmt.Errorf(\"wrong size %d from Write\", n)\n\t\t}\n\t\treturn nil\n\t}, func(st *serverTester) {\n\t\tif err := st.fr.WriteSettings(\n\t\t\tSetting{SettingInitialWindowSize, 0},\n\t\t\tSetting{SettingMaxFrameSize, maxFrameSize},\n\t\t); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tst.wantSettingsAck()\n\n\t\tgetSlash(st) // make the single request\n\n\t\t// Give the handler quota to write:\n\t\tif err := st.fr.WriteWindowUpdate(1, size); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\t// Give the handler quota to write to connection-level\n\t\t// window as well\n\t\tif err := st.fr.WriteWindowUpdate(0, size); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\thf := st.wantHeaders()\n\t\tif hf.StreamEnded() {\n\t\t\tt.Fatal(\"unexpected END_STREAM flag\")\n\t\t}\n\t\tif !hf.HeadersEnded() {\n\t\t\tt.Fatal(\"want END_HEADERS flag\")\n\t\t}\n\t\tgoth := st.decodeHeader(hf.HeaderBlockFragment())\n\t\twanth := [][2]string{\n\t\t\t{\":status\", \"200\"},\n\t\t\t{\"content-type\", \"text/plain; charset=utf-8\"}, // sniffed\n\t\t\t// and no content-length\n\t\t}\n\t\tif !reflect.DeepEqual(goth, wanth) {\n\t\t\tt.Errorf(\"Got headers %v; want %v\", goth, wanth)\n\t\t}\n\t\tvar bytes, frames int\n\t\tfor {\n\t\t\tdf := st.wantData()\n\t\t\tbytes += len(df.Data())\n\t\t\tframes++\n\t\t\tfor _, b := range df.Data() {\n\t\t\t\tif b != 'a' {\n\t\t\t\t\tt.Fatal(\"non-'a' byte seen in DATA\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tif df.StreamEnded() {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif bytes != size {\n\t\t\tt.Errorf(\"Got %d bytes; want %d\", bytes, size)\n\t\t}\n\t\tif want := int(size / maxFrameSize); frames < want || frames > want*2 {\n\t\t\tt.Errorf(\"Got %d frames; want %d\", frames, size)\n\t\t}\n\t})\n}\n\n// Test that the handler can't write more than the client allows\nfunc TestServer_Response_LargeWrite_FlowControlled(t *testing.T) {\n\t// Make these reads. 
Before each read, the client adds exactly enough\n\t// flow-control to satisfy the read. Numbers chosen arbitrarily.\n\treads := []int{123, 1, 13, 127}\n\tsize := 0\n\tfor _, n := range reads {\n\t\tsize += n\n\t}\n\n\ttestServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {\n\t\tw.(http.Flusher).Flush()\n\t\tn, err := w.Write(bytes.Repeat([]byte(\"a\"), size))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Write error: %v\", err)\n\t\t}\n\t\tif n != size {\n\t\t\treturn fmt.Errorf(\"wrong size %d from Write\", n)\n\t\t}\n\t\treturn nil\n\t}, func(st *serverTester) {\n\t\t// Set the window size to something explicit for this test.\n\t\t// It's also how much initial data we expect.\n\t\tif err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, uint32(reads[0])}); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tst.wantSettingsAck()\n\n\t\tgetSlash(st) // make the single request\n\n\t\thf := st.wantHeaders()\n\t\tif hf.StreamEnded() {\n\t\t\tt.Fatal(\"unexpected END_STREAM flag\")\n\t\t}\n\t\tif !hf.HeadersEnded() {\n\t\t\tt.Fatal(\"want END_HEADERS flag\")\n\t\t}\n\n\t\tdf := st.wantData()\n\t\tif got := len(df.Data()); got != reads[0] {\n\t\t\tt.Fatalf(\"Initial window size = %d but got DATA with %d bytes\", reads[0], got)\n\t\t}\n\n\t\tfor _, quota := range reads[1:] {\n\t\t\tif err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdf := st.wantData()\n\t\t\tif int(quota) != len(df.Data()) {\n\t\t\t\tt.Fatalf(\"read %d bytes after giving %d quota\", len(df.Data()), quota)\n\t\t\t}\n\t\t}\n\t})\n}\n\n// Test that the handler blocked in a Write is unblocked if the server sends a RST_STREAM.\nfunc TestServer_Response_RST_Unblocks_LargeWrite(t *testing.T) {\n\tconst size = 1 << 20\n\tconst maxFrameSize = 16 << 10\n\ttestServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {\n\t\tw.(http.Flusher).Flush()\n\t\terrc := make(chan error, 1)\n\t\tgo func() {\n\t\t\t_, err := 
w.Write(bytes.Repeat([]byte(\"a\"), size))\n\t\t\terrc <- err\n\t\t}()\n\t\tselect {\n\t\tcase err := <-errc:\n\t\t\tif err == nil {\n\t\t\t\treturn errors.New(\"unexpected nil error from Write in handler\")\n\t\t\t}\n\t\t\treturn nil\n\t\tcase <-time.After(2 * time.Second):\n\t\t\treturn errors.New(\"timeout waiting for Write in handler\")\n\t\t}\n\t}, func(st *serverTester) {\n\t\tif err := st.fr.WriteSettings(\n\t\t\tSetting{SettingInitialWindowSize, 0},\n\t\t\tSetting{SettingMaxFrameSize, maxFrameSize},\n\t\t); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tst.wantSettingsAck()\n\n\t\tgetSlash(st) // make the single request\n\n\t\thf := st.wantHeaders()\n\t\tif hf.StreamEnded() {\n\t\t\tt.Fatal(\"unexpected END_STREAM flag\")\n\t\t}\n\t\tif !hf.HeadersEnded() {\n\t\t\tt.Fatal(\"want END_HEADERS flag\")\n\t\t}\n\n\t\tif err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t})\n}\n\nfunc TestServer_Response_Empty_Data_Not_FlowControlled(t *testing.T) {\n\ttestServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {\n\t\tw.(http.Flusher).Flush()\n\t\t// Nothing; send empty DATA\n\t\treturn nil\n\t}, func(st *serverTester) {\n\t\t// Handler gets no data quota:\n\t\tif err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, 0}); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tst.wantSettingsAck()\n\n\t\tgetSlash(st) // make the single request\n\n\t\thf := st.wantHeaders()\n\t\tif hf.StreamEnded() {\n\t\t\tt.Fatal(\"unexpected END_STREAM flag\")\n\t\t}\n\t\tif !hf.HeadersEnded() {\n\t\t\tt.Fatal(\"want END_HEADERS flag\")\n\t\t}\n\n\t\tdf := st.wantData()\n\t\tif got := len(df.Data()); got != 0 {\n\t\t\tt.Fatalf(\"unexpected %d DATA bytes; want 0\", got)\n\t\t}\n\t\tif !df.StreamEnded() {\n\t\t\tt.Fatal(\"DATA didn't have END_STREAM\")\n\t\t}\n\t})\n}\n\nfunc TestServer_Response_Automatic100Continue(t *testing.T) {\n\tconst msg = \"foo\"\n\tconst reply = \"bar\"\n\ttestServerResponse(t, func(w http.ResponseWriter, r 
*http.Request) error {\n\t\tif v := r.Header.Get(\"Expect\"); v != \"\" {\n\t\t\tt.Errorf(\"Expect header = %q; want empty\", v)\n\t\t}\n\t\tbuf := make([]byte, len(msg))\n\t\t// This read should trigger the 100-continue being sent.\n\t\tif n, err := io.ReadFull(r.Body, buf); err != nil || n != len(msg) || string(buf) != msg {\n\t\t\treturn fmt.Errorf(\"ReadFull = %q, %v; want %q, nil\", buf[:n], err, msg)\n\t\t}\n\t\t_, err := io.WriteString(w, reply)\n\t\treturn err\n\t}, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1, // clients send odd numbers\n\t\t\tBlockFragment: st.encodeHeader(\":method\", \"POST\", \"expect\", \"100-continue\"),\n\t\t\tEndStream:     false,\n\t\t\tEndHeaders:    true,\n\t\t})\n\t\thf := st.wantHeaders()\n\t\tif hf.StreamEnded() {\n\t\t\tt.Fatal(\"unexpected END_STREAM flag\")\n\t\t}\n\t\tif !hf.HeadersEnded() {\n\t\t\tt.Fatal(\"want END_HEADERS flag\")\n\t\t}\n\t\tgoth := st.decodeHeader(hf.HeaderBlockFragment())\n\t\twanth := [][2]string{\n\t\t\t{\":status\", \"100\"},\n\t\t}\n\t\tif !reflect.DeepEqual(goth, wanth) {\n\t\t\tt.Fatalf(\"Got headers %v; want %v\", goth, wanth)\n\t\t}\n\n\t\t// Okay, they sent status 100, so we can send our\n\t\t// gigantic and/or sensitive \"foo\" payload now.\n\t\tst.writeData(1, true, []byte(msg))\n\n\t\tst.wantWindowUpdate(0, uint32(len(msg)))\n\n\t\thf = st.wantHeaders()\n\t\tif hf.StreamEnded() {\n\t\t\tt.Fatal(\"expected data to follow\")\n\t\t}\n\t\tif !hf.HeadersEnded() {\n\t\t\tt.Fatal(\"want END_HEADERS flag\")\n\t\t}\n\t\tgoth = st.decodeHeader(hf.HeaderBlockFragment())\n\t\twanth = [][2]string{\n\t\t\t{\":status\", \"200\"},\n\t\t\t{\"content-type\", \"text/plain; charset=utf-8\"},\n\t\t\t{\"content-length\", strconv.Itoa(len(reply))},\n\t\t}\n\t\tif !reflect.DeepEqual(goth, wanth) {\n\t\t\tt.Errorf(\"Got headers %v; want %v\", goth, wanth)\n\t\t}\n\n\t\tdf := st.wantData()\n\t\tif string(df.Data()) != reply {\n\t\t\tt.Errorf(\"Client read %q; want %q\", 
df.Data(), reply)\n\t\t}\n\t\tif !df.StreamEnded() {\n\t\t\tt.Errorf(\"expect data stream end\")\n\t\t}\n\t})\n}\n\nfunc TestServer_HandlerWriteErrorOnDisconnect(t *testing.T) {\n\terrc := make(chan error, 1)\n\ttestServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {\n\t\tp := []byte(\"some data.\\n\")\n\t\tfor {\n\t\t\t_, err := w.Write(p)\n\t\t\tif err != nil {\n\t\t\t\terrc <- err\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1,\n\t\t\tBlockFragment: st.encodeHeader(),\n\t\t\tEndStream:     false,\n\t\t\tEndHeaders:    true,\n\t\t})\n\t\thf := st.wantHeaders()\n\t\tif hf.StreamEnded() {\n\t\t\tt.Fatal(\"unexpected END_STREAM flag\")\n\t\t}\n\t\tif !hf.HeadersEnded() {\n\t\t\tt.Fatal(\"want END_HEADERS flag\")\n\t\t}\n\t\t// Close the connection and wait for the handler to (hopefully) notice.\n\t\tst.cc.Close()\n\t\tselect {\n\t\tcase <-errc:\n\t\tcase <-time.After(5 * time.Second):\n\t\t\tt.Error(\"timeout\")\n\t\t}\n\t})\n}\n\nfunc TestServer_Rejects_Too_Many_Streams(t *testing.T) {\n\tconst testPath = \"/some/path\"\n\n\tinHandler := make(chan uint32)\n\tleaveHandler := make(chan bool)\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tid := w.(*responseWriter).rws.stream.id\n\t\tinHandler <- id\n\t\tif id == 1+(defaultMaxStreams+1)*2 && r.URL.Path != testPath {\n\t\t\tt.Errorf(\"decoded final path as %q; want %q\", r.URL.Path, testPath)\n\t\t}\n\t\t<-leaveHandler\n\t})\n\tdefer st.Close()\n\tst.greet()\n\tnextStreamID := uint32(1)\n\tstreamID := func() uint32 {\n\t\tdefer func() { nextStreamID += 2 }()\n\t\treturn nextStreamID\n\t}\n\tsendReq := func(id uint32, headers ...string) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      id,\n\t\t\tBlockFragment: st.encodeHeader(headers...),\n\t\t\tEndStream:     true,\n\t\t\tEndHeaders:    true,\n\t\t})\n\t}\n\tfor i := 0; i < defaultMaxStreams; i++ 
{\n\t\tsendReq(streamID())\n\t\t<-inHandler\n\t}\n\tdefer func() {\n\t\tfor i := 0; i < defaultMaxStreams; i++ {\n\t\t\tleaveHandler <- true\n\t\t}\n\t}()\n\n\t// And this one should cross the limit:\n\t// (It's also sent as a CONTINUATION, to verify we still track the decoder context,\n\t// even if we're rejecting it)\n\trejectID := streamID()\n\theaderBlock := st.encodeHeader(\":path\", testPath)\n\tfrag1, frag2 := headerBlock[:3], headerBlock[3:]\n\tst.writeHeaders(HeadersFrameParam{\n\t\tStreamID:      rejectID,\n\t\tBlockFragment: frag1,\n\t\tEndStream:     true,\n\t\tEndHeaders:    false, // CONTINUATION coming\n\t})\n\tif err := st.fr.WriteContinuation(rejectID, true, frag2); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tst.wantRSTStream(rejectID, ErrCodeProtocol)\n\n\t// But let a handler finish:\n\tleaveHandler <- true\n\tst.wantHeaders()\n\n\t// And now another stream should be able to start:\n\tgoodID := streamID()\n\tsendReq(goodID, \":path\", testPath)\n\tselect {\n\tcase got := <-inHandler:\n\t\tif got != goodID {\n\t\t\tt.Errorf(\"Got stream %d; want %d\", got, goodID)\n\t\t}\n\tcase <-time.After(3 * time.Second):\n\t\tt.Error(\"timeout waiting for handler\")\n\t}\n}\n\n// So many response headers that the server needs to use CONTINUATION frames:\nfunc TestServer_Response_ManyHeaders_With_Continuation(t *testing.T) {\n\ttestServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {\n\t\th := w.Header()\n\t\tfor i := 0; i < 5000; i++ {\n\t\t\th.Set(fmt.Sprintf(\"x-header-%d\", i), fmt.Sprintf(\"x-value-%d\", i))\n\t\t}\n\t\treturn nil\n\t}, func(st *serverTester) {\n\t\tgetSlash(st)\n\t\thf := st.wantHeaders()\n\t\tif hf.HeadersEnded() {\n\t\t\tt.Fatal(\"got unwanted END_HEADERS flag\")\n\t\t}\n\t\tn := 0\n\t\tfor {\n\t\t\tn++\n\t\t\tcf := st.wantContinuation()\n\t\t\tif cf.HeadersEnded() {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif n < 5 {\n\t\t\tt.Errorf(\"Only got %d CONTINUATION frames; expected 5+ (currently 6)\", n)\n\t\t}\n\t})\n}\n\n// This 
previously crashed (reported by Mathieu Lonjaret as observed\n// while using Camlistore) because we got a DATA frame from the client\n// after the handler exited and our logic at the time was wrong,\n// keeping a stream in the map in stateClosed, which tickled an\n// invariant check later when we tried to remove that stream (via\n// defer sc.closeAllStreamsOnConnClose) when the serverConn serve loop\n// ended.\nfunc TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) {\n\ttestServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {\n\t\t// nothing\n\t\treturn nil\n\t}, func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1,\n\t\t\tBlockFragment: st.encodeHeader(),\n\t\t\tEndStream:     false, // DATA is coming\n\t\t\tEndHeaders:    true,\n\t\t})\n\t\thf := st.wantHeaders()\n\t\tif !hf.HeadersEnded() || !hf.StreamEnded() {\n\t\t\tt.Fatalf(\"want END_HEADERS+END_STREAM, got %v\", hf)\n\t\t}\n\n\t\t// Sent when the a Handler closes while a client has\n\t\t// indicated it's still sending DATA:\n\t\tst.wantRSTStream(1, ErrCodeNo)\n\n\t\t// Now the handler has ended, so it's ended its\n\t\t// stream, but the client hasn't closed its side\n\t\t// (stateClosedLocal).  
So send more data and verify\n\t\t// it doesn't crash with an internal invariant panic, like\n\t\t// it did before.\n\t\tst.writeData(1, true, []byte(\"foo\"))\n\n\t\t// Get our flow control bytes back, since the handler didn't get them.\n\t\tst.wantWindowUpdate(0, uint32(len(\"foo\")))\n\n\t\t// Sent after a peer sends data anyway (admittedly the\n\t\t// previous RST_STREAM might've still been in-flight),\n\t\t// but they'll get the more friendly 'cancel' code\n\t\t// first.\n\t\tst.wantRSTStream(1, ErrCodeStreamClosed)\n\n\t\t// Set up a bunch of machinery to record the panic we saw\n\t\t// previously.\n\t\tvar (\n\t\t\tpanMu    sync.Mutex\n\t\t\tpanicVal interface{}\n\t\t)\n\n\t\ttestHookOnPanicMu.Lock()\n\t\ttestHookOnPanic = func(sc *serverConn, pv interface{}) bool {\n\t\t\tpanMu.Lock()\n\t\t\tpanicVal = pv\n\t\t\tpanMu.Unlock()\n\t\t\treturn true\n\t\t}\n\t\ttestHookOnPanicMu.Unlock()\n\n\t\t// Now force the serve loop to end, via closing the connection.\n\t\tst.cc.Close()\n\t\tselect {\n\t\tcase <-st.sc.doneServing:\n\t\t\t// Loop has exited.\n\t\t\tpanMu.Lock()\n\t\t\tgot := panicVal\n\t\t\tpanMu.Unlock()\n\t\t\tif got != nil {\n\t\t\t\tt.Errorf(\"Got panic: %v\", got)\n\t\t\t}\n\t\tcase <-time.After(5 * time.Second):\n\t\t\tt.Error(\"timeout\")\n\t\t}\n\t})\n}\n\nfunc TestServer_Rejects_TLS10(t *testing.T) { testRejectTLS(t, tls.VersionTLS10) }\nfunc TestServer_Rejects_TLS11(t *testing.T) { testRejectTLS(t, tls.VersionTLS11) }\n\nfunc testRejectTLS(t *testing.T, max uint16) {\n\tst := newServerTester(t, nil, func(c *tls.Config) {\n\t\tc.MaxVersion = max\n\t})\n\tdefer st.Close()\n\tgf := st.wantGoAway()\n\tif got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want {\n\t\tt.Errorf(\"Got error code %v; want %v\", got, want)\n\t}\n}\n\nfunc TestServer_Rejects_TLSBadCipher(t *testing.T) {\n\tst := newServerTester(t, nil, func(c *tls.Config) {\n\t\t// Only list bad ones:\n\t\tc.CipherSuites = 
[]uint16{\n\t\t\ttls.TLS_RSA_WITH_RC4_128_SHA,\n\t\t\ttls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\t\ttls.TLS_RSA_WITH_AES_128_CBC_SHA,\n\t\t\ttls.TLS_RSA_WITH_AES_256_CBC_SHA,\n\t\t\ttls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,\n\t\t\ttls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,\n\t\t\ttls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,\n\t\t\ttls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,\n\t\t\ttls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,\n\t\t\ttls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,\n\t\t\ttls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,\n\t\t\tcipher_TLS_RSA_WITH_AES_128_CBC_SHA256,\n\t\t}\n\t})\n\tdefer st.Close()\n\tgf := st.wantGoAway()\n\tif got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want {\n\t\tt.Errorf(\"Got error code %v; want %v\", got, want)\n\t}\n}\n\nfunc TestServer_Advertises_Common_Cipher(t *testing.T) {\n\tconst requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n\tst := newServerTester(t, nil, func(c *tls.Config) {\n\t\t// Have the client only support the one required by the spec.\n\t\tc.CipherSuites = []uint16{requiredSuite}\n\t}, func(ts *httptest.Server) {\n\t\tvar srv *http.Server = ts.Config\n\t\t// Have the server configured with no specific cipher suites.\n\t\t// This tests that Go's defaults include the required one.\n\t\tsrv.TLSConfig = nil\n\t})\n\tdefer st.Close()\n\tst.greet()\n}\n\nfunc (st *serverTester) onHeaderField(f hpack.HeaderField) {\n\tif f.Name == \"date\" {\n\t\treturn\n\t}\n\tst.decodedHeaders = append(st.decodedHeaders, [2]string{f.Name, f.Value})\n}\n\nfunc (st *serverTester) decodeHeader(headerBlock []byte) (pairs [][2]string) {\n\tst.decodedHeaders = nil\n\tif _, err := st.hpackDec.Write(headerBlock); err != nil {\n\t\tst.t.Fatalf(\"hpack decoding error: %v\", err)\n\t}\n\tif err := st.hpackDec.Close(); err != nil {\n\t\tst.t.Fatalf(\"hpack decoding error: %v\", err)\n\t}\n\treturn st.decodedHeaders\n}\n\n// testServerResponse sets up an idle HTTP/2 connection. 
The client function should\n// write a single request that must be handled by the handler. This waits up to 5s\n// for client to return, then up to an additional 2s for the handler to return.\nfunc testServerResponse(t testing.TB,\n\thandler func(http.ResponseWriter, *http.Request) error,\n\tclient func(*serverTester),\n) {\n\terrc := make(chan error, 1)\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Body == nil {\n\t\t\tt.Fatal(\"nil Body\")\n\t\t}\n\t\terrc <- handler(w, r)\n\t})\n\tdefer st.Close()\n\n\tdonec := make(chan bool)\n\tgo func() {\n\t\tdefer close(donec)\n\t\tst.greet()\n\t\tclient(st)\n\t}()\n\n\tselect {\n\tcase <-donec:\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"timeout in client\")\n\t}\n\n\tselect {\n\tcase err := <-errc:\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error in handler: %v\", err)\n\t\t}\n\tcase <-time.After(2 * time.Second):\n\t\tt.Fatal(\"timeout in handler\")\n\t}\n}\n\n// readBodyHandler returns an http Handler func that reads len(want)\n// bytes from r.Body and fails t if the contents read were not\n// the value of want.\nfunc readBodyHandler(t *testing.T, want string) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tbuf := make([]byte, len(want))\n\t\t_, err := io.ReadFull(r.Body, buf)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif string(buf) != want {\n\t\t\tt.Errorf(\"read %q; want %q\", buf, want)\n\t\t}\n\t}\n}\n\n// TestServerWithCurl currently fails, hence the LenientCipherSuites test. 
See:\n//   https://github.com/tatsuhiro-t/nghttp2/issues/140 &\n//   http://sourceforge.net/p/curl/bugs/1472/\nfunc TestServerWithCurl(t *testing.T)                     { testServerWithCurl(t, false) }\nfunc TestServerWithCurl_LenientCipherSuites(t *testing.T) { testServerWithCurl(t, true) }\n\nfunc testServerWithCurl(t *testing.T, permitProhibitedCipherSuites bool) {\n\tif runtime.GOOS != \"linux\" {\n\t\tt.Skip(\"skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway\")\n\t}\n\tif testing.Short() {\n\t\tt.Skip(\"skipping curl test in short mode\")\n\t}\n\trequireCurl(t)\n\tvar gotConn int32\n\ttestHookOnConn = func() { atomic.StoreInt32(&gotConn, 1) }\n\n\tconst msg = \"Hello from curl!\\n\"\n\tts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Foo\", \"Bar\")\n\t\tw.Header().Set(\"Client-Proto\", r.Proto)\n\t\tio.WriteString(w, msg)\n\t}))\n\tConfigureServer(ts.Config, &Server{\n\t\tPermitProhibitedCipherSuites: permitProhibitedCipherSuites,\n\t})\n\tts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config\n\tts.StartTLS()\n\tdefer ts.Close()\n\n\tt.Logf(\"Running test server for curl to hit at: %s\", ts.URL)\n\tcontainer := curl(t, \"--silent\", \"--http2\", \"--insecure\", \"-v\", ts.URL)\n\tdefer kill(container)\n\tresc := make(chan interface{}, 1)\n\tgo func() {\n\t\tres, err := dockerLogs(container)\n\t\tif err != nil {\n\t\t\tresc <- err\n\t\t} else {\n\t\t\tresc <- res\n\t\t}\n\t}()\n\tselect {\n\tcase res := <-resc:\n\t\tif err, ok := res.(error); ok {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tbody := string(res.([]byte))\n\t\t// Search for both \"key: value\" and \"key:value\", since curl changed their format\n\t\t// Our Dockerfile contains the latest version (no space), but just in case people\n\t\t// didn't rebuild, check both.\n\t\tif !strings.Contains(body, \"foo: Bar\") && !strings.Contains(body, \"foo:Bar\") 
{\n\t\t\tt.Errorf(\"didn't see foo: Bar header\")\n\t\t\tt.Logf(\"Got: %s\", body)\n\t\t}\n\t\tif !strings.Contains(body, \"client-proto: HTTP/2\") && !strings.Contains(body, \"client-proto:HTTP/2\") {\n\t\t\tt.Errorf(\"didn't see client-proto: HTTP/2 header\")\n\t\t\tt.Logf(\"Got: %s\", res)\n\t\t}\n\t\tif !strings.Contains(string(res.([]byte)), msg) {\n\t\t\tt.Errorf(\"didn't see %q content\", msg)\n\t\t\tt.Logf(\"Got: %s\", res)\n\t\t}\n\tcase <-time.After(3 * time.Second):\n\t\tt.Errorf(\"timeout waiting for curl\")\n\t}\n\n\tif atomic.LoadInt32(&gotConn) == 0 {\n\t\tt.Error(\"never saw an http2 connection\")\n\t}\n}\n\nvar doh2load = flag.Bool(\"h2load\", false, \"Run h2load test\")\n\nfunc TestServerWithH2Load(t *testing.T) {\n\tif !*doh2load {\n\t\tt.Skip(\"Skipping without --h2load flag.\")\n\t}\n\tif runtime.GOOS != \"linux\" {\n\t\tt.Skip(\"skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway\")\n\t}\n\trequireH2load(t)\n\n\tmsg := strings.Repeat(\"Hello, h2load!\\n\", 5000)\n\tts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tio.WriteString(w, msg)\n\t\tw.(http.Flusher).Flush()\n\t\tio.WriteString(w, msg)\n\t}))\n\tts.StartTLS()\n\tdefer ts.Close()\n\n\tcmd := exec.Command(\"docker\", \"run\", \"--net=host\", \"--entrypoint=/usr/local/bin/h2load\", \"gohttp2/curl\",\n\t\t\"-n100000\", \"-c100\", \"-m100\", ts.URL)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\n// Issue 12843\nfunc TestServerDoS_MaxHeaderListSize(t *testing.T) {\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})\n\tdefer st.Close()\n\n\t// shake hands\n\tframeSize := defaultMaxReadFrameSize\n\tvar advHeaderListSize *uint32\n\tst.greetAndCheckSettings(func(s Setting) error {\n\t\tswitch s.ID {\n\t\tcase SettingMaxFrameSize:\n\t\t\tif s.Val < minMaxFrameSize {\n\t\t\t\tframeSize = 
minMaxFrameSize\n\t\t\t} else if s.Val > maxFrameSize {\n\t\t\t\tframeSize = maxFrameSize\n\t\t\t} else {\n\t\t\t\tframeSize = int(s.Val)\n\t\t\t}\n\t\tcase SettingMaxHeaderListSize:\n\t\t\tadvHeaderListSize = &s.Val\n\t\t}\n\t\treturn nil\n\t})\n\n\tif advHeaderListSize == nil {\n\t\tt.Errorf(\"server didn't advertise a max header list size\")\n\t} else if *advHeaderListSize == 0 {\n\t\tt.Errorf(\"server advertised a max header list size of 0\")\n\t}\n\n\tst.encodeHeaderField(\":method\", \"GET\")\n\tst.encodeHeaderField(\":path\", \"/\")\n\tst.encodeHeaderField(\":scheme\", \"https\")\n\tcookie := strings.Repeat(\"*\", 4058)\n\tst.encodeHeaderField(\"cookie\", cookie)\n\tst.writeHeaders(HeadersFrameParam{\n\t\tStreamID:      1,\n\t\tBlockFragment: st.headerBuf.Bytes(),\n\t\tEndStream:     true,\n\t\tEndHeaders:    false,\n\t})\n\n\t// Capture the short encoding of a duplicate ~4K cookie, now\n\t// that we've already sent it once.\n\tst.headerBuf.Reset()\n\tst.encodeHeaderField(\"cookie\", cookie)\n\n\t// Now send 1MB of it.\n\tconst size = 1 << 20\n\tb := bytes.Repeat(st.headerBuf.Bytes(), size/st.headerBuf.Len())\n\tfor len(b) > 0 {\n\t\tchunk := b\n\t\tif len(chunk) > frameSize {\n\t\t\tchunk = chunk[:frameSize]\n\t\t}\n\t\tb = b[len(chunk):]\n\t\tst.fr.WriteContinuation(1, len(b) == 0, chunk)\n\t}\n\n\th := st.wantHeaders()\n\tif !h.HeadersEnded() {\n\t\tt.Fatalf(\"Got HEADERS without END_HEADERS set: %v\", h)\n\t}\n\theaders := st.decodeHeader(h.HeaderBlockFragment())\n\twant := [][2]string{\n\t\t{\":status\", \"431\"},\n\t\t{\"content-type\", \"text/html; charset=utf-8\"},\n\t\t{\"content-length\", \"63\"},\n\t}\n\tif !reflect.DeepEqual(headers, want) {\n\t\tt.Errorf(\"Headers mismatch.\\n got: %q\\nwant: %q\\n\", headers, want)\n\t}\n}\n\nfunc TestCompressionErrorOnWrite(t *testing.T) {\n\tconst maxStrLen = 8 << 10\n\tvar serverConfig *http.Server\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\t// No response body.\n\t}, 
func(ts *httptest.Server) {\n\t\tserverConfig = ts.Config\n\t\tserverConfig.MaxHeaderBytes = maxStrLen\n\t})\n\tst.addLogFilter(\"connection error: COMPRESSION_ERROR\")\n\tdefer st.Close()\n\tst.greet()\n\n\tmaxAllowed := st.sc.framer.maxHeaderStringLen()\n\n\t// Crank this up, now that we have a conn connected with the\n\t// hpack.Decoder's max string length set has been initialized\n\t// from the earlier low ~8K value. We want this higher so don't\n\t// hit the max header list size. We only want to test hitting\n\t// the max string size.\n\tserverConfig.MaxHeaderBytes = 1 << 20\n\n\t// First a request with a header that's exactly the max allowed size\n\t// for the hpack compression. It's still too long for the header list\n\t// size, so we'll get the 431 error, but that keeps the compression\n\t// context still valid.\n\thbf := st.encodeHeader(\"foo\", strings.Repeat(\"a\", maxAllowed))\n\n\tst.writeHeaders(HeadersFrameParam{\n\t\tStreamID:      1,\n\t\tBlockFragment: hbf,\n\t\tEndStream:     true,\n\t\tEndHeaders:    true,\n\t})\n\th := st.wantHeaders()\n\tif !h.HeadersEnded() {\n\t\tt.Fatalf(\"Got HEADERS without END_HEADERS set: %v\", h)\n\t}\n\theaders := st.decodeHeader(h.HeaderBlockFragment())\n\twant := [][2]string{\n\t\t{\":status\", \"431\"},\n\t\t{\"content-type\", \"text/html; charset=utf-8\"},\n\t\t{\"content-length\", \"63\"},\n\t}\n\tif !reflect.DeepEqual(headers, want) {\n\t\tt.Errorf(\"Headers mismatch.\\n got: %q\\nwant: %q\\n\", headers, want)\n\t}\n\tdf := st.wantData()\n\tif !strings.Contains(string(df.Data()), \"HTTP Error 431\") {\n\t\tt.Errorf(\"Unexpected data body: %q\", df.Data())\n\t}\n\tif !df.StreamEnded() {\n\t\tt.Fatalf(\"expect data stream end\")\n\t}\n\n\t// And now send one that's just one byte too big.\n\thbf = st.encodeHeader(\"bar\", strings.Repeat(\"b\", maxAllowed+1))\n\tst.writeHeaders(HeadersFrameParam{\n\t\tStreamID:      3,\n\t\tBlockFragment: hbf,\n\t\tEndStream:     true,\n\t\tEndHeaders:    true,\n\t})\n\tga := 
st.wantGoAway()\n\tif ga.ErrCode != ErrCodeCompression {\n\t\tt.Errorf(\"GOAWAY err = %v; want ErrCodeCompression\", ga.ErrCode)\n\t}\n}\n\nfunc TestCompressionErrorOnClose(t *testing.T) {\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\t// No response body.\n\t})\n\tst.addLogFilter(\"connection error: COMPRESSION_ERROR\")\n\tdefer st.Close()\n\tst.greet()\n\n\thbf := st.encodeHeader(\"foo\", \"bar\")\n\thbf = hbf[:len(hbf)-1] // truncate one byte from the end, so hpack.Decoder.Close fails.\n\tst.writeHeaders(HeadersFrameParam{\n\t\tStreamID:      1,\n\t\tBlockFragment: hbf,\n\t\tEndStream:     true,\n\t\tEndHeaders:    true,\n\t})\n\tga := st.wantGoAway()\n\tif ga.ErrCode != ErrCodeCompression {\n\t\tt.Errorf(\"GOAWAY err = %v; want ErrCodeCompression\", ga.ErrCode)\n\t}\n}\n\n// test that a server handler can read trailers from a client\nfunc TestServerReadsTrailers(t *testing.T) {\n\tconst testBody = \"some test body\"\n\twriteReq := func(st *serverTester) {\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1, // clients send odd numbers\n\t\t\tBlockFragment: st.encodeHeader(\"trailer\", \"Foo, Bar\", \"trailer\", \"Baz\"),\n\t\t\tEndStream:     false,\n\t\t\tEndHeaders:    true,\n\t\t})\n\t\tst.writeData(1, false, []byte(testBody))\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID: 1, // clients send odd numbers\n\t\t\tBlockFragment: st.encodeHeaderRaw(\n\t\t\t\t\"foo\", \"foov\",\n\t\t\t\t\"bar\", \"barv\",\n\t\t\t\t\"baz\", \"bazv\",\n\t\t\t\t\"surprise\", \"wasn't declared; shouldn't show up\",\n\t\t\t),\n\t\t\tEndStream:  true,\n\t\t\tEndHeaders: true,\n\t\t})\n\t}\n\tcheckReq := func(r *http.Request) {\n\t\twantTrailer := http.Header{\n\t\t\t\"Foo\": nil,\n\t\t\t\"Bar\": nil,\n\t\t\t\"Baz\": nil,\n\t\t}\n\t\tif !reflect.DeepEqual(r.Trailer, wantTrailer) {\n\t\t\tt.Errorf(\"initial Trailer = %v; want %v\", r.Trailer, wantTrailer)\n\t\t}\n\t\tslurp, err := ioutil.ReadAll(r.Body)\n\t\tif string(slurp) != 
testBody {\n\t\t\tt.Errorf(\"read body %q; want %q\", slurp, testBody)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Body slurp: %v\", err)\n\t\t}\n\t\twantTrailerAfter := http.Header{\n\t\t\t\"Foo\": {\"foov\"},\n\t\t\t\"Bar\": {\"barv\"},\n\t\t\t\"Baz\": {\"bazv\"},\n\t\t}\n\t\tif !reflect.DeepEqual(r.Trailer, wantTrailerAfter) {\n\t\t\tt.Errorf(\"final Trailer = %v; want %v\", r.Trailer, wantTrailerAfter)\n\t\t}\n\t}\n\ttestServerRequest(t, writeReq, checkReq)\n}\n\n// test that a server handler can send trailers\nfunc TestServerWritesTrailers_WithFlush(t *testing.T)    { testServerWritesTrailers(t, true) }\nfunc TestServerWritesTrailers_WithoutFlush(t *testing.T) { testServerWritesTrailers(t, false) }\n\nfunc testServerWritesTrailers(t *testing.T, withFlush bool) {\n\t// See https://httpwg.github.io/specs/rfc7540.html#rfc.section.8.1.3\n\ttestServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {\n\t\tw.Header().Set(\"Trailer\", \"Server-Trailer-A, Server-Trailer-B\")\n\t\tw.Header().Add(\"Trailer\", \"Server-Trailer-C\")\n\t\tw.Header().Add(\"Trailer\", \"Transfer-Encoding, Content-Length, Trailer\") // filtered\n\n\t\t// Regular headers:\n\t\tw.Header().Set(\"Foo\", \"Bar\")\n\t\tw.Header().Set(\"Content-Length\", \"5\") // len(\"Hello\")\n\n\t\tio.WriteString(w, \"Hello\")\n\t\tif withFlush {\n\t\t\tw.(http.Flusher).Flush()\n\t\t}\n\t\tw.Header().Set(\"Server-Trailer-A\", \"valuea\")\n\t\tw.Header().Set(\"Server-Trailer-C\", \"valuec\") // skipping B\n\t\t// After a flush, random keys like Server-Surprise shouldn't show up:\n\t\tw.Header().Set(\"Server-Surpise\", \"surprise! 
this isn't predeclared!\")\n\t\t// But we do permit promoting keys to trailers after a\n\t\t// flush if they start with the magic\n\t\t// otherwise-invalid \"Trailer:\" prefix:\n\t\tw.Header().Set(\"Trailer:Post-Header-Trailer\", \"hi1\")\n\t\tw.Header().Set(\"Trailer:post-header-trailer2\", \"hi2\")\n\t\tw.Header().Set(\"Trailer:Range\", \"invalid\")\n\t\tw.Header().Set(\"Trailer:Foo\\x01Bogus\", \"invalid\")\n\t\tw.Header().Set(\"Transfer-Encoding\", \"should not be included; Forbidden by RFC 2616 14.40\")\n\t\tw.Header().Set(\"Content-Length\", \"should not be included; Forbidden by RFC 2616 14.40\")\n\t\tw.Header().Set(\"Trailer\", \"should not be included; Forbidden by RFC 2616 14.40\")\n\t\treturn nil\n\t}, func(st *serverTester) {\n\t\tgetSlash(st)\n\t\thf := st.wantHeaders()\n\t\tif hf.StreamEnded() {\n\t\t\tt.Fatal(\"response HEADERS had END_STREAM\")\n\t\t}\n\t\tif !hf.HeadersEnded() {\n\t\t\tt.Fatal(\"response HEADERS didn't have END_HEADERS\")\n\t\t}\n\t\tgoth := st.decodeHeader(hf.HeaderBlockFragment())\n\t\twanth := [][2]string{\n\t\t\t{\":status\", \"200\"},\n\t\t\t{\"foo\", \"Bar\"},\n\t\t\t{\"trailer\", \"Server-Trailer-A, Server-Trailer-B\"},\n\t\t\t{\"trailer\", \"Server-Trailer-C\"},\n\t\t\t{\"trailer\", \"Transfer-Encoding, Content-Length, Trailer\"},\n\t\t\t{\"content-type\", \"text/plain; charset=utf-8\"},\n\t\t\t{\"content-length\", \"5\"},\n\t\t}\n\t\tif !reflect.DeepEqual(goth, wanth) {\n\t\t\tt.Errorf(\"Header mismatch.\\n got: %v\\nwant: %v\", goth, wanth)\n\t\t}\n\t\tdf := st.wantData()\n\t\tif string(df.Data()) != \"Hello\" {\n\t\t\tt.Fatalf(\"Client read %q; want Hello\", df.Data())\n\t\t}\n\t\tif df.StreamEnded() {\n\t\t\tt.Fatalf(\"data frame had STREAM_ENDED\")\n\t\t}\n\t\ttf := st.wantHeaders() // for the trailers\n\t\tif !tf.StreamEnded() {\n\t\t\tt.Fatalf(\"trailers HEADERS lacked END_STREAM\")\n\t\t}\n\t\tif !tf.HeadersEnded() {\n\t\t\tt.Fatalf(\"trailers HEADERS lacked END_HEADERS\")\n\t\t}\n\t\twanth = 
[][2]string{\n\t\t\t{\"post-header-trailer\", \"hi1\"},\n\t\t\t{\"post-header-trailer2\", \"hi2\"},\n\t\t\t{\"server-trailer-a\", \"valuea\"},\n\t\t\t{\"server-trailer-c\", \"valuec\"},\n\t\t}\n\t\tgoth = st.decodeHeader(tf.HeaderBlockFragment())\n\t\tif !reflect.DeepEqual(goth, wanth) {\n\t\t\tt.Errorf(\"Header mismatch.\\n got: %v\\nwant: %v\", goth, wanth)\n\t\t}\n\t})\n}\n\n// validate transmitted header field names & values\n// golang.org/issue/14048\nfunc TestServerDoesntWriteInvalidHeaders(t *testing.T) {\n\ttestServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {\n\t\tw.Header().Add(\"OK1\", \"x\")\n\t\tw.Header().Add(\"Bad:Colon\", \"x\") // colon (non-token byte) in key\n\t\tw.Header().Add(\"Bad1\\x00\", \"x\")  // null in key\n\t\tw.Header().Add(\"Bad2\", \"x\\x00y\") // null in value\n\t\treturn nil\n\t}, func(st *serverTester) {\n\t\tgetSlash(st)\n\t\thf := st.wantHeaders()\n\t\tif !hf.StreamEnded() {\n\t\t\tt.Error(\"response HEADERS lacked END_STREAM\")\n\t\t}\n\t\tif !hf.HeadersEnded() {\n\t\t\tt.Fatal(\"response HEADERS didn't have END_HEADERS\")\n\t\t}\n\t\tgoth := st.decodeHeader(hf.HeaderBlockFragment())\n\t\twanth := [][2]string{\n\t\t\t{\":status\", \"200\"},\n\t\t\t{\"ok1\", \"x\"},\n\t\t\t{\"content-type\", \"text/plain; charset=utf-8\"},\n\t\t\t{\"content-length\", \"0\"},\n\t\t}\n\t\tif !reflect.DeepEqual(goth, wanth) {\n\t\t\tt.Errorf(\"Header mismatch.\\n got: %v\\nwant: %v\", goth, wanth)\n\t\t}\n\t})\n}\n\nfunc BenchmarkServerGets(b *testing.B) {\n\tdefer disableGoroutineTracking()()\n\tb.ReportAllocs()\n\n\tconst msg = \"Hello, world\"\n\tst := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {\n\t\tio.WriteString(w, msg)\n\t})\n\tdefer st.Close()\n\tst.greet()\n\n\t// Give the server quota to reply. 
(plus it has the the 64KB)\n\tif err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\tfor i := 0; i < b.N; i++ {\n\t\tid := 1 + uint32(i)*2\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      id,\n\t\t\tBlockFragment: st.encodeHeader(),\n\t\t\tEndStream:     true,\n\t\t\tEndHeaders:    true,\n\t\t})\n\t\tst.wantHeaders()\n\t\tdf := st.wantData()\n\t\tif !df.StreamEnded() {\n\t\t\tb.Fatalf(\"DATA didn't have END_STREAM; got %v\", df)\n\t\t}\n\t}\n}\n\nfunc BenchmarkServerPosts(b *testing.B) {\n\tdefer disableGoroutineTracking()()\n\tb.ReportAllocs()\n\n\tconst msg = \"Hello, world\"\n\tst := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {\n\t\t// Consume the (empty) body from th peer before replying, otherwise\n\t\t// the server will sometimes (depending on scheduling) send the peer a\n\t\t// a RST_STREAM with the CANCEL error code.\n\t\tif n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil {\n\t\t\tb.Errorf(\"Copy error; got %v, %v; want 0, nil\", n, err)\n\t\t}\n\t\tio.WriteString(w, msg)\n\t})\n\tdefer st.Close()\n\tst.greet()\n\n\t// Give the server quota to reply. 
(plus it has the the 64KB)\n\tif err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\tfor i := 0; i < b.N; i++ {\n\t\tid := 1 + uint32(i)*2\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      id,\n\t\t\tBlockFragment: st.encodeHeader(\":method\", \"POST\"),\n\t\t\tEndStream:     false,\n\t\t\tEndHeaders:    true,\n\t\t})\n\t\tst.writeData(id, true, nil)\n\t\tst.wantHeaders()\n\t\tdf := st.wantData()\n\t\tif !df.StreamEnded() {\n\t\t\tb.Fatalf(\"DATA didn't have END_STREAM; got %v\", df)\n\t\t}\n\t}\n}\n\n// Send a stream of messages from server to client in separate data frames.\n// Brings up performance issues seen in long streams.\n// Created to show problem in go issue #18502\nfunc BenchmarkServerToClientStreamDefaultOptions(b *testing.B) {\n\tbenchmarkServerToClientStream(b)\n}\n\n// Justification for Change-Id: Iad93420ef6c3918f54249d867098f1dadfa324d8\n// Expect to see memory/alloc reduction by opting in to Frame reuse with the Framer.\nfunc BenchmarkServerToClientStreamReuseFrames(b *testing.B) {\n\tbenchmarkServerToClientStream(b, optFramerReuseFrames)\n}\n\nfunc benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) {\n\tdefer disableGoroutineTracking()()\n\tb.ReportAllocs()\n\tconst msgLen = 1\n\t// default window size\n\tconst windowSize = 1<<16 - 1\n\n\t// next message to send from the server and for the client to expect\n\tnextMsg := func(i int) []byte {\n\t\tmsg := make([]byte, msgLen)\n\t\tmsg[0] = byte(i)\n\t\tif len(msg) != msgLen {\n\t\t\tpanic(\"invalid test setup msg length\")\n\t\t}\n\t\treturn msg\n\t}\n\n\tst := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {\n\t\t// Consume the (empty) body from th peer before replying, otherwise\n\t\t// the server will sometimes (depending on scheduling) send the peer a\n\t\t// a RST_STREAM with the CANCEL error code.\n\t\tif n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil {\n\t\t\tb.Errorf(\"Copy 
error; got %v, %v; want 0, nil\", n, err)\n\t\t}\n\t\tfor i := 0; i < b.N; i += 1 {\n\t\t\tw.Write(nextMsg(i))\n\t\t\tw.(http.Flusher).Flush()\n\t\t}\n\t}, newServerOpts...)\n\tdefer st.Close()\n\tst.greet()\n\n\tconst id = uint32(1)\n\n\tst.writeHeaders(HeadersFrameParam{\n\t\tStreamID:      id,\n\t\tBlockFragment: st.encodeHeader(\":method\", \"POST\"),\n\t\tEndStream:     false,\n\t\tEndHeaders:    true,\n\t})\n\n\tst.writeData(id, true, nil)\n\tst.wantHeaders()\n\n\tvar pendingWindowUpdate = uint32(0)\n\n\tfor i := 0; i < b.N; i += 1 {\n\t\texpected := nextMsg(i)\n\t\tdf := st.wantData()\n\t\tif bytes.Compare(expected, df.data) != 0 {\n\t\t\tb.Fatalf(\"Bad message received; want %v; got %v\", expected, df.data)\n\t\t}\n\t\t// try to send infrequent but large window updates so they don't overwhelm the test\n\t\tpendingWindowUpdate += uint32(len(df.data))\n\t\tif pendingWindowUpdate >= windowSize/2 {\n\t\t\tif err := st.fr.WriteWindowUpdate(0, pendingWindowUpdate); err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\tif err := st.fr.WriteWindowUpdate(id, pendingWindowUpdate); err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\tpendingWindowUpdate = 0\n\t\t}\n\t}\n\tdf := st.wantData()\n\tif !df.StreamEnded() {\n\t\tb.Fatalf(\"DATA didn't have END_STREAM; got %v\", df)\n\t}\n}\n\n// go-fuzz bug, originally reported at https://github.com/bradfitz/http2/issues/53\n// Verify we don't hang.\nfunc TestIssue53(t *testing.T) {\n\tconst data = \"PRI * HTTP/2.0\\r\\n\\r\\nSM\" +\n\t\t\"\\r\\n\\r\\n\\x00\\x00\\x00\\x01\\ainfinfin\\ad\"\n\ts := &http.Server{\n\t\tErrorLog: log.New(io.MultiWriter(stderrv(), twriter{t: t}), \"\", log.LstdFlags),\n\t\tHandler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\tw.Write([]byte(\"hello\"))\n\t\t}),\n\t}\n\ts2 := &Server{\n\t\tMaxReadFrameSize:             1 << 16,\n\t\tPermitProhibitedCipherSuites: true,\n\t}\n\tc := &issue53Conn{[]byte(data), false, false}\n\ts2.ServeConn(c, &ServeConnOpts{BaseConfig: s})\n\tif 
!c.closed {\n\t\tt.Fatal(\"connection is not closed\")\n\t}\n}\n\ntype issue53Conn struct {\n\tdata    []byte\n\tclosed  bool\n\twritten bool\n}\n\nfunc (c *issue53Conn) Read(b []byte) (n int, err error) {\n\tif len(c.data) == 0 {\n\t\treturn 0, io.EOF\n\t}\n\tn = copy(b, c.data)\n\tc.data = c.data[n:]\n\treturn\n}\n\nfunc (c *issue53Conn) Write(b []byte) (n int, err error) {\n\tc.written = true\n\treturn len(b), nil\n}\n\nfunc (c *issue53Conn) Close() error {\n\tc.closed = true\n\treturn nil\n}\n\nfunc (c *issue53Conn) LocalAddr() net.Addr {\n\treturn &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706}\n}\nfunc (c *issue53Conn) RemoteAddr() net.Addr {\n\treturn &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706}\n}\nfunc (c *issue53Conn) SetDeadline(t time.Time) error      { return nil }\nfunc (c *issue53Conn) SetReadDeadline(t time.Time) error  { return nil }\nfunc (c *issue53Conn) SetWriteDeadline(t time.Time) error { return nil }\n\n// golang.org/issue/12895\nfunc TestConfigureServer(t *testing.T) {\n\ttests := []struct {\n\t\tname      string\n\t\ttlsConfig *tls.Config\n\t\twantErr   string\n\t}{\n\t\t{\n\t\t\tname: \"empty server\",\n\t\t},\n\t\t{\n\t\t\tname: \"just the required cipher suite\",\n\t\t\ttlsConfig: &tls.Config{\n\t\t\t\tCipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"missing required cipher suite\",\n\t\t\ttlsConfig: &tls.Config{\n\t\t\t\tCipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384},\n\t\t\t},\n\t\t\twantErr: \"is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t},\n\t\t{\n\t\t\tname: \"required after bad\",\n\t\t\ttlsConfig: &tls.Config{\n\t\t\t\tCipherSuites: []uint16{tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},\n\t\t\t},\n\t\t\twantErr: \"contains an HTTP/2-approved cipher suite (0xc02f), but it comes after\",\n\t\t},\n\t\t{\n\t\t\tname: \"bad after required\",\n\t\t\ttlsConfig: 
&tls.Config{\n\t\t\t\tCipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_RC4_128_SHA},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tsrv := &http.Server{TLSConfig: tt.tlsConfig}\n\t\terr := ConfigureServer(srv, nil)\n\t\tif (err != nil) != (tt.wantErr != \"\") {\n\t\t\tif tt.wantErr != \"\" {\n\t\t\t\tt.Errorf(\"%s: success, but want error\", tt.name)\n\t\t\t} else {\n\t\t\t\tt.Errorf(\"%s: unexpected error: %v\", tt.name, err)\n\t\t\t}\n\t\t}\n\t\tif err != nil && tt.wantErr != \"\" && !strings.Contains(err.Error(), tt.wantErr) {\n\t\t\tt.Errorf(\"%s: err = %v; want substring %q\", tt.name, err, tt.wantErr)\n\t\t}\n\t\tif err == nil && !srv.TLSConfig.PreferServerCipherSuites {\n\t\t\tt.Errorf(\"%s: PreferServerCipherSuite is false; want true\", tt.name)\n\t\t}\n\t}\n}\n\nfunc TestServerRejectHeadWithBody(t *testing.T) {\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\t// No response body.\n\t})\n\tdefer st.Close()\n\tst.greet()\n\tst.writeHeaders(HeadersFrameParam{\n\t\tStreamID:      1, // clients send odd numbers\n\t\tBlockFragment: st.encodeHeader(\":method\", \"HEAD\"),\n\t\tEndStream:     false, // what we're testing, a bogus HEAD request with body\n\t\tEndHeaders:    true,\n\t})\n\tst.wantRSTStream(1, ErrCodeProtocol)\n}\n\nfunc TestServerNoAutoContentLengthOnHead(t *testing.T) {\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\t// No response body. 
(or smaller than one frame)\n\t})\n\tdefer st.Close()\n\tst.greet()\n\tst.writeHeaders(HeadersFrameParam{\n\t\tStreamID:      1, // clients send odd numbers\n\t\tBlockFragment: st.encodeHeader(\":method\", \"HEAD\"),\n\t\tEndStream:     true,\n\t\tEndHeaders:    true,\n\t})\n\th := st.wantHeaders()\n\theaders := st.decodeHeader(h.HeaderBlockFragment())\n\twant := [][2]string{\n\t\t{\":status\", \"200\"},\n\t\t{\"content-type\", \"text/plain; charset=utf-8\"},\n\t}\n\tif !reflect.DeepEqual(headers, want) {\n\t\tt.Errorf(\"Headers mismatch.\\n got: %q\\nwant: %q\\n\", headers, want)\n\t}\n}\n\n// golang.org/issue/13495\nfunc TestServerNoDuplicateContentType(t *testing.T) {\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header()[\"Content-Type\"] = []string{\"\"}\n\t\tfmt.Fprintf(w, \"<html><head></head><body>hi</body></html>\")\n\t})\n\tdefer st.Close()\n\tst.greet()\n\tst.writeHeaders(HeadersFrameParam{\n\t\tStreamID:      1,\n\t\tBlockFragment: st.encodeHeader(),\n\t\tEndStream:     true,\n\t\tEndHeaders:    true,\n\t})\n\th := st.wantHeaders()\n\theaders := st.decodeHeader(h.HeaderBlockFragment())\n\twant := [][2]string{\n\t\t{\":status\", \"200\"},\n\t\t{\"content-type\", \"\"},\n\t\t{\"content-length\", \"41\"},\n\t}\n\tif !reflect.DeepEqual(headers, want) {\n\t\tt.Errorf(\"Headers mismatch.\\n got: %q\\nwant: %q\\n\", headers, want)\n\t}\n}\n\nfunc disableGoroutineTracking() (restore func()) {\n\told := DebugGoroutines\n\tDebugGoroutines = false\n\treturn func() { DebugGoroutines = old }\n}\n\nfunc BenchmarkServer_GetRequest(b *testing.B) {\n\tdefer disableGoroutineTracking()()\n\tb.ReportAllocs()\n\tconst msg = \"Hello, world.\"\n\tst := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {\n\t\tn, err := io.Copy(ioutil.Discard, r.Body)\n\t\tif err != nil || n > 0 {\n\t\t\tb.Errorf(\"Read %d bytes, error %v; want 0 bytes.\", n, err)\n\t\t}\n\t\tio.WriteString(w, msg)\n\t})\n\tdefer st.Close()\n\n\tst.greet()\n\t// 
Give the server quota to reply. (plus it has the the 64KB)\n\tif err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {\n\t\tb.Fatal(err)\n\t}\n\thbf := st.encodeHeader(\":method\", \"GET\")\n\tfor i := 0; i < b.N; i++ {\n\t\tstreamID := uint32(1 + 2*i)\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      streamID,\n\t\t\tBlockFragment: hbf,\n\t\t\tEndStream:     true,\n\t\t\tEndHeaders:    true,\n\t\t})\n\t\tst.wantHeaders()\n\t\tst.wantData()\n\t}\n}\n\nfunc BenchmarkServer_PostRequest(b *testing.B) {\n\tdefer disableGoroutineTracking()()\n\tb.ReportAllocs()\n\tconst msg = \"Hello, world.\"\n\tst := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {\n\t\tn, err := io.Copy(ioutil.Discard, r.Body)\n\t\tif err != nil || n > 0 {\n\t\t\tb.Errorf(\"Read %d bytes, error %v; want 0 bytes.\", n, err)\n\t\t}\n\t\tio.WriteString(w, msg)\n\t})\n\tdefer st.Close()\n\tst.greet()\n\t// Give the server quota to reply. (plus it has the the 64KB)\n\tif err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {\n\t\tb.Fatal(err)\n\t}\n\thbf := st.encodeHeader(\":method\", \"POST\")\n\tfor i := 0; i < b.N; i++ {\n\t\tstreamID := uint32(1 + 2*i)\n\t\tst.writeHeaders(HeadersFrameParam{\n\t\t\tStreamID:      streamID,\n\t\t\tBlockFragment: hbf,\n\t\t\tEndStream:     false,\n\t\t\tEndHeaders:    true,\n\t\t})\n\t\tst.writeData(streamID, true, nil)\n\t\tst.wantHeaders()\n\t\tst.wantData()\n\t}\n}\n\ntype connStateConn struct {\n\tnet.Conn\n\tcs tls.ConnectionState\n}\n\nfunc (c connStateConn) ConnectionState() tls.ConnectionState { return c.cs }\n\n// golang.org/issue/12737 -- handle any net.Conn, not just\n// *tls.Conn.\nfunc TestServerHandleCustomConn(t *testing.T) {\n\tvar s Server\n\tc1, c2 := net.Pipe()\n\tclientDone := make(chan struct{})\n\thandlerDone := make(chan struct{})\n\tvar req *http.Request\n\tgo func() {\n\t\tdefer close(clientDone)\n\t\tdefer c2.Close()\n\t\tfr := NewFramer(c2, c2)\n\t\tio.WriteString(c2, 
ClientPreface)\n\t\tfr.WriteSettings()\n\t\tfr.WriteSettingsAck()\n\t\tf, err := fr.ReadFrame()\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif sf, ok := f.(*SettingsFrame); !ok || sf.IsAck() {\n\t\t\tt.Errorf(\"Got %v; want non-ACK SettingsFrame\", summarizeFrame(f))\n\t\t\treturn\n\t\t}\n\t\tf, err = fr.ReadFrame()\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif sf, ok := f.(*SettingsFrame); !ok || !sf.IsAck() {\n\t\t\tt.Errorf(\"Got %v; want ACK SettingsFrame\", summarizeFrame(f))\n\t\t\treturn\n\t\t}\n\t\tvar henc hpackEncoder\n\t\tfr.WriteHeaders(HeadersFrameParam{\n\t\t\tStreamID:      1,\n\t\t\tBlockFragment: henc.encodeHeaderRaw(t, \":method\", \"GET\", \":path\", \"/\", \":scheme\", \"https\", \":authority\", \"foo.com\"),\n\t\t\tEndStream:     true,\n\t\t\tEndHeaders:    true,\n\t\t})\n\t\tgo io.Copy(ioutil.Discard, c2)\n\t\t<-handlerDone\n\t}()\n\tconst testString = \"my custom ConnectionState\"\n\tfakeConnState := tls.ConnectionState{\n\t\tServerName:  testString,\n\t\tVersion:     tls.VersionTLS12,\n\t\tCipherSuite: cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,\n\t}\n\tgo s.ServeConn(connStateConn{c1, fakeConnState}, &ServeConnOpts{\n\t\tBaseConfig: &http.Server{\n\t\t\tHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tdefer close(handlerDone)\n\t\t\t\treq = r\n\t\t\t}),\n\t\t}})\n\tselect {\n\tcase <-clientDone:\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"timeout waiting for handler\")\n\t}\n\tif req.TLS == nil {\n\t\tt.Fatalf(\"Request.TLS is nil. 
Got: %#v\", req)\n\t}\n\tif req.TLS.ServerName != testString {\n\t\tt.Fatalf(\"Request.TLS = %+v; want ServerName of %q\", req.TLS, testString)\n\t}\n}\n\n// golang.org/issue/14214\nfunc TestServer_Rejects_ConnHeaders(t *testing.T) {\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tt.Error(\"should not get to Handler\")\n\t})\n\tdefer st.Close()\n\tst.greet()\n\tst.bodylessReq1(\"connection\", \"foo\")\n\thf := st.wantHeaders()\n\tgoth := st.decodeHeader(hf.HeaderBlockFragment())\n\twanth := [][2]string{\n\t\t{\":status\", \"400\"},\n\t\t{\"content-type\", \"text/plain; charset=utf-8\"},\n\t\t{\"x-content-type-options\", \"nosniff\"},\n\t\t{\"content-length\", \"51\"},\n\t}\n\tif !reflect.DeepEqual(goth, wanth) {\n\t\tt.Errorf(\"Got headers %v; want %v\", goth, wanth)\n\t}\n}\n\ntype hpackEncoder struct {\n\tenc *hpack.Encoder\n\tbuf bytes.Buffer\n}\n\nfunc (he *hpackEncoder) encodeHeaderRaw(t *testing.T, headers ...string) []byte {\n\tif len(headers)%2 == 1 {\n\t\tpanic(\"odd number of kv args\")\n\t}\n\the.buf.Reset()\n\tif he.enc == nil {\n\t\the.enc = hpack.NewEncoder(&he.buf)\n\t}\n\tfor len(headers) > 0 {\n\t\tk, v := headers[0], headers[1]\n\t\terr := he.enc.WriteField(hpack.HeaderField{Name: k, Value: v})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"HPACK encoding error for %q/%q: %v\", k, v, err)\n\t\t}\n\t\theaders = headers[2:]\n\t}\n\treturn he.buf.Bytes()\n}\n\nfunc TestCheckValidHTTP2Request(t *testing.T) {\n\ttests := []struct {\n\t\th    http.Header\n\t\twant error\n\t}{\n\t\t{\n\t\t\th:    http.Header{\"Te\": {\"trailers\"}},\n\t\t\twant: nil,\n\t\t},\n\t\t{\n\t\t\th:    http.Header{\"Te\": {\"trailers\", \"bogus\"}},\n\t\t\twant: errors.New(`request header \"TE\" may only be \"trailers\" in HTTP/2`),\n\t\t},\n\t\t{\n\t\t\th:    http.Header{\"Foo\": {\"\"}},\n\t\t\twant: nil,\n\t\t},\n\t\t{\n\t\t\th:    http.Header{\"Connection\": {\"\"}},\n\t\t\twant: errors.New(`request header \"Connection\" is not valid in 
HTTP/2`),\n\t\t},\n\t\t{\n\t\t\th:    http.Header{\"Proxy-Connection\": {\"\"}},\n\t\t\twant: errors.New(`request header \"Proxy-Connection\" is not valid in HTTP/2`),\n\t\t},\n\t\t{\n\t\t\th:    http.Header{\"Keep-Alive\": {\"\"}},\n\t\t\twant: errors.New(`request header \"Keep-Alive\" is not valid in HTTP/2`),\n\t\t},\n\t\t{\n\t\t\th:    http.Header{\"Upgrade\": {\"\"}},\n\t\t\twant: errors.New(`request header \"Upgrade\" is not valid in HTTP/2`),\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tgot := checkValidHTTP2RequestHeaders(tt.h)\n\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\tt.Errorf(\"%d. checkValidHTTP2Request = %v; want %v\", i, got, tt.want)\n\t\t}\n\t}\n}\n\n// golang.org/issue/14030\nfunc TestExpect100ContinueAfterHandlerWrites(t *testing.T) {\n\tconst msg = \"Hello\"\n\tconst msg2 = \"World\"\n\n\tdoRead := make(chan bool, 1)\n\tdefer close(doRead) // fallback cleanup\n\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tio.WriteString(w, msg)\n\t\tw.(http.Flusher).Flush()\n\n\t\t// Do a read, which might force a 100-continue status to be sent.\n\t\t<-doRead\n\t\tr.Body.Read(make([]byte, 10))\n\n\t\tio.WriteString(w, msg2)\n\n\t}, optOnlyServer)\n\tdefer st.Close()\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\n\treq, _ := http.NewRequest(\"POST\", st.ts.URL, io.LimitReader(neverEnding('A'), 2<<20))\n\treq.Header.Set(\"Expect\", \"100-continue\")\n\n\tres, err := tr.RoundTrip(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer res.Body.Close()\n\n\tbuf := make([]byte, len(msg))\n\tif _, err := io.ReadFull(res.Body, buf); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif string(buf) != msg {\n\t\tt.Fatalf(\"msg = %q; want %q\", buf, msg)\n\t}\n\n\tdoRead <- true\n\n\tif _, err := io.ReadFull(res.Body, buf); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif string(buf) != msg2 {\n\t\tt.Fatalf(\"second msg = %q; want %q\", buf, msg2)\n\t}\n}\n\ntype funcReader func([]byte) (n int, err 
error)\n\nfunc (f funcReader) Read(p []byte) (n int, err error) { return f(p) }\n\n// golang.org/issue/16481 -- return flow control when streams close with unread data.\n// (The Server version of the bug. See also TestUnreadFlowControlReturned_Transport)\nfunc TestUnreadFlowControlReturned_Server(t *testing.T) {\n\tunblock := make(chan bool, 1)\n\tdefer close(unblock)\n\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\t// Don't read the 16KB request body. Wait until the client's\n\t\t// done sending it and then return. This should cause the Server\n\t\t// to then return those 16KB of flow control to the client.\n\t\t<-unblock\n\t}, optOnlyServer)\n\tdefer st.Close()\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\n\t// This previously hung on the 4th iteration.\n\tfor i := 0; i < 6; i++ {\n\t\tbody := io.MultiReader(\n\t\t\tio.LimitReader(neverEnding('A'), 16<<10),\n\t\t\tfuncReader(func([]byte) (n int, err error) {\n\t\t\t\tunblock <- true\n\t\t\t\treturn 0, io.EOF\n\t\t\t}),\n\t\t)\n\t\treq, _ := http.NewRequest(\"POST\", st.ts.URL, body)\n\t\tres, err := tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tres.Body.Close()\n\t}\n\n}\n\nfunc TestServerIdleTimeout(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping in short mode\")\n\t}\n\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t}, func(h2s *Server) {\n\t\th2s.IdleTimeout = 500 * time.Millisecond\n\t})\n\tdefer st.Close()\n\n\tst.greet()\n\tga := st.wantGoAway()\n\tif ga.ErrCode != ErrCodeNo {\n\t\tt.Errorf(\"GOAWAY error = %v; want ErrCodeNo\", ga.ErrCode)\n\t}\n}\n\nfunc TestServerIdleTimeout_AfterRequest(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping in short mode\")\n\t}\n\tconst timeout = 250 * time.Millisecond\n\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\ttime.Sleep(timeout * 2)\n\t}, func(h2s *Server) 
{\n\t\th2s.IdleTimeout = timeout\n\t})\n\tdefer st.Close()\n\n\tst.greet()\n\n\t// Send a request which takes twice the timeout. Verifies the\n\t// idle timeout doesn't fire while we're in a request:\n\tst.bodylessReq1()\n\tst.wantHeaders()\n\n\t// But the idle timeout should be rearmed after the request\n\t// is done:\n\tga := st.wantGoAway()\n\tif ga.ErrCode != ErrCodeNo {\n\t\tt.Errorf(\"GOAWAY error = %v; want ErrCodeNo\", ga.ErrCode)\n\t}\n}\n\n// grpc-go closes the Request.Body currently with a Read.\n// Verify that it doesn't race.\n// See https://github.com/grpc/grpc-go/pull/938\nfunc TestRequestBodyReadCloseRace(t *testing.T) {\n\tfor i := 0; i < 100; i++ {\n\t\tbody := &requestBody{\n\t\t\tpipe: &pipe{\n\t\t\t\tb: new(bytes.Buffer),\n\t\t\t},\n\t\t}\n\t\tbody.pipe.CloseWithError(io.EOF)\n\n\t\tdone := make(chan bool, 1)\n\t\tbuf := make([]byte, 10)\n\t\tgo func() {\n\t\t\ttime.Sleep(1 * time.Millisecond)\n\t\t\tbody.Close()\n\t\t\tdone <- true\n\t\t}()\n\t\tbody.Read(buf)\n\t\t<-done\n\t}\n}\n\nfunc TestIssue20704Race(t *testing.T) {\n\tif testing.Short() && os.Getenv(\"GO_BUILDER_NAME\") == \"\" {\n\t\tt.Skip(\"skipping in short mode\")\n\t}\n\tconst (\n\t\titemSize  = 1 << 10\n\t\titemCount = 100\n\t)\n\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tfor i := 0; i < itemCount; i++ {\n\t\t\t_, err := w.Write(make([]byte, itemSize))\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}, optOnlyServer)\n\tdefer st.Close()\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\tcl := &http.Client{Transport: tr}\n\n\tfor i := 0; i < 1000; i++ {\n\t\tresp, err := cl.Get(st.ts.URL)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\t// Force a RST stream to the server by closing without\n\t\t// reading the body:\n\t\tresp.Body.Close()\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml",
    "content": "<?xml version=\"1.0\"?>\r\n<?xml-stylesheet type=\"text/xsl\" href=\"lib/rfc2629.xslt\"?>\r\n<?rfc toc=\"yes\" ?>\r\n<?rfc symrefs=\"yes\" ?>\r\n<?rfc sortrefs=\"yes\" ?>\r\n<?rfc compact=\"yes\"?>\r\n<?rfc subcompact=\"no\" ?>\r\n<?rfc linkmailto=\"no\" ?>\r\n<?rfc editing=\"no\" ?>\r\n<?rfc comments=\"yes\" ?>\r\n<?rfc inline=\"yes\"?>\r\n<?rfc rfcedstyle=\"yes\"?>\r\n<?rfc-ext allow-markup-in-artwork=\"yes\" ?>\r\n<?rfc-ext include-index=\"no\" ?>\r\n\r\n<rfc ipr=\"trust200902\"\r\n     category=\"std\"\r\n     docName=\"draft-ietf-httpbis-http2-latest\"\r\n     x:maturity-level=\"proposed\"\r\n     xmlns:x=\"http://purl.org/net/xml2rfc/ext\">\r\n  <x:feedback template=\"mailto:ietf-http-wg@w3.org?subject={docname},%20%22{section}%22&amp;body=&lt;{ref}&gt;:\"/>\r\n  <front>\r\n    <title abbrev=\"HTTP/2\">Hypertext Transfer Protocol version 2</title>\r\n\r\n    <author initials=\"M.\" surname=\"Belshe\" fullname=\"Mike Belshe\">\r\n      <organization>Twist</organization>\r\n      <address>\r\n        <email>mbelshe@chromium.org</email>\r\n      </address>\r\n    </author>\r\n\r\n    <author initials=\"R.\" surname=\"Peon\" fullname=\"Roberto Peon\">\r\n      <organization>Google, Inc</organization>\r\n      <address>\r\n        <email>fenix@google.com</email>\r\n      </address>\r\n    </author>\r\n\r\n    <author initials=\"M.\" surname=\"Thomson\" fullname=\"Martin Thomson\" role=\"editor\">\r\n      <organization>Mozilla</organization>\r\n      <address>\r\n        <postal>\r\n          <street>331 E Evelyn Street</street>\r\n          <city>Mountain View</city>\r\n          <region>CA</region>\r\n          <code>94041</code>\r\n          <country>US</country>\r\n        </postal>\r\n        <email>martin.thomson@gmail.com</email>\r\n      </address>\r\n    </author>\r\n\r\n    <date year=\"2014\" />\r\n    <area>Applications</area>\r\n    <workgroup>HTTPbis</workgroup>\r\n    <keyword>HTTP</keyword>\r\n    <keyword>SPDY</keyword>\r\n    
<keyword>Web</keyword>\r\n\r\n    <abstract>\r\n      <t>\r\n        This specification describes an optimized expression of the semantics of the Hypertext\r\n        Transfer Protocol (HTTP). HTTP/2 enables a more efficient use of network resources and a\r\n        reduced perception of latency by introducing header field compression and allowing multiple\r\n        concurrent messages on the same connection. It also introduces unsolicited push of\r\n        representations from servers to clients.\r\n      </t>\r\n      <t>\r\n        This specification is an alternative to, but does not obsolete, the HTTP/1.1 message syntax.\r\n        HTTP's existing semantics remain unchanged.\r\n      </t>\r\n    </abstract>\r\n\r\n    <note title=\"Editorial Note (To be removed by RFC Editor)\">\r\n      <t>\r\n        Discussion of this draft takes place on the HTTPBIS working group mailing list\r\n        (ietf-http-wg@w3.org), which is archived at <eref\r\n        target=\"https://lists.w3.org/Archives/Public/ietf-http-wg/\"/>.\r\n      </t>\r\n      <t>\r\n        Working Group information can be found at <eref\r\n        target=\"https://tools.ietf.org/wg/httpbis/\"/>; that specific to HTTP/2 are at <eref\r\n        target=\"https://http2.github.io/\"/>.\r\n      </t>\r\n      <t>\r\n        The changes in this draft are summarized in <xref\r\n        target=\"change.log\"/>.\r\n      </t>\r\n    </note>\r\n\r\n  </front>\r\n\r\n  <middle>\r\n    <section anchor=\"intro\" title=\"Introduction\">\r\n\r\n      <t>\r\n        The Hypertext Transfer Protocol (HTTP) is a wildly successful protocol. 
However, the\r\n        HTTP/1.1 message format (<xref target=\"RFC7230\" x:fmt=\",\" x:rel=\"#http.message\"/>) has\r\n        several characteristics that have a negative overall effect on application performance\r\n        today.\r\n      </t>\r\n      <t>\r\n        In particular, HTTP/1.0 allowed only one request to be outstanding at a time on a given\r\n        TCP connection. HTTP/1.1 added request pipelining, but this only partially addressed\r\n        request concurrency and still suffers from head-of-line blocking. Therefore, HTTP/1.1\r\n        clients that need to make many requests typically use multiple connections to a server in\r\n        order to achieve concurrency and thereby reduce latency.\r\n      </t>\r\n      <t>\r\n        Furthermore, HTTP header fields are often repetitive and verbose, causing unnecessary\r\n        network traffic, as well as causing the initial <xref target=\"TCP\">TCP</xref> congestion\r\n        window to quickly fill. This can result in excessive latency when multiple requests are\r\n        made on a new TCP connection.\r\n      </t>\r\n      <t>\r\n        HTTP/2 addresses these issues by defining an optimized mapping of HTTP's semantics to an\r\n        underlying connection. Specifically, it allows interleaving of request and response\r\n        messages on the same connection and uses an efficient coding for HTTP header fields. It\r\n        also allows prioritization of requests, letting more important requests complete more\r\n        quickly, further improving performance.\r\n      </t>\r\n      <t>\r\n        The resulting protocol is more friendly to the network, because fewer TCP connections can\r\n        be used in comparison to HTTP/1.x. 
This means less competition with other flows, and\r\n        longer-lived connections, which in turn leads to better utilization of available network\r\n        capacity.\r\n      </t>\r\n      <t>\r\n        Finally, HTTP/2 also enables more efficient processing of messages through use of binary\r\n        message framing.\r\n      </t>\r\n    </section>\r\n\r\n    <section anchor=\"Overview\" title=\"HTTP/2 Protocol Overview\">\r\n      <t>\r\n        HTTP/2 provides an optimized transport for HTTP semantics.  HTTP/2 supports all of the core\r\n        features of HTTP/1.1, but aims to be more efficient in several ways.\r\n      </t>\r\n      <t>\r\n        The basic protocol unit in HTTP/2 is a <xref target=\"FrameHeader\">frame</xref>.  Each frame\r\n        type serves a different purpose.  For example, <x:ref>HEADERS</x:ref> and\r\n        <x:ref>DATA</x:ref> frames form the basis of <xref target=\"HttpSequence\">HTTP requests and\r\n        responses</xref>; other frame types like <x:ref>SETTINGS</x:ref>,\r\n        <x:ref>WINDOW_UPDATE</x:ref>, and <x:ref>PUSH_PROMISE</x:ref> are used in support of other\r\n        HTTP/2 features.\r\n      </t>\r\n      <t>\r\n        Multiplexing of requests is achieved by having each HTTP request-response exchange\r\n        associated with its own <xref target=\"StreamsLayer\">stream</xref>. Streams are largely\r\n        independent of each other, so a blocked or stalled request or response does not prevent\r\n        progress on other streams.\r\n      </t>\r\n      <t>\r\n        Flow control and prioritization ensure that it is possible to efficiently use multiplexed\r\n        streams.  <xref target=\"FlowControl\">Flow control</xref> helps to ensure that only data that\r\n        can be used by a receiver is transmitted.  
<xref\r\n        target=\"StreamPriority\">Prioritization</xref> ensures that limited resources can be directed\r\n        to the most important streams first.\r\n      </t>\r\n      <t>\r\n        HTTP/2 adds a new interaction mode, whereby a server can <xref target=\"PushResources\">push\r\n        responses to a client</xref>.  Server push allows a server to speculatively send a client\r\n        data that the server anticipates the client will need, trading off some network usage\r\n        against a potential latency gain.  The server does this by synthesizing a request, which it\r\n        sends as a <x:ref>PUSH_PROMISE</x:ref> frame.  The server is then able to send a response to\r\n        the synthetic request on a separate stream.\r\n      </t>\r\n      <t>\r\n        Frames that contain HTTP header fields are <xref target=\"HeaderBlock\">compressed</xref>.\r\n        HTTP requests can be highly redundant, so compression can reduce the size of requests and\r\n        responses significantly.\r\n      </t>\r\n\r\n      <section title=\"Document Organization\">\r\n        <t>\r\n          The HTTP/2 specification is split into four parts:\r\n          <list style=\"symbols\">\r\n            <t>\r\n              <xref target=\"starting\">Starting HTTP/2</xref> covers how an HTTP/2 connection is\r\n              initiated.\r\n            </t>\r\n            <t>\r\n              The <xref target=\"FramingLayer\">framing</xref> and <xref\r\n              target=\"StreamsLayer\">streams</xref> layers describe the way HTTP/2 frames are\r\n              structured and formed into multiplexed streams.\r\n            </t>\r\n            <t>\r\n              <xref target=\"FrameTypes\">Frame</xref> and <xref target=\"ErrorCodes\">error</xref>\r\n              definitions include details of the frame and error types used in HTTP/2.\r\n            </t>\r\n            <t>\r\n              <xref target=\"HTTPLayer\">HTTP mappings</xref> and <xref 
target=\"HttpExtra\">additional\r\n              requirements</xref> describe how HTTP semantics are expressed using frames and\r\n              streams.\r\n          </t>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          While some of the frame and stream layer concepts are isolated from HTTP, this\r\n          specification does not define a completely generic framing layer. The framing and streams\r\n          layers are tailored to the needs of the HTTP protocol and server push.\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"Conventions and Terminology\">\r\n        <t>\r\n          The key words \"MUST\", \"MUST NOT\", \"REQUIRED\", \"SHALL\", \"SHALL NOT\", \"SHOULD\", \"SHOULD\r\n          NOT\", \"RECOMMENDED\", \"MAY\", and \"OPTIONAL\" in this document are to be interpreted as\r\n          described in <xref target=\"RFC2119\">RFC 2119</xref>.\r\n        </t>\r\n        <t>\r\n          All numeric values are in network byte order.  Values are unsigned unless otherwise\r\n          indicated.  
Literal values are provided in decimal or hexadecimal as appropriate.\r\n          Hexadecimal literals are prefixed with <spanx style=\"verb\">0x</spanx> to distinguish them\r\n          from decimal literals.\r\n        </t>\r\n        <t>\r\n          The following terms are used:\r\n          <list style=\"hanging\">\r\n            <t hangText=\"client:\">\r\n              The endpoint initiating the HTTP/2 connection.\r\n            </t>\r\n            <t hangText=\"connection:\">\r\n              A transport-layer connection between two endpoints.\r\n            </t>\r\n            <t hangText=\"connection error:\">\r\n              An error that affects the entire HTTP/2 connection.\r\n            </t>\r\n            <t hangText=\"endpoint:\">\r\n              Either the client or server of the connection.\r\n            </t>\r\n            <t hangText=\"frame:\">\r\n              The smallest unit of communication within an HTTP/2 connection, consisting of a header\r\n              and a variable-length sequence of octets structured according to the frame type.\r\n            </t>\r\n            <t hangText=\"peer:\">\r\n              An endpoint.  
When discussing a particular endpoint, \"peer\" refers to the endpoint\r\n              that is remote to the primary subject of discussion.\r\n            </t>\r\n            <t hangText=\"receiver:\">\r\n              An endpoint that is receiving frames.\r\n            </t>\r\n            <t hangText=\"sender:\">\r\n              An endpoint that is transmitting frames.\r\n            </t>\r\n            <t hangText=\"server:\">\r\n              The endpoint which did not initiate the HTTP/2 connection.\r\n            </t>\r\n            <t hangText=\"stream:\">\r\n              A bi-directional flow of frames across a virtual channel within the HTTP/2 connection.\r\n            </t>\r\n            <t hangText=\"stream error:\">\r\n              An error on the individual HTTP/2 stream.\r\n            </t>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          Finally, the terms \"gateway\", \"intermediary\", \"proxy\", and \"tunnel\" are defined\r\n          in <xref target=\"RFC7230\" x:fmt=\"of\" x:rel=\"#intermediaries\"/>.\r\n        </t>\r\n      </section>\r\n    </section>\r\n\r\n    <section anchor=\"starting\" title=\"Starting HTTP/2\">\r\n      <t>\r\n        An HTTP/2 connection is an application layer protocol running on top of a TCP connection\r\n        (<xref target=\"TCP\"/>). The client is the TCP connection initiator.\r\n      </t>\r\n      <t>\r\n        HTTP/2 uses the same \"http\" and \"https\" URI schemes used by HTTP/1.1. HTTP/2 shares the same\r\n        default port numbers: 80 for \"http\" URIs and 443 for \"https\" URIs.  
As a result,\r\n        implementations processing requests for target resource URIs like <spanx\r\n        style=\"verb\">http://example.org/foo</spanx> or <spanx\r\n        style=\"verb\">https://example.com/bar</spanx> are required to first discover whether the\r\n        upstream server (the immediate peer to which the client wishes to establish a connection)\r\n        supports HTTP/2.\r\n      </t>\r\n\r\n      <t>\r\n        The means by which support for HTTP/2 is determined is different for \"http\" and \"https\"\r\n        URIs. Discovery for \"http\" URIs is described in <xref target=\"discover-http\"/>.  Discovery\r\n        for \"https\" URIs is described in <xref target=\"discover-https\"/>.\r\n      </t>\r\n\r\n      <section anchor=\"versioning\" title=\"HTTP/2 Version Identification\">\r\n        <t>\r\n          The protocol defined in this document has two identifiers.\r\n          <list style=\"symbols\">\r\n            <x:lt>\r\n              <t>\r\n                The string \"h2\" identifies the protocol where HTTP/2 uses <xref\r\n                target=\"TLS12\">TLS</xref>.  
This identifier is used in the <xref\r\n                target=\"TLS-ALPN\">TLS application layer protocol negotiation extension (ALPN)</xref>\r\n                field and any place that HTTP/2 over TLS is identified.\r\n              </t>\r\n              <t>\r\n                The \"h2\" string is serialized into an ALPN protocol identifier as the two octet\r\n                sequence: 0x68, 0x32.\r\n              </t>\r\n            </x:lt>\r\n            <x:lt>\r\n              <t>\r\n                The string \"h2c\" identifies the protocol where HTTP/2 is run over cleartext TCP.\r\n                This identifier is used in the HTTP/1.1 Upgrade header field and any place that\r\n                HTTP/2 over TCP is identified.\r\n              </t>\r\n            </x:lt>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          Negotiating \"h2\" or \"h2c\" implies the use of the transport, security, framing and message\r\n          semantics described in this document.\r\n        </t>\r\n        <t>\r\n          <cref>RFC Editor's Note: please remove the remainder of this section prior to the\r\n          publication of a final version of this document.</cref>\r\n        </t>\r\n        <t>\r\n          Only implementations of the final, published RFC can identify themselves as \"h2\" or \"h2c\".\r\n          Until such an RFC exists, implementations MUST NOT identify themselves using these\r\n          strings.\r\n        </t>\r\n        <t>\r\n          Examples and text throughout the rest of this document use \"h2\" as a matter of\r\n          editorial convenience only.  Implementations of draft versions MUST NOT identify using\r\n          this string.\r\n        </t>\r\n        <t>\r\n          Implementations of draft versions of the protocol MUST add the string \"-\" and the\r\n          corresponding draft number to the identifier. 
For example, draft-ietf-httpbis-http2-11\r\n          over TLS is identified using the string \"h2-11\".\r\n        </t>\r\n        <t>\r\n          Non-compatible experiments that are based on these draft versions MUST append the string\r\n          \"-\" and an experiment name to the identifier.  For example, an experimental implementation\r\n          of packet mood-based encoding based on draft-ietf-httpbis-http2-09 might identify itself\r\n          as \"h2-09-emo\".  Note that any label MUST conform to the \"token\" syntax defined in\r\n          <xref target=\"RFC7230\" x:fmt=\"of\" x:rel=\"#field.components\"/>.  Experimenters are\r\n          encouraged to coordinate their experiments on the ietf-http-wg@w3.org mailing list.\r\n        </t>\r\n      </section>\r\n\r\n      <section anchor=\"discover-http\" title=\"Starting HTTP/2 for &quot;http&quot; URIs\">\r\n        <t>\r\n          A client that makes a request for an \"http\" URI without prior knowledge about support for\r\n          HTTP/2 uses the HTTP Upgrade mechanism (<xref target=\"RFC7230\" x:fmt=\"of\"\r\n          x:rel=\"#header.upgrade\"/>).  The client makes an HTTP/1.1 request that includes an Upgrade\r\n          header field identifying HTTP/2 with the \"h2c\" token.  The HTTP/1.1 request MUST include\r\n          exactly one <xref target=\"Http2SettingsHeader\">HTTP2-Settings</xref> header field.\r\n        </t>\r\n        <figure>\r\n          <preamble>For example:</preamble>\r\n          <artwork type=\"message/http; msgtype=&#34;request&#34;\" x:indent-with=\"  \"><![CDATA[\r\nGET / HTTP/1.1\r\nHost: server.example.com\r\nConnection: Upgrade, HTTP2-Settings\r\nUpgrade: h2c\r\nHTTP2-Settings: <base64url encoding of HTTP/2 SETTINGS payload>\r\n\r\n]]></artwork>\r\n        </figure>\r\n        <t>\r\n          Requests that contain an entity body MUST be sent in their entirety before the client can\r\n          send HTTP/2 frames.  
This means that a large request entity can block the use of the\r\n          connection until it is completely sent.\r\n        </t>\r\n        <t>\r\n          If concurrency of an initial request with subsequent requests is important, an OPTIONS\r\n          request can be used to perform the upgrade to HTTP/2, at the cost of an additional\r\n          round-trip.\r\n        </t>\r\n        <t>\r\n          A server that does not support HTTP/2 can respond to the request as though the Upgrade\r\n          header field were absent:\r\n        </t>\r\n        <figure>\r\n          <artwork type=\"message/http; msgtype=&#34;response&#34;\" x:indent-with=\"  \">\r\nHTTP/1.1 200 OK\r\nContent-Length: 243\r\nContent-Type: text/html\r\n\r\n...\r\n</artwork>\r\n        </figure>\r\n        <t>\r\n          A server MUST ignore a \"h2\" token in an Upgrade header field.  Presence of a token with\r\n          \"h2\" implies HTTP/2 over TLS, which is instead negotiated as described in <xref\r\n          target=\"discover-https\"/>.\r\n        </t>\r\n        <t>\r\n          A server that supports HTTP/2 can accept the upgrade with a 101 (Switching Protocols)\r\n          response.  After the empty line that terminates the 101 response, the server can begin\r\n          sending HTTP/2 frames.  These frames MUST include a response to the request that initiated\r\n          the Upgrade.\r\n        </t>\r\n\r\n        <figure>\r\n          <preamble>\r\n            For example:\r\n          </preamble>\r\n          <artwork type=\"message/http; msgtype=&#34;response&#34;\" x:indent-with=\"  \">\r\nHTTP/1.1 101 Switching Protocols\r\nConnection: Upgrade\r\nUpgrade: h2c\r\n\r\n[ HTTP/2 connection ...\r\n</artwork>\r\n        </figure>\r\n        <t>\r\n          The first HTTP/2 frame sent by the server is a <x:ref>SETTINGS</x:ref> frame (<xref\r\n          target=\"SETTINGS\"/>) as the server connection preface (<xref\r\n          target=\"ConnectionHeader\"/>). 
Upon receiving the 101 response, the client sends a <xref\r\n          target=\"ConnectionHeader\">connection preface</xref>, which includes a\r\n          <x:ref>SETTINGS</x:ref> frame.\r\n        </t>\r\n        <t>\r\n          The HTTP/1.1 request that is sent prior to upgrade is assigned stream identifier 1 and is\r\n          assigned <xref target=\"pri-default\">default priority values</xref>.  Stream 1 is\r\n          implicitly half closed from the client toward the server, since the request is completed\r\n          as an HTTP/1.1 request.  After commencing the HTTP/2 connection, stream 1 is used for the\r\n          response.\r\n        </t>\r\n\r\n        <section anchor=\"Http2SettingsHeader\" title=\"HTTP2-Settings Header Field\">\r\n          <t>\r\n            A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one <spanx\r\n            style=\"verb\">HTTP2-Settings</spanx> header field.  The <spanx\r\n            style=\"verb\">HTTP2-Settings</spanx> header field is a connection-specific header field\r\n            that includes parameters that govern the HTTP/2 connection, provided in anticipation of\r\n            the server accepting the request to upgrade.\r\n          </t>\r\n          <figure>\r\n            <artwork type=\"abnf\" x:indent-with=\"  \"><![CDATA[\r\nHTTP2-Settings    = token68\r\n]]></artwork>\r\n          </figure>\r\n          <t>\r\n            A server MUST NOT upgrade the connection to HTTP/2 if this header field is not present,\r\n            or if more than one is present. 
A server MUST NOT send this header field.\r\n          </t>\r\n\r\n          <t>\r\n            The content of the <spanx style=\"verb\">HTTP2-Settings</spanx> header field is the\r\n            payload of a <x:ref>SETTINGS</x:ref> frame (<xref target=\"SETTINGS\"/>), encoded as a\r\n            base64url string (that is, the URL- and filename-safe Base64 encoding described in <xref\r\n            target=\"RFC4648\" x:fmt=\"of\" x:sec=\"5\"/>, with any trailing '=' characters omitted).  The\r\n            <xref target=\"RFC5234\">ABNF</xref> production for <spanx style=\"verb\">token68</spanx> is\r\n            defined in <xref target=\"RFC7235\" x:fmt=\"of\" x:rel=\"#challenge.and.response\"/>.\r\n          </t>\r\n          <t>\r\n            Since the upgrade is only intended to apply to the immediate connection, a client\r\n            sending <spanx style=\"verb\">HTTP2-Settings</spanx> MUST also send <spanx\r\n            style=\"verb\">HTTP2-Settings</spanx> as a connection option in the <spanx\r\n            style=\"verb\">Connection</spanx> header field to prevent it from being forwarded\r\n            downstream.\r\n          </t>\r\n          <t>\r\n            A server decodes and interprets these values as it would any other\r\n            <x:ref>SETTINGS</x:ref> frame.  <xref target=\"SettingsSync\">Acknowledgement of the\r\n            SETTINGS parameters</xref> is not necessary, since a 101 response serves as implicit\r\n            acknowledgement.  
Providing these values in the Upgrade request gives a client an\r\n            opportunity to provide parameters prior to receiving any frames from the server.\r\n          </t>\r\n        </section>\r\n      </section>\r\n\r\n      <section anchor=\"discover-https\" title=\"Starting HTTP/2 for &quot;https&quot; URIs\">\r\n        <t>\r\n          A client that makes a request to an \"https\" URI uses <xref target=\"TLS12\">TLS</xref>\r\n          with the <xref target=\"TLS-ALPN\">application layer protocol negotiation extension</xref>.\r\n        </t>\r\n        <t>\r\n          HTTP/2 over TLS uses the \"h2\" application token.  The \"h2c\" token MUST NOT be sent by a\r\n          client or selected by a server.\r\n        </t>\r\n        <t>\r\n          Once TLS negotiation is complete, both the client and the server send a <xref\r\n          target=\"ConnectionHeader\">connection preface</xref>.\r\n        </t>\r\n      </section>\r\n\r\n      <section anchor=\"known-http\" title=\"Starting HTTP/2 with Prior Knowledge\">\r\n        <t>\r\n          A client can learn that a particular server supports HTTP/2 by other means.  For example,\r\n          <xref target=\"ALT-SVC\"/> describes a mechanism for advertising this capability.\r\n        </t>\r\n        <t>\r\n          A client MAY immediately send HTTP/2 frames to a server that is known to support HTTP/2,\r\n          after the <xref target=\"ConnectionHeader\">connection preface</xref>; a server can\r\n          identify such a connection by the presence of the connection preface. This only affects\r\n          the establishment of HTTP/2 connections over cleartext TCP; implementations that support\r\n          HTTP/2 over TLS MUST use <xref target=\"TLS-ALPN\">protocol negotiation in TLS</xref>.\r\n        </t>\r\n        <t>\r\n          Without additional information, prior support for HTTP/2 is not a strong signal that a\r\n          given server will support HTTP/2 for future connections. 
For example, it is possible for\r\n          server configurations to change, for configurations to differ between instances in\r\n          clustered servers, or for network conditions to change.\r\n        </t>\r\n      </section>\r\n\r\n      <section anchor=\"ConnectionHeader\" title=\"HTTP/2 Connection Preface\">\r\n        <t>\r\n          Upon establishment of a TCP connection and determination that HTTP/2 will be used by both\r\n          peers, each endpoint MUST send a connection preface as a final confirmation and to\r\n          establish the initial SETTINGS parameters for the HTTP/2 connection.  The client and\r\n          server each send a different connection preface.\r\n        </t>\r\n        <t>\r\n          The client connection preface starts with a sequence of 24 octets, which in hex notation\r\n          are:\r\n        </t>\r\n        <figure>\r\n          <artwork type=\"inline\" x:indent-with=\"  \"><![CDATA[\r\n0x505249202a20485454502f322e300d0a0d0a534d0d0a0d0a\r\n]]></artwork>\r\n        </figure>\r\n        <t>\r\n          (the string <spanx style=\"verb\">PRI * HTTP/2.0\\r\\n\\r\\nSM\\r\\n\\r\\n</spanx>).  This sequence\r\n          is followed by a <x:ref>SETTINGS</x:ref> frame (<xref target=\"SETTINGS\"/>).  The\r\n          <x:ref>SETTINGS</x:ref> frame MAY be empty.  The client sends the client connection\r\n          preface immediately upon receipt of a 101 Switching Protocols response (indicating a\r\n          successful upgrade), or as the first application data octets of a TLS connection. 
If\r\n          starting an HTTP/2 connection with prior knowledge of server support for the protocol, the\r\n          client connection preface is sent upon connection establishment.\r\n        </t>\r\n        <t>\r\n          <list>\r\n            <t>\r\n              The client connection preface is selected so that a large proportion of HTTP/1.1 or\r\n              HTTP/1.0 servers and intermediaries do not attempt to process further frames.  Note\r\n              that this does not address the concerns raised in <xref target=\"TALKING\"/>.\r\n            </t>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          The server connection preface consists of a potentially empty <x:ref>SETTINGS</x:ref>\r\n          frame (<xref target=\"SETTINGS\"/>) that MUST be the first frame the server sends in the\r\n          HTTP/2 connection.\r\n        </t>\r\n        <t>\r\n          The <x:ref>SETTINGS</x:ref> frames received from a peer as part of the connection preface\r\n          MUST be acknowledged (see <xref target=\"SettingsSync\"/>) after sending the connection\r\n          preface.\r\n        </t>\r\n        <t>\r\n          To avoid unnecessary latency, clients are permitted to send additional frames to the\r\n          server immediately after sending the client connection preface, without waiting to receive\r\n          the server connection preface.  It is important to note, however, that the server\r\n          connection preface <x:ref>SETTINGS</x:ref> frame might include parameters that necessarily\r\n          alter how a client is expected to communicate with the server. 
Upon receiving the\r\n          <x:ref>SETTINGS</x:ref> frame, the client is expected to honor any parameters established.\r\n          In some configurations, it is possible for the server to transmit <x:ref>SETTINGS</x:ref>\r\n          before the client sends additional frames, providing an opportunity to avoid this issue.\r\n        </t>\r\n        <t>\r\n          Clients and servers MUST treat an invalid connection preface as a <xref\r\n          target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.  A <x:ref>GOAWAY</x:ref> frame (<xref target=\"GOAWAY\"/>)\r\n          MAY be omitted in this case, since an invalid preface indicates that the peer is not using\r\n          HTTP/2.\r\n        </t>\r\n      </section>\r\n    </section>\r\n\r\n    <section anchor=\"FramingLayer\" title=\"HTTP Frames\">\r\n      <t>\r\n        Once the HTTP/2 connection is established, endpoints can begin exchanging frames.\r\n      </t>\r\n\r\n      <section anchor=\"FrameHeader\" title=\"Frame Format\">\r\n        <t>\r\n          All frames begin with a fixed 9-octet header followed by a variable-length payload.\r\n        </t>\r\n        <figure title=\"Frame Layout\">\r\n          <artwork type=\"inline\"><![CDATA[\r\n  0                   1                   2                   3\r\n  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\r\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\r\n |                 Length (24)                   |\r\n +---------------+---------------+---------------+\r\n |   Type (8)    |   Flags (8)   |\r\n +-+-+-----------+---------------+-------------------------------+\r\n |R|                 Stream Identifier (31)                      |\r\n +=+=============================================================+\r\n |                   Frame Payload (0...)                      
...\r\n +---------------------------------------------------------------+\r\n]]></artwork>\r\n        </figure>\r\n        <t>\r\n          The fields of the frame header are defined as:\r\n          <list style=\"hanging\">\r\n            <x:lt hangText=\"Length:\">\r\n              <t>\r\n                The length of the frame payload expressed as an unsigned 24-bit integer.  Values\r\n                greater than 2<x:sup>14</x:sup> (16,384) MUST NOT be sent unless the receiver has\r\n                set a larger value for <x:ref>SETTINGS_MAX_FRAME_SIZE</x:ref>.\r\n              </t>\r\n              <t>\r\n                The 9 octets of the frame header are not included in this value.\r\n              </t>\r\n            </x:lt>\r\n            <x:lt hangText=\"Type:\">\r\n              <t>\r\n                The 8-bit type of the frame.  The frame type determines the format and semantics of\r\n                the frame.  Implementations MUST ignore and discard any frame that has a type that\r\n                is unknown.\r\n              </t>\r\n            </x:lt>\r\n            <x:lt hangText=\"Flags:\">\r\n              <t>\r\n                An 8-bit field reserved for frame-type specific boolean flags.\r\n              </t>\r\n              <t>\r\n                Flags are assigned semantics specific to the indicated frame type.  Flags that have\r\n                no defined semantics for a particular frame type MUST be ignored, and MUST be left\r\n                unset (0) when sending.\r\n              </t>\r\n            </x:lt>\r\n            <x:lt hangText=\"R:\">\r\n              <t>\r\n                A reserved 1-bit field.  
The semantics of this bit are undefined and the bit MUST\r\n                remain unset (0) when sending and MUST be ignored when receiving.\r\n              </t>\r\n            </x:lt>\r\n            <x:lt hangText=\"Stream Identifier:\">\r\n              <t>\r\n                A 31-bit stream identifier (see <xref target=\"StreamIdentifiers\"/>).  The value 0 is\r\n                reserved for frames that are associated with the connection as a whole as opposed to\r\n                an individual stream.\r\n              </t>\r\n            </x:lt>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          The structure and content of the frame payload is dependent entirely on the frame type.\r\n        </t>\r\n      </section>\r\n\r\n      <section anchor=\"FrameSize\" title=\"Frame Size\">\r\n        <t>\r\n          The size of a frame payload is limited by the maximum size that a receiver advertises in\r\n          the <x:ref>SETTINGS_MAX_FRAME_SIZE</x:ref> setting.  This setting can have any value\r\n          between 2<x:sup>14</x:sup> (16,384) and 2<x:sup>24</x:sup>-1 (16,777,215) octets,\r\n          inclusive.\r\n        </t>\r\n        <t>\r\n          All implementations MUST be capable of receiving and minimally processing frames up to\r\n          2<x:sup>14</x:sup> octets in length, plus the 9 octet <xref target=\"FrameHeader\">frame\r\n          header</xref>.  The size of the frame header is not included when describing frame sizes.\r\n          <list style=\"hanging\">\r\n            <t hangText=\"Note:\">\r\n              Certain frame types, such as <xref target=\"PING\">PING</xref>, impose additional limits\r\n              on the amount of payload data allowed.\r\n            </t>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          If a frame size exceeds any defined limit, or is too small to contain mandatory frame\r\n          data, the endpoint MUST send a <x:ref>FRAME_SIZE_ERROR</x:ref> error. 
A frame size error\r\n          in a frame that could alter the state of the entire connection MUST be treated as a <xref\r\n          target=\"ConnectionErrorHandler\">connection error</xref>; this includes any frame carrying\r\n          a <xref target=\"HeaderBlock\">header block</xref> (that is, <x:ref>HEADERS</x:ref>,\r\n          <x:ref>PUSH_PROMISE</x:ref>, and <x:ref>CONTINUATION</x:ref>), <x:ref>SETTINGS</x:ref>,\r\n          and any <x:ref>WINDOW_UPDATE</x:ref> frame with a stream identifier of 0.\r\n        </t>\r\n        <t>\r\n          Endpoints are not obligated to use all available space in a frame. Responsiveness can be\r\n          improved by using frames that are smaller than the permitted maximum size. Sending large\r\n          frames can result in delays in sending time-sensitive frames (such as\r\n          <x:ref>RST_STREAM</x:ref>, <x:ref>WINDOW_UPDATE</x:ref>, or <x:ref>PRIORITY</x:ref>)\r\n          which, if blocked by the transmission of a large frame, could affect performance.\r\n        </t>\r\n      </section>\r\n\r\n      <section anchor=\"HeaderBlock\" title=\"Header Compression and Decompression\">\r\n        <t>\r\n          Just as in HTTP/1, a header field in HTTP/2 is a name with one or more associated values.\r\n          They are used within HTTP request and response messages as well as server push operations\r\n          (see <xref target=\"PushResources\" />).\r\n        </t>\r\n        <t>\r\n          Header lists are collections of zero or more header fields.  When transmitted over a\r\n          connection, a header list is serialized into a header block using <xref\r\n          target=\"COMPRESSION\">HTTP Header Compression</xref>.  
The serialized header block is then\r\n          divided into one or more octet sequences, called header block fragments, and transmitted\r\n          within the payload of <xref target=\"HEADERS\">HEADERS</xref>, <xref\r\n          target=\"PUSH_PROMISE\">PUSH_PROMISE</xref> or <xref\r\n          target=\"CONTINUATION\">CONTINUATION</xref> frames.\r\n        </t>\r\n        <t>\r\n          The <xref target=\"COOKIE\">Cookie header field</xref> is treated specially by the HTTP\r\n          mapping (see <xref target=\"CompressCookie\"/>).\r\n        </t>\r\n        <t>\r\n          A receiving endpoint reassembles the header block by concatenating its fragments, then\r\n          decompresses the block to reconstruct the header list.\r\n        </t>\r\n        <t>\r\n          A complete header block consists of either:\r\n          <list style=\"symbols\">\r\n            <t>\r\n              a single <x:ref>HEADERS</x:ref> or <x:ref>PUSH_PROMISE</x:ref> frame,\r\n              with the END_HEADERS flag set, or\r\n            </t>\r\n            <t>\r\n              a <x:ref>HEADERS</x:ref> or <x:ref>PUSH_PROMISE</x:ref> frame with the END_HEADERS\r\n              flag cleared and one or more <x:ref>CONTINUATION</x:ref> frames,\r\n              where the last <x:ref>CONTINUATION</x:ref> frame has the END_HEADERS flag set.\r\n            </t>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          Header compression is stateful.  One compression context and one decompression context is\r\n          used for the entire connection.  Each header block is processed as a discrete unit.\r\n          Header blocks MUST be transmitted as a contiguous sequence of frames, with no interleaved\r\n          frames of any other type or from any other stream.  The last frame in a sequence of\r\n          <x:ref>HEADERS</x:ref> or <x:ref>CONTINUATION</x:ref> frames MUST have the END_HEADERS\r\n          flag set.  
The last frame in a sequence of <x:ref>PUSH_PROMISE</x:ref> or\r\n          <x:ref>CONTINUATION</x:ref> frames MUST have the END_HEADERS flag set.  This allows a\r\n          header block to be logically equivalent to a single frame.\r\n        </t>\r\n        <t>\r\n          Header block fragments can only be sent as the payload of <x:ref>HEADERS</x:ref>,\r\n          <x:ref>PUSH_PROMISE</x:ref> or <x:ref>CONTINUATION</x:ref> frames, because these frames\r\n          carry data that can modify the compression context maintained by a receiver.  An endpoint\r\n          receiving <x:ref>HEADERS</x:ref>, <x:ref>PUSH_PROMISE</x:ref> or\r\n          <x:ref>CONTINUATION</x:ref> frames MUST reassemble header blocks and perform decompression\r\n          even if the frames are to be discarded.  A receiver MUST terminate the connection with a\r\n          <xref target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>COMPRESSION_ERROR</x:ref> if it does not decompress a header block.\r\n        </t>\r\n      </section>\r\n    </section>\r\n\r\n    <section anchor=\"StreamsLayer\" title=\"Streams and Multiplexing\">\r\n      <t>\r\n        A \"stream\" is an independent, bi-directional sequence of frames exchanged between the client\r\n        and server within an HTTP/2 connection.  Streams have several important characteristics:\r\n        <list style=\"symbols\">\r\n          <t>\r\n            A single HTTP/2 connection can contain multiple concurrently open streams, with either\r\n            endpoint interleaving frames from multiple streams.\r\n          </t>\r\n          <t>\r\n            Streams can be established and used unilaterally or shared by either the client or\r\n            server.\r\n          </t>\r\n          <t>\r\n            Streams can be closed by either endpoint.\r\n          </t>\r\n          <t>\r\n            The order in which frames are sent on a stream is significant. 
Recipients process frames\r\n            in the order they are received.  In particular, the order of <x:ref>HEADERS</x:ref>,\r\n            and <x:ref>DATA</x:ref> frames is semantically significant.\r\n          </t>\r\n          <t>\r\n            Streams are identified by an integer.  Stream identifiers are assigned to streams by the\r\n            endpoint initiating the stream.\r\n          </t>\r\n        </list>\r\n      </t>\r\n\r\n      <section anchor=\"StreamStates\" title=\"Stream States\">\r\n        <t>\r\n          The lifecycle of a stream is shown in <xref target=\"StreamStatesFigure\"/>.\r\n        </t>\r\n\r\n        <figure anchor=\"StreamStatesFigure\" title=\"Stream States\">\r\n          <artwork type=\"drawing\">\r\n            <![CDATA[\r\n                           +--------+\r\n                     PP    |        |    PP\r\n                  ,--------|  idle  |--------.\r\n                 /         |        |         \\\r\n                v          +--------+          v\r\n         +----------+          |           +----------+\r\n         |          |          | H         |          |\r\n     ,---| reserved |          |           | reserved |---.\r\n     |   | (local)  |          v           | (remote) |   |\r\n     |   +----------+      +--------+      +----------+   |\r\n     |      |          ES  |        |  ES          |      |\r\n     |      | H    ,-------|  open  |-------.      
| H    |\r\n     |      |     /        |        |        \\     |      |\r\n     |      v    v         +--------+         v    v      |\r\n     |   +----------+          |           +----------+   |\r\n     |   |   half   |          |           |   half   |   |\r\n     |   |  closed  |          | R         |  closed  |   |\r\n     |   | (remote) |          |           | (local)  |   |\r\n     |   +----------+          |           +----------+   |\r\n     |        |                v                 |        |\r\n     |        |  ES / R    +--------+  ES / R    |        |\r\n     |        `----------->|        |<-----------'        |\r\n     |  R                  | closed |                  R  |\r\n     `-------------------->|        |<--------------------'\r\n                           +--------+\r\n\r\n       H:  HEADERS frame (with implied CONTINUATIONs)\r\n       PP: PUSH_PROMISE frame (with implied CONTINUATIONs)\r\n       ES: END_STREAM flag\r\n       R:  RST_STREAM frame\r\n]]>\r\n          </artwork>\r\n        </figure>\r\n\r\n        <t>\r\n          Note that this diagram shows stream state transitions and the frames and flags that affect\r\n          those transitions only.  In this regard, <x:ref>CONTINUATION</x:ref> frames do not result\r\n          in state transitions; they are effectively part of the <x:ref>HEADERS</x:ref> or\r\n          <x:ref>PUSH_PROMISE</x:ref> that they follow.  For this purpose, the END_STREAM flag is\r\n          processed as a separate event to the frame that bears it; a <x:ref>HEADERS</x:ref> frame\r\n          with the END_STREAM flag set can cause two state transitions.\r\n        </t>\r\n        <t>\r\n          Both endpoints have a subjective view of the state of a stream that could be different\r\n          when frames are in transit.  Endpoints do not coordinate the creation of streams; they are\r\n          created unilaterally by either endpoint.  
The negative consequences of a mismatch in\r\n          states are limited to the \"closed\" state after sending <x:ref>RST_STREAM</x:ref>, where\r\n          frames might be received for some time after closing.\r\n        </t>\r\n        <t>\r\n          Streams have the following states:\r\n          <list style=\"hanging\">\r\n\r\n            <x:lt hangText=\"idle:\">\r\n              <t>\r\n                <vspace blankLines=\"0\"/>\r\n                All streams start in the \"idle\" state.  In this state, no frames have been\r\n                exchanged.\r\n              </t>\r\n              <t>\r\n                The following transitions are valid from this state:\r\n                <list style=\"symbols\">\r\n                  <t>\r\n                    Sending or receiving a <x:ref>HEADERS</x:ref> frame causes the stream to become\r\n                    \"open\".  The stream identifier is selected as described in <xref\r\n                    target=\"StreamIdentifiers\"/>.  The same <x:ref>HEADERS</x:ref> frame can also\r\n                    cause a stream to immediately become \"half closed\".\r\n                  </t>\r\n                  <t>\r\n                    Sending a <x:ref>PUSH_PROMISE</x:ref> frame marks the associated stream for\r\n                    later use.  The stream state for the reserved stream transitions to \"reserved\r\n                    (local)\".\r\n                  </t>\r\n                  <t>\r\n                    Receiving a <x:ref>PUSH_PROMISE</x:ref> frame marks the associated stream as\r\n                    reserved by the remote peer.  
The state of the stream becomes \"reserved\r\n                    (remote)\".\r\n                  </t>\r\n                </list>\r\n              </t>\r\n              <t>\r\n                Receiving any frames other than <x:ref>HEADERS</x:ref> or\r\n                <x:ref>PUSH_PROMISE</x:ref> on a stream in this state MUST be treated as a <xref\r\n                target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n                <x:ref>PROTOCOL_ERROR</x:ref>.\r\n              </t>\r\n            </x:lt>\r\n\r\n            <x:lt hangText=\"reserved (local):\">\r\n              <t>\r\n                <vspace blankLines=\"0\"/>\r\n                A stream in the \"reserved (local)\" state is one that has been promised by sending a\r\n                <x:ref>PUSH_PROMISE</x:ref> frame.  A <x:ref>PUSH_PROMISE</x:ref> frame reserves an\r\n                idle stream by associating the stream with an open stream that was initiated by the\r\n                remote peer (see <xref target=\"PushResources\"/>).\r\n              </t>\r\n              <t>\r\n                In this state, only the following transitions are possible:\r\n                <list style=\"symbols\">\r\n                  <t>\r\n                    The endpoint can send a <x:ref>HEADERS</x:ref> frame.  This causes the stream to\r\n                    open in a \"half closed (remote)\" state.\r\n                  </t>\r\n                  <t>\r\n                    Either endpoint can send a <x:ref>RST_STREAM</x:ref> frame to cause the stream\r\n                    to become \"closed\".  
This releases the stream reservation.\r\n                  </t>\r\n                </list>\r\n              </t>\r\n              <t>\r\n                An endpoint MUST NOT send any type of frame other than <x:ref>HEADERS</x:ref> or\r\n                <x:ref>RST_STREAM</x:ref> in this state.\r\n              </t>\r\n              <t>\r\n                A <x:ref>PRIORITY</x:ref> frame MAY be received in this state.  Receiving any type\r\n                of frame other than <x:ref>RST_STREAM</x:ref> or <x:ref>PRIORITY</x:ref> on a stream\r\n                in this state MUST be treated as a <xref target=\"ConnectionErrorHandler\">connection\r\n                error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.\r\n              </t>\r\n            </x:lt>\r\n\r\n            <x:lt hangText=\"reserved (remote):\">\r\n              <t>\r\n                <vspace blankLines=\"0\"/>\r\n                A stream in the \"reserved (remote)\" state has been reserved by a remote peer.\r\n              </t>\r\n              <t>\r\n                In this state, only the following transitions are possible:\r\n                <list style=\"symbols\">\r\n                  <t>\r\n                    Receiving a <x:ref>HEADERS</x:ref> frame causes the stream to transition to\r\n                    \"half closed (local)\".\r\n                  </t>\r\n                  <t>\r\n                    Either endpoint can send a <x:ref>RST_STREAM</x:ref> frame to cause the stream\r\n                    to become \"closed\".  This releases the stream reservation.\r\n                  </t>\r\n                </list>\r\n              </t>\r\n              <t>\r\n                An endpoint MAY send a <x:ref>PRIORITY</x:ref> frame in this state to reprioritize\r\n                the reserved stream.  
An endpoint MUST NOT send any type of frame other than\r\n                <x:ref>RST_STREAM</x:ref>, <x:ref>WINDOW_UPDATE</x:ref>, or <x:ref>PRIORITY</x:ref>\r\n                in this state.\r\n              </t>\r\n              <t>\r\n                Receiving any type of frame other than <x:ref>HEADERS</x:ref> or\r\n                <x:ref>RST_STREAM</x:ref> on a stream in this state MUST be treated as a <xref\r\n                target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n                <x:ref>PROTOCOL_ERROR</x:ref>.\r\n              </t>\r\n            </x:lt>\r\n\r\n            <x:lt hangText=\"open:\">\r\n              <t>\r\n                <vspace blankLines=\"0\"/>\r\n                A stream in the \"open\" state may be used by both peers to send frames of any type.\r\n                In this state, sending peers observe advertised <xref target=\"FlowControl\">stream\r\n                level flow control limits</xref>.\r\n              </t>\r\n              <t>\r\n                From this state either endpoint can send a frame with an END_STREAM flag set, which\r\n                causes the stream to transition into one of the \"half closed\" states: an endpoint\r\n                sending an END_STREAM flag causes the stream state to become \"half closed (local)\";\r\n                an endpoint receiving an END_STREAM flag causes the stream state to become \"half\r\n                closed (remote)\".\r\n              </t>\r\n              <t>\r\n                Either endpoint can send a <x:ref>RST_STREAM</x:ref> frame from this state, causing\r\n                it to transition immediately to \"closed\".\r\n              </t>\r\n            </x:lt>\r\n\r\n            <x:lt hangText=\"half closed (local):\">\r\n              <t>\r\n                <vspace blankLines=\"0\"/>\r\n                A stream that is in the \"half closed (local)\" state cannot be used for sending\r\n                frames.  
Only <x:ref>WINDOW_UPDATE</x:ref>, <x:ref>PRIORITY</x:ref> and\r\n                <x:ref>RST_STREAM</x:ref> frames can be sent in this state.\r\n              </t>\r\n              <t>\r\n                A stream transitions from this state to \"closed\" when a frame that contains an\r\n                END_STREAM flag is received, or when either peer sends a <x:ref>RST_STREAM</x:ref>\r\n                frame.\r\n              </t>\r\n              <t>\r\n                A receiver can ignore <x:ref>WINDOW_UPDATE</x:ref> frames in this state, which might\r\n                arrive for a short period after a frame bearing the END_STREAM flag is sent.\r\n              </t>\r\n              <t>\r\n                <x:ref>PRIORITY</x:ref> frames received in this state are used to reprioritize\r\n                streams that depend on the current stream.\r\n              </t>\r\n            </x:lt>\r\n\r\n            <x:lt hangText=\"half closed (remote):\">\r\n              <t>\r\n                <vspace blankLines=\"0\"/>\r\n                A stream that is \"half closed (remote)\" is no longer being used by the peer to send\r\n                frames.  In this state, an endpoint is no longer obligated to maintain a receiver\r\n                flow control window if it performs flow control.\r\n              </t>\r\n              <t>\r\n                If an endpoint receives additional frames for a stream that is in this state, other\r\n                than <x:ref>WINDOW_UPDATE</x:ref>, <x:ref>PRIORITY</x:ref> or\r\n                <x:ref>RST_STREAM</x:ref>, it MUST respond with a <xref\r\n                target=\"StreamErrorHandler\">stream error</xref> of type\r\n                <x:ref>STREAM_CLOSED</x:ref>.\r\n              </t>\r\n              <t>\r\n                A stream that is \"half closed (remote)\" can be used by the endpoint to send frames\r\n                of any type. 
In this state, the endpoint continues to observe advertised <xref\r\n                target=\"FlowControl\">stream level flow control limits</xref>.\r\n              </t>\r\n              <t>\r\n                A stream can transition from this state to \"closed\" by sending a frame that contains\r\n                an END_STREAM flag, or when either peer sends a <x:ref>RST_STREAM</x:ref> frame.\r\n              </t>\r\n            </x:lt>\r\n\r\n            <x:lt hangText=\"closed:\">\r\n              <t>\r\n                <vspace blankLines=\"0\"/>\r\n                The \"closed\" state is the terminal state.\r\n              </t>\r\n              <t>\r\n                An endpoint MUST NOT send frames other than <x:ref>PRIORITY</x:ref> on a closed\r\n                stream.  An endpoint that receives any frame other than <x:ref>PRIORITY</x:ref>\r\n                after receiving a <x:ref>RST_STREAM</x:ref> MUST treat that as a <xref\r\n                target=\"StreamErrorHandler\">stream error</xref> of type\r\n                <x:ref>STREAM_CLOSED</x:ref>.  Similarly, an endpoint that receives any frames after\r\n                receiving a frame with the END_STREAM flag set MUST treat that as a <xref\r\n                target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n                <x:ref>STREAM_CLOSED</x:ref>, unless the frame is permitted as described below.\r\n              </t>\r\n              <t>\r\n                <x:ref>WINDOW_UPDATE</x:ref> or <x:ref>RST_STREAM</x:ref> frames can be received in\r\n                this state for a short period after a <x:ref>DATA</x:ref> or <x:ref>HEADERS</x:ref>\r\n                frame containing an END_STREAM flag is sent.  Until the remote peer receives and\r\n                processes <x:ref>RST_STREAM</x:ref> or the frame bearing the END_STREAM flag, it\r\n                might send frames of these types.  
Endpoints MUST ignore\r\n                <x:ref>WINDOW_UPDATE</x:ref> or <x:ref>RST_STREAM</x:ref> frames received in this\r\n                state, though endpoints MAY choose to treat frames that arrive a significant time\r\n                after sending END_STREAM as a <xref target="ConnectionErrorHandler">connection\r\n                error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.\r\n              </t>\r\n              <t>\r\n                <x:ref>PRIORITY</x:ref> frames can be sent on closed streams to prioritize streams\r\n                that are dependent on the closed stream.  Endpoints SHOULD process\r\n                <x:ref>PRIORITY</x:ref> frames, though they can be ignored if the stream has been\r\n                removed from the dependency tree (see <xref target="priority-gc"/>).\r\n              </t>\r\n              <t>\r\n                If this state is reached as a result of sending a <x:ref>RST_STREAM</x:ref> frame,\r\n                the peer that receives the <x:ref>RST_STREAM</x:ref> might have already sent - or\r\n                enqueued for sending - frames on the stream that cannot be withdrawn.  An endpoint\r\n                MUST ignore frames that it receives on closed streams after it has sent a\r\n                <x:ref>RST_STREAM</x:ref> frame.  
An endpoint MAY choose to limit the period over\r\n                which it ignores frames and treat frames that arrive after this time as being in\r\n                error.\r\n              </t>\r\n              <t>\r\n                Flow controlled frames (i.e., <x:ref>DATA</x:ref>) received after sending\r\n                <x:ref>RST_STREAM</x:ref> are counted toward the connection flow control window.\r\n                Even though these frames might be ignored, because they are sent before the sender\r\n                receives the <x:ref>RST_STREAM</x:ref>, the sender will consider the frames to count\r\n                against the flow control window.\r\n              </t>\r\n              <t>\r\n                An endpoint might receive a <x:ref>PUSH_PROMISE</x:ref> frame after it sends\r\n                <x:ref>RST_STREAM</x:ref>.  <x:ref>PUSH_PROMISE</x:ref> causes a stream to become\r\n                "reserved" even if the associated stream has been reset.  Therefore, a\r\n                <x:ref>RST_STREAM</x:ref> is needed to close an unwanted promised stream.\r\n              </t>\r\n            </x:lt>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          In the absence of more specific guidance elsewhere in this document, implementations\r\n          SHOULD treat the receipt of a frame that is not expressly permitted in the description of\r\n          a state as a <xref target="ConnectionErrorHandler">connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.  Frames of unknown types are ignored.\r\n        </t>\r\n        <t>\r\n          An example of the state transitions for an HTTP request/response exchange can be found in\r\n          <xref target="HttpSequence"/>.  
An example of the state transitions for server push can be\r\n          found in <xref target=\"PushRequests\"/> and <xref target=\"PushResponses\"/>.\r\n        </t>\r\n\r\n        <section anchor=\"StreamIdentifiers\" title=\"Stream Identifiers\">\r\n          <t>\r\n            Streams are identified with an unsigned 31-bit integer.  Streams initiated by a client\r\n            MUST use odd-numbered stream identifiers; those initiated by the server MUST use\r\n            even-numbered stream identifiers.  A stream identifier of zero (0x0) is used for\r\n            connection control messages; the stream identifier zero cannot be used to establish a\r\n            new stream.\r\n          </t>\r\n          <t>\r\n            HTTP/1.1 requests that are upgraded to HTTP/2 (see <xref target=\"discover-http\"/>) are\r\n            responded to with a stream identifier of one (0x1).  After the upgrade\r\n            completes, stream 0x1 is \"half closed (local)\" to the client.  Therefore, stream 0x1\r\n            cannot be selected as a new stream identifier by a client that upgrades from HTTP/1.1.\r\n          </t>\r\n          <t>\r\n            The identifier of a newly established stream MUST be numerically greater than all\r\n            streams that the initiating endpoint has opened or reserved.  This governs streams that\r\n            are opened using a <x:ref>HEADERS</x:ref> frame and streams that are reserved using\r\n            <x:ref>PUSH_PROMISE</x:ref>.  
An endpoint that receives an unexpected stream identifier\r\n            MUST respond with a <xref target=\"ConnectionErrorHandler\">connection error</xref> of\r\n            type <x:ref>PROTOCOL_ERROR</x:ref>.\r\n          </t>\r\n          <t>\r\n            The first use of a new stream identifier implicitly closes all streams in the \"idle\"\r\n            state that might have been initiated by that peer with a lower-valued stream identifier.\r\n            For example, if a client sends a <x:ref>HEADERS</x:ref> frame on stream 7 without ever\r\n            sending a frame on stream 5, then stream 5 transitions to the \"closed\" state when the\r\n            first frame for stream 7 is sent or received.\r\n          </t>\r\n          <t>\r\n            Stream identifiers cannot be reused.  Long-lived connections can result in an endpoint\r\n            exhausting the available range of stream identifiers.  A client that is unable to\r\n            establish a new stream identifier can establish a new connection for new streams.  A\r\n            server that is unable to establish a new stream identifier can send a\r\n            <x:ref>GOAWAY</x:ref> frame so that the client is forced to open a new connection for\r\n            new streams.\r\n          </t>\r\n        </section>\r\n\r\n        <section title=\"Stream Concurrency\">\r\n          <t>\r\n            A peer can limit the number of concurrently active streams using the\r\n            <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> parameter (see <xref\r\n            target=\"SettingValues\"/>) within a <x:ref>SETTINGS</x:ref> frame. The maximum concurrent\r\n            streams setting is specific to each endpoint and applies only to the peer that receives\r\n            the setting. 
That is, clients specify the maximum number of concurrent streams the\r\n            server can initiate, and servers specify the maximum number of concurrent streams the\r\n            client can initiate.\r\n          </t>\r\n          <t>\r\n            Streams that are in the \"open\" state, or either of the \"half closed\" states count toward\r\n            the maximum number of streams that an endpoint is permitted to open.  Streams in any of\r\n            these three states count toward the limit advertised in the\r\n            <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> setting.  Streams in either of the\r\n            \"reserved\" states do not count toward the stream limit.\r\n          </t>\r\n          <t>\r\n            Endpoints MUST NOT exceed the limit set by their peer.  An endpoint that receives a\r\n            <x:ref>HEADERS</x:ref> frame that causes their advertised concurrent stream limit to be\r\n            exceeded MUST treat this as a <xref target=\"StreamErrorHandler\">stream error</xref>.  An\r\n            endpoint that wishes to reduce the value of\r\n            <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> to a value that is below the current\r\n            number of open streams can either close streams that exceed the new value or allow\r\n            streams to complete.\r\n          </t>\r\n        </section>\r\n      </section>\r\n\r\n     <section anchor=\"FlowControl\" title=\"Flow Control\">\r\n        <t>\r\n          Using streams for multiplexing introduces contention over use of the TCP connection,\r\n          resulting in blocked streams.  A flow control scheme ensures that streams on the same\r\n          connection do not destructively interfere with each other.  
Flow control is used for both\r\n          individual streams and for the connection as a whole.\r\n        </t>\r\n        <t>\r\n          HTTP/2 provides for flow control through use of the <xref\r\n          target=\"WINDOW_UPDATE\">WINDOW_UPDATE frame</xref>.\r\n        </t>\r\n\r\n        <section anchor=\"fc-principles\" title=\"Flow Control Principles\">\r\n          <t>\r\n            HTTP/2 stream flow control aims to allow a variety of flow control algorithms to be\r\n            used without requiring protocol changes. Flow control in HTTP/2 has the following\r\n            characteristics:\r\n            <list style=\"numbers\">\r\n              <t>\r\n                Flow control is specific to a connection; i.e., it is \"hop-by-hop\", not\r\n                \"end-to-end\".\r\n              </t>\r\n              <t>\r\n                Flow control is based on window update frames.  Receivers advertise how many octets\r\n                they are prepared to receive on a stream and for the entire connection.  This is a\r\n                credit-based scheme.\r\n              </t>\r\n              <t>\r\n                Flow control is directional with overall control provided by the receiver.  A\r\n                receiver MAY choose to set any window size that it desires for each stream and for\r\n                the entire connection.  A sender MUST respect flow control limits imposed by a\r\n                receiver.  Clients, servers and intermediaries all independently advertise their\r\n                flow control window as a receiver and abide by the flow control limits set by\r\n                their peer when sending.\r\n              </t>\r\n              <t>\r\n                The initial value for the flow control window is 65,535 octets for both new streams\r\n                and the overall connection.\r\n              </t>\r\n              <t>\r\n                The frame type determines whether flow control applies to a frame.  
Of the frames\r\n                specified in this document, only <x:ref>DATA</x:ref> frames are subject to flow\r\n                control; all other frame types do not consume space in the advertised flow control\r\n                window.  This ensures that important control frames are not blocked by flow control.\r\n              </t>\r\n              <t>\r\n                Flow control cannot be disabled.\r\n              </t>\r\n              <t>\r\n                HTTP/2 defines only the format and semantics of the <x:ref>WINDOW_UPDATE</x:ref>\r\n                frame (<xref target=\"WINDOW_UPDATE\"/>).  This document does not stipulate how a\r\n                receiver decides when to send this frame or the value that it sends, nor does it\r\n                specify how a sender chooses to send packets.  Implementations are able to select\r\n                any algorithm that suits their needs.\r\n              </t>\r\n            </list>\r\n          </t>\r\n          <t>\r\n            Implementations are also responsible for managing how requests and responses are sent\r\n            based on priority; choosing how to avoid head of line blocking for requests; and\r\n            managing the creation of new streams.  Algorithm choices for these could interact with\r\n            any flow control algorithm.\r\n          </t>\r\n        </section>\r\n\r\n        <section anchor=\"DisableFlowControl\" title=\"Appropriate Use of Flow Control\">\r\n          <t>\r\n            Flow control is defined to protect endpoints that are operating under resource\r\n            constraints.  For example, a proxy needs to share memory between many connections, and\r\n            also might have a slow upstream connection and a fast downstream one.  
Flow control\r\n            addresses cases where the receiver is unable to process data on one stream, yet wants to\r\n            continue to process other streams in the same connection.\r\n          </t>\r\n          <t>\r\n            Deployments that do not require this capability can advertise a flow control window of\r\n            the maximum size, incrementing the available space when new data is received.  This\r\n            effectively disables flow control for that receiver.  Conversely, a sender is always\r\n            subject to the flow control window advertised by the receiver.\r\n          </t>\r\n          <t>\r\n            Deployments with constrained resources (for example, memory) can employ flow control to\r\n            limit the amount of memory a peer can consume.  Note, however, that this can lead to\r\n            suboptimal use of available network resources if flow control is enabled without\r\n            knowledge of the bandwidth-delay product (see <xref target="RFC1323"/>).\r\n          </t>\r\n          <t>\r\n            Even with full awareness of the current bandwidth-delay product, implementation of flow\r\n            control can be difficult.  When using flow control, the receiver MUST read from the TCP\r\n            receive buffer in a timely fashion.  Failure to do so could lead to a deadlock when\r\n            critical frames, such as <x:ref>WINDOW_UPDATE</x:ref>, are not read and acted upon.\r\n          </t>\r\n        </section>\r\n      </section>\r\n\r\n      <section anchor="StreamPriority" title="Stream priority">\r\n        <t>\r\n          A client can assign a priority for a new stream by including prioritization information in\r\n          the <xref target="HEADERS">HEADERS frame</xref> that opens the stream.  
For an existing\r\n          stream, the <xref target=\"PRIORITY\">PRIORITY frame</xref> can be used to change the\r\n          priority.\r\n        </t>\r\n        <t>\r\n          The purpose of prioritization is to allow an endpoint to express how it would prefer its\r\n          peer allocate resources when managing concurrent streams.  Most importantly, priority can\r\n          be used to select streams for transmitting frames when there is limited capacity for\r\n          sending.\r\n        </t>\r\n        <t>\r\n          Streams can be prioritized by marking them as dependent on the completion of other streams\r\n          (<xref target=\"pri-depend\"/>).  Each dependency is assigned a relative weight, a number\r\n          that is used to determine the relative proportion of available resources that are assigned\r\n          to streams dependent on the same stream.\r\n        </t>\r\n        <!--\r\n          Note that stream dependencies have not yet been validated in practice.  The theory\r\n          might be fairly sound, but there are no implementations currently sending these.  If it\r\n          turns out that they are not useful, or actively harmful, implementations will be requested\r\n          to avoid creating stream dependencies.\r\n        -->\r\n        <t>\r\n          Explicitly setting the priority for a stream is input to a prioritization process.  It\r\n          does not guarantee any particular processing or transmission order for the stream relative\r\n          to any other stream.  An endpoint cannot force a peer to process concurrent streams in a\r\n          particular order using priority.  
Expressing priority is therefore only ever a suggestion.\r\n        </t>\r\n        <t>\r\n          Providing prioritization information is optional, so default values are used if no\r\n          explicit indicator is provided (<xref target=\"pri-default\"/>).\r\n        </t>\r\n\r\n        <section title=\"Stream Dependencies\" anchor=\"pri-depend\">\r\n          <t>\r\n            Each stream can be given an explicit dependency on another stream.  Including a\r\n            dependency expresses a preference to allocate resources to the identified stream rather\r\n            than to the dependent stream.\r\n          </t>\r\n          <t>\r\n            A stream that is not dependent on any other stream is given a stream dependency of 0x0.\r\n            In other words, the non-existent stream 0 forms the root of the tree.\r\n          </t>\r\n          <t>\r\n            A stream that depends on another stream is a dependent stream. The stream upon which a\r\n            stream is dependent is a parent stream. A dependency on a stream that is not currently\r\n            in the tree - such as a stream in the \"idle\" state - results in that stream being given\r\n            a <xref target=\"pri-default\">default priority</xref>.\r\n          </t>\r\n          <t>\r\n            When assigning a dependency on another stream, the stream is added as a new dependency\r\n            of the parent stream.  Dependent streams that share the same parent are not ordered with\r\n            respect to each other.  
For example, if streams B and C are dependent on stream A, and\r\n            if stream D is created with a dependency on stream A, this results in a dependency order\r\n            of A followed by B, C, and D in any order.\r\n          </t>\r\n          <figure title=\"Example of Default Dependency Creation\">\r\n            <artwork type=\"inline\"><![CDATA[\r\n    A                 A\r\n   / \\      ==>      /|\\\r\n  B   C             B D C\r\n]]></artwork>\r\n          </figure>\r\n          <t>\r\n            An exclusive flag allows for the insertion of a new level of dependencies.  The\r\n            exclusive flag causes the stream to become the sole dependency of its parent stream,\r\n            causing other dependencies to become dependent on the exclusive stream.  In the\r\n            previous example, if stream D is created with an exclusive dependency on stream A, this\r\n            results in D becoming the dependency parent of B and C.\r\n          </t>\r\n          <figure title=\"Example of Exclusive Dependency Creation\">\r\n            <artwork type=\"inline\"><![CDATA[\r\n                      A\r\n    A                 |\r\n   / \\      ==>       D\r\n  B   C              / \\\r\n                    B   C\r\n]]></artwork>\r\n          </figure>\r\n          <t>\r\n            Inside the dependency tree, a dependent stream SHOULD only be allocated resources if all\r\n            of the streams that it depends on (the chain of parent streams up to 0x0) are either\r\n            closed, or it is not possible to make progress on them.\r\n          </t>\r\n          <t>\r\n            A stream cannot depend on itself.  
An endpoint MUST treat this as a <xref\r\n            target=\"StreamErrorHandler\">stream error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.\r\n          </t>\r\n        </section>\r\n\r\n        <section title=\"Dependency Weighting\">\r\n          <t>\r\n            All dependent streams are allocated an integer weight between 1 and 256 (inclusive).\r\n          </t>\r\n          <t>\r\n            Streams with the same parent SHOULD be allocated resources proportionally based on their\r\n            weight.  Thus, if stream B depends on stream A with weight 4, and C depends on stream A\r\n            with weight 12, and if no progress can be made on A, stream B ideally receives one third\r\n            of the resources allocated to stream C.\r\n          </t>\r\n        </section>\r\n\r\n        <section anchor=\"reprioritize\" title=\"Reprioritization\">\r\n          <t>\r\n            Stream priorities are changed using the <x:ref>PRIORITY</x:ref> frame.  Setting a\r\n            dependency causes a stream to become dependent on the identified parent stream.\r\n          </t>\r\n          <t>\r\n            Dependent streams move with their parent stream if the parent is reprioritized.  Setting\r\n            a dependency with the exclusive flag for a reprioritized stream moves all the\r\n            dependencies of the new parent stream to become dependent on the reprioritized stream.\r\n          </t>\r\n          <t>\r\n            If a stream is made dependent on one of its own dependencies, the formerly dependent\r\n            stream is first moved to be dependent on the reprioritized stream's previous parent.\r\n            The moved dependency retains its weight.\r\n          </t>\r\n          <figure title=\"Example of Dependency Reordering\">\r\n            <preamble>\r\n              For example, consider an original dependency tree where B and C depend on A, D and E\r\n              depend on C, and F depends on D.  
If A is made dependent on D, then D takes the place\r\n              of A.  All other dependency relationships stay the same, except for F, which becomes\r\n              dependent on A if the reprioritization is exclusive.\r\n            </preamble>\r\n            <artwork type=\"inline\"><![CDATA[\r\n    ?                ?                ?                 ?\r\n    |               / \\               |                 |\r\n    A              D   A              D                 D\r\n   / \\            /   / \\            / \\                |\r\n  B   C     ==>  F   B   C   ==>    F   A       OR      A\r\n     / \\                 |             / \\             /|\\\r\n    D   E                E            B   C           B C F\r\n    |                                     |             |\r\n    F                                     E             E\r\n               (intermediate)   (non-exclusive)    (exclusive)\r\n]]></artwork>\r\n          </figure>\r\n        </section>\r\n\r\n        <section anchor=\"priority-gc\" title=\"Prioritization State Management\">\r\n          <t>\r\n            When a stream is removed from the dependency tree, its dependencies can be moved to\r\n            become dependent on the parent of the closed stream.  The weights of new dependencies\r\n            are recalculated by distributing the weight of the dependency of the closed stream\r\n            proportionally based on the weights of its dependencies.\r\n          </t>\r\n          <t>\r\n            Streams that are removed from the dependency tree cause some prioritization information\r\n            to be lost.  Resources are shared between streams with the same parent stream, which\r\n            means that if a stream in that set closes or becomes blocked, any spare capacity\r\n            allocated to a stream is distributed to the immediate neighbors of the stream.  
However,\r\n            if the common dependency is removed from the tree, those streams share resources with\r\n            streams at the next highest level.\r\n          </t>\r\n          <t>\r\n            For example, assume streams A and B share a parent, and streams C and D both depend on\r\n            stream A. Prior to the removal of stream A, if streams A and D are unable to proceed,\r\n            then stream C receives all the resources dedicated to stream A.  If stream A is removed\r\n            from the tree, the weight of stream A is divided between streams C and D.  If stream D\r\n            is still unable to proceed, this results in stream C receiving a reduced proportion of\r\n            resources.  For equal starting weights, C receives one third, rather than one half, of\r\n            available resources.\r\n          </t>\r\n          <t>\r\n            It is possible for a stream to become closed while prioritization information that\r\n            creates a dependency on that stream is in transit.  If a stream identified in a\r\n            dependency has no associated priority information, then the dependent stream is instead\r\n            assigned a <xref target=\"pri-default\">default priority</xref>.  This potentially creates\r\n            suboptimal prioritization, since the stream could be given a priority that is different\r\n            to what is intended.\r\n          </t>\r\n          <t>\r\n            To avoid these problems, an endpoint SHOULD retain stream prioritization state for a\r\n            period after streams become closed.  
The longer state is retained, the lower the chance\r\n            that streams are assigned incorrect or default priority values.\r\n          </t>\r\n          <t>\r\n            This could create a large state burden for an endpoint, so this state MAY be limited.\r\n            An endpoint MAY apply a fixed upper limit on the number of closed streams for which\r\n            prioritization state is tracked to limit state exposure.  The amount of additional state\r\n            an endpoint maintains could be dependent on load; under high load, prioritization state\r\n            can be discarded to limit resource commitments.  In extreme cases, an endpoint could\r\n            even discard prioritization state for active or reserved streams. If a fixed limit is\r\n            applied, endpoints SHOULD maintain state for at least as many streams as allowed by\r\n            their setting for <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref>.\r\n          </t>\r\n          <t>\r\n            An endpoint receiving a <x:ref>PRIORITY</x:ref> frame that changes the priority of a\r\n            closed stream SHOULD alter the dependencies of the streams that depend on it, if it has\r\n            retained enough state to do so.\r\n          </t>\r\n        </section>\r\n\r\n        <section title=\"Default Priorities\" anchor=\"pri-default\">\r\n          <t>\r\n            Providing priority information is optional.  Streams are assigned a non-exclusive\r\n            dependency on stream 0x0 by default.  <xref target=\"PushResources\">Pushed streams</xref>\r\n            initially depend on their associated stream.  
In both cases, streams are assigned a\r\n            default weight of 16.\r\n          </t>\r\n        </section>\r\n      </section>\r\n\r\n      <section title=\"Error Handling\">\r\n        <t>\r\n          HTTP/2 framing permits two classes of error:\r\n          <list style=\"symbols\">\r\n            <t>\r\n              An error condition that renders the entire connection unusable is a connection error.\r\n            </t>\r\n            <t>\r\n              An error in an individual stream is a stream error.\r\n            </t>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          A list of error codes is included in <xref target=\"ErrorCodes\"/>.\r\n        </t>\r\n\r\n        <section anchor=\"ConnectionErrorHandler\" title=\"Connection Error Handling\">\r\n          <t>\r\n            A connection error is any error which prevents further processing of the framing layer,\r\n            or which corrupts any connection state.\r\n          </t>\r\n          <t>\r\n            An endpoint that encounters a connection error SHOULD first send a <x:ref>GOAWAY</x:ref>\r\n            frame (<xref target=\"GOAWAY\"/>) with the stream identifier of the last stream that it\r\n            successfully received from its peer.  The <x:ref>GOAWAY</x:ref> frame includes an error\r\n            code that indicates why the connection is terminating.  After sending the\r\n            <x:ref>GOAWAY</x:ref> frame, the endpoint MUST close the TCP connection.\r\n          </t>\r\n          <t>\r\n            It is possible that the <x:ref>GOAWAY</x:ref> will not be reliably received by the\r\n            receiving endpoint (see <xref target=\"RFC7230\" x:fmt=\",\"\r\n            x:rel=\"#persistent.tear-down\"/>).  
In the event of a connection error,\r\n            <x:ref>GOAWAY</x:ref> only provides a best effort attempt to communicate with the peer\r\n            about why the connection is being terminated.\r\n          </t>\r\n          <t>\r\n            An endpoint can end a connection at any time.  In particular, an endpoint MAY choose to\r\n            treat a stream error as a connection error.  Endpoints SHOULD send a\r\n            <x:ref>GOAWAY</x:ref> frame when ending a connection, providing that circumstances\r\n            permit it.\r\n          </t>\r\n        </section>\r\n\r\n        <section anchor=\"StreamErrorHandler\" title=\"Stream Error Handling\">\r\n          <t>\r\n            A stream error is an error related to a specific stream that does not affect processing\r\n            of other streams.\r\n          </t>\r\n          <t>\r\n            An endpoint that detects a stream error sends a <x:ref>RST_STREAM</x:ref> frame (<xref\r\n            target=\"RST_STREAM\"/>) that contains the stream identifier of the stream where the error\r\n            occurred.  The <x:ref>RST_STREAM</x:ref> frame includes an error code that indicates the\r\n            type of error.\r\n          </t>\r\n          <t>\r\n            A <x:ref>RST_STREAM</x:ref> is the last frame that an endpoint can send on a stream.\r\n            The peer that sends the <x:ref>RST_STREAM</x:ref> frame MUST be prepared to receive any\r\n            frames that were sent or enqueued for sending by the remote peer.  These frames can be\r\n            ignored, except where they modify connection state (such as the state maintained for\r\n            <xref target=\"HeaderBlock\">header compression</xref>, or flow control).\r\n          </t>\r\n          <t>\r\n            Normally, an endpoint SHOULD NOT send more than one <x:ref>RST_STREAM</x:ref> frame for\r\n            any stream. 
However, an endpoint MAY send additional <x:ref>RST_STREAM</x:ref> frames if\r\n            it receives frames on a closed stream after more than a round-trip time.  This behavior\r\n            is permitted to deal with misbehaving implementations.\r\n          </t>\r\n          <t>\r\n            An endpoint MUST NOT send a <x:ref>RST_STREAM</x:ref> in response to an\r\n            <x:ref>RST_STREAM</x:ref> frame, to avoid looping.\r\n          </t>\r\n        </section>\r\n\r\n        <section title=\"Connection Termination\">\r\n          <t>\r\n            If the TCP connection is closed or reset while streams remain in open or half closed\r\n            states, then the endpoint MUST assume that those streams were abnormally interrupted and\r\n            could be incomplete.\r\n          </t>\r\n        </section>\r\n      </section>\r\n\r\n      <section anchor=\"extensibility\" title=\"Extending HTTP/2\">\r\n        <t>\r\n          HTTP/2 permits extension of the protocol.  Protocol extensions can be used to provide\r\n          additional services or alter any aspect of the protocol, within the limitations described\r\n          in this section.  Extensions are effective only within the scope of a single HTTP/2\r\n          connection.\r\n        </t>\r\n        <t>\r\n          Extensions are permitted to use new <xref target=\"FrameHeader\">frame types</xref>, new\r\n          <xref target=\"SettingValues\">settings</xref>, or new <xref target=\"ErrorCodes\">error\r\n          codes</xref>.  Registries are established for managing these extension points: <xref\r\n          target=\"iana-frames\">frame types</xref>, <xref target=\"iana-settings\">settings</xref> and\r\n          <xref target=\"iana-errors\">error codes</xref>.\r\n        </t>\r\n        <t>\r\n          Implementations MUST ignore unknown or unsupported values in all extensible protocol\r\n          elements.  
Implementations MUST discard frames that have unknown or unsupported types.\r\n          This means that any of these extension points can be safely used by extensions without\r\n          prior arrangement or negotiation.  However, extension frames that appear in the middle of\r\n          a <xref target=\"HeaderBlock\">header block</xref> are not permitted; these MUST be treated\r\n          as a <xref target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.\r\n        </t>\r\n        <t>\r\n          However, extensions that could change the semantics of existing protocol components MUST\r\n          be negotiated before being used.  For example, an extension that changes the layout of the\r\n          <x:ref>HEADERS</x:ref> frame cannot be used until the peer has given a positive signal\r\n          that this is acceptable.  In this case, it could also be necessary to coordinate when the\r\n          revised layout comes into effect.  Note that treating any frame other than\r\n          <x:ref>DATA</x:ref> frames as flow controlled is such a change in semantics, and can only\r\n          be done through negotiation.\r\n        </t>\r\n        <t>\r\n          This document doesn't mandate a specific method for negotiating the use of an extension,\r\n          but notes that a <xref target=\"SettingValues\">setting</xref> could be used for that\r\n          purpose.  If both peers set a value that indicates willingness to use the extension, then\r\n          the extension can be used.  If a setting is used for extension negotiation, the initial\r\n          value MUST be defined so that the extension is initially disabled.\r\n        </t>\r\n      </section>\r\n    </section>\r\n\r\n    <section anchor=\"FrameTypes\" title=\"Frame Definitions\">\r\n      <t>\r\n        This specification defines a number of frame types, each identified by a unique 8-bit type\r\n        code. 
Each frame type serves a distinct purpose either in the establishment and management\r\n        of the connection as a whole, or of individual streams.\r\n      </t>\r\n      <t>\r\n        The transmission of specific frame types can alter the state of a connection. If endpoints\r\n        fail to maintain a synchronized view of the connection state, successful communication\r\n        within the connection will no longer be possible. Therefore, it is important that endpoints\r\n        have a shared comprehension of how the state is affected by the use of any given frame.\r\n      </t>\r\n\r\n      <section anchor=\"DATA\" title=\"DATA\">\r\n        <t>\r\n          DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated\r\n          with a stream. One or more DATA frames are used, for instance, to carry HTTP request or\r\n          response payloads.\r\n        </t>\r\n        <t>\r\n          DATA frames MAY also contain arbitrary padding.  Padding can be added to DATA frames to\r\n          obscure the size of messages.\r\n        </t>\r\n        <figure title=\"DATA Frame Payload\">\r\n          <artwork type=\"inline\"><![CDATA[\r\n  0                   1                   2                   3\r\n  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\r\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\r\n |Pad Length? 
(8)|\r\n +---------------+-----------------------------------------------+\r\n |                            Data (*)                         ...\r\n +---------------------------------------------------------------+\r\n |                           Padding (*)                       ...\r\n +---------------------------------------------------------------+\r\n]]></artwork>\r\n        </figure>\r\n        <t>\r\n          The DATA frame contains the following fields:\r\n          <list style=\"hanging\">\r\n            <t hangText=\"Pad Length:\">\r\n              An 8-bit field containing the length of the frame padding in units of octets.  This\r\n              field is optional and is only present if the PADDED flag is set.\r\n            </t>\r\n            <t hangText=\"Data:\">\r\n              Application data.  The amount of data is the remainder of the frame payload after\r\n              subtracting the length of the other fields that are present.\r\n            </t>\r\n            <t hangText=\"Padding:\">\r\n              Padding octets that contain no application semantic value.  Padding octets MUST be set\r\n              to zero when sending and ignored when receiving.\r\n            </t>\r\n          </list>\r\n        </t>\r\n\r\n        <t>\r\n          The DATA frame defines the following flags:\r\n          <list style=\"hanging\">\r\n            <t hangText=\"END_STREAM (0x1):\">\r\n              Bit 1 being set indicates that this frame is the last that the endpoint will send for\r\n              the identified stream.  
Setting this flag causes the stream to enter one of <xref\r\n              target=\"StreamStates\">the \"half closed\" states or the \"closed\" state</xref>.\r\n            </t>\r\n            <t hangText=\"PADDED (0x8):\">\r\n              Bit 4 being set indicates that the Pad Length field and any padding that it describes\r\n              is present.\r\n            </t>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          DATA frames MUST be associated with a stream. If a DATA frame is received whose stream\r\n          identifier field is 0x0, the recipient MUST respond with a <xref\r\n          target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.\r\n        </t>\r\n        <t>\r\n          DATA frames are subject to flow control and can only be sent when a stream is in the\r\n          \"open\" or \"half closed (remote)\" states. The entire DATA frame payload is included in flow\r\n          control, including Pad Length and Padding fields if present.  If a DATA frame is received\r\n          whose stream is not in \"open\" or \"half closed (local)\" state, the recipient MUST respond\r\n          with a <xref target=\"StreamErrorHandler\">stream error</xref> of type\r\n          <x:ref>STREAM_CLOSED</x:ref>.\r\n        </t>\r\n        <t>\r\n          The total number of padding octets is determined by the value of the Pad Length field. 
If\r\n          the length of the padding is greater than the length of the frame payload, the recipient\r\n          MUST treat this as a <xref target=\"ConnectionErrorHandler\">connection error</xref> of\r\n          type <x:ref>PROTOCOL_ERROR</x:ref>.\r\n          <list style=\"hanging\">\r\n            <t hangText=\"Note:\">\r\n              A frame can be increased in size by one octet by including a Pad Length field with a\r\n              value of zero.\r\n            </t>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          Padding is a security feature; see <xref target=\"padding\"/>.\r\n        </t>\r\n      </section>\r\n\r\n      <section anchor=\"HEADERS\" title=\"HEADERS\">\r\n        <t>\r\n          The HEADERS frame (type=0x1) is used to <xref target=\"StreamStates\">open a stream</xref>,\r\n          and additionally carries a header block fragment. HEADERS frames can be sent on a stream\r\n          in the \"open\" or \"half closed (remote)\" states.\r\n        </t>\r\n        <figure title=\"HEADERS Frame Payload\">\r\n          <artwork type=\"inline\"><![CDATA[\r\n  0                   1                   2                   3\r\n  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\r\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\r\n |Pad Length? (8)|\r\n +-+-------------+-----------------------------------------------+\r\n |E|                 Stream Dependency? (31)                     |\r\n +-+-------------+-----------------------------------------------+\r\n |  Weight? 
(8)  |\r\n +-+-------------+-----------------------------------------------+\r\n |                   Header Block Fragment (*)                 ...\r\n +---------------------------------------------------------------+\r\n |                           Padding (*)                       ...\r\n +---------------------------------------------------------------+\r\n]]></artwork>\r\n        </figure>\r\n        <t>\r\n          The HEADERS frame payload has the following fields:\r\n          <list style=\"hanging\">\r\n            <t hangText=\"Pad Length:\">\r\n              An 8-bit field containing the length of the frame padding in units of octets.  This\r\n              field is only present if the PADDED flag is set.\r\n            </t>\r\n            <t hangText=\"E:\">\r\n              A single bit flag indicates that the stream dependency is exclusive, see <xref\r\n              target=\"StreamPriority\"/>.  This field is only present if the PRIORITY flag is set.\r\n            </t>\r\n            <t hangText=\"Stream Dependency:\">\r\n              A 31-bit stream identifier for the stream that this stream depends on, see <xref\r\n              target=\"StreamPriority\"/>.  This field is only present if the PRIORITY flag is set.\r\n            </t>\r\n            <t hangText=\"Weight:\">\r\n              An 8-bit weight for the stream, see <xref target=\"StreamPriority\"/>.  Add one to the\r\n              value to obtain a weight between 1 and 256.  This field is only present if the\r\n              PRIORITY flag is set.\r\n            </t>\r\n            <t hangText=\"Header Block Fragment:\">\r\n              A <xref target=\"HeaderBlock\">header block fragment</xref>.\r\n            </t>\r\n            <t hangText=\"Padding:\">\r\n              Padding octets that contain no application semantic value.  
Padding octets MUST be set\r\n              to zero when sending and ignored when receiving.\r\n            </t>\r\n          </list>\r\n        </t>\r\n\r\n        <t>\r\n          The HEADERS frame defines the following flags:\r\n          <list style=\"hanging\">\r\n            <x:lt hangText=\"END_STREAM (0x1):\">\r\n              <t>\r\n                Bit 1 being set indicates that the <xref target=\"HeaderBlock\">header block</xref> is\r\n                the last that the endpoint will send for the identified stream.  Setting this flag\r\n                causes the stream to enter one of <xref target=\"StreamStates\">\"half closed\"\r\n                states</xref>.\r\n              </t>\r\n              <t>\r\n                A HEADERS frame carries the END_STREAM flag that signals the end of a stream.\r\n                However, a HEADERS frame with the END_STREAM flag set can be followed by\r\n                <x:ref>CONTINUATION</x:ref> frames on the same stream.  Logically, the\r\n                <x:ref>CONTINUATION</x:ref> frames are part of the HEADERS frame.\r\n              </t>\r\n            </x:lt>\r\n            <x:lt hangText=\"END_HEADERS (0x4):\">\r\n              <t>\r\n                Bit 3 being set indicates that this frame contains an entire <xref\r\n                target=\"HeaderBlock\">header block</xref> and is not followed by any\r\n                <x:ref>CONTINUATION</x:ref> frames.\r\n              </t>\r\n              <t>\r\n                A HEADERS frame without the END_HEADERS flag set MUST be followed by a\r\n                <x:ref>CONTINUATION</x:ref> frame for the same stream.  
A receiver MUST treat the\r\n                receipt of any other type of frame or a frame on a different stream as a <xref\r\n                target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n                <x:ref>PROTOCOL_ERROR</x:ref>.\r\n              </t>\r\n            </x:lt>\r\n            <x:lt hangText=\"PADDED (0x8):\">\r\n              <t>\r\n                Bit 4 being set indicates that the Pad Length field and any padding that it\r\n                describes is present.\r\n              </t>\r\n            </x:lt>\r\n            <x:lt hangText=\"PRIORITY (0x20):\">\r\n              <t>\r\n                Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight\r\n                fields are present; see <xref target=\"StreamPriority\"/>.\r\n              </t>\r\n            </x:lt>\r\n          </list>\r\n        </t>\r\n\r\n        <t>\r\n          The payload of a HEADERS frame contains a <xref target=\"HeaderBlock\">header block\r\n          fragment</xref>.  A header block that does not fit within a HEADERS frame is continued in\r\n          a <xref target=\"CONTINUATION\">CONTINUATION frame</xref>.\r\n        </t>\r\n\r\n        <t>\r\n          HEADERS frames MUST be associated with a stream. If a HEADERS frame is received whose\r\n          stream identifier field is 0x0, the recipient MUST respond with a <xref\r\n          target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.\r\n        </t>\r\n\r\n        <t>\r\n          The HEADERS frame changes the connection state as described in <xref\r\n          target=\"HeaderBlock\"/>.\r\n        </t>\r\n\r\n        <t>\r\n          The HEADERS frame includes optional padding.  
Padding fields and flags are identical to\r\n          those defined for <xref target=\"DATA\">DATA frames</xref>.\r\n        </t>\r\n        <t>\r\n          Prioritization information in a HEADERS frame is logically equivalent to a separate\r\n          <x:ref>PRIORITY</x:ref> frame, but inclusion in HEADERS avoids the potential for churn in\r\n          stream prioritization when new streams are created.  Prioritization fields in HEADERS frames\r\n          subsequent to the first on a stream <xref target=\"reprioritize\">reprioritize the\r\n          stream</xref>.\r\n        </t>\r\n      </section>\r\n\r\n      <section anchor=\"PRIORITY\" title=\"PRIORITY\">\r\n        <t>\r\n          The PRIORITY frame (type=0x2) specifies the <xref target=\"StreamPriority\">sender-advised\r\n          priority of a stream</xref>.  It can be sent at any time for an existing stream, including\r\n          closed streams.  This enables reprioritization of existing streams.\r\n        </t>\r\n        <figure title=\"PRIORITY Frame Payload\">\r\n          <artwork type=\"inline\"><![CDATA[\r\n  0                   1                   2                   3\r\n  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\r\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\r\n |E|                  Stream Dependency (31)                     |\r\n +-+-------------+-----------------------------------------------+\r\n |   Weight (8)  |\r\n +-+-------------+\r\n]]></artwork>\r\n        </figure>\r\n        <t>\r\n          The payload of a PRIORITY frame contains the following fields:\r\n          <list style=\"hanging\">\r\n            <t hangText=\"E:\">\r\n              A single bit flag indicates that the stream dependency is exclusive, see <xref\r\n              target=\"StreamPriority\"/>.\r\n            </t>\r\n            <t hangText=\"Stream Dependency:\">\r\n              A 31-bit stream identifier for the stream that this stream depends on, see <xref\r\n 
             target=\"StreamPriority\"/>.\r\n            </t>\r\n            <t hangText=\"Weight:\">\r\n              An 8-bit weight for the identified stream dependency, see <xref\r\n              target=\"StreamPriority\"/>.  Add one to the value to obtain a weight between 1 and 256.\r\n            </t>\r\n          </list>\r\n        </t>\r\n\r\n        <t>\r\n          The PRIORITY frame does not define any flags.\r\n        </t>\r\n\r\n        <t>\r\n          The PRIORITY frame is associated with an existing stream. If a PRIORITY frame is received\r\n          with a stream identifier of 0x0, the recipient MUST respond with a <xref\r\n          target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.\r\n        </t>\r\n        <t>\r\n          The PRIORITY frame can be sent on a stream in any of the \"reserved (remote)\", \"open\",\r\n          \"half closed (local)\", \"half closed (remote)\", or \"closed\" states, though it cannot be\r\n          sent between consecutive frames that comprise a single <xref target=\"HeaderBlock\">header\r\n          block</xref>.  Note that this frame could arrive after processing or frame sending has\r\n          completed, which would cause it to have no effect on the current stream.  For a stream\r\n          that is in the \"half closed (remote)\" or \"closed\" state, this frame can only affect\r\n          processing of the current stream and not frame transmission.\r\n        </t>\r\n        <t>\r\n          The PRIORITY frame is the only frame that can be sent for a stream in the \"closed\" state.\r\n          This allows for the reprioritization of a group of dependent streams by altering the\r\n          priority of a parent stream, which might be closed.  
However, a PRIORITY frame sent on a\r\n          closed stream risks being ignored due to the peer having discarded priority state\r\n          information for that stream.\r\n        </t>\r\n      </section>\r\n\r\n      <section anchor=\"RST_STREAM\" title=\"RST_STREAM\">\r\n        <t>\r\n          The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream.  When sent by\r\n          the initiator of a stream, it indicates that they wish to cancel the stream or that an\r\n          error condition has occurred.  When sent by the receiver of a stream, it indicates that\r\n          either the receiver is rejecting the stream, requesting that the stream be cancelled, or\r\n          that an error condition has occurred.\r\n        </t>\r\n        <figure title=\"RST_STREAM Frame Payload\">\r\n          <artwork type=\"inline\"><![CDATA[\r\n  0                   1                   2                   3\r\n  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\r\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\r\n |                        Error Code (32)                        |\r\n +---------------------------------------------------------------+\r\n]]></artwork>\r\n        </figure>\r\n\r\n        <t>\r\n          The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the <xref\r\n          target=\"ErrorCodes\">error code</xref>.  The error code indicates why the stream is being\r\n          terminated.\r\n        </t>\r\n\r\n        <t>\r\n          The RST_STREAM frame does not define any flags.\r\n        </t>\r\n\r\n        <t>\r\n          The RST_STREAM frame fully terminates the referenced stream and causes it to enter the\r\n          closed state. After receiving a RST_STREAM on a stream, the receiver MUST NOT send\r\n          additional frames for that stream, with the exception of <x:ref>PRIORITY</x:ref>. 
However,\r\n          after sending the RST_STREAM, the sending endpoint MUST be prepared to receive and process\r\n          additional frames sent on the stream that might have been sent by the peer prior to the\r\n          arrival of the RST_STREAM.\r\n        </t>\r\n\r\n        <t>\r\n          RST_STREAM frames MUST be associated with a stream.  If a RST_STREAM frame is received\r\n          with a stream identifier of 0x0, the recipient MUST treat this as a <xref\r\n          target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.\r\n        </t>\r\n\r\n        <t>\r\n          RST_STREAM frames MUST NOT be sent for a stream in the \"idle\" state.  If a RST_STREAM\r\n          frame identifying an idle stream is received, the recipient MUST treat this as a <xref\r\n          target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.\r\n        </t>\r\n\r\n      </section>\r\n\r\n      <section anchor=\"SETTINGS\" title=\"SETTINGS\">\r\n        <t>\r\n          The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints\r\n          communicate, such as preferences and constraints on peer behavior.  The SETTINGS frame is\r\n          also used to acknowledge the receipt of those parameters.  Individually, a SETTINGS\r\n          parameter can also be referred to as a \"setting\".\r\n        </t>\r\n        <t>\r\n          SETTINGS parameters are not negotiated; they describe characteristics of the sending peer,\r\n          which are used by the receiving peer. Different values for the same parameter can be\r\n          advertised by each peer. 
For example, a client might set a high initial flow control\r\n          window, whereas a server might set a lower value to conserve resources.\r\n        </t>\r\n\r\n        <t>\r\n          A SETTINGS frame MUST be sent by both endpoints at the start of a connection, and MAY be\r\n          sent at any other time by either endpoint over the lifetime of the connection.\r\n          Implementations MUST support all of the parameters defined by this specification.\r\n        </t>\r\n\r\n        <t>\r\n          Each parameter in a SETTINGS frame replaces any existing value for that parameter.\r\n          Parameters are processed in the order in which they appear, and a receiver of a SETTINGS\r\n          frame does not need to maintain any state other than the current value of its\r\n          parameters. Therefore, the value of a SETTINGS parameter is the last value that is seen by\r\n          a receiver.\r\n        </t>\r\n        <t>\r\n          SETTINGS parameters are acknowledged by the receiving peer. To enable this, the SETTINGS\r\n          frame defines the following flag:\r\n          <list style=\"hanging\">\r\n            <t hangText=\"ACK (0x1):\">\r\n              Bit 1 being set indicates that this frame acknowledges receipt and application of the\r\n              peer's SETTINGS frame.  When this bit is set, the payload of the SETTINGS frame MUST\r\n              be empty.  Receipt of a SETTINGS frame with the ACK flag set and a length field value\r\n              other than 0 MUST be treated as a <xref target=\"ConnectionErrorHandler\">connection\r\n              error</xref> of type <x:ref>FRAME_SIZE_ERROR</x:ref>.  For more info, see <xref\r\n              target=\"SettingsSync\">Settings Synchronization</xref>.\r\n            </t>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          SETTINGS frames always apply to a connection, never a single stream.  The stream\r\n          identifier for a SETTINGS frame MUST be zero (0x0). 
If an endpoint receives a SETTINGS\r\n          frame whose stream identifier field is anything other than 0x0, the endpoint MUST respond\r\n          with a <xref target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.\r\n        </t>\r\n        <t>\r\n          The SETTINGS frame affects connection state.  A badly formed or incomplete SETTINGS frame\r\n          MUST be treated as a <xref target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.\r\n        </t>\r\n\r\n        <section title=\"SETTINGS Format\" anchor=\"SettingFormat\">\r\n          <t>\r\n            The payload of a SETTINGS frame consists of zero or more parameters, each consisting of\r\n            an unsigned 16-bit setting identifier and an unsigned 32-bit value.\r\n          </t>\r\n\r\n          <figure title=\"Setting Format\">\r\n            <artwork type=\"inline\"><![CDATA[\r\n  0                   1                   2                   3\r\n  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\r\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\r\n |       Identifier (16)         |\r\n +-------------------------------+-------------------------------+\r\n |                        Value (32)                             |\r\n +---------------------------------------------------------------+\r\n]]></artwork>\r\n          </figure>\r\n        </section>\r\n\r\n        <section anchor=\"SettingValues\" title=\"Defined SETTINGS Parameters\">\r\n          <t>\r\n            The following parameters are defined:\r\n            <list style=\"hanging\">\r\n              <x:lt hangText=\"SETTINGS_HEADER_TABLE_SIZE (0x1):\"\r\n                    anchor=\"SETTINGS_HEADER_TABLE_SIZE\">\r\n                <t>\r\n                  Allows the sender to inform the remote endpoint of the maximum size of the header\r\n                  compression table used to decode 
header blocks, in octets. The encoder can select\r\n                  any size equal to or less than this value by using signaling specific to the\r\n                  header compression format inside a header block. The initial value is 4,096\r\n                  octets.\r\n                </t>\r\n              </x:lt>\r\n              <x:lt hangText=\"SETTINGS_ENABLE_PUSH (0x2):\"\r\n                    anchor=\"SETTINGS_ENABLE_PUSH\">\r\n                <t>\r\n                  This setting can be used to disable <xref target=\"PushResources\">server\r\n                  push</xref>. An endpoint MUST NOT send a <x:ref>PUSH_PROMISE</x:ref> frame if it\r\n                  receives this parameter set to a value of 0. An endpoint that has both set this\r\n                  parameter to 0 and had it acknowledged MUST treat the receipt of a\r\n                  <x:ref>PUSH_PROMISE</x:ref> frame as a <xref\r\n                  target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n                  <x:ref>PROTOCOL_ERROR</x:ref>.\r\n                </t>\r\n                <t>\r\n                  The initial value is 1, which indicates that server push is permitted.  Any value\r\n                  other than 0 or 1 MUST be treated as a <xref\r\n                  target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n                  <x:ref>PROTOCOL_ERROR</x:ref>.\r\n                </t>\r\n              </x:lt>\r\n              <x:lt hangText=\"SETTINGS_MAX_CONCURRENT_STREAMS (0x3):\"\r\n                    anchor=\"SETTINGS_MAX_CONCURRENT_STREAMS\">\r\n                <t>\r\n                  Indicates the maximum number of concurrent streams that the sender will allow.\r\n                  This limit is directional: it applies to the number of streams that the sender\r\n                  permits the receiver to create. Initially there is no limit to this value.  
It is\r\n                  recommended that this value be no smaller than 100, so as to not unnecessarily\r\n                  limit parallelism.\r\n                </t>\r\n                <t>\r\n                  A value of 0 for SETTINGS_MAX_CONCURRENT_STREAMS SHOULD NOT be treated as special\r\n                  by endpoints.  A zero value does prevent the creation of new streams, however this\r\n                  can also happen for any limit that is exhausted with active streams.  Servers\r\n                  SHOULD only set a zero value for short durations; if a server does not wish to\r\n                  accept requests, closing the connection could be preferable.\r\n                </t>\r\n              </x:lt>\r\n              <x:lt hangText=\"SETTINGS_INITIAL_WINDOW_SIZE (0x4):\"\r\n                    anchor=\"SETTINGS_INITIAL_WINDOW_SIZE\">\r\n                <t>\r\n                  Indicates the sender's initial window size (in octets) for stream level flow\r\n                  control.  
The initial value is 2<x:sup>16</x:sup>-1 (65,535) octets.\r\n                </t>\r\n                <t>\r\n                  This setting affects the window size of all streams, including existing streams,\r\n                  see <xref target=\"InitialWindowSize\"/>.\r\n                </t>\r\n                <t>\r\n                  Values above the maximum flow control window size of 2<x:sup>31</x:sup>-1 MUST\r\n                  be treated as a <xref target=\"ConnectionErrorHandler\">connection error</xref> of\r\n                  type <x:ref>FLOW_CONTROL_ERROR</x:ref>.\r\n                </t>\r\n              </x:lt>\r\n              <x:lt hangText=\"SETTINGS_MAX_FRAME_SIZE (0x5):\"\r\n                    anchor=\"SETTINGS_MAX_FRAME_SIZE\">\r\n                <t>\r\n                  Indicates the size of the largest frame payload that the sender is willing to\r\n                  receive, in octets.\r\n                </t>\r\n                <t>\r\n                  The initial value is 2<x:sup>14</x:sup> (16,384) octets.  The value advertised by\r\n                  an endpoint MUST be between this initial value and the maximum allowed frame size\r\n                  (2<x:sup>24</x:sup>-1 or 16,777,215 octets), inclusive.  Values outside this range\r\n                  MUST be treated as a <xref target=\"ConnectionErrorHandler\">connection error</xref>\r\n                  of type <x:ref>PROTOCOL_ERROR</x:ref>.\r\n                </t>\r\n              </x:lt>\r\n              <x:lt hangText=\"SETTINGS_MAX_HEADER_LIST_SIZE (0x6):\"\r\n                    anchor=\"SETTINGS_MAX_HEADER_LIST_SIZE\">\r\n                <t>\r\n                  This advisory setting informs a peer of the maximum size of header list that the\r\n                  sender is prepared to accept, in octets. 
The value is based on the uncompressed\r\n                  size of header fields, including the length of the name and value in octets plus\r\n                  an overhead of 32 octets for each header field.\r\n                </t>\r\n                <t>\r\n                  For any given request, a lower limit than what is advertised MAY be enforced.  The\r\n                  initial value of this setting is unlimited.\r\n                </t>\r\n              </x:lt>\r\n            </list>\r\n          </t>\r\n          <t>\r\n            An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier\r\n            MUST ignore that setting.\r\n          </t>\r\n        </section>\r\n\r\n        <section anchor=\"SettingsSync\" title=\"Settings Synchronization\">\r\n          <t>\r\n            Most values in SETTINGS benefit from or require an understanding of when the peer has\r\n            received and applied the changed parameter values. In order to provide\r\n            such synchronization timepoints, the recipient of a SETTINGS frame in which the ACK flag\r\n            is not set MUST apply the updated parameters as soon as possible upon receipt.\r\n          </t>\r\n          <t>\r\n            The values in the SETTINGS frame MUST be processed in the order they appear, with no\r\n            other frame processing between values.  Unsupported parameters MUST be ignored.  Once\r\n            all values have been processed, the recipient MUST immediately emit a SETTINGS frame\r\n            with the ACK flag set. 
Upon receiving a SETTINGS frame with the ACK flag set, the sender\r\n            of the altered parameters can rely on the setting having been applied.\r\n          </t>\r\n          <t>\r\n            If the sender of a SETTINGS frame does not receive an acknowledgement within a\r\n            reasonable amount of time, it MAY issue a <xref\r\n            target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n            <x:ref>SETTINGS_TIMEOUT</x:ref>.\r\n          </t>\r\n        </section>\r\n      </section>\r\n\r\n      <section anchor=\"PUSH_PROMISE\" title=\"PUSH_PROMISE\">\r\n        <t>\r\n          The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of\r\n          streams the sender intends to initiate.  The PUSH_PROMISE frame includes the unsigned\r\n          31-bit identifier of the stream the endpoint plans to create along with a set of headers\r\n          that provide additional context for the stream.  <xref target=\"PushResources\"/> contains a\r\n          thorough description of the use of PUSH_PROMISE frames.\r\n        </t>\r\n\r\n        <figure title=\"PUSH_PROMISE Payload Format\">\r\n          <artwork type=\"inline\"><![CDATA[\r\n  0                   1                   2                   3\r\n  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\r\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\r\n |Pad Length? 
(8)|\r\n +-+-------------+-----------------------------------------------+\r\n |R|                  Promised Stream ID (31)                    |\r\n +-+-----------------------------+-------------------------------+\r\n |                   Header Block Fragment (*)                 ...\r\n +---------------------------------------------------------------+\r\n |                           Padding (*)                       ...\r\n +---------------------------------------------------------------+\r\n]]></artwork>\r\n        </figure>\r\n        <t>\r\n          The PUSH_PROMISE frame payload has the following fields:\r\n          <list style=\"hanging\">\r\n            <t hangText=\"Pad Length:\">\r\n              An 8-bit field containing the length of the frame padding in units of octets.  This\r\n              field is only present if the PADDED flag is set.\r\n            </t>\r\n            <t hangText=\"R:\">\r\n              A single reserved bit.\r\n            </t>\r\n            <t hangText=\"Promised Stream ID:\">\r\n              An unsigned 31-bit integer that identifies the stream that is reserved by the\r\n              PUSH_PROMISE.  
The promised stream identifier MUST be a valid choice for the next\r\n              stream sent by the sender (see <xref target=\"StreamIdentifiers\">new stream\r\n              identifier</xref>).\r\n            </t>\r\n            <t hangText=\"Header Block Fragment:\">\r\n              A <xref target=\"HeaderBlock\">header block fragment</xref> containing request header\r\n              fields.\r\n            </t>\r\n            <t hangText=\"Padding:\">\r\n              Padding octets.\r\n            </t>\r\n          </list>\r\n        </t>\r\n\r\n        <t>\r\n          The PUSH_PROMISE frame defines the following flags:\r\n          <list style=\"hanging\">\r\n            <x:lt hangText=\"END_HEADERS (0x4):\">\r\n              <t>\r\n                Bit 3 being set indicates that this frame contains an entire <xref\r\n                target=\"HeaderBlock\">header block</xref> and is not followed by any\r\n                <x:ref>CONTINUATION</x:ref> frames.\r\n              </t>\r\n              <t>\r\n                A PUSH_PROMISE frame without the END_HEADERS flag set MUST be followed by a\r\n                CONTINUATION frame for the same stream.  A receiver MUST treat the receipt of any\r\n                other type of frame or a frame on a different stream as a <xref\r\n                target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n                <x:ref>PROTOCOL_ERROR</x:ref>.\r\n              </t>\r\n            </x:lt>\r\n            <x:lt hangText=\"PADDED (0x8):\">\r\n              <t>\r\n                Bit 4 being set indicates that the Pad Length field and any padding that it\r\n                describes is present.\r\n              </t>\r\n            </x:lt>\r\n          </list>\r\n        </t>\r\n\r\n        <t>\r\n          PUSH_PROMISE frames MUST be associated with an existing, peer-initiated stream. The stream\r\n          identifier of a PUSH_PROMISE frame indicates the stream it is associated with.  
If the\r\n          stream identifier field specifies the value 0x0, a recipient MUST respond with a <xref\r\n          target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.\r\n        </t>\r\n\r\n        <t>\r\n          Promised streams are not required to be used in the order they are promised.  The\r\n          PUSH_PROMISE only reserves stream identifiers for later use.\r\n        </t>\r\n\r\n        <t>\r\n          PUSH_PROMISE MUST NOT be sent if the <x:ref>SETTINGS_ENABLE_PUSH</x:ref> setting of the\r\n          peer endpoint is set to 0.  An endpoint that has set this setting and has received\r\n          acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a <xref\r\n          target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.\r\n        </t>\r\n        <t>\r\n          Recipients of PUSH_PROMISE frames can choose to reject promised streams by returning a\r\n          <x:ref>RST_STREAM</x:ref> referencing the promised stream identifier back to the sender of\r\n          the PUSH_PROMISE.\r\n        </t>\r\n\r\n       <t>\r\n          A PUSH_PROMISE frame modifies the connection state in two ways.  The inclusion of a <xref\r\n          target=\"HeaderBlock\">header block</xref> potentially modifies the state maintained for\r\n          header compression.  PUSH_PROMISE also reserves a stream for later use, causing the\r\n          promised stream to enter the \"reserved\" state.  
A sender MUST NOT send a PUSH_PROMISE on a\r\n          stream unless that stream is either \"open\" or \"half closed (remote)\"; the sender MUST\r\n          ensure that the promised stream is a valid choice for a <xref\r\n          target=\"StreamIdentifiers\">new stream identifier</xref> (that is, the promised stream MUST\r\n          be in the \"idle\" state).\r\n        </t>\r\n        <t>\r\n          Since PUSH_PROMISE reserves a stream, ignoring a PUSH_PROMISE frame causes the stream\r\n          state to become indeterminate.  A receiver MUST treat the receipt of a PUSH_PROMISE on a\r\n          stream that is neither \"open\" nor \"half closed (local)\" as a <xref\r\n          target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.  However, an endpoint that has sent\r\n          <x:ref>RST_STREAM</x:ref> on the associated stream MUST handle PUSH_PROMISE frames that\r\n          might have been created before the <x:ref>RST_STREAM</x:ref> frame is received and\r\n          processed.\r\n        </t>\r\n        <t>\r\n          A receiver MUST treat the receipt of a PUSH_PROMISE that promises an <xref\r\n          target=\"StreamIdentifiers\">illegal stream identifier</xref> (that is, an identifier for a\r\n          stream that is not currently in the \"idle\" state) as a <xref\r\n          target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.\r\n        </t>\r\n\r\n        <t>\r\n          The PUSH_PROMISE frame includes optional padding.  Padding fields and flags are identical\r\n          to those defined for <xref target=\"DATA\">DATA frames</xref>.\r\n        </t>\r\n      </section>\r\n\r\n      <section anchor=\"PING\" title=\"PING\">\r\n        <t>\r\n          The PING frame (type=0x6) is a mechanism for measuring a minimal round trip time from the\r\n          sender, as well as determining whether an idle connection is still functional. 
 PING\r\n          frames can be sent from any endpoint.\r\n        </t>\r\n        <figure title=\"PING Payload Format\">\r\n          <artwork type=\"inline\"><![CDATA[\r\n  0                   1                   2                   3\r\n  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\r\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\r\n |                                                               |\r\n |                      Opaque Data (64)                         |\r\n |                                                               |\r\n +---------------------------------------------------------------+\r\n]]></artwork>\r\n        </figure>\r\n\r\n        <t>\r\n          In addition to the frame header, PING frames MUST contain 8 octets of data in the payload.\r\n          A sender can include any value it chooses and use those bytes in any fashion.\r\n        </t>\r\n        <t>\r\n          Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with\r\n          the ACK flag set in response, with an identical payload.  PING responses SHOULD be given\r\n          higher priority than any other frame.\r\n        </t>\r\n\r\n        <t>\r\n          The PING frame defines the following flags:\r\n          <list style=\"hanging\">\r\n            <t hangText=\"ACK (0x1):\">\r\n              Bit 1 being set indicates that this PING frame is a PING response.  An endpoint MUST\r\n              set this flag in PING responses.  An endpoint MUST NOT respond to PING frames\r\n              containing this flag.\r\n            </t>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          PING frames are not associated with any individual stream. 
If a PING frame is received\r\n          with a stream identifier field value other than 0x0, the recipient MUST respond with a\r\n          <xref target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.\r\n        </t>\r\n        <t>\r\n          Receipt of a PING frame with a length field value other than 8 MUST be treated as a <xref\r\n          target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>FRAME_SIZE_ERROR</x:ref>.\r\n        </t>\r\n\r\n      </section>\r\n\r\n      <section anchor=\"GOAWAY\" title=\"GOAWAY\">\r\n        <t>\r\n          The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this\r\n          connection.  GOAWAY can be sent by either the client or the server.  Once sent, the sender\r\n          will ignore frames sent on any new streams with identifiers higher than the included last\r\n          stream identifier.  Receivers of a GOAWAY frame MUST NOT open additional streams on the\r\n          connection, although a new connection can be established for new streams.\r\n        </t>\r\n        <t>\r\n          The purpose of this frame is to allow an endpoint to gracefully stop accepting new\r\n          streams, while still finishing processing of previously established streams.  This enables\r\n          administrative actions, like server maintenance.\r\n        </t>\r\n        <t>\r\n          There is an inherent race condition between an endpoint starting new streams and the\r\n          remote sending a GOAWAY frame.  To deal with this case, the GOAWAY contains the stream\r\n          identifier of the last peer-initiated stream which was or might be processed on the\r\n          sending endpoint in this connection.  
For instance, if the server sends a GOAWAY frame,\r\n          the identified stream is the highest numbered stream initiated by the client.\r\n        </t>\r\n        <t>\r\n          If the receiver of the GOAWAY has sent data on streams with a higher stream identifier\r\n          than what is indicated in the GOAWAY frame, those streams are not or will not be\r\n          processed.  The receiver of the GOAWAY frame can treat the streams as though they had\r\n          never been created at all, thereby allowing those streams to be retried later on a new\r\n          connection.\r\n        </t>\r\n        <t>\r\n          Endpoints SHOULD always send a GOAWAY frame before closing a connection so that the remote\r\n          can know whether a stream has been partially processed or not.  For example, if an HTTP\r\n          client sends a POST at the same time that a server closes a connection, the client cannot\r\n          know if the server started to process that POST request if the server does not send a\r\n          GOAWAY frame to indicate what streams it might have acted on.\r\n        </t>\r\n        <t>\r\n          An endpoint might choose to close a connection without sending GOAWAY for misbehaving\r\n          peers.\r\n        </t>\r\n\r\n        <figure title=\"GOAWAY Payload Format\">\r\n          <artwork type=\"inline\"><![CDATA[\r\n  0                   1                   2                   3\r\n  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\r\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\r\n |R|                  Last-Stream-ID (31)                        |\r\n +-+-------------------------------------------------------------+\r\n |                      Error Code (32)                          |\r\n +---------------------------------------------------------------+\r\n |                  Additional Debug Data (*)                    |\r\n 
+---------------------------------------------------------------+\r\n]]></artwork>\r\n        </figure>\r\n        <t>\r\n          The GOAWAY frame does not define any flags.\r\n        </t>\r\n        <t>\r\n          The GOAWAY frame applies to the connection, not a specific stream.  An endpoint MUST treat\r\n          a <x:ref>GOAWAY</x:ref> frame with a stream identifier other than 0x0 as a <xref\r\n          target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.\r\n        </t>\r\n        <t>\r\n          The last stream identifier in the GOAWAY frame contains the highest numbered stream\r\n          identifier for which the sender of the GOAWAY frame might have taken some action on, or\r\n          might yet take action on.  All streams up to and including the identified stream might\r\n          have been processed in some way.  The last stream identifier can be set to 0 if no streams\r\n          were processed.\r\n          <list style=\"hanging\">\r\n            <t hangText=\"Note:\">\r\n              In this context, \"processed\" means that some data from the stream was passed to some\r\n              higher layer of software that might have taken some action as a result.\r\n            </t>\r\n          </list>\r\n          If a connection terminates without a GOAWAY frame, the last stream identifier is\r\n          effectively the highest possible stream identifier.\r\n        </t>\r\n        <t>\r\n          On streams with lower or equal numbered identifiers that were not closed completely prior\r\n          to the connection being closed, re-attempting requests, transactions, or any protocol\r\n          activity is not possible, with the exception of idempotent actions like HTTP GET, PUT, or\r\n          DELETE.  
Any protocol activity that uses higher numbered streams can be safely retried\r\n          using a new connection.\r\n        </t>\r\n        <t>\r\n          Activity on streams numbered lower or equal to the last stream identifier might still\r\n          complete successfully.  The sender of a GOAWAY frame might gracefully shut down a\r\n          connection by sending a GOAWAY frame, maintaining the connection in an open state until\r\n          all in-progress streams complete.\r\n        </t>\r\n        <t>\r\n          An endpoint MAY send multiple GOAWAY frames if circumstances change.  For instance, an\r\n          endpoint that sends GOAWAY with <x:ref>NO_ERROR</x:ref> during graceful shutdown could\r\n          subsequently encounter a condition that requires immediate termination of the connection.\r\n          The last stream identifier from the last GOAWAY frame received indicates which streams\r\n          could have been acted upon.  Endpoints MUST NOT increase the value they send in the last\r\n          stream identifier, since the peers might already have retried unprocessed requests on\r\n          another connection.\r\n        </t>\r\n        <t>\r\n          A client that is unable to retry requests loses all requests that are in flight when the\r\n          server closes the connection.  This is especially true for intermediaries that might\r\n          not be serving clients using HTTP/2.  A server that is attempting to gracefully shut down\r\n          a connection SHOULD send an initial GOAWAY frame with the last stream identifier set to\r\n          2<x:sup>31</x:sup>-1 and a <x:ref>NO_ERROR</x:ref> code.  This signals to the client that\r\n          a shutdown is imminent and that no further requests can be initiated.  After waiting at\r\n          least one round trip time, the server can send another GOAWAY frame with an updated last\r\n          stream identifier.  
This ensures that a connection can be cleanly shut down without losing\r\n          requests.\r\n        </t>\r\n\r\n        <t>\r\n          After sending a GOAWAY frame, the sender can discard frames for streams with identifiers\r\n          higher than the identified last stream.  However, any frames that alter connection state\r\n          cannot be completely ignored.  For instance, <x:ref>HEADERS</x:ref>,\r\n          <x:ref>PUSH_PROMISE</x:ref> and <x:ref>CONTINUATION</x:ref> frames MUST be minimally\r\n          processed to ensure the state maintained for header compression is consistent (see <xref\r\n          target=\"HeaderBlock\"/>); similarly DATA frames MUST be counted toward the connection flow\r\n          control window.  Failure to process these frames can cause flow control or header\r\n          compression state to become unsynchronized.\r\n        </t>\r\n\r\n        <t>\r\n          The GOAWAY frame also contains a 32-bit <xref target=\"ErrorCodes\">error code</xref> that\r\n          contains the reason for closing the connection.\r\n        </t>\r\n        <t>\r\n          Endpoints MAY append opaque data to the payload of any GOAWAY frame.  Additional debug\r\n          data is intended for diagnostic purposes only and carries no semantic value.  Debug\r\n          information could contain security- or privacy-sensitive data.  
Logged or otherwise\r\n          persistently stored debug data MUST have adequate safeguards to prevent unauthorized\r\n          access.\r\n        </t>\r\n      </section>\r\n\r\n      <section anchor=\"WINDOW_UPDATE\" title=\"WINDOW_UPDATE\">\r\n        <t>\r\n          The WINDOW_UPDATE frame (type=0x8) is used to implement flow control; see <xref\r\n          target=\"FlowControl\"/> for an overview.\r\n        </t>\r\n        <t>\r\n          Flow control operates at two levels: on each individual stream and on the entire\r\n          connection.\r\n        </t>\r\n        <t>\r\n          Both types of flow control are hop-by-hop; that is, only between the two endpoints.\r\n          Intermediaries do not forward WINDOW_UPDATE frames between dependent connections.\r\n          However, throttling of data transfer by any receiver can indirectly cause the propagation\r\n          of flow control information toward the original sender.\r\n        </t>\r\n        <t>\r\n          Flow control only applies to frames that are identified as being subject to flow control.\r\n          Of the frame types defined in this document, this includes only <x:ref>DATA</x:ref> frames.\r\n          Frames that are exempt from flow control MUST be accepted and processed, unless the\r\n          receiver is unable to assign resources to handling the frame.  
A receiver MAY respond with\r\n          a <xref target=\"StreamErrorHandler\">stream error</xref> or <xref\r\n          target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>FLOW_CONTROL_ERROR</x:ref> if it is unable to accept a frame.\r\n        </t>\r\n        <figure title=\"WINDOW_UPDATE Payload Format\">\r\n          <artwork type=\"inline\"><![CDATA[\r\n  0                   1                   2                   3\r\n  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\r\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\r\n |R|              Window Size Increment (31)                     |\r\n +-+-------------------------------------------------------------+\r\n]]></artwork>\r\n        </figure>\r\n        <t>\r\n          The payload of a WINDOW_UPDATE frame is one reserved bit, plus an unsigned 31-bit integer\r\n          indicating the number of octets that the sender can transmit in addition to the existing\r\n          flow control window.  The legal range for the increment to the flow control window is 1 to\r\n          2<x:sup>31</x:sup>-1 (0x7fffffff) octets.\r\n        </t>\r\n        <t>\r\n          The WINDOW_UPDATE frame does not define any flags.\r\n        </t>\r\n        <t>\r\n          The WINDOW_UPDATE frame can be specific to a stream or to the entire connection.  
In the\r\n          former case, the frame's stream identifier indicates the affected stream; in the latter,\r\n          the value \"0\" indicates that the entire connection is the subject of the frame.\r\n        </t>\r\n        <t>\r\n          A receiver MUST treat the receipt of a WINDOW_UPDATE frame with a flow control window\r\n          increment of 0 as a <xref target=\"StreamErrorHandler\">stream error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>; errors on the connection flow control window MUST be\r\n          treated as a <xref target=\"ConnectionErrorHandler\">connection error</xref>.\r\n        </t>\r\n        <t>\r\n          WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag.\r\n          This means that a receiver could receive a WINDOW_UPDATE frame on a \"half closed (remote)\"\r\n          or \"closed\" stream.  A receiver MUST NOT treat this as an error, see <xref\r\n          target=\"StreamStates\"/>.\r\n        </t>\r\n        <t>\r\n          A receiver that receives a flow controlled frame MUST always account for its contribution\r\n          against the connection flow control window, unless the receiver treats this as a <xref\r\n          target=\"ConnectionErrorHandler\">connection error</xref>.  This is necessary even if the\r\n          frame is in error.  Since the sender counts the frame toward the flow control window, if\r\n          the receiver does not, the flow control window at sender and receiver can become\r\n          different.\r\n        </t>\r\n\r\n        <section title=\"The Flow Control Window\">\r\n          <t>\r\n            Flow control in HTTP/2 is implemented using a window kept by each sender on every\r\n            stream. 
The flow control window is a simple integer value that indicates how many octets\r\n            of data the sender is permitted to transmit; as such, its size is a measure of the\r\n            buffering capacity of the receiver.\r\n          </t>\r\n          <t>\r\n            Two flow control windows are applicable: the stream flow control window and the\r\n            connection flow control window.  The sender MUST NOT send a flow controlled frame with a\r\n            length that exceeds the space available in either of the flow control windows advertised\r\n            by the receiver.  Frames with zero length with the END_STREAM flag set (that is, an\r\n            empty <x:ref>DATA</x:ref> frame) MAY be sent if there is no available space in either\r\n            flow control window.\r\n          </t>\r\n          <t>\r\n            For flow control calculations, the 9 octet frame header is not counted.\r\n          </t>\r\n          <t>\r\n            After sending a flow controlled frame, the sender reduces the space available in both\r\n            windows by the length of the transmitted frame.\r\n          </t>\r\n          <t>\r\n            The receiver of a frame sends a WINDOW_UPDATE frame as it consumes data and frees up\r\n            space in flow control windows.  Separate WINDOW_UPDATE frames are sent for the stream\r\n            and connection level flow control windows.\r\n          </t>\r\n          <t>\r\n            A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the\r\n            amount specified in the frame.\r\n          </t>\r\n          <t>\r\n            A sender MUST NOT allow a flow control window to exceed 2<x:sup>31</x:sup>-1 octets.\r\n            If a sender receives a WINDOW_UPDATE that causes a flow control window to exceed this\r\n            maximum it MUST terminate either the stream or the connection, as appropriate.  
For\r\n            streams, the sender sends a <x:ref>RST_STREAM</x:ref> with the error code of\r\n            <x:ref>FLOW_CONTROL_ERROR</x:ref>; for the connection, a <x:ref>GOAWAY</x:ref>\r\n            frame with a <x:ref>FLOW_CONTROL_ERROR</x:ref> code.\r\n          </t>\r\n          <t>\r\n            Flow controlled frames from the sender and WINDOW_UPDATE frames from the receiver are\r\n            completely asynchronous with respect to each other. This property allows a receiver to\r\n            aggressively update the window size kept by the sender to prevent streams from stalling.\r\n          </t>\r\n        </section>\r\n\r\n        <section anchor=\"InitialWindowSize\" title=\"Initial Flow Control Window Size\">\r\n          <t>\r\n            When an HTTP/2 connection is first established, new streams are created with an initial\r\n            flow control window size of 65,535 octets. The connection flow control window is 65,535\r\n            octets. Both endpoints can adjust the initial window size for new streams by including\r\n            a value for <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref> in the <x:ref>SETTINGS</x:ref>\r\n            frame that forms part of the connection preface. The connection flow control window can\r\n            only be changed using WINDOW_UPDATE frames.\r\n          </t>\r\n          <t>\r\n            Prior to receiving a <x:ref>SETTINGS</x:ref> frame that sets a value for\r\n            <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref>, an endpoint can only use the default\r\n            initial window size when sending flow controlled frames.  Similarly, the connection flow\r\n            control window is set to the default initial window size until a WINDOW_UPDATE frame is\r\n            received.\r\n          </t>\r\n          <t>\r\n            A <x:ref>SETTINGS</x:ref> frame can alter the initial flow control window size for all\r\n            current streams. 
When the value of <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref> changes,\r\n            a receiver MUST adjust the size of all stream flow control windows that it maintains by\r\n            the difference between the new value and the old value.\r\n          </t>\r\n          <t>\r\n            A change to <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref> can cause the available space in\r\n            a flow control window to become negative.  A sender MUST track the negative flow control\r\n            window, and MUST NOT send new flow controlled frames until it receives WINDOW_UPDATE\r\n            frames that cause the flow control window to become positive.\r\n          </t>\r\n          <t>\r\n            For example, if the client sends 60KB immediately on connection establishment, and the\r\n            server sets the initial window size to be 16KB, the client will recalculate the\r\n            available flow control window to be -44KB on receipt of the <x:ref>SETTINGS</x:ref>\r\n            frame.  The client retains a negative flow control window until WINDOW_UPDATE frames\r\n            restore the window to being positive, after which the client can resume sending.\r\n          </t>\r\n          <t>\r\n            A <x:ref>SETTINGS</x:ref> frame cannot alter the connection flow control window.\r\n          </t>\r\n          <t>\r\n            An endpoint MUST treat a change to <x:ref>SETTINGS_INITIAL_WINDOW_SIZE</x:ref> that\r\n            causes any flow control window to exceed the maximum size as a <xref\r\n            target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n            <x:ref>FLOW_CONTROL_ERROR</x:ref>.\r\n          </t>\r\n        </section>\r\n\r\n        <section title=\"Reducing the Stream Window Size\">\r\n          <t>\r\n            A receiver that wishes to use a smaller flow control window than the current size can\r\n            send a new <x:ref>SETTINGS</x:ref> frame.  
However, the receiver MUST be prepared to\r\n            receive data that exceeds this window size, since the sender might send data that\r\n            exceeds the lower limit prior to processing the <x:ref>SETTINGS</x:ref> frame.\r\n          </t>\r\n          <t>\r\n            After sending a SETTINGS frame that reduces the initial flow control window size, a\r\n            receiver has two options for handling streams that exceed flow control limits:\r\n            <list style=\"numbers\">\r\n              <t>\r\n                The receiver can immediately send <x:ref>RST_STREAM</x:ref> with\r\n                <x:ref>FLOW_CONTROL_ERROR</x:ref> error code for the affected streams.\r\n              </t>\r\n              <t>\r\n                The receiver can accept the streams and tolerate the resulting head of line\r\n                blocking, sending WINDOW_UPDATE frames as it consumes data.\r\n              </t>\r\n            </list>\r\n          </t>\r\n        </section>\r\n      </section>\r\n\r\n      <section anchor=\"CONTINUATION\" title=\"CONTINUATION\">\r\n        <t>\r\n          The CONTINUATION frame (type=0x9) is used to continue a sequence of <xref\r\n          target=\"HeaderBlock\">header block fragments</xref>.  
Any number of CONTINUATION frames can\r\n          be sent on an existing stream, as long as the preceding frame is on the same stream and is\r\n          a <x:ref>HEADERS</x:ref>, <x:ref>PUSH_PROMISE</x:ref> or CONTINUATION frame without the\r\n          END_HEADERS flag set.\r\n        </t>\r\n\r\n        <figure title=\"CONTINUATION Frame Payload\">\r\n          <artwork type=\"inline\"><![CDATA[\r\n  0                   1                   2                   3\r\n  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\r\n +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\r\n |                   Header Block Fragment (*)                 ...\r\n +---------------------------------------------------------------+\r\n]]></artwork>\r\n        </figure>\r\n        <t>\r\n          The CONTINUATION frame payload contains a <xref target=\"HeaderBlock\">header block\r\n          fragment</xref>.\r\n        </t>\r\n\r\n        <t>\r\n          The CONTINUATION frame defines the following flag:\r\n          <list style=\"hanging\">\r\n            <x:lt hangText=\"END_HEADERS (0x4):\">\r\n              <t>\r\n                Bit 3 being set indicates that this frame ends a <xref target=\"HeaderBlock\">header\r\n                block</xref>.\r\n              </t>\r\n              <t>\r\n                If the END_HEADERS bit is not set, this frame MUST be followed by another\r\n                CONTINUATION frame.  
A receiver MUST treat the receipt of any other type of frame or\r\n                a frame on a different stream as a <xref target=\"ConnectionErrorHandler\">connection\r\n                error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.\r\n              </t>\r\n            </x:lt>\r\n          </list>\r\n        </t>\r\n\r\n        <t>\r\n          The CONTINUATION frame changes the connection state as defined in <xref\r\n          target=\"HeaderBlock\" />.\r\n        </t>\r\n\r\n        <t>\r\n          CONTINUATION frames MUST be associated with a stream. If a CONTINUATION frame is received\r\n          whose stream identifier field is 0x0, the recipient MUST respond with a <xref\r\n          target=\"ConnectionErrorHandler\">connection error</xref> of type PROTOCOL_ERROR.\r\n        </t>\r\n\r\n        <t>\r\n          A CONTINUATION frame MUST be preceded by a <x:ref>HEADERS</x:ref>,\r\n          <x:ref>PUSH_PROMISE</x:ref> or CONTINUATION frame without the END_HEADERS flag set.  A\r\n          recipient that observes violation of this rule MUST respond with a <xref\r\n          target=\"ConnectionErrorHandler\"> connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.\r\n        </t>\r\n      </section>\r\n    </section>\r\n\r\n    <section anchor=\"ErrorCodes\" title=\"Error Codes\">\r\n      <t>\r\n        Error codes are 32-bit fields that are used in <x:ref>RST_STREAM</x:ref> and\r\n        <x:ref>GOAWAY</x:ref> frames to convey the reasons for the stream or connection error.\r\n      </t>\r\n\r\n      <t>\r\n        Error codes share a common code space.  
Some error codes apply only to either streams or the\r\n        entire connection and have no defined semantics in the other context.\r\n      </t>\r\n\r\n      <t>\r\n        The following error codes are defined:\r\n        <list style=\"hanging\">\r\n          <t hangText=\"NO_ERROR (0x0):\" anchor=\"NO_ERROR\">\r\n            The associated condition is not as a result of an error.  For example, a\r\n            <x:ref>GOAWAY</x:ref> might include this code to indicate graceful shutdown of a\r\n            connection.\r\n          </t>\r\n          <t hangText=\"PROTOCOL_ERROR (0x1):\" anchor=\"PROTOCOL_ERROR\">\r\n            The endpoint detected an unspecific protocol error.  This error is for use when a more\r\n            specific error code is not available.\r\n          </t>\r\n          <t hangText=\"INTERNAL_ERROR (0x2):\" anchor=\"INTERNAL_ERROR\">\r\n            The endpoint encountered an unexpected internal error.\r\n          </t>\r\n          <t hangText=\"FLOW_CONTROL_ERROR (0x3):\" anchor=\"FLOW_CONTROL_ERROR\">\r\n            The endpoint detected that its peer violated the flow control protocol.\r\n          </t>\r\n          <t hangText=\"SETTINGS_TIMEOUT (0x4):\" anchor=\"SETTINGS_TIMEOUT\">\r\n            The endpoint sent a <x:ref>SETTINGS</x:ref> frame, but did not receive a response in a\r\n            timely manner.  
See <xref target=\"SettingsSync\">Settings Synchronization</xref>.\r\n          </t>\r\n          <t hangText=\"STREAM_CLOSED (0x5):\" anchor=\"STREAM_CLOSED\">\r\n            The endpoint received a frame after a stream was half closed.\r\n          </t>\r\n          <t hangText=\"FRAME_SIZE_ERROR (0x6):\" anchor=\"FRAME_SIZE_ERROR\">\r\n            The endpoint received a frame with an invalid size.\r\n          </t>\r\n          <t hangText=\"REFUSED_STREAM (0x7):\" anchor=\"REFUSED_STREAM\">\r\n            The endpoint refuses the stream prior to performing any application processing, see\r\n            <xref target=\"Reliability\"/> for details.\r\n          </t>\r\n          <t hangText=\"CANCEL (0x8):\" anchor=\"CANCEL\">\r\n            Used by the endpoint to indicate that the stream is no longer needed.\r\n          </t>\r\n          <t hangText=\"COMPRESSION_ERROR (0x9):\" anchor=\"COMPRESSION_ERROR\">\r\n            The endpoint is unable to maintain the header compression context for the connection.\r\n          </t>\r\n          <t hangText=\"CONNECT_ERROR (0xa):\" anchor=\"CONNECT_ERROR\">\r\n            The connection established in response to a <xref target=\"CONNECT\">CONNECT\r\n            request</xref> was reset or abnormally closed.\r\n          </t>\r\n          <t hangText=\"ENHANCE_YOUR_CALM (0xb):\" anchor=\"ENHANCE_YOUR_CALM\">\r\n            The endpoint detected that its peer is exhibiting a behavior that might be generating\r\n            excessive load.\r\n          </t>\r\n          <t hangText=\"INADEQUATE_SECURITY (0xc):\" anchor=\"INADEQUATE_SECURITY\">\r\n            The underlying transport has properties that do not meet minimum security\r\n            requirements (see <xref target=\"TLSUsage\"/>).\r\n          </t>\r\n        </list>\r\n      </t>\r\n      <t>\r\n        Unknown or unsupported error codes MUST NOT trigger any special behavior.  
These MAY be\r\n        treated by an implementation as being equivalent to <x:ref>INTERNAL_ERROR</x:ref>.\r\n      </t>\r\n    </section>\r\n\r\n    <section anchor=\"HTTPLayer\" title=\"HTTP Message Exchanges\">\r\n      <t>\r\n        HTTP/2 is intended to be as compatible as possible with current uses of HTTP. This means\r\n        that, from the application perspective, the features of the protocol are largely\r\n        unchanged. To achieve this, all request and response semantics are preserved, although the\r\n        syntax of conveying those semantics has changed.\r\n      </t>\r\n      <t>\r\n        Thus, the specification and requirements of HTTP/1.1 Semantics and Content <xref\r\n        target=\"RFC7231\"/>, Conditional Requests <xref target=\"RFC7232\"/>, Range Requests <xref\r\n        target=\"RFC7233\"/>, Caching <xref target=\"RFC7234\"/> and Authentication <xref\r\n        target=\"RFC7235\"/> are applicable to HTTP/2. Selected portions of HTTP/1.1 Message Syntax\r\n        and Routing <xref target=\"RFC7230\"/>, such as the HTTP and HTTPS URI schemes, are also\r\n        applicable in HTTP/2, but the expression of those semantics for this protocol are defined\r\n        in the sections below.\r\n      </t>\r\n\r\n      <section anchor=\"HttpSequence\" title=\"HTTP Request/Response Exchange\">\r\n        <t>\r\n          A client sends an HTTP request on a new stream, using a previously unused <xref\r\n          target=\"StreamIdentifiers\">stream identifier</xref>.  
A server sends an HTTP response on\r\n          the same stream as the request.\r\n        </t>\r\n        <t>\r\n          An HTTP message (request or response) consists of:\r\n          <list style=\"numbers\">\r\n            <t>\r\n              for a response only, zero or more <x:ref>HEADERS</x:ref> frames (each followed by zero\r\n              or more <x:ref>CONTINUATION</x:ref> frames) containing the message headers of\r\n              informational (1xx) HTTP responses (see <xref target=\"RFC7230\" x:fmt=\",\"\r\n              x:rel=\"#header.fields\"/> and <xref target=\"RFC7231\" x:fmt=\",\" x:rel=\"#status.1xx\"/>),\r\n              and\r\n            </t>\r\n            <t>\r\n              one <x:ref>HEADERS</x:ref> frame (followed by zero or more <x:ref>CONTINUATION</x:ref>\r\n              frames) containing the message headers (see <xref target=\"RFC7230\" x:fmt=\",\"\r\n              x:rel=\"#header.fields\"/>), and\r\n            </t>\r\n            <t>\r\n              zero or more <x:ref>DATA</x:ref> frames containing the message payload (see <xref\r\n              target=\"RFC7230\" x:fmt=\",\" x:rel=\"#message.body\"/>), and\r\n            </t>\r\n            <t>\r\n              optionally, one <x:ref>HEADERS</x:ref> frame, followed by zero or more\r\n              <x:ref>CONTINUATION</x:ref> frames containing the trailer-part, if present (see <xref\r\n              target=\"RFC7230\" x:fmt=\",\" x:rel=\"#chunked.trailer.part\"/>).\r\n            </t>\r\n          </list>\r\n          The last frame in the sequence bears an END_STREAM flag, noting that a\r\n          <x:ref>HEADERS</x:ref> frame bearing the END_STREAM flag can be followed by\r\n          <x:ref>CONTINUATION</x:ref> frames that carry any remaining portions of the header block.\r\n        </t>\r\n        <t>\r\n          Other frames (from any stream) MUST NOT occur between either <x:ref>HEADERS</x:ref> frame\r\n          and any <x:ref>CONTINUATION</x:ref> frames that might 
follow.\r\n        </t>\r\n\r\n        <t>\r\n          Trailing header fields are carried in a header block that also terminates the stream.\r\n          That is, a sequence starting with a <x:ref>HEADERS</x:ref> frame, followed by zero or more\r\n          <x:ref>CONTINUATION</x:ref> frames, where the <x:ref>HEADERS</x:ref> frame bears an\r\n          END_STREAM flag.  Header blocks after the first that do not terminate the stream are not\r\n          part of an HTTP request or response.\r\n        </t>\r\n        <t>\r\n          A <x:ref>HEADERS</x:ref> frame (and associated <x:ref>CONTINUATION</x:ref> frames) can\r\n          only appear at the start or end of a stream.  An endpoint that receives a\r\n          <x:ref>HEADERS</x:ref> frame without the END_STREAM flag set after receiving a final\r\n          (non-informational) status code MUST treat the corresponding request or response as <xref\r\n          target=\"malformed\">malformed</xref>.\r\n        </t>\r\n\r\n        <t>\r\n          An HTTP request/response exchange fully consumes a single stream.  A request starts with\r\n          the <x:ref>HEADERS</x:ref> frame that puts the stream into an \"open\" state. The request\r\n          ends with a frame bearing END_STREAM, which causes the stream to become \"half closed\r\n          (local)\" for the client and \"half closed (remote)\" for the server.  A response starts with\r\n          a <x:ref>HEADERS</x:ref> frame and ends with a frame bearing END_STREAM, which places the\r\n          stream in the \"closed\" state.\r\n          <!-- Yes, the response might be completed before the request does, but that's not a detail\r\n               we need to expand upon.  It's complicated enough explaining this as it is.  
-->\r\n        </t>\r\n\r\n        <section anchor=\"informational-responses\" title=\"Upgrading From HTTP/2\">\r\n          <t>\r\n            HTTP/2 removes support for the 101 (Switching Protocols) informational status code\r\n            (<xref target=\"RFC7231\" x:fmt=\",\" x:rel=\"#status.101\"/>).\r\n          </t>\r\n          <t>\r\n            The semantics of 101 (Switching Protocols) aren't applicable to a multiplexed protocol.\r\n            Alternative protocols are able to use the same mechanisms that HTTP/2 uses to negotiate\r\n            their use (see <xref target=\"starting\"/>).\r\n          </t>\r\n        </section>\r\n\r\n        <section anchor=\"HttpHeaders\" title=\"HTTP Header Fields\">\r\n          <t>\r\n            HTTP header fields carry information as a series of key-value pairs. For a listing of\r\n            registered HTTP headers, see the Message Header Field Registry maintained at <eref\r\n            target=\"https://www.iana.org/assignments/message-headers\"/>.\r\n          </t>\r\n\r\n          <section anchor=\"PseudoHeaderFields\" title=\"Pseudo-Header Fields\">\r\n            <t>\r\n              While HTTP/1.x used the message start-line (see <xref target=\"RFC7230\" x:fmt=\",\"\r\n              x:rel=\"#start.line\"/>) to convey the target URI and method of the request, and the\r\n              status code for the response, HTTP/2 uses special pseudo-header fields beginning with\r\n              ':' character (ASCII 0x3a) for this purpose.\r\n            </t>\r\n            <t>\r\n              Pseudo-header fields are not HTTP header fields. 
Endpoints MUST NOT generate\r\n              pseudo-header fields other than those defined in this document.\r\n            </t>\r\n            <t>\r\n              Pseudo-header fields are only valid in the context in which they are defined.\r\n              Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header\r\n              fields defined for responses MUST NOT appear in requests.  Pseudo-header fields MUST\r\n              NOT appear in trailers.  Endpoints MUST treat a request or response that contains\r\n              undefined or invalid pseudo-header fields as <xref\r\n              target=\"malformed\">malformed</xref>.\r\n            </t>\r\n            <t>\r\n              Just as in HTTP/1.x, header field names are strings of ASCII characters that are\r\n              compared in a case-insensitive fashion. However, header field names MUST be converted\r\n              to lowercase prior to their encoding in HTTP/2. A request or response containing\r\n              uppercase header field names MUST be treated as <xref\r\n              target=\"malformed\">malformed</xref>.\r\n            </t>\r\n            <t>\r\n              All pseudo-header fields MUST appear in the header block before regular header fields.\r\n              Any request or response that contains a pseudo-header field that appears in a header\r\n              block after a regular header field MUST be treated as <xref\r\n              target=\"malformed\">malformed</xref>.\r\n            </t>\r\n          </section>\r\n\r\n          <section title=\"Connection-Specific Header Fields\">\r\n            <t>\r\n              HTTP/2 does not use the <spanx style=\"verb\">Connection</spanx> header field to\r\n              indicate connection-specific header fields; in this protocol, connection-specific\r\n              metadata is conveyed by other means.  
An endpoint MUST NOT generate an HTTP/2 message\r\n              containing connection-specific header fields; any message containing\r\n              connection-specific header fields MUST be treated as <xref\r\n              target=\"malformed\">malformed</xref>.\r\n            </t>\r\n            <t>\r\n              This means that an intermediary transforming an HTTP/1.x message to HTTP/2 will need\r\n              to remove any header fields nominated by the Connection header field, along with the\r\n              Connection header field itself. Such intermediaries SHOULD also remove other\r\n              connection-specific header fields, such as Keep-Alive, Proxy-Connection,\r\n              Transfer-Encoding and Upgrade, even if they are not nominated by Connection.\r\n            </t>\r\n            <t>\r\n              One exception to this is the TE header field, which MAY be present in an HTTP/2\r\n              request, but when it is MUST NOT contain any value other than \"trailers\".\r\n            </t>\r\n            <t>\r\n              <list style=\"hanging\">\r\n                <t hangText=\"Note:\">\r\n                  HTTP/2 purposefully does not support upgrade to another protocol.  
The handshake\r\n                  methods described in <xref target=\"starting\"/> are believed sufficient to\r\n                  negotiate the use of alternative protocols.\r\n                </t>\r\n              </list>\r\n            </t>\r\n          </section>\r\n\r\n          <section anchor=\"HttpRequest\" title=\"Request Pseudo-Header Fields\">\r\n            <t>\r\n              The following pseudo-header fields are defined for HTTP/2 requests:\r\n              <list style=\"symbols\">\r\n                <x:lt>\r\n                  <t>\r\n                    The <spanx style=\"verb\">:method</spanx> pseudo-header field includes the HTTP\r\n                    method (<xref target=\"RFC7231\" x:fmt=\",\" x:rel=\"#methods\"/>).\r\n                  </t>\r\n                </x:lt>\r\n                <x:lt>\r\n                  <t>\r\n                    The <spanx style=\"verb\">:scheme</spanx> pseudo-header field includes the scheme\r\n                    portion of the target URI (<xref target=\"RFC3986\" x:fmt=\",\" x:sec=\"3.1\"/>).\r\n                  </t>\r\n                  <t>\r\n                    <spanx style=\"verb\">:scheme</spanx> is not restricted to <spanx\r\n                    style=\"verb\">http</spanx> and <spanx style=\"verb\">https</spanx> schemed URIs.  A\r\n                    proxy or gateway can translate requests for non-HTTP schemes, enabling the use\r\n                    of HTTP to interact with non-HTTP services.\r\n                  </t>\r\n                </x:lt>\r\n                <x:lt>\r\n                  <t>\r\n                    The <spanx style=\"verb\">:authority</spanx> pseudo-header field includes the\r\n                    authority portion of the target URI (<xref target=\"RFC3986\" x:fmt=\",\"\r\n                    x:sec=\"3.2\"/>). 
The authority MUST NOT include the deprecated <spanx\r\n                    style=\"verb\">userinfo</spanx> subcomponent for <spanx style=\"verb\">http</spanx>\r\n                    or <spanx style=\"verb\">https</spanx> schemed URIs.\r\n                  </t>\r\n                  <t>\r\n                    To ensure that the HTTP/1.1 request line can be reproduced accurately, this\r\n                    pseudo-header field MUST be omitted when translating from an HTTP/1.1 request\r\n                    that has a request target in origin or asterisk form (see <xref\r\n                    target=\"RFC7230\" x:fmt=\",\" x:rel=\"#request-target\"/>). Clients that generate\r\n                    HTTP/2 requests directly SHOULD use the <spanx style=\"verb\">:authority</spanx> pseudo-header\r\n                    field instead of the <spanx style=\"verb\">Host</spanx> header field. An\r\n                    intermediary that converts an HTTP/2 request to HTTP/1.1 MUST create a <spanx\r\n                    style=\"verb\">Host</spanx> header field if one is not present in a request by\r\n                    copying the value of the <spanx style=\"verb\">:authority</spanx> pseudo-header\r\n                    field.\r\n                  </t>\r\n                </x:lt>\r\n                <x:lt>\r\n                  <t>\r\n                    The <spanx style=\"verb\">:path</spanx> pseudo-header field includes the path and\r\n                    query parts of the target URI (the <spanx style=\"verb\">path-absolute</spanx>\r\n                    production from <xref target=\"RFC3986\"/> and optionally a '?' character\r\n                    followed by the <spanx style=\"verb\">query</spanx> production, see <xref\r\n                    target=\"RFC3986\" x:fmt=\",\" x:sec=\"3.3\"/> and <xref target=\"RFC3986\" x:fmt=\",\"\r\n                    x:sec=\"3.4\"/>). 
A request in asterisk form includes the value '*' for the\r\n                    <spanx style=\"verb\">:path</spanx> pseudo-header field.\r\n                  </t>\r\n                  <t>\r\n                    This pseudo-header field MUST NOT be empty for <spanx style=\"verb\">http</spanx>\r\n                    or <spanx style=\"verb\">https</spanx> URIs; <spanx style=\"verb\">http</spanx> or\r\n                    <spanx style=\"verb\">https</spanx> URIs that do not contain a path component\r\n                    MUST include a value of '/'. The exception to this rule is an OPTIONS request\r\n                    for an <spanx style=\"verb\">http</spanx> or <spanx style=\"verb\">https</spanx>\r\n                    URI that does not include a path component; these MUST include a <spanx\r\n                    style=\"verb\">:path</spanx> pseudo-header field with a value of '*' (see <xref\r\n                    target=\"RFC7230\" x:fmt=\",\" x:rel=\"#asterisk-form\"/>).\r\n                  </t>\r\n                </x:lt>\r\n              </list>\r\n            </t>\r\n            <t>\r\n              All HTTP/2 requests MUST include exactly one valid value for the <spanx\r\n              style=\"verb\">:method</spanx>, <spanx style=\"verb\">:scheme</spanx>, and <spanx\r\n              style=\"verb\">:path</spanx> pseudo-header fields, unless it is a <xref\r\n              target=\"CONNECT\">CONNECT request</xref>. 
An HTTP request that omits mandatory\r\n              pseudo-header fields is <xref target=\"malformed\">malformed</xref>.\r\n            </t>\r\n            <t>\r\n              HTTP/2 does not define a way to carry the version identifier that is included in the\r\n              HTTP/1.1 request line.\r\n            </t>\r\n          </section>\r\n\r\n          <section anchor=\"HttpResponse\" title=\"Response Pseudo-Header Fields\">\r\n            <t>\r\n              For HTTP/2 responses, a single <spanx style=\"verb\">:status</spanx> pseudo-header\r\n              field is defined that carries the HTTP status code field (see <xref target=\"RFC7231\"\r\n              x:fmt=\",\" x:rel=\"#status.codes\"/>). This pseudo-header field MUST be included in all\r\n              responses, otherwise the response is <xref target=\"malformed\">malformed</xref>.\r\n            </t>\r\n            <t>\r\n              HTTP/2 does not define a way to carry the version or reason phrase that is included in\r\n              an HTTP/1.1 status line.\r\n            </t>\r\n          </section>\r\n\r\n         <section anchor=\"CompressCookie\" title=\"Compressing the Cookie Header Field\">\r\n            <t>\r\n              The <xref target=\"COOKIE\">Cookie header field</xref> can carry a significant amount of\r\n              redundant data.\r\n            </t>\r\n            <t>\r\n              The Cookie header field uses a semi-colon (\";\") to delimit cookie-pairs (or \"crumbs\").\r\n              This header field doesn't follow the list construction rules in HTTP (see <xref\r\n              target=\"RFC7230\" x:fmt=\",\" x:rel=\"#field.order\"/>), which prevents cookie-pairs from\r\n              being separated into different name-value pairs.  
This can significantly reduce\r\n              compression efficiency as individual cookie-pairs are updated.\r\n            </t>\r\n            <t>\r\n              To allow for better compression efficiency, the Cookie header field MAY be split into\r\n              separate header fields, each with one or more cookie-pairs.  If there are multiple\r\n              Cookie header fields after decompression, these MUST be concatenated into a single\r\n              octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII string \"; \")\r\n              before being passed into a non-HTTP/2 context, such as an HTTP/1.1 connection, or a\r\n              generic HTTP server application.\r\n            </t>\r\n            <figure>\r\n              <preamble>\r\n                Therefore, the following two lists of Cookie header fields are semantically\r\n                equivalent.\r\n              </preamble>\r\n              <artwork type=\"inline\"><![CDATA[\r\n  cookie: a=b; c=d; e=f\r\n\r\n  cookie: a=b\r\n  cookie: c=d\r\n  cookie: e=f\r\n]]></artwork>\r\n            </figure>\r\n          </section>\r\n\r\n          <section anchor=\"malformed\" title=\"Malformed Requests and Responses\">\r\n            <t>\r\n              A malformed request or response is one that is an otherwise valid sequence of HTTP/2\r\n              frames, but is invalid due to the presence of extraneous frames, prohibited\r\n              header fields, the absence of mandatory header fields, or the inclusion of uppercase\r\n              header field names.\r\n            </t>\r\n            <t>\r\n              A request or response that includes an entity body can include a <spanx\r\n              style=\"verb\">content-length</spanx> header field.  
A request or response is also\r\n              malformed if the value of a <spanx style=\"verb\">content-length</spanx> header field\r\n              does not equal the sum of the <x:ref>DATA</x:ref> frame payload lengths that form the\r\n              body.  A response that is defined to have no payload, as described in <xref\r\n              target=\"RFC7230\" x:fmt=\",\" x:rel=\"#header.content-length\"/>, can have a non-zero\r\n              <spanx style=\"verb\">content-length</spanx> header field, even though no content is\r\n              included in <x:ref>DATA</x:ref> frames.\r\n            </t>\r\n            <t>\r\n              Intermediaries that process HTTP requests or responses (i.e., any intermediary not\r\n              acting as a tunnel) MUST NOT forward a malformed request or response.  Malformed\r\n              requests or responses that are detected MUST be treated as a <xref\r\n              target=\"StreamErrorHandler\">stream error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.\r\n            </t>\r\n            <t>\r\n              For malformed requests, a server MAY send an HTTP response prior to closing or\r\n              resetting the stream.  Clients MUST NOT accept a malformed response. 
Note that these\r\n              requirements are intended to protect against several types of common attacks against\r\n              HTTP; they are deliberately strict, because being permissive can expose\r\n              implementations to these vulnerabilities.\r\n            </t>\r\n          </section>\r\n        </section>\r\n\r\n        <section title=\"Examples\">\r\n          <t>\r\n            This section shows HTTP/1.1 requests and responses, with illustrations of equivalent\r\n            HTTP/2 requests and responses.\r\n          </t>\r\n          <t>\r\n            An HTTP GET request includes request header fields and no body and is therefore\r\n            transmitted as a single <x:ref>HEADERS</x:ref> frame, followed by zero or more\r\n            <x:ref>CONTINUATION</x:ref> frames containing the serialized block of request header\r\n            fields.  The <x:ref>HEADERS</x:ref> frame in the following has both the END_HEADERS and\r\n            END_STREAM flags set; no <x:ref>CONTINUATION</x:ref> frames are sent:\r\n          </t>\r\n\r\n          <figure>\r\n            <artwork type=\"inline\"><![CDATA[\r\n  GET /resource HTTP/1.1           HEADERS\r\n  Host: example.org          ==>     + END_STREAM\r\n  Accept: image/jpeg                 + END_HEADERS\r\n                                       :method = GET\r\n                                       :scheme = https\r\n                                       :path = /resource\r\n                                       host = example.org\r\n                                       accept = image/jpeg\r\n]]></artwork>\r\n          </figure>\r\n\r\n          <t>\r\n            Similarly, a response that includes only response header fields is transmitted as a\r\n            <x:ref>HEADERS</x:ref> frame (again, followed by zero or more\r\n            <x:ref>CONTINUATION</x:ref> frames) containing the serialized block of response header\r\n            fields.\r\n          </t>\r\n\r\n          
<figure>\r\n            <artwork type=\"inline\"><![CDATA[\r\n  HTTP/1.1 304 Not Modified        HEADERS\r\n  ETag: \"xyzzy\"              ==>     + END_STREAM\r\n  Expires: Thu, 23 Jan ...           + END_HEADERS\r\n                                       :status = 304\r\n                                       etag = \"xyzzy\"\r\n                                       expires = Thu, 23 Jan ...\r\n]]></artwork>\r\n          </figure>\r\n\r\n          <t>\r\n            An HTTP POST request that includes request header fields and payload data is transmitted\r\n            as one <x:ref>HEADERS</x:ref> frame, followed by zero or more\r\n            <x:ref>CONTINUATION</x:ref> frames containing the request header fields, followed by one\r\n            or more <x:ref>DATA</x:ref> frames, with the last <x:ref>CONTINUATION</x:ref> (or\r\n            <x:ref>HEADERS</x:ref>) frame having the END_HEADERS flag set and the final\r\n            <x:ref>DATA</x:ref> frame having the END_STREAM flag set:\r\n          </t>\r\n\r\n          <figure>\r\n            <artwork type=\"inline\"><![CDATA[\r\n  POST /resource HTTP/1.1          HEADERS\r\n  Host: example.org          ==>     - END_STREAM\r\n  Content-Type: image/jpeg           - END_HEADERS\r\n  Content-Length: 123                  :method = POST\r\n                                       :path = /resource\r\n  {binary data}                        :scheme = https\r\n\r\n                                   CONTINUATION\r\n                                     + END_HEADERS\r\n                                       content-type = image/jpeg\r\n                                       host = example.org\r\n                                       content-length = 123\r\n\r\n                                   DATA\r\n                                     + END_STREAM\r\n                                   {binary data}\r\n]]></artwork>\r\n            <postamble>\r\n              Note that data contributing to any given header field could 
be spread between header\r\n              block fragments.  The allocation of header fields to frames in this example is\r\n              illustrative only.\r\n            </postamble>\r\n          </figure>\r\n\r\n          <t>\r\n            A response that includes header fields and payload data is transmitted as a\r\n            <x:ref>HEADERS</x:ref> frame, followed by zero or more <x:ref>CONTINUATION</x:ref>\r\n            frames, followed by one or more <x:ref>DATA</x:ref> frames, with the last\r\n            <x:ref>DATA</x:ref> frame in the sequence having the END_STREAM flag set:\r\n          </t>\r\n\r\n          <figure>\r\n            <artwork type=\"inline\"><![CDATA[\r\n  HTTP/1.1 200 OK                  HEADERS\r\n  Content-Type: image/jpeg   ==>     - END_STREAM\r\n  Content-Length: 123                + END_HEADERS\r\n                                       :status = 200\r\n  {binary data}                        content-type = image/jpeg\r\n                                       content-length = 123\r\n\r\n                                   DATA\r\n                                     + END_STREAM\r\n                                   {binary data}\r\n]]></artwork>\r\n          </figure>\r\n\r\n          <t>\r\n            Trailing header fields are sent as a header block after both the request or response\r\n            header block and all the <x:ref>DATA</x:ref> frames have been sent.  
The\r\n            <x:ref>HEADERS</x:ref> frame starting the trailers header block has the END_STREAM flag\r\n            set.\r\n          </t>\r\n\r\n          <figure>\r\n            <artwork type=\"inline\"><![CDATA[\r\n  HTTP/1.1 200 OK                  HEADERS\r\n  Content-Type: image/jpeg   ==>     - END_STREAM\r\n  Transfer-Encoding: chunked         + END_HEADERS\r\n  Trailer: Foo                         :status = 200\r\n                                       content-length = 123\r\n  123                                  content-type = image/jpeg\r\n  {binary data}                        trailer = Foo\r\n  0\r\n  Foo: bar                         DATA\r\n                                     - END_STREAM\r\n                                   {binary data}\r\n\r\n                                   HEADERS\r\n                                     + END_STREAM\r\n                                     + END_HEADERS\r\n                                       foo = bar\r\n]]></artwork>\r\n          </figure>\r\n\r\n\r\n          <figure>\r\n           <preamble>\r\n             An informational response using a 1xx status code other than 101 is transmitted as a\r\n             <x:ref>HEADERS</x:ref> frame, followed by zero or more <x:ref>CONTINUATION</x:ref>\r\n             frames:\r\n           </preamble>\r\n           <artwork type=\"inline\"><![CDATA[\r\n  HTTP/1.1 103 BAR                 HEADERS\r\n  Extension-Field: bar       ==>     - END_STREAM\r\n                                     + END_HEADERS\r\n                                       :status = 103\r\n                                       extension-field = bar\r\n]]></artwork>\r\n </figure>\r\n        </section>\r\n\r\n        <section anchor=\"Reliability\" title=\"Request Reliability Mechanisms in HTTP/2\">\r\n          <t>\r\n            In HTTP/1.1, an HTTP client is unable to retry a non-idempotent request when an error\r\n            occurs, because there is no means to determine the nature of the 
error.  It is possible\r\n            that some server processing occurred prior to the error, which could result in\r\n            undesirable effects if the request were reattempted.\r\n          </t>\r\n          <t>\r\n            HTTP/2 provides two mechanisms for providing a guarantee to a client that a request has\r\n            not been processed:\r\n            <list style=\"symbols\">\r\n              <t>\r\n                The <x:ref>GOAWAY</x:ref> frame indicates the highest stream number that might have\r\n                been processed.  Requests on streams with higher numbers are therefore guaranteed to\r\n                be safe to retry.\r\n              </t>\r\n              <t>\r\n                The <x:ref>REFUSED_STREAM</x:ref> error code can be included in a\r\n                <x:ref>RST_STREAM</x:ref> frame to indicate that the stream is being closed prior to\r\n                any processing having occurred.  Any request that was sent on the reset stream can\r\n                be safely retried.\r\n              </t>\r\n            </list>\r\n          </t>\r\n          <t>\r\n            Requests that have not been processed have not failed; clients MAY automatically retry\r\n            them, even those with non-idempotent methods.\r\n          </t>\r\n          <t>\r\n            A server MUST NOT indicate that a stream has not been processed unless it can guarantee\r\n            that fact.  If frames that are on a stream are passed to the application layer for any\r\n            stream, then <x:ref>REFUSED_STREAM</x:ref> MUST NOT be used for that stream, and a\r\n            <x:ref>GOAWAY</x:ref> frame MUST include a stream identifier that is greater than or\r\n            equal to the given stream identifier.\r\n          </t>\r\n          <t>\r\n            In addition to these mechanisms, the <x:ref>PING</x:ref> frame provides a way for a\r\n            client to easily test a connection.  
Connections that remain idle can become broken as\r\n            some middleboxes (for instance, network address translators, or load balancers) silently\r\n            discard connection bindings.  The <x:ref>PING</x:ref> frame allows a client to safely\r\n            test whether a connection is still active without sending a request.\r\n          </t>\r\n        </section>\r\n      </section>\r\n\r\n      <section anchor=\"PushResources\" title=\"Server Push\">\r\n        <t>\r\n          HTTP/2 allows a server to pre-emptively send (or \"push\") responses (along with\r\n          corresponding \"promised\" requests) to a client in association with a previous\r\n          client-initiated request. This can be useful when the server knows the client will need\r\n          to have those responses available in order to fully process the response to the original\r\n          request.\r\n        </t>\r\n\r\n        <t>\r\n          Pushing additional message exchanges in this fashion is optional, and is negotiated\r\n          between individual endpoints. The <x:ref>SETTINGS_ENABLE_PUSH</x:ref> setting can be set\r\n          to 0 to indicate that server push is disabled.\r\n        </t>\r\n        <t>\r\n          Promised requests MUST be cacheable (see <xref target=\"RFC7231\" x:fmt=\",\"\r\n          x:rel=\"#cacheable.methods\"/>), MUST be safe (see <xref target=\"RFC7231\" x:fmt=\",\"\r\n          x:rel=\"#safe.methods\"/>) and MUST NOT include a request body. Clients that receive a\r\n          promised request that is not cacheable, unsafe or that includes a request body MUST\r\n          reset the stream with a <xref target=\"StreamErrorHandler\">stream error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.\r\n        </t>\r\n        <t>\r\n          Pushed responses that are cacheable (see <xref target=\"RFC7234\" x:fmt=\",\"\r\n          x:rel=\"#response.cacheability\"/>) can be stored by the client, if it implements an HTTP\r\n          cache.  
Pushed responses are considered successfully validated on the origin server (e.g.,\r\n          if the \"no-cache\" cache response directive <xref target=\"RFC7234\" x:fmt=\",\"\r\n          x:rel=\"#cache-response-directive\"/> is present) while the stream identified by the\r\n          promised stream ID is still open.\r\n        </t>\r\n        <t>\r\n          Pushed responses that are not cacheable MUST NOT be stored by any HTTP cache. They MAY\r\n          be made available to the application separately.\r\n        </t>\r\n        <t>\r\n          An intermediary can receive pushes from the server and choose not to forward them on to\r\n          the client. In other words, how to make use of the pushed information is up to that\r\n          intermediary. Equally, the intermediary might choose to make additional pushes to the\r\n          client, without any action taken by the server.\r\n        </t>\r\n        <t>\r\n          A client cannot push. Thus, servers MUST treat the receipt of a\r\n          <x:ref>PUSH_PROMISE</x:ref> frame as a <xref target=\"ConnectionErrorHandler\">connection\r\n          error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>. 
Clients MUST reject any attempt to\r\n          change the <x:ref>SETTINGS_ENABLE_PUSH</x:ref> setting to a value other than 0 by treating\r\n          the message as a <xref target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>PROTOCOL_ERROR</x:ref>.\r\n        </t>\r\n\r\n        <section anchor=\"PushRequests\" title=\"Push Requests\">\r\n          <t>\r\n            Server push is semantically equivalent to a server responding to a request; however, in\r\n            this case that request is also sent by the server, as a <x:ref>PUSH_PROMISE</x:ref>\r\n            frame.\r\n          </t>\r\n          <t>\r\n            The <x:ref>PUSH_PROMISE</x:ref> frame includes a header block that contains a complete\r\n            set of request header fields that the server attributes to the request. It is not\r\n            possible to push a response to a request that includes a request body.\r\n          </t>\r\n\r\n          <t>\r\n            Pushed responses are always associated with an explicit request from the client. The\r\n            <x:ref>PUSH_PROMISE</x:ref> frames sent by the server are sent on that explicit\r\n            request's stream. The <x:ref>PUSH_PROMISE</x:ref> frame also includes a promised stream\r\n            identifier, chosen from the stream identifiers available to the server (see <xref\r\n            target=\"StreamIdentifiers\"/>).\r\n          </t>\r\n\r\n          <t>\r\n            The header fields in <x:ref>PUSH_PROMISE</x:ref> and any subsequent\r\n            <x:ref>CONTINUATION</x:ref> frames MUST be a valid and complete set of <xref\r\n            target=\"HttpRequest\">request header fields</xref>.  The server MUST include a method in\r\n            the <spanx style=\"verb\">:method</spanx> header field that is safe and cacheable.  
If a\r\n            client receives a <x:ref>PUSH_PROMISE</x:ref> that does not include a complete and valid\r\n            set of header fields, or the <spanx style=\"verb\">:method</spanx> header field identifies\r\n            a method that is not safe, it MUST respond with a <xref\r\n            target=\"StreamErrorHandler\">stream error</xref> of type <x:ref>PROTOCOL_ERROR</x:ref>.\r\n          </t>\r\n\r\n          <t>\r\n            The server SHOULD send <x:ref>PUSH_PROMISE</x:ref> (<xref target=\"PUSH_PROMISE\"/>)\r\n            frames prior to sending any frames that reference the promised responses. This avoids a\r\n            race where clients issue requests prior to receiving any <x:ref>PUSH_PROMISE</x:ref>\r\n            frames.\r\n          </t>\r\n          <t>\r\n            For example, if the server receives a request for a document containing embedded links\r\n            to multiple image files, and the server chooses to push those additional images to the\r\n            client, sending push promises before the <x:ref>DATA</x:ref> frames that contain the\r\n            image links ensures that the client is able to see the promises before discovering\r\n            embedded links. Similarly, if the server pushes responses referenced by the header block\r\n            (for instance, in Link header fields), sending the push promises before sending the\r\n            header block ensures that clients do not request them.\r\n          </t>\r\n\r\n          <t>\r\n            <x:ref>PUSH_PROMISE</x:ref> frames MUST NOT be sent by the client.\r\n          </t>\r\n          <t>\r\n            <x:ref>PUSH_PROMISE</x:ref> frames can be sent by the server in response to any\r\n            client-initiated stream, but the stream MUST be in either the \"open\" or \"half closed\r\n            (remote)\" state with respect to the server.  
<x:ref>PUSH_PROMISE</x:ref> frames are\r\n            interspersed with the frames that comprise a response, though they cannot be\r\n            interspersed with <x:ref>HEADERS</x:ref> and <x:ref>CONTINUATION</x:ref> frames that\r\n            comprise a single header block.\r\n          </t>\r\n          <t>\r\n            Sending a <x:ref>PUSH_PROMISE</x:ref> frame creates a new stream and puts the stream\r\n            into the “reserved (local)” state for the server and the “reserved (remote)” state for\r\n            the client.\r\n          </t>\r\n        </section>\r\n\r\n        <section anchor=\"PushResponses\" title=\"Push Responses\">\r\n          <t>\r\n            After sending the <x:ref>PUSH_PROMISE</x:ref> frame, the server can begin delivering the\r\n            pushed response as a <xref target=\"HttpResponse\">response</xref> on a server-initiated\r\n            stream that uses the promised stream identifier.  The server uses this stream to\r\n            transmit an HTTP response, using the same sequence of frames as defined in <xref\r\n            target=\"HttpSequence\"/>.  
This stream becomes <xref target=\"StreamStates\">\"half closed\"\r\n            to the client</xref> after the initial <x:ref>HEADERS</x:ref> frame is sent.\r\n          </t>\r\n\r\n          <t>\r\n            Once a client receives a <x:ref>PUSH_PROMISE</x:ref> frame and chooses to accept the\r\n            pushed response, the client SHOULD NOT issue any requests for the promised response\r\n            until after the promised stream has closed.\r\n          </t>\r\n\r\n          <t>\r\n            If the client determines, for any reason, that it does not wish to receive the pushed\r\n            response from the server, or if the server takes too long to begin sending the promised\r\n            response, the client can send an <x:ref>RST_STREAM</x:ref> frame, using either the\r\n            <x:ref>CANCEL</x:ref> or <x:ref>REFUSED_STREAM</x:ref> codes, and referencing the pushed\r\n            stream's identifier.\r\n          </t>\r\n          <t>\r\n            A client can use the <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> setting to limit the\r\n            number of responses that can be concurrently pushed by a server.  Advertising a\r\n            <x:ref>SETTINGS_MAX_CONCURRENT_STREAMS</x:ref> value of zero disables server push by\r\n            preventing the server from creating the necessary streams.  This does not prohibit a\r\n            server from sending <x:ref>PUSH_PROMISE</x:ref> frames; clients need to reset any\r\n            promised streams that are not wanted.\r\n          </t>\r\n\r\n          <t>\r\n            Clients receiving a pushed response MUST validate that either the server is\r\n            authoritative (see <xref target=\"authority\"/>), or the proxy that provided the pushed\r\n            response is configured for the corresponding request. 
For example, a server that offers\r\n            a certificate for only the <spanx style=\"verb\">example.com</spanx> DNS-ID or Common Name\r\n            is not permitted to push a response for <spanx\r\n            style=\"verb\">https://www.example.org/doc</spanx>.\r\n          </t>\r\n          <t>\r\n            The response for a <x:ref>PUSH_PROMISE</x:ref> stream begins with a\r\n            <x:ref>HEADERS</x:ref> frame, which immediately puts the stream into the “half closed\r\n            (remote)” state for the server and “half closed (local)” state for the client, and ends\r\n            with a frame bearing END_STREAM, which places the stream in the \"closed\" state.\r\n            <list style=\"hanging\">\r\n              <t hangText=\"Note:\">\r\n                The client never sends a frame with the END_STREAM flag for a server push.\r\n              </t>\r\n            </list>\r\n          </t>\r\n        </section>\r\n\r\n      </section>\r\n\r\n      <section anchor=\"CONNECT\" title=\"The CONNECT Method\">\r\n        <t>\r\n          In HTTP/1.x, the pseudo-method CONNECT (<xref target=\"RFC7231\" x:fmt=\",\"\r\n          x:rel=\"#CONNECT\"/>) is used to convert an HTTP connection into a tunnel to a remote host.\r\n          CONNECT is primarily used with HTTP proxies to establish a TLS session with an origin\r\n          server for the purposes of interacting with <spanx style=\"verb\">https</spanx> resources.\r\n        </t>\r\n        <t>\r\n          In HTTP/2, the CONNECT method is used to establish a tunnel over a single HTTP/2 stream to\r\n          a remote host, for similar purposes. The HTTP header field mapping works as defined in\r\n          <xref target=\"HttpRequest\">Request Header Fields</xref>, with a few\r\n          differences. 
Specifically:\r\n          <list style=\"symbols\">\r\n            <t>\r\n              The <spanx style=\"verb\">:method</spanx> header field is set to <spanx\r\n              style=\"verb\">CONNECT</spanx>.\r\n            </t>\r\n            <t>\r\n              The <spanx style=\"verb\">:scheme</spanx> and <spanx style=\"verb\">:path</spanx> header\r\n              fields MUST be omitted.\r\n            </t>\r\n            <t>\r\n              The <spanx style=\"verb\">:authority</spanx> header field contains the host and port to\r\n              connect to (equivalent to the authority-form of the request-target of CONNECT\r\n              requests, see <xref target=\"RFC7230\" x:fmt=\",\" x:rel=\"#request-target\"/>).\r\n            </t>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          A proxy that supports CONNECT establishes a <xref target=\"TCP\">TCP connection</xref> to\r\n          the server identified in the <spanx style=\"verb\">:authority</spanx> header field. Once\r\n          this connection is successfully established, the proxy sends a <x:ref>HEADERS</x:ref>\r\n          frame containing a 2xx series status code to the client, as defined in <xref\r\n          target=\"RFC7231\" x:fmt=\",\" x:rel=\"#CONNECT\"/>.\r\n        </t>\r\n        <t>\r\n          After the initial <x:ref>HEADERS</x:ref> frame sent by each peer, all subsequent\r\n          <x:ref>DATA</x:ref> frames correspond to data sent on the TCP connection.  The payload of\r\n          any <x:ref>DATA</x:ref> frames sent by the client is transmitted by the proxy to the TCP\r\n          server; data received from the TCP server is assembled into <x:ref>DATA</x:ref> frames by\r\n          the proxy.  
Frame types other than <x:ref>DATA</x:ref> or stream management frames\r\n          (<x:ref>RST_STREAM</x:ref>, <x:ref>WINDOW_UPDATE</x:ref>, and <x:ref>PRIORITY</x:ref>)\r\n          MUST NOT be sent on a connected stream, and MUST be treated as a <xref\r\n          target=\"StreamErrorHandler\">stream error</xref> if received.\r\n        </t>\r\n        <t>\r\n          The TCP connection can be closed by either peer.  The END_STREAM flag on a\r\n          <x:ref>DATA</x:ref> frame is treated as being equivalent to the TCP FIN bit.  A client is\r\n          expected to send a <x:ref>DATA</x:ref> frame with the END_STREAM flag set after receiving\r\n          a frame bearing the END_STREAM flag.  A proxy that receives a <x:ref>DATA</x:ref> frame\r\n          with the END_STREAM flag set sends the attached data with the FIN bit set on the last TCP\r\n          segment.  A proxy that receives a TCP segment with the FIN bit set sends a\r\n          <x:ref>DATA</x:ref> frame with the END_STREAM flag set.  Note that the final TCP segment\r\n          or <x:ref>DATA</x:ref> frame could be empty.\r\n        </t>\r\n        <t>\r\n          A TCP connection error is signaled with <x:ref>RST_STREAM</x:ref>.  A proxy treats any\r\n          error in the TCP connection, which includes receiving a TCP segment with the RST bit set,\r\n          as a <xref target=\"StreamErrorHandler\">stream error</xref> of type\r\n          <x:ref>CONNECT_ERROR</x:ref>.  
Correspondingly, a proxy MUST send a TCP segment with the\r\n          RST bit set if it detects an error with the stream or the HTTP/2 connection.\r\n        </t>\r\n      </section>\r\n    </section>\r\n\r\n    <section anchor=\"HttpExtra\" title=\"Additional HTTP Requirements/Considerations\">\r\n      <t>\r\n        This section outlines attributes of the HTTP protocol that improve interoperability, reduce\r\n        exposure to known security vulnerabilities, or reduce the potential for implementation\r\n        variation.\r\n      </t>\r\n\r\n      <section title=\"Connection Management\">\r\n        <t>\r\n          HTTP/2 connections are persistent.  For best performance, it is expected clients will not\r\n          close connections until it is determined that no further communication with a server is\r\n          necessary (for example, when a user navigates away from a particular web page), or until\r\n          the server closes the connection.\r\n        </t>\r\n        <t>\r\n          Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port pair,\r\n          where host is derived from a URI, a selected <xref target=\"ALT-SVC\">alternative\r\n          service</xref>, or a configured proxy.\r\n        </t>\r\n        <t>\r\n          A client can create additional connections as replacements, either to replace connections\r\n          that are near to exhausting the available <xref target=\"StreamIdentifiers\">stream\r\n          identifier space</xref>, to refresh the keying material for a TLS connection, or to\r\n          replace connections that have encountered <xref\r\n          target=\"ConnectionErrorHandler\">errors</xref>.\r\n        </t>\r\n        <t>\r\n          A client MAY open multiple connections to the same IP address and TCP port using different\r\n          <xref target=\"TLS-EXT\">Server Name Indication</xref> values or to provide different TLS\r\n          client certificates, but SHOULD avoid creating 
multiple connections with the same\r\n          configuration.\r\n        </t>\r\n        <t>\r\n          Servers are encouraged to maintain open connections for as long as possible, but are\r\n          permitted to terminate idle connections if necessary.  When either endpoint chooses to\r\n          close the transport-layer TCP connection, the terminating endpoint SHOULD first send a\r\n          <x:ref>GOAWAY</x:ref> (<xref target=\"GOAWAY\"/>) frame so that both endpoints can reliably\r\n          determine whether previously sent frames have been processed and gracefully complete or\r\n          terminate any necessary remaining tasks.\r\n        </t>\r\n\r\n        <section anchor=\"reuse\" title=\"Connection Reuse\">\r\n          <t>\r\n            Connections that are made to an origin server, either directly or through a tunnel\r\n            created using the <xref target=\"CONNECT\">CONNECT method</xref>, MAY be reused for\r\n            requests with multiple different URI authority components.  A connection can be reused\r\n            as long as the origin server is <xref target=\"authority\">authoritative</xref>.  For\r\n            <spanx style=\"verb\">http</spanx> resources, this depends on the host having resolved to\r\n            the same IP address.\r\n          </t>\r\n          <t>\r\n            For <spanx style=\"verb\">https</spanx> resources, connection reuse additionally depends\r\n            on having a certificate that is valid for the host in the URI.  An origin server might\r\n            offer a certificate with multiple <spanx style=\"verb\">subjectAltName</spanx> attributes,\r\n            or names with wildcards, one of which is valid for the authority in the URI.  
For\r\n            example, a certificate with a <spanx style=\"verb\">subjectAltName</spanx> of <spanx\r\n            style=\"verb\">*.example.com</spanx> might permit the use of the same connection for\r\n            requests to URIs starting with <spanx style=\"verb\">https://a.example.com/</spanx> and\r\n            <spanx style=\"verb\">https://b.example.com/</spanx>.\r\n          </t>\r\n          <t>\r\n            In some deployments, reusing a connection for multiple origins can result in requests\r\n            being directed to the wrong origin server.  For example, TLS termination might be\r\n            performed by a middlebox that uses the TLS <xref target=\"TLS-EXT\">Server Name Indication\r\n            (SNI)</xref> extension to select an origin server.  This means that it is possible\r\n            for clients to send confidential information to servers that might not be the intended\r\n            target for the request, even though the server is otherwise authoritative.\r\n          </t>\r\n          <t>\r\n            A server that does not wish clients to reuse connections can indicate that it is not\r\n            authoritative for a request by sending a 421 (Misdirected Request) status code in response\r\n            to the request (see <xref target=\"MisdirectedRequest\"/>).\r\n          </t>\r\n          <t>\r\n            A client that is configured to use a proxy over HTTP/2 directs requests to that proxy\r\n            through a single connection.  That is, all requests sent via a proxy reuse the\r\n            connection to the proxy.\r\n          </t>\r\n        </section>\r\n\r\n        <section anchor=\"MisdirectedRequest\" title=\"The 421 (Misdirected Request) Status Code\">\r\n          <t>\r\n            The 421 (Misdirected Request) status code indicates that the request was directed at a\r\n            server that is not able to produce a response.  
This can be sent by a server that is not\r\n            configured to produce responses for the combination of scheme and authority that are\r\n            included in the request URI.\r\n          </t>\r\n          <t>\r\n            Clients receiving a 421 (Misdirected Request) response from a server MAY retry the\r\n            request - whether the request method is idempotent or not - over a different connection.\r\n            This is possible if a connection is reused (<xref target=\"reuse\"/>) or if an alternative\r\n            service is selected (<xref target=\"ALT-SVC\"/>).\r\n          </t>\r\n          <t>\r\n            This status code MUST NOT be generated by proxies.\r\n          </t>\r\n          <t>\r\n            A 421 response is cacheable by default; i.e., unless otherwise indicated by the method\r\n            definition or explicit cache controls (see <xref target=\"RFC7234\"\r\n            x:rel=\"#heuristic.freshness\" x:fmt=\"of\"/>).\r\n          </t>\r\n        </section>\r\n      </section>\r\n\r\n      <section title=\"Use of TLS Features\" anchor=\"TLSUsage\">\r\n        <t>\r\n          Implementations of HTTP/2 MUST support <xref target=\"TLS12\">TLS 1.2</xref> for HTTP/2 over\r\n          TLS.  The general TLS usage guidance in <xref target=\"TLSBCP\"/> SHOULD be followed, with\r\n          some additional restrictions that are specific to HTTP/2.\r\n        </t>\r\n\r\n        <t>\r\n          An implementation of HTTP/2 over TLS MUST use TLS 1.2 or higher with the restrictions on\r\n          feature set and cipher suite described in this section.  Due to implementation\r\n          limitations, it might not be possible to fail TLS negotiation.  
An endpoint MUST\r\n          immediately terminate an HTTP/2 connection that does not meet these minimum requirements\r\n          with a <xref target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>INADEQUATE_SECURITY</x:ref>.\r\n        </t>\r\n\r\n        <section anchor=\"TLSFeatures\" title=\"TLS Features\">\r\n          <t>\r\n            The TLS implementation MUST support the <xref target=\"TLS-EXT\">Server Name Indication\r\n            (SNI)</xref> extension to TLS. HTTP/2 clients MUST indicate the target domain name when\r\n            negotiating TLS.\r\n          </t>\r\n          <t>\r\n            The TLS implementation MUST disable compression.  TLS compression can lead to the\r\n            exposure of information that would not otherwise be revealed <xref target=\"RFC3749\"/>.\r\n            Generic compression is unnecessary since HTTP/2 provides compression features that are\r\n            more aware of context and therefore likely to be more appropriate for use for\r\n            performance, security or other reasons.\r\n          </t>\r\n          <t>\r\n            The TLS implementation MUST disable renegotiation.  An endpoint MUST treat a TLS\r\n            renegotiation as a <xref target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n            <x:ref>PROTOCOL_ERROR</x:ref>.  Note that disabling renegotiation can result in\r\n            long-lived connections becoming unusable due to limits on the number of messages the\r\n            underlying cipher suite can encipher.\r\n          </t>\r\n          <t>\r\n            A client MAY use renegotiation to provide confidentiality protection for client\r\n            credentials offered in the handshake, but any renegotiation MUST occur prior to sending\r\n            the connection preface.  
A server SHOULD request a client certificate if it sees a\r\n            renegotiation request immediately after establishing a connection.\r\n          </t>\r\n          <t>\r\n            This effectively prevents the use of renegotiation in response to a request for a\r\n            specific protected resource.  A future specification might provide a way to support this\r\n            use case. <!-- <cref> We are tracking this in a non-blocking fashion in issue #496 and\r\n            with a new draft. -->\r\n          </t>\r\n        </section>\r\n\r\n        <section title=\"TLS Cipher Suites\">\r\n          <t>\r\n            The set of TLS cipher suites that are permitted in HTTP/2 is restricted.  HTTP/2 MUST\r\n            only be used with cipher suites that have ephemeral key exchange, such as the <xref\r\n            target=\"TLS12\">ephemeral Diffie-Hellman (DHE)</xref> or the <xref\r\n            target=\"RFC4492\">elliptic curve variant (ECDHE)</xref>.  Ephemeral key exchange MUST\r\n            have a minimum size of 2048 bits for DHE or security level of 128 bits for ECDHE.\r\n            Clients MUST accept DHE sizes of up to 4096 bits.  HTTP MUST NOT be used with cipher\r\n            suites that use stream or block ciphers.  Authenticated Encryption with Additional Data\r\n            (AEAD) modes, such as the <xref target=\"RFC5288\">Galois/Counter Mode (GCM) for\r\n            AES</xref>, are acceptable.\r\n          </t>\r\n          <t>\r\n            The effect of these restrictions is that TLS 1.2 implementations could have\r\n            non-intersecting sets of available cipher suites, since these prevent the use of the\r\n            cipher suite that TLS 1.2 makes mandatory.  
To avoid this problem, implementations of\r\n            HTTP/2 that use TLS 1.2 MUST support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 <xref\r\n            target=\"TLS-ECDHE\"/> with P256 <xref target=\"FIPS186\"/>.\r\n          </t>\r\n          <t>\r\n            Clients MAY advertise support of cipher suites that are prohibited by the above\r\n            restrictions in order to allow for connection to servers that do not support HTTP/2.\r\n            This enables a fallback to protocols without these constraints without the additional\r\n            latency imposed by using a separate connection for fallback.\r\n          </t>\r\n        </section>\r\n      </section>\r\n    </section>\r\n\r\n    <section anchor=\"security\" title=\"Security Considerations\">\r\n      <section title=\"Server Authority\" anchor=\"authority\">\r\n        <t>\r\n          HTTP/2 relies on the HTTP/1.1 definition of authority for determining whether a server is\r\n          authoritative in providing a given response, see <xref target=\"RFC7230\" x:fmt=\",\"\r\n          x:rel=\"#establishing.authority\"/>.  This relies on local name resolution for the \"http\"\r\n          URI scheme, and the authenticated server identity for the \"https\" scheme (see <xref\r\n          target=\"RFC2818\" x:fmt=\",\" x:sec=\"3\"/>).\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"Cross-Protocol Attacks\">\r\n        <t>\r\n          In a cross-protocol attack, an attacker causes a client to initiate a transaction in one\r\n          protocol toward a server that understands a different protocol.  An attacker might be able\r\n          to cause the transaction to appear as a valid transaction in the second protocol.  
In\r\n          combination with the capabilities of the web context, this can be used to interact with\r\n          poorly protected servers in private networks.\r\n        </t>\r\n        <t>\r\n          Completing a TLS handshake with an ALPN identifier for HTTP/2 can be considered sufficient\r\n          protection against cross protocol attacks.  ALPN provides a positive indication that a\r\n          server is willing to proceed with HTTP/2, which prevents attacks on other TLS-based\r\n          protocols.\r\n        </t>\r\n        <t>\r\n          The encryption in TLS makes it difficult for attackers to control the data which could be\r\n          used in a cross-protocol attack on a cleartext protocol.\r\n        </t>\r\n        <t>\r\n          The cleartext version of HTTP/2 has minimal protection against cross-protocol attacks.\r\n          The <xref target=\"ConnectionHeader\">connection preface</xref> contains a string that is\r\n          designed to confuse HTTP/1.1 servers, but no special protection is offered for other\r\n          protocols.  A server that is willing to ignore parts of an HTTP/1.1 request containing an\r\n          Upgrade header field in addition to the client connection preface could be exposed to a\r\n          cross-protocol attack.\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"Intermediary Encapsulation Attacks\">\r\n        <t>\r\n          HTTP/2 header field names and values are encoded as sequences of octets with a length\r\n          prefix.  This enables HTTP/2 to carry any string of octets as the name or value of a\r\n          header field.  An intermediary that translates HTTP/2 requests or responses into HTTP/1.1\r\n          directly could permit the creation of corrupted HTTP/1.1 messages.  
An attacker might\r\n          exploit this behavior to cause the intermediary to create HTTP/1.1 messages with illegal\r\n          header fields, extra header fields, or even new messages that are entirely falsified.\r\n        </t>\r\n        <t>\r\n          Header field names or values that contain characters not permitted by HTTP/1.1, including\r\n          carriage return (ASCII 0xd) or line feed (ASCII 0xa) MUST NOT be translated verbatim by an\r\n          intermediary, as stipulated in <xref target=\"RFC7230\" x:rel=\"#field.parsing\" x:fmt=\",\"/>.\r\n        </t>\r\n        <t>\r\n          Translation from HTTP/1.x to HTTP/2 does not produce the same opportunity to an attacker.\r\n          Intermediaries that perform translation to HTTP/2 MUST remove any instances of the <spanx\r\n          style=\"verb\">obs-fold</spanx> production from header field values.\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"Cacheability of Pushed Responses\">\r\n        <t>\r\n          Pushed responses do not have an explicit request from the client; the request\r\n          is provided by the server in the <x:ref>PUSH_PROMISE</x:ref> frame.\r\n        </t>\r\n        <t>\r\n          Caching responses that are pushed is possible based on the guidance provided by the origin\r\n          server in the Cache-Control header field.  However, this can cause issues if a single\r\n          server hosts more than one tenant.  For example, a server might offer multiple users each\r\n          a small portion of its URI space.\r\n        </t>\r\n        <t>\r\n          Where multiple tenants share space on the same server, that server MUST ensure that\r\n          tenants are not able to push representations of resources that they do not have authority\r\n          over.  
Failure to enforce this would allow a tenant to provide a representation that would\r\n          be served out of cache, overriding the actual representation that the authoritative tenant\r\n          provides.\r\n        </t>\r\n        <t>\r\n          Pushed responses for which an origin server is not authoritative (see\r\n          <xref target=\"authority\"/>) are never cached or used.\r\n        </t>\r\n      </section>\r\n\r\n      <section anchor=\"dos\" title=\"Denial of Service Considerations\">\r\n        <t>\r\n          An HTTP/2 connection can demand a greater commitment of resources to operate than a\r\n          HTTP/1.1 connection.  The use of header compression and flow control depend on a\r\n          commitment of resources for storing a greater amount of state.  Settings for these\r\n          features ensure that memory commitments for these features are strictly bounded.\r\n        </t>\r\n        <t>\r\n          The number of <x:ref>PUSH_PROMISE</x:ref> frames is not constrained in the same fashion.\r\n          A client that accepts server push SHOULD limit the number of streams it allows to be in\r\n          the \"reserved (remote)\" state.  Excessive number of server push streams can be treated as\r\n          a <xref target=\"StreamErrorHandler\">stream error</xref> of type\r\n          <x:ref>ENHANCE_YOUR_CALM</x:ref>.\r\n        </t>\r\n        <t>\r\n          Processing capacity cannot be guarded as effectively as state capacity.\r\n        </t>\r\n        <t>\r\n          The <x:ref>SETTINGS</x:ref> frame can be abused to cause a peer to expend additional\r\n          processing time. This might be done by pointlessly changing SETTINGS parameters, setting\r\n          multiple undefined parameters, or changing the same setting multiple times in the same\r\n          frame.  
<x:ref>WINDOW_UPDATE</x:ref> or <x:ref>PRIORITY</x:ref> frames can be abused to\r\n          cause an unnecessary waste of resources.\r\n        </t>\r\n        <t>\r\n          Large numbers of small or empty frames can be abused to cause a peer to expend time\r\n          processing frame headers.  Note however that some uses are entirely legitimate, such as\r\n          the sending of an empty <x:ref>DATA</x:ref> frame to end a stream.\r\n        </t>\r\n        <t>\r\n          Header compression also offers some opportunities to waste processing resources; see <xref\r\n          target=\"COMPRESSION\" x:fmt=\"of\" x:rel=\"#Security\"/> for more details on potential abuses.\r\n        </t>\r\n        <t>\r\n          Limits in <x:ref>SETTINGS</x:ref> parameters cannot be reduced instantaneously, which\r\n          leaves an endpoint exposed to behavior from a peer that could exceed the new limits. In\r\n          particular, immediately after establishing a connection, limits set by a server are not\r\n          known to clients and could be exceeded without being an obvious protocol violation.\r\n        </t>\r\n        <t>\r\n          All these features - i.e., <x:ref>SETTINGS</x:ref> changes, small frames, header\r\n          compression - have legitimate uses.  These features become a burden only when they are\r\n          used unnecessarily or to excess.\r\n        </t>\r\n        <t>\r\n          An endpoint that doesn't monitor this behavior exposes itself to a risk of denial of\r\n          service attack.  Implementations SHOULD track the use of these features and set limits on\r\n          their use.  
An endpoint MAY treat activity that is suspicious as a <xref\r\n          target=\"ConnectionErrorHandler\">connection error</xref> of type\r\n          <x:ref>ENHANCE_YOUR_CALM</x:ref>.\r\n        </t>\r\n\r\n        <section anchor=\"MaxHeaderBlock\" title=\"Limits on Header Block Size\">\r\n          <t>\r\n            A large <xref target=\"HeaderBlock\">header block</xref> can cause an implementation to\r\n            commit a large amount of state.  Header fields that are critical for routing can appear\r\n            toward the end of a header block, which prevents streaming of header fields to their\r\n            ultimate destination. For this and other reasons, such as ensuring cache correctness,\r\n            means that an endpoint might need to buffer the entire header block.  Since there is no\r\n            hard limit to the size of a header block, some endpoints could be forced to commit a large\r\n            amount of available memory for header fields.\r\n          </t>\r\n          <t>\r\n            An endpoint can use the <x:ref>SETTINGS_MAX_HEADER_LIST_SIZE</x:ref> to advise peers of\r\n            limits that might apply on the size of header blocks.  This setting is only advisory, so\r\n            endpoints MAY choose to send header blocks that exceed this limit and risk having the\r\n            request or response being treated as malformed.  This setting is specific to a connection,\r\n            so any request or response could encounter a hop with a lower, unknown limit.  An\r\n            intermediary can attempt to avoid this problem by passing on values presented by\r\n            different peers, but they are not obligated to do so.\r\n          </t>\r\n          <t>\r\n            A server that receives a larger header block than it is willing to handle can send an\r\n            HTTP 431 (Request Header Fields Too Large) status code <xref target=\"RFC6585\"/>.  A\r\n            client can discard responses that it cannot process.  
The header block MUST be processed\r\n            to ensure a consistent connection state, unless the connection is closed.\r\n          </t>\r\n        </section>\r\n      </section>\r\n\r\n      <section title=\"Use of Compression\">\r\n        <t>\r\n          HTTP/2 enables greater use of compression for both header fields (<xref\r\n          target=\"HeaderBlock\"/>) and entity bodies.  Compression can allow an attacker to recover\r\n          secret data when it is compressed in the same context as data under attacker control.\r\n        </t>\r\n        <t>\r\n          There are demonstrable attacks on compression that exploit the characteristics of the web\r\n          (e.g., <xref target=\"BREACH\"/>).  The attacker induces multiple requests containing\r\n          varying plaintext, observing the length of the resulting ciphertext in each, which\r\n          reveals a shorter length when a guess about the secret is correct.\r\n        </t>\r\n        <t>\r\n          Implementations communicating on a secure channel MUST NOT compress content that includes\r\n          both confidential and attacker-controlled data unless separate compression dictionaries\r\n          are used for each source of data.  Compression MUST NOT be used if the source of data\r\n          cannot be reliably determined.  Generic stream compression, such as that provided by TLS\r\n          MUST NOT be used with HTTP/2 (<xref target=\"TLSFeatures\"/>).\r\n        </t>\r\n        <t>\r\n          Further considerations regarding the compression of header fields are described in <xref\r\n          target=\"COMPRESSION\"/>.\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"Use of Padding\" anchor=\"padding\">\r\n        <t>\r\n          Padding within HTTP/2 is not intended as a replacement for general purpose padding, such\r\n          as might be provided by <xref target=\"TLS12\">TLS</xref>.  Redundant padding could even be\r\n          counterproductive.  
Correct application can depend on having specific knowledge of the\r\n          data that is being padded.\r\n        </t>\r\n        <t>\r\n          To mitigate attacks that rely on compression, disabling or limiting compression might be\r\n          preferable to padding as a countermeasure.\r\n        </t>\r\n        <t>\r\n          Padding can be used to obscure the exact size of frame content, and is provided to\r\n          mitigate specific attacks within HTTP.  For example, attacks where compressed content\r\n          includes both attacker-controlled plaintext and secret data (see for example, <xref\r\n          target=\"BREACH\"/>).\r\n        </t>\r\n        <t>\r\n          Use of padding can result in less protection than might seem immediately obvious.  At\r\n          best, padding only makes it more difficult for an attacker to infer length information by\r\n          increasing the number of frames an attacker has to observe.  Incorrectly implemented\r\n          padding schemes can be easily defeated.  In particular, randomized padding with a\r\n          predictable distribution provides very little protection; similarly, padding payloads to a\r\n          fixed size exposes information as payload sizes cross the fixed size boundary, which could\r\n          be possible if an attacker can control plaintext.\r\n        </t>\r\n        <t>\r\n          Intermediaries SHOULD retain padding for <x:ref>DATA</x:ref> frames, but MAY drop padding\r\n          for <x:ref>HEADERS</x:ref> and <x:ref>PUSH_PROMISE</x:ref> frames.  A valid reason for an\r\n          intermediary to change the amount of padding of frames is to improve the protections that\r\n          padding provides.\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"Privacy Considerations\">\r\n        <t>\r\n          Several characteristics of HTTP/2 provide an observer an opportunity to correlate actions\r\n          of a single client or server over time.  
This includes the value of settings, the manner\r\n          in which flow control windows are managed, the way priorities are allocated to streams,\r\n          timing of reactions to stimulus, and handling of any optional features.\r\n        </t>\r\n        <t>\r\n          As far as this creates observable differences in behavior, they could be used as a basis\r\n          for fingerprinting a specific client, as defined in <xref target=\"HTML5\" x:fmt=\"of\"\r\n          x:sec=\"1.8\" x:rel=\"introduction.html#fingerprint\"/>.\r\n        </t>\r\n      </section>\r\n    </section>\r\n\r\n    <section anchor=\"iana\" title=\"IANA Considerations\">\r\n      <t>\r\n        A string for identifying HTTP/2 is entered into the \"Application Layer Protocol Negotiation\r\n        (ALPN) Protocol IDs\" registry established in <xref target=\"TLS-ALPN\"/>.\r\n      </t>\r\n      <t>\r\n        This document establishes a registry for frame types, settings, and error codes.  These new\r\n        registries are entered into a new \"Hypertext Transfer Protocol (HTTP) 2 Parameters\" section.\r\n      </t>\r\n      <t>\r\n        This document registers the <spanx style=\"verb\">HTTP2-Settings</spanx> header field for\r\n        use in HTTP; and the 421 (Misdirected Request) status code.\r\n      </t>\r\n      <t>\r\n        This document registers the <spanx style=\"verb\">PRI</spanx> method for use in HTTP, to avoid\r\n        collisions with the <xref target=\"ConnectionHeader\">connection preface</xref>.\r\n      </t>\r\n\r\n      <section anchor=\"iana-alpn\" title=\"Registration of HTTP/2 Identification Strings\">\r\n        <t>\r\n          This document creates two registrations for the identification of HTTP/2 in the\r\n          \"Application Layer Protocol Negotiation (ALPN) Protocol IDs\" registry established in <xref\r\n          target=\"TLS-ALPN\"/>.\r\n        </t>\r\n        <t>\r\n          The \"h2\" string identifies HTTP/2 when used over TLS:\r\n          
<list style=\"hanging\">\r\n            <t hangText=\"Protocol:\">HTTP/2 over TLS</t>\r\n            <t hangText=\"Identification Sequence:\">0x68 0x32 (\"h2\")</t>\r\n            <t hangText=\"Specification:\">This document</t>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          The \"h2c\" string identifies HTTP/2 when used over cleartext TCP:\r\n          <list style=\"hanging\">\r\n            <t hangText=\"Protocol:\">HTTP/2 over TCP</t>\r\n            <t hangText=\"Identification Sequence:\">0x68 0x32 0x63 (\"h2c\")</t>\r\n            <t hangText=\"Specification:\">This document</t>\r\n          </list>\r\n        </t>\r\n      </section>\r\n\r\n      <section anchor=\"iana-frames\" title=\"Frame Type Registry\">\r\n        <t>\r\n          This document establishes a registry for HTTP/2 frame type codes.  The \"HTTP/2 Frame\r\n          Type\" registry manages an 8-bit space.  The \"HTTP/2 Frame Type\" registry operates under\r\n          either of the <xref target=\"RFC5226\">\"IETF Review\" or \"IESG Approval\" policies</xref> for\r\n          values between 0x00 and 0xef, with values between 0xf0 and 0xff being reserved for\r\n          experimental use.\r\n        </t>\r\n        <t>\r\n          New entries in this registry require the following information:\r\n          <list style=\"hanging\">\r\n            <t hangText=\"Frame Type:\">\r\n              A name or label for the frame type.\r\n            </t>\r\n            <t hangText=\"Code:\">\r\n              The 8-bit code assigned to the frame type.\r\n            </t>\r\n            <t hangText=\"Specification:\">\r\n              A reference to a specification that includes a description of the frame layout,\r\n              its semantics and flags that the frame type uses, including any parts of the frame\r\n              that are conditionally present based on the value of flags.\r\n            </t>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          The entries in 
the following table are registered by this document.\r\n        </t>\r\n        <texttable align=\"left\" suppress-title=\"true\">\r\n          <ttcol>Frame Type</ttcol>\r\n          <ttcol>Code</ttcol>\r\n          <ttcol>Section</ttcol>\r\n          <c>DATA</c><c>0x0</c><c><xref target=\"DATA\"/></c>\r\n          <c>HEADERS</c><c>0x1</c><c><xref target=\"HEADERS\"/></c>\r\n          <c>PRIORITY</c><c>0x2</c><c><xref target=\"PRIORITY\"/></c>\r\n          <c>RST_STREAM</c><c>0x3</c><c><xref target=\"RST_STREAM\"/></c>\r\n          <c>SETTINGS</c><c>0x4</c><c><xref target=\"SETTINGS\"/></c>\r\n          <c>PUSH_PROMISE</c><c>0x5</c><c><xref target=\"PUSH_PROMISE\"/></c>\r\n          <c>PING</c><c>0x6</c><c><xref target=\"PING\"/></c>\r\n          <c>GOAWAY</c><c>0x7</c><c><xref target=\"GOAWAY\"/></c>\r\n          <c>WINDOW_UPDATE</c><c>0x8</c><c><xref target=\"WINDOW_UPDATE\"/></c>\r\n          <c>CONTINUATION</c><c>0x9</c><c><xref target=\"CONTINUATION\"/></c>\r\n        </texttable>\r\n      </section>\r\n\r\n      <section anchor=\"iana-settings\" title=\"Settings Registry\">\r\n        <t>\r\n          This document establishes a registry for HTTP/2 settings.  The \"HTTP/2 Settings\" registry\r\n          manages a 16-bit space.  The \"HTTP/2 Settings\" registry operates under the <xref\r\n          target=\"RFC5226\">\"Expert Review\" policy</xref> for values in the range from 0x0000 to\r\n          0xefff, with values between 0xf000 and 0xffff being reserved for experimental use.\r\n        </t>\r\n        <t>\r\n          New registrations are advised to provide the following information:\r\n          <list style=\"hanging\">\r\n            <t hangText=\"Name:\">\r\n              A symbolic name for the setting.  
Specifying a setting name is optional.\r\n            </t>\r\n            <t hangText=\"Code:\">\r\n              The 16-bit code assigned to the setting.\r\n            </t>\r\n            <t hangText=\"Initial Value:\">\r\n              An initial value for the setting.\r\n            </t>\r\n            <t hangText=\"Specification:\">\r\n              An optional reference to a specification that describes the use of the setting.\r\n            </t>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          An initial set of setting registrations can be found in <xref target=\"SettingValues\"/>.\r\n        </t>\r\n        <texttable align=\"left\" suppress-title=\"true\">\r\n          <ttcol>Name</ttcol>\r\n          <ttcol>Code</ttcol>\r\n          <ttcol>Initial Value</ttcol>\r\n          <ttcol>Specification</ttcol>\r\n          <c>HEADER_TABLE_SIZE</c>\r\n          <c>0x1</c><c>4096</c><c><xref target=\"SettingValues\"/></c>\r\n          <c>ENABLE_PUSH</c>\r\n          <c>0x2</c><c>1</c><c><xref target=\"SettingValues\"/></c>\r\n          <c>MAX_CONCURRENT_STREAMS</c>\r\n          <c>0x3</c><c>(infinite)</c><c><xref target=\"SettingValues\"/></c>\r\n          <c>INITIAL_WINDOW_SIZE</c>\r\n          <c>0x4</c><c>65535</c><c><xref target=\"SettingValues\"/></c>\r\n          <c>MAX_FRAME_SIZE</c>\r\n          <c>0x5</c><c>16384</c><c><xref target=\"SettingValues\"/></c>\r\n          <c>MAX_HEADER_LIST_SIZE</c>\r\n          <c>0x6</c><c>(infinite)</c><c><xref target=\"SettingValues\"/></c>\r\n        </texttable>\r\n\r\n      </section>\r\n\r\n      <section anchor=\"iana-errors\" title=\"Error Code Registry\">\r\n        <t>\r\n          This document establishes a registry for HTTP/2 error codes.  The \"HTTP/2 Error Code\"\r\n          registry manages a 32-bit space.  
The \"HTTP/2 Error Code\" registry operates under the\r\n          <xref target=\"RFC5226\">\"Expert Review\" policy</xref>.\r\n        </t>\r\n        <t>\r\n          Registrations for error codes are required to include a description of the error code.  An\r\n          expert reviewer is advised to examine new registrations for possible duplication with\r\n          existing error codes.  Use of existing registrations is to be encouraged, but not\r\n          mandated.\r\n        </t>\r\n        <t>\r\n          New registrations are advised to provide the following information:\r\n          <list style=\"hanging\">\r\n            <t hangText=\"Name:\">\r\n              A name for the error code.  Specifying an error code name is optional.\r\n            </t>\r\n            <t hangText=\"Code:\">\r\n              The 32-bit error code value.\r\n            </t>\r\n            <t hangText=\"Description:\">\r\n              A brief description of the error code semantics, longer if no detailed specification\r\n              is provided.\r\n            </t>\r\n            <t hangText=\"Specification:\">\r\n              An optional reference for a specification that defines the error code.\r\n            </t>\r\n          </list>\r\n        </t>\r\n        <t>\r\n          The entries in the following table are registered by this document.\r\n        </t>\r\n        <texttable align=\"left\" suppress-title=\"true\">\r\n          <ttcol>Name</ttcol>\r\n          <ttcol>Code</ttcol>\r\n          <ttcol>Description</ttcol>\r\n          <ttcol>Specification</ttcol>\r\n          <c>NO_ERROR</c><c>0x0</c>\r\n          <c>Graceful shutdown</c>\r\n          <c><xref target=\"ErrorCodes\"/></c>\r\n          <c>PROTOCOL_ERROR</c><c>0x1</c>\r\n          <c>Protocol error detected</c>\r\n          <c><xref target=\"ErrorCodes\"/></c>\r\n          <c>INTERNAL_ERROR</c><c>0x2</c>\r\n          <c>Implementation fault</c>\r\n          <c><xref target=\"ErrorCodes\"/></c>\r\n       
   <c>FLOW_CONTROL_ERROR</c><c>0x3</c>\r\n          <c>Flow control limits exceeded</c>\r\n          <c><xref target=\"ErrorCodes\"/></c>\r\n          <c>SETTINGS_TIMEOUT</c><c>0x4</c>\r\n          <c>Settings not acknowledged</c>\r\n          <c><xref target=\"ErrorCodes\"/></c>\r\n          <c>STREAM_CLOSED</c><c>0x5</c>\r\n          <c>Frame received for closed stream</c>\r\n          <c><xref target=\"ErrorCodes\"/></c>\r\n          <c>FRAME_SIZE_ERROR</c><c>0x6</c>\r\n          <c>Frame size incorrect</c>\r\n          <c><xref target=\"ErrorCodes\"/></c>\r\n          <c>REFUSED_STREAM</c><c>0x7</c>\r\n          <c>Stream not processed</c>\r\n          <c><xref target=\"ErrorCodes\"/></c>\r\n          <c>CANCEL</c><c>0x8</c>\r\n          <c>Stream cancelled</c>\r\n          <c><xref target=\"ErrorCodes\"/></c>\r\n          <c>COMPRESSION_ERROR</c><c>0x9</c>\r\n          <c>Compression state not updated</c>\r\n          <c><xref target=\"ErrorCodes\"/></c>\r\n          <c>CONNECT_ERROR</c><c>0xa</c>\r\n          <c>TCP connection error for CONNECT method</c>\r\n          <c><xref target=\"ErrorCodes\"/></c>\r\n          <c>ENHANCE_YOUR_CALM</c><c>0xb</c>\r\n          <c>Processing capacity exceeded</c>\r\n          <c><xref target=\"ErrorCodes\"/></c>\r\n          <c>INADEQUATE_SECURITY</c><c>0xc</c>\r\n          <c>Negotiated TLS parameters not acceptable</c>\r\n          <c><xref target=\"ErrorCodes\"/></c>\r\n        </texttable>\r\n\r\n      </section>\r\n\r\n      <section title=\"HTTP2-Settings Header Field Registration\">\r\n        <t>\r\n          This section registers the <spanx style=\"verb\">HTTP2-Settings</spanx> header field in the\r\n          <xref target=\"BCP90\">Permanent Message Header Field Registry</xref>.\r\n          <list style=\"hanging\">\r\n            <t hangText=\"Header field name:\">\r\n              HTTP2-Settings\r\n            </t>\r\n            <t hangText=\"Applicable protocol:\">\r\n              http\r\n            
</t>\r\n            <t hangText=\"Status:\">\r\n              standard\r\n            </t>\r\n            <t hangText=\"Author/Change controller:\">\r\n              IETF\r\n            </t>\r\n            <t hangText=\"Specification document(s):\">\r\n              <xref target=\"Http2SettingsHeader\"/> of this document\r\n            </t>\r\n            <t hangText=\"Related information:\">\r\n              This header field is only used by an HTTP/2 client for Upgrade-based negotiation.\r\n            </t>\r\n          </list>\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"PRI Method Registration\">\r\n        <t>\r\n          This section registers the <spanx style=\"verb\">PRI</spanx> method in the HTTP Method\r\n          Registry (<xref target=\"RFC7231\" x:fmt=\",\" x:rel=\"#method.registry\"/>).\r\n          <list style=\"hanging\">\r\n            <t hangText=\"Method Name:\">\r\n              PRI\r\n            </t>\r\n            <t hangText=\"Safe\">\r\n              No\r\n            </t>\r\n            <t hangText=\"Idempotent\">\r\n              No\r\n            </t>\r\n            <t hangText=\"Specification document(s)\">\r\n              <xref target=\"ConnectionHeader\"/> of this document\r\n            </t>\r\n            <t hangText=\"Related information:\">\r\n              This method is never used by an actual client. 
This method will appear to be used\r\n              when an HTTP/1.1 server or intermediary attempts to parse an HTTP/2 connection\r\n              preface.\r\n            </t>\r\n          </list>\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"The 421 (Misdirected Request) HTTP Status Code\"\r\n               anchor=\"iana-MisdirectedRequest\">\r\n        <t>\r\n          This document registers the 421 (Misdirected Request) HTTP Status code in the Hypertext\r\n          Transfer Protocol (HTTP) Status Code Registry (<xref target=\"RFC7231\" x:fmt=\",\"\r\n          x:rel=\"#status.code.registry\"/>).\r\n        </t>\r\n        <t>\r\n          <list style=\"hanging\">\r\n            <t hangText=\"Status Code:\">\r\n              421\r\n            </t>\r\n            <t hangText=\"Short Description:\">\r\n              Misdirected Request\r\n            </t>\r\n            <t hangText=\"Specification:\">\r\n              <xref target=\"MisdirectedRequest\"/> of this document\r\n            </t>\r\n          </list>\r\n        </t>\r\n      </section>\r\n\r\n    </section>\r\n\r\n    <section title=\"Acknowledgements\">\r\n      <t>\r\n        This document includes substantial input from the following individuals:\r\n        <list style=\"symbols\">\r\n          <t>\r\n            Adam Langley, Wan-Teh Chang, Jim Morrison, Mark Nottingham, Alyssa Wilk, Costin\r\n            Manolache, William Chan, Vitaliy Lvin, Joe Chan, Adam Barth, Ryan Hamilton, Gavin\r\n            Peters, Kent Alstad, Kevin Lindsay, Paul Amer, Fan Yang, Jonathan Leighton (SPDY\r\n            contributors).\r\n          </t>\r\n          <t>\r\n            Gabriel Montenegro and Willy Tarreau (Upgrade mechanism).\r\n          </t>\r\n          <t>\r\n            William Chan, Salvatore Loreto, Osama Mazahir, Gabriel Montenegro, Jitu Padhye, Roberto\r\n            Peon, Rob Trace (Flow control).\r\n          </t>\r\n          <t>\r\n            Mike Bishop 
(Extensibility).\r\n          </t>\r\n          <t>\r\n            Mark Nottingham, Julian Reschke, James Snell, Jeff Pinner, Mike Bishop, Herve Ruellan\r\n            (Substantial editorial contributions).\r\n          </t>\r\n          <t>\r\n            Kari Hurtta, Tatsuhiro Tsujikawa, Greg Wilkins, Poul-Henning Kamp.\r\n          </t>\r\n          <t>\r\n            Alexey Melnikov was an editor of this document during 2013.\r\n          </t>\r\n          <t>\r\n            A substantial proportion of Martin's contribution was supported by Microsoft during his\r\n            employment there.\r\n          </t>\r\n        </list>\r\n      </t>\r\n    </section>\r\n  </middle>\r\n\r\n  <back>\r\n    <references title=\"Normative References\">\r\n      <reference anchor=\"COMPRESSION\">\r\n        <front>\r\n          <title>HPACK - Header Compression for HTTP/2</title>\r\n          <author initials=\"H.\" surname=\"Ruellan\" fullname=\"Herve Ruellan\"/>\r\n          <author initials=\"R.\" surname=\"Peon\" fullname=\"Roberto Peon\"/>\r\n          <date month=\"July\" year=\"2014\" />\r\n        </front>\r\n        <seriesInfo name=\"Internet-Draft\" value=\"draft-ietf-httpbis-header-compression-09\" />\r\n        <x:source href=\"refs/draft-ietf-httpbis-header-compression-09.xml\"/>\r\n      </reference>\r\n\r\n      <reference anchor=\"TCP\">\r\n        <front>\r\n          <title abbrev=\"Transmission Control Protocol\">\r\n            Transmission Control Protocol\r\n          </title>\r\n          <author initials=\"J.\" surname=\"Postel\" fullname=\"Jon Postel\">\r\n            <organization>University of Southern California (USC)/Information Sciences\r\n            Institute</organization>\r\n          </author>\r\n          <date year=\"1981\" month=\"September\" />\r\n        </front>\r\n        <seriesInfo name=\"STD\" value=\"7\" />\r\n        <seriesInfo name=\"RFC\" value=\"793\" />\r\n      </reference>\r\n\r\n      <reference 
anchor=\"RFC2119\">\r\n        <front>\r\n          <title>\r\n            Key words for use in RFCs to Indicate Requirement Levels\r\n          </title>\r\n          <author initials=\"S.\" surname=\"Bradner\" fullname=\"Scott Bradner\">\r\n            <organization>Harvard University</organization>\r\n            <address><email>sob@harvard.edu</email></address>\r\n          </author>\r\n          <date month=\"March\" year=\"1997\"/>\r\n        </front>\r\n        <seriesInfo name=\"BCP\" value=\"14\"/>\r\n        <seriesInfo name=\"RFC\" value=\"2119\"/>\r\n      </reference>\r\n\r\n     <reference anchor=\"RFC2818\">\r\n        <front>\r\n          <title>\r\n            HTTP Over TLS\r\n          </title>\r\n          <author initials=\"E.\" surname=\"Rescorla\" fullname=\"Eric Rescorla\"/>\r\n          <date month=\"May\" year=\"2000\"/>\r\n        </front>\r\n        <seriesInfo name=\"RFC\" value=\"2818\"/>\r\n      </reference>\r\n\r\n      <reference anchor=\"RFC3986\">\r\n        <front>\r\n          <title abbrev=\"URI Generic Syntax\">Uniform Resource Identifier (URI): Generic\r\n          Syntax</title>\r\n          <author initials=\"T.\" surname=\"Berners-Lee\" fullname=\"Tim Berners-Lee\"></author>\r\n          <author initials=\"R.\" surname=\"Fielding\" fullname=\"Roy T. Fielding\"></author>\r\n          <author initials=\"L.\" surname=\"Masinter\" fullname=\"Larry Masinter\"></author>\r\n          <date year=\"2005\" month=\"January\" />\r\n        </front>\r\n        <seriesInfo name=\"STD\" value=\"66\" />\r\n        <seriesInfo name=\"RFC\" value=\"3986\" />\r\n      </reference>\r\n\r\n      <reference anchor=\"RFC4648\">\r\n        <front>\r\n          <title>The Base16, Base32, and Base64 Data Encodings</title>\r\n          <author fullname=\"S. 
Josefsson\" initials=\"S.\" surname=\"Josefsson\"/>\r\n          <date year=\"2006\" month=\"October\"/>\r\n        </front>\r\n        <seriesInfo value=\"4648\" name=\"RFC\"/>\r\n      </reference>\r\n\r\n      <reference anchor=\"RFC5226\">\r\n        <front>\r\n          <title>Guidelines for Writing an IANA Considerations Section in RFCs</title>\r\n          <author initials=\"T.\" surname=\"Narten\" fullname=\"T. Narten\"/>\r\n          <author initials=\"H.\" surname=\"Alvestrand\" fullname=\"H. Alvestrand\"/>\r\n          <date year=\"2008\" month=\"May\" />\r\n        </front>\r\n        <seriesInfo name=\"BCP\" value=\"26\" />\r\n        <seriesInfo name=\"RFC\" value=\"5226\" />\r\n      </reference>\r\n\r\n      <reference anchor=\"RFC5234\">\r\n        <front>\r\n          <title>Augmented BNF for Syntax Specifications: ABNF</title>\r\n          <author initials=\"D.\" surname=\"Crocker\" fullname=\"D. Crocker\"/>\r\n          <author initials=\"P.\" surname=\"Overell\" fullname=\"P. Overell\"/>\r\n          <date year=\"2008\" month=\"January\" />\r\n        </front>\r\n        <seriesInfo name=\"STD\" value=\"68\" />\r\n        <seriesInfo name=\"RFC\" value=\"5234\" />\r\n      </reference>\r\n\r\n      <reference anchor=\"TLS12\">\r\n        <front>\r\n          <title>The Transport Layer Security (TLS) Protocol Version 1.2</title>\r\n          <author initials=\"T.\" surname=\"Dierks\" fullname=\"Tim Dierks\"/>\r\n          <author initials=\"E.\" surname=\"Rescorla\" fullname=\"Eric Rescorla\"/>\r\n          <date year=\"2008\" month=\"August\" />\r\n        </front>\r\n        <seriesInfo name=\"RFC\" value=\"5246\" />\r\n      </reference>\r\n\r\n      <reference anchor=\"TLS-EXT\">\r\n        <front>\r\n          <title>\r\n            Transport Layer Security (TLS) Extensions: Extension Definitions\r\n          </title>\r\n          <author initials=\"D.\" surname=\"Eastlake\" fullname=\"D. 
Eastlake\"/>\r\n          <date year=\"2011\" month=\"January\"/>\r\n        </front>\r\n        <seriesInfo name=\"RFC\" value=\"6066\"/>\r\n      </reference>\r\n\r\n      <reference anchor=\"TLS-ALPN\">\r\n        <front>\r\n          <title>Transport Layer Security (TLS) Application-Layer Protocol Negotiation Extension</title>\r\n          <author initials=\"S.\" surname=\"Friedl\" fullname=\"Stephan Friedl\"></author>\r\n          <author initials=\"A.\" surname=\"Popov\" fullname=\"Andrei Popov\"></author>\r\n          <author initials=\"A.\" surname=\"Langley\" fullname=\"Adam Langley\"></author>\r\n          <author initials=\"E.\" surname=\"Stephan\" fullname=\"Emile Stephan\"></author>\r\n          <date month=\"July\" year=\"2014\" />\r\n        </front>\r\n        <seriesInfo name=\"RFC\" value=\"7301\" />\r\n      </reference>\r\n\r\n      <reference anchor=\"TLS-ECDHE\">\r\n        <front>\r\n          <title>\r\n            TLS Elliptic Curve Cipher Suites with SHA-256/384 and AES Galois\r\n            Counter Mode (GCM)\r\n          </title>\r\n          <author initials=\"E.\" surname=\"Rescorla\" fullname=\"E. Rescorla\"/>\r\n          <date year=\"2008\" month=\"August\" />\r\n        </front>\r\n        <seriesInfo name=\"RFC\" value=\"5289\" />\r\n      </reference>\r\n\r\n      <reference anchor=\"FIPS186\">\r\n        <front>\r\n          <title>\r\n            Digital Signature Standard (DSS)\r\n          </title>\r\n          <author><organization>NIST</organization></author>\r\n          <date year=\"2013\" month=\"July\" />\r\n        </front>\r\n        <seriesInfo name=\"FIPS\" value=\"PUB 186-4\" />\r\n      </reference>\r\n\r\n      <reference anchor=\"RFC7230\">\r\n        <front>\r\n          <title>\r\n          Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing</title>\r\n          <author fullname=\"Roy T. 
Fielding\" initials=\"R.\" role=\"editor\" surname=\"Fielding\">\r\n            <organization abbrev=\"Adobe\">Adobe Systems Incorporated</organization>\r\n            <address><email>fielding@gbiv.com</email></address>\r\n          </author>\r\n          <author fullname=\"Julian F. Reschke\" initials=\"J. F.\" role=\"editor\" surname=\"Reschke\">\r\n            <organization abbrev=\"greenbytes\">greenbytes GmbH</organization>\r\n            <address><email>julian.reschke@greenbytes.de</email></address>\r\n          </author>\r\n          <date month=\"June\" year=\"2014\" />\r\n        </front>\r\n        <seriesInfo name=\"RFC\" value=\"7230\" />\r\n        <x:source href=\"refs/rfc7230.xml\"\r\n                  basename=\"https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230\"/>\r\n      </reference>\r\n      <reference anchor=\"RFC7231\">\r\n        <front>\r\n          <title>\r\n          Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content</title>\r\n          <author fullname=\"Roy T. Fielding\" initials=\"R.\" role=\"editor\" surname=\"Fielding\">\r\n            <organization abbrev=\"Adobe\">Adobe Systems Incorporated</organization>\r\n            <address><email>fielding@gbiv.com</email></address>\r\n          </author>\r\n          <author fullname=\"Julian F. Reschke\" initials=\"J. 
F.\" role=\"editor\" surname=\"Reschke\">\r\n            <organization abbrev=\"greenbytes\">greenbytes GmbH</organization>\r\n            <address><email>julian.reschke@greenbytes.de</email></address>\r\n          </author>\r\n          <date month=\"June\" year=\"2014\" />\r\n        </front>\r\n        <seriesInfo name=\"RFC\" value=\"7231\" />\r\n        <x:source href=\"refs/rfc7231.xml\"\r\n                  basename=\"https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7231\"/>\r\n      </reference>\r\n      <reference anchor=\"RFC7232\">\r\n        <front>\r\n          <title>Hypertext Transfer Protocol (HTTP/1.1): Conditional Requests</title>\r\n          <author fullname=\"Roy T. Fielding\" initials=\"R.\" role=\"editor\" surname=\"Fielding\">\r\n            <organization abbrev=\"Adobe\">Adobe Systems Incorporated</organization>\r\n            <address><email>fielding@gbiv.com</email></address>\r\n          </author>\r\n          <author fullname=\"Julian F. Reschke\" initials=\"J. F.\" role=\"editor\" surname=\"Reschke\">\r\n            <organization abbrev=\"greenbytes\">greenbytes GmbH</organization>\r\n            <address><email>julian.reschke@greenbytes.de</email></address>\r\n          </author>\r\n          <date month=\"June\" year=\"2014\" />\r\n        </front>\r\n        <seriesInfo name=\"RFC\" value=\"7232\" />\r\n      </reference>\r\n      <reference anchor=\"RFC7233\">\r\n        <front>\r\n          <title>Hypertext Transfer Protocol (HTTP/1.1): Range Requests</title>\r\n          <author initials=\"R.\" surname=\"Fielding\" fullname=\"Roy T. 
Fielding\" role=\"editor\">\r\n            <organization abbrev=\"Adobe\">Adobe Systems Incorporated</organization>\r\n            <address><email>fielding@gbiv.com</email></address>\r\n          </author>\r\n          <author initials=\"Y.\" surname=\"Lafon\" fullname=\"Yves Lafon\" role=\"editor\">\r\n            <organization abbrev=\"W3C\">World Wide Web Consortium</organization>\r\n            <address><email>ylafon@w3.org</email></address>\r\n          </author>\r\n          <author initials=\"J. F.\" surname=\"Reschke\" fullname=\"Julian F. Reschke\" role=\"editor\">\r\n            <organization abbrev=\"greenbytes\">greenbytes GmbH</organization>\r\n            <address><email>julian.reschke@greenbytes.de</email></address>\r\n          </author>\r\n          <date month=\"June\" year=\"2014\" />\r\n        </front>\r\n        <seriesInfo name=\"RFC\" value=\"7233\" />\r\n      </reference>\r\n      <reference anchor=\"RFC7234\">\r\n        <front>\r\n          <title>Hypertext Transfer Protocol (HTTP/1.1): Caching</title>\r\n          <author initials=\"R.\" surname=\"Fielding\" fullname=\"Roy T. Fielding\" role=\"editor\">\r\n            <organization abbrev=\"Adobe\">Adobe Systems Incorporated</organization>\r\n            <address><email>fielding@gbiv.com</email></address>\r\n          </author>\r\n          <author fullname=\"Mark Nottingham\" initials=\"M.\" role=\"editor\" surname=\"Nottingham\">\r\n            <organization>Akamai</organization>\r\n            <address><email>mnot@mnot.net</email></address>\r\n          </author>\r\n          <author initials=\"J. F.\" surname=\"Reschke\" fullname=\"Julian F. 
Reschke\" role=\"editor\">\r\n            <organization abbrev=\"greenbytes\">greenbytes GmbH</organization>\r\n            <address><email>julian.reschke@greenbytes.de</email></address>\r\n          </author>\r\n          <date month=\"June\" year=\"2014\" />\r\n        </front>\r\n        <seriesInfo name=\"RFC\" value=\"7234\"/>\r\n        <x:source href=\"refs/rfc7234.xml\"\r\n                  basename=\"https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7234\"/>\r\n      </reference>\r\n      <reference anchor=\"RFC7235\">\r\n        <front>\r\n          <title>Hypertext Transfer Protocol (HTTP/1.1): Authentication</title>\r\n          <author initials=\"R.\" surname=\"Fielding\" fullname=\"Roy T. Fielding\" role=\"editor\">\r\n            <organization abbrev=\"Adobe\">Adobe Systems Incorporated</organization>\r\n            <address><email>fielding@gbiv.com</email></address>\r\n          </author>\r\n          <author initials=\"J. F.\" surname=\"Reschke\" fullname=\"Julian F. Reschke\" role=\"editor\">\r\n            <organization abbrev=\"greenbytes\">greenbytes GmbH</organization>\r\n            <address><email>julian.reschke@greenbytes.de</email></address>\r\n          </author>\r\n          <date month=\"June\" year=\"2014\" />\r\n        </front>\r\n        <seriesInfo name=\"RFC\" value=\"7235\"/>\r\n        <x:source href=\"refs/rfc7235.xml\"\r\n                  basename=\"https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7235\"/>\r\n      </reference>\r\n\r\n      <reference anchor=\"COOKIE\">\r\n        <front>\r\n          <title>HTTP State Management Mechanism</title>\r\n          <author initials=\"A.\" surname=\"Barth\" fullname=\"A. 
Barth\"/>\r\n          <date year=\"2011\" month=\"April\" />\r\n        </front>\r\n        <seriesInfo name=\"RFC\" value=\"6265\" />\r\n      </reference>\r\n    </references>\r\n\r\n    <references title=\"Informative References\">\r\n      <reference anchor=\"RFC1323\">\r\n        <front>\r\n          <title>\r\n            TCP Extensions for High Performance\r\n          </title>\r\n          <author initials=\"V.\" surname=\"Jacobson\" fullname=\"Van Jacobson\"></author>\r\n          <author initials=\"B.\" surname=\"Braden\" fullname=\"Bob Braden\"></author>\r\n          <author initials=\"D.\" surname=\"Borman\" fullname=\"Dave Borman\"></author>\r\n          <date year=\"1992\" month=\"May\" />\r\n        </front>\r\n        <seriesInfo name=\"RFC\" value=\"1323\" />\r\n      </reference>\r\n\r\n      <reference anchor=\"RFC3749\">\r\n        <front>\r\n          <title>Transport Layer Security Protocol Compression Methods</title>\r\n          <author initials=\"S.\" surname=\"Hollenbeck\" fullname=\"S. Hollenbeck\"/>\r\n          <date year=\"2004\" month=\"May\" />\r\n        </front>\r\n        <seriesInfo name=\"RFC\" value=\"3749\" />\r\n      </reference>\r\n\r\n      <reference anchor=\"RFC6585\">\r\n        <front>\r\n          <title>Additional HTTP Status Codes</title>\r\n          <author initials=\"M.\" surname=\"Nottingham\" fullname=\"Mark Nottingham\"/>\r\n          <author initials=\"R.\" surname=\"Fielding\" fullname=\"Roy Fielding\"/>\r\n          <date year=\"2012\" month=\"April\" />\r\n        </front>\r\n        <seriesInfo name=\"RFC\" value=\"6585\" />\r\n      </reference>\r\n\r\n      <reference anchor=\"RFC4492\">\r\n        <front>\r\n          <title>\r\n            Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS)\r\n          </title>\r\n          <author initials=\"S.\" surname=\"Blake-Wilson\" fullname=\"S. 
Blake-Wilson\"/>\r\n          <author initials=\"N.\" surname=\"Bolyard\" fullname=\"N. Bolyard\"/>\r\n          <author initials=\"V.\" surname=\"Gupta\" fullname=\"V. Gupta\"/>\r\n          <author initials=\"C.\" surname=\"Hawk\" fullname=\"C. Hawk\"/>\r\n          <author initials=\"B.\" surname=\"Moeller\" fullname=\"B. Moeller\"/>\r\n          <date year=\"2006\" month=\"May\" />\r\n        </front>\r\n        <seriesInfo name=\"RFC\" value=\"4492\" />\r\n      </reference>\r\n\r\n      <reference anchor=\"RFC5288\">\r\n        <front>\r\n          <title>\r\n            AES Galois Counter Mode (GCM) Cipher Suites for TLS\r\n          </title>\r\n          <author initials=\"J.\" surname=\"Salowey\" fullname=\"J. Salowey\"/>\r\n          <author initials=\"A.\" surname=\"Choudhury\" fullname=\"A. Choudhury\"/>\r\n          <author initials=\"D.\" surname=\"McGrew\" fullname=\"D. McGrew\"/>\r\n          <date year=\"2008\" month=\"August\" />\r\n        </front>\r\n        <seriesInfo name=\"RFC\" value=\"5288\" />\r\n      </reference>\r\n\r\n      <reference anchor='HTML5'\r\n           target='http://www.w3.org/TR/2014/CR-html5-20140731/'>\r\n        <front>\r\n          <title>HTML5</title>\r\n          <author fullname='Robin Berjon' surname='Berjon' initials='R.'/>\r\n          <author fullname='Steve Faulkner' surname='Faulkner' initials='S.'/>\r\n          <author fullname='Travis Leithead' surname='Leithead' initials='T.'/>\r\n          <author fullname='Erika Doyle Navara' surname='Doyle Navara' initials='E.'/>\r\n          <author fullname='Edward O&apos;Connor' surname='O&apos;Connor' initials='E.'/>\r\n          <author fullname='Silvia Pfeiffer' surname='Pfeiffer' initials='S.'/>\r\n          <date year='2014' month='July' day='31'/>\r\n        </front>\r\n        <seriesInfo name='W3C Candidate Recommendation' value='CR-html5-20140731'/>\r\n        <annotation>\r\n          Latest version available at\r\n          <eref 
target='http://www.w3.org/TR/html5/'/>.\r\n        </annotation>\r\n      </reference>\r\n\r\n      <reference anchor=\"TALKING\" target=\"http://w2spconf.com/2011/papers/websocket.pdf\">\r\n        <front>\r\n          <title>\r\n            Talking to Yourself for Fun and Profit\r\n          </title>\r\n          <author initials=\"L-S.\" surname=\"Huang\"/>\r\n          <author initials=\"E.\" surname=\"Chen\"/>\r\n          <author initials=\"A.\" surname=\"Barth\"/>\r\n          <author initials=\"E.\" surname=\"Rescorla\"/>\r\n          <author initials=\"C.\" surname=\"Jackson\"/>\r\n          <date year=\"2011\" />\r\n        </front>\r\n      </reference>\r\n\r\n      <reference anchor=\"BREACH\"\r\n                 target=\"http://breachattack.com/resources/BREACH%20-%20SSL,%20gone%20in%2030%20seconds.pdf\">\r\n        <front>\r\n          <title>\r\n            BREACH: Reviving the CRIME Attack\r\n          </title>\r\n          <author initials=\"Y.\" surname=\"Gluck\"/>\r\n          <author initials=\"N.\" surname=\"Harris\"/>\r\n          <author initials=\"A.\" surname=\"Prado\"/>\r\n          <date year=\"2013\" month=\"July\" day=\"12\"/>\r\n        </front>\r\n      </reference>\r\n\r\n      <reference anchor=\"BCP90\">\r\n        <front>\r\n          <title>Registration Procedures for Message Header Fields</title>\r\n          <author initials=\"G.\" surname=\"Klyne\" fullname=\"G. Klyne\">\r\n            <organization>Nine by Nine</organization>\r\n            <address><email>GK-IETF@ninebynine.org</email></address>\r\n          </author>\r\n          <author initials=\"M.\" surname=\"Nottingham\" fullname=\"M. Nottingham\">\r\n            <organization>BEA Systems</organization>\r\n            <address><email>mnot@pobox.com</email></address>\r\n          </author>\r\n          <author initials=\"J.\" surname=\"Mogul\" fullname=\"J. 
Mogul\">\r\n            <organization>HP Labs</organization>\r\n            <address><email>JeffMogul@acm.org</email></address>\r\n          </author>\r\n          <date year=\"2004\" month=\"September\" />\r\n        </front>\r\n        <seriesInfo name=\"BCP\" value=\"90\" />\r\n        <seriesInfo name=\"RFC\" value=\"3864\" />\r\n      </reference>\r\n\r\n      <reference anchor=\"TLSBCP\">\r\n        <front>\r\n          <title>Recommendations for Secure Use of TLS and DTLS</title>\r\n          <author initials=\"Y\" surname=\"Sheffer\" fullname=\"Yaron Sheffer\">\r\n            <organization />\r\n          </author>\r\n          <author initials=\"R\" surname=\"Holz\" fullname=\"Ralph Holz\">\r\n            <organization />\r\n          </author>\r\n          <author initials=\"P\" surname=\"Saint-Andre\" fullname=\"Peter Saint-Andre\">\r\n            <organization />\r\n          </author>\r\n          <date month=\"June\" day=\"23\" year=\"2014\" />\r\n        </front>\r\n        <seriesInfo name=\"Internet-Draft\" value=\"draft-ietf-uta-tls-bcp-01\" />\r\n      </reference>\r\n\r\n      <reference anchor=\"ALT-SVC\">\r\n        <front>\r\n          <title>\r\n            HTTP Alternative Services\r\n          </title>\r\n          <author initials=\"M.\" surname=\"Nottingham\" fullname=\"Mark Nottingham\">\r\n            <organization>Akamai</organization>\r\n          </author>\r\n          <author initials=\"P.\" surname=\"McManus\" fullname=\"Patrick McManus\">\r\n            <organization>Mozilla</organization>\r\n          </author>\r\n          <author initials=\"J.\" surname=\"Reschke\" fullname=\"Julian Reschke\">\r\n            <organization>greenbytes</organization>\r\n          </author>\r\n          <date year=\"2014\" month=\"April\"/>\r\n        </front>\r\n        <seriesInfo name=\"Internet-Draft\" value=\"draft-ietf-httpbis-alt-svc-02\"/>\r\n        <x:source href=\"refs/draft-ietf-httpbis-alt-svc-02.xml\"/>\r\n      </reference>\r\n    
</references>\r\n\r\n    <section title=\"Change Log\" anchor=\"change.log\">\r\n      <t>\r\n        This section is to be removed by RFC Editor before publication.\r\n      </t>\r\n\r\n      <section title=\"Since draft-ietf-httpbis-http2-14\" anchor=\"changes.since.draft-ietf-httpbis-http2-14\">\r\n        <t>\r\n          Renamed Not Authoritative status code to Misdirected Request.\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"Since draft-ietf-httpbis-http2-13\" anchor=\"changes.since.draft-ietf-httpbis-http2-13\">\r\n        <t>\r\n          Pseudo-header fields are now required to appear strictly before regular ones.\r\n        </t>\r\n        <t>\r\n          Restored 1xx series status codes, except 101.\r\n        </t>\r\n        <t>\r\n          Changed frame length field 24-bits.  Expanded frame header to 9 octets.  Added a setting\r\n          to limit the damage.\r\n        </t>\r\n        <t>\r\n          Added a setting to advise peers of header set size limits.\r\n        </t>\r\n        <t>\r\n          Removed segments.\r\n        </t>\r\n        <t>\r\n          Made non-semantic-bearing <x:ref>HEADERS</x:ref> frames illegal in the HTTP mapping.\r\n        </t>\r\n      </section>\r\n\r\n       <section title=\"Since draft-ietf-httpbis-http2-12\" anchor=\"changes.since.draft-ietf-httpbis-http2-12\">\r\n         <t>\r\n           Restored extensibility options.\r\n         </t>\r\n         <t>\r\n           Restricting TLS cipher suites to AEAD only.\r\n         </t>\r\n         <t>\r\n           Removing Content-Encoding requirements.\r\n         </t>\r\n         <t>\r\n           Permitting the use of <x:ref>PRIORITY</x:ref> after stream close.\r\n         </t>\r\n         <t>\r\n           Removed ALTSVC frame.\r\n         </t>\r\n         <t>\r\n           Removed BLOCKED frame.\r\n         </t>\r\n         <t>\r\n           Reducing the maximum padding size to 256 octets; removing padding from\r\n           
<x:ref>CONTINUATION</x:ref> frames.\r\n         </t>\r\n         <t>\r\n           Removed per-frame GZIP compression.\r\n         </t>\r\n       </section>\r\n\r\n       <section title=\"Since draft-ietf-httpbis-http2-11\" anchor=\"changes.since.draft-ietf-httpbis-http2-11\">\r\n         <t>\r\n           Added BLOCKED frame (at risk).\r\n         </t>\r\n         <t>\r\n           Simplified priority scheme.\r\n         </t>\r\n         <t>\r\n           Added <x:ref>DATA</x:ref> per-frame GZIP compression.\r\n         </t>\r\n       </section>\r\n\r\n       <section title=\"Since draft-ietf-httpbis-http2-10\" anchor=\"changes.since.draft-ietf-httpbis-http2-10\">\r\n        <t>\r\n          Changed \"connection header\" to \"connection preface\" to avoid confusion.\r\n        </t>\r\n        <t>\r\n          Added dependency-based stream prioritization.\r\n        </t>\r\n        <t>\r\n          Added \"h2c\" identifier to distinguish between cleartext and secured HTTP/2.\r\n        </t>\r\n        <t>\r\n          Adding missing padding to <x:ref>PUSH_PROMISE</x:ref>.\r\n        </t>\r\n        <t>\r\n          Integrate ALTSVC frame and supporting text.\r\n        </t>\r\n        <t>\r\n          Dropping requirement on \"deflate\" Content-Encoding.\r\n        </t>\r\n        <t>\r\n          Improving security considerations around use of compression.\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"Since draft-ietf-httpbis-http2-09\" anchor=\"changes.since.draft-ietf-httpbis-http2-09\">\r\n        <t>\r\n          Adding padding for data frames.\r\n        </t>\r\n        <t>\r\n          Renumbering frame types, error codes, and settings.\r\n        </t>\r\n        <t>\r\n          Adding INADEQUATE_SECURITY error code.\r\n        </t>\r\n        <t>\r\n          Updating TLS usage requirements to 1.2; forbidding TLS compression.\r\n        </t>\r\n        <t>\r\n          Removing extensibility for frames and settings.\r\n        </t>\r\n  
      <t>\r\n          Changing setting identifier size.\r\n        </t>\r\n        <t>\r\n          Removing the ability to disable flow control.\r\n        </t>\r\n        <t>\r\n          Changing the protocol identification token to \"h2\".\r\n        </t>\r\n        <t>\r\n          Changing the use of :authority to make it optional and to allow userinfo in non-HTTP\r\n          cases.\r\n        </t>\r\n        <t>\r\n          Allowing split on 0x0 for Cookie.\r\n        </t>\r\n        <t>\r\n          Reserved PRI method in HTTP/1.1 to avoid possible future collisions.\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"Since draft-ietf-httpbis-http2-08\" anchor=\"changes.since.draft-ietf-httpbis-http2-08\">\r\n        <t>\r\n          Added cookie crumbling for more efficient header compression.\r\n        </t>\r\n        <t>\r\n          Added header field ordering with the value-concatenation mechanism.\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"Since draft-ietf-httpbis-http2-07\" anchor=\"changes.since.draft-ietf-httpbis-http2-07\">\r\n        <t>\r\n          Marked draft for implementation.\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"Since draft-ietf-httpbis-http2-06\" anchor=\"changes.since.draft-ietf-httpbis-http2-06\">\r\n        <t>\r\n          Adding definition for CONNECT method.\r\n        </t>\r\n        <t>\r\n          Constraining the use of push to safe, cacheable methods with no request body.\r\n        </t>\r\n        <t>\r\n          Changing from :host to :authority to remove any potential confusion.\r\n        </t>\r\n        <t>\r\n          Adding setting for header compression table size.\r\n        </t>\r\n        <t>\r\n          Adding settings acknowledgement.\r\n        </t>\r\n        <t>\r\n          Removing unnecessary and potentially problematic flags from CONTINUATION.\r\n        </t>\r\n        <t>\r\n          Added denial of service considerations.\r\n       
 </t>\r\n      </section>\r\n      <section title=\"Since draft-ietf-httpbis-http2-05\" anchor=\"changes.since.draft-ietf-httpbis-http2-05\">\r\n        <t>\r\n          Marking the draft ready for implementation.\r\n        </t>\r\n        <t>\r\n          Renumbering END_PUSH_PROMISE flag.\r\n        </t>\r\n        <t>\r\n          Editorial clarifications and changes.\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"Since draft-ietf-httpbis-http2-04\" anchor=\"changes.since.draft-ietf-httpbis-http2-04\">\r\n        <t>\r\n          Added CONTINUATION frame for HEADERS and PUSH_PROMISE.\r\n        </t>\r\n        <t>\r\n          PUSH_PROMISE is no longer implicitly prohibited if SETTINGS_MAX_CONCURRENT_STREAMS is\r\n          zero.\r\n        </t>\r\n        <t>\r\n          Push expanded to allow all safe methods without a request body.\r\n        </t>\r\n        <t>\r\n          Clarified the use of HTTP header fields in requests and responses.  Prohibited HTTP/1.1\r\n          hop-by-hop header fields.\r\n        </t>\r\n        <t>\r\n          Requiring that intermediaries not forward requests with missing or illegal routing\r\n          :-headers.\r\n        </t>\r\n        <t>\r\n          Clarified requirements around handling different frames after stream close, stream reset\r\n          and <x:ref>GOAWAY</x:ref>.\r\n        </t>\r\n        <t>\r\n          Added more specific prohibitions for sending of different frame types in various stream\r\n          states.\r\n        </t>\r\n        <t>\r\n          Making the last received setting value the effective value.\r\n        </t>\r\n        <t>\r\n          Clarified requirements on TLS version, extension and ciphers.\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"Since draft-ietf-httpbis-http2-03\" anchor=\"changes.since.draft-ietf-httpbis-http2-03\">\r\n        <t>\r\n          Committed major restructuring atrocities.\r\n        </t>\r\n        <t>\r\n          
Added reference to first header compression draft.\r\n        </t>\r\n        <t>\r\n          Added more formal description of frame lifecycle.\r\n        </t>\r\n        <t>\r\n          Moved END_STREAM (renamed from FINAL) back to <x:ref>HEADERS</x:ref>/<x:ref>DATA</x:ref>.\r\n        </t>\r\n        <t>\r\n          Removed HEADERS+PRIORITY, added optional priority to <x:ref>HEADERS</x:ref> frame.\r\n        </t>\r\n        <t>\r\n          Added <x:ref>PRIORITY</x:ref> frame.\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"Since draft-ietf-httpbis-http2-02\" anchor=\"changes.since.draft-ietf-httpbis-http2-02\">\r\n        <t>\r\n          Added continuations to frames carrying header blocks.\r\n        </t>\r\n        <t>\r\n          Replaced use of \"session\" with \"connection\" to avoid confusion with other HTTP stateful\r\n          concepts, like cookies.\r\n        </t>\r\n        <t>\r\n          Removed \"message\".\r\n        </t>\r\n        <t>\r\n          Switched to TLS ALPN from NPN.\r\n        </t>\r\n        <t>\r\n          Editorial changes.\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"Since draft-ietf-httpbis-http2-01\" anchor=\"changes.since.draft-ietf-httpbis-http2-01\">\r\n        <t>\r\n          Added IANA considerations section for frame types, error codes and settings.\r\n        </t>\r\n        <t>\r\n          Removed data frame compression.\r\n        </t>\r\n        <t>\r\n          Added <x:ref>PUSH_PROMISE</x:ref>.\r\n        </t>\r\n        <t>\r\n          Added globally applicable flags to framing.\r\n        </t>\r\n        <t>\r\n          Removed zlib-based header compression mechanism.\r\n        </t>\r\n        <t>\r\n          Updated references.\r\n        </t>\r\n        <t>\r\n          Clarified stream identifier reuse.\r\n        </t>\r\n        <t>\r\n          Removed CREDENTIALS frame and associated mechanisms.\r\n        </t>\r\n        <t>\r\n          Added advice 
against naive implementation of flow control.\r\n        </t>\r\n        <t>\r\n          Added session header section.\r\n        </t>\r\n        <t>\r\n          Restructured frame header.  Removed distinction between data and control frames.\r\n        </t>\r\n        <t>\r\n          Altered flow control properties to include session-level limits.\r\n        </t>\r\n        <t>\r\n          Added note on cacheability of pushed resources and multiple tenant servers.\r\n        </t>\r\n        <t>\r\n          Changed protocol label form based on discussions.\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"Since draft-ietf-httpbis-http2-00\" anchor=\"changes.since.draft-ietf-httpbis-http2-00\">\r\n        <t>\r\n          Changed title throughout.\r\n        </t>\r\n        <t>\r\n          Removed section on Incompatibilities with SPDY draft#2.\r\n        </t>\r\n        <t>\r\n          Changed <x:ref>INTERNAL_ERROR</x:ref> on <x:ref>GOAWAY</x:ref> to have a value of 2 <eref\r\n          target=\"https://groups.google.com/forum/?fromgroups#!topic/spdy-dev/cfUef2gL3iU\"/>.\r\n        </t>\r\n        <t>\r\n          Replaced abstract and introduction.\r\n        </t>\r\n        <t>\r\n          Added section on starting HTTP/2.0, including upgrade mechanism.\r\n        </t>\r\n        <t>\r\n          Removed unused references.\r\n        </t>\r\n        <t>\r\n          Added <xref target=\"fc-principles\">flow control principles</xref> based on <eref\r\n          target=\"https://tools.ietf.org/html/draft-montenegro-httpbis-http2-fc-principles-01\"/>.\r\n        </t>\r\n      </section>\r\n\r\n      <section title=\"Since draft-mbelshe-httpbis-spdy-00\" anchor=\"changes.since.draft-mbelshe-httpbis-spdy-00\">\r\n        <t>\r\n          Adopted as base for draft-ietf-httpbis-http2.\r\n        </t>\r\n        <t>\r\n          Updated authors/editors list.\r\n        </t>\r\n        <t>\r\n          Added status note.\r\n        </t>\r\n      
</section>\r\n    </section>\r\n\r\n  </back>\r\n</rfc>\r\n<!--\r\n  vim:et:tw=100:sw=2:\r\n  -->\r\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/transport.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Transport code.\n\npackage http2\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"compress/gzip\"\n\t\"crypto/rand\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"math\"\n\tmathrand \"math/rand\"\n\t\"net\"\n\t\"net/http\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org/x/net/http2/hpack\"\n\t\"golang.org/x/net/idna\"\n\t\"golang.org/x/net/lex/httplex\"\n)\n\nconst (\n\t// transportDefaultConnFlow is how many connection-level flow control\n\t// tokens we give the server at start-up, past the default 64k.\n\ttransportDefaultConnFlow = 1 << 30\n\n\t// transportDefaultStreamFlow is how many stream-level flow\n\t// control tokens we announce to the peer, and how many bytes\n\t// we buffer per stream.\n\ttransportDefaultStreamFlow = 4 << 20\n\n\t// transportDefaultStreamMinRefresh is the minimum number of bytes we'll send\n\t// a stream-level WINDOW_UPDATE for at a time.\n\ttransportDefaultStreamMinRefresh = 4 << 10\n\n\tdefaultUserAgent = \"Go-http-client/2.0\"\n)\n\n// Transport is an HTTP/2 Transport.\n//\n// A Transport internally caches connections to servers. It is safe\n// for concurrent use by multiple goroutines.\ntype Transport struct {\n\t// DialTLS specifies an optional dial function for creating\n\t// TLS connections for requests.\n\t//\n\t// If DialTLS is nil, tls.Dial is used.\n\t//\n\t// If the returned net.Conn has a ConnectionState method like tls.Conn,\n\t// it will be used to set http.Response.TLS.\n\tDialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)\n\n\t// TLSClientConfig specifies the TLS configuration to use with\n\t// tls.Client. 
If nil, the default configuration is used.\n\tTLSClientConfig *tls.Config\n\n\t// ConnPool optionally specifies an alternate connection pool to use.\n\t// If nil, the default is used.\n\tConnPool ClientConnPool\n\n\t// DisableCompression, if true, prevents the Transport from\n\t// requesting compression with an \"Accept-Encoding: gzip\"\n\t// request header when the Request contains no existing\n\t// Accept-Encoding value. If the Transport requests gzip on\n\t// its own and gets a gzipped response, it's transparently\n\t// decoded in the Response.Body. However, if the user\n\t// explicitly requested gzip it is not automatically\n\t// uncompressed.\n\tDisableCompression bool\n\n\t// AllowHTTP, if true, permits HTTP/2 requests using the insecure,\n\t// plain-text \"http\" scheme. Note that this does not enable h2c support.\n\tAllowHTTP bool\n\n\t// MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to\n\t// send in the initial settings frame. It is how many bytes\n\t// of response headers are allowed. Unlike the http2 spec, zero here\n\t// means to use a default limit (currently 10MB). If you actually\n\t// want to advertise an ulimited value to the peer, Transport\n\t// interprets the highest possible value here (0xffffffff or 1<<32-1)\n\t// to mean no limit.\n\tMaxHeaderListSize uint32\n\n\t// t1, if non-nil, is the standard library Transport using\n\t// this transport. 
Its settings are used (but not its\n\t// RoundTrip method, etc).\n\tt1 *http.Transport\n\n\tconnPoolOnce  sync.Once\n\tconnPoolOrDef ClientConnPool // non-nil version of ConnPool\n}\n\nfunc (t *Transport) maxHeaderListSize() uint32 {\n\tif t.MaxHeaderListSize == 0 {\n\t\treturn 10 << 20\n\t}\n\tif t.MaxHeaderListSize == 0xffffffff {\n\t\treturn 0\n\t}\n\treturn t.MaxHeaderListSize\n}\n\nfunc (t *Transport) disableCompression() bool {\n\treturn t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)\n}\n\nvar errTransportVersion = errors.New(\"http2: ConfigureTransport is only supported starting at Go 1.6\")\n\n// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.\n// It requires Go 1.6 or later and returns an error if the net/http package is too old\n// or if t1 has already been HTTP/2-enabled.\nfunc ConfigureTransport(t1 *http.Transport) error {\n\t_, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go\n\treturn err\n}\n\nfunc (t *Transport) connPool() ClientConnPool {\n\tt.connPoolOnce.Do(t.initConnPool)\n\treturn t.connPoolOrDef\n}\n\nfunc (t *Transport) initConnPool() {\n\tif t.ConnPool != nil {\n\t\tt.connPoolOrDef = t.ConnPool\n\t} else {\n\t\tt.connPoolOrDef = &clientConnPool{t: t}\n\t}\n}\n\n// ClientConn is the state of a single HTTP/2 client connection to an\n// HTTP/2 server.\ntype ClientConn struct {\n\tt         *Transport\n\ttconn     net.Conn             // usually *tls.Conn, except specialized impls\n\ttlsState  *tls.ConnectionState // nil only for specialized impls\n\tsingleUse bool                 // whether being used for a single http.Request\n\n\t// readLoop goroutine fields:\n\treaderDone chan struct{} // closed on error\n\treaderErr  error         // set before readerDone is closed\n\n\tidleTimeout time.Duration // or 0 for never\n\tidleTimer   *time.Timer\n\n\tmu              sync.Mutex // guards following\n\tcond            *sync.Cond // hold mu; broadcast on flow/closed 
changes\n\tflow            flow       // our conn-level flow control quota (cs.flow is per stream)\n\tinflow          flow       // peer's conn-level flow control\n\tclosed          bool\n\twantSettingsAck bool                     // we sent a SETTINGS frame and haven't heard back\n\tgoAway          *GoAwayFrame             // if non-nil, the GoAwayFrame we received\n\tgoAwayDebug     string                   // goAway frame's debug data, retained as a string\n\tstreams         map[uint32]*clientStream // client-initiated\n\tnextStreamID    uint32\n\tpendingRequests int                       // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams\n\tpings           map[[8]byte]chan struct{} // in flight ping data to notification channel\n\tbw              *bufio.Writer\n\tbr              *bufio.Reader\n\tfr              *Framer\n\tlastActive      time.Time\n\t// Settings from peer: (also guarded by mu)\n\tmaxFrameSize          uint32\n\tmaxConcurrentStreams  uint32\n\tpeerMaxHeaderListSize uint64\n\tinitialWindowSize     uint32\n\n\thbuf    bytes.Buffer // HPACK encoder writes into this\n\thenc    *hpack.Encoder\n\tfreeBuf [][]byte\n\n\twmu  sync.Mutex // held while writing; acquire AFTER mu if holding both\n\twerr error      // first write error that has occurred\n}\n\n// clientStream is the state for a single HTTP/2 stream. 
One of these\n// is created for each Transport.RoundTrip call.\ntype clientStream struct {\n\tcc            *ClientConn\n\treq           *http.Request\n\ttrace         *clientTrace // or nil\n\tID            uint32\n\tresc          chan resAndError\n\tbufPipe       pipe // buffered pipe with the flow-controlled response payload\n\tstartedWrite  bool // started request body write; guarded by cc.mu\n\trequestedGzip bool\n\ton100         func() // optional code to run if get a 100 continue response\n\n\tflow        flow  // guarded by cc.mu\n\tinflow      flow  // guarded by cc.mu\n\tbytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read\n\treadErr     error // sticky read error; owned by transportResponseBody.Read\n\tstopReqBody error // if non-nil, stop writing req body; guarded by cc.mu\n\tdidReset    bool  // whether we sent a RST_STREAM to the server; guarded by cc.mu\n\n\tpeerReset chan struct{} // closed on peer reset\n\tresetErr  error         // populated before peerReset is closed\n\n\tdone chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu\n\n\t// owned by clientConnReadLoop:\n\tfirstByte    bool // got the first response byte\n\tpastHeaders  bool // got first MetaHeadersFrame (actual headers)\n\tpastTrailers bool // got optional second MetaHeadersFrame (trailers)\n\n\ttrailer    http.Header  // accumulated trailers\n\tresTrailer *http.Header // client's Response.Trailer\n}\n\n// awaitRequestCancel waits for the user to cancel a request or for the done\n// channel to be signaled. 
A non-nil error is returned only if the request was\n// canceled.\nfunc awaitRequestCancel(req *http.Request, done <-chan struct{}) error {\n\tctx := reqContext(req)\n\tif req.Cancel == nil && ctx.Done() == nil {\n\t\treturn nil\n\t}\n\tselect {\n\tcase <-req.Cancel:\n\t\treturn errRequestCanceled\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase <-done:\n\t\treturn nil\n\t}\n}\n\n// awaitRequestCancel waits for the user to cancel a request, its context to\n// expire, or for the request to be done (any way it might be removed from the\n// cc.streams map: peer reset, successful completion, TCP connection breakage,\n// etc). If the request is canceled, then cs will be canceled and closed.\nfunc (cs *clientStream) awaitRequestCancel(req *http.Request) {\n\tif err := awaitRequestCancel(req, cs.done); err != nil {\n\t\tcs.cancelStream()\n\t\tcs.bufPipe.CloseWithError(err)\n\t}\n}\n\nfunc (cs *clientStream) cancelStream() {\n\tcc := cs.cc\n\tcc.mu.Lock()\n\tdidReset := cs.didReset\n\tcs.didReset = true\n\tcc.mu.Unlock()\n\n\tif !didReset {\n\t\tcc.writeStreamReset(cs.ID, ErrCodeCancel, nil)\n\t\tcc.forgetStreamID(cs.ID)\n\t}\n}\n\n// checkResetOrDone reports any error sent in a RST_STREAM frame by the\n// server, or errStreamClosed if the stream is complete.\nfunc (cs *clientStream) checkResetOrDone() error {\n\tselect {\n\tcase <-cs.peerReset:\n\t\treturn cs.resetErr\n\tcase <-cs.done:\n\t\treturn errStreamClosed\n\tdefault:\n\t\treturn nil\n\t}\n}\n\nfunc (cs *clientStream) abortRequestBodyWrite(err error) {\n\tif err == nil {\n\t\tpanic(\"nil error\")\n\t}\n\tcc := cs.cc\n\tcc.mu.Lock()\n\tcs.stopReqBody = err\n\tcc.cond.Broadcast()\n\tcc.mu.Unlock()\n}\n\ntype stickyErrWriter struct {\n\tw   io.Writer\n\terr *error\n}\n\nfunc (sew stickyErrWriter) Write(p []byte) (n int, err error) {\n\tif *sew.err != nil {\n\t\treturn 0, *sew.err\n\t}\n\tn, err = sew.w.Write(p)\n\t*sew.err = err\n\treturn\n}\n\nvar ErrNoCachedConn = errors.New(\"http2: no cached connection was 
available\")\n\n// RoundTripOpt are options for the Transport.RoundTripOpt method.\ntype RoundTripOpt struct {\n\t// OnlyCachedConn controls whether RoundTripOpt may\n\t// create a new TCP connection. If set true and\n\t// no cached connection is available, RoundTripOpt\n\t// will return ErrNoCachedConn.\n\tOnlyCachedConn bool\n}\n\nfunc (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {\n\treturn t.RoundTripOpt(req, RoundTripOpt{})\n}\n\n// authorityAddr returns a given authority (a host/IP, or host:port / ip:port)\n// and returns a host:port. The port 443 is added if needed.\nfunc authorityAddr(scheme string, authority string) (addr string) {\n\thost, port, err := net.SplitHostPort(authority)\n\tif err != nil { // authority didn't have a port\n\t\tport = \"443\"\n\t\tif scheme == \"http\" {\n\t\t\tport = \"80\"\n\t\t}\n\t\thost = authority\n\t}\n\tif a, err := idna.ToASCII(host); err == nil {\n\t\thost = a\n\t}\n\t// IPv6 address literal, without a port:\n\tif strings.HasPrefix(host, \"[\") && strings.HasSuffix(host, \"]\") {\n\t\treturn host + \":\" + port\n\t}\n\treturn net.JoinHostPort(host, port)\n}\n\n// RoundTripOpt is like RoundTrip, but takes options.\nfunc (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {\n\tif !(req.URL.Scheme == \"https\" || (req.URL.Scheme == \"http\" && t.AllowHTTP)) {\n\t\treturn nil, errors.New(\"http2: unsupported scheme\")\n\t}\n\n\taddr := authorityAddr(req.URL.Scheme, req.URL.Host)\n\tfor retry := 0; ; retry++ {\n\t\tcc, err := t.connPool().GetClientConn(req, addr)\n\t\tif err != nil {\n\t\t\tt.vlogf(\"http2: Transport failed to get client conn for %s: %v\", addr, err)\n\t\t\treturn nil, err\n\t\t}\n\t\ttraceGotConn(req, cc)\n\t\tres, err := cc.RoundTrip(req)\n\t\tif err != nil && retry <= 6 {\n\t\t\tafterBodyWrite := false\n\t\t\tif e, ok := err.(afterReqBodyWriteError); ok {\n\t\t\t\terr = e\n\t\t\t\tafterBodyWrite = true\n\t\t\t}\n\t\t\tif req, err = 
shouldRetryRequest(req, err, afterBodyWrite); err == nil {\n\t\t\t\t// After the first retry, do exponential backoff with 10% jitter.\n\t\t\t\tif retry == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbackoff := float64(uint(1) << (uint(retry) - 1))\n\t\t\t\tbackoff += backoff * (0.1 * mathrand.Float64())\n\t\t\t\tselect {\n\t\t\t\tcase <-time.After(time.Second * time.Duration(backoff)):\n\t\t\t\t\tcontinue\n\t\t\t\tcase <-reqContext(req).Done():\n\t\t\t\t\treturn nil, reqContext(req).Err()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\tt.vlogf(\"RoundTrip failure: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\t\treturn res, nil\n\t}\n}\n\n// CloseIdleConnections closes any connections which were previously\n// connected from previous requests but are now sitting idle.\n// It does not interrupt any connections currently in use.\nfunc (t *Transport) CloseIdleConnections() {\n\tif cp, ok := t.connPool().(clientConnPoolIdleCloser); ok {\n\t\tcp.closeIdleConnections()\n\t}\n}\n\nvar (\n\terrClientConnClosed    = errors.New(\"http2: client conn is closed\")\n\terrClientConnUnusable  = errors.New(\"http2: client conn not usable\")\n\terrClientConnGotGoAway = errors.New(\"http2: Transport received Server's graceful shutdown GOAWAY\")\n)\n\n// afterReqBodyWriteError is a wrapper around errors returned by ClientConn.RoundTrip.\n// It is used to signal that err happened after part of Request.Body was sent to the server.\ntype afterReqBodyWriteError struct {\n\terr error\n}\n\nfunc (e afterReqBodyWriteError) Error() string {\n\treturn e.err.Error() + \"; some request body already written\"\n}\n\n// shouldRetryRequest is called by RoundTrip when a request fails to get\n// response headers. 
It is always called with a non-nil error.\n// It returns either a request to retry (either the same request, or a\n// modified clone), or an error if the request can't be replayed.\nfunc shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) {\n\tif !canRetryError(err) {\n\t\treturn nil, err\n\t}\n\tif !afterBodyWrite {\n\t\treturn req, nil\n\t}\n\t// If the Body is nil (or http.NoBody), it's safe to reuse\n\t// this request and its Body.\n\tif req.Body == nil || reqBodyIsNoBody(req.Body) {\n\t\treturn req, nil\n\t}\n\t// Otherwise we depend on the Request having its GetBody\n\t// func defined.\n\tgetBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody\n\tif getBody == nil {\n\t\treturn nil, fmt.Errorf(\"http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error\", err)\n\t}\n\tbody, err := getBody()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnewReq := *req\n\tnewReq.Body = body\n\treturn &newReq, nil\n}\n\nfunc canRetryError(err error) bool {\n\tif err == errClientConnUnusable || err == errClientConnGotGoAway {\n\t\treturn true\n\t}\n\tif se, ok := err.(StreamError); ok {\n\t\treturn se.Code == ErrCodeRefusedStream\n\t}\n\treturn false\n}\n\nfunc (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {\n\thost, _, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttconn, err := t.dialTLS()(\"tcp\", addr, t.newTLSConfig(host))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn t.newClientConn(tconn, singleUse)\n}\n\nfunc (t *Transport) newTLSConfig(host string) *tls.Config {\n\tcfg := new(tls.Config)\n\tif t.TLSClientConfig != nil {\n\t\t*cfg = *cloneTLSConfig(t.TLSClientConfig)\n\t}\n\tif !strSliceContains(cfg.NextProtos, NextProtoTLS) {\n\t\tcfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)\n\t}\n\tif cfg.ServerName == \"\" {\n\t\tcfg.ServerName = host\n\t}\n\treturn cfg\n}\n\nfunc (t 
*Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) {\n\tif t.DialTLS != nil {\n\t\treturn t.DialTLS\n\t}\n\treturn t.dialTLSDefault\n}\n\nfunc (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) {\n\tcn, err := tls.Dial(network, addr, cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := cn.Handshake(); err != nil {\n\t\treturn nil, err\n\t}\n\tif !cfg.InsecureSkipVerify {\n\t\tif err := cn.VerifyHostname(cfg.ServerName); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tstate := cn.ConnectionState()\n\tif p := state.NegotiatedProtocol; p != NextProtoTLS {\n\t\treturn nil, fmt.Errorf(\"http2: unexpected ALPN protocol %q; want %q\", p, NextProtoTLS)\n\t}\n\tif !state.NegotiatedProtocolIsMutual {\n\t\treturn nil, errors.New(\"http2: could not negotiate protocol mutually\")\n\t}\n\treturn cn, nil\n}\n\n// disableKeepAlives reports whether connections should be closed as\n// soon as possible after handling the first request.\nfunc (t *Transport) disableKeepAlives() bool {\n\treturn t.t1 != nil && t.t1.DisableKeepAlives\n}\n\nfunc (t *Transport) expectContinueTimeout() time.Duration {\n\tif t.t1 == nil {\n\t\treturn 0\n\t}\n\treturn transportExpectContinueTimeout(t.t1)\n}\n\nfunc (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {\n\treturn t.newClientConn(c, false)\n}\n\nfunc (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {\n\tcc := &ClientConn{\n\t\tt:                     t,\n\t\ttconn:                 c,\n\t\treaderDone:            make(chan struct{}),\n\t\tnextStreamID:          1,\n\t\tmaxFrameSize:          16 << 10,           // spec default\n\t\tinitialWindowSize:     65535,              // spec default\n\t\tmaxConcurrentStreams:  1000,               // \"infinite\", per spec. 1000 seems good enough.\n\t\tpeerMaxHeaderListSize: 0xffffffffffffffff, // \"infinite\", per spec. 
Use 2^64-1 instead.\n\t\tstreams:               make(map[uint32]*clientStream),\n\t\tsingleUse:             singleUse,\n\t\twantSettingsAck:       true,\n\t\tpings:                 make(map[[8]byte]chan struct{}),\n\t}\n\tif d := t.idleConnTimeout(); d != 0 {\n\t\tcc.idleTimeout = d\n\t\tcc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)\n\t}\n\tif VerboseLogs {\n\t\tt.vlogf(\"http2: Transport creating client conn %p to %v\", cc, c.RemoteAddr())\n\t}\n\n\tcc.cond = sync.NewCond(&cc.mu)\n\tcc.flow.add(int32(initialWindowSize))\n\n\t// TODO: adjust this writer size to account for frame size +\n\t// MTU + crypto/tls record padding.\n\tcc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})\n\tcc.br = bufio.NewReader(c)\n\tcc.fr = NewFramer(cc.bw, cc.br)\n\tcc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)\n\tcc.fr.MaxHeaderListSize = t.maxHeaderListSize()\n\n\t// TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on\n\t// henc in response to SETTINGS frames?\n\tcc.henc = hpack.NewEncoder(&cc.hbuf)\n\n\tif cs, ok := c.(connectionStater); ok {\n\t\tstate := cs.ConnectionState()\n\t\tcc.tlsState = &state\n\t}\n\n\tinitialSettings := []Setting{\n\t\t{ID: SettingEnablePush, Val: 0},\n\t\t{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},\n\t}\n\tif max := t.maxHeaderListSize(); max != 0 {\n\t\tinitialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})\n\t}\n\n\tcc.bw.Write(clientPreface)\n\tcc.fr.WriteSettings(initialSettings...)\n\tcc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)\n\tcc.inflow.add(transportDefaultConnFlow + initialWindowSize)\n\tcc.bw.Flush()\n\tif cc.werr != nil {\n\t\treturn nil, cc.werr\n\t}\n\n\tgo cc.readLoop()\n\treturn cc, nil\n}\n\nfunc (cc *ClientConn) setGoAway(f *GoAwayFrame) {\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\n\told := cc.goAway\n\tcc.goAway = f\n\n\t// Merge the previous and current GoAway error frames.\n\tif cc.goAwayDebug == \"\" {\n\t\tcc.goAwayDebug = 
string(f.DebugData())\n\t}\n\tif old != nil && old.ErrCode != ErrCodeNo {\n\t\tcc.goAway.ErrCode = old.ErrCode\n\t}\n\tlast := f.LastStreamID\n\tfor streamID, cs := range cc.streams {\n\t\tif streamID > last {\n\t\t\tselect {\n\t\t\tcase cs.resc <- resAndError{err: errClientConnGotGoAway}:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}\n}\n\n// CanTakeNewRequest reports whether the connection can take a new request,\n// meaning it has not been closed or received or sent a GOAWAY.\nfunc (cc *ClientConn) CanTakeNewRequest() bool {\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\treturn cc.canTakeNewRequestLocked()\n}\n\nfunc (cc *ClientConn) canTakeNewRequestLocked() bool {\n\tif cc.singleUse && cc.nextStreamID > 1 {\n\t\treturn false\n\t}\n\treturn cc.goAway == nil && !cc.closed &&\n\t\tint64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32\n}\n\n// onIdleTimeout is called from a time.AfterFunc goroutine. It will\n// only be called when we're idle, but because we're coming from a new\n// goroutine, there could be a new request coming in at the same time,\n// so this simply calls the synchronized closeIfIdle to shut down this\n// connection. The timer could just call closeIfIdle, but this is more\n// clear.\nfunc (cc *ClientConn) onIdleTimeout() {\n\tcc.closeIfIdle()\n}\n\nfunc (cc *ClientConn) closeIfIdle() {\n\tcc.mu.Lock()\n\tif len(cc.streams) > 0 {\n\t\tcc.mu.Unlock()\n\t\treturn\n\t}\n\tcc.closed = true\n\tnextID := cc.nextStreamID\n\t// TODO: do clients send GOAWAY too? maybe? 
Just Close:\n\tcc.mu.Unlock()\n\n\tif VerboseLogs {\n\t\tcc.vlogf(\"http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)\", cc, cc.singleUse, nextID-2)\n\t}\n\tcc.tconn.Close()\n}\n\nconst maxAllocFrameSize = 512 << 10\n\n// frameBuffer returns a scratch buffer suitable for writing DATA frames.\n// They're capped at the min of the peer's max frame size or 512KB\n// (kinda arbitrarily), but definitely capped so we don't allocate 4GB\n// bufers.\nfunc (cc *ClientConn) frameScratchBuffer() []byte {\n\tcc.mu.Lock()\n\tsize := cc.maxFrameSize\n\tif size > maxAllocFrameSize {\n\t\tsize = maxAllocFrameSize\n\t}\n\tfor i, buf := range cc.freeBuf {\n\t\tif len(buf) >= int(size) {\n\t\t\tcc.freeBuf[i] = nil\n\t\t\tcc.mu.Unlock()\n\t\t\treturn buf[:size]\n\t\t}\n\t}\n\tcc.mu.Unlock()\n\treturn make([]byte, size)\n}\n\nfunc (cc *ClientConn) putFrameScratchBuffer(buf []byte) {\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\tconst maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.\n\tif len(cc.freeBuf) < maxBufs {\n\t\tcc.freeBuf = append(cc.freeBuf, buf)\n\t\treturn\n\t}\n\tfor i, old := range cc.freeBuf {\n\t\tif old == nil {\n\t\t\tcc.freeBuf[i] = buf\n\t\t\treturn\n\t\t}\n\t}\n\t// forget about it.\n}\n\n// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not\n// exported. 
At least they'll be DeepEqual for h1-vs-h2 comparisons tests.\nvar errRequestCanceled = errors.New(\"net/http: request canceled\")\n\nfunc commaSeparatedTrailers(req *http.Request) (string, error) {\n\tkeys := make([]string, 0, len(req.Trailer))\n\tfor k := range req.Trailer {\n\t\tk = http.CanonicalHeaderKey(k)\n\t\tswitch k {\n\t\tcase \"Transfer-Encoding\", \"Trailer\", \"Content-Length\":\n\t\t\treturn \"\", &badStringError{\"invalid Trailer key\", k}\n\t\t}\n\t\tkeys = append(keys, k)\n\t}\n\tif len(keys) > 0 {\n\t\tsort.Strings(keys)\n\t\treturn strings.Join(keys, \",\"), nil\n\t}\n\treturn \"\", nil\n}\n\nfunc (cc *ClientConn) responseHeaderTimeout() time.Duration {\n\tif cc.t.t1 != nil {\n\t\treturn cc.t.t1.ResponseHeaderTimeout\n\t}\n\t// No way to do this (yet?) with just an http2.Transport. Probably\n\t// no need. Request.Cancel this is the new way. We only need to support\n\t// this for compatibility with the old http.Transport fields when\n\t// we're doing transparent http2.\n\treturn 0\n}\n\n// checkConnHeaders checks whether req has any invalid connection-level headers.\n// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.\n// Certain headers are special-cased as okay but not transmitted later.\nfunc checkConnHeaders(req *http.Request) error {\n\tif v := req.Header.Get(\"Upgrade\"); v != \"\" {\n\t\treturn fmt.Errorf(\"http2: invalid Upgrade request header: %q\", req.Header[\"Upgrade\"])\n\t}\n\tif vv := req.Header[\"Transfer-Encoding\"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != \"\" && vv[0] != \"chunked\") {\n\t\treturn fmt.Errorf(\"http2: invalid Transfer-Encoding request header: %q\", vv)\n\t}\n\tif vv := req.Header[\"Connection\"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != \"\" && vv[0] != \"close\" && vv[0] != \"keep-alive\") {\n\t\treturn fmt.Errorf(\"http2: invalid Connection request header: %q\", vv)\n\t}\n\treturn nil\n}\n\n// actualContentLength returns a sanitized version of\n// req.ContentLength, where 0 actually means zero 
(not unknown) and -1\n// means unknown.\nfunc actualContentLength(req *http.Request) int64 {\n\tif req.Body == nil || reqBodyIsNoBody(req.Body) {\n\t\treturn 0\n\t}\n\tif req.ContentLength != 0 {\n\t\treturn req.ContentLength\n\t}\n\treturn -1\n}\n\nfunc (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {\n\tif err := checkConnHeaders(req); err != nil {\n\t\treturn nil, err\n\t}\n\tif cc.idleTimer != nil {\n\t\tcc.idleTimer.Stop()\n\t}\n\n\ttrailers, err := commaSeparatedTrailers(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thasTrailers := trailers != \"\"\n\n\tcc.mu.Lock()\n\tif err := cc.awaitOpenSlotForRequest(req); err != nil {\n\t\tcc.mu.Unlock()\n\t\treturn nil, err\n\t}\n\n\tbody := req.Body\n\tcontentLen := actualContentLength(req)\n\thasBody := contentLen != 0\n\n\t// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?\n\tvar requestedGzip bool\n\tif !cc.t.disableCompression() &&\n\t\treq.Header.Get(\"Accept-Encoding\") == \"\" &&\n\t\treq.Header.Get(\"Range\") == \"\" &&\n\t\treq.Method != \"HEAD\" {\n\t\t// Request gzip only, not deflate. Deflate is ambiguous and\n\t\t// not as universally supported anyway.\n\t\t// See: http://www.gzip.org/zlib/zlib_faq.html#faq38\n\t\t//\n\t\t// Note that we don't request this for HEAD requests,\n\t\t// due to a bug in nginx:\n\t\t//   http://trac.nginx.org/nginx/ticket/358\n\t\t//   https://golang.org/issue/5522\n\t\t//\n\t\t// We don't request gzip if the request is for a range, since\n\t\t// auto-decoding a portion of a gzipped document will just fail\n\t\t// anyway. 
See https://golang.org/issue/8923\n\t\trequestedGzip = true\n\t}\n\n\t// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is\n\t// sent by writeRequestBody below, along with any Trailers,\n\t// again in form HEADERS{1}, CONTINUATION{0,})\n\thdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)\n\tif err != nil {\n\t\tcc.mu.Unlock()\n\t\treturn nil, err\n\t}\n\n\tcs := cc.newStream()\n\tcs.req = req\n\tcs.trace = requestTrace(req)\n\tcs.requestedGzip = requestedGzip\n\tbodyWriter := cc.t.getBodyWriterState(cs, body)\n\tcs.on100 = bodyWriter.on100\n\n\tcc.wmu.Lock()\n\tendStream := !hasBody && !hasTrailers\n\twerr := cc.writeHeaders(cs.ID, endStream, hdrs)\n\tcc.wmu.Unlock()\n\ttraceWroteHeaders(cs.trace)\n\tcc.mu.Unlock()\n\n\tif werr != nil {\n\t\tif hasBody {\n\t\t\treq.Body.Close() // per RoundTripper contract\n\t\t\tbodyWriter.cancel()\n\t\t}\n\t\tcc.forgetStreamID(cs.ID)\n\t\t// Don't bother sending a RST_STREAM (our write already failed;\n\t\t// no need to keep writing)\n\t\ttraceWroteRequest(cs.trace, werr)\n\t\treturn nil, werr\n\t}\n\n\tvar respHeaderTimer <-chan time.Time\n\tif hasBody {\n\t\tbodyWriter.scheduleBodyWrite()\n\t} else {\n\t\ttraceWroteRequest(cs.trace, nil)\n\t\tif d := cc.responseHeaderTimeout(); d != 0 {\n\t\t\ttimer := time.NewTimer(d)\n\t\t\tdefer timer.Stop()\n\t\t\trespHeaderTimer = timer.C\n\t\t}\n\t}\n\n\treadLoopResCh := cs.resc\n\tbodyWritten := false\n\tctx := reqContext(req)\n\n\thandleReadLoopResponse := func(re resAndError) (*http.Response, error) {\n\t\tres := re.res\n\t\tif re.err != nil || res.StatusCode > 299 {\n\t\t\t// On error or status code 3xx, 4xx, 5xx, etc abort any\n\t\t\t// ongoing write, assuming that the server doesn't care\n\t\t\t// about our request body. If the server replied with 1xx or\n\t\t\t// 2xx, however, then assume the server DOES potentially\n\t\t\t// want our body (e.g. full-duplex streaming:\n\t\t\t// golang.org/issue/13444). 
If it turns out the server\n\t\t\t// doesn't, they'll RST_STREAM us soon enough. This is a\n\t\t\t// heuristic to avoid adding knobs to Transport. Hopefully\n\t\t\t// we can keep it.\n\t\t\tbodyWriter.cancel()\n\t\t\tcs.abortRequestBodyWrite(errStopReqBodyWrite)\n\t\t}\n\t\tif re.err != nil {\n\t\t\tcc.mu.Lock()\n\t\t\tafterBodyWrite := cs.startedWrite\n\t\t\tcc.mu.Unlock()\n\t\t\tcc.forgetStreamID(cs.ID)\n\t\t\tif afterBodyWrite {\n\t\t\t\treturn nil, afterReqBodyWriteError{re.err}\n\t\t\t}\n\t\t\treturn nil, re.err\n\t\t}\n\t\tres.Request = req\n\t\tres.TLS = cc.tlsState\n\t\treturn res, nil\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase re := <-readLoopResCh:\n\t\t\treturn handleReadLoopResponse(re)\n\t\tcase <-respHeaderTimer:\n\t\t\tif !hasBody || bodyWritten {\n\t\t\t\tcc.writeStreamReset(cs.ID, ErrCodeCancel, nil)\n\t\t\t} else {\n\t\t\t\tbodyWriter.cancel()\n\t\t\t\tcs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)\n\t\t\t}\n\t\t\tcc.forgetStreamID(cs.ID)\n\t\t\treturn nil, errTimeout\n\t\tcase <-ctx.Done():\n\t\t\tif !hasBody || bodyWritten {\n\t\t\t\tcc.writeStreamReset(cs.ID, ErrCodeCancel, nil)\n\t\t\t} else {\n\t\t\t\tbodyWriter.cancel()\n\t\t\t\tcs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)\n\t\t\t}\n\t\t\tcc.forgetStreamID(cs.ID)\n\t\t\treturn nil, ctx.Err()\n\t\tcase <-req.Cancel:\n\t\t\tif !hasBody || bodyWritten {\n\t\t\t\tcc.writeStreamReset(cs.ID, ErrCodeCancel, nil)\n\t\t\t} else {\n\t\t\t\tbodyWriter.cancel()\n\t\t\t\tcs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)\n\t\t\t}\n\t\t\tcc.forgetStreamID(cs.ID)\n\t\t\treturn nil, errRequestCanceled\n\t\tcase <-cs.peerReset:\n\t\t\t// processResetStream already removed the\n\t\t\t// stream from the streams map; no need for\n\t\t\t// forgetStreamID.\n\t\t\treturn nil, cs.resetErr\n\t\tcase err := <-bodyWriter.resc:\n\t\t\t// Prefer the read loop's response, if available. 
Issue 16102.\n\t\t\tselect {\n\t\t\tcase re := <-readLoopResCh:\n\t\t\t\treturn handleReadLoopResponse(re)\n\t\t\tdefault:\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbodyWritten = true\n\t\t\tif d := cc.responseHeaderTimeout(); d != 0 {\n\t\t\t\ttimer := time.NewTimer(d)\n\t\t\t\tdefer timer.Stop()\n\t\t\t\trespHeaderTimer = timer.C\n\t\t\t}\n\t\t}\n\t}\n}\n\n// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams.\n// Must hold cc.mu.\nfunc (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {\n\tvar waitingForConn chan struct{}\n\tvar waitingForConnErr error // guarded by cc.mu\n\tfor {\n\t\tcc.lastActive = time.Now()\n\t\tif cc.closed || !cc.canTakeNewRequestLocked() {\n\t\t\treturn errClientConnUnusable\n\t\t}\n\t\tif int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {\n\t\t\tif waitingForConn != nil {\n\t\t\t\tclose(waitingForConn)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\t// Unfortunately, we cannot wait on a condition variable and channel at\n\t\t// the same time, so instead, we spin up a goroutine to check if the\n\t\t// request is canceled while we wait for a slot to open in the connection.\n\t\tif waitingForConn == nil {\n\t\t\twaitingForConn = make(chan struct{})\n\t\t\tgo func() {\n\t\t\t\tif err := awaitRequestCancel(req, waitingForConn); err != nil {\n\t\t\t\t\tcc.mu.Lock()\n\t\t\t\t\twaitingForConnErr = err\n\t\t\t\t\tcc.cond.Broadcast()\n\t\t\t\t\tcc.mu.Unlock()\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t\tcc.pendingRequests++\n\t\tcc.cond.Wait()\n\t\tcc.pendingRequests--\n\t\tif waitingForConnErr != nil {\n\t\t\treturn waitingForConnErr\n\t\t}\n\t}\n}\n\n// requires cc.wmu be held\nfunc (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error {\n\tfirst := true // first frame written (HEADERS is first, then CONTINUATION)\n\tframeSize := int(cc.maxFrameSize)\n\tfor len(hdrs) > 0 && cc.werr == nil {\n\t\tchunk := hdrs\n\t\tif len(chunk) > frameSize {\n\t\t\tchunk = 
chunk[:frameSize]\n\t\t}\n\t\thdrs = hdrs[len(chunk):]\n\t\tendHeaders := len(hdrs) == 0\n\t\tif first {\n\t\t\tcc.fr.WriteHeaders(HeadersFrameParam{\n\t\t\t\tStreamID:      streamID,\n\t\t\t\tBlockFragment: chunk,\n\t\t\t\tEndStream:     endStream,\n\t\t\t\tEndHeaders:    endHeaders,\n\t\t\t})\n\t\t\tfirst = false\n\t\t} else {\n\t\t\tcc.fr.WriteContinuation(streamID, endHeaders, chunk)\n\t\t}\n\t}\n\t// TODO(bradfitz): this Flush could potentially block (as\n\t// could the WriteHeaders call(s) above), which means they\n\t// wouldn't respond to Request.Cancel being readable. That's\n\t// rare, but this should probably be in a goroutine.\n\tcc.bw.Flush()\n\treturn cc.werr\n}\n\n// internal error values; they don't escape to callers\nvar (\n\t// abort request body write; don't send cancel\n\terrStopReqBodyWrite = errors.New(\"http2: aborting request body write\")\n\n\t// abort request body write, but send stream reset of cancel.\n\terrStopReqBodyWriteAndCancel = errors.New(\"http2: canceling request\")\n)\n\nfunc (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {\n\tcc := cs.cc\n\tsentEnd := false // whether we sent the final DATA frame w/ END_STREAM\n\tbuf := cc.frameScratchBuffer()\n\tdefer cc.putFrameScratchBuffer(buf)\n\n\tdefer func() {\n\t\ttraceWroteRequest(cs.trace, err)\n\t\t// TODO: write h12Compare test showing whether\n\t\t// Request.Body is closed by the Transport,\n\t\t// and in multiple cases: server replies <=299 and >299\n\t\t// while still writing request body\n\t\tcerr := bodyCloser.Close()\n\t\tif err == nil {\n\t\t\terr = cerr\n\t\t}\n\t}()\n\n\treq := cs.req\n\thasTrailers := req.Trailer != nil\n\n\tvar sawEOF bool\n\tfor !sawEOF {\n\t\tn, err := body.Read(buf)\n\t\tif err == io.EOF {\n\t\t\tsawEOF = true\n\t\t\terr = nil\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tremain := buf[:n]\n\t\tfor len(remain) > 0 && err == nil {\n\t\t\tvar allowed int32\n\t\t\tallowed, err = 
cs.awaitFlowControl(len(remain))\n\t\t\tswitch {\n\t\t\tcase err == errStopReqBodyWrite:\n\t\t\t\treturn err\n\t\t\tcase err == errStopReqBodyWriteAndCancel:\n\t\t\t\tcc.writeStreamReset(cs.ID, ErrCodeCancel, nil)\n\t\t\t\treturn err\n\t\t\tcase err != nil:\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcc.wmu.Lock()\n\t\t\tdata := remain[:allowed]\n\t\t\tremain = remain[allowed:]\n\t\t\tsentEnd = sawEOF && len(remain) == 0 && !hasTrailers\n\t\t\terr = cc.fr.WriteData(cs.ID, sentEnd, data)\n\t\t\tif err == nil {\n\t\t\t\t// TODO(bradfitz): this flush is for latency, not bandwidth.\n\t\t\t\t// Most requests won't need this. Make this opt-in or\n\t\t\t\t// opt-out?  Use some heuristic on the body type? Nagel-like\n\t\t\t\t// timers?  Based on 'n'? Only last chunk of this for loop,\n\t\t\t\t// unless flow control tokens are low? For now, always.\n\t\t\t\t// If we change this, see comment below.\n\t\t\t\terr = cc.bw.Flush()\n\t\t\t}\n\t\t\tcc.wmu.Unlock()\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif sentEnd {\n\t\t// Already sent END_STREAM (which implies we have no\n\t\t// trailers) and flushed, because currently all\n\t\t// WriteData frames above get a flush. 
So we're done.\n\t\treturn nil\n\t}\n\n\tvar trls []byte\n\tif hasTrailers {\n\t\tcc.mu.Lock()\n\t\ttrls, err = cc.encodeTrailers(req)\n\t\tcc.mu.Unlock()\n\t\tif err != nil {\n\t\t\tcc.writeStreamReset(cs.ID, ErrCodeInternal, err)\n\t\t\tcc.forgetStreamID(cs.ID)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tcc.wmu.Lock()\n\tdefer cc.wmu.Unlock()\n\n\t// Two ways to send END_STREAM: either with trailers, or\n\t// with an empty DATA frame.\n\tif len(trls) > 0 {\n\t\terr = cc.writeHeaders(cs.ID, true, trls)\n\t} else {\n\t\terr = cc.fr.WriteData(cs.ID, true, nil)\n\t}\n\tif ferr := cc.bw.Flush(); ferr != nil && err == nil {\n\t\terr = ferr\n\t}\n\treturn err\n}\n\n// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow\n// control tokens from the server.\n// It returns either the non-zero number of tokens taken or an error\n// if the stream is dead.\nfunc (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {\n\tcc := cs.cc\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\tfor {\n\t\tif cc.closed {\n\t\t\treturn 0, errClientConnClosed\n\t\t}\n\t\tif cs.stopReqBody != nil {\n\t\t\treturn 0, cs.stopReqBody\n\t\t}\n\t\tif err := cs.checkResetOrDone(); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tif a := cs.flow.available(); a > 0 {\n\t\t\ttake := a\n\t\t\tif int(take) > maxBytes {\n\n\t\t\t\ttake = int32(maxBytes) // can't truncate int; take is int32\n\t\t\t}\n\t\t\tif take > int32(cc.maxFrameSize) {\n\t\t\t\ttake = int32(cc.maxFrameSize)\n\t\t\t}\n\t\t\tcs.flow.take(take)\n\t\t\treturn take, nil\n\t\t}\n\t\tcc.cond.Wait()\n\t}\n}\n\ntype badStringError struct {\n\twhat string\n\tstr  string\n}\n\nfunc (e *badStringError) Error() string { return fmt.Sprintf(\"%s %q\", e.what, e.str) }\n\n// requires cc.mu be held.\nfunc (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {\n\tcc.hbuf.Reset()\n\n\thost := req.Host\n\tif host == \"\" {\n\t\thost = req.URL.Host\n\t}\n\thost, err 
:= httplex.PunycodeHostPort(host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar path string\n\tif req.Method != \"CONNECT\" {\n\t\tpath = req.URL.RequestURI()\n\t\tif !validPseudoPath(path) {\n\t\t\torig := path\n\t\t\tpath = strings.TrimPrefix(path, req.URL.Scheme+\"://\"+host)\n\t\t\tif !validPseudoPath(path) {\n\t\t\t\tif req.URL.Opaque != \"\" {\n\t\t\t\t\treturn nil, fmt.Errorf(\"invalid request :path %q from URL.Opaque = %q\", orig, req.URL.Opaque)\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, fmt.Errorf(\"invalid request :path %q\", orig)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check for any invalid headers and return an error before we\n\t// potentially pollute our hpack state. (We want to be able to\n\t// continue to reuse the hpack encoder for future requests)\n\tfor k, vv := range req.Header {\n\t\tif !httplex.ValidHeaderFieldName(k) {\n\t\t\treturn nil, fmt.Errorf(\"invalid HTTP header name %q\", k)\n\t\t}\n\t\tfor _, v := range vv {\n\t\t\tif !httplex.ValidHeaderFieldValue(v) {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid HTTP header value %q for header %q\", v, k)\n\t\t\t}\n\t\t}\n\t}\n\n\tenumerateHeaders := func(f func(name, value string)) {\n\t\t// 8.1.2.3 Request Pseudo-Header Fields\n\t\t// The :path pseudo-header field includes the path and query parts of the\n\t\t// target URI (the path-absolute production and optionally a '?' 
character\n\t\t// followed by the query production (see Sections 3.3 and 3.4 of\n\t\t// [RFC3986]).\n\t\tf(\":authority\", host)\n\t\tf(\":method\", req.Method)\n\t\tif req.Method != \"CONNECT\" {\n\t\t\tf(\":path\", path)\n\t\t\tf(\":scheme\", req.URL.Scheme)\n\t\t}\n\t\tif trailers != \"\" {\n\t\t\tf(\"trailer\", trailers)\n\t\t}\n\n\t\tvar didUA bool\n\t\tfor k, vv := range req.Header {\n\t\t\tif strings.EqualFold(k, \"host\") || strings.EqualFold(k, \"content-length\") {\n\t\t\t\t// Host is :authority, already sent.\n\t\t\t\t// Content-Length is automatic, set below.\n\t\t\t\tcontinue\n\t\t\t} else if strings.EqualFold(k, \"connection\") || strings.EqualFold(k, \"proxy-connection\") ||\n\t\t\t\tstrings.EqualFold(k, \"transfer-encoding\") || strings.EqualFold(k, \"upgrade\") ||\n\t\t\t\tstrings.EqualFold(k, \"keep-alive\") {\n\t\t\t\t// Per 8.1.2.2 Connection-Specific Header\n\t\t\t\t// Fields, don't send connection-specific\n\t\t\t\t// fields. We have already checked if any\n\t\t\t\t// are error-worthy so just ignore the rest.\n\t\t\t\tcontinue\n\t\t\t} else if strings.EqualFold(k, \"user-agent\") {\n\t\t\t\t// Match Go's http1 behavior: at most one\n\t\t\t\t// User-Agent. If set to nil or empty string,\n\t\t\t\t// then omit it. Otherwise if not mentioned,\n\t\t\t\t// include the default (below).\n\t\t\t\tdidUA = true\n\t\t\t\tif len(vv) < 1 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tvv = vv[:1]\n\t\t\t\tif vv[0] == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tfor _, v := range vv {\n\t\t\t\tf(k, v)\n\t\t\t}\n\t\t}\n\t\tif shouldSendReqContentLength(req.Method, contentLength) {\n\t\t\tf(\"content-length\", strconv.FormatInt(contentLength, 10))\n\t\t}\n\t\tif addGzipHeader {\n\t\t\tf(\"accept-encoding\", \"gzip\")\n\t\t}\n\t\tif !didUA {\n\t\t\tf(\"user-agent\", defaultUserAgent)\n\t\t}\n\t}\n\n\t// Do a first pass over the headers counting bytes to ensure\n\t// we don't exceed cc.peerMaxHeaderListSize. 
This is done as a\n\t// separate pass before encoding the headers to prevent\n\t// modifying the hpack state.\n\thlSize := uint64(0)\n\tenumerateHeaders(func(name, value string) {\n\t\thf := hpack.HeaderField{Name: name, Value: value}\n\t\thlSize += uint64(hf.Size())\n\t})\n\n\tif hlSize > cc.peerMaxHeaderListSize {\n\t\treturn nil, errRequestHeaderListSize\n\t}\n\n\t// Header list size is ok. Write the headers.\n\tenumerateHeaders(func(name, value string) {\n\t\tcc.writeHeader(strings.ToLower(name), value)\n\t})\n\n\treturn cc.hbuf.Bytes(), nil\n}\n\n// shouldSendReqContentLength reports whether the http2.Transport should send\n// a \"content-length\" request header. This logic is basically a copy of the net/http\n// transferWriter.shouldSendContentLength.\n// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).\n// -1 means unknown.\nfunc shouldSendReqContentLength(method string, contentLength int64) bool {\n\tif contentLength > 0 {\n\t\treturn true\n\t}\n\tif contentLength < 0 {\n\t\treturn false\n\t}\n\t// For zero bodies, whether we send a content-length depends on the method.\n\t// It also kinda doesn't matter for http2 either way, with END_STREAM.\n\tswitch method {\n\tcase \"POST\", \"PUT\", \"PATCH\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n// requires cc.mu be held.\nfunc (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) {\n\tcc.hbuf.Reset()\n\n\thlSize := uint64(0)\n\tfor k, vv := range req.Trailer {\n\t\tfor _, v := range vv {\n\t\t\thf := hpack.HeaderField{Name: k, Value: v}\n\t\t\thlSize += uint64(hf.Size())\n\t\t}\n\t}\n\tif hlSize > cc.peerMaxHeaderListSize {\n\t\treturn nil, errRequestHeaderListSize\n\t}\n\n\tfor k, vv := range req.Trailer {\n\t\t// Transfer-Encoding, etc.. 
have already been filtered at the\n\t\t// start of RoundTrip\n\t\tlowKey := strings.ToLower(k)\n\t\tfor _, v := range vv {\n\t\t\tcc.writeHeader(lowKey, v)\n\t\t}\n\t}\n\treturn cc.hbuf.Bytes(), nil\n}\n\nfunc (cc *ClientConn) writeHeader(name, value string) {\n\tif VerboseLogs {\n\t\tlog.Printf(\"http2: Transport encoding header %q = %q\", name, value)\n\t}\n\tcc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})\n}\n\ntype resAndError struct {\n\tres *http.Response\n\terr error\n}\n\n// requires cc.mu be held.\nfunc (cc *ClientConn) newStream() *clientStream {\n\tcs := &clientStream{\n\t\tcc:        cc,\n\t\tID:        cc.nextStreamID,\n\t\tresc:      make(chan resAndError, 1),\n\t\tpeerReset: make(chan struct{}),\n\t\tdone:      make(chan struct{}),\n\t}\n\tcs.flow.add(int32(cc.initialWindowSize))\n\tcs.flow.setConnFlow(&cc.flow)\n\tcs.inflow.add(transportDefaultStreamFlow)\n\tcs.inflow.setConnFlow(&cc.inflow)\n\tcc.nextStreamID += 2\n\tcc.streams[cs.ID] = cs\n\treturn cs\n}\n\nfunc (cc *ClientConn) forgetStreamID(id uint32) {\n\tcc.streamByID(id, true)\n}\n\nfunc (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\tcs := cc.streams[id]\n\tif andRemove && cs != nil && !cc.closed {\n\t\tcc.lastActive = time.Now()\n\t\tdelete(cc.streams, id)\n\t\tif len(cc.streams) == 0 && cc.idleTimer != nil {\n\t\t\tcc.idleTimer.Reset(cc.idleTimeout)\n\t\t}\n\t\tclose(cs.done)\n\t\t// Wake up checkResetOrDone via clientStream.awaitFlowControl and\n\t\t// wake up RoundTrip if there is a pending request.\n\t\tcc.cond.Broadcast()\n\t}\n\treturn cs\n}\n\n// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.\ntype clientConnReadLoop struct {\n\tcc            *ClientConn\n\tactiveRes     map[uint32]*clientStream // keyed by streamID\n\tcloseWhenIdle bool\n}\n\n// readLoop runs in its own goroutine and reads and dispatches frames.\nfunc (cc *ClientConn) readLoop() {\n\trl := 
&clientConnReadLoop{\n\t\tcc:        cc,\n\t\tactiveRes: make(map[uint32]*clientStream),\n\t}\n\n\tdefer rl.cleanup()\n\tcc.readerErr = rl.run()\n\tif ce, ok := cc.readerErr.(ConnectionError); ok {\n\t\tcc.wmu.Lock()\n\t\tcc.fr.WriteGoAway(0, ErrCode(ce), nil)\n\t\tcc.wmu.Unlock()\n\t}\n}\n\n// GoAwayError is returned by the Transport when the server closes the\n// TCP connection after sending a GOAWAY frame.\ntype GoAwayError struct {\n\tLastStreamID uint32\n\tErrCode      ErrCode\n\tDebugData    string\n}\n\nfunc (e GoAwayError) Error() string {\n\treturn fmt.Sprintf(\"http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q\",\n\t\te.LastStreamID, e.ErrCode, e.DebugData)\n}\n\nfunc isEOFOrNetReadError(err error) bool {\n\tif err == io.EOF {\n\t\treturn true\n\t}\n\tne, ok := err.(*net.OpError)\n\treturn ok && ne.Op == \"read\"\n}\n\nfunc (rl *clientConnReadLoop) cleanup() {\n\tcc := rl.cc\n\tdefer cc.tconn.Close()\n\tdefer cc.t.connPool().MarkDead(cc)\n\tdefer close(cc.readerDone)\n\n\tif cc.idleTimer != nil {\n\t\tcc.idleTimer.Stop()\n\t}\n\n\t// Close any response bodies if the server closes prematurely.\n\t// TODO: also do this if we've written the headers but not\n\t// gotten a response yet.\n\terr := cc.readerErr\n\tcc.mu.Lock()\n\tif cc.goAway != nil && isEOFOrNetReadError(err) {\n\t\terr = GoAwayError{\n\t\t\tLastStreamID: cc.goAway.LastStreamID,\n\t\t\tErrCode:      cc.goAway.ErrCode,\n\t\t\tDebugData:    cc.goAwayDebug,\n\t\t}\n\t} else if err == io.EOF {\n\t\terr = io.ErrUnexpectedEOF\n\t}\n\tfor _, cs := range rl.activeRes {\n\t\tcs.bufPipe.CloseWithError(err)\n\t}\n\tfor _, cs := range cc.streams {\n\t\tselect {\n\t\tcase cs.resc <- resAndError{err: err}:\n\t\tdefault:\n\t\t}\n\t\tclose(cs.done)\n\t}\n\tcc.closed = true\n\tcc.cond.Broadcast()\n\tcc.mu.Unlock()\n}\n\nfunc (rl *clientConnReadLoop) run() error {\n\tcc := rl.cc\n\trl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse\n\tgotReply := false // ever 
saw a HEADERS reply\n\tgotSettings := false\n\tfor {\n\t\tf, err := cc.fr.ReadFrame()\n\t\tif err != nil {\n\t\t\tcc.vlogf(\"http2: Transport readFrame error on conn %p: (%T) %v\", cc, err, err)\n\t\t}\n\t\tif se, ok := err.(StreamError); ok {\n\t\t\tif cs := cc.streamByID(se.StreamID, false); cs != nil {\n\t\t\t\tcs.cc.writeStreamReset(cs.ID, se.Code, err)\n\t\t\t\tcs.cc.forgetStreamID(cs.ID)\n\t\t\t\tif se.Cause == nil {\n\t\t\t\t\tse.Cause = cc.fr.errDetail\n\t\t\t\t}\n\t\t\t\trl.endStreamError(cs, se)\n\t\t\t}\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif VerboseLogs {\n\t\t\tcc.vlogf(\"http2: Transport received %s\", summarizeFrame(f))\n\t\t}\n\t\tif !gotSettings {\n\t\t\tif _, ok := f.(*SettingsFrame); !ok {\n\t\t\t\tcc.logf(\"protocol error: received %T before a SETTINGS frame\", f)\n\t\t\t\treturn ConnectionError(ErrCodeProtocol)\n\t\t\t}\n\t\t\tgotSettings = true\n\t\t}\n\t\tmaybeIdle := false // whether frame might transition us to idle\n\n\t\tswitch f := f.(type) {\n\t\tcase *MetaHeadersFrame:\n\t\t\terr = rl.processHeaders(f)\n\t\t\tmaybeIdle = true\n\t\t\tgotReply = true\n\t\tcase *DataFrame:\n\t\t\terr = rl.processData(f)\n\t\t\tmaybeIdle = true\n\t\tcase *GoAwayFrame:\n\t\t\terr = rl.processGoAway(f)\n\t\t\tmaybeIdle = true\n\t\tcase *RSTStreamFrame:\n\t\t\terr = rl.processResetStream(f)\n\t\t\tmaybeIdle = true\n\t\tcase *SettingsFrame:\n\t\t\terr = rl.processSettings(f)\n\t\tcase *PushPromiseFrame:\n\t\t\terr = rl.processPushPromise(f)\n\t\tcase *WindowUpdateFrame:\n\t\t\terr = rl.processWindowUpdate(f)\n\t\tcase *PingFrame:\n\t\t\terr = rl.processPing(f)\n\t\tdefault:\n\t\t\tcc.logf(\"Transport: unhandled response frame type %T\", f)\n\t\t}\n\t\tif err != nil {\n\t\t\tif VerboseLogs {\n\t\t\t\tcc.vlogf(\"http2: Transport conn %p received error from processing frame %v: %v\", cc, summarizeFrame(f), err)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tif rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 
{\n\t\t\tcc.closeIfIdle()\n\t\t}\n\t}\n}\n\nfunc (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {\n\tcc := rl.cc\n\tif f.StreamEnded() {\n\t\t// Issue 20521: If the stream has ended, streamByID() causes\n\t\t// clientStream.done to be closed, which causes the request's bodyWriter\n\t\t// to be closed with an errStreamClosed, which may be received by\n\t\t// clientConn.RoundTrip before the result of processing these headers.\n\t\t// Deferring stream closure allows the header processing to occur first.\n\t\t// clientConn.RoundTrip may still receive the bodyWriter error first, but\n\t\t// the fix for issue 16102 prioritises any response.\n\t\tdefer cc.streamByID(f.StreamID, true)\n\t}\n\tcs := cc.streamByID(f.StreamID, false)\n\tif cs == nil {\n\t\t// We'd get here if we canceled a request while the\n\t\t// server had its response still in flight. So if this\n\t\t// was just something we canceled, ignore it.\n\t\treturn nil\n\t}\n\tif !cs.firstByte {\n\t\tif cs.trace != nil {\n\t\t\t// TODO(bradfitz): move first response byte earlier,\n\t\t\t// when we first read the 9 byte header, not waiting\n\t\t\t// until all the HEADERS+CONTINUATION frames have been\n\t\t\t// merged. This works for now.\n\t\t\ttraceFirstResponseByte(cs.trace)\n\t\t}\n\t\tcs.firstByte = true\n\t}\n\tif !cs.pastHeaders {\n\t\tcs.pastHeaders = true\n\t} else {\n\t\treturn rl.processTrailers(cs, f)\n\t}\n\n\tres, err := rl.handleResponse(cs, f)\n\tif err != nil {\n\t\tif _, ok := err.(ConnectionError); ok {\n\t\t\treturn err\n\t\t}\n\t\t// Any other error type is a stream error.\n\t\tcs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err)\n\t\tcs.resc <- resAndError{err: err}\n\t\treturn nil // return nil from process* funcs to keep conn alive\n\t}\n\tif res == nil {\n\t\t// (nil, nil) special case. 
See handleResponse docs.\n\t\treturn nil\n\t}\n\tif res.Body != noBody {\n\t\trl.activeRes[cs.ID] = cs\n\t}\n\tcs.resTrailer = &res.Trailer\n\tcs.resc <- resAndError{res: res}\n\treturn nil\n}\n\n// may return error types nil, or ConnectionError. Any other error value\n// is a StreamError of type ErrCodeProtocol. The returned error in that case\n// is the detail.\n//\n// As a special case, handleResponse may return (nil, nil) to skip the\n// frame (currently only used for 100 expect continue). This special\n// case is going away after Issue 13851 is fixed.\nfunc (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {\n\tif f.Truncated {\n\t\treturn nil, errResponseHeaderListSize\n\t}\n\n\tstatus := f.PseudoValue(\"status\")\n\tif status == \"\" {\n\t\treturn nil, errors.New(\"missing status pseudo header\")\n\t}\n\tstatusCode, err := strconv.Atoi(status)\n\tif err != nil {\n\t\treturn nil, errors.New(\"malformed non-numeric status pseudo header\")\n\t}\n\n\tif statusCode == 100 {\n\t\ttraceGot100Continue(cs.trace)\n\t\tif cs.on100 != nil {\n\t\t\tcs.on100() // forces any write delay timer to fire\n\t\t}\n\t\tcs.pastHeaders = false // do it all again\n\t\treturn nil, nil\n\t}\n\n\theader := make(http.Header)\n\tres := &http.Response{\n\t\tProto:      \"HTTP/2.0\",\n\t\tProtoMajor: 2,\n\t\tHeader:     header,\n\t\tStatusCode: statusCode,\n\t\tStatus:     status + \" \" + http.StatusText(statusCode),\n\t}\n\tfor _, hf := range f.RegularFields() {\n\t\tkey := http.CanonicalHeaderKey(hf.Name)\n\t\tif key == \"Trailer\" {\n\t\t\tt := res.Trailer\n\t\t\tif t == nil {\n\t\t\t\tt = make(http.Header)\n\t\t\t\tres.Trailer = t\n\t\t\t}\n\t\t\tforeachHeaderElement(hf.Value, func(v string) {\n\t\t\t\tt[http.CanonicalHeaderKey(v)] = nil\n\t\t\t})\n\t\t} else {\n\t\t\theader[key] = append(header[key], hf.Value)\n\t\t}\n\t}\n\n\tstreamEnded := f.StreamEnded()\n\tisHead := cs.req.Method == \"HEAD\"\n\tif !streamEnded || isHead 
{\n\t\tres.ContentLength = -1\n\t\tif clens := res.Header[\"Content-Length\"]; len(clens) == 1 {\n\t\t\tif clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {\n\t\t\t\tres.ContentLength = clen64\n\t\t\t} else {\n\t\t\t\t// TODO: care? unlike http/1, it won't mess up our framing, so it's\n\t\t\t\t// more safe smuggling-wise to ignore.\n\t\t\t}\n\t\t} else if len(clens) > 1 {\n\t\t\t// TODO: care? unlike http/1, it won't mess up our framing, so it's\n\t\t\t// more safe smuggling-wise to ignore.\n\t\t}\n\t}\n\n\tif streamEnded || isHead {\n\t\tres.Body = noBody\n\t\treturn res, nil\n\t}\n\n\tcs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}}\n\tcs.bytesRemain = res.ContentLength\n\tres.Body = transportResponseBody{cs}\n\tgo cs.awaitRequestCancel(cs.req)\n\n\tif cs.requestedGzip && res.Header.Get(\"Content-Encoding\") == \"gzip\" {\n\t\tres.Header.Del(\"Content-Encoding\")\n\t\tres.Header.Del(\"Content-Length\")\n\t\tres.ContentLength = -1\n\t\tres.Body = &gzipReader{body: res.Body}\n\t\tsetResponseUncompressed(res)\n\t}\n\treturn res, nil\n}\n\nfunc (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error {\n\tif cs.pastTrailers {\n\t\t// Too many HEADERS frames for this stream.\n\t\treturn ConnectionError(ErrCodeProtocol)\n\t}\n\tcs.pastTrailers = true\n\tif !f.StreamEnded() {\n\t\t// We expect that any headers for trailers also\n\t\t// has END_STREAM.\n\t\treturn ConnectionError(ErrCodeProtocol)\n\t}\n\tif len(f.PseudoFields()) > 0 {\n\t\t// No pseudo header fields are defined for trailers.\n\t\t// TODO: ConnectionError might be overly harsh? 
Check.\n\t\treturn ConnectionError(ErrCodeProtocol)\n\t}\n\n\ttrailer := make(http.Header)\n\tfor _, hf := range f.RegularFields() {\n\t\tkey := http.CanonicalHeaderKey(hf.Name)\n\t\ttrailer[key] = append(trailer[key], hf.Value)\n\t}\n\tcs.trailer = trailer\n\n\trl.endStream(cs)\n\treturn nil\n}\n\n// transportResponseBody is the concrete type of Transport.RoundTrip's\n// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body.\n// On Close it sends RST_STREAM if EOF wasn't already seen.\ntype transportResponseBody struct {\n\tcs *clientStream\n}\n\nfunc (b transportResponseBody) Read(p []byte) (n int, err error) {\n\tcs := b.cs\n\tcc := cs.cc\n\n\tif cs.readErr != nil {\n\t\treturn 0, cs.readErr\n\t}\n\tn, err = b.cs.bufPipe.Read(p)\n\tif cs.bytesRemain != -1 {\n\t\tif int64(n) > cs.bytesRemain {\n\t\t\tn = int(cs.bytesRemain)\n\t\t\tif err == nil {\n\t\t\t\terr = errors.New(\"net/http: server replied with more than declared Content-Length; truncated\")\n\t\t\t\tcc.writeStreamReset(cs.ID, ErrCodeProtocol, err)\n\t\t\t}\n\t\t\tcs.readErr = err\n\t\t\treturn int(cs.bytesRemain), err\n\t\t}\n\t\tcs.bytesRemain -= int64(n)\n\t\tif err == io.EOF && cs.bytesRemain > 0 {\n\t\t\terr = io.ErrUnexpectedEOF\n\t\t\tcs.readErr = err\n\t\t\treturn n, err\n\t\t}\n\t}\n\tif n == 0 {\n\t\t// No flow control tokens to send back.\n\t\treturn\n\t}\n\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\n\tvar connAdd, streamAdd int32\n\t// Check the conn-level first, before the stream-level.\n\tif v := cc.inflow.available(); v < transportDefaultConnFlow/2 {\n\t\tconnAdd = transportDefaultConnFlow - v\n\t\tcc.inflow.add(connAdd)\n\t}\n\tif err == nil { // No need to refresh if the stream is over or failed.\n\t\t// Consider any buffered body data (read from the conn but not\n\t\t// consumed by the client) when computing flow control for this\n\t\t// stream.\n\t\tv := int(cs.inflow.available()) + cs.bufPipe.Len()\n\t\tif v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh 
{\n\t\t\tstreamAdd = int32(transportDefaultStreamFlow - v)\n\t\t\tcs.inflow.add(streamAdd)\n\t\t}\n\t}\n\tif connAdd != 0 || streamAdd != 0 {\n\t\tcc.wmu.Lock()\n\t\tdefer cc.wmu.Unlock()\n\t\tif connAdd != 0 {\n\t\t\tcc.fr.WriteWindowUpdate(0, mustUint31(connAdd))\n\t\t}\n\t\tif streamAdd != 0 {\n\t\t\tcc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd))\n\t\t}\n\t\tcc.bw.Flush()\n\t}\n\treturn\n}\n\nvar errClosedResponseBody = errors.New(\"http2: response body closed\")\n\nfunc (b transportResponseBody) Close() error {\n\tcs := b.cs\n\tcc := cs.cc\n\n\tserverSentStreamEnd := cs.bufPipe.Err() == io.EOF\n\tunread := cs.bufPipe.Len()\n\n\tif unread > 0 || !serverSentStreamEnd {\n\t\tcc.mu.Lock()\n\t\tcc.wmu.Lock()\n\t\tif !serverSentStreamEnd {\n\t\t\tcc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)\n\t\t\tcs.didReset = true\n\t\t}\n\t\t// Return connection-level flow control.\n\t\tif unread > 0 {\n\t\t\tcc.inflow.add(int32(unread))\n\t\t\tcc.fr.WriteWindowUpdate(0, uint32(unread))\n\t\t}\n\t\tcc.bw.Flush()\n\t\tcc.wmu.Unlock()\n\t\tcc.mu.Unlock()\n\t}\n\n\tcs.bufPipe.BreakWithError(errClosedResponseBody)\n\tcc.forgetStreamID(cs.ID)\n\treturn nil\n}\n\nfunc (rl *clientConnReadLoop) processData(f *DataFrame) error {\n\tcc := rl.cc\n\tcs := cc.streamByID(f.StreamID, f.StreamEnded())\n\tdata := f.Data()\n\tif cs == nil {\n\t\tcc.mu.Lock()\n\t\tneverSent := cc.nextStreamID\n\t\tcc.mu.Unlock()\n\t\tif f.StreamID >= neverSent {\n\t\t\t// We never asked for this.\n\t\t\tcc.logf(\"http2: Transport received unsolicited DATA frame; closing connection\")\n\t\t\treturn ConnectionError(ErrCodeProtocol)\n\t\t}\n\t\t// We probably did ask for this, but canceled. Just ignore it.\n\t\t// TODO: be stricter here? only silently ignore things which\n\t\t// we canceled, but not things which were closed normally\n\t\t// by the peer? 
Tough without accumulating too much state.\n\n\t\t// But at least return their flow control:\n\t\tif f.Length > 0 {\n\t\t\tcc.mu.Lock()\n\t\t\tcc.inflow.add(int32(f.Length))\n\t\t\tcc.mu.Unlock()\n\n\t\t\tcc.wmu.Lock()\n\t\t\tcc.fr.WriteWindowUpdate(0, uint32(f.Length))\n\t\t\tcc.bw.Flush()\n\t\t\tcc.wmu.Unlock()\n\t\t}\n\t\treturn nil\n\t}\n\tif !cs.firstByte {\n\t\tcc.logf(\"protocol error: received DATA before a HEADERS frame\")\n\t\trl.endStreamError(cs, StreamError{\n\t\t\tStreamID: f.StreamID,\n\t\t\tCode:     ErrCodeProtocol,\n\t\t})\n\t\treturn nil\n\t}\n\tif f.Length > 0 {\n\t\tif cs.req.Method == \"HEAD\" && len(data) > 0 {\n\t\t\tcc.logf(\"protocol error: received DATA on a HEAD request\")\n\t\t\trl.endStreamError(cs, StreamError{\n\t\t\t\tStreamID: f.StreamID,\n\t\t\t\tCode:     ErrCodeProtocol,\n\t\t\t})\n\t\t\treturn nil\n\t\t}\n\t\t// Check connection-level flow control.\n\t\tcc.mu.Lock()\n\t\tif cs.inflow.available() >= int32(f.Length) {\n\t\t\tcs.inflow.take(int32(f.Length))\n\t\t} else {\n\t\t\tcc.mu.Unlock()\n\t\t\treturn ConnectionError(ErrCodeFlowControl)\n\t\t}\n\t\t// Return any padded flow control now, since we won't\n\t\t// refund it later on body reads.\n\t\tvar refund int\n\t\tif pad := int(f.Length) - len(data); pad > 0 {\n\t\t\trefund += pad\n\t\t}\n\t\t// Return len(data) now if the stream is already closed,\n\t\t// since data will never be read.\n\t\tdidReset := cs.didReset\n\t\tif didReset {\n\t\t\trefund += len(data)\n\t\t}\n\t\tif refund > 0 {\n\t\t\tcc.inflow.add(int32(refund))\n\t\t\tcc.wmu.Lock()\n\t\t\tcc.fr.WriteWindowUpdate(0, uint32(refund))\n\t\t\tif !didReset {\n\t\t\t\tcs.inflow.add(int32(refund))\n\t\t\t\tcc.fr.WriteWindowUpdate(cs.ID, uint32(refund))\n\t\t\t}\n\t\t\tcc.bw.Flush()\n\t\t\tcc.wmu.Unlock()\n\t\t}\n\t\tcc.mu.Unlock()\n\n\t\tif len(data) > 0 && !didReset {\n\t\t\tif _, err := cs.bufPipe.Write(data); err != nil {\n\t\t\t\trl.endStreamError(cs, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif 
f.StreamEnded() {\n\t\trl.endStream(cs)\n\t}\n\treturn nil\n}\n\nvar errInvalidTrailers = errors.New(\"http2: invalid trailers\")\n\nfunc (rl *clientConnReadLoop) endStream(cs *clientStream) {\n\t// TODO: check that any declared content-length matches, like\n\t// server.go's (*stream).endStream method.\n\trl.endStreamError(cs, nil)\n}\n\nfunc (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {\n\tvar code func()\n\tif err == nil {\n\t\terr = io.EOF\n\t\tcode = cs.copyTrailers\n\t}\n\tcs.bufPipe.closeWithErrorAndCode(err, code)\n\tdelete(rl.activeRes, cs.ID)\n\tif isConnectionCloseRequest(cs.req) {\n\t\trl.closeWhenIdle = true\n\t}\n\n\tselect {\n\tcase cs.resc <- resAndError{err: err}:\n\tdefault:\n\t}\n}\n\nfunc (cs *clientStream) copyTrailers() {\n\tfor k, vv := range cs.trailer {\n\t\tt := cs.resTrailer\n\t\tif *t == nil {\n\t\t\t*t = make(http.Header)\n\t\t}\n\t\t(*t)[k] = vv\n\t}\n}\n\nfunc (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {\n\tcc := rl.cc\n\tcc.t.connPool().MarkDead(cc)\n\tif f.ErrCode != 0 {\n\t\t// TODO: deal with GOAWAY more. 
particularly the error code\n\t\tcc.vlogf(\"transport got GOAWAY with error code = %v\", f.ErrCode)\n\t}\n\tcc.setGoAway(f)\n\treturn nil\n}\n\nfunc (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {\n\tcc := rl.cc\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\n\tif f.IsAck() {\n\t\tif cc.wantSettingsAck {\n\t\t\tcc.wantSettingsAck = false\n\t\t\treturn nil\n\t\t}\n\t\treturn ConnectionError(ErrCodeProtocol)\n\t}\n\n\terr := f.ForeachSetting(func(s Setting) error {\n\t\tswitch s.ID {\n\t\tcase SettingMaxFrameSize:\n\t\t\tcc.maxFrameSize = s.Val\n\t\tcase SettingMaxConcurrentStreams:\n\t\t\tcc.maxConcurrentStreams = s.Val\n\t\tcase SettingMaxHeaderListSize:\n\t\t\tcc.peerMaxHeaderListSize = uint64(s.Val)\n\t\tcase SettingInitialWindowSize:\n\t\t\t// Values above the maximum flow-control\n\t\t\t// window size of 2^31-1 MUST be treated as a\n\t\t\t// connection error (Section 5.4.1) of type\n\t\t\t// FLOW_CONTROL_ERROR.\n\t\t\tif s.Val > math.MaxInt32 {\n\t\t\t\treturn ConnectionError(ErrCodeFlowControl)\n\t\t\t}\n\n\t\t\t// Adjust flow control of currently-open\n\t\t\t// frames by the difference of the old initial\n\t\t\t// window size and this one.\n\t\t\tdelta := int32(s.Val) - int32(cc.initialWindowSize)\n\t\t\tfor _, cs := range cc.streams {\n\t\t\t\tcs.flow.add(delta)\n\t\t\t}\n\t\t\tcc.cond.Broadcast()\n\n\t\t\tcc.initialWindowSize = s.Val\n\t\tdefault:\n\t\t\t// TODO(bradfitz): handle more settings? 
SETTINGS_HEADER_TABLE_SIZE probably.\n\t\t\tcc.vlogf(\"Unhandled Setting: %v\", s)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcc.wmu.Lock()\n\tdefer cc.wmu.Unlock()\n\n\tcc.fr.WriteSettingsAck()\n\tcc.bw.Flush()\n\treturn cc.werr\n}\n\nfunc (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {\n\tcc := rl.cc\n\tcs := cc.streamByID(f.StreamID, false)\n\tif f.StreamID != 0 && cs == nil {\n\t\treturn nil\n\t}\n\n\tcc.mu.Lock()\n\tdefer cc.mu.Unlock()\n\n\tfl := &cc.flow\n\tif cs != nil {\n\t\tfl = &cs.flow\n\t}\n\tif !fl.add(int32(f.Increment)) {\n\t\treturn ConnectionError(ErrCodeFlowControl)\n\t}\n\tcc.cond.Broadcast()\n\treturn nil\n}\n\nfunc (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {\n\tcs := rl.cc.streamByID(f.StreamID, true)\n\tif cs == nil {\n\t\t// TODO: return error if server tries to RST_STEAM an idle stream\n\t\treturn nil\n\t}\n\tselect {\n\tcase <-cs.peerReset:\n\t\t// Already reset.\n\t\t// This is the only goroutine\n\t\t// which closes this, so there\n\t\t// isn't a race.\n\tdefault:\n\t\terr := streamError(cs.ID, f.ErrCode)\n\t\tcs.resetErr = err\n\t\tclose(cs.peerReset)\n\t\tcs.bufPipe.CloseWithError(err)\n\t\tcs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl\n\t}\n\tdelete(rl.activeRes, cs.ID)\n\treturn nil\n}\n\n// Ping sends a PING frame to the server and waits for the ack.\n// Public implementation is in go17.go and not_go17.go\nfunc (cc *ClientConn) ping(ctx contextContext) error {\n\tc := make(chan struct{})\n\t// Generate a random payload\n\tvar p [8]byte\n\tfor {\n\t\tif _, err := rand.Read(p[:]); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcc.mu.Lock()\n\t\t// check for dup before insert\n\t\tif _, found := cc.pings[p]; !found {\n\t\t\tcc.pings[p] = c\n\t\t\tcc.mu.Unlock()\n\t\t\tbreak\n\t\t}\n\t\tcc.mu.Unlock()\n\t}\n\tcc.wmu.Lock()\n\tif err := cc.fr.WritePing(false, p); err != nil {\n\t\tcc.wmu.Unlock()\n\t\treturn 
err\n\t}\n\tif err := cc.bw.Flush(); err != nil {\n\t\tcc.wmu.Unlock()\n\t\treturn err\n\t}\n\tcc.wmu.Unlock()\n\tselect {\n\tcase <-c:\n\t\treturn nil\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\tcase <-cc.readerDone:\n\t\t// connection closed\n\t\treturn cc.readerErr\n\t}\n}\n\nfunc (rl *clientConnReadLoop) processPing(f *PingFrame) error {\n\tif f.IsAck() {\n\t\tcc := rl.cc\n\t\tcc.mu.Lock()\n\t\tdefer cc.mu.Unlock()\n\t\t// If ack, notify listener if any\n\t\tif c, ok := cc.pings[f.Data]; ok {\n\t\t\tclose(c)\n\t\t\tdelete(cc.pings, f.Data)\n\t\t}\n\t\treturn nil\n\t}\n\tcc := rl.cc\n\tcc.wmu.Lock()\n\tdefer cc.wmu.Unlock()\n\tif err := cc.fr.WritePing(true, f.Data); err != nil {\n\t\treturn err\n\t}\n\treturn cc.bw.Flush()\n}\n\nfunc (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {\n\t// We told the peer we don't want them.\n\t// Spec says:\n\t// \"PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH\n\t// setting of the peer endpoint is set to 0. An endpoint that\n\t// has set this setting and has received acknowledgement MUST\n\t// treat the receipt of a PUSH_PROMISE frame as a connection\n\t// error (Section 5.4.1) of type PROTOCOL_ERROR.\"\n\treturn ConnectionError(ErrCodeProtocol)\n}\n\nfunc (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {\n\t// TODO: map err to more interesting error codes, once the\n\t// HTTP community comes up with some. 
But currently for\n\t// RST_STREAM there's no equivalent to GOAWAY frame's debug\n\t// data, and the error codes are all pretty vague (\"cancel\").\n\tcc.wmu.Lock()\n\tcc.fr.WriteRSTStream(streamID, code)\n\tcc.bw.Flush()\n\tcc.wmu.Unlock()\n}\n\nvar (\n\terrResponseHeaderListSize = errors.New(\"http2: response header list larger than advertised limit\")\n\terrRequestHeaderListSize  = errors.New(\"http2: request header list larger than peer's advertised limit\")\n\terrPseudoTrailers         = errors.New(\"http2: invalid pseudo header in trailers\")\n)\n\nfunc (cc *ClientConn) logf(format string, args ...interface{}) {\n\tcc.t.logf(format, args...)\n}\n\nfunc (cc *ClientConn) vlogf(format string, args ...interface{}) {\n\tcc.t.vlogf(format, args...)\n}\n\nfunc (t *Transport) vlogf(format string, args ...interface{}) {\n\tif VerboseLogs {\n\t\tt.logf(format, args...)\n\t}\n}\n\nfunc (t *Transport) logf(format string, args ...interface{}) {\n\tlog.Printf(format, args...)\n}\n\nvar noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))\n\nfunc strSliceContains(ss []string, s string) bool {\n\tfor _, v := range ss {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\ntype erringRoundTripper struct{ err error }\n\nfunc (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }\n\n// gzipReader wraps a response body so it can lazily\n// call gzip.NewReader on the first call to Read\ntype gzipReader struct {\n\tbody io.ReadCloser // underlying Response.Body\n\tzr   *gzip.Reader  // lazily-initialized gzip reader\n\tzerr error         // sticky error\n}\n\nfunc (gz *gzipReader) Read(p []byte) (n int, err error) {\n\tif gz.zerr != nil {\n\t\treturn 0, gz.zerr\n\t}\n\tif gz.zr == nil {\n\t\tgz.zr, err = gzip.NewReader(gz.body)\n\t\tif err != nil {\n\t\t\tgz.zerr = err\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn gz.zr.Read(p)\n}\n\nfunc (gz *gzipReader) Close() error {\n\treturn gz.body.Close()\n}\n\ntype errorReader 
struct{ err error }\n\nfunc (r errorReader) Read(p []byte) (int, error) { return 0, r.err }\n\n// bodyWriterState encapsulates various state around the Transport's writing\n// of the request body, particularly regarding doing delayed writes of the body\n// when the request contains \"Expect: 100-continue\".\ntype bodyWriterState struct {\n\tcs     *clientStream\n\ttimer  *time.Timer   // if non-nil, we're doing a delayed write\n\tfnonce *sync.Once    // to call fn with\n\tfn     func()        // the code to run in the goroutine, writing the body\n\tresc   chan error    // result of fn's execution\n\tdelay  time.Duration // how long we should delay a delayed write for\n}\n\nfunc (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) {\n\ts.cs = cs\n\tif body == nil {\n\t\treturn\n\t}\n\tresc := make(chan error, 1)\n\ts.resc = resc\n\ts.fn = func() {\n\t\tcs.cc.mu.Lock()\n\t\tcs.startedWrite = true\n\t\tcs.cc.mu.Unlock()\n\t\tresc <- cs.writeRequestBody(body, cs.req.Body)\n\t}\n\ts.delay = t.expectContinueTimeout()\n\tif s.delay == 0 ||\n\t\t!httplex.HeaderValuesContainsToken(\n\t\t\tcs.req.Header[\"Expect\"],\n\t\t\t\"100-continue\") {\n\t\treturn\n\t}\n\ts.fnonce = new(sync.Once)\n\n\t// Arm the timer with a very large duration, which we'll\n\t// intentionally lower later. 
It has to be large now because\n\t// we need a handle to it before writing the headers, but the\n\t// s.delay value is defined to not start until after the\n\t// request headers were written.\n\tconst hugeDuration = 365 * 24 * time.Hour\n\ts.timer = time.AfterFunc(hugeDuration, func() {\n\t\ts.fnonce.Do(s.fn)\n\t})\n\treturn\n}\n\nfunc (s bodyWriterState) cancel() {\n\tif s.timer != nil {\n\t\ts.timer.Stop()\n\t}\n}\n\nfunc (s bodyWriterState) on100() {\n\tif s.timer == nil {\n\t\t// If we didn't do a delayed write, ignore the server's\n\t\t// bogus 100 continue response.\n\t\treturn\n\t}\n\ts.timer.Stop()\n\tgo func() { s.fnonce.Do(s.fn) }()\n}\n\n// scheduleBodyWrite starts writing the body, either immediately (in\n// the common case) or after the delay timeout. It should not be\n// called until after the headers have been written.\nfunc (s bodyWriterState) scheduleBodyWrite() {\n\tif s.timer == nil {\n\t\t// We're not doing a delayed write (see\n\t\t// getBodyWriterState), so just start the writing\n\t\t// goroutine immediately.\n\t\tgo s.fn()\n\t\treturn\n\t}\n\ttraceWait100Continue(s.cs.trace)\n\tif s.timer.Stop() {\n\t\ts.timer.Reset(s.delay)\n\t}\n}\n\n// isConnectionCloseRequest reports whether req should use its own\n// connection for a single request and then close the connection.\nfunc isConnectionCloseRequest(req *http.Request) bool {\n\treturn req.Close || httplex.HeaderValuesContainsToken(req.Header[\"Connection\"], \"close\")\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/transport_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"math/rand\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"os\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org/x/net/http2/hpack\"\n)\n\nvar (\n\textNet        = flag.Bool(\"extnet\", false, \"do external network tests\")\n\ttransportHost = flag.String(\"transporthost\", \"http2.golang.org\", \"hostname to use for TestTransport\")\n\tinsecure      = flag.Bool(\"insecure\", false, \"insecure TLS dials\") // TODO: dead code. remove?\n)\n\nvar tlsConfigInsecure = &tls.Config{InsecureSkipVerify: true}\n\ntype testContext struct{}\n\nfunc (testContext) Done() <-chan struct{}                   { return make(chan struct{}) }\nfunc (testContext) Err() error                              { panic(\"should not be called\") }\nfunc (testContext) Deadline() (deadline time.Time, ok bool) { return time.Time{}, false }\nfunc (testContext) Value(key interface{}) interface{}       { return nil }\n\nfunc TestTransportExternal(t *testing.T) {\n\tif !*extNet {\n\t\tt.Skip(\"skipping external network test\")\n\t}\n\treq, _ := http.NewRequest(\"GET\", \"https://\"+*transportHost+\"/\", nil)\n\trt := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tres, err := rt.RoundTrip(req)\n\tif err != nil {\n\t\tt.Fatalf(\"%v\", err)\n\t}\n\tres.Write(os.Stdout)\n}\n\ntype fakeTLSConn struct {\n\tnet.Conn\n}\n\nfunc (c *fakeTLSConn) ConnectionState() tls.ConnectionState {\n\treturn tls.ConnectionState{\n\t\tVersion:     tls.VersionTLS12,\n\t\tCipherSuite: cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,\n\t}\n}\n\nfunc startH2cServer(t *testing.T) net.Listener 
{\n\th2Server := &Server{}\n\tl := newLocalListener(t)\n\tgo func() {\n\t\tconn, err := l.Accept()\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\th2Server.ServeConn(&fakeTLSConn{conn}, &ServeConnOpts{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tfmt.Fprintf(w, \"Hello, %v, http: %v\", r.URL.Path, r.TLS == nil)\n\t\t})})\n\t}()\n\treturn l\n}\n\nfunc TestTransportH2c(t *testing.T) {\n\tl := startH2cServer(t)\n\tdefer l.Close()\n\treq, err := http.NewRequest(\"GET\", \"http://\"+l.Addr().String()+\"/foobar\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttr := &Transport{\n\t\tAllowHTTP: true,\n\t\tDialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {\n\t\t\treturn net.Dial(network, addr)\n\t\t},\n\t}\n\tres, err := tr.RoundTrip(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif res.ProtoMajor != 2 {\n\t\tt.Fatal(\"proto not h2c\")\n\t}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := string(body), \"Hello, /foobar, http: true\"; got != want {\n\t\tt.Fatalf(\"response got %v, want %v\", got, want)\n\t}\n}\n\nfunc TestTransport(t *testing.T) {\n\tconst body = \"sup\"\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tio.WriteString(w, body)\n\t}, optOnlyServer)\n\tdefer st.Close()\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\n\treq, err := http.NewRequest(\"GET\", st.ts.URL, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tres, err := tr.RoundTrip(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer res.Body.Close()\n\n\tt.Logf(\"Got res: %+v\", res)\n\tif g, w := res.StatusCode, 200; g != w {\n\t\tt.Errorf(\"StatusCode = %v; want %v\", g, w)\n\t}\n\tif g, w := res.Status, \"200 OK\"; g != w {\n\t\tt.Errorf(\"Status = %q; want %q\", g, w)\n\t}\n\twantHeader := http.Header{\n\t\t\"Content-Length\": []string{\"3\"},\n\t\t\"Content-Type\":   
[]string{\"text/plain; charset=utf-8\"},\n\t\t\"Date\":           []string{\"XXX\"}, // see cleanDate\n\t}\n\tcleanDate(res)\n\tif !reflect.DeepEqual(res.Header, wantHeader) {\n\t\tt.Errorf(\"res Header = %v; want %v\", res.Header, wantHeader)\n\t}\n\tif res.Request != req {\n\t\tt.Errorf(\"Response.Request = %p; want %p\", res.Request, req)\n\t}\n\tif res.TLS == nil {\n\t\tt.Error(\"Response.TLS = nil; want non-nil\")\n\t}\n\tslurp, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tt.Errorf(\"Body read: %v\", err)\n\t} else if string(slurp) != body {\n\t\tt.Errorf(\"Body = %q; want %q\", slurp, body)\n\t}\n}\n\nfunc onSameConn(t *testing.T, modReq func(*http.Request)) bool {\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tio.WriteString(w, r.RemoteAddr)\n\t}, optOnlyServer, func(c net.Conn, st http.ConnState) {\n\t\tt.Logf(\"conn %v is now state %v\", c.RemoteAddr(), st)\n\t})\n\tdefer st.Close()\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\tget := func() string {\n\t\treq, err := http.NewRequest(\"GET\", st.ts.URL, nil)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tmodReq(req)\n\t\tres, err := tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer res.Body.Close()\n\t\tslurp, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Body read: %v\", err)\n\t\t}\n\t\taddr := strings.TrimSpace(string(slurp))\n\t\tif addr == \"\" {\n\t\t\tt.Fatalf(\"didn't get an addr in response\")\n\t\t}\n\t\treturn addr\n\t}\n\tfirst := get()\n\tsecond := get()\n\treturn first == second\n}\n\nfunc TestTransportReusesConns(t *testing.T) {\n\tif !onSameConn(t, func(*http.Request) {}) {\n\t\tt.Errorf(\"first and second responses were on different connections\")\n\t}\n}\n\nfunc TestTransportReusesConn_RequestClose(t *testing.T) {\n\tif onSameConn(t, func(r *http.Request) { r.Close = true }) {\n\t\tt.Errorf(\"first and second responses were not on different 
connections\")\n\t}\n}\n\nfunc TestTransportReusesConn_ConnClose(t *testing.T) {\n\tif onSameConn(t, func(r *http.Request) { r.Header.Set(\"Connection\", \"close\") }) {\n\t\tt.Errorf(\"first and second responses were not on different connections\")\n\t}\n}\n\n// Tests that the Transport only keeps one pending dial open per destination address.\n// https://golang.org/issue/13397\nfunc TestTransportGroupsPendingDials(t *testing.T) {\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tio.WriteString(w, r.RemoteAddr)\n\t}, optOnlyServer)\n\tdefer st.Close()\n\ttr := &Transport{\n\t\tTLSClientConfig: tlsConfigInsecure,\n\t}\n\tdefer tr.CloseIdleConnections()\n\tvar (\n\t\tmu    sync.Mutex\n\t\tdials = map[string]int{}\n\t)\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\treq, err := http.NewRequest(\"GET\", st.ts.URL, nil)\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tres, err := tr.RoundTrip(req)\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer res.Body.Close()\n\t\t\tslurp, err := ioutil.ReadAll(res.Body)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Body read: %v\", err)\n\t\t\t}\n\t\t\taddr := strings.TrimSpace(string(slurp))\n\t\t\tif addr == \"\" {\n\t\t\t\tt.Errorf(\"didn't get an addr in response\")\n\t\t\t}\n\t\t\tmu.Lock()\n\t\t\tdials[addr]++\n\t\t\tmu.Unlock()\n\t\t}()\n\t}\n\twg.Wait()\n\tif len(dials) != 1 {\n\t\tt.Errorf(\"saw %d dials; want 1: %v\", len(dials), dials)\n\t}\n\ttr.CloseIdleConnections()\n\tif err := retry(50, 10*time.Millisecond, func() error {\n\t\tcp, ok := tr.connPool().(*clientConnPool)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Conn pool is %T; want *clientConnPool\", tr.connPool())\n\t\t}\n\t\tcp.mu.Lock()\n\t\tdefer cp.mu.Unlock()\n\t\tif len(cp.dialing) != 0 {\n\t\t\treturn fmt.Errorf(\"dialing map = %v; want empty\", cp.dialing)\n\t\t}\n\t\tif len(cp.conns) != 0 {\n\t\t\treturn 
fmt.Errorf(\"conns = %v; want empty\", cp.conns)\n\t\t}\n\t\tif len(cp.keys) != 0 {\n\t\t\treturn fmt.Errorf(\"keys = %v; want empty\", cp.keys)\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\tt.Errorf(\"State of pool after CloseIdleConnections: %v\", err)\n\t}\n}\n\nfunc retry(tries int, delay time.Duration, fn func() error) error {\n\tvar err error\n\tfor i := 0; i < tries; i++ {\n\t\terr = fn()\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(delay)\n\t}\n\treturn err\n}\n\nfunc TestTransportAbortClosesPipes(t *testing.T) {\n\tshutdown := make(chan struct{})\n\tst := newServerTester(t,\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tw.(http.Flusher).Flush()\n\t\t\t<-shutdown\n\t\t},\n\t\toptOnlyServer,\n\t)\n\tdefer st.Close()\n\tdefer close(shutdown) // we must shutdown before st.Close() to avoid hanging\n\n\tdone := make(chan struct{})\n\trequestMade := make(chan struct{})\n\tgo func() {\n\t\tdefer close(done)\n\t\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\t\treq, err := http.NewRequest(\"GET\", st.ts.URL, nil)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tres, err := tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer res.Body.Close()\n\t\tclose(requestMade)\n\t\t_, err = ioutil.ReadAll(res.Body)\n\t\tif err == nil {\n\t\t\tt.Error(\"expected error from res.Body.Read\")\n\t\t}\n\t}()\n\n\t<-requestMade\n\t// Now force the serve loop to end, via closing the connection.\n\tst.closeConn()\n\t// deadlock? that's a bug.\n\tselect {\n\tcase <-done:\n\tcase <-time.After(3 * time.Second):\n\t\tt.Fatal(\"timeout\")\n\t}\n}\n\n// TODO: merge this with TestTransportBody to make TestTransportRequest? 
This\n// could be a table-driven test with extra goodies.\nfunc TestTransportPath(t *testing.T) {\n\tgotc := make(chan *url.URL, 1)\n\tst := newServerTester(t,\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tgotc <- r.URL\n\t\t},\n\t\toptOnlyServer,\n\t)\n\tdefer st.Close()\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\tconst (\n\t\tpath  = \"/testpath\"\n\t\tquery = \"q=1\"\n\t)\n\tsurl := st.ts.URL + path + \"?\" + query\n\treq, err := http.NewRequest(\"POST\", surl, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tc := &http.Client{Transport: tr}\n\tres, err := c.Do(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer res.Body.Close()\n\tgot := <-gotc\n\tif got.Path != path {\n\t\tt.Errorf(\"Read Path = %q; want %q\", got.Path, path)\n\t}\n\tif got.RawQuery != query {\n\t\tt.Errorf(\"Read RawQuery = %q; want %q\", got.RawQuery, query)\n\t}\n}\n\nfunc randString(n int) string {\n\trnd := rand.New(rand.NewSource(int64(n)))\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = byte(rnd.Intn(256))\n\t}\n\treturn string(b)\n}\n\ntype panicReader struct{}\n\nfunc (panicReader) Read([]byte) (int, error) { panic(\"unexpected Read\") }\nfunc (panicReader) Close() error             { panic(\"unexpected Close\") }\n\nfunc TestActualContentLength(t *testing.T) {\n\ttests := []struct {\n\t\treq  *http.Request\n\t\twant int64\n\t}{\n\t\t// Verify we don't read from Body:\n\t\t0: {\n\t\t\treq:  &http.Request{Body: panicReader{}},\n\t\t\twant: -1,\n\t\t},\n\t\t// nil Body means 0, regardless of ContentLength:\n\t\t1: {\n\t\t\treq:  &http.Request{Body: nil, ContentLength: 5},\n\t\t\twant: 0,\n\t\t},\n\t\t// ContentLength is used if set.\n\t\t2: {\n\t\t\treq:  &http.Request{Body: panicReader{}, ContentLength: 5},\n\t\t\twant: 5,\n\t\t},\n\t\t// http.NoBody means 0, not -1.\n\t\t3: {\n\t\t\treq:  &http.Request{Body: go18httpNoBody()},\n\t\t\twant: 0,\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tgot := 
actualContentLength(tt.req)\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"test[%d]: got %d; want %d\", i, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestTransportBody(t *testing.T) {\n\tbodyTests := []struct {\n\t\tbody         string\n\t\tnoContentLen bool\n\t}{\n\t\t{body: \"some message\"},\n\t\t{body: \"some message\", noContentLen: true},\n\t\t{body: strings.Repeat(\"a\", 1<<20), noContentLen: true},\n\t\t{body: strings.Repeat(\"a\", 1<<20)},\n\t\t{body: randString(16<<10 - 1)},\n\t\t{body: randString(16 << 10)},\n\t\t{body: randString(16<<10 + 1)},\n\t\t{body: randString(512<<10 - 1)},\n\t\t{body: randString(512 << 10)},\n\t\t{body: randString(512<<10 + 1)},\n\t\t{body: randString(1<<20 - 1)},\n\t\t{body: randString(1 << 20)},\n\t\t{body: randString(1<<20 + 2)},\n\t}\n\n\ttype reqInfo struct {\n\t\treq   *http.Request\n\t\tslurp []byte\n\t\terr   error\n\t}\n\tgotc := make(chan reqInfo, 1)\n\tst := newServerTester(t,\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tslurp, err := ioutil.ReadAll(r.Body)\n\t\t\tif err != nil {\n\t\t\t\tgotc <- reqInfo{err: err}\n\t\t\t} else {\n\t\t\t\tgotc <- reqInfo{req: r, slurp: slurp}\n\t\t\t}\n\t\t},\n\t\toptOnlyServer,\n\t)\n\tdefer st.Close()\n\n\tfor i, tt := range bodyTests {\n\t\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\t\tdefer tr.CloseIdleConnections()\n\n\t\tvar body io.Reader = strings.NewReader(tt.body)\n\t\tif tt.noContentLen {\n\t\t\tbody = struct{ io.Reader }{body} // just a Reader, hiding concrete type and other methods\n\t\t}\n\t\treq, err := http.NewRequest(\"POST\", st.ts.URL, body)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"#%d: %v\", i, err)\n\t\t}\n\t\tc := &http.Client{Transport: tr}\n\t\tres, err := c.Do(req)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"#%d: %v\", i, err)\n\t\t}\n\t\tdefer res.Body.Close()\n\t\tri := <-gotc\n\t\tif ri.err != nil {\n\t\t\tt.Errorf(\"#%d: read error: %v\", i, ri.err)\n\t\t\tcontinue\n\t\t}\n\t\tif got := string(ri.slurp); got != tt.body {\n\t\t\tt.Errorf(\"#%d: 
Read body mismatch.\\n got: %q (len %d)\\nwant: %q (len %d)\", i, shortString(got), len(got), shortString(tt.body), len(tt.body))\n\t\t}\n\t\twantLen := int64(len(tt.body))\n\t\tif tt.noContentLen && tt.body != \"\" {\n\t\t\twantLen = -1\n\t\t}\n\t\tif ri.req.ContentLength != wantLen {\n\t\t\tt.Errorf(\"#%d. handler got ContentLength = %v; want %v\", i, ri.req.ContentLength, wantLen)\n\t\t}\n\t}\n}\n\nfunc shortString(v string) string {\n\tconst maxLen = 100\n\tif len(v) <= maxLen {\n\t\treturn v\n\t}\n\treturn fmt.Sprintf(\"%v[...%d bytes omitted...]%v\", v[:maxLen/2], len(v)-maxLen, v[len(v)-maxLen/2:])\n}\n\nfunc TestTransportDialTLS(t *testing.T) {\n\tvar mu sync.Mutex // guards following\n\tvar gotReq, didDial bool\n\n\tts := newServerTester(t,\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tmu.Lock()\n\t\t\tgotReq = true\n\t\t\tmu.Unlock()\n\t\t},\n\t\toptOnlyServer,\n\t)\n\tdefer ts.Close()\n\ttr := &Transport{\n\t\tDialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) {\n\t\t\tmu.Lock()\n\t\t\tdidDial = true\n\t\t\tmu.Unlock()\n\t\t\tcfg.InsecureSkipVerify = true\n\t\t\tc, err := tls.Dial(netw, addr, cfg)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, c.Handshake()\n\t\t},\n\t}\n\tdefer tr.CloseIdleConnections()\n\tclient := &http.Client{Transport: tr}\n\tres, err := client.Get(ts.ts.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tres.Body.Close()\n\tmu.Lock()\n\tif !gotReq {\n\t\tt.Error(\"didn't get request\")\n\t}\n\tif !didDial {\n\t\tt.Error(\"didn't use dial hook\")\n\t}\n}\n\nfunc TestConfigureTransport(t *testing.T) {\n\tt1 := &http.Transport{}\n\terr := ConfigureTransport(t1)\n\tif err == errTransportVersion {\n\t\tt.Skip(err)\n\t}\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got := fmt.Sprintf(\"%#v\", t1); !strings.Contains(got, `\"h2\"`) {\n\t\t// Laziness, to avoid buildtags.\n\t\tt.Errorf(\"stringification of HTTP/1 transport didn't contain \\\"h2\\\": %v\", got)\n\t}\n\twantNextProtos 
:= []string{\"h2\", \"http/1.1\"}\n\tif t1.TLSClientConfig == nil {\n\t\tt.Errorf(\"nil t1.TLSClientConfig\")\n\t} else if !reflect.DeepEqual(t1.TLSClientConfig.NextProtos, wantNextProtos) {\n\t\tt.Errorf(\"TLSClientConfig.NextProtos = %q; want %q\", t1.TLSClientConfig.NextProtos, wantNextProtos)\n\t}\n\tif err := ConfigureTransport(t1); err == nil {\n\t\tt.Error(\"unexpected success on second call to ConfigureTransport\")\n\t}\n\n\t// And does it work?\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tio.WriteString(w, r.Proto)\n\t}, optOnlyServer)\n\tdefer st.Close()\n\n\tt1.TLSClientConfig.InsecureSkipVerify = true\n\tc := &http.Client{Transport: t1}\n\tres, err := c.Get(st.ts.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tslurp, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := string(slurp), \"HTTP/2.0\"; got != want {\n\t\tt.Errorf(\"body = %q; want %q\", got, want)\n\t}\n}\n\ntype capitalizeReader struct {\n\tr io.Reader\n}\n\nfunc (cr capitalizeReader) Read(p []byte) (n int, err error) {\n\tn, err = cr.r.Read(p)\n\tfor i, b := range p[:n] {\n\t\tif b >= 'a' && b <= 'z' {\n\t\t\tp[i] = b - ('a' - 'A')\n\t\t}\n\t}\n\treturn\n}\n\ntype flushWriter struct {\n\tw io.Writer\n}\n\nfunc (fw flushWriter) Write(p []byte) (n int, err error) {\n\tn, err = fw.w.Write(p)\n\tif f, ok := fw.w.(http.Flusher); ok {\n\t\tf.Flush()\n\t}\n\treturn\n}\n\ntype clientTester struct {\n\tt      *testing.T\n\ttr     *Transport\n\tsc, cc net.Conn // server and client conn\n\tfr     *Framer  // server's framer\n\tclient func() error\n\tserver func() error\n}\n\nfunc newClientTester(t *testing.T) *clientTester {\n\tvar dialOnce struct {\n\t\tsync.Mutex\n\t\tdialed bool\n\t}\n\tct := &clientTester{\n\t\tt: t,\n\t}\n\tct.tr = &Transport{\n\t\tTLSClientConfig: tlsConfigInsecure,\n\t\tDialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {\n\t\t\tdialOnce.Lock()\n\t\t\tdefer 
dialOnce.Unlock()\n\t\t\tif dialOnce.dialed {\n\t\t\t\treturn nil, errors.New(\"only one dial allowed in test mode\")\n\t\t\t}\n\t\t\tdialOnce.dialed = true\n\t\t\treturn ct.cc, nil\n\t\t},\n\t}\n\n\tln := newLocalListener(t)\n\tcc, err := net.Dial(\"tcp\", ln.Addr().String())\n\tif err != nil {\n\t\tt.Fatal(err)\n\n\t}\n\tsc, err := ln.Accept()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tln.Close()\n\tct.cc = cc\n\tct.sc = sc\n\tct.fr = NewFramer(sc, sc)\n\treturn ct\n}\n\nfunc newLocalListener(t *testing.T) net.Listener {\n\tln, err := net.Listen(\"tcp4\", \"127.0.0.1:0\")\n\tif err == nil {\n\t\treturn ln\n\t}\n\tln, err = net.Listen(\"tcp6\", \"[::1]:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn ln\n}\n\nfunc (ct *clientTester) greet(settings ...Setting) {\n\tbuf := make([]byte, len(ClientPreface))\n\t_, err := io.ReadFull(ct.sc, buf)\n\tif err != nil {\n\t\tct.t.Fatalf(\"reading client preface: %v\", err)\n\t}\n\tf, err := ct.fr.ReadFrame()\n\tif err != nil {\n\t\tct.t.Fatalf(\"Reading client settings frame: %v\", err)\n\t}\n\tif sf, ok := f.(*SettingsFrame); !ok {\n\t\tct.t.Fatalf(\"Wanted client settings frame; got %v\", f)\n\t\t_ = sf // stash it away?\n\t}\n\tif err := ct.fr.WriteSettings(settings...); err != nil {\n\t\tct.t.Fatal(err)\n\t}\n\tif err := ct.fr.WriteSettingsAck(); err != nil {\n\t\tct.t.Fatal(err)\n\t}\n}\n\nfunc (ct *clientTester) readNonSettingsFrame() (Frame, error) {\n\tfor {\n\t\tf, err := ct.fr.ReadFrame()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif _, ok := f.(*SettingsFrame); ok {\n\t\t\tcontinue\n\t\t}\n\t\treturn f, nil\n\t}\n}\n\nfunc (ct *clientTester) cleanup() {\n\tct.tr.CloseIdleConnections()\n}\n\nfunc (ct *clientTester) run() {\n\terrc := make(chan error, 2)\n\tct.start(\"client\", errc, ct.client)\n\tct.start(\"server\", errc, ct.server)\n\tdefer ct.cleanup()\n\tfor i := 0; i < 2; i++ {\n\t\tif err := <-errc; err != nil {\n\t\t\tct.t.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (ct 
*clientTester) start(which string, errc chan<- error, fn func() error) {\n\tgo func() {\n\t\tfinished := false\n\t\tvar err error\n\t\tdefer func() {\n\t\t\tif !finished {\n\t\t\t\terr = fmt.Errorf(\"%s goroutine didn't finish.\", which)\n\t\t\t} else if err != nil {\n\t\t\t\terr = fmt.Errorf(\"%s: %v\", which, err)\n\t\t\t}\n\t\t\terrc <- err\n\t\t}()\n\t\terr = fn()\n\t\tfinished = true\n\t}()\n}\n\nfunc (ct *clientTester) readFrame() (Frame, error) {\n\treturn readFrameTimeout(ct.fr, 2*time.Second)\n}\n\nfunc (ct *clientTester) firstHeaders() (*HeadersFrame, error) {\n\tfor {\n\t\tf, err := ct.readFrame()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"ReadFrame while waiting for Headers: %v\", err)\n\t\t}\n\t\tswitch f.(type) {\n\t\tcase *WindowUpdateFrame, *SettingsFrame:\n\t\t\tcontinue\n\t\t}\n\t\thf, ok := f.(*HeadersFrame)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"Got %T; want HeadersFrame\", f)\n\t\t}\n\t\treturn hf, nil\n\t}\n}\n\ntype countingReader struct {\n\tn *int64\n}\n\nfunc (r countingReader) Read(p []byte) (n int, err error) {\n\tfor i := range p {\n\t\tp[i] = byte(i)\n\t}\n\tatomic.AddInt64(r.n, int64(len(p)))\n\treturn len(p), err\n}\n\nfunc TestTransportReqBodyAfterResponse_200(t *testing.T) { testTransportReqBodyAfterResponse(t, 200) }\nfunc TestTransportReqBodyAfterResponse_403(t *testing.T) { testTransportReqBodyAfterResponse(t, 403) }\n\nfunc testTransportReqBodyAfterResponse(t *testing.T, status int) {\n\tconst bodySize = 10 << 20\n\tclientDone := make(chan struct{})\n\tct := newClientTester(t)\n\tct.client = func() error {\n\t\tdefer ct.cc.(*net.TCPConn).CloseWrite()\n\t\tdefer close(clientDone)\n\n\t\tvar n int64 // atomic\n\t\treq, err := http.NewRequest(\"PUT\", \"https://dummy.tld/\", io.LimitReader(countingReader{&n}, bodySize))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err := ct.tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"RoundTrip: %v\", err)\n\t\t}\n\t\tdefer res.Body.Close()\n\t\tif 
res.StatusCode != status {\n\t\t\treturn fmt.Errorf(\"status code = %v; want %v\", res.StatusCode, status)\n\t\t}\n\t\tslurp, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Slurp: %v\", err)\n\t\t}\n\t\tif len(slurp) > 0 {\n\t\t\treturn fmt.Errorf(\"unexpected body: %q\", slurp)\n\t\t}\n\t\tif status == 200 {\n\t\t\tif got := atomic.LoadInt64(&n); got != bodySize {\n\t\t\t\treturn fmt.Errorf(\"For 200 response, Transport wrote %d bytes; want %d\", got, bodySize)\n\t\t\t}\n\t\t} else {\n\t\t\tif got := atomic.LoadInt64(&n); got == 0 || got >= bodySize {\n\t\t\t\treturn fmt.Errorf(\"For %d response, Transport wrote %d bytes; want (0,%d) exclusive\", status, got, bodySize)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\tct.greet()\n\t\tvar buf bytes.Buffer\n\t\tenc := hpack.NewEncoder(&buf)\n\t\tvar dataRecv int64\n\t\tvar closed bool\n\t\tfor {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\tselect {\n\t\t\t\tcase <-clientDone:\n\t\t\t\t\t// If the client's done, it\n\t\t\t\t\t// will have reported any\n\t\t\t\t\t// errors on its side.\n\t\t\t\t\treturn nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\t//println(fmt.Sprintf(\"server got frame: %v\", f))\n\t\t\tswitch f := f.(type) {\n\t\t\tcase *WindowUpdateFrame, *SettingsFrame:\n\t\t\tcase *HeadersFrame:\n\t\t\t\tif !f.HeadersEnded() {\n\t\t\t\t\treturn fmt.Errorf(\"headers should have END_HEADERS be ended: %v\", f)\n\t\t\t\t}\n\t\t\t\tif f.StreamEnded() {\n\t\t\t\t\treturn fmt.Errorf(\"headers contains END_STREAM unexpectedly: %v\", f)\n\t\t\t\t}\n\t\t\tcase *DataFrame:\n\t\t\t\tdataLen := len(f.Data())\n\t\t\t\tif dataLen > 0 {\n\t\t\t\t\tif dataRecv == 0 {\n\t\t\t\t\t\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: strconv.Itoa(status)})\n\t\t\t\t\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\t\t\t\t\tStreamID:      f.StreamID,\n\t\t\t\t\t\t\tEndHeaders:    true,\n\t\t\t\t\t\t\tEndStream:     
false,\n\t\t\t\t\t\t\tBlockFragment: buf.Bytes(),\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t\tif err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tdataRecv += int64(dataLen)\n\n\t\t\t\tif !closed && ((status != 200 && dataRecv > 0) ||\n\t\t\t\t\t(status == 200 && dataRecv == bodySize)) {\n\t\t\t\t\tclosed = true\n\t\t\t\t\tif err := ct.fr.WriteData(f.StreamID, true, nil); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"Unexpected client frame %v\", f)\n\t\t\t}\n\t\t}\n\t}\n\tct.run()\n}\n\n// See golang.org/issue/13444\nfunc TestTransportFullDuplex(t *testing.T) {\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(200) // redundant but for clarity\n\t\tw.(http.Flusher).Flush()\n\t\tio.Copy(flushWriter{w}, capitalizeReader{r.Body})\n\t\tfmt.Fprintf(w, \"bye.\\n\")\n\t}, optOnlyServer)\n\tdefer st.Close()\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\tc := &http.Client{Transport: tr}\n\n\tpr, pw := io.Pipe()\n\treq, err := http.NewRequest(\"PUT\", st.ts.URL, ioutil.NopCloser(pr))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treq.ContentLength = -1\n\tres, err := c.Do(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode != 200 {\n\t\tt.Fatalf(\"StatusCode = %v; want %v\", res.StatusCode, 200)\n\t}\n\tbs := bufio.NewScanner(res.Body)\n\twant := func(v string) {\n\t\tif !bs.Scan() {\n\t\t\tt.Fatalf(\"wanted to read %q but Scan() = false, err = %v\", v, bs.Err())\n\t\t}\n\t}\n\twrite := func(v string) {\n\t\t_, err := io.WriteString(pw, v)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"pipe write: %v\", 
err)\n\t\t}\n\t}\n\twrite(\"foo\\n\")\n\twant(\"FOO\")\n\twrite(\"bar\\n\")\n\twant(\"BAR\")\n\tpw.Close()\n\twant(\"bye.\")\n\tif err := bs.Err(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestTransportConnectRequest(t *testing.T) {\n\tgotc := make(chan *http.Request, 1)\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tgotc <- r\n\t}, optOnlyServer)\n\tdefer st.Close()\n\n\tu, err := url.Parse(st.ts.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\tc := &http.Client{Transport: tr}\n\n\ttests := []struct {\n\t\treq  *http.Request\n\t\twant string\n\t}{\n\t\t{\n\t\t\treq: &http.Request{\n\t\t\t\tMethod: \"CONNECT\",\n\t\t\t\tHeader: http.Header{},\n\t\t\t\tURL:    u,\n\t\t\t},\n\t\t\twant: u.Host,\n\t\t},\n\t\t{\n\t\t\treq: &http.Request{\n\t\t\t\tMethod: \"CONNECT\",\n\t\t\t\tHeader: http.Header{},\n\t\t\t\tURL:    u,\n\t\t\t\tHost:   \"example.com:123\",\n\t\t\t},\n\t\t\twant: \"example.com:123\",\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tres, err := c.Do(tt.req)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%d. 
RoundTrip = %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tres.Body.Close()\n\t\treq := <-gotc\n\t\tif req.Method != \"CONNECT\" {\n\t\t\tt.Errorf(\"method = %q; want CONNECT\", req.Method)\n\t\t}\n\t\tif req.Host != tt.want {\n\t\t\tt.Errorf(\"Host = %q; want %q\", req.Host, tt.want)\n\t\t}\n\t\tif req.URL.Host != tt.want {\n\t\t\tt.Errorf(\"URL.Host = %q; want %q\", req.URL.Host, tt.want)\n\t\t}\n\t}\n}\n\ntype headerType int\n\nconst (\n\tnoHeader headerType = iota // omitted\n\toneHeader\n\tsplitHeader // broken into continuation on purpose\n)\n\nconst (\n\tf0 = noHeader\n\tf1 = oneHeader\n\tf2 = splitHeader\n\td0 = false\n\td1 = true\n)\n\n// Test all 36 combinations of response frame orders:\n//    (3 ways of 100-continue) * (2 ways of headers) * (2 ways of data) * (3 ways of trailers):func TestTransportResponsePattern_00f0(t *testing.T) { testTransportResponsePattern(h0, h1, false, h0) }\n// Generated by http://play.golang.org/p/SScqYKJYXd\nfunc TestTransportResPattern_c0h1d0t0(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f0) }\nfunc TestTransportResPattern_c0h1d0t1(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f1) }\nfunc TestTransportResPattern_c0h1d0t2(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f2) }\nfunc TestTransportResPattern_c0h1d1t0(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f0) }\nfunc TestTransportResPattern_c0h1d1t1(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f1) }\nfunc TestTransportResPattern_c0h1d1t2(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f2) }\nfunc TestTransportResPattern_c0h2d0t0(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f0) }\nfunc TestTransportResPattern_c0h2d0t1(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f1) }\nfunc TestTransportResPattern_c0h2d0t2(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f2) }\nfunc TestTransportResPattern_c0h2d1t0(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f0) }\nfunc TestTransportResPattern_c0h2d1t1(t 
*testing.T) { testTransportResPattern(t, f0, f2, d1, f1) }\nfunc TestTransportResPattern_c0h2d1t2(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f2) }\nfunc TestTransportResPattern_c1h1d0t0(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f0) }\nfunc TestTransportResPattern_c1h1d0t1(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f1) }\nfunc TestTransportResPattern_c1h1d0t2(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f2) }\nfunc TestTransportResPattern_c1h1d1t0(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f0) }\nfunc TestTransportResPattern_c1h1d1t1(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f1) }\nfunc TestTransportResPattern_c1h1d1t2(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f2) }\nfunc TestTransportResPattern_c1h2d0t0(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f0) }\nfunc TestTransportResPattern_c1h2d0t1(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f1) }\nfunc TestTransportResPattern_c1h2d0t2(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f2) }\nfunc TestTransportResPattern_c1h2d1t0(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f0) }\nfunc TestTransportResPattern_c1h2d1t1(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f1) }\nfunc TestTransportResPattern_c1h2d1t2(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f2) }\nfunc TestTransportResPattern_c2h1d0t0(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f0) }\nfunc TestTransportResPattern_c2h1d0t1(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f1) }\nfunc TestTransportResPattern_c2h1d0t2(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f2) }\nfunc TestTransportResPattern_c2h1d1t0(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f0) }\nfunc TestTransportResPattern_c2h1d1t1(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f1) }\nfunc TestTransportResPattern_c2h1d1t2(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f2) }\nfunc TestTransportResPattern_c2h2d0t0(t 
*testing.T) { testTransportResPattern(t, f2, f2, d0, f0) }\nfunc TestTransportResPattern_c2h2d0t1(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f1) }\nfunc TestTransportResPattern_c2h2d0t2(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f2) }\nfunc TestTransportResPattern_c2h2d1t0(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f0) }\nfunc TestTransportResPattern_c2h2d1t1(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f1) }\nfunc TestTransportResPattern_c2h2d1t2(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f2) }\n\nfunc testTransportResPattern(t *testing.T, expect100Continue, resHeader headerType, withData bool, trailers headerType) {\n\tconst reqBody = \"some request body\"\n\tconst resBody = \"some response body\"\n\n\tif resHeader == noHeader {\n\t\t// TODO: test 100-continue followed by immediate\n\t\t// server stream reset, without headers in the middle?\n\t\tpanic(\"invalid combination\")\n\t}\n\n\tct := newClientTester(t)\n\tct.client = func() error {\n\t\treq, _ := http.NewRequest(\"POST\", \"https://dummy.tld/\", strings.NewReader(reqBody))\n\t\tif expect100Continue != noHeader {\n\t\t\treq.Header.Set(\"Expect\", \"100-continue\")\n\t\t}\n\t\tres, err := ct.tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"RoundTrip: %v\", err)\n\t\t}\n\t\tdefer res.Body.Close()\n\t\tif res.StatusCode != 200 {\n\t\t\treturn fmt.Errorf(\"status code = %v; want 200\", res.StatusCode)\n\t\t}\n\t\tslurp, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Slurp: %v\", err)\n\t\t}\n\t\twantBody := resBody\n\t\tif !withData {\n\t\t\twantBody = \"\"\n\t\t}\n\t\tif string(slurp) != wantBody {\n\t\t\treturn fmt.Errorf(\"body = %q; want %q\", slurp, wantBody)\n\t\t}\n\t\tif trailers == noHeader {\n\t\t\tif len(res.Trailer) > 0 {\n\t\t\t\tt.Errorf(\"Trailer = %v; want none\", res.Trailer)\n\t\t\t}\n\t\t} else {\n\t\t\twant := http.Header{\"Some-Trailer\": {\"some-value\"}}\n\t\t\tif 
!reflect.DeepEqual(res.Trailer, want) {\n\t\t\t\tt.Errorf(\"Trailer = %v; want %v\", res.Trailer, want)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\tct.greet()\n\t\tvar buf bytes.Buffer\n\t\tenc := hpack.NewEncoder(&buf)\n\n\t\tfor {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tendStream := false\n\t\t\tsend := func(mode headerType) {\n\t\t\t\thbf := buf.Bytes()\n\t\t\t\tswitch mode {\n\t\t\t\tcase oneHeader:\n\t\t\t\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\t\t\t\tStreamID:      f.Header().StreamID,\n\t\t\t\t\t\tEndHeaders:    true,\n\t\t\t\t\t\tEndStream:     endStream,\n\t\t\t\t\t\tBlockFragment: hbf,\n\t\t\t\t\t})\n\t\t\t\tcase splitHeader:\n\t\t\t\t\tif len(hbf) < 2 {\n\t\t\t\t\t\tpanic(\"too small\")\n\t\t\t\t\t}\n\t\t\t\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\t\t\t\tStreamID:      f.Header().StreamID,\n\t\t\t\t\t\tEndHeaders:    false,\n\t\t\t\t\t\tEndStream:     endStream,\n\t\t\t\t\t\tBlockFragment: hbf[:1],\n\t\t\t\t\t})\n\t\t\t\t\tct.fr.WriteContinuation(f.Header().StreamID, true, hbf[1:])\n\t\t\t\tdefault:\n\t\t\t\t\tpanic(\"bogus mode\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tswitch f := f.(type) {\n\t\t\tcase *WindowUpdateFrame, *SettingsFrame:\n\t\t\tcase *DataFrame:\n\t\t\t\tif !f.StreamEnded() {\n\t\t\t\t\t// No need to send flow control tokens. 
The test request body is tiny.\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// Response headers (1+ frames; 1 or 2 in this test, but never 0)\n\t\t\t\t{\n\t\t\t\t\tbuf.Reset()\n\t\t\t\t\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\t\t\t\t\tenc.WriteField(hpack.HeaderField{Name: \"x-foo\", Value: \"blah\"})\n\t\t\t\t\tenc.WriteField(hpack.HeaderField{Name: \"x-bar\", Value: \"more\"})\n\t\t\t\t\tif trailers != noHeader {\n\t\t\t\t\t\tenc.WriteField(hpack.HeaderField{Name: \"trailer\", Value: \"some-trailer\"})\n\t\t\t\t\t}\n\t\t\t\t\tendStream = withData == false && trailers == noHeader\n\t\t\t\t\tsend(resHeader)\n\t\t\t\t}\n\t\t\t\tif withData {\n\t\t\t\t\tendStream = trailers == noHeader\n\t\t\t\t\tct.fr.WriteData(f.StreamID, endStream, []byte(resBody))\n\t\t\t\t}\n\t\t\t\tif trailers != noHeader {\n\t\t\t\t\tendStream = true\n\t\t\t\t\tbuf.Reset()\n\t\t\t\t\tenc.WriteField(hpack.HeaderField{Name: \"some-trailer\", Value: \"some-value\"})\n\t\t\t\t\tsend(trailers)\n\t\t\t\t}\n\t\t\t\tif endStream {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase *HeadersFrame:\n\t\t\t\tif expect100Continue != noHeader {\n\t\t\t\t\tbuf.Reset()\n\t\t\t\t\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"100\"})\n\t\t\t\t\tsend(expect100Continue)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tct.run()\n}\n\nfunc TestTransportReceiveUndeclaredTrailer(t *testing.T) {\n\tct := newClientTester(t)\n\tct.client = func() error {\n\t\treq, _ := http.NewRequest(\"GET\", \"https://dummy.tld/\", nil)\n\t\tres, err := ct.tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"RoundTrip: %v\", err)\n\t\t}\n\t\tdefer res.Body.Close()\n\t\tif res.StatusCode != 200 {\n\t\t\treturn fmt.Errorf(\"status code = %v; want 200\", res.StatusCode)\n\t\t}\n\t\tslurp, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"res.Body ReadAll error = %q, %v; want %v\", slurp, err, nil)\n\t\t}\n\t\tif len(slurp) > 0 {\n\t\t\treturn fmt.Errorf(\"body = %q; want 
nothing\", slurp)\n\t\t}\n\t\tif _, ok := res.Trailer[\"Some-Trailer\"]; !ok {\n\t\t\treturn fmt.Errorf(\"expected Some-Trailer\")\n\t\t}\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\tct.greet()\n\n\t\tvar n int\n\t\tvar hf *HeadersFrame\n\t\tfor hf == nil && n < 10 {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thf, _ = f.(*HeadersFrame)\n\t\t\tn++\n\t\t}\n\n\t\tvar buf bytes.Buffer\n\t\tenc := hpack.NewEncoder(&buf)\n\n\t\t// send headers without Trailer header\n\t\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\tStreamID:      hf.StreamID,\n\t\t\tEndHeaders:    true,\n\t\t\tEndStream:     false,\n\t\t\tBlockFragment: buf.Bytes(),\n\t\t})\n\n\t\t// send trailers\n\t\tbuf.Reset()\n\t\tenc.WriteField(hpack.HeaderField{Name: \"some-trailer\", Value: \"I'm an undeclared Trailer!\"})\n\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\tStreamID:      hf.StreamID,\n\t\t\tEndHeaders:    true,\n\t\t\tEndStream:     true,\n\t\t\tBlockFragment: buf.Bytes(),\n\t\t})\n\t\treturn nil\n\t}\n\tct.run()\n}\n\nfunc TestTransportInvalidTrailer_Pseudo1(t *testing.T) {\n\ttestTransportInvalidTrailer_Pseudo(t, oneHeader)\n}\nfunc TestTransportInvalidTrailer_Pseudo2(t *testing.T) {\n\ttestTransportInvalidTrailer_Pseudo(t, splitHeader)\n}\nfunc testTransportInvalidTrailer_Pseudo(t *testing.T, trailers headerType) {\n\ttestInvalidTrailer(t, trailers, pseudoHeaderError(\":colon\"), func(enc *hpack.Encoder) {\n\t\tenc.WriteField(hpack.HeaderField{Name: \":colon\", Value: \"foo\"})\n\t\tenc.WriteField(hpack.HeaderField{Name: \"foo\", Value: \"bar\"})\n\t})\n}\n\nfunc TestTransportInvalidTrailer_Capital1(t *testing.T) {\n\ttestTransportInvalidTrailer_Capital(t, oneHeader)\n}\nfunc TestTransportInvalidTrailer_Capital2(t *testing.T) {\n\ttestTransportInvalidTrailer_Capital(t, splitHeader)\n}\nfunc testTransportInvalidTrailer_Capital(t *testing.T, trailers headerType) 
{\n\ttestInvalidTrailer(t, trailers, headerFieldNameError(\"Capital\"), func(enc *hpack.Encoder) {\n\t\tenc.WriteField(hpack.HeaderField{Name: \"foo\", Value: \"bar\"})\n\t\tenc.WriteField(hpack.HeaderField{Name: \"Capital\", Value: \"bad\"})\n\t})\n}\nfunc TestTransportInvalidTrailer_EmptyFieldName(t *testing.T) {\n\ttestInvalidTrailer(t, oneHeader, headerFieldNameError(\"\"), func(enc *hpack.Encoder) {\n\t\tenc.WriteField(hpack.HeaderField{Name: \"\", Value: \"bad\"})\n\t})\n}\nfunc TestTransportInvalidTrailer_BinaryFieldValue(t *testing.T) {\n\ttestInvalidTrailer(t, oneHeader, headerFieldValueError(\"has\\nnewline\"), func(enc *hpack.Encoder) {\n\t\tenc.WriteField(hpack.HeaderField{Name: \"x\", Value: \"has\\nnewline\"})\n\t})\n}\n\nfunc testInvalidTrailer(t *testing.T, trailers headerType, wantErr error, writeTrailer func(*hpack.Encoder)) {\n\tct := newClientTester(t)\n\tct.client = func() error {\n\t\treq, _ := http.NewRequest(\"GET\", \"https://dummy.tld/\", nil)\n\t\tres, err := ct.tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"RoundTrip: %v\", err)\n\t\t}\n\t\tdefer res.Body.Close()\n\t\tif res.StatusCode != 200 {\n\t\t\treturn fmt.Errorf(\"status code = %v; want 200\", res.StatusCode)\n\t\t}\n\t\tslurp, err := ioutil.ReadAll(res.Body)\n\t\tse, ok := err.(StreamError)\n\t\tif !ok || se.Cause != wantErr {\n\t\t\treturn fmt.Errorf(\"res.Body ReadAll error = %q, %#v; want StreamError with cause %T, %#v\", slurp, err, wantErr, wantErr)\n\t\t}\n\t\tif len(slurp) > 0 {\n\t\t\treturn fmt.Errorf(\"body = %q; want nothing\", slurp)\n\t\t}\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\tct.greet()\n\t\tvar buf bytes.Buffer\n\t\tenc := hpack.NewEncoder(&buf)\n\n\t\tfor {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tswitch f := f.(type) {\n\t\t\tcase *HeadersFrame:\n\t\t\t\tvar endStream bool\n\t\t\t\tsend := func(mode headerType) {\n\t\t\t\t\thbf := buf.Bytes()\n\t\t\t\t\tswitch mode 
{\n\t\t\t\t\tcase oneHeader:\n\t\t\t\t\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\t\t\t\t\tStreamID:      f.StreamID,\n\t\t\t\t\t\t\tEndHeaders:    true,\n\t\t\t\t\t\t\tEndStream:     endStream,\n\t\t\t\t\t\t\tBlockFragment: hbf,\n\t\t\t\t\t\t})\n\t\t\t\t\tcase splitHeader:\n\t\t\t\t\t\tif len(hbf) < 2 {\n\t\t\t\t\t\t\tpanic(\"too small\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\t\t\t\t\tStreamID:      f.StreamID,\n\t\t\t\t\t\t\tEndHeaders:    false,\n\t\t\t\t\t\t\tEndStream:     endStream,\n\t\t\t\t\t\t\tBlockFragment: hbf[:1],\n\t\t\t\t\t\t})\n\t\t\t\t\t\tct.fr.WriteContinuation(f.StreamID, true, hbf[1:])\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tpanic(\"bogus mode\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Response headers (1+ frames; 1 or 2 in this test, but never 0)\n\t\t\t\t{\n\t\t\t\t\tbuf.Reset()\n\t\t\t\t\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\t\t\t\t\tenc.WriteField(hpack.HeaderField{Name: \"trailer\", Value: \"declared\"})\n\t\t\t\t\tendStream = false\n\t\t\t\t\tsend(oneHeader)\n\t\t\t\t}\n\t\t\t\t// Trailers:\n\t\t\t\t{\n\t\t\t\t\tendStream = true\n\t\t\t\t\tbuf.Reset()\n\t\t\t\t\twriteTrailer(enc)\n\t\t\t\t\tsend(trailers)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\tct.run()\n}\n\n// headerListSize returns the HTTP2 header list size of h.\n//   http://httpwg.org/specs/rfc7540.html#SETTINGS_MAX_HEADER_LIST_SIZE\n//   http://httpwg.org/specs/rfc7540.html#MaxHeaderBlock\nfunc headerListSize(h http.Header) (size uint32) {\n\tfor k, vv := range h {\n\t\tfor _, v := range vv {\n\t\t\thf := hpack.HeaderField{Name: k, Value: v}\n\t\t\tsize += hf.Size()\n\t\t}\n\t}\n\treturn size\n}\n\n// padHeaders adds data to an http.Header until headerListSize(h) ==\n// limit. Due to the way header list sizes are calculated, padHeaders\n// cannot add fewer than len(\"Pad-Headers\") + 32 bytes to h, and will\n// call t.Fatal if asked to do so. 
PadHeaders first reserves enough\n// space for an empty \"Pad-Headers\" key, then adds as many copies of\n// filler as possible. Any remaining bytes necessary to push the\n// header list size up to limit are added to h[\"Pad-Headers\"].\nfunc padHeaders(t *testing.T, h http.Header, limit uint64, filler string) {\n\tif limit > 0xffffffff {\n\t\tt.Fatalf(\"padHeaders: refusing to pad to more than 2^32-1 bytes. limit = %v\", limit)\n\t}\n\thf := hpack.HeaderField{Name: \"Pad-Headers\", Value: \"\"}\n\tminPadding := uint64(hf.Size())\n\tsize := uint64(headerListSize(h))\n\n\tminlimit := size + minPadding\n\tif limit < minlimit {\n\t\tt.Fatalf(\"padHeaders: limit %v < %v\", limit, minlimit)\n\t}\n\n\t// Use a fixed-width format for name so that fieldSize\n\t// remains constant.\n\tnameFmt := \"Pad-Headers-%06d\"\n\thf = hpack.HeaderField{Name: fmt.Sprintf(nameFmt, 1), Value: filler}\n\tfieldSize := uint64(hf.Size())\n\n\t// Add as many complete filler values as possible, leaving\n\t// room for at least one empty \"Pad-Headers\" key.\n\tlimit = limit - minPadding\n\tfor i := 0; size+fieldSize < limit; i++ {\n\t\tname := fmt.Sprintf(nameFmt, i)\n\t\th.Add(name, filler)\n\t\tsize += fieldSize\n\t}\n\n\t// Add enough bytes to reach limit.\n\tremain := limit - size\n\tlastValue := strings.Repeat(\"*\", int(remain))\n\th.Add(\"Pad-Headers\", lastValue)\n}\n\nfunc TestPadHeaders(t *testing.T) {\n\tcheck := func(h http.Header, limit uint32, fillerLen int) {\n\t\tif h == nil {\n\t\t\th = make(http.Header)\n\t\t}\n\t\tfiller := strings.Repeat(\"f\", fillerLen)\n\t\tpadHeaders(t, h, uint64(limit), filler)\n\t\tgotSize := headerListSize(h)\n\t\tif gotSize != limit {\n\t\t\tt.Errorf(\"Got size = %v; want %v\", gotSize, limit)\n\t\t}\n\t}\n\t// Try all possible combinations for small fillerLen and limit.\n\thf := hpack.HeaderField{Name: \"Pad-Headers\", Value: \"\"}\n\tminLimit := hf.Size()\n\tfor limit := minLimit; limit <= 128; limit++ {\n\t\tfor fillerLen := 0; uint32(fillerLen) 
<= limit; fillerLen++ {\n\t\t\tcheck(nil, limit, fillerLen)\n\t\t}\n\t}\n\n\t// Try a few tests with larger limits, plus cumulative\n\t// tests. Since these tests are cumulative, tests[i+1].limit\n\t// must be >= tests[i].limit + minLimit. See the comment on\n\t// padHeaders for more info on why the limit arg has this\n\t// restriction.\n\ttests := []struct {\n\t\tfillerLen int\n\t\tlimit     uint32\n\t}{\n\t\t{\n\t\t\tfillerLen: 64,\n\t\t\tlimit:     1024,\n\t\t},\n\t\t{\n\t\t\tfillerLen: 1024,\n\t\t\tlimit:     1286,\n\t\t},\n\t\t{\n\t\t\tfillerLen: 256,\n\t\t\tlimit:     2048,\n\t\t},\n\t\t{\n\t\t\tfillerLen: 1024,\n\t\t\tlimit:     10 * 1024,\n\t\t},\n\t\t{\n\t\t\tfillerLen: 1023,\n\t\t\tlimit:     11 * 1024,\n\t\t},\n\t}\n\th := make(http.Header)\n\tfor _, tc := range tests {\n\t\tcheck(nil, tc.limit, tc.fillerLen)\n\t\tcheck(h, tc.limit, tc.fillerLen)\n\t}\n}\n\nfunc TestTransportChecksRequestHeaderListSize(t *testing.T) {\n\tst := newServerTester(t,\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\t// Consume body & force client to send\n\t\t\t// trailers before writing response.\n\t\t\t// ioutil.ReadAll returns non-nil err for\n\t\t\t// requests that attempt to send greater than\n\t\t\t// maxHeaderListSize bytes of trailers, since\n\t\t\t// those requests generate a stream reset.\n\t\t\tioutil.ReadAll(r.Body)\n\t\t\tr.Body.Close()\n\t\t},\n\t\tfunc(ts *httptest.Server) {\n\t\t\tts.Config.MaxHeaderBytes = 16 << 10\n\t\t},\n\t\toptOnlyServer,\n\t\toptQuiet,\n\t)\n\tdefer st.Close()\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\n\tcheckRoundTrip := func(req *http.Request, wantErr error, desc string) {\n\t\tres, err := tr.RoundTrip(req)\n\t\tif err != wantErr {\n\t\t\tif res != nil {\n\t\t\t\tres.Body.Close()\n\t\t\t}\n\t\t\tt.Errorf(\"%v: RoundTrip err = %v; want %v\", desc, err, wantErr)\n\t\t\treturn\n\t\t}\n\t\tif err == nil {\n\t\t\tif res == nil {\n\t\t\t\tt.Errorf(\"%v: response nil; want 
non-nil.\", desc)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer res.Body.Close()\n\t\t\tif res.StatusCode != http.StatusOK {\n\t\t\t\tt.Errorf(\"%v: response status = %v; want %v\", desc, res.StatusCode, http.StatusOK)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tif res != nil {\n\t\t\tt.Errorf(\"%v: RoundTrip err = %v but response non-nil\", desc, err)\n\t\t}\n\t}\n\theaderListSizeForRequest := func(req *http.Request) (size uint64) {\n\t\tcontentLen := actualContentLength(req)\n\t\ttrailers, err := commaSeparatedTrailers(req)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"headerListSizeForRequest: %v\", err)\n\t\t}\n\t\tcc := &ClientConn{peerMaxHeaderListSize: 0xffffffffffffffff}\n\t\tcc.henc = hpack.NewEncoder(&cc.hbuf)\n\t\tcc.mu.Lock()\n\t\thdrs, err := cc.encodeHeaders(req, true, trailers, contentLen)\n\t\tcc.mu.Unlock()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"headerListSizeForRequest: %v\", err)\n\t\t}\n\t\thpackDec := hpack.NewDecoder(initialHeaderTableSize, func(hf hpack.HeaderField) {\n\t\t\tsize += uint64(hf.Size())\n\t\t})\n\t\tif len(hdrs) > 0 {\n\t\t\tif _, err := hpackDec.Write(hdrs); err != nil {\n\t\t\t\tt.Fatalf(\"headerListSizeForRequest: %v\", err)\n\t\t\t}\n\t\t}\n\t\treturn size\n\t}\n\t// Create a new Request for each test, rather than reusing the\n\t// same Request, to avoid a race when modifying req.Headers.\n\t// See https://github.com/golang/go/issues/21316\n\tnewRequest := func() *http.Request {\n\t\t// Body must be non-nil to enable writing trailers.\n\t\tbody := strings.NewReader(\"hello\")\n\t\treq, err := http.NewRequest(\"POST\", st.ts.URL, body)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"newRequest: NewRequest: %v\", err)\n\t\t}\n\t\treturn req\n\t}\n\n\t// Make an arbitrary request to ensure we get the server's\n\t// settings frame and initialize peerMaxHeaderListSize.\n\treq := newRequest()\n\tcheckRoundTrip(req, nil, \"Initial request\")\n\n\t// Get the ClientConn associated with the request and validate\n\t// peerMaxHeaderListSize.\n\taddr := 
authorityAddr(req.URL.Scheme, req.URL.Host)\n\tcc, err := tr.connPool().GetClientConn(req, addr)\n\tif err != nil {\n\t\tt.Fatalf(\"GetClientConn: %v\", err)\n\t}\n\tcc.mu.Lock()\n\tpeerSize := cc.peerMaxHeaderListSize\n\tcc.mu.Unlock()\n\tst.scMu.Lock()\n\twantSize := uint64(st.sc.maxHeaderListSize())\n\tst.scMu.Unlock()\n\tif peerSize != wantSize {\n\t\tt.Errorf(\"peerMaxHeaderListSize = %v; want %v\", peerSize, wantSize)\n\t}\n\n\t// Sanity check peerSize. (*serverConn) maxHeaderListSize adds\n\t// 320 bytes of padding.\n\twantHeaderBytes := uint64(st.ts.Config.MaxHeaderBytes) + 320\n\tif peerSize != wantHeaderBytes {\n\t\tt.Errorf(\"peerMaxHeaderListSize = %v; want %v.\", peerSize, wantHeaderBytes)\n\t}\n\n\t// Pad headers & trailers, but stay under peerSize.\n\treq = newRequest()\n\treq.Header = make(http.Header)\n\treq.Trailer = make(http.Header)\n\tfiller := strings.Repeat(\"*\", 1024)\n\tpadHeaders(t, req.Trailer, peerSize, filler)\n\t// cc.encodeHeaders adds some default headers to the request,\n\t// so we need to leave room for those.\n\tdefaultBytes := headerListSizeForRequest(req)\n\tpadHeaders(t, req.Header, peerSize-defaultBytes, filler)\n\tcheckRoundTrip(req, nil, \"Headers & Trailers under limit\")\n\n\t// Add enough header bytes to push us over peerSize.\n\treq = newRequest()\n\treq.Header = make(http.Header)\n\tpadHeaders(t, req.Header, peerSize, filler)\n\tcheckRoundTrip(req, errRequestHeaderListSize, \"Headers over limit\")\n\n\t// Push trailers over the limit.\n\treq = newRequest()\n\treq.Trailer = make(http.Header)\n\tpadHeaders(t, req.Trailer, peerSize+1, filler)\n\tcheckRoundTrip(req, errRequestHeaderListSize, \"Trailers over limit\")\n\n\t// Send headers with a single large value.\n\treq = newRequest()\n\tfiller = strings.Repeat(\"*\", int(peerSize))\n\treq.Header = make(http.Header)\n\treq.Header.Set(\"Big\", filler)\n\tcheckRoundTrip(req, errRequestHeaderListSize, \"Single large header\")\n\n\t// Send trailers with a single large 
value.\n\treq = newRequest()\n\treq.Trailer = make(http.Header)\n\treq.Trailer.Set(\"Big\", filler)\n\tcheckRoundTrip(req, errRequestHeaderListSize, \"Single large trailer\")\n}\n\nfunc TestTransportChecksResponseHeaderListSize(t *testing.T) {\n\tct := newClientTester(t)\n\tct.client = func() error {\n\t\treq, _ := http.NewRequest(\"GET\", \"https://dummy.tld/\", nil)\n\t\tres, err := ct.tr.RoundTrip(req)\n\t\tif err != errResponseHeaderListSize {\n\t\t\tif res != nil {\n\t\t\t\tres.Body.Close()\n\t\t\t}\n\t\t\tsize := int64(0)\n\t\t\tfor k, vv := range res.Header {\n\t\t\t\tfor _, v := range vv {\n\t\t\t\t\tsize += int64(len(k)) + int64(len(v)) + 32\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"RoundTrip Error = %v (and %d bytes of response headers); want errResponseHeaderListSize\", err, size)\n\t\t}\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\tct.greet()\n\t\tvar buf bytes.Buffer\n\t\tenc := hpack.NewEncoder(&buf)\n\n\t\tfor {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tswitch f := f.(type) {\n\t\t\tcase *HeadersFrame:\n\t\t\t\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\t\t\t\tlarge := strings.Repeat(\"a\", 1<<10)\n\t\t\t\tfor i := 0; i < 5042; i++ {\n\t\t\t\t\tenc.WriteField(hpack.HeaderField{Name: large, Value: large})\n\t\t\t\t}\n\t\t\t\tif size, want := buf.Len(), 6329; size != want {\n\t\t\t\t\t// Note: this number might change if\n\t\t\t\t\t// our hpack implementation\n\t\t\t\t\t// changes. That's fine. 
This is\n\t\t\t\t\t// just a sanity check that our\n\t\t\t\t\t// response can fit in a single\n\t\t\t\t\t// header block fragment frame.\n\t\t\t\t\treturn fmt.Errorf(\"encoding over 10MB of duplicate keypairs took %d bytes; expected %d\", size, want)\n\t\t\t\t}\n\t\t\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\t\t\tStreamID:      f.StreamID,\n\t\t\t\t\tEndHeaders:    true,\n\t\t\t\t\tEndStream:     true,\n\t\t\t\t\tBlockFragment: buf.Bytes(),\n\t\t\t\t})\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\tct.run()\n}\n\n// Test that the the Transport returns a typed error from Response.Body.Read calls\n// when the server sends an error. (here we use a panic, since that should generate\n// a stream error, but others like cancel should be similar)\nfunc TestTransportBodyReadErrorType(t *testing.T) {\n\tdoPanic := make(chan bool, 1)\n\tst := newServerTester(t,\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tw.(http.Flusher).Flush() // force headers out\n\t\t\t<-doPanic\n\t\t\tpanic(\"boom\")\n\t\t},\n\t\toptOnlyServer,\n\t\toptQuiet,\n\t)\n\tdefer st.Close()\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\tc := &http.Client{Transport: tr}\n\n\tres, err := c.Get(st.ts.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer res.Body.Close()\n\tdoPanic <- true\n\tbuf := make([]byte, 100)\n\tn, err := res.Body.Read(buf)\n\twant := StreamError{StreamID: 0x1, Code: 0x2}\n\tif !reflect.DeepEqual(want, err) {\n\t\tt.Errorf(\"Read = %v, %#v; want error %#v\", n, err, want)\n\t}\n}\n\n// golang.org/issue/13924\n// This used to fail after many iterations, especially with -race:\n// go test -v -run=TestTransportDoubleCloseOnWriteError -count=500 -race\nfunc TestTransportDoubleCloseOnWriteError(t *testing.T) {\n\tvar (\n\t\tmu   sync.Mutex\n\t\tconn net.Conn // to close if set\n\t)\n\n\tst := newServerTester(t,\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tmu.Lock()\n\t\t\tdefer mu.Unlock()\n\t\t\tif conn != nil 
{\n\t\t\t\tconn.Close()\n\t\t\t}\n\t\t},\n\t\toptOnlyServer,\n\t)\n\tdefer st.Close()\n\n\ttr := &Transport{\n\t\tTLSClientConfig: tlsConfigInsecure,\n\t\tDialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {\n\t\t\ttc, err := tls.Dial(network, addr, cfg)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tmu.Lock()\n\t\t\tdefer mu.Unlock()\n\t\t\tconn = tc\n\t\t\treturn tc, nil\n\t\t},\n\t}\n\tdefer tr.CloseIdleConnections()\n\tc := &http.Client{Transport: tr}\n\tc.Get(st.ts.URL)\n}\n\n// Test that the http1 Transport.DisableKeepAlives option is respected\n// and connections are closed as soon as idle.\n// See golang.org/issue/14008\nfunc TestTransportDisableKeepAlives(t *testing.T) {\n\tst := newServerTester(t,\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tio.WriteString(w, \"hi\")\n\t\t},\n\t\toptOnlyServer,\n\t)\n\tdefer st.Close()\n\n\tconnClosed := make(chan struct{}) // closed on tls.Conn.Close\n\ttr := &Transport{\n\t\tt1: &http.Transport{\n\t\t\tDisableKeepAlives: true,\n\t\t},\n\t\tTLSClientConfig: tlsConfigInsecure,\n\t\tDialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {\n\t\t\ttc, err := tls.Dial(network, addr, cfg)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn &noteCloseConn{Conn: tc, closefn: func() { close(connClosed) }}, nil\n\t\t},\n\t}\n\tc := &http.Client{Transport: tr}\n\tres, err := c.Get(st.ts.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := ioutil.ReadAll(res.Body); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer res.Body.Close()\n\n\tselect {\n\tcase <-connClosed:\n\tcase <-time.After(1 * time.Second):\n\t\tt.Errorf(\"timeout\")\n\t}\n\n}\n\n// Test concurrent requests with Transport.DisableKeepAlives. 
We can share connections,\n// but when things are totally idle, it still needs to close.\nfunc TestTransportDisableKeepAlives_Concurrency(t *testing.T) {\n\tconst D = 25 * time.Millisecond\n\tst := newServerTester(t,\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\ttime.Sleep(D)\n\t\t\tio.WriteString(w, \"hi\")\n\t\t},\n\t\toptOnlyServer,\n\t)\n\tdefer st.Close()\n\n\tvar dials int32\n\tvar conns sync.WaitGroup\n\ttr := &Transport{\n\t\tt1: &http.Transport{\n\t\t\tDisableKeepAlives: true,\n\t\t},\n\t\tTLSClientConfig: tlsConfigInsecure,\n\t\tDialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {\n\t\t\ttc, err := tls.Dial(network, addr, cfg)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tatomic.AddInt32(&dials, 1)\n\t\t\tconns.Add(1)\n\t\t\treturn &noteCloseConn{Conn: tc, closefn: func() { conns.Done() }}, nil\n\t\t},\n\t}\n\tc := &http.Client{Transport: tr}\n\tvar reqs sync.WaitGroup\n\tconst N = 20\n\tfor i := 0; i < N; i++ {\n\t\treqs.Add(1)\n\t\tif i == N-1 {\n\t\t\t// For the final request, try to make all the\n\t\t\t// others close. This isn't verified in the\n\t\t\t// count, other than the Log statement, since\n\t\t\t// it's so timing dependent. 
This test is\n\t\t\t// really to make sure we don't interrupt a\n\t\t\t// valid request.\n\t\t\ttime.Sleep(D * 2)\n\t\t}\n\t\tgo func() {\n\t\t\tdefer reqs.Done()\n\t\t\tres, err := c.Get(st.ts.URL)\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, err := ioutil.ReadAll(res.Body); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tres.Body.Close()\n\t\t}()\n\t}\n\treqs.Wait()\n\tconns.Wait()\n\tt.Logf(\"did %d dials, %d requests\", atomic.LoadInt32(&dials), N)\n}\n\ntype noteCloseConn struct {\n\tnet.Conn\n\tonceClose sync.Once\n\tclosefn   func()\n}\n\nfunc (c *noteCloseConn) Close() error {\n\tc.onceClose.Do(c.closefn)\n\treturn c.Conn.Close()\n}\n\nfunc isTimeout(err error) bool {\n\tswitch err := err.(type) {\n\tcase nil:\n\t\treturn false\n\tcase *url.Error:\n\t\treturn isTimeout(err.Err)\n\tcase net.Error:\n\t\treturn err.Timeout()\n\t}\n\treturn false\n}\n\n// Test that the http1 Transport.ResponseHeaderTimeout option and cancel is sent.\nfunc TestTransportResponseHeaderTimeout_NoBody(t *testing.T) {\n\ttestTransportResponseHeaderTimeout(t, false)\n}\nfunc TestTransportResponseHeaderTimeout_Body(t *testing.T) {\n\ttestTransportResponseHeaderTimeout(t, true)\n}\n\nfunc testTransportResponseHeaderTimeout(t *testing.T, body bool) {\n\tct := newClientTester(t)\n\tct.tr.t1 = &http.Transport{\n\t\tResponseHeaderTimeout: 5 * time.Millisecond,\n\t}\n\tct.client = func() error {\n\t\tc := &http.Client{Transport: ct.tr}\n\t\tvar err error\n\t\tvar n int64\n\t\tconst bodySize = 4 << 20\n\t\tif body {\n\t\t\t_, err = c.Post(\"https://dummy.tld/\", \"text/foo\", io.LimitReader(countingReader{&n}, bodySize))\n\t\t} else {\n\t\t\t_, err = c.Get(\"https://dummy.tld/\")\n\t\t}\n\t\tif !isTimeout(err) {\n\t\t\tt.Errorf(\"client expected timeout error; got %#v\", err)\n\t\t}\n\t\tif body && n != bodySize {\n\t\t\tt.Errorf(\"only read %d bytes of body; want %d\", n, bodySize)\n\t\t}\n\t\treturn nil\n\t}\n\tct.server = func() 
error {\n\t\tct.greet()\n\t\tfor {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"ReadFrame: %v\", err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tswitch f := f.(type) {\n\t\t\tcase *DataFrame:\n\t\t\t\tdataLen := len(f.Data())\n\t\t\t\tif dataLen > 0 {\n\t\t\t\t\tif err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase *RSTStreamFrame:\n\t\t\t\tif f.StreamID == 1 && f.ErrCode == ErrCodeCancel {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tct.run()\n}\n\nfunc TestTransportDisableCompression(t *testing.T) {\n\tconst body = \"sup\"\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\twant := http.Header{\n\t\t\t\"User-Agent\": []string{\"Go-http-client/2.0\"},\n\t\t}\n\t\tif !reflect.DeepEqual(r.Header, want) {\n\t\t\tt.Errorf(\"request headers = %v; want %v\", r.Header, want)\n\t\t}\n\t}, optOnlyServer)\n\tdefer st.Close()\n\n\ttr := &Transport{\n\t\tTLSClientConfig: tlsConfigInsecure,\n\t\tt1: &http.Transport{\n\t\t\tDisableCompression: true,\n\t\t},\n\t}\n\tdefer tr.CloseIdleConnections()\n\n\treq, err := http.NewRequest(\"GET\", st.ts.URL, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tres, err := tr.RoundTrip(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer res.Body.Close()\n}\n\n// RFC 7540 section 8.1.2.2\nfunc TestTransportRejectsConnHeaders(t *testing.T) {\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tvar got []string\n\t\tfor k := range r.Header {\n\t\t\tgot = append(got, k)\n\t\t}\n\t\tsort.Strings(got)\n\t\tw.Header().Set(\"Got-Header\", strings.Join(got, \",\"))\n\t}, optOnlyServer)\n\tdefer st.Close()\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\n\ttests := []struct {\n\t\tkey   string\n\t\tvalue 
[]string\n\t\twant  string\n\t}{\n\t\t{\n\t\t\tkey:   \"Upgrade\",\n\t\t\tvalue: []string{\"anything\"},\n\t\t\twant:  \"ERROR: http2: invalid Upgrade request header: [\\\"anything\\\"]\",\n\t\t},\n\t\t{\n\t\t\tkey:   \"Connection\",\n\t\t\tvalue: []string{\"foo\"},\n\t\t\twant:  \"ERROR: http2: invalid Connection request header: [\\\"foo\\\"]\",\n\t\t},\n\t\t{\n\t\t\tkey:   \"Connection\",\n\t\t\tvalue: []string{\"close\"},\n\t\t\twant:  \"Accept-Encoding,User-Agent\",\n\t\t},\n\t\t{\n\t\t\tkey:   \"Connection\",\n\t\t\tvalue: []string{\"close\", \"something-else\"},\n\t\t\twant:  \"ERROR: http2: invalid Connection request header: [\\\"close\\\" \\\"something-else\\\"]\",\n\t\t},\n\t\t{\n\t\t\tkey:   \"Connection\",\n\t\t\tvalue: []string{\"keep-alive\"},\n\t\t\twant:  \"Accept-Encoding,User-Agent\",\n\t\t},\n\t\t{\n\t\t\tkey:   \"Proxy-Connection\", // just deleted and ignored\n\t\t\tvalue: []string{\"keep-alive\"},\n\t\t\twant:  \"Accept-Encoding,User-Agent\",\n\t\t},\n\t\t{\n\t\t\tkey:   \"Transfer-Encoding\",\n\t\t\tvalue: []string{\"\"},\n\t\t\twant:  \"Accept-Encoding,User-Agent\",\n\t\t},\n\t\t{\n\t\t\tkey:   \"Transfer-Encoding\",\n\t\t\tvalue: []string{\"foo\"},\n\t\t\twant:  \"ERROR: http2: invalid Transfer-Encoding request header: [\\\"foo\\\"]\",\n\t\t},\n\t\t{\n\t\t\tkey:   \"Transfer-Encoding\",\n\t\t\tvalue: []string{\"chunked\"},\n\t\t\twant:  \"Accept-Encoding,User-Agent\",\n\t\t},\n\t\t{\n\t\t\tkey:   \"Transfer-Encoding\",\n\t\t\tvalue: []string{\"chunked\", \"other\"},\n\t\t\twant:  \"ERROR: http2: invalid Transfer-Encoding request header: [\\\"chunked\\\" \\\"other\\\"]\",\n\t\t},\n\t\t{\n\t\t\tkey:   \"Content-Length\",\n\t\t\tvalue: []string{\"123\"},\n\t\t\twant:  \"Accept-Encoding,User-Agent\",\n\t\t},\n\t\t{\n\t\t\tkey:   \"Keep-Alive\",\n\t\t\tvalue: []string{\"doop\"},\n\t\t\twant:  \"Accept-Encoding,User-Agent\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\treq, _ := http.NewRequest(\"GET\", st.ts.URL, nil)\n\t\treq.Header[tt.key] 
= tt.value\n\t\tres, err := tr.RoundTrip(req)\n\t\tvar got string\n\t\tif err != nil {\n\t\t\tgot = fmt.Sprintf(\"ERROR: %v\", err)\n\t\t} else {\n\t\t\tgot = res.Header.Get(\"Got-Header\")\n\t\t\tres.Body.Close()\n\t\t}\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"For key %q, value %q, got = %q; want %q\", tt.key, tt.value, got, tt.want)\n\t\t}\n\t}\n}\n\n// golang.org/issue/14048\nfunc TestTransportFailsOnInvalidHeaders(t *testing.T) {\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tvar got []string\n\t\tfor k := range r.Header {\n\t\t\tgot = append(got, k)\n\t\t}\n\t\tsort.Strings(got)\n\t\tw.Header().Set(\"Got-Header\", strings.Join(got, \",\"))\n\t}, optOnlyServer)\n\tdefer st.Close()\n\n\ttests := [...]struct {\n\t\th       http.Header\n\t\twantErr string\n\t}{\n\t\t0: {\n\t\t\th:       http.Header{\"with space\": {\"foo\"}},\n\t\t\twantErr: `invalid HTTP header name \"with space\"`,\n\t\t},\n\t\t1: {\n\t\t\th:       http.Header{\"name\": {\"Брэд\"}},\n\t\t\twantErr: \"\", // okay\n\t\t},\n\t\t2: {\n\t\t\th:       http.Header{\"имя\": {\"Brad\"}},\n\t\t\twantErr: `invalid HTTP header name \"имя\"`,\n\t\t},\n\t\t3: {\n\t\t\th:       http.Header{\"foo\": {\"foo\\x01bar\"}},\n\t\t\twantErr: `invalid HTTP header value \"foo\\x01bar\" for header \"foo\"`,\n\t\t},\n\t}\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\n\tfor i, tt := range tests {\n\t\treq, _ := http.NewRequest(\"GET\", st.ts.URL, nil)\n\t\treq.Header = tt.h\n\t\tres, err := tr.RoundTrip(req)\n\t\tvar bad bool\n\t\tif tt.wantErr == \"\" {\n\t\t\tif err != nil {\n\t\t\t\tbad = true\n\t\t\t\tt.Errorf(\"case %d: error = %v; want no error\", i, err)\n\t\t\t}\n\t\t} else {\n\t\t\tif !strings.Contains(fmt.Sprint(err), tt.wantErr) {\n\t\t\t\tbad = true\n\t\t\t\tt.Errorf(\"case %d: error = %v; want error %q\", i, err, tt.wantErr)\n\t\t\t}\n\t\t}\n\t\tif err == nil {\n\t\t\tif bad {\n\t\t\t\tt.Logf(\"case %d: server got headers %q\", 
i, res.Header.Get(\"Got-Header\"))\n\t\t\t}\n\t\t\tres.Body.Close()\n\t\t}\n\t}\n}\n\n// Tests that gzipReader doesn't crash on a second Read call following\n// the first Read call's gzip.NewReader returning an error.\nfunc TestGzipReader_DoubleReadCrash(t *testing.T) {\n\tgz := &gzipReader{\n\t\tbody: ioutil.NopCloser(strings.NewReader(\"0123456789\")),\n\t}\n\tvar buf [1]byte\n\tn, err1 := gz.Read(buf[:])\n\tif n != 0 || !strings.Contains(fmt.Sprint(err1), \"invalid header\") {\n\t\tt.Fatalf(\"Read = %v, %v; want 0, invalid header\", n, err1)\n\t}\n\tn, err2 := gz.Read(buf[:])\n\tif n != 0 || err2 != err1 {\n\t\tt.Fatalf(\"second Read = %v, %v; want 0, %v\", n, err2, err1)\n\t}\n}\n\nfunc TestTransportNewTLSConfig(t *testing.T) {\n\ttests := [...]struct {\n\t\tconf *tls.Config\n\t\thost string\n\t\twant *tls.Config\n\t}{\n\t\t// Normal case.\n\t\t0: {\n\t\t\tconf: nil,\n\t\t\thost: \"foo.com\",\n\t\t\twant: &tls.Config{\n\t\t\t\tServerName: \"foo.com\",\n\t\t\t\tNextProtos: []string{NextProtoTLS},\n\t\t\t},\n\t\t},\n\n\t\t// User-provided name (bar.com) takes precedence:\n\t\t1: {\n\t\t\tconf: &tls.Config{\n\t\t\t\tServerName: \"bar.com\",\n\t\t\t},\n\t\t\thost: \"foo.com\",\n\t\t\twant: &tls.Config{\n\t\t\t\tServerName: \"bar.com\",\n\t\t\t\tNextProtos: []string{NextProtoTLS},\n\t\t\t},\n\t\t},\n\n\t\t// NextProto is prepended:\n\t\t2: {\n\t\t\tconf: &tls.Config{\n\t\t\t\tNextProtos: []string{\"foo\", \"bar\"},\n\t\t\t},\n\t\t\thost: \"example.com\",\n\t\t\twant: &tls.Config{\n\t\t\t\tServerName: \"example.com\",\n\t\t\t\tNextProtos: []string{NextProtoTLS, \"foo\", \"bar\"},\n\t\t\t},\n\t\t},\n\n\t\t// NextProto is not duplicated:\n\t\t3: {\n\t\t\tconf: &tls.Config{\n\t\t\t\tNextProtos: []string{\"foo\", \"bar\", NextProtoTLS},\n\t\t\t},\n\t\t\thost: \"example.com\",\n\t\t\twant: &tls.Config{\n\t\t\t\tServerName: \"example.com\",\n\t\t\t\tNextProtos: []string{\"foo\", \"bar\", NextProtoTLS},\n\t\t\t},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\t// Ignore the 
session ticket keys part, which ends up populating\n\t\t// unexported fields in the Config:\n\t\tif tt.conf != nil {\n\t\t\ttt.conf.SessionTicketsDisabled = true\n\t\t}\n\n\t\ttr := &Transport{TLSClientConfig: tt.conf}\n\t\tgot := tr.newTLSConfig(tt.host)\n\n\t\tgot.SessionTicketsDisabled = false\n\n\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\tt.Errorf(\"%d. got %#v; want %#v\", i, got, tt.want)\n\t\t}\n\t}\n}\n\n// The Google GFE responds to HEAD requests with a HEADERS frame\n// without END_STREAM, followed by a 0-length DATA frame with\n// END_STREAM. Make sure we don't get confused by that. (We did.)\nfunc TestTransportReadHeadResponse(t *testing.T) {\n\tct := newClientTester(t)\n\tclientDone := make(chan struct{})\n\tct.client = func() error {\n\t\tdefer close(clientDone)\n\t\treq, _ := http.NewRequest(\"HEAD\", \"https://dummy.tld/\", nil)\n\t\tres, err := ct.tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif res.ContentLength != 123 {\n\t\t\treturn fmt.Errorf(\"Content-Length = %d; want 123\", res.ContentLength)\n\t\t}\n\t\tslurp, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"ReadAll: %v\", err)\n\t\t}\n\t\tif len(slurp) > 0 {\n\t\t\treturn fmt.Errorf(\"Unexpected non-empty ReadAll body: %q\", slurp)\n\t\t}\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\tct.greet()\n\t\tfor {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"ReadFrame: %v\", err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\thf, ok := f.(*HeadersFrame)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar buf bytes.Buffer\n\t\t\tenc := hpack.NewEncoder(&buf)\n\t\t\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\t\t\tenc.WriteField(hpack.HeaderField{Name: \"content-length\", Value: \"123\"})\n\t\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\t\tStreamID:      hf.StreamID,\n\t\t\t\tEndHeaders:    true,\n\t\t\t\tEndStream:     false, // as the GFE does\n\t\t\t\tBlockFragment: 
buf.Bytes(),\n\t\t\t})\n\t\t\tct.fr.WriteData(hf.StreamID, true, nil)\n\n\t\t\t<-clientDone\n\t\t\treturn nil\n\t\t}\n\t}\n\tct.run()\n}\n\nfunc TestTransportReadHeadResponseWithBody(t *testing.T) {\n\tresponse := \"redirecting to /elsewhere\"\n\tct := newClientTester(t)\n\tclientDone := make(chan struct{})\n\tct.client = func() error {\n\t\tdefer close(clientDone)\n\t\treq, _ := http.NewRequest(\"HEAD\", \"https://dummy.tld/\", nil)\n\t\tres, err := ct.tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif res.ContentLength != int64(len(response)) {\n\t\t\treturn fmt.Errorf(\"Content-Length = %d; want %d\", res.ContentLength, len(response))\n\t\t}\n\t\tslurp, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"ReadAll: %v\", err)\n\t\t}\n\t\tif len(slurp) > 0 {\n\t\t\treturn fmt.Errorf(\"Unexpected non-empty ReadAll body: %q\", slurp)\n\t\t}\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\tct.greet()\n\t\tfor {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"ReadFrame: %v\", err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\thf, ok := f.(*HeadersFrame)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar buf bytes.Buffer\n\t\t\tenc := hpack.NewEncoder(&buf)\n\t\t\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\t\t\tenc.WriteField(hpack.HeaderField{Name: \"content-length\", Value: strconv.Itoa(len(response))})\n\t\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\t\tStreamID:      hf.StreamID,\n\t\t\t\tEndHeaders:    true,\n\t\t\t\tEndStream:     false,\n\t\t\t\tBlockFragment: buf.Bytes(),\n\t\t\t})\n\t\t\tct.fr.WriteData(hf.StreamID, true, []byte(response))\n\n\t\t\t<-clientDone\n\t\t\treturn nil\n\t\t}\n\t}\n\tct.run()\n}\n\ntype neverEnding byte\n\nfunc (b neverEnding) Read(p []byte) (int, error) {\n\tfor i := range p {\n\t\tp[i] = byte(b)\n\t}\n\treturn len(p), nil\n}\n\n// golang.org/issue/15425: test that a handler closing the request\n// body doesn't terminate the 
stream to the peer. (It just stops\n// readability from the handler's side, and eventually the client\n// runs out of flow control tokens)\nfunc TestTransportHandlerBodyClose(t *testing.T) {\n\tconst bodySize = 10 << 20\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tr.Body.Close()\n\t\tio.Copy(w, io.LimitReader(neverEnding('A'), bodySize))\n\t}, optOnlyServer)\n\tdefer st.Close()\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\n\tg0 := runtime.NumGoroutine()\n\n\tconst numReq = 10\n\tfor i := 0; i < numReq; i++ {\n\t\treq, err := http.NewRequest(\"POST\", st.ts.URL, struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)})\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tres, err := tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tn, err := io.Copy(ioutil.Discard, res.Body)\n\t\tres.Body.Close()\n\t\tif n != bodySize || err != nil {\n\t\t\tt.Fatalf(\"req#%d: Copy = %d, %v; want %d, nil\", i, n, err, bodySize)\n\t\t}\n\t}\n\ttr.CloseIdleConnections()\n\n\tgd := runtime.NumGoroutine() - g0\n\tif gd > numReq/2 {\n\t\tt.Errorf(\"appeared to leak goroutines\")\n\t}\n\n}\n\n// https://golang.org/issue/15930\nfunc TestTransportFlowControl(t *testing.T) {\n\tconst bufLen = 64 << 10\n\tvar total int64 = 100 << 20 // 100MB\n\tif testing.Short() {\n\t\ttotal = 10 << 20\n\t}\n\n\tvar wrote int64 // updated atomically\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tb := make([]byte, bufLen)\n\t\tfor wrote < total {\n\t\t\tn, err := w.Write(b)\n\t\t\tatomic.AddInt64(&wrote, int64(n))\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"ResponseWriter.Write error: %v\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tw.(http.Flusher).Flush()\n\t\t}\n\t}, optOnlyServer)\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\treq, err := http.NewRequest(\"GET\", st.ts.URL, nil)\n\tif err != nil {\n\t\tt.Fatal(\"NewRequest 
error:\", err)\n\t}\n\tresp, err := tr.RoundTrip(req)\n\tif err != nil {\n\t\tt.Fatal(\"RoundTrip error:\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tvar read int64\n\tb := make([]byte, bufLen)\n\tfor {\n\t\tn, err := resp.Body.Read(b)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatal(\"Read error:\", err)\n\t\t}\n\t\tread += int64(n)\n\n\t\tconst max = transportDefaultStreamFlow\n\t\tif w := atomic.LoadInt64(&wrote); -max > read-w || read-w > max {\n\t\t\tt.Fatalf(\"Too much data inflight: server wrote %v bytes but client only received %v\", w, read)\n\t\t}\n\n\t\t// Let the server get ahead of the client.\n\t\ttime.Sleep(1 * time.Millisecond)\n\t}\n}\n\n// golang.org/issue/14627 -- if the server sends a GOAWAY frame, make\n// the Transport remember it and return it back to users (via\n// RoundTrip or request body reads) if needed (e.g. if the server\n// proceeds to close the TCP connection before the client gets its\n// response)\nfunc TestTransportUsesGoAwayDebugError_RoundTrip(t *testing.T) {\n\ttestTransportUsesGoAwayDebugError(t, false)\n}\n\nfunc TestTransportUsesGoAwayDebugError_Body(t *testing.T) {\n\ttestTransportUsesGoAwayDebugError(t, true)\n}\n\nfunc testTransportUsesGoAwayDebugError(t *testing.T, failMidBody bool) {\n\tct := newClientTester(t)\n\tclientDone := make(chan struct{})\n\n\tconst goAwayErrCode = ErrCodeHTTP11Required // arbitrary\n\tconst goAwayDebugData = \"some debug data\"\n\n\tct.client = func() error {\n\t\tdefer close(clientDone)\n\t\treq, _ := http.NewRequest(\"GET\", \"https://dummy.tld/\", nil)\n\t\tres, err := ct.tr.RoundTrip(req)\n\t\tif failMidBody {\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unexpected client RoundTrip error: %v\", err)\n\t\t\t}\n\t\t\t_, err = io.Copy(ioutil.Discard, res.Body)\n\t\t\tres.Body.Close()\n\t\t}\n\t\twant := GoAwayError{\n\t\t\tLastStreamID: 5,\n\t\t\tErrCode:      goAwayErrCode,\n\t\t\tDebugData:    goAwayDebugData,\n\t\t}\n\t\tif !reflect.DeepEqual(err, 
want) {\n\t\t\tt.Errorf(\"RoundTrip error = %T: %#v, want %T (%#v)\", err, err, want, want)\n\t\t}\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\tct.greet()\n\t\tfor {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"ReadFrame: %v\", err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\thf, ok := f.(*HeadersFrame)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif failMidBody {\n\t\t\t\tvar buf bytes.Buffer\n\t\t\t\tenc := hpack.NewEncoder(&buf)\n\t\t\t\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\t\t\t\tenc.WriteField(hpack.HeaderField{Name: \"content-length\", Value: \"123\"})\n\t\t\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\t\t\tStreamID:      hf.StreamID,\n\t\t\t\t\tEndHeaders:    true,\n\t\t\t\t\tEndStream:     false,\n\t\t\t\t\tBlockFragment: buf.Bytes(),\n\t\t\t\t})\n\t\t\t}\n\t\t\t// Write two GOAWAY frames, to test that the Transport takes\n\t\t\t// the interesting parts of both.\n\t\t\tct.fr.WriteGoAway(5, ErrCodeNo, []byte(goAwayDebugData))\n\t\t\tct.fr.WriteGoAway(5, goAwayErrCode, nil)\n\t\t\tct.sc.(*net.TCPConn).CloseWrite()\n\t\t\t<-clientDone\n\t\t\treturn nil\n\t\t}\n\t}\n\tct.run()\n}\n\nfunc testTransportReturnsUnusedFlowControl(t *testing.T, oneDataFrame bool) {\n\tct := newClientTester(t)\n\n\tclientClosed := make(chan struct{})\n\tserverWroteFirstByte := make(chan struct{})\n\n\tct.client = func() error {\n\t\treq, _ := http.NewRequest(\"GET\", \"https://dummy.tld/\", nil)\n\t\tres, err := ct.tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t<-serverWroteFirstByte\n\n\t\tif n, err := res.Body.Read(make([]byte, 1)); err != nil || n != 1 {\n\t\t\treturn fmt.Errorf(\"body read = %v, %v; want 1, nil\", n, err)\n\t\t}\n\t\tres.Body.Close() // leaving 4999 bytes unread\n\t\tclose(clientClosed)\n\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\tct.greet()\n\n\t\tvar hf *HeadersFrame\n\t\tfor {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\treturn 
fmt.Errorf(\"ReadFrame while waiting for Headers: %v\", err)\n\t\t\t}\n\t\t\tswitch f.(type) {\n\t\t\tcase *WindowUpdateFrame, *SettingsFrame:\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar ok bool\n\t\t\thf, ok = f.(*HeadersFrame)\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"Got %T; want HeadersFrame\", f)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\tvar buf bytes.Buffer\n\t\tenc := hpack.NewEncoder(&buf)\n\t\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\t\tenc.WriteField(hpack.HeaderField{Name: \"content-length\", Value: \"5000\"})\n\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\tStreamID:      hf.StreamID,\n\t\t\tEndHeaders:    true,\n\t\t\tEndStream:     false,\n\t\t\tBlockFragment: buf.Bytes(),\n\t\t})\n\n\t\t// Two cases:\n\t\t// - Send one DATA frame with 5000 bytes.\n\t\t// - Send two DATA frames with 1 and 4999 bytes each.\n\t\t//\n\t\t// In both cases, the client should consume one byte of data,\n\t\t// refund that byte, then refund the following 4999 bytes.\n\t\t//\n\t\t// In the second case, the server waits for the client connection to\n\t\t// close before seconding the second DATA frame. 
This tests the case\n\t\t// where the client receives a DATA frame after it has reset the stream.\n\t\tif oneDataFrame {\n\t\t\tct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 5000))\n\t\t\tclose(serverWroteFirstByte)\n\t\t\t<-clientClosed\n\t\t} else {\n\t\t\tct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 1))\n\t\t\tclose(serverWroteFirstByte)\n\t\t\t<-clientClosed\n\t\t\tct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 4999))\n\t\t}\n\n\t\twaitingFor := \"RSTStreamFrame\"\n\t\tfor {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"ReadFrame while waiting for %s: %v\", waitingFor, err)\n\t\t\t}\n\t\t\tif _, ok := f.(*SettingsFrame); ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch waitingFor {\n\t\t\tcase \"RSTStreamFrame\":\n\t\t\t\tif rf, ok := f.(*RSTStreamFrame); !ok || rf.ErrCode != ErrCodeCancel {\n\t\t\t\t\treturn fmt.Errorf(\"Expected a RSTStreamFrame with code cancel; got %v\", summarizeFrame(f))\n\t\t\t\t}\n\t\t\t\twaitingFor = \"WindowUpdateFrame\"\n\t\t\tcase \"WindowUpdateFrame\":\n\t\t\t\tif wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != 4999 {\n\t\t\t\t\treturn fmt.Errorf(\"Expected WindowUpdateFrame for 4999 bytes; got %v\", summarizeFrame(f))\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\tct.run()\n}\n\n// See golang.org/issue/16481\nfunc TestTransportReturnsUnusedFlowControlSingleWrite(t *testing.T) {\n\ttestTransportReturnsUnusedFlowControl(t, true)\n}\n\n// See golang.org/issue/20469\nfunc TestTransportReturnsUnusedFlowControlMultipleWrites(t *testing.T) {\n\ttestTransportReturnsUnusedFlowControl(t, false)\n}\n\n// Issue 16612: adjust flow control on open streams when transport\n// receives SETTINGS with INITIAL_WINDOW_SIZE from server.\nfunc TestTransportAdjustsFlowControl(t *testing.T) {\n\tct := newClientTester(t)\n\tclientDone := make(chan struct{})\n\n\tconst bodySize = 1 << 20\n\n\tct.client = func() error 
{\n\t\tdefer ct.cc.(*net.TCPConn).CloseWrite()\n\t\tdefer close(clientDone)\n\n\t\treq, _ := http.NewRequest(\"POST\", \"https://dummy.tld/\", struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)})\n\t\tres, err := ct.tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres.Body.Close()\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\t_, err := io.ReadFull(ct.sc, make([]byte, len(ClientPreface)))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"reading client preface: %v\", err)\n\t\t}\n\n\t\tvar gotBytes int64\n\t\tvar sentSettings bool\n\t\tfor {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\tselect {\n\t\t\t\tcase <-clientDone:\n\t\t\t\t\treturn nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn fmt.Errorf(\"ReadFrame while waiting for Headers: %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tswitch f := f.(type) {\n\t\t\tcase *DataFrame:\n\t\t\t\tgotBytes += int64(len(f.Data()))\n\t\t\t\t// After we've got half the client's\n\t\t\t\t// initial flow control window's worth\n\t\t\t\t// of request body data, give it just\n\t\t\t\t// enough flow control to finish.\n\t\t\t\tif gotBytes >= initialWindowSize/2 && !sentSettings {\n\t\t\t\t\tsentSettings = true\n\n\t\t\t\t\tct.fr.WriteSettings(Setting{ID: SettingInitialWindowSize, Val: bodySize})\n\t\t\t\t\tct.fr.WriteWindowUpdate(0, bodySize)\n\t\t\t\t\tct.fr.WriteSettingsAck()\n\t\t\t\t}\n\n\t\t\t\tif f.StreamEnded() {\n\t\t\t\t\tvar buf bytes.Buffer\n\t\t\t\t\tenc := hpack.NewEncoder(&buf)\n\t\t\t\t\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\t\t\t\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\t\t\t\tStreamID:      f.StreamID,\n\t\t\t\t\t\tEndHeaders:    true,\n\t\t\t\t\t\tEndStream:     true,\n\t\t\t\t\t\tBlockFragment: buf.Bytes(),\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tct.run()\n}\n\n// See golang.org/issue/16556\nfunc TestTransportReturnsDataPaddingFlowControl(t *testing.T) {\n\tct := newClientTester(t)\n\n\tunblockClient := make(chan bool, 
1)\n\n\tct.client = func() error {\n\t\treq, _ := http.NewRequest(\"GET\", \"https://dummy.tld/\", nil)\n\t\tres, err := ct.tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer res.Body.Close()\n\t\t<-unblockClient\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\tct.greet()\n\n\t\tvar hf *HeadersFrame\n\t\tfor {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"ReadFrame while waiting for Headers: %v\", err)\n\t\t\t}\n\t\t\tswitch f.(type) {\n\t\t\tcase *WindowUpdateFrame, *SettingsFrame:\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar ok bool\n\t\t\thf, ok = f.(*HeadersFrame)\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"Got %T; want HeadersFrame\", f)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\tvar buf bytes.Buffer\n\t\tenc := hpack.NewEncoder(&buf)\n\t\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\t\tenc.WriteField(hpack.HeaderField{Name: \"content-length\", Value: \"5000\"})\n\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\tStreamID:      hf.StreamID,\n\t\t\tEndHeaders:    true,\n\t\t\tEndStream:     false,\n\t\t\tBlockFragment: buf.Bytes(),\n\t\t})\n\t\tpad := make([]byte, 5)\n\t\tct.fr.WriteDataPadded(hf.StreamID, false, make([]byte, 5000), pad) // without ending stream\n\n\t\tf, err := ct.readNonSettingsFrame()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"ReadFrame while waiting for first WindowUpdateFrame: %v\", err)\n\t\t}\n\t\twantBack := uint32(len(pad)) + 1 // one byte for the length of the padding\n\t\tif wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || wuf.StreamID != 0 {\n\t\t\treturn fmt.Errorf(\"Expected conn WindowUpdateFrame for %d bytes; got %v\", wantBack, summarizeFrame(f))\n\t\t}\n\n\t\tf, err = ct.readNonSettingsFrame()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"ReadFrame while waiting for second WindowUpdateFrame: %v\", err)\n\t\t}\n\t\tif wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || wuf.StreamID == 0 
{\n\t\t\treturn fmt.Errorf(\"Expected stream WindowUpdateFrame for %d bytes; got %v\", wantBack, summarizeFrame(f))\n\t\t}\n\t\tunblockClient <- true\n\t\treturn nil\n\t}\n\tct.run()\n}\n\n// golang.org/issue/16572 -- RoundTrip shouldn't hang when it gets a\n// StreamError as a result of the response HEADERS\nfunc TestTransportReturnsErrorOnBadResponseHeaders(t *testing.T) {\n\tct := newClientTester(t)\n\n\tct.client = func() error {\n\t\treq, _ := http.NewRequest(\"GET\", \"https://dummy.tld/\", nil)\n\t\tres, err := ct.tr.RoundTrip(req)\n\t\tif err == nil {\n\t\t\tres.Body.Close()\n\t\t\treturn errors.New(\"unexpected successful GET\")\n\t\t}\n\t\twant := StreamError{1, ErrCodeProtocol, headerFieldNameError(\"  content-type\")}\n\t\tif !reflect.DeepEqual(want, err) {\n\t\t\tt.Errorf(\"RoundTrip error = %#v; want %#v\", err, want)\n\t\t}\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\tct.greet()\n\n\t\thf, err := ct.firstHeaders()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar buf bytes.Buffer\n\t\tenc := hpack.NewEncoder(&buf)\n\t\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\t\tenc.WriteField(hpack.HeaderField{Name: \"  content-type\", Value: \"bogus\"}) // bogus spaces\n\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\tStreamID:      hf.StreamID,\n\t\t\tEndHeaders:    true,\n\t\t\tEndStream:     false,\n\t\t\tBlockFragment: buf.Bytes(),\n\t\t})\n\n\t\tfor {\n\t\t\tfr, err := ct.readFrame()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error waiting for RST_STREAM from client: %v\", err)\n\t\t\t}\n\t\t\tif _, ok := fr.(*SettingsFrame); ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif rst, ok := fr.(*RSTStreamFrame); !ok || rst.StreamID != 1 || rst.ErrCode != ErrCodeProtocol {\n\t\t\t\tt.Errorf(\"Frame = %v; want RST_STREAM for stream 1 with ErrCodeProtocol\", summarizeFrame(fr))\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\treturn nil\n\t}\n\tct.run()\n}\n\n// byteAndEOFReader returns is in an io.Reader which reads one byte\n// 
(the underlying byte) and io.EOF at once in its Read call.\ntype byteAndEOFReader byte\n\nfunc (b byteAndEOFReader) Read(p []byte) (n int, err error) {\n\tif len(p) == 0 {\n\t\tpanic(\"unexpected useless call\")\n\t}\n\tp[0] = byte(b)\n\treturn 1, io.EOF\n}\n\n// Issue 16788: the Transport had a regression where it started\n// sending a spurious DATA frame with a duplicate END_STREAM bit after\n// the request body writer goroutine had already read an EOF from the\n// Request.Body and included the END_STREAM on a data-carrying DATA\n// frame.\n//\n// Notably, to trigger this, the requests need to use a Request.Body\n// which returns (non-0, io.EOF) and also needs to set the ContentLength\n// explicitly.\nfunc TestTransportBodyDoubleEndStream(t *testing.T) {\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\t// Nothing.\n\t}, optOnlyServer)\n\tdefer st.Close()\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\n\tfor i := 0; i < 2; i++ {\n\t\treq, _ := http.NewRequest(\"POST\", st.ts.URL, byteAndEOFReader('a'))\n\t\treq.ContentLength = 1\n\t\tres, err := tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failure on req %d: %v\", i+1, err)\n\t\t}\n\t\tdefer res.Body.Close()\n\t}\n}\n\n// golang.org/issue/16847, golang.org/issue/19103\nfunc TestTransportRequestPathPseudo(t *testing.T) {\n\ttype result struct {\n\t\tpath string\n\t\terr  string\n\t}\n\ttests := []struct {\n\t\treq  *http.Request\n\t\twant result\n\t}{\n\t\t0: {\n\t\t\treq: &http.Request{\n\t\t\t\tMethod: \"GET\",\n\t\t\t\tURL: &url.URL{\n\t\t\t\t\tHost: \"foo.com\",\n\t\t\t\t\tPath: \"/foo\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: result{path: \"/foo\"},\n\t\t},\n\t\t// In Go 1.7, we accepted paths of \"//foo\".\n\t\t// In Go 1.8, we rejected it (issue 16847).\n\t\t// In Go 1.9, we accepted it again (issue 19103).\n\t\t1: {\n\t\t\treq: &http.Request{\n\t\t\t\tMethod: \"GET\",\n\t\t\t\tURL: &url.URL{\n\t\t\t\t\tHost: 
\"foo.com\",\n\t\t\t\t\tPath: \"//foo\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: result{path: \"//foo\"},\n\t\t},\n\n\t\t// Opaque with //$Matching_Hostname/path\n\t\t2: {\n\t\t\treq: &http.Request{\n\t\t\t\tMethod: \"GET\",\n\t\t\t\tURL: &url.URL{\n\t\t\t\t\tScheme: \"https\",\n\t\t\t\t\tOpaque: \"//foo.com/path\",\n\t\t\t\t\tHost:   \"foo.com\",\n\t\t\t\t\tPath:   \"/ignored\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: result{path: \"/path\"},\n\t\t},\n\n\t\t// Opaque with some other Request.Host instead:\n\t\t3: {\n\t\t\treq: &http.Request{\n\t\t\t\tMethod: \"GET\",\n\t\t\t\tHost:   \"bar.com\",\n\t\t\t\tURL: &url.URL{\n\t\t\t\t\tScheme: \"https\",\n\t\t\t\t\tOpaque: \"//bar.com/path\",\n\t\t\t\t\tHost:   \"foo.com\",\n\t\t\t\t\tPath:   \"/ignored\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: result{path: \"/path\"},\n\t\t},\n\n\t\t// Opaque without the leading \"//\":\n\t\t4: {\n\t\t\treq: &http.Request{\n\t\t\t\tMethod: \"GET\",\n\t\t\t\tURL: &url.URL{\n\t\t\t\t\tOpaque: \"/path\",\n\t\t\t\t\tHost:   \"foo.com\",\n\t\t\t\t\tPath:   \"/ignored\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: result{path: \"/path\"},\n\t\t},\n\n\t\t// Opaque we can't handle:\n\t\t5: {\n\t\t\treq: &http.Request{\n\t\t\t\tMethod: \"GET\",\n\t\t\t\tURL: &url.URL{\n\t\t\t\t\tScheme: \"https\",\n\t\t\t\t\tOpaque: \"//unknown_host/path\",\n\t\t\t\t\tHost:   \"foo.com\",\n\t\t\t\t\tPath:   \"/ignored\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: result{err: `invalid request :path \"https://unknown_host/path\" from URL.Opaque = \"//unknown_host/path\"`},\n\t\t},\n\n\t\t// A CONNECT request:\n\t\t6: {\n\t\t\treq: &http.Request{\n\t\t\t\tMethod: \"CONNECT\",\n\t\t\t\tURL: &url.URL{\n\t\t\t\t\tHost: \"foo.com\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: result{},\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tcc := &ClientConn{peerMaxHeaderListSize: 0xffffffffffffffff}\n\t\tcc.henc = hpack.NewEncoder(&cc.hbuf)\n\t\tcc.mu.Lock()\n\t\thdrs, err := cc.encodeHeaders(tt.req, false, \"\", -1)\n\t\tcc.mu.Unlock()\n\t\tvar got 
result\n\t\thpackDec := hpack.NewDecoder(initialHeaderTableSize, func(f hpack.HeaderField) {\n\t\t\tif f.Name == \":path\" {\n\t\t\t\tgot.path = f.Value\n\t\t\t}\n\t\t})\n\t\tif err != nil {\n\t\t\tgot.err = err.Error()\n\t\t} else if len(hdrs) > 0 {\n\t\t\tif _, err := hpackDec.Write(hdrs); err != nil {\n\t\t\t\tt.Errorf(\"%d. bogus hpack: %v\", i, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"%d. got %+v; want %+v\", i, got, tt.want)\n\t\t}\n\n\t}\n\n}\n\n// golang.org/issue/17071 -- don't sniff the first byte of the request body\n// before we've determined that the ClientConn is usable.\nfunc TestRoundTripDoesntConsumeRequestBodyEarly(t *testing.T) {\n\tconst body = \"foo\"\n\treq, _ := http.NewRequest(\"POST\", \"http://foo.com/\", ioutil.NopCloser(strings.NewReader(body)))\n\tcc := &ClientConn{\n\t\tclosed: true,\n\t}\n\t_, err := cc.RoundTrip(req)\n\tif err != errClientConnUnusable {\n\t\tt.Fatalf(\"RoundTrip = %v; want errClientConnUnusable\", err)\n\t}\n\tslurp, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tt.Errorf(\"ReadAll = %v\", err)\n\t}\n\tif string(slurp) != body {\n\t\tt.Errorf(\"Body = %q; want %q\", slurp, body)\n\t}\n}\n\nfunc TestClientConnPing(t *testing.T) {\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}, optOnlyServer)\n\tdefer st.Close()\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\tcc, err := tr.dialClientConn(st.ts.Listener.Addr().String(), false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err = cc.Ping(testContext{}); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\n// Issue 16974: if the server sent a DATA frame after the user\n// canceled the Transport's Request, the Transport previously wrote to a\n// closed pipe, got an error, and ended up closing the whole TCP\n// connection.\nfunc TestTransportCancelDataResponseRace(t *testing.T) {\n\tcancel := make(chan struct{})\n\tclientGotError := make(chan bool, 
1)\n\n\tconst msg = \"Hello.\"\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tif strings.Contains(r.URL.Path, \"/hello\") {\n\t\t\ttime.Sleep(50 * time.Millisecond)\n\t\t\tio.WriteString(w, msg)\n\t\t\treturn\n\t\t}\n\t\tfor i := 0; i < 50; i++ {\n\t\t\tio.WriteString(w, \"Some data.\")\n\t\t\tw.(http.Flusher).Flush()\n\t\t\tif i == 2 {\n\t\t\t\tclose(cancel)\n\t\t\t\t<-clientGotError\n\t\t\t}\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\t}, optOnlyServer)\n\tdefer st.Close()\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\n\tc := &http.Client{Transport: tr}\n\treq, _ := http.NewRequest(\"GET\", st.ts.URL, nil)\n\treq.Cancel = cancel\n\tres, err := c.Do(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err = io.Copy(ioutil.Discard, res.Body); err == nil {\n\t\tt.Fatal(\"unexpected success\")\n\t}\n\tclientGotError <- true\n\n\tres, err = c.Get(st.ts.URL + \"/hello\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tslurp, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif string(slurp) != msg {\n\t\tt.Errorf(\"Got = %q; want %q\", slurp, msg)\n\t}\n}\n\nfunc TestTransportRetryAfterGOAWAY(t *testing.T) {\n\tvar dialer struct {\n\t\tsync.Mutex\n\t\tcount int\n\t}\n\tct1 := make(chan *clientTester)\n\tct2 := make(chan *clientTester)\n\n\tln := newLocalListener(t)\n\tdefer ln.Close()\n\n\ttr := &Transport{\n\t\tTLSClientConfig: tlsConfigInsecure,\n\t}\n\ttr.DialTLS = func(network, addr string, cfg *tls.Config) (net.Conn, error) {\n\t\tdialer.Lock()\n\t\tdefer dialer.Unlock()\n\t\tdialer.count++\n\t\tif dialer.count == 3 {\n\t\t\treturn nil, errors.New(\"unexpected number of dials\")\n\t\t}\n\t\tcc, err := net.Dial(\"tcp\", ln.Addr().String())\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"dial error: %v\", err)\n\t\t}\n\t\tsc, err := ln.Accept()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"accept error: %v\", err)\n\t\t}\n\t\tct := 
&clientTester{\n\t\t\tt:  t,\n\t\t\ttr: tr,\n\t\t\tcc: cc,\n\t\t\tsc: sc,\n\t\t\tfr: NewFramer(sc, sc),\n\t\t}\n\t\tswitch dialer.count {\n\t\tcase 1:\n\t\t\tct1 <- ct\n\t\tcase 2:\n\t\t\tct2 <- ct\n\t\t}\n\t\treturn cc, nil\n\t}\n\n\terrs := make(chan error, 3)\n\tdone := make(chan struct{})\n\tdefer close(done)\n\n\t// Client.\n\tgo func() {\n\t\treq, _ := http.NewRequest(\"GET\", \"https://dummy.tld/\", nil)\n\t\tres, err := tr.RoundTrip(req)\n\t\tif res != nil {\n\t\t\tres.Body.Close()\n\t\t\tif got := res.Header.Get(\"Foo\"); got != \"bar\" {\n\t\t\t\terr = fmt.Errorf(\"foo header = %q; want bar\", got)\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"RoundTrip: %v\", err)\n\t\t}\n\t\terrs <- err\n\t}()\n\n\tconnToClose := make(chan io.Closer, 2)\n\n\t// Server for the first request.\n\tgo func() {\n\t\tvar ct *clientTester\n\t\tselect {\n\t\tcase ct = <-ct1:\n\t\tcase <-done:\n\t\t\treturn\n\t\t}\n\n\t\tconnToClose <- ct.cc\n\t\tct.greet()\n\t\thf, err := ct.firstHeaders()\n\t\tif err != nil {\n\t\t\terrs <- fmt.Errorf(\"server1 failed reading HEADERS: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tt.Logf(\"server1 got %v\", hf)\n\t\tif err := ct.fr.WriteGoAway(0 /*max id*/, ErrCodeNo, nil); err != nil {\n\t\t\terrs <- fmt.Errorf(\"server1 failed writing GOAWAY: %v\", err)\n\t\t\treturn\n\t\t}\n\t\terrs <- nil\n\t}()\n\n\t// Server for the second request.\n\tgo func() {\n\t\tvar ct *clientTester\n\t\tselect {\n\t\tcase ct = <-ct2:\n\t\tcase <-done:\n\t\t\treturn\n\t\t}\n\n\t\tconnToClose <- ct.cc\n\t\tct.greet()\n\t\thf, err := ct.firstHeaders()\n\t\tif err != nil {\n\t\t\terrs <- fmt.Errorf(\"server2 failed reading HEADERS: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tt.Logf(\"server2 got %v\", hf)\n\n\t\tvar buf bytes.Buffer\n\t\tenc := hpack.NewEncoder(&buf)\n\t\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\t\tenc.WriteField(hpack.HeaderField{Name: \"foo\", Value: \"bar\"})\n\t\terr = 
ct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\tStreamID:      hf.StreamID,\n\t\t\tEndHeaders:    true,\n\t\t\tEndStream:     false,\n\t\t\tBlockFragment: buf.Bytes(),\n\t\t})\n\t\tif err != nil {\n\t\t\terrs <- fmt.Errorf(\"server2 failed writing response HEADERS: %v\", err)\n\t\t} else {\n\t\t\terrs <- nil\n\t\t}\n\t}()\n\n\tfor k := 0; k < 3; k++ {\n\t\tselect {\n\t\tcase err := <-errs:\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\tcase <-time.After(1 * time.Second):\n\t\t\tt.Errorf(\"timed out\")\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase c := <-connToClose:\n\t\t\tc.Close()\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc TestTransportRetryAfterRefusedStream(t *testing.T) {\n\tclientDone := make(chan struct{})\n\tct := newClientTester(t)\n\tct.client = func() error {\n\t\tdefer ct.cc.(*net.TCPConn).CloseWrite()\n\t\tdefer close(clientDone)\n\t\treq, _ := http.NewRequest(\"GET\", \"https://dummy.tld/\", nil)\n\t\tresp, err := ct.tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"RoundTrip: %v\", err)\n\t\t}\n\t\tresp.Body.Close()\n\t\tif resp.StatusCode != 204 {\n\t\t\treturn fmt.Errorf(\"Status = %v; want 204\", resp.StatusCode)\n\t\t}\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\tct.greet()\n\t\tvar buf bytes.Buffer\n\t\tenc := hpack.NewEncoder(&buf)\n\t\tnreq := 0\n\n\t\tfor {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\tselect {\n\t\t\t\tcase <-clientDone:\n\t\t\t\t\t// If the client's done, it\n\t\t\t\t\t// will have reported any\n\t\t\t\t\t// errors on its side.\n\t\t\t\t\treturn nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tswitch f := f.(type) {\n\t\t\tcase *WindowUpdateFrame, *SettingsFrame:\n\t\t\tcase *HeadersFrame:\n\t\t\t\tif !f.HeadersEnded() {\n\t\t\t\t\treturn fmt.Errorf(\"headers should have END_HEADERS be ended: %v\", f)\n\t\t\t\t}\n\t\t\t\tnreq++\n\t\t\t\tif nreq == 1 {\n\t\t\t\t\tct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream)\n\t\t\t\t} else 
{\n\t\t\t\t\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"204\"})\n\t\t\t\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\t\t\t\tStreamID:      f.StreamID,\n\t\t\t\t\t\tEndHeaders:    true,\n\t\t\t\t\t\tEndStream:     true,\n\t\t\t\t\t\tBlockFragment: buf.Bytes(),\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"Unexpected client frame %v\", f)\n\t\t\t}\n\t\t}\n\t}\n\tct.run()\n}\n\nfunc TestTransportRetryHasLimit(t *testing.T) {\n\t// Skip in short mode because the total expected delay is 1s+2s+4s+8s+16s=29s.\n\tif testing.Short() {\n\t\tt.Skip(\"skipping long test in short mode\")\n\t}\n\tclientDone := make(chan struct{})\n\tct := newClientTester(t)\n\tct.client = func() error {\n\t\tdefer ct.cc.(*net.TCPConn).CloseWrite()\n\t\tdefer close(clientDone)\n\t\treq, _ := http.NewRequest(\"GET\", \"https://dummy.tld/\", nil)\n\t\tresp, err := ct.tr.RoundTrip(req)\n\t\tif err == nil {\n\t\t\treturn fmt.Errorf(\"RoundTrip expected error, got response: %+v\", resp)\n\t\t}\n\t\tt.Logf(\"expected error, got: %v\", err)\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\tct.greet()\n\t\tfor {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\tselect {\n\t\t\t\tcase <-clientDone:\n\t\t\t\t\t// If the client's done, it\n\t\t\t\t\t// will have reported any\n\t\t\t\t\t// errors on its side.\n\t\t\t\t\treturn nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tswitch f := f.(type) {\n\t\t\tcase *WindowUpdateFrame, *SettingsFrame:\n\t\t\tcase *HeadersFrame:\n\t\t\t\tif !f.HeadersEnded() {\n\t\t\t\t\treturn fmt.Errorf(\"headers should have END_HEADERS be ended: %v\", f)\n\t\t\t\t}\n\t\t\t\tct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream)\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"Unexpected client frame %v\", f)\n\t\t\t}\n\t\t}\n\t}\n\tct.run()\n}\n\nfunc TestTransportResponseDataBeforeHeaders(t *testing.T) {\n\tct := newClientTester(t)\n\tct.client = func() error {\n\t\tdefer 
ct.cc.(*net.TCPConn).CloseWrite()\n\t\treq := httptest.NewRequest(\"GET\", \"https://dummy.tld/\", nil)\n\t\t// First request is normal to ensure the check is per stream and not per connection.\n\t\t_, err := ct.tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"RoundTrip expected no error, got: %v\", err)\n\t\t}\n\t\t// Second request returns a DATA frame with no HEADERS.\n\t\tresp, err := ct.tr.RoundTrip(req)\n\t\tif err == nil {\n\t\t\treturn fmt.Errorf(\"RoundTrip expected error, got response: %+v\", resp)\n\t\t}\n\t\tif err, ok := err.(StreamError); !ok || err.Code != ErrCodeProtocol {\n\t\t\treturn fmt.Errorf(\"expected stream PROTOCOL_ERROR, got: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\tct.greet()\n\t\tfor {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err == io.EOF {\n\t\t\t\treturn nil\n\t\t\t} else if err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tswitch f := f.(type) {\n\t\t\tcase *WindowUpdateFrame, *SettingsFrame:\n\t\t\tcase *HeadersFrame:\n\t\t\t\tswitch f.StreamID {\n\t\t\t\tcase 1:\n\t\t\t\t\t// Send a valid response to first request.\n\t\t\t\t\tvar buf bytes.Buffer\n\t\t\t\t\tenc := hpack.NewEncoder(&buf)\n\t\t\t\t\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"200\"})\n\t\t\t\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\t\t\t\tStreamID:      f.StreamID,\n\t\t\t\t\t\tEndHeaders:    true,\n\t\t\t\t\t\tEndStream:     true,\n\t\t\t\t\t\tBlockFragment: buf.Bytes(),\n\t\t\t\t\t})\n\t\t\t\tcase 3:\n\t\t\t\t\tct.fr.WriteData(f.StreamID, true, []byte(\"payload\"))\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"Unexpected client frame %v\", f)\n\t\t\t}\n\t\t}\n\t}\n\tct.run()\n}\nfunc TestTransportRequestsStallAtServerLimit(t *testing.T) {\n\tconst maxConcurrent = 2\n\n\tgreet := make(chan struct{})      // server sends initial SETTINGS frame\n\tgotRequest := make(chan struct{}) // server received a request\n\tclientDone := make(chan struct{})\n\n\t// Collect errors from 
goroutines.\n\tvar wg sync.WaitGroup\n\terrs := make(chan error, 100)\n\tdefer func() {\n\t\twg.Wait()\n\t\tclose(errs)\n\t\tfor err := range errs {\n\t\t\tt.Error(err)\n\t\t}\n\t}()\n\n\t// We will send maxConcurrent+2 requests. This checker goroutine waits for the\n\t// following stages:\n\t//   1. The first maxConcurrent requests are received by the server.\n\t//   2. The client will cancel the next request\n\t//   3. The server is unblocked so it can service the first maxConcurrent requests\n\t//   4. The client will send the final request\n\twg.Add(1)\n\tunblockClient := make(chan struct{})\n\tclientRequestCancelled := make(chan struct{})\n\tunblockServer := make(chan struct{})\n\tgo func() {\n\t\tdefer wg.Done()\n\t\t// Stage 1.\n\t\tfor k := 0; k < maxConcurrent; k++ {\n\t\t\t<-gotRequest\n\t\t}\n\t\t// Stage 2.\n\t\tclose(unblockClient)\n\t\t<-clientRequestCancelled\n\t\t// Stage 3: give some time for the final RoundTrip call to be scheduled and\n\t\t// verify that the final request is not sent.\n\t\ttime.Sleep(50 * time.Millisecond)\n\t\tselect {\n\t\tcase <-gotRequest:\n\t\t\terrs <- errors.New(\"last request did not stall\")\n\t\t\tclose(unblockServer)\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\tclose(unblockServer)\n\t\t// Stage 4.\n\t\t<-gotRequest\n\t}()\n\n\tct := newClientTester(t)\n\tct.client = func() error {\n\t\tvar wg sync.WaitGroup\n\t\tdefer func() {\n\t\t\twg.Wait()\n\t\t\tclose(clientDone)\n\t\t\tct.cc.(*net.TCPConn).CloseWrite()\n\t\t}()\n\t\tfor k := 0; k < maxConcurrent+2; k++ {\n\t\t\twg.Add(1)\n\t\t\tgo func(k int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t// Don't send the second request until after receiving SETTINGS from the server\n\t\t\t\t// to avoid a race where we use the default SettingMaxConcurrentStreams, which\n\t\t\t\t// is much larger than maxConcurrent. 
We have to send the first request before\n\t\t\t\t// waiting because the first request triggers the dial and greet.\n\t\t\t\tif k > 0 {\n\t\t\t\t\t<-greet\n\t\t\t\t}\n\t\t\t\t// Block until maxConcurrent requests are sent before sending any more.\n\t\t\t\tif k >= maxConcurrent {\n\t\t\t\t\t<-unblockClient\n\t\t\t\t}\n\t\t\t\treq, _ := http.NewRequest(\"GET\", fmt.Sprintf(\"https://dummy.tld/%d\", k), nil)\n\t\t\t\tif k == maxConcurrent {\n\t\t\t\t\t// This request will be canceled.\n\t\t\t\t\tcancel := make(chan struct{})\n\t\t\t\t\treq.Cancel = cancel\n\t\t\t\t\tclose(cancel)\n\t\t\t\t\t_, err := ct.tr.RoundTrip(req)\n\t\t\t\t\tclose(clientRequestCancelled)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\terrs <- fmt.Errorf(\"RoundTrip(%d) should have failed due to cancel\", k)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tresp, err := ct.tr.RoundTrip(req)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\terrs <- fmt.Errorf(\"RoundTrip(%d): %v\", k, err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tioutil.ReadAll(resp.Body)\n\t\t\t\t\tresp.Body.Close()\n\t\t\t\t\tif resp.StatusCode != 204 {\n\t\t\t\t\t\terrs <- fmt.Errorf(\"Status = %v; want 204\", resp.StatusCode)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(k)\n\t\t}\n\t\treturn nil\n\t}\n\n\tct.server = func() error {\n\t\tvar wg sync.WaitGroup\n\t\tdefer wg.Wait()\n\n\t\tct.greet(Setting{SettingMaxConcurrentStreams, maxConcurrent})\n\n\t\t// Server write loop.\n\t\tvar buf bytes.Buffer\n\t\tenc := hpack.NewEncoder(&buf)\n\t\twriteResp := make(chan uint32, maxConcurrent+1)\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t<-unblockServer\n\t\t\tfor id := range writeResp {\n\t\t\t\tbuf.Reset()\n\t\t\t\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: \"204\"})\n\t\t\t\tct.fr.WriteHeaders(HeadersFrameParam{\n\t\t\t\t\tStreamID:      id,\n\t\t\t\t\tEndHeaders:    true,\n\t\t\t\t\tEndStream:     true,\n\t\t\t\t\tBlockFragment: buf.Bytes(),\n\t\t\t\t})\n\t\t\t}\n\t\t}()\n\n\t\t// Server 
read loop.\n\t\tvar nreq int\n\t\tfor {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\tselect {\n\t\t\t\tcase <-clientDone:\n\t\t\t\t\t// If the client's done, it will have reported any errors on its side.\n\t\t\t\t\treturn nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tswitch f := f.(type) {\n\t\t\tcase *WindowUpdateFrame:\n\t\t\tcase *SettingsFrame:\n\t\t\t\t// Wait for the client SETTINGS ack until ending the greet.\n\t\t\t\tclose(greet)\n\t\t\tcase *HeadersFrame:\n\t\t\t\tif !f.HeadersEnded() {\n\t\t\t\t\treturn fmt.Errorf(\"headers should have END_HEADERS be ended: %v\", f)\n\t\t\t\t}\n\t\t\t\tgotRequest <- struct{}{}\n\t\t\t\tnreq++\n\t\t\t\twriteResp <- f.StreamID\n\t\t\t\tif nreq == maxConcurrent+1 {\n\t\t\t\t\tclose(writeResp)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"Unexpected client frame %v\", f)\n\t\t\t}\n\t\t}\n\t}\n\n\tct.run()\n}\n\nfunc TestAuthorityAddr(t *testing.T) {\n\ttests := []struct {\n\t\tscheme, authority string\n\t\twant              string\n\t}{\n\t\t{\"http\", \"foo.com\", \"foo.com:80\"},\n\t\t{\"https\", \"foo.com\", \"foo.com:443\"},\n\t\t{\"https\", \"foo.com:1234\", \"foo.com:1234\"},\n\t\t{\"https\", \"1.2.3.4:1234\", \"1.2.3.4:1234\"},\n\t\t{\"https\", \"1.2.3.4\", \"1.2.3.4:443\"},\n\t\t{\"https\", \"[::1]:1234\", \"[::1]:1234\"},\n\t\t{\"https\", \"[::1]\", \"[::1]:443\"},\n\t}\n\tfor _, tt := range tests {\n\t\tgot := authorityAddr(tt.scheme, tt.authority)\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"authorityAddr(%q, %q) = %q; want %q\", tt.scheme, tt.authority, got, tt.want)\n\t\t}\n\t}\n}\n\n// Issue 20448: stop allocating for DATA frames' payload after\n// Response.Body.Close is called.\nfunc TestTransportAllocationsAfterResponseBodyClose(t *testing.T) {\n\tmegabyteZero := make([]byte, 1<<20)\n\n\twriteErr := make(chan error, 1)\n\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tw.(http.Flusher).Flush()\n\t\tvar sum int64\n\t\tfor i := 
0; i < 100; i++ {\n\t\t\tn, err := w.Write(megabyteZero)\n\t\t\tsum += int64(n)\n\t\t\tif err != nil {\n\t\t\t\twriteErr <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tt.Logf(\"wrote all %d bytes\", sum)\n\t\twriteErr <- nil\n\t}, optOnlyServer)\n\tdefer st.Close()\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\tc := &http.Client{Transport: tr}\n\tres, err := c.Get(st.ts.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tvar buf [1]byte\n\tif _, err := res.Body.Read(buf[:]); err != nil {\n\t\tt.Error(err)\n\t}\n\tif err := res.Body.Close(); err != nil {\n\t\tt.Error(err)\n\t}\n\n\ttrb, ok := res.Body.(transportResponseBody)\n\tif !ok {\n\t\tt.Fatalf(\"res.Body = %T; want transportResponseBody\", res.Body)\n\t}\n\tif trb.cs.bufPipe.b != nil {\n\t\tt.Errorf(\"response body pipe is still open\")\n\t}\n\n\tgotErr := <-writeErr\n\tif gotErr == nil {\n\t\tt.Errorf(\"Handler unexpectedly managed to write its entire response without getting an error\")\n\t} else if gotErr != errStreamClosed {\n\t\tt.Errorf(\"Handler Write err = %v; want errStreamClosed\", gotErr)\n\t}\n}\n\n// Issue 18891: make sure Request.Body == NoBody means no DATA frame\n// is ever sent, even if empty.\nfunc TestTransportNoBodyMeansNoDATA(t *testing.T) {\n\tct := newClientTester(t)\n\n\tunblockClient := make(chan bool)\n\n\tct.client = func() error {\n\t\treq, _ := http.NewRequest(\"GET\", \"https://dummy.tld/\", go18httpNoBody())\n\t\tct.tr.RoundTrip(req)\n\t\t<-unblockClient\n\t\treturn nil\n\t}\n\tct.server = func() error {\n\t\tdefer close(unblockClient)\n\t\tdefer ct.cc.(*net.TCPConn).Close()\n\t\tct.greet()\n\n\t\tfor {\n\t\t\tf, err := ct.fr.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"ReadFrame while waiting for Headers: %v\", err)\n\t\t\t}\n\t\t\tswitch f := f.(type) {\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"Got %T; want HeadersFrame\", f)\n\t\t\tcase *WindowUpdateFrame, *SettingsFrame:\n\t\t\t\tcontinue\n\t\t\tcase 
*HeadersFrame:\n\t\t\t\tif !f.StreamEnded() {\n\t\t\t\t\treturn fmt.Errorf(\"got headers frame without END_STREAM\")\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\tct.run()\n}\n\nfunc benchSimpleRoundTrip(b *testing.B, nHeaders int) {\n\tdefer disableGoroutineTracking()()\n\tb.ReportAllocs()\n\tst := newServerTester(b,\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t},\n\t\toptOnlyServer,\n\t\toptQuiet,\n\t)\n\tdefer st.Close()\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\n\treq, err := http.NewRequest(\"GET\", st.ts.URL, nil)\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\n\tfor i := 0; i < nHeaders; i++ {\n\t\tname := fmt.Sprint(\"A-\", i)\n\t\treq.Header.Set(name, \"*\")\n\t}\n\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tres, err := tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\tif res != nil {\n\t\t\t\tres.Body.Close()\n\t\t\t}\n\t\t\tb.Fatalf(\"RoundTrip err = %v; want nil\", err)\n\t\t}\n\t\tres.Body.Close()\n\t\tif res.StatusCode != http.StatusOK {\n\t\t\tb.Fatalf(\"Response code = %v; want %v\", res.StatusCode, http.StatusOK)\n\t\t}\n\t}\n}\n\ntype infiniteReader struct{}\n\nfunc (r infiniteReader) Read(b []byte) (int, error) {\n\treturn len(b), nil\n}\n\n// Issue 20521: it is not an error to receive a response and end stream\n// from the server without the body being consumed.\nfunc TestTransportResponseAndResetWithoutConsumingBodyRace(t *testing.T) {\n\tst := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusOK)\n\t}, optOnlyServer)\n\tdefer st.Close()\n\n\ttr := &Transport{TLSClientConfig: tlsConfigInsecure}\n\tdefer tr.CloseIdleConnections()\n\n\t// The request body needs to be big enough to trigger flow control.\n\treq, _ := http.NewRequest(\"PUT\", st.ts.URL, infiniteReader{})\n\tres, err := tr.RoundTrip(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif res.StatusCode != http.StatusOK {\n\t\tt.Fatalf(\"Response code = %v; want 
%v\", res.StatusCode, http.StatusOK)\n\t}\n}\n\nfunc BenchmarkClientRequestHeaders(b *testing.B) {\n\tb.Run(\"   0 Headers\", func(b *testing.B) { benchSimpleRoundTrip(b, 0) })\n\tb.Run(\"  10 Headers\", func(b *testing.B) { benchSimpleRoundTrip(b, 10) })\n\tb.Run(\" 100 Headers\", func(b *testing.B) { benchSimpleRoundTrip(b, 100) })\n\tb.Run(\"1000 Headers\", func(b *testing.B) { benchSimpleRoundTrip(b, 1000) })\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/write.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"net/http\"\n\t\"net/url\"\n\n\t\"golang.org/x/net/http2/hpack\"\n\t\"golang.org/x/net/lex/httplex\"\n)\n\n// writeFramer is implemented by any type that is used to write frames.\ntype writeFramer interface {\n\twriteFrame(writeContext) error\n\n\t// staysWithinBuffer reports whether this writer promises that\n\t// it will only write less than or equal to size bytes, and it\n\t// won't Flush the write context.\n\tstaysWithinBuffer(size int) bool\n}\n\n// writeContext is the interface needed by the various frame writer\n// types below. All the writeFrame methods below are scheduled via the\n// frame writing scheduler (see writeScheduler in writesched.go).\n//\n// This interface is implemented by *serverConn.\n//\n// TODO: decide whether to a) use this in the client code (which didn't\n// end up using this yet, because it has a simpler design, not\n// currently implementing priorities), or b) delete this and\n// make the server code a bit more concrete.\ntype writeContext interface {\n\tFramer() *Framer\n\tFlush() error\n\tCloseConn() error\n\t// HeaderEncoder returns an HPACK encoder that writes to the\n\t// returned buffer.\n\tHeaderEncoder() (*hpack.Encoder, *bytes.Buffer)\n}\n\n// writeEndsStream reports whether w writes a frame that will transition\n// the stream to a half-closed local state. This returns false for RST_STREAM,\n// which closes the entire stream (not just the local half).\nfunc writeEndsStream(w writeFramer) bool {\n\tswitch v := w.(type) {\n\tcase *writeData:\n\t\treturn v.endStream\n\tcase *writeResHeaders:\n\t\treturn v.endStream\n\tcase nil:\n\t\t// This can only happen if the caller reuses w after it's\n\t\t// been intentionally nil'ed out to prevent use. 
Keep this\n\t\t// here to catch future refactoring breaking it.\n\t\tpanic(\"writeEndsStream called on nil writeFramer\")\n\t}\n\treturn false\n}\n\ntype flushFrameWriter struct{}\n\nfunc (flushFrameWriter) writeFrame(ctx writeContext) error {\n\treturn ctx.Flush()\n}\n\nfunc (flushFrameWriter) staysWithinBuffer(max int) bool { return false }\n\ntype writeSettings []Setting\n\nfunc (s writeSettings) staysWithinBuffer(max int) bool {\n\tconst settingSize = 6 // uint16 + uint32\n\treturn frameHeaderLen+settingSize*len(s) <= max\n\n}\n\nfunc (s writeSettings) writeFrame(ctx writeContext) error {\n\treturn ctx.Framer().WriteSettings([]Setting(s)...)\n}\n\ntype writeGoAway struct {\n\tmaxStreamID uint32\n\tcode        ErrCode\n}\n\nfunc (p *writeGoAway) writeFrame(ctx writeContext) error {\n\terr := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)\n\tctx.Flush() // ignore error: we're hanging up on them anyway\n\treturn err\n}\n\nfunc (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes\n\ntype writeData struct {\n\tstreamID  uint32\n\tp         []byte\n\tendStream bool\n}\n\nfunc (w *writeData) String() string {\n\treturn fmt.Sprintf(\"writeData(stream=%d, p=%d, endStream=%v)\", w.streamID, len(w.p), w.endStream)\n}\n\nfunc (w *writeData) writeFrame(ctx writeContext) error {\n\treturn ctx.Framer().WriteData(w.streamID, w.endStream, w.p)\n}\n\nfunc (w *writeData) staysWithinBuffer(max int) bool {\n\treturn frameHeaderLen+len(w.p) <= max\n}\n\n// handlerPanicRST is the message sent from handler goroutines when\n// the handler panics.\ntype handlerPanicRST struct {\n\tStreamID uint32\n}\n\nfunc (hp handlerPanicRST) writeFrame(ctx writeContext) error {\n\treturn ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)\n}\n\nfunc (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }\n\nfunc (se StreamError) writeFrame(ctx writeContext) error {\n\treturn ctx.Framer().WriteRSTStream(se.StreamID, se.Code)\n}\n\nfunc 
(se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }\n\ntype writePingAck struct{ pf *PingFrame }\n\nfunc (w writePingAck) writeFrame(ctx writeContext) error {\n\treturn ctx.Framer().WritePing(true, w.pf.Data)\n}\n\nfunc (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max }\n\ntype writeSettingsAck struct{}\n\nfunc (writeSettingsAck) writeFrame(ctx writeContext) error {\n\treturn ctx.Framer().WriteSettingsAck()\n}\n\nfunc (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max }\n\n// splitHeaderBlock splits headerBlock into fragments so that each fragment fits\n// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true\n// for the first/last fragment, respectively.\nfunc splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error {\n\t// For now we're lazy and just pick the minimum MAX_FRAME_SIZE\n\t// that all peers must support (16KB). Later we could care\n\t// more and send larger frames if the peer advertised it, but\n\t// there's little point. 
Most headers are small anyway (so we\n\t// generally won't have CONTINUATION frames), and extra frames\n\t// only waste 9 bytes anyway.\n\tconst maxFrameSize = 16384\n\n\tfirst := true\n\tfor len(headerBlock) > 0 {\n\t\tfrag := headerBlock\n\t\tif len(frag) > maxFrameSize {\n\t\t\tfrag = frag[:maxFrameSize]\n\t\t}\n\t\theaderBlock = headerBlock[len(frag):]\n\t\tif err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfirst = false\n\t}\n\treturn nil\n}\n\n// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames\n// for HTTP response headers or trailers from a server handler.\ntype writeResHeaders struct {\n\tstreamID    uint32\n\thttpResCode int         // 0 means no \":status\" line\n\th           http.Header // may be nil\n\ttrailers    []string    // if non-nil, which keys of h to write. nil means all.\n\tendStream   bool\n\n\tdate          string\n\tcontentType   string\n\tcontentLength string\n}\n\nfunc encKV(enc *hpack.Encoder, k, v string) {\n\tif VerboseLogs {\n\t\tlog.Printf(\"http2: server encoding header %q = %q\", k, v)\n\t}\n\tenc.WriteField(hpack.HeaderField{Name: k, Value: v})\n}\n\nfunc (w *writeResHeaders) staysWithinBuffer(max int) bool {\n\t// TODO: this is a common one. It'd be nice to return true\n\t// here and get into the fast path if we could be clever and\n\t// calculate the size fast enough, or at least a conservative\n\t// uppper bound that usually fires. 
(Maybe if w.h and\n\t// w.trailers are nil, so we don't need to enumerate it.)\n\t// Otherwise I'm afraid that just calculating the length to\n\t// answer this question would be slower than the ~2µs benefit.\n\treturn false\n}\n\nfunc (w *writeResHeaders) writeFrame(ctx writeContext) error {\n\tenc, buf := ctx.HeaderEncoder()\n\tbuf.Reset()\n\n\tif w.httpResCode != 0 {\n\t\tencKV(enc, \":status\", httpCodeString(w.httpResCode))\n\t}\n\n\tencodeHeaders(enc, w.h, w.trailers)\n\n\tif w.contentType != \"\" {\n\t\tencKV(enc, \"content-type\", w.contentType)\n\t}\n\tif w.contentLength != \"\" {\n\t\tencKV(enc, \"content-length\", w.contentLength)\n\t}\n\tif w.date != \"\" {\n\t\tencKV(enc, \"date\", w.date)\n\t}\n\n\theaderBlock := buf.Bytes()\n\tif len(headerBlock) == 0 && w.trailers == nil {\n\t\tpanic(\"unexpected empty hpack\")\n\t}\n\n\treturn splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)\n}\n\nfunc (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {\n\tif firstFrag {\n\t\treturn ctx.Framer().WriteHeaders(HeadersFrameParam{\n\t\t\tStreamID:      w.streamID,\n\t\t\tBlockFragment: frag,\n\t\t\tEndStream:     w.endStream,\n\t\t\tEndHeaders:    lastFrag,\n\t\t})\n\t} else {\n\t\treturn ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)\n\t}\n}\n\n// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.\ntype writePushPromise struct {\n\tstreamID uint32   // pusher stream\n\tmethod   string   // for :method\n\turl      *url.URL // for :scheme, :authority, :path\n\th        http.Header\n\n\t// Creates an ID for a pushed stream. This runs on serveG just before\n\t// the frame is written. 
The returned ID is copied to promisedID.\n\tallocatePromisedID func() (uint32, error)\n\tpromisedID         uint32\n}\n\nfunc (w *writePushPromise) staysWithinBuffer(max int) bool {\n\t// TODO: see writeResHeaders.staysWithinBuffer\n\treturn false\n}\n\nfunc (w *writePushPromise) writeFrame(ctx writeContext) error {\n\tenc, buf := ctx.HeaderEncoder()\n\tbuf.Reset()\n\n\tencKV(enc, \":method\", w.method)\n\tencKV(enc, \":scheme\", w.url.Scheme)\n\tencKV(enc, \":authority\", w.url.Host)\n\tencKV(enc, \":path\", w.url.RequestURI())\n\tencodeHeaders(enc, w.h, nil)\n\n\theaderBlock := buf.Bytes()\n\tif len(headerBlock) == 0 {\n\t\tpanic(\"unexpected empty hpack\")\n\t}\n\n\treturn splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)\n}\n\nfunc (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {\n\tif firstFrag {\n\t\treturn ctx.Framer().WritePushPromise(PushPromiseParam{\n\t\t\tStreamID:      w.streamID,\n\t\t\tPromiseID:     w.promisedID,\n\t\t\tBlockFragment: frag,\n\t\t\tEndHeaders:    lastFrag,\n\t\t})\n\t} else {\n\t\treturn ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)\n\t}\n}\n\ntype write100ContinueHeadersFrame struct {\n\tstreamID uint32\n}\n\nfunc (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {\n\tenc, buf := ctx.HeaderEncoder()\n\tbuf.Reset()\n\tencKV(enc, \":status\", \"100\")\n\treturn ctx.Framer().WriteHeaders(HeadersFrameParam{\n\t\tStreamID:      w.streamID,\n\t\tBlockFragment: buf.Bytes(),\n\t\tEndStream:     false,\n\t\tEndHeaders:    true,\n\t})\n}\n\nfunc (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {\n\t// Sloppy but conservative:\n\treturn 9+2*(len(\":status\")+len(\"100\")) <= max\n}\n\ntype writeWindowUpdate struct {\n\tstreamID uint32 // or 0 for conn-level\n\tn        uint32\n}\n\nfunc (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }\n\nfunc (wu writeWindowUpdate) writeFrame(ctx writeContext) 
error {\n\treturn ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)\n}\n\n// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])\n// is encoded only only if k is in keys.\nfunc encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {\n\tif keys == nil {\n\t\tsorter := sorterPool.Get().(*sorter)\n\t\t// Using defer here, since the returned keys from the\n\t\t// sorter.Keys method is only valid until the sorter\n\t\t// is returned:\n\t\tdefer sorterPool.Put(sorter)\n\t\tkeys = sorter.Keys(h)\n\t}\n\tfor _, k := range keys {\n\t\tvv := h[k]\n\t\tk = lowerHeader(k)\n\t\tif !validWireHeaderFieldName(k) {\n\t\t\t// Skip it as backup paranoia. Per\n\t\t\t// golang.org/issue/14048, these should\n\t\t\t// already be rejected at a higher level.\n\t\t\tcontinue\n\t\t}\n\t\tisTE := k == \"transfer-encoding\"\n\t\tfor _, v := range vv {\n\t\t\tif !httplex.ValidHeaderFieldValue(v) {\n\t\t\t\t// TODO: return an error? golang.org/issue/14048\n\t\t\t\t// For now just omit it.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// TODO: more of \"8.1.2.2 Connection-Specific Header Fields\"\n\t\t\tif isTE && v != \"trailers\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tencKV(enc, k, v)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/writesched.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport \"fmt\"\n\n// WriteScheduler is the interface implemented by HTTP/2 write schedulers.\n// Methods are never called concurrently.\ntype WriteScheduler interface {\n\t// OpenStream opens a new stream in the write scheduler.\n\t// It is illegal to call this with streamID=0 or with a streamID that is\n\t// already open -- the call may panic.\n\tOpenStream(streamID uint32, options OpenStreamOptions)\n\n\t// CloseStream closes a stream in the write scheduler. Any frames queued on\n\t// this stream should be discarded. It is illegal to call this on a stream\n\t// that is not open -- the call may panic.\n\tCloseStream(streamID uint32)\n\n\t// AdjustStream adjusts the priority of the given stream. This may be called\n\t// on a stream that has not yet been opened or has been closed. Note that\n\t// RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:\n\t// https://tools.ietf.org/html/rfc7540#section-5.1\n\tAdjustStream(streamID uint32, priority PriorityParam)\n\n\t// Push queues a frame in the scheduler. In most cases, this will not be\n\t// called with wr.StreamID()!=0 unless that stream is currently open. The one\n\t// exception is RST_STREAM frames, which may be sent on idle or closed streams.\n\tPush(wr FrameWriteRequest)\n\n\t// Pop dequeues the next frame to write. Returns false if no frames can\n\t// be written. Frames with a given wr.StreamID() are Pop'd in the same\n\t// order they are Push'd.\n\tPop() (wr FrameWriteRequest, ok bool)\n}\n\n// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.\ntype OpenStreamOptions struct {\n\t// PusherID is zero if the stream was initiated by the client. 
Otherwise,\n\t// PusherID names the stream that pushed the newly opened stream.\n\tPusherID uint32\n}\n\n// FrameWriteRequest is a request to write a frame.\ntype FrameWriteRequest struct {\n\t// write is the interface value that does the writing, once the\n\t// WriteScheduler has selected this frame to write. The write\n\t// functions are all defined in write.go.\n\twrite writeFramer\n\n\t// stream is the stream on which this frame will be written.\n\t// nil for non-stream frames like PING and SETTINGS.\n\tstream *stream\n\n\t// done, if non-nil, must be a buffered channel with space for\n\t// 1 message and is sent the return value from write (or an\n\t// earlier error) when the frame has been written.\n\tdone chan error\n}\n\n// StreamID returns the id of the stream this frame will be written to.\n// 0 is used for non-stream frames such as PING and SETTINGS.\nfunc (wr FrameWriteRequest) StreamID() uint32 {\n\tif wr.stream == nil {\n\t\tif se, ok := wr.write.(StreamError); ok {\n\t\t\t// (*serverConn).resetStream doesn't set\n\t\t\t// stream because it doesn't necessarily have\n\t\t\t// one. So special case this type of write\n\t\t\t// message.\n\t\t\treturn se.StreamID\n\t\t}\n\t\treturn 0\n\t}\n\treturn wr.stream.id\n}\n\n// DataSize returns the number of flow control bytes that must be consumed\n// to write this entire frame. This is 0 for non-DATA frames.\nfunc (wr FrameWriteRequest) DataSize() int {\n\tif wd, ok := wr.write.(*writeData); ok {\n\t\treturn len(wd.p)\n\t}\n\treturn 0\n}\n\n// Consume consumes min(n, available) bytes from this frame, where available\n// is the number of flow control bytes available on the stream. Consume returns\n// 0, 1, or 2 frames, where the integer return value gives the number of frames\n// returned.\n//\n// If flow control prevents consuming any bytes, this returns (_, _, 0). If\n// the entire frame was consumed, this returns (wr, _, 1). 
Otherwise, this\n// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and\n// 'rest' contains the remaining bytes. The consumed bytes are deducted from the\n// underlying stream's flow control budget.\nfunc (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) {\n\tvar empty FrameWriteRequest\n\n\t// Non-DATA frames are always consumed whole.\n\twd, ok := wr.write.(*writeData)\n\tif !ok || len(wd.p) == 0 {\n\t\treturn wr, empty, 1\n\t}\n\n\t// Might need to split after applying limits.\n\tallowed := wr.stream.flow.available()\n\tif n < allowed {\n\t\tallowed = n\n\t}\n\tif wr.stream.sc.maxFrameSize < allowed {\n\t\tallowed = wr.stream.sc.maxFrameSize\n\t}\n\tif allowed <= 0 {\n\t\treturn empty, empty, 0\n\t}\n\tif len(wd.p) > int(allowed) {\n\t\twr.stream.flow.take(allowed)\n\t\tconsumed := FrameWriteRequest{\n\t\t\tstream: wr.stream,\n\t\t\twrite: &writeData{\n\t\t\t\tstreamID: wd.streamID,\n\t\t\t\tp:        wd.p[:allowed],\n\t\t\t\t// Even if the original had endStream set, there\n\t\t\t\t// are bytes remaining because len(wd.p) > allowed,\n\t\t\t\t// so we know endStream is false.\n\t\t\t\tendStream: false,\n\t\t\t},\n\t\t\t// Our caller is blocking on the final DATA frame, not\n\t\t\t// this intermediate frame, so no need to wait.\n\t\t\tdone: nil,\n\t\t}\n\t\trest := FrameWriteRequest{\n\t\t\tstream: wr.stream,\n\t\t\twrite: &writeData{\n\t\t\t\tstreamID:  wd.streamID,\n\t\t\t\tp:         wd.p[allowed:],\n\t\t\t\tendStream: wd.endStream,\n\t\t\t},\n\t\t\tdone: wr.done,\n\t\t}\n\t\treturn consumed, rest, 2\n\t}\n\n\t// The frame is consumed whole.\n\t// NB: This cast cannot overflow because allowed is <= math.MaxInt32.\n\twr.stream.flow.take(int32(len(wd.p)))\n\treturn wr, empty, 1\n}\n\n// String is for debugging only.\nfunc (wr FrameWriteRequest) String() string {\n\tvar des string\n\tif s, ok := wr.write.(fmt.Stringer); ok {\n\t\tdes = s.String()\n\t} else {\n\t\tdes = fmt.Sprintf(\"%T\", 
wr.write)\n\t}\n\treturn fmt.Sprintf(\"[FrameWriteRequest stream=%d, ch=%v, writer=%v]\", wr.StreamID(), wr.done != nil, des)\n}\n\n// replyToWriter sends err to wr.done and panics if the send must block\n// This does nothing if wr.done is nil.\nfunc (wr *FrameWriteRequest) replyToWriter(err error) {\n\tif wr.done == nil {\n\t\treturn\n\t}\n\tselect {\n\tcase wr.done <- err:\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unbuffered done channel passed in for type %T\", wr.write))\n\t}\n\twr.write = nil // prevent use (assume it's tainted after wr.done send)\n}\n\n// writeQueue is used by implementations of WriteScheduler.\ntype writeQueue struct {\n\ts []FrameWriteRequest\n}\n\nfunc (q *writeQueue) empty() bool { return len(q.s) == 0 }\n\nfunc (q *writeQueue) push(wr FrameWriteRequest) {\n\tq.s = append(q.s, wr)\n}\n\nfunc (q *writeQueue) shift() FrameWriteRequest {\n\tif len(q.s) == 0 {\n\t\tpanic(\"invalid use of queue\")\n\t}\n\twr := q.s[0]\n\t// TODO: less copy-happy queue.\n\tcopy(q.s, q.s[1:])\n\tq.s[len(q.s)-1] = FrameWriteRequest{}\n\tq.s = q.s[:len(q.s)-1]\n\treturn wr\n}\n\n// consume consumes up to n bytes from q.s[0]. If the frame is\n// entirely consumed, it is removed from the queue. If the frame\n// is partially consumed, the frame is kept with the consumed\n// bytes removed. 
Returns true iff any bytes were consumed.\nfunc (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {\n\tif len(q.s) == 0 {\n\t\treturn FrameWriteRequest{}, false\n\t}\n\tconsumed, rest, numresult := q.s[0].Consume(n)\n\tswitch numresult {\n\tcase 0:\n\t\treturn FrameWriteRequest{}, false\n\tcase 1:\n\t\tq.shift()\n\tcase 2:\n\t\tq.s[0] = rest\n\t}\n\treturn consumed, true\n}\n\ntype writeQueuePool []*writeQueue\n\n// put inserts an unused writeQueue into the pool.\nfunc (p *writeQueuePool) put(q *writeQueue) {\n\tfor i := range q.s {\n\t\tq.s[i] = FrameWriteRequest{}\n\t}\n\tq.s = q.s[:0]\n\t*p = append(*p, q)\n}\n\n// get returns an empty writeQueue.\nfunc (p *writeQueuePool) get() *writeQueue {\n\tln := len(*p)\n\tif ln == 0 {\n\t\treturn new(writeQueue)\n\t}\n\tx := ln - 1\n\tq := (*p)[x]\n\t(*p)[x] = nil\n\t*p = (*p)[:x]\n\treturn q\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/writesched_priority.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n)\n\n// RFC 7540, Section 5.3.5: the default weight is 16.\nconst priorityDefaultWeight = 15 // 16 = 15 + 1\n\n// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.\ntype PriorityWriteSchedulerConfig struct {\n\t// MaxClosedNodesInTree controls the maximum number of closed streams to\n\t// retain in the priority tree. Setting this to zero saves a small amount\n\t// of memory at the cost of performance.\n\t//\n\t// See RFC 7540, Section 5.3.4:\n\t//   \"It is possible for a stream to become closed while prioritization\n\t//   information ... is in transit. ... This potentially creates suboptimal\n\t//   prioritization, since the stream could be given a priority that is\n\t//   different from what is intended. To avoid these problems, an endpoint\n\t//   SHOULD retain stream prioritization state for a period after streams\n\t//   become closed. The longer state is retained, the lower the chance that\n\t//   streams are assigned incorrect or default priority values.\"\n\tMaxClosedNodesInTree int\n\n\t// MaxIdleNodesInTree controls the maximum number of idle streams to\n\t// retain in the priority tree. Setting this to zero saves a small amount\n\t// of memory at the cost of performance.\n\t//\n\t// See RFC 7540, Section 5.3.4:\n\t//   Similarly, streams that are in the \"idle\" state can be assigned\n\t//   priority or become a parent of other streams. This allows for the\n\t//   creation of a grouping node in the dependency tree, which enables\n\t//   more flexible expressions of priority. Idle streams begin with a\n\t//   default priority (Section 5.3.5).\n\tMaxIdleNodesInTree int\n\n\t// ThrottleOutOfOrderWrites enables write throttling to help ensure that\n\t// data is delivered in priority order. 
This works around a race where\n\t// stream B depends on stream A and both streams are about to call Write\n\t// to queue DATA frames. If B wins the race, a naive scheduler would eagerly\n\t// write as much data from B as possible, but this is suboptimal because A\n\t// is a higher-priority stream. With throttling enabled, we write a small\n\t// amount of data from B to minimize the amount of bandwidth that B can\n\t// steal from A.\n\tThrottleOutOfOrderWrites bool\n}\n\n// NewPriorityWriteScheduler constructs a WriteScheduler that schedules\n// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.\n// If cfg is nil, default options are used.\nfunc NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {\n\tif cfg == nil {\n\t\t// For justification of these defaults, see:\n\t\t// https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY\n\t\tcfg = &PriorityWriteSchedulerConfig{\n\t\t\tMaxClosedNodesInTree:     10,\n\t\t\tMaxIdleNodesInTree:       10,\n\t\t\tThrottleOutOfOrderWrites: false,\n\t\t}\n\t}\n\n\tws := &priorityWriteScheduler{\n\t\tnodes:                make(map[uint32]*priorityNode),\n\t\tmaxClosedNodesInTree: cfg.MaxClosedNodesInTree,\n\t\tmaxIdleNodesInTree:   cfg.MaxIdleNodesInTree,\n\t\tenableWriteThrottle:  cfg.ThrottleOutOfOrderWrites,\n\t}\n\tws.nodes[0] = &ws.root\n\tif cfg.ThrottleOutOfOrderWrites {\n\t\tws.writeThrottleLimit = 1024\n\t} else {\n\t\tws.writeThrottleLimit = math.MaxInt32\n\t}\n\treturn ws\n}\n\ntype priorityNodeState int\n\nconst (\n\tpriorityNodeOpen priorityNodeState = iota\n\tpriorityNodeClosed\n\tpriorityNodeIdle\n)\n\n// priorityNode is a node in an HTTP/2 priority tree.\n// Each node is associated with a single stream ID.\n// See RFC 7540, Section 5.3.\ntype priorityNode struct {\n\tq            writeQueue        // queue of pending frames to write\n\tid           uint32            // id of the stream, or 0 for the root of the tree\n\tweight       uint8        
     // the actual weight is weight+1, so the value is in [1,256]\n\tstate        priorityNodeState // open | closed | idle\n\tbytes        int64             // number of bytes written by this node, or 0 if closed\n\tsubtreeBytes int64             // sum(node.bytes) of all nodes in this subtree\n\n\t// These links form the priority tree.\n\tparent     *priorityNode\n\tkids       *priorityNode // start of the kids list\n\tprev, next *priorityNode // doubly-linked list of siblings\n}\n\nfunc (n *priorityNode) setParent(parent *priorityNode) {\n\tif n == parent {\n\t\tpanic(\"setParent to self\")\n\t}\n\tif n.parent == parent {\n\t\treturn\n\t}\n\t// Unlink from current parent.\n\tif parent := n.parent; parent != nil {\n\t\tif n.prev == nil {\n\t\t\tparent.kids = n.next\n\t\t} else {\n\t\t\tn.prev.next = n.next\n\t\t}\n\t\tif n.next != nil {\n\t\t\tn.next.prev = n.prev\n\t\t}\n\t}\n\t// Link to new parent.\n\t// If parent=nil, remove n from the tree.\n\t// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).\n\tn.parent = parent\n\tif parent == nil {\n\t\tn.next = nil\n\t\tn.prev = nil\n\t} else {\n\t\tn.next = parent.kids\n\t\tn.prev = nil\n\t\tif n.next != nil {\n\t\t\tn.next.prev = n\n\t\t}\n\t\tparent.kids = n\n\t}\n}\n\nfunc (n *priorityNode) addBytes(b int64) {\n\tn.bytes += b\n\tfor ; n != nil; n = n.parent {\n\t\tn.subtreeBytes += b\n\t}\n}\n\n// walkReadyInOrder iterates over the tree in priority order, calling f for each node\n// with a non-empty write queue. When f returns true, this funcion returns true and the\n// walk halts. 
tmp is used as scratch space for sorting.\n//\n// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true\n// if any ancestor p of n is still open (ignoring the root node).\nfunc (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {\n\tif !n.q.empty() && f(n, openParent) {\n\t\treturn true\n\t}\n\tif n.kids == nil {\n\t\treturn false\n\t}\n\n\t// Don't consider the root \"open\" when updating openParent since\n\t// we can't send data frames on the root stream (only control frames).\n\tif n.id != 0 {\n\t\topenParent = openParent || (n.state == priorityNodeOpen)\n\t}\n\n\t// Common case: only one kid or all kids have the same weight.\n\t// Some clients don't use weights; other clients (like web browsers)\n\t// use mostly-linear priority trees.\n\tw := n.kids.weight\n\tneedSort := false\n\tfor k := n.kids.next; k != nil; k = k.next {\n\t\tif k.weight != w {\n\t\t\tneedSort = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !needSort {\n\t\tfor k := n.kids; k != nil; k = k.next {\n\t\t\tif k.walkReadyInOrder(openParent, tmp, f) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\t// Uncommon case: sort the child nodes. 
We remove the kids from the parent,\n\t// then re-insert after sorting so we can reuse tmp for future sort calls.\n\t*tmp = (*tmp)[:0]\n\tfor n.kids != nil {\n\t\t*tmp = append(*tmp, n.kids)\n\t\tn.kids.setParent(nil)\n\t}\n\tsort.Sort(sortPriorityNodeSiblings(*tmp))\n\tfor i := len(*tmp) - 1; i >= 0; i-- {\n\t\t(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids\n\t}\n\tfor k := n.kids; k != nil; k = k.next {\n\t\tif k.walkReadyInOrder(openParent, tmp, f) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\ntype sortPriorityNodeSiblings []*priorityNode\n\nfunc (z sortPriorityNodeSiblings) Len() int      { return len(z) }\nfunc (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }\nfunc (z sortPriorityNodeSiblings) Less(i, k int) bool {\n\t// Prefer the subtree that has sent fewer bytes relative to its weight.\n\t// See sections 5.3.2 and 5.3.4.\n\twi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)\n\twk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)\n\tif bi == 0 && bk == 0 {\n\t\treturn wi >= wk\n\t}\n\tif bk == 0 {\n\t\treturn false\n\t}\n\treturn bi/bk <= wi/wk\n}\n\ntype priorityWriteScheduler struct {\n\t// root is the root of the priority tree, where root.id = 0.\n\t// The root queues control frames that are not associated with any stream.\n\troot priorityNode\n\n\t// nodes maps stream ids to priority tree nodes.\n\tnodes map[uint32]*priorityNode\n\n\t// maxID is the maximum stream id in nodes.\n\tmaxID uint32\n\n\t// lists of nodes that have been closed or are idle, but are kept in\n\t// the tree for improved prioritization. 
When the lengths exceed either\n\t// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.\n\tclosedNodes, idleNodes []*priorityNode\n\n\t// From the config.\n\tmaxClosedNodesInTree int\n\tmaxIdleNodesInTree   int\n\twriteThrottleLimit   int32\n\tenableWriteThrottle  bool\n\n\t// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.\n\ttmp []*priorityNode\n\n\t// pool of empty queues for reuse.\n\tqueuePool writeQueuePool\n}\n\nfunc (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {\n\t// The stream may be currently idle but cannot be opened or closed.\n\tif curr := ws.nodes[streamID]; curr != nil {\n\t\tif curr.state != priorityNodeIdle {\n\t\t\tpanic(fmt.Sprintf(\"stream %d already opened\", streamID))\n\t\t}\n\t\tcurr.state = priorityNodeOpen\n\t\treturn\n\t}\n\n\t// RFC 7540, Section 5.3.5:\n\t//  \"All streams are initially assigned a non-exclusive dependency on stream 0x0.\n\t//  Pushed streams initially depend on their associated stream. 
In both cases,\n\t//  streams are assigned a default weight of 16.\"\n\tparent := ws.nodes[options.PusherID]\n\tif parent == nil {\n\t\tparent = &ws.root\n\t}\n\tn := &priorityNode{\n\t\tq:      *ws.queuePool.get(),\n\t\tid:     streamID,\n\t\tweight: priorityDefaultWeight,\n\t\tstate:  priorityNodeOpen,\n\t}\n\tn.setParent(parent)\n\tws.nodes[streamID] = n\n\tif streamID > ws.maxID {\n\t\tws.maxID = streamID\n\t}\n}\n\nfunc (ws *priorityWriteScheduler) CloseStream(streamID uint32) {\n\tif streamID == 0 {\n\t\tpanic(\"violation of WriteScheduler interface: cannot close stream 0\")\n\t}\n\tif ws.nodes[streamID] == nil {\n\t\tpanic(fmt.Sprintf(\"violation of WriteScheduler interface: unknown stream %d\", streamID))\n\t}\n\tif ws.nodes[streamID].state != priorityNodeOpen {\n\t\tpanic(fmt.Sprintf(\"violation of WriteScheduler interface: stream %d already closed\", streamID))\n\t}\n\n\tn := ws.nodes[streamID]\n\tn.state = priorityNodeClosed\n\tn.addBytes(-n.bytes)\n\n\tq := n.q\n\tws.queuePool.put(&q)\n\tn.q.s = nil\n\tif ws.maxClosedNodesInTree > 0 {\n\t\tws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)\n\t} else {\n\t\tws.removeNode(n)\n\t}\n}\n\nfunc (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {\n\tif streamID == 0 {\n\t\tpanic(\"adjustPriority on root\")\n\t}\n\n\t// If streamID does not exist, there are two cases:\n\t// - A closed stream that has been removed (this will have ID <= maxID)\n\t// - An idle stream that is being used for \"grouping\" (this will have ID > maxID)\n\tn := ws.nodes[streamID]\n\tif n == nil {\n\t\tif streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {\n\t\t\treturn\n\t\t}\n\t\tws.maxID = streamID\n\t\tn = &priorityNode{\n\t\t\tq:      *ws.queuePool.get(),\n\t\t\tid:     streamID,\n\t\t\tweight: priorityDefaultWeight,\n\t\t\tstate:  priorityNodeIdle,\n\t\t}\n\t\tn.setParent(&ws.root)\n\t\tws.nodes[streamID] = n\n\t\tws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, 
n)\n\t}\n\n\t// Section 5.3.1: A dependency on a stream that is not currently in the tree\n\t// results in that stream being given a default priority (Section 5.3.5).\n\tparent := ws.nodes[priority.StreamDep]\n\tif parent == nil {\n\t\tn.setParent(&ws.root)\n\t\tn.weight = priorityDefaultWeight\n\t\treturn\n\t}\n\n\t// Ignore if the client tries to make a node its own parent.\n\tif n == parent {\n\t\treturn\n\t}\n\n\t// Section 5.3.3:\n\t//   \"If a stream is made dependent on one of its own dependencies, the\n\t//   formerly dependent stream is first moved to be dependent on the\n\t//   reprioritized stream's previous parent. The moved dependency retains\n\t//   its weight.\"\n\t//\n\t// That is: if parent depends on n, move parent to depend on n.parent.\n\tfor x := parent.parent; x != nil; x = x.parent {\n\t\tif x == n {\n\t\t\tparent.setParent(n.parent)\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Section 5.3.3: The exclusive flag causes the stream to become the sole\n\t// dependency of its parent stream, causing other dependencies to become\n\t// dependent on the exclusive stream.\n\tif priority.Exclusive {\n\t\tk := parent.kids\n\t\tfor k != nil {\n\t\t\tnext := k.next\n\t\t\tif k != n {\n\t\t\t\tk.setParent(n)\n\t\t\t}\n\t\t\tk = next\n\t\t}\n\t}\n\n\tn.setParent(parent)\n\tn.weight = priority.Weight\n}\n\nfunc (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {\n\tvar n *priorityNode\n\tif id := wr.StreamID(); id == 0 {\n\t\tn = &ws.root\n\t} else {\n\t\tn = ws.nodes[id]\n\t\tif n == nil {\n\t\t\t// id is an idle or closed stream. wr should not be a HEADERS or\n\t\t\t// DATA frame. However, wr can be a RST_STREAM. In this case, we\n\t\t\t// push wr onto the root, rather than creating a new priorityNode,\n\t\t\t// since RST_STREAM is tiny and the stream's priority is unknown\n\t\t\t// anyway. 
See issue #17919.\n\t\t\tif wr.DataSize() > 0 {\n\t\t\t\tpanic(\"add DATA on non-open stream\")\n\t\t\t}\n\t\t\tn = &ws.root\n\t\t}\n\t}\n\tn.q.push(wr)\n}\n\nfunc (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {\n\tws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {\n\t\tlimit := int32(math.MaxInt32)\n\t\tif openParent {\n\t\t\tlimit = ws.writeThrottleLimit\n\t\t}\n\t\twr, ok = n.q.consume(limit)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\tn.addBytes(int64(wr.DataSize()))\n\t\t// If B depends on A and B continuously has data available but A\n\t\t// does not, gradually increase the throttling limit to allow B to\n\t\t// steal more and more bandwidth from A.\n\t\tif openParent {\n\t\t\tws.writeThrottleLimit += 1024\n\t\t\tif ws.writeThrottleLimit < 0 {\n\t\t\t\tws.writeThrottleLimit = math.MaxInt32\n\t\t\t}\n\t\t} else if ws.enableWriteThrottle {\n\t\t\tws.writeThrottleLimit = 1024\n\t\t}\n\t\treturn true\n\t})\n\treturn wr, ok\n}\n\nfunc (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {\n\tif maxSize == 0 {\n\t\treturn\n\t}\n\tif len(*list) == maxSize {\n\t\t// Remove the oldest node, then shift left.\n\t\tws.removeNode((*list)[0])\n\t\tx := (*list)[1:]\n\t\tcopy(*list, x)\n\t\t*list = (*list)[:len(x)]\n\t}\n\t*list = append(*list, n)\n}\n\nfunc (ws *priorityWriteScheduler) removeNode(n *priorityNode) {\n\tfor k := n.kids; k != nil; k = k.next {\n\t\tk.setParent(n.parent)\n\t}\n\tn.setParent(nil)\n\tdelete(ws.nodes, n.id)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/writesched_priority_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"sort\"\n\t\"testing\"\n)\n\nfunc defaultPriorityWriteScheduler() *priorityWriteScheduler {\n\treturn NewPriorityWriteScheduler(nil).(*priorityWriteScheduler)\n}\n\nfunc checkPriorityWellFormed(ws *priorityWriteScheduler) error {\n\tfor id, n := range ws.nodes {\n\t\tif id != n.id {\n\t\t\treturn fmt.Errorf(\"bad ws.nodes: ws.nodes[%d] = %d\", id, n.id)\n\t\t}\n\t\tif n.parent == nil {\n\t\t\tif n.next != nil || n.prev != nil {\n\t\t\t\treturn fmt.Errorf(\"bad node %d: nil parent but prev/next not nil\", id)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tfound := false\n\t\tfor k := n.parent.kids; k != nil; k = k.next {\n\t\t\tif k.id == id {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn fmt.Errorf(\"bad node %d: not found in parent %d kids list\", id, n.parent.id)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc fmtTree(ws *priorityWriteScheduler, fmtNode func(*priorityNode) string) string {\n\tvar ids []int\n\tfor _, n := range ws.nodes {\n\t\tids = append(ids, int(n.id))\n\t}\n\tsort.Ints(ids)\n\n\tvar buf bytes.Buffer\n\tfor _, id := range ids {\n\t\tif buf.Len() != 0 {\n\t\t\tbuf.WriteString(\" \")\n\t\t}\n\t\tif id == 0 {\n\t\t\tbuf.WriteString(fmtNode(&ws.root))\n\t\t} else {\n\t\t\tbuf.WriteString(fmtNode(ws.nodes[uint32(id)]))\n\t\t}\n\t}\n\treturn buf.String()\n}\n\nfunc fmtNodeParentSkipRoot(n *priorityNode) string {\n\tswitch {\n\tcase n.id == 0:\n\t\treturn \"\"\n\tcase n.parent == nil:\n\t\treturn fmt.Sprintf(\"%d{parent:nil}\", n.id)\n\tdefault:\n\t\treturn fmt.Sprintf(\"%d{parent:%d}\", n.id, n.parent.id)\n\t}\n}\n\nfunc fmtNodeWeightParentSkipRoot(n *priorityNode) string {\n\tswitch {\n\tcase n.id == 0:\n\t\treturn \"\"\n\tcase n.parent == nil:\n\t\treturn fmt.Sprintf(\"%d{weight:%d,parent:nil}\", n.id, 
n.weight)\n\tdefault:\n\t\treturn fmt.Sprintf(\"%d{weight:%d,parent:%d}\", n.id, n.weight, n.parent.id)\n\t}\n}\n\nfunc TestPriorityTwoStreams(t *testing.T) {\n\tws := defaultPriorityWriteScheduler()\n\tws.OpenStream(1, OpenStreamOptions{})\n\tws.OpenStream(2, OpenStreamOptions{})\n\n\twant := \"1{weight:15,parent:0} 2{weight:15,parent:0}\"\n\tif got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {\n\t\tt.Errorf(\"After open\\ngot  %q\\nwant %q\", got, want)\n\t}\n\n\t// Move 1's parent to 2.\n\tws.AdjustStream(1, PriorityParam{\n\t\tStreamDep: 2,\n\t\tWeight:    32,\n\t\tExclusive: false,\n\t})\n\twant = \"1{weight:32,parent:2} 2{weight:15,parent:0}\"\n\tif got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {\n\t\tt.Errorf(\"After adjust\\ngot  %q\\nwant %q\", got, want)\n\t}\n\n\tif err := checkPriorityWellFormed(ws); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestPriorityAdjustExclusiveZero(t *testing.T) {\n\t// 1, 2, and 3 are all children of the 0 stream.\n\t// Exclusive reprioritization to any of the streams should bring\n\t// the rest of the streams under the reprioritized stream.\n\tws := defaultPriorityWriteScheduler()\n\tws.OpenStream(1, OpenStreamOptions{})\n\tws.OpenStream(2, OpenStreamOptions{})\n\tws.OpenStream(3, OpenStreamOptions{})\n\n\twant := \"1{weight:15,parent:0} 2{weight:15,parent:0} 3{weight:15,parent:0}\"\n\tif got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {\n\t\tt.Errorf(\"After open\\ngot  %q\\nwant %q\", got, want)\n\t}\n\n\tws.AdjustStream(2, PriorityParam{\n\t\tStreamDep: 0,\n\t\tWeight:    20,\n\t\tExclusive: true,\n\t})\n\twant = \"1{weight:15,parent:2} 2{weight:20,parent:0} 3{weight:15,parent:2}\"\n\tif got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {\n\t\tt.Errorf(\"After adjust\\ngot  %q\\nwant %q\", got, want)\n\t}\n\n\tif err := checkPriorityWellFormed(ws); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestPriorityAdjustOwnParent(t *testing.T) {\n\t// Assigning a node as its 
own parent should have no effect.\n\tws := defaultPriorityWriteScheduler()\n\tws.OpenStream(1, OpenStreamOptions{})\n\tws.OpenStream(2, OpenStreamOptions{})\n\tws.AdjustStream(2, PriorityParam{\n\t\tStreamDep: 2,\n\t\tWeight:    20,\n\t\tExclusive: true,\n\t})\n\twant := \"1{weight:15,parent:0} 2{weight:15,parent:0}\"\n\tif got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {\n\t\tt.Errorf(\"After adjust\\ngot  %q\\nwant %q\", got, want)\n\t}\n\tif err := checkPriorityWellFormed(ws); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestPriorityClosedStreams(t *testing.T) {\n\tws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxClosedNodesInTree: 2}).(*priorityWriteScheduler)\n\tws.OpenStream(1, OpenStreamOptions{})\n\tws.OpenStream(2, OpenStreamOptions{PusherID: 1})\n\tws.OpenStream(3, OpenStreamOptions{PusherID: 2})\n\tws.OpenStream(4, OpenStreamOptions{PusherID: 3})\n\n\t// Close the first three streams. We lose 1, but keep 2 and 3.\n\tws.CloseStream(1)\n\tws.CloseStream(2)\n\tws.CloseStream(3)\n\n\twant := \"2{weight:15,parent:0} 3{weight:15,parent:2} 4{weight:15,parent:3}\"\n\tif got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {\n\t\tt.Errorf(\"After close\\ngot  %q\\nwant %q\", got, want)\n\t}\n\tif err := checkPriorityWellFormed(ws); err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Adding a stream as an exclusive child of 1 gives it default\n\t// priorities, since 1 is gone.\n\tws.OpenStream(5, OpenStreamOptions{})\n\tws.AdjustStream(5, PriorityParam{StreamDep: 1, Weight: 15, Exclusive: true})\n\n\t// Adding a stream as an exclusive child of 2 should work, since 2 is not gone.\n\tws.OpenStream(6, OpenStreamOptions{})\n\tws.AdjustStream(6, PriorityParam{StreamDep: 2, Weight: 15, Exclusive: true})\n\n\twant = \"2{weight:15,parent:0} 3{weight:15,parent:6} 4{weight:15,parent:3} 5{weight:15,parent:0} 6{weight:15,parent:2}\"\n\tif got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {\n\t\tt.Errorf(\"After add streams\\ngot  
%q\\nwant %q\", got, want)\n\t}\n\tif err := checkPriorityWellFormed(ws); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestPriorityClosedStreamsDisabled(t *testing.T) {\n\tws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler)\n\tws.OpenStream(1, OpenStreamOptions{})\n\tws.OpenStream(2, OpenStreamOptions{PusherID: 1})\n\tws.OpenStream(3, OpenStreamOptions{PusherID: 2})\n\n\t// Close the first two streams. We keep only 3.\n\tws.CloseStream(1)\n\tws.CloseStream(2)\n\n\twant := \"3{weight:15,parent:0}\"\n\tif got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {\n\t\tt.Errorf(\"After close\\ngot  %q\\nwant %q\", got, want)\n\t}\n\tif err := checkPriorityWellFormed(ws); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestPriorityIdleStreams(t *testing.T) {\n\tws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxIdleNodesInTree: 2}).(*priorityWriteScheduler)\n\tws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle\n\tws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle\n\tws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle\n\tws.OpenStream(4, OpenStreamOptions{})\n\tws.OpenStream(5, OpenStreamOptions{})\n\tws.OpenStream(6, OpenStreamOptions{})\n\tws.AdjustStream(4, PriorityParam{StreamDep: 1, Weight: 15})\n\tws.AdjustStream(5, PriorityParam{StreamDep: 2, Weight: 15})\n\tws.AdjustStream(6, PriorityParam{StreamDep: 3, Weight: 15})\n\n\twant := \"2{weight:15,parent:0} 3{weight:20,parent:2} 4{weight:15,parent:0} 5{weight:15,parent:2} 6{weight:15,parent:3}\"\n\tif got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {\n\t\tt.Errorf(\"After open\\ngot  %q\\nwant %q\", got, want)\n\t}\n\tif err := checkPriorityWellFormed(ws); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestPriorityIdleStreamsDisabled(t *testing.T) {\n\tws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler)\n\tws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 
15}) // idle\n\tws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle\n\tws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle\n\tws.OpenStream(4, OpenStreamOptions{})\n\n\twant := \"4{weight:15,parent:0}\"\n\tif got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want {\n\t\tt.Errorf(\"After open\\ngot  %q\\nwant %q\", got, want)\n\t}\n\tif err := checkPriorityWellFormed(ws); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestPrioritySection531NonExclusive(t *testing.T) {\n\t// Example from RFC 7540 Section 5.3.1.\n\t// A,B,C,D = 1,2,3,4\n\tws := defaultPriorityWriteScheduler()\n\tws.OpenStream(1, OpenStreamOptions{})\n\tws.OpenStream(2, OpenStreamOptions{PusherID: 1})\n\tws.OpenStream(3, OpenStreamOptions{PusherID: 1})\n\tws.OpenStream(4, OpenStreamOptions{})\n\tws.AdjustStream(4, PriorityParam{\n\t\tStreamDep: 1,\n\t\tWeight:    15,\n\t\tExclusive: false,\n\t})\n\twant := \"1{parent:0} 2{parent:1} 3{parent:1} 4{parent:1}\"\n\tif got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {\n\t\tt.Errorf(\"After adjust\\ngot  %q\\nwant %q\", got, want)\n\t}\n\tif err := checkPriorityWellFormed(ws); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestPrioritySection531Exclusive(t *testing.T) {\n\t// Example from RFC 7540 Section 5.3.1.\n\t// A,B,C,D = 1,2,3,4\n\tws := defaultPriorityWriteScheduler()\n\tws.OpenStream(1, OpenStreamOptions{})\n\tws.OpenStream(2, OpenStreamOptions{PusherID: 1})\n\tws.OpenStream(3, OpenStreamOptions{PusherID: 1})\n\tws.OpenStream(4, OpenStreamOptions{})\n\tws.AdjustStream(4, PriorityParam{\n\t\tStreamDep: 1,\n\t\tWeight:    15,\n\t\tExclusive: true,\n\t})\n\twant := \"1{parent:0} 2{parent:4} 3{parent:4} 4{parent:1}\"\n\tif got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {\n\t\tt.Errorf(\"After adjust\\ngot  %q\\nwant %q\", got, want)\n\t}\n\tif err := checkPriorityWellFormed(ws); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc makeSection533Tree() *priorityWriteScheduler {\n\t// Initial tree from RFC 
7540 Section 5.3.3.\n\t// A,B,C,D,E,F = 1,2,3,4,5,6\n\tws := defaultPriorityWriteScheduler()\n\tws.OpenStream(1, OpenStreamOptions{})\n\tws.OpenStream(2, OpenStreamOptions{PusherID: 1})\n\tws.OpenStream(3, OpenStreamOptions{PusherID: 1})\n\tws.OpenStream(4, OpenStreamOptions{PusherID: 3})\n\tws.OpenStream(5, OpenStreamOptions{PusherID: 3})\n\tws.OpenStream(6, OpenStreamOptions{PusherID: 4})\n\treturn ws\n}\n\nfunc TestPrioritySection533NonExclusive(t *testing.T) {\n\t// Example from RFC 7540 Section 5.3.3.\n\t// A,B,C,D,E,F = 1,2,3,4,5,6\n\tws := defaultPriorityWriteScheduler()\n\tws.OpenStream(1, OpenStreamOptions{})\n\tws.OpenStream(2, OpenStreamOptions{PusherID: 1})\n\tws.OpenStream(3, OpenStreamOptions{PusherID: 1})\n\tws.OpenStream(4, OpenStreamOptions{PusherID: 3})\n\tws.OpenStream(5, OpenStreamOptions{PusherID: 3})\n\tws.OpenStream(6, OpenStreamOptions{PusherID: 4})\n\tws.AdjustStream(1, PriorityParam{\n\t\tStreamDep: 4,\n\t\tWeight:    15,\n\t\tExclusive: false,\n\t})\n\twant := \"1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:4}\"\n\tif got := fmtTree(ws, fmtNodeParentSkipRoot); got != want {\n\t\tt.Errorf(\"After adjust\\ngot  %q\\nwant %q\", got, want)\n\t}\n\tif err := checkPriorityWellFormed(ws); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestPrioritySection533Exclusive(t *testing.T) {\n\t// Example from RFC 7540 Section 5.3.3.\n\t// A,B,C,D,E,F = 1,2,3,4,5,6\n\tws := defaultPriorityWriteScheduler()\n\tws.OpenStream(1, OpenStreamOptions{})\n\tws.OpenStream(2, OpenStreamOptions{PusherID: 1})\n\tws.OpenStream(3, OpenStreamOptions{PusherID: 1})\n\tws.OpenStream(4, OpenStreamOptions{PusherID: 3})\n\tws.OpenStream(5, OpenStreamOptions{PusherID: 3})\n\tws.OpenStream(6, OpenStreamOptions{PusherID: 4})\n\tws.AdjustStream(1, PriorityParam{\n\t\tStreamDep: 4,\n\t\tWeight:    15,\n\t\tExclusive: true,\n\t})\n\twant := \"1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:1}\"\n\tif got := fmtTree(ws, 
fmtNodeParentSkipRoot); got != want {\n\t\tt.Errorf(\"After adjust\\ngot  %q\\nwant %q\", got, want)\n\t}\n\tif err := checkPriorityWellFormed(ws); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc checkPopAll(ws WriteScheduler, order []uint32) error {\n\tfor k, id := range order {\n\t\twr, ok := ws.Pop()\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Pop[%d]: got ok=false, want %d (order=%v)\", k, id, order)\n\t\t}\n\t\tif got := wr.StreamID(); got != id {\n\t\t\treturn fmt.Errorf(\"Pop[%d]: got %v, want %d (order=%v)\", k, got, id, order)\n\t\t}\n\t}\n\twr, ok := ws.Pop()\n\tif ok {\n\t\treturn fmt.Errorf(\"Pop[%d]: got %v, want ok=false (order=%v)\", len(order), wr.StreamID(), order)\n\t}\n\treturn nil\n}\n\nfunc TestPriorityPopFrom533Tree(t *testing.T) {\n\tws := makeSection533Tree()\n\n\tws.Push(makeWriteHeadersRequest(3 /*C*/))\n\tws.Push(makeWriteNonStreamRequest())\n\tws.Push(makeWriteHeadersRequest(5 /*E*/))\n\tws.Push(makeWriteHeadersRequest(1 /*A*/))\n\tt.Log(\"tree:\", fmtTree(ws, fmtNodeParentSkipRoot))\n\n\tif err := checkPopAll(ws, []uint32{0 /*NonStream*/, 1, 3, 5}); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestPriorityPopFromLinearTree(t *testing.T) {\n\tws := defaultPriorityWriteScheduler()\n\tws.OpenStream(1, OpenStreamOptions{})\n\tws.OpenStream(2, OpenStreamOptions{PusherID: 1})\n\tws.OpenStream(3, OpenStreamOptions{PusherID: 2})\n\tws.OpenStream(4, OpenStreamOptions{PusherID: 3})\n\n\tws.Push(makeWriteHeadersRequest(3))\n\tws.Push(makeWriteHeadersRequest(4))\n\tws.Push(makeWriteHeadersRequest(1))\n\tws.Push(makeWriteHeadersRequest(2))\n\tws.Push(makeWriteNonStreamRequest())\n\tws.Push(makeWriteNonStreamRequest())\n\tt.Log(\"tree:\", fmtTree(ws, fmtNodeParentSkipRoot))\n\n\tif err := checkPopAll(ws, []uint32{0, 0 /*NonStreams*/, 1, 2, 3, 4}); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestPriorityFlowControl(t *testing.T) {\n\tws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: false})\n\tws.OpenStream(1, 
OpenStreamOptions{})\n\tws.OpenStream(2, OpenStreamOptions{PusherID: 1})\n\n\tsc := &serverConn{maxFrameSize: 16}\n\tst1 := &stream{id: 1, sc: sc}\n\tst2 := &stream{id: 2, sc: sc}\n\n\tws.Push(FrameWriteRequest{&writeData{1, make([]byte, 16), false}, st1, nil})\n\tws.Push(FrameWriteRequest{&writeData{2, make([]byte, 16), false}, st2, nil})\n\tws.AdjustStream(2, PriorityParam{StreamDep: 1})\n\n\t// No flow-control bytes available.\n\tif wr, ok := ws.Pop(); ok {\n\t\tt.Fatalf(\"Pop(limited by flow control)=%v,true, want false\", wr)\n\t}\n\n\t// Add enough flow-control bytes to write st2 in two Pop calls.\n\t// Should write data from st2 even though it's lower priority than st1.\n\tfor i := 1; i <= 2; i++ {\n\t\tst2.flow.add(8)\n\t\twr, ok := ws.Pop()\n\t\tif !ok {\n\t\t\tt.Fatalf(\"Pop(%d)=false, want true\", i)\n\t\t}\n\t\tif got, want := wr.DataSize(), 8; got != want {\n\t\t\tt.Fatalf(\"Pop(%d)=%d bytes, want %d bytes\", i, got, want)\n\t\t}\n\t}\n}\n\nfunc TestPriorityThrottleOutOfOrderWrites(t *testing.T) {\n\tws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: true})\n\tws.OpenStream(1, OpenStreamOptions{})\n\tws.OpenStream(2, OpenStreamOptions{PusherID: 1})\n\n\tsc := &serverConn{maxFrameSize: 4096}\n\tst1 := &stream{id: 1, sc: sc}\n\tst2 := &stream{id: 2, sc: sc}\n\tst1.flow.add(4096)\n\tst2.flow.add(4096)\n\tws.Push(FrameWriteRequest{&writeData{2, make([]byte, 4096), false}, st2, nil})\n\tws.AdjustStream(2, PriorityParam{StreamDep: 1})\n\n\t// We have enough flow-control bytes to write st2 in a single Pop call.\n\t// However, due to out-of-order write throttling, the first call should\n\t// only write 1KB.\n\twr, ok := ws.Pop()\n\tif !ok {\n\t\tt.Fatalf(\"Pop(st2.first)=false, want true\")\n\t}\n\tif got, want := wr.StreamID(), uint32(2); got != want {\n\t\tt.Fatalf(\"Pop(st2.first)=stream %d, want stream %d\", got, want)\n\t}\n\tif got, want := wr.DataSize(), 1024; got != want {\n\t\tt.Fatalf(\"Pop(st2.first)=%d bytes, 
want %d bytes\", got, want)\n\t}\n\n\t// Now add data on st1. This should take precedence.\n\tws.Push(FrameWriteRequest{&writeData{1, make([]byte, 4096), false}, st1, nil})\n\twr, ok = ws.Pop()\n\tif !ok {\n\t\tt.Fatalf(\"Pop(st1)=false, want true\")\n\t}\n\tif got, want := wr.StreamID(), uint32(1); got != want {\n\t\tt.Fatalf(\"Pop(st1)=stream %d, want stream %d\", got, want)\n\t}\n\tif got, want := wr.DataSize(), 4096; got != want {\n\t\tt.Fatalf(\"Pop(st1)=%d bytes, want %d bytes\", got, want)\n\t}\n\n\t// Should go back to writing 1KB from st2.\n\twr, ok = ws.Pop()\n\tif !ok {\n\t\tt.Fatalf(\"Pop(st2.last)=false, want true\")\n\t}\n\tif got, want := wr.StreamID(), uint32(2); got != want {\n\t\tt.Fatalf(\"Pop(st2.last)=stream %d, want stream %d\", got, want)\n\t}\n\tif got, want := wr.DataSize(), 1024; got != want {\n\t\tt.Fatalf(\"Pop(st2.last)=%d bytes, want %d bytes\", got, want)\n\t}\n}\n\nfunc TestPriorityWeights(t *testing.T) {\n\tws := defaultPriorityWriteScheduler()\n\tws.OpenStream(1, OpenStreamOptions{})\n\tws.OpenStream(2, OpenStreamOptions{})\n\n\tsc := &serverConn{maxFrameSize: 8}\n\tst1 := &stream{id: 1, sc: sc}\n\tst2 := &stream{id: 2, sc: sc}\n\tst1.flow.add(40)\n\tst2.flow.add(40)\n\n\tws.Push(FrameWriteRequest{&writeData{1, make([]byte, 40), false}, st1, nil})\n\tws.Push(FrameWriteRequest{&writeData{2, make([]byte, 40), false}, st2, nil})\n\tws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 34})\n\tws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 9})\n\n\t// st1 gets 3.5x the bandwidth of st2 (3.5 = (34+1)/(9+1)).\n\t// The maximum frame size is 8 bytes. 
The write sequence should be:\n\t//   st1, total bytes so far is (st1=8,  st=0)\n\t//   st2, total bytes so far is (st1=8,  st=8)\n\t//   st1, total bytes so far is (st1=16, st=8)\n\t//   st1, total bytes so far is (st1=24, st=8)   // 3x bandwidth\n\t//   st1, total bytes so far is (st1=32, st=8)   // 4x bandwidth\n\t//   st2, total bytes so far is (st1=32, st=16)  // 2x bandwidth\n\t//   st1, total bytes so far is (st1=40, st=16)\n\t//   st2, total bytes so far is (st1=40, st=24)\n\t//   st2, total bytes so far is (st1=40, st=32)\n\t//   st2, total bytes so far is (st1=40, st=40)\n\tif err := checkPopAll(ws, []uint32{1, 2, 1, 1, 1, 2, 1, 2, 2, 2}); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestPriorityRstStreamOnNonOpenStreams(t *testing.T) {\n\tws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{\n\t\tMaxClosedNodesInTree: 0,\n\t\tMaxIdleNodesInTree:   0,\n\t})\n\tws.OpenStream(1, OpenStreamOptions{})\n\tws.CloseStream(1)\n\tws.Push(FrameWriteRequest{write: streamError(1, ErrCodeProtocol)})\n\tws.Push(FrameWriteRequest{write: streamError(2, ErrCodeProtocol)})\n\n\tif err := checkPopAll(ws, []uint32{1, 2}); err != nil {\n\t\tt.Error(err)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/writesched_random.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport \"math\"\n\n// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2\n// priorities. Control frames like SETTINGS and PING are written before DATA\n// frames, but if no control frames are queued and multiple streams have queued\n// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.\nfunc NewRandomWriteScheduler() WriteScheduler {\n\treturn &randomWriteScheduler{sq: make(map[uint32]*writeQueue)}\n}\n\ntype randomWriteScheduler struct {\n\t// zero are frames not associated with a specific stream.\n\tzero writeQueue\n\n\t// sq contains the stream-specific queues, keyed by stream ID.\n\t// When a stream is idle or closed, it's deleted from the map.\n\tsq map[uint32]*writeQueue\n\n\t// pool of empty queues for reuse.\n\tqueuePool writeQueuePool\n}\n\nfunc (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {\n\t// no-op: idle streams are not tracked\n}\n\nfunc (ws *randomWriteScheduler) CloseStream(streamID uint32) {\n\tq, ok := ws.sq[streamID]\n\tif !ok {\n\t\treturn\n\t}\n\tdelete(ws.sq, streamID)\n\tws.queuePool.put(q)\n}\n\nfunc (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {\n\t// no-op: priorities are ignored\n}\n\nfunc (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {\n\tid := wr.StreamID()\n\tif id == 0 {\n\t\tws.zero.push(wr)\n\t\treturn\n\t}\n\tq, ok := ws.sq[id]\n\tif !ok {\n\t\tq = ws.queuePool.get()\n\t\tws.sq[id] = q\n\t}\n\tq.push(wr)\n}\n\nfunc (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {\n\t// Control frames first.\n\tif !ws.zero.empty() {\n\t\treturn ws.zero.shift(), true\n\t}\n\t// Iterate over all non-idle streams until finding one that can be consumed.\n\tfor _, q := range ws.sq {\n\t\tif wr, ok := q.consume(math.MaxInt32); ok 
{\n\t\t\treturn wr, true\n\t\t}\n\t}\n\treturn FrameWriteRequest{}, false\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/writesched_random_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport \"testing\"\n\nfunc TestRandomScheduler(t *testing.T) {\n\tws := NewRandomWriteScheduler()\n\tws.Push(makeWriteHeadersRequest(3))\n\tws.Push(makeWriteHeadersRequest(4))\n\tws.Push(makeWriteHeadersRequest(1))\n\tws.Push(makeWriteHeadersRequest(2))\n\tws.Push(makeWriteNonStreamRequest())\n\tws.Push(makeWriteNonStreamRequest())\n\n\t// Pop all frames. Should get the non-stream requests first,\n\t// followed by the stream requests in any order.\n\tvar order []FrameWriteRequest\n\tfor {\n\t\twr, ok := ws.Pop()\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\torder = append(order, wr)\n\t}\n\tt.Logf(\"got frames: %v\", order)\n\tif len(order) != 6 {\n\t\tt.Fatalf(\"got %d frames, expected 6\", len(order))\n\t}\n\tif order[0].StreamID() != 0 || order[1].StreamID() != 0 {\n\t\tt.Fatal(\"expected non-stream frames first\", order[0], order[1])\n\t}\n\tgot := make(map[uint32]bool)\n\tfor _, wr := range order[2:] {\n\t\tgot[wr.StreamID()] = true\n\t}\n\tfor id := uint32(1); id <= 4; id++ {\n\t\tif !got[id] {\n\t\t\tt.Errorf(\"frame not found for stream %d\", id)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/writesched_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc makeWriteNonStreamRequest() FrameWriteRequest {\n\treturn FrameWriteRequest{writeSettingsAck{}, nil, nil}\n}\n\nfunc makeWriteHeadersRequest(streamID uint32) FrameWriteRequest {\n\tst := &stream{id: streamID}\n\treturn FrameWriteRequest{&writeResHeaders{streamID: streamID, httpResCode: 200}, st, nil}\n}\n\nfunc checkConsume(wr FrameWriteRequest, nbytes int32, want []FrameWriteRequest) error {\n\tconsumed, rest, n := wr.Consume(nbytes)\n\tvar wantConsumed, wantRest FrameWriteRequest\n\tswitch len(want) {\n\tcase 0:\n\tcase 1:\n\t\twantConsumed = want[0]\n\tcase 2:\n\t\twantConsumed = want[0]\n\t\twantRest = want[1]\n\t}\n\tif !reflect.DeepEqual(consumed, wantConsumed) || !reflect.DeepEqual(rest, wantRest) || n != len(want) {\n\t\treturn fmt.Errorf(\"got %v, %v, %v\\nwant %v, %v, %v\", consumed, rest, n, wantConsumed, wantRest, len(want))\n\t}\n\treturn nil\n}\n\nfunc TestFrameWriteRequestNonData(t *testing.T) {\n\twr := makeWriteNonStreamRequest()\n\tif got, want := wr.DataSize(), 0; got != want {\n\t\tt.Errorf(\"DataSize: got %v, want %v\", got, want)\n\t}\n\n\t// Non-DATA frames are always consumed whole.\n\tif err := checkConsume(wr, 0, []FrameWriteRequest{wr}); err != nil {\n\t\tt.Errorf(\"Consume:\\n%v\", err)\n\t}\n}\n\nfunc TestFrameWriteRequestData(t *testing.T) {\n\tst := &stream{\n\t\tid: 1,\n\t\tsc: &serverConn{maxFrameSize: 16},\n\t}\n\tconst size = 32\n\twr := FrameWriteRequest{&writeData{st.id, make([]byte, size), true}, st, make(chan error)}\n\tif got, want := wr.DataSize(), size; got != want {\n\t\tt.Errorf(\"DataSize: got %v, want %v\", got, want)\n\t}\n\n\t// No flow-control bytes available: cannot consume anything.\n\tif err := checkConsume(wr, math.MaxInt32, []FrameWriteRequest{}); 
err != nil {\n\t\tt.Errorf(\"Consume(limited by flow control):\\n%v\", err)\n\t}\n\n\t// Add enough flow-control bytes to consume the entire frame,\n\t// but we're now restricted by st.sc.maxFrameSize.\n\tst.flow.add(size)\n\twant := []FrameWriteRequest{\n\t\t{\n\t\t\twrite:  &writeData{st.id, make([]byte, st.sc.maxFrameSize), false},\n\t\t\tstream: st,\n\t\t\tdone:   nil,\n\t\t},\n\t\t{\n\t\t\twrite:  &writeData{st.id, make([]byte, size-st.sc.maxFrameSize), true},\n\t\t\tstream: st,\n\t\t\tdone:   wr.done,\n\t\t},\n\t}\n\tif err := checkConsume(wr, math.MaxInt32, want); err != nil {\n\t\tt.Errorf(\"Consume(limited by maxFrameSize):\\n%v\", err)\n\t}\n\trest := want[1]\n\n\t// Consume 8 bytes from the remaining frame.\n\twant = []FrameWriteRequest{\n\t\t{\n\t\t\twrite:  &writeData{st.id, make([]byte, 8), false},\n\t\t\tstream: st,\n\t\t\tdone:   nil,\n\t\t},\n\t\t{\n\t\t\twrite:  &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true},\n\t\t\tstream: st,\n\t\t\tdone:   wr.done,\n\t\t},\n\t}\n\tif err := checkConsume(rest, 8, want); err != nil {\n\t\tt.Errorf(\"Consume(8):\\n%v\", err)\n\t}\n\trest = want[1]\n\n\t// Consume all remaining bytes.\n\twant = []FrameWriteRequest{\n\t\t{\n\t\t\twrite:  &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true},\n\t\t\tstream: st,\n\t\t\tdone:   wr.done,\n\t\t},\n\t}\n\tif err := checkConsume(rest, math.MaxInt32, want); err != nil {\n\t\tt.Errorf(\"Consume(remainder):\\n%v\", err)\n\t}\n}\n\nfunc TestFrameWriteRequest_StreamID(t *testing.T) {\n\tconst streamID = 123\n\twr := FrameWriteRequest{write: streamError(streamID, ErrCodeNo)}\n\tif got := wr.StreamID(); got != streamID {\n\t\tt.Errorf(\"FrameWriteRequest(StreamError) = %v; want %v\", got, streamID)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/http2/z_spec_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage http2\n\nimport (\n\t\"bytes\"\n\t\"encoding/xml\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n)\n\nvar coverSpec = flag.Bool(\"coverspec\", false, \"Run spec coverage tests\")\n\n// The global map of sentence coverage for the http2 spec.\nvar defaultSpecCoverage specCoverage\n\nvar loadSpecOnce sync.Once\n\nfunc loadSpec() {\n\tif f, err := os.Open(\"testdata/draft-ietf-httpbis-http2.xml\"); err != nil {\n\t\tpanic(err)\n\t} else {\n\t\tdefaultSpecCoverage = readSpecCov(f)\n\t\tf.Close()\n\t}\n}\n\n// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not\n// \"covered\" will be included in report outputted by TestSpecCoverage.\nfunc covers(sec, sentences string) {\n\tloadSpecOnce.Do(loadSpec)\n\tdefaultSpecCoverage.cover(sec, sentences)\n}\n\ntype specPart struct {\n\tsection  string\n\tsentence string\n}\n\nfunc (ss specPart) Less(oo specPart) bool {\n\tatoi := func(s string) int {\n\t\tn, err := strconv.Atoi(s)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn n\n\t}\n\ta := strings.Split(ss.section, \".\")\n\tb := strings.Split(oo.section, \".\")\n\tfor len(a) > 0 {\n\t\tif len(b) == 0 {\n\t\t\treturn false\n\t\t}\n\t\tx, y := atoi(a[0]), atoi(b[0])\n\t\tif x == y {\n\t\t\ta, b = a[1:], b[1:]\n\t\t\tcontinue\n\t\t}\n\t\treturn x < y\n\t}\n\tif len(b) > 0 {\n\t\treturn true\n\t}\n\treturn false\n}\n\ntype bySpecSection []specPart\n\nfunc (a bySpecSection) Len() int           { return len(a) }\nfunc (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) }\nfunc (a bySpecSection) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\n\ntype specCoverage struct {\n\tcoverage map[specPart]bool\n\td        *xml.Decoder\n}\n\nfunc joinSection(sec 
[]int) string {\n\ts := fmt.Sprintf(\"%d\", sec[0])\n\tfor _, n := range sec[1:] {\n\t\ts = fmt.Sprintf(\"%s.%d\", s, n)\n\t}\n\treturn s\n}\n\nfunc (sc specCoverage) readSection(sec []int) {\n\tvar (\n\t\tbuf = new(bytes.Buffer)\n\t\tsub = 0\n\t)\n\tfor {\n\t\ttk, err := sc.d.Token()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpanic(err)\n\t\t}\n\t\tswitch v := tk.(type) {\n\t\tcase xml.StartElement:\n\t\t\tif skipElement(v) {\n\t\t\t\tif err := sc.d.Skip(); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif v.Name.Local == \"section\" {\n\t\t\t\t\tsub++\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tswitch v.Name.Local {\n\t\t\tcase \"section\":\n\t\t\t\tsub++\n\t\t\t\tsc.readSection(append(sec, sub))\n\t\t\tcase \"xref\":\n\t\t\t\tbuf.Write(sc.readXRef(v))\n\t\t\t}\n\t\tcase xml.CharData:\n\t\t\tif len(sec) == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tbuf.Write(v)\n\t\tcase xml.EndElement:\n\t\t\tif v.Name.Local == \"section\" {\n\t\t\t\tsc.addSentences(joinSection(sec), buf.String())\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (sc specCoverage) readXRef(se xml.StartElement) []byte {\n\tvar b []byte\n\tfor {\n\t\ttk, err := sc.d.Token()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tswitch v := tk.(type) {\n\t\tcase xml.CharData:\n\t\t\tif b != nil {\n\t\t\t\tpanic(\"unexpected CharData\")\n\t\t\t}\n\t\t\tb = []byte(string(v))\n\t\tcase xml.EndElement:\n\t\t\tif v.Name.Local != \"xref\" {\n\t\t\t\tpanic(\"expected </xref>\")\n\t\t\t}\n\t\t\tif b != nil {\n\t\t\t\treturn b\n\t\t\t}\n\t\t\tsig := attrSig(se)\n\t\t\tswitch sig {\n\t\t\tcase \"target\":\n\t\t\t\treturn []byte(fmt.Sprintf(\"[%s]\", attrValue(se, \"target\")))\n\t\t\tcase \"fmt-of,rel,target\", \"fmt-,,rel,target\":\n\t\t\t\treturn []byte(fmt.Sprintf(\"[%s, %s]\", attrValue(se, \"target\"), attrValue(se, \"rel\")))\n\t\t\tcase \"fmt-of,sec,target\", \"fmt-,,sec,target\":\n\t\t\t\treturn []byte(fmt.Sprintf(\"[section %s of %s]\", attrValue(se, \"sec\"), 
attrValue(se, \"target\")))\n\t\t\tcase \"fmt-of,rel,sec,target\":\n\t\t\t\treturn []byte(fmt.Sprintf(\"[section %s of %s, %s]\", attrValue(se, \"sec\"), attrValue(se, \"target\"), attrValue(se, \"rel\")))\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"unknown attribute signature %q in %#v\", sig, fmt.Sprintf(\"%#v\", se)))\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"unexpected tag %q\", v))\n\t\t}\n\t}\n}\n\nvar skipAnchor = map[string]bool{\n\t\"intro\":    true,\n\t\"Overview\": true,\n}\n\nvar skipTitle = map[string]bool{\n\t\"Acknowledgements\":            true,\n\t\"Change Log\":                  true,\n\t\"Document Organization\":       true,\n\t\"Conventions and Terminology\": true,\n}\n\nfunc skipElement(s xml.StartElement) bool {\n\tswitch s.Name.Local {\n\tcase \"artwork\":\n\t\treturn true\n\tcase \"section\":\n\t\tfor _, attr := range s.Attr {\n\t\t\tswitch attr.Name.Local {\n\t\t\tcase \"anchor\":\n\t\t\t\tif skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, \"changes.since.\") {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase \"title\":\n\t\t\t\tif skipTitle[attr.Value] {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\nfunc readSpecCov(r io.Reader) specCoverage {\n\tsc := specCoverage{\n\t\tcoverage: map[specPart]bool{},\n\t\td:        xml.NewDecoder(r)}\n\tsc.readSection(nil)\n\treturn sc\n}\n\nfunc (sc specCoverage) addSentences(sec string, sentence string) {\n\tfor _, s := range parseSentences(sentence) {\n\t\tsc.coverage[specPart{sec, s}] = false\n\t}\n}\n\nfunc (sc specCoverage) cover(sec string, sentence string) {\n\tfor _, s := range parseSentences(sentence) {\n\t\tp := specPart{sec, s}\n\t\tif _, ok := sc.coverage[p]; !ok {\n\t\t\tpanic(fmt.Sprintf(\"Not found in spec: %q, %q\", sec, s))\n\t\t}\n\t\tsc.coverage[specPart{sec, s}] = true\n\t}\n\n}\n\nvar whitespaceRx = regexp.MustCompile(`\\s+`)\n\nfunc parseSentences(sens string) []string {\n\tsens = strings.TrimSpace(sens)\n\tif sens == \"\" {\n\t\treturn 
nil\n\t}\n\tss := strings.Split(whitespaceRx.ReplaceAllString(sens, \" \"), \". \")\n\tfor i, s := range ss {\n\t\ts = strings.TrimSpace(s)\n\t\tif !strings.HasSuffix(s, \".\") {\n\t\t\ts += \".\"\n\t\t}\n\t\tss[i] = s\n\t}\n\treturn ss\n}\n\nfunc TestSpecParseSentences(t *testing.T) {\n\ttests := []struct {\n\t\tss   string\n\t\twant []string\n\t}{\n\t\t{\"Sentence 1. Sentence 2.\",\n\t\t\t[]string{\n\t\t\t\t\"Sentence 1.\",\n\t\t\t\t\"Sentence 2.\",\n\t\t\t}},\n\t\t{\"Sentence 1.  \\nSentence 2.\\tSentence 3.\",\n\t\t\t[]string{\n\t\t\t\t\"Sentence 1.\",\n\t\t\t\t\"Sentence 2.\",\n\t\t\t\t\"Sentence 3.\",\n\t\t\t}},\n\t}\n\n\tfor i, tt := range tests {\n\t\tgot := parseSentences(tt.ss)\n\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\tt.Errorf(\"%d: got = %q, want %q\", i, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestSpecCoverage(t *testing.T) {\n\tif !*coverSpec {\n\t\tt.Skip()\n\t}\n\n\tloadSpecOnce.Do(loadSpec)\n\n\tvar (\n\t\tlist     []specPart\n\t\tcv       = defaultSpecCoverage.coverage\n\t\ttotal    = len(cv)\n\t\tcomplete = 0\n\t)\n\n\tfor sp, touched := range defaultSpecCoverage.coverage {\n\t\tif touched {\n\t\t\tcomplete++\n\t\t} else {\n\t\t\tlist = append(list, sp)\n\t\t}\n\t}\n\tsort.Stable(bySpecSection(list))\n\n\tif testing.Short() && len(list) > 5 {\n\t\tlist = list[:5]\n\t}\n\n\tfor _, p := range list {\n\t\tt.Errorf(\"\\tSECTION %s: %s\", p.section, p.sentence)\n\t}\n\n\tt.Logf(\"%d/%d (%d%%) sentences covered\", complete, total, (complete/total)*100)\n}\n\nfunc attrSig(se xml.StartElement) string {\n\tvar names []string\n\tfor _, attr := range se.Attr {\n\t\tif attr.Name.Local == \"fmt\" {\n\t\t\tnames = append(names, \"fmt-\"+attr.Value)\n\t\t} else {\n\t\t\tnames = append(names, attr.Name.Local)\n\t\t}\n\t}\n\tsort.Strings(names)\n\treturn strings.Join(names, \",\")\n}\n\nfunc attrValue(se xml.StartElement, attr string) string {\n\tfor _, a := range se.Attr {\n\t\tif a.Name.Local == attr {\n\t\t\treturn a.Value\n\t\t}\n\t}\n\tpanic(\"unknown 
attribute \" + attr)\n}\n\nfunc TestSpecPartLess(t *testing.T) {\n\ttests := []struct {\n\t\tsec1, sec2 string\n\t\twant       bool\n\t}{\n\t\t{\"6.2.1\", \"6.2\", false},\n\t\t{\"6.2\", \"6.2.1\", true},\n\t\t{\"6.10\", \"6.10.1\", true},\n\t\t{\"6.10\", \"6.1.1\", false}, // 10, not 1\n\t\t{\"6.1\", \"6.1\", false},    // equal, so not less\n\t}\n\tfor _, tt := range tests {\n\t\tgot := (specPart{tt.sec1, \"foo\"}).Less(specPart{tt.sec2, \"foo\"})\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"Less(%q, %q) = %v; want %v\", tt.sec1, tt.sec2, got, tt.want)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/dstunreach.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp\n\n// A DstUnreach represents an ICMP destination unreachable message\n// body.\ntype DstUnreach struct {\n\tData       []byte      // data, known as original datagram field\n\tExtensions []Extension // extensions\n}\n\n// Len implements the Len method of MessageBody interface.\nfunc (p *DstUnreach) Len(proto int) int {\n\tif p == nil {\n\t\treturn 0\n\t}\n\tl, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions)\n\treturn 4 + l\n}\n\n// Marshal implements the Marshal method of MessageBody interface.\nfunc (p *DstUnreach) Marshal(proto int) ([]byte, error) {\n\treturn marshalMultipartMessageBody(proto, p.Data, p.Extensions)\n}\n\n// parseDstUnreach parses b as an ICMP destination unreachable message\n// body.\nfunc parseDstUnreach(proto int, b []byte) (MessageBody, error) {\n\tif len(b) < 4 {\n\t\treturn nil, errMessageTooShort\n\t}\n\tp := &DstUnreach{}\n\tvar err error\n\tp.Data, p.Extensions, err = parseMultipartMessageBody(proto, b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/echo.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp\n\nimport \"encoding/binary\"\n\n// An Echo represents an ICMP echo request or reply message body.\ntype Echo struct {\n\tID   int    // identifier\n\tSeq  int    // sequence number\n\tData []byte // data\n}\n\n// Len implements the Len method of MessageBody interface.\nfunc (p *Echo) Len(proto int) int {\n\tif p == nil {\n\t\treturn 0\n\t}\n\treturn 4 + len(p.Data)\n}\n\n// Marshal implements the Marshal method of MessageBody interface.\nfunc (p *Echo) Marshal(proto int) ([]byte, error) {\n\tb := make([]byte, 4+len(p.Data))\n\tbinary.BigEndian.PutUint16(b[:2], uint16(p.ID))\n\tbinary.BigEndian.PutUint16(b[2:4], uint16(p.Seq))\n\tcopy(b[4:], p.Data)\n\treturn b, nil\n}\n\n// parseEcho parses b as an ICMP echo request or reply message body.\nfunc parseEcho(proto int, b []byte) (MessageBody, error) {\n\tbodyLen := len(b)\n\tif bodyLen < 4 {\n\t\treturn nil, errMessageTooShort\n\t}\n\tp := &Echo{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(binary.BigEndian.Uint16(b[2:4]))}\n\tif bodyLen > 4 {\n\t\tp.Data = make([]byte, bodyLen-4)\n\t\tcopy(p.Data, b[4:])\n\t}\n\treturn p, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/endpoint.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp\n\nimport (\n\t\"net\"\n\t\"runtime\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"golang.org/x/net/ipv4\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nvar _ net.PacketConn = &PacketConn{}\n\n// A PacketConn represents a packet network endpoint that uses either\n// ICMPv4 or ICMPv6.\ntype PacketConn struct {\n\tc  net.PacketConn\n\tp4 *ipv4.PacketConn\n\tp6 *ipv6.PacketConn\n}\n\nfunc (c *PacketConn) ok() bool { return c != nil && c.c != nil }\n\n// IPv4PacketConn returns the ipv4.PacketConn of c.\n// It returns nil when c is not created as the endpoint for ICMPv4.\nfunc (c *PacketConn) IPv4PacketConn() *ipv4.PacketConn {\n\tif !c.ok() {\n\t\treturn nil\n\t}\n\treturn c.p4\n}\n\n// IPv6PacketConn returns the ipv6.PacketConn of c.\n// It returns nil when c is not created as the endpoint for ICMPv6.\nfunc (c *PacketConn) IPv6PacketConn() *ipv6.PacketConn {\n\tif !c.ok() {\n\t\treturn nil\n\t}\n\treturn c.p6\n}\n\n// ReadFrom reads an ICMP message from the connection.\nfunc (c *PacketConn) ReadFrom(b []byte) (int, net.Addr, error) {\n\tif !c.ok() {\n\t\treturn 0, nil, syscall.EINVAL\n\t}\n\t// Please be informed that ipv4.NewPacketConn enables\n\t// IP_STRIPHDR option by default on Darwin.\n\t// See golang.org/issue/9395 for further information.\n\tif runtime.GOOS == \"darwin\" && c.p4 != nil {\n\t\tn, _, peer, err := c.p4.ReadFrom(b)\n\t\treturn n, peer, err\n\t}\n\treturn c.c.ReadFrom(b)\n}\n\n// WriteTo writes the ICMP message b to dst.\n// Dst must be net.UDPAddr when c is a non-privileged\n// datagram-oriented ICMP endpoint. 
Otherwise it must be net.IPAddr.\nfunc (c *PacketConn) WriteTo(b []byte, dst net.Addr) (int, error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\treturn c.c.WriteTo(b, dst)\n}\n\n// Close closes the endpoint.\nfunc (c *PacketConn) Close() error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn c.c.Close()\n}\n\n// LocalAddr returns the local network address.\nfunc (c *PacketConn) LocalAddr() net.Addr {\n\tif !c.ok() {\n\t\treturn nil\n\t}\n\treturn c.c.LocalAddr()\n}\n\n// SetDeadline sets the read and write deadlines associated with the\n// endpoint.\nfunc (c *PacketConn) SetDeadline(t time.Time) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn c.c.SetDeadline(t)\n}\n\n// SetReadDeadline sets the read deadline associated with the\n// endpoint.\nfunc (c *PacketConn) SetReadDeadline(t time.Time) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn c.c.SetReadDeadline(t)\n}\n\n// SetWriteDeadline sets the write deadline associated with the\n// endpoint.\nfunc (c *PacketConn) SetWriteDeadline(t time.Time) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn c.c.SetWriteDeadline(t)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/example_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp_test\n\nimport (\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\n\t\"golang.org/x/net/icmp\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nfunc ExamplePacketConn_nonPrivilegedPing() {\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\tcase \"linux\":\n\t\tlog.Println(\"you may need to adjust the net.ipv4.ping_group_range kernel state\")\n\tdefault:\n\t\tlog.Println(\"not supported on\", runtime.GOOS)\n\t\treturn\n\t}\n\n\tc, err := icmp.ListenPacket(\"udp6\", \"fe80::1%en0\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\twm := icmp.Message{\n\t\tType: ipv6.ICMPTypeEchoRequest, Code: 0,\n\t\tBody: &icmp.Echo{\n\t\t\tID: os.Getpid() & 0xffff, Seq: 1,\n\t\t\tData: []byte(\"HELLO-R-U-THERE\"),\n\t\t},\n\t}\n\twb, err := wm.Marshal(nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif _, err := c.WriteTo(wb, &net.UDPAddr{IP: net.ParseIP(\"ff02::1\"), Zone: \"en0\"}); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\trb := make([]byte, 1500)\n\tn, peer, err := c.ReadFrom(rb)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\trm, err := icmp.ParseMessage(58, rb[:n])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tswitch rm.Type {\n\tcase ipv6.ICMPTypeEchoReply:\n\t\tlog.Printf(\"got reflection from %v\", peer)\n\tdefault:\n\t\tlog.Printf(\"got %+v; want echo reply\", rm)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/extension.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp\n\nimport \"encoding/binary\"\n\n// An Extension represents an ICMP extension.\ntype Extension interface {\n\t// Len returns the length of ICMP extension.\n\t// Proto must be either the ICMPv4 or ICMPv6 protocol number.\n\tLen(proto int) int\n\n\t// Marshal returns the binary encoding of ICMP extension.\n\t// Proto must be either the ICMPv4 or ICMPv6 protocol number.\n\tMarshal(proto int) ([]byte, error)\n}\n\nconst extensionVersion = 2\n\nfunc validExtensionHeader(b []byte) bool {\n\tv := int(b[0]&0xf0) >> 4\n\ts := binary.BigEndian.Uint16(b[2:4])\n\tif s != 0 {\n\t\ts = checksum(b)\n\t}\n\tif v != extensionVersion || s != 0 {\n\t\treturn false\n\t}\n\treturn true\n}\n\n// parseExtensions parses b as a list of ICMP extensions.\n// The length attribute l must be the length attribute field in\n// received icmp messages.\n//\n// It will return a list of ICMP extensions and an adjusted length\n// attribute that represents the length of the padded original\n// datagram field. Otherwise, it returns an error.\nfunc parseExtensions(b []byte, l int) ([]Extension, int, error) {\n\t// Still a lot of non-RFC 4884 compliant implementations are\n\t// out there. 
Set the length attribute l to 128 when it looks\n\t// inappropriate for backwards compatibility.\n\t//\n\t// A minimal extension at least requires 8 octets; 4 octets\n\t// for an extension header, and 4 octets for a single object\n\t// header.\n\t//\n\t// See RFC 4884 for further information.\n\tif 128 > l || l+8 > len(b) {\n\t\tl = 128\n\t}\n\tif l+8 > len(b) {\n\t\treturn nil, -1, errNoExtension\n\t}\n\tif !validExtensionHeader(b[l:]) {\n\t\tif l == 128 {\n\t\t\treturn nil, -1, errNoExtension\n\t\t}\n\t\tl = 128\n\t\tif !validExtensionHeader(b[l:]) {\n\t\t\treturn nil, -1, errNoExtension\n\t\t}\n\t}\n\tvar exts []Extension\n\tfor b = b[l+4:]; len(b) >= 4; {\n\t\tol := int(binary.BigEndian.Uint16(b[:2]))\n\t\tif 4 > ol || ol > len(b) {\n\t\t\tbreak\n\t\t}\n\t\tswitch b[2] {\n\t\tcase classMPLSLabelStack:\n\t\t\text, err := parseMPLSLabelStack(b[:ol])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, -1, err\n\t\t\t}\n\t\t\texts = append(exts, ext)\n\t\tcase classInterfaceInfo:\n\t\t\text, err := parseInterfaceInfo(b[:ol])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, -1, err\n\t\t\t}\n\t\t\texts = append(exts, ext)\n\t\t}\n\t\tb = b[ol:]\n\t}\n\treturn exts, l, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/extension_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp\n\nimport (\n\t\"net\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/iana\"\n)\n\nvar marshalAndParseExtensionTests = []struct {\n\tproto int\n\thdr   []byte\n\tobj   []byte\n\texts  []Extension\n}{\n\t// MPLS label stack with no label\n\t{\n\t\tproto: iana.ProtocolICMP,\n\t\thdr: []byte{\n\t\t\t0x20, 0x00, 0x00, 0x00,\n\t\t},\n\t\tobj: []byte{\n\t\t\t0x00, 0x04, 0x01, 0x01,\n\t\t},\n\t\texts: []Extension{\n\t\t\t&MPLSLabelStack{\n\t\t\t\tClass: classMPLSLabelStack,\n\t\t\t\tType:  typeIncomingMPLSLabelStack,\n\t\t\t},\n\t\t},\n\t},\n\t// MPLS label stack with a single label\n\t{\n\t\tproto: iana.ProtocolIPv6ICMP,\n\t\thdr: []byte{\n\t\t\t0x20, 0x00, 0x00, 0x00,\n\t\t},\n\t\tobj: []byte{\n\t\t\t0x00, 0x08, 0x01, 0x01,\n\t\t\t0x03, 0xe8, 0xe9, 0xff,\n\t\t},\n\t\texts: []Extension{\n\t\t\t&MPLSLabelStack{\n\t\t\t\tClass: classMPLSLabelStack,\n\t\t\t\tType:  typeIncomingMPLSLabelStack,\n\t\t\t\tLabels: []MPLSLabel{\n\t\t\t\t\t{\n\t\t\t\t\t\tLabel: 16014,\n\t\t\t\t\t\tTC:    0x4,\n\t\t\t\t\t\tS:     true,\n\t\t\t\t\t\tTTL:   255,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t// MPLS label stack with multiple labels\n\t{\n\t\tproto: iana.ProtocolICMP,\n\t\thdr: []byte{\n\t\t\t0x20, 0x00, 0x00, 0x00,\n\t\t},\n\t\tobj: []byte{\n\t\t\t0x00, 0x0c, 0x01, 0x01,\n\t\t\t0x03, 0xe8, 0xde, 0xfe,\n\t\t\t0x03, 0xe8, 0xe1, 0xff,\n\t\t},\n\t\texts: []Extension{\n\t\t\t&MPLSLabelStack{\n\t\t\t\tClass: classMPLSLabelStack,\n\t\t\t\tType:  typeIncomingMPLSLabelStack,\n\t\t\t\tLabels: []MPLSLabel{\n\t\t\t\t\t{\n\t\t\t\t\t\tLabel: 16013,\n\t\t\t\t\t\tTC:    0x7,\n\t\t\t\t\t\tS:     false,\n\t\t\t\t\t\tTTL:   254,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tLabel: 16014,\n\t\t\t\t\t\tTC:    0,\n\t\t\t\t\t\tS:     true,\n\t\t\t\t\t\tTTL:   
255,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t// Interface information with no attribute\n\t{\n\t\tproto: iana.ProtocolICMP,\n\t\thdr: []byte{\n\t\t\t0x20, 0x00, 0x00, 0x00,\n\t\t},\n\t\tobj: []byte{\n\t\t\t0x00, 0x04, 0x02, 0x00,\n\t\t},\n\t\texts: []Extension{\n\t\t\t&InterfaceInfo{\n\t\t\t\tClass: classInterfaceInfo,\n\t\t\t},\n\t\t},\n\t},\n\t// Interface information with ifIndex and name\n\t{\n\t\tproto: iana.ProtocolICMP,\n\t\thdr: []byte{\n\t\t\t0x20, 0x00, 0x00, 0x00,\n\t\t},\n\t\tobj: []byte{\n\t\t\t0x00, 0x10, 0x02, 0x0a,\n\t\t\t0x00, 0x00, 0x00, 0x10,\n\t\t\t0x08, byte('e'), byte('n'), byte('1'),\n\t\t\tbyte('0'), byte('1'), 0x00, 0x00,\n\t\t},\n\t\texts: []Extension{\n\t\t\t&InterfaceInfo{\n\t\t\t\tClass: classInterfaceInfo,\n\t\t\t\tType:  0x0a,\n\t\t\t\tInterface: &net.Interface{\n\t\t\t\t\tIndex: 16,\n\t\t\t\t\tName:  \"en101\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t// Interface information with ifIndex, IPAddr, name and MTU\n\t{\n\t\tproto: iana.ProtocolIPv6ICMP,\n\t\thdr: []byte{\n\t\t\t0x20, 0x00, 0x00, 0x00,\n\t\t},\n\t\tobj: []byte{\n\t\t\t0x00, 0x28, 0x02, 0x0f,\n\t\t\t0x00, 0x00, 0x00, 0x0f,\n\t\t\t0x00, 0x02, 0x00, 0x00,\n\t\t\t0xfe, 0x80, 0x00, 0x00,\n\t\t\t0x00, 0x00, 0x00, 0x00,\n\t\t\t0x00, 0x00, 0x00, 0x00,\n\t\t\t0x00, 0x00, 0x00, 0x01,\n\t\t\t0x08, byte('e'), byte('n'), byte('1'),\n\t\t\tbyte('0'), byte('1'), 0x00, 0x00,\n\t\t\t0x00, 0x00, 0x20, 0x00,\n\t\t},\n\t\texts: []Extension{\n\t\t\t&InterfaceInfo{\n\t\t\t\tClass: classInterfaceInfo,\n\t\t\t\tType:  0x0f,\n\t\t\t\tInterface: &net.Interface{\n\t\t\t\t\tIndex: 15,\n\t\t\t\t\tName:  \"en101\",\n\t\t\t\t\tMTU:   8192,\n\t\t\t\t},\n\t\t\t\tAddr: &net.IPAddr{\n\t\t\t\t\tIP:   net.ParseIP(\"fe80::1\"),\n\t\t\t\t\tZone: \"en101\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestMarshalAndParseExtension(t *testing.T) {\n\tfor i, tt := range marshalAndParseExtensionTests {\n\t\tfor j, ext := range tt.exts {\n\t\t\tvar err error\n\t\t\tvar b []byte\n\t\t\tswitch ext := 
ext.(type) {\n\t\t\tcase *MPLSLabelStack:\n\t\t\t\tb, err = ext.Marshal(tt.proto)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"#%v/%v: %v\", i, j, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase *InterfaceInfo:\n\t\t\t\tb, err = ext.Marshal(tt.proto)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"#%v/%v: %v\", i, j, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(b, tt.obj) {\n\t\t\t\tt.Errorf(\"#%v/%v: got %#v; want %#v\", i, j, b, tt.obj)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tfor j, wire := range []struct {\n\t\t\tdata     []byte // original datagram\n\t\t\tinlattr  int    // length of padded original datagram, a hint\n\t\t\toutlattr int    // length of padded original datagram, a want\n\t\t\terr      error\n\t\t}{\n\t\t\t{nil, 0, -1, errNoExtension},\n\t\t\t{make([]byte, 127), 128, -1, errNoExtension},\n\n\t\t\t{make([]byte, 128), 127, -1, errNoExtension},\n\t\t\t{make([]byte, 128), 128, -1, errNoExtension},\n\t\t\t{make([]byte, 128), 129, -1, errNoExtension},\n\n\t\t\t{append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 127, 128, nil},\n\t\t\t{append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 128, 128, nil},\n\t\t\t{append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 129, 128, nil},\n\n\t\t\t{append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 511, -1, errNoExtension},\n\t\t\t{append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 512, 512, nil},\n\t\t\t{append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 513, -1, errNoExtension},\n\t\t} {\n\t\t\texts, l, err := parseExtensions(wire.data, wire.inlattr)\n\t\t\tif err != wire.err {\n\t\t\t\tt.Errorf(\"#%v/%v: got %v; want %v\", i, j, err, wire.err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif wire.err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif l != wire.outlattr {\n\t\t\t\tt.Errorf(\"#%v/%v: got %v; want %v\", i, j, l, wire.outlattr)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(exts, tt.exts) {\n\t\t\t\tfor j, ext := range exts {\n\t\t\t\t\tswitch ext := ext.(type) 
{\n\t\t\t\t\tcase *MPLSLabelStack:\n\t\t\t\t\t\twant := tt.exts[j].(*MPLSLabelStack)\n\t\t\t\t\t\tt.Errorf(\"#%v/%v: got %#v; want %#v\", i, j, ext, want)\n\t\t\t\t\tcase *InterfaceInfo:\n\t\t\t\t\t\twant := tt.exts[j].(*InterfaceInfo)\n\t\t\t\t\t\tt.Errorf(\"#%v/%v: got %#v; want %#v\", i, j, ext, want)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar parseInterfaceNameTests = []struct {\n\tb []byte\n\terror\n}{\n\t{[]byte{0, 'e', 'n', '0'}, errInvalidExtension},\n\t{[]byte{4, 'e', 'n', '0'}, nil},\n\t{[]byte{7, 'e', 'n', '0', 0xff, 0xff, 0xff, 0xff}, errInvalidExtension},\n\t{[]byte{8, 'e', 'n', '0', 0xff, 0xff, 0xff}, errMessageTooShort},\n}\n\nfunc TestParseInterfaceName(t *testing.T) {\n\tifi := InterfaceInfo{Interface: &net.Interface{}}\n\tfor i, tt := range parseInterfaceNameTests {\n\t\tif _, err := ifi.parseName(tt.b); err != tt.error {\n\t\t\tt.Errorf(\"#%d: got %v; want %v\", i, err, tt.error)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/helper_posix.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows\n\npackage icmp\n\nimport (\n\t\"net\"\n\t\"strconv\"\n\t\"syscall\"\n)\n\nfunc sockaddr(family int, address string) (syscall.Sockaddr, error) {\n\tswitch family {\n\tcase syscall.AF_INET:\n\t\ta, err := net.ResolveIPAddr(\"ip4\", address)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(a.IP) == 0 {\n\t\t\ta.IP = net.IPv4zero\n\t\t}\n\t\tif a.IP = a.IP.To4(); a.IP == nil {\n\t\t\treturn nil, net.InvalidAddrError(\"non-ipv4 address\")\n\t\t}\n\t\tsa := &syscall.SockaddrInet4{}\n\t\tcopy(sa.Addr[:], a.IP)\n\t\treturn sa, nil\n\tcase syscall.AF_INET6:\n\t\ta, err := net.ResolveIPAddr(\"ip6\", address)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(a.IP) == 0 {\n\t\t\ta.IP = net.IPv6unspecified\n\t\t}\n\t\tif a.IP.Equal(net.IPv4zero) {\n\t\t\ta.IP = net.IPv6unspecified\n\t\t}\n\t\tif a.IP = a.IP.To16(); a.IP == nil || a.IP.To4() != nil {\n\t\t\treturn nil, net.InvalidAddrError(\"non-ipv6 address\")\n\t\t}\n\t\tsa := &syscall.SockaddrInet6{ZoneId: zoneToUint32(a.Zone)}\n\t\tcopy(sa.Addr[:], a.IP)\n\t\treturn sa, nil\n\tdefault:\n\t\treturn nil, net.InvalidAddrError(\"unexpected family\")\n\t}\n}\n\nfunc zoneToUint32(zone string) uint32 {\n\tif zone == \"\" {\n\t\treturn 0\n\t}\n\tif ifi, err := net.InterfaceByName(zone); err == nil {\n\t\treturn uint32(ifi.Index)\n\t}\n\tn, err := strconv.Atoi(zone)\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn uint32(n)\n}\n\nfunc last(s string, b byte) int {\n\ti := len(s)\n\tfor i--; i >= 0; i-- {\n\t\tif s[i] == b {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn i\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/interface.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp\n\nimport (\n\t\"encoding/binary\"\n\t\"net\"\n\t\"strings\"\n\n\t\"golang.org/x/net/internal/iana\"\n)\n\nconst (\n\tclassInterfaceInfo = 2\n\n\tafiIPv4 = 1\n\tafiIPv6 = 2\n)\n\nconst (\n\tattrMTU = 1 << iota\n\tattrName\n\tattrIPAddr\n\tattrIfIndex\n)\n\n// An InterfaceInfo represents interface and next-hop identification.\ntype InterfaceInfo struct {\n\tClass     int // extension object class number\n\tType      int // extension object sub-type\n\tInterface *net.Interface\n\tAddr      *net.IPAddr\n}\n\nfunc (ifi *InterfaceInfo) nameLen() int {\n\tif len(ifi.Interface.Name) > 63 {\n\t\treturn 64\n\t}\n\tl := 1 + len(ifi.Interface.Name)\n\treturn (l + 3) &^ 3\n}\n\nfunc (ifi *InterfaceInfo) attrsAndLen(proto int) (attrs, l int) {\n\tl = 4\n\tif ifi.Interface != nil && ifi.Interface.Index > 0 {\n\t\tattrs |= attrIfIndex\n\t\tl += 4\n\t\tif len(ifi.Interface.Name) > 0 {\n\t\t\tattrs |= attrName\n\t\t\tl += ifi.nameLen()\n\t\t}\n\t\tif ifi.Interface.MTU > 0 {\n\t\t\tattrs |= attrMTU\n\t\t\tl += 4\n\t\t}\n\t}\n\tif ifi.Addr != nil {\n\t\tswitch proto {\n\t\tcase iana.ProtocolICMP:\n\t\t\tif ifi.Addr.IP.To4() != nil {\n\t\t\t\tattrs |= attrIPAddr\n\t\t\t\tl += 4 + net.IPv4len\n\t\t\t}\n\t\tcase iana.ProtocolIPv6ICMP:\n\t\t\tif ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil {\n\t\t\t\tattrs |= attrIPAddr\n\t\t\t\tl += 4 + net.IPv6len\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n// Len implements the Len method of Extension interface.\nfunc (ifi *InterfaceInfo) Len(proto int) int {\n\t_, l := ifi.attrsAndLen(proto)\n\treturn l\n}\n\n// Marshal implements the Marshal method of Extension interface.\nfunc (ifi *InterfaceInfo) Marshal(proto int) ([]byte, error) {\n\tattrs, l := ifi.attrsAndLen(proto)\n\tb := make([]byte, l)\n\tif err := ifi.marshal(proto, b, attrs, l); err != nil 
{\n\t\treturn nil, err\n\t}\n\treturn b, nil\n}\n\nfunc (ifi *InterfaceInfo) marshal(proto int, b []byte, attrs, l int) error {\n\tbinary.BigEndian.PutUint16(b[:2], uint16(l))\n\tb[2], b[3] = classInterfaceInfo, byte(ifi.Type)\n\tfor b = b[4:]; len(b) > 0 && attrs != 0; {\n\t\tswitch {\n\t\tcase attrs&attrIfIndex != 0:\n\t\t\tb = ifi.marshalIfIndex(proto, b)\n\t\t\tattrs &^= attrIfIndex\n\t\tcase attrs&attrIPAddr != 0:\n\t\t\tb = ifi.marshalIPAddr(proto, b)\n\t\t\tattrs &^= attrIPAddr\n\t\tcase attrs&attrName != 0:\n\t\t\tb = ifi.marshalName(proto, b)\n\t\t\tattrs &^= attrName\n\t\tcase attrs&attrMTU != 0:\n\t\t\tb = ifi.marshalMTU(proto, b)\n\t\t\tattrs &^= attrMTU\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (ifi *InterfaceInfo) marshalIfIndex(proto int, b []byte) []byte {\n\tbinary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.Index))\n\treturn b[4:]\n}\n\nfunc (ifi *InterfaceInfo) parseIfIndex(b []byte) ([]byte, error) {\n\tif len(b) < 4 {\n\t\treturn nil, errMessageTooShort\n\t}\n\tifi.Interface.Index = int(binary.BigEndian.Uint32(b[:4]))\n\treturn b[4:], nil\n}\n\nfunc (ifi *InterfaceInfo) marshalIPAddr(proto int, b []byte) []byte {\n\tswitch proto {\n\tcase iana.ProtocolICMP:\n\t\tbinary.BigEndian.PutUint16(b[:2], uint16(afiIPv4))\n\t\tcopy(b[4:4+net.IPv4len], ifi.Addr.IP.To4())\n\t\tb = b[4+net.IPv4len:]\n\tcase iana.ProtocolIPv6ICMP:\n\t\tbinary.BigEndian.PutUint16(b[:2], uint16(afiIPv6))\n\t\tcopy(b[4:4+net.IPv6len], ifi.Addr.IP.To16())\n\t\tb = b[4+net.IPv6len:]\n\t}\n\treturn b\n}\n\nfunc (ifi *InterfaceInfo) parseIPAddr(b []byte) ([]byte, error) {\n\tif len(b) < 4 {\n\t\treturn nil, errMessageTooShort\n\t}\n\tafi := int(binary.BigEndian.Uint16(b[:2]))\n\tb = b[4:]\n\tswitch afi {\n\tcase afiIPv4:\n\t\tif len(b) < net.IPv4len {\n\t\t\treturn nil, errMessageTooShort\n\t\t}\n\t\tifi.Addr.IP = make(net.IP, net.IPv4len)\n\t\tcopy(ifi.Addr.IP, b[:net.IPv4len])\n\t\tb = b[net.IPv4len:]\n\tcase afiIPv6:\n\t\tif len(b) < net.IPv6len {\n\t\t\treturn nil, 
errMessageTooShort\n\t\t}\n\t\tifi.Addr.IP = make(net.IP, net.IPv6len)\n\t\tcopy(ifi.Addr.IP, b[:net.IPv6len])\n\t\tb = b[net.IPv6len:]\n\t}\n\treturn b, nil\n}\n\nfunc (ifi *InterfaceInfo) marshalName(proto int, b []byte) []byte {\n\tl := byte(ifi.nameLen())\n\tb[0] = l\n\tcopy(b[1:], []byte(ifi.Interface.Name))\n\treturn b[l:]\n}\n\nfunc (ifi *InterfaceInfo) parseName(b []byte) ([]byte, error) {\n\tif 4 > len(b) || len(b) < int(b[0]) {\n\t\treturn nil, errMessageTooShort\n\t}\n\tl := int(b[0])\n\tif l%4 != 0 || 4 > l || l > 64 {\n\t\treturn nil, errInvalidExtension\n\t}\n\tvar name [63]byte\n\tcopy(name[:], b[1:l])\n\tifi.Interface.Name = strings.Trim(string(name[:]), \"\\000\")\n\treturn b[l:], nil\n}\n\nfunc (ifi *InterfaceInfo) marshalMTU(proto int, b []byte) []byte {\n\tbinary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.MTU))\n\treturn b[4:]\n}\n\nfunc (ifi *InterfaceInfo) parseMTU(b []byte) ([]byte, error) {\n\tif len(b) < 4 {\n\t\treturn nil, errMessageTooShort\n\t}\n\tifi.Interface.MTU = int(binary.BigEndian.Uint32(b[:4]))\n\treturn b[4:], nil\n}\n\nfunc parseInterfaceInfo(b []byte) (Extension, error) {\n\tifi := &InterfaceInfo{\n\t\tClass: int(b[2]),\n\t\tType:  int(b[3]),\n\t}\n\tif ifi.Type&(attrIfIndex|attrName|attrMTU) != 0 {\n\t\tifi.Interface = &net.Interface{}\n\t}\n\tif ifi.Type&attrIPAddr != 0 {\n\t\tifi.Addr = &net.IPAddr{}\n\t}\n\tattrs := ifi.Type & (attrIfIndex | attrIPAddr | attrName | attrMTU)\n\tfor b = b[4:]; len(b) > 0 && attrs != 0; {\n\t\tvar err error\n\t\tswitch {\n\t\tcase attrs&attrIfIndex != 0:\n\t\t\tb, err = ifi.parseIfIndex(b)\n\t\t\tattrs &^= attrIfIndex\n\t\tcase attrs&attrIPAddr != 0:\n\t\t\tb, err = ifi.parseIPAddr(b)\n\t\t\tattrs &^= attrIPAddr\n\t\tcase attrs&attrName != 0:\n\t\t\tb, err = ifi.parseName(b)\n\t\t\tattrs &^= attrName\n\t\tcase attrs&attrMTU != 0:\n\t\t\tb, err = ifi.parseMTU(b)\n\t\t\tattrs &^= attrMTU\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif ifi.Interface != nil && 
ifi.Interface.Name != \"\" && ifi.Addr != nil && ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil {\n\t\tifi.Addr.Zone = ifi.Interface.Name\n\t}\n\treturn ifi, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/ipv4.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp\n\nimport (\n\t\"encoding/binary\"\n\t\"net\"\n\t\"runtime\"\n\n\t\"golang.org/x/net/internal/socket\"\n\t\"golang.org/x/net/ipv4\"\n)\n\n// freebsdVersion is set in sys_freebsd.go.\n// See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html.\nvar freebsdVersion uint32\n\n// ParseIPv4Header parses b as an IPv4 header of ICMP error message\n// invoking packet, which is contained in ICMP error message.\nfunc ParseIPv4Header(b []byte) (*ipv4.Header, error) {\n\tif len(b) < ipv4.HeaderLen {\n\t\treturn nil, errHeaderTooShort\n\t}\n\thdrlen := int(b[0]&0x0f) << 2\n\tif hdrlen > len(b) {\n\t\treturn nil, errBufferTooShort\n\t}\n\th := &ipv4.Header{\n\t\tVersion:  int(b[0] >> 4),\n\t\tLen:      hdrlen,\n\t\tTOS:      int(b[1]),\n\t\tID:       int(binary.BigEndian.Uint16(b[4:6])),\n\t\tFragOff:  int(binary.BigEndian.Uint16(b[6:8])),\n\t\tTTL:      int(b[8]),\n\t\tProtocol: int(b[9]),\n\t\tChecksum: int(binary.BigEndian.Uint16(b[10:12])),\n\t\tSrc:      net.IPv4(b[12], b[13], b[14], b[15]),\n\t\tDst:      net.IPv4(b[16], b[17], b[18], b[19]),\n\t}\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\t\th.TotalLen = int(socket.NativeEndian.Uint16(b[2:4]))\n\tcase \"freebsd\":\n\t\tif freebsdVersion >= 1000000 {\n\t\t\th.TotalLen = int(binary.BigEndian.Uint16(b[2:4]))\n\t\t} else {\n\t\t\th.TotalLen = int(socket.NativeEndian.Uint16(b[2:4]))\n\t\t}\n\tdefault:\n\t\th.TotalLen = int(binary.BigEndian.Uint16(b[2:4]))\n\t}\n\th.Flags = ipv4.HeaderFlags(h.FragOff&0xe000) >> 13\n\th.FragOff = h.FragOff & 0x1fff\n\tif hdrlen-ipv4.HeaderLen > 0 {\n\t\th.Options = make([]byte, hdrlen-ipv4.HeaderLen)\n\t\tcopy(h.Options, b[ipv4.HeaderLen:])\n\t}\n\treturn h, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/ipv4_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp\n\nimport (\n\t\"encoding/binary\"\n\t\"net\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/socket\"\n\t\"golang.org/x/net/ipv4\"\n)\n\ntype ipv4HeaderTest struct {\n\twireHeaderFromKernel        [ipv4.HeaderLen]byte\n\twireHeaderFromTradBSDKernel [ipv4.HeaderLen]byte\n\tHeader                      *ipv4.Header\n}\n\nvar ipv4HeaderLittleEndianTest = ipv4HeaderTest{\n\t// TODO(mikio): Add platform dependent wire header formats when\n\t// we support new platforms.\n\twireHeaderFromKernel: [ipv4.HeaderLen]byte{\n\t\t0x45, 0x01, 0xbe, 0xef,\n\t\t0xca, 0xfe, 0x45, 0xdc,\n\t\t0xff, 0x01, 0xde, 0xad,\n\t\t172, 16, 254, 254,\n\t\t192, 168, 0, 1,\n\t},\n\twireHeaderFromTradBSDKernel: [ipv4.HeaderLen]byte{\n\t\t0x45, 0x01, 0xef, 0xbe,\n\t\t0xca, 0xfe, 0x45, 0xdc,\n\t\t0xff, 0x01, 0xde, 0xad,\n\t\t172, 16, 254, 254,\n\t\t192, 168, 0, 1,\n\t},\n\tHeader: &ipv4.Header{\n\t\tVersion:  ipv4.Version,\n\t\tLen:      ipv4.HeaderLen,\n\t\tTOS:      1,\n\t\tTotalLen: 0xbeef,\n\t\tID:       0xcafe,\n\t\tFlags:    ipv4.DontFragment,\n\t\tFragOff:  1500,\n\t\tTTL:      255,\n\t\tProtocol: 1,\n\t\tChecksum: 0xdead,\n\t\tSrc:      net.IPv4(172, 16, 254, 254),\n\t\tDst:      net.IPv4(192, 168, 0, 1),\n\t},\n}\n\nfunc TestParseIPv4Header(t *testing.T) {\n\ttt := &ipv4HeaderLittleEndianTest\n\tif socket.NativeEndian != binary.LittleEndian {\n\t\tt.Skip(\"no test for non-little endian machine yet\")\n\t}\n\n\tvar wh []byte\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\t\twh = tt.wireHeaderFromTradBSDKernel[:]\n\tcase \"freebsd\":\n\t\tif freebsdVersion >= 1000000 {\n\t\t\twh = tt.wireHeaderFromKernel[:]\n\t\t} else {\n\t\t\twh = tt.wireHeaderFromTradBSDKernel[:]\n\t\t}\n\tdefault:\n\t\twh = tt.wireHeaderFromKernel[:]\n\t}\n\th, err := ParseIPv4Header(wh)\n\tif err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\tif !reflect.DeepEqual(h, tt.Header) {\n\t\tt.Fatalf(\"got %#v; want %#v\", h, tt.Header)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/ipv6.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp\n\nimport (\n\t\"net\"\n\n\t\"golang.org/x/net/internal/iana\"\n)\n\nconst ipv6PseudoHeaderLen = 2*net.IPv6len + 8\n\n// IPv6PseudoHeader returns an IPv6 pseudo header for checksum\n// calculation.\nfunc IPv6PseudoHeader(src, dst net.IP) []byte {\n\tb := make([]byte, ipv6PseudoHeaderLen)\n\tcopy(b, src.To16())\n\tcopy(b[net.IPv6len:], dst.To16())\n\tb[len(b)-1] = byte(iana.ProtocolIPv6ICMP)\n\treturn b\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/listen_posix.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows\n\npackage icmp\n\nimport (\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"syscall\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/ipv4\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nconst sysIP_STRIPHDR = 0x17 // for now only darwin supports this option\n\n// ListenPacket listens for incoming ICMP packets addressed to\n// address. See net.Dial for the syntax of address.\n//\n// For non-privileged datagram-oriented ICMP endpoints, network must\n// be \"udp4\" or \"udp6\". The endpoint allows to read, write a few\n// limited ICMP messages such as echo request and echo reply.\n// Currently only Darwin and Linux support this.\n//\n// Examples:\n//\tListenPacket(\"udp4\", \"192.168.0.1\")\n//\tListenPacket(\"udp4\", \"0.0.0.0\")\n//\tListenPacket(\"udp6\", \"fe80::1%en0\")\n//\tListenPacket(\"udp6\", \"::\")\n//\n// For privileged raw ICMP endpoints, network must be \"ip4\" or \"ip6\"\n// followed by a colon and an ICMP protocol number or name.\n//\n// Examples:\n//\tListenPacket(\"ip4:icmp\", \"192.168.0.1\")\n//\tListenPacket(\"ip4:1\", \"0.0.0.0\")\n//\tListenPacket(\"ip6:ipv6-icmp\", \"fe80::1%en0\")\n//\tListenPacket(\"ip6:58\", \"::\")\nfunc ListenPacket(network, address string) (*PacketConn, error) {\n\tvar family, proto int\n\tswitch network {\n\tcase \"udp4\":\n\t\tfamily, proto = syscall.AF_INET, iana.ProtocolICMP\n\tcase \"udp6\":\n\t\tfamily, proto = syscall.AF_INET6, iana.ProtocolIPv6ICMP\n\tdefault:\n\t\ti := last(network, ':')\n\t\tswitch network[:i] {\n\t\tcase \"ip4\":\n\t\t\tproto = iana.ProtocolICMP\n\t\tcase \"ip6\":\n\t\t\tproto = iana.ProtocolIPv6ICMP\n\t\t}\n\t}\n\tvar cerr error\n\tvar c net.PacketConn\n\tswitch family {\n\tcase syscall.AF_INET, syscall.AF_INET6:\n\t\ts, err := 
syscall.Socket(family, syscall.SOCK_DGRAM, proto)\n\t\tif err != nil {\n\t\t\treturn nil, os.NewSyscallError(\"socket\", err)\n\t\t}\n\t\tif runtime.GOOS == \"darwin\" && family == syscall.AF_INET {\n\t\t\tif err := syscall.SetsockoptInt(s, iana.ProtocolIP, sysIP_STRIPHDR, 1); err != nil {\n\t\t\t\tsyscall.Close(s)\n\t\t\t\treturn nil, os.NewSyscallError(\"setsockopt\", err)\n\t\t\t}\n\t\t}\n\t\tsa, err := sockaddr(family, address)\n\t\tif err != nil {\n\t\t\tsyscall.Close(s)\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := syscall.Bind(s, sa); err != nil {\n\t\t\tsyscall.Close(s)\n\t\t\treturn nil, os.NewSyscallError(\"bind\", err)\n\t\t}\n\t\tf := os.NewFile(uintptr(s), \"datagram-oriented icmp\")\n\t\tc, cerr = net.FilePacketConn(f)\n\t\tf.Close()\n\tdefault:\n\t\tc, cerr = net.ListenPacket(network, address)\n\t}\n\tif cerr != nil {\n\t\treturn nil, cerr\n\t}\n\tswitch proto {\n\tcase iana.ProtocolICMP:\n\t\treturn &PacketConn{c: c, p4: ipv4.NewPacketConn(c)}, nil\n\tcase iana.ProtocolIPv6ICMP:\n\t\treturn &PacketConn{c: c, p6: ipv6.NewPacketConn(c)}, nil\n\tdefault:\n\t\treturn &PacketConn{c: c}, nil\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/listen_stub.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build nacl plan9\n\npackage icmp\n\n// ListenPacket listens for incoming ICMP packets addressed to\n// address. See net.Dial for the syntax of address.\n//\n// For non-privileged datagram-oriented ICMP endpoints, network must\n// be \"udp4\" or \"udp6\". The endpoint allows to read, write a few\n// limited ICMP messages such as echo request and echo reply.\n// Currently only Darwin and Linux support this.\n//\n// Examples:\n//\tListenPacket(\"udp4\", \"192.168.0.1\")\n//\tListenPacket(\"udp4\", \"0.0.0.0\")\n//\tListenPacket(\"udp6\", \"fe80::1%en0\")\n//\tListenPacket(\"udp6\", \"::\")\n//\n// For privileged raw ICMP endpoints, network must be \"ip4\" or \"ip6\"\n// followed by a colon and an ICMP protocol number or name.\n//\n// Examples:\n//\tListenPacket(\"ip4:icmp\", \"192.168.0.1\")\n//\tListenPacket(\"ip4:1\", \"0.0.0.0\")\n//\tListenPacket(\"ip6:ipv6-icmp\", \"fe80::1%en0\")\n//\tListenPacket(\"ip6:58\", \"::\")\nfunc ListenPacket(network, address string) (*PacketConn, error) {\n\treturn nil, errOpNoSupport\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/message.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package icmp provides basic functions for the manipulation of\n// messages used in the Internet Control Message Protocols,\n// ICMPv4 and ICMPv6.\n//\n// ICMPv4 and ICMPv6 are defined in RFC 792 and RFC 4443.\n// Multi-part message support for ICMP is defined in RFC 4884.\n// ICMP extensions for MPLS are defined in RFC 4950.\n// ICMP extensions for interface and next-hop identification are\n// defined in RFC 5837.\npackage icmp // import \"golang.org/x/net/icmp\"\n\nimport (\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"net\"\n\t\"syscall\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/ipv4\"\n\t\"golang.org/x/net/ipv6\"\n)\n\n// BUG(mikio): This package is not implemented on NaCl and Plan 9.\n\nvar (\n\terrMessageTooShort  = errors.New(\"message too short\")\n\terrHeaderTooShort   = errors.New(\"header too short\")\n\terrBufferTooShort   = errors.New(\"buffer too short\")\n\terrOpNoSupport      = errors.New(\"operation not supported\")\n\terrNoExtension      = errors.New(\"no extension\")\n\terrInvalidExtension = errors.New(\"invalid extension\")\n)\n\nfunc checksum(b []byte) uint16 {\n\tcsumcv := len(b) - 1 // checksum coverage\n\ts := uint32(0)\n\tfor i := 0; i < csumcv; i += 2 {\n\t\ts += uint32(b[i+1])<<8 | uint32(b[i])\n\t}\n\tif csumcv&1 == 0 {\n\t\ts += uint32(b[csumcv])\n\t}\n\ts = s>>16 + s&0xffff\n\ts = s + s>>16\n\treturn ^uint16(s)\n}\n\n// A Type represents an ICMP message type.\ntype Type interface {\n\tProtocol() int\n}\n\n// A Message represents an ICMP message.\ntype Message struct {\n\tType     Type        // type, either ipv4.ICMPType or ipv6.ICMPType\n\tCode     int         // code\n\tChecksum int         // checksum\n\tBody     MessageBody // body\n}\n\n// Marshal returns the binary encoding of the ICMP message m.\n//\n// For an ICMPv4 message, the 
returned message always contains the\n// calculated checksum field.\n//\n// For an ICMPv6 message, the returned message contains the calculated\n// checksum field when psh is not nil, otherwise the kernel will\n// compute the checksum field during the message transmission.\n// When psh is not nil, it must be the pseudo header for IPv6.\nfunc (m *Message) Marshal(psh []byte) ([]byte, error) {\n\tvar mtype int\n\tswitch typ := m.Type.(type) {\n\tcase ipv4.ICMPType:\n\t\tmtype = int(typ)\n\tcase ipv6.ICMPType:\n\t\tmtype = int(typ)\n\tdefault:\n\t\treturn nil, syscall.EINVAL\n\t}\n\tb := []byte{byte(mtype), byte(m.Code), 0, 0}\n\tif m.Type.Protocol() == iana.ProtocolIPv6ICMP && psh != nil {\n\t\tb = append(psh, b...)\n\t}\n\tif m.Body != nil && m.Body.Len(m.Type.Protocol()) != 0 {\n\t\tmb, err := m.Body.Marshal(m.Type.Protocol())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tb = append(b, mb...)\n\t}\n\tif m.Type.Protocol() == iana.ProtocolIPv6ICMP {\n\t\tif psh == nil { // cannot calculate checksum here\n\t\t\treturn b, nil\n\t\t}\n\t\toff, l := 2*net.IPv6len, len(b)-len(psh)\n\t\tbinary.BigEndian.PutUint32(b[off:off+4], uint32(l))\n\t}\n\ts := checksum(b)\n\t// Place checksum back in header; using ^= avoids the\n\t// assumption the checksum bytes are zero.\n\tb[len(psh)+2] ^= byte(s)\n\tb[len(psh)+3] ^= byte(s >> 8)\n\treturn b[len(psh):], nil\n}\n\nvar parseFns = map[Type]func(int, []byte) (MessageBody, error){\n\tipv4.ICMPTypeDestinationUnreachable: parseDstUnreach,\n\tipv4.ICMPTypeTimeExceeded:           parseTimeExceeded,\n\tipv4.ICMPTypeParameterProblem:       parseParamProb,\n\n\tipv4.ICMPTypeEcho:      parseEcho,\n\tipv4.ICMPTypeEchoReply: parseEcho,\n\n\tipv6.ICMPTypeDestinationUnreachable: parseDstUnreach,\n\tipv6.ICMPTypePacketTooBig:           parsePacketTooBig,\n\tipv6.ICMPTypeTimeExceeded:           parseTimeExceeded,\n\tipv6.ICMPTypeParameterProblem:       parseParamProb,\n\n\tipv6.ICMPTypeEchoRequest: parseEcho,\n\tipv6.ICMPTypeEchoReply:   
parseEcho,\n}\n\n// ParseMessage parses b as an ICMP message.\n// Proto must be either the ICMPv4 or ICMPv6 protocol number.\nfunc ParseMessage(proto int, b []byte) (*Message, error) {\n\tif len(b) < 4 {\n\t\treturn nil, errMessageTooShort\n\t}\n\tvar err error\n\tm := &Message{Code: int(b[1]), Checksum: int(binary.BigEndian.Uint16(b[2:4]))}\n\tswitch proto {\n\tcase iana.ProtocolICMP:\n\t\tm.Type = ipv4.ICMPType(b[0])\n\tcase iana.ProtocolIPv6ICMP:\n\t\tm.Type = ipv6.ICMPType(b[0])\n\tdefault:\n\t\treturn nil, syscall.EINVAL\n\t}\n\tif fn, ok := parseFns[m.Type]; !ok {\n\t\tm.Body, err = parseDefaultMessageBody(proto, b[4:])\n\t} else {\n\t\tm.Body, err = fn(proto, b[4:])\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/message_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp_test\n\nimport (\n\t\"net\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"golang.org/x/net/icmp\"\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/ipv4\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nvar marshalAndParseMessageForIPv4Tests = []icmp.Message{\n\t{\n\t\tType: ipv4.ICMPTypeDestinationUnreachable, Code: 15,\n\t\tBody: &icmp.DstUnreach{\n\t\t\tData: []byte(\"ERROR-INVOKING-PACKET\"),\n\t\t},\n\t},\n\t{\n\t\tType: ipv4.ICMPTypeTimeExceeded, Code: 1,\n\t\tBody: &icmp.TimeExceeded{\n\t\t\tData: []byte(\"ERROR-INVOKING-PACKET\"),\n\t\t},\n\t},\n\t{\n\t\tType: ipv4.ICMPTypeParameterProblem, Code: 2,\n\t\tBody: &icmp.ParamProb{\n\t\t\tPointer: 8,\n\t\t\tData:    []byte(\"ERROR-INVOKING-PACKET\"),\n\t\t},\n\t},\n\t{\n\t\tType: ipv4.ICMPTypeEcho, Code: 0,\n\t\tBody: &icmp.Echo{\n\t\t\tID: 1, Seq: 2,\n\t\t\tData: []byte(\"HELLO-R-U-THERE\"),\n\t\t},\n\t},\n\t{\n\t\tType: ipv4.ICMPTypePhoturis,\n\t\tBody: &icmp.DefaultMessageBody{\n\t\t\tData: []byte{0x80, 0x40, 0x20, 0x10},\n\t\t},\n\t},\n}\n\nfunc TestMarshalAndParseMessageForIPv4(t *testing.T) {\n\tfor i, tt := range marshalAndParseMessageForIPv4Tests {\n\t\tb, err := tt.Marshal(nil)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tm, err := icmp.ParseMessage(iana.ProtocolICMP, b)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif m.Type != tt.Type || m.Code != tt.Code {\n\t\t\tt.Errorf(\"#%v: got %v; want %v\", i, m, &tt)\n\t\t}\n\t\tif !reflect.DeepEqual(m.Body, tt.Body) {\n\t\t\tt.Errorf(\"#%v: got %v; want %v\", i, m.Body, tt.Body)\n\t\t}\n\t}\n}\n\nvar marshalAndParseMessageForIPv6Tests = []icmp.Message{\n\t{\n\t\tType: ipv6.ICMPTypeDestinationUnreachable, Code: 6,\n\t\tBody: &icmp.DstUnreach{\n\t\t\tData: []byte(\"ERROR-INVOKING-PACKET\"),\n\t\t},\n\t},\n\t{\n\t\tType: ipv6.ICMPTypePacketTooBig, Code: 0,\n\t\tBody: 
&icmp.PacketTooBig{\n\t\t\tMTU:  1<<16 - 1,\n\t\t\tData: []byte(\"ERROR-INVOKING-PACKET\"),\n\t\t},\n\t},\n\t{\n\t\tType: ipv6.ICMPTypeTimeExceeded, Code: 1,\n\t\tBody: &icmp.TimeExceeded{\n\t\t\tData: []byte(\"ERROR-INVOKING-PACKET\"),\n\t\t},\n\t},\n\t{\n\t\tType: ipv6.ICMPTypeParameterProblem, Code: 2,\n\t\tBody: &icmp.ParamProb{\n\t\t\tPointer: 8,\n\t\t\tData:    []byte(\"ERROR-INVOKING-PACKET\"),\n\t\t},\n\t},\n\t{\n\t\tType: ipv6.ICMPTypeEchoRequest, Code: 0,\n\t\tBody: &icmp.Echo{\n\t\t\tID: 1, Seq: 2,\n\t\t\tData: []byte(\"HELLO-R-U-THERE\"),\n\t\t},\n\t},\n\t{\n\t\tType: ipv6.ICMPTypeDuplicateAddressConfirmation,\n\t\tBody: &icmp.DefaultMessageBody{\n\t\t\tData: []byte{0x80, 0x40, 0x20, 0x10},\n\t\t},\n\t},\n}\n\nfunc TestMarshalAndParseMessageForIPv6(t *testing.T) {\n\tpshicmp := icmp.IPv6PseudoHeader(net.ParseIP(\"fe80::1\"), net.ParseIP(\"ff02::1\"))\n\tfor i, tt := range marshalAndParseMessageForIPv6Tests {\n\t\tfor _, psh := range [][]byte{pshicmp, nil} {\n\t\t\tb, err := tt.Marshal(psh)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tm, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif m.Type != tt.Type || m.Code != tt.Code {\n\t\t\t\tt.Errorf(\"#%v: got %v; want %v\", i, m, &tt)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(m.Body, tt.Body) {\n\t\t\t\tt.Errorf(\"#%v: got %v; want %v\", i, m.Body, tt.Body)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/messagebody.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp\n\n// A MessageBody represents an ICMP message body.\ntype MessageBody interface {\n\t// Len returns the length of ICMP message body.\n\t// Proto must be either the ICMPv4 or ICMPv6 protocol number.\n\tLen(proto int) int\n\n\t// Marshal returns the binary encoding of ICMP message body.\n\t// Proto must be either the ICMPv4 or ICMPv6 protocol number.\n\tMarshal(proto int) ([]byte, error)\n}\n\n// A DefaultMessageBody represents the default message body.\ntype DefaultMessageBody struct {\n\tData []byte // data\n}\n\n// Len implements the Len method of MessageBody interface.\nfunc (p *DefaultMessageBody) Len(proto int) int {\n\tif p == nil {\n\t\treturn 0\n\t}\n\treturn len(p.Data)\n}\n\n// Marshal implements the Marshal method of MessageBody interface.\nfunc (p *DefaultMessageBody) Marshal(proto int) ([]byte, error) {\n\treturn p.Data, nil\n}\n\n// parseDefaultMessageBody parses b as an ICMP message body.\nfunc parseDefaultMessageBody(proto int, b []byte) (MessageBody, error) {\n\tp := &DefaultMessageBody{Data: make([]byte, len(b))}\n\tcopy(p.Data, b)\n\treturn p, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/mpls.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp\n\nimport \"encoding/binary\"\n\n// A MPLSLabel represents a MPLS label stack entry.\ntype MPLSLabel struct {\n\tLabel int  // label value\n\tTC    int  // traffic class; formerly experimental use\n\tS     bool // bottom of stack\n\tTTL   int  // time to live\n}\n\nconst (\n\tclassMPLSLabelStack        = 1\n\ttypeIncomingMPLSLabelStack = 1\n)\n\n// A MPLSLabelStack represents a MPLS label stack.\ntype MPLSLabelStack struct {\n\tClass  int // extension object class number\n\tType   int // extension object sub-type\n\tLabels []MPLSLabel\n}\n\n// Len implements the Len method of Extension interface.\nfunc (ls *MPLSLabelStack) Len(proto int) int {\n\treturn 4 + (4 * len(ls.Labels))\n}\n\n// Marshal implements the Marshal method of Extension interface.\nfunc (ls *MPLSLabelStack) Marshal(proto int) ([]byte, error) {\n\tb := make([]byte, ls.Len(proto))\n\tif err := ls.marshal(proto, b); err != nil {\n\t\treturn nil, err\n\t}\n\treturn b, nil\n}\n\nfunc (ls *MPLSLabelStack) marshal(proto int, b []byte) error {\n\tl := ls.Len(proto)\n\tbinary.BigEndian.PutUint16(b[:2], uint16(l))\n\tb[2], b[3] = classMPLSLabelStack, typeIncomingMPLSLabelStack\n\toff := 4\n\tfor _, ll := range ls.Labels {\n\t\tb[off], b[off+1], b[off+2] = byte(ll.Label>>12), byte(ll.Label>>4&0xff), byte(ll.Label<<4&0xf0)\n\t\tb[off+2] |= byte(ll.TC << 1 & 0x0e)\n\t\tif ll.S {\n\t\t\tb[off+2] |= 0x1\n\t\t}\n\t\tb[off+3] = byte(ll.TTL)\n\t\toff += 4\n\t}\n\treturn nil\n}\n\nfunc parseMPLSLabelStack(b []byte) (Extension, error) {\n\tls := &MPLSLabelStack{\n\t\tClass: int(b[2]),\n\t\tType:  int(b[3]),\n\t}\n\tfor b = b[4:]; len(b) >= 4; b = b[4:] {\n\t\tll := MPLSLabel{\n\t\t\tLabel: int(b[0])<<12 | int(b[1])<<4 | int(b[2])>>4,\n\t\t\tTC:    int(b[2]&0x0e) >> 1,\n\t\t\tTTL:   int(b[3]),\n\t\t}\n\t\tif b[2]&0x1 != 0 
{\n\t\t\tll.S = true\n\t\t}\n\t\tls.Labels = append(ls.Labels, ll)\n\t}\n\treturn ls, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/multipart.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp\n\nimport \"golang.org/x/net/internal/iana\"\n\n// multipartMessageBodyDataLen takes b as an original datagram and\n// exts as extensions, and returns a required length for message body\n// and a required length for a padded original datagram in wire\n// format.\nfunc multipartMessageBodyDataLen(proto int, b []byte, exts []Extension) (bodyLen, dataLen int) {\n\tfor _, ext := range exts {\n\t\tbodyLen += ext.Len(proto)\n\t}\n\tif bodyLen > 0 {\n\t\tdataLen = multipartMessageOrigDatagramLen(proto, b)\n\t\tbodyLen += 4 // length of extension header\n\t} else {\n\t\tdataLen = len(b)\n\t}\n\tbodyLen += dataLen\n\treturn bodyLen, dataLen\n}\n\n// multipartMessageOrigDatagramLen takes b as an original datagram,\n// and returns a required length for a padded orignal datagram in wire\n// format.\nfunc multipartMessageOrigDatagramLen(proto int, b []byte) int {\n\troundup := func(b []byte, align int) int {\n\t\t// According to RFC 4884, the padded original datagram\n\t\t// field must contain at least 128 octets.\n\t\tif len(b) < 128 {\n\t\t\treturn 128\n\t\t}\n\t\tr := len(b)\n\t\treturn (r + align - 1) & ^(align - 1)\n\t}\n\tswitch proto {\n\tcase iana.ProtocolICMP:\n\t\treturn roundup(b, 4)\n\tcase iana.ProtocolIPv6ICMP:\n\t\treturn roundup(b, 8)\n\tdefault:\n\t\treturn len(b)\n\t}\n}\n\n// marshalMultipartMessageBody takes data as an original datagram and\n// exts as extesnsions, and returns a binary encoding of message body.\n// It can be used for non-multipart message bodies when exts is nil.\nfunc marshalMultipartMessageBody(proto int, data []byte, exts []Extension) ([]byte, error) {\n\tbodyLen, dataLen := multipartMessageBodyDataLen(proto, data, exts)\n\tb := make([]byte, 4+bodyLen)\n\tcopy(b[4:], data)\n\toff := dataLen + 4\n\tif len(exts) > 0 {\n\t\tb[dataLen+4] = 
byte(extensionVersion << 4)\n\t\toff += 4 // length of object header\n\t\tfor _, ext := range exts {\n\t\t\tswitch ext := ext.(type) {\n\t\t\tcase *MPLSLabelStack:\n\t\t\t\tif err := ext.marshal(proto, b[off:]); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\toff += ext.Len(proto)\n\t\t\tcase *InterfaceInfo:\n\t\t\t\tattrs, l := ext.attrsAndLen(proto)\n\t\t\t\tif err := ext.marshal(proto, b[off:], attrs, l); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\toff += ext.Len(proto)\n\t\t\t}\n\t\t}\n\t\ts := checksum(b[dataLen+4:])\n\t\tb[dataLen+4+2] ^= byte(s)\n\t\tb[dataLen+4+3] ^= byte(s >> 8)\n\t\tswitch proto {\n\t\tcase iana.ProtocolICMP:\n\t\t\tb[1] = byte(dataLen / 4)\n\t\tcase iana.ProtocolIPv6ICMP:\n\t\t\tb[0] = byte(dataLen / 8)\n\t\t}\n\t}\n\treturn b, nil\n}\n\n// parseMultipartMessageBody parses b as either a non-multipart\n// message body or a multipart message body.\nfunc parseMultipartMessageBody(proto int, b []byte) ([]byte, []Extension, error) {\n\tvar l int\n\tswitch proto {\n\tcase iana.ProtocolICMP:\n\t\tl = 4 * int(b[1])\n\tcase iana.ProtocolIPv6ICMP:\n\t\tl = 8 * int(b[0])\n\t}\n\tif len(b) == 4 {\n\t\treturn nil, nil, nil\n\t}\n\texts, l, err := parseExtensions(b[4:], l)\n\tif err != nil {\n\t\tl = len(b) - 4\n\t}\n\tdata := make([]byte, l)\n\tcopy(data, b[4:])\n\treturn data, exts, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/multipart_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp_test\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"golang.org/x/net/icmp\"\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/ipv4\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nvar marshalAndParseMultipartMessageForIPv4Tests = []icmp.Message{\n\t{\n\t\tType: ipv4.ICMPTypeDestinationUnreachable, Code: 15,\n\t\tBody: &icmp.DstUnreach{\n\t\t\tData: []byte(\"ERROR-INVOKING-PACKET\"),\n\t\t\tExtensions: []icmp.Extension{\n\t\t\t\t&icmp.MPLSLabelStack{\n\t\t\t\t\tClass: 1,\n\t\t\t\t\tType:  1,\n\t\t\t\t\tLabels: []icmp.MPLSLabel{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tLabel: 16014,\n\t\t\t\t\t\t\tTC:    0x4,\n\t\t\t\t\t\t\tS:     true,\n\t\t\t\t\t\t\tTTL:   255,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&icmp.InterfaceInfo{\n\t\t\t\t\tClass: 2,\n\t\t\t\t\tType:  0x0f,\n\t\t\t\t\tInterface: &net.Interface{\n\t\t\t\t\t\tIndex: 15,\n\t\t\t\t\t\tName:  \"en101\",\n\t\t\t\t\t\tMTU:   8192,\n\t\t\t\t\t},\n\t\t\t\t\tAddr: &net.IPAddr{\n\t\t\t\t\t\tIP: net.IPv4(192, 168, 0, 1).To4(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tType: ipv4.ICMPTypeTimeExceeded, Code: 1,\n\t\tBody: &icmp.TimeExceeded{\n\t\t\tData: []byte(\"ERROR-INVOKING-PACKET\"),\n\t\t\tExtensions: []icmp.Extension{\n\t\t\t\t&icmp.InterfaceInfo{\n\t\t\t\t\tClass: 2,\n\t\t\t\t\tType:  0x0f,\n\t\t\t\t\tInterface: &net.Interface{\n\t\t\t\t\t\tIndex: 15,\n\t\t\t\t\t\tName:  \"en101\",\n\t\t\t\t\t\tMTU:   8192,\n\t\t\t\t\t},\n\t\t\t\t\tAddr: &net.IPAddr{\n\t\t\t\t\t\tIP: net.IPv4(192, 168, 0, 1).To4(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&icmp.MPLSLabelStack{\n\t\t\t\t\tClass: 1,\n\t\t\t\t\tType:  1,\n\t\t\t\t\tLabels: []icmp.MPLSLabel{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tLabel: 16014,\n\t\t\t\t\t\t\tTC:    0x4,\n\t\t\t\t\t\t\tS:     true,\n\t\t\t\t\t\t\tTTL:   
255,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tType: ipv4.ICMPTypeParameterProblem, Code: 2,\n\t\tBody: &icmp.ParamProb{\n\t\t\tPointer: 8,\n\t\t\tData:    []byte(\"ERROR-INVOKING-PACKET\"),\n\t\t\tExtensions: []icmp.Extension{\n\t\t\t\t&icmp.MPLSLabelStack{\n\t\t\t\t\tClass: 1,\n\t\t\t\t\tType:  1,\n\t\t\t\t\tLabels: []icmp.MPLSLabel{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tLabel: 16014,\n\t\t\t\t\t\t\tTC:    0x4,\n\t\t\t\t\t\t\tS:     true,\n\t\t\t\t\t\t\tTTL:   255,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&icmp.InterfaceInfo{\n\t\t\t\t\tClass: 2,\n\t\t\t\t\tType:  0x0f,\n\t\t\t\t\tInterface: &net.Interface{\n\t\t\t\t\t\tIndex: 15,\n\t\t\t\t\t\tName:  \"en101\",\n\t\t\t\t\t\tMTU:   8192,\n\t\t\t\t\t},\n\t\t\t\t\tAddr: &net.IPAddr{\n\t\t\t\t\t\tIP: net.IPv4(192, 168, 0, 1).To4(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&icmp.InterfaceInfo{\n\t\t\t\t\tClass: 2,\n\t\t\t\t\tType:  0x2f,\n\t\t\t\t\tInterface: &net.Interface{\n\t\t\t\t\t\tIndex: 16,\n\t\t\t\t\t\tName:  \"en102\",\n\t\t\t\t\t\tMTU:   8192,\n\t\t\t\t\t},\n\t\t\t\t\tAddr: &net.IPAddr{\n\t\t\t\t\t\tIP: net.IPv4(192, 168, 0, 2).To4(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestMarshalAndParseMultipartMessageForIPv4(t *testing.T) {\n\tfor i, tt := range marshalAndParseMultipartMessageForIPv4Tests {\n\t\tb, err := tt.Marshal(nil)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif b[5] != 32 {\n\t\t\tt.Errorf(\"#%v: got %v; want 32\", i, b[5])\n\t\t}\n\t\tm, err := icmp.ParseMessage(iana.ProtocolICMP, b)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif m.Type != tt.Type || m.Code != tt.Code {\n\t\t\tt.Errorf(\"#%v: got %v; want %v\", i, m, &tt)\n\t\t}\n\t\tswitch m.Type {\n\t\tcase ipv4.ICMPTypeDestinationUnreachable:\n\t\t\tgot, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach)\n\t\t\tif !reflect.DeepEqual(got.Extensions, want.Extensions) {\n\t\t\t\tt.Error(dumpExtensions(i, got.Extensions, want.Extensions))\n\t\t\t}\n\t\t\tif 
len(got.Data) != 128 {\n\t\t\t\tt.Errorf(\"#%v: got %v; want 128\", i, len(got.Data))\n\t\t\t}\n\t\tcase ipv4.ICMPTypeTimeExceeded:\n\t\t\tgot, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded)\n\t\t\tif !reflect.DeepEqual(got.Extensions, want.Extensions) {\n\t\t\t\tt.Error(dumpExtensions(i, got.Extensions, want.Extensions))\n\t\t\t}\n\t\t\tif len(got.Data) != 128 {\n\t\t\t\tt.Errorf(\"#%v: got %v; want 128\", i, len(got.Data))\n\t\t\t}\n\t\tcase ipv4.ICMPTypeParameterProblem:\n\t\t\tgot, want := m.Body.(*icmp.ParamProb), tt.Body.(*icmp.ParamProb)\n\t\t\tif !reflect.DeepEqual(got.Extensions, want.Extensions) {\n\t\t\t\tt.Error(dumpExtensions(i, got.Extensions, want.Extensions))\n\t\t\t}\n\t\t\tif len(got.Data) != 128 {\n\t\t\t\tt.Errorf(\"#%v: got %v; want 128\", i, len(got.Data))\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar marshalAndParseMultipartMessageForIPv6Tests = []icmp.Message{\n\t{\n\t\tType: ipv6.ICMPTypeDestinationUnreachable, Code: 6,\n\t\tBody: &icmp.DstUnreach{\n\t\t\tData: []byte(\"ERROR-INVOKING-PACKET\"),\n\t\t\tExtensions: []icmp.Extension{\n\t\t\t\t&icmp.MPLSLabelStack{\n\t\t\t\t\tClass: 1,\n\t\t\t\t\tType:  1,\n\t\t\t\t\tLabels: []icmp.MPLSLabel{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tLabel: 16014,\n\t\t\t\t\t\t\tTC:    0x4,\n\t\t\t\t\t\t\tS:     true,\n\t\t\t\t\t\t\tTTL:   255,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&icmp.InterfaceInfo{\n\t\t\t\t\tClass: 2,\n\t\t\t\t\tType:  0x0f,\n\t\t\t\t\tInterface: &net.Interface{\n\t\t\t\t\t\tIndex: 15,\n\t\t\t\t\t\tName:  \"en101\",\n\t\t\t\t\t\tMTU:   8192,\n\t\t\t\t\t},\n\t\t\t\t\tAddr: &net.IPAddr{\n\t\t\t\t\t\tIP:   net.ParseIP(\"fe80::1\"),\n\t\t\t\t\t\tZone: \"en101\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tType: ipv6.ICMPTypeTimeExceeded, Code: 1,\n\t\tBody: &icmp.TimeExceeded{\n\t\t\tData: []byte(\"ERROR-INVOKING-PACKET\"),\n\t\t\tExtensions: []icmp.Extension{\n\t\t\t\t&icmp.InterfaceInfo{\n\t\t\t\t\tClass: 2,\n\t\t\t\t\tType:  0x0f,\n\t\t\t\t\tInterface: 
&net.Interface{\n\t\t\t\t\t\tIndex: 15,\n\t\t\t\t\t\tName:  \"en101\",\n\t\t\t\t\t\tMTU:   8192,\n\t\t\t\t\t},\n\t\t\t\t\tAddr: &net.IPAddr{\n\t\t\t\t\t\tIP:   net.ParseIP(\"fe80::1\"),\n\t\t\t\t\t\tZone: \"en101\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&icmp.MPLSLabelStack{\n\t\t\t\t\tClass: 1,\n\t\t\t\t\tType:  1,\n\t\t\t\t\tLabels: []icmp.MPLSLabel{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tLabel: 16014,\n\t\t\t\t\t\t\tTC:    0x4,\n\t\t\t\t\t\t\tS:     true,\n\t\t\t\t\t\t\tTTL:   255,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&icmp.InterfaceInfo{\n\t\t\t\t\tClass: 2,\n\t\t\t\t\tType:  0x2f,\n\t\t\t\t\tInterface: &net.Interface{\n\t\t\t\t\t\tIndex: 16,\n\t\t\t\t\t\tName:  \"en102\",\n\t\t\t\t\t\tMTU:   8192,\n\t\t\t\t\t},\n\t\t\t\t\tAddr: &net.IPAddr{\n\t\t\t\t\t\tIP:   net.ParseIP(\"fe80::1\"),\n\t\t\t\t\t\tZone: \"en102\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestMarshalAndParseMultipartMessageForIPv6(t *testing.T) {\n\tpshicmp := icmp.IPv6PseudoHeader(net.ParseIP(\"fe80::1\"), net.ParseIP(\"ff02::1\"))\n\tfor i, tt := range marshalAndParseMultipartMessageForIPv6Tests {\n\t\tfor _, psh := range [][]byte{pshicmp, nil} {\n\t\t\tb, err := tt.Marshal(psh)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif b[4] != 16 {\n\t\t\t\tt.Errorf(\"#%v: got %v; want 16\", i, b[4])\n\t\t\t}\n\t\t\tm, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif m.Type != tt.Type || m.Code != tt.Code {\n\t\t\t\tt.Errorf(\"#%v: got %v; want %v\", i, m, &tt)\n\t\t\t}\n\t\t\tswitch m.Type {\n\t\t\tcase ipv6.ICMPTypeDestinationUnreachable:\n\t\t\t\tgot, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach)\n\t\t\t\tif !reflect.DeepEqual(got.Extensions, want.Extensions) {\n\t\t\t\t\tt.Error(dumpExtensions(i, got.Extensions, want.Extensions))\n\t\t\t\t}\n\t\t\t\tif len(got.Data) != 128 {\n\t\t\t\t\tt.Errorf(\"#%v: got %v; want 128\", i, len(got.Data))\n\t\t\t\t}\n\t\t\tcase 
ipv6.ICMPTypeTimeExceeded:\n\t\t\t\tgot, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded)\n\t\t\t\tif !reflect.DeepEqual(got.Extensions, want.Extensions) {\n\t\t\t\t\tt.Error(dumpExtensions(i, got.Extensions, want.Extensions))\n\t\t\t\t}\n\t\t\t\tif len(got.Data) != 128 {\n\t\t\t\t\tt.Errorf(\"#%v: got %v; want 128\", i, len(got.Data))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc dumpExtensions(i int, gotExts, wantExts []icmp.Extension) string {\n\tvar s string\n\tfor j, got := range gotExts {\n\t\tswitch got := got.(type) {\n\t\tcase *icmp.MPLSLabelStack:\n\t\t\twant := wantExts[j].(*icmp.MPLSLabelStack)\n\t\t\tif !reflect.DeepEqual(got, want) {\n\t\t\t\ts += fmt.Sprintf(\"#%v/%v: got %#v; want %#v\\n\", i, j, got, want)\n\t\t\t}\n\t\tcase *icmp.InterfaceInfo:\n\t\t\twant := wantExts[j].(*icmp.InterfaceInfo)\n\t\t\tif !reflect.DeepEqual(got, want) {\n\t\t\t\ts += fmt.Sprintf(\"#%v/%v: got %#v, %#v, %#v; want %#v, %#v, %#v\\n\", i, j, got, got.Interface, got.Addr, want, want.Interface, want.Addr)\n\t\t\t}\n\t\t}\n\t}\n\treturn s[:len(s)-1]\n}\n\nvar multipartMessageBodyLenTests = []struct {\n\tproto int\n\tin    icmp.MessageBody\n\tout   int\n}{\n\t{\n\t\tiana.ProtocolICMP,\n\t\t&icmp.DstUnreach{\n\t\t\tData: make([]byte, ipv4.HeaderLen),\n\t\t},\n\t\t4 + ipv4.HeaderLen, // unused and original datagram\n\t},\n\t{\n\t\tiana.ProtocolICMP,\n\t\t&icmp.TimeExceeded{\n\t\t\tData: make([]byte, ipv4.HeaderLen),\n\t\t},\n\t\t4 + ipv4.HeaderLen, // unused and original datagram\n\t},\n\t{\n\t\tiana.ProtocolICMP,\n\t\t&icmp.ParamProb{\n\t\t\tData: make([]byte, ipv4.HeaderLen),\n\t\t},\n\t\t4 + ipv4.HeaderLen, // [pointer, unused] and original datagram\n\t},\n\n\t{\n\t\tiana.ProtocolICMP,\n\t\t&icmp.ParamProb{\n\t\t\tData: make([]byte, ipv4.HeaderLen),\n\t\t\tExtensions: []icmp.Extension{\n\t\t\t\t&icmp.MPLSLabelStack{},\n\t\t\t},\n\t\t},\n\t\t4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload, original 
datagram\n\t},\n\t{\n\t\tiana.ProtocolICMP,\n\t\t&icmp.ParamProb{\n\t\t\tData: make([]byte, 128),\n\t\t\tExtensions: []icmp.Extension{\n\t\t\t\t&icmp.MPLSLabelStack{},\n\t\t\t},\n\t\t},\n\t\t4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload and original datagram\n\t},\n\t{\n\t\tiana.ProtocolICMP,\n\t\t&icmp.ParamProb{\n\t\t\tData: make([]byte, 129),\n\t\t\tExtensions: []icmp.Extension{\n\t\t\t\t&icmp.MPLSLabelStack{},\n\t\t\t},\n\t\t},\n\t\t4 + 4 + 4 + 0 + 132, // [pointer, length, unused], extension header, object header, object payload and original datagram\n\t},\n\n\t{\n\t\tiana.ProtocolIPv6ICMP,\n\t\t&icmp.DstUnreach{\n\t\t\tData: make([]byte, ipv6.HeaderLen),\n\t\t},\n\t\t4 + ipv6.HeaderLen, // unused and original datagram\n\t},\n\t{\n\t\tiana.ProtocolIPv6ICMP,\n\t\t&icmp.PacketTooBig{\n\t\t\tData: make([]byte, ipv6.HeaderLen),\n\t\t},\n\t\t4 + ipv6.HeaderLen, // mtu and original datagram\n\t},\n\t{\n\t\tiana.ProtocolIPv6ICMP,\n\t\t&icmp.TimeExceeded{\n\t\t\tData: make([]byte, ipv6.HeaderLen),\n\t\t},\n\t\t4 + ipv6.HeaderLen, // unused and original datagram\n\t},\n\t{\n\t\tiana.ProtocolIPv6ICMP,\n\t\t&icmp.ParamProb{\n\t\t\tData: make([]byte, ipv6.HeaderLen),\n\t\t},\n\t\t4 + ipv6.HeaderLen, // pointer and original datagram\n\t},\n\n\t{\n\t\tiana.ProtocolIPv6ICMP,\n\t\t&icmp.DstUnreach{\n\t\t\tData: make([]byte, 127),\n\t\t\tExtensions: []icmp.Extension{\n\t\t\t\t&icmp.MPLSLabelStack{},\n\t\t\t},\n\t\t},\n\t\t4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram\n\t},\n\t{\n\t\tiana.ProtocolIPv6ICMP,\n\t\t&icmp.DstUnreach{\n\t\t\tData: make([]byte, 128),\n\t\t\tExtensions: []icmp.Extension{\n\t\t\t\t&icmp.MPLSLabelStack{},\n\t\t\t},\n\t\t},\n\t\t4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram\n\t},\n\t{\n\t\tiana.ProtocolIPv6ICMP,\n\t\t&icmp.DstUnreach{\n\t\t\tData: make([]byte, 
129),\n\t\t\tExtensions: []icmp.Extension{\n\t\t\t\t&icmp.MPLSLabelStack{},\n\t\t\t},\n\t\t},\n\t\t4 + 4 + 4 + 0 + 136, // [length, unused], extension header, object header, object payload and original datagram\n\t},\n}\n\nfunc TestMultipartMessageBodyLen(t *testing.T) {\n\tfor i, tt := range multipartMessageBodyLenTests {\n\t\tif out := tt.in.Len(tt.proto); out != tt.out {\n\t\t\tt.Errorf(\"#%d: got %d; want %d\", i, out, tt.out)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/packettoobig.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp\n\nimport \"encoding/binary\"\n\n// A PacketTooBig represents an ICMP packet too big message body.\ntype PacketTooBig struct {\n\tMTU  int    // maximum transmission unit of the nexthop link\n\tData []byte // data, known as original datagram field\n}\n\n// Len implements the Len method of MessageBody interface.\nfunc (p *PacketTooBig) Len(proto int) int {\n\tif p == nil {\n\t\treturn 0\n\t}\n\treturn 4 + len(p.Data)\n}\n\n// Marshal implements the Marshal method of MessageBody interface.\nfunc (p *PacketTooBig) Marshal(proto int) ([]byte, error) {\n\tb := make([]byte, 4+len(p.Data))\n\tbinary.BigEndian.PutUint32(b[:4], uint32(p.MTU))\n\tcopy(b[4:], p.Data)\n\treturn b, nil\n}\n\n// parsePacketTooBig parses b as an ICMP packet too big message body.\nfunc parsePacketTooBig(proto int, b []byte) (MessageBody, error) {\n\tbodyLen := len(b)\n\tif bodyLen < 4 {\n\t\treturn nil, errMessageTooShort\n\t}\n\tp := &PacketTooBig{MTU: int(binary.BigEndian.Uint32(b[:4]))}\n\tif bodyLen > 4 {\n\t\tp.Data = make([]byte, bodyLen-4)\n\t\tcopy(p.Data, b[4:])\n\t}\n\treturn p, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/paramprob.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp\n\nimport (\n\t\"encoding/binary\"\n\t\"golang.org/x/net/internal/iana\"\n)\n\n// A ParamProb represents an ICMP parameter problem message body.\ntype ParamProb struct {\n\tPointer    uintptr     // offset within the data where the error was detected\n\tData       []byte      // data, known as original datagram field\n\tExtensions []Extension // extensions\n}\n\n// Len implements the Len method of MessageBody interface.\nfunc (p *ParamProb) Len(proto int) int {\n\tif p == nil {\n\t\treturn 0\n\t}\n\tl, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions)\n\treturn 4 + l\n}\n\n// Marshal implements the Marshal method of MessageBody interface.\nfunc (p *ParamProb) Marshal(proto int) ([]byte, error) {\n\tif proto == iana.ProtocolIPv6ICMP {\n\t\tb := make([]byte, p.Len(proto))\n\t\tbinary.BigEndian.PutUint32(b[:4], uint32(p.Pointer))\n\t\tcopy(b[4:], p.Data)\n\t\treturn b, nil\n\t}\n\tb, err := marshalMultipartMessageBody(proto, p.Data, p.Extensions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb[0] = byte(p.Pointer)\n\treturn b, nil\n}\n\n// parseParamProb parses b as an ICMP parameter problem message body.\nfunc parseParamProb(proto int, b []byte) (MessageBody, error) {\n\tif len(b) < 4 {\n\t\treturn nil, errMessageTooShort\n\t}\n\tp := &ParamProb{}\n\tif proto == iana.ProtocolIPv6ICMP {\n\t\tp.Pointer = uintptr(binary.BigEndian.Uint32(b[:4]))\n\t\tp.Data = make([]byte, len(b)-4)\n\t\tcopy(p.Data, b[4:])\n\t\treturn p, nil\n\t}\n\tp.Pointer = uintptr(b[0])\n\tvar err error\n\tp.Data, p.Extensions, err = parseMultipartMessageBody(proto, b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/ping_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp_test\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org/x/net/icmp\"\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv4\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nfunc googleAddr(c *icmp.PacketConn, protocol int) (net.Addr, error) {\n\tconst host = \"www.google.com\"\n\tips, err := net.LookupIP(host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnetaddr := func(ip net.IP) (net.Addr, error) {\n\t\tswitch c.LocalAddr().(type) {\n\t\tcase *net.UDPAddr:\n\t\t\treturn &net.UDPAddr{IP: ip}, nil\n\t\tcase *net.IPAddr:\n\t\t\treturn &net.IPAddr{IP: ip}, nil\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"neither UDPAddr nor IPAddr\")\n\t\t}\n\t}\n\tfor _, ip := range ips {\n\t\tswitch protocol {\n\t\tcase iana.ProtocolICMP:\n\t\t\tif ip.To4() != nil {\n\t\t\t\treturn netaddr(ip)\n\t\t\t}\n\t\tcase iana.ProtocolIPv6ICMP:\n\t\t\tif ip.To16() != nil && ip.To4() == nil {\n\t\t\t\treturn netaddr(ip)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, errors.New(\"no A or AAAA record\")\n}\n\ntype pingTest struct {\n\tnetwork, address string\n\tprotocol         int\n\tmtype            icmp.Type\n}\n\nvar nonPrivilegedPingTests = []pingTest{\n\t{\"udp4\", \"0.0.0.0\", iana.ProtocolICMP, ipv4.ICMPTypeEcho},\n\n\t{\"udp6\", \"::\", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest},\n}\n\nfunc TestNonPrivilegedPing(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"avoid external network\")\n\t}\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\tcase \"linux\":\n\t\tt.Log(\"you may need to adjust the net.ipv4.ping_group_range kernel state\")\n\tdefault:\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\n\tfor i, tt := range nonPrivilegedPingTests {\n\t\tif err := doPing(tt, 
i); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n}\n\nvar privilegedPingTests = []pingTest{\n\t{\"ip4:icmp\", \"0.0.0.0\", iana.ProtocolICMP, ipv4.ICMPTypeEcho},\n\n\t{\"ip6:ipv6-icmp\", \"::\", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest},\n}\n\nfunc TestPrivilegedPing(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"avoid external network\")\n\t}\n\tif m, ok := nettest.SupportsRawIPSocket(); !ok {\n\t\tt.Skip(m)\n\t}\n\n\tfor i, tt := range privilegedPingTests {\n\t\tif err := doPing(tt, i); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n}\n\nfunc doPing(tt pingTest, seq int) error {\n\tc, err := icmp.ListenPacket(tt.network, tt.address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer c.Close()\n\n\tdst, err := googleAddr(c, tt.protocol)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif tt.network != \"udp6\" && tt.protocol == iana.ProtocolIPv6ICMP {\n\t\tvar f ipv6.ICMPFilter\n\t\tf.SetAll(true)\n\t\tf.Accept(ipv6.ICMPTypeDestinationUnreachable)\n\t\tf.Accept(ipv6.ICMPTypePacketTooBig)\n\t\tf.Accept(ipv6.ICMPTypeTimeExceeded)\n\t\tf.Accept(ipv6.ICMPTypeParameterProblem)\n\t\tf.Accept(ipv6.ICMPTypeEchoReply)\n\t\tif err := c.IPv6PacketConn().SetICMPFilter(&f); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\twm := icmp.Message{\n\t\tType: tt.mtype, Code: 0,\n\t\tBody: &icmp.Echo{\n\t\t\tID: os.Getpid() & 0xffff, Seq: 1 << uint(seq),\n\t\t\tData: []byte(\"HELLO-R-U-THERE\"),\n\t\t},\n\t}\n\twb, err := wm.Marshal(nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif n, err := c.WriteTo(wb, dst); err != nil {\n\t\treturn err\n\t} else if n != len(wb) {\n\t\treturn fmt.Errorf(\"got %v; want %v\", n, len(wb))\n\t}\n\n\trb := make([]byte, 1500)\n\tif err := c.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil {\n\t\treturn err\n\t}\n\tn, peer, err := c.ReadFrom(rb)\n\tif err != nil {\n\t\treturn err\n\t}\n\trm, err := icmp.ParseMessage(tt.protocol, rb[:n])\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch rm.Type {\n\tcase ipv4.ICMPTypeEchoReply, 
ipv6.ICMPTypeEchoReply:\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"got %+v from %v; want echo reply\", rm, peer)\n\t}\n}\n\nfunc TestConcurrentNonPrivilegedListenPacket(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"avoid external network\")\n\t}\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\tcase \"linux\":\n\t\tt.Log(\"you may need to adjust the net.ipv4.ping_group_range kernel state\")\n\tdefault:\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\n\tnetwork, address := \"udp4\", \"127.0.0.1\"\n\tif !nettest.SupportsIPv4() {\n\t\tnetwork, address = \"udp6\", \"::1\"\n\t}\n\tconst N = 1000\n\tvar wg sync.WaitGroup\n\twg.Add(N)\n\tfor i := 0; i < N; i++ {\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tc, err := icmp.ListenPacket(network, address)\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tc.Close()\n\t\t}()\n\t}\n\twg.Wait()\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/sys_freebsd.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp\n\nimport \"syscall\"\n\nfunc init() {\n\tfreebsdVersion, _ = syscall.SysctlUint32(\"kern.osreldate\")\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/icmp/timeexceeded.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage icmp\n\n// A TimeExceeded represents an ICMP time exceeded message body.\ntype TimeExceeded struct {\n\tData       []byte      // data, known as original datagram field\n\tExtensions []Extension // extensions\n}\n\n// Len implements the Len method of MessageBody interface.\nfunc (p *TimeExceeded) Len(proto int) int {\n\tif p == nil {\n\t\treturn 0\n\t}\n\tl, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions)\n\treturn 4 + l\n}\n\n// Marshal implements the Marshal method of MessageBody interface.\nfunc (p *TimeExceeded) Marshal(proto int) ([]byte, error) {\n\treturn marshalMultipartMessageBody(proto, p.Data, p.Extensions)\n}\n\n// parseTimeExceeded parses b as an ICMP time exceeded message body.\nfunc parseTimeExceeded(proto int, b []byte) (MessageBody, error) {\n\tif len(b) < 4 {\n\t\treturn nil, errMessageTooShort\n\t}\n\tp := &TimeExceeded{}\n\tvar err error\n\tp.Data, p.Extensions, err = parseMultipartMessageBody(proto, b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/idna/example_test.go",
    "content": "// Code generated by running \"go generate\" in golang.org/x/text. DO NOT EDIT.\n\n// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage idna_test\n\nimport (\n\t\"fmt\"\n\n\t\"golang.org/x/net/idna\"\n)\n\nfunc ExampleProfile() {\n\t// Raw Punycode has no restrictions and does no mappings.\n\tfmt.Println(idna.ToASCII(\"\"))\n\tfmt.Println(idna.ToASCII(\"*.faß.com\"))\n\tfmt.Println(idna.Punycode.ToASCII(\"*.faß.com\"))\n\n\t// Rewrite IDN for lookup. This (currently) uses transitional mappings to\n\t// find a balance between IDNA2003 and IDNA2008 compatibility.\n\tfmt.Println(idna.Lookup.ToASCII(\"\"))\n\tfmt.Println(idna.Lookup.ToASCII(\"www.faß.com\"))\n\n\t// Convert an IDN to ASCII for registration purposes. This changes the\n\t// encoding, but reports an error if the input was illformed.\n\tfmt.Println(idna.Registration.ToASCII(\"\"))\n\tfmt.Println(idna.Registration.ToASCII(\"www.faß.com\"))\n\n\t// Output:\n\t//  <nil>\n\t// *.xn--fa-hia.com <nil>\n\t// *.xn--fa-hia.com <nil>\n\t//  <nil>\n\t// www.fass.com <nil>\n\t//  idna: invalid label \"\"\n\t// www.xn--fa-hia.com <nil>\n}\n\nfunc ExampleNew() {\n\tvar p *idna.Profile\n\n\t// Raw Punycode has no restrictions and does no mappings.\n\tp = idna.New()\n\tfmt.Println(p.ToASCII(\"*.faß.com\"))\n\n\t// Do mappings. Note that star is not allowed in a DNS lookup.\n\tp = idna.New(\n\t\tidna.MapForLookup(),\n\t\tidna.Transitional(true)) // Map ß -> ss\n\tfmt.Println(p.ToASCII(\"*.faß.com\"))\n\n\t// Lookup for registration. 
Also does not allow '*'.\n\tp = idna.New(idna.ValidateForRegistration())\n\tfmt.Println(p.ToUnicode(\"*.faß.com\"))\n\n\t// Set up a profile maps for lookup, but allows wild cards.\n\tp = idna.New(\n\t\tidna.MapForLookup(),\n\t\tidna.Transitional(true),      // Map ß -> ss\n\t\tidna.StrictDomainName(false)) // Set more permissive ASCII rules.\n\tfmt.Println(p.ToASCII(\"*.faß.com\"))\n\n\t// Output:\n\t// *.xn--fa-hia.com <nil>\n\t// *.fass.com idna: disallowed rune U+002A\n\t// *.faß.com idna: disallowed rune U+002A\n\t// *.fass.com <nil>\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/idna/idna.go",
    "content": "// Code generated by running \"go generate\" in golang.org/x/text. DO NOT EDIT.\n\n// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package idna implements IDNA2008 using the compatibility processing\n// defined by UTS (Unicode Technical Standard) #46, which defines a standard to\n// deal with the transition from IDNA2003.\n//\n// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC\n// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.\n// UTS #46 is defined in http://www.unicode.org/reports/tr46.\n// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the\n// differences between these two standards.\npackage idna // import \"golang.org/x/net/idna\"\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"unicode/utf8\"\n\n\t\"golang.org/x/text/secure/bidirule\"\n\t\"golang.org/x/text/unicode/bidi\"\n\t\"golang.org/x/text/unicode/norm\"\n)\n\n// NOTE: Unlike common practice in Go APIs, the functions will return a\n// sanitized domain name in case of errors. 
Browsers sometimes use a partially\n// evaluated string as lookup.\n// TODO: the current error handling is, in my opinion, the least opinionated.\n// Other strategies are also viable, though:\n// Option 1) Return an empty string in case of error, but allow the user to\n//    specify explicitly which errors to ignore.\n// Option 2) Return the partially evaluated string if it is itself a valid\n//    string, otherwise return the empty string in case of error.\n// Option 3) Option 1 and 2.\n// Option 4) Always return an empty string for now and implement Option 1 as\n//    needed, and document that the return string may not be empty in case of\n//    error in the future.\n// I think Option 1 is best, but it is quite opinionated.\n\n// ToASCII is a wrapper for Punycode.ToASCII.\nfunc ToASCII(s string) (string, error) {\n\treturn Punycode.process(s, true)\n}\n\n// ToUnicode is a wrapper for Punycode.ToUnicode.\nfunc ToUnicode(s string) (string, error) {\n\treturn Punycode.process(s, false)\n}\n\n// An Option configures a Profile at creation time.\ntype Option func(*options)\n\n// Transitional sets a Profile to use the Transitional mapping as defined in UTS\n// #46. This will cause, for example, \"ß\" to be mapped to \"ss\". Using the\n// transitional mapping provides a compromise between IDNA2003 and IDNA2008\n// compatibility. It is used by most browsers when resolving domain names. This\n// option is only meaningful if combined with MapForLookup.\nfunc Transitional(transitional bool) Option {\n\treturn func(o *options) { o.transitional = true }\n}\n\n// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts\n// are longer than allowed by the RFC.\nfunc VerifyDNSLength(verify bool) Option {\n\treturn func(o *options) { o.verifyDNSLength = verify }\n}\n\n// RemoveLeadingDots removes leading label separators. 
Leading runes that map to\n// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.\n//\n// This is the behavior suggested by the UTS #46 and is adopted by some\n// browsers.\nfunc RemoveLeadingDots(remove bool) Option {\n\treturn func(o *options) { o.removeLeadingDots = remove }\n}\n\n// ValidateLabels sets whether to check the mandatory label validation criteria\n// as defined in Section 5.4 of RFC 5891. This includes testing for correct use\n// of hyphens ('-'), normalization, validity of runes, and the context rules.\nfunc ValidateLabels(enable bool) Option {\n\treturn func(o *options) {\n\t\t// Don't override existing mappings, but set one that at least checks\n\t\t// normalization if it is not set.\n\t\tif o.mapping == nil && enable {\n\t\t\to.mapping = normalize\n\t\t}\n\t\to.trie = trie\n\t\to.validateLabels = enable\n\t\to.fromPuny = validateFromPunycode\n\t}\n}\n\n// StrictDomainName limits the set of permissible ASCII characters to those\n// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the\n// hyphen). This is set by default for MapForLookup and ValidateForRegistration.\n//\n// This option is useful, for instance, for browsers that allow characters\n// outside this range, for example a '_' (U+005F LOW LINE). See\n// http://www.rfc-editor.org/std/std3.txt for more details This option\n// corresponds to the UseSTD3ASCIIRules option in UTS #46.\nfunc StrictDomainName(use bool) Option {\n\treturn func(o *options) {\n\t\to.trie = trie\n\t\to.useSTD3Rules = use\n\t\to.fromPuny = validateFromPunycode\n\t}\n}\n\n// NOTE: the following options pull in tables. The tables should not be linked\n// in as long as the options are not used.\n\n// BidiRule enables the Bidi rule as defined in RFC 5893. 
Any application\n// that relies on proper validation of labels should include this rule.\nfunc BidiRule() Option {\n\treturn func(o *options) { o.bidirule = bidirule.ValidString }\n}\n\n// ValidateForRegistration sets validation options to verify that a given IDN is\n// properly formatted for registration as defined by Section 4 of RFC 5891.\nfunc ValidateForRegistration() Option {\n\treturn func(o *options) {\n\t\to.mapping = validateRegistration\n\t\tStrictDomainName(true)(o)\n\t\tValidateLabels(true)(o)\n\t\tVerifyDNSLength(true)(o)\n\t\tBidiRule()(o)\n\t}\n}\n\n// MapForLookup sets validation and mapping options such that a given IDN is\n// transformed for domain name lookup according to the requirements set out in\n// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,\n// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option\n// to add this check.\n//\n// The mappings include normalization and mapping case, width and other\n// compatibility mappings.\nfunc MapForLookup() Option {\n\treturn func(o *options) {\n\t\to.mapping = validateAndMap\n\t\tStrictDomainName(true)(o)\n\t\tValidateLabels(true)(o)\n\t}\n}\n\ntype options struct {\n\ttransitional      bool\n\tuseSTD3Rules      bool\n\tvalidateLabels    bool\n\tverifyDNSLength   bool\n\tremoveLeadingDots bool\n\n\ttrie *idnaTrie\n\n\t// fromPuny calls validation rules when converting A-labels to U-labels.\n\tfromPuny func(p *Profile, s string) error\n\n\t// mapping implements a validation and mapping step as defined in RFC 5895\n\t// or UTS 46, tailored to, for example, domain registration or lookup.\n\tmapping func(p *Profile, s string) (mapped string, isBidi bool, err error)\n\n\t// bidirule, if specified, checks whether s conforms to the Bidi Rule\n\t// defined in RFC 5893.\n\tbidirule func(s string) bool\n}\n\n// A Profile defines the configuration of an IDNA mapper.\ntype Profile struct {\n\toptions\n}\n\nfunc apply(o *options, opts []Option) {\n\tfor _, f := 
range opts {\n\t\tf(o)\n\t}\n}\n\n// New creates a new Profile.\n//\n// With no options, the returned Profile is the most permissive and equals the\n// Punycode Profile. Options can be passed to further restrict the Profile. The\n// MapForLookup and ValidateForRegistration options set a collection of options,\n// for lookup and registration purposes respectively, which can be tailored by\n// adding more fine-grained options, where later options override earlier\n// options.\nfunc New(o ...Option) *Profile {\n\tp := &Profile{}\n\tapply(&p.options, o)\n\treturn p\n}\n\n// ToASCII converts a domain or domain label to its ASCII form. For example,\n// ToASCII(\"bücher.example.com\") is \"xn--bcher-kva.example.com\", and\n// ToASCII(\"golang\") is \"golang\". If an error is encountered it will return\n// an error and a (partially) processed result.\nfunc (p *Profile) ToASCII(s string) (string, error) {\n\treturn p.process(s, true)\n}\n\n// ToUnicode converts a domain or domain label to its Unicode form. For example,\n// ToUnicode(\"xn--bcher-kva.example.com\") is \"bücher.example.com\", and\n// ToUnicode(\"golang\") is \"golang\". If an error is encountered it will return\n// an error and a (partially) processed result.\nfunc (p *Profile) ToUnicode(s string) (string, error) {\n\tpp := *p\n\tpp.transitional = false\n\treturn pp.process(s, false)\n}\n\n// String reports a string with a description of the profile for debugging\n// purposes. 
The string format may change with different versions.\nfunc (p *Profile) String() string {\n\ts := \"\"\n\tif p.transitional {\n\t\ts = \"Transitional\"\n\t} else {\n\t\ts = \"NonTransitional\"\n\t}\n\tif p.useSTD3Rules {\n\t\ts += \":UseSTD3Rules\"\n\t}\n\tif p.validateLabels {\n\t\ts += \":ValidateLabels\"\n\t}\n\tif p.verifyDNSLength {\n\t\ts += \":VerifyDNSLength\"\n\t}\n\treturn s\n}\n\nvar (\n\t// Punycode is a Profile that does raw punycode processing with a minimum\n\t// of validation.\n\tPunycode *Profile = punycode\n\n\t// Lookup is the recommended profile for looking up domain names, according\n\t// to Section 5 of RFC 5891. The exact configuration of this profile may\n\t// change over time.\n\tLookup *Profile = lookup\n\n\t// Display is the recommended profile for displaying domain names.\n\t// The configuration of this profile may change over time.\n\tDisplay *Profile = display\n\n\t// Registration is the recommended profile for checking whether a given\n\t// IDN is valid for registration, according to Section 4 of RFC 5891.\n\tRegistration *Profile = registration\n\n\tpunycode = &Profile{}\n\tlookup   = &Profile{options{\n\t\ttransitional:   true,\n\t\tuseSTD3Rules:   true,\n\t\tvalidateLabels: true,\n\t\ttrie:           trie,\n\t\tfromPuny:       validateFromPunycode,\n\t\tmapping:        validateAndMap,\n\t\tbidirule:       bidirule.ValidString,\n\t}}\n\tdisplay = &Profile{options{\n\t\tuseSTD3Rules:   true,\n\t\tvalidateLabels: true,\n\t\ttrie:           trie,\n\t\tfromPuny:       validateFromPunycode,\n\t\tmapping:        validateAndMap,\n\t\tbidirule:       bidirule.ValidString,\n\t}}\n\tregistration = &Profile{options{\n\t\tuseSTD3Rules:    true,\n\t\tvalidateLabels:  true,\n\t\tverifyDNSLength: true,\n\t\ttrie:            trie,\n\t\tfromPuny:        validateFromPunycode,\n\t\tmapping:         validateRegistration,\n\t\tbidirule:        bidirule.ValidString,\n\t}}\n\n\t// TODO: profiles\n\t// Register: recommended for approving domain names: 
don't do any mappings\n\t// but rather reject on invalid input. Bundle or block deviation characters.\n)\n\ntype labelError struct{ label, code_ string }\n\nfunc (e labelError) code() string { return e.code_ }\nfunc (e labelError) Error() string {\n\treturn fmt.Sprintf(\"idna: invalid label %q\", e.label)\n}\n\ntype runeError rune\n\nfunc (e runeError) code() string { return \"P1\" }\nfunc (e runeError) Error() string {\n\treturn fmt.Sprintf(\"idna: disallowed rune %U\", e)\n}\n\n// process implements the algorithm described in section 4 of UTS #46,\n// see http://www.unicode.org/reports/tr46.\nfunc (p *Profile) process(s string, toASCII bool) (string, error) {\n\tvar err error\n\tvar isBidi bool\n\tif p.mapping != nil {\n\t\ts, isBidi, err = p.mapping(p, s)\n\t}\n\t// Remove leading empty labels.\n\tif p.removeLeadingDots {\n\t\tfor ; len(s) > 0 && s[0] == '.'; s = s[1:] {\n\t\t}\n\t}\n\t// TODO: allow for a quick check the tables data.\n\t// It seems like we should only create this error on ToASCII, but the\n\t// UTS 46 conformance tests suggests we should always check this.\n\tif err == nil && p.verifyDNSLength && s == \"\" {\n\t\terr = &labelError{s, \"A4\"}\n\t}\n\tlabels := labelIter{orig: s}\n\tfor ; !labels.done(); labels.next() {\n\t\tlabel := labels.label()\n\t\tif label == \"\" {\n\t\t\t// Empty labels are not okay. 
The label iterator skips the last\n\t\t\t// label if it is empty.\n\t\t\tif err == nil && p.verifyDNSLength {\n\t\t\t\terr = &labelError{s, \"A4\"}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(label, acePrefix) {\n\t\t\tu, err2 := decode(label[len(acePrefix):])\n\t\t\tif err2 != nil {\n\t\t\t\tif err == nil {\n\t\t\t\t\terr = err2\n\t\t\t\t}\n\t\t\t\t// Spec says keep the old label.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tisBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight\n\t\t\tlabels.set(u)\n\t\t\tif err == nil && p.validateLabels {\n\t\t\t\terr = p.fromPuny(p, u)\n\t\t\t}\n\t\t\tif err == nil {\n\t\t\t\t// This should be called on NonTransitional, according to the\n\t\t\t\t// spec, but that currently does not have any effect. Use the\n\t\t\t\t// original profile to preserve options.\n\t\t\t\terr = p.validateLabel(u)\n\t\t\t}\n\t\t} else if err == nil {\n\t\t\terr = p.validateLabel(label)\n\t\t}\n\t}\n\tif isBidi && p.bidirule != nil && err == nil {\n\t\tfor labels.reset(); !labels.done(); labels.next() {\n\t\t\tif !p.bidirule(labels.label()) {\n\t\t\t\terr = &labelError{s, \"B\"}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif toASCII {\n\t\tfor labels.reset(); !labels.done(); labels.next() {\n\t\t\tlabel := labels.label()\n\t\t\tif !ascii(label) {\n\t\t\t\ta, err2 := encode(acePrefix, label)\n\t\t\t\tif err == nil {\n\t\t\t\t\terr = err2\n\t\t\t\t}\n\t\t\t\tlabel = a\n\t\t\t\tlabels.set(a)\n\t\t\t}\n\t\t\tn := len(label)\n\t\t\tif p.verifyDNSLength && err == nil && (n == 0 || n > 63) {\n\t\t\t\terr = &labelError{label, \"A4\"}\n\t\t\t}\n\t\t}\n\t}\n\ts = labels.result()\n\tif toASCII && p.verifyDNSLength && err == nil {\n\t\t// Compute the length of the domain name minus the root label and its dot.\n\t\tn := len(s)\n\t\tif n > 0 && s[n-1] == '.' 
{\n\t\t\tn--\n\t\t}\n\t\tif len(s) < 1 || n > 253 {\n\t\t\terr = &labelError{s, \"A4\"}\n\t\t}\n\t}\n\treturn s, err\n}\n\nfunc normalize(p *Profile, s string) (mapped string, isBidi bool, err error) {\n\t// TODO: consider first doing a quick check to see if any of these checks\n\t// need to be done. This will make it slower in the general case, but\n\t// faster in the common case.\n\tmapped = norm.NFC.String(s)\n\tisBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft\n\treturn mapped, isBidi, nil\n}\n\nfunc validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) {\n\t// TODO: filter need for normalization in loop below.\n\tif !norm.NFC.IsNormalString(s) {\n\t\treturn s, false, &labelError{s, \"V1\"}\n\t}\n\tfor i := 0; i < len(s); {\n\t\tv, sz := trie.lookupString(s[i:])\n\t\tbidi = bidi || info(v).isBidi(s[i:])\n\t\t// Copy bytes not copied so far.\n\t\tswitch p.simplify(info(v).category()) {\n\t\t// TODO: handle the NV8 defined in the Unicode idna data set to allow\n\t\t// for strict conformance to IDNA2008.\n\t\tcase valid, deviation:\n\t\tcase disallowed, mapped, unknown, ignored:\n\t\t\tr, _ := utf8.DecodeRuneInString(s[i:])\n\t\t\treturn s, bidi, runeError(r)\n\t\t}\n\t\ti += sz\n\t}\n\treturn s, bidi, nil\n}\n\nfunc (c info) isBidi(s string) bool {\n\tif !c.isMapped() {\n\t\treturn c&attributesMask == rtl\n\t}\n\t// TODO: also store bidi info for mapped data. This is possible, but a bit\n\t// cumbersome and not for the common case.\n\tp, _ := bidi.LookupString(s)\n\tswitch p.Class() {\n\tcase bidi.R, bidi.AL, bidi.AN:\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) {\n\tvar (\n\t\tb []byte\n\t\tk int\n\t)\n\t// combinedInfoBits contains the or-ed bits of all runes. We use this\n\t// to derive the mayNeedNorm bit later. This may trigger normalization\n\t// overeagerly, but it will not do so in the common case. 
The end result\n\t// is another 10% saving on BenchmarkProfile for the common case.\n\tvar combinedInfoBits info\n\tfor i := 0; i < len(s); {\n\t\tv, sz := trie.lookupString(s[i:])\n\t\tcombinedInfoBits |= info(v)\n\t\tbidi = bidi || info(v).isBidi(s[i:])\n\t\tstart := i\n\t\ti += sz\n\t\t// Copy bytes not copied so far.\n\t\tswitch p.simplify(info(v).category()) {\n\t\tcase valid:\n\t\t\tcontinue\n\t\tcase disallowed:\n\t\t\tif err == nil {\n\t\t\t\tr, _ := utf8.DecodeRuneInString(s[start:])\n\t\t\t\terr = runeError(r)\n\t\t\t}\n\t\t\tcontinue\n\t\tcase mapped, deviation:\n\t\t\tb = append(b, s[k:start]...)\n\t\t\tb = info(v).appendMapping(b, s[start:i])\n\t\tcase ignored:\n\t\t\tb = append(b, s[k:start]...)\n\t\t\t// drop the rune\n\t\tcase unknown:\n\t\t\tb = append(b, s[k:start]...)\n\t\t\tb = append(b, \"\\ufffd\"...)\n\t\t}\n\t\tk = i\n\t}\n\tif k == 0 {\n\t\t// No changes so far.\n\t\tif combinedInfoBits&mayNeedNorm != 0 {\n\t\t\ts = norm.NFC.String(s)\n\t\t}\n\t} else {\n\t\tb = append(b, s[k:]...)\n\t\tif norm.NFC.QuickSpan(b) != len(b) {\n\t\t\tb = norm.NFC.Bytes(b)\n\t\t}\n\t\t// TODO: the punycode converters require strings as input.\n\t\ts = string(b)\n\t}\n\treturn s, bidi, err\n}\n\n// A labelIter allows iterating over domain name labels.\ntype labelIter struct {\n\torig     string\n\tslice    []string\n\tcurStart int\n\tcurEnd   int\n\ti        int\n}\n\nfunc (l *labelIter) reset() {\n\tl.curStart = 0\n\tl.curEnd = 0\n\tl.i = 0\n}\n\nfunc (l *labelIter) done() bool {\n\treturn l.curStart >= len(l.orig)\n}\n\nfunc (l *labelIter) result() string {\n\tif l.slice != nil {\n\t\treturn strings.Join(l.slice, \".\")\n\t}\n\treturn l.orig\n}\n\nfunc (l *labelIter) label() string {\n\tif l.slice != nil {\n\t\treturn l.slice[l.i]\n\t}\n\tp := strings.IndexByte(l.orig[l.curStart:], '.')\n\tl.curEnd = l.curStart + p\n\tif p == -1 {\n\t\tl.curEnd = len(l.orig)\n\t}\n\treturn l.orig[l.curStart:l.curEnd]\n}\n\n// next sets the value to the next label. 
It skips the last label if it is empty.\nfunc (l *labelIter) next() {\n\tl.i++\n\tif l.slice != nil {\n\t\tif l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == \"\" {\n\t\t\tl.curStart = len(l.orig)\n\t\t}\n\t} else {\n\t\tl.curStart = l.curEnd + 1\n\t\tif l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {\n\t\t\tl.curStart = len(l.orig)\n\t\t}\n\t}\n}\n\nfunc (l *labelIter) set(s string) {\n\tif l.slice == nil {\n\t\tl.slice = strings.Split(l.orig, \".\")\n\t}\n\tl.slice[l.i] = s\n}\n\n// acePrefix is the ASCII Compatible Encoding prefix.\nconst acePrefix = \"xn--\"\n\nfunc (p *Profile) simplify(cat category) category {\n\tswitch cat {\n\tcase disallowedSTD3Mapped:\n\t\tif p.useSTD3Rules {\n\t\t\tcat = disallowed\n\t\t} else {\n\t\t\tcat = mapped\n\t\t}\n\tcase disallowedSTD3Valid:\n\t\tif p.useSTD3Rules {\n\t\t\tcat = disallowed\n\t\t} else {\n\t\t\tcat = valid\n\t\t}\n\tcase deviation:\n\t\tif !p.transitional {\n\t\t\tcat = valid\n\t\t}\n\tcase validNV8, validXV8:\n\t\t// TODO: handle V2008\n\t\tcat = valid\n\t}\n\treturn cat\n}\n\nfunc validateFromPunycode(p *Profile, s string) error {\n\tif !norm.NFC.IsNormalString(s) {\n\t\treturn &labelError{s, \"V1\"}\n\t}\n\t// TODO: detect whether string may have to be normalized in the following\n\t// loop.\n\tfor i := 0; i < len(s); {\n\t\tv, sz := trie.lookupString(s[i:])\n\t\tif c := p.simplify(info(v).category()); c != valid && c != deviation {\n\t\t\treturn &labelError{s, \"V6\"}\n\t\t}\n\t\ti += sz\n\t}\n\treturn nil\n}\n\nconst (\n\tzwnj = \"\\u200c\"\n\tzwj  = \"\\u200d\"\n)\n\ntype joinState int8\n\nconst (\n\tstateStart joinState = iota\n\tstateVirama\n\tstateBefore\n\tstateBeforeVirama\n\tstateAfter\n\tstateFAIL\n)\n\nvar joinStates = [][numJoinTypes]joinState{\n\tstateStart: {\n\t\tjoiningL:   stateBefore,\n\t\tjoiningD:   stateBefore,\n\t\tjoinZWNJ:   stateFAIL,\n\t\tjoinZWJ:    stateFAIL,\n\t\tjoinVirama: stateVirama,\n\t},\n\tstateVirama: {\n\t\tjoiningL: stateBefore,\n\t\tjoiningD: 
stateBefore,\n\t},\n\tstateBefore: {\n\t\tjoiningL:   stateBefore,\n\t\tjoiningD:   stateBefore,\n\t\tjoiningT:   stateBefore,\n\t\tjoinZWNJ:   stateAfter,\n\t\tjoinZWJ:    stateFAIL,\n\t\tjoinVirama: stateBeforeVirama,\n\t},\n\tstateBeforeVirama: {\n\t\tjoiningL: stateBefore,\n\t\tjoiningD: stateBefore,\n\t\tjoiningT: stateBefore,\n\t},\n\tstateAfter: {\n\t\tjoiningL:   stateFAIL,\n\t\tjoiningD:   stateBefore,\n\t\tjoiningT:   stateAfter,\n\t\tjoiningR:   stateStart,\n\t\tjoinZWNJ:   stateFAIL,\n\t\tjoinZWJ:    stateFAIL,\n\t\tjoinVirama: stateAfter, // no-op as we can't accept joiners here\n\t},\n\tstateFAIL: {\n\t\t0:          stateFAIL,\n\t\tjoiningL:   stateFAIL,\n\t\tjoiningD:   stateFAIL,\n\t\tjoiningT:   stateFAIL,\n\t\tjoiningR:   stateFAIL,\n\t\tjoinZWNJ:   stateFAIL,\n\t\tjoinZWJ:    stateFAIL,\n\t\tjoinVirama: stateFAIL,\n\t},\n}\n\n// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are\n// already implicitly satisfied by the overall implementation.\nfunc (p *Profile) validateLabel(s string) (err error) {\n\tif s == \"\" {\n\t\tif p.verifyDNSLength {\n\t\t\treturn &labelError{s, \"A4\"}\n\t\t}\n\t\treturn nil\n\t}\n\tif !p.validateLabels {\n\t\treturn nil\n\t}\n\ttrie := p.trie // p.validateLabels is only set if trie is set.\n\tif len(s) > 4 && s[2] == '-' && s[3] == '-' {\n\t\treturn &labelError{s, \"V2\"}\n\t}\n\tif s[0] == '-' || s[len(s)-1] == '-' {\n\t\treturn &labelError{s, \"V3\"}\n\t}\n\t// TODO: merge the use of this in the trie.\n\tv, sz := trie.lookupString(s)\n\tx := info(v)\n\tif x.isModifier() {\n\t\treturn &labelError{s, \"V5\"}\n\t}\n\t// Quickly return in the absence of zero-width (non) joiners.\n\tif strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {\n\t\treturn nil\n\t}\n\tst := stateStart\n\tfor i := 0; ; {\n\t\tjt := x.joinType()\n\t\tif s[i:i+sz] == zwj {\n\t\t\tjt = joinZWJ\n\t\t} else if s[i:i+sz] == zwnj {\n\t\t\tjt = joinZWNJ\n\t\t}\n\t\tst = joinStates[st][jt]\n\t\tif x.isViramaModifier() 
{\n\t\t\tst = joinStates[st][joinVirama]\n\t\t}\n\t\tif i += sz; i == len(s) {\n\t\t\tbreak\n\t\t}\n\t\tv, sz = trie.lookupString(s[i:])\n\t\tx = info(v)\n\t}\n\tif st == stateFAIL || st == stateAfter {\n\t\treturn &labelError{s, \"C\"}\n\t}\n\treturn nil\n}\n\nfunc ascii(s string) bool {\n\tfor i := 0; i < len(s); i++ {\n\t\tif s[i] >= utf8.RuneSelf {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/idna/idna_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage idna\n\nimport (\n\t\"testing\"\n)\n\nvar idnaTestCases = [...]struct {\n\tascii, unicode string\n}{\n\t// Labels.\n\t{\"books\", \"books\"},\n\t{\"xn--bcher-kva\", \"bücher\"},\n\n\t// Domains.\n\t{\"foo--xn--bar.org\", \"foo--xn--bar.org\"},\n\t{\"golang.org\", \"golang.org\"},\n\t{\"example.xn--p1ai\", \"example.рф\"},\n\t{\"xn--czrw28b.tw\", \"商業.tw\"},\n\t{\"www.xn--mller-kva.de\", \"www.müller.de\"},\n}\n\nfunc TestIDNA(t *testing.T) {\n\tfor _, tc := range idnaTestCases {\n\t\tif a, err := ToASCII(tc.unicode); err != nil {\n\t\t\tt.Errorf(\"ToASCII(%q): %v\", tc.unicode, err)\n\t\t} else if a != tc.ascii {\n\t\t\tt.Errorf(\"ToASCII(%q): got %q, want %q\", tc.unicode, a, tc.ascii)\n\t\t}\n\n\t\tif u, err := ToUnicode(tc.ascii); err != nil {\n\t\t\tt.Errorf(\"ToUnicode(%q): %v\", tc.ascii, err)\n\t\t} else if u != tc.unicode {\n\t\t\tt.Errorf(\"ToUnicode(%q): got %q, want %q\", tc.ascii, u, tc.unicode)\n\t\t}\n\t}\n}\n\nfunc TestIDNASeparators(t *testing.T) {\n\ttype subCase struct {\n\t\tunicode   string\n\t\twantASCII string\n\t\twantErr   bool\n\t}\n\n\ttestCases := []struct {\n\t\tname     string\n\t\tprofile  *Profile\n\t\tsubCases []subCase\n\t}{\n\t\t{\n\t\t\tname: \"Punycode\", profile: Punycode,\n\t\t\tsubCases: []subCase{\n\t\t\t\t{\"example\\u3002jp\", \"xn--examplejp-ck3h\", false},\n\t\t\t\t{\"東京\\uFF0Ejp\", \"xn--jp-l92cn98g071o\", false},\n\t\t\t\t{\"大阪\\uFF61jp\", \"xn--jp-ku9cz72u463f\", false},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Lookup\", profile: Lookup,\n\t\t\tsubCases: []subCase{\n\t\t\t\t{\"example\\u3002jp\", \"example.jp\", false},\n\t\t\t\t{\"東京\\uFF0Ejp\", \"xn--1lqs71d.jp\", false},\n\t\t\t\t{\"大阪\\uFF61jp\", \"xn--pssu33l.jp\", false},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Display\", profile: Display,\n\t\t\tsubCases: 
[]subCase{\n\t\t\t\t{\"example\\u3002jp\", \"example.jp\", false},\n\t\t\t\t{\"東京\\uFF0Ejp\", \"xn--1lqs71d.jp\", false},\n\t\t\t\t{\"大阪\\uFF61jp\", \"xn--pssu33l.jp\", false},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Registration\", profile: Registration,\n\t\t\tsubCases: []subCase{\n\t\t\t\t{\"example\\u3002jp\", \"\", true},\n\t\t\t\t{\"東京\\uFF0Ejp\", \"\", true},\n\t\t\t\t{\"大阪\\uFF61jp\", \"\", true},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tfor _, c := range tc.subCases {\n\t\t\t\tgotA, err := tc.profile.ToASCII(c.unicode)\n\t\t\t\tif c.wantErr {\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tt.Errorf(\"ToASCII(%q): got no error, but an error expected\", c.unicode)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Errorf(\"ToASCII(%q): got err=%v, but no error expected\", c.unicode, err)\n\t\t\t\t\t} else if gotA != c.wantASCII {\n\t\t\t\t\t\tt.Errorf(\"ToASCII(%q): got %q, want %q\", c.unicode, gotA, c.wantASCII)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TODO(nigeltao): test errors, once we've specified when ToASCII and ToUnicode\n// return errors.\n"
  },
  {
    "path": "vendor/golang.org/x/net/idna/punycode.go",
    "content": "// Code generated by running \"go generate\" in golang.org/x/text. DO NOT EDIT.\n\n// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage idna\n\n// This file implements the Punycode algorithm from RFC 3492.\n\nimport (\n\t\"math\"\n\t\"strings\"\n\t\"unicode/utf8\"\n)\n\n// These parameter values are specified in section 5.\n//\n// All computation is done with int32s, so that overflow behavior is identical\n// regardless of whether int is 32-bit or 64-bit.\nconst (\n\tbase        int32 = 36\n\tdamp        int32 = 700\n\tinitialBias int32 = 72\n\tinitialN    int32 = 128\n\tskew        int32 = 38\n\ttmax        int32 = 26\n\ttmin        int32 = 1\n)\n\nfunc punyError(s string) error { return &labelError{s, \"A3\"} }\n\n// decode decodes a string as specified in section 6.2.\nfunc decode(encoded string) (string, error) {\n\tif encoded == \"\" {\n\t\treturn \"\", nil\n\t}\n\tpos := 1 + strings.LastIndex(encoded, \"-\")\n\tif pos == 1 {\n\t\treturn \"\", punyError(encoded)\n\t}\n\tif pos == len(encoded) {\n\t\treturn encoded[:len(encoded)-1], nil\n\t}\n\toutput := make([]rune, 0, len(encoded))\n\tif pos != 0 {\n\t\tfor _, r := range encoded[:pos-1] {\n\t\t\toutput = append(output, r)\n\t\t}\n\t}\n\ti, n, bias := int32(0), initialN, initialBias\n\tfor pos < len(encoded) {\n\t\toldI, w := i, int32(1)\n\t\tfor k := base; ; k += base {\n\t\t\tif pos == len(encoded) {\n\t\t\t\treturn \"\", punyError(encoded)\n\t\t\t}\n\t\t\tdigit, ok := decodeDigit(encoded[pos])\n\t\t\tif !ok {\n\t\t\t\treturn \"\", punyError(encoded)\n\t\t\t}\n\t\t\tpos++\n\t\t\ti += digit * w\n\t\t\tif i < 0 {\n\t\t\t\treturn \"\", punyError(encoded)\n\t\t\t}\n\t\t\tt := k - bias\n\t\t\tif t < tmin {\n\t\t\t\tt = tmin\n\t\t\t} else if t > tmax {\n\t\t\t\tt = tmax\n\t\t\t}\n\t\t\tif digit < t {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tw *= base - t\n\t\t\tif w >= math.MaxInt32/base 
{\n\t\t\t\treturn \"\", punyError(encoded)\n\t\t\t}\n\t\t}\n\t\tx := int32(len(output) + 1)\n\t\tbias = adapt(i-oldI, x, oldI == 0)\n\t\tn += i / x\n\t\ti %= x\n\t\tif n > utf8.MaxRune || len(output) >= 1024 {\n\t\t\treturn \"\", punyError(encoded)\n\t\t}\n\t\toutput = append(output, 0)\n\t\tcopy(output[i+1:], output[i:])\n\t\toutput[i] = n\n\t\ti++\n\t}\n\treturn string(output), nil\n}\n\n// encode encodes a string as specified in section 6.3 and prepends prefix to\n// the result.\n//\n// The \"while h < length(input)\" line in the specification becomes \"for\n// remaining != 0\" in the Go code, because len(s) in Go is in bytes, not runes.\nfunc encode(prefix, s string) (string, error) {\n\toutput := make([]byte, len(prefix), len(prefix)+1+2*len(s))\n\tcopy(output, prefix)\n\tdelta, n, bias := int32(0), initialN, initialBias\n\tb, remaining := int32(0), int32(0)\n\tfor _, r := range s {\n\t\tif r < 0x80 {\n\t\t\tb++\n\t\t\toutput = append(output, byte(r))\n\t\t} else {\n\t\t\tremaining++\n\t\t}\n\t}\n\th := b\n\tif b > 0 {\n\t\toutput = append(output, '-')\n\t}\n\tfor remaining != 0 {\n\t\tm := int32(0x7fffffff)\n\t\tfor _, r := range s {\n\t\t\tif m > r && r >= n {\n\t\t\t\tm = r\n\t\t\t}\n\t\t}\n\t\tdelta += (m - n) * (h + 1)\n\t\tif delta < 0 {\n\t\t\treturn \"\", punyError(s)\n\t\t}\n\t\tn = m\n\t\tfor _, r := range s {\n\t\t\tif r < n {\n\t\t\t\tdelta++\n\t\t\t\tif delta < 0 {\n\t\t\t\t\treturn \"\", punyError(s)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif r > n {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tq := delta\n\t\t\tfor k := base; ; k += base {\n\t\t\t\tt := k - bias\n\t\t\t\tif t < tmin {\n\t\t\t\t\tt = tmin\n\t\t\t\t} else if t > tmax {\n\t\t\t\t\tt = tmax\n\t\t\t\t}\n\t\t\t\tif q < t {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\toutput = append(output, encodeDigit(t+(q-t)%(base-t)))\n\t\t\t\tq = (q - t) / (base - t)\n\t\t\t}\n\t\t\toutput = append(output, encodeDigit(q))\n\t\t\tbias = adapt(delta, h+1, h == b)\n\t\t\tdelta = 
0\n\t\t\th++\n\t\t\tremaining--\n\t\t}\n\t\tdelta++\n\t\tn++\n\t}\n\treturn string(output), nil\n}\n\nfunc decodeDigit(x byte) (digit int32, ok bool) {\n\tswitch {\n\tcase '0' <= x && x <= '9':\n\t\treturn int32(x - ('0' - 26)), true\n\tcase 'A' <= x && x <= 'Z':\n\t\treturn int32(x - 'A'), true\n\tcase 'a' <= x && x <= 'z':\n\t\treturn int32(x - 'a'), true\n\t}\n\treturn 0, false\n}\n\nfunc encodeDigit(digit int32) byte {\n\tswitch {\n\tcase 0 <= digit && digit < 26:\n\t\treturn byte(digit + 'a')\n\tcase 26 <= digit && digit < 36:\n\t\treturn byte(digit + ('0' - 26))\n\t}\n\tpanic(\"idna: internal error in punycode encoding\")\n}\n\n// adapt is the bias adaptation function specified in section 6.1.\nfunc adapt(delta, numPoints int32, firstTime bool) int32 {\n\tif firstTime {\n\t\tdelta /= damp\n\t} else {\n\t\tdelta /= 2\n\t}\n\tdelta += delta / numPoints\n\tk := int32(0)\n\tfor delta > ((base-tmin)*tmax)/2 {\n\t\tdelta /= base - tmin\n\t\tk += base\n\t}\n\treturn k + (base-tmin+1)*delta/(delta+skew)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/idna/punycode_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage idna\n\nimport (\n\t\"strings\"\n\t\"testing\"\n)\n\nvar punycodeTestCases = [...]struct {\n\ts, encoded string\n}{\n\t{\"\", \"\"},\n\t{\"-\", \"--\"},\n\t{\"-a\", \"-a-\"},\n\t{\"-a-\", \"-a--\"},\n\t{\"a\", \"a-\"},\n\t{\"a-\", \"a--\"},\n\t{\"a-b\", \"a-b-\"},\n\t{\"books\", \"books-\"},\n\t{\"bücher\", \"bcher-kva\"},\n\t{\"Hello世界\", \"Hello-ck1hg65u\"},\n\t{\"ü\", \"tda\"},\n\t{\"üý\", \"tdac\"},\n\n\t// The test cases below come from RFC 3492 section 7.1 with Errata 3026.\n\t{\n\t\t// (A) Arabic (Egyptian).\n\t\t\"\\u0644\\u064A\\u0647\\u0645\\u0627\\u0628\\u062A\\u0643\\u0644\" +\n\t\t\t\"\\u0645\\u0648\\u0634\\u0639\\u0631\\u0628\\u064A\\u061F\",\n\t\t\"egbpdaj6bu4bxfgehfvwxn\",\n\t},\n\t{\n\t\t// (B) Chinese (simplified).\n\t\t\"\\u4ED6\\u4EEC\\u4E3A\\u4EC0\\u4E48\\u4E0D\\u8BF4\\u4E2D\\u6587\",\n\t\t\"ihqwcrb4cv8a8dqg056pqjye\",\n\t},\n\t{\n\t\t// (C) Chinese (traditional).\n\t\t\"\\u4ED6\\u5011\\u7232\\u4EC0\\u9EBD\\u4E0D\\u8AAA\\u4E2D\\u6587\",\n\t\t\"ihqwctvzc91f659drss3x8bo0yb\",\n\t},\n\t{\n\t\t// (D) Czech.\n\t\t\"\\u0050\\u0072\\u006F\\u010D\\u0070\\u0072\\u006F\\u0073\\u0074\" +\n\t\t\t\"\\u011B\\u006E\\u0065\\u006D\\u006C\\u0075\\u0076\\u00ED\\u010D\" +\n\t\t\t\"\\u0065\\u0073\\u006B\\u0079\",\n\t\t\"Proprostnemluvesky-uyb24dma41a\",\n\t},\n\t{\n\t\t// (E) Hebrew.\n\t\t\"\\u05DC\\u05DE\\u05D4\\u05D4\\u05DD\\u05E4\\u05E9\\u05D5\\u05D8\" +\n\t\t\t\"\\u05DC\\u05D0\\u05DE\\u05D3\\u05D1\\u05E8\\u05D9\\u05DD\\u05E2\" +\n\t\t\t\"\\u05D1\\u05E8\\u05D9\\u05EA\",\n\t\t\"4dbcagdahymbxekheh6e0a7fei0b\",\n\t},\n\t{\n\t\t// (F) Hindi (Devanagari).\n\t\t\"\\u092F\\u0939\\u0932\\u094B\\u0917\\u0939\\u093F\\u0928\\u094D\" +\n\t\t\t\"\\u0926\\u0940\\u0915\\u094D\\u092F\\u094B\\u0902\\u0928\\u0939\" +\n\t\t\t\"\\u0940\\u0902\\u092C\\u094B\\u0932\\u0938\\u0915\\u0924\\u0947\" 
+\n\t\t\t\"\\u0939\\u0948\\u0902\",\n\t\t\"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd\",\n\t},\n\t{\n\t\t// (G) Japanese (kanji and hiragana).\n\t\t\"\\u306A\\u305C\\u307F\\u3093\\u306A\\u65E5\\u672C\\u8A9E\\u3092\" +\n\t\t\t\"\\u8A71\\u3057\\u3066\\u304F\\u308C\\u306A\\u3044\\u306E\\u304B\",\n\t\t\"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa\",\n\t},\n\t{\n\t\t// (H) Korean (Hangul syllables).\n\t\t\"\\uC138\\uACC4\\uC758\\uBAA8\\uB4E0\\uC0AC\\uB78C\\uB4E4\\uC774\" +\n\t\t\t\"\\uD55C\\uAD6D\\uC5B4\\uB97C\\uC774\\uD574\\uD55C\\uB2E4\\uBA74\" +\n\t\t\t\"\\uC5BC\\uB9C8\\uB098\\uC88B\\uC744\\uAE4C\",\n\t\t\"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j\" +\n\t\t\t\"psd879ccm6fea98c\",\n\t},\n\t{\n\t\t// (I) Russian (Cyrillic).\n\t\t\"\\u043F\\u043E\\u0447\\u0435\\u043C\\u0443\\u0436\\u0435\\u043E\" +\n\t\t\t\"\\u043D\\u0438\\u043D\\u0435\\u0433\\u043E\\u0432\\u043E\\u0440\" +\n\t\t\t\"\\u044F\\u0442\\u043F\\u043E\\u0440\\u0443\\u0441\\u0441\\u043A\" +\n\t\t\t\"\\u0438\",\n\t\t\"b1abfaaepdrnnbgefbadotcwatmq2g4l\",\n\t},\n\t{\n\t\t// (J) Spanish.\n\t\t\"\\u0050\\u006F\\u0072\\u0071\\u0075\\u00E9\\u006E\\u006F\\u0070\" +\n\t\t\t\"\\u0075\\u0065\\u0064\\u0065\\u006E\\u0073\\u0069\\u006D\\u0070\" +\n\t\t\t\"\\u006C\\u0065\\u006D\\u0065\\u006E\\u0074\\u0065\\u0068\\u0061\" +\n\t\t\t\"\\u0062\\u006C\\u0061\\u0072\\u0065\\u006E\\u0045\\u0073\\u0070\" +\n\t\t\t\"\\u0061\\u00F1\\u006F\\u006C\",\n\t\t\"PorqunopuedensimplementehablarenEspaol-fmd56a\",\n\t},\n\t{\n\t\t// (K) Vietnamese.\n\t\t\"\\u0054\\u1EA1\\u0069\\u0073\\u0061\\u006F\\u0068\\u1ECD\\u006B\" +\n\t\t\t\"\\u0068\\u00F4\\u006E\\u0067\\u0074\\u0068\\u1EC3\\u0063\\u0068\" +\n\t\t\t\"\\u1EC9\\u006E\\u00F3\\u0069\\u0074\\u0069\\u1EBF\\u006E\\u0067\" +\n\t\t\t\"\\u0056\\u0069\\u1EC7\\u0074\",\n\t\t\"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g\",\n\t},\n\t{\n\t\t// (L) 
3<nen>B<gumi><kinpachi><sensei>.\n\t\t\"\\u0033\\u5E74\\u0042\\u7D44\\u91D1\\u516B\\u5148\\u751F\",\n\t\t\"3B-ww4c5e180e575a65lsy2b\",\n\t},\n\t{\n\t\t// (M) <amuro><namie>-with-SUPER-MONKEYS.\n\t\t\"\\u5B89\\u5BA4\\u5948\\u7F8E\\u6075\\u002D\\u0077\\u0069\\u0074\" +\n\t\t\t\"\\u0068\\u002D\\u0053\\u0055\\u0050\\u0045\\u0052\\u002D\\u004D\" +\n\t\t\t\"\\u004F\\u004E\\u004B\\u0045\\u0059\\u0053\",\n\t\t\"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n\",\n\t},\n\t{\n\t\t// (N) Hello-Another-Way-<sorezore><no><basho>.\n\t\t\"\\u0048\\u0065\\u006C\\u006C\\u006F\\u002D\\u0041\\u006E\\u006F\" +\n\t\t\t\"\\u0074\\u0068\\u0065\\u0072\\u002D\\u0057\\u0061\\u0079\\u002D\" +\n\t\t\t\"\\u305D\\u308C\\u305E\\u308C\\u306E\\u5834\\u6240\",\n\t\t\"Hello-Another-Way--fc4qua05auwb3674vfr0b\",\n\t},\n\t{\n\t\t// (O) <hitotsu><yane><no><shita>2.\n\t\t\"\\u3072\\u3068\\u3064\\u5C4B\\u6839\\u306E\\u4E0B\\u0032\",\n\t\t\"2-u9tlzr9756bt3uc0v\",\n\t},\n\t{\n\t\t// (P) Maji<de>Koi<suru>5<byou><mae>\n\t\t\"\\u004D\\u0061\\u006A\\u0069\\u3067\\u004B\\u006F\\u0069\\u3059\" +\n\t\t\t\"\\u308B\\u0035\\u79D2\\u524D\",\n\t\t\"MajiKoi5-783gue6qz075azm5e\",\n\t},\n\t{\n\t\t// (Q) <pafii>de<runba>\n\t\t\"\\u30D1\\u30D5\\u30A3\\u30FC\\u0064\\u0065\\u30EB\\u30F3\\u30D0\",\n\t\t\"de-jg4avhby1noc0d\",\n\t},\n\t{\n\t\t// (R) <sono><supiido><de>\n\t\t\"\\u305D\\u306E\\u30B9\\u30D4\\u30FC\\u30C9\\u3067\",\n\t\t\"d9juau41awczczp\",\n\t},\n\t{\n\t\t// (S) -> $1.00 <-\n\t\t\"\\u002D\\u003E\\u0020\\u0024\\u0031\\u002E\\u0030\\u0030\\u0020\" +\n\t\t\t\"\\u003C\\u002D\",\n\t\t\"-> $1.00 <--\",\n\t},\n}\n\nfunc TestPunycode(t *testing.T) {\n\tfor _, tc := range punycodeTestCases {\n\t\tif got, err := decode(tc.encoded); err != nil {\n\t\t\tt.Errorf(\"decode(%q): %v\", tc.encoded, err)\n\t\t} else if got != tc.s {\n\t\t\tt.Errorf(\"decode(%q): got %q, want %q\", tc.encoded, got, tc.s)\n\t\t}\n\n\t\tif got, err := encode(\"\", tc.s); err != nil {\n\t\t\tt.Errorf(`encode(\"\", %q): %v`, tc.s, err)\n\t\t} else if got != 
tc.encoded {\n\t\t\tt.Errorf(`encode(\"\", %q): got %q, want %q`, tc.s, got, tc.encoded)\n\t\t}\n\t}\n}\n\nvar punycodeErrorTestCases = [...]string{\n\t\"decode -\",            // A sole '-' is invalid.\n\t\"decode foo\\x00bar\",   // '\\x00' is not in [0-9A-Za-z].\n\t\"decode foo#bar\",      // '#' is not in [0-9A-Za-z].\n\t\"decode foo\\u00A3bar\", // '\\u00A3' is not in [0-9A-Za-z].\n\t\"decode 9\",            // \"9a\" decodes to codepoint \\u00A3; \"9\" is truncated.\n\t\"decode 99999a\",       // \"99999a\" decodes to codepoint \\U0048A3C1, which is > \\U0010FFFF.\n\t\"decode 9999999999a\",  // \"9999999999a\" overflows the int32 calculation.\n\n\t\"encode \" + strings.Repeat(\"x\", 65536) + \"\\uff00\", // int32 overflow.\n}\n\nfunc TestPunycodeErrors(t *testing.T) {\n\tfor _, tc := range punycodeErrorTestCases {\n\t\tvar err error\n\t\tswitch {\n\t\tcase strings.HasPrefix(tc, \"decode \"):\n\t\t\t_, err = decode(tc[7:])\n\t\tcase strings.HasPrefix(tc, \"encode \"):\n\t\t\t_, err = encode(\"\", tc[7:])\n\t\t}\n\t\tif err == nil {\n\t\t\tif len(tc) > 256 {\n\t\t\t\ttc = tc[:100] + \"...\" + tc[len(tc)-100:]\n\t\t\t}\n\t\t\tt.Errorf(\"no error for %s\", tc)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/idna/tables.go",
    "content": "// Code generated by running \"go generate\" in golang.org/x/text. DO NOT EDIT.\n\npackage idna\n\n// UnicodeVersion is the Unicode version from which the tables in this package are derived.\nconst UnicodeVersion = \"10.0.0\"\n\nvar mappings string = \"\" + // Size: 8176 bytes\n\t\"\\x00\\x01 \\x03 ̈\\x01a\\x03 ̄\\x012\\x013\\x03 ́\\x03 ̧\\x011\\x01o\\x051⁄4\\x051⁄2\" +\n\t\"\\x053⁄4\\x03i̇\\x03l·\\x03ʼn\\x01s\\x03dž\\x03ⱥ\\x03ⱦ\\x01h\\x01j\\x01r\\x01w\\x01y\" +\n\t\"\\x03 ̆\\x03 ̇\\x03 ̊\\x03 ̨\\x03 ̃\\x03 ̋\\x01l\\x01x\\x04̈́\\x03 ι\\x01;\\x05 ̈́\" +\n\t\"\\x04եւ\\x04اٴ\\x04وٴ\\x04ۇٴ\\x04يٴ\\x06क़\\x06ख़\\x06ग़\\x06ज़\\x06ड़\\x06ढ़\\x06फ़\" +\n\t\"\\x06य़\\x06ড়\\x06ঢ়\\x06য়\\x06ਲ਼\\x06ਸ਼\\x06ਖ਼\\x06ਗ਼\\x06ਜ਼\\x06ਫ਼\\x06ଡ଼\\x06ଢ଼\" +\n\t\"\\x06ํา\\x06ໍາ\\x06ຫນ\\x06ຫມ\\x06གྷ\\x06ཌྷ\\x06དྷ\\x06བྷ\\x06ཛྷ\\x06ཀྵ\\x06ཱི\\x06ཱུ\" +\n\t\"\\x06ྲྀ\\x09ྲཱྀ\\x06ླྀ\\x09ླཱྀ\\x06ཱྀ\\x06ྒྷ\\x06ྜྷ\\x06ྡྷ\\x06ྦྷ\\x06ྫྷ\\x06ྐྵ\\x02\" +\n\t\"в\\x02д\\x02о\\x02с\\x02т\\x02ъ\\x02ѣ\\x02æ\\x01b\\x01d\\x01e\\x02ǝ\\x01g\\x01i\\x01k\" +\n\t\"\\x01m\\x01n\\x02ȣ\\x01p\\x01t\\x01u\\x02ɐ\\x02ɑ\\x02ə\\x02ɛ\\x02ɜ\\x02ŋ\\x02ɔ\\x02ɯ\" +\n\t\"\\x01v\\x02β\\x02γ\\x02δ\\x02φ\\x02χ\\x02ρ\\x02н\\x02ɒ\\x01c\\x02ɕ\\x02ð\\x01f\\x02ɟ\" +\n\t\"\\x02ɡ\\x02ɥ\\x02ɨ\\x02ɩ\\x02ɪ\\x02ʝ\\x02ɭ\\x02ʟ\\x02ɱ\\x02ɰ\\x02ɲ\\x02ɳ\\x02ɴ\\x02ɵ\" +\n\t\"\\x02ɸ\\x02ʂ\\x02ʃ\\x02ƫ\\x02ʉ\\x02ʊ\\x02ʋ\\x02ʌ\\x01z\\x02ʐ\\x02ʑ\\x02ʒ\\x02θ\\x02ss\" +\n\t\"\\x02ά\\x02έ\\x02ή\\x02ί\\x02ό\\x02ύ\\x02ώ\\x05ἀι\\x05ἁι\\x05ἂι\\x05ἃι\\x05ἄι\\x05ἅι\" +\n\t\"\\x05ἆι\\x05ἇι\\x05ἠι\\x05ἡι\\x05ἢι\\x05ἣι\\x05ἤι\\x05ἥι\\x05ἦι\\x05ἧι\\x05ὠι\\x05ὡι\" +\n\t\"\\x05ὢι\\x05ὣι\\x05ὤι\\x05ὥι\\x05ὦι\\x05ὧι\\x05ὰι\\x04αι\\x04άι\\x05ᾶι\\x02ι\\x05 ̈͂\" +\n\t\"\\x05ὴι\\x04ηι\\x04ήι\\x05ῆι\\x05 ̓̀\\x05 ̓́\\x05 ̓͂\\x02ΐ\\x05 ̔̀\\x05 ̔́\\x05 ̔͂\" +\n\t\"\\x02ΰ\\x05 ̈̀\\x01`\\x05ὼι\\x04ωι\\x04ώι\\x05ῶι\\x06′′\\x09′′′\\x06‵‵\\x09‵‵‵\\x02!\" +\n\t\"!\\x02??\\x02?!\\x02!?\\x0c′′′′\\x010\\x014\\x015\\x016\\x017\\x018\\x019\\x01+\\x01=\" 
+\n\t\"\\x01(\\x01)\\x02rs\\x02ħ\\x02no\\x01q\\x02sm\\x02tm\\x02ω\\x02å\\x02א\\x02ב\\x02ג\" +\n\t\"\\x02ד\\x02π\\x051⁄7\\x051⁄9\\x061⁄10\\x051⁄3\\x052⁄3\\x051⁄5\\x052⁄5\\x053⁄5\\x054\" +\n\t\"⁄5\\x051⁄6\\x055⁄6\\x051⁄8\\x053⁄8\\x055⁄8\\x057⁄8\\x041⁄\\x02ii\\x02iv\\x02vi\" +\n\t\"\\x04viii\\x02ix\\x02xi\\x050⁄3\\x06∫∫\\x09∫∫∫\\x06∮∮\\x09∮∮∮\\x0210\\x0211\\x0212\" +\n\t\"\\x0213\\x0214\\x0215\\x0216\\x0217\\x0218\\x0219\\x0220\\x04(10)\\x04(11)\\x04(12)\" +\n\t\"\\x04(13)\\x04(14)\\x04(15)\\x04(16)\\x04(17)\\x04(18)\\x04(19)\\x04(20)\\x0c∫∫∫∫\" +\n\t\"\\x02==\\x05⫝̸\\x02ɫ\\x02ɽ\\x02ȿ\\x02ɀ\\x01.\\x04 ゙\\x04 ゚\\x06より\\x06コト\\x05(ᄀ)\\x05\" +\n\t\"(ᄂ)\\x05(ᄃ)\\x05(ᄅ)\\x05(ᄆ)\\x05(ᄇ)\\x05(ᄉ)\\x05(ᄋ)\\x05(ᄌ)\\x05(ᄎ)\\x05(ᄏ)\\x05(ᄐ\" +\n\t\")\\x05(ᄑ)\\x05(ᄒ)\\x05(가)\\x05(나)\\x05(다)\\x05(라)\\x05(마)\\x05(바)\\x05(사)\\x05(아)\" +\n\t\"\\x05(자)\\x05(차)\\x05(카)\\x05(타)\\x05(파)\\x05(하)\\x05(주)\\x08(오전)\\x08(오후)\\x05(一)\" +\n\t\"\\x05(二)\\x05(三)\\x05(四)\\x05(五)\\x05(六)\\x05(七)\\x05(八)\\x05(九)\\x05(十)\\x05(月)\" +\n\t\"\\x05(火)\\x05(水)\\x05(木)\\x05(金)\\x05(土)\\x05(日)\\x05(株)\\x05(有)\\x05(社)\\x05(名)\" +\n\t\"\\x05(特)\\x05(財)\\x05(祝)\\x05(労)\\x05(代)\\x05(呼)\\x05(学)\\x05(監)\\x05(企)\\x05(資)\" +\n\t\"\\x05(協)\\x05(祭)\\x05(休)\\x05(自)\\x05(至)\\x0221\\x0222\\x0223\\x0224\\x0225\\x0226\" +\n\t\"\\x0227\\x0228\\x0229\\x0230\\x0231\\x0232\\x0233\\x0234\\x0235\\x06참고\\x06주의\\x0236\" +\n\t\"\\x0237\\x0238\\x0239\\x0240\\x0241\\x0242\\x0243\\x0244\\x0245\\x0246\\x0247\\x0248\" +\n\t\"\\x0249\\x0250\\x041月\\x042月\\x043月\\x044月\\x045月\\x046月\\x047月\\x048月\\x049月\\x0510\" +\n\t\"月\\x0511月\\x0512月\\x02hg\\x02ev\\x0cアパート\\x0cアルファ\\x0cアンペア\\x09アール\\x0cイニング\\x09\" +\n\t\"インチ\\x09ウォン\\x0fエスクード\\x0cエーカー\\x09オンス\\x09オーム\\x09カイリ\\x0cカラット\\x0cカロリー\\x09ガロ\" +\n\t\"ン\\x09ガンマ\\x06ギガ\\x09ギニー\\x0cキュリー\\x0cギルダー\\x06キロ\\x0fキログラム\\x12キロメートル\\x0fキロワッ\" +\n\t\"ト\\x09グラム\\x0fグラムトン\\x0fクルゼイロ\\x0cクローネ\\x09ケース\\x09コルナ\\x09コーポ\\x0cサイクル\\x0fサンチ\" 
+\n\t\"ーム\\x0cシリング\\x09センチ\\x09セント\\x09ダース\\x06デシ\\x06ドル\\x06トン\\x06ナノ\\x09ノット\\x09ハイツ\" +\n\t\"\\x0fパーセント\\x09パーツ\\x0cバーレル\\x0fピアストル\\x09ピクル\\x06ピコ\\x06ビル\\x0fファラッド\\x0cフィート\" +\n\t\"\\x0fブッシェル\\x09フラン\\x0fヘクタール\\x06ペソ\\x09ペニヒ\\x09ヘルツ\\x09ペンス\\x09ページ\\x09ベータ\\x0cポイ\" +\n\t\"ント\\x09ボルト\\x06ホン\\x09ポンド\\x09ホール\\x09ホーン\\x0cマイクロ\\x09マイル\\x09マッハ\\x09マルク\\x0fマ\" +\n\t\"ンション\\x0cミクロン\\x06ミリ\\x0fミリバール\\x06メガ\\x0cメガトン\\x0cメートル\\x09ヤード\\x09ヤール\\x09ユアン\" +\n\t\"\\x0cリットル\\x06リラ\\x09ルピー\\x0cルーブル\\x06レム\\x0fレントゲン\\x09ワット\\x040点\\x041点\\x042点\" +\n\t\"\\x043点\\x044点\\x045点\\x046点\\x047点\\x048点\\x049点\\x0510点\\x0511点\\x0512点\\x0513点\" +\n\t\"\\x0514点\\x0515点\\x0516点\\x0517点\\x0518点\\x0519点\\x0520点\\x0521点\\x0522点\\x0523点\" +\n\t\"\\x0524点\\x02da\\x02au\\x02ov\\x02pc\\x02dm\\x02iu\\x06平成\\x06昭和\\x06大正\\x06明治\\x0c株\" +\n\t\"式会社\\x02pa\\x02na\\x02ma\\x02ka\\x02kb\\x02mb\\x02gb\\x04kcal\\x02pf\\x02nf\\x02m\" +\n\t\"g\\x02kg\\x02hz\\x02ml\\x02dl\\x02kl\\x02fm\\x02nm\\x02mm\\x02cm\\x02km\\x02m2\\x02m\" +\n\t\"3\\x05m∕s\\x06m∕s2\\x07rad∕s\\x08rad∕s2\\x02ps\\x02ns\\x02ms\\x02pv\\x02nv\\x02mv\" +\n\t\"\\x02kv\\x02pw\\x02nw\\x02mw\\x02kw\\x02bq\\x02cc\\x02cd\\x06c∕kg\\x02db\\x02gy\\x02\" +\n\t\"ha\\x02hp\\x02in\\x02kk\\x02kt\\x02lm\\x02ln\\x02lx\\x02ph\\x02pr\\x02sr\\x02sv\\x02\" +\n\t\"wb\\x05v∕m\\x05a∕m\\x041日\\x042日\\x043日\\x044日\\x045日\\x046日\\x047日\\x048日\\x049日\" +\n\t\"\\x0510日\\x0511日\\x0512日\\x0513日\\x0514日\\x0515日\\x0516日\\x0517日\\x0518日\\x0519日\" +\n\t\"\\x0520日\\x0521日\\x0522日\\x0523日\\x0524日\\x0525日\\x0526日\\x0527日\\x0528日\\x0529日\" +\n\t\"\\x0530日\\x0531日\\x02ь\\x02ɦ\\x02ɬ\\x02ʞ\\x02ʇ\\x02œ\\x04𤋮\\x04𢡊\\x04𢡄\\x04𣏕\\x04𥉉\" +\n\t\"\\x04𥳐\\x04𧻓\\x02ff\\x02fi\\x02fl\\x02st\\x04մն\\x04մե\\x04մի\\x04վն\\x04մխ\\x04יִ\" +\n\t\"\\x04ײַ\\x02ע\\x02ה\\x02כ\\x02ל\\x02ם\\x02ר\\x02ת\\x04שׁ\\x04שׂ\\x06שּׁ\\x06שּׂ\\x04א\" +\n\t\"ַ\\x04אָ\\x04אּ\\x04בּ\\x04גּ\\x04דּ\\x04הּ\\x04וּ\\x04זּ\\x04טּ\\x04יּ\\x04ךּ\\x04\" 
+\n\t\"כּ\\x04לּ\\x04מּ\\x04נּ\\x04סּ\\x04ףּ\\x04פּ\\x04צּ\\x04קּ\\x04רּ\\x04שּ\\x04תּ\" +\n\t\"\\x04וֹ\\x04בֿ\\x04כֿ\\x04פֿ\\x04אל\\x02ٱ\\x02ٻ\\x02پ\\x02ڀ\\x02ٺ\\x02ٿ\\x02ٹ\\x02ڤ\" +\n\t\"\\x02ڦ\\x02ڄ\\x02ڃ\\x02چ\\x02ڇ\\x02ڍ\\x02ڌ\\x02ڎ\\x02ڈ\\x02ژ\\x02ڑ\\x02ک\\x02گ\\x02ڳ\" +\n\t\"\\x02ڱ\\x02ں\\x02ڻ\\x02ۀ\\x02ہ\\x02ھ\\x02ے\\x02ۓ\\x02ڭ\\x02ۇ\\x02ۆ\\x02ۈ\\x02ۋ\\x02ۅ\" +\n\t\"\\x02ۉ\\x02ې\\x02ى\\x04ئا\\x04ئە\\x04ئو\\x04ئۇ\\x04ئۆ\\x04ئۈ\\x04ئې\\x04ئى\\x02ی\\x04\" +\n\t\"ئج\\x04ئح\\x04ئم\\x04ئي\\x04بج\\x04بح\\x04بخ\\x04بم\\x04بى\\x04بي\\x04تج\\x04تح\" +\n\t\"\\x04تخ\\x04تم\\x04تى\\x04تي\\x04ثج\\x04ثم\\x04ثى\\x04ثي\\x04جح\\x04جم\\x04حج\\x04حم\" +\n\t\"\\x04خج\\x04خح\\x04خم\\x04سج\\x04سح\\x04سخ\\x04سم\\x04صح\\x04صم\\x04ضج\\x04ضح\\x04ضخ\" +\n\t\"\\x04ضم\\x04طح\\x04طم\\x04ظم\\x04عج\\x04عم\\x04غج\\x04غم\\x04فج\\x04فح\\x04فخ\\x04فم\" +\n\t\"\\x04فى\\x04في\\x04قح\\x04قم\\x04قى\\x04قي\\x04كا\\x04كج\\x04كح\\x04كخ\\x04كل\\x04كم\" +\n\t\"\\x04كى\\x04كي\\x04لج\\x04لح\\x04لخ\\x04لم\\x04لى\\x04لي\\x04مج\\x04مح\\x04مخ\\x04مم\" +\n\t\"\\x04مى\\x04مي\\x04نج\\x04نح\\x04نخ\\x04نم\\x04نى\\x04ني\\x04هج\\x04هم\\x04هى\\x04هي\" +\n\t\"\\x04يج\\x04يح\\x04يخ\\x04يم\\x04يى\\x04يي\\x04ذٰ\\x04رٰ\\x04ىٰ\\x05 ٌّ\\x05 ٍّ\\x05\" +\n\t\" َّ\\x05 ُّ\\x05 ِّ\\x05 ّٰ\\x04ئر\\x04ئز\\x04ئن\\x04بر\\x04بز\\x04بن\\x04تر\\x04تز\" +\n\t\"\\x04تن\\x04ثر\\x04ثز\\x04ثن\\x04ما\\x04نر\\x04نز\\x04نن\\x04ير\\x04يز\\x04ين\\x04ئخ\" +\n\t\"\\x04ئه\\x04به\\x04ته\\x04صخ\\x04له\\x04نه\\x04هٰ\\x04يه\\x04ثه\\x04سه\\x04شم\\x04شه\" +\n\t\"\\x06ـَّ\\x06ـُّ\\x06ـِّ\\x04طى\\x04طي\\x04عى\\x04عي\\x04غى\\x04غي\\x04سى\\x04سي\" +\n\t\"\\x04شى\\x04شي\\x04حى\\x04حي\\x04جى\\x04جي\\x04خى\\x04خي\\x04صى\\x04صي\\x04ضى\\x04ضي\" +\n\t\"\\x04شج\\x04شح\\x04شخ\\x04شر\\x04سر\\x04صر\\x04ضر\\x04اً\\x06تجم\\x06تحج\\x06تحم\" +\n\t\"\\x06تخم\\x06تمج\\x06تمح\\x06تمخ\\x06جمح\\x06حمي\\x06حمى\\x06سحج\\x06سجح\\x06سجى\" +\n\t\"\\x06سمح\\x06سمج\\x06سمم\\x06صحح\\x06صمم\\x06شحم\\x06شجي\\x06شمخ\\x06شمم\\x06ضحى\" 
+\n\t\"\\x06ضخم\\x06طمح\\x06طمم\\x06طمي\\x06عجم\\x06عمم\\x06عمى\\x06غمم\\x06غمي\\x06غمى\" +\n\t\"\\x06فخم\\x06قمح\\x06قمم\\x06لحم\\x06لحي\\x06لحى\\x06لجج\\x06لخم\\x06لمح\\x06محج\" +\n\t\"\\x06محم\\x06محي\\x06مجح\\x06مجم\\x06مخج\\x06مخم\\x06مجخ\\x06همج\\x06همم\\x06نحم\" +\n\t\"\\x06نحى\\x06نجم\\x06نجى\\x06نمي\\x06نمى\\x06يمم\\x06بخي\\x06تجي\\x06تجى\\x06تخي\" +\n\t\"\\x06تخى\\x06تمي\\x06تمى\\x06جمي\\x06جحى\\x06جمى\\x06سخى\\x06صحي\\x06شحي\\x06ضحي\" +\n\t\"\\x06لجي\\x06لمي\\x06يحي\\x06يجي\\x06يمي\\x06ممي\\x06قمي\\x06نحي\\x06عمي\\x06كمي\" +\n\t\"\\x06نجح\\x06مخي\\x06لجم\\x06كمم\\x06جحي\\x06حجي\\x06مجي\\x06فمي\\x06بحي\\x06سخي\" +\n\t\"\\x06نجي\\x06صلے\\x06قلے\\x08الله\\x08اكبر\\x08محمد\\x08صلعم\\x08رسول\\x08عليه\" +\n\t\"\\x08وسلم\\x06صلى!صلى الله عليه وسلم\\x0fجل جلاله\\x08ریال\\x01,\\x01:\\x01!\" +\n\t\"\\x01?\\x01_\\x01{\\x01}\\x01[\\x01]\\x01#\\x01&\\x01*\\x01-\\x01<\\x01>\\x01\\\\\\x01$\" +\n\t\"\\x01%\\x01@\\x04ـً\\x04ـَ\\x04ـُ\\x04ـِ\\x04ـّ\\x04ـْ\\x02ء\\x02آ\\x02أ\\x02ؤ\\x02إ\" +\n\t\"\\x02ئ\\x02ا\\x02ب\\x02ة\\x02ت\\x02ث\\x02ج\\x02ح\\x02خ\\x02د\\x02ذ\\x02ر\\x02ز\\x02س\" +\n\t\"\\x02ش\\x02ص\\x02ض\\x02ط\\x02ظ\\x02ع\\x02غ\\x02ف\\x02ق\\x02ك\\x02ل\\x02م\\x02ن\\x02ه\" +\n\t\"\\x02و\\x02ي\\x04لآ\\x04لأ\\x04لإ\\x04لا\\x01\\x22\\x01'\\x01/\\x01^\\x01|\\x01~\\x02¢\" +\n\t\"\\x02£\\x02¬\\x02¦\\x02¥\\x08𝅗𝅥\\x08𝅘𝅥\\x0c𝅘𝅥𝅮\\x0c𝅘𝅥𝅯\\x0c𝅘𝅥𝅰\\x0c𝅘𝅥𝅱\\x0c𝅘𝅥𝅲\\x08𝆹\" +\n\t\"𝅥\\x08𝆺𝅥\\x0c𝆹𝅥𝅮\\x0c𝆺𝅥𝅮\\x0c𝆹𝅥𝅯\\x0c𝆺𝅥𝅯\\x02ı\\x02ȷ\\x02α\\x02ε\\x02ζ\\x02η\\x02\" +\n\t\"κ\\x02λ\\x02μ\\x02ν\\x02ξ\\x02ο\\x02σ\\x02τ\\x02υ\\x02ψ\\x03∇\\x03∂\\x02ϝ\\x02ٮ\\x02ڡ\" +\n\t\"\\x02ٯ\\x020,\\x021,\\x022,\\x023,\\x024,\\x025,\\x026,\\x027,\\x028,\\x029,\\x03(a)\" +\n\t\"\\x03(b)\\x03(c)\\x03(d)\\x03(e)\\x03(f)\\x03(g)\\x03(h)\\x03(i)\\x03(j)\\x03(k)\" +\n\t\"\\x03(l)\\x03(m)\\x03(n)\\x03(o)\\x03(p)\\x03(q)\\x03(r)\\x03(s)\\x03(t)\\x03(u)\" +\n\t\"\\x03(v)\\x03(w)\\x03(x)\\x03(y)\\x03(z)\\x07〔s〕\\x02wz\\x02hv\\x02sd\\x03ppv\\x02w\" 
+\n\t\"c\\x02mc\\x02md\\x02dj\\x06ほか\\x06ココ\\x03サ\\x03手\\x03字\\x03双\\x03デ\\x03二\\x03多\\x03解\" +\n\t\"\\x03天\\x03交\\x03映\\x03無\\x03料\\x03前\\x03後\\x03再\\x03新\\x03初\\x03終\\x03生\\x03販\\x03声\" +\n\t\"\\x03吹\\x03演\\x03投\\x03捕\\x03一\\x03三\\x03遊\\x03左\\x03中\\x03右\\x03指\\x03走\\x03打\\x03禁\" +\n\t\"\\x03空\\x03合\\x03満\\x03有\\x03月\\x03申\\x03割\\x03営\\x03配\\x09〔本〕\\x09〔三〕\\x09〔二〕\\x09〔安\" +\n\t\"〕\\x09〔点〕\\x09〔打〕\\x09〔盗〕\\x09〔勝〕\\x09〔敗〕\\x03得\\x03可\\x03丽\\x03丸\\x03乁\\x03你\\x03\" +\n\t\"侮\\x03侻\\x03倂\\x03偺\\x03備\\x03僧\\x03像\\x03㒞\\x03免\\x03兔\\x03兤\\x03具\\x03㒹\\x03內\\x03\" +\n\t\"冗\\x03冤\\x03仌\\x03冬\\x03况\\x03凵\\x03刃\\x03㓟\\x03刻\\x03剆\\x03剷\\x03㔕\\x03勇\\x03勉\\x03\" +\n\t\"勤\\x03勺\\x03包\\x03匆\\x03北\\x03卉\\x03卑\\x03博\\x03即\\x03卽\\x03卿\\x03灰\\x03及\\x03叟\\x03\" +\n\t\"叫\\x03叱\\x03吆\\x03咞\\x03吸\\x03呈\\x03周\\x03咢\\x03哶\\x03唐\\x03啓\\x03啣\\x03善\\x03喙\\x03\" +\n\t\"喫\\x03喳\\x03嗂\\x03圖\\x03嘆\\x03圗\\x03噑\\x03噴\\x03切\\x03壮\\x03城\\x03埴\\x03堍\\x03型\\x03\" +\n\t\"堲\\x03報\\x03墬\\x03売\\x03壷\\x03夆\\x03夢\\x03奢\\x03姬\\x03娛\\x03娧\\x03姘\\x03婦\\x03㛮\\x03\" +\n\t\"嬈\\x03嬾\\x03寃\\x03寘\\x03寧\\x03寳\\x03寿\\x03将\\x03尢\\x03㞁\\x03屠\\x03屮\\x03峀\\x03岍\\x03\" +\n\t\"嵃\\x03嵮\\x03嵫\\x03嵼\\x03巡\\x03巢\\x03㠯\\x03巽\\x03帨\\x03帽\\x03幩\\x03㡢\\x03㡼\\x03庰\\x03\" +\n\t\"庳\\x03庶\\x03廊\\x03廾\\x03舁\\x03弢\\x03㣇\\x03形\\x03彫\\x03㣣\\x03徚\\x03忍\\x03志\\x03忹\\x03\" +\n\t\"悁\\x03㤺\\x03㤜\\x03悔\\x03惇\\x03慈\\x03慌\\x03慎\\x03慺\\x03憎\\x03憲\\x03憤\\x03憯\\x03懞\\x03\" +\n\t\"懲\\x03懶\\x03成\\x03戛\\x03扝\\x03抱\\x03拔\\x03捐\\x03挽\\x03拼\\x03捨\\x03掃\\x03揤\\x03搢\\x03\" +\n\t\"揅\\x03掩\\x03㨮\\x03摩\\x03摾\\x03撝\\x03摷\\x03㩬\\x03敏\\x03敬\\x03旣\\x03書\\x03晉\\x03㬙\\x03\" +\n\t\"暑\\x03㬈\\x03㫤\\x03冒\\x03冕\\x03最\\x03暜\\x03肭\\x03䏙\\x03朗\\x03望\\x03朡\\x03杞\\x03杓\\x03\" +\n\t\"㭉\\x03柺\\x03枅\\x03桒\\x03梅\\x03梎\\x03栟\\x03椔\\x03㮝\\x03楂\\x03榣\\x03槪\\x03檨\\x03櫛\\x03\" +\n\t\"㰘\\x03次\\x03歔\\x03㱎\\x03歲\\x03殟\\x03殺\\x03殻\\x03汎\\x03沿\\x03泍\\x03汧\\x03洖\\x03派\\x03\" +\n\t\"海\\x03流\\x03浩\\x03浸\\x03涅\\x03洴\\x03港\\x03湮\\x03㴳\\x03滋\\x03滇\\x03淹\\x03潮\\x03濆\\x03\" 
+\n\t\"瀹\\x03瀞\\x03瀛\\x03㶖\\x03灊\\x03災\\x03灷\\x03炭\\x03煅\\x03熜\\x03爨\\x03爵\\x03牐\\x03犀\\x03\" +\n\t\"犕\\x03獺\\x03王\\x03㺬\\x03玥\\x03㺸\\x03瑇\\x03瑜\\x03瑱\\x03璅\\x03瓊\\x03㼛\\x03甤\\x03甾\\x03\" +\n\t\"異\\x03瘐\\x03㿼\\x03䀈\\x03直\\x03眞\\x03真\\x03睊\\x03䀹\\x03瞋\\x03䁆\\x03䂖\\x03硎\\x03碌\\x03\" +\n\t\"磌\\x03䃣\\x03祖\\x03福\\x03秫\\x03䄯\\x03穀\\x03穊\\x03穏\\x03䈂\\x03篆\\x03築\\x03䈧\\x03糒\\x03\" +\n\t\"䊠\\x03糨\\x03糣\\x03紀\\x03絣\\x03䌁\\x03緇\\x03縂\\x03繅\\x03䌴\\x03䍙\\x03罺\\x03羕\\x03翺\\x03\" +\n\t\"者\\x03聠\\x03聰\\x03䏕\\x03育\\x03脃\\x03䐋\\x03脾\\x03媵\\x03舄\\x03辞\\x03䑫\\x03芑\\x03芋\\x03\" +\n\t\"芝\\x03劳\\x03花\\x03芳\\x03芽\\x03苦\\x03若\\x03茝\\x03荣\\x03莭\\x03茣\\x03莽\\x03菧\\x03著\\x03\" +\n\t\"荓\\x03菊\\x03菌\\x03菜\\x03䔫\\x03蓱\\x03蓳\\x03蔖\\x03蕤\\x03䕝\\x03䕡\\x03䕫\\x03虐\\x03虜\\x03\" +\n\t\"虧\\x03虩\\x03蚩\\x03蚈\\x03蜎\\x03蛢\\x03蝹\\x03蜨\\x03蝫\\x03螆\\x03蟡\\x03蠁\\x03䗹\\x03衠\\x03\" +\n\t\"衣\\x03裗\\x03裞\\x03䘵\\x03裺\\x03㒻\\x03䚾\\x03䛇\\x03誠\\x03諭\\x03變\\x03豕\\x03貫\\x03賁\\x03\" +\n\t\"贛\\x03起\\x03跋\\x03趼\\x03跰\\x03軔\\x03輸\\x03邔\\x03郱\\x03鄑\\x03鄛\\x03鈸\\x03鋗\\x03鋘\\x03\" +\n\t\"鉼\\x03鏹\\x03鐕\\x03開\\x03䦕\\x03閷\\x03䧦\\x03雃\\x03嶲\\x03霣\\x03䩮\\x03䩶\\x03韠\\x03䪲\\x03\" +\n\t\"頋\\x03頩\\x03飢\\x03䬳\\x03餩\\x03馧\\x03駂\\x03駾\\x03䯎\\x03鬒\\x03鱀\\x03鳽\\x03䳎\\x03䳭\\x03\" +\n\t\"鵧\\x03䳸\\x03麻\\x03䵖\\x03黹\\x03黾\\x03鼅\\x03鼏\\x03鼖\\x03鼻\"\n\nvar xorData string = \"\" + // Size: 4855 bytes\n\t\"\\x02\\x0c\\x09\\x02\\xb0\\xec\\x02\\xad\\xd8\\x02\\xad\\xd9\\x02\\x06\\x07\\x02\\x0f\\x12\" +\n\t\"\\x02\\x0f\\x1f\\x02\\x0f\\x1d\\x02\\x01\\x13\\x02\\x0f\\x16\\x02\\x0f\\x0b\\x02\\x0f3\" +\n\t\"\\x02\\x0f7\\x02\\x0f?\\x02\\x0f/\\x02\\x0f*\\x02\\x0c&\\x02\\x0c*\\x02\\x0c;\\x02\\x0c9\" +\n\t\"\\x02\\x0c%\\x02\\xab\\xed\\x02\\xab\\xe2\\x02\\xab\\xe3\\x02\\xa9\\xe0\\x02\\xa9\\xe1\" +\n\t\"\\x02\\xa9\\xe6\\x02\\xa3\\xcb\\x02\\xa3\\xc8\\x02\\xa3\\xc9\\x02\\x01#\\x02\\x01\\x08\" +\n\t\"\\x02\\x0e>\\x02\\x0e'\\x02\\x0f\\x03\\x02\\x03\\x0d\\x02\\x03\\x09\\x02\\x03\\x17\\x02\" 
+\n\t\"\\x03\\x0e\\x02\\x02\\x03\\x02\\x011\\x02\\x01\\x00\\x02\\x01\\x10\\x02\\x03<\\x02\\x07\" +\n\t\"\\x0d\\x02\\x02\\x0c\\x02\\x0c0\\x02\\x01\\x03\\x02\\x01\\x01\\x02\\x01 \\x02\\x01\\x22\" +\n\t\"\\x02\\x01)\\x02\\x01\\x0a\\x02\\x01\\x0c\\x02\\x02\\x06\\x02\\x02\\x02\\x02\\x03\\x10\" +\n\t\"\\x03\\x037 \\x03\\x0b+\\x03\\x02\\x01\\x04\\x02\\x01\\x02\\x02\\x019\\x02\\x03\\x1c\\x02\" +\n\t\"\\x02$\\x03\\x80p$\\x02\\x03:\\x02\\x03\\x0a\\x03\\xc1r.\\x03\\xc1r,\\x03\\xc1r\\x02\" +\n\t\"\\x02\\x02:\\x02\\x02>\\x02\\x02,\\x02\\x02\\x10\\x02\\x02\\x00\\x03\\xc1s<\\x03\\xc1s*\" +\n\t\"\\x03\\xc2L$\\x03\\xc2L;\\x02\\x09)\\x02\\x0a\\x19\\x03\\x83\\xab\\xe3\\x03\\x83\\xab\" +\n\t\"\\xf2\\x03 4\\xe0\\x03\\x81\\xab\\xea\\x03\\x81\\xab\\xf3\\x03 4\\xef\\x03\\x96\\xe1\\xcd\" +\n\t\"\\x03\\x84\\xe5\\xc3\\x02\\x0d\\x11\\x03\\x8b\\xec\\xcb\\x03\\x94\\xec\\xcf\\x03\\x9a\\xec\" +\n\t\"\\xc2\\x03\\x8b\\xec\\xdb\\x03\\x94\\xec\\xdf\\x03\\x9a\\xec\\xd2\\x03\\x01\\x0c!\\x03\" +\n\t\"\\x01\\x0c#\\x03ʠ\\x9d\\x03ʣ\\x9c\\x03ʢ\\x9f\\x03ʥ\\x9e\\x03ʤ\\x91\\x03ʧ\\x90\\x03ʦ\\x93\" +\n\t\"\\x03ʩ\\x92\\x03ʨ\\x95\\x03\\xca\\xf3\\xb5\\x03\\xca\\xf0\\xb4\\x03\\xca\\xf1\\xb7\\x03\" +\n\t\"\\xca\\xf6\\xb6\\x03\\xca\\xf7\\x89\\x03\\xca\\xf4\\x88\\x03\\xca\\xf5\\x8b\\x03\\xca\\xfa\" +\n\t\"\\x8a\\x03\\xca\\xfb\\x8d\\x03\\xca\\xf8\\x8c\\x03\\xca\\xf9\\x8f\\x03\\xca\\xfe\\x8e\\x03\" +\n\t\"\\xca\\xff\\x81\\x03\\xca\\xfc\\x80\\x03\\xca\\xfd\\x83\\x03\\xca\\xe2\\x82\\x03\\xca\\xe3\" +\n\t\"\\x85\\x03\\xca\\xe0\\x84\\x03\\xca\\xe1\\x87\\x03\\xca\\xe6\\x86\\x03\\xca\\xe7\\x99\\x03\" +\n\t\"\\xca\\xe4\\x98\\x03\\xca\\xe5\\x9b\\x03\\xca\\xea\\x9a\\x03\\xca\\xeb\\x9d\\x03\\xca\\xe8\" +\n\t\"\\x9c\\x03ؓ\\x89\\x03ߔ\\x8b\\x02\\x010\\x03\\x03\\x04\\x1e\\x03\\x04\\x15\\x12\\x03\\x0b\" +\n\t\"\\x05,\\x03\\x06\\x04\\x00\\x03\\x06\\x04)\\x03\\x06\\x044\\x03\\x06\\x04<\\x03\\x06\\x05\" +\n\t\"\\x1d\\x03\\x06\\x06\\x00\\x03\\x06\\x06\\x0a\\x03\\x06\\x06'\\x03\\x06\\x062\\x03\\x0786\" +\n\t\"\\x03\\x079/\\x03\\x079 
\\x03\\x07:\\x0e\\x03\\x07:\\x1b\\x03\\x07:%\\x03\\x07;/\\x03\" +\n\t\"\\x07;%\\x03\\x074\\x11\\x03\\x076\\x09\\x03\\x077*\\x03\\x070\\x01\\x03\\x070\\x0f\\x03\" +\n\t\"\\x070.\\x03\\x071\\x16\\x03\\x071\\x04\\x03\\x0710\\x03\\x072\\x18\\x03\\x072-\\x03\" +\n\t\"\\x073\\x14\\x03\\x073>\\x03\\x07'\\x09\\x03\\x07 \\x00\\x03\\x07\\x1f\\x0b\\x03\\x07\" +\n\t\"\\x18#\\x03\\x07\\x18(\\x03\\x07\\x186\\x03\\x07\\x18\\x03\\x03\\x07\\x19\\x16\\x03\\x07\" +\n\t\"\\x116\\x03\\x07\\x12'\\x03\\x07\\x13\\x10\\x03\\x07\\x0c&\\x03\\x07\\x0c\\x08\\x03\\x07\" +\n\t\"\\x0c\\x13\\x03\\x07\\x0d\\x02\\x03\\x07\\x0d\\x1c\\x03\\x07\\x0b5\\x03\\x07\\x0b\\x0a\" +\n\t\"\\x03\\x07\\x0b\\x01\\x03\\x07\\x0b\\x0f\\x03\\x07\\x05\\x00\\x03\\x07\\x05\\x09\\x03\\x07\" +\n\t\"\\x05\\x0b\\x03\\x07\\x07\\x01\\x03\\x07\\x07\\x08\\x03\\x07\\x00<\\x03\\x07\\x00+\\x03\" +\n\t\"\\x07\\x01)\\x03\\x07\\x01\\x1b\\x03\\x07\\x01\\x08\\x03\\x07\\x03?\\x03\\x0445\\x03\\x04\" +\n\t\"4\\x08\\x03\\x0454\\x03\\x04)/\\x03\\x04)5\\x03\\x04+\\x05\\x03\\x04+\\x14\\x03\\x04+ \" +\n\t\"\\x03\\x04+<\\x03\\x04*&\\x03\\x04*\\x22\\x03\\x04&8\\x03\\x04!\\x01\\x03\\x04!\\x22\" +\n\t\"\\x03\\x04\\x11+\\x03\\x04\\x10.\\x03\\x04\\x104\\x03\\x04\\x13=\\x03\\x04\\x12\\x04\\x03\" +\n\t\"\\x04\\x12\\x0a\\x03\\x04\\x0d\\x1d\\x03\\x04\\x0d\\x07\\x03\\x04\\x0d \\x03\\x05<>\\x03\" +\n\t\"\\x055<\\x03\\x055!\\x03\\x055#\\x03\\x055&\\x03\\x054\\x1d\\x03\\x054\\x02\\x03\\x054\" +\n\t\"\\x07\\x03\\x0571\\x03\\x053\\x1a\\x03\\x053\\x16\\x03\\x05.<\\x03\\x05.\\x07\\x03\\x05)\" +\n\t\":\\x03\\x05)<\\x03\\x05)\\x0c\\x03\\x05)\\x15\\x03\\x05+-\\x03\\x05+5\\x03\\x05$\\x1e\" +\n\t\"\\x03\\x05$\\x14\\x03\\x05'\\x04\\x03\\x05'\\x14\\x03\\x05&\\x02\\x03\\x05\\x226\\x03\" +\n\t\"\\x05\\x22\\x0c\\x03\\x05\\x22\\x1c\\x03\\x05\\x19\\x0a\\x03\\x05\\x1b\\x09\\x03\\x05\\x1b\" +\n\t\"\\x0c\\x03\\x05\\x14\\x07\\x03\\x05\\x16?\\x03\\x05\\x16\\x0c\\x03\\x05\\x0c\\x05\\x03\" 
+\n\t\"\\x05\\x0e\\x0f\\x03\\x05\\x01\\x0e\\x03\\x05\\x00(\\x03\\x05\\x030\\x03\\x05\\x03\\x06\" +\n\t\"\\x03\\x0a==\\x03\\x0a=1\\x03\\x0a=,\\x03\\x0a=\\x0c\\x03\\x0a??\\x03\\x0a<\\x08\\x03\" +\n\t\"\\x0a9!\\x03\\x0a9)\\x03\\x0a97\\x03\\x0a99\\x03\\x0a6\\x0a\\x03\\x0a6\\x1c\\x03\\x0a6\" +\n\t\"\\x17\\x03\\x0a7'\\x03\\x0a78\\x03\\x0a73\\x03\\x0a'\\x01\\x03\\x0a'&\\x03\\x0a\\x1f\" +\n\t\"\\x0e\\x03\\x0a\\x1f\\x03\\x03\\x0a\\x1f3\\x03\\x0a\\x1b/\\x03\\x0a\\x18\\x19\\x03\\x0a\" +\n\t\"\\x19\\x01\\x03\\x0a\\x16\\x14\\x03\\x0a\\x0e\\x22\\x03\\x0a\\x0f\\x10\\x03\\x0a\\x0f\\x02\" +\n\t\"\\x03\\x0a\\x0f \\x03\\x0a\\x0c\\x04\\x03\\x0a\\x0b>\\x03\\x0a\\x0b+\\x03\\x0a\\x08/\\x03\" +\n\t\"\\x0a\\x046\\x03\\x0a\\x05\\x14\\x03\\x0a\\x00\\x04\\x03\\x0a\\x00\\x10\\x03\\x0a\\x00\" +\n\t\"\\x14\\x03\\x0b<3\\x03\\x0b;*\\x03\\x0b9\\x22\\x03\\x0b9)\\x03\\x0b97\\x03\\x0b+\\x10\" +\n\t\"\\x03\\x0b((\\x03\\x0b&5\\x03\\x0b$\\x1c\\x03\\x0b$\\x12\\x03\\x0b%\\x04\\x03\\x0b#<\" +\n\t\"\\x03\\x0b#0\\x03\\x0b#\\x0d\\x03\\x0b#\\x19\\x03\\x0b!:\\x03\\x0b!\\x1f\\x03\\x0b!\\x00\" +\n\t\"\\x03\\x0b\\x1e5\\x03\\x0b\\x1c\\x1d\\x03\\x0b\\x1d-\\x03\\x0b\\x1d(\\x03\\x0b\\x18.\\x03\" +\n\t\"\\x0b\\x18 \\x03\\x0b\\x18\\x16\\x03\\x0b\\x14\\x13\\x03\\x0b\\x15$\\x03\\x0b\\x15\\x22\" +\n\t\"\\x03\\x0b\\x12\\x1b\\x03\\x0b\\x12\\x10\\x03\\x0b\\x132\\x03\\x0b\\x13=\\x03\\x0b\\x12\" +\n\t\"\\x18\\x03\\x0b\\x0c&\\x03\\x0b\\x061\\x03\\x0b\\x06:\\x03\\x0b\\x05#\\x03\\x0b\\x05<\" +\n\t\"\\x03\\x0b\\x04\\x0b\\x03\\x0b\\x04\\x04\\x03\\x0b\\x04\\x1b\\x03\\x0b\\x042\\x03\\x0b\" +\n\t\"\\x041\\x03\\x0b\\x03\\x03\\x03\\x0b\\x03\\x1d\\x03\\x0b\\x03/\\x03\\x0b\\x03+\\x03\\x0b\" +\n\t\"\\x02\\x1b\\x03\\x0b\\x02\\x00\\x03\\x0b\\x01\\x1e\\x03\\x0b\\x01\\x08\\x03\\x0b\\x015\" +\n\t\"\\x03\\x06\\x0d9\\x03\\x06\\x0d=\\x03\\x06\\x0d?\\x03\\x02\\x001\\x03\\x02\\x003\\x03\" +\n\t\"\\x02\\x02\\x19\\x03\\x02\\x006\\x03\\x02\\x02\\x1b\\x03\\x02\\x004\\x03\\x02\\x00<\\x03\" 
+\n\t\"\\x02\\x02\\x0a\\x03\\x02\\x02\\x0e\\x03\\x02\\x01\\x1a\\x03\\x02\\x01\\x07\\x03\\x02\\x01\" +\n\t\"\\x05\\x03\\x02\\x01\\x0b\\x03\\x02\\x01%\\x03\\x02\\x01\\x0c\\x03\\x02\\x01\\x04\\x03\" +\n\t\"\\x02\\x01\\x1c\\x03\\x02\\x00.\\x03\\x02\\x002\\x03\\x02\\x00>\\x03\\x02\\x00\\x12\\x03\" +\n\t\"\\x02\\x00\\x16\\x03\\x02\\x011\\x03\\x02\\x013\\x03\\x02\\x02 \\x03\\x02\\x02%\\x03\\x02\" +\n\t\"\\x02$\\x03\\x02\\x028\\x03\\x02\\x02;\\x03\\x02\\x024\\x03\\x02\\x012\\x03\\x02\\x022\" +\n\t\"\\x03\\x02\\x02/\\x03\\x02\\x01,\\x03\\x02\\x01\\x13\\x03\\x02\\x01\\x16\\x03\\x02\\x01\" +\n\t\"\\x11\\x03\\x02\\x01\\x1e\\x03\\x02\\x01\\x15\\x03\\x02\\x01\\x17\\x03\\x02\\x01\\x0f\\x03\" +\n\t\"\\x02\\x01\\x08\\x03\\x02\\x00?\\x03\\x02\\x03\\x07\\x03\\x02\\x03\\x0d\\x03\\x02\\x03\" +\n\t\"\\x13\\x03\\x02\\x03\\x1d\\x03\\x02\\x03\\x1f\\x03\\x02\\x00\\x03\\x03\\x02\\x00\\x0d\\x03\" +\n\t\"\\x02\\x00\\x01\\x03\\x02\\x00\\x1b\\x03\\x02\\x00\\x19\\x03\\x02\\x00\\x18\\x03\\x02\\x00\" +\n\t\"\\x13\\x03\\x02\\x00/\\x03\\x07>\\x12\\x03\\x07<\\x1f\\x03\\x07>\\x1d\\x03\\x06\\x1d\\x0e\" +\n\t\"\\x03\\x07>\\x1c\\x03\\x07>:\\x03\\x07>\\x13\\x03\\x04\\x12+\\x03\\x07?\\x03\\x03\\x07>\" +\n\t\"\\x02\\x03\\x06\\x224\\x03\\x06\\x1a.\\x03\\x07<%\\x03\\x06\\x1c\\x0b\\x03\\x0609\\x03\" +\n\t\"\\x05\\x1f\\x01\\x03\\x04'\\x08\\x03\\x93\\xfd\\xf5\\x03\\x02\\x0d \\x03\\x02\\x0d#\\x03\" +\n\t\"\\x02\\x0d!\\x03\\x02\\x0d&\\x03\\x02\\x0d\\x22\\x03\\x02\\x0d/\\x03\\x02\\x0d,\\x03\\x02\" +\n\t\"\\x0d$\\x03\\x02\\x0d'\\x03\\x02\\x0d%\\x03\\x02\\x0d;\\x03\\x02\\x0d=\\x03\\x02\\x0d?\" +\n\t\"\\x03\\x099.\\x03\\x08\\x0b7\\x03\\x08\\x02\\x14\\x03\\x08\\x14\\x0d\\x03\\x08.:\\x03\" +\n\t\"\\x089'\\x03\\x0f\\x0b\\x18\\x03\\x0f\\x1c1\\x03\\x0f\\x17&\\x03\\x0f9\\x1f\\x03\\x0f0\" +\n\t\"\\x0c\\x03\\x0e\\x0a9\\x03\\x0e\\x056\\x03\\x0e\\x1c#\\x03\\x0f\\x13\\x0e\\x03\\x072\\x00\" +\n\t\"\\x03\\x070\\x0d\\x03\\x072\\x0b\\x03\\x06\\x11\\x18\\x03\\x070\\x10\\x03\\x06\\x0f(\\x03\" 
+\n\t\"\\x072\\x05\\x03\\x06\\x0f,\\x03\\x073\\x15\\x03\\x06\\x07\\x08\\x03\\x05\\x16\\x02\\x03\" +\n\t\"\\x04\\x0b \\x03\\x05:8\\x03\\x05\\x16%\\x03\\x0a\\x0d\\x1f\\x03\\x06\\x16\\x10\\x03\\x05\" +\n\t\"\\x1d5\\x03\\x05*;\\x03\\x05\\x16\\x1b\\x03\\x04.-\\x03\\x06\\x1a\\x19\\x03\\x04\\x03,\" +\n\t\"\\x03\\x0b87\\x03\\x04/\\x0a\\x03\\x06\\x00,\\x03\\x04-\\x01\\x03\\x04\\x1e-\\x03\\x06/(\" +\n\t\"\\x03\\x0a\\x0b5\\x03\\x06\\x0e7\\x03\\x06\\x07.\\x03\\x0597\\x03\\x0a*%\\x03\\x0760\" +\n\t\"\\x03\\x06\\x0c;\\x03\\x05'\\x00\\x03\\x072.\\x03\\x072\\x08\\x03\\x06=\\x01\\x03\\x06\" +\n\t\"\\x05\\x1b\\x03\\x06\\x06\\x12\\x03\\x06$=\\x03\\x06'\\x0d\\x03\\x04\\x11\\x0f\\x03\\x076\" +\n\t\",\\x03\\x06\\x07;\\x03\\x06.,\\x03\\x86\\xf9\\xea\\x03\\x8f\\xff\\xeb\\x02\\x092\\x02\" +\n\t\"\\x095\\x02\\x094\\x02\\x09;\\x02\\x09>\\x02\\x098\\x02\\x09*\\x02\\x09/\\x02\\x09,\\x02\" +\n\t\"\\x09%\\x02\\x09&\\x02\\x09#\\x02\\x09 \\x02\\x08!\\x02\\x08%\\x02\\x08$\\x02\\x08+\\x02\" +\n\t\"\\x08.\\x02\\x08*\\x02\\x08&\\x02\\x088\\x02\\x08>\\x02\\x084\\x02\\x086\\x02\\x080\\x02\" +\n\t\"\\x08\\x10\\x02\\x08\\x17\\x02\\x08\\x12\\x02\\x08\\x1d\\x02\\x08\\x1f\\x02\\x08\\x13\\x02\" +\n\t\"\\x08\\x15\\x02\\x08\\x14\\x02\\x08\\x0c\\x03\\x8b\\xfd\\xd0\\x03\\x81\\xec\\xc6\\x03\\x87\" +\n\t\"\\xe0\\x8a\\x03-2\\xe3\\x03\\x80\\xef\\xe4\\x03-2\\xea\\x03\\x88\\xe6\\xeb\\x03\\x8e\\xe6\" +\n\t\"\\xe8\\x03\\x84\\xe6\\xe9\\x03\\x97\\xe6\\xee\\x03-2\\xf9\\x03-2\\xf6\\x03\\x8e\\xe3\\xad\" +\n\t\"\\x03\\x80\\xe3\\x92\\x03\\x88\\xe3\\x90\\x03\\x8e\\xe3\\x90\\x03\\x80\\xe3\\x97\\x03\\x88\" +\n\t\"\\xe3\\x95\\x03\\x88\\xfe\\xcb\\x03\\x8e\\xfe\\xca\\x03\\x84\\xfe\\xcd\\x03\\x91\\xef\\xc9\" +\n\t\"\\x03-2\\xc1\\x03-2\\xc0\\x03-2\\xcb\\x03\\x88@\\x09\\x03\\x8e@\\x08\\x03\\x8f\\xe0\\xf5\" +\n\t\"\\x03\\x8e\\xe6\\xf9\\x03\\x8e\\xe0\\xfa\\x03\\x93\\xff\\xf4\\x03\\x84\\xee\\xd3\\x03\\x0b\" +\n\t\"(\\x04\\x023 \\x021;\\x02\\x01*\\x03\\x0b#\\x10\\x03\\x0b 0\\x03\\x0b!\\x10\\x03\\x0b!0\" 
+\n\t\"\\x03\\x07\\x15\\x08\\x03\\x09?5\\x03\\x07\\x1f\\x08\\x03\\x07\\x17\\x0b\\x03\\x09\\x1f\" +\n\t\"\\x15\\x03\\x0b\\x1c7\\x03\\x0a+#\\x03\\x06\\x1a\\x1b\\x03\\x06\\x1a\\x14\\x03\\x0a\\x01\" +\n\t\"\\x18\\x03\\x06#\\x1b\\x03\\x0a2\\x0c\\x03\\x0a\\x01\\x04\\x03\\x09#;\\x03\\x08='\\x03\" +\n\t\"\\x08\\x1a\\x0a\\x03\\x07</\\x03\\x07:+\\x03\\x07\\x07*\\x03\\x06&\\x1c\\x03\\x09\\x0c\" +\n\t\"\\x16\\x03\\x09\\x10\\x0e\\x03\\x08'\\x0f\\x03\\x08+\\x09\\x03\\x074%\\x03\\x06!3\\x03\" +\n\t\"\\x06\\x03+\\x03\\x0b\\x1e\\x19\\x03\\x0a))\\x03\\x09\\x08\\x19\\x03\\x08,\\x05\\x03\\x07\" +\n\t\"<2\\x03\\x06\\x1c>\\x03\\x0a\\x111\\x03\\x09\\x1b\\x09\\x03\\x073.\\x03\\x07\\x01\\x00\" +\n\t\"\\x03\\x09/,\\x03\\x07#>\\x03\\x07\\x048\\x03\\x0a\\x1f\\x22\\x03\\x098>\\x03\\x09\\x11\" +\n\t\"\\x00\\x03\\x08/\\x17\\x03\\x06'\\x22\\x03\\x0b\\x1a+\\x03\\x0a\\x22\\x19\\x03\\x0a/1\" +\n\t\"\\x03\\x0974\\x03\\x09\\x0f\\x22\\x03\\x08,\\x22\\x03\\x08?\\x14\\x03\\x07$5\\x03\\x07<3\" +\n\t\"\\x03\\x07=*\\x03\\x07\\x13\\x18\\x03\\x068\\x0a\\x03\\x06\\x09\\x16\\x03\\x06\\x13\\x00\" +\n\t\"\\x03\\x08\\x067\\x03\\x08\\x01\\x03\\x03\\x08\\x12\\x1d\\x03\\x07+7\\x03\\x06(;\\x03\" +\n\t\"\\x06\\x1c?\\x03\\x07\\x0e\\x17\\x03\\x0a\\x06\\x1d\\x03\\x0a\\x19\\x07\\x03\\x08\\x14$\" +\n\t\"\\x03\\x07$;\\x03\\x08,$\\x03\\x08\\x06\\x0d\\x03\\x07\\x16\\x0a\\x03\\x06>>\\x03\\x0a\" +\n\t\"\\x06\\x12\\x03\\x0a\\x14)\\x03\\x09\\x0d\\x1f\\x03\\x09\\x12\\x17\\x03\\x09\\x19\\x01\" +\n\t\"\\x03\\x08\\x11 \\x03\\x08\\x1d'\\x03\\x06<\\x1a\\x03\\x0a.\\x00\\x03\\x07'\\x18\\x03\" +\n\t\"\\x0a\\x22\\x08\\x03\\x08\\x0d\\x0a\\x03\\x08\\x13)\\x03\\x07*)\\x03\\x06<,\\x03\\x07\" +\n\t\"\\x0b\\x1a\\x03\\x09.\\x14\\x03\\x09\\x0d\\x1e\\x03\\x07\\x0e#\\x03\\x0b\\x1d'\\x03\\x0a\" +\n\t\"\\x0a8\\x03\\x09%2\\x03\\x08+&\\x03\\x080\\x12\\x03\\x0a)4\\x03\\x08\\x06\\x1f\\x03\\x0b\" +\n\t\"\\x1b\\x1a\\x03\\x0a\\x1b\\x0f\\x03\\x0b\\x1d*\\x03\\x09\\x16$\\x03\\x090\\x11\\x03\\x08\" 
+\n\t\"\\x11\\x08\\x03\\x0a*(\\x03\\x0a\\x042\\x03\\x089,\\x03\\x074'\\x03\\x07\\x0f\\x05\\x03\" +\n\t\"\\x09\\x0b\\x0a\\x03\\x07\\x1b\\x01\\x03\\x09\\x17:\\x03\\x09.\\x0d\\x03\\x07.\\x11\\x03\" +\n\t\"\\x09+\\x15\\x03\\x080\\x13\\x03\\x0b\\x1f\\x19\\x03\\x0a \\x11\\x03\\x0a\\x220\\x03\\x09\" +\n\t\"\\x07;\\x03\\x08\\x16\\x1c\\x03\\x07,\\x13\\x03\\x07\\x0e/\\x03\\x06\\x221\\x03\\x0a.\" +\n\t\"\\x0a\\x03\\x0a7\\x02\\x03\\x0a\\x032\\x03\\x0a\\x1d.\\x03\\x091\\x06\\x03\\x09\\x19:\" +\n\t\"\\x03\\x08\\x02/\\x03\\x060+\\x03\\x06\\x0f-\\x03\\x06\\x1c\\x1f\\x03\\x06\\x1d\\x07\\x03\" +\n\t\"\\x0a,\\x11\\x03\\x09=\\x0d\\x03\\x09\\x0b;\\x03\\x07\\x1b/\\x03\\x0a\\x1f:\\x03\\x09 \" +\n\t\"\\x1f\\x03\\x09.\\x10\\x03\\x094\\x0b\\x03\\x09\\x1a1\\x03\\x08#\\x1a\\x03\\x084\\x1d\" +\n\t\"\\x03\\x08\\x01\\x1f\\x03\\x08\\x11\\x22\\x03\\x07'8\\x03\\x07\\x1a>\\x03\\x0757\\x03\" +\n\t\"\\x06&9\\x03\\x06+\\x11\\x03\\x0a.\\x0b\\x03\\x0a,>\\x03\\x0a4#\\x03\\x08%\\x17\\x03\" +\n\t\"\\x07\\x05\\x22\\x03\\x07\\x0c\\x0b\\x03\\x0a\\x1d+\\x03\\x0a\\x19\\x16\\x03\\x09+\\x1f\" +\n\t\"\\x03\\x09\\x08\\x0b\\x03\\x08\\x16\\x18\\x03\\x08+\\x12\\x03\\x0b\\x1d\\x0c\\x03\\x0a=\" +\n\t\"\\x10\\x03\\x0a\\x09\\x0d\\x03\\x0a\\x10\\x11\\x03\\x09&0\\x03\\x08(\\x1f\\x03\\x087\\x07\" +\n\t\"\\x03\\x08\\x185\\x03\\x07'6\\x03\\x06.\\x05\\x03\\x06=\\x04\\x03\\x06;;\\x03\\x06\\x06,\" +\n\t\"\\x03\\x0b\\x18>\\x03\\x08\\x00\\x18\\x03\\x06 \\x03\\x03\\x06<\\x00\\x03\\x09%\\x18\\x03\" +\n\t\"\\x0b\\x1c<\\x03\\x0a%!\\x03\\x0a\\x09\\x12\\x03\\x0a\\x16\\x02\\x03\\x090'\\x03\\x09\" +\n\t\"\\x0e=\\x03\\x08 \\x0e\\x03\\x08>\\x03\\x03\\x074>\\x03\\x06&?\\x03\\x06\\x19\\x09\\x03\" +\n\t\"\\x06?(\\x03\\x0a-\\x0e\\x03\\x09:3\\x03\\x098:\\x03\\x09\\x12\\x0b\\x03\\x09\\x1d\\x17\" +\n\t\"\\x03\\x087\\x05\\x03\\x082\\x14\\x03\\x08\\x06%\\x03\\x08\\x13\\x1f\\x03\\x06\\x06\\x0e\" +\n\t\"\\x03\\x0a\\x22<\\x03\\x09/<\\x03\\x06>+\\x03\\x0a'?\\x03\\x0a\\x13\\x0c\\x03\\x09\\x10<\" 
+\n\t\"\\x03\\x07\\x1b=\\x03\\x0a\\x19\\x13\\x03\\x09\\x22\\x1d\\x03\\x09\\x07\\x0d\\x03\\x08)\" +\n\t\"\\x1c\\x03\\x06=\\x1a\\x03\\x0a/4\\x03\\x0a7\\x11\\x03\\x0a\\x16:\\x03\\x09?3\\x03\\x09:\" +\n\t\"/\\x03\\x09\\x05\\x0a\\x03\\x09\\x14\\x06\\x03\\x087\\x22\\x03\\x080\\x07\\x03\\x08\\x1a\" +\n\t\"\\x1f\\x03\\x07\\x04(\\x03\\x07\\x04\\x09\\x03\\x06 %\\x03\\x06<\\x08\\x03\\x0a+\\x14\" +\n\t\"\\x03\\x09\\x1d\\x16\\x03\\x0a70\\x03\\x08 >\\x03\\x0857\\x03\\x070\\x0a\\x03\\x06=\\x12\" +\n\t\"\\x03\\x06\\x16%\\x03\\x06\\x1d,\\x03\\x099#\\x03\\x09\\x10>\\x03\\x07 \\x1e\\x03\\x08\" +\n\t\"\\x0c<\\x03\\x08\\x0b\\x18\\x03\\x08\\x15+\\x03\\x08,:\\x03\\x08%\\x22\\x03\\x07\\x0a$\" +\n\t\"\\x03\\x0b\\x1c=\\x03\\x07+\\x08\\x03\\x0a/\\x05\\x03\\x0a \\x07\\x03\\x0a\\x12'\\x03\" +\n\t\"\\x09#\\x11\\x03\\x08\\x1b\\x15\\x03\\x0a\\x06\\x01\\x03\\x09\\x1c\\x1b\\x03\\x0922\\x03\" +\n\t\"\\x07\\x14<\\x03\\x07\\x09\\x04\\x03\\x061\\x04\\x03\\x07\\x0e\\x01\\x03\\x0a\\x13\\x18\" +\n\t\"\\x03\\x0a-\\x0c\\x03\\x0a?\\x0d\\x03\\x0a\\x09\\x0a\\x03\\x091&\\x03\\x0a/\\x0b\\x03\" +\n\t\"\\x08$<\\x03\\x083\\x1d\\x03\\x08\\x0c$\\x03\\x08\\x0d\\x07\\x03\\x08\\x0d?\\x03\\x08\" +\n\t\"\\x0e\\x14\\x03\\x065\\x0a\\x03\\x08\\x1a#\\x03\\x08\\x16#\\x03\\x0702\\x03\\x07\\x03\" +\n\t\"\\x1a\\x03\\x06(\\x1d\\x03\\x06+\\x1b\\x03\\x06\\x0b\\x05\\x03\\x06\\x0b\\x17\\x03\\x06\" +\n\t\"\\x0c\\x04\\x03\\x06\\x1e\\x19\\x03\\x06+0\\x03\\x062\\x18\\x03\\x0b\\x16\\x1e\\x03\\x0a+\" +\n\t\"\\x16\\x03\\x0a-?\\x03\\x0a#:\\x03\\x0a#\\x10\\x03\\x0a%$\\x03\\x0a>+\\x03\\x0a01\\x03\" +\n\t\"\\x0a1\\x10\\x03\\x0a\\x099\\x03\\x0a\\x0a\\x12\\x03\\x0a\\x19\\x1f\\x03\\x0a\\x19\\x12\" +\n\t\"\\x03\\x09*)\\x03\\x09-\\x16\\x03\\x09.1\\x03\\x09.2\\x03\\x09<\\x0e\\x03\\x09> \\x03\" +\n\t\"\\x093\\x12\\x03\\x09\\x0b\\x01\\x03\\x09\\x1c2\\x03\\x09\\x11\\x1c\\x03\\x09\\x15%\\x03\" +\n\t\"\\x08,&\\x03\\x08!\\x22\\x03\\x089(\\x03\\x08\\x0b\\x1a\\x03\\x08\\x0d2\\x03\\x08\\x0c\" 
+\n\t\"\\x04\\x03\\x08\\x0c\\x06\\x03\\x08\\x0c\\x1f\\x03\\x08\\x0c\\x0c\\x03\\x08\\x0f\\x1f\\x03\" +\n\t\"\\x08\\x0f\\x1d\\x03\\x08\\x00\\x14\\x03\\x08\\x03\\x14\\x03\\x08\\x06\\x16\\x03\\x08\\x1e\" +\n\t\"#\\x03\\x08\\x11\\x11\\x03\\x08\\x10\\x18\\x03\\x08\\x14(\\x03\\x07)\\x1e\\x03\\x07.1\" +\n\t\"\\x03\\x07 $\\x03\\x07 '\\x03\\x078\\x08\\x03\\x07\\x0d0\\x03\\x07\\x0f7\\x03\\x07\\x05#\" +\n\t\"\\x03\\x07\\x05\\x1a\\x03\\x07\\x1a7\\x03\\x07\\x1d-\\x03\\x07\\x17\\x10\\x03\\x06)\\x1f\" +\n\t\"\\x03\\x062\\x0b\\x03\\x066\\x16\\x03\\x06\\x09\\x11\\x03\\x09(\\x1e\\x03\\x07!5\\x03\" +\n\t\"\\x0b\\x11\\x16\\x03\\x0a/\\x04\\x03\\x0a,\\x1a\\x03\\x0b\\x173\\x03\\x0a,1\\x03\\x0a/5\" +\n\t\"\\x03\\x0a\\x221\\x03\\x0a\\x22\\x0d\\x03\\x0a?%\\x03\\x0a<,\\x03\\x0a?#\\x03\\x0a>\\x19\" +\n\t\"\\x03\\x0a\\x08&\\x03\\x0a\\x0b\\x0e\\x03\\x0a\\x0c:\\x03\\x0a\\x0c+\\x03\\x0a\\x03\\x22\" +\n\t\"\\x03\\x0a\\x06)\\x03\\x0a\\x11\\x10\\x03\\x0a\\x11\\x1a\\x03\\x0a\\x17-\\x03\\x0a\\x14(\" +\n\t\"\\x03\\x09)\\x1e\\x03\\x09/\\x09\\x03\\x09.\\x00\\x03\\x09,\\x07\\x03\\x09/*\\x03\\x09-9\" +\n\t\"\\x03\\x09\\x228\\x03\\x09%\\x09\\x03\\x09:\\x12\\x03\\x09;\\x1d\\x03\\x09?\\x06\\x03\" +\n\t\"\\x093%\\x03\\x096\\x05\\x03\\x096\\x08\\x03\\x097\\x02\\x03\\x09\\x07,\\x03\\x09\\x04,\" +\n\t\"\\x03\\x09\\x1f\\x16\\x03\\x09\\x11\\x03\\x03\\x09\\x11\\x12\\x03\\x09\\x168\\x03\\x08*\" +\n\t\"\\x05\\x03\\x08/2\\x03\\x084:\\x03\\x08\\x22+\\x03\\x08 0\\x03\\x08&\\x0a\\x03\\x08;\" +\n\t\"\\x10\\x03\\x08>$\\x03\\x08>\\x18\\x03\\x0829\\x03\\x082:\\x03\\x081,\\x03\\x081<\\x03\" +\n\t\"\\x081\\x1c\\x03\\x087#\\x03\\x087*\\x03\\x08\\x09'\\x03\\x08\\x00\\x1d\\x03\\x08\\x05-\" +\n\t\"\\x03\\x08\\x1f4\\x03\\x08\\x1d\\x04\\x03\\x08\\x16\\x0f\\x03\\x07*7\\x03\\x07'!\\x03\" +\n\t\"\\x07%\\x1b\\x03\\x077\\x0c\\x03\\x07\\x0c1\\x03\\x07\\x0c.\\x03\\x07\\x00\\x06\\x03\\x07\" +\n\t\"\\x01\\x02\\x03\\x07\\x010\\x03\\x07\\x06=\\x03\\x07\\x01\\x03\\x03\\x07\\x01\\x13\\x03\" 
+\n\t\"\\x07\\x06\\x06\\x03\\x07\\x05\\x0a\\x03\\x07\\x1f\\x09\\x03\\x07\\x17:\\x03\\x06*1\\x03\" +\n\t\"\\x06-\\x1d\\x03\\x06\\x223\\x03\\x062:\\x03\\x060$\\x03\\x066\\x1e\\x03\\x064\\x12\\x03\" +\n\t\"\\x0645\\x03\\x06\\x0b\\x00\\x03\\x06\\x0b7\\x03\\x06\\x07\\x1f\\x03\\x06\\x15\\x12\\x03\" +\n\t\"\\x0c\\x05\\x0f\\x03\\x0b+\\x0b\\x03\\x0b+-\\x03\\x06\\x16\\x1b\\x03\\x06\\x15\\x17\\x03\" +\n\t\"\\x89\\xca\\xea\\x03\\x89\\xca\\xe8\\x03\\x0c8\\x10\\x03\\x0c8\\x01\\x03\\x0c8\\x0f\\x03\" +\n\t\"\\x0d8%\\x03\\x0d8!\\x03\\x0c8-\\x03\\x0c8/\\x03\\x0c8+\\x03\\x0c87\\x03\\x0c85\\x03\" +\n\t\"\\x0c9\\x09\\x03\\x0c9\\x0d\\x03\\x0c9\\x0f\\x03\\x0c9\\x0b\\x03\\xcfu\\x0c\\x03\\xcfu\" +\n\t\"\\x0f\\x03\\xcfu\\x0e\\x03\\xcfu\\x09\\x03\\x0c9\\x10\\x03\\x0d9\\x0c\\x03\\xcf`;\\x03\" +\n\t\"\\xcf`>\\x03\\xcf`9\\x03\\xcf`8\\x03\\xcf`7\\x03\\xcf`*\\x03\\xcf`-\\x03\\xcf`,\\x03\" +\n\t\"\\x0d\\x1b\\x1a\\x03\\x0d\\x1b&\\x03\\x0c=.\\x03\\x0c=%\\x03\\x0c>\\x1e\\x03\\x0c>\\x14\" +\n\t\"\\x03\\x0c?\\x06\\x03\\x0c?\\x0b\\x03\\x0c?\\x0c\\x03\\x0c?\\x0d\\x03\\x0c?\\x02\\x03\" +\n\t\"\\x0c>\\x0f\\x03\\x0c>\\x08\\x03\\x0c>\\x09\\x03\\x0c>,\\x03\\x0c>\\x0c\\x03\\x0c?\\x13\" +\n\t\"\\x03\\x0c?\\x16\\x03\\x0c?\\x15\\x03\\x0c?\\x1c\\x03\\x0c?\\x1f\\x03\\x0c?\\x1d\\x03\" +\n\t\"\\x0c?\\x1a\\x03\\x0c?\\x17\\x03\\x0c?\\x08\\x03\\x0c?\\x09\\x03\\x0c?\\x0e\\x03\\x0c?\" +\n\t\"\\x04\\x03\\x0c?\\x05\\x03\\x0c<?\\x03\\x0c=\\x00\\x03\\x0c=\\x06\\x03\\x0c=\\x05\\x03\" +\n\t\"\\x0c=\\x0c\\x03\\x0c=\\x0f\\x03\\x0c=\\x0d\\x03\\x0c=\\x0b\\x03\\x0c=\\x07\\x03\\x0c=\" +\n\t\"\\x19\\x03\\x0c=\\x15\\x03\\x0c=\\x11\\x03\\x0c=1\\x03\\x0c=3\\x03\\x0c=0\\x03\\x0c=>\" +\n\t\"\\x03\\x0c=2\\x03\\x0c=6\\x03\\x0c<\\x07\\x03\\x0c<\\x05\\x03\\x0e:!\\x03\\x0e:#\\x03\" +\n\t\"\\x0e8\\x09\\x03\\x0e:&\\x03\\x0e8\\x0b\\x03\\x0e:$\\x03\\x0e:,\\x03\\x0e8\\x1a\\x03\" +\n\t\"\\x0e8\\x1e\\x03\\x0e:*\\x03\\x0e:7\\x03\\x0e:5\\x03\\x0e:;\\x03\\x0e:\\x15\\x03\\x0e:<\" 
+\n\t\"\\x03\\x0e:4\\x03\\x0e:'\\x03\\x0e:-\\x03\\x0e:%\\x03\\x0e:?\\x03\\x0e:=\\x03\\x0e:)\" +\n\t\"\\x03\\x0e:/\\x03\\xcfs'\\x03\\x0d=\\x0f\\x03\\x0d+*\\x03\\x0d99\\x03\\x0d9;\\x03\\x0d9\" +\n\t\"?\\x03\\x0d)\\x0d\\x03\\x0d(%\\x02\\x01\\x18\\x02\\x01(\\x02\\x01\\x1e\\x03\\x0f$!\\x03\" +\n\t\"\\x0f87\\x03\\x0f4\\x0e\\x03\\x0f5\\x1d\\x03\\x06'\\x03\\x03\\x0f\\x08\\x18\\x03\\x0f\" +\n\t\"\\x0d\\x1b\\x03\\x0e2=\\x03\\x0e;\\x08\\x03\\x0e:\\x0b\\x03\\x0e\\x06$\\x03\\x0e\\x0d)\" +\n\t\"\\x03\\x0e\\x16\\x1f\\x03\\x0e\\x16\\x1b\\x03\\x0d$\\x0a\\x03\\x05,\\x1d\\x03\\x0d. \\x03\" +\n\t\"\\x0d.#\\x03\\x0c(/\\x03\\x09%\\x02\\x03\\x0d90\\x03\\x0d\\x0e4\\x03\\x0d\\x0d\\x0f\\x03\" +\n\t\"\\x0c#\\x00\\x03\\x0c,\\x1e\\x03\\x0c2\\x0e\\x03\\x0c\\x01\\x17\\x03\\x0c\\x09:\\x03\\x0e\" +\n\t\"\\x173\\x03\\x0c\\x08\\x03\\x03\\x0c\\x11\\x07\\x03\\x0c\\x10\\x18\\x03\\x0c\\x1f\\x1c\" +\n\t\"\\x03\\x0c\\x19\\x0e\\x03\\x0c\\x1a\\x1f\\x03\\x0f0>\\x03\\x0b->\\x03\\x0b<+\\x03\\x0b8\" +\n\t\"\\x13\\x03\\x0b\\x043\\x03\\x0b\\x14\\x03\\x03\\x0b\\x16%\\x03\\x0d\\x22&\\x03\\x0b\\x1a\" +\n\t\"\\x1a\\x03\\x0b\\x1a\\x04\\x03\\x0a%9\\x03\\x0a&2\\x03\\x0a&0\\x03\\x0a!\\x1a\\x03\\x0a!\" +\n\t\"7\\x03\\x0a5\\x10\\x03\\x0a=4\\x03\\x0a?\\x0e\\x03\\x0a>\\x10\\x03\\x0a\\x00 \\x03\\x0a\" +\n\t\"\\x0f:\\x03\\x0a\\x0f9\\x03\\x0a\\x0b\\x0a\\x03\\x0a\\x17%\\x03\\x0a\\x1b-\\x03\\x09-\" +\n\t\"\\x1a\\x03\\x09,4\\x03\\x09.,\\x03\\x09)\\x09\\x03\\x096!\\x03\\x091\\x1f\\x03\\x093\" +\n\t\"\\x16\\x03\\x0c+\\x1f\\x03\\x098 \\x03\\x098=\\x03\\x0c(\\x1a\\x03\\x0c(\\x16\\x03\\x09\" +\n\t\"\\x0a+\\x03\\x09\\x16\\x12\\x03\\x09\\x13\\x0e\\x03\\x09\\x153\\x03\\x08)!\\x03\\x09\\x1a\" +\n\t\"\\x01\\x03\\x09\\x18\\x01\\x03\\x08%#\\x03\\x08>\\x22\\x03\\x08\\x05%\\x03\\x08\\x02*\" +\n\t\"\\x03\\x08\\x15;\\x03\\x08\\x1b7\\x03\\x0f\\x07\\x1d\\x03\\x0f\\x04\\x03\\x03\\x070\\x0c\" +\n\t\"\\x03\\x07;\\x0b\\x03\\x07\\x08\\x17\\x03\\x07\\x12\\x06\\x03\\x06/-\\x03\\x0671\\x03\" 
+\n\t\"\\x065+\\x03\\x06>7\\x03\\x06\\x049\\x03\\x05+\\x1e\\x03\\x05,\\x17\\x03\\x05 \\x1d\\x03\" +\n\t\"\\x05\\x22\\x05\\x03\\x050\\x1d\"\n\n// lookup returns the trie value for the first UTF-8 encoding in s and\n// the width in bytes of this encoding. The size will be 0 if s does not\n// hold enough bytes to complete the encoding. len(s) must be greater than 0.\nfunc (t *idnaTrie) lookup(s []byte) (v uint16, sz int) {\n\tc0 := s[0]\n\tswitch {\n\tcase c0 < 0x80: // is ASCII\n\t\treturn idnaValues[c0], 1\n\tcase c0 < 0xC2:\n\t\treturn 0, 1 // Illegal UTF-8: not a starter, not ASCII.\n\tcase c0 < 0xE0: // 2-byte UTF-8\n\t\tif len(s) < 2 {\n\t\t\treturn 0, 0\n\t\t}\n\t\ti := idnaIndex[c0]\n\t\tc1 := s[1]\n\t\tif c1 < 0x80 || 0xC0 <= c1 {\n\t\t\treturn 0, 1 // Illegal UTF-8: not a continuation byte.\n\t\t}\n\t\treturn t.lookupValue(uint32(i), c1), 2\n\tcase c0 < 0xF0: // 3-byte UTF-8\n\t\tif len(s) < 3 {\n\t\t\treturn 0, 0\n\t\t}\n\t\ti := idnaIndex[c0]\n\t\tc1 := s[1]\n\t\tif c1 < 0x80 || 0xC0 <= c1 {\n\t\t\treturn 0, 1 // Illegal UTF-8: not a continuation byte.\n\t\t}\n\t\to := uint32(i)<<6 + uint32(c1)\n\t\ti = idnaIndex[o]\n\t\tc2 := s[2]\n\t\tif c2 < 0x80 || 0xC0 <= c2 {\n\t\t\treturn 0, 2 // Illegal UTF-8: not a continuation byte.\n\t\t}\n\t\treturn t.lookupValue(uint32(i), c2), 3\n\tcase c0 < 0xF8: // 4-byte UTF-8\n\t\tif len(s) < 4 {\n\t\t\treturn 0, 0\n\t\t}\n\t\ti := idnaIndex[c0]\n\t\tc1 := s[1]\n\t\tif c1 < 0x80 || 0xC0 <= c1 {\n\t\t\treturn 0, 1 // Illegal UTF-8: not a continuation byte.\n\t\t}\n\t\to := uint32(i)<<6 + uint32(c1)\n\t\ti = idnaIndex[o]\n\t\tc2 := s[2]\n\t\tif c2 < 0x80 || 0xC0 <= c2 {\n\t\t\treturn 0, 2 // Illegal UTF-8: not a continuation byte.\n\t\t}\n\t\to = uint32(i)<<6 + uint32(c2)\n\t\ti = idnaIndex[o]\n\t\tc3 := s[3]\n\t\tif c3 < 0x80 || 0xC0 <= c3 {\n\t\t\treturn 0, 3 // Illegal UTF-8: not a continuation byte.\n\t\t}\n\t\treturn t.lookupValue(uint32(i), c3), 4\n\t}\n\t// Illegal rune\n\treturn 0, 1\n}\n\n// lookupUnsafe returns 
the trie value for the first UTF-8 encoding in s.\n// s must start with a full and valid UTF-8 encoded rune.\nfunc (t *idnaTrie) lookupUnsafe(s []byte) uint16 {\n\tc0 := s[0]\n\tif c0 < 0x80 { // is ASCII\n\t\treturn idnaValues[c0]\n\t}\n\ti := idnaIndex[c0]\n\tif c0 < 0xE0 { // 2-byte UTF-8\n\t\treturn t.lookupValue(uint32(i), s[1])\n\t}\n\ti = idnaIndex[uint32(i)<<6+uint32(s[1])]\n\tif c0 < 0xF0 { // 3-byte UTF-8\n\t\treturn t.lookupValue(uint32(i), s[2])\n\t}\n\ti = idnaIndex[uint32(i)<<6+uint32(s[2])]\n\tif c0 < 0xF8 { // 4-byte UTF-8\n\t\treturn t.lookupValue(uint32(i), s[3])\n\t}\n\treturn 0\n}\n\n// lookupString returns the trie value for the first UTF-8 encoding in s and\n// the width in bytes of this encoding. The size will be 0 if s does not\n// hold enough bytes to complete the encoding. len(s) must be greater than 0.\nfunc (t *idnaTrie) lookupString(s string) (v uint16, sz int) {\n\tc0 := s[0]\n\tswitch {\n\tcase c0 < 0x80: // is ASCII\n\t\treturn idnaValues[c0], 1\n\tcase c0 < 0xC2:\n\t\treturn 0, 1 // Illegal UTF-8: not a starter, not ASCII.\n\tcase c0 < 0xE0: // 2-byte UTF-8\n\t\tif len(s) < 2 {\n\t\t\treturn 0, 0\n\t\t}\n\t\ti := idnaIndex[c0]\n\t\tc1 := s[1]\n\t\tif c1 < 0x80 || 0xC0 <= c1 {\n\t\t\treturn 0, 1 // Illegal UTF-8: not a continuation byte.\n\t\t}\n\t\treturn t.lookupValue(uint32(i), c1), 2\n\tcase c0 < 0xF0: // 3-byte UTF-8\n\t\tif len(s) < 3 {\n\t\t\treturn 0, 0\n\t\t}\n\t\ti := idnaIndex[c0]\n\t\tc1 := s[1]\n\t\tif c1 < 0x80 || 0xC0 <= c1 {\n\t\t\treturn 0, 1 // Illegal UTF-8: not a continuation byte.\n\t\t}\n\t\to := uint32(i)<<6 + uint32(c1)\n\t\ti = idnaIndex[o]\n\t\tc2 := s[2]\n\t\tif c2 < 0x80 || 0xC0 <= c2 {\n\t\t\treturn 0, 2 // Illegal UTF-8: not a continuation byte.\n\t\t}\n\t\treturn t.lookupValue(uint32(i), c2), 3\n\tcase c0 < 0xF8: // 4-byte UTF-8\n\t\tif len(s) < 4 {\n\t\t\treturn 0, 0\n\t\t}\n\t\ti := idnaIndex[c0]\n\t\tc1 := s[1]\n\t\tif c1 < 0x80 || 0xC0 <= c1 {\n\t\t\treturn 0, 1 // Illegal UTF-8: not a continuation 
byte.\n\t\t}\n\t\to := uint32(i)<<6 + uint32(c1)\n\t\ti = idnaIndex[o]\n\t\tc2 := s[2]\n\t\tif c2 < 0x80 || 0xC0 <= c2 {\n\t\t\treturn 0, 2 // Illegal UTF-8: not a continuation byte.\n\t\t}\n\t\to = uint32(i)<<6 + uint32(c2)\n\t\ti = idnaIndex[o]\n\t\tc3 := s[3]\n\t\tif c3 < 0x80 || 0xC0 <= c3 {\n\t\t\treturn 0, 3 // Illegal UTF-8: not a continuation byte.\n\t\t}\n\t\treturn t.lookupValue(uint32(i), c3), 4\n\t}\n\t// Illegal rune\n\treturn 0, 1\n}\n\n// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.\n// s must start with a full and valid UTF-8 encoded rune.\nfunc (t *idnaTrie) lookupStringUnsafe(s string) uint16 {\n\tc0 := s[0]\n\tif c0 < 0x80 { // is ASCII\n\t\treturn idnaValues[c0]\n\t}\n\ti := idnaIndex[c0]\n\tif c0 < 0xE0 { // 2-byte UTF-8\n\t\treturn t.lookupValue(uint32(i), s[1])\n\t}\n\ti = idnaIndex[uint32(i)<<6+uint32(s[1])]\n\tif c0 < 0xF0 { // 3-byte UTF-8\n\t\treturn t.lookupValue(uint32(i), s[2])\n\t}\n\ti = idnaIndex[uint32(i)<<6+uint32(s[2])]\n\tif c0 < 0xF8 { // 4-byte UTF-8\n\t\treturn t.lookupValue(uint32(i), s[3])\n\t}\n\treturn 0\n}\n\n// idnaTrie. Total size: 29052 bytes (28.37 KiB). 
Checksum: ef06e7ecc26f36dd.\ntype idnaTrie struct{}\n\nfunc newIdnaTrie(i int) *idnaTrie {\n\treturn &idnaTrie{}\n}\n\n// lookupValue determines the type of block n and looks up the value for b.\nfunc (t *idnaTrie) lookupValue(n uint32, b byte) uint16 {\n\tswitch {\n\tcase n < 125:\n\t\treturn uint16(idnaValues[n<<6+uint32(b)])\n\tdefault:\n\t\tn -= 125\n\t\treturn uint16(idnaSparse.lookup(n, b))\n\t}\n}\n\n// idnaValues: 127 blocks, 8128 entries, 16256 bytes\n// The third block is the zero block.\nvar idnaValues = [8128]uint16{\n\t// Block 0x0, offset 0x0\n\t0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080,\n\t0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080,\n\t0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080,\n\t0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080,\n\t0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080,\n\t0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080,\n\t0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080,\n\t0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080,\n\t0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008,\n\t0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080,\n\t0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080,\n\t// Block 0x1, offset 0x40\n\t0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105,\n\t0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105,\n\t0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105,\n\t0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105,\n\t0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080,\n\t0x5e: 0x0080, 0x5f: 0x0080, 0x60: 
0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008,\n\t0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008,\n\t0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008,\n\t0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008,\n\t0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080,\n\t0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080,\n\t// Block 0x2, offset 0x80\n\t// Block 0x3, offset 0xc0\n\t0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040,\n\t0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040,\n\t0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040,\n\t0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040,\n\t0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040,\n\t0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018,\n\t0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018,\n\t0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a,\n\t0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005,\n\t0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018,\n\t0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018,\n\t// Block 0x4, offset 0x100\n\t0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008,\n\t0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008,\n\t0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008,\n\t0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008,\n\t0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008,\n\t0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 
0x0008, 0x122: 0xe00d, 0x123: 0x0008,\n\t0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008,\n\t0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008,\n\t0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008,\n\t0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d,\n\t0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199,\n\t// Block 0x5, offset 0x140\n\t0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d,\n\t0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008,\n\t0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008,\n\t0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008,\n\t0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008,\n\t0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008,\n\t0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008,\n\t0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008,\n\t0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008,\n\t0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d,\n\t0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9,\n\t// Block 0x6, offset 0x180\n\t0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008,\n\t0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d,\n\t0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d,\n\t0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d,\n\t0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 
0x0155,\n\t0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008,\n\t0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d,\n\t0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd,\n\t0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d,\n\t0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008,\n\t0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008,\n\t// Block 0x7, offset 0x1c0\n\t0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9,\n\t0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d,\n\t0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d,\n\t0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d,\n\t0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008,\n\t0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008,\n\t0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008,\n\t0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008,\n\t0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008,\n\t0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008,\n\t0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008,\n\t// Block 0x8, offset 0x200\n\t0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008,\n\t0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008,\n\t0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008,\n\t0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008,\n\t0x218: 0xe00d, 0x219: 0x0008, 
0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008,\n\t0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008,\n\t0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008,\n\t0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008,\n\t0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008,\n\t0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d,\n\t0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008,\n\t// Block 0x9, offset 0x240\n\t0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018,\n\t0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008,\n\t0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008,\n\t0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018,\n\t0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a,\n\t0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369,\n\t0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018,\n\t0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018,\n\t0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018,\n\t0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018,\n\t0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018,\n\t// Block 0xa, offset 0x280\n\t0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d,\n\t0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308,\n\t0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308,\n\t0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 
0x297: 0x3308,\n\t0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308,\n\t0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308,\n\t0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308,\n\t0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308,\n\t0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008,\n\t0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008,\n\t0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d,\n\t// Block 0xb, offset 0x2c0\n\t0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2,\n\t0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040,\n\t0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105,\n\t0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105,\n\t0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105,\n\t0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d,\n\t0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d,\n\t0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008,\n\t0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008,\n\t0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008,\n\t0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008,\n\t// Block 0xc, offset 0x300\n\t0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008,\n\t0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008,\n\t0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd,\n\t0x312: 0xe0bd, 0x313: 
0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008,\n\t0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008,\n\t0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008,\n\t0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008,\n\t0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008,\n\t0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd,\n\t0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008,\n\t0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d,\n\t// Block 0xd, offset 0x340\n\t0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008,\n\t0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008,\n\t0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008,\n\t0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008,\n\t0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008,\n\t0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008,\n\t0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008,\n\t0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008,\n\t0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008,\n\t0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008,\n\t0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008,\n\t// Block 0xe, offset 0x380\n\t0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308,\n\t0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008,\n\t0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 
0xe00d, 0x391: 0x0008,\n\t0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008,\n\t0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008,\n\t0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008,\n\t0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008,\n\t0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008,\n\t0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008,\n\t0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008,\n\t0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008,\n\t// Block 0xf, offset 0x3c0\n\t0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d,\n\t0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d,\n\t0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008,\n\t0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008,\n\t0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008,\n\t0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008,\n\t0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008,\n\t0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008,\n\t0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008,\n\t0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008,\n\t0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008,\n\t// Block 0x10, offset 0x400\n\t0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008,\n\t0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008,\n\t0x40c: 0xe00d, 
0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008,\n\t0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008,\n\t0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008,\n\t0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008,\n\t0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008,\n\t0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008,\n\t0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5,\n\t0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5,\n\t0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5,\n\t// Block 0x11, offset 0x440\n\t0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840,\n\t0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818,\n\t0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308,\n\t0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308,\n\t0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040,\n\t0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08,\n\t0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08,\n\t0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08,\n\t0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08,\n\t0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08,\n\t0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08,\n\t// Block 0x12, offset 0x480\n\t0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08,\n\t0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 
0x48a: 0x0a08, 0x48b: 0x3308,\n\t0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308,\n\t0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308,\n\t0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308,\n\t0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808,\n\t0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808,\n\t0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08,\n\t0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429,\n\t0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08,\n\t0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08,\n\t// Block 0x13, offset 0x4c0\n\t0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08,\n\t0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08,\n\t0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08,\n\t0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308,\n\t0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840,\n\t0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308,\n\t0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018,\n\t0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08,\n\t0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008,\n\t0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08,\n\t0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08,\n\t// Block 0x14, offset 0x500\n\t0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818,\n\t0x506: 
0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818,\n\t0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308,\n\t0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08,\n\t0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08,\n\t0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08,\n\t0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08,\n\t0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08,\n\t0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308,\n\t0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308,\n\t0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308,\n\t// Block 0x15, offset 0x540\n\t0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08,\n\t0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08,\n\t0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08,\n\t0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808,\n\t0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040,\n\t0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08,\n\t0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08,\n\t0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040,\n\t0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040,\n\t0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040,\n\t0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040,\n\t// Block 0x16, offset 0x580\n\t0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 
0x3308, 0x584: 0x3308, 0x585: 0x3308,\n\t0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008,\n\t0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308,\n\t0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308,\n\t0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1,\n\t0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308,\n\t0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008,\n\t0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008,\n\t0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008,\n\t0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008,\n\t0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008,\n\t// Block 0x17, offset 0x5c0\n\t0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008,\n\t0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008,\n\t0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040,\n\t0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008,\n\t0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008,\n\t0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008,\n\t0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040,\n\t0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008,\n\t0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040,\n\t0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040,\n\t0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008,\n\t// Block 0x18, offset 
0x600\n\t0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040,\n\t0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008,\n\t0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040,\n\t0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008,\n\t0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1,\n\t0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308,\n\t0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008,\n\t0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008,\n\t0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018,\n\t0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018,\n\t0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x0040, 0x63f: 0x0040,\n\t// Block 0x19, offset 0x640\n\t0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008,\n\t0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040,\n\t0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040,\n\t0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008,\n\t0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008,\n\t0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008,\n\t0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040,\n\t0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008,\n\t0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008,\n\t0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040,\n\t0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 
0x3008, 0x67f: 0x3008,\n\t// Block 0x1a, offset 0x680\n\t0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040,\n\t0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308,\n\t0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308,\n\t0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040,\n\t0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040,\n\t0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040,\n\t0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008,\n\t0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008,\n\t0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308,\n\t0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040,\n\t0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040,\n\t// Block 0x1b, offset 0x6c0\n\t0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008,\n\t0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008,\n\t0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008,\n\t0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008,\n\t0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008,\n\t0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008,\n\t0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040,\n\t0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008,\n\t0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008,\n\t0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 
0x0040,\n\t0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008,\n\t// Block 0x1c, offset 0x700\n\t0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308,\n\t0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008,\n\t0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040,\n\t0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040,\n\t0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040,\n\t0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308,\n\t0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008,\n\t0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008,\n\t0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040,\n\t0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308,\n\t0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308,\n\t// Block 0x1d, offset 0x740\n\t0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008,\n\t0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008,\n\t0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040,\n\t0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008,\n\t0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008,\n\t0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008,\n\t0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040,\n\t0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008,\n\t0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008,\n\t0x776: 0x0008, 0x777: 0x0008, 
0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040,\n\t0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308,\n\t// Block 0x1e, offset 0x780\n\t0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040,\n\t0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008,\n\t0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040,\n\t0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008,\n\t0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9,\n\t0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308,\n\t0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008,\n\t0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008,\n\t0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018,\n\t0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040,\n\t0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040,\n\t// Block 0x1f, offset 0x7c0\n\t0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008,\n\t0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040,\n\t0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040,\n\t0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040,\n\t0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040,\n\t0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008,\n\t0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008,\n\t0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008,\n\t0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 
0x7f5: 0x0008,\n\t0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040,\n\t0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008,\n\t// Block 0x20, offset 0x800\n\t0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040,\n\t0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308,\n\t0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040,\n\t0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040,\n\t0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040,\n\t0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308,\n\t0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008,\n\t0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008,\n\t0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040,\n\t0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018,\n\t0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018,\n\t// Block 0x21, offset 0x840\n\t0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0040, 0x845: 0x0008,\n\t0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008,\n\t0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040,\n\t0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008,\n\t0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008,\n\t0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008,\n\t0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040,\n\t0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008,\n\t0x870: 0x0008, 0x871: 
0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008,\n\t0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040,\n\t0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308,\n\t// Block 0x22, offset 0x880\n\t0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040,\n\t0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008,\n\t0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040,\n\t0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040,\n\t0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040,\n\t0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308,\n\t0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008,\n\t0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008,\n\t0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040,\n\t0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040,\n\t0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040,\n\t// Block 0x23, offset 0x8c0\n\t0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040,\n\t0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008,\n\t0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040,\n\t0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008,\n\t0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018,\n\t0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308,\n\t0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008,\n\t0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 
0x0008, 0x8ef: 0x0008,\n\t0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018,\n\t0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008,\n\t0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008,\n\t// Block 0x24, offset 0x900\n\t0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040,\n\t0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040,\n\t0x90c: 0x0040, 0x90d: 0x0008, 0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040,\n\t0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008,\n\t0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008,\n\t0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008,\n\t0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040,\n\t0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008,\n\t0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308,\n\t0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308,\n\t0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040,\n\t// Block 0x25, offset 0x940\n\t0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008,\n\t0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008,\n\t0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008,\n\t0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79,\n\t0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008,\n\t0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008,\n\t0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9,\n\t0x96a: 0x0008, 
0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040,\n\t0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59,\n\t0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308,\n\t0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008,\n\t// Block 0x26, offset 0x980\n\t0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018,\n\t0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008,\n\t0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308,\n\t0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308,\n\t0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11,\n\t0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308,\n\t0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308,\n\t0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308,\n\t0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308,\n\t0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308,\n\t0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018,\n\t// Block 0x27, offset 0x9c0\n\t0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008,\n\t0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008,\n\t0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008,\n\t0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008,\n\t0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008,\n\t0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008,\n\t0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 
0x9e8: 0x0008, 0x9e9: 0x0008,\n\t0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008,\n\t0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41,\n\t0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008,\n\t0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269,\n\t// Block 0x28, offset 0xa00\n\t0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1,\n\t0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011,\n\t0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041,\n\t0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9,\n\t0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099,\n\t0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269,\n\t0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1,\n\t0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008,\n\t0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008,\n\t0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008,\n\t0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008,\n\t// Block 0x29, offset 0xa40\n\t0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008,\n\t0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008,\n\t0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008,\n\t0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008,\n\t0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169,\n\t0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9,\n\t0xa64: 
0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251,\n\t0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9,\n\t0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359,\n\t0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1,\n\t0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429,\n\t// Block 0x2a, offset 0xa80\n\t0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008,\n\t0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008,\n\t0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008,\n\t0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008,\n\t0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008,\n\t0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008,\n\t0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008,\n\t0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008,\n\t0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008,\n\t0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008,\n\t0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008,\n\t// Block 0x2b, offset 0xac0\n\t0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008,\n\t0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008,\n\t0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008,\n\t0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008,\n\t0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008,\n\t0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 
0x0008, 0xae2: 0xe00d, 0xae3: 0x0008,\n\t0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008,\n\t0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008,\n\t0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008,\n\t0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008,\n\t0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008,\n\t// Block 0x2c, offset 0xb00\n\t0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008,\n\t0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045,\n\t0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008,\n\t0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008,\n\t0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045,\n\t0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008,\n\t0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045,\n\t0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045,\n\t0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489,\n\t0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1,\n\t0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040,\n\t// Block 0x2d, offset 0xb40\n\t0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1,\n\t0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591,\n\t0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1,\n\t0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1,\n\t0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 
0x1771,\n\t0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891,\n\t0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831,\n\t0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951,\n\t0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040,\n\t0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459,\n\t0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686,\n\t// Block 0x2e, offset 0xb80\n\t0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040,\n\t0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489,\n\t0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008,\n\t0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008,\n\t0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2,\n\t0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61,\n\t0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045,\n\t0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa,\n\t0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040,\n\t0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9,\n\t0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040,\n\t// Block 0x2f, offset 0xbc0\n\t0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a,\n\t0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0,\n\t0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d,\n\t0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e,\n\t0xbd8: 0x0018, 0xbd9: 0x0018, 
0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018,\n\t0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018,\n\t0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040,\n\t0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a,\n\t0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018,\n\t0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018,\n\t0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018,\n\t// Block 0x30, offset 0xc00\n\t0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018,\n\t0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018,\n\t0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018,\n\t0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9,\n\t0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018,\n\t0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340,\n\t0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040,\n\t0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340,\n\t0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61,\n\t0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd,\n\t0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71,\n\t// Block 0x31, offset 0xc40\n\t0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61,\n\t0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5,\n\t0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09,\n\t0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 
0xc57: 0x0359,\n\t0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040,\n\t0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018,\n\t0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018,\n\t0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018,\n\t0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018,\n\t0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018,\n\t0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018,\n\t// Block 0x32, offset 0xc80\n\t0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e,\n\t0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249,\n\t0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41,\n\t0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018,\n\t0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269,\n\t0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018,\n\t0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018,\n\t0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09,\n\t0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9,\n\t0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd,\n\t0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109,\n\t// Block 0x33, offset 0xcc0\n\t0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9,\n\t0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018,\n\t0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151,\n\t0xcd2: 0x2181, 0xcd3: 
0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279,\n\t0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399,\n\t0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439,\n\t0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369,\n\t0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61,\n\t0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451,\n\t0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5,\n\t0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61,\n\t// Block 0x34, offset 0xd00\n\t0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018,\n\t0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040,\n\t0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040,\n\t0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040,\n\t0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040,\n\t0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51,\n\t0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601,\n\t0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691,\n\t0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26,\n\t0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6,\n\t0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a,\n\t// Block 0x35, offset 0xd40\n\t0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a,\n\t0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040,\n\t0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 
0x0040, 0xd51: 0x0040,\n\t0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040,\n\t0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46,\n\t0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06,\n\t0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6,\n\t0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86,\n\t0xd70: 0x0da6, 0xd71: 0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46,\n\t0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199,\n\t0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259,\n\t// Block 0x36, offset 0xd80\n\t0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99,\n\t0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089,\n\t0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9,\n\t0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249,\n\t0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71,\n\t0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9,\n\t0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1,\n\t0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018,\n\t0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018,\n\t0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018,\n\t0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018,\n\t// Block 0x37, offset 0xdc0\n\t0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008,\n\t0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008,\n\t0xdcc: 0x0008, 
0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008,\n\t0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008,\n\t0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008,\n\t0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd,\n\t0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d,\n\t0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9,\n\t0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d,\n\t0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008,\n\t0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9,\n\t// Block 0x38, offset 0xe00\n\t0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008,\n\t0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008,\n\t0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008,\n\t0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008,\n\t0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008,\n\t0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008,\n\t0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018,\n\t0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308,\n\t0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040,\n\t0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018,\n\t0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018,\n\t// Block 0x39, offset 0xe40\n\t0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d,\n\t0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 
0xe4a: 0x283d, 0xe4b: 0x285d,\n\t0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 0xe51: 0x291d,\n\t0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040,\n\t0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040,\n\t0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040,\n\t0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040,\n\t0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040,\n\t0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040,\n\t0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040,\n\t0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040,\n\t// Block 0x3a, offset 0xe80\n\t0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008,\n\t0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018,\n\t0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018,\n\t0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018,\n\t0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018,\n\t0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018,\n\t0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018,\n\t0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018,\n\t0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018,\n\t0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018,\n\t0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018,\n\t// Block 0x3b, offset 0xec0\n\t0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd,\n\t0xec6: 
0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd,\n\t0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d,\n\t0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d,\n\t0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d,\n\t0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd,\n\t0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d,\n\t0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd,\n\t0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d,\n\t0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd,\n\t0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d,\n\t// Block 0x3c, offset 0xf00\n\t0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd,\n\t0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d,\n\t0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018,\n\t0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd,\n\t0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d,\n\t0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008,\n\t0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008,\n\t0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008,\n\t0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008,\n\t0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040,\n\t0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040,\n\t// Block 0x3d, offset 0xf40\n\t0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 
0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd,\n\t0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018,\n\t0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761,\n\t0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1,\n\t0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881,\n\t0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd,\n\t0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d,\n\t0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d,\n\t0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd,\n\t0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d,\n\t0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018,\n\t// Block 0x3e, offset 0xf80\n\t0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d,\n\t0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d,\n\t0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd,\n\t0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd,\n\t0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d,\n\t0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d,\n\t0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd,\n\t0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d,\n\t0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999,\n\t0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29,\n\t0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89,\n\t// Block 0x3f, offset 
0xfc0\n\t0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69,\n\t0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69,\n\t0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15,\n\t0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75,\n\t0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded,\n\t0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d,\n\t0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5,\n\t0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d,\n\t0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d,\n\t0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd,\n\t0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040,\n\t// Block 0x40, offset 0x1000\n\t0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9,\n\t0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1,\n\t0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 0x1011: 0x42d9,\n\t0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549,\n\t0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1,\n\t0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11,\n\t0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91,\n\t0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9,\n\t0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011,\n\t0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 
0x51b9, 0x103b: 0x5209,\n\t0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361,\n\t// Block 0x41, offset 0x1040\n\t0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541,\n\t0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781,\n\t0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979,\n\t0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89,\n\t0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1,\n\t0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99,\n\t0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9,\n\t0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9,\n\t0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069,\n\t0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9,\n\t0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9,\n\t// Block 0x42, offset 0x1080\n\t0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271,\n\t0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9,\n\t0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed,\n\t0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371,\n\t0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9,\n\t0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d,\n\t0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211,\n\t0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 
0x64f1,\n\t0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599,\n\t0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9,\n\t0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611,\n\t// Block 0x43, offset 0x10c0\n\t0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671,\n\t0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709,\n\t0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781,\n\t0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1,\n\t0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811,\n\t0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901,\n\t0x10e4: 0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1,\n\t0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11,\n\t0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31,\n\t0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51,\n\t0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d,\n\t// Block 0x44, offset 0x1100\n\t0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008,\n\t0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008,\n\t0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008,\n\t0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008,\n\t0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008,\n\t0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008,\n\t0x1124: 
0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008,\n\t0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308,\n\t0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308,\n\t0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308,\n\t0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008,\n\t// Block 0x45, offset 0x1140\n\t0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008,\n\t0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008,\n\t0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008,\n\t0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008,\n\t0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11,\n\t0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008,\n\t0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008,\n\t0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008,\n\t0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008,\n\t0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008,\n\t0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008,\n\t// Block 0x46, offset 0x1180\n\t0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018,\n\t0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018,\n\t0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018,\n\t0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008,\n\t0x1198: 0x0008, 0x1199: 
0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008,\n\t0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008,\n\t0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008,\n\t0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008,\n\t0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008,\n\t0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008,\n\t0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008,\n\t// Block 0x47, offset 0x11c0\n\t0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008,\n\t0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008,\n\t0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008,\n\t0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008,\n\t0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008,\n\t0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008,\n\t0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008,\n\t0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008,\n\t0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008,\n\t0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d,\n\t0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008,\n\t// Block 0x48, offset 0x1200\n\t0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008,\n\t0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d,\n\t0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 
0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008,\n\t0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008,\n\t0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008,\n\t0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008,\n\t0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008,\n\t0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0040,\n\t0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008,\n\t0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040,\n\t0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040,\n\t// Block 0x49, offset 0x1240\n\t0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575,\n\t0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635,\n\t0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008,\n\t0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715,\n\t0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5,\n\t0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008,\n\t0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008,\n\t0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935,\n\t0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5,\n\t0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5,\n\t0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35,\n\t// Block 0x4a, offset 0x1280\n\t0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 
0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35,\n\t0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5,\n\t0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19,\n\t0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91,\n\t0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040,\n\t0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040,\n\t0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040,\n\t0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040,\n\t0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040,\n\t0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040,\n\t0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040,\n\t// Block 0x4b, offset 0x12c0\n\t0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001,\n\t0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040,\n\t0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040,\n\t0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9,\n\t0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1,\n\t0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149,\n\t0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2,\n\t0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1,\n\t0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1,\n\t0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 
0x7451, 0x12fb: 0x7479,\n\t0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040,\n\t// Block 0x4c, offset 0x1300\n\t0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040,\n\t0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659,\n\t0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721,\n\t0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751,\n\t0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769,\n\t0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799,\n\t0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1,\n\t0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1,\n\t0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9,\n\t0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829,\n\t0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841,\n\t// Block 0x4d, offset 0x1340\n\t0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871,\n\t0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9,\n\t0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9,\n\t0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919,\n\t0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 0x135d: 0x7931,\n\t0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961,\n\t0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991,\n\t0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 
0x79c1,\n\t0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818,\n\t0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818,\n\t0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818,\n\t// Block 0x4e, offset 0x1380\n\t0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040,\n\t0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040,\n\t0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040,\n\t0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09,\n\t0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479,\n\t0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81,\n\t0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1,\n\t0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19,\n\t0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91,\n\t0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1,\n\t0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09,\n\t// Block 0x4f, offset 0x13c0\n\t0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1,\n\t0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1,\n\t0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1,\n\t0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991,\n\t0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81,\n\t0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a,\n\t0x13e4: 
0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99,\n\t0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89,\n\t0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79,\n\t0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19,\n\t0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469,\n\t// Block 0x50, offset 0x1400\n\t0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649,\n\t0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9,\n\t0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49,\n\t0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21,\n\t0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9,\n\t0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01,\n\t0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91,\n\t0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9,\n\t0x1430: 0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171,\n\t0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289,\n\t0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329,\n\t// Block 0x51, offset 0x1440\n\t0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1,\n\t0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621,\n\t0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739,\n\t0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1,\n\t0x1458: 0x88c9, 0x1459: 
0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9,\n\t0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29,\n\t0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079,\n\t0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1,\n\t0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171,\n\t0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261,\n\t0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301,\n\t// Block 0x52, offset 0x1480\n\t0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1,\n\t0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1,\n\t0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171,\n\t0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261,\n\t0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351,\n\t0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441,\n\t0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509,\n\t0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1,\n\t0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081,\n\t0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239,\n\t0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018,\n\t// Block 0x53, offset 0x14c0\n\t0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040,\n\t0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040,\n\t0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 
0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609,\n\t0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721,\n\t0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839,\n\t0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919,\n\t0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9,\n\t0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9,\n\t0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9,\n\t0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1,\n\t0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79,\n\t// Block 0x54, offset 0x1500\n\t0x1500: 0xa949, 0x1501: 0xa981, 0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989,\n\t0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040,\n\t0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040,\n\t0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040,\n\t0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040,\n\t0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040,\n\t0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040,\n\t0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040,\n\t0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9,\n\t0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12,\n\t0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040,\n\t// Block 0x55, offset 0x1540\n\t0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 
0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0,\n\t0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0,\n\t0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55,\n\t0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75,\n\t0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040,\n\t0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308,\n\t0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308,\n\t0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308,\n\t0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2,\n\t0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35,\n\t0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55,\n\t// Block 0x56, offset 0x1580\n\t0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018,\n\t0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56,\n\t0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95,\n\t0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa,\n\t0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95,\n\t0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99,\n\t0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda,\n\t0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040,\n\t0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040,\n\t0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 
0x8076, 0x15bb: 0xb081,\n\t0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1,\n\t// Block 0x57, offset 0x15c0\n\t0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141,\n\t0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171,\n\t0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1,\n\t0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1,\n\t0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201,\n\t0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219,\n\t0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249,\n\t0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291,\n\t0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1,\n\t0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9,\n\t0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1,\n\t// Block 0x58, offset 0x1600\n\t0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321,\n\t0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339,\n\t0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369,\n\t0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381,\n\t0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1,\n\t0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9,\n\t0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9,\n\t0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 
0x7ab1,\n\t0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441,\n\t0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9,\n\t0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0,\n\t// Block 0x59, offset 0x1640\n\t0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea,\n\t0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2,\n\t0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9,\n\t0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81,\n\t0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2,\n\t0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159,\n\t0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41,\n\t0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9,\n\t0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9,\n\t0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a,\n\t0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a,\n\t// Block 0x5a, offset 0x1680\n\t0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09,\n\t0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51,\n\t0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039,\n\t0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279,\n\t0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a,\n\t0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115,\n\t0x16a4: 
0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 0x16a9: 0x81d5,\n\t0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295,\n\t0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355,\n\t0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415,\n\t0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215,\n\t// Block 0x5b, offset 0x16c0\n\t0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515,\n\t0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595,\n\t0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5,\n\t0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655,\n\t0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115,\n\t0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735,\n\t0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5,\n\t0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5,\n\t0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5,\n\t0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5,\n\t0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040,\n\t// Block 0x5c, offset 0x1700\n\t0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5,\n\t0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715,\n\t0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040,\n\t0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935,\n\t0x1718: 0x0040, 0x1719: 
0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040,\n\t0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6,\n\t0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35,\n\t0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040,\n\t0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040,\n\t0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340,\n\t0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040,\n\t// Block 0x5d, offset 0x1740\n\t0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08,\n\t0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808,\n\t0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08,\n\t0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908,\n\t0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08,\n\t0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808,\n\t0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040,\n\t0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18,\n\t0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818,\n\t0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040,\n\t0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040,\n\t// Block 0x5e, offset 0x1780\n\t0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08,\n\t0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08,\n\t0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 
0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08,\n\t0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040,\n\t0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040,\n\t0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040,\n\t0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18,\n\t0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818,\n\t0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040,\n\t0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040,\n\t0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040,\n\t// Block 0x5f, offset 0x17c0\n\t0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008,\n\t0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008,\n\t0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040,\n\t0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008,\n\t0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008,\n\t0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008,\n\t0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040,\n\t0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008,\n\t0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008,\n\t0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x0040,\n\t0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008,\n\t// Block 0x60, offset 0x1800\n\t0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 
0x3008, 0x1804: 0x3008, 0x1805: 0x0040,\n\t0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008,\n\t0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040,\n\t0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008,\n\t0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008,\n\t0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008,\n\t0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308,\n\t0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040,\n\t0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040,\n\t0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040,\n\t0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040,\n\t// Block 0x61, offset 0x1840\n\t0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199,\n\t0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359,\n\t0x184c: 0x0f61, 0x184d: 0x0f71, 0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269,\n\t0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369,\n\t0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9,\n\t0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259,\n\t0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99,\n\t0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089,\n\t0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9,\n\t0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 
0x0f31, 0x187b: 0x0249,\n\t0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359,\n\t// Block 0x62, offset 0x1880\n\t0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269,\n\t0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369,\n\t0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9,\n\t0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259,\n\t0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99,\n\t0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089,\n\t0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9,\n\t0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249,\n\t0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71,\n\t0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9,\n\t0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369,\n\t// Block 0x63, offset 0x18c0\n\t0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9,\n\t0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259,\n\t0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99,\n\t0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089,\n\t0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040,\n\t0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040,\n\t0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71,\n\t0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 
0x0fa9,\n\t0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1,\n\t0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199,\n\t0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259,\n\t// Block 0x64, offset 0x1900\n\t0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99,\n\t0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089,\n\t0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9,\n\t0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249,\n\t0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71,\n\t0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9,\n\t0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1,\n\t0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199,\n\t0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359,\n\t0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269,\n\t0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089,\n\t// Block 0x65, offset 0x1940\n\t0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9,\n\t0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040,\n\t0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71,\n\t0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9,\n\t0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040,\n\t0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199,\n\t0x1964: 
0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359,\n\t0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269,\n\t0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369,\n\t0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9,\n\t0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040,\n\t// Block 0x66, offset 0x1980\n\t0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040,\n\t0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9,\n\t0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040,\n\t0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199,\n\t0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359,\n\t0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269,\n\t0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369,\n\t0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9,\n\t0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259,\n\t0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99,\n\t0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9,\n\t// Block 0x67, offset 0x19c0\n\t0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1,\n\t0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199,\n\t0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359,\n\t0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269,\n\t0x19d8: 0x01d9, 0x19d9: 
0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369,\n\t0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9,\n\t0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259,\n\t0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99,\n\t0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 0x19f5: 0x1089,\n\t0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9,\n\t0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199,\n\t// Block 0x68, offset 0x1a00\n\t0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359,\n\t0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269,\n\t0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369,\n\t0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9,\n\t0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259,\n\t0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99,\n\t0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089,\n\t0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9,\n\t0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249,\n\t0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71,\n\t0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269,\n\t// Block 0x69, offset 0x1a40\n\t0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369,\n\t0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9,\n\t0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 
0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259,\n\t0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99,\n\t0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089,\n\t0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9,\n\t0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249,\n\t0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71,\n\t0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9,\n\t0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1,\n\t0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9,\n\t// Block 0x6a, offset 0x1a80\n\t0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259,\n\t0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99,\n\t0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089,\n\t0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9,\n\t0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249,\n\t0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71,\n\t0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9,\n\t0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1,\n\t0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199,\n\t0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359,\n\t0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99,\n\t// Block 0x6b, offset 0x1ac0\n\t0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 
0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089,\n\t0x1ac6: 0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9,\n\t0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249,\n\t0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71,\n\t0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9,\n\t0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1,\n\t0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099,\n\t0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429,\n\t0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71,\n\t0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9,\n\t0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9,\n\t// Block 0x6c, offset 0x1b00\n\t0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9,\n\t0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11,\n\t0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109,\n\t0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1,\n\t0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429,\n\t0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099,\n\t0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429,\n\t0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71,\n\t0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9,\n\t0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 
0x2079, 0x1b3b: 0xbc01,\n\t0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9,\n\t// Block 0x6d, offset 0x1b40\n\t0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11,\n\t0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109,\n\t0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1,\n\t0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429,\n\t0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099,\n\t0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429,\n\t0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71,\n\t0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9,\n\t0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01,\n\t0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1,\n\t0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11,\n\t// Block 0x6e, offset 0x1b80\n\t0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109,\n\t0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1,\n\t0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429,\n\t0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099,\n\t0x1b98: 0x10b1, 0x1b99: 0x10c9, 0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429,\n\t0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71,\n\t0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9,\n\t0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 
0xbc01,\n\t0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1,\n\t0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41,\n\t0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109,\n\t// Block 0x6f, offset 0x1bc0\n\t0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1,\n\t0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429,\n\t0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099,\n\t0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429,\n\t0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71,\n\t0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9,\n\t0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01,\n\t0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1,\n\t0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41,\n\t0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1,\n\t0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1,\n\t// Block 0x70, offset 0x1c00\n\t0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429,\n\t0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41,\n\t0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079,\n\t0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1,\n\t0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61,\n\t0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9,\n\t0x1c24: 
0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81,\n\t0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079,\n\t0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1,\n\t0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61,\n\t0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1,\n\t// Block 0x71, offset 0x1c40\n\t0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115,\n\t0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135,\n\t0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115,\n\t0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175,\n\t0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115,\n\t0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08,\n\t0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08,\n\t0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08,\n\t0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08,\n\t0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08,\n\t0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08,\n\t// Block 0x72, offset 0x1c80\n\t0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411,\n\t0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1,\n\t0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9,\n\t0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231,\n\t0x1c98: 0xb261, 0x1c99: 
0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949,\n\t0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040,\n\t0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429,\n\t0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339,\n\t0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1,\n\t0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351,\n\t0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040,\n\t// Block 0x73, offset 0x1cc0\n\t0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040,\n\t0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1,\n\t0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9,\n\t0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231,\n\t0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949,\n\t0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040,\n\t0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429,\n\t0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339,\n\t0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1,\n\t0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351,\n\t0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040,\n\t// Block 0x74, offset 0x1d00\n\t0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411,\n\t0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1,\n\t0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 
0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9,\n\t0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231,\n\t0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040,\n\t0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249,\n\t0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429,\n\t0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339,\n\t0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1,\n\t0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351,\n\t0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040,\n\t// Block 0x75, offset 0x1d40\n\t0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02,\n\t0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018,\n\t0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2,\n\t0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72,\n\t0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32,\n\t0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2,\n\t0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2,\n\t0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0040,\n\t0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199,\n\t0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359,\n\t0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99,\n\t// Block 0x76, offset 0x1d80\n\t0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 
0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089,\n\t0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1,\n\t0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018,\n\t0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018,\n\t0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018,\n\t0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018,\n\t0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018,\n\t0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040,\n\t0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018,\n\t0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018,\n\t0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018,\n\t// Block 0x77, offset 0x1dc0\n\t0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040,\n\t0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040,\n\t0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289,\n\t0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349,\n\t0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409,\n\t0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9,\n\t0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589,\n\t0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649,\n\t0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709,\n\t0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 
0xc7a9, 0x1dfb: 0xc7c9,\n\t0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040,\n\t// Block 0x78, offset 0x1e00\n\t0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79,\n\t0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39,\n\t0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9,\n\t0x1e12: 0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39,\n\t0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9,\n\t0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79,\n\t0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39,\n\t0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9,\n\t0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059,\n\t0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9,\n\t0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179,\n\t// Block 0x79, offset 0x1e40\n\t0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239,\n\t0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9,\n\t0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399,\n\t0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459,\n\t0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309,\n\t0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559,\n\t0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9,\n\t0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 
0xd679,\n\t0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9,\n\t0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d,\n\t0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9,\n\t// Block 0x7a, offset 0x1e80\n\t0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9,\n\t0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959,\n\t0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d,\n\t0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d,\n\t0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9,\n\t0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99,\n\t0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9,\n\t0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9,\n\t0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99,\n\t0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39,\n\t0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99,\n\t// Block 0x7b, offset 0x1ec0\n\t0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639,\n\t0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9,\n\t0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d,\n\t0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9,\n\t0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d,\n\t0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd,\n\t0x1ee4: 
0xe919, 0x1ee5: 0xe939, 0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979,\n\t0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19,\n\t0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d,\n\t0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d,\n\t0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59,\n\t// Block 0x7c, offset 0x1f00\n\t0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99,\n\t0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39,\n\t0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9,\n\t0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39,\n\t0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd,\n\t0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19,\n\t0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9,\n\t0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59,\n\t0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd,\n\t0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d,\n\t0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079,\n\t// Block 0x7d, offset 0x1f40\n\t0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d,\n\t0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d,\n\t0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879,\n\t0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919,\n\t0x1f58: 0x959d, 0x1f59: 
0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd,\n\t0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9,\n\t0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99,\n\t0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39,\n\t0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9,\n\t0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d,\n\t0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79,\n\t// Block 0x7e, offset 0x1f80\n\t0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19,\n\t0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9,\n\t0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59,\n\t0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9,\n\t0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d,\n\t0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040,\n\t0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040,\n\t0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040,\n\t0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040,\n\t0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040,\n\t0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040,\n}\n\n// idnaIndex: 36 blocks, 2304 entries, 4608 bytes\n// Block 0 is the zero block.\nvar idnaIndex = [2304]uint16{\n\t// Block 0x0, offset 0x0\n\t// Block 0x1, offset 0x40\n\t// Block 0x2, offset 0x80\n\t// Block 0x3, offset 0xc0\n\t0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 
0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05,\n\t0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a,\n\t0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84,\n\t0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88,\n\t0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07,\n\t0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c,\n\t0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21,\n\t// Block 0x4, offset 0x100\n\t0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16,\n\t0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d,\n\t0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91,\n\t0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96,\n\t// Block 0x5, offset 0x140\n\t0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e,\n\t0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6,\n\t0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f,\n\t0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae,\n\t0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6,\n\t0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe,\n\t0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3,\n\t0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c,\n\t// Block 0x6, offset 0x180\n\t0x180: 0x2d, 0x181: 0x2e, 
0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b,\n\t0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b,\n\t0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b,\n\t0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b,\n\t0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b,\n\t0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0,\n\t0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5,\n\t0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37,\n\t// Block 0x7, offset 0x1c0\n\t0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1,\n\t0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41,\n\t0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f,\n\t0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f,\n\t0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f,\n\t0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f,\n\t0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f,\n\t0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f,\n\t// Block 0x8, offset 0x200\n\t0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f,\n\t0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f,\n\t0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 
0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f,\n\t0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f,\n\t0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f,\n\t0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f,\n\t0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b,\n\t0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f,\n\t// Block 0x9, offset 0x240\n\t0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f,\n\t0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f,\n\t0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f,\n\t0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f,\n\t0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f,\n\t0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f,\n\t0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f,\n\t0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f,\n\t// Block 0xa, offset 0x280\n\t0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f,\n\t0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f,\n\t0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f,\n\t0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f,\n\t0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 
0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f,\n\t0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f,\n\t0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f,\n\t0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3,\n\t// Block 0xb, offset 0x2c0\n\t0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f,\n\t0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f,\n\t0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f,\n\t0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe8,\n\t0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0,\n\t0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8,\n\t0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f,\n\t0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f,\n\t// Block 0xc, offset 0x300\n\t0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f,\n\t0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f,\n\t0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f,\n\t0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa,\n\t// Block 0xd, offset 0x340\n\t0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba,\n\t0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba,\n\t0x350: 0xba, 0x351: 0xba, 0x352: 
0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba,\n\t0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba,\n\t0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba,\n\t0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba,\n\t0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba,\n\t0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba,\n\t// Block 0xe, offset 0x380\n\t0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba,\n\t0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba,\n\t0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba,\n\t0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba,\n\t0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe,\n\t0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c,\n\t0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52,\n\t0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a,\n\t// Block 0xf, offset 0x3c0\n\t0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108,\n\t0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e,\n\t0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba,\n\t0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba,\n\t0x3e0: 0x116, 0x3e1: 
0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c,\n\t0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba,\n\t0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba,\n\t0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 0xba, 0x3ff: 0xba,\n\t// Block 0x10, offset 0x400\n\t0x400: 0x127, 0x401: 0x128, 0x402: 0x129, 0x403: 0x12a, 0x404: 0x12b, 0x405: 0x12c, 0x406: 0x12d, 0x407: 0x12e,\n\t0x408: 0x12f, 0x409: 0xba, 0x40a: 0x130, 0x40b: 0x131, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba,\n\t0x410: 0x132, 0x411: 0x133, 0x412: 0x134, 0x413: 0x135, 0x414: 0xba, 0x415: 0xba, 0x416: 0x136, 0x417: 0x137,\n\t0x418: 0x138, 0x419: 0x139, 0x41a: 0x13a, 0x41b: 0x13b, 0x41c: 0x13c, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba,\n\t0x420: 0xba, 0x421: 0xba, 0x422: 0x13d, 0x423: 0x13e, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba,\n\t0x428: 0x13f, 0x429: 0x140, 0x42a: 0x141, 0x42b: 0x142, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba,\n\t0x430: 0x143, 0x431: 0x144, 0x432: 0x145, 0x433: 0xba, 0x434: 0x146, 0x435: 0x147, 0x436: 0xba, 0x437: 0xba,\n\t0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba,\n\t// Block 0x11, offset 0x440\n\t0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f,\n\t0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x148, 0x44f: 0xba,\n\t0x450: 0x9b, 0x451: 0x149, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x14a, 0x456: 0xba, 0x457: 0xba,\n\t0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba,\n\t0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba,\n\t0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 
0xba, 0x46f: 0xba,\n\t0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba,\n\t0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba,\n\t// Block 0x12, offset 0x480\n\t0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f,\n\t0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f,\n\t0x490: 0x14b, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba,\n\t0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba,\n\t0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba,\n\t0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba,\n\t0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba,\n\t0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba,\n\t// Block 0x13, offset 0x4c0\n\t0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba,\n\t0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba,\n\t0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f,\n\t0x4d8: 0x9f, 0x4d9: 0x14c, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba,\n\t0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba,\n\t0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba,\n\t0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba,\n\t0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 
0x4ff: 0xba,\n\t// Block 0x14, offset 0x500\n\t0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba,\n\t0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba,\n\t0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba,\n\t0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba,\n\t0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f,\n\t0x528: 0x142, 0x529: 0x14d, 0x52a: 0xba, 0x52b: 0x14e, 0x52c: 0x14f, 0x52d: 0x150, 0x52e: 0x151, 0x52f: 0xba,\n\t0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba,\n\t0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x152, 0x53e: 0x153, 0x53f: 0x154,\n\t// Block 0x15, offset 0x540\n\t0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f,\n\t0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f,\n\t0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f,\n\t0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x155,\n\t0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f,\n\t0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x156, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba,\n\t0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba,\n\t0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba,\n\t// Block 0x16, offset 0x580\n\t0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x157, 0x585: 0x158, 0x586: 0x9f, 0x587: 0x9f,\n\t0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x159, 
0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba,\n\t0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba,\n\t0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba,\n\t0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba,\n\t0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba,\n\t0x5b0: 0x9f, 0x5b1: 0x15a, 0x5b2: 0x15b, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba,\n\t0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba,\n\t// Block 0x17, offset 0x5c0\n\t0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x15c, 0x5c4: 0x15d, 0x5c5: 0x15e, 0x5c6: 0x15f, 0x5c7: 0x160,\n\t0x5c8: 0x9b, 0x5c9: 0x161, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x162, 0x5ce: 0xba, 0x5cf: 0xba,\n\t0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66,\n\t0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e,\n\t0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b,\n\t0x5e8: 0x163, 0x5e9: 0x164, 0x5ea: 0x165, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba,\n\t0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba,\n\t0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba,\n\t// Block 0x18, offset 0x600\n\t0x600: 0x166, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba,\n\t0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba,\n\t0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba,\n\t0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 
0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba,\n\t0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x167, 0x624: 0x6f, 0x625: 0x168, 0x626: 0xba, 0x627: 0xba,\n\t0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba,\n\t0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba,\n\t0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x169, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba,\n\t// Block 0x19, offset 0x640\n\t0x640: 0x16a, 0x641: 0x9b, 0x642: 0x16b, 0x643: 0x16c, 0x644: 0x73, 0x645: 0x74, 0x646: 0x16d, 0x647: 0x16e,\n\t0x648: 0x75, 0x649: 0x16f, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b,\n\t0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b,\n\t0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x170, 0x65c: 0x9b, 0x65d: 0x171, 0x65e: 0x9b, 0x65f: 0x172,\n\t0x660: 0x173, 0x661: 0x174, 0x662: 0x175, 0x663: 0xba, 0x664: 0x176, 0x665: 0x177, 0x666: 0x178, 0x667: 0x179,\n\t0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba,\n\t0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba,\n\t0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba,\n\t// Block 0x1a, offset 0x680\n\t0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f,\n\t0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f,\n\t0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f,\n\t0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x17a, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f,\n\t0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f,\n\t0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 
0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f,\n\t0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f,\n\t0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f,\n\t// Block 0x1b, offset 0x6c0\n\t0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f,\n\t0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f,\n\t0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f,\n\t0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x17b, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f,\n\t0x6e0: 0x17c, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f,\n\t0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f,\n\t0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f,\n\t0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f,\n\t// Block 0x1c, offset 0x700\n\t0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f,\n\t0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f,\n\t0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f,\n\t0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f,\n\t0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f,\n\t0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f,\n\t0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f,\n\t0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x17d, 
0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f,\n\t// Block 0x1d, offset 0x740\n\t0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f,\n\t0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f,\n\t0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f,\n\t0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f,\n\t0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f,\n\t0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x17e,\n\t0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba,\n\t0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba,\n\t// Block 0x1e, offset 0x780\n\t0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba,\n\t0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba,\n\t0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba,\n\t0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba,\n\t0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x17f, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x180, 0x7a7: 0x7b,\n\t0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba,\n\t0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba,\n\t0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba,\n\t// Block 0x1f, offset 0x7c0\n\t0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07,\n\t0x7d8: 0x13, 
0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17,\n\t0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07,\n\t0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c,\n\t0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b,\n\t0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b,\n\t// Block 0x20, offset 0x800\n\t0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b,\n\t0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b,\n\t0x810: 0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b,\n\t0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b,\n\t0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b,\n\t0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b,\n\t0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b,\n\t0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b,\n\t// Block 0x21, offset 0x840\n\t0x840: 0x181, 0x841: 0x182, 0x842: 0xba, 0x843: 0xba, 0x844: 0x183, 0x845: 0x183, 0x846: 0x183, 0x847: 0x184,\n\t0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba,\n\t0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba,\n\t0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba,\n\t0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba,\n\t0x868: 0xba, 
0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba,\n\t0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba,\n\t0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba,\n\t// Block 0x22, offset 0x880\n\t0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b,\n\t0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b,\n\t0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b,\n\t0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b,\n\t0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b,\n\t0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b,\n\t0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b,\n\t0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b,\n\t// Block 0x23, offset 0x8c0\n\t0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b,\n\t0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b,\n}\n\n// idnaSparseOffset: 264 entries, 528 bytes\nvar idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x8a, 0x93, 0xa3, 0xb1, 0xbd, 0xc9, 0xda, 0xe4, 0xeb, 0xf8, 0x109, 0x110, 0x11b, 0x12a, 0x138, 0x142, 0x144, 0x149, 0x14c, 0x14f, 0x151, 0x15d, 0x168, 0x170, 0x176, 0x17c, 0x181, 0x186, 0x189, 0x18d, 0x193, 0x198, 0x1a4, 0x1ae, 0x1b4, 0x1c5, 0x1cf, 0x1d2, 0x1da, 0x1dd, 0x1ea, 0x1f2, 0x1f6, 0x1fd, 0x205, 0x215, 0x221, 0x223, 0x22d, 0x239, 0x245, 0x251, 0x259, 0x25e, 0x268, 0x279, 0x27d, 
0x288, 0x28c, 0x295, 0x29d, 0x2a3, 0x2a8, 0x2ab, 0x2af, 0x2b5, 0x2b9, 0x2bd, 0x2c3, 0x2ca, 0x2d0, 0x2d8, 0x2df, 0x2ea, 0x2f4, 0x2f8, 0x2fb, 0x301, 0x305, 0x307, 0x30a, 0x30c, 0x30f, 0x319, 0x31c, 0x32b, 0x32f, 0x334, 0x337, 0x33b, 0x340, 0x345, 0x34b, 0x351, 0x360, 0x366, 0x36a, 0x379, 0x37e, 0x386, 0x390, 0x39b, 0x3a3, 0x3b4, 0x3bd, 0x3cd, 0x3da, 0x3e4, 0x3e9, 0x3f6, 0x3fa, 0x3ff, 0x401, 0x405, 0x407, 0x40b, 0x414, 0x41a, 0x41e, 0x42e, 0x438, 0x43d, 0x440, 0x446, 0x44d, 0x452, 0x456, 0x45c, 0x461, 0x46a, 0x46f, 0x475, 0x47c, 0x483, 0x48a, 0x48e, 0x493, 0x496, 0x49b, 0x4a7, 0x4ad, 0x4b2, 0x4b9, 0x4c1, 0x4c6, 0x4ca, 0x4da, 0x4e1, 0x4e5, 0x4e9, 0x4f0, 0x4f2, 0x4f5, 0x4f8, 0x4fc, 0x500, 0x506, 0x50f, 0x51b, 0x522, 0x52b, 0x533, 0x53a, 0x548, 0x555, 0x562, 0x56b, 0x56f, 0x57d, 0x585, 0x590, 0x599, 0x59f, 0x5a7, 0x5b0, 0x5ba, 0x5bd, 0x5c9, 0x5cc, 0x5d1, 0x5de, 0x5e7, 0x5f3, 0x5f6, 0x600, 0x609, 0x615, 0x622, 0x62a, 0x62d, 0x632, 0x635, 0x638, 0x63b, 0x642, 0x649, 0x64d, 0x658, 0x65b, 0x661, 0x666, 0x66a, 0x66d, 0x670, 0x673, 0x676, 0x679, 0x67e, 0x688, 0x68b, 0x68f, 0x69e, 0x6aa, 0x6ae, 0x6b3, 0x6b8, 0x6bc, 0x6c1, 0x6ca, 0x6d5, 0x6db, 0x6e3, 0x6e7, 0x6eb, 0x6f1, 0x6f7, 0x6fc, 0x6ff, 0x70f, 0x716, 0x719, 0x71c, 0x720, 0x726, 0x72b, 0x730, 0x735, 0x738, 0x73d, 0x740, 0x743, 0x747, 0x74b, 0x74e, 0x75e, 0x76f, 0x774, 0x776, 0x778}\n\n// idnaSparseValues: 1915 entries, 7660 bytes\nvar idnaSparseValues = [1915]valueRange{\n\t// Block 0x0, offset 0x0\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0xe105, lo: 0x80, hi: 0x96},\n\t{value: 0x0018, lo: 0x97, hi: 0x97},\n\t{value: 0xe105, lo: 0x98, hi: 0x9e},\n\t{value: 0x001f, lo: 0x9f, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xb6},\n\t{value: 0x0018, lo: 0xb7, hi: 0xb7},\n\t{value: 0x0008, lo: 0xb8, hi: 0xbf},\n\t// Block 0x1, offset 0x8\n\t{value: 0x0000, lo: 0x10},\n\t{value: 0x0008, lo: 0x80, hi: 0x80},\n\t{value: 0xe01d, lo: 0x81, hi: 0x81},\n\t{value: 0x0008, lo: 0x82, hi: 0x82},\n\t{value: 0x0335, lo: 0x83, hi: 
0x83},\n\t{value: 0x034d, lo: 0x84, hi: 0x84},\n\t{value: 0x0365, lo: 0x85, hi: 0x85},\n\t{value: 0xe00d, lo: 0x86, hi: 0x86},\n\t{value: 0x0008, lo: 0x87, hi: 0x87},\n\t{value: 0xe00d, lo: 0x88, hi: 0x88},\n\t{value: 0x0008, lo: 0x89, hi: 0x89},\n\t{value: 0xe00d, lo: 0x8a, hi: 0x8a},\n\t{value: 0x0008, lo: 0x8b, hi: 0x8b},\n\t{value: 0xe00d, lo: 0x8c, hi: 0x8c},\n\t{value: 0x0008, lo: 0x8d, hi: 0x8d},\n\t{value: 0xe00d, lo: 0x8e, hi: 0x8e},\n\t{value: 0x0008, lo: 0x8f, hi: 0xbf},\n\t// Block 0x2, offset 0x19\n\t{value: 0x0000, lo: 0x0b},\n\t{value: 0x0008, lo: 0x80, hi: 0xaf},\n\t{value: 0x0249, lo: 0xb0, hi: 0xb0},\n\t{value: 0x037d, lo: 0xb1, hi: 0xb1},\n\t{value: 0x0259, lo: 0xb2, hi: 0xb2},\n\t{value: 0x0269, lo: 0xb3, hi: 0xb3},\n\t{value: 0x034d, lo: 0xb4, hi: 0xb4},\n\t{value: 0x0395, lo: 0xb5, hi: 0xb5},\n\t{value: 0xe1bd, lo: 0xb6, hi: 0xb6},\n\t{value: 0x0279, lo: 0xb7, hi: 0xb7},\n\t{value: 0x0289, lo: 0xb8, hi: 0xb8},\n\t{value: 0x0008, lo: 0xb9, hi: 0xbf},\n\t// Block 0x3, offset 0x25\n\t{value: 0x0000, lo: 0x01},\n\t{value: 0x3308, lo: 0x80, hi: 0xbf},\n\t// Block 0x4, offset 0x27\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x03f5, lo: 0x80, hi: 0x8f},\n\t{value: 0xe105, lo: 0x90, hi: 0x9f},\n\t{value: 0x049d, lo: 0xa0, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xbf},\n\t// Block 0x5, offset 0x2c\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0xe185, lo: 0x80, hi: 0x8f},\n\t{value: 0x0545, lo: 0x90, hi: 0x96},\n\t{value: 0x0040, lo: 0x97, hi: 0x98},\n\t{value: 0x0008, lo: 0x99, hi: 0x99},\n\t{value: 0x0018, lo: 0x9a, hi: 0x9f},\n\t{value: 0x0040, lo: 0xa0, hi: 0xa0},\n\t{value: 0x0008, lo: 0xa1, hi: 0xbf},\n\t// Block 0x6, offset 0x34\n\t{value: 0x0000, lo: 0x0a},\n\t{value: 0x0008, lo: 0x80, hi: 0x86},\n\t{value: 0x0401, lo: 0x87, hi: 0x87},\n\t{value: 0x0040, lo: 0x88, hi: 0x88},\n\t{value: 0x0018, lo: 0x89, hi: 0x8a},\n\t{value: 0x0040, lo: 0x8b, hi: 0x8c},\n\t{value: 0x0018, lo: 0x8d, hi: 0x8f},\n\t{value: 0x0040, lo: 0x90, hi: 0x90},\n\t{value: 
0x3308, lo: 0x91, hi: 0xbd},\n\t{value: 0x0818, lo: 0xbe, hi: 0xbe},\n\t{value: 0x3308, lo: 0xbf, hi: 0xbf},\n\t// Block 0x7, offset 0x3f\n\t{value: 0x0000, lo: 0x0b},\n\t{value: 0x0818, lo: 0x80, hi: 0x80},\n\t{value: 0x3308, lo: 0x81, hi: 0x82},\n\t{value: 0x0818, lo: 0x83, hi: 0x83},\n\t{value: 0x3308, lo: 0x84, hi: 0x85},\n\t{value: 0x0818, lo: 0x86, hi: 0x86},\n\t{value: 0x3308, lo: 0x87, hi: 0x87},\n\t{value: 0x0040, lo: 0x88, hi: 0x8f},\n\t{value: 0x0808, lo: 0x90, hi: 0xaa},\n\t{value: 0x0040, lo: 0xab, hi: 0xaf},\n\t{value: 0x0808, lo: 0xb0, hi: 0xb4},\n\t{value: 0x0040, lo: 0xb5, hi: 0xbf},\n\t// Block 0x8, offset 0x4b\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0a08, lo: 0x80, hi: 0x87},\n\t{value: 0x0c08, lo: 0x88, hi: 0x99},\n\t{value: 0x0a08, lo: 0x9a, hi: 0xbf},\n\t// Block 0x9, offset 0x4f\n\t{value: 0x0000, lo: 0x0e},\n\t{value: 0x3308, lo: 0x80, hi: 0x8a},\n\t{value: 0x0040, lo: 0x8b, hi: 0x8c},\n\t{value: 0x0c08, lo: 0x8d, hi: 0x8d},\n\t{value: 0x0a08, lo: 0x8e, hi: 0x98},\n\t{value: 0x0c08, lo: 0x99, hi: 0x9b},\n\t{value: 0x0a08, lo: 0x9c, hi: 0xaa},\n\t{value: 0x0c08, lo: 0xab, hi: 0xac},\n\t{value: 0x0a08, lo: 0xad, hi: 0xb0},\n\t{value: 0x0c08, lo: 0xb1, hi: 0xb1},\n\t{value: 0x0a08, lo: 0xb2, hi: 0xb2},\n\t{value: 0x0c08, lo: 0xb3, hi: 0xb4},\n\t{value: 0x0a08, lo: 0xb5, hi: 0xb7},\n\t{value: 0x0c08, lo: 0xb8, hi: 0xb9},\n\t{value: 0x0a08, lo: 0xba, hi: 0xbf},\n\t// Block 0xa, offset 0x5e\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0808, lo: 0x80, hi: 0xa5},\n\t{value: 0x3308, lo: 0xa6, hi: 0xb0},\n\t{value: 0x0808, lo: 0xb1, hi: 0xb1},\n\t{value: 0x0040, lo: 0xb2, hi: 0xbf},\n\t// Block 0xb, offset 0x63\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0x0808, lo: 0x80, hi: 0x89},\n\t{value: 0x0a08, lo: 0x8a, hi: 0xaa},\n\t{value: 0x3308, lo: 0xab, hi: 0xb3},\n\t{value: 0x0808, lo: 0xb4, hi: 0xb5},\n\t{value: 0x0018, lo: 0xb6, hi: 0xb9},\n\t{value: 0x0818, lo: 0xba, hi: 0xba},\n\t{value: 0x0040, lo: 0xbb, hi: 0xbf},\n\t// Block 0xc, offset 
0x6b\n\t{value: 0x0000, lo: 0x0b},\n\t{value: 0x0808, lo: 0x80, hi: 0x95},\n\t{value: 0x3308, lo: 0x96, hi: 0x99},\n\t{value: 0x0808, lo: 0x9a, hi: 0x9a},\n\t{value: 0x3308, lo: 0x9b, hi: 0xa3},\n\t{value: 0x0808, lo: 0xa4, hi: 0xa4},\n\t{value: 0x3308, lo: 0xa5, hi: 0xa7},\n\t{value: 0x0808, lo: 0xa8, hi: 0xa8},\n\t{value: 0x3308, lo: 0xa9, hi: 0xad},\n\t{value: 0x0040, lo: 0xae, hi: 0xaf},\n\t{value: 0x0818, lo: 0xb0, hi: 0xbe},\n\t{value: 0x0040, lo: 0xbf, hi: 0xbf},\n\t// Block 0xd, offset 0x77\n\t{value: 0x0000, lo: 0x0d},\n\t{value: 0x0040, lo: 0x80, hi: 0x9f},\n\t{value: 0x0a08, lo: 0xa0, hi: 0xa9},\n\t{value: 0x0c08, lo: 0xaa, hi: 0xac},\n\t{value: 0x0808, lo: 0xad, hi: 0xad},\n\t{value: 0x0c08, lo: 0xae, hi: 0xae},\n\t{value: 0x0a08, lo: 0xaf, hi: 0xb0},\n\t{value: 0x0c08, lo: 0xb1, hi: 0xb2},\n\t{value: 0x0a08, lo: 0xb3, hi: 0xb4},\n\t{value: 0x0040, lo: 0xb5, hi: 0xb5},\n\t{value: 0x0a08, lo: 0xb6, hi: 0xb8},\n\t{value: 0x0c08, lo: 0xb9, hi: 0xb9},\n\t{value: 0x0a08, lo: 0xba, hi: 0xbd},\n\t{value: 0x0040, lo: 0xbe, hi: 0xbf},\n\t// Block 0xe, offset 0x85\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0040, lo: 0x80, hi: 0x93},\n\t{value: 0x3308, lo: 0x94, hi: 0xa1},\n\t{value: 0x0840, lo: 0xa2, hi: 0xa2},\n\t{value: 0x3308, lo: 0xa3, hi: 0xbf},\n\t// Block 0xf, offset 0x8a\n\t{value: 0x0000, lo: 0x08},\n\t{value: 0x3308, lo: 0x80, hi: 0x82},\n\t{value: 0x3008, lo: 0x83, hi: 0x83},\n\t{value: 0x0008, lo: 0x84, hi: 0xb9},\n\t{value: 0x3308, lo: 0xba, hi: 0xba},\n\t{value: 0x3008, lo: 0xbb, hi: 0xbb},\n\t{value: 0x3308, lo: 0xbc, hi: 0xbc},\n\t{value: 0x0008, lo: 0xbd, hi: 0xbd},\n\t{value: 0x3008, lo: 0xbe, hi: 0xbf},\n\t// Block 0x10, offset 0x93\n\t{value: 0x0000, lo: 0x0f},\n\t{value: 0x3308, lo: 0x80, hi: 0x80},\n\t{value: 0x3008, lo: 0x81, hi: 0x82},\n\t{value: 0x0040, lo: 0x83, hi: 0x85},\n\t{value: 0x3008, lo: 0x86, hi: 0x88},\n\t{value: 0x0040, lo: 0x89, hi: 0x89},\n\t{value: 0x3008, lo: 0x8a, hi: 0x8c},\n\t{value: 0x3b08, lo: 0x8d, hi: 
0x8d},\n\t{value: 0x0040, lo: 0x8e, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x90},\n\t{value: 0x0040, lo: 0x91, hi: 0x96},\n\t{value: 0x3008, lo: 0x97, hi: 0x97},\n\t{value: 0x0040, lo: 0x98, hi: 0xa5},\n\t{value: 0x0008, lo: 0xa6, hi: 0xaf},\n\t{value: 0x0018, lo: 0xb0, hi: 0xba},\n\t{value: 0x0040, lo: 0xbb, hi: 0xbf},\n\t// Block 0x11, offset 0xa3\n\t{value: 0x0000, lo: 0x0d},\n\t{value: 0x3308, lo: 0x80, hi: 0x80},\n\t{value: 0x3008, lo: 0x81, hi: 0x83},\n\t{value: 0x0040, lo: 0x84, hi: 0x84},\n\t{value: 0x0008, lo: 0x85, hi: 0x8c},\n\t{value: 0x0040, lo: 0x8d, hi: 0x8d},\n\t{value: 0x0008, lo: 0x8e, hi: 0x90},\n\t{value: 0x0040, lo: 0x91, hi: 0x91},\n\t{value: 0x0008, lo: 0x92, hi: 0xa8},\n\t{value: 0x0040, lo: 0xa9, hi: 0xa9},\n\t{value: 0x0008, lo: 0xaa, hi: 0xb9},\n\t{value: 0x0040, lo: 0xba, hi: 0xbc},\n\t{value: 0x0008, lo: 0xbd, hi: 0xbd},\n\t{value: 0x3308, lo: 0xbe, hi: 0xbf},\n\t// Block 0x12, offset 0xb1\n\t{value: 0x0000, lo: 0x0b},\n\t{value: 0x3308, lo: 0x80, hi: 0x81},\n\t{value: 0x3008, lo: 0x82, hi: 0x83},\n\t{value: 0x0040, lo: 0x84, hi: 0x84},\n\t{value: 0x0008, lo: 0x85, hi: 0x8c},\n\t{value: 0x0040, lo: 0x8d, hi: 0x8d},\n\t{value: 0x0008, lo: 0x8e, hi: 0x90},\n\t{value: 0x0040, lo: 0x91, hi: 0x91},\n\t{value: 0x0008, lo: 0x92, hi: 0xba},\n\t{value: 0x3b08, lo: 0xbb, hi: 0xbc},\n\t{value: 0x0008, lo: 0xbd, hi: 0xbd},\n\t{value: 0x3008, lo: 0xbe, hi: 0xbf},\n\t// Block 0x13, offset 0xbd\n\t{value: 0x0000, lo: 0x0b},\n\t{value: 0x0040, lo: 0x80, hi: 0x81},\n\t{value: 0x3008, lo: 0x82, hi: 0x83},\n\t{value: 0x0040, lo: 0x84, hi: 0x84},\n\t{value: 0x0008, lo: 0x85, hi: 0x96},\n\t{value: 0x0040, lo: 0x97, hi: 0x99},\n\t{value: 0x0008, lo: 0x9a, hi: 0xb1},\n\t{value: 0x0040, lo: 0xb2, hi: 0xb2},\n\t{value: 0x0008, lo: 0xb3, hi: 0xbb},\n\t{value: 0x0040, lo: 0xbc, hi: 0xbc},\n\t{value: 0x0008, lo: 0xbd, hi: 0xbd},\n\t{value: 0x0040, lo: 0xbe, hi: 0xbf},\n\t// Block 0x14, offset 0xc9\n\t{value: 0x0000, lo: 0x10},\n\t{value: 0x0008, lo: 0x80, hi: 
0x86},\n\t{value: 0x0040, lo: 0x87, hi: 0x89},\n\t{value: 0x3b08, lo: 0x8a, hi: 0x8a},\n\t{value: 0x0040, lo: 0x8b, hi: 0x8e},\n\t{value: 0x3008, lo: 0x8f, hi: 0x91},\n\t{value: 0x3308, lo: 0x92, hi: 0x94},\n\t{value: 0x0040, lo: 0x95, hi: 0x95},\n\t{value: 0x3308, lo: 0x96, hi: 0x96},\n\t{value: 0x0040, lo: 0x97, hi: 0x97},\n\t{value: 0x3008, lo: 0x98, hi: 0x9f},\n\t{value: 0x0040, lo: 0xa0, hi: 0xa5},\n\t{value: 0x0008, lo: 0xa6, hi: 0xaf},\n\t{value: 0x0040, lo: 0xb0, hi: 0xb1},\n\t{value: 0x3008, lo: 0xb2, hi: 0xb3},\n\t{value: 0x0018, lo: 0xb4, hi: 0xb4},\n\t{value: 0x0040, lo: 0xb5, hi: 0xbf},\n\t// Block 0x15, offset 0xda\n\t{value: 0x0000, lo: 0x09},\n\t{value: 0x0040, lo: 0x80, hi: 0x80},\n\t{value: 0x0008, lo: 0x81, hi: 0xb0},\n\t{value: 0x3308, lo: 0xb1, hi: 0xb1},\n\t{value: 0x0008, lo: 0xb2, hi: 0xb2},\n\t{value: 0x08f1, lo: 0xb3, hi: 0xb3},\n\t{value: 0x3308, lo: 0xb4, hi: 0xb9},\n\t{value: 0x3b08, lo: 0xba, hi: 0xba},\n\t{value: 0x0040, lo: 0xbb, hi: 0xbe},\n\t{value: 0x0018, lo: 0xbf, hi: 0xbf},\n\t// Block 0x16, offset 0xe4\n\t{value: 0x0000, lo: 0x06},\n\t{value: 0x0008, lo: 0x80, hi: 0x86},\n\t{value: 0x3308, lo: 0x87, hi: 0x8e},\n\t{value: 0x0018, lo: 0x8f, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x99},\n\t{value: 0x0018, lo: 0x9a, hi: 0x9b},\n\t{value: 0x0040, lo: 0x9c, hi: 0xbf},\n\t// Block 0x17, offset 0xeb\n\t{value: 0x0000, lo: 0x0c},\n\t{value: 0x0008, lo: 0x80, hi: 0x84},\n\t{value: 0x0040, lo: 0x85, hi: 0x85},\n\t{value: 0x0008, lo: 0x86, hi: 0x86},\n\t{value: 0x0040, lo: 0x87, hi: 0x87},\n\t{value: 0x3308, lo: 0x88, hi: 0x8d},\n\t{value: 0x0040, lo: 0x8e, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0x9b},\n\t{value: 0x0961, lo: 0x9c, hi: 0x9c},\n\t{value: 0x0999, lo: 0x9d, hi: 0x9d},\n\t{value: 0x0008, lo: 0x9e, hi: 0x9f},\n\t{value: 0x0040, lo: 0xa0, hi: 0xbf},\n\t// Block 0x18, offset 0xf8\n\t{value: 0x0000, lo: 0x10},\n\t{value: 0x0008, lo: 0x80, hi: 0x80},\n\t{value: 0x0018, lo: 0x81, hi: 
0x8a},\n\t{value: 0x0008, lo: 0x8b, hi: 0x8b},\n\t{value: 0xe03d, lo: 0x8c, hi: 0x8c},\n\t{value: 0x0018, lo: 0x8d, hi: 0x97},\n\t{value: 0x3308, lo: 0x98, hi: 0x99},\n\t{value: 0x0018, lo: 0x9a, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xa9},\n\t{value: 0x0018, lo: 0xaa, hi: 0xb4},\n\t{value: 0x3308, lo: 0xb5, hi: 0xb5},\n\t{value: 0x0018, lo: 0xb6, hi: 0xb6},\n\t{value: 0x3308, lo: 0xb7, hi: 0xb7},\n\t{value: 0x0018, lo: 0xb8, hi: 0xb8},\n\t{value: 0x3308, lo: 0xb9, hi: 0xb9},\n\t{value: 0x0018, lo: 0xba, hi: 0xbd},\n\t{value: 0x3008, lo: 0xbe, hi: 0xbf},\n\t// Block 0x19, offset 0x109\n\t{value: 0x0000, lo: 0x06},\n\t{value: 0x0018, lo: 0x80, hi: 0x85},\n\t{value: 0x3308, lo: 0x86, hi: 0x86},\n\t{value: 0x0018, lo: 0x87, hi: 0x8c},\n\t{value: 0x0040, lo: 0x8d, hi: 0x8d},\n\t{value: 0x0018, lo: 0x8e, hi: 0x9a},\n\t{value: 0x0040, lo: 0x9b, hi: 0xbf},\n\t// Block 0x1a, offset 0x110\n\t{value: 0x0000, lo: 0x0a},\n\t{value: 0x0008, lo: 0x80, hi: 0xaa},\n\t{value: 0x3008, lo: 0xab, hi: 0xac},\n\t{value: 0x3308, lo: 0xad, hi: 0xb0},\n\t{value: 0x3008, lo: 0xb1, hi: 0xb1},\n\t{value: 0x3308, lo: 0xb2, hi: 0xb7},\n\t{value: 0x3008, lo: 0xb8, hi: 0xb8},\n\t{value: 0x3b08, lo: 0xb9, hi: 0xba},\n\t{value: 0x3008, lo: 0xbb, hi: 0xbc},\n\t{value: 0x3308, lo: 0xbd, hi: 0xbe},\n\t{value: 0x0008, lo: 0xbf, hi: 0xbf},\n\t// Block 0x1b, offset 0x11b\n\t{value: 0x0000, lo: 0x0e},\n\t{value: 0x0008, lo: 0x80, hi: 0x89},\n\t{value: 0x0018, lo: 0x8a, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x95},\n\t{value: 0x3008, lo: 0x96, hi: 0x97},\n\t{value: 0x3308, lo: 0x98, hi: 0x99},\n\t{value: 0x0008, lo: 0x9a, hi: 0x9d},\n\t{value: 0x3308, lo: 0x9e, hi: 0xa0},\n\t{value: 0x0008, lo: 0xa1, hi: 0xa1},\n\t{value: 0x3008, lo: 0xa2, hi: 0xa4},\n\t{value: 0x0008, lo: 0xa5, hi: 0xa6},\n\t{value: 0x3008, lo: 0xa7, hi: 0xad},\n\t{value: 0x0008, lo: 0xae, hi: 0xb0},\n\t{value: 0x3308, lo: 0xb1, hi: 0xb4},\n\t{value: 0x0008, lo: 0xb5, hi: 0xbf},\n\t// Block 0x1c, offset 0x12a\n\t{value: 0x0000, 
lo: 0x0d},\n\t{value: 0x0008, lo: 0x80, hi: 0x81},\n\t{value: 0x3308, lo: 0x82, hi: 0x82},\n\t{value: 0x3008, lo: 0x83, hi: 0x84},\n\t{value: 0x3308, lo: 0x85, hi: 0x86},\n\t{value: 0x3008, lo: 0x87, hi: 0x8c},\n\t{value: 0x3308, lo: 0x8d, hi: 0x8d},\n\t{value: 0x0008, lo: 0x8e, hi: 0x8e},\n\t{value: 0x3008, lo: 0x8f, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x99},\n\t{value: 0x3008, lo: 0x9a, hi: 0x9c},\n\t{value: 0x3308, lo: 0x9d, hi: 0x9d},\n\t{value: 0x0018, lo: 0x9e, hi: 0x9f},\n\t{value: 0x0040, lo: 0xa0, hi: 0xbf},\n\t// Block 0x1d, offset 0x138\n\t{value: 0x0000, lo: 0x09},\n\t{value: 0x0040, lo: 0x80, hi: 0x86},\n\t{value: 0x055d, lo: 0x87, hi: 0x87},\n\t{value: 0x0040, lo: 0x88, hi: 0x8c},\n\t{value: 0x055d, lo: 0x8d, hi: 0x8d},\n\t{value: 0x0040, lo: 0x8e, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0xba},\n\t{value: 0x0018, lo: 0xbb, hi: 0xbb},\n\t{value: 0xe105, lo: 0xbc, hi: 0xbc},\n\t{value: 0x0008, lo: 0xbd, hi: 0xbf},\n\t// Block 0x1e, offset 0x142\n\t{value: 0x0000, lo: 0x01},\n\t{value: 0x0018, lo: 0x80, hi: 0xbf},\n\t// Block 0x1f, offset 0x144\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0018, lo: 0x80, hi: 0x9e},\n\t{value: 0x0040, lo: 0x9f, hi: 0xa0},\n\t{value: 0x2018, lo: 0xa1, hi: 0xb5},\n\t{value: 0x0018, lo: 0xb6, hi: 0xbf},\n\t// Block 0x20, offset 0x149\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0018, lo: 0x80, hi: 0xa7},\n\t{value: 0x2018, lo: 0xa8, hi: 0xbf},\n\t// Block 0x21, offset 0x14c\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x2018, lo: 0x80, hi: 0x82},\n\t{value: 0x0018, lo: 0x83, hi: 0xbf},\n\t// Block 0x22, offset 0x14f\n\t{value: 0x0000, lo: 0x01},\n\t{value: 0x0008, lo: 0x80, hi: 0xbf},\n\t// Block 0x23, offset 0x151\n\t{value: 0x0000, lo: 0x0b},\n\t{value: 0x0008, lo: 0x80, hi: 0x88},\n\t{value: 0x0040, lo: 0x89, hi: 0x89},\n\t{value: 0x0008, lo: 0x8a, hi: 0x8d},\n\t{value: 0x0040, lo: 0x8e, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x96},\n\t{value: 0x0040, lo: 0x97, hi: 0x97},\n\t{value: 0x0008, lo: 0x98, hi: 
0x98},\n\t{value: 0x0040, lo: 0x99, hi: 0x99},\n\t{value: 0x0008, lo: 0x9a, hi: 0x9d},\n\t{value: 0x0040, lo: 0x9e, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xbf},\n\t// Block 0x24, offset 0x15d\n\t{value: 0x0000, lo: 0x0a},\n\t{value: 0x0008, lo: 0x80, hi: 0x88},\n\t{value: 0x0040, lo: 0x89, hi: 0x89},\n\t{value: 0x0008, lo: 0x8a, hi: 0x8d},\n\t{value: 0x0040, lo: 0x8e, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0xb0},\n\t{value: 0x0040, lo: 0xb1, hi: 0xb1},\n\t{value: 0x0008, lo: 0xb2, hi: 0xb5},\n\t{value: 0x0040, lo: 0xb6, hi: 0xb7},\n\t{value: 0x0008, lo: 0xb8, hi: 0xbe},\n\t{value: 0x0040, lo: 0xbf, hi: 0xbf},\n\t// Block 0x25, offset 0x168\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0x0008, lo: 0x80, hi: 0x80},\n\t{value: 0x0040, lo: 0x81, hi: 0x81},\n\t{value: 0x0008, lo: 0x82, hi: 0x85},\n\t{value: 0x0040, lo: 0x86, hi: 0x87},\n\t{value: 0x0008, lo: 0x88, hi: 0x96},\n\t{value: 0x0040, lo: 0x97, hi: 0x97},\n\t{value: 0x0008, lo: 0x98, hi: 0xbf},\n\t// Block 0x26, offset 0x170\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0008, lo: 0x80, hi: 0x90},\n\t{value: 0x0040, lo: 0x91, hi: 0x91},\n\t{value: 0x0008, lo: 0x92, hi: 0x95},\n\t{value: 0x0040, lo: 0x96, hi: 0x97},\n\t{value: 0x0008, lo: 0x98, hi: 0xbf},\n\t// Block 0x27, offset 0x176\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0008, lo: 0x80, hi: 0x9a},\n\t{value: 0x0040, lo: 0x9b, hi: 0x9c},\n\t{value: 0x3308, lo: 0x9d, hi: 0x9f},\n\t{value: 0x0018, lo: 0xa0, hi: 0xbc},\n\t{value: 0x0040, lo: 0xbd, hi: 0xbf},\n\t// Block 0x28, offset 0x17c\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0008, lo: 0x80, hi: 0x8f},\n\t{value: 0x0018, lo: 0x90, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xbf},\n\t// Block 0x29, offset 0x181\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0008, lo: 0x80, hi: 0xb5},\n\t{value: 0x0040, lo: 0xb6, hi: 0xb7},\n\t{value: 0xe045, lo: 0xb8, hi: 0xbd},\n\t{value: 0x0040, lo: 0xbe, hi: 0xbf},\n\t// Block 0x2a, offset 0x186\n\t{value: 0x0000, lo: 
0x02},\n\t{value: 0x0018, lo: 0x80, hi: 0x80},\n\t{value: 0x0008, lo: 0x81, hi: 0xbf},\n\t// Block 0x2b, offset 0x189\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0008, lo: 0x80, hi: 0xac},\n\t{value: 0x0018, lo: 0xad, hi: 0xae},\n\t{value: 0x0008, lo: 0xaf, hi: 0xbf},\n\t// Block 0x2c, offset 0x18d\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0040, lo: 0x80, hi: 0x80},\n\t{value: 0x0008, lo: 0x81, hi: 0x9a},\n\t{value: 0x0018, lo: 0x9b, hi: 0x9c},\n\t{value: 0x0040, lo: 0x9d, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xbf},\n\t// Block 0x2d, offset 0x193\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0008, lo: 0x80, hi: 0xaa},\n\t{value: 0x0018, lo: 0xab, hi: 0xb0},\n\t{value: 0x0008, lo: 0xb1, hi: 0xb8},\n\t{value: 0x0040, lo: 0xb9, hi: 0xbf},\n\t// Block 0x2e, offset 0x198\n\t{value: 0x0000, lo: 0x0b},\n\t{value: 0x0008, lo: 0x80, hi: 0x8c},\n\t{value: 0x0040, lo: 0x8d, hi: 0x8d},\n\t{value: 0x0008, lo: 0x8e, hi: 0x91},\n\t{value: 0x3308, lo: 0x92, hi: 0x93},\n\t{value: 0x3b08, lo: 0x94, hi: 0x94},\n\t{value: 0x0040, lo: 0x95, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xb1},\n\t{value: 0x3308, lo: 0xb2, hi: 0xb3},\n\t{value: 0x3b08, lo: 0xb4, hi: 0xb4},\n\t{value: 0x0018, lo: 0xb5, hi: 0xb6},\n\t{value: 0x0040, lo: 0xb7, hi: 0xbf},\n\t// Block 0x2f, offset 0x1a4\n\t{value: 0x0000, lo: 0x09},\n\t{value: 0x0008, lo: 0x80, hi: 0x91},\n\t{value: 0x3308, lo: 0x92, hi: 0x93},\n\t{value: 0x0040, lo: 0x94, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xac},\n\t{value: 0x0040, lo: 0xad, hi: 0xad},\n\t{value: 0x0008, lo: 0xae, hi: 0xb0},\n\t{value: 0x0040, lo: 0xb1, hi: 0xb1},\n\t{value: 0x3308, lo: 0xb2, hi: 0xb3},\n\t{value: 0x0040, lo: 0xb4, hi: 0xbf},\n\t// Block 0x30, offset 0x1ae\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0008, lo: 0x80, hi: 0xb3},\n\t{value: 0x3340, lo: 0xb4, hi: 0xb5},\n\t{value: 0x3008, lo: 0xb6, hi: 0xb6},\n\t{value: 0x3308, lo: 0xb7, hi: 0xbd},\n\t{value: 0x3008, lo: 0xbe, hi: 0xbf},\n\t// Block 0x31, offset 0x1b4\n\t{value: 0x0000, lo: 
0x10},\n\t{value: 0x3008, lo: 0x80, hi: 0x85},\n\t{value: 0x3308, lo: 0x86, hi: 0x86},\n\t{value: 0x3008, lo: 0x87, hi: 0x88},\n\t{value: 0x3308, lo: 0x89, hi: 0x91},\n\t{value: 0x3b08, lo: 0x92, hi: 0x92},\n\t{value: 0x3308, lo: 0x93, hi: 0x93},\n\t{value: 0x0018, lo: 0x94, hi: 0x96},\n\t{value: 0x0008, lo: 0x97, hi: 0x97},\n\t{value: 0x0018, lo: 0x98, hi: 0x9b},\n\t{value: 0x0008, lo: 0x9c, hi: 0x9c},\n\t{value: 0x3308, lo: 0x9d, hi: 0x9d},\n\t{value: 0x0040, lo: 0x9e, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xa9},\n\t{value: 0x0040, lo: 0xaa, hi: 0xaf},\n\t{value: 0x0018, lo: 0xb0, hi: 0xb9},\n\t{value: 0x0040, lo: 0xba, hi: 0xbf},\n\t// Block 0x32, offset 0x1c5\n\t{value: 0x0000, lo: 0x09},\n\t{value: 0x0018, lo: 0x80, hi: 0x85},\n\t{value: 0x0040, lo: 0x86, hi: 0x86},\n\t{value: 0x0218, lo: 0x87, hi: 0x87},\n\t{value: 0x0018, lo: 0x88, hi: 0x8a},\n\t{value: 0x33c0, lo: 0x8b, hi: 0x8d},\n\t{value: 0x0040, lo: 0x8e, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0x9f},\n\t{value: 0x0208, lo: 0xa0, hi: 0xbf},\n\t// Block 0x33, offset 0x1cf\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0208, lo: 0x80, hi: 0xb7},\n\t{value: 0x0040, lo: 0xb8, hi: 0xbf},\n\t// Block 0x34, offset 0x1d2\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0x0008, lo: 0x80, hi: 0x84},\n\t{value: 0x3308, lo: 0x85, hi: 0x86},\n\t{value: 0x0208, lo: 0x87, hi: 0xa8},\n\t{value: 0x3308, lo: 0xa9, hi: 0xa9},\n\t{value: 0x0208, lo: 0xaa, hi: 0xaa},\n\t{value: 0x0040, lo: 0xab, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xbf},\n\t// Block 0x35, offset 0x1da\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0xb5},\n\t{value: 0x0040, lo: 0xb6, hi: 0xbf},\n\t// Block 0x36, offset 0x1dd\n\t{value: 0x0000, lo: 0x0c},\n\t{value: 0x0008, lo: 0x80, hi: 0x9e},\n\t{value: 0x0040, lo: 0x9f, hi: 0x9f},\n\t{value: 0x3308, lo: 0xa0, hi: 0xa2},\n\t{value: 0x3008, lo: 0xa3, hi: 0xa6},\n\t{value: 0x3308, lo: 0xa7, hi: 0xa8},\n\t{value: 0x3008, lo: 0xa9, hi: 
0xab},\n\t{value: 0x0040, lo: 0xac, hi: 0xaf},\n\t{value: 0x3008, lo: 0xb0, hi: 0xb1},\n\t{value: 0x3308, lo: 0xb2, hi: 0xb2},\n\t{value: 0x3008, lo: 0xb3, hi: 0xb8},\n\t{value: 0x3308, lo: 0xb9, hi: 0xbb},\n\t{value: 0x0040, lo: 0xbc, hi: 0xbf},\n\t// Block 0x37, offset 0x1ea\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0x0018, lo: 0x80, hi: 0x80},\n\t{value: 0x0040, lo: 0x81, hi: 0x83},\n\t{value: 0x0018, lo: 0x84, hi: 0x85},\n\t{value: 0x0008, lo: 0x86, hi: 0xad},\n\t{value: 0x0040, lo: 0xae, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xb4},\n\t{value: 0x0040, lo: 0xb5, hi: 0xbf},\n\t// Block 0x38, offset 0x1f2\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0008, lo: 0x80, hi: 0xab},\n\t{value: 0x0040, lo: 0xac, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xbf},\n\t// Block 0x39, offset 0x1f6\n\t{value: 0x0000, lo: 0x06},\n\t{value: 0x0008, lo: 0x80, hi: 0x89},\n\t{value: 0x0040, lo: 0x8a, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x99},\n\t{value: 0x0028, lo: 0x9a, hi: 0x9a},\n\t{value: 0x0040, lo: 0x9b, hi: 0x9d},\n\t{value: 0x0018, lo: 0x9e, hi: 0xbf},\n\t// Block 0x3a, offset 0x1fd\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0x0008, lo: 0x80, hi: 0x96},\n\t{value: 0x3308, lo: 0x97, hi: 0x98},\n\t{value: 0x3008, lo: 0x99, hi: 0x9a},\n\t{value: 0x3308, lo: 0x9b, hi: 0x9b},\n\t{value: 0x0040, lo: 0x9c, hi: 0x9d},\n\t{value: 0x0018, lo: 0x9e, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xbf},\n\t// Block 0x3b, offset 0x205\n\t{value: 0x0000, lo: 0x0f},\n\t{value: 0x0008, lo: 0x80, hi: 0x94},\n\t{value: 0x3008, lo: 0x95, hi: 0x95},\n\t{value: 0x3308, lo: 0x96, hi: 0x96},\n\t{value: 0x3008, lo: 0x97, hi: 0x97},\n\t{value: 0x3308, lo: 0x98, hi: 0x9e},\n\t{value: 0x0040, lo: 0x9f, hi: 0x9f},\n\t{value: 0x3b08, lo: 0xa0, hi: 0xa0},\n\t{value: 0x3008, lo: 0xa1, hi: 0xa1},\n\t{value: 0x3308, lo: 0xa2, hi: 0xa2},\n\t{value: 0x3008, lo: 0xa3, hi: 0xa4},\n\t{value: 0x3308, lo: 0xa5, hi: 0xac},\n\t{value: 0x3008, lo: 0xad, hi: 0xb2},\n\t{value: 0x3308, lo: 0xb3, hi: 
0xbc},\n\t{value: 0x0040, lo: 0xbd, hi: 0xbe},\n\t{value: 0x3308, lo: 0xbf, hi: 0xbf},\n\t// Block 0x3c, offset 0x215\n\t{value: 0x0000, lo: 0x0b},\n\t{value: 0x0008, lo: 0x80, hi: 0x89},\n\t{value: 0x0040, lo: 0x8a, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0x9f},\n\t{value: 0x0018, lo: 0xa0, hi: 0xa6},\n\t{value: 0x0008, lo: 0xa7, hi: 0xa7},\n\t{value: 0x0018, lo: 0xa8, hi: 0xad},\n\t{value: 0x0040, lo: 0xae, hi: 0xaf},\n\t{value: 0x3308, lo: 0xb0, hi: 0xbd},\n\t{value: 0x3318, lo: 0xbe, hi: 0xbe},\n\t{value: 0x0040, lo: 0xbf, hi: 0xbf},\n\t// Block 0x3d, offset 0x221\n\t{value: 0x0000, lo: 0x01},\n\t{value: 0x0040, lo: 0x80, hi: 0xbf},\n\t// Block 0x3e, offset 0x223\n\t{value: 0x0000, lo: 0x09},\n\t{value: 0x3308, lo: 0x80, hi: 0x83},\n\t{value: 0x3008, lo: 0x84, hi: 0x84},\n\t{value: 0x0008, lo: 0x85, hi: 0xb3},\n\t{value: 0x3308, lo: 0xb4, hi: 0xb4},\n\t{value: 0x3008, lo: 0xb5, hi: 0xb5},\n\t{value: 0x3308, lo: 0xb6, hi: 0xba},\n\t{value: 0x3008, lo: 0xbb, hi: 0xbb},\n\t{value: 0x3308, lo: 0xbc, hi: 0xbc},\n\t{value: 0x3008, lo: 0xbd, hi: 0xbf},\n\t// Block 0x3f, offset 0x22d\n\t{value: 0x0000, lo: 0x0b},\n\t{value: 0x3008, lo: 0x80, hi: 0x81},\n\t{value: 0x3308, lo: 0x82, hi: 0x82},\n\t{value: 0x3008, lo: 0x83, hi: 0x83},\n\t{value: 0x3808, lo: 0x84, hi: 0x84},\n\t{value: 0x0008, lo: 0x85, hi: 0x8b},\n\t{value: 0x0040, lo: 0x8c, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x99},\n\t{value: 0x0018, lo: 0x9a, hi: 0xaa},\n\t{value: 0x3308, lo: 0xab, hi: 0xb3},\n\t{value: 0x0018, lo: 0xb4, hi: 0xbc},\n\t{value: 0x0040, lo: 0xbd, hi: 0xbf},\n\t// Block 0x40, offset 0x239\n\t{value: 0x0000, lo: 0x0b},\n\t{value: 0x3308, lo: 0x80, hi: 0x81},\n\t{value: 0x3008, lo: 0x82, hi: 0x82},\n\t{value: 0x0008, lo: 0x83, hi: 0xa0},\n\t{value: 0x3008, lo: 0xa1, hi: 0xa1},\n\t{value: 0x3308, lo: 0xa2, hi: 0xa5},\n\t{value: 0x3008, lo: 0xa6, hi: 0xa7},\n\t{value: 0x3308, lo: 0xa8, hi: 0xa9},\n\t{value: 0x3808, lo: 0xaa, hi: 
0xaa},\n\t{value: 0x3b08, lo: 0xab, hi: 0xab},\n\t{value: 0x3308, lo: 0xac, hi: 0xad},\n\t{value: 0x0008, lo: 0xae, hi: 0xbf},\n\t// Block 0x41, offset 0x245\n\t{value: 0x0000, lo: 0x0b},\n\t{value: 0x0008, lo: 0x80, hi: 0xa5},\n\t{value: 0x3308, lo: 0xa6, hi: 0xa6},\n\t{value: 0x3008, lo: 0xa7, hi: 0xa7},\n\t{value: 0x3308, lo: 0xa8, hi: 0xa9},\n\t{value: 0x3008, lo: 0xaa, hi: 0xac},\n\t{value: 0x3308, lo: 0xad, hi: 0xad},\n\t{value: 0x3008, lo: 0xae, hi: 0xae},\n\t{value: 0x3308, lo: 0xaf, hi: 0xb1},\n\t{value: 0x3808, lo: 0xb2, hi: 0xb3},\n\t{value: 0x0040, lo: 0xb4, hi: 0xbb},\n\t{value: 0x0018, lo: 0xbc, hi: 0xbf},\n\t// Block 0x42, offset 0x251\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0x0008, lo: 0x80, hi: 0xa3},\n\t{value: 0x3008, lo: 0xa4, hi: 0xab},\n\t{value: 0x3308, lo: 0xac, hi: 0xb3},\n\t{value: 0x3008, lo: 0xb4, hi: 0xb5},\n\t{value: 0x3308, lo: 0xb6, hi: 0xb7},\n\t{value: 0x0040, lo: 0xb8, hi: 0xba},\n\t{value: 0x0018, lo: 0xbb, hi: 0xbf},\n\t// Block 0x43, offset 0x259\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0008, lo: 0x80, hi: 0x89},\n\t{value: 0x0040, lo: 0x8a, hi: 0x8c},\n\t{value: 0x0008, lo: 0x8d, hi: 0xbd},\n\t{value: 0x0018, lo: 0xbe, hi: 0xbf},\n\t// Block 0x44, offset 0x25e\n\t{value: 0x0000, lo: 0x09},\n\t{value: 0x0e29, lo: 0x80, hi: 0x80},\n\t{value: 0x0e41, lo: 0x81, hi: 0x81},\n\t{value: 0x0e59, lo: 0x82, hi: 0x82},\n\t{value: 0x0e71, lo: 0x83, hi: 0x83},\n\t{value: 0x0e89, lo: 0x84, hi: 0x85},\n\t{value: 0x0ea1, lo: 0x86, hi: 0x86},\n\t{value: 0x0eb9, lo: 0x87, hi: 0x87},\n\t{value: 0x057d, lo: 0x88, hi: 0x88},\n\t{value: 0x0040, lo: 0x89, hi: 0xbf},\n\t// Block 0x45, offset 0x268\n\t{value: 0x0000, lo: 0x10},\n\t{value: 0x0018, lo: 0x80, hi: 0x87},\n\t{value: 0x0040, lo: 0x88, hi: 0x8f},\n\t{value: 0x3308, lo: 0x90, hi: 0x92},\n\t{value: 0x0018, lo: 0x93, hi: 0x93},\n\t{value: 0x3308, lo: 0x94, hi: 0xa0},\n\t{value: 0x3008, lo: 0xa1, hi: 0xa1},\n\t{value: 0x3308, lo: 0xa2, hi: 0xa8},\n\t{value: 0x0008, lo: 0xa9, hi: 
0xac},\n\t{value: 0x3308, lo: 0xad, hi: 0xad},\n\t{value: 0x0008, lo: 0xae, hi: 0xb1},\n\t{value: 0x3008, lo: 0xb2, hi: 0xb3},\n\t{value: 0x3308, lo: 0xb4, hi: 0xb4},\n\t{value: 0x0008, lo: 0xb5, hi: 0xb6},\n\t{value: 0x3008, lo: 0xb7, hi: 0xb7},\n\t{value: 0x3308, lo: 0xb8, hi: 0xb9},\n\t{value: 0x0040, lo: 0xba, hi: 0xbf},\n\t// Block 0x46, offset 0x279\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x3308, lo: 0x80, hi: 0xb9},\n\t{value: 0x0040, lo: 0xba, hi: 0xba},\n\t{value: 0x3308, lo: 0xbb, hi: 0xbf},\n\t// Block 0x47, offset 0x27d\n\t{value: 0x0000, lo: 0x0a},\n\t{value: 0x0008, lo: 0x80, hi: 0x87},\n\t{value: 0xe045, lo: 0x88, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x95},\n\t{value: 0x0040, lo: 0x96, hi: 0x97},\n\t{value: 0xe045, lo: 0x98, hi: 0x9d},\n\t{value: 0x0040, lo: 0x9e, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xa7},\n\t{value: 0xe045, lo: 0xa8, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xb7},\n\t{value: 0xe045, lo: 0xb8, hi: 0xbf},\n\t// Block 0x48, offset 0x288\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0040, lo: 0x80, hi: 0x8f},\n\t{value: 0x3318, lo: 0x90, hi: 0xb0},\n\t{value: 0x0040, lo: 0xb1, hi: 0xbf},\n\t// Block 0x49, offset 0x28c\n\t{value: 0x0000, lo: 0x08},\n\t{value: 0x0018, lo: 0x80, hi: 0x82},\n\t{value: 0x0040, lo: 0x83, hi: 0x83},\n\t{value: 0x0008, lo: 0x84, hi: 0x84},\n\t{value: 0x0018, lo: 0x85, hi: 0x88},\n\t{value: 0x24c1, lo: 0x89, hi: 0x89},\n\t{value: 0x0018, lo: 0x8a, hi: 0x8b},\n\t{value: 0x0040, lo: 0x8c, hi: 0x8f},\n\t{value: 0x0018, lo: 0x90, hi: 0xbf},\n\t// Block 0x4a, offset 0x295\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0x0018, lo: 0x80, hi: 0xab},\n\t{value: 0x24f1, lo: 0xac, hi: 0xac},\n\t{value: 0x2529, lo: 0xad, hi: 0xad},\n\t{value: 0x0018, lo: 0xae, hi: 0xae},\n\t{value: 0x2579, lo: 0xaf, hi: 0xaf},\n\t{value: 0x25b1, lo: 0xb0, hi: 0xb0},\n\t{value: 0x0018, lo: 0xb1, hi: 0xbf},\n\t// Block 0x4b, offset 0x29d\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0018, lo: 0x80, hi: 0x9f},\n\t{value: 0x0080, lo: 
0xa0, hi: 0xa0},\n\t{value: 0x0018, lo: 0xa1, hi: 0xad},\n\t{value: 0x0080, lo: 0xae, hi: 0xaf},\n\t{value: 0x0018, lo: 0xb0, hi: 0xbf},\n\t// Block 0x4c, offset 0x2a3\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0018, lo: 0x80, hi: 0xa8},\n\t{value: 0x09c5, lo: 0xa9, hi: 0xa9},\n\t{value: 0x09e5, lo: 0xaa, hi: 0xaa},\n\t{value: 0x0018, lo: 0xab, hi: 0xbf},\n\t// Block 0x4d, offset 0x2a8\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0018, lo: 0x80, hi: 0xa6},\n\t{value: 0x0040, lo: 0xa7, hi: 0xbf},\n\t// Block 0x4e, offset 0x2ab\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0018, lo: 0x80, hi: 0x8b},\n\t{value: 0x28c1, lo: 0x8c, hi: 0x8c},\n\t{value: 0x0018, lo: 0x8d, hi: 0xbf},\n\t// Block 0x4f, offset 0x2af\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0018, lo: 0x80, hi: 0xb3},\n\t{value: 0x0e66, lo: 0xb4, hi: 0xb4},\n\t{value: 0x292a, lo: 0xb5, hi: 0xb5},\n\t{value: 0x0e86, lo: 0xb6, hi: 0xb6},\n\t{value: 0x0018, lo: 0xb7, hi: 0xbf},\n\t// Block 0x50, offset 0x2b5\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0018, lo: 0x80, hi: 0x9b},\n\t{value: 0x2941, lo: 0x9c, hi: 0x9c},\n\t{value: 0x0018, lo: 0x9d, hi: 0xbf},\n\t// Block 0x51, offset 0x2b9\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0018, lo: 0x80, hi: 0xb3},\n\t{value: 0x0040, lo: 0xb4, hi: 0xb5},\n\t{value: 0x0018, lo: 0xb6, hi: 0xbf},\n\t// Block 0x52, offset 0x2bd\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0018, lo: 0x80, hi: 0x95},\n\t{value: 0x0040, lo: 0x96, hi: 0x97},\n\t{value: 0x0018, lo: 0x98, hi: 0xb9},\n\t{value: 0x0040, lo: 0xba, hi: 0xbc},\n\t{value: 0x0018, lo: 0xbd, hi: 0xbf},\n\t// Block 0x53, offset 0x2c3\n\t{value: 0x0000, lo: 0x06},\n\t{value: 0x0018, lo: 0x80, hi: 0x88},\n\t{value: 0x0040, lo: 0x89, hi: 0x89},\n\t{value: 0x0018, lo: 0x8a, hi: 0x92},\n\t{value: 0x0040, lo: 0x93, hi: 0xab},\n\t{value: 0x0018, lo: 0xac, hi: 0xaf},\n\t{value: 0x0040, lo: 0xb0, hi: 0xbf},\n\t// Block 0x54, offset 0x2ca\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0xe185, lo: 0x80, hi: 0x8f},\n\t{value: 0x03f5, lo: 0x90, hi: 
0x9f},\n\t{value: 0x0ea5, lo: 0xa0, hi: 0xae},\n\t{value: 0x0040, lo: 0xaf, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xbf},\n\t// Block 0x55, offset 0x2d0\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0x0008, lo: 0x80, hi: 0xa5},\n\t{value: 0x0040, lo: 0xa6, hi: 0xa6},\n\t{value: 0x0008, lo: 0xa7, hi: 0xa7},\n\t{value: 0x0040, lo: 0xa8, hi: 0xac},\n\t{value: 0x0008, lo: 0xad, hi: 0xad},\n\t{value: 0x0040, lo: 0xae, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xbf},\n\t// Block 0x56, offset 0x2d8\n\t{value: 0x0000, lo: 0x06},\n\t{value: 0x0008, lo: 0x80, hi: 0xa7},\n\t{value: 0x0040, lo: 0xa8, hi: 0xae},\n\t{value: 0xe075, lo: 0xaf, hi: 0xaf},\n\t{value: 0x0018, lo: 0xb0, hi: 0xb0},\n\t{value: 0x0040, lo: 0xb1, hi: 0xbe},\n\t{value: 0x3b08, lo: 0xbf, hi: 0xbf},\n\t// Block 0x57, offset 0x2df\n\t{value: 0x0000, lo: 0x0a},\n\t{value: 0x0008, lo: 0x80, hi: 0x96},\n\t{value: 0x0040, lo: 0x97, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xa6},\n\t{value: 0x0040, lo: 0xa7, hi: 0xa7},\n\t{value: 0x0008, lo: 0xa8, hi: 0xae},\n\t{value: 0x0040, lo: 0xaf, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xb6},\n\t{value: 0x0040, lo: 0xb7, hi: 0xb7},\n\t{value: 0x0008, lo: 0xb8, hi: 0xbe},\n\t{value: 0x0040, lo: 0xbf, hi: 0xbf},\n\t// Block 0x58, offset 0x2ea\n\t{value: 0x0000, lo: 0x09},\n\t{value: 0x0008, lo: 0x80, hi: 0x86},\n\t{value: 0x0040, lo: 0x87, hi: 0x87},\n\t{value: 0x0008, lo: 0x88, hi: 0x8e},\n\t{value: 0x0040, lo: 0x8f, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x96},\n\t{value: 0x0040, lo: 0x97, hi: 0x97},\n\t{value: 0x0008, lo: 0x98, hi: 0x9e},\n\t{value: 0x0040, lo: 0x9f, hi: 0x9f},\n\t{value: 0x3308, lo: 0xa0, hi: 0xbf},\n\t// Block 0x59, offset 0x2f4\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0018, lo: 0x80, hi: 0xae},\n\t{value: 0x0008, lo: 0xaf, hi: 0xaf},\n\t{value: 0x0018, lo: 0xb0, hi: 0xbf},\n\t// Block 0x5a, offset 0x2f8\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0018, lo: 0x80, hi: 0x89},\n\t{value: 0x0040, lo: 0x8a, hi: 0xbf},\n\t// Block 0x5b, offset 
0x2fb\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0018, lo: 0x80, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0x9a},\n\t{value: 0x0018, lo: 0x9b, hi: 0x9e},\n\t{value: 0x0edd, lo: 0x9f, hi: 0x9f},\n\t{value: 0x0018, lo: 0xa0, hi: 0xbf},\n\t// Block 0x5c, offset 0x301\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0018, lo: 0x80, hi: 0xb2},\n\t{value: 0x0efd, lo: 0xb3, hi: 0xb3},\n\t{value: 0x0040, lo: 0xb4, hi: 0xbf},\n\t// Block 0x5d, offset 0x305\n\t{value: 0x0020, lo: 0x01},\n\t{value: 0x0f1d, lo: 0x80, hi: 0xbf},\n\t// Block 0x5e, offset 0x307\n\t{value: 0x0020, lo: 0x02},\n\t{value: 0x171d, lo: 0x80, hi: 0x8f},\n\t{value: 0x18fd, lo: 0x90, hi: 0xbf},\n\t// Block 0x5f, offset 0x30a\n\t{value: 0x0020, lo: 0x01},\n\t{value: 0x1efd, lo: 0x80, hi: 0xbf},\n\t// Block 0x60, offset 0x30c\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0040, lo: 0x80, hi: 0x80},\n\t{value: 0x0008, lo: 0x81, hi: 0xbf},\n\t// Block 0x61, offset 0x30f\n\t{value: 0x0000, lo: 0x09},\n\t{value: 0x0008, lo: 0x80, hi: 0x96},\n\t{value: 0x0040, lo: 0x97, hi: 0x98},\n\t{value: 0x3308, lo: 0x99, hi: 0x9a},\n\t{value: 0x29e2, lo: 0x9b, hi: 0x9b},\n\t{value: 0x2a0a, lo: 0x9c, hi: 0x9c},\n\t{value: 0x0008, lo: 0x9d, hi: 0x9e},\n\t{value: 0x2a31, lo: 0x9f, hi: 0x9f},\n\t{value: 0x0018, lo: 0xa0, hi: 0xa0},\n\t{value: 0x0008, lo: 0xa1, hi: 0xbf},\n\t// Block 0x62, offset 0x319\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0xbe},\n\t{value: 0x2a69, lo: 0xbf, hi: 0xbf},\n\t// Block 0x63, offset 0x31c\n\t{value: 0x0000, lo: 0x0e},\n\t{value: 0x0040, lo: 0x80, hi: 0x84},\n\t{value: 0x0008, lo: 0x85, hi: 0xae},\n\t{value: 0x0040, lo: 0xaf, hi: 0xb0},\n\t{value: 0x2a1d, lo: 0xb1, hi: 0xb1},\n\t{value: 0x2a3d, lo: 0xb2, hi: 0xb2},\n\t{value: 0x2a5d, lo: 0xb3, hi: 0xb3},\n\t{value: 0x2a7d, lo: 0xb4, hi: 0xb4},\n\t{value: 0x2a5d, lo: 0xb5, hi: 0xb5},\n\t{value: 0x2a9d, lo: 0xb6, hi: 0xb6},\n\t{value: 0x2abd, lo: 0xb7, hi: 0xb7},\n\t{value: 0x2add, lo: 0xb8, hi: 0xb9},\n\t{value: 0x2afd, lo: 0xba, hi: 
0xbb},\n\t{value: 0x2b1d, lo: 0xbc, hi: 0xbd},\n\t{value: 0x2afd, lo: 0xbe, hi: 0xbf},\n\t// Block 0x64, offset 0x32b\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0018, lo: 0x80, hi: 0xa3},\n\t{value: 0x0040, lo: 0xa4, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xbf},\n\t// Block 0x65, offset 0x32f\n\t{value: 0x0030, lo: 0x04},\n\t{value: 0x2aa2, lo: 0x80, hi: 0x9d},\n\t{value: 0x305a, lo: 0x9e, hi: 0x9e},\n\t{value: 0x0040, lo: 0x9f, hi: 0x9f},\n\t{value: 0x30a2, lo: 0xa0, hi: 0xbf},\n\t// Block 0x66, offset 0x334\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0xaa},\n\t{value: 0x0040, lo: 0xab, hi: 0xbf},\n\t// Block 0x67, offset 0x337\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0008, lo: 0x80, hi: 0x8c},\n\t{value: 0x0040, lo: 0x8d, hi: 0x8f},\n\t{value: 0x0018, lo: 0x90, hi: 0xbf},\n\t// Block 0x68, offset 0x33b\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0018, lo: 0x80, hi: 0x86},\n\t{value: 0x0040, lo: 0x87, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0xbd},\n\t{value: 0x0018, lo: 0xbe, hi: 0xbf},\n\t// Block 0x69, offset 0x340\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0008, lo: 0x80, hi: 0x8c},\n\t{value: 0x0018, lo: 0x8d, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0xab},\n\t{value: 0x0040, lo: 0xac, hi: 0xbf},\n\t// Block 0x6a, offset 0x345\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0008, lo: 0x80, hi: 0xa5},\n\t{value: 0x0018, lo: 0xa6, hi: 0xaf},\n\t{value: 0x3308, lo: 0xb0, hi: 0xb1},\n\t{value: 0x0018, lo: 0xb2, hi: 0xb7},\n\t{value: 0x0040, lo: 0xb8, hi: 0xbf},\n\t// Block 0x6b, offset 0x34b\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0040, lo: 0x80, hi: 0xb6},\n\t{value: 0x0008, lo: 0xb7, hi: 0xb7},\n\t{value: 0x2009, lo: 0xb8, hi: 0xb8},\n\t{value: 0x6e89, lo: 0xb9, hi: 0xb9},\n\t{value: 0x0008, lo: 0xba, hi: 0xbf},\n\t// Block 0x6c, offset 0x351\n\t{value: 0x0000, lo: 0x0e},\n\t{value: 0x0008, lo: 0x80, hi: 0x81},\n\t{value: 0x3308, lo: 0x82, hi: 0x82},\n\t{value: 0x0008, lo: 0x83, hi: 0x85},\n\t{value: 0x3b08, lo: 0x86, hi: 
0x86},\n\t{value: 0x0008, lo: 0x87, hi: 0x8a},\n\t{value: 0x3308, lo: 0x8b, hi: 0x8b},\n\t{value: 0x0008, lo: 0x8c, hi: 0xa2},\n\t{value: 0x3008, lo: 0xa3, hi: 0xa4},\n\t{value: 0x3308, lo: 0xa5, hi: 0xa6},\n\t{value: 0x3008, lo: 0xa7, hi: 0xa7},\n\t{value: 0x0018, lo: 0xa8, hi: 0xab},\n\t{value: 0x0040, lo: 0xac, hi: 0xaf},\n\t{value: 0x0018, lo: 0xb0, hi: 0xb9},\n\t{value: 0x0040, lo: 0xba, hi: 0xbf},\n\t// Block 0x6d, offset 0x360\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0208, lo: 0x80, hi: 0xb1},\n\t{value: 0x0108, lo: 0xb2, hi: 0xb2},\n\t{value: 0x0008, lo: 0xb3, hi: 0xb3},\n\t{value: 0x0018, lo: 0xb4, hi: 0xb7},\n\t{value: 0x0040, lo: 0xb8, hi: 0xbf},\n\t// Block 0x6e, offset 0x366\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x3008, lo: 0x80, hi: 0x81},\n\t{value: 0x0008, lo: 0x82, hi: 0xb3},\n\t{value: 0x3008, lo: 0xb4, hi: 0xbf},\n\t// Block 0x6f, offset 0x36a\n\t{value: 0x0000, lo: 0x0e},\n\t{value: 0x3008, lo: 0x80, hi: 0x83},\n\t{value: 0x3b08, lo: 0x84, hi: 0x84},\n\t{value: 0x3308, lo: 0x85, hi: 0x85},\n\t{value: 0x0040, lo: 0x86, hi: 0x8d},\n\t{value: 0x0018, lo: 0x8e, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0x9f},\n\t{value: 0x3308, lo: 0xa0, hi: 0xb1},\n\t{value: 0x0008, lo: 0xb2, hi: 0xb7},\n\t{value: 0x0018, lo: 0xb8, hi: 0xba},\n\t{value: 0x0008, lo: 0xbb, hi: 0xbb},\n\t{value: 0x0018, lo: 0xbc, hi: 0xbc},\n\t{value: 0x0008, lo: 0xbd, hi: 0xbd},\n\t{value: 0x0040, lo: 0xbe, hi: 0xbf},\n\t// Block 0x70, offset 0x379\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0008, lo: 0x80, hi: 0xa5},\n\t{value: 0x3308, lo: 0xa6, hi: 0xad},\n\t{value: 0x0018, lo: 0xae, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xbf},\n\t// Block 0x71, offset 0x37e\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0x0008, lo: 0x80, hi: 0x86},\n\t{value: 0x3308, lo: 0x87, hi: 0x91},\n\t{value: 0x3008, lo: 0x92, hi: 0x92},\n\t{value: 0x3808, lo: 0x93, hi: 0x93},\n\t{value: 0x0040, lo: 0x94, hi: 0x9e},\n\t{value: 0x0018, lo: 0x9f, hi: 
0xbc},\n\t{value: 0x0040, lo: 0xbd, hi: 0xbf},\n\t// Block 0x72, offset 0x386\n\t{value: 0x0000, lo: 0x09},\n\t{value: 0x3308, lo: 0x80, hi: 0x82},\n\t{value: 0x3008, lo: 0x83, hi: 0x83},\n\t{value: 0x0008, lo: 0x84, hi: 0xb2},\n\t{value: 0x3308, lo: 0xb3, hi: 0xb3},\n\t{value: 0x3008, lo: 0xb4, hi: 0xb5},\n\t{value: 0x3308, lo: 0xb6, hi: 0xb9},\n\t{value: 0x3008, lo: 0xba, hi: 0xbb},\n\t{value: 0x3308, lo: 0xbc, hi: 0xbc},\n\t{value: 0x3008, lo: 0xbd, hi: 0xbf},\n\t// Block 0x73, offset 0x390\n\t{value: 0x0000, lo: 0x0a},\n\t{value: 0x3808, lo: 0x80, hi: 0x80},\n\t{value: 0x0018, lo: 0x81, hi: 0x8d},\n\t{value: 0x0040, lo: 0x8e, hi: 0x8e},\n\t{value: 0x0008, lo: 0x8f, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0x9d},\n\t{value: 0x0018, lo: 0x9e, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xa4},\n\t{value: 0x3308, lo: 0xa5, hi: 0xa5},\n\t{value: 0x0008, lo: 0xa6, hi: 0xbe},\n\t{value: 0x0040, lo: 0xbf, hi: 0xbf},\n\t// Block 0x74, offset 0x39b\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0x0008, lo: 0x80, hi: 0xa8},\n\t{value: 0x3308, lo: 0xa9, hi: 0xae},\n\t{value: 0x3008, lo: 0xaf, hi: 0xb0},\n\t{value: 0x3308, lo: 0xb1, hi: 0xb2},\n\t{value: 0x3008, lo: 0xb3, hi: 0xb4},\n\t{value: 0x3308, lo: 0xb5, hi: 0xb6},\n\t{value: 0x0040, lo: 0xb7, hi: 0xbf},\n\t// Block 0x75, offset 0x3a3\n\t{value: 0x0000, lo: 0x10},\n\t{value: 0x0008, lo: 0x80, hi: 0x82},\n\t{value: 0x3308, lo: 0x83, hi: 0x83},\n\t{value: 0x0008, lo: 0x84, hi: 0x8b},\n\t{value: 0x3308, lo: 0x8c, hi: 0x8c},\n\t{value: 0x3008, lo: 0x8d, hi: 0x8d},\n\t{value: 0x0040, lo: 0x8e, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0x9b},\n\t{value: 0x0018, lo: 0x9c, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xb6},\n\t{value: 0x0018, lo: 0xb7, hi: 0xb9},\n\t{value: 0x0008, lo: 0xba, hi: 0xba},\n\t{value: 0x3008, lo: 0xbb, hi: 0xbb},\n\t{value: 0x3308, lo: 0xbc, hi: 0xbc},\n\t{value: 0x3008, lo: 0xbd, hi: 0xbd},\n\t{value: 0x0008, lo: 0xbe, hi: 0xbf},\n\t// Block 0x76, offset 
0x3b4\n\t{value: 0x0000, lo: 0x08},\n\t{value: 0x0008, lo: 0x80, hi: 0xaf},\n\t{value: 0x3308, lo: 0xb0, hi: 0xb0},\n\t{value: 0x0008, lo: 0xb1, hi: 0xb1},\n\t{value: 0x3308, lo: 0xb2, hi: 0xb4},\n\t{value: 0x0008, lo: 0xb5, hi: 0xb6},\n\t{value: 0x3308, lo: 0xb7, hi: 0xb8},\n\t{value: 0x0008, lo: 0xb9, hi: 0xbd},\n\t{value: 0x3308, lo: 0xbe, hi: 0xbf},\n\t// Block 0x77, offset 0x3bd\n\t{value: 0x0000, lo: 0x0f},\n\t{value: 0x0008, lo: 0x80, hi: 0x80},\n\t{value: 0x3308, lo: 0x81, hi: 0x81},\n\t{value: 0x0008, lo: 0x82, hi: 0x82},\n\t{value: 0x0040, lo: 0x83, hi: 0x9a},\n\t{value: 0x0008, lo: 0x9b, hi: 0x9d},\n\t{value: 0x0018, lo: 0x9e, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xaa},\n\t{value: 0x3008, lo: 0xab, hi: 0xab},\n\t{value: 0x3308, lo: 0xac, hi: 0xad},\n\t{value: 0x3008, lo: 0xae, hi: 0xaf},\n\t{value: 0x0018, lo: 0xb0, hi: 0xb1},\n\t{value: 0x0008, lo: 0xb2, hi: 0xb4},\n\t{value: 0x3008, lo: 0xb5, hi: 0xb5},\n\t{value: 0x3b08, lo: 0xb6, hi: 0xb6},\n\t{value: 0x0040, lo: 0xb7, hi: 0xbf},\n\t// Block 0x78, offset 0x3cd\n\t{value: 0x0000, lo: 0x0c},\n\t{value: 0x0040, lo: 0x80, hi: 0x80},\n\t{value: 0x0008, lo: 0x81, hi: 0x86},\n\t{value: 0x0040, lo: 0x87, hi: 0x88},\n\t{value: 0x0008, lo: 0x89, hi: 0x8e},\n\t{value: 0x0040, lo: 0x8f, hi: 0x90},\n\t{value: 0x0008, lo: 0x91, hi: 0x96},\n\t{value: 0x0040, lo: 0x97, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xa6},\n\t{value: 0x0040, lo: 0xa7, hi: 0xa7},\n\t{value: 0x0008, lo: 0xa8, hi: 0xae},\n\t{value: 0x0040, lo: 0xaf, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xbf},\n\t// Block 0x79, offset 0x3da\n\t{value: 0x0000, lo: 0x09},\n\t{value: 0x0008, lo: 0x80, hi: 0x9a},\n\t{value: 0x0018, lo: 0x9b, hi: 0x9b},\n\t{value: 0x4465, lo: 0x9c, hi: 0x9c},\n\t{value: 0x447d, lo: 0x9d, hi: 0x9d},\n\t{value: 0x2971, lo: 0x9e, hi: 0x9e},\n\t{value: 0xe06d, lo: 0x9f, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xa5},\n\t{value: 0x0040, lo: 0xa6, hi: 0xaf},\n\t{value: 0x4495, lo: 0xb0, hi: 0xbf},\n\t// Block 0x7a, 
offset 0x3e4\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x44b5, lo: 0x80, hi: 0x8f},\n\t{value: 0x44d5, lo: 0x90, hi: 0x9f},\n\t{value: 0x44f5, lo: 0xa0, hi: 0xaf},\n\t{value: 0x44d5, lo: 0xb0, hi: 0xbf},\n\t// Block 0x7b, offset 0x3e9\n\t{value: 0x0000, lo: 0x0c},\n\t{value: 0x0008, lo: 0x80, hi: 0xa2},\n\t{value: 0x3008, lo: 0xa3, hi: 0xa4},\n\t{value: 0x3308, lo: 0xa5, hi: 0xa5},\n\t{value: 0x3008, lo: 0xa6, hi: 0xa7},\n\t{value: 0x3308, lo: 0xa8, hi: 0xa8},\n\t{value: 0x3008, lo: 0xa9, hi: 0xaa},\n\t{value: 0x0018, lo: 0xab, hi: 0xab},\n\t{value: 0x3008, lo: 0xac, hi: 0xac},\n\t{value: 0x3b08, lo: 0xad, hi: 0xad},\n\t{value: 0x0040, lo: 0xae, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xb9},\n\t{value: 0x0040, lo: 0xba, hi: 0xbf},\n\t// Block 0x7c, offset 0x3f6\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0008, lo: 0x80, hi: 0xa3},\n\t{value: 0x0040, lo: 0xa4, hi: 0xaf},\n\t{value: 0x0018, lo: 0xb0, hi: 0xbf},\n\t// Block 0x7d, offset 0x3fa\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0018, lo: 0x80, hi: 0x86},\n\t{value: 0x0040, lo: 0x87, hi: 0x8a},\n\t{value: 0x0018, lo: 0x8b, hi: 0xbb},\n\t{value: 0x0040, lo: 0xbc, hi: 0xbf},\n\t// Block 0x7e, offset 0x3ff\n\t{value: 0x0020, lo: 0x01},\n\t{value: 0x4515, lo: 0x80, hi: 0xbf},\n\t// Block 0x7f, offset 0x401\n\t{value: 0x0020, lo: 0x03},\n\t{value: 0x4d15, lo: 0x80, hi: 0x94},\n\t{value: 0x4ad5, lo: 0x95, hi: 0x95},\n\t{value: 0x4fb5, lo: 0x96, hi: 0xbf},\n\t// Block 0x80, offset 0x405\n\t{value: 0x0020, lo: 0x01},\n\t{value: 0x54f5, lo: 0x80, hi: 0xbf},\n\t// Block 0x81, offset 0x407\n\t{value: 0x0020, lo: 0x03},\n\t{value: 0x5cf5, lo: 0x80, hi: 0x84},\n\t{value: 0x5655, lo: 0x85, hi: 0x85},\n\t{value: 0x5d95, lo: 0x86, hi: 0xbf},\n\t// Block 0x82, offset 0x40b\n\t{value: 0x0020, lo: 0x08},\n\t{value: 0x6b55, lo: 0x80, hi: 0x8f},\n\t{value: 0x6d15, lo: 0x90, hi: 0x90},\n\t{value: 0x6d55, lo: 0x91, hi: 0xab},\n\t{value: 0x6ea1, lo: 0xac, hi: 0xac},\n\t{value: 0x70b5, lo: 0xad, hi: 0xad},\n\t{value: 0x0040, lo: 0xae, 
hi: 0xae},\n\t{value: 0x0040, lo: 0xaf, hi: 0xaf},\n\t{value: 0x70d5, lo: 0xb0, hi: 0xbf},\n\t// Block 0x83, offset 0x414\n\t{value: 0x0020, lo: 0x05},\n\t{value: 0x72d5, lo: 0x80, hi: 0xad},\n\t{value: 0x6535, lo: 0xae, hi: 0xae},\n\t{value: 0x7895, lo: 0xaf, hi: 0xb5},\n\t{value: 0x6f55, lo: 0xb6, hi: 0xb6},\n\t{value: 0x7975, lo: 0xb7, hi: 0xbf},\n\t// Block 0x84, offset 0x41a\n\t{value: 0x0028, lo: 0x03},\n\t{value: 0x7c21, lo: 0x80, hi: 0x82},\n\t{value: 0x7be1, lo: 0x83, hi: 0x83},\n\t{value: 0x7c99, lo: 0x84, hi: 0xbf},\n\t// Block 0x85, offset 0x41e\n\t{value: 0x0038, lo: 0x0f},\n\t{value: 0x9db1, lo: 0x80, hi: 0x83},\n\t{value: 0x9e59, lo: 0x84, hi: 0x85},\n\t{value: 0x9e91, lo: 0x86, hi: 0x87},\n\t{value: 0x9ec9, lo: 0x88, hi: 0x8f},\n\t{value: 0x0040, lo: 0x90, hi: 0x90},\n\t{value: 0x0040, lo: 0x91, hi: 0x91},\n\t{value: 0xa089, lo: 0x92, hi: 0x97},\n\t{value: 0xa1a1, lo: 0x98, hi: 0x9c},\n\t{value: 0xa281, lo: 0x9d, hi: 0xb3},\n\t{value: 0x9d41, lo: 0xb4, hi: 0xb4},\n\t{value: 0x9db1, lo: 0xb5, hi: 0xb5},\n\t{value: 0xa789, lo: 0xb6, hi: 0xbb},\n\t{value: 0xa869, lo: 0xbc, hi: 0xbc},\n\t{value: 0xa7f9, lo: 0xbd, hi: 0xbd},\n\t{value: 0xa8d9, lo: 0xbe, hi: 0xbf},\n\t// Block 0x86, offset 0x42e\n\t{value: 0x0000, lo: 0x09},\n\t{value: 0x0008, lo: 0x80, hi: 0x8b},\n\t{value: 0x0040, lo: 0x8c, hi: 0x8c},\n\t{value: 0x0008, lo: 0x8d, hi: 0xa6},\n\t{value: 0x0040, lo: 0xa7, hi: 0xa7},\n\t{value: 0x0008, lo: 0xa8, hi: 0xba},\n\t{value: 0x0040, lo: 0xbb, hi: 0xbb},\n\t{value: 0x0008, lo: 0xbc, hi: 0xbd},\n\t{value: 0x0040, lo: 0xbe, hi: 0xbe},\n\t{value: 0x0008, lo: 0xbf, hi: 0xbf},\n\t// Block 0x87, offset 0x438\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0008, lo: 0x80, hi: 0x8d},\n\t{value: 0x0040, lo: 0x8e, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x9d},\n\t{value: 0x0040, lo: 0x9e, hi: 0xbf},\n\t// Block 0x88, offset 0x43d\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0xba},\n\t{value: 0x0040, lo: 0xbb, hi: 0xbf},\n\t// Block 0x89, 
offset 0x440\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0018, lo: 0x80, hi: 0x82},\n\t{value: 0x0040, lo: 0x83, hi: 0x86},\n\t{value: 0x0018, lo: 0x87, hi: 0xb3},\n\t{value: 0x0040, lo: 0xb4, hi: 0xb6},\n\t{value: 0x0018, lo: 0xb7, hi: 0xbf},\n\t// Block 0x8a, offset 0x446\n\t{value: 0x0000, lo: 0x06},\n\t{value: 0x0018, lo: 0x80, hi: 0x8e},\n\t{value: 0x0040, lo: 0x8f, hi: 0x8f},\n\t{value: 0x0018, lo: 0x90, hi: 0x9b},\n\t{value: 0x0040, lo: 0x9c, hi: 0x9f},\n\t{value: 0x0018, lo: 0xa0, hi: 0xa0},\n\t{value: 0x0040, lo: 0xa1, hi: 0xbf},\n\t// Block 0x8b, offset 0x44d\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0040, lo: 0x80, hi: 0x8f},\n\t{value: 0x0018, lo: 0x90, hi: 0xbc},\n\t{value: 0x3308, lo: 0xbd, hi: 0xbd},\n\t{value: 0x0040, lo: 0xbe, hi: 0xbf},\n\t// Block 0x8c, offset 0x452\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0008, lo: 0x80, hi: 0x9c},\n\t{value: 0x0040, lo: 0x9d, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xbf},\n\t// Block 0x8d, offset 0x456\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0008, lo: 0x80, hi: 0x90},\n\t{value: 0x0040, lo: 0x91, hi: 0x9f},\n\t{value: 0x3308, lo: 0xa0, hi: 0xa0},\n\t{value: 0x0018, lo: 0xa1, hi: 0xbb},\n\t{value: 0x0040, lo: 0xbc, hi: 0xbf},\n\t// Block 0x8e, offset 0x45c\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0008, lo: 0x80, hi: 0x9f},\n\t{value: 0x0018, lo: 0xa0, hi: 0xa3},\n\t{value: 0x0040, lo: 0xa4, hi: 0xac},\n\t{value: 0x0008, lo: 0xad, hi: 0xbf},\n\t// Block 0x8f, offset 0x461\n\t{value: 0x0000, lo: 0x08},\n\t{value: 0x0008, lo: 0x80, hi: 0x80},\n\t{value: 0x0018, lo: 0x81, hi: 0x81},\n\t{value: 0x0008, lo: 0x82, hi: 0x89},\n\t{value: 0x0018, lo: 0x8a, hi: 0x8a},\n\t{value: 0x0040, lo: 0x8b, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0xb5},\n\t{value: 0x3308, lo: 0xb6, hi: 0xba},\n\t{value: 0x0040, lo: 0xbb, hi: 0xbf},\n\t// Block 0x90, offset 0x46a\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0008, lo: 0x80, hi: 0x9d},\n\t{value: 0x0040, lo: 0x9e, hi: 0x9e},\n\t{value: 0x0018, lo: 0x9f, hi: 
0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xbf},\n\t// Block 0x91, offset 0x46f\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0008, lo: 0x80, hi: 0x83},\n\t{value: 0x0040, lo: 0x84, hi: 0x87},\n\t{value: 0x0008, lo: 0x88, hi: 0x8f},\n\t{value: 0x0018, lo: 0x90, hi: 0x95},\n\t{value: 0x0040, lo: 0x96, hi: 0xbf},\n\t// Block 0x92, offset 0x475\n\t{value: 0x0000, lo: 0x06},\n\t{value: 0xe145, lo: 0x80, hi: 0x87},\n\t{value: 0xe1c5, lo: 0x88, hi: 0x8f},\n\t{value: 0xe145, lo: 0x90, hi: 0x97},\n\t{value: 0x8ad5, lo: 0x98, hi: 0x9f},\n\t{value: 0x8aed, lo: 0xa0, hi: 0xa7},\n\t{value: 0x0008, lo: 0xa8, hi: 0xbf},\n\t// Block 0x93, offset 0x47c\n\t{value: 0x0000, lo: 0x06},\n\t{value: 0x0008, lo: 0x80, hi: 0x9d},\n\t{value: 0x0040, lo: 0x9e, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xa9},\n\t{value: 0x0040, lo: 0xaa, hi: 0xaf},\n\t{value: 0x8aed, lo: 0xb0, hi: 0xb7},\n\t{value: 0x8ad5, lo: 0xb8, hi: 0xbf},\n\t// Block 0x94, offset 0x483\n\t{value: 0x0000, lo: 0x06},\n\t{value: 0xe145, lo: 0x80, hi: 0x87},\n\t{value: 0xe1c5, lo: 0x88, hi: 0x8f},\n\t{value: 0xe145, lo: 0x90, hi: 0x93},\n\t{value: 0x0040, lo: 0x94, hi: 0x97},\n\t{value: 0x0008, lo: 0x98, hi: 0xbb},\n\t{value: 0x0040, lo: 0xbc, hi: 0xbf},\n\t// Block 0x95, offset 0x48a\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0008, lo: 0x80, hi: 0xa7},\n\t{value: 0x0040, lo: 0xa8, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xbf},\n\t// Block 0x96, offset 0x48e\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0008, lo: 0x80, hi: 0xa3},\n\t{value: 0x0040, lo: 0xa4, hi: 0xae},\n\t{value: 0x0018, lo: 0xaf, hi: 0xaf},\n\t{value: 0x0040, lo: 0xb0, hi: 0xbf},\n\t// Block 0x97, offset 0x493\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0xb6},\n\t{value: 0x0040, lo: 0xb7, hi: 0xbf},\n\t// Block 0x98, offset 0x496\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0008, lo: 0x80, hi: 0x95},\n\t{value: 0x0040, lo: 0x96, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xa7},\n\t{value: 0x0040, lo: 0xa8, hi: 0xbf},\n\t// Block 0x99, offset 
0x49b\n\t{value: 0x0000, lo: 0x0b},\n\t{value: 0x0808, lo: 0x80, hi: 0x85},\n\t{value: 0x0040, lo: 0x86, hi: 0x87},\n\t{value: 0x0808, lo: 0x88, hi: 0x88},\n\t{value: 0x0040, lo: 0x89, hi: 0x89},\n\t{value: 0x0808, lo: 0x8a, hi: 0xb5},\n\t{value: 0x0040, lo: 0xb6, hi: 0xb6},\n\t{value: 0x0808, lo: 0xb7, hi: 0xb8},\n\t{value: 0x0040, lo: 0xb9, hi: 0xbb},\n\t{value: 0x0808, lo: 0xbc, hi: 0xbc},\n\t{value: 0x0040, lo: 0xbd, hi: 0xbe},\n\t{value: 0x0808, lo: 0xbf, hi: 0xbf},\n\t// Block 0x9a, offset 0x4a7\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0808, lo: 0x80, hi: 0x95},\n\t{value: 0x0040, lo: 0x96, hi: 0x96},\n\t{value: 0x0818, lo: 0x97, hi: 0x9f},\n\t{value: 0x0808, lo: 0xa0, hi: 0xb6},\n\t{value: 0x0818, lo: 0xb7, hi: 0xbf},\n\t// Block 0x9b, offset 0x4ad\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0808, lo: 0x80, hi: 0x9e},\n\t{value: 0x0040, lo: 0x9f, hi: 0xa6},\n\t{value: 0x0818, lo: 0xa7, hi: 0xaf},\n\t{value: 0x0040, lo: 0xb0, hi: 0xbf},\n\t// Block 0x9c, offset 0x4b2\n\t{value: 0x0000, lo: 0x06},\n\t{value: 0x0040, lo: 0x80, hi: 0x9f},\n\t{value: 0x0808, lo: 0xa0, hi: 0xb2},\n\t{value: 0x0040, lo: 0xb3, hi: 0xb3},\n\t{value: 0x0808, lo: 0xb4, hi: 0xb5},\n\t{value: 0x0040, lo: 0xb6, hi: 0xba},\n\t{value: 0x0818, lo: 0xbb, hi: 0xbf},\n\t// Block 0x9d, offset 0x4b9\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0x0808, lo: 0x80, hi: 0x95},\n\t{value: 0x0818, lo: 0x96, hi: 0x9b},\n\t{value: 0x0040, lo: 0x9c, hi: 0x9e},\n\t{value: 0x0018, lo: 0x9f, hi: 0x9f},\n\t{value: 0x0808, lo: 0xa0, hi: 0xb9},\n\t{value: 0x0040, lo: 0xba, hi: 0xbe},\n\t{value: 0x0818, lo: 0xbf, hi: 0xbf},\n\t// Block 0x9e, offset 0x4c1\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0808, lo: 0x80, hi: 0xb7},\n\t{value: 0x0040, lo: 0xb8, hi: 0xbb},\n\t{value: 0x0818, lo: 0xbc, hi: 0xbd},\n\t{value: 0x0808, lo: 0xbe, hi: 0xbf},\n\t// Block 0x9f, offset 0x4c6\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0818, lo: 0x80, hi: 0x8f},\n\t{value: 0x0040, lo: 0x90, hi: 0x91},\n\t{value: 0x0818, lo: 0x92, hi: 
0xbf},\n\t// Block 0xa0, offset 0x4ca\n\t{value: 0x0000, lo: 0x0f},\n\t{value: 0x0808, lo: 0x80, hi: 0x80},\n\t{value: 0x3308, lo: 0x81, hi: 0x83},\n\t{value: 0x0040, lo: 0x84, hi: 0x84},\n\t{value: 0x3308, lo: 0x85, hi: 0x86},\n\t{value: 0x0040, lo: 0x87, hi: 0x8b},\n\t{value: 0x3308, lo: 0x8c, hi: 0x8f},\n\t{value: 0x0808, lo: 0x90, hi: 0x93},\n\t{value: 0x0040, lo: 0x94, hi: 0x94},\n\t{value: 0x0808, lo: 0x95, hi: 0x97},\n\t{value: 0x0040, lo: 0x98, hi: 0x98},\n\t{value: 0x0808, lo: 0x99, hi: 0xb3},\n\t{value: 0x0040, lo: 0xb4, hi: 0xb7},\n\t{value: 0x3308, lo: 0xb8, hi: 0xba},\n\t{value: 0x0040, lo: 0xbb, hi: 0xbe},\n\t{value: 0x3b08, lo: 0xbf, hi: 0xbf},\n\t// Block 0xa1, offset 0x4da\n\t{value: 0x0000, lo: 0x06},\n\t{value: 0x0818, lo: 0x80, hi: 0x87},\n\t{value: 0x0040, lo: 0x88, hi: 0x8f},\n\t{value: 0x0818, lo: 0x90, hi: 0x98},\n\t{value: 0x0040, lo: 0x99, hi: 0x9f},\n\t{value: 0x0808, lo: 0xa0, hi: 0xbc},\n\t{value: 0x0818, lo: 0xbd, hi: 0xbf},\n\t// Block 0xa2, offset 0x4e1\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0808, lo: 0x80, hi: 0x9c},\n\t{value: 0x0818, lo: 0x9d, hi: 0x9f},\n\t{value: 0x0040, lo: 0xa0, hi: 0xbf},\n\t// Block 0xa3, offset 0x4e5\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0808, lo: 0x80, hi: 0xb5},\n\t{value: 0x0040, lo: 0xb6, hi: 0xb8},\n\t{value: 0x0018, lo: 0xb9, hi: 0xbf},\n\t// Block 0xa4, offset 0x4e9\n\t{value: 0x0000, lo: 0x06},\n\t{value: 0x0808, lo: 0x80, hi: 0x95},\n\t{value: 0x0040, lo: 0x96, hi: 0x97},\n\t{value: 0x0818, lo: 0x98, hi: 0x9f},\n\t{value: 0x0808, lo: 0xa0, hi: 0xb2},\n\t{value: 0x0040, lo: 0xb3, hi: 0xb7},\n\t{value: 0x0818, lo: 0xb8, hi: 0xbf},\n\t// Block 0xa5, offset 0x4f0\n\t{value: 0x0000, lo: 0x01},\n\t{value: 0x0808, lo: 0x80, hi: 0xbf},\n\t// Block 0xa6, offset 0x4f2\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0808, lo: 0x80, hi: 0x88},\n\t{value: 0x0040, lo: 0x89, hi: 0xbf},\n\t// Block 0xa7, offset 0x4f5\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x03dd, lo: 0x80, hi: 0xb2},\n\t{value: 0x0040, lo: 
0xb3, hi: 0xbf},\n\t// Block 0xa8, offset 0x4f8\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0808, lo: 0x80, hi: 0xb2},\n\t{value: 0x0040, lo: 0xb3, hi: 0xb9},\n\t{value: 0x0818, lo: 0xba, hi: 0xbf},\n\t// Block 0xa9, offset 0x4fc\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0040, lo: 0x80, hi: 0x9f},\n\t{value: 0x0818, lo: 0xa0, hi: 0xbe},\n\t{value: 0x0040, lo: 0xbf, hi: 0xbf},\n\t// Block 0xaa, offset 0x500\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x3008, lo: 0x80, hi: 0x80},\n\t{value: 0x3308, lo: 0x81, hi: 0x81},\n\t{value: 0x3008, lo: 0x82, hi: 0x82},\n\t{value: 0x0008, lo: 0x83, hi: 0xb7},\n\t{value: 0x3308, lo: 0xb8, hi: 0xbf},\n\t// Block 0xab, offset 0x506\n\t{value: 0x0000, lo: 0x08},\n\t{value: 0x3308, lo: 0x80, hi: 0x85},\n\t{value: 0x3b08, lo: 0x86, hi: 0x86},\n\t{value: 0x0018, lo: 0x87, hi: 0x8d},\n\t{value: 0x0040, lo: 0x8e, hi: 0x91},\n\t{value: 0x0018, lo: 0x92, hi: 0xa5},\n\t{value: 0x0008, lo: 0xa6, hi: 0xaf},\n\t{value: 0x0040, lo: 0xb0, hi: 0xbe},\n\t{value: 0x3b08, lo: 0xbf, hi: 0xbf},\n\t// Block 0xac, offset 0x50f\n\t{value: 0x0000, lo: 0x0b},\n\t{value: 0x3308, lo: 0x80, hi: 0x81},\n\t{value: 0x3008, lo: 0x82, hi: 0x82},\n\t{value: 0x0008, lo: 0x83, hi: 0xaf},\n\t{value: 0x3008, lo: 0xb0, hi: 0xb2},\n\t{value: 0x3308, lo: 0xb3, hi: 0xb6},\n\t{value: 0x3008, lo: 0xb7, hi: 0xb8},\n\t{value: 0x3b08, lo: 0xb9, hi: 0xb9},\n\t{value: 0x3308, lo: 0xba, hi: 0xba},\n\t{value: 0x0018, lo: 0xbb, hi: 0xbc},\n\t{value: 0x0340, lo: 0xbd, hi: 0xbd},\n\t{value: 0x0018, lo: 0xbe, hi: 0xbf},\n\t// Block 0xad, offset 0x51b\n\t{value: 0x0000, lo: 0x06},\n\t{value: 0x0018, lo: 0x80, hi: 0x81},\n\t{value: 0x0040, lo: 0x82, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0xa8},\n\t{value: 0x0040, lo: 0xa9, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xb9},\n\t{value: 0x0040, lo: 0xba, hi: 0xbf},\n\t// Block 0xae, offset 0x522\n\t{value: 0x0000, lo: 0x08},\n\t{value: 0x3308, lo: 0x80, hi: 0x82},\n\t{value: 0x0008, lo: 0x83, hi: 0xa6},\n\t{value: 0x3308, lo: 0xa7, hi: 
0xab},\n\t{value: 0x3008, lo: 0xac, hi: 0xac},\n\t{value: 0x3308, lo: 0xad, hi: 0xb2},\n\t{value: 0x3b08, lo: 0xb3, hi: 0xb4},\n\t{value: 0x0040, lo: 0xb5, hi: 0xb5},\n\t{value: 0x0008, lo: 0xb6, hi: 0xbf},\n\t// Block 0xaf, offset 0x52b\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0x0018, lo: 0x80, hi: 0x83},\n\t{value: 0x0040, lo: 0x84, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0xb2},\n\t{value: 0x3308, lo: 0xb3, hi: 0xb3},\n\t{value: 0x0018, lo: 0xb4, hi: 0xb5},\n\t{value: 0x0008, lo: 0xb6, hi: 0xb6},\n\t{value: 0x0040, lo: 0xb7, hi: 0xbf},\n\t// Block 0xb0, offset 0x533\n\t{value: 0x0000, lo: 0x06},\n\t{value: 0x3308, lo: 0x80, hi: 0x81},\n\t{value: 0x3008, lo: 0x82, hi: 0x82},\n\t{value: 0x0008, lo: 0x83, hi: 0xb2},\n\t{value: 0x3008, lo: 0xb3, hi: 0xb5},\n\t{value: 0x3308, lo: 0xb6, hi: 0xbe},\n\t{value: 0x3008, lo: 0xbf, hi: 0xbf},\n\t// Block 0xb1, offset 0x53a\n\t{value: 0x0000, lo: 0x0d},\n\t{value: 0x3808, lo: 0x80, hi: 0x80},\n\t{value: 0x0008, lo: 0x81, hi: 0x84},\n\t{value: 0x0018, lo: 0x85, hi: 0x89},\n\t{value: 0x3308, lo: 0x8a, hi: 0x8c},\n\t{value: 0x0018, lo: 0x8d, hi: 0x8d},\n\t{value: 0x0040, lo: 0x8e, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x9a},\n\t{value: 0x0018, lo: 0x9b, hi: 0x9b},\n\t{value: 0x0008, lo: 0x9c, hi: 0x9c},\n\t{value: 0x0018, lo: 0x9d, hi: 0x9f},\n\t{value: 0x0040, lo: 0xa0, hi: 0xa0},\n\t{value: 0x0018, lo: 0xa1, hi: 0xb4},\n\t{value: 0x0040, lo: 0xb5, hi: 0xbf},\n\t// Block 0xb2, offset 0x548\n\t{value: 0x0000, lo: 0x0c},\n\t{value: 0x0008, lo: 0x80, hi: 0x91},\n\t{value: 0x0040, lo: 0x92, hi: 0x92},\n\t{value: 0x0008, lo: 0x93, hi: 0xab},\n\t{value: 0x3008, lo: 0xac, hi: 0xae},\n\t{value: 0x3308, lo: 0xaf, hi: 0xb1},\n\t{value: 0x3008, lo: 0xb2, hi: 0xb3},\n\t{value: 0x3308, lo: 0xb4, hi: 0xb4},\n\t{value: 0x3808, lo: 0xb5, hi: 0xb5},\n\t{value: 0x3308, lo: 0xb6, hi: 0xb7},\n\t{value: 0x0018, lo: 0xb8, hi: 0xbd},\n\t{value: 0x3308, lo: 0xbe, hi: 0xbe},\n\t{value: 0x0040, lo: 0xbf, hi: 0xbf},\n\t// Block 0xb3, offset 
0x555\n\t{value: 0x0000, lo: 0x0c},\n\t{value: 0x0008, lo: 0x80, hi: 0x86},\n\t{value: 0x0040, lo: 0x87, hi: 0x87},\n\t{value: 0x0008, lo: 0x88, hi: 0x88},\n\t{value: 0x0040, lo: 0x89, hi: 0x89},\n\t{value: 0x0008, lo: 0x8a, hi: 0x8d},\n\t{value: 0x0040, lo: 0x8e, hi: 0x8e},\n\t{value: 0x0008, lo: 0x8f, hi: 0x9d},\n\t{value: 0x0040, lo: 0x9e, hi: 0x9e},\n\t{value: 0x0008, lo: 0x9f, hi: 0xa8},\n\t{value: 0x0018, lo: 0xa9, hi: 0xa9},\n\t{value: 0x0040, lo: 0xaa, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xbf},\n\t// Block 0xb4, offset 0x562\n\t{value: 0x0000, lo: 0x08},\n\t{value: 0x0008, lo: 0x80, hi: 0x9e},\n\t{value: 0x3308, lo: 0x9f, hi: 0x9f},\n\t{value: 0x3008, lo: 0xa0, hi: 0xa2},\n\t{value: 0x3308, lo: 0xa3, hi: 0xa9},\n\t{value: 0x3b08, lo: 0xaa, hi: 0xaa},\n\t{value: 0x0040, lo: 0xab, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xb9},\n\t{value: 0x0040, lo: 0xba, hi: 0xbf},\n\t// Block 0xb5, offset 0x56b\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0008, lo: 0x80, hi: 0xb4},\n\t{value: 0x3008, lo: 0xb5, hi: 0xb7},\n\t{value: 0x3308, lo: 0xb8, hi: 0xbf},\n\t// Block 0xb6, offset 0x56f\n\t{value: 0x0000, lo: 0x0d},\n\t{value: 0x3008, lo: 0x80, hi: 0x81},\n\t{value: 0x3b08, lo: 0x82, hi: 0x82},\n\t{value: 0x3308, lo: 0x83, hi: 0x84},\n\t{value: 0x3008, lo: 0x85, hi: 0x85},\n\t{value: 0x3308, lo: 0x86, hi: 0x86},\n\t{value: 0x0008, lo: 0x87, hi: 0x8a},\n\t{value: 0x0018, lo: 0x8b, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0x9a},\n\t{value: 0x0018, lo: 0x9b, hi: 0x9b},\n\t{value: 0x0040, lo: 0x9c, hi: 0x9c},\n\t{value: 0x0018, lo: 0x9d, hi: 0x9d},\n\t{value: 0x0040, lo: 0x9e, hi: 0xbf},\n\t// Block 0xb7, offset 0x57d\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0x0008, lo: 0x80, hi: 0xaf},\n\t{value: 0x3008, lo: 0xb0, hi: 0xb2},\n\t{value: 0x3308, lo: 0xb3, hi: 0xb8},\n\t{value: 0x3008, lo: 0xb9, hi: 0xb9},\n\t{value: 0x3308, lo: 0xba, hi: 0xba},\n\t{value: 0x3008, lo: 0xbb, hi: 0xbe},\n\t{value: 0x3308, lo: 0xbf, hi: 
0xbf},\n\t// Block 0xb8, offset 0x585\n\t{value: 0x0000, lo: 0x0a},\n\t{value: 0x3308, lo: 0x80, hi: 0x80},\n\t{value: 0x3008, lo: 0x81, hi: 0x81},\n\t{value: 0x3b08, lo: 0x82, hi: 0x82},\n\t{value: 0x3308, lo: 0x83, hi: 0x83},\n\t{value: 0x0008, lo: 0x84, hi: 0x85},\n\t{value: 0x0018, lo: 0x86, hi: 0x86},\n\t{value: 0x0008, lo: 0x87, hi: 0x87},\n\t{value: 0x0040, lo: 0x88, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0xbf},\n\t// Block 0xb9, offset 0x590\n\t{value: 0x0000, lo: 0x08},\n\t{value: 0x0008, lo: 0x80, hi: 0xae},\n\t{value: 0x3008, lo: 0xaf, hi: 0xb1},\n\t{value: 0x3308, lo: 0xb2, hi: 0xb5},\n\t{value: 0x0040, lo: 0xb6, hi: 0xb7},\n\t{value: 0x3008, lo: 0xb8, hi: 0xbb},\n\t{value: 0x3308, lo: 0xbc, hi: 0xbd},\n\t{value: 0x3008, lo: 0xbe, hi: 0xbe},\n\t{value: 0x3b08, lo: 0xbf, hi: 0xbf},\n\t// Block 0xba, offset 0x599\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x3308, lo: 0x80, hi: 0x80},\n\t{value: 0x0018, lo: 0x81, hi: 0x97},\n\t{value: 0x0008, lo: 0x98, hi: 0x9b},\n\t{value: 0x3308, lo: 0x9c, hi: 0x9d},\n\t{value: 0x0040, lo: 0x9e, hi: 0xbf},\n\t// Block 0xbb, offset 0x59f\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0x0008, lo: 0x80, hi: 0xaf},\n\t{value: 0x3008, lo: 0xb0, hi: 0xb2},\n\t{value: 0x3308, lo: 0xb3, hi: 0xba},\n\t{value: 0x3008, lo: 0xbb, hi: 0xbc},\n\t{value: 0x3308, lo: 0xbd, hi: 0xbd},\n\t{value: 0x3008, lo: 0xbe, hi: 0xbe},\n\t{value: 0x3b08, lo: 0xbf, hi: 0xbf},\n\t// Block 0xbc, offset 0x5a7\n\t{value: 0x0000, lo: 0x08},\n\t{value: 0x3308, lo: 0x80, hi: 0x80},\n\t{value: 0x0018, lo: 0x81, hi: 0x83},\n\t{value: 0x0008, lo: 0x84, hi: 0x84},\n\t{value: 0x0040, lo: 0x85, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0x9f},\n\t{value: 0x0018, lo: 0xa0, hi: 0xac},\n\t{value: 0x0040, lo: 0xad, hi: 0xbf},\n\t// Block 0xbd, offset 0x5b0\n\t{value: 0x0000, lo: 0x09},\n\t{value: 0x0008, lo: 0x80, hi: 0xaa},\n\t{value: 0x3308, lo: 0xab, hi: 0xab},\n\t{value: 0x3008, lo: 
0xac, hi: 0xac},\n\t{value: 0x3308, lo: 0xad, hi: 0xad},\n\t{value: 0x3008, lo: 0xae, hi: 0xaf},\n\t{value: 0x3308, lo: 0xb0, hi: 0xb5},\n\t{value: 0x3808, lo: 0xb6, hi: 0xb6},\n\t{value: 0x3308, lo: 0xb7, hi: 0xb7},\n\t{value: 0x0040, lo: 0xb8, hi: 0xbf},\n\t// Block 0xbe, offset 0x5ba\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0x89},\n\t{value: 0x0040, lo: 0x8a, hi: 0xbf},\n\t// Block 0xbf, offset 0x5bd\n\t{value: 0x0000, lo: 0x0b},\n\t{value: 0x0008, lo: 0x80, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0x9c},\n\t{value: 0x3308, lo: 0x9d, hi: 0x9f},\n\t{value: 0x3008, lo: 0xa0, hi: 0xa1},\n\t{value: 0x3308, lo: 0xa2, hi: 0xa5},\n\t{value: 0x3008, lo: 0xa6, hi: 0xa6},\n\t{value: 0x3308, lo: 0xa7, hi: 0xaa},\n\t{value: 0x3b08, lo: 0xab, hi: 0xab},\n\t{value: 0x0040, lo: 0xac, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xb9},\n\t{value: 0x0018, lo: 0xba, hi: 0xbf},\n\t// Block 0xc0, offset 0x5c9\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0040, lo: 0x80, hi: 0x9f},\n\t{value: 0x049d, lo: 0xa0, hi: 0xbf},\n\t// Block 0xc1, offset 0x5cc\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0008, lo: 0x80, hi: 0xa9},\n\t{value: 0x0018, lo: 0xaa, hi: 0xb2},\n\t{value: 0x0040, lo: 0xb3, hi: 0xbe},\n\t{value: 0x0008, lo: 0xbf, hi: 0xbf},\n\t// Block 0xc2, offset 0x5d1\n\t{value: 0x0000, lo: 0x0c},\n\t{value: 0x0008, lo: 0x80, hi: 0x80},\n\t{value: 0x3308, lo: 0x81, hi: 0x86},\n\t{value: 0x3008, lo: 0x87, hi: 0x88},\n\t{value: 0x3308, lo: 0x89, hi: 0x8a},\n\t{value: 0x0008, lo: 0x8b, hi: 0xb2},\n\t{value: 0x3308, lo: 0xb3, hi: 0xb3},\n\t{value: 0x3b08, lo: 0xb4, hi: 0xb4},\n\t{value: 0x3308, lo: 0xb5, hi: 0xb8},\n\t{value: 0x3008, lo: 0xb9, hi: 0xb9},\n\t{value: 0x0008, lo: 0xba, hi: 0xba},\n\t{value: 0x3308, lo: 0xbb, hi: 0xbe},\n\t{value: 0x0018, lo: 0xbf, hi: 0xbf},\n\t// Block 0xc3, offset 0x5de\n\t{value: 0x0000, lo: 0x08},\n\t{value: 0x0018, lo: 0x80, hi: 0x86},\n\t{value: 0x3b08, lo: 0x87, hi: 0x87},\n\t{value: 0x0040, lo: 0x88, hi: 0x8f},\n\t{value: 
0x0008, lo: 0x90, hi: 0x90},\n\t{value: 0x3308, lo: 0x91, hi: 0x96},\n\t{value: 0x3008, lo: 0x97, hi: 0x98},\n\t{value: 0x3308, lo: 0x99, hi: 0x9b},\n\t{value: 0x0008, lo: 0x9c, hi: 0xbf},\n\t// Block 0xc4, offset 0x5e7\n\t{value: 0x0000, lo: 0x0b},\n\t{value: 0x0008, lo: 0x80, hi: 0x83},\n\t{value: 0x0040, lo: 0x84, hi: 0x85},\n\t{value: 0x0008, lo: 0x86, hi: 0x89},\n\t{value: 0x3308, lo: 0x8a, hi: 0x96},\n\t{value: 0x3008, lo: 0x97, hi: 0x97},\n\t{value: 0x3308, lo: 0x98, hi: 0x98},\n\t{value: 0x3b08, lo: 0x99, hi: 0x99},\n\t{value: 0x0018, lo: 0x9a, hi: 0x9c},\n\t{value: 0x0040, lo: 0x9d, hi: 0x9d},\n\t{value: 0x0018, lo: 0x9e, hi: 0xa2},\n\t{value: 0x0040, lo: 0xa3, hi: 0xbf},\n\t// Block 0xc5, offset 0x5f3\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0xb8},\n\t{value: 0x0040, lo: 0xb9, hi: 0xbf},\n\t// Block 0xc6, offset 0x5f6\n\t{value: 0x0000, lo: 0x09},\n\t{value: 0x0008, lo: 0x80, hi: 0x88},\n\t{value: 0x0040, lo: 0x89, hi: 0x89},\n\t{value: 0x0008, lo: 0x8a, hi: 0xae},\n\t{value: 0x3008, lo: 0xaf, hi: 0xaf},\n\t{value: 0x3308, lo: 0xb0, hi: 0xb6},\n\t{value: 0x0040, lo: 0xb7, hi: 0xb7},\n\t{value: 0x3308, lo: 0xb8, hi: 0xbd},\n\t{value: 0x3008, lo: 0xbe, hi: 0xbe},\n\t{value: 0x3b08, lo: 0xbf, hi: 0xbf},\n\t// Block 0xc7, offset 0x600\n\t{value: 0x0000, lo: 0x08},\n\t{value: 0x0008, lo: 0x80, hi: 0x80},\n\t{value: 0x0018, lo: 0x81, hi: 0x85},\n\t{value: 0x0040, lo: 0x86, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x99},\n\t{value: 0x0018, lo: 0x9a, hi: 0xac},\n\t{value: 0x0040, lo: 0xad, hi: 0xaf},\n\t{value: 0x0018, lo: 0xb0, hi: 0xb1},\n\t{value: 0x0008, lo: 0xb2, hi: 0xbf},\n\t// Block 0xc8, offset 0x609\n\t{value: 0x0000, lo: 0x0b},\n\t{value: 0x0008, lo: 0x80, hi: 0x8f},\n\t{value: 0x0040, lo: 0x90, hi: 0x91},\n\t{value: 0x3308, lo: 0x92, hi: 0xa7},\n\t{value: 0x0040, lo: 0xa8, hi: 0xa8},\n\t{value: 0x3008, lo: 0xa9, hi: 0xa9},\n\t{value: 0x3308, lo: 0xaa, hi: 0xb0},\n\t{value: 0x3008, lo: 0xb1, hi: 0xb1},\n\t{value: 0x3308, lo: 
0xb2, hi: 0xb3},\n\t{value: 0x3008, lo: 0xb4, hi: 0xb4},\n\t{value: 0x3308, lo: 0xb5, hi: 0xb6},\n\t{value: 0x0040, lo: 0xb7, hi: 0xbf},\n\t// Block 0xc9, offset 0x615\n\t{value: 0x0000, lo: 0x0c},\n\t{value: 0x0008, lo: 0x80, hi: 0x86},\n\t{value: 0x0040, lo: 0x87, hi: 0x87},\n\t{value: 0x0008, lo: 0x88, hi: 0x89},\n\t{value: 0x0040, lo: 0x8a, hi: 0x8a},\n\t{value: 0x0008, lo: 0x8b, hi: 0xb0},\n\t{value: 0x3308, lo: 0xb1, hi: 0xb6},\n\t{value: 0x0040, lo: 0xb7, hi: 0xb9},\n\t{value: 0x3308, lo: 0xba, hi: 0xba},\n\t{value: 0x0040, lo: 0xbb, hi: 0xbb},\n\t{value: 0x3308, lo: 0xbc, hi: 0xbd},\n\t{value: 0x0040, lo: 0xbe, hi: 0xbe},\n\t{value: 0x3308, lo: 0xbf, hi: 0xbf},\n\t// Block 0xca, offset 0x622\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0x3308, lo: 0x80, hi: 0x83},\n\t{value: 0x3b08, lo: 0x84, hi: 0x85},\n\t{value: 0x0008, lo: 0x86, hi: 0x86},\n\t{value: 0x3308, lo: 0x87, hi: 0x87},\n\t{value: 0x0040, lo: 0x88, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0xbf},\n\t// Block 0xcb, offset 0x62a\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0xbf},\n\t// Block 0xcc, offset 0x62d\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0018, lo: 0x80, hi: 0xae},\n\t{value: 0x0040, lo: 0xaf, hi: 0xaf},\n\t{value: 0x0018, lo: 0xb0, hi: 0xb4},\n\t{value: 0x0040, lo: 0xb5, hi: 0xbf},\n\t// Block 0xcd, offset 0x632\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0x83},\n\t{value: 0x0040, lo: 0x84, hi: 0xbf},\n\t// Block 0xce, offset 0x635\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0xae},\n\t{value: 0x0040, lo: 0xaf, hi: 0xbf},\n\t// Block 0xcf, offset 0x638\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0x86},\n\t{value: 0x0040, lo: 0x87, hi: 0xbf},\n\t// Block 0xd0, offset 0x63b\n\t{value: 0x0000, lo: 0x06},\n\t{value: 0x0008, lo: 0x80, hi: 0x9e},\n\t{value: 0x0040, lo: 0x9f, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xa9},\n\t{value: 
0x0040, lo: 0xaa, hi: 0xad},\n\t{value: 0x0018, lo: 0xae, hi: 0xaf},\n\t{value: 0x0040, lo: 0xb0, hi: 0xbf},\n\t// Block 0xd1, offset 0x642\n\t{value: 0x0000, lo: 0x06},\n\t{value: 0x0040, lo: 0x80, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0xad},\n\t{value: 0x0040, lo: 0xae, hi: 0xaf},\n\t{value: 0x3308, lo: 0xb0, hi: 0xb4},\n\t{value: 0x0018, lo: 0xb5, hi: 0xb5},\n\t{value: 0x0040, lo: 0xb6, hi: 0xbf},\n\t// Block 0xd2, offset 0x649\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0008, lo: 0x80, hi: 0xaf},\n\t{value: 0x3308, lo: 0xb0, hi: 0xb6},\n\t{value: 0x0018, lo: 0xb7, hi: 0xbf},\n\t// Block 0xd3, offset 0x64d\n\t{value: 0x0000, lo: 0x0a},\n\t{value: 0x0008, lo: 0x80, hi: 0x83},\n\t{value: 0x0018, lo: 0x84, hi: 0x85},\n\t{value: 0x0040, lo: 0x86, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0x9a},\n\t{value: 0x0018, lo: 0x9b, hi: 0xa1},\n\t{value: 0x0040, lo: 0xa2, hi: 0xa2},\n\t{value: 0x0008, lo: 0xa3, hi: 0xb7},\n\t{value: 0x0040, lo: 0xb8, hi: 0xbc},\n\t{value: 0x0008, lo: 0xbd, hi: 0xbf},\n\t// Block 0xd4, offset 0x658\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0x8f},\n\t{value: 0x0040, lo: 0x90, hi: 0xbf},\n\t// Block 0xd5, offset 0x65b\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0008, lo: 0x80, hi: 0x84},\n\t{value: 0x0040, lo: 0x85, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x90},\n\t{value: 0x3008, lo: 0x91, hi: 0xbe},\n\t{value: 0x0040, lo: 0xbf, hi: 0xbf},\n\t// Block 0xd6, offset 0x661\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0040, lo: 0x80, hi: 0x8e},\n\t{value: 0x3308, lo: 0x8f, hi: 0x92},\n\t{value: 0x0008, lo: 0x93, hi: 0x9f},\n\t{value: 0x0040, lo: 0xa0, hi: 0xbf},\n\t// Block 0xd7, offset 0x666\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0040, lo: 0x80, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xa1},\n\t{value: 0x0040, lo: 0xa2, hi: 0xbf},\n\t// Block 0xd8, offset 0x66a\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0xac},\n\t{value: 0x0040, lo: 0xad, hi: 
0xbf},\n\t// Block 0xd9, offset 0x66d\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0xb2},\n\t{value: 0x0040, lo: 0xb3, hi: 0xbf},\n\t// Block 0xda, offset 0x670\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0x9e},\n\t{value: 0x0040, lo: 0x9f, hi: 0xbf},\n\t// Block 0xdb, offset 0x673\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0040, lo: 0x80, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xbf},\n\t// Block 0xdc, offset 0x676\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0xbb},\n\t{value: 0x0040, lo: 0xbc, hi: 0xbf},\n\t// Block 0xdd, offset 0x679\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0008, lo: 0x80, hi: 0xaa},\n\t{value: 0x0040, lo: 0xab, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xbc},\n\t{value: 0x0040, lo: 0xbd, hi: 0xbf},\n\t// Block 0xde, offset 0x67e\n\t{value: 0x0000, lo: 0x09},\n\t{value: 0x0008, lo: 0x80, hi: 0x88},\n\t{value: 0x0040, lo: 0x89, hi: 0x8f},\n\t{value: 0x0008, lo: 0x90, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0x9b},\n\t{value: 0x0018, lo: 0x9c, hi: 0x9c},\n\t{value: 0x3308, lo: 0x9d, hi: 0x9e},\n\t{value: 0x0018, lo: 0x9f, hi: 0x9f},\n\t{value: 0x03c0, lo: 0xa0, hi: 0xa3},\n\t{value: 0x0040, lo: 0xa4, hi: 0xbf},\n\t// Block 0xdf, offset 0x688\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0018, lo: 0x80, hi: 0xb5},\n\t{value: 0x0040, lo: 0xb6, hi: 0xbf},\n\t// Block 0xe0, offset 0x68b\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0018, lo: 0x80, hi: 0xa6},\n\t{value: 0x0040, lo: 0xa7, hi: 0xa8},\n\t{value: 0x0018, lo: 0xa9, hi: 0xbf},\n\t// Block 0xe1, offset 0x68f\n\t{value: 0x0000, lo: 0x0e},\n\t{value: 0x0018, lo: 0x80, hi: 0x9d},\n\t{value: 0xb5b9, lo: 0x9e, hi: 0x9e},\n\t{value: 0xb601, lo: 0x9f, hi: 0x9f},\n\t{value: 0xb649, lo: 0xa0, hi: 0xa0},\n\t{value: 0xb6b1, lo: 0xa1, hi: 0xa1},\n\t{value: 0xb719, lo: 0xa2, hi: 0xa2},\n\t{value: 0xb781, lo: 0xa3, hi: 0xa3},\n\t{value: 0xb7e9, lo: 0xa4, hi: 0xa4},\n\t{value: 0x3018, lo: 0xa5, hi: 0xa6},\n\t{value: 0x3318, lo: 0xa7, hi: 
0xa9},\n\t{value: 0x0018, lo: 0xaa, hi: 0xac},\n\t{value: 0x3018, lo: 0xad, hi: 0xb2},\n\t{value: 0x0340, lo: 0xb3, hi: 0xba},\n\t{value: 0x3318, lo: 0xbb, hi: 0xbf},\n\t// Block 0xe2, offset 0x69e\n\t{value: 0x0000, lo: 0x0b},\n\t{value: 0x3318, lo: 0x80, hi: 0x82},\n\t{value: 0x0018, lo: 0x83, hi: 0x84},\n\t{value: 0x3318, lo: 0x85, hi: 0x8b},\n\t{value: 0x0018, lo: 0x8c, hi: 0xa9},\n\t{value: 0x3318, lo: 0xaa, hi: 0xad},\n\t{value: 0x0018, lo: 0xae, hi: 0xba},\n\t{value: 0xb851, lo: 0xbb, hi: 0xbb},\n\t{value: 0xb899, lo: 0xbc, hi: 0xbc},\n\t{value: 0xb8e1, lo: 0xbd, hi: 0xbd},\n\t{value: 0xb949, lo: 0xbe, hi: 0xbe},\n\t{value: 0xb9b1, lo: 0xbf, hi: 0xbf},\n\t// Block 0xe3, offset 0x6aa\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0xba19, lo: 0x80, hi: 0x80},\n\t{value: 0x0018, lo: 0x81, hi: 0xa8},\n\t{value: 0x0040, lo: 0xa9, hi: 0xbf},\n\t// Block 0xe4, offset 0x6ae\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0018, lo: 0x80, hi: 0x81},\n\t{value: 0x3318, lo: 0x82, hi: 0x84},\n\t{value: 0x0018, lo: 0x85, hi: 0x85},\n\t{value: 0x0040, lo: 0x86, hi: 0xbf},\n\t// Block 0xe5, offset 0x6b3\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0018, lo: 0x80, hi: 0x96},\n\t{value: 0x0040, lo: 0x97, hi: 0x9f},\n\t{value: 0x0018, lo: 0xa0, hi: 0xb1},\n\t{value: 0x0040, lo: 0xb2, hi: 0xbf},\n\t// Block 0xe6, offset 0x6b8\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x3308, lo: 0x80, hi: 0xb6},\n\t{value: 0x0018, lo: 0xb7, hi: 0xba},\n\t{value: 0x3308, lo: 0xbb, hi: 0xbf},\n\t// Block 0xe7, offset 0x6bc\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x3308, lo: 0x80, hi: 0xac},\n\t{value: 0x0018, lo: 0xad, hi: 0xb4},\n\t{value: 0x3308, lo: 0xb5, hi: 0xb5},\n\t{value: 0x0018, lo: 0xb6, hi: 0xbf},\n\t// Block 0xe8, offset 0x6c1\n\t{value: 0x0000, lo: 0x08},\n\t{value: 0x0018, lo: 0x80, hi: 0x83},\n\t{value: 0x3308, lo: 0x84, hi: 0x84},\n\t{value: 0x0018, lo: 0x85, hi: 0x8b},\n\t{value: 0x0040, lo: 0x8c, hi: 0x9a},\n\t{value: 0x3308, lo: 0x9b, hi: 0x9f},\n\t{value: 0x0040, lo: 0xa0, hi: 
0xa0},\n\t{value: 0x3308, lo: 0xa1, hi: 0xaf},\n\t{value: 0x0040, lo: 0xb0, hi: 0xbf},\n\t// Block 0xe9, offset 0x6ca\n\t{value: 0x0000, lo: 0x0a},\n\t{value: 0x3308, lo: 0x80, hi: 0x86},\n\t{value: 0x0040, lo: 0x87, hi: 0x87},\n\t{value: 0x3308, lo: 0x88, hi: 0x98},\n\t{value: 0x0040, lo: 0x99, hi: 0x9a},\n\t{value: 0x3308, lo: 0x9b, hi: 0xa1},\n\t{value: 0x0040, lo: 0xa2, hi: 0xa2},\n\t{value: 0x3308, lo: 0xa3, hi: 0xa4},\n\t{value: 0x0040, lo: 0xa5, hi: 0xa5},\n\t{value: 0x3308, lo: 0xa6, hi: 0xaa},\n\t{value: 0x0040, lo: 0xab, hi: 0xbf},\n\t// Block 0xea, offset 0x6d5\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0808, lo: 0x80, hi: 0x84},\n\t{value: 0x0040, lo: 0x85, hi: 0x86},\n\t{value: 0x0818, lo: 0x87, hi: 0x8f},\n\t{value: 0x3308, lo: 0x90, hi: 0x96},\n\t{value: 0x0040, lo: 0x97, hi: 0xbf},\n\t// Block 0xeb, offset 0x6db\n\t{value: 0x0000, lo: 0x07},\n\t{value: 0x0a08, lo: 0x80, hi: 0x83},\n\t{value: 0x3308, lo: 0x84, hi: 0x8a},\n\t{value: 0x0040, lo: 0x8b, hi: 0x8f},\n\t{value: 0x0808, lo: 0x90, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0x9d},\n\t{value: 0x0818, lo: 0x9e, hi: 0x9f},\n\t{value: 0x0040, lo: 0xa0, hi: 0xbf},\n\t// Block 0xec, offset 0x6e3\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0040, lo: 0x80, hi: 0xaf},\n\t{value: 0x0018, lo: 0xb0, hi: 0xb1},\n\t{value: 0x0040, lo: 0xb2, hi: 0xbf},\n\t// Block 0xed, offset 0x6e7\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0018, lo: 0x80, hi: 0xab},\n\t{value: 0x0040, lo: 0xac, hi: 0xaf},\n\t{value: 0x0018, lo: 0xb0, hi: 0xbf},\n\t// Block 0xee, offset 0x6eb\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0018, lo: 0x80, hi: 0x93},\n\t{value: 0x0040, lo: 0x94, hi: 0x9f},\n\t{value: 0x0018, lo: 0xa0, hi: 0xae},\n\t{value: 0x0040, lo: 0xaf, hi: 0xb0},\n\t{value: 0x0018, lo: 0xb1, hi: 0xbf},\n\t// Block 0xef, offset 0x6f1\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0040, lo: 0x80, hi: 0x80},\n\t{value: 0x0018, lo: 0x81, hi: 0x8f},\n\t{value: 0x0040, lo: 0x90, hi: 0x90},\n\t{value: 0x0018, lo: 0x91, hi: 
0xb5},\n\t{value: 0x0040, lo: 0xb6, hi: 0xbf},\n\t// Block 0xf0, offset 0x6f7\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0018, lo: 0x80, hi: 0x8f},\n\t{value: 0xc1c1, lo: 0x90, hi: 0x90},\n\t{value: 0x0018, lo: 0x91, hi: 0xac},\n\t{value: 0x0040, lo: 0xad, hi: 0xbf},\n\t// Block 0xf1, offset 0x6fc\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0040, lo: 0x80, hi: 0xa5},\n\t{value: 0x0018, lo: 0xa6, hi: 0xbf},\n\t// Block 0xf2, offset 0x6ff\n\t{value: 0x0000, lo: 0x0f},\n\t{value: 0xc7e9, lo: 0x80, hi: 0x80},\n\t{value: 0xc839, lo: 0x81, hi: 0x81},\n\t{value: 0xc889, lo: 0x82, hi: 0x82},\n\t{value: 0xc8d9, lo: 0x83, hi: 0x83},\n\t{value: 0xc929, lo: 0x84, hi: 0x84},\n\t{value: 0xc979, lo: 0x85, hi: 0x85},\n\t{value: 0xc9c9, lo: 0x86, hi: 0x86},\n\t{value: 0xca19, lo: 0x87, hi: 0x87},\n\t{value: 0xca69, lo: 0x88, hi: 0x88},\n\t{value: 0x0040, lo: 0x89, hi: 0x8f},\n\t{value: 0xcab9, lo: 0x90, hi: 0x90},\n\t{value: 0xcad9, lo: 0x91, hi: 0x91},\n\t{value: 0x0040, lo: 0x92, hi: 0x9f},\n\t{value: 0x0018, lo: 0xa0, hi: 0xa5},\n\t{value: 0x0040, lo: 0xa6, hi: 0xbf},\n\t// Block 0xf3, offset 0x70f\n\t{value: 0x0000, lo: 0x06},\n\t{value: 0x0018, lo: 0x80, hi: 0x94},\n\t{value: 0x0040, lo: 0x95, hi: 0x9f},\n\t{value: 0x0018, lo: 0xa0, hi: 0xac},\n\t{value: 0x0040, lo: 0xad, hi: 0xaf},\n\t{value: 0x0018, lo: 0xb0, hi: 0xb8},\n\t{value: 0x0040, lo: 0xb9, hi: 0xbf},\n\t// Block 0xf4, offset 0x716\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0018, lo: 0x80, hi: 0xb3},\n\t{value: 0x0040, lo: 0xb4, hi: 0xbf},\n\t// Block 0xf5, offset 0x719\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0018, lo: 0x80, hi: 0x94},\n\t{value: 0x0040, lo: 0x95, hi: 0xbf},\n\t// Block 0xf6, offset 0x71c\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0018, lo: 0x80, hi: 0x8b},\n\t{value: 0x0040, lo: 0x8c, hi: 0x8f},\n\t{value: 0x0018, lo: 0x90, hi: 0xbf},\n\t// Block 0xf7, offset 0x720\n\t{value: 0x0000, lo: 0x05},\n\t{value: 0x0018, lo: 0x80, hi: 0x87},\n\t{value: 0x0040, lo: 0x88, hi: 0x8f},\n\t{value: 0x0018, lo: 
0x90, hi: 0x99},\n\t{value: 0x0040, lo: 0x9a, hi: 0x9f},\n\t{value: 0x0018, lo: 0xa0, hi: 0xbf},\n\t// Block 0xf8, offset 0x726\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0018, lo: 0x80, hi: 0x87},\n\t{value: 0x0040, lo: 0x88, hi: 0x8f},\n\t{value: 0x0018, lo: 0x90, hi: 0xad},\n\t{value: 0x0040, lo: 0xae, hi: 0xbf},\n\t// Block 0xf9, offset 0x72b\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0018, lo: 0x80, hi: 0x8b},\n\t{value: 0x0040, lo: 0x8c, hi: 0x8f},\n\t{value: 0x0018, lo: 0x90, hi: 0xbe},\n\t{value: 0x0040, lo: 0xbf, hi: 0xbf},\n\t// Block 0xfa, offset 0x730\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0018, lo: 0x80, hi: 0x8c},\n\t{value: 0x0040, lo: 0x8d, hi: 0x8f},\n\t{value: 0x0018, lo: 0x90, hi: 0xab},\n\t{value: 0x0040, lo: 0xac, hi: 0xbf},\n\t// Block 0xfb, offset 0x735\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0018, lo: 0x80, hi: 0x97},\n\t{value: 0x0040, lo: 0x98, hi: 0xbf},\n\t// Block 0xfc, offset 0x738\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0018, lo: 0x80, hi: 0x80},\n\t{value: 0x0040, lo: 0x81, hi: 0x8f},\n\t{value: 0x0018, lo: 0x90, hi: 0xa6},\n\t{value: 0x0040, lo: 0xa7, hi: 0xbf},\n\t// Block 0xfd, offset 0x73d\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0x96},\n\t{value: 0x0040, lo: 0x97, hi: 0xbf},\n\t// Block 0xfe, offset 0x740\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0xb4},\n\t{value: 0x0040, lo: 0xb5, hi: 0xbf},\n\t// Block 0xff, offset 0x743\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0008, lo: 0x80, hi: 0x9d},\n\t{value: 0x0040, lo: 0x9e, hi: 0x9f},\n\t{value: 0x0008, lo: 0xa0, hi: 0xbf},\n\t// Block 0x100, offset 0x747\n\t{value: 0x0000, lo: 0x03},\n\t{value: 0x0008, lo: 0x80, hi: 0xa1},\n\t{value: 0x0040, lo: 0xa2, hi: 0xaf},\n\t{value: 0x0008, lo: 0xb0, hi: 0xbf},\n\t// Block 0x101, offset 0x74b\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x0008, lo: 0x80, hi: 0xa0},\n\t{value: 0x0040, lo: 0xa1, hi: 0xbf},\n\t// Block 0x102, offset 0x74e\n\t{value: 0x0020, lo: 0x0f},\n\t{value: 0xdeb9, lo: 0x80, 
hi: 0x89},\n\t{value: 0x8dfd, lo: 0x8a, hi: 0x8a},\n\t{value: 0xdff9, lo: 0x8b, hi: 0x9c},\n\t{value: 0x8e1d, lo: 0x9d, hi: 0x9d},\n\t{value: 0xe239, lo: 0x9e, hi: 0xa2},\n\t{value: 0x8e3d, lo: 0xa3, hi: 0xa3},\n\t{value: 0xe2d9, lo: 0xa4, hi: 0xab},\n\t{value: 0x7ed5, lo: 0xac, hi: 0xac},\n\t{value: 0xe3d9, lo: 0xad, hi: 0xaf},\n\t{value: 0x8e5d, lo: 0xb0, hi: 0xb0},\n\t{value: 0xe439, lo: 0xb1, hi: 0xb6},\n\t{value: 0x8e7d, lo: 0xb7, hi: 0xb9},\n\t{value: 0xe4f9, lo: 0xba, hi: 0xba},\n\t{value: 0x8edd, lo: 0xbb, hi: 0xbb},\n\t{value: 0xe519, lo: 0xbc, hi: 0xbf},\n\t// Block 0x103, offset 0x75e\n\t{value: 0x0020, lo: 0x10},\n\t{value: 0x937d, lo: 0x80, hi: 0x80},\n\t{value: 0xf099, lo: 0x81, hi: 0x86},\n\t{value: 0x939d, lo: 0x87, hi: 0x8a},\n\t{value: 0xd9f9, lo: 0x8b, hi: 0x8b},\n\t{value: 0xf159, lo: 0x8c, hi: 0x96},\n\t{value: 0x941d, lo: 0x97, hi: 0x97},\n\t{value: 0xf2b9, lo: 0x98, hi: 0xa3},\n\t{value: 0x943d, lo: 0xa4, hi: 0xa6},\n\t{value: 0xf439, lo: 0xa7, hi: 0xaa},\n\t{value: 0x949d, lo: 0xab, hi: 0xab},\n\t{value: 0xf4b9, lo: 0xac, hi: 0xac},\n\t{value: 0x94bd, lo: 0xad, hi: 0xad},\n\t{value: 0xf4d9, lo: 0xae, hi: 0xaf},\n\t{value: 0x94dd, lo: 0xb0, hi: 0xb1},\n\t{value: 0xf519, lo: 0xb2, hi: 0xbe},\n\t{value: 0x2040, lo: 0xbf, hi: 0xbf},\n\t// Block 0x104, offset 0x76f\n\t{value: 0x0000, lo: 0x04},\n\t{value: 0x0040, lo: 0x80, hi: 0x80},\n\t{value: 0x0340, lo: 0x81, hi: 0x81},\n\t{value: 0x0040, lo: 0x82, hi: 0x9f},\n\t{value: 0x0340, lo: 0xa0, hi: 0xbf},\n\t// Block 0x105, offset 0x774\n\t{value: 0x0000, lo: 0x01},\n\t{value: 0x0340, lo: 0x80, hi: 0xbf},\n\t// Block 0x106, offset 0x776\n\t{value: 0x0000, lo: 0x01},\n\t{value: 0x33c0, lo: 0x80, hi: 0xbf},\n\t// Block 0x107, offset 0x778\n\t{value: 0x0000, lo: 0x02},\n\t{value: 0x33c0, lo: 0x80, hi: 0xaf},\n\t{value: 0x0040, lo: 0xb0, hi: 0xbf},\n}\n\n// Total table size 42115 bytes (41KiB); checksum: F4A1FA4E\n"
  },
  {
    "path": "vendor/golang.org/x/net/idna/trie.go",
    "content": "// Code generated by running \"go generate\" in golang.org/x/text. DO NOT EDIT.\n\n// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage idna\n\n// appendMapping appends the mapping for the respective rune. isMapped must be\n// true. A mapping is a categorization of a rune as defined in UTS #46.\nfunc (c info) appendMapping(b []byte, s string) []byte {\n\tindex := int(c >> indexShift)\n\tif c&xorBit == 0 {\n\t\ts := mappings[index:]\n\t\treturn append(b, s[1:s[0]+1]...)\n\t}\n\tb = append(b, s...)\n\tif c&inlineXOR == inlineXOR {\n\t\t// TODO: support and handle two-byte inline masks\n\t\tb[len(b)-1] ^= byte(index)\n\t} else {\n\t\tfor p := len(b) - int(xorData[index]); p < len(b); p++ {\n\t\t\tindex++\n\t\t\tb[p] ^= xorData[index]\n\t\t}\n\t}\n\treturn b\n}\n\n// Sparse block handling code.\n\ntype valueRange struct {\n\tvalue  uint16 // header: value:stride\n\tlo, hi byte   // header: lo:n\n}\n\ntype sparseBlocks struct {\n\tvalues []valueRange\n\toffset []uint16\n}\n\nvar idnaSparse = sparseBlocks{\n\tvalues: idnaSparseValues[:],\n\toffset: idnaSparseOffset[:],\n}\n\n// Don't use newIdnaTrie to avoid unconditional linking in of the table.\nvar trie = &idnaTrie{}\n\n// lookup determines the type of block n and looks up the value for b.\n// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block\n// is a list of ranges with an accompanying value. 
Given a matching range r,\n// the value for b is by r.value + (b - r.lo) * stride.\nfunc (t *sparseBlocks) lookup(n uint32, b byte) uint16 {\n\toffset := t.offset[n]\n\theader := t.values[offset]\n\tlo := offset + 1\n\thi := lo + uint16(header.lo)\n\tfor lo < hi {\n\t\tm := lo + (hi-lo)/2\n\t\tr := t.values[m]\n\t\tif r.lo <= b && b <= r.hi {\n\t\t\treturn r.value + uint16(b-r.lo)*header.value\n\t\t}\n\t\tif b < r.lo {\n\t\t\thi = m\n\t\t} else {\n\t\t\tlo = m + 1\n\t\t}\n\t}\n\treturn 0\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/idna/trieval.go",
    "content": "// Code generated by running \"go generate\" in golang.org/x/text. DO NOT EDIT.\n\npackage idna\n\n// This file contains definitions for interpreting the trie value of the idna\n// trie generated by \"go run gen*.go\". It is shared by both the generator\n// program and the resultant package. Sharing is achieved by the generator\n// copying gen_trieval.go to trieval.go and changing what's above this comment.\n\n// info holds information from the IDNA mapping table for a single rune. It is\n// the value returned by a trie lookup. In most cases, all information fits in\n// a 16-bit value. For mappings, this value may contain an index into a slice\n// with the mapped string. Such mappings can consist of the actual mapped value\n// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the\n// input rune. This technique is used by the cases packages and reduces the\n// table size significantly.\n//\n// The per-rune values have the following format:\n//\n//   if mapped {\n//     if inlinedXOR {\n//       15..13 inline XOR marker\n//       12..11 unused\n//       10..3  inline XOR mask\n//     } else {\n//       15..3  index into xor or mapping table\n//     }\n//   } else {\n//       15..14 unused\n//       13     mayNeedNorm\n//       12..11 attributes\n//       10..8  joining type\n//        7..3  category type\n//   }\n//      2  use xor pattern\n//   1..0  mapped category\n//\n// See the definitions below for a more detailed description of the various\n// bits.\ntype info uint16\n\nconst (\n\tcatSmallMask = 0x3\n\tcatBigMask   = 0xF8\n\tindexShift   = 3\n\txorBit       = 0x4    // interpret the index as an xor pattern\n\tinlineXOR    = 0xE000 // These bits are set if the XOR pattern is inlined.\n\n\tjoinShift = 8\n\tjoinMask  = 0x07\n\n\t// Attributes\n\tattributesMask = 0x1800\n\tviramaModifier = 0x1800\n\tmodifier       = 0x1000\n\trtl            = 0x0800\n\n\tmayNeedNorm = 0x2000\n)\n\n// A category corresponds to a category defined 
in the IDNA mapping table.\ntype category uint16\n\nconst (\n\tunknown              category = 0 // not currently defined in unicode.\n\tmapped               category = 1\n\tdisallowedSTD3Mapped category = 2\n\tdeviation            category = 3\n)\n\nconst (\n\tvalid               category = 0x08\n\tvalidNV8            category = 0x18\n\tvalidXV8            category = 0x28\n\tdisallowed          category = 0x40\n\tdisallowedSTD3Valid category = 0x80\n\tignored             category = 0xC0\n)\n\n// join types and additional rune information\nconst (\n\tjoiningL = (iota + 1)\n\tjoiningD\n\tjoiningT\n\tjoiningR\n\n\t//the following types are derived during processing\n\tjoinZWJ\n\tjoinZWNJ\n\tjoinVirama\n\tnumJoinTypes\n)\n\nfunc (c info) isMapped() bool {\n\treturn c&0x3 != 0\n}\n\nfunc (c info) category() category {\n\tsmall := c & catSmallMask\n\tif small != 0 {\n\t\treturn category(small)\n\t}\n\treturn category(c & catBigMask)\n}\n\nfunc (c info) joinType() info {\n\tif c.isMapped() {\n\t\treturn 0\n\t}\n\treturn (c >> joinShift) & joinMask\n}\n\nfunc (c info) isModifier() bool {\n\treturn c&(modifier|catSmallMask) == modifier\n}\n\nfunc (c info) isViramaModifier() bool {\n\treturn c&(attributesMask|catSmallMask) == viramaModifier\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/iana/const.go",
    "content": "// go generate gen.go\n// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\npackage iana // import \"golang.org/x/net/internal/iana\"\n\n// Differentiated Services Field Codepoints (DSCP), Updated: 2017-05-12\nconst (\n\tDiffServCS0        = 0x0  // CS0\n\tDiffServCS1        = 0x20 // CS1\n\tDiffServCS2        = 0x40 // CS2\n\tDiffServCS3        = 0x60 // CS3\n\tDiffServCS4        = 0x80 // CS4\n\tDiffServCS5        = 0xa0 // CS5\n\tDiffServCS6        = 0xc0 // CS6\n\tDiffServCS7        = 0xe0 // CS7\n\tDiffServAF11       = 0x28 // AF11\n\tDiffServAF12       = 0x30 // AF12\n\tDiffServAF13       = 0x38 // AF13\n\tDiffServAF21       = 0x48 // AF21\n\tDiffServAF22       = 0x50 // AF22\n\tDiffServAF23       = 0x58 // AF23\n\tDiffServAF31       = 0x68 // AF31\n\tDiffServAF32       = 0x70 // AF32\n\tDiffServAF33       = 0x78 // AF33\n\tDiffServAF41       = 0x88 // AF41\n\tDiffServAF42       = 0x90 // AF42\n\tDiffServAF43       = 0x98 // AF43\n\tDiffServEF         = 0xb8 // EF\n\tDiffServVOICEADMIT = 0xb0 // VOICE-ADMIT\n)\n\n// IPv4 TOS Byte and IPv6 Traffic Class Octet, Updated: 2001-09-06\nconst (\n\tNotECNTransport       = 0x0 // Not-ECT (Not ECN-Capable Transport)\n\tECNTransport1         = 0x1 // ECT(1) (ECN-Capable Transport(1))\n\tECNTransport0         = 0x2 // ECT(0) (ECN-Capable Transport(0))\n\tCongestionExperienced = 0x3 // CE (Congestion Experienced)\n)\n\n// Protocol Numbers, Updated: 2016-06-22\nconst (\n\tProtocolIP             = 0   // IPv4 encapsulation, pseudo protocol number\n\tProtocolHOPOPT         = 0   // IPv6 Hop-by-Hop Option\n\tProtocolICMP           = 1   // Internet Control Message\n\tProtocolIGMP           = 2   // Internet Group Management\n\tProtocolGGP            = 3   // Gateway-to-Gateway\n\tProtocolIPv4           = 4   // IPv4 encapsulation\n\tProtocolST             = 5   // Stream\n\tProtocolTCP            = 6   
// Transmission Control\n\tProtocolCBT            = 7   // CBT\n\tProtocolEGP            = 8   // Exterior Gateway Protocol\n\tProtocolIGP            = 9   // any private interior gateway (used by Cisco for their IGRP)\n\tProtocolBBNRCCMON      = 10  // BBN RCC Monitoring\n\tProtocolNVPII          = 11  // Network Voice Protocol\n\tProtocolPUP            = 12  // PUP\n\tProtocolEMCON          = 14  // EMCON\n\tProtocolXNET           = 15  // Cross Net Debugger\n\tProtocolCHAOS          = 16  // Chaos\n\tProtocolUDP            = 17  // User Datagram\n\tProtocolMUX            = 18  // Multiplexing\n\tProtocolDCNMEAS        = 19  // DCN Measurement Subsystems\n\tProtocolHMP            = 20  // Host Monitoring\n\tProtocolPRM            = 21  // Packet Radio Measurement\n\tProtocolXNSIDP         = 22  // XEROX NS IDP\n\tProtocolTRUNK1         = 23  // Trunk-1\n\tProtocolTRUNK2         = 24  // Trunk-2\n\tProtocolLEAF1          = 25  // Leaf-1\n\tProtocolLEAF2          = 26  // Leaf-2\n\tProtocolRDP            = 27  // Reliable Data Protocol\n\tProtocolIRTP           = 28  // Internet Reliable Transaction\n\tProtocolISOTP4         = 29  // ISO Transport Protocol Class 4\n\tProtocolNETBLT         = 30  // Bulk Data Transfer Protocol\n\tProtocolMFENSP         = 31  // MFE Network Services Protocol\n\tProtocolMERITINP       = 32  // MERIT Internodal Protocol\n\tProtocolDCCP           = 33  // Datagram Congestion Control Protocol\n\tProtocol3PC            = 34  // Third Party Connect Protocol\n\tProtocolIDPR           = 35  // Inter-Domain Policy Routing Protocol\n\tProtocolXTP            = 36  // XTP\n\tProtocolDDP            = 37  // Datagram Delivery Protocol\n\tProtocolIDPRCMTP       = 38  // IDPR Control Message Transport Proto\n\tProtocolTPPP           = 39  // TP++ Transport Protocol\n\tProtocolIL             = 40  // IL Transport Protocol\n\tProtocolIPv6           = 41  // IPv6 encapsulation\n\tProtocolSDRP           = 42  // Source Demand Routing 
Protocol\n\tProtocolIPv6Route      = 43  // Routing Header for IPv6\n\tProtocolIPv6Frag       = 44  // Fragment Header for IPv6\n\tProtocolIDRP           = 45  // Inter-Domain Routing Protocol\n\tProtocolRSVP           = 46  // Reservation Protocol\n\tProtocolGRE            = 47  // Generic Routing Encapsulation\n\tProtocolDSR            = 48  // Dynamic Source Routing Protocol\n\tProtocolBNA            = 49  // BNA\n\tProtocolESP            = 50  // Encap Security Payload\n\tProtocolAH             = 51  // Authentication Header\n\tProtocolINLSP          = 52  // Integrated Net Layer Security  TUBA\n\tProtocolNARP           = 54  // NBMA Address Resolution Protocol\n\tProtocolMOBILE         = 55  // IP Mobility\n\tProtocolTLSP           = 56  // Transport Layer Security Protocol using Kryptonet key management\n\tProtocolSKIP           = 57  // SKIP\n\tProtocolIPv6ICMP       = 58  // ICMP for IPv6\n\tProtocolIPv6NoNxt      = 59  // No Next Header for IPv6\n\tProtocolIPv6Opts       = 60  // Destination Options for IPv6\n\tProtocolCFTP           = 62  // CFTP\n\tProtocolSATEXPAK       = 64  // SATNET and Backroom EXPAK\n\tProtocolKRYPTOLAN      = 65  // Kryptolan\n\tProtocolRVD            = 66  // MIT Remote Virtual Disk Protocol\n\tProtocolIPPC           = 67  // Internet Pluribus Packet Core\n\tProtocolSATMON         = 69  // SATNET Monitoring\n\tProtocolVISA           = 70  // VISA Protocol\n\tProtocolIPCV           = 71  // Internet Packet Core Utility\n\tProtocolCPNX           = 72  // Computer Protocol Network Executive\n\tProtocolCPHB           = 73  // Computer Protocol Heart Beat\n\tProtocolWSN            = 74  // Wang Span Network\n\tProtocolPVP            = 75  // Packet Video Protocol\n\tProtocolBRSATMON       = 76  // Backroom SATNET Monitoring\n\tProtocolSUNND          = 77  // SUN ND PROTOCOL-Temporary\n\tProtocolWBMON          = 78  // WIDEBAND Monitoring\n\tProtocolWBEXPAK        = 79  // WIDEBAND EXPAK\n\tProtocolISOIP          = 80  // ISO Internet 
Protocol\n\tProtocolVMTP           = 81  // VMTP\n\tProtocolSECUREVMTP     = 82  // SECURE-VMTP\n\tProtocolVINES          = 83  // VINES\n\tProtocolTTP            = 84  // Transaction Transport Protocol\n\tProtocolIPTM           = 84  // Internet Protocol Traffic Manager\n\tProtocolNSFNETIGP      = 85  // NSFNET-IGP\n\tProtocolDGP            = 86  // Dissimilar Gateway Protocol\n\tProtocolTCF            = 87  // TCF\n\tProtocolEIGRP          = 88  // EIGRP\n\tProtocolOSPFIGP        = 89  // OSPFIGP\n\tProtocolSpriteRPC      = 90  // Sprite RPC Protocol\n\tProtocolLARP           = 91  // Locus Address Resolution Protocol\n\tProtocolMTP            = 92  // Multicast Transport Protocol\n\tProtocolAX25           = 93  // AX.25 Frames\n\tProtocolIPIP           = 94  // IP-within-IP Encapsulation Protocol\n\tProtocolSCCSP          = 96  // Semaphore Communications Sec. Pro.\n\tProtocolETHERIP        = 97  // Ethernet-within-IP Encapsulation\n\tProtocolENCAP          = 98  // Encapsulation Header\n\tProtocolGMTP           = 100 // GMTP\n\tProtocolIFMP           = 101 // Ipsilon Flow Management Protocol\n\tProtocolPNNI           = 102 // PNNI over IP\n\tProtocolPIM            = 103 // Protocol Independent Multicast\n\tProtocolARIS           = 104 // ARIS\n\tProtocolSCPS           = 105 // SCPS\n\tProtocolQNX            = 106 // QNX\n\tProtocolAN             = 107 // Active Networks\n\tProtocolIPComp         = 108 // IP Payload Compression Protocol\n\tProtocolSNP            = 109 // Sitara Networks Protocol\n\tProtocolCompaqPeer     = 110 // Compaq Peer Protocol\n\tProtocolIPXinIP        = 111 // IPX in IP\n\tProtocolVRRP           = 112 // Virtual Router Redundancy Protocol\n\tProtocolPGM            = 113 // PGM Reliable Transport Protocol\n\tProtocolL2TP           = 115 // Layer Two Tunneling Protocol\n\tProtocolDDX            = 116 // D-II Data Exchange (DDX)\n\tProtocolIATP           = 117 // Interactive Agent Transfer Protocol\n\tProtocolSTP            = 118 // 
Schedule Transfer Protocol\n\tProtocolSRP            = 119 // SpectraLink Radio Protocol\n\tProtocolUTI            = 120 // UTI\n\tProtocolSMP            = 121 // Simple Message Protocol\n\tProtocolPTP            = 123 // Performance Transparency Protocol\n\tProtocolISIS           = 124 // ISIS over IPv4\n\tProtocolFIRE           = 125 // FIRE\n\tProtocolCRTP           = 126 // Combat Radio Transport Protocol\n\tProtocolCRUDP          = 127 // Combat Radio User Datagram\n\tProtocolSSCOPMCE       = 128 // SSCOPMCE\n\tProtocolIPLT           = 129 // IPLT\n\tProtocolSPS            = 130 // Secure Packet Shield\n\tProtocolPIPE           = 131 // Private IP Encapsulation within IP\n\tProtocolSCTP           = 132 // Stream Control Transmission Protocol\n\tProtocolFC             = 133 // Fibre Channel\n\tProtocolRSVPE2EIGNORE  = 134 // RSVP-E2E-IGNORE\n\tProtocolMobilityHeader = 135 // Mobility Header\n\tProtocolUDPLite        = 136 // UDPLite\n\tProtocolMPLSinIP       = 137 // MPLS-in-IP\n\tProtocolMANET          = 138 // MANET Protocols\n\tProtocolHIP            = 139 // Host Identity Protocol\n\tProtocolShim6          = 140 // Shim6 Protocol\n\tProtocolWESP           = 141 // Wrapped Encapsulating Security Payload\n\tProtocolROHC           = 142 // Robust Header Compression\n\tProtocolReserved       = 255 // Reserved\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/iana/gen.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n//go:generate go run gen.go\n\n// This program generates internet protocol constants and tables by\n// reading IANA protocol registries.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding/xml\"\n\t\"fmt\"\n\t\"go/format\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar registries = []struct {\n\turl   string\n\tparse func(io.Writer, io.Reader) error\n}{\n\t{\n\t\t\"http://www.iana.org/assignments/dscp-registry/dscp-registry.xml\",\n\t\tparseDSCPRegistry,\n\t},\n\t{\n\t\t\"http://www.iana.org/assignments/ipv4-tos-byte/ipv4-tos-byte.xml\",\n\t\tparseTOSTCByte,\n\t},\n\t{\n\t\t\"http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml\",\n\t\tparseProtocolNumbers,\n\t},\n}\n\nfunc main() {\n\tvar bb bytes.Buffer\n\tfmt.Fprintf(&bb, \"// go generate gen.go\\n\")\n\tfmt.Fprintf(&bb, \"// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\\n\\n\")\n\tfmt.Fprintf(&bb, \"// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\\n\")\n\tfmt.Fprintf(&bb, `package iana // import \"golang.org/x/net/internal/iana\"`+\"\\n\\n\")\n\tfor _, r := range registries {\n\t\tresp, err := http.Get(r.url)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\tfmt.Fprintf(os.Stderr, \"got HTTP status code %v for %v\\n\", resp.StatusCode, r.url)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tif err := r.parse(&bb, resp.Body); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Fprintf(&bb, \"\\n\")\n\t}\n\tb, err := format.Source(bb.Bytes())\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\tif err := ioutil.WriteFile(\"const.go\", b, 0644); err 
!= nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc parseDSCPRegistry(w io.Writer, r io.Reader) error {\n\tdec := xml.NewDecoder(r)\n\tvar dr dscpRegistry\n\tif err := dec.Decode(&dr); err != nil {\n\t\treturn err\n\t}\n\tdrs := dr.escape()\n\tfmt.Fprintf(w, \"// %s, Updated: %s\\n\", dr.Title, dr.Updated)\n\tfmt.Fprintf(w, \"const (\\n\")\n\tfor _, dr := range drs {\n\t\tfmt.Fprintf(w, \"DiffServ%s = %#x\", dr.Name, dr.Value)\n\t\tfmt.Fprintf(w, \"// %s\\n\", dr.OrigName)\n\t}\n\tfmt.Fprintf(w, \")\\n\")\n\treturn nil\n}\n\ntype dscpRegistry struct {\n\tXMLName     xml.Name `xml:\"registry\"`\n\tTitle       string   `xml:\"title\"`\n\tUpdated     string   `xml:\"updated\"`\n\tNote        string   `xml:\"note\"`\n\tRegTitle    string   `xml:\"registry>title\"`\n\tPoolRecords []struct {\n\t\tName  string `xml:\"name\"`\n\t\tSpace string `xml:\"space\"`\n\t} `xml:\"registry>record\"`\n\tRecords []struct {\n\t\tName  string `xml:\"name\"`\n\t\tSpace string `xml:\"space\"`\n\t} `xml:\"registry>registry>record\"`\n}\n\ntype canonDSCPRecord struct {\n\tOrigName string\n\tName     string\n\tValue    int\n}\n\nfunc (drr *dscpRegistry) escape() []canonDSCPRecord {\n\tdrs := make([]canonDSCPRecord, len(drr.Records))\n\tsr := strings.NewReplacer(\n\t\t\"+\", \"\",\n\t\t\"-\", \"\",\n\t\t\"/\", \"\",\n\t\t\".\", \"\",\n\t\t\" \", \"\",\n\t)\n\tfor i, dr := range drr.Records {\n\t\ts := strings.TrimSpace(dr.Name)\n\t\tdrs[i].OrigName = s\n\t\tdrs[i].Name = sr.Replace(s)\n\t\tn, err := strconv.ParseUint(dr.Space, 2, 8)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdrs[i].Value = int(n) << 2\n\t}\n\treturn drs\n}\n\nfunc parseTOSTCByte(w io.Writer, r io.Reader) error {\n\tdec := xml.NewDecoder(r)\n\tvar ttb tosTCByte\n\tif err := dec.Decode(&ttb); err != nil {\n\t\treturn err\n\t}\n\ttrs := ttb.escape()\n\tfmt.Fprintf(w, \"// %s, Updated: %s\\n\", ttb.Title, ttb.Updated)\n\tfmt.Fprintf(w, \"const (\\n\")\n\tfor _, tr := range trs {\n\t\tfmt.Fprintf(w, 
\"%s = %#x\", tr.Keyword, tr.Value)\n\t\tfmt.Fprintf(w, \"// %s\\n\", tr.OrigKeyword)\n\t}\n\tfmt.Fprintf(w, \")\\n\")\n\treturn nil\n}\n\ntype tosTCByte struct {\n\tXMLName  xml.Name `xml:\"registry\"`\n\tTitle    string   `xml:\"title\"`\n\tUpdated  string   `xml:\"updated\"`\n\tNote     string   `xml:\"note\"`\n\tRegTitle string   `xml:\"registry>title\"`\n\tRecords  []struct {\n\t\tBinary  string `xml:\"binary\"`\n\t\tKeyword string `xml:\"keyword\"`\n\t} `xml:\"registry>record\"`\n}\n\ntype canonTOSTCByteRecord struct {\n\tOrigKeyword string\n\tKeyword     string\n\tValue       int\n}\n\nfunc (ttb *tosTCByte) escape() []canonTOSTCByteRecord {\n\ttrs := make([]canonTOSTCByteRecord, len(ttb.Records))\n\tsr := strings.NewReplacer(\n\t\t\"Capable\", \"\",\n\t\t\"(\", \"\",\n\t\t\")\", \"\",\n\t\t\"+\", \"\",\n\t\t\"-\", \"\",\n\t\t\"/\", \"\",\n\t\t\".\", \"\",\n\t\t\" \", \"\",\n\t)\n\tfor i, tr := range ttb.Records {\n\t\ts := strings.TrimSpace(tr.Keyword)\n\t\ttrs[i].OrigKeyword = s\n\t\tss := strings.Split(s, \" \")\n\t\tif len(ss) > 1 {\n\t\t\ttrs[i].Keyword = strings.Join(ss[1:], \" \")\n\t\t} else {\n\t\t\ttrs[i].Keyword = ss[0]\n\t\t}\n\t\ttrs[i].Keyword = sr.Replace(trs[i].Keyword)\n\t\tn, err := strconv.ParseUint(tr.Binary, 2, 8)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\ttrs[i].Value = int(n)\n\t}\n\treturn trs\n}\n\nfunc parseProtocolNumbers(w io.Writer, r io.Reader) error {\n\tdec := xml.NewDecoder(r)\n\tvar pn protocolNumbers\n\tif err := dec.Decode(&pn); err != nil {\n\t\treturn err\n\t}\n\tprs := pn.escape()\n\tprs = append([]canonProtocolRecord{{\n\t\tName:  \"IP\",\n\t\tDescr: \"IPv4 encapsulation, pseudo protocol number\",\n\t\tValue: 0,\n\t}}, prs...)\n\tfmt.Fprintf(w, \"// %s, Updated: %s\\n\", pn.Title, pn.Updated)\n\tfmt.Fprintf(w, \"const (\\n\")\n\tfor _, pr := range prs {\n\t\tif pr.Name == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(w, \"Protocol%s = %d\", pr.Name, pr.Value)\n\t\ts := pr.Descr\n\t\tif s == \"\" {\n\t\t\ts = 
pr.OrigName\n\t\t}\n\t\tfmt.Fprintf(w, \"// %s\\n\", s)\n\t}\n\tfmt.Fprintf(w, \")\\n\")\n\treturn nil\n}\n\ntype protocolNumbers struct {\n\tXMLName  xml.Name `xml:\"registry\"`\n\tTitle    string   `xml:\"title\"`\n\tUpdated  string   `xml:\"updated\"`\n\tRegTitle string   `xml:\"registry>title\"`\n\tNote     string   `xml:\"registry>note\"`\n\tRecords  []struct {\n\t\tValue string `xml:\"value\"`\n\t\tName  string `xml:\"name\"`\n\t\tDescr string `xml:\"description\"`\n\t} `xml:\"registry>record\"`\n}\n\ntype canonProtocolRecord struct {\n\tOrigName string\n\tName     string\n\tDescr    string\n\tValue    int\n}\n\nfunc (pn *protocolNumbers) escape() []canonProtocolRecord {\n\tprs := make([]canonProtocolRecord, len(pn.Records))\n\tsr := strings.NewReplacer(\n\t\t\"-in-\", \"in\",\n\t\t\"-within-\", \"within\",\n\t\t\"-over-\", \"over\",\n\t\t\"+\", \"P\",\n\t\t\"-\", \"\",\n\t\t\"/\", \"\",\n\t\t\".\", \"\",\n\t\t\" \", \"\",\n\t)\n\tfor i, pr := range pn.Records {\n\t\tif strings.Contains(pr.Name, \"Deprecated\") ||\n\t\t\tstrings.Contains(pr.Name, \"deprecated\") {\n\t\t\tcontinue\n\t\t}\n\t\tprs[i].OrigName = pr.Name\n\t\ts := strings.TrimSpace(pr.Name)\n\t\tswitch pr.Name {\n\t\tcase \"ISIS over IPv4\":\n\t\t\tprs[i].Name = \"ISIS\"\n\t\tcase \"manet\":\n\t\t\tprs[i].Name = \"MANET\"\n\t\tdefault:\n\t\t\tprs[i].Name = sr.Replace(s)\n\t\t}\n\t\tss := strings.Split(pr.Descr, \"\\n\")\n\t\tfor i := range ss {\n\t\t\tss[i] = strings.TrimSpace(ss[i])\n\t\t}\n\t\tif len(ss) > 1 {\n\t\t\tprs[i].Descr = strings.Join(ss, \" \")\n\t\t} else {\n\t\t\tprs[i].Descr = ss[0]\n\t\t}\n\t\tprs[i].Value, _ = strconv.Atoi(pr.Value)\n\t}\n\treturn prs\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/nettest/helper_bsd.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd openbsd\n\npackage nettest\n\nimport (\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nvar darwinVersion int\n\nfunc init() {\n\tif runtime.GOOS == \"darwin\" {\n\t\t// See http://support.apple.com/kb/HT1633.\n\t\ts, err := syscall.Sysctl(\"kern.osrelease\")\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tss := strings.Split(s, \".\")\n\t\tif len(ss) == 0 {\n\t\t\treturn\n\t\t}\n\t\tdarwinVersion, _ = strconv.Atoi(ss[0])\n\t}\n}\n\nfunc supportsIPv6MulticastDeliveryOnLoopback() bool {\n\tswitch runtime.GOOS {\n\tcase \"freebsd\":\n\t\t// See http://www.freebsd.org/cgi/query-pr.cgi?pr=180065.\n\t\t// Even after the fix, it looks like the latest\n\t\t// kernels don't deliver link-local scoped multicast\n\t\t// packets correctly.\n\t\treturn false\n\tcase \"darwin\":\n\t\treturn !causesIPv6Crash()\n\tdefault:\n\t\treturn true\n\t}\n}\n\nfunc causesIPv6Crash() bool {\n\t// We see some kernel crash when running IPv6 with IP-level\n\t// options on Darwin kernel version 12 or below.\n\t// See golang.org/issues/17015.\n\treturn darwinVersion < 13\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/nettest/helper_nobsd.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build linux solaris\n\npackage nettest\n\nfunc supportsIPv6MulticastDeliveryOnLoopback() bool {\n\treturn true\n}\n\nfunc causesIPv6Crash() bool {\n\treturn false\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/nettest/helper_posix.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows\n\npackage nettest\n\nimport (\n\t\"os\"\n\t\"syscall\"\n)\n\nfunc protocolNotSupported(err error) bool {\n\tswitch err := err.(type) {\n\tcase syscall.Errno:\n\t\tswitch err {\n\t\tcase syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT:\n\t\t\treturn true\n\t\t}\n\tcase *os.SyscallError:\n\t\tswitch err := err.Err.(type) {\n\t\tcase syscall.Errno:\n\t\t\tswitch err {\n\t\t\tcase syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT:\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/nettest/helper_stub.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build nacl plan9\n\npackage nettest\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n)\n\nfunc maxOpenFiles() int {\n\treturn defaultMaxOpenFiles\n}\n\nfunc supportsRawIPSocket() (string, bool) {\n\treturn fmt.Sprintf(\"not supported on %s\", runtime.GOOS), false\n}\n\nfunc supportsIPv6MulticastDeliveryOnLoopback() bool {\n\treturn false\n}\n\nfunc causesIPv6Crash() bool {\n\treturn false\n}\n\nfunc protocolNotSupported(err error) bool {\n\treturn false\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/nettest/helper_unix.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd linux netbsd openbsd solaris\n\npackage nettest\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"syscall\"\n)\n\nfunc maxOpenFiles() int {\n\tvar rlim syscall.Rlimit\n\tif err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil {\n\t\treturn defaultMaxOpenFiles\n\t}\n\treturn int(rlim.Cur)\n}\n\nfunc supportsRawIPSocket() (string, bool) {\n\tif os.Getuid() != 0 {\n\t\treturn fmt.Sprintf(\"must be root on %s\", runtime.GOOS), false\n\t}\n\treturn \"\", true\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/nettest/helper_windows.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage nettest\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"syscall\"\n)\n\nfunc maxOpenFiles() int {\n\treturn 4 * defaultMaxOpenFiles /* actually it's 16581375 */\n}\n\nfunc supportsRawIPSocket() (string, bool) {\n\t// From http://msdn.microsoft.com/en-us/library/windows/desktop/ms740548.aspx:\n\t// Note: To use a socket of type SOCK_RAW requires administrative privileges.\n\t// Users running Winsock applications that use raw sockets must be a member of\n\t// the Administrators group on the local computer, otherwise raw socket calls\n\t// will fail with an error code of WSAEACCES. On Windows Vista and later, access\n\t// for raw sockets is enforced at socket creation. In earlier versions of Windows,\n\t// access for raw sockets is enforced during other socket operations.\n\ts, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, 0)\n\tif err == syscall.WSAEACCES {\n\t\treturn fmt.Sprintf(\"no access to raw socket allowed on %s\", runtime.GOOS), false\n\t}\n\tif err != nil {\n\t\treturn err.Error(), false\n\t}\n\tsyscall.Closesocket(s)\n\treturn \"\", true\n}\n\nfunc supportsIPv6MulticastDeliveryOnLoopback() bool {\n\treturn true\n}\n\nfunc causesIPv6Crash() bool {\n\treturn false\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/nettest/interface.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage nettest\n\nimport \"net\"\n\n// IsMulticastCapable reports whether ifi is an IP multicast-capable\n// network interface. Network must be \"ip\", \"ip4\" or \"ip6\".\nfunc IsMulticastCapable(network string, ifi *net.Interface) (net.IP, bool) {\n\tswitch network {\n\tcase \"ip\", \"ip4\", \"ip6\":\n\tdefault:\n\t\treturn nil, false\n\t}\n\tif ifi == nil || ifi.Flags&net.FlagUp == 0 || ifi.Flags&net.FlagMulticast == 0 {\n\t\treturn nil, false\n\t}\n\treturn hasRoutableIP(network, ifi)\n}\n\n// RoutedInterface returns a network interface that can route IP\n// traffic and satisfies flags. It returns nil when an appropriate\n// network interface is not found. Network must be \"ip\", \"ip4\" or\n// \"ip6\".\nfunc RoutedInterface(network string, flags net.Flags) *net.Interface {\n\tswitch network {\n\tcase \"ip\", \"ip4\", \"ip6\":\n\tdefault:\n\t\treturn nil\n\t}\n\tift, err := net.Interfaces()\n\tif err != nil {\n\t\treturn nil\n\t}\n\tfor _, ifi := range ift {\n\t\tif ifi.Flags&flags != flags {\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := hasRoutableIP(network, &ifi); !ok {\n\t\t\tcontinue\n\t\t}\n\t\treturn &ifi\n\t}\n\treturn nil\n}\n\nfunc hasRoutableIP(network string, ifi *net.Interface) (net.IP, bool) {\n\tifat, err := ifi.Addrs()\n\tif err != nil {\n\t\treturn nil, false\n\t}\n\tfor _, ifa := range ifat {\n\t\tswitch ifa := ifa.(type) {\n\t\tcase *net.IPAddr:\n\t\t\tif ip := routableIP(network, ifa.IP); ip != nil {\n\t\t\t\treturn ip, true\n\t\t\t}\n\t\tcase *net.IPNet:\n\t\t\tif ip := routableIP(network, ifa.IP); ip != nil {\n\t\t\t\treturn ip, true\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, false\n}\n\nfunc routableIP(network string, ip net.IP) net.IP {\n\tif !ip.IsLoopback() && !ip.IsLinkLocalUnicast() && !ip.IsGlobalUnicast() {\n\t\treturn nil\n\t}\n\tswitch network {\n\tcase 
\"ip4\":\n\t\tif ip := ip.To4(); ip != nil {\n\t\t\treturn ip\n\t\t}\n\tcase \"ip6\":\n\t\tif ip.IsLoopback() { // addressing scope of the loopback address depends on each implementation\n\t\t\treturn nil\n\t\t}\n\t\tif ip := ip.To16(); ip != nil && ip.To4() == nil {\n\t\t\treturn ip\n\t\t}\n\tdefault:\n\t\tif ip := ip.To4(); ip != nil {\n\t\t\treturn ip\n\t\t}\n\t\tif ip := ip.To16(); ip != nil {\n\t\t\treturn ip\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/nettest/rlimit.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage nettest\n\nconst defaultMaxOpenFiles = 256\n\n// MaxOpenFiles returns the maximum number of open files for the\n// caller's process.\nfunc MaxOpenFiles() int { return maxOpenFiles() }\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/nettest/stack.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package nettest provides utilities for network testing.\npackage nettest // import \"golang.org/x/net/internal/nettest\"\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n)\n\nvar (\n\tsupportsIPv4 bool\n\tsupportsIPv6 bool\n)\n\nfunc init() {\n\tif ln, err := net.Listen(\"tcp4\", \"127.0.0.1:0\"); err == nil {\n\t\tln.Close()\n\t\tsupportsIPv4 = true\n\t}\n\tif ln, err := net.Listen(\"tcp6\", \"[::1]:0\"); err == nil {\n\t\tln.Close()\n\t\tsupportsIPv6 = true\n\t}\n}\n\n// SupportsIPv4 reports whether the platform supports IPv4 networking\n// functionality.\nfunc SupportsIPv4() bool { return supportsIPv4 }\n\n// SupportsIPv6 reports whether the platform supports IPv6 networking\n// functionality.\nfunc SupportsIPv6() bool { return supportsIPv6 }\n\n// SupportsRawIPSocket reports whether the platform supports raw IP\n// sockets.\nfunc SupportsRawIPSocket() (string, bool) {\n\treturn supportsRawIPSocket()\n}\n\n// SupportsIPv6MulticastDeliveryOnLoopback reports whether the\n// platform supports IPv6 multicast packet delivery on software\n// loopback interface.\nfunc SupportsIPv6MulticastDeliveryOnLoopback() bool {\n\treturn supportsIPv6MulticastDeliveryOnLoopback()\n}\n\n// ProtocolNotSupported reports whether err is a protocol not\n// supported error.\nfunc ProtocolNotSupported(err error) bool {\n\treturn protocolNotSupported(err)\n}\n\n// TestableNetwork reports whether network is testable on the current\n// platform configuration.\nfunc TestableNetwork(network string) bool {\n\t// This is based on logic from standard library's\n\t// net/platform_test.go.\n\tswitch network {\n\tcase \"unix\", \"unixgram\":\n\t\tswitch runtime.GOOS {\n\t\tcase \"android\", \"nacl\", \"plan9\", \"windows\":\n\t\t\treturn false\n\t\t}\n\t\tif runtime.GOOS == \"darwin\" && 
(runtime.GOARCH == \"arm\" || runtime.GOARCH == \"arm64\") {\n\t\t\treturn false\n\t\t}\n\tcase \"unixpacket\":\n\t\tswitch runtime.GOOS {\n\t\tcase \"android\", \"darwin\", \"freebsd\", \"nacl\", \"plan9\", \"windows\":\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// NewLocalListener returns a listener which listens to a loopback IP\n// address or local file system path.\n// Network must be \"tcp\", \"tcp4\", \"tcp6\", \"unix\" or \"unixpacket\".\nfunc NewLocalListener(network string) (net.Listener, error) {\n\tswitch network {\n\tcase \"tcp\":\n\t\tif supportsIPv4 {\n\t\t\tif ln, err := net.Listen(\"tcp4\", \"127.0.0.1:0\"); err == nil {\n\t\t\t\treturn ln, nil\n\t\t\t}\n\t\t}\n\t\tif supportsIPv6 {\n\t\t\treturn net.Listen(\"tcp6\", \"[::1]:0\")\n\t\t}\n\tcase \"tcp4\":\n\t\tif supportsIPv4 {\n\t\t\treturn net.Listen(\"tcp4\", \"127.0.0.1:0\")\n\t\t}\n\tcase \"tcp6\":\n\t\tif supportsIPv6 {\n\t\t\treturn net.Listen(\"tcp6\", \"[::1]:0\")\n\t\t}\n\tcase \"unix\", \"unixpacket\":\n\t\treturn net.Listen(network, localPath())\n\t}\n\treturn nil, fmt.Errorf(\"%s is not supported\", network)\n}\n\n// NewLocalPacketListener returns a packet listener which listens to a\n// loopback IP address or local file system path.\n// Network must be \"udp\", \"udp4\", \"udp6\" or \"unixgram\".\nfunc NewLocalPacketListener(network string) (net.PacketConn, error) {\n\tswitch network {\n\tcase \"udp\":\n\t\tif supportsIPv4 {\n\t\t\tif c, err := net.ListenPacket(\"udp4\", \"127.0.0.1:0\"); err == nil {\n\t\t\t\treturn c, nil\n\t\t\t}\n\t\t}\n\t\tif supportsIPv6 {\n\t\t\treturn net.ListenPacket(\"udp6\", \"[::1]:0\")\n\t\t}\n\tcase \"udp4\":\n\t\tif supportsIPv4 {\n\t\t\treturn net.ListenPacket(\"udp4\", \"127.0.0.1:0\")\n\t\t}\n\tcase \"udp6\":\n\t\tif supportsIPv6 {\n\t\t\treturn net.ListenPacket(\"udp6\", \"[::1]:0\")\n\t\t}\n\tcase \"unixgram\":\n\t\treturn net.ListenPacket(network, localPath())\n\t}\n\treturn nil, fmt.Errorf(\"%s is not supported\", network)\n}\n\nfunc 
localPath() string {\n\tf, err := ioutil.TempFile(\"\", \"nettest\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tpath := f.Name()\n\tf.Close()\n\tos.Remove(path)\n\treturn path\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/cmsghdr.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd linux netbsd openbsd solaris\n\npackage socket\n\nfunc (h *cmsghdr) len() int { return int(h.Len) }\nfunc (h *cmsghdr) lvl() int { return int(h.Level) }\nfunc (h *cmsghdr) typ() int { return int(h.Type) }\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd openbsd\n\npackage socket\n\nfunc (h *cmsghdr) set(l, lvl, typ int) {\n\th.Len = uint32(l)\n\th.Level = int32(lvl)\n\th.Type = int32(typ)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build arm mips mipsle 386\n// +build linux\n\npackage socket\n\nfunc (h *cmsghdr) set(l, lvl, typ int) {\n\th.Len = uint32(l)\n\th.Level = int32(lvl)\n\th.Type = int32(typ)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x\n// +build linux\n\npackage socket\n\nfunc (h *cmsghdr) set(l, lvl, typ int) {\n\th.Len = uint64(l)\n\th.Level = int32(lvl)\n\th.Type = int32(typ)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build amd64\n// +build solaris\n\npackage socket\n\nfunc (h *cmsghdr) set(l, lvl, typ int) {\n\th.Len = uint32(l)\n\th.Level = int32(lvl)\n\th.Type = int32(typ)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris\n\npackage socket\n\ntype cmsghdr struct{}\n\nconst sizeofCmsghdr = 0\n\nfunc (h *cmsghdr) len() int { return 0 }\nfunc (h *cmsghdr) lvl() int { return 0 }\nfunc (h *cmsghdr) typ() int { return 0 }\n\nfunc (h *cmsghdr) set(l, lvl, typ int) {}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/defs_darwin.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in_addr [4]byte /* in_addr */\n// +godefs map struct_in6_addr [16]byte /* in6_addr */\n\npackage socket\n\n/*\n#include <sys/socket.h>\n\n#include <netinet/in.h>\n*/\nimport \"C\"\n\nconst (\n\tsysAF_UNSPEC = C.AF_UNSPEC\n\tsysAF_INET   = C.AF_INET\n\tsysAF_INET6  = C.AF_INET6\n\n\tsysSOCK_RAW = C.SOCK_RAW\n)\n\ntype iovec C.struct_iovec\n\ntype msghdr C.struct_msghdr\n\ntype cmsghdr C.struct_cmsghdr\n\ntype sockaddrInet C.struct_sockaddr_in\n\ntype sockaddrInet6 C.struct_sockaddr_in6\n\nconst (\n\tsizeofIovec   = C.sizeof_struct_iovec\n\tsizeofMsghdr  = C.sizeof_struct_msghdr\n\tsizeofCmsghdr = C.sizeof_struct_cmsghdr\n\n\tsizeofSockaddrInet  = C.sizeof_struct_sockaddr_in\n\tsizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/defs_dragonfly.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in_addr [4]byte /* in_addr */\n// +godefs map struct_in6_addr [16]byte /* in6_addr */\n\npackage socket\n\n/*\n#include <sys/socket.h>\n\n#include <netinet/in.h>\n*/\nimport \"C\"\n\nconst (\n\tsysAF_UNSPEC = C.AF_UNSPEC\n\tsysAF_INET   = C.AF_INET\n\tsysAF_INET6  = C.AF_INET6\n\n\tsysSOCK_RAW = C.SOCK_RAW\n)\n\ntype iovec C.struct_iovec\n\ntype msghdr C.struct_msghdr\n\ntype cmsghdr C.struct_cmsghdr\n\ntype sockaddrInet C.struct_sockaddr_in\n\ntype sockaddrInet6 C.struct_sockaddr_in6\n\nconst (\n\tsizeofIovec   = C.sizeof_struct_iovec\n\tsizeofMsghdr  = C.sizeof_struct_msghdr\n\tsizeofCmsghdr = C.sizeof_struct_cmsghdr\n\n\tsizeofSockaddrInet  = C.sizeof_struct_sockaddr_in\n\tsizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/defs_freebsd.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in_addr [4]byte /* in_addr */\n// +godefs map struct_in6_addr [16]byte /* in6_addr */\n\npackage socket\n\n/*\n#include <sys/socket.h>\n\n#include <netinet/in.h>\n*/\nimport \"C\"\n\nconst (\n\tsysAF_UNSPEC = C.AF_UNSPEC\n\tsysAF_INET   = C.AF_INET\n\tsysAF_INET6  = C.AF_INET6\n\n\tsysSOCK_RAW = C.SOCK_RAW\n)\n\ntype iovec C.struct_iovec\n\ntype msghdr C.struct_msghdr\n\ntype cmsghdr C.struct_cmsghdr\n\ntype sockaddrInet C.struct_sockaddr_in\n\ntype sockaddrInet6 C.struct_sockaddr_in6\n\nconst (\n\tsizeofIovec   = C.sizeof_struct_iovec\n\tsizeofMsghdr  = C.sizeof_struct_msghdr\n\tsizeofCmsghdr = C.sizeof_struct_cmsghdr\n\n\tsizeofSockaddrInet  = C.sizeof_struct_sockaddr_in\n\tsizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/defs_linux.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in_addr [4]byte /* in_addr */\n// +godefs map struct_in6_addr [16]byte /* in6_addr */\n\npackage socket\n\n/*\n#include <linux/in.h>\n#include <linux/in6.h>\n\n#define _GNU_SOURCE\n#include <sys/socket.h>\n*/\nimport \"C\"\n\nconst (\n\tsysAF_UNSPEC = C.AF_UNSPEC\n\tsysAF_INET   = C.AF_INET\n\tsysAF_INET6  = C.AF_INET6\n\n\tsysSOCK_RAW = C.SOCK_RAW\n)\n\ntype iovec C.struct_iovec\n\ntype msghdr C.struct_msghdr\n\ntype mmsghdr C.struct_mmsghdr\n\ntype cmsghdr C.struct_cmsghdr\n\ntype sockaddrInet C.struct_sockaddr_in\n\ntype sockaddrInet6 C.struct_sockaddr_in6\n\nconst (\n\tsizeofIovec   = C.sizeof_struct_iovec\n\tsizeofMsghdr  = C.sizeof_struct_msghdr\n\tsizeofMmsghdr = C.sizeof_struct_mmsghdr\n\tsizeofCmsghdr = C.sizeof_struct_cmsghdr\n\n\tsizeofSockaddrInet  = C.sizeof_struct_sockaddr_in\n\tsizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/defs_netbsd.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in_addr [4]byte /* in_addr */\n// +godefs map struct_in6_addr [16]byte /* in6_addr */\n\npackage socket\n\n/*\n#include <sys/socket.h>\n\n#include <netinet/in.h>\n*/\nimport \"C\"\n\nconst (\n\tsysAF_UNSPEC = C.AF_UNSPEC\n\tsysAF_INET   = C.AF_INET\n\tsysAF_INET6  = C.AF_INET6\n\n\tsysSOCK_RAW = C.SOCK_RAW\n)\n\ntype iovec C.struct_iovec\n\ntype msghdr C.struct_msghdr\n\ntype mmsghdr C.struct_mmsghdr\n\ntype cmsghdr C.struct_cmsghdr\n\ntype sockaddrInet C.struct_sockaddr_in\n\ntype sockaddrInet6 C.struct_sockaddr_in6\n\nconst (\n\tsizeofIovec   = C.sizeof_struct_iovec\n\tsizeofMsghdr  = C.sizeof_struct_msghdr\n\tsizeofMmsghdr = C.sizeof_struct_mmsghdr\n\tsizeofCmsghdr = C.sizeof_struct_cmsghdr\n\n\tsizeofSockaddrInet  = C.sizeof_struct_sockaddr_in\n\tsizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/defs_openbsd.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in_addr [4]byte /* in_addr */\n// +godefs map struct_in6_addr [16]byte /* in6_addr */\n\npackage socket\n\n/*\n#include <sys/socket.h>\n\n#include <netinet/in.h>\n*/\nimport \"C\"\n\nconst (\n\tsysAF_UNSPEC = C.AF_UNSPEC\n\tsysAF_INET   = C.AF_INET\n\tsysAF_INET6  = C.AF_INET6\n\n\tsysSOCK_RAW = C.SOCK_RAW\n)\n\ntype iovec C.struct_iovec\n\ntype msghdr C.struct_msghdr\n\ntype cmsghdr C.struct_cmsghdr\n\ntype sockaddrInet C.struct_sockaddr_in\n\ntype sockaddrInet6 C.struct_sockaddr_in6\n\nconst (\n\tsizeofIovec   = C.sizeof_struct_iovec\n\tsizeofMsghdr  = C.sizeof_struct_msghdr\n\tsizeofCmsghdr = C.sizeof_struct_cmsghdr\n\n\tsizeofSockaddrInet  = C.sizeof_struct_sockaddr_in\n\tsizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/defs_solaris.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in_addr [4]byte /* in_addr */\n// +godefs map struct_in6_addr [16]byte /* in6_addr */\n\npackage socket\n\n/*\n#include <sys/socket.h>\n\n#include <netinet/in.h>\n*/\nimport \"C\"\n\nconst (\n\tsysAF_UNSPEC = C.AF_UNSPEC\n\tsysAF_INET   = C.AF_INET\n\tsysAF_INET6  = C.AF_INET6\n\n\tsysSOCK_RAW = C.SOCK_RAW\n)\n\ntype iovec C.struct_iovec\n\ntype msghdr C.struct_msghdr\n\ntype cmsghdr C.struct_cmsghdr\n\ntype sockaddrInet C.struct_sockaddr_in\n\ntype sockaddrInet6 C.struct_sockaddr_in6\n\nconst (\n\tsizeofIovec   = C.sizeof_struct_iovec\n\tsizeofMsghdr  = C.sizeof_struct_msghdr\n\tsizeofCmsghdr = C.sizeof_struct_cmsghdr\n\n\tsizeofSockaddrInet  = C.sizeof_struct_sockaddr_in\n\tsizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/error_unix.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd linux netbsd openbsd solaris\n\npackage socket\n\nimport \"syscall\"\n\nvar (\n\terrEAGAIN error = syscall.EAGAIN\n\terrEINVAL error = syscall.EINVAL\n\terrENOENT error = syscall.ENOENT\n)\n\n// errnoErr returns common boxed Errno values, to prevent allocations\n// at runtime.\nfunc errnoErr(errno syscall.Errno) error {\n\tswitch errno {\n\tcase 0:\n\t\treturn nil\n\tcase syscall.EAGAIN:\n\t\treturn errEAGAIN\n\tcase syscall.EINVAL:\n\t\treturn errEINVAL\n\tcase syscall.ENOENT:\n\t\treturn errENOENT\n\t}\n\treturn errno\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/error_windows.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nimport \"syscall\"\n\nvar (\n\terrERROR_IO_PENDING error = syscall.ERROR_IO_PENDING\n\terrEINVAL           error = syscall.EINVAL\n)\n\n// errnoErr returns common boxed Errno values, to prevent allocations\n// at runtime.\nfunc errnoErr(errno syscall.Errno) error {\n\tswitch errno {\n\tcase 0:\n\t\treturn nil\n\tcase syscall.ERROR_IO_PENDING:\n\t\treturn errERROR_IO_PENDING\n\tcase syscall.EINVAL:\n\t\treturn errEINVAL\n\t}\n\treturn errno\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/iovec_32bit.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build arm mips mipsle 386\n// +build darwin dragonfly freebsd linux netbsd openbsd\n\npackage socket\n\nimport \"unsafe\"\n\nfunc (v *iovec) set(b []byte) {\n\tl := len(b)\n\tif l == 0 {\n\t\treturn\n\t}\n\tv.Base = (*byte)(unsafe.Pointer(&b[0]))\n\tv.Len = uint32(l)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/iovec_64bit.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x\n// +build darwin dragonfly freebsd linux netbsd openbsd\n\npackage socket\n\nimport \"unsafe\"\n\nfunc (v *iovec) set(b []byte) {\n\tl := len(b)\n\tif l == 0 {\n\t\treturn\n\t}\n\tv.Base = (*byte)(unsafe.Pointer(&b[0]))\n\tv.Len = uint64(l)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build amd64\n// +build solaris\n\npackage socket\n\nimport \"unsafe\"\n\nfunc (v *iovec) set(b []byte) {\n\tl := len(b)\n\tif l == 0 {\n\t\treturn\n\t}\n\tv.Base = (*int8)(unsafe.Pointer(&b[0]))\n\tv.Len = uint64(l)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/iovec_stub.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris\n\npackage socket\n\ntype iovec struct{}\n\nfunc (v *iovec) set(b []byte) {}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !linux,!netbsd\n\npackage socket\n\nimport \"net\"\n\ntype mmsghdr struct{}\n\ntype mmsghdrs []mmsghdr\n\nfunc (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error {\n\treturn nil\n}\n\nfunc (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error {\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build linux netbsd\n\npackage socket\n\nimport \"net\"\n\ntype mmsghdrs []mmsghdr\n\nfunc (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error {\n\tfor i := range hs {\n\t\tvs := make([]iovec, len(ms[i].Buffers))\n\t\tvar sa []byte\n\t\tif parseFn != nil {\n\t\t\tsa = make([]byte, sizeofSockaddrInet6)\n\t\t}\n\t\tif marshalFn != nil {\n\t\t\tsa = marshalFn(ms[i].Addr)\n\t\t}\n\t\ths[i].Hdr.pack(vs, ms[i].Buffers, ms[i].OOB, sa)\n\t}\n\treturn nil\n}\n\nfunc (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error {\n\tfor i := range hs {\n\t\tms[i].N = int(hs[i].Len)\n\t\tms[i].NN = hs[i].Hdr.controllen()\n\t\tms[i].Flags = hs[i].Hdr.flags()\n\t\tif parseFn != nil {\n\t\t\tvar err error\n\t\t\tms[i].Addr, err = parseFn(hs[i].Hdr.name(), hint)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/msghdr_bsd.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd openbsd\n\npackage socket\n\nimport \"unsafe\"\n\nfunc (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) {\n\tfor i := range vs {\n\t\tvs[i].set(bs[i])\n\t}\n\th.setIov(vs)\n\tif len(oob) > 0 {\n\t\th.Control = (*byte)(unsafe.Pointer(&oob[0]))\n\t\th.Controllen = uint32(len(oob))\n\t}\n\tif sa != nil {\n\t\th.Name = (*byte)(unsafe.Pointer(&sa[0]))\n\t\th.Namelen = uint32(len(sa))\n\t}\n}\n\nfunc (h *msghdr) name() []byte {\n\tif h.Name != nil && h.Namelen > 0 {\n\t\treturn (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen]\n\t}\n\treturn nil\n}\n\nfunc (h *msghdr) controllen() int {\n\treturn int(h.Controllen)\n}\n\nfunc (h *msghdr) flags() int {\n\treturn int(h.Flags)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd\n\npackage socket\n\nfunc (h *msghdr) setIov(vs []iovec) {\n\tl := len(vs)\n\tif l == 0 {\n\t\treturn\n\t}\n\th.Iov = &vs[0]\n\th.Iovlen = int32(l)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/msghdr_linux.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nimport \"unsafe\"\n\nfunc (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) {\n\tfor i := range vs {\n\t\tvs[i].set(bs[i])\n\t}\n\th.setIov(vs)\n\tif len(oob) > 0 {\n\t\th.setControl(oob)\n\t}\n\tif sa != nil {\n\t\th.Name = (*byte)(unsafe.Pointer(&sa[0]))\n\t\th.Namelen = uint32(len(sa))\n\t}\n}\n\nfunc (h *msghdr) name() []byte {\n\tif h.Name != nil && h.Namelen > 0 {\n\t\treturn (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen]\n\t}\n\treturn nil\n}\n\nfunc (h *msghdr) controllen() int {\n\treturn int(h.Controllen)\n}\n\nfunc (h *msghdr) flags() int {\n\treturn int(h.Flags)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build arm mips mipsle 386\n// +build linux\n\npackage socket\n\nimport \"unsafe\"\n\nfunc (h *msghdr) setIov(vs []iovec) {\n\tl := len(vs)\n\tif l == 0 {\n\t\treturn\n\t}\n\th.Iov = &vs[0]\n\th.Iovlen = uint32(l)\n}\n\nfunc (h *msghdr) setControl(b []byte) {\n\th.Control = (*byte)(unsafe.Pointer(&b[0]))\n\th.Controllen = uint32(len(b))\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x\n// +build linux\n\npackage socket\n\nimport \"unsafe\"\n\nfunc (h *msghdr) setIov(vs []iovec) {\n\tl := len(vs)\n\tif l == 0 {\n\t\treturn\n\t}\n\th.Iov = &vs[0]\n\th.Iovlen = uint64(l)\n}\n\nfunc (h *msghdr) setControl(b []byte) {\n\th.Control = (*byte)(unsafe.Pointer(&b[0]))\n\th.Controllen = uint64(len(b))\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nfunc (h *msghdr) setIov(vs []iovec) {\n\tl := len(vs)\n\tif l == 0 {\n\t\treturn\n\t}\n\th.Iov = &vs[0]\n\th.Iovlen = uint32(l)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build amd64\n// +build solaris\n\npackage socket\n\nimport \"unsafe\"\n\nfunc (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) {\n\tfor i := range vs {\n\t\tvs[i].set(bs[i])\n\t}\n\tif len(vs) > 0 {\n\t\th.Iov = &vs[0]\n\t\th.Iovlen = int32(len(vs))\n\t}\n\tif len(oob) > 0 {\n\t\th.Accrights = (*int8)(unsafe.Pointer(&oob[0]))\n\t\th.Accrightslen = int32(len(oob))\n\t}\n\tif sa != nil {\n\t\th.Name = (*byte)(unsafe.Pointer(&sa[0]))\n\t\th.Namelen = uint32(len(sa))\n\t}\n}\n\nfunc (h *msghdr) controllen() int {\n\treturn int(h.Accrightslen)\n}\n\nfunc (h *msghdr) flags() int {\n\treturn int(NativeEndian.Uint32(h.Pad_cgo_2[:]))\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/msghdr_stub.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris\n\npackage socket\n\ntype msghdr struct{}\n\nfunc (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) {}\nfunc (h *msghdr) name() []byte                                        { return nil }\nfunc (h *msghdr) controllen() int                                     { return 0 }\nfunc (h *msghdr) flags() int                                          { return 0 }\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/rawconn.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.9\n\npackage socket\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"os\"\n\t\"syscall\"\n)\n\n// A Conn represents a raw connection.\ntype Conn struct {\n\tnetwork string\n\tc       syscall.RawConn\n}\n\n// NewConn returns a new raw connection.\nfunc NewConn(c net.Conn) (*Conn, error) {\n\tvar err error\n\tvar cc Conn\n\tswitch c := c.(type) {\n\tcase *net.TCPConn:\n\t\tcc.network = \"tcp\"\n\t\tcc.c, err = c.SyscallConn()\n\tcase *net.UDPConn:\n\t\tcc.network = \"udp\"\n\t\tcc.c, err = c.SyscallConn()\n\tcase *net.IPConn:\n\t\tcc.network = \"ip\"\n\t\tcc.c, err = c.SyscallConn()\n\tdefault:\n\t\treturn nil, errors.New(\"unknown connection type\")\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &cc, nil\n}\n\nfunc (o *Option) get(c *Conn, b []byte) (int, error) {\n\tvar operr error\n\tvar n int\n\tfn := func(s uintptr) {\n\t\tn, operr = getsockopt(s, o.Level, o.Name, b)\n\t}\n\tif err := c.c.Control(fn); err != nil {\n\t\treturn 0, err\n\t}\n\treturn n, os.NewSyscallError(\"getsockopt\", operr)\n}\n\nfunc (o *Option) set(c *Conn, b []byte) error {\n\tvar operr error\n\tfn := func(s uintptr) {\n\t\toperr = setsockopt(s, o.Level, o.Name, b)\n\t}\n\tif err := c.c.Control(fn); err != nil {\n\t\treturn err\n\t}\n\treturn os.NewSyscallError(\"setsockopt\", operr)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.9\n// +build linux\n\npackage socket\n\nimport (\n\t\"net\"\n\t\"os\"\n\t\"syscall\"\n)\n\nfunc (c *Conn) recvMsgs(ms []Message, flags int) (int, error) {\n\ths := make(mmsghdrs, len(ms))\n\tvar parseFn func([]byte, string) (net.Addr, error)\n\tif c.network != \"tcp\" {\n\t\tparseFn = parseInetAddr\n\t}\n\tif err := hs.pack(ms, parseFn, nil); err != nil {\n\t\treturn 0, err\n\t}\n\tvar operr error\n\tvar n int\n\tfn := func(s uintptr) bool {\n\t\tn, operr = recvmmsg(s, hs, flags)\n\t\tif operr == syscall.EAGAIN {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\tif err := c.c.Read(fn); err != nil {\n\t\treturn n, err\n\t}\n\tif operr != nil {\n\t\treturn n, os.NewSyscallError(\"recvmmsg\", operr)\n\t}\n\tif err := hs[:n].unpack(ms[:n], parseFn, c.network); err != nil {\n\t\treturn n, err\n\t}\n\treturn n, nil\n}\n\nfunc (c *Conn) sendMsgs(ms []Message, flags int) (int, error) {\n\ths := make(mmsghdrs, len(ms))\n\tvar marshalFn func(net.Addr) []byte\n\tif c.network != \"tcp\" {\n\t\tmarshalFn = marshalInetAddr\n\t}\n\tif err := hs.pack(ms, nil, marshalFn); err != nil {\n\t\treturn 0, err\n\t}\n\tvar operr error\n\tvar n int\n\tfn := func(s uintptr) bool {\n\t\tn, operr = sendmmsg(s, hs, flags)\n\t\tif operr == syscall.EAGAIN {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\tif err := c.c.Write(fn); err != nil {\n\t\treturn n, err\n\t}\n\tif operr != nil {\n\t\treturn n, os.NewSyscallError(\"sendmmsg\", operr)\n\t}\n\tif err := hs[:n].unpack(ms[:n], nil, \"\"); err != nil {\n\t\treturn n, err\n\t}\n\treturn n, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/rawconn_msg.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.9\n// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows\n\npackage socket\n\nimport (\n\t\"os\"\n\t\"syscall\"\n)\n\nfunc (c *Conn) recvMsg(m *Message, flags int) error {\n\tvar h msghdr\n\tvs := make([]iovec, len(m.Buffers))\n\tvar sa []byte\n\tif c.network != \"tcp\" {\n\t\tsa = make([]byte, sizeofSockaddrInet6)\n\t}\n\th.pack(vs, m.Buffers, m.OOB, sa)\n\tvar operr error\n\tvar n int\n\tfn := func(s uintptr) bool {\n\t\tn, operr = recvmsg(s, &h, flags)\n\t\tif operr == syscall.EAGAIN {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\tif err := c.c.Read(fn); err != nil {\n\t\treturn err\n\t}\n\tif operr != nil {\n\t\treturn os.NewSyscallError(\"recvmsg\", operr)\n\t}\n\tif c.network != \"tcp\" {\n\t\tvar err error\n\t\tm.Addr, err = parseInetAddr(sa[:], c.network)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tm.N = n\n\tm.NN = h.controllen()\n\tm.Flags = h.flags()\n\treturn nil\n}\n\nfunc (c *Conn) sendMsg(m *Message, flags int) error {\n\tvar h msghdr\n\tvs := make([]iovec, len(m.Buffers))\n\tvar sa []byte\n\tif m.Addr != nil {\n\t\tsa = marshalInetAddr(m.Addr)\n\t}\n\th.pack(vs, m.Buffers, m.OOB, sa)\n\tvar operr error\n\tvar n int\n\tfn := func(s uintptr) bool {\n\t\tn, operr = sendmsg(s, &h, flags)\n\t\tif operr == syscall.EAGAIN {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\tif err := c.c.Write(fn); err != nil {\n\t\treturn err\n\t}\n\tif operr != nil {\n\t\treturn os.NewSyscallError(\"sendmsg\", operr)\n\t}\n\tm.N = n\n\tm.NN = len(m.OOB)\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.9\n// +build !linux\n\npackage socket\n\nimport \"errors\"\n\nfunc (c *Conn) recvMsgs(ms []Message, flags int) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n\nfunc (c *Conn) sendMsgs(ms []Message, flags int) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.9\n// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows\n\npackage socket\n\nimport \"errors\"\n\nfunc (c *Conn) recvMsg(m *Message, flags int) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (c *Conn) sendMsg(m *Message, flags int) error {\n\treturn errors.New(\"not implemented\")\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/rawconn_stub.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.9\n\npackage socket\n\nimport \"errors\"\n\nfunc (c *Conn) recvMsg(m *Message, flags int) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (c *Conn) sendMsg(m *Message, flags int) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (c *Conn) recvMsgs(ms []Message, flags int) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n\nfunc (c *Conn) sendMsgs(ms []Message, flags int) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/reflect.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.9\n\npackage socket\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"os\"\n\t\"reflect\"\n\t\"runtime\"\n)\n\n// A Conn represents a raw connection.\ntype Conn struct {\n\tc net.Conn\n}\n\n// NewConn returns a new raw connection.\nfunc NewConn(c net.Conn) (*Conn, error) {\n\treturn &Conn{c: c}, nil\n}\n\nfunc (o *Option) get(c *Conn, b []byte) (int, error) {\n\ts, err := socketOf(c.c)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tn, err := getsockopt(s, o.Level, o.Name, b)\n\treturn n, os.NewSyscallError(\"getsockopt\", err)\n}\n\nfunc (o *Option) set(c *Conn, b []byte) error {\n\ts, err := socketOf(c.c)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn os.NewSyscallError(\"setsockopt\", setsockopt(s, o.Level, o.Name, b))\n}\n\nfunc socketOf(c net.Conn) (uintptr, error) {\n\tswitch c.(type) {\n\tcase *net.TCPConn, *net.UDPConn, *net.IPConn:\n\t\tv := reflect.ValueOf(c)\n\t\tswitch e := v.Elem(); e.Kind() {\n\t\tcase reflect.Struct:\n\t\t\tfd := e.FieldByName(\"conn\").FieldByName(\"fd\")\n\t\t\tswitch e := fd.Elem(); e.Kind() {\n\t\t\tcase reflect.Struct:\n\t\t\t\tsysfd := e.FieldByName(\"sysfd\")\n\t\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\t\treturn uintptr(sysfd.Uint()), nil\n\t\t\t\t}\n\t\t\t\treturn uintptr(sysfd.Int()), nil\n\t\t\t}\n\t\t}\n\t}\n\treturn 0, errors.New(\"invalid type\")\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/socket.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package socket provides a portable interface for socket system\n// calls.\npackage socket // import \"golang.org/x/net/internal/socket\"\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"unsafe\"\n)\n\n// An Option represents a sticky socket option.\ntype Option struct {\n\tLevel int // level\n\tName  int // name; must be equal or greater than 1\n\tLen   int // length of value in bytes; must be equal or greater than 1\n}\n\n// Get reads a value for the option from the kernel.\n// It returns the number of bytes written into b.\nfunc (o *Option) Get(c *Conn, b []byte) (int, error) {\n\tif o.Name < 1 || o.Len < 1 {\n\t\treturn 0, errors.New(\"invalid option\")\n\t}\n\tif len(b) < o.Len {\n\t\treturn 0, errors.New(\"short buffer\")\n\t}\n\treturn o.get(c, b)\n}\n\n// GetInt returns an integer value for the option.\n//\n// The Len field of Option must be either 1 or 4.\nfunc (o *Option) GetInt(c *Conn) (int, error) {\n\tif o.Len != 1 && o.Len != 4 {\n\t\treturn 0, errors.New(\"invalid option\")\n\t}\n\tvar b []byte\n\tvar bb [4]byte\n\tif o.Len == 1 {\n\t\tb = bb[:1]\n\t} else {\n\t\tb = bb[:4]\n\t}\n\tn, err := o.get(c, b)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif n != o.Len {\n\t\treturn 0, errors.New(\"invalid option length\")\n\t}\n\tif o.Len == 1 {\n\t\treturn int(b[0]), nil\n\t}\n\treturn int(NativeEndian.Uint32(b[:4])), nil\n}\n\n// Set writes the option and value to the kernel.\nfunc (o *Option) Set(c *Conn, b []byte) error {\n\tif o.Name < 1 || o.Len < 1 {\n\t\treturn errors.New(\"invalid option\")\n\t}\n\tif len(b) < o.Len {\n\t\treturn errors.New(\"short buffer\")\n\t}\n\treturn o.set(c, b)\n}\n\n// SetInt writes the option and value to the kernel.\n//\n// The Len field of Option must be either 1 or 4.\nfunc (o *Option) SetInt(c *Conn, v int) error {\n\tif o.Len != 1 && o.Len != 4 
{\n\t\treturn errors.New(\"invalid option\")\n\t}\n\tvar b []byte\n\tif o.Len == 1 {\n\t\tb = []byte{byte(v)}\n\t} else {\n\t\tvar bb [4]byte\n\t\tNativeEndian.PutUint32(bb[:o.Len], uint32(v))\n\t\tb = bb[:4]\n\t}\n\treturn o.set(c, b)\n}\n\nfunc controlHeaderLen() int {\n\treturn roundup(sizeofCmsghdr)\n}\n\nfunc controlMessageLen(dataLen int) int {\n\treturn roundup(sizeofCmsghdr) + dataLen\n}\n\n// ControlMessageSpace returns the whole length of control message.\nfunc ControlMessageSpace(dataLen int) int {\n\treturn roundup(sizeofCmsghdr) + roundup(dataLen)\n}\n\n// A ControlMessage represents the head message in a stream of control\n// messages.\n//\n// A control message comprises of a header, data and a few padding\n// fields to conform to the interface to the kernel.\n//\n// See RFC 3542 for further information.\ntype ControlMessage []byte\n\n// Data returns the data field of the control message at the head on\n// w.\nfunc (m ControlMessage) Data(dataLen int) []byte {\n\tl := controlHeaderLen()\n\tif len(m) < l || len(m) < l+dataLen {\n\t\treturn nil\n\t}\n\treturn m[l : l+dataLen]\n}\n\n// Next returns the control message at the next on w.\n//\n// Next works only for standard control messages.\nfunc (m ControlMessage) Next(dataLen int) ControlMessage {\n\tl := ControlMessageSpace(dataLen)\n\tif len(m) < l {\n\t\treturn nil\n\t}\n\treturn m[l:]\n}\n\n// MarshalHeader marshals the header fields of the control message at\n// the head on w.\nfunc (m ControlMessage) MarshalHeader(lvl, typ, dataLen int) error {\n\tif len(m) < controlHeaderLen() {\n\t\treturn errors.New(\"short message\")\n\t}\n\th := (*cmsghdr)(unsafe.Pointer(&m[0]))\n\th.set(controlMessageLen(dataLen), lvl, typ)\n\treturn nil\n}\n\n// ParseHeader parses and returns the header fields of the control\n// message at the head on w.\nfunc (m ControlMessage) ParseHeader() (lvl, typ, dataLen int, err error) {\n\tl := controlHeaderLen()\n\tif len(m) < l {\n\t\treturn 0, 0, 0, errors.New(\"short 
message\")\n\t}\n\th := (*cmsghdr)(unsafe.Pointer(&m[0]))\n\treturn h.lvl(), h.typ(), int(uint64(h.len()) - uint64(l)), nil\n}\n\n// Marshal marshals the control message at the head on w, and returns\n// the next control message.\nfunc (m ControlMessage) Marshal(lvl, typ int, data []byte) (ControlMessage, error) {\n\tl := len(data)\n\tif len(m) < ControlMessageSpace(l) {\n\t\treturn nil, errors.New(\"short message\")\n\t}\n\th := (*cmsghdr)(unsafe.Pointer(&m[0]))\n\th.set(controlMessageLen(l), lvl, typ)\n\tif l > 0 {\n\t\tcopy(m.Data(l), data)\n\t}\n\treturn m.Next(l), nil\n}\n\n// Parse parses w as a single or multiple control messages.\n//\n// Parse works for both standard and compatible messages.\nfunc (m ControlMessage) Parse() ([]ControlMessage, error) {\n\tvar ms []ControlMessage\n\tfor len(m) >= controlHeaderLen() {\n\t\th := (*cmsghdr)(unsafe.Pointer(&m[0]))\n\t\tl := h.len()\n\t\tif l <= 0 {\n\t\t\treturn nil, errors.New(\"invalid header length\")\n\t\t}\n\t\tif uint64(l) < uint64(controlHeaderLen()) {\n\t\t\treturn nil, errors.New(\"invalid message length\")\n\t\t}\n\t\tif uint64(l) > uint64(len(m)) {\n\t\t\treturn nil, errors.New(\"short buffer\")\n\t\t}\n\t\t// On message reception:\n\t\t//\n\t\t// |<- ControlMessageSpace --------------->|\n\t\t// |<- controlMessageLen ---------->|      |\n\t\t// |<- controlHeaderLen ->|         |      |\n\t\t// +---------------+------+---------+------+\n\t\t// |    Header     | PadH |  Data   | PadD |\n\t\t// +---------------+------+---------+------+\n\t\t//\n\t\t// On compatible message reception:\n\t\t//\n\t\t// | ... |<- controlMessageLen ----------->|\n\t\t// | ... |<- controlHeaderLen ->|          |\n\t\t// +-----+---------------+------+----------+\n\t\t// | ... 
|    Header     | PadH |   Data   |\n\t\t// +-----+---------------+------+----------+\n\t\tms = append(ms, ControlMessage(m[:l]))\n\t\tll := l - controlHeaderLen()\n\t\tif len(m) >= ControlMessageSpace(ll) {\n\t\t\tm = m[ControlMessageSpace(ll):]\n\t\t} else {\n\t\t\tm = m[controlMessageLen(ll):]\n\t\t}\n\t}\n\treturn ms, nil\n}\n\n// NewControlMessage returns a new stream of control messages.\nfunc NewControlMessage(dataLen []int) ControlMessage {\n\tvar l int\n\tfor i := range dataLen {\n\t\tl += ControlMessageSpace(dataLen[i])\n\t}\n\treturn make([]byte, l)\n}\n\n// A Message represents an IO message.\ntype Message struct {\n\t// When writing, the Buffers field must contain at least one\n\t// byte to write.\n\t// When reading, the Buffers field will always contain a byte\n\t// to read.\n\tBuffers [][]byte\n\n\t// OOB contains protocol-specific control or miscellaneous\n\t// ancillary data known as out-of-band data.\n\tOOB []byte\n\n\t// Addr specifies a destination address when writing.\n\t// It can be nil when the underlying protocol of the raw\n\t// connection uses connection-oriented communication.\n\t// After a successful read, it may contain the source address\n\t// on the received packet.\n\tAddr net.Addr\n\n\tN     int // # of bytes read or written from/to Buffers\n\tNN    int // # of bytes read or written from/to OOB\n\tFlags int // protocol-specific information on the received message\n}\n\n// RecvMsg wraps recvmsg system call.\n//\n// The provided flags is a set of platform-dependent flags, such as\n// syscall.MSG_PEEK.\nfunc (c *Conn) RecvMsg(m *Message, flags int) error {\n\treturn c.recvMsg(m, flags)\n}\n\n// SendMsg wraps sendmsg system call.\n//\n// The provided flags is a set of platform-dependent flags, such as\n// syscall.MSG_DONTROUTE.\nfunc (c *Conn) SendMsg(m *Message, flags int) error {\n\treturn c.sendMsg(m, flags)\n}\n\n// RecvMsgs wraps recvmmsg system call.\n//\n// It returns the number of processed messages.\n//\n// The provided flags 
is a set of platform-dependent flags, such as\n// syscall.MSG_PEEK.\n//\n// Only Linux supports this.\nfunc (c *Conn) RecvMsgs(ms []Message, flags int) (int, error) {\n\treturn c.recvMsgs(ms, flags)\n}\n\n// SendMsgs wraps sendmmsg system call.\n//\n// It returns the number of processed messages.\n//\n// The provided flags is a set of platform-dependent flags, such as\n// syscall.MSG_DONTROUTE.\n//\n// Only Linux supports this.\nfunc (c *Conn) SendMsgs(ms []Message, flags int) (int, error) {\n\treturn c.sendMsgs(ms, flags)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.9\n// +build darwin dragonfly freebsd linux netbsd openbsd solaris\n\npackage socket_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\ntype mockControl struct {\n\tLevel int\n\tType  int\n\tData  []byte\n}\n\nfunc TestControlMessage(t *testing.T) {\n\tfor _, tt := range []struct {\n\t\tcs []mockControl\n\t}{\n\t\t{\n\t\t\t[]mockControl{\n\t\t\t\t{Level: 1, Type: 1},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t[]mockControl{\n\t\t\t\t{Level: 2, Type: 2, Data: []byte{0xfe}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t[]mockControl{\n\t\t\t\t{Level: 3, Type: 3, Data: []byte{0xfe, 0xff, 0xff, 0xfe}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t[]mockControl{\n\t\t\t\t{Level: 4, Type: 4, Data: []byte{0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t[]mockControl{\n\t\t\t\t{Level: 4, Type: 4, Data: []byte{0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe}},\n\t\t\t\t{Level: 2, Type: 2, Data: []byte{0xfe}},\n\t\t\t},\n\t\t},\n\t} {\n\t\tvar w []byte\n\t\tvar tailPadLen int\n\t\tmm := socket.NewControlMessage([]int{0})\n\t\tfor i, c := range tt.cs {\n\t\t\tm := socket.NewControlMessage([]int{len(c.Data)})\n\t\t\tl := len(m) - len(mm)\n\t\t\tif i == len(tt.cs)-1 && l > len(c.Data) {\n\t\t\t\ttailPadLen = l - len(c.Data)\n\t\t\t}\n\t\t\tw = append(w, m...)\n\t\t}\n\n\t\tvar err error\n\t\tww := make([]byte, len(w))\n\t\tcopy(ww, w)\n\t\tm := socket.ControlMessage(ww)\n\t\tfor _, c := range tt.cs {\n\t\t\tif err = m.MarshalHeader(c.Level, c.Type, len(c.Data)); err != nil {\n\t\t\t\tt.Fatalf(\"(%v).MarshalHeader() = %v\", tt.cs, err)\n\t\t\t}\n\t\t\tcopy(m.Data(len(c.Data)), c.Data)\n\t\t\tm = m.Next(len(c.Data))\n\t\t}\n\t\tm = socket.ControlMessage(w)\n\t\tfor _, c := 
range tt.cs {\n\t\t\tm, err = m.Marshal(c.Level, c.Type, c.Data)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"(%v).Marshal() = %v\", tt.cs, err)\n\t\t\t}\n\t\t}\n\t\tif !bytes.Equal(ww, w) {\n\t\t\tt.Fatalf(\"got %#v; want %#v\", ww, w)\n\t\t}\n\n\t\tws := [][]byte{w}\n\t\tif tailPadLen > 0 {\n\t\t\t// Test a message with no tail padding.\n\t\t\tnopad := w[:len(w)-tailPadLen]\n\t\t\tws = append(ws, [][]byte{nopad}...)\n\t\t}\n\t\tfor _, w := range ws {\n\t\t\tms, err := socket.ControlMessage(w).Parse()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"(%v).Parse() = %v\", tt.cs, err)\n\t\t\t}\n\t\t\tfor i, m := range ms {\n\t\t\t\tlvl, typ, dataLen, err := m.ParseHeader()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"(%v).ParseHeader() = %v\", tt.cs, err)\n\t\t\t\t}\n\t\t\t\tif lvl != tt.cs[i].Level || typ != tt.cs[i].Type || dataLen != len(tt.cs[i].Data) {\n\t\t\t\t\tt.Fatalf(\"%v: got %d, %d, %d; want %d, %d, %d\", tt.cs[i], lvl, typ, dataLen, tt.cs[i].Level, tt.cs[i].Type, len(tt.cs[i].Data))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestUDP(t *testing.T) {\n\tc, err := nettest.NewLocalPacketListener(\"udp\")\n\tif err != nil {\n\t\tt.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t}\n\tdefer c.Close()\n\tcc, err := socket.NewConn(c.(net.Conn))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tt.Run(\"Message\", func(t *testing.T) {\n\t\tdata := []byte(\"HELLO-R-U-THERE\")\n\t\twm := socket.Message{\n\t\t\tBuffers: bytes.SplitAfter(data, []byte(\"-\")),\n\t\t\tAddr:    c.LocalAddr(),\n\t\t}\n\t\tif err := cc.SendMsg(&wm, 0); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tb := make([]byte, 32)\n\t\trm := socket.Message{\n\t\t\tBuffers: [][]byte{b[:1], b[1:3], b[3:7], b[7:11], b[11:]},\n\t\t}\n\t\tif err := cc.RecvMsg(&rm, 0); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !bytes.Equal(b[:rm.N], data) {\n\t\t\tt.Fatalf(\"got %#v; want %#v\", b[:rm.N], data)\n\t\t}\n\t})\n\tswitch runtime.GOOS {\n\tcase \"android\", 
\"linux\":\n\t\tt.Run(\"Messages\", func(t *testing.T) {\n\t\t\tdata := []byte(\"HELLO-R-U-THERE\")\n\t\t\twmbs := bytes.SplitAfter(data, []byte(\"-\"))\n\t\t\twms := []socket.Message{\n\t\t\t\t{Buffers: wmbs[:1], Addr: c.LocalAddr()},\n\t\t\t\t{Buffers: wmbs[1:], Addr: c.LocalAddr()},\n\t\t\t}\n\t\t\tn, err := cc.SendMsgs(wms, 0)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif n != len(wms) {\n\t\t\t\tt.Fatalf(\"got %d; want %d\", n, len(wms))\n\t\t\t}\n\t\t\tb := make([]byte, 32)\n\t\t\trmbs := [][][]byte{{b[:len(wmbs[0])]}, {b[len(wmbs[0]):]}}\n\t\t\trms := []socket.Message{\n\t\t\t\t{Buffers: rmbs[0]},\n\t\t\t\t{Buffers: rmbs[1]},\n\t\t\t}\n\t\t\tn, err = cc.RecvMsgs(rms, 0)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif n != len(rms) {\n\t\t\t\tt.Fatalf(\"got %d; want %d\", n, len(rms))\n\t\t\t}\n\t\t\tnn := 0\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\tnn += rms[i].N\n\t\t\t}\n\t\t\tif !bytes.Equal(b[:nn], data) {\n\t\t\t\tt.Fatalf(\"got %#v; want %#v\", b[:nn], data)\n\t\t\t}\n\t\t})\n\t}\n\n\t// The behavior of transmission for zero byte paylaod depends\n\t// on each platform implementation. 
Some may transmit only\n\t// protocol header and options, other may transmit nothing.\n\t// We test only that SendMsg and SendMsgs will not crash with\n\t// empty buffers.\n\twm := socket.Message{\n\t\tBuffers: [][]byte{{}},\n\t\tAddr:    c.LocalAddr(),\n\t}\n\tcc.SendMsg(&wm, 0)\n\twms := []socket.Message{\n\t\t{Buffers: [][]byte{{}}, Addr: c.LocalAddr()},\n\t}\n\tcc.SendMsgs(wms, 0)\n}\n\nfunc BenchmarkUDP(b *testing.B) {\n\tc, err := nettest.NewLocalPacketListener(\"udp\")\n\tif err != nil {\n\t\tb.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t}\n\tdefer c.Close()\n\tcc, err := socket.NewConn(c.(net.Conn))\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tdata := []byte(\"HELLO-R-U-THERE\")\n\twm := socket.Message{\n\t\tBuffers: [][]byte{data},\n\t\tAddr:    c.LocalAddr(),\n\t}\n\trm := socket.Message{\n\t\tBuffers: [][]byte{make([]byte, 128)},\n\t\tOOB:     make([]byte, 128),\n\t}\n\n\tfor M := 1; M <= 1<<9; M = M << 1 {\n\t\tb.Run(fmt.Sprintf(\"Iter-%d\", M), func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tfor j := 0; j < M; j++ {\n\t\t\t\t\tif err := cc.SendMsg(&wm, 0); err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t\tif err := cc.RecvMsg(&rm, 0); err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tswitch runtime.GOOS {\n\t\tcase \"android\", \"linux\":\n\t\t\twms := make([]socket.Message, M)\n\t\t\tfor i := range wms {\n\t\t\t\twms[i].Buffers = [][]byte{data}\n\t\t\t\twms[i].Addr = c.LocalAddr()\n\t\t\t}\n\t\t\trms := make([]socket.Message, M)\n\t\t\tfor i := range rms {\n\t\t\t\trms[i].Buffers = [][]byte{make([]byte, 128)}\n\t\t\t\trms[i].OOB = make([]byte, 128)\n\t\t\t}\n\t\t\tb.Run(fmt.Sprintf(\"Batch-%d\", M), func(b *testing.B) {\n\t\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t\tif _, err := cc.SendMsgs(wms, 0); err != nil {\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t\tif _, err := cc.RecvMsgs(rms, 0); err != nil 
{\n\t\t\t\t\t\tb.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/socket_test.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows\n\npackage socket_test\n\nimport (\n\t\"net\"\n\t\"runtime\"\n\t\"syscall\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc TestSocket(t *testing.T) {\n\tt.Run(\"Option\", func(t *testing.T) {\n\t\ttestSocketOption(t, &socket.Option{Level: syscall.SOL_SOCKET, Name: syscall.SO_RCVBUF, Len: 4})\n\t})\n}\n\nfunc testSocketOption(t *testing.T, so *socket.Option) {\n\tc, err := nettest.NewLocalPacketListener(\"udp\")\n\tif err != nil {\n\t\tt.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t}\n\tdefer c.Close()\n\tcc, err := socket.NewConn(c.(net.Conn))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tconst N = 2048\n\tif err := so.SetInt(cc, N); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tn, err := so.GetInt(cc)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif n < N {\n\t\tt.Fatalf(\"got %d; want greater than or equal to %d\", n, N)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nimport (\n\t\"encoding/binary\"\n\t\"unsafe\"\n)\n\nvar (\n\t// NativeEndian is the machine native endian implementation of\n\t// ByteOrder.\n\tNativeEndian binary.ByteOrder\n\n\tkernelAlign int\n)\n\nfunc init() {\n\ti := uint32(1)\n\tb := (*[4]byte)(unsafe.Pointer(&i))\n\tif b[0] == 1 {\n\t\tNativeEndian = binary.LittleEndian\n\t} else {\n\t\tNativeEndian = binary.BigEndian\n\t}\n\tkernelAlign = probeProtocolStack()\n}\n\nfunc roundup(l int) int {\n\treturn (l + kernelAlign - 1) & ^(kernelAlign - 1)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_bsd.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd openbsd\n\npackage socket\n\nimport \"errors\"\n\nfunc recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n\nfunc sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_bsdvar.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build freebsd netbsd openbsd\n\npackage socket\n\nimport \"unsafe\"\n\nfunc probeProtocolStack() int {\n\tvar p uintptr\n\treturn int(unsafe.Sizeof(p))\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_darwin.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nfunc probeProtocolStack() int { return 4 }\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_dragonfly.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nfunc probeProtocolStack() int { return 4 }\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_linux.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build linux,!s390x,!386\n\npackage socket\n\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nfunc probeProtocolStack() int {\n\tvar p uintptr\n\treturn int(unsafe.Sizeof(p))\n}\n\nfunc recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {\n\tn, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0)\n\treturn int(n), errnoErr(errno)\n}\n\nfunc sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {\n\tn, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0)\n\treturn int(n), errnoErr(errno)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_linux_386.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nfunc probeProtocolStack() int { return 4 }\n\nconst (\n\tsysSETSOCKOPT = 0xe\n\tsysGETSOCKOPT = 0xf\n\tsysSENDMSG    = 0x10\n\tsysRECVMSG    = 0x11\n\tsysRECVMMSG   = 0x13\n\tsysSENDMMSG   = 0x14\n)\n\nfunc socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno)\nfunc rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno)\n\nfunc getsockopt(s uintptr, level, name int, b []byte) (int, error) {\n\tl := uint32(len(b))\n\t_, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0)\n\treturn int(l), errnoErr(errno)\n}\n\nfunc setsockopt(s uintptr, level, name int, b []byte) error {\n\t_, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0)\n\treturn errnoErr(errno)\n}\n\nfunc recvmsg(s uintptr, h *msghdr, flags int) (int, error) {\n\tn, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0)\n\treturn int(n), errnoErr(errno)\n}\n\nfunc sendmsg(s uintptr, h *msghdr, flags int) (int, error) {\n\tn, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0)\n\treturn int(n), errnoErr(errno)\n}\n\nfunc recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {\n\tn, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0)\n\treturn int(n), errnoErr(errno)\n}\n\nfunc sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {\n\tn, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0)\n\treturn int(n), errnoErr(errno)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_linux_386.s",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n#include \"textflag.h\"\n\nTEXT\t·socketcall(SB),NOSPLIT,$0-36\n\tJMP\tsyscall·socketcall(SB)\n\nTEXT\t·rawsocketcall(SB),NOSPLIT,$0-36\n\tJMP\tsyscall·rawsocketcall(SB)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nconst (\n\tsysRECVMMSG = 0x12b\n\tsysSENDMMSG = 0x133\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_linux_arm.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nconst (\n\tsysRECVMMSG = 0x16d\n\tsysSENDMMSG = 0x176\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nconst (\n\tsysRECVMMSG = 0xf3\n\tsysSENDMMSG = 0x10d\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_linux_mips.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nconst (\n\tsysRECVMMSG = 0x10ef\n\tsysSENDMMSG = 0x10f7\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nconst (\n\tsysRECVMMSG = 0x14ae\n\tsysSENDMMSG = 0x14b6\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nconst (\n\tsysRECVMMSG = 0x14ae\n\tsysSENDMMSG = 0x14b6\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nconst (\n\tsysRECVMMSG = 0x10ef\n\tsysSENDMMSG = 0x10f7\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nconst (\n\tsysRECVMMSG = 0x157\n\tsysSENDMMSG = 0x15d\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nconst (\n\tsysRECVMMSG = 0x157\n\tsysSENDMMSG = 0x15d\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nfunc probeProtocolStack() int { return 8 }\n\nconst (\n\tsysSETSOCKOPT = 0xe\n\tsysGETSOCKOPT = 0xf\n\tsysSENDMSG    = 0x10\n\tsysRECVMSG    = 0x11\n\tsysRECVMMSG   = 0x13\n\tsysSENDMMSG   = 0x14\n)\n\nfunc socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno)\nfunc rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno)\n\nfunc getsockopt(s uintptr, level, name int, b []byte) (int, error) {\n\tl := uint32(len(b))\n\t_, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0)\n\treturn int(l), errnoErr(errno)\n}\n\nfunc setsockopt(s uintptr, level, name int, b []byte) error {\n\t_, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0)\n\treturn errnoErr(errno)\n}\n\nfunc recvmsg(s uintptr, h *msghdr, flags int) (int, error) {\n\tn, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0)\n\treturn int(n), errnoErr(errno)\n}\n\nfunc sendmsg(s uintptr, h *msghdr, flags int) (int, error) {\n\tn, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0)\n\treturn int(n), errnoErr(errno)\n}\n\nfunc recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {\n\tn, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0)\n\treturn int(n), errnoErr(errno)\n}\n\nfunc sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {\n\tn, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0)\n\treturn int(n), errnoErr(errno)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n#include \"textflag.h\"\n\nTEXT\t·socketcall(SB),NOSPLIT,$0-72\n\tJMP\tsyscall·socketcall(SB)\n\nTEXT\t·rawsocketcall(SB),NOSPLIT,$0-72\n\tJMP\tsyscall·rawsocketcall(SB)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_netbsd.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nconst (\n\tsysRECVMMSG = 0x1db\n\tsysSENDMMSG = 0x1dc\n)\n\nfunc recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {\n\tn, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0)\n\treturn int(n), errnoErr(errno)\n}\n\nfunc sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {\n\tn, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0)\n\treturn int(n), errnoErr(errno)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_posix.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.9\n// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows\n\npackage socket\n\nimport (\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"net\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n)\n\nfunc marshalInetAddr(a net.Addr) []byte {\n\tswitch a := a.(type) {\n\tcase *net.TCPAddr:\n\t\treturn marshalSockaddr(a.IP, a.Port, a.Zone)\n\tcase *net.UDPAddr:\n\t\treturn marshalSockaddr(a.IP, a.Port, a.Zone)\n\tcase *net.IPAddr:\n\t\treturn marshalSockaddr(a.IP, 0, a.Zone)\n\tdefault:\n\t\treturn nil\n\t}\n}\n\nfunc marshalSockaddr(ip net.IP, port int, zone string) []byte {\n\tif ip4 := ip.To4(); ip4 != nil {\n\t\tb := make([]byte, sizeofSockaddrInet)\n\t\tswitch runtime.GOOS {\n\t\tcase \"android\", \"linux\", \"solaris\", \"windows\":\n\t\t\tNativeEndian.PutUint16(b[:2], uint16(sysAF_INET))\n\t\tdefault:\n\t\t\tb[0] = sizeofSockaddrInet\n\t\t\tb[1] = sysAF_INET\n\t\t}\n\t\tbinary.BigEndian.PutUint16(b[2:4], uint16(port))\n\t\tcopy(b[4:8], ip4)\n\t\treturn b\n\t}\n\tif ip6 := ip.To16(); ip6 != nil && ip.To4() == nil {\n\t\tb := make([]byte, sizeofSockaddrInet6)\n\t\tswitch runtime.GOOS {\n\t\tcase \"android\", \"linux\", \"solaris\", \"windows\":\n\t\t\tNativeEndian.PutUint16(b[:2], uint16(sysAF_INET6))\n\t\tdefault:\n\t\t\tb[0] = sizeofSockaddrInet6\n\t\t\tb[1] = sysAF_INET6\n\t\t}\n\t\tbinary.BigEndian.PutUint16(b[2:4], uint16(port))\n\t\tcopy(b[8:24], ip6)\n\t\tif zone != \"\" {\n\t\t\tNativeEndian.PutUint32(b[24:28], uint32(zoneCache.index(zone)))\n\t\t}\n\t\treturn b\n\t}\n\treturn nil\n}\n\nfunc parseInetAddr(b []byte, network string) (net.Addr, error) {\n\tif len(b) < 2 {\n\t\treturn nil, errors.New(\"invalid address\")\n\t}\n\tvar af int\n\tswitch runtime.GOOS {\n\tcase \"android\", \"linux\", \"solaris\", \"windows\":\n\t\taf = 
int(NativeEndian.Uint16(b[:2]))\n\tdefault:\n\t\taf = int(b[1])\n\t}\n\tvar ip net.IP\n\tvar zone string\n\tif af == sysAF_INET {\n\t\tif len(b) < sizeofSockaddrInet {\n\t\t\treturn nil, errors.New(\"short address\")\n\t\t}\n\t\tip = make(net.IP, net.IPv4len)\n\t\tcopy(ip, b[4:8])\n\t}\n\tif af == sysAF_INET6 {\n\t\tif len(b) < sizeofSockaddrInet6 {\n\t\t\treturn nil, errors.New(\"short address\")\n\t\t}\n\t\tip = make(net.IP, net.IPv6len)\n\t\tcopy(ip, b[8:24])\n\t\tif id := int(NativeEndian.Uint32(b[24:28])); id > 0 {\n\t\t\tzone = zoneCache.name(id)\n\t\t}\n\t}\n\tswitch network {\n\tcase \"tcp\", \"tcp4\", \"tcp6\":\n\t\treturn &net.TCPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil\n\tcase \"udp\", \"udp4\", \"udp6\":\n\t\treturn &net.UDPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil\n\tdefault:\n\t\treturn &net.IPAddr{IP: ip, Zone: zone}, nil\n\t}\n}\n\n// An ipv6ZoneCache represents a cache holding partial network\n// interface information. 
It is used for reducing the cost of IPv6\n// addressing scope zone resolution.\n//\n// Multiple names sharing the index are managed by first-come\n// first-served basis for consistency.\ntype ipv6ZoneCache struct {\n\tsync.RWMutex                // guard the following\n\tlastFetched  time.Time      // last time routing information was fetched\n\ttoIndex      map[string]int // interface name to its index\n\ttoName       map[int]string // interface index to its name\n}\n\nvar zoneCache = ipv6ZoneCache{\n\ttoIndex: make(map[string]int),\n\ttoName:  make(map[int]string),\n}\n\nfunc (zc *ipv6ZoneCache) update(ift []net.Interface) {\n\tzc.Lock()\n\tdefer zc.Unlock()\n\tnow := time.Now()\n\tif zc.lastFetched.After(now.Add(-60 * time.Second)) {\n\t\treturn\n\t}\n\tzc.lastFetched = now\n\tif len(ift) == 0 {\n\t\tvar err error\n\t\tif ift, err = net.Interfaces(); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tzc.toIndex = make(map[string]int, len(ift))\n\tzc.toName = make(map[int]string, len(ift))\n\tfor _, ifi := range ift {\n\t\tzc.toIndex[ifi.Name] = ifi.Index\n\t\tif _, ok := zc.toName[ifi.Index]; !ok {\n\t\t\tzc.toName[ifi.Index] = ifi.Name\n\t\t}\n\t}\n}\n\nfunc (zc *ipv6ZoneCache) name(zone int) string {\n\tzoneCache.update(nil)\n\tzoneCache.RLock()\n\tdefer zoneCache.RUnlock()\n\tname, ok := zoneCache.toName[zone]\n\tif !ok {\n\t\tname = strconv.Itoa(zone)\n\t}\n\treturn name\n}\n\nfunc (zc *ipv6ZoneCache) index(zone string) int {\n\tzoneCache.update(nil)\n\tzoneCache.RLock()\n\tdefer zoneCache.RUnlock()\n\tindex, ok := zoneCache.toIndex[zone]\n\tif !ok {\n\t\tindex, _ = strconv.Atoi(zone)\n\t}\n\treturn index\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_solaris.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nimport (\n\t\"errors\"\n\t\"runtime\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nfunc probeProtocolStack() int {\n\tswitch runtime.GOARCH {\n\tcase \"amd64\":\n\t\treturn 4\n\tdefault:\n\t\tvar p uintptr\n\t\treturn int(unsafe.Sizeof(p))\n\t}\n}\n\n//go:cgo_import_dynamic libc___xnet_getsockopt __xnet_getsockopt \"libsocket.so\"\n//go:cgo_import_dynamic libc_setsockopt setsockopt \"libsocket.so\"\n//go:cgo_import_dynamic libc___xnet_recvmsg __xnet_recvmsg \"libsocket.so\"\n//go:cgo_import_dynamic libc___xnet_sendmsg __xnet_sendmsg \"libsocket.so\"\n\n//go:linkname procGetsockopt libc___xnet_getsockopt\n//go:linkname procSetsockopt libc_setsockopt\n//go:linkname procRecvmsg libc___xnet_recvmsg\n//go:linkname procSendmsg libc___xnet_sendmsg\n\nvar (\n\tprocGetsockopt uintptr\n\tprocSetsockopt uintptr\n\tprocRecvmsg    uintptr\n\tprocSendmsg    uintptr\n)\n\nfunc sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno)\nfunc rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno)\n\nfunc getsockopt(s uintptr, level, name int, b []byte) (int, error) {\n\tl := uint32(len(b))\n\t_, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procGetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0)\n\treturn int(l), errnoErr(errno)\n}\n\nfunc setsockopt(s uintptr, level, name int, b []byte) error {\n\t_, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0)\n\treturn errnoErr(errno)\n}\n\nfunc recvmsg(s uintptr, h *msghdr, flags int) (int, error) {\n\tn, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procRecvmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 
0)\n\treturn int(n), errnoErr(errno)\n}\n\nfunc sendmsg(s uintptr, h *msghdr, flags int) (int, error) {\n\tn, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSendmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0)\n\treturn int(n), errnoErr(errno)\n}\n\nfunc recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n\nfunc sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n#include \"textflag.h\"\n\nTEXT\t·sysvicall6(SB),NOSPLIT,$0-88\n\tJMP\tsyscall·sysvicall6(SB)\n\nTEXT\t·rawSysvicall6(SB),NOSPLIT,$0-88\n\tJMP\tsyscall·rawSysvicall6(SB)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_stub.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows\n\npackage socket\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"runtime\"\n\t\"unsafe\"\n)\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0xa\n\n\tsysSOCK_RAW = 0x3\n)\n\nfunc probeProtocolStack() int {\n\tswitch runtime.GOARCH {\n\tcase \"amd64p32\", \"mips64p32\":\n\t\treturn 4\n\tdefault:\n\t\tvar p uintptr\n\t\treturn int(unsafe.Sizeof(p))\n\t}\n}\n\nfunc marshalInetAddr(ip net.IP, port int, zone string) []byte {\n\treturn nil\n}\n\nfunc parseInetAddr(b []byte, network string) (net.Addr, error) {\n\treturn nil, errors.New(\"not implemented\")\n}\n\nfunc getsockopt(s uintptr, level, name int, b []byte) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n\nfunc setsockopt(s uintptr, level, name int, b []byte) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc recvmsg(s uintptr, h *msghdr, flags int) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n\nfunc sendmsg(s uintptr, h *msghdr, flags int) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n\nfunc recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n\nfunc sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_unix.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd linux,!s390x,!386 netbsd openbsd\n\npackage socket\n\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nfunc getsockopt(s uintptr, level, name int, b []byte) (int, error) {\n\tl := uint32(len(b))\n\t_, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0)\n\treturn int(l), errnoErr(errno)\n}\n\nfunc setsockopt(s uintptr, level, name int, b []byte) error {\n\t_, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0)\n\treturn errnoErr(errno)\n}\n\nfunc recvmsg(s uintptr, h *msghdr, flags int) (int, error) {\n\tn, _, errno := syscall.Syscall(syscall.SYS_RECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags))\n\treturn int(n), errnoErr(errno)\n}\n\nfunc sendmsg(s uintptr, h *msghdr, flags int) (int, error) {\n\tn, _, errno := syscall.Syscall(syscall.SYS_SENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags))\n\treturn int(n), errnoErr(errno)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/sys_windows.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage socket\n\nimport (\n\t\"errors\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nfunc probeProtocolStack() int {\n\tvar p uintptr\n\treturn int(unsafe.Sizeof(p))\n}\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0x17\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]uint8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n\nfunc getsockopt(s uintptr, level, name int, b []byte) (int, error) {\n\tl := uint32(len(b))\n\terr := syscall.Getsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), (*int32)(unsafe.Pointer(&l)))\n\treturn int(l), err\n}\n\nfunc setsockopt(s uintptr, level, name int, b []byte) error {\n\treturn syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), int32(len(b)))\n}\n\nfunc recvmsg(s uintptr, h *msghdr, flags int) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n\nfunc sendmsg(s uintptr, h *msghdr, flags int) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n\nfunc recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n\nfunc sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_darwin.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0x1e\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint32\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tIov        *iovec\n\tIovlen     int32\n\tControl    *byte\n\tControllen uint32\n\tFlags      int32\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tLen    uint8\n\tFamily uint8\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x8\n\tsizeofMsghdr  = 0x1c\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_darwin.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0x1e\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint64\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tPad_cgo_0  [4]byte\n\tIov        *iovec\n\tIovlen     int32\n\tPad_cgo_1  [4]byte\n\tControl    *byte\n\tControllen uint32\n\tFlags      int32\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tLen    uint8\n\tFamily uint8\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x10\n\tsizeofMsghdr  = 0x30\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_darwin.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0x1e\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint32\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tIov        *iovec\n\tIovlen     int32\n\tControl    *byte\n\tControllen uint32\n\tFlags      int32\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tLen    uint8\n\tFamily uint8\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x8\n\tsizeofMsghdr  = 0x1c\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_dragonfly.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0x1c\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint64\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tPad_cgo_0  [4]byte\n\tIov        *iovec\n\tIovlen     int32\n\tPad_cgo_1  [4]byte\n\tControl    *byte\n\tControllen uint32\n\tFlags      int32\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tLen    uint8\n\tFamily uint8\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x10\n\tsizeofMsghdr  = 0x30\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_freebsd.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0x1c\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint32\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tIov        *iovec\n\tIovlen     int32\n\tControl    *byte\n\tControllen uint32\n\tFlags      int32\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tLen    uint8\n\tFamily uint8\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x8\n\tsizeofMsghdr  = 0x1c\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_freebsd.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0x1c\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint64\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tPad_cgo_0  [4]byte\n\tIov        *iovec\n\tIovlen     int32\n\tPad_cgo_1  [4]byte\n\tControl    *byte\n\tControllen uint32\n\tFlags      int32\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tLen    uint8\n\tFamily uint8\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x10\n\tsizeofMsghdr  = 0x30\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_freebsd.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0x1c\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint32\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tIov        *iovec\n\tIovlen     int32\n\tControl    *byte\n\tControllen uint32\n\tFlags      int32\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tLen    uint8\n\tFamily uint8\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x8\n\tsizeofMsghdr  = 0x1c\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_linux_386.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0xa\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint32\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tIov        *iovec\n\tIovlen     uint32\n\tControl    *byte\n\tControllen uint32\n\tFlags      int32\n}\n\ntype mmsghdr struct {\n\tHdr msghdr\n\tLen uint32\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x8\n\tsizeofMsghdr  = 0x1c\n\tsizeofMmsghdr = 0x20\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0xa\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint64\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tPad_cgo_0  [4]byte\n\tIov        *iovec\n\tIovlen     uint64\n\tControl    *byte\n\tControllen uint64\n\tFlags      int32\n\tPad_cgo_1  [4]byte\n}\n\ntype mmsghdr struct {\n\tHdr       msghdr\n\tLen       uint32\n\tPad_cgo_0 [4]byte\n}\n\ntype cmsghdr struct {\n\tLen   uint64\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x10\n\tsizeofMsghdr  = 0x38\n\tsizeofMmsghdr = 0x40\n\tsizeofCmsghdr = 0x10\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0xa\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint32\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tIov        *iovec\n\tIovlen     uint32\n\tControl    *byte\n\tControllen uint32\n\tFlags      int32\n}\n\ntype mmsghdr struct {\n\tHdr msghdr\n\tLen uint32\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x8\n\tsizeofMsghdr  = 0x1c\n\tsizeofMmsghdr = 0x20\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0xa\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint64\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tPad_cgo_0  [4]byte\n\tIov        *iovec\n\tIovlen     uint64\n\tControl    *byte\n\tControllen uint64\n\tFlags      int32\n\tPad_cgo_1  [4]byte\n}\n\ntype mmsghdr struct {\n\tHdr       msghdr\n\tLen       uint32\n\tPad_cgo_0 [4]byte\n}\n\ntype cmsghdr struct {\n\tLen   uint64\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x10\n\tsizeofMsghdr  = 0x38\n\tsizeofMmsghdr = 0x40\n\tsizeofCmsghdr = 0x10\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0xa\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint32\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tIov        *iovec\n\tIovlen     uint32\n\tControl    *byte\n\tControllen uint32\n\tFlags      int32\n}\n\ntype mmsghdr struct {\n\tHdr msghdr\n\tLen uint32\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x8\n\tsizeofMsghdr  = 0x1c\n\tsizeofMmsghdr = 0x20\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0xa\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint64\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tPad_cgo_0  [4]byte\n\tIov        *iovec\n\tIovlen     uint64\n\tControl    *byte\n\tControllen uint64\n\tFlags      int32\n\tPad_cgo_1  [4]byte\n}\n\ntype mmsghdr struct {\n\tHdr       msghdr\n\tLen       uint32\n\tPad_cgo_0 [4]byte\n}\n\ntype cmsghdr struct {\n\tLen   uint64\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x10\n\tsizeofMsghdr  = 0x38\n\tsizeofMmsghdr = 0x40\n\tsizeofCmsghdr = 0x10\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0xa\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint64\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tPad_cgo_0  [4]byte\n\tIov        *iovec\n\tIovlen     uint64\n\tControl    *byte\n\tControllen uint64\n\tFlags      int32\n\tPad_cgo_1  [4]byte\n}\n\ntype mmsghdr struct {\n\tHdr       msghdr\n\tLen       uint32\n\tPad_cgo_0 [4]byte\n}\n\ntype cmsghdr struct {\n\tLen   uint64\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x10\n\tsizeofMsghdr  = 0x38\n\tsizeofMmsghdr = 0x40\n\tsizeofCmsghdr = 0x10\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0xa\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint32\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tIov        *iovec\n\tIovlen     uint32\n\tControl    *byte\n\tControllen uint32\n\tFlags      int32\n}\n\ntype mmsghdr struct {\n\tHdr msghdr\n\tLen uint32\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x8\n\tsizeofMsghdr  = 0x1c\n\tsizeofMmsghdr = 0x20\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0xa\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint64\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tPad_cgo_0  [4]byte\n\tIov        *iovec\n\tIovlen     uint64\n\tControl    *byte\n\tControllen uint64\n\tFlags      int32\n\tPad_cgo_1  [4]byte\n}\n\ntype mmsghdr struct {\n\tHdr       msghdr\n\tLen       uint32\n\tPad_cgo_0 [4]byte\n}\n\ntype cmsghdr struct {\n\tLen   uint64\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x10\n\tsizeofMsghdr  = 0x38\n\tsizeofMmsghdr = 0x40\n\tsizeofCmsghdr = 0x10\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0xa\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint64\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tPad_cgo_0  [4]byte\n\tIov        *iovec\n\tIovlen     uint64\n\tControl    *byte\n\tControllen uint64\n\tFlags      int32\n\tPad_cgo_1  [4]byte\n}\n\ntype mmsghdr struct {\n\tHdr       msghdr\n\tLen       uint32\n\tPad_cgo_0 [4]byte\n}\n\ntype cmsghdr struct {\n\tLen   uint64\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x10\n\tsizeofMsghdr  = 0x38\n\tsizeofMmsghdr = 0x40\n\tsizeofCmsghdr = 0x10\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0xa\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint64\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tPad_cgo_0  [4]byte\n\tIov        *iovec\n\tIovlen     uint64\n\tControl    *byte\n\tControllen uint64\n\tFlags      int32\n\tPad_cgo_1  [4]byte\n}\n\ntype mmsghdr struct {\n\tHdr       msghdr\n\tLen       uint32\n\tPad_cgo_0 [4]byte\n}\n\ntype cmsghdr struct {\n\tLen   uint64\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x10\n\tsizeofMsghdr  = 0x38\n\tsizeofMmsghdr = 0x40\n\tsizeofCmsghdr = 0x10\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_netbsd.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0x18\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint32\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tIov        *iovec\n\tIovlen     int32\n\tControl    *byte\n\tControllen uint32\n\tFlags      int32\n}\n\ntype mmsghdr struct {\n\tHdr msghdr\n\tLen uint32\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tLen    uint8\n\tFamily uint8\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x8\n\tsizeofMsghdr  = 0x1c\n\tsizeofMmsghdr = 0x20\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_netbsd.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0x18\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint64\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tPad_cgo_0  [4]byte\n\tIov        *iovec\n\tIovlen     int32\n\tPad_cgo_1  [4]byte\n\tControl    *byte\n\tControllen uint32\n\tFlags      int32\n}\n\ntype mmsghdr struct {\n\tHdr       msghdr\n\tLen       uint32\n\tPad_cgo_0 [4]byte\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tLen    uint8\n\tFamily uint8\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x10\n\tsizeofMsghdr  = 0x30\n\tsizeofMmsghdr = 0x40\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_netbsd.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0x18\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint32\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tIov        *iovec\n\tIovlen     int32\n\tControl    *byte\n\tControllen uint32\n\tFlags      int32\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tLen    uint8\n\tFamily uint8\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x8\n\tsizeofMsghdr  = 0x1c\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_openbsd.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0x18\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint32\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tIov        *iovec\n\tIovlen     uint32\n\tControl    *byte\n\tControllen uint32\n\tFlags      int32\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tLen    uint8\n\tFamily uint8\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x8\n\tsizeofMsghdr  = 0x1c\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_openbsd.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0x18\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint64\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tPad_cgo_0  [4]byte\n\tIov        *iovec\n\tIovlen     uint32\n\tPad_cgo_1  [4]byte\n\tControl    *byte\n\tControllen uint32\n\tFlags      int32\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tLen    uint8\n\tFamily uint8\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x10\n\tsizeofMsghdr  = 0x30\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_openbsd.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0x18\n\n\tsysSOCK_RAW = 0x3\n)\n\ntype iovec struct {\n\tBase *byte\n\tLen  uint32\n}\n\ntype msghdr struct {\n\tName       *byte\n\tNamelen    uint32\n\tIov        *iovec\n\tIovlen     uint32\n\tControl    *byte\n\tControllen uint32\n\tFlags      int32\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tLen    uint8\n\tFamily uint8\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x8\n\tsizeofMsghdr  = 0x1c\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_solaris.go\n\npackage socket\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0x1a\n\n\tsysSOCK_RAW = 0x4\n)\n\ntype iovec struct {\n\tBase *int8\n\tLen  uint64\n}\n\ntype msghdr struct {\n\tName         *byte\n\tNamelen      uint32\n\tPad_cgo_0    [4]byte\n\tIov          *iovec\n\tIovlen       int32\n\tPad_cgo_1    [4]byte\n\tAccrights    *int8\n\tAccrightslen int32\n\tPad_cgo_2    [4]byte\n}\n\ntype cmsghdr struct {\n\tLen   uint32\n\tLevel int32\n\tType  int32\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype sockaddrInet6 struct {\n\tFamily         uint16\n\tPort           uint16\n\tFlowinfo       uint32\n\tAddr           [16]byte /* in6_addr */\n\tScope_id       uint32\n\tX__sin6_src_id uint32\n}\n\nconst (\n\tsizeofIovec   = 0x10\n\tsizeofMsghdr  = 0x30\n\tsizeofCmsghdr = 0xc\n\n\tsizeofSockaddrInet  = 0x10\n\tsizeofSockaddrInet6 = 0x20\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/timeseries/timeseries.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package timeseries implements a time series structure for stats collection.\npackage timeseries // import \"golang.org/x/net/internal/timeseries\"\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n)\n\nconst (\n\ttimeSeriesNumBuckets       = 64\n\tminuteHourSeriesNumBuckets = 60\n)\n\nvar timeSeriesResolutions = []time.Duration{\n\t1 * time.Second,\n\t10 * time.Second,\n\t1 * time.Minute,\n\t10 * time.Minute,\n\t1 * time.Hour,\n\t6 * time.Hour,\n\t24 * time.Hour,          // 1 day\n\t7 * 24 * time.Hour,      // 1 week\n\t4 * 7 * 24 * time.Hour,  // 4 weeks\n\t16 * 7 * 24 * time.Hour, // 16 weeks\n}\n\nvar minuteHourSeriesResolutions = []time.Duration{\n\t1 * time.Second,\n\t1 * time.Minute,\n}\n\n// An Observable is a kind of data that can be aggregated in a time series.\ntype Observable interface {\n\tMultiply(ratio float64)    // Multiplies the data in self by a given ratio\n\tAdd(other Observable)      // Adds the data from a different observation to self\n\tClear()                    // Clears the observation so it can be reused.\n\tCopyFrom(other Observable) // Copies the contents of a given observation to self\n}\n\n// Float attaches the methods of Observable to a float64.\ntype Float float64\n\n// NewFloat returns a Float.\nfunc NewFloat() Observable {\n\tf := Float(0)\n\treturn &f\n}\n\n// String returns the float as a string.\nfunc (f *Float) String() string { return fmt.Sprintf(\"%g\", f.Value()) }\n\n// Value returns the float's value.\nfunc (f *Float) Value() float64 { return float64(*f) }\n\nfunc (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }\n\nfunc (f *Float) Add(other Observable) {\n\to := other.(*Float)\n\t*f += *o\n}\n\nfunc (f *Float) Clear() { *f = 0 }\n\nfunc (f *Float) CopyFrom(other Observable) {\n\to := other.(*Float)\n\t*f = *o\n}\n\n// A Clock tells 
the current time.\ntype Clock interface {\n\tTime() time.Time\n}\n\ntype defaultClock int\n\nvar defaultClockInstance defaultClock\n\nfunc (defaultClock) Time() time.Time { return time.Now() }\n\n// Information kept per level. Each level consists of a circular list of\n// observations. The start of the level may be derived from end and the\n// len(buckets) * sizeInMillis.\ntype tsLevel struct {\n\toldest   int               // index to oldest bucketed Observable\n\tnewest   int               // index to newest bucketed Observable\n\tend      time.Time         // end timestamp for this level\n\tsize     time.Duration     // duration of the bucketed Observable\n\tbuckets  []Observable      // collections of observations\n\tprovider func() Observable // used for creating new Observable\n}\n\nfunc (l *tsLevel) Clear() {\n\tl.oldest = 0\n\tl.newest = len(l.buckets) - 1\n\tl.end = time.Time{}\n\tfor i := range l.buckets {\n\t\tif l.buckets[i] != nil {\n\t\t\tl.buckets[i].Clear()\n\t\t\tl.buckets[i] = nil\n\t\t}\n\t}\n}\n\nfunc (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {\n\tl.size = size\n\tl.provider = f\n\tl.buckets = make([]Observable, numBuckets)\n}\n\n// Keeps a sequence of levels. Each level is responsible for storing data at\n// a given resolution. For example, the first level stores data at a one\n// minute resolution while the second level stores data at a one hour\n// resolution.\n\n// Each level is represented by a sequence of buckets. Each bucket spans an\n// interval equal to the resolution of the level. 
New observations are added\n// to the last bucket.\ntype timeSeries struct {\n\tprovider    func() Observable // make more Observable\n\tnumBuckets  int               // number of buckets in each level\n\tlevels      []*tsLevel        // levels of bucketed Observable\n\tlastAdd     time.Time         // time of last Observable tracked\n\ttotal       Observable        // convenient aggregation of all Observable\n\tclock       Clock             // Clock for getting current time\n\tpending     Observable        // observations not yet bucketed\n\tpendingTime time.Time         // what time are we keeping in pending\n\tdirty       bool              // if there are pending observations\n}\n\n// init initializes a level according to the supplied criteria.\nfunc (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {\n\tts.provider = f\n\tts.numBuckets = numBuckets\n\tts.clock = clock\n\tts.levels = make([]*tsLevel, len(resolutions))\n\n\tfor i := range resolutions {\n\t\tif i > 0 && resolutions[i-1] >= resolutions[i] {\n\t\t\tlog.Print(\"timeseries: resolutions must be monotonically increasing\")\n\t\t\tbreak\n\t\t}\n\t\tnewLevel := new(tsLevel)\n\t\tnewLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)\n\t\tts.levels[i] = newLevel\n\t}\n\n\tts.Clear()\n}\n\n// Clear removes all observations from the time series.\nfunc (ts *timeSeries) Clear() {\n\tts.lastAdd = time.Time{}\n\tts.total = ts.resetObservation(ts.total)\n\tts.pending = ts.resetObservation(ts.pending)\n\tts.pendingTime = time.Time{}\n\tts.dirty = false\n\n\tfor i := range ts.levels {\n\t\tts.levels[i].Clear()\n\t}\n}\n\n// Add records an observation at the current time.\nfunc (ts *timeSeries) Add(observation Observable) {\n\tts.AddWithTime(observation, ts.clock.Time())\n}\n\n// AddWithTime records an observation at the specified time.\nfunc (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {\n\n\tsmallBucketDuration := 
ts.levels[0].size\n\n\tif t.After(ts.lastAdd) {\n\t\tts.lastAdd = t\n\t}\n\n\tif t.After(ts.pendingTime) {\n\t\tts.advance(t)\n\t\tts.mergePendingUpdates()\n\t\tts.pendingTime = ts.levels[0].end\n\t\tts.pending.CopyFrom(observation)\n\t\tts.dirty = true\n\t} else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {\n\t\t// The observation is close enough to go into the pending bucket.\n\t\t// This compensates for clock skewing and small scheduling delays\n\t\t// by letting the update stay in the fast path.\n\t\tts.pending.Add(observation)\n\t\tts.dirty = true\n\t} else {\n\t\tts.mergeValue(observation, t)\n\t}\n}\n\n// mergeValue inserts the observation at the specified time in the past into all levels.\nfunc (ts *timeSeries) mergeValue(observation Observable, t time.Time) {\n\tfor _, level := range ts.levels {\n\t\tindex := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)\n\t\tif 0 <= index && index < ts.numBuckets {\n\t\t\tbucketNumber := (level.oldest + index) % ts.numBuckets\n\t\t\tif level.buckets[bucketNumber] == nil {\n\t\t\t\tlevel.buckets[bucketNumber] = level.provider()\n\t\t\t}\n\t\t\tlevel.buckets[bucketNumber].Add(observation)\n\t\t}\n\t}\n\tts.total.Add(observation)\n}\n\n// mergePendingUpdates applies the pending updates into all levels.\nfunc (ts *timeSeries) mergePendingUpdates() {\n\tif ts.dirty {\n\t\tts.mergeValue(ts.pending, ts.pendingTime)\n\t\tts.pending = ts.resetObservation(ts.pending)\n\t\tts.dirty = false\n\t}\n}\n\n// advance cycles the buckets at each level until the latest bucket in\n// each level can hold the time specified.\nfunc (ts *timeSeries) advance(t time.Time) {\n\tif !t.After(ts.levels[0].end) {\n\t\treturn\n\t}\n\tfor i := 0; i < len(ts.levels); i++ {\n\t\tlevel := ts.levels[i]\n\t\tif !level.end.Before(t) {\n\t\t\tbreak\n\t\t}\n\n\t\t// If the time is sufficiently far, just clear the level and advance\n\t\t// directly.\n\t\tif !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {\n\t\t\tfor _, b 
:= range level.buckets {\n\t\t\t\tts.resetObservation(b)\n\t\t\t}\n\t\t\tlevel.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())\n\t\t}\n\n\t\tfor t.After(level.end) {\n\t\t\tlevel.end = level.end.Add(level.size)\n\t\t\tlevel.newest = level.oldest\n\t\t\tlevel.oldest = (level.oldest + 1) % ts.numBuckets\n\t\t\tts.resetObservation(level.buckets[level.newest])\n\t\t}\n\n\t\tt = level.end\n\t}\n}\n\n// Latest returns the sum of the num latest buckets from the level.\nfunc (ts *timeSeries) Latest(level, num int) Observable {\n\tnow := ts.clock.Time()\n\tif ts.levels[0].end.Before(now) {\n\t\tts.advance(now)\n\t}\n\n\tts.mergePendingUpdates()\n\n\tresult := ts.provider()\n\tl := ts.levels[level]\n\tindex := l.newest\n\n\tfor i := 0; i < num; i++ {\n\t\tif l.buckets[index] != nil {\n\t\t\tresult.Add(l.buckets[index])\n\t\t}\n\t\tif index == 0 {\n\t\t\tindex = ts.numBuckets\n\t\t}\n\t\tindex--\n\t}\n\n\treturn result\n}\n\n// LatestBuckets returns a copy of the num latest buckets from level.\nfunc (ts *timeSeries) LatestBuckets(level, num int) []Observable {\n\tif level < 0 || level > len(ts.levels) {\n\t\tlog.Print(\"timeseries: bad level argument: \", level)\n\t\treturn nil\n\t}\n\tif num < 0 || num >= ts.numBuckets {\n\t\tlog.Print(\"timeseries: bad num argument: \", num)\n\t\treturn nil\n\t}\n\n\tresults := make([]Observable, num)\n\tnow := ts.clock.Time()\n\tif ts.levels[0].end.Before(now) {\n\t\tts.advance(now)\n\t}\n\n\tts.mergePendingUpdates()\n\n\tl := ts.levels[level]\n\tindex := l.newest\n\n\tfor i := 0; i < num; i++ {\n\t\tresult := ts.provider()\n\t\tresults[i] = result\n\t\tif l.buckets[index] != nil {\n\t\t\tresult.CopyFrom(l.buckets[index])\n\t\t}\n\n\t\tif index == 0 {\n\t\t\tindex = ts.numBuckets\n\t\t}\n\t\tindex -= 1\n\t}\n\treturn results\n}\n\n// ScaleBy updates observations by scaling by factor.\nfunc (ts *timeSeries) ScaleBy(factor float64) {\n\tfor _, l := range ts.levels {\n\t\tfor i := 0; i < ts.numBuckets; i++ 
{\n\t\t\tl.buckets[i].Multiply(factor)\n\t\t}\n\t}\n\n\tts.total.Multiply(factor)\n\tts.pending.Multiply(factor)\n}\n\n// Range returns the sum of observations added over the specified time range.\n// If start or finish times don't fall on bucket boundaries of the same\n// level, then return values are approximate answers.\nfunc (ts *timeSeries) Range(start, finish time.Time) Observable {\n\treturn ts.ComputeRange(start, finish, 1)[0]\n}\n\n// Recent returns the sum of observations from the last delta.\nfunc (ts *timeSeries) Recent(delta time.Duration) Observable {\n\tnow := ts.clock.Time()\n\treturn ts.Range(now.Add(-delta), now)\n}\n\n// Total returns the total of all observations.\nfunc (ts *timeSeries) Total() Observable {\n\tts.mergePendingUpdates()\n\treturn ts.total\n}\n\n// ComputeRange computes a specified number of values into a slice using\n// the observations recorded over the specified time period. The return\n// values are approximate if the start or finish times don't fall on the\n// bucket boundaries at the same level or if the number of buckets spanning\n// the range is not an integral multiple of num.\nfunc (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {\n\tif start.After(finish) {\n\t\tlog.Printf(\"timeseries: start > finish, %v>%v\", start, finish)\n\t\treturn nil\n\t}\n\n\tif num < 0 {\n\t\tlog.Printf(\"timeseries: num < 0, %v\", num)\n\t\treturn nil\n\t}\n\n\tresults := make([]Observable, num)\n\n\tfor _, l := range ts.levels {\n\t\tif !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {\n\t\t\tts.extract(l, start, finish, num, results)\n\t\t\treturn results\n\t\t}\n\t}\n\n\t// Failed to find a level that covers the desired range. 
So just\n\t// extract from the last level, even if it doesn't cover the entire\n\t// desired range.\n\tts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)\n\n\treturn results\n}\n\n// RecentList returns the specified number of values in slice over the most\n// recent time period of the specified range.\nfunc (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {\n\tif delta < 0 {\n\t\treturn nil\n\t}\n\tnow := ts.clock.Time()\n\treturn ts.ComputeRange(now.Add(-delta), now, num)\n}\n\n// extract returns a slice of specified number of observations from a given\n// level over a given range.\nfunc (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {\n\tts.mergePendingUpdates()\n\n\tsrcInterval := l.size\n\tdstInterval := finish.Sub(start) / time.Duration(num)\n\tdstStart := start\n\tsrcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))\n\n\tsrcIndex := 0\n\n\t// Where should scanning start?\n\tif dstStart.After(srcStart) {\n\t\tadvance := dstStart.Sub(srcStart) / srcInterval\n\t\tsrcIndex += int(advance)\n\t\tsrcStart = srcStart.Add(advance * srcInterval)\n\t}\n\n\t// The i'th value is computed as show below.\n\t// interval = (finish/start)/num\n\t// i'th value = sum of observation in range\n\t//   [ start + i       * interval,\n\t//     start + (i + 1) * interval )\n\tfor i := 0; i < num; i++ {\n\t\tresults[i] = ts.resetObservation(results[i])\n\t\tdstEnd := dstStart.Add(dstInterval)\n\t\tfor srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {\n\t\t\tsrcEnd := srcStart.Add(srcInterval)\n\t\t\tif srcEnd.After(ts.lastAdd) {\n\t\t\t\tsrcEnd = ts.lastAdd\n\t\t\t}\n\n\t\t\tif !srcEnd.Before(dstStart) {\n\t\t\t\tsrcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]\n\t\t\t\tif !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {\n\t\t\t\t\t// dst completely contains src.\n\t\t\t\t\tif srcValue != nil {\n\t\t\t\t\t\tresults[i].Add(srcValue)\n\t\t\t\t\t}\n\t\t\t\t} else 
{\n\t\t\t\t\t// dst partially overlaps src.\n\t\t\t\t\toverlapStart := maxTime(srcStart, dstStart)\n\t\t\t\t\toverlapEnd := minTime(srcEnd, dstEnd)\n\t\t\t\t\tbase := srcEnd.Sub(srcStart)\n\t\t\t\t\tfraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()\n\n\t\t\t\t\tused := ts.provider()\n\t\t\t\t\tif srcValue != nil {\n\t\t\t\t\t\tused.CopyFrom(srcValue)\n\t\t\t\t\t}\n\t\t\t\t\tused.Multiply(fraction)\n\t\t\t\t\tresults[i].Add(used)\n\t\t\t\t}\n\n\t\t\t\tif srcEnd.After(dstEnd) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tsrcIndex++\n\t\t\tsrcStart = srcStart.Add(srcInterval)\n\t\t}\n\t\tdstStart = dstStart.Add(dstInterval)\n\t}\n}\n\n// resetObservation clears the content so the struct may be reused.\nfunc (ts *timeSeries) resetObservation(observation Observable) Observable {\n\tif observation == nil {\n\t\tobservation = ts.provider()\n\t} else {\n\t\tobservation.Clear()\n\t}\n\treturn observation\n}\n\n// TimeSeries tracks data at granularities from 1 second to 16 weeks.\ntype TimeSeries struct {\n\ttimeSeries\n}\n\n// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.\nfunc NewTimeSeries(f func() Observable) *TimeSeries {\n\treturn NewTimeSeriesWithClock(f, defaultClockInstance)\n}\n\n// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for\n// assigning timestamps.\nfunc NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {\n\tts := new(TimeSeries)\n\tts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)\n\treturn ts\n}\n\n// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.\ntype MinuteHourSeries struct {\n\ttimeSeries\n}\n\n// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.\nfunc NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {\n\treturn NewMinuteHourSeriesWithClock(f, defaultClockInstance)\n}\n\n// 
NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for\n// assigning timestamps.\nfunc NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {\n\tts := new(MinuteHourSeries)\n\tts.timeSeries.init(minuteHourSeriesResolutions, f,\n\t\tminuteHourSeriesNumBuckets, clock)\n\treturn ts\n}\n\nfunc (ts *MinuteHourSeries) Minute() Observable {\n\treturn ts.timeSeries.Latest(0, 60)\n}\n\nfunc (ts *MinuteHourSeries) Hour() Observable {\n\treturn ts.timeSeries.Latest(1, 60)\n}\n\nfunc minTime(a, b time.Time) time.Time {\n\tif a.Before(b) {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc maxTime(a, b time.Time) time.Time {\n\tif a.After(b) {\n\t\treturn a\n\t}\n\treturn b\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/internal/timeseries/timeseries_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage timeseries\n\nimport (\n\t\"math\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc isNear(x *Float, y float64, tolerance float64) bool {\n\treturn math.Abs(x.Value()-y) < tolerance\n}\n\nfunc isApproximate(x *Float, y float64) bool {\n\treturn isNear(x, y, 1e-2)\n}\n\nfunc checkApproximate(t *testing.T, o Observable, y float64) {\n\tx := o.(*Float)\n\tif !isApproximate(x, y) {\n\t\tt.Errorf(\"Wanted %g, got %g\", y, x.Value())\n\t}\n}\n\nfunc checkNear(t *testing.T, o Observable, y, tolerance float64) {\n\tx := o.(*Float)\n\tif !isNear(x, y, tolerance) {\n\t\tt.Errorf(\"Wanted %g +- %g, got %g\", y, tolerance, x.Value())\n\t}\n}\n\nvar baseTime = time.Date(2013, 1, 1, 0, 0, 0, 0, time.UTC)\n\nfunc tu(s int64) time.Time {\n\treturn baseTime.Add(time.Duration(s) * time.Second)\n}\n\nfunc tu2(s int64, ns int64) time.Time {\n\treturn baseTime.Add(time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond)\n}\n\nfunc TestBasicTimeSeries(t *testing.T) {\n\tts := NewTimeSeries(NewFloat)\n\tfo := new(Float)\n\t*fo = Float(10)\n\tts.AddWithTime(fo, tu(1))\n\tts.AddWithTime(fo, tu(1))\n\tts.AddWithTime(fo, tu(1))\n\tts.AddWithTime(fo, tu(1))\n\tcheckApproximate(t, ts.Range(tu(0), tu(1)), 40)\n\tcheckApproximate(t, ts.Total(), 40)\n\tts.AddWithTime(fo, tu(3))\n\tts.AddWithTime(fo, tu(3))\n\tts.AddWithTime(fo, tu(3))\n\tcheckApproximate(t, ts.Range(tu(0), tu(2)), 40)\n\tcheckApproximate(t, ts.Range(tu(2), tu(4)), 30)\n\tcheckApproximate(t, ts.Total(), 70)\n\tts.AddWithTime(fo, tu(1))\n\tts.AddWithTime(fo, tu(1))\n\tcheckApproximate(t, ts.Range(tu(0), tu(2)), 60)\n\tcheckApproximate(t, ts.Range(tu(2), tu(4)), 30)\n\tcheckApproximate(t, ts.Total(), 90)\n\t*fo = Float(100)\n\tts.AddWithTime(fo, tu(100))\n\tcheckApproximate(t, ts.Range(tu(99), tu(100)), 100)\n\tcheckApproximate(t, ts.Range(tu(0), 
tu(4)), 36)\n\tcheckApproximate(t, ts.Total(), 190)\n\t*fo = Float(10)\n\tts.AddWithTime(fo, tu(1))\n\tts.AddWithTime(fo, tu(1))\n\tcheckApproximate(t, ts.Range(tu(0), tu(4)), 44)\n\tcheckApproximate(t, ts.Range(tu(37), tu2(100, 100e6)), 100)\n\tcheckApproximate(t, ts.Range(tu(50), tu2(100, 100e6)), 100)\n\tcheckApproximate(t, ts.Range(tu(99), tu2(100, 100e6)), 100)\n\tcheckApproximate(t, ts.Total(), 210)\n\n\tfor i, l := range ts.ComputeRange(tu(36), tu(100), 64) {\n\t\tif i == 63 {\n\t\t\tcheckApproximate(t, l, 100)\n\t\t} else {\n\t\t\tcheckApproximate(t, l, 0)\n\t\t}\n\t}\n\n\tcheckApproximate(t, ts.Range(tu(0), tu(100)), 210)\n\tcheckApproximate(t, ts.Range(tu(10), tu(100)), 100)\n\n\tfor i, l := range ts.ComputeRange(tu(0), tu(100), 100) {\n\t\tif i < 10 {\n\t\t\tcheckApproximate(t, l, 11)\n\t\t} else if i >= 90 {\n\t\t\tcheckApproximate(t, l, 10)\n\t\t} else {\n\t\t\tcheckApproximate(t, l, 0)\n\t\t}\n\t}\n}\n\nfunc TestFloat(t *testing.T) {\n\tf := Float(1)\n\tif g, w := f.String(), \"1\"; g != w {\n\t\tt.Errorf(\"Float(1).String = %q; want %q\", g, w)\n\t}\n\tf2 := Float(2)\n\tvar o Observable = &f2\n\tf.Add(o)\n\tif g, w := f.Value(), 3.0; g != w {\n\t\tt.Errorf(\"Float post-add = %v; want %v\", g, w)\n\t}\n\tf.Multiply(2)\n\tif g, w := f.Value(), 6.0; g != w {\n\t\tt.Errorf(\"Float post-multiply = %v; want %v\", g, w)\n\t}\n\tf.Clear()\n\tif g, w := f.Value(), 0.0; g != w {\n\t\tt.Errorf(\"Float post-clear = %v; want %v\", g, w)\n\t}\n\tf.CopyFrom(&f2)\n\tif g, w := f.Value(), 2.0; g != w {\n\t\tt.Errorf(\"Float post-CopyFrom = %v; want %v\", g, w)\n\t}\n}\n\ntype mockClock struct {\n\ttime time.Time\n}\n\nfunc (m *mockClock) Time() time.Time { return m.time }\nfunc (m *mockClock) Set(t time.Time) { m.time = t }\n\nconst buckets = 6\n\nvar testResolutions = []time.Duration{\n\t10 * time.Second,  // level holds one minute of observations\n\t100 * time.Second, // level holds ten minutes of observations\n\t10 * time.Minute,  // level holds one hour of 
observations\n}\n\n// TestTimeSeries uses a small number of buckets to force a higher\n// error rate on approximations from the timeseries.\ntype TestTimeSeries struct {\n\ttimeSeries\n}\n\nfunc TestExpectedErrorRate(t *testing.T) {\n\tts := new(TestTimeSeries)\n\tfake := new(mockClock)\n\tfake.Set(time.Now())\n\tts.timeSeries.init(testResolutions, NewFloat, buckets, fake)\n\tfor i := 1; i <= 61*61; i++ {\n\t\tfake.Set(fake.Time().Add(1 * time.Second))\n\t\tob := Float(1)\n\t\tts.AddWithTime(&ob, fake.Time())\n\n\t\t// The results should be accurate within one missing bucket (1/6) of the observations recorded.\n\t\tcheckNear(t, ts.Latest(0, buckets), min(float64(i), 60), 10)\n\t\tcheckNear(t, ts.Latest(1, buckets), min(float64(i), 600), 100)\n\t\tcheckNear(t, ts.Latest(2, buckets), min(float64(i), 3600), 600)\n\t}\n}\n\nfunc min(a, b float64) float64 {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/batch.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.9\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"runtime\"\n\t\"syscall\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\n// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of\n// PacketConn are not implemented.\n\n// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of\n// RawConn are not implemented.\n\n// A Message represents an IO message.\n//\n//\ttype Message struct {\n//\t\tBuffers [][]byte\n//\t\tOOB     []byte\n//\t\tAddr    net.Addr\n//\t\tN       int\n//\t\tNN      int\n//\t\tFlags   int\n//\t}\n//\n// The Buffers fields represents a list of contiguous buffers, which\n// can be used for vectored IO, for example, putting a header and a\n// payload in each slice.\n// When writing, the Buffers field must contain at least one byte to\n// write.\n// When reading, the Buffers field will always contain a byte to read.\n//\n// The OOB field contains protocol-specific control or miscellaneous\n// ancillary data known as out-of-band data.\n// It can be nil when not required.\n//\n// The Addr field specifies a destination address when writing.\n// It can be nil when the underlying protocol of the endpoint uses\n// connection-oriented communication.\n// After a successful read, it may contain the source address on the\n// received packet.\n//\n// The N field indicates the number of bytes read or written from/to\n// Buffers.\n//\n// The NN field indicates the number of bytes read or written from/to\n// OOB.\n//\n// The Flags field contains protocol-specific information on the\n// received message.\ntype Message = socket.Message\n\n// ReadBatch reads a batch of messages.\n//\n// The provided flags is a set of platform-dependent flags, such as\n// syscall.MSG_PEEK.\n//\n// On a successful read it returns the number of messages received, up\n// to 
len(ms).\n//\n// On Linux, a batch read will be optimized.\n// On other platforms, this method will read only a single message.\n//\n// Unlike the ReadFrom method, it doesn't strip the IPv4 header\n// followed by option headers from the received IPv4 datagram when the\n// underlying transport is net.IPConn. Each Buffers field of Message\n// must be large enough to accommodate an IPv4 header and option\n// headers.\nfunc (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\tn, err := c.RecvMsgs([]socket.Message(ms), flags)\n\t\tif err != nil {\n\t\t\terr = &net.OpError{Op: \"read\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}\n\t\t}\n\t\treturn n, err\n\tdefault:\n\t\tn := 1\n\t\terr := c.RecvMsg(&ms[0], flags)\n\t\tif err != nil {\n\t\t\tn = 0\n\t\t\terr = &net.OpError{Op: \"read\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}\n\t\t}\n\t\treturn n, err\n\t}\n}\n\n// WriteBatch writes a batch of messages.\n//\n// The provided flags is a set of platform-dependent flags, such as\n// syscall.MSG_DONTROUTE.\n//\n// It returns the number of messages written on a successful write.\n//\n// On Linux, a batch write will be optimized.\n// On other platforms, this method will write only a single message.\nfunc (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\tn, err := c.SendMsgs([]socket.Message(ms), flags)\n\t\tif err != nil {\n\t\t\terr = &net.OpError{Op: \"write\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}\n\t\t}\n\t\treturn n, err\n\tdefault:\n\t\tn := 1\n\t\terr := c.SendMsg(&ms[0], flags)\n\t\tif err != nil {\n\t\t\tn = 0\n\t\t\terr = &net.OpError{Op: \"write\", Net: c.PacketConn.LocalAddr().Network(), Source: 
c.PacketConn.LocalAddr(), Err: err}\n\t\t}\n\t\treturn n, err\n\t}\n}\n\n// ReadBatch reads a batch of messages.\n//\n// The provided flags is a set of platform-dependent flags, such as\n// syscall.MSG_PEEK.\n//\n// On a successful read it returns the number of messages received, up\n// to len(ms).\n//\n// On Linux, a batch read will be optimized.\n// On other platforms, this method will read only a single message.\nfunc (c *packetHandler) ReadBatch(ms []Message, flags int) (int, error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\tn, err := c.RecvMsgs([]socket.Message(ms), flags)\n\t\tif err != nil {\n\t\t\terr = &net.OpError{Op: \"read\", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err}\n\t\t}\n\t\treturn n, err\n\tdefault:\n\t\tn := 1\n\t\terr := c.RecvMsg(&ms[0], flags)\n\t\tif err != nil {\n\t\t\tn = 0\n\t\t\terr = &net.OpError{Op: \"read\", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err}\n\t\t}\n\t\treturn n, err\n\t}\n}\n\n// WriteBatch writes a batch of messages.\n//\n// The provided flags is a set of platform-dependent flags, such as\n// syscall.MSG_DONTROUTE.\n//\n// It returns the number of messages written on a successful write.\n//\n// On Linux, a batch write will be optimized.\n// On other platforms, this method will write only a single message.\nfunc (c *packetHandler) WriteBatch(ms []Message, flags int) (int, error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\tn, err := c.SendMsgs([]socket.Message(ms), flags)\n\t\tif err != nil {\n\t\t\terr = &net.OpError{Op: \"write\", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err}\n\t\t}\n\t\treturn n, err\n\tdefault:\n\t\tn := 1\n\t\terr := c.SendMsg(&ms[0], flags)\n\t\tif err != nil {\n\t\t\tn = 0\n\t\t\terr = &net.OpError{Op: \"write\", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: 
err}\n\t\t}\n\t\treturn n, err\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/bpf_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4_test\n\nimport (\n\t\"net\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org/x/net/bpf\"\n\t\"golang.org/x/net/ipv4\"\n)\n\nfunc TestBPF(t *testing.T) {\n\tif runtime.GOOS != \"linux\" {\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\n\tl, err := net.ListenPacket(\"udp4\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer l.Close()\n\n\tp := ipv4.NewPacketConn(l)\n\n\t// This filter accepts UDP packets whose first payload byte is\n\t// even.\n\tprog, err := bpf.Assemble([]bpf.Instruction{\n\t\t// Load the first byte of the payload (skipping UDP header).\n\t\tbpf.LoadAbsolute{Off: 8, Size: 1},\n\t\t// Select LSB of the byte.\n\t\tbpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1},\n\t\t// Byte is even?\n\t\tbpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1},\n\t\t// Accept.\n\t\tbpf.RetConstant{Val: 4096},\n\t\t// Ignore.\n\t\tbpf.RetConstant{Val: 0},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"compiling BPF: %s\", err)\n\t}\n\n\tif err = p.SetBPF(prog); err != nil {\n\t\tt.Fatalf(\"attaching filter to Conn: %s\", err)\n\t}\n\n\ts, err := net.Dial(\"udp4\", l.LocalAddr().String())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer s.Close()\n\tgo func() {\n\t\tfor i := byte(0); i < 10; i++ {\n\t\t\ts.Write([]byte{i})\n\t\t}\n\t}()\n\n\tl.SetDeadline(time.Now().Add(2 * time.Second))\n\tseen := make([]bool, 5)\n\tfor {\n\t\tvar b [512]byte\n\t\tn, _, err := l.ReadFrom(b[:])\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"reading from listener: %s\", err)\n\t\t}\n\t\tif n != 1 {\n\t\t\tt.Fatalf(\"unexpected packet length, want 1, got %d\", n)\n\t\t}\n\t\tif b[0] >= 10 {\n\t\t\tt.Fatalf(\"unexpected byte, want 0-9, got %d\", b[0])\n\t\t}\n\t\tif b[0]%2 != 0 {\n\t\t\tt.Fatalf(\"got odd byte %d, wanted only even bytes\", b[0])\n\t\t}\n\t\tseen[b[0]/2] = 
true\n\n\t\tseenAll := true\n\t\tfor _, v := range seen {\n\t\t\tif !v {\n\t\t\t\tseenAll = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif seenAll {\n\t\t\tbreak\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/control.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\ntype rawOpt struct {\n\tsync.RWMutex\n\tcflags ControlFlags\n}\n\nfunc (c *rawOpt) set(f ControlFlags)        { c.cflags |= f }\nfunc (c *rawOpt) clear(f ControlFlags)      { c.cflags &^= f }\nfunc (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 }\n\ntype ControlFlags uint\n\nconst (\n\tFlagTTL       ControlFlags = 1 << iota // pass the TTL on the received packet\n\tFlagSrc                                // pass the source address on the received packet\n\tFlagDst                                // pass the destination address on the received packet\n\tFlagInterface                          // pass the interface index on the received packet\n)\n\n// A ControlMessage represents per packet basis IP-level socket options.\ntype ControlMessage struct {\n\t// Receiving socket options: SetControlMessage allows to\n\t// receive the options from the protocol stack using ReadFrom\n\t// method of PacketConn or RawConn.\n\t//\n\t// Specifying socket options: ControlMessage for WriteTo\n\t// method of PacketConn or RawConn allows to send the options\n\t// to the protocol stack.\n\t//\n\tTTL     int    // time-to-live, receiving only\n\tSrc     net.IP // source address, specifying only\n\tDst     net.IP // destination address, receiving only\n\tIfIndex int    // interface index, must be 1 <= value when specifying\n}\n\nfunc (cm *ControlMessage) String() string {\n\tif cm == nil {\n\t\treturn \"<nil>\"\n\t}\n\treturn fmt.Sprintf(\"ttl=%d src=%v dst=%v ifindex=%d\", cm.TTL, cm.Src, cm.Dst, cm.IfIndex)\n}\n\n// Marshal returns the binary encoding of cm.\nfunc (cm *ControlMessage) Marshal() []byte {\n\tif cm == nil {\n\t\treturn nil\n\t}\n\tvar m 
socket.ControlMessage\n\tif ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To4() != nil || cm.IfIndex > 0) {\n\t\tm = socket.NewControlMessage([]int{ctlOpts[ctlPacketInfo].length})\n\t}\n\tif len(m) > 0 {\n\t\tctlOpts[ctlPacketInfo].marshal(m, cm)\n\t}\n\treturn m\n}\n\n// Parse parses b as a control message and stores the result in cm.\nfunc (cm *ControlMessage) Parse(b []byte) error {\n\tms, err := socket.ControlMessage(b).Parse()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, m := range ms {\n\t\tlvl, typ, l, err := m.ParseHeader()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif lvl != iana.ProtocolIP {\n\t\t\tcontinue\n\t\t}\n\t\tswitch {\n\t\tcase typ == ctlOpts[ctlTTL].name && l >= ctlOpts[ctlTTL].length:\n\t\t\tctlOpts[ctlTTL].parse(cm, m.Data(l))\n\t\tcase typ == ctlOpts[ctlDst].name && l >= ctlOpts[ctlDst].length:\n\t\t\tctlOpts[ctlDst].parse(cm, m.Data(l))\n\t\tcase typ == ctlOpts[ctlInterface].name && l >= ctlOpts[ctlInterface].length:\n\t\t\tctlOpts[ctlInterface].parse(cm, m.Data(l))\n\t\tcase typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length:\n\t\t\tctlOpts[ctlPacketInfo].parse(cm, m.Data(l))\n\t\t}\n\t}\n\treturn nil\n}\n\n// NewControlMessage returns a new control message.\n//\n// The returned message is large enough for options specified by cf.\nfunc NewControlMessage(cf ControlFlags) []byte {\n\topt := rawOpt{cflags: cf}\n\tvar l int\n\tif opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 {\n\t\tl += socket.ControlMessageSpace(ctlOpts[ctlTTL].length)\n\t}\n\tif ctlOpts[ctlPacketInfo].name > 0 {\n\t\tif opt.isset(FlagSrc | FlagDst | FlagInterface) {\n\t\t\tl += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length)\n\t\t}\n\t} else {\n\t\tif opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 {\n\t\t\tl += socket.ControlMessageSpace(ctlOpts[ctlDst].length)\n\t\t}\n\t\tif opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 {\n\t\t\tl += socket.ControlMessageSpace(ctlOpts[ctlInterface].length)\n\t\t}\n\t}\n\tvar b 
[]byte\n\tif l > 0 {\n\t\tb = make([]byte, l)\n\t}\n\treturn b\n}\n\n// Ancillary data socket options\nconst (\n\tctlTTL        = iota // header field\n\tctlSrc               // header field\n\tctlDst               // header field\n\tctlInterface         // inbound or outbound interface\n\tctlPacketInfo        // inbound or outbound packet path\n\tctlMax\n)\n\n// A ctlOpt represents a binding for ancillary data socket option.\ntype ctlOpt struct {\n\tname    int // option name, must be equal or greater than 1\n\tlength  int // option length\n\tmarshal func([]byte, *ControlMessage) []byte\n\tparse   func(*ControlMessage, []byte)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/control_bsd.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd openbsd\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc marshalDst(b []byte, cm *ControlMessage) []byte {\n\tm := socket.ControlMessage(b)\n\tm.MarshalHeader(iana.ProtocolIP, sysIP_RECVDSTADDR, net.IPv4len)\n\treturn m.Next(net.IPv4len)\n}\n\nfunc parseDst(cm *ControlMessage, b []byte) {\n\tif len(cm.Dst) < net.IPv4len {\n\t\tcm.Dst = make(net.IP, net.IPv4len)\n\t}\n\tcopy(cm.Dst, b[:net.IPv4len])\n}\n\nfunc marshalInterface(b []byte, cm *ControlMessage) []byte {\n\tm := socket.ControlMessage(b)\n\tm.MarshalHeader(iana.ProtocolIP, sysIP_RECVIF, syscall.SizeofSockaddrDatalink)\n\treturn m.Next(syscall.SizeofSockaddrDatalink)\n}\n\nfunc parseInterface(cm *ControlMessage, b []byte) {\n\tsadl := (*syscall.SockaddrDatalink)(unsafe.Pointer(&b[0]))\n\tcm.IfIndex = int(sadl.Index)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/control_pktinfo.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin linux solaris\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc marshalPacketInfo(b []byte, cm *ControlMessage) []byte {\n\tm := socket.ControlMessage(b)\n\tm.MarshalHeader(iana.ProtocolIP, sysIP_PKTINFO, sizeofInetPktinfo)\n\tif cm != nil {\n\t\tpi := (*inetPktinfo)(unsafe.Pointer(&m.Data(sizeofInetPktinfo)[0]))\n\t\tif ip := cm.Src.To4(); ip != nil {\n\t\t\tcopy(pi.Spec_dst[:], ip)\n\t\t}\n\t\tif cm.IfIndex > 0 {\n\t\t\tpi.setIfindex(cm.IfIndex)\n\t\t}\n\t}\n\treturn m.Next(sizeofInetPktinfo)\n}\n\nfunc parsePacketInfo(cm *ControlMessage, b []byte) {\n\tpi := (*inetPktinfo)(unsafe.Pointer(&b[0]))\n\tcm.IfIndex = int(pi.Ifindex)\n\tif len(cm.Dst) < net.IPv4len {\n\t\tcm.Dst = make(net.IP, net.IPv4len)\n\t}\n\tcopy(cm.Dst, pi.Addr[:])\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/control_stub.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows\n\npackage ipv4\n\nimport \"golang.org/x/net/internal/socket\"\n\nfunc setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error {\n\treturn errOpNoSupport\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/control_test.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4_test\n\nimport (\n\t\"testing\"\n\n\t\"golang.org/x/net/ipv4\"\n)\n\nfunc TestControlMessageParseWithFuzz(t *testing.T) {\n\tvar cm ipv4.ControlMessage\n\tfor _, fuzz := range []string{\n\t\t\"\\f\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x14\\x00\\x00\\x00\",\n\t\t\"\\f\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x1a\\x00\\x00\\x00\",\n\t} {\n\t\tcm.Parse([]byte(fuzz))\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/control_unix.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd linux netbsd openbsd solaris\n\npackage ipv4\n\nimport (\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error {\n\topt.Lock()\n\tdefer opt.Unlock()\n\tif so, ok := sockOpts[ssoReceiveTTL]; ok && cf&FlagTTL != 0 {\n\t\tif err := so.SetInt(c, boolint(on)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif on {\n\t\t\topt.set(FlagTTL)\n\t\t} else {\n\t\t\topt.clear(FlagTTL)\n\t\t}\n\t}\n\tif so, ok := sockOpts[ssoPacketInfo]; ok {\n\t\tif cf&(FlagSrc|FlagDst|FlagInterface) != 0 {\n\t\t\tif err := so.SetInt(c, boolint(on)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif on {\n\t\t\t\topt.set(cf & (FlagSrc | FlagDst | FlagInterface))\n\t\t\t} else {\n\t\t\t\topt.clear(cf & (FlagSrc | FlagDst | FlagInterface))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif so, ok := sockOpts[ssoReceiveDst]; ok && cf&FlagDst != 0 {\n\t\t\tif err := so.SetInt(c, boolint(on)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif on {\n\t\t\t\topt.set(FlagDst)\n\t\t\t} else {\n\t\t\t\topt.clear(FlagDst)\n\t\t\t}\n\t\t}\n\t\tif so, ok := sockOpts[ssoReceiveInterface]; ok && cf&FlagInterface != 0 {\n\t\t\tif err := so.SetInt(c, boolint(on)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif on {\n\t\t\t\topt.set(FlagInterface)\n\t\t\t} else {\n\t\t\t\topt.clear(FlagInterface)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc marshalTTL(b []byte, cm *ControlMessage) []byte {\n\tm := socket.ControlMessage(b)\n\tm.MarshalHeader(iana.ProtocolIP, sysIP_RECVTTL, 1)\n\treturn m.Next(1)\n}\n\nfunc parseTTL(cm *ControlMessage, b []byte) {\n\tcm.TTL = int(*(*byte)(unsafe.Pointer(&b[:1][0])))\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/control_windows.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport (\n\t\"syscall\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error {\n\t// TODO(mikio): implement this\n\treturn syscall.EWINDOWS\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/defs_darwin.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in_addr [4]byte /* in_addr */\n\npackage ipv4\n\n/*\n#include <sys/socket.h>\n\n#include <netinet/in.h>\n*/\nimport \"C\"\n\nconst (\n\tsysIP_OPTIONS     = C.IP_OPTIONS\n\tsysIP_HDRINCL     = C.IP_HDRINCL\n\tsysIP_TOS         = C.IP_TOS\n\tsysIP_TTL         = C.IP_TTL\n\tsysIP_RECVOPTS    = C.IP_RECVOPTS\n\tsysIP_RECVRETOPTS = C.IP_RECVRETOPTS\n\tsysIP_RECVDSTADDR = C.IP_RECVDSTADDR\n\tsysIP_RETOPTS     = C.IP_RETOPTS\n\tsysIP_RECVIF      = C.IP_RECVIF\n\tsysIP_STRIPHDR    = C.IP_STRIPHDR\n\tsysIP_RECVTTL     = C.IP_RECVTTL\n\tsysIP_BOUND_IF    = C.IP_BOUND_IF\n\tsysIP_PKTINFO     = C.IP_PKTINFO\n\tsysIP_RECVPKTINFO = C.IP_RECVPKTINFO\n\n\tsysIP_MULTICAST_IF           = C.IP_MULTICAST_IF\n\tsysIP_MULTICAST_TTL          = C.IP_MULTICAST_TTL\n\tsysIP_MULTICAST_LOOP         = C.IP_MULTICAST_LOOP\n\tsysIP_ADD_MEMBERSHIP         = C.IP_ADD_MEMBERSHIP\n\tsysIP_DROP_MEMBERSHIP        = C.IP_DROP_MEMBERSHIP\n\tsysIP_MULTICAST_VIF          = C.IP_MULTICAST_VIF\n\tsysIP_MULTICAST_IFINDEX      = C.IP_MULTICAST_IFINDEX\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = C.IP_ADD_SOURCE_MEMBERSHIP\n\tsysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP\n\tsysIP_BLOCK_SOURCE           = C.IP_BLOCK_SOURCE\n\tsysIP_UNBLOCK_SOURCE         = C.IP_UNBLOCK_SOURCE\n\tsysMCAST_JOIN_GROUP          = C.MCAST_JOIN_GROUP\n\tsysMCAST_LEAVE_GROUP         = C.MCAST_LEAVE_GROUP\n\tsysMCAST_JOIN_SOURCE_GROUP   = C.MCAST_JOIN_SOURCE_GROUP\n\tsysMCAST_LEAVE_SOURCE_GROUP  = C.MCAST_LEAVE_SOURCE_GROUP\n\tsysMCAST_BLOCK_SOURCE        = C.MCAST_BLOCK_SOURCE\n\tsysMCAST_UNBLOCK_SOURCE      = C.MCAST_UNBLOCK_SOURCE\n\n\tsizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage\n\tsizeofSockaddrInet    = C.sizeof_struct_sockaddr_in\n\tsizeofInetPktinfo     = 
C.sizeof_struct_in_pktinfo\n\n\tsizeofIPMreq         = C.sizeof_struct_ip_mreq\n\tsizeofIPMreqn        = C.sizeof_struct_ip_mreqn\n\tsizeofIPMreqSource   = C.sizeof_struct_ip_mreq_source\n\tsizeofGroupReq       = C.sizeof_struct_group_req\n\tsizeofGroupSourceReq = C.sizeof_struct_group_source_req\n)\n\ntype sockaddrStorage C.struct_sockaddr_storage\n\ntype sockaddrInet C.struct_sockaddr_in\n\ntype inetPktinfo C.struct_in_pktinfo\n\ntype ipMreq C.struct_ip_mreq\n\ntype ipMreqn C.struct_ip_mreqn\n\ntype ipMreqSource C.struct_ip_mreq_source\n\ntype groupReq C.struct_group_req\n\ntype groupSourceReq C.struct_group_source_req\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/defs_dragonfly.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in_addr [4]byte /* in_addr */\n\npackage ipv4\n\n/*\n#include <netinet/in.h>\n*/\nimport \"C\"\n\nconst (\n\tsysIP_OPTIONS     = C.IP_OPTIONS\n\tsysIP_HDRINCL     = C.IP_HDRINCL\n\tsysIP_TOS         = C.IP_TOS\n\tsysIP_TTL         = C.IP_TTL\n\tsysIP_RECVOPTS    = C.IP_RECVOPTS\n\tsysIP_RECVRETOPTS = C.IP_RECVRETOPTS\n\tsysIP_RECVDSTADDR = C.IP_RECVDSTADDR\n\tsysIP_RETOPTS     = C.IP_RETOPTS\n\tsysIP_RECVIF      = C.IP_RECVIF\n\tsysIP_RECVTTL     = C.IP_RECVTTL\n\n\tsysIP_MULTICAST_IF    = C.IP_MULTICAST_IF\n\tsysIP_MULTICAST_TTL   = C.IP_MULTICAST_TTL\n\tsysIP_MULTICAST_LOOP  = C.IP_MULTICAST_LOOP\n\tsysIP_MULTICAST_VIF   = C.IP_MULTICAST_VIF\n\tsysIP_ADD_MEMBERSHIP  = C.IP_ADD_MEMBERSHIP\n\tsysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP\n\n\tsizeofIPMreq = C.sizeof_struct_ip_mreq\n)\n\ntype ipMreq C.struct_ip_mreq\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/defs_freebsd.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in_addr [4]byte /* in_addr */\n\npackage ipv4\n\n/*\n#include <sys/socket.h>\n\n#include <netinet/in.h>\n*/\nimport \"C\"\n\nconst (\n\tsysIP_OPTIONS     = C.IP_OPTIONS\n\tsysIP_HDRINCL     = C.IP_HDRINCL\n\tsysIP_TOS         = C.IP_TOS\n\tsysIP_TTL         = C.IP_TTL\n\tsysIP_RECVOPTS    = C.IP_RECVOPTS\n\tsysIP_RECVRETOPTS = C.IP_RECVRETOPTS\n\tsysIP_RECVDSTADDR = C.IP_RECVDSTADDR\n\tsysIP_SENDSRCADDR = C.IP_SENDSRCADDR\n\tsysIP_RETOPTS     = C.IP_RETOPTS\n\tsysIP_RECVIF      = C.IP_RECVIF\n\tsysIP_ONESBCAST   = C.IP_ONESBCAST\n\tsysIP_BINDANY     = C.IP_BINDANY\n\tsysIP_RECVTTL     = C.IP_RECVTTL\n\tsysIP_MINTTL      = C.IP_MINTTL\n\tsysIP_DONTFRAG    = C.IP_DONTFRAG\n\tsysIP_RECVTOS     = C.IP_RECVTOS\n\n\tsysIP_MULTICAST_IF           = C.IP_MULTICAST_IF\n\tsysIP_MULTICAST_TTL          = C.IP_MULTICAST_TTL\n\tsysIP_MULTICAST_LOOP         = C.IP_MULTICAST_LOOP\n\tsysIP_ADD_MEMBERSHIP         = C.IP_ADD_MEMBERSHIP\n\tsysIP_DROP_MEMBERSHIP        = C.IP_DROP_MEMBERSHIP\n\tsysIP_MULTICAST_VIF          = C.IP_MULTICAST_VIF\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = C.IP_ADD_SOURCE_MEMBERSHIP\n\tsysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP\n\tsysIP_BLOCK_SOURCE           = C.IP_BLOCK_SOURCE\n\tsysIP_UNBLOCK_SOURCE         = C.IP_UNBLOCK_SOURCE\n\tsysMCAST_JOIN_GROUP          = C.MCAST_JOIN_GROUP\n\tsysMCAST_LEAVE_GROUP         = C.MCAST_LEAVE_GROUP\n\tsysMCAST_JOIN_SOURCE_GROUP   = C.MCAST_JOIN_SOURCE_GROUP\n\tsysMCAST_LEAVE_SOURCE_GROUP  = C.MCAST_LEAVE_SOURCE_GROUP\n\tsysMCAST_BLOCK_SOURCE        = C.MCAST_BLOCK_SOURCE\n\tsysMCAST_UNBLOCK_SOURCE      = C.MCAST_UNBLOCK_SOURCE\n\n\tsizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage\n\tsizeofSockaddrInet    = C.sizeof_struct_sockaddr_in\n\n\tsizeofIPMreq         = 
C.sizeof_struct_ip_mreq\n\tsizeofIPMreqn        = C.sizeof_struct_ip_mreqn\n\tsizeofIPMreqSource   = C.sizeof_struct_ip_mreq_source\n\tsizeofGroupReq       = C.sizeof_struct_group_req\n\tsizeofGroupSourceReq = C.sizeof_struct_group_source_req\n)\n\ntype sockaddrStorage C.struct_sockaddr_storage\n\ntype sockaddrInet C.struct_sockaddr_in\n\ntype ipMreq C.struct_ip_mreq\n\ntype ipMreqn C.struct_ip_mreqn\n\ntype ipMreqSource C.struct_ip_mreq_source\n\ntype groupReq C.struct_group_req\n\ntype groupSourceReq C.struct_group_source_req\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/defs_linux.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in_addr [4]byte /* in_addr */\n\npackage ipv4\n\n/*\n#include <time.h>\n\n#include <linux/errqueue.h>\n#include <linux/icmp.h>\n#include <linux/in.h>\n#include <linux/filter.h>\n#include <sys/socket.h>\n*/\nimport \"C\"\n\nconst (\n\tsysIP_TOS             = C.IP_TOS\n\tsysIP_TTL             = C.IP_TTL\n\tsysIP_HDRINCL         = C.IP_HDRINCL\n\tsysIP_OPTIONS         = C.IP_OPTIONS\n\tsysIP_ROUTER_ALERT    = C.IP_ROUTER_ALERT\n\tsysIP_RECVOPTS        = C.IP_RECVOPTS\n\tsysIP_RETOPTS         = C.IP_RETOPTS\n\tsysIP_PKTINFO         = C.IP_PKTINFO\n\tsysIP_PKTOPTIONS      = C.IP_PKTOPTIONS\n\tsysIP_MTU_DISCOVER    = C.IP_MTU_DISCOVER\n\tsysIP_RECVERR         = C.IP_RECVERR\n\tsysIP_RECVTTL         = C.IP_RECVTTL\n\tsysIP_RECVTOS         = C.IP_RECVTOS\n\tsysIP_MTU             = C.IP_MTU\n\tsysIP_FREEBIND        = C.IP_FREEBIND\n\tsysIP_TRANSPARENT     = C.IP_TRANSPARENT\n\tsysIP_RECVRETOPTS     = C.IP_RECVRETOPTS\n\tsysIP_ORIGDSTADDR     = C.IP_ORIGDSTADDR\n\tsysIP_RECVORIGDSTADDR = C.IP_RECVORIGDSTADDR\n\tsysIP_MINTTL          = C.IP_MINTTL\n\tsysIP_NODEFRAG        = C.IP_NODEFRAG\n\tsysIP_UNICAST_IF      = C.IP_UNICAST_IF\n\n\tsysIP_MULTICAST_IF           = C.IP_MULTICAST_IF\n\tsysIP_MULTICAST_TTL          = C.IP_MULTICAST_TTL\n\tsysIP_MULTICAST_LOOP         = C.IP_MULTICAST_LOOP\n\tsysIP_ADD_MEMBERSHIP         = C.IP_ADD_MEMBERSHIP\n\tsysIP_DROP_MEMBERSHIP        = C.IP_DROP_MEMBERSHIP\n\tsysIP_UNBLOCK_SOURCE         = C.IP_UNBLOCK_SOURCE\n\tsysIP_BLOCK_SOURCE           = C.IP_BLOCK_SOURCE\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = C.IP_ADD_SOURCE_MEMBERSHIP\n\tsysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP\n\tsysIP_MSFILTER               = C.IP_MSFILTER\n\tsysMCAST_JOIN_GROUP          = C.MCAST_JOIN_GROUP\n\tsysMCAST_LEAVE_GROUP         = 
C.MCAST_LEAVE_GROUP\n\tsysMCAST_JOIN_SOURCE_GROUP   = C.MCAST_JOIN_SOURCE_GROUP\n\tsysMCAST_LEAVE_SOURCE_GROUP  = C.MCAST_LEAVE_SOURCE_GROUP\n\tsysMCAST_BLOCK_SOURCE        = C.MCAST_BLOCK_SOURCE\n\tsysMCAST_UNBLOCK_SOURCE      = C.MCAST_UNBLOCK_SOURCE\n\tsysMCAST_MSFILTER            = C.MCAST_MSFILTER\n\tsysIP_MULTICAST_ALL          = C.IP_MULTICAST_ALL\n\n\t//sysIP_PMTUDISC_DONT      = C.IP_PMTUDISC_DONT\n\t//sysIP_PMTUDISC_WANT      = C.IP_PMTUDISC_WANT\n\t//sysIP_PMTUDISC_DO        = C.IP_PMTUDISC_DO\n\t//sysIP_PMTUDISC_PROBE     = C.IP_PMTUDISC_PROBE\n\t//sysIP_PMTUDISC_INTERFACE = C.IP_PMTUDISC_INTERFACE\n\t//sysIP_PMTUDISC_OMIT      = C.IP_PMTUDISC_OMIT\n\n\tsysICMP_FILTER = C.ICMP_FILTER\n\n\tsysSO_EE_ORIGIN_NONE         = C.SO_EE_ORIGIN_NONE\n\tsysSO_EE_ORIGIN_LOCAL        = C.SO_EE_ORIGIN_LOCAL\n\tsysSO_EE_ORIGIN_ICMP         = C.SO_EE_ORIGIN_ICMP\n\tsysSO_EE_ORIGIN_ICMP6        = C.SO_EE_ORIGIN_ICMP6\n\tsysSO_EE_ORIGIN_TXSTATUS     = C.SO_EE_ORIGIN_TXSTATUS\n\tsysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING\n\n\tsysSOL_SOCKET       = C.SOL_SOCKET\n\tsysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER\n\n\tsizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage\n\tsizeofSockaddrInet          = C.sizeof_struct_sockaddr_in\n\tsizeofInetPktinfo           = C.sizeof_struct_in_pktinfo\n\tsizeofSockExtendedErr       = C.sizeof_struct_sock_extended_err\n\n\tsizeofIPMreq         = C.sizeof_struct_ip_mreq\n\tsizeofIPMreqn        = C.sizeof_struct_ip_mreqn\n\tsizeofIPMreqSource   = C.sizeof_struct_ip_mreq_source\n\tsizeofGroupReq       = C.sizeof_struct_group_req\n\tsizeofGroupSourceReq = C.sizeof_struct_group_source_req\n\n\tsizeofICMPFilter = C.sizeof_struct_icmp_filter\n\n\tsizeofSockFprog = C.sizeof_struct_sock_fprog\n)\n\ntype kernelSockaddrStorage C.struct___kernel_sockaddr_storage\n\ntype sockaddrInet C.struct_sockaddr_in\n\ntype inetPktinfo C.struct_in_pktinfo\n\ntype sockExtendedErr C.struct_sock_extended_err\n\ntype ipMreq 
C.struct_ip_mreq\n\ntype ipMreqn C.struct_ip_mreqn\n\ntype ipMreqSource C.struct_ip_mreq_source\n\ntype groupReq C.struct_group_req\n\ntype groupSourceReq C.struct_group_source_req\n\ntype icmpFilter C.struct_icmp_filter\n\ntype sockFProg C.struct_sock_fprog\n\ntype sockFilter C.struct_sock_filter\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/defs_netbsd.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in_addr [4]byte /* in_addr */\n\npackage ipv4\n\n/*\n#include <netinet/in.h>\n*/\nimport \"C\"\n\nconst (\n\tsysIP_OPTIONS     = C.IP_OPTIONS\n\tsysIP_HDRINCL     = C.IP_HDRINCL\n\tsysIP_TOS         = C.IP_TOS\n\tsysIP_TTL         = C.IP_TTL\n\tsysIP_RECVOPTS    = C.IP_RECVOPTS\n\tsysIP_RECVRETOPTS = C.IP_RECVRETOPTS\n\tsysIP_RECVDSTADDR = C.IP_RECVDSTADDR\n\tsysIP_RETOPTS     = C.IP_RETOPTS\n\tsysIP_RECVIF      = C.IP_RECVIF\n\tsysIP_RECVTTL     = C.IP_RECVTTL\n\n\tsysIP_MULTICAST_IF    = C.IP_MULTICAST_IF\n\tsysIP_MULTICAST_TTL   = C.IP_MULTICAST_TTL\n\tsysIP_MULTICAST_LOOP  = C.IP_MULTICAST_LOOP\n\tsysIP_ADD_MEMBERSHIP  = C.IP_ADD_MEMBERSHIP\n\tsysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP\n\n\tsizeofIPMreq = C.sizeof_struct_ip_mreq\n)\n\ntype ipMreq C.struct_ip_mreq\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/defs_openbsd.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in_addr [4]byte /* in_addr */\n\npackage ipv4\n\n/*\n#include <netinet/in.h>\n*/\nimport \"C\"\n\nconst (\n\tsysIP_OPTIONS     = C.IP_OPTIONS\n\tsysIP_HDRINCL     = C.IP_HDRINCL\n\tsysIP_TOS         = C.IP_TOS\n\tsysIP_TTL         = C.IP_TTL\n\tsysIP_RECVOPTS    = C.IP_RECVOPTS\n\tsysIP_RECVRETOPTS = C.IP_RECVRETOPTS\n\tsysIP_RECVDSTADDR = C.IP_RECVDSTADDR\n\tsysIP_RETOPTS     = C.IP_RETOPTS\n\tsysIP_RECVIF      = C.IP_RECVIF\n\tsysIP_RECVTTL     = C.IP_RECVTTL\n\n\tsysIP_MULTICAST_IF    = C.IP_MULTICAST_IF\n\tsysIP_MULTICAST_TTL   = C.IP_MULTICAST_TTL\n\tsysIP_MULTICAST_LOOP  = C.IP_MULTICAST_LOOP\n\tsysIP_ADD_MEMBERSHIP  = C.IP_ADD_MEMBERSHIP\n\tsysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP\n\n\tsizeofIPMreq = C.sizeof_struct_ip_mreq\n)\n\ntype ipMreq C.struct_ip_mreq\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/defs_solaris.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in_addr [4]byte /* in_addr */\n\npackage ipv4\n\n/*\n#include <sys/socket.h>\n\n#include <netinet/in.h>\n*/\nimport \"C\"\n\nconst (\n\tsysIP_OPTIONS     = C.IP_OPTIONS\n\tsysIP_HDRINCL     = C.IP_HDRINCL\n\tsysIP_TOS         = C.IP_TOS\n\tsysIP_TTL         = C.IP_TTL\n\tsysIP_RECVOPTS    = C.IP_RECVOPTS\n\tsysIP_RECVRETOPTS = C.IP_RECVRETOPTS\n\tsysIP_RECVDSTADDR = C.IP_RECVDSTADDR\n\tsysIP_RETOPTS     = C.IP_RETOPTS\n\tsysIP_RECVIF      = C.IP_RECVIF\n\tsysIP_RECVSLLA    = C.IP_RECVSLLA\n\tsysIP_RECVTTL     = C.IP_RECVTTL\n\n\tsysIP_MULTICAST_IF           = C.IP_MULTICAST_IF\n\tsysIP_MULTICAST_TTL          = C.IP_MULTICAST_TTL\n\tsysIP_MULTICAST_LOOP         = C.IP_MULTICAST_LOOP\n\tsysIP_ADD_MEMBERSHIP         = C.IP_ADD_MEMBERSHIP\n\tsysIP_DROP_MEMBERSHIP        = C.IP_DROP_MEMBERSHIP\n\tsysIP_BLOCK_SOURCE           = C.IP_BLOCK_SOURCE\n\tsysIP_UNBLOCK_SOURCE         = C.IP_UNBLOCK_SOURCE\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = C.IP_ADD_SOURCE_MEMBERSHIP\n\tsysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP\n\tsysIP_NEXTHOP                = C.IP_NEXTHOP\n\n\tsysIP_PKTINFO     = C.IP_PKTINFO\n\tsysIP_RECVPKTINFO = C.IP_RECVPKTINFO\n\tsysIP_DONTFRAG    = C.IP_DONTFRAG\n\n\tsysIP_BOUND_IF      = C.IP_BOUND_IF\n\tsysIP_UNSPEC_SRC    = C.IP_UNSPEC_SRC\n\tsysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL\n\tsysIP_DHCPINIT_IF   = C.IP_DHCPINIT_IF\n\n\tsysIP_REUSEADDR = C.IP_REUSEADDR\n\tsysIP_DONTROUTE = C.IP_DONTROUTE\n\tsysIP_BROADCAST = C.IP_BROADCAST\n\n\tsysMCAST_JOIN_GROUP         = C.MCAST_JOIN_GROUP\n\tsysMCAST_LEAVE_GROUP        = C.MCAST_LEAVE_GROUP\n\tsysMCAST_BLOCK_SOURCE       = C.MCAST_BLOCK_SOURCE\n\tsysMCAST_UNBLOCK_SOURCE     = C.MCAST_UNBLOCK_SOURCE\n\tsysMCAST_JOIN_SOURCE_GROUP  = C.MCAST_JOIN_SOURCE_GROUP\n\tsysMCAST_LEAVE_SOURCE_GROUP = 
C.MCAST_LEAVE_SOURCE_GROUP\n\n\tsizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage\n\tsizeofSockaddrInet    = C.sizeof_struct_sockaddr_in\n\tsizeofInetPktinfo     = C.sizeof_struct_in_pktinfo\n\n\tsizeofIPMreq         = C.sizeof_struct_ip_mreq\n\tsizeofIPMreqSource   = C.sizeof_struct_ip_mreq_source\n\tsizeofGroupReq       = C.sizeof_struct_group_req\n\tsizeofGroupSourceReq = C.sizeof_struct_group_source_req\n)\n\ntype sockaddrStorage C.struct_sockaddr_storage\n\ntype sockaddrInet C.struct_sockaddr_in\n\ntype inetPktinfo C.struct_in_pktinfo\n\ntype ipMreq C.struct_ip_mreq\n\ntype ipMreqSource C.struct_ip_mreq_source\n\ntype groupReq C.struct_group_req\n\ntype groupSourceReq C.struct_group_source_req\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/dgramopt.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"syscall\"\n\n\t\"golang.org/x/net/bpf\"\n)\n\n// MulticastTTL returns the time-to-live field value for outgoing\n// multicast packets.\nfunc (c *dgramOpt) MulticastTTL() (int, error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoMulticastTTL]\n\tif !ok {\n\t\treturn 0, errOpNoSupport\n\t}\n\treturn so.GetInt(c.Conn)\n}\n\n// SetMulticastTTL sets the time-to-live field value for future\n// outgoing multicast packets.\nfunc (c *dgramOpt) SetMulticastTTL(ttl int) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoMulticastTTL]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\treturn so.SetInt(c.Conn, ttl)\n}\n\n// MulticastInterface returns the default interface for multicast\n// packet transmissions.\nfunc (c *dgramOpt) MulticastInterface() (*net.Interface, error) {\n\tif !c.ok() {\n\t\treturn nil, syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoMulticastInterface]\n\tif !ok {\n\t\treturn nil, errOpNoSupport\n\t}\n\treturn so.getMulticastInterface(c.Conn)\n}\n\n// SetMulticastInterface sets the default interface for future\n// multicast packet transmissions.\nfunc (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoMulticastInterface]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\treturn so.setMulticastInterface(c.Conn, ifi)\n}\n\n// MulticastLoopback reports whether transmitted multicast packets\n// should be copied and send back to the originator.\nfunc (c *dgramOpt) MulticastLoopback() (bool, error) {\n\tif !c.ok() {\n\t\treturn false, syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoMulticastLoopback]\n\tif !ok {\n\t\treturn false, errOpNoSupport\n\t}\n\ton, err := so.GetInt(c.Conn)\n\tif err != nil 
{\n\t\treturn false, err\n\t}\n\treturn on == 1, nil\n}\n\n// SetMulticastLoopback sets whether transmitted multicast packets\n// should be copied and send back to the originator.\nfunc (c *dgramOpt) SetMulticastLoopback(on bool) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoMulticastLoopback]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\treturn so.SetInt(c.Conn, boolint(on))\n}\n\n// JoinGroup joins the group address group on the interface ifi.\n// By default all sources that can cast data to group are accepted.\n// It's possible to mute and unmute data transmission from a specific\n// source by using ExcludeSourceSpecificGroup and\n// IncludeSourceSpecificGroup.\n// JoinGroup uses the system assigned multicast interface when ifi is\n// nil, although this is not recommended because the assignment\n// depends on platforms and sometimes it might require routing\n// configuration.\nfunc (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoJoinGroup]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\tgrp := netAddrToIP4(group)\n\tif grp == nil {\n\t\treturn errMissingAddress\n\t}\n\treturn so.setGroup(c.Conn, ifi, grp)\n}\n\n// LeaveGroup leaves the group address group on the interface ifi\n// regardless of whether the group is any-source group or\n// source-specific group.\nfunc (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoLeaveGroup]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\tgrp := netAddrToIP4(group)\n\tif grp == nil {\n\t\treturn errMissingAddress\n\t}\n\treturn so.setGroup(c.Conn, ifi, grp)\n}\n\n// JoinSourceSpecificGroup joins the source-specific group comprising\n// group and source on the interface ifi.\n// JoinSourceSpecificGroup uses the system assigned multicast\n// interface when ifi is nil, although this is not recommended because\n// 
the assignment depends on platforms and sometimes it might require\n// routing configuration.\nfunc (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoJoinSourceGroup]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\tgrp := netAddrToIP4(group)\n\tif grp == nil {\n\t\treturn errMissingAddress\n\t}\n\tsrc := netAddrToIP4(source)\n\tif src == nil {\n\t\treturn errMissingAddress\n\t}\n\treturn so.setSourceGroup(c.Conn, ifi, grp, src)\n}\n\n// LeaveSourceSpecificGroup leaves the source-specific group on the\n// interface ifi.\nfunc (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoLeaveSourceGroup]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\tgrp := netAddrToIP4(group)\n\tif grp == nil {\n\t\treturn errMissingAddress\n\t}\n\tsrc := netAddrToIP4(source)\n\tif src == nil {\n\t\treturn errMissingAddress\n\t}\n\treturn so.setSourceGroup(c.Conn, ifi, grp, src)\n}\n\n// ExcludeSourceSpecificGroup excludes the source-specific group from\n// the already joined any-source groups by JoinGroup on the interface\n// ifi.\nfunc (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoBlockSourceGroup]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\tgrp := netAddrToIP4(group)\n\tif grp == nil {\n\t\treturn errMissingAddress\n\t}\n\tsrc := netAddrToIP4(source)\n\tif src == nil {\n\t\treturn errMissingAddress\n\t}\n\treturn so.setSourceGroup(c.Conn, ifi, grp, src)\n}\n\n// IncludeSourceSpecificGroup includes the excluded source-specific\n// group by ExcludeSourceSpecificGroup again on the interface ifi.\nfunc (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok 
:= sockOpts[ssoUnblockSourceGroup]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\tgrp := netAddrToIP4(group)\n\tif grp == nil {\n\t\treturn errMissingAddress\n\t}\n\tsrc := netAddrToIP4(source)\n\tif src == nil {\n\t\treturn errMissingAddress\n\t}\n\treturn so.setSourceGroup(c.Conn, ifi, grp, src)\n}\n\n// ICMPFilter returns an ICMP filter.\n// Currently only Linux supports this.\nfunc (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) {\n\tif !c.ok() {\n\t\treturn nil, syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoICMPFilter]\n\tif !ok {\n\t\treturn nil, errOpNoSupport\n\t}\n\treturn so.getICMPFilter(c.Conn)\n}\n\n// SetICMPFilter deploys the ICMP filter.\n// Currently only Linux supports this.\nfunc (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoICMPFilter]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\treturn so.setICMPFilter(c.Conn, f)\n}\n\n// SetBPF attaches a BPF program to the connection.\n//\n// Only supported on Linux.\nfunc (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoAttachFilter]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\treturn so.setBPF(c.Conn, filter)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/doc.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package ipv4 implements IP-level socket options for the Internet\n// Protocol version 4.\n//\n// The package provides IP-level socket options that allow\n// manipulation of IPv4 facilities.\n//\n// The IPv4 protocol and basic host requirements for IPv4 are defined\n// in RFC 791 and RFC 1122.\n// Host extensions for multicasting and socket interface extensions\n// for multicast source filters are defined in RFC 1112 and RFC 3678.\n// IGMPv1, IGMPv2 and IGMPv3 are defined in RFC 1112, RFC 2236 and RFC\n// 3376.\n// Source-specific multicast is defined in RFC 4607.\n//\n//\n// Unicasting\n//\n// The options for unicasting are available for net.TCPConn,\n// net.UDPConn and net.IPConn which are created as network connections\n// that use the IPv4 transport. When a single TCP connection carrying\n// a data flow of multiple packets needs to indicate the flow is\n// important, Conn is used to set the type-of-service field on the\n// IPv4 header for each packet.\n//\n//\tln, err := net.Listen(\"tcp4\", \"0.0.0.0:1024\")\n//\tif err != nil {\n//\t\t// error handling\n//\t}\n//\tdefer ln.Close()\n//\tfor {\n//\t\tc, err := ln.Accept()\n//\t\tif err != nil {\n//\t\t\t// error handling\n//\t\t}\n//\t\tgo func(c net.Conn) {\n//\t\t\tdefer c.Close()\n//\n// The outgoing packets will be labeled DiffServ assured forwarding\n// class 1 low drop precedence, known as AF11 packets.\n//\n//\t\t\tif err := ipv4.NewConn(c).SetTOS(0x28); err != nil {\n//\t\t\t\t// error handling\n//\t\t\t}\n//\t\t\tif _, err := c.Write(data); err != nil {\n//\t\t\t\t// error handling\n//\t\t\t}\n//\t\t}(c)\n//\t}\n//\n//\n// Multicasting\n//\n// The options for multicasting are available for net.UDPConn and\n// net.IPconn which are created as network connections that use the\n// IPv4 transport. 
A few network facilities must be prepared before\n// you begin multicasting, at a minimum joining network interfaces and\n// multicast groups.\n//\n//\ten0, err := net.InterfaceByName(\"en0\")\n//\tif err != nil {\n//\t\t// error handling\n//\t}\n//\ten1, err := net.InterfaceByIndex(911)\n//\tif err != nil {\n//\t\t// error handling\n//\t}\n//\tgroup := net.IPv4(224, 0, 0, 250)\n//\n// First, an application listens to an appropriate address with an\n// appropriate service port.\n//\n//\tc, err := net.ListenPacket(\"udp4\", \"0.0.0.0:1024\")\n//\tif err != nil {\n//\t\t// error handling\n//\t}\n//\tdefer c.Close()\n//\n// Second, the application joins multicast groups, starts listening to\n// the groups on the specified network interfaces. Note that the\n// service port for transport layer protocol does not matter with this\n// operation as joining groups affects only network and link layer\n// protocols, such as IPv4 and Ethernet.\n//\n//\tp := ipv4.NewPacketConn(c)\n//\tif err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil {\n//\t\t// error handling\n//\t}\n//\tif err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil {\n//\t\t// error handling\n//\t}\n//\n// The application might set per packet control message transmissions\n// between the protocol stack within the kernel. 
When the application\n// needs a destination address on an incoming packet,\n// SetControlMessage of PacketConn is used to enable control message\n// transmissions.\n//\n//\tif err := p.SetControlMessage(ipv4.FlagDst, true); err != nil {\n//\t\t// error handling\n//\t}\n//\n// The application could identify whether the received packets are\n// of interest by using the control message that contains the\n// destination address of the received packet.\n//\n//\tb := make([]byte, 1500)\n//\tfor {\n//\t\tn, cm, src, err := p.ReadFrom(b)\n//\t\tif err != nil {\n//\t\t\t// error handling\n//\t\t}\n//\t\tif cm.Dst.IsMulticast() {\n//\t\t\tif cm.Dst.Equal(group) {\n//\t\t\t\t// joined group, do something\n//\t\t\t} else {\n//\t\t\t\t// unknown group, discard\n//\t\t\t\tcontinue\n//\t\t\t}\n//\t\t}\n//\n// The application can also send both unicast and multicast packets.\n//\n//\t\tp.SetTOS(0x0)\n//\t\tp.SetTTL(16)\n//\t\tif _, err := p.WriteTo(data, nil, src); err != nil {\n//\t\t\t// error handling\n//\t\t}\n//\t\tdst := &net.UDPAddr{IP: group, Port: 1024}\n//\t\tfor _, ifi := range []*net.Interface{en0, en1} {\n//\t\t\tif err := p.SetMulticastInterface(ifi); err != nil {\n//\t\t\t\t// error handling\n//\t\t\t}\n//\t\t\tp.SetMulticastTTL(2)\n//\t\t\tif _, err := p.WriteTo(data, nil, dst); err != nil {\n//\t\t\t\t// error handling\n//\t\t\t}\n//\t\t}\n//\t}\n//\n//\n// More multicasting\n//\n// An application that uses PacketConn or RawConn may join multiple\n// multicast groups. 
For example, a UDP listener with port 1024 might\n// join two different groups across over two different network\n// interfaces by using:\n//\n//\tc, err := net.ListenPacket(\"udp4\", \"0.0.0.0:1024\")\n//\tif err != nil {\n//\t\t// error handling\n//\t}\n//\tdefer c.Close()\n//\tp := ipv4.NewPacketConn(c)\n//\tif err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil {\n//\t\t// error handling\n//\t}\n//\tif err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil {\n//\t\t// error handling\n//\t}\n//\tif err := p.JoinGroup(en1, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil {\n//\t\t// error handling\n//\t}\n//\n// It is possible for multiple UDP listeners that listen on the same\n// UDP port to join the same multicast group. The net package will\n// provide a socket that listens to a wildcard address with reusable\n// UDP port when an appropriate multicast address prefix is passed to\n// the net.ListenPacket or net.ListenUDP.\n//\n//\tc1, err := net.ListenPacket(\"udp4\", \"224.0.0.0:1024\")\n//\tif err != nil {\n//\t\t// error handling\n//\t}\n//\tdefer c1.Close()\n//\tc2, err := net.ListenPacket(\"udp4\", \"224.0.0.0:1024\")\n//\tif err != nil {\n//\t\t// error handling\n//\t}\n//\tdefer c2.Close()\n//\tp1 := ipv4.NewPacketConn(c1)\n//\tif err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil {\n//\t\t// error handling\n//\t}\n//\tp2 := ipv4.NewPacketConn(c2)\n//\tif err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil {\n//\t\t// error handling\n//\t}\n//\n// Also it is possible for the application to leave or rejoin a\n// multicast group on the network interface.\n//\n//\tif err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil {\n//\t\t// error handling\n//\t}\n//\tif err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}); err != nil {\n//\t\t// error handling\n//\t}\n//\n//\n// Source-specific 
multicasting\n//\n// An application that uses PacketConn or RawConn on IGMPv3 supported\n// platform is able to join source-specific multicast groups.\n// The application may use JoinSourceSpecificGroup and\n// LeaveSourceSpecificGroup for the operation known as \"include\" mode,\n//\n//\tssmgroup := net.UDPAddr{IP: net.IPv4(232, 7, 8, 9)}\n//\tssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)})\n//\tif err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil {\n//\t\t// error handling\n//\t}\n//\tif err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil {\n//\t\t// error handling\n//\t}\n//\n// or JoinGroup, ExcludeSourceSpecificGroup,\n// IncludeSourceSpecificGroup and LeaveGroup for the operation known\n// as \"exclude\" mode.\n//\n//\texclsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 254)}\n//\tif err := p.JoinGroup(en0, &ssmgroup); err != nil {\n//\t\t// error handling\n//\t}\n//\tif err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil {\n//\t\t// error handling\n//\t}\n//\tif err := p.LeaveGroup(en0, &ssmgroup); err != nil {\n//\t\t// error handling\n//\t}\n//\n// Note that it depends on each platform implementation what happens\n// when an application which runs on IGMPv3 unsupported platform uses\n// JoinSourceSpecificGroup and LeaveSourceSpecificGroup.\n// In general the platform tries to fall back to conversations using\n// IGMPv1 or IGMPv2 and starts to listen to multicast traffic.\n// In the fallback case, ExcludeSourceSpecificGroup and\n// IncludeSourceSpecificGroup may return an error.\npackage ipv4 // import \"golang.org/x/net/ipv4\"\n\n// BUG(mikio): This package is not implemented on NaCl and Plan 9.\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/endpoint.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\n// BUG(mikio): On Windows, the JoinSourceSpecificGroup,\n// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and\n// IncludeSourceSpecificGroup methods of PacketConn and RawConn are\n// not implemented.\n\n// A Conn represents a network endpoint that uses the IPv4 transport.\n// It is used to control basic IP-level socket options such as TOS and\n// TTL.\ntype Conn struct {\n\tgenericOpt\n}\n\ntype genericOpt struct {\n\t*socket.Conn\n}\n\nfunc (c *genericOpt) ok() bool { return c != nil && c.Conn != nil }\n\n// NewConn returns a new Conn.\nfunc NewConn(c net.Conn) *Conn {\n\tcc, _ := socket.NewConn(c)\n\treturn &Conn{\n\t\tgenericOpt: genericOpt{Conn: cc},\n\t}\n}\n\n// A PacketConn represents a packet network endpoint that uses the\n// IPv4 transport. It is used to control several IP-level socket\n// options including multicasting. 
It also provides datagram based\n// network I/O methods specific to the IPv4 and higher layer protocols\n// such as UDP.\ntype PacketConn struct {\n\tgenericOpt\n\tdgramOpt\n\tpayloadHandler\n}\n\ntype dgramOpt struct {\n\t*socket.Conn\n}\n\nfunc (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil }\n\n// SetControlMessage sets the per packet IP-level socket options.\nfunc (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error {\n\tif !c.payloadHandler.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on)\n}\n\n// SetDeadline sets the read and write deadlines associated with the\n// endpoint.\nfunc (c *PacketConn) SetDeadline(t time.Time) error {\n\tif !c.payloadHandler.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn c.payloadHandler.PacketConn.SetDeadline(t)\n}\n\n// SetReadDeadline sets the read deadline associated with the\n// endpoint.\nfunc (c *PacketConn) SetReadDeadline(t time.Time) error {\n\tif !c.payloadHandler.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn c.payloadHandler.PacketConn.SetReadDeadline(t)\n}\n\n// SetWriteDeadline sets the write deadline associated with the\n// endpoint.\nfunc (c *PacketConn) SetWriteDeadline(t time.Time) error {\n\tif !c.payloadHandler.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn c.payloadHandler.PacketConn.SetWriteDeadline(t)\n}\n\n// Close closes the endpoint.\nfunc (c *PacketConn) Close() error {\n\tif !c.payloadHandler.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn c.payloadHandler.PacketConn.Close()\n}\n\n// NewPacketConn returns a new PacketConn using c as its underlying\n// transport.\nfunc NewPacketConn(c net.PacketConn) *PacketConn {\n\tcc, _ := socket.NewConn(c.(net.Conn))\n\tp := &PacketConn{\n\t\tgenericOpt:     genericOpt{Conn: cc},\n\t\tdgramOpt:       dgramOpt{Conn: cc},\n\t\tpayloadHandler: payloadHandler{PacketConn: c, Conn: cc},\n\t}\n\treturn p\n}\n\n// A RawConn represents a packet network endpoint that uses the 
IPv4\n// transport. It is used to control several IP-level socket options\n// including IPv4 header manipulation. It also provides datagram\n// based network I/O methods specific to the IPv4 and higher layer\n// protocols that handle IPv4 datagram directly such as OSPF, GRE.\ntype RawConn struct {\n\tgenericOpt\n\tdgramOpt\n\tpacketHandler\n}\n\n// SetControlMessage sets the per packet IP-level socket options.\nfunc (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error {\n\tif !c.packetHandler.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn setControlMessage(c.dgramOpt.Conn, &c.packetHandler.rawOpt, cf, on)\n}\n\n// SetDeadline sets the read and write deadlines associated with the\n// endpoint.\nfunc (c *RawConn) SetDeadline(t time.Time) error {\n\tif !c.packetHandler.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn c.packetHandler.IPConn.SetDeadline(t)\n}\n\n// SetReadDeadline sets the read deadline associated with the\n// endpoint.\nfunc (c *RawConn) SetReadDeadline(t time.Time) error {\n\tif !c.packetHandler.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn c.packetHandler.IPConn.SetReadDeadline(t)\n}\n\n// SetWriteDeadline sets the write deadline associated with the\n// endpoint.\nfunc (c *RawConn) SetWriteDeadline(t time.Time) error {\n\tif !c.packetHandler.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn c.packetHandler.IPConn.SetWriteDeadline(t)\n}\n\n// Close closes the endpoint.\nfunc (c *RawConn) Close() error {\n\tif !c.packetHandler.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn c.packetHandler.IPConn.Close()\n}\n\n// NewRawConn returns a new RawConn using c as its underlying\n// transport.\nfunc NewRawConn(c net.PacketConn) (*RawConn, error) {\n\tcc, err := socket.NewConn(c.(net.Conn))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := &RawConn{\n\t\tgenericOpt:    genericOpt{Conn: cc},\n\t\tdgramOpt:      dgramOpt{Conn: cc},\n\t\tpacketHandler: packetHandler{IPConn: c.(*net.IPConn), Conn: cc},\n\t}\n\tso, ok := 
sockOpts[ssoHeaderPrepend]\n\tif !ok {\n\t\treturn nil, errOpNoSupport\n\t}\n\tif err := so.SetInt(r.dgramOpt.Conn, boolint(true)); err != nil {\n\t\treturn nil, err\n\t}\n\treturn r, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/example_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4_test\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"golang.org/x/net/icmp\"\n\t\"golang.org/x/net/ipv4\"\n)\n\nfunc ExampleConn_markingTCP() {\n\tln, err := net.Listen(\"tcp\", \"0.0.0.0:1024\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer ln.Close()\n\n\tfor {\n\t\tc, err := ln.Accept()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tgo func(c net.Conn) {\n\t\t\tdefer c.Close()\n\t\t\tif c.RemoteAddr().(*net.TCPAddr).IP.To4() != nil {\n\t\t\t\tp := ipv4.NewConn(c)\n\t\t\t\tif err := p.SetTOS(0x28); err != nil { // DSCP AF11\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif err := p.SetTTL(128); err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif _, err := c.Write([]byte(\"HELLO-R-U-THERE-ACK\")); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}(c)\n\t}\n}\n\nfunc ExamplePacketConn_servingOneShotMulticastDNS() {\n\tc, err := net.ListenPacket(\"udp4\", \"0.0.0.0:5353\") // mDNS over UDP\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer c.Close()\n\tp := ipv4.NewPacketConn(c)\n\n\ten0, err := net.InterfaceByName(\"en0\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tmDNSLinkLocal := net.UDPAddr{IP: net.IPv4(224, 0, 0, 251)}\n\tif err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer p.LeaveGroup(en0, &mDNSLinkLocal)\n\tif err := p.SetControlMessage(ipv4.FlagDst, true); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tb := make([]byte, 1500)\n\tfor {\n\t\t_, cm, peer, err := p.ReadFrom(b)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif !cm.Dst.IsMulticast() || !cm.Dst.Equal(mDNSLinkLocal.IP) {\n\t\t\tcontinue\n\t\t}\n\t\tanswers := []byte(\"FAKE-MDNS-ANSWERS\") // fake mDNS answers, you need to implement this\n\t\tif _, err := p.WriteTo(answers, nil, peer); err 
!= nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc ExamplePacketConn_tracingIPPacketRoute() {\n\t// Tracing an IP packet route to www.google.com.\n\n\tconst host = \"www.google.com\"\n\tips, err := net.LookupIP(host)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar dst net.IPAddr\n\tfor _, ip := range ips {\n\t\tif ip.To4() != nil {\n\t\t\tdst.IP = ip\n\t\t\tfmt.Printf(\"using %v for tracing an IP packet route to %s\\n\", dst.IP, host)\n\t\t\tbreak\n\t\t}\n\t}\n\tif dst.IP == nil {\n\t\tlog.Fatal(\"no A record found\")\n\t}\n\n\tc, err := net.ListenPacket(\"ip4:1\", \"0.0.0.0\") // ICMP for IPv4\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer c.Close()\n\tp := ipv4.NewPacketConn(c)\n\n\tif err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagSrc|ipv4.FlagDst|ipv4.FlagInterface, true); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\twm := icmp.Message{\n\t\tType: ipv4.ICMPTypeEcho, Code: 0,\n\t\tBody: &icmp.Echo{\n\t\t\tID:   os.Getpid() & 0xffff,\n\t\t\tData: []byte(\"HELLO-R-U-THERE\"),\n\t\t},\n\t}\n\n\trb := make([]byte, 1500)\n\tfor i := 1; i <= 64; i++ { // up to 64 hops\n\t\twm.Body.(*icmp.Echo).Seq = i\n\t\twb, err := wm.Marshal(nil)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif err := p.SetTTL(i); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\t// In the real world usually there are several\n\t\t// multiple traffic-engineered paths for each hop.\n\t\t// You may need to probe a few times to each hop.\n\t\tbegin := time.Now()\n\t\tif _, err := p.WriteTo(wb, nil, &dst); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tn, cm, peer, err := p.ReadFrom(rb)\n\t\tif err != nil {\n\t\t\tif err, ok := err.(net.Error); ok && err.Timeout() {\n\t\t\t\tfmt.Printf(\"%v\\t*\\n\", i)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\trm, err := icmp.ParseMessage(1, rb[:n])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\trtt := 
time.Since(begin)\n\n\t\t// In the real world you need to determine whether the\n\t\t// received message is yours using ControlMessage.Src,\n\t\t// ControlMessage.Dst, icmp.Echo.ID and icmp.Echo.Seq.\n\t\tswitch rm.Type {\n\t\tcase ipv4.ICMPTypeTimeExceeded:\n\t\t\tnames, _ := net.LookupAddr(peer.String())\n\t\t\tfmt.Printf(\"%d\\t%v %+v %v\\n\\t%+v\\n\", i, peer, names, rtt, cm)\n\t\tcase ipv4.ICMPTypeEchoReply:\n\t\t\tnames, _ := net.LookupAddr(peer.String())\n\t\t\tfmt.Printf(\"%d\\t%v %+v %v\\n\\t%+v\\n\", i, peer, names, rtt, cm)\n\t\t\treturn\n\t\tdefault:\n\t\t\tlog.Printf(\"unknown ICMP message: %+v\\n\", rm)\n\t\t}\n\t}\n}\n\nfunc ExampleRawConn_advertisingOSPFHello() {\n\tc, err := net.ListenPacket(\"ip4:89\", \"0.0.0.0\") // OSPF for IPv4\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer c.Close()\n\tr, err := ipv4.NewRawConn(c)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ten0, err := net.InterfaceByName(\"en0\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tallSPFRouters := net.IPAddr{IP: net.IPv4(224, 0, 0, 5)}\n\tif err := r.JoinGroup(en0, &allSPFRouters); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer r.LeaveGroup(en0, &allSPFRouters)\n\n\thello := make([]byte, 24) // fake hello data, you need to implement this\n\tospf := make([]byte, 24)  // fake ospf header, you need to implement this\n\tospf[0] = 2               // version 2\n\tospf[1] = 1               // hello packet\n\tospf = append(ospf, hello...)\n\tiph := &ipv4.Header{\n\t\tVersion:  ipv4.Version,\n\t\tLen:      ipv4.HeaderLen,\n\t\tTOS:      0xc0, // DSCP CS6\n\t\tTotalLen: ipv4.HeaderLen + len(ospf),\n\t\tTTL:      1,\n\t\tProtocol: 89,\n\t\tDst:      allSPFRouters.IP.To4(),\n\t}\n\n\tvar cm *ipv4.ControlMessage\n\tswitch runtime.GOOS {\n\tcase \"darwin\", \"linux\":\n\t\tcm = &ipv4.ControlMessage{IfIndex: en0.Index}\n\tdefault:\n\t\tif err := r.SetMulticastInterface(en0); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\tif err := r.WriteTo(iph, ospf, cm); err != nil 
{\n\t\tlog.Fatal(err)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/gen.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n//go:generate go run gen.go\n\n// This program generates system adaptation constants and types,\n// internet protocol constants and tables by reading template files\n// and IANA protocol registries.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding/xml\"\n\t\"fmt\"\n\t\"go/format\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/exec\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc main() {\n\tif err := genzsys(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\tif err := geniana(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc genzsys() error {\n\tdefs := \"defs_\" + runtime.GOOS + \".go\"\n\tf, err := os.Open(defs)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tf.Close()\n\tcmd := exec.Command(\"go\", \"tool\", \"cgo\", \"-godefs\", defs)\n\tb, err := cmd.Output()\n\tif err != nil {\n\t\treturn err\n\t}\n\tb, err = format.Source(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tzsys := \"zsys_\" + runtime.GOOS + \".go\"\n\tswitch runtime.GOOS {\n\tcase \"freebsd\", \"linux\":\n\t\tzsys = \"zsys_\" + runtime.GOOS + \"_\" + runtime.GOARCH + \".go\"\n\t}\n\tif err := ioutil.WriteFile(zsys, b, 0644); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nvar registries = []struct {\n\turl   string\n\tparse func(io.Writer, io.Reader) error\n}{\n\t{\n\t\t\"http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml\",\n\t\tparseICMPv4Parameters,\n\t},\n}\n\nfunc geniana() error {\n\tvar bb bytes.Buffer\n\tfmt.Fprintf(&bb, \"// go generate gen.go\\n\")\n\tfmt.Fprintf(&bb, \"// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\\n\\n\")\n\tfmt.Fprintf(&bb, \"package ipv4\\n\\n\")\n\tfor _, r := range registries {\n\t\tresp, err := 
http.Get(r.url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\treturn fmt.Errorf(\"got HTTP status code %v for %v\\n\", resp.StatusCode, r.url)\n\t\t}\n\t\tif err := r.parse(&bb, resp.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintf(&bb, \"\\n\")\n\t}\n\tb, err := format.Source(bb.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := ioutil.WriteFile(\"iana.go\", b, 0644); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc parseICMPv4Parameters(w io.Writer, r io.Reader) error {\n\tdec := xml.NewDecoder(r)\n\tvar icp icmpv4Parameters\n\tif err := dec.Decode(&icp); err != nil {\n\t\treturn err\n\t}\n\tprs := icp.escape()\n\tfmt.Fprintf(w, \"// %s, Updated: %s\\n\", icp.Title, icp.Updated)\n\tfmt.Fprintf(w, \"const (\\n\")\n\tfor _, pr := range prs {\n\t\tif pr.Descr == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(w, \"ICMPType%s ICMPType = %d\", pr.Descr, pr.Value)\n\t\tfmt.Fprintf(w, \"// %s\\n\", pr.OrigDescr)\n\t}\n\tfmt.Fprintf(w, \")\\n\\n\")\n\tfmt.Fprintf(w, \"// %s, Updated: %s\\n\", icp.Title, icp.Updated)\n\tfmt.Fprintf(w, \"var icmpTypes = map[ICMPType]string{\\n\")\n\tfor _, pr := range prs {\n\t\tif pr.Descr == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(w, \"%d: %q,\\n\", pr.Value, strings.ToLower(pr.OrigDescr))\n\t}\n\tfmt.Fprintf(w, \"}\\n\")\n\treturn nil\n}\n\ntype icmpv4Parameters struct {\n\tXMLName    xml.Name `xml:\"registry\"`\n\tTitle      string   `xml:\"title\"`\n\tUpdated    string   `xml:\"updated\"`\n\tRegistries []struct {\n\t\tTitle   string `xml:\"title\"`\n\t\tRecords []struct {\n\t\t\tValue string `xml:\"value\"`\n\t\t\tDescr string `xml:\"description\"`\n\t\t} `xml:\"record\"`\n\t} `xml:\"registry\"`\n}\n\ntype canonICMPv4ParamRecord struct {\n\tOrigDescr string\n\tDescr     string\n\tValue     int\n}\n\nfunc (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord {\n\tid := -1\n\tfor i, r := range icp.Registries 
{\n\t\tif strings.Contains(r.Title, \"Type\") || strings.Contains(r.Title, \"type\") {\n\t\t\tid = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif id < 0 {\n\t\treturn nil\n\t}\n\tprs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records))\n\tsr := strings.NewReplacer(\n\t\t\"Messages\", \"\",\n\t\t\"Message\", \"\",\n\t\t\"ICMP\", \"\",\n\t\t\"+\", \"P\",\n\t\t\"-\", \"\",\n\t\t\"/\", \"\",\n\t\t\".\", \"\",\n\t\t\" \", \"\",\n\t)\n\tfor i, pr := range icp.Registries[id].Records {\n\t\tif strings.Contains(pr.Descr, \"Reserved\") ||\n\t\t\tstrings.Contains(pr.Descr, \"Unassigned\") ||\n\t\t\tstrings.Contains(pr.Descr, \"Deprecated\") ||\n\t\t\tstrings.Contains(pr.Descr, \"Experiment\") ||\n\t\t\tstrings.Contains(pr.Descr, \"experiment\") {\n\t\t\tcontinue\n\t\t}\n\t\tss := strings.Split(pr.Descr, \"\\n\")\n\t\tif len(ss) > 1 {\n\t\t\tprs[i].Descr = strings.Join(ss, \" \")\n\t\t} else {\n\t\t\tprs[i].Descr = ss[0]\n\t\t}\n\t\ts := strings.TrimSpace(prs[i].Descr)\n\t\tprs[i].OrigDescr = s\n\t\tprs[i].Descr = sr.Replace(s)\n\t\tprs[i].Value, _ = strconv.Atoi(pr.Value)\n\t}\n\treturn prs\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/genericopt.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport \"syscall\"\n\n// TOS returns the type-of-service field value for outgoing packets.\nfunc (c *genericOpt) TOS() (int, error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoTOS]\n\tif !ok {\n\t\treturn 0, errOpNoSupport\n\t}\n\treturn so.GetInt(c.Conn)\n}\n\n// SetTOS sets the type-of-service field value for future outgoing\n// packets.\nfunc (c *genericOpt) SetTOS(tos int) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoTOS]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\treturn so.SetInt(c.Conn, tos)\n}\n\n// TTL returns the time-to-live field value for outgoing packets.\nfunc (c *genericOpt) TTL() (int, error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoTTL]\n\tif !ok {\n\t\treturn 0, errOpNoSupport\n\t}\n\treturn so.GetInt(c.Conn)\n}\n\n// SetTTL sets the time-to-live field value for future outgoing\n// packets.\nfunc (c *genericOpt) SetTTL(ttl int) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoTTL]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\treturn so.SetInt(c.Conn, ttl)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/header.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"net\"\n\t\"runtime\"\n\t\"syscall\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\nconst (\n\tVersion      = 4  // protocol version\n\tHeaderLen    = 20 // header length without extension headers\n\tmaxHeaderLen = 60 // sensible default, revisit if later RFCs define new usage of version and header length fields\n)\n\ntype HeaderFlags int\n\nconst (\n\tMoreFragments HeaderFlags = 1 << iota // more fragments flag\n\tDontFragment                          // don't fragment flag\n)\n\n// A Header represents an IPv4 header.\ntype Header struct {\n\tVersion  int         // protocol version\n\tLen      int         // header length\n\tTOS      int         // type-of-service\n\tTotalLen int         // packet total length\n\tID       int         // identification\n\tFlags    HeaderFlags // flags\n\tFragOff  int         // fragment offset\n\tTTL      int         // time-to-live\n\tProtocol int         // next protocol\n\tChecksum int         // checksum\n\tSrc      net.IP      // source address\n\tDst      net.IP      // destination address\n\tOptions  []byte      // options, extension headers\n}\n\nfunc (h *Header) String() string {\n\tif h == nil {\n\t\treturn \"<nil>\"\n\t}\n\treturn fmt.Sprintf(\"ver=%d hdrlen=%d tos=%#x totallen=%d id=%#x flags=%#x fragoff=%#x ttl=%d proto=%d cksum=%#x src=%v dst=%v\", h.Version, h.Len, h.TOS, h.TotalLen, h.ID, h.Flags, h.FragOff, h.TTL, h.Protocol, h.Checksum, h.Src, h.Dst)\n}\n\n// Marshal returns the binary encoding of h.\nfunc (h *Header) Marshal() ([]byte, error) {\n\tif h == nil {\n\t\treturn nil, syscall.EINVAL\n\t}\n\tif h.Len < HeaderLen {\n\t\treturn nil, errHeaderTooShort\n\t}\n\thdrlen := HeaderLen + len(h.Options)\n\tb := make([]byte, hdrlen)\n\tb[0] = byte(Version<<4 | (hdrlen >> 2 & 
0x0f))\n\tb[1] = byte(h.TOS)\n\tflagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13)\n\tswitch runtime.GOOS {\n\tcase \"darwin\", \"dragonfly\", \"netbsd\":\n\t\tsocket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen))\n\t\tsocket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff))\n\tcase \"freebsd\":\n\t\tif freebsdVersion < 1100000 {\n\t\t\tsocket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen))\n\t\t\tsocket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff))\n\t\t} else {\n\t\t\tbinary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen))\n\t\t\tbinary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff))\n\t\t}\n\tdefault:\n\t\tbinary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen))\n\t\tbinary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff))\n\t}\n\tbinary.BigEndian.PutUint16(b[4:6], uint16(h.ID))\n\tb[8] = byte(h.TTL)\n\tb[9] = byte(h.Protocol)\n\tbinary.BigEndian.PutUint16(b[10:12], uint16(h.Checksum))\n\tif ip := h.Src.To4(); ip != nil {\n\t\tcopy(b[12:16], ip[:net.IPv4len])\n\t}\n\tif ip := h.Dst.To4(); ip != nil {\n\t\tcopy(b[16:20], ip[:net.IPv4len])\n\t} else {\n\t\treturn nil, errMissingAddress\n\t}\n\tif len(h.Options) > 0 {\n\t\tcopy(b[HeaderLen:], h.Options)\n\t}\n\treturn b, nil\n}\n\n// Parse parses b as an IPv4 header and sotres the result in h.\nfunc (h *Header) Parse(b []byte) error {\n\tif h == nil || len(b) < HeaderLen {\n\t\treturn errHeaderTooShort\n\t}\n\thdrlen := int(b[0]&0x0f) << 2\n\tif hdrlen > len(b) {\n\t\treturn errBufferTooShort\n\t}\n\th.Version = int(b[0] >> 4)\n\th.Len = hdrlen\n\th.TOS = int(b[1])\n\th.ID = int(binary.BigEndian.Uint16(b[4:6]))\n\th.TTL = int(b[8])\n\th.Protocol = int(b[9])\n\th.Checksum = int(binary.BigEndian.Uint16(b[10:12]))\n\th.Src = net.IPv4(b[12], b[13], b[14], b[15])\n\th.Dst = net.IPv4(b[16], b[17], b[18], b[19])\n\tswitch runtime.GOOS {\n\tcase \"darwin\", \"dragonfly\", \"netbsd\":\n\t\th.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + hdrlen\n\t\th.FragOff = 
int(socket.NativeEndian.Uint16(b[6:8]))\n\tcase \"freebsd\":\n\t\tif freebsdVersion < 1100000 {\n\t\t\th.TotalLen = int(socket.NativeEndian.Uint16(b[2:4]))\n\t\t\tif freebsdVersion < 1000000 {\n\t\t\t\th.TotalLen += hdrlen\n\t\t\t}\n\t\t\th.FragOff = int(socket.NativeEndian.Uint16(b[6:8]))\n\t\t} else {\n\t\t\th.TotalLen = int(binary.BigEndian.Uint16(b[2:4]))\n\t\t\th.FragOff = int(binary.BigEndian.Uint16(b[6:8]))\n\t\t}\n\tdefault:\n\t\th.TotalLen = int(binary.BigEndian.Uint16(b[2:4]))\n\t\th.FragOff = int(binary.BigEndian.Uint16(b[6:8]))\n\t}\n\th.Flags = HeaderFlags(h.FragOff&0xe000) >> 13\n\th.FragOff = h.FragOff & 0x1fff\n\toptlen := hdrlen - HeaderLen\n\tif optlen > 0 && len(b) >= hdrlen {\n\t\tif cap(h.Options) < optlen {\n\t\t\th.Options = make([]byte, optlen)\n\t\t} else {\n\t\t\th.Options = h.Options[:optlen]\n\t\t}\n\t\tcopy(h.Options, b[HeaderLen:hdrlen])\n\t}\n\treturn nil\n}\n\n// ParseHeader parses b as an IPv4 header.\nfunc ParseHeader(b []byte) (*Header, error) {\n\th := new(Header)\n\tif err := h.Parse(b); err != nil {\n\t\treturn nil, err\n\t}\n\treturn h, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/header_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"net\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\ntype headerTest struct {\n\twireHeaderFromKernel          []byte\n\twireHeaderToKernel            []byte\n\twireHeaderFromTradBSDKernel   []byte\n\twireHeaderToTradBSDKernel     []byte\n\twireHeaderFromFreeBSD10Kernel []byte\n\twireHeaderToFreeBSD10Kernel   []byte\n\t*Header\n}\n\nvar headerLittleEndianTests = []headerTest{\n\t// TODO(mikio): Add platform dependent wire header formats when\n\t// we support new platforms.\n\t{\n\t\twireHeaderFromKernel: []byte{\n\t\t\t0x45, 0x01, 0xbe, 0xef,\n\t\t\t0xca, 0xfe, 0x45, 0xdc,\n\t\t\t0xff, 0x01, 0xde, 0xad,\n\t\t\t172, 16, 254, 254,\n\t\t\t192, 168, 0, 1,\n\t\t},\n\t\twireHeaderToKernel: []byte{\n\t\t\t0x45, 0x01, 0xbe, 0xef,\n\t\t\t0xca, 0xfe, 0x45, 0xdc,\n\t\t\t0xff, 0x01, 0xde, 0xad,\n\t\t\t172, 16, 254, 254,\n\t\t\t192, 168, 0, 1,\n\t\t},\n\t\twireHeaderFromTradBSDKernel: []byte{\n\t\t\t0x45, 0x01, 0xdb, 0xbe,\n\t\t\t0xca, 0xfe, 0xdc, 0x45,\n\t\t\t0xff, 0x01, 0xde, 0xad,\n\t\t\t172, 16, 254, 254,\n\t\t\t192, 168, 0, 1,\n\t\t},\n\t\twireHeaderToTradBSDKernel: []byte{\n\t\t\t0x45, 0x01, 0xef, 0xbe,\n\t\t\t0xca, 0xfe, 0xdc, 0x45,\n\t\t\t0xff, 0x01, 0xde, 0xad,\n\t\t\t172, 16, 254, 254,\n\t\t\t192, 168, 0, 1,\n\t\t},\n\t\twireHeaderFromFreeBSD10Kernel: []byte{\n\t\t\t0x45, 0x01, 0xef, 0xbe,\n\t\t\t0xca, 0xfe, 0xdc, 0x45,\n\t\t\t0xff, 0x01, 0xde, 0xad,\n\t\t\t172, 16, 254, 254,\n\t\t\t192, 168, 0, 1,\n\t\t},\n\t\twireHeaderToFreeBSD10Kernel: []byte{\n\t\t\t0x45, 0x01, 0xef, 0xbe,\n\t\t\t0xca, 0xfe, 0xdc, 0x45,\n\t\t\t0xff, 0x01, 0xde, 0xad,\n\t\t\t172, 16, 254, 254,\n\t\t\t192, 168, 0, 1,\n\t\t},\n\t\tHeader: &Header{\n\t\t\tVersion:  Version,\n\t\t\tLen:      
HeaderLen,\n\t\t\tTOS:      1,\n\t\t\tTotalLen: 0xbeef,\n\t\t\tID:       0xcafe,\n\t\t\tFlags:    DontFragment,\n\t\t\tFragOff:  1500,\n\t\t\tTTL:      255,\n\t\t\tProtocol: 1,\n\t\t\tChecksum: 0xdead,\n\t\t\tSrc:      net.IPv4(172, 16, 254, 254),\n\t\t\tDst:      net.IPv4(192, 168, 0, 1),\n\t\t},\n\t},\n\n\t// with option headers\n\t{\n\t\twireHeaderFromKernel: []byte{\n\t\t\t0x46, 0x01, 0xbe, 0xf3,\n\t\t\t0xca, 0xfe, 0x45, 0xdc,\n\t\t\t0xff, 0x01, 0xde, 0xad,\n\t\t\t172, 16, 254, 254,\n\t\t\t192, 168, 0, 1,\n\t\t\t0xff, 0xfe, 0xfe, 0xff,\n\t\t},\n\t\twireHeaderToKernel: []byte{\n\t\t\t0x46, 0x01, 0xbe, 0xf3,\n\t\t\t0xca, 0xfe, 0x45, 0xdc,\n\t\t\t0xff, 0x01, 0xde, 0xad,\n\t\t\t172, 16, 254, 254,\n\t\t\t192, 168, 0, 1,\n\t\t\t0xff, 0xfe, 0xfe, 0xff,\n\t\t},\n\t\twireHeaderFromTradBSDKernel: []byte{\n\t\t\t0x46, 0x01, 0xdb, 0xbe,\n\t\t\t0xca, 0xfe, 0xdc, 0x45,\n\t\t\t0xff, 0x01, 0xde, 0xad,\n\t\t\t172, 16, 254, 254,\n\t\t\t192, 168, 0, 1,\n\t\t\t0xff, 0xfe, 0xfe, 0xff,\n\t\t},\n\t\twireHeaderToTradBSDKernel: []byte{\n\t\t\t0x46, 0x01, 0xf3, 0xbe,\n\t\t\t0xca, 0xfe, 0xdc, 0x45,\n\t\t\t0xff, 0x01, 0xde, 0xad,\n\t\t\t172, 16, 254, 254,\n\t\t\t192, 168, 0, 1,\n\t\t\t0xff, 0xfe, 0xfe, 0xff,\n\t\t},\n\t\twireHeaderFromFreeBSD10Kernel: []byte{\n\t\t\t0x46, 0x01, 0xf3, 0xbe,\n\t\t\t0xca, 0xfe, 0xdc, 0x45,\n\t\t\t0xff, 0x01, 0xde, 0xad,\n\t\t\t172, 16, 254, 254,\n\t\t\t192, 168, 0, 1,\n\t\t\t0xff, 0xfe, 0xfe, 0xff,\n\t\t},\n\t\twireHeaderToFreeBSD10Kernel: []byte{\n\t\t\t0x46, 0x01, 0xf3, 0xbe,\n\t\t\t0xca, 0xfe, 0xdc, 0x45,\n\t\t\t0xff, 0x01, 0xde, 0xad,\n\t\t\t172, 16, 254, 254,\n\t\t\t192, 168, 0, 1,\n\t\t\t0xff, 0xfe, 0xfe, 0xff,\n\t\t},\n\t\tHeader: &Header{\n\t\t\tVersion:  Version,\n\t\t\tLen:      HeaderLen + 4,\n\t\t\tTOS:      1,\n\t\t\tTotalLen: 0xbef3,\n\t\t\tID:       0xcafe,\n\t\t\tFlags:    DontFragment,\n\t\t\tFragOff:  1500,\n\t\t\tTTL:      255,\n\t\t\tProtocol: 1,\n\t\t\tChecksum: 0xdead,\n\t\t\tSrc:      net.IPv4(172, 16, 254, 254),\n\t\t\tDst:      
net.IPv4(192, 168, 0, 1),\n\t\t\tOptions:  []byte{0xff, 0xfe, 0xfe, 0xff},\n\t\t},\n\t},\n}\n\nfunc TestMarshalHeader(t *testing.T) {\n\tif socket.NativeEndian != binary.LittleEndian {\n\t\tt.Skip(\"no test for non-little endian machine yet\")\n\t}\n\n\tfor _, tt := range headerLittleEndianTests {\n\t\tb, err := tt.Header.Marshal()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tvar wh []byte\n\t\tswitch runtime.GOOS {\n\t\tcase \"darwin\", \"dragonfly\", \"netbsd\":\n\t\t\twh = tt.wireHeaderToTradBSDKernel\n\t\tcase \"freebsd\":\n\t\t\tswitch {\n\t\t\tcase freebsdVersion < 1000000:\n\t\t\t\twh = tt.wireHeaderToTradBSDKernel\n\t\t\tcase 1000000 <= freebsdVersion && freebsdVersion < 1100000:\n\t\t\t\twh = tt.wireHeaderToFreeBSD10Kernel\n\t\t\tdefault:\n\t\t\t\twh = tt.wireHeaderToKernel\n\t\t\t}\n\t\tdefault:\n\t\t\twh = tt.wireHeaderToKernel\n\t\t}\n\t\tif !bytes.Equal(b, wh) {\n\t\t\tt.Fatalf(\"got %#v; want %#v\", b, wh)\n\t\t}\n\t}\n}\n\nfunc TestParseHeader(t *testing.T) {\n\tif socket.NativeEndian != binary.LittleEndian {\n\t\tt.Skip(\"no test for big endian machine yet\")\n\t}\n\n\tfor _, tt := range headerLittleEndianTests {\n\t\tvar wh []byte\n\t\tswitch runtime.GOOS {\n\t\tcase \"darwin\", \"dragonfly\", \"netbsd\":\n\t\t\twh = tt.wireHeaderFromTradBSDKernel\n\t\tcase \"freebsd\":\n\t\t\tswitch {\n\t\t\tcase freebsdVersion < 1000000:\n\t\t\t\twh = tt.wireHeaderFromTradBSDKernel\n\t\t\tcase 1000000 <= freebsdVersion && freebsdVersion < 1100000:\n\t\t\t\twh = tt.wireHeaderFromFreeBSD10Kernel\n\t\t\tdefault:\n\t\t\t\twh = tt.wireHeaderFromKernel\n\t\t\t}\n\t\tdefault:\n\t\t\twh = tt.wireHeaderFromKernel\n\t\t}\n\t\th, err := ParseHeader(wh)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := h.Parse(wh); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !reflect.DeepEqual(h, tt.Header) {\n\t\t\tt.Fatalf(\"got %#v; want %#v\", h, tt.Header)\n\t\t}\n\t\ts := h.String()\n\t\tif strings.Contains(s, \",\") {\n\t\t\tt.Fatalf(\"should be 
space-separated values: %s\", s)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/helper.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport (\n\t\"errors\"\n\t\"net\"\n)\n\nvar (\n\terrMissingAddress           = errors.New(\"missing address\")\n\terrMissingHeader            = errors.New(\"missing header\")\n\terrHeaderTooShort           = errors.New(\"header too short\")\n\terrBufferTooShort           = errors.New(\"buffer too short\")\n\terrInvalidConnType          = errors.New(\"invalid conn type\")\n\terrOpNoSupport              = errors.New(\"operation not supported\")\n\terrNoSuchInterface          = errors.New(\"no such interface\")\n\terrNoSuchMulticastInterface = errors.New(\"no such multicast interface\")\n\n\t// See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html.\n\tfreebsdVersion uint32\n)\n\nfunc boolint(b bool) int {\n\tif b {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc netAddrToIP4(a net.Addr) net.IP {\n\tswitch v := a.(type) {\n\tcase *net.UDPAddr:\n\t\tif ip := v.IP.To4(); ip != nil {\n\t\t\treturn ip\n\t\t}\n\tcase *net.IPAddr:\n\t\tif ip := v.IP.To4(); ip != nil {\n\t\t\treturn ip\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc opAddr(a net.Addr) net.Addr {\n\tswitch a.(type) {\n\tcase *net.TCPAddr:\n\t\tif a == nil {\n\t\t\treturn nil\n\t\t}\n\tcase *net.UDPAddr:\n\t\tif a == nil {\n\t\t\treturn nil\n\t\t}\n\tcase *net.IPAddr:\n\t\tif a == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn a\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/iana.go",
    "content": "// go generate gen.go\n// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\npackage ipv4\n\n// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19\nconst (\n\tICMPTypeEchoReply              ICMPType = 0  // Echo Reply\n\tICMPTypeDestinationUnreachable ICMPType = 3  // Destination Unreachable\n\tICMPTypeRedirect               ICMPType = 5  // Redirect\n\tICMPTypeEcho                   ICMPType = 8  // Echo\n\tICMPTypeRouterAdvertisement    ICMPType = 9  // Router Advertisement\n\tICMPTypeRouterSolicitation     ICMPType = 10 // Router Solicitation\n\tICMPTypeTimeExceeded           ICMPType = 11 // Time Exceeded\n\tICMPTypeParameterProblem       ICMPType = 12 // Parameter Problem\n\tICMPTypeTimestamp              ICMPType = 13 // Timestamp\n\tICMPTypeTimestampReply         ICMPType = 14 // Timestamp Reply\n\tICMPTypePhoturis               ICMPType = 40 // Photuris\n)\n\n// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19\nvar icmpTypes = map[ICMPType]string{\n\t0:  \"echo reply\",\n\t3:  \"destination unreachable\",\n\t5:  \"redirect\",\n\t8:  \"echo\",\n\t9:  \"router advertisement\",\n\t10: \"router solicitation\",\n\t11: \"time exceeded\",\n\t12: \"parameter problem\",\n\t13: \"timestamp\",\n\t14: \"timestamp reply\",\n\t40: \"photuris\",\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/icmp.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport \"golang.org/x/net/internal/iana\"\n\n// An ICMPType represents a type of ICMP message.\ntype ICMPType int\n\nfunc (typ ICMPType) String() string {\n\ts, ok := icmpTypes[typ]\n\tif !ok {\n\t\treturn \"<nil>\"\n\t}\n\treturn s\n}\n\n// Protocol returns the ICMPv4 protocol number.\nfunc (typ ICMPType) Protocol() int {\n\treturn iana.ProtocolICMP\n}\n\n// An ICMPFilter represents an ICMP message filter for incoming\n// packets. The filter belongs to a packet delivery path on a host and\n// it cannot interact with forwarding packets or tunnel-outer packets.\n//\n// Note: RFC 8200 defines a reasonable role model and it works not\n// only for IPv6 but IPv4. A node means a device that implements IP.\n// A router means a node that forwards IP packets not explicitly\n// addressed to itself, and a host means a node that is not a router.\ntype ICMPFilter struct {\n\ticmpFilter\n}\n\n// Accept accepts incoming ICMP packets including the type field value\n// typ.\nfunc (f *ICMPFilter) Accept(typ ICMPType) {\n\tf.accept(typ)\n}\n\n// Block blocks incoming ICMP packets including the type field value\n// typ.\nfunc (f *ICMPFilter) Block(typ ICMPType) {\n\tf.block(typ)\n}\n\n// SetAll sets the filter action to the filter.\nfunc (f *ICMPFilter) SetAll(block bool) {\n\tf.setAll(block)\n}\n\n// WillBlock reports whether the ICMP type will be blocked.\nfunc (f *ICMPFilter) WillBlock(typ ICMPType) bool {\n\treturn f.willBlock(typ)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/icmp_linux.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nfunc (f *icmpFilter) accept(typ ICMPType) {\n\tf.Data &^= 1 << (uint32(typ) & 31)\n}\n\nfunc (f *icmpFilter) block(typ ICMPType) {\n\tf.Data |= 1 << (uint32(typ) & 31)\n}\n\nfunc (f *icmpFilter) setAll(block bool) {\n\tif block {\n\t\tf.Data = 1<<32 - 1\n\t} else {\n\t\tf.Data = 0\n\t}\n}\n\nfunc (f *icmpFilter) willBlock(typ ICMPType) bool {\n\treturn f.Data&(1<<(uint32(typ)&31)) != 0\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/icmp_stub.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !linux\n\npackage ipv4\n\nconst sizeofICMPFilter = 0x0\n\ntype icmpFilter struct {\n}\n\nfunc (f *icmpFilter) accept(typ ICMPType) {\n}\n\nfunc (f *icmpFilter) block(typ ICMPType) {\n}\n\nfunc (f *icmpFilter) setAll(block bool) {\n}\n\nfunc (f *icmpFilter) willBlock(typ ICMPType) bool {\n\treturn false\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/icmp_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4_test\n\nimport (\n\t\"net\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv4\"\n)\n\nvar icmpStringTests = []struct {\n\tin  ipv4.ICMPType\n\tout string\n}{\n\t{ipv4.ICMPTypeDestinationUnreachable, \"destination unreachable\"},\n\n\t{256, \"<nil>\"},\n}\n\nfunc TestICMPString(t *testing.T) {\n\tfor _, tt := range icmpStringTests {\n\t\ts := tt.in.String()\n\t\tif s != tt.out {\n\t\t\tt.Errorf(\"got %s; want %s\", s, tt.out)\n\t\t}\n\t}\n}\n\nfunc TestICMPFilter(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\tdefault:\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\n\tvar f ipv4.ICMPFilter\n\tfor _, toggle := range []bool{false, true} {\n\t\tf.SetAll(toggle)\n\t\tfor _, typ := range []ipv4.ICMPType{\n\t\t\tipv4.ICMPTypeDestinationUnreachable,\n\t\t\tipv4.ICMPTypeEchoReply,\n\t\t\tipv4.ICMPTypeTimeExceeded,\n\t\t\tipv4.ICMPTypeParameterProblem,\n\t\t} {\n\t\t\tf.Accept(typ)\n\t\t\tif f.WillBlock(typ) {\n\t\t\t\tt.Errorf(\"ipv4.ICMPFilter.Set(%v, false) failed\", typ)\n\t\t\t}\n\t\t\tf.Block(typ)\n\t\t\tif !f.WillBlock(typ) {\n\t\t\t\tt.Errorf(\"ipv4.ICMPFilter.Set(%v, true) failed\", typ)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestSetICMPFilter(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\tdefault:\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif m, ok := nettest.SupportsRawIPSocket(); !ok {\n\t\tt.Skip(m)\n\t}\n\n\tc, err := net.ListenPacket(\"ip4:icmp\", \"127.0.0.1\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\tp := ipv4.NewPacketConn(c)\n\n\tvar f ipv4.ICMPFilter\n\tf.SetAll(true)\n\tf.Accept(ipv4.ICMPTypeEcho)\n\tf.Accept(ipv4.ICMPTypeEchoReply)\n\tif err := p.SetICMPFilter(&f); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tkf, err := 
p.ICMPFilter()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !reflect.DeepEqual(kf, &f) {\n\t\tt.Fatalf(\"got %#v; want %#v\", kf, f)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/multicast_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4_test\n\nimport (\n\t\"bytes\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org/x/net/icmp\"\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv4\"\n)\n\nvar packetConnReadWriteMulticastUDPTests = []struct {\n\taddr     string\n\tgrp, src *net.UDPAddr\n}{\n\t{\"224.0.0.0:0\", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727\n\n\t{\"232.0.1.0:0\", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771\n}\n\nfunc TestPacketConnReadWriteMulticastUDP(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"solaris\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tifi := nettest.RoutedInterface(\"ip4\", net.FlagUp|net.FlagMulticast|net.FlagLoopback)\n\tif ifi == nil {\n\t\tt.Skipf(\"not available on %s\", runtime.GOOS)\n\t}\n\n\tfor _, tt := range packetConnReadWriteMulticastUDPTests {\n\t\tc, err := net.ListenPacket(\"udp4\", tt.addr)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c.Close()\n\n\t\tgrp := *tt.grp\n\t\tgrp.Port = c.LocalAddr().(*net.UDPAddr).Port\n\t\tp := ipv4.NewPacketConn(c)\n\t\tdefer p.Close()\n\t\tif tt.src == nil {\n\t\t\tif err := p.JoinGroup(ifi, &grp); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdefer p.LeaveGroup(ifi, &grp)\n\t\t} else {\n\t\t\tif err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil {\n\t\t\t\tswitch runtime.GOOS {\n\t\t\t\tcase \"freebsd\", \"linux\":\n\t\t\t\tdefault: // platforms that don't support IGMPv2/3 fail here\n\t\t\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdefer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src)\n\t\t}\n\t\tif err := 
p.SetMulticastInterface(ifi); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif _, err := p.MulticastInterface(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := p.SetMulticastLoopback(true); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif _, err := p.MulticastLoopback(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tcf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface\n\t\twb := []byte(\"HELLO-R-U-THERE\")\n\n\t\tfor i, toggle := range []bool{true, false, true} {\n\t\t\tif err := p.SetControlMessage(cf, toggle); err != nil {\n\t\t\t\tif nettest.ProtocolNotSupported(err) {\n\t\t\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tp.SetMulticastTTL(i + 1)\n\t\t\tif n, err := p.WriteTo(wb, nil, &grp); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else if n != len(wb) {\n\t\t\t\tt.Fatalf(\"got %v; want %v\", n, len(wb))\n\t\t\t}\n\t\t\trb := make([]byte, 128)\n\t\t\tif n, _, _, err := p.ReadFrom(rb); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else if !bytes.Equal(rb[:n], wb) {\n\t\t\t\tt.Fatalf(\"got %v; want %v\", rb[:n], wb)\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar packetConnReadWriteMulticastICMPTests = []struct {\n\tgrp, src *net.IPAddr\n}{\n\t{&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727\n\n\t{&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771\n}\n\nfunc TestPacketConnReadWriteMulticastICMP(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"solaris\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif m, ok := nettest.SupportsRawIPSocket(); !ok {\n\t\tt.Skip(m)\n\t}\n\tifi := nettest.RoutedInterface(\"ip4\", net.FlagUp|net.FlagMulticast|net.FlagLoopback)\n\tif ifi == nil {\n\t\tt.Skipf(\"not available on %s\", runtime.GOOS)\n\t}\n\n\tfor _, tt := range 
packetConnReadWriteMulticastICMPTests {\n\t\tc, err := net.ListenPacket(\"ip4:icmp\", \"0.0.0.0\")\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c.Close()\n\n\t\tp := ipv4.NewPacketConn(c)\n\t\tdefer p.Close()\n\t\tif tt.src == nil {\n\t\t\tif err := p.JoinGroup(ifi, tt.grp); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdefer p.LeaveGroup(ifi, tt.grp)\n\t\t} else {\n\t\t\tif err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil {\n\t\t\t\tswitch runtime.GOOS {\n\t\t\t\tcase \"freebsd\", \"linux\":\n\t\t\t\tdefault: // platforms that don't support IGMPv2/3 fail here\n\t\t\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdefer p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src)\n\t\t}\n\t\tif err := p.SetMulticastInterface(ifi); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif _, err := p.MulticastInterface(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := p.SetMulticastLoopback(true); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif _, err := p.MulticastLoopback(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tcf := ipv4.FlagDst | ipv4.FlagInterface\n\t\tif runtime.GOOS != \"solaris\" {\n\t\t\t// Solaris never allows to modify ICMP properties.\n\t\t\tcf |= ipv4.FlagTTL\n\t\t}\n\n\t\tfor i, toggle := range []bool{true, false, true} {\n\t\t\twb, err := (&icmp.Message{\n\t\t\t\tType: ipv4.ICMPTypeEcho, Code: 0,\n\t\t\t\tBody: &icmp.Echo{\n\t\t\t\t\tID: os.Getpid() & 0xffff, Seq: i + 1,\n\t\t\t\t\tData: []byte(\"HELLO-R-U-THERE\"),\n\t\t\t\t},\n\t\t\t}).Marshal(nil)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif err := p.SetControlMessage(cf, toggle); err != nil {\n\t\t\t\tif nettest.ProtocolNotSupported(err) {\n\t\t\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil 
{\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tp.SetMulticastTTL(i + 1)\n\t\t\tif n, err := p.WriteTo(wb, nil, tt.grp); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else if n != len(wb) {\n\t\t\t\tt.Fatalf(\"got %v; want %v\", n, len(wb))\n\t\t\t}\n\t\t\trb := make([]byte, 128)\n\t\t\tif n, _, _, err := p.ReadFrom(rb); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else {\n\t\t\t\tm, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n])\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\tswitch {\n\t\t\t\tcase m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1\n\t\t\t\tcase m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0\n\t\t\t\tdefault:\n\t\t\t\t\tt.Fatalf(\"got type=%v, code=%v; want type=%v, code=%v\", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar rawConnReadWriteMulticastICMPTests = []struct {\n\tgrp, src *net.IPAddr\n}{\n\t{&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727\n\n\t{&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771\n}\n\nfunc TestRawConnReadWriteMulticastICMP(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"to avoid external network\")\n\t}\n\tif m, ok := nettest.SupportsRawIPSocket(); !ok {\n\t\tt.Skip(m)\n\t}\n\tifi := nettest.RoutedInterface(\"ip4\", net.FlagUp|net.FlagMulticast|net.FlagLoopback)\n\tif ifi == nil {\n\t\tt.Skipf(\"not available on %s\", runtime.GOOS)\n\t}\n\n\tfor _, tt := range rawConnReadWriteMulticastICMPTests {\n\t\tc, err := net.ListenPacket(\"ip4:icmp\", \"0.0.0.0\")\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c.Close()\n\n\t\tr, err := ipv4.NewRawConn(c)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer r.Close()\n\t\tif tt.src == nil {\n\t\t\tif err := r.JoinGroup(ifi, tt.grp); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdefer r.LeaveGroup(ifi, tt.grp)\n\t\t} else {\n\t\t\tif err := r.JoinSourceSpecificGroup(ifi, tt.grp, 
tt.src); err != nil {\n\t\t\t\tswitch runtime.GOOS {\n\t\t\t\tcase \"freebsd\", \"linux\":\n\t\t\t\tdefault: // platforms that don't support IGMPv2/3 fail here\n\t\t\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdefer r.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src)\n\t\t}\n\t\tif err := r.SetMulticastInterface(ifi); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif _, err := r.MulticastInterface(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := r.SetMulticastLoopback(true); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif _, err := r.MulticastLoopback(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tcf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface\n\n\t\tfor i, toggle := range []bool{true, false, true} {\n\t\t\twb, err := (&icmp.Message{\n\t\t\t\tType: ipv4.ICMPTypeEcho, Code: 0,\n\t\t\t\tBody: &icmp.Echo{\n\t\t\t\t\tID: os.Getpid() & 0xffff, Seq: i + 1,\n\t\t\t\t\tData: []byte(\"HELLO-R-U-THERE\"),\n\t\t\t\t},\n\t\t\t}).Marshal(nil)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\twh := &ipv4.Header{\n\t\t\t\tVersion:  ipv4.Version,\n\t\t\t\tLen:      ipv4.HeaderLen,\n\t\t\t\tTOS:      i + 1,\n\t\t\t\tTotalLen: ipv4.HeaderLen + len(wb),\n\t\t\t\tProtocol: 1,\n\t\t\t\tDst:      tt.grp.IP,\n\t\t\t}\n\t\t\tif err := r.SetControlMessage(cf, toggle); err != nil {\n\t\t\t\tif nettest.ProtocolNotSupported(err) {\n\t\t\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif err := r.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tr.SetMulticastTTL(i + 1)\n\t\t\tif err := r.WriteTo(wh, wb, nil); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\trb := make([]byte, ipv4.HeaderLen+128)\n\t\t\tif rh, b, _, err := r.ReadFrom(rb); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else {\n\t\t\t\tm, err := icmp.ParseMessage(iana.ProtocolICMP, b)\n\t\t\t\tif err != nil 
{\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\tswitch {\n\t\t\t\tcase (rh.Dst.IsLoopback() || rh.Dst.IsLinkLocalUnicast() || rh.Dst.IsGlobalUnicast()) && m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1\n\t\t\t\tcase rh.Dst.IsMulticast() && m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0\n\t\t\t\tdefault:\n\t\t\t\t\tt.Fatalf(\"got type=%v, code=%v; want type=%v, code=%v\", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/multicastlistener_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4_test\n\nimport (\n\t\"net\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv4\"\n)\n\nvar udpMultipleGroupListenerTests = []net.Addr{\n\t&net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, // see RFC 4727\n\t&net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)},\n\t&net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)},\n}\n\nfunc TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif testing.Short() {\n\t\tt.Skip(\"to avoid external network\")\n\t}\n\n\tfor _, gaddr := range udpMultipleGroupListenerTests {\n\t\tc, err := net.ListenPacket(\"udp4\", \"0.0.0.0:0\") // wildcard address with no reusable port\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c.Close()\n\n\t\tp := ipv4.NewPacketConn(c)\n\t\tvar mift []*net.Interface\n\n\t\tift, err := net.Interfaces()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfor i, ifi := range ift {\n\t\t\tif _, ok := nettest.IsMulticastCapable(\"ip4\", &ifi); !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := p.JoinGroup(&ifi, gaddr); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tmift = append(mift, &ift[i])\n\t\t}\n\t\tfor _, ifi := range mift {\n\t\t\tif err := p.LeaveGroup(ifi, gaddr); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif testing.Short() {\n\t\tt.Skip(\"to avoid external network\")\n\t}\n\n\tfor _, gaddr := range udpMultipleGroupListenerTests {\n\t\tc1, err := net.ListenPacket(\"udp4\", \"224.0.0.0:0\") // wildcard address with reusable 
port\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c1.Close()\n\t\t_, port, err := net.SplitHostPort(c1.LocalAddr().String())\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tc2, err := net.ListenPacket(\"udp4\", net.JoinHostPort(\"224.0.0.0\", port)) // wildcard address with reusable port\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c2.Close()\n\n\t\tvar ps [2]*ipv4.PacketConn\n\t\tps[0] = ipv4.NewPacketConn(c1)\n\t\tps[1] = ipv4.NewPacketConn(c2)\n\t\tvar mift []*net.Interface\n\n\t\tift, err := net.Interfaces()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfor i, ifi := range ift {\n\t\t\tif _, ok := nettest.IsMulticastCapable(\"ip4\", &ifi); !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, p := range ps {\n\t\t\t\tif err := p.JoinGroup(&ifi, gaddr); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tmift = append(mift, &ift[i])\n\t\t}\n\t\tfor _, ifi := range mift {\n\t\t\tfor _, p := range ps {\n\t\t\t\tif err := p.LeaveGroup(ifi, gaddr); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif testing.Short() {\n\t\tt.Skip(\"to avoid external network\")\n\t}\n\n\tgaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727\n\ttype ml struct {\n\t\tc   *ipv4.PacketConn\n\t\tifi *net.Interface\n\t}\n\tvar mlt []*ml\n\n\tift, err := net.Interfaces()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tport := \"0\"\n\tfor i, ifi := range ift {\n\t\tip, ok := nettest.IsMulticastCapable(\"ip4\", &ifi)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tc, err := net.ListenPacket(\"udp4\", net.JoinHostPort(ip.String(), port)) // unicast address with non-reusable port\n\t\tif err != nil {\n\t\t\t// The listen may fail when the serivce is\n\t\t\t// already in use, but it's fine because the\n\t\t\t// purpose 
of this is not to test the\n\t\t\t// bookkeeping of IP control block inside the\n\t\t\t// kernel.\n\t\t\tt.Log(err)\n\t\t\tcontinue\n\t\t}\n\t\tdefer c.Close()\n\t\tif port == \"0\" {\n\t\t\t_, port, err = net.SplitHostPort(c.LocalAddr().String())\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t\tp := ipv4.NewPacketConn(c)\n\t\tif err := p.JoinGroup(&ifi, &gaddr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tmlt = append(mlt, &ml{p, &ift[i]})\n\t}\n\tfor _, m := range mlt {\n\t\tif err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestIPSingleRawConnWithSingleGroupListener(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif testing.Short() {\n\t\tt.Skip(\"to avoid external network\")\n\t}\n\tif m, ok := nettest.SupportsRawIPSocket(); !ok {\n\t\tt.Skip(m)\n\t}\n\n\tc, err := net.ListenPacket(\"ip4:icmp\", \"0.0.0.0\") // wildcard address\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\tr, err := ipv4.NewRawConn(c)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727\n\tvar mift []*net.Interface\n\n\tift, err := net.Interfaces()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor i, ifi := range ift {\n\t\tif _, ok := nettest.IsMulticastCapable(\"ip4\", &ifi); !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif err := r.JoinGroup(&ifi, &gaddr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tmift = append(mift, &ift[i])\n\t}\n\tfor _, ifi := range mift {\n\t\tif err := r.LeaveGroup(ifi, &gaddr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestIPPerInterfaceSingleRawConnWithSingleGroupListener(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif testing.Short() {\n\t\tt.Skip(\"to avoid external network\")\n\t}\n\tif m, ok := nettest.SupportsRawIPSocket(); !ok 
{\n\t\tt.Skip(m)\n\t}\n\n\tgaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727\n\ttype ml struct {\n\t\tc   *ipv4.RawConn\n\t\tifi *net.Interface\n\t}\n\tvar mlt []*ml\n\n\tift, err := net.Interfaces()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor i, ifi := range ift {\n\t\tip, ok := nettest.IsMulticastCapable(\"ip4\", &ifi)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tc, err := net.ListenPacket(\"ip4:253\", ip.String()) // unicast address\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c.Close()\n\t\tr, err := ipv4.NewRawConn(c)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := r.JoinGroup(&ifi, &gaddr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tmlt = append(mlt, &ml{r, &ift[i]})\n\t}\n\tfor _, m := range mlt {\n\t\tif err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/multicastsockopt_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4_test\n\nimport (\n\t\"net\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv4\"\n)\n\nvar packetConnMulticastSocketOptionTests = []struct {\n\tnet, proto, addr string\n\tgrp, src         net.Addr\n}{\n\t{\"udp4\", \"\", \"224.0.0.0:0\", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, nil}, // see RFC 4727\n\t{\"ip4\", \":icmp\", \"0.0.0.0\", &net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil},  // see RFC 4727\n\n\t{\"udp4\", \"\", \"232.0.0.0:0\", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 249)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771\n\t{\"ip4\", \":icmp\", \"0.0.0.0\", &net.IPAddr{IP: net.IPv4(232, 0, 1, 250)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}},  // see RFC 5771\n}\n\nfunc TestPacketConnMulticastSocketOptions(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tifi := nettest.RoutedInterface(\"ip4\", net.FlagUp|net.FlagMulticast|net.FlagLoopback)\n\tif ifi == nil {\n\t\tt.Skipf(\"not available on %s\", runtime.GOOS)\n\t}\n\n\tm, ok := nettest.SupportsRawIPSocket()\n\tfor _, tt := range packetConnMulticastSocketOptionTests {\n\t\tif tt.net == \"ip4\" && !ok {\n\t\t\tt.Log(m)\n\t\t\tcontinue\n\t\t}\n\t\tc, err := net.ListenPacket(tt.net+tt.proto, tt.addr)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv4.NewPacketConn(c)\n\t\tdefer p.Close()\n\n\t\tif tt.src == nil {\n\t\t\ttestMulticastSocketOptions(t, p, ifi, tt.grp)\n\t\t} else {\n\t\t\ttestSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src)\n\t\t}\n\t}\n}\n\nvar rawConnMulticastSocketOptionTests = []struct {\n\tgrp, src net.Addr\n}{\n\t{&net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil}, // see RFC 4727\n\n\t{&net.IPAddr{IP: net.IPv4(232, 
0, 1, 250)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771\n}\n\nfunc TestRawConnMulticastSocketOptions(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif m, ok := nettest.SupportsRawIPSocket(); !ok {\n\t\tt.Skip(m)\n\t}\n\tifi := nettest.RoutedInterface(\"ip4\", net.FlagUp|net.FlagMulticast|net.FlagLoopback)\n\tif ifi == nil {\n\t\tt.Skipf(\"not available on %s\", runtime.GOOS)\n\t}\n\n\tfor _, tt := range rawConnMulticastSocketOptionTests {\n\t\tc, err := net.ListenPacket(\"ip4:icmp\", \"0.0.0.0\")\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c.Close()\n\t\tr, err := ipv4.NewRawConn(c)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer r.Close()\n\n\t\tif tt.src == nil {\n\t\t\ttestMulticastSocketOptions(t, r, ifi, tt.grp)\n\t\t} else {\n\t\t\ttestSourceSpecificMulticastSocketOptions(t, r, ifi, tt.grp, tt.src)\n\t\t}\n\t}\n}\n\ntype testIPv4MulticastConn interface {\n\tMulticastTTL() (int, error)\n\tSetMulticastTTL(ttl int) error\n\tMulticastLoopback() (bool, error)\n\tSetMulticastLoopback(bool) error\n\tJoinGroup(*net.Interface, net.Addr) error\n\tLeaveGroup(*net.Interface, net.Addr) error\n\tJoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error\n\tLeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error\n\tExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error\n\tIncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error\n}\n\nfunc testMulticastSocketOptions(t *testing.T, c testIPv4MulticastConn, ifi *net.Interface, grp net.Addr) {\n\tconst ttl = 255\n\tif err := c.SetMulticastTTL(ttl); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif v, err := c.MulticastTTL(); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t} else if v != ttl {\n\t\tt.Errorf(\"got %v; want %v\", v, ttl)\n\t\treturn\n\t}\n\n\tfor _, toggle := range []bool{true, false} {\n\t\tif err := c.SetMulticastLoopback(toggle); err != nil 
{\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif v, err := c.MulticastLoopback(); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t} else if v != toggle {\n\t\t\tt.Errorf(\"got %v; want %v\", v, toggle)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif err := c.JoinGroup(ifi, grp); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif err := c.LeaveGroup(ifi, grp); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n}\n\nfunc testSourceSpecificMulticastSocketOptions(t *testing.T, c testIPv4MulticastConn, ifi *net.Interface, grp, src net.Addr) {\n\t// MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP\n\tif err := c.JoinGroup(ifi, grp); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil {\n\t\tswitch runtime.GOOS {\n\t\tcase \"freebsd\", \"linux\":\n\t\tdefault: // platforms that don't support IGMPv2/3 fail here\n\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\treturn\n\t\t}\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif err := c.IncludeSourceSpecificGroup(ifi, grp, src); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif err := c.LeaveGroup(ifi, grp); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\t// MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP\n\tif err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\t// MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP\n\tif err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif err := c.LeaveGroup(ifi, grp); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/packet.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"syscall\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\n// BUG(mikio): On Windows, the ReadFrom and WriteTo methods of RawConn\n// are not implemented.\n\n// A packetHandler represents the IPv4 datagram handler.\ntype packetHandler struct {\n\t*net.IPConn\n\t*socket.Conn\n\trawOpt\n}\n\nfunc (c *packetHandler) ok() bool { return c != nil && c.IPConn != nil && c.Conn != nil }\n\n// ReadFrom reads an IPv4 datagram from the endpoint c, copying the\n// datagram into b. It returns the received datagram as the IPv4\n// header h, the payload p and the control message cm.\nfunc (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) {\n\tif !c.ok() {\n\t\treturn nil, nil, nil, syscall.EINVAL\n\t}\n\treturn c.readFrom(b)\n}\n\nfunc slicePacket(b []byte) (h, p []byte, err error) {\n\tif len(b) < HeaderLen {\n\t\treturn nil, nil, errHeaderTooShort\n\t}\n\thdrlen := int(b[0]&0x0f) << 2\n\treturn b[:hdrlen], b[hdrlen:], nil\n}\n\n// WriteTo writes an IPv4 datagram through the endpoint c, copying the\n// datagram from the IPv4 header h and the payload p. The control\n// message cm allows the datagram path and the outgoing interface to be\n// specified.  Currently only Darwin and Linux support this. 
The cm\n// may be nil if control of the outgoing datagram is not required.\n//\n// The IPv4 header h must contain appropriate fields that include:\n//\n//\tVersion       = <must be specified>\n//\tLen           = <must be specified>\n//\tTOS           = <must be specified>\n//\tTotalLen      = <must be specified>\n//\tID            = platform sets an appropriate value if ID is zero\n//\tFragOff       = <must be specified>\n//\tTTL           = <must be specified>\n//\tProtocol      = <must be specified>\n//\tChecksum      = platform sets an appropriate value if Checksum is zero\n//\tSrc           = platform sets an appropriate value if Src is nil\n//\tDst           = <must be specified>\n//\tOptions       = optional\nfunc (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn c.writeTo(h, p, cm)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/packet_go1_8.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.9\n\npackage ipv4\n\nimport \"net\"\n\nfunc (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) {\n\tc.rawOpt.RLock()\n\toob := NewControlMessage(c.rawOpt.cflags)\n\tc.rawOpt.RUnlock()\n\tn, nn, _, src, err := c.ReadMsgIP(b, oob)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tvar hs []byte\n\tif hs, p, err = slicePacket(b[:n]); err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tif h, err = ParseHeader(hs); err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tif nn > 0 {\n\t\tcm = new(ControlMessage)\n\t\tif err := cm.Parse(oob[:nn]); err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t}\n\tif src != nil && cm != nil {\n\t\tcm.Src = src.IP\n\t}\n\treturn\n}\n\nfunc (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error {\n\toob := cm.Marshal()\n\twh, err := h.Marshal()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdst := new(net.IPAddr)\n\tif cm != nil {\n\t\tif ip := cm.Dst.To4(); ip != nil {\n\t\t\tdst.IP = ip\n\t\t}\n\t}\n\tif dst.IP == nil {\n\t\tdst.IP = h.Dst\n\t}\n\twh = append(wh, p...)\n\t_, _, err = c.WriteMsgIP(wh, oob, dst)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/packet_go1_9.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.9\n\npackage ipv4\n\nimport (\n\t\"net\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) {\n\tc.rawOpt.RLock()\n\tm := socket.Message{\n\t\tBuffers: [][]byte{b},\n\t\tOOB:     NewControlMessage(c.rawOpt.cflags),\n\t}\n\tc.rawOpt.RUnlock()\n\tif err := c.RecvMsg(&m, 0); err != nil {\n\t\treturn nil, nil, nil, &net.OpError{Op: \"read\", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err}\n\t}\n\tvar hs []byte\n\tif hs, p, err = slicePacket(b[:m.N]); err != nil {\n\t\treturn nil, nil, nil, &net.OpError{Op: \"read\", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err}\n\t}\n\tif h, err = ParseHeader(hs); err != nil {\n\t\treturn nil, nil, nil, &net.OpError{Op: \"read\", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err}\n\t}\n\tif m.NN > 0 {\n\t\tcm = new(ControlMessage)\n\t\tif err := cm.Parse(m.OOB[:m.NN]); err != nil {\n\t\t\treturn nil, nil, nil, &net.OpError{Op: \"read\", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err}\n\t\t}\n\t}\n\tif src, ok := m.Addr.(*net.IPAddr); ok && cm != nil {\n\t\tcm.Src = src.IP\n\t}\n\treturn\n}\n\nfunc (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error {\n\tm := socket.Message{\n\t\tOOB: cm.Marshal(),\n\t}\n\twh, err := h.Marshal()\n\tif err != nil {\n\t\treturn err\n\t}\n\tm.Buffers = [][]byte{wh, p}\n\tdst := new(net.IPAddr)\n\tif cm != nil {\n\t\tif ip := cm.Dst.To4(); ip != nil {\n\t\t\tdst.IP = ip\n\t\t}\n\t}\n\tif dst.IP == nil {\n\t\tdst.IP = h.Dst\n\t}\n\tm.Addr = dst\n\tif err := c.SendMsg(&m, 0); err != nil {\n\t\treturn &net.OpError{Op: \"write\", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), 
Addr: opAddr(dst), Err: err}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/payload.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport (\n\t\"net\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\n// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo\n// methods of PacketConn is not implemented.\n\n// A payloadHandler represents the IPv4 datagram payload handler.\ntype payloadHandler struct {\n\tnet.PacketConn\n\t*socket.Conn\n\trawOpt\n}\n\nfunc (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil }\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/payload_cmsg.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !nacl,!plan9,!windows\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"syscall\"\n)\n\n// ReadFrom reads a payload of the received IPv4 datagram, from the\n// endpoint c, copying the payload into b. It returns the number of\n// bytes copied into b, the control message cm and the source address\n// src of the received datagram.\nfunc (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) {\n\tif !c.ok() {\n\t\treturn 0, nil, nil, syscall.EINVAL\n\t}\n\treturn c.readFrom(b)\n}\n\n// WriteTo writes a payload of the IPv4 datagram, to the destination\n// address dst through the endpoint c, copying the payload from b. It\n// returns the number of bytes written. The control message cm allows\n// the datagram path and the outgoing interface to be specified.\n// Currently only Darwin and Linux support this. The cm may be nil if\n// control of the outgoing datagram is not required.\nfunc (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\treturn c.writeTo(b, cm, dst)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.9\n// +build !nacl,!plan9,!windows\n\npackage ipv4\n\nimport \"net\"\n\nfunc (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) {\n\tc.rawOpt.RLock()\n\toob := NewControlMessage(c.rawOpt.cflags)\n\tc.rawOpt.RUnlock()\n\tvar nn int\n\tswitch c := c.PacketConn.(type) {\n\tcase *net.UDPConn:\n\t\tif n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil {\n\t\t\treturn 0, nil, nil, err\n\t\t}\n\tcase *net.IPConn:\n\t\tnb := make([]byte, maxHeaderLen+len(b))\n\t\tif n, nn, _, src, err = c.ReadMsgIP(nb, oob); err != nil {\n\t\t\treturn 0, nil, nil, err\n\t\t}\n\t\thdrlen := int(nb[0]&0x0f) << 2\n\t\tcopy(b, nb[hdrlen:])\n\t\tn -= hdrlen\n\tdefault:\n\t\treturn 0, nil, nil, &net.OpError{Op: \"read\", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType}\n\t}\n\tif nn > 0 {\n\t\tcm = new(ControlMessage)\n\t\tif err = cm.Parse(oob[:nn]); err != nil {\n\t\t\treturn 0, nil, nil, &net.OpError{Op: \"read\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}\n\t\t}\n\t}\n\tif cm != nil {\n\t\tcm.Src = netAddrToIP4(src)\n\t}\n\treturn\n}\n\nfunc (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) {\n\toob := cm.Marshal()\n\tif dst == nil {\n\t\treturn 0, &net.OpError{Op: \"write\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress}\n\t}\n\tswitch c := c.PacketConn.(type) {\n\tcase *net.UDPConn:\n\t\tn, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr))\n\tcase *net.IPConn:\n\t\tn, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr))\n\tdefault:\n\t\treturn 0, &net.OpError{Op: \"write\", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType}\n\t}\n\treturn\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.9\n// +build !nacl,!plan9,!windows\n\npackage ipv4\n\nimport (\n\t\"net\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) {\n\tc.rawOpt.RLock()\n\tm := socket.Message{\n\t\tOOB: NewControlMessage(c.rawOpt.cflags),\n\t}\n\tc.rawOpt.RUnlock()\n\tswitch c.PacketConn.(type) {\n\tcase *net.UDPConn:\n\t\tm.Buffers = [][]byte{b}\n\t\tif err := c.RecvMsg(&m, 0); err != nil {\n\t\t\treturn 0, nil, nil, &net.OpError{Op: \"read\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}\n\t\t}\n\tcase *net.IPConn:\n\t\th := make([]byte, HeaderLen)\n\t\tm.Buffers = [][]byte{h, b}\n\t\tif err := c.RecvMsg(&m, 0); err != nil {\n\t\t\treturn 0, nil, nil, &net.OpError{Op: \"read\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}\n\t\t}\n\t\thdrlen := int(h[0]&0x0f) << 2\n\t\tif hdrlen > len(h) {\n\t\t\td := hdrlen - len(h)\n\t\t\tcopy(b, b[d:])\n\t\t\tm.N -= d\n\t\t} else {\n\t\t\tm.N -= hdrlen\n\t\t}\n\tdefault:\n\t\treturn 0, nil, nil, &net.OpError{Op: \"read\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType}\n\t}\n\tvar cm *ControlMessage\n\tif m.NN > 0 {\n\t\tcm = new(ControlMessage)\n\t\tif err := cm.Parse(m.OOB[:m.NN]); err != nil {\n\t\t\treturn 0, nil, nil, &net.OpError{Op: \"read\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}\n\t\t}\n\t\tcm.Src = netAddrToIP4(m.Addr)\n\t}\n\treturn m.N, cm, m.Addr, nil\n}\n\nfunc (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) {\n\tm := socket.Message{\n\t\tBuffers: [][]byte{b},\n\t\tOOB:     cm.Marshal(),\n\t\tAddr:    dst,\n\t}\n\terr := c.SendMsg(&m, 0)\n\tif err != nil 
{\n\t\terr = &net.OpError{Op: \"write\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err}\n\t}\n\treturn m.N, err\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/payload_nocmsg.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build nacl plan9 windows\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"syscall\"\n)\n\n// ReadFrom reads a payload of the received IPv4 datagram, from the\n// endpoint c, copying the payload into b. It returns the number of\n// bytes copied into b, the control message cm and the source address\n// src of the received datagram.\nfunc (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) {\n\tif !c.ok() {\n\t\treturn 0, nil, nil, syscall.EINVAL\n\t}\n\tif n, src, err = c.PacketConn.ReadFrom(b); err != nil {\n\t\treturn 0, nil, nil, err\n\t}\n\treturn\n}\n\n// WriteTo writes a payload of the IPv4 datagram, to the destination\n// address dst through the endpoint c, copying the payload from b. It\n// returns the number of bytes written. The control message cm allows\n// the datagram path and the outgoing interface to be specified.\n// Currently only Darwin and Linux support this. The cm may be nil if\n// control of the outgoing datagram is not required.\nfunc (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tif dst == nil {\n\t\treturn 0, errMissingAddress\n\t}\n\treturn c.PacketConn.WriteTo(b, dst)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.9\n\npackage ipv4_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv4\"\n)\n\nfunc BenchmarkPacketConnReadWriteUnicast(b *testing.B) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tb.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\n\tpayload := []byte(\"HELLO-R-U-THERE\")\n\tiph, err := (&ipv4.Header{\n\t\tVersion:  ipv4.Version,\n\t\tLen:      ipv4.HeaderLen,\n\t\tTotalLen: ipv4.HeaderLen + len(payload),\n\t\tTTL:      1,\n\t\tProtocol: iana.ProtocolReserved,\n\t\tSrc:      net.IPv4(192, 0, 2, 1),\n\t\tDst:      net.IPv4(192, 0, 2, 254),\n\t}).Marshal()\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tgreh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00}\n\tdatagram := append(greh, append(iph, payload...)...)\n\tbb := make([]byte, 128)\n\tcm := ipv4.ControlMessage{\n\t\tSrc: net.IPv4(127, 0, 0, 1),\n\t}\n\tif ifi := nettest.RoutedInterface(\"ip4\", net.FlagUp|net.FlagLoopback); ifi != nil {\n\t\tcm.IfIndex = ifi.Index\n\t}\n\n\tb.Run(\"UDP\", func(b *testing.B) {\n\t\tc, err := nettest.NewLocalPacketListener(\"udp4\")\n\t\tif err != nil {\n\t\t\tb.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv4.NewPacketConn(c)\n\t\tdst := c.LocalAddr()\n\t\tcf := ipv4.FlagTTL | ipv4.FlagInterface\n\t\tif err := p.SetControlMessage(cf, true); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tb.Run(\"Net\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := c.WriteTo(payload, dst); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, _, err := c.ReadFrom(bb); err != nil 
{\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tb.Run(\"ToFrom\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := p.WriteTo(payload, &cm, dst); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, _, _, err := p.ReadFrom(bb); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t})\n\tb.Run(\"IP\", func(b *testing.B) {\n\t\tswitch runtime.GOOS {\n\t\tcase \"netbsd\":\n\t\t\tb.Skip(\"need to configure gre on netbsd\")\n\t\tcase \"openbsd\":\n\t\t\tb.Skip(\"net.inet.gre.allow=0 by default on openbsd\")\n\t\t}\n\n\t\tc, err := net.ListenPacket(fmt.Sprintf(\"ip4:%d\", iana.ProtocolGRE), \"127.0.0.1\")\n\t\tif err != nil {\n\t\t\tb.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv4.NewPacketConn(c)\n\t\tdst := c.LocalAddr()\n\t\tcf := ipv4.FlagTTL | ipv4.FlagInterface\n\t\tif err := p.SetControlMessage(cf, true); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tb.Run(\"Net\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := c.WriteTo(datagram, dst); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, _, err := c.ReadFrom(bb); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tb.Run(\"ToFrom\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := p.WriteTo(datagram, &cm, dst); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, _, _, err := p.ReadFrom(bb); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc TestPacketConnConcurrentReadWriteUnicast(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\n\tpayload := []byte(\"HELLO-R-U-THERE\")\n\tiph, err := (&ipv4.Header{\n\t\tVersion:  ipv4.Version,\n\t\tLen:      ipv4.HeaderLen,\n\t\tTotalLen: ipv4.HeaderLen + len(payload),\n\t\tTTL:      1,\n\t\tProtocol: iana.ProtocolReserved,\n\t\tSrc:   
   net.IPv4(192, 0, 2, 1),\n\t\tDst:      net.IPv4(192, 0, 2, 254),\n\t}).Marshal()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgreh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00}\n\tdatagram := append(greh, append(iph, payload...)...)\n\n\tt.Run(\"UDP\", func(t *testing.T) {\n\t\tc, err := nettest.NewLocalPacketListener(\"udp4\")\n\t\tif err != nil {\n\t\t\tt.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv4.NewPacketConn(c)\n\t\tt.Run(\"ToFrom\", func(t *testing.T) {\n\t\t\ttestPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr())\n\t\t})\n\t})\n\tt.Run(\"IP\", func(t *testing.T) {\n\t\tswitch runtime.GOOS {\n\t\tcase \"netbsd\":\n\t\t\tt.Skip(\"need to configure gre on netbsd\")\n\t\tcase \"openbsd\":\n\t\t\tt.Skip(\"net.inet.gre.allow=0 by default on openbsd\")\n\t\t}\n\n\t\tc, err := net.ListenPacket(fmt.Sprintf(\"ip4:%d\", iana.ProtocolGRE), \"127.0.0.1\")\n\t\tif err != nil {\n\t\t\tt.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv4.NewPacketConn(c)\n\t\tt.Run(\"ToFrom\", func(t *testing.T) {\n\t\t\ttestPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr())\n\t\t})\n\t})\n}\n\nfunc testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv4.PacketConn, data []byte, dst net.Addr) {\n\tifi := nettest.RoutedInterface(\"ip4\", net.FlagUp|net.FlagLoopback)\n\tcf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface\n\n\tif err := p.SetControlMessage(cf, true); err != nil { // probe before test\n\t\tif nettest.ProtocolNotSupported(err) {\n\t\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t\t}\n\t\tt.Fatal(err)\n\t}\n\n\tvar wg sync.WaitGroup\n\treader := func() {\n\t\tdefer wg.Done()\n\t\tb := make([]byte, 128)\n\t\tn, cm, _, err := p.ReadFrom(b)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif !bytes.Equal(b[:n], data) {\n\t\t\tt.Errorf(\"got %#v; want 
%#v\", b[:n], data)\n\t\t\treturn\n\t\t}\n\t\ts := cm.String()\n\t\tif strings.Contains(s, \",\") {\n\t\t\tt.Errorf(\"should be space-separated values: %s\", s)\n\t\t\treturn\n\t\t}\n\t}\n\twriter := func(toggle bool) {\n\t\tdefer wg.Done()\n\t\tcm := ipv4.ControlMessage{\n\t\t\tSrc: net.IPv4(127, 0, 0, 1),\n\t\t}\n\t\tif ifi != nil {\n\t\t\tcm.IfIndex = ifi.Index\n\t\t}\n\t\tif err := p.SetControlMessage(cf, toggle); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tn, err := p.WriteTo(data, &cm, dst)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif n != len(data) {\n\t\t\tt.Errorf(\"got %d; want %d\", n, len(data))\n\t\t\treturn\n\t\t}\n\t}\n\n\tconst N = 10\n\twg.Add(N)\n\tfor i := 0; i < N; i++ {\n\t\tgo reader()\n\t}\n\twg.Add(2 * N)\n\tfor i := 0; i < 2*N; i++ {\n\t\tgo writer(i%2 != 0)\n\n\t}\n\twg.Add(N)\n\tfor i := 0; i < N; i++ {\n\t\tgo reader()\n\t}\n\twg.Wait()\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.9\n\npackage ipv4_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv4\"\n)\n\nfunc BenchmarkPacketConnReadWriteUnicast(b *testing.B) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tb.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\n\tpayload := []byte(\"HELLO-R-U-THERE\")\n\tiph, err := (&ipv4.Header{\n\t\tVersion:  ipv4.Version,\n\t\tLen:      ipv4.HeaderLen,\n\t\tTotalLen: ipv4.HeaderLen + len(payload),\n\t\tTTL:      1,\n\t\tProtocol: iana.ProtocolReserved,\n\t\tSrc:      net.IPv4(192, 0, 2, 1),\n\t\tDst:      net.IPv4(192, 0, 2, 254),\n\t}).Marshal()\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tgreh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00}\n\tdatagram := append(greh, append(iph, payload...)...)\n\tbb := make([]byte, 128)\n\tcm := ipv4.ControlMessage{\n\t\tSrc: net.IPv4(127, 0, 0, 1),\n\t}\n\tif ifi := nettest.RoutedInterface(\"ip4\", net.FlagUp|net.FlagLoopback); ifi != nil {\n\t\tcm.IfIndex = ifi.Index\n\t}\n\n\tb.Run(\"UDP\", func(b *testing.B) {\n\t\tc, err := nettest.NewLocalPacketListener(\"udp4\")\n\t\tif err != nil {\n\t\t\tb.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv4.NewPacketConn(c)\n\t\tdst := c.LocalAddr()\n\t\tcf := ipv4.FlagTTL | ipv4.FlagInterface\n\t\tif err := p.SetControlMessage(cf, true); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\twms := []ipv4.Message{\n\t\t\t{\n\t\t\t\tBuffers: [][]byte{payload},\n\t\t\t\tAddr:    dst,\n\t\t\t\tOOB:     cm.Marshal(),\n\t\t\t},\n\t\t}\n\t\trms := []ipv4.Message{\n\t\t\t{\n\t\t\t\tBuffers: [][]byte{bb},\n\t\t\t\tOOB:     
ipv4.NewControlMessage(cf),\n\t\t\t},\n\t\t}\n\t\tb.Run(\"Net\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := c.WriteTo(payload, dst); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, _, err := c.ReadFrom(bb); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tb.Run(\"ToFrom\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := p.WriteTo(payload, &cm, dst); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, _, _, err := p.ReadFrom(bb); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tb.Run(\"Batch\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := p.WriteBatch(wms, 0); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, err := p.ReadBatch(rms, 0); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t})\n\tb.Run(\"IP\", func(b *testing.B) {\n\t\tswitch runtime.GOOS {\n\t\tcase \"netbsd\":\n\t\t\tb.Skip(\"need to configure gre on netbsd\")\n\t\tcase \"openbsd\":\n\t\t\tb.Skip(\"net.inet.gre.allow=0 by default on openbsd\")\n\t\t}\n\n\t\tc, err := net.ListenPacket(fmt.Sprintf(\"ip4:%d\", iana.ProtocolGRE), \"127.0.0.1\")\n\t\tif err != nil {\n\t\t\tb.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv4.NewPacketConn(c)\n\t\tdst := c.LocalAddr()\n\t\tcf := ipv4.FlagTTL | ipv4.FlagInterface\n\t\tif err := p.SetControlMessage(cf, true); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\twms := []ipv4.Message{\n\t\t\t{\n\t\t\t\tBuffers: [][]byte{datagram},\n\t\t\t\tAddr:    dst,\n\t\t\t\tOOB:     cm.Marshal(),\n\t\t\t},\n\t\t}\n\t\trms := []ipv4.Message{\n\t\t\t{\n\t\t\t\tBuffers: [][]byte{bb},\n\t\t\t\tOOB:     ipv4.NewControlMessage(cf),\n\t\t\t},\n\t\t}\n\t\tb.Run(\"Net\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := c.WriteTo(datagram, dst); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, 
_, err := c.ReadFrom(bb); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tb.Run(\"ToFrom\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := p.WriteTo(datagram, &cm, dst); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, _, _, err := p.ReadFrom(bb); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tb.Run(\"Batch\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := p.WriteBatch(wms, 0); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, err := p.ReadBatch(rms, 0); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc TestPacketConnConcurrentReadWriteUnicast(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\n\tpayload := []byte(\"HELLO-R-U-THERE\")\n\tiph, err := (&ipv4.Header{\n\t\tVersion:  ipv4.Version,\n\t\tLen:      ipv4.HeaderLen,\n\t\tTotalLen: ipv4.HeaderLen + len(payload),\n\t\tTTL:      1,\n\t\tProtocol: iana.ProtocolReserved,\n\t\tSrc:      net.IPv4(192, 0, 2, 1),\n\t\tDst:      net.IPv4(192, 0, 2, 254),\n\t}).Marshal()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgreh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00}\n\tdatagram := append(greh, append(iph, payload...)...)\n\n\tt.Run(\"UDP\", func(t *testing.T) {\n\t\tc, err := nettest.NewLocalPacketListener(\"udp4\")\n\t\tif err != nil {\n\t\t\tt.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv4.NewPacketConn(c)\n\t\tt.Run(\"ToFrom\", func(t *testing.T) {\n\t\t\ttestPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), false)\n\t\t})\n\t\tt.Run(\"Batch\", func(t *testing.T) {\n\t\t\ttestPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), true)\n\t\t})\n\t})\n\tt.Run(\"IP\", func(t *testing.T) {\n\t\tswitch runtime.GOOS {\n\t\tcase \"netbsd\":\n\t\t\tt.Skip(\"need to 
configure gre on netbsd\")\n\t\tcase \"openbsd\":\n\t\t\tt.Skip(\"net.inet.gre.allow=0 by default on openbsd\")\n\t\t}\n\n\t\tc, err := net.ListenPacket(fmt.Sprintf(\"ip4:%d\", iana.ProtocolGRE), \"127.0.0.1\")\n\t\tif err != nil {\n\t\t\tt.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv4.NewPacketConn(c)\n\t\tt.Run(\"ToFrom\", func(t *testing.T) {\n\t\t\ttestPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), false)\n\t\t})\n\t\tt.Run(\"Batch\", func(t *testing.T) {\n\t\t\ttestPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), true)\n\t\t})\n\t})\n}\n\nfunc testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv4.PacketConn, data []byte, dst net.Addr, batch bool) {\n\tifi := nettest.RoutedInterface(\"ip4\", net.FlagUp|net.FlagLoopback)\n\tcf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface\n\n\tif err := p.SetControlMessage(cf, true); err != nil { // probe before test\n\t\tif nettest.ProtocolNotSupported(err) {\n\t\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t\t}\n\t\tt.Fatal(err)\n\t}\n\n\tvar wg sync.WaitGroup\n\treader := func() {\n\t\tdefer wg.Done()\n\t\tb := make([]byte, 128)\n\t\tn, cm, _, err := p.ReadFrom(b)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif !bytes.Equal(b[:n], data) {\n\t\t\tt.Errorf(\"got %#v; want %#v\", b[:n], data)\n\t\t\treturn\n\t\t}\n\t\ts := cm.String()\n\t\tif strings.Contains(s, \",\") {\n\t\t\tt.Errorf(\"should be space-separated values: %s\", s)\n\t\t\treturn\n\t\t}\n\t}\n\tbatchReader := func() {\n\t\tdefer wg.Done()\n\t\tms := []ipv4.Message{\n\t\t\t{\n\t\t\t\tBuffers: [][]byte{make([]byte, 128)},\n\t\t\t\tOOB:     ipv4.NewControlMessage(cf),\n\t\t\t},\n\t\t}\n\t\tn, err := p.ReadBatch(ms, 0)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif n != len(ms) {\n\t\t\tt.Errorf(\"got %d; want %d\", n, len(ms))\n\t\t\treturn\n\t\t}\n\t\tvar cm 
ipv4.ControlMessage\n\t\tif err := cm.Parse(ms[0].OOB[:ms[0].NN]); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tvar b []byte\n\t\tif _, ok := dst.(*net.IPAddr); ok {\n\t\t\tvar h ipv4.Header\n\t\t\tif err := h.Parse(ms[0].Buffers[0][:ms[0].N]); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tb = ms[0].Buffers[0][h.Len:ms[0].N]\n\t\t} else {\n\t\t\tb = ms[0].Buffers[0][:ms[0].N]\n\t\t}\n\t\tif !bytes.Equal(b, data) {\n\t\t\tt.Errorf(\"got %#v; want %#v\", b, data)\n\t\t\treturn\n\t\t}\n\t\ts := cm.String()\n\t\tif strings.Contains(s, \",\") {\n\t\t\tt.Errorf(\"should be space-separated values: %s\", s)\n\t\t\treturn\n\t\t}\n\t}\n\twriter := func(toggle bool) {\n\t\tdefer wg.Done()\n\t\tcm := ipv4.ControlMessage{\n\t\t\tSrc: net.IPv4(127, 0, 0, 1),\n\t\t}\n\t\tif ifi != nil {\n\t\t\tcm.IfIndex = ifi.Index\n\t\t}\n\t\tif err := p.SetControlMessage(cf, toggle); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tn, err := p.WriteTo(data, &cm, dst)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif n != len(data) {\n\t\t\tt.Errorf(\"got %d; want %d\", n, len(data))\n\t\t\treturn\n\t\t}\n\t}\n\tbatchWriter := func(toggle bool) {\n\t\tdefer wg.Done()\n\t\tcm := ipv4.ControlMessage{\n\t\t\tSrc: net.IPv4(127, 0, 0, 1),\n\t\t}\n\t\tif ifi != nil {\n\t\t\tcm.IfIndex = ifi.Index\n\t\t}\n\t\tif err := p.SetControlMessage(cf, toggle); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tms := []ipv4.Message{\n\t\t\t{\n\t\t\t\tBuffers: [][]byte{data},\n\t\t\t\tOOB:     cm.Marshal(),\n\t\t\t\tAddr:    dst,\n\t\t\t},\n\t\t}\n\t\tn, err := p.WriteBatch(ms, 0)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif n != len(ms) {\n\t\t\tt.Errorf(\"got %d; want %d\", n, len(ms))\n\t\t\treturn\n\t\t}\n\t\tif ms[0].N != len(data) {\n\t\t\tt.Errorf(\"got %d; want %d\", ms[0].N, len(data))\n\t\t\treturn\n\t\t}\n\t}\n\n\tconst N = 10\n\twg.Add(N)\n\tfor i := 0; i < N; i++ {\n\t\tif batch {\n\t\t\tgo 
batchReader()\n\t\t} else {\n\t\t\tgo reader()\n\t\t}\n\t}\n\twg.Add(2 * N)\n\tfor i := 0; i < 2*N; i++ {\n\t\tif batch {\n\t\t\tgo batchWriter(i%2 != 0)\n\t\t} else {\n\t\t\tgo writer(i%2 != 0)\n\t\t}\n\n\t}\n\twg.Add(N)\n\tfor i := 0; i < N; i++ {\n\t\tif batch {\n\t\t\tgo batchReader()\n\t\t} else {\n\t\t\tgo reader()\n\t\t}\n\t}\n\twg.Wait()\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/readwrite_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4_test\n\nimport (\n\t\"bytes\"\n\t\"net\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv4\"\n)\n\nfunc BenchmarkReadWriteUnicast(b *testing.B) {\n\tc, err := nettest.NewLocalPacketListener(\"udp4\")\n\tif err != nil {\n\t\tb.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t}\n\tdefer c.Close()\n\n\tdst := c.LocalAddr()\n\twb, rb := []byte(\"HELLO-R-U-THERE\"), make([]byte, 128)\n\n\tb.Run(\"NetUDP\", func(b *testing.B) {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tif _, err := c.WriteTo(wb, dst); err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\tif _, _, err := c.ReadFrom(rb); err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t}\n\t})\n\tb.Run(\"IPv4UDP\", func(b *testing.B) {\n\t\tp := ipv4.NewPacketConn(c)\n\t\tcf := ipv4.FlagTTL | ipv4.FlagInterface\n\t\tif err := p.SetControlMessage(cf, true); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tcm := ipv4.ControlMessage{TTL: 1}\n\t\tifi := nettest.RoutedInterface(\"ip4\", net.FlagUp|net.FlagLoopback)\n\t\tif ifi != nil {\n\t\t\tcm.IfIndex = ifi.Index\n\t\t}\n\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tif _, err := p.WriteTo(wb, &cm, dst); err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\tif _, _, _, err := p.ReadFrom(rb); err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\n\tc, err := nettest.NewLocalPacketListener(\"udp4\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\tp := ipv4.NewPacketConn(c)\n\tdefer p.Close()\n\n\tdst := c.LocalAddr()\n\tifi := nettest.RoutedInterface(\"ip4\", net.FlagUp|net.FlagLoopback)\n\tcf 
:= ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface\n\twb := []byte(\"HELLO-R-U-THERE\")\n\n\tif err := p.SetControlMessage(cf, true); err != nil { // probe before test\n\t\tif nettest.ProtocolNotSupported(err) {\n\t\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t\t}\n\t\tt.Fatal(err)\n\t}\n\n\tvar wg sync.WaitGroup\n\treader := func() {\n\t\tdefer wg.Done()\n\t\trb := make([]byte, 128)\n\t\tif n, cm, _, err := p.ReadFrom(rb); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t} else if !bytes.Equal(rb[:n], wb) {\n\t\t\tt.Errorf(\"got %v; want %v\", rb[:n], wb)\n\t\t\treturn\n\t\t} else {\n\t\t\ts := cm.String()\n\t\t\tif strings.Contains(s, \",\") {\n\t\t\t\tt.Errorf(\"should be space-separated values: %s\", s)\n\t\t\t}\n\t\t}\n\t}\n\twriter := func(toggle bool) {\n\t\tdefer wg.Done()\n\t\tcm := ipv4.ControlMessage{\n\t\t\tSrc: net.IPv4(127, 0, 0, 1),\n\t\t}\n\t\tif ifi != nil {\n\t\t\tcm.IfIndex = ifi.Index\n\t\t}\n\t\tif err := p.SetControlMessage(cf, toggle); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif n, err := p.WriteTo(wb, &cm, dst); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t} else if n != len(wb) {\n\t\t\tt.Errorf(\"got %d; want %d\", n, len(wb))\n\t\t\treturn\n\t\t}\n\t}\n\n\tconst N = 10\n\twg.Add(N)\n\tfor i := 0; i < N; i++ {\n\t\tgo reader()\n\t}\n\twg.Add(2 * N)\n\tfor i := 0; i < 2*N; i++ {\n\t\tgo writer(i%2 != 0)\n\t}\n\twg.Add(N)\n\tfor i := 0; i < N; i++ {\n\t\tgo reader()\n\t}\n\twg.Wait()\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sockopt.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport \"golang.org/x/net/internal/socket\"\n\n// Sticky socket options\nconst (\n\tssoTOS                = iota // header field for unicast packet\n\tssoTTL                       // header field for unicast packet\n\tssoMulticastTTL              // header field for multicast packet\n\tssoMulticastInterface        // outbound interface for multicast packet\n\tssoMulticastLoopback         // loopback for multicast packet\n\tssoReceiveTTL                // header field on received packet\n\tssoReceiveDst                // header field on received packet\n\tssoReceiveInterface          // inbound interface on received packet\n\tssoPacketInfo                // incbound or outbound packet path\n\tssoHeaderPrepend             // ipv4 header prepend\n\tssoStripHeader               // strip ipv4 header\n\tssoICMPFilter                // icmp filter\n\tssoJoinGroup                 // any-source multicast\n\tssoLeaveGroup                // any-source multicast\n\tssoJoinSourceGroup           // source-specific multicast\n\tssoLeaveSourceGroup          // source-specific multicast\n\tssoBlockSourceGroup          // any-source or source-specific multicast\n\tssoUnblockSourceGroup        // any-source or source-specific multicast\n\tssoAttachFilter              // attach BPF for filtering inbound traffic\n)\n\n// Sticky socket option value types\nconst (\n\tssoTypeIPMreq = iota + 1\n\tssoTypeIPMreqn\n\tssoTypeGroupReq\n\tssoTypeGroupSourceReq\n)\n\n// A sockOpt represents a binding for sticky socket option.\ntype sockOpt struct {\n\tsocket.Option\n\ttyp int // hint for option value type; optional\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sockopt_posix.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/bpf\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) {\n\tswitch so.typ {\n\tcase ssoTypeIPMreqn:\n\t\treturn so.getIPMreqn(c)\n\tdefault:\n\t\treturn so.getMulticastIf(c)\n\t}\n}\n\nfunc (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error {\n\tswitch so.typ {\n\tcase ssoTypeIPMreqn:\n\t\treturn so.setIPMreqn(c, ifi, nil)\n\tdefault:\n\t\treturn so.setMulticastIf(c, ifi)\n\t}\n}\n\nfunc (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) {\n\tb := make([]byte, so.Len)\n\tn, err := so.Get(c, b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n != sizeofICMPFilter {\n\t\treturn nil, errOpNoSupport\n\t}\n\treturn (*ICMPFilter)(unsafe.Pointer(&b[0])), nil\n}\n\nfunc (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error {\n\tb := (*[sizeofICMPFilter]byte)(unsafe.Pointer(f))[:sizeofICMPFilter]\n\treturn so.Set(c, b)\n}\n\nfunc (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error {\n\tswitch so.typ {\n\tcase ssoTypeIPMreq:\n\t\treturn so.setIPMreq(c, ifi, grp)\n\tcase ssoTypeIPMreqn:\n\t\treturn so.setIPMreqn(c, ifi, grp)\n\tcase ssoTypeGroupReq:\n\t\treturn so.setGroupReq(c, ifi, grp)\n\tdefault:\n\t\treturn errOpNoSupport\n\t}\n}\n\nfunc (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error {\n\treturn so.setGroupSourceReq(c, ifi, grp, src)\n}\n\nfunc (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error {\n\treturn so.setAttachFilter(c, f)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sockopt_stub.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows\n\npackage ipv4\n\nimport (\n\t\"net\"\n\n\t\"golang.org/x/net/bpf\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) {\n\treturn nil, errOpNoSupport\n}\n\nfunc (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error {\n\treturn errOpNoSupport\n}\n\nfunc (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) {\n\treturn nil, errOpNoSupport\n}\n\nfunc (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error {\n\treturn errOpNoSupport\n}\n\nfunc (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error {\n\treturn errOpNoSupport\n}\n\nfunc (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error {\n\treturn errOpNoSupport\n}\n\nfunc (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error {\n\treturn errOpNoSupport\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sys_asmreq.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd openbsd solaris windows\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error {\n\tmreq := ipMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}}\n\tif err := setIPMreqInterface(&mreq, ifi); err != nil {\n\t\treturn err\n\t}\n\tb := (*[sizeofIPMreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPMreq]\n\treturn so.Set(c, b)\n}\n\nfunc (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) {\n\tvar b [4]byte\n\tif _, err := so.Get(c, b[:]); err != nil {\n\t\treturn nil, err\n\t}\n\tifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3]))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ifi, nil\n}\n\nfunc (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error {\n\tip, err := netInterfaceToIP4(ifi)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar b [4]byte\n\tcopy(b[:], ip)\n\treturn so.Set(c, b[:])\n}\n\nfunc setIPMreqInterface(mreq *ipMreq, ifi *net.Interface) error {\n\tif ifi == nil {\n\t\treturn nil\n\t}\n\tifat, err := ifi.Addrs()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, ifa := range ifat {\n\t\tswitch ifa := ifa.(type) {\n\t\tcase *net.IPAddr:\n\t\t\tif ip := ifa.IP.To4(); ip != nil {\n\t\t\t\tcopy(mreq.Interface[:], ip)\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase *net.IPNet:\n\t\t\tif ip := ifa.IP.To4(); ip != nil {\n\t\t\t\tcopy(mreq.Interface[:], ip)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\treturn errNoSuchInterface\n}\n\nfunc netIP4ToInterface(ip net.IP) (*net.Interface, error) {\n\tift, err := net.Interfaces()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, ifi := range ift {\n\t\tifat, err := ifi.Addrs()\n\t\tif err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\tfor _, ifa := range ifat {\n\t\t\tswitch ifa := ifa.(type) {\n\t\t\tcase *net.IPAddr:\n\t\t\t\tif ip.Equal(ifa.IP) {\n\t\t\t\t\treturn &ifi, nil\n\t\t\t\t}\n\t\t\tcase *net.IPNet:\n\t\t\t\tif ip.Equal(ifa.IP) {\n\t\t\t\t\treturn &ifi, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, errNoSuchInterface\n}\n\nfunc netInterfaceToIP4(ifi *net.Interface) (net.IP, error) {\n\tif ifi == nil {\n\t\treturn net.IPv4zero.To4(), nil\n\t}\n\tifat, err := ifi.Addrs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, ifa := range ifat {\n\t\tswitch ifa := ifa.(type) {\n\t\tcase *net.IPAddr:\n\t\t\tif ip := ifa.IP.To4(); ip != nil {\n\t\t\t\treturn ip, nil\n\t\t\t}\n\t\tcase *net.IPNet:\n\t\t\tif ip := ifa.IP.To4(); ip != nil {\n\t\t\t\treturn ip, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, errNoSuchInterface\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows\n\npackage ipv4\n\nimport (\n\t\"net\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error {\n\treturn errOpNoSupport\n}\n\nfunc (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) {\n\treturn nil, errOpNoSupport\n}\n\nfunc (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error {\n\treturn errOpNoSupport\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sys_asmreqn.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin freebsd linux\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) {\n\tb := make([]byte, so.Len)\n\tif _, err := so.Get(c, b); err != nil {\n\t\treturn nil, err\n\t}\n\tmreqn := (*ipMreqn)(unsafe.Pointer(&b[0]))\n\tif mreqn.Ifindex == 0 {\n\t\treturn nil, nil\n\t}\n\tifi, err := net.InterfaceByIndex(int(mreqn.Ifindex))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ifi, nil\n}\n\nfunc (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error {\n\tvar mreqn ipMreqn\n\tif ifi != nil {\n\t\tmreqn.Ifindex = int32(ifi.Index)\n\t}\n\tif grp != nil {\n\t\tmreqn.Multiaddr = [4]byte{grp[0], grp[1], grp[2], grp[3]}\n\t}\n\tb := (*[sizeofIPMreqn]byte)(unsafe.Pointer(&mreqn))[:sizeofIPMreqn]\n\treturn so.Set(c, b)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !darwin,!freebsd,!linux\n\npackage ipv4\n\nimport (\n\t\"net\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) {\n\treturn nil, errOpNoSupport\n}\n\nfunc (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error {\n\treturn errOpNoSupport\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sys_bpf.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build linux\n\npackage ipv4\n\nimport (\n\t\"unsafe\"\n\n\t\"golang.org/x/net/bpf\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error {\n\tprog := sockFProg{\n\t\tLen:    uint16(len(f)),\n\t\tFilter: (*sockFilter)(unsafe.Pointer(&f[0])),\n\t}\n\tb := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog]\n\treturn so.Set(c, b)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sys_bpf_stub.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !linux\n\npackage ipv4\n\nimport (\n\t\"golang.org/x/net/bpf\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error {\n\treturn errOpNoSupport\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sys_bsd.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build netbsd openbsd\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"syscall\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nvar (\n\tctlOpts = [ctlMax]ctlOpt{\n\t\tctlTTL:       {sysIP_RECVTTL, 1, marshalTTL, parseTTL},\n\t\tctlDst:       {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst},\n\t\tctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface},\n\t}\n\n\tsockOpts = map[int]*sockOpt{\n\t\tssoTOS:                {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}},\n\t\tssoTTL:                {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}},\n\t\tssoMulticastTTL:       {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}},\n\t\tssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}},\n\t\tssoMulticastLoopback:  {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}},\n\t\tssoReceiveTTL:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}},\n\t\tssoReceiveDst:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}},\n\t\tssoReceiveInterface:   {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}},\n\t\tssoHeaderPrepend:      {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}},\n\t\tssoJoinGroup:          {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq},\n\t\tssoLeaveGroup:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq},\n\t}\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sys_darwin.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nvar (\n\tctlOpts = [ctlMax]ctlOpt{\n\t\tctlTTL:       {sysIP_RECVTTL, 1, marshalTTL, parseTTL},\n\t\tctlDst:       {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst},\n\t\tctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface},\n\t}\n\n\tsockOpts = map[int]*sockOpt{\n\t\tssoTOS:                {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}},\n\t\tssoTTL:                {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}},\n\t\tssoMulticastTTL:       {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}},\n\t\tssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}},\n\t\tssoMulticastLoopback:  {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}},\n\t\tssoReceiveTTL:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}},\n\t\tssoReceiveDst:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}},\n\t\tssoReceiveInterface:   {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}},\n\t\tssoHeaderPrepend:      {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}},\n\t\tssoStripHeader:        {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_STRIPHDR, Len: 4}},\n\t\tssoJoinGroup:          {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq},\n\t\tssoLeaveGroup:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: 
sizeofIPMreq}, typ: ssoTypeIPMreq},\n\t}\n)\n\nfunc init() {\n\t// Seems like kern.osreldate is veiled on latest OS X. We use\n\t// kern.osrelease instead.\n\ts, err := syscall.Sysctl(\"kern.osrelease\")\n\tif err != nil {\n\t\treturn\n\t}\n\tss := strings.Split(s, \".\")\n\tif len(ss) == 0 {\n\t\treturn\n\t}\n\t// The IP_PKTINFO and protocol-independent multicast API were\n\t// introduced in OS X 10.7 (Darwin 11). But it looks like\n\t// those features require OS X 10.8 (Darwin 12) or above.\n\t// See http://support.apple.com/kb/HT1633.\n\tif mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 {\n\t\treturn\n\t}\n\tctlOpts[ctlPacketInfo].name = sysIP_PKTINFO\n\tctlOpts[ctlPacketInfo].length = sizeofInetPktinfo\n\tctlOpts[ctlPacketInfo].marshal = marshalPacketInfo\n\tctlOpts[ctlPacketInfo].parse = parsePacketInfo\n\tsockOpts[ssoPacketInfo] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}}\n\tsockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn}\n\tsockOpts[ssoJoinGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}\n\tsockOpts[ssoLeaveGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}\n\tsockOpts[ssoJoinSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}\n\tsockOpts[ssoLeaveSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}\n\tsockOpts[ssoBlockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}\n\tsockOpts[ssoUnblockSourceGroup] = 
&sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}\n}\n\nfunc (pi *inetPktinfo) setIfindex(i int) {\n\tpi.Ifindex = uint32(i)\n}\n\nfunc (gr *groupReq) setGroup(grp net.IP) {\n\tsa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4))\n\tsa.Len = sizeofSockaddrInet\n\tsa.Family = syscall.AF_INET\n\tcopy(sa.Addr[:], grp)\n}\n\nfunc (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {\n\tsa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4))\n\tsa.Len = sizeofSockaddrInet\n\tsa.Family = syscall.AF_INET\n\tcopy(sa.Addr[:], grp)\n\tsa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132))\n\tsa.Len = sizeofSockaddrInet\n\tsa.Family = syscall.AF_INET\n\tcopy(sa.Addr[:], src)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sys_dragonfly.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"syscall\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nvar (\n\tctlOpts = [ctlMax]ctlOpt{\n\t\tctlTTL:       {sysIP_RECVTTL, 1, marshalTTL, parseTTL},\n\t\tctlDst:       {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst},\n\t\tctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface},\n\t}\n\n\tsockOpts = map[int]*sockOpt{\n\t\tssoTOS:                {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}},\n\t\tssoTTL:                {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}},\n\t\tssoMulticastTTL:       {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}},\n\t\tssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}},\n\t\tssoMulticastLoopback:  {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}},\n\t\tssoReceiveTTL:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}},\n\t\tssoReceiveDst:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}},\n\t\tssoReceiveInterface:   {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}},\n\t\tssoHeaderPrepend:      {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}},\n\t\tssoJoinGroup:          {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq},\n\t\tssoLeaveGroup:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq},\n\t}\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sys_freebsd.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"runtime\"\n\t\"strings\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nvar (\n\tctlOpts = [ctlMax]ctlOpt{\n\t\tctlTTL:       {sysIP_RECVTTL, 1, marshalTTL, parseTTL},\n\t\tctlDst:       {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst},\n\t\tctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface},\n\t}\n\n\tsockOpts = map[int]*sockOpt{\n\t\tssoTOS:                {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}},\n\t\tssoTTL:                {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}},\n\t\tssoMulticastTTL:       {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}},\n\t\tssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}},\n\t\tssoMulticastLoopback:  {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}},\n\t\tssoReceiveTTL:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}},\n\t\tssoReceiveDst:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}},\n\t\tssoReceiveInterface:   {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}},\n\t\tssoHeaderPrepend:      {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}},\n\t\tssoJoinGroup:          {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},\n\t\tssoLeaveGroup:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},\n\t\tssoJoinSourceGroup:    {Option: socket.Option{Level: 
iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoLeaveSourceGroup:   {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoBlockSourceGroup:   {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t}\n)\n\nfunc init() {\n\tfreebsdVersion, _ = syscall.SysctlUint32(\"kern.osreldate\")\n\tif freebsdVersion >= 1000000 {\n\t\tsockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn}\n\t}\n\tif runtime.GOOS == \"freebsd\" && runtime.GOARCH == \"386\" {\n\t\tarchs, _ := syscall.Sysctl(\"kern.supported_archs\")\n\t\tfor _, s := range strings.Fields(archs) {\n\t\t\tif s == \"amd64\" {\n\t\t\t\tfreebsd32o64 = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (gr *groupReq) setGroup(grp net.IP) {\n\tsa := (*sockaddrInet)(unsafe.Pointer(&gr.Group))\n\tsa.Len = sizeofSockaddrInet\n\tsa.Family = syscall.AF_INET\n\tcopy(sa.Addr[:], grp)\n}\n\nfunc (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {\n\tsa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group))\n\tsa.Len = sizeofSockaddrInet\n\tsa.Family = syscall.AF_INET\n\tcopy(sa.Addr[:], grp)\n\tsa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source))\n\tsa.Len = sizeofSockaddrInet\n\tsa.Family = syscall.AF_INET\n\tcopy(sa.Addr[:], src)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sys_linux.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nvar (\n\tctlOpts = [ctlMax]ctlOpt{\n\t\tctlTTL:        {sysIP_TTL, 1, marshalTTL, parseTTL},\n\t\tctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo},\n\t}\n\n\tsockOpts = map[int]*sockOpt{\n\t\tssoTOS:                {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}},\n\t\tssoTTL:                {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}},\n\t\tssoMulticastTTL:       {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}},\n\t\tssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn},\n\t\tssoMulticastLoopback:  {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}},\n\t\tssoReceiveTTL:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}},\n\t\tssoPacketInfo:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_PKTINFO, Len: 4}},\n\t\tssoHeaderPrepend:      {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}},\n\t\tssoICMPFilter:         {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysICMP_FILTER, Len: sizeofICMPFilter}},\n\t\tssoJoinGroup:          {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},\n\t\tssoLeaveGroup:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},\n\t\tssoJoinSourceGroup:    {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: 
sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoLeaveSourceGroup:   {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoBlockSourceGroup:   {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoAttachFilter:       {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}},\n\t}\n)\n\nfunc (pi *inetPktinfo) setIfindex(i int) {\n\tpi.Ifindex = int32(i)\n}\n\nfunc (gr *groupReq) setGroup(grp net.IP) {\n\tsa := (*sockaddrInet)(unsafe.Pointer(&gr.Group))\n\tsa.Family = syscall.AF_INET\n\tcopy(sa.Addr[:], grp)\n}\n\nfunc (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {\n\tsa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group))\n\tsa.Family = syscall.AF_INET\n\tcopy(sa.Addr[:], grp)\n\tsa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source))\n\tsa.Family = syscall.AF_INET\n\tcopy(sa.Addr[:], src)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sys_solaris.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nvar (\n\tctlOpts = [ctlMax]ctlOpt{\n\t\tctlTTL:        {sysIP_RECVTTL, 4, marshalTTL, parseTTL},\n\t\tctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo},\n\t}\n\n\tsockOpts = map[int]sockOpt{\n\t\tssoTOS:                {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}},\n\t\tssoTTL:                {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}},\n\t\tssoMulticastTTL:       {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}},\n\t\tssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}},\n\t\tssoMulticastLoopback:  {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}},\n\t\tssoReceiveTTL:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}},\n\t\tssoPacketInfo:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}},\n\t\tssoHeaderPrepend:      {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}},\n\t\tssoJoinGroup:          {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},\n\t\tssoLeaveGroup:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},\n\t\tssoJoinSourceGroup:    {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoLeaveSourceGroup:   {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: 
sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoBlockSourceGroup:   {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t}\n)\n\nfunc (pi *inetPktinfo) setIfindex(i int) {\n\tpi.Ifindex = uint32(i)\n}\n\nfunc (gr *groupReq) setGroup(grp net.IP) {\n\tsa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4))\n\tsa.Family = syscall.AF_INET\n\tcopy(sa.Addr[:], grp)\n}\n\nfunc (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {\n\tsa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4))\n\tsa.Family = syscall.AF_INET\n\tcopy(sa.Addr[:], grp)\n\tsa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260))\n\tsa.Family = syscall.AF_INET\n\tcopy(sa.Addr[:], src)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sys_ssmreq.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin freebsd linux solaris\n\npackage ipv4\n\nimport (\n\t\"net\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\nvar freebsd32o64 bool\n\nfunc (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error {\n\tvar gr groupReq\n\tif ifi != nil {\n\t\tgr.Interface = uint32(ifi.Index)\n\t}\n\tgr.setGroup(grp)\n\tvar b []byte\n\tif freebsd32o64 {\n\t\tvar d [sizeofGroupReq + 4]byte\n\t\ts := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))\n\t\tcopy(d[:4], s[:4])\n\t\tcopy(d[8:], s[4:])\n\t\tb = d[:]\n\t} else {\n\t\tb = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq]\n\t}\n\treturn so.Set(c, b)\n}\n\nfunc (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error {\n\tvar gsr groupSourceReq\n\tif ifi != nil {\n\t\tgsr.Interface = uint32(ifi.Index)\n\t}\n\tgsr.setSourceGroup(grp, src)\n\tvar b []byte\n\tif freebsd32o64 {\n\t\tvar d [sizeofGroupSourceReq + 4]byte\n\t\ts := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))\n\t\tcopy(d[:4], s[:4])\n\t\tcopy(d[8:], s[4:])\n\t\tb = d[:]\n\t} else {\n\t\tb = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq]\n\t}\n\treturn so.Set(c, b)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !darwin,!freebsd,!linux,!solaris\n\npackage ipv4\n\nimport (\n\t\"net\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error {\n\treturn errOpNoSupport\n}\n\nfunc (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error {\n\treturn errOpNoSupport\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sys_stub.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows\n\npackage ipv4\n\nvar (\n\tctlOpts = [ctlMax]ctlOpt{}\n\n\tsockOpts = map[int]*sockOpt{}\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/sys_windows.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4\n\nimport (\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nconst (\n\t// See ws2tcpip.h.\n\tsysIP_OPTIONS                = 0x1\n\tsysIP_HDRINCL                = 0x2\n\tsysIP_TOS                    = 0x3\n\tsysIP_TTL                    = 0x4\n\tsysIP_MULTICAST_IF           = 0x9\n\tsysIP_MULTICAST_TTL          = 0xa\n\tsysIP_MULTICAST_LOOP         = 0xb\n\tsysIP_ADD_MEMBERSHIP         = 0xc\n\tsysIP_DROP_MEMBERSHIP        = 0xd\n\tsysIP_DONTFRAGMENT           = 0xe\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0xf\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x10\n\tsysIP_PKTINFO                = 0x13\n\n\tsizeofInetPktinfo  = 0x8\n\tsizeofIPMreq       = 0x8\n\tsizeofIPMreqSource = 0xc\n)\n\ntype inetPktinfo struct {\n\tAddr    [4]byte\n\tIfindex int32\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte\n\tInterface [4]byte\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  [4]byte\n\tSourceaddr [4]byte\n\tInterface  [4]byte\n}\n\n// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms738586(v=vs.85).aspx\nvar (\n\tctlOpts = [ctlMax]ctlOpt{}\n\n\tsockOpts = map[int]*sockOpt{\n\t\tssoTOS:                {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}},\n\t\tssoTTL:                {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}},\n\t\tssoMulticastTTL:       {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}},\n\t\tssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}},\n\t\tssoMulticastLoopback:  {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}},\n\t\tssoHeaderPrepend:      {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}},\n\t\tssoJoinGroup:          {Option: 
socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq},\n\t\tssoLeaveGroup:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq},\n\t}\n)\n\nfunc (pi *inetPktinfo) setIfindex(i int) {\n\tpi.Ifindex = int32(i)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/unicast_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4_test\n\nimport (\n\t\"bytes\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org/x/net/icmp\"\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv4\"\n)\n\nfunc TestPacketConnReadWriteUnicastUDP(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tifi := nettest.RoutedInterface(\"ip4\", net.FlagUp|net.FlagLoopback)\n\tif ifi == nil {\n\t\tt.Skipf(\"not available on %s\", runtime.GOOS)\n\t}\n\n\tc, err := nettest.NewLocalPacketListener(\"udp4\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\tp := ipv4.NewPacketConn(c)\n\tdefer p.Close()\n\n\tdst := c.LocalAddr()\n\tcf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface\n\twb := []byte(\"HELLO-R-U-THERE\")\n\n\tfor i, toggle := range []bool{true, false, true} {\n\t\tif err := p.SetControlMessage(cf, toggle); err != nil {\n\t\t\tif nettest.ProtocolNotSupported(err) {\n\t\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tp.SetTTL(i + 1)\n\t\tif err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif n, err := p.WriteTo(wb, nil, dst); err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if n != len(wb) {\n\t\t\tt.Fatalf(\"got %v; want %v\", n, len(wb))\n\t\t}\n\t\trb := make([]byte, 128)\n\t\tif err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif n, _, _, err := p.ReadFrom(rb); err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if !bytes.Equal(rb[:n], wb) {\n\t\t\tt.Fatalf(\"got %v; want %v\", rb[:n], wb)\n\t\t}\n\t}\n}\n\nfunc TestPacketConnReadWriteUnicastICMP(t *testing.T) 
{\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif m, ok := nettest.SupportsRawIPSocket(); !ok {\n\t\tt.Skip(m)\n\t}\n\tifi := nettest.RoutedInterface(\"ip4\", net.FlagUp|net.FlagLoopback)\n\tif ifi == nil {\n\t\tt.Skipf(\"not available on %s\", runtime.GOOS)\n\t}\n\n\tc, err := net.ListenPacket(\"ip4:icmp\", \"0.0.0.0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\tdst, err := net.ResolveIPAddr(\"ip4\", \"127.0.0.1\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tp := ipv4.NewPacketConn(c)\n\tdefer p.Close()\n\tcf := ipv4.FlagDst | ipv4.FlagInterface\n\tif runtime.GOOS != \"solaris\" {\n\t\t// Solaris never allows to modify ICMP properties.\n\t\tcf |= ipv4.FlagTTL\n\t}\n\n\tfor i, toggle := range []bool{true, false, true} {\n\t\twb, err := (&icmp.Message{\n\t\t\tType: ipv4.ICMPTypeEcho, Code: 0,\n\t\t\tBody: &icmp.Echo{\n\t\t\t\tID: os.Getpid() & 0xffff, Seq: i + 1,\n\t\t\t\tData: []byte(\"HELLO-R-U-THERE\"),\n\t\t\t},\n\t\t}).Marshal(nil)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := p.SetControlMessage(cf, toggle); err != nil {\n\t\t\tif nettest.ProtocolNotSupported(err) {\n\t\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tp.SetTTL(i + 1)\n\t\tif err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif n, err := p.WriteTo(wb, nil, dst); err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if n != len(wb) {\n\t\t\tt.Fatalf(\"got %v; want %v\", n, len(wb))\n\t\t}\n\t\trb := make([]byte, 128)\n\tloop:\n\t\tif err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif n, _, _, err := p.ReadFrom(rb); err != nil {\n\t\t\tswitch runtime.GOOS {\n\t\t\tcase \"darwin\": // older darwin kernels have some limitation on receiving icmp packet through raw socket\n\t\t\t\tt.Logf(\"not supported on 
%s\", runtime.GOOS)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt.Fatal(err)\n\t\t} else {\n\t\t\tm, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n])\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif runtime.GOOS == \"linux\" && m.Type == ipv4.ICMPTypeEcho {\n\t\t\t\t// On Linux we must handle own sent packets.\n\t\t\t\tgoto loop\n\t\t\t}\n\t\t\tif m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 {\n\t\t\t\tt.Fatalf(\"got type=%v, code=%v; want type=%v, code=%v\", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestRawConnReadWriteUnicastICMP(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif m, ok := nettest.SupportsRawIPSocket(); !ok {\n\t\tt.Skip(m)\n\t}\n\tifi := nettest.RoutedInterface(\"ip4\", net.FlagUp|net.FlagLoopback)\n\tif ifi == nil {\n\t\tt.Skipf(\"not available on %s\", runtime.GOOS)\n\t}\n\n\tc, err := net.ListenPacket(\"ip4:icmp\", \"0.0.0.0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\tdst, err := net.ResolveIPAddr(\"ip4\", \"127.0.0.1\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tr, err := ipv4.NewRawConn(c)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer r.Close()\n\tcf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface\n\n\tfor i, toggle := range []bool{true, false, true} {\n\t\twb, err := (&icmp.Message{\n\t\t\tType: ipv4.ICMPTypeEcho, Code: 0,\n\t\t\tBody: &icmp.Echo{\n\t\t\t\tID: os.Getpid() & 0xffff, Seq: i + 1,\n\t\t\t\tData: []byte(\"HELLO-R-U-THERE\"),\n\t\t\t},\n\t\t}).Marshal(nil)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\twh := &ipv4.Header{\n\t\t\tVersion:  ipv4.Version,\n\t\t\tLen:      ipv4.HeaderLen,\n\t\t\tTOS:      i + 1,\n\t\t\tTotalLen: ipv4.HeaderLen + len(wb),\n\t\t\tTTL:      i + 1,\n\t\t\tProtocol: 1,\n\t\t\tDst:      dst.IP,\n\t\t}\n\t\tif err := r.SetControlMessage(cf, toggle); err != nil {\n\t\t\tif nettest.ProtocolNotSupported(err) {\n\t\t\t\tt.Logf(\"not 
supported on %s\", runtime.GOOS)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := r.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := r.WriteTo(wh, wb, nil); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\trb := make([]byte, ipv4.HeaderLen+128)\n\tloop:\n\t\tif err := r.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif _, b, _, err := r.ReadFrom(rb); err != nil {\n\t\t\tswitch runtime.GOOS {\n\t\t\tcase \"darwin\": // older darwin kernels have some limitation on receiving icmp packet through raw socket\n\t\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt.Fatal(err)\n\t\t} else {\n\t\t\tm, err := icmp.ParseMessage(iana.ProtocolICMP, b)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif runtime.GOOS == \"linux\" && m.Type == ipv4.ICMPTypeEcho {\n\t\t\t\t// On Linux we must handle own sent packets.\n\t\t\t\tgoto loop\n\t\t\t}\n\t\t\tif m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 {\n\t\t\t\tt.Fatalf(\"got type=%v, code=%v; want type=%v, code=%v\", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/unicastsockopt_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv4_test\n\nimport (\n\t\"net\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv4\"\n)\n\nfunc TestConnUnicastSocketOptions(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tifi := nettest.RoutedInterface(\"ip4\", net.FlagUp|net.FlagLoopback)\n\tif ifi == nil {\n\t\tt.Skipf(\"not available on %s\", runtime.GOOS)\n\t}\n\n\tln, err := net.Listen(\"tcp4\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer ln.Close()\n\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\tc, err := ln.Accept()\n\t\tif err != nil {\n\t\t\terrc <- err\n\t\t\treturn\n\t\t}\n\t\terrc <- c.Close()\n\t}()\n\n\tc, err := net.Dial(\"tcp4\", ln.Addr().String())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\ttestUnicastSocketOptions(t, ipv4.NewConn(c))\n\n\tif err := <-errc; err != nil {\n\t\tt.Errorf(\"server: %v\", err)\n\t}\n}\n\nvar packetConnUnicastSocketOptionTests = []struct {\n\tnet, proto, addr string\n}{\n\t{\"udp4\", \"\", \"127.0.0.1:0\"},\n\t{\"ip4\", \":icmp\", \"127.0.0.1\"},\n}\n\nfunc TestPacketConnUnicastSocketOptions(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tifi := nettest.RoutedInterface(\"ip4\", net.FlagUp|net.FlagLoopback)\n\tif ifi == nil {\n\t\tt.Skipf(\"not available on %s\", runtime.GOOS)\n\t}\n\n\tm, ok := nettest.SupportsRawIPSocket()\n\tfor _, tt := range packetConnUnicastSocketOptionTests {\n\t\tif tt.net == \"ip4\" && !ok {\n\t\t\tt.Log(m)\n\t\t\tcontinue\n\t\t}\n\t\tc, err := net.ListenPacket(tt.net+tt.proto, tt.addr)\n\t\tif err != nil 
{\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c.Close()\n\n\t\ttestUnicastSocketOptions(t, ipv4.NewPacketConn(c))\n\t}\n}\n\nfunc TestRawConnUnicastSocketOptions(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif m, ok := nettest.SupportsRawIPSocket(); !ok {\n\t\tt.Skip(m)\n\t}\n\tifi := nettest.RoutedInterface(\"ip4\", net.FlagUp|net.FlagLoopback)\n\tif ifi == nil {\n\t\tt.Skipf(\"not available on %s\", runtime.GOOS)\n\t}\n\n\tc, err := net.ListenPacket(\"ip4:icmp\", \"127.0.0.1\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\tr, err := ipv4.NewRawConn(c)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttestUnicastSocketOptions(t, r)\n}\n\ntype testIPv4UnicastConn interface {\n\tTOS() (int, error)\n\tSetTOS(int) error\n\tTTL() (int, error)\n\tSetTTL(int) error\n}\n\nfunc testUnicastSocketOptions(t *testing.T, c testIPv4UnicastConn) {\n\ttos := iana.DiffServCS0 | iana.NotECNTransport\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\t// IP_TOS option is supported on Windows 8 and beyond.\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\n\tif err := c.SetTOS(tos); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif v, err := c.TOS(); err != nil {\n\t\tt.Fatal(err)\n\t} else if v != tos {\n\t\tt.Fatalf(\"got %v; want %v\", v, tos)\n\t}\n\tconst ttl = 255\n\tif err := c.SetTTL(ttl); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif v, err := c.TTL(); err != nil {\n\t\tt.Fatal(err)\n\t} else if v != ttl {\n\t\tt.Fatalf(\"got %v; want %v\", v, ttl)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_darwin.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_darwin.go\n\npackage ipv4\n\nconst (\n\tsysIP_OPTIONS     = 0x1\n\tsysIP_HDRINCL     = 0x2\n\tsysIP_TOS         = 0x3\n\tsysIP_TTL         = 0x4\n\tsysIP_RECVOPTS    = 0x5\n\tsysIP_RECVRETOPTS = 0x6\n\tsysIP_RECVDSTADDR = 0x7\n\tsysIP_RETOPTS     = 0x8\n\tsysIP_RECVIF      = 0x14\n\tsysIP_STRIPHDR    = 0x17\n\tsysIP_RECVTTL     = 0x18\n\tsysIP_BOUND_IF    = 0x19\n\tsysIP_PKTINFO     = 0x1a\n\tsysIP_RECVPKTINFO = 0x1a\n\n\tsysIP_MULTICAST_IF           = 0x9\n\tsysIP_MULTICAST_TTL          = 0xa\n\tsysIP_MULTICAST_LOOP         = 0xb\n\tsysIP_ADD_MEMBERSHIP         = 0xc\n\tsysIP_DROP_MEMBERSHIP        = 0xd\n\tsysIP_MULTICAST_VIF          = 0xe\n\tsysIP_MULTICAST_IFINDEX      = 0x42\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0x46\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x47\n\tsysIP_BLOCK_SOURCE           = 0x48\n\tsysIP_UNBLOCK_SOURCE         = 0x49\n\tsysMCAST_JOIN_GROUP          = 0x50\n\tsysMCAST_LEAVE_GROUP         = 0x51\n\tsysMCAST_JOIN_SOURCE_GROUP   = 0x52\n\tsysMCAST_LEAVE_SOURCE_GROUP  = 0x53\n\tsysMCAST_BLOCK_SOURCE        = 0x54\n\tsysMCAST_UNBLOCK_SOURCE      = 0x55\n\n\tsizeofSockaddrStorage = 0x80\n\tsizeofSockaddrInet    = 0x10\n\tsizeofInetPktinfo     = 0xc\n\n\tsizeofIPMreq         = 0x8\n\tsizeofIPMreqn        = 0xc\n\tsizeofIPMreqSource   = 0xc\n\tsizeofGroupReq       = 0x84\n\tsizeofGroupSourceReq = 0x104\n)\n\ntype sockaddrStorage struct {\n\tLen         uint8\n\tFamily      uint8\n\tX__ss_pad1  [6]int8\n\tX__ss_align int64\n\tX__ss_pad2  [112]int8\n}\n\ntype sockaddrInet struct {\n\tLen    uint8\n\tFamily uint8\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype inetPktinfo struct {\n\tIfindex  uint32\n\tSpec_dst [4]byte /* in_addr */\n\tAddr     [4]byte /* in_addr */\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n\ntype ipMreqn struct {\n\tMultiaddr [4]byte /* in_addr */\n\tAddress   [4]byte /* in_addr 
*/\n\tIfindex   int32\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  [4]byte /* in_addr */\n\tSourceaddr [4]byte /* in_addr */\n\tInterface  [4]byte /* in_addr */\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [128]byte\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [128]byte\n\tPad_cgo_1 [128]byte\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_dragonfly.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_dragonfly.go\n\npackage ipv4\n\nconst (\n\tsysIP_OPTIONS     = 0x1\n\tsysIP_HDRINCL     = 0x2\n\tsysIP_TOS         = 0x3\n\tsysIP_TTL         = 0x4\n\tsysIP_RECVOPTS    = 0x5\n\tsysIP_RECVRETOPTS = 0x6\n\tsysIP_RECVDSTADDR = 0x7\n\tsysIP_RETOPTS     = 0x8\n\tsysIP_RECVIF      = 0x14\n\tsysIP_RECVTTL     = 0x41\n\n\tsysIP_MULTICAST_IF    = 0x9\n\tsysIP_MULTICAST_TTL   = 0xa\n\tsysIP_MULTICAST_LOOP  = 0xb\n\tsysIP_MULTICAST_VIF   = 0xe\n\tsysIP_ADD_MEMBERSHIP  = 0xc\n\tsysIP_DROP_MEMBERSHIP = 0xd\n\n\tsizeofIPMreq = 0x8\n)\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_freebsd.go\n\npackage ipv4\n\nconst (\n\tsysIP_OPTIONS     = 0x1\n\tsysIP_HDRINCL     = 0x2\n\tsysIP_TOS         = 0x3\n\tsysIP_TTL         = 0x4\n\tsysIP_RECVOPTS    = 0x5\n\tsysIP_RECVRETOPTS = 0x6\n\tsysIP_RECVDSTADDR = 0x7\n\tsysIP_SENDSRCADDR = 0x7\n\tsysIP_RETOPTS     = 0x8\n\tsysIP_RECVIF      = 0x14\n\tsysIP_ONESBCAST   = 0x17\n\tsysIP_BINDANY     = 0x18\n\tsysIP_RECVTTL     = 0x41\n\tsysIP_MINTTL      = 0x42\n\tsysIP_DONTFRAG    = 0x43\n\tsysIP_RECVTOS     = 0x44\n\n\tsysIP_MULTICAST_IF           = 0x9\n\tsysIP_MULTICAST_TTL          = 0xa\n\tsysIP_MULTICAST_LOOP         = 0xb\n\tsysIP_ADD_MEMBERSHIP         = 0xc\n\tsysIP_DROP_MEMBERSHIP        = 0xd\n\tsysIP_MULTICAST_VIF          = 0xe\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0x46\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x47\n\tsysIP_BLOCK_SOURCE           = 0x48\n\tsysIP_UNBLOCK_SOURCE         = 0x49\n\tsysMCAST_JOIN_GROUP          = 0x50\n\tsysMCAST_LEAVE_GROUP         = 0x51\n\tsysMCAST_JOIN_SOURCE_GROUP   = 0x52\n\tsysMCAST_LEAVE_SOURCE_GROUP  = 0x53\n\tsysMCAST_BLOCK_SOURCE        = 0x54\n\tsysMCAST_UNBLOCK_SOURCE      = 0x55\n\n\tsizeofSockaddrStorage = 0x80\n\tsizeofSockaddrInet    = 0x10\n\n\tsizeofIPMreq         = 0x8\n\tsizeofIPMreqn        = 0xc\n\tsizeofIPMreqSource   = 0xc\n\tsizeofGroupReq       = 0x84\n\tsizeofGroupSourceReq = 0x104\n)\n\ntype sockaddrStorage struct {\n\tLen         uint8\n\tFamily      uint8\n\tX__ss_pad1  [6]int8\n\tX__ss_align int64\n\tX__ss_pad2  [112]int8\n}\n\ntype sockaddrInet struct {\n\tLen    uint8\n\tFamily uint8\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n\ntype ipMreqn struct {\n\tMultiaddr [4]byte /* in_addr */\n\tAddress   [4]byte /* in_addr */\n\tIfindex   int32\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  [4]byte /* in_addr */\n\tSourceaddr [4]byte /* in_addr 
*/\n\tInterface  [4]byte /* in_addr */\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tGroup     sockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tGroup     sockaddrStorage\n\tSource    sockaddrStorage\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_freebsd.go\n\npackage ipv4\n\nconst (\n\tsysIP_OPTIONS     = 0x1\n\tsysIP_HDRINCL     = 0x2\n\tsysIP_TOS         = 0x3\n\tsysIP_TTL         = 0x4\n\tsysIP_RECVOPTS    = 0x5\n\tsysIP_RECVRETOPTS = 0x6\n\tsysIP_RECVDSTADDR = 0x7\n\tsysIP_SENDSRCADDR = 0x7\n\tsysIP_RETOPTS     = 0x8\n\tsysIP_RECVIF      = 0x14\n\tsysIP_ONESBCAST   = 0x17\n\tsysIP_BINDANY     = 0x18\n\tsysIP_RECVTTL     = 0x41\n\tsysIP_MINTTL      = 0x42\n\tsysIP_DONTFRAG    = 0x43\n\tsysIP_RECVTOS     = 0x44\n\n\tsysIP_MULTICAST_IF           = 0x9\n\tsysIP_MULTICAST_TTL          = 0xa\n\tsysIP_MULTICAST_LOOP         = 0xb\n\tsysIP_ADD_MEMBERSHIP         = 0xc\n\tsysIP_DROP_MEMBERSHIP        = 0xd\n\tsysIP_MULTICAST_VIF          = 0xe\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0x46\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x47\n\tsysIP_BLOCK_SOURCE           = 0x48\n\tsysIP_UNBLOCK_SOURCE         = 0x49\n\tsysMCAST_JOIN_GROUP          = 0x50\n\tsysMCAST_LEAVE_GROUP         = 0x51\n\tsysMCAST_JOIN_SOURCE_GROUP   = 0x52\n\tsysMCAST_LEAVE_SOURCE_GROUP  = 0x53\n\tsysMCAST_BLOCK_SOURCE        = 0x54\n\tsysMCAST_UNBLOCK_SOURCE      = 0x55\n\n\tsizeofSockaddrStorage = 0x80\n\tsizeofSockaddrInet    = 0x10\n\n\tsizeofIPMreq         = 0x8\n\tsizeofIPMreqn        = 0xc\n\tsizeofIPMreqSource   = 0xc\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n)\n\ntype sockaddrStorage struct {\n\tLen         uint8\n\tFamily      uint8\n\tX__ss_pad1  [6]int8\n\tX__ss_align int64\n\tX__ss_pad2  [112]int8\n}\n\ntype sockaddrInet struct {\n\tLen    uint8\n\tFamily uint8\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n\ntype ipMreqn struct {\n\tMultiaddr [4]byte /* in_addr */\n\tAddress   [4]byte /* in_addr */\n\tIfindex   int32\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  [4]byte /* in_addr */\n\tSourceaddr [4]byte /* in_addr 
*/\n\tInterface  [4]byte /* in_addr */\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     sockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     sockaddrStorage\n\tSource    sockaddrStorage\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_freebsd.go\n\npackage ipv4\n\nconst (\n\tsysIP_OPTIONS     = 0x1\n\tsysIP_HDRINCL     = 0x2\n\tsysIP_TOS         = 0x3\n\tsysIP_TTL         = 0x4\n\tsysIP_RECVOPTS    = 0x5\n\tsysIP_RECVRETOPTS = 0x6\n\tsysIP_RECVDSTADDR = 0x7\n\tsysIP_SENDSRCADDR = 0x7\n\tsysIP_RETOPTS     = 0x8\n\tsysIP_RECVIF      = 0x14\n\tsysIP_ONESBCAST   = 0x17\n\tsysIP_BINDANY     = 0x18\n\tsysIP_RECVTTL     = 0x41\n\tsysIP_MINTTL      = 0x42\n\tsysIP_DONTFRAG    = 0x43\n\tsysIP_RECVTOS     = 0x44\n\n\tsysIP_MULTICAST_IF           = 0x9\n\tsysIP_MULTICAST_TTL          = 0xa\n\tsysIP_MULTICAST_LOOP         = 0xb\n\tsysIP_ADD_MEMBERSHIP         = 0xc\n\tsysIP_DROP_MEMBERSHIP        = 0xd\n\tsysIP_MULTICAST_VIF          = 0xe\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0x46\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x47\n\tsysIP_BLOCK_SOURCE           = 0x48\n\tsysIP_UNBLOCK_SOURCE         = 0x49\n\tsysMCAST_JOIN_GROUP          = 0x50\n\tsysMCAST_LEAVE_GROUP         = 0x51\n\tsysMCAST_JOIN_SOURCE_GROUP   = 0x52\n\tsysMCAST_LEAVE_SOURCE_GROUP  = 0x53\n\tsysMCAST_BLOCK_SOURCE        = 0x54\n\tsysMCAST_UNBLOCK_SOURCE      = 0x55\n\n\tsizeofSockaddrStorage = 0x80\n\tsizeofSockaddrInet    = 0x10\n\n\tsizeofIPMreq         = 0x8\n\tsizeofIPMreqn        = 0xc\n\tsizeofIPMreqSource   = 0xc\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n)\n\ntype sockaddrStorage struct {\n\tLen         uint8\n\tFamily      uint8\n\tX__ss_pad1  [6]int8\n\tX__ss_align int64\n\tX__ss_pad2  [112]int8\n}\n\ntype sockaddrInet struct {\n\tLen    uint8\n\tFamily uint8\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n\ntype ipMreqn struct {\n\tMultiaddr [4]byte /* in_addr */\n\tAddress   [4]byte /* in_addr */\n\tIfindex   int32\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  [4]byte /* in_addr */\n\tSourceaddr [4]byte /* in_addr 
*/\n\tInterface  [4]byte /* in_addr */\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     sockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     sockaddrStorage\n\tSource    sockaddrStorage\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_linux_386.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv4\n\nconst (\n\tsysIP_TOS             = 0x1\n\tsysIP_TTL             = 0x2\n\tsysIP_HDRINCL         = 0x3\n\tsysIP_OPTIONS         = 0x4\n\tsysIP_ROUTER_ALERT    = 0x5\n\tsysIP_RECVOPTS        = 0x6\n\tsysIP_RETOPTS         = 0x7\n\tsysIP_PKTINFO         = 0x8\n\tsysIP_PKTOPTIONS      = 0x9\n\tsysIP_MTU_DISCOVER    = 0xa\n\tsysIP_RECVERR         = 0xb\n\tsysIP_RECVTTL         = 0xc\n\tsysIP_RECVTOS         = 0xd\n\tsysIP_MTU             = 0xe\n\tsysIP_FREEBIND        = 0xf\n\tsysIP_TRANSPARENT     = 0x13\n\tsysIP_RECVRETOPTS     = 0x7\n\tsysIP_ORIGDSTADDR     = 0x14\n\tsysIP_RECVORIGDSTADDR = 0x14\n\tsysIP_MINTTL          = 0x15\n\tsysIP_NODEFRAG        = 0x16\n\tsysIP_UNICAST_IF      = 0x32\n\n\tsysIP_MULTICAST_IF           = 0x20\n\tsysIP_MULTICAST_TTL          = 0x21\n\tsysIP_MULTICAST_LOOP         = 0x22\n\tsysIP_ADD_MEMBERSHIP         = 0x23\n\tsysIP_DROP_MEMBERSHIP        = 0x24\n\tsysIP_UNBLOCK_SOURCE         = 0x25\n\tsysIP_BLOCK_SOURCE           = 0x26\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0x27\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x28\n\tsysIP_MSFILTER               = 0x29\n\tsysMCAST_JOIN_GROUP          = 0x2a\n\tsysMCAST_LEAVE_GROUP         = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP   = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP  = 0x2f\n\tsysMCAST_BLOCK_SOURCE        = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE      = 0x2c\n\tsysMCAST_MSFILTER            = 0x30\n\tsysIP_MULTICAST_ALL          = 0x31\n\n\tsysICMP_FILTER = 0x1\n\n\tsysSO_EE_ORIGIN_NONE         = 0x0\n\tsysSO_EE_ORIGIN_LOCAL        = 0x1\n\tsysSO_EE_ORIGIN_ICMP         = 0x2\n\tsysSO_EE_ORIGIN_ICMP6        = 0x3\n\tsysSO_EE_ORIGIN_TXSTATUS     = 0x4\n\tsysSO_EE_ORIGIN_TIMESTAMPING = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet          = 0x10\n\tsizeofInetPktinfo           = 0xc\n\tsizeofSockExtendedErr       = 0x10\n\n\tsizeofIPMreq        
 = 0x8\n\tsizeofIPMreqn        = 0xc\n\tsizeofIPMreqSource   = 0xc\n\tsizeofGroupReq       = 0x84\n\tsizeofGroupSourceReq = 0x104\n\n\tsizeofICMPFilter = 0x4\n\n\tsizeofSockFprog = 0x8\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype inetPktinfo struct {\n\tIfindex  int32\n\tSpec_dst [4]byte /* in_addr */\n\tAddr     [4]byte /* in_addr */\n}\n\ntype sockExtendedErr struct {\n\tErrno  uint32\n\tOrigin uint8\n\tType   uint8\n\tCode   uint8\n\tPad    uint8\n\tInfo   uint32\n\tData   uint32\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n\ntype ipMreqn struct {\n\tMultiaddr [4]byte /* in_addr */\n\tAddress   [4]byte /* in_addr */\n\tIfindex   int32\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  uint32\n\tInterface  uint32\n\tSourceaddr uint32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpFilter struct {\n\tData uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 [2]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv4\n\nconst (\n\tsysIP_TOS             = 0x1\n\tsysIP_TTL             = 0x2\n\tsysIP_HDRINCL         = 0x3\n\tsysIP_OPTIONS         = 0x4\n\tsysIP_ROUTER_ALERT    = 0x5\n\tsysIP_RECVOPTS        = 0x6\n\tsysIP_RETOPTS         = 0x7\n\tsysIP_PKTINFO         = 0x8\n\tsysIP_PKTOPTIONS      = 0x9\n\tsysIP_MTU_DISCOVER    = 0xa\n\tsysIP_RECVERR         = 0xb\n\tsysIP_RECVTTL         = 0xc\n\tsysIP_RECVTOS         = 0xd\n\tsysIP_MTU             = 0xe\n\tsysIP_FREEBIND        = 0xf\n\tsysIP_TRANSPARENT     = 0x13\n\tsysIP_RECVRETOPTS     = 0x7\n\tsysIP_ORIGDSTADDR     = 0x14\n\tsysIP_RECVORIGDSTADDR = 0x14\n\tsysIP_MINTTL          = 0x15\n\tsysIP_NODEFRAG        = 0x16\n\tsysIP_UNICAST_IF      = 0x32\n\n\tsysIP_MULTICAST_IF           = 0x20\n\tsysIP_MULTICAST_TTL          = 0x21\n\tsysIP_MULTICAST_LOOP         = 0x22\n\tsysIP_ADD_MEMBERSHIP         = 0x23\n\tsysIP_DROP_MEMBERSHIP        = 0x24\n\tsysIP_UNBLOCK_SOURCE         = 0x25\n\tsysIP_BLOCK_SOURCE           = 0x26\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0x27\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x28\n\tsysIP_MSFILTER               = 0x29\n\tsysMCAST_JOIN_GROUP          = 0x2a\n\tsysMCAST_LEAVE_GROUP         = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP   = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP  = 0x2f\n\tsysMCAST_BLOCK_SOURCE        = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE      = 0x2c\n\tsysMCAST_MSFILTER            = 0x30\n\tsysIP_MULTICAST_ALL          = 0x31\n\n\tsysICMP_FILTER = 0x1\n\n\tsysSO_EE_ORIGIN_NONE         = 0x0\n\tsysSO_EE_ORIGIN_LOCAL        = 0x1\n\tsysSO_EE_ORIGIN_ICMP         = 0x2\n\tsysSO_EE_ORIGIN_ICMP6        = 0x3\n\tsysSO_EE_ORIGIN_TXSTATUS     = 0x4\n\tsysSO_EE_ORIGIN_TIMESTAMPING = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet          = 0x10\n\tsizeofInetPktinfo           = 0xc\n\tsizeofSockExtendedErr       = 0x10\n\n\tsizeofIPMreq        
 = 0x8\n\tsizeofIPMreqn        = 0xc\n\tsizeofIPMreqSource   = 0xc\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n\n\tsizeofICMPFilter = 0x4\n\n\tsizeofSockFprog = 0x10\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype inetPktinfo struct {\n\tIfindex  int32\n\tSpec_dst [4]byte /* in_addr */\n\tAddr     [4]byte /* in_addr */\n}\n\ntype sockExtendedErr struct {\n\tErrno  uint32\n\tOrigin uint8\n\tType   uint8\n\tCode   uint8\n\tPad    uint8\n\tInfo   uint32\n\tData   uint32\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n\ntype ipMreqn struct {\n\tMultiaddr [4]byte /* in_addr */\n\tAddress   [4]byte /* in_addr */\n\tIfindex   int32\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  uint32\n\tInterface  uint32\n\tSourceaddr uint32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpFilter struct {\n\tData uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 [6]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_linux_arm.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv4\n\nconst (\n\tsysIP_TOS             = 0x1\n\tsysIP_TTL             = 0x2\n\tsysIP_HDRINCL         = 0x3\n\tsysIP_OPTIONS         = 0x4\n\tsysIP_ROUTER_ALERT    = 0x5\n\tsysIP_RECVOPTS        = 0x6\n\tsysIP_RETOPTS         = 0x7\n\tsysIP_PKTINFO         = 0x8\n\tsysIP_PKTOPTIONS      = 0x9\n\tsysIP_MTU_DISCOVER    = 0xa\n\tsysIP_RECVERR         = 0xb\n\tsysIP_RECVTTL         = 0xc\n\tsysIP_RECVTOS         = 0xd\n\tsysIP_MTU             = 0xe\n\tsysIP_FREEBIND        = 0xf\n\tsysIP_TRANSPARENT     = 0x13\n\tsysIP_RECVRETOPTS     = 0x7\n\tsysIP_ORIGDSTADDR     = 0x14\n\tsysIP_RECVORIGDSTADDR = 0x14\n\tsysIP_MINTTL          = 0x15\n\tsysIP_NODEFRAG        = 0x16\n\tsysIP_UNICAST_IF      = 0x32\n\n\tsysIP_MULTICAST_IF           = 0x20\n\tsysIP_MULTICAST_TTL          = 0x21\n\tsysIP_MULTICAST_LOOP         = 0x22\n\tsysIP_ADD_MEMBERSHIP         = 0x23\n\tsysIP_DROP_MEMBERSHIP        = 0x24\n\tsysIP_UNBLOCK_SOURCE         = 0x25\n\tsysIP_BLOCK_SOURCE           = 0x26\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0x27\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x28\n\tsysIP_MSFILTER               = 0x29\n\tsysMCAST_JOIN_GROUP          = 0x2a\n\tsysMCAST_LEAVE_GROUP         = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP   = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP  = 0x2f\n\tsysMCAST_BLOCK_SOURCE        = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE      = 0x2c\n\tsysMCAST_MSFILTER            = 0x30\n\tsysIP_MULTICAST_ALL          = 0x31\n\n\tsysICMP_FILTER = 0x1\n\n\tsysSO_EE_ORIGIN_NONE         = 0x0\n\tsysSO_EE_ORIGIN_LOCAL        = 0x1\n\tsysSO_EE_ORIGIN_ICMP         = 0x2\n\tsysSO_EE_ORIGIN_ICMP6        = 0x3\n\tsysSO_EE_ORIGIN_TXSTATUS     = 0x4\n\tsysSO_EE_ORIGIN_TIMESTAMPING = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet          = 0x10\n\tsizeofInetPktinfo           = 0xc\n\tsizeofSockExtendedErr       = 0x10\n\n\tsizeofIPMreq        
 = 0x8\n\tsizeofIPMreqn        = 0xc\n\tsizeofIPMreqSource   = 0xc\n\tsizeofGroupReq       = 0x84\n\tsizeofGroupSourceReq = 0x104\n\n\tsizeofICMPFilter = 0x4\n\n\tsizeofSockFprog = 0x8\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype inetPktinfo struct {\n\tIfindex  int32\n\tSpec_dst [4]byte /* in_addr */\n\tAddr     [4]byte /* in_addr */\n}\n\ntype sockExtendedErr struct {\n\tErrno  uint32\n\tOrigin uint8\n\tType   uint8\n\tCode   uint8\n\tPad    uint8\n\tInfo   uint32\n\tData   uint32\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n\ntype ipMreqn struct {\n\tMultiaddr [4]byte /* in_addr */\n\tAddress   [4]byte /* in_addr */\n\tIfindex   int32\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  uint32\n\tInterface  uint32\n\tSourceaddr uint32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpFilter struct {\n\tData uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 [2]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv4\n\nconst (\n\tsysIP_TOS             = 0x1\n\tsysIP_TTL             = 0x2\n\tsysIP_HDRINCL         = 0x3\n\tsysIP_OPTIONS         = 0x4\n\tsysIP_ROUTER_ALERT    = 0x5\n\tsysIP_RECVOPTS        = 0x6\n\tsysIP_RETOPTS         = 0x7\n\tsysIP_PKTINFO         = 0x8\n\tsysIP_PKTOPTIONS      = 0x9\n\tsysIP_MTU_DISCOVER    = 0xa\n\tsysIP_RECVERR         = 0xb\n\tsysIP_RECVTTL         = 0xc\n\tsysIP_RECVTOS         = 0xd\n\tsysIP_MTU             = 0xe\n\tsysIP_FREEBIND        = 0xf\n\tsysIP_TRANSPARENT     = 0x13\n\tsysIP_RECVRETOPTS     = 0x7\n\tsysIP_ORIGDSTADDR     = 0x14\n\tsysIP_RECVORIGDSTADDR = 0x14\n\tsysIP_MINTTL          = 0x15\n\tsysIP_NODEFRAG        = 0x16\n\tsysIP_UNICAST_IF      = 0x32\n\n\tsysIP_MULTICAST_IF           = 0x20\n\tsysIP_MULTICAST_TTL          = 0x21\n\tsysIP_MULTICAST_LOOP         = 0x22\n\tsysIP_ADD_MEMBERSHIP         = 0x23\n\tsysIP_DROP_MEMBERSHIP        = 0x24\n\tsysIP_UNBLOCK_SOURCE         = 0x25\n\tsysIP_BLOCK_SOURCE           = 0x26\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0x27\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x28\n\tsysIP_MSFILTER               = 0x29\n\tsysMCAST_JOIN_GROUP          = 0x2a\n\tsysMCAST_LEAVE_GROUP         = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP   = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP  = 0x2f\n\tsysMCAST_BLOCK_SOURCE        = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE      = 0x2c\n\tsysMCAST_MSFILTER            = 0x30\n\tsysIP_MULTICAST_ALL          = 0x31\n\n\tsysICMP_FILTER = 0x1\n\n\tsysSO_EE_ORIGIN_NONE         = 0x0\n\tsysSO_EE_ORIGIN_LOCAL        = 0x1\n\tsysSO_EE_ORIGIN_ICMP         = 0x2\n\tsysSO_EE_ORIGIN_ICMP6        = 0x3\n\tsysSO_EE_ORIGIN_TXSTATUS     = 0x4\n\tsysSO_EE_ORIGIN_TIMESTAMPING = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet          = 0x10\n\tsizeofInetPktinfo           = 0xc\n\tsizeofSockExtendedErr       = 0x10\n\n\tsizeofIPMreq        
 = 0x8\n\tsizeofIPMreqn        = 0xc\n\tsizeofIPMreqSource   = 0xc\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n\n\tsizeofICMPFilter = 0x4\n\n\tsizeofSockFprog = 0x10\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype inetPktinfo struct {\n\tIfindex  int32\n\tSpec_dst [4]byte /* in_addr */\n\tAddr     [4]byte /* in_addr */\n}\n\ntype sockExtendedErr struct {\n\tErrno  uint32\n\tOrigin uint8\n\tType   uint8\n\tCode   uint8\n\tPad    uint8\n\tInfo   uint32\n\tData   uint32\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n\ntype ipMreqn struct {\n\tMultiaddr [4]byte /* in_addr */\n\tAddress   [4]byte /* in_addr */\n\tIfindex   int32\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  uint32\n\tInterface  uint32\n\tSourceaddr uint32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpFilter struct {\n\tData uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 [6]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_linux_mips.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv4\n\nconst (\n\tsysIP_TOS             = 0x1\n\tsysIP_TTL             = 0x2\n\tsysIP_HDRINCL         = 0x3\n\tsysIP_OPTIONS         = 0x4\n\tsysIP_ROUTER_ALERT    = 0x5\n\tsysIP_RECVOPTS        = 0x6\n\tsysIP_RETOPTS         = 0x7\n\tsysIP_PKTINFO         = 0x8\n\tsysIP_PKTOPTIONS      = 0x9\n\tsysIP_MTU_DISCOVER    = 0xa\n\tsysIP_RECVERR         = 0xb\n\tsysIP_RECVTTL         = 0xc\n\tsysIP_RECVTOS         = 0xd\n\tsysIP_MTU             = 0xe\n\tsysIP_FREEBIND        = 0xf\n\tsysIP_TRANSPARENT     = 0x13\n\tsysIP_RECVRETOPTS     = 0x7\n\tsysIP_ORIGDSTADDR     = 0x14\n\tsysIP_RECVORIGDSTADDR = 0x14\n\tsysIP_MINTTL          = 0x15\n\tsysIP_NODEFRAG        = 0x16\n\tsysIP_UNICAST_IF      = 0x32\n\n\tsysIP_MULTICAST_IF           = 0x20\n\tsysIP_MULTICAST_TTL          = 0x21\n\tsysIP_MULTICAST_LOOP         = 0x22\n\tsysIP_ADD_MEMBERSHIP         = 0x23\n\tsysIP_DROP_MEMBERSHIP        = 0x24\n\tsysIP_UNBLOCK_SOURCE         = 0x25\n\tsysIP_BLOCK_SOURCE           = 0x26\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0x27\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x28\n\tsysIP_MSFILTER               = 0x29\n\tsysMCAST_JOIN_GROUP          = 0x2a\n\tsysMCAST_LEAVE_GROUP         = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP   = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP  = 0x2f\n\tsysMCAST_BLOCK_SOURCE        = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE      = 0x2c\n\tsysMCAST_MSFILTER            = 0x30\n\tsysIP_MULTICAST_ALL          = 0x31\n\n\tsysICMP_FILTER = 0x1\n\n\tsysSO_EE_ORIGIN_NONE         = 0x0\n\tsysSO_EE_ORIGIN_LOCAL        = 0x1\n\tsysSO_EE_ORIGIN_ICMP         = 0x2\n\tsysSO_EE_ORIGIN_ICMP6        = 0x3\n\tsysSO_EE_ORIGIN_TXSTATUS     = 0x4\n\tsysSO_EE_ORIGIN_TIMESTAMPING = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet          = 0x10\n\tsizeofInetPktinfo           = 0xc\n\tsizeofSockExtendedErr       = 0x10\n\n\tsizeofIPMreq        
 = 0x8\n\tsizeofIPMreqn        = 0xc\n\tsizeofIPMreqSource   = 0xc\n\tsizeofGroupReq       = 0x84\n\tsizeofGroupSourceReq = 0x104\n\n\tsizeofICMPFilter = 0x4\n\n\tsizeofSockFprog = 0x8\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype inetPktinfo struct {\n\tIfindex  int32\n\tSpec_dst [4]byte /* in_addr */\n\tAddr     [4]byte /* in_addr */\n}\n\ntype sockExtendedErr struct {\n\tErrno  uint32\n\tOrigin uint8\n\tType   uint8\n\tCode   uint8\n\tPad    uint8\n\tInfo   uint32\n\tData   uint32\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n\ntype ipMreqn struct {\n\tMultiaddr [4]byte /* in_addr */\n\tAddress   [4]byte /* in_addr */\n\tIfindex   int32\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  uint32\n\tInterface  uint32\n\tSourceaddr uint32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpFilter struct {\n\tData uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 [2]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv4\n\nconst (\n\tsysIP_TOS             = 0x1\n\tsysIP_TTL             = 0x2\n\tsysIP_HDRINCL         = 0x3\n\tsysIP_OPTIONS         = 0x4\n\tsysIP_ROUTER_ALERT    = 0x5\n\tsysIP_RECVOPTS        = 0x6\n\tsysIP_RETOPTS         = 0x7\n\tsysIP_PKTINFO         = 0x8\n\tsysIP_PKTOPTIONS      = 0x9\n\tsysIP_MTU_DISCOVER    = 0xa\n\tsysIP_RECVERR         = 0xb\n\tsysIP_RECVTTL         = 0xc\n\tsysIP_RECVTOS         = 0xd\n\tsysIP_MTU             = 0xe\n\tsysIP_FREEBIND        = 0xf\n\tsysIP_TRANSPARENT     = 0x13\n\tsysIP_RECVRETOPTS     = 0x7\n\tsysIP_ORIGDSTADDR     = 0x14\n\tsysIP_RECVORIGDSTADDR = 0x14\n\tsysIP_MINTTL          = 0x15\n\tsysIP_NODEFRAG        = 0x16\n\tsysIP_UNICAST_IF      = 0x32\n\n\tsysIP_MULTICAST_IF           = 0x20\n\tsysIP_MULTICAST_TTL          = 0x21\n\tsysIP_MULTICAST_LOOP         = 0x22\n\tsysIP_ADD_MEMBERSHIP         = 0x23\n\tsysIP_DROP_MEMBERSHIP        = 0x24\n\tsysIP_UNBLOCK_SOURCE         = 0x25\n\tsysIP_BLOCK_SOURCE           = 0x26\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0x27\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x28\n\tsysIP_MSFILTER               = 0x29\n\tsysMCAST_JOIN_GROUP          = 0x2a\n\tsysMCAST_LEAVE_GROUP         = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP   = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP  = 0x2f\n\tsysMCAST_BLOCK_SOURCE        = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE      = 0x2c\n\tsysMCAST_MSFILTER            = 0x30\n\tsysIP_MULTICAST_ALL          = 0x31\n\n\tsysICMP_FILTER = 0x1\n\n\tsysSO_EE_ORIGIN_NONE         = 0x0\n\tsysSO_EE_ORIGIN_LOCAL        = 0x1\n\tsysSO_EE_ORIGIN_ICMP         = 0x2\n\tsysSO_EE_ORIGIN_ICMP6        = 0x3\n\tsysSO_EE_ORIGIN_TXSTATUS     = 0x4\n\tsysSO_EE_ORIGIN_TIMESTAMPING = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet          = 0x10\n\tsizeofInetPktinfo           = 0xc\n\tsizeofSockExtendedErr       = 0x10\n\n\tsizeofIPMreq        
 = 0x8\n\tsizeofIPMreqn        = 0xc\n\tsizeofIPMreqSource   = 0xc\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n\n\tsizeofICMPFilter = 0x4\n\n\tsizeofSockFprog = 0x10\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype inetPktinfo struct {\n\tIfindex  int32\n\tSpec_dst [4]byte /* in_addr */\n\tAddr     [4]byte /* in_addr */\n}\n\ntype sockExtendedErr struct {\n\tErrno  uint32\n\tOrigin uint8\n\tType   uint8\n\tCode   uint8\n\tPad    uint8\n\tInfo   uint32\n\tData   uint32\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n\ntype ipMreqn struct {\n\tMultiaddr [4]byte /* in_addr */\n\tAddress   [4]byte /* in_addr */\n\tIfindex   int32\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  uint32\n\tInterface  uint32\n\tSourceaddr uint32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpFilter struct {\n\tData uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 [6]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv4\n\nconst (\n\tsysIP_TOS             = 0x1\n\tsysIP_TTL             = 0x2\n\tsysIP_HDRINCL         = 0x3\n\tsysIP_OPTIONS         = 0x4\n\tsysIP_ROUTER_ALERT    = 0x5\n\tsysIP_RECVOPTS        = 0x6\n\tsysIP_RETOPTS         = 0x7\n\tsysIP_PKTINFO         = 0x8\n\tsysIP_PKTOPTIONS      = 0x9\n\tsysIP_MTU_DISCOVER    = 0xa\n\tsysIP_RECVERR         = 0xb\n\tsysIP_RECVTTL         = 0xc\n\tsysIP_RECVTOS         = 0xd\n\tsysIP_MTU             = 0xe\n\tsysIP_FREEBIND        = 0xf\n\tsysIP_TRANSPARENT     = 0x13\n\tsysIP_RECVRETOPTS     = 0x7\n\tsysIP_ORIGDSTADDR     = 0x14\n\tsysIP_RECVORIGDSTADDR = 0x14\n\tsysIP_MINTTL          = 0x15\n\tsysIP_NODEFRAG        = 0x16\n\tsysIP_UNICAST_IF      = 0x32\n\n\tsysIP_MULTICAST_IF           = 0x20\n\tsysIP_MULTICAST_TTL          = 0x21\n\tsysIP_MULTICAST_LOOP         = 0x22\n\tsysIP_ADD_MEMBERSHIP         = 0x23\n\tsysIP_DROP_MEMBERSHIP        = 0x24\n\tsysIP_UNBLOCK_SOURCE         = 0x25\n\tsysIP_BLOCK_SOURCE           = 0x26\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0x27\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x28\n\tsysIP_MSFILTER               = 0x29\n\tsysMCAST_JOIN_GROUP          = 0x2a\n\tsysMCAST_LEAVE_GROUP         = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP   = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP  = 0x2f\n\tsysMCAST_BLOCK_SOURCE        = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE      = 0x2c\n\tsysMCAST_MSFILTER            = 0x30\n\tsysIP_MULTICAST_ALL          = 0x31\n\n\tsysICMP_FILTER = 0x1\n\n\tsysSO_EE_ORIGIN_NONE         = 0x0\n\tsysSO_EE_ORIGIN_LOCAL        = 0x1\n\tsysSO_EE_ORIGIN_ICMP         = 0x2\n\tsysSO_EE_ORIGIN_ICMP6        = 0x3\n\tsysSO_EE_ORIGIN_TXSTATUS     = 0x4\n\tsysSO_EE_ORIGIN_TIMESTAMPING = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet          = 0x10\n\tsizeofInetPktinfo           = 0xc\n\tsizeofSockExtendedErr       = 0x10\n\n\tsizeofIPMreq        
 = 0x8\n\tsizeofIPMreqn        = 0xc\n\tsizeofIPMreqSource   = 0xc\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n\n\tsizeofICMPFilter = 0x4\n\n\tsizeofSockFprog = 0x10\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype inetPktinfo struct {\n\tIfindex  int32\n\tSpec_dst [4]byte /* in_addr */\n\tAddr     [4]byte /* in_addr */\n}\n\ntype sockExtendedErr struct {\n\tErrno  uint32\n\tOrigin uint8\n\tType   uint8\n\tCode   uint8\n\tPad    uint8\n\tInfo   uint32\n\tData   uint32\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n\ntype ipMreqn struct {\n\tMultiaddr [4]byte /* in_addr */\n\tAddress   [4]byte /* in_addr */\n\tIfindex   int32\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  uint32\n\tInterface  uint32\n\tSourceaddr uint32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpFilter struct {\n\tData uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 [6]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv4\n\nconst (\n\tsysIP_TOS             = 0x1\n\tsysIP_TTL             = 0x2\n\tsysIP_HDRINCL         = 0x3\n\tsysIP_OPTIONS         = 0x4\n\tsysIP_ROUTER_ALERT    = 0x5\n\tsysIP_RECVOPTS        = 0x6\n\tsysIP_RETOPTS         = 0x7\n\tsysIP_PKTINFO         = 0x8\n\tsysIP_PKTOPTIONS      = 0x9\n\tsysIP_MTU_DISCOVER    = 0xa\n\tsysIP_RECVERR         = 0xb\n\tsysIP_RECVTTL         = 0xc\n\tsysIP_RECVTOS         = 0xd\n\tsysIP_MTU             = 0xe\n\tsysIP_FREEBIND        = 0xf\n\tsysIP_TRANSPARENT     = 0x13\n\tsysIP_RECVRETOPTS     = 0x7\n\tsysIP_ORIGDSTADDR     = 0x14\n\tsysIP_RECVORIGDSTADDR = 0x14\n\tsysIP_MINTTL          = 0x15\n\tsysIP_NODEFRAG        = 0x16\n\tsysIP_UNICAST_IF      = 0x32\n\n\tsysIP_MULTICAST_IF           = 0x20\n\tsysIP_MULTICAST_TTL          = 0x21\n\tsysIP_MULTICAST_LOOP         = 0x22\n\tsysIP_ADD_MEMBERSHIP         = 0x23\n\tsysIP_DROP_MEMBERSHIP        = 0x24\n\tsysIP_UNBLOCK_SOURCE         = 0x25\n\tsysIP_BLOCK_SOURCE           = 0x26\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0x27\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x28\n\tsysIP_MSFILTER               = 0x29\n\tsysMCAST_JOIN_GROUP          = 0x2a\n\tsysMCAST_LEAVE_GROUP         = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP   = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP  = 0x2f\n\tsysMCAST_BLOCK_SOURCE        = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE      = 0x2c\n\tsysMCAST_MSFILTER            = 0x30\n\tsysIP_MULTICAST_ALL          = 0x31\n\n\tsysICMP_FILTER = 0x1\n\n\tsysSO_EE_ORIGIN_NONE         = 0x0\n\tsysSO_EE_ORIGIN_LOCAL        = 0x1\n\tsysSO_EE_ORIGIN_ICMP         = 0x2\n\tsysSO_EE_ORIGIN_ICMP6        = 0x3\n\tsysSO_EE_ORIGIN_TXSTATUS     = 0x4\n\tsysSO_EE_ORIGIN_TIMESTAMPING = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet          = 0x10\n\tsizeofInetPktinfo           = 0xc\n\tsizeofSockExtendedErr       = 0x10\n\n\tsizeofIPMreq        
 = 0x8\n\tsizeofIPMreqn        = 0xc\n\tsizeofIPMreqSource   = 0xc\n\tsizeofGroupReq       = 0x84\n\tsizeofGroupSourceReq = 0x104\n\n\tsizeofICMPFilter = 0x4\n\n\tsizeofSockFprog = 0x8\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype inetPktinfo struct {\n\tIfindex  int32\n\tSpec_dst [4]byte /* in_addr */\n\tAddr     [4]byte /* in_addr */\n}\n\ntype sockExtendedErr struct {\n\tErrno  uint32\n\tOrigin uint8\n\tType   uint8\n\tCode   uint8\n\tPad    uint8\n\tInfo   uint32\n\tData   uint32\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n\ntype ipMreqn struct {\n\tMultiaddr [4]byte /* in_addr */\n\tAddress   [4]byte /* in_addr */\n\tIfindex   int32\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  uint32\n\tInterface  uint32\n\tSourceaddr uint32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpFilter struct {\n\tData uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 [2]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv4\n\nconst (\n\tsysIP_TOS             = 0x1\n\tsysIP_TTL             = 0x2\n\tsysIP_HDRINCL         = 0x3\n\tsysIP_OPTIONS         = 0x4\n\tsysIP_ROUTER_ALERT    = 0x5\n\tsysIP_RECVOPTS        = 0x6\n\tsysIP_RETOPTS         = 0x7\n\tsysIP_PKTINFO         = 0x8\n\tsysIP_PKTOPTIONS      = 0x9\n\tsysIP_MTU_DISCOVER    = 0xa\n\tsysIP_RECVERR         = 0xb\n\tsysIP_RECVTTL         = 0xc\n\tsysIP_RECVTOS         = 0xd\n\tsysIP_MTU             = 0xe\n\tsysIP_FREEBIND        = 0xf\n\tsysIP_TRANSPARENT     = 0x13\n\tsysIP_RECVRETOPTS     = 0x7\n\tsysIP_ORIGDSTADDR     = 0x14\n\tsysIP_RECVORIGDSTADDR = 0x14\n\tsysIP_MINTTL          = 0x15\n\tsysIP_NODEFRAG        = 0x16\n\tsysIP_UNICAST_IF      = 0x32\n\n\tsysIP_MULTICAST_IF           = 0x20\n\tsysIP_MULTICAST_TTL          = 0x21\n\tsysIP_MULTICAST_LOOP         = 0x22\n\tsysIP_ADD_MEMBERSHIP         = 0x23\n\tsysIP_DROP_MEMBERSHIP        = 0x24\n\tsysIP_UNBLOCK_SOURCE         = 0x25\n\tsysIP_BLOCK_SOURCE           = 0x26\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0x27\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x28\n\tsysIP_MSFILTER               = 0x29\n\tsysMCAST_JOIN_GROUP          = 0x2a\n\tsysMCAST_LEAVE_GROUP         = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP   = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP  = 0x2f\n\tsysMCAST_BLOCK_SOURCE        = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE      = 0x2c\n\tsysMCAST_MSFILTER            = 0x30\n\tsysIP_MULTICAST_ALL          = 0x31\n\n\tsysICMP_FILTER = 0x1\n\n\tsysSO_EE_ORIGIN_NONE         = 0x0\n\tsysSO_EE_ORIGIN_LOCAL        = 0x1\n\tsysSO_EE_ORIGIN_ICMP         = 0x2\n\tsysSO_EE_ORIGIN_ICMP6        = 0x3\n\tsysSO_EE_ORIGIN_TXSTATUS     = 0x4\n\tsysSO_EE_ORIGIN_TIMESTAMPING = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet          = 0x10\n\tsizeofInetPktinfo           = 0xc\n\tsizeofSockExtendedErr       = 0x10\n\n\tsizeofIPMreq        
 = 0x8\n\tsizeofIPMreqn        = 0xc\n\tsizeofIPMreqSource   = 0xc\n\tsizeofGroupReq       = 0x84\n\tsizeofGroupSourceReq = 0x104\n\n\tsizeofICMPFilter = 0x4\n\n\tsizeofSockFprog = 0x8\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]uint8\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype inetPktinfo struct {\n\tIfindex  int32\n\tSpec_dst [4]byte /* in_addr */\n\tAddr     [4]byte /* in_addr */\n}\n\ntype sockExtendedErr struct {\n\tErrno  uint32\n\tOrigin uint8\n\tType   uint8\n\tCode   uint8\n\tPad    uint8\n\tInfo   uint32\n\tData   uint32\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n\ntype ipMreqn struct {\n\tMultiaddr [4]byte /* in_addr */\n\tAddress   [4]byte /* in_addr */\n\tIfindex   int32\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  uint32\n\tInterface  uint32\n\tSourceaddr uint32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpFilter struct {\n\tData uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 [2]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv4\n\nconst (\n\tsysIP_TOS             = 0x1\n\tsysIP_TTL             = 0x2\n\tsysIP_HDRINCL         = 0x3\n\tsysIP_OPTIONS         = 0x4\n\tsysIP_ROUTER_ALERT    = 0x5\n\tsysIP_RECVOPTS        = 0x6\n\tsysIP_RETOPTS         = 0x7\n\tsysIP_PKTINFO         = 0x8\n\tsysIP_PKTOPTIONS      = 0x9\n\tsysIP_MTU_DISCOVER    = 0xa\n\tsysIP_RECVERR         = 0xb\n\tsysIP_RECVTTL         = 0xc\n\tsysIP_RECVTOS         = 0xd\n\tsysIP_MTU             = 0xe\n\tsysIP_FREEBIND        = 0xf\n\tsysIP_TRANSPARENT     = 0x13\n\tsysIP_RECVRETOPTS     = 0x7\n\tsysIP_ORIGDSTADDR     = 0x14\n\tsysIP_RECVORIGDSTADDR = 0x14\n\tsysIP_MINTTL          = 0x15\n\tsysIP_NODEFRAG        = 0x16\n\tsysIP_UNICAST_IF      = 0x32\n\n\tsysIP_MULTICAST_IF           = 0x20\n\tsysIP_MULTICAST_TTL          = 0x21\n\tsysIP_MULTICAST_LOOP         = 0x22\n\tsysIP_ADD_MEMBERSHIP         = 0x23\n\tsysIP_DROP_MEMBERSHIP        = 0x24\n\tsysIP_UNBLOCK_SOURCE         = 0x25\n\tsysIP_BLOCK_SOURCE           = 0x26\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0x27\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x28\n\tsysIP_MSFILTER               = 0x29\n\tsysMCAST_JOIN_GROUP          = 0x2a\n\tsysMCAST_LEAVE_GROUP         = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP   = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP  = 0x2f\n\tsysMCAST_BLOCK_SOURCE        = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE      = 0x2c\n\tsysMCAST_MSFILTER            = 0x30\n\tsysIP_MULTICAST_ALL          = 0x31\n\n\tsysICMP_FILTER = 0x1\n\n\tsysSO_EE_ORIGIN_NONE         = 0x0\n\tsysSO_EE_ORIGIN_LOCAL        = 0x1\n\tsysSO_EE_ORIGIN_ICMP         = 0x2\n\tsysSO_EE_ORIGIN_ICMP6        = 0x3\n\tsysSO_EE_ORIGIN_TXSTATUS     = 0x4\n\tsysSO_EE_ORIGIN_TIMESTAMPING = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet          = 0x10\n\tsizeofInetPktinfo           = 0xc\n\tsizeofSockExtendedErr       = 0x10\n\n\tsizeofIPMreq        
 = 0x8\n\tsizeofIPMreqn        = 0xc\n\tsizeofIPMreqSource   = 0xc\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n\n\tsizeofICMPFilter = 0x4\n\n\tsizeofSockFprog = 0x10\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype inetPktinfo struct {\n\tIfindex  int32\n\tSpec_dst [4]byte /* in_addr */\n\tAddr     [4]byte /* in_addr */\n}\n\ntype sockExtendedErr struct {\n\tErrno  uint32\n\tOrigin uint8\n\tType   uint8\n\tCode   uint8\n\tPad    uint8\n\tInfo   uint32\n\tData   uint32\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n\ntype ipMreqn struct {\n\tMultiaddr [4]byte /* in_addr */\n\tAddress   [4]byte /* in_addr */\n\tIfindex   int32\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  uint32\n\tInterface  uint32\n\tSourceaddr uint32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpFilter struct {\n\tData uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 [6]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv4\n\nconst (\n\tsysIP_TOS             = 0x1\n\tsysIP_TTL             = 0x2\n\tsysIP_HDRINCL         = 0x3\n\tsysIP_OPTIONS         = 0x4\n\tsysIP_ROUTER_ALERT    = 0x5\n\tsysIP_RECVOPTS        = 0x6\n\tsysIP_RETOPTS         = 0x7\n\tsysIP_PKTINFO         = 0x8\n\tsysIP_PKTOPTIONS      = 0x9\n\tsysIP_MTU_DISCOVER    = 0xa\n\tsysIP_RECVERR         = 0xb\n\tsysIP_RECVTTL         = 0xc\n\tsysIP_RECVTOS         = 0xd\n\tsysIP_MTU             = 0xe\n\tsysIP_FREEBIND        = 0xf\n\tsysIP_TRANSPARENT     = 0x13\n\tsysIP_RECVRETOPTS     = 0x7\n\tsysIP_ORIGDSTADDR     = 0x14\n\tsysIP_RECVORIGDSTADDR = 0x14\n\tsysIP_MINTTL          = 0x15\n\tsysIP_NODEFRAG        = 0x16\n\tsysIP_UNICAST_IF      = 0x32\n\n\tsysIP_MULTICAST_IF           = 0x20\n\tsysIP_MULTICAST_TTL          = 0x21\n\tsysIP_MULTICAST_LOOP         = 0x22\n\tsysIP_ADD_MEMBERSHIP         = 0x23\n\tsysIP_DROP_MEMBERSHIP        = 0x24\n\tsysIP_UNBLOCK_SOURCE         = 0x25\n\tsysIP_BLOCK_SOURCE           = 0x26\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0x27\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x28\n\tsysIP_MSFILTER               = 0x29\n\tsysMCAST_JOIN_GROUP          = 0x2a\n\tsysMCAST_LEAVE_GROUP         = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP   = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP  = 0x2f\n\tsysMCAST_BLOCK_SOURCE        = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE      = 0x2c\n\tsysMCAST_MSFILTER            = 0x30\n\tsysIP_MULTICAST_ALL          = 0x31\n\n\tsysICMP_FILTER = 0x1\n\n\tsysSO_EE_ORIGIN_NONE         = 0x0\n\tsysSO_EE_ORIGIN_LOCAL        = 0x1\n\tsysSO_EE_ORIGIN_ICMP         = 0x2\n\tsysSO_EE_ORIGIN_ICMP6        = 0x3\n\tsysSO_EE_ORIGIN_TXSTATUS     = 0x4\n\tsysSO_EE_ORIGIN_TIMESTAMPING = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet          = 0x10\n\tsizeofInetPktinfo           = 0xc\n\tsizeofSockExtendedErr       = 0x10\n\n\tsizeofIPMreq        
 = 0x8\n\tsizeofIPMreqn        = 0xc\n\tsizeofIPMreqSource   = 0xc\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n\n\tsizeofICMPFilter = 0x4\n\n\tsizeofSockFprog = 0x10\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype inetPktinfo struct {\n\tIfindex  int32\n\tSpec_dst [4]byte /* in_addr */\n\tAddr     [4]byte /* in_addr */\n}\n\ntype sockExtendedErr struct {\n\tErrno  uint32\n\tOrigin uint8\n\tType   uint8\n\tCode   uint8\n\tPad    uint8\n\tInfo   uint32\n\tData   uint32\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n\ntype ipMreqn struct {\n\tMultiaddr [4]byte /* in_addr */\n\tAddress   [4]byte /* in_addr */\n\tIfindex   int32\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  uint32\n\tInterface  uint32\n\tSourceaddr uint32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpFilter struct {\n\tData uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 [6]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv4\n\nconst (\n\tsysIP_TOS             = 0x1\n\tsysIP_TTL             = 0x2\n\tsysIP_HDRINCL         = 0x3\n\tsysIP_OPTIONS         = 0x4\n\tsysIP_ROUTER_ALERT    = 0x5\n\tsysIP_RECVOPTS        = 0x6\n\tsysIP_RETOPTS         = 0x7\n\tsysIP_PKTINFO         = 0x8\n\tsysIP_PKTOPTIONS      = 0x9\n\tsysIP_MTU_DISCOVER    = 0xa\n\tsysIP_RECVERR         = 0xb\n\tsysIP_RECVTTL         = 0xc\n\tsysIP_RECVTOS         = 0xd\n\tsysIP_MTU             = 0xe\n\tsysIP_FREEBIND        = 0xf\n\tsysIP_TRANSPARENT     = 0x13\n\tsysIP_RECVRETOPTS     = 0x7\n\tsysIP_ORIGDSTADDR     = 0x14\n\tsysIP_RECVORIGDSTADDR = 0x14\n\tsysIP_MINTTL          = 0x15\n\tsysIP_NODEFRAG        = 0x16\n\tsysIP_UNICAST_IF      = 0x32\n\n\tsysIP_MULTICAST_IF           = 0x20\n\tsysIP_MULTICAST_TTL          = 0x21\n\tsysIP_MULTICAST_LOOP         = 0x22\n\tsysIP_ADD_MEMBERSHIP         = 0x23\n\tsysIP_DROP_MEMBERSHIP        = 0x24\n\tsysIP_UNBLOCK_SOURCE         = 0x25\n\tsysIP_BLOCK_SOURCE           = 0x26\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0x27\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x28\n\tsysIP_MSFILTER               = 0x29\n\tsysMCAST_JOIN_GROUP          = 0x2a\n\tsysMCAST_LEAVE_GROUP         = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP   = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP  = 0x2f\n\tsysMCAST_BLOCK_SOURCE        = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE      = 0x2c\n\tsysMCAST_MSFILTER            = 0x30\n\tsysIP_MULTICAST_ALL          = 0x31\n\n\tsysICMP_FILTER = 0x1\n\n\tsysSO_EE_ORIGIN_NONE         = 0x0\n\tsysSO_EE_ORIGIN_LOCAL        = 0x1\n\tsysSO_EE_ORIGIN_ICMP         = 0x2\n\tsysSO_EE_ORIGIN_ICMP6        = 0x3\n\tsysSO_EE_ORIGIN_TXSTATUS     = 0x4\n\tsysSO_EE_ORIGIN_TIMESTAMPING = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet          = 0x10\n\tsizeofInetPktinfo           = 0xc\n\tsizeofSockExtendedErr       = 0x10\n\n\tsizeofIPMreq        
 = 0x8\n\tsizeofIPMreqn        = 0xc\n\tsizeofIPMreqSource   = 0xc\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n\n\tsizeofICMPFilter = 0x4\n\n\tsizeofSockFprog = 0x10\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tX__pad [8]uint8\n}\n\ntype inetPktinfo struct {\n\tIfindex  int32\n\tSpec_dst [4]byte /* in_addr */\n\tAddr     [4]byte /* in_addr */\n}\n\ntype sockExtendedErr struct {\n\tErrno  uint32\n\tOrigin uint8\n\tType   uint8\n\tCode   uint8\n\tPad    uint8\n\tInfo   uint32\n\tData   uint32\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n\ntype ipMreqn struct {\n\tMultiaddr [4]byte /* in_addr */\n\tAddress   [4]byte /* in_addr */\n\tIfindex   int32\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  uint32\n\tInterface  uint32\n\tSourceaddr uint32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpFilter struct {\n\tData uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 [6]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_netbsd.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_netbsd.go\n\npackage ipv4\n\nconst (\n\tsysIP_OPTIONS     = 0x1\n\tsysIP_HDRINCL     = 0x2\n\tsysIP_TOS         = 0x3\n\tsysIP_TTL         = 0x4\n\tsysIP_RECVOPTS    = 0x5\n\tsysIP_RECVRETOPTS = 0x6\n\tsysIP_RECVDSTADDR = 0x7\n\tsysIP_RETOPTS     = 0x8\n\tsysIP_RECVIF      = 0x14\n\tsysIP_RECVTTL     = 0x17\n\n\tsysIP_MULTICAST_IF    = 0x9\n\tsysIP_MULTICAST_TTL   = 0xa\n\tsysIP_MULTICAST_LOOP  = 0xb\n\tsysIP_ADD_MEMBERSHIP  = 0xc\n\tsysIP_DROP_MEMBERSHIP = 0xd\n\n\tsizeofIPMreq = 0x8\n)\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_openbsd.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_openbsd.go\n\npackage ipv4\n\nconst (\n\tsysIP_OPTIONS     = 0x1\n\tsysIP_HDRINCL     = 0x2\n\tsysIP_TOS         = 0x3\n\tsysIP_TTL         = 0x4\n\tsysIP_RECVOPTS    = 0x5\n\tsysIP_RECVRETOPTS = 0x6\n\tsysIP_RECVDSTADDR = 0x7\n\tsysIP_RETOPTS     = 0x8\n\tsysIP_RECVIF      = 0x1e\n\tsysIP_RECVTTL     = 0x1f\n\n\tsysIP_MULTICAST_IF    = 0x9\n\tsysIP_MULTICAST_TTL   = 0xa\n\tsysIP_MULTICAST_LOOP  = 0xb\n\tsysIP_ADD_MEMBERSHIP  = 0xc\n\tsysIP_DROP_MEMBERSHIP = 0xd\n\n\tsizeofIPMreq = 0x8\n)\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* in_addr */\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv4/zsys_solaris.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_solaris.go\n\npackage ipv4\n\nconst (\n\tsysIP_OPTIONS     = 0x1\n\tsysIP_HDRINCL     = 0x2\n\tsysIP_TOS         = 0x3\n\tsysIP_TTL         = 0x4\n\tsysIP_RECVOPTS    = 0x5\n\tsysIP_RECVRETOPTS = 0x6\n\tsysIP_RECVDSTADDR = 0x7\n\tsysIP_RETOPTS     = 0x8\n\tsysIP_RECVIF      = 0x9\n\tsysIP_RECVSLLA    = 0xa\n\tsysIP_RECVTTL     = 0xb\n\n\tsysIP_MULTICAST_IF           = 0x10\n\tsysIP_MULTICAST_TTL          = 0x11\n\tsysIP_MULTICAST_LOOP         = 0x12\n\tsysIP_ADD_MEMBERSHIP         = 0x13\n\tsysIP_DROP_MEMBERSHIP        = 0x14\n\tsysIP_BLOCK_SOURCE           = 0x15\n\tsysIP_UNBLOCK_SOURCE         = 0x16\n\tsysIP_ADD_SOURCE_MEMBERSHIP  = 0x17\n\tsysIP_DROP_SOURCE_MEMBERSHIP = 0x18\n\tsysIP_NEXTHOP                = 0x19\n\n\tsysIP_PKTINFO     = 0x1a\n\tsysIP_RECVPKTINFO = 0x1a\n\tsysIP_DONTFRAG    = 0x1b\n\n\tsysIP_BOUND_IF      = 0x41\n\tsysIP_UNSPEC_SRC    = 0x42\n\tsysIP_BROADCAST_TTL = 0x43\n\tsysIP_DHCPINIT_IF   = 0x45\n\n\tsysIP_REUSEADDR = 0x104\n\tsysIP_DONTROUTE = 0x105\n\tsysIP_BROADCAST = 0x106\n\n\tsysMCAST_JOIN_GROUP         = 0x29\n\tsysMCAST_LEAVE_GROUP        = 0x2a\n\tsysMCAST_BLOCK_SOURCE       = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE     = 0x2c\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x2d\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x2e\n\n\tsizeofSockaddrStorage = 0x100\n\tsizeofSockaddrInet    = 0x10\n\tsizeofInetPktinfo     = 0xc\n\n\tsizeofIPMreq         = 0x8\n\tsizeofIPMreqSource   = 0xc\n\tsizeofGroupReq       = 0x104\n\tsizeofGroupSourceReq = 0x204\n)\n\ntype sockaddrStorage struct {\n\tFamily     uint16\n\tX_ss_pad1  [6]int8\n\tX_ss_align float64\n\tX_ss_pad2  [240]int8\n}\n\ntype sockaddrInet struct {\n\tFamily uint16\n\tPort   uint16\n\tAddr   [4]byte /* in_addr */\n\tZero   [8]int8\n}\n\ntype inetPktinfo struct {\n\tIfindex  uint32\n\tSpec_dst [4]byte /* in_addr */\n\tAddr     [4]byte /* in_addr */\n}\n\ntype ipMreq struct {\n\tMultiaddr [4]byte /* in_addr */\n\tInterface [4]byte /* 
in_addr */\n}\n\ntype ipMreqSource struct {\n\tMultiaddr  [4]byte /* in_addr */\n\tSourceaddr [4]byte /* in_addr */\n\tInterface  [4]byte /* in_addr */\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [256]byte\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [256]byte\n\tPad_cgo_1 [256]byte\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/batch.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.9\n\npackage ipv6\n\nimport (\n\t\"net\"\n\t\"runtime\"\n\t\"syscall\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\n// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of\n// PacketConn are not implemented.\n\n// A Message represents an IO message.\n//\n//\ttype Message struct {\n//\t\tBuffers [][]byte\n//\t\tOOB     []byte\n//\t\tAddr    net.Addr\n//\t\tN       int\n//\t\tNN      int\n//\t\tFlags   int\n//\t}\n//\n// The Buffers fields represents a list of contiguous buffers, which\n// can be used for vectored IO, for example, putting a header and a\n// payload in each slice.\n// When writing, the Buffers field must contain at least one byte to\n// write.\n// When reading, the Buffers field will always contain a byte to read.\n//\n// The OOB field contains protocol-specific control or miscellaneous\n// ancillary data known as out-of-band data.\n// It can be nil when not required.\n//\n// The Addr field specifies a destination address when writing.\n// It can be nil when the underlying protocol of the endpoint uses\n// connection-oriented communication.\n// After a successful read, it may contain the source address on the\n// received packet.\n//\n// The N field indicates the number of bytes read or written from/to\n// Buffers.\n//\n// The NN field indicates the number of bytes read or written from/to\n// OOB.\n//\n// The Flags field contains protocol-specific information on the\n// received message.\ntype Message = socket.Message\n\n// ReadBatch reads a batch of messages.\n//\n// The provided flags is a set of platform-dependent flags, such as\n// syscall.MSG_PEEK.\n//\n// On a successful read it returns the number of messages received, up\n// to len(ms).\n//\n// On Linux, a batch read will be optimized.\n// On other platforms, this method will read only a 
single message.\nfunc (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\tn, err := c.RecvMsgs([]socket.Message(ms), flags)\n\t\tif err != nil {\n\t\t\terr = &net.OpError{Op: \"read\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}\n\t\t}\n\t\treturn n, err\n\tdefault:\n\t\tn := 1\n\t\terr := c.RecvMsg(&ms[0], flags)\n\t\tif err != nil {\n\t\t\tn = 0\n\t\t\terr = &net.OpError{Op: \"read\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}\n\t\t}\n\t\treturn n, err\n\t}\n}\n\n// WriteBatch writes a batch of messages.\n//\n// The provided flags is a set of platform-dependent flags, such as\n// syscall.MSG_DONTROUTE.\n//\n// It returns the number of messages written on a successful write.\n//\n// On Linux, a batch write will be optimized.\n// On other platforms, this method will write only a single message.\nfunc (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\tn, err := c.SendMsgs([]socket.Message(ms), flags)\n\t\tif err != nil {\n\t\t\terr = &net.OpError{Op: \"write\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}\n\t\t}\n\t\treturn n, err\n\tdefault:\n\t\tn := 1\n\t\terr := c.SendMsg(&ms[0], flags)\n\t\tif err != nil {\n\t\t\tn = 0\n\t\t\terr = &net.OpError{Op: \"write\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}\n\t\t}\n\t\treturn n, err\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/bpf_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6_test\n\nimport (\n\t\"net\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org/x/net/bpf\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nfunc TestBPF(t *testing.T) {\n\tif runtime.GOOS != \"linux\" {\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not supported\")\n\t}\n\n\tl, err := net.ListenPacket(\"udp6\", \"[::1]:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer l.Close()\n\n\tp := ipv6.NewPacketConn(l)\n\n\t// This filter accepts UDP packets whose first payload byte is\n\t// even.\n\tprog, err := bpf.Assemble([]bpf.Instruction{\n\t\t// Load the first byte of the payload (skipping UDP header).\n\t\tbpf.LoadAbsolute{Off: 8, Size: 1},\n\t\t// Select LSB of the byte.\n\t\tbpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1},\n\t\t// Byte is even?\n\t\tbpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1},\n\t\t// Accept.\n\t\tbpf.RetConstant{Val: 4096},\n\t\t// Ignore.\n\t\tbpf.RetConstant{Val: 0},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"compiling BPF: %s\", err)\n\t}\n\n\tif err = p.SetBPF(prog); err != nil {\n\t\tt.Fatalf(\"attaching filter to Conn: %s\", err)\n\t}\n\n\ts, err := net.Dial(\"udp6\", l.LocalAddr().String())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer s.Close()\n\tgo func() {\n\t\tfor i := byte(0); i < 10; i++ {\n\t\t\ts.Write([]byte{i})\n\t\t}\n\t}()\n\n\tl.SetDeadline(time.Now().Add(2 * time.Second))\n\tseen := make([]bool, 5)\n\tfor {\n\t\tvar b [512]byte\n\t\tn, _, err := l.ReadFrom(b[:])\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"reading from listener: %s\", err)\n\t\t}\n\t\tif n != 1 {\n\t\t\tt.Fatalf(\"unexpected packet length, want 1, got %d\", n)\n\t\t}\n\t\tif b[0] >= 10 {\n\t\t\tt.Fatalf(\"unexpected byte, want 0-9, got %d\", b[0])\n\t\t}\n\t\tif b[0]%2 != 0 {\n\t\t\tt.Fatalf(\"got odd byte %d, 
wanted only even bytes\", b[0])\n\t\t}\n\t\tseen[b[0]/2] = true\n\n\t\tseenAll := true\n\t\tfor _, v := range seen {\n\t\t\tif !v {\n\t\t\t\tseenAll = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif seenAll {\n\t\t\tbreak\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/control.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\n// Note that RFC 3542 obsoletes RFC 2292 but OS X Snow Leopard and the\n// former still support RFC 2292 only. Please be aware that almost\n// all protocol implementations prohibit using a combination of RFC\n// 2292 and RFC 3542 for some practical reasons.\n\ntype rawOpt struct {\n\tsync.RWMutex\n\tcflags ControlFlags\n}\n\nfunc (c *rawOpt) set(f ControlFlags)        { c.cflags |= f }\nfunc (c *rawOpt) clear(f ControlFlags)      { c.cflags &^= f }\nfunc (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 }\n\n// A ControlFlags represents per packet basis IP-level socket option\n// control flags.\ntype ControlFlags uint\n\nconst (\n\tFlagTrafficClass ControlFlags = 1 << iota // pass the traffic class on the received packet\n\tFlagHopLimit                              // pass the hop limit on the received packet\n\tFlagSrc                                   // pass the source address on the received packet\n\tFlagDst                                   // pass the destination address on the received packet\n\tFlagInterface                             // pass the interface index on the received packet\n\tFlagPathMTU                               // pass the path MTU on the received packet path\n)\n\nconst flagPacketInfo = FlagDst | FlagInterface\n\n// A ControlMessage represents per packet basis IP-level socket\n// options.\ntype ControlMessage struct {\n\t// Receiving socket options: SetControlMessage allows to\n\t// receive the options from the protocol stack using ReadFrom\n\t// method of PacketConn.\n\t//\n\t// Specifying socket options: ControlMessage for WriteTo\n\t// method of PacketConn allows to send the options to the\n\t// 
protocol stack.\n\t//\n\tTrafficClass int    // traffic class, must be 1 <= value <= 255 when specifying\n\tHopLimit     int    // hop limit, must be 1 <= value <= 255 when specifying\n\tSrc          net.IP // source address, specifying only\n\tDst          net.IP // destination address, receiving only\n\tIfIndex      int    // interface index, must be 1 <= value when specifying\n\tNextHop      net.IP // next hop address, specifying only\n\tMTU          int    // path MTU, receiving only\n}\n\nfunc (cm *ControlMessage) String() string {\n\tif cm == nil {\n\t\treturn \"<nil>\"\n\t}\n\treturn fmt.Sprintf(\"tclass=%#x hoplim=%d src=%v dst=%v ifindex=%d nexthop=%v mtu=%d\", cm.TrafficClass, cm.HopLimit, cm.Src, cm.Dst, cm.IfIndex, cm.NextHop, cm.MTU)\n}\n\n// Marshal returns the binary encoding of cm.\nfunc (cm *ControlMessage) Marshal() []byte {\n\tif cm == nil {\n\t\treturn nil\n\t}\n\tvar l int\n\ttclass := false\n\tif ctlOpts[ctlTrafficClass].name > 0 && cm.TrafficClass > 0 {\n\t\ttclass = true\n\t\tl += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length)\n\t}\n\thoplimit := false\n\tif ctlOpts[ctlHopLimit].name > 0 && cm.HopLimit > 0 {\n\t\thoplimit = true\n\t\tl += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length)\n\t}\n\tpktinfo := false\n\tif ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To16() != nil && cm.Src.To4() == nil || cm.IfIndex > 0) {\n\t\tpktinfo = true\n\t\tl += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length)\n\t}\n\tnexthop := false\n\tif ctlOpts[ctlNextHop].name > 0 && cm.NextHop.To16() != nil && cm.NextHop.To4() == nil {\n\t\tnexthop = true\n\t\tl += socket.ControlMessageSpace(ctlOpts[ctlNextHop].length)\n\t}\n\tvar b []byte\n\tif l > 0 {\n\t\tb = make([]byte, l)\n\t\tbb := b\n\t\tif tclass {\n\t\t\tbb = ctlOpts[ctlTrafficClass].marshal(bb, cm)\n\t\t}\n\t\tif hoplimit {\n\t\t\tbb = ctlOpts[ctlHopLimit].marshal(bb, cm)\n\t\t}\n\t\tif pktinfo {\n\t\t\tbb = ctlOpts[ctlPacketInfo].marshal(bb, cm)\n\t\t}\n\t\tif nexthop 
{\n\t\t\tbb = ctlOpts[ctlNextHop].marshal(bb, cm)\n\t\t}\n\t}\n\treturn b\n}\n\n// Parse parses b as a control message and stores the result in cm.\nfunc (cm *ControlMessage) Parse(b []byte) error {\n\tms, err := socket.ControlMessage(b).Parse()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, m := range ms {\n\t\tlvl, typ, l, err := m.ParseHeader()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif lvl != iana.ProtocolIPv6 {\n\t\t\tcontinue\n\t\t}\n\t\tswitch {\n\t\tcase typ == ctlOpts[ctlTrafficClass].name && l >= ctlOpts[ctlTrafficClass].length:\n\t\t\tctlOpts[ctlTrafficClass].parse(cm, m.Data(l))\n\t\tcase typ == ctlOpts[ctlHopLimit].name && l >= ctlOpts[ctlHopLimit].length:\n\t\t\tctlOpts[ctlHopLimit].parse(cm, m.Data(l))\n\t\tcase typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length:\n\t\t\tctlOpts[ctlPacketInfo].parse(cm, m.Data(l))\n\t\tcase typ == ctlOpts[ctlPathMTU].name && l >= ctlOpts[ctlPathMTU].length:\n\t\t\tctlOpts[ctlPathMTU].parse(cm, m.Data(l))\n\t\t}\n\t}\n\treturn nil\n}\n\n// NewControlMessage returns a new control message.\n//\n// The returned message is large enough for options specified by cf.\nfunc NewControlMessage(cf ControlFlags) []byte {\n\topt := rawOpt{cflags: cf}\n\tvar l int\n\tif opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 {\n\t\tl += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length)\n\t}\n\tif opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 {\n\t\tl += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length)\n\t}\n\tif opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 {\n\t\tl += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length)\n\t}\n\tif opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 {\n\t\tl += socket.ControlMessageSpace(ctlOpts[ctlPathMTU].length)\n\t}\n\tvar b []byte\n\tif l > 0 {\n\t\tb = make([]byte, l)\n\t}\n\treturn b\n}\n\n// Ancillary data socket options\nconst (\n\tctlTrafficClass = iota // header field\n\tctlHopLimit            
// header field\n\tctlPacketInfo          // inbound or outbound packet path\n\tctlNextHop             // nexthop\n\tctlPathMTU             // path mtu\n\tctlMax\n)\n\n// A ctlOpt represents a binding for ancillary data socket option.\ntype ctlOpt struct {\n\tname    int // option name, must be equal or greater than 1\n\tlength  int // option length\n\tmarshal func([]byte, *ControlMessage) []byte\n\tparse   func(*ControlMessage, []byte)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin\n\npackage ipv6\n\nimport (\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc marshal2292HopLimit(b []byte, cm *ControlMessage) []byte {\n\tm := socket.ControlMessage(b)\n\tm.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292HOPLIMIT, 4)\n\tif cm != nil {\n\t\tsocket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit))\n\t}\n\treturn m.Next(4)\n}\n\nfunc marshal2292PacketInfo(b []byte, cm *ControlMessage) []byte {\n\tm := socket.ControlMessage(b)\n\tm.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292PKTINFO, sizeofInet6Pktinfo)\n\tif cm != nil {\n\t\tpi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0]))\n\t\tif ip := cm.Src.To16(); ip != nil && ip.To4() == nil {\n\t\t\tcopy(pi.Addr[:], ip)\n\t\t}\n\t\tif cm.IfIndex > 0 {\n\t\t\tpi.setIfindex(cm.IfIndex)\n\t\t}\n\t}\n\treturn m.Next(sizeofInet6Pktinfo)\n}\n\nfunc marshal2292NextHop(b []byte, cm *ControlMessage) []byte {\n\tm := socket.ControlMessage(b)\n\tm.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292NEXTHOP, sizeofSockaddrInet6)\n\tif cm != nil {\n\t\tsa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0]))\n\t\tsa.setSockaddr(cm.NextHop, cm.IfIndex)\n\t}\n\treturn m.Next(sizeofSockaddrInet6)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd linux netbsd openbsd solaris\n\npackage ipv6\n\nimport (\n\t\"net\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc marshalTrafficClass(b []byte, cm *ControlMessage) []byte {\n\tm := socket.ControlMessage(b)\n\tm.MarshalHeader(iana.ProtocolIPv6, sysIPV6_TCLASS, 4)\n\tif cm != nil {\n\t\tsocket.NativeEndian.PutUint32(m.Data(4), uint32(cm.TrafficClass))\n\t}\n\treturn m.Next(4)\n}\n\nfunc parseTrafficClass(cm *ControlMessage, b []byte) {\n\tcm.TrafficClass = int(socket.NativeEndian.Uint32(b[:4]))\n}\n\nfunc marshalHopLimit(b []byte, cm *ControlMessage) []byte {\n\tm := socket.ControlMessage(b)\n\tm.MarshalHeader(iana.ProtocolIPv6, sysIPV6_HOPLIMIT, 4)\n\tif cm != nil {\n\t\tsocket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit))\n\t}\n\treturn m.Next(4)\n}\n\nfunc parseHopLimit(cm *ControlMessage, b []byte) {\n\tcm.HopLimit = int(socket.NativeEndian.Uint32(b[:4]))\n}\n\nfunc marshalPacketInfo(b []byte, cm *ControlMessage) []byte {\n\tm := socket.ControlMessage(b)\n\tm.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PKTINFO, sizeofInet6Pktinfo)\n\tif cm != nil {\n\t\tpi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0]))\n\t\tif ip := cm.Src.To16(); ip != nil && ip.To4() == nil {\n\t\t\tcopy(pi.Addr[:], ip)\n\t\t}\n\t\tif cm.IfIndex > 0 {\n\t\t\tpi.setIfindex(cm.IfIndex)\n\t\t}\n\t}\n\treturn m.Next(sizeofInet6Pktinfo)\n}\n\nfunc parsePacketInfo(cm *ControlMessage, b []byte) {\n\tpi := (*inet6Pktinfo)(unsafe.Pointer(&b[0]))\n\tif len(cm.Dst) < net.IPv6len {\n\t\tcm.Dst = make(net.IP, net.IPv6len)\n\t}\n\tcopy(cm.Dst, pi.Addr[:])\n\tcm.IfIndex = int(pi.Ifindex)\n}\n\nfunc marshalNextHop(b []byte, cm *ControlMessage) []byte {\n\tm := 
socket.ControlMessage(b)\n\tm.MarshalHeader(iana.ProtocolIPv6, sysIPV6_NEXTHOP, sizeofSockaddrInet6)\n\tif cm != nil {\n\t\tsa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0]))\n\t\tsa.setSockaddr(cm.NextHop, cm.IfIndex)\n\t}\n\treturn m.Next(sizeofSockaddrInet6)\n}\n\nfunc parseNextHop(cm *ControlMessage, b []byte) {\n}\n\nfunc marshalPathMTU(b []byte, cm *ControlMessage) []byte {\n\tm := socket.ControlMessage(b)\n\tm.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PATHMTU, sizeofIPv6Mtuinfo)\n\treturn m.Next(sizeofIPv6Mtuinfo)\n}\n\nfunc parsePathMTU(cm *ControlMessage, b []byte) {\n\tmi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0]))\n\tif len(cm.Dst) < net.IPv6len {\n\t\tcm.Dst = make(net.IP, net.IPv6len)\n\t}\n\tcopy(cm.Dst, mi.Addr.Addr[:])\n\tcm.IfIndex = int(mi.Addr.Scope_id)\n\tcm.MTU = int(mi.Mtu)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/control_stub.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows\n\npackage ipv6\n\nimport \"golang.org/x/net/internal/socket\"\n\nfunc setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error {\n\treturn errOpNoSupport\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/control_test.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6_test\n\nimport (\n\t\"testing\"\n\n\t\"golang.org/x/net/ipv6\"\n)\n\nfunc TestControlMessageParseWithFuzz(t *testing.T) {\n\tvar cm ipv6.ControlMessage\n\tfor _, fuzz := range []string{\n\t\t\"\\f\\x00\\x00\\x00)\\x00\\x00\\x00.\\x00\\x00\\x00\",\n\t\t\"\\f\\x00\\x00\\x00)\\x00\\x00\\x00,\\x00\\x00\\x00\",\n\t} {\n\t\tcm.Parse([]byte(fuzz))\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/control_unix.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd linux netbsd openbsd solaris\n\npackage ipv6\n\nimport \"golang.org/x/net/internal/socket\"\n\nfunc setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error {\n\topt.Lock()\n\tdefer opt.Unlock()\n\tif so, ok := sockOpts[ssoReceiveTrafficClass]; ok && cf&FlagTrafficClass != 0 {\n\t\tif err := so.SetInt(c, boolint(on)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif on {\n\t\t\topt.set(FlagTrafficClass)\n\t\t} else {\n\t\t\topt.clear(FlagTrafficClass)\n\t\t}\n\t}\n\tif so, ok := sockOpts[ssoReceiveHopLimit]; ok && cf&FlagHopLimit != 0 {\n\t\tif err := so.SetInt(c, boolint(on)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif on {\n\t\t\topt.set(FlagHopLimit)\n\t\t} else {\n\t\t\topt.clear(FlagHopLimit)\n\t\t}\n\t}\n\tif so, ok := sockOpts[ssoReceivePacketInfo]; ok && cf&flagPacketInfo != 0 {\n\t\tif err := so.SetInt(c, boolint(on)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif on {\n\t\t\topt.set(cf & flagPacketInfo)\n\t\t} else {\n\t\t\topt.clear(cf & flagPacketInfo)\n\t\t}\n\t}\n\tif so, ok := sockOpts[ssoReceivePathMTU]; ok && cf&FlagPathMTU != 0 {\n\t\tif err := so.SetInt(c, boolint(on)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif on {\n\t\t\topt.set(FlagPathMTU)\n\t\t} else {\n\t\t\topt.clear(FlagPathMTU)\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/control_windows.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nimport (\n\t\"syscall\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error {\n\t// TODO(mikio): implement this\n\treturn syscall.EWINDOWS\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/defs_darwin.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in6_addr [16]byte /* in6_addr */\n\npackage ipv6\n\n/*\n#define __APPLE_USE_RFC_3542\n#include <netinet/in.h>\n#include <netinet/icmp6.h>\n*/\nimport \"C\"\n\nconst (\n\tsysIPV6_UNICAST_HOPS   = C.IPV6_UNICAST_HOPS\n\tsysIPV6_MULTICAST_IF   = C.IPV6_MULTICAST_IF\n\tsysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS\n\tsysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP\n\tsysIPV6_JOIN_GROUP     = C.IPV6_JOIN_GROUP\n\tsysIPV6_LEAVE_GROUP    = C.IPV6_LEAVE_GROUP\n\n\tsysIPV6_PORTRANGE    = C.IPV6_PORTRANGE\n\tsysICMP6_FILTER      = C.ICMP6_FILTER\n\tsysIPV6_2292PKTINFO  = C.IPV6_2292PKTINFO\n\tsysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT\n\tsysIPV6_2292NEXTHOP  = C.IPV6_2292NEXTHOP\n\tsysIPV6_2292HOPOPTS  = C.IPV6_2292HOPOPTS\n\tsysIPV6_2292DSTOPTS  = C.IPV6_2292DSTOPTS\n\tsysIPV6_2292RTHDR    = C.IPV6_2292RTHDR\n\n\tsysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS\n\n\tsysIPV6_CHECKSUM = C.IPV6_CHECKSUM\n\tsysIPV6_V6ONLY   = C.IPV6_V6ONLY\n\n\tsysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY\n\n\tsysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS\n\tsysIPV6_TCLASS     = C.IPV6_TCLASS\n\n\tsysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS\n\n\tsysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO\n\n\tsysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT\n\tsysIPV6_RECVRTHDR    = C.IPV6_RECVRTHDR\n\tsysIPV6_RECVHOPOPTS  = C.IPV6_RECVHOPOPTS\n\tsysIPV6_RECVDSTOPTS  = C.IPV6_RECVDSTOPTS\n\n\tsysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU\n\tsysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU\n\n\tsysIPV6_PATHMTU = C.IPV6_PATHMTU\n\n\tsysIPV6_PKTINFO  = C.IPV6_PKTINFO\n\tsysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT\n\tsysIPV6_NEXTHOP  = C.IPV6_NEXTHOP\n\tsysIPV6_HOPOPTS  = C.IPV6_HOPOPTS\n\tsysIPV6_DSTOPTS  = C.IPV6_DSTOPTS\n\tsysIPV6_RTHDR    = C.IPV6_RTHDR\n\n\tsysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL\n\n\tsysIPV6_DONTFRAG = 
C.IPV6_DONTFRAG\n\n\tsysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR\n\n\tsysIPV6_MSFILTER            = C.IPV6_MSFILTER\n\tsysMCAST_JOIN_GROUP         = C.MCAST_JOIN_GROUP\n\tsysMCAST_LEAVE_GROUP        = C.MCAST_LEAVE_GROUP\n\tsysMCAST_JOIN_SOURCE_GROUP  = C.MCAST_JOIN_SOURCE_GROUP\n\tsysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP\n\tsysMCAST_BLOCK_SOURCE       = C.MCAST_BLOCK_SOURCE\n\tsysMCAST_UNBLOCK_SOURCE     = C.MCAST_UNBLOCK_SOURCE\n\n\tsysIPV6_BOUND_IF = C.IPV6_BOUND_IF\n\n\tsysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT\n\tsysIPV6_PORTRANGE_HIGH    = C.IPV6_PORTRANGE_HIGH\n\tsysIPV6_PORTRANGE_LOW     = C.IPV6_PORTRANGE_LOW\n\n\tsizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage\n\tsizeofSockaddrInet6   = C.sizeof_struct_sockaddr_in6\n\tsizeofInet6Pktinfo    = C.sizeof_struct_in6_pktinfo\n\tsizeofIPv6Mtuinfo     = C.sizeof_struct_ip6_mtuinfo\n\n\tsizeofIPv6Mreq       = C.sizeof_struct_ipv6_mreq\n\tsizeofGroupReq       = C.sizeof_struct_group_req\n\tsizeofGroupSourceReq = C.sizeof_struct_group_source_req\n\n\tsizeofICMPv6Filter = C.sizeof_struct_icmp6_filter\n)\n\ntype sockaddrStorage C.struct_sockaddr_storage\n\ntype sockaddrInet6 C.struct_sockaddr_in6\n\ntype inet6Pktinfo C.struct_in6_pktinfo\n\ntype ipv6Mtuinfo C.struct_ip6_mtuinfo\n\ntype ipv6Mreq C.struct_ipv6_mreq\n\ntype icmpv6Filter C.struct_icmp6_filter\n\ntype groupReq C.struct_group_req\n\ntype groupSourceReq C.struct_group_source_req\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/defs_dragonfly.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in6_addr [16]byte /* in6_addr */\n\npackage ipv6\n\n/*\n#include <sys/param.h>\n#include <sys/socket.h>\n\n#include <netinet/in.h>\n#include <netinet/icmp6.h>\n*/\nimport \"C\"\n\nconst (\n\tsysIPV6_UNICAST_HOPS   = C.IPV6_UNICAST_HOPS\n\tsysIPV6_MULTICAST_IF   = C.IPV6_MULTICAST_IF\n\tsysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS\n\tsysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP\n\tsysIPV6_JOIN_GROUP     = C.IPV6_JOIN_GROUP\n\tsysIPV6_LEAVE_GROUP    = C.IPV6_LEAVE_GROUP\n\tsysIPV6_PORTRANGE      = C.IPV6_PORTRANGE\n\tsysICMP6_FILTER        = C.ICMP6_FILTER\n\n\tsysIPV6_CHECKSUM = C.IPV6_CHECKSUM\n\tsysIPV6_V6ONLY   = C.IPV6_V6ONLY\n\n\tsysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY\n\n\tsysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS\n\tsysIPV6_RECVPKTINFO  = C.IPV6_RECVPKTINFO\n\tsysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT\n\tsysIPV6_RECVRTHDR    = C.IPV6_RECVRTHDR\n\tsysIPV6_RECVHOPOPTS  = C.IPV6_RECVHOPOPTS\n\tsysIPV6_RECVDSTOPTS  = C.IPV6_RECVDSTOPTS\n\n\tsysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU\n\tsysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU\n\n\tsysIPV6_PATHMTU = C.IPV6_PATHMTU\n\n\tsysIPV6_PKTINFO  = C.IPV6_PKTINFO\n\tsysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT\n\tsysIPV6_NEXTHOP  = C.IPV6_NEXTHOP\n\tsysIPV6_HOPOPTS  = C.IPV6_HOPOPTS\n\tsysIPV6_DSTOPTS  = C.IPV6_DSTOPTS\n\tsysIPV6_RTHDR    = C.IPV6_RTHDR\n\n\tsysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS\n\n\tsysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL\n\n\tsysIPV6_TCLASS   = C.IPV6_TCLASS\n\tsysIPV6_DONTFRAG = C.IPV6_DONTFRAG\n\n\tsysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR\n\n\tsysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT\n\tsysIPV6_PORTRANGE_HIGH    = C.IPV6_PORTRANGE_HIGH\n\tsysIPV6_PORTRANGE_LOW     = C.IPV6_PORTRANGE_LOW\n\n\tsizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6\n\tsizeofInet6Pktinfo  
= C.sizeof_struct_in6_pktinfo\n\tsizeofIPv6Mtuinfo   = C.sizeof_struct_ip6_mtuinfo\n\n\tsizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq\n\n\tsizeofICMPv6Filter = C.sizeof_struct_icmp6_filter\n)\n\ntype sockaddrInet6 C.struct_sockaddr_in6\n\ntype inet6Pktinfo C.struct_in6_pktinfo\n\ntype ipv6Mtuinfo C.struct_ip6_mtuinfo\n\ntype ipv6Mreq C.struct_ipv6_mreq\n\ntype icmpv6Filter C.struct_icmp6_filter\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/defs_freebsd.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in6_addr [16]byte /* in6_addr */\n\npackage ipv6\n\n/*\n#include <sys/param.h>\n#include <sys/socket.h>\n\n#include <netinet/in.h>\n#include <netinet/icmp6.h>\n*/\nimport \"C\"\n\nconst (\n\tsysIPV6_UNICAST_HOPS   = C.IPV6_UNICAST_HOPS\n\tsysIPV6_MULTICAST_IF   = C.IPV6_MULTICAST_IF\n\tsysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS\n\tsysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP\n\tsysIPV6_JOIN_GROUP     = C.IPV6_JOIN_GROUP\n\tsysIPV6_LEAVE_GROUP    = C.IPV6_LEAVE_GROUP\n\tsysIPV6_PORTRANGE      = C.IPV6_PORTRANGE\n\tsysICMP6_FILTER        = C.ICMP6_FILTER\n\n\tsysIPV6_CHECKSUM = C.IPV6_CHECKSUM\n\tsysIPV6_V6ONLY   = C.IPV6_V6ONLY\n\n\tsysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY\n\n\tsysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS\n\n\tsysIPV6_RECVPKTINFO  = C.IPV6_RECVPKTINFO\n\tsysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT\n\tsysIPV6_RECVRTHDR    = C.IPV6_RECVRTHDR\n\tsysIPV6_RECVHOPOPTS  = C.IPV6_RECVHOPOPTS\n\tsysIPV6_RECVDSTOPTS  = C.IPV6_RECVDSTOPTS\n\n\tsysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU\n\tsysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU\n\n\tsysIPV6_PATHMTU = C.IPV6_PATHMTU\n\n\tsysIPV6_PKTINFO  = C.IPV6_PKTINFO\n\tsysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT\n\tsysIPV6_NEXTHOP  = C.IPV6_NEXTHOP\n\tsysIPV6_HOPOPTS  = C.IPV6_HOPOPTS\n\tsysIPV6_DSTOPTS  = C.IPV6_DSTOPTS\n\tsysIPV6_RTHDR    = C.IPV6_RTHDR\n\n\tsysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS\n\n\tsysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL\n\n\tsysIPV6_TCLASS   = C.IPV6_TCLASS\n\tsysIPV6_DONTFRAG = C.IPV6_DONTFRAG\n\n\tsysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR\n\n\tsysIPV6_BINDANY = C.IPV6_BINDANY\n\n\tsysIPV6_MSFILTER = C.IPV6_MSFILTER\n\n\tsysMCAST_JOIN_GROUP         = C.MCAST_JOIN_GROUP\n\tsysMCAST_LEAVE_GROUP        = C.MCAST_LEAVE_GROUP\n\tsysMCAST_JOIN_SOURCE_GROUP  = 
C.MCAST_JOIN_SOURCE_GROUP\n\tsysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP\n\tsysMCAST_BLOCK_SOURCE       = C.MCAST_BLOCK_SOURCE\n\tsysMCAST_UNBLOCK_SOURCE     = C.MCAST_UNBLOCK_SOURCE\n\n\tsysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT\n\tsysIPV6_PORTRANGE_HIGH    = C.IPV6_PORTRANGE_HIGH\n\tsysIPV6_PORTRANGE_LOW     = C.IPV6_PORTRANGE_LOW\n\n\tsizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage\n\tsizeofSockaddrInet6   = C.sizeof_struct_sockaddr_in6\n\tsizeofInet6Pktinfo    = C.sizeof_struct_in6_pktinfo\n\tsizeofIPv6Mtuinfo     = C.sizeof_struct_ip6_mtuinfo\n\n\tsizeofIPv6Mreq       = C.sizeof_struct_ipv6_mreq\n\tsizeofGroupReq       = C.sizeof_struct_group_req\n\tsizeofGroupSourceReq = C.sizeof_struct_group_source_req\n\n\tsizeofICMPv6Filter = C.sizeof_struct_icmp6_filter\n)\n\ntype sockaddrStorage C.struct_sockaddr_storage\n\ntype sockaddrInet6 C.struct_sockaddr_in6\n\ntype inet6Pktinfo C.struct_in6_pktinfo\n\ntype ipv6Mtuinfo C.struct_ip6_mtuinfo\n\ntype ipv6Mreq C.struct_ipv6_mreq\n\ntype groupReq C.struct_group_req\n\ntype groupSourceReq C.struct_group_source_req\n\ntype icmpv6Filter C.struct_icmp6_filter\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/defs_linux.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in6_addr [16]byte /* in6_addr */\n\npackage ipv6\n\n/*\n#include <linux/in.h>\n#include <linux/in6.h>\n#include <linux/ipv6.h>\n#include <linux/icmpv6.h>\n#include <linux/filter.h>\n#include <sys/socket.h>\n*/\nimport \"C\"\n\nconst (\n\tsysIPV6_ADDRFORM       = C.IPV6_ADDRFORM\n\tsysIPV6_2292PKTINFO    = C.IPV6_2292PKTINFO\n\tsysIPV6_2292HOPOPTS    = C.IPV6_2292HOPOPTS\n\tsysIPV6_2292DSTOPTS    = C.IPV6_2292DSTOPTS\n\tsysIPV6_2292RTHDR      = C.IPV6_2292RTHDR\n\tsysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS\n\tsysIPV6_CHECKSUM       = C.IPV6_CHECKSUM\n\tsysIPV6_2292HOPLIMIT   = C.IPV6_2292HOPLIMIT\n\tsysIPV6_NEXTHOP        = C.IPV6_NEXTHOP\n\tsysIPV6_FLOWINFO       = C.IPV6_FLOWINFO\n\n\tsysIPV6_UNICAST_HOPS        = C.IPV6_UNICAST_HOPS\n\tsysIPV6_MULTICAST_IF        = C.IPV6_MULTICAST_IF\n\tsysIPV6_MULTICAST_HOPS      = C.IPV6_MULTICAST_HOPS\n\tsysIPV6_MULTICAST_LOOP      = C.IPV6_MULTICAST_LOOP\n\tsysIPV6_ADD_MEMBERSHIP      = C.IPV6_ADD_MEMBERSHIP\n\tsysIPV6_DROP_MEMBERSHIP     = C.IPV6_DROP_MEMBERSHIP\n\tsysMCAST_JOIN_GROUP         = C.MCAST_JOIN_GROUP\n\tsysMCAST_LEAVE_GROUP        = C.MCAST_LEAVE_GROUP\n\tsysMCAST_JOIN_SOURCE_GROUP  = C.MCAST_JOIN_SOURCE_GROUP\n\tsysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP\n\tsysMCAST_BLOCK_SOURCE       = C.MCAST_BLOCK_SOURCE\n\tsysMCAST_UNBLOCK_SOURCE     = C.MCAST_UNBLOCK_SOURCE\n\tsysMCAST_MSFILTER           = C.MCAST_MSFILTER\n\tsysIPV6_ROUTER_ALERT        = C.IPV6_ROUTER_ALERT\n\tsysIPV6_MTU_DISCOVER        = C.IPV6_MTU_DISCOVER\n\tsysIPV6_MTU                 = C.IPV6_MTU\n\tsysIPV6_RECVERR             = C.IPV6_RECVERR\n\tsysIPV6_V6ONLY              = C.IPV6_V6ONLY\n\tsysIPV6_JOIN_ANYCAST        = C.IPV6_JOIN_ANYCAST\n\tsysIPV6_LEAVE_ANYCAST       = 
C.IPV6_LEAVE_ANYCAST\n\n\t//sysIPV6_PMTUDISC_DONT      = C.IPV6_PMTUDISC_DONT\n\t//sysIPV6_PMTUDISC_WANT      = C.IPV6_PMTUDISC_WANT\n\t//sysIPV6_PMTUDISC_DO        = C.IPV6_PMTUDISC_DO\n\t//sysIPV6_PMTUDISC_PROBE     = C.IPV6_PMTUDISC_PROBE\n\t//sysIPV6_PMTUDISC_INTERFACE = C.IPV6_PMTUDISC_INTERFACE\n\t//sysIPV6_PMTUDISC_OMIT      = C.IPV6_PMTUDISC_OMIT\n\n\tsysIPV6_FLOWLABEL_MGR = C.IPV6_FLOWLABEL_MGR\n\tsysIPV6_FLOWINFO_SEND = C.IPV6_FLOWINFO_SEND\n\n\tsysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY\n\tsysIPV6_XFRM_POLICY  = C.IPV6_XFRM_POLICY\n\n\tsysIPV6_RECVPKTINFO  = C.IPV6_RECVPKTINFO\n\tsysIPV6_PKTINFO      = C.IPV6_PKTINFO\n\tsysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT\n\tsysIPV6_HOPLIMIT     = C.IPV6_HOPLIMIT\n\tsysIPV6_RECVHOPOPTS  = C.IPV6_RECVHOPOPTS\n\tsysIPV6_HOPOPTS      = C.IPV6_HOPOPTS\n\tsysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS\n\tsysIPV6_RECVRTHDR    = C.IPV6_RECVRTHDR\n\tsysIPV6_RTHDR        = C.IPV6_RTHDR\n\tsysIPV6_RECVDSTOPTS  = C.IPV6_RECVDSTOPTS\n\tsysIPV6_DSTOPTS      = C.IPV6_DSTOPTS\n\tsysIPV6_RECVPATHMTU  = C.IPV6_RECVPATHMTU\n\tsysIPV6_PATHMTU      = C.IPV6_PATHMTU\n\tsysIPV6_DONTFRAG     = C.IPV6_DONTFRAG\n\n\tsysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS\n\tsysIPV6_TCLASS     = C.IPV6_TCLASS\n\n\tsysIPV6_ADDR_PREFERENCES = C.IPV6_ADDR_PREFERENCES\n\n\tsysIPV6_PREFER_SRC_TMP            = C.IPV6_PREFER_SRC_TMP\n\tsysIPV6_PREFER_SRC_PUBLIC         = C.IPV6_PREFER_SRC_PUBLIC\n\tsysIPV6_PREFER_SRC_PUBTMP_DEFAULT = C.IPV6_PREFER_SRC_PUBTMP_DEFAULT\n\tsysIPV6_PREFER_SRC_COA            = C.IPV6_PREFER_SRC_COA\n\tsysIPV6_PREFER_SRC_HOME           = C.IPV6_PREFER_SRC_HOME\n\tsysIPV6_PREFER_SRC_CGA            = C.IPV6_PREFER_SRC_CGA\n\tsysIPV6_PREFER_SRC_NONCGA         = C.IPV6_PREFER_SRC_NONCGA\n\n\tsysIPV6_MINHOPCOUNT = C.IPV6_MINHOPCOUNT\n\n\tsysIPV6_ORIGDSTADDR     = C.IPV6_ORIGDSTADDR\n\tsysIPV6_RECVORIGDSTADDR = C.IPV6_RECVORIGDSTADDR\n\tsysIPV6_TRANSPARENT     = C.IPV6_TRANSPARENT\n\tsysIPV6_UNICAST_IF      = 
C.IPV6_UNICAST_IF\n\n\tsysICMPV6_FILTER = C.ICMPV6_FILTER\n\n\tsysICMPV6_FILTER_BLOCK       = C.ICMPV6_FILTER_BLOCK\n\tsysICMPV6_FILTER_PASS        = C.ICMPV6_FILTER_PASS\n\tsysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS\n\tsysICMPV6_FILTER_PASSONLY    = C.ICMPV6_FILTER_PASSONLY\n\n\tsysSOL_SOCKET       = C.SOL_SOCKET\n\tsysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER\n\n\tsizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage\n\tsizeofSockaddrInet6         = C.sizeof_struct_sockaddr_in6\n\tsizeofInet6Pktinfo          = C.sizeof_struct_in6_pktinfo\n\tsizeofIPv6Mtuinfo           = C.sizeof_struct_ip6_mtuinfo\n\tsizeofIPv6FlowlabelReq      = C.sizeof_struct_in6_flowlabel_req\n\n\tsizeofIPv6Mreq       = C.sizeof_struct_ipv6_mreq\n\tsizeofGroupReq       = C.sizeof_struct_group_req\n\tsizeofGroupSourceReq = C.sizeof_struct_group_source_req\n\n\tsizeofICMPv6Filter = C.sizeof_struct_icmp6_filter\n\n\tsizeofSockFprog = C.sizeof_struct_sock_fprog\n)\n\ntype kernelSockaddrStorage C.struct___kernel_sockaddr_storage\n\ntype sockaddrInet6 C.struct_sockaddr_in6\n\ntype inet6Pktinfo C.struct_in6_pktinfo\n\ntype ipv6Mtuinfo C.struct_ip6_mtuinfo\n\ntype ipv6FlowlabelReq C.struct_in6_flowlabel_req\n\ntype ipv6Mreq C.struct_ipv6_mreq\n\ntype groupReq C.struct_group_req\n\ntype groupSourceReq C.struct_group_source_req\n\ntype icmpv6Filter C.struct_icmp6_filter\n\ntype sockFProg C.struct_sock_fprog\n\ntype sockFilter C.struct_sock_filter\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/defs_netbsd.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in6_addr [16]byte /* in6_addr */\n\npackage ipv6\n\n/*\n#include <sys/param.h>\n#include <sys/socket.h>\n\n#include <netinet/in.h>\n#include <netinet/icmp6.h>\n*/\nimport \"C\"\n\nconst (\n\tsysIPV6_UNICAST_HOPS   = C.IPV6_UNICAST_HOPS\n\tsysIPV6_MULTICAST_IF   = C.IPV6_MULTICAST_IF\n\tsysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS\n\tsysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP\n\tsysIPV6_JOIN_GROUP     = C.IPV6_JOIN_GROUP\n\tsysIPV6_LEAVE_GROUP    = C.IPV6_LEAVE_GROUP\n\tsysIPV6_PORTRANGE      = C.IPV6_PORTRANGE\n\tsysICMP6_FILTER        = C.ICMP6_FILTER\n\n\tsysIPV6_CHECKSUM = C.IPV6_CHECKSUM\n\tsysIPV6_V6ONLY   = C.IPV6_V6ONLY\n\n\tsysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY\n\n\tsysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS\n\n\tsysIPV6_RECVPKTINFO  = C.IPV6_RECVPKTINFO\n\tsysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT\n\tsysIPV6_RECVRTHDR    = C.IPV6_RECVRTHDR\n\tsysIPV6_RECVHOPOPTS  = C.IPV6_RECVHOPOPTS\n\tsysIPV6_RECVDSTOPTS  = C.IPV6_RECVDSTOPTS\n\n\tsysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU\n\tsysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU\n\tsysIPV6_PATHMTU     = C.IPV6_PATHMTU\n\n\tsysIPV6_PKTINFO  = C.IPV6_PKTINFO\n\tsysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT\n\tsysIPV6_NEXTHOP  = C.IPV6_NEXTHOP\n\tsysIPV6_HOPOPTS  = C.IPV6_HOPOPTS\n\tsysIPV6_DSTOPTS  = C.IPV6_DSTOPTS\n\tsysIPV6_RTHDR    = C.IPV6_RTHDR\n\n\tsysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS\n\n\tsysIPV6_TCLASS   = C.IPV6_TCLASS\n\tsysIPV6_DONTFRAG = C.IPV6_DONTFRAG\n\n\tsysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT\n\tsysIPV6_PORTRANGE_HIGH    = C.IPV6_PORTRANGE_HIGH\n\tsysIPV6_PORTRANGE_LOW     = C.IPV6_PORTRANGE_LOW\n\n\tsizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6\n\tsizeofInet6Pktinfo  = C.sizeof_struct_in6_pktinfo\n\tsizeofIPv6Mtuinfo   = 
C.sizeof_struct_ip6_mtuinfo\n\n\tsizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq\n\n\tsizeofICMPv6Filter = C.sizeof_struct_icmp6_filter\n)\n\ntype sockaddrInet6 C.struct_sockaddr_in6\n\ntype inet6Pktinfo C.struct_in6_pktinfo\n\ntype ipv6Mtuinfo C.struct_ip6_mtuinfo\n\ntype ipv6Mreq C.struct_ipv6_mreq\n\ntype icmpv6Filter C.struct_icmp6_filter\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/defs_openbsd.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in6_addr [16]byte /* in6_addr */\n\npackage ipv6\n\n/*\n#include <sys/param.h>\n#include <sys/socket.h>\n\n#include <netinet/in.h>\n#include <netinet/icmp6.h>\n*/\nimport \"C\"\n\nconst (\n\tsysIPV6_UNICAST_HOPS   = C.IPV6_UNICAST_HOPS\n\tsysIPV6_MULTICAST_IF   = C.IPV6_MULTICAST_IF\n\tsysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS\n\tsysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP\n\tsysIPV6_JOIN_GROUP     = C.IPV6_JOIN_GROUP\n\tsysIPV6_LEAVE_GROUP    = C.IPV6_LEAVE_GROUP\n\tsysIPV6_PORTRANGE      = C.IPV6_PORTRANGE\n\tsysICMP6_FILTER        = C.ICMP6_FILTER\n\n\tsysIPV6_CHECKSUM = C.IPV6_CHECKSUM\n\tsysIPV6_V6ONLY   = C.IPV6_V6ONLY\n\n\tsysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS\n\n\tsysIPV6_RECVPKTINFO  = C.IPV6_RECVPKTINFO\n\tsysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT\n\tsysIPV6_RECVRTHDR    = C.IPV6_RECVRTHDR\n\tsysIPV6_RECVHOPOPTS  = C.IPV6_RECVHOPOPTS\n\tsysIPV6_RECVDSTOPTS  = C.IPV6_RECVDSTOPTS\n\n\tsysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU\n\tsysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU\n\n\tsysIPV6_PATHMTU = C.IPV6_PATHMTU\n\n\tsysIPV6_PKTINFO  = C.IPV6_PKTINFO\n\tsysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT\n\tsysIPV6_NEXTHOP  = C.IPV6_NEXTHOP\n\tsysIPV6_HOPOPTS  = C.IPV6_HOPOPTS\n\tsysIPV6_DSTOPTS  = C.IPV6_DSTOPTS\n\tsysIPV6_RTHDR    = C.IPV6_RTHDR\n\n\tsysIPV6_AUTH_LEVEL        = C.IPV6_AUTH_LEVEL\n\tsysIPV6_ESP_TRANS_LEVEL   = C.IPV6_ESP_TRANS_LEVEL\n\tsysIPV6_ESP_NETWORK_LEVEL = C.IPV6_ESP_NETWORK_LEVEL\n\tsysIPSEC6_OUTSA           = C.IPSEC6_OUTSA\n\tsysIPV6_RECVTCLASS        = C.IPV6_RECVTCLASS\n\n\tsysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL\n\tsysIPV6_IPCOMP_LEVEL  = C.IPV6_IPCOMP_LEVEL\n\n\tsysIPV6_TCLASS   = C.IPV6_TCLASS\n\tsysIPV6_DONTFRAG = C.IPV6_DONTFRAG\n\tsysIPV6_PIPEX    = C.IPV6_PIPEX\n\n\tsysIPV6_RTABLE = 
C.IPV6_RTABLE\n\n\tsysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT\n\tsysIPV6_PORTRANGE_HIGH    = C.IPV6_PORTRANGE_HIGH\n\tsysIPV6_PORTRANGE_LOW     = C.IPV6_PORTRANGE_LOW\n\n\tsizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6\n\tsizeofInet6Pktinfo  = C.sizeof_struct_in6_pktinfo\n\tsizeofIPv6Mtuinfo   = C.sizeof_struct_ip6_mtuinfo\n\n\tsizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq\n\n\tsizeofICMPv6Filter = C.sizeof_struct_icmp6_filter\n)\n\ntype sockaddrInet6 C.struct_sockaddr_in6\n\ntype inet6Pktinfo C.struct_in6_pktinfo\n\ntype ipv6Mtuinfo C.struct_ip6_mtuinfo\n\ntype ipv6Mreq C.struct_ipv6_mreq\n\ntype icmpv6Filter C.struct_icmp6_filter\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/defs_solaris.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in6_addr [16]byte /* in6_addr */\n\npackage ipv6\n\n/*\n#include <sys/socket.h>\n\n#include <netinet/in.h>\n#include <netinet/icmp6.h>\n*/\nimport \"C\"\n\nconst (\n\tsysIPV6_UNICAST_HOPS   = C.IPV6_UNICAST_HOPS\n\tsysIPV6_MULTICAST_IF   = C.IPV6_MULTICAST_IF\n\tsysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS\n\tsysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP\n\tsysIPV6_JOIN_GROUP     = C.IPV6_JOIN_GROUP\n\tsysIPV6_LEAVE_GROUP    = C.IPV6_LEAVE_GROUP\n\n\tsysIPV6_PKTINFO = C.IPV6_PKTINFO\n\n\tsysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT\n\tsysIPV6_NEXTHOP  = C.IPV6_NEXTHOP\n\tsysIPV6_HOPOPTS  = C.IPV6_HOPOPTS\n\tsysIPV6_DSTOPTS  = C.IPV6_DSTOPTS\n\n\tsysIPV6_RTHDR        = C.IPV6_RTHDR\n\tsysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS\n\n\tsysIPV6_RECVPKTINFO  = C.IPV6_RECVPKTINFO\n\tsysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT\n\tsysIPV6_RECVHOPOPTS  = C.IPV6_RECVHOPOPTS\n\n\tsysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR\n\n\tsysIPV6_RECVRTHDRDSTOPTS = C.IPV6_RECVRTHDRDSTOPTS\n\n\tsysIPV6_CHECKSUM        = C.IPV6_CHECKSUM\n\tsysIPV6_RECVTCLASS      = C.IPV6_RECVTCLASS\n\tsysIPV6_USE_MIN_MTU     = C.IPV6_USE_MIN_MTU\n\tsysIPV6_DONTFRAG        = C.IPV6_DONTFRAG\n\tsysIPV6_SEC_OPT         = C.IPV6_SEC_OPT\n\tsysIPV6_SRC_PREFERENCES = C.IPV6_SRC_PREFERENCES\n\tsysIPV6_RECVPATHMTU     = C.IPV6_RECVPATHMTU\n\tsysIPV6_PATHMTU         = C.IPV6_PATHMTU\n\tsysIPV6_TCLASS          = C.IPV6_TCLASS\n\tsysIPV6_V6ONLY          = C.IPV6_V6ONLY\n\n\tsysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS\n\n\tsysMCAST_JOIN_GROUP         = C.MCAST_JOIN_GROUP\n\tsysMCAST_LEAVE_GROUP        = C.MCAST_LEAVE_GROUP\n\tsysMCAST_BLOCK_SOURCE       = C.MCAST_BLOCK_SOURCE\n\tsysMCAST_UNBLOCK_SOURCE     = C.MCAST_UNBLOCK_SOURCE\n\tsysMCAST_JOIN_SOURCE_GROUP  = 
C.MCAST_JOIN_SOURCE_GROUP\n\tsysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP\n\n\tsysIPV6_PREFER_SRC_HOME   = C.IPV6_PREFER_SRC_HOME\n\tsysIPV6_PREFER_SRC_COA    = C.IPV6_PREFER_SRC_COA\n\tsysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC\n\tsysIPV6_PREFER_SRC_TMP    = C.IPV6_PREFER_SRC_TMP\n\tsysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA\n\tsysIPV6_PREFER_SRC_CGA    = C.IPV6_PREFER_SRC_CGA\n\n\tsysIPV6_PREFER_SRC_MIPMASK    = C.IPV6_PREFER_SRC_MIPMASK\n\tsysIPV6_PREFER_SRC_MIPDEFAULT = C.IPV6_PREFER_SRC_MIPDEFAULT\n\tsysIPV6_PREFER_SRC_TMPMASK    = C.IPV6_PREFER_SRC_TMPMASK\n\tsysIPV6_PREFER_SRC_TMPDEFAULT = C.IPV6_PREFER_SRC_TMPDEFAULT\n\tsysIPV6_PREFER_SRC_CGAMASK    = C.IPV6_PREFER_SRC_CGAMASK\n\tsysIPV6_PREFER_SRC_CGADEFAULT = C.IPV6_PREFER_SRC_CGADEFAULT\n\n\tsysIPV6_PREFER_SRC_MASK = C.IPV6_PREFER_SRC_MASK\n\n\tsysIPV6_PREFER_SRC_DEFAULT = C.IPV6_PREFER_SRC_DEFAULT\n\n\tsysIPV6_BOUND_IF   = C.IPV6_BOUND_IF\n\tsysIPV6_UNSPEC_SRC = C.IPV6_UNSPEC_SRC\n\n\tsysICMP6_FILTER = C.ICMP6_FILTER\n\n\tsizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage\n\tsizeofSockaddrInet6   = C.sizeof_struct_sockaddr_in6\n\tsizeofInet6Pktinfo    = C.sizeof_struct_in6_pktinfo\n\tsizeofIPv6Mtuinfo     = C.sizeof_struct_ip6_mtuinfo\n\n\tsizeofIPv6Mreq       = C.sizeof_struct_ipv6_mreq\n\tsizeofGroupReq       = C.sizeof_struct_group_req\n\tsizeofGroupSourceReq = C.sizeof_struct_group_source_req\n\n\tsizeofICMPv6Filter = C.sizeof_struct_icmp6_filter\n)\n\ntype sockaddrStorage C.struct_sockaddr_storage\n\ntype sockaddrInet6 C.struct_sockaddr_in6\n\ntype inet6Pktinfo C.struct_in6_pktinfo\n\ntype ipv6Mtuinfo C.struct_ip6_mtuinfo\n\ntype ipv6Mreq C.struct_ipv6_mreq\n\ntype groupReq C.struct_group_req\n\ntype groupSourceReq C.struct_group_source_req\n\ntype icmpv6Filter C.struct_icmp6_filter\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/dgramopt.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nimport (\n\t\"net\"\n\t\"syscall\"\n\n\t\"golang.org/x/net/bpf\"\n)\n\n// MulticastHopLimit returns the hop limit field value for outgoing\n// multicast packets.\nfunc (c *dgramOpt) MulticastHopLimit() (int, error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoMulticastHopLimit]\n\tif !ok {\n\t\treturn 0, errOpNoSupport\n\t}\n\treturn so.GetInt(c.Conn)\n}\n\n// SetMulticastHopLimit sets the hop limit field value for future\n// outgoing multicast packets.\nfunc (c *dgramOpt) SetMulticastHopLimit(hoplim int) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoMulticastHopLimit]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\treturn so.SetInt(c.Conn, hoplim)\n}\n\n// MulticastInterface returns the default interface for multicast\n// packet transmissions.\nfunc (c *dgramOpt) MulticastInterface() (*net.Interface, error) {\n\tif !c.ok() {\n\t\treturn nil, syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoMulticastInterface]\n\tif !ok {\n\t\treturn nil, errOpNoSupport\n\t}\n\treturn so.getMulticastInterface(c.Conn)\n}\n\n// SetMulticastInterface sets the default interface for future\n// multicast packet transmissions.\nfunc (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoMulticastInterface]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\treturn so.setMulticastInterface(c.Conn, ifi)\n}\n\n// MulticastLoopback reports whether transmitted multicast packets\n// should be copied and send back to the originator.\nfunc (c *dgramOpt) MulticastLoopback() (bool, error) {\n\tif !c.ok() {\n\t\treturn false, syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoMulticastLoopback]\n\tif !ok {\n\t\treturn false, errOpNoSupport\n\t}\n\ton, err := 
so.GetInt(c.Conn)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn on == 1, nil\n}\n\n// SetMulticastLoopback sets whether transmitted multicast packets\n// should be copied and send back to the originator.\nfunc (c *dgramOpt) SetMulticastLoopback(on bool) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoMulticastLoopback]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\treturn so.SetInt(c.Conn, boolint(on))\n}\n\n// JoinGroup joins the group address group on the interface ifi.\n// By default all sources that can cast data to group are accepted.\n// It's possible to mute and unmute data transmission from a specific\n// source by using ExcludeSourceSpecificGroup and\n// IncludeSourceSpecificGroup.\n// JoinGroup uses the system assigned multicast interface when ifi is\n// nil, although this is not recommended because the assignment\n// depends on platforms and sometimes it might require routing\n// configuration.\nfunc (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoJoinGroup]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\tgrp := netAddrToIP16(group)\n\tif grp == nil {\n\t\treturn errMissingAddress\n\t}\n\treturn so.setGroup(c.Conn, ifi, grp)\n}\n\n// LeaveGroup leaves the group address group on the interface ifi\n// regardless of whether the group is any-source group or\n// source-specific group.\nfunc (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoLeaveGroup]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\tgrp := netAddrToIP16(group)\n\tif grp == nil {\n\t\treturn errMissingAddress\n\t}\n\treturn so.setGroup(c.Conn, ifi, grp)\n}\n\n// JoinSourceSpecificGroup joins the source-specific group comprising\n// group and source on the interface ifi.\n// JoinSourceSpecificGroup uses the system assigned multicast\n// interface when ifi is nil, although 
this is not recommended because\n// the assignment depends on platforms and sometimes it might require\n// routing configuration.\nfunc (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoJoinSourceGroup]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\tgrp := netAddrToIP16(group)\n\tif grp == nil {\n\t\treturn errMissingAddress\n\t}\n\tsrc := netAddrToIP16(source)\n\tif src == nil {\n\t\treturn errMissingAddress\n\t}\n\treturn so.setSourceGroup(c.Conn, ifi, grp, src)\n}\n\n// LeaveSourceSpecificGroup leaves the source-specific group on the\n// interface ifi.\nfunc (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoLeaveSourceGroup]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\tgrp := netAddrToIP16(group)\n\tif grp == nil {\n\t\treturn errMissingAddress\n\t}\n\tsrc := netAddrToIP16(source)\n\tif src == nil {\n\t\treturn errMissingAddress\n\t}\n\treturn so.setSourceGroup(c.Conn, ifi, grp, src)\n}\n\n// ExcludeSourceSpecificGroup excludes the source-specific group from\n// the already joined any-source groups by JoinGroup on the interface\n// ifi.\nfunc (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoBlockSourceGroup]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\tgrp := netAddrToIP16(group)\n\tif grp == nil {\n\t\treturn errMissingAddress\n\t}\n\tsrc := netAddrToIP16(source)\n\tif src == nil {\n\t\treturn errMissingAddress\n\t}\n\treturn so.setSourceGroup(c.Conn, ifi, grp, src)\n}\n\n// IncludeSourceSpecificGroup includes the excluded source-specific\n// group by ExcludeSourceSpecificGroup again on the interface ifi.\nfunc (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {\n\tif !c.ok() 
{\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoUnblockSourceGroup]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\tgrp := netAddrToIP16(group)\n\tif grp == nil {\n\t\treturn errMissingAddress\n\t}\n\tsrc := netAddrToIP16(source)\n\tif src == nil {\n\t\treturn errMissingAddress\n\t}\n\treturn so.setSourceGroup(c.Conn, ifi, grp, src)\n}\n\n// Checksum reports whether the kernel will compute, store or verify a\n// checksum for both incoming and outgoing packets. If on is true, it\n// returns an offset in bytes into the data of where the checksum\n// field is located.\nfunc (c *dgramOpt) Checksum() (on bool, offset int, err error) {\n\tif !c.ok() {\n\t\treturn false, 0, syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoChecksum]\n\tif !ok {\n\t\treturn false, 0, errOpNoSupport\n\t}\n\toffset, err = so.GetInt(c.Conn)\n\tif err != nil {\n\t\treturn false, 0, err\n\t}\n\tif offset < 0 {\n\t\treturn false, 0, nil\n\t}\n\treturn true, offset, nil\n}\n\n// SetChecksum enables the kernel checksum processing. 
If on is true,\n// the offset should be an offset in bytes into the data of where the\n// checksum field is located.\nfunc (c *dgramOpt) SetChecksum(on bool, offset int) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoChecksum]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\tif !on {\n\t\toffset = -1\n\t}\n\treturn so.SetInt(c.Conn, offset)\n}\n\n// ICMPFilter returns an ICMP filter.\nfunc (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) {\n\tif !c.ok() {\n\t\treturn nil, syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoICMPFilter]\n\tif !ok {\n\t\treturn nil, errOpNoSupport\n\t}\n\treturn so.getICMPFilter(c.Conn)\n}\n\n// SetICMPFilter deploys the ICMP filter.\nfunc (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoICMPFilter]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\treturn so.setICMPFilter(c.Conn, f)\n}\n\n// SetBPF attaches a BPF program to the connection.\n//\n// Only supported on Linux.\nfunc (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoAttachFilter]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\treturn so.setBPF(c.Conn, filter)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/doc.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package ipv6 implements IP-level socket options for the Internet\n// Protocol version 6.\n//\n// The package provides IP-level socket options that allow\n// manipulation of IPv6 facilities.\n//\n// The IPv6 protocol is defined in RFC 8200.\n// Socket interface extensions are defined in RFC 3493, RFC 3542 and\n// RFC 3678.\n// MLDv1 and MLDv2 are defined in RFC 2710 and RFC 3810.\n// Source-specific multicast is defined in RFC 4607.\n//\n// On Darwin, this package requires OS X Mavericks version 10.9 or\n// above, or equivalent.\n//\n//\n// Unicasting\n//\n// The options for unicasting are available for net.TCPConn,\n// net.UDPConn and net.IPConn which are created as network connections\n// that use the IPv6 transport. When a single TCP connection carrying\n// a data flow of multiple packets needs to indicate the flow is\n// important, Conn is used to set the traffic class field on the IPv6\n// header for each packet.\n//\n//\tln, err := net.Listen(\"tcp6\", \"[::]:1024\")\n//\tif err != nil {\n//\t\t// error handling\n//\t}\n//\tdefer ln.Close()\n//\tfor {\n//\t\tc, err := ln.Accept()\n//\t\tif err != nil {\n//\t\t\t// error handling\n//\t\t}\n//\t\tgo func(c net.Conn) {\n//\t\t\tdefer c.Close()\n//\n// The outgoing packets will be labeled DiffServ assured forwarding\n// class 1 low drop precedence, known as AF11 packets.\n//\n//\t\t\tif err := ipv6.NewConn(c).SetTrafficClass(0x28); err != nil {\n//\t\t\t\t// error handling\n//\t\t\t}\n//\t\t\tif _, err := c.Write(data); err != nil {\n//\t\t\t\t// error handling\n//\t\t\t}\n//\t\t}(c)\n//\t}\n//\n//\n// Multicasting\n//\n// The options for multicasting are available for net.UDPConn and\n// net.IPconn which are created as network connections that use the\n// IPv6 transport. 
A few network facilities must be prepared before\n// you begin multicasting, at a minimum joining network interfaces and\n// multicast groups.\n//\n//\ten0, err := net.InterfaceByName(\"en0\")\n//\tif err != nil {\n//\t\t// error handling\n//\t}\n//\ten1, err := net.InterfaceByIndex(911)\n//\tif err != nil {\n//\t\t// error handling\n//\t}\n//\tgroup := net.ParseIP(\"ff02::114\")\n//\n// First, an application listens to an appropriate address with an\n// appropriate service port.\n//\n//\tc, err := net.ListenPacket(\"udp6\", \"[::]:1024\")\n//\tif err != nil {\n//\t\t// error handling\n//\t}\n//\tdefer c.Close()\n//\n// Second, the application joins multicast groups, starts listening to\n// the groups on the specified network interfaces. Note that the\n// service port for transport layer protocol does not matter with this\n// operation as joining groups affects only network and link layer\n// protocols, such as IPv6 and Ethernet.\n//\n//\tp := ipv6.NewPacketConn(c)\n//\tif err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil {\n//\t\t// error handling\n//\t}\n//\tif err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil {\n//\t\t// error handling\n//\t}\n//\n// The application might set per packet control message transmissions\n// between the protocol stack within the kernel. 
When the application\n// needs a destination address on an incoming packet,\n// SetControlMessage of PacketConn is used to enable control message\n// transmissions.\n//\n//\tif err := p.SetControlMessage(ipv6.FlagDst, true); err != nil {\n//\t\t// error handling\n//\t}\n//\n// The application could identify whether the received packets are\n// of interest by using the control message that contains the\n// destination address of the received packet.\n//\n//\tb := make([]byte, 1500)\n//\tfor {\n//\t\tn, rcm, src, err := p.ReadFrom(b)\n//\t\tif err != nil {\n//\t\t\t// error handling\n//\t\t}\n//\t\tif rcm.Dst.IsMulticast() {\n//\t\t\tif rcm.Dst.Equal(group) {\n//\t\t\t\t// joined group, do something\n//\t\t\t} else {\n//\t\t\t\t// unknown group, discard\n//\t\t\t\tcontinue\n//\t\t\t}\n//\t\t}\n//\n// The application can also send both unicast and multicast packets.\n//\n//\t\tp.SetTrafficClass(0x0)\n//\t\tp.SetHopLimit(16)\n//\t\tif _, err := p.WriteTo(data[:n], nil, src); err != nil {\n//\t\t\t// error handling\n//\t\t}\n//\t\tdst := &net.UDPAddr{IP: group, Port: 1024}\n//\t\twcm := ipv6.ControlMessage{TrafficClass: 0xe0, HopLimit: 1}\n//\t\tfor _, ifi := range []*net.Interface{en0, en1} {\n//\t\t\twcm.IfIndex = ifi.Index\n//\t\t\tif _, err := p.WriteTo(data[:n], &wcm, dst); err != nil {\n//\t\t\t\t// error handling\n//\t\t\t}\n//\t\t}\n//\t}\n//\n//\n// More multicasting\n//\n// An application that uses PacketConn may join multiple multicast\n// groups. 
For example, a UDP listener with port 1024 might join two\n// different groups across over two different network interfaces by\n// using:\n//\n//\tc, err := net.ListenPacket(\"udp6\", \"[::]:1024\")\n//\tif err != nil {\n//\t\t// error handling\n//\t}\n//\tdefer c.Close()\n//\tp := ipv6.NewPacketConn(c)\n//\tif err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP(\"ff02::1:114\")}); err != nil {\n//\t\t// error handling\n//\t}\n//\tif err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP(\"ff02::2:114\")}); err != nil {\n//\t\t// error handling\n//\t}\n//\tif err := p.JoinGroup(en1, &net.UDPAddr{IP: net.ParseIP(\"ff02::2:114\")}); err != nil {\n//\t\t// error handling\n//\t}\n//\n// It is possible for multiple UDP listeners that listen on the same\n// UDP port to join the same multicast group. The net package will\n// provide a socket that listens to a wildcard address with reusable\n// UDP port when an appropriate multicast address prefix is passed to\n// the net.ListenPacket or net.ListenUDP.\n//\n//\tc1, err := net.ListenPacket(\"udp6\", \"[ff02::]:1024\")\n//\tif err != nil {\n//\t\t// error handling\n//\t}\n//\tdefer c1.Close()\n//\tc2, err := net.ListenPacket(\"udp6\", \"[ff02::]:1024\")\n//\tif err != nil {\n//\t\t// error handling\n//\t}\n//\tdefer c2.Close()\n//\tp1 := ipv6.NewPacketConn(c1)\n//\tif err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP(\"ff02::114\")}); err != nil {\n//\t\t// error handling\n//\t}\n//\tp2 := ipv6.NewPacketConn(c2)\n//\tif err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP(\"ff02::114\")}); err != nil {\n//\t\t// error handling\n//\t}\n//\n// Also it is possible for the application to leave or rejoin a\n// multicast group on the network interface.\n//\n//\tif err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.ParseIP(\"ff02::114\")}); err != nil {\n//\t\t// error handling\n//\t}\n//\tif err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP(\"ff01::114\")}); err != nil {\n//\t\t// error handling\n//\t}\n//\n//\n// 
Source-specific multicasting\n//\n// An application that uses PacketConn on MLDv2 supported platform is\n// able to join source-specific multicast groups.\n// The application may use JoinSourceSpecificGroup and\n// LeaveSourceSpecificGroup for the operation known as \"include\" mode,\n//\n//\tssmgroup := net.UDPAddr{IP: net.ParseIP(\"ff32::8000:9\")}\n//\tssmsource := net.UDPAddr{IP: net.ParseIP(\"fe80::cafe\")}\n//\tif err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil {\n//\t\t// error handling\n//\t}\n//\tif err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil {\n//\t\t// error handling\n//\t}\n//\n// or JoinGroup, ExcludeSourceSpecificGroup,\n// IncludeSourceSpecificGroup and LeaveGroup for the operation known\n// as \"exclude\" mode.\n//\n//\texclsource := net.UDPAddr{IP: net.ParseIP(\"fe80::dead\")}\n//\tif err := p.JoinGroup(en0, &ssmgroup); err != nil {\n//\t\t// error handling\n//\t}\n//\tif err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil {\n//\t\t// error handling\n//\t}\n//\tif err := p.LeaveGroup(en0, &ssmgroup); err != nil {\n//\t\t// error handling\n//\t}\n//\n// Note that it depends on each platform implementation what happens\n// when an application which runs on MLDv2 unsupported platform uses\n// JoinSourceSpecificGroup and LeaveSourceSpecificGroup.\n// In general the platform tries to fall back to conversations using\n// MLDv1 and starts to listen to multicast traffic.\n// In the fallback case, ExcludeSourceSpecificGroup and\n// IncludeSourceSpecificGroup may return an error.\npackage ipv6 // import \"golang.org/x/net/ipv6\"\n\n// BUG(mikio): This package is not implemented on NaCl and Plan 9.\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/endpoint.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nimport (\n\t\"net\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\n// BUG(mikio): On Windows, the JoinSourceSpecificGroup,\n// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and\n// IncludeSourceSpecificGroup methods of PacketConn are not\n// implemented.\n\n// A Conn represents a network endpoint that uses IPv6 transport.\n// It allows to set basic IP-level socket options such as traffic\n// class and hop limit.\ntype Conn struct {\n\tgenericOpt\n}\n\ntype genericOpt struct {\n\t*socket.Conn\n}\n\nfunc (c *genericOpt) ok() bool { return c != nil && c.Conn != nil }\n\n// PathMTU returns a path MTU value for the destination associated\n// with the endpoint.\nfunc (c *Conn) PathMTU() (int, error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoPathMTU]\n\tif !ok {\n\t\treturn 0, errOpNoSupport\n\t}\n\t_, mtu, err := so.getMTUInfo(c.Conn)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn mtu, nil\n}\n\n// NewConn returns a new Conn.\nfunc NewConn(c net.Conn) *Conn {\n\tcc, _ := socket.NewConn(c)\n\treturn &Conn{\n\t\tgenericOpt: genericOpt{Conn: cc},\n\t}\n}\n\n// A PacketConn represents a packet network endpoint that uses IPv6\n// transport. It is used to control several IP-level socket options\n// including IPv6 header manipulation. 
It also provides datagram\n// based network I/O methods specific to the IPv6 and higher layer\n// protocols such as OSPF, GRE, and UDP.\ntype PacketConn struct {\n\tgenericOpt\n\tdgramOpt\n\tpayloadHandler\n}\n\ntype dgramOpt struct {\n\t*socket.Conn\n}\n\nfunc (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil }\n\n// SetControlMessage allows to receive the per packet basis IP-level\n// socket options.\nfunc (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error {\n\tif !c.payloadHandler.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on)\n}\n\n// SetDeadline sets the read and write deadlines associated with the\n// endpoint.\nfunc (c *PacketConn) SetDeadline(t time.Time) error {\n\tif !c.payloadHandler.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn c.payloadHandler.SetDeadline(t)\n}\n\n// SetReadDeadline sets the read deadline associated with the\n// endpoint.\nfunc (c *PacketConn) SetReadDeadline(t time.Time) error {\n\tif !c.payloadHandler.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn c.payloadHandler.SetReadDeadline(t)\n}\n\n// SetWriteDeadline sets the write deadline associated with the\n// endpoint.\nfunc (c *PacketConn) SetWriteDeadline(t time.Time) error {\n\tif !c.payloadHandler.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn c.payloadHandler.SetWriteDeadline(t)\n}\n\n// Close closes the endpoint.\nfunc (c *PacketConn) Close() error {\n\tif !c.payloadHandler.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\treturn c.payloadHandler.Close()\n}\n\n// NewPacketConn returns a new PacketConn using c as its underlying\n// transport.\nfunc NewPacketConn(c net.PacketConn) *PacketConn {\n\tcc, _ := socket.NewConn(c.(net.Conn))\n\treturn &PacketConn{\n\t\tgenericOpt:     genericOpt{Conn: cc},\n\t\tdgramOpt:       dgramOpt{Conn: cc},\n\t\tpayloadHandler: payloadHandler{PacketConn: c, Conn: cc},\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/example_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6_test\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"time\"\n\n\t\"golang.org/x/net/icmp\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nfunc ExampleConn_markingTCP() {\n\tln, err := net.Listen(\"tcp\", \"[::]:1024\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer ln.Close()\n\n\tfor {\n\t\tc, err := ln.Accept()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tgo func(c net.Conn) {\n\t\t\tdefer c.Close()\n\t\t\tif c.RemoteAddr().(*net.TCPAddr).IP.To16() != nil && c.RemoteAddr().(*net.TCPAddr).IP.To4() == nil {\n\t\t\t\tp := ipv6.NewConn(c)\n\t\t\t\tif err := p.SetTrafficClass(0x28); err != nil { // DSCP AF11\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif err := p.SetHopLimit(128); err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif _, err := c.Write([]byte(\"HELLO-R-U-THERE-ACK\")); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}(c)\n\t}\n}\n\nfunc ExamplePacketConn_servingOneShotMulticastDNS() {\n\tc, err := net.ListenPacket(\"udp6\", \"[::]:5353\") // mDNS over UDP\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer c.Close()\n\tp := ipv6.NewPacketConn(c)\n\n\ten0, err := net.InterfaceByName(\"en0\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tmDNSLinkLocal := net.UDPAddr{IP: net.ParseIP(\"ff02::fb\")}\n\tif err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer p.LeaveGroup(en0, &mDNSLinkLocal)\n\tif err := p.SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar wcm ipv6.ControlMessage\n\tb := make([]byte, 1500)\n\tfor {\n\t\t_, rcm, peer, err := p.ReadFrom(b)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif !rcm.Dst.IsMulticast() || !rcm.Dst.Equal(mDNSLinkLocal.IP) {\n\t\t\tcontinue\n\t\t}\n\t\twcm.IfIndex = rcm.IfIndex\n\t\tanswers := 
[]byte(\"FAKE-MDNS-ANSWERS\") // fake mDNS answers, you need to implement this\n\t\tif _, err := p.WriteTo(answers, &wcm, peer); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc ExamplePacketConn_tracingIPPacketRoute() {\n\t// Tracing an IP packet route to www.google.com.\n\n\tconst host = \"www.google.com\"\n\tips, err := net.LookupIP(host)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar dst net.IPAddr\n\tfor _, ip := range ips {\n\t\tif ip.To16() != nil && ip.To4() == nil {\n\t\t\tdst.IP = ip\n\t\t\tfmt.Printf(\"using %v for tracing an IP packet route to %s\\n\", dst.IP, host)\n\t\t\tbreak\n\t\t}\n\t}\n\tif dst.IP == nil {\n\t\tlog.Fatal(\"no AAAA record found\")\n\t}\n\n\tc, err := net.ListenPacket(\"ip6:58\", \"::\") // ICMP for IPv6\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer c.Close()\n\tp := ipv6.NewPacketConn(c)\n\n\tif err := p.SetControlMessage(ipv6.FlagHopLimit|ipv6.FlagSrc|ipv6.FlagDst|ipv6.FlagInterface, true); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\twm := icmp.Message{\n\t\tType: ipv6.ICMPTypeEchoRequest, Code: 0,\n\t\tBody: &icmp.Echo{\n\t\t\tID:   os.Getpid() & 0xffff,\n\t\t\tData: []byte(\"HELLO-R-U-THERE\"),\n\t\t},\n\t}\n\tvar f ipv6.ICMPFilter\n\tf.SetAll(true)\n\tf.Accept(ipv6.ICMPTypeTimeExceeded)\n\tf.Accept(ipv6.ICMPTypeEchoReply)\n\tif err := p.SetICMPFilter(&f); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar wcm ipv6.ControlMessage\n\trb := make([]byte, 1500)\n\tfor i := 1; i <= 64; i++ { // up to 64 hops\n\t\twm.Body.(*icmp.Echo).Seq = i\n\t\twb, err := wm.Marshal(nil)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\t// In the real world usually there are several\n\t\t// multiple traffic-engineered paths for each hop.\n\t\t// You may need to probe a few times to each hop.\n\t\tbegin := time.Now()\n\t\twcm.HopLimit = i\n\t\tif _, err := p.WriteTo(wb, &wcm, &dst); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil 
{\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tn, rcm, peer, err := p.ReadFrom(rb)\n\t\tif err != nil {\n\t\t\tif err, ok := err.(net.Error); ok && err.Timeout() {\n\t\t\t\tfmt.Printf(\"%v\\t*\\n\", i)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\trm, err := icmp.ParseMessage(58, rb[:n])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\trtt := time.Since(begin)\n\n\t\t// In the real world you need to determine whether the\n\t\t// received message is yours using ControlMessage.Src,\n\t\t// ControlMessage.Dst, icmp.Echo.ID and icmp.Echo.Seq.\n\t\tswitch rm.Type {\n\t\tcase ipv6.ICMPTypeTimeExceeded:\n\t\t\tnames, _ := net.LookupAddr(peer.String())\n\t\t\tfmt.Printf(\"%d\\t%v %+v %v\\n\\t%+v\\n\", i, peer, names, rtt, rcm)\n\t\tcase ipv6.ICMPTypeEchoReply:\n\t\t\tnames, _ := net.LookupAddr(peer.String())\n\t\t\tfmt.Printf(\"%d\\t%v %+v %v\\n\\t%+v\\n\", i, peer, names, rtt, rcm)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc ExamplePacketConn_advertisingOSPFHello() {\n\tc, err := net.ListenPacket(\"ip6:89\", \"::\") // OSPF for IPv6\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer c.Close()\n\tp := ipv6.NewPacketConn(c)\n\n\ten0, err := net.InterfaceByName(\"en0\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tallSPFRouters := net.IPAddr{IP: net.ParseIP(\"ff02::5\")}\n\tif err := p.JoinGroup(en0, &allSPFRouters); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer p.LeaveGroup(en0, &allSPFRouters)\n\n\thello := make([]byte, 24) // fake hello data, you need to implement this\n\tospf := make([]byte, 16)  // fake ospf header, you need to implement this\n\tospf[0] = 3               // version 3\n\tospf[1] = 1               // hello packet\n\tospf = append(ospf, hello...)\n\tif err := p.SetChecksum(true, 12); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tcm := ipv6.ControlMessage{\n\t\tTrafficClass: 0xc0, // DSCP CS6\n\t\tHopLimit:     1,\n\t\tIfIndex:      en0.Index,\n\t}\n\tif _, err := p.WriteTo(ospf, &cm, &allSPFRouters); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/gen.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n//go:generate go run gen.go\n\n// This program generates system adaptation constants and types,\n// internet protocol constants and tables by reading template files\n// and IANA protocol registries.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding/xml\"\n\t\"fmt\"\n\t\"go/format\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/exec\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc main() {\n\tif err := genzsys(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\tif err := geniana(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc genzsys() error {\n\tdefs := \"defs_\" + runtime.GOOS + \".go\"\n\tf, err := os.Open(defs)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tf.Close()\n\tcmd := exec.Command(\"go\", \"tool\", \"cgo\", \"-godefs\", defs)\n\tb, err := cmd.Output()\n\tif err != nil {\n\t\treturn err\n\t}\n\tb, err = format.Source(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tzsys := \"zsys_\" + runtime.GOOS + \".go\"\n\tswitch runtime.GOOS {\n\tcase \"freebsd\", \"linux\":\n\t\tzsys = \"zsys_\" + runtime.GOOS + \"_\" + runtime.GOARCH + \".go\"\n\t}\n\tif err := ioutil.WriteFile(zsys, b, 0644); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nvar registries = []struct {\n\turl   string\n\tparse func(io.Writer, io.Reader) error\n}{\n\t{\n\t\t\"http://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml\",\n\t\tparseICMPv6Parameters,\n\t},\n}\n\nfunc geniana() error {\n\tvar bb bytes.Buffer\n\tfmt.Fprintf(&bb, \"// go generate gen.go\\n\")\n\tfmt.Fprintf(&bb, \"// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\\n\\n\")\n\tfmt.Fprintf(&bb, \"package ipv6\\n\\n\")\n\tfor _, r := range registries {\n\t\tresp, err := 
http.Get(r.url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\treturn fmt.Errorf(\"got HTTP status code %v for %v\\n\", resp.StatusCode, r.url)\n\t\t}\n\t\tif err := r.parse(&bb, resp.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintf(&bb, \"\\n\")\n\t}\n\tb, err := format.Source(bb.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := ioutil.WriteFile(\"iana.go\", b, 0644); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc parseICMPv6Parameters(w io.Writer, r io.Reader) error {\n\tdec := xml.NewDecoder(r)\n\tvar icp icmpv6Parameters\n\tif err := dec.Decode(&icp); err != nil {\n\t\treturn err\n\t}\n\tprs := icp.escape()\n\tfmt.Fprintf(w, \"// %s, Updated: %s\\n\", icp.Title, icp.Updated)\n\tfmt.Fprintf(w, \"const (\\n\")\n\tfor _, pr := range prs {\n\t\tif pr.Name == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(w, \"ICMPType%s ICMPType = %d\", pr.Name, pr.Value)\n\t\tfmt.Fprintf(w, \"// %s\\n\", pr.OrigName)\n\t}\n\tfmt.Fprintf(w, \")\\n\\n\")\n\tfmt.Fprintf(w, \"// %s, Updated: %s\\n\", icp.Title, icp.Updated)\n\tfmt.Fprintf(w, \"var icmpTypes = map[ICMPType]string{\\n\")\n\tfor _, pr := range prs {\n\t\tif pr.Name == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(w, \"%d: %q,\\n\", pr.Value, strings.ToLower(pr.OrigName))\n\t}\n\tfmt.Fprintf(w, \"}\\n\")\n\treturn nil\n}\n\ntype icmpv6Parameters struct {\n\tXMLName    xml.Name `xml:\"registry\"`\n\tTitle      string   `xml:\"title\"`\n\tUpdated    string   `xml:\"updated\"`\n\tRegistries []struct {\n\t\tTitle   string `xml:\"title\"`\n\t\tRecords []struct {\n\t\t\tValue string `xml:\"value\"`\n\t\t\tName  string `xml:\"name\"`\n\t\t} `xml:\"record\"`\n\t} `xml:\"registry\"`\n}\n\ntype canonICMPv6ParamRecord struct {\n\tOrigName string\n\tName     string\n\tValue    int\n}\n\nfunc (icp *icmpv6Parameters) escape() []canonICMPv6ParamRecord {\n\tid := -1\n\tfor i, r := range icp.Registries {\n\t\tif 
strings.Contains(r.Title, \"Type\") || strings.Contains(r.Title, \"type\") {\n\t\t\tid = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif id < 0 {\n\t\treturn nil\n\t}\n\tprs := make([]canonICMPv6ParamRecord, len(icp.Registries[id].Records))\n\tsr := strings.NewReplacer(\n\t\t\"Messages\", \"\",\n\t\t\"Message\", \"\",\n\t\t\"ICMP\", \"\",\n\t\t\"+\", \"P\",\n\t\t\"-\", \"\",\n\t\t\"/\", \"\",\n\t\t\".\", \"\",\n\t\t\" \", \"\",\n\t)\n\tfor i, pr := range icp.Registries[id].Records {\n\t\tif strings.Contains(pr.Name, \"Reserved\") ||\n\t\t\tstrings.Contains(pr.Name, \"Unassigned\") ||\n\t\t\tstrings.Contains(pr.Name, \"Deprecated\") ||\n\t\t\tstrings.Contains(pr.Name, \"Experiment\") ||\n\t\t\tstrings.Contains(pr.Name, \"experiment\") {\n\t\t\tcontinue\n\t\t}\n\t\tss := strings.Split(pr.Name, \"\\n\")\n\t\tif len(ss) > 1 {\n\t\t\tprs[i].Name = strings.Join(ss, \" \")\n\t\t} else {\n\t\t\tprs[i].Name = ss[0]\n\t\t}\n\t\ts := strings.TrimSpace(prs[i].Name)\n\t\tprs[i].OrigName = s\n\t\tprs[i].Name = sr.Replace(s)\n\t\tprs[i].Value, _ = strconv.Atoi(pr.Value)\n\t}\n\treturn prs\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/genericopt.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nimport \"syscall\"\n\n// TrafficClass returns the traffic class field value for outgoing\n// packets.\nfunc (c *genericOpt) TrafficClass() (int, error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoTrafficClass]\n\tif !ok {\n\t\treturn 0, errOpNoSupport\n\t}\n\treturn so.GetInt(c.Conn)\n}\n\n// SetTrafficClass sets the traffic class field value for future\n// outgoing packets.\nfunc (c *genericOpt) SetTrafficClass(tclass int) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoTrafficClass]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\treturn so.SetInt(c.Conn, tclass)\n}\n\n// HopLimit returns the hop limit field value for outgoing packets.\nfunc (c *genericOpt) HopLimit() (int, error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoHopLimit]\n\tif !ok {\n\t\treturn 0, errOpNoSupport\n\t}\n\treturn so.GetInt(c.Conn)\n}\n\n// SetHopLimit sets the hop limit field value for future outgoing\n// packets.\nfunc (c *genericOpt) SetHopLimit(hoplim int) error {\n\tif !c.ok() {\n\t\treturn syscall.EINVAL\n\t}\n\tso, ok := sockOpts[ssoHopLimit]\n\tif !ok {\n\t\treturn errOpNoSupport\n\t}\n\treturn so.SetInt(c.Conn, hoplim)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/header.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"net\"\n)\n\nconst (\n\tVersion   = 6  // protocol version\n\tHeaderLen = 40 // header length\n)\n\n// A Header represents an IPv6 base header.\ntype Header struct {\n\tVersion      int    // protocol version\n\tTrafficClass int    // traffic class\n\tFlowLabel    int    // flow label\n\tPayloadLen   int    // payload length\n\tNextHeader   int    // next header\n\tHopLimit     int    // hop limit\n\tSrc          net.IP // source address\n\tDst          net.IP // destination address\n}\n\nfunc (h *Header) String() string {\n\tif h == nil {\n\t\treturn \"<nil>\"\n\t}\n\treturn fmt.Sprintf(\"ver=%d tclass=%#x flowlbl=%#x payloadlen=%d nxthdr=%d hoplim=%d src=%v dst=%v\", h.Version, h.TrafficClass, h.FlowLabel, h.PayloadLen, h.NextHeader, h.HopLimit, h.Src, h.Dst)\n}\n\n// ParseHeader parses b as an IPv6 base header.\nfunc ParseHeader(b []byte) (*Header, error) {\n\tif len(b) < HeaderLen {\n\t\treturn nil, errHeaderTooShort\n\t}\n\th := &Header{\n\t\tVersion:      int(b[0]) >> 4,\n\t\tTrafficClass: int(b[0]&0x0f)<<4 | int(b[1])>>4,\n\t\tFlowLabel:    int(b[1]&0x0f)<<16 | int(b[2])<<8 | int(b[3]),\n\t\tPayloadLen:   int(binary.BigEndian.Uint16(b[4:6])),\n\t\tNextHeader:   int(b[6]),\n\t\tHopLimit:     int(b[7]),\n\t}\n\th.Src = make(net.IP, net.IPv6len)\n\tcopy(h.Src, b[8:24])\n\th.Dst = make(net.IP, net.IPv6len)\n\tcopy(h.Dst, b[24:40])\n\treturn h, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/header_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6_test\n\nimport (\n\t\"net\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nvar (\n\twireHeaderFromKernel = [ipv6.HeaderLen]byte{\n\t\t0x69, 0x8b, 0xee, 0xf1,\n\t\t0xca, 0xfe, 0x2c, 0x01,\n\t\t0x20, 0x01, 0x0d, 0xb8,\n\t\t0x00, 0x01, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x01,\n\t\t0x20, 0x01, 0x0d, 0xb8,\n\t\t0x00, 0x02, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x01,\n\t}\n\n\ttestHeader = &ipv6.Header{\n\t\tVersion:      ipv6.Version,\n\t\tTrafficClass: iana.DiffServAF43,\n\t\tFlowLabel:    0xbeef1,\n\t\tPayloadLen:   0xcafe,\n\t\tNextHeader:   iana.ProtocolIPv6Frag,\n\t\tHopLimit:     1,\n\t\tSrc:          net.ParseIP(\"2001:db8:1::1\"),\n\t\tDst:          net.ParseIP(\"2001:db8:2::1\"),\n\t}\n)\n\nfunc TestParseHeader(t *testing.T) {\n\th, err := ipv6.ParseHeader(wireHeaderFromKernel[:])\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !reflect.DeepEqual(h, testHeader) {\n\t\tt.Fatalf(\"got %#v; want %#v\", h, testHeader)\n\t}\n\ts := h.String()\n\tif strings.Contains(s, \",\") {\n\t\tt.Fatalf(\"should be space-separated values: %s\", s)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/helper.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nimport (\n\t\"errors\"\n\t\"net\"\n)\n\nvar (\n\terrMissingAddress  = errors.New(\"missing address\")\n\terrHeaderTooShort  = errors.New(\"header too short\")\n\terrInvalidConnType = errors.New(\"invalid conn type\")\n\terrOpNoSupport     = errors.New(\"operation not supported\")\n\terrNoSuchInterface = errors.New(\"no such interface\")\n)\n\nfunc boolint(b bool) int {\n\tif b {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc netAddrToIP16(a net.Addr) net.IP {\n\tswitch v := a.(type) {\n\tcase *net.UDPAddr:\n\t\tif ip := v.IP.To16(); ip != nil && ip.To4() == nil {\n\t\t\treturn ip\n\t\t}\n\tcase *net.IPAddr:\n\t\tif ip := v.IP.To16(); ip != nil && ip.To4() == nil {\n\t\t\treturn ip\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc opAddr(a net.Addr) net.Addr {\n\tswitch a.(type) {\n\tcase *net.TCPAddr:\n\t\tif a == nil {\n\t\t\treturn nil\n\t\t}\n\tcase *net.UDPAddr:\n\t\tif a == nil {\n\t\t\treturn nil\n\t\t}\n\tcase *net.IPAddr:\n\t\tif a == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn a\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/iana.go",
    "content": "// go generate gen.go\n// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\npackage ipv6\n\n// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07\nconst (\n\tICMPTypeDestinationUnreachable                ICMPType = 1   // Destination Unreachable\n\tICMPTypePacketTooBig                          ICMPType = 2   // Packet Too Big\n\tICMPTypeTimeExceeded                          ICMPType = 3   // Time Exceeded\n\tICMPTypeParameterProblem                      ICMPType = 4   // Parameter Problem\n\tICMPTypeEchoRequest                           ICMPType = 128 // Echo Request\n\tICMPTypeEchoReply                             ICMPType = 129 // Echo Reply\n\tICMPTypeMulticastListenerQuery                ICMPType = 130 // Multicast Listener Query\n\tICMPTypeMulticastListenerReport               ICMPType = 131 // Multicast Listener Report\n\tICMPTypeMulticastListenerDone                 ICMPType = 132 // Multicast Listener Done\n\tICMPTypeRouterSolicitation                    ICMPType = 133 // Router Solicitation\n\tICMPTypeRouterAdvertisement                   ICMPType = 134 // Router Advertisement\n\tICMPTypeNeighborSolicitation                  ICMPType = 135 // Neighbor Solicitation\n\tICMPTypeNeighborAdvertisement                 ICMPType = 136 // Neighbor Advertisement\n\tICMPTypeRedirect                              ICMPType = 137 // Redirect Message\n\tICMPTypeRouterRenumbering                     ICMPType = 138 // Router Renumbering\n\tICMPTypeNodeInformationQuery                  ICMPType = 139 // ICMP Node Information Query\n\tICMPTypeNodeInformationResponse               ICMPType = 140 // ICMP Node Information Response\n\tICMPTypeInverseNeighborDiscoverySolicitation  ICMPType = 141 // Inverse Neighbor Discovery Solicitation Message\n\tICMPTypeInverseNeighborDiscoveryAdvertisement ICMPType = 142 // Inverse Neighbor Discovery Advertisement Message\n\tICMPTypeVersion2MulticastListenerReport       ICMPType = 143 // Version 2 
Multicast Listener Report\n\tICMPTypeHomeAgentAddressDiscoveryRequest      ICMPType = 144 // Home Agent Address Discovery Request Message\n\tICMPTypeHomeAgentAddressDiscoveryReply        ICMPType = 145 // Home Agent Address Discovery Reply Message\n\tICMPTypeMobilePrefixSolicitation              ICMPType = 146 // Mobile Prefix Solicitation\n\tICMPTypeMobilePrefixAdvertisement             ICMPType = 147 // Mobile Prefix Advertisement\n\tICMPTypeCertificationPathSolicitation         ICMPType = 148 // Certification Path Solicitation Message\n\tICMPTypeCertificationPathAdvertisement        ICMPType = 149 // Certification Path Advertisement Message\n\tICMPTypeMulticastRouterAdvertisement          ICMPType = 151 // Multicast Router Advertisement\n\tICMPTypeMulticastRouterSolicitation           ICMPType = 152 // Multicast Router Solicitation\n\tICMPTypeMulticastRouterTermination            ICMPType = 153 // Multicast Router Termination\n\tICMPTypeFMIPv6                                ICMPType = 154 // FMIPv6 Messages\n\tICMPTypeRPLControl                            ICMPType = 155 // RPL Control Message\n\tICMPTypeILNPv6LocatorUpdate                   ICMPType = 156 // ILNPv6 Locator Update Message\n\tICMPTypeDuplicateAddressRequest               ICMPType = 157 // Duplicate Address Request\n\tICMPTypeDuplicateAddressConfirmation          ICMPType = 158 // Duplicate Address Confirmation\n\tICMPTypeMPLControl                            ICMPType = 159 // MPL Control Message\n)\n\n// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07\nvar icmpTypes = map[ICMPType]string{\n\t1:   \"destination unreachable\",\n\t2:   \"packet too big\",\n\t3:   \"time exceeded\",\n\t4:   \"parameter problem\",\n\t128: \"echo request\",\n\t129: \"echo reply\",\n\t130: \"multicast listener query\",\n\t131: \"multicast listener report\",\n\t132: \"multicast listener done\",\n\t133: \"router solicitation\",\n\t134: \"router advertisement\",\n\t135: \"neighbor 
solicitation\",\n\t136: \"neighbor advertisement\",\n\t137: \"redirect message\",\n\t138: \"router renumbering\",\n\t139: \"icmp node information query\",\n\t140: \"icmp node information response\",\n\t141: \"inverse neighbor discovery solicitation message\",\n\t142: \"inverse neighbor discovery advertisement message\",\n\t143: \"version 2 multicast listener report\",\n\t144: \"home agent address discovery request message\",\n\t145: \"home agent address discovery reply message\",\n\t146: \"mobile prefix solicitation\",\n\t147: \"mobile prefix advertisement\",\n\t148: \"certification path solicitation message\",\n\t149: \"certification path advertisement message\",\n\t151: \"multicast router advertisement\",\n\t152: \"multicast router solicitation\",\n\t153: \"multicast router termination\",\n\t154: \"fmipv6 messages\",\n\t155: \"rpl control message\",\n\t156: \"ilnpv6 locator update message\",\n\t157: \"duplicate address request\",\n\t158: \"duplicate address confirmation\",\n\t159: \"mpl control message\",\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/icmp.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nimport \"golang.org/x/net/internal/iana\"\n\n// BUG(mikio): On Windows, methods related to ICMPFilter are not\n// implemented.\n\n// An ICMPType represents a type of ICMP message.\ntype ICMPType int\n\nfunc (typ ICMPType) String() string {\n\ts, ok := icmpTypes[typ]\n\tif !ok {\n\t\treturn \"<nil>\"\n\t}\n\treturn s\n}\n\n// Protocol returns the ICMPv6 protocol number.\nfunc (typ ICMPType) Protocol() int {\n\treturn iana.ProtocolIPv6ICMP\n}\n\n// An ICMPFilter represents an ICMP message filter for incoming\n// packets. The filter belongs to a packet delivery path on a host and\n// it cannot interact with forwarding packets or tunnel-outer packets.\n//\n// Note: RFC 8200 defines a reasonable role model. A node means a\n// device that implements IP. A router means a node that forwards IP\n// packets not explicitly addressed to itself, and a host means a node\n// that is not a router.\ntype ICMPFilter struct {\n\ticmpv6Filter\n}\n\n// Accept accepts incoming ICMP packets including the type field value\n// typ.\nfunc (f *ICMPFilter) Accept(typ ICMPType) {\n\tf.accept(typ)\n}\n\n// Block blocks incoming ICMP packets including the type field value\n// typ.\nfunc (f *ICMPFilter) Block(typ ICMPType) {\n\tf.block(typ)\n}\n\n// SetAll sets the filter action to the filter.\nfunc (f *ICMPFilter) SetAll(block bool) {\n\tf.setAll(block)\n}\n\n// WillBlock reports whether the ICMP type will be blocked.\nfunc (f *ICMPFilter) WillBlock(typ ICMPType) bool {\n\treturn f.willBlock(typ)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/icmp_bsd.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd openbsd\n\npackage ipv6\n\nfunc (f *icmpv6Filter) accept(typ ICMPType) {\n\tf.Filt[typ>>5] |= 1 << (uint32(typ) & 31)\n}\n\nfunc (f *icmpv6Filter) block(typ ICMPType) {\n\tf.Filt[typ>>5] &^= 1 << (uint32(typ) & 31)\n}\n\nfunc (f *icmpv6Filter) setAll(block bool) {\n\tfor i := range f.Filt {\n\t\tif block {\n\t\t\tf.Filt[i] = 0\n\t\t} else {\n\t\t\tf.Filt[i] = 1<<32 - 1\n\t\t}\n\t}\n}\n\nfunc (f *icmpv6Filter) willBlock(typ ICMPType) bool {\n\treturn f.Filt[typ>>5]&(1<<(uint32(typ)&31)) == 0\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/icmp_linux.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nfunc (f *icmpv6Filter) accept(typ ICMPType) {\n\tf.Data[typ>>5] &^= 1 << (uint32(typ) & 31)\n}\n\nfunc (f *icmpv6Filter) block(typ ICMPType) {\n\tf.Data[typ>>5] |= 1 << (uint32(typ) & 31)\n}\n\nfunc (f *icmpv6Filter) setAll(block bool) {\n\tfor i := range f.Data {\n\t\tif block {\n\t\t\tf.Data[i] = 1<<32 - 1\n\t\t} else {\n\t\t\tf.Data[i] = 0\n\t\t}\n\t}\n}\n\nfunc (f *icmpv6Filter) willBlock(typ ICMPType) bool {\n\treturn f.Data[typ>>5]&(1<<(uint32(typ)&31)) != 0\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/icmp_solaris.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nfunc (f *icmpv6Filter) accept(typ ICMPType) {\n\tf.X__icmp6_filt[typ>>5] |= 1 << (uint32(typ) & 31)\n}\n\nfunc (f *icmpv6Filter) block(typ ICMPType) {\n\tf.X__icmp6_filt[typ>>5] &^= 1 << (uint32(typ) & 31)\n}\n\nfunc (f *icmpv6Filter) setAll(block bool) {\n\tfor i := range f.X__icmp6_filt {\n\t\tif block {\n\t\t\tf.X__icmp6_filt[i] = 0\n\t\t} else {\n\t\t\tf.X__icmp6_filt[i] = 1<<32 - 1\n\t\t}\n\t}\n}\n\nfunc (f *icmpv6Filter) willBlock(typ ICMPType) bool {\n\treturn f.X__icmp6_filt[typ>>5]&(1<<(uint32(typ)&31)) == 0\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/icmp_stub.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows\n\npackage ipv6\n\ntype icmpv6Filter struct {\n}\n\nfunc (f *icmpv6Filter) accept(typ ICMPType) {\n}\n\nfunc (f *icmpv6Filter) block(typ ICMPType) {\n}\n\nfunc (f *icmpv6Filter) setAll(block bool) {\n}\n\nfunc (f *icmpv6Filter) willBlock(typ ICMPType) bool {\n\treturn false\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/icmp_test.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6_test\n\nimport (\n\t\"net\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nvar icmpStringTests = []struct {\n\tin  ipv6.ICMPType\n\tout string\n}{\n\t{ipv6.ICMPTypeDestinationUnreachable, \"destination unreachable\"},\n\n\t{256, \"<nil>\"},\n}\n\nfunc TestICMPString(t *testing.T) {\n\tfor _, tt := range icmpStringTests {\n\t\ts := tt.in.String()\n\t\tif s != tt.out {\n\t\t\tt.Errorf(\"got %s; want %s\", s, tt.out)\n\t\t}\n\t}\n}\n\nfunc TestICMPFilter(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\n\tvar f ipv6.ICMPFilter\n\tfor _, toggle := range []bool{false, true} {\n\t\tf.SetAll(toggle)\n\t\tfor _, typ := range []ipv6.ICMPType{\n\t\t\tipv6.ICMPTypeDestinationUnreachable,\n\t\t\tipv6.ICMPTypeEchoReply,\n\t\t\tipv6.ICMPTypeNeighborSolicitation,\n\t\t\tipv6.ICMPTypeDuplicateAddressConfirmation,\n\t\t} {\n\t\t\tf.Accept(typ)\n\t\t\tif f.WillBlock(typ) {\n\t\t\t\tt.Errorf(\"ipv6.ICMPFilter.Set(%v, false) failed\", typ)\n\t\t\t}\n\t\t\tf.Block(typ)\n\t\t\tif !f.WillBlock(typ) {\n\t\t\t\tt.Errorf(\"ipv6.ICMPFilter.Set(%v, true) failed\", typ)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestSetICMPFilter(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not supported\")\n\t}\n\tif m, ok := nettest.SupportsRawIPSocket(); !ok {\n\t\tt.Skip(m)\n\t}\n\n\tc, err := net.ListenPacket(\"ip6:ipv6-icmp\", \"::1\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\tp := ipv6.NewPacketConn(c)\n\n\tvar f 
ipv6.ICMPFilter\n\tf.SetAll(true)\n\tf.Accept(ipv6.ICMPTypeEchoRequest)\n\tf.Accept(ipv6.ICMPTypeEchoReply)\n\tif err := p.SetICMPFilter(&f); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tkf, err := p.ICMPFilter()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !reflect.DeepEqual(kf, &f) {\n\t\tt.Fatalf(\"got %#v; want %#v\", kf, f)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/icmp_windows.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nfunc (f *icmpv6Filter) accept(typ ICMPType) {\n\t// TODO(mikio): implement this\n}\n\nfunc (f *icmpv6Filter) block(typ ICMPType) {\n\t// TODO(mikio): implement this\n}\n\nfunc (f *icmpv6Filter) setAll(block bool) {\n\t// TODO(mikio): implement this\n}\n\nfunc (f *icmpv6Filter) willBlock(typ ICMPType) bool {\n\t// TODO(mikio): implement this\n\treturn false\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/mocktransponder_test.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6_test\n\nimport (\n\t\"net\"\n\t\"testing\"\n)\n\nfunc connector(t *testing.T, network, addr string, done chan<- bool) {\n\tdefer func() { done <- true }()\n\n\tc, err := net.Dial(network, addr)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tc.Close()\n}\n\nfunc acceptor(t *testing.T, ln net.Listener, done chan<- bool) {\n\tdefer func() { done <- true }()\n\n\tc, err := ln.Accept()\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tc.Close()\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/multicast_test.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6_test\n\nimport (\n\t\"bytes\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org/x/net/icmp\"\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nvar packetConnReadWriteMulticastUDPTests = []struct {\n\taddr     string\n\tgrp, src *net.UDPAddr\n}{\n\t{\"[ff02::]:0\", &net.UDPAddr{IP: net.ParseIP(\"ff02::114\")}, nil}, // see RFC 4727\n\n\t{\"[ff30::8000:0]:0\", &net.UDPAddr{IP: net.ParseIP(\"ff30::8000:1\")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771\n}\n\nfunc TestPacketConnReadWriteMulticastUDP(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not supported\")\n\t}\n\tif !nettest.SupportsIPv6MulticastDeliveryOnLoopback() {\n\t\tt.Skipf(\"multicast delivery doesn't work correctly on %s\", runtime.GOOS)\n\t}\n\tifi := nettest.RoutedInterface(\"ip6\", net.FlagUp|net.FlagMulticast|net.FlagLoopback)\n\tif ifi == nil {\n\t\tt.Skipf(\"not available on %s\", runtime.GOOS)\n\t}\n\n\tfor _, tt := range packetConnReadWriteMulticastUDPTests {\n\t\tc, err := net.ListenPacket(\"udp6\", tt.addr)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c.Close()\n\n\t\tgrp := *tt.grp\n\t\tgrp.Port = c.LocalAddr().(*net.UDPAddr).Port\n\t\tp := ipv6.NewPacketConn(c)\n\t\tdefer p.Close()\n\t\tif tt.src == nil {\n\t\t\tif err := p.JoinGroup(ifi, &grp); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdefer p.LeaveGroup(ifi, &grp)\n\t\t} else {\n\t\t\tif err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil {\n\t\t\t\tswitch runtime.GOOS {\n\t\t\t\tcase \"freebsd\", \"linux\":\n\t\t\t\tdefault: // platforms that don't support MLDv2 fail 
here\n\t\t\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdefer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src)\n\t\t}\n\t\tif err := p.SetMulticastInterface(ifi); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif _, err := p.MulticastInterface(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := p.SetMulticastLoopback(true); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif _, err := p.MulticastLoopback(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tcm := ipv6.ControlMessage{\n\t\t\tTrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,\n\t\t\tSrc:          net.IPv6loopback,\n\t\t\tIfIndex:      ifi.Index,\n\t\t}\n\t\tcf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU\n\t\twb := []byte(\"HELLO-R-U-THERE\")\n\n\t\tfor i, toggle := range []bool{true, false, true} {\n\t\t\tif err := p.SetControlMessage(cf, toggle); err != nil {\n\t\t\t\tif nettest.ProtocolNotSupported(err) {\n\t\t\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tcm.HopLimit = i + 1\n\t\t\tif n, err := p.WriteTo(wb, &cm, &grp); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else if n != len(wb) {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\trb := make([]byte, 128)\n\t\t\tif n, _, _, err := p.ReadFrom(rb); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else if !bytes.Equal(rb[:n], wb) {\n\t\t\t\tt.Fatalf(\"got %v; want %v\", rb[:n], wb)\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar packetConnReadWriteMulticastICMPTests = []struct {\n\tgrp, src *net.IPAddr\n}{\n\t{&net.IPAddr{IP: net.ParseIP(\"ff02::114\")}, nil}, // see RFC 4727\n\n\t{&net.IPAddr{IP: net.ParseIP(\"ff30::8000:1\")}, &net.IPAddr{IP: net.IPv6loopback}}, // see RFC 5771\n}\n\nfunc TestPacketConnReadWriteMulticastICMP(t *testing.T) 
{\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not supported\")\n\t}\n\tif !nettest.SupportsIPv6MulticastDeliveryOnLoopback() {\n\t\tt.Skipf(\"multicast delivery doesn't work correctly on %s\", runtime.GOOS)\n\t}\n\tif m, ok := nettest.SupportsRawIPSocket(); !ok {\n\t\tt.Skip(m)\n\t}\n\tifi := nettest.RoutedInterface(\"ip6\", net.FlagUp|net.FlagMulticast|net.FlagLoopback)\n\tif ifi == nil {\n\t\tt.Skipf(\"not available on %s\", runtime.GOOS)\n\t}\n\n\tfor _, tt := range packetConnReadWriteMulticastICMPTests {\n\t\tc, err := net.ListenPacket(\"ip6:ipv6-icmp\", \"::\")\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c.Close()\n\n\t\tpshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, tt.grp.IP)\n\t\tp := ipv6.NewPacketConn(c)\n\t\tdefer p.Close()\n\t\tif tt.src == nil {\n\t\t\tif err := p.JoinGroup(ifi, tt.grp); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdefer p.LeaveGroup(ifi, tt.grp)\n\t\t} else {\n\t\t\tif err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil {\n\t\t\t\tswitch runtime.GOOS {\n\t\t\t\tcase \"freebsd\", \"linux\":\n\t\t\t\tdefault: // platforms that don't support MLDv2 fail here\n\t\t\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdefer p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src)\n\t\t}\n\t\tif err := p.SetMulticastInterface(ifi); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif _, err := p.MulticastInterface(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := p.SetMulticastLoopback(true); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif _, err := p.MulticastLoopback(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tcm := ipv6.ControlMessage{\n\t\t\tTrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,\n\t\t\tSrc:          net.IPv6loopback,\n\t\t\tIfIndex:      ifi.Index,\n\t\t}\n\t\tcf := 
ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU\n\n\t\tvar f ipv6.ICMPFilter\n\t\tf.SetAll(true)\n\t\tf.Accept(ipv6.ICMPTypeEchoReply)\n\t\tif err := p.SetICMPFilter(&f); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tvar psh []byte\n\t\tfor i, toggle := range []bool{true, false, true} {\n\t\t\tif toggle {\n\t\t\t\tpsh = nil\n\t\t\t\tif err := p.SetChecksum(true, 2); err != nil {\n\t\t\t\t\t// Solaris never allows to\n\t\t\t\t\t// modify ICMP properties.\n\t\t\t\t\tif runtime.GOOS != \"solaris\" {\n\t\t\t\t\t\tt.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tpsh = pshicmp\n\t\t\t\t// Some platforms never allow to\n\t\t\t\t// disable the kernel checksum\n\t\t\t\t// processing.\n\t\t\t\tp.SetChecksum(false, -1)\n\t\t\t}\n\t\t\twb, err := (&icmp.Message{\n\t\t\t\tType: ipv6.ICMPTypeEchoRequest, Code: 0,\n\t\t\t\tBody: &icmp.Echo{\n\t\t\t\t\tID: os.Getpid() & 0xffff, Seq: i + 1,\n\t\t\t\t\tData: []byte(\"HELLO-R-U-THERE\"),\n\t\t\t\t},\n\t\t\t}).Marshal(psh)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif err := p.SetControlMessage(cf, toggle); err != nil {\n\t\t\t\tif nettest.ProtocolNotSupported(err) {\n\t\t\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tcm.HopLimit = i + 1\n\t\t\tif n, err := p.WriteTo(wb, &cm, tt.grp); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else if n != len(wb) {\n\t\t\t\tt.Fatalf(\"got %v; want %v\", n, len(wb))\n\t\t\t}\n\t\t\trb := make([]byte, 128)\n\t\t\tif n, _, _, err := p.ReadFrom(rb); err != nil {\n\t\t\t\tswitch runtime.GOOS {\n\t\t\t\tcase \"darwin\": // older darwin kernels have some limitation on receiving icmp packet through raw socket\n\t\t\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tt.Fatal(err)\n\t\t\t} 
else {\n\t\t\t\tif m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t} else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 {\n\t\t\t\t\tt.Fatalf(\"got type=%v, code=%v; want type=%v, code=%v\", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/multicastlistener_test.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6_test\n\nimport (\n\t\"net\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nvar udpMultipleGroupListenerTests = []net.Addr{\n\t&net.UDPAddr{IP: net.ParseIP(\"ff02::114\")}, // see RFC 4727\n\t&net.UDPAddr{IP: net.ParseIP(\"ff02::1:114\")},\n\t&net.UDPAddr{IP: net.ParseIP(\"ff02::2:114\")},\n}\n\nfunc TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not supported\")\n\t}\n\n\tfor _, gaddr := range udpMultipleGroupListenerTests {\n\t\tc, err := net.ListenPacket(\"udp6\", \"[::]:0\") // wildcard address with non-reusable port\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c.Close()\n\n\t\tp := ipv6.NewPacketConn(c)\n\t\tvar mift []*net.Interface\n\n\t\tift, err := net.Interfaces()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfor i, ifi := range ift {\n\t\t\tif _, ok := nettest.IsMulticastCapable(\"ip6\", &ifi); !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := p.JoinGroup(&ifi, gaddr); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tmift = append(mift, &ift[i])\n\t\t}\n\t\tfor _, ifi := range mift {\n\t\t\tif err := p.LeaveGroup(ifi, gaddr); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not supported\")\n\t}\n\n\tfor _, gaddr := range udpMultipleGroupListenerTests {\n\t\tc1, err := net.ListenPacket(\"udp6\", \"[ff02::]:0\") // wildcard address with reusable 
port\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c1.Close()\n\t\t_, port, err := net.SplitHostPort(c1.LocalAddr().String())\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tc2, err := net.ListenPacket(\"udp6\", net.JoinHostPort(\"ff02::\", port)) // wildcard address with reusable port\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c2.Close()\n\n\t\tvar ps [2]*ipv6.PacketConn\n\t\tps[0] = ipv6.NewPacketConn(c1)\n\t\tps[1] = ipv6.NewPacketConn(c2)\n\t\tvar mift []*net.Interface\n\n\t\tift, err := net.Interfaces()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfor i, ifi := range ift {\n\t\t\tif _, ok := nettest.IsMulticastCapable(\"ip6\", &ifi); !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, p := range ps {\n\t\t\t\tif err := p.JoinGroup(&ifi, gaddr); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tmift = append(mift, &ift[i])\n\t\t}\n\t\tfor _, ifi := range mift {\n\t\t\tfor _, p := range ps {\n\t\t\t\tif err := p.LeaveGroup(ifi, gaddr); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not supported\")\n\t}\n\n\tgaddr := net.IPAddr{IP: net.ParseIP(\"ff02::114\")} // see RFC 4727\n\ttype ml struct {\n\t\tc   *ipv6.PacketConn\n\t\tifi *net.Interface\n\t}\n\tvar mlt []*ml\n\n\tift, err := net.Interfaces()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tport := \"0\"\n\tfor i, ifi := range ift {\n\t\tip, ok := nettest.IsMulticastCapable(\"ip6\", &ifi)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tc, err := net.ListenPacket(\"udp6\", net.JoinHostPort(ip.String()+\"%\"+ifi.Name, port)) // unicast address with non-reusable port\n\t\tif err != nil {\n\t\t\t// The listen may fail when the serivce is\n\t\t\t// already in use, but it's fine because the\n\t\t\t// 
purpose of this is not to test the\n\t\t\t// bookkeeping of IP control block inside the\n\t\t\t// kernel.\n\t\t\tt.Log(err)\n\t\t\tcontinue\n\t\t}\n\t\tdefer c.Close()\n\t\tif port == \"0\" {\n\t\t\t_, port, err = net.SplitHostPort(c.LocalAddr().String())\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t\tp := ipv6.NewPacketConn(c)\n\t\tif err := p.JoinGroup(&ifi, &gaddr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tmlt = append(mlt, &ml{p, &ift[i]})\n\t}\n\tfor _, m := range mlt {\n\t\tif err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestIPSinglePacketConnWithSingleGroupListener(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not supported\")\n\t}\n\tif m, ok := nettest.SupportsRawIPSocket(); !ok {\n\t\tt.Skip(m)\n\t}\n\n\tc, err := net.ListenPacket(\"ip6:ipv6-icmp\", \"::\") // wildcard address\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\tp := ipv6.NewPacketConn(c)\n\tgaddr := net.IPAddr{IP: net.ParseIP(\"ff02::114\")} // see RFC 4727\n\tvar mift []*net.Interface\n\n\tift, err := net.Interfaces()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor i, ifi := range ift {\n\t\tif _, ok := nettest.IsMulticastCapable(\"ip6\", &ifi); !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif err := p.JoinGroup(&ifi, &gaddr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tmift = append(mift, &ift[i])\n\t}\n\tfor _, ifi := range mift {\n\t\tif err := p.LeaveGroup(ifi, &gaddr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestIPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"darwin\", \"dragonfly\", \"openbsd\": // platforms that return fe80::1%lo0: bind: can't assign requested address\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", 
runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not supported\")\n\t}\n\tif m, ok := nettest.SupportsRawIPSocket(); !ok {\n\t\tt.Skip(m)\n\t}\n\n\tgaddr := net.IPAddr{IP: net.ParseIP(\"ff02::114\")} // see RFC 4727\n\ttype ml struct {\n\t\tc   *ipv6.PacketConn\n\t\tifi *net.Interface\n\t}\n\tvar mlt []*ml\n\n\tift, err := net.Interfaces()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor i, ifi := range ift {\n\t\tip, ok := nettest.IsMulticastCapable(\"ip6\", &ifi)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tc, err := net.ListenPacket(\"ip6:ipv6-icmp\", ip.String()+\"%\"+ifi.Name) // unicast address\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv6.NewPacketConn(c)\n\t\tif err := p.JoinGroup(&ifi, &gaddr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tmlt = append(mlt, &ml{p, &ift[i]})\n\t}\n\tfor _, m := range mlt {\n\t\tif err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/multicastsockopt_test.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6_test\n\nimport (\n\t\"net\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nvar packetConnMulticastSocketOptionTests = []struct {\n\tnet, proto, addr string\n\tgrp, src         net.Addr\n}{\n\t{\"udp6\", \"\", \"[ff02::]:0\", &net.UDPAddr{IP: net.ParseIP(\"ff02::114\")}, nil}, // see RFC 4727\n\t{\"ip6\", \":ipv6-icmp\", \"::\", &net.IPAddr{IP: net.ParseIP(\"ff02::115\")}, nil}, // see RFC 4727\n\n\t{\"udp6\", \"\", \"[ff30::8000:0]:0\", &net.UDPAddr{IP: net.ParseIP(\"ff30::8000:1\")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771\n\t{\"ip6\", \":ipv6-icmp\", \"::\", &net.IPAddr{IP: net.ParseIP(\"ff30::8000:2\")}, &net.IPAddr{IP: net.IPv6loopback}},        // see RFC 5771\n}\n\nfunc TestPacketConnMulticastSocketOptions(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not supported\")\n\t}\n\tifi := nettest.RoutedInterface(\"ip6\", net.FlagUp|net.FlagMulticast|net.FlagLoopback)\n\tif ifi == nil {\n\t\tt.Skipf(\"not available on %s\", runtime.GOOS)\n\t}\n\n\tm, ok := nettest.SupportsRawIPSocket()\n\tfor _, tt := range packetConnMulticastSocketOptionTests {\n\t\tif tt.net == \"ip6\" && !ok {\n\t\t\tt.Log(m)\n\t\t\tcontinue\n\t\t}\n\t\tc, err := net.ListenPacket(tt.net+tt.proto, tt.addr)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv6.NewPacketConn(c)\n\t\tdefer p.Close()\n\n\t\tif tt.src == nil {\n\t\t\ttestMulticastSocketOptions(t, p, ifi, tt.grp)\n\t\t} else {\n\t\t\ttestSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src)\n\t\t}\n\t}\n}\n\ntype testIPv6MulticastConn interface {\n\tMulticastHopLimit() (int, 
error)\n\tSetMulticastHopLimit(ttl int) error\n\tMulticastLoopback() (bool, error)\n\tSetMulticastLoopback(bool) error\n\tJoinGroup(*net.Interface, net.Addr) error\n\tLeaveGroup(*net.Interface, net.Addr) error\n\tJoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error\n\tLeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error\n\tExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error\n\tIncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error\n}\n\nfunc testMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp net.Addr) {\n\tconst hoplim = 255\n\tif err := c.SetMulticastHopLimit(hoplim); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif v, err := c.MulticastHopLimit(); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t} else if v != hoplim {\n\t\tt.Errorf(\"got %v; want %v\", v, hoplim)\n\t\treturn\n\t}\n\n\tfor _, toggle := range []bool{true, false} {\n\t\tif err := c.SetMulticastLoopback(toggle); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif v, err := c.MulticastLoopback(); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t} else if v != toggle {\n\t\t\tt.Errorf(\"got %v; want %v\", v, toggle)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif err := c.JoinGroup(ifi, grp); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif err := c.LeaveGroup(ifi, grp); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n}\n\nfunc testSourceSpecificMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp, src net.Addr) {\n\t// MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP\n\tif err := c.JoinGroup(ifi, grp); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil {\n\t\tswitch runtime.GOOS {\n\t\tcase \"freebsd\", \"linux\":\n\t\tdefault: // platforms that don't support MLDv2 fail here\n\t\t\tt.Logf(\"not supported on %s\", 
runtime.GOOS)\n\t\t\treturn\n\t\t}\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif err := c.IncludeSourceSpecificGroup(ifi, grp, src); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif err := c.LeaveGroup(ifi, grp); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\t// MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP\n\tif err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\t// MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP\n\tif err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif err := c.LeaveGroup(ifi, grp); err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/payload.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nimport (\n\t\"net\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\n// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo\n// methods of PacketConn is not implemented.\n\n// A payloadHandler represents the IPv6 datagram payload handler.\ntype payloadHandler struct {\n\tnet.PacketConn\n\t*socket.Conn\n\trawOpt\n}\n\nfunc (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil }\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/payload_cmsg.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !nacl,!plan9,!windows\n\npackage ipv6\n\nimport (\n\t\"net\"\n\t\"syscall\"\n)\n\n// ReadFrom reads a payload of the received IPv6 datagram, from the\n// endpoint c, copying the payload into b. It returns the number of\n// bytes copied into b, the control message cm and the source address\n// src of the received datagram.\nfunc (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) {\n\tif !c.ok() {\n\t\treturn 0, nil, nil, syscall.EINVAL\n\t}\n\treturn c.readFrom(b)\n}\n\n// WriteTo writes a payload of the IPv6 datagram, to the destination\n// address dst through the endpoint c, copying the payload from b. It\n// returns the number of bytes written. The control message cm allows\n// the IPv6 header fields and the datagram path to be specified. The\n// cm may be nil if control of the outgoing datagram is not required.\nfunc (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\treturn c.writeTo(b, cm, dst)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.9\n// +build !nacl,!plan9,!windows\n\npackage ipv6\n\nimport \"net\"\n\nfunc (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) {\n\tc.rawOpt.RLock()\n\toob := NewControlMessage(c.rawOpt.cflags)\n\tc.rawOpt.RUnlock()\n\tvar nn int\n\tswitch c := c.PacketConn.(type) {\n\tcase *net.UDPConn:\n\t\tif n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil {\n\t\t\treturn 0, nil, nil, err\n\t\t}\n\tcase *net.IPConn:\n\t\tif n, nn, _, src, err = c.ReadMsgIP(b, oob); err != nil {\n\t\t\treturn 0, nil, nil, err\n\t\t}\n\tdefault:\n\t\treturn 0, nil, nil, &net.OpError{Op: \"read\", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType}\n\t}\n\tif nn > 0 {\n\t\tcm = new(ControlMessage)\n\t\tif err = cm.Parse(oob[:nn]); err != nil {\n\t\t\treturn 0, nil, nil, &net.OpError{Op: \"read\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}\n\t\t}\n\t}\n\tif cm != nil {\n\t\tcm.Src = netAddrToIP16(src)\n\t}\n\treturn\n}\n\nfunc (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) {\n\toob := cm.Marshal()\n\tif dst == nil {\n\t\treturn 0, &net.OpError{Op: \"write\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress}\n\t}\n\tswitch c := c.PacketConn.(type) {\n\tcase *net.UDPConn:\n\t\tn, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr))\n\tcase *net.IPConn:\n\t\tn, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr))\n\tdefault:\n\t\treturn 0, &net.OpError{Op: \"write\", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType}\n\t}\n\treturn\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.9\n// +build !nacl,!plan9,!windows\n\npackage ipv6\n\nimport (\n\t\"net\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) {\n\tc.rawOpt.RLock()\n\tm := socket.Message{\n\t\tBuffers: [][]byte{b},\n\t\tOOB:     NewControlMessage(c.rawOpt.cflags),\n\t}\n\tc.rawOpt.RUnlock()\n\tswitch c.PacketConn.(type) {\n\tcase *net.UDPConn:\n\t\tif err := c.RecvMsg(&m, 0); err != nil {\n\t\t\treturn 0, nil, nil, &net.OpError{Op: \"read\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}\n\t\t}\n\tcase *net.IPConn:\n\t\tif err := c.RecvMsg(&m, 0); err != nil {\n\t\t\treturn 0, nil, nil, &net.OpError{Op: \"read\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}\n\t\t}\n\tdefault:\n\t\treturn 0, nil, nil, &net.OpError{Op: \"read\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType}\n\t}\n\tvar cm *ControlMessage\n\tif m.NN > 0 {\n\t\tcm = new(ControlMessage)\n\t\tif err := cm.Parse(m.OOB[:m.NN]); err != nil {\n\t\t\treturn 0, nil, nil, &net.OpError{Op: \"read\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err}\n\t\t}\n\t\tcm.Src = netAddrToIP16(m.Addr)\n\t}\n\treturn m.N, cm, m.Addr, nil\n}\n\nfunc (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) {\n\tm := socket.Message{\n\t\tBuffers: [][]byte{b},\n\t\tOOB:     cm.Marshal(),\n\t\tAddr:    dst,\n\t}\n\terr := c.SendMsg(&m, 0)\n\tif err != nil {\n\t\terr = &net.OpError{Op: \"write\", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err}\n\t}\n\treturn m.N, err\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/payload_nocmsg.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build nacl plan9 windows\n\npackage ipv6\n\nimport (\n\t\"net\"\n\t\"syscall\"\n)\n\n// ReadFrom reads a payload of the received IPv6 datagram, from the\n// endpoint c, copying the payload into b. It returns the number of\n// bytes copied into b, the control message cm and the source address\n// src of the received datagram.\nfunc (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) {\n\tif !c.ok() {\n\t\treturn 0, nil, nil, syscall.EINVAL\n\t}\n\tif n, src, err = c.PacketConn.ReadFrom(b); err != nil {\n\t\treturn 0, nil, nil, err\n\t}\n\treturn\n}\n\n// WriteTo writes a payload of the IPv6 datagram, to the destination\n// address dst through the endpoint c, copying the payload from b. It\n// returns the number of bytes written. The control message cm allows\n// the IPv6 header fields and the datagram path to be specified. The\n// cm may be nil if control of the outgoing datagram is not required.\nfunc (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tif dst == nil {\n\t\treturn 0, errMissingAddress\n\t}\n\treturn c.PacketConn.WriteTo(b, dst)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.9\n\npackage ipv6_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nfunc BenchmarkPacketConnReadWriteUnicast(b *testing.B) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tb.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\n\tpayload := []byte(\"HELLO-R-U-THERE\")\n\tiph := []byte{\n\t\t0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01,\n\t\t0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,\n\t\t0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,\n\t}\n\tgreh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00}\n\tdatagram := append(greh, append(iph, payload...)...)\n\tbb := make([]byte, 128)\n\tcm := ipv6.ControlMessage{\n\t\tTrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,\n\t\tHopLimit:     1,\n\t\tSrc:          net.IPv6loopback,\n\t}\n\tif ifi := nettest.RoutedInterface(\"ip6\", net.FlagUp|net.FlagLoopback); ifi != nil {\n\t\tcm.IfIndex = ifi.Index\n\t}\n\n\tb.Run(\"UDP\", func(b *testing.B) {\n\t\tc, err := nettest.NewLocalPacketListener(\"udp6\")\n\t\tif err != nil {\n\t\t\tb.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv6.NewPacketConn(c)\n\t\tdst := c.LocalAddr()\n\t\tcf := ipv6.FlagHopLimit | ipv6.FlagInterface\n\t\tif err := p.SetControlMessage(cf, true); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tb.Run(\"Net\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := c.WriteTo(payload, dst); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, _, err := 
c.ReadFrom(bb); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tb.Run(\"ToFrom\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := p.WriteTo(payload, &cm, dst); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, _, _, err := p.ReadFrom(bb); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t})\n\tb.Run(\"IP\", func(b *testing.B) {\n\t\tswitch runtime.GOOS {\n\t\tcase \"netbsd\":\n\t\t\tb.Skip(\"need to configure gre on netbsd\")\n\t\tcase \"openbsd\":\n\t\t\tb.Skip(\"net.inet.gre.allow=0 by default on openbsd\")\n\t\t}\n\n\t\tc, err := net.ListenPacket(fmt.Sprintf(\"ip6:%d\", iana.ProtocolGRE), \"::1\")\n\t\tif err != nil {\n\t\t\tb.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv6.NewPacketConn(c)\n\t\tdst := c.LocalAddr()\n\t\tcf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU\n\t\tif err := p.SetControlMessage(cf, true); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tb.Run(\"Net\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := c.WriteTo(datagram, dst); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, _, err := c.ReadFrom(bb); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tb.Run(\"ToFrom\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := p.WriteTo(datagram, &cm, dst); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, _, _, err := p.ReadFrom(bb); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc TestPacketConnConcurrentReadWriteUnicast(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\n\tpayload := []byte(\"HELLO-R-U-THERE\")\n\tiph := []byte{\n\t\t0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01,\n\t\t0x20, 0x01, 0x0d, 0xb8, 
0x00, 0x01, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,\n\t\t0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,\n\t}\n\tgreh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00}\n\tdatagram := append(greh, append(iph, payload...)...)\n\n\tt.Run(\"UDP\", func(t *testing.T) {\n\t\tc, err := nettest.NewLocalPacketListener(\"udp6\")\n\t\tif err != nil {\n\t\t\tt.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv6.NewPacketConn(c)\n\t\tt.Run(\"ToFrom\", func(t *testing.T) {\n\t\t\ttestPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr())\n\t\t})\n\t})\n\tt.Run(\"IP\", func(t *testing.T) {\n\t\tswitch runtime.GOOS {\n\t\tcase \"netbsd\":\n\t\t\tt.Skip(\"need to configure gre on netbsd\")\n\t\tcase \"openbsd\":\n\t\t\tt.Skip(\"net.inet.gre.allow=0 by default on openbsd\")\n\t\t}\n\n\t\tc, err := net.ListenPacket(fmt.Sprintf(\"ip6:%d\", iana.ProtocolGRE), \"::1\")\n\t\tif err != nil {\n\t\t\tt.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv6.NewPacketConn(c)\n\t\tt.Run(\"ToFrom\", func(t *testing.T) {\n\t\t\ttestPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr())\n\t\t})\n\t})\n}\n\nfunc testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv6.PacketConn, data []byte, dst net.Addr) {\n\tifi := nettest.RoutedInterface(\"ip6\", net.FlagUp|net.FlagLoopback)\n\tcf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU\n\n\tif err := p.SetControlMessage(cf, true); err != nil { // probe before test\n\t\tif nettest.ProtocolNotSupported(err) {\n\t\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t\t}\n\t\tt.Fatal(err)\n\t}\n\n\tvar wg sync.WaitGroup\n\treader := func() {\n\t\tdefer wg.Done()\n\t\tb := make([]byte, 128)\n\t\tn, cm, _, err := p.ReadFrom(b)\n\t\tif err != nil 
{\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif !bytes.Equal(b[:n], data) {\n\t\t\tt.Errorf(\"got %#v; want %#v\", b[:n], data)\n\t\t\treturn\n\t\t}\n\t\ts := cm.String()\n\t\tif strings.Contains(s, \",\") {\n\t\t\tt.Errorf(\"should be space-separated values: %s\", s)\n\t\t\treturn\n\t\t}\n\t}\n\twriter := func(toggle bool) {\n\t\tdefer wg.Done()\n\t\tcm := ipv6.ControlMessage{\n\t\t\tTrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,\n\t\t\tHopLimit:     1,\n\t\t\tSrc:          net.IPv6loopback,\n\t\t}\n\t\tif ifi != nil {\n\t\t\tcm.IfIndex = ifi.Index\n\t\t}\n\t\tif err := p.SetControlMessage(cf, toggle); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tn, err := p.WriteTo(data, &cm, dst)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif n != len(data) {\n\t\t\tt.Errorf(\"got %d; want %d\", n, len(data))\n\t\t\treturn\n\t\t}\n\t}\n\n\tconst N = 10\n\twg.Add(N)\n\tfor i := 0; i < N; i++ {\n\t\tgo reader()\n\t}\n\twg.Add(2 * N)\n\tfor i := 0; i < 2*N; i++ {\n\t\tgo writer(i%2 != 0)\n\n\t}\n\twg.Add(N)\n\tfor i := 0; i < N; i++ {\n\t\tgo reader()\n\t}\n\twg.Wait()\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.9\n\npackage ipv6_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nfunc BenchmarkPacketConnReadWriteUnicast(b *testing.B) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tb.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\n\tpayload := []byte(\"HELLO-R-U-THERE\")\n\tiph := []byte{\n\t\t0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01,\n\t\t0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,\n\t\t0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,\n\t}\n\tgreh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00}\n\tdatagram := append(greh, append(iph, payload...)...)\n\tbb := make([]byte, 128)\n\tcm := ipv6.ControlMessage{\n\t\tTrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,\n\t\tHopLimit:     1,\n\t\tSrc:          net.IPv6loopback,\n\t}\n\tif ifi := nettest.RoutedInterface(\"ip6\", net.FlagUp|net.FlagLoopback); ifi != nil {\n\t\tcm.IfIndex = ifi.Index\n\t}\n\n\tb.Run(\"UDP\", func(b *testing.B) {\n\t\tc, err := nettest.NewLocalPacketListener(\"udp6\")\n\t\tif err != nil {\n\t\t\tb.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv6.NewPacketConn(c)\n\t\tdst := c.LocalAddr()\n\t\tcf := ipv6.FlagHopLimit | ipv6.FlagInterface\n\t\tif err := p.SetControlMessage(cf, true); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\twms := []ipv6.Message{\n\t\t\t{\n\t\t\t\tBuffers: [][]byte{payload},\n\t\t\t\tAddr:    dst,\n\t\t\t\tOOB:     cm.Marshal(),\n\t\t\t},\n\t\t}\n\t\trms := 
[]ipv6.Message{\n\t\t\t{\n\t\t\t\tBuffers: [][]byte{bb},\n\t\t\t\tOOB:     ipv6.NewControlMessage(cf),\n\t\t\t},\n\t\t}\n\t\tb.Run(\"Net\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := c.WriteTo(payload, dst); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, _, err := c.ReadFrom(bb); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tb.Run(\"ToFrom\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := p.WriteTo(payload, &cm, dst); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, _, _, err := p.ReadFrom(bb); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tb.Run(\"Batch\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := p.WriteBatch(wms, 0); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, err := p.ReadBatch(rms, 0); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t})\n\tb.Run(\"IP\", func(b *testing.B) {\n\t\tswitch runtime.GOOS {\n\t\tcase \"netbsd\":\n\t\t\tb.Skip(\"need to configure gre on netbsd\")\n\t\tcase \"openbsd\":\n\t\t\tb.Skip(\"net.inet.gre.allow=0 by default on openbsd\")\n\t\t}\n\n\t\tc, err := net.ListenPacket(fmt.Sprintf(\"ip6:%d\", iana.ProtocolGRE), \"::1\")\n\t\tif err != nil {\n\t\t\tb.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv6.NewPacketConn(c)\n\t\tdst := c.LocalAddr()\n\t\tcf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU\n\t\tif err := p.SetControlMessage(cf, true); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\twms := []ipv6.Message{\n\t\t\t{\n\t\t\t\tBuffers: [][]byte{datagram},\n\t\t\t\tAddr:    dst,\n\t\t\t\tOOB:     cm.Marshal(),\n\t\t\t},\n\t\t}\n\t\trms := []ipv6.Message{\n\t\t\t{\n\t\t\t\tBuffers: [][]byte{bb},\n\t\t\t\tOOB:     ipv6.NewControlMessage(cf),\n\t\t\t},\n\t\t}\n\t\tb.Run(\"Net\", func(b *testing.B) 
{\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := c.WriteTo(datagram, dst); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, _, err := c.ReadFrom(bb); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tb.Run(\"ToFrom\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := p.WriteTo(datagram, &cm, dst); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, _, _, err := p.ReadFrom(bb); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tb.Run(\"Batch\", func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tif _, err := p.WriteBatch(wms, 0); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif _, err := p.ReadBatch(rms, 0); err != nil {\n\t\t\t\t\tb.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc TestPacketConnConcurrentReadWriteUnicast(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\n\tpayload := []byte(\"HELLO-R-U-THERE\")\n\tiph := []byte{\n\t\t0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01,\n\t\t0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,\n\t\t0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00,\n\t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,\n\t}\n\tgreh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00}\n\tdatagram := append(greh, append(iph, payload...)...)\n\n\tt.Run(\"UDP\", func(t *testing.T) {\n\t\tc, err := nettest.NewLocalPacketListener(\"udp6\")\n\t\tif err != nil {\n\t\t\tt.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv6.NewPacketConn(c)\n\t\tt.Run(\"ToFrom\", func(t *testing.T) {\n\t\t\ttestPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), false)\n\t\t})\n\t\tt.Run(\"Batch\", func(t *testing.T) {\n\t\t\ttestPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), 
true)\n\t\t})\n\t})\n\tt.Run(\"IP\", func(t *testing.T) {\n\t\tswitch runtime.GOOS {\n\t\tcase \"netbsd\":\n\t\t\tt.Skip(\"need to configure gre on netbsd\")\n\t\tcase \"openbsd\":\n\t\t\tt.Skip(\"net.inet.gre.allow=0 by default on openbsd\")\n\t\t}\n\n\t\tc, err := net.ListenPacket(fmt.Sprintf(\"ip6:%d\", iana.ProtocolGRE), \"::1\")\n\t\tif err != nil {\n\t\t\tt.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t\t}\n\t\tdefer c.Close()\n\t\tp := ipv6.NewPacketConn(c)\n\t\tt.Run(\"ToFrom\", func(t *testing.T) {\n\t\t\ttestPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), false)\n\t\t})\n\t\tt.Run(\"Batch\", func(t *testing.T) {\n\t\t\ttestPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), true)\n\t\t})\n\t})\n}\n\nfunc testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv6.PacketConn, data []byte, dst net.Addr, batch bool) {\n\tifi := nettest.RoutedInterface(\"ip6\", net.FlagUp|net.FlagLoopback)\n\tcf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU\n\n\tif err := p.SetControlMessage(cf, true); err != nil { // probe before test\n\t\tif nettest.ProtocolNotSupported(err) {\n\t\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t\t}\n\t\tt.Fatal(err)\n\t}\n\n\tvar wg sync.WaitGroup\n\treader := func() {\n\t\tdefer wg.Done()\n\t\tb := make([]byte, 128)\n\t\tn, cm, _, err := p.ReadFrom(b)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif !bytes.Equal(b[:n], data) {\n\t\t\tt.Errorf(\"got %#v; want %#v\", b[:n], data)\n\t\t\treturn\n\t\t}\n\t\ts := cm.String()\n\t\tif strings.Contains(s, \",\") {\n\t\t\tt.Errorf(\"should be space-separated values: %s\", s)\n\t\t\treturn\n\t\t}\n\t}\n\tbatchReader := func() {\n\t\tdefer wg.Done()\n\t\tms := []ipv6.Message{\n\t\t\t{\n\t\t\t\tBuffers: [][]byte{make([]byte, 128)},\n\t\t\t\tOOB:     ipv6.NewControlMessage(cf),\n\t\t\t},\n\t\t}\n\t\tn, err := p.ReadBatch(ms, 0)\n\t\tif err 
!= nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif n != len(ms) {\n\t\t\tt.Errorf(\"got %d; want %d\", n, len(ms))\n\t\t\treturn\n\t\t}\n\t\tvar cm ipv6.ControlMessage\n\t\tif err := cm.Parse(ms[0].OOB[:ms[0].NN]); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tb := ms[0].Buffers[0][:ms[0].N]\n\t\tif !bytes.Equal(b, data) {\n\t\t\tt.Errorf(\"got %#v; want %#v\", b, data)\n\t\t\treturn\n\t\t}\n\t\ts := cm.String()\n\t\tif strings.Contains(s, \",\") {\n\t\t\tt.Errorf(\"should be space-separated values: %s\", s)\n\t\t\treturn\n\t\t}\n\t}\n\twriter := func(toggle bool) {\n\t\tdefer wg.Done()\n\t\tcm := ipv6.ControlMessage{\n\t\t\tTrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,\n\t\t\tHopLimit:     1,\n\t\t\tSrc:          net.IPv6loopback,\n\t\t}\n\t\tif ifi != nil {\n\t\t\tcm.IfIndex = ifi.Index\n\t\t}\n\t\tif err := p.SetControlMessage(cf, toggle); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tn, err := p.WriteTo(data, &cm, dst)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif n != len(data) {\n\t\t\tt.Errorf(\"got %d; want %d\", n, len(data))\n\t\t\treturn\n\t\t}\n\t}\n\tbatchWriter := func(toggle bool) {\n\t\tdefer wg.Done()\n\t\tcm := ipv6.ControlMessage{\n\t\t\tTrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,\n\t\t\tHopLimit:     1,\n\t\t\tSrc:          net.IPv6loopback,\n\t\t}\n\t\tif ifi != nil {\n\t\t\tcm.IfIndex = ifi.Index\n\t\t}\n\t\tif err := p.SetControlMessage(cf, toggle); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tms := []ipv6.Message{\n\t\t\t{\n\t\t\t\tBuffers: [][]byte{data},\n\t\t\t\tOOB:     cm.Marshal(),\n\t\t\t\tAddr:    dst,\n\t\t\t},\n\t\t}\n\t\tn, err := p.WriteBatch(ms, 0)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif n != len(ms) {\n\t\t\tt.Errorf(\"got %d; want %d\", n, len(ms))\n\t\t\treturn\n\t\t}\n\t\tif ms[0].N != len(data) {\n\t\t\tt.Errorf(\"got %d; want %d\", ms[0].N, len(data))\n\t\t\treturn\n\t\t}\n\t}\n\n\tconst N 
= 10\n\twg.Add(N)\n\tfor i := 0; i < N; i++ {\n\t\tif batch {\n\t\t\tgo batchReader()\n\t\t} else {\n\t\t\tgo reader()\n\t\t}\n\t}\n\twg.Add(2 * N)\n\tfor i := 0; i < 2*N; i++ {\n\t\tif batch {\n\t\t\tgo batchWriter(i%2 != 0)\n\t\t} else {\n\t\t\tgo writer(i%2 != 0)\n\t\t}\n\t}\n\twg.Add(N)\n\tfor i := 0; i < N; i++ {\n\t\tif batch {\n\t\t\tgo batchReader()\n\t\t} else {\n\t\t\tgo reader()\n\t\t}\n\t}\n\twg.Wait()\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/readwrite_test.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6_test\n\nimport (\n\t\"bytes\"\n\t\"net\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nfunc BenchmarkReadWriteUnicast(b *testing.B) {\n\tc, err := nettest.NewLocalPacketListener(\"udp6\")\n\tif err != nil {\n\t\tb.Skipf(\"not supported on %s/%s: %v\", runtime.GOOS, runtime.GOARCH, err)\n\t}\n\tdefer c.Close()\n\n\tdst := c.LocalAddr()\n\twb, rb := []byte(\"HELLO-R-U-THERE\"), make([]byte, 128)\n\n\tb.Run(\"NetUDP\", func(b *testing.B) {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tif _, err := c.WriteTo(wb, dst); err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\tif _, _, err := c.ReadFrom(rb); err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t}\n\t})\n\tb.Run(\"IPv6UDP\", func(b *testing.B) {\n\t\tp := ipv6.NewPacketConn(c)\n\t\tcf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU\n\t\tif err := p.SetControlMessage(cf, true); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tcm := ipv6.ControlMessage{\n\t\t\tTrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,\n\t\t\tHopLimit:     1,\n\t\t}\n\t\tifi := nettest.RoutedInterface(\"ip6\", net.FlagUp|net.FlagLoopback)\n\t\tif ifi != nil {\n\t\t\tcm.IfIndex = ifi.Index\n\t\t}\n\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tif _, err := p.WriteTo(wb, &cm, dst); err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\tif _, _, _, err := p.ReadFrom(rb); err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not 
supported\")\n\t}\n\n\tc, err := nettest.NewLocalPacketListener(\"udp6\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\tp := ipv6.NewPacketConn(c)\n\tdefer p.Close()\n\n\tdst := c.LocalAddr()\n\tifi := nettest.RoutedInterface(\"ip6\", net.FlagUp|net.FlagLoopback)\n\tcf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU\n\twb := []byte(\"HELLO-R-U-THERE\")\n\n\tif err := p.SetControlMessage(cf, true); err != nil { // probe before test\n\t\tif nettest.ProtocolNotSupported(err) {\n\t\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t\t}\n\t\tt.Fatal(err)\n\t}\n\n\tvar wg sync.WaitGroup\n\treader := func() {\n\t\tdefer wg.Done()\n\t\trb := make([]byte, 128)\n\t\tif n, cm, _, err := p.ReadFrom(rb); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t} else if !bytes.Equal(rb[:n], wb) {\n\t\t\tt.Errorf(\"got %v; want %v\", rb[:n], wb)\n\t\t\treturn\n\t\t} else {\n\t\t\ts := cm.String()\n\t\t\tif strings.Contains(s, \",\") {\n\t\t\t\tt.Errorf(\"should be space-separated values: %s\", s)\n\t\t\t}\n\t\t}\n\t}\n\twriter := func(toggle bool) {\n\t\tdefer wg.Done()\n\t\tcm := ipv6.ControlMessage{\n\t\t\tTrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,\n\t\t\tSrc:          net.IPv6loopback,\n\t\t}\n\t\tif ifi != nil {\n\t\t\tcm.IfIndex = ifi.Index\n\t\t}\n\t\tif err := p.SetControlMessage(cf, toggle); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif n, err := p.WriteTo(wb, &cm, dst); err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t} else if n != len(wb) {\n\t\t\tt.Errorf(\"got %d; want %d\", n, len(wb))\n\t\t\treturn\n\t\t}\n\t}\n\n\tconst N = 10\n\twg.Add(N)\n\tfor i := 0; i < N; i++ {\n\t\tgo reader()\n\t}\n\twg.Add(2 * N)\n\tfor i := 0; i < 2*N; i++ {\n\t\tgo writer(i%2 != 0)\n\t}\n\twg.Add(N)\n\tfor i := 0; i < N; i++ {\n\t\tgo reader()\n\t}\n\twg.Wait()\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/sockopt.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nimport \"golang.org/x/net/internal/socket\"\n\n// Sticky socket options\nconst (\n\tssoTrafficClass        = iota // header field for unicast packet, RFC 3542\n\tssoHopLimit                   // header field for unicast packet, RFC 3493\n\tssoMulticastInterface         // outbound interface for multicast packet, RFC 3493\n\tssoMulticastHopLimit          // header field for multicast packet, RFC 3493\n\tssoMulticastLoopback          // loopback for multicast packet, RFC 3493\n\tssoReceiveTrafficClass        // header field on received packet, RFC 3542\n\tssoReceiveHopLimit            // header field on received packet, RFC 2292 or 3542\n\tssoReceivePacketInfo          // incbound or outbound packet path, RFC 2292 or 3542\n\tssoReceivePathMTU             // path mtu, RFC 3542\n\tssoPathMTU                    // path mtu, RFC 3542\n\tssoChecksum                   // packet checksum, RFC 2292 or 3542\n\tssoICMPFilter                 // icmp filter, RFC 2292 or 3542\n\tssoJoinGroup                  // any-source multicast, RFC 3493\n\tssoLeaveGroup                 // any-source multicast, RFC 3493\n\tssoJoinSourceGroup            // source-specific multicast\n\tssoLeaveSourceGroup           // source-specific multicast\n\tssoBlockSourceGroup           // any-source or source-specific multicast\n\tssoUnblockSourceGroup         // any-source or source-specific multicast\n\tssoAttachFilter               // attach BPF for filtering inbound traffic\n)\n\n// Sticky socket option value types\nconst (\n\tssoTypeIPMreq = iota + 1\n\tssoTypeGroupReq\n\tssoTypeGroupSourceReq\n)\n\n// A sockOpt represents a binding for sticky socket option.\ntype sockOpt struct {\n\tsocket.Option\n\ttyp int // hint for option value type; optional\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/sockopt_posix.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows\n\npackage ipv6\n\nimport (\n\t\"net\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/bpf\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) {\n\tn, err := so.GetInt(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn net.InterfaceByIndex(n)\n}\n\nfunc (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error {\n\tvar n int\n\tif ifi != nil {\n\t\tn = ifi.Index\n\t}\n\treturn so.SetInt(c, n)\n}\n\nfunc (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) {\n\tb := make([]byte, so.Len)\n\tn, err := so.Get(c, b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n != sizeofICMPv6Filter {\n\t\treturn nil, errOpNoSupport\n\t}\n\treturn (*ICMPFilter)(unsafe.Pointer(&b[0])), nil\n}\n\nfunc (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error {\n\tb := (*[sizeofICMPv6Filter]byte)(unsafe.Pointer(f))[:sizeofICMPv6Filter]\n\treturn so.Set(c, b)\n}\n\nfunc (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) {\n\tb := make([]byte, so.Len)\n\tn, err := so.Get(c, b)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\tif n != sizeofIPv6Mtuinfo {\n\t\treturn nil, 0, errOpNoSupport\n\t}\n\tmi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0]))\n\tif mi.Addr.Scope_id == 0 {\n\t\treturn nil, int(mi.Mtu), nil\n\t}\n\tifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id))\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\treturn ifi, int(mi.Mtu), nil\n}\n\nfunc (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error {\n\tswitch so.typ {\n\tcase ssoTypeIPMreq:\n\t\treturn so.setIPMreq(c, ifi, grp)\n\tcase ssoTypeGroupReq:\n\t\treturn so.setGroupReq(c, ifi, grp)\n\tdefault:\n\t\treturn 
errOpNoSupport\n\t}\n}\n\nfunc (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error {\n\treturn so.setGroupSourceReq(c, ifi, grp, src)\n}\n\nfunc (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error {\n\treturn so.setAttachFilter(c, f)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/sockopt_stub.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows\n\npackage ipv6\n\nimport (\n\t\"net\"\n\n\t\"golang.org/x/net/bpf\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) {\n\treturn nil, errOpNoSupport\n}\n\nfunc (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error {\n\treturn errOpNoSupport\n}\n\nfunc (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) {\n\treturn nil, errOpNoSupport\n}\n\nfunc (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error {\n\treturn errOpNoSupport\n}\n\nfunc (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) {\n\treturn nil, 0, errOpNoSupport\n}\n\nfunc (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error {\n\treturn errOpNoSupport\n}\n\nfunc (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error {\n\treturn errOpNoSupport\n}\n\nfunc (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error {\n\treturn errOpNoSupport\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/sockopt_test.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6_test\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nvar supportsIPv6 bool = nettest.SupportsIPv6()\n\nfunc TestConnInitiatorPathMTU(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not supported\")\n\t}\n\n\tln, err := net.Listen(\"tcp6\", \"[::1]:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer ln.Close()\n\n\tdone := make(chan bool)\n\tgo acceptor(t, ln, done)\n\n\tc, err := net.Dial(\"tcp6\", ln.Addr().String())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\tif pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil {\n\t\tswitch runtime.GOOS {\n\t\tcase \"darwin\": // older darwin kernels don't support IPV6_PATHMTU option\n\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\tdefault:\n\t\t\tt.Fatal(err)\n\t\t}\n\t} else {\n\t\tt.Logf(\"path mtu for %v: %v\", c.RemoteAddr(), pmtu)\n\t}\n\n\t<-done\n}\n\nfunc TestConnResponderPathMTU(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not supported\")\n\t}\n\n\tln, err := net.Listen(\"tcp6\", \"[::1]:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer ln.Close()\n\n\tdone := make(chan bool)\n\tgo connector(t, \"tcp6\", ln.Addr().String(), done)\n\n\tc, err := ln.Accept()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\tif pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil {\n\t\tswitch runtime.GOOS {\n\t\tcase \"darwin\": // older darwin kernels don't support IPV6_PATHMTU 
option\n\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\tdefault:\n\t\t\tt.Fatal(err)\n\t\t}\n\t} else {\n\t\tt.Logf(\"path mtu for %v: %v\", c.RemoteAddr(), pmtu)\n\t}\n\n\t<-done\n}\n\nfunc TestPacketConnChecksum(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not supported\")\n\t}\n\tif m, ok := nettest.SupportsRawIPSocket(); !ok {\n\t\tt.Skip(m)\n\t}\n\n\tc, err := net.ListenPacket(fmt.Sprintf(\"ip6:%d\", iana.ProtocolOSPFIGP), \"::\") // OSPF for IPv6\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\tp := ipv6.NewPacketConn(c)\n\toffset := 12 // see RFC 5340\n\n\tfor _, toggle := range []bool{false, true} {\n\t\tif err := p.SetChecksum(toggle, offset); err != nil {\n\t\t\tif toggle {\n\t\t\t\tt.Fatalf(\"ipv6.PacketConn.SetChecksum(%v, %v) failed: %v\", toggle, offset, err)\n\t\t\t} else {\n\t\t\t\t// Some platforms never allow to disable the kernel\n\t\t\t\t// checksum processing.\n\t\t\t\tt.Logf(\"ipv6.PacketConn.SetChecksum(%v, %v) failed: %v\", toggle, offset, err)\n\t\t\t}\n\t\t}\n\t\tif on, offset, err := p.Checksum(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else {\n\t\t\tt.Logf(\"kernel checksum processing enabled=%v, offset=%v\", on, offset)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/sys_asmreq.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows\n\npackage ipv6\n\nimport (\n\t\"net\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error {\n\tvar mreq ipv6Mreq\n\tcopy(mreq.Multiaddr[:], grp)\n\tif ifi != nil {\n\t\tmreq.setIfindex(ifi.Index)\n\t}\n\tb := (*[sizeofIPv6Mreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPv6Mreq]\n\treturn so.Set(c, b)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows\n\npackage ipv6\n\nimport (\n\t\"net\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error {\n\treturn errOpNoSupport\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/sys_bpf.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build linux\n\npackage ipv6\n\nimport (\n\t\"unsafe\"\n\n\t\"golang.org/x/net/bpf\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error {\n\tprog := sockFProg{\n\t\tLen:    uint16(len(f)),\n\t\tFilter: (*sockFilter)(unsafe.Pointer(&f[0])),\n\t}\n\tb := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog]\n\treturn so.Set(c, b)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/sys_bpf_stub.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !linux\n\npackage ipv6\n\nimport (\n\t\"golang.org/x/net/bpf\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error {\n\treturn errOpNoSupport\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/sys_bsd.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build dragonfly netbsd openbsd\n\npackage ipv6\n\nimport (\n\t\"net\"\n\t\"syscall\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nvar (\n\tctlOpts = [ctlMax]ctlOpt{\n\t\tctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass},\n\t\tctlHopLimit:     {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit},\n\t\tctlPacketInfo:   {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo},\n\t\tctlNextHop:      {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop},\n\t\tctlPathMTU:      {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU},\n\t}\n\n\tsockOpts = map[int]*sockOpt{\n\t\tssoTrafficClass:        {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}},\n\t\tssoHopLimit:            {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}},\n\t\tssoMulticastInterface:  {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}},\n\t\tssoMulticastHopLimit:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}},\n\t\tssoMulticastLoopback:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}},\n\t\tssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}},\n\t\tssoReceiveHopLimit:     {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}},\n\t\tssoReceivePacketInfo:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}},\n\t\tssoReceivePathMTU:      {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}},\n\t\tssoPathMTU:             {Option: socket.Option{Level: iana.ProtocolIPv6, Name: 
sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}},\n\t\tssoChecksum:            {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}},\n\t\tssoICMPFilter:          {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}},\n\t\tssoJoinGroup:           {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq},\n\t\tssoLeaveGroup:          {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq},\n\t}\n)\n\nfunc (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) {\n\tsa.Len = sizeofSockaddrInet6\n\tsa.Family = syscall.AF_INET6\n\tcopy(sa.Addr[:], ip)\n\tsa.Scope_id = uint32(i)\n}\n\nfunc (pi *inet6Pktinfo) setIfindex(i int) {\n\tpi.Ifindex = uint32(i)\n}\n\nfunc (mreq *ipv6Mreq) setIfindex(i int) {\n\tmreq.Interface = uint32(i)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/sys_darwin.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nimport (\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nvar (\n\tctlOpts = [ctlMax]ctlOpt{\n\t\tctlHopLimit:   {sysIPV6_2292HOPLIMIT, 4, marshal2292HopLimit, parseHopLimit},\n\t\tctlPacketInfo: {sysIPV6_2292PKTINFO, sizeofInet6Pktinfo, marshal2292PacketInfo, parsePacketInfo},\n\t}\n\n\tsockOpts = map[int]*sockOpt{\n\t\tssoHopLimit:           {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}},\n\t\tssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}},\n\t\tssoMulticastHopLimit:  {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}},\n\t\tssoMulticastLoopback:  {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}},\n\t\tssoReceiveHopLimit:    {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_2292HOPLIMIT, Len: 4}},\n\t\tssoReceivePacketInfo:  {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_2292PKTINFO, Len: 4}},\n\t\tssoChecksum:           {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}},\n\t\tssoICMPFilter:         {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}},\n\t\tssoJoinGroup:          {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq},\n\t\tssoLeaveGroup:         {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq},\n\t}\n)\n\nfunc init() {\n\t// Seems like kern.osreldate is veiled on latest OS X. 
We use\n\t// kern.osrelease instead.\n\ts, err := syscall.Sysctl(\"kern.osrelease\")\n\tif err != nil {\n\t\treturn\n\t}\n\tss := strings.Split(s, \".\")\n\tif len(ss) == 0 {\n\t\treturn\n\t}\n\t// The IP_PKTINFO and protocol-independent multicast API were\n\t// introduced in OS X 10.7 (Darwin 11). But it looks like\n\t// those features require OS X 10.8 (Darwin 12) or above.\n\t// See http://support.apple.com/kb/HT1633.\n\tif mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 {\n\t\treturn\n\t}\n\tctlOpts[ctlTrafficClass] = ctlOpt{sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}\n\tctlOpts[ctlHopLimit] = ctlOpt{sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}\n\tctlOpts[ctlPacketInfo] = ctlOpt{sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}\n\tctlOpts[ctlNextHop] = ctlOpt{sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}\n\tctlOpts[ctlPathMTU] = ctlOpt{sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}\n\tsockOpts[ssoTrafficClass] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}\n\tsockOpts[ssoReceiveTrafficClass] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}\n\tsockOpts[ssoReceiveHopLimit] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}\n\tsockOpts[ssoReceivePacketInfo] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}\n\tsockOpts[ssoReceivePathMTU] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}\n\tsockOpts[ssoPathMTU] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}\n\tsockOpts[ssoJoinGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}\n\tsockOpts[ssoLeaveGroup] = &sockOpt{Option: socket.Option{Level: 
iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}\n\tsockOpts[ssoJoinSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}\n\tsockOpts[ssoLeaveSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}\n\tsockOpts[ssoBlockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}\n\tsockOpts[ssoUnblockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}\n}\n\nfunc (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) {\n\tsa.Len = sizeofSockaddrInet6\n\tsa.Family = syscall.AF_INET6\n\tcopy(sa.Addr[:], ip)\n\tsa.Scope_id = uint32(i)\n}\n\nfunc (pi *inet6Pktinfo) setIfindex(i int) {\n\tpi.Ifindex = uint32(i)\n}\n\nfunc (mreq *ipv6Mreq) setIfindex(i int) {\n\tmreq.Interface = uint32(i)\n}\n\nfunc (gr *groupReq) setGroup(grp net.IP) {\n\tsa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4))\n\tsa.Len = sizeofSockaddrInet6\n\tsa.Family = syscall.AF_INET6\n\tcopy(sa.Addr[:], grp)\n}\n\nfunc (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {\n\tsa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4))\n\tsa.Len = sizeofSockaddrInet6\n\tsa.Family = syscall.AF_INET6\n\tcopy(sa.Addr[:], grp)\n\tsa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132))\n\tsa.Len = sizeofSockaddrInet6\n\tsa.Family = syscall.AF_INET6\n\tcopy(sa.Addr[:], src)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/sys_freebsd.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nimport (\n\t\"net\"\n\t\"runtime\"\n\t\"strings\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nvar (\n\tctlOpts = [ctlMax]ctlOpt{\n\t\tctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass},\n\t\tctlHopLimit:     {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit},\n\t\tctlPacketInfo:   {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo},\n\t\tctlNextHop:      {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop},\n\t\tctlPathMTU:      {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU},\n\t}\n\n\tsockOpts = map[int]sockOpt{\n\t\tssoTrafficClass:        {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}},\n\t\tssoHopLimit:            {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}},\n\t\tssoMulticastInterface:  {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}},\n\t\tssoMulticastHopLimit:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}},\n\t\tssoMulticastLoopback:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}},\n\t\tssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}},\n\t\tssoReceiveHopLimit:     {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}},\n\t\tssoReceivePacketInfo:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}},\n\t\tssoReceivePathMTU:      {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}},\n\t\tssoPathMTU:             {Option: socket.Option{Level: iana.ProtocolIPv6, Name: 
sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}},\n\t\tssoChecksum:            {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}},\n\t\tssoICMPFilter:          {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}},\n\t\tssoJoinGroup:           {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},\n\t\tssoLeaveGroup:          {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},\n\t\tssoJoinSourceGroup:     {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoLeaveSourceGroup:    {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoBlockSourceGroup:    {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoUnblockSourceGroup:  {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t}\n)\n\nfunc init() {\n\tif runtime.GOOS == \"freebsd\" && runtime.GOARCH == \"386\" {\n\t\tarchs, _ := syscall.Sysctl(\"kern.supported_archs\")\n\t\tfor _, s := range strings.Fields(archs) {\n\t\t\tif s == \"amd64\" {\n\t\t\t\tfreebsd32o64 = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) {\n\tsa.Len = sizeofSockaddrInet6\n\tsa.Family = syscall.AF_INET6\n\tcopy(sa.Addr[:], ip)\n\tsa.Scope_id = uint32(i)\n}\n\nfunc (pi *inet6Pktinfo) setIfindex(i int) {\n\tpi.Ifindex = uint32(i)\n}\n\nfunc (mreq *ipv6Mreq) setIfindex(i int) {\n\tmreq.Interface = uint32(i)\n}\n\nfunc (gr *groupReq) setGroup(grp net.IP) {\n\tsa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group))\n\tsa.Len = 
sizeofSockaddrInet6\n\tsa.Family = syscall.AF_INET6\n\tcopy(sa.Addr[:], grp)\n}\n\nfunc (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {\n\tsa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group))\n\tsa.Len = sizeofSockaddrInet6\n\tsa.Family = syscall.AF_INET6\n\tcopy(sa.Addr[:], grp)\n\tsa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source))\n\tsa.Len = sizeofSockaddrInet6\n\tsa.Family = syscall.AF_INET6\n\tcopy(sa.Addr[:], src)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/sys_linux.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nimport (\n\t\"net\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nvar (\n\tctlOpts = [ctlMax]ctlOpt{\n\t\tctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass},\n\t\tctlHopLimit:     {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit},\n\t\tctlPacketInfo:   {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo},\n\t\tctlPathMTU:      {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU},\n\t}\n\n\tsockOpts = map[int]*sockOpt{\n\t\tssoTrafficClass:        {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}},\n\t\tssoHopLimit:            {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}},\n\t\tssoMulticastInterface:  {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}},\n\t\tssoMulticastHopLimit:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}},\n\t\tssoMulticastLoopback:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}},\n\t\tssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}},\n\t\tssoReceiveHopLimit:     {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}},\n\t\tssoReceivePacketInfo:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}},\n\t\tssoReceivePathMTU:      {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}},\n\t\tssoPathMTU:             {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}},\n\t\tssoChecksum:            {Option: socket.Option{Level: iana.ProtocolReserved, 
Name: sysIPV6_CHECKSUM, Len: 4}},\n\t\tssoICMPFilter:          {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMPV6_FILTER, Len: sizeofICMPv6Filter}},\n\t\tssoJoinGroup:           {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},\n\t\tssoLeaveGroup:          {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},\n\t\tssoJoinSourceGroup:     {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoLeaveSourceGroup:    {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoBlockSourceGroup:    {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoUnblockSourceGroup:  {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoAttachFilter:        {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}},\n\t}\n)\n\nfunc (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) {\n\tsa.Family = syscall.AF_INET6\n\tcopy(sa.Addr[:], ip)\n\tsa.Scope_id = uint32(i)\n}\n\nfunc (pi *inet6Pktinfo) setIfindex(i int) {\n\tpi.Ifindex = int32(i)\n}\n\nfunc (mreq *ipv6Mreq) setIfindex(i int) {\n\tmreq.Ifindex = int32(i)\n}\n\nfunc (gr *groupReq) setGroup(grp net.IP) {\n\tsa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group))\n\tsa.Family = syscall.AF_INET6\n\tcopy(sa.Addr[:], grp)\n}\n\nfunc (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {\n\tsa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group))\n\tsa.Family = syscall.AF_INET6\n\tcopy(sa.Addr[:], grp)\n\tsa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source))\n\tsa.Family = 
syscall.AF_INET6\n\tcopy(sa.Addr[:], src)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/sys_solaris.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nimport (\n\t\"net\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nvar (\n\tctlOpts = [ctlMax]ctlOpt{\n\t\tctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass},\n\t\tctlHopLimit:     {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit},\n\t\tctlPacketInfo:   {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo},\n\t\tctlNextHop:      {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop},\n\t\tctlPathMTU:      {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU},\n\t}\n\n\tsockOpts = map[int]*sockOpt{\n\t\tssoTrafficClass:        {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}},\n\t\tssoHopLimit:            {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}},\n\t\tssoMulticastInterface:  {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}},\n\t\tssoMulticastHopLimit:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}},\n\t\tssoMulticastLoopback:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}},\n\t\tssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}},\n\t\tssoReceiveHopLimit:     {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}},\n\t\tssoReceivePacketInfo:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}},\n\t\tssoReceivePathMTU:      {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}},\n\t\tssoPathMTU:             {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: 
sizeofIPv6Mtuinfo}},\n\t\tssoChecksum:            {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}},\n\t\tssoICMPFilter:          {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}},\n\t\tssoJoinGroup:           {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},\n\t\tssoLeaveGroup:          {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},\n\t\tssoJoinSourceGroup:     {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoLeaveSourceGroup:    {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoBlockSourceGroup:    {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t\tssoUnblockSourceGroup:  {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},\n\t}\n)\n\nfunc (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) {\n\tsa.Family = syscall.AF_INET6\n\tcopy(sa.Addr[:], ip)\n\tsa.Scope_id = uint32(i)\n}\n\nfunc (pi *inet6Pktinfo) setIfindex(i int) {\n\tpi.Ifindex = uint32(i)\n}\n\nfunc (mreq *ipv6Mreq) setIfindex(i int) {\n\tmreq.Interface = uint32(i)\n}\n\nfunc (gr *groupReq) setGroup(grp net.IP) {\n\tsa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4))\n\tsa.Family = syscall.AF_INET6\n\tcopy(sa.Addr[:], grp)\n}\n\nfunc (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {\n\tsa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4))\n\tsa.Family = syscall.AF_INET6\n\tcopy(sa.Addr[:], grp)\n\tsa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 
260))\n\tsa.Family = syscall.AF_INET6\n\tcopy(sa.Addr[:], src)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/sys_ssmreq.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin freebsd linux solaris\n\npackage ipv6\n\nimport (\n\t\"net\"\n\t\"unsafe\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\nvar freebsd32o64 bool\n\nfunc (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error {\n\tvar gr groupReq\n\tif ifi != nil {\n\t\tgr.Interface = uint32(ifi.Index)\n\t}\n\tgr.setGroup(grp)\n\tvar b []byte\n\tif freebsd32o64 {\n\t\tvar d [sizeofGroupReq + 4]byte\n\t\ts := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))\n\t\tcopy(d[:4], s[:4])\n\t\tcopy(d[8:], s[4:])\n\t\tb = d[:]\n\t} else {\n\t\tb = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq]\n\t}\n\treturn so.Set(c, b)\n}\n\nfunc (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error {\n\tvar gsr groupSourceReq\n\tif ifi != nil {\n\t\tgsr.Interface = uint32(ifi.Index)\n\t}\n\tgsr.setSourceGroup(grp, src)\n\tvar b []byte\n\tif freebsd32o64 {\n\t\tvar d [sizeofGroupSourceReq + 4]byte\n\t\ts := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))\n\t\tcopy(d[:4], s[:4])\n\t\tcopy(d[8:], s[4:])\n\t\tb = d[:]\n\t} else {\n\t\tb = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq]\n\t}\n\treturn so.Set(c, b)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !darwin,!freebsd,!linux,!solaris\n\npackage ipv6\n\nimport (\n\t\"net\"\n\n\t\"golang.org/x/net/internal/socket\"\n)\n\nfunc (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error {\n\treturn errOpNoSupport\n}\n\nfunc (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error {\n\treturn errOpNoSupport\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/sys_stub.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows\n\npackage ipv6\n\nvar (\n\tctlOpts = [ctlMax]ctlOpt{}\n\n\tsockOpts = map[int]*sockOpt{}\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/sys_windows.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6\n\nimport (\n\t\"net\"\n\t\"syscall\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/socket\"\n)\n\nconst (\n\t// See ws2tcpip.h.\n\tsysIPV6_UNICAST_HOPS   = 0x4\n\tsysIPV6_MULTICAST_IF   = 0x9\n\tsysIPV6_MULTICAST_HOPS = 0xa\n\tsysIPV6_MULTICAST_LOOP = 0xb\n\tsysIPV6_JOIN_GROUP     = 0xc\n\tsysIPV6_LEAVE_GROUP    = 0xd\n\tsysIPV6_PKTINFO        = 0x13\n\n\tsizeofSockaddrInet6 = 0x1c\n\n\tsizeofIPv6Mreq     = 0x14\n\tsizeofIPv6Mtuinfo  = 0x20\n\tsizeofICMPv6Filter = 0\n)\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tInterface uint32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype icmpv6Filter struct {\n\t// TODO(mikio): implement this\n}\n\nvar (\n\tctlOpts = [ctlMax]ctlOpt{}\n\n\tsockOpts = map[int]*sockOpt{\n\t\tssoHopLimit:           {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}},\n\t\tssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}},\n\t\tssoMulticastHopLimit:  {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}},\n\t\tssoMulticastLoopback:  {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}},\n\t\tssoJoinGroup:          {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq},\n\t\tssoLeaveGroup:         {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq},\n\t}\n)\n\nfunc (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) {\n\tsa.Family = 
syscall.AF_INET6\n\tcopy(sa.Addr[:], ip)\n\tsa.Scope_id = uint32(i)\n}\n\nfunc (mreq *ipv6Mreq) setIfindex(i int) {\n\tmreq.Interface = uint32(i)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/unicast_test.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6_test\n\nimport (\n\t\"bytes\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org/x/net/icmp\"\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nfunc TestPacketConnReadWriteUnicastUDP(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not supported\")\n\t}\n\n\tc, err := nettest.NewLocalPacketListener(\"udp6\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\tp := ipv6.NewPacketConn(c)\n\tdefer p.Close()\n\n\tdst := c.LocalAddr()\n\tcm := ipv6.ControlMessage{\n\t\tTrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,\n\t\tSrc:          net.IPv6loopback,\n\t}\n\tcf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU\n\tifi := nettest.RoutedInterface(\"ip6\", net.FlagUp|net.FlagLoopback)\n\tif ifi != nil {\n\t\tcm.IfIndex = ifi.Index\n\t}\n\twb := []byte(\"HELLO-R-U-THERE\")\n\n\tfor i, toggle := range []bool{true, false, true} {\n\t\tif err := p.SetControlMessage(cf, toggle); err != nil {\n\t\t\tif nettest.ProtocolNotSupported(err) {\n\t\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tcm.HopLimit = i + 1\n\t\tif err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif n, err := p.WriteTo(wb, &cm, dst); err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if n != len(wb) {\n\t\t\tt.Fatalf(\"got %v; want %v\", n, len(wb))\n\t\t}\n\t\trb := make([]byte, 128)\n\t\tif err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil 
{\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif n, _, _, err := p.ReadFrom(rb); err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if !bytes.Equal(rb[:n], wb) {\n\t\t\tt.Fatalf(\"got %v; want %v\", rb[:n], wb)\n\t\t}\n\t}\n}\n\nfunc TestPacketConnReadWriteUnicastICMP(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not supported\")\n\t}\n\tif m, ok := nettest.SupportsRawIPSocket(); !ok {\n\t\tt.Skip(m)\n\t}\n\n\tc, err := net.ListenPacket(\"ip6:ipv6-icmp\", \"::1\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\tp := ipv6.NewPacketConn(c)\n\tdefer p.Close()\n\n\tdst, err := net.ResolveIPAddr(\"ip6\", \"::1\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, dst.IP)\n\tcm := ipv6.ControlMessage{\n\t\tTrafficClass: iana.DiffServAF11 | iana.CongestionExperienced,\n\t\tSrc:          net.IPv6loopback,\n\t}\n\tcf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU\n\tifi := nettest.RoutedInterface(\"ip6\", net.FlagUp|net.FlagLoopback)\n\tif ifi != nil {\n\t\tcm.IfIndex = ifi.Index\n\t}\n\n\tvar f ipv6.ICMPFilter\n\tf.SetAll(true)\n\tf.Accept(ipv6.ICMPTypeEchoReply)\n\tif err := p.SetICMPFilter(&f); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar psh []byte\n\tfor i, toggle := range []bool{true, false, true} {\n\t\tif toggle {\n\t\t\tpsh = nil\n\t\t\tif err := p.SetChecksum(true, 2); err != nil {\n\t\t\t\t// Solaris never allows to modify\n\t\t\t\t// ICMP properties.\n\t\t\t\tif runtime.GOOS != \"solaris\" {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tpsh = pshicmp\n\t\t\t// Some platforms never allow to disable the\n\t\t\t// kernel checksum processing.\n\t\t\tp.SetChecksum(false, -1)\n\t\t}\n\t\twb, err := (&icmp.Message{\n\t\t\tType: ipv6.ICMPTypeEchoRequest, Code: 0,\n\t\t\tBody: 
&icmp.Echo{\n\t\t\t\tID: os.Getpid() & 0xffff, Seq: i + 1,\n\t\t\t\tData: []byte(\"HELLO-R-U-THERE\"),\n\t\t\t},\n\t\t}).Marshal(psh)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := p.SetControlMessage(cf, toggle); err != nil {\n\t\t\tif nettest.ProtocolNotSupported(err) {\n\t\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tcm.HopLimit = i + 1\n\t\tif err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif n, err := p.WriteTo(wb, &cm, dst); err != nil {\n\t\t\tt.Fatal(err)\n\t\t} else if n != len(wb) {\n\t\t\tt.Fatalf(\"got %v; want %v\", n, len(wb))\n\t\t}\n\t\trb := make([]byte, 128)\n\t\tif err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif n, _, _, err := p.ReadFrom(rb); err != nil {\n\t\t\tswitch runtime.GOOS {\n\t\t\tcase \"darwin\": // older darwin kernels have some limitation on receiving icmp packet through raw socket\n\t\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt.Fatal(err)\n\t\t} else {\n\t\t\tif m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 {\n\t\t\t\tt.Fatalf(\"got type=%v, code=%v; want type=%v, code=%v\", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/unicastsockopt_test.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ipv6_test\n\nimport (\n\t\"net\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/iana\"\n\t\"golang.org/x/net/internal/nettest\"\n\t\"golang.org/x/net/ipv6\"\n)\n\nfunc TestConnUnicastSocketOptions(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not supported\")\n\t}\n\n\tln, err := net.Listen(\"tcp6\", \"[::1]:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer ln.Close()\n\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\tc, err := ln.Accept()\n\t\tif err != nil {\n\t\t\terrc <- err\n\t\t\treturn\n\t\t}\n\t\terrc <- c.Close()\n\t}()\n\n\tc, err := net.Dial(\"tcp6\", ln.Addr().String())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\ttestUnicastSocketOptions(t, ipv6.NewConn(c))\n\n\tif err := <-errc; err != nil {\n\t\tt.Errorf(\"server: %v\", err)\n\t}\n}\n\nvar packetConnUnicastSocketOptionTests = []struct {\n\tnet, proto, addr string\n}{\n\t{\"udp6\", \"\", \"[::1]:0\"},\n\t{\"ip6\", \":ipv6-icmp\", \"::1\"},\n}\n\nfunc TestPacketConnUnicastSocketOptions(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\", \"plan9\", \"windows\":\n\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t}\n\tif !supportsIPv6 {\n\t\tt.Skip(\"ipv6 is not supported\")\n\t}\n\n\tm, ok := nettest.SupportsRawIPSocket()\n\tfor _, tt := range packetConnUnicastSocketOptionTests {\n\t\tif tt.net == \"ip6\" && !ok {\n\t\t\tt.Log(m)\n\t\t\tcontinue\n\t\t}\n\t\tc, err := net.ListenPacket(tt.net+tt.proto, tt.addr)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer c.Close()\n\n\t\ttestUnicastSocketOptions(t, ipv6.NewPacketConn(c))\n\t}\n}\n\ntype testIPv6UnicastConn interface {\n\tTrafficClass() (int, 
error)\n\tSetTrafficClass(int) error\n\tHopLimit() (int, error)\n\tSetHopLimit(int) error\n}\n\nfunc testUnicastSocketOptions(t *testing.T, c testIPv6UnicastConn) {\n\ttclass := iana.DiffServCS0 | iana.NotECNTransport\n\tif err := c.SetTrafficClass(tclass); err != nil {\n\t\tswitch runtime.GOOS {\n\t\tcase \"darwin\": // older darwin kernels don't support IPV6_TCLASS option\n\t\t\tt.Logf(\"not supported on %s\", runtime.GOOS)\n\t\t\tgoto next\n\t\t}\n\t\tt.Fatal(err)\n\t}\n\tif v, err := c.TrafficClass(); err != nil {\n\t\tt.Fatal(err)\n\t} else if v != tclass {\n\t\tt.Fatalf(\"got %v; want %v\", v, tclass)\n\t}\n\nnext:\n\thoplim := 255\n\tif err := c.SetHopLimit(hoplim); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif v, err := c.HopLimit(); err != nil {\n\t\tt.Fatal(err)\n\t} else if v != hoplim {\n\t\tt.Fatalf(\"got %v; want %v\", v, hoplim)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_darwin.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_darwin.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_UNICAST_HOPS   = 0x4\n\tsysIPV6_MULTICAST_IF   = 0x9\n\tsysIPV6_MULTICAST_HOPS = 0xa\n\tsysIPV6_MULTICAST_LOOP = 0xb\n\tsysIPV6_JOIN_GROUP     = 0xc\n\tsysIPV6_LEAVE_GROUP    = 0xd\n\n\tsysIPV6_PORTRANGE    = 0xe\n\tsysICMP6_FILTER      = 0x12\n\tsysIPV6_2292PKTINFO  = 0x13\n\tsysIPV6_2292HOPLIMIT = 0x14\n\tsysIPV6_2292NEXTHOP  = 0x15\n\tsysIPV6_2292HOPOPTS  = 0x16\n\tsysIPV6_2292DSTOPTS  = 0x17\n\tsysIPV6_2292RTHDR    = 0x18\n\n\tsysIPV6_2292PKTOPTIONS = 0x19\n\n\tsysIPV6_CHECKSUM = 0x1a\n\tsysIPV6_V6ONLY   = 0x1b\n\n\tsysIPV6_IPSEC_POLICY = 0x1c\n\n\tsysIPV6_RECVTCLASS = 0x23\n\tsysIPV6_TCLASS     = 0x24\n\n\tsysIPV6_RTHDRDSTOPTS = 0x39\n\n\tsysIPV6_RECVPKTINFO = 0x3d\n\n\tsysIPV6_RECVHOPLIMIT = 0x25\n\tsysIPV6_RECVRTHDR    = 0x26\n\tsysIPV6_RECVHOPOPTS  = 0x27\n\tsysIPV6_RECVDSTOPTS  = 0x28\n\n\tsysIPV6_USE_MIN_MTU = 0x2a\n\tsysIPV6_RECVPATHMTU = 0x2b\n\n\tsysIPV6_PATHMTU = 0x2c\n\n\tsysIPV6_PKTINFO  = 0x2e\n\tsysIPV6_HOPLIMIT = 0x2f\n\tsysIPV6_NEXTHOP  = 0x30\n\tsysIPV6_HOPOPTS  = 0x31\n\tsysIPV6_DSTOPTS  = 0x32\n\tsysIPV6_RTHDR    = 0x33\n\n\tsysIPV6_AUTOFLOWLABEL = 0x3b\n\n\tsysIPV6_DONTFRAG = 0x3e\n\n\tsysIPV6_PREFER_TEMPADDR = 0x3f\n\n\tsysIPV6_MSFILTER            = 0x4a\n\tsysMCAST_JOIN_GROUP         = 0x50\n\tsysMCAST_LEAVE_GROUP        = 0x51\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x52\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x53\n\tsysMCAST_BLOCK_SOURCE       = 0x54\n\tsysMCAST_UNBLOCK_SOURCE     = 0x55\n\n\tsysIPV6_BOUND_IF = 0x7d\n\n\tsysIPV6_PORTRANGE_DEFAULT = 0x0\n\tsysIPV6_PORTRANGE_HIGH    = 0x1\n\tsysIPV6_PORTRANGE_LOW     = 0x2\n\n\tsizeofSockaddrStorage = 0x80\n\tsizeofSockaddrInet6   = 0x1c\n\tsizeofInet6Pktinfo    = 0x14\n\tsizeofIPv6Mtuinfo     = 0x20\n\n\tsizeofIPv6Mreq       = 0x14\n\tsizeofGroupReq       = 0x84\n\tsizeofGroupSourceReq = 0x104\n\n\tsizeofICMPv6Filter = 0x20\n)\n\ntype sockaddrStorage struct {\n\tLen         
uint8\n\tFamily      uint8\n\tX__ss_pad1  [6]int8\n\tX__ss_align int64\n\tX__ss_pad2  [112]int8\n}\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex uint32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tInterface uint32\n}\n\ntype icmpv6Filter struct {\n\tFilt [8]uint32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [128]byte\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [128]byte\n\tPad_cgo_1 [128]byte\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_dragonfly.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_dragonfly.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_UNICAST_HOPS   = 0x4\n\tsysIPV6_MULTICAST_IF   = 0x9\n\tsysIPV6_MULTICAST_HOPS = 0xa\n\tsysIPV6_MULTICAST_LOOP = 0xb\n\tsysIPV6_JOIN_GROUP     = 0xc\n\tsysIPV6_LEAVE_GROUP    = 0xd\n\tsysIPV6_PORTRANGE      = 0xe\n\tsysICMP6_FILTER        = 0x12\n\n\tsysIPV6_CHECKSUM = 0x1a\n\tsysIPV6_V6ONLY   = 0x1b\n\n\tsysIPV6_IPSEC_POLICY = 0x1c\n\n\tsysIPV6_RTHDRDSTOPTS = 0x23\n\tsysIPV6_RECVPKTINFO  = 0x24\n\tsysIPV6_RECVHOPLIMIT = 0x25\n\tsysIPV6_RECVRTHDR    = 0x26\n\tsysIPV6_RECVHOPOPTS  = 0x27\n\tsysIPV6_RECVDSTOPTS  = 0x28\n\n\tsysIPV6_USE_MIN_MTU = 0x2a\n\tsysIPV6_RECVPATHMTU = 0x2b\n\n\tsysIPV6_PATHMTU = 0x2c\n\n\tsysIPV6_PKTINFO  = 0x2e\n\tsysIPV6_HOPLIMIT = 0x2f\n\tsysIPV6_NEXTHOP  = 0x30\n\tsysIPV6_HOPOPTS  = 0x31\n\tsysIPV6_DSTOPTS  = 0x32\n\tsysIPV6_RTHDR    = 0x33\n\n\tsysIPV6_RECVTCLASS = 0x39\n\n\tsysIPV6_AUTOFLOWLABEL = 0x3b\n\n\tsysIPV6_TCLASS   = 0x3d\n\tsysIPV6_DONTFRAG = 0x3e\n\n\tsysIPV6_PREFER_TEMPADDR = 0x3f\n\n\tsysIPV6_PORTRANGE_DEFAULT = 0x0\n\tsysIPV6_PORTRANGE_HIGH    = 0x1\n\tsysIPV6_PORTRANGE_LOW     = 0x2\n\n\tsizeofSockaddrInet6 = 0x1c\n\tsizeofInet6Pktinfo  = 0x14\n\tsizeofIPv6Mtuinfo   = 0x20\n\n\tsizeofIPv6Mreq = 0x14\n\n\tsizeofICMPv6Filter = 0x20\n)\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex uint32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tInterface uint32\n}\n\ntype icmpv6Filter struct {\n\tFilt [8]uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_freebsd.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_UNICAST_HOPS   = 0x4\n\tsysIPV6_MULTICAST_IF   = 0x9\n\tsysIPV6_MULTICAST_HOPS = 0xa\n\tsysIPV6_MULTICAST_LOOP = 0xb\n\tsysIPV6_JOIN_GROUP     = 0xc\n\tsysIPV6_LEAVE_GROUP    = 0xd\n\tsysIPV6_PORTRANGE      = 0xe\n\tsysICMP6_FILTER        = 0x12\n\n\tsysIPV6_CHECKSUM = 0x1a\n\tsysIPV6_V6ONLY   = 0x1b\n\n\tsysIPV6_IPSEC_POLICY = 0x1c\n\n\tsysIPV6_RTHDRDSTOPTS = 0x23\n\n\tsysIPV6_RECVPKTINFO  = 0x24\n\tsysIPV6_RECVHOPLIMIT = 0x25\n\tsysIPV6_RECVRTHDR    = 0x26\n\tsysIPV6_RECVHOPOPTS  = 0x27\n\tsysIPV6_RECVDSTOPTS  = 0x28\n\n\tsysIPV6_USE_MIN_MTU = 0x2a\n\tsysIPV6_RECVPATHMTU = 0x2b\n\n\tsysIPV6_PATHMTU = 0x2c\n\n\tsysIPV6_PKTINFO  = 0x2e\n\tsysIPV6_HOPLIMIT = 0x2f\n\tsysIPV6_NEXTHOP  = 0x30\n\tsysIPV6_HOPOPTS  = 0x31\n\tsysIPV6_DSTOPTS  = 0x32\n\tsysIPV6_RTHDR    = 0x33\n\n\tsysIPV6_RECVTCLASS = 0x39\n\n\tsysIPV6_AUTOFLOWLABEL = 0x3b\n\n\tsysIPV6_TCLASS   = 0x3d\n\tsysIPV6_DONTFRAG = 0x3e\n\n\tsysIPV6_PREFER_TEMPADDR = 0x3f\n\n\tsysIPV6_BINDANY = 0x40\n\n\tsysIPV6_MSFILTER = 0x4a\n\n\tsysMCAST_JOIN_GROUP         = 0x50\n\tsysMCAST_LEAVE_GROUP        = 0x51\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x52\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x53\n\tsysMCAST_BLOCK_SOURCE       = 0x54\n\tsysMCAST_UNBLOCK_SOURCE     = 0x55\n\n\tsysIPV6_PORTRANGE_DEFAULT = 0x0\n\tsysIPV6_PORTRANGE_HIGH    = 0x1\n\tsysIPV6_PORTRANGE_LOW     = 0x2\n\n\tsizeofSockaddrStorage = 0x80\n\tsizeofSockaddrInet6   = 0x1c\n\tsizeofInet6Pktinfo    = 0x14\n\tsizeofIPv6Mtuinfo     = 0x20\n\n\tsizeofIPv6Mreq       = 0x14\n\tsizeofGroupReq       = 0x84\n\tsizeofGroupSourceReq = 0x104\n\n\tsizeofICMPv6Filter = 0x20\n)\n\ntype sockaddrStorage struct {\n\tLen         uint8\n\tFamily      uint8\n\tX__ss_pad1  [6]int8\n\tX__ss_align int64\n\tX__ss_pad2  [112]int8\n}\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* 
in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex uint32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tInterface uint32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tGroup     sockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tGroup     sockaddrStorage\n\tSource    sockaddrStorage\n}\n\ntype icmpv6Filter struct {\n\tFilt [8]uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_freebsd.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_UNICAST_HOPS   = 0x4\n\tsysIPV6_MULTICAST_IF   = 0x9\n\tsysIPV6_MULTICAST_HOPS = 0xa\n\tsysIPV6_MULTICAST_LOOP = 0xb\n\tsysIPV6_JOIN_GROUP     = 0xc\n\tsysIPV6_LEAVE_GROUP    = 0xd\n\tsysIPV6_PORTRANGE      = 0xe\n\tsysICMP6_FILTER        = 0x12\n\n\tsysIPV6_CHECKSUM = 0x1a\n\tsysIPV6_V6ONLY   = 0x1b\n\n\tsysIPV6_IPSEC_POLICY = 0x1c\n\n\tsysIPV6_RTHDRDSTOPTS = 0x23\n\n\tsysIPV6_RECVPKTINFO  = 0x24\n\tsysIPV6_RECVHOPLIMIT = 0x25\n\tsysIPV6_RECVRTHDR    = 0x26\n\tsysIPV6_RECVHOPOPTS  = 0x27\n\tsysIPV6_RECVDSTOPTS  = 0x28\n\n\tsysIPV6_USE_MIN_MTU = 0x2a\n\tsysIPV6_RECVPATHMTU = 0x2b\n\n\tsysIPV6_PATHMTU = 0x2c\n\n\tsysIPV6_PKTINFO  = 0x2e\n\tsysIPV6_HOPLIMIT = 0x2f\n\tsysIPV6_NEXTHOP  = 0x30\n\tsysIPV6_HOPOPTS  = 0x31\n\tsysIPV6_DSTOPTS  = 0x32\n\tsysIPV6_RTHDR    = 0x33\n\n\tsysIPV6_RECVTCLASS = 0x39\n\n\tsysIPV6_AUTOFLOWLABEL = 0x3b\n\n\tsysIPV6_TCLASS   = 0x3d\n\tsysIPV6_DONTFRAG = 0x3e\n\n\tsysIPV6_PREFER_TEMPADDR = 0x3f\n\n\tsysIPV6_BINDANY = 0x40\n\n\tsysIPV6_MSFILTER = 0x4a\n\n\tsysMCAST_JOIN_GROUP         = 0x50\n\tsysMCAST_LEAVE_GROUP        = 0x51\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x52\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x53\n\tsysMCAST_BLOCK_SOURCE       = 0x54\n\tsysMCAST_UNBLOCK_SOURCE     = 0x55\n\n\tsysIPV6_PORTRANGE_DEFAULT = 0x0\n\tsysIPV6_PORTRANGE_HIGH    = 0x1\n\tsysIPV6_PORTRANGE_LOW     = 0x2\n\n\tsizeofSockaddrStorage = 0x80\n\tsizeofSockaddrInet6   = 0x1c\n\tsizeofInet6Pktinfo    = 0x14\n\tsizeofIPv6Mtuinfo     = 0x20\n\n\tsizeofIPv6Mreq       = 0x14\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n\n\tsizeofICMPv6Filter = 0x20\n)\n\ntype sockaddrStorage struct {\n\tLen         uint8\n\tFamily      uint8\n\tX__ss_pad1  [6]int8\n\tX__ss_align int64\n\tX__ss_pad2  [112]int8\n}\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* 
in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex uint32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tInterface uint32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     sockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     sockaddrStorage\n\tSource    sockaddrStorage\n}\n\ntype icmpv6Filter struct {\n\tFilt [8]uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_freebsd.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_UNICAST_HOPS   = 0x4\n\tsysIPV6_MULTICAST_IF   = 0x9\n\tsysIPV6_MULTICAST_HOPS = 0xa\n\tsysIPV6_MULTICAST_LOOP = 0xb\n\tsysIPV6_JOIN_GROUP     = 0xc\n\tsysIPV6_LEAVE_GROUP    = 0xd\n\tsysIPV6_PORTRANGE      = 0xe\n\tsysICMP6_FILTER        = 0x12\n\n\tsysIPV6_CHECKSUM = 0x1a\n\tsysIPV6_V6ONLY   = 0x1b\n\n\tsysIPV6_IPSEC_POLICY = 0x1c\n\n\tsysIPV6_RTHDRDSTOPTS = 0x23\n\n\tsysIPV6_RECVPKTINFO  = 0x24\n\tsysIPV6_RECVHOPLIMIT = 0x25\n\tsysIPV6_RECVRTHDR    = 0x26\n\tsysIPV6_RECVHOPOPTS  = 0x27\n\tsysIPV6_RECVDSTOPTS  = 0x28\n\n\tsysIPV6_USE_MIN_MTU = 0x2a\n\tsysIPV6_RECVPATHMTU = 0x2b\n\n\tsysIPV6_PATHMTU = 0x2c\n\n\tsysIPV6_PKTINFO  = 0x2e\n\tsysIPV6_HOPLIMIT = 0x2f\n\tsysIPV6_NEXTHOP  = 0x30\n\tsysIPV6_HOPOPTS  = 0x31\n\tsysIPV6_DSTOPTS  = 0x32\n\tsysIPV6_RTHDR    = 0x33\n\n\tsysIPV6_RECVTCLASS = 0x39\n\n\tsysIPV6_AUTOFLOWLABEL = 0x3b\n\n\tsysIPV6_TCLASS   = 0x3d\n\tsysIPV6_DONTFRAG = 0x3e\n\n\tsysIPV6_PREFER_TEMPADDR = 0x3f\n\n\tsysIPV6_BINDANY = 0x40\n\n\tsysIPV6_MSFILTER = 0x4a\n\n\tsysMCAST_JOIN_GROUP         = 0x50\n\tsysMCAST_LEAVE_GROUP        = 0x51\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x52\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x53\n\tsysMCAST_BLOCK_SOURCE       = 0x54\n\tsysMCAST_UNBLOCK_SOURCE     = 0x55\n\n\tsysIPV6_PORTRANGE_DEFAULT = 0x0\n\tsysIPV6_PORTRANGE_HIGH    = 0x1\n\tsysIPV6_PORTRANGE_LOW     = 0x2\n\n\tsizeofSockaddrStorage = 0x80\n\tsizeofSockaddrInet6   = 0x1c\n\tsizeofInet6Pktinfo    = 0x14\n\tsizeofIPv6Mtuinfo     = 0x20\n\n\tsizeofIPv6Mreq       = 0x14\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n\n\tsizeofICMPv6Filter = 0x20\n)\n\ntype sockaddrStorage struct {\n\tLen         uint8\n\tFamily      uint8\n\tX__ss_pad1  [6]int8\n\tX__ss_align int64\n\tX__ss_pad2  [112]int8\n}\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* 
in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex uint32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tInterface uint32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     sockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     sockaddrStorage\n\tSource    sockaddrStorage\n}\n\ntype icmpv6Filter struct {\n\tFilt [8]uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_linux_386.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_ADDRFORM       = 0x1\n\tsysIPV6_2292PKTINFO    = 0x2\n\tsysIPV6_2292HOPOPTS    = 0x3\n\tsysIPV6_2292DSTOPTS    = 0x4\n\tsysIPV6_2292RTHDR      = 0x5\n\tsysIPV6_2292PKTOPTIONS = 0x6\n\tsysIPV6_CHECKSUM       = 0x7\n\tsysIPV6_2292HOPLIMIT   = 0x8\n\tsysIPV6_NEXTHOP        = 0x9\n\tsysIPV6_FLOWINFO       = 0xb\n\n\tsysIPV6_UNICAST_HOPS        = 0x10\n\tsysIPV6_MULTICAST_IF        = 0x11\n\tsysIPV6_MULTICAST_HOPS      = 0x12\n\tsysIPV6_MULTICAST_LOOP      = 0x13\n\tsysIPV6_ADD_MEMBERSHIP      = 0x14\n\tsysIPV6_DROP_MEMBERSHIP     = 0x15\n\tsysMCAST_JOIN_GROUP         = 0x2a\n\tsysMCAST_LEAVE_GROUP        = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x2f\n\tsysMCAST_BLOCK_SOURCE       = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE     = 0x2c\n\tsysMCAST_MSFILTER           = 0x30\n\tsysIPV6_ROUTER_ALERT        = 0x16\n\tsysIPV6_MTU_DISCOVER        = 0x17\n\tsysIPV6_MTU                 = 0x18\n\tsysIPV6_RECVERR             = 0x19\n\tsysIPV6_V6ONLY              = 0x1a\n\tsysIPV6_JOIN_ANYCAST        = 0x1b\n\tsysIPV6_LEAVE_ANYCAST       = 0x1c\n\n\tsysIPV6_FLOWLABEL_MGR = 0x20\n\tsysIPV6_FLOWINFO_SEND = 0x21\n\n\tsysIPV6_IPSEC_POLICY = 0x22\n\tsysIPV6_XFRM_POLICY  = 0x23\n\n\tsysIPV6_RECVPKTINFO  = 0x31\n\tsysIPV6_PKTINFO      = 0x32\n\tsysIPV6_RECVHOPLIMIT = 0x33\n\tsysIPV6_HOPLIMIT     = 0x34\n\tsysIPV6_RECVHOPOPTS  = 0x35\n\tsysIPV6_HOPOPTS      = 0x36\n\tsysIPV6_RTHDRDSTOPTS = 0x37\n\tsysIPV6_RECVRTHDR    = 0x38\n\tsysIPV6_RTHDR        = 0x39\n\tsysIPV6_RECVDSTOPTS  = 0x3a\n\tsysIPV6_DSTOPTS      = 0x3b\n\tsysIPV6_RECVPATHMTU  = 0x3c\n\tsysIPV6_PATHMTU      = 0x3d\n\tsysIPV6_DONTFRAG     = 0x3e\n\n\tsysIPV6_RECVTCLASS = 0x42\n\tsysIPV6_TCLASS     = 0x43\n\n\tsysIPV6_ADDR_PREFERENCES = 0x48\n\n\tsysIPV6_PREFER_SRC_TMP            = 0x1\n\tsysIPV6_PREFER_SRC_PUBLIC         = 0x2\n\tsysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 
0x100\n\tsysIPV6_PREFER_SRC_COA            = 0x4\n\tsysIPV6_PREFER_SRC_HOME           = 0x400\n\tsysIPV6_PREFER_SRC_CGA            = 0x8\n\tsysIPV6_PREFER_SRC_NONCGA         = 0x800\n\n\tsysIPV6_MINHOPCOUNT = 0x49\n\n\tsysIPV6_ORIGDSTADDR     = 0x4a\n\tsysIPV6_RECVORIGDSTADDR = 0x4a\n\tsysIPV6_TRANSPARENT     = 0x4b\n\tsysIPV6_UNICAST_IF      = 0x4c\n\n\tsysICMPV6_FILTER = 0x1\n\n\tsysICMPV6_FILTER_BLOCK       = 0x1\n\tsysICMPV6_FILTER_PASS        = 0x2\n\tsysICMPV6_FILTER_BLOCKOTHERS = 0x3\n\tsysICMPV6_FILTER_PASSONLY    = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet6         = 0x1c\n\tsizeofInet6Pktinfo          = 0x14\n\tsizeofIPv6Mtuinfo           = 0x20\n\tsizeofIPv6FlowlabelReq      = 0x20\n\n\tsizeofIPv6Mreq       = 0x14\n\tsizeofGroupReq       = 0x84\n\tsizeofGroupSourceReq = 0x104\n\n\tsizeofICMPv6Filter = 0x20\n\n\tsizeofSockFprog = 0x8\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex int32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6FlowlabelReq struct {\n\tDst        [16]byte /* in6_addr */\n\tLabel      uint32\n\tAction     uint8\n\tShare      uint8\n\tFlags      uint16\n\tExpires    uint16\n\tLinger     uint16\n\tX__flr_pad uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tIfindex   int32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpv6Filter struct {\n\tData [8]uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 [2]byte\n\tFilter    *sockFilter\n}\n\ntype 
sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_ADDRFORM       = 0x1\n\tsysIPV6_2292PKTINFO    = 0x2\n\tsysIPV6_2292HOPOPTS    = 0x3\n\tsysIPV6_2292DSTOPTS    = 0x4\n\tsysIPV6_2292RTHDR      = 0x5\n\tsysIPV6_2292PKTOPTIONS = 0x6\n\tsysIPV6_CHECKSUM       = 0x7\n\tsysIPV6_2292HOPLIMIT   = 0x8\n\tsysIPV6_NEXTHOP        = 0x9\n\tsysIPV6_FLOWINFO       = 0xb\n\n\tsysIPV6_UNICAST_HOPS        = 0x10\n\tsysIPV6_MULTICAST_IF        = 0x11\n\tsysIPV6_MULTICAST_HOPS      = 0x12\n\tsysIPV6_MULTICAST_LOOP      = 0x13\n\tsysIPV6_ADD_MEMBERSHIP      = 0x14\n\tsysIPV6_DROP_MEMBERSHIP     = 0x15\n\tsysMCAST_JOIN_GROUP         = 0x2a\n\tsysMCAST_LEAVE_GROUP        = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x2f\n\tsysMCAST_BLOCK_SOURCE       = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE     = 0x2c\n\tsysMCAST_MSFILTER           = 0x30\n\tsysIPV6_ROUTER_ALERT        = 0x16\n\tsysIPV6_MTU_DISCOVER        = 0x17\n\tsysIPV6_MTU                 = 0x18\n\tsysIPV6_RECVERR             = 0x19\n\tsysIPV6_V6ONLY              = 0x1a\n\tsysIPV6_JOIN_ANYCAST        = 0x1b\n\tsysIPV6_LEAVE_ANYCAST       = 0x1c\n\n\tsysIPV6_FLOWLABEL_MGR = 0x20\n\tsysIPV6_FLOWINFO_SEND = 0x21\n\n\tsysIPV6_IPSEC_POLICY = 0x22\n\tsysIPV6_XFRM_POLICY  = 0x23\n\n\tsysIPV6_RECVPKTINFO  = 0x31\n\tsysIPV6_PKTINFO      = 0x32\n\tsysIPV6_RECVHOPLIMIT = 0x33\n\tsysIPV6_HOPLIMIT     = 0x34\n\tsysIPV6_RECVHOPOPTS  = 0x35\n\tsysIPV6_HOPOPTS      = 0x36\n\tsysIPV6_RTHDRDSTOPTS = 0x37\n\tsysIPV6_RECVRTHDR    = 0x38\n\tsysIPV6_RTHDR        = 0x39\n\tsysIPV6_RECVDSTOPTS  = 0x3a\n\tsysIPV6_DSTOPTS      = 0x3b\n\tsysIPV6_RECVPATHMTU  = 0x3c\n\tsysIPV6_PATHMTU      = 0x3d\n\tsysIPV6_DONTFRAG     = 0x3e\n\n\tsysIPV6_RECVTCLASS = 0x42\n\tsysIPV6_TCLASS     = 0x43\n\n\tsysIPV6_ADDR_PREFERENCES = 0x48\n\n\tsysIPV6_PREFER_SRC_TMP            = 0x1\n\tsysIPV6_PREFER_SRC_PUBLIC         = 0x2\n\tsysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 
0x100\n\tsysIPV6_PREFER_SRC_COA            = 0x4\n\tsysIPV6_PREFER_SRC_HOME           = 0x400\n\tsysIPV6_PREFER_SRC_CGA            = 0x8\n\tsysIPV6_PREFER_SRC_NONCGA         = 0x800\n\n\tsysIPV6_MINHOPCOUNT = 0x49\n\n\tsysIPV6_ORIGDSTADDR     = 0x4a\n\tsysIPV6_RECVORIGDSTADDR = 0x4a\n\tsysIPV6_TRANSPARENT     = 0x4b\n\tsysIPV6_UNICAST_IF      = 0x4c\n\n\tsysICMPV6_FILTER = 0x1\n\n\tsysICMPV6_FILTER_BLOCK       = 0x1\n\tsysICMPV6_FILTER_PASS        = 0x2\n\tsysICMPV6_FILTER_BLOCKOTHERS = 0x3\n\tsysICMPV6_FILTER_PASSONLY    = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet6         = 0x1c\n\tsizeofInet6Pktinfo          = 0x14\n\tsizeofIPv6Mtuinfo           = 0x20\n\tsizeofIPv6FlowlabelReq      = 0x20\n\n\tsizeofIPv6Mreq       = 0x14\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n\n\tsizeofICMPv6Filter = 0x20\n\n\tsizeofSockFprog = 0x10\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex int32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6FlowlabelReq struct {\n\tDst        [16]byte /* in6_addr */\n\tLabel      uint32\n\tAction     uint8\n\tShare      uint8\n\tFlags      uint16\n\tExpires    uint16\n\tLinger     uint16\n\tX__flr_pad uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tIfindex   int32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpv6Filter struct {\n\tData [8]uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 
[6]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_linux_arm.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_ADDRFORM       = 0x1\n\tsysIPV6_2292PKTINFO    = 0x2\n\tsysIPV6_2292HOPOPTS    = 0x3\n\tsysIPV6_2292DSTOPTS    = 0x4\n\tsysIPV6_2292RTHDR      = 0x5\n\tsysIPV6_2292PKTOPTIONS = 0x6\n\tsysIPV6_CHECKSUM       = 0x7\n\tsysIPV6_2292HOPLIMIT   = 0x8\n\tsysIPV6_NEXTHOP        = 0x9\n\tsysIPV6_FLOWINFO       = 0xb\n\n\tsysIPV6_UNICAST_HOPS        = 0x10\n\tsysIPV6_MULTICAST_IF        = 0x11\n\tsysIPV6_MULTICAST_HOPS      = 0x12\n\tsysIPV6_MULTICAST_LOOP      = 0x13\n\tsysIPV6_ADD_MEMBERSHIP      = 0x14\n\tsysIPV6_DROP_MEMBERSHIP     = 0x15\n\tsysMCAST_JOIN_GROUP         = 0x2a\n\tsysMCAST_LEAVE_GROUP        = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x2f\n\tsysMCAST_BLOCK_SOURCE       = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE     = 0x2c\n\tsysMCAST_MSFILTER           = 0x30\n\tsysIPV6_ROUTER_ALERT        = 0x16\n\tsysIPV6_MTU_DISCOVER        = 0x17\n\tsysIPV6_MTU                 = 0x18\n\tsysIPV6_RECVERR             = 0x19\n\tsysIPV6_V6ONLY              = 0x1a\n\tsysIPV6_JOIN_ANYCAST        = 0x1b\n\tsysIPV6_LEAVE_ANYCAST       = 0x1c\n\n\tsysIPV6_FLOWLABEL_MGR = 0x20\n\tsysIPV6_FLOWINFO_SEND = 0x21\n\n\tsysIPV6_IPSEC_POLICY = 0x22\n\tsysIPV6_XFRM_POLICY  = 0x23\n\n\tsysIPV6_RECVPKTINFO  = 0x31\n\tsysIPV6_PKTINFO      = 0x32\n\tsysIPV6_RECVHOPLIMIT = 0x33\n\tsysIPV6_HOPLIMIT     = 0x34\n\tsysIPV6_RECVHOPOPTS  = 0x35\n\tsysIPV6_HOPOPTS      = 0x36\n\tsysIPV6_RTHDRDSTOPTS = 0x37\n\tsysIPV6_RECVRTHDR    = 0x38\n\tsysIPV6_RTHDR        = 0x39\n\tsysIPV6_RECVDSTOPTS  = 0x3a\n\tsysIPV6_DSTOPTS      = 0x3b\n\tsysIPV6_RECVPATHMTU  = 0x3c\n\tsysIPV6_PATHMTU      = 0x3d\n\tsysIPV6_DONTFRAG     = 0x3e\n\n\tsysIPV6_RECVTCLASS = 0x42\n\tsysIPV6_TCLASS     = 0x43\n\n\tsysIPV6_ADDR_PREFERENCES = 0x48\n\n\tsysIPV6_PREFER_SRC_TMP            = 0x1\n\tsysIPV6_PREFER_SRC_PUBLIC         = 0x2\n\tsysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 
0x100\n\tsysIPV6_PREFER_SRC_COA            = 0x4\n\tsysIPV6_PREFER_SRC_HOME           = 0x400\n\tsysIPV6_PREFER_SRC_CGA            = 0x8\n\tsysIPV6_PREFER_SRC_NONCGA         = 0x800\n\n\tsysIPV6_MINHOPCOUNT = 0x49\n\n\tsysIPV6_ORIGDSTADDR     = 0x4a\n\tsysIPV6_RECVORIGDSTADDR = 0x4a\n\tsysIPV6_TRANSPARENT     = 0x4b\n\tsysIPV6_UNICAST_IF      = 0x4c\n\n\tsysICMPV6_FILTER = 0x1\n\n\tsysICMPV6_FILTER_BLOCK       = 0x1\n\tsysICMPV6_FILTER_PASS        = 0x2\n\tsysICMPV6_FILTER_BLOCKOTHERS = 0x3\n\tsysICMPV6_FILTER_PASSONLY    = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet6         = 0x1c\n\tsizeofInet6Pktinfo          = 0x14\n\tsizeofIPv6Mtuinfo           = 0x20\n\tsizeofIPv6FlowlabelReq      = 0x20\n\n\tsizeofIPv6Mreq       = 0x14\n\tsizeofGroupReq       = 0x84\n\tsizeofGroupSourceReq = 0x104\n\n\tsizeofICMPv6Filter = 0x20\n\n\tsizeofSockFprog = 0x8\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex int32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6FlowlabelReq struct {\n\tDst        [16]byte /* in6_addr */\n\tLabel      uint32\n\tAction     uint8\n\tShare      uint8\n\tFlags      uint16\n\tExpires    uint16\n\tLinger     uint16\n\tX__flr_pad uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tIfindex   int32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpv6Filter struct {\n\tData [8]uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 [2]byte\n\tFilter    *sockFilter\n}\n\ntype 
sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_ADDRFORM       = 0x1\n\tsysIPV6_2292PKTINFO    = 0x2\n\tsysIPV6_2292HOPOPTS    = 0x3\n\tsysIPV6_2292DSTOPTS    = 0x4\n\tsysIPV6_2292RTHDR      = 0x5\n\tsysIPV6_2292PKTOPTIONS = 0x6\n\tsysIPV6_CHECKSUM       = 0x7\n\tsysIPV6_2292HOPLIMIT   = 0x8\n\tsysIPV6_NEXTHOP        = 0x9\n\tsysIPV6_FLOWINFO       = 0xb\n\n\tsysIPV6_UNICAST_HOPS        = 0x10\n\tsysIPV6_MULTICAST_IF        = 0x11\n\tsysIPV6_MULTICAST_HOPS      = 0x12\n\tsysIPV6_MULTICAST_LOOP      = 0x13\n\tsysIPV6_ADD_MEMBERSHIP      = 0x14\n\tsysIPV6_DROP_MEMBERSHIP     = 0x15\n\tsysMCAST_JOIN_GROUP         = 0x2a\n\tsysMCAST_LEAVE_GROUP        = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x2f\n\tsysMCAST_BLOCK_SOURCE       = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE     = 0x2c\n\tsysMCAST_MSFILTER           = 0x30\n\tsysIPV6_ROUTER_ALERT        = 0x16\n\tsysIPV6_MTU_DISCOVER        = 0x17\n\tsysIPV6_MTU                 = 0x18\n\tsysIPV6_RECVERR             = 0x19\n\tsysIPV6_V6ONLY              = 0x1a\n\tsysIPV6_JOIN_ANYCAST        = 0x1b\n\tsysIPV6_LEAVE_ANYCAST       = 0x1c\n\n\tsysIPV6_FLOWLABEL_MGR = 0x20\n\tsysIPV6_FLOWINFO_SEND = 0x21\n\n\tsysIPV6_IPSEC_POLICY = 0x22\n\tsysIPV6_XFRM_POLICY  = 0x23\n\n\tsysIPV6_RECVPKTINFO  = 0x31\n\tsysIPV6_PKTINFO      = 0x32\n\tsysIPV6_RECVHOPLIMIT = 0x33\n\tsysIPV6_HOPLIMIT     = 0x34\n\tsysIPV6_RECVHOPOPTS  = 0x35\n\tsysIPV6_HOPOPTS      = 0x36\n\tsysIPV6_RTHDRDSTOPTS = 0x37\n\tsysIPV6_RECVRTHDR    = 0x38\n\tsysIPV6_RTHDR        = 0x39\n\tsysIPV6_RECVDSTOPTS  = 0x3a\n\tsysIPV6_DSTOPTS      = 0x3b\n\tsysIPV6_RECVPATHMTU  = 0x3c\n\tsysIPV6_PATHMTU      = 0x3d\n\tsysIPV6_DONTFRAG     = 0x3e\n\n\tsysIPV6_RECVTCLASS = 0x42\n\tsysIPV6_TCLASS     = 0x43\n\n\tsysIPV6_ADDR_PREFERENCES = 0x48\n\n\tsysIPV6_PREFER_SRC_TMP            = 0x1\n\tsysIPV6_PREFER_SRC_PUBLIC         = 0x2\n\tsysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 
0x100\n\tsysIPV6_PREFER_SRC_COA            = 0x4\n\tsysIPV6_PREFER_SRC_HOME           = 0x400\n\tsysIPV6_PREFER_SRC_CGA            = 0x8\n\tsysIPV6_PREFER_SRC_NONCGA         = 0x800\n\n\tsysIPV6_MINHOPCOUNT = 0x49\n\n\tsysIPV6_ORIGDSTADDR     = 0x4a\n\tsysIPV6_RECVORIGDSTADDR = 0x4a\n\tsysIPV6_TRANSPARENT     = 0x4b\n\tsysIPV6_UNICAST_IF      = 0x4c\n\n\tsysICMPV6_FILTER = 0x1\n\n\tsysICMPV6_FILTER_BLOCK       = 0x1\n\tsysICMPV6_FILTER_PASS        = 0x2\n\tsysICMPV6_FILTER_BLOCKOTHERS = 0x3\n\tsysICMPV6_FILTER_PASSONLY    = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet6         = 0x1c\n\tsizeofInet6Pktinfo          = 0x14\n\tsizeofIPv6Mtuinfo           = 0x20\n\tsizeofIPv6FlowlabelReq      = 0x20\n\n\tsizeofIPv6Mreq       = 0x14\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n\n\tsizeofICMPv6Filter = 0x20\n\n\tsizeofSockFprog = 0x10\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex int32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6FlowlabelReq struct {\n\tDst        [16]byte /* in6_addr */\n\tLabel      uint32\n\tAction     uint8\n\tShare      uint8\n\tFlags      uint16\n\tExpires    uint16\n\tLinger     uint16\n\tX__flr_pad uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tIfindex   int32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpv6Filter struct {\n\tData [8]uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 
[6]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_linux_mips.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_ADDRFORM       = 0x1\n\tsysIPV6_2292PKTINFO    = 0x2\n\tsysIPV6_2292HOPOPTS    = 0x3\n\tsysIPV6_2292DSTOPTS    = 0x4\n\tsysIPV6_2292RTHDR      = 0x5\n\tsysIPV6_2292PKTOPTIONS = 0x6\n\tsysIPV6_CHECKSUM       = 0x7\n\tsysIPV6_2292HOPLIMIT   = 0x8\n\tsysIPV6_NEXTHOP        = 0x9\n\tsysIPV6_FLOWINFO       = 0xb\n\n\tsysIPV6_UNICAST_HOPS        = 0x10\n\tsysIPV6_MULTICAST_IF        = 0x11\n\tsysIPV6_MULTICAST_HOPS      = 0x12\n\tsysIPV6_MULTICAST_LOOP      = 0x13\n\tsysIPV6_ADD_MEMBERSHIP      = 0x14\n\tsysIPV6_DROP_MEMBERSHIP     = 0x15\n\tsysMCAST_JOIN_GROUP         = 0x2a\n\tsysMCAST_LEAVE_GROUP        = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x2f\n\tsysMCAST_BLOCK_SOURCE       = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE     = 0x2c\n\tsysMCAST_MSFILTER           = 0x30\n\tsysIPV6_ROUTER_ALERT        = 0x16\n\tsysIPV6_MTU_DISCOVER        = 0x17\n\tsysIPV6_MTU                 = 0x18\n\tsysIPV6_RECVERR             = 0x19\n\tsysIPV6_V6ONLY              = 0x1a\n\tsysIPV6_JOIN_ANYCAST        = 0x1b\n\tsysIPV6_LEAVE_ANYCAST       = 0x1c\n\n\tsysIPV6_FLOWLABEL_MGR = 0x20\n\tsysIPV6_FLOWINFO_SEND = 0x21\n\n\tsysIPV6_IPSEC_POLICY = 0x22\n\tsysIPV6_XFRM_POLICY  = 0x23\n\n\tsysIPV6_RECVPKTINFO  = 0x31\n\tsysIPV6_PKTINFO      = 0x32\n\tsysIPV6_RECVHOPLIMIT = 0x33\n\tsysIPV6_HOPLIMIT     = 0x34\n\tsysIPV6_RECVHOPOPTS  = 0x35\n\tsysIPV6_HOPOPTS      = 0x36\n\tsysIPV6_RTHDRDSTOPTS = 0x37\n\tsysIPV6_RECVRTHDR    = 0x38\n\tsysIPV6_RTHDR        = 0x39\n\tsysIPV6_RECVDSTOPTS  = 0x3a\n\tsysIPV6_DSTOPTS      = 0x3b\n\tsysIPV6_RECVPATHMTU  = 0x3c\n\tsysIPV6_PATHMTU      = 0x3d\n\tsysIPV6_DONTFRAG     = 0x3e\n\n\tsysIPV6_RECVTCLASS = 0x42\n\tsysIPV6_TCLASS     = 0x43\n\n\tsysIPV6_ADDR_PREFERENCES = 0x48\n\n\tsysIPV6_PREFER_SRC_TMP            = 0x1\n\tsysIPV6_PREFER_SRC_PUBLIC         = 0x2\n\tsysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 
0x100\n\tsysIPV6_PREFER_SRC_COA            = 0x4\n\tsysIPV6_PREFER_SRC_HOME           = 0x400\n\tsysIPV6_PREFER_SRC_CGA            = 0x8\n\tsysIPV6_PREFER_SRC_NONCGA         = 0x800\n\n\tsysIPV6_MINHOPCOUNT = 0x49\n\n\tsysIPV6_ORIGDSTADDR     = 0x4a\n\tsysIPV6_RECVORIGDSTADDR = 0x4a\n\tsysIPV6_TRANSPARENT     = 0x4b\n\tsysIPV6_UNICAST_IF      = 0x4c\n\n\tsysICMPV6_FILTER = 0x1\n\n\tsysICMPV6_FILTER_BLOCK       = 0x1\n\tsysICMPV6_FILTER_PASS        = 0x2\n\tsysICMPV6_FILTER_BLOCKOTHERS = 0x3\n\tsysICMPV6_FILTER_PASSONLY    = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet6         = 0x1c\n\tsizeofInet6Pktinfo          = 0x14\n\tsizeofIPv6Mtuinfo           = 0x20\n\tsizeofIPv6FlowlabelReq      = 0x20\n\n\tsizeofIPv6Mreq       = 0x14\n\tsizeofGroupReq       = 0x84\n\tsizeofGroupSourceReq = 0x104\n\n\tsizeofICMPv6Filter = 0x20\n\n\tsizeofSockFprog = 0x8\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex int32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6FlowlabelReq struct {\n\tDst        [16]byte /* in6_addr */\n\tLabel      uint32\n\tAction     uint8\n\tShare      uint8\n\tFlags      uint16\n\tExpires    uint16\n\tLinger     uint16\n\tX__flr_pad uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tIfindex   int32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpv6Filter struct {\n\tData [8]uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 [2]byte\n\tFilter    *sockFilter\n}\n\ntype 
sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_ADDRFORM       = 0x1\n\tsysIPV6_2292PKTINFO    = 0x2\n\tsysIPV6_2292HOPOPTS    = 0x3\n\tsysIPV6_2292DSTOPTS    = 0x4\n\tsysIPV6_2292RTHDR      = 0x5\n\tsysIPV6_2292PKTOPTIONS = 0x6\n\tsysIPV6_CHECKSUM       = 0x7\n\tsysIPV6_2292HOPLIMIT   = 0x8\n\tsysIPV6_NEXTHOP        = 0x9\n\tsysIPV6_FLOWINFO       = 0xb\n\n\tsysIPV6_UNICAST_HOPS        = 0x10\n\tsysIPV6_MULTICAST_IF        = 0x11\n\tsysIPV6_MULTICAST_HOPS      = 0x12\n\tsysIPV6_MULTICAST_LOOP      = 0x13\n\tsysIPV6_ADD_MEMBERSHIP      = 0x14\n\tsysIPV6_DROP_MEMBERSHIP     = 0x15\n\tsysMCAST_JOIN_GROUP         = 0x2a\n\tsysMCAST_LEAVE_GROUP        = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x2f\n\tsysMCAST_BLOCK_SOURCE       = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE     = 0x2c\n\tsysMCAST_MSFILTER           = 0x30\n\tsysIPV6_ROUTER_ALERT        = 0x16\n\tsysIPV6_MTU_DISCOVER        = 0x17\n\tsysIPV6_MTU                 = 0x18\n\tsysIPV6_RECVERR             = 0x19\n\tsysIPV6_V6ONLY              = 0x1a\n\tsysIPV6_JOIN_ANYCAST        = 0x1b\n\tsysIPV6_LEAVE_ANYCAST       = 0x1c\n\n\tsysIPV6_FLOWLABEL_MGR = 0x20\n\tsysIPV6_FLOWINFO_SEND = 0x21\n\n\tsysIPV6_IPSEC_POLICY = 0x22\n\tsysIPV6_XFRM_POLICY  = 0x23\n\n\tsysIPV6_RECVPKTINFO  = 0x31\n\tsysIPV6_PKTINFO      = 0x32\n\tsysIPV6_RECVHOPLIMIT = 0x33\n\tsysIPV6_HOPLIMIT     = 0x34\n\tsysIPV6_RECVHOPOPTS  = 0x35\n\tsysIPV6_HOPOPTS      = 0x36\n\tsysIPV6_RTHDRDSTOPTS = 0x37\n\tsysIPV6_RECVRTHDR    = 0x38\n\tsysIPV6_RTHDR        = 0x39\n\tsysIPV6_RECVDSTOPTS  = 0x3a\n\tsysIPV6_DSTOPTS      = 0x3b\n\tsysIPV6_RECVPATHMTU  = 0x3c\n\tsysIPV6_PATHMTU      = 0x3d\n\tsysIPV6_DONTFRAG     = 0x3e\n\n\tsysIPV6_RECVTCLASS = 0x42\n\tsysIPV6_TCLASS     = 0x43\n\n\tsysIPV6_ADDR_PREFERENCES = 0x48\n\n\tsysIPV6_PREFER_SRC_TMP            = 0x1\n\tsysIPV6_PREFER_SRC_PUBLIC         = 0x2\n\tsysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 
0x100\n\tsysIPV6_PREFER_SRC_COA            = 0x4\n\tsysIPV6_PREFER_SRC_HOME           = 0x400\n\tsysIPV6_PREFER_SRC_CGA            = 0x8\n\tsysIPV6_PREFER_SRC_NONCGA         = 0x800\n\n\tsysIPV6_MINHOPCOUNT = 0x49\n\n\tsysIPV6_ORIGDSTADDR     = 0x4a\n\tsysIPV6_RECVORIGDSTADDR = 0x4a\n\tsysIPV6_TRANSPARENT     = 0x4b\n\tsysIPV6_UNICAST_IF      = 0x4c\n\n\tsysICMPV6_FILTER = 0x1\n\n\tsysICMPV6_FILTER_BLOCK       = 0x1\n\tsysICMPV6_FILTER_PASS        = 0x2\n\tsysICMPV6_FILTER_BLOCKOTHERS = 0x3\n\tsysICMPV6_FILTER_PASSONLY    = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet6         = 0x1c\n\tsizeofInet6Pktinfo          = 0x14\n\tsizeofIPv6Mtuinfo           = 0x20\n\tsizeofIPv6FlowlabelReq      = 0x20\n\n\tsizeofIPv6Mreq       = 0x14\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n\n\tsizeofICMPv6Filter = 0x20\n\n\tsizeofSockFprog = 0x10\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex int32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6FlowlabelReq struct {\n\tDst        [16]byte /* in6_addr */\n\tLabel      uint32\n\tAction     uint8\n\tShare      uint8\n\tFlags      uint16\n\tExpires    uint16\n\tLinger     uint16\n\tX__flr_pad uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tIfindex   int32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpv6Filter struct {\n\tData [8]uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 
[6]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_ADDRFORM       = 0x1\n\tsysIPV6_2292PKTINFO    = 0x2\n\tsysIPV6_2292HOPOPTS    = 0x3\n\tsysIPV6_2292DSTOPTS    = 0x4\n\tsysIPV6_2292RTHDR      = 0x5\n\tsysIPV6_2292PKTOPTIONS = 0x6\n\tsysIPV6_CHECKSUM       = 0x7\n\tsysIPV6_2292HOPLIMIT   = 0x8\n\tsysIPV6_NEXTHOP        = 0x9\n\tsysIPV6_FLOWINFO       = 0xb\n\n\tsysIPV6_UNICAST_HOPS        = 0x10\n\tsysIPV6_MULTICAST_IF        = 0x11\n\tsysIPV6_MULTICAST_HOPS      = 0x12\n\tsysIPV6_MULTICAST_LOOP      = 0x13\n\tsysIPV6_ADD_MEMBERSHIP      = 0x14\n\tsysIPV6_DROP_MEMBERSHIP     = 0x15\n\tsysMCAST_JOIN_GROUP         = 0x2a\n\tsysMCAST_LEAVE_GROUP        = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x2f\n\tsysMCAST_BLOCK_SOURCE       = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE     = 0x2c\n\tsysMCAST_MSFILTER           = 0x30\n\tsysIPV6_ROUTER_ALERT        = 0x16\n\tsysIPV6_MTU_DISCOVER        = 0x17\n\tsysIPV6_MTU                 = 0x18\n\tsysIPV6_RECVERR             = 0x19\n\tsysIPV6_V6ONLY              = 0x1a\n\tsysIPV6_JOIN_ANYCAST        = 0x1b\n\tsysIPV6_LEAVE_ANYCAST       = 0x1c\n\n\tsysIPV6_FLOWLABEL_MGR = 0x20\n\tsysIPV6_FLOWINFO_SEND = 0x21\n\n\tsysIPV6_IPSEC_POLICY = 0x22\n\tsysIPV6_XFRM_POLICY  = 0x23\n\n\tsysIPV6_RECVPKTINFO  = 0x31\n\tsysIPV6_PKTINFO      = 0x32\n\tsysIPV6_RECVHOPLIMIT = 0x33\n\tsysIPV6_HOPLIMIT     = 0x34\n\tsysIPV6_RECVHOPOPTS  = 0x35\n\tsysIPV6_HOPOPTS      = 0x36\n\tsysIPV6_RTHDRDSTOPTS = 0x37\n\tsysIPV6_RECVRTHDR    = 0x38\n\tsysIPV6_RTHDR        = 0x39\n\tsysIPV6_RECVDSTOPTS  = 0x3a\n\tsysIPV6_DSTOPTS      = 0x3b\n\tsysIPV6_RECVPATHMTU  = 0x3c\n\tsysIPV6_PATHMTU      = 0x3d\n\tsysIPV6_DONTFRAG     = 0x3e\n\n\tsysIPV6_RECVTCLASS = 0x42\n\tsysIPV6_TCLASS     = 0x43\n\n\tsysIPV6_ADDR_PREFERENCES = 0x48\n\n\tsysIPV6_PREFER_SRC_TMP            = 0x1\n\tsysIPV6_PREFER_SRC_PUBLIC         = 0x2\n\tsysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 
0x100\n\tsysIPV6_PREFER_SRC_COA            = 0x4\n\tsysIPV6_PREFER_SRC_HOME           = 0x400\n\tsysIPV6_PREFER_SRC_CGA            = 0x8\n\tsysIPV6_PREFER_SRC_NONCGA         = 0x800\n\n\tsysIPV6_MINHOPCOUNT = 0x49\n\n\tsysIPV6_ORIGDSTADDR     = 0x4a\n\tsysIPV6_RECVORIGDSTADDR = 0x4a\n\tsysIPV6_TRANSPARENT     = 0x4b\n\tsysIPV6_UNICAST_IF      = 0x4c\n\n\tsysICMPV6_FILTER = 0x1\n\n\tsysICMPV6_FILTER_BLOCK       = 0x1\n\tsysICMPV6_FILTER_PASS        = 0x2\n\tsysICMPV6_FILTER_BLOCKOTHERS = 0x3\n\tsysICMPV6_FILTER_PASSONLY    = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet6         = 0x1c\n\tsizeofInet6Pktinfo          = 0x14\n\tsizeofIPv6Mtuinfo           = 0x20\n\tsizeofIPv6FlowlabelReq      = 0x20\n\n\tsizeofIPv6Mreq       = 0x14\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n\n\tsizeofICMPv6Filter = 0x20\n\n\tsizeofSockFprog = 0x10\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex int32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6FlowlabelReq struct {\n\tDst        [16]byte /* in6_addr */\n\tLabel      uint32\n\tAction     uint8\n\tShare      uint8\n\tFlags      uint16\n\tExpires    uint16\n\tLinger     uint16\n\tX__flr_pad uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tIfindex   int32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpv6Filter struct {\n\tData [8]uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 
[6]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_ADDRFORM       = 0x1\n\tsysIPV6_2292PKTINFO    = 0x2\n\tsysIPV6_2292HOPOPTS    = 0x3\n\tsysIPV6_2292DSTOPTS    = 0x4\n\tsysIPV6_2292RTHDR      = 0x5\n\tsysIPV6_2292PKTOPTIONS = 0x6\n\tsysIPV6_CHECKSUM       = 0x7\n\tsysIPV6_2292HOPLIMIT   = 0x8\n\tsysIPV6_NEXTHOP        = 0x9\n\tsysIPV6_FLOWINFO       = 0xb\n\n\tsysIPV6_UNICAST_HOPS        = 0x10\n\tsysIPV6_MULTICAST_IF        = 0x11\n\tsysIPV6_MULTICAST_HOPS      = 0x12\n\tsysIPV6_MULTICAST_LOOP      = 0x13\n\tsysIPV6_ADD_MEMBERSHIP      = 0x14\n\tsysIPV6_DROP_MEMBERSHIP     = 0x15\n\tsysMCAST_JOIN_GROUP         = 0x2a\n\tsysMCAST_LEAVE_GROUP        = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x2f\n\tsysMCAST_BLOCK_SOURCE       = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE     = 0x2c\n\tsysMCAST_MSFILTER           = 0x30\n\tsysIPV6_ROUTER_ALERT        = 0x16\n\tsysIPV6_MTU_DISCOVER        = 0x17\n\tsysIPV6_MTU                 = 0x18\n\tsysIPV6_RECVERR             = 0x19\n\tsysIPV6_V6ONLY              = 0x1a\n\tsysIPV6_JOIN_ANYCAST        = 0x1b\n\tsysIPV6_LEAVE_ANYCAST       = 0x1c\n\n\tsysIPV6_FLOWLABEL_MGR = 0x20\n\tsysIPV6_FLOWINFO_SEND = 0x21\n\n\tsysIPV6_IPSEC_POLICY = 0x22\n\tsysIPV6_XFRM_POLICY  = 0x23\n\n\tsysIPV6_RECVPKTINFO  = 0x31\n\tsysIPV6_PKTINFO      = 0x32\n\tsysIPV6_RECVHOPLIMIT = 0x33\n\tsysIPV6_HOPLIMIT     = 0x34\n\tsysIPV6_RECVHOPOPTS  = 0x35\n\tsysIPV6_HOPOPTS      = 0x36\n\tsysIPV6_RTHDRDSTOPTS = 0x37\n\tsysIPV6_RECVRTHDR    = 0x38\n\tsysIPV6_RTHDR        = 0x39\n\tsysIPV6_RECVDSTOPTS  = 0x3a\n\tsysIPV6_DSTOPTS      = 0x3b\n\tsysIPV6_RECVPATHMTU  = 0x3c\n\tsysIPV6_PATHMTU      = 0x3d\n\tsysIPV6_DONTFRAG     = 0x3e\n\n\tsysIPV6_RECVTCLASS = 0x42\n\tsysIPV6_TCLASS     = 0x43\n\n\tsysIPV6_ADDR_PREFERENCES = 0x48\n\n\tsysIPV6_PREFER_SRC_TMP            = 0x1\n\tsysIPV6_PREFER_SRC_PUBLIC         = 0x2\n\tsysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 
0x100\n\tsysIPV6_PREFER_SRC_COA            = 0x4\n\tsysIPV6_PREFER_SRC_HOME           = 0x400\n\tsysIPV6_PREFER_SRC_CGA            = 0x8\n\tsysIPV6_PREFER_SRC_NONCGA         = 0x800\n\n\tsysIPV6_MINHOPCOUNT = 0x49\n\n\tsysIPV6_ORIGDSTADDR     = 0x4a\n\tsysIPV6_RECVORIGDSTADDR = 0x4a\n\tsysIPV6_TRANSPARENT     = 0x4b\n\tsysIPV6_UNICAST_IF      = 0x4c\n\n\tsysICMPV6_FILTER = 0x1\n\n\tsysICMPV6_FILTER_BLOCK       = 0x1\n\tsysICMPV6_FILTER_PASS        = 0x2\n\tsysICMPV6_FILTER_BLOCKOTHERS = 0x3\n\tsysICMPV6_FILTER_PASSONLY    = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet6         = 0x1c\n\tsizeofInet6Pktinfo          = 0x14\n\tsizeofIPv6Mtuinfo           = 0x20\n\tsizeofIPv6FlowlabelReq      = 0x20\n\n\tsizeofIPv6Mreq       = 0x14\n\tsizeofGroupReq       = 0x84\n\tsizeofGroupSourceReq = 0x104\n\n\tsizeofICMPv6Filter = 0x20\n\n\tsizeofSockFprog = 0x8\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex int32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6FlowlabelReq struct {\n\tDst        [16]byte /* in6_addr */\n\tLabel      uint32\n\tAction     uint8\n\tShare      uint8\n\tFlags      uint16\n\tExpires    uint16\n\tLinger     uint16\n\tX__flr_pad uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tIfindex   int32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpv6Filter struct {\n\tData [8]uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 [2]byte\n\tFilter    *sockFilter\n}\n\ntype 
sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_ADDRFORM       = 0x1\n\tsysIPV6_2292PKTINFO    = 0x2\n\tsysIPV6_2292HOPOPTS    = 0x3\n\tsysIPV6_2292DSTOPTS    = 0x4\n\tsysIPV6_2292RTHDR      = 0x5\n\tsysIPV6_2292PKTOPTIONS = 0x6\n\tsysIPV6_CHECKSUM       = 0x7\n\tsysIPV6_2292HOPLIMIT   = 0x8\n\tsysIPV6_NEXTHOP        = 0x9\n\tsysIPV6_FLOWINFO       = 0xb\n\n\tsysIPV6_UNICAST_HOPS        = 0x10\n\tsysIPV6_MULTICAST_IF        = 0x11\n\tsysIPV6_MULTICAST_HOPS      = 0x12\n\tsysIPV6_MULTICAST_LOOP      = 0x13\n\tsysIPV6_ADD_MEMBERSHIP      = 0x14\n\tsysIPV6_DROP_MEMBERSHIP     = 0x15\n\tsysMCAST_JOIN_GROUP         = 0x2a\n\tsysMCAST_LEAVE_GROUP        = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x2f\n\tsysMCAST_BLOCK_SOURCE       = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE     = 0x2c\n\tsysMCAST_MSFILTER           = 0x30\n\tsysIPV6_ROUTER_ALERT        = 0x16\n\tsysIPV6_MTU_DISCOVER        = 0x17\n\tsysIPV6_MTU                 = 0x18\n\tsysIPV6_RECVERR             = 0x19\n\tsysIPV6_V6ONLY              = 0x1a\n\tsysIPV6_JOIN_ANYCAST        = 0x1b\n\tsysIPV6_LEAVE_ANYCAST       = 0x1c\n\n\tsysIPV6_FLOWLABEL_MGR = 0x20\n\tsysIPV6_FLOWINFO_SEND = 0x21\n\n\tsysIPV6_IPSEC_POLICY = 0x22\n\tsysIPV6_XFRM_POLICY  = 0x23\n\n\tsysIPV6_RECVPKTINFO  = 0x31\n\tsysIPV6_PKTINFO      = 0x32\n\tsysIPV6_RECVHOPLIMIT = 0x33\n\tsysIPV6_HOPLIMIT     = 0x34\n\tsysIPV6_RECVHOPOPTS  = 0x35\n\tsysIPV6_HOPOPTS      = 0x36\n\tsysIPV6_RTHDRDSTOPTS = 0x37\n\tsysIPV6_RECVRTHDR    = 0x38\n\tsysIPV6_RTHDR        = 0x39\n\tsysIPV6_RECVDSTOPTS  = 0x3a\n\tsysIPV6_DSTOPTS      = 0x3b\n\tsysIPV6_RECVPATHMTU  = 0x3c\n\tsysIPV6_PATHMTU      = 0x3d\n\tsysIPV6_DONTFRAG     = 0x3e\n\n\tsysIPV6_RECVTCLASS = 0x42\n\tsysIPV6_TCLASS     = 0x43\n\n\tsysIPV6_ADDR_PREFERENCES = 0x48\n\n\tsysIPV6_PREFER_SRC_TMP            = 0x1\n\tsysIPV6_PREFER_SRC_PUBLIC         = 0x2\n\tsysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 
0x100\n\tsysIPV6_PREFER_SRC_COA            = 0x4\n\tsysIPV6_PREFER_SRC_HOME           = 0x400\n\tsysIPV6_PREFER_SRC_CGA            = 0x8\n\tsysIPV6_PREFER_SRC_NONCGA         = 0x800\n\n\tsysIPV6_MINHOPCOUNT = 0x49\n\n\tsysIPV6_ORIGDSTADDR     = 0x4a\n\tsysIPV6_RECVORIGDSTADDR = 0x4a\n\tsysIPV6_TRANSPARENT     = 0x4b\n\tsysIPV6_UNICAST_IF      = 0x4c\n\n\tsysICMPV6_FILTER = 0x1\n\n\tsysICMPV6_FILTER_BLOCK       = 0x1\n\tsysICMPV6_FILTER_PASS        = 0x2\n\tsysICMPV6_FILTER_BLOCKOTHERS = 0x3\n\tsysICMPV6_FILTER_PASSONLY    = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet6         = 0x1c\n\tsizeofInet6Pktinfo          = 0x14\n\tsizeofIPv6Mtuinfo           = 0x20\n\tsizeofIPv6FlowlabelReq      = 0x20\n\n\tsizeofIPv6Mreq       = 0x14\n\tsizeofGroupReq       = 0x84\n\tsizeofGroupSourceReq = 0x104\n\n\tsizeofICMPv6Filter = 0x20\n\n\tsizeofSockFprog = 0x8\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]uint8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex int32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6FlowlabelReq struct {\n\tDst        [16]byte /* in6_addr */\n\tLabel      uint32\n\tAction     uint8\n\tShare      uint8\n\tFlags      uint16\n\tExpires    uint16\n\tLinger     uint16\n\tX__flr_pad uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tIfindex   int32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpv6Filter struct {\n\tData [8]uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 [2]byte\n\tFilter    *sockFilter\n}\n\ntype 
sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_ADDRFORM       = 0x1\n\tsysIPV6_2292PKTINFO    = 0x2\n\tsysIPV6_2292HOPOPTS    = 0x3\n\tsysIPV6_2292DSTOPTS    = 0x4\n\tsysIPV6_2292RTHDR      = 0x5\n\tsysIPV6_2292PKTOPTIONS = 0x6\n\tsysIPV6_CHECKSUM       = 0x7\n\tsysIPV6_2292HOPLIMIT   = 0x8\n\tsysIPV6_NEXTHOP        = 0x9\n\tsysIPV6_FLOWINFO       = 0xb\n\n\tsysIPV6_UNICAST_HOPS        = 0x10\n\tsysIPV6_MULTICAST_IF        = 0x11\n\tsysIPV6_MULTICAST_HOPS      = 0x12\n\tsysIPV6_MULTICAST_LOOP      = 0x13\n\tsysIPV6_ADD_MEMBERSHIP      = 0x14\n\tsysIPV6_DROP_MEMBERSHIP     = 0x15\n\tsysMCAST_JOIN_GROUP         = 0x2a\n\tsysMCAST_LEAVE_GROUP        = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x2f\n\tsysMCAST_BLOCK_SOURCE       = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE     = 0x2c\n\tsysMCAST_MSFILTER           = 0x30\n\tsysIPV6_ROUTER_ALERT        = 0x16\n\tsysIPV6_MTU_DISCOVER        = 0x17\n\tsysIPV6_MTU                 = 0x18\n\tsysIPV6_RECVERR             = 0x19\n\tsysIPV6_V6ONLY              = 0x1a\n\tsysIPV6_JOIN_ANYCAST        = 0x1b\n\tsysIPV6_LEAVE_ANYCAST       = 0x1c\n\n\tsysIPV6_FLOWLABEL_MGR = 0x20\n\tsysIPV6_FLOWINFO_SEND = 0x21\n\n\tsysIPV6_IPSEC_POLICY = 0x22\n\tsysIPV6_XFRM_POLICY  = 0x23\n\n\tsysIPV6_RECVPKTINFO  = 0x31\n\tsysIPV6_PKTINFO      = 0x32\n\tsysIPV6_RECVHOPLIMIT = 0x33\n\tsysIPV6_HOPLIMIT     = 0x34\n\tsysIPV6_RECVHOPOPTS  = 0x35\n\tsysIPV6_HOPOPTS      = 0x36\n\tsysIPV6_RTHDRDSTOPTS = 0x37\n\tsysIPV6_RECVRTHDR    = 0x38\n\tsysIPV6_RTHDR        = 0x39\n\tsysIPV6_RECVDSTOPTS  = 0x3a\n\tsysIPV6_DSTOPTS      = 0x3b\n\tsysIPV6_RECVPATHMTU  = 0x3c\n\tsysIPV6_PATHMTU      = 0x3d\n\tsysIPV6_DONTFRAG     = 0x3e\n\n\tsysIPV6_RECVTCLASS = 0x42\n\tsysIPV6_TCLASS     = 0x43\n\n\tsysIPV6_ADDR_PREFERENCES = 0x48\n\n\tsysIPV6_PREFER_SRC_TMP            = 0x1\n\tsysIPV6_PREFER_SRC_PUBLIC         = 0x2\n\tsysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 
0x100\n\tsysIPV6_PREFER_SRC_COA            = 0x4\n\tsysIPV6_PREFER_SRC_HOME           = 0x400\n\tsysIPV6_PREFER_SRC_CGA            = 0x8\n\tsysIPV6_PREFER_SRC_NONCGA         = 0x800\n\n\tsysIPV6_MINHOPCOUNT = 0x49\n\n\tsysIPV6_ORIGDSTADDR     = 0x4a\n\tsysIPV6_RECVORIGDSTADDR = 0x4a\n\tsysIPV6_TRANSPARENT     = 0x4b\n\tsysIPV6_UNICAST_IF      = 0x4c\n\n\tsysICMPV6_FILTER = 0x1\n\n\tsysICMPV6_FILTER_BLOCK       = 0x1\n\tsysICMPV6_FILTER_PASS        = 0x2\n\tsysICMPV6_FILTER_BLOCKOTHERS = 0x3\n\tsysICMPV6_FILTER_PASSONLY    = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet6         = 0x1c\n\tsizeofInet6Pktinfo          = 0x14\n\tsizeofIPv6Mtuinfo           = 0x20\n\tsizeofIPv6FlowlabelReq      = 0x20\n\n\tsizeofIPv6Mreq       = 0x14\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n\n\tsizeofICMPv6Filter = 0x20\n\n\tsizeofSockFprog = 0x10\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex int32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6FlowlabelReq struct {\n\tDst        [16]byte /* in6_addr */\n\tLabel      uint32\n\tAction     uint8\n\tShare      uint8\n\tFlags      uint16\n\tExpires    uint16\n\tLinger     uint16\n\tX__flr_pad uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tIfindex   int32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpv6Filter struct {\n\tData [8]uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 
[6]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_ADDRFORM       = 0x1\n\tsysIPV6_2292PKTINFO    = 0x2\n\tsysIPV6_2292HOPOPTS    = 0x3\n\tsysIPV6_2292DSTOPTS    = 0x4\n\tsysIPV6_2292RTHDR      = 0x5\n\tsysIPV6_2292PKTOPTIONS = 0x6\n\tsysIPV6_CHECKSUM       = 0x7\n\tsysIPV6_2292HOPLIMIT   = 0x8\n\tsysIPV6_NEXTHOP        = 0x9\n\tsysIPV6_FLOWINFO       = 0xb\n\n\tsysIPV6_UNICAST_HOPS        = 0x10\n\tsysIPV6_MULTICAST_IF        = 0x11\n\tsysIPV6_MULTICAST_HOPS      = 0x12\n\tsysIPV6_MULTICAST_LOOP      = 0x13\n\tsysIPV6_ADD_MEMBERSHIP      = 0x14\n\tsysIPV6_DROP_MEMBERSHIP     = 0x15\n\tsysMCAST_JOIN_GROUP         = 0x2a\n\tsysMCAST_LEAVE_GROUP        = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x2f\n\tsysMCAST_BLOCK_SOURCE       = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE     = 0x2c\n\tsysMCAST_MSFILTER           = 0x30\n\tsysIPV6_ROUTER_ALERT        = 0x16\n\tsysIPV6_MTU_DISCOVER        = 0x17\n\tsysIPV6_MTU                 = 0x18\n\tsysIPV6_RECVERR             = 0x19\n\tsysIPV6_V6ONLY              = 0x1a\n\tsysIPV6_JOIN_ANYCAST        = 0x1b\n\tsysIPV6_LEAVE_ANYCAST       = 0x1c\n\n\tsysIPV6_FLOWLABEL_MGR = 0x20\n\tsysIPV6_FLOWINFO_SEND = 0x21\n\n\tsysIPV6_IPSEC_POLICY = 0x22\n\tsysIPV6_XFRM_POLICY  = 0x23\n\n\tsysIPV6_RECVPKTINFO  = 0x31\n\tsysIPV6_PKTINFO      = 0x32\n\tsysIPV6_RECVHOPLIMIT = 0x33\n\tsysIPV6_HOPLIMIT     = 0x34\n\tsysIPV6_RECVHOPOPTS  = 0x35\n\tsysIPV6_HOPOPTS      = 0x36\n\tsysIPV6_RTHDRDSTOPTS = 0x37\n\tsysIPV6_RECVRTHDR    = 0x38\n\tsysIPV6_RTHDR        = 0x39\n\tsysIPV6_RECVDSTOPTS  = 0x3a\n\tsysIPV6_DSTOPTS      = 0x3b\n\tsysIPV6_RECVPATHMTU  = 0x3c\n\tsysIPV6_PATHMTU      = 0x3d\n\tsysIPV6_DONTFRAG     = 0x3e\n\n\tsysIPV6_RECVTCLASS = 0x42\n\tsysIPV6_TCLASS     = 0x43\n\n\tsysIPV6_ADDR_PREFERENCES = 0x48\n\n\tsysIPV6_PREFER_SRC_TMP            = 0x1\n\tsysIPV6_PREFER_SRC_PUBLIC         = 0x2\n\tsysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 
0x100\n\tsysIPV6_PREFER_SRC_COA            = 0x4\n\tsysIPV6_PREFER_SRC_HOME           = 0x400\n\tsysIPV6_PREFER_SRC_CGA            = 0x8\n\tsysIPV6_PREFER_SRC_NONCGA         = 0x800\n\n\tsysIPV6_MINHOPCOUNT = 0x49\n\n\tsysIPV6_ORIGDSTADDR     = 0x4a\n\tsysIPV6_RECVORIGDSTADDR = 0x4a\n\tsysIPV6_TRANSPARENT     = 0x4b\n\tsysIPV6_UNICAST_IF      = 0x4c\n\n\tsysICMPV6_FILTER = 0x1\n\n\tsysICMPV6_FILTER_BLOCK       = 0x1\n\tsysICMPV6_FILTER_PASS        = 0x2\n\tsysICMPV6_FILTER_BLOCKOTHERS = 0x3\n\tsysICMPV6_FILTER_PASSONLY    = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet6         = 0x1c\n\tsizeofInet6Pktinfo          = 0x14\n\tsizeofIPv6Mtuinfo           = 0x20\n\tsizeofIPv6FlowlabelReq      = 0x20\n\n\tsizeofIPv6Mreq       = 0x14\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n\n\tsizeofICMPv6Filter = 0x20\n\n\tsizeofSockFprog = 0x10\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex int32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6FlowlabelReq struct {\n\tDst        [16]byte /* in6_addr */\n\tLabel      uint32\n\tAction     uint8\n\tShare      uint8\n\tFlags      uint16\n\tExpires    uint16\n\tLinger     uint16\n\tX__flr_pad uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tIfindex   int32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpv6Filter struct {\n\tData [8]uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 
[6]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_linux.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_ADDRFORM       = 0x1\n\tsysIPV6_2292PKTINFO    = 0x2\n\tsysIPV6_2292HOPOPTS    = 0x3\n\tsysIPV6_2292DSTOPTS    = 0x4\n\tsysIPV6_2292RTHDR      = 0x5\n\tsysIPV6_2292PKTOPTIONS = 0x6\n\tsysIPV6_CHECKSUM       = 0x7\n\tsysIPV6_2292HOPLIMIT   = 0x8\n\tsysIPV6_NEXTHOP        = 0x9\n\tsysIPV6_FLOWINFO       = 0xb\n\n\tsysIPV6_UNICAST_HOPS        = 0x10\n\tsysIPV6_MULTICAST_IF        = 0x11\n\tsysIPV6_MULTICAST_HOPS      = 0x12\n\tsysIPV6_MULTICAST_LOOP      = 0x13\n\tsysIPV6_ADD_MEMBERSHIP      = 0x14\n\tsysIPV6_DROP_MEMBERSHIP     = 0x15\n\tsysMCAST_JOIN_GROUP         = 0x2a\n\tsysMCAST_LEAVE_GROUP        = 0x2d\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x2e\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x2f\n\tsysMCAST_BLOCK_SOURCE       = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE     = 0x2c\n\tsysMCAST_MSFILTER           = 0x30\n\tsysIPV6_ROUTER_ALERT        = 0x16\n\tsysIPV6_MTU_DISCOVER        = 0x17\n\tsysIPV6_MTU                 = 0x18\n\tsysIPV6_RECVERR             = 0x19\n\tsysIPV6_V6ONLY              = 0x1a\n\tsysIPV6_JOIN_ANYCAST        = 0x1b\n\tsysIPV6_LEAVE_ANYCAST       = 0x1c\n\n\tsysIPV6_FLOWLABEL_MGR = 0x20\n\tsysIPV6_FLOWINFO_SEND = 0x21\n\n\tsysIPV6_IPSEC_POLICY = 0x22\n\tsysIPV6_XFRM_POLICY  = 0x23\n\n\tsysIPV6_RECVPKTINFO  = 0x31\n\tsysIPV6_PKTINFO      = 0x32\n\tsysIPV6_RECVHOPLIMIT = 0x33\n\tsysIPV6_HOPLIMIT     = 0x34\n\tsysIPV6_RECVHOPOPTS  = 0x35\n\tsysIPV6_HOPOPTS      = 0x36\n\tsysIPV6_RTHDRDSTOPTS = 0x37\n\tsysIPV6_RECVRTHDR    = 0x38\n\tsysIPV6_RTHDR        = 0x39\n\tsysIPV6_RECVDSTOPTS  = 0x3a\n\tsysIPV6_DSTOPTS      = 0x3b\n\tsysIPV6_RECVPATHMTU  = 0x3c\n\tsysIPV6_PATHMTU      = 0x3d\n\tsysIPV6_DONTFRAG     = 0x3e\n\n\tsysIPV6_RECVTCLASS = 0x42\n\tsysIPV6_TCLASS     = 0x43\n\n\tsysIPV6_ADDR_PREFERENCES = 0x48\n\n\tsysIPV6_PREFER_SRC_TMP            = 0x1\n\tsysIPV6_PREFER_SRC_PUBLIC         = 0x2\n\tsysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 
0x100\n\tsysIPV6_PREFER_SRC_COA            = 0x4\n\tsysIPV6_PREFER_SRC_HOME           = 0x400\n\tsysIPV6_PREFER_SRC_CGA            = 0x8\n\tsysIPV6_PREFER_SRC_NONCGA         = 0x800\n\n\tsysIPV6_MINHOPCOUNT = 0x49\n\n\tsysIPV6_ORIGDSTADDR     = 0x4a\n\tsysIPV6_RECVORIGDSTADDR = 0x4a\n\tsysIPV6_TRANSPARENT     = 0x4b\n\tsysIPV6_UNICAST_IF      = 0x4c\n\n\tsysICMPV6_FILTER = 0x1\n\n\tsysICMPV6_FILTER_BLOCK       = 0x1\n\tsysICMPV6_FILTER_PASS        = 0x2\n\tsysICMPV6_FILTER_BLOCKOTHERS = 0x3\n\tsysICMPV6_FILTER_PASSONLY    = 0x4\n\n\tsysSOL_SOCKET       = 0x1\n\tsysSO_ATTACH_FILTER = 0x1a\n\n\tsizeofKernelSockaddrStorage = 0x80\n\tsizeofSockaddrInet6         = 0x1c\n\tsizeofInet6Pktinfo          = 0x14\n\tsizeofIPv6Mtuinfo           = 0x20\n\tsizeofIPv6FlowlabelReq      = 0x20\n\n\tsizeofIPv6Mreq       = 0x14\n\tsizeofGroupReq       = 0x88\n\tsizeofGroupSourceReq = 0x108\n\n\tsizeofICMPv6Filter = 0x20\n\n\tsizeofSockFprog = 0x10\n)\n\ntype kernelSockaddrStorage struct {\n\tFamily  uint16\n\tX__data [126]int8\n}\n\ntype sockaddrInet6 struct {\n\tFamily   uint16\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex int32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6FlowlabelReq struct {\n\tDst        [16]byte /* in6_addr */\n\tLabel      uint32\n\tAction     uint8\n\tShare      uint8\n\tFlags      uint16\n\tExpires    uint16\n\tLinger     uint16\n\tX__flr_pad uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tIfindex   int32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [4]byte\n\tGroup     kernelSockaddrStorage\n\tSource    kernelSockaddrStorage\n}\n\ntype icmpv6Filter struct {\n\tData [8]uint32\n}\n\ntype sockFProg struct {\n\tLen       uint16\n\tPad_cgo_0 
[6]byte\n\tFilter    *sockFilter\n}\n\ntype sockFilter struct {\n\tCode uint16\n\tJt   uint8\n\tJf   uint8\n\tK    uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_netbsd.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_netbsd.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_UNICAST_HOPS   = 0x4\n\tsysIPV6_MULTICAST_IF   = 0x9\n\tsysIPV6_MULTICAST_HOPS = 0xa\n\tsysIPV6_MULTICAST_LOOP = 0xb\n\tsysIPV6_JOIN_GROUP     = 0xc\n\tsysIPV6_LEAVE_GROUP    = 0xd\n\tsysIPV6_PORTRANGE      = 0xe\n\tsysICMP6_FILTER        = 0x12\n\n\tsysIPV6_CHECKSUM = 0x1a\n\tsysIPV6_V6ONLY   = 0x1b\n\n\tsysIPV6_IPSEC_POLICY = 0x1c\n\n\tsysIPV6_RTHDRDSTOPTS = 0x23\n\n\tsysIPV6_RECVPKTINFO  = 0x24\n\tsysIPV6_RECVHOPLIMIT = 0x25\n\tsysIPV6_RECVRTHDR    = 0x26\n\tsysIPV6_RECVHOPOPTS  = 0x27\n\tsysIPV6_RECVDSTOPTS  = 0x28\n\n\tsysIPV6_USE_MIN_MTU = 0x2a\n\tsysIPV6_RECVPATHMTU = 0x2b\n\tsysIPV6_PATHMTU     = 0x2c\n\n\tsysIPV6_PKTINFO  = 0x2e\n\tsysIPV6_HOPLIMIT = 0x2f\n\tsysIPV6_NEXTHOP  = 0x30\n\tsysIPV6_HOPOPTS  = 0x31\n\tsysIPV6_DSTOPTS  = 0x32\n\tsysIPV6_RTHDR    = 0x33\n\n\tsysIPV6_RECVTCLASS = 0x39\n\n\tsysIPV6_TCLASS   = 0x3d\n\tsysIPV6_DONTFRAG = 0x3e\n\n\tsysIPV6_PORTRANGE_DEFAULT = 0x0\n\tsysIPV6_PORTRANGE_HIGH    = 0x1\n\tsysIPV6_PORTRANGE_LOW     = 0x2\n\n\tsizeofSockaddrInet6 = 0x1c\n\tsizeofInet6Pktinfo  = 0x14\n\tsizeofIPv6Mtuinfo   = 0x20\n\n\tsizeofIPv6Mreq = 0x14\n\n\tsizeofICMPv6Filter = 0x20\n)\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex uint32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tInterface uint32\n}\n\ntype icmpv6Filter struct {\n\tFilt [8]uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_openbsd.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_openbsd.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_UNICAST_HOPS   = 0x4\n\tsysIPV6_MULTICAST_IF   = 0x9\n\tsysIPV6_MULTICAST_HOPS = 0xa\n\tsysIPV6_MULTICAST_LOOP = 0xb\n\tsysIPV6_JOIN_GROUP     = 0xc\n\tsysIPV6_LEAVE_GROUP    = 0xd\n\tsysIPV6_PORTRANGE      = 0xe\n\tsysICMP6_FILTER        = 0x12\n\n\tsysIPV6_CHECKSUM = 0x1a\n\tsysIPV6_V6ONLY   = 0x1b\n\n\tsysIPV6_RTHDRDSTOPTS = 0x23\n\n\tsysIPV6_RECVPKTINFO  = 0x24\n\tsysIPV6_RECVHOPLIMIT = 0x25\n\tsysIPV6_RECVRTHDR    = 0x26\n\tsysIPV6_RECVHOPOPTS  = 0x27\n\tsysIPV6_RECVDSTOPTS  = 0x28\n\n\tsysIPV6_USE_MIN_MTU = 0x2a\n\tsysIPV6_RECVPATHMTU = 0x2b\n\n\tsysIPV6_PATHMTU = 0x2c\n\n\tsysIPV6_PKTINFO  = 0x2e\n\tsysIPV6_HOPLIMIT = 0x2f\n\tsysIPV6_NEXTHOP  = 0x30\n\tsysIPV6_HOPOPTS  = 0x31\n\tsysIPV6_DSTOPTS  = 0x32\n\tsysIPV6_RTHDR    = 0x33\n\n\tsysIPV6_AUTH_LEVEL        = 0x35\n\tsysIPV6_ESP_TRANS_LEVEL   = 0x36\n\tsysIPV6_ESP_NETWORK_LEVEL = 0x37\n\tsysIPSEC6_OUTSA           = 0x38\n\tsysIPV6_RECVTCLASS        = 0x39\n\n\tsysIPV6_AUTOFLOWLABEL = 0x3b\n\tsysIPV6_IPCOMP_LEVEL  = 0x3c\n\n\tsysIPV6_TCLASS   = 0x3d\n\tsysIPV6_DONTFRAG = 0x3e\n\tsysIPV6_PIPEX    = 0x3f\n\n\tsysIPV6_RTABLE = 0x1021\n\n\tsysIPV6_PORTRANGE_DEFAULT = 0x0\n\tsysIPV6_PORTRANGE_HIGH    = 0x1\n\tsysIPV6_PORTRANGE_LOW     = 0x2\n\n\tsizeofSockaddrInet6 = 0x1c\n\tsizeofInet6Pktinfo  = 0x14\n\tsizeofIPv6Mtuinfo   = 0x20\n\n\tsizeofIPv6Mreq = 0x14\n\n\tsizeofICMPv6Filter = 0x20\n)\n\ntype sockaddrInet6 struct {\n\tLen      uint8\n\tFamily   uint8\n\tPort     uint16\n\tFlowinfo uint32\n\tAddr     [16]byte /* in6_addr */\n\tScope_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex uint32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tInterface uint32\n}\n\ntype icmpv6Filter struct {\n\tFilt [8]uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/ipv6/zsys_solaris.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_solaris.go\n\npackage ipv6\n\nconst (\n\tsysIPV6_UNICAST_HOPS   = 0x5\n\tsysIPV6_MULTICAST_IF   = 0x6\n\tsysIPV6_MULTICAST_HOPS = 0x7\n\tsysIPV6_MULTICAST_LOOP = 0x8\n\tsysIPV6_JOIN_GROUP     = 0x9\n\tsysIPV6_LEAVE_GROUP    = 0xa\n\n\tsysIPV6_PKTINFO = 0xb\n\n\tsysIPV6_HOPLIMIT = 0xc\n\tsysIPV6_NEXTHOP  = 0xd\n\tsysIPV6_HOPOPTS  = 0xe\n\tsysIPV6_DSTOPTS  = 0xf\n\n\tsysIPV6_RTHDR        = 0x10\n\tsysIPV6_RTHDRDSTOPTS = 0x11\n\n\tsysIPV6_RECVPKTINFO  = 0x12\n\tsysIPV6_RECVHOPLIMIT = 0x13\n\tsysIPV6_RECVHOPOPTS  = 0x14\n\n\tsysIPV6_RECVRTHDR = 0x16\n\n\tsysIPV6_RECVRTHDRDSTOPTS = 0x17\n\n\tsysIPV6_CHECKSUM        = 0x18\n\tsysIPV6_RECVTCLASS      = 0x19\n\tsysIPV6_USE_MIN_MTU     = 0x20\n\tsysIPV6_DONTFRAG        = 0x21\n\tsysIPV6_SEC_OPT         = 0x22\n\tsysIPV6_SRC_PREFERENCES = 0x23\n\tsysIPV6_RECVPATHMTU     = 0x24\n\tsysIPV6_PATHMTU         = 0x25\n\tsysIPV6_TCLASS          = 0x26\n\tsysIPV6_V6ONLY          = 0x27\n\n\tsysIPV6_RECVDSTOPTS = 0x28\n\n\tsysMCAST_JOIN_GROUP         = 0x29\n\tsysMCAST_LEAVE_GROUP        = 0x2a\n\tsysMCAST_BLOCK_SOURCE       = 0x2b\n\tsysMCAST_UNBLOCK_SOURCE     = 0x2c\n\tsysMCAST_JOIN_SOURCE_GROUP  = 0x2d\n\tsysMCAST_LEAVE_SOURCE_GROUP = 0x2e\n\n\tsysIPV6_PREFER_SRC_HOME   = 0x1\n\tsysIPV6_PREFER_SRC_COA    = 0x2\n\tsysIPV6_PREFER_SRC_PUBLIC = 0x4\n\tsysIPV6_PREFER_SRC_TMP    = 0x8\n\tsysIPV6_PREFER_SRC_NONCGA = 0x10\n\tsysIPV6_PREFER_SRC_CGA    = 0x20\n\n\tsysIPV6_PREFER_SRC_MIPMASK    = 0x3\n\tsysIPV6_PREFER_SRC_MIPDEFAULT = 0x1\n\tsysIPV6_PREFER_SRC_TMPMASK    = 0xc\n\tsysIPV6_PREFER_SRC_TMPDEFAULT = 0x4\n\tsysIPV6_PREFER_SRC_CGAMASK    = 0x30\n\tsysIPV6_PREFER_SRC_CGADEFAULT = 0x10\n\n\tsysIPV6_PREFER_SRC_MASK = 0x3f\n\n\tsysIPV6_PREFER_SRC_DEFAULT = 0x15\n\n\tsysIPV6_BOUND_IF   = 0x41\n\tsysIPV6_UNSPEC_SRC = 0x42\n\n\tsysICMP6_FILTER = 0x1\n\n\tsizeofSockaddrStorage = 0x100\n\tsizeofSockaddrInet6   = 0x20\n\tsizeofInet6Pktinfo    = 
0x14\n\tsizeofIPv6Mtuinfo     = 0x24\n\n\tsizeofIPv6Mreq       = 0x14\n\tsizeofGroupReq       = 0x104\n\tsizeofGroupSourceReq = 0x204\n\n\tsizeofICMPv6Filter = 0x20\n)\n\ntype sockaddrStorage struct {\n\tFamily     uint16\n\tX_ss_pad1  [6]int8\n\tX_ss_align float64\n\tX_ss_pad2  [240]int8\n}\n\ntype sockaddrInet6 struct {\n\tFamily         uint16\n\tPort           uint16\n\tFlowinfo       uint32\n\tAddr           [16]byte /* in6_addr */\n\tScope_id       uint32\n\tX__sin6_src_id uint32\n}\n\ntype inet6Pktinfo struct {\n\tAddr    [16]byte /* in6_addr */\n\tIfindex uint32\n}\n\ntype ipv6Mtuinfo struct {\n\tAddr sockaddrInet6\n\tMtu  uint32\n}\n\ntype ipv6Mreq struct {\n\tMultiaddr [16]byte /* in6_addr */\n\tInterface uint32\n}\n\ntype groupReq struct {\n\tInterface uint32\n\tPad_cgo_0 [256]byte\n}\n\ntype groupSourceReq struct {\n\tInterface uint32\n\tPad_cgo_0 [256]byte\n\tPad_cgo_1 [256]byte\n}\n\ntype icmpv6Filter struct {\n\tX__icmp6_filt [8]uint32\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/lex/httplex/httplex.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package httplex contains rules around lexical matters of various\n// HTTP-related specifications.\n//\n// This package is shared by the standard library (which vendors it)\n// and x/net/http2. It comes with no API stability promise.\npackage httplex\n\nimport (\n\t\"net\"\n\t\"strings\"\n\t\"unicode/utf8\"\n\n\t\"golang.org/x/net/idna\"\n)\n\nvar isTokenTable = [127]bool{\n\t'!':  true,\n\t'#':  true,\n\t'$':  true,\n\t'%':  true,\n\t'&':  true,\n\t'\\'': true,\n\t'*':  true,\n\t'+':  true,\n\t'-':  true,\n\t'.':  true,\n\t'0':  true,\n\t'1':  true,\n\t'2':  true,\n\t'3':  true,\n\t'4':  true,\n\t'5':  true,\n\t'6':  true,\n\t'7':  true,\n\t'8':  true,\n\t'9':  true,\n\t'A':  true,\n\t'B':  true,\n\t'C':  true,\n\t'D':  true,\n\t'E':  true,\n\t'F':  true,\n\t'G':  true,\n\t'H':  true,\n\t'I':  true,\n\t'J':  true,\n\t'K':  true,\n\t'L':  true,\n\t'M':  true,\n\t'N':  true,\n\t'O':  true,\n\t'P':  true,\n\t'Q':  true,\n\t'R':  true,\n\t'S':  true,\n\t'T':  true,\n\t'U':  true,\n\t'W':  true,\n\t'V':  true,\n\t'X':  true,\n\t'Y':  true,\n\t'Z':  true,\n\t'^':  true,\n\t'_':  true,\n\t'`':  true,\n\t'a':  true,\n\t'b':  true,\n\t'c':  true,\n\t'd':  true,\n\t'e':  true,\n\t'f':  true,\n\t'g':  true,\n\t'h':  true,\n\t'i':  true,\n\t'j':  true,\n\t'k':  true,\n\t'l':  true,\n\t'm':  true,\n\t'n':  true,\n\t'o':  true,\n\t'p':  true,\n\t'q':  true,\n\t'r':  true,\n\t's':  true,\n\t't':  true,\n\t'u':  true,\n\t'v':  true,\n\t'w':  true,\n\t'x':  true,\n\t'y':  true,\n\t'z':  true,\n\t'|':  true,\n\t'~':  true,\n}\n\nfunc IsTokenRune(r rune) bool {\n\ti := int(r)\n\treturn i < len(isTokenTable) && isTokenTable[i]\n}\n\nfunc isNotToken(r rune) bool {\n\treturn !IsTokenRune(r)\n}\n\n// HeaderValuesContainsToken reports whether any string in values\n// contains the provided token, ASCII 
case-insensitively.\nfunc HeaderValuesContainsToken(values []string, token string) bool {\n\tfor _, v := range values {\n\t\tif headerValueContainsToken(v, token) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// isOWS reports whether b is an optional whitespace byte, as defined\n// by RFC 7230 section 3.2.3.\nfunc isOWS(b byte) bool { return b == ' ' || b == '\\t' }\n\n// trimOWS returns x with all optional whitespace removes from the\n// beginning and end.\nfunc trimOWS(x string) string {\n\t// TODO: consider using strings.Trim(x, \" \\t\") instead,\n\t// if and when it's fast enough. See issue 10292.\n\t// But this ASCII-only code will probably always beat UTF-8\n\t// aware code.\n\tfor len(x) > 0 && isOWS(x[0]) {\n\t\tx = x[1:]\n\t}\n\tfor len(x) > 0 && isOWS(x[len(x)-1]) {\n\t\tx = x[:len(x)-1]\n\t}\n\treturn x\n}\n\n// headerValueContainsToken reports whether v (assumed to be a\n// 0#element, in the ABNF extension described in RFC 7230 section 7)\n// contains token amongst its comma-separated tokens, ASCII\n// case-insensitively.\nfunc headerValueContainsToken(v string, token string) bool {\n\tv = trimOWS(v)\n\tif comma := strings.IndexByte(v, ','); comma != -1 {\n\t\treturn tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)\n\t}\n\treturn tokenEqual(v, token)\n}\n\n// lowerASCII returns the ASCII lowercase version of b.\nfunc lowerASCII(b byte) byte {\n\tif 'A' <= b && b <= 'Z' {\n\t\treturn b + ('a' - 'A')\n\t}\n\treturn b\n}\n\n// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.\nfunc tokenEqual(t1, t2 string) bool {\n\tif len(t1) != len(t2) {\n\t\treturn false\n\t}\n\tfor i, b := range t1 {\n\t\tif b >= utf8.RuneSelf {\n\t\t\t// No UTF-8 or non-ASCII allowed in tokens.\n\t\t\treturn false\n\t\t}\n\t\tif lowerASCII(byte(b)) != lowerASCII(t2[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// isLWS reports whether b is linear white space, according\n// to 
http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2\n//      LWS            = [CRLF] 1*( SP | HT )\nfunc isLWS(b byte) bool { return b == ' ' || b == '\\t' }\n\n// isCTL reports whether b is a control byte, according\n// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2\n//      CTL            = <any US-ASCII control character\n//                       (octets 0 - 31) and DEL (127)>\nfunc isCTL(b byte) bool {\n\tconst del = 0x7f // a CTL\n\treturn b < ' ' || b == del\n}\n\n// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.\n// HTTP/2 imposes the additional restriction that uppercase ASCII\n// letters are not allowed.\n//\n//  RFC 7230 says:\n//   header-field   = field-name \":\" OWS field-value OWS\n//   field-name     = token\n//   token          = 1*tchar\n//   tchar = \"!\" / \"#\" / \"$\" / \"%\" / \"&\" / \"'\" / \"*\" / \"+\" / \"-\" / \".\" /\n//           \"^\" / \"_\" / \"`\" / \"|\" / \"~\" / DIGIT / ALPHA\nfunc ValidHeaderFieldName(v string) bool {\n\tif len(v) == 0 {\n\t\treturn false\n\t}\n\tfor _, r := range v {\n\t\tif !IsTokenRune(r) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// ValidHostHeader reports whether h is a valid host header.\nfunc ValidHostHeader(h string) bool {\n\t// The latest spec is actually this:\n\t//\n\t// http://tools.ietf.org/html/rfc7230#section-5.4\n\t//     Host = uri-host [ \":\" port ]\n\t//\n\t// Where uri-host is:\n\t//     http://tools.ietf.org/html/rfc3986#section-3.2.2\n\t//\n\t// But we're going to be much more lenient for now and just\n\t// search for any byte that's not a valid byte in any of those\n\t// expressions.\n\tfor i := 0; i < len(h); i++ {\n\t\tif !validHostByte[h[i]] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// See the validHostHeader comment.\nvar validHostByte = [256]bool{\n\t'0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,\n\t'8': true, '9': true,\n\n\t'a': true, 'b': true, 'c': true, 'd': 
true, 'e': true, 'f': true, 'g': true, 'h': true,\n\t'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,\n\t'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,\n\t'y': true, 'z': true,\n\n\t'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,\n\t'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,\n\t'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,\n\t'Y': true, 'Z': true,\n\n\t'!':  true, // sub-delims\n\t'$':  true, // sub-delims\n\t'%':  true, // pct-encoded (and used in IPv6 zones)\n\t'&':  true, // sub-delims\n\t'(':  true, // sub-delims\n\t')':  true, // sub-delims\n\t'*':  true, // sub-delims\n\t'+':  true, // sub-delims\n\t',':  true, // sub-delims\n\t'-':  true, // unreserved\n\t'.':  true, // unreserved\n\t':':  true, // IPv6address + Host expression's optional port\n\t';':  true, // sub-delims\n\t'=':  true, // sub-delims\n\t'[':  true,\n\t'\\'': true, // sub-delims\n\t']':  true,\n\t'_':  true, // unreserved\n\t'~':  true, // unreserved\n}\n\n// ValidHeaderFieldValue reports whether v is a valid \"field-value\" according to\n// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :\n//\n//        message-header = field-name \":\" [ field-value ]\n//        field-value    = *( field-content | LWS )\n//        field-content  = <the OCTETs making up the field-value\n//                         and consisting of either *TEXT or combinations\n//                         of token, separators, and quoted-string>\n//\n// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :\n//\n//        TEXT           = <any OCTET except CTLs,\n//                          but including LWS>\n//        LWS            = [CRLF] 1*( SP | HT )\n//        CTL            = <any US-ASCII control character\n//                         (octets 0 - 31) and DEL (127)>\n//\n// RFC 7230 says:\n//  
field-value    = *( field-content / obs-fold )\n//  obj-fold       =  N/A to http2, and deprecated\n//  field-content  = field-vchar [ 1*( SP / HTAB ) field-vchar ]\n//  field-vchar    = VCHAR / obs-text\n//  obs-text       = %x80-FF\n//  VCHAR          = \"any visible [USASCII] character\"\n//\n// http2 further says: \"Similarly, HTTP/2 allows header field values\n// that are not valid. While most of the values that can be encoded\n// will not alter header field parsing, carriage return (CR, ASCII\n// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII\n// 0x0) might be exploited by an attacker if they are translated\n// verbatim. Any request or response that contains a character not\n// permitted in a header field value MUST be treated as malformed\n// (Section 8.1.2.6). Valid characters are defined by the\n// field-content ABNF rule in Section 3.2 of [RFC7230].\"\n//\n// This function does not (yet?) properly handle the rejection of\n// strings that begin or end with SP or HTAB.\nfunc ValidHeaderFieldValue(v string) bool {\n\tfor i := 0; i < len(v); i++ {\n\t\tb := v[i]\n\t\tif isCTL(b) && !isLWS(b) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc isASCII(s string) bool {\n\tfor i := 0; i < len(s); i++ {\n\t\tif s[i] >= utf8.RuneSelf {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// PunycodeHostPort returns the IDNA Punycode version\n// of the provided \"host\" or \"host:port\" string.\nfunc PunycodeHostPort(v string) (string, error) {\n\tif isASCII(v) {\n\t\treturn v, nil\n\t}\n\n\thost, port, err := net.SplitHostPort(v)\n\tif err != nil {\n\t\t// The input 'v' argument was just a \"host\" argument,\n\t\t// without a port. This error should not be returned\n\t\t// to the caller.\n\t\thost = v\n\t\tport = \"\"\n\t}\n\thost, err = idna.ToASCII(host)\n\tif err != nil {\n\t\t// Non-UTF-8? 
Not representable in Punycode, in any\n\t\t// case.\n\t\treturn \"\", err\n\t}\n\tif port == \"\" {\n\t\treturn host, nil\n\t}\n\treturn net.JoinHostPort(host, port), nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/lex/httplex/httplex_test.go",
    "content": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage httplex\n\nimport (\n\t\"testing\"\n)\n\nfunc isChar(c rune) bool { return c <= 127 }\n\nfunc isCtl(c rune) bool { return c <= 31 || c == 127 }\n\nfunc isSeparator(c rune) bool {\n\tswitch c {\n\tcase '(', ')', '<', '>', '@', ',', ';', ':', '\\\\', '\"', '/', '[', ']', '?', '=', '{', '}', ' ', '\\t':\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc TestIsToken(t *testing.T) {\n\tfor i := 0; i <= 130; i++ {\n\t\tr := rune(i)\n\t\texpected := isChar(r) && !isCtl(r) && !isSeparator(r)\n\t\tif IsTokenRune(r) != expected {\n\t\t\tt.Errorf(\"isToken(0x%x) = %v\", r, !expected)\n\t\t}\n\t}\n}\n\nfunc TestHeaderValuesContainsToken(t *testing.T) {\n\ttests := []struct {\n\t\tvals  []string\n\t\ttoken string\n\t\twant  bool\n\t}{\n\t\t{\n\t\t\tvals:  []string{\"foo\"},\n\t\t\ttoken: \"foo\",\n\t\t\twant:  true,\n\t\t},\n\t\t{\n\t\t\tvals:  []string{\"bar\", \"foo\"},\n\t\t\ttoken: \"foo\",\n\t\t\twant:  true,\n\t\t},\n\t\t{\n\t\t\tvals:  []string{\"foo\"},\n\t\t\ttoken: \"FOO\",\n\t\t\twant:  true,\n\t\t},\n\t\t{\n\t\t\tvals:  []string{\"foo\"},\n\t\t\ttoken: \"bar\",\n\t\t\twant:  false,\n\t\t},\n\t\t{\n\t\t\tvals:  []string{\" foo \"},\n\t\t\ttoken: \"FOO\",\n\t\t\twant:  true,\n\t\t},\n\t\t{\n\t\t\tvals:  []string{\"foo,bar\"},\n\t\t\ttoken: \"FOO\",\n\t\t\twant:  true,\n\t\t},\n\t\t{\n\t\t\tvals:  []string{\"bar,foo,bar\"},\n\t\t\ttoken: \"FOO\",\n\t\t\twant:  true,\n\t\t},\n\t\t{\n\t\t\tvals:  []string{\"bar , foo\"},\n\t\t\ttoken: \"FOO\",\n\t\t\twant:  true,\n\t\t},\n\t\t{\n\t\t\tvals:  []string{\"foo ,bar \"},\n\t\t\ttoken: \"FOO\",\n\t\t\twant:  true,\n\t\t},\n\t\t{\n\t\t\tvals:  []string{\"bar, foo ,bar\"},\n\t\t\ttoken: \"FOO\",\n\t\t\twant:  true,\n\t\t},\n\t\t{\n\t\t\tvals:  []string{\"bar , foo\"},\n\t\t\ttoken: \"FOO\",\n\t\t\twant:  true,\n\t\t},\n\t}\n\tfor _, tt := range 
tests {\n\t\tgot := HeaderValuesContainsToken(tt.vals, tt.token)\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"headerValuesContainsToken(%q, %q) = %v; want %v\", tt.vals, tt.token, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestPunycodeHostPort(t *testing.T) {\n\ttests := []struct {\n\t\tin, want string\n\t}{\n\t\t{\"www.google.com\", \"www.google.com\"},\n\t\t{\"гофер.рф\", \"xn--c1ae0ajs.xn--p1ai\"},\n\t\t{\"bücher.de\", \"xn--bcher-kva.de\"},\n\t\t{\"bücher.de:8080\", \"xn--bcher-kva.de:8080\"},\n\t\t{\"[1::6]:8080\", \"[1::6]:8080\"},\n\t}\n\tfor _, tt := range tests {\n\t\tgot, err := PunycodeHostPort(tt.in)\n\t\tif tt.want != got || err != nil {\n\t\t\tt.Errorf(\"PunycodeHostPort(%q) = %q, %v, want %q, nil\", tt.in, got, err, tt.want)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/lif/address.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build solaris\n\npackage lif\n\nimport (\n\t\"errors\"\n\t\"unsafe\"\n)\n\n// An Addr represents an address associated with packet routing.\ntype Addr interface {\n\t// Family returns an address family.\n\tFamily() int\n}\n\n// An Inet4Addr represents an internet address for IPv4.\ntype Inet4Addr struct {\n\tIP        [4]byte // IP address\n\tPrefixLen int     // address prefix length\n}\n\n// Family implements the Family method of Addr interface.\nfunc (a *Inet4Addr) Family() int { return sysAF_INET }\n\n// An Inet6Addr represents an internet address for IPv6.\ntype Inet6Addr struct {\n\tIP        [16]byte // IP address\n\tPrefixLen int      // address prefix length\n\tZoneID    int      // zone identifier\n}\n\n// Family implements the Family method of Addr interface.\nfunc (a *Inet6Addr) Family() int { return sysAF_INET6 }\n\n// Addrs returns a list of interface addresses.\n//\n// The provided af must be an address family and name must be a data\n// link name. 
The zero value of af or name means a wildcard.\nfunc Addrs(af int, name string) ([]Addr, error) {\n\teps, err := newEndpoints(af)\n\tif len(eps) == 0 {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tfor _, ep := range eps {\n\t\t\tep.close()\n\t\t}\n\t}()\n\tlls, err := links(eps, name)\n\tif len(lls) == 0 {\n\t\treturn nil, err\n\t}\n\tvar as []Addr\n\tfor _, ll := range lls {\n\t\tvar lifr lifreq\n\t\tfor i := 0; i < len(ll.Name); i++ {\n\t\t\tlifr.Name[i] = int8(ll.Name[i])\n\t\t}\n\t\tfor _, ep := range eps {\n\t\t\tioc := int64(sysSIOCGLIFADDR)\n\t\t\terr := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifr))\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsa := (*sockaddrStorage)(unsafe.Pointer(&lifr.Lifru[0]))\n\t\t\tl := int(nativeEndian.Uint32(lifr.Lifru1[:4]))\n\t\t\tif l == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch sa.Family {\n\t\t\tcase sysAF_INET:\n\t\t\t\ta := &Inet4Addr{PrefixLen: l}\n\t\t\t\tcopy(a.IP[:], lifr.Lifru[4:8])\n\t\t\t\tas = append(as, a)\n\t\t\tcase sysAF_INET6:\n\t\t\t\ta := &Inet6Addr{PrefixLen: l, ZoneID: int(nativeEndian.Uint32(lifr.Lifru[24:28]))}\n\t\t\t\tcopy(a.IP[:], lifr.Lifru[8:24])\n\t\t\t\tas = append(as, a)\n\t\t\t}\n\t\t}\n\t}\n\treturn as, nil\n}\n\nfunc parseLinkAddr(b []byte) ([]byte, error) {\n\tnlen, alen, slen := int(b[1]), int(b[2]), int(b[3])\n\tl := 4 + nlen + alen + slen\n\tif len(b) < l {\n\t\treturn nil, errors.New(\"invalid address\")\n\t}\n\tb = b[4:]\n\tvar addr []byte\n\tif nlen > 0 {\n\t\tb = b[nlen:]\n\t}\n\tif alen > 0 {\n\t\taddr = make([]byte, alen)\n\t\tcopy(addr, b[:alen])\n\t}\n\treturn addr, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/lif/address_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build solaris\n\npackage lif\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\ntype addrFamily int\n\nfunc (af addrFamily) String() string {\n\tswitch af {\n\tcase sysAF_UNSPEC:\n\t\treturn \"unspec\"\n\tcase sysAF_INET:\n\t\treturn \"inet4\"\n\tcase sysAF_INET6:\n\t\treturn \"inet6\"\n\tdefault:\n\t\treturn fmt.Sprintf(\"%d\", af)\n\t}\n}\n\nconst hexDigit = \"0123456789abcdef\"\n\ntype llAddr []byte\n\nfunc (a llAddr) String() string {\n\tif len(a) == 0 {\n\t\treturn \"\"\n\t}\n\tbuf := make([]byte, 0, len(a)*3-1)\n\tfor i, b := range a {\n\t\tif i > 0 {\n\t\t\tbuf = append(buf, ':')\n\t\t}\n\t\tbuf = append(buf, hexDigit[b>>4])\n\t\tbuf = append(buf, hexDigit[b&0xF])\n\t}\n\treturn string(buf)\n}\n\ntype ipAddr []byte\n\nfunc (a ipAddr) String() string {\n\tif len(a) == 0 {\n\t\treturn \"<nil>\"\n\t}\n\tif len(a) == 4 {\n\t\treturn fmt.Sprintf(\"%d.%d.%d.%d\", a[0], a[1], a[2], a[3])\n\t}\n\tif len(a) == 16 {\n\t\treturn fmt.Sprintf(\"%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15])\n\t}\n\ts := make([]byte, len(a)*2)\n\tfor i, tn := range a {\n\t\ts[i*2], s[i*2+1] = hexDigit[tn>>4], hexDigit[tn&0xf]\n\t}\n\treturn string(s)\n}\n\nfunc (a *Inet4Addr) String() string {\n\treturn fmt.Sprintf(\"(%s %s %d)\", addrFamily(a.Family()), ipAddr(a.IP[:]), a.PrefixLen)\n}\n\nfunc (a *Inet6Addr) String() string {\n\treturn fmt.Sprintf(\"(%s %s %d %d)\", addrFamily(a.Family()), ipAddr(a.IP[:]), a.PrefixLen, a.ZoneID)\n}\n\ntype addrPack struct {\n\taf int\n\tas []Addr\n}\n\nfunc addrPacks() ([]addrPack, error) {\n\tvar lastErr error\n\tvar aps []addrPack\n\tfor _, af := range [...]int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} {\n\t\tas, err := Addrs(af, \"\")\n\t\tif err != nil 
{\n\t\t\tlastErr = err\n\t\t\tcontinue\n\t\t}\n\t\taps = append(aps, addrPack{af: af, as: as})\n\t}\n\treturn aps, lastErr\n}\n\nfunc TestAddrs(t *testing.T) {\n\taps, err := addrPacks()\n\tif len(aps) == 0 && err != nil {\n\t\tt.Fatal(err)\n\t}\n\tlps, err := linkPacks()\n\tif len(lps) == 0 && err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, lp := range lps {\n\t\tn := 0\n\t\tfor _, ll := range lp.lls {\n\t\t\tas, err := Addrs(lp.af, ll.Name)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(lp.af, ll.Name, err)\n\t\t\t}\n\t\t\tt.Logf(\"af=%s name=%s %v\", addrFamily(lp.af), ll.Name, as)\n\t\t\tn += len(as)\n\t\t}\n\t\tfor _, ap := range aps {\n\t\t\tif ap.af != lp.af {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif n != len(ap.as) {\n\t\t\t\tt.Errorf(\"af=%s got %d; want %d\", addrFamily(lp.af), n, len(ap.as))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/lif/binary.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build solaris\n\npackage lif\n\n// This file contains duplicates of encoding/binary package.\n//\n// This package is supposed to be used by the net package of standard\n// library. Therefore the package set used in the package must be the\n// same as net package.\n\nvar (\n\tlittleEndian binaryLittleEndian\n\tbigEndian    binaryBigEndian\n)\n\ntype binaryByteOrder interface {\n\tUint16([]byte) uint16\n\tUint32([]byte) uint32\n\tUint64([]byte) uint64\n\tPutUint16([]byte, uint16)\n\tPutUint32([]byte, uint32)\n\tPutUint64([]byte, uint64)\n}\n\ntype binaryLittleEndian struct{}\n\nfunc (binaryLittleEndian) Uint16(b []byte) uint16 {\n\t_ = b[1] // bounds check hint to compiler; see golang.org/issue/14808\n\treturn uint16(b[0]) | uint16(b[1])<<8\n}\n\nfunc (binaryLittleEndian) PutUint16(b []byte, v uint16) {\n\t_ = b[1] // early bounds check to guarantee safety of writes below\n\tb[0] = byte(v)\n\tb[1] = byte(v >> 8)\n}\n\nfunc (binaryLittleEndian) Uint32(b []byte) uint32 {\n\t_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808\n\treturn uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24\n}\n\nfunc (binaryLittleEndian) PutUint32(b []byte, v uint32) {\n\t_ = b[3] // early bounds check to guarantee safety of writes below\n\tb[0] = byte(v)\n\tb[1] = byte(v >> 8)\n\tb[2] = byte(v >> 16)\n\tb[3] = byte(v >> 24)\n}\n\nfunc (binaryLittleEndian) Uint64(b []byte) uint64 {\n\t_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808\n\treturn uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |\n\t\tuint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56\n}\n\nfunc (binaryLittleEndian) PutUint64(b []byte, v uint64) {\n\t_ = b[7] // early bounds check to guarantee safety of writes below\n\tb[0] = byte(v)\n\tb[1] = 
byte(v >> 8)\n\tb[2] = byte(v >> 16)\n\tb[3] = byte(v >> 24)\n\tb[4] = byte(v >> 32)\n\tb[5] = byte(v >> 40)\n\tb[6] = byte(v >> 48)\n\tb[7] = byte(v >> 56)\n}\n\ntype binaryBigEndian struct{}\n\nfunc (binaryBigEndian) Uint16(b []byte) uint16 {\n\t_ = b[1] // bounds check hint to compiler; see golang.org/issue/14808\n\treturn uint16(b[1]) | uint16(b[0])<<8\n}\n\nfunc (binaryBigEndian) PutUint16(b []byte, v uint16) {\n\t_ = b[1] // early bounds check to guarantee safety of writes below\n\tb[0] = byte(v >> 8)\n\tb[1] = byte(v)\n}\n\nfunc (binaryBigEndian) Uint32(b []byte) uint32 {\n\t_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808\n\treturn uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24\n}\n\nfunc (binaryBigEndian) PutUint32(b []byte, v uint32) {\n\t_ = b[3] // early bounds check to guarantee safety of writes below\n\tb[0] = byte(v >> 24)\n\tb[1] = byte(v >> 16)\n\tb[2] = byte(v >> 8)\n\tb[3] = byte(v)\n}\n\nfunc (binaryBigEndian) Uint64(b []byte) uint64 {\n\t_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808\n\treturn uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |\n\t\tuint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56\n}\n\nfunc (binaryBigEndian) PutUint64(b []byte, v uint64) {\n\t_ = b[7] // early bounds check to guarantee safety of writes below\n\tb[0] = byte(v >> 56)\n\tb[1] = byte(v >> 48)\n\tb[2] = byte(v >> 40)\n\tb[3] = byte(v >> 32)\n\tb[4] = byte(v >> 24)\n\tb[5] = byte(v >> 16)\n\tb[6] = byte(v >> 8)\n\tb[7] = byte(v)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/lif/defs_solaris.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// +godefs map struct_in_addr [4]byte /* in_addr */\n// +godefs map struct_in6_addr [16]byte /* in6_addr */\n\npackage lif\n\n/*\n#include <sys/socket.h>\n#include <sys/sockio.h>\n\n#include <net/if.h>\n#include <net/if_types.h>\n*/\nimport \"C\"\n\nconst (\n\tsysAF_UNSPEC = C.AF_UNSPEC\n\tsysAF_INET   = C.AF_INET\n\tsysAF_INET6  = C.AF_INET6\n\n\tsysSOCK_DGRAM = C.SOCK_DGRAM\n)\n\ntype sockaddrStorage C.struct_sockaddr_storage\n\nconst (\n\tsysLIFC_NOXMIT          = C.LIFC_NOXMIT\n\tsysLIFC_EXTERNAL_SOURCE = C.LIFC_EXTERNAL_SOURCE\n\tsysLIFC_TEMPORARY       = C.LIFC_TEMPORARY\n\tsysLIFC_ALLZONES        = C.LIFC_ALLZONES\n\tsysLIFC_UNDER_IPMP      = C.LIFC_UNDER_IPMP\n\tsysLIFC_ENABLED         = C.LIFC_ENABLED\n\n\tsysSIOCGLIFADDR    = C.SIOCGLIFADDR\n\tsysSIOCGLIFDSTADDR = C.SIOCGLIFDSTADDR\n\tsysSIOCGLIFFLAGS   = C.SIOCGLIFFLAGS\n\tsysSIOCGLIFMTU     = C.SIOCGLIFMTU\n\tsysSIOCGLIFNETMASK = C.SIOCGLIFNETMASK\n\tsysSIOCGLIFMETRIC  = C.SIOCGLIFMETRIC\n\tsysSIOCGLIFNUM     = C.SIOCGLIFNUM\n\tsysSIOCGLIFINDEX   = C.SIOCGLIFINDEX\n\tsysSIOCGLIFSUBNET  = C.SIOCGLIFSUBNET\n\tsysSIOCGLIFLNKINFO = C.SIOCGLIFLNKINFO\n\tsysSIOCGLIFCONF    = C.SIOCGLIFCONF\n\tsysSIOCGLIFHWADDR  = C.SIOCGLIFHWADDR\n)\n\nconst (\n\tsysIFF_UP          = C.IFF_UP\n\tsysIFF_BROADCAST   = C.IFF_BROADCAST\n\tsysIFF_DEBUG       = C.IFF_DEBUG\n\tsysIFF_LOOPBACK    = C.IFF_LOOPBACK\n\tsysIFF_POINTOPOINT = C.IFF_POINTOPOINT\n\tsysIFF_NOTRAILERS  = C.IFF_NOTRAILERS\n\tsysIFF_RUNNING     = C.IFF_RUNNING\n\tsysIFF_NOARP       = C.IFF_NOARP\n\tsysIFF_PROMISC     = C.IFF_PROMISC\n\tsysIFF_ALLMULTI    = C.IFF_ALLMULTI\n\tsysIFF_INTELLIGENT = C.IFF_INTELLIGENT\n\tsysIFF_MULTICAST   = C.IFF_MULTICAST\n\tsysIFF_MULTI_BCAST = C.IFF_MULTI_BCAST\n\tsysIFF_UNNUMBERED  = C.IFF_UNNUMBERED\n\tsysIFF_PRIVATE     = 
C.IFF_PRIVATE\n)\n\nconst (\n\tsizeofLifnum       = C.sizeof_struct_lifnum\n\tsizeofLifreq       = C.sizeof_struct_lifreq\n\tsizeofLifconf      = C.sizeof_struct_lifconf\n\tsizeofLifIfinfoReq = C.sizeof_struct_lif_ifinfo_req\n)\n\ntype lifnum C.struct_lifnum\n\ntype lifreq C.struct_lifreq\n\ntype lifconf C.struct_lifconf\n\ntype lifIfinfoReq C.struct_lif_ifinfo_req\n\nconst (\n\tsysIFT_IPV4 = C.IFT_IPV4\n\tsysIFT_IPV6 = C.IFT_IPV6\n\tsysIFT_6TO4 = C.IFT_6TO4\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/lif/lif.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build solaris\n\n// Package lif provides basic functions for the manipulation of\n// logical network interfaces and interface addresses on Solaris.\n//\n// The package supports Solaris 11 or above.\npackage lif\n\nimport \"syscall\"\n\ntype endpoint struct {\n\taf int\n\ts  uintptr\n}\n\nfunc (ep *endpoint) close() error {\n\treturn syscall.Close(int(ep.s))\n}\n\nfunc newEndpoints(af int) ([]endpoint, error) {\n\tvar lastErr error\n\tvar eps []endpoint\n\tafs := []int{sysAF_INET, sysAF_INET6}\n\tif af != sysAF_UNSPEC {\n\t\tafs = []int{af}\n\t}\n\tfor _, af := range afs {\n\t\ts, err := syscall.Socket(af, sysSOCK_DGRAM, 0)\n\t\tif err != nil {\n\t\t\tlastErr = err\n\t\t\tcontinue\n\t\t}\n\t\teps = append(eps, endpoint{af: af, s: uintptr(s)})\n\t}\n\tif len(eps) == 0 {\n\t\treturn nil, lastErr\n\t}\n\treturn eps, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/lif/link.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build solaris\n\npackage lif\n\nimport \"unsafe\"\n\n// A Link represents logical data link information.\n//\n// It also represents base information for logical network interface.\n// On Solaris, each logical network interface represents network layer\n// adjacency information and the interface has a only single network\n// address or address pair for tunneling. It's usual that multiple\n// logical network interfaces share the same logical data link.\ntype Link struct {\n\tName  string // name, equivalent to IP interface name\n\tIndex int    // index, equivalent to IP interface index\n\tType  int    // type\n\tFlags int    // flags\n\tMTU   int    // maximum transmission unit, basically link MTU but may differ between IP address families\n\tAddr  []byte // address\n}\n\nfunc (ll *Link) fetch(s uintptr) {\n\tvar lifr lifreq\n\tfor i := 0; i < len(ll.Name); i++ {\n\t\tlifr.Name[i] = int8(ll.Name[i])\n\t}\n\tioc := int64(sysSIOCGLIFINDEX)\n\tif err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil {\n\t\tll.Index = int(nativeEndian.Uint32(lifr.Lifru[:4]))\n\t}\n\tioc = int64(sysSIOCGLIFFLAGS)\n\tif err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil {\n\t\tll.Flags = int(nativeEndian.Uint64(lifr.Lifru[:8]))\n\t}\n\tioc = int64(sysSIOCGLIFMTU)\n\tif err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil {\n\t\tll.MTU = int(nativeEndian.Uint32(lifr.Lifru[:4]))\n\t}\n\tswitch ll.Type {\n\tcase sysIFT_IPV4, sysIFT_IPV6, sysIFT_6TO4:\n\tdefault:\n\t\tioc = int64(sysSIOCGLIFHWADDR)\n\t\tif err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil {\n\t\t\tll.Addr, _ = parseLinkAddr(lifr.Lifru[4:])\n\t\t}\n\t}\n}\n\n// Links returns a list of logical data links.\n//\n// The provided af must be an address family and name must be a data\n// link name. 
The zero value of af or name means a wildcard.\nfunc Links(af int, name string) ([]Link, error) {\n\teps, err := newEndpoints(af)\n\tif len(eps) == 0 {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tfor _, ep := range eps {\n\t\t\tep.close()\n\t\t}\n\t}()\n\treturn links(eps, name)\n}\n\nfunc links(eps []endpoint, name string) ([]Link, error) {\n\tvar lls []Link\n\tlifn := lifnum{Flags: sysLIFC_NOXMIT | sysLIFC_TEMPORARY | sysLIFC_ALLZONES | sysLIFC_UNDER_IPMP}\n\tlifc := lifconf{Flags: sysLIFC_NOXMIT | sysLIFC_TEMPORARY | sysLIFC_ALLZONES | sysLIFC_UNDER_IPMP}\n\tfor _, ep := range eps {\n\t\tlifn.Family = uint16(ep.af)\n\t\tioc := int64(sysSIOCGLIFNUM)\n\t\tif err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifn)); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif lifn.Count == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tb := make([]byte, lifn.Count*sizeofLifreq)\n\t\tlifc.Family = uint16(ep.af)\n\t\tlifc.Len = lifn.Count * sizeofLifreq\n\t\tif len(lifc.Lifcu) == 8 {\n\t\t\tnativeEndian.PutUint64(lifc.Lifcu[:], uint64(uintptr(unsafe.Pointer(&b[0]))))\n\t\t} else {\n\t\t\tnativeEndian.PutUint32(lifc.Lifcu[:], uint32(uintptr(unsafe.Pointer(&b[0]))))\n\t\t}\n\t\tioc = int64(sysSIOCGLIFCONF)\n\t\tif err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifc)); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tnb := make([]byte, 32) // see LIFNAMSIZ in net/if.h\n\t\tfor i := 0; i < int(lifn.Count); i++ {\n\t\t\tlifr := (*lifreq)(unsafe.Pointer(&b[i*sizeofLifreq]))\n\t\t\tfor i := 0; i < 32; i++ {\n\t\t\t\tif lifr.Name[i] == 0 {\n\t\t\t\t\tnb = nb[:i]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tnb[i] = byte(lifr.Name[i])\n\t\t\t}\n\t\t\tllname := string(nb)\n\t\t\tnb = nb[:32]\n\t\t\tif isDupLink(lls, llname) || name != \"\" && name != llname {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tll := Link{Name: llname, Type: int(lifr.Type)}\n\t\t\tll.fetch(ep.s)\n\t\t\tlls = append(lls, ll)\n\t\t}\n\t}\n\treturn lls, nil\n}\n\nfunc isDupLink(lls []Link, name string) bool {\n\tfor _, ll := range lls {\n\t\tif ll.Name == 
name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/lif/link_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build solaris\n\npackage lif\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc (ll *Link) String() string {\n\treturn fmt.Sprintf(\"name=%s index=%d type=%d flags=%#x mtu=%d addr=%v\", ll.Name, ll.Index, ll.Type, ll.Flags, ll.MTU, llAddr(ll.Addr))\n}\n\ntype linkPack struct {\n\taf  int\n\tlls []Link\n}\n\nfunc linkPacks() ([]linkPack, error) {\n\tvar lastErr error\n\tvar lps []linkPack\n\tfor _, af := range [...]int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} {\n\t\tlls, err := Links(af, \"\")\n\t\tif err != nil {\n\t\t\tlastErr = err\n\t\t\tcontinue\n\t\t}\n\t\tlps = append(lps, linkPack{af: af, lls: lls})\n\t}\n\treturn lps, lastErr\n}\n\nfunc TestLinks(t *testing.T) {\n\tlps, err := linkPacks()\n\tif len(lps) == 0 && err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, lp := range lps {\n\t\tn := 0\n\t\tfor _, sll := range lp.lls {\n\t\t\tlls, err := Links(lp.af, sll.Name)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(lp.af, sll.Name, err)\n\t\t\t}\n\t\t\tfor _, ll := range lls {\n\t\t\t\tif ll.Name != sll.Name || ll.Index != sll.Index {\n\t\t\t\t\tt.Errorf(\"af=%s got %v; want %v\", addrFamily(lp.af), &ll, &sll)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tt.Logf(\"af=%s name=%s %v\", addrFamily(lp.af), sll.Name, &ll)\n\t\t\t\tn++\n\t\t\t}\n\t\t}\n\t\tif n != len(lp.lls) {\n\t\t\tt.Errorf(\"af=%s got %d; want %d\", addrFamily(lp.af), n, len(lp.lls))\n\t\t\tcontinue\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/lif/sys.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build solaris\n\npackage lif\n\nimport \"unsafe\"\n\nvar nativeEndian binaryByteOrder\n\nfunc init() {\n\ti := uint32(1)\n\tb := (*[4]byte)(unsafe.Pointer(&i))\n\tif b[0] == 1 {\n\t\tnativeEndian = littleEndian\n\t} else {\n\t\tnativeEndian = bigEndian\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/lif/sys_solaris_amd64.s",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n#include \"textflag.h\"\n\nTEXT ·sysvicall6(SB),NOSPLIT,$0-88\n\tJMP\tsyscall·sysvicall6(SB)\n"
  },
  {
    "path": "vendor/golang.org/x/net/lif/syscall.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build solaris\n\npackage lif\n\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\n//go:cgo_import_dynamic libc_ioctl ioctl \"libc.so\"\n\n//go:linkname procIoctl libc_ioctl\n\nvar procIoctl uintptr\n\nfunc sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno)\n\nfunc ioctl(s, ioc uintptr, arg unsafe.Pointer) error {\n\t_, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procIoctl)), 3, s, ioc, uintptr(arg), 0, 0, 0)\n\tif errno != 0 {\n\t\treturn error(errno)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/lif/zsys_solaris_amd64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_solaris.go\n\npackage lif\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_INET6  = 0x1a\n\n\tsysSOCK_DGRAM = 0x1\n)\n\ntype sockaddrStorage struct {\n\tFamily     uint16\n\tX_ss_pad1  [6]int8\n\tX_ss_align float64\n\tX_ss_pad2  [240]int8\n}\n\nconst (\n\tsysLIFC_NOXMIT          = 0x1\n\tsysLIFC_EXTERNAL_SOURCE = 0x2\n\tsysLIFC_TEMPORARY       = 0x4\n\tsysLIFC_ALLZONES        = 0x8\n\tsysLIFC_UNDER_IPMP      = 0x10\n\tsysLIFC_ENABLED         = 0x20\n\n\tsysSIOCGLIFADDR    = -0x3f87968f\n\tsysSIOCGLIFDSTADDR = -0x3f87968d\n\tsysSIOCGLIFFLAGS   = -0x3f87968b\n\tsysSIOCGLIFMTU     = -0x3f879686\n\tsysSIOCGLIFNETMASK = -0x3f879683\n\tsysSIOCGLIFMETRIC  = -0x3f879681\n\tsysSIOCGLIFNUM     = -0x3ff3967e\n\tsysSIOCGLIFINDEX   = -0x3f87967b\n\tsysSIOCGLIFSUBNET  = -0x3f879676\n\tsysSIOCGLIFLNKINFO = -0x3f879674\n\tsysSIOCGLIFCONF    = -0x3fef965b\n\tsysSIOCGLIFHWADDR  = -0x3f879640\n)\n\nconst (\n\tsysIFF_UP          = 0x1\n\tsysIFF_BROADCAST   = 0x2\n\tsysIFF_DEBUG       = 0x4\n\tsysIFF_LOOPBACK    = 0x8\n\tsysIFF_POINTOPOINT = 0x10\n\tsysIFF_NOTRAILERS  = 0x20\n\tsysIFF_RUNNING     = 0x40\n\tsysIFF_NOARP       = 0x80\n\tsysIFF_PROMISC     = 0x100\n\tsysIFF_ALLMULTI    = 0x200\n\tsysIFF_INTELLIGENT = 0x400\n\tsysIFF_MULTICAST   = 0x800\n\tsysIFF_MULTI_BCAST = 0x1000\n\tsysIFF_UNNUMBERED  = 0x2000\n\tsysIFF_PRIVATE     = 0x8000\n)\n\nconst (\n\tsizeofLifnum       = 0xc\n\tsizeofLifreq       = 0x178\n\tsizeofLifconf      = 0x18\n\tsizeofLifIfinfoReq = 0x10\n)\n\ntype lifnum struct {\n\tFamily    uint16\n\tPad_cgo_0 [2]byte\n\tFlags     int32\n\tCount     int32\n}\n\ntype lifreq struct {\n\tName   [32]int8\n\tLifru1 [4]byte\n\tType   uint32\n\tLifru  [336]byte\n}\n\ntype lifconf struct {\n\tFamily    uint16\n\tPad_cgo_0 [2]byte\n\tFlags     int32\n\tLen       int32\n\tPad_cgo_1 [4]byte\n\tLifcu     [8]byte\n}\n\ntype lifIfinfoReq struct {\n\tMaxhops      uint8\n\tPad_cgo_0    
[3]byte\n\tReachtime    uint32\n\tReachretrans uint32\n\tMaxmtu       uint32\n}\n\nconst (\n\tsysIFT_IPV4 = 0xc8\n\tsysIFT_IPV6 = 0xc9\n\tsysIFT_6TO4 = 0xca\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/nettest/conntest.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package nettest provides utilities for network testing.\npackage nettest\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"math/rand\"\n\t\"net\"\n\t\"runtime\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar (\n\taLongTimeAgo = time.Unix(233431200, 0)\n\tneverTimeout = time.Time{}\n)\n\n// MakePipe creates a connection between two endpoints and returns the pair\n// as c1 and c2, such that anything written to c1 is read by c2 and vice-versa.\n// The stop function closes all resources, including c1, c2, and the underlying\n// net.Listener (if there is one), and should not be nil.\ntype MakePipe func() (c1, c2 net.Conn, stop func(), err error)\n\n// TestConn tests that a net.Conn implementation properly satisfies the interface.\n// The tests should not produce any false positives, but may experience\n// false negatives. Thus, some issues may only be detected when the test is\n// run multiple times. 
For maximal effectiveness, run the tests under the\n// race detector.\nfunc TestConn(t *testing.T, mp MakePipe) {\n\ttestConn(t, mp)\n}\n\ntype connTester func(t *testing.T, c1, c2 net.Conn)\n\nfunc timeoutWrapper(t *testing.T, mp MakePipe, f connTester) {\n\tc1, c2, stop, err := mp()\n\tif err != nil {\n\t\tt.Fatalf(\"unable to make pipe: %v\", err)\n\t}\n\tvar once sync.Once\n\tdefer once.Do(func() { stop() })\n\ttimer := time.AfterFunc(time.Minute, func() {\n\t\tonce.Do(func() {\n\t\t\tt.Error(\"test timed out; terminating pipe\")\n\t\t\tstop()\n\t\t})\n\t})\n\tdefer timer.Stop()\n\tf(t, c1, c2)\n}\n\n// testBasicIO tests that the data sent on c1 is properly received on c2.\nfunc testBasicIO(t *testing.T, c1, c2 net.Conn) {\n\twant := make([]byte, 1<<20)\n\trand.New(rand.NewSource(0)).Read(want)\n\n\tdataCh := make(chan []byte)\n\tgo func() {\n\t\trd := bytes.NewReader(want)\n\t\tif err := chunkedCopy(c1, rd); err != nil {\n\t\t\tt.Errorf(\"unexpected c1.Write error: %v\", err)\n\t\t}\n\t\tif err := c1.Close(); err != nil {\n\t\t\tt.Errorf(\"unexpected c1.Close error: %v\", err)\n\t\t}\n\t}()\n\n\tgo func() {\n\t\twr := new(bytes.Buffer)\n\t\tif err := chunkedCopy(wr, c2); err != nil {\n\t\t\tt.Errorf(\"unexpected c2.Read error: %v\", err)\n\t\t}\n\t\tif err := c2.Close(); err != nil {\n\t\t\tt.Errorf(\"unexpected c2.Close error: %v\", err)\n\t\t}\n\t\tdataCh <- wr.Bytes()\n\t}()\n\n\tif got := <-dataCh; !bytes.Equal(got, want) {\n\t\tt.Errorf(\"transmitted data differs\")\n\t}\n}\n\n// testPingPong tests that the two endpoints can synchronously send data to\n// each other in a typical request-response pattern.\nfunc testPingPong(t *testing.T, c1, c2 net.Conn) {\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\n\tpingPonger := func(c net.Conn) {\n\t\tdefer wg.Done()\n\t\tbuf := make([]byte, 8)\n\t\tvar prev uint64\n\t\tfor {\n\t\t\tif _, err := io.ReadFull(c, buf); err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tt.Errorf(\"unexpected 
Read error: %v\", err)\n\t\t\t}\n\n\t\t\tv := binary.LittleEndian.Uint64(buf)\n\t\t\tbinary.LittleEndian.PutUint64(buf, v+1)\n\t\t\tif prev != 0 && prev+2 != v {\n\t\t\t\tt.Errorf(\"mismatching value: got %d, want %d\", v, prev+2)\n\t\t\t}\n\t\t\tprev = v\n\t\t\tif v == 1000 {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif _, err := c.Write(buf); err != nil {\n\t\t\t\tt.Errorf(\"unexpected Write error: %v\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif err := c.Close(); err != nil {\n\t\t\tt.Errorf(\"unexpected Close error: %v\", err)\n\t\t}\n\t}\n\n\twg.Add(2)\n\tgo pingPonger(c1)\n\tgo pingPonger(c2)\n\n\t// Start off the chain reaction.\n\tif _, err := c1.Write(make([]byte, 8)); err != nil {\n\t\tt.Errorf(\"unexpected c1.Write error: %v\", err)\n\t}\n}\n\n// testRacyRead tests that it is safe to mutate the input Read buffer\n// immediately after cancelation has occurred.\nfunc testRacyRead(t *testing.T, c1, c2 net.Conn) {\n\tgo chunkedCopy(c2, rand.New(rand.NewSource(0)))\n\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\n\tc1.SetReadDeadline(time.Now().Add(time.Millisecond))\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tb1 := make([]byte, 1024)\n\t\t\tb2 := make([]byte, 1024)\n\t\t\tfor j := 0; j < 100; j++ {\n\t\t\t\t_, err := c1.Read(b1)\n\t\t\t\tcopy(b1, b2) // Mutate b1 to trigger potential race\n\t\t\t\tif err != nil {\n\t\t\t\t\tcheckForTimeoutError(t, err)\n\t\t\t\t\tc1.SetReadDeadline(time.Now().Add(time.Millisecond))\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}\n\n// testRacyWrite tests that it is safe to mutate the input Write buffer\n// immediately after cancelation has occurred.\nfunc testRacyWrite(t *testing.T, c1, c2 net.Conn) {\n\tgo chunkedCopy(ioutil.Discard, c2)\n\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\n\tc1.SetWriteDeadline(time.Now().Add(time.Millisecond))\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tb1 := make([]byte, 1024)\n\t\t\tb2 := make([]byte, 
1024)\n\t\t\tfor j := 0; j < 100; j++ {\n\t\t\t\t_, err := c1.Write(b1)\n\t\t\t\tcopy(b1, b2) // Mutate b1 to trigger potential race\n\t\t\t\tif err != nil {\n\t\t\t\t\tcheckForTimeoutError(t, err)\n\t\t\t\t\tc1.SetWriteDeadline(time.Now().Add(time.Millisecond))\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}\n\n// testReadTimeout tests that Read timeouts do not affect Write.\nfunc testReadTimeout(t *testing.T, c1, c2 net.Conn) {\n\tgo chunkedCopy(ioutil.Discard, c2)\n\n\tc1.SetReadDeadline(aLongTimeAgo)\n\t_, err := c1.Read(make([]byte, 1024))\n\tcheckForTimeoutError(t, err)\n\tif _, err := c1.Write(make([]byte, 1024)); err != nil {\n\t\tt.Errorf(\"unexpected Write error: %v\", err)\n\t}\n}\n\n// testWriteTimeout tests that Write timeouts do not affect Read.\nfunc testWriteTimeout(t *testing.T, c1, c2 net.Conn) {\n\tgo chunkedCopy(c2, rand.New(rand.NewSource(0)))\n\n\tc1.SetWriteDeadline(aLongTimeAgo)\n\t_, err := c1.Write(make([]byte, 1024))\n\tcheckForTimeoutError(t, err)\n\tif _, err := c1.Read(make([]byte, 1024)); err != nil {\n\t\tt.Errorf(\"unexpected Read error: %v\", err)\n\t}\n}\n\n// testPastTimeout tests that a deadline set in the past immediately times out\n// Read and Write requests.\nfunc testPastTimeout(t *testing.T, c1, c2 net.Conn) {\n\tgo chunkedCopy(c2, c2)\n\n\ttestRoundtrip(t, c1)\n\n\tc1.SetDeadline(aLongTimeAgo)\n\tn, err := c1.Write(make([]byte, 1024))\n\tif n != 0 {\n\t\tt.Errorf(\"unexpected Write count: got %d, want 0\", n)\n\t}\n\tcheckForTimeoutError(t, err)\n\tn, err = c1.Read(make([]byte, 1024))\n\tif n != 0 {\n\t\tt.Errorf(\"unexpected Read count: got %d, want 0\", n)\n\t}\n\tcheckForTimeoutError(t, err)\n\n\ttestRoundtrip(t, c1)\n}\n\n// testPresentTimeout tests that a deadline set while there are pending\n// Read and Write operations immediately times out those operations.\nfunc testPresentTimeout(t *testing.T, c1, c2 net.Conn) {\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\twg.Add(3)\n\n\tdeadlineSet := make(chan bool, 1)\n\tgo func() 
{\n\t\tdefer wg.Done()\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\tdeadlineSet <- true\n\t\tc1.SetReadDeadline(aLongTimeAgo)\n\t\tc1.SetWriteDeadline(aLongTimeAgo)\n\t}()\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tn, err := c1.Read(make([]byte, 1024))\n\t\tif n != 0 {\n\t\t\tt.Errorf(\"unexpected Read count: got %d, want 0\", n)\n\t\t}\n\t\tcheckForTimeoutError(t, err)\n\t\tif len(deadlineSet) == 0 {\n\t\t\tt.Error(\"Read timed out before deadline is set\")\n\t\t}\n\t}()\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tvar err error\n\t\tfor err == nil {\n\t\t\t_, err = c1.Write(make([]byte, 1024))\n\t\t}\n\t\tcheckForTimeoutError(t, err)\n\t\tif len(deadlineSet) == 0 {\n\t\t\tt.Error(\"Write timed out before deadline is set\")\n\t\t}\n\t}()\n}\n\n// testFutureTimeout tests that a future deadline will eventually time out\n// Read and Write operations.\nfunc testFutureTimeout(t *testing.T, c1, c2 net.Conn) {\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\n\tc1.SetDeadline(time.Now().Add(100 * time.Millisecond))\n\tgo func() {\n\t\tdefer wg.Done()\n\t\t_, err := c1.Read(make([]byte, 1024))\n\t\tcheckForTimeoutError(t, err)\n\t}()\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tvar err error\n\t\tfor err == nil {\n\t\t\t_, err = c1.Write(make([]byte, 1024))\n\t\t}\n\t\tcheckForTimeoutError(t, err)\n\t}()\n\twg.Wait()\n\n\tgo chunkedCopy(c2, c2)\n\tresyncConn(t, c1)\n\ttestRoundtrip(t, c1)\n}\n\n// testCloseTimeout tests that calling Close immediately times out pending\n// Read and Write operations.\nfunc testCloseTimeout(t *testing.T, c1, c2 net.Conn) {\n\tgo chunkedCopy(c2, c2)\n\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\twg.Add(3)\n\n\t// Test for cancelation upon connection closure.\n\tc1.SetDeadline(neverTimeout)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\tc1.Close()\n\t}()\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tvar err error\n\t\tbuf := make([]byte, 1024)\n\t\tfor err == nil {\n\t\t\t_, err = c1.Read(buf)\n\t\t}\n\t}()\n\tgo func() 
{\n\t\tdefer wg.Done()\n\t\tvar err error\n\t\tbuf := make([]byte, 1024)\n\t\tfor err == nil {\n\t\t\t_, err = c1.Write(buf)\n\t\t}\n\t}()\n}\n\n// testConcurrentMethods tests that the methods of net.Conn can safely\n// be called concurrently.\nfunc testConcurrentMethods(t *testing.T, c1, c2 net.Conn) {\n\tif runtime.GOOS == \"plan9\" {\n\t\tt.Skip(\"skipping on plan9; see https://golang.org/issue/20489\")\n\t}\n\tgo chunkedCopy(c2, c2)\n\n\t// The results of the calls may be nonsensical, but this should\n\t// not trigger a race detector warning.\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 100; i++ {\n\t\twg.Add(7)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tc1.Read(make([]byte, 1024))\n\t\t}()\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tc1.Write(make([]byte, 1024))\n\t\t}()\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tc1.SetDeadline(time.Now().Add(10 * time.Millisecond))\n\t\t}()\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tc1.SetReadDeadline(aLongTimeAgo)\n\t\t}()\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tc1.SetWriteDeadline(aLongTimeAgo)\n\t\t}()\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tc1.LocalAddr()\n\t\t}()\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tc1.RemoteAddr()\n\t\t}()\n\t}\n\twg.Wait() // At worst, the deadline is set 10ms into the future\n\n\tresyncConn(t, c1)\n\ttestRoundtrip(t, c1)\n}\n\n// checkForTimeoutError checks that the error satisfies the Error interface\n// and that Timeout returns true.\nfunc checkForTimeoutError(t *testing.T, err error) {\n\tif nerr, ok := err.(net.Error); ok {\n\t\tif !nerr.Timeout() {\n\t\t\tt.Errorf(\"err.Timeout() = false, want true\")\n\t\t}\n\t} else {\n\t\tt.Errorf(\"got %T, want net.Error\", err)\n\t}\n}\n\n// testRoundtrip writes something into c and reads it back.\n// It assumes that everything written into c is echoed back to itself.\nfunc testRoundtrip(t *testing.T, c net.Conn) {\n\tif err := c.SetDeadline(neverTimeout); err != nil {\n\t\tt.Errorf(\"roundtrip SetDeadline error: %v\", 
err)\n\t}\n\n\tconst s = \"Hello, world!\"\n\tbuf := []byte(s)\n\tif _, err := c.Write(buf); err != nil {\n\t\tt.Errorf(\"roundtrip Write error: %v\", err)\n\t}\n\tif _, err := io.ReadFull(c, buf); err != nil {\n\t\tt.Errorf(\"roundtrip Read error: %v\", err)\n\t}\n\tif string(buf) != s {\n\t\tt.Errorf(\"roundtrip data mismatch: got %q, want %q\", buf, s)\n\t}\n}\n\n// resyncConn resynchronizes the connection into a sane state.\n// It assumes that everything written into c is echoed back to itself.\n// It assumes that 0xff is not currently on the wire or in the read buffer.\nfunc resyncConn(t *testing.T, c net.Conn) {\n\tc.SetDeadline(neverTimeout)\n\terrCh := make(chan error)\n\tgo func() {\n\t\t_, err := c.Write([]byte{0xff})\n\t\terrCh <- err\n\t}()\n\tbuf := make([]byte, 1024)\n\tfor {\n\t\tn, err := c.Read(buf)\n\t\tif n > 0 && bytes.IndexByte(buf[:n], 0xff) == n-1 {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Errorf(\"unexpected Read error: %v\", err)\n\t\t\tbreak\n\t\t}\n\t}\n\tif err := <-errCh; err != nil {\n\t\tt.Errorf(\"unexpected Write error: %v\", err)\n\t}\n}\n\n// chunkedCopy copies from r to w in fixed-width chunks to avoid\n// causing a Write that exceeds the maximum packet size for packet-based\n// connections like \"unixpacket\".\n// We assume that the maximum packet size is at least 1024.\nfunc chunkedCopy(w io.Writer, r io.Reader) error {\n\tb := make([]byte, 1024)\n\t_, err := io.CopyBuffer(struct{ io.Writer }{w}, struct{ io.Reader }{r}, b)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/nettest/conntest_go16.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.7\n\npackage nettest\n\nimport \"testing\"\n\nfunc testConn(t *testing.T, mp MakePipe) {\n\t// Avoid using subtests on Go 1.6 and below.\n\ttimeoutWrapper(t, mp, testBasicIO)\n\ttimeoutWrapper(t, mp, testPingPong)\n\ttimeoutWrapper(t, mp, testRacyRead)\n\ttimeoutWrapper(t, mp, testRacyWrite)\n\ttimeoutWrapper(t, mp, testReadTimeout)\n\ttimeoutWrapper(t, mp, testWriteTimeout)\n\ttimeoutWrapper(t, mp, testPastTimeout)\n\ttimeoutWrapper(t, mp, testPresentTimeout)\n\ttimeoutWrapper(t, mp, testFutureTimeout)\n\ttimeoutWrapper(t, mp, testCloseTimeout)\n\ttimeoutWrapper(t, mp, testConcurrentMethods)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/nettest/conntest_go17.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.7\n\npackage nettest\n\nimport \"testing\"\n\nfunc testConn(t *testing.T, mp MakePipe) {\n\t// Use subtests on Go 1.7 and above since it is better organized.\n\tt.Run(\"BasicIO\", func(t *testing.T) { timeoutWrapper(t, mp, testBasicIO) })\n\tt.Run(\"PingPong\", func(t *testing.T) { timeoutWrapper(t, mp, testPingPong) })\n\tt.Run(\"RacyRead\", func(t *testing.T) { timeoutWrapper(t, mp, testRacyRead) })\n\tt.Run(\"RacyWrite\", func(t *testing.T) { timeoutWrapper(t, mp, testRacyWrite) })\n\tt.Run(\"ReadTimeout\", func(t *testing.T) { timeoutWrapper(t, mp, testReadTimeout) })\n\tt.Run(\"WriteTimeout\", func(t *testing.T) { timeoutWrapper(t, mp, testWriteTimeout) })\n\tt.Run(\"PastTimeout\", func(t *testing.T) { timeoutWrapper(t, mp, testPastTimeout) })\n\tt.Run(\"PresentTimeout\", func(t *testing.T) { timeoutWrapper(t, mp, testPresentTimeout) })\n\tt.Run(\"FutureTimeout\", func(t *testing.T) { timeoutWrapper(t, mp, testFutureTimeout) })\n\tt.Run(\"CloseTimeout\", func(t *testing.T) { timeoutWrapper(t, mp, testCloseTimeout) })\n\tt.Run(\"ConcurrentMethods\", func(t *testing.T) { timeoutWrapper(t, mp, testConcurrentMethods) })\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/nettest/conntest_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.8\n\npackage nettest\n\nimport (\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"golang.org/x/net/internal/nettest\"\n)\n\nfunc TestTestConn(t *testing.T) {\n\ttests := []struct{ name, network string }{\n\t\t{\"TCP\", \"tcp\"},\n\t\t{\"UnixPipe\", \"unix\"},\n\t\t{\"UnixPacketPipe\", \"unixpacket\"},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif !nettest.TestableNetwork(tt.network) {\n\t\t\t\tt.Skipf(\"not supported on %s\", runtime.GOOS)\n\t\t\t}\n\n\t\t\tmp := func() (c1, c2 net.Conn, stop func(), err error) {\n\t\t\t\tln, err := nettest.NewLocalListener(tt.network)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, nil, nil, err\n\t\t\t\t}\n\n\t\t\t\t// Start a connection between two endpoints.\n\t\t\t\tvar err1, err2 error\n\t\t\t\tdone := make(chan bool)\n\t\t\t\tgo func() {\n\t\t\t\t\tc2, err2 = ln.Accept()\n\t\t\t\t\tclose(done)\n\t\t\t\t}()\n\t\t\t\tc1, err1 = net.Dial(ln.Addr().Network(), ln.Addr().String())\n\t\t\t\t<-done\n\n\t\t\t\tstop = func() {\n\t\t\t\t\tif err1 == nil {\n\t\t\t\t\t\tc1.Close()\n\t\t\t\t\t}\n\t\t\t\t\tif err2 == nil {\n\t\t\t\t\t\tc2.Close()\n\t\t\t\t\t}\n\t\t\t\t\tln.Close()\n\t\t\t\t\tswitch tt.network {\n\t\t\t\t\tcase \"unix\", \"unixpacket\":\n\t\t\t\t\t\tos.Remove(ln.Addr().String())\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tswitch {\n\t\t\t\tcase err1 != nil:\n\t\t\t\t\tstop()\n\t\t\t\t\treturn nil, nil, nil, err1\n\t\t\t\tcase err2 != nil:\n\t\t\t\t\tstop()\n\t\t\t\t\treturn nil, nil, nil, err2\n\t\t\t\tdefault:\n\t\t\t\t\treturn c1, c2, stop, nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tTestConn(t, mp)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/netutil/listen.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package netutil provides network utility functions, complementing the more\n// common ones in the net package.\npackage netutil // import \"golang.org/x/net/netutil\"\n\nimport (\n\t\"net\"\n\t\"sync\"\n)\n\n// LimitListener returns a Listener that accepts at most n simultaneous\n// connections from the provided Listener.\nfunc LimitListener(l net.Listener, n int) net.Listener {\n\treturn &limitListener{l, make(chan struct{}, n)}\n}\n\ntype limitListener struct {\n\tnet.Listener\n\tsem chan struct{}\n}\n\nfunc (l *limitListener) acquire() { l.sem <- struct{}{} }\nfunc (l *limitListener) release() { <-l.sem }\n\nfunc (l *limitListener) Accept() (net.Conn, error) {\n\tl.acquire()\n\tc, err := l.Listener.Accept()\n\tif err != nil {\n\t\tl.release()\n\t\treturn nil, err\n\t}\n\treturn &limitListenerConn{Conn: c, release: l.release}, nil\n}\n\ntype limitListenerConn struct {\n\tnet.Conn\n\treleaseOnce sync.Once\n\trelease     func()\n}\n\nfunc (l *limitListenerConn) Close() error {\n\terr := l.Conn.Close()\n\tl.releaseOnce.Do(l.release)\n\treturn err\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/netutil/listen_test.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage netutil\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org/x/net/internal/nettest\"\n)\n\nfunc TestLimitListener(t *testing.T) {\n\tconst max = 5\n\tattempts := (nettest.MaxOpenFiles() - max) / 2\n\tif attempts > 256 { // maximum length of accept queue is 128 by default\n\t\tattempts = 256\n\t}\n\n\tl, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer l.Close()\n\tl = LimitListener(l, max)\n\n\tvar open int32\n\tgo http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif n := atomic.AddInt32(&open, 1); n > max {\n\t\t\tt.Errorf(\"%d open connections, want <= %d\", n, max)\n\t\t}\n\t\tdefer atomic.AddInt32(&open, -1)\n\t\ttime.Sleep(10 * time.Millisecond)\n\t\tfmt.Fprint(w, \"some body\")\n\t}))\n\n\tvar wg sync.WaitGroup\n\tvar failed int32\n\tfor i := 0; i < attempts; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tc := http.Client{Timeout: 3 * time.Second}\n\t\t\tr, err := c.Get(\"http://\" + l.Addr().String())\n\t\t\tif err != nil {\n\t\t\t\tt.Log(err)\n\t\t\t\tatomic.AddInt32(&failed, 1)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer r.Body.Close()\n\t\t\tio.Copy(ioutil.Discard, r.Body)\n\t\t}()\n\t}\n\twg.Wait()\n\n\t// We expect some Gets to fail as the kernel's accept queue is filled,\n\t// but most should succeed.\n\tif int(failed) >= attempts/2 {\n\t\tt.Errorf(\"%d requests failed within %d attempts\", failed, attempts)\n\t}\n}\n\ntype errorListener struct {\n\tnet.Listener\n}\n\nfunc (errorListener) Accept() (net.Conn, error) {\n\treturn nil, errFake\n}\n\nvar errFake = errors.New(\"fake error from errorListener\")\n\n// This used to hang.\nfunc TestLimitListenerError(t 
*testing.T) {\n\tdonec := make(chan bool, 1)\n\tgo func() {\n\t\tconst n = 2\n\t\tll := LimitListener(errorListener{}, n)\n\t\tfor i := 0; i < n+1; i++ {\n\t\t\t_, err := ll.Accept()\n\t\t\tif err != errFake {\n\t\t\t\tt.Fatalf(\"Accept error = %v; want errFake\", err)\n\t\t\t}\n\t\t}\n\t\tdonec <- true\n\t}()\n\tselect {\n\tcase <-donec:\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"timeout. deadlock?\")\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/proxy/direct.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage proxy\n\nimport (\n\t\"net\"\n)\n\ntype direct struct{}\n\n// Direct is a direct proxy: one that makes network connections directly.\nvar Direct = direct{}\n\nfunc (direct) Dial(network, addr string) (net.Conn, error) {\n\treturn net.Dial(network, addr)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/proxy/per_host.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage proxy\n\nimport (\n\t\"net\"\n\t\"strings\"\n)\n\n// A PerHost directs connections to a default Dialer unless the host name\n// requested matches one of a number of exceptions.\ntype PerHost struct {\n\tdef, bypass Dialer\n\n\tbypassNetworks []*net.IPNet\n\tbypassIPs      []net.IP\n\tbypassZones    []string\n\tbypassHosts    []string\n}\n\n// NewPerHost returns a PerHost Dialer that directs connections to either\n// defaultDialer or bypass, depending on whether the connection matches one of\n// the configured rules.\nfunc NewPerHost(defaultDialer, bypass Dialer) *PerHost {\n\treturn &PerHost{\n\t\tdef:    defaultDialer,\n\t\tbypass: bypass,\n\t}\n}\n\n// Dial connects to the address addr on the given network through either\n// defaultDialer or bypass.\nfunc (p *PerHost) Dial(network, addr string) (c net.Conn, err error) {\n\thost, _, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn p.dialerForRequest(host).Dial(network, addr)\n}\n\nfunc (p *PerHost) dialerForRequest(host string) Dialer {\n\tif ip := net.ParseIP(host); ip != nil {\n\t\tfor _, net := range p.bypassNetworks {\n\t\t\tif net.Contains(ip) {\n\t\t\t\treturn p.bypass\n\t\t\t}\n\t\t}\n\t\tfor _, bypassIP := range p.bypassIPs {\n\t\t\tif bypassIP.Equal(ip) {\n\t\t\t\treturn p.bypass\n\t\t\t}\n\t\t}\n\t\treturn p.def\n\t}\n\n\tfor _, zone := range p.bypassZones {\n\t\tif strings.HasSuffix(host, zone) {\n\t\t\treturn p.bypass\n\t\t}\n\t\tif host == zone[1:] {\n\t\t\t// For a zone \".example.com\", we match \"example.com\"\n\t\t\t// too.\n\t\t\treturn p.bypass\n\t\t}\n\t}\n\tfor _, bypassHost := range p.bypassHosts {\n\t\tif bypassHost == host {\n\t\t\treturn p.bypass\n\t\t}\n\t}\n\treturn p.def\n}\n\n// AddFromString parses a string that contains comma-separated values\n// specifying 
hosts that should use the bypass proxy. Each value is either an\n// IP address, a CIDR range, a zone (*.example.com) or a host name\n// (localhost). A best effort is made to parse the string and errors are\n// ignored.\nfunc (p *PerHost) AddFromString(s string) {\n\thosts := strings.Split(s, \",\")\n\tfor _, host := range hosts {\n\t\thost = strings.TrimSpace(host)\n\t\tif len(host) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Contains(host, \"/\") {\n\t\t\t// We assume that it's a CIDR address like 127.0.0.0/8\n\t\t\tif _, net, err := net.ParseCIDR(host); err == nil {\n\t\t\t\tp.AddNetwork(net)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif ip := net.ParseIP(host); ip != nil {\n\t\t\tp.AddIP(ip)\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(host, \"*.\") {\n\t\t\tp.AddZone(host[1:])\n\t\t\tcontinue\n\t\t}\n\t\tp.AddHost(host)\n\t}\n}\n\n// AddIP specifies an IP address that will use the bypass proxy. Note that\n// this will only take effect if a literal IP address is dialed. A connection\n// to a named host will never match an IP.\nfunc (p *PerHost) AddIP(ip net.IP) {\n\tp.bypassIPs = append(p.bypassIPs, ip)\n}\n\n// AddNetwork specifies an IP range that will use the bypass proxy. Note that\n// this will only take effect if a literal IP address is dialed. A connection\n// to a named host will never match.\nfunc (p *PerHost) AddNetwork(net *net.IPNet) {\n\tp.bypassNetworks = append(p.bypassNetworks, net)\n}\n\n// AddZone specifies a DNS suffix that will use the bypass proxy. 
A zone of\n// \"example.com\" matches \"example.com\" and all of its subdomains.\nfunc (p *PerHost) AddZone(zone string) {\n\tif strings.HasSuffix(zone, \".\") {\n\t\tzone = zone[:len(zone)-1]\n\t}\n\tif !strings.HasPrefix(zone, \".\") {\n\t\tzone = \".\" + zone\n\t}\n\tp.bypassZones = append(p.bypassZones, zone)\n}\n\n// AddHost specifies a host name that will use the bypass proxy.\nfunc (p *PerHost) AddHost(host string) {\n\tif strings.HasSuffix(host, \".\") {\n\t\thost = host[:len(host)-1]\n\t}\n\tp.bypassHosts = append(p.bypassHosts, host)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/proxy/per_host_test.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage proxy\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"reflect\"\n\t\"testing\"\n)\n\ntype recordingProxy struct {\n\taddrs []string\n}\n\nfunc (r *recordingProxy) Dial(network, addr string) (net.Conn, error) {\n\tr.addrs = append(r.addrs, addr)\n\treturn nil, errors.New(\"recordingProxy\")\n}\n\nfunc TestPerHost(t *testing.T) {\n\tvar def, bypass recordingProxy\n\tperHost := NewPerHost(&def, &bypass)\n\tperHost.AddFromString(\"localhost,*.zone,127.0.0.1,10.0.0.1/8,1000::/16\")\n\n\texpectedDef := []string{\n\t\t\"example.com:123\",\n\t\t\"1.2.3.4:123\",\n\t\t\"[1001::]:123\",\n\t}\n\texpectedBypass := []string{\n\t\t\"localhost:123\",\n\t\t\"zone:123\",\n\t\t\"foo.zone:123\",\n\t\t\"127.0.0.1:123\",\n\t\t\"10.1.2.3:123\",\n\t\t\"[1000::]:123\",\n\t}\n\n\tfor _, addr := range expectedDef {\n\t\tperHost.Dial(\"tcp\", addr)\n\t}\n\tfor _, addr := range expectedBypass {\n\t\tperHost.Dial(\"tcp\", addr)\n\t}\n\n\tif !reflect.DeepEqual(expectedDef, def.addrs) {\n\t\tt.Errorf(\"Hosts which went to the default proxy didn't match. Got %v, want %v\", def.addrs, expectedDef)\n\t}\n\tif !reflect.DeepEqual(expectedBypass, bypass.addrs) {\n\t\tt.Errorf(\"Hosts which went to the bypass proxy didn't match. Got %v, want %v\", bypass.addrs, expectedBypass)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/proxy/proxy.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package proxy provides support for a variety of protocols to proxy network\n// data.\npackage proxy // import \"golang.org/x/net/proxy\"\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"net/url\"\n\t\"os\"\n\t\"sync\"\n)\n\n// A Dialer is a means to establish a connection.\ntype Dialer interface {\n\t// Dial connects to the given address via the proxy.\n\tDial(network, addr string) (c net.Conn, err error)\n}\n\n// Auth contains authentication parameters that specific Dialers may require.\ntype Auth struct {\n\tUser, Password string\n}\n\n// FromEnvironment returns the dialer specified by the proxy related variables in\n// the environment.\nfunc FromEnvironment() Dialer {\n\tallProxy := allProxyEnv.Get()\n\tif len(allProxy) == 0 {\n\t\treturn Direct\n\t}\n\n\tproxyURL, err := url.Parse(allProxy)\n\tif err != nil {\n\t\treturn Direct\n\t}\n\tproxy, err := FromURL(proxyURL, Direct)\n\tif err != nil {\n\t\treturn Direct\n\t}\n\n\tnoProxy := noProxyEnv.Get()\n\tif len(noProxy) == 0 {\n\t\treturn proxy\n\t}\n\n\tperHost := NewPerHost(proxy, Direct)\n\tperHost.AddFromString(noProxy)\n\treturn perHost\n}\n\n// proxySchemes is a map from URL schemes to a function that creates a Dialer\n// from a URL with such a scheme.\nvar proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error)\n\n// RegisterDialerType takes a URL scheme and a function to generate Dialers from\n// a URL with that scheme and a forwarding Dialer. 
Registered schemes are used\n// by FromURL.\nfunc RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) {\n\tif proxySchemes == nil {\n\t\tproxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error))\n\t}\n\tproxySchemes[scheme] = f\n}\n\n// FromURL returns a Dialer given a URL specification and an underlying\n// Dialer for it to make network requests.\nfunc FromURL(u *url.URL, forward Dialer) (Dialer, error) {\n\tvar auth *Auth\n\tif u.User != nil {\n\t\tauth = new(Auth)\n\t\tauth.User = u.User.Username()\n\t\tif p, ok := u.User.Password(); ok {\n\t\t\tauth.Password = p\n\t\t}\n\t}\n\n\tswitch u.Scheme {\n\tcase \"socks5\":\n\t\treturn SOCKS5(\"tcp\", u.Host, auth, forward)\n\t}\n\n\t// If the scheme doesn't match any of the built-in schemes, see if it\n\t// was registered by another package.\n\tif proxySchemes != nil {\n\t\tif f, ok := proxySchemes[u.Scheme]; ok {\n\t\t\treturn f(u, forward)\n\t\t}\n\t}\n\n\treturn nil, errors.New(\"proxy: unknown scheme: \" + u.Scheme)\n}\n\nvar (\n\tallProxyEnv = &envOnce{\n\t\tnames: []string{\"ALL_PROXY\", \"all_proxy\"},\n\t}\n\tnoProxyEnv = &envOnce{\n\t\tnames: []string{\"NO_PROXY\", \"no_proxy\"},\n\t}\n)\n\n// envOnce looks up an environment variable (optionally by multiple\n// names) once. It mitigates expensive lookups on some platforms\n// (e.g. Windows).\n// (Borrowed from net/http/transport.go)\ntype envOnce struct {\n\tnames []string\n\tonce  sync.Once\n\tval   string\n}\n\nfunc (e *envOnce) Get() string {\n\te.once.Do(e.init)\n\treturn e.val\n}\n\nfunc (e *envOnce) init() {\n\tfor _, n := range e.names {\n\t\te.val = os.Getenv(n)\n\t\tif e.val != \"\" {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// reset is used by tests\nfunc (e *envOnce) reset() {\n\te.once = sync.Once{}\n\te.val = \"\"\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/proxy/proxy_test.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage proxy\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n)\n\ntype proxyFromEnvTest struct {\n\tallProxyEnv string\n\tnoProxyEnv  string\n\twantTypeOf  Dialer\n}\n\nfunc (t proxyFromEnvTest) String() string {\n\tvar buf bytes.Buffer\n\tspace := func() {\n\t\tif buf.Len() > 0 {\n\t\t\tbuf.WriteByte(' ')\n\t\t}\n\t}\n\tif t.allProxyEnv != \"\" {\n\t\tfmt.Fprintf(&buf, \"all_proxy=%q\", t.allProxyEnv)\n\t}\n\tif t.noProxyEnv != \"\" {\n\t\tspace()\n\t\tfmt.Fprintf(&buf, \"no_proxy=%q\", t.noProxyEnv)\n\t}\n\treturn strings.TrimSpace(buf.String())\n}\n\nfunc TestFromEnvironment(t *testing.T) {\n\tResetProxyEnv()\n\n\ttype dummyDialer struct {\n\t\tdirect\n\t}\n\n\tRegisterDialerType(\"irc\", func(_ *url.URL, _ Dialer) (Dialer, error) {\n\t\treturn dummyDialer{}, nil\n\t})\n\n\tproxyFromEnvTests := []proxyFromEnvTest{\n\t\t{allProxyEnv: \"127.0.0.1:8080\", noProxyEnv: \"localhost, 127.0.0.1\", wantTypeOf: direct{}},\n\t\t{allProxyEnv: \"ftp://example.com:8000\", noProxyEnv: \"localhost, 127.0.0.1\", wantTypeOf: direct{}},\n\t\t{allProxyEnv: \"socks5://example.com:8080\", noProxyEnv: \"localhost, 127.0.0.1\", wantTypeOf: &PerHost{}},\n\t\t{allProxyEnv: \"irc://example.com:8000\", wantTypeOf: dummyDialer{}},\n\t\t{noProxyEnv: \"localhost, 127.0.0.1\", wantTypeOf: direct{}},\n\t\t{wantTypeOf: direct{}},\n\t}\n\n\tfor _, tt := range proxyFromEnvTests {\n\t\tos.Setenv(\"ALL_PROXY\", tt.allProxyEnv)\n\t\tos.Setenv(\"NO_PROXY\", tt.noProxyEnv)\n\t\tResetCachedEnvironment()\n\n\t\td := FromEnvironment()\n\t\tif got, want := fmt.Sprintf(\"%T\", d), fmt.Sprintf(\"%T\", tt.wantTypeOf); got != want {\n\t\t\tt.Errorf(\"%v: got type = %T, want %T\", tt, d, tt.wantTypeOf)\n\t\t}\n\t}\n}\n\nfunc TestFromURL(t 
*testing.T) {\n\tendSystem, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatalf(\"net.Listen failed: %v\", err)\n\t}\n\tdefer endSystem.Close()\n\tgateway, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatalf(\"net.Listen failed: %v\", err)\n\t}\n\tdefer gateway.Close()\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo socks5Gateway(t, gateway, endSystem, socks5Domain, &wg)\n\n\turl, err := url.Parse(\"socks5://user:password@\" + gateway.Addr().String())\n\tif err != nil {\n\t\tt.Fatalf(\"url.Parse failed: %v\", err)\n\t}\n\tproxy, err := FromURL(url, Direct)\n\tif err != nil {\n\t\tt.Fatalf(\"FromURL failed: %v\", err)\n\t}\n\t_, port, err := net.SplitHostPort(endSystem.Addr().String())\n\tif err != nil {\n\t\tt.Fatalf(\"net.SplitHostPort failed: %v\", err)\n\t}\n\tif c, err := proxy.Dial(\"tcp\", \"localhost:\"+port); err != nil {\n\t\tt.Fatalf(\"FromURL.Dial failed: %v\", err)\n\t} else {\n\t\tc.Close()\n\t}\n\n\twg.Wait()\n}\n\nfunc TestSOCKS5(t *testing.T) {\n\tendSystem, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatalf(\"net.Listen failed: %v\", err)\n\t}\n\tdefer endSystem.Close()\n\tgateway, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tif err != nil {\n\t\tt.Fatalf(\"net.Listen failed: %v\", err)\n\t}\n\tdefer gateway.Close()\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo socks5Gateway(t, gateway, endSystem, socks5IP4, &wg)\n\n\tproxy, err := SOCKS5(\"tcp\", gateway.Addr().String(), nil, Direct)\n\tif err != nil {\n\t\tt.Fatalf(\"SOCKS5 failed: %v\", err)\n\t}\n\tif c, err := proxy.Dial(\"tcp\", endSystem.Addr().String()); err != nil {\n\t\tt.Fatalf(\"SOCKS5.Dial failed: %v\", err)\n\t} else {\n\t\tc.Close()\n\t}\n\n\twg.Wait()\n}\n\nfunc socks5Gateway(t *testing.T, gateway, endSystem net.Listener, typ byte, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\n\tc, err := gateway.Accept()\n\tif err != nil {\n\t\tt.Errorf(\"net.Listener.Accept failed: %v\", err)\n\t\treturn\n\t}\n\tdefer 
c.Close()\n\n\tb := make([]byte, 32)\n\tvar n int\n\tif typ == socks5Domain {\n\t\tn = 4\n\t} else {\n\t\tn = 3\n\t}\n\tif _, err := io.ReadFull(c, b[:n]); err != nil {\n\t\tt.Errorf(\"io.ReadFull failed: %v\", err)\n\t\treturn\n\t}\n\tif _, err := c.Write([]byte{socks5Version, socks5AuthNone}); err != nil {\n\t\tt.Errorf(\"net.Conn.Write failed: %v\", err)\n\t\treturn\n\t}\n\tif typ == socks5Domain {\n\t\tn = 16\n\t} else {\n\t\tn = 10\n\t}\n\tif _, err := io.ReadFull(c, b[:n]); err != nil {\n\t\tt.Errorf(\"io.ReadFull failed: %v\", err)\n\t\treturn\n\t}\n\tif b[0] != socks5Version || b[1] != socks5Connect || b[2] != 0x00 || b[3] != typ {\n\t\tt.Errorf(\"got an unexpected packet: %#02x %#02x %#02x %#02x\", b[0], b[1], b[2], b[3])\n\t\treturn\n\t}\n\tif typ == socks5Domain {\n\t\tcopy(b[:5], []byte{socks5Version, 0x00, 0x00, socks5Domain, 9})\n\t\tb = append(b, []byte(\"localhost\")...)\n\t} else {\n\t\tcopy(b[:4], []byte{socks5Version, 0x00, 0x00, socks5IP4})\n\t}\n\thost, port, err := net.SplitHostPort(endSystem.Addr().String())\n\tif err != nil {\n\t\tt.Errorf(\"net.SplitHostPort failed: %v\", err)\n\t\treturn\n\t}\n\tb = append(b, []byte(net.ParseIP(host).To4())...)\n\tp, err := strconv.Atoi(port)\n\tif err != nil {\n\t\tt.Errorf(\"strconv.Atoi failed: %v\", err)\n\t\treturn\n\t}\n\tb = append(b, []byte{byte(p >> 8), byte(p)}...)\n\tif _, err := c.Write(b); err != nil {\n\t\tt.Errorf(\"net.Conn.Write failed: %v\", err)\n\t\treturn\n\t}\n}\n\nfunc ResetProxyEnv() {\n\tfor _, env := range []*envOnce{allProxyEnv, noProxyEnv} {\n\t\tfor _, v := range env.names {\n\t\t\tos.Setenv(v, \"\")\n\t\t}\n\t}\n\tResetCachedEnvironment()\n}\n\nfunc ResetCachedEnvironment() {\n\tallProxyEnv.reset()\n\tnoProxyEnv.reset()\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/proxy/socks5.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage proxy\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"net\"\n\t\"strconv\"\n)\n\n// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address\n// with an optional username and password. See RFC 1928 and RFC 1929.\nfunc SOCKS5(network, addr string, auth *Auth, forward Dialer) (Dialer, error) {\n\ts := &socks5{\n\t\tnetwork: network,\n\t\taddr:    addr,\n\t\tforward: forward,\n\t}\n\tif auth != nil {\n\t\ts.user = auth.User\n\t\ts.password = auth.Password\n\t}\n\n\treturn s, nil\n}\n\ntype socks5 struct {\n\tuser, password string\n\tnetwork, addr  string\n\tforward        Dialer\n}\n\nconst socks5Version = 5\n\nconst (\n\tsocks5AuthNone     = 0\n\tsocks5AuthPassword = 2\n)\n\nconst socks5Connect = 1\n\nconst (\n\tsocks5IP4    = 1\n\tsocks5Domain = 3\n\tsocks5IP6    = 4\n)\n\nvar socks5Errors = []string{\n\t\"\",\n\t\"general failure\",\n\t\"connection forbidden\",\n\t\"network unreachable\",\n\t\"host unreachable\",\n\t\"connection refused\",\n\t\"TTL expired\",\n\t\"command not supported\",\n\t\"address type not supported\",\n}\n\n// Dial connects to the address addr on the given network via the SOCKS5 proxy.\nfunc (s *socks5) Dial(network, addr string) (net.Conn, error) {\n\tswitch network {\n\tcase \"tcp\", \"tcp6\", \"tcp4\":\n\tdefault:\n\t\treturn nil, errors.New(\"proxy: no support for SOCKS5 proxy connections of type \" + network)\n\t}\n\n\tconn, err := s.forward.Dial(s.network, s.addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := s.connect(conn, addr); err != nil {\n\t\tconn.Close()\n\t\treturn nil, err\n\t}\n\treturn conn, nil\n}\n\n// connect takes an existing connection to a socks5 proxy server,\n// and commands the server to extend that connection to target,\n// which must be a canonical address with a host and port.\nfunc (s *socks5) 
connect(conn net.Conn, target string) error {\n\thost, portStr, err := net.SplitHostPort(target)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tport, err := strconv.Atoi(portStr)\n\tif err != nil {\n\t\treturn errors.New(\"proxy: failed to parse port number: \" + portStr)\n\t}\n\tif port < 1 || port > 0xffff {\n\t\treturn errors.New(\"proxy: port number out of range: \" + portStr)\n\t}\n\n\t// the size here is just an estimate\n\tbuf := make([]byte, 0, 6+len(host))\n\n\tbuf = append(buf, socks5Version)\n\tif len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {\n\t\tbuf = append(buf, 2 /* num auth methods */, socks5AuthNone, socks5AuthPassword)\n\t} else {\n\t\tbuf = append(buf, 1 /* num auth methods */, socks5AuthNone)\n\t}\n\n\tif _, err := conn.Write(buf); err != nil {\n\t\treturn errors.New(\"proxy: failed to write greeting to SOCKS5 proxy at \" + s.addr + \": \" + err.Error())\n\t}\n\n\tif _, err := io.ReadFull(conn, buf[:2]); err != nil {\n\t\treturn errors.New(\"proxy: failed to read greeting from SOCKS5 proxy at \" + s.addr + \": \" + err.Error())\n\t}\n\tif buf[0] != 5 {\n\t\treturn errors.New(\"proxy: SOCKS5 proxy at \" + s.addr + \" has unexpected version \" + strconv.Itoa(int(buf[0])))\n\t}\n\tif buf[1] == 0xff {\n\t\treturn errors.New(\"proxy: SOCKS5 proxy at \" + s.addr + \" requires authentication\")\n\t}\n\n\t// See RFC 1929\n\tif buf[1] == socks5AuthPassword {\n\t\tbuf = buf[:0]\n\t\tbuf = append(buf, 1 /* password protocol version */)\n\t\tbuf = append(buf, uint8(len(s.user)))\n\t\tbuf = append(buf, s.user...)\n\t\tbuf = append(buf, uint8(len(s.password)))\n\t\tbuf = append(buf, s.password...)\n\n\t\tif _, err := conn.Write(buf); err != nil {\n\t\t\treturn errors.New(\"proxy: failed to write authentication request to SOCKS5 proxy at \" + s.addr + \": \" + err.Error())\n\t\t}\n\n\t\tif _, err := io.ReadFull(conn, buf[:2]); err != nil {\n\t\t\treturn errors.New(\"proxy: failed to read authentication reply from SOCKS5 proxy at \" + s.addr + 
\": \" + err.Error())\n\t\t}\n\n\t\tif buf[1] != 0 {\n\t\t\treturn errors.New(\"proxy: SOCKS5 proxy at \" + s.addr + \" rejected username/password\")\n\t\t}\n\t}\n\n\tbuf = buf[:0]\n\tbuf = append(buf, socks5Version, socks5Connect, 0 /* reserved */)\n\n\tif ip := net.ParseIP(host); ip != nil {\n\t\tif ip4 := ip.To4(); ip4 != nil {\n\t\t\tbuf = append(buf, socks5IP4)\n\t\t\tip = ip4\n\t\t} else {\n\t\t\tbuf = append(buf, socks5IP6)\n\t\t}\n\t\tbuf = append(buf, ip...)\n\t} else {\n\t\tif len(host) > 255 {\n\t\t\treturn errors.New(\"proxy: destination host name too long: \" + host)\n\t\t}\n\t\tbuf = append(buf, socks5Domain)\n\t\tbuf = append(buf, byte(len(host)))\n\t\tbuf = append(buf, host...)\n\t}\n\tbuf = append(buf, byte(port>>8), byte(port))\n\n\tif _, err := conn.Write(buf); err != nil {\n\t\treturn errors.New(\"proxy: failed to write connect request to SOCKS5 proxy at \" + s.addr + \": \" + err.Error())\n\t}\n\n\tif _, err := io.ReadFull(conn, buf[:4]); err != nil {\n\t\treturn errors.New(\"proxy: failed to read connect reply from SOCKS5 proxy at \" + s.addr + \": \" + err.Error())\n\t}\n\n\tfailure := \"unknown error\"\n\tif int(buf[1]) < len(socks5Errors) {\n\t\tfailure = socks5Errors[buf[1]]\n\t}\n\n\tif len(failure) > 0 {\n\t\treturn errors.New(\"proxy: SOCKS5 proxy at \" + s.addr + \" failed to connect: \" + failure)\n\t}\n\n\tbytesToDiscard := 0\n\tswitch buf[3] {\n\tcase socks5IP4:\n\t\tbytesToDiscard = net.IPv4len\n\tcase socks5IP6:\n\t\tbytesToDiscard = net.IPv6len\n\tcase socks5Domain:\n\t\t_, err := io.ReadFull(conn, buf[:1])\n\t\tif err != nil {\n\t\t\treturn errors.New(\"proxy: failed to read domain length from SOCKS5 proxy at \" + s.addr + \": \" + err.Error())\n\t\t}\n\t\tbytesToDiscard = int(buf[0])\n\tdefault:\n\t\treturn errors.New(\"proxy: got unknown address type \" + strconv.Itoa(int(buf[3])) + \" from SOCKS5 proxy at \" + s.addr)\n\t}\n\n\tif cap(buf) < bytesToDiscard {\n\t\tbuf = make([]byte, bytesToDiscard)\n\t} else {\n\t\tbuf = 
buf[:bytesToDiscard]\n\t}\n\tif _, err := io.ReadFull(conn, buf); err != nil {\n\t\treturn errors.New(\"proxy: failed to read address from SOCKS5 proxy at \" + s.addr + \": \" + err.Error())\n\t}\n\n\t// Also need to discard the port number\n\tif _, err := io.ReadFull(conn, buf[:2]); err != nil {\n\t\treturn errors.New(\"proxy: failed to read port from SOCKS5 proxy at \" + s.addr + \": \" + err.Error())\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/publicsuffix/gen.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\npackage main\n\n// This program generates table.go and table_test.go based on the authoritative\n// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat\n//\n// The version is derived from\n// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat\n// and a human-readable form is at\n// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat\n//\n// To fetch a particular git revision, such as 5c70ccd250, pass\n// -url \"https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat\"\n// and -version \"an explicit version string\".\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"go/format\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"golang.org/x/net/idna\"\n)\n\nconst (\n\t// These sum of these four values must be no greater than 32.\n\tnodesBitsChildren   = 10\n\tnodesBitsICANN      = 1\n\tnodesBitsTextOffset = 15\n\tnodesBitsTextLength = 6\n\n\t// These sum of these four values must be no greater than 32.\n\tchildrenBitsWildcard = 1\n\tchildrenBitsNodeType = 2\n\tchildrenBitsHi       = 14\n\tchildrenBitsLo       = 14\n)\n\nvar (\n\tmaxChildren   int\n\tmaxTextOffset int\n\tmaxTextLength int\n\tmaxHi         uint32\n\tmaxLo         uint32\n)\n\nfunc max(a, b int) int {\n\tif a < b {\n\t\treturn b\n\t}\n\treturn a\n}\n\nfunc u32max(a, b uint32) uint32 {\n\tif a < b {\n\t\treturn b\n\t}\n\treturn a\n}\n\nconst (\n\tnodeTypeNormal     = 0\n\tnodeTypeException  = 1\n\tnodeTypeParentOnly = 2\n\tnumNodeType        = 3\n)\n\nfunc nodeTypeStr(n int) string {\n\tswitch n {\n\tcase nodeTypeNormal:\n\t\treturn \"+\"\n\tcase nodeTypeException:\n\t\treturn \"!\"\n\tcase nodeTypeParentOnly:\n\t\treturn 
\"o\"\n\t}\n\tpanic(\"unreachable\")\n}\n\nconst (\n\tdefaultURL   = \"https://publicsuffix.org/list/effective_tld_names.dat\"\n\tgitCommitURL = \"https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat\"\n)\n\nvar (\n\tlabelEncoding = map[string]uint32{}\n\tlabelsList    = []string{}\n\tlabelsMap     = map[string]bool{}\n\trules         = []string{}\n\n\t// validSuffixRE is used to check that the entries in the public suffix\n\t// list are in canonical form (after Punycode encoding). Specifically,\n\t// capital letters are not allowed.\n\tvalidSuffixRE = regexp.MustCompile(`^[a-z0-9_\\!\\*\\-\\.]+$`)\n\n\tshaRE  = regexp.MustCompile(`\"sha\":\"([^\"]+)\"`)\n\tdateRE = regexp.MustCompile(`\"committer\":{[^{]+\"date\":\"([^\"]+)\"`)\n\n\tcomments = flag.Bool(\"comments\", false, \"generate table.go comments, for debugging\")\n\tsubset   = flag.Bool(\"subset\", false, \"generate only a subset of the full table, for debugging\")\n\turl      = flag.String(\"url\", defaultURL, \"URL of the publicsuffix.org list. 
If empty, stdin is read instead\")\n\tv        = flag.Bool(\"v\", false, \"verbose output (to stderr)\")\n\tversion  = flag.String(\"version\", \"\", \"the effective_tld_names.dat version\")\n)\n\nfunc main() {\n\tif err := main1(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc main1() error {\n\tflag.Parse()\n\tif nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 {\n\t\treturn fmt.Errorf(\"not enough bits to encode the nodes table\")\n\t}\n\tif childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 {\n\t\treturn fmt.Errorf(\"not enough bits to encode the children table\")\n\t}\n\tif *version == \"\" {\n\t\tif *url != defaultURL {\n\t\t\treturn fmt.Errorf(\"-version was not specified, and the -url is not the default one\")\n\t\t}\n\t\tsha, date, err := gitCommit()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*version = fmt.Sprintf(\"publicsuffix.org's public_suffix_list.dat, git revision %s (%s)\", sha, date)\n\t}\n\tvar r io.Reader = os.Stdin\n\tif *url != \"\" {\n\t\tres, err := http.Get(*url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif res.StatusCode != http.StatusOK {\n\t\t\treturn fmt.Errorf(\"bad GET status for %s: %d\", *url, res.Status)\n\t\t}\n\t\tr = res.Body\n\t\tdefer res.Body.Close()\n\t}\n\n\tvar root node\n\ticann := false\n\tbr := bufio.NewReader(r)\n\tfor {\n\t\ts, err := br.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\ts = strings.TrimSpace(s)\n\t\tif strings.Contains(s, \"BEGIN ICANN DOMAINS\") {\n\t\t\ticann = true\n\t\t\tcontinue\n\t\t}\n\t\tif strings.Contains(s, \"END ICANN DOMAINS\") {\n\t\t\ticann = false\n\t\t\tcontinue\n\t\t}\n\t\tif s == \"\" || strings.HasPrefix(s, \"//\") {\n\t\t\tcontinue\n\t\t}\n\t\ts, err = idna.ToASCII(s)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !validSuffixRE.MatchString(s) {\n\t\t\treturn fmt.Errorf(\"bad publicsuffix.org list 
data: %q\", s)\n\t\t}\n\n\t\tif *subset {\n\t\t\tswitch {\n\t\t\tcase s == \"ac.jp\" || strings.HasSuffix(s, \".ac.jp\"):\n\t\t\tcase s == \"ak.us\" || strings.HasSuffix(s, \".ak.us\"):\n\t\t\tcase s == \"ao\" || strings.HasSuffix(s, \".ao\"):\n\t\t\tcase s == \"ar\" || strings.HasSuffix(s, \".ar\"):\n\t\t\tcase s == \"arpa\" || strings.HasSuffix(s, \".arpa\"):\n\t\t\tcase s == \"cy\" || strings.HasSuffix(s, \".cy\"):\n\t\t\tcase s == \"dyndns.org\" || strings.HasSuffix(s, \".dyndns.org\"):\n\t\t\tcase s == \"jp\":\n\t\t\tcase s == \"kobe.jp\" || strings.HasSuffix(s, \".kobe.jp\"):\n\t\t\tcase s == \"kyoto.jp\" || strings.HasSuffix(s, \".kyoto.jp\"):\n\t\t\tcase s == \"om\" || strings.HasSuffix(s, \".om\"):\n\t\t\tcase s == \"uk\" || strings.HasSuffix(s, \".uk\"):\n\t\t\tcase s == \"uk.com\" || strings.HasSuffix(s, \".uk.com\"):\n\t\t\tcase s == \"tw\" || strings.HasSuffix(s, \".tw\"):\n\t\t\tcase s == \"zw\" || strings.HasSuffix(s, \".zw\"):\n\t\t\tcase s == \"xn--p1ai\" || strings.HasSuffix(s, \".xn--p1ai\"):\n\t\t\t\t// xn--p1ai is Russian-Cyrillic \"рф\".\n\t\t\tdefault:\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\trules = append(rules, s)\n\n\t\tnt, wildcard := nodeTypeNormal, false\n\t\tswitch {\n\t\tcase strings.HasPrefix(s, \"*.\"):\n\t\t\ts, nt = s[2:], nodeTypeParentOnly\n\t\t\twildcard = true\n\t\tcase strings.HasPrefix(s, \"!\"):\n\t\t\ts, nt = s[1:], nodeTypeException\n\t\t}\n\t\tlabels := strings.Split(s, \".\")\n\t\tfor n, i := &root, len(labels)-1; i >= 0; i-- {\n\t\t\tlabel := labels[i]\n\t\t\tn = n.child(label)\n\t\t\tif i == 0 {\n\t\t\t\tif nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly {\n\t\t\t\t\tn.nodeType = nt\n\t\t\t\t}\n\t\t\t\tn.icann = n.icann && icann\n\t\t\t\tn.wildcard = n.wildcard || wildcard\n\t\t\t}\n\t\t\tlabelsMap[label] = true\n\t\t}\n\t}\n\tlabelsList = make([]string, 0, len(labelsMap))\n\tfor label := range labelsMap {\n\t\tlabelsList = append(labelsList, label)\n\t}\n\tsort.Strings(labelsList)\n\n\tif err := 
generate(printReal, &root, \"table.go\"); err != nil {\n\t\treturn err\n\t}\n\tif err := generate(printTest, &root, \"table_test.go\"); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc generate(p func(io.Writer, *node) error, root *node, filename string) error {\n\tbuf := new(bytes.Buffer)\n\tif err := p(buf, root); err != nil {\n\t\treturn err\n\t}\n\tb, err := format.Source(buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(filename, b, 0644)\n}\n\nfunc gitCommit() (sha, date string, retErr error) {\n\tres, err := http.Get(gitCommitURL)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tif res.StatusCode != http.StatusOK {\n\t\treturn \"\", \"\", fmt.Errorf(\"bad GET status for %s: %d\", gitCommitURL, res.Status)\n\t}\n\tdefer res.Body.Close()\n\tb, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tif m := shaRE.FindSubmatch(b); m != nil {\n\t\tsha = string(m[1])\n\t}\n\tif m := dateRE.FindSubmatch(b); m != nil {\n\t\tdate = string(m[1])\n\t}\n\tif sha == \"\" || date == \"\" {\n\t\tretErr = fmt.Errorf(\"could not find commit SHA and date in %s\", gitCommitURL)\n\t}\n\treturn sha, date, retErr\n}\n\nfunc printTest(w io.Writer, n *node) error {\n\tfmt.Fprintf(w, \"// generated by go run gen.go; DO NOT EDIT\\n\\n\")\n\tfmt.Fprintf(w, \"package publicsuffix\\n\\nvar rules = [...]string{\\n\")\n\tfor _, rule := range rules {\n\t\tfmt.Fprintf(w, \"%q,\\n\", rule)\n\t}\n\tfmt.Fprintf(w, \"}\\n\\nvar nodeLabels = [...]string{\\n\")\n\tif err := n.walk(w, printNodeLabel); err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintf(w, \"}\\n\")\n\treturn nil\n}\n\nfunc printReal(w io.Writer, n *node) error {\n\tconst header = `// generated by go run gen.go; DO NOT EDIT\n\npackage publicsuffix\n\nconst version = %q\n\nconst (\n\tnodesBitsChildren   = %d\n\tnodesBitsICANN      = %d\n\tnodesBitsTextOffset = %d\n\tnodesBitsTextLength = %d\n\n\tchildrenBitsWildcard = %d\n\tchildrenBitsNodeType = 
%d\n\tchildrenBitsHi       = %d\n\tchildrenBitsLo       = %d\n)\n\nconst (\n\tnodeTypeNormal     = %d\n\tnodeTypeException  = %d\n\tnodeTypeParentOnly = %d\n)\n\n// numTLD is the number of top level domains.\nconst numTLD = %d\n\n`\n\tfmt.Fprintf(w, header, *version,\n\t\tnodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength,\n\t\tchildrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo,\n\t\tnodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children))\n\n\ttext := combineText(labelsList)\n\tif text == \"\" {\n\t\treturn fmt.Errorf(\"internal error: makeText returned no text\")\n\t}\n\tfor _, label := range labelsList {\n\t\toffset, length := strings.Index(text, label), len(label)\n\t\tif offset < 0 {\n\t\t\treturn fmt.Errorf(\"internal error: could not find %q in text %q\", label, text)\n\t\t}\n\t\tmaxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length)\n\t\tif offset >= 1<<nodesBitsTextOffset {\n\t\t\treturn fmt.Errorf(\"text offset %d is too large, or nodeBitsTextOffset is too small\", offset)\n\t\t}\n\t\tif length >= 1<<nodesBitsTextLength {\n\t\t\treturn fmt.Errorf(\"text length %d is too large, or nodeBitsTextLength is too small\", length)\n\t\t}\n\t\tlabelEncoding[label] = uint32(offset)<<nodesBitsTextLength | uint32(length)\n\t}\n\tfmt.Fprintf(w, \"// Text is the combined text of all labels.\\nconst text = \")\n\tfor len(text) > 0 {\n\t\tn, plus := len(text), \"\"\n\t\tif n > 64 {\n\t\t\tn, plus = 64, \" +\"\n\t\t}\n\t\tfmt.Fprintf(w, \"%q%s\\n\", text[:n], plus)\n\t\ttext = text[n:]\n\t}\n\n\tif err := n.walk(w, assignIndexes); err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Fprintf(w, `\n\n// nodes is the list of nodes. 
Each node is represented as a uint32, which\n// encodes the node's children, wildcard bit and node type (as an index into\n// the children array), ICANN bit and text.\n//\n// If the table was generated with the -comments flag, there is a //-comment\n// after each node's data. In it is the nodes-array indexes of the children,\n// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The\n// nodeType is printed as + for normal, ! for exception, and o for parent-only\n// nodes that have children but don't match a domain label in their own right.\n// An I denotes an ICANN domain.\n//\n// The layout within the uint32, from MSB to LSB, is:\n//\t[%2d bits] unused\n//\t[%2d bits] children index\n//\t[%2d bits] ICANN bit\n//\t[%2d bits] text index\n//\t[%2d bits] text length\nvar nodes = [...]uint32{\n`,\n\t\t32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength,\n\t\tnodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength)\n\tif err := n.walk(w, printNode); err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintf(w, `}\n\n// children is the list of nodes' children, the parent's wildcard bit and the\n// parent's node type. 
If a node has no children then their children index\n// will be in the range [0, 6), depending on the wildcard bit and node type.\n//\n// The layout within the uint32, from MSB to LSB, is:\n//\t[%2d bits] unused\n//\t[%2d bits] wildcard bit\n//\t[%2d bits] node type\n//\t[%2d bits] high nodes index (exclusive) of children\n//\t[%2d bits] low nodes index (inclusive) of children\nvar children=[...]uint32{\n`,\n\t\t32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo,\n\t\tchildrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo)\n\tfor i, c := range childrenEncoding {\n\t\ts := \"---------------\"\n\t\tlo := c & (1<<childrenBitsLo - 1)\n\t\thi := (c >> childrenBitsLo) & (1<<childrenBitsHi - 1)\n\t\tif lo != hi {\n\t\t\ts = fmt.Sprintf(\"n0x%04x-n0x%04x\", lo, hi)\n\t\t}\n\t\tnodeType := int(c>>(childrenBitsLo+childrenBitsHi)) & (1<<childrenBitsNodeType - 1)\n\t\twildcard := c>>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0\n\t\tif *comments {\n\t\t\tfmt.Fprintf(w, \"0x%08x, // c0x%04x (%s)%s %s\\n\",\n\t\t\t\tc, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType))\n\t\t} else {\n\t\t\tfmt.Fprintf(w, \"0x%x,\\n\", c)\n\t\t}\n\t}\n\tfmt.Fprintf(w, \"}\\n\\n\")\n\tfmt.Fprintf(w, \"// max children %d (capacity %d)\\n\", maxChildren, 1<<nodesBitsChildren-1)\n\tfmt.Fprintf(w, \"// max text offset %d (capacity %d)\\n\", maxTextOffset, 1<<nodesBitsTextOffset-1)\n\tfmt.Fprintf(w, \"// max text length %d (capacity %d)\\n\", maxTextLength, 1<<nodesBitsTextLength-1)\n\tfmt.Fprintf(w, \"// max hi %d (capacity %d)\\n\", maxHi, 1<<childrenBitsHi-1)\n\tfmt.Fprintf(w, \"// max lo %d (capacity %d)\\n\", maxLo, 1<<childrenBitsLo-1)\n\treturn nil\n}\n\ntype node struct {\n\tlabel    string\n\tnodeType int\n\ticann    bool\n\twildcard bool\n\t// nodesIndex and childrenIndex are the index of this node in the nodes\n\t// and the index of its children offset/length in the children arrays.\n\tnodesIndex, childrenIndex int\n\t// firstChild is 
the index of this node's first child, or zero if this\n\t// node has no children.\n\tfirstChild int\n\t// children are the node's children, in strictly increasing node label order.\n\tchildren []*node\n}\n\nfunc (n *node) walk(w io.Writer, f func(w1 io.Writer, n1 *node) error) error {\n\tif err := f(w, n); err != nil {\n\t\treturn err\n\t}\n\tfor _, c := range n.children {\n\t\tif err := c.walk(w, f); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// child returns the child of n with the given label. The child is created if\n// it did not exist beforehand.\nfunc (n *node) child(label string) *node {\n\tfor _, c := range n.children {\n\t\tif c.label == label {\n\t\t\treturn c\n\t\t}\n\t}\n\tc := &node{\n\t\tlabel:    label,\n\t\tnodeType: nodeTypeParentOnly,\n\t\ticann:    true,\n\t}\n\tn.children = append(n.children, c)\n\tsort.Sort(byLabel(n.children))\n\treturn c\n}\n\ntype byLabel []*node\n\nfunc (b byLabel) Len() int           { return len(b) }\nfunc (b byLabel) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }\nfunc (b byLabel) Less(i, j int) bool { return b[i].label < b[j].label }\n\nvar nextNodesIndex int\n\n// childrenEncoding are the encoded entries in the generated children array.\n// All these pre-defined entries have no children.\nvar childrenEncoding = []uint32{\n\t0 << (childrenBitsLo + childrenBitsHi), // Without wildcard bit, nodeTypeNormal.\n\t1 << (childrenBitsLo + childrenBitsHi), // Without wildcard bit, nodeTypeException.\n\t2 << (childrenBitsLo + childrenBitsHi), // Without wildcard bit, nodeTypeParentOnly.\n\t4 << (childrenBitsLo + childrenBitsHi), // With wildcard bit, nodeTypeNormal.\n\t5 << (childrenBitsLo + childrenBitsHi), // With wildcard bit, nodeTypeException.\n\t6 << (childrenBitsLo + childrenBitsHi), // With wildcard bit, nodeTypeParentOnly.\n}\n\nvar firstCallToAssignIndexes = true\n\nfunc assignIndexes(w io.Writer, n *node) error {\n\tif len(n.children) != 0 {\n\t\t// Assign nodesIndex.\n\t\tn.firstChild = 
nextNodesIndex\n\t\tfor _, c := range n.children {\n\t\t\tc.nodesIndex = nextNodesIndex\n\t\t\tnextNodesIndex++\n\t\t}\n\n\t\t// The root node's children is implicit.\n\t\tif firstCallToAssignIndexes {\n\t\t\tfirstCallToAssignIndexes = false\n\t\t\treturn nil\n\t\t}\n\n\t\t// Assign childrenIndex.\n\t\tmaxChildren = max(maxChildren, len(childrenEncoding))\n\t\tif len(childrenEncoding) >= 1<<nodesBitsChildren {\n\t\t\treturn fmt.Errorf(\"children table size %d is too large, or nodeBitsChildren is too small\", len(childrenEncoding))\n\t\t}\n\t\tn.childrenIndex = len(childrenEncoding)\n\t\tlo := uint32(n.firstChild)\n\t\thi := lo + uint32(len(n.children))\n\t\tmaxLo, maxHi = u32max(maxLo, lo), u32max(maxHi, hi)\n\t\tif lo >= 1<<childrenBitsLo {\n\t\t\treturn fmt.Errorf(\"children lo %d is too large, or childrenBitsLo is too small\", lo)\n\t\t}\n\t\tif hi >= 1<<childrenBitsHi {\n\t\t\treturn fmt.Errorf(\"children hi %d is too large, or childrenBitsHi is too small\", hi)\n\t\t}\n\t\tenc := hi<<childrenBitsLo | lo\n\t\tenc |= uint32(n.nodeType) << (childrenBitsLo + childrenBitsHi)\n\t\tif n.wildcard {\n\t\t\tenc |= 1 << (childrenBitsLo + childrenBitsHi + childrenBitsNodeType)\n\t\t}\n\t\tchildrenEncoding = append(childrenEncoding, enc)\n\t} else {\n\t\tn.childrenIndex = n.nodeType\n\t\tif n.wildcard {\n\t\t\tn.childrenIndex += numNodeType\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc printNode(w io.Writer, n *node) error {\n\tfor _, c := range n.children {\n\t\ts := \"---------------\"\n\t\tif len(c.children) != 0 {\n\t\t\ts = fmt.Sprintf(\"n0x%04x-n0x%04x\", c.firstChild, c.firstChild+len(c.children))\n\t\t}\n\t\tencoding := labelEncoding[c.label]\n\t\tif c.icann {\n\t\t\tencoding |= 1 << (nodesBitsTextLength + nodesBitsTextOffset)\n\t\t}\n\t\tencoding |= uint32(c.childrenIndex) << (nodesBitsTextLength + nodesBitsTextOffset + nodesBitsICANN)\n\t\tif *comments {\n\t\t\tfmt.Fprintf(w, \"0x%08x, // n0x%04x c0x%04x (%s)%s %s %s %s\\n\",\n\t\t\t\tencoding, c.nodesIndex, 
c.childrenIndex, s, wildcardStr(c.wildcard),\n\t\t\t\tnodeTypeStr(c.nodeType), icannStr(c.icann), c.label,\n\t\t\t)\n\t\t} else {\n\t\t\tfmt.Fprintf(w, \"0x%x,\\n\", encoding)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc printNodeLabel(w io.Writer, n *node) error {\n\tfor _, c := range n.children {\n\t\tfmt.Fprintf(w, \"%q,\\n\", c.label)\n\t}\n\treturn nil\n}\n\nfunc icannStr(icann bool) string {\n\tif icann {\n\t\treturn \"I\"\n\t}\n\treturn \" \"\n}\n\nfunc wildcardStr(wildcard bool) string {\n\tif wildcard {\n\t\treturn \"*\"\n\t}\n\treturn \" \"\n}\n\n// combineText combines all the strings in labelsList to form one giant string.\n// Overlapping strings will be merged: \"arpa\" and \"parliament\" could yield\n// \"arparliament\".\nfunc combineText(labelsList []string) string {\n\tbeforeLength := 0\n\tfor _, s := range labelsList {\n\t\tbeforeLength += len(s)\n\t}\n\n\ttext := crush(removeSubstrings(labelsList))\n\tif *v {\n\t\tfmt.Fprintf(os.Stderr, \"crushed %d bytes to become %d bytes\\n\", beforeLength, len(text))\n\t}\n\treturn text\n}\n\ntype byLength []string\n\nfunc (s byLength) Len() int           { return len(s) }\nfunc (s byLength) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }\nfunc (s byLength) Less(i, j int) bool { return len(s[i]) < len(s[j]) }\n\n// removeSubstrings returns a copy of its input with any strings removed\n// that are substrings of other provided strings.\nfunc removeSubstrings(input []string) []string {\n\t// Make a copy of input.\n\tss := append(make([]string, 0, len(input)), input...)\n\tsort.Sort(byLength(ss))\n\n\tfor i, shortString := range ss {\n\t\t// For each string, only consider strings higher than it in sort order, i.e.\n\t\t// of equal length or greater.\n\t\tfor _, longString := range ss[i+1:] {\n\t\t\tif strings.Contains(longString, shortString) {\n\t\t\t\tss[i] = \"\"\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// Remove the empty strings.\n\tsort.Strings(ss)\n\tfor len(ss) > 0 && ss[0] == \"\" {\n\t\tss = 
ss[1:]\n\t}\n\treturn ss\n}\n\n// crush combines a list of strings, taking advantage of overlaps. It returns a\n// single string that contains each input string as a substring.\nfunc crush(ss []string) string {\n\tmaxLabelLen := 0\n\tfor _, s := range ss {\n\t\tif maxLabelLen < len(s) {\n\t\t\tmaxLabelLen = len(s)\n\t\t}\n\t}\n\n\tfor prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- {\n\t\tprefixes := makePrefixMap(ss, prefixLen)\n\t\tfor i, s := range ss {\n\t\t\tif len(s) <= prefixLen {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmergeLabel(ss, i, prefixLen, prefixes)\n\t\t}\n\t}\n\n\treturn strings.Join(ss, \"\")\n}\n\n// mergeLabel merges the label at ss[i] with the first available matching label\n// in prefixMap, where the last \"prefixLen\" characters in ss[i] match the first\n// \"prefixLen\" characters in the matching label.\n// It will merge ss[i] repeatedly until no more matches are available.\n// All matching labels merged into ss[i] are replaced by \"\".\nfunc mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) {\n\ts := ss[i]\n\tsuffix := s[len(s)-prefixLen:]\n\tfor _, j := range prefixes[suffix] {\n\t\t// Empty strings mean \"already used.\" Also avoid merging with self.\n\t\tif ss[j] == \"\" || i == j {\n\t\t\tcontinue\n\t\t}\n\t\tif *v {\n\t\t\tfmt.Fprintf(os.Stderr, \"%d-length overlap at (%4d,%4d): %q and %q share %q\\n\",\n\t\t\t\tprefixLen, i, j, ss[i], ss[j], suffix)\n\t\t}\n\t\tss[i] += ss[j][prefixLen:]\n\t\tss[j] = \"\"\n\t\t// ss[i] has a new suffix, so merge again if possible.\n\t\t// Note: we only have to merge again at the same prefix length. Shorter\n\t\t// prefix lengths will be handled in the next iteration of crush's for loop.\n\t\t// Can there be matches for longer prefix lengths, introduced by the merge?\n\t\t// I believe that any such matches would by necessity have been eliminated\n\t\t// during substring removal or merged at a higher prefix length. 
For\n\t\t// instance, in crush(\"abc\", \"cde\", \"bcdef\"), combining \"abc\" and \"cde\"\n\t\t// would yield \"abcde\", which could be merged with \"bcdef.\" However, in\n\t\t// practice \"cde\" would already have been elimintated by removeSubstrings.\n\t\tmergeLabel(ss, i, prefixLen, prefixes)\n\t\treturn\n\t}\n}\n\n// prefixMap maps from a prefix to a list of strings containing that prefix. The\n// list of strings is represented as indexes into a slice of strings stored\n// elsewhere.\ntype prefixMap map[string][]int\n\n// makePrefixMap constructs a prefixMap from a slice of strings.\nfunc makePrefixMap(ss []string, prefixLen int) prefixMap {\n\tprefixes := make(prefixMap)\n\tfor i, s := range ss {\n\t\t// We use < rather than <= because if a label matches on a prefix equal to\n\t\t// its full length, that's actually a substring match handled by\n\t\t// removeSubstrings.\n\t\tif prefixLen < len(s) {\n\t\t\tprefix := s[:prefixLen]\n\t\t\tprefixes[prefix] = append(prefixes[prefix], i)\n\t\t}\n\t}\n\n\treturn prefixes\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/publicsuffix/list.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n//go:generate go run gen.go\n\n// Package publicsuffix provides a public suffix list based on data from\n// http://publicsuffix.org/. A public suffix is one under which Internet users\n// can directly register names.\npackage publicsuffix // import \"golang.org/x/net/publicsuffix\"\n\n// TODO: specify case sensitivity and leading/trailing dot behavior for\n// func PublicSuffix and func EffectiveTLDPlusOne.\n\nimport (\n\t\"fmt\"\n\t\"net/http/cookiejar\"\n\t\"strings\"\n)\n\n// List implements the cookiejar.PublicSuffixList interface by calling the\n// PublicSuffix function.\nvar List cookiejar.PublicSuffixList = list{}\n\ntype list struct{}\n\nfunc (list) PublicSuffix(domain string) string {\n\tps, _ := PublicSuffix(domain)\n\treturn ps\n}\n\nfunc (list) String() string {\n\treturn version\n}\n\n// PublicSuffix returns the public suffix of the domain using a copy of the\n// publicsuffix.org database compiled into the library.\n//\n// icann is whether the public suffix is managed by the Internet Corporation\n// for Assigned Names and Numbers. If not, the public suffix is privately\n// managed. 
For example, foo.org and foo.co.uk are ICANN domains,\n// foo.dyndns.org and foo.blogspot.co.uk are private domains.\n//\n// Use cases for distinguishing ICANN domains like foo.com from private\n// domains like foo.appspot.com can be found at\n// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases\nfunc PublicSuffix(domain string) (publicSuffix string, icann bool) {\n\tlo, hi := uint32(0), uint32(numTLD)\n\ts, suffix, wildcard := domain, len(domain), false\nloop:\n\tfor {\n\t\tdot := strings.LastIndex(s, \".\")\n\t\tif wildcard {\n\t\t\tsuffix = 1 + dot\n\t\t}\n\t\tif lo == hi {\n\t\t\tbreak\n\t\t}\n\t\tf := find(s[1+dot:], lo, hi)\n\t\tif f == notFound {\n\t\t\tbreak\n\t\t}\n\n\t\tu := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength)\n\t\ticann = u&(1<<nodesBitsICANN-1) != 0\n\t\tu >>= nodesBitsICANN\n\t\tu = children[u&(1<<nodesBitsChildren-1)]\n\t\tlo = u & (1<<childrenBitsLo - 1)\n\t\tu >>= childrenBitsLo\n\t\thi = u & (1<<childrenBitsHi - 1)\n\t\tu >>= childrenBitsHi\n\t\tswitch u & (1<<childrenBitsNodeType - 1) {\n\t\tcase nodeTypeNormal:\n\t\t\tsuffix = 1 + dot\n\t\tcase nodeTypeException:\n\t\t\tsuffix = 1 + len(s)\n\t\t\tbreak loop\n\t\t}\n\t\tu >>= childrenBitsNodeType\n\t\twildcard = u&(1<<childrenBitsWildcard-1) != 0\n\n\t\tif dot == -1 {\n\t\t\tbreak\n\t\t}\n\t\ts = s[:dot]\n\t}\n\tif suffix == len(domain) {\n\t\t// If no rules match, the prevailing rule is \"*\".\n\t\treturn domain[1+strings.LastIndex(domain, \".\"):], icann\n\t}\n\treturn domain[suffix:], icann\n}\n\nconst notFound uint32 = 1<<32 - 1\n\n// find returns the index of the node in the range [lo, hi) whose label equals\n// label, or notFound if there is no such node. 
The range is assumed to be in\n// strictly increasing node label order.\nfunc find(label string, lo, hi uint32) uint32 {\n\tfor lo < hi {\n\t\tmid := lo + (hi-lo)/2\n\t\ts := nodeLabel(mid)\n\t\tif s < label {\n\t\t\tlo = mid + 1\n\t\t} else if s == label {\n\t\t\treturn mid\n\t\t} else {\n\t\t\thi = mid\n\t\t}\n\t}\n\treturn notFound\n}\n\n// nodeLabel returns the label for the i'th node.\nfunc nodeLabel(i uint32) string {\n\tx := nodes[i]\n\tlength := x & (1<<nodesBitsTextLength - 1)\n\tx >>= nodesBitsTextLength\n\toffset := x & (1<<nodesBitsTextOffset - 1)\n\treturn text[offset : offset+length]\n}\n\n// EffectiveTLDPlusOne returns the effective top level domain plus one more\n// label. For example, the eTLD+1 for \"foo.bar.golang.org\" is \"golang.org\".\nfunc EffectiveTLDPlusOne(domain string) (string, error) {\n\tsuffix, _ := PublicSuffix(domain)\n\tif len(domain) <= len(suffix) {\n\t\treturn \"\", fmt.Errorf(\"publicsuffix: cannot derive eTLD+1 for domain %q\", domain)\n\t}\n\ti := len(domain) - len(suffix) - 1\n\tif domain[i] != '.' {\n\t\treturn \"\", fmt.Errorf(\"publicsuffix: invalid public suffix %q for domain %q\", suffix, domain)\n\t}\n\treturn domain[1+strings.LastIndex(domain[:i], \".\"):], nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/publicsuffix/list_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage publicsuffix\n\nimport (\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestNodeLabel(t *testing.T) {\n\tfor i, want := range nodeLabels {\n\t\tgot := nodeLabel(uint32(i))\n\t\tif got != want {\n\t\t\tt.Errorf(\"%d: got %q, want %q\", i, got, want)\n\t\t}\n\t}\n}\n\nfunc TestFind(t *testing.T) {\n\ttestCases := []string{\n\t\t\"\",\n\t\t\"a\",\n\t\t\"a0\",\n\t\t\"aaaa\",\n\t\t\"ao\",\n\t\t\"ap\",\n\t\t\"ar\",\n\t\t\"aro\",\n\t\t\"arp\",\n\t\t\"arpa\",\n\t\t\"arpaa\",\n\t\t\"arpb\",\n\t\t\"az\",\n\t\t\"b\",\n\t\t\"b0\",\n\t\t\"ba\",\n\t\t\"z\",\n\t\t\"zu\",\n\t\t\"zv\",\n\t\t\"zw\",\n\t\t\"zx\",\n\t\t\"zy\",\n\t\t\"zz\",\n\t\t\"zzzz\",\n\t}\n\tfor _, tc := range testCases {\n\t\tgot := find(tc, 0, numTLD)\n\t\twant := notFound\n\t\tfor i := uint32(0); i < numTLD; i++ {\n\t\t\tif tc == nodeLabel(i) {\n\t\t\t\twant = i\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif got != want {\n\t\t\tt.Errorf(\"%q: got %d, want %d\", tc, got, want)\n\t\t}\n\t}\n}\n\nfunc TestICANN(t *testing.T) {\n\ttestCases := map[string]bool{\n\t\t\"foo.org\":            true,\n\t\t\"foo.co.uk\":          true,\n\t\t\"foo.dyndns.org\":     false,\n\t\t\"foo.go.dyndns.org\":  false,\n\t\t\"foo.blogspot.co.uk\": false,\n\t\t\"foo.intranet\":       false,\n\t}\n\tfor domain, want := range testCases {\n\t\t_, got := PublicSuffix(domain)\n\t\tif got != want {\n\t\t\tt.Errorf(\"%q: got %v, want %v\", domain, got, want)\n\t\t}\n\t}\n}\n\nvar publicSuffixTestCases = []struct {\n\tdomain, want string\n}{\n\t// Empty string.\n\t{\"\", \"\"},\n\n\t// The .ao rules are:\n\t// ao\n\t// ed.ao\n\t// gv.ao\n\t// og.ao\n\t// co.ao\n\t// pb.ao\n\t// it.ao\n\t{\"ao\", \"ao\"},\n\t{\"www.ao\", \"ao\"},\n\t{\"pb.ao\", \"pb.ao\"},\n\t{\"www.pb.ao\", \"pb.ao\"},\n\t{\"www.xxx.yyy.zzz.pb.ao\", \"pb.ao\"},\n\n\t// The .ar rules are:\n\t// 
ar\n\t// com.ar\n\t// edu.ar\n\t// gob.ar\n\t// gov.ar\n\t// int.ar\n\t// mil.ar\n\t// net.ar\n\t// org.ar\n\t// tur.ar\n\t// blogspot.com.ar\n\t{\"ar\", \"ar\"},\n\t{\"www.ar\", \"ar\"},\n\t{\"nic.ar\", \"ar\"},\n\t{\"www.nic.ar\", \"ar\"},\n\t{\"com.ar\", \"com.ar\"},\n\t{\"www.com.ar\", \"com.ar\"},\n\t{\"blogspot.com.ar\", \"blogspot.com.ar\"},\n\t{\"www.blogspot.com.ar\", \"blogspot.com.ar\"},\n\t{\"www.xxx.yyy.zzz.blogspot.com.ar\", \"blogspot.com.ar\"},\n\t{\"logspot.com.ar\", \"com.ar\"},\n\t{\"zlogspot.com.ar\", \"com.ar\"},\n\t{\"zblogspot.com.ar\", \"com.ar\"},\n\n\t// The .arpa rules are:\n\t// arpa\n\t// e164.arpa\n\t// in-addr.arpa\n\t// ip6.arpa\n\t// iris.arpa\n\t// uri.arpa\n\t// urn.arpa\n\t{\"arpa\", \"arpa\"},\n\t{\"www.arpa\", \"arpa\"},\n\t{\"urn.arpa\", \"urn.arpa\"},\n\t{\"www.urn.arpa\", \"urn.arpa\"},\n\t{\"www.xxx.yyy.zzz.urn.arpa\", \"urn.arpa\"},\n\n\t// The relevant {kobe,kyoto}.jp rules are:\n\t// jp\n\t// *.kobe.jp\n\t// !city.kobe.jp\n\t// kyoto.jp\n\t// ide.kyoto.jp\n\t{\"jp\", \"jp\"},\n\t{\"kobe.jp\", \"jp\"},\n\t{\"c.kobe.jp\", \"c.kobe.jp\"},\n\t{\"b.c.kobe.jp\", \"c.kobe.jp\"},\n\t{\"a.b.c.kobe.jp\", \"c.kobe.jp\"},\n\t{\"city.kobe.jp\", \"kobe.jp\"},\n\t{\"www.city.kobe.jp\", \"kobe.jp\"},\n\t{\"kyoto.jp\", \"kyoto.jp\"},\n\t{\"test.kyoto.jp\", \"kyoto.jp\"},\n\t{\"ide.kyoto.jp\", \"ide.kyoto.jp\"},\n\t{\"b.ide.kyoto.jp\", \"ide.kyoto.jp\"},\n\t{\"a.b.ide.kyoto.jp\", \"ide.kyoto.jp\"},\n\n\t// The .tw rules are:\n\t// tw\n\t// edu.tw\n\t// gov.tw\n\t// mil.tw\n\t// com.tw\n\t// net.tw\n\t// org.tw\n\t// idv.tw\n\t// game.tw\n\t// ebiz.tw\n\t// club.tw\n\t// 網路.tw (xn--zf0ao64a.tw)\n\t// 組織.tw (xn--uc0atv.tw)\n\t// 商業.tw (xn--czrw28b.tw)\n\t// blogspot.tw\n\t{\"tw\", \"tw\"},\n\t{\"aaa.tw\", \"tw\"},\n\t{\"www.aaa.tw\", \"tw\"},\n\t{\"xn--czrw28b.aaa.tw\", \"tw\"},\n\t{\"edu.tw\", \"edu.tw\"},\n\t{\"www.edu.tw\", \"edu.tw\"},\n\t{\"xn--czrw28b.edu.tw\", \"edu.tw\"},\n\t{\"xn--czrw28b.tw\", 
\"xn--czrw28b.tw\"},\n\t{\"www.xn--czrw28b.tw\", \"xn--czrw28b.tw\"},\n\t{\"xn--uc0atv.xn--czrw28b.tw\", \"xn--czrw28b.tw\"},\n\t{\"xn--kpry57d.tw\", \"tw\"},\n\n\t// The .uk rules are:\n\t// uk\n\t// ac.uk\n\t// co.uk\n\t// gov.uk\n\t// ltd.uk\n\t// me.uk\n\t// net.uk\n\t// nhs.uk\n\t// org.uk\n\t// plc.uk\n\t// police.uk\n\t// *.sch.uk\n\t// blogspot.co.uk\n\t{\"uk\", \"uk\"},\n\t{\"aaa.uk\", \"uk\"},\n\t{\"www.aaa.uk\", \"uk\"},\n\t{\"mod.uk\", \"uk\"},\n\t{\"www.mod.uk\", \"uk\"},\n\t{\"sch.uk\", \"uk\"},\n\t{\"mod.sch.uk\", \"mod.sch.uk\"},\n\t{\"www.sch.uk\", \"www.sch.uk\"},\n\t{\"blogspot.co.uk\", \"blogspot.co.uk\"},\n\t{\"blogspot.nic.uk\", \"uk\"},\n\t{\"blogspot.sch.uk\", \"blogspot.sch.uk\"},\n\n\t// The .рф rules are\n\t// рф (xn--p1ai)\n\t{\"xn--p1ai\", \"xn--p1ai\"},\n\t{\"aaa.xn--p1ai\", \"xn--p1ai\"},\n\t{\"www.xxx.yyy.xn--p1ai\", \"xn--p1ai\"},\n\n\t// The .bd rules are:\n\t// *.bd\n\t{\"bd\", \"bd\"},\n\t{\"www.bd\", \"www.bd\"},\n\t{\"zzz.bd\", \"zzz.bd\"},\n\t{\"www.zzz.bd\", \"zzz.bd\"},\n\t{\"www.xxx.yyy.zzz.bd\", \"zzz.bd\"},\n\n\t// There are no .nosuchtld rules.\n\t{\"nosuchtld\", \"nosuchtld\"},\n\t{\"foo.nosuchtld\", \"nosuchtld\"},\n\t{\"bar.foo.nosuchtld\", \"nosuchtld\"},\n}\n\nfunc BenchmarkPublicSuffix(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tfor _, tc := range publicSuffixTestCases {\n\t\t\tList.PublicSuffix(tc.domain)\n\t\t}\n\t}\n}\n\nfunc TestPublicSuffix(t *testing.T) {\n\tfor _, tc := range publicSuffixTestCases {\n\t\tgot := List.PublicSuffix(tc.domain)\n\t\tif got != tc.want {\n\t\t\tt.Errorf(\"%q: got %q, want %q\", tc.domain, got, tc.want)\n\t\t}\n\t}\n}\n\nfunc TestSlowPublicSuffix(t *testing.T) {\n\tfor _, tc := range publicSuffixTestCases {\n\t\tgot := slowPublicSuffix(tc.domain)\n\t\tif got != tc.want {\n\t\t\tt.Errorf(\"%q: got %q, want %q\", tc.domain, got, tc.want)\n\t\t}\n\t}\n}\n\n// slowPublicSuffix implements the canonical (but O(number of rules)) public\n// suffix algorithm described at 
http://publicsuffix.org/list/.\n//\n// 1. Match domain against all rules and take note of the matching ones.\n// 2. If no rules match, the prevailing rule is \"*\".\n// 3. If more than one rule matches, the prevailing rule is the one which is an exception rule.\n// 4. If there is no matching exception rule, the prevailing rule is the one with the most labels.\n// 5. If the prevailing rule is a exception rule, modify it by removing the leftmost label.\n// 6. The public suffix is the set of labels from the domain which directly match the labels of the prevailing rule (joined by dots).\n// 7. The registered or registrable domain is the public suffix plus one additional label.\n//\n// This function returns the public suffix, not the registrable domain, and so\n// it stops after step 6.\nfunc slowPublicSuffix(domain string) string {\n\tmatch := func(rulePart, domainPart string) bool {\n\t\tswitch rulePart[0] {\n\t\tcase '*':\n\t\t\treturn true\n\t\tcase '!':\n\t\t\treturn rulePart[1:] == domainPart\n\t\t}\n\t\treturn rulePart == domainPart\n\t}\n\n\tdomainParts := strings.Split(domain, \".\")\n\tvar matchingRules [][]string\n\nloop:\n\tfor _, rule := range rules {\n\t\truleParts := strings.Split(rule, \".\")\n\t\tif len(domainParts) < len(ruleParts) {\n\t\t\tcontinue\n\t\t}\n\t\tfor i := range ruleParts {\n\t\t\trulePart := ruleParts[len(ruleParts)-1-i]\n\t\t\tdomainPart := domainParts[len(domainParts)-1-i]\n\t\t\tif !match(rulePart, domainPart) {\n\t\t\t\tcontinue loop\n\t\t\t}\n\t\t}\n\t\tmatchingRules = append(matchingRules, ruleParts)\n\t}\n\tif len(matchingRules) == 0 {\n\t\tmatchingRules = append(matchingRules, []string{\"*\"})\n\t} else {\n\t\tsort.Sort(byPriority(matchingRules))\n\t}\n\tprevailing := matchingRules[0]\n\tif prevailing[0][0] == '!' 
{\n\t\tprevailing = prevailing[1:]\n\t}\n\tif prevailing[0][0] == '*' {\n\t\treplaced := domainParts[len(domainParts)-len(prevailing)]\n\t\tprevailing = append([]string{replaced}, prevailing[1:]...)\n\t}\n\treturn strings.Join(prevailing, \".\")\n}\n\ntype byPriority [][]string\n\nfunc (b byPriority) Len() int      { return len(b) }\nfunc (b byPriority) Swap(i, j int) { b[i], b[j] = b[j], b[i] }\nfunc (b byPriority) Less(i, j int) bool {\n\tif b[i][0][0] == '!' {\n\t\treturn true\n\t}\n\tif b[j][0][0] == '!' {\n\t\treturn false\n\t}\n\treturn len(b[i]) > len(b[j])\n}\n\n// eTLDPlusOneTestCases come from\n// https://github.com/publicsuffix/list/blob/master/tests/test_psl.txt\nvar eTLDPlusOneTestCases = []struct {\n\tdomain, want string\n}{\n\t// Empty input.\n\t{\"\", \"\"},\n\t// Unlisted TLD.\n\t{\"example\", \"\"},\n\t{\"example.example\", \"example.example\"},\n\t{\"b.example.example\", \"example.example\"},\n\t{\"a.b.example.example\", \"example.example\"},\n\t// TLD with only 1 rule.\n\t{\"biz\", \"\"},\n\t{\"domain.biz\", \"domain.biz\"},\n\t{\"b.domain.biz\", \"domain.biz\"},\n\t{\"a.b.domain.biz\", \"domain.biz\"},\n\t// TLD with some 2-level rules.\n\t{\"com\", \"\"},\n\t{\"example.com\", \"example.com\"},\n\t{\"b.example.com\", \"example.com\"},\n\t{\"a.b.example.com\", \"example.com\"},\n\t{\"uk.com\", \"\"},\n\t{\"example.uk.com\", \"example.uk.com\"},\n\t{\"b.example.uk.com\", \"example.uk.com\"},\n\t{\"a.b.example.uk.com\", \"example.uk.com\"},\n\t{\"test.ac\", \"test.ac\"},\n\t// TLD with only 1 (wildcard) rule.\n\t{\"mm\", \"\"},\n\t{\"c.mm\", \"\"},\n\t{\"b.c.mm\", \"b.c.mm\"},\n\t{\"a.b.c.mm\", \"b.c.mm\"},\n\t// More complex TLD.\n\t{\"jp\", \"\"},\n\t{\"test.jp\", \"test.jp\"},\n\t{\"www.test.jp\", \"test.jp\"},\n\t{\"ac.jp\", \"\"},\n\t{\"test.ac.jp\", \"test.ac.jp\"},\n\t{\"www.test.ac.jp\", \"test.ac.jp\"},\n\t{\"kyoto.jp\", \"\"},\n\t{\"test.kyoto.jp\", \"test.kyoto.jp\"},\n\t{\"ide.kyoto.jp\", \"\"},\n\t{\"b.ide.kyoto.jp\", 
\"b.ide.kyoto.jp\"},\n\t{\"a.b.ide.kyoto.jp\", \"b.ide.kyoto.jp\"},\n\t{\"c.kobe.jp\", \"\"},\n\t{\"b.c.kobe.jp\", \"b.c.kobe.jp\"},\n\t{\"a.b.c.kobe.jp\", \"b.c.kobe.jp\"},\n\t{\"city.kobe.jp\", \"city.kobe.jp\"},\n\t{\"www.city.kobe.jp\", \"city.kobe.jp\"},\n\t// TLD with a wildcard rule and exceptions.\n\t{\"ck\", \"\"},\n\t{\"test.ck\", \"\"},\n\t{\"b.test.ck\", \"b.test.ck\"},\n\t{\"a.b.test.ck\", \"b.test.ck\"},\n\t{\"www.ck\", \"www.ck\"},\n\t{\"www.www.ck\", \"www.ck\"},\n\t// US K12.\n\t{\"us\", \"\"},\n\t{\"test.us\", \"test.us\"},\n\t{\"www.test.us\", \"test.us\"},\n\t{\"ak.us\", \"\"},\n\t{\"test.ak.us\", \"test.ak.us\"},\n\t{\"www.test.ak.us\", \"test.ak.us\"},\n\t{\"k12.ak.us\", \"\"},\n\t{\"test.k12.ak.us\", \"test.k12.ak.us\"},\n\t{\"www.test.k12.ak.us\", \"test.k12.ak.us\"},\n\t// Punycoded IDN labels\n\t{\"xn--85x722f.com.cn\", \"xn--85x722f.com.cn\"},\n\t{\"xn--85x722f.xn--55qx5d.cn\", \"xn--85x722f.xn--55qx5d.cn\"},\n\t{\"www.xn--85x722f.xn--55qx5d.cn\", \"xn--85x722f.xn--55qx5d.cn\"},\n\t{\"shishi.xn--55qx5d.cn\", \"shishi.xn--55qx5d.cn\"},\n\t{\"xn--55qx5d.cn\", \"\"},\n\t{\"xn--85x722f.xn--fiqs8s\", \"xn--85x722f.xn--fiqs8s\"},\n\t{\"www.xn--85x722f.xn--fiqs8s\", \"xn--85x722f.xn--fiqs8s\"},\n\t{\"shishi.xn--fiqs8s\", \"shishi.xn--fiqs8s\"},\n\t{\"xn--fiqs8s\", \"\"},\n}\n\nfunc TestEffectiveTLDPlusOne(t *testing.T) {\n\tfor _, tc := range eTLDPlusOneTestCases {\n\t\tgot, _ := EffectiveTLDPlusOne(tc.domain)\n\t\tif got != tc.want {\n\t\t\tt.Errorf(\"%q: got %q, want %q\", tc.domain, got, tc.want)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/publicsuffix/table.go",
    "content": "// generated by go run gen.go; DO NOT EDIT\n\npackage publicsuffix\n\nconst version = \"publicsuffix.org's public_suffix_list.dat, git revision 38b238d6324042f2c2e6270459d1f4ccfe789fba (2017-08-28T20:09:01Z)\"\n\nconst (\n\tnodesBitsChildren   = 10\n\tnodesBitsICANN      = 1\n\tnodesBitsTextOffset = 15\n\tnodesBitsTextLength = 6\n\n\tchildrenBitsWildcard = 1\n\tchildrenBitsNodeType = 2\n\tchildrenBitsHi       = 14\n\tchildrenBitsLo       = 14\n)\n\nconst (\n\tnodeTypeNormal     = 0\n\tnodeTypeException  = 1\n\tnodeTypeParentOnly = 2\n)\n\n// numTLD is the number of top level domains.\nconst numTLD = 1557\n\n// Text is the combined text of all labels.\nconst text = \"bifukagawalterbihorologyukuhashimoichinosekigaharaxastronomy-gat\" +\n\t\"ewaybomloans3-ca-central-1bikedagestangeorgeorgiabilbaogakihokum\" +\n\t\"akogengerdalces3-website-us-west-1billustrationikinuyamashinashi\" +\n\t\"kitchenikkoebenhavnikolaevents3-website-us-west-2bioddabirdartce\" +\n\t\"nterprisesakikugawarszawashingtondclkariyameldalindesnesakurainv\" +\n\t\"estmentsakyotanabellunord-odalivornomutashinainzais-a-candidateb\" +\n\t\"irkenesoddtangenovaraumalopolskanlandrayddnsfreebox-oslocus-3bir\" +\n\t\"thplacebitballooningladefinimakanegasakindlegokasells-for-lessal\" +\n\t\"angenikonantankarlsoyurihonjoyentattoolsztynsettlersalondonetska\" +\n\t\"rmoyusuharabjarkoyusuisserveexchangebjerkreimbalsfjordgcahcesuol\" +\n\t\"ocalhostrodawaraugustowadaegubalsanagochihayaakasakawaharanzanne\" +\n\t\"frankfurtarumizusawabkhaziamallamagazineat-url-o-g-i-naturalhist\" +\n\t\"orymuseumcentereviewskrakowebredirectmeteorappaleobihirosakikami\" +\n\t\"jimabogadocscbgdyniabruzzoologicalvinklein-addrammenuernberggfar\" +\n\t\"merseinebinagisochildrensgardenaturalsciencesnaturelles3-ap-nort\" +\n\t\"heast-2ixboxenapponazure-mobileastcoastaldefenceatonsberg12000em\" +\n\t\"mafanconagawakayamadridvagsoyericssonyoursidealerimo-i-ranaamesj\" 
+\n\t\"evuemielno-ip6bjugninohekinannestadraydnsaltdalombardiamondsalva\" +\n\t\"dordalibabalatinord-frontierblockbustermezjavald-aostaplesalzbur\" +\n\t\"glassassinationalheritagematsubarakawagoebloombergbauerninomiyak\" +\n\t\"onojosoyrorosamegawabloxcmsamnangerbluedancebmoattachmentsamsclu\" +\n\t\"bindalombardynamisches-dnsamsungleezebmsandvikcoromantovalle-d-a\" +\n\t\"ostathellebmwedeployuufcfanirasakis-a-catererbnpparibaselburgliw\" +\n\t\"icebnrwegroweibolzanorddalomzaporizhzheguris-a-celticsfanishiaza\" +\n\t\"is-a-chefarmsteadrivelandrobaknoluoktachikawalbrzycharternidrudu\" +\n\t\"nsanfranciscofreakunedre-eikerbonnishigoppdalorenskoglobalashovh\" +\n\t\"achinohedmarkarpaczeladzlglobodoes-itvedestrandupontariobookingl\" +\n\t\"ogoweirboomladbrokesangobootsanjournalismailillesandefjordurbana\" +\n\t\"mexnetlifyis-a-conservativefsnillfjordurhamburgloppenzaogashimad\" +\n\t\"achicagoboatsannanishiharaboschaefflerdalotenkawabostikaruizawab\" +\n\t\"ostonakijinsekikogentingmbhartiffanyuzawabotanicalgardenishiizun\" +\n\t\"azukis-a-cpadualstackspace-to-rentalstomakomaibarabotanicgardeni\" +\n\t\"shikatakayamatta-varjjataxihuanishikatsuragit-repostfoldnavybota\" +\n\t\"nybouncemerckmsdnipropetrovskjervoyagebounty-fullensakerryproper\" +\n\t\"tiesannohelplfinancialotteboutiquebecngminakamichiharabozentsuji\" +\n\t\"iebplacedekagaminordkappgafanpachigasakievennodesashibetsukumiya\" +\n\t\"mazonawsaarlandyndns-at-workinggroupalmspringsakerbrandywinevall\" +\n\t\"eybrasiliabresciabrindisibenikebristoloseyouripirangapartmentsan\" +\n\t\"okarumaifarsundyndns-blogdnsantabarbarabritishcolumbialowiezachp\" +\n\t\"omorskienishikawazukamitsuebroadcastlefrakkestadyndns-freeboxost\" +\n\t\"rowwlkpmgmodenakatombetsumitakagiizebroadwaybroke-itgorybrokerbr\" +\n\t\"onnoysundyndns-homednsantacruzsantafedjeffersonishimerabrotherme\" +\n\t\"saverdeatnurembergmxfinitybrowsersafetymarketsanukis-a-cubicle-s\" +\n\t\"lavellinotteroybrumunddalottokonamegatakasugais-a-democratjeldsu\" 
+\n\t\"ndyndns-ipamperedchefashionishinomiyashironobrunelasticbeanstalk\" +\n\t\"asaokaminoyamaxunusualpersonishinoomotegobrusselsaotomeloyalistj\" +\n\t\"ordalshalsenishinoshimattelefonicarbonia-iglesias-carboniaiglesi\" +\n\t\"ascarboniabruxellesapodlasiellaktyubinskiptveterinairealtorlandy\" +\n\t\"ndns-mailouvrehabmerbryanskleppanamabrynewjerseybuskerudinewport\" +\n\t\"lligatjmaxxxjaworznowtv-infoodnetworkshoppingrimstadyndns-office\" +\n\t\"-on-the-webcambulancebuzenishiokoppegardyndns-picsapporobuzzpana\" +\n\t\"sonicateringebugattipschlesischesardegnamsskoganeis-a-designerim\" +\n\t\"arumorimachidabwfastlylbaltimore-og-romsdalillyokozehimejibigawa\" +\n\t\"ukraanghkeymachinewhampshirebungoonord-aurdalpha-myqnapcloudacce\" +\n\t\"sscambridgestonemurorangeiseiyoichippubetsubetsugaruhrhcloudns3-\" +\n\t\"eu-central-1bzhitomirumalselvendrellowiczest-le-patronishitosash\" +\n\t\"imizunaminamiashigaracompute-1computerhistoryofscience-fictionco\" +\n\t\"msecuritytacticsaseboknowsitallvivano-frankivskasuyanagawacondos\" +\n\t\"hichinohealth-carereformitakeharaconferenceconstructionconsulado\" +\n\t\"esntexistanbullensvanguardyndns-workisboringrueconsultanthropolo\" +\n\t\"gyconsultingvollcontactoyonocontemporaryarteducationalchikugodoh\" +\n\t\"aruovatoyookannamifunecontractorskenconventureshinodearthdfcbank\" +\n\t\"aszubycookingchannelsdvrdnsdojoetsuwanouchikujogaszczytnordreisa\" +\n\t\"-geekatowicecoolkuszkolahppiacenzaganquannakadomarineustarhubsas\" +\n\t\"katchewancooperaunitemp-dnsassaris-a-gurulsandoycopenhagencyclop\" +\n\t\"edichernihivanovodkagoshimalvikashibatakashimaseratis-a-financia\" +\n\t\"ladvisor-aurdalucaniacorsicagliaridagawashtenawdev-myqnapcloudap\" +\n\t\"plebtimnetzwhoswhokksundyndns1corvettenrightathomeftparliamentoy\" +\n\t\"osatoyakokonoecosenzakopanerairguardiann-arboretumbriacosidnsfor\" +\n\t\"-better-thanawatchesatxn--12c1fe0bradescorporationcostumedio-cam\" +\n\t\"pidano-mediocampidanomediocouchpotatofriesaudacouncilcouponsauhe\" 
+\n\t\"radynnsavannahgacoursesaves-the-whalessandria-trani-barletta-and\" +\n\t\"riatranibarlettaandriacqhachiojiyahoooshikamaishimodatecranbrook\" +\n\t\"uwanalyticsavonaplesaxocreditcardynulvikatsushikabeeldengeluidyn\" +\n\t\"v6creditunioncremonashgabadaddjambylcrewiiheyakagecricketrzyncri\" +\n\t\"meast-kazakhstanangercrotonexus-2crownprovidercrsvparmacruisesbs\" +\n\t\"chokoladencryptonomichigangwoncuisinellair-traffic-controlleycul\" +\n\t\"turalcentertainmentoyotaris-a-hard-workercuneocupcakecxn--12cfi8\" +\n\t\"ixb8lcyberlevagangaviikanonjis-a-huntercymrussiacyonabarunzencyo\" +\n\t\"utheworkpccwildlifedorainfracloudcontrolledogawarabikomaezakirun\" +\n\t\"orfolkebibleikangerfidonnakaniikawatanagurafieldfiguerestauranto\" +\n\t\"yotsukaidownloadfilateliafilegearfilminamiechizenfinalfinancefin\" +\n\t\"eartscientistockholmestrandfinlandfinnoyfirebaseapparscjohnsonfi\" +\n\t\"renzefirestonefirmdaleirvikatsuyamasfjordenfishingolffanscotland\" +\n\t\"fitjarfitnessettlementoyourafjalerflesbergulenflickragerotikakeg\" +\n\t\"awaflightscrapper-siteflirflogintogurafloraflorencefloridavvesii\" +\n\t\"dazaifudaigojomedizinhistorischescrappingunmarburguovdageaidnusl\" +\n\t\"ivinghistoryfloripaderbornfloristanohatakahamangyshlakasamatsudo\" +\n\t\"ntexisteingeekaufenflorogerserveftpartis-a-landscaperflowerserve\" +\n\t\"game-serversicherungushikamifuranortonflynnhostingxn--1ck2e1bamb\" +\n\t\"leclercasadelamonedatingjerstadotsuruokakudamatsuemrflynnhubanan\" +\n\t\"arepublicaseihichisobetsuitainairforcechirealmetlifeinsuranceu-1\" +\n\t\"fndfor-ourfor-someethnologyfor-theaterforexrothachirogatakahatak\" +\n\t\"aishimogosenforgotdnservehalflifestyleforli-cesena-forlicesenafo\" +\n\t\"rlikescandynamic-dnservehttpartnerservehumourforsaleitungsenfors\" +\n\t\"andasuolodingenfortmissoulancashireggio-calabriafortworthadanose\" +\n\t\"gawaforuminamifuranofosneserveirchernovtsykkylvenetogakushimotog\" +\n\t\"anewyorkshirecipesaro-urbino-pesarourbinopesaromasvuotnaharimamu\" 
+\n\t\"rogawassamukawataricohdatsunanjoburgriwataraidyndns-remotewdyndn\" +\n\t\"s-serverdaluccapitalonewspaperfotaruis-a-lawyerfoxfordebianfredr\" +\n\t\"ikstadtvserveminecraftoystre-slidrettozawafreeddnsgeekgalaxyfree\" +\n\t\"masonryfreesitexascolipicenogiftservemp3freetlservep2partservepi\" +\n\t\"cservequakefreiburgfreightcminamiiselectozsdeloittevadsoccertifi\" +\n\t\"cationfresenius-4fribourgfriuli-v-giuliafriuli-ve-giuliafriuli-v\" +\n\t\"egiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafr\" +\n\t\"iuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafri\" +\n\t\"uliveneziagiuliafriulivgiuliafrlfroganservesarcasmatartanddesign\" +\n\t\"frognfrolandfrom-akrehamnfrom-alfrom-arfrom-azfrom-capebretonami\" +\n\t\"astalowa-wolayangroupartyfrom-coguchikuzenfrom-ctrani-andria-bar\" +\n\t\"letta-trani-andriafrom-dchirurgiens-dentistes-en-francefrom-dedy\" +\n\t\"n-ip24from-flanderservicesettsurgeonshalloffamemergencyachtsevas\" +\n\t\"topolefrom-gausdalfrom-higashiagatsumagoizumizakirkenesevenassis\" +\n\t\"icilyfrom-iafrom-idfrom-ilfrom-incheonfrom-ksewilliamhillfrom-ky\" +\n\t\"owariasahikawafrom-lancasterfrom-maniwakuratextileksvikautokeino\" +\n\t\"from-mdfrom-megurokunohealthcareersharis-a-liberalfrom-microsoft\" +\n\t\"bankazofrom-mnfrom-modellingfrom-msharpasadenamsosnowiechiryukyu\" +\n\t\"ragifuchungbukharafrom-mtnfrom-nchitachinakagawatchandclockashih\" +\n\t\"arafrom-ndfrom-nefrom-nhktraniandriabarlettatraniandriafrom-njcb\" +\n\t\"nlfrom-nminamiizukamishihoronobeauxartsandcraftshawaiijimarugame\" +\n\t\"-hostrolekamikitayamatsuris-a-libertarianfrom-nvalled-aostatoilf\" +\n\t\"rom-nyfrom-ohkurafrom-oketohmannorth-kazakhstanfrom-orfrom-padov\" +\n\t\"aksdalfrom-pratohnoshooguyfrom-rivnefrom-schoenbrunnfrom-sdfrom-\" +\n\t\"tnfrom-txn--1ctwolominamatakkokamiokamiminershellaspeziafrom-uta\" +\n\t\"zuerichardlillehammerfeste-ipassagenshimojis-a-linux-useranishia\" +\n\t\"ritabashijonawatefrom-val-daostavalleyfrom-vtranoyfrom-wafrom-wi\" 
+\n\t\"elunnerfrom-wvalledaostavangerfrom-wyfrosinonefrostalbanshimokaw\" +\n\t\"afroyahikobeardubaiduckdnshimokitayamafstavernfujiiderafujikawag\" +\n\t\"uchikonefujiminohtawaramotoineppubolognakanotoddenfujinomiyadafu\" +\n\t\"jiokayamansionshimonitayanagithubusercontentransportransurlfujis\" +\n\t\"atoshonairtelecitychyattorneyagawakuyabukidsmynasushiobaragusart\" +\n\t\"shimonosekikawafujisawafujishiroishidakabiratoridefenseljordfuji\" +\n\t\"tsurugashimaritimekeepingfujixeroxn--1lqs03nfujiyoshidafukayabea\" +\n\t\"tshimosuwalkis-a-llamarylandfukuchiyamadafukudominichitosetogits\" +\n\t\"uldalucernefukuis-a-musicianfukumitsubishigakirovogradoyfukuokaz\" +\n\t\"akiryuohadselfipassenger-associationfukuroishikarikaturindalfuku\" +\n\t\"sakisarazurewebsiteshikagamiishibukawafukuyamagatakaharufunabash\" +\n\t\"iriuchinadafunagatakahashimamakishiwadafunahashikamiamakusatsuma\" +\n\t\"sendaisennangonohejis-a-nascarfanfundaciofuoiskujukuriyamanxn--1\" +\n\t\"lqs71dfuosskoczowinbarcelonagasakikonaikawachinaganoharamcoacham\" +\n\t\"pionshiphoptobishimaizurugbydgoszczecinemakeupowiathletajimabari\" +\n\t\"akembuchikumagayagawakkanaibetsubamericanfamilydscloudcontrolapp\" +\n\t\"spotagerfurnitureggio-emilia-romagnakasatsunairtrafficplexus-1fu\" +\n\t\"rubiraquarellebesbyenglandfurudonostiaarpaviancarrierfurukawais-\" +\n\t\"a-nurservebbshimotsukefusodegaurafussagamiharafutabayamaguchinom\" +\n\t\"igawafutboldlygoingnowhere-for-moregontrailroadfuttsurugimperiaf\" +\n\t\"uturecmshimotsumafuturehostingfuturemailingfvgfylkesbiblackfrida\" +\n\t\"yfyresdalhangglidinghangoutsystemscloudfunctionshinichinanhannan\" +\n\t\"mokuizumodernhannotaireshinjournalisteinkjerusalembroideryhanyuz\" +\n\t\"enhapmirhareidsbergenharstadharvestcelebrationhasamarcheapgfoggi\" +\n\t\"ahasaminami-alpssells-itrapaniimimatakatoris-a-playerhashbanghas\" +\n\t\"udahasura-appharmacienshinjukumanohasvikazunohatogayaitakamoriok\" +\n\t\"aluganskolevangerhatoyamazakitahiroshimarnardalhatsukaichikaisei\" 
+\n\t\"s-a-republicancerresearchaeologicaliforniahattfjelldalhayashimam\" +\n\t\"otobungotakadapliernewmexicodyn-vpnplusterhazuminobusellsyourhom\" +\n\t\"egoodshinkamigotoyohashimotoshimahboehringerikehelsinkitakamiizu\" +\n\t\"misanofidelityhembygdsforbundhemneshinshinotsurgeryhemsedalhepfo\" +\n\t\"rgeherokussldheroyhgtvallee-aosteroyhigashichichibunkyonanaoshim\" +\n\t\"ageandsoundandvisionhigashihiroshimanehigashiizumozakitakatakana\" +\n\t\"beautysfjordhigashikagawahigashikagurasoedahigashikawakitaaikita\" +\n\t\"kyushuaiahigashikurumeiwamarriottravelchannelhigashimatsushimars\" +\n\t\"hallstatebankddielddanuorrikuzentakataiwanairlinebraskaunjargals\" +\n\t\"aceohigashimatsuyamakitaakitadaitoigawahigashimurayamamotorcycle\" +\n\t\"shinshirohigashinarusembokukitamidoris-a-rockstarachowicehigashi\" +\n\t\"nehigashiomihachimanchesterhigashiosakasayamanakakogawahigashish\" +\n\t\"irakawamatakanezawahigashisumiyoshikawaminamiaikitamotosumy-rout\" +\n\t\"erhigashitsunotogawahigashiurausukitanakagusukumoduminamiminowah\" +\n\t\"igashiyamatokoriyamanashifteditchyouripharmacyshintokushimahigas\" +\n\t\"hiyodogawahigashiyoshinogaris-a-socialistmein-vigorgehiraizumisa\" +\n\t\"tohobby-sitehirakatashinagawahiranais-a-soxfanhirarahiratsukagaw\" +\n\t\"ahirayaizuwakamatsubushikusakadogawahistorichouseshintomikasahar\" +\n\t\"ahitachiomiyagildeskaliszhitachiotagooglecodespotravelersinsuran\" +\n\t\"cehitraeumtgeradellogliastradinghjartdalhjelmelandholeckobierzyc\" +\n\t\"eholidayhomeiphdhomelinkfhappouhomelinuxn--1qqw23ahomeofficehome\" +\n\t\"securitymaceratakaokamakurazakitashiobarahomesecuritypchloehomes\" +\n\t\"enseminehomeunixn--2m4a15ehondahoneywellbeingzonehongopocznorthw\" +\n\t\"esternmutualhonjyoitakarazukameokameyamatotakadahornindalhorseou\" +\n\t\"lminamiogunicomcastresistancehortendofinternet-dnshinyoshitomiok\" +\n\t\"amogawahospitalhoteleshiojirishirifujiedahotmailhoyangerhoylande\" +\n\t\"troitskydivinghumanitieshioyanaizuhurdalhurumajis-a-studentalhyl\" 
+\n\t\"lestadhyogoris-a-teacherkassymantechnologyhyugawarahyundaiwafune\" +\n\t\"hzchocolatemasekashiwarajewishartgalleryjfkharkovalleeaosteigenj\" +\n\t\"gorajlcube-serverrankoshigayakumoldelmenhorstagejlljmphilipsynol\" +\n\t\"ogy-diskstationjnjcphilatelyjoyokaichibahccavuotnagareyamalborkd\" +\n\t\"alwaysdatabaseballangenoamishirasatochigiessensiositelemarkherso\" +\n\t\"njpmorganjpnjprshiraokananporovigotpantheonsitejuniperjurkoshuna\" +\n\t\"ntokigawakosugekotohiradomainshiratakahagitlaborkotourakouhokuta\" +\n\t\"makis-an-artistcgrouphiladelphiaareadmyblogsitekounosupplieshish\" +\n\t\"ikuis-an-engineeringkouyamashikokuchuokouzushimasoykozagawakozak\" +\n\t\"is-an-entertainerkozowindmillkpnkppspdnshisognekrasnodarkredston\" +\n\t\"ekristiansandcatshisuifuelblagdenesnaaseralingenkainanaejrietisa\" +\n\t\"latinabenonichoshibuyachiyodavvenjargaulardalutskasukabedzin-the\" +\n\t\"-bandaioiraseeklogest-mon-blogueurovisionisshingugekristiansundk\" +\n\t\"rodsheradkrokstadelvaldaostarnbergkryminamisanrikubetsupportrent\" +\n\t\"ino-alto-adigekumatorinokumejimasudakumenanyokkaichiropractichoy\" +\n\t\"odobashichikashukujitawarakunisakis-bykunitachiarailwaykunitomig\" +\n\t\"usukumamotoyamassa-carrara-massacarraramassabusinessebyklegalloc\" +\n\t\"alhistoryggeelvinckhmelnytskyivanylvenicekunneppulawykunstsammlu\" +\n\t\"ngkunstunddesignkuokgrouphoenixn--30rr7ykureggioemiliaromagnakay\" +\n\t\"amatsumaebashikshacknetrentino-altoadigekurgankurobelaudiblebork\" +\n\t\"angerkurogimilanokuroisoftwarendalenugkuromatsunais-certifieduca\" +\n\t\"torahimeshimamateramochizukirakurotakikawasakis-foundationkushir\" +\n\t\"ogawakustanais-gonekusupplykutchanelkutnokuzumakis-into-animelbo\" +\n\t\"urnekvafjordkvalsundkvamlidlugolekafjordkvanangenkvinesdalkvinnh\" +\n\t\"eradkviteseidskogkvitsoykwpspiegelkzmisugitokorozawamitourismola\" +\n\t\"ngevagrarchaeologyeongbuknx-serveronakatsugawamitoyoakemiuramiya\" +\n\t\"zumiyotamanomjondalenmlbfanmonstermonticellolmontrealestatefarme\" 
+\n\t\"quipmentrentino-s-tirollagrigentomologyeonggiehtavuoatnagaivuotn\" +\n\t\"agaokakyotambabia-goracleaningatlantabusebastopologyeongnamegawa\" +\n\t\"keisenbahnmonza-brianzaporizhzhiamonza-e-della-brianzapposhitara\" +\n\t\"mamonzabrianzaptokuyamatsusakahoginankokubunjis-leetnedalmonzaeb\" +\n\t\"rianzaramonzaedellabrianzamoonscalezajskolobrzegersundmoparachut\" +\n\t\"ingmordoviajessheiminamitanemoriyamatsushigemoriyoshimilitarymor\" +\n\t\"monmouthagakhanamigawamoroyamatsuuramortgagemoscowindowshizukuis\" +\n\t\"himofusaintlouis-a-bruinsfanmoseushistorymosjoenmoskeneshizuokan\" +\n\t\"azawamosshoujis-lostre-toteneis-an-accountantshirahamatonbetsurn\" +\n\t\"adalmosvikomaganemoteginowaniihamatamakawajimaoris-not-certified\" +\n\t\"unetbankhakassiamoviemovistargardmtpchristiansburgrondarmtranbym\" +\n\t\"uenstermuginozawaonsenmuikamisunagawamukochikushinonsenergymulho\" +\n\t\"uservebeermunakatanemuncieszynmuosattemuphonefosshowamurmanskoma\" +\n\t\"kiyosunndalmurotorcraftrentino-stirolmusashimurayamatsuzakis-sav\" +\n\t\"edmusashinoharamuseetrentino-sud-tirolmuseumverenigingmusicargod\" +\n\t\"addynaliascoli-picenogataijis-slickharkivgucciprianiigataishinom\" +\n\t\"akinderoymutsuzawamy-vigorlicemy-wanggouvicenzamyactivedirectory\" +\n\t\"myasustor-elvdalmycdn77-securecifedexhibitionmyddnskingmydissent\" +\n\t\"rentino-sudtirolmydrobofagemydshowtimemorialmyeffectrentino-sued\" +\n\t\"-tirolmyfirewallonieruchomoscienceandindustrynmyfritzmyftpaccess\" +\n\t\"hriramsterdamnserverbaniamyfusionmyhome-serversaillesienarashino\" +\n\t\"mykolaivaolbia-tempio-olbiatempioolbialystokkepnoduminamiuonumat\" +\n\t\"sumotofukemymailermymediapchristmasakimobetsuliguriamyokohamamat\" +\n\t\"sudamypephotographysiomypetsigdalmyphotoshibajddarchitecturealty\" +\n\t\"dalipaymypsxn--32vp30hagebostadmysecuritycamerakermyshopblocksil\" +\n\t\"komatsushimashikizunokunimihoboleslawiechonanbuilderschmidtre-ga\" +\n\t\"uldalukowhalingroks-thisayamanobeokalmykiamytis-a-bloggermytulea\" 
+\n\t\"piagetmyipictetrentino-suedtirolmyvnchromedicaltanissettairamywi\" +\n\t\"reitrentinoa-adigepinkomforbarclays3-us-east-2pioneerpippupictur\" +\n\t\"esimple-urlpiszpittsburghofauskedsmokorsetagayasells-for-usgarde\" +\n\t\"npiwatepixolinopizzapkommunalforbundplanetariuminamiyamashirokaw\" +\n\t\"anabelembetsukubanklabudhabikinokawabarthaebaruminamimakis-a-pai\" +\n\t\"nteractivegarsheis-a-patsfanplantationplantslingplatformshangril\" +\n\t\"anslupskommuneplaystationplazaplchryslerplumbingopmnpodzonepohlp\" +\n\t\"oivronpokerpokrovskomonopolitiendapolkowicepoltavalle-aostarostw\" +\n\t\"odzislawinnersnoasaitamatsukuris-uberleetrdpomorzeszowiosokaneya\" +\n\t\"mazoepordenonepornporsangerporsanguidell-ogliastraderporsgrunnan\" +\n\t\"poznanpraxis-a-bookkeeperugiaprdpreservationpresidioprgmrprimelh\" +\n\t\"uscultureisenprincipeprivatizehealthinsuranceprochowiceproductio\" +\n\t\"nsokndalprofbsbxn--12co0c3b4evalleaostaticschuleprogressivegasia\" +\n\t\"promombetsurfbx-oschwarzgwangjuifminamidaitomangotsukisofukushim\" +\n\t\"aparocherkasyno-dschweizpropertyprotectionprotonetrentinoaadigep\" +\n\t\"rudentialpruszkowitdkomorotsukamisatokamachintaifun-dnsaliasdabu\" +\n\t\"rprzeworskogptplusdecorativeartsolarssonpvtrentinoalto-adigepwch\" +\n\t\"ungnamdalseidfjordyndns-weberlincolniyodogawapzqldqponqslgbtrent\" +\n\t\"inoaltoadigequicksytesolognequipelementsolundbeckomvuxn--2scrj9c\" +\n\t\"hoseiroumuenchenissandnessjoenissayokoshibahikariwanumatakazakis\" +\n\t\"-a-greenissedaluroyqvchurchaseljeepsongdalenviknagatorodoystufft\" +\n\t\"oread-booksnesomnaritakurashikis-very-badajozorastuttgartrentino\" +\n\t\"sudtirolsusakis-very-evillagesusonosuzakaniepcesuzukanmakiwakuni\" +\n\t\"gamidsundsuzukis-very-goodhandsonsvalbardunloppacificirclegnicaf\" +\n\t\"ederationsveiosvelvikongsvingersvizzerasvn-reposooswedenswidnica\" +\n\t\"rtierswiebodzindianapolis-a-anarchistoireggiocalabriaswiftcovers\" +\n\t\"winoujscienceandhistoryswisshikis-very-nicesynology-dsopotrentin\" 
+\n\t\"os-tirolturystykanoyaltakasakiwientuscanytushuissier-justicetuva\" +\n\t\"lle-daostatic-accessorreisahayakawakamiichikawamisatotaltuxfamil\" +\n\t\"ytwmailvbargainstitutelevisionaustdalimanowarudaustevollavangena\" +\n\t\"turbruksgymnaturhistorisches3-eu-west-1venneslaskerrylogisticsor\" +\n\t\"tlandvestfoldvestnesoruminanovestre-slidreamhostersouthcarolinaz\" +\n\t\"awavestre-totennishiawakuravestvagoyvevelstadvibo-valentiavibova\" +\n\t\"lentiavideovillaskimitsubatamicable-modemoneyvinnicartoonartdeco\" +\n\t\"ffeedbackplaneapplinzis-very-sweetpeppervinnytsiavipsinaappilots\" +\n\t\"irdalvirginiavirtualvirtueeldomeindianmarketingvirtuelvisakataki\" +\n\t\"nouevistaprinternationalfirearmsouthwestfalenviterboltrevisohugh\" +\n\t\"esor-odalvivoldavixn--3bst00mincommbankmpspbarclaycards3-sa-east\" +\n\t\"-1vlaanderenvladikavkazimierz-dolnyvladimirvlogoipimientaketomis\" +\n\t\"atolgavolkswagentsowavologdanskonskowolawavolvolkenkundenvolyngd\" +\n\t\"alvossevangenvotevotingvotoyonakagyokutourspjelkavikongsbergwloc\" +\n\t\"lawekonsulatrobeepilepsydneywmflabspreadbettingworldworse-thanda\" +\n\t\"wowithgoogleapisa-hockeynutsiracusakakinokiawpdevcloudwritesthis\" +\n\t\"blogsytewroclawithyoutubeneventoeidsvollwtcircustomerwtfbxoscien\" +\n\t\"cecentersciencehistorywuozuwwwiwatsukiyonowruzhgorodeowzmiuwajim\" +\n\t\"axn--42c2d9axn--45br5cylxn--45brj9citadeliveryxn--45q11citicatho\" +\n\t\"licheltenham-radio-opencraftrainingripescaravantaaxn--4gbriminin\" +\n\t\"gxn--4it168dxn--4it797kooris-an-actorxn--4pvxs4allxn--54b7fta0cc\" +\n\t\"ivilaviationxn--55qw42gxn--55qx5dxn--5js045dxn--5rtp49civilisati\" +\n\t\"onxn--5rtq34kopervikhmelnitskiyamashikexn--5su34j936bgsgxn--5tzm\" +\n\t\"5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264civili\" +\n\t\"zationxn--80adxhkspydebergxn--80ao21axn--80aqecdr1axn--80asehdba\" +\n\t\"rreauctionaval-d-aosta-valleyolasiteu-2xn--80aswgxn--80audnedaln\" +\n\t\"xn--8ltr62koryokamikawanehonbetsurutaharaxn--8pvr4uxn--8y0a063ax\" 
+\n\t\"n--90a3academy-firewall-gatewayxn--90aeroportalaheadjudaicaaarbo\" +\n\t\"rteaches-yogasawaracingroks-theatreexn--90aishobaraomoriguchihar\" +\n\t\"ahkkeravjuedischesapeakebayernrtritonxn--90azhytomyrxn--9dbhblg6\" +\n\t\"dietcimdbarrel-of-knowledgemologicallimitediscountysvardolls3-us\" +\n\t\"-gov-west-1xn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aropor\" +\n\t\"t-byandexn--3ds443gxn--asky-iraxn--aurskog-hland-jnbarrell-of-kn\" +\n\t\"owledgeologyombondiscoveryomitanobninskarasjohkaminokawanishiaiz\" +\n\t\"ubangeu-3utilitiesquare7xn--avery-yuasakegawaxn--b-5gaxn--b4w605\" +\n\t\"ferdxn--bck1b9a5dre4civilwarmanagementjxn--0trq7p7nnxn--bdddj-mr\" +\n\t\"abdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccav\" +\n\t\"uotna-k7axn--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyaotsu\" +\n\t\"rreyxn--bjddar-ptamayufuettertdasnetzxn--blt-elabourxn--bmlo-gra\" +\n\t\"ingerxn--bod-2naroyxn--brnny-wuaccident-investigation-aptiblease\" +\n\t\"ating-organicbcn-north-1xn--brnnysund-m8accident-prevention-webh\" +\n\t\"openairbusantiquest-a-la-maisondre-landebudapest-a-la-masionionj\" +\n\t\"ukudoyamagentositelekommunikationthewifiat-band-campaniaxn--brum\" +\n\t\"-voagatroandinosaurepbodynathomebuiltrentinosued-tirolxn--btsfjo\" +\n\t\"rd-9zaxn--c1avgxn--c2br7gxn--c3s14minnesotaketakatsukis-into-car\" +\n\t\"shiranukanagawaxn--cck2b3barsyonlinewhollandishakotanavigationav\" +\n\t\"oibmdisrechtranakaiwamizawaweddingjesdalimoliserniaustinnatuurwe\" +\n\t\"tenschappenaumburgjerdrumckinseyokosukanzakiyokawaragrocerybnika\" +\n\t\"hokutobamaintenancebetsuikicks-assedic66xn--cg4bkis-with-theband\" +\n\t\"ovre-eikerxn--ciqpnxn--clchc0ea0b2g2a9gcdn77-sslattumintelligenc\" +\n\t\"exn--comunicaes-v6a2oxn--correios-e-telecomunicaes-ghc29axn--czr\" +\n\t\"694bashkiriaustraliaisondriodejaneirochesterxn--czrs0trogstadxn-\" +\n\t\"-czru2dxn--czrw28basilicataniaustrheimatunduhrennesoyokotebinore\" +\n\t\"-og-uvdalaziobiraskvolloabathsbcasacamdvrcampobassociatestingjem\" 
+\n\t\"nes3-ap-southeast-1xn--d1acj3basketballyngenavuotnaklodzkodairau\" +\n\t\"thordalandroiddnss3-eu-west-2xn--d1alfaromeoxn--d1atromsaitomobe\" +\n\t\"llevuelosangelesjaguarmeniaxn--d5qv7z876claimsardiniaxn--davvenj\" +\n\t\"rga-y4axn--djrs72d6uyxn--djty4kosaigawaxn--dnna-grajewolterskluw\" +\n\t\"erxn--drbak-wuaxn--dyry-iraxn--e1a4clanbibaidarq-axn--eckvdtc9dx\" +\n\t\"n--efvn9srlxn--efvy88haibarakisosakitagawaxn--ehqz56nxn--elqq16h\" +\n\t\"air-surveillancexn--estv75gxn--eveni-0qa01gaxn--f6qx53axn--fct42\" +\n\t\"9kosakaerodromegallupinbarefootballfinanzgoraurskog-holandroverh\" +\n\t\"alla-speziaetnagahamaroygardenebakkeshibechambagriculturennebude\" +\n\t\"jjudygarlandd-dnshome-webservercellikes-piedmontblancomeeres3-ap\" +\n\t\"-south-1kappchizippodhaleangaviikadenadexetereport3l3p0rtargets-\" +\n\t\"itargivestbytomaritimobaravennagasuke12hpalace164lima-cityeatsel\" +\n\t\"inogradultarnobrzegyptianativeamericanantiques3-ap-northeast-133\" +\n\t\"7xn--fhbeiarnxn--finny-yuaxn--fiq228c5hsrtrentinostirolxn--fiq64\" +\n\t\"batodayonagoyautomotivecoalvdalaskanittedallasalleasinglesurance\" +\n\t\"rtmgretagajoboji234xn--fiqs8srvaporcloudxn--fiqz9storagexn--fjor\" +\n\t\"d-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--fpcrj9c3dxn\" +\n\t\"--frde-grandrapidstordalxn--frna-woaraisaijotromsojampagefrontap\" +\n\t\"piemontexn--frya-hraxn--fzc2c9e2cldmailuxembourgrongaxn--fzys8d6\" +\n\t\"9uvgmailxn--g2xx48clickasumigaurawa-mazowszextraspacekitagatajir\" +\n\t\"issagaeroclubmedecincinnationwidealstahaugesunderseaportsinfolld\" +\n\t\"alabamagasakishimabarackmazerbaijan-mayendoftheinternetflixilove\" +\n\t\"collegefantasyleaguernseyxn--gckr3f0fedorapeopleirfjordynvpncher\" +\n\t\"nivtsiciliaxn--gecrj9clinichernigovernmentjometacentruminamiawaj\" +\n\t\"ikis-a-doctorayxn--ggaviika-8ya47hakatanoshiroomuraxn--gildeskl-\" +\n\t\"g0axn--givuotna-8yasakaiminatoyonezawaxn--gjvik-wuaxn--gk3at1exn\" +\n\t\"--gls-elacaixaxn--gmq050isleofmandalxn--gmqw5axn--h-2failxn--h1a\" 
+\n\t\"eghakodatexn--h2breg3evenestorepaircraftrentinosud-tirolxn--h2br\" +\n\t\"j9c8cliniquenoharaxn--h3cuzk1digitalxn--hbmer-xqaxn--hcesuolo-7y\" +\n\t\"a35batsfjordivtasvuodnakamagayahababyglandivttasvuotnakamurataji\" +\n\t\"mibuildingjovikarasjokarasuyamarylhurstjohnayorovnoceanographics\" +\n\t\"3-us-west-1xn--hery-iraxn--hgebostad-g3axn--hmmrfeasta-s4acctrus\" +\n\t\"teexn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hx\" +\n\t\"t814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr513nxn\" +\n\t\"--indery-fyasugivingxn--io0a7issmarterthanyouxn--j1aefedoraproje\" +\n\t\"ctoyotomiyazakis-a-knightpointtokaizukamikoaniikappugliaxn--j1am\" +\n\t\"hakonexn--j6w193gxn--jlq61u9w7bauhausposts-and-telecommunication\" +\n\t\"sncfdiyonaguniversityoriikarateu-4xn--jlster-byasuokanraxn--jrpe\" +\n\t\"land-54axn--jvr189misakis-into-cartoonshiraois-a-techietis-a-the\" +\n\t\"rapistoiaxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kf\" +\n\t\"jord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--\" +\n\t\"3e0b707exn--koluokta-7ya57hakubaghdadxn--kprw13dxn--kpry57dxn--k\" +\n\t\"pu716fermodalenxn--kput3iwchofunatoriginsurecreationishiwakis-a-\" +\n\t\"geekashiwazakiyosatokashikiyosemitexn--krager-gyatomitamamuraxn-\" +\n\t\"-kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49j\" +\n\t\"elenia-goraxn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyatsukanumazu\" +\n\t\"ryxn--kvnangen-k0axn--l-1fairwindstorfjordxn--l1accentureklambor\" +\n\t\"ghiniizaxn--laheadju-7yatsushiroxn--langevg-jxaxn--lcvr32dxn--ld\" +\n\t\"ingen-q1axn--leagaviika-52bbcasertaipeiheijiitatebayashiibahcavu\" +\n\t\"otnagaraholtalenvironmentalconservationflfanfshostrowiecasinordl\" +\n\t\"andnpalermomahachijorpelandrangedalindashorokanaieverbankaratsug\" +\n\t\"inamikatagamiharuconnectashkentatamotors3-us-west-2xn--lesund-hu\" +\n\t\"axn--lgbbat1ad8jeonnamerikawauexn--lgrd-poaclintonoshoesarluxury\" +\n\t\"xn--lhppi-xqaxn--linds-pramericanartrvareserveblogspotrentinosue\" 
+\n\t\"dtirolxn--lns-qlapyatigorskypexn--loabt-0qaxn--lrdal-sraxn--lren\" +\n\t\"skog-54axn--lt-liaclothingdustkakamigaharaxn--lten-granexn--lury\" +\n\t\"-iraxn--m3ch0j3axn--mely-iraxn--merker-kuaxn--mgb2ddestorjdevclo\" +\n\t\"udfrontdoorxn--mgb9awbferraraxn--mgba3a3ejtrysiljanxn--mgba3a4f1\" +\n\t\"6axn--mgba3a4franamizuholdingsmilelverumisasaguris-into-gamessin\" +\n\t\"atsukigatakasagotembaixadaxn--mgba7c0bbn0axn--mgbaakc7dvferrarit\" +\n\t\"togoldpoint2thisamitsukexn--mgbaam7a8hakuis-a-personaltrainerxn-\" +\n\t\"-mgbab2bdxn--mgbai9a5eva00bbtatarantottoriiyamanouchikuhokuryuga\" +\n\t\"sakitaurayasudautoscanadaejeonbukaragandasnesoddenmarkhangelskja\" +\n\t\"kdnepropetrovskiervaapsteiermark12xn--mgbai9azgqp6jetztrentino-a\" +\n\t\"-adigexn--mgbayh7gpagespeedmobilizeroxn--mgbb9fbpobanazawaxn--mg\" +\n\t\"bbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgberp4a5d4a87gxn--mgbe\" +\n\t\"rp4a5d4arxn--mgbgu82axn--mgbi4ecexposedxn--mgbpl2fhskodjejuegosh\" +\n\t\"ikiminokamoenairportland-4-salernoboribetsuckstpetersburgxn--mgb\" +\n\t\"qly7c0a67fbcnsarpsborgrossetouchijiwadegreexn--mgbqly7cvafranzis\" +\n\t\"kanerdpolicexn--mgbt3dhdxn--mgbtf8flatangerxn--mgbtx2bbvacations\" +\n\t\"watch-and-clockerxn--mgbx4cd0abbottulanxessor-varangerxn--mix082\" +\n\t\"ferreroticanonoichinomiyakexn--mix891fetsundyroyrvikinguitarscho\" +\n\t\"larshipschoolxn--mjndalen-64axn--mk0axindustriesteamfamberkeleyx\" +\n\t\"n--mk1bu44cntkmaxxn--11b4c3dyndns-wikinkobayashikaoirminamibosog\" +\n\t\"ndaluzernxn--mkru45ixn--mlatvuopmi-s4axn--mli-tlaquilanciaxn--ml\" +\n\t\"selv-iuaxn--moreke-juaxn--mori-qsakuhokkaidoomdnsiskinkyotobetsu\" +\n\t\"midatlanticolognextdirectmparaglidingroundhandlingroznyxn--mosje\" +\n\t\"n-eyawaraxn--mot-tlarvikoseis-an-actresshirakofuefukihaboromskog\" +\n\t\"xn--mre-og-romsdal-qqbentleyoshiokaracoldwarmiamihamadaveroykeni\" +\n\t\"waizumiotsukuibestadds3-external-1xn--msy-ula0hakusandiegoodyear\" +\n\t\"xn--mtta-vrjjat-k7afamilycompanycolonialwilliamsburgrparisor-fro\" 
+\n\t\"nxn--muost-0qaxn--mxtq1misawaxn--ngbc5azdxn--ngbe9e0axn--ngbrxn-\" +\n\t\"-3hcrj9cistrondheimmobilienxn--nit225kosherbrookegawaxn--nmesjev\" +\n\t\"uemie-tcbalestrandabergamoarekexn--nnx388axn--nodessakuragawaxn-\" +\n\t\"-nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-bya\" +\n\t\"eservecounterstrikexn--nvuotna-hwaxn--nyqy26axn--o1achattanoogan\" +\n\t\"ordre-landxn--o3cw4haldenxn--o3cyx2axn--od0algxn--od0aq3beppubli\" +\n\t\"shproxyzgorzeleccollectionhlfanhs3-website-ap-northeast-1xn--ogb\" +\n\t\"pf8flekkefjordxn--oppegrd-ixaxn--ostery-fyawatahamaxn--osyro-wua\" +\n\t\"xn--p1acfgujolsterxn--p1aixn--pbt977coloradoplateaudioxn--pgbs0d\" +\n\t\"hlxn--porsgu-sta26fhvalerxn--pssu33lxn--pssy2uxn--q9jyb4columbus\" +\n\t\"heyxn--qcka1pmcdonaldstreamuneuesolutionsomaxn--qqqt11misconfuse\" +\n\t\"dxn--qxamusementunesorfoldxn--rady-iraxn--rdal-poaxn--rde-ulavag\" +\n\t\"iskexn--rdy-0nabarixn--rennesy-v1axn--rhkkervju-01aflakstadaokag\" +\n\t\"akibichuoxn--rholt-mragowoodsideltaitogliattirestudioxn--rhqv96g\" +\n\t\"xn--rht27zxn--rht3dxn--rht61exn--risa-5narusawaxn--risr-iraxn--r\" +\n\t\"land-uuaxn--rlingen-mxaxn--rmskog-byaxn--rny31halsaikitahatakama\" +\n\t\"tsukawaxn--rovu88bernuorockartuzyukinfinitintuitateshinanomachim\" +\n\t\"kentateyamavocatanzarowebspacebizenakanojohanamakinoharassnasaba\" +\n\t\"erobatickets3-ap-southeast-2xn--rros-granvindafjordxn--rskog-uua\" +\n\t\"xn--rst-0narutokyotangovtunkoninjamisonxn--rsta-francaiseharaxn-\" +\n\t\"-rvc1e0am3exn--ryken-vuaxn--ryrvik-byaxn--s-1faithruheredumbrell\" +\n\t\"ajollamericanexpressexyxn--s9brj9communitysnesarufutsunomiyawaka\" +\n\t\"saikaitakoelnxn--sandnessjen-ogbizxn--sandy-yuaxn--seral-lraxn--\" +\n\t\"ses554gxn--sgne-gratangenxn--skierv-utazaskoyabearalvahkijobserv\" +\n\t\"erisignieznoipifonymishimatsunoxn--skjervy-v1axn--skjk-soaxn--sk\" +\n\t\"nit-yqaxn--sknland-fxaxn--slat-5narviikamitondabayashiogamagoriz\" +\n\t\"iaxn--slt-elabbvieeexn--smla-hraxn--smna-gratis-a-bulls-fanxn--s\" 
+\n\t\"nase-nraxn--sndre-land-0cbremangerxn--snes-poaxn--snsa-roaxn--sr\" +\n\t\"-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbeski\" +\n\t\"dyn-o-saurlandes3-website-ap-southeast-1xn--srfold-byaxn--srreis\" +\n\t\"a-q1axn--srum-grazxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalse\" +\n\t\"n-sqbestbuyshouses3-website-ap-southeast-2xn--stre-toten-zcbstud\" +\n\t\"yndns-at-homedepotenzamamicrolightingxn--t60b56axn--tckweatherch\" +\n\t\"annelxn--tiq49xqyjevnakershuscountryestateofdelawarezzoologyxn--\" +\n\t\"tjme-hraxn--tn0agrinet-freakstuff-4-salexn--tnsberg-q1axn--tor13\" +\n\t\"1oxn--trany-yuaxn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr\" +\n\t\"-vraxn--uc0atvarggatrentoyokawaxn--uc0ay4axn--uist22hammarfeasta\" +\n\t\"fricapetownnews-stagingxn--uisz3gxn--unjrga-rtaobaokinawashirosa\" +\n\t\"tochiokinoshimalatvuopmiasakuchinotsuchiurakawalesundxn--unup4yx\" +\n\t\"n--uuwu58axn--vads-jraxn--vard-jraxn--vegrshei-c0axn--vermgensbe\" +\n\t\"rater-ctbetainaboxfusejnynysadodgeometre-experts-comptables3-web\" +\n\t\"site-eu-west-1xn--vermgensberatung-pwbieigersundray-dnsupdaterno\" +\n\t\"pilawavoues3-fips-us-gov-west-1xn--vestvgy-ixa6oxn--vg-yiabcgxn-\" +\n\t\"-vgan-qoaxn--vgsy-qoa0jewelryxn--vgu402comobilyxn--vhquvaroyxn--\" +\n\t\"vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bi\" +\n\t\"elawalmartatsunoceanographiquevje-og-hornnes3-website-sa-east-1x\" +\n\t\"n--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1comparemarkerr\" +\n\t\"yhotelsasayamaxn--wgbl6axn--xhq521biellaakesvuemieleccexn--xkc2a\" +\n\t\"l3hye2axn--xkc2dl3a5ee0hamurakamigoris-a-photographerokuappfizer\" +\n\t\"xn--y9a3aquariumissilewismillerxn--yer-znarvikoshimizumakis-an-a\" +\n\t\"narchistoricalsocietyxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn-\" +\n\t\"-3oq18vl8pn36axn--ystre-slidre-ujbieszczadygeyachimataikikuchiku\" +\n\t\"seikarugamvikareliancexn--zbx025dxn--zf0ao64axn--zf0avxn--3pxu8k\" +\n\t\"onyveloftrentino-aadigexn--zfr164bievatmallorcadaques3-website-u\" 
+\n\t\"s-east-1xperiaxz\"\n\n// nodes is the list of nodes. Each node is represented as a uint32, which\n// encodes the node's children, wildcard bit and node type (as an index into\n// the children array), ICANN bit and text.\n//\n// If the table was generated with the -comments flag, there is a //-comment\n// after each node's data. In it is the nodes-array indexes of the children,\n// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The\n// nodeType is printed as + for normal, ! for exception, and o for parent-only\n// nodes that have children but don't match a domain label in their own right.\n// An I denotes an ICANN domain.\n//\n// The layout within the uint32, from MSB to LSB, is:\n//\t[ 0 bits] unused\n//\t[10 bits] children index\n//\t[ 1 bits] ICANN bit\n//\t[15 bits] text index\n//\t[ 6 bits] text length\nvar nodes = [...]uint32{\n\t0x31fe83,\n\t0x28e944,\n\t0x2ed8c6,\n\t0x380743,\n\t0x380746,\n\t0x3a5306,\n\t0x3b5e43,\n\t0x30a7c4,\n\t0x20d0c7,\n\t0x2ed508,\n\t0x1a07102,\n\t0x31f1c7,\n\t0x368c09,\n\t0x2d68ca,\n\t0x2d68cb,\n\t0x238503,\n\t0x2dec46,\n\t0x23d6c5,\n\t0x1e07542,\n\t0x21cf84,\n\t0x266d03,\n\t0x346145,\n\t0x22035c2,\n\t0x20a643,\n\t0x271f944,\n\t0x342285,\n\t0x2a10042,\n\t0x38a48e,\n\t0x255083,\n\t0x3affc6,\n\t0x2e00142,\n\t0x2d4207,\n\t0x240d86,\n\t0x3204f02,\n\t0x22ee43,\n\t0x256204,\n\t0x32d106,\n\t0x25b788,\n\t0x2811c6,\n\t0x378fc4,\n\t0x3600242,\n\t0x33b8c9,\n\t0x212107,\n\t0x2e6046,\n\t0x341809,\n\t0x2a0048,\n\t0x33a904,\n\t0x2a0f46,\n\t0x21f886,\n\t0x3a02d42,\n\t0x3a014f,\n\t0x28c84e,\n\t0x21bfc4,\n\t0x382c85,\n\t0x30a6c5,\n\t0x2e2109,\n\t0x249089,\n\t0x33b1c7,\n\t0x23f8c6,\n\t0x20ae43,\n\t0x3e01d42,\n\t0x2e3203,\n\t0x225d0a,\n\t0x20cac3,\n\t0x242f85,\n\t0x28e142,\n\t0x28e149,\n\t0x4200bc2,\n\t0x209204,\n\t0x28ad46,\n\t0x2e5c05,\n\t0x361644,\n\t0x4a1a344,\n\t0x203ec3,\n\t0x218d04,\n\t0x4e00702,\n\t0x2f8e84,\n\t0x52f5f04,\n\t0x339bca,\n\t0x5600f82,\n\t0x28bc47,\n\t0x281548,\n\t0x6206502,\n\t0x31d0c7,\n\t0x2c6d44,\n\t0x2c6d
47,\n\t0x393c45,\n\t0x35e887,\n\t0x33af86,\n\t0x271dc4,\n\t0x378385,\n\t0x28ea47,\n\t0x72001c2,\n\t0x224143,\n\t0x200c42,\n\t0x200c43,\n\t0x760b5c2,\n\t0x20f4c5,\n\t0x7a01d02,\n\t0x357844,\n\t0x27e405,\n\t0x21bf07,\n\t0x25aece,\n\t0x2bf044,\n\t0x23df04,\n\t0x211c43,\n\t0x28a4c9,\n\t0x30eacb,\n\t0x2ea6c8,\n\t0x3415c8,\n\t0x306208,\n\t0x2b7288,\n\t0x33a74a,\n\t0x35e787,\n\t0x321606,\n\t0x7e8f282,\n\t0x36a683,\n\t0x377683,\n\t0x37fd44,\n\t0x3b5e83,\n\t0x32c343,\n\t0x1727e02,\n\t0x8203302,\n\t0x283f45,\n\t0x29e006,\n\t0x2da184,\n\t0x388547,\n\t0x2fa686,\n\t0x389384,\n\t0x3aa107,\n\t0x223d43,\n\t0x86cd5c2,\n\t0x8a0d342,\n\t0x8e1e642,\n\t0x21e646,\n\t0x9200002,\n\t0x2501c5,\n\t0x329343,\n\t0x201684,\n\t0x2efb04,\n\t0x2efb05,\n\t0x203c43,\n\t0x979c783,\n\t0x9a092c2,\n\t0x291d85,\n\t0x291d8b,\n\t0x343c06,\n\t0x21270b,\n\t0x226544,\n\t0x213a49,\n\t0x2148c4,\n\t0x9e14b02,\n\t0x215943,\n\t0x216283,\n\t0x1616b42,\n\t0x275fc3,\n\t0x216b4a,\n\t0xa201102,\n\t0x21d205,\n\t0x29a88a,\n\t0x2e0544,\n\t0x201103,\n\t0x325384,\n\t0x21ae03,\n\t0x21ae04,\n\t0x21ae07,\n\t0x21b605,\n\t0x21d685,\n\t0x21dc46,\n\t0x21dfc6,\n\t0x21ea43,\n\t0x222688,\n\t0x206c03,\n\t0xa60c702,\n\t0x245848,\n\t0x23614b,\n\t0x228908,\n\t0x228e06,\n\t0x229dc7,\n\t0x22da48,\n\t0xb6024c2,\n\t0xba430c2,\n\t0x32da08,\n\t0x233347,\n\t0x2e7b45,\n\t0x2e7b48,\n\t0x2c3b08,\n\t0x2be483,\n\t0x232e04,\n\t0x37fd82,\n\t0xbe34382,\n\t0xc23e102,\n\t0xca37302,\n\t0x237303,\n\t0xce01382,\n\t0x30a783,\n\t0x300f44,\n\t0x20a043,\n\t0x322844,\n\t0x20d7cb,\n\t0x2322c3,\n\t0x2e6a46,\n\t0x245f44,\n\t0x2982ce,\n\t0x381245,\n\t0x3b00c8,\n\t0x263347,\n\t0x26334a,\n\t0x22e803,\n\t0x317a07,\n\t0x30ec85,\n\t0x23a384,\n\t0x272706,\n\t0x272707,\n\t0x330f44,\n\t0x301f87,\n\t0x25a184,\n\t0x25b204,\n\t0x25b206,\n\t0x25f704,\n\t0x36bdc6,\n\t0x216983,\n\t0x233108,\n\t0x316ec8,\n\t0x23dec3,\n\t0x275f83,\n\t0x3a6604,\n\t0x3aae83,\n\t0xd235f42,\n\t0xd6df482,\n\t0x207143,\n\t0x203f86,\n\t0x2a1043,\n\t0x285184,\n\t0xda165c2,\n\t0x2165c3,\n\t0x35f083,\n\t0x21f
e02,\n\t0xde008c2,\n\t0x2c9786,\n\t0x23e347,\n\t0x2fd645,\n\t0x38fd04,\n\t0x294d45,\n\t0x2f8a47,\n\t0x2add85,\n\t0x2e4689,\n\t0x2e9906,\n\t0x2ef808,\n\t0x2fd546,\n\t0xe20e982,\n\t0x2ddb08,\n\t0x300d06,\n\t0x219205,\n\t0x316887,\n\t0x316dc4,\n\t0x316dc5,\n\t0x281384,\n\t0x345d88,\n\t0xe6127c2,\n\t0xea04882,\n\t0x33ca06,\n\t0x2cf588,\n\t0x34d485,\n\t0x351546,\n\t0x356108,\n\t0x371488,\n\t0xee35dc5,\n\t0xf214f44,\n\t0x34e247,\n\t0xf614602,\n\t0xfa22902,\n\t0x10e0f882,\n\t0x28ae45,\n\t0x2aaa45,\n\t0x30af86,\n\t0x350007,\n\t0x386287,\n\t0x11638543,\n\t0x2b0307,\n\t0x30e7c8,\n\t0x3a0849,\n\t0x38a647,\n\t0x3b9c87,\n\t0x238788,\n\t0x238f86,\n\t0x239e86,\n\t0x23aacc,\n\t0x23c08a,\n\t0x23c407,\n\t0x23d58b,\n\t0x23e187,\n\t0x23e18e,\n\t0x19a3f304,\n\t0x240244,\n\t0x242547,\n\t0x3ac747,\n\t0x246d46,\n\t0x246d47,\n\t0x247407,\n\t0x19e29682,\n\t0x2495c6,\n\t0x2495ca,\n\t0x24a08b,\n\t0x24ac87,\n\t0x24b845,\n\t0x24bb83,\n\t0x24bdc6,\n\t0x24bdc7,\n\t0x20d283,\n\t0x1a206e02,\n\t0x24c78a,\n\t0x1a769d02,\n\t0x1aa4f282,\n\t0x1ae4dd42,\n\t0x1b240e82,\n\t0x24e9c5,\n\t0x24ef44,\n\t0x1ba1a442,\n\t0x2f8f05,\n\t0x24a683,\n\t0x2149c5,\n\t0x2b7184,\n\t0x205ec4,\n\t0x25a486,\n\t0x262586,\n\t0x291f83,\n\t0x204844,\n\t0x3894c3,\n\t0x1c204c82,\n\t0x210ac4,\n\t0x210ac6,\n\t0x34e7c5,\n\t0x37e946,\n\t0x316988,\n\t0x273544,\n\t0x266ac8,\n\t0x398785,\n\t0x22bc88,\n\t0x2b2dc6,\n\t0x26d907,\n\t0x233d84,\n\t0x233d86,\n\t0x242bc3,\n\t0x393fc3,\n\t0x211d08,\n\t0x322004,\n\t0x356747,\n\t0x20c7c6,\n\t0x2dedc9,\n\t0x322a88,\n\t0x325448,\n\t0x331ac4,\n\t0x35f103,\n\t0x229942,\n\t0x1d2234c2,\n\t0x1d61a202,\n\t0x36c083,\n\t0x1da08e02,\n\t0x20d204,\n\t0x3521c6,\n\t0x3b3745,\n\t0x24fa83,\n\t0x23cf44,\n\t0x2b95c7,\n\t0x25a783,\n\t0x251208,\n\t0x218405,\n\t0x264143,\n\t0x27e385,\n\t0x27e4c4,\n\t0x300a06,\n\t0x218f84,\n\t0x21ab86,\n\t0x21be46,\n\t0x210584,\n\t0x23e543,\n\t0x1de1a582,\n\t0x23dd05,\n\t0x20b9c3,\n\t0x1e20c882,\n\t0x23aa83,\n\t0x2231c5,\n\t0x23cac3,\n\t0x23cac9,\n\t0x1e606b82,\n\t0x1ee07842,\n\t0x2918c5,\n
\t0x2211c6,\n\t0x2d9d46,\n\t0x2bb248,\n\t0x2bb24b,\n\t0x203fcb,\n\t0x220bc5,\n\t0x2fd845,\n\t0x2cdfc9,\n\t0x1600302,\n\t0x210748,\n\t0x213d44,\n\t0x1f601842,\n\t0x326403,\n\t0x1fecdd46,\n\t0x348e08,\n\t0x20208b42,\n\t0x2bdec8,\n\t0x2060c182,\n\t0x2bf7ca,\n\t0x20a3fd03,\n\t0x203606,\n\t0x36cc48,\n\t0x209708,\n\t0x3b3a46,\n\t0x37c807,\n\t0x3a0347,\n\t0x34daca,\n\t0x2e05c4,\n\t0x354d44,\n\t0x368649,\n\t0x2139fb45,\n\t0x28ca46,\n\t0x210083,\n\t0x253d44,\n\t0x2160df44,\n\t0x20df47,\n\t0x22c507,\n\t0x234404,\n\t0x2df805,\n\t0x30b048,\n\t0x375e07,\n\t0x381007,\n\t0x21a07602,\n\t0x32e984,\n\t0x29b188,\n\t0x2504c4,\n\t0x251844,\n\t0x251c45,\n\t0x251d87,\n\t0x222349,\n\t0x252a04,\n\t0x253149,\n\t0x253388,\n\t0x253ac4,\n\t0x253ac7,\n\t0x21e54003,\n\t0x254187,\n\t0x1609c42,\n\t0x16b4a42,\n\t0x254b86,\n\t0x2550c7,\n\t0x255584,\n\t0x257687,\n\t0x258d47,\n\t0x259983,\n\t0x2f6802,\n\t0x207d82,\n\t0x231683,\n\t0x231684,\n\t0x23168b,\n\t0x3416c8,\n\t0x263c84,\n\t0x25c985,\n\t0x25eb47,\n\t0x260105,\n\t0x2c8c0a,\n\t0x263bc3,\n\t0x22206b02,\n\t0x206b04,\n\t0x267189,\n\t0x26a743,\n\t0x26a807,\n\t0x373089,\n\t0x212508,\n\t0x2db543,\n\t0x282f07,\n\t0x283649,\n\t0x23d483,\n\t0x289844,\n\t0x28d209,\n\t0x290146,\n\t0x21c203,\n\t0x200182,\n\t0x264d83,\n\t0x2b4847,\n\t0x2c3e85,\n\t0x3413c6,\n\t0x259004,\n\t0x374e05,\n\t0x225cc3,\n\t0x20e646,\n\t0x213c42,\n\t0x3a1784,\n\t0x2260d382,\n\t0x226603,\n\t0x22a01802,\n\t0x251743,\n\t0x21e444,\n\t0x21e447,\n\t0x201986,\n\t0x20df02,\n\t0x22e0dec2,\n\t0x2c4244,\n\t0x23235182,\n\t0x23601b82,\n\t0x265704,\n\t0x265705,\n\t0x345105,\n\t0x35c386,\n\t0x23a074c2,\n\t0x2074c5,\n\t0x213005,\n\t0x2157c3,\n\t0x219d06,\n\t0x21a645,\n\t0x21e5c2,\n\t0x34d0c5,\n\t0x21e5c4,\n\t0x228203,\n\t0x22a443,\n\t0x23e11442,\n\t0x2dcf47,\n\t0x376084,\n\t0x376089,\n\t0x253c44,\n\t0x2357c3,\n\t0x300589,\n\t0x389e08,\n\t0x242aa8c4,\n\t0x2aa8c6,\n\t0x219983,\n\t0x25d3c3,\n\t0x323043,\n\t0x246eebc2,\n\t0x379b82,\n\t0x24a17202,\n\t0x32af48,\n\t0x358e08,\n\t0x3a5a46,\n\t0x2fd0c5,\n\t0x317
885,\n\t0x333d07,\n\t0x2247c5,\n\t0x210642,\n\t0x24e04742,\n\t0x160a442,\n\t0x2447c8,\n\t0x2dda45,\n\t0x2bfbc4,\n\t0x2f2845,\n\t0x381d87,\n\t0x240944,\n\t0x24c682,\n\t0x25200582,\n\t0x33ffc4,\n\t0x21ca07,\n\t0x292507,\n\t0x35e844,\n\t0x29a843,\n\t0x23de04,\n\t0x23de08,\n\t0x23a1c6,\n\t0x27258a,\n\t0x222204,\n\t0x29abc8,\n\t0x290584,\n\t0x229ec6,\n\t0x29c484,\n\t0x28b146,\n\t0x376349,\n\t0x274847,\n\t0x241243,\n\t0x256351c2,\n\t0x2755c3,\n\t0x214d02,\n\t0x25a52e42,\n\t0x313486,\n\t0x374588,\n\t0x2ac047,\n\t0x3ab249,\n\t0x299f49,\n\t0x2acf05,\n\t0x2adec9,\n\t0x2ae685,\n\t0x2ae7c9,\n\t0x2afe45,\n\t0x2b11c8,\n\t0x25e0a104,\n\t0x26259ac7,\n\t0x2b13c3,\n\t0x2b13c7,\n\t0x3ba046,\n\t0x2b1a47,\n\t0x2a9b05,\n\t0x2a2cc3,\n\t0x26636d02,\n\t0x339704,\n\t0x26a42a42,\n\t0x266603,\n\t0x26e206c2,\n\t0x30df06,\n\t0x2814c5,\n\t0x2b3cc7,\n\t0x332043,\n\t0x32c2c4,\n\t0x217003,\n\t0x342c43,\n\t0x27205e82,\n\t0x27a0c442,\n\t0x3a5404,\n\t0x2f67c3,\n\t0x24e545,\n\t0x27e01c82,\n\t0x286007c2,\n\t0x2c8286,\n\t0x322144,\n\t0x38c444,\n\t0x38c44a,\n\t0x28e00942,\n\t0x38298a,\n\t0x39b8c8,\n\t0x29231604,\n\t0x2046c3,\n\t0x20d8c3,\n\t0x306349,\n\t0x25bd09,\n\t0x364986,\n\t0x29655783,\n\t0x335d45,\n\t0x30d2cd,\n\t0x39ba86,\n\t0x204f4b,\n\t0x29a02b02,\n\t0x225b48,\n\t0x2be22782,\n\t0x2c203e02,\n\t0x2b1685,\n\t0x2c604182,\n\t0x266847,\n\t0x21b987,\n\t0x20bf43,\n\t0x23b188,\n\t0x2ca02542,\n\t0x3780c4,\n\t0x21a8c3,\n\t0x348505,\n\t0x364603,\n\t0x33c406,\n\t0x212a84,\n\t0x275f43,\n\t0x2b6443,\n\t0x2ce09942,\n\t0x2fd7c4,\n\t0x379c85,\n\t0x3b6587,\n\t0x280003,\n\t0x2b5103,\n\t0x2b5c03,\n\t0x1631182,\n\t0x2b5cc3,\n\t0x2b63c3,\n\t0x2d2086c2,\n\t0x3a2e44,\n\t0x262786,\n\t0x34ba83,\n\t0x2086c3,\n\t0x2d6b8042,\n\t0x2b8048,\n\t0x2b8304,\n\t0x37ce46,\n\t0x2b8bc7,\n\t0x258346,\n\t0x2a0304,\n\t0x3b201702,\n\t0x3b9f0b,\n\t0x307c0e,\n\t0x221d4f,\n\t0x2ac5c3,\n\t0x3ba64d42,\n\t0x160b542,\n\t0x3be00a82,\n\t0x2e89c3,\n\t0x2e4903,\n\t0x2de046,\n\t0x207986,\n\t0x203007,\n\t0x304704,\n\t0x3c221302,\n\t0x3c618742,\n\t0x3a120
5,\n\t0x2e7007,\n\t0x38c946,\n\t0x3ca28142,\n\t0x228144,\n\t0x2bc743,\n\t0x3ce09a02,\n\t0x3d366443,\n\t0x2bce04,\n\t0x2c5409,\n\t0x16cb602,\n\t0x3d605242,\n\t0x385d85,\n\t0x3dacb882,\n\t0x3de03582,\n\t0x3541c7,\n\t0x21b2c9,\n\t0x368e8b,\n\t0x3a0105,\n\t0x2714c9,\n\t0x384d06,\n\t0x343c47,\n\t0x3e206844,\n\t0x341d89,\n\t0x380907,\n\t0x348ac7,\n\t0x2122c3,\n\t0x2122c6,\n\t0x312247,\n\t0x263a43,\n\t0x263a46,\n\t0x3ea01cc2,\n\t0x3ee022c2,\n\t0x22bf03,\n\t0x32bec5,\n\t0x25a007,\n\t0x227906,\n\t0x2c3e05,\n\t0x207a84,\n\t0x28ddc5,\n\t0x2fae04,\n\t0x3f204bc2,\n\t0x337447,\n\t0x2ca604,\n\t0x24f3c4,\n\t0x25bc0d,\n\t0x25d749,\n\t0x3ab748,\n\t0x25e044,\n\t0x234a85,\n\t0x322907,\n\t0x3329c4,\n\t0x2fa747,\n\t0x204bc5,\n\t0x3f6ac504,\n\t0x2b5e05,\n\t0x269404,\n\t0x256fc6,\n\t0x34fe05,\n\t0x3fa048c2,\n\t0x2011c4,\n\t0x2011c5,\n\t0x3802c6,\n\t0x206d85,\n\t0x3c0144,\n\t0x2cda83,\n\t0x208d46,\n\t0x222545,\n\t0x22b605,\n\t0x34ff04,\n\t0x222283,\n\t0x22228c,\n\t0x3fe90a82,\n\t0x40206702,\n\t0x40600282,\n\t0x211a83,\n\t0x211a84,\n\t0x40a02942,\n\t0x2fba48,\n\t0x341485,\n\t0x34c984,\n\t0x36ee86,\n\t0x40e0d842,\n\t0x41234502,\n\t0x41601fc2,\n\t0x2a6a85,\n\t0x210446,\n\t0x226144,\n\t0x32d646,\n\t0x28ba06,\n\t0x215c83,\n\t0x41b2770a,\n\t0x2f6b05,\n\t0x2f6fc3,\n\t0x22a9c6,\n\t0x30c989,\n\t0x22a9c7,\n\t0x29f648,\n\t0x29ff09,\n\t0x241b08,\n\t0x22e546,\n\t0x209b03,\n\t0x41e0c202,\n\t0x395343,\n\t0x395349,\n\t0x333608,\n\t0x42253442,\n\t0x42604a82,\n\t0x229443,\n\t0x2e4505,\n\t0x25c404,\n\t0x2c9ec9,\n\t0x26eb44,\n\t0x2e0908,\n\t0x2050c3,\n\t0x20dc44,\n\t0x2acd03,\n\t0x221208,\n\t0x25bb47,\n\t0x42e281c2,\n\t0x270d02,\n\t0x388b05,\n\t0x272dc9,\n\t0x28cac3,\n\t0x284bc4,\n\t0x335d04,\n\t0x227543,\n\t0x28580a,\n\t0x43382842,\n\t0x43601182,\n\t0x2cd543,\n\t0x384f83,\n\t0x160dc02,\n\t0x20ffc3,\n\t0x43a14702,\n\t0x43e00802,\n\t0x4420f644,\n\t0x20f646,\n\t0x3b6a46,\n\t0x248c44,\n\t0x37d243,\n\t0x200803,\n\t0x2f60c3,\n\t0x24a406,\n\t0x30aa05,\n\t0x2cd6c7,\n\t0x343b09,\n\t0x2d2d85,\n\t0x2d3f46,\n\t0x2d4908,\
n\t0x2d4b06,\n\t0x260ec4,\n\t0x2a1d8b,\n\t0x2d8403,\n\t0x2d8405,\n\t0x2d8548,\n\t0x22c2c2,\n\t0x3544c2,\n\t0x4464ea42,\n\t0x44a14642,\n\t0x221343,\n\t0x44e745c2,\n\t0x2745c3,\n\t0x2d8844,\n\t0x2d8e03,\n\t0x45605902,\n\t0x45a0c0c6,\n\t0x2af186,\n\t0x45edcac2,\n\t0x462162c2,\n\t0x4662a482,\n\t0x46a00e82,\n\t0x46e176c2,\n\t0x47202ec2,\n\t0x205383,\n\t0x344905,\n\t0x348206,\n\t0x4761bf84,\n\t0x34e5ca,\n\t0x20bd46,\n\t0x220e04,\n\t0x28a483,\n\t0x4820ea42,\n\t0x204d42,\n\t0x23d503,\n\t0x48608e83,\n\t0x2d8047,\n\t0x34fd07,\n\t0x49e31787,\n\t0x23fcc7,\n\t0x2309c3,\n\t0x33188a,\n\t0x263544,\n\t0x3863c4,\n\t0x3863ca,\n\t0x24b685,\n\t0x4a2190c2,\n\t0x254b43,\n\t0x4a601942,\n\t0x21b543,\n\t0x275583,\n\t0x4ae02b82,\n\t0x2b0284,\n\t0x2256c4,\n\t0x208105,\n\t0x39e745,\n\t0x2fc3c6,\n\t0x2fc746,\n\t0x4b206802,\n\t0x4b600982,\n\t0x3139c5,\n\t0x2aee92,\n\t0x259806,\n\t0x231483,\n\t0x315a06,\n\t0x231485,\n\t0x1616b82,\n\t0x53a17102,\n\t0x35fd43,\n\t0x217103,\n\t0x35d703,\n\t0x53e02c82,\n\t0x38a783,\n\t0x54205b82,\n\t0x20cc43,\n\t0x3a2e88,\n\t0x231e83,\n\t0x231e86,\n\t0x3b0c87,\n\t0x26c286,\n\t0x26c28b,\n\t0x220d47,\n\t0x339504,\n\t0x54a00e42,\n\t0x341305,\n\t0x54e08e43,\n\t0x2aec83,\n\t0x32de85,\n\t0x331783,\n\t0x55331786,\n\t0x2108ca,\n\t0x2488c3,\n\t0x240c44,\n\t0x2cf4c6,\n\t0x2364c6,\n\t0x55601a03,\n\t0x32c187,\n\t0x364887,\n\t0x2a3885,\n\t0x251046,\n\t0x222583,\n\t0x57619f43,\n\t0x57a0cb42,\n\t0x34bd44,\n\t0x22c24c,\n\t0x232f09,\n\t0x2445c7,\n\t0x38ad45,\n\t0x252c84,\n\t0x25e6c8,\n\t0x265d45,\n\t0x57e6c505,\n\t0x27b709,\n\t0x2e6103,\n\t0x24f204,\n\t0x5821cc82,\n\t0x221543,\n\t0x5869bf42,\n\t0x3bbe86,\n\t0x16235c2,\n\t0x58a35b42,\n\t0x2a6988,\n\t0x2ac343,\n\t0x2b5d47,\n\t0x2daa05,\n\t0x2e5205,\n\t0x2e520b,\n\t0x2e58c6,\n\t0x2e5406,\n\t0x2e9006,\n\t0x232b84,\n\t0x2e9246,\n\t0x58eeae88,\n\t0x246003,\n\t0x231a43,\n\t0x231a44,\n\t0x2ea484,\n\t0x2eab87,\n\t0x2ec3c5,\n\t0x592ec502,\n\t0x59607082,\n\t0x207085,\n\t0x295bc4,\n\t0x2ef38b,\n\t0x2efa08,\n\t0x2998c4,\n\t0x228182,\n\t0x59e99842,\
n\t0x350e83,\n\t0x2efec4,\n\t0x2f0185,\n\t0x2f0607,\n\t0x2f2384,\n\t0x220c04,\n\t0x5a204102,\n\t0x36f5c9,\n\t0x2f3185,\n\t0x3a03c5,\n\t0x2f3e45,\n\t0x5a621483,\n\t0x2f4dc4,\n\t0x2f4dcb,\n\t0x2f5204,\n\t0x2f5c0b,\n\t0x2f6005,\n\t0x221e8a,\n\t0x2f7608,\n\t0x2f780a,\n\t0x2f7fc3,\n\t0x2f7fca,\n\t0x5aa33502,\n\t0x5ae2fa42,\n\t0x236903,\n\t0x5b2f9f02,\n\t0x2f9f03,\n\t0x5b71c482,\n\t0x5bb29ac2,\n\t0x2fac84,\n\t0x2227c6,\n\t0x32d385,\n\t0x2fd4c3,\n\t0x320446,\n\t0x317345,\n\t0x262a84,\n\t0x5be06b42,\n\t0x2ba844,\n\t0x2cdc4a,\n\t0x22fd07,\n\t0x2e5e86,\n\t0x2612c7,\n\t0x20c743,\n\t0x2bce48,\n\t0x39fd8b,\n\t0x230305,\n\t0x2f41c5,\n\t0x2f41c6,\n\t0x2ea004,\n\t0x3bf388,\n\t0x20e543,\n\t0x21f784,\n\t0x21f787,\n\t0x355746,\n\t0x344b06,\n\t0x29810a,\n\t0x250d44,\n\t0x250d4a,\n\t0x5c20c386,\n\t0x20c387,\n\t0x25ca07,\n\t0x27b0c4,\n\t0x27b0c9,\n\t0x262445,\n\t0x2439cb,\n\t0x2eef43,\n\t0x21ad43,\n\t0x5c625b03,\n\t0x23a584,\n\t0x5ca00482,\n\t0x2f70c6,\n\t0x5cea2a45,\n\t0x315c45,\n\t0x258586,\n\t0x352b04,\n\t0x5d2044c2,\n\t0x24bbc4,\n\t0x5d60b282,\n\t0x28b5c5,\n\t0x236c84,\n\t0x22cb43,\n\t0x5de17142,\n\t0x217143,\n\t0x273e86,\n\t0x5e204242,\n\t0x2241c8,\n\t0x22a844,\n\t0x22a846,\n\t0x204dc6,\n\t0x25ec04,\n\t0x208cc5,\n\t0x214e48,\n\t0x215647,\n\t0x2159c7,\n\t0x2159cf,\n\t0x29b086,\n\t0x22f483,\n\t0x22f484,\n\t0x36edc4,\n\t0x213103,\n\t0x22a004,\n\t0x2494c4,\n\t0x5e60fd02,\n\t0x291cc3,\n\t0x24bf43,\n\t0x5ea0d2c2,\n\t0x22f043,\n\t0x20d2c3,\n\t0x21d70a,\n\t0x2e7d07,\n\t0x381f0c,\n\t0x3821c6,\n\t0x2f5a86,\n\t0x2f6447,\n\t0x5ee0e947,\n\t0x252d49,\n\t0x245984,\n\t0x253e04,\n\t0x5f221382,\n\t0x5f600a02,\n\t0x2984c6,\n\t0x32bf84,\n\t0x2df606,\n\t0x239048,\n\t0x2bf2c4,\n\t0x266886,\n\t0x2d9d05,\n\t0x26e488,\n\t0x2041c3,\n\t0x26fd85,\n\t0x270b03,\n\t0x3a04c3,\n\t0x3a04c4,\n\t0x206ac3,\n\t0x5fa0e602,\n\t0x5fe00742,\n\t0x2eee09,\n\t0x273885,\n\t0x276bc4,\n\t0x27ab05,\n\t0x217e84,\n\t0x2c62c7,\n\t0x36ecc5,\n\t0x231944,\n\t0x231948,\n\t0x2d6206,\n\t0x2dac04,\n\t0x2e0788,\n\t0x2e1fc7,\n\t0x60202502,\n\
t0x2e6f44,\n\t0x2131c4,\n\t0x348cc7,\n\t0x60602504,\n\t0x210f82,\n\t0x60a06742,\n\t0x227103,\n\t0x2dfc84,\n\t0x2b2143,\n\t0x370645,\n\t0x60e06d42,\n\t0x2eeac5,\n\t0x21b9c2,\n\t0x35c7c5,\n\t0x374745,\n\t0x61204d02,\n\t0x35f004,\n\t0x61606182,\n\t0x266d86,\n\t0x2a7806,\n\t0x272f08,\n\t0x2c7588,\n\t0x30de84,\n\t0x2f97c5,\n\t0x395809,\n\t0x2fd8c4,\n\t0x210884,\n\t0x208483,\n\t0x61a1f545,\n\t0x2cb6c7,\n\t0x28d004,\n\t0x31288d,\n\t0x332182,\n\t0x33f203,\n\t0x3479c3,\n\t0x61e00d02,\n\t0x397dc5,\n\t0x212cc7,\n\t0x23fd84,\n\t0x23fd87,\n\t0x2a0109,\n\t0x2cdd89,\n\t0x277e07,\n\t0x20f803,\n\t0x2ba348,\n\t0x2522c9,\n\t0x349c47,\n\t0x355685,\n\t0x395546,\n\t0x398bc6,\n\t0x3aaf05,\n\t0x25d845,\n\t0x62209142,\n\t0x37da45,\n\t0x2bad08,\n\t0x2c9546,\n\t0x626c0d47,\n\t0x2f6244,\n\t0x29bb07,\n\t0x300246,\n\t0x62a3b442,\n\t0x37ffc6,\n\t0x302d4a,\n\t0x3035c5,\n\t0x62ee6282,\n\t0x63260a02,\n\t0x312586,\n\t0x2b36c8,\n\t0x636926c7,\n\t0x63a04502,\n\t0x226783,\n\t0x36a846,\n\t0x22cf04,\n\t0x3b0b46,\n\t0x344e06,\n\t0x36d78a,\n\t0x377705,\n\t0x208806,\n\t0x2205c3,\n\t0x2205c4,\n\t0x203082,\n\t0x314a43,\n\t0x63e11ac2,\n\t0x2f8483,\n\t0x382c04,\n\t0x2b3804,\n\t0x2b380a,\n\t0x22e603,\n\t0x281288,\n\t0x22e60a,\n\t0x2b4247,\n\t0x309306,\n\t0x266c44,\n\t0x220cc2,\n\t0x228cc2,\n\t0x64207002,\n\t0x23ddc3,\n\t0x25c7c7,\n\t0x320707,\n\t0x28e8c4,\n\t0x39d147,\n\t0x2f0706,\n\t0x21e747,\n\t0x233484,\n\t0x398ac5,\n\t0x2ce485,\n\t0x6462be42,\n\t0x231146,\n\t0x327943,\n\t0x371742,\n\t0x383306,\n\t0x64a08bc2,\n\t0x64e05082,\n\t0x3c0985,\n\t0x6522a202,\n\t0x65604782,\n\t0x348085,\n\t0x39e345,\n\t0x2088c5,\n\t0x26f003,\n\t0x352285,\n\t0x2e5987,\n\t0x305cc5,\n\t0x311985,\n\t0x3b01c4,\n\t0x24d486,\n\t0x264544,\n\t0x65a00d42,\n\t0x666f2bc5,\n\t0x2ab647,\n\t0x3176c8,\n\t0x29f806,\n\t0x29f80d,\n\t0x2aac09,\n\t0x2aac12,\n\t0x359f05,\n\t0x36f8c3,\n\t0x66a08882,\n\t0x314544,\n\t0x39bb03,\n\t0x3963c5,\n\t0x304a45,\n\t0x66e1a902,\n\t0x264183,\n\t0x67231802,\n\t0x67a43242,\n\t0x67e1f342,\n\t0x2ed385,\n\t0x23fec3,\n\t0x36d4
08,\n\t0x68204382,\n\t0x686000c2,\n\t0x2b0246,\n\t0x35f2ca,\n\t0x205503,\n\t0x209f43,\n\t0x2ef103,\n\t0x69202642,\n\t0x77602cc2,\n\t0x77e0d582,\n\t0x206442,\n\t0x37fdc9,\n\t0x2caa44,\n\t0x23b488,\n\t0x782fd502,\n\t0x78603642,\n\t0x2f5e45,\n\t0x23d9c8,\n\t0x3a2fc8,\n\t0x25920c,\n\t0x22fac3,\n\t0x78a68dc2,\n\t0x78e0c402,\n\t0x2d3206,\n\t0x30a185,\n\t0x2a7b83,\n\t0x381c46,\n\t0x30a2c6,\n\t0x20d883,\n\t0x30bc43,\n\t0x30c146,\n\t0x30cd84,\n\t0x29d386,\n\t0x2d85c5,\n\t0x30d10a,\n\t0x2397c4,\n\t0x30e244,\n\t0x30f08a,\n\t0x79203442,\n\t0x2413c5,\n\t0x31018a,\n\t0x310a85,\n\t0x311344,\n\t0x311446,\n\t0x3115c4,\n\t0x221806,\n\t0x79611042,\n\t0x33c0c6,\n\t0x3b1b45,\n\t0x3b80c7,\n\t0x200206,\n\t0x2de844,\n\t0x2de847,\n\t0x327646,\n\t0x245345,\n\t0x245347,\n\t0x3abdc7,\n\t0x3abdce,\n\t0x232206,\n\t0x2fa605,\n\t0x202447,\n\t0x216303,\n\t0x3326c7,\n\t0x2172c5,\n\t0x21b0c4,\n\t0x2343c2,\n\t0x2432c7,\n\t0x304784,\n\t0x383884,\n\t0x270b8b,\n\t0x224e03,\n\t0x2d4c47,\n\t0x224e04,\n\t0x2f11c7,\n\t0x299543,\n\t0x33dd4d,\n\t0x398608,\n\t0x224604,\n\t0x231845,\n\t0x312bc5,\n\t0x313003,\n\t0x79a0c4c2,\n\t0x314a03,\n\t0x314d43,\n\t0x20f204,\n\t0x283745,\n\t0x22a4c7,\n\t0x220646,\n\t0x382943,\n\t0x38344b,\n\t0x259c8b,\n\t0x2ac9cb,\n\t0x2fbd4b,\n\t0x2c578a,\n\t0x30e48b,\n\t0x32420b,\n\t0x362f0c,\n\t0x38bf4b,\n\t0x3bdf51,\n\t0x3bfd8a,\n\t0x31604b,\n\t0x31630c,\n\t0x31660b,\n\t0x316b8a,\n\t0x317c8a,\n\t0x318c8e,\n\t0x31930b,\n\t0x3195ca,\n\t0x31a9d1,\n\t0x31ae0a,\n\t0x31b30b,\n\t0x31b84e,\n\t0x31c18c,\n\t0x31c68b,\n\t0x31c94e,\n\t0x31cccc,\n\t0x31d9ca,\n\t0x31eccc,\n\t0x79f1efca,\n\t0x31f7c8,\n\t0x320909,\n\t0x3232ca,\n\t0x32354a,\n\t0x3237cb,\n\t0x326d8e,\n\t0x327111,\n\t0x330189,\n\t0x3303ca,\n\t0x3313cb,\n\t0x334a0a,\n\t0x3354d6,\n\t0x336e4b,\n\t0x337b0a,\n\t0x337f4a,\n\t0x33a4cb,\n\t0x33b749,\n\t0x33e6c9,\n\t0x33ec8d,\n\t0x33f2cb,\n\t0x34040b,\n\t0x340dcb,\n\t0x347049,\n\t0x34768e,\n\t0x347dca,\n\t0x3494ca,\n\t0x349a0a,\n\t0x34a14b,\n\t0x34a98b,\n\t0x34ac4d,\n\t0x34c50d,\n\t0x34cd50,\n\t0x34
d20b,\n\t0x35064c,\n\t0x3512cb,\n\t0x353ccb,\n\t0x35528e,\n\t0x355e0b,\n\t0x355e0d,\n\t0x35ae8b,\n\t0x35b90f,\n\t0x35bccb,\n\t0x35c50a,\n\t0x35cb49,\n\t0x35de09,\n\t0x35e18b,\n\t0x35e44e,\n\t0x36020b,\n\t0x361acf,\n\t0x36394b,\n\t0x363c0b,\n\t0x363ecb,\n\t0x3643ca,\n\t0x368a89,\n\t0x36e04f,\n\t0x372a8c,\n\t0x3732cc,\n\t0x37374e,\n\t0x373ccf,\n\t0x37408e,\n\t0x375690,\n\t0x375a8f,\n\t0x37660e,\n\t0x376f4c,\n\t0x377252,\n\t0x379891,\n\t0x37a18e,\n\t0x37a94e,\n\t0x37ae8e,\n\t0x37b20f,\n\t0x37b5ce,\n\t0x37b953,\n\t0x37be11,\n\t0x37c24c,\n\t0x37c54e,\n\t0x37c9cc,\n\t0x37de53,\n\t0x37ead0,\n\t0x37f30c,\n\t0x37f60c,\n\t0x37facb,\n\t0x38044e,\n\t0x380d8b,\n\t0x3816cb,\n\t0x382fcc,\n\t0x38b38a,\n\t0x38b74c,\n\t0x38ba4c,\n\t0x38bd49,\n\t0x38d7cb,\n\t0x38da88,\n\t0x38df49,\n\t0x38df4f,\n\t0x38f88b,\n\t0x7a39028a,\n\t0x391e4c,\n\t0x393009,\n\t0x393488,\n\t0x39368b,\n\t0x393d8b,\n\t0x39490a,\n\t0x394b8b,\n\t0x3950cc,\n\t0x396048,\n\t0x398d4b,\n\t0x39b1cb,\n\t0x39ef4e,\n\t0x3a05cb,\n\t0x3a1f0b,\n\t0x3ab94b,\n\t0x3abc09,\n\t0x3ac14d,\n\t0x3b1d4a,\n\t0x3b2c97,\n\t0x3b4398,\n\t0x3b6bc9,\n\t0x3b7d0b,\n\t0x3b8fd4,\n\t0x3b94cb,\n\t0x3b9a4a,\n\t0x3ba38a,\n\t0x3ba60b,\n\t0x3badd0,\n\t0x3bb1d1,\n\t0x3bc00a,\n\t0x3bd54d,\n\t0x3bdc4d,\n\t0x3c05cb,\n\t0x3c1206,\n\t0x231243,\n\t0x7a791143,\n\t0x26ed86,\n\t0x248805,\n\t0x22d287,\n\t0x3240c6,\n\t0x1608742,\n\t0x2c1fc9,\n\t0x320244,\n\t0x2e4d48,\n\t0x210943,\n\t0x314487,\n\t0x239202,\n\t0x2b3d03,\n\t0x7aa04542,\n\t0x2d0d06,\n\t0x2d2104,\n\t0x37a844,\n\t0x3443c3,\n\t0x3443c5,\n\t0x7b2cb8c2,\n\t0x7b6aeb44,\n\t0x27b007,\n\t0x7ba43282,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x200e03,\n\t0x207102,\n\t0x16fb88,\n\t0x20f882,\n\t0x323043,\n\t0x28cac3,\n\t0x208e83,\n\t0xe03,\n\t0x201a03,\n\t0x215443,\n\t0x32b7d6,\n\t0x32ca13,\n\t0x39cfc9,\n\t0x34e148,\n\t0x341189,\n\t0x310306,\n\t0x340010,\n\t0x24c9d3,\n\t0x355808,\n\t0x2a0a87,\n\t0x37d347,\n\t0x28db0a,\n\t0x232309,\n\t0x3961c9,\n\t0x28664b,\n\t0x33af86,\n\t0x207
28a,\n\t0x228e06,\n\t0x31fe43,\n\t0x2dce85,\n\t0x233108,\n\t0x266e4d,\n\t0x28af0c,\n\t0x218c87,\n\t0x318fcd,\n\t0x214f44,\n\t0x23a84a,\n\t0x23bbca,\n\t0x23c08a,\n\t0x24ccc7,\n\t0x246b87,\n\t0x24a904,\n\t0x233d86,\n\t0x209d44,\n\t0x2c7ec8,\n\t0x26eb89,\n\t0x2bb246,\n\t0x2bb248,\n\t0x24d18d,\n\t0x2cdfc9,\n\t0x209708,\n\t0x3a0347,\n\t0x300fca,\n\t0x2550c6,\n\t0x2664c7,\n\t0x2bd584,\n\t0x292347,\n\t0x35180a,\n\t0x38690e,\n\t0x2247c5,\n\t0x29224b,\n\t0x32f709,\n\t0x25bd09,\n\t0x21b7c7,\n\t0x2936ca,\n\t0x348c07,\n\t0x307d49,\n\t0x20b808,\n\t0x33420b,\n\t0x2e4505,\n\t0x3ab60a,\n\t0x2734c9,\n\t0x331d0a,\n\t0x2d2e0b,\n\t0x38668b,\n\t0x2863d5,\n\t0x30be85,\n\t0x3a03c5,\n\t0x2f4dca,\n\t0x364a8a,\n\t0x32f487,\n\t0x2252c3,\n\t0x298448,\n\t0x2db34a,\n\t0x22a846,\n\t0x252109,\n\t0x26e488,\n\t0x2dac04,\n\t0x2b2149,\n\t0x2c7588,\n\t0x2b2d07,\n\t0x2f2bc6,\n\t0x2ab647,\n\t0x376d87,\n\t0x24a205,\n\t0x22460c,\n\t0x231845,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x20f882,\n\t0x238543,\n\t0x208e83,\n\t0x200e03,\n\t0x201a03,\n\t0x238543,\n\t0x208e83,\n\t0xe03,\n\t0x231e83,\n\t0x201a03,\n\t0x16fb88,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x208e83,\n\t0xe03,\n\t0x201a03,\n\t0x16fb88,\n\t0x20f882,\n\t0x201742,\n\t0x23c2c2,\n\t0x202542,\n\t0x200542,\n\t0x2e6dc2,\n\t0x4638543,\n\t0x23cac3,\n\t0x21b583,\n\t0x323043,\n\t0x255783,\n\t0x28cac3,\n\t0x2dcd86,\n\t0x208e83,\n\t0x201a03,\n\t0x20bdc3,\n\t0x16fb88,\n\t0x345b44,\n\t0x20da07,\n\t0x2112c3,\n\t0x2b1684,\n\t0x208543,\n\t0x21b843,\n\t0x323043,\n\t0x36dc7,\n\t0x145944,\n\t0xf183,\n\t0x145c05,\n\t0x207102,\n\t0x19c783,\n\t0x5a0f882,\n\t0x1490fc9,\n\t0x9144d,\n\t0x9178d,\n\t0x23c2c2,\n\t0x31604,\n\t0x145c49,\n\t0x200442,\n\t0x5f4ed48,\n\t0xf4544,\n\t0x16fb88,\n\t0x1409702,\n\t0x1510cc6,\n\t0x239283,\n\t0x2bcc43,\n\t0x6638543,\n\t0x23a844,\n\t0x6a3cac3,\n\t0x6f23043,\n\t0x205e82,\n\t0x231604,\n\t0x208e83,\n\t0x301dc3,\n\t0x2014c2,\n\t0x201a03,\n\t0x222dc2,\n\t0x2fabc3,\n\t0x204242,\n\t0x205983,\n\t0x26e54
3,\n\t0x200202,\n\t0x16fb88,\n\t0x239283,\n\t0x301dc3,\n\t0x2014c2,\n\t0x2fabc3,\n\t0x204242,\n\t0x205983,\n\t0x26e543,\n\t0x200202,\n\t0x2fabc3,\n\t0x204242,\n\t0x205983,\n\t0x26e543,\n\t0x200202,\n\t0x238543,\n\t0x39c783,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x231604,\n\t0x255783,\n\t0x28cac3,\n\t0x21bf84,\n\t0x208e83,\n\t0x201a03,\n\t0x20cb02,\n\t0x221483,\n\t0x16fb88,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x39c783,\n\t0x20f882,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x231604,\n\t0x208e83,\n\t0x201a03,\n\t0x355685,\n\t0x21a902,\n\t0x207102,\n\t0x16fb88,\n\t0x1480cc8,\n\t0x323043,\n\t0x20fec1,\n\t0x201641,\n\t0x203c01,\n\t0x201301,\n\t0x267401,\n\t0x2ae601,\n\t0x211341,\n\t0x28a0c1,\n\t0x24dfc1,\n\t0x2fbf81,\n\t0x200141,\n\t0x200001,\n\t0x131645,\n\t0x16fb88,\n\t0x2008c1,\n\t0x201781,\n\t0x200301,\n\t0x200081,\n\t0x200181,\n\t0x200401,\n\t0x200041,\n\t0x2086c1,\n\t0x200101,\n\t0x200281,\n\t0x200801,\n\t0x200981,\n\t0x200441,\n\t0x204101,\n\t0x2227c1,\n\t0x200341,\n\t0x200741,\n\t0x2002c1,\n\t0x2000c1,\n\t0x203441,\n\t0x200201,\n\t0x200c81,\n\t0x2005c1,\n\t0x204541,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x20f882,\n\t0x238543,\n\t0x23cac3,\n\t0x200442,\n\t0x201a03,\n\t0x36dc7,\n\t0x8cbc7,\n\t0x24386,\n\t0x44f4a,\n\t0x906c8,\n\t0x5c288,\n\t0x5c6c7,\n\t0xffc6,\n\t0xe1d45,\n\t0x11205,\n\t0x86286,\n\t0x12cf06,\n\t0x286644,\n\t0x31cf87,\n\t0x16fb88,\n\t0x2de944,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x238543,\n\t0x23cac3,\n\t0x21b583,\n\t0x323043,\n\t0x255783,\n\t0x28cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x21a902,\n\t0x2ba8c3,\n\t0x242043,\n\t0x2cc103,\n\t0x202d42,\n\t0x33eb43,\n\t0x203ec3,\n\t0x20fc03,\n\t0x200001,\n\t0x2ed0c5,\n\t0x203c43,\n\t0x226544,\n\t0x332083,\n\t0x322103,\n\t0x222903,\n\t0x383283,\n\t0xaa38543,\n\t0x240244,\n\t0x24ac83,\n\t0x207583,\n\t0x2228c3,\n\t0x23aa83,\n\t0x23cac3,\n\t0x23c803,\n\t0x202103,\n\t0x2aab03,\n\t0x322083,\n\t0x2bdec3,\n\t0x
20df43,\n\t0x255684,\n\t0x257307,\n\t0x2f6802,\n\t0x25c003,\n\t0x263783,\n\t0x27e983,\n\t0x20fe03,\n\t0x20dec3,\n\t0xaf23043,\n\t0x209ac3,\n\t0x204c03,\n\t0x231603,\n\t0x34bc85,\n\t0x209c83,\n\t0x304d43,\n\t0xb207a83,\n\t0x374803,\n\t0x213643,\n\t0x229443,\n\t0x28cac3,\n\t0x22c2c2,\n\t0x20c0c3,\n\t0x208e83,\n\t0x1600e03,\n\t0x22b1c3,\n\t0x2014c3,\n\t0x21a743,\n\t0x201a03,\n\t0x36ea03,\n\t0x223583,\n\t0x221483,\n\t0x233503,\n\t0x30bcc3,\n\t0x2fad83,\n\t0x317345,\n\t0x20c843,\n\t0x2df706,\n\t0x2fadc3,\n\t0x349703,\n\t0x2205c4,\n\t0x20c9c3,\n\t0x386603,\n\t0x2f1a03,\n\t0x20bdc3,\n\t0x21a902,\n\t0x22fac3,\n\t0x30e403,\n\t0x30fac4,\n\t0x383884,\n\t0x21a5c3,\n\t0x16fb88,\n\t0x207102,\n\t0x200242,\n\t0x202d42,\n\t0x20cac2,\n\t0x201d02,\n\t0x201442,\n\t0x23de42,\n\t0x201842,\n\t0x207b02,\n\t0x201fc2,\n\t0x2281c2,\n\t0x214642,\n\t0x2745c2,\n\t0x20cb42,\n\t0x2e6dc2,\n\t0x21cc82,\n\t0x225b82,\n\t0x204102,\n\t0x2204c2,\n\t0x205842,\n\t0x200482,\n\t0x221dc2,\n\t0x2044c2,\n\t0x20d2c2,\n\t0x200a02,\n\t0x21f542,\n\t0x204782,\n\t0x7102,\n\t0x242,\n\t0x2d42,\n\t0xcac2,\n\t0x1d02,\n\t0x1442,\n\t0x3de42,\n\t0x1842,\n\t0x7b02,\n\t0x1fc2,\n\t0x281c2,\n\t0x14642,\n\t0x745c2,\n\t0xcb42,\n\t0xe6dc2,\n\t0x1cc82,\n\t0x25b82,\n\t0x4102,\n\t0x204c2,\n\t0x5842,\n\t0x482,\n\t0x21dc2,\n\t0x44c2,\n\t0xd2c2,\n\t0xa02,\n\t0x1f542,\n\t0x4782,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x2442,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x20f882,\n\t0x201a03,\n\t0xc638543,\n\t0x323043,\n\t0x28cac3,\n\t0x1a3443,\n\t0x219302,\n\t0x16fb88,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x1a3443,\n\t0x201a03,\n\t0x4542,\n\t0x201c02,\n\t0x1442b45,\n\t0x232282,\n\t0x16fb88,\n\t0xf882,\n\t0x209d82,\n\t0x209b02,\n\t0x20ddc2,\n\t0x2190c2,\n\t0x206802,\n\t0x11205,\n\t0x201282,\n\t0x2014c2,\n\t0x202c82,\n\t0x200dc2,\n\t0x21cc82,\n\t0x3951c2,\n\t0x206742,\n\t0x260a42,\n\t0x36dc7,\n\t0x1501cd,\n\t0xe1dc9,\n\t0x5900b,\n\t0xe5848,\n\t0x56809,\n\t0x106046,\n\t0x
323043,\n\t0x16fb88,\n\t0x145944,\n\t0xf183,\n\t0x145c05,\n\t0x16fb88,\n\t0x5d3c6,\n\t0x145c49,\n\t0x126447,\n\t0x207102,\n\t0x286644,\n\t0x20f882,\n\t0x238543,\n\t0x201742,\n\t0x23cac3,\n\t0x207b02,\n\t0x2de944,\n\t0x255783,\n\t0x253442,\n\t0x208e83,\n\t0x200442,\n\t0x201a03,\n\t0x3a03c6,\n\t0x323d8f,\n\t0x7156c3,\n\t0x16fb88,\n\t0x20f882,\n\t0x21b583,\n\t0x323043,\n\t0x28cac3,\n\t0xe03,\n\t0x152e1cb,\n\t0xe2648,\n\t0x14b7aca,\n\t0x14f5907,\n\t0x8dbcb,\n\t0x149785,\n\t0x36dc7,\n\t0x20f882,\n\t0x238543,\n\t0x323043,\n\t0x208e83,\n\t0x207102,\n\t0x200b42,\n\t0x2092c2,\n\t0xfe38543,\n\t0x248582,\n\t0x23cac3,\n\t0x209c42,\n\t0x20d382,\n\t0x323043,\n\t0x210642,\n\t0x259c42,\n\t0x2aeb02,\n\t0x2006c2,\n\t0x295e02,\n\t0x203102,\n\t0x200782,\n\t0x2351c2,\n\t0x2335c2,\n\t0x252e42,\n\t0x2b5102,\n\t0x2d2942,\n\t0x327982,\n\t0x2111c2,\n\t0x28cac3,\n\t0x200802,\n\t0x208e83,\n\t0x24d382,\n\t0x289e82,\n\t0x201a03,\n\t0x2485c2,\n\t0x20d2c2,\n\t0x221382,\n\t0x200742,\n\t0x204d02,\n\t0x2e6282,\n\t0x22be42,\n\t0x231802,\n\t0x2312c2,\n\t0x3195ca,\n\t0x35c50a,\n\t0x39090a,\n\t0x3c1382,\n\t0x208a82,\n\t0x212a42,\n\t0x10223fc9,\n\t0x1072c38a,\n\t0x1438547,\n\t0x10a02482,\n\t0x1416dc3,\n\t0x12c2,\n\t0x12c38a,\n\t0x252044,\n\t0x11238543,\n\t0x23cac3,\n\t0x253384,\n\t0x323043,\n\t0x231604,\n\t0x255783,\n\t0x28cac3,\n\t0x208e83,\n\t0xe3bc5,\n\t0x200e03,\n\t0x201a03,\n\t0x20c843,\n\t0x202443,\n\t0x16fb88,\n\t0x140ff44,\n\t0x1441c5,\n\t0x12620a,\n\t0x11ec42,\n\t0x1affc6,\n\t0x35ad1,\n\t0x11a23fc9,\n\t0x144248,\n\t0x10b388,\n\t0x8cf47,\n\t0xbc2,\n\t0x13164b,\n\t0x1b320a,\n\t0x71ca,\n\t0x26547,\n\t0x16fb88,\n\t0x114008,\n\t0x14507,\n\t0x17c2198b,\n\t0x23087,\n\t0xc702,\n\t0x5b907,\n\t0x1920a,\n\t0x8cc4f,\n\t0x4f70f,\n\t0x22902,\n\t0xf882,\n\t0xaaa48,\n\t0xe228a,\n\t0x6a08,\n\t0x64b88,\n\t0xdfbc8,\n\t0x4c82,\n\t0x42bcf,\n\t0xa670b,\n\t0xf8d08,\n\t0x3e607,\n\t0x185b8a,\n\t0x3af8b,\n\t0x57f89,\n\t0x185a87,\n\t0x6908,\n\t0x1089cc,\n\t0x81a87,\n\t0x1a800a,\n\t0xdd088,\n\t0x1aafce,\n\t0x2438e,\n\t0x263
8b,\n\t0x27bcb,\n\t0x2920b,\n\t0x2c049,\n\t0x2ff8b,\n\t0x31ccd,\n\t0x329cb,\n\t0x62b4d,\n\t0x62ecd,\n\t0xfa44a,\n\t0x1836cb,\n\t0x3b64b,\n\t0x47085,\n\t0x1802cc10,\n\t0x12d40f,\n\t0x12db4f,\n\t0x37a4d,\n\t0xbf490,\n\t0xc182,\n\t0x18623a08,\n\t0x8ca48,\n\t0x18af52c5,\n\t0x52a0b,\n\t0x11f3d0,\n\t0x5ad08,\n\t0x6b0a,\n\t0x27d89,\n\t0x6b307,\n\t0x6b647,\n\t0x6b807,\n\t0x6bb87,\n\t0x6ca87,\n\t0x6d487,\n\t0x6ddc7,\n\t0x6e187,\n\t0x6f187,\n\t0x6f487,\n\t0x70147,\n\t0x70307,\n\t0x704c7,\n\t0x70687,\n\t0x70987,\n\t0x70e47,\n\t0x71707,\n\t0x72007,\n\t0x72c87,\n\t0x731c7,\n\t0x73387,\n\t0x73707,\n\t0x74487,\n\t0x74687,\n\t0x750c7,\n\t0x75287,\n\t0x75447,\n\t0x75dc7,\n\t0x76087,\n\t0x77a47,\n\t0x78187,\n\t0x78447,\n\t0x78bc7,\n\t0x78d87,\n\t0x79187,\n\t0x79687,\n\t0x79907,\n\t0x79d07,\n\t0x79ec7,\n\t0x7a087,\n\t0x7ae07,\n\t0x7c447,\n\t0x7c987,\n\t0x7cc87,\n\t0x7ce47,\n\t0x7d1c7,\n\t0x7d787,\n\t0x13c42,\n\t0x64c8a,\n\t0xe90c7,\n\t0x287c5,\n\t0x806d1,\n\t0x157c6,\n\t0x11318a,\n\t0xaa8ca,\n\t0x5d3c6,\n\t0xb880b,\n\t0x17202,\n\t0x3a1d1,\n\t0x1bbc89,\n\t0x9c0c9,\n\t0x351c2,\n\t0xa808a,\n\t0xac7c9,\n\t0xacf0f,\n\t0xada4e,\n\t0xae208,\n\t0x206c2,\n\t0xb649,\n\t0x1025ce,\n\t0xe8b4c,\n\t0xf328f,\n\t0x1a5b4e,\n\t0x1684c,\n\t0x18009,\n\t0x1c291,\n\t0x1f108,\n\t0x2ac92,\n\t0x2bb4d,\n\t0x33c4d,\n\t0x15208b,\n\t0x41cd5,\n\t0x164ec9,\n\t0xfcf8a,\n\t0x40809,\n\t0x4d650,\n\t0x4e70b,\n\t0x5898f,\n\t0x6390b,\n\t0x7298c,\n\t0x77650,\n\t0x8430a,\n\t0x853cd,\n\t0x894ce,\n\t0x8ef4a,\n\t0xede0c,\n\t0x176a54,\n\t0x1bb911,\n\t0x95a8b,\n\t0x97fcf,\n\t0xa290d,\n\t0xa76ce,\n\t0xb2bcc,\n\t0xb330c,\n\t0x160b0b,\n\t0x160e0e,\n\t0xd6750,\n\t0x11868b,\n\t0x1876cd,\n\t0x1bce4f,\n\t0xba0cc,\n\t0xbb0ce,\n\t0xbc011,\n\t0xc7c4c,\n\t0xc9307,\n\t0xc9c0d,\n\t0x130d4c,\n\t0x1605d0,\n\t0x174c0d,\n\t0xd1b47,\n\t0xd7c10,\n\t0xdd6c8,\n\t0xf178b,\n\t0x134c4f,\n\t0x3ef48,\n\t0x11338d,\n\t0x15c750,\n\t0x172e49,\n\t0x18e086c6,\n\t0xb8243,\n\t0xbc445,\n\t0x9a02,\n\t0x143889,\n\t0x5e04a,\n\t0x10fb06,\n\t0x2594a,\n\t0x1900c949,\n\t
0x1c003,\n\t0xdebd1,\n\t0xdf009,\n\t0xe0407,\n\t0x35c4b,\n\t0xe67d0,\n\t0xe6c8c,\n\t0xe8e48,\n\t0xe9805,\n\t0xb988,\n\t0x1ad4ca,\n\t0x1c0c7,\n\t0x16bac7,\n\t0x982,\n\t0x12bcca,\n\t0x12e7c9,\n\t0x79545,\n\t0x402ca,\n\t0x9260f,\n\t0x4b8cb,\n\t0x14bd4c,\n\t0x17a492,\n\t0x94e45,\n\t0xec1c8,\n\t0x17618a,\n\t0x196f3d05,\n\t0x190ecc,\n\t0x129ac3,\n\t0x1951c2,\n\t0xfb30a,\n\t0x14fb70c,\n\t0x14f508,\n\t0x62d08,\n\t0x36d47,\n\t0xb282,\n\t0x4242,\n\t0x47590,\n\t0xa02,\n\t0x3904f,\n\t0x86286,\n\t0x7c0e,\n\t0xebbcb,\n\t0x8f148,\n\t0xda049,\n\t0x18f052,\n\t0x95cd,\n\t0x586c8,\n\t0x58ec9,\n\t0x5d50d,\n\t0x5e4c9,\n\t0x5e88b,\n\t0x60648,\n\t0x65808,\n\t0x65b88,\n\t0x65e49,\n\t0x6604a,\n\t0x6a98c,\n\t0xeb04a,\n\t0x10bd07,\n\t0x1f54d,\n\t0xfde8b,\n\t0x12004c,\n\t0x404c8,\n\t0x4f049,\n\t0x1b01d0,\n\t0xc2,\n\t0x2d3cd,\n\t0x2642,\n\t0x2cc2,\n\t0x10bc4a,\n\t0x11308a,\n\t0x11438b,\n\t0x3b80c,\n\t0x113b0a,\n\t0x113d8e,\n\t0xf2cd,\n\t0x11d708,\n\t0x4542,\n\t0x11f46c0e,\n\t0x1260ee4e,\n\t0x12f43f8a,\n\t0x1373a14e,\n\t0x13f9d38e,\n\t0x1460138c,\n\t0x1438547,\n\t0x1438549,\n\t0x1416dc3,\n\t0x14e3700c,\n\t0x15707789,\n\t0x15f3b509,\n\t0x12c2,\n\t0x146b51,\n\t0xed91,\n\t0x143ecd,\n\t0x13a091,\n\t0x19d2d1,\n\t0x12cf,\n\t0x36f4f,\n\t0x1076cc,\n\t0x13b44c,\n\t0x18954d,\n\t0x1b5295,\n\t0x10ed8c,\n\t0xea88c,\n\t0x122ed0,\n\t0x158fcc,\n\t0x16d9cc,\n\t0x191819,\n\t0x1a83d9,\n\t0x1aa459,\n\t0x1b3e94,\n\t0x1b8ad4,\n\t0x1c0d14,\n\t0x2394,\n\t0x3754,\n\t0x1670ee49,\n\t0x16dc0fc9,\n\t0x176ea949,\n\t0x1221f309,\n\t0x12c2,\n\t0x12a1f309,\n\t0x12c2,\n\t0x238a,\n\t0x12c2,\n\t0x1321f309,\n\t0x12c2,\n\t0x238a,\n\t0x12c2,\n\t0x13a1f309,\n\t0x12c2,\n\t0x1421f309,\n\t0x12c2,\n\t0x14a1f309,\n\t0x12c2,\n\t0x238a,\n\t0x12c2,\n\t0x1521f309,\n\t0x12c2,\n\t0x238a,\n\t0x12c2,\n\t0x15a1f309,\n\t0x12c2,\n\t0x1621f309,\n\t0x12c2,\n\t0x238a,\n\t0x12c2,\n\t0x16a1f309,\n\t0x12c2,\n\t0x1721f309,\n\t0x12c2,\n\t0x17a1f309,\n\t0x12c2,\n\t0x238a,\n\t0x12c2,\n\t0x35ac5,\n\t0x1b3204,\n\t0x146c0e,\n\t0xee4e,\n\t0x143f8a,\n\t0x13a14e,\n\t
0x19d38e,\n\t0x138c,\n\t0x3700c,\n\t0x107789,\n\t0x13b509,\n\t0x10ee49,\n\t0x1c0fc9,\n\t0xea949,\n\t0x122f8d,\n\t0x2649,\n\t0x3a09,\n\t0x5bf04,\n\t0x11d8c4,\n\t0x126144,\n\t0x15f784,\n\t0x8de84,\n\t0x4b744,\n\t0x6e44,\n\t0x67344,\n\t0x8cf44,\n\t0x157e2c3,\n\t0xc182,\n\t0xf2c3,\n\t0x4c82,\n\t0x207102,\n\t0x20f882,\n\t0x201742,\n\t0x207602,\n\t0x207b02,\n\t0x200442,\n\t0x204242,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x231603,\n\t0x208e83,\n\t0x201a03,\n\t0x16fb88,\n\t0x238543,\n\t0x23cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x160c3,\n\t0x323043,\n\t0x31604,\n\t0x207102,\n\t0x39c783,\n\t0x1b638543,\n\t0x2bf347,\n\t0x323043,\n\t0x211a83,\n\t0x21bf84,\n\t0x208e83,\n\t0x201a03,\n\t0x243d0a,\n\t0x3a03c5,\n\t0x221483,\n\t0x205082,\n\t0x16fb88,\n\t0x16fb88,\n\t0xf882,\n\t0x127482,\n\t0x1bf51b0b,\n\t0x5ba45,\n\t0x35dc5,\n\t0x114b46,\n\t0x145944,\n\t0xf183,\n\t0x145c05,\n\t0x131645,\n\t0x16fb88,\n\t0x23087,\n\t0x38543,\n\t0x1c644d87,\n\t0x1432c6,\n\t0x1c93b345,\n\t0x143387,\n\t0x1b4d0a,\n\t0x1b4bc8,\n\t0x11887,\n\t0x6df88,\n\t0x99707,\n\t0x152cf,\n\t0x435c7,\n\t0x150d86,\n\t0x11f3d0,\n\t0x12a58f,\n\t0x20a89,\n\t0x10fb84,\n\t0x1cd4344e,\n\t0xb098c,\n\t0x5810a,\n\t0xa7987,\n\t0x3520a,\n\t0xbb49,\n\t0xb514c,\n\t0x4304a,\n\t0x5ec8a,\n\t0x145c49,\n\t0x10fb06,\n\t0xa7a4a,\n\t0xe8a,\n\t0xa4e49,\n\t0xde488,\n\t0xde786,\n\t0xe284d,\n\t0xbc8c5,\n\t0x126447,\n\t0x1019c9,\n\t0xf72c7,\n\t0xb5ed4,\n\t0x103acb,\n\t0xf8b4a,\n\t0xab10d,\n\t0xd3c3,\n\t0xd3c3,\n\t0x24386,\n\t0xd3c3,\n\t0x19c783,\n\t0x16fb88,\n\t0xf882,\n\t0x53384,\n\t0x5f843,\n\t0x155685,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x203ec3,\n\t0x238543,\n\t0x23cac3,\n\t0x21b583,\n\t0x323043,\n\t0x28cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x29c283,\n\t0x202443,\n\t0x203ec3,\n\t0x286644,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x206683,\n\t0x238543,\n\t0x23cac3,\n\t0x207603,\n\t0x21b583,\n\t0x323043,\n\t0x231604,\n\t0x3797c3,\n\t0x229443,\n\t0x28cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x221
483,\n\t0x36a883,\n\t0x1ea38543,\n\t0x23cac3,\n\t0x250ac3,\n\t0x323043,\n\t0x212143,\n\t0x229443,\n\t0x201a03,\n\t0x204103,\n\t0x35f584,\n\t0x16fb88,\n\t0x1f238543,\n\t0x23cac3,\n\t0x2ae2c3,\n\t0x323043,\n\t0x28cac3,\n\t0x21bf84,\n\t0x208e83,\n\t0x201a03,\n\t0x20e943,\n\t0x16fb88,\n\t0x1fa38543,\n\t0x23cac3,\n\t0x21b583,\n\t0x200e03,\n\t0x201a03,\n\t0x16fb88,\n\t0x1438547,\n\t0x39c783,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x231604,\n\t0x21bf84,\n\t0x208e83,\n\t0x201a03,\n\t0x131645,\n\t0x36dc7,\n\t0xb610b,\n\t0xdf404,\n\t0xbc8c5,\n\t0x1480cc8,\n\t0xae90d,\n\t0x20e6c505,\n\t0x7bd44,\n\t0x10c3,\n\t0x172d45,\n\t0x33b145,\n\t0x16fb88,\n\t0xd3c2,\n\t0x2bc3,\n\t0xf9306,\n\t0x31f948,\n\t0x3347c7,\n\t0x286644,\n\t0x39c286,\n\t0x3b5146,\n\t0x16fb88,\n\t0x2ddac3,\n\t0x342a49,\n\t0x26d615,\n\t0x6d61f,\n\t0x238543,\n\t0x3b3a52,\n\t0xf6306,\n\t0x114dc5,\n\t0x6b0a,\n\t0x27d89,\n\t0x3b380f,\n\t0x2de944,\n\t0x3490c5,\n\t0x304b10,\n\t0x34e347,\n\t0x200e03,\n\t0x293408,\n\t0x12ce46,\n\t0x29630a,\n\t0x230f04,\n\t0x2f3743,\n\t0x3a03c6,\n\t0x205082,\n\t0x22facb,\n\t0xe03,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x2f9a03,\n\t0x20f882,\n\t0x6ed43,\n\t0x208e83,\n\t0x201a03,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x201a03,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x211a83,\n\t0x228243,\n\t0x201a03,\n\t0x20f882,\n\t0x238543,\n\t0x23cac3,\n\t0x208e83,\n\t0xe03,\n\t0x201a03,\n\t0x207102,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x35dc5,\n\t0x286644,\n\t0x238543,\n\t0x23cac3,\n\t0x20f644,\n\t0x208e83,\n\t0x201a03,\n\t0x16fb88,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x1a3443,\n\t0x201a03,\n\t0x238543,\n\t0x23cac3,\n\t0x21b583,\n\t0x204c03,\n\t0x28cac3,\n\t0x208e83,\n\t0xe03,\n\t0x201a03,\n\t0x20f882,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x16fb88,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x210543,\n\t0x707c3,\n\t0x11a83,\n\t0x208e83,\n\t0x201a03,\n\t0x3195ca,\n
\t0x335289,\n\t0x35438b,\n\t0x35490a,\n\t0x35c50a,\n\t0x369bcb,\n\t0x38274a,\n\t0x38b38a,\n\t0x39090a,\n\t0x390b8b,\n\t0x3ad209,\n\t0x3af10a,\n\t0x3af7cb,\n\t0x3b978b,\n\t0x3bfb4a,\n\t0x238543,\n\t0x23cac3,\n\t0x21b583,\n\t0x28cac3,\n\t0x208e83,\n\t0xe03,\n\t0x201a03,\n\t0x35dcb,\n\t0x651c8,\n\t0x1174c9,\n\t0x16fb88,\n\t0x238543,\n\t0x26b304,\n\t0x20b342,\n\t0x21bf84,\n\t0x346145,\n\t0x203ec3,\n\t0x286644,\n\t0x238543,\n\t0x240244,\n\t0x23cac3,\n\t0x253384,\n\t0x2de944,\n\t0x231604,\n\t0x229443,\n\t0x208e83,\n\t0x201a03,\n\t0x22d585,\n\t0x206683,\n\t0x221483,\n\t0x20ec43,\n\t0x231944,\n\t0x20fe84,\n\t0x2cc105,\n\t0x16fb88,\n\t0x30dc84,\n\t0x36bdc6,\n\t0x281384,\n\t0x20f882,\n\t0x381107,\n\t0x254d87,\n\t0x251844,\n\t0x260105,\n\t0x374e05,\n\t0x2b13c5,\n\t0x231604,\n\t0x2cf6c8,\n\t0x23eb46,\n\t0x3bffc8,\n\t0x257cc5,\n\t0x2e4505,\n\t0x263544,\n\t0x201a03,\n\t0x2f4544,\n\t0x368dc6,\n\t0x3a04c3,\n\t0x231944,\n\t0x280bc5,\n\t0x2e4ac4,\n\t0x34da44,\n\t0x205082,\n\t0x2669c6,\n\t0x3a2906,\n\t0x30a185,\n\t0x207102,\n\t0x39c783,\n\t0x2760f882,\n\t0x223b84,\n\t0x207b02,\n\t0x28cac3,\n\t0x200e82,\n\t0x208e83,\n\t0x200442,\n\t0x215443,\n\t0x202443,\n\t0x16fb88,\n\t0x16fb88,\n\t0x323043,\n\t0x207102,\n\t0x2820f882,\n\t0x323043,\n\t0x270443,\n\t0x3797c3,\n\t0x32e5c4,\n\t0x208e83,\n\t0x201a03,\n\t0x16fb88,\n\t0x207102,\n\t0x28a0f882,\n\t0x238543,\n\t0x208e83,\n\t0xe03,\n\t0x201a03,\n\t0x482,\n\t0x208882,\n\t0x21a902,\n\t0x211a83,\n\t0x2ef783,\n\t0x207102,\n\t0x131645,\n\t0x16fb88,\n\t0x36dc7,\n\t0x20f882,\n\t0x23cac3,\n\t0x253384,\n\t0x2020c3,\n\t0x323043,\n\t0x204c03,\n\t0x28cac3,\n\t0x208e83,\n\t0x21eb43,\n\t0x201a03,\n\t0x2252c3,\n\t0x122213,\n\t0x124cd4,\n\t0x36dc7,\n\t0x139986,\n\t0x5e24b,\n\t0x24386,\n\t0x5c0c7,\n\t0x120589,\n\t0xe838a,\n\t0x9058d,\n\t0x14fecc,\n\t0x3954a,\n\t0x11205,\n\t0x1b4d48,\n\t0x86286,\n\t0x31586,\n\t0x12cf06,\n\t0x20c182,\n\t0x10b14c,\n\t0x1b33c7,\n\t0x2a691,\n\t0x238543,\n\t0x6df05,\n\t0x7588,\n\t0x18ec4,\n\t0x29cbe1c6,\n\t0x806c6,\n\t0xb9a06,\n\t0x96
0ca,\n\t0xb4003,\n\t0x2a24c984,\n\t0xe8345,\n\t0x18e43,\n\t0x2a63dc47,\n\t0xe3bc5,\n\t0xb88cc,\n\t0xf7a88,\n\t0xbd248,\n\t0xa6589,\n\t0x14dc08,\n\t0x1425886,\n\t0x2ab71549,\n\t0x14978a,\n\t0x16308,\n\t0x114b48,\n\t0x8cf44,\n\t0xb5ac5,\n\t0x2ae42bc3,\n\t0x2b332106,\n\t0x2b6f4dc4,\n\t0x2bb39d87,\n\t0x114b44,\n\t0x114b44,\n\t0x114b44,\n\t0x114b44,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x207102,\n\t0x20f882,\n\t0x323043,\n\t0x205e82,\n\t0x208e83,\n\t0x201a03,\n\t0x215443,\n\t0x373ccf,\n\t0x37408e,\n\t0x16fb88,\n\t0x238543,\n\t0x4db87,\n\t0x23cac3,\n\t0x323043,\n\t0x255783,\n\t0x208e83,\n\t0x201a03,\n\t0x20d4c3,\n\t0x20d4c7,\n\t0x200142,\n\t0x2ce609,\n\t0x200242,\n\t0x24788b,\n\t0x2c110a,\n\t0x2c67c9,\n\t0x201242,\n\t0x2100c6,\n\t0x26cd95,\n\t0x2479d5,\n\t0x275793,\n\t0x247f53,\n\t0x201d42,\n\t0x212c45,\n\t0x31d44c,\n\t0x27c6cb,\n\t0x29c705,\n\t0x20cac2,\n\t0x28e142,\n\t0x384c06,\n\t0x200bc2,\n\t0x3acc46,\n\t0x2dd20d,\n\t0x26540c,\n\t0x22cc84,\n\t0x200f82,\n\t0x203402,\n\t0x22b048,\n\t0x201d02,\n\t0x20a746,\n\t0x28bf04,\n\t0x26cf55,\n\t0x275913,\n\t0x216d03,\n\t0x33844a,\n\t0x205407,\n\t0x3145c9,\n\t0x38d4c7,\n\t0x20d342,\n\t0x200002,\n\t0x3ba886,\n\t0x212702,\n\t0x16fb88,\n\t0x216b42,\n\t0x201102,\n\t0x27f847,\n\t0x217387,\n\t0x222d85,\n\t0x20c702,\n\t0x225287,\n\t0x225448,\n\t0x2024c2,\n\t0x2430c2,\n\t0x237302,\n\t0x201382,\n\t0x242688,\n\t0x20a043,\n\t0x25fa08,\n\t0x2e9b0d,\n\t0x2322c3,\n\t0x32ec08,\n\t0x245f4f,\n\t0x24630e,\n\t0x339a4a,\n\t0x22e811,\n\t0x22ec90,\n\t0x2c34cd,\n\t0x2c380c,\n\t0x36a707,\n\t0x3385c7,\n\t0x39c349,\n\t0x20d302,\n\t0x201442,\n\t0x25db0c,\n\t0x25de0b,\n\t0x2008c2,\n\t0x360cc6,\n\t0x20e982,\n\t0x204882,\n\t0x222902,\n\t0x20f882,\n\t0x3b69c4,\n\t0x244387,\n\t0x229682,\n\t0x24a347,\n\t0x24b547,\n\t0x20d282,\n\t0x20c8c2,\n\t0x24da45,\n\t0x21a442,\n\t0x2f290e,\n\t0x2ab3cd,\n\t0x23cac3,\n\t0x28d58e,\n\t0x2c5c0d,\n\t0x25ac43,\n\t0x201482,\n\t0x2891c4,\n\t0x216582,\n\t0x20fac2,\n\t0x364145,\n\t0x373587,\n\t
0x393202,\n\t0x207602,\n\t0x252f87,\n\t0x255ac8,\n\t0x2f6802,\n\t0x294ec6,\n\t0x25d98c,\n\t0x25dccb,\n\t0x206b02,\n\t0x26764f,\n\t0x267a10,\n\t0x267e0f,\n\t0x2681d5,\n\t0x268714,\n\t0x268c0e,\n\t0x268f8e,\n\t0x26930f,\n\t0x2696ce,\n\t0x269a54,\n\t0x269f53,\n\t0x26a40d,\n\t0x27d949,\n\t0x291ac3,\n\t0x201802,\n\t0x2b7505,\n\t0x206346,\n\t0x207b02,\n\t0x3a4ec7,\n\t0x323043,\n\t0x217202,\n\t0x37e548,\n\t0x22ea51,\n\t0x22ee90,\n\t0x2007c2,\n\t0x290e07,\n\t0x204182,\n\t0x332b07,\n\t0x209a02,\n\t0x342089,\n\t0x384bc7,\n\t0x27ac08,\n\t0x2be006,\n\t0x2ef683,\n\t0x339205,\n\t0x2022c2,\n\t0x207a82,\n\t0x3bac85,\n\t0x391345,\n\t0x204bc2,\n\t0x231043,\n\t0x2e4b47,\n\t0x205747,\n\t0x200502,\n\t0x25f1c4,\n\t0x211b83,\n\t0x211b89,\n\t0x215148,\n\t0x200282,\n\t0x202942,\n\t0x242387,\n\t0x263285,\n\t0x2ad208,\n\t0x215c87,\n\t0x21a243,\n\t0x294c86,\n\t0x2c334d,\n\t0x2c36cc,\n\t0x2c8346,\n\t0x209b02,\n\t0x20c202,\n\t0x204a82,\n\t0x245dcf,\n\t0x2461ce,\n\t0x374e87,\n\t0x20b302,\n\t0x2c72c5,\n\t0x2c72c6,\n\t0x214702,\n\t0x200802,\n\t0x228246,\n\t0x2b57c3,\n\t0x332a46,\n\t0x2d0285,\n\t0x2d028d,\n\t0x2d0855,\n\t0x2d108c,\n\t0x2d1e4d,\n\t0x2d2212,\n\t0x214642,\n\t0x2745c2,\n\t0x202ec2,\n\t0x249386,\n\t0x302486,\n\t0x200982,\n\t0x2063c6,\n\t0x202c82,\n\t0x39b505,\n\t0x200542,\n\t0x2ab4c9,\n\t0x2e324c,\n\t0x2e358b,\n\t0x200442,\n\t0x257708,\n\t0x2052c2,\n\t0x20cb42,\n\t0x278ec6,\n\t0x21f285,\n\t0x36c107,\n\t0x24bc85,\n\t0x28ea05,\n\t0x235d82,\n\t0x219a42,\n\t0x21cc82,\n\t0x2f3587,\n\t0x2613cd,\n\t0x26174c,\n\t0x317947,\n\t0x2235c2,\n\t0x225b82,\n\t0x23f688,\n\t0x343a08,\n\t0x34c008,\n\t0x313344,\n\t0x361087,\n\t0x2efc43,\n\t0x299842,\n\t0x206682,\n\t0x2f2149,\n\t0x3ab3c7,\n\t0x204102,\n\t0x2792c5,\n\t0x22fa42,\n\t0x236902,\n\t0x35dc83,\n\t0x35dc86,\n\t0x2f9a02,\n\t0x2fab42,\n\t0x200c02,\n\t0x281e06,\n\t0x345607,\n\t0x221282,\n\t0x206b42,\n\t0x25f84f,\n\t0x28d3cd,\n\t0x3029ce,\n\t0x2c5a8c,\n\t0x201a42,\n\t0x204142,\n\t0x2bde45,\n\t0x317e46,\n\t0x209002,\n\t0x205842,\n\t0x200482,\n\t0x215c04,\n
\t0x2e9984,\n\t0x2b8706,\n\t0x204242,\n\t0x37d6c7,\n\t0x233803,\n\t0x233808,\n\t0x33cb48,\n\t0x240687,\n\t0x249286,\n\t0x202502,\n\t0x242603,\n\t0x351107,\n\t0x26ffc6,\n\t0x2e2d05,\n\t0x3136c8,\n\t0x206182,\n\t0x337547,\n\t0x21f542,\n\t0x332182,\n\t0x207f02,\n\t0x2e95c9,\n\t0x23b442,\n\t0x2018c2,\n\t0x248383,\n\t0x377787,\n\t0x2002c2,\n\t0x2e33cc,\n\t0x2e36cb,\n\t0x2c83c6,\n\t0x218d85,\n\t0x22a202,\n\t0x204782,\n\t0x2c1486,\n\t0x237e83,\n\t0x378407,\n\t0x243cc2,\n\t0x200d42,\n\t0x26cc15,\n\t0x247b95,\n\t0x275653,\n\t0x2480d3,\n\t0x2955c7,\n\t0x2c0ec8,\n\t0x379d90,\n\t0x3c020f,\n\t0x2c0ed3,\n\t0x2c6592,\n\t0x2ce1d0,\n\t0x2db58f,\n\t0x2dc512,\n\t0x2dffd1,\n\t0x2e0cd3,\n\t0x2e9392,\n\t0x2ea0cf,\n\t0x2f7c4e,\n\t0x2f9a92,\n\t0x2faed1,\n\t0x303e4f,\n\t0x347a4e,\n\t0x3559d1,\n\t0x2fee10,\n\t0x32f912,\n\t0x36fd51,\n\t0x3af4c6,\n\t0x30dd47,\n\t0x382ac7,\n\t0x203702,\n\t0x286d05,\n\t0x304887,\n\t0x21a902,\n\t0x218f42,\n\t0x230d85,\n\t0x226c43,\n\t0x244c06,\n\t0x26158d,\n\t0x2618cc,\n\t0x206442,\n\t0x31d2cb,\n\t0x27c58a,\n\t0x212b0a,\n\t0x2c04c9,\n\t0x2f0c0b,\n\t0x215dcd,\n\t0x304f8c,\n\t0x2f574a,\n\t0x277bcc,\n\t0x27d34b,\n\t0x29c54c,\n\t0x2b4c0b,\n\t0x2e31c3,\n\t0x36f946,\n\t0x3061c2,\n\t0x2fd502,\n\t0x256d03,\n\t0x203642,\n\t0x203643,\n\t0x260b86,\n\t0x268387,\n\t0x2c48c6,\n\t0x2e2448,\n\t0x343708,\n\t0x2cc7c6,\n\t0x20c402,\n\t0x309b4d,\n\t0x309e8c,\n\t0x2dea07,\n\t0x30db47,\n\t0x2302c2,\n\t0x221682,\n\t0x260982,\n\t0x255e82,\n\t0x20f882,\n\t0x208e83,\n\t0x201a03,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x21bf84,\n\t0x208e83,\n\t0x201a03,\n\t0x215443,\n\t0x207102,\n\t0x207542,\n\t0x2da97d45,\n\t0x2de97685,\n\t0x2e320c86,\n\t0x16fb88,\n\t0x2e6b68c5,\n\t0x20f882,\n\t0x201742,\n\t0x2ea34cc5,\n\t0x2ee852c5,\n\t0x2f285e07,\n\t0x2f6f6e09,\n\t0x2fa74084,\n\t0x207b02,\n\t0x217202,\n\t0x2fe56a05,\n\t0x302977c9,\n\t0x30785908,\n\t0x30ab3185,\n\t0x30f3f5c7,\n\t0x31227248,\n\t0x316ec085,\n\t0x31a00106,\n\t0x31e41489,\n\t0x323311c8,\n\t0x326c8988,\n\t0x32a9ef0a,\n\t0x32e7
e204,\n\t0x332d99c5,\n\t0x336c30c8,\n\t0x33b85d85,\n\t0x21a602,\n\t0x33e11103,\n\t0x342aa246,\n\t0x3475d1c8,\n\t0x34a8ab86,\n\t0x34e8a688,\n\t0x35348206,\n\t0x356e2dc4,\n\t0x204d42,\n\t0x35addc87,\n\t0x35eaf444,\n\t0x36280087,\n\t0x367b0c87,\n\t0x200442,\n\t0x36aa3885,\n\t0x36e8f904,\n\t0x372f1447,\n\t0x37632c47,\n\t0x37a89006,\n\t0x37e38385,\n\t0x3829d7c7,\n\t0x386d5dc8,\n\t0x38ab7887,\n\t0x38ea6c89,\n\t0x3939e345,\n\t0x397778c7,\n\t0x39a974c6,\n\t0x39e102c8,\n\t0x3279cd,\n\t0x27a209,\n\t0x28384b,\n\t0x289ecb,\n\t0x2ae3cb,\n\t0x2e62cb,\n\t0x31804b,\n\t0x31830b,\n\t0x318949,\n\t0x31984b,\n\t0x319b0b,\n\t0x31a08b,\n\t0x31b08a,\n\t0x31b5ca,\n\t0x31bbcc,\n\t0x31e00b,\n\t0x31ea4a,\n\t0x33064a,\n\t0x33c6ce,\n\t0x33d1ce,\n\t0x33d54a,\n\t0x33efca,\n\t0x33fa8b,\n\t0x33fd4b,\n\t0x340b0b,\n\t0x36124b,\n\t0x36184a,\n\t0x36250b,\n\t0x3627ca,\n\t0x362a4a,\n\t0x362cca,\n\t0x38424b,\n\t0x38c6cb,\n\t0x38e64e,\n\t0x38e9cb,\n\t0x39464b,\n\t0x395b0b,\n\t0x39900a,\n\t0x399289,\n\t0x3994ca,\n\t0x39a94a,\n\t0x3addcb,\n\t0x3afa8b,\n\t0x3b05ca,\n\t0x3b1fcb,\n\t0x3b674b,\n\t0x3bf58b,\n\t0x3a287a88,\n\t0x3a68fd09,\n\t0x3aaa6409,\n\t0x3aee4d48,\n\t0x34b945,\n\t0x202d43,\n\t0x21b744,\n\t0x345805,\n\t0x273dc6,\n\t0x274805,\n\t0x28f584,\n\t0x3a4dc8,\n\t0x312ec5,\n\t0x299a84,\n\t0x211587,\n\t0x2a550a,\n\t0x3813ca,\n\t0x308f07,\n\t0x202c47,\n\t0x303647,\n\t0x271907,\n\t0x2ff9c5,\n\t0x204906,\n\t0x22b9c7,\n\t0x2c8684,\n\t0x2db006,\n\t0x2daf06,\n\t0x208185,\n\t0x331c04,\n\t0x388bc6,\n\t0x2a4707,\n\t0x232646,\n\t0x2bfa07,\n\t0x232dc3,\n\t0x26c7c6,\n\t0x23cf85,\n\t0x285f07,\n\t0x27100a,\n\t0x284e04,\n\t0x220808,\n\t0x2a2009,\n\t0x2d0e47,\n\t0x31e8c6,\n\t0x257988,\n\t0x28b2c9,\n\t0x314784,\n\t0x376004,\n\t0x35d785,\n\t0x22b6c8,\n\t0x2ccc07,\n\t0x29a3c9,\n\t0x3af5c8,\n\t0x353706,\n\t0x24d486,\n\t0x29fd88,\n\t0x365bc6,\n\t0x297685,\n\t0x2890c6,\n\t0x280ec8,\n\t0x256286,\n\t0x25cb8b,\n\t0x2ac646,\n\t0x2a224d,\n\t0x208605,\n\t0x2af306,\n\t0x218a05,\n\t0x35d949,\n\t0x27a787,\n\t0x36d148,\n\t0x2969c6,\n\t0x2
a1509,\n\t0x341046,\n\t0x270f85,\n\t0x2a7f06,\n\t0x2d3586,\n\t0x2d3b09,\n\t0x333f06,\n\t0x3529c7,\n\t0x248c85,\n\t0x201d83,\n\t0x25cd05,\n\t0x2a2507,\n\t0x338d06,\n\t0x208509,\n\t0x320c86,\n\t0x289306,\n\t0x219fc9,\n\t0x288ac9,\n\t0x2a8747,\n\t0x20cd08,\n\t0x280509,\n\t0x286988,\n\t0x38b5c6,\n\t0x2de245,\n\t0x23fa4a,\n\t0x289386,\n\t0x2bf1c6,\n\t0x2d7605,\n\t0x272408,\n\t0x2220c7,\n\t0x239fca,\n\t0x253b46,\n\t0x27a645,\n\t0x20a506,\n\t0x236b47,\n\t0x31e787,\n\t0x24fc45,\n\t0x271145,\n\t0x2e79c6,\n\t0x2fbfc6,\n\t0x2be306,\n\t0x2bb884,\n\t0x287e09,\n\t0x290bc6,\n\t0x2d430a,\n\t0x222b88,\n\t0x3059c8,\n\t0x3813ca,\n\t0x205b45,\n\t0x2a4645,\n\t0x3575c8,\n\t0x2b0fc8,\n\t0x2b43c7,\n\t0x295946,\n\t0x329608,\n\t0x30a447,\n\t0x287088,\n\t0x2bbec6,\n\t0x289b88,\n\t0x29cd06,\n\t0x257e47,\n\t0x2a27c6,\n\t0x388bc6,\n\t0x383d4a,\n\t0x345506,\n\t0x2de249,\n\t0x36b086,\n\t0x2b6c0a,\n\t0x2e2dc9,\n\t0x2fe406,\n\t0x2bccc4,\n\t0x2b75cd,\n\t0x28ff87,\n\t0x32df46,\n\t0x2c8845,\n\t0x3410c5,\n\t0x204dc6,\n\t0x2d4fc9,\n\t0x3879c7,\n\t0x2826c6,\n\t0x2bd406,\n\t0x28f609,\n\t0x33f784,\n\t0x3a1184,\n\t0x39c0c8,\n\t0x260f46,\n\t0x279388,\n\t0x30fec8,\n\t0x378187,\n\t0x3beb49,\n\t0x2be507,\n\t0x2b678a,\n\t0x2fc88f,\n\t0x25100a,\n\t0x2bdc45,\n\t0x281105,\n\t0x220085,\n\t0x28be47,\n\t0x236703,\n\t0x20cf08,\n\t0x201e46,\n\t0x201f49,\n\t0x2e4806,\n\t0x3a3607,\n\t0x2a12c9,\n\t0x36d048,\n\t0x2d76c7,\n\t0x315603,\n\t0x34b9c5,\n\t0x236685,\n\t0x2bb6cb,\n\t0x385e44,\n\t0x30ad44,\n\t0x27f006,\n\t0x315e87,\n\t0x392a4a,\n\t0x251a87,\n\t0x36a947,\n\t0x2852c5,\n\t0x2016c5,\n\t0x253689,\n\t0x388bc6,\n\t0x25190d,\n\t0x334145,\n\t0x2a10c3,\n\t0x200dc3,\n\t0x39cf05,\n\t0x3534c5,\n\t0x257988,\n\t0x283007,\n\t0x3a0f06,\n\t0x2a6086,\n\t0x232545,\n\t0x23cd87,\n\t0x377c87,\n\t0x23ea07,\n\t0x2d9a4a,\n\t0x26c888,\n\t0x2bb884,\n\t0x256007,\n\t0x284707,\n\t0x352846,\n\t0x26f5c7,\n\t0x2ece48,\n\t0x2e8548,\n\t0x276346,\n\t0x374f88,\n\t0x2d1704,\n\t0x22b9c6,\n\t0x239b86,\n\t0x333b86,\n\t0x2d0006,\n\t0x233ac4,\n\t0x2719c6,\n\t0
x2c7146,\n\t0x29f406,\n\t0x2381c6,\n\t0x213ec6,\n\t0x223f06,\n\t0x3a0e08,\n\t0x3bcc88,\n\t0x2da288,\n\t0x274a08,\n\t0x357546,\n\t0x217e05,\n\t0x2dd4c6,\n\t0x2b3205,\n\t0x397f07,\n\t0x27df05,\n\t0x21ae83,\n\t0x2058c5,\n\t0x34cc44,\n\t0x214005,\n\t0x22dc83,\n\t0x33d807,\n\t0x374a48,\n\t0x2bfac6,\n\t0x2b0c4d,\n\t0x2810c6,\n\t0x29e985,\n\t0x227603,\n\t0x2c2a89,\n\t0x33f906,\n\t0x29dd86,\n\t0x2a8004,\n\t0x250f87,\n\t0x334546,\n\t0x387c85,\n\t0x20b2c3,\n\t0x209484,\n\t0x2848c6,\n\t0x204a04,\n\t0x239c88,\n\t0x2005c9,\n\t0x325f49,\n\t0x2a7e0a,\n\t0x2a918d,\n\t0x20abc7,\n\t0x2bf046,\n\t0x205ec4,\n\t0x2f6e09,\n\t0x28e688,\n\t0x28fb86,\n\t0x245246,\n\t0x26f5c7,\n\t0x2b9786,\n\t0x22c986,\n\t0x36aac6,\n\t0x3b0d0a,\n\t0x227248,\n\t0x364dc5,\n\t0x26fa09,\n\t0x28758a,\n\t0x2f1e88,\n\t0x2a40c8,\n\t0x29dd08,\n\t0x2ad74c,\n\t0x318585,\n\t0x2a6308,\n\t0x2e7546,\n\t0x36d2c6,\n\t0x3a34c7,\n\t0x251985,\n\t0x289245,\n\t0x325e09,\n\t0x219847,\n\t0x201f05,\n\t0x22d887,\n\t0x200dc3,\n\t0x2cd145,\n\t0x214308,\n\t0x25d087,\n\t0x2a3f89,\n\t0x2dac05,\n\t0x395a04,\n\t0x2a8e48,\n\t0x2dddc7,\n\t0x2d7888,\n\t0x2508c8,\n\t0x2d6645,\n\t0x281906,\n\t0x2a6186,\n\t0x277449,\n\t0x2b26c7,\n\t0x2b3ac6,\n\t0x2236c7,\n\t0x20e743,\n\t0x274084,\n\t0x2d1805,\n\t0x23cec4,\n\t0x393244,\n\t0x288547,\n\t0x25b347,\n\t0x234284,\n\t0x2a3dd0,\n\t0x234e47,\n\t0x2016c5,\n\t0x37178c,\n\t0x250684,\n\t0x2a9e48,\n\t0x257d49,\n\t0x36e646,\n\t0x34dd48,\n\t0x223384,\n\t0x37d0c8,\n\t0x23a5c6,\n\t0x238048,\n\t0x2a4cc6,\n\t0x2cc8cb,\n\t0x201d85,\n\t0x2d1688,\n\t0x200a04,\n\t0x200a0a,\n\t0x2a3f89,\n\t0x357f06,\n\t0x220148,\n\t0x263805,\n\t0x2b9044,\n\t0x2a9d46,\n\t0x23e8c8,\n\t0x287a88,\n\t0x329e86,\n\t0x358b04,\n\t0x23f9c6,\n\t0x2be587,\n\t0x27ff87,\n\t0x26f5cf,\n\t0x204187,\n\t0x2fe4c7,\n\t0x23d2c5,\n\t0x35fcc5,\n\t0x2a8409,\n\t0x2ed806,\n\t0x286045,\n\t0x288dc7,\n\t0x2c6188,\n\t0x29f505,\n\t0x2a27c6,\n\t0x2229c8,\n\t0x28ab8a,\n\t0x39c888,\n\t0x292f47,\n\t0x2fccc6,\n\t0x26f9c6,\n\t0x20ca43,\n\t0x2052c3,\n\t0x287749,\n\t0x280389,\n\
t0x2a6b86,\n\t0x2dac05,\n\t0x304588,\n\t0x220148,\n\t0x365d48,\n\t0x36ab4b,\n\t0x2b0e87,\n\t0x315849,\n\t0x26f848,\n\t0x356284,\n\t0x3886c8,\n\t0x295089,\n\t0x2b3dc5,\n\t0x28bd47,\n\t0x274105,\n\t0x287988,\n\t0x297bcb,\n\t0x29d510,\n\t0x2aec45,\n\t0x21e20c,\n\t0x3a10c5,\n\t0x285343,\n\t0x296706,\n\t0x2c5a04,\n\t0x28fa06,\n\t0x2a4707,\n\t0x222a44,\n\t0x24c3c8,\n\t0x20cdcd,\n\t0x330a05,\n\t0x20ac04,\n\t0x241b84,\n\t0x27bd89,\n\t0x292bc8,\n\t0x320b07,\n\t0x23a648,\n\t0x287ec8,\n\t0x2829c5,\n\t0x28c647,\n\t0x282947,\n\t0x342807,\n\t0x271149,\n\t0x223c49,\n\t0x36c986,\n\t0x2c3a06,\n\t0x26f806,\n\t0x33e9c5,\n\t0x3b4944,\n\t0x200006,\n\t0x200386,\n\t0x282a08,\n\t0x23680b,\n\t0x284cc7,\n\t0x205ec4,\n\t0x334486,\n\t0x2ed187,\n\t0x388f45,\n\t0x210bc5,\n\t0x21b484,\n\t0x223bc6,\n\t0x200088,\n\t0x2f6e09,\n\t0x259706,\n\t0x28df88,\n\t0x387d46,\n\t0x355088,\n\t0x2d6c8c,\n\t0x282886,\n\t0x29e64d,\n\t0x29eacb,\n\t0x352a85,\n\t0x377dc7,\n\t0x334006,\n\t0x31e648,\n\t0x36ca09,\n\t0x276608,\n\t0x2016c5,\n\t0x2076c7,\n\t0x286a88,\n\t0x332489,\n\t0x2a0986,\n\t0x25960a,\n\t0x31e3c8,\n\t0x27644b,\n\t0x2d964c,\n\t0x37d1c8,\n\t0x283e46,\n\t0x28c048,\n\t0x28a807,\n\t0x2e4909,\n\t0x2976cd,\n\t0x2a26c6,\n\t0x365308,\n\t0x3bcb49,\n\t0x2c4a48,\n\t0x289c88,\n\t0x2c798c,\n\t0x2c8e87,\n\t0x2c96c7,\n\t0x270f85,\n\t0x31a807,\n\t0x2c6048,\n\t0x2a9dc6,\n\t0x26020c,\n\t0x2f60c8,\n\t0x2d5708,\n\t0x262246,\n\t0x236407,\n\t0x36cb84,\n\t0x274a08,\n\t0x28d88c,\n\t0x22834c,\n\t0x2bdcc5,\n\t0x2b85c7,\n\t0x358a86,\n\t0x236386,\n\t0x35db08,\n\t0x202b84,\n\t0x23264b,\n\t0x37d80b,\n\t0x2fccc6,\n\t0x20cc47,\n\t0x339305,\n\t0x278585,\n\t0x232786,\n\t0x2637c5,\n\t0x385e05,\n\t0x2e40c7,\n\t0x27f609,\n\t0x2fc184,\n\t0x2feac5,\n\t0x2ead45,\n\t0x2b5448,\n\t0x235685,\n\t0x2c0b89,\n\t0x2b16c7,\n\t0x2b16cb,\n\t0x261ac6,\n\t0x3a0b49,\n\t0x331b48,\n\t0x272885,\n\t0x342908,\n\t0x223c88,\n\t0x249b07,\n\t0x383b47,\n\t0x2885c9,\n\t0x237f87,\n\t0x27de09,\n\t0x29b88c,\n\t0x2a6b88,\n\t0x331009,\n\t0x360987,\n\t0x287f89,\n\t0x25b487,\
n\t0x2d9748,\n\t0x3bed05,\n\t0x22b946,\n\t0x2c8888,\n\t0x30cf08,\n\t0x287449,\n\t0x385e47,\n\t0x278645,\n\t0x21f949,\n\t0x345306,\n\t0x2440c4,\n\t0x2440c6,\n\t0x35d048,\n\t0x254547,\n\t0x236a08,\n\t0x375049,\n\t0x3b1a07,\n\t0x2a56c6,\n\t0x377e84,\n\t0x205949,\n\t0x28c4c8,\n\t0x262107,\n\t0x2b56c6,\n\t0x236746,\n\t0x2bf144,\n\t0x241986,\n\t0x202003,\n\t0x34f109,\n\t0x201d46,\n\t0x3752c5,\n\t0x2a6086,\n\t0x2d79c5,\n\t0x286f08,\n\t0x37cf07,\n\t0x261e06,\n\t0x234d06,\n\t0x3059c8,\n\t0x2a8587,\n\t0x2a2705,\n\t0x2a3bc8,\n\t0x3bb748,\n\t0x31e3c8,\n\t0x3a0f85,\n\t0x22b9c6,\n\t0x325d09,\n\t0x2772c4,\n\t0x351d8b,\n\t0x22c68b,\n\t0x364cc9,\n\t0x200dc3,\n\t0x25efc5,\n\t0x21d306,\n\t0x3ba188,\n\t0x2fc804,\n\t0x2bfac6,\n\t0x2d9b89,\n\t0x2bc9c5,\n\t0x2e4006,\n\t0x2dddc6,\n\t0x220144,\n\t0x2af4ca,\n\t0x375208,\n\t0x30cf06,\n\t0x2cf245,\n\t0x3b8247,\n\t0x23d187,\n\t0x281904,\n\t0x22c8c7,\n\t0x2b6784,\n\t0x333b06,\n\t0x20cf43,\n\t0x271145,\n\t0x334f05,\n\t0x3beec8,\n\t0x2561c5,\n\t0x2825c9,\n\t0x274847,\n\t0x27484b,\n\t0x2aa04c,\n\t0x2aa64a,\n\t0x33f5c7,\n\t0x202e83,\n\t0x202e88,\n\t0x3a1145,\n\t0x29f585,\n\t0x2140c4,\n\t0x2d9646,\n\t0x257d46,\n\t0x2419c7,\n\t0x34d58b,\n\t0x233ac4,\n\t0x2e7644,\n\t0x2cbd04,\n\t0x2d3706,\n\t0x222a44,\n\t0x22b7c8,\n\t0x34b885,\n\t0x24fac5,\n\t0x365c87,\n\t0x377ec9,\n\t0x3534c5,\n\t0x38dcca,\n\t0x248b89,\n\t0x2911ca,\n\t0x3b0e49,\n\t0x310444,\n\t0x2bd4c5,\n\t0x2b9888,\n\t0x2f150b,\n\t0x35d785,\n\t0x33be86,\n\t0x236304,\n\t0x282b06,\n\t0x3b1889,\n\t0x2ed287,\n\t0x320e48,\n\t0x2a9506,\n\t0x2be507,\n\t0x287a88,\n\t0x3870c6,\n\t0x39b804,\n\t0x3743c7,\n\t0x376945,\n\t0x389b87,\n\t0x200104,\n\t0x333f86,\n\t0x2d5f48,\n\t0x29ec88,\n\t0x2e7007,\n\t0x27f988,\n\t0x29cdc5,\n\t0x213e44,\n\t0x3812c8,\n\t0x27fa84,\n\t0x220005,\n\t0x2ffbc4,\n\t0x30a547,\n\t0x290c87,\n\t0x2880c8,\n\t0x2d7a06,\n\t0x256145,\n\t0x2823c8,\n\t0x39ca88,\n\t0x2a7d49,\n\t0x22c986,\n\t0x23a048,\n\t0x20088a,\n\t0x388fc8,\n\t0x2ec085,\n\t0x349286,\n\t0x248a48,\n\t0x20778a,\n\t0x226047,\n\t0x28ee45
,\n\t0x29ad48,\n\t0x2c2404,\n\t0x272486,\n\t0x2c9a48,\n\t0x213ec6,\n\t0x20b308,\n\t0x296e87,\n\t0x211486,\n\t0x2bccc4,\n\t0x364707,\n\t0x2b8e84,\n\t0x3b1847,\n\t0x2a064d,\n\t0x288805,\n\t0x2d4dcb,\n\t0x2285c6,\n\t0x257808,\n\t0x24c384,\n\t0x357746,\n\t0x2848c6,\n\t0x28c387,\n\t0x29e30d,\n\t0x24e587,\n\t0x2b93c8,\n\t0x278705,\n\t0x276e08,\n\t0x2ccb86,\n\t0x29ce48,\n\t0x22ab46,\n\t0x25a707,\n\t0x39ae89,\n\t0x36ebc7,\n\t0x28fe48,\n\t0x27af45,\n\t0x222e08,\n\t0x219405,\n\t0x3ab545,\n\t0x3b10c5,\n\t0x23ef43,\n\t0x289144,\n\t0x26fa05,\n\t0x241489,\n\t0x3043c6,\n\t0x2ecf48,\n\t0x383905,\n\t0x2bb507,\n\t0x2ad54a,\n\t0x2e3f49,\n\t0x2d348a,\n\t0x2da308,\n\t0x22d6cc,\n\t0x288e4d,\n\t0x301bc3,\n\t0x20b208,\n\t0x209445,\n\t0x28a946,\n\t0x36cec6,\n\t0x2ebb05,\n\t0x2237c9,\n\t0x20e1c5,\n\t0x2823c8,\n\t0x25fe06,\n\t0x35e006,\n\t0x2a8d09,\n\t0x39ed87,\n\t0x297e86,\n\t0x2ad4c8,\n\t0x333a88,\n\t0x2e4f47,\n\t0x2381ce,\n\t0x2ccdc5,\n\t0x332385,\n\t0x213dc8,\n\t0x20a247,\n\t0x200842,\n\t0x2c7504,\n\t0x28f90a,\n\t0x2621c8,\n\t0x389206,\n\t0x2a1408,\n\t0x2a6186,\n\t0x3337c8,\n\t0x2b3ac8,\n\t0x3ab504,\n\t0x2bba45,\n\t0x681384,\n\t0x681384,\n\t0x681384,\n\t0x201e03,\n\t0x2365c6,\n\t0x282886,\n\t0x2a508c,\n\t0x200943,\n\t0x223286,\n\t0x20cf04,\n\t0x33f888,\n\t0x2d99c5,\n\t0x28fa06,\n\t0x2c31c8,\n\t0x2db2c6,\n\t0x261d86,\n\t0x357d08,\n\t0x2d1887,\n\t0x237d49,\n\t0x2fa8ca,\n\t0x20a944,\n\t0x27df05,\n\t0x29a385,\n\t0x2f6c06,\n\t0x20ac06,\n\t0x2a5ac6,\n\t0x2ff206,\n\t0x237e84,\n\t0x237e8b,\n\t0x23c584,\n\t0x2a5245,\n\t0x2b2ac5,\n\t0x378246,\n\t0x2090c8,\n\t0x288d07,\n\t0x320c04,\n\t0x232fc3,\n\t0x2c1f05,\n\t0x311847,\n\t0x288c0b,\n\t0x3bedc7,\n\t0x2c30c8,\n\t0x2e7287,\n\t0x23d406,\n\t0x27a4c8,\n\t0x2b004b,\n\t0x345746,\n\t0x21d449,\n\t0x2b01c5,\n\t0x315603,\n\t0x2e4006,\n\t0x296d88,\n\t0x21f083,\n\t0x271e03,\n\t0x287a86,\n\t0x2a6186,\n\t0x36958a,\n\t0x283e85,\n\t0x28470b,\n\t0x2a5fcb,\n\t0x210a83,\n\t0x20b943,\n\t0x2b6704,\n\t0x2af6c7,\n\t0x296e04,\n\t0x277344,\n\t0x2e73c4,\n\t0x223e88,\n\t0x2cf1
88,\n\t0x205249,\n\t0x39e3c8,\n\t0x28b487,\n\t0x2381c6,\n\t0x2ecb8f,\n\t0x2ccf06,\n\t0x2d9944,\n\t0x2cefca,\n\t0x311747,\n\t0x208206,\n\t0x297509,\n\t0x2051c5,\n\t0x3bf005,\n\t0x205306,\n\t0x222f43,\n\t0x2c2449,\n\t0x2273c6,\n\t0x202d09,\n\t0x392a46,\n\t0x271145,\n\t0x2be0c5,\n\t0x204183,\n\t0x2af808,\n\t0x213887,\n\t0x201e44,\n\t0x33f708,\n\t0x2ffe04,\n\t0x2f0486,\n\t0x296706,\n\t0x248fc6,\n\t0x2d1549,\n\t0x29f505,\n\t0x388bc6,\n\t0x2666c9,\n\t0x2cb906,\n\t0x223f06,\n\t0x397346,\n\t0x21ce85,\n\t0x2ffbc6,\n\t0x25a704,\n\t0x3bed05,\n\t0x2c8884,\n\t0x2b9f86,\n\t0x334104,\n\t0x2136c3,\n\t0x28e745,\n\t0x23dac8,\n\t0x262987,\n\t0x2c1ac9,\n\t0x28ed48,\n\t0x29fb51,\n\t0x2dde4a,\n\t0x2fcc07,\n\t0x25a986,\n\t0x20cf04,\n\t0x2c8988,\n\t0x233fc8,\n\t0x29fd0a,\n\t0x2c094d,\n\t0x2a7f06,\n\t0x357e06,\n\t0x3647c6,\n\t0x24fac7,\n\t0x2b9485,\n\t0x210187,\n\t0x20cdc5,\n\t0x2b1804,\n\t0x2ae086,\n\t0x241807,\n\t0x2c214d,\n\t0x248987,\n\t0x3a4cc8,\n\t0x2826c9,\n\t0x349186,\n\t0x2a0905,\n\t0x22dcc4,\n\t0x35d146,\n\t0x281806,\n\t0x262346,\n\t0x2a1c88,\n\t0x21cd43,\n\t0x20aa83,\n\t0x338e45,\n\t0x207b06,\n\t0x2b3a85,\n\t0x2a9708,\n\t0x2a48ca,\n\t0x3a2dc4,\n\t0x33f888,\n\t0x29dd08,\n\t0x378087,\n\t0x3839c9,\n\t0x2c2dc8,\n\t0x2a6d07,\n\t0x2957c6,\n\t0x213eca,\n\t0x35d1c8,\n\t0x2f8589,\n\t0x292c88,\n\t0x229b89,\n\t0x2e8747,\n\t0x33bdc5,\n\t0x36ad46,\n\t0x2a9c48,\n\t0x287c08,\n\t0x29de88,\n\t0x2fcdc8,\n\t0x2a5245,\n\t0x218944,\n\t0x213588,\n\t0x24b384,\n\t0x3b0c44,\n\t0x271145,\n\t0x299ac7,\n\t0x377c89,\n\t0x28c187,\n\t0x2008c5,\n\t0x27f206,\n\t0x363686,\n\t0x200b84,\n\t0x2a9046,\n\t0x255f84,\n\t0x276d06,\n\t0x377a46,\n\t0x21eec6,\n\t0x2016c5,\n\t0x2a95c7,\n\t0x202e83,\n\t0x21dd89,\n\t0x3057c8,\n\t0x2f6d04,\n\t0x2f6d0d,\n\t0x29ed88,\n\t0x2d7248,\n\t0x2f8506,\n\t0x39af89,\n\t0x2e3f49,\n\t0x3b1585,\n\t0x2a49ca,\n\t0x2edbca,\n\t0x2a5ccc,\n\t0x2a5e46,\n\t0x27fe06,\n\t0x2cd086,\n\t0x2c84c9,\n\t0x28ab86,\n\t0x2101c6,\n\t0x20e286,\n\t0x274a08,\n\t0x27f986,\n\t0x2d92cb,\n\t0x299c45,\n\t0x24fac5,\n\t0x28
0085,\n\t0x39be46,\n\t0x213e83,\n\t0x248f46,\n\t0x248907,\n\t0x2c8845,\n\t0x24d545,\n\t0x3410c5,\n\t0x313846,\n\t0x204dc4,\n\t0x385806,\n\t0x284049,\n\t0x39bccc,\n\t0x2b1548,\n\t0x23e844,\n\t0x2ff8c6,\n\t0x2286c6,\n\t0x296d88,\n\t0x220148,\n\t0x39bbc9,\n\t0x3b8247,\n\t0x260c89,\n\t0x255806,\n\t0x237404,\n\t0x214944,\n\t0x20a584,\n\t0x287a88,\n\t0x377aca,\n\t0x353446,\n\t0x35fb87,\n\t0x37e787,\n\t0x3a0c45,\n\t0x29a344,\n\t0x295046,\n\t0x2b94c6,\n\t0x202bc3,\n\t0x305607,\n\t0x2507c8,\n\t0x3b16ca,\n\t0x2d4708,\n\t0x28a688,\n\t0x334145,\n\t0x352b85,\n\t0x284dc5,\n\t0x3a1006,\n\t0x2393c6,\n\t0x25b285,\n\t0x34f349,\n\t0x29a14c,\n\t0x284e87,\n\t0x29fd88,\n\t0x24ee05,\n\t0x681384,\n\t0x240ac4,\n\t0x25d1c4,\n\t0x217946,\n\t0x2a728e,\n\t0x3bf087,\n\t0x24fcc5,\n\t0x27724c,\n\t0x2ffcc7,\n\t0x241787,\n\t0x274e89,\n\t0x2208c9,\n\t0x28ee45,\n\t0x3057c8,\n\t0x325d09,\n\t0x31e285,\n\t0x2c8788,\n\t0x227546,\n\t0x381546,\n\t0x2e2dc4,\n\t0x25ff08,\n\t0x248743,\n\t0x235e44,\n\t0x2c1f85,\n\t0x204dc7,\n\t0x21b4c5,\n\t0x200749,\n\t0x27e64d,\n\t0x2935c6,\n\t0x229b04,\n\t0x2958c8,\n\t0x27f44a,\n\t0x21da87,\n\t0x243905,\n\t0x235e83,\n\t0x2a618e,\n\t0x2af90c,\n\t0x2f1f87,\n\t0x2a7447,\n\t0x200143,\n\t0x28abc5,\n\t0x25d1c5,\n\t0x2a17c8,\n\t0x29db49,\n\t0x23e746,\n\t0x296e04,\n\t0x2fcb46,\n\t0x3650cb,\n\t0x2e3ccc,\n\t0x376447,\n\t0x2d9585,\n\t0x3bb648,\n\t0x2e4d05,\n\t0x2cefc7,\n\t0x2ddc87,\n\t0x248745,\n\t0x213e83,\n\t0x3b36c4,\n\t0x21b705,\n\t0x2fc085,\n\t0x2fc086,\n\t0x2821c8,\n\t0x241807,\n\t0x36d1c6,\n\t0x25b686,\n\t0x3b1006,\n\t0x2f88c9,\n\t0x28c747,\n\t0x262606,\n\t0x2e3e46,\n\t0x27e106,\n\t0x2af405,\n\t0x21e8c6,\n\t0x390e05,\n\t0x235708,\n\t0x2990cb,\n\t0x294b86,\n\t0x37e7c4,\n\t0x2c8109,\n\t0x274844,\n\t0x2274c8,\n\t0x2441c7,\n\t0x289b84,\n\t0x2c2688,\n\t0x2c94c4,\n\t0x2af444,\n\t0x39ac45,\n\t0x330a46,\n\t0x223dc7,\n\t0x20b3c3,\n\t0x2a5785,\n\t0x32a504,\n\t0x3323c6,\n\t0x3b1608,\n\t0x39c785,\n\t0x298d89,\n\t0x21fb45,\n\t0x223288,\n\t0x22cfc7,\n\t0x398048,\n\t0x2c1907,\n\t0x2fe589,\n\t0x
271846,\n\t0x360486,\n\t0x20e284,\n\t0x295705,\n\t0x3093cc,\n\t0x280087,\n\t0x280fc7,\n\t0x37e648,\n\t0x2935c6,\n\t0x2794c4,\n\t0x34bc04,\n\t0x288449,\n\t0x2cd186,\n\t0x253707,\n\t0x2cff84,\n\t0x24ab06,\n\t0x35f245,\n\t0x2d7547,\n\t0x2d9246,\n\t0x2594c9,\n\t0x2eda07,\n\t0x26f5c7,\n\t0x2a8b86,\n\t0x24aa45,\n\t0x285988,\n\t0x227248,\n\t0x2f6a46,\n\t0x39c7c5,\n\t0x344806,\n\t0x202c03,\n\t0x2a1649,\n\t0x2a584e,\n\t0x2c1608,\n\t0x2fff08,\n\t0x2f684b,\n\t0x298fc6,\n\t0x20a884,\n\t0x261d84,\n\t0x2a594a,\n\t0x21e107,\n\t0x2626c5,\n\t0x21d449,\n\t0x2c7205,\n\t0x3b0c87,\n\t0x250584,\n\t0x27b907,\n\t0x30fdc8,\n\t0x2d0f06,\n\t0x365489,\n\t0x2c2eca,\n\t0x21e086,\n\t0x29e8c6,\n\t0x2b2a45,\n\t0x38ef85,\n\t0x325647,\n\t0x24ec48,\n\t0x35f188,\n\t0x3ab506,\n\t0x2be145,\n\t0x20a98e,\n\t0x2bb884,\n\t0x2a1745,\n\t0x27eb89,\n\t0x2ed608,\n\t0x292e86,\n\t0x2a36cc,\n\t0x2a44d0,\n\t0x2a6ecf,\n\t0x2a8308,\n\t0x33f5c7,\n\t0x2016c5,\n\t0x26fa05,\n\t0x389089,\n\t0x29af49,\n\t0x23fac6,\n\t0x35d807,\n\t0x2b8545,\n\t0x2b43c9,\n\t0x3528c6,\n\t0x28a9cd,\n\t0x288789,\n\t0x277344,\n\t0x2c1388,\n\t0x213649,\n\t0x353606,\n\t0x27f305,\n\t0x360486,\n\t0x320d09,\n\t0x281688,\n\t0x217e05,\n\t0x200984,\n\t0x2a388b,\n\t0x3534c5,\n\t0x2a39c6,\n\t0x289186,\n\t0x26e646,\n\t0x27c18b,\n\t0x298e89,\n\t0x25b5c5,\n\t0x397e07,\n\t0x2dddc6,\n\t0x34dec6,\n\t0x25cf48,\n\t0x330b49,\n\t0x3a4a8c,\n\t0x311648,\n\t0x23c586,\n\t0x329e83,\n\t0x28bf46,\n\t0x27bfc5,\n\t0x284a48,\n\t0x2bdb46,\n\t0x2d7788,\n\t0x251b05,\n\t0x283245,\n\t0x27a8c8,\n\t0x333947,\n\t0x36ce07,\n\t0x2419c7,\n\t0x34dd48,\n\t0x39ad08,\n\t0x31a706,\n\t0x2b9dc7,\n\t0x273f47,\n\t0x27be8a,\n\t0x20d703,\n\t0x39be46,\n\t0x23e985,\n\t0x28f904,\n\t0x2826c9,\n\t0x2fe504,\n\t0x262a04,\n\t0x2a4d44,\n\t0x2a744b,\n\t0x2137c7,\n\t0x20abc5,\n\t0x29cac8,\n\t0x27f206,\n\t0x27f208,\n\t0x283dc6,\n\t0x293345,\n\t0x293e85,\n\t0x295f46,\n\t0x296b48,\n\t0x297448,\n\t0x282886,\n\t0x29c90f,\n\t0x2a1110,\n\t0x208605,\n\t0x202e83,\n\t0x2374c5,\n\t0x315788,\n\t0x29ae49,\n\t0x31e3c8,\n\t
0x2f8748,\n\t0x2bec08,\n\t0x213887,\n\t0x27eec9,\n\t0x2d7988,\n\t0x2730c4,\n\t0x2a4bc8,\n\t0x2b5509,\n\t0x2babc7,\n\t0x2a2644,\n\t0x28c248,\n\t0x2a938a,\n\t0x3085c6,\n\t0x2a7f06,\n\t0x22c849,\n\t0x2a4707,\n\t0x2d4588,\n\t0x2fdbc8,\n\t0x2cfe08,\n\t0x3690c5,\n\t0x38ff05,\n\t0x24fac5,\n\t0x25d185,\n\t0x38cb87,\n\t0x213e85,\n\t0x2c8845,\n\t0x20ae06,\n\t0x31e307,\n\t0x2f1447,\n\t0x2a9686,\n\t0x2da845,\n\t0x2a39c6,\n\t0x202f45,\n\t0x2b83c8,\n\t0x2f1e04,\n\t0x2cb986,\n\t0x348084,\n\t0x2b9048,\n\t0x2cba8a,\n\t0x28300c,\n\t0x34d785,\n\t0x24fb86,\n\t0x3a4c46,\n\t0x234b86,\n\t0x23c604,\n\t0x35f505,\n\t0x283c07,\n\t0x2a4789,\n\t0x2d3c07,\n\t0x681384,\n\t0x681384,\n\t0x320a85,\n\t0x38d584,\n\t0x2a308a,\n\t0x27f086,\n\t0x27a704,\n\t0x208185,\n\t0x3875c5,\n\t0x2b93c4,\n\t0x288dc7,\n\t0x21fac7,\n\t0x2d3708,\n\t0x342348,\n\t0x217e09,\n\t0x2a5308,\n\t0x2a324b,\n\t0x251044,\n\t0x375f45,\n\t0x2860c5,\n\t0x241949,\n\t0x330b49,\n\t0x2c8008,\n\t0x243f48,\n\t0x2df044,\n\t0x228705,\n\t0x202d43,\n\t0x2f6bc5,\n\t0x388c46,\n\t0x29d98c,\n\t0x2189c6,\n\t0x37cfc6,\n\t0x293105,\n\t0x3138c8,\n\t0x2c1786,\n\t0x25ab06,\n\t0x2a7f06,\n\t0x22e2cc,\n\t0x262504,\n\t0x3b114a,\n\t0x293048,\n\t0x29d7c7,\n\t0x32a406,\n\t0x23e807,\n\t0x2f2ec5,\n\t0x2b56c6,\n\t0x35c286,\n\t0x367cc7,\n\t0x262a44,\n\t0x30a645,\n\t0x27eb84,\n\t0x2b1887,\n\t0x27edc8,\n\t0x27fc8a,\n\t0x286907,\n\t0x375387,\n\t0x33f547,\n\t0x2e4e49,\n\t0x29d98a,\n\t0x2373c3,\n\t0x262945,\n\t0x20b343,\n\t0x2e7409,\n\t0x254ec8,\n\t0x23d2c7,\n\t0x31e4c9,\n\t0x227346,\n\t0x2042c8,\n\t0x33d785,\n\t0x39cb8a,\n\t0x2dbc89,\n\t0x276209,\n\t0x3a34c7,\n\t0x2340c9,\n\t0x21edc8,\n\t0x367e86,\n\t0x24fd48,\n\t0x21ce87,\n\t0x237f87,\n\t0x248b87,\n\t0x2d5dc8,\n\t0x2ff746,\n\t0x2a9145,\n\t0x283c07,\n\t0x29e3c8,\n\t0x348004,\n\t0x2d41c4,\n\t0x297d87,\n\t0x2b3e47,\n\t0x325b8a,\n\t0x367e06,\n\t0x35854a,\n\t0x2c7447,\n\t0x2bb647,\n\t0x358004,\n\t0x27dec4,\n\t0x2d7446,\n\t0x281b84,\n\t0x281b8c,\n\t0x203185,\n\t0x21ff89,\n\t0x265684,\n\t0x2b9485,\n\t0x27f3c8,\n\t0x22d245,\n
\t0x204dc6,\n\t0x225f44,\n\t0x28f30a,\n\t0x2b25c6,\n\t0x2a424a,\n\t0x2b7887,\n\t0x236b45,\n\t0x222f45,\n\t0x3a0c8a,\n\t0x296cc5,\n\t0x2a7e06,\n\t0x24b384,\n\t0x2b6886,\n\t0x325705,\n\t0x2bdc06,\n\t0x2e700c,\n\t0x2d388a,\n\t0x2957c4,\n\t0x2381c6,\n\t0x2a4707,\n\t0x2d91c4,\n\t0x274a08,\n\t0x39e246,\n\t0x20a809,\n\t0x2baec9,\n\t0x2a6c89,\n\t0x351f46,\n\t0x21cf86,\n\t0x24fe87,\n\t0x34f288,\n\t0x21cd89,\n\t0x2137c7,\n\t0x29cc46,\n\t0x2be587,\n\t0x364685,\n\t0x2bb884,\n\t0x24fa47,\n\t0x274105,\n\t0x28f845,\n\t0x36c347,\n\t0x248608,\n\t0x3bb5c6,\n\t0x29f24d,\n\t0x2a19cf,\n\t0x2a5fcd,\n\t0x200904,\n\t0x23dbc6,\n\t0x2dc1c8,\n\t0x20e245,\n\t0x27c048,\n\t0x2499ca,\n\t0x277344,\n\t0x365646,\n\t0x33ae07,\n\t0x233ac7,\n\t0x2d1949,\n\t0x24fd05,\n\t0x2b93c4,\n\t0x2bb98a,\n\t0x2c2989,\n\t0x2341c7,\n\t0x272306,\n\t0x353606,\n\t0x228646,\n\t0x374486,\n\t0x2db94f,\n\t0x2dc089,\n\t0x27f986,\n\t0x233ec6,\n\t0x320289,\n\t0x2b9ec7,\n\t0x229403,\n\t0x22e446,\n\t0x2052c3,\n\t0x2eb9c8,\n\t0x2be3c7,\n\t0x2a8509,\n\t0x296588,\n\t0x36cf48,\n\t0x385f86,\n\t0x218909,\n\t0x398845,\n\t0x2b9f84,\n\t0x29a687,\n\t0x2c8545,\n\t0x200904,\n\t0x20ac88,\n\t0x202044,\n\t0x2b9c07,\n\t0x3749c6,\n\t0x2e7a85,\n\t0x292c88,\n\t0x3534cb,\n\t0x3778c7,\n\t0x3a0f06,\n\t0x2ccf84,\n\t0x348186,\n\t0x271145,\n\t0x274105,\n\t0x285709,\n\t0x2889c9,\n\t0x237fc4,\n\t0x238005,\n\t0x238205,\n\t0x39ca06,\n\t0x3058c8,\n\t0x2c6b86,\n\t0x25060b,\n\t0x36e4ca,\n\t0x2b8f85,\n\t0x293f06,\n\t0x3a2ac5,\n\t0x2e9dc5,\n\t0x2ad387,\n\t0x39c0c8,\n\t0x260c84,\n\t0x26be86,\n\t0x2974c6,\n\t0x21ef87,\n\t0x3155c4,\n\t0x2848c6,\n\t0x2427c5,\n\t0x2427c9,\n\t0x21b584,\n\t0x29a4c9,\n\t0x282886,\n\t0x2c8f48,\n\t0x238205,\n\t0x37e885,\n\t0x2bdc06,\n\t0x3a4989,\n\t0x2208c9,\n\t0x37d046,\n\t0x2ed708,\n\t0x277348,\n\t0x3a2a84,\n\t0x2bbcc4,\n\t0x2bbcc8,\n\t0x32e048,\n\t0x260d89,\n\t0x388bc6,\n\t0x2a7f06,\n\t0x3294cd,\n\t0x2bfac6,\n\t0x2d6b49,\n\t0x2dd5c5,\n\t0x205306,\n\t0x2102c8,\n\t0x326885,\n\t0x273f84,\n\t0x271145,\n\t0x2882c8,\n\t0x2a2e49,\n\t0x27ec44,
\n\t0x333f86,\n\t0x22d10a,\n\t0x2f1e88,\n\t0x325d09,\n\t0x261f0a,\n\t0x31e446,\n\t0x2a1b88,\n\t0x2ced85,\n\t0x2c5ec8,\n\t0x2c1a05,\n\t0x227209,\n\t0x37ac49,\n\t0x203282,\n\t0x2b01c5,\n\t0x2782c6,\n\t0x2827c7,\n\t0x34e085,\n\t0x30ce06,\n\t0x326948,\n\t0x2935c6,\n\t0x2b9749,\n\t0x2810c6,\n\t0x25cdc8,\n\t0x2b0805,\n\t0x264906,\n\t0x25a808,\n\t0x287a88,\n\t0x2e8648,\n\t0x353788,\n\t0x21e8c4,\n\t0x281943,\n\t0x2b9984,\n\t0x286b06,\n\t0x3646c4,\n\t0x2ffe47,\n\t0x25aa09,\n\t0x2cbd05,\n\t0x2fdbc6,\n\t0x22e446,\n\t0x28200b,\n\t0x2b8ec6,\n\t0x2cf8c6,\n\t0x2d13c8,\n\t0x24d486,\n\t0x236943,\n\t0x2164c3,\n\t0x2bb884,\n\t0x239f45,\n\t0x387b87,\n\t0x27edc8,\n\t0x27edcf,\n\t0x283b0b,\n\t0x3056c8,\n\t0x334006,\n\t0x3059ce,\n\t0x251143,\n\t0x387b04,\n\t0x2b8e45,\n\t0x2b9246,\n\t0x29514b,\n\t0x299b86,\n\t0x222a49,\n\t0x2e7a85,\n\t0x3999c8,\n\t0x216688,\n\t0x22078c,\n\t0x2a7486,\n\t0x2f6c06,\n\t0x2dac05,\n\t0x28fc08,\n\t0x25a805,\n\t0x356288,\n\t0x2a3a4a,\n\t0x2a6409,\n\t0x681384,\n\t0x3b60f882,\n\t0x16fb88,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x39c783,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x231604,\n\t0x208e83,\n\t0x201a03,\n\t0x213083,\n\t0x286644,\n\t0x238543,\n\t0x240244,\n\t0x23cac3,\n\t0x2de944,\n\t0x323043,\n\t0x34e347,\n\t0x28cac3,\n\t0x200e03,\n\t0x293408,\n\t0x201a03,\n\t0x29630b,\n\t0x2f3743,\n\t0x3a03c6,\n\t0x205082,\n\t0x22facb,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x201a03,\n\t0x220b83,\n\t0x201503,\n\t0x207102,\n\t0x16fb88,\n\t0x32d1c5,\n\t0x274188,\n\t0x2f9f88,\n\t0x20f882,\n\t0x20a605,\n\t0x3785c7,\n\t0x201842,\n\t0x24c5c7,\n\t0x207b02,\n\t0x2f6607,\n\t0x2cc409,\n\t0x2ce948,\n\t0x2cfc89,\n\t0x24b2c2,\n\t0x2707c7,\n\t0x37cdc4,\n\t0x378687,\n\t0x36e3c7,\n\t0x264d42,\n\t0x28cac3,\n\t0x214642,\n\t0x204d42,\n\t0x200442,\n\t0x21cc82,\n\t0x206b42,\n\t0x20d2c2,\n\t0x2aff05,\n\t0x240a05,\n\t0xf882,\n\t0x3cac3,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03
,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x208e83,\n\t0x1a3443,\n\t0x201a03,\n\t0x170c3,\n\t0x8c1,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x231604,\n\t0x255783,\n\t0x208e83,\n\t0x1a3443,\n\t0x201a03,\n\t0x221f43,\n\t0x3e4f5906,\n\t0x42bc3,\n\t0x873c5,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x20f882,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x84c2,\n\t0x16fb88,\n\t0xe03,\n\t0x1a3443,\n\t0x4ec04,\n\t0xe5105,\n\t0x207102,\n\t0x39cdc4,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x38acc3,\n\t0x2b13c5,\n\t0x255783,\n\t0x211a83,\n\t0x208e83,\n\t0x21b543,\n\t0x201a03,\n\t0x215443,\n\t0x20e383,\n\t0x202443,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x20f882,\n\t0x201a03,\n\t0x16fb88,\n\t0x323043,\n\t0x1a3443,\n\t0x16fb88,\n\t0x1a3443,\n\t0x2bcc43,\n\t0x238543,\n\t0x23a844,\n\t0x23cac3,\n\t0x323043,\n\t0x205e82,\n\t0x28cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x205e82,\n\t0x229443,\n\t0x208e83,\n\t0x201a03,\n\t0x2ef783,\n\t0x215443,\n\t0x207102,\n\t0x20f882,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x3a03c5,\n\t0xa4f06,\n\t0x286644,\n\t0x205082,\n\t0x16fb88,\n\t0x207102,\n\t0x25088,\n\t0x134943,\n\t0x20f882,\n\t0x42899306,\n\t0x6a04,\n\t0xb610b,\n\t0x44e86,\n\t0x8cbc7,\n\t0x23cac3,\n\t0x51648,\n\t0x323043,\n\t0x8b205,\n\t0x1493c4,\n\t0x227583,\n\t0x556c7,\n\t0xe06c4,\n\t0x208e83,\n\t0x1a3284,\n\t0x1a3443,\n\t0x201a03,\n\t0x2f4544,\n\t0xb5ec8,\n\t0x12cf06,\n\t0x16308,\n\t0x1252c5,\n\t0x9fc9,\n\t0x20f882,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x200e03,\n\t0x201a03,\n\t0x2f3743,\n\t0x205082,\n\t0x16fb88,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x231603,\n\t0x21bf84,\n\t0x208e83,\n\t0xe03,\n\t0x201a03,\n\t0x238543,\n\t0x23cac3,\n\t0x2de944,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x3a03c6,\n\t0x23cac3,\n\t0x323043,\n\t0x18a783,\n\t0x201a03,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x8cbc7,\n\t0x16fb88
,\n\t0x323043,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x45238543,\n\t0x23cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x16fb88,\n\t0x207102,\n\t0x20f882,\n\t0x238543,\n\t0x323043,\n\t0x208e83,\n\t0x200442,\n\t0x201a03,\n\t0x31f1c7,\n\t0x342b8b,\n\t0x22fc83,\n\t0x244708,\n\t0x34f007,\n\t0x348746,\n\t0x382d45,\n\t0x232309,\n\t0x28c848,\n\t0x346789,\n\t0x346790,\n\t0x36f64b,\n\t0x2e2109,\n\t0x205dc3,\n\t0x20af09,\n\t0x23bd86,\n\t0x23bd8c,\n\t0x32d288,\n\t0x3bc208,\n\t0x244a49,\n\t0x29854e,\n\t0x2cc1cb,\n\t0x2e5c0c,\n\t0x203ec3,\n\t0x26ad0c,\n\t0x203ec9,\n\t0x30ae47,\n\t0x23ca0c,\n\t0x2b478a,\n\t0x252044,\n\t0x2768cd,\n\t0x26abc8,\n\t0x21308d,\n\t0x26fec6,\n\t0x28664b,\n\t0x200cc9,\n\t0x2cf787,\n\t0x332c86,\n\t0x3372c9,\n\t0x34834a,\n\t0x319108,\n\t0x2f3204,\n\t0x2fe987,\n\t0x363787,\n\t0x2d0184,\n\t0x38d204,\n\t0x2345c9,\n\t0x28a4c9,\n\t0x2b7288,\n\t0x216d05,\n\t0x339645,\n\t0x213c86,\n\t0x276789,\n\t0x249c4d,\n\t0x33bf88,\n\t0x213b87,\n\t0x382dc8,\n\t0x2fa686,\n\t0x39b444,\n\t0x2501c5,\n\t0x201c46,\n\t0x202884,\n\t0x203dc7,\n\t0x206f4a,\n\t0x219784,\n\t0x21dfc6,\n\t0x21ea49,\n\t0x21ea4f,\n\t0x21fc8d,\n\t0x220f06,\n\t0x224c90,\n\t0x225086,\n\t0x2257c7,\n\t0x2269c7,\n\t0x2269cf,\n\t0x2276c9,\n\t0x22cb06,\n\t0x22da47,\n\t0x22da48,\n\t0x22f289,\n\t0x358088,\n\t0x2eb507,\n\t0x212843,\n\t0x394f46,\n\t0x3c0b48,\n\t0x29880a,\n\t0x236089,\n\t0x205d83,\n\t0x3784c6,\n\t0x26bcca,\n\t0x28eb87,\n\t0x30ac8a,\n\t0x25a18e,\n\t0x227806,\n\t0x2b03c7,\n\t0x217bc6,\n\t0x203f86,\n\t0x38fd0b,\n\t0x31708a,\n\t0x32138d,\n\t0x21d047,\n\t0x20e408,\n\t0x20e409,\n\t0x20e40f,\n\t0x2c1c4c,\n\t0x2b4089,\n\t0x2d890e,\n\t0x34e44a,\n\t0x28b906,\n\t0x314a86,\n\t0x319d8c,\n\t0x31be8c,\n\t0x327508,\n\t0x36eac7,\n\t0x274d85,\n\t0x3485c4,\n\t0x20f88e,\n\t0x299684,\n\t0x388947,\n\t0x39140a,\n\t0x38a814,\n\t0x39390f,\n\t0x226b88,\n\t0x394e08,\n\t0x35eccd,\n\t0x35ecce,\n\t0x3a0849,\n\t0x238788,\n\t0x23878f,\n\t0x23c70c,\n\t0x23c70f,\n\t0x23d907,\n\t0x240c0a,\n\t0x2459cb,\n\t0x243788,\n\t0x24
5c87,\n\t0x3ac74d,\n\t0x322b46,\n\t0x276a86,\n\t0x248dc9,\n\t0x364b08,\n\t0x24cf48,\n\t0x24cf4e,\n\t0x2f4087,\n\t0x24e145,\n\t0x24e9c5,\n\t0x204b44,\n\t0x348a06,\n\t0x2b7188,\n\t0x20db03,\n\t0x2f948e,\n\t0x3acb08,\n\t0x2b588b,\n\t0x378bc7,\n\t0x3ab345,\n\t0x233d86,\n\t0x2b1f87,\n\t0x32f2c8,\n\t0x325449,\n\t0x322dc5,\n\t0x28e788,\n\t0x21c946,\n\t0x3afeca,\n\t0x20f789,\n\t0x23cac9,\n\t0x23cacb,\n\t0x346448,\n\t0x2d0049,\n\t0x216dc6,\n\t0x23768a,\n\t0x293c0a,\n\t0x240e0c,\n\t0x28e4c7,\n\t0x2ce74a,\n\t0x36b38b,\n\t0x36b399,\n\t0x312408,\n\t0x3a0445,\n\t0x2cdd46,\n\t0x25c489,\n\t0x3449c6,\n\t0x2df8ca,\n\t0x28ca46,\n\t0x20df44,\n\t0x2cdecd,\n\t0x20df47,\n\t0x218209,\n\t0x250ac5,\n\t0x250c08,\n\t0x251409,\n\t0x251844,\n\t0x251f47,\n\t0x251f48,\n\t0x2526c7,\n\t0x26e2c8,\n\t0x255cc7,\n\t0x25b845,\n\t0x25f3cc,\n\t0x25fc09,\n\t0x2c8c0a,\n\t0x39ec09,\n\t0x20b009,\n\t0x37ee4c,\n\t0x264f0b,\n\t0x2662c8,\n\t0x267448,\n\t0x26a804,\n\t0x289848,\n\t0x28d209,\n\t0x2b4847,\n\t0x20e646,\n\t0x200f47,\n\t0x2c4289,\n\t0x32264b,\n\t0x325147,\n\t0x201a87,\n\t0x2b79c7,\n\t0x213004,\n\t0x213005,\n\t0x2a7c05,\n\t0x34b1cb,\n\t0x3a9384,\n\t0x350448,\n\t0x26e94a,\n\t0x21ca07,\n\t0x300687,\n\t0x294712,\n\t0x276c06,\n\t0x23a1c6,\n\t0x33888e,\n\t0x27ab46,\n\t0x29abc8,\n\t0x29b38f,\n\t0x213448,\n\t0x302848,\n\t0x3bd10a,\n\t0x3bd111,\n\t0x2a990e,\n\t0x25654a,\n\t0x25654c,\n\t0x20bf07,\n\t0x238990,\n\t0x200408,\n\t0x2a9b05,\n\t0x2b238a,\n\t0x2028cc,\n\t0x29cf8d,\n\t0x302346,\n\t0x302347,\n\t0x30234c,\n\t0x30c80c,\n\t0x335d4c,\n\t0x2edfcb,\n\t0x28e0c4,\n\t0x22c9c4,\n\t0x354609,\n\t0x39e807,\n\t0x229989,\n\t0x293a49,\n\t0x3b6587,\n\t0x2b4606,\n\t0x2b4609,\n\t0x2b4a03,\n\t0x21b7ca,\n\t0x31fd07,\n\t0x34304b,\n\t0x32120a,\n\t0x2f6744,\n\t0x35f646,\n\t0x286b89,\n\t0x281a04,\n\t0x20324a,\n\t0x3a1205,\n\t0x2c4d45,\n\t0x2c4d4d,\n\t0x2c508e,\n\t0x2b9ac5,\n\t0x32ab86,\n\t0x39ffc7,\n\t0x25f64a,\n\t0x3a8286,\n\t0x2eefc4,\n\t0x2f9847,\n\t0x3bc50b,\n\t0x2fa747,\n\t0x30b444,\n\t0x256fc6,\n\t0x256fcd,\n\t0x2c3f4c,\n\t0x
208d46,\n\t0x33c18a,\n\t0x230206,\n\t0x22ddc8,\n\t0x285107,\n\t0x34c98a,\n\t0x3840c6,\n\t0x210443,\n\t0x210446,\n\t0x3c09c8,\n\t0x2a344a,\n\t0x2801c7,\n\t0x2801c8,\n\t0x289e04,\n\t0x256ac7,\n\t0x283288,\n\t0x345388,\n\t0x284508,\n\t0x35874a,\n\t0x2e4505,\n\t0x2e9a07,\n\t0x256393,\n\t0x343d86,\n\t0x2e0908,\n\t0x229f89,\n\t0x24c488,\n\t0x38600b,\n\t0x2d3d48,\n\t0x2bc644,\n\t0x27a9c6,\n\t0x317ec6,\n\t0x330889,\n\t0x3bc3c7,\n\t0x25f4c8,\n\t0x2931c6,\n\t0x36c244,\n\t0x30aa05,\n\t0x2d4008,\n\t0x2cd88a,\n\t0x2cdb48,\n\t0x2d4b06,\n\t0x2a1d8a,\n\t0x2fc208,\n\t0x2d8fc8,\n\t0x2d9ec8,\n\t0x2da506,\n\t0x2dc3c6,\n\t0x20c0cc,\n\t0x2dc990,\n\t0x285505,\n\t0x213248,\n\t0x30d410,\n\t0x213250,\n\t0x34660e,\n\t0x20bd4e,\n\t0x20bd54,\n\t0x20e78f,\n\t0x20eb46,\n\t0x3072d1,\n\t0x332e13,\n\t0x333288,\n\t0x31d245,\n\t0x2a0bc8,\n\t0x395705,\n\t0x23540c,\n\t0x2309c9,\n\t0x2994c9,\n\t0x230e47,\n\t0x263549,\n\t0x261047,\n\t0x2ffa46,\n\t0x24ffc7,\n\t0x20ef05,\n\t0x217103,\n\t0x20dcc9,\n\t0x22a249,\n\t0x38a783,\n\t0x3b35c4,\n\t0x358c8d,\n\t0x3b83cf,\n\t0x36c285,\n\t0x331786,\n\t0x21ac47,\n\t0x32d007,\n\t0x290806,\n\t0x29080b,\n\t0x2aa805,\n\t0x263c06,\n\t0x300b87,\n\t0x257449,\n\t0x345a06,\n\t0x20cb45,\n\t0x2248cb,\n\t0x230786,\n\t0x38ad45,\n\t0x273988,\n\t0x2a6988,\n\t0x2ba50c,\n\t0x2ba510,\n\t0x2b64c9,\n\t0x2c5607,\n\t0x2e520b,\n\t0x30be86,\n\t0x2eb3ca,\n\t0x2ec90b,\n\t0x2ee70a,\n\t0x2ee986,\n\t0x2ef645,\n\t0x31fa46,\n\t0x37d408,\n\t0x230f0a,\n\t0x35e95c,\n\t0x2f380c,\n\t0x2f3b08,\n\t0x3a03c5,\n\t0x35cec7,\n\t0x25b0c6,\n\t0x27f7c5,\n\t0x2227c6,\n\t0x2909c8,\n\t0x2c2c07,\n\t0x298448,\n\t0x2b04ca,\n\t0x33764c,\n\t0x3378c9,\n\t0x39b5c7,\n\t0x215c04,\n\t0x24ea86,\n\t0x2d518a,\n\t0x293b45,\n\t0x211ecc,\n\t0x212e48,\n\t0x389c88,\n\t0x21904c,\n\t0x2266cc,\n\t0x229549,\n\t0x229787,\n\t0x23ff4c,\n\t0x2454c4,\n\t0x24718a,\n\t0x23354c,\n\t0x279a4b,\n\t0x24bfcb,\n\t0x3821c6,\n\t0x2f7447,\n\t0x20e947,\n\t0x238bcf,\n\t0x303191,\n\t0x2e16d2,\n\t0x314ecd,\n\t0x314ece,\n\t0x31520e,\n\t0x20e948,\n\t0x20e952,\n\t
0x253e08,\n\t0x34ec47,\n\t0x25430a,\n\t0x208b08,\n\t0x27ab05,\n\t0x38c9ca,\n\t0x2255c7,\n\t0x2e6f44,\n\t0x227103,\n\t0x297185,\n\t0x3bd387,\n\t0x2fb547,\n\t0x29d18e,\n\t0x308c8d,\n\t0x30d7c9,\n\t0x21f545,\n\t0x31c443,\n\t0x326446,\n\t0x264085,\n\t0x27dc48,\n\t0x2c0649,\n\t0x2a0105,\n\t0x3ac94f,\n\t0x2b6207,\n\t0x382bc5,\n\t0x37958a,\n\t0x358946,\n\t0x2522c9,\n\t0x37db4c,\n\t0x2fec09,\n\t0x2094c6,\n\t0x26e74c,\n\t0x329f86,\n\t0x3017c8,\n\t0x301c86,\n\t0x312586,\n\t0x2082c4,\n\t0x266643,\n\t0x2b380a,\n\t0x32e411,\n\t0x30650a,\n\t0x265345,\n\t0x271ac7,\n\t0x25c7c7,\n\t0x283384,\n\t0x28338b,\n\t0x2cfb08,\n\t0x2c1486,\n\t0x37e6c5,\n\t0x3b01c4,\n\t0x280ac9,\n\t0x320804,\n\t0x24cd87,\n\t0x359f05,\n\t0x359f07,\n\t0x338ac5,\n\t0x2affc3,\n\t0x34eb08,\n\t0x35f2ca,\n\t0x20b3c3,\n\t0x32d20a,\n\t0x281ec6,\n\t0x3ac6cf,\n\t0x2f4009,\n\t0x2f9410,\n\t0x2ebe48,\n\t0x2d5809,\n\t0x29f087,\n\t0x256f4f,\n\t0x31e884,\n\t0x2de9c4,\n\t0x224f06,\n\t0x317b06,\n\t0x2e2aca,\n\t0x381c46,\n\t0x2ff587,\n\t0x30c148,\n\t0x30c347,\n\t0x30cbc7,\n\t0x30f08a,\n\t0x310b4b,\n\t0x3b1b45,\n\t0x2e1308,\n\t0x204443,\n\t0x2045cc,\n\t0x38000f,\n\t0x274b8d,\n\t0x2aefc7,\n\t0x30d909,\n\t0x2e8207,\n\t0x24f2c8,\n\t0x38aa0c,\n\t0x2bc548,\n\t0x231848,\n\t0x321d0e,\n\t0x336054,\n\t0x336564,\n\t0x354e4a,\n\t0x37018b,\n\t0x261104,\n\t0x261109,\n\t0x3656c8,\n\t0x24ef85,\n\t0x20d60a,\n\t0x3acd47,\n\t0x31f944,\n\t0x39c783,\n\t0x238543,\n\t0x240244,\n\t0x23cac3,\n\t0x323043,\n\t0x231604,\n\t0x255783,\n\t0x28cac3,\n\t0x20c0c6,\n\t0x21bf84,\n\t0x208e83,\n\t0x201a03,\n\t0x221483,\n\t0x207102,\n\t0x39c783,\n\t0x20f882,\n\t0x238543,\n\t0x240244,\n\t0x23cac3,\n\t0x323043,\n\t0x255783,\n\t0x20c0c6,\n\t0x208e83,\n\t0x201a03,\n\t0x16fb88,\n\t0x238543,\n\t0x23cac3,\n\t0x21b583,\n\t0x208e83,\n\t0x1a3443,\n\t0x201a03,\n\t0x16fb88,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x21bf84,\n\t0x208e83,\n\t0x201a03,\n\t0x207102,\n\t0x242043,\n\t0x20f882,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x201382,\n
\t0x235f42,\n\t0x20f882,\n\t0x238543,\n\t0x206902,\n\t0x200942,\n\t0x231604,\n\t0x20f644,\n\t0x22a482,\n\t0x21bf84,\n\t0x200442,\n\t0x201a03,\n\t0x221483,\n\t0x3821c6,\n\t0x21a902,\n\t0x202642,\n\t0x20c4c2,\n\t0x47a13443,\n\t0x47e0bf03,\n\t0x5d306,\n\t0x5d306,\n\t0x286644,\n\t0x200e03,\n\t0x14b700a,\n\t0x12ea0c,\n\t0xf4cc,\n\t0x871cd,\n\t0x131645,\n\t0x26547,\n\t0x1b1c6,\n\t0x21088,\n\t0x23087,\n\t0x28b08,\n\t0x1aa20a,\n\t0x1397c7,\n\t0x48adf485,\n\t0x1359c9,\n\t0x3e34b,\n\t0x35dcb,\n\t0x42e48,\n\t0x172f4a,\n\t0x9288e,\n\t0x144c28b,\n\t0x6a04,\n\t0x63d46,\n\t0x7588,\n\t0xf8d08,\n\t0x3e607,\n\t0x1a787,\n\t0x57f89,\n\t0x81a87,\n\t0xdd088,\n\t0x12f5c9,\n\t0x49804,\n\t0x49f45,\n\t0x12bfce,\n\t0xb084d,\n\t0x8ca48,\n\t0x48e34406,\n\t0x49834408,\n\t0x7b548,\n\t0x11f3d0,\n\t0x5998c,\n\t0x6b9c7,\n\t0x6c647,\n\t0x71387,\n\t0x77fc7,\n\t0x13c42,\n\t0x144ec7,\n\t0x11724c,\n\t0x43b87,\n\t0xac206,\n\t0xac7c9,\n\t0xae208,\n\t0x206c2,\n\t0x942,\n\t0xbee8b,\n\t0x1a3307,\n\t0x18009,\n\t0x164ec9,\n\t0x3ef48,\n\t0xb8042,\n\t0x134649,\n\t0xcc60a,\n\t0xd2689,\n\t0xdfdc9,\n\t0xe0b08,\n\t0xe1b87,\n\t0xe4489,\n\t0xe61c5,\n\t0xe67d0,\n\t0x191646,\n\t0x11205,\n\t0x31e8d,\n\t0x235c6,\n\t0xefd07,\n\t0xf4558,\n\t0x14f508,\n\t0xc74a,\n\t0xb282,\n\t0x5524d,\n\t0xa02,\n\t0x86286,\n\t0x95408,\n\t0x8f148,\n\t0x16fa49,\n\t0x586c8,\n\t0x6420e,\n\t0x126447,\n\t0x1051cd,\n\t0xfb445,\n\t0x144c48,\n\t0x19fc08,\n\t0x106046,\n\t0xc2,\n\t0x12cf06,\n\t0x4542,\n\t0x341,\n\t0x65a07,\n\t0xf6fc3,\n\t0x492f4dc4,\n\t0x4969c243,\n\t0x141,\n\t0x19d06,\n\t0x141,\n\t0x1,\n\t0x19d06,\n\t0xf6fc3,\n\t0x1402285,\n\t0x252044,\n\t0x238543,\n\t0x253384,\n\t0x231604,\n\t0x208e83,\n\t0x229e45,\n\t0x221f43,\n\t0x20c843,\n\t0x355685,\n\t0x202443,\n\t0x4aa38543,\n\t0x23cac3,\n\t0x323043,\n\t0x200041,\n\t0x28cac3,\n\t0x20f644,\n\t0x21bf84,\n\t0x208e83,\n\t0x201a03,\n\t0x215443,\n\t0x16fb88,\n\t0x207102,\n\t0x39c783,\n\t0x20f882,\n\t0x238543,\n\t0x23cac3,\n\t0x21b583,\n\t0x200942,\n\t0x231604,\n\t0x255783,\n\t0x28cac3,\n\t0x208e83,\n\
t0x200e03,\n\t0x201a03,\n\t0x202443,\n\t0x16fb88,\n\t0x37fd82,\n\t0x18c1c7,\n\t0xf882,\n\t0x10a985,\n\t0x1480cc8,\n\t0x10c50e,\n\t0x4ba0ab02,\n\t0x31fec8,\n\t0x2bdd86,\n\t0x2ca186,\n\t0x2bd707,\n\t0x4be00b42,\n\t0x4c3ac548,\n\t0x21870a,\n\t0x26b448,\n\t0x200242,\n\t0x31fb49,\n\t0x3b1b87,\n\t0x21ec06,\n\t0x34e849,\n\t0x2e9b44,\n\t0x348646,\n\t0x2ca584,\n\t0x27f584,\n\t0x25f009,\n\t0x32d906,\n\t0x240ac5,\n\t0x297a85,\n\t0x3b9d87,\n\t0x2c76c7,\n\t0x2979c4,\n\t0x2bd946,\n\t0x307b85,\n\t0x30a3c5,\n\t0x3a2a05,\n\t0x339407,\n\t0x378a05,\n\t0x31ddc9,\n\t0x234fc5,\n\t0x32f404,\n\t0x3a81c7,\n\t0x341b0e,\n\t0x306bc9,\n\t0x338749,\n\t0x388d86,\n\t0x24a608,\n\t0x36ae4b,\n\t0x2b698c,\n\t0x33ea46,\n\t0x2e5ac7,\n\t0x212245,\n\t0x38d20a,\n\t0x2b7389,\n\t0x209b49,\n\t0x259f06,\n\t0x300945,\n\t0x2edac5,\n\t0x3570c9,\n\t0x3a2b8b,\n\t0x27e286,\n\t0x3471c6,\n\t0x20de04,\n\t0x2943c6,\n\t0x24e1c8,\n\t0x3c0846,\n\t0x215006,\n\t0x205fc8,\n\t0x2092c7,\n\t0x209909,\n\t0x211385,\n\t0x16fb88,\n\t0x21a704,\n\t0x2394c4,\n\t0x201105,\n\t0x3a6649,\n\t0x228f87,\n\t0x228f8b,\n\t0x22b3ca,\n\t0x230905,\n\t0x4c612842,\n\t0x342f07,\n\t0x4ca30c08,\n\t0x3578c7,\n\t0x2c3d45,\n\t0x209dca,\n\t0xf882,\n\t0x2be6cb,\n\t0x255e0a,\n\t0x22a146,\n\t0x216383,\n\t0x2a038d,\n\t0x3572cc,\n\t0x357a4d,\n\t0x250545,\n\t0x334fc5,\n\t0x20db47,\n\t0x36c689,\n\t0x218606,\n\t0x381ac5,\n\t0x2d2b88,\n\t0x2942c3,\n\t0x2fa288,\n\t0x2942c8,\n\t0x2cb287,\n\t0x314808,\n\t0x3b49c9,\n\t0x374847,\n\t0x342707,\n\t0x202108,\n\t0x2d1c84,\n\t0x2d1c87,\n\t0x26fdc8,\n\t0x355546,\n\t0x3b874f,\n\t0x226207,\n\t0x2eb686,\n\t0x2298c5,\n\t0x22a8c3,\n\t0x381947,\n\t0x37cc43,\n\t0x252886,\n\t0x254006,\n\t0x254706,\n\t0x298b85,\n\t0x26e2c3,\n\t0x397cc8,\n\t0x37f889,\n\t0x3920cb,\n\t0x254888,\n\t0x255985,\n\t0x2584c5,\n\t0x4cef6802,\n\t0x250089,\n\t0x34eec7,\n\t0x263c85,\n\t0x25ef07,\n\t0x260506,\n\t0x374345,\n\t0x263ecb,\n\t0x2662c4,\n\t0x26b005,\n\t0x26b147,\n\t0x27db86,\n\t0x27e045,\n\t0x289a47,\n\t0x28a187,\n\t0x2d5104,\n\t0x291b8a,\n\t0x292048,\n\t0
x2cee09,\n\t0x2a0f05,\n\t0x3bf1c6,\n\t0x24e38a,\n\t0x2be906,\n\t0x26f2c7,\n\t0x2ceacd,\n\t0x2aa349,\n\t0x396fc5,\n\t0x339f07,\n\t0x333448,\n\t0x25a5c8,\n\t0x332847,\n\t0x358246,\n\t0x21cb87,\n\t0x253c43,\n\t0x34b1c4,\n\t0x371cc5,\n\t0x39d947,\n\t0x3a2409,\n\t0x231b08,\n\t0x34cbc5,\n\t0x23bac4,\n\t0x254a45,\n\t0x256c4d,\n\t0x2006c2,\n\t0x230386,\n\t0x2861c6,\n\t0x2e654a,\n\t0x3904c6,\n\t0x39ab85,\n\t0x342445,\n\t0x342447,\n\t0x3afd0c,\n\t0x27b3ca,\n\t0x294086,\n\t0x28ad05,\n\t0x294206,\n\t0x294547,\n\t0x296886,\n\t0x298a8c,\n\t0x34e989,\n\t0x4d21a187,\n\t0x29b745,\n\t0x29b746,\n\t0x29bcc8,\n\t0x246f85,\n\t0x2ab085,\n\t0x2ab808,\n\t0x2aba0a,\n\t0x4d6335c2,\n\t0x4da14d02,\n\t0x2e76c5,\n\t0x2eb603,\n\t0x243408,\n\t0x252403,\n\t0x2abc84,\n\t0x25240b,\n\t0x36b208,\n\t0x2daa48,\n\t0x4df3b049,\n\t0x2afc09,\n\t0x2b0746,\n\t0x2b1c08,\n\t0x2b1e09,\n\t0x2b2886,\n\t0x2b2a05,\n\t0x3944c6,\n\t0x2b2f49,\n\t0x389347,\n\t0x2647c6,\n\t0x2de087,\n\t0x218487,\n\t0x2dd9c4,\n\t0x4e34f809,\n\t0x2d32c8,\n\t0x3ac448,\n\t0x3932c7,\n\t0x2cd346,\n\t0x36c489,\n\t0x2ca847,\n\t0x32598a,\n\t0x358388,\n\t0x208387,\n\t0x208f86,\n\t0x271d8a,\n\t0x26fbc8,\n\t0x2ed485,\n\t0x230685,\n\t0x2ef1c7,\n\t0x311cc9,\n\t0x30150b,\n\t0x31a308,\n\t0x235049,\n\t0x254c87,\n\t0x2bd04c,\n\t0x2bfccc,\n\t0x2bffca,\n\t0x2c024c,\n\t0x2ca108,\n\t0x2ca308,\n\t0x2ca504,\n\t0x2caa09,\n\t0x2cac49,\n\t0x2cae8a,\n\t0x2cb109,\n\t0x2cb447,\n\t0x3ba98c,\n\t0x23f586,\n\t0x2cbf88,\n\t0x2be9c6,\n\t0x387486,\n\t0x396ec7,\n\t0x306dc8,\n\t0x3445cb,\n\t0x28e307,\n\t0x250289,\n\t0x350b89,\n\t0x253507,\n\t0x2771c4,\n\t0x271c07,\n\t0x2fda46,\n\t0x21d8c6,\n\t0x33c345,\n\t0x297248,\n\t0x2993c4,\n\t0x2993c6,\n\t0x27b28b,\n\t0x21bac9,\n\t0x36c886,\n\t0x204bc9,\n\t0x339586,\n\t0x25f1c8,\n\t0x211b83,\n\t0x300ac5,\n\t0x219b09,\n\t0x21da05,\n\t0x2fba44,\n\t0x27d046,\n\t0x2fd385,\n\t0x299906,\n\t0x310ec7,\n\t0x33a986,\n\t0x3b134b,\n\t0x237587,\n\t0x241646,\n\t0x354786,\n\t0x3b9e46,\n\t0x297989,\n\t0x25384a,\n\t0x2bbb85,\n\t0x2202cd,\n\t0x2abb06,\n\t0x
204a86,\n\t0x2f3f06,\n\t0x22dd45,\n\t0x2e6ac7,\n\t0x300087,\n\t0x2e7dce,\n\t0x28cac3,\n\t0x2cd309,\n\t0x210c89,\n\t0x38d607,\n\t0x364207,\n\t0x2a5bc5,\n\t0x2b57c5,\n\t0x4e63470f,\n\t0x2d5a47,\n\t0x2d5c08,\n\t0x2d6144,\n\t0x2d7106,\n\t0x4ea4ea42,\n\t0x2da786,\n\t0x20c0c6,\n\t0x210e4e,\n\t0x2fa0ca,\n\t0x273b06,\n\t0x23398a,\n\t0x211689,\n\t0x32b385,\n\t0x3a4808,\n\t0x3bca06,\n\t0x306748,\n\t0x33aac8,\n\t0x2194cb,\n\t0x2bd805,\n\t0x378a88,\n\t0x20610c,\n\t0x2c3c07,\n\t0x254246,\n\t0x2fd1c8,\n\t0x3488c8,\n\t0x4ee06802,\n\t0x23588b,\n\t0x2123c9,\n\t0x205549,\n\t0x2174c7,\n\t0x223408,\n\t0x4f36bec8,\n\t0x38ffcb,\n\t0x23edc9,\n\t0x338f0d,\n\t0x27fa88,\n\t0x22b1c8,\n\t0x4f6014c2,\n\t0x203cc4,\n\t0x4fa19302,\n\t0x2fe206,\n\t0x4fe004c2,\n\t0x261b8a,\n\t0x2199c6,\n\t0x232808,\n\t0x2c6f48,\n\t0x2b6f06,\n\t0x22fe46,\n\t0x2f9186,\n\t0x2b5a45,\n\t0x2443c4,\n\t0x50206d04,\n\t0x214106,\n\t0x29c747,\n\t0x50620c47,\n\t0x2d644b,\n\t0x341ec9,\n\t0x33500a,\n\t0x2106c4,\n\t0x342588,\n\t0x26458d,\n\t0x2f2489,\n\t0x2f26c8,\n\t0x2f2d49,\n\t0x2f4544,\n\t0x245884,\n\t0x285cc5,\n\t0x320fcb,\n\t0x36b186,\n\t0x34b905,\n\t0x2279c9,\n\t0x2bda08,\n\t0x210dc4,\n\t0x38d389,\n\t0x2064c5,\n\t0x2c7708,\n\t0x342dc7,\n\t0x338b48,\n\t0x286d86,\n\t0x233207,\n\t0x29a989,\n\t0x224a49,\n\t0x38adc5,\n\t0x34dfc5,\n\t0x50a08402,\n\t0x32f1c4,\n\t0x2fdd45,\n\t0x2ce506,\n\t0x33bd05,\n\t0x387e47,\n\t0x214205,\n\t0x27dbc4,\n\t0x388e46,\n\t0x381b47,\n\t0x23d046,\n\t0x2c41c5,\n\t0x207f48,\n\t0x2bdf85,\n\t0x211a07,\n\t0x214689,\n\t0x21bc0a,\n\t0x2fc487,\n\t0x2fc48c,\n\t0x240a86,\n\t0x37e349,\n\t0x246a45,\n\t0x246ec8,\n\t0x207c03,\n\t0x216d85,\n\t0x2fd705,\n\t0x282d47,\n\t0x50e06ac2,\n\t0x22f647,\n\t0x2e56c6,\n\t0x373b46,\n\t0x30bfc6,\n\t0x348806,\n\t0x206748,\n\t0x2a0d05,\n\t0x2eb747,\n\t0x2eb74d,\n\t0x227103,\n\t0x227105,\n\t0x379347,\n\t0x22f988,\n\t0x378f05,\n\t0x2216c8,\n\t0x37ccc6,\n\t0x335b87,\n\t0x2cbec5,\n\t0x2bd886,\n\t0x39ce45,\n\t0x21c70a,\n\t0x2f1346,\n\t0x383f47,\n\t0x2bca85,\n\t0x2f5047,\n\t0x2f97c4,\n\t0x2f
b9c6,\n\t0x2fe345,\n\t0x32d70b,\n\t0x2fd8c9,\n\t0x24214a,\n\t0x38ae48,\n\t0x30e048,\n\t0x380a8c,\n\t0x3964c7,\n\t0x3054c8,\n\t0x307f48,\n\t0x3084c5,\n\t0x311a8a,\n\t0x31c449,\n\t0x51200d02,\n\t0x201886,\n\t0x216044,\n\t0x216049,\n\t0x27d549,\n\t0x27e9c7,\n\t0x2b4e07,\n\t0x2938c9,\n\t0x22df48,\n\t0x22df4f,\n\t0x2e3a06,\n\t0x2df14b,\n\t0x34b445,\n\t0x34b447,\n\t0x368849,\n\t0x21aa46,\n\t0x38d307,\n\t0x2e1a45,\n\t0x23ae84,\n\t0x284fc6,\n\t0x2262c4,\n\t0x2db107,\n\t0x2d6f08,\n\t0x51700848,\n\t0x301245,\n\t0x301387,\n\t0x260a09,\n\t0x205304,\n\t0x24b348,\n\t0x51ab7cc8,\n\t0x283384,\n\t0x23c208,\n\t0x332d44,\n\t0x22be49,\n\t0x351a45,\n\t0x51e05082,\n\t0x2e3a45,\n\t0x310045,\n\t0x20fc48,\n\t0x23d747,\n\t0x52200d42,\n\t0x3322c5,\n\t0x2d8e46,\n\t0x27cb06,\n\t0x32f188,\n\t0x337d48,\n\t0x33bcc6,\n\t0x34bb06,\n\t0x38c289,\n\t0x373a86,\n\t0x21a90b,\n\t0x2e5f85,\n\t0x208a46,\n\t0x29e108,\n\t0x3a0a06,\n\t0x322c46,\n\t0x221b8a,\n\t0x23b30a,\n\t0x2498c5,\n\t0x2a0dc7,\n\t0x313646,\n\t0x52606442,\n\t0x379487,\n\t0x266cc5,\n\t0x24e304,\n\t0x24e305,\n\t0x2105c6,\n\t0x278fc7,\n\t0x215dc5,\n\t0x23b484,\n\t0x2c4788,\n\t0x322d05,\n\t0x3af347,\n\t0x3b6dc5,\n\t0x21c645,\n\t0x258f84,\n\t0x2ee209,\n\t0x3079c8,\n\t0x263146,\n\t0x2b5386,\n\t0x345186,\n\t0x52b08148,\n\t0x308347,\n\t0x30874d,\n\t0x3090cc,\n\t0x3096c9,\n\t0x309909,\n\t0x52f67742,\n\t0x3b6343,\n\t0x215ac3,\n\t0x2fdb05,\n\t0x39da4a,\n\t0x32f046,\n\t0x30e2c5,\n\t0x311084,\n\t0x31108b,\n\t0x323a8c,\n\t0x3244cc,\n\t0x3247d5,\n\t0x32660d,\n\t0x327d0f,\n\t0x3280d2,\n\t0x32854f,\n\t0x328912,\n\t0x328d93,\n\t0x32924d,\n\t0x32980d,\n\t0x329b8e,\n\t0x32a10e,\n\t0x32a94c,\n\t0x32ad0c,\n\t0x32b14b,\n\t0x32b4ce,\n\t0x32c612,\n\t0x32ee0c,\n\t0x32fd90,\n\t0x33cd52,\n\t0x33d9cc,\n\t0x33e08d,\n\t0x33e3cc,\n\t0x3406d1,\n\t0x34734d,\n\t0x349e0d,\n\t0x34a40a,\n\t0x34a68c,\n\t0x34af8c,\n\t0x34b60c,\n\t0x34c20c,\n\t0x3523d3,\n\t0x352cd0,\n\t0x3530d0,\n\t0x35398d,\n\t0x353f8c,\n\t0x354b89,\n\t0x35690d,\n\t0x356c53,\n\t0x3595d1,\n\t0x359a13,\n\t0x35a0cf,\n\
t0x35a48c,\n\t0x35a78f,\n\t0x35ab4d,\n\t0x35b14f,\n\t0x35b510,\n\t0x35bf8e,\n\t0x35f88e,\n\t0x35fe10,\n\t0x36150d,\n\t0x361e8e,\n\t0x36220c,\n\t0x363213,\n\t0x3658ce,\n\t0x365f50,\n\t0x366351,\n\t0x36678f,\n\t0x366b53,\n\t0x3672cd,\n\t0x36760f,\n\t0x3679ce,\n\t0x368090,\n\t0x368489,\n\t0x369210,\n\t0x36980f,\n\t0x369e8f,\n\t0x36a252,\n\t0x36dcce,\n\t0x36e7cd,\n\t0x36f00d,\n\t0x36f34d,\n\t0x37078d,\n\t0x370acd,\n\t0x370e10,\n\t0x37120b,\n\t0x371a8c,\n\t0x371e0c,\n\t0x37240c,\n\t0x37270e,\n\t0x382350,\n\t0x384512,\n\t0x38498b,\n\t0x384e8e,\n\t0x38520e,\n\t0x386dce,\n\t0x38724b,\n\t0x53388016,\n\t0x38988d,\n\t0x38a014,\n\t0x38b04d,\n\t0x38cd55,\n\t0x38e30d,\n\t0x38ec8f,\n\t0x38f4cf,\n\t0x39238f,\n\t0x39274e,\n\t0x392ccd,\n\t0x394091,\n\t0x39668c,\n\t0x39698c,\n\t0x396c8b,\n\t0x39710c,\n\t0x3974cf,\n\t0x397892,\n\t0x39824d,\n\t0x39974c,\n\t0x399bcc,\n\t0x399ecd,\n\t0x39a20f,\n\t0x39a5ce,\n\t0x39d70c,\n\t0x39dccd,\n\t0x39e00b,\n\t0x39e9cc,\n\t0x39f2cd,\n\t0x39f60e,\n\t0x39f989,\n\t0x3a1353,\n\t0x3a188d,\n\t0x3a1bcd,\n\t0x3a21cc,\n\t0x3a264e,\n\t0x3a37cf,\n\t0x3a3b8c,\n\t0x3a3e8d,\n\t0x3a41cf,\n\t0x3a458c,\n\t0x3a508c,\n\t0x3a550c,\n\t0x3a580c,\n\t0x3a5ecd,\n\t0x3a6212,\n\t0x3a688c,\n\t0x3a6b8c,\n\t0x3a6e91,\n\t0x3a72cf,\n\t0x3a768f,\n\t0x3a7a53,\n\t0x3a8a0e,\n\t0x3a8d8f,\n\t0x3a914c,\n\t0x537a948e,\n\t0x3a980f,\n\t0x3a9bd6,\n\t0x3aaa92,\n\t0x3acf0c,\n\t0x3ada0f,\n\t0x3ae08d,\n\t0x3ae3cf,\n\t0x3ae78c,\n\t0x3aea8d,\n\t0x3aedcd,\n\t0x3b084e,\n\t0x3b228c,\n\t0x3b258c,\n\t0x3b2890,\n\t0x3b57d1,\n\t0x3b5c0b,\n\t0x3b5f4c,\n\t0x3b624e,\n\t0x3b7211,\n\t0x3b764e,\n\t0x3b79cd,\n\t0x3bc7cb,\n\t0x3bd88f,\n\t0x3be394,\n\t0x210642,\n\t0x210642,\n\t0x204d43,\n\t0x210642,\n\t0x204d43,\n\t0x210642,\n\t0x2009c2,\n\t0x394505,\n\t0x3b6f0c,\n\t0x210642,\n\t0x210642,\n\t0x2009c2,\n\t0x210642,\n\t0x29c345,\n\t0x21bc05,\n\t0x210642,\n\t0x210642,\n\t0x201102,\n\t0x29c345,\n\t0x326b49,\n\t0x3592cc,\n\t0x210642,\n\t0x210642,\n\t0x210642,\n\t0x210642,\n\t0x394505,\n\t0x210642,\n\t0x210642,\n\t0x2106
42,\n\t0x210642,\n\t0x201102,\n\t0x326b49,\n\t0x210642,\n\t0x210642,\n\t0x210642,\n\t0x21bc05,\n\t0x210642,\n\t0x21bc05,\n\t0x3592cc,\n\t0x3b6f0c,\n\t0x39c783,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x231604,\n\t0x208e83,\n\t0x201a03,\n\t0xe008,\n\t0x64344,\n\t0xe03,\n\t0xc63c8,\n\t0x207102,\n\t0x5460f882,\n\t0x24ac83,\n\t0x23f044,\n\t0x2020c3,\n\t0x39e544,\n\t0x23a1c6,\n\t0x216f83,\n\t0x304704,\n\t0x2d7b05,\n\t0x28cac3,\n\t0x208e83,\n\t0x1a3443,\n\t0x201a03,\n\t0x243d0a,\n\t0x3821c6,\n\t0x38558c,\n\t0x16fb88,\n\t0x20f882,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x229443,\n\t0x20c0c6,\n\t0x208e83,\n\t0x201a03,\n\t0x221483,\n\t0xac408,\n\t0x131645,\n\t0x35f09,\n\t0x35c2,\n\t0x55b95645,\n\t0x26547,\n\t0xba9c8,\n\t0x14b0e,\n\t0x90212,\n\t0x10a78b,\n\t0x1398c6,\n\t0x55edf485,\n\t0x562df48c,\n\t0x148f87,\n\t0x36dc7,\n\t0x15000a,\n\t0x46690,\n\t0x13b345,\n\t0xb610b,\n\t0xf8d08,\n\t0x3e607,\n\t0x3af8b,\n\t0x57f89,\n\t0x185a87,\n\t0x81a87,\n\t0x7e4c7,\n\t0x3e546,\n\t0xdd088,\n\t0x56824386,\n\t0xb084d,\n\t0x14f9d0,\n\t0x56c0c182,\n\t0x8ca48,\n\t0x4f450,\n\t0x15090c,\n\t0x5735cd4d,\n\t0x64a88,\n\t0x721c7,\n\t0x76f09,\n\t0x5d3c6,\n\t0x9bec8,\n\t0x351c2,\n\t0xa808a,\n\t0x293c7,\n\t0x43b87,\n\t0xac7c9,\n\t0xae208,\n\t0x8b205,\n\t0xd538e,\n\t0x5c4e,\n\t0x17a8f,\n\t0x18009,\n\t0x164ec9,\n\t0x15d38b,\n\t0x7ba8f,\n\t0xee40c,\n\t0xa88cb,\n\t0xc8b48,\n\t0xd6347,\n\t0xdbe88,\n\t0xfe78b,\n\t0xff34c,\n\t0x10038c,\n\t0x1037cc,\n\t0x10b54d,\n\t0x3ef48,\n\t0xd2942,\n\t0x134649,\n\t0x195d8b,\n\t0xcd546,\n\t0x11f30b,\n\t0xe118a,\n\t0xe1d45,\n\t0xe67d0,\n\t0xe9f06,\n\t0x16b986,\n\t0x11205,\n\t0x10fc48,\n\t0xefd07,\n\t0xeffc7,\n\t0x8d047,\n\t0xfe04a,\n\t0xba84a,\n\t0x86286,\n\t0x99d0d,\n\t0x8f148,\n\t0x586c8,\n\t0x58ec9,\n\t0xbc8c5,\n\t0x1ad70c,\n\t0x10b74b,\n\t0x19e604,\n\t0x105e09,\n\t0x106046,\n\t0x16546,\n\t0x2642,\n\t0x12cf06,\n\t0xc68b,\n\t0x112707,\n\t0x4542,\n\t0xd1305,\n\t0x2e604,\n\t0x8c1,\n\t0x52d03,\n\t0x56764886,\n\t0x9c243,\n\t0x7b02,\n\t0x293c4,\n\t0x242,\n\t0x86644,\n\t0xf8
2,\n\t0x6502,\n\t0x3302,\n\t0xd342,\n\t0x1382,\n\t0xdf482,\n\t0x8c2,\n\t0x22902,\n\t0x40e82,\n\t0x1a442,\n\t0x4c82,\n\t0x234c2,\n\t0x3cac3,\n\t0x6b82,\n\t0x1842,\n\t0x7602,\n\t0x6b02,\n\t0x17202,\n\t0x36d02,\n\t0x206c2,\n\t0xc442,\n\t0x1c82,\n\t0x942,\n\t0x55783,\n\t0x4182,\n\t0x2542,\n\t0xb8042,\n\t0x9a02,\n\t0x282,\n\t0x2942,\n\t0xd842,\n\t0xc202,\n\t0x4a82,\n\t0x182842,\n\t0x745c2,\n\t0xe82,\n\t0x8e83,\n\t0x1942,\n\t0x6802,\n\t0x982,\n\t0x5b82,\n\t0x18ad45,\n\t0x7082,\n\t0x2fa42,\n\t0x13ebc3,\n\t0x482,\n\t0xb282,\n\t0xa02,\n\t0x2502,\n\t0x6742,\n\t0xd42,\n\t0xc2,\n\t0x2642,\n\t0x35dc5,\n\t0x17f087,\n\t0x20d0c3,\n\t0x207102,\n\t0x238543,\n\t0x23cac3,\n\t0x21b583,\n\t0x2046c3,\n\t0x229443,\n\t0x208e83,\n\t0x200e03,\n\t0x201a03,\n\t0x29c283,\n\t0x10c3,\n\t0x16fb88,\n\t0x238543,\n\t0x23cac3,\n\t0x21b583,\n\t0x28cac3,\n\t0x208e83,\n\t0x200e03,\n\t0x1a3443,\n\t0x201a03,\n\t0x238543,\n\t0x23cac3,\n\t0x201a03,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x200041,\n\t0x28cac3,\n\t0x208e83,\n\t0x21b543,\n\t0x201a03,\n\t0x146f44,\n\t0x39c783,\n\t0x238543,\n\t0x23cac3,\n\t0x26eac3,\n\t0x21b583,\n\t0x207b03,\n\t0x289303,\n\t0x219983,\n\t0x241503,\n\t0x323043,\n\t0x231604,\n\t0x208e83,\n\t0x201a03,\n\t0x202443,\n\t0x333cc4,\n\t0x251183,\n\t0x3ec3,\n\t0x3c0943,\n\t0x20a3c8,\n\t0x271dc4,\n\t0x2cf30a,\n\t0x2bed86,\n\t0x112384,\n\t0x3a7ec7,\n\t0x226cca,\n\t0x2e38c9,\n\t0x3b7f87,\n\t0x3be84a,\n\t0x39c783,\n\t0x2e774b,\n\t0x28b689,\n\t0x345285,\n\t0x2da5c7,\n\t0xf882,\n\t0x238543,\n\t0x21a447,\n\t0x2379c5,\n\t0x2ca689,\n\t0x23cac3,\n\t0x2bd606,\n\t0x2c9883,\n\t0xe5743,\n\t0x110646,\n\t0xd386,\n\t0x16f07,\n\t0x21af86,\n\t0x222985,\n\t0x3a3147,\n\t0x2de5c7,\n\t0x59b23043,\n\t0x33dc07,\n\t0x374703,\n\t0x3b5045,\n\t0x231604,\n\t0x231308,\n\t0x366fcc,\n\t0x2b4fc5,\n\t0x2aa4c6,\n\t0x21a307,\n\t0x39b687,\n\t0x23dfc7,\n\t0x23f108,\n\t0x30f50f,\n\t0x2e3b05,\n\t0x24ad87,\n\t0x33acc7,\n\t0x2abdca,\n\t0x2d29c9,\n\t0x39e6c5,\n\t0x31078a,\n\t0xc546,\n\t0x2c9905,\n\t0x3703c4,\n\t0x2c6e86,\n\t0x300e0
7,\n\t0x2d2847,\n\t0x306908,\n\t0x217645,\n\t0x2378c6,\n\t0x214f85,\n\t0x2e8105,\n\t0x21ba04,\n\t0x2b6e07,\n\t0x20658a,\n\t0x34d908,\n\t0x367f06,\n\t0x29443,\n\t0x2e4505,\n\t0x26bf86,\n\t0x3babc6,\n\t0x211106,\n\t0x28cac3,\n\t0x3984c7,\n\t0x33ac45,\n\t0x208e83,\n\t0x2e144d,\n\t0x200e03,\n\t0x306a08,\n\t0x3b3644,\n\t0x310945,\n\t0x2abcc6,\n\t0x23f386,\n\t0x208947,\n\t0x2aed47,\n\t0x26f045,\n\t0x201a03,\n\t0x20a147,\n\t0x277089,\n\t0x36bbc9,\n\t0x227f4a,\n\t0x235d82,\n\t0x3b5004,\n\t0x2eb2c4,\n\t0x344487,\n\t0x22f508,\n\t0x2f0889,\n\t0x226fc9,\n\t0x2f1ac7,\n\t0x28bb46,\n\t0xf3006,\n\t0x2f4544,\n\t0x2f4b4a,\n\t0x2f8248,\n\t0x2f9049,\n\t0x2c4bc6,\n\t0x2b9545,\n\t0x34d7c8,\n\t0x2cdc4a,\n\t0x20ec43,\n\t0x333e46,\n\t0x2f1bc7,\n\t0x225f45,\n\t0x3b3505,\n\t0x3a04c3,\n\t0x231944,\n\t0x230645,\n\t0x28a287,\n\t0x307b05,\n\t0x2ef086,\n\t0x103d45,\n\t0x273bc3,\n\t0x273bc9,\n\t0x26c04c,\n\t0x2a2b4c,\n\t0x2d8648,\n\t0x284187,\n\t0x301e08,\n\t0x30214a,\n\t0x302fcb,\n\t0x28b7c8,\n\t0x23ec48,\n\t0x23f486,\n\t0x345045,\n\t0x34624a,\n\t0x228cc5,\n\t0x205082,\n\t0x2cbd87,\n\t0x29f806,\n\t0x368d45,\n\t0x304209,\n\t0x281405,\n\t0x3716c5,\n\t0x218ac9,\n\t0x388a46,\n\t0x204448,\n\t0x332643,\n\t0x217186,\n\t0x27cf86,\n\t0x311f05,\n\t0x311f09,\n\t0x2f0fc9,\n\t0x27a3c7,\n\t0x114204,\n\t0x314207,\n\t0x226ec9,\n\t0x23f805,\n\t0x444c8,\n\t0x39c485,\n\t0x341a05,\n\t0x3911c9,\n\t0x20cac2,\n\t0x2628c4,\n\t0x200882,\n\t0x204182,\n\t0x30e985,\n\t0x312108,\n\t0x2bc805,\n\t0x2cb603,\n\t0x2cb605,\n\t0x2da983,\n\t0x2162c2,\n\t0x383c84,\n\t0x2fc183,\n\t0x20cb42,\n\t0x341504,\n\t0x2ec043,\n\t0x206682,\n\t0x28cfc3,\n\t0x295384,\n\t0x2eae03,\n\t0x2f6584,\n\t0x204242,\n\t0x221383,\n\t0x219c43,\n\t0x206182,\n\t0x332182,\n\t0x2f0e09,\n\t0x204382,\n\t0x290d84,\n\t0x201f82,\n\t0x34d644,\n\t0x28bb04,\n\t0x2c0d84,\n\t0x202642,\n\t0x23e882,\n\t0x229703,\n\t0x302d83,\n\t0x24a9c4,\n\t0x28a404,\n\t0x2f1d44,\n\t0x2f8404,\n\t0x315743,\n\t0x224183,\n\t0x20c4c4,\n\t0x315584,\n\t0x315d86,\n\t0x232ec2,\n\t0x20f882,\n\t0x23cac3
,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x207102,\n\t0x39c783,\n\t0x238543,\n\t0x23cac3,\n\t0x201843,\n\t0x323043,\n\t0x231604,\n\t0x2f10c4,\n\t0x21bf84,\n\t0x208e83,\n\t0x201a03,\n\t0x221483,\n\t0x2f5204,\n\t0x31fe83,\n\t0x2c37c3,\n\t0x359e44,\n\t0x39c286,\n\t0x211c43,\n\t0x36dc7,\n\t0x21f243,\n\t0x202103,\n\t0x2b8d83,\n\t0x263a43,\n\t0x229443,\n\t0x3321c5,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x216403,\n\t0x239043,\n\t0x16fb88,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x255783,\n\t0x208e83,\n\t0x2464c4,\n\t0x1a3443,\n\t0x201a03,\n\t0x25b0c4,\n\t0x2c6c85,\n\t0x36dc7,\n\t0x20f882,\n\t0x201742,\n\t0x207b02,\n\t0x204d42,\n\t0xe03,\n\t0x200442,\n\t0x238543,\n\t0x240244,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x16fb88,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x21bf84,\n\t0x208e83,\n\t0xe03,\n\t0x201a03,\n\t0x215443,\n\t0x286644,\n\t0x16fb88,\n\t0x238543,\n\t0x200e03,\n\t0x10c3,\n\t0x13e8c4,\n\t0x252044,\n\t0x16fb88,\n\t0x238543,\n\t0x253384,\n\t0x231604,\n\t0x200e03,\n\t0x2014c2,\n\t0x201a03,\n\t0x20c843,\n\t0x31944,\n\t0x355685,\n\t0x205082,\n\t0x3156c3,\n\t0x145c49,\n\t0xdfb46,\n\t0x19c588,\n\t0x207102,\n\t0x16fb88,\n\t0x20f882,\n\t0x23cac3,\n\t0x323043,\n\t0x200942,\n\t0xe03,\n\t0x201a03,\n\t0x207102,\n\t0x1bea07,\n\t0x1370c9,\n\t0x3dc3,\n\t0x16fb88,\n\t0xd303,\n\t0x5db4c807,\n\t0x38543,\n\t0x1788,\n\t0x23cac3,\n\t0x323043,\n\t0x186c46,\n\t0x255783,\n\t0xe8888,\n\t0xc9148,\n\t0x3fbc6,\n\t0x28cac3,\n\t0xd30c8,\n\t0x187ec3,\n\t0xe8a85,\n\t0x3ccc7,\n\t0x8e83,\n\t0x63c3,\n\t0x1a03,\n\t0xcb02,\n\t0x17044a,\n\t0x10ea43,\n\t0x313e44,\n\t0x10f30b,\n\t0x10f8c8,\n\t0x95e02,\n\t0x207102,\n\t0x20f882,\n\t0x238543,\n\t0x23cac3,\n\t0x2de944,\n\t0x323043,\n\t0x255783,\n\t0x28cac3,\n\t0x208e83,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x229443,\n\t0x208e83,\n\t0x201a03,\n\t0x236903,\n\t0x215443,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x
208e83,\n\t0x201a03,\n\t0x10c3,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x231604,\n\t0x229443,\n\t0x208e83,\n\t0x201a03,\n\t0x21a902,\n\t0x200141,\n\t0x207102,\n\t0x200001,\n\t0x327e02,\n\t0x16fb88,\n\t0x224c85,\n\t0x2008c1,\n\t0x38543,\n\t0x201781,\n\t0x200301,\n\t0x200081,\n\t0x2ac602,\n\t0x37cc44,\n\t0x394483,\n\t0x200181,\n\t0x200401,\n\t0x200041,\n\t0x200101,\n\t0x2ea547,\n\t0x2ec54f,\n\t0x2fbc06,\n\t0x200281,\n\t0x33e906,\n\t0x200801,\n\t0x200981,\n\t0x306f8e,\n\t0x200441,\n\t0x201a03,\n\t0x204101,\n\t0x258885,\n\t0x20cb02,\n\t0x3a03c5,\n\t0x200341,\n\t0x200741,\n\t0x2002c1,\n\t0x205082,\n\t0x2000c1,\n\t0x200201,\n\t0x200c81,\n\t0x2005c1,\n\t0x204541,\n\t0x16fb88,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x221f43,\n\t0x238543,\n\t0x323043,\n\t0x95d48,\n\t0x28cac3,\n\t0x208e83,\n\t0x31483,\n\t0x201a03,\n\t0x14eec08,\n\t0x16308,\n\t0x16fb88,\n\t0xe03,\n\t0x8e444,\n\t0x4ec04,\n\t0x14eec0a,\n\t0x16fb88,\n\t0x1a3443,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x208e83,\n\t0x201a03,\n\t0x203ec3,\n\t0x16fb88,\n\t0x238543,\n\t0x23cac3,\n\t0x2de944,\n\t0x201a03,\n\t0x22d585,\n\t0x35f2c4,\n\t0x238543,\n\t0x208e83,\n\t0x201a03,\n\t0x1f40a,\n\t0xf1844,\n\t0x118b06,\n\t0x20f882,\n\t0x238543,\n\t0x23adc9,\n\t0x23cac3,\n\t0x375449,\n\t0x323043,\n\t0x28cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x2f4348,\n\t0x22dc07,\n\t0x355685,\n\t0xb4c8,\n\t0x1bea07,\n\t0x2f78a,\n\t0x178ccb,\n\t0x13c507,\n\t0x4a4c8,\n\t0x14f64a,\n\t0x19dc8,\n\t0x1370c9,\n\t0x30507,\n\t0x742c7,\n\t0x19bf08,\n\t0x1788,\n\t0x4b04f,\n\t0x1c045,\n\t0x1a87,\n\t0x186c46,\n\t0x41287,\n\t0x4a786,\n\t0xe8888,\n\t0x96fc6,\n\t0x188847,\n\t0x178809,\n\t0x1bf307,\n\t0xd81c9,\n\t0xbcbc9,\n\t0xc6a06,\n\t0xc9148,\n\t0xc7845,\n\t0x57b0a,\n\t0xd30c8,\n\t0x187ec3,\n\t0xdad48,\n\t0x3ccc7,\n\t0x131f45,\n\t0x787d0,\n\t0x63c3,\n\t0x1a3443,\n\t0x125807,\n\t0x1cc85,\n\t0xf02c8,\n\t0xe385,\n\t0x10ea43,\n\t0x16d5c8,\n\t0x12906,\n\t0x198909,\n\t0xb2007,\n\t0x145f0b,\n\t0x180884,\n\t0x104f04,\n\t0x10f30b,\n\t0x10f8c
8,\n\t0x110547,\n\t0x131645,\n\t0x238543,\n\t0x23cac3,\n\t0x21b583,\n\t0x201a03,\n\t0x20c743,\n\t0x323043,\n\t0x1a3443,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x15d4cb,\n\t0x207102,\n\t0x20f882,\n\t0x201a03,\n\t0x16fb88,\n\t0x207102,\n\t0x20f882,\n\t0x207b02,\n\t0x200942,\n\t0x20b302,\n\t0x208e83,\n\t0x200442,\n\t0x207102,\n\t0x39c783,\n\t0x20f882,\n\t0x238543,\n\t0x23cac3,\n\t0x207b02,\n\t0x323043,\n\t0x255783,\n\t0x28cac3,\n\t0x21bf84,\n\t0x208e83,\n\t0x21eb43,\n\t0x201a03,\n\t0x313e44,\n\t0x202443,\n\t0x323043,\n\t0x20f882,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x208e83,\n\t0x200e03,\n\t0x201a03,\n\t0x3ad3c7,\n\t0x238543,\n\t0x282c07,\n\t0x2d7f86,\n\t0x20e583,\n\t0x207603,\n\t0x323043,\n\t0x204c03,\n\t0x231604,\n\t0x2d5204,\n\t0x30e706,\n\t0x20bd43,\n\t0x208e83,\n\t0x201a03,\n\t0x22d585,\n\t0x321704,\n\t0x350503,\n\t0x39b4c3,\n\t0x2cbd87,\n\t0x342d45,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x99807,\n\t0x203402,\n\t0x28f283,\n\t0x205403,\n\t0x39c783,\n\t0x65e38543,\n\t0x206902,\n\t0x23cac3,\n\t0x2020c3,\n\t0x323043,\n\t0x231604,\n\t0x3797c3,\n\t0x2e3b03,\n\t0x28cac3,\n\t0x21bf84,\n\t0x6620ea42,\n\t0x208e83,\n\t0x201a03,\n\t0x206683,\n\t0x22e603,\n\t0x21a902,\n\t0x202443,\n\t0x16fb88,\n\t0x323043,\n\t0x10c3,\n\t0x31f944,\n\t0x39c783,\n\t0x20f882,\n\t0x238543,\n\t0x240244,\n\t0x23cac3,\n\t0x323043,\n\t0x231604,\n\t0x255783,\n\t0x3a2e44,\n\t0x20f644,\n\t0x20c0c6,\n\t0x21bf84,\n\t0x208e83,\n\t0x201a03,\n\t0x221483,\n\t0x29f806,\n\t0x4504b,\n\t0x24386,\n\t0x3204a,\n\t0x112d0a,\n\t0x16fb88,\n\t0x214f44,\n\t0x67638543,\n\t0x39c744,\n\t0x23cac3,\n\t0x259004,\n\t0x323043,\n\t0x210543,\n\t0x28cac3,\n\t0x208e83,\n\t0x1a3443,\n\t0x201a03,\n\t0xbac3,\n\t0x3381cb,\n\t0x3af10a,\n\t0x3bf84c,\n\t0xe4288,\n\t0x207102,\n\t0x20f882,\n\t0x207b02,\n\t0x2b13c5,\n\t0x231604,\n\t0x204a82,\n\t0x28cac3,\n\t0x20f644,\n\t0x204d42,\n\t0x200442,\n\t0x20d2c2,\n\t0x21a902,\n\t0x19c783,\n\t0x35f42,
\n\t0x2b3509,\n\t0x2f7148,\n\t0x351689,\n\t0x2410c9,\n\t0x350f0a,\n\t0x26080a,\n\t0x2127c2,\n\t0x222902,\n\t0xf882,\n\t0x238543,\n\t0x229682,\n\t0x24af46,\n\t0x369d02,\n\t0x206a42,\n\t0x37904e,\n\t0x2213ce,\n\t0x284b47,\n\t0x208e07,\n\t0x2ec8c2,\n\t0x23cac3,\n\t0x323043,\n\t0x200042,\n\t0x200942,\n\t0x31603,\n\t0x23980f,\n\t0x20b542,\n\t0x2dd887,\n\t0x2b4a87,\n\t0x2b7e87,\n\t0x31a4cc,\n\t0x2c448c,\n\t0x223984,\n\t0x285b0a,\n\t0x221302,\n\t0x209a02,\n\t0x2c0884,\n\t0x21f502,\n\t0x2ca102,\n\t0x2c46c4,\n\t0x21a602,\n\t0x200282,\n\t0x11a83,\n\t0x297047,\n\t0x2beb05,\n\t0x20d842,\n\t0x239784,\n\t0x382842,\n\t0x2e3008,\n\t0x208e83,\n\t0x203488,\n\t0x203cc2,\n\t0x223b45,\n\t0x38dbc6,\n\t0x201a03,\n\t0x207082,\n\t0x2f0ac7,\n\t0xcb02,\n\t0x2797c5,\n\t0x358b85,\n\t0x209642,\n\t0x20fd02,\n\t0x2cf9ca,\n\t0x26eeca,\n\t0x21b9c2,\n\t0x2a4dc4,\n\t0x2002c2,\n\t0x3b4ec8,\n\t0x20d582,\n\t0x315b08,\n\t0x30ab47,\n\t0x30ba09,\n\t0x203442,\n\t0x310e45,\n\t0x3044c5,\n\t0x21770b,\n\t0x2d054c,\n\t0x237348,\n\t0x321b08,\n\t0x232ec2,\n\t0x208a02,\n\t0x207102,\n\t0x16fb88,\n\t0x20f882,\n\t0x238543,\n\t0x207b02,\n\t0x204d42,\n\t0xe03,\n\t0x200442,\n\t0x201a03,\n\t0x20d2c2,\n\t0x207102,\n\t0x68a0f882,\n\t0x68f23043,\n\t0x211a83,\n\t0x204a82,\n\t0x208e83,\n\t0x391783,\n\t0x201a03,\n\t0x2ef783,\n\t0x37f186,\n\t0x1615443,\n\t0x16fb88,\n\t0x11205,\n\t0xae90d,\n\t0xacc8a,\n\t0x6e487,\n\t0x69601e02,\n\t0x69a00242,\n\t0x69e00bc2,\n\t0x6a200702,\n\t0x6a60b5c2,\n\t0x6aa01382,\n\t0x36dc7,\n\t0x6ae0f882,\n\t0x6b20c8c2,\n\t0x6b604842,\n\t0x6ba04c82,\n\t0x2213c3,\n\t0x18ec4,\n\t0x2298c3,\n\t0x6be1d882,\n\t0x6c200182,\n\t0x53c47,\n\t0x6c60a442,\n\t0x6ca00782,\n\t0x6ce01bc2,\n\t0x6d205e82,\n\t0x6d601c82,\n\t0x6da00942,\n\t0xc2845,\n\t0x23ef43,\n\t0x281a04,\n\t0x6de1f502,\n\t0x6e205242,\n\t0x6e603582,\n\t0x17d50b,\n\t0x6ea01fc2,\n\t0x6f253442,\n\t0x6f604a82,\n\t0x6fa0b302,\n\t0x6fe14702,\n\t0x70200802,\n\t0x70614642,\n\t0x70a745c2,\n\t0x70e0ea42,\n\t0x71204802,\n\t0x71604d42,\n\t0x71a03382,\n\t0x71e08682,\n\t0x7
224d382,\n\t0x1a3284,\n\t0x35efc3,\n\t0x72604f82,\n\t0x72a10902,\n\t0x72e11542,\n\t0x73201f02,\n\t0x73600442,\n\t0x73a0cb42,\n\t0x15d647,\n\t0x73e04102,\n\t0x74204142,\n\t0x7460d2c2,\n\t0x74a21382,\n\t0x1ad70c,\n\t0x74e2a202,\n\t0x75245542,\n\t0x75605942,\n\t0x75a06442,\n\t0x75e0c402,\n\t0x76260982,\n\t0x76600202,\n\t0x76a16fc2,\n\t0x76e7d302,\n\t0x772610c2,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x12143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x6ef797c3,\n\t0x212143,\n\t0x332244,\n\t0x2f7046,\n\t0x2f9a03,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x244949,\n\t0x235f42,\n\t0x26c783,\n\t0x2bcec3,\n\t0x20fbc5,\n\t0x2020c3,\n\t0x3797c3,\n\t0x212143,\n\t0x20c0c3,\n\t0x248d43,\n\t0x242989,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\
n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x235f42,\n\t0x235f42,\n\t0x3797c3,\n\t0x212143,\n\t0x77a38543,\n\t0x23cac3,\n\t0x20a6c3,\n\t0x28cac3,\n\t0x208e83,\n\t0xe03,\n\t0x201a03,\n\t0x16fb88,\n\t0x20f882,\n\t0x238543,\n\t0x208e83,\n\t0x201a03,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x28cac3,\n\t0x208e83,\n\t0xe03,\n\t0x201a03,\n\t0x252044,\n\t0x20f882,\n\t0x238543,\n\t0x345903,\n\t0x23cac3,\n\t0x253384,\n\t0x21b583,\n\t0x323043,\n\t0x231604,\n\t0x255783,\n\t0x28cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x20c843,\n\t0x355685,\n\t0x248d43,\n\t0x202443,\n\t0xe03,\n\t0x20f882,\n\t0x238543,\n\t0x3797c3,\n\t0x208e83,\n\t0x201a03,\n\t0x207102,\n\t0x39c783,\n\t0x16fb88,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x23a1c6,\n\t0x231604,\n\t0x255783,\n\t0x21bf84,\n\t0x208e83,\n\t0x201a03,\n\t0x221483,\n\t0x238543,\n\t0x23cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x1442047,\n\t0x238543,\n\t0x24386,\n\t0x23cac3,\n\t0x323043,\n\t0xe5586,\n\t0x208e83,\n\t0x201a03,\n\t0x31dc48,\n\t0x321949,\n\t0x330189,\n\t0x33bb08,\n\t0x38fb48,\n\t0x38fb49,\n\t0x24558d,\n\t0x24dd8f,\n\t0x2f53d0,\n\t0x35648d,\n\t0x37210c,\n\t0x39064b,\n\t0xba9c8,\n\t0xac605,\n\t0x207102,\n\t0x342b85,\n\t0x200243,\n\t0x7ae0f882,\n\t0x23cac3,\n\t0x323043,\n\t0x2d8c47,\n\t0x263a43,\n\t0x28cac3,\n\t0x208e83,\n\t0x21b543,\n\t0x217e03,\n\t0x200e03,\n\t0x201a03,\n\t0x3821c6,\n\t0x205082,\n\t0x202443,\n\t0x16fb88,\n\t0x207102,\n\t0x39c783,\n\t0x20f882,\n\t0x238543,\n\t0x23cac3,\n\t0x323043,\n\t0x231604,\n\t0x28cac3,\n\t0x208e83,\n\t0x201a03,\n\t0x2
15443,\n\t0x106904,\n\t0x15217c6,\n\t0x207102,\n\t0x20f882,\n\t0x323043,\n\t0x28cac3,\n\t0x201a03,\n}\n\n// children is the list of nodes' children, the parent's wildcard bit and the\n// parent's node type. If a node has no children then their children index\n// will be in the range [0, 6), depending on the wildcard bit and node type.\n//\n// The layout within the uint32, from MSB to LSB, is:\n//\t[ 1 bits] unused\n//\t[ 1 bits] wildcard bit\n//\t[ 2 bits] node type\n//\t[14 bits] high nodes index (exclusive) of children\n//\t[14 bits] low nodes index (inclusive) of children\nvar children = [...]uint32{\n\t0x0,\n\t0x10000000,\n\t0x20000000,\n\t0x40000000,\n\t0x50000000,\n\t0x60000000,\n\t0x186c615,\n\t0x187061b,\n\t0x189461c,\n\t0x19f0625,\n\t0x1a0467c,\n\t0x1a18681,\n\t0x1a2c686,\n\t0x1a4c68b,\n\t0x1a50693,\n\t0x1a68694,\n\t0x1a9069a,\n\t0x1a946a4,\n\t0x1aac6a5,\n\t0x1ab06ab,\n\t0x1ab46ac,\n\t0x1af06ad,\n\t0x1af46bc,\n\t0x21afc6bd,\n\t0x1b446bf,\n\t0x1b486d1,\n\t0x1b686d2,\n\t0x1b7c6da,\n\t0x1b806df,\n\t0x1bb06e0,\n\t0x1bcc6ec,\n\t0x1bf46f3,\n\t0x1c006fd,\n\t0x1c04700,\n\t0x1c9c701,\n\t0x1cb0727,\n\t0x1cc472c,\n\t0x1cf4731,\n\t0x1d0473d,\n\t0x1d18741,\n\t0x1d3c746,\n\t0x1e7474f,\n\t0x1e7879d,\n\t0x1ee479e,\n\t0x1f507b9,\n\t0x1f687d4,\n\t0x1f7c7da,\n\t0x1f847df,\n\t0x1f987e1,\n\t0x1f9c7e6,\n\t0x1fb87e7,\n\t0x20047ee,\n\t0x2020801,\n\t0x2024808,\n\t0x2028809,\n\t0x204480a,\n\t0x2080811,\n\t0x62084820,\n\t0x209c821,\n\t0x20b4827,\n\t0x20b882d,\n\t0x20c882e,\n\t0x2178832,\n\t0x217c85e,\n\t0x2218c85f,\n\t0x22190863,\n\t0x22194864,\n\t0x21cc865,\n\t0x21d0873,\n\t0x2658874,\n\t0x226f8996,\n\t0x226fc9be,\n\t0x227009bf,\n\t0x2270c9c0,\n\t0x227109c3,\n\t0x2271c9c4,\n\t0x227209c7,\n\t0x227249c8,\n\t0x227289c9,\n\t0x2272c9ca,\n\t0x227309cb,\n\t0x2273c9cc,\n\t0x227409cf,\n\t0x2274c9d0,\n\t0x227509d3,\n\t0x227549d4,\n\t0x227589d5,\n\t0x227649d6,\n\t0x227689d9,\n\t0x2276c9da,\n\t0x227709db,\n\t0x27749dc,\n\t0x227789dd,\n\t0x227849de,\n\t0x227889e1,\n\t0x27909e2,\n\t0x27cc9e4,\n\t
0x227ec9f3,\n\t0x227f09fb,\n\t0x227f49fc,\n\t0x27f89fd,\n\t0x227fc9fe,\n\t0x28009ff,\n\t0x281ca00,\n\t0x2834a07,\n\t0x2838a0d,\n\t0x2848a0e,\n\t0x2854a12,\n\t0x2888a15,\n\t0x288ca22,\n\t0x28a0a23,\n\t0x228a8a28,\n\t0x2968a2a,\n\t0x2296ca5a,\n\t0x2974a5b,\n\t0x2978a5d,\n\t0x2990a5e,\n\t0x29a4a64,\n\t0x29cca69,\n\t0x29eca73,\n\t0x2a1ca7b,\n\t0x2a44a87,\n\t0x2a48a91,\n\t0x2a6ca92,\n\t0x2a70a9b,\n\t0x2a84a9c,\n\t0x2a88aa1,\n\t0x2a8caa2,\n\t0x2aacaa3,\n\t0x2ac8aab,\n\t0x2accab2,\n\t0x22ad0ab3,\n\t0x2ad4ab4,\n\t0x2ad8ab5,\n\t0x2ae8ab6,\n\t0x2aecaba,\n\t0x2b64abb,\n\t0x2b68ad9,\n\t0x2b84ada,\n\t0x2b94ae1,\n\t0x2ba8ae5,\n\t0x2bc0aea,\n\t0x2bd8af0,\n\t0x2bf0af6,\n\t0x2bf4afc,\n\t0x2c0cafd,\n\t0x2c28b03,\n\t0x2c48b0a,\n\t0x2c60b12,\n\t0x2cc0b18,\n\t0x2cdcb30,\n\t0x2ce4b37,\n\t0x2ce8b39,\n\t0x2cfcb3a,\n\t0x2d40b3f,\n\t0x2dc0b50,\n\t0x2decb70,\n\t0x2df0b7b,\n\t0x2df8b7c,\n\t0x2e18b7e,\n\t0x2e1cb86,\n\t0x2e40b87,\n\t0x2e48b90,\n\t0x2e84b92,\n\t0x2ec8ba1,\n\t0x2eccbb2,\n\t0x2f34bb3,\n\t0x2f38bcd,\n\t0x22f3cbce,\n\t0x22f40bcf,\n\t0x22f50bd0,\n\t0x22f54bd4,\n\t0x22f58bd5,\n\t0x22f5cbd6,\n\t0x22f60bd7,\n\t0x2f78bd8,\n\t0x2f9cbde,\n\t0x2fbcbe7,\n\t0x3580bef,\n\t0x358cd60,\n\t0x35acd63,\n\t0x3768d6b,\n\t0x3838dda,\n\t0x38a8e0e,\n\t0x3900e2a,\n\t0x39e8e40,\n\t0x3a40e7a,\n\t0x3a7ce90,\n\t0x3b78e9f,\n\t0x3c44ede,\n\t0x3cdcf11,\n\t0x3d6cf37,\n\t0x3dd0f5b,\n\t0x4008f74,\n\t0x40c1002,\n\t0x418d030,\n\t0x41d9063,\n\t0x4261076,\n\t0x429d098,\n\t0x42ed0a7,\n\t0x43650bb,\n\t0x643690d9,\n\t0x6436d0da,\n\t0x643710db,\n\t0x43ed0dc,\n\t0x44490fb,\n\t0x44c5112,\n\t0x453d131,\n\t0x45bd14f,\n\t0x462916f,\n\t0x475518a,\n\t0x47ad1d5,\n\t0x647b11eb,\n\t0x48491ec,\n\t0x48d1212,\n\t0x491d234,\n\t0x4985247,\n\t0x4a2d261,\n\t0x4af528b,\n\t0x4b5d2bd,\n\t0x4c712d7,\n\t0x64c7531c,\n\t0x64c7931d,\n\t0x4cd531e,\n\t0x4d31335,\n\t0x4dc134c,\n\t0x4e3d370,\n\t0x4e8138f,\n\t0x4f653a0,\n\t0x4f993d9,\n\t0x4ff93e6,\n\t0x506d3fe,\n\t0x50f541b,\n\t0x513543d,\n\t0x51a544d,\n\t0x651a9469,\n\t0x651ad46a,\n\t0x251b146b,\n\t0x5
1c946c,\n\t0x51e5472,\n\t0x5229479,\n\t0x523948a,\n\t0x525148e,\n\t0x52c9494,\n\t0x52d14b2,\n\t0x52e54b4,\n\t0x53014b9,\n\t0x532d4c0,\n\t0x53314cb,\n\t0x53394cc,\n\t0x534d4ce,\n\t0x53694d3,\n\t0x53754da,\n\t0x537d4dd,\n\t0x53b94df,\n\t0x53cd4ee,\n\t0x53d54f3,\n\t0x53e14f5,\n\t0x53e94f8,\n\t0x540d4fa,\n\t0x5431503,\n\t0x544950c,\n\t0x544d512,\n\t0x5455513,\n\t0x5459515,\n\t0x54c1516,\n\t0x54c5530,\n\t0x54e9531,\n\t0x550d53a,\n\t0x5529543,\n\t0x553954a,\n\t0x554d54e,\n\t0x5551553,\n\t0x5559554,\n\t0x556d556,\n\t0x557d55b,\n\t0x558155f,\n\t0x559d560,\n\t0x5e2d567,\n\t0x5e6578b,\n\t0x5e91799,\n\t0x5ead7a4,\n\t0x5ecd7ab,\n\t0x5eed7b3,\n\t0x5f317bb,\n\t0x5f397cc,\n\t0x25f3d7ce,\n\t0x25f417cf,\n\t0x5f497d0,\n\t0x60c17d2,\n\t0x260c5830,\n\t0x260d5831,\n\t0x260dd835,\n\t0x260e9837,\n\t0x60ed83a,\n\t0x60f183b,\n\t0x611983c,\n\t0x6141846,\n\t0x6145850,\n\t0x617d851,\n\t0x619985f,\n\t0x6cf1866,\n\t0x6cf5b3c,\n\t0x6cf9b3d,\n\t0x26cfdb3e,\n\t0x6d01b3f,\n\t0x26d05b40,\n\t0x6d09b41,\n\t0x26d15b42,\n\t0x6d19b45,\n\t0x6d1db46,\n\t0x26d21b47,\n\t0x6d25b48,\n\t0x26d2db49,\n\t0x6d31b4b,\n\t0x6d35b4c,\n\t0x26d45b4d,\n\t0x6d49b51,\n\t0x6d4db52,\n\t0x6d51b53,\n\t0x6d55b54,\n\t0x26d59b55,\n\t0x6d5db56,\n\t0x6d61b57,\n\t0x6d65b58,\n\t0x6d69b59,\n\t0x26d71b5a,\n\t0x6d75b5c,\n\t0x6d79b5d,\n\t0x6d7db5e,\n\t0x26d81b5f,\n\t0x6d85b60,\n\t0x26d8db61,\n\t0x26d91b63,\n\t0x6dadb64,\n\t0x6dbdb6b,\n\t0x6e01b6f,\n\t0x6e05b80,\n\t0x6e29b81,\n\t0x6e2db8a,\n\t0x6e31b8b,\n\t0x6fbdb8c,\n\t0x26fc1bef,\n\t0x26fc9bf0,\n\t0x26fcdbf2,\n\t0x26fd1bf3,\n\t0x6fd9bf4,\n\t0x70b5bf6,\n\t0x270b9c2d,\n\t0x70bdc2e,\n\t0x70e9c2f,\n\t0x70edc3a,\n\t0x7111c3b,\n\t0x711dc44,\n\t0x713dc47,\n\t0x7141c4f,\n\t0x7179c50,\n\t0x7411c5e,\n\t0x74cdd04,\n\t0x74e1d33,\n\t0x7515d38,\n\t0x7545d45,\n\t0x7561d51,\n\t0x7589d58,\n\t0x75a9d62,\n\t0x75c5d6a,\n\t0x75edd71,\n\t0x75fdd7b,\n\t0x7601d7f,\n\t0x7605d80,\n\t0x7639d81,\n\t0x7645d8e,\n\t0x7665d91,\n\t0x76ddd99,\n\t0x276e1db7,\n\t0x7705db8,\n\t0x7725dc1,\n\t0x7739dc9,\n\t0x774ddce,\n\t0x7751
dd3,\n\t0x7771dd4,\n\t0x7815ddc,\n\t0x7831e05,\n\t0x7855e0c,\n\t0x785de15,\n\t0x7869e17,\n\t0x7871e1a,\n\t0x7885e1c,\n\t0x78a5e21,\n\t0x78b1e29,\n\t0x78bde2c,\n\t0x78ede2f,\n\t0x79c1e3b,\n\t0x79c5e70,\n\t0x79d9e71,\n\t0x79e1e76,\n\t0x79f9e78,\n\t0x79fde7e,\n\t0x7a09e7f,\n\t0x7a0de82,\n\t0x7a29e83,\n\t0x7a65e8a,\n\t0x7a69e99,\n\t0x7a89e9a,\n\t0x7ad9ea2,\n\t0x7af5eb6,\n\t0x7b49ebd,\n\t0x7b4ded2,\n\t0x7b51ed3,\n\t0x7b55ed4,\n\t0x7b99ed5,\n\t0x7ba9ee6,\n\t0x7be9eea,\n\t0x7bedefa,\n\t0x7c1defb,\n\t0x7d65f07,\n\t0x7d8df59,\n\t0x7db9f63,\n\t0x7dc5f6e,\n\t0x7dcdf71,\n\t0x7eddf73,\n\t0x7ee9fb7,\n\t0x7ef5fba,\n\t0x7f01fbd,\n\t0x7f0dfc0,\n\t0x7f19fc3,\n\t0x7f25fc6,\n\t0x7f31fc9,\n\t0x7f3dfcc,\n\t0x7f49fcf,\n\t0x7f55fd2,\n\t0x7f61fd5,\n\t0x7f6dfd8,\n\t0x7f79fdb,\n\t0x7f81fde,\n\t0x7f8dfe0,\n\t0x7f99fe3,\n\t0x7fa5fe6,\n\t0x7fb1fe9,\n\t0x7fbdfec,\n\t0x7fc9fef,\n\t0x7fd5ff2,\n\t0x7fe1ff5,\n\t0x7fedff8,\n\t0x7ff9ffb,\n\t0x8005ffe,\n\t0x8032001,\n\t0x803e00c,\n\t0x804a00f,\n\t0x8056012,\n\t0x8062015,\n\t0x806e018,\n\t0x807601b,\n\t0x808201d,\n\t0x808e020,\n\t0x809a023,\n\t0x80a6026,\n\t0x80b2029,\n\t0x80be02c,\n\t0x80ca02f,\n\t0x80d6032,\n\t0x80e2035,\n\t0x80ee038,\n\t0x80fa03b,\n\t0x810603e,\n\t0x8112041,\n\t0x811a044,\n\t0x8126046,\n\t0x8132049,\n\t0x813e04c,\n\t0x814a04f,\n\t0x8156052,\n\t0x8162055,\n\t0x816e058,\n\t0x817a05b,\n\t0x817e05e,\n\t0x818a05f,\n\t0x81a6062,\n\t0x81aa069,\n\t0x81ba06a,\n\t0x81d606e,\n\t0x821a075,\n\t0x821e086,\n\t0x8232087,\n\t0x826608c,\n\t0x8276099,\n\t0x829609d,\n\t0x82ae0a5,\n\t0x82c60ab,\n\t0x82ce0b1,\n\t0x283120b3,\n\t0x83160c4,\n\t0x83420c5,\n\t0x834a0d0,\n\t0x835e0d2,\n}\n\n// max children 494 (capacity 1023)\n// max text offset 28750 (capacity 32767)\n// max text length 36 (capacity 63)\n// max hi 8407 (capacity 16383)\n// max lo 8402 (capacity 16383)\n"
  },
  {
    "path": "vendor/golang.org/x/net/publicsuffix/table_test.go",
    "content": "// generated by go run gen.go; DO NOT EDIT\n\npackage publicsuffix\n\nvar rules = [...]string{\n\t\"ac\",\n\t\"com.ac\",\n\t\"edu.ac\",\n\t\"gov.ac\",\n\t\"net.ac\",\n\t\"mil.ac\",\n\t\"org.ac\",\n\t\"ad\",\n\t\"nom.ad\",\n\t\"ae\",\n\t\"co.ae\",\n\t\"net.ae\",\n\t\"org.ae\",\n\t\"sch.ae\",\n\t\"ac.ae\",\n\t\"gov.ae\",\n\t\"mil.ae\",\n\t\"aero\",\n\t\"accident-investigation.aero\",\n\t\"accident-prevention.aero\",\n\t\"aerobatic.aero\",\n\t\"aeroclub.aero\",\n\t\"aerodrome.aero\",\n\t\"agents.aero\",\n\t\"aircraft.aero\",\n\t\"airline.aero\",\n\t\"airport.aero\",\n\t\"air-surveillance.aero\",\n\t\"airtraffic.aero\",\n\t\"air-traffic-control.aero\",\n\t\"ambulance.aero\",\n\t\"amusement.aero\",\n\t\"association.aero\",\n\t\"author.aero\",\n\t\"ballooning.aero\",\n\t\"broker.aero\",\n\t\"caa.aero\",\n\t\"cargo.aero\",\n\t\"catering.aero\",\n\t\"certification.aero\",\n\t\"championship.aero\",\n\t\"charter.aero\",\n\t\"civilaviation.aero\",\n\t\"club.aero\",\n\t\"conference.aero\",\n\t\"consultant.aero\",\n\t\"consulting.aero\",\n\t\"control.aero\",\n\t\"council.aero\",\n\t\"crew.aero\",\n\t\"design.aero\",\n\t\"dgca.aero\",\n\t\"educator.aero\",\n\t\"emergency.aero\",\n\t\"engine.aero\",\n\t\"engineer.aero\",\n\t\"entertainment.aero\",\n\t\"equipment.aero\",\n\t\"exchange.aero\",\n\t\"express.aero\",\n\t\"federation.aero\",\n\t\"flight.aero\",\n\t\"freight.aero\",\n\t\"fuel.aero\",\n\t\"gliding.aero\",\n\t\"government.aero\",\n\t\"groundhandling.aero\",\n\t\"group.aero\",\n\t\"hanggliding.aero\",\n\t\"homebuilt.aero\",\n\t\"insurance.aero\",\n\t\"journal.aero\",\n\t\"journalist.aero\",\n\t\"leasing.aero\",\n\t\"logistics.aero\",\n\t\"magazine.aero\",\n\t\"maintenance.aero\",\n\t\"media.aero\",\n\t\"microlight.aero\",\n\t\"modelling.aero\",\n\t\"navigation.aero\",\n\t\"parachuting.aero\",\n\t\"paragliding.aero\",\n\t\"passenger-association.aero\",\n\t\"pilot.aero\",\n\t\"press.aero\",\n\t\"production.aero\",\n\t\"recreation.aero\",\n\t\"repbody.aero\",\n
\t\"res.aero\",\n\t\"research.aero\",\n\t\"rotorcraft.aero\",\n\t\"safety.aero\",\n\t\"scientist.aero\",\n\t\"services.aero\",\n\t\"show.aero\",\n\t\"skydiving.aero\",\n\t\"software.aero\",\n\t\"student.aero\",\n\t\"trader.aero\",\n\t\"trading.aero\",\n\t\"trainer.aero\",\n\t\"union.aero\",\n\t\"workinggroup.aero\",\n\t\"works.aero\",\n\t\"af\",\n\t\"gov.af\",\n\t\"com.af\",\n\t\"org.af\",\n\t\"net.af\",\n\t\"edu.af\",\n\t\"ag\",\n\t\"com.ag\",\n\t\"org.ag\",\n\t\"net.ag\",\n\t\"co.ag\",\n\t\"nom.ag\",\n\t\"ai\",\n\t\"off.ai\",\n\t\"com.ai\",\n\t\"net.ai\",\n\t\"org.ai\",\n\t\"al\",\n\t\"com.al\",\n\t\"edu.al\",\n\t\"gov.al\",\n\t\"mil.al\",\n\t\"net.al\",\n\t\"org.al\",\n\t\"am\",\n\t\"ao\",\n\t\"ed.ao\",\n\t\"gv.ao\",\n\t\"og.ao\",\n\t\"co.ao\",\n\t\"pb.ao\",\n\t\"it.ao\",\n\t\"aq\",\n\t\"ar\",\n\t\"com.ar\",\n\t\"edu.ar\",\n\t\"gob.ar\",\n\t\"gov.ar\",\n\t\"int.ar\",\n\t\"mil.ar\",\n\t\"musica.ar\",\n\t\"net.ar\",\n\t\"org.ar\",\n\t\"tur.ar\",\n\t\"arpa\",\n\t\"e164.arpa\",\n\t\"in-addr.arpa\",\n\t\"ip6.arpa\",\n\t\"iris.arpa\",\n\t\"uri.arpa\",\n\t\"urn.arpa\",\n\t\"as\",\n\t\"gov.as\",\n\t\"asia\",\n\t\"at\",\n\t\"ac.at\",\n\t\"co.at\",\n\t\"gv.at\",\n\t\"or.at\",\n\t\"au\",\n\t\"com.au\",\n\t\"net.au\",\n\t\"org.au\",\n\t\"edu.au\",\n\t\"gov.au\",\n\t\"asn.au\",\n\t\"id.au\",\n\t\"info.au\",\n\t\"conf.au\",\n\t\"oz.au\",\n\t\"act.au\",\n\t\"nsw.au\",\n\t\"nt.au\",\n\t\"qld.au\",\n\t\"sa.au\",\n\t\"tas.au\",\n\t\"vic.au\",\n\t\"wa.au\",\n\t\"act.edu.au\",\n\t\"nsw.edu.au\",\n\t\"nt.edu.au\",\n\t\"qld.edu.au\",\n\t\"sa.edu.au\",\n\t\"tas.edu.au\",\n\t\"vic.edu.au\",\n\t\"wa.edu.au\",\n\t\"qld.gov.au\",\n\t\"sa.gov.au\",\n\t\"tas.gov.au\",\n\t\"vic.gov.au\",\n\t\"wa.gov.au\",\n\t\"aw\",\n\t\"com.aw\",\n\t\"ax\",\n\t\"az\",\n\t\"com.az\",\n\t\"net.az\",\n\t\"int.az\",\n\t\"gov.az\",\n\t\"org.az\",\n\t\"edu.az\",\n\t\"info.az\",\n\t\"pp.az\",\n\t\"mil.az\",\n\t\"name.az\",\n\t\"pro.az\",\n\t\"biz.az\",\n\t\"ba\",\n\t\"com.ba\",\n\t\"edu.ba\",\n\t\"gov.ba\",\n\t\"mi
l.ba\",\n\t\"net.ba\",\n\t\"org.ba\",\n\t\"bb\",\n\t\"biz.bb\",\n\t\"co.bb\",\n\t\"com.bb\",\n\t\"edu.bb\",\n\t\"gov.bb\",\n\t\"info.bb\",\n\t\"net.bb\",\n\t\"org.bb\",\n\t\"store.bb\",\n\t\"tv.bb\",\n\t\"*.bd\",\n\t\"be\",\n\t\"ac.be\",\n\t\"bf\",\n\t\"gov.bf\",\n\t\"bg\",\n\t\"a.bg\",\n\t\"b.bg\",\n\t\"c.bg\",\n\t\"d.bg\",\n\t\"e.bg\",\n\t\"f.bg\",\n\t\"g.bg\",\n\t\"h.bg\",\n\t\"i.bg\",\n\t\"j.bg\",\n\t\"k.bg\",\n\t\"l.bg\",\n\t\"m.bg\",\n\t\"n.bg\",\n\t\"o.bg\",\n\t\"p.bg\",\n\t\"q.bg\",\n\t\"r.bg\",\n\t\"s.bg\",\n\t\"t.bg\",\n\t\"u.bg\",\n\t\"v.bg\",\n\t\"w.bg\",\n\t\"x.bg\",\n\t\"y.bg\",\n\t\"z.bg\",\n\t\"0.bg\",\n\t\"1.bg\",\n\t\"2.bg\",\n\t\"3.bg\",\n\t\"4.bg\",\n\t\"5.bg\",\n\t\"6.bg\",\n\t\"7.bg\",\n\t\"8.bg\",\n\t\"9.bg\",\n\t\"bh\",\n\t\"com.bh\",\n\t\"edu.bh\",\n\t\"net.bh\",\n\t\"org.bh\",\n\t\"gov.bh\",\n\t\"bi\",\n\t\"co.bi\",\n\t\"com.bi\",\n\t\"edu.bi\",\n\t\"or.bi\",\n\t\"org.bi\",\n\t\"biz\",\n\t\"bj\",\n\t\"asso.bj\",\n\t\"barreau.bj\",\n\t\"gouv.bj\",\n\t\"bm\",\n\t\"com.bm\",\n\t\"edu.bm\",\n\t\"gov.bm\",\n\t\"net.bm\",\n\t\"org.bm\",\n\t\"*.bn\",\n\t\"bo\",\n\t\"com.bo\",\n\t\"edu.bo\",\n\t\"gov.bo\",\n\t\"gob.bo\",\n\t\"int.bo\",\n\t\"org.bo\",\n\t\"net.bo\",\n\t\"mil.bo\",\n\t\"tv.bo\",\n\t\"br\",\n\t\"adm.br\",\n\t\"adv.br\",\n\t\"agr.br\",\n\t\"am.br\",\n\t\"arq.br\",\n\t\"art.br\",\n\t\"ato.br\",\n\t\"b.br\",\n\t\"belem.br\",\n\t\"bio.br\",\n\t\"blog.br\",\n\t\"bmd.br\",\n\t\"cim.br\",\n\t\"cng.br\",\n\t\"cnt.br\",\n\t\"com.br\",\n\t\"coop.br\",\n\t\"cri.br\",\n\t\"def.br\",\n\t\"ecn.br\",\n\t\"eco.br\",\n\t\"edu.br\",\n\t\"emp.br\",\n\t\"eng.br\",\n\t\"esp.br\",\n\t\"etc.br\",\n\t\"eti.br\",\n\t\"far.br\",\n\t\"flog.br\",\n\t\"floripa.br\",\n\t\"fm.br\",\n\t\"fnd.br\",\n\t\"fot.br\",\n\t\"fst.br\",\n\t\"g12.br\",\n\t\"ggf.br\",\n\t\"gov.br\",\n\t\"ac.gov.br\",\n\t\"al.gov.br\",\n\t\"am.gov.br\",\n\t\"ap.gov.br\",\n\t\"ba.gov.br\",\n\t\"ce.gov.br\",\n\t\"df.gov.br\",\n\t\"es.gov.br\",\n\t\"go.gov.br\",\n\t\"ma.gov.br\",\n\t\"mg.gov.br\",\
n\t\"ms.gov.br\",\n\t\"mt.gov.br\",\n\t\"pa.gov.br\",\n\t\"pb.gov.br\",\n\t\"pe.gov.br\",\n\t\"pi.gov.br\",\n\t\"pr.gov.br\",\n\t\"rj.gov.br\",\n\t\"rn.gov.br\",\n\t\"ro.gov.br\",\n\t\"rr.gov.br\",\n\t\"rs.gov.br\",\n\t\"sc.gov.br\",\n\t\"se.gov.br\",\n\t\"sp.gov.br\",\n\t\"to.gov.br\",\n\t\"imb.br\",\n\t\"ind.br\",\n\t\"inf.br\",\n\t\"jampa.br\",\n\t\"jor.br\",\n\t\"jus.br\",\n\t\"leg.br\",\n\t\"lel.br\",\n\t\"mat.br\",\n\t\"med.br\",\n\t\"mil.br\",\n\t\"mp.br\",\n\t\"mus.br\",\n\t\"net.br\",\n\t\"*.nom.br\",\n\t\"not.br\",\n\t\"ntr.br\",\n\t\"odo.br\",\n\t\"org.br\",\n\t\"poa.br\",\n\t\"ppg.br\",\n\t\"pro.br\",\n\t\"psc.br\",\n\t\"psi.br\",\n\t\"qsl.br\",\n\t\"radio.br\",\n\t\"rec.br\",\n\t\"recife.br\",\n\t\"slg.br\",\n\t\"srv.br\",\n\t\"taxi.br\",\n\t\"teo.br\",\n\t\"tmp.br\",\n\t\"trd.br\",\n\t\"tur.br\",\n\t\"tv.br\",\n\t\"vet.br\",\n\t\"vix.br\",\n\t\"vlog.br\",\n\t\"wiki.br\",\n\t\"zlg.br\",\n\t\"bs\",\n\t\"com.bs\",\n\t\"net.bs\",\n\t\"org.bs\",\n\t\"edu.bs\",\n\t\"gov.bs\",\n\t\"bt\",\n\t\"com.bt\",\n\t\"edu.bt\",\n\t\"gov.bt\",\n\t\"net.bt\",\n\t\"org.bt\",\n\t\"bv\",\n\t\"bw\",\n\t\"co.bw\",\n\t\"org.bw\",\n\t\"by\",\n\t\"gov.by\",\n\t\"mil.by\",\n\t\"com.by\",\n\t\"of.by\",\n\t\"bz\",\n\t\"com.bz\",\n\t\"net.bz\",\n\t\"org.bz\",\n\t\"edu.bz\",\n\t\"gov.bz\",\n\t\"ca\",\n\t\"ab.ca\",\n\t\"bc.ca\",\n\t\"mb.ca\",\n\t\"nb.ca\",\n\t\"nf.ca\",\n\t\"nl.ca\",\n\t\"ns.ca\",\n\t\"nt.ca\",\n\t\"nu.ca\",\n\t\"on.ca\",\n\t\"pe.ca\",\n\t\"qc.ca\",\n\t\"sk.ca\",\n\t\"yk.ca\",\n\t\"gc.ca\",\n\t\"cat\",\n\t\"cc\",\n\t\"cd\",\n\t\"gov.cd\",\n\t\"cf\",\n\t\"cg\",\n\t\"ch\",\n\t\"ci\",\n\t\"org.ci\",\n\t\"or.ci\",\n\t\"com.ci\",\n\t\"co.ci\",\n\t\"edu.ci\",\n\t\"ed.ci\",\n\t\"ac.ci\",\n\t\"net.ci\",\n\t\"go.ci\",\n\t\"asso.ci\",\n\t\"xn--aroport-bya.ci\",\n\t\"int.ci\",\n\t\"presse.ci\",\n\t\"md.ci\",\n\t\"gouv.ci\",\n\t\"*.ck\",\n\t\"!www.ck\",\n\t\"cl\",\n\t\"gov.cl\",\n\t\"gob.cl\",\n\t\"co.cl\",\n\t\"mil.cl\",\n\t\"cm\",\n\t\"co.cm\",\n\t\"com.cm\",\n\t\"gov.cm\",\n\t\
"net.cm\",\n\t\"cn\",\n\t\"ac.cn\",\n\t\"com.cn\",\n\t\"edu.cn\",\n\t\"gov.cn\",\n\t\"net.cn\",\n\t\"org.cn\",\n\t\"mil.cn\",\n\t\"xn--55qx5d.cn\",\n\t\"xn--io0a7i.cn\",\n\t\"xn--od0alg.cn\",\n\t\"ah.cn\",\n\t\"bj.cn\",\n\t\"cq.cn\",\n\t\"fj.cn\",\n\t\"gd.cn\",\n\t\"gs.cn\",\n\t\"gz.cn\",\n\t\"gx.cn\",\n\t\"ha.cn\",\n\t\"hb.cn\",\n\t\"he.cn\",\n\t\"hi.cn\",\n\t\"hl.cn\",\n\t\"hn.cn\",\n\t\"jl.cn\",\n\t\"js.cn\",\n\t\"jx.cn\",\n\t\"ln.cn\",\n\t\"nm.cn\",\n\t\"nx.cn\",\n\t\"qh.cn\",\n\t\"sc.cn\",\n\t\"sd.cn\",\n\t\"sh.cn\",\n\t\"sn.cn\",\n\t\"sx.cn\",\n\t\"tj.cn\",\n\t\"xj.cn\",\n\t\"xz.cn\",\n\t\"yn.cn\",\n\t\"zj.cn\",\n\t\"hk.cn\",\n\t\"mo.cn\",\n\t\"tw.cn\",\n\t\"co\",\n\t\"arts.co\",\n\t\"com.co\",\n\t\"edu.co\",\n\t\"firm.co\",\n\t\"gov.co\",\n\t\"info.co\",\n\t\"int.co\",\n\t\"mil.co\",\n\t\"net.co\",\n\t\"nom.co\",\n\t\"org.co\",\n\t\"rec.co\",\n\t\"web.co\",\n\t\"com\",\n\t\"coop\",\n\t\"cr\",\n\t\"ac.cr\",\n\t\"co.cr\",\n\t\"ed.cr\",\n\t\"fi.cr\",\n\t\"go.cr\",\n\t\"or.cr\",\n\t\"sa.cr\",\n\t\"cu\",\n\t\"com.cu\",\n\t\"edu.cu\",\n\t\"org.cu\",\n\t\"net.cu\",\n\t\"gov.cu\",\n\t\"inf.cu\",\n\t\"cv\",\n\t\"cw\",\n\t\"com.cw\",\n\t\"edu.cw\",\n\t\"net.cw\",\n\t\"org.cw\",\n\t\"cx\",\n\t\"gov.cx\",\n\t\"cy\",\n\t\"ac.cy\",\n\t\"biz.cy\",\n\t\"com.cy\",\n\t\"ekloges.cy\",\n\t\"gov.cy\",\n\t\"ltd.cy\",\n\t\"name.cy\",\n\t\"net.cy\",\n\t\"org.cy\",\n\t\"parliament.cy\",\n\t\"press.cy\",\n\t\"pro.cy\",\n\t\"tm.cy\",\n\t\"cz\",\n\t\"de\",\n\t\"dj\",\n\t\"dk\",\n\t\"dm\",\n\t\"com.dm\",\n\t\"net.dm\",\n\t\"org.dm\",\n\t\"edu.dm\",\n\t\"gov.dm\",\n\t\"do\",\n\t\"art.do\",\n\t\"com.do\",\n\t\"edu.do\",\n\t\"gob.do\",\n\t\"gov.do\",\n\t\"mil.do\",\n\t\"net.do\",\n\t\"org.do\",\n\t\"sld.do\",\n\t\"web.do\",\n\t\"dz\",\n\t\"com.dz\",\n\t\"org.dz\",\n\t\"net.dz\",\n\t\"gov.dz\",\n\t\"edu.dz\",\n\t\"asso.dz\",\n\t\"pol.dz\",\n\t\"art.dz\",\n\t\"ec\",\n\t\"com.ec\",\n\t\"info.ec\",\n\t\"net.ec\",\n\t\"fin.ec\",\n\t\"k12.ec\",\n\t\"med.ec\",\n\t\"pro.ec\",\n\t\"org.ec\",\n\t\"ed
u.ec\",\n\t\"gov.ec\",\n\t\"gob.ec\",\n\t\"mil.ec\",\n\t\"edu\",\n\t\"ee\",\n\t\"edu.ee\",\n\t\"gov.ee\",\n\t\"riik.ee\",\n\t\"lib.ee\",\n\t\"med.ee\",\n\t\"com.ee\",\n\t\"pri.ee\",\n\t\"aip.ee\",\n\t\"org.ee\",\n\t\"fie.ee\",\n\t\"eg\",\n\t\"com.eg\",\n\t\"edu.eg\",\n\t\"eun.eg\",\n\t\"gov.eg\",\n\t\"mil.eg\",\n\t\"name.eg\",\n\t\"net.eg\",\n\t\"org.eg\",\n\t\"sci.eg\",\n\t\"*.er\",\n\t\"es\",\n\t\"com.es\",\n\t\"nom.es\",\n\t\"org.es\",\n\t\"gob.es\",\n\t\"edu.es\",\n\t\"et\",\n\t\"com.et\",\n\t\"gov.et\",\n\t\"org.et\",\n\t\"edu.et\",\n\t\"biz.et\",\n\t\"name.et\",\n\t\"info.et\",\n\t\"net.et\",\n\t\"eu\",\n\t\"fi\",\n\t\"aland.fi\",\n\t\"*.fj\",\n\t\"*.fk\",\n\t\"fm\",\n\t\"fo\",\n\t\"fr\",\n\t\"com.fr\",\n\t\"asso.fr\",\n\t\"nom.fr\",\n\t\"prd.fr\",\n\t\"presse.fr\",\n\t\"tm.fr\",\n\t\"aeroport.fr\",\n\t\"assedic.fr\",\n\t\"avocat.fr\",\n\t\"avoues.fr\",\n\t\"cci.fr\",\n\t\"chambagri.fr\",\n\t\"chirurgiens-dentistes.fr\",\n\t\"experts-comptables.fr\",\n\t\"geometre-expert.fr\",\n\t\"gouv.fr\",\n\t\"greta.fr\",\n\t\"huissier-justice.fr\",\n\t\"medecin.fr\",\n\t\"notaires.fr\",\n\t\"pharmacien.fr\",\n\t\"port.fr\",\n\t\"veterinaire.fr\",\n\t\"ga\",\n\t\"gb\",\n\t\"gd\",\n\t\"ge\",\n\t\"com.ge\",\n\t\"edu.ge\",\n\t\"gov.ge\",\n\t\"org.ge\",\n\t\"mil.ge\",\n\t\"net.ge\",\n\t\"pvt.ge\",\n\t\"gf\",\n\t\"gg\",\n\t\"co.gg\",\n\t\"net.gg\",\n\t\"org.gg\",\n\t\"gh\",\n\t\"com.gh\",\n\t\"edu.gh\",\n\t\"gov.gh\",\n\t\"org.gh\",\n\t\"mil.gh\",\n\t\"gi\",\n\t\"com.gi\",\n\t\"ltd.gi\",\n\t\"gov.gi\",\n\t\"mod.gi\",\n\t\"edu.gi\",\n\t\"org.gi\",\n\t\"gl\",\n\t\"co.gl\",\n\t\"com.gl\",\n\t\"edu.gl\",\n\t\"net.gl\",\n\t\"org.gl\",\n\t\"gm\",\n\t\"gn\",\n\t\"ac.gn\",\n\t\"com.gn\",\n\t\"edu.gn\",\n\t\"gov.gn\",\n\t\"org.gn\",\n\t\"net.gn\",\n\t\"gov\",\n\t\"gp\",\n\t\"com.gp\",\n\t\"net.gp\",\n\t\"mobi.gp\",\n\t\"edu.gp\",\n\t\"org.gp\",\n\t\"asso.gp\",\n\t\"gq\",\n\t\"gr\",\n\t\"com.gr\",\n\t\"edu.gr\",\n\t\"net.gr\",\n\t\"org.gr\",\n\t\"gov.gr\",\n\t\"gs\",\n\t\"gt\",\n\t\"com.
gt\",\n\t\"edu.gt\",\n\t\"gob.gt\",\n\t\"ind.gt\",\n\t\"mil.gt\",\n\t\"net.gt\",\n\t\"org.gt\",\n\t\"*.gu\",\n\t\"gw\",\n\t\"gy\",\n\t\"co.gy\",\n\t\"com.gy\",\n\t\"edu.gy\",\n\t\"gov.gy\",\n\t\"net.gy\",\n\t\"org.gy\",\n\t\"hk\",\n\t\"com.hk\",\n\t\"edu.hk\",\n\t\"gov.hk\",\n\t\"idv.hk\",\n\t\"net.hk\",\n\t\"org.hk\",\n\t\"xn--55qx5d.hk\",\n\t\"xn--wcvs22d.hk\",\n\t\"xn--lcvr32d.hk\",\n\t\"xn--mxtq1m.hk\",\n\t\"xn--gmqw5a.hk\",\n\t\"xn--ciqpn.hk\",\n\t\"xn--gmq050i.hk\",\n\t\"xn--zf0avx.hk\",\n\t\"xn--io0a7i.hk\",\n\t\"xn--mk0axi.hk\",\n\t\"xn--od0alg.hk\",\n\t\"xn--od0aq3b.hk\",\n\t\"xn--tn0ag.hk\",\n\t\"xn--uc0atv.hk\",\n\t\"xn--uc0ay4a.hk\",\n\t\"hm\",\n\t\"hn\",\n\t\"com.hn\",\n\t\"edu.hn\",\n\t\"org.hn\",\n\t\"net.hn\",\n\t\"mil.hn\",\n\t\"gob.hn\",\n\t\"hr\",\n\t\"iz.hr\",\n\t\"from.hr\",\n\t\"name.hr\",\n\t\"com.hr\",\n\t\"ht\",\n\t\"com.ht\",\n\t\"shop.ht\",\n\t\"firm.ht\",\n\t\"info.ht\",\n\t\"adult.ht\",\n\t\"net.ht\",\n\t\"pro.ht\",\n\t\"org.ht\",\n\t\"med.ht\",\n\t\"art.ht\",\n\t\"coop.ht\",\n\t\"pol.ht\",\n\t\"asso.ht\",\n\t\"edu.ht\",\n\t\"rel.ht\",\n\t\"gouv.ht\",\n\t\"perso.ht\",\n\t\"hu\",\n\t\"co.hu\",\n\t\"info.hu\",\n\t\"org.hu\",\n\t\"priv.hu\",\n\t\"sport.hu\",\n\t\"tm.hu\",\n\t\"2000.hu\",\n\t\"agrar.hu\",\n\t\"bolt.hu\",\n\t\"casino.hu\",\n\t\"city.hu\",\n\t\"erotica.hu\",\n\t\"erotika.hu\",\n\t\"film.hu\",\n\t\"forum.hu\",\n\t\"games.hu\",\n\t\"hotel.hu\",\n\t\"ingatlan.hu\",\n\t\"jogasz.hu\",\n\t\"konyvelo.hu\",\n\t\"lakas.hu\",\n\t\"media.hu\",\n\t\"news.hu\",\n\t\"reklam.hu\",\n\t\"sex.hu\",\n\t\"shop.hu\",\n\t\"suli.hu\",\n\t\"szex.hu\",\n\t\"tozsde.hu\",\n\t\"utazas.hu\",\n\t\"video.hu\",\n\t\"id\",\n\t\"ac.id\",\n\t\"biz.id\",\n\t\"co.id\",\n\t\"desa.id\",\n\t\"go.id\",\n\t\"mil.id\",\n\t\"my.id\",\n\t\"net.id\",\n\t\"or.id\",\n\t\"sch.id\",\n\t\"web.id\",\n\t\"ie\",\n\t\"gov.ie\",\n\t\"il\",\n\t\"ac.il\",\n\t\"co.il\",\n\t\"gov.il\",\n\t\"idf.il\",\n\t\"k12.il\",\n\t\"muni.il\",\n\t\"net.il\",\n\t\"org.il\",\n\t\"im\",\n\t\"ac.im\",\
n\t\"co.im\",\n\t\"com.im\",\n\t\"ltd.co.im\",\n\t\"net.im\",\n\t\"org.im\",\n\t\"plc.co.im\",\n\t\"tt.im\",\n\t\"tv.im\",\n\t\"in\",\n\t\"co.in\",\n\t\"firm.in\",\n\t\"net.in\",\n\t\"org.in\",\n\t\"gen.in\",\n\t\"ind.in\",\n\t\"nic.in\",\n\t\"ac.in\",\n\t\"edu.in\",\n\t\"res.in\",\n\t\"gov.in\",\n\t\"mil.in\",\n\t\"info\",\n\t\"int\",\n\t\"eu.int\",\n\t\"io\",\n\t\"com.io\",\n\t\"iq\",\n\t\"gov.iq\",\n\t\"edu.iq\",\n\t\"mil.iq\",\n\t\"com.iq\",\n\t\"org.iq\",\n\t\"net.iq\",\n\t\"ir\",\n\t\"ac.ir\",\n\t\"co.ir\",\n\t\"gov.ir\",\n\t\"id.ir\",\n\t\"net.ir\",\n\t\"org.ir\",\n\t\"sch.ir\",\n\t\"xn--mgba3a4f16a.ir\",\n\t\"xn--mgba3a4fra.ir\",\n\t\"is\",\n\t\"net.is\",\n\t\"com.is\",\n\t\"edu.is\",\n\t\"gov.is\",\n\t\"org.is\",\n\t\"int.is\",\n\t\"it\",\n\t\"gov.it\",\n\t\"edu.it\",\n\t\"abr.it\",\n\t\"abruzzo.it\",\n\t\"aosta-valley.it\",\n\t\"aostavalley.it\",\n\t\"bas.it\",\n\t\"basilicata.it\",\n\t\"cal.it\",\n\t\"calabria.it\",\n\t\"cam.it\",\n\t\"campania.it\",\n\t\"emilia-romagna.it\",\n\t\"emiliaromagna.it\",\n\t\"emr.it\",\n\t\"friuli-v-giulia.it\",\n\t\"friuli-ve-giulia.it\",\n\t\"friuli-vegiulia.it\",\n\t\"friuli-venezia-giulia.it\",\n\t\"friuli-veneziagiulia.it\",\n\t\"friuli-vgiulia.it\",\n\t\"friuliv-giulia.it\",\n\t\"friulive-giulia.it\",\n\t\"friulivegiulia.it\",\n\t\"friulivenezia-giulia.it\",\n\t\"friuliveneziagiulia.it\",\n\t\"friulivgiulia.it\",\n\t\"fvg.it\",\n\t\"laz.it\",\n\t\"lazio.it\",\n\t\"lig.it\",\n\t\"liguria.it\",\n\t\"lom.it\",\n\t\"lombardia.it\",\n\t\"lombardy.it\",\n\t\"lucania.it\",\n\t\"mar.it\",\n\t\"marche.it\",\n\t\"mol.it\",\n\t\"molise.it\",\n\t\"piedmont.it\",\n\t\"piemonte.it\",\n\t\"pmn.it\",\n\t\"pug.it\",\n\t\"puglia.it\",\n\t\"sar.it\",\n\t\"sardegna.it\",\n\t\"sardinia.it\",\n\t\"sic.it\",\n\t\"sicilia.it\",\n\t\"sicily.it\",\n\t\"taa.it\",\n\t\"tos.it\",\n\t\"toscana.it\",\n\t\"trentino-a-adige.it\",\n\t\"trentino-aadige.it\",\n\t\"trentino-alto-adige.it\",\n\t\"trentino-altoadige.it\",\n\t\"trentino-s-tirol.it\",\n\t\"tre
ntino-stirol.it\",\n\t\"trentino-sud-tirol.it\",\n\t\"trentino-sudtirol.it\",\n\t\"trentino-sued-tirol.it\",\n\t\"trentino-suedtirol.it\",\n\t\"trentinoa-adige.it\",\n\t\"trentinoaadige.it\",\n\t\"trentinoalto-adige.it\",\n\t\"trentinoaltoadige.it\",\n\t\"trentinos-tirol.it\",\n\t\"trentinostirol.it\",\n\t\"trentinosud-tirol.it\",\n\t\"trentinosudtirol.it\",\n\t\"trentinosued-tirol.it\",\n\t\"trentinosuedtirol.it\",\n\t\"tuscany.it\",\n\t\"umb.it\",\n\t\"umbria.it\",\n\t\"val-d-aosta.it\",\n\t\"val-daosta.it\",\n\t\"vald-aosta.it\",\n\t\"valdaosta.it\",\n\t\"valle-aosta.it\",\n\t\"valle-d-aosta.it\",\n\t\"valle-daosta.it\",\n\t\"valleaosta.it\",\n\t\"valled-aosta.it\",\n\t\"valledaosta.it\",\n\t\"vallee-aoste.it\",\n\t\"valleeaoste.it\",\n\t\"vao.it\",\n\t\"vda.it\",\n\t\"ven.it\",\n\t\"veneto.it\",\n\t\"ag.it\",\n\t\"agrigento.it\",\n\t\"al.it\",\n\t\"alessandria.it\",\n\t\"alto-adige.it\",\n\t\"altoadige.it\",\n\t\"an.it\",\n\t\"ancona.it\",\n\t\"andria-barletta-trani.it\",\n\t\"andria-trani-barletta.it\",\n\t\"andriabarlettatrani.it\",\n\t\"andriatranibarletta.it\",\n\t\"ao.it\",\n\t\"aosta.it\",\n\t\"aoste.it\",\n\t\"ap.it\",\n\t\"aq.it\",\n\t\"aquila.it\",\n\t\"ar.it\",\n\t\"arezzo.it\",\n\t\"ascoli-piceno.it\",\n\t\"ascolipiceno.it\",\n\t\"asti.it\",\n\t\"at.it\",\n\t\"av.it\",\n\t\"avellino.it\",\n\t\"ba.it\",\n\t\"balsan.it\",\n\t\"bari.it\",\n\t\"barletta-trani-andria.it\",\n\t\"barlettatraniandria.it\",\n\t\"belluno.it\",\n\t\"benevento.it\",\n\t\"bergamo.it\",\n\t\"bg.it\",\n\t\"bi.it\",\n\t\"biella.it\",\n\t\"bl.it\",\n\t\"bn.it\",\n\t\"bo.it\",\n\t\"bologna.it\",\n\t\"bolzano.it\",\n\t\"bozen.it\",\n\t\"br.it\",\n\t\"brescia.it\",\n\t\"brindisi.it\",\n\t\"bs.it\",\n\t\"bt.it\",\n\t\"bz.it\",\n\t\"ca.it\",\n\t\"cagliari.it\",\n\t\"caltanissetta.it\",\n\t\"campidano-medio.it\",\n\t\"campidanomedio.it\",\n\t\"campobasso.it\",\n\t\"carbonia-iglesias.it\",\n\t\"carboniaiglesias.it\",\n\t\"carrara-massa.it\",\n\t\"carraramassa.it\",\n\t\"caserta.it\",\n\t\"ca
tania.it\",\n\t\"catanzaro.it\",\n\t\"cb.it\",\n\t\"ce.it\",\n\t\"cesena-forli.it\",\n\t\"cesenaforli.it\",\n\t\"ch.it\",\n\t\"chieti.it\",\n\t\"ci.it\",\n\t\"cl.it\",\n\t\"cn.it\",\n\t\"co.it\",\n\t\"como.it\",\n\t\"cosenza.it\",\n\t\"cr.it\",\n\t\"cremona.it\",\n\t\"crotone.it\",\n\t\"cs.it\",\n\t\"ct.it\",\n\t\"cuneo.it\",\n\t\"cz.it\",\n\t\"dell-ogliastra.it\",\n\t\"dellogliastra.it\",\n\t\"en.it\",\n\t\"enna.it\",\n\t\"fc.it\",\n\t\"fe.it\",\n\t\"fermo.it\",\n\t\"ferrara.it\",\n\t\"fg.it\",\n\t\"fi.it\",\n\t\"firenze.it\",\n\t\"florence.it\",\n\t\"fm.it\",\n\t\"foggia.it\",\n\t\"forli-cesena.it\",\n\t\"forlicesena.it\",\n\t\"fr.it\",\n\t\"frosinone.it\",\n\t\"ge.it\",\n\t\"genoa.it\",\n\t\"genova.it\",\n\t\"go.it\",\n\t\"gorizia.it\",\n\t\"gr.it\",\n\t\"grosseto.it\",\n\t\"iglesias-carbonia.it\",\n\t\"iglesiascarbonia.it\",\n\t\"im.it\",\n\t\"imperia.it\",\n\t\"is.it\",\n\t\"isernia.it\",\n\t\"kr.it\",\n\t\"la-spezia.it\",\n\t\"laquila.it\",\n\t\"laspezia.it\",\n\t\"latina.it\",\n\t\"lc.it\",\n\t\"le.it\",\n\t\"lecce.it\",\n\t\"lecco.it\",\n\t\"li.it\",\n\t\"livorno.it\",\n\t\"lo.it\",\n\t\"lodi.it\",\n\t\"lt.it\",\n\t\"lu.it\",\n\t\"lucca.it\",\n\t\"macerata.it\",\n\t\"mantova.it\",\n\t\"massa-carrara.it\",\n\t\"massacarrara.it\",\n\t\"matera.it\",\n\t\"mb.it\",\n\t\"mc.it\",\n\t\"me.it\",\n\t\"medio-campidano.it\",\n\t\"mediocampidano.it\",\n\t\"messina.it\",\n\t\"mi.it\",\n\t\"milan.it\",\n\t\"milano.it\",\n\t\"mn.it\",\n\t\"mo.it\",\n\t\"modena.it\",\n\t\"monza-brianza.it\",\n\t\"monza-e-della-brianza.it\",\n\t\"monza.it\",\n\t\"monzabrianza.it\",\n\t\"monzaebrianza.it\",\n\t\"monzaedellabrianza.it\",\n\t\"ms.it\",\n\t\"mt.it\",\n\t\"na.it\",\n\t\"naples.it\",\n\t\"napoli.it\",\n\t\"no.it\",\n\t\"novara.it\",\n\t\"nu.it\",\n\t\"nuoro.it\",\n\t\"og.it\",\n\t\"ogliastra.it\",\n\t\"olbia-tempio.it\",\n\t\"olbiatempio.it\",\n\t\"or.it\",\n\t\"oristano.it\",\n\t\"ot.it\",\n\t\"pa.it\",\n\t\"padova.it\",\n\t\"padua.it\",\n\t\"palermo.it\",\n\t\"parma.it\",\n\t\"p
avia.it\",\n\t\"pc.it\",\n\t\"pd.it\",\n\t\"pe.it\",\n\t\"perugia.it\",\n\t\"pesaro-urbino.it\",\n\t\"pesarourbino.it\",\n\t\"pescara.it\",\n\t\"pg.it\",\n\t\"pi.it\",\n\t\"piacenza.it\",\n\t\"pisa.it\",\n\t\"pistoia.it\",\n\t\"pn.it\",\n\t\"po.it\",\n\t\"pordenone.it\",\n\t\"potenza.it\",\n\t\"pr.it\",\n\t\"prato.it\",\n\t\"pt.it\",\n\t\"pu.it\",\n\t\"pv.it\",\n\t\"pz.it\",\n\t\"ra.it\",\n\t\"ragusa.it\",\n\t\"ravenna.it\",\n\t\"rc.it\",\n\t\"re.it\",\n\t\"reggio-calabria.it\",\n\t\"reggio-emilia.it\",\n\t\"reggiocalabria.it\",\n\t\"reggioemilia.it\",\n\t\"rg.it\",\n\t\"ri.it\",\n\t\"rieti.it\",\n\t\"rimini.it\",\n\t\"rm.it\",\n\t\"rn.it\",\n\t\"ro.it\",\n\t\"roma.it\",\n\t\"rome.it\",\n\t\"rovigo.it\",\n\t\"sa.it\",\n\t\"salerno.it\",\n\t\"sassari.it\",\n\t\"savona.it\",\n\t\"si.it\",\n\t\"siena.it\",\n\t\"siracusa.it\",\n\t\"so.it\",\n\t\"sondrio.it\",\n\t\"sp.it\",\n\t\"sr.it\",\n\t\"ss.it\",\n\t\"suedtirol.it\",\n\t\"sv.it\",\n\t\"ta.it\",\n\t\"taranto.it\",\n\t\"te.it\",\n\t\"tempio-olbia.it\",\n\t\"tempioolbia.it\",\n\t\"teramo.it\",\n\t\"terni.it\",\n\t\"tn.it\",\n\t\"to.it\",\n\t\"torino.it\",\n\t\"tp.it\",\n\t\"tr.it\",\n\t\"trani-andria-barletta.it\",\n\t\"trani-barletta-andria.it\",\n\t\"traniandriabarletta.it\",\n\t\"tranibarlettaandria.it\",\n\t\"trapani.it\",\n\t\"trentino.it\",\n\t\"trento.it\",\n\t\"treviso.it\",\n\t\"trieste.it\",\n\t\"ts.it\",\n\t\"turin.it\",\n\t\"tv.it\",\n\t\"ud.it\",\n\t\"udine.it\",\n\t\"urbino-pesaro.it\",\n\t\"urbinopesaro.it\",\n\t\"va.it\",\n\t\"varese.it\",\n\t\"vb.it\",\n\t\"vc.it\",\n\t\"ve.it\",\n\t\"venezia.it\",\n\t\"venice.it\",\n\t\"verbania.it\",\n\t\"vercelli.it\",\n\t\"verona.it\",\n\t\"vi.it\",\n\t\"vibo-valentia.it\",\n\t\"vibovalentia.it\",\n\t\"vicenza.it\",\n\t\"viterbo.it\",\n\t\"vr.it\",\n\t\"vs.it\",\n\t\"vt.it\",\n\t\"vv.it\",\n\t\"je\",\n\t\"co.je\",\n\t\"net.je\",\n\t\"org.je\",\n\t\"*.jm\",\n\t\"jo\",\n\t\"com.jo\",\n\t\"org.jo\",\n\t\"net.jo\",\n\t\"edu.jo\",\n\t\"sch.jo\",\n\t\"gov.jo\",\n\t\"mil.
jo\",\n\t\"name.jo\",\n\t\"jobs\",\n\t\"jp\",\n\t\"ac.jp\",\n\t\"ad.jp\",\n\t\"co.jp\",\n\t\"ed.jp\",\n\t\"go.jp\",\n\t\"gr.jp\",\n\t\"lg.jp\",\n\t\"ne.jp\",\n\t\"or.jp\",\n\t\"aichi.jp\",\n\t\"akita.jp\",\n\t\"aomori.jp\",\n\t\"chiba.jp\",\n\t\"ehime.jp\",\n\t\"fukui.jp\",\n\t\"fukuoka.jp\",\n\t\"fukushima.jp\",\n\t\"gifu.jp\",\n\t\"gunma.jp\",\n\t\"hiroshima.jp\",\n\t\"hokkaido.jp\",\n\t\"hyogo.jp\",\n\t\"ibaraki.jp\",\n\t\"ishikawa.jp\",\n\t\"iwate.jp\",\n\t\"kagawa.jp\",\n\t\"kagoshima.jp\",\n\t\"kanagawa.jp\",\n\t\"kochi.jp\",\n\t\"kumamoto.jp\",\n\t\"kyoto.jp\",\n\t\"mie.jp\",\n\t\"miyagi.jp\",\n\t\"miyazaki.jp\",\n\t\"nagano.jp\",\n\t\"nagasaki.jp\",\n\t\"nara.jp\",\n\t\"niigata.jp\",\n\t\"oita.jp\",\n\t\"okayama.jp\",\n\t\"okinawa.jp\",\n\t\"osaka.jp\",\n\t\"saga.jp\",\n\t\"saitama.jp\",\n\t\"shiga.jp\",\n\t\"shimane.jp\",\n\t\"shizuoka.jp\",\n\t\"tochigi.jp\",\n\t\"tokushima.jp\",\n\t\"tokyo.jp\",\n\t\"tottori.jp\",\n\t\"toyama.jp\",\n\t\"wakayama.jp\",\n\t\"yamagata.jp\",\n\t\"yamaguchi.jp\",\n\t\"yamanashi.jp\",\n\t\"xn--4pvxs.jp\",\n\t\"xn--vgu402c.jp\",\n\t\"xn--c3s14m.jp\",\n\t\"xn--f6qx53a.jp\",\n\t\"xn--8pvr4u.jp\",\n\t\"xn--uist22h.jp\",\n\t\"xn--djrs72d6uy.jp\",\n\t\"xn--mkru45i.jp\",\n\t\"xn--0trq7p7nn.jp\",\n\t\"xn--8ltr62k.jp\",\n\t\"xn--2m4a15e.jp\",\n\t\"xn--efvn9s.jp\",\n\t\"xn--32vp30h.jp\",\n\t\"xn--4it797k.jp\",\n\t\"xn--1lqs71d.jp\",\n\t\"xn--5rtp49c.jp\",\n\t\"xn--5js045d.jp\",\n\t\"xn--ehqz56n.jp\",\n\t\"xn--1lqs03n.jp\",\n\t\"xn--qqqt11m.jp\",\n\t\"xn--kbrq7o.jp\",\n\t\"xn--pssu33l.jp\",\n\t\"xn--ntsq17g.jp\",\n\t\"xn--uisz3g.jp\",\n\t\"xn--6btw5a.jp\",\n\t\"xn--1ctwo.jp\",\n\t\"xn--6orx2r.jp\",\n\t\"xn--rht61e.jp\",\n\t\"xn--rht27z.jp\",\n\t\"xn--djty4k.jp\",\n\t\"xn--nit225k.jp\",\n\t\"xn--rht3d.jp\",\n\t\"xn--klty5x.jp\",\n\t\"xn--kltx9a.jp\",\n\t\"xn--kltp7d.jp\",\n\t\"xn--uuwu58a.jp\",\n\t\"xn--zbx025d.jp\",\n\t\"xn--ntso0iqx3a.jp\",\n\t\"xn--elqq16h.jp\",\n\t\"xn--4it168d.jp\",\n\t\"xn--klt787d.jp\",\n\t\"xn--rny31h.jp\",\n\t\"xn
--7t0a264c.jp\",\n\t\"xn--5rtq34k.jp\",\n\t\"xn--k7yn95e.jp\",\n\t\"xn--tor131o.jp\",\n\t\"xn--d5qv7z876c.jp\",\n\t\"*.kawasaki.jp\",\n\t\"*.kitakyushu.jp\",\n\t\"*.kobe.jp\",\n\t\"*.nagoya.jp\",\n\t\"*.sapporo.jp\",\n\t\"*.sendai.jp\",\n\t\"*.yokohama.jp\",\n\t\"!city.kawasaki.jp\",\n\t\"!city.kitakyushu.jp\",\n\t\"!city.kobe.jp\",\n\t\"!city.nagoya.jp\",\n\t\"!city.sapporo.jp\",\n\t\"!city.sendai.jp\",\n\t\"!city.yokohama.jp\",\n\t\"aisai.aichi.jp\",\n\t\"ama.aichi.jp\",\n\t\"anjo.aichi.jp\",\n\t\"asuke.aichi.jp\",\n\t\"chiryu.aichi.jp\",\n\t\"chita.aichi.jp\",\n\t\"fuso.aichi.jp\",\n\t\"gamagori.aichi.jp\",\n\t\"handa.aichi.jp\",\n\t\"hazu.aichi.jp\",\n\t\"hekinan.aichi.jp\",\n\t\"higashiura.aichi.jp\",\n\t\"ichinomiya.aichi.jp\",\n\t\"inazawa.aichi.jp\",\n\t\"inuyama.aichi.jp\",\n\t\"isshiki.aichi.jp\",\n\t\"iwakura.aichi.jp\",\n\t\"kanie.aichi.jp\",\n\t\"kariya.aichi.jp\",\n\t\"kasugai.aichi.jp\",\n\t\"kira.aichi.jp\",\n\t\"kiyosu.aichi.jp\",\n\t\"komaki.aichi.jp\",\n\t\"konan.aichi.jp\",\n\t\"kota.aichi.jp\",\n\t\"mihama.aichi.jp\",\n\t\"miyoshi.aichi.jp\",\n\t\"nishio.aichi.jp\",\n\t\"nisshin.aichi.jp\",\n\t\"obu.aichi.jp\",\n\t\"oguchi.aichi.jp\",\n\t\"oharu.aichi.jp\",\n\t\"okazaki.aichi.jp\",\n\t\"owariasahi.aichi.jp\",\n\t\"seto.aichi.jp\",\n\t\"shikatsu.aichi.jp\",\n\t\"shinshiro.aichi.jp\",\n\t\"shitara.aichi.jp\",\n\t\"tahara.aichi.jp\",\n\t\"takahama.aichi.jp\",\n\t\"tobishima.aichi.jp\",\n\t\"toei.aichi.jp\",\n\t\"togo.aichi.jp\",\n\t\"tokai.aichi.jp\",\n\t\"tokoname.aichi.jp\",\n\t\"toyoake.aichi.jp\",\n\t\"toyohashi.aichi.jp\",\n\t\"toyokawa.aichi.jp\",\n\t\"toyone.aichi.jp\",\n\t\"toyota.aichi.jp\",\n\t\"tsushima.aichi.jp\",\n\t\"yatomi.aichi.jp\",\n\t\"akita.akita.jp\",\n\t\"daisen.akita.jp\",\n\t\"fujisato.akita.jp\",\n\t\"gojome.akita.jp\",\n\t\"hachirogata.akita.jp\",\n\t\"happou.akita.jp\",\n\t\"higashinaruse.akita.jp\",\n\t\"honjo.akita.jp\",\n\t\"honjyo.akita.jp\",\n\t\"ikawa.akita.jp\",\n\t\"kamikoani.akita.jp\",\n\t\"kamioka.akita.jp\",\n
\t\"katagami.akita.jp\",\n\t\"kazuno.akita.jp\",\n\t\"kitaakita.akita.jp\",\n\t\"kosaka.akita.jp\",\n\t\"kyowa.akita.jp\",\n\t\"misato.akita.jp\",\n\t\"mitane.akita.jp\",\n\t\"moriyoshi.akita.jp\",\n\t\"nikaho.akita.jp\",\n\t\"noshiro.akita.jp\",\n\t\"odate.akita.jp\",\n\t\"oga.akita.jp\",\n\t\"ogata.akita.jp\",\n\t\"semboku.akita.jp\",\n\t\"yokote.akita.jp\",\n\t\"yurihonjo.akita.jp\",\n\t\"aomori.aomori.jp\",\n\t\"gonohe.aomori.jp\",\n\t\"hachinohe.aomori.jp\",\n\t\"hashikami.aomori.jp\",\n\t\"hiranai.aomori.jp\",\n\t\"hirosaki.aomori.jp\",\n\t\"itayanagi.aomori.jp\",\n\t\"kuroishi.aomori.jp\",\n\t\"misawa.aomori.jp\",\n\t\"mutsu.aomori.jp\",\n\t\"nakadomari.aomori.jp\",\n\t\"noheji.aomori.jp\",\n\t\"oirase.aomori.jp\",\n\t\"owani.aomori.jp\",\n\t\"rokunohe.aomori.jp\",\n\t\"sannohe.aomori.jp\",\n\t\"shichinohe.aomori.jp\",\n\t\"shingo.aomori.jp\",\n\t\"takko.aomori.jp\",\n\t\"towada.aomori.jp\",\n\t\"tsugaru.aomori.jp\",\n\t\"tsuruta.aomori.jp\",\n\t\"abiko.chiba.jp\",\n\t\"asahi.chiba.jp\",\n\t\"chonan.chiba.jp\",\n\t\"chosei.chiba.jp\",\n\t\"choshi.chiba.jp\",\n\t\"chuo.chiba.jp\",\n\t\"funabashi.chiba.jp\",\n\t\"futtsu.chiba.jp\",\n\t\"hanamigawa.chiba.jp\",\n\t\"ichihara.chiba.jp\",\n\t\"ichikawa.chiba.jp\",\n\t\"ichinomiya.chiba.jp\",\n\t\"inzai.chiba.jp\",\n\t\"isumi.chiba.jp\",\n\t\"kamagaya.chiba.jp\",\n\t\"kamogawa.chiba.jp\",\n\t\"kashiwa.chiba.jp\",\n\t\"katori.chiba.jp\",\n\t\"katsuura.chiba.jp\",\n\t\"kimitsu.chiba.jp\",\n\t\"kisarazu.chiba.jp\",\n\t\"kozaki.chiba.jp\",\n\t\"kujukuri.chiba.jp\",\n\t\"kyonan.chiba.jp\",\n\t\"matsudo.chiba.jp\",\n\t\"midori.chiba.jp\",\n\t\"mihama.chiba.jp\",\n\t\"minamiboso.chiba.jp\",\n\t\"mobara.chiba.jp\",\n\t\"mutsuzawa.chiba.jp\",\n\t\"nagara.chiba.jp\",\n\t\"nagareyama.chiba.jp\",\n\t\"narashino.chiba.jp\",\n\t\"narita.chiba.jp\",\n\t\"noda.chiba.jp\",\n\t\"oamishirasato.chiba.jp\",\n\t\"omigawa.chiba.jp\",\n\t\"onjuku.chiba.jp\",\n\t\"otaki.chiba.jp\",\n\t\"sakae.chiba.jp\",\n\t\"sakura.chiba.jp\",\n\t\"shimofu
sa.chiba.jp\",\n\t\"shirako.chiba.jp\",\n\t\"shiroi.chiba.jp\",\n\t\"shisui.chiba.jp\",\n\t\"sodegaura.chiba.jp\",\n\t\"sosa.chiba.jp\",\n\t\"tako.chiba.jp\",\n\t\"tateyama.chiba.jp\",\n\t\"togane.chiba.jp\",\n\t\"tohnosho.chiba.jp\",\n\t\"tomisato.chiba.jp\",\n\t\"urayasu.chiba.jp\",\n\t\"yachimata.chiba.jp\",\n\t\"yachiyo.chiba.jp\",\n\t\"yokaichiba.chiba.jp\",\n\t\"yokoshibahikari.chiba.jp\",\n\t\"yotsukaido.chiba.jp\",\n\t\"ainan.ehime.jp\",\n\t\"honai.ehime.jp\",\n\t\"ikata.ehime.jp\",\n\t\"imabari.ehime.jp\",\n\t\"iyo.ehime.jp\",\n\t\"kamijima.ehime.jp\",\n\t\"kihoku.ehime.jp\",\n\t\"kumakogen.ehime.jp\",\n\t\"masaki.ehime.jp\",\n\t\"matsuno.ehime.jp\",\n\t\"matsuyama.ehime.jp\",\n\t\"namikata.ehime.jp\",\n\t\"niihama.ehime.jp\",\n\t\"ozu.ehime.jp\",\n\t\"saijo.ehime.jp\",\n\t\"seiyo.ehime.jp\",\n\t\"shikokuchuo.ehime.jp\",\n\t\"tobe.ehime.jp\",\n\t\"toon.ehime.jp\",\n\t\"uchiko.ehime.jp\",\n\t\"uwajima.ehime.jp\",\n\t\"yawatahama.ehime.jp\",\n\t\"echizen.fukui.jp\",\n\t\"eiheiji.fukui.jp\",\n\t\"fukui.fukui.jp\",\n\t\"ikeda.fukui.jp\",\n\t\"katsuyama.fukui.jp\",\n\t\"mihama.fukui.jp\",\n\t\"minamiechizen.fukui.jp\",\n\t\"obama.fukui.jp\",\n\t\"ohi.fukui.jp\",\n\t\"ono.fukui.jp\",\n\t\"sabae.fukui.jp\",\n\t\"sakai.fukui.jp\",\n\t\"takahama.fukui.jp\",\n\t\"tsuruga.fukui.jp\",\n\t\"wakasa.fukui.jp\",\n\t\"ashiya.fukuoka.jp\",\n\t\"buzen.fukuoka.jp\",\n\t\"chikugo.fukuoka.jp\",\n\t\"chikuho.fukuoka.jp\",\n\t\"chikujo.fukuoka.jp\",\n\t\"chikushino.fukuoka.jp\",\n\t\"chikuzen.fukuoka.jp\",\n\t\"chuo.fukuoka.jp\",\n\t\"dazaifu.fukuoka.jp\",\n\t\"fukuchi.fukuoka.jp\",\n\t\"hakata.fukuoka.jp\",\n\t\"higashi.fukuoka.jp\",\n\t\"hirokawa.fukuoka.jp\",\n\t\"hisayama.fukuoka.jp\",\n\t\"iizuka.fukuoka.jp\",\n\t\"inatsuki.fukuoka.jp\",\n\t\"kaho.fukuoka.jp\",\n\t\"kasuga.fukuoka.jp\",\n\t\"kasuya.fukuoka.jp\",\n\t\"kawara.fukuoka.jp\",\n\t\"keisen.fukuoka.jp\",\n\t\"koga.fukuoka.jp\",\n\t\"kurate.fukuoka.jp\",\n\t\"kurogi.fukuoka.jp\",\n\t\"kurume.fukuoka.jp\",\n\t\"minami.
fukuoka.jp\",\n\t\"miyako.fukuoka.jp\",\n\t\"miyama.fukuoka.jp\",\n\t\"miyawaka.fukuoka.jp\",\n\t\"mizumaki.fukuoka.jp\",\n\t\"munakata.fukuoka.jp\",\n\t\"nakagawa.fukuoka.jp\",\n\t\"nakama.fukuoka.jp\",\n\t\"nishi.fukuoka.jp\",\n\t\"nogata.fukuoka.jp\",\n\t\"ogori.fukuoka.jp\",\n\t\"okagaki.fukuoka.jp\",\n\t\"okawa.fukuoka.jp\",\n\t\"oki.fukuoka.jp\",\n\t\"omuta.fukuoka.jp\",\n\t\"onga.fukuoka.jp\",\n\t\"onojo.fukuoka.jp\",\n\t\"oto.fukuoka.jp\",\n\t\"saigawa.fukuoka.jp\",\n\t\"sasaguri.fukuoka.jp\",\n\t\"shingu.fukuoka.jp\",\n\t\"shinyoshitomi.fukuoka.jp\",\n\t\"shonai.fukuoka.jp\",\n\t\"soeda.fukuoka.jp\",\n\t\"sue.fukuoka.jp\",\n\t\"tachiarai.fukuoka.jp\",\n\t\"tagawa.fukuoka.jp\",\n\t\"takata.fukuoka.jp\",\n\t\"toho.fukuoka.jp\",\n\t\"toyotsu.fukuoka.jp\",\n\t\"tsuiki.fukuoka.jp\",\n\t\"ukiha.fukuoka.jp\",\n\t\"umi.fukuoka.jp\",\n\t\"usui.fukuoka.jp\",\n\t\"yamada.fukuoka.jp\",\n\t\"yame.fukuoka.jp\",\n\t\"yanagawa.fukuoka.jp\",\n\t\"yukuhashi.fukuoka.jp\",\n\t\"aizubange.fukushima.jp\",\n\t\"aizumisato.fukushima.jp\",\n\t\"aizuwakamatsu.fukushima.jp\",\n\t\"asakawa.fukushima.jp\",\n\t\"bandai.fukushima.jp\",\n\t\"date.fukushima.jp\",\n\t\"fukushima.fukushima.jp\",\n\t\"furudono.fukushima.jp\",\n\t\"futaba.fukushima.jp\",\n\t\"hanawa.fukushima.jp\",\n\t\"higashi.fukushima.jp\",\n\t\"hirata.fukushima.jp\",\n\t\"hirono.fukushima.jp\",\n\t\"iitate.fukushima.jp\",\n\t\"inawashiro.fukushima.jp\",\n\t\"ishikawa.fukushima.jp\",\n\t\"iwaki.fukushima.jp\",\n\t\"izumizaki.fukushima.jp\",\n\t\"kagamiishi.fukushima.jp\",\n\t\"kaneyama.fukushima.jp\",\n\t\"kawamata.fukushima.jp\",\n\t\"kitakata.fukushima.jp\",\n\t\"kitashiobara.fukushima.jp\",\n\t\"koori.fukushima.jp\",\n\t\"koriyama.fukushima.jp\",\n\t\"kunimi.fukushima.jp\",\n\t\"miharu.fukushima.jp\",\n\t\"mishima.fukushima.jp\",\n\t\"namie.fukushima.jp\",\n\t\"nango.fukushima.jp\",\n\t\"nishiaizu.fukushima.jp\",\n\t\"nishigo.fukushima.jp\",\n\t\"okuma.fukushima.jp\",\n\t\"omotego.fukushima.jp\",\n\t\"ono.fukushima.jp\",
\n\t\"otama.fukushima.jp\",\n\t\"samegawa.fukushima.jp\",\n\t\"shimogo.fukushima.jp\",\n\t\"shirakawa.fukushima.jp\",\n\t\"showa.fukushima.jp\",\n\t\"soma.fukushima.jp\",\n\t\"sukagawa.fukushima.jp\",\n\t\"taishin.fukushima.jp\",\n\t\"tamakawa.fukushima.jp\",\n\t\"tanagura.fukushima.jp\",\n\t\"tenei.fukushima.jp\",\n\t\"yabuki.fukushima.jp\",\n\t\"yamato.fukushima.jp\",\n\t\"yamatsuri.fukushima.jp\",\n\t\"yanaizu.fukushima.jp\",\n\t\"yugawa.fukushima.jp\",\n\t\"anpachi.gifu.jp\",\n\t\"ena.gifu.jp\",\n\t\"gifu.gifu.jp\",\n\t\"ginan.gifu.jp\",\n\t\"godo.gifu.jp\",\n\t\"gujo.gifu.jp\",\n\t\"hashima.gifu.jp\",\n\t\"hichiso.gifu.jp\",\n\t\"hida.gifu.jp\",\n\t\"higashishirakawa.gifu.jp\",\n\t\"ibigawa.gifu.jp\",\n\t\"ikeda.gifu.jp\",\n\t\"kakamigahara.gifu.jp\",\n\t\"kani.gifu.jp\",\n\t\"kasahara.gifu.jp\",\n\t\"kasamatsu.gifu.jp\",\n\t\"kawaue.gifu.jp\",\n\t\"kitagata.gifu.jp\",\n\t\"mino.gifu.jp\",\n\t\"minokamo.gifu.jp\",\n\t\"mitake.gifu.jp\",\n\t\"mizunami.gifu.jp\",\n\t\"motosu.gifu.jp\",\n\t\"nakatsugawa.gifu.jp\",\n\t\"ogaki.gifu.jp\",\n\t\"sakahogi.gifu.jp\",\n\t\"seki.gifu.jp\",\n\t\"sekigahara.gifu.jp\",\n\t\"shirakawa.gifu.jp\",\n\t\"tajimi.gifu.jp\",\n\t\"takayama.gifu.jp\",\n\t\"tarui.gifu.jp\",\n\t\"toki.gifu.jp\",\n\t\"tomika.gifu.jp\",\n\t\"wanouchi.gifu.jp\",\n\t\"yamagata.gifu.jp\",\n\t\"yaotsu.gifu.jp\",\n\t\"yoro.gifu.jp\",\n\t\"annaka.gunma.jp\",\n\t\"chiyoda.gunma.jp\",\n\t\"fujioka.gunma.jp\",\n\t\"higashiagatsuma.gunma.jp\",\n\t\"isesaki.gunma.jp\",\n\t\"itakura.gunma.jp\",\n\t\"kanna.gunma.jp\",\n\t\"kanra.gunma.jp\",\n\t\"katashina.gunma.jp\",\n\t\"kawaba.gunma.jp\",\n\t\"kiryu.gunma.jp\",\n\t\"kusatsu.gunma.jp\",\n\t\"maebashi.gunma.jp\",\n\t\"meiwa.gunma.jp\",\n\t\"midori.gunma.jp\",\n\t\"minakami.gunma.jp\",\n\t\"naganohara.gunma.jp\",\n\t\"nakanojo.gunma.jp\",\n\t\"nanmoku.gunma.jp\",\n\t\"numata.gunma.jp\",\n\t\"oizumi.gunma.jp\",\n\t\"ora.gunma.jp\",\n\t\"ota.gunma.jp\",\n\t\"shibukawa.gunma.jp\",\n\t\"shimonita.gunma.jp\",\n\t\"shinto.gun
ma.jp\",\n\t\"showa.gunma.jp\",\n\t\"takasaki.gunma.jp\",\n\t\"takayama.gunma.jp\",\n\t\"tamamura.gunma.jp\",\n\t\"tatebayashi.gunma.jp\",\n\t\"tomioka.gunma.jp\",\n\t\"tsukiyono.gunma.jp\",\n\t\"tsumagoi.gunma.jp\",\n\t\"ueno.gunma.jp\",\n\t\"yoshioka.gunma.jp\",\n\t\"asaminami.hiroshima.jp\",\n\t\"daiwa.hiroshima.jp\",\n\t\"etajima.hiroshima.jp\",\n\t\"fuchu.hiroshima.jp\",\n\t\"fukuyama.hiroshima.jp\",\n\t\"hatsukaichi.hiroshima.jp\",\n\t\"higashihiroshima.hiroshima.jp\",\n\t\"hongo.hiroshima.jp\",\n\t\"jinsekikogen.hiroshima.jp\",\n\t\"kaita.hiroshima.jp\",\n\t\"kui.hiroshima.jp\",\n\t\"kumano.hiroshima.jp\",\n\t\"kure.hiroshima.jp\",\n\t\"mihara.hiroshima.jp\",\n\t\"miyoshi.hiroshima.jp\",\n\t\"naka.hiroshima.jp\",\n\t\"onomichi.hiroshima.jp\",\n\t\"osakikamijima.hiroshima.jp\",\n\t\"otake.hiroshima.jp\",\n\t\"saka.hiroshima.jp\",\n\t\"sera.hiroshima.jp\",\n\t\"seranishi.hiroshima.jp\",\n\t\"shinichi.hiroshima.jp\",\n\t\"shobara.hiroshima.jp\",\n\t\"takehara.hiroshima.jp\",\n\t\"abashiri.hokkaido.jp\",\n\t\"abira.hokkaido.jp\",\n\t\"aibetsu.hokkaido.jp\",\n\t\"akabira.hokkaido.jp\",\n\t\"akkeshi.hokkaido.jp\",\n\t\"asahikawa.hokkaido.jp\",\n\t\"ashibetsu.hokkaido.jp\",\n\t\"ashoro.hokkaido.jp\",\n\t\"assabu.hokkaido.jp\",\n\t\"atsuma.hokkaido.jp\",\n\t\"bibai.hokkaido.jp\",\n\t\"biei.hokkaido.jp\",\n\t\"bifuka.hokkaido.jp\",\n\t\"bihoro.hokkaido.jp\",\n\t\"biratori.hokkaido.jp\",\n\t\"chippubetsu.hokkaido.jp\",\n\t\"chitose.hokkaido.jp\",\n\t\"date.hokkaido.jp\",\n\t\"ebetsu.hokkaido.jp\",\n\t\"embetsu.hokkaido.jp\",\n\t\"eniwa.hokkaido.jp\",\n\t\"erimo.hokkaido.jp\",\n\t\"esan.hokkaido.jp\",\n\t\"esashi.hokkaido.jp\",\n\t\"fukagawa.hokkaido.jp\",\n\t\"fukushima.hokkaido.jp\",\n\t\"furano.hokkaido.jp\",\n\t\"furubira.hokkaido.jp\",\n\t\"haboro.hokkaido.jp\",\n\t\"hakodate.hokkaido.jp\",\n\t\"hamatonbetsu.hokkaido.jp\",\n\t\"hidaka.hokkaido.jp\",\n\t\"higashikagura.hokkaido.jp\",\n\t\"higashikawa.hokkaido.jp\",\n\t\"hiroo.hokkaido.jp\",\n\t\"hokuryu.hokkaido.jp\
",\n\t\"hokuto.hokkaido.jp\",\n\t\"honbetsu.hokkaido.jp\",\n\t\"horokanai.hokkaido.jp\",\n\t\"horonobe.hokkaido.jp\",\n\t\"ikeda.hokkaido.jp\",\n\t\"imakane.hokkaido.jp\",\n\t\"ishikari.hokkaido.jp\",\n\t\"iwamizawa.hokkaido.jp\",\n\t\"iwanai.hokkaido.jp\",\n\t\"kamifurano.hokkaido.jp\",\n\t\"kamikawa.hokkaido.jp\",\n\t\"kamishihoro.hokkaido.jp\",\n\t\"kamisunagawa.hokkaido.jp\",\n\t\"kamoenai.hokkaido.jp\",\n\t\"kayabe.hokkaido.jp\",\n\t\"kembuchi.hokkaido.jp\",\n\t\"kikonai.hokkaido.jp\",\n\t\"kimobetsu.hokkaido.jp\",\n\t\"kitahiroshima.hokkaido.jp\",\n\t\"kitami.hokkaido.jp\",\n\t\"kiyosato.hokkaido.jp\",\n\t\"koshimizu.hokkaido.jp\",\n\t\"kunneppu.hokkaido.jp\",\n\t\"kuriyama.hokkaido.jp\",\n\t\"kuromatsunai.hokkaido.jp\",\n\t\"kushiro.hokkaido.jp\",\n\t\"kutchan.hokkaido.jp\",\n\t\"kyowa.hokkaido.jp\",\n\t\"mashike.hokkaido.jp\",\n\t\"matsumae.hokkaido.jp\",\n\t\"mikasa.hokkaido.jp\",\n\t\"minamifurano.hokkaido.jp\",\n\t\"mombetsu.hokkaido.jp\",\n\t\"moseushi.hokkaido.jp\",\n\t\"mukawa.hokkaido.jp\",\n\t\"muroran.hokkaido.jp\",\n\t\"naie.hokkaido.jp\",\n\t\"nakagawa.hokkaido.jp\",\n\t\"nakasatsunai.hokkaido.jp\",\n\t\"nakatombetsu.hokkaido.jp\",\n\t\"nanae.hokkaido.jp\",\n\t\"nanporo.hokkaido.jp\",\n\t\"nayoro.hokkaido.jp\",\n\t\"nemuro.hokkaido.jp\",\n\t\"niikappu.hokkaido.jp\",\n\t\"niki.hokkaido.jp\",\n\t\"nishiokoppe.hokkaido.jp\",\n\t\"noboribetsu.hokkaido.jp\",\n\t\"numata.hokkaido.jp\",\n\t\"obihiro.hokkaido.jp\",\n\t\"obira.hokkaido.jp\",\n\t\"oketo.hokkaido.jp\",\n\t\"okoppe.hokkaido.jp\",\n\t\"otaru.hokkaido.jp\",\n\t\"otobe.hokkaido.jp\",\n\t\"otofuke.hokkaido.jp\",\n\t\"otoineppu.hokkaido.jp\",\n\t\"oumu.hokkaido.jp\",\n\t\"ozora.hokkaido.jp\",\n\t\"pippu.hokkaido.jp\",\n\t\"rankoshi.hokkaido.jp\",\n\t\"rebun.hokkaido.jp\",\n\t\"rikubetsu.hokkaido.jp\",\n\t\"rishiri.hokkaido.jp\",\n\t\"rishirifuji.hokkaido.jp\",\n\t\"saroma.hokkaido.jp\",\n\t\"sarufutsu.hokkaido.jp\",\n\t\"shakotan.hokkaido.jp\",\n\t\"shari.hokkaido.jp\",\n\t\"shibecha.hokkaido.jp\"
,\n\t\"shibetsu.hokkaido.jp\",\n\t\"shikabe.hokkaido.jp\",\n\t\"shikaoi.hokkaido.jp\",\n\t\"shimamaki.hokkaido.jp\",\n\t\"shimizu.hokkaido.jp\",\n\t\"shimokawa.hokkaido.jp\",\n\t\"shinshinotsu.hokkaido.jp\",\n\t\"shintoku.hokkaido.jp\",\n\t\"shiranuka.hokkaido.jp\",\n\t\"shiraoi.hokkaido.jp\",\n\t\"shiriuchi.hokkaido.jp\",\n\t\"sobetsu.hokkaido.jp\",\n\t\"sunagawa.hokkaido.jp\",\n\t\"taiki.hokkaido.jp\",\n\t\"takasu.hokkaido.jp\",\n\t\"takikawa.hokkaido.jp\",\n\t\"takinoue.hokkaido.jp\",\n\t\"teshikaga.hokkaido.jp\",\n\t\"tobetsu.hokkaido.jp\",\n\t\"tohma.hokkaido.jp\",\n\t\"tomakomai.hokkaido.jp\",\n\t\"tomari.hokkaido.jp\",\n\t\"toya.hokkaido.jp\",\n\t\"toyako.hokkaido.jp\",\n\t\"toyotomi.hokkaido.jp\",\n\t\"toyoura.hokkaido.jp\",\n\t\"tsubetsu.hokkaido.jp\",\n\t\"tsukigata.hokkaido.jp\",\n\t\"urakawa.hokkaido.jp\",\n\t\"urausu.hokkaido.jp\",\n\t\"uryu.hokkaido.jp\",\n\t\"utashinai.hokkaido.jp\",\n\t\"wakkanai.hokkaido.jp\",\n\t\"wassamu.hokkaido.jp\",\n\t\"yakumo.hokkaido.jp\",\n\t\"yoichi.hokkaido.jp\",\n\t\"aioi.hyogo.jp\",\n\t\"akashi.hyogo.jp\",\n\t\"ako.hyogo.jp\",\n\t\"amagasaki.hyogo.jp\",\n\t\"aogaki.hyogo.jp\",\n\t\"asago.hyogo.jp\",\n\t\"ashiya.hyogo.jp\",\n\t\"awaji.hyogo.jp\",\n\t\"fukusaki.hyogo.jp\",\n\t\"goshiki.hyogo.jp\",\n\t\"harima.hyogo.jp\",\n\t\"himeji.hyogo.jp\",\n\t\"ichikawa.hyogo.jp\",\n\t\"inagawa.hyogo.jp\",\n\t\"itami.hyogo.jp\",\n\t\"kakogawa.hyogo.jp\",\n\t\"kamigori.hyogo.jp\",\n\t\"kamikawa.hyogo.jp\",\n\t\"kasai.hyogo.jp\",\n\t\"kasuga.hyogo.jp\",\n\t\"kawanishi.hyogo.jp\",\n\t\"miki.hyogo.jp\",\n\t\"minamiawaji.hyogo.jp\",\n\t\"nishinomiya.hyogo.jp\",\n\t\"nishiwaki.hyogo.jp\",\n\t\"ono.hyogo.jp\",\n\t\"sanda.hyogo.jp\",\n\t\"sannan.hyogo.jp\",\n\t\"sasayama.hyogo.jp\",\n\t\"sayo.hyogo.jp\",\n\t\"shingu.hyogo.jp\",\n\t\"shinonsen.hyogo.jp\",\n\t\"shiso.hyogo.jp\",\n\t\"sumoto.hyogo.jp\",\n\t\"taishi.hyogo.jp\",\n\t\"taka.hyogo.jp\",\n\t\"takarazuka.hyogo.jp\",\n\t\"takasago.hyogo.jp\",\n\t\"takino.hyogo.jp\",\n\t\"tamba.hyogo.jp
\",\n\t\"tatsuno.hyogo.jp\",\n\t\"toyooka.hyogo.jp\",\n\t\"yabu.hyogo.jp\",\n\t\"yashiro.hyogo.jp\",\n\t\"yoka.hyogo.jp\",\n\t\"yokawa.hyogo.jp\",\n\t\"ami.ibaraki.jp\",\n\t\"asahi.ibaraki.jp\",\n\t\"bando.ibaraki.jp\",\n\t\"chikusei.ibaraki.jp\",\n\t\"daigo.ibaraki.jp\",\n\t\"fujishiro.ibaraki.jp\",\n\t\"hitachi.ibaraki.jp\",\n\t\"hitachinaka.ibaraki.jp\",\n\t\"hitachiomiya.ibaraki.jp\",\n\t\"hitachiota.ibaraki.jp\",\n\t\"ibaraki.ibaraki.jp\",\n\t\"ina.ibaraki.jp\",\n\t\"inashiki.ibaraki.jp\",\n\t\"itako.ibaraki.jp\",\n\t\"iwama.ibaraki.jp\",\n\t\"joso.ibaraki.jp\",\n\t\"kamisu.ibaraki.jp\",\n\t\"kasama.ibaraki.jp\",\n\t\"kashima.ibaraki.jp\",\n\t\"kasumigaura.ibaraki.jp\",\n\t\"koga.ibaraki.jp\",\n\t\"miho.ibaraki.jp\",\n\t\"mito.ibaraki.jp\",\n\t\"moriya.ibaraki.jp\",\n\t\"naka.ibaraki.jp\",\n\t\"namegata.ibaraki.jp\",\n\t\"oarai.ibaraki.jp\",\n\t\"ogawa.ibaraki.jp\",\n\t\"omitama.ibaraki.jp\",\n\t\"ryugasaki.ibaraki.jp\",\n\t\"sakai.ibaraki.jp\",\n\t\"sakuragawa.ibaraki.jp\",\n\t\"shimodate.ibaraki.jp\",\n\t\"shimotsuma.ibaraki.jp\",\n\t\"shirosato.ibaraki.jp\",\n\t\"sowa.ibaraki.jp\",\n\t\"suifu.ibaraki.jp\",\n\t\"takahagi.ibaraki.jp\",\n\t\"tamatsukuri.ibaraki.jp\",\n\t\"tokai.ibaraki.jp\",\n\t\"tomobe.ibaraki.jp\",\n\t\"tone.ibaraki.jp\",\n\t\"toride.ibaraki.jp\",\n\t\"tsuchiura.ibaraki.jp\",\n\t\"tsukuba.ibaraki.jp\",\n\t\"uchihara.ibaraki.jp\",\n\t\"ushiku.ibaraki.jp\",\n\t\"yachiyo.ibaraki.jp\",\n\t\"yamagata.ibaraki.jp\",\n\t\"yawara.ibaraki.jp\",\n\t\"yuki.ibaraki.jp\",\n\t\"anamizu.ishikawa.jp\",\n\t\"hakui.ishikawa.jp\",\n\t\"hakusan.ishikawa.jp\",\n\t\"kaga.ishikawa.jp\",\n\t\"kahoku.ishikawa.jp\",\n\t\"kanazawa.ishikawa.jp\",\n\t\"kawakita.ishikawa.jp\",\n\t\"komatsu.ishikawa.jp\",\n\t\"nakanoto.ishikawa.jp\",\n\t\"nanao.ishikawa.jp\",\n\t\"nomi.ishikawa.jp\",\n\t\"nonoichi.ishikawa.jp\",\n\t\"noto.ishikawa.jp\",\n\t\"shika.ishikawa.jp\",\n\t\"suzu.ishikawa.jp\",\n\t\"tsubata.ishikawa.jp\",\n\t\"tsurugi.ishikawa.jp\",\n\t\"uchinada.ishikawa.jp\",\n\t
\"wajima.ishikawa.jp\",\n\t\"fudai.iwate.jp\",\n\t\"fujisawa.iwate.jp\",\n\t\"hanamaki.iwate.jp\",\n\t\"hiraizumi.iwate.jp\",\n\t\"hirono.iwate.jp\",\n\t\"ichinohe.iwate.jp\",\n\t\"ichinoseki.iwate.jp\",\n\t\"iwaizumi.iwate.jp\",\n\t\"iwate.iwate.jp\",\n\t\"joboji.iwate.jp\",\n\t\"kamaishi.iwate.jp\",\n\t\"kanegasaki.iwate.jp\",\n\t\"karumai.iwate.jp\",\n\t\"kawai.iwate.jp\",\n\t\"kitakami.iwate.jp\",\n\t\"kuji.iwate.jp\",\n\t\"kunohe.iwate.jp\",\n\t\"kuzumaki.iwate.jp\",\n\t\"miyako.iwate.jp\",\n\t\"mizusawa.iwate.jp\",\n\t\"morioka.iwate.jp\",\n\t\"ninohe.iwate.jp\",\n\t\"noda.iwate.jp\",\n\t\"ofunato.iwate.jp\",\n\t\"oshu.iwate.jp\",\n\t\"otsuchi.iwate.jp\",\n\t\"rikuzentakata.iwate.jp\",\n\t\"shiwa.iwate.jp\",\n\t\"shizukuishi.iwate.jp\",\n\t\"sumita.iwate.jp\",\n\t\"tanohata.iwate.jp\",\n\t\"tono.iwate.jp\",\n\t\"yahaba.iwate.jp\",\n\t\"yamada.iwate.jp\",\n\t\"ayagawa.kagawa.jp\",\n\t\"higashikagawa.kagawa.jp\",\n\t\"kanonji.kagawa.jp\",\n\t\"kotohira.kagawa.jp\",\n\t\"manno.kagawa.jp\",\n\t\"marugame.kagawa.jp\",\n\t\"mitoyo.kagawa.jp\",\n\t\"naoshima.kagawa.jp\",\n\t\"sanuki.kagawa.jp\",\n\t\"tadotsu.kagawa.jp\",\n\t\"takamatsu.kagawa.jp\",\n\t\"tonosho.kagawa.jp\",\n\t\"uchinomi.kagawa.jp\",\n\t\"utazu.kagawa.jp\",\n\t\"zentsuji.kagawa.jp\",\n\t\"akune.kagoshima.jp\",\n\t\"amami.kagoshima.jp\",\n\t\"hioki.kagoshima.jp\",\n\t\"isa.kagoshima.jp\",\n\t\"isen.kagoshima.jp\",\n\t\"izumi.kagoshima.jp\",\n\t\"kagoshima.kagoshima.jp\",\n\t\"kanoya.kagoshima.jp\",\n\t\"kawanabe.kagoshima.jp\",\n\t\"kinko.kagoshima.jp\",\n\t\"kouyama.kagoshima.jp\",\n\t\"makurazaki.kagoshima.jp\",\n\t\"matsumoto.kagoshima.jp\",\n\t\"minamitane.kagoshima.jp\",\n\t\"nakatane.kagoshima.jp\",\n\t\"nishinoomote.kagoshima.jp\",\n\t\"satsumasendai.kagoshima.jp\",\n\t\"soo.kagoshima.jp\",\n\t\"tarumizu.kagoshima.jp\",\n\t\"yusui.kagoshima.jp\",\n\t\"aikawa.kanagawa.jp\",\n\t\"atsugi.kanagawa.jp\",\n\t\"ayase.kanagawa.jp\",\n\t\"chigasaki.kanagawa.jp\",\n\t\"ebina.kanagawa.jp\",\n\t\"fujisawa.
kanagawa.jp\",\n\t\"hadano.kanagawa.jp\",\n\t\"hakone.kanagawa.jp\",\n\t\"hiratsuka.kanagawa.jp\",\n\t\"isehara.kanagawa.jp\",\n\t\"kaisei.kanagawa.jp\",\n\t\"kamakura.kanagawa.jp\",\n\t\"kiyokawa.kanagawa.jp\",\n\t\"matsuda.kanagawa.jp\",\n\t\"minamiashigara.kanagawa.jp\",\n\t\"miura.kanagawa.jp\",\n\t\"nakai.kanagawa.jp\",\n\t\"ninomiya.kanagawa.jp\",\n\t\"odawara.kanagawa.jp\",\n\t\"oi.kanagawa.jp\",\n\t\"oiso.kanagawa.jp\",\n\t\"sagamihara.kanagawa.jp\",\n\t\"samukawa.kanagawa.jp\",\n\t\"tsukui.kanagawa.jp\",\n\t\"yamakita.kanagawa.jp\",\n\t\"yamato.kanagawa.jp\",\n\t\"yokosuka.kanagawa.jp\",\n\t\"yugawara.kanagawa.jp\",\n\t\"zama.kanagawa.jp\",\n\t\"zushi.kanagawa.jp\",\n\t\"aki.kochi.jp\",\n\t\"geisei.kochi.jp\",\n\t\"hidaka.kochi.jp\",\n\t\"higashitsuno.kochi.jp\",\n\t\"ino.kochi.jp\",\n\t\"kagami.kochi.jp\",\n\t\"kami.kochi.jp\",\n\t\"kitagawa.kochi.jp\",\n\t\"kochi.kochi.jp\",\n\t\"mihara.kochi.jp\",\n\t\"motoyama.kochi.jp\",\n\t\"muroto.kochi.jp\",\n\t\"nahari.kochi.jp\",\n\t\"nakamura.kochi.jp\",\n\t\"nankoku.kochi.jp\",\n\t\"nishitosa.kochi.jp\",\n\t\"niyodogawa.kochi.jp\",\n\t\"ochi.kochi.jp\",\n\t\"okawa.kochi.jp\",\n\t\"otoyo.kochi.jp\",\n\t\"otsuki.kochi.jp\",\n\t\"sakawa.kochi.jp\",\n\t\"sukumo.kochi.jp\",\n\t\"susaki.kochi.jp\",\n\t\"tosa.kochi.jp\",\n\t\"tosashimizu.kochi.jp\",\n\t\"toyo.kochi.jp\",\n\t\"tsuno.kochi.jp\",\n\t\"umaji.kochi.jp\",\n\t\"yasuda.kochi.jp\",\n\t\"yusuhara.kochi.jp\",\n\t\"amakusa.kumamoto.jp\",\n\t\"arao.kumamoto.jp\",\n\t\"aso.kumamoto.jp\",\n\t\"choyo.kumamoto.jp\",\n\t\"gyokuto.kumamoto.jp\",\n\t\"kamiamakusa.kumamoto.jp\",\n\t\"kikuchi.kumamoto.jp\",\n\t\"kumamoto.kumamoto.jp\",\n\t\"mashiki.kumamoto.jp\",\n\t\"mifune.kumamoto.jp\",\n\t\"minamata.kumamoto.jp\",\n\t\"minamioguni.kumamoto.jp\",\n\t\"nagasu.kumamoto.jp\",\n\t\"nishihara.kumamoto.jp\",\n\t\"oguni.kumamoto.jp\",\n\t\"ozu.kumamoto.jp\",\n\t\"sumoto.kumamoto.jp\",\n\t\"takamori.kumamoto.jp\",\n\t\"uki.kumamoto.jp\",\n\t\"uto.kumamoto.jp\",\n\t\"yamaga.kumam
oto.jp\",\n\t\"yamato.kumamoto.jp\",\n\t\"yatsushiro.kumamoto.jp\",\n\t\"ayabe.kyoto.jp\",\n\t\"fukuchiyama.kyoto.jp\",\n\t\"higashiyama.kyoto.jp\",\n\t\"ide.kyoto.jp\",\n\t\"ine.kyoto.jp\",\n\t\"joyo.kyoto.jp\",\n\t\"kameoka.kyoto.jp\",\n\t\"kamo.kyoto.jp\",\n\t\"kita.kyoto.jp\",\n\t\"kizu.kyoto.jp\",\n\t\"kumiyama.kyoto.jp\",\n\t\"kyotamba.kyoto.jp\",\n\t\"kyotanabe.kyoto.jp\",\n\t\"kyotango.kyoto.jp\",\n\t\"maizuru.kyoto.jp\",\n\t\"minami.kyoto.jp\",\n\t\"minamiyamashiro.kyoto.jp\",\n\t\"miyazu.kyoto.jp\",\n\t\"muko.kyoto.jp\",\n\t\"nagaokakyo.kyoto.jp\",\n\t\"nakagyo.kyoto.jp\",\n\t\"nantan.kyoto.jp\",\n\t\"oyamazaki.kyoto.jp\",\n\t\"sakyo.kyoto.jp\",\n\t\"seika.kyoto.jp\",\n\t\"tanabe.kyoto.jp\",\n\t\"uji.kyoto.jp\",\n\t\"ujitawara.kyoto.jp\",\n\t\"wazuka.kyoto.jp\",\n\t\"yamashina.kyoto.jp\",\n\t\"yawata.kyoto.jp\",\n\t\"asahi.mie.jp\",\n\t\"inabe.mie.jp\",\n\t\"ise.mie.jp\",\n\t\"kameyama.mie.jp\",\n\t\"kawagoe.mie.jp\",\n\t\"kiho.mie.jp\",\n\t\"kisosaki.mie.jp\",\n\t\"kiwa.mie.jp\",\n\t\"komono.mie.jp\",\n\t\"kumano.mie.jp\",\n\t\"kuwana.mie.jp\",\n\t\"matsusaka.mie.jp\",\n\t\"meiwa.mie.jp\",\n\t\"mihama.mie.jp\",\n\t\"minamiise.mie.jp\",\n\t\"misugi.mie.jp\",\n\t\"miyama.mie.jp\",\n\t\"nabari.mie.jp\",\n\t\"shima.mie.jp\",\n\t\"suzuka.mie.jp\",\n\t\"tado.mie.jp\",\n\t\"taiki.mie.jp\",\n\t\"taki.mie.jp\",\n\t\"tamaki.mie.jp\",\n\t\"toba.mie.jp\",\n\t\"tsu.mie.jp\",\n\t\"udono.mie.jp\",\n\t\"ureshino.mie.jp\",\n\t\"watarai.mie.jp\",\n\t\"yokkaichi.mie.jp\",\n\t\"furukawa.miyagi.jp\",\n\t\"higashimatsushima.miyagi.jp\",\n\t\"ishinomaki.miyagi.jp\",\n\t\"iwanuma.miyagi.jp\",\n\t\"kakuda.miyagi.jp\",\n\t\"kami.miyagi.jp\",\n\t\"kawasaki.miyagi.jp\",\n\t\"marumori.miyagi.jp\",\n\t\"matsushima.miyagi.jp\",\n\t\"minamisanriku.miyagi.jp\",\n\t\"misato.miyagi.jp\",\n\t\"murata.miyagi.jp\",\n\t\"natori.miyagi.jp\",\n\t\"ogawara.miyagi.jp\",\n\t\"ohira.miyagi.jp\",\n\t\"onagawa.miyagi.jp\",\n\t\"osaki.miyagi.jp\",\n\t\"rifu.miyagi.jp\",\n\t\"semine.miyagi.jp\",\n\t\"sh
ibata.miyagi.jp\",\n\t\"shichikashuku.miyagi.jp\",\n\t\"shikama.miyagi.jp\",\n\t\"shiogama.miyagi.jp\",\n\t\"shiroishi.miyagi.jp\",\n\t\"tagajo.miyagi.jp\",\n\t\"taiwa.miyagi.jp\",\n\t\"tome.miyagi.jp\",\n\t\"tomiya.miyagi.jp\",\n\t\"wakuya.miyagi.jp\",\n\t\"watari.miyagi.jp\",\n\t\"yamamoto.miyagi.jp\",\n\t\"zao.miyagi.jp\",\n\t\"aya.miyazaki.jp\",\n\t\"ebino.miyazaki.jp\",\n\t\"gokase.miyazaki.jp\",\n\t\"hyuga.miyazaki.jp\",\n\t\"kadogawa.miyazaki.jp\",\n\t\"kawaminami.miyazaki.jp\",\n\t\"kijo.miyazaki.jp\",\n\t\"kitagawa.miyazaki.jp\",\n\t\"kitakata.miyazaki.jp\",\n\t\"kitaura.miyazaki.jp\",\n\t\"kobayashi.miyazaki.jp\",\n\t\"kunitomi.miyazaki.jp\",\n\t\"kushima.miyazaki.jp\",\n\t\"mimata.miyazaki.jp\",\n\t\"miyakonojo.miyazaki.jp\",\n\t\"miyazaki.miyazaki.jp\",\n\t\"morotsuka.miyazaki.jp\",\n\t\"nichinan.miyazaki.jp\",\n\t\"nishimera.miyazaki.jp\",\n\t\"nobeoka.miyazaki.jp\",\n\t\"saito.miyazaki.jp\",\n\t\"shiiba.miyazaki.jp\",\n\t\"shintomi.miyazaki.jp\",\n\t\"takaharu.miyazaki.jp\",\n\t\"takanabe.miyazaki.jp\",\n\t\"takazaki.miyazaki.jp\",\n\t\"tsuno.miyazaki.jp\",\n\t\"achi.nagano.jp\",\n\t\"agematsu.nagano.jp\",\n\t\"anan.nagano.jp\",\n\t\"aoki.nagano.jp\",\n\t\"asahi.nagano.jp\",\n\t\"azumino.nagano.jp\",\n\t\"chikuhoku.nagano.jp\",\n\t\"chikuma.nagano.jp\",\n\t\"chino.nagano.jp\",\n\t\"fujimi.nagano.jp\",\n\t\"hakuba.nagano.jp\",\n\t\"hara.nagano.jp\",\n\t\"hiraya.nagano.jp\",\n\t\"iida.nagano.jp\",\n\t\"iijima.nagano.jp\",\n\t\"iiyama.nagano.jp\",\n\t\"iizuna.nagano.jp\",\n\t\"ikeda.nagano.jp\",\n\t\"ikusaka.nagano.jp\",\n\t\"ina.nagano.jp\",\n\t\"karuizawa.nagano.jp\",\n\t\"kawakami.nagano.jp\",\n\t\"kiso.nagano.jp\",\n\t\"kisofukushima.nagano.jp\",\n\t\"kitaaiki.nagano.jp\",\n\t\"komagane.nagano.jp\",\n\t\"komoro.nagano.jp\",\n\t\"matsukawa.nagano.jp\",\n\t\"matsumoto.nagano.jp\",\n\t\"miasa.nagano.jp\",\n\t\"minamiaiki.nagano.jp\",\n\t\"minamimaki.nagano.jp\",\n\t\"minamiminowa.nagano.jp\",\n\t\"minowa.nagano.jp\",\n\t\"miyada.nagano.jp\",\n\t\"miyota.
nagano.jp\",\n\t\"mochizuki.nagano.jp\",\n\t\"nagano.nagano.jp\",\n\t\"nagawa.nagano.jp\",\n\t\"nagiso.nagano.jp\",\n\t\"nakagawa.nagano.jp\",\n\t\"nakano.nagano.jp\",\n\t\"nozawaonsen.nagano.jp\",\n\t\"obuse.nagano.jp\",\n\t\"ogawa.nagano.jp\",\n\t\"okaya.nagano.jp\",\n\t\"omachi.nagano.jp\",\n\t\"omi.nagano.jp\",\n\t\"ookuwa.nagano.jp\",\n\t\"ooshika.nagano.jp\",\n\t\"otaki.nagano.jp\",\n\t\"otari.nagano.jp\",\n\t\"sakae.nagano.jp\",\n\t\"sakaki.nagano.jp\",\n\t\"saku.nagano.jp\",\n\t\"sakuho.nagano.jp\",\n\t\"shimosuwa.nagano.jp\",\n\t\"shinanomachi.nagano.jp\",\n\t\"shiojiri.nagano.jp\",\n\t\"suwa.nagano.jp\",\n\t\"suzaka.nagano.jp\",\n\t\"takagi.nagano.jp\",\n\t\"takamori.nagano.jp\",\n\t\"takayama.nagano.jp\",\n\t\"tateshina.nagano.jp\",\n\t\"tatsuno.nagano.jp\",\n\t\"togakushi.nagano.jp\",\n\t\"togura.nagano.jp\",\n\t\"tomi.nagano.jp\",\n\t\"ueda.nagano.jp\",\n\t\"wada.nagano.jp\",\n\t\"yamagata.nagano.jp\",\n\t\"yamanouchi.nagano.jp\",\n\t\"yasaka.nagano.jp\",\n\t\"yasuoka.nagano.jp\",\n\t\"chijiwa.nagasaki.jp\",\n\t\"futsu.nagasaki.jp\",\n\t\"goto.nagasaki.jp\",\n\t\"hasami.nagasaki.jp\",\n\t\"hirado.nagasaki.jp\",\n\t\"iki.nagasaki.jp\",\n\t\"isahaya.nagasaki.jp\",\n\t\"kawatana.nagasaki.jp\",\n\t\"kuchinotsu.nagasaki.jp\",\n\t\"matsuura.nagasaki.jp\",\n\t\"nagasaki.nagasaki.jp\",\n\t\"obama.nagasaki.jp\",\n\t\"omura.nagasaki.jp\",\n\t\"oseto.nagasaki.jp\",\n\t\"saikai.nagasaki.jp\",\n\t\"sasebo.nagasaki.jp\",\n\t\"seihi.nagasaki.jp\",\n\t\"shimabara.nagasaki.jp\",\n\t\"shinkamigoto.nagasaki.jp\",\n\t\"togitsu.nagasaki.jp\",\n\t\"tsushima.nagasaki.jp\",\n\t\"unzen.nagasaki.jp\",\n\t\"ando.nara.jp\",\n\t\"gose.nara.jp\",\n\t\"heguri.nara.jp\",\n\t\"higashiyoshino.nara.jp\",\n\t\"ikaruga.nara.jp\",\n\t\"ikoma.nara.jp\",\n\t\"kamikitayama.nara.jp\",\n\t\"kanmaki.nara.jp\",\n\t\"kashiba.nara.jp\",\n\t\"kashihara.nara.jp\",\n\t\"katsuragi.nara.jp\",\n\t\"kawai.nara.jp\",\n\t\"kawakami.nara.jp\",\n\t\"kawanishi.nara.jp\",\n\t\"koryo.nara.jp\",\n\t\"kurotaki.nara
.jp\",\n\t\"mitsue.nara.jp\",\n\t\"miyake.nara.jp\",\n\t\"nara.nara.jp\",\n\t\"nosegawa.nara.jp\",\n\t\"oji.nara.jp\",\n\t\"ouda.nara.jp\",\n\t\"oyodo.nara.jp\",\n\t\"sakurai.nara.jp\",\n\t\"sango.nara.jp\",\n\t\"shimoichi.nara.jp\",\n\t\"shimokitayama.nara.jp\",\n\t\"shinjo.nara.jp\",\n\t\"soni.nara.jp\",\n\t\"takatori.nara.jp\",\n\t\"tawaramoto.nara.jp\",\n\t\"tenkawa.nara.jp\",\n\t\"tenri.nara.jp\",\n\t\"uda.nara.jp\",\n\t\"yamatokoriyama.nara.jp\",\n\t\"yamatotakada.nara.jp\",\n\t\"yamazoe.nara.jp\",\n\t\"yoshino.nara.jp\",\n\t\"aga.niigata.jp\",\n\t\"agano.niigata.jp\",\n\t\"gosen.niigata.jp\",\n\t\"itoigawa.niigata.jp\",\n\t\"izumozaki.niigata.jp\",\n\t\"joetsu.niigata.jp\",\n\t\"kamo.niigata.jp\",\n\t\"kariwa.niigata.jp\",\n\t\"kashiwazaki.niigata.jp\",\n\t\"minamiuonuma.niigata.jp\",\n\t\"mitsuke.niigata.jp\",\n\t\"muika.niigata.jp\",\n\t\"murakami.niigata.jp\",\n\t\"myoko.niigata.jp\",\n\t\"nagaoka.niigata.jp\",\n\t\"niigata.niigata.jp\",\n\t\"ojiya.niigata.jp\",\n\t\"omi.niigata.jp\",\n\t\"sado.niigata.jp\",\n\t\"sanjo.niigata.jp\",\n\t\"seiro.niigata.jp\",\n\t\"seirou.niigata.jp\",\n\t\"sekikawa.niigata.jp\",\n\t\"shibata.niigata.jp\",\n\t\"tagami.niigata.jp\",\n\t\"tainai.niigata.jp\",\n\t\"tochio.niigata.jp\",\n\t\"tokamachi.niigata.jp\",\n\t\"tsubame.niigata.jp\",\n\t\"tsunan.niigata.jp\",\n\t\"uonuma.niigata.jp\",\n\t\"yahiko.niigata.jp\",\n\t\"yoita.niigata.jp\",\n\t\"yuzawa.niigata.jp\",\n\t\"beppu.oita.jp\",\n\t\"bungoono.oita.jp\",\n\t\"bungotakada.oita.jp\",\n\t\"hasama.oita.jp\",\n\t\"hiji.oita.jp\",\n\t\"himeshima.oita.jp\",\n\t\"hita.oita.jp\",\n\t\"kamitsue.oita.jp\",\n\t\"kokonoe.oita.jp\",\n\t\"kuju.oita.jp\",\n\t\"kunisaki.oita.jp\",\n\t\"kusu.oita.jp\",\n\t\"oita.oita.jp\",\n\t\"saiki.oita.jp\",\n\t\"taketa.oita.jp\",\n\t\"tsukumi.oita.jp\",\n\t\"usa.oita.jp\",\n\t\"usuki.oita.jp\",\n\t\"yufu.oita.jp\",\n\t\"akaiwa.okayama.jp\",\n\t\"asakuchi.okayama.jp\",\n\t\"bizen.okayama.jp\",\n\t\"hayashima.okayama.jp\",\n\t\"ibara.okayama.jp\",\n\t\
"kagamino.okayama.jp\",\n\t\"kasaoka.okayama.jp\",\n\t\"kibichuo.okayama.jp\",\n\t\"kumenan.okayama.jp\",\n\t\"kurashiki.okayama.jp\",\n\t\"maniwa.okayama.jp\",\n\t\"misaki.okayama.jp\",\n\t\"nagi.okayama.jp\",\n\t\"niimi.okayama.jp\",\n\t\"nishiawakura.okayama.jp\",\n\t\"okayama.okayama.jp\",\n\t\"satosho.okayama.jp\",\n\t\"setouchi.okayama.jp\",\n\t\"shinjo.okayama.jp\",\n\t\"shoo.okayama.jp\",\n\t\"soja.okayama.jp\",\n\t\"takahashi.okayama.jp\",\n\t\"tamano.okayama.jp\",\n\t\"tsuyama.okayama.jp\",\n\t\"wake.okayama.jp\",\n\t\"yakage.okayama.jp\",\n\t\"aguni.okinawa.jp\",\n\t\"ginowan.okinawa.jp\",\n\t\"ginoza.okinawa.jp\",\n\t\"gushikami.okinawa.jp\",\n\t\"haebaru.okinawa.jp\",\n\t\"higashi.okinawa.jp\",\n\t\"hirara.okinawa.jp\",\n\t\"iheya.okinawa.jp\",\n\t\"ishigaki.okinawa.jp\",\n\t\"ishikawa.okinawa.jp\",\n\t\"itoman.okinawa.jp\",\n\t\"izena.okinawa.jp\",\n\t\"kadena.okinawa.jp\",\n\t\"kin.okinawa.jp\",\n\t\"kitadaito.okinawa.jp\",\n\t\"kitanakagusuku.okinawa.jp\",\n\t\"kumejima.okinawa.jp\",\n\t\"kunigami.okinawa.jp\",\n\t\"minamidaito.okinawa.jp\",\n\t\"motobu.okinawa.jp\",\n\t\"nago.okinawa.jp\",\n\t\"naha.okinawa.jp\",\n\t\"nakagusuku.okinawa.jp\",\n\t\"nakijin.okinawa.jp\",\n\t\"nanjo.okinawa.jp\",\n\t\"nishihara.okinawa.jp\",\n\t\"ogimi.okinawa.jp\",\n\t\"okinawa.okinawa.jp\",\n\t\"onna.okinawa.jp\",\n\t\"shimoji.okinawa.jp\",\n\t\"taketomi.okinawa.jp\",\n\t\"tarama.okinawa.jp\",\n\t\"tokashiki.okinawa.jp\",\n\t\"tomigusuku.okinawa.jp\",\n\t\"tonaki.okinawa.jp\",\n\t\"urasoe.okinawa.jp\",\n\t\"uruma.okinawa.jp\",\n\t\"yaese.okinawa.jp\",\n\t\"yomitan.okinawa.jp\",\n\t\"yonabaru.okinawa.jp\",\n\t\"yonaguni.okinawa.jp\",\n\t\"zamami.okinawa.jp\",\n\t\"abeno.osaka.jp\",\n\t\"chihayaakasaka.osaka.jp\",\n\t\"chuo.osaka.jp\",\n\t\"daito.osaka.jp\",\n\t\"fujiidera.osaka.jp\",\n\t\"habikino.osaka.jp\",\n\t\"hannan.osaka.jp\",\n\t\"higashiosaka.osaka.jp\",\n\t\"higashisumiyoshi.osaka.jp\",\n\t\"higashiyodogawa.osaka.jp\",\n\t\"hirakata.osaka.jp\",\n\t\"ibaraki.o
saka.jp\",\n\t\"ikeda.osaka.jp\",\n\t\"izumi.osaka.jp\",\n\t\"izumiotsu.osaka.jp\",\n\t\"izumisano.osaka.jp\",\n\t\"kadoma.osaka.jp\",\n\t\"kaizuka.osaka.jp\",\n\t\"kanan.osaka.jp\",\n\t\"kashiwara.osaka.jp\",\n\t\"katano.osaka.jp\",\n\t\"kawachinagano.osaka.jp\",\n\t\"kishiwada.osaka.jp\",\n\t\"kita.osaka.jp\",\n\t\"kumatori.osaka.jp\",\n\t\"matsubara.osaka.jp\",\n\t\"minato.osaka.jp\",\n\t\"minoh.osaka.jp\",\n\t\"misaki.osaka.jp\",\n\t\"moriguchi.osaka.jp\",\n\t\"neyagawa.osaka.jp\",\n\t\"nishi.osaka.jp\",\n\t\"nose.osaka.jp\",\n\t\"osakasayama.osaka.jp\",\n\t\"sakai.osaka.jp\",\n\t\"sayama.osaka.jp\",\n\t\"sennan.osaka.jp\",\n\t\"settsu.osaka.jp\",\n\t\"shijonawate.osaka.jp\",\n\t\"shimamoto.osaka.jp\",\n\t\"suita.osaka.jp\",\n\t\"tadaoka.osaka.jp\",\n\t\"taishi.osaka.jp\",\n\t\"tajiri.osaka.jp\",\n\t\"takaishi.osaka.jp\",\n\t\"takatsuki.osaka.jp\",\n\t\"tondabayashi.osaka.jp\",\n\t\"toyonaka.osaka.jp\",\n\t\"toyono.osaka.jp\",\n\t\"yao.osaka.jp\",\n\t\"ariake.saga.jp\",\n\t\"arita.saga.jp\",\n\t\"fukudomi.saga.jp\",\n\t\"genkai.saga.jp\",\n\t\"hamatama.saga.jp\",\n\t\"hizen.saga.jp\",\n\t\"imari.saga.jp\",\n\t\"kamimine.saga.jp\",\n\t\"kanzaki.saga.jp\",\n\t\"karatsu.saga.jp\",\n\t\"kashima.saga.jp\",\n\t\"kitagata.saga.jp\",\n\t\"kitahata.saga.jp\",\n\t\"kiyama.saga.jp\",\n\t\"kouhoku.saga.jp\",\n\t\"kyuragi.saga.jp\",\n\t\"nishiarita.saga.jp\",\n\t\"ogi.saga.jp\",\n\t\"omachi.saga.jp\",\n\t\"ouchi.saga.jp\",\n\t\"saga.saga.jp\",\n\t\"shiroishi.saga.jp\",\n\t\"taku.saga.jp\",\n\t\"tara.saga.jp\",\n\t\"tosu.saga.jp\",\n\t\"yoshinogari.saga.jp\",\n\t\"arakawa.saitama.jp\",\n\t\"asaka.saitama.jp\",\n\t\"chichibu.saitama.jp\",\n\t\"fujimi.saitama.jp\",\n\t\"fujimino.saitama.jp\",\n\t\"fukaya.saitama.jp\",\n\t\"hanno.saitama.jp\",\n\t\"hanyu.saitama.jp\",\n\t\"hasuda.saitama.jp\",\n\t\"hatogaya.saitama.jp\",\n\t\"hatoyama.saitama.jp\",\n\t\"hidaka.saitama.jp\",\n\t\"higashichichibu.saitama.jp\",\n\t\"higashimatsuyama.saitama.jp\",\n\t\"honjo.saitama.jp\",\n\t\"ina.s
aitama.jp\",\n\t\"iruma.saitama.jp\",\n\t\"iwatsuki.saitama.jp\",\n\t\"kamiizumi.saitama.jp\",\n\t\"kamikawa.saitama.jp\",\n\t\"kamisato.saitama.jp\",\n\t\"kasukabe.saitama.jp\",\n\t\"kawagoe.saitama.jp\",\n\t\"kawaguchi.saitama.jp\",\n\t\"kawajima.saitama.jp\",\n\t\"kazo.saitama.jp\",\n\t\"kitamoto.saitama.jp\",\n\t\"koshigaya.saitama.jp\",\n\t\"kounosu.saitama.jp\",\n\t\"kuki.saitama.jp\",\n\t\"kumagaya.saitama.jp\",\n\t\"matsubushi.saitama.jp\",\n\t\"minano.saitama.jp\",\n\t\"misato.saitama.jp\",\n\t\"miyashiro.saitama.jp\",\n\t\"miyoshi.saitama.jp\",\n\t\"moroyama.saitama.jp\",\n\t\"nagatoro.saitama.jp\",\n\t\"namegawa.saitama.jp\",\n\t\"niiza.saitama.jp\",\n\t\"ogano.saitama.jp\",\n\t\"ogawa.saitama.jp\",\n\t\"ogose.saitama.jp\",\n\t\"okegawa.saitama.jp\",\n\t\"omiya.saitama.jp\",\n\t\"otaki.saitama.jp\",\n\t\"ranzan.saitama.jp\",\n\t\"ryokami.saitama.jp\",\n\t\"saitama.saitama.jp\",\n\t\"sakado.saitama.jp\",\n\t\"satte.saitama.jp\",\n\t\"sayama.saitama.jp\",\n\t\"shiki.saitama.jp\",\n\t\"shiraoka.saitama.jp\",\n\t\"soka.saitama.jp\",\n\t\"sugito.saitama.jp\",\n\t\"toda.saitama.jp\",\n\t\"tokigawa.saitama.jp\",\n\t\"tokorozawa.saitama.jp\",\n\t\"tsurugashima.saitama.jp\",\n\t\"urawa.saitama.jp\",\n\t\"warabi.saitama.jp\",\n\t\"yashio.saitama.jp\",\n\t\"yokoze.saitama.jp\",\n\t\"yono.saitama.jp\",\n\t\"yorii.saitama.jp\",\n\t\"yoshida.saitama.jp\",\n\t\"yoshikawa.saitama.jp\",\n\t\"yoshimi.saitama.jp\",\n\t\"aisho.shiga.jp\",\n\t\"gamo.shiga.jp\",\n\t\"higashiomi.shiga.jp\",\n\t\"hikone.shiga.jp\",\n\t\"koka.shiga.jp\",\n\t\"konan.shiga.jp\",\n\t\"kosei.shiga.jp\",\n\t\"koto.shiga.jp\",\n\t\"kusatsu.shiga.jp\",\n\t\"maibara.shiga.jp\",\n\t\"moriyama.shiga.jp\",\n\t\"nagahama.shiga.jp\",\n\t\"nishiazai.shiga.jp\",\n\t\"notogawa.shiga.jp\",\n\t\"omihachiman.shiga.jp\",\n\t\"otsu.shiga.jp\",\n\t\"ritto.shiga.jp\",\n\t\"ryuoh.shiga.jp\",\n\t\"takashima.shiga.jp\",\n\t\"takatsuki.shiga.jp\",\n\t\"torahime.shiga.jp\",\n\t\"toyosato.shiga.jp\",\n\t\"yasu.shiga.jp\",\n\
t\"akagi.shimane.jp\",\n\t\"ama.shimane.jp\",\n\t\"gotsu.shimane.jp\",\n\t\"hamada.shimane.jp\",\n\t\"higashiizumo.shimane.jp\",\n\t\"hikawa.shimane.jp\",\n\t\"hikimi.shimane.jp\",\n\t\"izumo.shimane.jp\",\n\t\"kakinoki.shimane.jp\",\n\t\"masuda.shimane.jp\",\n\t\"matsue.shimane.jp\",\n\t\"misato.shimane.jp\",\n\t\"nishinoshima.shimane.jp\",\n\t\"ohda.shimane.jp\",\n\t\"okinoshima.shimane.jp\",\n\t\"okuizumo.shimane.jp\",\n\t\"shimane.shimane.jp\",\n\t\"tamayu.shimane.jp\",\n\t\"tsuwano.shimane.jp\",\n\t\"unnan.shimane.jp\",\n\t\"yakumo.shimane.jp\",\n\t\"yasugi.shimane.jp\",\n\t\"yatsuka.shimane.jp\",\n\t\"arai.shizuoka.jp\",\n\t\"atami.shizuoka.jp\",\n\t\"fuji.shizuoka.jp\",\n\t\"fujieda.shizuoka.jp\",\n\t\"fujikawa.shizuoka.jp\",\n\t\"fujinomiya.shizuoka.jp\",\n\t\"fukuroi.shizuoka.jp\",\n\t\"gotemba.shizuoka.jp\",\n\t\"haibara.shizuoka.jp\",\n\t\"hamamatsu.shizuoka.jp\",\n\t\"higashiizu.shizuoka.jp\",\n\t\"ito.shizuoka.jp\",\n\t\"iwata.shizuoka.jp\",\n\t\"izu.shizuoka.jp\",\n\t\"izunokuni.shizuoka.jp\",\n\t\"kakegawa.shizuoka.jp\",\n\t\"kannami.shizuoka.jp\",\n\t\"kawanehon.shizuoka.jp\",\n\t\"kawazu.shizuoka.jp\",\n\t\"kikugawa.shizuoka.jp\",\n\t\"kosai.shizuoka.jp\",\n\t\"makinohara.shizuoka.jp\",\n\t\"matsuzaki.shizuoka.jp\",\n\t\"minamiizu.shizuoka.jp\",\n\t\"mishima.shizuoka.jp\",\n\t\"morimachi.shizuoka.jp\",\n\t\"nishiizu.shizuoka.jp\",\n\t\"numazu.shizuoka.jp\",\n\t\"omaezaki.shizuoka.jp\",\n\t\"shimada.shizuoka.jp\",\n\t\"shimizu.shizuoka.jp\",\n\t\"shimoda.shizuoka.jp\",\n\t\"shizuoka.shizuoka.jp\",\n\t\"susono.shizuoka.jp\",\n\t\"yaizu.shizuoka.jp\",\n\t\"yoshida.shizuoka.jp\",\n\t\"ashikaga.tochigi.jp\",\n\t\"bato.tochigi.jp\",\n\t\"haga.tochigi.jp\",\n\t\"ichikai.tochigi.jp\",\n\t\"iwafune.tochigi.jp\",\n\t\"kaminokawa.tochigi.jp\",\n\t\"kanuma.tochigi.jp\",\n\t\"karasuyama.tochigi.jp\",\n\t\"kuroiso.tochigi.jp\",\n\t\"mashiko.tochigi.jp\",\n\t\"mibu.tochigi.jp\",\n\t\"moka.tochigi.jp\",\n\t\"motegi.tochigi.jp\",\n\t\"nasu.tochigi.jp\",\n\t\"nasushi
obara.tochigi.jp\",\n\t\"nikko.tochigi.jp\",\n\t\"nishikata.tochigi.jp\",\n\t\"nogi.tochigi.jp\",\n\t\"ohira.tochigi.jp\",\n\t\"ohtawara.tochigi.jp\",\n\t\"oyama.tochigi.jp\",\n\t\"sakura.tochigi.jp\",\n\t\"sano.tochigi.jp\",\n\t\"shimotsuke.tochigi.jp\",\n\t\"shioya.tochigi.jp\",\n\t\"takanezawa.tochigi.jp\",\n\t\"tochigi.tochigi.jp\",\n\t\"tsuga.tochigi.jp\",\n\t\"ujiie.tochigi.jp\",\n\t\"utsunomiya.tochigi.jp\",\n\t\"yaita.tochigi.jp\",\n\t\"aizumi.tokushima.jp\",\n\t\"anan.tokushima.jp\",\n\t\"ichiba.tokushima.jp\",\n\t\"itano.tokushima.jp\",\n\t\"kainan.tokushima.jp\",\n\t\"komatsushima.tokushima.jp\",\n\t\"matsushige.tokushima.jp\",\n\t\"mima.tokushima.jp\",\n\t\"minami.tokushima.jp\",\n\t\"miyoshi.tokushima.jp\",\n\t\"mugi.tokushima.jp\",\n\t\"nakagawa.tokushima.jp\",\n\t\"naruto.tokushima.jp\",\n\t\"sanagochi.tokushima.jp\",\n\t\"shishikui.tokushima.jp\",\n\t\"tokushima.tokushima.jp\",\n\t\"wajiki.tokushima.jp\",\n\t\"adachi.tokyo.jp\",\n\t\"akiruno.tokyo.jp\",\n\t\"akishima.tokyo.jp\",\n\t\"aogashima.tokyo.jp\",\n\t\"arakawa.tokyo.jp\",\n\t\"bunkyo.tokyo.jp\",\n\t\"chiyoda.tokyo.jp\",\n\t\"chofu.tokyo.jp\",\n\t\"chuo.tokyo.jp\",\n\t\"edogawa.tokyo.jp\",\n\t\"fuchu.tokyo.jp\",\n\t\"fussa.tokyo.jp\",\n\t\"hachijo.tokyo.jp\",\n\t\"hachioji.tokyo.jp\",\n\t\"hamura.tokyo.jp\",\n\t\"higashikurume.tokyo.jp\",\n\t\"higashimurayama.tokyo.jp\",\n\t\"higashiyamato.tokyo.jp\",\n\t\"hino.tokyo.jp\",\n\t\"hinode.tokyo.jp\",\n\t\"hinohara.tokyo.jp\",\n\t\"inagi.tokyo.jp\",\n\t\"itabashi.tokyo.jp\",\n\t\"katsushika.tokyo.jp\",\n\t\"kita.tokyo.jp\",\n\t\"kiyose.tokyo.jp\",\n\t\"kodaira.tokyo.jp\",\n\t\"koganei.tokyo.jp\",\n\t\"kokubunji.tokyo.jp\",\n\t\"komae.tokyo.jp\",\n\t\"koto.tokyo.jp\",\n\t\"kouzushima.tokyo.jp\",\n\t\"kunitachi.tokyo.jp\",\n\t\"machida.tokyo.jp\",\n\t\"meguro.tokyo.jp\",\n\t\"minato.tokyo.jp\",\n\t\"mitaka.tokyo.jp\",\n\t\"mizuho.tokyo.jp\",\n\t\"musashimurayama.tokyo.jp\",\n\t\"musashino.tokyo.jp\",\n\t\"nakano.tokyo.jp\",\n\t\"nerima.tokyo.jp\",\n\
t\"ogasawara.tokyo.jp\",\n\t\"okutama.tokyo.jp\",\n\t\"ome.tokyo.jp\",\n\t\"oshima.tokyo.jp\",\n\t\"ota.tokyo.jp\",\n\t\"setagaya.tokyo.jp\",\n\t\"shibuya.tokyo.jp\",\n\t\"shinagawa.tokyo.jp\",\n\t\"shinjuku.tokyo.jp\",\n\t\"suginami.tokyo.jp\",\n\t\"sumida.tokyo.jp\",\n\t\"tachikawa.tokyo.jp\",\n\t\"taito.tokyo.jp\",\n\t\"tama.tokyo.jp\",\n\t\"toshima.tokyo.jp\",\n\t\"chizu.tottori.jp\",\n\t\"hino.tottori.jp\",\n\t\"kawahara.tottori.jp\",\n\t\"koge.tottori.jp\",\n\t\"kotoura.tottori.jp\",\n\t\"misasa.tottori.jp\",\n\t\"nanbu.tottori.jp\",\n\t\"nichinan.tottori.jp\",\n\t\"sakaiminato.tottori.jp\",\n\t\"tottori.tottori.jp\",\n\t\"wakasa.tottori.jp\",\n\t\"yazu.tottori.jp\",\n\t\"yonago.tottori.jp\",\n\t\"asahi.toyama.jp\",\n\t\"fuchu.toyama.jp\",\n\t\"fukumitsu.toyama.jp\",\n\t\"funahashi.toyama.jp\",\n\t\"himi.toyama.jp\",\n\t\"imizu.toyama.jp\",\n\t\"inami.toyama.jp\",\n\t\"johana.toyama.jp\",\n\t\"kamiichi.toyama.jp\",\n\t\"kurobe.toyama.jp\",\n\t\"nakaniikawa.toyama.jp\",\n\t\"namerikawa.toyama.jp\",\n\t\"nanto.toyama.jp\",\n\t\"nyuzen.toyama.jp\",\n\t\"oyabe.toyama.jp\",\n\t\"taira.toyama.jp\",\n\t\"takaoka.toyama.jp\",\n\t\"tateyama.toyama.jp\",\n\t\"toga.toyama.jp\",\n\t\"tonami.toyama.jp\",\n\t\"toyama.toyama.jp\",\n\t\"unazuki.toyama.jp\",\n\t\"uozu.toyama.jp\",\n\t\"yamada.toyama.jp\",\n\t\"arida.wakayama.jp\",\n\t\"aridagawa.wakayama.jp\",\n\t\"gobo.wakayama.jp\",\n\t\"hashimoto.wakayama.jp\",\n\t\"hidaka.wakayama.jp\",\n\t\"hirogawa.wakayama.jp\",\n\t\"inami.wakayama.jp\",\n\t\"iwade.wakayama.jp\",\n\t\"kainan.wakayama.jp\",\n\t\"kamitonda.wakayama.jp\",\n\t\"katsuragi.wakayama.jp\",\n\t\"kimino.wakayama.jp\",\n\t\"kinokawa.wakayama.jp\",\n\t\"kitayama.wakayama.jp\",\n\t\"koya.wakayama.jp\",\n\t\"koza.wakayama.jp\",\n\t\"kozagawa.wakayama.jp\",\n\t\"kudoyama.wakayama.jp\",\n\t\"kushimoto.wakayama.jp\",\n\t\"mihama.wakayama.jp\",\n\t\"misato.wakayama.jp\",\n\t\"nachikatsuura.wakayama.jp\",\n\t\"shingu.wakayama.jp\",\n\t\"shirahama.wakayama.jp\",\n\t\"taiji
.wakayama.jp\",\n\t\"tanabe.wakayama.jp\",\n\t\"wakayama.wakayama.jp\",\n\t\"yuasa.wakayama.jp\",\n\t\"yura.wakayama.jp\",\n\t\"asahi.yamagata.jp\",\n\t\"funagata.yamagata.jp\",\n\t\"higashine.yamagata.jp\",\n\t\"iide.yamagata.jp\",\n\t\"kahoku.yamagata.jp\",\n\t\"kaminoyama.yamagata.jp\",\n\t\"kaneyama.yamagata.jp\",\n\t\"kawanishi.yamagata.jp\",\n\t\"mamurogawa.yamagata.jp\",\n\t\"mikawa.yamagata.jp\",\n\t\"murayama.yamagata.jp\",\n\t\"nagai.yamagata.jp\",\n\t\"nakayama.yamagata.jp\",\n\t\"nanyo.yamagata.jp\",\n\t\"nishikawa.yamagata.jp\",\n\t\"obanazawa.yamagata.jp\",\n\t\"oe.yamagata.jp\",\n\t\"oguni.yamagata.jp\",\n\t\"ohkura.yamagata.jp\",\n\t\"oishida.yamagata.jp\",\n\t\"sagae.yamagata.jp\",\n\t\"sakata.yamagata.jp\",\n\t\"sakegawa.yamagata.jp\",\n\t\"shinjo.yamagata.jp\",\n\t\"shirataka.yamagata.jp\",\n\t\"shonai.yamagata.jp\",\n\t\"takahata.yamagata.jp\",\n\t\"tendo.yamagata.jp\",\n\t\"tozawa.yamagata.jp\",\n\t\"tsuruoka.yamagata.jp\",\n\t\"yamagata.yamagata.jp\",\n\t\"yamanobe.yamagata.jp\",\n\t\"yonezawa.yamagata.jp\",\n\t\"yuza.yamagata.jp\",\n\t\"abu.yamaguchi.jp\",\n\t\"hagi.yamaguchi.jp\",\n\t\"hikari.yamaguchi.jp\",\n\t\"hofu.yamaguchi.jp\",\n\t\"iwakuni.yamaguchi.jp\",\n\t\"kudamatsu.yamaguchi.jp\",\n\t\"mitou.yamaguchi.jp\",\n\t\"nagato.yamaguchi.jp\",\n\t\"oshima.yamaguchi.jp\",\n\t\"shimonoseki.yamaguchi.jp\",\n\t\"shunan.yamaguchi.jp\",\n\t\"tabuse.yamaguchi.jp\",\n\t\"tokuyama.yamaguchi.jp\",\n\t\"toyota.yamaguchi.jp\",\n\t\"ube.yamaguchi.jp\",\n\t\"yuu.yamaguchi.jp\",\n\t\"chuo.yamanashi.jp\",\n\t\"doshi.yamanashi.jp\",\n\t\"fuefuki.yamanashi.jp\",\n\t\"fujikawa.yamanashi.jp\",\n\t\"fujikawaguchiko.yamanashi.jp\",\n\t\"fujiyoshida.yamanashi.jp\",\n\t\"hayakawa.yamanashi.jp\",\n\t\"hokuto.yamanashi.jp\",\n\t\"ichikawamisato.yamanashi.jp\",\n\t\"kai.yamanashi.jp\",\n\t\"kofu.yamanashi.jp\",\n\t\"koshu.yamanashi.jp\",\n\t\"kosuge.yamanashi.jp\",\n\t\"minami-alps.yamanashi.jp\",\n\t\"minobu.yamanashi.jp\",\n\t\"nakamichi.yamanashi.jp\",\n\t\"nanbu
.yamanashi.jp\",\n\t\"narusawa.yamanashi.jp\",\n\t\"nirasaki.yamanashi.jp\",\n\t\"nishikatsura.yamanashi.jp\",\n\t\"oshino.yamanashi.jp\",\n\t\"otsuki.yamanashi.jp\",\n\t\"showa.yamanashi.jp\",\n\t\"tabayama.yamanashi.jp\",\n\t\"tsuru.yamanashi.jp\",\n\t\"uenohara.yamanashi.jp\",\n\t\"yamanakako.yamanashi.jp\",\n\t\"yamanashi.yamanashi.jp\",\n\t\"*.ke\",\n\t\"kg\",\n\t\"org.kg\",\n\t\"net.kg\",\n\t\"com.kg\",\n\t\"edu.kg\",\n\t\"gov.kg\",\n\t\"mil.kg\",\n\t\"*.kh\",\n\t\"ki\",\n\t\"edu.ki\",\n\t\"biz.ki\",\n\t\"net.ki\",\n\t\"org.ki\",\n\t\"gov.ki\",\n\t\"info.ki\",\n\t\"com.ki\",\n\t\"km\",\n\t\"org.km\",\n\t\"nom.km\",\n\t\"gov.km\",\n\t\"prd.km\",\n\t\"tm.km\",\n\t\"edu.km\",\n\t\"mil.km\",\n\t\"ass.km\",\n\t\"com.km\",\n\t\"coop.km\",\n\t\"asso.km\",\n\t\"presse.km\",\n\t\"medecin.km\",\n\t\"notaires.km\",\n\t\"pharmaciens.km\",\n\t\"veterinaire.km\",\n\t\"gouv.km\",\n\t\"kn\",\n\t\"net.kn\",\n\t\"org.kn\",\n\t\"edu.kn\",\n\t\"gov.kn\",\n\t\"kp\",\n\t\"com.kp\",\n\t\"edu.kp\",\n\t\"gov.kp\",\n\t\"org.kp\",\n\t\"rep.kp\",\n\t\"tra.kp\",\n\t\"kr\",\n\t\"ac.kr\",\n\t\"co.kr\",\n\t\"es.kr\",\n\t\"go.kr\",\n\t\"hs.kr\",\n\t\"kg.kr\",\n\t\"mil.kr\",\n\t\"ms.kr\",\n\t\"ne.kr\",\n\t\"or.kr\",\n\t\"pe.kr\",\n\t\"re.kr\",\n\t\"sc.kr\",\n\t\"busan.kr\",\n\t\"chungbuk.kr\",\n\t\"chungnam.kr\",\n\t\"daegu.kr\",\n\t\"daejeon.kr\",\n\t\"gangwon.kr\",\n\t\"gwangju.kr\",\n\t\"gyeongbuk.kr\",\n\t\"gyeonggi.kr\",\n\t\"gyeongnam.kr\",\n\t\"incheon.kr\",\n\t\"jeju.kr\",\n\t\"jeonbuk.kr\",\n\t\"jeonnam.kr\",\n\t\"seoul.kr\",\n\t\"ulsan.kr\",\n\t\"*.kw\",\n\t\"ky\",\n\t\"edu.ky\",\n\t\"gov.ky\",\n\t\"com.ky\",\n\t\"org.ky\",\n\t\"net.ky\",\n\t\"kz\",\n\t\"org.kz\",\n\t\"edu.kz\",\n\t\"net.kz\",\n\t\"gov.kz\",\n\t\"mil.kz\",\n\t\"com.kz\",\n\t\"la\",\n\t\"int.la\",\n\t\"net.la\",\n\t\"info.la\",\n\t\"edu.la\",\n\t\"gov.la\",\n\t\"per.la\",\n\t\"com.la\",\n\t\"org.la\",\n\t\"lb\",\n\t\"com.lb\",\n\t\"edu.lb\",\n\t\"gov.lb\",\n\t\"net.lb\",\n\t\"org.lb\",\n\t\"lc\",\n\t\"com.lc\",\n\t\"n
et.lc\",\n\t\"co.lc\",\n\t\"org.lc\",\n\t\"edu.lc\",\n\t\"gov.lc\",\n\t\"li\",\n\t\"lk\",\n\t\"gov.lk\",\n\t\"sch.lk\",\n\t\"net.lk\",\n\t\"int.lk\",\n\t\"com.lk\",\n\t\"org.lk\",\n\t\"edu.lk\",\n\t\"ngo.lk\",\n\t\"soc.lk\",\n\t\"web.lk\",\n\t\"ltd.lk\",\n\t\"assn.lk\",\n\t\"grp.lk\",\n\t\"hotel.lk\",\n\t\"ac.lk\",\n\t\"lr\",\n\t\"com.lr\",\n\t\"edu.lr\",\n\t\"gov.lr\",\n\t\"org.lr\",\n\t\"net.lr\",\n\t\"ls\",\n\t\"co.ls\",\n\t\"org.ls\",\n\t\"lt\",\n\t\"gov.lt\",\n\t\"lu\",\n\t\"lv\",\n\t\"com.lv\",\n\t\"edu.lv\",\n\t\"gov.lv\",\n\t\"org.lv\",\n\t\"mil.lv\",\n\t\"id.lv\",\n\t\"net.lv\",\n\t\"asn.lv\",\n\t\"conf.lv\",\n\t\"ly\",\n\t\"com.ly\",\n\t\"net.ly\",\n\t\"gov.ly\",\n\t\"plc.ly\",\n\t\"edu.ly\",\n\t\"sch.ly\",\n\t\"med.ly\",\n\t\"org.ly\",\n\t\"id.ly\",\n\t\"ma\",\n\t\"co.ma\",\n\t\"net.ma\",\n\t\"gov.ma\",\n\t\"org.ma\",\n\t\"ac.ma\",\n\t\"press.ma\",\n\t\"mc\",\n\t\"tm.mc\",\n\t\"asso.mc\",\n\t\"md\",\n\t\"me\",\n\t\"co.me\",\n\t\"net.me\",\n\t\"org.me\",\n\t\"edu.me\",\n\t\"ac.me\",\n\t\"gov.me\",\n\t\"its.me\",\n\t\"priv.me\",\n\t\"mg\",\n\t\"org.mg\",\n\t\"nom.mg\",\n\t\"gov.mg\",\n\t\"prd.mg\",\n\t\"tm.mg\",\n\t\"edu.mg\",\n\t\"mil.mg\",\n\t\"com.mg\",\n\t\"co.mg\",\n\t\"mh\",\n\t\"mil\",\n\t\"mk\",\n\t\"com.mk\",\n\t\"org.mk\",\n\t\"net.mk\",\n\t\"edu.mk\",\n\t\"gov.mk\",\n\t\"inf.mk\",\n\t\"name.mk\",\n\t\"ml\",\n\t\"com.ml\",\n\t\"edu.ml\",\n\t\"gouv.ml\",\n\t\"gov.ml\",\n\t\"net.ml\",\n\t\"org.ml\",\n\t\"presse.ml\",\n\t\"*.mm\",\n\t\"mn\",\n\t\"gov.mn\",\n\t\"edu.mn\",\n\t\"org.mn\",\n\t\"mo\",\n\t\"com.mo\",\n\t\"net.mo\",\n\t\"org.mo\",\n\t\"edu.mo\",\n\t\"gov.mo\",\n\t\"mobi\",\n\t\"mp\",\n\t\"mq\",\n\t\"mr\",\n\t\"gov.mr\",\n\t\"ms\",\n\t\"com.ms\",\n\t\"edu.ms\",\n\t\"gov.ms\",\n\t\"net.ms\",\n\t\"org.ms\",\n\t\"mt\",\n\t\"com.mt\",\n\t\"edu.mt\",\n\t\"net.mt\",\n\t\"org.mt\",\n\t\"mu\",\n\t\"com.mu\",\n\t\"net.mu\",\n\t\"org.mu\",\n\t\"gov.mu\",\n\t\"ac.mu\",\n\t\"co.mu\",\n\t\"or.mu\",\n\t\"museum\",\n\t\"academy.museum\",\n\t\"agriculture.m
useum\",\n\t\"air.museum\",\n\t\"airguard.museum\",\n\t\"alabama.museum\",\n\t\"alaska.museum\",\n\t\"amber.museum\",\n\t\"ambulance.museum\",\n\t\"american.museum\",\n\t\"americana.museum\",\n\t\"americanantiques.museum\",\n\t\"americanart.museum\",\n\t\"amsterdam.museum\",\n\t\"and.museum\",\n\t\"annefrank.museum\",\n\t\"anthro.museum\",\n\t\"anthropology.museum\",\n\t\"antiques.museum\",\n\t\"aquarium.museum\",\n\t\"arboretum.museum\",\n\t\"archaeological.museum\",\n\t\"archaeology.museum\",\n\t\"architecture.museum\",\n\t\"art.museum\",\n\t\"artanddesign.museum\",\n\t\"artcenter.museum\",\n\t\"artdeco.museum\",\n\t\"arteducation.museum\",\n\t\"artgallery.museum\",\n\t\"arts.museum\",\n\t\"artsandcrafts.museum\",\n\t\"asmatart.museum\",\n\t\"assassination.museum\",\n\t\"assisi.museum\",\n\t\"association.museum\",\n\t\"astronomy.museum\",\n\t\"atlanta.museum\",\n\t\"austin.museum\",\n\t\"australia.museum\",\n\t\"automotive.museum\",\n\t\"aviation.museum\",\n\t\"axis.museum\",\n\t\"badajoz.museum\",\n\t\"baghdad.museum\",\n\t\"bahn.museum\",\n\t\"bale.museum\",\n\t\"baltimore.museum\",\n\t\"barcelona.museum\",\n\t\"baseball.museum\",\n\t\"basel.museum\",\n\t\"baths.museum\",\n\t\"bauern.museum\",\n\t\"beauxarts.museum\",\n\t\"beeldengeluid.museum\",\n\t\"bellevue.museum\",\n\t\"bergbau.museum\",\n\t\"berkeley.museum\",\n\t\"berlin.museum\",\n\t\"bern.museum\",\n\t\"bible.museum\",\n\t\"bilbao.museum\",\n\t\"bill.museum\",\n\t\"birdart.museum\",\n\t\"birthplace.museum\",\n\t\"bonn.museum\",\n\t\"boston.museum\",\n\t\"botanical.museum\",\n\t\"botanicalgarden.museum\",\n\t\"botanicgarden.museum\",\n\t\"botany.museum\",\n\t\"brandywinevalley.museum\",\n\t\"brasil.museum\",\n\t\"bristol.museum\",\n\t\"british.museum\",\n\t\"britishcolumbia.museum\",\n\t\"broadcast.museum\",\n\t\"brunel.museum\",\n\t\"brussel.museum\",\n\t\"brussels.museum\",\n\t\"bruxelles.museum\",\n\t\"building.museum\",\n\t\"burghof.museum\",\n\t\"bus.museum\",\n\t\"bushey.museum\",\n\t\"cadaques.mus
eum\",\n\t\"california.museum\",\n\t\"cambridge.museum\",\n\t\"can.museum\",\n\t\"canada.museum\",\n\t\"capebreton.museum\",\n\t\"carrier.museum\",\n\t\"cartoonart.museum\",\n\t\"casadelamoneda.museum\",\n\t\"castle.museum\",\n\t\"castres.museum\",\n\t\"celtic.museum\",\n\t\"center.museum\",\n\t\"chattanooga.museum\",\n\t\"cheltenham.museum\",\n\t\"chesapeakebay.museum\",\n\t\"chicago.museum\",\n\t\"children.museum\",\n\t\"childrens.museum\",\n\t\"childrensgarden.museum\",\n\t\"chiropractic.museum\",\n\t\"chocolate.museum\",\n\t\"christiansburg.museum\",\n\t\"cincinnati.museum\",\n\t\"cinema.museum\",\n\t\"circus.museum\",\n\t\"civilisation.museum\",\n\t\"civilization.museum\",\n\t\"civilwar.museum\",\n\t\"clinton.museum\",\n\t\"clock.museum\",\n\t\"coal.museum\",\n\t\"coastaldefence.museum\",\n\t\"cody.museum\",\n\t\"coldwar.museum\",\n\t\"collection.museum\",\n\t\"colonialwilliamsburg.museum\",\n\t\"coloradoplateau.museum\",\n\t\"columbia.museum\",\n\t\"columbus.museum\",\n\t\"communication.museum\",\n\t\"communications.museum\",\n\t\"community.museum\",\n\t\"computer.museum\",\n\t\"computerhistory.museum\",\n\t\"xn--comunicaes-v6a2o.museum\",\n\t\"contemporary.museum\",\n\t\"contemporaryart.museum\",\n\t\"convent.museum\",\n\t\"copenhagen.museum\",\n\t\"corporation.museum\",\n\t\"xn--correios-e-telecomunicaes-ghc29a.museum\",\n\t\"corvette.museum\",\n\t\"costume.museum\",\n\t\"countryestate.museum\",\n\t\"county.museum\",\n\t\"crafts.museum\",\n\t\"cranbrook.museum\",\n\t\"creation.museum\",\n\t\"cultural.museum\",\n\t\"culturalcenter.museum\",\n\t\"culture.museum\",\n\t\"cyber.museum\",\n\t\"cymru.museum\",\n\t\"dali.museum\",\n\t\"dallas.museum\",\n\t\"database.museum\",\n\t\"ddr.museum\",\n\t\"decorativearts.museum\",\n\t\"delaware.museum\",\n\t\"delmenhorst.museum\",\n\t\"denmark.museum\",\n\t\"depot.museum\",\n\t\"design.museum\",\n\t\"detroit.museum\",\n\t\"dinosaur.museum\",\n\t\"discovery.museum\",\n\t\"dolls.museum\",\n\t\"donostia.museum\",\n\t\"durham.
museum\",\n\t\"eastafrica.museum\",\n\t\"eastcoast.museum\",\n\t\"education.museum\",\n\t\"educational.museum\",\n\t\"egyptian.museum\",\n\t\"eisenbahn.museum\",\n\t\"elburg.museum\",\n\t\"elvendrell.museum\",\n\t\"embroidery.museum\",\n\t\"encyclopedic.museum\",\n\t\"england.museum\",\n\t\"entomology.museum\",\n\t\"environment.museum\",\n\t\"environmentalconservation.museum\",\n\t\"epilepsy.museum\",\n\t\"essex.museum\",\n\t\"estate.museum\",\n\t\"ethnology.museum\",\n\t\"exeter.museum\",\n\t\"exhibition.museum\",\n\t\"family.museum\",\n\t\"farm.museum\",\n\t\"farmequipment.museum\",\n\t\"farmers.museum\",\n\t\"farmstead.museum\",\n\t\"field.museum\",\n\t\"figueres.museum\",\n\t\"filatelia.museum\",\n\t\"film.museum\",\n\t\"fineart.museum\",\n\t\"finearts.museum\",\n\t\"finland.museum\",\n\t\"flanders.museum\",\n\t\"florida.museum\",\n\t\"force.museum\",\n\t\"fortmissoula.museum\",\n\t\"fortworth.museum\",\n\t\"foundation.museum\",\n\t\"francaise.museum\",\n\t\"frankfurt.museum\",\n\t\"franziskaner.museum\",\n\t\"freemasonry.museum\",\n\t\"freiburg.museum\",\n\t\"fribourg.museum\",\n\t\"frog.museum\",\n\t\"fundacio.museum\",\n\t\"furniture.museum\",\n\t\"gallery.museum\",\n\t\"garden.museum\",\n\t\"gateway.museum\",\n\t\"geelvinck.museum\",\n\t\"gemological.museum\",\n\t\"geology.museum\",\n\t\"georgia.museum\",\n\t\"giessen.museum\",\n\t\"glas.museum\",\n\t\"glass.museum\",\n\t\"gorge.museum\",\n\t\"grandrapids.museum\",\n\t\"graz.museum\",\n\t\"guernsey.museum\",\n\t\"halloffame.museum\",\n\t\"hamburg.museum\",\n\t\"handson.museum\",\n\t\"harvestcelebration.museum\",\n\t\"hawaii.museum\",\n\t\"health.museum\",\n\t\"heimatunduhren.museum\",\n\t\"hellas.museum\",\n\t\"helsinki.museum\",\n\t\"hembygdsforbund.museum\",\n\t\"heritage.museum\",\n\t\"histoire.museum\",\n\t\"historical.museum\",\n\t\"historicalsociety.museum\",\n\t\"historichouses.museum\",\n\t\"historisch.museum\",\n\t\"historisches.museum\",\n\t\"history.museum\",\n\t\"historyofscience.museum\",\n\t\"h
orology.museum\",\n\t\"house.museum\",\n\t\"humanities.museum\",\n\t\"illustration.museum\",\n\t\"imageandsound.museum\",\n\t\"indian.museum\",\n\t\"indiana.museum\",\n\t\"indianapolis.museum\",\n\t\"indianmarket.museum\",\n\t\"intelligence.museum\",\n\t\"interactive.museum\",\n\t\"iraq.museum\",\n\t\"iron.museum\",\n\t\"isleofman.museum\",\n\t\"jamison.museum\",\n\t\"jefferson.museum\",\n\t\"jerusalem.museum\",\n\t\"jewelry.museum\",\n\t\"jewish.museum\",\n\t\"jewishart.museum\",\n\t\"jfk.museum\",\n\t\"journalism.museum\",\n\t\"judaica.museum\",\n\t\"judygarland.museum\",\n\t\"juedisches.museum\",\n\t\"juif.museum\",\n\t\"karate.museum\",\n\t\"karikatur.museum\",\n\t\"kids.museum\",\n\t\"koebenhavn.museum\",\n\t\"koeln.museum\",\n\t\"kunst.museum\",\n\t\"kunstsammlung.museum\",\n\t\"kunstunddesign.museum\",\n\t\"labor.museum\",\n\t\"labour.museum\",\n\t\"lajolla.museum\",\n\t\"lancashire.museum\",\n\t\"landes.museum\",\n\t\"lans.museum\",\n\t\"xn--lns-qla.museum\",\n\t\"larsson.museum\",\n\t\"lewismiller.museum\",\n\t\"lincoln.museum\",\n\t\"linz.museum\",\n\t\"living.museum\",\n\t\"livinghistory.museum\",\n\t\"localhistory.museum\",\n\t\"london.museum\",\n\t\"losangeles.museum\",\n\t\"louvre.museum\",\n\t\"loyalist.museum\",\n\t\"lucerne.museum\",\n\t\"luxembourg.museum\",\n\t\"luzern.museum\",\n\t\"mad.museum\",\n\t\"madrid.museum\",\n\t\"mallorca.museum\",\n\t\"manchester.museum\",\n\t\"mansion.museum\",\n\t\"mansions.museum\",\n\t\"manx.museum\",\n\t\"marburg.museum\",\n\t\"maritime.museum\",\n\t\"maritimo.museum\",\n\t\"maryland.museum\",\n\t\"marylhurst.museum\",\n\t\"media.museum\",\n\t\"medical.museum\",\n\t\"medizinhistorisches.museum\",\n\t\"meeres.museum\",\n\t\"memorial.museum\",\n\t\"mesaverde.museum\",\n\t\"michigan.museum\",\n\t\"midatlantic.museum\",\n\t\"military.museum\",\n\t\"mill.museum\",\n\t\"miners.museum\",\n\t\"mining.museum\",\n\t\"minnesota.museum\",\n\t\"missile.museum\",\n\t\"missoula.museum\",\n\t\"modern.museum\",\n\t\"moma.museum\",
\n\t\"money.museum\",\n\t\"monmouth.museum\",\n\t\"monticello.museum\",\n\t\"montreal.museum\",\n\t\"moscow.museum\",\n\t\"motorcycle.museum\",\n\t\"muenchen.museum\",\n\t\"muenster.museum\",\n\t\"mulhouse.museum\",\n\t\"muncie.museum\",\n\t\"museet.museum\",\n\t\"museumcenter.museum\",\n\t\"museumvereniging.museum\",\n\t\"music.museum\",\n\t\"national.museum\",\n\t\"nationalfirearms.museum\",\n\t\"nationalheritage.museum\",\n\t\"nativeamerican.museum\",\n\t\"naturalhistory.museum\",\n\t\"naturalhistorymuseum.museum\",\n\t\"naturalsciences.museum\",\n\t\"nature.museum\",\n\t\"naturhistorisches.museum\",\n\t\"natuurwetenschappen.museum\",\n\t\"naumburg.museum\",\n\t\"naval.museum\",\n\t\"nebraska.museum\",\n\t\"neues.museum\",\n\t\"newhampshire.museum\",\n\t\"newjersey.museum\",\n\t\"newmexico.museum\",\n\t\"newport.museum\",\n\t\"newspaper.museum\",\n\t\"newyork.museum\",\n\t\"niepce.museum\",\n\t\"norfolk.museum\",\n\t\"north.museum\",\n\t\"nrw.museum\",\n\t\"nuernberg.museum\",\n\t\"nuremberg.museum\",\n\t\"nyc.museum\",\n\t\"nyny.museum\",\n\t\"oceanographic.museum\",\n\t\"oceanographique.museum\",\n\t\"omaha.museum\",\n\t\"online.museum\",\n\t\"ontario.museum\",\n\t\"openair.museum\",\n\t\"oregon.museum\",\n\t\"oregontrail.museum\",\n\t\"otago.museum\",\n\t\"oxford.museum\",\n\t\"pacific.museum\",\n\t\"paderborn.museum\",\n\t\"palace.museum\",\n\t\"paleo.museum\",\n\t\"palmsprings.museum\",\n\t\"panama.museum\",\n\t\"paris.museum\",\n\t\"pasadena.museum\",\n\t\"pharmacy.museum\",\n\t\"philadelphia.museum\",\n\t\"philadelphiaarea.museum\",\n\t\"philately.museum\",\n\t\"phoenix.museum\",\n\t\"photography.museum\",\n\t\"pilots.museum\",\n\t\"pittsburgh.museum\",\n\t\"planetarium.museum\",\n\t\"plantation.museum\",\n\t\"plants.museum\",\n\t\"plaza.museum\",\n\t\"portal.museum\",\n\t\"portland.museum\",\n\t\"portlligat.museum\",\n\t\"posts-and-telecommunications.museum\",\n\t\"preservation.museum\",\n\t\"presidio.museum\",\n\t\"press.museum\",\n\t\"project.museum\",\
n\t\"public.museum\",\n\t\"pubol.museum\",\n\t\"quebec.museum\",\n\t\"railroad.museum\",\n\t\"railway.museum\",\n\t\"research.museum\",\n\t\"resistance.museum\",\n\t\"riodejaneiro.museum\",\n\t\"rochester.museum\",\n\t\"rockart.museum\",\n\t\"roma.museum\",\n\t\"russia.museum\",\n\t\"saintlouis.museum\",\n\t\"salem.museum\",\n\t\"salvadordali.museum\",\n\t\"salzburg.museum\",\n\t\"sandiego.museum\",\n\t\"sanfrancisco.museum\",\n\t\"santabarbara.museum\",\n\t\"santacruz.museum\",\n\t\"santafe.museum\",\n\t\"saskatchewan.museum\",\n\t\"satx.museum\",\n\t\"savannahga.museum\",\n\t\"schlesisches.museum\",\n\t\"schoenbrunn.museum\",\n\t\"schokoladen.museum\",\n\t\"school.museum\",\n\t\"schweiz.museum\",\n\t\"science.museum\",\n\t\"scienceandhistory.museum\",\n\t\"scienceandindustry.museum\",\n\t\"sciencecenter.museum\",\n\t\"sciencecenters.museum\",\n\t\"science-fiction.museum\",\n\t\"sciencehistory.museum\",\n\t\"sciences.museum\",\n\t\"sciencesnaturelles.museum\",\n\t\"scotland.museum\",\n\t\"seaport.museum\",\n\t\"settlement.museum\",\n\t\"settlers.museum\",\n\t\"shell.museum\",\n\t\"sherbrooke.museum\",\n\t\"sibenik.museum\",\n\t\"silk.museum\",\n\t\"ski.museum\",\n\t\"skole.museum\",\n\t\"society.museum\",\n\t\"sologne.museum\",\n\t\"soundandvision.museum\",\n\t\"southcarolina.museum\",\n\t\"southwest.museum\",\n\t\"space.museum\",\n\t\"spy.museum\",\n\t\"square.museum\",\n\t\"stadt.museum\",\n\t\"stalbans.museum\",\n\t\"starnberg.museum\",\n\t\"state.museum\",\n\t\"stateofdelaware.museum\",\n\t\"station.museum\",\n\t\"steam.museum\",\n\t\"steiermark.museum\",\n\t\"stjohn.museum\",\n\t\"stockholm.museum\",\n\t\"stpetersburg.museum\",\n\t\"stuttgart.museum\",\n\t\"suisse.museum\",\n\t\"surgeonshall.museum\",\n\t\"surrey.museum\",\n\t\"svizzera.museum\",\n\t\"sweden.museum\",\n\t\"sydney.museum\",\n\t\"tank.museum\",\n\t\"tcm.museum\",\n\t\"technology.museum\",\n\t\"telekommunikation.museum\",\n\t\"television.museum\",\n\t\"texas.museum\",\n\t\"textile.museum\",\n\t\"
theater.museum\",\n\t\"time.museum\",\n\t\"timekeeping.museum\",\n\t\"topology.museum\",\n\t\"torino.museum\",\n\t\"touch.museum\",\n\t\"town.museum\",\n\t\"transport.museum\",\n\t\"tree.museum\",\n\t\"trolley.museum\",\n\t\"trust.museum\",\n\t\"trustee.museum\",\n\t\"uhren.museum\",\n\t\"ulm.museum\",\n\t\"undersea.museum\",\n\t\"university.museum\",\n\t\"usa.museum\",\n\t\"usantiques.museum\",\n\t\"usarts.museum\",\n\t\"uscountryestate.museum\",\n\t\"usculture.museum\",\n\t\"usdecorativearts.museum\",\n\t\"usgarden.museum\",\n\t\"ushistory.museum\",\n\t\"ushuaia.museum\",\n\t\"uslivinghistory.museum\",\n\t\"utah.museum\",\n\t\"uvic.museum\",\n\t\"valley.museum\",\n\t\"vantaa.museum\",\n\t\"versailles.museum\",\n\t\"viking.museum\",\n\t\"village.museum\",\n\t\"virginia.museum\",\n\t\"virtual.museum\",\n\t\"virtuel.museum\",\n\t\"vlaanderen.museum\",\n\t\"volkenkunde.museum\",\n\t\"wales.museum\",\n\t\"wallonie.museum\",\n\t\"war.museum\",\n\t\"washingtondc.museum\",\n\t\"watchandclock.museum\",\n\t\"watch-and-clock.museum\",\n\t\"western.museum\",\n\t\"westfalen.museum\",\n\t\"whaling.museum\",\n\t\"wildlife.museum\",\n\t\"williamsburg.museum\",\n\t\"windmill.museum\",\n\t\"workshop.museum\",\n\t\"york.museum\",\n\t\"yorkshire.museum\",\n\t\"yosemite.museum\",\n\t\"youth.museum\",\n\t\"zoological.museum\",\n\t\"zoology.museum\",\n\t\"xn--9dbhblg6di.museum\",\n\t\"xn--h1aegh.museum\",\n\t\"mv\",\n\t\"aero.mv\",\n\t\"biz.mv\",\n\t\"com.mv\",\n\t\"coop.mv\",\n\t\"edu.mv\",\n\t\"gov.mv\",\n\t\"info.mv\",\n\t\"int.mv\",\n\t\"mil.mv\",\n\t\"museum.mv\",\n\t\"name.mv\",\n\t\"net.mv\",\n\t\"org.mv\",\n\t\"pro.mv\",\n\t\"mw\",\n\t\"ac.mw\",\n\t\"biz.mw\",\n\t\"co.mw\",\n\t\"com.mw\",\n\t\"coop.mw\",\n\t\"edu.mw\",\n\t\"gov.mw\",\n\t\"int.mw\",\n\t\"museum.mw\",\n\t\"net.mw\",\n\t\"org.mw\",\n\t\"mx\",\n\t\"com.mx\",\n\t\"org.mx\",\n\t\"gob.mx\",\n\t\"edu.mx\",\n\t\"net.mx\",\n\t\"my\",\n\t\"com.my\",\n\t\"net.my\",\n\t\"org.my\",\n\t\"gov.my\",\n\t\"edu.my\",\n\t\"mil.my\",
\n\t\"name.my\",\n\t\"mz\",\n\t\"ac.mz\",\n\t\"adv.mz\",\n\t\"co.mz\",\n\t\"edu.mz\",\n\t\"gov.mz\",\n\t\"mil.mz\",\n\t\"net.mz\",\n\t\"org.mz\",\n\t\"na\",\n\t\"info.na\",\n\t\"pro.na\",\n\t\"name.na\",\n\t\"school.na\",\n\t\"or.na\",\n\t\"dr.na\",\n\t\"us.na\",\n\t\"mx.na\",\n\t\"ca.na\",\n\t\"in.na\",\n\t\"cc.na\",\n\t\"tv.na\",\n\t\"ws.na\",\n\t\"mobi.na\",\n\t\"co.na\",\n\t\"com.na\",\n\t\"org.na\",\n\t\"name\",\n\t\"nc\",\n\t\"asso.nc\",\n\t\"nom.nc\",\n\t\"ne\",\n\t\"net\",\n\t\"nf\",\n\t\"com.nf\",\n\t\"net.nf\",\n\t\"per.nf\",\n\t\"rec.nf\",\n\t\"web.nf\",\n\t\"arts.nf\",\n\t\"firm.nf\",\n\t\"info.nf\",\n\t\"other.nf\",\n\t\"store.nf\",\n\t\"ng\",\n\t\"com.ng\",\n\t\"edu.ng\",\n\t\"gov.ng\",\n\t\"i.ng\",\n\t\"mil.ng\",\n\t\"mobi.ng\",\n\t\"name.ng\",\n\t\"net.ng\",\n\t\"org.ng\",\n\t\"sch.ng\",\n\t\"ni\",\n\t\"ac.ni\",\n\t\"biz.ni\",\n\t\"co.ni\",\n\t\"com.ni\",\n\t\"edu.ni\",\n\t\"gob.ni\",\n\t\"in.ni\",\n\t\"info.ni\",\n\t\"int.ni\",\n\t\"mil.ni\",\n\t\"net.ni\",\n\t\"nom.ni\",\n\t\"org.ni\",\n\t\"web.ni\",\n\t\"nl\",\n\t\"bv.nl\",\n\t\"no\",\n\t\"fhs.no\",\n\t\"vgs.no\",\n\t\"fylkesbibl.no\",\n\t\"folkebibl.no\",\n\t\"museum.no\",\n\t\"idrett.no\",\n\t\"priv.no\",\n\t\"mil.no\",\n\t\"stat.no\",\n\t\"dep.no\",\n\t\"kommune.no\",\n\t\"herad.no\",\n\t\"aa.no\",\n\t\"ah.no\",\n\t\"bu.no\",\n\t\"fm.no\",\n\t\"hl.no\",\n\t\"hm.no\",\n\t\"jan-mayen.no\",\n\t\"mr.no\",\n\t\"nl.no\",\n\t\"nt.no\",\n\t\"of.no\",\n\t\"ol.no\",\n\t\"oslo.no\",\n\t\"rl.no\",\n\t\"sf.no\",\n\t\"st.no\",\n\t\"svalbard.no\",\n\t\"tm.no\",\n\t\"tr.no\",\n\t\"va.no\",\n\t\"vf.no\",\n\t\"gs.aa.no\",\n\t\"gs.ah.no\",\n\t\"gs.bu.no\",\n\t\"gs.fm.no\",\n\t\"gs.hl.no\",\n\t\"gs.hm.no\",\n\t\"gs.jan-mayen.no\",\n\t\"gs.mr.no\",\n\t\"gs.nl.no\",\n\t\"gs.nt.no\",\n\t\"gs.of.no\",\n\t\"gs.ol.no\",\n\t\"gs.oslo.no\",\n\t\"gs.rl.no\",\n\t\"gs.sf.no\",\n\t\"gs.st.no\",\n\t\"gs.svalbard.no\",\n\t\"gs.tm.no\",\n\t\"gs.tr.no\",\n\t\"gs.va.no\",\n\t\"gs.vf.no\",\n\t\"akrehamn.no\",\n\t\"xn--krehamn-dxa.n
o\",\n\t\"algard.no\",\n\t\"xn--lgrd-poac.no\",\n\t\"arna.no\",\n\t\"brumunddal.no\",\n\t\"bryne.no\",\n\t\"bronnoysund.no\",\n\t\"xn--brnnysund-m8ac.no\",\n\t\"drobak.no\",\n\t\"xn--drbak-wua.no\",\n\t\"egersund.no\",\n\t\"fetsund.no\",\n\t\"floro.no\",\n\t\"xn--flor-jra.no\",\n\t\"fredrikstad.no\",\n\t\"hokksund.no\",\n\t\"honefoss.no\",\n\t\"xn--hnefoss-q1a.no\",\n\t\"jessheim.no\",\n\t\"jorpeland.no\",\n\t\"xn--jrpeland-54a.no\",\n\t\"kirkenes.no\",\n\t\"kopervik.no\",\n\t\"krokstadelva.no\",\n\t\"langevag.no\",\n\t\"xn--langevg-jxa.no\",\n\t\"leirvik.no\",\n\t\"mjondalen.no\",\n\t\"xn--mjndalen-64a.no\",\n\t\"mo-i-rana.no\",\n\t\"mosjoen.no\",\n\t\"xn--mosjen-eya.no\",\n\t\"nesoddtangen.no\",\n\t\"orkanger.no\",\n\t\"osoyro.no\",\n\t\"xn--osyro-wua.no\",\n\t\"raholt.no\",\n\t\"xn--rholt-mra.no\",\n\t\"sandnessjoen.no\",\n\t\"xn--sandnessjen-ogb.no\",\n\t\"skedsmokorset.no\",\n\t\"slattum.no\",\n\t\"spjelkavik.no\",\n\t\"stathelle.no\",\n\t\"stavern.no\",\n\t\"stjordalshalsen.no\",\n\t\"xn--stjrdalshalsen-sqb.no\",\n\t\"tananger.no\",\n\t\"tranby.no\",\n\t\"vossevangen.no\",\n\t\"afjord.no\",\n\t\"xn--fjord-lra.no\",\n\t\"agdenes.no\",\n\t\"al.no\",\n\t\"xn--l-1fa.no\",\n\t\"alesund.no\",\n\t\"xn--lesund-hua.no\",\n\t\"alstahaug.no\",\n\t\"alta.no\",\n\t\"xn--lt-liac.no\",\n\t\"alaheadju.no\",\n\t\"xn--laheadju-7ya.no\",\n\t\"alvdal.no\",\n\t\"amli.no\",\n\t\"xn--mli-tla.no\",\n\t\"amot.no\",\n\t\"xn--mot-tla.no\",\n\t\"andebu.no\",\n\t\"andoy.no\",\n\t\"xn--andy-ira.no\",\n\t\"andasuolo.no\",\n\t\"ardal.no\",\n\t\"xn--rdal-poa.no\",\n\t\"aremark.no\",\n\t\"arendal.no\",\n\t\"xn--s-1fa.no\",\n\t\"aseral.no\",\n\t\"xn--seral-lra.no\",\n\t\"asker.no\",\n\t\"askim.no\",\n\t\"askvoll.no\",\n\t\"askoy.no\",\n\t\"xn--asky-ira.no\",\n\t\"asnes.no\",\n\t\"xn--snes-poa.no\",\n\t\"audnedaln.no\",\n\t\"aukra.no\",\n\t\"aure.no\",\n\t\"aurland.no\",\n\t\"aurskog-holand.no\",\n\t\"xn--aurskog-hland-jnb.no\",\n\t\"austevoll.no\",\n\t\"austrheim.no\",\n\t\"averoy.no\",\n\t\"xn
--avery-yua.no\",\n\t\"balestrand.no\",\n\t\"ballangen.no\",\n\t\"balat.no\",\n\t\"xn--blt-elab.no\",\n\t\"balsfjord.no\",\n\t\"bahccavuotna.no\",\n\t\"xn--bhccavuotna-k7a.no\",\n\t\"bamble.no\",\n\t\"bardu.no\",\n\t\"beardu.no\",\n\t\"beiarn.no\",\n\t\"bajddar.no\",\n\t\"xn--bjddar-pta.no\",\n\t\"baidar.no\",\n\t\"xn--bidr-5nac.no\",\n\t\"berg.no\",\n\t\"bergen.no\",\n\t\"berlevag.no\",\n\t\"xn--berlevg-jxa.no\",\n\t\"bearalvahki.no\",\n\t\"xn--bearalvhki-y4a.no\",\n\t\"bindal.no\",\n\t\"birkenes.no\",\n\t\"bjarkoy.no\",\n\t\"xn--bjarky-fya.no\",\n\t\"bjerkreim.no\",\n\t\"bjugn.no\",\n\t\"bodo.no\",\n\t\"xn--bod-2na.no\",\n\t\"badaddja.no\",\n\t\"xn--bdddj-mrabd.no\",\n\t\"budejju.no\",\n\t\"bokn.no\",\n\t\"bremanger.no\",\n\t\"bronnoy.no\",\n\t\"xn--brnny-wuac.no\",\n\t\"bygland.no\",\n\t\"bykle.no\",\n\t\"barum.no\",\n\t\"xn--brum-voa.no\",\n\t\"bo.telemark.no\",\n\t\"xn--b-5ga.telemark.no\",\n\t\"bo.nordland.no\",\n\t\"xn--b-5ga.nordland.no\",\n\t\"bievat.no\",\n\t\"xn--bievt-0qa.no\",\n\t\"bomlo.no\",\n\t\"xn--bmlo-gra.no\",\n\t\"batsfjord.no\",\n\t\"xn--btsfjord-9za.no\",\n\t\"bahcavuotna.no\",\n\t\"xn--bhcavuotna-s4a.no\",\n\t\"dovre.no\",\n\t\"drammen.no\",\n\t\"drangedal.no\",\n\t\"dyroy.no\",\n\t\"xn--dyry-ira.no\",\n\t\"donna.no\",\n\t\"xn--dnna-gra.no\",\n\t\"eid.no\",\n\t\"eidfjord.no\",\n\t\"eidsberg.no\",\n\t\"eidskog.no\",\n\t\"eidsvoll.no\",\n\t\"eigersund.no\",\n\t\"elverum.no\",\n\t\"enebakk.no\",\n\t\"engerdal.no\",\n\t\"etne.no\",\n\t\"etnedal.no\",\n\t\"evenes.no\",\n\t\"evenassi.no\",\n\t\"xn--eveni-0qa01ga.no\",\n\t\"evje-og-hornnes.no\",\n\t\"farsund.no\",\n\t\"fauske.no\",\n\t\"fuossko.no\",\n\t\"fuoisku.no\",\n\t\"fedje.no\",\n\t\"fet.no\",\n\t\"finnoy.no\",\n\t\"xn--finny-yua.no\",\n\t\"fitjar.no\",\n\t\"fjaler.no\",\n\t\"fjell.no\",\n\t\"flakstad.no\",\n\t\"flatanger.no\",\n\t\"flekkefjord.no\",\n\t\"flesberg.no\",\n\t\"flora.no\",\n\t\"fla.no\",\n\t\"xn--fl-zia.no\",\n\t\"folldal.no\",\n\t\"forsand.no\",\n\t\"fosnes.no\",\n\t\"frei.no\"
,\n\t\"frogn.no\",\n\t\"froland.no\",\n\t\"frosta.no\",\n\t\"frana.no\",\n\t\"xn--frna-woa.no\",\n\t\"froya.no\",\n\t\"xn--frya-hra.no\",\n\t\"fusa.no\",\n\t\"fyresdal.no\",\n\t\"forde.no\",\n\t\"xn--frde-gra.no\",\n\t\"gamvik.no\",\n\t\"gangaviika.no\",\n\t\"xn--ggaviika-8ya47h.no\",\n\t\"gaular.no\",\n\t\"gausdal.no\",\n\t\"gildeskal.no\",\n\t\"xn--gildeskl-g0a.no\",\n\t\"giske.no\",\n\t\"gjemnes.no\",\n\t\"gjerdrum.no\",\n\t\"gjerstad.no\",\n\t\"gjesdal.no\",\n\t\"gjovik.no\",\n\t\"xn--gjvik-wua.no\",\n\t\"gloppen.no\",\n\t\"gol.no\",\n\t\"gran.no\",\n\t\"grane.no\",\n\t\"granvin.no\",\n\t\"gratangen.no\",\n\t\"grimstad.no\",\n\t\"grong.no\",\n\t\"kraanghke.no\",\n\t\"xn--kranghke-b0a.no\",\n\t\"grue.no\",\n\t\"gulen.no\",\n\t\"hadsel.no\",\n\t\"halden.no\",\n\t\"halsa.no\",\n\t\"hamar.no\",\n\t\"hamaroy.no\",\n\t\"habmer.no\",\n\t\"xn--hbmer-xqa.no\",\n\t\"hapmir.no\",\n\t\"xn--hpmir-xqa.no\",\n\t\"hammerfest.no\",\n\t\"hammarfeasta.no\",\n\t\"xn--hmmrfeasta-s4ac.no\",\n\t\"haram.no\",\n\t\"hareid.no\",\n\t\"harstad.no\",\n\t\"hasvik.no\",\n\t\"aknoluokta.no\",\n\t\"xn--koluokta-7ya57h.no\",\n\t\"hattfjelldal.no\",\n\t\"aarborte.no\",\n\t\"haugesund.no\",\n\t\"hemne.no\",\n\t\"hemnes.no\",\n\t\"hemsedal.no\",\n\t\"heroy.more-og-romsdal.no\",\n\t\"xn--hery-ira.xn--mre-og-romsdal-qqb.no\",\n\t\"heroy.nordland.no\",\n\t\"xn--hery-ira.nordland.no\",\n\t\"hitra.no\",\n\t\"hjartdal.no\",\n\t\"hjelmeland.no\",\n\t\"hobol.no\",\n\t\"xn--hobl-ira.no\",\n\t\"hof.no\",\n\t\"hol.no\",\n\t\"hole.no\",\n\t\"holmestrand.no\",\n\t\"holtalen.no\",\n\t\"xn--holtlen-hxa.no\",\n\t\"hornindal.no\",\n\t\"horten.no\",\n\t\"hurdal.no\",\n\t\"hurum.no\",\n\t\"hvaler.no\",\n\t\"hyllestad.no\",\n\t\"hagebostad.no\",\n\t\"xn--hgebostad-g3a.no\",\n\t\"hoyanger.no\",\n\t\"xn--hyanger-q1a.no\",\n\t\"hoylandet.no\",\n\t\"xn--hylandet-54a.no\",\n\t\"ha.no\",\n\t\"xn--h-2fa.no\",\n\t\"ibestad.no\",\n\t\"inderoy.no\",\n\t\"xn--indery-fya.no\",\n\t\"iveland.no\",\n\t\"jevnaker.no\",\n\t\"jondal.no
\",\n\t\"jolster.no\",\n\t\"xn--jlster-bya.no\",\n\t\"karasjok.no\",\n\t\"karasjohka.no\",\n\t\"xn--krjohka-hwab49j.no\",\n\t\"karlsoy.no\",\n\t\"galsa.no\",\n\t\"xn--gls-elac.no\",\n\t\"karmoy.no\",\n\t\"xn--karmy-yua.no\",\n\t\"kautokeino.no\",\n\t\"guovdageaidnu.no\",\n\t\"klepp.no\",\n\t\"klabu.no\",\n\t\"xn--klbu-woa.no\",\n\t\"kongsberg.no\",\n\t\"kongsvinger.no\",\n\t\"kragero.no\",\n\t\"xn--krager-gya.no\",\n\t\"kristiansand.no\",\n\t\"kristiansund.no\",\n\t\"krodsherad.no\",\n\t\"xn--krdsherad-m8a.no\",\n\t\"kvalsund.no\",\n\t\"rahkkeravju.no\",\n\t\"xn--rhkkervju-01af.no\",\n\t\"kvam.no\",\n\t\"kvinesdal.no\",\n\t\"kvinnherad.no\",\n\t\"kviteseid.no\",\n\t\"kvitsoy.no\",\n\t\"xn--kvitsy-fya.no\",\n\t\"kvafjord.no\",\n\t\"xn--kvfjord-nxa.no\",\n\t\"giehtavuoatna.no\",\n\t\"kvanangen.no\",\n\t\"xn--kvnangen-k0a.no\",\n\t\"navuotna.no\",\n\t\"xn--nvuotna-hwa.no\",\n\t\"kafjord.no\",\n\t\"xn--kfjord-iua.no\",\n\t\"gaivuotna.no\",\n\t\"xn--givuotna-8ya.no\",\n\t\"larvik.no\",\n\t\"lavangen.no\",\n\t\"lavagis.no\",\n\t\"loabat.no\",\n\t\"xn--loabt-0qa.no\",\n\t\"lebesby.no\",\n\t\"davvesiida.no\",\n\t\"leikanger.no\",\n\t\"leirfjord.no\",\n\t\"leka.no\",\n\t\"leksvik.no\",\n\t\"lenvik.no\",\n\t\"leangaviika.no\",\n\t\"xn--leagaviika-52b.no\",\n\t\"lesja.no\",\n\t\"levanger.no\",\n\t\"lier.no\",\n\t\"lierne.no\",\n\t\"lillehammer.no\",\n\t\"lillesand.no\",\n\t\"lindesnes.no\",\n\t\"lindas.no\",\n\t\"xn--linds-pra.no\",\n\t\"lom.no\",\n\t\"loppa.no\",\n\t\"lahppi.no\",\n\t\"xn--lhppi-xqa.no\",\n\t\"lund.no\",\n\t\"lunner.no\",\n\t\"luroy.no\",\n\t\"xn--lury-ira.no\",\n\t\"luster.no\",\n\t\"lyngdal.no\",\n\t\"lyngen.no\",\n\t\"ivgu.no\",\n\t\"lardal.no\",\n\t\"lerdal.no\",\n\t\"xn--lrdal-sra.no\",\n\t\"lodingen.no\",\n\t\"xn--ldingen-q1a.no\",\n\t\"lorenskog.no\",\n\t\"xn--lrenskog-54a.no\",\n\t\"loten.no\",\n\t\"xn--lten-gra.no\",\n\t\"malvik.no\",\n\t\"masoy.no\",\n\t\"xn--msy-ula0h.no\",\n\t\"muosat.no\",\n\t\"xn--muost-0qa.no\",\n\t\"mandal.no\",\n\t\"marker.no
\",\n\t\"marnardal.no\",\n\t\"masfjorden.no\",\n\t\"meland.no\",\n\t\"meldal.no\",\n\t\"melhus.no\",\n\t\"meloy.no\",\n\t\"xn--mely-ira.no\",\n\t\"meraker.no\",\n\t\"xn--merker-kua.no\",\n\t\"moareke.no\",\n\t\"xn--moreke-jua.no\",\n\t\"midsund.no\",\n\t\"midtre-gauldal.no\",\n\t\"modalen.no\",\n\t\"modum.no\",\n\t\"molde.no\",\n\t\"moskenes.no\",\n\t\"moss.no\",\n\t\"mosvik.no\",\n\t\"malselv.no\",\n\t\"xn--mlselv-iua.no\",\n\t\"malatvuopmi.no\",\n\t\"xn--mlatvuopmi-s4a.no\",\n\t\"namdalseid.no\",\n\t\"aejrie.no\",\n\t\"namsos.no\",\n\t\"namsskogan.no\",\n\t\"naamesjevuemie.no\",\n\t\"xn--nmesjevuemie-tcba.no\",\n\t\"laakesvuemie.no\",\n\t\"nannestad.no\",\n\t\"narvik.no\",\n\t\"narviika.no\",\n\t\"naustdal.no\",\n\t\"nedre-eiker.no\",\n\t\"nes.akershus.no\",\n\t\"nes.buskerud.no\",\n\t\"nesna.no\",\n\t\"nesodden.no\",\n\t\"nesseby.no\",\n\t\"unjarga.no\",\n\t\"xn--unjrga-rta.no\",\n\t\"nesset.no\",\n\t\"nissedal.no\",\n\t\"nittedal.no\",\n\t\"nord-aurdal.no\",\n\t\"nord-fron.no\",\n\t\"nord-odal.no\",\n\t\"norddal.no\",\n\t\"nordkapp.no\",\n\t\"davvenjarga.no\",\n\t\"xn--davvenjrga-y4a.no\",\n\t\"nordre-land.no\",\n\t\"nordreisa.no\",\n\t\"raisa.no\",\n\t\"xn--risa-5na.no\",\n\t\"nore-og-uvdal.no\",\n\t\"notodden.no\",\n\t\"naroy.no\",\n\t\"xn--nry-yla5g.no\",\n\t\"notteroy.no\",\n\t\"xn--nttery-byae.no\",\n\t\"odda.no\",\n\t\"oksnes.no\",\n\t\"xn--ksnes-uua.no\",\n\t\"oppdal.no\",\n\t\"oppegard.no\",\n\t\"xn--oppegrd-ixa.no\",\n\t\"orkdal.no\",\n\t\"orland.no\",\n\t\"xn--rland-uua.no\",\n\t\"orskog.no\",\n\t\"xn--rskog-uua.no\",\n\t\"orsta.no\",\n\t\"xn--rsta-fra.no\",\n\t\"os.hedmark.no\",\n\t\"os.hordaland.no\",\n\t\"osen.no\",\n\t\"osteroy.no\",\n\t\"xn--ostery-fya.no\",\n\t\"ostre-toten.no\",\n\t\"xn--stre-toten-zcb.no\",\n\t\"overhalla.no\",\n\t\"ovre-eiker.no\",\n\t\"xn--vre-eiker-k8a.no\",\n\t\"oyer.no\",\n\t\"xn--yer-zna.no\",\n\t\"oygarden.no\",\n\t\"xn--ygarden-p1a.no\",\n\t\"oystre-slidre.no\",\n\t\"xn--ystre-slidre-ujb.no\",\n\t\"porsanger.no\",\n\t\"
porsangu.no\",\n\t\"xn--porsgu-sta26f.no\",\n\t\"porsgrunn.no\",\n\t\"radoy.no\",\n\t\"xn--rady-ira.no\",\n\t\"rakkestad.no\",\n\t\"rana.no\",\n\t\"ruovat.no\",\n\t\"randaberg.no\",\n\t\"rauma.no\",\n\t\"rendalen.no\",\n\t\"rennebu.no\",\n\t\"rennesoy.no\",\n\t\"xn--rennesy-v1a.no\",\n\t\"rindal.no\",\n\t\"ringebu.no\",\n\t\"ringerike.no\",\n\t\"ringsaker.no\",\n\t\"rissa.no\",\n\t\"risor.no\",\n\t\"xn--risr-ira.no\",\n\t\"roan.no\",\n\t\"rollag.no\",\n\t\"rygge.no\",\n\t\"ralingen.no\",\n\t\"xn--rlingen-mxa.no\",\n\t\"rodoy.no\",\n\t\"xn--rdy-0nab.no\",\n\t\"romskog.no\",\n\t\"xn--rmskog-bya.no\",\n\t\"roros.no\",\n\t\"xn--rros-gra.no\",\n\t\"rost.no\",\n\t\"xn--rst-0na.no\",\n\t\"royken.no\",\n\t\"xn--ryken-vua.no\",\n\t\"royrvik.no\",\n\t\"xn--ryrvik-bya.no\",\n\t\"rade.no\",\n\t\"xn--rde-ula.no\",\n\t\"salangen.no\",\n\t\"siellak.no\",\n\t\"saltdal.no\",\n\t\"salat.no\",\n\t\"xn--slt-elab.no\",\n\t\"xn--slat-5na.no\",\n\t\"samnanger.no\",\n\t\"sande.more-og-romsdal.no\",\n\t\"sande.xn--mre-og-romsdal-qqb.no\",\n\t\"sande.vestfold.no\",\n\t\"sandefjord.no\",\n\t\"sandnes.no\",\n\t\"sandoy.no\",\n\t\"xn--sandy-yua.no\",\n\t\"sarpsborg.no\",\n\t\"sauda.no\",\n\t\"sauherad.no\",\n\t\"sel.no\",\n\t\"selbu.no\",\n\t\"selje.no\",\n\t\"seljord.no\",\n\t\"sigdal.no\",\n\t\"siljan.no\",\n\t\"sirdal.no\",\n\t\"skaun.no\",\n\t\"skedsmo.no\",\n\t\"ski.no\",\n\t\"skien.no\",\n\t\"skiptvet.no\",\n\t\"skjervoy.no\",\n\t\"xn--skjervy-v1a.no\",\n\t\"skierva.no\",\n\t\"xn--skierv-uta.no\",\n\t\"skjak.no\",\n\t\"xn--skjk-soa.no\",\n\t\"skodje.no\",\n\t\"skanland.no\",\n\t\"xn--sknland-fxa.no\",\n\t\"skanit.no\",\n\t\"xn--sknit-yqa.no\",\n\t\"smola.no\",\n\t\"xn--smla-hra.no\",\n\t\"snillfjord.no\",\n\t\"snasa.no\",\n\t\"xn--snsa-roa.no\",\n\t\"snoasa.no\",\n\t\"snaase.no\",\n\t\"xn--snase-nra.no\",\n\t\"sogndal.no\",\n\t\"sokndal.no\",\n\t\"sola.no\",\n\t\"solund.no\",\n\t\"songdalen.no\",\n\t\"sortland.no\",\n\t\"spydeberg.no\",\n\t\"stange.no\",\n\t\"stavanger.no\",\n\t\"steigen.
no\",\n\t\"steinkjer.no\",\n\t\"stjordal.no\",\n\t\"xn--stjrdal-s1a.no\",\n\t\"stokke.no\",\n\t\"stor-elvdal.no\",\n\t\"stord.no\",\n\t\"stordal.no\",\n\t\"storfjord.no\",\n\t\"omasvuotna.no\",\n\t\"strand.no\",\n\t\"stranda.no\",\n\t\"stryn.no\",\n\t\"sula.no\",\n\t\"suldal.no\",\n\t\"sund.no\",\n\t\"sunndal.no\",\n\t\"surnadal.no\",\n\t\"sveio.no\",\n\t\"svelvik.no\",\n\t\"sykkylven.no\",\n\t\"sogne.no\",\n\t\"xn--sgne-gra.no\",\n\t\"somna.no\",\n\t\"xn--smna-gra.no\",\n\t\"sondre-land.no\",\n\t\"xn--sndre-land-0cb.no\",\n\t\"sor-aurdal.no\",\n\t\"xn--sr-aurdal-l8a.no\",\n\t\"sor-fron.no\",\n\t\"xn--sr-fron-q1a.no\",\n\t\"sor-odal.no\",\n\t\"xn--sr-odal-q1a.no\",\n\t\"sor-varanger.no\",\n\t\"xn--sr-varanger-ggb.no\",\n\t\"matta-varjjat.no\",\n\t\"xn--mtta-vrjjat-k7af.no\",\n\t\"sorfold.no\",\n\t\"xn--srfold-bya.no\",\n\t\"sorreisa.no\",\n\t\"xn--srreisa-q1a.no\",\n\t\"sorum.no\",\n\t\"xn--srum-gra.no\",\n\t\"tana.no\",\n\t\"deatnu.no\",\n\t\"time.no\",\n\t\"tingvoll.no\",\n\t\"tinn.no\",\n\t\"tjeldsund.no\",\n\t\"dielddanuorri.no\",\n\t\"tjome.no\",\n\t\"xn--tjme-hra.no\",\n\t\"tokke.no\",\n\t\"tolga.no\",\n\t\"torsken.no\",\n\t\"tranoy.no\",\n\t\"xn--trany-yua.no\",\n\t\"tromso.no\",\n\t\"xn--troms-zua.no\",\n\t\"tromsa.no\",\n\t\"romsa.no\",\n\t\"trondheim.no\",\n\t\"troandin.no\",\n\t\"trysil.no\",\n\t\"trana.no\",\n\t\"xn--trna-woa.no\",\n\t\"trogstad.no\",\n\t\"xn--trgstad-r1a.no\",\n\t\"tvedestrand.no\",\n\t\"tydal.no\",\n\t\"tynset.no\",\n\t\"tysfjord.no\",\n\t\"divtasvuodna.no\",\n\t\"divttasvuotna.no\",\n\t\"tysnes.no\",\n\t\"tysvar.no\",\n\t\"xn--tysvr-vra.no\",\n\t\"tonsberg.no\",\n\t\"xn--tnsberg-q1a.no\",\n\t\"ullensaker.no\",\n\t\"ullensvang.no\",\n\t\"ulvik.no\",\n\t\"utsira.no\",\n\t\"vadso.no\",\n\t\"xn--vads-jra.no\",\n\t\"cahcesuolo.no\",\n\t\"xn--hcesuolo-7ya35b.no\",\n\t\"vaksdal.no\",\n\t\"valle.no\",\n\t\"vang.no\",\n\t\"vanylven.no\",\n\t\"vardo.no\",\n\t\"xn--vard-jra.no\",\n\t\"varggat.no\",\n\t\"xn--vrggt-xqad.no\",\n\t\"vefsn.no\",\n\t\
"vaapste.no\",\n\t\"vega.no\",\n\t\"vegarshei.no\",\n\t\"xn--vegrshei-c0a.no\",\n\t\"vennesla.no\",\n\t\"verdal.no\",\n\t\"verran.no\",\n\t\"vestby.no\",\n\t\"vestnes.no\",\n\t\"vestre-slidre.no\",\n\t\"vestre-toten.no\",\n\t\"vestvagoy.no\",\n\t\"xn--vestvgy-ixa6o.no\",\n\t\"vevelstad.no\",\n\t\"vik.no\",\n\t\"vikna.no\",\n\t\"vindafjord.no\",\n\t\"volda.no\",\n\t\"voss.no\",\n\t\"varoy.no\",\n\t\"xn--vry-yla5g.no\",\n\t\"vagan.no\",\n\t\"xn--vgan-qoa.no\",\n\t\"voagat.no\",\n\t\"vagsoy.no\",\n\t\"xn--vgsy-qoa0j.no\",\n\t\"vaga.no\",\n\t\"xn--vg-yiab.no\",\n\t\"valer.ostfold.no\",\n\t\"xn--vler-qoa.xn--stfold-9xa.no\",\n\t\"valer.hedmark.no\",\n\t\"xn--vler-qoa.hedmark.no\",\n\t\"*.np\",\n\t\"nr\",\n\t\"biz.nr\",\n\t\"info.nr\",\n\t\"gov.nr\",\n\t\"edu.nr\",\n\t\"org.nr\",\n\t\"net.nr\",\n\t\"com.nr\",\n\t\"nu\",\n\t\"nz\",\n\t\"ac.nz\",\n\t\"co.nz\",\n\t\"cri.nz\",\n\t\"geek.nz\",\n\t\"gen.nz\",\n\t\"govt.nz\",\n\t\"health.nz\",\n\t\"iwi.nz\",\n\t\"kiwi.nz\",\n\t\"maori.nz\",\n\t\"mil.nz\",\n\t\"xn--mori-qsa.nz\",\n\t\"net.nz\",\n\t\"org.nz\",\n\t\"parliament.nz\",\n\t\"school.nz\",\n\t\"om\",\n\t\"co.om\",\n\t\"com.om\",\n\t\"edu.om\",\n\t\"gov.om\",\n\t\"med.om\",\n\t\"museum.om\",\n\t\"net.om\",\n\t\"org.om\",\n\t\"pro.om\",\n\t\"onion\",\n\t\"org\",\n\t\"pa\",\n\t\"ac.pa\",\n\t\"gob.pa\",\n\t\"com.pa\",\n\t\"org.pa\",\n\t\"sld.pa\",\n\t\"edu.pa\",\n\t\"net.pa\",\n\t\"ing.pa\",\n\t\"abo.pa\",\n\t\"med.pa\",\n\t\"nom.pa\",\n\t\"pe\",\n\t\"edu.pe\",\n\t\"gob.pe\",\n\t\"nom.pe\",\n\t\"mil.pe\",\n\t\"org.pe\",\n\t\"com.pe\",\n\t\"net.pe\",\n\t\"pf\",\n\t\"com.pf\",\n\t\"org.pf\",\n\t\"edu.pf\",\n\t\"*.pg\",\n\t\"ph\",\n\t\"com.ph\",\n\t\"net.ph\",\n\t\"org.ph\",\n\t\"gov.ph\",\n\t\"edu.ph\",\n\t\"ngo.ph\",\n\t\"mil.ph\",\n\t\"i.ph\",\n\t\"pk\",\n\t\"com.pk\",\n\t\"net.pk\",\n\t\"edu.pk\",\n\t\"org.pk\",\n\t\"fam.pk\",\n\t\"biz.pk\",\n\t\"web.pk\",\n\t\"gov.pk\",\n\t\"gob.pk\",\n\t\"gok.pk\",\n\t\"gon.pk\",\n\t\"gop.pk\",\n\t\"gos.pk\",\n\t\"info.pk\",\n\t\"pl\",\n\
t\"com.pl\",\n\t\"net.pl\",\n\t\"org.pl\",\n\t\"aid.pl\",\n\t\"agro.pl\",\n\t\"atm.pl\",\n\t\"auto.pl\",\n\t\"biz.pl\",\n\t\"edu.pl\",\n\t\"gmina.pl\",\n\t\"gsm.pl\",\n\t\"info.pl\",\n\t\"mail.pl\",\n\t\"miasta.pl\",\n\t\"media.pl\",\n\t\"mil.pl\",\n\t\"nieruchomosci.pl\",\n\t\"nom.pl\",\n\t\"pc.pl\",\n\t\"powiat.pl\",\n\t\"priv.pl\",\n\t\"realestate.pl\",\n\t\"rel.pl\",\n\t\"sex.pl\",\n\t\"shop.pl\",\n\t\"sklep.pl\",\n\t\"sos.pl\",\n\t\"szkola.pl\",\n\t\"targi.pl\",\n\t\"tm.pl\",\n\t\"tourism.pl\",\n\t\"travel.pl\",\n\t\"turystyka.pl\",\n\t\"gov.pl\",\n\t\"ap.gov.pl\",\n\t\"ic.gov.pl\",\n\t\"is.gov.pl\",\n\t\"us.gov.pl\",\n\t\"kmpsp.gov.pl\",\n\t\"kppsp.gov.pl\",\n\t\"kwpsp.gov.pl\",\n\t\"psp.gov.pl\",\n\t\"wskr.gov.pl\",\n\t\"kwp.gov.pl\",\n\t\"mw.gov.pl\",\n\t\"ug.gov.pl\",\n\t\"um.gov.pl\",\n\t\"umig.gov.pl\",\n\t\"ugim.gov.pl\",\n\t\"upow.gov.pl\",\n\t\"uw.gov.pl\",\n\t\"starostwo.gov.pl\",\n\t\"pa.gov.pl\",\n\t\"po.gov.pl\",\n\t\"psse.gov.pl\",\n\t\"pup.gov.pl\",\n\t\"rzgw.gov.pl\",\n\t\"sa.gov.pl\",\n\t\"so.gov.pl\",\n\t\"sr.gov.pl\",\n\t\"wsa.gov.pl\",\n\t\"sko.gov.pl\",\n\t\"uzs.gov.pl\",\n\t\"wiih.gov.pl\",\n\t\"winb.gov.pl\",\n\t\"pinb.gov.pl\",\n\t\"wios.gov.pl\",\n\t\"witd.gov.pl\",\n\t\"wzmiuw.gov.pl\",\n\t\"piw.gov.pl\",\n\t\"wiw.gov.pl\",\n\t\"griw.gov.pl\",\n\t\"wif.gov.pl\",\n\t\"oum.gov.pl\",\n\t\"sdn.gov.pl\",\n\t\"zp.gov.pl\",\n\t\"uppo.gov.pl\",\n\t\"mup.gov.pl\",\n\t\"wuoz.gov.pl\",\n\t\"konsulat.gov.pl\",\n\t\"oirm.gov.pl\",\n\t\"augustow.pl\",\n\t\"babia-gora.pl\",\n\t\"bedzin.pl\",\n\t\"beskidy.pl\",\n\t\"bialowieza.pl\",\n\t\"bialystok.pl\",\n\t\"bielawa.pl\",\n\t\"bieszczady.pl\",\n\t\"boleslawiec.pl\",\n\t\"bydgoszcz.pl\",\n\t\"bytom.pl\",\n\t\"cieszyn.pl\",\n\t\"czeladz.pl\",\n\t\"czest.pl\",\n\t\"dlugoleka.pl\",\n\t\"elblag.pl\",\n\t\"elk.pl\",\n\t\"glogow.pl\",\n\t\"gniezno.pl\",\n\t\"gorlice.pl\",\n\t\"grajewo.pl\",\n\t\"ilawa.pl\",\n\t\"jaworzno.pl\",\n\t\"jelenia-gora.pl\",\n\t\"jgora.pl\",\n\t\"kalisz.pl\",\n\t\"kazimierz-dolny.pl
\",\n\t\"karpacz.pl\",\n\t\"kartuzy.pl\",\n\t\"kaszuby.pl\",\n\t\"katowice.pl\",\n\t\"kepno.pl\",\n\t\"ketrzyn.pl\",\n\t\"klodzko.pl\",\n\t\"kobierzyce.pl\",\n\t\"kolobrzeg.pl\",\n\t\"konin.pl\",\n\t\"konskowola.pl\",\n\t\"kutno.pl\",\n\t\"lapy.pl\",\n\t\"lebork.pl\",\n\t\"legnica.pl\",\n\t\"lezajsk.pl\",\n\t\"limanowa.pl\",\n\t\"lomza.pl\",\n\t\"lowicz.pl\",\n\t\"lubin.pl\",\n\t\"lukow.pl\",\n\t\"malbork.pl\",\n\t\"malopolska.pl\",\n\t\"mazowsze.pl\",\n\t\"mazury.pl\",\n\t\"mielec.pl\",\n\t\"mielno.pl\",\n\t\"mragowo.pl\",\n\t\"naklo.pl\",\n\t\"nowaruda.pl\",\n\t\"nysa.pl\",\n\t\"olawa.pl\",\n\t\"olecko.pl\",\n\t\"olkusz.pl\",\n\t\"olsztyn.pl\",\n\t\"opoczno.pl\",\n\t\"opole.pl\",\n\t\"ostroda.pl\",\n\t\"ostroleka.pl\",\n\t\"ostrowiec.pl\",\n\t\"ostrowwlkp.pl\",\n\t\"pila.pl\",\n\t\"pisz.pl\",\n\t\"podhale.pl\",\n\t\"podlasie.pl\",\n\t\"polkowice.pl\",\n\t\"pomorze.pl\",\n\t\"pomorskie.pl\",\n\t\"prochowice.pl\",\n\t\"pruszkow.pl\",\n\t\"przeworsk.pl\",\n\t\"pulawy.pl\",\n\t\"radom.pl\",\n\t\"rawa-maz.pl\",\n\t\"rybnik.pl\",\n\t\"rzeszow.pl\",\n\t\"sanok.pl\",\n\t\"sejny.pl\",\n\t\"slask.pl\",\n\t\"slupsk.pl\",\n\t\"sosnowiec.pl\",\n\t\"stalowa-wola.pl\",\n\t\"skoczow.pl\",\n\t\"starachowice.pl\",\n\t\"stargard.pl\",\n\t\"suwalki.pl\",\n\t\"swidnica.pl\",\n\t\"swiebodzin.pl\",\n\t\"swinoujscie.pl\",\n\t\"szczecin.pl\",\n\t\"szczytno.pl\",\n\t\"tarnobrzeg.pl\",\n\t\"tgory.pl\",\n\t\"turek.pl\",\n\t\"tychy.pl\",\n\t\"ustka.pl\",\n\t\"walbrzych.pl\",\n\t\"warmia.pl\",\n\t\"warszawa.pl\",\n\t\"waw.pl\",\n\t\"wegrow.pl\",\n\t\"wielun.pl\",\n\t\"wlocl.pl\",\n\t\"wloclawek.pl\",\n\t\"wodzislaw.pl\",\n\t\"wolomin.pl\",\n\t\"wroclaw.pl\",\n\t\"zachpomor.pl\",\n\t\"zagan.pl\",\n\t\"zarow.pl\",\n\t\"zgora.pl\",\n\t\"zgorzelec.pl\",\n\t\"pm\",\n\t\"pn\",\n\t\"gov.pn\",\n\t\"co.pn\",\n\t\"org.pn\",\n\t\"edu.pn\",\n\t\"net.pn\",\n\t\"post\",\n\t\"pr\",\n\t\"com.pr\",\n\t\"net.pr\",\n\t\"org.pr\",\n\t\"gov.pr\",\n\t\"edu.pr\",\n\t\"isla.pr\",\n\t\"pro.pr\",\n\t\"biz.pr\",\n\t\"in
fo.pr\",\n\t\"name.pr\",\n\t\"est.pr\",\n\t\"prof.pr\",\n\t\"ac.pr\",\n\t\"pro\",\n\t\"aaa.pro\",\n\t\"aca.pro\",\n\t\"acct.pro\",\n\t\"avocat.pro\",\n\t\"bar.pro\",\n\t\"cpa.pro\",\n\t\"eng.pro\",\n\t\"jur.pro\",\n\t\"law.pro\",\n\t\"med.pro\",\n\t\"recht.pro\",\n\t\"ps\",\n\t\"edu.ps\",\n\t\"gov.ps\",\n\t\"sec.ps\",\n\t\"plo.ps\",\n\t\"com.ps\",\n\t\"org.ps\",\n\t\"net.ps\",\n\t\"pt\",\n\t\"net.pt\",\n\t\"gov.pt\",\n\t\"org.pt\",\n\t\"edu.pt\",\n\t\"int.pt\",\n\t\"publ.pt\",\n\t\"com.pt\",\n\t\"nome.pt\",\n\t\"pw\",\n\t\"co.pw\",\n\t\"ne.pw\",\n\t\"or.pw\",\n\t\"ed.pw\",\n\t\"go.pw\",\n\t\"belau.pw\",\n\t\"py\",\n\t\"com.py\",\n\t\"coop.py\",\n\t\"edu.py\",\n\t\"gov.py\",\n\t\"mil.py\",\n\t\"net.py\",\n\t\"org.py\",\n\t\"qa\",\n\t\"com.qa\",\n\t\"edu.qa\",\n\t\"gov.qa\",\n\t\"mil.qa\",\n\t\"name.qa\",\n\t\"net.qa\",\n\t\"org.qa\",\n\t\"sch.qa\",\n\t\"re\",\n\t\"asso.re\",\n\t\"com.re\",\n\t\"nom.re\",\n\t\"ro\",\n\t\"arts.ro\",\n\t\"com.ro\",\n\t\"firm.ro\",\n\t\"info.ro\",\n\t\"nom.ro\",\n\t\"nt.ro\",\n\t\"org.ro\",\n\t\"rec.ro\",\n\t\"store.ro\",\n\t\"tm.ro\",\n\t\"www.ro\",\n\t\"rs\",\n\t\"ac.rs\",\n\t\"co.rs\",\n\t\"edu.rs\",\n\t\"gov.rs\",\n\t\"in.rs\",\n\t\"org.rs\",\n\t\"ru\",\n\t\"ac.ru\",\n\t\"edu.ru\",\n\t\"gov.ru\",\n\t\"int.ru\",\n\t\"mil.ru\",\n\t\"test.ru\",\n\t\"rw\",\n\t\"gov.rw\",\n\t\"net.rw\",\n\t\"edu.rw\",\n\t\"ac.rw\",\n\t\"com.rw\",\n\t\"co.rw\",\n\t\"int.rw\",\n\t\"mil.rw\",\n\t\"gouv.rw\",\n\t\"sa\",\n\t\"com.sa\",\n\t\"net.sa\",\n\t\"org.sa\",\n\t\"gov.sa\",\n\t\"med.sa\",\n\t\"pub.sa\",\n\t\"edu.sa\",\n\t\"sch.sa\",\n\t\"sb\",\n\t\"com.sb\",\n\t\"edu.sb\",\n\t\"gov.sb\",\n\t\"net.sb\",\n\t\"org.sb\",\n\t\"sc\",\n\t\"com.sc\",\n\t\"gov.sc\",\n\t\"net.sc\",\n\t\"org.sc\",\n\t\"edu.sc\",\n\t\"sd\",\n\t\"com.sd\",\n\t\"net.sd\",\n\t\"org.sd\",\n\t\"edu.sd\",\n\t\"med.sd\",\n\t\"tv.sd\",\n\t\"gov.sd\",\n\t\"info.sd\",\n\t\"se\",\n\t\"a.se\",\n\t\"ac.se\",\n\t\"b.se\",\n\t\"bd.se\",\n\t\"brand.se\",\n\t\"c.se\",\n\t\"d.se\",\n\t\"e.se\",\n\t\"
f.se\",\n\t\"fh.se\",\n\t\"fhsk.se\",\n\t\"fhv.se\",\n\t\"g.se\",\n\t\"h.se\",\n\t\"i.se\",\n\t\"k.se\",\n\t\"komforb.se\",\n\t\"kommunalforbund.se\",\n\t\"komvux.se\",\n\t\"l.se\",\n\t\"lanbib.se\",\n\t\"m.se\",\n\t\"n.se\",\n\t\"naturbruksgymn.se\",\n\t\"o.se\",\n\t\"org.se\",\n\t\"p.se\",\n\t\"parti.se\",\n\t\"pp.se\",\n\t\"press.se\",\n\t\"r.se\",\n\t\"s.se\",\n\t\"t.se\",\n\t\"tm.se\",\n\t\"u.se\",\n\t\"w.se\",\n\t\"x.se\",\n\t\"y.se\",\n\t\"z.se\",\n\t\"sg\",\n\t\"com.sg\",\n\t\"net.sg\",\n\t\"org.sg\",\n\t\"gov.sg\",\n\t\"edu.sg\",\n\t\"per.sg\",\n\t\"sh\",\n\t\"com.sh\",\n\t\"net.sh\",\n\t\"gov.sh\",\n\t\"org.sh\",\n\t\"mil.sh\",\n\t\"si\",\n\t\"sj\",\n\t\"sk\",\n\t\"sl\",\n\t\"com.sl\",\n\t\"net.sl\",\n\t\"edu.sl\",\n\t\"gov.sl\",\n\t\"org.sl\",\n\t\"sm\",\n\t\"sn\",\n\t\"art.sn\",\n\t\"com.sn\",\n\t\"edu.sn\",\n\t\"gouv.sn\",\n\t\"org.sn\",\n\t\"perso.sn\",\n\t\"univ.sn\",\n\t\"so\",\n\t\"com.so\",\n\t\"net.so\",\n\t\"org.so\",\n\t\"sr\",\n\t\"st\",\n\t\"co.st\",\n\t\"com.st\",\n\t\"consulado.st\",\n\t\"edu.st\",\n\t\"embaixada.st\",\n\t\"gov.st\",\n\t\"mil.st\",\n\t\"net.st\",\n\t\"org.st\",\n\t\"principe.st\",\n\t\"saotome.st\",\n\t\"store.st\",\n\t\"su\",\n\t\"sv\",\n\t\"com.sv\",\n\t\"edu.sv\",\n\t\"gob.sv\",\n\t\"org.sv\",\n\t\"red.sv\",\n\t\"sx\",\n\t\"gov.sx\",\n\t\"sy\",\n\t\"edu.sy\",\n\t\"gov.sy\",\n\t\"net.sy\",\n\t\"mil.sy\",\n\t\"com.sy\",\n\t\"org.sy\",\n\t\"sz\",\n\t\"co.sz\",\n\t\"ac.sz\",\n\t\"org.sz\",\n\t\"tc\",\n\t\"td\",\n\t\"tel\",\n\t\"tf\",\n\t\"tg\",\n\t\"th\",\n\t\"ac.th\",\n\t\"co.th\",\n\t\"go.th\",\n\t\"in.th\",\n\t\"mi.th\",\n\t\"net.th\",\n\t\"or.th\",\n\t\"tj\",\n\t\"ac.tj\",\n\t\"biz.tj\",\n\t\"co.tj\",\n\t\"com.tj\",\n\t\"edu.tj\",\n\t\"go.tj\",\n\t\"gov.tj\",\n\t\"int.tj\",\n\t\"mil.tj\",\n\t\"name.tj\",\n\t\"net.tj\",\n\t\"nic.tj\",\n\t\"org.tj\",\n\t\"test.tj\",\n\t\"web.tj\",\n\t\"tk\",\n\t\"tl\",\n\t\"gov.tl\",\n\t\"tm\",\n\t\"com.tm\",\n\t\"co.tm\",\n\t\"org.tm\",\n\t\"net.tm\",\n\t\"nom.tm\",\n\t\"gov.tm\",\n\t\"mil
.tm\",\n\t\"edu.tm\",\n\t\"tn\",\n\t\"com.tn\",\n\t\"ens.tn\",\n\t\"fin.tn\",\n\t\"gov.tn\",\n\t\"ind.tn\",\n\t\"intl.tn\",\n\t\"nat.tn\",\n\t\"net.tn\",\n\t\"org.tn\",\n\t\"info.tn\",\n\t\"perso.tn\",\n\t\"tourism.tn\",\n\t\"edunet.tn\",\n\t\"rnrt.tn\",\n\t\"rns.tn\",\n\t\"rnu.tn\",\n\t\"mincom.tn\",\n\t\"agrinet.tn\",\n\t\"defense.tn\",\n\t\"turen.tn\",\n\t\"to\",\n\t\"com.to\",\n\t\"gov.to\",\n\t\"net.to\",\n\t\"org.to\",\n\t\"edu.to\",\n\t\"mil.to\",\n\t\"tr\",\n\t\"com.tr\",\n\t\"info.tr\",\n\t\"biz.tr\",\n\t\"net.tr\",\n\t\"org.tr\",\n\t\"web.tr\",\n\t\"gen.tr\",\n\t\"tv.tr\",\n\t\"av.tr\",\n\t\"dr.tr\",\n\t\"bbs.tr\",\n\t\"name.tr\",\n\t\"tel.tr\",\n\t\"gov.tr\",\n\t\"bel.tr\",\n\t\"pol.tr\",\n\t\"mil.tr\",\n\t\"k12.tr\",\n\t\"edu.tr\",\n\t\"kep.tr\",\n\t\"nc.tr\",\n\t\"gov.nc.tr\",\n\t\"travel\",\n\t\"tt\",\n\t\"co.tt\",\n\t\"com.tt\",\n\t\"org.tt\",\n\t\"net.tt\",\n\t\"biz.tt\",\n\t\"info.tt\",\n\t\"pro.tt\",\n\t\"int.tt\",\n\t\"coop.tt\",\n\t\"jobs.tt\",\n\t\"mobi.tt\",\n\t\"travel.tt\",\n\t\"museum.tt\",\n\t\"aero.tt\",\n\t\"name.tt\",\n\t\"gov.tt\",\n\t\"edu.tt\",\n\t\"tv\",\n\t\"tw\",\n\t\"edu.tw\",\n\t\"gov.tw\",\n\t\"mil.tw\",\n\t\"com.tw\",\n\t\"net.tw\",\n\t\"org.tw\",\n\t\"idv.tw\",\n\t\"game.tw\",\n\t\"ebiz.tw\",\n\t\"club.tw\",\n\t\"xn--zf0ao64a.tw\",\n\t\"xn--uc0atv.tw\",\n\t\"xn--czrw28b.tw\",\n\t\"tz\",\n\t\"ac.tz\",\n\t\"co.tz\",\n\t\"go.tz\",\n\t\"hotel.tz\",\n\t\"info.tz\",\n\t\"me.tz\",\n\t\"mil.tz\",\n\t\"mobi.tz\",\n\t\"ne.tz\",\n\t\"or.tz\",\n\t\"sc.tz\",\n\t\"tv.tz\",\n\t\"ua\",\n\t\"com.ua\",\n\t\"edu.ua\",\n\t\"gov.ua\",\n\t\"in.ua\",\n\t\"net.ua\",\n\t\"org.ua\",\n\t\"cherkassy.ua\",\n\t\"cherkasy.ua\",\n\t\"chernigov.ua\",\n\t\"chernihiv.ua\",\n\t\"chernivtsi.ua\",\n\t\"chernovtsy.ua\",\n\t\"ck.ua\",\n\t\"cn.ua\",\n\t\"cr.ua\",\n\t\"crimea.ua\",\n\t\"cv.ua\",\n\t\"dn.ua\",\n\t\"dnepropetrovsk.ua\",\n\t\"dnipropetrovsk.ua\",\n\t\"dominic.ua\",\n\t\"donetsk.ua\",\n\t\"dp.ua\",\n\t\"if.ua\",\n\t\"ivano-frankivsk.ua\",\n\t\"kh.ua\",\n\
t\"kharkiv.ua\",\n\t\"kharkov.ua\",\n\t\"kherson.ua\",\n\t\"khmelnitskiy.ua\",\n\t\"khmelnytskyi.ua\",\n\t\"kiev.ua\",\n\t\"kirovograd.ua\",\n\t\"km.ua\",\n\t\"kr.ua\",\n\t\"krym.ua\",\n\t\"ks.ua\",\n\t\"kv.ua\",\n\t\"kyiv.ua\",\n\t\"lg.ua\",\n\t\"lt.ua\",\n\t\"lugansk.ua\",\n\t\"lutsk.ua\",\n\t\"lv.ua\",\n\t\"lviv.ua\",\n\t\"mk.ua\",\n\t\"mykolaiv.ua\",\n\t\"nikolaev.ua\",\n\t\"od.ua\",\n\t\"odesa.ua\",\n\t\"odessa.ua\",\n\t\"pl.ua\",\n\t\"poltava.ua\",\n\t\"rivne.ua\",\n\t\"rovno.ua\",\n\t\"rv.ua\",\n\t\"sb.ua\",\n\t\"sebastopol.ua\",\n\t\"sevastopol.ua\",\n\t\"sm.ua\",\n\t\"sumy.ua\",\n\t\"te.ua\",\n\t\"ternopil.ua\",\n\t\"uz.ua\",\n\t\"uzhgorod.ua\",\n\t\"vinnica.ua\",\n\t\"vinnytsia.ua\",\n\t\"vn.ua\",\n\t\"volyn.ua\",\n\t\"yalta.ua\",\n\t\"zaporizhzhe.ua\",\n\t\"zaporizhzhia.ua\",\n\t\"zhitomir.ua\",\n\t\"zhytomyr.ua\",\n\t\"zp.ua\",\n\t\"zt.ua\",\n\t\"ug\",\n\t\"co.ug\",\n\t\"or.ug\",\n\t\"ac.ug\",\n\t\"sc.ug\",\n\t\"go.ug\",\n\t\"ne.ug\",\n\t\"com.ug\",\n\t\"org.ug\",\n\t\"uk\",\n\t\"ac.uk\",\n\t\"co.uk\",\n\t\"gov.uk\",\n\t\"ltd.uk\",\n\t\"me.uk\",\n\t\"net.uk\",\n\t\"nhs.uk\",\n\t\"org.uk\",\n\t\"plc.uk\",\n\t\"police.uk\",\n\t\"*.sch.uk\",\n\t\"us\",\n\t\"dni.us\",\n\t\"fed.us\",\n\t\"isa.us\",\n\t\"kids.us\",\n\t\"nsn.us\",\n\t\"ak.us\",\n\t\"al.us\",\n\t\"ar.us\",\n\t\"as.us\",\n\t\"az.us\",\n\t\"ca.us\",\n\t\"co.us\",\n\t\"ct.us\",\n\t\"dc.us\",\n\t\"de.us\",\n\t\"fl.us\",\n\t\"ga.us\",\n\t\"gu.us\",\n\t\"hi.us\",\n\t\"ia.us\",\n\t\"id.us\",\n\t\"il.us\",\n\t\"in.us\",\n\t\"ks.us\",\n\t\"ky.us\",\n\t\"la.us\",\n\t\"ma.us\",\n\t\"md.us\",\n\t\"me.us\",\n\t\"mi.us\",\n\t\"mn.us\",\n\t\"mo.us\",\n\t\"ms.us\",\n\t\"mt.us\",\n\t\"nc.us\",\n\t\"nd.us\",\n\t\"ne.us\",\n\t\"nh.us\",\n\t\"nj.us\",\n\t\"nm.us\",\n\t\"nv.us\",\n\t\"ny.us\",\n\t\"oh.us\",\n\t\"ok.us\",\n\t\"or.us\",\n\t\"pa.us\",\n\t\"pr.us\",\n\t\"ri.us\",\n\t\"sc.us\",\n\t\"sd.us\",\n\t\"tn.us\",\n\t\"tx.us\",\n\t\"ut.us\",\n\t\"vi.us\",\n\t\"vt.us\",\n\t\"va.us\",\n\t\"wa.us\",\n\t\"wi.us\",\n\
t\"wv.us\",\n\t\"wy.us\",\n\t\"k12.ak.us\",\n\t\"k12.al.us\",\n\t\"k12.ar.us\",\n\t\"k12.as.us\",\n\t\"k12.az.us\",\n\t\"k12.ca.us\",\n\t\"k12.co.us\",\n\t\"k12.ct.us\",\n\t\"k12.dc.us\",\n\t\"k12.de.us\",\n\t\"k12.fl.us\",\n\t\"k12.ga.us\",\n\t\"k12.gu.us\",\n\t\"k12.ia.us\",\n\t\"k12.id.us\",\n\t\"k12.il.us\",\n\t\"k12.in.us\",\n\t\"k12.ks.us\",\n\t\"k12.ky.us\",\n\t\"k12.la.us\",\n\t\"k12.ma.us\",\n\t\"k12.md.us\",\n\t\"k12.me.us\",\n\t\"k12.mi.us\",\n\t\"k12.mn.us\",\n\t\"k12.mo.us\",\n\t\"k12.ms.us\",\n\t\"k12.mt.us\",\n\t\"k12.nc.us\",\n\t\"k12.ne.us\",\n\t\"k12.nh.us\",\n\t\"k12.nj.us\",\n\t\"k12.nm.us\",\n\t\"k12.nv.us\",\n\t\"k12.ny.us\",\n\t\"k12.oh.us\",\n\t\"k12.ok.us\",\n\t\"k12.or.us\",\n\t\"k12.pa.us\",\n\t\"k12.pr.us\",\n\t\"k12.ri.us\",\n\t\"k12.sc.us\",\n\t\"k12.tn.us\",\n\t\"k12.tx.us\",\n\t\"k12.ut.us\",\n\t\"k12.vi.us\",\n\t\"k12.vt.us\",\n\t\"k12.va.us\",\n\t\"k12.wa.us\",\n\t\"k12.wi.us\",\n\t\"k12.wy.us\",\n\t\"cc.ak.us\",\n\t\"cc.al.us\",\n\t\"cc.ar.us\",\n\t\"cc.as.us\",\n\t\"cc.az.us\",\n\t\"cc.ca.us\",\n\t\"cc.co.us\",\n\t\"cc.ct.us\",\n\t\"cc.dc.us\",\n\t\"cc.de.us\",\n\t\"cc.fl.us\",\n\t\"cc.ga.us\",\n\t\"cc.gu.us\",\n\t\"cc.hi.us\",\n\t\"cc.ia.us\",\n\t\"cc.id.us\",\n\t\"cc.il.us\",\n\t\"cc.in.us\",\n\t\"cc.ks.us\",\n\t\"cc.ky.us\",\n\t\"cc.la.us\",\n\t\"cc.ma.us\",\n\t\"cc.md.us\",\n\t\"cc.me.us\",\n\t\"cc.mi.us\",\n\t\"cc.mn.us\",\n\t\"cc.mo.us\",\n\t\"cc.ms.us\",\n\t\"cc.mt.us\",\n\t\"cc.nc.us\",\n\t\"cc.nd.us\",\n\t\"cc.ne.us\",\n\t\"cc.nh.us\",\n\t\"cc.nj.us\",\n\t\"cc.nm.us\",\n\t\"cc.nv.us\",\n\t\"cc.ny.us\",\n\t\"cc.oh.us\",\n\t\"cc.ok.us\",\n\t\"cc.or.us\",\n\t\"cc.pa.us\",\n\t\"cc.pr.us\",\n\t\"cc.ri.us\",\n\t\"cc.sc.us\",\n\t\"cc.sd.us\",\n\t\"cc.tn.us\",\n\t\"cc.tx.us\",\n\t\"cc.ut.us\",\n\t\"cc.vi.us\",\n\t\"cc.vt.us\",\n\t\"cc.va.us\",\n\t\"cc.wa.us\",\n\t\"cc.wi.us\",\n\t\"cc.wv.us\",\n\t\"cc.wy.us\",\n\t\"lib.ak.us\",\n\t\"lib.al.us\",\n\t\"lib.ar.us\",\n\t\"lib.as.us\",\n\t\"lib.az.us\",\n\t\"lib.ca.us\",\n\t\"lib.co.u
s\",\n\t\"lib.ct.us\",\n\t\"lib.dc.us\",\n\t\"lib.fl.us\",\n\t\"lib.ga.us\",\n\t\"lib.gu.us\",\n\t\"lib.hi.us\",\n\t\"lib.ia.us\",\n\t\"lib.id.us\",\n\t\"lib.il.us\",\n\t\"lib.in.us\",\n\t\"lib.ks.us\",\n\t\"lib.ky.us\",\n\t\"lib.la.us\",\n\t\"lib.ma.us\",\n\t\"lib.md.us\",\n\t\"lib.me.us\",\n\t\"lib.mi.us\",\n\t\"lib.mn.us\",\n\t\"lib.mo.us\",\n\t\"lib.ms.us\",\n\t\"lib.mt.us\",\n\t\"lib.nc.us\",\n\t\"lib.nd.us\",\n\t\"lib.ne.us\",\n\t\"lib.nh.us\",\n\t\"lib.nj.us\",\n\t\"lib.nm.us\",\n\t\"lib.nv.us\",\n\t\"lib.ny.us\",\n\t\"lib.oh.us\",\n\t\"lib.ok.us\",\n\t\"lib.or.us\",\n\t\"lib.pa.us\",\n\t\"lib.pr.us\",\n\t\"lib.ri.us\",\n\t\"lib.sc.us\",\n\t\"lib.sd.us\",\n\t\"lib.tn.us\",\n\t\"lib.tx.us\",\n\t\"lib.ut.us\",\n\t\"lib.vi.us\",\n\t\"lib.vt.us\",\n\t\"lib.va.us\",\n\t\"lib.wa.us\",\n\t\"lib.wi.us\",\n\t\"lib.wy.us\",\n\t\"pvt.k12.ma.us\",\n\t\"chtr.k12.ma.us\",\n\t\"paroch.k12.ma.us\",\n\t\"ann-arbor.mi.us\",\n\t\"cog.mi.us\",\n\t\"dst.mi.us\",\n\t\"eaton.mi.us\",\n\t\"gen.mi.us\",\n\t\"mus.mi.us\",\n\t\"tec.mi.us\",\n\t\"washtenaw.mi.us\",\n\t\"uy\",\n\t\"com.uy\",\n\t\"edu.uy\",\n\t\"gub.uy\",\n\t\"mil.uy\",\n\t\"net.uy\",\n\t\"org.uy\",\n\t\"uz\",\n\t\"co.uz\",\n\t\"com.uz\",\n\t\"net.uz\",\n\t\"org.uz\",\n\t\"va\",\n\t\"vc\",\n\t\"com.vc\",\n\t\"net.vc\",\n\t\"org.vc\",\n\t\"gov.vc\",\n\t\"mil.vc\",\n\t\"edu.vc\",\n\t\"ve\",\n\t\"arts.ve\",\n\t\"co.ve\",\n\t\"com.ve\",\n\t\"e12.ve\",\n\t\"edu.ve\",\n\t\"firm.ve\",\n\t\"gob.ve\",\n\t\"gov.ve\",\n\t\"info.ve\",\n\t\"int.ve\",\n\t\"mil.ve\",\n\t\"net.ve\",\n\t\"org.ve\",\n\t\"rec.ve\",\n\t\"store.ve\",\n\t\"tec.ve\",\n\t\"web.ve\",\n\t\"vg\",\n\t\"vi\",\n\t\"co.vi\",\n\t\"com.vi\",\n\t\"k12.vi\",\n\t\"net.vi\",\n\t\"org.vi\",\n\t\"vn\",\n\t\"com.vn\",\n\t\"net.vn\",\n\t\"org.vn\",\n\t\"edu.vn\",\n\t\"gov.vn\",\n\t\"int.vn\",\n\t\"ac.vn\",\n\t\"biz.vn\",\n\t\"info.vn\",\n\t\"name.vn\",\n\t\"pro.vn\",\n\t\"health.vn\",\n\t\"vu\",\n\t\"com.vu\",\n\t\"edu.vu\",\n\t\"net.vu\",\n\t\"org.vu\",\n\t\"wf\",\n\t\"ws\",\n\
t\"com.ws\",\n\t\"net.ws\",\n\t\"org.ws\",\n\t\"gov.ws\",\n\t\"edu.ws\",\n\t\"yt\",\n\t\"xn--mgbaam7a8h\",\n\t\"xn--y9a3aq\",\n\t\"xn--54b7fta0cc\",\n\t\"xn--90ae\",\n\t\"xn--90ais\",\n\t\"xn--fiqs8s\",\n\t\"xn--fiqz9s\",\n\t\"xn--lgbbat1ad8j\",\n\t\"xn--wgbh1c\",\n\t\"xn--e1a4c\",\n\t\"xn--node\",\n\t\"xn--qxam\",\n\t\"xn--j6w193g\",\n\t\"xn--2scrj9c\",\n\t\"xn--3hcrj9c\",\n\t\"xn--45br5cyl\",\n\t\"xn--h2breg3eve\",\n\t\"xn--h2brj9c8c\",\n\t\"xn--mgbgu82a\",\n\t\"xn--rvc1e0am3e\",\n\t\"xn--h2brj9c\",\n\t\"xn--mgbbh1a71e\",\n\t\"xn--fpcrj9c3d\",\n\t\"xn--gecrj9c\",\n\t\"xn--s9brj9c\",\n\t\"xn--45brj9c\",\n\t\"xn--xkc2dl3a5ee0h\",\n\t\"xn--mgba3a4f16a\",\n\t\"xn--mgba3a4fra\",\n\t\"xn--mgbtx2b\",\n\t\"xn--mgbayh7gpa\",\n\t\"xn--3e0b707e\",\n\t\"xn--80ao21a\",\n\t\"xn--fzc2c9e2c\",\n\t\"xn--xkc2al3hye2a\",\n\t\"xn--mgbc0a9azcg\",\n\t\"xn--d1alf\",\n\t\"xn--l1acc\",\n\t\"xn--mix891f\",\n\t\"xn--mix082f\",\n\t\"xn--mgbx4cd0ab\",\n\t\"xn--mgb9awbf\",\n\t\"xn--mgbai9azgqp6j\",\n\t\"xn--mgbai9a5eva00b\",\n\t\"xn--ygbi2ammx\",\n\t\"xn--90a3ac\",\n\t\"xn--o1ac.xn--90a3ac\",\n\t\"xn--c1avg.xn--90a3ac\",\n\t\"xn--90azh.xn--90a3ac\",\n\t\"xn--d1at.xn--90a3ac\",\n\t\"xn--o1ach.xn--90a3ac\",\n\t\"xn--80au.xn--90a3ac\",\n\t\"xn--p1ai\",\n\t\"xn--wgbl6a\",\n\t\"xn--mgberp4a5d4ar\",\n\t\"xn--mgberp4a5d4a87g\",\n\t\"xn--mgbqly7c0a67fbc\",\n\t\"xn--mgbqly7cvafr\",\n\t\"xn--mgbpl2fh\",\n\t\"xn--yfro4i67o\",\n\t\"xn--clchc0ea0b2g2a9gcd\",\n\t\"xn--ogbpf8fl\",\n\t\"xn--mgbtf8fl\",\n\t\"xn--o3cw4h\",\n\t\"xn--12c1fe0br.xn--o3cw4h\",\n\t\"xn--12co0c3b4eva.xn--o3cw4h\",\n\t\"xn--h3cuzk1di.xn--o3cw4h\",\n\t\"xn--o3cyx2a.xn--o3cw4h\",\n\t\"xn--m3ch0j3a.xn--o3cw4h\",\n\t\"xn--12cfi8ixb8l.xn--o3cw4h\",\n\t\"xn--pgbs0dh\",\n\t\"xn--kpry57d\",\n\t\"xn--kprw13d\",\n\t\"xn--nnx388a\",\n\t\"xn--j1amh\",\n\t\"xn--mgb2ddes\",\n\t\"xxx\",\n\t\"*.ye\",\n\t\"ac.za\",\n\t\"agric.za\",\n\t\"alt.za\",\n\t\"co.za\",\n\t\"edu.za\",\n\t\"gov.za\",\n\t\"grondar.za\",\n\t\"law.za\",\n\t\"mil.za\",\n\t\"net.za\",
\n\t\"ngo.za\",\n\t\"nis.za\",\n\t\"nom.za\",\n\t\"org.za\",\n\t\"school.za\",\n\t\"tm.za\",\n\t\"web.za\",\n\t\"zm\",\n\t\"ac.zm\",\n\t\"biz.zm\",\n\t\"co.zm\",\n\t\"com.zm\",\n\t\"edu.zm\",\n\t\"gov.zm\",\n\t\"info.zm\",\n\t\"mil.zm\",\n\t\"net.zm\",\n\t\"org.zm\",\n\t\"sch.zm\",\n\t\"zw\",\n\t\"ac.zw\",\n\t\"co.zw\",\n\t\"gov.zw\",\n\t\"mil.zw\",\n\t\"org.zw\",\n\t\"aaa\",\n\t\"aarp\",\n\t\"abarth\",\n\t\"abb\",\n\t\"abbott\",\n\t\"abbvie\",\n\t\"abc\",\n\t\"able\",\n\t\"abogado\",\n\t\"abudhabi\",\n\t\"academy\",\n\t\"accenture\",\n\t\"accountant\",\n\t\"accountants\",\n\t\"aco\",\n\t\"active\",\n\t\"actor\",\n\t\"adac\",\n\t\"ads\",\n\t\"adult\",\n\t\"aeg\",\n\t\"aetna\",\n\t\"afamilycompany\",\n\t\"afl\",\n\t\"africa\",\n\t\"agakhan\",\n\t\"agency\",\n\t\"aig\",\n\t\"aigo\",\n\t\"airbus\",\n\t\"airforce\",\n\t\"airtel\",\n\t\"akdn\",\n\t\"alfaromeo\",\n\t\"alibaba\",\n\t\"alipay\",\n\t\"allfinanz\",\n\t\"allstate\",\n\t\"ally\",\n\t\"alsace\",\n\t\"alstom\",\n\t\"americanexpress\",\n\t\"americanfamily\",\n\t\"amex\",\n\t\"amfam\",\n\t\"amica\",\n\t\"amsterdam\",\n\t\"analytics\",\n\t\"android\",\n\t\"anquan\",\n\t\"anz\",\n\t\"aol\",\n\t\"apartments\",\n\t\"app\",\n\t\"apple\",\n\t\"aquarelle\",\n\t\"arab\",\n\t\"aramco\",\n\t\"archi\",\n\t\"army\",\n\t\"art\",\n\t\"arte\",\n\t\"asda\",\n\t\"associates\",\n\t\"athleta\",\n\t\"attorney\",\n\t\"auction\",\n\t\"audi\",\n\t\"audible\",\n\t\"audio\",\n\t\"auspost\",\n\t\"author\",\n\t\"auto\",\n\t\"autos\",\n\t\"avianca\",\n\t\"aws\",\n\t\"axa\",\n\t\"azure\",\n\t\"baby\",\n\t\"baidu\",\n\t\"banamex\",\n\t\"bananarepublic\",\n\t\"band\",\n\t\"bank\",\n\t\"bar\",\n\t\"barcelona\",\n\t\"barclaycard\",\n\t\"barclays\",\n\t\"barefoot\",\n\t\"bargains\",\n\t\"baseball\",\n\t\"basketball\",\n\t\"bauhaus\",\n\t\"bayern\",\n\t\"bbc\",\n\t\"bbt\",\n\t\"bbva\",\n\t\"bcg\",\n\t\"bcn\",\n\t\"beats\",\n\t\"beauty\",\n\t\"beer\",\n\t\"bentley\",\n\t\"berlin\",\n\t\"best\",\n\t\"bestbuy\",\n\t\"bet\",\n\t\"bharti\",\n\t\"bible\",
\n\t\"bid\",\n\t\"bike\",\n\t\"bing\",\n\t\"bingo\",\n\t\"bio\",\n\t\"black\",\n\t\"blackfriday\",\n\t\"blanco\",\n\t\"blockbuster\",\n\t\"blog\",\n\t\"bloomberg\",\n\t\"blue\",\n\t\"bms\",\n\t\"bmw\",\n\t\"bnl\",\n\t\"bnpparibas\",\n\t\"boats\",\n\t\"boehringer\",\n\t\"bofa\",\n\t\"bom\",\n\t\"bond\",\n\t\"boo\",\n\t\"book\",\n\t\"booking\",\n\t\"boots\",\n\t\"bosch\",\n\t\"bostik\",\n\t\"boston\",\n\t\"bot\",\n\t\"boutique\",\n\t\"box\",\n\t\"bradesco\",\n\t\"bridgestone\",\n\t\"broadway\",\n\t\"broker\",\n\t\"brother\",\n\t\"brussels\",\n\t\"budapest\",\n\t\"bugatti\",\n\t\"build\",\n\t\"builders\",\n\t\"business\",\n\t\"buy\",\n\t\"buzz\",\n\t\"bzh\",\n\t\"cab\",\n\t\"cafe\",\n\t\"cal\",\n\t\"call\",\n\t\"calvinklein\",\n\t\"cam\",\n\t\"camera\",\n\t\"camp\",\n\t\"cancerresearch\",\n\t\"canon\",\n\t\"capetown\",\n\t\"capital\",\n\t\"capitalone\",\n\t\"car\",\n\t\"caravan\",\n\t\"cards\",\n\t\"care\",\n\t\"career\",\n\t\"careers\",\n\t\"cars\",\n\t\"cartier\",\n\t\"casa\",\n\t\"case\",\n\t\"caseih\",\n\t\"cash\",\n\t\"casino\",\n\t\"catering\",\n\t\"catholic\",\n\t\"cba\",\n\t\"cbn\",\n\t\"cbre\",\n\t\"cbs\",\n\t\"ceb\",\n\t\"center\",\n\t\"ceo\",\n\t\"cern\",\n\t\"cfa\",\n\t\"cfd\",\n\t\"chanel\",\n\t\"channel\",\n\t\"chase\",\n\t\"chat\",\n\t\"cheap\",\n\t\"chintai\",\n\t\"chloe\",\n\t\"christmas\",\n\t\"chrome\",\n\t\"chrysler\",\n\t\"church\",\n\t\"cipriani\",\n\t\"circle\",\n\t\"cisco\",\n\t\"citadel\",\n\t\"citi\",\n\t\"citic\",\n\t\"city\",\n\t\"cityeats\",\n\t\"claims\",\n\t\"cleaning\",\n\t\"click\",\n\t\"clinic\",\n\t\"clinique\",\n\t\"clothing\",\n\t\"cloud\",\n\t\"club\",\n\t\"clubmed\",\n\t\"coach\",\n\t\"codes\",\n\t\"coffee\",\n\t\"college\",\n\t\"cologne\",\n\t\"comcast\",\n\t\"commbank\",\n\t\"community\",\n\t\"company\",\n\t\"compare\",\n\t\"computer\",\n\t\"comsec\",\n\t\"condos\",\n\t\"construction\",\n\t\"consulting\",\n\t\"contact\",\n\t\"contractors\",\n\t\"cooking\",\n\t\"cookingchannel\",\n\t\"cool\",\n\t\"corsica\",\n\t\"country\",\n\t\"
coupon\",\n\t\"coupons\",\n\t\"courses\",\n\t\"credit\",\n\t\"creditcard\",\n\t\"creditunion\",\n\t\"cricket\",\n\t\"crown\",\n\t\"crs\",\n\t\"cruise\",\n\t\"cruises\",\n\t\"csc\",\n\t\"cuisinella\",\n\t\"cymru\",\n\t\"cyou\",\n\t\"dabur\",\n\t\"dad\",\n\t\"dance\",\n\t\"data\",\n\t\"date\",\n\t\"dating\",\n\t\"datsun\",\n\t\"day\",\n\t\"dclk\",\n\t\"dds\",\n\t\"deal\",\n\t\"dealer\",\n\t\"deals\",\n\t\"degree\",\n\t\"delivery\",\n\t\"dell\",\n\t\"deloitte\",\n\t\"delta\",\n\t\"democrat\",\n\t\"dental\",\n\t\"dentist\",\n\t\"desi\",\n\t\"design\",\n\t\"dev\",\n\t\"dhl\",\n\t\"diamonds\",\n\t\"diet\",\n\t\"digital\",\n\t\"direct\",\n\t\"directory\",\n\t\"discount\",\n\t\"discover\",\n\t\"dish\",\n\t\"diy\",\n\t\"dnp\",\n\t\"docs\",\n\t\"doctor\",\n\t\"dodge\",\n\t\"dog\",\n\t\"doha\",\n\t\"domains\",\n\t\"dot\",\n\t\"download\",\n\t\"drive\",\n\t\"dtv\",\n\t\"dubai\",\n\t\"duck\",\n\t\"dunlop\",\n\t\"duns\",\n\t\"dupont\",\n\t\"durban\",\n\t\"dvag\",\n\t\"dvr\",\n\t\"earth\",\n\t\"eat\",\n\t\"eco\",\n\t\"edeka\",\n\t\"education\",\n\t\"email\",\n\t\"emerck\",\n\t\"energy\",\n\t\"engineer\",\n\t\"engineering\",\n\t\"enterprises\",\n\t\"epost\",\n\t\"epson\",\n\t\"equipment\",\n\t\"ericsson\",\n\t\"erni\",\n\t\"esq\",\n\t\"estate\",\n\t\"esurance\",\n\t\"etisalat\",\n\t\"eurovision\",\n\t\"eus\",\n\t\"events\",\n\t\"everbank\",\n\t\"exchange\",\n\t\"expert\",\n\t\"exposed\",\n\t\"express\",\n\t\"extraspace\",\n\t\"fage\",\n\t\"fail\",\n\t\"fairwinds\",\n\t\"faith\",\n\t\"family\",\n\t\"fan\",\n\t\"fans\",\n\t\"farm\",\n\t\"farmers\",\n\t\"fashion\",\n\t\"fast\",\n\t\"fedex\",\n\t\"feedback\",\n\t\"ferrari\",\n\t\"ferrero\",\n\t\"fiat\",\n\t\"fidelity\",\n\t\"fido\",\n\t\"film\",\n\t\"final\",\n\t\"finance\",\n\t\"financial\",\n\t\"fire\",\n\t\"firestone\",\n\t\"firmdale\",\n\t\"fish\",\n\t\"fishing\",\n\t\"fit\",\n\t\"fitness\",\n\t\"flickr\",\n\t\"flights\",\n\t\"flir\",\n\t\"florist\",\n\t\"flowers\",\n\t\"fly\",\n\t\"foo\",\n\t\"food\",\n\t\"foodnetwork\",\n\t\"foot
ball\",\n\t\"ford\",\n\t\"forex\",\n\t\"forsale\",\n\t\"forum\",\n\t\"foundation\",\n\t\"fox\",\n\t\"free\",\n\t\"fresenius\",\n\t\"frl\",\n\t\"frogans\",\n\t\"frontdoor\",\n\t\"frontier\",\n\t\"ftr\",\n\t\"fujitsu\",\n\t\"fujixerox\",\n\t\"fun\",\n\t\"fund\",\n\t\"furniture\",\n\t\"futbol\",\n\t\"fyi\",\n\t\"gal\",\n\t\"gallery\",\n\t\"gallo\",\n\t\"gallup\",\n\t\"game\",\n\t\"games\",\n\t\"gap\",\n\t\"garden\",\n\t\"gbiz\",\n\t\"gdn\",\n\t\"gea\",\n\t\"gent\",\n\t\"genting\",\n\t\"george\",\n\t\"ggee\",\n\t\"gift\",\n\t\"gifts\",\n\t\"gives\",\n\t\"giving\",\n\t\"glade\",\n\t\"glass\",\n\t\"gle\",\n\t\"global\",\n\t\"globo\",\n\t\"gmail\",\n\t\"gmbh\",\n\t\"gmo\",\n\t\"gmx\",\n\t\"godaddy\",\n\t\"gold\",\n\t\"goldpoint\",\n\t\"golf\",\n\t\"goo\",\n\t\"goodhands\",\n\t\"goodyear\",\n\t\"goog\",\n\t\"google\",\n\t\"gop\",\n\t\"got\",\n\t\"grainger\",\n\t\"graphics\",\n\t\"gratis\",\n\t\"green\",\n\t\"gripe\",\n\t\"grocery\",\n\t\"group\",\n\t\"guardian\",\n\t\"gucci\",\n\t\"guge\",\n\t\"guide\",\n\t\"guitars\",\n\t\"guru\",\n\t\"hair\",\n\t\"hamburg\",\n\t\"hangout\",\n\t\"haus\",\n\t\"hbo\",\n\t\"hdfc\",\n\t\"hdfcbank\",\n\t\"health\",\n\t\"healthcare\",\n\t\"help\",\n\t\"helsinki\",\n\t\"here\",\n\t\"hermes\",\n\t\"hgtv\",\n\t\"hiphop\",\n\t\"hisamitsu\",\n\t\"hitachi\",\n\t\"hiv\",\n\t\"hkt\",\n\t\"hockey\",\n\t\"holdings\",\n\t\"holiday\",\n\t\"homedepot\",\n\t\"homegoods\",\n\t\"homes\",\n\t\"homesense\",\n\t\"honda\",\n\t\"honeywell\",\n\t\"horse\",\n\t\"hospital\",\n\t\"host\",\n\t\"hosting\",\n\t\"hot\",\n\t\"hoteles\",\n\t\"hotels\",\n\t\"hotmail\",\n\t\"house\",\n\t\"how\",\n\t\"hsbc\",\n\t\"htc\",\n\t\"hughes\",\n\t\"hyatt\",\n\t\"hyundai\",\n\t\"ibm\",\n\t\"icbc\",\n\t\"ice\",\n\t\"icu\",\n\t\"ieee\",\n\t\"ifm\",\n\t\"ikano\",\n\t\"imamat\",\n\t\"imdb\",\n\t\"immo\",\n\t\"immobilien\",\n\t\"industries\",\n\t\"infiniti\",\n\t\"ing\",\n\t\"ink\",\n\t\"institute\",\n\t\"insurance\",\n\t\"insure\",\n\t\"intel\",\n\t\"international\",\n\t\"intuit\",\n\t\"inve
stments\",\n\t\"ipiranga\",\n\t\"irish\",\n\t\"iselect\",\n\t\"ismaili\",\n\t\"ist\",\n\t\"istanbul\",\n\t\"itau\",\n\t\"itv\",\n\t\"iveco\",\n\t\"iwc\",\n\t\"jaguar\",\n\t\"java\",\n\t\"jcb\",\n\t\"jcp\",\n\t\"jeep\",\n\t\"jetzt\",\n\t\"jewelry\",\n\t\"jio\",\n\t\"jlc\",\n\t\"jll\",\n\t\"jmp\",\n\t\"jnj\",\n\t\"joburg\",\n\t\"jot\",\n\t\"joy\",\n\t\"jpmorgan\",\n\t\"jprs\",\n\t\"juegos\",\n\t\"juniper\",\n\t\"kaufen\",\n\t\"kddi\",\n\t\"kerryhotels\",\n\t\"kerrylogistics\",\n\t\"kerryproperties\",\n\t\"kfh\",\n\t\"kia\",\n\t\"kim\",\n\t\"kinder\",\n\t\"kindle\",\n\t\"kitchen\",\n\t\"kiwi\",\n\t\"koeln\",\n\t\"komatsu\",\n\t\"kosher\",\n\t\"kpmg\",\n\t\"kpn\",\n\t\"krd\",\n\t\"kred\",\n\t\"kuokgroup\",\n\t\"kyoto\",\n\t\"lacaixa\",\n\t\"ladbrokes\",\n\t\"lamborghini\",\n\t\"lamer\",\n\t\"lancaster\",\n\t\"lancia\",\n\t\"lancome\",\n\t\"land\",\n\t\"landrover\",\n\t\"lanxess\",\n\t\"lasalle\",\n\t\"lat\",\n\t\"latino\",\n\t\"latrobe\",\n\t\"law\",\n\t\"lawyer\",\n\t\"lds\",\n\t\"lease\",\n\t\"leclerc\",\n\t\"lefrak\",\n\t\"legal\",\n\t\"lego\",\n\t\"lexus\",\n\t\"lgbt\",\n\t\"liaison\",\n\t\"lidl\",\n\t\"life\",\n\t\"lifeinsurance\",\n\t\"lifestyle\",\n\t\"lighting\",\n\t\"like\",\n\t\"lilly\",\n\t\"limited\",\n\t\"limo\",\n\t\"lincoln\",\n\t\"linde\",\n\t\"link\",\n\t\"lipsy\",\n\t\"live\",\n\t\"living\",\n\t\"lixil\",\n\t\"loan\",\n\t\"loans\",\n\t\"locker\",\n\t\"locus\",\n\t\"loft\",\n\t\"lol\",\n\t\"london\",\n\t\"lotte\",\n\t\"lotto\",\n\t\"love\",\n\t\"lpl\",\n\t\"lplfinancial\",\n\t\"ltd\",\n\t\"ltda\",\n\t\"lundbeck\",\n\t\"lupin\",\n\t\"luxe\",\n\t\"luxury\",\n\t\"macys\",\n\t\"madrid\",\n\t\"maif\",\n\t\"maison\",\n\t\"makeup\",\n\t\"man\",\n\t\"management\",\n\t\"mango\",\n\t\"map\",\n\t\"market\",\n\t\"marketing\",\n\t\"markets\",\n\t\"marriott\",\n\t\"marshalls\",\n\t\"maserati\",\n\t\"mattel\",\n\t\"mba\",\n\t\"mcd\",\n\t\"mcdonalds\",\n\t\"mckinsey\",\n\t\"med\",\n\t\"media\",\n\t\"meet\",\n\t\"melbourne\",\n\t\"meme\",\n\t\"memorial\",\n\t\"men\",\n\
t\"menu\",\n\t\"meo\",\n\t\"merckmsd\",\n\t\"metlife\",\n\t\"miami\",\n\t\"microsoft\",\n\t\"mini\",\n\t\"mint\",\n\t\"mit\",\n\t\"mitsubishi\",\n\t\"mlb\",\n\t\"mls\",\n\t\"mma\",\n\t\"mobile\",\n\t\"mobily\",\n\t\"moda\",\n\t\"moe\",\n\t\"moi\",\n\t\"mom\",\n\t\"monash\",\n\t\"money\",\n\t\"monster\",\n\t\"montblanc\",\n\t\"mopar\",\n\t\"mormon\",\n\t\"mortgage\",\n\t\"moscow\",\n\t\"moto\",\n\t\"motorcycles\",\n\t\"mov\",\n\t\"movie\",\n\t\"movistar\",\n\t\"msd\",\n\t\"mtn\",\n\t\"mtpc\",\n\t\"mtr\",\n\t\"mutual\",\n\t\"nab\",\n\t\"nadex\",\n\t\"nagoya\",\n\t\"nationwide\",\n\t\"natura\",\n\t\"navy\",\n\t\"nba\",\n\t\"nec\",\n\t\"netbank\",\n\t\"netflix\",\n\t\"network\",\n\t\"neustar\",\n\t\"new\",\n\t\"newholland\",\n\t\"news\",\n\t\"next\",\n\t\"nextdirect\",\n\t\"nexus\",\n\t\"nfl\",\n\t\"ngo\",\n\t\"nhk\",\n\t\"nico\",\n\t\"nike\",\n\t\"nikon\",\n\t\"ninja\",\n\t\"nissan\",\n\t\"nissay\",\n\t\"nokia\",\n\t\"northwesternmutual\",\n\t\"norton\",\n\t\"now\",\n\t\"nowruz\",\n\t\"nowtv\",\n\t\"nra\",\n\t\"nrw\",\n\t\"ntt\",\n\t\"nyc\",\n\t\"obi\",\n\t\"observer\",\n\t\"off\",\n\t\"office\",\n\t\"okinawa\",\n\t\"olayan\",\n\t\"olayangroup\",\n\t\"oldnavy\",\n\t\"ollo\",\n\t\"omega\",\n\t\"one\",\n\t\"ong\",\n\t\"onl\",\n\t\"online\",\n\t\"onyourside\",\n\t\"ooo\",\n\t\"open\",\n\t\"oracle\",\n\t\"orange\",\n\t\"organic\",\n\t\"origins\",\n\t\"osaka\",\n\t\"otsuka\",\n\t\"ott\",\n\t\"ovh\",\n\t\"page\",\n\t\"pamperedchef\",\n\t\"panasonic\",\n\t\"panerai\",\n\t\"paris\",\n\t\"pars\",\n\t\"partners\",\n\t\"parts\",\n\t\"party\",\n\t\"passagens\",\n\t\"pay\",\n\t\"pccw\",\n\t\"pet\",\n\t\"pfizer\",\n\t\"pharmacy\",\n\t\"phd\",\n\t\"philips\",\n\t\"phone\",\n\t\"photo\",\n\t\"photography\",\n\t\"photos\",\n\t\"physio\",\n\t\"piaget\",\n\t\"pics\",\n\t\"pictet\",\n\t\"pictures\",\n\t\"pid\",\n\t\"pin\",\n\t\"ping\",\n\t\"pink\",\n\t\"pioneer\",\n\t\"pizza\",\n\t\"place\",\n\t\"play\",\n\t\"playstation\",\n\t\"plumbing\",\n\t\"plus\",\n\t\"pnc\",\n\t\"pohl\",\n\t\"poker
\",\n\t\"politie\",\n\t\"porn\",\n\t\"pramerica\",\n\t\"praxi\",\n\t\"press\",\n\t\"prime\",\n\t\"prod\",\n\t\"productions\",\n\t\"prof\",\n\t\"progressive\",\n\t\"promo\",\n\t\"properties\",\n\t\"property\",\n\t\"protection\",\n\t\"pru\",\n\t\"prudential\",\n\t\"pub\",\n\t\"pwc\",\n\t\"qpon\",\n\t\"quebec\",\n\t\"quest\",\n\t\"qvc\",\n\t\"racing\",\n\t\"radio\",\n\t\"raid\",\n\t\"read\",\n\t\"realestate\",\n\t\"realtor\",\n\t\"realty\",\n\t\"recipes\",\n\t\"red\",\n\t\"redstone\",\n\t\"redumbrella\",\n\t\"rehab\",\n\t\"reise\",\n\t\"reisen\",\n\t\"reit\",\n\t\"reliance\",\n\t\"ren\",\n\t\"rent\",\n\t\"rentals\",\n\t\"repair\",\n\t\"report\",\n\t\"republican\",\n\t\"rest\",\n\t\"restaurant\",\n\t\"review\",\n\t\"reviews\",\n\t\"rexroth\",\n\t\"rich\",\n\t\"richardli\",\n\t\"ricoh\",\n\t\"rightathome\",\n\t\"ril\",\n\t\"rio\",\n\t\"rip\",\n\t\"rmit\",\n\t\"rocher\",\n\t\"rocks\",\n\t\"rodeo\",\n\t\"rogers\",\n\t\"room\",\n\t\"rsvp\",\n\t\"rugby\",\n\t\"ruhr\",\n\t\"run\",\n\t\"rwe\",\n\t\"ryukyu\",\n\t\"saarland\",\n\t\"safe\",\n\t\"safety\",\n\t\"sakura\",\n\t\"sale\",\n\t\"salon\",\n\t\"samsclub\",\n\t\"samsung\",\n\t\"sandvik\",\n\t\"sandvikcoromant\",\n\t\"sanofi\",\n\t\"sap\",\n\t\"sapo\",\n\t\"sarl\",\n\t\"sas\",\n\t\"save\",\n\t\"saxo\",\n\t\"sbi\",\n\t\"sbs\",\n\t\"sca\",\n\t\"scb\",\n\t\"schaeffler\",\n\t\"schmidt\",\n\t\"scholarships\",\n\t\"school\",\n\t\"schule\",\n\t\"schwarz\",\n\t\"science\",\n\t\"scjohnson\",\n\t\"scor\",\n\t\"scot\",\n\t\"search\",\n\t\"seat\",\n\t\"secure\",\n\t\"security\",\n\t\"seek\",\n\t\"select\",\n\t\"sener\",\n\t\"services\",\n\t\"ses\",\n\t\"seven\",\n\t\"sew\",\n\t\"sex\",\n\t\"sexy\",\n\t\"sfr\",\n\t\"shangrila\",\n\t\"sharp\",\n\t\"shaw\",\n\t\"shell\",\n\t\"shia\",\n\t\"shiksha\",\n\t\"shoes\",\n\t\"shop\",\n\t\"shopping\",\n\t\"shouji\",\n\t\"show\",\n\t\"showtime\",\n\t\"shriram\",\n\t\"silk\",\n\t\"sina\",\n\t\"singles\",\n\t\"site\",\n\t\"ski\",\n\t\"skin\",\n\t\"sky\",\n\t\"skype\",\n\t\"sling\",\n\t\"smart\",\n\t\"
smile\",\n\t\"sncf\",\n\t\"soccer\",\n\t\"social\",\n\t\"softbank\",\n\t\"software\",\n\t\"sohu\",\n\t\"solar\",\n\t\"solutions\",\n\t\"song\",\n\t\"sony\",\n\t\"soy\",\n\t\"space\",\n\t\"spiegel\",\n\t\"spot\",\n\t\"spreadbetting\",\n\t\"srl\",\n\t\"srt\",\n\t\"stada\",\n\t\"staples\",\n\t\"star\",\n\t\"starhub\",\n\t\"statebank\",\n\t\"statefarm\",\n\t\"statoil\",\n\t\"stc\",\n\t\"stcgroup\",\n\t\"stockholm\",\n\t\"storage\",\n\t\"store\",\n\t\"stream\",\n\t\"studio\",\n\t\"study\",\n\t\"style\",\n\t\"sucks\",\n\t\"supplies\",\n\t\"supply\",\n\t\"support\",\n\t\"surf\",\n\t\"surgery\",\n\t\"suzuki\",\n\t\"swatch\",\n\t\"swiftcover\",\n\t\"swiss\",\n\t\"sydney\",\n\t\"symantec\",\n\t\"systems\",\n\t\"tab\",\n\t\"taipei\",\n\t\"talk\",\n\t\"taobao\",\n\t\"target\",\n\t\"tatamotors\",\n\t\"tatar\",\n\t\"tattoo\",\n\t\"tax\",\n\t\"taxi\",\n\t\"tci\",\n\t\"tdk\",\n\t\"team\",\n\t\"tech\",\n\t\"technology\",\n\t\"telecity\",\n\t\"telefonica\",\n\t\"temasek\",\n\t\"tennis\",\n\t\"teva\",\n\t\"thd\",\n\t\"theater\",\n\t\"theatre\",\n\t\"tiaa\",\n\t\"tickets\",\n\t\"tienda\",\n\t\"tiffany\",\n\t\"tips\",\n\t\"tires\",\n\t\"tirol\",\n\t\"tjmaxx\",\n\t\"tjx\",\n\t\"tkmaxx\",\n\t\"tmall\",\n\t\"today\",\n\t\"tokyo\",\n\t\"tools\",\n\t\"top\",\n\t\"toray\",\n\t\"toshiba\",\n\t\"total\",\n\t\"tours\",\n\t\"town\",\n\t\"toyota\",\n\t\"toys\",\n\t\"trade\",\n\t\"trading\",\n\t\"training\",\n\t\"travelchannel\",\n\t\"travelers\",\n\t\"travelersinsurance\",\n\t\"trust\",\n\t\"trv\",\n\t\"tube\",\n\t\"tui\",\n\t\"tunes\",\n\t\"tushu\",\n\t\"tvs\",\n\t\"ubank\",\n\t\"ubs\",\n\t\"uconnect\",\n\t\"unicom\",\n\t\"university\",\n\t\"uno\",\n\t\"uol\",\n\t\"ups\",\n\t\"vacations\",\n\t\"vana\",\n\t\"vanguard\",\n\t\"vegas\",\n\t\"ventures\",\n\t\"verisign\",\n\t\"versicherung\",\n\t\"vet\",\n\t\"viajes\",\n\t\"video\",\n\t\"vig\",\n\t\"viking\",\n\t\"villas\",\n\t\"vin\",\n\t\"vip\",\n\t\"virgin\",\n\t\"visa\",\n\t\"vision\",\n\t\"vista\",\n\t\"vistaprint\",\n\t\"viva\",\n\t\"vivo\",\n\t\
"vlaanderen\",\n\t\"vodka\",\n\t\"volkswagen\",\n\t\"volvo\",\n\t\"vote\",\n\t\"voting\",\n\t\"voto\",\n\t\"voyage\",\n\t\"vuelos\",\n\t\"wales\",\n\t\"walmart\",\n\t\"walter\",\n\t\"wang\",\n\t\"wanggou\",\n\t\"warman\",\n\t\"watch\",\n\t\"watches\",\n\t\"weather\",\n\t\"weatherchannel\",\n\t\"webcam\",\n\t\"weber\",\n\t\"website\",\n\t\"wed\",\n\t\"wedding\",\n\t\"weibo\",\n\t\"weir\",\n\t\"whoswho\",\n\t\"wien\",\n\t\"wiki\",\n\t\"williamhill\",\n\t\"win\",\n\t\"windows\",\n\t\"wine\",\n\t\"winners\",\n\t\"wme\",\n\t\"wolterskluwer\",\n\t\"woodside\",\n\t\"work\",\n\t\"works\",\n\t\"world\",\n\t\"wow\",\n\t\"wtc\",\n\t\"wtf\",\n\t\"xbox\",\n\t\"xerox\",\n\t\"xfinity\",\n\t\"xihuan\",\n\t\"xin\",\n\t\"xn--11b4c3d\",\n\t\"xn--1ck2e1b\",\n\t\"xn--1qqw23a\",\n\t\"xn--30rr7y\",\n\t\"xn--3bst00m\",\n\t\"xn--3ds443g\",\n\t\"xn--3oq18vl8pn36a\",\n\t\"xn--3pxu8k\",\n\t\"xn--42c2d9a\",\n\t\"xn--45q11c\",\n\t\"xn--4gbrim\",\n\t\"xn--55qw42g\",\n\t\"xn--55qx5d\",\n\t\"xn--5su34j936bgsg\",\n\t\"xn--5tzm5g\",\n\t\"xn--6frz82g\",\n\t\"xn--6qq986b3xl\",\n\t\"xn--80adxhks\",\n\t\"xn--80aqecdr1a\",\n\t\"xn--80asehdb\",\n\t\"xn--80aswg\",\n\t\"xn--8y0a063a\",\n\t\"xn--9dbq2a\",\n\t\"xn--9et52u\",\n\t\"xn--9krt00a\",\n\t\"xn--b4w605ferd\",\n\t\"xn--bck1b9a5dre4c\",\n\t\"xn--c1avg\",\n\t\"xn--c2br7g\",\n\t\"xn--cck2b3b\",\n\t\"xn--cg4bki\",\n\t\"xn--czr694b\",\n\t\"xn--czrs0t\",\n\t\"xn--czru2d\",\n\t\"xn--d1acj3b\",\n\t\"xn--eckvdtc9d\",\n\t\"xn--efvy88h\",\n\t\"xn--estv75g\",\n\t\"xn--fct429k\",\n\t\"xn--fhbei\",\n\t\"xn--fiq228c5hs\",\n\t\"xn--fiq64b\",\n\t\"xn--fjq720a\",\n\t\"xn--flw351e\",\n\t\"xn--fzys8d69uvgm\",\n\t\"xn--g2xx48c\",\n\t\"xn--gckr3f0f\",\n\t\"xn--gk3at1e\",\n\t\"xn--hxt814e\",\n\t\"xn--i1b6b1a6a2e\",\n\t\"xn--imr513n\",\n\t\"xn--io0a7i\",\n\t\"xn--j1aef\",\n\t\"xn--jlq61u9w7b\",\n\t\"xn--jvr189m\",\n\t\"xn--kcrx77d1x4a\",\n\t\"xn--kpu716f\",\n\t\"xn--kput3i\",\n\t\"xn--mgba3a3ejt\",\n\t\"xn--mgba7c0bbn0a\",\n\t\"xn--mgbaakc7dvf\",\n\t\"xn--mgbab2bd\",\n\t\"xn--
mgbb9fbpob\",\n\t\"xn--mgbca7dzdo\",\n\t\"xn--mgbi4ecexp\",\n\t\"xn--mgbt3dhd\",\n\t\"xn--mk1bu44c\",\n\t\"xn--mxtq1m\",\n\t\"xn--ngbc5azd\",\n\t\"xn--ngbe9e0a\",\n\t\"xn--ngbrx\",\n\t\"xn--nqv7f\",\n\t\"xn--nqv7fs00ema\",\n\t\"xn--nyqy26a\",\n\t\"xn--p1acf\",\n\t\"xn--pbt977c\",\n\t\"xn--pssy2u\",\n\t\"xn--q9jyb4c\",\n\t\"xn--qcka1pmc\",\n\t\"xn--rhqv96g\",\n\t\"xn--rovu88b\",\n\t\"xn--ses554g\",\n\t\"xn--t60b56a\",\n\t\"xn--tckwe\",\n\t\"xn--tiq49xqyj\",\n\t\"xn--unup4y\",\n\t\"xn--vermgensberater-ctb\",\n\t\"xn--vermgensberatung-pwb\",\n\t\"xn--vhquv\",\n\t\"xn--vuq861b\",\n\t\"xn--w4r85el8fhu5dnra\",\n\t\"xn--w4rs40l\",\n\t\"xn--xhq521b\",\n\t\"xn--zfr164b\",\n\t\"xperia\",\n\t\"xyz\",\n\t\"yachts\",\n\t\"yahoo\",\n\t\"yamaxun\",\n\t\"yandex\",\n\t\"yodobashi\",\n\t\"yoga\",\n\t\"yokohama\",\n\t\"you\",\n\t\"youtube\",\n\t\"yun\",\n\t\"zappos\",\n\t\"zara\",\n\t\"zero\",\n\t\"zip\",\n\t\"zippo\",\n\t\"zone\",\n\t\"zuerich\",\n\t\"cc.ua\",\n\t\"inf.ua\",\n\t\"ltd.ua\",\n\t\"beep.pl\",\n\t\"*.compute.estate\",\n\t\"*.alces.network\",\n\t\"*.alwaysdata.net\",\n\t\"cloudfront.net\",\n\t\"*.compute.amazonaws.com\",\n\t\"*.compute-1.amazonaws.com\",\n\t\"*.compute.amazonaws.com.cn\",\n\t\"us-east-1.amazonaws.com\",\n\t\"cn-north-1.eb.amazonaws.com.cn\",\n\t\"elasticbeanstalk.com\",\n\t\"ap-northeast-1.elasticbeanstalk.com\",\n\t\"ap-northeast-2.elasticbeanstalk.com\",\n\t\"ap-south-1.elasticbeanstalk.com\",\n\t\"ap-southeast-1.elasticbeanstalk.com\",\n\t\"ap-southeast-2.elasticbeanstalk.com\",\n\t\"ca-central-1.elasticbeanstalk.com\",\n\t\"eu-central-1.elasticbeanstalk.com\",\n\t\"eu-west-1.elasticbeanstalk.com\",\n\t\"eu-west-2.elasticbeanstalk.com\",\n\t\"sa-east-1.elasticbeanstalk.com\",\n\t\"us-east-1.elasticbeanstalk.com\",\n\t\"us-east-2.elasticbeanstalk.com\",\n\t\"us-gov-west-1.elasticbeanstalk.com\",\n\t\"us-west-1.elasticbeanstalk.com\",\n\t\"us-west-2.elasticbeanstalk.com\",\n\t\"*.elb.amazonaws.com\",\n\t\"*.elb.amazonaws.com.cn\",\n\t\"s3.amazonaws.com\",
\n\t\"s3-ap-northeast-1.amazonaws.com\",\n\t\"s3-ap-northeast-2.amazonaws.com\",\n\t\"s3-ap-south-1.amazonaws.com\",\n\t\"s3-ap-southeast-1.amazonaws.com\",\n\t\"s3-ap-southeast-2.amazonaws.com\",\n\t\"s3-ca-central-1.amazonaws.com\",\n\t\"s3-eu-central-1.amazonaws.com\",\n\t\"s3-eu-west-1.amazonaws.com\",\n\t\"s3-eu-west-2.amazonaws.com\",\n\t\"s3-external-1.amazonaws.com\",\n\t\"s3-fips-us-gov-west-1.amazonaws.com\",\n\t\"s3-sa-east-1.amazonaws.com\",\n\t\"s3-us-gov-west-1.amazonaws.com\",\n\t\"s3-us-east-2.amazonaws.com\",\n\t\"s3-us-west-1.amazonaws.com\",\n\t\"s3-us-west-2.amazonaws.com\",\n\t\"s3.ap-northeast-2.amazonaws.com\",\n\t\"s3.ap-south-1.amazonaws.com\",\n\t\"s3.cn-north-1.amazonaws.com.cn\",\n\t\"s3.ca-central-1.amazonaws.com\",\n\t\"s3.eu-central-1.amazonaws.com\",\n\t\"s3.eu-west-2.amazonaws.com\",\n\t\"s3.us-east-2.amazonaws.com\",\n\t\"s3.dualstack.ap-northeast-1.amazonaws.com\",\n\t\"s3.dualstack.ap-northeast-2.amazonaws.com\",\n\t\"s3.dualstack.ap-south-1.amazonaws.com\",\n\t\"s3.dualstack.ap-southeast-1.amazonaws.com\",\n\t\"s3.dualstack.ap-southeast-2.amazonaws.com\",\n\t\"s3.dualstack.ca-central-1.amazonaws.com\",\n\t\"s3.dualstack.eu-central-1.amazonaws.com\",\n\t\"s3.dualstack.eu-west-1.amazonaws.com\",\n\t\"s3.dualstack.eu-west-2.amazonaws.com\",\n\t\"s3.dualstack.sa-east-1.amazonaws.com\",\n\t\"s3.dualstack.us-east-1.amazonaws.com\",\n\t\"s3.dualstack.us-east-2.amazonaws.com\",\n\t\"s3-website-us-east-1.amazonaws.com\",\n\t\"s3-website-us-west-1.amazonaws.com\",\n\t\"s3-website-us-west-2.amazonaws.com\",\n\t\"s3-website-ap-northeast-1.amazonaws.com\",\n\t\"s3-website-ap-southeast-1.amazonaws.com\",\n\t\"s3-website-ap-southeast-2.amazonaws.com\",\n\t\"s3-website-eu-west-1.amazonaws.com\",\n\t\"s3-website-sa-east-1.amazonaws.com\",\n\t\"s3-website.ap-northeast-2.amazonaws.com\",\n\t\"s3-website.ap-south-1.amazonaws.com\",\n\t\"s3-website.ca-central-1.amazonaws.com\",\n\t\"s3-website.eu-central-1.amazonaws.com\",\n\t\"s3-website.eu-west-2.a
mazonaws.com\",\n\t\"s3-website.us-east-2.amazonaws.com\",\n\t\"t3l3p0rt.net\",\n\t\"tele.amune.org\",\n\t\"on-aptible.com\",\n\t\"user.party.eus\",\n\t\"pimienta.org\",\n\t\"poivron.org\",\n\t\"potager.org\",\n\t\"sweetpepper.org\",\n\t\"myasustor.com\",\n\t\"myfritz.net\",\n\t\"*.awdev.ca\",\n\t\"*.advisor.ws\",\n\t\"backplaneapp.io\",\n\t\"betainabox.com\",\n\t\"bnr.la\",\n\t\"boomla.net\",\n\t\"boxfuse.io\",\n\t\"square7.ch\",\n\t\"bplaced.com\",\n\t\"bplaced.de\",\n\t\"square7.de\",\n\t\"bplaced.net\",\n\t\"square7.net\",\n\t\"browsersafetymark.io\",\n\t\"mycd.eu\",\n\t\"ae.org\",\n\t\"ar.com\",\n\t\"br.com\",\n\t\"cn.com\",\n\t\"com.de\",\n\t\"com.se\",\n\t\"de.com\",\n\t\"eu.com\",\n\t\"gb.com\",\n\t\"gb.net\",\n\t\"hu.com\",\n\t\"hu.net\",\n\t\"jp.net\",\n\t\"jpn.com\",\n\t\"kr.com\",\n\t\"mex.com\",\n\t\"no.com\",\n\t\"qc.com\",\n\t\"ru.com\",\n\t\"sa.com\",\n\t\"se.com\",\n\t\"se.net\",\n\t\"uk.com\",\n\t\"uk.net\",\n\t\"us.com\",\n\t\"uy.com\",\n\t\"za.bz\",\n\t\"za.com\",\n\t\"africa.com\",\n\t\"gr.com\",\n\t\"in.net\",\n\t\"us.org\",\n\t\"co.com\",\n\t\"c.la\",\n\t\"certmgr.org\",\n\t\"xenapponazure.com\",\n\t\"virtueeldomein.nl\",\n\t\"c66.me\",\n\t\"jdevcloud.com\",\n\t\"wpdevcloud.com\",\n\t\"cloudaccess.host\",\n\t\"freesite.host\",\n\t\"cloudaccess.net\",\n\t\"cloudcontrolled.com\",\n\t\"cloudcontrolapp.com\",\n\t\"co.ca\",\n\t\"co.cz\",\n\t\"c.cdn77.org\",\n\t\"cdn77-ssl.net\",\n\t\"r.cdn77.net\",\n\t\"rsc.cdn77.org\",\n\t\"ssl.origin.cdn77-secure.org\",\n\t\"cloudns.asia\",\n\t\"cloudns.biz\",\n\t\"cloudns.club\",\n\t\"cloudns.cc\",\n\t\"cloudns.eu\",\n\t\"cloudns.in\",\n\t\"cloudns.info\",\n\t\"cloudns.org\",\n\t\"cloudns.pro\",\n\t\"cloudns.pw\",\n\t\"cloudns.us\",\n\t\"co.nl\",\n\t\"co.no\",\n\t\"dyn.cosidns.de\",\n\t\"dynamisches-dns.de\",\n\t\"dnsupdater.de\",\n\t\"internet-dns.de\",\n\t\"l-o-g-i-n.de\",\n\t\"dynamic-dns.info\",\n\t\"feste-ip.net\",\n\t\"knx-server.net\",\n\t\"static-access.net\",\n\t\"realm.cz\",\n\t\"*.cryptonomic.net\",\n
\t\"cupcake.is\",\n\t\"cyon.link\",\n\t\"cyon.site\",\n\t\"daplie.me\",\n\t\"localhost.daplie.me\",\n\t\"biz.dk\",\n\t\"co.dk\",\n\t\"firm.dk\",\n\t\"reg.dk\",\n\t\"store.dk\",\n\t\"debian.net\",\n\t\"dedyn.io\",\n\t\"dnshome.de\",\n\t\"drayddns.com\",\n\t\"dreamhosters.com\",\n\t\"mydrobo.com\",\n\t\"drud.io\",\n\t\"drud.us\",\n\t\"duckdns.org\",\n\t\"dy.fi\",\n\t\"tunk.org\",\n\t\"dyndns-at-home.com\",\n\t\"dyndns-at-work.com\",\n\t\"dyndns-blog.com\",\n\t\"dyndns-free.com\",\n\t\"dyndns-home.com\",\n\t\"dyndns-ip.com\",\n\t\"dyndns-mail.com\",\n\t\"dyndns-office.com\",\n\t\"dyndns-pics.com\",\n\t\"dyndns-remote.com\",\n\t\"dyndns-server.com\",\n\t\"dyndns-web.com\",\n\t\"dyndns-wiki.com\",\n\t\"dyndns-work.com\",\n\t\"dyndns.biz\",\n\t\"dyndns.info\",\n\t\"dyndns.org\",\n\t\"dyndns.tv\",\n\t\"at-band-camp.net\",\n\t\"ath.cx\",\n\t\"barrel-of-knowledge.info\",\n\t\"barrell-of-knowledge.info\",\n\t\"better-than.tv\",\n\t\"blogdns.com\",\n\t\"blogdns.net\",\n\t\"blogdns.org\",\n\t\"blogsite.org\",\n\t\"boldlygoingnowhere.org\",\n\t\"broke-it.net\",\n\t\"buyshouses.net\",\n\t\"cechire.com\",\n\t\"dnsalias.com\",\n\t\"dnsalias.net\",\n\t\"dnsalias.org\",\n\t\"dnsdojo.com\",\n\t\"dnsdojo.net\",\n\t\"dnsdojo.org\",\n\t\"does-it.net\",\n\t\"doesntexist.com\",\n\t\"doesntexist.org\",\n\t\"dontexist.com\",\n\t\"dontexist.net\",\n\t\"dontexist.org\",\n\t\"doomdns.com\",\n\t\"doomdns.org\",\n\t\"dvrdns.org\",\n\t\"dyn-o-saur.com\",\n\t\"dynalias.com\",\n\t\"dynalias.net\",\n\t\"dynalias.org\",\n\t\"dynathome.net\",\n\t\"dyndns.ws\",\n\t\"endofinternet.net\",\n\t\"endofinternet.org\",\n\t\"endoftheinternet.org\",\n\t\"est-a-la-maison.com\",\n\t\"est-a-la-masion.com\",\n\t\"est-le-patron.com\",\n\t\"est-mon-blogueur.com\",\n\t\"for-better.biz\",\n\t\"for-more.biz\",\n\t\"for-our.info\",\n\t\"for-some.biz\",\n\t\"for-the.biz\",\n\t\"forgot.her.name\",\n\t\"forgot.his.name\",\n\t\"from-ak.com\",\n\t\"from-al.com\",\n\t\"from-ar.com\",\n\t\"from-az.net\",\n\t\"from-ca.com\",\n\t\
"from-co.net\",\n\t\"from-ct.com\",\n\t\"from-dc.com\",\n\t\"from-de.com\",\n\t\"from-fl.com\",\n\t\"from-ga.com\",\n\t\"from-hi.com\",\n\t\"from-ia.com\",\n\t\"from-id.com\",\n\t\"from-il.com\",\n\t\"from-in.com\",\n\t\"from-ks.com\",\n\t\"from-ky.com\",\n\t\"from-la.net\",\n\t\"from-ma.com\",\n\t\"from-md.com\",\n\t\"from-me.org\",\n\t\"from-mi.com\",\n\t\"from-mn.com\",\n\t\"from-mo.com\",\n\t\"from-ms.com\",\n\t\"from-mt.com\",\n\t\"from-nc.com\",\n\t\"from-nd.com\",\n\t\"from-ne.com\",\n\t\"from-nh.com\",\n\t\"from-nj.com\",\n\t\"from-nm.com\",\n\t\"from-nv.com\",\n\t\"from-ny.net\",\n\t\"from-oh.com\",\n\t\"from-ok.com\",\n\t\"from-or.com\",\n\t\"from-pa.com\",\n\t\"from-pr.com\",\n\t\"from-ri.com\",\n\t\"from-sc.com\",\n\t\"from-sd.com\",\n\t\"from-tn.com\",\n\t\"from-tx.com\",\n\t\"from-ut.com\",\n\t\"from-va.com\",\n\t\"from-vt.com\",\n\t\"from-wa.com\",\n\t\"from-wi.com\",\n\t\"from-wv.com\",\n\t\"from-wy.com\",\n\t\"ftpaccess.cc\",\n\t\"fuettertdasnetz.de\",\n\t\"game-host.org\",\n\t\"game-server.cc\",\n\t\"getmyip.com\",\n\t\"gets-it.net\",\n\t\"go.dyndns.org\",\n\t\"gotdns.com\",\n\t\"gotdns.org\",\n\t\"groks-the.info\",\n\t\"groks-this.info\",\n\t\"ham-radio-op.net\",\n\t\"here-for-more.info\",\n\t\"hobby-site.com\",\n\t\"hobby-site.org\",\n\t\"home.dyndns.org\",\n\t\"homedns.org\",\n\t\"homeftp.net\",\n\t\"homeftp.org\",\n\t\"homeip.net\",\n\t\"homelinux.com\",\n\t\"homelinux.net\",\n\t\"homelinux.org\",\n\t\"homeunix.com\",\n\t\"homeunix.net\",\n\t\"homeunix.org\",\n\t\"iamallama.com\",\n\t\"in-the-band.net\",\n\t\"is-a-anarchist.com\",\n\t\"is-a-blogger.com\",\n\t\"is-a-bookkeeper.com\",\n\t\"is-a-bruinsfan.org\",\n\t\"is-a-bulls-fan.com\",\n\t\"is-a-candidate.org\",\n\t\"is-a-caterer.com\",\n\t\"is-a-celticsfan.org\",\n\t\"is-a-chef.com\",\n\t\"is-a-chef.net\",\n\t\"is-a-chef.org\",\n\t\"is-a-conservative.com\",\n\t\"is-a-cpa.com\",\n\t\"is-a-cubicle-slave.com\",\n\t\"is-a-democrat.com\",\n\t\"is-a-designer.com\",\n\t\"is-a-doctor.com\",\n\t\"is-a-
financialadvisor.com\",\n\t\"is-a-geek.com\",\n\t\"is-a-geek.net\",\n\t\"is-a-geek.org\",\n\t\"is-a-green.com\",\n\t\"is-a-guru.com\",\n\t\"is-a-hard-worker.com\",\n\t\"is-a-hunter.com\",\n\t\"is-a-knight.org\",\n\t\"is-a-landscaper.com\",\n\t\"is-a-lawyer.com\",\n\t\"is-a-liberal.com\",\n\t\"is-a-libertarian.com\",\n\t\"is-a-linux-user.org\",\n\t\"is-a-llama.com\",\n\t\"is-a-musician.com\",\n\t\"is-a-nascarfan.com\",\n\t\"is-a-nurse.com\",\n\t\"is-a-painter.com\",\n\t\"is-a-patsfan.org\",\n\t\"is-a-personaltrainer.com\",\n\t\"is-a-photographer.com\",\n\t\"is-a-player.com\",\n\t\"is-a-republican.com\",\n\t\"is-a-rockstar.com\",\n\t\"is-a-socialist.com\",\n\t\"is-a-soxfan.org\",\n\t\"is-a-student.com\",\n\t\"is-a-teacher.com\",\n\t\"is-a-techie.com\",\n\t\"is-a-therapist.com\",\n\t\"is-an-accountant.com\",\n\t\"is-an-actor.com\",\n\t\"is-an-actress.com\",\n\t\"is-an-anarchist.com\",\n\t\"is-an-artist.com\",\n\t\"is-an-engineer.com\",\n\t\"is-an-entertainer.com\",\n\t\"is-by.us\",\n\t\"is-certified.com\",\n\t\"is-found.org\",\n\t\"is-gone.com\",\n\t\"is-into-anime.com\",\n\t\"is-into-cars.com\",\n\t\"is-into-cartoons.com\",\n\t\"is-into-games.com\",\n\t\"is-leet.com\",\n\t\"is-lost.org\",\n\t\"is-not-certified.com\",\n\t\"is-saved.org\",\n\t\"is-slick.com\",\n\t\"is-uberleet.com\",\n\t\"is-very-bad.org\",\n\t\"is-very-evil.org\",\n\t\"is-very-good.org\",\n\t\"is-very-nice.org\",\n\t\"is-very-sweet.org\",\n\t\"is-with-theband.com\",\n\t\"isa-geek.com\",\n\t\"isa-geek.net\",\n\t\"isa-geek.org\",\n\t\"isa-hockeynut.com\",\n\t\"issmarterthanyou.com\",\n\t\"isteingeek.de\",\n\t\"istmein.de\",\n\t\"kicks-ass.net\",\n\t\"kicks-ass.org\",\n\t\"knowsitall.info\",\n\t\"land-4-sale.us\",\n\t\"lebtimnetz.de\",\n\t\"leitungsen.de\",\n\t\"likes-pie.com\",\n\t\"likescandy.com\",\n\t\"merseine.nu\",\n\t\"mine.nu\",\n\t\"misconfused.org\",\n\t\"mypets.ws\",\n\t\"myphotos.cc\",\n\t\"neat-url.com\",\n\t\"office-on-the.net\",\n\t\"on-the-web.tv\",\n\t\"podzone.net\",\n\t\"podzone.org\",\
n\t\"readmyblog.org\",\n\t\"saves-the-whales.com\",\n\t\"scrapper-site.net\",\n\t\"scrapping.cc\",\n\t\"selfip.biz\",\n\t\"selfip.com\",\n\t\"selfip.info\",\n\t\"selfip.net\",\n\t\"selfip.org\",\n\t\"sells-for-less.com\",\n\t\"sells-for-u.com\",\n\t\"sells-it.net\",\n\t\"sellsyourhome.org\",\n\t\"servebbs.com\",\n\t\"servebbs.net\",\n\t\"servebbs.org\",\n\t\"serveftp.net\",\n\t\"serveftp.org\",\n\t\"servegame.org\",\n\t\"shacknet.nu\",\n\t\"simple-url.com\",\n\t\"space-to-rent.com\",\n\t\"stuff-4-sale.org\",\n\t\"stuff-4-sale.us\",\n\t\"teaches-yoga.com\",\n\t\"thruhere.net\",\n\t\"traeumtgerade.de\",\n\t\"webhop.biz\",\n\t\"webhop.info\",\n\t\"webhop.net\",\n\t\"webhop.org\",\n\t\"worse-than.tv\",\n\t\"writesthisblog.com\",\n\t\"ddnss.de\",\n\t\"dyn.ddnss.de\",\n\t\"dyndns.ddnss.de\",\n\t\"dyndns1.de\",\n\t\"dyn-ip24.de\",\n\t\"home-webserver.de\",\n\t\"dyn.home-webserver.de\",\n\t\"myhome-server.de\",\n\t\"ddnss.org\",\n\t\"definima.net\",\n\t\"definima.io\",\n\t\"ddnsfree.com\",\n\t\"ddnsgeek.com\",\n\t\"giize.com\",\n\t\"gleeze.com\",\n\t\"kozow.com\",\n\t\"loseyourip.com\",\n\t\"ooguy.com\",\n\t\"theworkpc.com\",\n\t\"casacam.net\",\n\t\"dynu.net\",\n\t\"accesscam.org\",\n\t\"camdvr.org\",\n\t\"freeddns.org\",\n\t\"mywire.org\",\n\t\"webredirect.org\",\n\t\"myddns.rocks\",\n\t\"blogsite.xyz\",\n\t\"dynv6.net\",\n\t\"e4.cz\",\n\t\"mytuleap.com\",\n\t\"enonic.io\",\n\t\"customer.enonic.io\",\n\t\"eu.org\",\n\t\"al.eu.org\",\n\t\"asso.eu.org\",\n\t\"at.eu.org\",\n\t\"au.eu.org\",\n\t\"be.eu.org\",\n\t\"bg.eu.org\",\n\t\"ca.eu.org\",\n\t\"cd.eu.org\",\n\t\"ch.eu.org\",\n\t\"cn.eu.org\",\n\t\"cy.eu.org\",\n\t\"cz.eu.org\",\n\t\"de.eu.org\",\n\t\"dk.eu.org\",\n\t\"edu.eu.org\",\n\t\"ee.eu.org\",\n\t\"es.eu.org\",\n\t\"fi.eu.org\",\n\t\"fr.eu.org\",\n\t\"gr.eu.org\",\n\t\"hr.eu.org\",\n\t\"hu.eu.org\",\n\t\"ie.eu.org\",\n\t\"il.eu.org\",\n\t\"in.eu.org\",\n\t\"int.eu.org\",\n\t\"is.eu.org\",\n\t\"it.eu.org\",\n\t\"jp.eu.org\",\n\t\"kr.eu.org\",\n\t\"lt.eu.org\",\n\t\"
lu.eu.org\",\n\t\"lv.eu.org\",\n\t\"mc.eu.org\",\n\t\"me.eu.org\",\n\t\"mk.eu.org\",\n\t\"mt.eu.org\",\n\t\"my.eu.org\",\n\t\"net.eu.org\",\n\t\"ng.eu.org\",\n\t\"nl.eu.org\",\n\t\"no.eu.org\",\n\t\"nz.eu.org\",\n\t\"paris.eu.org\",\n\t\"pl.eu.org\",\n\t\"pt.eu.org\",\n\t\"q-a.eu.org\",\n\t\"ro.eu.org\",\n\t\"ru.eu.org\",\n\t\"se.eu.org\",\n\t\"si.eu.org\",\n\t\"sk.eu.org\",\n\t\"tr.eu.org\",\n\t\"uk.eu.org\",\n\t\"us.eu.org\",\n\t\"eu-1.evennode.com\",\n\t\"eu-2.evennode.com\",\n\t\"eu-3.evennode.com\",\n\t\"eu-4.evennode.com\",\n\t\"us-1.evennode.com\",\n\t\"us-2.evennode.com\",\n\t\"us-3.evennode.com\",\n\t\"us-4.evennode.com\",\n\t\"twmail.cc\",\n\t\"twmail.net\",\n\t\"twmail.org\",\n\t\"mymailer.com.tw\",\n\t\"url.tw\",\n\t\"apps.fbsbx.com\",\n\t\"ru.net\",\n\t\"adygeya.ru\",\n\t\"bashkiria.ru\",\n\t\"bir.ru\",\n\t\"cbg.ru\",\n\t\"com.ru\",\n\t\"dagestan.ru\",\n\t\"grozny.ru\",\n\t\"kalmykia.ru\",\n\t\"kustanai.ru\",\n\t\"marine.ru\",\n\t\"mordovia.ru\",\n\t\"msk.ru\",\n\t\"mytis.ru\",\n\t\"nalchik.ru\",\n\t\"nov.ru\",\n\t\"pyatigorsk.ru\",\n\t\"spb.ru\",\n\t\"vladikavkaz.ru\",\n\t\"vladimir.ru\",\n\t\"abkhazia.su\",\n\t\"adygeya.su\",\n\t\"aktyubinsk.su\",\n\t\"arkhangelsk.su\",\n\t\"armenia.su\",\n\t\"ashgabad.su\",\n\t\"azerbaijan.su\",\n\t\"balashov.su\",\n\t\"bashkiria.su\",\n\t\"bryansk.su\",\n\t\"bukhara.su\",\n\t\"chimkent.su\",\n\t\"dagestan.su\",\n\t\"east-kazakhstan.su\",\n\t\"exnet.su\",\n\t\"georgia.su\",\n\t\"grozny.su\",\n\t\"ivanovo.su\",\n\t\"jambyl.su\",\n\t\"kalmykia.su\",\n\t\"kaluga.su\",\n\t\"karacol.su\",\n\t\"karaganda.su\",\n\t\"karelia.su\",\n\t\"khakassia.su\",\n\t\"krasnodar.su\",\n\t\"kurgan.su\",\n\t\"kustanai.su\",\n\t\"lenug.su\",\n\t\"mangyshlak.su\",\n\t\"mordovia.su\",\n\t\"msk.su\",\n\t\"murmansk.su\",\n\t\"nalchik.su\",\n\t\"navoi.su\",\n\t\"north-kazakhstan.su\",\n\t\"nov.su\",\n\t\"obninsk.su\",\n\t\"penza.su\",\n\t\"pokrovsk.su\",\n\t\"sochi.su\",\n\t\"spb.su\",\n\t\"tashkent.su\",\n\t\"termez.su\",\n\t\"togliatti.su\",\n
\t\"troitsk.su\",\n\t\"tselinograd.su\",\n\t\"tula.su\",\n\t\"tuva.su\",\n\t\"vladikavkaz.su\",\n\t\"vladimir.su\",\n\t\"vologda.su\",\n\t\"channelsdvr.net\",\n\t\"fastlylb.net\",\n\t\"map.fastlylb.net\",\n\t\"freetls.fastly.net\",\n\t\"map.fastly.net\",\n\t\"a.prod.fastly.net\",\n\t\"global.prod.fastly.net\",\n\t\"a.ssl.fastly.net\",\n\t\"b.ssl.fastly.net\",\n\t\"global.ssl.fastly.net\",\n\t\"fhapp.xyz\",\n\t\"fedorainfracloud.org\",\n\t\"fedorapeople.org\",\n\t\"cloud.fedoraproject.org\",\n\t\"filegear.me\",\n\t\"firebaseapp.com\",\n\t\"flynnhub.com\",\n\t\"flynnhosting.net\",\n\t\"freebox-os.com\",\n\t\"freeboxos.com\",\n\t\"fbx-os.fr\",\n\t\"fbxos.fr\",\n\t\"freebox-os.fr\",\n\t\"freeboxos.fr\",\n\t\"myfusion.cloud\",\n\t\"*.futurecms.at\",\n\t\"futurehosting.at\",\n\t\"futuremailing.at\",\n\t\"*.ex.ortsinfo.at\",\n\t\"*.kunden.ortsinfo.at\",\n\t\"*.statics.cloud\",\n\t\"service.gov.uk\",\n\t\"github.io\",\n\t\"githubusercontent.com\",\n\t\"gitlab.io\",\n\t\"homeoffice.gov.uk\",\n\t\"ro.im\",\n\t\"shop.ro\",\n\t\"goip.de\",\n\t\"*.0emm.com\",\n\t\"appspot.com\",\n\t\"blogspot.ae\",\n\t\"blogspot.al\",\n\t\"blogspot.am\",\n\t\"blogspot.ba\",\n\t\"blogspot.be\",\n\t\"blogspot.bg\",\n\t\"blogspot.bj\",\n\t\"blogspot.ca\",\n\t\"blogspot.cf\",\n\t\"blogspot.ch\",\n\t\"blogspot.cl\",\n\t\"blogspot.co.at\",\n\t\"blogspot.co.id\",\n\t\"blogspot.co.il\",\n\t\"blogspot.co.ke\",\n\t\"blogspot.co.nz\",\n\t\"blogspot.co.uk\",\n\t\"blogspot.co.za\",\n\t\"blogspot.com\",\n\t\"blogspot.com.ar\",\n\t\"blogspot.com.au\",\n\t\"blogspot.com.br\",\n\t\"blogspot.com.by\",\n\t\"blogspot.com.co\",\n\t\"blogspot.com.cy\",\n\t\"blogspot.com.ee\",\n\t\"blogspot.com.eg\",\n\t\"blogspot.com.es\",\n\t\"blogspot.com.mt\",\n\t\"blogspot.com.ng\",\n\t\"blogspot.com.tr\",\n\t\"blogspot.com.uy\",\n\t\"blogspot.cv\",\n\t\"blogspot.cz\",\n\t\"blogspot.de\",\n\t\"blogspot.dk\",\n\t\"blogspot.fi\",\n\t\"blogspot.fr\",\n\t\"blogspot.gr\",\n\t\"blogspot.hk\",\n\t\"blogspot.hr\",\n\t\"blogspot.hu\",\n\t
\"blogspot.ie\",\n\t\"blogspot.in\",\n\t\"blogspot.is\",\n\t\"blogspot.it\",\n\t\"blogspot.jp\",\n\t\"blogspot.kr\",\n\t\"blogspot.li\",\n\t\"blogspot.lt\",\n\t\"blogspot.lu\",\n\t\"blogspot.md\",\n\t\"blogspot.mk\",\n\t\"blogspot.mr\",\n\t\"blogspot.mx\",\n\t\"blogspot.my\",\n\t\"blogspot.nl\",\n\t\"blogspot.no\",\n\t\"blogspot.pe\",\n\t\"blogspot.pt\",\n\t\"blogspot.qa\",\n\t\"blogspot.re\",\n\t\"blogspot.ro\",\n\t\"blogspot.rs\",\n\t\"blogspot.ru\",\n\t\"blogspot.se\",\n\t\"blogspot.sg\",\n\t\"blogspot.si\",\n\t\"blogspot.sk\",\n\t\"blogspot.sn\",\n\t\"blogspot.td\",\n\t\"blogspot.tw\",\n\t\"blogspot.ug\",\n\t\"blogspot.vn\",\n\t\"cloudfunctions.net\",\n\t\"cloud.goog\",\n\t\"codespot.com\",\n\t\"googleapis.com\",\n\t\"googlecode.com\",\n\t\"pagespeedmobilizer.com\",\n\t\"publishproxy.com\",\n\t\"withgoogle.com\",\n\t\"withyoutube.com\",\n\t\"hashbang.sh\",\n\t\"hasura-app.io\",\n\t\"hepforge.org\",\n\t\"herokuapp.com\",\n\t\"herokussl.com\",\n\t\"moonscale.net\",\n\t\"iki.fi\",\n\t\"biz.at\",\n\t\"info.at\",\n\t\"info.cx\",\n\t\"ac.leg.br\",\n\t\"al.leg.br\",\n\t\"am.leg.br\",\n\t\"ap.leg.br\",\n\t\"ba.leg.br\",\n\t\"ce.leg.br\",\n\t\"df.leg.br\",\n\t\"es.leg.br\",\n\t\"go.leg.br\",\n\t\"ma.leg.br\",\n\t\"mg.leg.br\",\n\t\"ms.leg.br\",\n\t\"mt.leg.br\",\n\t\"pa.leg.br\",\n\t\"pb.leg.br\",\n\t\"pe.leg.br\",\n\t\"pi.leg.br\",\n\t\"pr.leg.br\",\n\t\"rj.leg.br\",\n\t\"rn.leg.br\",\n\t\"ro.leg.br\",\n\t\"rr.leg.br\",\n\t\"rs.leg.br\",\n\t\"sc.leg.br\",\n\t\"se.leg.br\",\n\t\"sp.leg.br\",\n\t\"to.leg.br\",\n\t\"pixolino.com\",\n\t\"ipifony.net\",\n\t\"*.triton.zone\",\n\t\"*.cns.joyent.com\",\n\t\"js.org\",\n\t\"keymachine.de\",\n\t\"knightpoint.systems\",\n\t\"co.krd\",\n\t\"edu.krd\",\n\t\"git-repos.de\",\n\t\"lcube-server.de\",\n\t\"svn-repos.de\",\n\t\"we.bs\",\n\t\"barsy.bg\",\n\t\"barsyonline.com\",\n\t\"barsy.de\",\n\t\"barsy.eu\",\n\t\"barsy.in\",\n\t\"barsy.net\",\n\t\"barsy.online\",\n\t\"barsy.support\",\n\t\"*.magentosite.cloud\",\n\t\"hb.cldmail.ru\",\n\t
\"cloud.metacentrum.cz\",\n\t\"custom.metacentrum.cz\",\n\t\"meteorapp.com\",\n\t\"eu.meteorapp.com\",\n\t\"co.pl\",\n\t\"azurewebsites.net\",\n\t\"azure-mobile.net\",\n\t\"cloudapp.net\",\n\t\"bmoattachments.org\",\n\t\"net.ru\",\n\t\"org.ru\",\n\t\"pp.ru\",\n\t\"bitballoon.com\",\n\t\"netlify.com\",\n\t\"4u.com\",\n\t\"ngrok.io\",\n\t\"nfshost.com\",\n\t\"nsupdate.info\",\n\t\"nerdpol.ovh\",\n\t\"blogsyte.com\",\n\t\"brasilia.me\",\n\t\"cable-modem.org\",\n\t\"ciscofreak.com\",\n\t\"collegefan.org\",\n\t\"couchpotatofries.org\",\n\t\"damnserver.com\",\n\t\"ddns.me\",\n\t\"ditchyourip.com\",\n\t\"dnsfor.me\",\n\t\"dnsiskinky.com\",\n\t\"dvrcam.info\",\n\t\"dynns.com\",\n\t\"eating-organic.net\",\n\t\"fantasyleague.cc\",\n\t\"geekgalaxy.com\",\n\t\"golffan.us\",\n\t\"health-carereform.com\",\n\t\"homesecuritymac.com\",\n\t\"homesecuritypc.com\",\n\t\"hopto.me\",\n\t\"ilovecollege.info\",\n\t\"loginto.me\",\n\t\"mlbfan.org\",\n\t\"mmafan.biz\",\n\t\"myactivedirectory.com\",\n\t\"mydissent.net\",\n\t\"myeffect.net\",\n\t\"mymediapc.net\",\n\t\"mypsx.net\",\n\t\"mysecuritycamera.com\",\n\t\"mysecuritycamera.net\",\n\t\"mysecuritycamera.org\",\n\t\"net-freaks.com\",\n\t\"nflfan.org\",\n\t\"nhlfan.net\",\n\t\"no-ip.ca\",\n\t\"no-ip.co.uk\",\n\t\"no-ip.net\",\n\t\"noip.us\",\n\t\"onthewifi.com\",\n\t\"pgafan.net\",\n\t\"point2this.com\",\n\t\"pointto.us\",\n\t\"privatizehealthinsurance.net\",\n\t\"quicksytes.com\",\n\t\"read-books.org\",\n\t\"securitytactics.com\",\n\t\"serveexchange.com\",\n\t\"servehumour.com\",\n\t\"servep2p.com\",\n\t\"servesarcasm.com\",\n\t\"stufftoread.com\",\n\t\"ufcfan.org\",\n\t\"unusualperson.com\",\n\t\"workisboring.com\",\n\t\"3utilities.com\",\n\t\"bounceme.net\",\n\t\"ddns.net\",\n\t\"ddnsking.com\",\n\t\"gotdns.ch\",\n\t\"hopto.org\",\n\t\"myftp.biz\",\n\t\"myftp.org\",\n\t\"myvnc.com\",\n\t\"no-ip.biz\",\n\t\"no-ip.info\",\n\t\"no-ip.org\",\n\t\"noip.me\",\n\t\"redirectme.net\",\n\t\"servebeer.com\",\n\t\"serveblog.net\",\n\t\"servecounte
rstrike.com\",\n\t\"serveftp.com\",\n\t\"servegame.com\",\n\t\"servehalflife.com\",\n\t\"servehttp.com\",\n\t\"serveirc.com\",\n\t\"serveminecraft.net\",\n\t\"servemp3.com\",\n\t\"servepics.com\",\n\t\"servequake.com\",\n\t\"sytes.net\",\n\t\"webhop.me\",\n\t\"zapto.org\",\n\t\"stage.nodeart.io\",\n\t\"nodum.co\",\n\t\"nodum.io\",\n\t\"nyc.mn\",\n\t\"nom.ae\",\n\t\"nom.ai\",\n\t\"nom.al\",\n\t\"nym.by\",\n\t\"nym.bz\",\n\t\"nom.cl\",\n\t\"nom.gd\",\n\t\"nom.gl\",\n\t\"nym.gr\",\n\t\"nom.gt\",\n\t\"nom.hn\",\n\t\"nom.im\",\n\t\"nym.kz\",\n\t\"nym.la\",\n\t\"nom.li\",\n\t\"nym.li\",\n\t\"nym.lt\",\n\t\"nym.lu\",\n\t\"nym.me\",\n\t\"nom.mk\",\n\t\"nym.mx\",\n\t\"nom.nu\",\n\t\"nym.nz\",\n\t\"nym.pe\",\n\t\"nym.pt\",\n\t\"nom.pw\",\n\t\"nom.qa\",\n\t\"nom.rs\",\n\t\"nom.si\",\n\t\"nym.sk\",\n\t\"nym.su\",\n\t\"nym.sx\",\n\t\"nym.tw\",\n\t\"nom.ug\",\n\t\"nom.uy\",\n\t\"nom.vc\",\n\t\"nom.vg\",\n\t\"cya.gg\",\n\t\"nid.io\",\n\t\"opencraft.hosting\",\n\t\"operaunite.com\",\n\t\"outsystemscloud.com\",\n\t\"ownprovider.com\",\n\t\"oy.lc\",\n\t\"pgfog.com\",\n\t\"pagefrontapp.com\",\n\t\"art.pl\",\n\t\"gliwice.pl\",\n\t\"krakow.pl\",\n\t\"poznan.pl\",\n\t\"wroc.pl\",\n\t\"zakopane.pl\",\n\t\"pantheonsite.io\",\n\t\"gotpantheon.com\",\n\t\"mypep.link\",\n\t\"on-web.fr\",\n\t\"*.platform.sh\",\n\t\"*.platformsh.site\",\n\t\"xen.prgmr.com\",\n\t\"priv.at\",\n\t\"protonet.io\",\n\t\"chirurgiens-dentistes-en-france.fr\",\n\t\"byen.site\",\n\t\"qa2.com\",\n\t\"dev-myqnapcloud.com\",\n\t\"alpha-myqnapcloud.com\",\n\t\"myqnapcloud.com\",\n\t\"*.quipelements.com\",\n\t\"vapor.cloud\",\n\t\"vaporcloud.io\",\n\t\"rackmaze.com\",\n\t\"rackmaze.net\",\n\t\"rhcloud.com\",\n\t\"hzc.io\",\n\t\"wellbeingzone.eu\",\n\t\"ptplus.fit\",\n\t\"wellbeingzone.co.uk\",\n\t\"sandcats.io\",\n\t\"logoip.de\",\n\t\"logoip.com\",\n\t\"firewall-gateway.com\",\n\t\"firewall-gateway.de\",\n\t\"my-gateway.de\",\n\t\"my-router.de\",\n\t\"spdns.de\",\n\t\"spdns.eu\",\n\t\"firewall-gateway.net\",\n\t\"my-firewal
l.org\",\n\t\"myfirewall.org\",\n\t\"spdns.org\",\n\t\"*.sensiosite.cloud\",\n\t\"biz.ua\",\n\t\"co.ua\",\n\t\"pp.ua\",\n\t\"shiftedit.io\",\n\t\"myshopblocks.com\",\n\t\"1kapp.com\",\n\t\"appchizi.com\",\n\t\"applinzi.com\",\n\t\"sinaapp.com\",\n\t\"vipsinaapp.com\",\n\t\"bounty-full.com\",\n\t\"alpha.bounty-full.com\",\n\t\"beta.bounty-full.com\",\n\t\"static.land\",\n\t\"dev.static.land\",\n\t\"sites.static.land\",\n\t\"apps.lair.io\",\n\t\"*.stolos.io\",\n\t\"spacekit.io\",\n\t\"stackspace.space\",\n\t\"storj.farm\",\n\t\"temp-dns.com\",\n\t\"diskstation.me\",\n\t\"dscloud.biz\",\n\t\"dscloud.me\",\n\t\"dscloud.mobi\",\n\t\"dsmynas.com\",\n\t\"dsmynas.net\",\n\t\"dsmynas.org\",\n\t\"familyds.com\",\n\t\"familyds.net\",\n\t\"familyds.org\",\n\t\"i234.me\",\n\t\"myds.me\",\n\t\"synology.me\",\n\t\"vpnplus.to\",\n\t\"taifun-dns.de\",\n\t\"gda.pl\",\n\t\"gdansk.pl\",\n\t\"gdynia.pl\",\n\t\"med.pl\",\n\t\"sopot.pl\",\n\t\"cust.dev.thingdust.io\",\n\t\"cust.disrec.thingdust.io\",\n\t\"cust.prod.thingdust.io\",\n\t\"cust.testing.thingdust.io\",\n\t\"bloxcms.com\",\n\t\"townnews-staging.com\",\n\t\"12hp.at\",\n\t\"2ix.at\",\n\t\"4lima.at\",\n\t\"lima-city.at\",\n\t\"12hp.ch\",\n\t\"2ix.ch\",\n\t\"4lima.ch\",\n\t\"lima-city.ch\",\n\t\"trafficplex.cloud\",\n\t\"de.cool\",\n\t\"12hp.de\",\n\t\"2ix.de\",\n\t\"4lima.de\",\n\t\"lima-city.de\",\n\t\"1337.pictures\",\n\t\"clan.rip\",\n\t\"lima-city.rocks\",\n\t\"webspace.rocks\",\n\t\"lima.zone\",\n\t\"*.transurl.be\",\n\t\"*.transurl.eu\",\n\t\"*.transurl.nl\",\n\t\"tuxfamily.org\",\n\t\"dd-dns.de\",\n\t\"diskstation.eu\",\n\t\"diskstation.org\",\n\t\"dray-dns.de\",\n\t\"draydns.de\",\n\t\"dyn-vpn.de\",\n\t\"dynvpn.de\",\n\t\"mein-vigor.de\",\n\t\"my-vigor.de\",\n\t\"my-wan.de\",\n\t\"syno-ds.de\",\n\t\"synology-diskstation.de\",\n\t\"synology-ds.de\",\n\t\"uber.space\",\n\t\"hk.com\",\n\t\"hk.org\",\n\t\"ltd.hk\",\n\t\"inc.hk\",\n\t\"lib.de.us\",\n\t\"router.management\",\n\t\"v-info.info\",\n\t\"wedeploy.io\",\n\t\"wedeploy.
me\",\n\t\"wedeploy.sh\",\n\t\"remotewd.com\",\n\t\"wmflabs.org\",\n\t\"cistron.nl\",\n\t\"demon.nl\",\n\t\"xs4all.space\",\n\t\"yolasite.com\",\n\t\"ybo.faith\",\n\t\"yombo.me\",\n\t\"homelink.one\",\n\t\"ybo.party\",\n\t\"ybo.review\",\n\t\"ybo.science\",\n\t\"ybo.trade\",\n\t\"za.net\",\n\t\"za.org\",\n\t\"now.sh\",\n}\n\nvar nodeLabels = [...]string{\n\t\"aaa\",\n\t\"aarp\",\n\t\"abarth\",\n\t\"abb\",\n\t\"abbott\",\n\t\"abbvie\",\n\t\"abc\",\n\t\"able\",\n\t\"abogado\",\n\t\"abudhabi\",\n\t\"ac\",\n\t\"academy\",\n\t\"accenture\",\n\t\"accountant\",\n\t\"accountants\",\n\t\"aco\",\n\t\"active\",\n\t\"actor\",\n\t\"ad\",\n\t\"adac\",\n\t\"ads\",\n\t\"adult\",\n\t\"ae\",\n\t\"aeg\",\n\t\"aero\",\n\t\"aetna\",\n\t\"af\",\n\t\"afamilycompany\",\n\t\"afl\",\n\t\"africa\",\n\t\"ag\",\n\t\"agakhan\",\n\t\"agency\",\n\t\"ai\",\n\t\"aig\",\n\t\"aigo\",\n\t\"airbus\",\n\t\"airforce\",\n\t\"airtel\",\n\t\"akdn\",\n\t\"al\",\n\t\"alfaromeo\",\n\t\"alibaba\",\n\t\"alipay\",\n\t\"allfinanz\",\n\t\"allstate\",\n\t\"ally\",\n\t\"alsace\",\n\t\"alstom\",\n\t\"am\",\n\t\"americanexpress\",\n\t\"americanfamily\",\n\t\"amex\",\n\t\"amfam\",\n\t\"amica\",\n\t\"amsterdam\",\n\t\"analytics\",\n\t\"android\",\n\t\"anquan\",\n\t\"anz\",\n\t\"ao\",\n\t\"aol\",\n\t\"apartments\",\n\t\"app\",\n\t\"apple\",\n\t\"aq\",\n\t\"aquarelle\",\n\t\"ar\",\n\t\"arab\",\n\t\"aramco\",\n\t\"archi\",\n\t\"army\",\n\t\"arpa\",\n\t\"art\",\n\t\"arte\",\n\t\"as\",\n\t\"asda\",\n\t\"asia\",\n\t\"associates\",\n\t\"at\",\n\t\"athleta\",\n\t\"attorney\",\n\t\"au\",\n\t\"auction\",\n\t\"audi\",\n\t\"audible\",\n\t\"audio\",\n\t\"auspost\",\n\t\"author\",\n\t\"auto\",\n\t\"autos\",\n\t\"avianca\",\n\t\"aw\",\n\t\"aws\",\n\t\"ax\",\n\t\"axa\",\n\t\"az\",\n\t\"azure\",\n\t\"ba\",\n\t\"baby\",\n\t\"baidu\",\n\t\"banamex\",\n\t\"bananarepublic\",\n\t\"band\",\n\t\"bank\",\n\t\"bar\",\n\t\"barcelona\",\n\t\"barclaycard\",\n\t\"barclays\",\n\t\"barefoot\",\n\t\"bargains\",\n\t\"baseball\",\n\t\"basketball\",\n\t\"ba
uhaus\",\n\t\"bayern\",\n\t\"bb\",\n\t\"bbc\",\n\t\"bbt\",\n\t\"bbva\",\n\t\"bcg\",\n\t\"bcn\",\n\t\"bd\",\n\t\"be\",\n\t\"beats\",\n\t\"beauty\",\n\t\"beer\",\n\t\"bentley\",\n\t\"berlin\",\n\t\"best\",\n\t\"bestbuy\",\n\t\"bet\",\n\t\"bf\",\n\t\"bg\",\n\t\"bh\",\n\t\"bharti\",\n\t\"bi\",\n\t\"bible\",\n\t\"bid\",\n\t\"bike\",\n\t\"bing\",\n\t\"bingo\",\n\t\"bio\",\n\t\"biz\",\n\t\"bj\",\n\t\"black\",\n\t\"blackfriday\",\n\t\"blanco\",\n\t\"blockbuster\",\n\t\"blog\",\n\t\"bloomberg\",\n\t\"blue\",\n\t\"bm\",\n\t\"bms\",\n\t\"bmw\",\n\t\"bn\",\n\t\"bnl\",\n\t\"bnpparibas\",\n\t\"bo\",\n\t\"boats\",\n\t\"boehringer\",\n\t\"bofa\",\n\t\"bom\",\n\t\"bond\",\n\t\"boo\",\n\t\"book\",\n\t\"booking\",\n\t\"boots\",\n\t\"bosch\",\n\t\"bostik\",\n\t\"boston\",\n\t\"bot\",\n\t\"boutique\",\n\t\"box\",\n\t\"br\",\n\t\"bradesco\",\n\t\"bridgestone\",\n\t\"broadway\",\n\t\"broker\",\n\t\"brother\",\n\t\"brussels\",\n\t\"bs\",\n\t\"bt\",\n\t\"budapest\",\n\t\"bugatti\",\n\t\"build\",\n\t\"builders\",\n\t\"business\",\n\t\"buy\",\n\t\"buzz\",\n\t\"bv\",\n\t\"bw\",\n\t\"by\",\n\t\"bz\",\n\t\"bzh\",\n\t\"ca\",\n\t\"cab\",\n\t\"cafe\",\n\t\"cal\",\n\t\"call\",\n\t\"calvinklein\",\n\t\"cam\",\n\t\"camera\",\n\t\"camp\",\n\t\"cancerresearch\",\n\t\"canon\",\n\t\"capetown\",\n\t\"capital\",\n\t\"capitalone\",\n\t\"car\",\n\t\"caravan\",\n\t\"cards\",\n\t\"care\",\n\t\"career\",\n\t\"careers\",\n\t\"cars\",\n\t\"cartier\",\n\t\"casa\",\n\t\"case\",\n\t\"caseih\",\n\t\"cash\",\n\t\"casino\",\n\t\"cat\",\n\t\"catering\",\n\t\"catholic\",\n\t\"cba\",\n\t\"cbn\",\n\t\"cbre\",\n\t\"cbs\",\n\t\"cc\",\n\t\"cd\",\n\t\"ceb\",\n\t\"center\",\n\t\"ceo\",\n\t\"cern\",\n\t\"cf\",\n\t\"cfa\",\n\t\"cfd\",\n\t\"cg\",\n\t\"ch\",\n\t\"chanel\",\n\t\"channel\",\n\t\"chase\",\n\t\"chat\",\n\t\"cheap\",\n\t\"chintai\",\n\t\"chloe\",\n\t\"christmas\",\n\t\"chrome\",\n\t\"chrysler\",\n\t\"church\",\n\t\"ci\",\n\t\"cipriani\",\n\t\"circle\",\n\t\"cisco\",\n\t\"citadel\",\n\t\"citi\",\n\t\"citic\",\n\t\"city\",
\n\t\"cityeats\",\n\t\"ck\",\n\t\"cl\",\n\t\"claims\",\n\t\"cleaning\",\n\t\"click\",\n\t\"clinic\",\n\t\"clinique\",\n\t\"clothing\",\n\t\"cloud\",\n\t\"club\",\n\t\"clubmed\",\n\t\"cm\",\n\t\"cn\",\n\t\"co\",\n\t\"coach\",\n\t\"codes\",\n\t\"coffee\",\n\t\"college\",\n\t\"cologne\",\n\t\"com\",\n\t\"comcast\",\n\t\"commbank\",\n\t\"community\",\n\t\"company\",\n\t\"compare\",\n\t\"computer\",\n\t\"comsec\",\n\t\"condos\",\n\t\"construction\",\n\t\"consulting\",\n\t\"contact\",\n\t\"contractors\",\n\t\"cooking\",\n\t\"cookingchannel\",\n\t\"cool\",\n\t\"coop\",\n\t\"corsica\",\n\t\"country\",\n\t\"coupon\",\n\t\"coupons\",\n\t\"courses\",\n\t\"cr\",\n\t\"credit\",\n\t\"creditcard\",\n\t\"creditunion\",\n\t\"cricket\",\n\t\"crown\",\n\t\"crs\",\n\t\"cruise\",\n\t\"cruises\",\n\t\"csc\",\n\t\"cu\",\n\t\"cuisinella\",\n\t\"cv\",\n\t\"cw\",\n\t\"cx\",\n\t\"cy\",\n\t\"cymru\",\n\t\"cyou\",\n\t\"cz\",\n\t\"dabur\",\n\t\"dad\",\n\t\"dance\",\n\t\"data\",\n\t\"date\",\n\t\"dating\",\n\t\"datsun\",\n\t\"day\",\n\t\"dclk\",\n\t\"dds\",\n\t\"de\",\n\t\"deal\",\n\t\"dealer\",\n\t\"deals\",\n\t\"degree\",\n\t\"delivery\",\n\t\"dell\",\n\t\"deloitte\",\n\t\"delta\",\n\t\"democrat\",\n\t\"dental\",\n\t\"dentist\",\n\t\"desi\",\n\t\"design\",\n\t\"dev\",\n\t\"dhl\",\n\t\"diamonds\",\n\t\"diet\",\n\t\"digital\",\n\t\"direct\",\n\t\"directory\",\n\t\"discount\",\n\t\"discover\",\n\t\"dish\",\n\t\"diy\",\n\t\"dj\",\n\t\"dk\",\n\t\"dm\",\n\t\"dnp\",\n\t\"do\",\n\t\"docs\",\n\t\"doctor\",\n\t\"dodge\",\n\t\"dog\",\n\t\"doha\",\n\t\"domains\",\n\t\"dot\",\n\t\"download\",\n\t\"drive\",\n\t\"dtv\",\n\t\"dubai\",\n\t\"duck\",\n\t\"dunlop\",\n\t\"duns\",\n\t\"dupont\",\n\t\"durban\",\n\t\"dvag\",\n\t\"dvr\",\n\t\"dz\",\n\t\"earth\",\n\t\"eat\",\n\t\"ec\",\n\t\"eco\",\n\t\"edeka\",\n\t\"edu\",\n\t\"education\",\n\t\"ee\",\n\t\"eg\",\n\t\"email\",\n\t\"emerck\",\n\t\"energy\",\n\t\"engineer\",\n\t\"engineering\",\n\t\"enterprises\",\n\t\"epost\",\n\t\"epson\",\n\t\"equipment\",\n\t\"er\",\n\
t\"ericsson\",\n\t\"erni\",\n\t\"es\",\n\t\"esq\",\n\t\"estate\",\n\t\"esurance\",\n\t\"et\",\n\t\"etisalat\",\n\t\"eu\",\n\t\"eurovision\",\n\t\"eus\",\n\t\"events\",\n\t\"everbank\",\n\t\"exchange\",\n\t\"expert\",\n\t\"exposed\",\n\t\"express\",\n\t\"extraspace\",\n\t\"fage\",\n\t\"fail\",\n\t\"fairwinds\",\n\t\"faith\",\n\t\"family\",\n\t\"fan\",\n\t\"fans\",\n\t\"farm\",\n\t\"farmers\",\n\t\"fashion\",\n\t\"fast\",\n\t\"fedex\",\n\t\"feedback\",\n\t\"ferrari\",\n\t\"ferrero\",\n\t\"fi\",\n\t\"fiat\",\n\t\"fidelity\",\n\t\"fido\",\n\t\"film\",\n\t\"final\",\n\t\"finance\",\n\t\"financial\",\n\t\"fire\",\n\t\"firestone\",\n\t\"firmdale\",\n\t\"fish\",\n\t\"fishing\",\n\t\"fit\",\n\t\"fitness\",\n\t\"fj\",\n\t\"fk\",\n\t\"flickr\",\n\t\"flights\",\n\t\"flir\",\n\t\"florist\",\n\t\"flowers\",\n\t\"fly\",\n\t\"fm\",\n\t\"fo\",\n\t\"foo\",\n\t\"food\",\n\t\"foodnetwork\",\n\t\"football\",\n\t\"ford\",\n\t\"forex\",\n\t\"forsale\",\n\t\"forum\",\n\t\"foundation\",\n\t\"fox\",\n\t\"fr\",\n\t\"free\",\n\t\"fresenius\",\n\t\"frl\",\n\t\"frogans\",\n\t\"frontdoor\",\n\t\"frontier\",\n\t\"ftr\",\n\t\"fujitsu\",\n\t\"fujixerox\",\n\t\"fun\",\n\t\"fund\",\n\t\"furniture\",\n\t\"futbol\",\n\t\"fyi\",\n\t\"ga\",\n\t\"gal\",\n\t\"gallery\",\n\t\"gallo\",\n\t\"gallup\",\n\t\"game\",\n\t\"games\",\n\t\"gap\",\n\t\"garden\",\n\t\"gb\",\n\t\"gbiz\",\n\t\"gd\",\n\t\"gdn\",\n\t\"ge\",\n\t\"gea\",\n\t\"gent\",\n\t\"genting\",\n\t\"george\",\n\t\"gf\",\n\t\"gg\",\n\t\"ggee\",\n\t\"gh\",\n\t\"gi\",\n\t\"gift\",\n\t\"gifts\",\n\t\"gives\",\n\t\"giving\",\n\t\"gl\",\n\t\"glade\",\n\t\"glass\",\n\t\"gle\",\n\t\"global\",\n\t\"globo\",\n\t\"gm\",\n\t\"gmail\",\n\t\"gmbh\",\n\t\"gmo\",\n\t\"gmx\",\n\t\"gn\",\n\t\"godaddy\",\n\t\"gold\",\n\t\"goldpoint\",\n\t\"golf\",\n\t\"goo\",\n\t\"goodhands\",\n\t\"goodyear\",\n\t\"goog\",\n\t\"google\",\n\t\"gop\",\n\t\"got\",\n\t\"gov\",\n\t\"gp\",\n\t\"gq\",\n\t\"gr\",\n\t\"grainger\",\n\t\"graphics\",\n\t\"gratis\",\n\t\"green\",\n\t\"gripe\",\n\t\"gr
ocery\",\n\t\"group\",\n\t\"gs\",\n\t\"gt\",\n\t\"gu\",\n\t\"guardian\",\n\t\"gucci\",\n\t\"guge\",\n\t\"guide\",\n\t\"guitars\",\n\t\"guru\",\n\t\"gw\",\n\t\"gy\",\n\t\"hair\",\n\t\"hamburg\",\n\t\"hangout\",\n\t\"haus\",\n\t\"hbo\",\n\t\"hdfc\",\n\t\"hdfcbank\",\n\t\"health\",\n\t\"healthcare\",\n\t\"help\",\n\t\"helsinki\",\n\t\"here\",\n\t\"hermes\",\n\t\"hgtv\",\n\t\"hiphop\",\n\t\"hisamitsu\",\n\t\"hitachi\",\n\t\"hiv\",\n\t\"hk\",\n\t\"hkt\",\n\t\"hm\",\n\t\"hn\",\n\t\"hockey\",\n\t\"holdings\",\n\t\"holiday\",\n\t\"homedepot\",\n\t\"homegoods\",\n\t\"homes\",\n\t\"homesense\",\n\t\"honda\",\n\t\"honeywell\",\n\t\"horse\",\n\t\"hospital\",\n\t\"host\",\n\t\"hosting\",\n\t\"hot\",\n\t\"hoteles\",\n\t\"hotels\",\n\t\"hotmail\",\n\t\"house\",\n\t\"how\",\n\t\"hr\",\n\t\"hsbc\",\n\t\"ht\",\n\t\"htc\",\n\t\"hu\",\n\t\"hughes\",\n\t\"hyatt\",\n\t\"hyundai\",\n\t\"ibm\",\n\t\"icbc\",\n\t\"ice\",\n\t\"icu\",\n\t\"id\",\n\t\"ie\",\n\t\"ieee\",\n\t\"ifm\",\n\t\"ikano\",\n\t\"il\",\n\t\"im\",\n\t\"imamat\",\n\t\"imdb\",\n\t\"immo\",\n\t\"immobilien\",\n\t\"in\",\n\t\"industries\",\n\t\"infiniti\",\n\t\"info\",\n\t\"ing\",\n\t\"ink\",\n\t\"institute\",\n\t\"insurance\",\n\t\"insure\",\n\t\"int\",\n\t\"intel\",\n\t\"international\",\n\t\"intuit\",\n\t\"investments\",\n\t\"io\",\n\t\"ipiranga\",\n\t\"iq\",\n\t\"ir\",\n\t\"irish\",\n\t\"is\",\n\t\"iselect\",\n\t\"ismaili\",\n\t\"ist\",\n\t\"istanbul\",\n\t\"it\",\n\t\"itau\",\n\t\"itv\",\n\t\"iveco\",\n\t\"iwc\",\n\t\"jaguar\",\n\t\"java\",\n\t\"jcb\",\n\t\"jcp\",\n\t\"je\",\n\t\"jeep\",\n\t\"jetzt\",\n\t\"jewelry\",\n\t\"jio\",\n\t\"jlc\",\n\t\"jll\",\n\t\"jm\",\n\t\"jmp\",\n\t\"jnj\",\n\t\"jo\",\n\t\"jobs\",\n\t\"joburg\",\n\t\"jot\",\n\t\"joy\",\n\t\"jp\",\n\t\"jpmorgan\",\n\t\"jprs\",\n\t\"juegos\",\n\t\"juniper\",\n\t\"kaufen\",\n\t\"kddi\",\n\t\"ke\",\n\t\"kerryhotels\",\n\t\"kerrylogistics\",\n\t\"kerryproperties\",\n\t\"kfh\",\n\t\"kg\",\n\t\"kh\",\n\t\"ki\",\n\t\"kia\",\n\t\"kim\",\n\t\"kinder\",\n\t\"kindle\",\n\t
\"kitchen\",\n\t\"kiwi\",\n\t\"km\",\n\t\"kn\",\n\t\"koeln\",\n\t\"komatsu\",\n\t\"kosher\",\n\t\"kp\",\n\t\"kpmg\",\n\t\"kpn\",\n\t\"kr\",\n\t\"krd\",\n\t\"kred\",\n\t\"kuokgroup\",\n\t\"kw\",\n\t\"ky\",\n\t\"kyoto\",\n\t\"kz\",\n\t\"la\",\n\t\"lacaixa\",\n\t\"ladbrokes\",\n\t\"lamborghini\",\n\t\"lamer\",\n\t\"lancaster\",\n\t\"lancia\",\n\t\"lancome\",\n\t\"land\",\n\t\"landrover\",\n\t\"lanxess\",\n\t\"lasalle\",\n\t\"lat\",\n\t\"latino\",\n\t\"latrobe\",\n\t\"law\",\n\t\"lawyer\",\n\t\"lb\",\n\t\"lc\",\n\t\"lds\",\n\t\"lease\",\n\t\"leclerc\",\n\t\"lefrak\",\n\t\"legal\",\n\t\"lego\",\n\t\"lexus\",\n\t\"lgbt\",\n\t\"li\",\n\t\"liaison\",\n\t\"lidl\",\n\t\"life\",\n\t\"lifeinsurance\",\n\t\"lifestyle\",\n\t\"lighting\",\n\t\"like\",\n\t\"lilly\",\n\t\"limited\",\n\t\"limo\",\n\t\"lincoln\",\n\t\"linde\",\n\t\"link\",\n\t\"lipsy\",\n\t\"live\",\n\t\"living\",\n\t\"lixil\",\n\t\"lk\",\n\t\"loan\",\n\t\"loans\",\n\t\"locker\",\n\t\"locus\",\n\t\"loft\",\n\t\"lol\",\n\t\"london\",\n\t\"lotte\",\n\t\"lotto\",\n\t\"love\",\n\t\"lpl\",\n\t\"lplfinancial\",\n\t\"lr\",\n\t\"ls\",\n\t\"lt\",\n\t\"ltd\",\n\t\"ltda\",\n\t\"lu\",\n\t\"lundbeck\",\n\t\"lupin\",\n\t\"luxe\",\n\t\"luxury\",\n\t\"lv\",\n\t\"ly\",\n\t\"ma\",\n\t\"macys\",\n\t\"madrid\",\n\t\"maif\",\n\t\"maison\",\n\t\"makeup\",\n\t\"man\",\n\t\"management\",\n\t\"mango\",\n\t\"map\",\n\t\"market\",\n\t\"marketing\",\n\t\"markets\",\n\t\"marriott\",\n\t\"marshalls\",\n\t\"maserati\",\n\t\"mattel\",\n\t\"mba\",\n\t\"mc\",\n\t\"mcd\",\n\t\"mcdonalds\",\n\t\"mckinsey\",\n\t\"md\",\n\t\"me\",\n\t\"med\",\n\t\"media\",\n\t\"meet\",\n\t\"melbourne\",\n\t\"meme\",\n\t\"memorial\",\n\t\"men\",\n\t\"menu\",\n\t\"meo\",\n\t\"merckmsd\",\n\t\"metlife\",\n\t\"mg\",\n\t\"mh\",\n\t\"miami\",\n\t\"microsoft\",\n\t\"mil\",\n\t\"mini\",\n\t\"mint\",\n\t\"mit\",\n\t\"mitsubishi\",\n\t\"mk\",\n\t\"ml\",\n\t\"mlb\",\n\t\"mls\",\n\t\"mm\",\n\t\"mma\",\n\t\"mn\",\n\t\"mo\",\n\t\"mobi\",\n\t\"mobile\",\n\t\"mobily\",\n\t\"moda\",\n\t\"
moe\",\n\t\"moi\",\n\t\"mom\",\n\t\"monash\",\n\t\"money\",\n\t\"monster\",\n\t\"montblanc\",\n\t\"mopar\",\n\t\"mormon\",\n\t\"mortgage\",\n\t\"moscow\",\n\t\"moto\",\n\t\"motorcycles\",\n\t\"mov\",\n\t\"movie\",\n\t\"movistar\",\n\t\"mp\",\n\t\"mq\",\n\t\"mr\",\n\t\"ms\",\n\t\"msd\",\n\t\"mt\",\n\t\"mtn\",\n\t\"mtpc\",\n\t\"mtr\",\n\t\"mu\",\n\t\"museum\",\n\t\"mutual\",\n\t\"mv\",\n\t\"mw\",\n\t\"mx\",\n\t\"my\",\n\t\"mz\",\n\t\"na\",\n\t\"nab\",\n\t\"nadex\",\n\t\"nagoya\",\n\t\"name\",\n\t\"nationwide\",\n\t\"natura\",\n\t\"navy\",\n\t\"nba\",\n\t\"nc\",\n\t\"ne\",\n\t\"nec\",\n\t\"net\",\n\t\"netbank\",\n\t\"netflix\",\n\t\"network\",\n\t\"neustar\",\n\t\"new\",\n\t\"newholland\",\n\t\"news\",\n\t\"next\",\n\t\"nextdirect\",\n\t\"nexus\",\n\t\"nf\",\n\t\"nfl\",\n\t\"ng\",\n\t\"ngo\",\n\t\"nhk\",\n\t\"ni\",\n\t\"nico\",\n\t\"nike\",\n\t\"nikon\",\n\t\"ninja\",\n\t\"nissan\",\n\t\"nissay\",\n\t\"nl\",\n\t\"no\",\n\t\"nokia\",\n\t\"northwesternmutual\",\n\t\"norton\",\n\t\"now\",\n\t\"nowruz\",\n\t\"nowtv\",\n\t\"np\",\n\t\"nr\",\n\t\"nra\",\n\t\"nrw\",\n\t\"ntt\",\n\t\"nu\",\n\t\"nyc\",\n\t\"nz\",\n\t\"obi\",\n\t\"observer\",\n\t\"off\",\n\t\"office\",\n\t\"okinawa\",\n\t\"olayan\",\n\t\"olayangroup\",\n\t\"oldnavy\",\n\t\"ollo\",\n\t\"om\",\n\t\"omega\",\n\t\"one\",\n\t\"ong\",\n\t\"onion\",\n\t\"onl\",\n\t\"online\",\n\t\"onyourside\",\n\t\"ooo\",\n\t\"open\",\n\t\"oracle\",\n\t\"orange\",\n\t\"org\",\n\t\"organic\",\n\t\"origins\",\n\t\"osaka\",\n\t\"otsuka\",\n\t\"ott\",\n\t\"ovh\",\n\t\"pa\",\n\t\"page\",\n\t\"pamperedchef\",\n\t\"panasonic\",\n\t\"panerai\",\n\t\"paris\",\n\t\"pars\",\n\t\"partners\",\n\t\"parts\",\n\t\"party\",\n\t\"passagens\",\n\t\"pay\",\n\t\"pccw\",\n\t\"pe\",\n\t\"pet\",\n\t\"pf\",\n\t\"pfizer\",\n\t\"pg\",\n\t\"ph\",\n\t\"pharmacy\",\n\t\"phd\",\n\t\"philips\",\n\t\"phone\",\n\t\"photo\",\n\t\"photography\",\n\t\"photos\",\n\t\"physio\",\n\t\"piaget\",\n\t\"pics\",\n\t\"pictet\",\n\t\"pictures\",\n\t\"pid\",\n\t\"pin\",\n\t\"ping\",
\n\t\"pink\",\n\t\"pioneer\",\n\t\"pizza\",\n\t\"pk\",\n\t\"pl\",\n\t\"place\",\n\t\"play\",\n\t\"playstation\",\n\t\"plumbing\",\n\t\"plus\",\n\t\"pm\",\n\t\"pn\",\n\t\"pnc\",\n\t\"pohl\",\n\t\"poker\",\n\t\"politie\",\n\t\"porn\",\n\t\"post\",\n\t\"pr\",\n\t\"pramerica\",\n\t\"praxi\",\n\t\"press\",\n\t\"prime\",\n\t\"pro\",\n\t\"prod\",\n\t\"productions\",\n\t\"prof\",\n\t\"progressive\",\n\t\"promo\",\n\t\"properties\",\n\t\"property\",\n\t\"protection\",\n\t\"pru\",\n\t\"prudential\",\n\t\"ps\",\n\t\"pt\",\n\t\"pub\",\n\t\"pw\",\n\t\"pwc\",\n\t\"py\",\n\t\"qa\",\n\t\"qpon\",\n\t\"quebec\",\n\t\"quest\",\n\t\"qvc\",\n\t\"racing\",\n\t\"radio\",\n\t\"raid\",\n\t\"re\",\n\t\"read\",\n\t\"realestate\",\n\t\"realtor\",\n\t\"realty\",\n\t\"recipes\",\n\t\"red\",\n\t\"redstone\",\n\t\"redumbrella\",\n\t\"rehab\",\n\t\"reise\",\n\t\"reisen\",\n\t\"reit\",\n\t\"reliance\",\n\t\"ren\",\n\t\"rent\",\n\t\"rentals\",\n\t\"repair\",\n\t\"report\",\n\t\"republican\",\n\t\"rest\",\n\t\"restaurant\",\n\t\"review\",\n\t\"reviews\",\n\t\"rexroth\",\n\t\"rich\",\n\t\"richardli\",\n\t\"ricoh\",\n\t\"rightathome\",\n\t\"ril\",\n\t\"rio\",\n\t\"rip\",\n\t\"rmit\",\n\t\"ro\",\n\t\"rocher\",\n\t\"rocks\",\n\t\"rodeo\",\n\t\"rogers\",\n\t\"room\",\n\t\"rs\",\n\t\"rsvp\",\n\t\"ru\",\n\t\"rugby\",\n\t\"ruhr\",\n\t\"run\",\n\t\"rw\",\n\t\"rwe\",\n\t\"ryukyu\",\n\t\"sa\",\n\t\"saarland\",\n\t\"safe\",\n\t\"safety\",\n\t\"sakura\",\n\t\"sale\",\n\t\"salon\",\n\t\"samsclub\",\n\t\"samsung\",\n\t\"sandvik\",\n\t\"sandvikcoromant\",\n\t\"sanofi\",\n\t\"sap\",\n\t\"sapo\",\n\t\"sarl\",\n\t\"sas\",\n\t\"save\",\n\t\"saxo\",\n\t\"sb\",\n\t\"sbi\",\n\t\"sbs\",\n\t\"sc\",\n\t\"sca\",\n\t\"scb\",\n\t\"schaeffler\",\n\t\"schmidt\",\n\t\"scholarships\",\n\t\"school\",\n\t\"schule\",\n\t\"schwarz\",\n\t\"science\",\n\t\"scjohnson\",\n\t\"scor\",\n\t\"scot\",\n\t\"sd\",\n\t\"se\",\n\t\"search\",\n\t\"seat\",\n\t\"secure\",\n\t\"security\",\n\t\"seek\",\n\t\"select\",\n\t\"sener\",\n\t\"services\",\n\t\"s
es\",\n\t\"seven\",\n\t\"sew\",\n\t\"sex\",\n\t\"sexy\",\n\t\"sfr\",\n\t\"sg\",\n\t\"sh\",\n\t\"shangrila\",\n\t\"sharp\",\n\t\"shaw\",\n\t\"shell\",\n\t\"shia\",\n\t\"shiksha\",\n\t\"shoes\",\n\t\"shop\",\n\t\"shopping\",\n\t\"shouji\",\n\t\"show\",\n\t\"showtime\",\n\t\"shriram\",\n\t\"si\",\n\t\"silk\",\n\t\"sina\",\n\t\"singles\",\n\t\"site\",\n\t\"sj\",\n\t\"sk\",\n\t\"ski\",\n\t\"skin\",\n\t\"sky\",\n\t\"skype\",\n\t\"sl\",\n\t\"sling\",\n\t\"sm\",\n\t\"smart\",\n\t\"smile\",\n\t\"sn\",\n\t\"sncf\",\n\t\"so\",\n\t\"soccer\",\n\t\"social\",\n\t\"softbank\",\n\t\"software\",\n\t\"sohu\",\n\t\"solar\",\n\t\"solutions\",\n\t\"song\",\n\t\"sony\",\n\t\"soy\",\n\t\"space\",\n\t\"spiegel\",\n\t\"spot\",\n\t\"spreadbetting\",\n\t\"sr\",\n\t\"srl\",\n\t\"srt\",\n\t\"st\",\n\t\"stada\",\n\t\"staples\",\n\t\"star\",\n\t\"starhub\",\n\t\"statebank\",\n\t\"statefarm\",\n\t\"statoil\",\n\t\"stc\",\n\t\"stcgroup\",\n\t\"stockholm\",\n\t\"storage\",\n\t\"store\",\n\t\"stream\",\n\t\"studio\",\n\t\"study\",\n\t\"style\",\n\t\"su\",\n\t\"sucks\",\n\t\"supplies\",\n\t\"supply\",\n\t\"support\",\n\t\"surf\",\n\t\"surgery\",\n\t\"suzuki\",\n\t\"sv\",\n\t\"swatch\",\n\t\"swiftcover\",\n\t\"swiss\",\n\t\"sx\",\n\t\"sy\",\n\t\"sydney\",\n\t\"symantec\",\n\t\"systems\",\n\t\"sz\",\n\t\"tab\",\n\t\"taipei\",\n\t\"talk\",\n\t\"taobao\",\n\t\"target\",\n\t\"tatamotors\",\n\t\"tatar\",\n\t\"tattoo\",\n\t\"tax\",\n\t\"taxi\",\n\t\"tc\",\n\t\"tci\",\n\t\"td\",\n\t\"tdk\",\n\t\"team\",\n\t\"tech\",\n\t\"technology\",\n\t\"tel\",\n\t\"telecity\",\n\t\"telefonica\",\n\t\"temasek\",\n\t\"tennis\",\n\t\"teva\",\n\t\"tf\",\n\t\"tg\",\n\t\"th\",\n\t\"thd\",\n\t\"theater\",\n\t\"theatre\",\n\t\"tiaa\",\n\t\"tickets\",\n\t\"tienda\",\n\t\"tiffany\",\n\t\"tips\",\n\t\"tires\",\n\t\"tirol\",\n\t\"tj\",\n\t\"tjmaxx\",\n\t\"tjx\",\n\t\"tk\",\n\t\"tkmaxx\",\n\t\"tl\",\n\t\"tm\",\n\t\"tmall\",\n\t\"tn\",\n\t\"to\",\n\t\"today\",\n\t\"tokyo\",\n\t\"tools\",\n\t\"top\",\n\t\"toray\",\n\t\"toshiba\",\n\t\"to
tal\",\n\t\"tours\",\n\t\"town\",\n\t\"toyota\",\n\t\"toys\",\n\t\"tr\",\n\t\"trade\",\n\t\"trading\",\n\t\"training\",\n\t\"travel\",\n\t\"travelchannel\",\n\t\"travelers\",\n\t\"travelersinsurance\",\n\t\"trust\",\n\t\"trv\",\n\t\"tt\",\n\t\"tube\",\n\t\"tui\",\n\t\"tunes\",\n\t\"tushu\",\n\t\"tv\",\n\t\"tvs\",\n\t\"tw\",\n\t\"tz\",\n\t\"ua\",\n\t\"ubank\",\n\t\"ubs\",\n\t\"uconnect\",\n\t\"ug\",\n\t\"uk\",\n\t\"unicom\",\n\t\"university\",\n\t\"uno\",\n\t\"uol\",\n\t\"ups\",\n\t\"us\",\n\t\"uy\",\n\t\"uz\",\n\t\"va\",\n\t\"vacations\",\n\t\"vana\",\n\t\"vanguard\",\n\t\"vc\",\n\t\"ve\",\n\t\"vegas\",\n\t\"ventures\",\n\t\"verisign\",\n\t\"versicherung\",\n\t\"vet\",\n\t\"vg\",\n\t\"vi\",\n\t\"viajes\",\n\t\"video\",\n\t\"vig\",\n\t\"viking\",\n\t\"villas\",\n\t\"vin\",\n\t\"vip\",\n\t\"virgin\",\n\t\"visa\",\n\t\"vision\",\n\t\"vista\",\n\t\"vistaprint\",\n\t\"viva\",\n\t\"vivo\",\n\t\"vlaanderen\",\n\t\"vn\",\n\t\"vodka\",\n\t\"volkswagen\",\n\t\"volvo\",\n\t\"vote\",\n\t\"voting\",\n\t\"voto\",\n\t\"voyage\",\n\t\"vu\",\n\t\"vuelos\",\n\t\"wales\",\n\t\"walmart\",\n\t\"walter\",\n\t\"wang\",\n\t\"wanggou\",\n\t\"warman\",\n\t\"watch\",\n\t\"watches\",\n\t\"weather\",\n\t\"weatherchannel\",\n\t\"webcam\",\n\t\"weber\",\n\t\"website\",\n\t\"wed\",\n\t\"wedding\",\n\t\"weibo\",\n\t\"weir\",\n\t\"wf\",\n\t\"whoswho\",\n\t\"wien\",\n\t\"wiki\",\n\t\"williamhill\",\n\t\"win\",\n\t\"windows\",\n\t\"wine\",\n\t\"winners\",\n\t\"wme\",\n\t\"wolterskluwer\",\n\t\"woodside\",\n\t\"work\",\n\t\"works\",\n\t\"world\",\n\t\"wow\",\n\t\"ws\",\n\t\"wtc\",\n\t\"wtf\",\n\t\"xbox\",\n\t\"xerox\",\n\t\"xfinity\",\n\t\"xihuan\",\n\t\"xin\",\n\t\"xn--11b4c3d\",\n\t\"xn--1ck2e1b\",\n\t\"xn--1qqw23a\",\n\t\"xn--2scrj9c\",\n\t\"xn--30rr7y\",\n\t\"xn--3bst00m\",\n\t\"xn--3ds443g\",\n\t\"xn--3e0b707e\",\n\t\"xn--3hcrj9c\",\n\t\"xn--3oq18vl8pn36a\",\n\t\"xn--3pxu8k\",\n\t\"xn--42c2d9a\",\n\t\"xn--45br5cyl\",\n\t\"xn--45brj9c\",\n\t\"xn--45q11c\",\n\t\"xn--4gbrim\",\n\t\"xn--54b7fta0cc\",\
n\t\"xn--55qw42g\",\n\t\"xn--55qx5d\",\n\t\"xn--5su34j936bgsg\",\n\t\"xn--5tzm5g\",\n\t\"xn--6frz82g\",\n\t\"xn--6qq986b3xl\",\n\t\"xn--80adxhks\",\n\t\"xn--80ao21a\",\n\t\"xn--80aqecdr1a\",\n\t\"xn--80asehdb\",\n\t\"xn--80aswg\",\n\t\"xn--8y0a063a\",\n\t\"xn--90a3ac\",\n\t\"xn--90ae\",\n\t\"xn--90ais\",\n\t\"xn--9dbq2a\",\n\t\"xn--9et52u\",\n\t\"xn--9krt00a\",\n\t\"xn--b4w605ferd\",\n\t\"xn--bck1b9a5dre4c\",\n\t\"xn--c1avg\",\n\t\"xn--c2br7g\",\n\t\"xn--cck2b3b\",\n\t\"xn--cg4bki\",\n\t\"xn--clchc0ea0b2g2a9gcd\",\n\t\"xn--czr694b\",\n\t\"xn--czrs0t\",\n\t\"xn--czru2d\",\n\t\"xn--d1acj3b\",\n\t\"xn--d1alf\",\n\t\"xn--e1a4c\",\n\t\"xn--eckvdtc9d\",\n\t\"xn--efvy88h\",\n\t\"xn--estv75g\",\n\t\"xn--fct429k\",\n\t\"xn--fhbei\",\n\t\"xn--fiq228c5hs\",\n\t\"xn--fiq64b\",\n\t\"xn--fiqs8s\",\n\t\"xn--fiqz9s\",\n\t\"xn--fjq720a\",\n\t\"xn--flw351e\",\n\t\"xn--fpcrj9c3d\",\n\t\"xn--fzc2c9e2c\",\n\t\"xn--fzys8d69uvgm\",\n\t\"xn--g2xx48c\",\n\t\"xn--gckr3f0f\",\n\t\"xn--gecrj9c\",\n\t\"xn--gk3at1e\",\n\t\"xn--h2breg3eve\",\n\t\"xn--h2brj9c\",\n\t\"xn--h2brj9c8c\",\n\t\"xn--hxt814e\",\n\t\"xn--i1b6b1a6a2e\",\n\t\"xn--imr513n\",\n\t\"xn--io0a7i\",\n\t\"xn--j1aef\",\n\t\"xn--j1amh\",\n\t\"xn--j6w193g\",\n\t\"xn--jlq61u9w7b\",\n\t\"xn--jvr189m\",\n\t\"xn--kcrx77d1x4a\",\n\t\"xn--kprw13d\",\n\t\"xn--kpry57d\",\n\t\"xn--kpu716f\",\n\t\"xn--kput3i\",\n\t\"xn--l1acc\",\n\t\"xn--lgbbat1ad8j\",\n\t\"xn--mgb2ddes\",\n\t\"xn--mgb9awbf\",\n\t\"xn--mgba3a3ejt\",\n\t\"xn--mgba3a4f16a\",\n\t\"xn--mgba3a4fra\",\n\t\"xn--mgba7c0bbn0a\",\n\t\"xn--mgbaakc7dvf\",\n\t\"xn--mgbaam7a8h\",\n\t\"xn--mgbab2bd\",\n\t\"xn--mgbai9a5eva00b\",\n\t\"xn--mgbai9azgqp6j\",\n\t\"xn--mgbayh7gpa\",\n\t\"xn--mgbb9fbpob\",\n\t\"xn--mgbbh1a71e\",\n\t\"xn--mgbc0a9azcg\",\n\t\"xn--mgbca7dzdo\",\n\t\"xn--mgberp4a5d4a87g\",\n\t\"xn--mgberp4a5d4ar\",\n\t\"xn--mgbgu82a\",\n\t\"xn--mgbi4ecexp\",\n\t\"xn--mgbpl2fh\",\n\t\"xn--mgbqly7c0a67fbc\",\n\t\"xn--mgbqly7cvafr\",\n\t\"xn--mgbt3dhd\",\n\t\"xn--mgbtf8fl\",\n\t\"xn--mgbtx2b
\",\n\t\"xn--mgbx4cd0ab\",\n\t\"xn--mix082f\",\n\t\"xn--mix891f\",\n\t\"xn--mk1bu44c\",\n\t\"xn--mxtq1m\",\n\t\"xn--ngbc5azd\",\n\t\"xn--ngbe9e0a\",\n\t\"xn--ngbrx\",\n\t\"xn--nnx388a\",\n\t\"xn--node\",\n\t\"xn--nqv7f\",\n\t\"xn--nqv7fs00ema\",\n\t\"xn--nyqy26a\",\n\t\"xn--o3cw4h\",\n\t\"xn--ogbpf8fl\",\n\t\"xn--p1acf\",\n\t\"xn--p1ai\",\n\t\"xn--pbt977c\",\n\t\"xn--pgbs0dh\",\n\t\"xn--pssy2u\",\n\t\"xn--q9jyb4c\",\n\t\"xn--qcka1pmc\",\n\t\"xn--qxam\",\n\t\"xn--rhqv96g\",\n\t\"xn--rovu88b\",\n\t\"xn--rvc1e0am3e\",\n\t\"xn--s9brj9c\",\n\t\"xn--ses554g\",\n\t\"xn--t60b56a\",\n\t\"xn--tckwe\",\n\t\"xn--tiq49xqyj\",\n\t\"xn--unup4y\",\n\t\"xn--vermgensberater-ctb\",\n\t\"xn--vermgensberatung-pwb\",\n\t\"xn--vhquv\",\n\t\"xn--vuq861b\",\n\t\"xn--w4r85el8fhu5dnra\",\n\t\"xn--w4rs40l\",\n\t\"xn--wgbh1c\",\n\t\"xn--wgbl6a\",\n\t\"xn--xhq521b\",\n\t\"xn--xkc2al3hye2a\",\n\t\"xn--xkc2dl3a5ee0h\",\n\t\"xn--y9a3aq\",\n\t\"xn--yfro4i67o\",\n\t\"xn--ygbi2ammx\",\n\t\"xn--zfr164b\",\n\t\"xperia\",\n\t\"xxx\",\n\t\"xyz\",\n\t\"yachts\",\n\t\"yahoo\",\n\t\"yamaxun\",\n\t\"yandex\",\n\t\"ye\",\n\t\"yodobashi\",\n\t\"yoga\",\n\t\"yokohama\",\n\t\"you\",\n\t\"youtube\",\n\t\"yt\",\n\t\"yun\",\n\t\"za\",\n\t\"zappos\",\n\t\"zara\",\n\t\"zero\",\n\t\"zip\",\n\t\"zippo\",\n\t\"zm\",\n\t\"zone\",\n\t\"zuerich\",\n\t\"zw\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"net\",\n\t\"org\",\n\t\"nom\",\n\t\"ac\",\n\t\"blogspot\",\n\t\"co\",\n\t\"gov\",\n\t\"mil\",\n\t\"net\",\n\t\"nom\",\n\t\"org\",\n\t\"sch\",\n\t\"accident-investigation\",\n\t\"accident-prevention\",\n\t\"aerobatic\",\n\t\"aeroclub\",\n\t\"aerodrome\",\n\t\"agents\",\n\t\"air-surveillance\",\n\t\"air-traffic-control\",\n\t\"aircraft\",\n\t\"airline\",\n\t\"airport\",\n\t\"airtraffic\",\n\t\"ambulance\",\n\t\"amusement\",\n\t\"association\",\n\t\"author\",\n\t\"ballooning\",\n\t\"broker\",\n\t\"caa\",\n\t\"cargo\",\n\t\"catering\",\n\t\"certification\",\n\t\"championship\",\n\t\"charter\",\n\t\"civilaviation\",\n\t\"c
lub\",\n\t\"conference\",\n\t\"consultant\",\n\t\"consulting\",\n\t\"control\",\n\t\"council\",\n\t\"crew\",\n\t\"design\",\n\t\"dgca\",\n\t\"educator\",\n\t\"emergency\",\n\t\"engine\",\n\t\"engineer\",\n\t\"entertainment\",\n\t\"equipment\",\n\t\"exchange\",\n\t\"express\",\n\t\"federation\",\n\t\"flight\",\n\t\"freight\",\n\t\"fuel\",\n\t\"gliding\",\n\t\"government\",\n\t\"groundhandling\",\n\t\"group\",\n\t\"hanggliding\",\n\t\"homebuilt\",\n\t\"insurance\",\n\t\"journal\",\n\t\"journalist\",\n\t\"leasing\",\n\t\"logistics\",\n\t\"magazine\",\n\t\"maintenance\",\n\t\"media\",\n\t\"microlight\",\n\t\"modelling\",\n\t\"navigation\",\n\t\"parachuting\",\n\t\"paragliding\",\n\t\"passenger-association\",\n\t\"pilot\",\n\t\"press\",\n\t\"production\",\n\t\"recreation\",\n\t\"repbody\",\n\t\"res\",\n\t\"research\",\n\t\"rotorcraft\",\n\t\"safety\",\n\t\"scientist\",\n\t\"services\",\n\t\"show\",\n\t\"skydiving\",\n\t\"software\",\n\t\"student\",\n\t\"trader\",\n\t\"trading\",\n\t\"trainer\",\n\t\"union\",\n\t\"workinggroup\",\n\t\"works\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"co\",\n\t\"com\",\n\t\"net\",\n\t\"nom\",\n\t\"org\",\n\t\"com\",\n\t\"net\",\n\t\"nom\",\n\t\"off\",\n\t\"org\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"net\",\n\t\"nom\",\n\t\"org\",\n\t\"blogspot\",\n\t\"co\",\n\t\"ed\",\n\t\"gv\",\n\t\"it\",\n\t\"og\",\n\t\"pb\",\n\t\"com\",\n\t\"edu\",\n\t\"gob\",\n\t\"gov\",\n\t\"int\",\n\t\"mil\",\n\t\"musica\",\n\t\"net\",\n\t\"org\",\n\t\"tur\",\n\t\"blogspot\",\n\t\"e164\",\n\t\"in-addr\",\n\t\"ip6\",\n\t\"iris\",\n\t\"uri\",\n\t\"urn\",\n\t\"gov\",\n\t\"cloudns\",\n\t\"12hp\",\n\t\"2ix\",\n\t\"4lima\",\n\t\"ac\",\n\t\"biz\",\n\t\"co\",\n\t\"futurecms\",\n\t\"futurehosting\",\n\t\"futuremailing\",\n\t\"gv\",\n\t\"info\",\n\t\"lima-city\",\n\t\"or\",\n\t\"ortsinfo\",\n\t\"priv\",\n\t\"blogspot\",\n\t\"ex\",\n\t\"kunden\",\n\t\"act\",\n\t\"asn\",\n\t\"com\",\n\t\"conf\",\n\t\"edu\",\n\t\"gov\",\n\t
\"id\",\n\t\"info\",\n\t\"net\",\n\t\"nsw\",\n\t\"nt\",\n\t\"org\",\n\t\"oz\",\n\t\"qld\",\n\t\"sa\",\n\t\"tas\",\n\t\"vic\",\n\t\"wa\",\n\t\"blogspot\",\n\t\"act\",\n\t\"nsw\",\n\t\"nt\",\n\t\"qld\",\n\t\"sa\",\n\t\"tas\",\n\t\"vic\",\n\t\"wa\",\n\t\"qld\",\n\t\"sa\",\n\t\"tas\",\n\t\"vic\",\n\t\"wa\",\n\t\"com\",\n\t\"biz\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"info\",\n\t\"int\",\n\t\"mil\",\n\t\"name\",\n\t\"net\",\n\t\"org\",\n\t\"pp\",\n\t\"pro\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"net\",\n\t\"org\",\n\t\"biz\",\n\t\"co\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"info\",\n\t\"net\",\n\t\"org\",\n\t\"store\",\n\t\"tv\",\n\t\"ac\",\n\t\"blogspot\",\n\t\"transurl\",\n\t\"gov\",\n\t\"0\",\n\t\"1\",\n\t\"2\",\n\t\"3\",\n\t\"4\",\n\t\"5\",\n\t\"6\",\n\t\"7\",\n\t\"8\",\n\t\"9\",\n\t\"a\",\n\t\"b\",\n\t\"barsy\",\n\t\"blogspot\",\n\t\"c\",\n\t\"d\",\n\t\"e\",\n\t\"f\",\n\t\"g\",\n\t\"h\",\n\t\"i\",\n\t\"j\",\n\t\"k\",\n\t\"l\",\n\t\"m\",\n\t\"n\",\n\t\"o\",\n\t\"p\",\n\t\"q\",\n\t\"r\",\n\t\"s\",\n\t\"t\",\n\t\"u\",\n\t\"v\",\n\t\"w\",\n\t\"x\",\n\t\"y\",\n\t\"z\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"co\",\n\t\"com\",\n\t\"edu\",\n\t\"or\",\n\t\"org\",\n\t\"cloudns\",\n\t\"dscloud\",\n\t\"dyndns\",\n\t\"for-better\",\n\t\"for-more\",\n\t\"for-some\",\n\t\"for-the\",\n\t\"mmafan\",\n\t\"myftp\",\n\t\"no-ip\",\n\t\"selfip\",\n\t\"webhop\",\n\t\"asso\",\n\t\"barreau\",\n\t\"blogspot\",\n\t\"gouv\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"com\",\n\t\"edu\",\n\t\"gob\",\n\t\"gov\",\n\t\"int\",\n\t\"mil\",\n\t\"net\",\n\t\"org\",\n\t\"tv\",\n\t\"adm\",\n\t\"adv\",\n\t\"agr\",\n\t\"am\",\n\t\"arq\",\n\t\"art\",\n\t\"ato\",\n\t\"b\",\n\t\"belem\",\n\t\"bio\",\n\t\"blog\",\n\t\"bmd\",\n\t\"cim\",\n\t\"cng\",\n\t\"cnt\",\n\t\"com\",\n\t\"coop\",\n\t\"cri\",\n\t\"def\",\n\t\"ecn\",\n\t\"eco\",\n\t\"edu\",\n\t\"emp\",\n\t\"eng\",\n\t\"esp\",\n\t\"etc\",\n\t\"eti\",\n\t\"far\",\n\t\"flog\
",\n\t\"floripa\",\n\t\"fm\",\n\t\"fnd\",\n\t\"fot\",\n\t\"fst\",\n\t\"g12\",\n\t\"ggf\",\n\t\"gov\",\n\t\"imb\",\n\t\"ind\",\n\t\"inf\",\n\t\"jampa\",\n\t\"jor\",\n\t\"jus\",\n\t\"leg\",\n\t\"lel\",\n\t\"mat\",\n\t\"med\",\n\t\"mil\",\n\t\"mp\",\n\t\"mus\",\n\t\"net\",\n\t\"nom\",\n\t\"not\",\n\t\"ntr\",\n\t\"odo\",\n\t\"org\",\n\t\"poa\",\n\t\"ppg\",\n\t\"pro\",\n\t\"psc\",\n\t\"psi\",\n\t\"qsl\",\n\t\"radio\",\n\t\"rec\",\n\t\"recife\",\n\t\"slg\",\n\t\"srv\",\n\t\"taxi\",\n\t\"teo\",\n\t\"tmp\",\n\t\"trd\",\n\t\"tur\",\n\t\"tv\",\n\t\"vet\",\n\t\"vix\",\n\t\"vlog\",\n\t\"wiki\",\n\t\"zlg\",\n\t\"blogspot\",\n\t\"ac\",\n\t\"al\",\n\t\"am\",\n\t\"ap\",\n\t\"ba\",\n\t\"ce\",\n\t\"df\",\n\t\"es\",\n\t\"go\",\n\t\"ma\",\n\t\"mg\",\n\t\"ms\",\n\t\"mt\",\n\t\"pa\",\n\t\"pb\",\n\t\"pe\",\n\t\"pi\",\n\t\"pr\",\n\t\"rj\",\n\t\"rn\",\n\t\"ro\",\n\t\"rr\",\n\t\"rs\",\n\t\"sc\",\n\t\"se\",\n\t\"sp\",\n\t\"to\",\n\t\"ac\",\n\t\"al\",\n\t\"am\",\n\t\"ap\",\n\t\"ba\",\n\t\"ce\",\n\t\"df\",\n\t\"es\",\n\t\"go\",\n\t\"ma\",\n\t\"mg\",\n\t\"ms\",\n\t\"mt\",\n\t\"pa\",\n\t\"pb\",\n\t\"pe\",\n\t\"pi\",\n\t\"pr\",\n\t\"rj\",\n\t\"rn\",\n\t\"ro\",\n\t\"rr\",\n\t\"rs\",\n\t\"sc\",\n\t\"se\",\n\t\"sp\",\n\t\"to\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"we\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"co\",\n\t\"org\",\n\t\"com\",\n\t\"gov\",\n\t\"mil\",\n\t\"nym\",\n\t\"of\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"nym\",\n\t\"org\",\n\t\"za\",\n\t\"ab\",\n\t\"awdev\",\n\t\"bc\",\n\t\"blogspot\",\n\t\"co\",\n\t\"gc\",\n\t\"mb\",\n\t\"nb\",\n\t\"nf\",\n\t\"nl\",\n\t\"no-ip\",\n\t\"ns\",\n\t\"nt\",\n\t\"nu\",\n\t\"on\",\n\t\"pe\",\n\t\"qc\",\n\t\"sk\",\n\t\"yk\",\n\t\"cloudns\",\n\t\"fantasyleague\",\n\t\"ftpaccess\",\n\t\"game-server\",\n\t\"myphotos\",\n\t\"scrapping\",\n\t\"twmail\",\n\t\"gov\",\n\t\"blogspot\",\n\t\"12hp\",\n\t\"2ix\",\n\t\"4lima\",\n\t\"blogspot\",\n\t\"gotdns\",\n\t\"lima-city\",\n\t\"squa
re7\",\n\t\"ac\",\n\t\"asso\",\n\t\"co\",\n\t\"com\",\n\t\"ed\",\n\t\"edu\",\n\t\"go\",\n\t\"gouv\",\n\t\"int\",\n\t\"md\",\n\t\"net\",\n\t\"or\",\n\t\"org\",\n\t\"presse\",\n\t\"xn--aroport-bya\",\n\t\"www\",\n\t\"blogspot\",\n\t\"co\",\n\t\"gob\",\n\t\"gov\",\n\t\"mil\",\n\t\"nom\",\n\t\"magentosite\",\n\t\"myfusion\",\n\t\"sensiosite\",\n\t\"statics\",\n\t\"trafficplex\",\n\t\"vapor\",\n\t\"cloudns\",\n\t\"co\",\n\t\"com\",\n\t\"gov\",\n\t\"net\",\n\t\"ac\",\n\t\"ah\",\n\t\"bj\",\n\t\"com\",\n\t\"cq\",\n\t\"edu\",\n\t\"fj\",\n\t\"gd\",\n\t\"gov\",\n\t\"gs\",\n\t\"gx\",\n\t\"gz\",\n\t\"ha\",\n\t\"hb\",\n\t\"he\",\n\t\"hi\",\n\t\"hk\",\n\t\"hl\",\n\t\"hn\",\n\t\"jl\",\n\t\"js\",\n\t\"jx\",\n\t\"ln\",\n\t\"mil\",\n\t\"mo\",\n\t\"net\",\n\t\"nm\",\n\t\"nx\",\n\t\"org\",\n\t\"qh\",\n\t\"sc\",\n\t\"sd\",\n\t\"sh\",\n\t\"sn\",\n\t\"sx\",\n\t\"tj\",\n\t\"tw\",\n\t\"xj\",\n\t\"xn--55qx5d\",\n\t\"xn--io0a7i\",\n\t\"xn--od0alg\",\n\t\"xz\",\n\t\"yn\",\n\t\"zj\",\n\t\"amazonaws\",\n\t\"cn-north-1\",\n\t\"compute\",\n\t\"eb\",\n\t\"elb\",\n\t\"s3\",\n\t\"cn-north-1\",\n\t\"arts\",\n\t\"com\",\n\t\"edu\",\n\t\"firm\",\n\t\"gov\",\n\t\"info\",\n\t\"int\",\n\t\"mil\",\n\t\"net\",\n\t\"nodum\",\n\t\"nom\",\n\t\"org\",\n\t\"rec\",\n\t\"web\",\n\t\"blogspot\",\n\t\"0emm\",\n\t\"1kapp\",\n\t\"3utilities\",\n\t\"4u\",\n\t\"africa\",\n\t\"alpha-myqnapcloud\",\n\t\"amazonaws\",\n\t\"appchizi\",\n\t\"applinzi\",\n\t\"appspot\",\n\t\"ar\",\n\t\"barsyonline\",\n\t\"betainabox\",\n\t\"bitballoon\",\n\t\"blogdns\",\n\t\"blogspot\",\n\t\"blogsyte\",\n\t\"bloxcms\",\n\t\"bounty-full\",\n\t\"bplaced\",\n\t\"br\",\n\t\"cechire\",\n\t\"ciscofreak\",\n\t\"cloudcontrolapp\",\n\t\"cloudcontrolled\",\n\t\"cn\",\n\t\"co\",\n\t\"codespot\",\n\t\"damnserver\",\n\t\"ddnsfree\",\n\t\"ddnsgeek\",\n\t\"ddnsking\",\n\t\"de\",\n\t\"dev-myqnapcloud\",\n\t\"ditchyourip\",\n\t\"dnsalias\",\n\t\"dnsdojo\",\n\t\"dnsiskinky\",\n\t\"doesntexist\",\n\t\"dontexist\",\n\t\"doomdns\",\n\t\"drayddns\",\n\t\"dreamhosters
\",\n\t\"dsmynas\",\n\t\"dyn-o-saur\",\n\t\"dynalias\",\n\t\"dyndns-at-home\",\n\t\"dyndns-at-work\",\n\t\"dyndns-blog\",\n\t\"dyndns-free\",\n\t\"dyndns-home\",\n\t\"dyndns-ip\",\n\t\"dyndns-mail\",\n\t\"dyndns-office\",\n\t\"dyndns-pics\",\n\t\"dyndns-remote\",\n\t\"dyndns-server\",\n\t\"dyndns-web\",\n\t\"dyndns-wiki\",\n\t\"dyndns-work\",\n\t\"dynns\",\n\t\"elasticbeanstalk\",\n\t\"est-a-la-maison\",\n\t\"est-a-la-masion\",\n\t\"est-le-patron\",\n\t\"est-mon-blogueur\",\n\t\"eu\",\n\t\"evennode\",\n\t\"familyds\",\n\t\"fbsbx\",\n\t\"firebaseapp\",\n\t\"firewall-gateway\",\n\t\"flynnhub\",\n\t\"freebox-os\",\n\t\"freeboxos\",\n\t\"from-ak\",\n\t\"from-al\",\n\t\"from-ar\",\n\t\"from-ca\",\n\t\"from-ct\",\n\t\"from-dc\",\n\t\"from-de\",\n\t\"from-fl\",\n\t\"from-ga\",\n\t\"from-hi\",\n\t\"from-ia\",\n\t\"from-id\",\n\t\"from-il\",\n\t\"from-in\",\n\t\"from-ks\",\n\t\"from-ky\",\n\t\"from-ma\",\n\t\"from-md\",\n\t\"from-mi\",\n\t\"from-mn\",\n\t\"from-mo\",\n\t\"from-ms\",\n\t\"from-mt\",\n\t\"from-nc\",\n\t\"from-nd\",\n\t\"from-ne\",\n\t\"from-nh\",\n\t\"from-nj\",\n\t\"from-nm\",\n\t\"from-nv\",\n\t\"from-oh\",\n\t\"from-ok\",\n\t\"from-or\",\n\t\"from-pa\",\n\t\"from-pr\",\n\t\"from-ri\",\n\t\"from-sc\",\n\t\"from-sd\",\n\t\"from-tn\",\n\t\"from-tx\",\n\t\"from-ut\",\n\t\"from-va\",\n\t\"from-vt\",\n\t\"from-wa\",\n\t\"from-wi\",\n\t\"from-wv\",\n\t\"from-wy\",\n\t\"gb\",\n\t\"geekgalaxy\",\n\t\"getmyip\",\n\t\"giize\",\n\t\"githubusercontent\",\n\t\"gleeze\",\n\t\"googleapis\",\n\t\"googlecode\",\n\t\"gotdns\",\n\t\"gotpantheon\",\n\t\"gr\",\n\t\"health-carereform\",\n\t\"herokuapp\",\n\t\"herokussl\",\n\t\"hk\",\n\t\"hobby-site\",\n\t\"homelinux\",\n\t\"homesecuritymac\",\n\t\"homesecuritypc\",\n\t\"homeunix\",\n\t\"hu\",\n\t\"iamallama\",\n\t\"is-a-anarchist\",\n\t\"is-a-blogger\",\n\t\"is-a-bookkeeper\",\n\t\"is-a-bulls-fan\",\n\t\"is-a-caterer\",\n\t\"is-a-chef\",\n\t\"is-a-conservative\",\n\t\"is-a-cpa\",\n\t\"is-a-cubicle-slave\",\n\t\"is-a-democrat\",\n
\t\"is-a-designer\",\n\t\"is-a-doctor\",\n\t\"is-a-financialadvisor\",\n\t\"is-a-geek\",\n\t\"is-a-green\",\n\t\"is-a-guru\",\n\t\"is-a-hard-worker\",\n\t\"is-a-hunter\",\n\t\"is-a-landscaper\",\n\t\"is-a-lawyer\",\n\t\"is-a-liberal\",\n\t\"is-a-libertarian\",\n\t\"is-a-llama\",\n\t\"is-a-musician\",\n\t\"is-a-nascarfan\",\n\t\"is-a-nurse\",\n\t\"is-a-painter\",\n\t\"is-a-personaltrainer\",\n\t\"is-a-photographer\",\n\t\"is-a-player\",\n\t\"is-a-republican\",\n\t\"is-a-rockstar\",\n\t\"is-a-socialist\",\n\t\"is-a-student\",\n\t\"is-a-teacher\",\n\t\"is-a-techie\",\n\t\"is-a-therapist\",\n\t\"is-an-accountant\",\n\t\"is-an-actor\",\n\t\"is-an-actress\",\n\t\"is-an-anarchist\",\n\t\"is-an-artist\",\n\t\"is-an-engineer\",\n\t\"is-an-entertainer\",\n\t\"is-certified\",\n\t\"is-gone\",\n\t\"is-into-anime\",\n\t\"is-into-cars\",\n\t\"is-into-cartoons\",\n\t\"is-into-games\",\n\t\"is-leet\",\n\t\"is-not-certified\",\n\t\"is-slick\",\n\t\"is-uberleet\",\n\t\"is-with-theband\",\n\t\"isa-geek\",\n\t\"isa-hockeynut\",\n\t\"issmarterthanyou\",\n\t\"jdevcloud\",\n\t\"joyent\",\n\t\"jpn\",\n\t\"kozow\",\n\t\"kr\",\n\t\"likes-pie\",\n\t\"likescandy\",\n\t\"logoip\",\n\t\"loseyourip\",\n\t\"meteorapp\",\n\t\"mex\",\n\t\"myactivedirectory\",\n\t\"myasustor\",\n\t\"mydrobo\",\n\t\"myqnapcloud\",\n\t\"mysecuritycamera\",\n\t\"myshopblocks\",\n\t\"mytuleap\",\n\t\"myvnc\",\n\t\"neat-url\",\n\t\"net-freaks\",\n\t\"netlify\",\n\t\"nfshost\",\n\t\"no\",\n\t\"on-aptible\",\n\t\"onthewifi\",\n\t\"ooguy\",\n\t\"operaunite\",\n\t\"outsystemscloud\",\n\t\"ownprovider\",\n\t\"pagefrontapp\",\n\t\"pagespeedmobilizer\",\n\t\"pgfog\",\n\t\"pixolino\",\n\t\"point2this\",\n\t\"prgmr\",\n\t\"publishproxy\",\n\t\"qa2\",\n\t\"qc\",\n\t\"quicksytes\",\n\t\"quipelements\",\n\t\"rackmaze\",\n\t\"remotewd\",\n\t\"rhcloud\",\n\t\"ru\",\n\t\"sa\",\n\t\"saves-the-whales\",\n\t\"se\",\n\t\"securitytactics\",\n\t\"selfip\",\n\t\"sells-for-less\",\n\t\"sells-for-u\",\n\t\"servebbs\",\n\t\"servebeer\",\n\t\"serve
counterstrike\",\n\t\"serveexchange\",\n\t\"serveftp\",\n\t\"servegame\",\n\t\"servehalflife\",\n\t\"servehttp\",\n\t\"servehumour\",\n\t\"serveirc\",\n\t\"servemp3\",\n\t\"servep2p\",\n\t\"servepics\",\n\t\"servequake\",\n\t\"servesarcasm\",\n\t\"simple-url\",\n\t\"sinaapp\",\n\t\"space-to-rent\",\n\t\"stufftoread\",\n\t\"teaches-yoga\",\n\t\"temp-dns\",\n\t\"theworkpc\",\n\t\"townnews-staging\",\n\t\"uk\",\n\t\"unusualperson\",\n\t\"us\",\n\t\"uy\",\n\t\"vipsinaapp\",\n\t\"withgoogle\",\n\t\"withyoutube\",\n\t\"workisboring\",\n\t\"wpdevcloud\",\n\t\"writesthisblog\",\n\t\"xenapponazure\",\n\t\"yolasite\",\n\t\"za\",\n\t\"ap-northeast-1\",\n\t\"ap-northeast-2\",\n\t\"ap-south-1\",\n\t\"ap-southeast-1\",\n\t\"ap-southeast-2\",\n\t\"ca-central-1\",\n\t\"compute\",\n\t\"compute-1\",\n\t\"elb\",\n\t\"eu-central-1\",\n\t\"eu-west-1\",\n\t\"eu-west-2\",\n\t\"s3\",\n\t\"s3-ap-northeast-1\",\n\t\"s3-ap-northeast-2\",\n\t\"s3-ap-south-1\",\n\t\"s3-ap-southeast-1\",\n\t\"s3-ap-southeast-2\",\n\t\"s3-ca-central-1\",\n\t\"s3-eu-central-1\",\n\t\"s3-eu-west-1\",\n\t\"s3-eu-west-2\",\n\t\"s3-external-1\",\n\t\"s3-fips-us-gov-west-1\",\n\t\"s3-sa-east-1\",\n\t\"s3-us-east-2\",\n\t\"s3-us-gov-west-1\",\n\t\"s3-us-west-1\",\n\t\"s3-us-west-2\",\n\t\"s3-website-ap-northeast-1\",\n\t\"s3-website-ap-southeast-1\",\n\t\"s3-website-ap-southeast-2\",\n\t\"s3-website-eu-west-1\",\n\t\"s3-website-sa-east-1\",\n\t\"s3-website-us-east-1\",\n\t\"s3-website-us-west-1\",\n\t\"s3-website-us-west-2\",\n\t\"sa-east-1\",\n\t\"us-east-1\",\n\t\"us-east-2\",\n\t\"dualstack\",\n\t\"s3\",\n\t\"dualstack\",\n\t\"s3\",\n\t\"s3-website\",\n\t\"s3\",\n\t\"dualstack\",\n\t\"s3\",\n\t\"s3-website\",\n\t\"s3\",\n\t\"dualstack\",\n\t\"s3\",\n\t\"dualstack\",\n\t\"s3\",\n\t\"dualstack\",\n\t\"s3\",\n\t\"s3-website\",\n\t\"s3\",\n\t\"dualstack\",\n\t\"s3\",\n\t\"s3-website\",\n\t\"s3\",\n\t\"dualstack\",\n\t\"s3\",\n\t\"dualstack\",\n\t\"s3\",\n\t\"s3-website\",\n\t\"s3\",\n\t\"dualstack\",\n\t\"s3\",\n\t\"dual
stack\",\n\t\"s3\",\n\t\"dualstack\",\n\t\"s3\",\n\t\"s3-website\",\n\t\"s3\",\n\t\"alpha\",\n\t\"beta\",\n\t\"ap-northeast-1\",\n\t\"ap-northeast-2\",\n\t\"ap-south-1\",\n\t\"ap-southeast-1\",\n\t\"ap-southeast-2\",\n\t\"ca-central-1\",\n\t\"eu-central-1\",\n\t\"eu-west-1\",\n\t\"eu-west-2\",\n\t\"sa-east-1\",\n\t\"us-east-1\",\n\t\"us-east-2\",\n\t\"us-gov-west-1\",\n\t\"us-west-1\",\n\t\"us-west-2\",\n\t\"eu-1\",\n\t\"eu-2\",\n\t\"eu-3\",\n\t\"eu-4\",\n\t\"us-1\",\n\t\"us-2\",\n\t\"us-3\",\n\t\"us-4\",\n\t\"apps\",\n\t\"cns\",\n\t\"eu\",\n\t\"xen\",\n\t\"de\",\n\t\"ac\",\n\t\"co\",\n\t\"ed\",\n\t\"fi\",\n\t\"go\",\n\t\"or\",\n\t\"sa\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"inf\",\n\t\"net\",\n\t\"org\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"net\",\n\t\"org\",\n\t\"ath\",\n\t\"gov\",\n\t\"info\",\n\t\"ac\",\n\t\"biz\",\n\t\"com\",\n\t\"ekloges\",\n\t\"gov\",\n\t\"ltd\",\n\t\"name\",\n\t\"net\",\n\t\"org\",\n\t\"parliament\",\n\t\"press\",\n\t\"pro\",\n\t\"tm\",\n\t\"blogspot\",\n\t\"blogspot\",\n\t\"co\",\n\t\"e4\",\n\t\"metacentrum\",\n\t\"realm\",\n\t\"cloud\",\n\t\"custom\",\n\t\"12hp\",\n\t\"2ix\",\n\t\"4lima\",\n\t\"barsy\",\n\t\"blogspot\",\n\t\"bplaced\",\n\t\"com\",\n\t\"cosidns\",\n\t\"dd-dns\",\n\t\"ddnss\",\n\t\"dnshome\",\n\t\"dnsupdater\",\n\t\"dray-dns\",\n\t\"draydns\",\n\t\"dyn-ip24\",\n\t\"dyn-vpn\",\n\t\"dynamisches-dns\",\n\t\"dyndns1\",\n\t\"dynvpn\",\n\t\"firewall-gateway\",\n\t\"fuettertdasnetz\",\n\t\"git-repos\",\n\t\"goip\",\n\t\"home-webserver\",\n\t\"internet-dns\",\n\t\"isteingeek\",\n\t\"istmein\",\n\t\"keymachine\",\n\t\"l-o-g-i-n\",\n\t\"lcube-server\",\n\t\"lebtimnetz\",\n\t\"leitungsen\",\n\t\"lima-city\",\n\t\"logoip\",\n\t\"mein-vigor\",\n\t\"my-gateway\",\n\t\"my-router\",\n\t\"my-vigor\",\n\t\"my-wan\",\n\t\"myhome-server\",\n\t\"spdns\",\n\t\"square7\",\n\t\"svn-repos\",\n\t\"syno-ds\",\n\t\"synology-diskstation\",\n\t\"synology-ds\",\n\t\"taifun-dns\",\n\t\"traeumtgerade\",\n\t\"dyn\",\n\t\"dyn\",\n\t\"dyndns\",\n\
t\"dyn\",\n\t\"biz\",\n\t\"blogspot\",\n\t\"co\",\n\t\"firm\",\n\t\"reg\",\n\t\"store\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"art\",\n\t\"com\",\n\t\"edu\",\n\t\"gob\",\n\t\"gov\",\n\t\"mil\",\n\t\"net\",\n\t\"org\",\n\t\"sld\",\n\t\"web\",\n\t\"art\",\n\t\"asso\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"pol\",\n\t\"com\",\n\t\"edu\",\n\t\"fin\",\n\t\"gob\",\n\t\"gov\",\n\t\"info\",\n\t\"k12\",\n\t\"med\",\n\t\"mil\",\n\t\"net\",\n\t\"org\",\n\t\"pro\",\n\t\"aip\",\n\t\"com\",\n\t\"edu\",\n\t\"fie\",\n\t\"gov\",\n\t\"lib\",\n\t\"med\",\n\t\"org\",\n\t\"pri\",\n\t\"riik\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"eun\",\n\t\"gov\",\n\t\"mil\",\n\t\"name\",\n\t\"net\",\n\t\"org\",\n\t\"sci\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"gob\",\n\t\"nom\",\n\t\"org\",\n\t\"blogspot\",\n\t\"compute\",\n\t\"biz\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"info\",\n\t\"name\",\n\t\"net\",\n\t\"org\",\n\t\"barsy\",\n\t\"cloudns\",\n\t\"diskstation\",\n\t\"mycd\",\n\t\"spdns\",\n\t\"transurl\",\n\t\"wellbeingzone\",\n\t\"party\",\n\t\"user\",\n\t\"ybo\",\n\t\"storj\",\n\t\"aland\",\n\t\"blogspot\",\n\t\"dy\",\n\t\"iki\",\n\t\"ptplus\",\n\t\"aeroport\",\n\t\"assedic\",\n\t\"asso\",\n\t\"avocat\",\n\t\"avoues\",\n\t\"blogspot\",\n\t\"cci\",\n\t\"chambagri\",\n\t\"chirurgiens-dentistes\",\n\t\"chirurgiens-dentistes-en-france\",\n\t\"com\",\n\t\"experts-comptables\",\n\t\"fbx-os\",\n\t\"fbxos\",\n\t\"freebox-os\",\n\t\"freeboxos\",\n\t\"geometre-expert\",\n\t\"gouv\",\n\t\"greta\",\n\t\"huissier-justice\",\n\t\"medecin\",\n\t\"nom\",\n\t\"notaires\",\n\t\"on-web\",\n\t\"pharmacien\",\n\t\"port\",\n\t\"prd\",\n\t\"presse\",\n\t\"tm\",\n\t\"veterinaire\",\n\t\"nom\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"net\",\n\t\"org\",\n\t\"pvt\",\n\t\"co\",\n\t\"cya\",\n\t\"net\",\n\t\"org\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"org\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"ltd\",\n\t\"mod\",\n\t\"org\
",\n\t\"co\",\n\t\"com\",\n\t\"edu\",\n\t\"net\",\n\t\"nom\",\n\t\"org\",\n\t\"ac\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"cloud\",\n\t\"asso\",\n\t\"com\",\n\t\"edu\",\n\t\"mobi\",\n\t\"net\",\n\t\"org\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"nym\",\n\t\"org\",\n\t\"com\",\n\t\"edu\",\n\t\"gob\",\n\t\"ind\",\n\t\"mil\",\n\t\"net\",\n\t\"nom\",\n\t\"org\",\n\t\"co\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"idv\",\n\t\"inc\",\n\t\"ltd\",\n\t\"net\",\n\t\"org\",\n\t\"xn--55qx5d\",\n\t\"xn--ciqpn\",\n\t\"xn--gmq050i\",\n\t\"xn--gmqw5a\",\n\t\"xn--io0a7i\",\n\t\"xn--lcvr32d\",\n\t\"xn--mk0axi\",\n\t\"xn--mxtq1m\",\n\t\"xn--od0alg\",\n\t\"xn--od0aq3b\",\n\t\"xn--tn0ag\",\n\t\"xn--uc0atv\",\n\t\"xn--uc0ay4a\",\n\t\"xn--wcvs22d\",\n\t\"xn--zf0avx\",\n\t\"com\",\n\t\"edu\",\n\t\"gob\",\n\t\"mil\",\n\t\"net\",\n\t\"nom\",\n\t\"org\",\n\t\"cloudaccess\",\n\t\"freesite\",\n\t\"opencraft\",\n\t\"blogspot\",\n\t\"com\",\n\t\"from\",\n\t\"iz\",\n\t\"name\",\n\t\"adult\",\n\t\"art\",\n\t\"asso\",\n\t\"com\",\n\t\"coop\",\n\t\"edu\",\n\t\"firm\",\n\t\"gouv\",\n\t\"info\",\n\t\"med\",\n\t\"net\",\n\t\"org\",\n\t\"perso\",\n\t\"pol\",\n\t\"pro\",\n\t\"rel\",\n\t\"shop\",\n\t\"2000\",\n\t\"agrar\",\n\t\"blogspot\",\n\t\"bolt\",\n\t\"casino\",\n\t\"city\",\n\t\"co\",\n\t\"erotica\",\n\t\"erotika\",\n\t\"film\",\n\t\"forum\",\n\t\"games\",\n\t\"hotel\",\n\t\"info\",\n\t\"ingatlan\",\n\t\"jogasz\",\n\t\"konyvelo\",\n\t\"lakas\",\n\t\"media\",\n\t\"news\",\n\t\"org\",\n\t\"priv\",\n\t\"reklam\",\n\t\"sex\",\n\t\"shop\",\n\t\"sport\",\n\t\"suli\",\n\t\"szex\",\n\t\"tm\",\n\t\"tozsde\",\n\t\"utazas\",\n\t\"video\",\n\t\"ac\",\n\t\"biz\",\n\t\"co\",\n\t\"desa\",\n\t\"go\",\n\t\"mil\",\n\t\"my\",\n\t\"net\",\n\t\"or\",\n\t\"sch\",\n\t\"web\",\n\t\"blogspot\",\n\t\"blogspot\",\n\t\"gov\",\n\t\"ac\",\n\t\"co\",\n\t\"gov\",\n\t\"idf\",\n\t\"k12\",\n\t\"muni\",\
n\t\"net\",\n\t\"org\",\n\t\"blogspot\",\n\t\"ac\",\n\t\"co\",\n\t\"com\",\n\t\"net\",\n\t\"nom\",\n\t\"org\",\n\t\"ro\",\n\t\"tt\",\n\t\"tv\",\n\t\"ltd\",\n\t\"plc\",\n\t\"ac\",\n\t\"barsy\",\n\t\"blogspot\",\n\t\"cloudns\",\n\t\"co\",\n\t\"edu\",\n\t\"firm\",\n\t\"gen\",\n\t\"gov\",\n\t\"ind\",\n\t\"mil\",\n\t\"net\",\n\t\"nic\",\n\t\"org\",\n\t\"res\",\n\t\"barrel-of-knowledge\",\n\t\"barrell-of-knowledge\",\n\t\"cloudns\",\n\t\"dvrcam\",\n\t\"dynamic-dns\",\n\t\"dyndns\",\n\t\"for-our\",\n\t\"groks-the\",\n\t\"groks-this\",\n\t\"here-for-more\",\n\t\"ilovecollege\",\n\t\"knowsitall\",\n\t\"no-ip\",\n\t\"nsupdate\",\n\t\"selfip\",\n\t\"v-info\",\n\t\"webhop\",\n\t\"eu\",\n\t\"backplaneapp\",\n\t\"boxfuse\",\n\t\"browsersafetymark\",\n\t\"com\",\n\t\"dedyn\",\n\t\"definima\",\n\t\"drud\",\n\t\"enonic\",\n\t\"github\",\n\t\"gitlab\",\n\t\"hasura-app\",\n\t\"hzc\",\n\t\"lair\",\n\t\"ngrok\",\n\t\"nid\",\n\t\"nodeart\",\n\t\"nodum\",\n\t\"pantheonsite\",\n\t\"protonet\",\n\t\"sandcats\",\n\t\"shiftedit\",\n\t\"spacekit\",\n\t\"stolos\",\n\t\"thingdust\",\n\t\"vaporcloud\",\n\t\"wedeploy\",\n\t\"customer\",\n\t\"apps\",\n\t\"stage\",\n\t\"dev\",\n\t\"disrec\",\n\t\"prod\",\n\t\"testing\",\n\t\"cust\",\n\t\"cust\",\n\t\"cust\",\n\t\"cust\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"net\",\n\t\"org\",\n\t\"ac\",\n\t\"co\",\n\t\"gov\",\n\t\"id\",\n\t\"net\",\n\t\"org\",\n\t\"sch\",\n\t\"xn--mgba3a4f16a\",\n\t\"xn--mgba3a4fra\",\n\t\"blogspot\",\n\t\"com\",\n\t\"cupcake\",\n\t\"edu\",\n\t\"gov\",\n\t\"int\",\n\t\"net\",\n\t\"org\",\n\t\"abr\",\n\t\"abruzzo\",\n\t\"ag\",\n\t\"agrigento\",\n\t\"al\",\n\t\"alessandria\",\n\t\"alto-adige\",\n\t\"altoadige\",\n\t\"an\",\n\t\"ancona\",\n\t\"andria-barletta-trani\",\n\t\"andria-trani-barletta\",\n\t\"andriabarlettatrani\",\n\t\"andriatranibarletta\",\n\t\"ao\",\n\t\"aosta\",\n\t\"aosta-valley\",\n\t\"aostavalley\",\n\t\"aoste\",\n\t\"ap\",\n\t\"aq\",\n\t\"aquila\",\n\t\"ar\",\n\t\"arezzo\",\n\t\"ascoli-piceno\",\n\t\"
ascolipiceno\",\n\t\"asti\",\n\t\"at\",\n\t\"av\",\n\t\"avellino\",\n\t\"ba\",\n\t\"balsan\",\n\t\"bari\",\n\t\"barletta-trani-andria\",\n\t\"barlettatraniandria\",\n\t\"bas\",\n\t\"basilicata\",\n\t\"belluno\",\n\t\"benevento\",\n\t\"bergamo\",\n\t\"bg\",\n\t\"bi\",\n\t\"biella\",\n\t\"bl\",\n\t\"blogspot\",\n\t\"bn\",\n\t\"bo\",\n\t\"bologna\",\n\t\"bolzano\",\n\t\"bozen\",\n\t\"br\",\n\t\"brescia\",\n\t\"brindisi\",\n\t\"bs\",\n\t\"bt\",\n\t\"bz\",\n\t\"ca\",\n\t\"cagliari\",\n\t\"cal\",\n\t\"calabria\",\n\t\"caltanissetta\",\n\t\"cam\",\n\t\"campania\",\n\t\"campidano-medio\",\n\t\"campidanomedio\",\n\t\"campobasso\",\n\t\"carbonia-iglesias\",\n\t\"carboniaiglesias\",\n\t\"carrara-massa\",\n\t\"carraramassa\",\n\t\"caserta\",\n\t\"catania\",\n\t\"catanzaro\",\n\t\"cb\",\n\t\"ce\",\n\t\"cesena-forli\",\n\t\"cesenaforli\",\n\t\"ch\",\n\t\"chieti\",\n\t\"ci\",\n\t\"cl\",\n\t\"cn\",\n\t\"co\",\n\t\"como\",\n\t\"cosenza\",\n\t\"cr\",\n\t\"cremona\",\n\t\"crotone\",\n\t\"cs\",\n\t\"ct\",\n\t\"cuneo\",\n\t\"cz\",\n\t\"dell-ogliastra\",\n\t\"dellogliastra\",\n\t\"edu\",\n\t\"emilia-romagna\",\n\t\"emiliaromagna\",\n\t\"emr\",\n\t\"en\",\n\t\"enna\",\n\t\"fc\",\n\t\"fe\",\n\t\"fermo\",\n\t\"ferrara\",\n\t\"fg\",\n\t\"fi\",\n\t\"firenze\",\n\t\"florence\",\n\t\"fm\",\n\t\"foggia\",\n\t\"forli-cesena\",\n\t\"forlicesena\",\n\t\"fr\",\n\t\"friuli-v-giulia\",\n\t\"friuli-ve-giulia\",\n\t\"friuli-vegiulia\",\n\t\"friuli-venezia-giulia\",\n\t\"friuli-veneziagiulia\",\n\t\"friuli-vgiulia\",\n\t\"friuliv-giulia\",\n\t\"friulive-giulia\",\n\t\"friulivegiulia\",\n\t\"friulivenezia-giulia\",\n\t\"friuliveneziagiulia\",\n\t\"friulivgiulia\",\n\t\"frosinone\",\n\t\"fvg\",\n\t\"ge\",\n\t\"genoa\",\n\t\"genova\",\n\t\"go\",\n\t\"gorizia\",\n\t\"gov\",\n\t\"gr\",\n\t\"grosseto\",\n\t\"iglesias-carbonia\",\n\t\"iglesiascarbonia\",\n\t\"im\",\n\t\"imperia\",\n\t\"is\",\n\t\"isernia\",\n\t\"kr\",\n\t\"la-spezia\",\n\t\"laquila\",\n\t\"laspezia\",\n\t\"latina\",\n\t\"laz\",\n\t\"lazio\",\n\
t\"lc\",\n\t\"le\",\n\t\"lecce\",\n\t\"lecco\",\n\t\"li\",\n\t\"lig\",\n\t\"liguria\",\n\t\"livorno\",\n\t\"lo\",\n\t\"lodi\",\n\t\"lom\",\n\t\"lombardia\",\n\t\"lombardy\",\n\t\"lt\",\n\t\"lu\",\n\t\"lucania\",\n\t\"lucca\",\n\t\"macerata\",\n\t\"mantova\",\n\t\"mar\",\n\t\"marche\",\n\t\"massa-carrara\",\n\t\"massacarrara\",\n\t\"matera\",\n\t\"mb\",\n\t\"mc\",\n\t\"me\",\n\t\"medio-campidano\",\n\t\"mediocampidano\",\n\t\"messina\",\n\t\"mi\",\n\t\"milan\",\n\t\"milano\",\n\t\"mn\",\n\t\"mo\",\n\t\"modena\",\n\t\"mol\",\n\t\"molise\",\n\t\"monza\",\n\t\"monza-brianza\",\n\t\"monza-e-della-brianza\",\n\t\"monzabrianza\",\n\t\"monzaebrianza\",\n\t\"monzaedellabrianza\",\n\t\"ms\",\n\t\"mt\",\n\t\"na\",\n\t\"naples\",\n\t\"napoli\",\n\t\"no\",\n\t\"novara\",\n\t\"nu\",\n\t\"nuoro\",\n\t\"og\",\n\t\"ogliastra\",\n\t\"olbia-tempio\",\n\t\"olbiatempio\",\n\t\"or\",\n\t\"oristano\",\n\t\"ot\",\n\t\"pa\",\n\t\"padova\",\n\t\"padua\",\n\t\"palermo\",\n\t\"parma\",\n\t\"pavia\",\n\t\"pc\",\n\t\"pd\",\n\t\"pe\",\n\t\"perugia\",\n\t\"pesaro-urbino\",\n\t\"pesarourbino\",\n\t\"pescara\",\n\t\"pg\",\n\t\"pi\",\n\t\"piacenza\",\n\t\"piedmont\",\n\t\"piemonte\",\n\t\"pisa\",\n\t\"pistoia\",\n\t\"pmn\",\n\t\"pn\",\n\t\"po\",\n\t\"pordenone\",\n\t\"potenza\",\n\t\"pr\",\n\t\"prato\",\n\t\"pt\",\n\t\"pu\",\n\t\"pug\",\n\t\"puglia\",\n\t\"pv\",\n\t\"pz\",\n\t\"ra\",\n\t\"ragusa\",\n\t\"ravenna\",\n\t\"rc\",\n\t\"re\",\n\t\"reggio-calabria\",\n\t\"reggio-emilia\",\n\t\"reggiocalabria\",\n\t\"reggioemilia\",\n\t\"rg\",\n\t\"ri\",\n\t\"rieti\",\n\t\"rimini\",\n\t\"rm\",\n\t\"rn\",\n\t\"ro\",\n\t\"roma\",\n\t\"rome\",\n\t\"rovigo\",\n\t\"sa\",\n\t\"salerno\",\n\t\"sar\",\n\t\"sardegna\",\n\t\"sardinia\",\n\t\"sassari\",\n\t\"savona\",\n\t\"si\",\n\t\"sic\",\n\t\"sicilia\",\n\t\"sicily\",\n\t\"siena\",\n\t\"siracusa\",\n\t\"so\",\n\t\"sondrio\",\n\t\"sp\",\n\t\"sr\",\n\t\"ss\",\n\t\"suedtirol\",\n\t\"sv\",\n\t\"ta\",\n\t\"taa\",\n\t\"taranto\",\n\t\"te\",\n\t\"tempio-olbia\",\n\t\"tempio
olbia\",\n\t\"teramo\",\n\t\"terni\",\n\t\"tn\",\n\t\"to\",\n\t\"torino\",\n\t\"tos\",\n\t\"toscana\",\n\t\"tp\",\n\t\"tr\",\n\t\"trani-andria-barletta\",\n\t\"trani-barletta-andria\",\n\t\"traniandriabarletta\",\n\t\"tranibarlettaandria\",\n\t\"trapani\",\n\t\"trentino\",\n\t\"trentino-a-adige\",\n\t\"trentino-aadige\",\n\t\"trentino-alto-adige\",\n\t\"trentino-altoadige\",\n\t\"trentino-s-tirol\",\n\t\"trentino-stirol\",\n\t\"trentino-sud-tirol\",\n\t\"trentino-sudtirol\",\n\t\"trentino-sued-tirol\",\n\t\"trentino-suedtirol\",\n\t\"trentinoa-adige\",\n\t\"trentinoaadige\",\n\t\"trentinoalto-adige\",\n\t\"trentinoaltoadige\",\n\t\"trentinos-tirol\",\n\t\"trentinostirol\",\n\t\"trentinosud-tirol\",\n\t\"trentinosudtirol\",\n\t\"trentinosued-tirol\",\n\t\"trentinosuedtirol\",\n\t\"trento\",\n\t\"treviso\",\n\t\"trieste\",\n\t\"ts\",\n\t\"turin\",\n\t\"tuscany\",\n\t\"tv\",\n\t\"ud\",\n\t\"udine\",\n\t\"umb\",\n\t\"umbria\",\n\t\"urbino-pesaro\",\n\t\"urbinopesaro\",\n\t\"va\",\n\t\"val-d-aosta\",\n\t\"val-daosta\",\n\t\"vald-aosta\",\n\t\"valdaosta\",\n\t\"valle-aosta\",\n\t\"valle-d-aosta\",\n\t\"valle-daosta\",\n\t\"valleaosta\",\n\t\"valled-aosta\",\n\t\"valledaosta\",\n\t\"vallee-aoste\",\n\t\"valleeaoste\",\n\t\"vao\",\n\t\"varese\",\n\t\"vb\",\n\t\"vc\",\n\t\"vda\",\n\t\"ve\",\n\t\"ven\",\n\t\"veneto\",\n\t\"venezia\",\n\t\"venice\",\n\t\"verbania\",\n\t\"vercelli\",\n\t\"verona\",\n\t\"vi\",\n\t\"vibo-valentia\",\n\t\"vibovalentia\",\n\t\"vicenza\",\n\t\"viterbo\",\n\t\"vr\",\n\t\"vs\",\n\t\"vt\",\n\t\"vv\",\n\t\"co\",\n\t\"net\",\n\t\"org\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"name\",\n\t\"net\",\n\t\"org\",\n\t\"sch\",\n\t\"ac\",\n\t\"ad\",\n\t\"aichi\",\n\t\"akita\",\n\t\"aomori\",\n\t\"blogspot\",\n\t\"chiba\",\n\t\"co\",\n\t\"ed\",\n\t\"ehime\",\n\t\"fukui\",\n\t\"fukuoka\",\n\t\"fukushima\",\n\t\"gifu\",\n\t\"go\",\n\t\"gr\",\n\t\"gunma\",\n\t\"hiroshima\",\n\t\"hokkaido\",\n\t\"hyogo\",\n\t\"ibaraki\",\n\t\"ishikawa\",\n\t\"iwate\",\n\t
\"kagawa\",\n\t\"kagoshima\",\n\t\"kanagawa\",\n\t\"kawasaki\",\n\t\"kitakyushu\",\n\t\"kobe\",\n\t\"kochi\",\n\t\"kumamoto\",\n\t\"kyoto\",\n\t\"lg\",\n\t\"mie\",\n\t\"miyagi\",\n\t\"miyazaki\",\n\t\"nagano\",\n\t\"nagasaki\",\n\t\"nagoya\",\n\t\"nara\",\n\t\"ne\",\n\t\"niigata\",\n\t\"oita\",\n\t\"okayama\",\n\t\"okinawa\",\n\t\"or\",\n\t\"osaka\",\n\t\"saga\",\n\t\"saitama\",\n\t\"sapporo\",\n\t\"sendai\",\n\t\"shiga\",\n\t\"shimane\",\n\t\"shizuoka\",\n\t\"tochigi\",\n\t\"tokushima\",\n\t\"tokyo\",\n\t\"tottori\",\n\t\"toyama\",\n\t\"wakayama\",\n\t\"xn--0trq7p7nn\",\n\t\"xn--1ctwo\",\n\t\"xn--1lqs03n\",\n\t\"xn--1lqs71d\",\n\t\"xn--2m4a15e\",\n\t\"xn--32vp30h\",\n\t\"xn--4it168d\",\n\t\"xn--4it797k\",\n\t\"xn--4pvxs\",\n\t\"xn--5js045d\",\n\t\"xn--5rtp49c\",\n\t\"xn--5rtq34k\",\n\t\"xn--6btw5a\",\n\t\"xn--6orx2r\",\n\t\"xn--7t0a264c\",\n\t\"xn--8ltr62k\",\n\t\"xn--8pvr4u\",\n\t\"xn--c3s14m\",\n\t\"xn--d5qv7z876c\",\n\t\"xn--djrs72d6uy\",\n\t\"xn--djty4k\",\n\t\"xn--efvn9s\",\n\t\"xn--ehqz56n\",\n\t\"xn--elqq16h\",\n\t\"xn--f6qx53a\",\n\t\"xn--k7yn95e\",\n\t\"xn--kbrq7o\",\n\t\"xn--klt787d\",\n\t\"xn--kltp7d\",\n\t\"xn--kltx9a\",\n\t\"xn--klty5x\",\n\t\"xn--mkru45i\",\n\t\"xn--nit225k\",\n\t\"xn--ntso0iqx3a\",\n\t\"xn--ntsq17g\",\n\t\"xn--pssu33l\",\n\t\"xn--qqqt11m\",\n\t\"xn--rht27z\",\n\t\"xn--rht3d\",\n\t\"xn--rht61e\",\n\t\"xn--rny31h\",\n\t\"xn--tor131o\",\n\t\"xn--uist22h\",\n\t\"xn--uisz3g\",\n\t\"xn--uuwu58a\",\n\t\"xn--vgu402c\",\n\t\"xn--zbx025d\",\n\t\"yamagata\",\n\t\"yamaguchi\",\n\t\"yamanashi\",\n\t\"yokohama\",\n\t\"aisai\",\n\t\"ama\",\n\t\"anjo\",\n\t\"asuke\",\n\t\"chiryu\",\n\t\"chita\",\n\t\"fuso\",\n\t\"gamagori\",\n\t\"handa\",\n\t\"hazu\",\n\t\"hekinan\",\n\t\"higashiura\",\n\t\"ichinomiya\",\n\t\"inazawa\",\n\t\"inuyama\",\n\t\"isshiki\",\n\t\"iwakura\",\n\t\"kanie\",\n\t\"kariya\",\n\t\"kasugai\",\n\t\"kira\",\n\t\"kiyosu\",\n\t\"komaki\",\n\t\"konan\",\n\t\"kota\",\n\t\"mihama\",\n\t\"miyoshi\",\n\t\"nishio\",\n\t\"nisshin\",\n\t\"obu
\",\n\t\"oguchi\",\n\t\"oharu\",\n\t\"okazaki\",\n\t\"owariasahi\",\n\t\"seto\",\n\t\"shikatsu\",\n\t\"shinshiro\",\n\t\"shitara\",\n\t\"tahara\",\n\t\"takahama\",\n\t\"tobishima\",\n\t\"toei\",\n\t\"togo\",\n\t\"tokai\",\n\t\"tokoname\",\n\t\"toyoake\",\n\t\"toyohashi\",\n\t\"toyokawa\",\n\t\"toyone\",\n\t\"toyota\",\n\t\"tsushima\",\n\t\"yatomi\",\n\t\"akita\",\n\t\"daisen\",\n\t\"fujisato\",\n\t\"gojome\",\n\t\"hachirogata\",\n\t\"happou\",\n\t\"higashinaruse\",\n\t\"honjo\",\n\t\"honjyo\",\n\t\"ikawa\",\n\t\"kamikoani\",\n\t\"kamioka\",\n\t\"katagami\",\n\t\"kazuno\",\n\t\"kitaakita\",\n\t\"kosaka\",\n\t\"kyowa\",\n\t\"misato\",\n\t\"mitane\",\n\t\"moriyoshi\",\n\t\"nikaho\",\n\t\"noshiro\",\n\t\"odate\",\n\t\"oga\",\n\t\"ogata\",\n\t\"semboku\",\n\t\"yokote\",\n\t\"yurihonjo\",\n\t\"aomori\",\n\t\"gonohe\",\n\t\"hachinohe\",\n\t\"hashikami\",\n\t\"hiranai\",\n\t\"hirosaki\",\n\t\"itayanagi\",\n\t\"kuroishi\",\n\t\"misawa\",\n\t\"mutsu\",\n\t\"nakadomari\",\n\t\"noheji\",\n\t\"oirase\",\n\t\"owani\",\n\t\"rokunohe\",\n\t\"sannohe\",\n\t\"shichinohe\",\n\t\"shingo\",\n\t\"takko\",\n\t\"towada\",\n\t\"tsugaru\",\n\t\"tsuruta\",\n\t\"abiko\",\n\t\"asahi\",\n\t\"chonan\",\n\t\"chosei\",\n\t\"choshi\",\n\t\"chuo\",\n\t\"funabashi\",\n\t\"futtsu\",\n\t\"hanamigawa\",\n\t\"ichihara\",\n\t\"ichikawa\",\n\t\"ichinomiya\",\n\t\"inzai\",\n\t\"isumi\",\n\t\"kamagaya\",\n\t\"kamogawa\",\n\t\"kashiwa\",\n\t\"katori\",\n\t\"katsuura\",\n\t\"kimitsu\",\n\t\"kisarazu\",\n\t\"kozaki\",\n\t\"kujukuri\",\n\t\"kyonan\",\n\t\"matsudo\",\n\t\"midori\",\n\t\"mihama\",\n\t\"minamiboso\",\n\t\"mobara\",\n\t\"mutsuzawa\",\n\t\"nagara\",\n\t\"nagareyama\",\n\t\"narashino\",\n\t\"narita\",\n\t\"noda\",\n\t\"oamishirasato\",\n\t\"omigawa\",\n\t\"onjuku\",\n\t\"otaki\",\n\t\"sakae\",\n\t\"sakura\",\n\t\"shimofusa\",\n\t\"shirako\",\n\t\"shiroi\",\n\t\"shisui\",\n\t\"sodegaura\",\n\t\"sosa\",\n\t\"tako\",\n\t\"tateyama\",\n\t\"togane\",\n\t\"tohnosho\",\n\t\"tomisato\",\n\t\"urayasu\",\n\t\"ya
chimata\",\n\t\"yachiyo\",\n\t\"yokaichiba\",\n\t\"yokoshibahikari\",\n\t\"yotsukaido\",\n\t\"ainan\",\n\t\"honai\",\n\t\"ikata\",\n\t\"imabari\",\n\t\"iyo\",\n\t\"kamijima\",\n\t\"kihoku\",\n\t\"kumakogen\",\n\t\"masaki\",\n\t\"matsuno\",\n\t\"matsuyama\",\n\t\"namikata\",\n\t\"niihama\",\n\t\"ozu\",\n\t\"saijo\",\n\t\"seiyo\",\n\t\"shikokuchuo\",\n\t\"tobe\",\n\t\"toon\",\n\t\"uchiko\",\n\t\"uwajima\",\n\t\"yawatahama\",\n\t\"echizen\",\n\t\"eiheiji\",\n\t\"fukui\",\n\t\"ikeda\",\n\t\"katsuyama\",\n\t\"mihama\",\n\t\"minamiechizen\",\n\t\"obama\",\n\t\"ohi\",\n\t\"ono\",\n\t\"sabae\",\n\t\"sakai\",\n\t\"takahama\",\n\t\"tsuruga\",\n\t\"wakasa\",\n\t\"ashiya\",\n\t\"buzen\",\n\t\"chikugo\",\n\t\"chikuho\",\n\t\"chikujo\",\n\t\"chikushino\",\n\t\"chikuzen\",\n\t\"chuo\",\n\t\"dazaifu\",\n\t\"fukuchi\",\n\t\"hakata\",\n\t\"higashi\",\n\t\"hirokawa\",\n\t\"hisayama\",\n\t\"iizuka\",\n\t\"inatsuki\",\n\t\"kaho\",\n\t\"kasuga\",\n\t\"kasuya\",\n\t\"kawara\",\n\t\"keisen\",\n\t\"koga\",\n\t\"kurate\",\n\t\"kurogi\",\n\t\"kurume\",\n\t\"minami\",\n\t\"miyako\",\n\t\"miyama\",\n\t\"miyawaka\",\n\t\"mizumaki\",\n\t\"munakata\",\n\t\"nakagawa\",\n\t\"nakama\",\n\t\"nishi\",\n\t\"nogata\",\n\t\"ogori\",\n\t\"okagaki\",\n\t\"okawa\",\n\t\"oki\",\n\t\"omuta\",\n\t\"onga\",\n\t\"onojo\",\n\t\"oto\",\n\t\"saigawa\",\n\t\"sasaguri\",\n\t\"shingu\",\n\t\"shinyoshitomi\",\n\t\"shonai\",\n\t\"soeda\",\n\t\"sue\",\n\t\"tachiarai\",\n\t\"tagawa\",\n\t\"takata\",\n\t\"toho\",\n\t\"toyotsu\",\n\t\"tsuiki\",\n\t\"ukiha\",\n\t\"umi\",\n\t\"usui\",\n\t\"yamada\",\n\t\"yame\",\n\t\"yanagawa\",\n\t\"yukuhashi\",\n\t\"aizubange\",\n\t\"aizumisato\",\n\t\"aizuwakamatsu\",\n\t\"asakawa\",\n\t\"bandai\",\n\t\"date\",\n\t\"fukushima\",\n\t\"furudono\",\n\t\"futaba\",\n\t\"hanawa\",\n\t\"higashi\",\n\t\"hirata\",\n\t\"hirono\",\n\t\"iitate\",\n\t\"inawashiro\",\n\t\"ishikawa\",\n\t\"iwaki\",\n\t\"izumizaki\",\n\t\"kagamiishi\",\n\t\"kaneyama\",\n\t\"kawamata\",\n\t\"kitakata\",\n\t\"kitashiobara\",
\n\t\"koori\",\n\t\"koriyama\",\n\t\"kunimi\",\n\t\"miharu\",\n\t\"mishima\",\n\t\"namie\",\n\t\"nango\",\n\t\"nishiaizu\",\n\t\"nishigo\",\n\t\"okuma\",\n\t\"omotego\",\n\t\"ono\",\n\t\"otama\",\n\t\"samegawa\",\n\t\"shimogo\",\n\t\"shirakawa\",\n\t\"showa\",\n\t\"soma\",\n\t\"sukagawa\",\n\t\"taishin\",\n\t\"tamakawa\",\n\t\"tanagura\",\n\t\"tenei\",\n\t\"yabuki\",\n\t\"yamato\",\n\t\"yamatsuri\",\n\t\"yanaizu\",\n\t\"yugawa\",\n\t\"anpachi\",\n\t\"ena\",\n\t\"gifu\",\n\t\"ginan\",\n\t\"godo\",\n\t\"gujo\",\n\t\"hashima\",\n\t\"hichiso\",\n\t\"hida\",\n\t\"higashishirakawa\",\n\t\"ibigawa\",\n\t\"ikeda\",\n\t\"kakamigahara\",\n\t\"kani\",\n\t\"kasahara\",\n\t\"kasamatsu\",\n\t\"kawaue\",\n\t\"kitagata\",\n\t\"mino\",\n\t\"minokamo\",\n\t\"mitake\",\n\t\"mizunami\",\n\t\"motosu\",\n\t\"nakatsugawa\",\n\t\"ogaki\",\n\t\"sakahogi\",\n\t\"seki\",\n\t\"sekigahara\",\n\t\"shirakawa\",\n\t\"tajimi\",\n\t\"takayama\",\n\t\"tarui\",\n\t\"toki\",\n\t\"tomika\",\n\t\"wanouchi\",\n\t\"yamagata\",\n\t\"yaotsu\",\n\t\"yoro\",\n\t\"annaka\",\n\t\"chiyoda\",\n\t\"fujioka\",\n\t\"higashiagatsuma\",\n\t\"isesaki\",\n\t\"itakura\",\n\t\"kanna\",\n\t\"kanra\",\n\t\"katashina\",\n\t\"kawaba\",\n\t\"kiryu\",\n\t\"kusatsu\",\n\t\"maebashi\",\n\t\"meiwa\",\n\t\"midori\",\n\t\"minakami\",\n\t\"naganohara\",\n\t\"nakanojo\",\n\t\"nanmoku\",\n\t\"numata\",\n\t\"oizumi\",\n\t\"ora\",\n\t\"ota\",\n\t\"shibukawa\",\n\t\"shimonita\",\n\t\"shinto\",\n\t\"showa\",\n\t\"takasaki\",\n\t\"takayama\",\n\t\"tamamura\",\n\t\"tatebayashi\",\n\t\"tomioka\",\n\t\"tsukiyono\",\n\t\"tsumagoi\",\n\t\"ueno\",\n\t\"yoshioka\",\n\t\"asaminami\",\n\t\"daiwa\",\n\t\"etajima\",\n\t\"fuchu\",\n\t\"fukuyama\",\n\t\"hatsukaichi\",\n\t\"higashihiroshima\",\n\t\"hongo\",\n\t\"jinsekikogen\",\n\t\"kaita\",\n\t\"kui\",\n\t\"kumano\",\n\t\"kure\",\n\t\"mihara\",\n\t\"miyoshi\",\n\t\"naka\",\n\t\"onomichi\",\n\t\"osakikamijima\",\n\t\"otake\",\n\t\"saka\",\n\t\"sera\",\n\t\"seranishi\",\n\t\"shinichi\",\n\t\"shobara\",\n\t
\"takehara\",\n\t\"abashiri\",\n\t\"abira\",\n\t\"aibetsu\",\n\t\"akabira\",\n\t\"akkeshi\",\n\t\"asahikawa\",\n\t\"ashibetsu\",\n\t\"ashoro\",\n\t\"assabu\",\n\t\"atsuma\",\n\t\"bibai\",\n\t\"biei\",\n\t\"bifuka\",\n\t\"bihoro\",\n\t\"biratori\",\n\t\"chippubetsu\",\n\t\"chitose\",\n\t\"date\",\n\t\"ebetsu\",\n\t\"embetsu\",\n\t\"eniwa\",\n\t\"erimo\",\n\t\"esan\",\n\t\"esashi\",\n\t\"fukagawa\",\n\t\"fukushima\",\n\t\"furano\",\n\t\"furubira\",\n\t\"haboro\",\n\t\"hakodate\",\n\t\"hamatonbetsu\",\n\t\"hidaka\",\n\t\"higashikagura\",\n\t\"higashikawa\",\n\t\"hiroo\",\n\t\"hokuryu\",\n\t\"hokuto\",\n\t\"honbetsu\",\n\t\"horokanai\",\n\t\"horonobe\",\n\t\"ikeda\",\n\t\"imakane\",\n\t\"ishikari\",\n\t\"iwamizawa\",\n\t\"iwanai\",\n\t\"kamifurano\",\n\t\"kamikawa\",\n\t\"kamishihoro\",\n\t\"kamisunagawa\",\n\t\"kamoenai\",\n\t\"kayabe\",\n\t\"kembuchi\",\n\t\"kikonai\",\n\t\"kimobetsu\",\n\t\"kitahiroshima\",\n\t\"kitami\",\n\t\"kiyosato\",\n\t\"koshimizu\",\n\t\"kunneppu\",\n\t\"kuriyama\",\n\t\"kuromatsunai\",\n\t\"kushiro\",\n\t\"kutchan\",\n\t\"kyowa\",\n\t\"mashike\",\n\t\"matsumae\",\n\t\"mikasa\",\n\t\"minamifurano\",\n\t\"mombetsu\",\n\t\"moseushi\",\n\t\"mukawa\",\n\t\"muroran\",\n\t\"naie\",\n\t\"nakagawa\",\n\t\"nakasatsunai\",\n\t\"nakatombetsu\",\n\t\"nanae\",\n\t\"nanporo\",\n\t\"nayoro\",\n\t\"nemuro\",\n\t\"niikappu\",\n\t\"niki\",\n\t\"nishiokoppe\",\n\t\"noboribetsu\",\n\t\"numata\",\n\t\"obihiro\",\n\t\"obira\",\n\t\"oketo\",\n\t\"okoppe\",\n\t\"otaru\",\n\t\"otobe\",\n\t\"otofuke\",\n\t\"otoineppu\",\n\t\"oumu\",\n\t\"ozora\",\n\t\"pippu\",\n\t\"rankoshi\",\n\t\"rebun\",\n\t\"rikubetsu\",\n\t\"rishiri\",\n\t\"rishirifuji\",\n\t\"saroma\",\n\t\"sarufutsu\",\n\t\"shakotan\",\n\t\"shari\",\n\t\"shibecha\",\n\t\"shibetsu\",\n\t\"shikabe\",\n\t\"shikaoi\",\n\t\"shimamaki\",\n\t\"shimizu\",\n\t\"shimokawa\",\n\t\"shinshinotsu\",\n\t\"shintoku\",\n\t\"shiranuka\",\n\t\"shiraoi\",\n\t\"shiriuchi\",\n\t\"sobetsu\",\n\t\"sunagawa\",\n\t\"taiki\",\n\t\"takasu\
",\n\t\"takikawa\",\n\t\"takinoue\",\n\t\"teshikaga\",\n\t\"tobetsu\",\n\t\"tohma\",\n\t\"tomakomai\",\n\t\"tomari\",\n\t\"toya\",\n\t\"toyako\",\n\t\"toyotomi\",\n\t\"toyoura\",\n\t\"tsubetsu\",\n\t\"tsukigata\",\n\t\"urakawa\",\n\t\"urausu\",\n\t\"uryu\",\n\t\"utashinai\",\n\t\"wakkanai\",\n\t\"wassamu\",\n\t\"yakumo\",\n\t\"yoichi\",\n\t\"aioi\",\n\t\"akashi\",\n\t\"ako\",\n\t\"amagasaki\",\n\t\"aogaki\",\n\t\"asago\",\n\t\"ashiya\",\n\t\"awaji\",\n\t\"fukusaki\",\n\t\"goshiki\",\n\t\"harima\",\n\t\"himeji\",\n\t\"ichikawa\",\n\t\"inagawa\",\n\t\"itami\",\n\t\"kakogawa\",\n\t\"kamigori\",\n\t\"kamikawa\",\n\t\"kasai\",\n\t\"kasuga\",\n\t\"kawanishi\",\n\t\"miki\",\n\t\"minamiawaji\",\n\t\"nishinomiya\",\n\t\"nishiwaki\",\n\t\"ono\",\n\t\"sanda\",\n\t\"sannan\",\n\t\"sasayama\",\n\t\"sayo\",\n\t\"shingu\",\n\t\"shinonsen\",\n\t\"shiso\",\n\t\"sumoto\",\n\t\"taishi\",\n\t\"taka\",\n\t\"takarazuka\",\n\t\"takasago\",\n\t\"takino\",\n\t\"tamba\",\n\t\"tatsuno\",\n\t\"toyooka\",\n\t\"yabu\",\n\t\"yashiro\",\n\t\"yoka\",\n\t\"yokawa\",\n\t\"ami\",\n\t\"asahi\",\n\t\"bando\",\n\t\"chikusei\",\n\t\"daigo\",\n\t\"fujishiro\",\n\t\"hitachi\",\n\t\"hitachinaka\",\n\t\"hitachiomiya\",\n\t\"hitachiota\",\n\t\"ibaraki\",\n\t\"ina\",\n\t\"inashiki\",\n\t\"itako\",\n\t\"iwama\",\n\t\"joso\",\n\t\"kamisu\",\n\t\"kasama\",\n\t\"kashima\",\n\t\"kasumigaura\",\n\t\"koga\",\n\t\"miho\",\n\t\"mito\",\n\t\"moriya\",\n\t\"naka\",\n\t\"namegata\",\n\t\"oarai\",\n\t\"ogawa\",\n\t\"omitama\",\n\t\"ryugasaki\",\n\t\"sakai\",\n\t\"sakuragawa\",\n\t\"shimodate\",\n\t\"shimotsuma\",\n\t\"shirosato\",\n\t\"sowa\",\n\t\"suifu\",\n\t\"takahagi\",\n\t\"tamatsukuri\",\n\t\"tokai\",\n\t\"tomobe\",\n\t\"tone\",\n\t\"toride\",\n\t\"tsuchiura\",\n\t\"tsukuba\",\n\t\"uchihara\",\n\t\"ushiku\",\n\t\"yachiyo\",\n\t\"yamagata\",\n\t\"yawara\",\n\t\"yuki\",\n\t\"anamizu\",\n\t\"hakui\",\n\t\"hakusan\",\n\t\"kaga\",\n\t\"kahoku\",\n\t\"kanazawa\",\n\t\"kawakita\",\n\t\"komatsu\",\n\t\"nakanoto\",\n\t\"nanao\
",\n\t\"nomi\",\n\t\"nonoichi\",\n\t\"noto\",\n\t\"shika\",\n\t\"suzu\",\n\t\"tsubata\",\n\t\"tsurugi\",\n\t\"uchinada\",\n\t\"wajima\",\n\t\"fudai\",\n\t\"fujisawa\",\n\t\"hanamaki\",\n\t\"hiraizumi\",\n\t\"hirono\",\n\t\"ichinohe\",\n\t\"ichinoseki\",\n\t\"iwaizumi\",\n\t\"iwate\",\n\t\"joboji\",\n\t\"kamaishi\",\n\t\"kanegasaki\",\n\t\"karumai\",\n\t\"kawai\",\n\t\"kitakami\",\n\t\"kuji\",\n\t\"kunohe\",\n\t\"kuzumaki\",\n\t\"miyako\",\n\t\"mizusawa\",\n\t\"morioka\",\n\t\"ninohe\",\n\t\"noda\",\n\t\"ofunato\",\n\t\"oshu\",\n\t\"otsuchi\",\n\t\"rikuzentakata\",\n\t\"shiwa\",\n\t\"shizukuishi\",\n\t\"sumita\",\n\t\"tanohata\",\n\t\"tono\",\n\t\"yahaba\",\n\t\"yamada\",\n\t\"ayagawa\",\n\t\"higashikagawa\",\n\t\"kanonji\",\n\t\"kotohira\",\n\t\"manno\",\n\t\"marugame\",\n\t\"mitoyo\",\n\t\"naoshima\",\n\t\"sanuki\",\n\t\"tadotsu\",\n\t\"takamatsu\",\n\t\"tonosho\",\n\t\"uchinomi\",\n\t\"utazu\",\n\t\"zentsuji\",\n\t\"akune\",\n\t\"amami\",\n\t\"hioki\",\n\t\"isa\",\n\t\"isen\",\n\t\"izumi\",\n\t\"kagoshima\",\n\t\"kanoya\",\n\t\"kawanabe\",\n\t\"kinko\",\n\t\"kouyama\",\n\t\"makurazaki\",\n\t\"matsumoto\",\n\t\"minamitane\",\n\t\"nakatane\",\n\t\"nishinoomote\",\n\t\"satsumasendai\",\n\t\"soo\",\n\t\"tarumizu\",\n\t\"yusui\",\n\t\"aikawa\",\n\t\"atsugi\",\n\t\"ayase\",\n\t\"chigasaki\",\n\t\"ebina\",\n\t\"fujisawa\",\n\t\"hadano\",\n\t\"hakone\",\n\t\"hiratsuka\",\n\t\"isehara\",\n\t\"kaisei\",\n\t\"kamakura\",\n\t\"kiyokawa\",\n\t\"matsuda\",\n\t\"minamiashigara\",\n\t\"miura\",\n\t\"nakai\",\n\t\"ninomiya\",\n\t\"odawara\",\n\t\"oi\",\n\t\"oiso\",\n\t\"sagamihara\",\n\t\"samukawa\",\n\t\"tsukui\",\n\t\"yamakita\",\n\t\"yamato\",\n\t\"yokosuka\",\n\t\"yugawara\",\n\t\"zama\",\n\t\"zushi\",\n\t\"city\",\n\t\"city\",\n\t\"city\",\n\t\"aki\",\n\t\"geisei\",\n\t\"hidaka\",\n\t\"higashitsuno\",\n\t\"ino\",\n\t\"kagami\",\n\t\"kami\",\n\t\"kitagawa\",\n\t\"kochi\",\n\t\"mihara\",\n\t\"motoyama\",\n\t\"muroto\",\n\t\"nahari\",\n\t\"nakamura\",\n\t\"nankoku\",\n\t\"nishit
osa\",\n\t\"niyodogawa\",\n\t\"ochi\",\n\t\"okawa\",\n\t\"otoyo\",\n\t\"otsuki\",\n\t\"sakawa\",\n\t\"sukumo\",\n\t\"susaki\",\n\t\"tosa\",\n\t\"tosashimizu\",\n\t\"toyo\",\n\t\"tsuno\",\n\t\"umaji\",\n\t\"yasuda\",\n\t\"yusuhara\",\n\t\"amakusa\",\n\t\"arao\",\n\t\"aso\",\n\t\"choyo\",\n\t\"gyokuto\",\n\t\"kamiamakusa\",\n\t\"kikuchi\",\n\t\"kumamoto\",\n\t\"mashiki\",\n\t\"mifune\",\n\t\"minamata\",\n\t\"minamioguni\",\n\t\"nagasu\",\n\t\"nishihara\",\n\t\"oguni\",\n\t\"ozu\",\n\t\"sumoto\",\n\t\"takamori\",\n\t\"uki\",\n\t\"uto\",\n\t\"yamaga\",\n\t\"yamato\",\n\t\"yatsushiro\",\n\t\"ayabe\",\n\t\"fukuchiyama\",\n\t\"higashiyama\",\n\t\"ide\",\n\t\"ine\",\n\t\"joyo\",\n\t\"kameoka\",\n\t\"kamo\",\n\t\"kita\",\n\t\"kizu\",\n\t\"kumiyama\",\n\t\"kyotamba\",\n\t\"kyotanabe\",\n\t\"kyotango\",\n\t\"maizuru\",\n\t\"minami\",\n\t\"minamiyamashiro\",\n\t\"miyazu\",\n\t\"muko\",\n\t\"nagaokakyo\",\n\t\"nakagyo\",\n\t\"nantan\",\n\t\"oyamazaki\",\n\t\"sakyo\",\n\t\"seika\",\n\t\"tanabe\",\n\t\"uji\",\n\t\"ujitawara\",\n\t\"wazuka\",\n\t\"yamashina\",\n\t\"yawata\",\n\t\"asahi\",\n\t\"inabe\",\n\t\"ise\",\n\t\"kameyama\",\n\t\"kawagoe\",\n\t\"kiho\",\n\t\"kisosaki\",\n\t\"kiwa\",\n\t\"komono\",\n\t\"kumano\",\n\t\"kuwana\",\n\t\"matsusaka\",\n\t\"meiwa\",\n\t\"mihama\",\n\t\"minamiise\",\n\t\"misugi\",\n\t\"miyama\",\n\t\"nabari\",\n\t\"shima\",\n\t\"suzuka\",\n\t\"tado\",\n\t\"taiki\",\n\t\"taki\",\n\t\"tamaki\",\n\t\"toba\",\n\t\"tsu\",\n\t\"udono\",\n\t\"ureshino\",\n\t\"watarai\",\n\t\"yokkaichi\",\n\t\"furukawa\",\n\t\"higashimatsushima\",\n\t\"ishinomaki\",\n\t\"iwanuma\",\n\t\"kakuda\",\n\t\"kami\",\n\t\"kawasaki\",\n\t\"marumori\",\n\t\"matsushima\",\n\t\"minamisanriku\",\n\t\"misato\",\n\t\"murata\",\n\t\"natori\",\n\t\"ogawara\",\n\t\"ohira\",\n\t\"onagawa\",\n\t\"osaki\",\n\t\"rifu\",\n\t\"semine\",\n\t\"shibata\",\n\t\"shichikashuku\",\n\t\"shikama\",\n\t\"shiogama\",\n\t\"shiroishi\",\n\t\"tagajo\",\n\t\"taiwa\",\n\t\"tome\",\n\t\"tomiya\",\n\t\"wakuya\",\n\t\
"watari\",\n\t\"yamamoto\",\n\t\"zao\",\n\t\"aya\",\n\t\"ebino\",\n\t\"gokase\",\n\t\"hyuga\",\n\t\"kadogawa\",\n\t\"kawaminami\",\n\t\"kijo\",\n\t\"kitagawa\",\n\t\"kitakata\",\n\t\"kitaura\",\n\t\"kobayashi\",\n\t\"kunitomi\",\n\t\"kushima\",\n\t\"mimata\",\n\t\"miyakonojo\",\n\t\"miyazaki\",\n\t\"morotsuka\",\n\t\"nichinan\",\n\t\"nishimera\",\n\t\"nobeoka\",\n\t\"saito\",\n\t\"shiiba\",\n\t\"shintomi\",\n\t\"takaharu\",\n\t\"takanabe\",\n\t\"takazaki\",\n\t\"tsuno\",\n\t\"achi\",\n\t\"agematsu\",\n\t\"anan\",\n\t\"aoki\",\n\t\"asahi\",\n\t\"azumino\",\n\t\"chikuhoku\",\n\t\"chikuma\",\n\t\"chino\",\n\t\"fujimi\",\n\t\"hakuba\",\n\t\"hara\",\n\t\"hiraya\",\n\t\"iida\",\n\t\"iijima\",\n\t\"iiyama\",\n\t\"iizuna\",\n\t\"ikeda\",\n\t\"ikusaka\",\n\t\"ina\",\n\t\"karuizawa\",\n\t\"kawakami\",\n\t\"kiso\",\n\t\"kisofukushima\",\n\t\"kitaaiki\",\n\t\"komagane\",\n\t\"komoro\",\n\t\"matsukawa\",\n\t\"matsumoto\",\n\t\"miasa\",\n\t\"minamiaiki\",\n\t\"minamimaki\",\n\t\"minamiminowa\",\n\t\"minowa\",\n\t\"miyada\",\n\t\"miyota\",\n\t\"mochizuki\",\n\t\"nagano\",\n\t\"nagawa\",\n\t\"nagiso\",\n\t\"nakagawa\",\n\t\"nakano\",\n\t\"nozawaonsen\",\n\t\"obuse\",\n\t\"ogawa\",\n\t\"okaya\",\n\t\"omachi\",\n\t\"omi\",\n\t\"ookuwa\",\n\t\"ooshika\",\n\t\"otaki\",\n\t\"otari\",\n\t\"sakae\",\n\t\"sakaki\",\n\t\"saku\",\n\t\"sakuho\",\n\t\"shimosuwa\",\n\t\"shinanomachi\",\n\t\"shiojiri\",\n\t\"suwa\",\n\t\"suzaka\",\n\t\"takagi\",\n\t\"takamori\",\n\t\"takayama\",\n\t\"tateshina\",\n\t\"tatsuno\",\n\t\"togakushi\",\n\t\"togura\",\n\t\"tomi\",\n\t\"ueda\",\n\t\"wada\",\n\t\"yamagata\",\n\t\"yamanouchi\",\n\t\"yasaka\",\n\t\"yasuoka\",\n\t\"chijiwa\",\n\t\"futsu\",\n\t\"goto\",\n\t\"hasami\",\n\t\"hirado\",\n\t\"iki\",\n\t\"isahaya\",\n\t\"kawatana\",\n\t\"kuchinotsu\",\n\t\"matsuura\",\n\t\"nagasaki\",\n\t\"obama\",\n\t\"omura\",\n\t\"oseto\",\n\t\"saikai\",\n\t\"sasebo\",\n\t\"seihi\",\n\t\"shimabara\",\n\t\"shinkamigoto\",\n\t\"togitsu\",\n\t\"tsushima\",\n\t\"unzen\",\n\t\"city\
",\n\t\"ando\",\n\t\"gose\",\n\t\"heguri\",\n\t\"higashiyoshino\",\n\t\"ikaruga\",\n\t\"ikoma\",\n\t\"kamikitayama\",\n\t\"kanmaki\",\n\t\"kashiba\",\n\t\"kashihara\",\n\t\"katsuragi\",\n\t\"kawai\",\n\t\"kawakami\",\n\t\"kawanishi\",\n\t\"koryo\",\n\t\"kurotaki\",\n\t\"mitsue\",\n\t\"miyake\",\n\t\"nara\",\n\t\"nosegawa\",\n\t\"oji\",\n\t\"ouda\",\n\t\"oyodo\",\n\t\"sakurai\",\n\t\"sango\",\n\t\"shimoichi\",\n\t\"shimokitayama\",\n\t\"shinjo\",\n\t\"soni\",\n\t\"takatori\",\n\t\"tawaramoto\",\n\t\"tenkawa\",\n\t\"tenri\",\n\t\"uda\",\n\t\"yamatokoriyama\",\n\t\"yamatotakada\",\n\t\"yamazoe\",\n\t\"yoshino\",\n\t\"aga\",\n\t\"agano\",\n\t\"gosen\",\n\t\"itoigawa\",\n\t\"izumozaki\",\n\t\"joetsu\",\n\t\"kamo\",\n\t\"kariwa\",\n\t\"kashiwazaki\",\n\t\"minamiuonuma\",\n\t\"mitsuke\",\n\t\"muika\",\n\t\"murakami\",\n\t\"myoko\",\n\t\"nagaoka\",\n\t\"niigata\",\n\t\"ojiya\",\n\t\"omi\",\n\t\"sado\",\n\t\"sanjo\",\n\t\"seiro\",\n\t\"seirou\",\n\t\"sekikawa\",\n\t\"shibata\",\n\t\"tagami\",\n\t\"tainai\",\n\t\"tochio\",\n\t\"tokamachi\",\n\t\"tsubame\",\n\t\"tsunan\",\n\t\"uonuma\",\n\t\"yahiko\",\n\t\"yoita\",\n\t\"yuzawa\",\n\t\"beppu\",\n\t\"bungoono\",\n\t\"bungotakada\",\n\t\"hasama\",\n\t\"hiji\",\n\t\"himeshima\",\n\t\"hita\",\n\t\"kamitsue\",\n\t\"kokonoe\",\n\t\"kuju\",\n\t\"kunisaki\",\n\t\"kusu\",\n\t\"oita\",\n\t\"saiki\",\n\t\"taketa\",\n\t\"tsukumi\",\n\t\"usa\",\n\t\"usuki\",\n\t\"yufu\",\n\t\"akaiwa\",\n\t\"asakuchi\",\n\t\"bizen\",\n\t\"hayashima\",\n\t\"ibara\",\n\t\"kagamino\",\n\t\"kasaoka\",\n\t\"kibichuo\",\n\t\"kumenan\",\n\t\"kurashiki\",\n\t\"maniwa\",\n\t\"misaki\",\n\t\"nagi\",\n\t\"niimi\",\n\t\"nishiawakura\",\n\t\"okayama\",\n\t\"satosho\",\n\t\"setouchi\",\n\t\"shinjo\",\n\t\"shoo\",\n\t\"soja\",\n\t\"takahashi\",\n\t\"tamano\",\n\t\"tsuyama\",\n\t\"wake\",\n\t\"yakage\",\n\t\"aguni\",\n\t\"ginowan\",\n\t\"ginoza\",\n\t\"gushikami\",\n\t\"haebaru\",\n\t\"higashi\",\n\t\"hirara\",\n\t\"iheya\",\n\t\"ishigaki\",\n\t\"ishikawa\",\n\t\"itoman\",\
n\t\"izena\",\n\t\"kadena\",\n\t\"kin\",\n\t\"kitadaito\",\n\t\"kitanakagusuku\",\n\t\"kumejima\",\n\t\"kunigami\",\n\t\"minamidaito\",\n\t\"motobu\",\n\t\"nago\",\n\t\"naha\",\n\t\"nakagusuku\",\n\t\"nakijin\",\n\t\"nanjo\",\n\t\"nishihara\",\n\t\"ogimi\",\n\t\"okinawa\",\n\t\"onna\",\n\t\"shimoji\",\n\t\"taketomi\",\n\t\"tarama\",\n\t\"tokashiki\",\n\t\"tomigusuku\",\n\t\"tonaki\",\n\t\"urasoe\",\n\t\"uruma\",\n\t\"yaese\",\n\t\"yomitan\",\n\t\"yonabaru\",\n\t\"yonaguni\",\n\t\"zamami\",\n\t\"abeno\",\n\t\"chihayaakasaka\",\n\t\"chuo\",\n\t\"daito\",\n\t\"fujiidera\",\n\t\"habikino\",\n\t\"hannan\",\n\t\"higashiosaka\",\n\t\"higashisumiyoshi\",\n\t\"higashiyodogawa\",\n\t\"hirakata\",\n\t\"ibaraki\",\n\t\"ikeda\",\n\t\"izumi\",\n\t\"izumiotsu\",\n\t\"izumisano\",\n\t\"kadoma\",\n\t\"kaizuka\",\n\t\"kanan\",\n\t\"kashiwara\",\n\t\"katano\",\n\t\"kawachinagano\",\n\t\"kishiwada\",\n\t\"kita\",\n\t\"kumatori\",\n\t\"matsubara\",\n\t\"minato\",\n\t\"minoh\",\n\t\"misaki\",\n\t\"moriguchi\",\n\t\"neyagawa\",\n\t\"nishi\",\n\t\"nose\",\n\t\"osakasayama\",\n\t\"sakai\",\n\t\"sayama\",\n\t\"sennan\",\n\t\"settsu\",\n\t\"shijonawate\",\n\t\"shimamoto\",\n\t\"suita\",\n\t\"tadaoka\",\n\t\"taishi\",\n\t\"tajiri\",\n\t\"takaishi\",\n\t\"takatsuki\",\n\t\"tondabayashi\",\n\t\"toyonaka\",\n\t\"toyono\",\n\t\"yao\",\n\t\"ariake\",\n\t\"arita\",\n\t\"fukudomi\",\n\t\"genkai\",\n\t\"hamatama\",\n\t\"hizen\",\n\t\"imari\",\n\t\"kamimine\",\n\t\"kanzaki\",\n\t\"karatsu\",\n\t\"kashima\",\n\t\"kitagata\",\n\t\"kitahata\",\n\t\"kiyama\",\n\t\"kouhoku\",\n\t\"kyuragi\",\n\t\"nishiarita\",\n\t\"ogi\",\n\t\"omachi\",\n\t\"ouchi\",\n\t\"saga\",\n\t\"shiroishi\",\n\t\"taku\",\n\t\"tara\",\n\t\"tosu\",\n\t\"yoshinogari\",\n\t\"arakawa\",\n\t\"asaka\",\n\t\"chichibu\",\n\t\"fujimi\",\n\t\"fujimino\",\n\t\"fukaya\",\n\t\"hanno\",\n\t\"hanyu\",\n\t\"hasuda\",\n\t\"hatogaya\",\n\t\"hatoyama\",\n\t\"hidaka\",\n\t\"higashichichibu\",\n\t\"higashimatsuyama\",\n\t\"honjo\",\n\t\"ina\",\n\t\"iruma\"
,\n\t\"iwatsuki\",\n\t\"kamiizumi\",\n\t\"kamikawa\",\n\t\"kamisato\",\n\t\"kasukabe\",\n\t\"kawagoe\",\n\t\"kawaguchi\",\n\t\"kawajima\",\n\t\"kazo\",\n\t\"kitamoto\",\n\t\"koshigaya\",\n\t\"kounosu\",\n\t\"kuki\",\n\t\"kumagaya\",\n\t\"matsubushi\",\n\t\"minano\",\n\t\"misato\",\n\t\"miyashiro\",\n\t\"miyoshi\",\n\t\"moroyama\",\n\t\"nagatoro\",\n\t\"namegawa\",\n\t\"niiza\",\n\t\"ogano\",\n\t\"ogawa\",\n\t\"ogose\",\n\t\"okegawa\",\n\t\"omiya\",\n\t\"otaki\",\n\t\"ranzan\",\n\t\"ryokami\",\n\t\"saitama\",\n\t\"sakado\",\n\t\"satte\",\n\t\"sayama\",\n\t\"shiki\",\n\t\"shiraoka\",\n\t\"soka\",\n\t\"sugito\",\n\t\"toda\",\n\t\"tokigawa\",\n\t\"tokorozawa\",\n\t\"tsurugashima\",\n\t\"urawa\",\n\t\"warabi\",\n\t\"yashio\",\n\t\"yokoze\",\n\t\"yono\",\n\t\"yorii\",\n\t\"yoshida\",\n\t\"yoshikawa\",\n\t\"yoshimi\",\n\t\"city\",\n\t\"city\",\n\t\"aisho\",\n\t\"gamo\",\n\t\"higashiomi\",\n\t\"hikone\",\n\t\"koka\",\n\t\"konan\",\n\t\"kosei\",\n\t\"koto\",\n\t\"kusatsu\",\n\t\"maibara\",\n\t\"moriyama\",\n\t\"nagahama\",\n\t\"nishiazai\",\n\t\"notogawa\",\n\t\"omihachiman\",\n\t\"otsu\",\n\t\"ritto\",\n\t\"ryuoh\",\n\t\"takashima\",\n\t\"takatsuki\",\n\t\"torahime\",\n\t\"toyosato\",\n\t\"yasu\",\n\t\"akagi\",\n\t\"ama\",\n\t\"gotsu\",\n\t\"hamada\",\n\t\"higashiizumo\",\n\t\"hikawa\",\n\t\"hikimi\",\n\t\"izumo\",\n\t\"kakinoki\",\n\t\"masuda\",\n\t\"matsue\",\n\t\"misato\",\n\t\"nishinoshima\",\n\t\"ohda\",\n\t\"okinoshima\",\n\t\"okuizumo\",\n\t\"shimane\",\n\t\"tamayu\",\n\t\"tsuwano\",\n\t\"unnan\",\n\t\"yakumo\",\n\t\"yasugi\",\n\t\"yatsuka\",\n\t\"arai\",\n\t\"atami\",\n\t\"fuji\",\n\t\"fujieda\",\n\t\"fujikawa\",\n\t\"fujinomiya\",\n\t\"fukuroi\",\n\t\"gotemba\",\n\t\"haibara\",\n\t\"hamamatsu\",\n\t\"higashiizu\",\n\t\"ito\",\n\t\"iwata\",\n\t\"izu\",\n\t\"izunokuni\",\n\t\"kakegawa\",\n\t\"kannami\",\n\t\"kawanehon\",\n\t\"kawazu\",\n\t\"kikugawa\",\n\t\"kosai\",\n\t\"makinohara\",\n\t\"matsuzaki\",\n\t\"minamiizu\",\n\t\"mishima\",\n\t\"morimachi\",\n\t\"nishiizu
\",\n\t\"numazu\",\n\t\"omaezaki\",\n\t\"shimada\",\n\t\"shimizu\",\n\t\"shimoda\",\n\t\"shizuoka\",\n\t\"susono\",\n\t\"yaizu\",\n\t\"yoshida\",\n\t\"ashikaga\",\n\t\"bato\",\n\t\"haga\",\n\t\"ichikai\",\n\t\"iwafune\",\n\t\"kaminokawa\",\n\t\"kanuma\",\n\t\"karasuyama\",\n\t\"kuroiso\",\n\t\"mashiko\",\n\t\"mibu\",\n\t\"moka\",\n\t\"motegi\",\n\t\"nasu\",\n\t\"nasushiobara\",\n\t\"nikko\",\n\t\"nishikata\",\n\t\"nogi\",\n\t\"ohira\",\n\t\"ohtawara\",\n\t\"oyama\",\n\t\"sakura\",\n\t\"sano\",\n\t\"shimotsuke\",\n\t\"shioya\",\n\t\"takanezawa\",\n\t\"tochigi\",\n\t\"tsuga\",\n\t\"ujiie\",\n\t\"utsunomiya\",\n\t\"yaita\",\n\t\"aizumi\",\n\t\"anan\",\n\t\"ichiba\",\n\t\"itano\",\n\t\"kainan\",\n\t\"komatsushima\",\n\t\"matsushige\",\n\t\"mima\",\n\t\"minami\",\n\t\"miyoshi\",\n\t\"mugi\",\n\t\"nakagawa\",\n\t\"naruto\",\n\t\"sanagochi\",\n\t\"shishikui\",\n\t\"tokushima\",\n\t\"wajiki\",\n\t\"adachi\",\n\t\"akiruno\",\n\t\"akishima\",\n\t\"aogashima\",\n\t\"arakawa\",\n\t\"bunkyo\",\n\t\"chiyoda\",\n\t\"chofu\",\n\t\"chuo\",\n\t\"edogawa\",\n\t\"fuchu\",\n\t\"fussa\",\n\t\"hachijo\",\n\t\"hachioji\",\n\t\"hamura\",\n\t\"higashikurume\",\n\t\"higashimurayama\",\n\t\"higashiyamato\",\n\t\"hino\",\n\t\"hinode\",\n\t\"hinohara\",\n\t\"inagi\",\n\t\"itabashi\",\n\t\"katsushika\",\n\t\"kita\",\n\t\"kiyose\",\n\t\"kodaira\",\n\t\"koganei\",\n\t\"kokubunji\",\n\t\"komae\",\n\t\"koto\",\n\t\"kouzushima\",\n\t\"kunitachi\",\n\t\"machida\",\n\t\"meguro\",\n\t\"minato\",\n\t\"mitaka\",\n\t\"mizuho\",\n\t\"musashimurayama\",\n\t\"musashino\",\n\t\"nakano\",\n\t\"nerima\",\n\t\"ogasawara\",\n\t\"okutama\",\n\t\"ome\",\n\t\"oshima\",\n\t\"ota\",\n\t\"setagaya\",\n\t\"shibuya\",\n\t\"shinagawa\",\n\t\"shinjuku\",\n\t\"suginami\",\n\t\"sumida\",\n\t\"tachikawa\",\n\t\"taito\",\n\t\"tama\",\n\t\"toshima\",\n\t\"chizu\",\n\t\"hino\",\n\t\"kawahara\",\n\t\"koge\",\n\t\"kotoura\",\n\t\"misasa\",\n\t\"nanbu\",\n\t\"nichinan\",\n\t\"sakaiminato\",\n\t\"tottori\",\n\t\"wakasa\",\n\t\"yazu\",
\n\t\"yonago\",\n\t\"asahi\",\n\t\"fuchu\",\n\t\"fukumitsu\",\n\t\"funahashi\",\n\t\"himi\",\n\t\"imizu\",\n\t\"inami\",\n\t\"johana\",\n\t\"kamiichi\",\n\t\"kurobe\",\n\t\"nakaniikawa\",\n\t\"namerikawa\",\n\t\"nanto\",\n\t\"nyuzen\",\n\t\"oyabe\",\n\t\"taira\",\n\t\"takaoka\",\n\t\"tateyama\",\n\t\"toga\",\n\t\"tonami\",\n\t\"toyama\",\n\t\"unazuki\",\n\t\"uozu\",\n\t\"yamada\",\n\t\"arida\",\n\t\"aridagawa\",\n\t\"gobo\",\n\t\"hashimoto\",\n\t\"hidaka\",\n\t\"hirogawa\",\n\t\"inami\",\n\t\"iwade\",\n\t\"kainan\",\n\t\"kamitonda\",\n\t\"katsuragi\",\n\t\"kimino\",\n\t\"kinokawa\",\n\t\"kitayama\",\n\t\"koya\",\n\t\"koza\",\n\t\"kozagawa\",\n\t\"kudoyama\",\n\t\"kushimoto\",\n\t\"mihama\",\n\t\"misato\",\n\t\"nachikatsuura\",\n\t\"shingu\",\n\t\"shirahama\",\n\t\"taiji\",\n\t\"tanabe\",\n\t\"wakayama\",\n\t\"yuasa\",\n\t\"yura\",\n\t\"asahi\",\n\t\"funagata\",\n\t\"higashine\",\n\t\"iide\",\n\t\"kahoku\",\n\t\"kaminoyama\",\n\t\"kaneyama\",\n\t\"kawanishi\",\n\t\"mamurogawa\",\n\t\"mikawa\",\n\t\"murayama\",\n\t\"nagai\",\n\t\"nakayama\",\n\t\"nanyo\",\n\t\"nishikawa\",\n\t\"obanazawa\",\n\t\"oe\",\n\t\"oguni\",\n\t\"ohkura\",\n\t\"oishida\",\n\t\"sagae\",\n\t\"sakata\",\n\t\"sakegawa\",\n\t\"shinjo\",\n\t\"shirataka\",\n\t\"shonai\",\n\t\"takahata\",\n\t\"tendo\",\n\t\"tozawa\",\n\t\"tsuruoka\",\n\t\"yamagata\",\n\t\"yamanobe\",\n\t\"yonezawa\",\n\t\"yuza\",\n\t\"abu\",\n\t\"hagi\",\n\t\"hikari\",\n\t\"hofu\",\n\t\"iwakuni\",\n\t\"kudamatsu\",\n\t\"mitou\",\n\t\"nagato\",\n\t\"oshima\",\n\t\"shimonoseki\",\n\t\"shunan\",\n\t\"tabuse\",\n\t\"tokuyama\",\n\t\"toyota\",\n\t\"ube\",\n\t\"yuu\",\n\t\"chuo\",\n\t\"doshi\",\n\t\"fuefuki\",\n\t\"fujikawa\",\n\t\"fujikawaguchiko\",\n\t\"fujiyoshida\",\n\t\"hayakawa\",\n\t\"hokuto\",\n\t\"ichikawamisato\",\n\t\"kai\",\n\t\"kofu\",\n\t\"koshu\",\n\t\"kosuge\",\n\t\"minami-alps\",\n\t\"minobu\",\n\t\"nakamichi\",\n\t\"nanbu\",\n\t\"narusawa\",\n\t\"nirasaki\",\n\t\"nishikatsura\",\n\t\"oshino\",\n\t\"otsuki\",\n\t\"showa\",\n
\t\"tabayama\",\n\t\"tsuru\",\n\t\"uenohara\",\n\t\"yamanakako\",\n\t\"yamanashi\",\n\t\"city\",\n\t\"co\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"net\",\n\t\"org\",\n\t\"biz\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"info\",\n\t\"net\",\n\t\"org\",\n\t\"ass\",\n\t\"asso\",\n\t\"com\",\n\t\"coop\",\n\t\"edu\",\n\t\"gouv\",\n\t\"gov\",\n\t\"medecin\",\n\t\"mil\",\n\t\"nom\",\n\t\"notaires\",\n\t\"org\",\n\t\"pharmaciens\",\n\t\"prd\",\n\t\"presse\",\n\t\"tm\",\n\t\"veterinaire\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"org\",\n\t\"rep\",\n\t\"tra\",\n\t\"ac\",\n\t\"blogspot\",\n\t\"busan\",\n\t\"chungbuk\",\n\t\"chungnam\",\n\t\"co\",\n\t\"daegu\",\n\t\"daejeon\",\n\t\"es\",\n\t\"gangwon\",\n\t\"go\",\n\t\"gwangju\",\n\t\"gyeongbuk\",\n\t\"gyeonggi\",\n\t\"gyeongnam\",\n\t\"hs\",\n\t\"incheon\",\n\t\"jeju\",\n\t\"jeonbuk\",\n\t\"jeonnam\",\n\t\"kg\",\n\t\"mil\",\n\t\"ms\",\n\t\"ne\",\n\t\"or\",\n\t\"pe\",\n\t\"re\",\n\t\"sc\",\n\t\"seoul\",\n\t\"ulsan\",\n\t\"co\",\n\t\"edu\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"net\",\n\t\"nym\",\n\t\"org\",\n\t\"bnr\",\n\t\"c\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"info\",\n\t\"int\",\n\t\"net\",\n\t\"nym\",\n\t\"org\",\n\t\"per\",\n\t\"static\",\n\t\"dev\",\n\t\"sites\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"co\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"oy\",\n\t\"blogspot\",\n\t\"nom\",\n\t\"nym\",\n\t\"cyon\",\n\t\"mypep\",\n\t\"ac\",\n\t\"assn\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"grp\",\n\t\"hotel\",\n\t\"int\",\n\t\"ltd\",\n\t\"net\",\n\t\"ngo\",\n\t\"org\",\n\t\"sch\",\n\t\"soc\",\n\t\"web\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"co\",\n\t\"org\",\n\t\"blogspot\",\n\t\"gov\",\n\t\"nym\",\n\t\"blogspot\",\n\t\"nym\",\n\t\"asn\",\n\t\"com\",\n\t\"conf\",\n\t\"edu\",\n\t\"gov\",\n\t\"i
d\",\n\t\"mil\",\n\t\"net\",\n\t\"org\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"id\",\n\t\"med\",\n\t\"net\",\n\t\"org\",\n\t\"plc\",\n\t\"sch\",\n\t\"ac\",\n\t\"co\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"press\",\n\t\"router\",\n\t\"asso\",\n\t\"tm\",\n\t\"blogspot\",\n\t\"ac\",\n\t\"brasilia\",\n\t\"c66\",\n\t\"co\",\n\t\"daplie\",\n\t\"ddns\",\n\t\"diskstation\",\n\t\"dnsfor\",\n\t\"dscloud\",\n\t\"edu\",\n\t\"filegear\",\n\t\"gov\",\n\t\"hopto\",\n\t\"i234\",\n\t\"its\",\n\t\"loginto\",\n\t\"myds\",\n\t\"net\",\n\t\"noip\",\n\t\"nym\",\n\t\"org\",\n\t\"priv\",\n\t\"synology\",\n\t\"webhop\",\n\t\"wedeploy\",\n\t\"yombo\",\n\t\"localhost\",\n\t\"co\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"nom\",\n\t\"org\",\n\t\"prd\",\n\t\"tm\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"inf\",\n\t\"name\",\n\t\"net\",\n\t\"nom\",\n\t\"org\",\n\t\"com\",\n\t\"edu\",\n\t\"gouv\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"presse\",\n\t\"edu\",\n\t\"gov\",\n\t\"nyc\",\n\t\"org\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"dscloud\",\n\t\"blogspot\",\n\t\"gov\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"com\",\n\t\"edu\",\n\t\"net\",\n\t\"org\",\n\t\"blogspot\",\n\t\"ac\",\n\t\"co\",\n\t\"com\",\n\t\"gov\",\n\t\"net\",\n\t\"or\",\n\t\"org\",\n\t\"academy\",\n\t\"agriculture\",\n\t\"air\",\n\t\"airguard\",\n\t\"alabama\",\n\t\"alaska\",\n\t\"amber\",\n\t\"ambulance\",\n\t\"american\",\n\t\"americana\",\n\t\"americanantiques\",\n\t\"americanart\",\n\t\"amsterdam\",\n\t\"and\",\n\t\"annefrank\",\n\t\"anthro\",\n\t\"anthropology\",\n\t\"antiques\",\n\t\"aquarium\",\n\t\"arboretum\",\n\t\"archaeological\",\n\t\"archaeology\",\n\t\"architecture\",\n\t\"art\",\n\t\"artanddesign\",\n\t\"artcenter\",\n\t\"artdeco\",\n\t\"arteducation\",\n\t\"artgallery\",\n\t\"arts\",\n\t\"artsandcrafts\",\n\t\"asmatart\",\n\t\"assassination\",\n\t\"assisi\",\n\t\"association\",\n\t\"astronomy\",\n\t\"atlanta\",\n\t\"austin\",\n\
t\"australia\",\n\t\"automotive\",\n\t\"aviation\",\n\t\"axis\",\n\t\"badajoz\",\n\t\"baghdad\",\n\t\"bahn\",\n\t\"bale\",\n\t\"baltimore\",\n\t\"barcelona\",\n\t\"baseball\",\n\t\"basel\",\n\t\"baths\",\n\t\"bauern\",\n\t\"beauxarts\",\n\t\"beeldengeluid\",\n\t\"bellevue\",\n\t\"bergbau\",\n\t\"berkeley\",\n\t\"berlin\",\n\t\"bern\",\n\t\"bible\",\n\t\"bilbao\",\n\t\"bill\",\n\t\"birdart\",\n\t\"birthplace\",\n\t\"bonn\",\n\t\"boston\",\n\t\"botanical\",\n\t\"botanicalgarden\",\n\t\"botanicgarden\",\n\t\"botany\",\n\t\"brandywinevalley\",\n\t\"brasil\",\n\t\"bristol\",\n\t\"british\",\n\t\"britishcolumbia\",\n\t\"broadcast\",\n\t\"brunel\",\n\t\"brussel\",\n\t\"brussels\",\n\t\"bruxelles\",\n\t\"building\",\n\t\"burghof\",\n\t\"bus\",\n\t\"bushey\",\n\t\"cadaques\",\n\t\"california\",\n\t\"cambridge\",\n\t\"can\",\n\t\"canada\",\n\t\"capebreton\",\n\t\"carrier\",\n\t\"cartoonart\",\n\t\"casadelamoneda\",\n\t\"castle\",\n\t\"castres\",\n\t\"celtic\",\n\t\"center\",\n\t\"chattanooga\",\n\t\"cheltenham\",\n\t\"chesapeakebay\",\n\t\"chicago\",\n\t\"children\",\n\t\"childrens\",\n\t\"childrensgarden\",\n\t\"chiropractic\",\n\t\"chocolate\",\n\t\"christiansburg\",\n\t\"cincinnati\",\n\t\"cinema\",\n\t\"circus\",\n\t\"civilisation\",\n\t\"civilization\",\n\t\"civilwar\",\n\t\"clinton\",\n\t\"clock\",\n\t\"coal\",\n\t\"coastaldefence\",\n\t\"cody\",\n\t\"coldwar\",\n\t\"collection\",\n\t\"colonialwilliamsburg\",\n\t\"coloradoplateau\",\n\t\"columbia\",\n\t\"columbus\",\n\t\"communication\",\n\t\"communications\",\n\t\"community\",\n\t\"computer\",\n\t\"computerhistory\",\n\t\"contemporary\",\n\t\"contemporaryart\",\n\t\"convent\",\n\t\"copenhagen\",\n\t\"corporation\",\n\t\"corvette\",\n\t\"costume\",\n\t\"countryestate\",\n\t\"county\",\n\t\"crafts\",\n\t\"cranbrook\",\n\t\"creation\",\n\t\"cultural\",\n\t\"culturalcenter\",\n\t\"culture\",\n\t\"cyber\",\n\t\"cymru\",\n\t\"dali\",\n\t\"dallas\",\n\t\"database\",\n\t\"ddr\",\n\t\"decorativearts\",\n\t\"delaware\",\n\t\"del
menhorst\",\n\t\"denmark\",\n\t\"depot\",\n\t\"design\",\n\t\"detroit\",\n\t\"dinosaur\",\n\t\"discovery\",\n\t\"dolls\",\n\t\"donostia\",\n\t\"durham\",\n\t\"eastafrica\",\n\t\"eastcoast\",\n\t\"education\",\n\t\"educational\",\n\t\"egyptian\",\n\t\"eisenbahn\",\n\t\"elburg\",\n\t\"elvendrell\",\n\t\"embroidery\",\n\t\"encyclopedic\",\n\t\"england\",\n\t\"entomology\",\n\t\"environment\",\n\t\"environmentalconservation\",\n\t\"epilepsy\",\n\t\"essex\",\n\t\"estate\",\n\t\"ethnology\",\n\t\"exeter\",\n\t\"exhibition\",\n\t\"family\",\n\t\"farm\",\n\t\"farmequipment\",\n\t\"farmers\",\n\t\"farmstead\",\n\t\"field\",\n\t\"figueres\",\n\t\"filatelia\",\n\t\"film\",\n\t\"fineart\",\n\t\"finearts\",\n\t\"finland\",\n\t\"flanders\",\n\t\"florida\",\n\t\"force\",\n\t\"fortmissoula\",\n\t\"fortworth\",\n\t\"foundation\",\n\t\"francaise\",\n\t\"frankfurt\",\n\t\"franziskaner\",\n\t\"freemasonry\",\n\t\"freiburg\",\n\t\"fribourg\",\n\t\"frog\",\n\t\"fundacio\",\n\t\"furniture\",\n\t\"gallery\",\n\t\"garden\",\n\t\"gateway\",\n\t\"geelvinck\",\n\t\"gemological\",\n\t\"geology\",\n\t\"georgia\",\n\t\"giessen\",\n\t\"glas\",\n\t\"glass\",\n\t\"gorge\",\n\t\"grandrapids\",\n\t\"graz\",\n\t\"guernsey\",\n\t\"halloffame\",\n\t\"hamburg\",\n\t\"handson\",\n\t\"harvestcelebration\",\n\t\"hawaii\",\n\t\"health\",\n\t\"heimatunduhren\",\n\t\"hellas\",\n\t\"helsinki\",\n\t\"hembygdsforbund\",\n\t\"heritage\",\n\t\"histoire\",\n\t\"historical\",\n\t\"historicalsociety\",\n\t\"historichouses\",\n\t\"historisch\",\n\t\"historisches\",\n\t\"history\",\n\t\"historyofscience\",\n\t\"horology\",\n\t\"house\",\n\t\"humanities\",\n\t\"illustration\",\n\t\"imageandsound\",\n\t\"indian\",\n\t\"indiana\",\n\t\"indianapolis\",\n\t\"indianmarket\",\n\t\"intelligence\",\n\t\"interactive\",\n\t\"iraq\",\n\t\"iron\",\n\t\"isleofman\",\n\t\"jamison\",\n\t\"jefferson\",\n\t\"jerusalem\",\n\t\"jewelry\",\n\t\"jewish\",\n\t\"jewishart\",\n\t\"jfk\",\n\t\"journalism\",\n\t\"judaica\",\n\t\"judygarland\",\n\t
\"juedisches\",\n\t\"juif\",\n\t\"karate\",\n\t\"karikatur\",\n\t\"kids\",\n\t\"koebenhavn\",\n\t\"koeln\",\n\t\"kunst\",\n\t\"kunstsammlung\",\n\t\"kunstunddesign\",\n\t\"labor\",\n\t\"labour\",\n\t\"lajolla\",\n\t\"lancashire\",\n\t\"landes\",\n\t\"lans\",\n\t\"larsson\",\n\t\"lewismiller\",\n\t\"lincoln\",\n\t\"linz\",\n\t\"living\",\n\t\"livinghistory\",\n\t\"localhistory\",\n\t\"london\",\n\t\"losangeles\",\n\t\"louvre\",\n\t\"loyalist\",\n\t\"lucerne\",\n\t\"luxembourg\",\n\t\"luzern\",\n\t\"mad\",\n\t\"madrid\",\n\t\"mallorca\",\n\t\"manchester\",\n\t\"mansion\",\n\t\"mansions\",\n\t\"manx\",\n\t\"marburg\",\n\t\"maritime\",\n\t\"maritimo\",\n\t\"maryland\",\n\t\"marylhurst\",\n\t\"media\",\n\t\"medical\",\n\t\"medizinhistorisches\",\n\t\"meeres\",\n\t\"memorial\",\n\t\"mesaverde\",\n\t\"michigan\",\n\t\"midatlantic\",\n\t\"military\",\n\t\"mill\",\n\t\"miners\",\n\t\"mining\",\n\t\"minnesota\",\n\t\"missile\",\n\t\"missoula\",\n\t\"modern\",\n\t\"moma\",\n\t\"money\",\n\t\"monmouth\",\n\t\"monticello\",\n\t\"montreal\",\n\t\"moscow\",\n\t\"motorcycle\",\n\t\"muenchen\",\n\t\"muenster\",\n\t\"mulhouse\",\n\t\"muncie\",\n\t\"museet\",\n\t\"museumcenter\",\n\t\"museumvereniging\",\n\t\"music\",\n\t\"national\",\n\t\"nationalfirearms\",\n\t\"nationalheritage\",\n\t\"nativeamerican\",\n\t\"naturalhistory\",\n\t\"naturalhistorymuseum\",\n\t\"naturalsciences\",\n\t\"nature\",\n\t\"naturhistorisches\",\n\t\"natuurwetenschappen\",\n\t\"naumburg\",\n\t\"naval\",\n\t\"nebraska\",\n\t\"neues\",\n\t\"newhampshire\",\n\t\"newjersey\",\n\t\"newmexico\",\n\t\"newport\",\n\t\"newspaper\",\n\t\"newyork\",\n\t\"niepce\",\n\t\"norfolk\",\n\t\"north\",\n\t\"nrw\",\n\t\"nuernberg\",\n\t\"nuremberg\",\n\t\"nyc\",\n\t\"nyny\",\n\t\"oceanographic\",\n\t\"oceanographique\",\n\t\"omaha\",\n\t\"online\",\n\t\"ontario\",\n\t\"openair\",\n\t\"oregon\",\n\t\"oregontrail\",\n\t\"otago\",\n\t\"oxford\",\n\t\"pacific\",\n\t\"paderborn\",\n\t\"palace\",\n\t\"paleo\",\n\t\"palmsprings\",\n\t\"
panama\",\n\t\"paris\",\n\t\"pasadena\",\n\t\"pharmacy\",\n\t\"philadelphia\",\n\t\"philadelphiaarea\",\n\t\"philately\",\n\t\"phoenix\",\n\t\"photography\",\n\t\"pilots\",\n\t\"pittsburgh\",\n\t\"planetarium\",\n\t\"plantation\",\n\t\"plants\",\n\t\"plaza\",\n\t\"portal\",\n\t\"portland\",\n\t\"portlligat\",\n\t\"posts-and-telecommunications\",\n\t\"preservation\",\n\t\"presidio\",\n\t\"press\",\n\t\"project\",\n\t\"public\",\n\t\"pubol\",\n\t\"quebec\",\n\t\"railroad\",\n\t\"railway\",\n\t\"research\",\n\t\"resistance\",\n\t\"riodejaneiro\",\n\t\"rochester\",\n\t\"rockart\",\n\t\"roma\",\n\t\"russia\",\n\t\"saintlouis\",\n\t\"salem\",\n\t\"salvadordali\",\n\t\"salzburg\",\n\t\"sandiego\",\n\t\"sanfrancisco\",\n\t\"santabarbara\",\n\t\"santacruz\",\n\t\"santafe\",\n\t\"saskatchewan\",\n\t\"satx\",\n\t\"savannahga\",\n\t\"schlesisches\",\n\t\"schoenbrunn\",\n\t\"schokoladen\",\n\t\"school\",\n\t\"schweiz\",\n\t\"science\",\n\t\"science-fiction\",\n\t\"scienceandhistory\",\n\t\"scienceandindustry\",\n\t\"sciencecenter\",\n\t\"sciencecenters\",\n\t\"sciencehistory\",\n\t\"sciences\",\n\t\"sciencesnaturelles\",\n\t\"scotland\",\n\t\"seaport\",\n\t\"settlement\",\n\t\"settlers\",\n\t\"shell\",\n\t\"sherbrooke\",\n\t\"sibenik\",\n\t\"silk\",\n\t\"ski\",\n\t\"skole\",\n\t\"society\",\n\t\"sologne\",\n\t\"soundandvision\",\n\t\"southcarolina\",\n\t\"southwest\",\n\t\"space\",\n\t\"spy\",\n\t\"square\",\n\t\"stadt\",\n\t\"stalbans\",\n\t\"starnberg\",\n\t\"state\",\n\t\"stateofdelaware\",\n\t\"station\",\n\t\"steam\",\n\t\"steiermark\",\n\t\"stjohn\",\n\t\"stockholm\",\n\t\"stpetersburg\",\n\t\"stuttgart\",\n\t\"suisse\",\n\t\"surgeonshall\",\n\t\"surrey\",\n\t\"svizzera\",\n\t\"sweden\",\n\t\"sydney\",\n\t\"tank\",\n\t\"tcm\",\n\t\"technology\",\n\t\"telekommunikation\",\n\t\"television\",\n\t\"texas\",\n\t\"textile\",\n\t\"theater\",\n\t\"time\",\n\t\"timekeeping\",\n\t\"topology\",\n\t\"torino\",\n\t\"touch\",\n\t\"town\",\n\t\"transport\",\n\t\"tree\",\n\t\"trolley\",\n
\t\"trust\",\n\t\"trustee\",\n\t\"uhren\",\n\t\"ulm\",\n\t\"undersea\",\n\t\"university\",\n\t\"usa\",\n\t\"usantiques\",\n\t\"usarts\",\n\t\"uscountryestate\",\n\t\"usculture\",\n\t\"usdecorativearts\",\n\t\"usgarden\",\n\t\"ushistory\",\n\t\"ushuaia\",\n\t\"uslivinghistory\",\n\t\"utah\",\n\t\"uvic\",\n\t\"valley\",\n\t\"vantaa\",\n\t\"versailles\",\n\t\"viking\",\n\t\"village\",\n\t\"virginia\",\n\t\"virtual\",\n\t\"virtuel\",\n\t\"vlaanderen\",\n\t\"volkenkunde\",\n\t\"wales\",\n\t\"wallonie\",\n\t\"war\",\n\t\"washingtondc\",\n\t\"watch-and-clock\",\n\t\"watchandclock\",\n\t\"western\",\n\t\"westfalen\",\n\t\"whaling\",\n\t\"wildlife\",\n\t\"williamsburg\",\n\t\"windmill\",\n\t\"workshop\",\n\t\"xn--9dbhblg6di\",\n\t\"xn--comunicaes-v6a2o\",\n\t\"xn--correios-e-telecomunicaes-ghc29a\",\n\t\"xn--h1aegh\",\n\t\"xn--lns-qla\",\n\t\"york\",\n\t\"yorkshire\",\n\t\"yosemite\",\n\t\"youth\",\n\t\"zoological\",\n\t\"zoology\",\n\t\"aero\",\n\t\"biz\",\n\t\"com\",\n\t\"coop\",\n\t\"edu\",\n\t\"gov\",\n\t\"info\",\n\t\"int\",\n\t\"mil\",\n\t\"museum\",\n\t\"name\",\n\t\"net\",\n\t\"org\",\n\t\"pro\",\n\t\"ac\",\n\t\"biz\",\n\t\"co\",\n\t\"com\",\n\t\"coop\",\n\t\"edu\",\n\t\"gov\",\n\t\"int\",\n\t\"museum\",\n\t\"net\",\n\t\"org\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"gob\",\n\t\"net\",\n\t\"nym\",\n\t\"org\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"name\",\n\t\"net\",\n\t\"org\",\n\t\"ac\",\n\t\"adv\",\n\t\"co\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"net\",\n\t\"org\",\n\t\"ca\",\n\t\"cc\",\n\t\"co\",\n\t\"com\",\n\t\"dr\",\n\t\"in\",\n\t\"info\",\n\t\"mobi\",\n\t\"mx\",\n\t\"name\",\n\t\"or\",\n\t\"org\",\n\t\"pro\",\n\t\"school\",\n\t\"tv\",\n\t\"us\",\n\t\"ws\",\n\t\"her\",\n\t\"his\",\n\t\"forgot\",\n\t\"forgot\",\n\t\"asso\",\n\t\"nom\",\n\t\"alwaysdata\",\n\t\"at-band-camp\",\n\t\"azure-mobile\",\n\t\"azurewebsites\",\n\t\"barsy\",\n\t\"blogdns\",\n\t\"boomla\",\n\t\"bounceme\",\n\t\"bplaced\",\n\t\"broke-it\",\n\t\"buysho
uses\",\n\t\"casacam\",\n\t\"cdn77\",\n\t\"cdn77-ssl\",\n\t\"channelsdvr\",\n\t\"cloudaccess\",\n\t\"cloudapp\",\n\t\"cloudfront\",\n\t\"cloudfunctions\",\n\t\"cryptonomic\",\n\t\"ddns\",\n\t\"debian\",\n\t\"definima\",\n\t\"dnsalias\",\n\t\"dnsdojo\",\n\t\"does-it\",\n\t\"dontexist\",\n\t\"dsmynas\",\n\t\"dynalias\",\n\t\"dynathome\",\n\t\"dynu\",\n\t\"dynv6\",\n\t\"eating-organic\",\n\t\"endofinternet\",\n\t\"familyds\",\n\t\"fastly\",\n\t\"fastlylb\",\n\t\"feste-ip\",\n\t\"firewall-gateway\",\n\t\"flynnhosting\",\n\t\"from-az\",\n\t\"from-co\",\n\t\"from-la\",\n\t\"from-ny\",\n\t\"gb\",\n\t\"gets-it\",\n\t\"ham-radio-op\",\n\t\"homeftp\",\n\t\"homeip\",\n\t\"homelinux\",\n\t\"homeunix\",\n\t\"hu\",\n\t\"in\",\n\t\"in-the-band\",\n\t\"ipifony\",\n\t\"is-a-chef\",\n\t\"is-a-geek\",\n\t\"isa-geek\",\n\t\"jp\",\n\t\"kicks-ass\",\n\t\"knx-server\",\n\t\"moonscale\",\n\t\"mydissent\",\n\t\"myeffect\",\n\t\"myfritz\",\n\t\"mymediapc\",\n\t\"mypsx\",\n\t\"mysecuritycamera\",\n\t\"nhlfan\",\n\t\"no-ip\",\n\t\"office-on-the\",\n\t\"pgafan\",\n\t\"podzone\",\n\t\"privatizehealthinsurance\",\n\t\"rackmaze\",\n\t\"redirectme\",\n\t\"ru\",\n\t\"scrapper-site\",\n\t\"se\",\n\t\"selfip\",\n\t\"sells-it\",\n\t\"servebbs\",\n\t\"serveblog\",\n\t\"serveftp\",\n\t\"serveminecraft\",\n\t\"square7\",\n\t\"static-access\",\n\t\"sytes\",\n\t\"t3l3p0rt\",\n\t\"thruhere\",\n\t\"twmail\",\n\t\"uk\",\n\t\"webhop\",\n\t\"za\",\n\t\"r\",\n\t\"freetls\",\n\t\"map\",\n\t\"prod\",\n\t\"ssl\",\n\t\"a\",\n\t\"global\",\n\t\"a\",\n\t\"b\",\n\t\"global\",\n\t\"map\",\n\t\"alces\",\n\t\"arts\",\n\t\"com\",\n\t\"firm\",\n\t\"info\",\n\t\"net\",\n\t\"other\",\n\t\"per\",\n\t\"rec\",\n\t\"store\",\n\t\"web\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"i\",\n\t\"mil\",\n\t\"mobi\",\n\t\"name\",\n\t\"net\",\n\t\"org\",\n\t\"sch\",\n\t\"blogspot\",\n\t\"ac\",\n\t\"biz\",\n\t\"co\",\n\t\"com\",\n\t\"edu\",\n\t\"gob\",\n\t\"in\",\n\t\"info\",\n\t\"int\",\n\t\"mil\",\n\t\"net\",\n\t\"nom\",\n\t\"org\",\n\t\"web
\",\n\t\"blogspot\",\n\t\"bv\",\n\t\"cistron\",\n\t\"co\",\n\t\"demon\",\n\t\"transurl\",\n\t\"virtueeldomein\",\n\t\"aa\",\n\t\"aarborte\",\n\t\"aejrie\",\n\t\"afjord\",\n\t\"agdenes\",\n\t\"ah\",\n\t\"akershus\",\n\t\"aknoluokta\",\n\t\"akrehamn\",\n\t\"al\",\n\t\"alaheadju\",\n\t\"alesund\",\n\t\"algard\",\n\t\"alstahaug\",\n\t\"alta\",\n\t\"alvdal\",\n\t\"amli\",\n\t\"amot\",\n\t\"andasuolo\",\n\t\"andebu\",\n\t\"andoy\",\n\t\"ardal\",\n\t\"aremark\",\n\t\"arendal\",\n\t\"arna\",\n\t\"aseral\",\n\t\"asker\",\n\t\"askim\",\n\t\"askoy\",\n\t\"askvoll\",\n\t\"asnes\",\n\t\"audnedaln\",\n\t\"aukra\",\n\t\"aure\",\n\t\"aurland\",\n\t\"aurskog-holand\",\n\t\"austevoll\",\n\t\"austrheim\",\n\t\"averoy\",\n\t\"badaddja\",\n\t\"bahcavuotna\",\n\t\"bahccavuotna\",\n\t\"baidar\",\n\t\"bajddar\",\n\t\"balat\",\n\t\"balestrand\",\n\t\"ballangen\",\n\t\"balsfjord\",\n\t\"bamble\",\n\t\"bardu\",\n\t\"barum\",\n\t\"batsfjord\",\n\t\"bearalvahki\",\n\t\"beardu\",\n\t\"beiarn\",\n\t\"berg\",\n\t\"bergen\",\n\t\"berlevag\",\n\t\"bievat\",\n\t\"bindal\",\n\t\"birkenes\",\n\t\"bjarkoy\",\n\t\"bjerkreim\",\n\t\"bjugn\",\n\t\"blogspot\",\n\t\"bodo\",\n\t\"bokn\",\n\t\"bomlo\",\n\t\"bremanger\",\n\t\"bronnoy\",\n\t\"bronnoysund\",\n\t\"brumunddal\",\n\t\"bryne\",\n\t\"bu\",\n\t\"budejju\",\n\t\"buskerud\",\n\t\"bygland\",\n\t\"bykle\",\n\t\"cahcesuolo\",\n\t\"co\",\n\t\"davvenjarga\",\n\t\"davvesiida\",\n\t\"deatnu\",\n\t\"dep\",\n\t\"dielddanuorri\",\n\t\"divtasvuodna\",\n\t\"divttasvuotna\",\n\t\"donna\",\n\t\"dovre\",\n\t\"drammen\",\n\t\"drangedal\",\n\t\"drobak\",\n\t\"dyroy\",\n\t\"egersund\",\n\t\"eid\",\n\t\"eidfjord\",\n\t\"eidsberg\",\n\t\"eidskog\",\n\t\"eidsvoll\",\n\t\"eigersund\",\n\t\"elverum\",\n\t\"enebakk\",\n\t\"engerdal\",\n\t\"etne\",\n\t\"etnedal\",\n\t\"evenassi\",\n\t\"evenes\",\n\t\"evje-og-hornnes\",\n\t\"farsund\",\n\t\"fauske\",\n\t\"fedje\",\n\t\"fet\",\n\t\"fetsund\",\n\t\"fhs\",\n\t\"finnoy\",\n\t\"fitjar\",\n\t\"fjaler\",\n\t\"fjell\",\n\t\"fla\",\n\t\"f
lakstad\",\n\t\"flatanger\",\n\t\"flekkefjord\",\n\t\"flesberg\",\n\t\"flora\",\n\t\"floro\",\n\t\"fm\",\n\t\"folkebibl\",\n\t\"folldal\",\n\t\"forde\",\n\t\"forsand\",\n\t\"fosnes\",\n\t\"frana\",\n\t\"fredrikstad\",\n\t\"frei\",\n\t\"frogn\",\n\t\"froland\",\n\t\"frosta\",\n\t\"froya\",\n\t\"fuoisku\",\n\t\"fuossko\",\n\t\"fusa\",\n\t\"fylkesbibl\",\n\t\"fyresdal\",\n\t\"gaivuotna\",\n\t\"galsa\",\n\t\"gamvik\",\n\t\"gangaviika\",\n\t\"gaular\",\n\t\"gausdal\",\n\t\"giehtavuoatna\",\n\t\"gildeskal\",\n\t\"giske\",\n\t\"gjemnes\",\n\t\"gjerdrum\",\n\t\"gjerstad\",\n\t\"gjesdal\",\n\t\"gjovik\",\n\t\"gloppen\",\n\t\"gol\",\n\t\"gran\",\n\t\"grane\",\n\t\"granvin\",\n\t\"gratangen\",\n\t\"grimstad\",\n\t\"grong\",\n\t\"grue\",\n\t\"gulen\",\n\t\"guovdageaidnu\",\n\t\"ha\",\n\t\"habmer\",\n\t\"hadsel\",\n\t\"hagebostad\",\n\t\"halden\",\n\t\"halsa\",\n\t\"hamar\",\n\t\"hamaroy\",\n\t\"hammarfeasta\",\n\t\"hammerfest\",\n\t\"hapmir\",\n\t\"haram\",\n\t\"hareid\",\n\t\"harstad\",\n\t\"hasvik\",\n\t\"hattfjelldal\",\n\t\"haugesund\",\n\t\"hedmark\",\n\t\"hemne\",\n\t\"hemnes\",\n\t\"hemsedal\",\n\t\"herad\",\n\t\"hitra\",\n\t\"hjartdal\",\n\t\"hjelmeland\",\n\t\"hl\",\n\t\"hm\",\n\t\"hobol\",\n\t\"hof\",\n\t\"hokksund\",\n\t\"hol\",\n\t\"hole\",\n\t\"holmestrand\",\n\t\"holtalen\",\n\t\"honefoss\",\n\t\"hordaland\",\n\t\"hornindal\",\n\t\"horten\",\n\t\"hoyanger\",\n\t\"hoylandet\",\n\t\"hurdal\",\n\t\"hurum\",\n\t\"hvaler\",\n\t\"hyllestad\",\n\t\"ibestad\",\n\t\"idrett\",\n\t\"inderoy\",\n\t\"iveland\",\n\t\"ivgu\",\n\t\"jan-mayen\",\n\t\"jessheim\",\n\t\"jevnaker\",\n\t\"jolster\",\n\t\"jondal\",\n\t\"jorpeland\",\n\t\"kafjord\",\n\t\"karasjohka\",\n\t\"karasjok\",\n\t\"karlsoy\",\n\t\"karmoy\",\n\t\"kautokeino\",\n\t\"kirkenes\",\n\t\"klabu\",\n\t\"klepp\",\n\t\"kommune\",\n\t\"kongsberg\",\n\t\"kongsvinger\",\n\t\"kopervik\",\n\t\"kraanghke\",\n\t\"kragero\",\n\t\"kristiansand\",\n\t\"kristiansund\",\n\t\"krodsherad\",\n\t\"krokstadelva\",\n\t\"kvafjord\",\n\t\"kval
sund\",\n\t\"kvam\",\n\t\"kvanangen\",\n\t\"kvinesdal\",\n\t\"kvinnherad\",\n\t\"kviteseid\",\n\t\"kvitsoy\",\n\t\"laakesvuemie\",\n\t\"lahppi\",\n\t\"langevag\",\n\t\"lardal\",\n\t\"larvik\",\n\t\"lavagis\",\n\t\"lavangen\",\n\t\"leangaviika\",\n\t\"lebesby\",\n\t\"leikanger\",\n\t\"leirfjord\",\n\t\"leirvik\",\n\t\"leka\",\n\t\"leksvik\",\n\t\"lenvik\",\n\t\"lerdal\",\n\t\"lesja\",\n\t\"levanger\",\n\t\"lier\",\n\t\"lierne\",\n\t\"lillehammer\",\n\t\"lillesand\",\n\t\"lindas\",\n\t\"lindesnes\",\n\t\"loabat\",\n\t\"lodingen\",\n\t\"lom\",\n\t\"loppa\",\n\t\"lorenskog\",\n\t\"loten\",\n\t\"lund\",\n\t\"lunner\",\n\t\"luroy\",\n\t\"luster\",\n\t\"lyngdal\",\n\t\"lyngen\",\n\t\"malatvuopmi\",\n\t\"malselv\",\n\t\"malvik\",\n\t\"mandal\",\n\t\"marker\",\n\t\"marnardal\",\n\t\"masfjorden\",\n\t\"masoy\",\n\t\"matta-varjjat\",\n\t\"meland\",\n\t\"meldal\",\n\t\"melhus\",\n\t\"meloy\",\n\t\"meraker\",\n\t\"midsund\",\n\t\"midtre-gauldal\",\n\t\"mil\",\n\t\"mjondalen\",\n\t\"mo-i-rana\",\n\t\"moareke\",\n\t\"modalen\",\n\t\"modum\",\n\t\"molde\",\n\t\"more-og-romsdal\",\n\t\"mosjoen\",\n\t\"moskenes\",\n\t\"moss\",\n\t\"mosvik\",\n\t\"mr\",\n\t\"muosat\",\n\t\"museum\",\n\t\"naamesjevuemie\",\n\t\"namdalseid\",\n\t\"namsos\",\n\t\"namsskogan\",\n\t\"nannestad\",\n\t\"naroy\",\n\t\"narviika\",\n\t\"narvik\",\n\t\"naustdal\",\n\t\"navuotna\",\n\t\"nedre-eiker\",\n\t\"nesna\",\n\t\"nesodden\",\n\t\"nesoddtangen\",\n\t\"nesseby\",\n\t\"nesset\",\n\t\"nissedal\",\n\t\"nittedal\",\n\t\"nl\",\n\t\"nord-aurdal\",\n\t\"nord-fron\",\n\t\"nord-odal\",\n\t\"norddal\",\n\t\"nordkapp\",\n\t\"nordland\",\n\t\"nordre-land\",\n\t\"nordreisa\",\n\t\"nore-og-uvdal\",\n\t\"notodden\",\n\t\"notteroy\",\n\t\"nt\",\n\t\"odda\",\n\t\"of\",\n\t\"oksnes\",\n\t\"ol\",\n\t\"omasvuotna\",\n\t\"oppdal\",\n\t\"oppegard\",\n\t\"orkanger\",\n\t\"orkdal\",\n\t\"orland\",\n\t\"orskog\",\n\t\"orsta\",\n\t\"osen\",\n\t\"oslo\",\n\t\"osoyro\",\n\t\"osteroy\",\n\t\"ostfold\",\n\t\"ostre-toten\",\n\t\"overhalla
\",\n\t\"ovre-eiker\",\n\t\"oyer\",\n\t\"oygarden\",\n\t\"oystre-slidre\",\n\t\"porsanger\",\n\t\"porsangu\",\n\t\"porsgrunn\",\n\t\"priv\",\n\t\"rade\",\n\t\"radoy\",\n\t\"rahkkeravju\",\n\t\"raholt\",\n\t\"raisa\",\n\t\"rakkestad\",\n\t\"ralingen\",\n\t\"rana\",\n\t\"randaberg\",\n\t\"rauma\",\n\t\"rendalen\",\n\t\"rennebu\",\n\t\"rennesoy\",\n\t\"rindal\",\n\t\"ringebu\",\n\t\"ringerike\",\n\t\"ringsaker\",\n\t\"risor\",\n\t\"rissa\",\n\t\"rl\",\n\t\"roan\",\n\t\"rodoy\",\n\t\"rollag\",\n\t\"romsa\",\n\t\"romskog\",\n\t\"roros\",\n\t\"rost\",\n\t\"royken\",\n\t\"royrvik\",\n\t\"ruovat\",\n\t\"rygge\",\n\t\"salangen\",\n\t\"salat\",\n\t\"saltdal\",\n\t\"samnanger\",\n\t\"sandefjord\",\n\t\"sandnes\",\n\t\"sandnessjoen\",\n\t\"sandoy\",\n\t\"sarpsborg\",\n\t\"sauda\",\n\t\"sauherad\",\n\t\"sel\",\n\t\"selbu\",\n\t\"selje\",\n\t\"seljord\",\n\t\"sf\",\n\t\"siellak\",\n\t\"sigdal\",\n\t\"siljan\",\n\t\"sirdal\",\n\t\"skanit\",\n\t\"skanland\",\n\t\"skaun\",\n\t\"skedsmo\",\n\t\"skedsmokorset\",\n\t\"ski\",\n\t\"skien\",\n\t\"skierva\",\n\t\"skiptvet\",\n\t\"skjak\",\n\t\"skjervoy\",\n\t\"skodje\",\n\t\"slattum\",\n\t\"smola\",\n\t\"snaase\",\n\t\"snasa\",\n\t\"snillfjord\",\n\t\"snoasa\",\n\t\"sogndal\",\n\t\"sogne\",\n\t\"sokndal\",\n\t\"sola\",\n\t\"solund\",\n\t\"somna\",\n\t\"sondre-land\",\n\t\"songdalen\",\n\t\"sor-aurdal\",\n\t\"sor-fron\",\n\t\"sor-odal\",\n\t\"sor-varanger\",\n\t\"sorfold\",\n\t\"sorreisa\",\n\t\"sortland\",\n\t\"sorum\",\n\t\"spjelkavik\",\n\t\"spydeberg\",\n\t\"st\",\n\t\"stange\",\n\t\"stat\",\n\t\"stathelle\",\n\t\"stavanger\",\n\t\"stavern\",\n\t\"steigen\",\n\t\"steinkjer\",\n\t\"stjordal\",\n\t\"stjordalshalsen\",\n\t\"stokke\",\n\t\"stor-elvdal\",\n\t\"stord\",\n\t\"stordal\",\n\t\"storfjord\",\n\t\"strand\",\n\t\"stranda\",\n\t\"stryn\",\n\t\"sula\",\n\t\"suldal\",\n\t\"sund\",\n\t\"sunndal\",\n\t\"surnadal\",\n\t\"svalbard\",\n\t\"sveio\",\n\t\"svelvik\",\n\t\"sykkylven\",\n\t\"tana\",\n\t\"tananger\",\n\t\"telemark\",\n\t\"time\",
\n\t\"tingvoll\",\n\t\"tinn\",\n\t\"tjeldsund\",\n\t\"tjome\",\n\t\"tm\",\n\t\"tokke\",\n\t\"tolga\",\n\t\"tonsberg\",\n\t\"torsken\",\n\t\"tr\",\n\t\"trana\",\n\t\"tranby\",\n\t\"tranoy\",\n\t\"troandin\",\n\t\"trogstad\",\n\t\"tromsa\",\n\t\"tromso\",\n\t\"trondheim\",\n\t\"trysil\",\n\t\"tvedestrand\",\n\t\"tydal\",\n\t\"tynset\",\n\t\"tysfjord\",\n\t\"tysnes\",\n\t\"tysvar\",\n\t\"ullensaker\",\n\t\"ullensvang\",\n\t\"ulvik\",\n\t\"unjarga\",\n\t\"utsira\",\n\t\"va\",\n\t\"vaapste\",\n\t\"vadso\",\n\t\"vaga\",\n\t\"vagan\",\n\t\"vagsoy\",\n\t\"vaksdal\",\n\t\"valle\",\n\t\"vang\",\n\t\"vanylven\",\n\t\"vardo\",\n\t\"varggat\",\n\t\"varoy\",\n\t\"vefsn\",\n\t\"vega\",\n\t\"vegarshei\",\n\t\"vennesla\",\n\t\"verdal\",\n\t\"verran\",\n\t\"vestby\",\n\t\"vestfold\",\n\t\"vestnes\",\n\t\"vestre-slidre\",\n\t\"vestre-toten\",\n\t\"vestvagoy\",\n\t\"vevelstad\",\n\t\"vf\",\n\t\"vgs\",\n\t\"vik\",\n\t\"vikna\",\n\t\"vindafjord\",\n\t\"voagat\",\n\t\"volda\",\n\t\"voss\",\n\t\"vossevangen\",\n\t\"xn--andy-ira\",\n\t\"xn--asky-ira\",\n\t\"xn--aurskog-hland-jnb\",\n\t\"xn--avery-yua\",\n\t\"xn--bdddj-mrabd\",\n\t\"xn--bearalvhki-y4a\",\n\t\"xn--berlevg-jxa\",\n\t\"xn--bhcavuotna-s4a\",\n\t\"xn--bhccavuotna-k7a\",\n\t\"xn--bidr-5nac\",\n\t\"xn--bievt-0qa\",\n\t\"xn--bjarky-fya\",\n\t\"xn--bjddar-pta\",\n\t\"xn--blt-elab\",\n\t\"xn--bmlo-gra\",\n\t\"xn--bod-2na\",\n\t\"xn--brnny-wuac\",\n\t\"xn--brnnysund-m8ac\",\n\t\"xn--brum-voa\",\n\t\"xn--btsfjord-9za\",\n\t\"xn--davvenjrga-y4a\",\n\t\"xn--dnna-gra\",\n\t\"xn--drbak-wua\",\n\t\"xn--dyry-ira\",\n\t\"xn--eveni-0qa01ga\",\n\t\"xn--finny-yua\",\n\t\"xn--fjord-lra\",\n\t\"xn--fl-zia\",\n\t\"xn--flor-jra\",\n\t\"xn--frde-gra\",\n\t\"xn--frna-woa\",\n\t\"xn--frya-hra\",\n\t\"xn--ggaviika-8ya47h\",\n\t\"xn--gildeskl-g0a\",\n\t\"xn--givuotna-8ya\",\n\t\"xn--gjvik-wua\",\n\t\"xn--gls-elac\",\n\t\"xn--h-2fa\",\n\t\"xn--hbmer-xqa\",\n\t\"xn--hcesuolo-7ya35b\",\n\t\"xn--hgebostad-g3a\",\n\t\"xn--hmmrfeasta-s4ac\",\n\t\"xn--hnefoss-q1
a\",\n\t\"xn--hobl-ira\",\n\t\"xn--holtlen-hxa\",\n\t\"xn--hpmir-xqa\",\n\t\"xn--hyanger-q1a\",\n\t\"xn--hylandet-54a\",\n\t\"xn--indery-fya\",\n\t\"xn--jlster-bya\",\n\t\"xn--jrpeland-54a\",\n\t\"xn--karmy-yua\",\n\t\"xn--kfjord-iua\",\n\t\"xn--klbu-woa\",\n\t\"xn--koluokta-7ya57h\",\n\t\"xn--krager-gya\",\n\t\"xn--kranghke-b0a\",\n\t\"xn--krdsherad-m8a\",\n\t\"xn--krehamn-dxa\",\n\t\"xn--krjohka-hwab49j\",\n\t\"xn--ksnes-uua\",\n\t\"xn--kvfjord-nxa\",\n\t\"xn--kvitsy-fya\",\n\t\"xn--kvnangen-k0a\",\n\t\"xn--l-1fa\",\n\t\"xn--laheadju-7ya\",\n\t\"xn--langevg-jxa\",\n\t\"xn--ldingen-q1a\",\n\t\"xn--leagaviika-52b\",\n\t\"xn--lesund-hua\",\n\t\"xn--lgrd-poac\",\n\t\"xn--lhppi-xqa\",\n\t\"xn--linds-pra\",\n\t\"xn--loabt-0qa\",\n\t\"xn--lrdal-sra\",\n\t\"xn--lrenskog-54a\",\n\t\"xn--lt-liac\",\n\t\"xn--lten-gra\",\n\t\"xn--lury-ira\",\n\t\"xn--mely-ira\",\n\t\"xn--merker-kua\",\n\t\"xn--mjndalen-64a\",\n\t\"xn--mlatvuopmi-s4a\",\n\t\"xn--mli-tla\",\n\t\"xn--mlselv-iua\",\n\t\"xn--moreke-jua\",\n\t\"xn--mosjen-eya\",\n\t\"xn--mot-tla\",\n\t\"xn--mre-og-romsdal-qqb\",\n\t\"xn--msy-ula0h\",\n\t\"xn--mtta-vrjjat-k7af\",\n\t\"xn--muost-0qa\",\n\t\"xn--nmesjevuemie-tcba\",\n\t\"xn--nry-yla5g\",\n\t\"xn--nttery-byae\",\n\t\"xn--nvuotna-hwa\",\n\t\"xn--oppegrd-ixa\",\n\t\"xn--ostery-fya\",\n\t\"xn--osyro-wua\",\n\t\"xn--porsgu-sta26f\",\n\t\"xn--rady-ira\",\n\t\"xn--rdal-poa\",\n\t\"xn--rde-ula\",\n\t\"xn--rdy-0nab\",\n\t\"xn--rennesy-v1a\",\n\t\"xn--rhkkervju-01af\",\n\t\"xn--rholt-mra\",\n\t\"xn--risa-5na\",\n\t\"xn--risr-ira\",\n\t\"xn--rland-uua\",\n\t\"xn--rlingen-mxa\",\n\t\"xn--rmskog-bya\",\n\t\"xn--rros-gra\",\n\t\"xn--rskog-uua\",\n\t\"xn--rst-0na\",\n\t\"xn--rsta-fra\",\n\t\"xn--ryken-vua\",\n\t\"xn--ryrvik-bya\",\n\t\"xn--s-1fa\",\n\t\"xn--sandnessjen-ogb\",\n\t\"xn--sandy-yua\",\n\t\"xn--seral-lra\",\n\t\"xn--sgne-gra\",\n\t\"xn--skierv-uta\",\n\t\"xn--skjervy-v1a\",\n\t\"xn--skjk-soa\",\n\t\"xn--sknit-yqa\",\n\t\"xn--sknland-fxa\",\n\t\"xn--slat-5na\",\n\t\"xn--s
lt-elab\",\n\t\"xn--smla-hra\",\n\t\"xn--smna-gra\",\n\t\"xn--snase-nra\",\n\t\"xn--sndre-land-0cb\",\n\t\"xn--snes-poa\",\n\t\"xn--snsa-roa\",\n\t\"xn--sr-aurdal-l8a\",\n\t\"xn--sr-fron-q1a\",\n\t\"xn--sr-odal-q1a\",\n\t\"xn--sr-varanger-ggb\",\n\t\"xn--srfold-bya\",\n\t\"xn--srreisa-q1a\",\n\t\"xn--srum-gra\",\n\t\"xn--stfold-9xa\",\n\t\"xn--stjrdal-s1a\",\n\t\"xn--stjrdalshalsen-sqb\",\n\t\"xn--stre-toten-zcb\",\n\t\"xn--tjme-hra\",\n\t\"xn--tnsberg-q1a\",\n\t\"xn--trany-yua\",\n\t\"xn--trgstad-r1a\",\n\t\"xn--trna-woa\",\n\t\"xn--troms-zua\",\n\t\"xn--tysvr-vra\",\n\t\"xn--unjrga-rta\",\n\t\"xn--vads-jra\",\n\t\"xn--vard-jra\",\n\t\"xn--vegrshei-c0a\",\n\t\"xn--vestvgy-ixa6o\",\n\t\"xn--vg-yiab\",\n\t\"xn--vgan-qoa\",\n\t\"xn--vgsy-qoa0j\",\n\t\"xn--vre-eiker-k8a\",\n\t\"xn--vrggt-xqad\",\n\t\"xn--vry-yla5g\",\n\t\"xn--yer-zna\",\n\t\"xn--ygarden-p1a\",\n\t\"xn--ystre-slidre-ujb\",\n\t\"gs\",\n\t\"gs\",\n\t\"nes\",\n\t\"gs\",\n\t\"nes\",\n\t\"gs\",\n\t\"os\",\n\t\"valer\",\n\t\"xn--vler-qoa\",\n\t\"gs\",\n\t\"gs\",\n\t\"os\",\n\t\"gs\",\n\t\"heroy\",\n\t\"sande\",\n\t\"gs\",\n\t\"gs\",\n\t\"bo\",\n\t\"heroy\",\n\t\"xn--b-5ga\",\n\t\"xn--hery-ira\",\n\t\"gs\",\n\t\"gs\",\n\t\"gs\",\n\t\"gs\",\n\t\"valer\",\n\t\"gs\",\n\t\"gs\",\n\t\"gs\",\n\t\"gs\",\n\t\"bo\",\n\t\"xn--b-5ga\",\n\t\"gs\",\n\t\"gs\",\n\t\"gs\",\n\t\"sande\",\n\t\"gs\",\n\t\"sande\",\n\t\"xn--hery-ira\",\n\t\"xn--vler-qoa\",\n\t\"biz\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"info\",\n\t\"net\",\n\t\"org\",\n\t\"merseine\",\n\t\"mine\",\n\t\"nom\",\n\t\"shacknet\",\n\t\"ac\",\n\t\"co\",\n\t\"cri\",\n\t\"geek\",\n\t\"gen\",\n\t\"govt\",\n\t\"health\",\n\t\"iwi\",\n\t\"kiwi\",\n\t\"maori\",\n\t\"mil\",\n\t\"net\",\n\t\"nym\",\n\t\"org\",\n\t\"parliament\",\n\t\"school\",\n\t\"xn--mori-qsa\",\n\t\"blogspot\",\n\t\"co\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"med\",\n\t\"museum\",\n\t\"net\",\n\t\"org\",\n\t\"pro\",\n\t\"homelink\",\n\t\"barsy\",\n\t\"accesscam\",\n\t\"ae\",\n\t\"amune\",\n\t\"blogd
ns\",\n\t\"blogsite\",\n\t\"bmoattachments\",\n\t\"boldlygoingnowhere\",\n\t\"cable-modem\",\n\t\"camdvr\",\n\t\"cdn77\",\n\t\"cdn77-secure\",\n\t\"certmgr\",\n\t\"cloudns\",\n\t\"collegefan\",\n\t\"couchpotatofries\",\n\t\"ddnss\",\n\t\"diskstation\",\n\t\"dnsalias\",\n\t\"dnsdojo\",\n\t\"doesntexist\",\n\t\"dontexist\",\n\t\"doomdns\",\n\t\"dsmynas\",\n\t\"duckdns\",\n\t\"dvrdns\",\n\t\"dynalias\",\n\t\"dyndns\",\n\t\"endofinternet\",\n\t\"endoftheinternet\",\n\t\"eu\",\n\t\"familyds\",\n\t\"fedorainfracloud\",\n\t\"fedorapeople\",\n\t\"fedoraproject\",\n\t\"freeddns\",\n\t\"from-me\",\n\t\"game-host\",\n\t\"gotdns\",\n\t\"hepforge\",\n\t\"hk\",\n\t\"hobby-site\",\n\t\"homedns\",\n\t\"homeftp\",\n\t\"homelinux\",\n\t\"homeunix\",\n\t\"hopto\",\n\t\"is-a-bruinsfan\",\n\t\"is-a-candidate\",\n\t\"is-a-celticsfan\",\n\t\"is-a-chef\",\n\t\"is-a-geek\",\n\t\"is-a-knight\",\n\t\"is-a-linux-user\",\n\t\"is-a-patsfan\",\n\t\"is-a-soxfan\",\n\t\"is-found\",\n\t\"is-lost\",\n\t\"is-saved\",\n\t\"is-very-bad\",\n\t\"is-very-evil\",\n\t\"is-very-good\",\n\t\"is-very-nice\",\n\t\"is-very-sweet\",\n\t\"isa-geek\",\n\t\"js\",\n\t\"kicks-ass\",\n\t\"misconfused\",\n\t\"mlbfan\",\n\t\"my-firewall\",\n\t\"myfirewall\",\n\t\"myftp\",\n\t\"mysecuritycamera\",\n\t\"mywire\",\n\t\"nflfan\",\n\t\"no-ip\",\n\t\"pimienta\",\n\t\"podzone\",\n\t\"poivron\",\n\t\"potager\",\n\t\"read-books\",\n\t\"readmyblog\",\n\t\"selfip\",\n\t\"sellsyourhome\",\n\t\"servebbs\",\n\t\"serveftp\",\n\t\"servegame\",\n\t\"spdns\",\n\t\"stuff-4-sale\",\n\t\"sweetpepper\",\n\t\"tunk\",\n\t\"tuxfamily\",\n\t\"twmail\",\n\t\"ufcfan\",\n\t\"us\",\n\t\"webhop\",\n\t\"webredirect\",\n\t\"wmflabs\",\n\t\"za\",\n\t\"zapto\",\n\t\"tele\",\n\t\"c\",\n\t\"rsc\",\n\t\"origin\",\n\t\"ssl\",\n\t\"go\",\n\t\"home\",\n\t\"al\",\n\t\"asso\",\n\t\"at\",\n\t\"au\",\n\t\"be\",\n\t\"bg\",\n\t\"ca\",\n\t\"cd\",\n\t\"ch\",\n\t\"cn\",\n\t\"cy\",\n\t\"cz\",\n\t\"de\",\n\t\"dk\",\n\t\"edu\",\n\t\"ee\",\n\t\"es\",\n\t\"fi\",\n\t\"fr\",\n\
t\"gr\",\n\t\"hr\",\n\t\"hu\",\n\t\"ie\",\n\t\"il\",\n\t\"in\",\n\t\"int\",\n\t\"is\",\n\t\"it\",\n\t\"jp\",\n\t\"kr\",\n\t\"lt\",\n\t\"lu\",\n\t\"lv\",\n\t\"mc\",\n\t\"me\",\n\t\"mk\",\n\t\"mt\",\n\t\"my\",\n\t\"net\",\n\t\"ng\",\n\t\"nl\",\n\t\"no\",\n\t\"nz\",\n\t\"paris\",\n\t\"pl\",\n\t\"pt\",\n\t\"q-a\",\n\t\"ro\",\n\t\"ru\",\n\t\"se\",\n\t\"si\",\n\t\"sk\",\n\t\"tr\",\n\t\"uk\",\n\t\"us\",\n\t\"cloud\",\n\t\"nerdpol\",\n\t\"abo\",\n\t\"ac\",\n\t\"com\",\n\t\"edu\",\n\t\"gob\",\n\t\"ing\",\n\t\"med\",\n\t\"net\",\n\t\"nom\",\n\t\"org\",\n\t\"sld\",\n\t\"ybo\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"gob\",\n\t\"mil\",\n\t\"net\",\n\t\"nom\",\n\t\"nym\",\n\t\"org\",\n\t\"com\",\n\t\"edu\",\n\t\"org\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"i\",\n\t\"mil\",\n\t\"net\",\n\t\"ngo\",\n\t\"org\",\n\t\"1337\",\n\t\"biz\",\n\t\"com\",\n\t\"edu\",\n\t\"fam\",\n\t\"gob\",\n\t\"gok\",\n\t\"gon\",\n\t\"gop\",\n\t\"gos\",\n\t\"gov\",\n\t\"info\",\n\t\"net\",\n\t\"org\",\n\t\"web\",\n\t\"agro\",\n\t\"aid\",\n\t\"art\",\n\t\"atm\",\n\t\"augustow\",\n\t\"auto\",\n\t\"babia-gora\",\n\t\"bedzin\",\n\t\"beep\",\n\t\"beskidy\",\n\t\"bialowieza\",\n\t\"bialystok\",\n\t\"bielawa\",\n\t\"bieszczady\",\n\t\"biz\",\n\t\"boleslawiec\",\n\t\"bydgoszcz\",\n\t\"bytom\",\n\t\"cieszyn\",\n\t\"co\",\n\t\"com\",\n\t\"czeladz\",\n\t\"czest\",\n\t\"dlugoleka\",\n\t\"edu\",\n\t\"elblag\",\n\t\"elk\",\n\t\"gda\",\n\t\"gdansk\",\n\t\"gdynia\",\n\t\"gliwice\",\n\t\"glogow\",\n\t\"gmina\",\n\t\"gniezno\",\n\t\"gorlice\",\n\t\"gov\",\n\t\"grajewo\",\n\t\"gsm\",\n\t\"ilawa\",\n\t\"info\",\n\t\"jaworzno\",\n\t\"jelenia-gora\",\n\t\"jgora\",\n\t\"kalisz\",\n\t\"karpacz\",\n\t\"kartuzy\",\n\t\"kaszuby\",\n\t\"katowice\",\n\t\"kazimierz-dolny\",\n\t\"kepno\",\n\t\"ketrzyn\",\n\t\"klodzko\",\n\t\"kobierzyce\",\n\t\"kolobrzeg\",\n\t\"konin\",\n\t\"konskowola\",\n\t\"krakow\",\n\t\"kutno\",\n\t\"lapy\",\n\t\"lebork\",\n\t\"legnica\",\n\t\"lezajsk\",\n\t\"limanowa\",\n\t\"lomza\",\n\t\"lowicz\",\n\t\"
lubin\",\n\t\"lukow\",\n\t\"mail\",\n\t\"malbork\",\n\t\"malopolska\",\n\t\"mazowsze\",\n\t\"mazury\",\n\t\"med\",\n\t\"media\",\n\t\"miasta\",\n\t\"mielec\",\n\t\"mielno\",\n\t\"mil\",\n\t\"mragowo\",\n\t\"naklo\",\n\t\"net\",\n\t\"nieruchomosci\",\n\t\"nom\",\n\t\"nowaruda\",\n\t\"nysa\",\n\t\"olawa\",\n\t\"olecko\",\n\t\"olkusz\",\n\t\"olsztyn\",\n\t\"opoczno\",\n\t\"opole\",\n\t\"org\",\n\t\"ostroda\",\n\t\"ostroleka\",\n\t\"ostrowiec\",\n\t\"ostrowwlkp\",\n\t\"pc\",\n\t\"pila\",\n\t\"pisz\",\n\t\"podhale\",\n\t\"podlasie\",\n\t\"polkowice\",\n\t\"pomorskie\",\n\t\"pomorze\",\n\t\"powiat\",\n\t\"poznan\",\n\t\"priv\",\n\t\"prochowice\",\n\t\"pruszkow\",\n\t\"przeworsk\",\n\t\"pulawy\",\n\t\"radom\",\n\t\"rawa-maz\",\n\t\"realestate\",\n\t\"rel\",\n\t\"rybnik\",\n\t\"rzeszow\",\n\t\"sanok\",\n\t\"sejny\",\n\t\"sex\",\n\t\"shop\",\n\t\"sklep\",\n\t\"skoczow\",\n\t\"slask\",\n\t\"slupsk\",\n\t\"sopot\",\n\t\"sos\",\n\t\"sosnowiec\",\n\t\"stalowa-wola\",\n\t\"starachowice\",\n\t\"stargard\",\n\t\"suwalki\",\n\t\"swidnica\",\n\t\"swiebodzin\",\n\t\"swinoujscie\",\n\t\"szczecin\",\n\t\"szczytno\",\n\t\"szkola\",\n\t\"targi\",\n\t\"tarnobrzeg\",\n\t\"tgory\",\n\t\"tm\",\n\t\"tourism\",\n\t\"travel\",\n\t\"turek\",\n\t\"turystyka\",\n\t\"tychy\",\n\t\"ustka\",\n\t\"walbrzych\",\n\t\"warmia\",\n\t\"warszawa\",\n\t\"waw\",\n\t\"wegrow\",\n\t\"wielun\",\n\t\"wlocl\",\n\t\"wloclawek\",\n\t\"wodzislaw\",\n\t\"wolomin\",\n\t\"wroc\",\n\t\"wroclaw\",\n\t\"zachpomor\",\n\t\"zagan\",\n\t\"zakopane\",\n\t\"zarow\",\n\t\"zgora\",\n\t\"zgorzelec\",\n\t\"ap\",\n\t\"griw\",\n\t\"ic\",\n\t\"is\",\n\t\"kmpsp\",\n\t\"konsulat\",\n\t\"kppsp\",\n\t\"kwp\",\n\t\"kwpsp\",\n\t\"mup\",\n\t\"mw\",\n\t\"oirm\",\n\t\"oum\",\n\t\"pa\",\n\t\"pinb\",\n\t\"piw\",\n\t\"po\",\n\t\"psp\",\n\t\"psse\",\n\t\"pup\",\n\t\"rzgw\",\n\t\"sa\",\n\t\"sdn\",\n\t\"sko\",\n\t\"so\",\n\t\"sr\",\n\t\"starostwo\",\n\t\"ug\",\n\t\"ugim\",\n\t\"um\",\n\t\"umig\",\n\t\"upow\",\n\t\"uppo\",\n\t\"us\",\n\t\"uw\",\n\t\"uzs
\",\n\t\"wif\",\n\t\"wiih\",\n\t\"winb\",\n\t\"wios\",\n\t\"witd\",\n\t\"wiw\",\n\t\"wsa\",\n\t\"wskr\",\n\t\"wuoz\",\n\t\"wzmiuw\",\n\t\"zp\",\n\t\"co\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"ac\",\n\t\"biz\",\n\t\"com\",\n\t\"edu\",\n\t\"est\",\n\t\"gov\",\n\t\"info\",\n\t\"isla\",\n\t\"name\",\n\t\"net\",\n\t\"org\",\n\t\"pro\",\n\t\"prof\",\n\t\"aaa\",\n\t\"aca\",\n\t\"acct\",\n\t\"avocat\",\n\t\"bar\",\n\t\"cloudns\",\n\t\"cpa\",\n\t\"eng\",\n\t\"jur\",\n\t\"law\",\n\t\"med\",\n\t\"recht\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"plo\",\n\t\"sec\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"int\",\n\t\"net\",\n\t\"nome\",\n\t\"nym\",\n\t\"org\",\n\t\"publ\",\n\t\"belau\",\n\t\"cloudns\",\n\t\"co\",\n\t\"ed\",\n\t\"go\",\n\t\"ne\",\n\t\"nom\",\n\t\"or\",\n\t\"com\",\n\t\"coop\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"net\",\n\t\"org\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"name\",\n\t\"net\",\n\t\"nom\",\n\t\"org\",\n\t\"sch\",\n\t\"asso\",\n\t\"blogspot\",\n\t\"com\",\n\t\"nom\",\n\t\"ybo\",\n\t\"clan\",\n\t\"arts\",\n\t\"blogspot\",\n\t\"com\",\n\t\"firm\",\n\t\"info\",\n\t\"nom\",\n\t\"nt\",\n\t\"org\",\n\t\"rec\",\n\t\"shop\",\n\t\"store\",\n\t\"tm\",\n\t\"www\",\n\t\"lima-city\",\n\t\"myddns\",\n\t\"webspace\",\n\t\"ac\",\n\t\"blogspot\",\n\t\"co\",\n\t\"edu\",\n\t\"gov\",\n\t\"in\",\n\t\"nom\",\n\t\"org\",\n\t\"ac\",\n\t\"adygeya\",\n\t\"bashkiria\",\n\t\"bir\",\n\t\"blogspot\",\n\t\"cbg\",\n\t\"cldmail\",\n\t\"com\",\n\t\"dagestan\",\n\t\"edu\",\n\t\"gov\",\n\t\"grozny\",\n\t\"int\",\n\t\"kalmykia\",\n\t\"kustanai\",\n\t\"marine\",\n\t\"mil\",\n\t\"mordovia\",\n\t\"msk\",\n\t\"mytis\",\n\t\"nalchik\",\n\t\"net\",\n\t\"nov\",\n\t\"org\",\n\t\"pp\",\n\t\"pyatigorsk\",\n\t\"spb\",\n\t\"test\",\n\t\"vladikavkaz\",\n\t\"vladimir\",\n\t\"hb\",\n\t\"ac\",\n\t\"co\",\n\t\"com\",\n\t\"edu\",\n\t\"gouv\",\n\t\"gov\",\n\t\"int\",\n\t\"mil\",\n\t\"net\",\n\t\"com\",\n\t\"edu\",\n
\t\"gov\",\n\t\"med\",\n\t\"net\",\n\t\"org\",\n\t\"pub\",\n\t\"sch\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"ybo\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"info\",\n\t\"med\",\n\t\"net\",\n\t\"org\",\n\t\"tv\",\n\t\"a\",\n\t\"ac\",\n\t\"b\",\n\t\"bd\",\n\t\"blogspot\",\n\t\"brand\",\n\t\"c\",\n\t\"com\",\n\t\"d\",\n\t\"e\",\n\t\"f\",\n\t\"fh\",\n\t\"fhsk\",\n\t\"fhv\",\n\t\"g\",\n\t\"h\",\n\t\"i\",\n\t\"k\",\n\t\"komforb\",\n\t\"kommunalforbund\",\n\t\"komvux\",\n\t\"l\",\n\t\"lanbib\",\n\t\"m\",\n\t\"n\",\n\t\"naturbruksgymn\",\n\t\"o\",\n\t\"org\",\n\t\"p\",\n\t\"parti\",\n\t\"pp\",\n\t\"press\",\n\t\"r\",\n\t\"s\",\n\t\"t\",\n\t\"tm\",\n\t\"u\",\n\t\"w\",\n\t\"x\",\n\t\"y\",\n\t\"z\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"per\",\n\t\"com\",\n\t\"gov\",\n\t\"hashbang\",\n\t\"mil\",\n\t\"net\",\n\t\"now\",\n\t\"org\",\n\t\"platform\",\n\t\"wedeploy\",\n\t\"blogspot\",\n\t\"nom\",\n\t\"byen\",\n\t\"cyon\",\n\t\"platformsh\",\n\t\"blogspot\",\n\t\"nym\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"net\",\n\t\"org\",\n\t\"art\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"gouv\",\n\t\"org\",\n\t\"perso\",\n\t\"univ\",\n\t\"com\",\n\t\"net\",\n\t\"org\",\n\t\"stackspace\",\n\t\"uber\",\n\t\"xs4all\",\n\t\"co\",\n\t\"com\",\n\t\"consulado\",\n\t\"edu\",\n\t\"embaixada\",\n\t\"gov\",\n\t\"mil\",\n\t\"net\",\n\t\"org\",\n\t\"principe\",\n\t\"saotome\",\n\t\"store\",\n\t\"abkhazia\",\n\t\"adygeya\",\n\t\"aktyubinsk\",\n\t\"arkhangelsk\",\n\t\"armenia\",\n\t\"ashgabad\",\n\t\"azerbaijan\",\n\t\"balashov\",\n\t\"bashkiria\",\n\t\"bryansk\",\n\t\"bukhara\",\n\t\"chimkent\",\n\t\"dagestan\",\n\t\"east-kazakhstan\",\n\t\"exnet\",\n\t\"georgia\",\n\t\"grozny\",\n\t\"ivanovo\",\n\t\"jambyl\",\n\t\"kalmykia\",\n\t\"kaluga\",\n\t\"karacol\",\n\t\"karaganda\",\n\t\"karelia\",\n\t\"khakassia\",\n\t\"krasnodar\",\n\t\"kurgan\",\n\t\"kustanai\",\n\t\"lenug\
",\n\t\"mangyshlak\",\n\t\"mordovia\",\n\t\"msk\",\n\t\"murmansk\",\n\t\"nalchik\",\n\t\"navoi\",\n\t\"north-kazakhstan\",\n\t\"nov\",\n\t\"nym\",\n\t\"obninsk\",\n\t\"penza\",\n\t\"pokrovsk\",\n\t\"sochi\",\n\t\"spb\",\n\t\"tashkent\",\n\t\"termez\",\n\t\"togliatti\",\n\t\"troitsk\",\n\t\"tselinograd\",\n\t\"tula\",\n\t\"tuva\",\n\t\"vladikavkaz\",\n\t\"vladimir\",\n\t\"vologda\",\n\t\"barsy\",\n\t\"com\",\n\t\"edu\",\n\t\"gob\",\n\t\"org\",\n\t\"red\",\n\t\"gov\",\n\t\"nym\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"net\",\n\t\"org\",\n\t\"knightpoint\",\n\t\"ac\",\n\t\"co\",\n\t\"org\",\n\t\"blogspot\",\n\t\"ac\",\n\t\"co\",\n\t\"go\",\n\t\"in\",\n\t\"mi\",\n\t\"net\",\n\t\"or\",\n\t\"ac\",\n\t\"biz\",\n\t\"co\",\n\t\"com\",\n\t\"edu\",\n\t\"go\",\n\t\"gov\",\n\t\"int\",\n\t\"mil\",\n\t\"name\",\n\t\"net\",\n\t\"nic\",\n\t\"org\",\n\t\"test\",\n\t\"web\",\n\t\"gov\",\n\t\"co\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"net\",\n\t\"nom\",\n\t\"org\",\n\t\"agrinet\",\n\t\"com\",\n\t\"defense\",\n\t\"edunet\",\n\t\"ens\",\n\t\"fin\",\n\t\"gov\",\n\t\"ind\",\n\t\"info\",\n\t\"intl\",\n\t\"mincom\",\n\t\"nat\",\n\t\"net\",\n\t\"org\",\n\t\"perso\",\n\t\"rnrt\",\n\t\"rns\",\n\t\"rnu\",\n\t\"tourism\",\n\t\"turen\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"net\",\n\t\"org\",\n\t\"vpnplus\",\n\t\"av\",\n\t\"bbs\",\n\t\"bel\",\n\t\"biz\",\n\t\"com\",\n\t\"dr\",\n\t\"edu\",\n\t\"gen\",\n\t\"gov\",\n\t\"info\",\n\t\"k12\",\n\t\"kep\",\n\t\"mil\",\n\t\"name\",\n\t\"nc\",\n\t\"net\",\n\t\"org\",\n\t\"pol\",\n\t\"tel\",\n\t\"tv\",\n\t\"web\",\n\t\"blogspot\",\n\t\"gov\",\n\t\"ybo\",\n\t\"aero\",\n\t\"biz\",\n\t\"co\",\n\t\"com\",\n\t\"coop\",\n\t\"edu\",\n\t\"gov\",\n\t\"info\",\n\t\"int\",\n\t\"jobs\",\n\t\"mobi\",\n\t\"museum\",\n\t\"name\",\n\t\"net\",\n\t\"org\",\n\t\"pro\",\n\t\"travel\",\n\t\"better-than\",\n\t\"dyndns\",\n\t\"on-the-web\",\n\t\"worse-than\",\n\t\"blogspot\",\n\t\"club\",\n\t\"com\",\n\t\"ebiz\",\n\t\"edu\",\n\t\"game\"
,\n\t\"gov\",\n\t\"idv\",\n\t\"mil\",\n\t\"net\",\n\t\"nym\",\n\t\"org\",\n\t\"url\",\n\t\"xn--czrw28b\",\n\t\"xn--uc0atv\",\n\t\"xn--zf0ao64a\",\n\t\"mymailer\",\n\t\"ac\",\n\t\"co\",\n\t\"go\",\n\t\"hotel\",\n\t\"info\",\n\t\"me\",\n\t\"mil\",\n\t\"mobi\",\n\t\"ne\",\n\t\"or\",\n\t\"sc\",\n\t\"tv\",\n\t\"biz\",\n\t\"cc\",\n\t\"cherkassy\",\n\t\"cherkasy\",\n\t\"chernigov\",\n\t\"chernihiv\",\n\t\"chernivtsi\",\n\t\"chernovtsy\",\n\t\"ck\",\n\t\"cn\",\n\t\"co\",\n\t\"com\",\n\t\"cr\",\n\t\"crimea\",\n\t\"cv\",\n\t\"dn\",\n\t\"dnepropetrovsk\",\n\t\"dnipropetrovsk\",\n\t\"dominic\",\n\t\"donetsk\",\n\t\"dp\",\n\t\"edu\",\n\t\"gov\",\n\t\"if\",\n\t\"in\",\n\t\"inf\",\n\t\"ivano-frankivsk\",\n\t\"kh\",\n\t\"kharkiv\",\n\t\"kharkov\",\n\t\"kherson\",\n\t\"khmelnitskiy\",\n\t\"khmelnytskyi\",\n\t\"kiev\",\n\t\"kirovograd\",\n\t\"km\",\n\t\"kr\",\n\t\"krym\",\n\t\"ks\",\n\t\"kv\",\n\t\"kyiv\",\n\t\"lg\",\n\t\"lt\",\n\t\"ltd\",\n\t\"lugansk\",\n\t\"lutsk\",\n\t\"lv\",\n\t\"lviv\",\n\t\"mk\",\n\t\"mykolaiv\",\n\t\"net\",\n\t\"nikolaev\",\n\t\"od\",\n\t\"odesa\",\n\t\"odessa\",\n\t\"org\",\n\t\"pl\",\n\t\"poltava\",\n\t\"pp\",\n\t\"rivne\",\n\t\"rovno\",\n\t\"rv\",\n\t\"sb\",\n\t\"sebastopol\",\n\t\"sevastopol\",\n\t\"sm\",\n\t\"sumy\",\n\t\"te\",\n\t\"ternopil\",\n\t\"uz\",\n\t\"uzhgorod\",\n\t\"vinnica\",\n\t\"vinnytsia\",\n\t\"vn\",\n\t\"volyn\",\n\t\"yalta\",\n\t\"zaporizhzhe\",\n\t\"zaporizhzhia\",\n\t\"zhitomir\",\n\t\"zhytomyr\",\n\t\"zp\",\n\t\"zt\",\n\t\"ac\",\n\t\"blogspot\",\n\t\"co\",\n\t\"com\",\n\t\"go\",\n\t\"ne\",\n\t\"nom\",\n\t\"or\",\n\t\"org\",\n\t\"sc\",\n\t\"ac\",\n\t\"co\",\n\t\"gov\",\n\t\"ltd\",\n\t\"me\",\n\t\"net\",\n\t\"nhs\",\n\t\"org\",\n\t\"plc\",\n\t\"police\",\n\t\"sch\",\n\t\"blogspot\",\n\t\"no-ip\",\n\t\"wellbeingzone\",\n\t\"homeoffice\",\n\t\"service\",\n\t\"ak\",\n\t\"al\",\n\t\"ar\",\n\t\"as\",\n\t\"az\",\n\t\"ca\",\n\t\"cloudns\",\n\t\"co\",\n\t\"ct\",\n\t\"dc\",\n\t\"de\",\n\t\"dni\",\n\t\"drud\",\n\t\"fed\",\n\t\"fl\",\n\t\"ga\",\n
\t\"golffan\",\n\t\"gu\",\n\t\"hi\",\n\t\"ia\",\n\t\"id\",\n\t\"il\",\n\t\"in\",\n\t\"is-by\",\n\t\"isa\",\n\t\"kids\",\n\t\"ks\",\n\t\"ky\",\n\t\"la\",\n\t\"land-4-sale\",\n\t\"ma\",\n\t\"md\",\n\t\"me\",\n\t\"mi\",\n\t\"mn\",\n\t\"mo\",\n\t\"ms\",\n\t\"mt\",\n\t\"nc\",\n\t\"nd\",\n\t\"ne\",\n\t\"nh\",\n\t\"nj\",\n\t\"nm\",\n\t\"noip\",\n\t\"nsn\",\n\t\"nv\",\n\t\"ny\",\n\t\"oh\",\n\t\"ok\",\n\t\"or\",\n\t\"pa\",\n\t\"pointto\",\n\t\"pr\",\n\t\"ri\",\n\t\"sc\",\n\t\"sd\",\n\t\"stuff-4-sale\",\n\t\"tn\",\n\t\"tx\",\n\t\"ut\",\n\t\"va\",\n\t\"vi\",\n\t\"vt\",\n\t\"wa\",\n\t\"wi\",\n\t\"wv\",\n\t\"wy\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"chtr\",\n\t\"paroch\",\n\t\"pvt\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"ann-arbor\",\n\t\"cc\",\n\t\"cog\",\n\t\"dst\",\n\t\"eaton\",\n\t\"gen\",\n\t\"k12\",\n\t\"lib\",\n\t\"mus\",\n\t\"tec\",\n\t\"washtenaw\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"
cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"cc\",\n\t\"cc\",\n\t\"k12\",\n\t\"lib\",\n\t\"com\",\n\t\"edu\",\n\t\"gub\",\n\t\"mil\",\n\t\"net\",\n\t\"nom\",\n\t\"org\",\n\t\"blogspot\",\n\t\"co\",\n\t\"com\",\n\t\"net\",\n\t\"org\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"mil\",\n\t\"net\",\n\t\"nom\",\n\t\"org\",\n\t\"arts\",\n\t\"co\",\n\t\"com\",\n\t\"e12\",\n\t\"edu\",\n\t\"firm\",\n\t\"gob\",\n\t\"gov\",\n\t\"info\",\n\t\"int\",\n\t\"mil\",\n\t\"net\",\n\t\"org\",\n\t\"rec\",\n\t\"store\",\n\t\"tec\",\n\t\"web\",\n\t\"nom\",\n\t\"co\",\n\t\"com\",\n\t\"k12\",\n\t\"net\",\n\t\"org\",\n\t\"ac\",\n\t\"biz\",\n\t\"blogspot\",\n\t\"com\",\n\t\"edu\",\n\t\"gov\",\n\t\"health\",\n\t\"info\",\n\t\"int\",\n\t\"name\",\n\t\"net\",\n\t\"org\",\n\t\"pro\",\n\t\"com\",\n\t\"edu\",\n\t\"net\",\n\t\"org\",\n\t\"advisor\",\n\t\"com\",\n\t\"dyndns\",\n\t\"edu\",\n\t\"gov\",\n\t\"mypets\",\n\t\"net\",\n\t\"org\",\n\t\"xn--80au\",\n\t\"xn--90azh\",\n\t\"xn--c1avg\",\n\t\"xn--d1at\",\n\t\"xn--o1ac\",\n\t\"xn--o1ach\",\n\t\"xn--12c1fe0br\",\n\t\"xn--12cfi8ixb8l\",\n\t\"xn--12co0c3b4eva\",\n\t\"xn--h3cuzk1di\",\n\t\"xn--m3ch0j3a\",\n\t\"xn--o3cyx2a\",\n\t\"blogsite\",\n\t\"fhapp\",\n\t\"ac\",\n\t\"agric\",\n\t\"alt\",\n\t\"co\",\n\t\"edu\",\n\t\"gov\",\n\t\"grondar\",\n\t\"law\",\n\t\"mil\",\n\t\"net\",\n\t\"ngo\",\n\t\"nis\",\n\t\"nom\",\n\t\"org\",\n\t\"school\",\n\t\"tm\",\n\t\"web\",\n\t\"blogspot\",\n\t\"ac\",\n\t\"biz\",\n\t\"co\",\n\t\"com\",\n\t
\"edu\",\n\t\"gov\",\n\t\"info\",\n\t\"mil\",\n\t\"net\",\n\t\"org\",\n\t\"sch\",\n\t\"lima\",\n\t\"triton\",\n\t\"ac\",\n\t\"co\",\n\t\"gov\",\n\t\"mil\",\n\t\"org\",\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/address.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd openbsd\n\npackage route\n\nimport \"runtime\"\n\n// An Addr represents an address associated with packet routing.\ntype Addr interface {\n\t// Family returns an address family.\n\tFamily() int\n}\n\n// A LinkAddr represents a link-layer address.\ntype LinkAddr struct {\n\tIndex int    // interface index when attached\n\tName  string // interface name when attached\n\tAddr  []byte // link-layer address when attached\n}\n\n// Family implements the Family method of Addr interface.\nfunc (a *LinkAddr) Family() int { return sysAF_LINK }\n\nfunc (a *LinkAddr) lenAndSpace() (int, int) {\n\tl := 8 + len(a.Name) + len(a.Addr)\n\treturn l, roundup(l)\n}\n\nfunc (a *LinkAddr) marshal(b []byte) (int, error) {\n\tl, ll := a.lenAndSpace()\n\tif len(b) < ll {\n\t\treturn 0, errShortBuffer\n\t}\n\tnlen, alen := len(a.Name), len(a.Addr)\n\tif nlen > 255 || alen > 255 {\n\t\treturn 0, errInvalidAddr\n\t}\n\tb[0] = byte(l)\n\tb[1] = sysAF_LINK\n\tif a.Index > 0 {\n\t\tnativeEndian.PutUint16(b[2:4], uint16(a.Index))\n\t}\n\tdata := b[8:]\n\tif nlen > 0 {\n\t\tb[5] = byte(nlen)\n\t\tcopy(data[:nlen], a.Addr)\n\t\tdata = data[nlen:]\n\t}\n\tif alen > 0 {\n\t\tb[6] = byte(alen)\n\t\tcopy(data[:alen], a.Name)\n\t\tdata = data[alen:]\n\t}\n\treturn ll, nil\n}\n\nfunc parseLinkAddr(b []byte) (Addr, error) {\n\tif len(b) < 8 {\n\t\treturn nil, errInvalidAddr\n\t}\n\t_, a, err := parseKernelLinkAddr(sysAF_LINK, b[4:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ta.(*LinkAddr).Index = int(nativeEndian.Uint16(b[2:4]))\n\treturn a, nil\n}\n\n// parseKernelLinkAddr parses b as a link-layer address in\n// conventional BSD kernel form.\nfunc parseKernelLinkAddr(_ int, b []byte) (int, Addr, error) {\n\t// The encoding looks like the following:\n\t// 
+----------------------------+\n\t// | Type             (1 octet) |\n\t// +----------------------------+\n\t// | Name length      (1 octet) |\n\t// +----------------------------+\n\t// | Address length   (1 octet) |\n\t// +----------------------------+\n\t// | Selector length  (1 octet) |\n\t// +----------------------------+\n\t// | Data            (variable) |\n\t// +----------------------------+\n\t//\n\t// On some platforms, all-bit-one of length field means \"don't\n\t// care\".\n\tnlen, alen, slen := int(b[1]), int(b[2]), int(b[3])\n\tif nlen == 0xff {\n\t\tnlen = 0\n\t}\n\tif alen == 0xff {\n\t\talen = 0\n\t}\n\tif slen == 0xff {\n\t\tslen = 0\n\t}\n\tl := 4 + nlen + alen + slen\n\tif len(b) < l {\n\t\treturn 0, nil, errInvalidAddr\n\t}\n\tdata := b[4:]\n\tvar name string\n\tvar addr []byte\n\tif nlen > 0 {\n\t\tname = string(data[:nlen])\n\t\tdata = data[nlen:]\n\t}\n\tif alen > 0 {\n\t\taddr = data[:alen]\n\t\tdata = data[alen:]\n\t}\n\treturn l, &LinkAddr{Name: name, Addr: addr}, nil\n}\n\n// An Inet4Addr represents an internet address for IPv4.\ntype Inet4Addr struct {\n\tIP [4]byte // IP address\n}\n\n// Family implements the Family method of Addr interface.\nfunc (a *Inet4Addr) Family() int { return sysAF_INET }\n\nfunc (a *Inet4Addr) lenAndSpace() (int, int) {\n\treturn sizeofSockaddrInet, roundup(sizeofSockaddrInet)\n}\n\nfunc (a *Inet4Addr) marshal(b []byte) (int, error) {\n\tl, ll := a.lenAndSpace()\n\tif len(b) < ll {\n\t\treturn 0, errShortBuffer\n\t}\n\tb[0] = byte(l)\n\tb[1] = sysAF_INET\n\tcopy(b[4:8], a.IP[:])\n\treturn ll, nil\n}\n\n// An Inet6Addr represents an internet address for IPv6.\ntype Inet6Addr struct {\n\tIP     [16]byte // IP address\n\tZoneID int      // zone identifier\n}\n\n// Family implements the Family method of Addr interface.\nfunc (a *Inet6Addr) Family() int { return sysAF_INET6 }\n\nfunc (a *Inet6Addr) lenAndSpace() (int, int) {\n\treturn sizeofSockaddrInet6, roundup(sizeofSockaddrInet6)\n}\n\nfunc (a *Inet6Addr) 
marshal(b []byte) (int, error) {\n\tl, ll := a.lenAndSpace()\n\tif len(b) < ll {\n\t\treturn 0, errShortBuffer\n\t}\n\tb[0] = byte(l)\n\tb[1] = sysAF_INET6\n\tcopy(b[8:24], a.IP[:])\n\tif a.ZoneID > 0 {\n\t\tnativeEndian.PutUint32(b[24:28], uint32(a.ZoneID))\n\t}\n\treturn ll, nil\n}\n\n// parseInetAddr parses b as an internet address for IPv4 or IPv6.\nfunc parseInetAddr(af int, b []byte) (Addr, error) {\n\tswitch af {\n\tcase sysAF_INET:\n\t\tif len(b) < sizeofSockaddrInet {\n\t\t\treturn nil, errInvalidAddr\n\t\t}\n\t\ta := &Inet4Addr{}\n\t\tcopy(a.IP[:], b[4:8])\n\t\treturn a, nil\n\tcase sysAF_INET6:\n\t\tif len(b) < sizeofSockaddrInet6 {\n\t\t\treturn nil, errInvalidAddr\n\t\t}\n\t\ta := &Inet6Addr{ZoneID: int(nativeEndian.Uint32(b[24:28]))}\n\t\tcopy(a.IP[:], b[8:24])\n\t\tif a.IP[0] == 0xfe && a.IP[1]&0xc0 == 0x80 || a.IP[0] == 0xff && (a.IP[1]&0x0f == 0x01 || a.IP[1]&0x0f == 0x02) {\n\t\t\t// KAME based IPv6 protocol stack usually\n\t\t\t// embeds the interface index in the\n\t\t\t// interface-local or link-local address as\n\t\t\t// the kernel-internal form.\n\t\t\tid := int(bigEndian.Uint16(a.IP[2:4]))\n\t\t\tif id != 0 {\n\t\t\t\ta.ZoneID = id\n\t\t\t\ta.IP[2], a.IP[3] = 0, 0\n\t\t\t}\n\t\t}\n\t\treturn a, nil\n\tdefault:\n\t\treturn nil, errInvalidAddr\n\t}\n}\n\n// parseKernelInetAddr parses b as an internet address in conventional\n// BSD kernel form.\nfunc parseKernelInetAddr(af int, b []byte) (int, Addr, error) {\n\t// The encoding looks similar to the NLRI encoding.\n\t// +----------------------------+\n\t// | Length           (1 octet) |\n\t// +----------------------------+\n\t// | Address prefix  (variable) |\n\t// +----------------------------+\n\t//\n\t// The differences between the kernel form and the NLRI\n\t// encoding are:\n\t//\n\t// - The length field of the kernel form indicates the prefix\n\t//   length in bytes, not in bits\n\t//\n\t// - In the kernel form, zero value of the length field\n\t//   doesn't mean 0.0.0.0/0 or 
::/0\n\t//\n\t// - The kernel form appends leading bytes to the prefix field\n\t//   to make the <length, prefix> tuple to be conformed with\n\t//   the routing message boundary\n\tl := int(b[0])\n\tif runtime.GOOS == \"darwin\" {\n\t\t// On Darwn, an address in the kernel form is also\n\t\t// used as a message filler.\n\t\tif l == 0 || len(b) > roundup(l) {\n\t\t\tl = roundup(l)\n\t\t}\n\t} else {\n\t\tl = roundup(l)\n\t}\n\tif len(b) < l {\n\t\treturn 0, nil, errInvalidAddr\n\t}\n\t// Don't reorder case expressions.\n\t// The case expressions for IPv6 must come first.\n\tconst (\n\t\toff4 = 4 // offset of in_addr\n\t\toff6 = 8 // offset of in6_addr\n\t)\n\tswitch {\n\tcase b[0] == sizeofSockaddrInet6:\n\t\ta := &Inet6Addr{}\n\t\tcopy(a.IP[:], b[off6:off6+16])\n\t\treturn int(b[0]), a, nil\n\tcase af == sysAF_INET6:\n\t\ta := &Inet6Addr{}\n\t\tif l-1 < off6 {\n\t\t\tcopy(a.IP[:], b[1:l])\n\t\t} else {\n\t\t\tcopy(a.IP[:], b[l-off6:l])\n\t\t}\n\t\treturn int(b[0]), a, nil\n\tcase b[0] == sizeofSockaddrInet:\n\t\ta := &Inet4Addr{}\n\t\tcopy(a.IP[:], b[off4:off4+4])\n\t\treturn int(b[0]), a, nil\n\tdefault: // an old fashion, AF_UNSPEC or unknown means AF_INET\n\t\ta := &Inet4Addr{}\n\t\tif l-1 < off4 {\n\t\t\tcopy(a.IP[:], b[1:l])\n\t\t} else {\n\t\t\tcopy(a.IP[:], b[l-off4:l])\n\t\t}\n\t\treturn int(b[0]), a, nil\n\t}\n}\n\n// A DefaultAddr represents an address of various operating\n// system-specific features.\ntype DefaultAddr struct {\n\taf  int\n\tRaw []byte // raw format of address\n}\n\n// Family implements the Family method of Addr interface.\nfunc (a *DefaultAddr) Family() int { return a.af }\n\nfunc (a *DefaultAddr) lenAndSpace() (int, int) {\n\tl := len(a.Raw)\n\treturn l, roundup(l)\n}\n\nfunc (a *DefaultAddr) marshal(b []byte) (int, error) {\n\tl, ll := a.lenAndSpace()\n\tif len(b) < ll {\n\t\treturn 0, errShortBuffer\n\t}\n\tif l > 255 {\n\t\treturn 0, errInvalidAddr\n\t}\n\tb[1] = byte(l)\n\tcopy(b[:l], a.Raw)\n\treturn ll, nil\n}\n\nfunc 
parseDefaultAddr(b []byte) (Addr, error) {\n\tif len(b) < 2 || len(b) < int(b[0]) {\n\t\treturn nil, errInvalidAddr\n\t}\n\ta := &DefaultAddr{af: int(b[1]), Raw: b[:b[0]]}\n\treturn a, nil\n}\n\nfunc addrsSpace(as []Addr) int {\n\tvar l int\n\tfor _, a := range as {\n\t\tswitch a := a.(type) {\n\t\tcase *LinkAddr:\n\t\t\t_, ll := a.lenAndSpace()\n\t\t\tl += ll\n\t\tcase *Inet4Addr:\n\t\t\t_, ll := a.lenAndSpace()\n\t\t\tl += ll\n\t\tcase *Inet6Addr:\n\t\t\t_, ll := a.lenAndSpace()\n\t\t\tl += ll\n\t\tcase *DefaultAddr:\n\t\t\t_, ll := a.lenAndSpace()\n\t\t\tl += ll\n\t\t}\n\t}\n\treturn l\n}\n\n// marshalAddrs marshals as and returns a bitmap indicating which\n// address is stored in b.\nfunc marshalAddrs(b []byte, as []Addr) (uint, error) {\n\tvar attrs uint\n\tfor i, a := range as {\n\t\tswitch a := a.(type) {\n\t\tcase *LinkAddr:\n\t\t\tl, err := a.marshal(b)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tb = b[l:]\n\t\t\tattrs |= 1 << uint(i)\n\t\tcase *Inet4Addr:\n\t\t\tl, err := a.marshal(b)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tb = b[l:]\n\t\t\tattrs |= 1 << uint(i)\n\t\tcase *Inet6Addr:\n\t\t\tl, err := a.marshal(b)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tb = b[l:]\n\t\t\tattrs |= 1 << uint(i)\n\t\tcase *DefaultAddr:\n\t\t\tl, err := a.marshal(b)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tb = b[l:]\n\t\t\tattrs |= 1 << uint(i)\n\t\t}\n\t}\n\treturn attrs, nil\n}\n\nfunc parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) ([]Addr, error) {\n\tvar as [sysRTAX_MAX]Addr\n\taf := int(sysAF_UNSPEC)\n\tfor i := uint(0); i < sysRTAX_MAX && len(b) >= roundup(0); i++ {\n\t\tif attrs&(1<<i) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif i <= sysRTAX_BRD {\n\t\t\tswitch b[1] {\n\t\t\tcase sysAF_LINK:\n\t\t\t\ta, err := parseLinkAddr(b)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tas[i] = a\n\t\t\t\tl := roundup(int(b[0]))\n\t\t\t\tif len(b) < l 
{\n\t\t\t\t\treturn nil, errMessageTooShort\n\t\t\t\t}\n\t\t\t\tb = b[l:]\n\t\t\tcase sysAF_INET, sysAF_INET6:\n\t\t\t\taf = int(b[1])\n\t\t\t\ta, err := parseInetAddr(af, b)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tas[i] = a\n\t\t\t\tl := roundup(int(b[0]))\n\t\t\t\tif len(b) < l {\n\t\t\t\t\treturn nil, errMessageTooShort\n\t\t\t\t}\n\t\t\t\tb = b[l:]\n\t\t\tdefault:\n\t\t\t\tl, a, err := fn(af, b)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tas[i] = a\n\t\t\t\tll := roundup(l)\n\t\t\t\tif len(b) < ll {\n\t\t\t\t\tb = b[l:]\n\t\t\t\t} else {\n\t\t\t\t\tb = b[ll:]\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\ta, err := parseDefaultAddr(b)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tas[i] = a\n\t\t\tl := roundup(int(b[0]))\n\t\t\tif len(b) < l {\n\t\t\t\treturn nil, errMessageTooShort\n\t\t\t}\n\t\t\tb = b[l:]\n\t\t}\n\t}\n\treturn as[:], nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/address_darwin_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage route\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\ntype parseAddrsOnDarwinTest struct {\n\tattrs uint\n\tfn    func(int, []byte) (int, Addr, error)\n\tb     []byte\n\tas    []Addr\n}\n\nvar parseAddrsOnDarwinLittleEndianTests = []parseAddrsOnDarwinTest{\n\t{\n\t\tsysRTA_DST | sysRTA_GATEWAY | sysRTA_NETMASK,\n\t\tparseKernelInetAddr,\n\t\t[]byte{\n\t\t\t0x10, 0x2, 0x0, 0x0, 0xc0, 0xa8, 0x56, 0x0,\n\t\t\t0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,\n\n\t\t\t0x14, 0x12, 0x4, 0x0, 0x6, 0x0, 0x0, 0x0,\n\t\t\t0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,\n\t\t\t0x0, 0x0, 0x0, 0x0,\n\n\t\t\t0x7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,\n\t\t},\n\t\t[]Addr{\n\t\t\t&Inet4Addr{IP: [4]byte{192, 168, 86, 0}},\n\t\t\t&LinkAddr{Index: 4},\n\t\t\t&Inet4Addr{IP: [4]byte{255, 255, 255, 255}},\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tnil,\n\t\t},\n\t},\n}\n\nfunc TestParseAddrsOnDarwin(t *testing.T) {\n\ttests := parseAddrsOnDarwinLittleEndianTests\n\tif nativeEndian != littleEndian {\n\t\tt.Skip(\"no test for non-little endian machine yet\")\n\t}\n\n\tfor i, tt := range tests {\n\t\tas, err := parseAddrs(tt.attrs, tt.fn, tt.b)\n\t\tif err != nil {\n\t\t\tt.Error(i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(as, tt.as) {\n\t\t\tt.Errorf(\"#%d: got %+v; want %+v\", i, as, tt.as)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/address_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd openbsd\n\npackage route\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\ntype parseAddrsTest struct {\n\tattrs uint\n\tfn    func(int, []byte) (int, Addr, error)\n\tb     []byte\n\tas    []Addr\n}\n\nvar parseAddrsLittleEndianTests = []parseAddrsTest{\n\t{\n\t\tsysRTA_DST | sysRTA_GATEWAY | sysRTA_NETMASK | sysRTA_BRD,\n\t\tparseKernelInetAddr,\n\t\t[]byte{\n\t\t\t0x38, 0x12, 0x0, 0x0, 0xff, 0xff, 0xff, 0x0,\n\t\t\t0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,\n\t\t\t0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,\n\t\t\t0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,\n\t\t\t0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,\n\t\t\t0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,\n\t\t\t0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,\n\n\t\t\t0x38, 0x12, 0x2, 0x0, 0x6, 0x3, 0x6, 0x0,\n\t\t\t0x65, 0x6d, 0x31, 0x0, 0xc, 0x29, 0x66, 0x2c,\n\t\t\t0xdc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,\n\t\t\t0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,\n\t\t\t0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,\n\t\t\t0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,\n\t\t\t0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,\n\n\t\t\t0x10, 0x2, 0x0, 0x0, 0xac, 0x10, 0xdc, 0xb4,\n\t\t\t0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,\n\n\t\t\t0x10, 0x2, 0x0, 0x0, 0xac, 0x10, 0xdc, 0xff,\n\t\t\t0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,\n\t\t},\n\t\t[]Addr{\n\t\t\t&LinkAddr{Index: 0},\n\t\t\t&LinkAddr{Index: 2, Name: \"em1\", Addr: []byte{0x00, 0x0c, 0x29, 0x66, 0x2c, 0xdc}},\n\t\t\t&Inet4Addr{IP: [4]byte{172, 16, 220, 180}},\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tnil,\n\t\t\t&Inet4Addr{IP: [4]byte{172, 16, 220, 255}},\n\t\t},\n\t},\n\t{\n\t\tsysRTA_NETMASK | sysRTA_IFP | sysRTA_IFA,\n\t\tparseKernelInetAddr,\n\t\t[]byte{\n\t\t\t0x7, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0x0,\n\n\t\t\t0x18, 0x12, 0xa, 0x0, 0x87, 0x8, 0x0, 0x0,\n\t\t\t0x76, 0x6c, 0x61, 0x6e, 0x35, 0x36, 
0x38, 0x32,\n\t\t\t0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,\n\n\t\t\t0x10, 0x2, 0x0, 0x0, 0xa9, 0xfe, 0x0, 0x1,\n\t\t\t0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,\n\t\t},\n\t\t[]Addr{\n\t\t\tnil,\n\t\t\tnil,\n\t\t\t&Inet4Addr{IP: [4]byte{255, 255, 255, 0}},\n\t\t\tnil,\n\t\t\t&LinkAddr{Index: 10, Name: \"vlan5682\"},\n\t\t\t&Inet4Addr{IP: [4]byte{169, 254, 0, 1}},\n\t\t\tnil,\n\t\t\tnil,\n\t\t},\n\t},\n}\n\nfunc TestParseAddrs(t *testing.T) {\n\ttests := parseAddrsLittleEndianTests\n\tif nativeEndian != littleEndian {\n\t\tt.Skip(\"no test for non-little endian machine yet\")\n\t}\n\n\tfor i, tt := range tests {\n\t\tas, err := parseAddrs(tt.attrs, tt.fn, tt.b)\n\t\tif err != nil {\n\t\t\tt.Error(i, err)\n\t\t\tcontinue\n\t\t}\n\t\tas = as[:8] // the list varies between operating systems\n\t\tif !reflect.DeepEqual(as, tt.as) {\n\t\t\tt.Errorf(\"#%d: got %+v; want %+v\", i, as, tt.as)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/binary.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd openbsd\n\npackage route\n\n// This file contains duplicates of encoding/binary package.\n//\n// This package is supposed to be used by the net package of standard\n// library. Therefore the package set used in the package must be the\n// same as net package.\n\nvar (\n\tlittleEndian binaryLittleEndian\n\tbigEndian    binaryBigEndian\n)\n\ntype binaryByteOrder interface {\n\tUint16([]byte) uint16\n\tUint32([]byte) uint32\n\tPutUint16([]byte, uint16)\n\tPutUint32([]byte, uint32)\n\tUint64([]byte) uint64\n}\n\ntype binaryLittleEndian struct{}\n\nfunc (binaryLittleEndian) Uint16(b []byte) uint16 {\n\t_ = b[1] // bounds check hint to compiler; see golang.org/issue/14808\n\treturn uint16(b[0]) | uint16(b[1])<<8\n}\n\nfunc (binaryLittleEndian) PutUint16(b []byte, v uint16) {\n\t_ = b[1] // early bounds check to guarantee safety of writes below\n\tb[0] = byte(v)\n\tb[1] = byte(v >> 8)\n}\n\nfunc (binaryLittleEndian) Uint32(b []byte) uint32 {\n\t_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808\n\treturn uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24\n}\n\nfunc (binaryLittleEndian) PutUint32(b []byte, v uint32) {\n\t_ = b[3] // early bounds check to guarantee safety of writes below\n\tb[0] = byte(v)\n\tb[1] = byte(v >> 8)\n\tb[2] = byte(v >> 16)\n\tb[3] = byte(v >> 24)\n}\n\nfunc (binaryLittleEndian) Uint64(b []byte) uint64 {\n\t_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808\n\treturn uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |\n\t\tuint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56\n}\n\ntype binaryBigEndian struct{}\n\nfunc (binaryBigEndian) Uint16(b []byte) uint16 {\n\t_ = b[1] // bounds check hint to compiler; see 
golang.org/issue/14808\n\treturn uint16(b[1]) | uint16(b[0])<<8\n}\n\nfunc (binaryBigEndian) PutUint16(b []byte, v uint16) {\n\t_ = b[1] // early bounds check to guarantee safety of writes below\n\tb[0] = byte(v >> 8)\n\tb[1] = byte(v)\n}\n\nfunc (binaryBigEndian) Uint32(b []byte) uint32 {\n\t_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808\n\treturn uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24\n}\n\nfunc (binaryBigEndian) PutUint32(b []byte, v uint32) {\n\t_ = b[3] // early bounds check to guarantee safety of writes below\n\tb[0] = byte(v >> 24)\n\tb[1] = byte(v >> 16)\n\tb[2] = byte(v >> 8)\n\tb[3] = byte(v)\n}\n\nfunc (binaryBigEndian) Uint64(b []byte) uint64 {\n\t_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808\n\treturn uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |\n\t\tuint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/defs_darwin.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\npackage route\n\n/*\n#include <sys/socket.h>\n#include <sys/sysctl.h>\n\n#include <net/if.h>\n#include <net/if_dl.h>\n#include <net/route.h>\n\n#include <netinet/in.h>\n*/\nimport \"C\"\n\nconst (\n\tsysAF_UNSPEC = C.AF_UNSPEC\n\tsysAF_INET   = C.AF_INET\n\tsysAF_ROUTE  = C.AF_ROUTE\n\tsysAF_LINK   = C.AF_LINK\n\tsysAF_INET6  = C.AF_INET6\n\n\tsysSOCK_RAW = C.SOCK_RAW\n\n\tsysNET_RT_DUMP    = C.NET_RT_DUMP\n\tsysNET_RT_FLAGS   = C.NET_RT_FLAGS\n\tsysNET_RT_IFLIST  = C.NET_RT_IFLIST\n\tsysNET_RT_STAT    = C.NET_RT_STAT\n\tsysNET_RT_TRASH   = C.NET_RT_TRASH\n\tsysNET_RT_IFLIST2 = C.NET_RT_IFLIST2\n\tsysNET_RT_DUMP2   = C.NET_RT_DUMP2\n\tsysNET_RT_MAXID   = C.NET_RT_MAXID\n)\n\nconst (\n\tsysCTL_MAXNAME = C.CTL_MAXNAME\n\n\tsysCTL_UNSPEC  = C.CTL_UNSPEC\n\tsysCTL_KERN    = C.CTL_KERN\n\tsysCTL_VM      = C.CTL_VM\n\tsysCTL_VFS     = C.CTL_VFS\n\tsysCTL_NET     = C.CTL_NET\n\tsysCTL_DEBUG   = C.CTL_DEBUG\n\tsysCTL_HW      = C.CTL_HW\n\tsysCTL_MACHDEP = C.CTL_MACHDEP\n\tsysCTL_USER    = C.CTL_USER\n\tsysCTL_MAXID   = C.CTL_MAXID\n)\n\nconst (\n\tsysRTM_VERSION = C.RTM_VERSION\n\n\tsysRTM_ADD       = C.RTM_ADD\n\tsysRTM_DELETE    = C.RTM_DELETE\n\tsysRTM_CHANGE    = C.RTM_CHANGE\n\tsysRTM_GET       = C.RTM_GET\n\tsysRTM_LOSING    = C.RTM_LOSING\n\tsysRTM_REDIRECT  = C.RTM_REDIRECT\n\tsysRTM_MISS      = C.RTM_MISS\n\tsysRTM_LOCK      = C.RTM_LOCK\n\tsysRTM_OLDADD    = C.RTM_OLDADD\n\tsysRTM_OLDDEL    = C.RTM_OLDDEL\n\tsysRTM_RESOLVE   = C.RTM_RESOLVE\n\tsysRTM_NEWADDR   = C.RTM_NEWADDR\n\tsysRTM_DELADDR   = C.RTM_DELADDR\n\tsysRTM_IFINFO    = C.RTM_IFINFO\n\tsysRTM_NEWMADDR  = C.RTM_NEWMADDR\n\tsysRTM_DELMADDR  = C.RTM_DELMADDR\n\tsysRTM_IFINFO2   = C.RTM_IFINFO2\n\tsysRTM_NEWMADDR2 = C.RTM_NEWMADDR2\n\tsysRTM_GET2      = C.RTM_GET2\n\n\tsysRTA_DST     = 
C.RTA_DST\n\tsysRTA_GATEWAY = C.RTA_GATEWAY\n\tsysRTA_NETMASK = C.RTA_NETMASK\n\tsysRTA_GENMASK = C.RTA_GENMASK\n\tsysRTA_IFP     = C.RTA_IFP\n\tsysRTA_IFA     = C.RTA_IFA\n\tsysRTA_AUTHOR  = C.RTA_AUTHOR\n\tsysRTA_BRD     = C.RTA_BRD\n\n\tsysRTAX_DST     = C.RTAX_DST\n\tsysRTAX_GATEWAY = C.RTAX_GATEWAY\n\tsysRTAX_NETMASK = C.RTAX_NETMASK\n\tsysRTAX_GENMASK = C.RTAX_GENMASK\n\tsysRTAX_IFP     = C.RTAX_IFP\n\tsysRTAX_IFA     = C.RTAX_IFA\n\tsysRTAX_AUTHOR  = C.RTAX_AUTHOR\n\tsysRTAX_BRD     = C.RTAX_BRD\n\tsysRTAX_MAX     = C.RTAX_MAX\n)\n\nconst (\n\tsizeofIfMsghdrDarwin15    = C.sizeof_struct_if_msghdr\n\tsizeofIfaMsghdrDarwin15   = C.sizeof_struct_ifa_msghdr\n\tsizeofIfmaMsghdrDarwin15  = C.sizeof_struct_ifma_msghdr\n\tsizeofIfMsghdr2Darwin15   = C.sizeof_struct_if_msghdr2\n\tsizeofIfmaMsghdr2Darwin15 = C.sizeof_struct_ifma_msghdr2\n\tsizeofIfDataDarwin15      = C.sizeof_struct_if_data\n\tsizeofIfData64Darwin15    = C.sizeof_struct_if_data64\n\n\tsizeofRtMsghdrDarwin15  = C.sizeof_struct_rt_msghdr\n\tsizeofRtMsghdr2Darwin15 = C.sizeof_struct_rt_msghdr2\n\tsizeofRtMetricsDarwin15 = C.sizeof_struct_rt_metrics\n\n\tsizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage\n\tsizeofSockaddrInet    = C.sizeof_struct_sockaddr_in\n\tsizeofSockaddrInet6   = C.sizeof_struct_sockaddr_in6\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/defs_dragonfly.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\npackage route\n\n/*\n#include <sys/socket.h>\n#include <sys/sysctl.h>\n\n#include <net/if.h>\n#include <net/if_dl.h>\n#include <net/route.h>\n\n#include <netinet/in.h>\n*/\nimport \"C\"\n\nconst (\n\tsysAF_UNSPEC = C.AF_UNSPEC\n\tsysAF_INET   = C.AF_INET\n\tsysAF_ROUTE  = C.AF_ROUTE\n\tsysAF_LINK   = C.AF_LINK\n\tsysAF_INET6  = C.AF_INET6\n\n\tsysSOCK_RAW = C.SOCK_RAW\n\n\tsysNET_RT_DUMP   = C.NET_RT_DUMP\n\tsysNET_RT_FLAGS  = C.NET_RT_FLAGS\n\tsysNET_RT_IFLIST = C.NET_RT_IFLIST\n\tsysNET_RT_MAXID  = C.NET_RT_MAXID\n)\n\nconst (\n\tsysCTL_MAXNAME = C.CTL_MAXNAME\n\n\tsysCTL_UNSPEC   = C.CTL_UNSPEC\n\tsysCTL_KERN     = C.CTL_KERN\n\tsysCTL_VM       = C.CTL_VM\n\tsysCTL_VFS      = C.CTL_VFS\n\tsysCTL_NET      = C.CTL_NET\n\tsysCTL_DEBUG    = C.CTL_DEBUG\n\tsysCTL_HW       = C.CTL_HW\n\tsysCTL_MACHDEP  = C.CTL_MACHDEP\n\tsysCTL_USER     = C.CTL_USER\n\tsysCTL_P1003_1B = C.CTL_P1003_1B\n\tsysCTL_LWKT     = C.CTL_LWKT\n\tsysCTL_MAXID    = C.CTL_MAXID\n)\n\nconst (\n\tsysRTM_VERSION = C.RTM_VERSION\n\n\tsysRTM_ADD        = C.RTM_ADD\n\tsysRTM_DELETE     = C.RTM_DELETE\n\tsysRTM_CHANGE     = C.RTM_CHANGE\n\tsysRTM_GET        = C.RTM_GET\n\tsysRTM_LOSING     = C.RTM_LOSING\n\tsysRTM_REDIRECT   = C.RTM_REDIRECT\n\tsysRTM_MISS       = C.RTM_MISS\n\tsysRTM_LOCK       = C.RTM_LOCK\n\tsysRTM_OLDADD     = C.RTM_OLDADD\n\tsysRTM_OLDDEL     = C.RTM_OLDDEL\n\tsysRTM_RESOLVE    = C.RTM_RESOLVE\n\tsysRTM_NEWADDR    = C.RTM_NEWADDR\n\tsysRTM_DELADDR    = C.RTM_DELADDR\n\tsysRTM_IFINFO     = C.RTM_IFINFO\n\tsysRTM_NEWMADDR   = C.RTM_NEWMADDR\n\tsysRTM_DELMADDR   = C.RTM_DELMADDR\n\tsysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE\n\tsysRTM_IEEE80211  = C.RTM_IEEE80211\n\n\tsysRTA_DST     = C.RTA_DST\n\tsysRTA_GATEWAY = C.RTA_GATEWAY\n\tsysRTA_NETMASK = C.RTA_NETMASK\n\tsysRTA_GENMASK = 
C.RTA_GENMASK\n\tsysRTA_IFP     = C.RTA_IFP\n\tsysRTA_IFA     = C.RTA_IFA\n\tsysRTA_AUTHOR  = C.RTA_AUTHOR\n\tsysRTA_BRD     = C.RTA_BRD\n\tsysRTA_MPLS1   = C.RTA_MPLS1\n\tsysRTA_MPLS2   = C.RTA_MPLS2\n\tsysRTA_MPLS3   = C.RTA_MPLS3\n\n\tsysRTAX_DST     = C.RTAX_DST\n\tsysRTAX_GATEWAY = C.RTAX_GATEWAY\n\tsysRTAX_NETMASK = C.RTAX_NETMASK\n\tsysRTAX_GENMASK = C.RTAX_GENMASK\n\tsysRTAX_IFP     = C.RTAX_IFP\n\tsysRTAX_IFA     = C.RTAX_IFA\n\tsysRTAX_AUTHOR  = C.RTAX_AUTHOR\n\tsysRTAX_BRD     = C.RTAX_BRD\n\tsysRTAX_MPLS1   = C.RTAX_MPLS1\n\tsysRTAX_MPLS2   = C.RTAX_MPLS2\n\tsysRTAX_MPLS3   = C.RTAX_MPLS3\n\tsysRTAX_MAX     = C.RTAX_MAX\n)\n\nconst (\n\tsizeofIfMsghdrDragonFlyBSD4         = C.sizeof_struct_if_msghdr\n\tsizeofIfaMsghdrDragonFlyBSD4        = C.sizeof_struct_ifa_msghdr\n\tsizeofIfmaMsghdrDragonFlyBSD4       = C.sizeof_struct_ifma_msghdr\n\tsizeofIfAnnouncemsghdrDragonFlyBSD4 = C.sizeof_struct_if_announcemsghdr\n\n\tsizeofRtMsghdrDragonFlyBSD4  = C.sizeof_struct_rt_msghdr\n\tsizeofRtMetricsDragonFlyBSD4 = C.sizeof_struct_rt_metrics\n\n\tsizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage\n\tsizeofSockaddrInet    = C.sizeof_struct_sockaddr_in\n\tsizeofSockaddrInet6   = C.sizeof_struct_sockaddr_in6\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/defs_freebsd.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\npackage route\n\n/*\n#include <sys/socket.h>\n#include <sys/sysctl.h>\n\n#include <net/if.h>\n#include <net/if_dl.h>\n#include <net/route.h>\n\n#include <netinet/in.h>\n\nstruct if_data_freebsd7 {\n\tu_char ifi_type;\n\tu_char ifi_physical;\n\tu_char ifi_addrlen;\n\tu_char ifi_hdrlen;\n\tu_char ifi_link_state;\n\tu_char ifi_spare_char1;\n\tu_char ifi_spare_char2;\n\tu_char ifi_datalen;\n\tu_long ifi_mtu;\n\tu_long ifi_metric;\n\tu_long ifi_baudrate;\n\tu_long ifi_ipackets;\n\tu_long ifi_ierrors;\n\tu_long ifi_opackets;\n\tu_long ifi_oerrors;\n\tu_long ifi_collisions;\n\tu_long ifi_ibytes;\n\tu_long ifi_obytes;\n\tu_long ifi_imcasts;\n\tu_long ifi_omcasts;\n\tu_long ifi_iqdrops;\n\tu_long ifi_noproto;\n\tu_long ifi_hwassist;\n\ttime_t __ifi_epoch;\n\tstruct timeval __ifi_lastchange;\n};\n\nstruct if_data_freebsd8 {\n\tu_char ifi_type;\n\tu_char ifi_physical;\n\tu_char ifi_addrlen;\n\tu_char ifi_hdrlen;\n\tu_char ifi_link_state;\n\tu_char ifi_spare_char1;\n\tu_char ifi_spare_char2;\n\tu_char ifi_datalen;\n\tu_long ifi_mtu;\n\tu_long ifi_metric;\n\tu_long ifi_baudrate;\n\tu_long ifi_ipackets;\n\tu_long ifi_ierrors;\n\tu_long ifi_opackets;\n\tu_long ifi_oerrors;\n\tu_long ifi_collisions;\n\tu_long ifi_ibytes;\n\tu_long ifi_obytes;\n\tu_long ifi_imcasts;\n\tu_long ifi_omcasts;\n\tu_long ifi_iqdrops;\n\tu_long ifi_noproto;\n\tu_long ifi_hwassist;\n\ttime_t __ifi_epoch;\n\tstruct timeval __ifi_lastchange;\n};\n\nstruct if_data_freebsd9 {\n\tu_char ifi_type;\n\tu_char ifi_physical;\n\tu_char ifi_addrlen;\n\tu_char ifi_hdrlen;\n\tu_char ifi_link_state;\n\tu_char ifi_spare_char1;\n\tu_char ifi_spare_char2;\n\tu_char ifi_datalen;\n\tu_long ifi_mtu;\n\tu_long ifi_metric;\n\tu_long ifi_baudrate;\n\tu_long ifi_ipackets;\n\tu_long ifi_ierrors;\n\tu_long ifi_opackets;\n\tu_long 
ifi_oerrors;\n\tu_long ifi_collisions;\n\tu_long ifi_ibytes;\n\tu_long ifi_obytes;\n\tu_long ifi_imcasts;\n\tu_long ifi_omcasts;\n\tu_long ifi_iqdrops;\n\tu_long ifi_noproto;\n\tu_long ifi_hwassist;\n\ttime_t __ifi_epoch;\n\tstruct timeval __ifi_lastchange;\n};\n\nstruct if_data_freebsd10 {\n\tu_char ifi_type;\n\tu_char ifi_physical;\n\tu_char ifi_addrlen;\n\tu_char ifi_hdrlen;\n\tu_char ifi_link_state;\n\tu_char ifi_vhid;\n\tu_char ifi_baudrate_pf;\n\tu_char ifi_datalen;\n\tu_long ifi_mtu;\n\tu_long ifi_metric;\n\tu_long ifi_baudrate;\n\tu_long ifi_ipackets;\n\tu_long ifi_ierrors;\n\tu_long ifi_opackets;\n\tu_long ifi_oerrors;\n\tu_long ifi_collisions;\n\tu_long ifi_ibytes;\n\tu_long ifi_obytes;\n\tu_long ifi_imcasts;\n\tu_long ifi_omcasts;\n\tu_long ifi_iqdrops;\n\tu_long ifi_noproto;\n\tuint64_t ifi_hwassist;\n\ttime_t __ifi_epoch;\n\tstruct timeval __ifi_lastchange;\n};\n\nstruct if_data_freebsd11 {\n\tuint8_t ifi_type;\n\tuint8_t ifi_physical;\n\tuint8_t ifi_addrlen;\n\tuint8_t ifi_hdrlen;\n\tuint8_t ifi_link_state;\n\tuint8_t ifi_vhid;\n\tuint16_t ifi_datalen;\n\tuint32_t ifi_mtu;\n\tuint32_t ifi_metric;\n\tuint64_t ifi_baudrate;\n\tuint64_t ifi_ipackets;\n\tuint64_t ifi_ierrors;\n\tuint64_t ifi_opackets;\n\tuint64_t ifi_oerrors;\n\tuint64_t ifi_collisions;\n\tuint64_t ifi_ibytes;\n\tuint64_t ifi_obytes;\n\tuint64_t ifi_imcasts;\n\tuint64_t ifi_omcasts;\n\tuint64_t ifi_iqdrops;\n\tuint64_t ifi_oqdrops;\n\tuint64_t ifi_noproto;\n\tuint64_t ifi_hwassist;\n\tunion {\n\t\ttime_t tt;\n\t\tuint64_t ph;\n\t} __ifi_epoch;\n\tunion {\n\t\tstruct timeval tv;\n\t\tstruct {\n\t\t\tuint64_t ph1;\n\t\t\tuint64_t ph2;\n\t\t} ph;\n\t} __ifi_lastchange;\n};\n\nstruct if_msghdr_freebsd7 {\n\tu_short ifm_msglen;\n\tu_char ifm_version;\n\tu_char ifm_type;\n\tint ifm_addrs;\n\tint ifm_flags;\n\tu_short ifm_index;\n\tstruct if_data_freebsd7 ifm_data;\n};\n\nstruct if_msghdr_freebsd8 {\n\tu_short ifm_msglen;\n\tu_char ifm_version;\n\tu_char ifm_type;\n\tint ifm_addrs;\n\tint 
ifm_flags;\n\tu_short ifm_index;\n\tstruct if_data_freebsd8 ifm_data;\n};\n\nstruct if_msghdr_freebsd9 {\n\tu_short ifm_msglen;\n\tu_char ifm_version;\n\tu_char ifm_type;\n\tint ifm_addrs;\n\tint ifm_flags;\n\tu_short ifm_index;\n\tstruct if_data_freebsd9 ifm_data;\n};\n\nstruct if_msghdr_freebsd10 {\n\tu_short ifm_msglen;\n\tu_char ifm_version;\n\tu_char ifm_type;\n\tint ifm_addrs;\n\tint ifm_flags;\n\tu_short ifm_index;\n\tstruct if_data_freebsd10 ifm_data;\n};\n\nstruct if_msghdr_freebsd11 {\n\tu_short ifm_msglen;\n\tu_char ifm_version;\n\tu_char ifm_type;\n\tint ifm_addrs;\n\tint ifm_flags;\n\tu_short ifm_index;\n\tstruct if_data_freebsd11 ifm_data;\n};\n*/\nimport \"C\"\n\nconst (\n\tsysAF_UNSPEC = C.AF_UNSPEC\n\tsysAF_INET   = C.AF_INET\n\tsysAF_ROUTE  = C.AF_ROUTE\n\tsysAF_LINK   = C.AF_LINK\n\tsysAF_INET6  = C.AF_INET6\n\n\tsysSOCK_RAW = C.SOCK_RAW\n\n\tsysNET_RT_DUMP     = C.NET_RT_DUMP\n\tsysNET_RT_FLAGS    = C.NET_RT_FLAGS\n\tsysNET_RT_IFLIST   = C.NET_RT_IFLIST\n\tsysNET_RT_IFMALIST = C.NET_RT_IFMALIST\n\tsysNET_RT_IFLISTL  = C.NET_RT_IFLISTL\n)\n\nconst (\n\tsysCTL_MAXNAME = C.CTL_MAXNAME\n\n\tsysCTL_UNSPEC   = C.CTL_UNSPEC\n\tsysCTL_KERN     = C.CTL_KERN\n\tsysCTL_VM       = C.CTL_VM\n\tsysCTL_VFS      = C.CTL_VFS\n\tsysCTL_NET      = C.CTL_NET\n\tsysCTL_DEBUG    = C.CTL_DEBUG\n\tsysCTL_HW       = C.CTL_HW\n\tsysCTL_MACHDEP  = C.CTL_MACHDEP\n\tsysCTL_USER     = C.CTL_USER\n\tsysCTL_P1003_1B = C.CTL_P1003_1B\n)\n\nconst (\n\tsysRTM_VERSION = C.RTM_VERSION\n\n\tsysRTM_ADD        = C.RTM_ADD\n\tsysRTM_DELETE     = C.RTM_DELETE\n\tsysRTM_CHANGE     = C.RTM_CHANGE\n\tsysRTM_GET        = C.RTM_GET\n\tsysRTM_LOSING     = C.RTM_LOSING\n\tsysRTM_REDIRECT   = C.RTM_REDIRECT\n\tsysRTM_MISS       = C.RTM_MISS\n\tsysRTM_LOCK       = C.RTM_LOCK\n\tsysRTM_RESOLVE    = C.RTM_RESOLVE\n\tsysRTM_NEWADDR    = C.RTM_NEWADDR\n\tsysRTM_DELADDR    = C.RTM_DELADDR\n\tsysRTM_IFINFO     = C.RTM_IFINFO\n\tsysRTM_NEWMADDR   = C.RTM_NEWMADDR\n\tsysRTM_DELMADDR   = 
C.RTM_DELMADDR\n\tsysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE\n\tsysRTM_IEEE80211  = C.RTM_IEEE80211\n\n\tsysRTA_DST     = C.RTA_DST\n\tsysRTA_GATEWAY = C.RTA_GATEWAY\n\tsysRTA_NETMASK = C.RTA_NETMASK\n\tsysRTA_GENMASK = C.RTA_GENMASK\n\tsysRTA_IFP     = C.RTA_IFP\n\tsysRTA_IFA     = C.RTA_IFA\n\tsysRTA_AUTHOR  = C.RTA_AUTHOR\n\tsysRTA_BRD     = C.RTA_BRD\n\n\tsysRTAX_DST     = C.RTAX_DST\n\tsysRTAX_GATEWAY = C.RTAX_GATEWAY\n\tsysRTAX_NETMASK = C.RTAX_NETMASK\n\tsysRTAX_GENMASK = C.RTAX_GENMASK\n\tsysRTAX_IFP     = C.RTAX_IFP\n\tsysRTAX_IFA     = C.RTAX_IFA\n\tsysRTAX_AUTHOR  = C.RTAX_AUTHOR\n\tsysRTAX_BRD     = C.RTAX_BRD\n\tsysRTAX_MAX     = C.RTAX_MAX\n)\n\nconst (\n\tsizeofIfMsghdrlFreeBSD10        = C.sizeof_struct_if_msghdrl\n\tsizeofIfaMsghdrFreeBSD10        = C.sizeof_struct_ifa_msghdr\n\tsizeofIfaMsghdrlFreeBSD10       = C.sizeof_struct_ifa_msghdrl\n\tsizeofIfmaMsghdrFreeBSD10       = C.sizeof_struct_ifma_msghdr\n\tsizeofIfAnnouncemsghdrFreeBSD10 = C.sizeof_struct_if_announcemsghdr\n\n\tsizeofRtMsghdrFreeBSD10  = C.sizeof_struct_rt_msghdr\n\tsizeofRtMetricsFreeBSD10 = C.sizeof_struct_rt_metrics\n\n\tsizeofIfMsghdrFreeBSD7  = C.sizeof_struct_if_msghdr_freebsd7\n\tsizeofIfMsghdrFreeBSD8  = C.sizeof_struct_if_msghdr_freebsd8\n\tsizeofIfMsghdrFreeBSD9  = C.sizeof_struct_if_msghdr_freebsd9\n\tsizeofIfMsghdrFreeBSD10 = C.sizeof_struct_if_msghdr_freebsd10\n\tsizeofIfMsghdrFreeBSD11 = C.sizeof_struct_if_msghdr_freebsd11\n\n\tsizeofIfDataFreeBSD7  = C.sizeof_struct_if_data_freebsd7\n\tsizeofIfDataFreeBSD8  = C.sizeof_struct_if_data_freebsd8\n\tsizeofIfDataFreeBSD9  = C.sizeof_struct_if_data_freebsd9\n\tsizeofIfDataFreeBSD10 = C.sizeof_struct_if_data_freebsd10\n\tsizeofIfDataFreeBSD11 = C.sizeof_struct_if_data_freebsd11\n\n\tsizeofIfMsghdrlFreeBSD10Emu        = C.sizeof_struct_if_msghdrl\n\tsizeofIfaMsghdrFreeBSD10Emu        = C.sizeof_struct_ifa_msghdr\n\tsizeofIfaMsghdrlFreeBSD10Emu       = C.sizeof_struct_ifa_msghdrl\n\tsizeofIfmaMsghdrFreeBSD10Emu       = 
C.sizeof_struct_ifma_msghdr\n\tsizeofIfAnnouncemsghdrFreeBSD10Emu = C.sizeof_struct_if_announcemsghdr\n\n\tsizeofRtMsghdrFreeBSD10Emu  = C.sizeof_struct_rt_msghdr\n\tsizeofRtMetricsFreeBSD10Emu = C.sizeof_struct_rt_metrics\n\n\tsizeofIfMsghdrFreeBSD7Emu  = C.sizeof_struct_if_msghdr_freebsd7\n\tsizeofIfMsghdrFreeBSD8Emu  = C.sizeof_struct_if_msghdr_freebsd8\n\tsizeofIfMsghdrFreeBSD9Emu  = C.sizeof_struct_if_msghdr_freebsd9\n\tsizeofIfMsghdrFreeBSD10Emu = C.sizeof_struct_if_msghdr_freebsd10\n\tsizeofIfMsghdrFreeBSD11Emu = C.sizeof_struct_if_msghdr_freebsd11\n\n\tsizeofIfDataFreeBSD7Emu  = C.sizeof_struct_if_data_freebsd7\n\tsizeofIfDataFreeBSD8Emu  = C.sizeof_struct_if_data_freebsd8\n\tsizeofIfDataFreeBSD9Emu  = C.sizeof_struct_if_data_freebsd9\n\tsizeofIfDataFreeBSD10Emu = C.sizeof_struct_if_data_freebsd10\n\tsizeofIfDataFreeBSD11Emu = C.sizeof_struct_if_data_freebsd11\n\n\tsizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage\n\tsizeofSockaddrInet    = C.sizeof_struct_sockaddr_in\n\tsizeofSockaddrInet6   = C.sizeof_struct_sockaddr_in6\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/defs_netbsd.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\npackage route\n\n/*\n#include <sys/socket.h>\n#include <sys/sysctl.h>\n\n#include <net/if.h>\n#include <net/if_dl.h>\n#include <net/route.h>\n\n#include <netinet/in.h>\n*/\nimport \"C\"\n\nconst (\n\tsysAF_UNSPEC = C.AF_UNSPEC\n\tsysAF_INET   = C.AF_INET\n\tsysAF_ROUTE  = C.AF_ROUTE\n\tsysAF_LINK   = C.AF_LINK\n\tsysAF_INET6  = C.AF_INET6\n\n\tsysSOCK_RAW = C.SOCK_RAW\n\n\tsysNET_RT_DUMP   = C.NET_RT_DUMP\n\tsysNET_RT_FLAGS  = C.NET_RT_FLAGS\n\tsysNET_RT_IFLIST = C.NET_RT_IFLIST\n\tsysNET_RT_MAXID  = C.NET_RT_MAXID\n)\n\nconst (\n\tsysCTL_MAXNAME = C.CTL_MAXNAME\n\n\tsysCTL_UNSPEC   = C.CTL_UNSPEC\n\tsysCTL_KERN     = C.CTL_KERN\n\tsysCTL_VM       = C.CTL_VM\n\tsysCTL_VFS      = C.CTL_VFS\n\tsysCTL_NET      = C.CTL_NET\n\tsysCTL_DEBUG    = C.CTL_DEBUG\n\tsysCTL_HW       = C.CTL_HW\n\tsysCTL_MACHDEP  = C.CTL_MACHDEP\n\tsysCTL_USER     = C.CTL_USER\n\tsysCTL_DDB      = C.CTL_DDB\n\tsysCTL_PROC     = C.CTL_PROC\n\tsysCTL_VENDOR   = C.CTL_VENDOR\n\tsysCTL_EMUL     = C.CTL_EMUL\n\tsysCTL_SECURITY = C.CTL_SECURITY\n\tsysCTL_MAXID    = C.CTL_MAXID\n)\n\nconst (\n\tsysRTM_VERSION = C.RTM_VERSION\n\n\tsysRTM_ADD        = C.RTM_ADD\n\tsysRTM_DELETE     = C.RTM_DELETE\n\tsysRTM_CHANGE     = C.RTM_CHANGE\n\tsysRTM_GET        = C.RTM_GET\n\tsysRTM_LOSING     = C.RTM_LOSING\n\tsysRTM_REDIRECT   = C.RTM_REDIRECT\n\tsysRTM_MISS       = C.RTM_MISS\n\tsysRTM_LOCK       = C.RTM_LOCK\n\tsysRTM_OLDADD     = C.RTM_OLDADD\n\tsysRTM_OLDDEL     = C.RTM_OLDDEL\n\tsysRTM_RESOLVE    = C.RTM_RESOLVE\n\tsysRTM_NEWADDR    = C.RTM_NEWADDR\n\tsysRTM_DELADDR    = C.RTM_DELADDR\n\tsysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE\n\tsysRTM_IEEE80211  = C.RTM_IEEE80211\n\tsysRTM_SETGATE    = C.RTM_SETGATE\n\tsysRTM_LLINFO_UPD = C.RTM_LLINFO_UPD\n\tsysRTM_IFINFO     = C.RTM_IFINFO\n\tsysRTM_CHGADDR    = 
C.RTM_CHGADDR\n\n\tsysRTA_DST     = C.RTA_DST\n\tsysRTA_GATEWAY = C.RTA_GATEWAY\n\tsysRTA_NETMASK = C.RTA_NETMASK\n\tsysRTA_GENMASK = C.RTA_GENMASK\n\tsysRTA_IFP     = C.RTA_IFP\n\tsysRTA_IFA     = C.RTA_IFA\n\tsysRTA_AUTHOR  = C.RTA_AUTHOR\n\tsysRTA_BRD     = C.RTA_BRD\n\tsysRTA_TAG     = C.RTA_TAG\n\n\tsysRTAX_DST     = C.RTAX_DST\n\tsysRTAX_GATEWAY = C.RTAX_GATEWAY\n\tsysRTAX_NETMASK = C.RTAX_NETMASK\n\tsysRTAX_GENMASK = C.RTAX_GENMASK\n\tsysRTAX_IFP     = C.RTAX_IFP\n\tsysRTAX_IFA     = C.RTAX_IFA\n\tsysRTAX_AUTHOR  = C.RTAX_AUTHOR\n\tsysRTAX_BRD     = C.RTAX_BRD\n\tsysRTAX_TAG     = C.RTAX_TAG\n\tsysRTAX_MAX     = C.RTAX_MAX\n)\n\nconst (\n\tsizeofIfMsghdrNetBSD7         = C.sizeof_struct_if_msghdr\n\tsizeofIfaMsghdrNetBSD7        = C.sizeof_struct_ifa_msghdr\n\tsizeofIfAnnouncemsghdrNetBSD7 = C.sizeof_struct_if_announcemsghdr\n\n\tsizeofRtMsghdrNetBSD7  = C.sizeof_struct_rt_msghdr\n\tsizeofRtMetricsNetBSD7 = C.sizeof_struct_rt_metrics\n\n\tsizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage\n\tsizeofSockaddrInet    = C.sizeof_struct_sockaddr_in\n\tsizeofSockaddrInet6   = C.sizeof_struct_sockaddr_in6\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/defs_openbsd.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\npackage route\n\n/*\n#include <sys/socket.h>\n#include <sys/sysctl.h>\n\n#include <net/if.h>\n#include <net/if_dl.h>\n#include <net/route.h>\n\n#include <netinet/in.h>\n*/\nimport \"C\"\n\nconst (\n\tsysAF_UNSPEC = C.AF_UNSPEC\n\tsysAF_INET   = C.AF_INET\n\tsysAF_ROUTE  = C.AF_ROUTE\n\tsysAF_LINK   = C.AF_LINK\n\tsysAF_INET6  = C.AF_INET6\n\n\tsysSOCK_RAW = C.SOCK_RAW\n\n\tsysNET_RT_DUMP    = C.NET_RT_DUMP\n\tsysNET_RT_FLAGS   = C.NET_RT_FLAGS\n\tsysNET_RT_IFLIST  = C.NET_RT_IFLIST\n\tsysNET_RT_STATS   = C.NET_RT_STATS\n\tsysNET_RT_TABLE   = C.NET_RT_TABLE\n\tsysNET_RT_IFNAMES = C.NET_RT_IFNAMES\n\tsysNET_RT_MAXID   = C.NET_RT_MAXID\n)\n\nconst (\n\tsysCTL_MAXNAME = C.CTL_MAXNAME\n\n\tsysCTL_UNSPEC  = C.CTL_UNSPEC\n\tsysCTL_KERN    = C.CTL_KERN\n\tsysCTL_VM      = C.CTL_VM\n\tsysCTL_FS      = C.CTL_FS\n\tsysCTL_NET     = C.CTL_NET\n\tsysCTL_DEBUG   = C.CTL_DEBUG\n\tsysCTL_HW      = C.CTL_HW\n\tsysCTL_MACHDEP = C.CTL_MACHDEP\n\tsysCTL_DDB     = C.CTL_DDB\n\tsysCTL_VFS     = C.CTL_VFS\n\tsysCTL_MAXID   = C.CTL_MAXID\n)\n\nconst (\n\tsysRTM_VERSION = C.RTM_VERSION\n\n\tsysRTM_ADD        = C.RTM_ADD\n\tsysRTM_DELETE     = C.RTM_DELETE\n\tsysRTM_CHANGE     = C.RTM_CHANGE\n\tsysRTM_GET        = C.RTM_GET\n\tsysRTM_LOSING     = C.RTM_LOSING\n\tsysRTM_REDIRECT   = C.RTM_REDIRECT\n\tsysRTM_MISS       = C.RTM_MISS\n\tsysRTM_LOCK       = C.RTM_LOCK\n\tsysRTM_RESOLVE    = C.RTM_RESOLVE\n\tsysRTM_NEWADDR    = C.RTM_NEWADDR\n\tsysRTM_DELADDR    = C.RTM_DELADDR\n\tsysRTM_IFINFO     = C.RTM_IFINFO\n\tsysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE\n\tsysRTM_DESYNC     = C.RTM_DESYNC\n\tsysRTM_INVALIDATE = C.RTM_INVALIDATE\n\tsysRTM_BFD        = C.RTM_BFD\n\tsysRTM_PROPOSAL   = C.RTM_PROPOSAL\n\n\tsysRTA_DST     = C.RTA_DST\n\tsysRTA_GATEWAY = C.RTA_GATEWAY\n\tsysRTA_NETMASK = 
C.RTA_NETMASK\n\tsysRTA_GENMASK = C.RTA_GENMASK\n\tsysRTA_IFP     = C.RTA_IFP\n\tsysRTA_IFA     = C.RTA_IFA\n\tsysRTA_AUTHOR  = C.RTA_AUTHOR\n\tsysRTA_BRD     = C.RTA_BRD\n\tsysRTA_SRC     = C.RTA_SRC\n\tsysRTA_SRCMASK = C.RTA_SRCMASK\n\tsysRTA_LABEL   = C.RTA_LABEL\n\tsysRTA_BFD     = C.RTA_BFD\n\tsysRTA_DNS     = C.RTA_DNS\n\tsysRTA_STATIC  = C.RTA_STATIC\n\tsysRTA_SEARCH  = C.RTA_SEARCH\n\n\tsysRTAX_DST     = C.RTAX_DST\n\tsysRTAX_GATEWAY = C.RTAX_GATEWAY\n\tsysRTAX_NETMASK = C.RTAX_NETMASK\n\tsysRTAX_GENMASK = C.RTAX_GENMASK\n\tsysRTAX_IFP     = C.RTAX_IFP\n\tsysRTAX_IFA     = C.RTAX_IFA\n\tsysRTAX_AUTHOR  = C.RTAX_AUTHOR\n\tsysRTAX_BRD     = C.RTAX_BRD\n\tsysRTAX_SRC     = C.RTAX_SRC\n\tsysRTAX_SRCMASK = C.RTAX_SRCMASK\n\tsysRTAX_LABEL   = C.RTAX_LABEL\n\tsysRTAX_BFD     = C.RTAX_BFD\n\tsysRTAX_DNS     = C.RTAX_DNS\n\tsysRTAX_STATIC  = C.RTAX_STATIC\n\tsysRTAX_SEARCH  = C.RTAX_SEARCH\n\tsysRTAX_MAX     = C.RTAX_MAX\n)\n\nconst (\n\tsizeofRtMsghdr = C.sizeof_struct_rt_msghdr\n\n\tsizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage\n\tsizeofSockaddrInet    = C.sizeof_struct_sockaddr_in\n\tsizeofSockaddrInet6   = C.sizeof_struct_sockaddr_in6\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/interface.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd openbsd\n\npackage route\n\n// An InterfaceMessage represents an interface message.\ntype InterfaceMessage struct {\n\tVersion int    // message version\n\tType    int    // message type\n\tFlags   int    // interface flags\n\tIndex   int    // interface index\n\tName    string // interface name\n\tAddrs   []Addr // addresses\n\n\textOff int    // offset of header extension\n\traw    []byte // raw message\n}\n\n// An InterfaceAddrMessage represents an interface address message.\ntype InterfaceAddrMessage struct {\n\tVersion int    // message version\n\tType    int    // message type\n\tFlags   int    // interface flags\n\tIndex   int    // interface index\n\tAddrs   []Addr // addresses\n\n\traw []byte // raw message\n}\n\n// Sys implements the Sys method of Message interface.\nfunc (m *InterfaceAddrMessage) Sys() []Sys { return nil }\n\n// An InterfaceMulticastAddrMessage represents an interface multicast\n// address message.\ntype InterfaceMulticastAddrMessage struct {\n\tVersion int    // message version\n\tType    int    // messsage type\n\tFlags   int    // interface flags\n\tIndex   int    // interface index\n\tAddrs   []Addr // addresses\n\n\traw []byte // raw message\n}\n\n// Sys implements the Sys method of Message interface.\nfunc (m *InterfaceMulticastAddrMessage) Sys() []Sys { return nil }\n\n// An InterfaceAnnounceMessage represents an interface announcement\n// message.\ntype InterfaceAnnounceMessage struct {\n\tVersion int    // message version\n\tType    int    // message type\n\tIndex   int    // interface index\n\tName    string // interface name\n\tWhat    int    // what type of announcement\n\n\traw []byte // raw message\n}\n\n// Sys implements the Sys method of Message interface.\nfunc (m *InterfaceAnnounceMessage) Sys() []Sys 
{ return nil }\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/interface_announce.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build dragonfly freebsd netbsd\n\npackage route\n\nfunc (w *wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) {\n\tif len(b) < w.bodyOff {\n\t\treturn nil, errMessageTooShort\n\t}\n\tl := int(nativeEndian.Uint16(b[:2]))\n\tif len(b) < l {\n\t\treturn nil, errInvalidMessage\n\t}\n\tm := &InterfaceAnnounceMessage{\n\t\tVersion: int(b[2]),\n\t\tType:    int(b[3]),\n\t\tIndex:   int(nativeEndian.Uint16(b[4:6])),\n\t\tWhat:    int(nativeEndian.Uint16(b[22:24])),\n\t\traw:     b[:l],\n\t}\n\tfor i := 0; i < 16; i++ {\n\t\tif b[6+i] != 0 {\n\t\t\tcontinue\n\t\t}\n\t\tm.Name = string(b[6 : 6+i])\n\t\tbreak\n\t}\n\treturn m, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/interface_classic.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly netbsd\n\npackage route\n\nimport \"runtime\"\n\nfunc (w *wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) {\n\tif len(b) < w.bodyOff {\n\t\treturn nil, errMessageTooShort\n\t}\n\tl := int(nativeEndian.Uint16(b[:2]))\n\tif len(b) < l {\n\t\treturn nil, errInvalidMessage\n\t}\n\tattrs := uint(nativeEndian.Uint32(b[4:8]))\n\tif attrs&sysRTA_IFP == 0 {\n\t\treturn nil, nil\n\t}\n\tm := &InterfaceMessage{\n\t\tVersion: int(b[2]),\n\t\tType:    int(b[3]),\n\t\tAddrs:   make([]Addr, sysRTAX_MAX),\n\t\tFlags:   int(nativeEndian.Uint32(b[8:12])),\n\t\tIndex:   int(nativeEndian.Uint16(b[12:14])),\n\t\textOff:  w.extOff,\n\t\traw:     b[:l],\n\t}\n\ta, err := parseLinkAddr(b[w.bodyOff:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.Addrs[sysRTAX_IFP] = a\n\tm.Name = a.(*LinkAddr).Name\n\treturn m, nil\n}\n\nfunc (w *wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) {\n\tif len(b) < w.bodyOff {\n\t\treturn nil, errMessageTooShort\n\t}\n\tl := int(nativeEndian.Uint16(b[:2]))\n\tif len(b) < l {\n\t\treturn nil, errInvalidMessage\n\t}\n\tm := &InterfaceAddrMessage{\n\t\tVersion: int(b[2]),\n\t\tType:    int(b[3]),\n\t\tFlags:   int(nativeEndian.Uint32(b[8:12])),\n\t\traw:     b[:l],\n\t}\n\tif runtime.GOOS == \"netbsd\" {\n\t\tm.Index = int(nativeEndian.Uint16(b[16:18]))\n\t} else {\n\t\tm.Index = int(nativeEndian.Uint16(b[12:14]))\n\t}\n\tvar err error\n\tm.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/interface_freebsd.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage route\n\nfunc (w *wireFormat) parseInterfaceMessage(typ RIBType, b []byte) (Message, error) {\n\tvar extOff, bodyOff int\n\tif typ == sysNET_RT_IFLISTL {\n\t\tif len(b) < 20 {\n\t\t\treturn nil, errMessageTooShort\n\t\t}\n\t\textOff = int(nativeEndian.Uint16(b[18:20]))\n\t\tbodyOff = int(nativeEndian.Uint16(b[16:18]))\n\t} else {\n\t\textOff = w.extOff\n\t\tbodyOff = w.bodyOff\n\t}\n\tif len(b) < extOff || len(b) < bodyOff {\n\t\treturn nil, errInvalidMessage\n\t}\n\tl := int(nativeEndian.Uint16(b[:2]))\n\tif len(b) < l {\n\t\treturn nil, errInvalidMessage\n\t}\n\tattrs := uint(nativeEndian.Uint32(b[4:8]))\n\tif attrs&sysRTA_IFP == 0 {\n\t\treturn nil, nil\n\t}\n\tm := &InterfaceMessage{\n\t\tVersion: int(b[2]),\n\t\tType:    int(b[3]),\n\t\tFlags:   int(nativeEndian.Uint32(b[8:12])),\n\t\tIndex:   int(nativeEndian.Uint16(b[12:14])),\n\t\tAddrs:   make([]Addr, sysRTAX_MAX),\n\t\textOff:  extOff,\n\t\traw:     b[:l],\n\t}\n\ta, err := parseLinkAddr(b[bodyOff:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.Addrs[sysRTAX_IFP] = a\n\tm.Name = a.(*LinkAddr).Name\n\treturn m, nil\n}\n\nfunc (w *wireFormat) parseInterfaceAddrMessage(typ RIBType, b []byte) (Message, error) {\n\tvar bodyOff int\n\tif typ == sysNET_RT_IFLISTL {\n\t\tif len(b) < 24 {\n\t\t\treturn nil, errMessageTooShort\n\t\t}\n\t\tbodyOff = int(nativeEndian.Uint16(b[16:18]))\n\t} else {\n\t\tbodyOff = w.bodyOff\n\t}\n\tif len(b) < bodyOff {\n\t\treturn nil, errInvalidMessage\n\t}\n\tl := int(nativeEndian.Uint16(b[:2]))\n\tif len(b) < l {\n\t\treturn nil, errInvalidMessage\n\t}\n\tm := &InterfaceAddrMessage{\n\t\tVersion: int(b[2]),\n\t\tType:    int(b[3]),\n\t\tFlags:   int(nativeEndian.Uint32(b[8:12])),\n\t\tIndex:   int(nativeEndian.Uint16(b[12:14])),\n\t\traw:     b[:l],\n\t}\n\tvar err error\n\tm.Addrs, err = 
parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[bodyOff:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/interface_multicast.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd\n\npackage route\n\nfunc (w *wireFormat) parseInterfaceMulticastAddrMessage(_ RIBType, b []byte) (Message, error) {\n\tif len(b) < w.bodyOff {\n\t\treturn nil, errMessageTooShort\n\t}\n\tl := int(nativeEndian.Uint16(b[:2]))\n\tif len(b) < l {\n\t\treturn nil, errInvalidMessage\n\t}\n\tm := &InterfaceMulticastAddrMessage{\n\t\tVersion: int(b[2]),\n\t\tType:    int(b[3]),\n\t\tFlags:   int(nativeEndian.Uint32(b[8:12])),\n\t\tIndex:   int(nativeEndian.Uint16(b[12:14])),\n\t\traw:     b[:l],\n\t}\n\tvar err error\n\tm.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/interface_openbsd.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage route\n\nfunc (*wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) {\n\tif len(b) < 32 {\n\t\treturn nil, errMessageTooShort\n\t}\n\tl := int(nativeEndian.Uint16(b[:2]))\n\tif len(b) < l {\n\t\treturn nil, errInvalidMessage\n\t}\n\tattrs := uint(nativeEndian.Uint32(b[12:16]))\n\tif attrs&sysRTA_IFP == 0 {\n\t\treturn nil, nil\n\t}\n\tm := &InterfaceMessage{\n\t\tVersion: int(b[2]),\n\t\tType:    int(b[3]),\n\t\tFlags:   int(nativeEndian.Uint32(b[16:20])),\n\t\tIndex:   int(nativeEndian.Uint16(b[6:8])),\n\t\tAddrs:   make([]Addr, sysRTAX_MAX),\n\t\traw:     b[:l],\n\t}\n\tll := int(nativeEndian.Uint16(b[4:6]))\n\tif len(b) < ll {\n\t\treturn nil, errInvalidMessage\n\t}\n\ta, err := parseLinkAddr(b[ll:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.Addrs[sysRTAX_IFP] = a\n\tm.Name = a.(*LinkAddr).Name\n\treturn m, nil\n}\n\nfunc (*wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) {\n\tif len(b) < 24 {\n\t\treturn nil, errMessageTooShort\n\t}\n\tl := int(nativeEndian.Uint16(b[:2]))\n\tif len(b) < l {\n\t\treturn nil, errInvalidMessage\n\t}\n\tbodyOff := int(nativeEndian.Uint16(b[4:6]))\n\tif len(b) < bodyOff {\n\t\treturn nil, errInvalidMessage\n\t}\n\tm := &InterfaceAddrMessage{\n\t\tVersion: int(b[2]),\n\t\tType:    int(b[3]),\n\t\tFlags:   int(nativeEndian.Uint32(b[12:16])),\n\t\tIndex:   int(nativeEndian.Uint16(b[6:8])),\n\t\traw:     b[:l],\n\t}\n\tvar err error\n\tm.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[bodyOff:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc (*wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) {\n\tif len(b) < 26 {\n\t\treturn nil, errMessageTooShort\n\t}\n\tl := int(nativeEndian.Uint16(b[:2]))\n\tif len(b) < 
l {\n\t\treturn nil, errInvalidMessage\n\t}\n\tm := &InterfaceAnnounceMessage{\n\t\tVersion: int(b[2]),\n\t\tType:    int(b[3]),\n\t\tIndex:   int(nativeEndian.Uint16(b[6:8])),\n\t\tWhat:    int(nativeEndian.Uint16(b[8:10])),\n\t\traw:     b[:l],\n\t}\n\tfor i := 0; i < 16; i++ {\n\t\tif b[10+i] != 0 {\n\t\t\tcontinue\n\t\t}\n\t\tm.Name = string(b[10 : 10+i])\n\t\tbreak\n\t}\n\treturn m, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/message.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd openbsd\n\npackage route\n\n// A Message represents a routing message.\ntype Message interface {\n\t// Sys returns operating system-specific information.\n\tSys() []Sys\n}\n\n// A Sys reprensents operating system-specific information.\ntype Sys interface {\n\t// SysType returns a type of operating system-specific\n\t// information.\n\tSysType() SysType\n}\n\n// A SysType represents a type of operating system-specific\n// information.\ntype SysType int\n\nconst (\n\tSysMetrics SysType = iota\n\tSysStats\n)\n\n// ParseRIB parses b as a routing information base and returns a list\n// of routing messages.\nfunc ParseRIB(typ RIBType, b []byte) ([]Message, error) {\n\tif !typ.parseable() {\n\t\treturn nil, errUnsupportedMessage\n\t}\n\tvar msgs []Message\n\tnmsgs, nskips := 0, 0\n\tfor len(b) > 4 {\n\t\tnmsgs++\n\t\tl := int(nativeEndian.Uint16(b[:2]))\n\t\tif l == 0 {\n\t\t\treturn nil, errInvalidMessage\n\t\t}\n\t\tif len(b) < l {\n\t\t\treturn nil, errMessageTooShort\n\t\t}\n\t\tif b[2] != sysRTM_VERSION {\n\t\t\tb = b[l:]\n\t\t\tcontinue\n\t\t}\n\t\tif w, ok := wireFormats[int(b[3])]; !ok {\n\t\t\tnskips++\n\t\t} else {\n\t\t\tm, err := w.parse(typ, b)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif m == nil {\n\t\t\t\tnskips++\n\t\t\t} else {\n\t\t\t\tmsgs = append(msgs, m)\n\t\t\t}\n\t\t}\n\t\tb = b[l:]\n\t}\n\t// We failed to parse any of the messages - version mismatch?\n\tif nmsgs != len(msgs)+nskips {\n\t\treturn nil, errMessageMismatch\n\t}\n\treturn msgs, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/message_darwin_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage route\n\nimport \"testing\"\n\nfunc TestFetchAndParseRIBOnDarwin(t *testing.T) {\n\tfor _, typ := range []RIBType{sysNET_RT_FLAGS, sysNET_RT_DUMP2, sysNET_RT_IFLIST2} {\n\t\tvar lastErr error\n\t\tvar ms []Message\n\t\tfor _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} {\n\t\t\trs, err := fetchAndParseRIB(af, typ)\n\t\t\tif err != nil {\n\t\t\t\tlastErr = err\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tms = append(ms, rs...)\n\t\t}\n\t\tif len(ms) == 0 && lastErr != nil {\n\t\t\tt.Error(typ, lastErr)\n\t\t\tcontinue\n\t\t}\n\t\tss, err := msgs(ms).validate()\n\t\tif err != nil {\n\t\t\tt.Error(typ, err)\n\t\t\tcontinue\n\t\t}\n\t\tfor _, s := range ss {\n\t\t\tt.Log(s)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/message_freebsd_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage route\n\nimport (\n\t\"testing\"\n\t\"unsafe\"\n)\n\nfunc TestFetchAndParseRIBOnFreeBSD(t *testing.T) {\n\tfor _, typ := range []RIBType{sysNET_RT_IFMALIST} {\n\t\tvar lastErr error\n\t\tvar ms []Message\n\t\tfor _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} {\n\t\t\trs, err := fetchAndParseRIB(af, typ)\n\t\t\tif err != nil {\n\t\t\t\tlastErr = err\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tms = append(ms, rs...)\n\t\t}\n\t\tif len(ms) == 0 && lastErr != nil {\n\t\t\tt.Error(typ, lastErr)\n\t\t\tcontinue\n\t\t}\n\t\tss, err := msgs(ms).validate()\n\t\tif err != nil {\n\t\t\tt.Error(typ, err)\n\t\t\tcontinue\n\t\t}\n\t\tfor _, s := range ss {\n\t\t\tt.Log(s)\n\t\t}\n\t}\n}\n\nfunc TestFetchAndParseRIBOnFreeBSD10AndAbove(t *testing.T) {\n\tif _, err := FetchRIB(sysAF_UNSPEC, sysNET_RT_IFLISTL, 0); err != nil {\n\t\tt.Skip(\"NET_RT_IFLISTL not supported\")\n\t}\n\tvar p uintptr\n\tif kernelAlign != int(unsafe.Sizeof(p)) {\n\t\tt.Skip(\"NET_RT_IFLIST vs. 
NET_RT_IFLISTL doesn't work for 386 emulation on amd64\")\n\t}\n\n\tvar tests = [2]struct {\n\t\ttyp  RIBType\n\t\tb    []byte\n\t\tmsgs []Message\n\t\tss   []string\n\t}{\n\t\t{typ: sysNET_RT_IFLIST},\n\t\t{typ: sysNET_RT_IFLISTL},\n\t}\n\tfor i := range tests {\n\t\tvar lastErr error\n\t\tfor _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} {\n\t\t\trs, err := fetchAndParseRIB(af, tests[i].typ)\n\t\t\tif err != nil {\n\t\t\t\tlastErr = err\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttests[i].msgs = append(tests[i].msgs, rs...)\n\t\t}\n\t\tif len(tests[i].msgs) == 0 && lastErr != nil {\n\t\t\tt.Error(tests[i].typ, lastErr)\n\t\t\tcontinue\n\t\t}\n\t\ttests[i].ss, lastErr = msgs(tests[i].msgs).validate()\n\t\tif lastErr != nil {\n\t\t\tt.Error(tests[i].typ, lastErr)\n\t\t\tcontinue\n\t\t}\n\t\tfor _, s := range tests[i].ss {\n\t\t\tt.Log(s)\n\t\t}\n\t}\n\tfor i := len(tests) - 1; i > 0; i-- {\n\t\tif len(tests[i].ss) != len(tests[i-1].ss) {\n\t\t\tt.Errorf(\"got %v; want %v\", tests[i].ss, tests[i-1].ss)\n\t\t\tcontinue\n\t\t}\n\t\tfor j, s1 := range tests[i].ss {\n\t\t\ts0 := tests[i-1].ss[j]\n\t\t\tif s1 != s0 {\n\t\t\t\tt.Errorf(\"got %s; want %s\", s1, s0)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/message_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd openbsd\n\npackage route\n\nimport (\n\t\"os\"\n\t\"syscall\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestFetchAndParseRIB(t *testing.T) {\n\tfor _, typ := range []RIBType{sysNET_RT_DUMP, sysNET_RT_IFLIST} {\n\t\tvar lastErr error\n\t\tvar ms []Message\n\t\tfor _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} {\n\t\t\trs, err := fetchAndParseRIB(af, typ)\n\t\t\tif err != nil {\n\t\t\t\tlastErr = err\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tms = append(ms, rs...)\n\t\t}\n\t\tif len(ms) == 0 && lastErr != nil {\n\t\t\tt.Error(typ, lastErr)\n\t\t\tcontinue\n\t\t}\n\t\tss, err := msgs(ms).validate()\n\t\tif err != nil {\n\t\t\tt.Error(typ, err)\n\t\t\tcontinue\n\t\t}\n\t\tfor _, s := range ss {\n\t\t\tt.Log(typ, s)\n\t\t}\n\t}\n}\n\nvar (\n\trtmonSock int\n\trtmonErr  error\n)\n\nfunc init() {\n\t// We need to keep rtmonSock alive to avoid treading on\n\t// recycled socket descriptors.\n\trtmonSock, rtmonErr = syscall.Socket(sysAF_ROUTE, sysSOCK_RAW, sysAF_UNSPEC)\n}\n\n// TestMonitorAndParseRIB leaks a worker goroutine and a socket\n// descriptor but that's intentional.\nfunc TestMonitorAndParseRIB(t *testing.T) {\n\tif testing.Short() || os.Getuid() != 0 {\n\t\tt.Skip(\"must be root\")\n\t}\n\n\tif rtmonErr != nil {\n\t\tt.Fatal(rtmonErr)\n\t}\n\n\t// We suppose that using an IPv4 link-local address and the\n\t// dot1Q ID for Token Ring and FDDI doesn't harm anyone.\n\tpv := &propVirtual{addr: \"169.254.0.1\", mask: \"255.255.255.0\"}\n\tif err := pv.configure(1002); err != nil {\n\t\tt.Skip(err)\n\t}\n\tif err := pv.setup(); err != nil {\n\t\tt.Skip(err)\n\t}\n\tpv.teardown()\n\n\tgo func() {\n\t\tb := make([]byte, os.Getpagesize())\n\t\tfor {\n\t\t\t// There's no easy way to unblock this read\n\t\t\t// call because the routing message 
exchange\n\t\t\t// over routing socket is a connectionless\n\t\t\t// message-oriented protocol, no control plane\n\t\t\t// for signaling connectivity, and we cannot\n\t\t\t// use the net package of standard library due\n\t\t\t// to the lack of support for routing socket\n\t\t\t// and circular dependency.\n\t\t\tn, err := syscall.Read(rtmonSock, b)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tms, err := ParseRIB(0, b[:n])\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tss, err := msgs(ms).validate()\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, s := range ss {\n\t\t\t\tt.Log(s)\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor _, vid := range []int{1002, 1003, 1004, 1005} {\n\t\tpv := &propVirtual{addr: \"169.254.0.1\", mask: \"255.255.255.0\"}\n\t\tif err := pv.configure(vid); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := pv.setup(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\ttime.Sleep(200 * time.Millisecond)\n\t\tif err := pv.teardown(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\ttime.Sleep(200 * time.Millisecond)\n\t}\n}\n\nfunc TestParseRIBWithFuzz(t *testing.T) {\n\tfor _, fuzz := range []string{\n\t\t\"0\\x00\\x05\\x050000000000000000\" +\n\t\t\t\"00000000000000000000\" +\n\t\t\t\"00000000000000000000\" +\n\t\t\t\"00000000000000000000\" +\n\t\t\t\"0000000000000\\x02000000\" +\n\t\t\t\"00000000\",\n\t\t\"\\x02\\x00\\x05\\f0000000000000000\" +\n\t\t\t\"0\\x0200000000000000\",\n\t\t\"\\x02\\x00\\x05\\x100000000000000\\x1200\" +\n\t\t\t\"0\\x00\\xff\\x00\",\n\t\t\"\\x02\\x00\\x05\\f0000000000000000\" +\n\t\t\t\"0\\x12000\\x00\\x02\\x0000\",\n\t\t\"\\x00\\x00\\x00\\x01\\x00\",\n\t\t\"00000\",\n\t} {\n\t\tfor typ := RIBType(0); typ < 256; typ++ {\n\t\t\tParseRIB(typ, []byte(fuzz))\n\t\t}\n\t}\n}\n\nfunc TestRouteMessage(t *testing.T) {\n\ts, err := syscall.Socket(sysAF_ROUTE, sysSOCK_RAW, sysAF_UNSPEC)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer syscall.Close(s)\n\n\tvar ms 
[]RouteMessage\n\tfor _, af := range []int{sysAF_INET, sysAF_INET6} {\n\t\tif _, err := fetchAndParseRIB(af, sysNET_RT_DUMP); err != nil {\n\t\t\tt.Log(err)\n\t\t\tcontinue\n\t\t}\n\t\tswitch af {\n\t\tcase sysAF_INET:\n\t\t\tms = append(ms, []RouteMessage{\n\t\t\t\t{\n\t\t\t\t\tType: sysRTM_GET,\n\t\t\t\t\tAddrs: []Addr{\n\t\t\t\t\t\t&Inet4Addr{IP: [4]byte{127, 0, 0, 1}},\n\t\t\t\t\t\tnil,\n\t\t\t\t\t\tnil,\n\t\t\t\t\t\tnil,\n\t\t\t\t\t\t&LinkAddr{},\n\t\t\t\t\t\t&Inet4Addr{},\n\t\t\t\t\t\tnil,\n\t\t\t\t\t\t&Inet4Addr{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: sysRTM_GET,\n\t\t\t\t\tAddrs: []Addr{\n\t\t\t\t\t\t&Inet4Addr{IP: [4]byte{127, 0, 0, 1}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}...)\n\t\tcase sysAF_INET6:\n\t\t\tms = append(ms, []RouteMessage{\n\t\t\t\t{\n\t\t\t\t\tType: sysRTM_GET,\n\t\t\t\t\tAddrs: []Addr{\n\t\t\t\t\t\t&Inet6Addr{IP: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}},\n\t\t\t\t\t\tnil,\n\t\t\t\t\t\tnil,\n\t\t\t\t\t\tnil,\n\t\t\t\t\t\t&LinkAddr{},\n\t\t\t\t\t\t&Inet6Addr{},\n\t\t\t\t\t\tnil,\n\t\t\t\t\t\t&Inet6Addr{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: sysRTM_GET,\n\t\t\t\t\tAddrs: []Addr{\n\t\t\t\t\t\t&Inet6Addr{IP: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}...)\n\t\t}\n\t}\n\tfor i, m := range ms {\n\t\tm.ID = uintptr(os.Getpid())\n\t\tm.Seq = i + 1\n\t\twb, err := m.Marshal()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%v: %v\", m, err)\n\t\t}\n\t\tif _, err := syscall.Write(s, wb); err != nil {\n\t\t\tt.Fatalf(\"%v: %v\", m, err)\n\t\t}\n\t\trb := make([]byte, os.Getpagesize())\n\t\tn, err := syscall.Read(s, rb)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%v: %v\", m, err)\n\t\t}\n\t\trms, err := ParseRIB(0, rb[:n])\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%v: %v\", m, err)\n\t\t}\n\t\tfor _, rm := range rms {\n\t\t\terr := rm.(*RouteMessage).Err\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"%v: %v\", m, err)\n\t\t\t}\n\t\t}\n\t\tss, err := 
msgs(rms).validate()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%v: %v\", m, err)\n\t\t}\n\t\tfor _, s := range ss {\n\t\t\tt.Log(s)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/route.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd openbsd\n\n// Package route provides basic functions for the manipulation of\n// packet routing facilities on BSD variants.\n//\n// The package supports any version of Darwin, any version of\n// DragonFly BSD, FreeBSD 7 through 11, NetBSD 6 and above, and\n// OpenBSD 5.6 and above.\npackage route\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"syscall\"\n)\n\nvar (\n\terrUnsupportedMessage = errors.New(\"unsupported message\")\n\terrMessageMismatch    = errors.New(\"message mismatch\")\n\terrMessageTooShort    = errors.New(\"message too short\")\n\terrInvalidMessage     = errors.New(\"invalid message\")\n\terrInvalidAddr        = errors.New(\"invalid address\")\n\terrShortBuffer        = errors.New(\"short buffer\")\n)\n\n// A RouteMessage represents a message conveying an address prefix, a\n// nexthop address and an output interface.\n//\n// Unlike other messages, this message can be used to query adjacency\n// information for the given address prefix, to add a new route, and\n// to delete or modify the existing route from the routing information\n// base inside the kernel by writing and reading route messages on a\n// routing socket.\n//\n// For the manipulation of routing information, the route message must\n// contain appropriate fields that include:\n//\n//\tVersion       = <must be specified>\n//\tType          = <must be specified>\n//\tFlags         = <must be specified>\n//\tIndex         = <must be specified if necessary>\n//\tID            = <must be specified>\n//\tSeq           = <must be specified>\n//\tAddrs         = <must be specified>\n//\n// The Type field specifies a type of manipulation, the Flags field\n// specifies a class of target information and the Addrs field\n// specifies target information like the 
following:\n//\n//\troute.RouteMessage{\n//\t\tVersion: RTM_VERSION,\n//\t\tType: RTM_GET,\n//\t\tFlags: RTF_UP | RTF_HOST,\n//\t\tID: uintptr(os.Getpid()),\n//\t\tSeq: 1,\n//\t\tAddrs: []route.Addrs{\n//\t\t\tRTAX_DST: &route.Inet4Addr{ ... },\n//\t\t\tRTAX_IFP: &route.LinkAddr{ ... },\n//\t\t\tRTAX_BRD: &route.Inet4Addr{ ... },\n//\t\t},\n//\t}\n//\n// The values for the above fields depend on the implementation of\n// each operating system.\n//\n// The Err field on a response message contains an error value on the\n// requested operation. If non-nil, the requested operation is failed.\ntype RouteMessage struct {\n\tVersion int     // message version\n\tType    int     // message type\n\tFlags   int     // route flags\n\tIndex   int     // interface index when atatched\n\tID      uintptr // sender's identifier; usually process ID\n\tSeq     int     // sequence number\n\tErr     error   // error on requested operation\n\tAddrs   []Addr  // addresses\n\n\textOff int    // offset of header extension\n\traw    []byte // raw message\n}\n\n// Marshal returns the binary encoding of m.\nfunc (m *RouteMessage) Marshal() ([]byte, error) {\n\treturn m.marshal()\n}\n\n// A RIBType reprensents a type of routing information base.\ntype RIBType int\n\nconst (\n\tRIBTypeRoute     RIBType = syscall.NET_RT_DUMP\n\tRIBTypeInterface RIBType = syscall.NET_RT_IFLIST\n)\n\n// FetchRIB fetches a routing information base from the operating\n// system.\n//\n// The provided af must be an address family.\n//\n// The provided arg must be a RIBType-specific argument.\n// When RIBType is related to routes, arg might be a set of route\n// flags. When RIBType is related to network interfaces, arg might be\n// an interface index or a set of interface flags. 
In most cases, zero\n// means a wildcard.\nfunc FetchRIB(af int, typ RIBType, arg int) ([]byte, error) {\n\tmib := [6]int32{sysCTL_NET, sysAF_ROUTE, 0, int32(af), int32(typ), int32(arg)}\n\tn := uintptr(0)\n\tif err := sysctl(mib[:], nil, &n, nil, 0); err != nil {\n\t\treturn nil, os.NewSyscallError(\"sysctl\", err)\n\t}\n\tif n == 0 {\n\t\treturn nil, nil\n\t}\n\tb := make([]byte, n)\n\tif err := sysctl(mib[:], &b[0], &n, nil, 0); err != nil {\n\t\treturn nil, os.NewSyscallError(\"sysctl\", err)\n\t}\n\treturn b[:n], nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/route_classic.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd\n\npackage route\n\nimport \"syscall\"\n\nfunc (m *RouteMessage) marshal() ([]byte, error) {\n\tw, ok := wireFormats[m.Type]\n\tif !ok {\n\t\treturn nil, errUnsupportedMessage\n\t}\n\tl := w.bodyOff + addrsSpace(m.Addrs)\n\tb := make([]byte, l)\n\tnativeEndian.PutUint16(b[:2], uint16(l))\n\tif m.Version == 0 {\n\t\tb[2] = sysRTM_VERSION\n\t} else {\n\t\tb[2] = byte(m.Version)\n\t}\n\tb[3] = byte(m.Type)\n\tnativeEndian.PutUint32(b[8:12], uint32(m.Flags))\n\tnativeEndian.PutUint16(b[4:6], uint16(m.Index))\n\tnativeEndian.PutUint32(b[16:20], uint32(m.ID))\n\tnativeEndian.PutUint32(b[20:24], uint32(m.Seq))\n\tattrs, err := marshalAddrs(b[w.bodyOff:], m.Addrs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif attrs > 0 {\n\t\tnativeEndian.PutUint32(b[12:16], uint32(attrs))\n\t}\n\treturn b, nil\n}\n\nfunc (w *wireFormat) parseRouteMessage(typ RIBType, b []byte) (Message, error) {\n\tif len(b) < w.bodyOff {\n\t\treturn nil, errMessageTooShort\n\t}\n\tl := int(nativeEndian.Uint16(b[:2]))\n\tif len(b) < l {\n\t\treturn nil, errInvalidMessage\n\t}\n\tm := &RouteMessage{\n\t\tVersion: int(b[2]),\n\t\tType:    int(b[3]),\n\t\tFlags:   int(nativeEndian.Uint32(b[8:12])),\n\t\tIndex:   int(nativeEndian.Uint16(b[4:6])),\n\t\tID:      uintptr(nativeEndian.Uint32(b[16:20])),\n\t\tSeq:     int(nativeEndian.Uint32(b[20:24])),\n\t\textOff:  w.extOff,\n\t\traw:     b[:l],\n\t}\n\terrno := syscall.Errno(nativeEndian.Uint32(b[28:32]))\n\tif errno != 0 {\n\t\tm.Err = errno\n\t}\n\tvar err error\n\tm.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[w.bodyOff:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/route_openbsd.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage route\n\nimport \"syscall\"\n\nfunc (m *RouteMessage) marshal() ([]byte, error) {\n\tl := sizeofRtMsghdr + addrsSpace(m.Addrs)\n\tb := make([]byte, l)\n\tnativeEndian.PutUint16(b[:2], uint16(l))\n\tif m.Version == 0 {\n\t\tb[2] = sysRTM_VERSION\n\t} else {\n\t\tb[2] = byte(m.Version)\n\t}\n\tb[3] = byte(m.Type)\n\tnativeEndian.PutUint16(b[4:6], uint16(sizeofRtMsghdr))\n\tnativeEndian.PutUint32(b[16:20], uint32(m.Flags))\n\tnativeEndian.PutUint16(b[6:8], uint16(m.Index))\n\tnativeEndian.PutUint32(b[24:28], uint32(m.ID))\n\tnativeEndian.PutUint32(b[28:32], uint32(m.Seq))\n\tattrs, err := marshalAddrs(b[sizeofRtMsghdr:], m.Addrs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif attrs > 0 {\n\t\tnativeEndian.PutUint32(b[12:16], uint32(attrs))\n\t}\n\treturn b, nil\n}\n\nfunc (*wireFormat) parseRouteMessage(_ RIBType, b []byte) (Message, error) {\n\tif len(b) < sizeofRtMsghdr {\n\t\treturn nil, errMessageTooShort\n\t}\n\tl := int(nativeEndian.Uint16(b[:2]))\n\tif len(b) < l {\n\t\treturn nil, errInvalidMessage\n\t}\n\tm := &RouteMessage{\n\t\tVersion: int(b[2]),\n\t\tType:    int(b[3]),\n\t\tFlags:   int(nativeEndian.Uint32(b[16:20])),\n\t\tIndex:   int(nativeEndian.Uint16(b[6:8])),\n\t\tID:      uintptr(nativeEndian.Uint32(b[24:28])),\n\t\tSeq:     int(nativeEndian.Uint32(b[28:32])),\n\t\traw:     b[:l],\n\t}\n\tll := int(nativeEndian.Uint16(b[4:6]))\n\tif len(b) < ll {\n\t\treturn nil, errInvalidMessage\n\t}\n\terrno := syscall.Errno(nativeEndian.Uint32(b[32:36]))\n\tif errno != 0 {\n\t\tm.Err = errno\n\t}\n\tas, err := parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[ll:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.Addrs = as\n\treturn m, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/route_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd openbsd\n\npackage route\n\nimport (\n\t\"fmt\"\n\t\"os/exec\"\n\t\"runtime\"\n\t\"time\"\n)\n\nfunc (m *RouteMessage) String() string {\n\treturn fmt.Sprintf(\"%s\", addrAttrs(nativeEndian.Uint32(m.raw[12:16])))\n}\n\nfunc (m *InterfaceMessage) String() string {\n\tvar attrs addrAttrs\n\tif runtime.GOOS == \"openbsd\" {\n\t\tattrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))\n\t} else {\n\t\tattrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))\n\t}\n\treturn fmt.Sprintf(\"%s\", attrs)\n}\n\nfunc (m *InterfaceAddrMessage) String() string {\n\tvar attrs addrAttrs\n\tif runtime.GOOS == \"openbsd\" {\n\t\tattrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))\n\t} else {\n\t\tattrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))\n\t}\n\treturn fmt.Sprintf(\"%s\", attrs)\n}\n\nfunc (m *InterfaceMulticastAddrMessage) String() string {\n\treturn fmt.Sprintf(\"%s\", addrAttrs(nativeEndian.Uint32(m.raw[4:8])))\n}\n\nfunc (m *InterfaceAnnounceMessage) String() string {\n\twhat := \"<nil>\"\n\tswitch m.What {\n\tcase 0:\n\t\twhat = \"arrival\"\n\tcase 1:\n\t\twhat = \"departure\"\n\t}\n\treturn fmt.Sprintf(\"(%d %s %s)\", m.Index, m.Name, what)\n}\n\nfunc (m *InterfaceMetrics) String() string {\n\treturn fmt.Sprintf(\"(type=%d mtu=%d)\", m.Type, m.MTU)\n}\n\nfunc (m *RouteMetrics) String() string {\n\treturn fmt.Sprintf(\"(pmtu=%d)\", m.PathMTU)\n}\n\ntype addrAttrs uint\n\nvar addrAttrNames = [...]string{\n\t\"dst\",\n\t\"gateway\",\n\t\"netmask\",\n\t\"genmask\",\n\t\"ifp\",\n\t\"ifa\",\n\t\"author\",\n\t\"brd\",\n\t\"df:mpls1-n:tag-o:src\", // mpls1 for dragonfly, tag for netbsd, src for openbsd\n\t\"df:mpls2-o:srcmask\",   // mpls2 for dragonfly, srcmask for openbsd\n\t\"df:mpls3-o:label\",     // mpls3 for dragonfly, label for openbsd\n\t\"o:bfd\",          
      // bfd for openbsd\n\t\"o:dns\",                // dns for openbsd\n\t\"o:static\",             // static for openbsd\n\t\"o:search\",             // search for openbsd\n}\n\nfunc (attrs addrAttrs) String() string {\n\tvar s string\n\tfor i, name := range addrAttrNames {\n\t\tif attrs&(1<<uint(i)) != 0 {\n\t\t\tif s != \"\" {\n\t\t\t\ts += \"|\"\n\t\t\t}\n\t\t\ts += name\n\t\t}\n\t}\n\tif s == \"\" {\n\t\treturn \"<nil>\"\n\t}\n\treturn s\n}\n\ntype msgs []Message\n\nfunc (ms msgs) validate() ([]string, error) {\n\tvar ss []string\n\tfor _, m := range ms {\n\t\tswitch m := m.(type) {\n\t\tcase *RouteMessage:\n\t\t\tif err := addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[12:16]))); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tsys := m.Sys()\n\t\t\tif sys == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"no sys for %s\", m.String())\n\t\t\t}\n\t\t\tss = append(ss, m.String()+\" \"+syss(sys).String()+\" \"+addrs(m.Addrs).String())\n\t\tcase *InterfaceMessage:\n\t\t\tvar attrs addrAttrs\n\t\t\tif runtime.GOOS == \"openbsd\" {\n\t\t\t\tattrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))\n\t\t\t} else {\n\t\t\t\tattrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))\n\t\t\t}\n\t\t\tif err := addrs(m.Addrs).match(attrs); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tsys := m.Sys()\n\t\t\tif sys == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"no sys for %s\", m.String())\n\t\t\t}\n\t\t\tss = append(ss, m.String()+\" \"+syss(sys).String()+\" \"+addrs(m.Addrs).String())\n\t\tcase *InterfaceAddrMessage:\n\t\t\tvar attrs addrAttrs\n\t\t\tif runtime.GOOS == \"openbsd\" {\n\t\t\t\tattrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))\n\t\t\t} else {\n\t\t\t\tattrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))\n\t\t\t}\n\t\t\tif err := addrs(m.Addrs).match(attrs); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tss = append(ss, m.String()+\" \"+addrs(m.Addrs).String())\n\t\tcase *InterfaceMulticastAddrMessage:\n\t\t\tif err := 
addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[4:8]))); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tss = append(ss, m.String()+\" \"+addrs(m.Addrs).String())\n\t\tcase *InterfaceAnnounceMessage:\n\t\t\tss = append(ss, m.String())\n\t\tdefault:\n\t\t\tss = append(ss, fmt.Sprintf(\"%+v\", m))\n\t\t}\n\t}\n\treturn ss, nil\n}\n\ntype syss []Sys\n\nfunc (sys syss) String() string {\n\tvar s string\n\tfor _, sy := range sys {\n\t\tswitch sy := sy.(type) {\n\t\tcase *InterfaceMetrics:\n\t\t\tif len(s) > 0 {\n\t\t\t\ts += \" \"\n\t\t\t}\n\t\t\ts += sy.String()\n\t\tcase *RouteMetrics:\n\t\t\tif len(s) > 0 {\n\t\t\t\ts += \" \"\n\t\t\t}\n\t\t\ts += sy.String()\n\t\t}\n\t}\n\treturn s\n}\n\ntype addrFamily int\n\nfunc (af addrFamily) String() string {\n\tswitch af {\n\tcase sysAF_UNSPEC:\n\t\treturn \"unspec\"\n\tcase sysAF_LINK:\n\t\treturn \"link\"\n\tcase sysAF_INET:\n\t\treturn \"inet4\"\n\tcase sysAF_INET6:\n\t\treturn \"inet6\"\n\tdefault:\n\t\treturn fmt.Sprintf(\"%d\", af)\n\t}\n}\n\nconst hexDigit = \"0123456789abcdef\"\n\ntype llAddr []byte\n\nfunc (a llAddr) String() string {\n\tif len(a) == 0 {\n\t\treturn \"\"\n\t}\n\tbuf := make([]byte, 0, len(a)*3-1)\n\tfor i, b := range a {\n\t\tif i > 0 {\n\t\t\tbuf = append(buf, ':')\n\t\t}\n\t\tbuf = append(buf, hexDigit[b>>4])\n\t\tbuf = append(buf, hexDigit[b&0xF])\n\t}\n\treturn string(buf)\n}\n\ntype ipAddr []byte\n\nfunc (a ipAddr) String() string {\n\tif len(a) == 0 {\n\t\treturn \"<nil>\"\n\t}\n\tif len(a) == 4 {\n\t\treturn fmt.Sprintf(\"%d.%d.%d.%d\", a[0], a[1], a[2], a[3])\n\t}\n\tif len(a) == 16 {\n\t\treturn fmt.Sprintf(\"%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15])\n\t}\n\ts := make([]byte, len(a)*2)\n\tfor i, tn := range a {\n\t\ts[i*2], s[i*2+1] = hexDigit[tn>>4], hexDigit[tn&0xf]\n\t}\n\treturn string(s)\n}\n\nfunc (a *LinkAddr) String() string {\n\tname 
:= a.Name\n\tif name == \"\" {\n\t\tname = \"<nil>\"\n\t}\n\tlla := llAddr(a.Addr).String()\n\tif lla == \"\" {\n\t\tlla = \"<nil>\"\n\t}\n\treturn fmt.Sprintf(\"(%v %d %s %s)\", addrFamily(a.Family()), a.Index, name, lla)\n}\n\nfunc (a *Inet4Addr) String() string {\n\treturn fmt.Sprintf(\"(%v %v)\", addrFamily(a.Family()), ipAddr(a.IP[:]))\n}\n\nfunc (a *Inet6Addr) String() string {\n\treturn fmt.Sprintf(\"(%v %v %d)\", addrFamily(a.Family()), ipAddr(a.IP[:]), a.ZoneID)\n}\n\nfunc (a *DefaultAddr) String() string {\n\treturn fmt.Sprintf(\"(%v %s)\", addrFamily(a.Family()), ipAddr(a.Raw[2:]).String())\n}\n\ntype addrs []Addr\n\nfunc (as addrs) String() string {\n\tvar s string\n\tfor _, a := range as {\n\t\tif a == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif len(s) > 0 {\n\t\t\ts += \" \"\n\t\t}\n\t\tswitch a := a.(type) {\n\t\tcase *LinkAddr:\n\t\t\ts += a.String()\n\t\tcase *Inet4Addr:\n\t\t\ts += a.String()\n\t\tcase *Inet6Addr:\n\t\t\ts += a.String()\n\t\tcase *DefaultAddr:\n\t\t\ts += a.String()\n\t\t}\n\t}\n\tif s == \"\" {\n\t\treturn \"<nil>\"\n\t}\n\treturn s\n}\n\nfunc (as addrs) match(attrs addrAttrs) error {\n\tvar ts addrAttrs\n\taf := sysAF_UNSPEC\n\tfor i := range as {\n\t\tif as[i] != nil {\n\t\t\tts |= 1 << uint(i)\n\t\t}\n\t\tswitch as[i].(type) {\n\t\tcase *Inet4Addr:\n\t\t\tif af == sysAF_UNSPEC {\n\t\t\t\taf = sysAF_INET\n\t\t\t}\n\t\t\tif af != sysAF_INET {\n\t\t\t\treturn fmt.Errorf(\"got %v; want %v\", addrs(as), addrFamily(af))\n\t\t\t}\n\t\tcase *Inet6Addr:\n\t\t\tif af == sysAF_UNSPEC {\n\t\t\t\taf = sysAF_INET6\n\t\t\t}\n\t\t\tif af != sysAF_INET6 {\n\t\t\t\treturn fmt.Errorf(\"got %v; want %v\", addrs(as), addrFamily(af))\n\t\t\t}\n\t\t}\n\t}\n\tif ts != attrs && ts > attrs {\n\t\treturn fmt.Errorf(\"%v not included in %v\", ts, attrs)\n\t}\n\treturn nil\n}\n\nfunc fetchAndParseRIB(af int, typ RIBType) ([]Message, error) {\n\tvar err error\n\tvar b []byte\n\tfor i := 0; i < 3; i++ {\n\t\tif b, err = FetchRIB(af, typ, 0); err != nil 
{\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%v %d %v\", addrFamily(af), typ, err)\n\t}\n\tms, err := ParseRIB(typ, b)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%v %d %v\", addrFamily(af), typ, err)\n\t}\n\treturn ms, nil\n}\n\n// propVirtual is a proprietary virtual network interface.\ntype propVirtual struct {\n\tname         string\n\taddr, mask   string\n\tsetupCmds    []*exec.Cmd\n\tteardownCmds []*exec.Cmd\n}\n\nfunc (pv *propVirtual) setup() error {\n\tfor _, cmd := range pv.setupCmds {\n\t\tif err := cmd.Run(); err != nil {\n\t\t\tpv.teardown()\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (pv *propVirtual) teardown() error {\n\tfor _, cmd := range pv.teardownCmds {\n\t\tif err := cmd.Run(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (pv *propVirtual) configure(suffix int) error {\n\tif runtime.GOOS == \"openbsd\" {\n\t\tpv.name = fmt.Sprintf(\"vether%d\", suffix)\n\t} else {\n\t\tpv.name = fmt.Sprintf(\"vlan%d\", suffix)\n\t}\n\txname, err := exec.LookPath(\"ifconfig\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tpv.setupCmds = append(pv.setupCmds, &exec.Cmd{\n\t\tPath: xname,\n\t\tArgs: []string{\"ifconfig\", pv.name, \"create\"},\n\t})\n\tif runtime.GOOS == \"netbsd\" {\n\t\t// NetBSD requires an underlying dot1Q-capable network\n\t\t// interface.\n\t\tpv.setupCmds = append(pv.setupCmds, &exec.Cmd{\n\t\t\tPath: xname,\n\t\t\tArgs: []string{\"ifconfig\", pv.name, \"vlan\", fmt.Sprintf(\"%d\", suffix&0xfff), \"vlanif\", \"wm0\"},\n\t\t})\n\t}\n\tpv.setupCmds = append(pv.setupCmds, &exec.Cmd{\n\t\tPath: xname,\n\t\tArgs: []string{\"ifconfig\", pv.name, \"inet\", pv.addr, \"netmask\", pv.mask},\n\t})\n\tpv.teardownCmds = append(pv.teardownCmds, &exec.Cmd{\n\t\tPath: xname,\n\t\tArgs: []string{\"ifconfig\", pv.name, \"destroy\"},\n\t})\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/sys.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd openbsd\n\npackage route\n\nimport \"unsafe\"\n\nvar (\n\tnativeEndian binaryByteOrder\n\tkernelAlign  int\n\twireFormats  map[int]*wireFormat\n)\n\nfunc init() {\n\ti := uint32(1)\n\tb := (*[4]byte)(unsafe.Pointer(&i))\n\tif b[0] == 1 {\n\t\tnativeEndian = littleEndian\n\t} else {\n\t\tnativeEndian = bigEndian\n\t}\n\tkernelAlign, wireFormats = probeRoutingStack()\n}\n\nfunc roundup(l int) int {\n\tif l == 0 {\n\t\treturn kernelAlign\n\t}\n\treturn (l + kernelAlign - 1) & ^(kernelAlign - 1)\n}\n\ntype wireFormat struct {\n\textOff  int // offset of header extension\n\tbodyOff int // offset of message body\n\tparse   func(RIBType, []byte) (Message, error)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/sys_darwin.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage route\n\nfunc (typ RIBType) parseable() bool {\n\tswitch typ {\n\tcase sysNET_RT_STAT, sysNET_RT_TRASH:\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}\n\n// RouteMetrics represents route metrics.\ntype RouteMetrics struct {\n\tPathMTU int // path maximum transmission unit\n}\n\n// SysType implements the SysType method of Sys interface.\nfunc (rmx *RouteMetrics) SysType() SysType { return SysMetrics }\n\n// Sys implements the Sys method of Message interface.\nfunc (m *RouteMessage) Sys() []Sys {\n\treturn []Sys{\n\t\t&RouteMetrics{\n\t\t\tPathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])),\n\t\t},\n\t}\n}\n\n// InterfaceMetrics represents interface metrics.\ntype InterfaceMetrics struct {\n\tType int // interface type\n\tMTU  int // maximum transmission unit\n}\n\n// SysType implements the SysType method of Sys interface.\nfunc (imx *InterfaceMetrics) SysType() SysType { return SysMetrics }\n\n// Sys implements the Sys method of Message interface.\nfunc (m *InterfaceMessage) Sys() []Sys {\n\treturn []Sys{\n\t\t&InterfaceMetrics{\n\t\t\tType: int(m.raw[m.extOff]),\n\t\t\tMTU:  int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])),\n\t\t},\n\t}\n}\n\nfunc probeRoutingStack() (int, map[int]*wireFormat) {\n\trtm := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdrDarwin15}\n\trtm.parse = rtm.parseRouteMessage\n\trtm2 := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdr2Darwin15}\n\trtm2.parse = rtm2.parseRouteMessage\n\tifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDarwin15}\n\tifm.parse = ifm.parseInterfaceMessage\n\tifm2 := &wireFormat{extOff: 32, bodyOff: sizeofIfMsghdr2Darwin15}\n\tifm2.parse = ifm2.parseInterfaceMessage\n\tifam := &wireFormat{extOff: sizeofIfaMsghdrDarwin15, bodyOff: sizeofIfaMsghdrDarwin15}\n\tifam.parse = 
ifam.parseInterfaceAddrMessage\n\tifmam := &wireFormat{extOff: sizeofIfmaMsghdrDarwin15, bodyOff: sizeofIfmaMsghdrDarwin15}\n\tifmam.parse = ifmam.parseInterfaceMulticastAddrMessage\n\tifmam2 := &wireFormat{extOff: sizeofIfmaMsghdr2Darwin15, bodyOff: sizeofIfmaMsghdr2Darwin15}\n\tifmam2.parse = ifmam2.parseInterfaceMulticastAddrMessage\n\t// Darwin kernels require 32-bit aligned access to routing facilities.\n\treturn 4, map[int]*wireFormat{\n\t\tsysRTM_ADD:       rtm,\n\t\tsysRTM_DELETE:    rtm,\n\t\tsysRTM_CHANGE:    rtm,\n\t\tsysRTM_GET:       rtm,\n\t\tsysRTM_LOSING:    rtm,\n\t\tsysRTM_REDIRECT:  rtm,\n\t\tsysRTM_MISS:      rtm,\n\t\tsysRTM_LOCK:      rtm,\n\t\tsysRTM_RESOLVE:   rtm,\n\t\tsysRTM_NEWADDR:   ifam,\n\t\tsysRTM_DELADDR:   ifam,\n\t\tsysRTM_IFINFO:    ifm,\n\t\tsysRTM_NEWMADDR:  ifmam,\n\t\tsysRTM_DELMADDR:  ifmam,\n\t\tsysRTM_IFINFO2:   ifm2,\n\t\tsysRTM_NEWMADDR2: ifmam2,\n\t\tsysRTM_GET2:      rtm2,\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/sys_dragonfly.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage route\n\nimport \"unsafe\"\n\nfunc (typ RIBType) parseable() bool { return true }\n\n// RouteMetrics represents route metrics.\ntype RouteMetrics struct {\n\tPathMTU int // path maximum transmission unit\n}\n\n// SysType implements the SysType method of Sys interface.\nfunc (rmx *RouteMetrics) SysType() SysType { return SysMetrics }\n\n// Sys implements the Sys method of Message interface.\nfunc (m *RouteMessage) Sys() []Sys {\n\treturn []Sys{\n\t\t&RouteMetrics{\n\t\t\tPathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])),\n\t\t},\n\t}\n}\n\n// InterfaceMetrics represents interface metrics.\ntype InterfaceMetrics struct {\n\tType int // interface type\n\tMTU  int // maximum transmission unit\n}\n\n// SysType implements the SysType method of Sys interface.\nfunc (imx *InterfaceMetrics) SysType() SysType { return SysMetrics }\n\n// Sys implements the Sys method of Message interface.\nfunc (m *InterfaceMessage) Sys() []Sys {\n\treturn []Sys{\n\t\t&InterfaceMetrics{\n\t\t\tType: int(m.raw[m.extOff]),\n\t\t\tMTU:  int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])),\n\t\t},\n\t}\n}\n\nfunc probeRoutingStack() (int, map[int]*wireFormat) {\n\tvar p uintptr\n\trtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrDragonFlyBSD4}\n\trtm.parse = rtm.parseRouteMessage\n\tifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDragonFlyBSD4}\n\tifm.parse = ifm.parseInterfaceMessage\n\tifam := &wireFormat{extOff: sizeofIfaMsghdrDragonFlyBSD4, bodyOff: sizeofIfaMsghdrDragonFlyBSD4}\n\tifam.parse = ifam.parseInterfaceAddrMessage\n\tifmam := &wireFormat{extOff: sizeofIfmaMsghdrDragonFlyBSD4, bodyOff: sizeofIfmaMsghdrDragonFlyBSD4}\n\tifmam.parse = ifmam.parseInterfaceMulticastAddrMessage\n\tifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrDragonFlyBSD4, bodyOff: 
sizeofIfAnnouncemsghdrDragonFlyBSD4}\n\tifanm.parse = ifanm.parseInterfaceAnnounceMessage\n\treturn int(unsafe.Sizeof(p)), map[int]*wireFormat{\n\t\tsysRTM_ADD:        rtm,\n\t\tsysRTM_DELETE:     rtm,\n\t\tsysRTM_CHANGE:     rtm,\n\t\tsysRTM_GET:        rtm,\n\t\tsysRTM_LOSING:     rtm,\n\t\tsysRTM_REDIRECT:   rtm,\n\t\tsysRTM_MISS:       rtm,\n\t\tsysRTM_LOCK:       rtm,\n\t\tsysRTM_RESOLVE:    rtm,\n\t\tsysRTM_NEWADDR:    ifam,\n\t\tsysRTM_DELADDR:    ifam,\n\t\tsysRTM_IFINFO:     ifm,\n\t\tsysRTM_NEWMADDR:   ifmam,\n\t\tsysRTM_DELMADDR:   ifmam,\n\t\tsysRTM_IFANNOUNCE: ifanm,\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/sys_freebsd.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage route\n\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nfunc (typ RIBType) parseable() bool { return true }\n\n// RouteMetrics represents route metrics.\ntype RouteMetrics struct {\n\tPathMTU int // path maximum transmission unit\n}\n\n// SysType implements the SysType method of Sys interface.\nfunc (rmx *RouteMetrics) SysType() SysType { return SysMetrics }\n\n// Sys implements the Sys method of Message interface.\nfunc (m *RouteMessage) Sys() []Sys {\n\tif kernelAlign == 8 {\n\t\treturn []Sys{\n\t\t\t&RouteMetrics{\n\t\t\t\tPathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])),\n\t\t\t},\n\t\t}\n\t}\n\treturn []Sys{\n\t\t&RouteMetrics{\n\t\t\tPathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])),\n\t\t},\n\t}\n}\n\n// InterfaceMetrics represents interface metrics.\ntype InterfaceMetrics struct {\n\tType int // interface type\n\tMTU  int // maximum transmission unit\n}\n\n// SysType implements the SysType method of Sys interface.\nfunc (imx *InterfaceMetrics) SysType() SysType { return SysMetrics }\n\n// Sys implements the Sys method of Message interface.\nfunc (m *InterfaceMessage) Sys() []Sys {\n\treturn []Sys{\n\t\t&InterfaceMetrics{\n\t\t\tType: int(m.raw[m.extOff]),\n\t\t\tMTU:  int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])),\n\t\t},\n\t}\n}\n\nfunc probeRoutingStack() (int, map[int]*wireFormat) {\n\tvar p uintptr\n\twordSize := int(unsafe.Sizeof(p))\n\talign := int(unsafe.Sizeof(p))\n\t// In the case of kern.supported_archs=\"amd64 i386\", we need\n\t// to know the underlying kernel's architecture because the\n\t// alignment for routing facilities are set at the build time\n\t// of the kernel.\n\tconf, _ := syscall.Sysctl(\"kern.conftxt\")\n\tfor i, j := 0, 0; j < len(conf); j++ {\n\t\tif conf[j] != '\\n' {\n\t\t\tcontinue\n\t\t}\n\t\ts := 
conf[i:j]\n\t\ti = j + 1\n\t\tif len(s) > len(\"machine\") && s[:len(\"machine\")] == \"machine\" {\n\t\t\ts = s[len(\"machine\"):]\n\t\t\tfor k := 0; k < len(s); k++ {\n\t\t\t\tif s[k] == ' ' || s[k] == '\\t' {\n\t\t\t\t\ts = s[1:]\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif s == \"amd64\" {\n\t\t\t\talign = 8\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\tvar rtm, ifm, ifam, ifmam, ifanm *wireFormat\n\tif align != wordSize { // 386 emulation on amd64\n\t\trtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10Emu - sizeofRtMetricsFreeBSD10Emu, bodyOff: sizeofRtMsghdrFreeBSD10Emu}\n\t\tifm = &wireFormat{extOff: 16}\n\t\tifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10Emu, bodyOff: sizeofIfaMsghdrFreeBSD10Emu}\n\t\tifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10Emu, bodyOff: sizeofIfmaMsghdrFreeBSD10Emu}\n\t\tifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10Emu, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10Emu}\n\t} else {\n\t\trtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10 - sizeofRtMetricsFreeBSD10, bodyOff: sizeofRtMsghdrFreeBSD10}\n\t\tifm = &wireFormat{extOff: 16}\n\t\tifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10, bodyOff: sizeofIfaMsghdrFreeBSD10}\n\t\tifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10, bodyOff: sizeofIfmaMsghdrFreeBSD10}\n\t\tifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10}\n\t}\n\trel, _ := syscall.SysctlUint32(\"kern.osreldate\")\n\tswitch {\n\tcase rel < 800000:\n\t\tif align != wordSize { // 386 emulation on amd64\n\t\t\tifm.bodyOff = sizeofIfMsghdrFreeBSD7Emu\n\t\t} else {\n\t\t\tifm.bodyOff = sizeofIfMsghdrFreeBSD7\n\t\t}\n\tcase 800000 <= rel && rel < 900000:\n\t\tif align != wordSize { // 386 emulation on amd64\n\t\t\tifm.bodyOff = sizeofIfMsghdrFreeBSD8Emu\n\t\t} else {\n\t\t\tifm.bodyOff = sizeofIfMsghdrFreeBSD8\n\t\t}\n\tcase 900000 <= rel && rel < 1000000:\n\t\tif align != wordSize { // 386 emulation on amd64\n\t\t\tifm.bodyOff = sizeofIfMsghdrFreeBSD9Emu\n\t\t} 
else {\n\t\t\tifm.bodyOff = sizeofIfMsghdrFreeBSD9\n\t\t}\n\tcase 1000000 <= rel && rel < 1100000:\n\t\tif align != wordSize { // 386 emulation on amd64\n\t\t\tifm.bodyOff = sizeofIfMsghdrFreeBSD10Emu\n\t\t} else {\n\t\t\tifm.bodyOff = sizeofIfMsghdrFreeBSD10\n\t\t}\n\tdefault:\n\t\tif align != wordSize { // 386 emulation on amd64\n\t\t\tifm.bodyOff = sizeofIfMsghdrFreeBSD11Emu\n\t\t} else {\n\t\t\tifm.bodyOff = sizeofIfMsghdrFreeBSD11\n\t\t}\n\t}\n\trtm.parse = rtm.parseRouteMessage\n\tifm.parse = ifm.parseInterfaceMessage\n\tifam.parse = ifam.parseInterfaceAddrMessage\n\tifmam.parse = ifmam.parseInterfaceMulticastAddrMessage\n\tifanm.parse = ifanm.parseInterfaceAnnounceMessage\n\treturn align, map[int]*wireFormat{\n\t\tsysRTM_ADD:        rtm,\n\t\tsysRTM_DELETE:     rtm,\n\t\tsysRTM_CHANGE:     rtm,\n\t\tsysRTM_GET:        rtm,\n\t\tsysRTM_LOSING:     rtm,\n\t\tsysRTM_REDIRECT:   rtm,\n\t\tsysRTM_MISS:       rtm,\n\t\tsysRTM_LOCK:       rtm,\n\t\tsysRTM_RESOLVE:    rtm,\n\t\tsysRTM_NEWADDR:    ifam,\n\t\tsysRTM_DELADDR:    ifam,\n\t\tsysRTM_IFINFO:     ifm,\n\t\tsysRTM_NEWMADDR:   ifmam,\n\t\tsysRTM_DELMADDR:   ifmam,\n\t\tsysRTM_IFANNOUNCE: ifanm,\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/sys_netbsd.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage route\n\nfunc (typ RIBType) parseable() bool { return true }\n\n// RouteMetrics represents route metrics.\ntype RouteMetrics struct {\n\tPathMTU int // path maximum transmission unit\n}\n\n// SysType implements the SysType method of Sys interface.\nfunc (rmx *RouteMetrics) SysType() SysType { return SysMetrics }\n\n// Sys implements the Sys method of Message interface.\nfunc (m *RouteMessage) Sys() []Sys {\n\treturn []Sys{\n\t\t&RouteMetrics{\n\t\t\tPathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])),\n\t\t},\n\t}\n}\n\n// RouteMetrics represents route metrics.\ntype InterfaceMetrics struct {\n\tType int // interface type\n\tMTU  int // maximum transmission unit\n}\n\n// SysType implements the SysType method of Sys interface.\nfunc (imx *InterfaceMetrics) SysType() SysType { return SysMetrics }\n\n// Sys implements the Sys method of Message interface.\nfunc (m *InterfaceMessage) Sys() []Sys {\n\treturn []Sys{\n\t\t&InterfaceMetrics{\n\t\t\tType: int(m.raw[m.extOff]),\n\t\t\tMTU:  int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])),\n\t\t},\n\t}\n}\n\nfunc probeRoutingStack() (int, map[int]*wireFormat) {\n\trtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrNetBSD7}\n\trtm.parse = rtm.parseRouteMessage\n\tifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrNetBSD7}\n\tifm.parse = ifm.parseInterfaceMessage\n\tifam := &wireFormat{extOff: sizeofIfaMsghdrNetBSD7, bodyOff: sizeofIfaMsghdrNetBSD7}\n\tifam.parse = ifam.parseInterfaceAddrMessage\n\tifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrNetBSD7, bodyOff: sizeofIfAnnouncemsghdrNetBSD7}\n\tifanm.parse = ifanm.parseInterfaceAnnounceMessage\n\t// NetBSD 6 and above kernels require 64-bit aligned access to\n\t// routing facilities.\n\treturn 8, map[int]*wireFormat{\n\t\tsysRTM_ADD:        
rtm,\n\t\tsysRTM_DELETE:     rtm,\n\t\tsysRTM_CHANGE:     rtm,\n\t\tsysRTM_GET:        rtm,\n\t\tsysRTM_LOSING:     rtm,\n\t\tsysRTM_REDIRECT:   rtm,\n\t\tsysRTM_MISS:       rtm,\n\t\tsysRTM_LOCK:       rtm,\n\t\tsysRTM_RESOLVE:    rtm,\n\t\tsysRTM_NEWADDR:    ifam,\n\t\tsysRTM_DELADDR:    ifam,\n\t\tsysRTM_IFANNOUNCE: ifanm,\n\t\tsysRTM_IFINFO:     ifm,\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/sys_openbsd.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage route\n\nimport \"unsafe\"\n\nfunc (typ RIBType) parseable() bool {\n\tswitch typ {\n\tcase sysNET_RT_STATS, sysNET_RT_TABLE:\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}\n\n// RouteMetrics represents route metrics.\ntype RouteMetrics struct {\n\tPathMTU int // path maximum transmission unit\n}\n\n// SysType implements the SysType method of Sys interface.\nfunc (rmx *RouteMetrics) SysType() SysType { return SysMetrics }\n\n// Sys implements the Sys method of Message interface.\nfunc (m *RouteMessage) Sys() []Sys {\n\treturn []Sys{\n\t\t&RouteMetrics{\n\t\t\tPathMTU: int(nativeEndian.Uint32(m.raw[60:64])),\n\t\t},\n\t}\n}\n\n// InterfaceMetrics represents interface metrics.\ntype InterfaceMetrics struct {\n\tType int // interface type\n\tMTU  int // maximum transmission unit\n}\n\n// SysType implements the SysType method of Sys interface.\nfunc (imx *InterfaceMetrics) SysType() SysType { return SysMetrics }\n\n// Sys implements the Sys method of Message interface.\nfunc (m *InterfaceMessage) Sys() []Sys {\n\treturn []Sys{\n\t\t&InterfaceMetrics{\n\t\t\tType: int(m.raw[24]),\n\t\t\tMTU:  int(nativeEndian.Uint32(m.raw[28:32])),\n\t\t},\n\t}\n}\n\nfunc probeRoutingStack() (int, map[int]*wireFormat) {\n\tvar p uintptr\n\trtm := &wireFormat{extOff: -1, bodyOff: -1}\n\trtm.parse = rtm.parseRouteMessage\n\tifm := &wireFormat{extOff: -1, bodyOff: -1}\n\tifm.parse = ifm.parseInterfaceMessage\n\tifam := &wireFormat{extOff: -1, bodyOff: -1}\n\tifam.parse = ifam.parseInterfaceAddrMessage\n\tifanm := &wireFormat{extOff: -1, bodyOff: -1}\n\tifanm.parse = ifanm.parseInterfaceAnnounceMessage\n\treturn int(unsafe.Sizeof(p)), map[int]*wireFormat{\n\t\tsysRTM_ADD:        rtm,\n\t\tsysRTM_DELETE:     rtm,\n\t\tsysRTM_CHANGE:     rtm,\n\t\tsysRTM_GET:        rtm,\n\t\tsysRTM_LOSING:     
rtm,\n\t\tsysRTM_REDIRECT:   rtm,\n\t\tsysRTM_MISS:       rtm,\n\t\tsysRTM_LOCK:       rtm,\n\t\tsysRTM_RESOLVE:    rtm,\n\t\tsysRTM_NEWADDR:    ifam,\n\t\tsysRTM_DELADDR:    ifam,\n\t\tsysRTM_IFINFO:     ifm,\n\t\tsysRTM_IFANNOUNCE: ifanm,\n\t\tsysRTM_DESYNC:     rtm,\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/syscall.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build darwin dragonfly freebsd netbsd openbsd\n\npackage route\n\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nvar zero uintptr\n\nfunc sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error {\n\tvar p unsafe.Pointer\n\tif len(mib) > 0 {\n\t\tp = unsafe.Pointer(&mib[0])\n\t} else {\n\t\tp = unsafe.Pointer(&zero)\n\t}\n\t_, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))\n\tif errno != 0 {\n\t\treturn error(errno)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/zsys_darwin.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_darwin.go\n\npackage route\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_ROUTE  = 0x11\n\tsysAF_LINK   = 0x12\n\tsysAF_INET6  = 0x1e\n\n\tsysSOCK_RAW = 0x3\n\n\tsysNET_RT_DUMP    = 0x1\n\tsysNET_RT_FLAGS   = 0x2\n\tsysNET_RT_IFLIST  = 0x3\n\tsysNET_RT_STAT    = 0x4\n\tsysNET_RT_TRASH   = 0x5\n\tsysNET_RT_IFLIST2 = 0x6\n\tsysNET_RT_DUMP2   = 0x7\n\tsysNET_RT_MAXID   = 0xa\n)\n\nconst (\n\tsysCTL_MAXNAME = 0xc\n\n\tsysCTL_UNSPEC  = 0x0\n\tsysCTL_KERN    = 0x1\n\tsysCTL_VM      = 0x2\n\tsysCTL_VFS     = 0x3\n\tsysCTL_NET     = 0x4\n\tsysCTL_DEBUG   = 0x5\n\tsysCTL_HW      = 0x6\n\tsysCTL_MACHDEP = 0x7\n\tsysCTL_USER    = 0x8\n\tsysCTL_MAXID   = 0x9\n)\n\nconst (\n\tsysRTM_VERSION = 0x5\n\n\tsysRTM_ADD       = 0x1\n\tsysRTM_DELETE    = 0x2\n\tsysRTM_CHANGE    = 0x3\n\tsysRTM_GET       = 0x4\n\tsysRTM_LOSING    = 0x5\n\tsysRTM_REDIRECT  = 0x6\n\tsysRTM_MISS      = 0x7\n\tsysRTM_LOCK      = 0x8\n\tsysRTM_OLDADD    = 0x9\n\tsysRTM_OLDDEL    = 0xa\n\tsysRTM_RESOLVE   = 0xb\n\tsysRTM_NEWADDR   = 0xc\n\tsysRTM_DELADDR   = 0xd\n\tsysRTM_IFINFO    = 0xe\n\tsysRTM_NEWMADDR  = 0xf\n\tsysRTM_DELMADDR  = 0x10\n\tsysRTM_IFINFO2   = 0x12\n\tsysRTM_NEWMADDR2 = 0x13\n\tsysRTM_GET2      = 0x14\n\n\tsysRTA_DST     = 0x1\n\tsysRTA_GATEWAY = 0x2\n\tsysRTA_NETMASK = 0x4\n\tsysRTA_GENMASK = 0x8\n\tsysRTA_IFP     = 0x10\n\tsysRTA_IFA     = 0x20\n\tsysRTA_AUTHOR  = 0x40\n\tsysRTA_BRD     = 0x80\n\n\tsysRTAX_DST     = 0x0\n\tsysRTAX_GATEWAY = 0x1\n\tsysRTAX_NETMASK = 0x2\n\tsysRTAX_GENMASK = 0x3\n\tsysRTAX_IFP     = 0x4\n\tsysRTAX_IFA     = 0x5\n\tsysRTAX_AUTHOR  = 0x6\n\tsysRTAX_BRD     = 0x7\n\tsysRTAX_MAX     = 0x8\n)\n\nconst (\n\tsizeofIfMsghdrDarwin15    = 0x70\n\tsizeofIfaMsghdrDarwin15   = 0x14\n\tsizeofIfmaMsghdrDarwin15  = 0x10\n\tsizeofIfMsghdr2Darwin15   = 0xa0\n\tsizeofIfmaMsghdr2Darwin15 = 0x14\n\tsizeofIfDataDarwin15      = 0x60\n\tsizeofIfData64Darwin15    = 
0x80\n\n\tsizeofRtMsghdrDarwin15  = 0x5c\n\tsizeofRtMsghdr2Darwin15 = 0x5c\n\tsizeofRtMetricsDarwin15 = 0x38\n\n\tsizeofSockaddrStorage = 0x80\n\tsizeofSockaddrInet    = 0x10\n\tsizeofSockaddrInet6   = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/zsys_dragonfly.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_dragonfly.go\n\npackage route\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_ROUTE  = 0x11\n\tsysAF_LINK   = 0x12\n\tsysAF_INET6  = 0x1c\n\n\tsysSOCK_RAW = 0x3\n\n\tsysNET_RT_DUMP   = 0x1\n\tsysNET_RT_FLAGS  = 0x2\n\tsysNET_RT_IFLIST = 0x3\n\tsysNET_RT_MAXID  = 0x4\n)\n\nconst (\n\tsysCTL_MAXNAME = 0xc\n\n\tsysCTL_UNSPEC   = 0x0\n\tsysCTL_KERN     = 0x1\n\tsysCTL_VM       = 0x2\n\tsysCTL_VFS      = 0x3\n\tsysCTL_NET      = 0x4\n\tsysCTL_DEBUG    = 0x5\n\tsysCTL_HW       = 0x6\n\tsysCTL_MACHDEP  = 0x7\n\tsysCTL_USER     = 0x8\n\tsysCTL_P1003_1B = 0x9\n\tsysCTL_LWKT     = 0xa\n\tsysCTL_MAXID    = 0xb\n)\n\nconst (\n\tsysRTM_VERSION = 0x6\n\n\tsysRTM_ADD        = 0x1\n\tsysRTM_DELETE     = 0x2\n\tsysRTM_CHANGE     = 0x3\n\tsysRTM_GET        = 0x4\n\tsysRTM_LOSING     = 0x5\n\tsysRTM_REDIRECT   = 0x6\n\tsysRTM_MISS       = 0x7\n\tsysRTM_LOCK       = 0x8\n\tsysRTM_OLDADD     = 0x9\n\tsysRTM_OLDDEL     = 0xa\n\tsysRTM_RESOLVE    = 0xb\n\tsysRTM_NEWADDR    = 0xc\n\tsysRTM_DELADDR    = 0xd\n\tsysRTM_IFINFO     = 0xe\n\tsysRTM_NEWMADDR   = 0xf\n\tsysRTM_DELMADDR   = 0x10\n\tsysRTM_IFANNOUNCE = 0x11\n\tsysRTM_IEEE80211  = 0x12\n\n\tsysRTA_DST     = 0x1\n\tsysRTA_GATEWAY = 0x2\n\tsysRTA_NETMASK = 0x4\n\tsysRTA_GENMASK = 0x8\n\tsysRTA_IFP     = 0x10\n\tsysRTA_IFA     = 0x20\n\tsysRTA_AUTHOR  = 0x40\n\tsysRTA_BRD     = 0x80\n\tsysRTA_MPLS1   = 0x100\n\tsysRTA_MPLS2   = 0x200\n\tsysRTA_MPLS3   = 0x400\n\n\tsysRTAX_DST     = 0x0\n\tsysRTAX_GATEWAY = 0x1\n\tsysRTAX_NETMASK = 0x2\n\tsysRTAX_GENMASK = 0x3\n\tsysRTAX_IFP     = 0x4\n\tsysRTAX_IFA     = 0x5\n\tsysRTAX_AUTHOR  = 0x6\n\tsysRTAX_BRD     = 0x7\n\tsysRTAX_MPLS1   = 0x8\n\tsysRTAX_MPLS2   = 0x9\n\tsysRTAX_MPLS3   = 0xa\n\tsysRTAX_MAX     = 0xb\n)\n\nconst (\n\tsizeofIfMsghdrDragonFlyBSD4         = 0xb0\n\tsizeofIfaMsghdrDragonFlyBSD4        = 0x14\n\tsizeofIfmaMsghdrDragonFlyBSD4       = 0x10\n\tsizeofIfAnnouncemsghdrDragonFlyBSD4 = 
0x18\n\n\tsizeofRtMsghdrDragonFlyBSD4  = 0x98\n\tsizeofRtMetricsDragonFlyBSD4 = 0x70\n\n\tsizeofSockaddrStorage = 0x80\n\tsizeofSockaddrInet    = 0x10\n\tsizeofSockaddrInet6   = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/zsys_freebsd_386.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_freebsd.go\n\npackage route\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_ROUTE  = 0x11\n\tsysAF_LINK   = 0x12\n\tsysAF_INET6  = 0x1c\n\n\tsysSOCK_RAW = 0x3\n\n\tsysNET_RT_DUMP     = 0x1\n\tsysNET_RT_FLAGS    = 0x2\n\tsysNET_RT_IFLIST   = 0x3\n\tsysNET_RT_IFMALIST = 0x4\n\tsysNET_RT_IFLISTL  = 0x5\n)\n\nconst (\n\tsysCTL_MAXNAME = 0x18\n\n\tsysCTL_UNSPEC   = 0x0\n\tsysCTL_KERN     = 0x1\n\tsysCTL_VM       = 0x2\n\tsysCTL_VFS      = 0x3\n\tsysCTL_NET      = 0x4\n\tsysCTL_DEBUG    = 0x5\n\tsysCTL_HW       = 0x6\n\tsysCTL_MACHDEP  = 0x7\n\tsysCTL_USER     = 0x8\n\tsysCTL_P1003_1B = 0x9\n)\n\nconst (\n\tsysRTM_VERSION = 0x5\n\n\tsysRTM_ADD        = 0x1\n\tsysRTM_DELETE     = 0x2\n\tsysRTM_CHANGE     = 0x3\n\tsysRTM_GET        = 0x4\n\tsysRTM_LOSING     = 0x5\n\tsysRTM_REDIRECT   = 0x6\n\tsysRTM_MISS       = 0x7\n\tsysRTM_LOCK       = 0x8\n\tsysRTM_RESOLVE    = 0xb\n\tsysRTM_NEWADDR    = 0xc\n\tsysRTM_DELADDR    = 0xd\n\tsysRTM_IFINFO     = 0xe\n\tsysRTM_NEWMADDR   = 0xf\n\tsysRTM_DELMADDR   = 0x10\n\tsysRTM_IFANNOUNCE = 0x11\n\tsysRTM_IEEE80211  = 0x12\n\n\tsysRTA_DST     = 0x1\n\tsysRTA_GATEWAY = 0x2\n\tsysRTA_NETMASK = 0x4\n\tsysRTA_GENMASK = 0x8\n\tsysRTA_IFP     = 0x10\n\tsysRTA_IFA     = 0x20\n\tsysRTA_AUTHOR  = 0x40\n\tsysRTA_BRD     = 0x80\n\n\tsysRTAX_DST     = 0x0\n\tsysRTAX_GATEWAY = 0x1\n\tsysRTAX_NETMASK = 0x2\n\tsysRTAX_GENMASK = 0x3\n\tsysRTAX_IFP     = 0x4\n\tsysRTAX_IFA     = 0x5\n\tsysRTAX_AUTHOR  = 0x6\n\tsysRTAX_BRD     = 0x7\n\tsysRTAX_MAX     = 0x8\n)\n\nconst (\n\tsizeofIfMsghdrlFreeBSD10        = 0x68\n\tsizeofIfaMsghdrFreeBSD10        = 0x14\n\tsizeofIfaMsghdrlFreeBSD10       = 0x6c\n\tsizeofIfmaMsghdrFreeBSD10       = 0x10\n\tsizeofIfAnnouncemsghdrFreeBSD10 = 0x18\n\n\tsizeofRtMsghdrFreeBSD10  = 0x5c\n\tsizeofRtMetricsFreeBSD10 = 0x38\n\n\tsizeofIfMsghdrFreeBSD7  = 0x60\n\tsizeofIfMsghdrFreeBSD8  = 0x60\n\tsizeofIfMsghdrFreeBSD9  = 
0x60\n\tsizeofIfMsghdrFreeBSD10 = 0x64\n\tsizeofIfMsghdrFreeBSD11 = 0xa8\n\n\tsizeofIfDataFreeBSD7  = 0x50\n\tsizeofIfDataFreeBSD8  = 0x50\n\tsizeofIfDataFreeBSD9  = 0x50\n\tsizeofIfDataFreeBSD10 = 0x54\n\tsizeofIfDataFreeBSD11 = 0x98\n\n\t// MODIFIED BY HAND FOR 386 EMULATION ON AMD64\n\t// 386 EMULATION USES THE UNDERLYING RAW DATA LAYOUT\n\n\tsizeofIfMsghdrlFreeBSD10Emu        = 0xb0\n\tsizeofIfaMsghdrFreeBSD10Emu        = 0x14\n\tsizeofIfaMsghdrlFreeBSD10Emu       = 0xb0\n\tsizeofIfmaMsghdrFreeBSD10Emu       = 0x10\n\tsizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18\n\n\tsizeofRtMsghdrFreeBSD10Emu  = 0x98\n\tsizeofRtMetricsFreeBSD10Emu = 0x70\n\n\tsizeofIfMsghdrFreeBSD7Emu  = 0xa8\n\tsizeofIfMsghdrFreeBSD8Emu  = 0xa8\n\tsizeofIfMsghdrFreeBSD9Emu  = 0xa8\n\tsizeofIfMsghdrFreeBSD10Emu = 0xa8\n\tsizeofIfMsghdrFreeBSD11Emu = 0xa8\n\n\tsizeofIfDataFreeBSD7Emu  = 0x98\n\tsizeofIfDataFreeBSD8Emu  = 0x98\n\tsizeofIfDataFreeBSD9Emu  = 0x98\n\tsizeofIfDataFreeBSD10Emu = 0x98\n\tsizeofIfDataFreeBSD11Emu = 0x98\n\n\tsizeofSockaddrStorage = 0x80\n\tsizeofSockaddrInet    = 0x10\n\tsizeofSockaddrInet6   = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/zsys_freebsd_amd64.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_freebsd.go\n\npackage route\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_ROUTE  = 0x11\n\tsysAF_LINK   = 0x12\n\tsysAF_INET6  = 0x1c\n\n\tsysSOCK_RAW = 0x3\n\n\tsysNET_RT_DUMP     = 0x1\n\tsysNET_RT_FLAGS    = 0x2\n\tsysNET_RT_IFLIST   = 0x3\n\tsysNET_RT_IFMALIST = 0x4\n\tsysNET_RT_IFLISTL  = 0x5\n)\n\nconst (\n\tsysCTL_MAXNAME = 0x18\n\n\tsysCTL_UNSPEC   = 0x0\n\tsysCTL_KERN     = 0x1\n\tsysCTL_VM       = 0x2\n\tsysCTL_VFS      = 0x3\n\tsysCTL_NET      = 0x4\n\tsysCTL_DEBUG    = 0x5\n\tsysCTL_HW       = 0x6\n\tsysCTL_MACHDEP  = 0x7\n\tsysCTL_USER     = 0x8\n\tsysCTL_P1003_1B = 0x9\n)\n\nconst (\n\tsysRTM_VERSION = 0x5\n\n\tsysRTM_ADD        = 0x1\n\tsysRTM_DELETE     = 0x2\n\tsysRTM_CHANGE     = 0x3\n\tsysRTM_GET        = 0x4\n\tsysRTM_LOSING     = 0x5\n\tsysRTM_REDIRECT   = 0x6\n\tsysRTM_MISS       = 0x7\n\tsysRTM_LOCK       = 0x8\n\tsysRTM_RESOLVE    = 0xb\n\tsysRTM_NEWADDR    = 0xc\n\tsysRTM_DELADDR    = 0xd\n\tsysRTM_IFINFO     = 0xe\n\tsysRTM_NEWMADDR   = 0xf\n\tsysRTM_DELMADDR   = 0x10\n\tsysRTM_IFANNOUNCE = 0x11\n\tsysRTM_IEEE80211  = 0x12\n\n\tsysRTA_DST     = 0x1\n\tsysRTA_GATEWAY = 0x2\n\tsysRTA_NETMASK = 0x4\n\tsysRTA_GENMASK = 0x8\n\tsysRTA_IFP     = 0x10\n\tsysRTA_IFA     = 0x20\n\tsysRTA_AUTHOR  = 0x40\n\tsysRTA_BRD     = 0x80\n\n\tsysRTAX_DST     = 0x0\n\tsysRTAX_GATEWAY = 0x1\n\tsysRTAX_NETMASK = 0x2\n\tsysRTAX_GENMASK = 0x3\n\tsysRTAX_IFP     = 0x4\n\tsysRTAX_IFA     = 0x5\n\tsysRTAX_AUTHOR  = 0x6\n\tsysRTAX_BRD     = 0x7\n\tsysRTAX_MAX     = 0x8\n)\n\nconst (\n\tsizeofIfMsghdrlFreeBSD10        = 0xb0\n\tsizeofIfaMsghdrFreeBSD10        = 0x14\n\tsizeofIfaMsghdrlFreeBSD10       = 0xb0\n\tsizeofIfmaMsghdrFreeBSD10       = 0x10\n\tsizeofIfAnnouncemsghdrFreeBSD10 = 0x18\n\n\tsizeofRtMsghdrFreeBSD10  = 0x98\n\tsizeofRtMetricsFreeBSD10 = 0x70\n\n\tsizeofIfMsghdrFreeBSD7  = 0xa8\n\tsizeofIfMsghdrFreeBSD8  = 0xa8\n\tsizeofIfMsghdrFreeBSD9  = 
0xa8\n\tsizeofIfMsghdrFreeBSD10 = 0xa8\n\tsizeofIfMsghdrFreeBSD11 = 0xa8\n\n\tsizeofIfDataFreeBSD7  = 0x98\n\tsizeofIfDataFreeBSD8  = 0x98\n\tsizeofIfDataFreeBSD9  = 0x98\n\tsizeofIfDataFreeBSD10 = 0x98\n\tsizeofIfDataFreeBSD11 = 0x98\n\n\tsizeofIfMsghdrlFreeBSD10Emu        = 0xb0\n\tsizeofIfaMsghdrFreeBSD10Emu        = 0x14\n\tsizeofIfaMsghdrlFreeBSD10Emu       = 0xb0\n\tsizeofIfmaMsghdrFreeBSD10Emu       = 0x10\n\tsizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18\n\n\tsizeofRtMsghdrFreeBSD10Emu  = 0x98\n\tsizeofRtMetricsFreeBSD10Emu = 0x70\n\n\tsizeofIfMsghdrFreeBSD7Emu  = 0xa8\n\tsizeofIfMsghdrFreeBSD8Emu  = 0xa8\n\tsizeofIfMsghdrFreeBSD9Emu  = 0xa8\n\tsizeofIfMsghdrFreeBSD10Emu = 0xa8\n\tsizeofIfMsghdrFreeBSD11Emu = 0xa8\n\n\tsizeofIfDataFreeBSD7Emu  = 0x98\n\tsizeofIfDataFreeBSD8Emu  = 0x98\n\tsizeofIfDataFreeBSD9Emu  = 0x98\n\tsizeofIfDataFreeBSD10Emu = 0x98\n\tsizeofIfDataFreeBSD11Emu = 0x98\n\n\tsizeofSockaddrStorage = 0x80\n\tsizeofSockaddrInet    = 0x10\n\tsizeofSockaddrInet6   = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/zsys_freebsd_arm.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_freebsd.go\n\npackage route\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_ROUTE  = 0x11\n\tsysAF_LINK   = 0x12\n\tsysAF_INET6  = 0x1c\n\n\tsysSOCK_RAW = 0x3\n\n\tsysNET_RT_DUMP     = 0x1\n\tsysNET_RT_FLAGS    = 0x2\n\tsysNET_RT_IFLIST   = 0x3\n\tsysNET_RT_IFMALIST = 0x4\n\tsysNET_RT_IFLISTL  = 0x5\n)\n\nconst (\n\tsysCTL_MAXNAME = 0x18\n\n\tsysCTL_UNSPEC   = 0x0\n\tsysCTL_KERN     = 0x1\n\tsysCTL_VM       = 0x2\n\tsysCTL_VFS      = 0x3\n\tsysCTL_NET      = 0x4\n\tsysCTL_DEBUG    = 0x5\n\tsysCTL_HW       = 0x6\n\tsysCTL_MACHDEP  = 0x7\n\tsysCTL_USER     = 0x8\n\tsysCTL_P1003_1B = 0x9\n)\n\nconst (\n\tsysRTM_VERSION = 0x5\n\n\tsysRTM_ADD        = 0x1\n\tsysRTM_DELETE     = 0x2\n\tsysRTM_CHANGE     = 0x3\n\tsysRTM_GET        = 0x4\n\tsysRTM_LOSING     = 0x5\n\tsysRTM_REDIRECT   = 0x6\n\tsysRTM_MISS       = 0x7\n\tsysRTM_LOCK       = 0x8\n\tsysRTM_RESOLVE    = 0xb\n\tsysRTM_NEWADDR    = 0xc\n\tsysRTM_DELADDR    = 0xd\n\tsysRTM_IFINFO     = 0xe\n\tsysRTM_NEWMADDR   = 0xf\n\tsysRTM_DELMADDR   = 0x10\n\tsysRTM_IFANNOUNCE = 0x11\n\tsysRTM_IEEE80211  = 0x12\n\n\tsysRTA_DST     = 0x1\n\tsysRTA_GATEWAY = 0x2\n\tsysRTA_NETMASK = 0x4\n\tsysRTA_GENMASK = 0x8\n\tsysRTA_IFP     = 0x10\n\tsysRTA_IFA     = 0x20\n\tsysRTA_AUTHOR  = 0x40\n\tsysRTA_BRD     = 0x80\n\n\tsysRTAX_DST     = 0x0\n\tsysRTAX_GATEWAY = 0x1\n\tsysRTAX_NETMASK = 0x2\n\tsysRTAX_GENMASK = 0x3\n\tsysRTAX_IFP     = 0x4\n\tsysRTAX_IFA     = 0x5\n\tsysRTAX_AUTHOR  = 0x6\n\tsysRTAX_BRD     = 0x7\n\tsysRTAX_MAX     = 0x8\n)\n\nconst (\n\tsizeofIfMsghdrlFreeBSD10        = 0x68\n\tsizeofIfaMsghdrFreeBSD10        = 0x14\n\tsizeofIfaMsghdrlFreeBSD10       = 0x6c\n\tsizeofIfmaMsghdrFreeBSD10       = 0x10\n\tsizeofIfAnnouncemsghdrFreeBSD10 = 0x18\n\n\tsizeofRtMsghdrFreeBSD10  = 0x5c\n\tsizeofRtMetricsFreeBSD10 = 0x38\n\n\tsizeofIfMsghdrFreeBSD7  = 0x70\n\tsizeofIfMsghdrFreeBSD8  = 0x70\n\tsizeofIfMsghdrFreeBSD9  = 
0x70\n\tsizeofIfMsghdrFreeBSD10 = 0x70\n\tsizeofIfMsghdrFreeBSD11 = 0xa8\n\n\tsizeofIfDataFreeBSD7  = 0x60\n\tsizeofIfDataFreeBSD8  = 0x60\n\tsizeofIfDataFreeBSD9  = 0x60\n\tsizeofIfDataFreeBSD10 = 0x60\n\tsizeofIfDataFreeBSD11 = 0x98\n\n\tsizeofIfMsghdrlFreeBSD10Emu        = 0x68\n\tsizeofIfaMsghdrFreeBSD10Emu        = 0x14\n\tsizeofIfaMsghdrlFreeBSD10Emu       = 0x6c\n\tsizeofIfmaMsghdrFreeBSD10Emu       = 0x10\n\tsizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18\n\n\tsizeofRtMsghdrFreeBSD10Emu  = 0x5c\n\tsizeofRtMetricsFreeBSD10Emu = 0x38\n\n\tsizeofIfMsghdrFreeBSD7Emu  = 0x70\n\tsizeofIfMsghdrFreeBSD8Emu  = 0x70\n\tsizeofIfMsghdrFreeBSD9Emu  = 0x70\n\tsizeofIfMsghdrFreeBSD10Emu = 0x70\n\tsizeofIfMsghdrFreeBSD11Emu = 0xa8\n\n\tsizeofIfDataFreeBSD7Emu  = 0x60\n\tsizeofIfDataFreeBSD8Emu  = 0x60\n\tsizeofIfDataFreeBSD9Emu  = 0x60\n\tsizeofIfDataFreeBSD10Emu = 0x60\n\tsizeofIfDataFreeBSD11Emu = 0x98\n\n\tsizeofSockaddrStorage = 0x80\n\tsizeofSockaddrInet    = 0x10\n\tsizeofSockaddrInet6   = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/zsys_netbsd.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_netbsd.go\n\npackage route\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_ROUTE  = 0x22\n\tsysAF_LINK   = 0x12\n\tsysAF_INET6  = 0x18\n\n\tsysSOCK_RAW = 0x3\n\n\tsysNET_RT_DUMP   = 0x1\n\tsysNET_RT_FLAGS  = 0x2\n\tsysNET_RT_IFLIST = 0x5\n\tsysNET_RT_MAXID  = 0x6\n)\n\nconst (\n\tsysCTL_MAXNAME = 0xc\n\n\tsysCTL_UNSPEC   = 0x0\n\tsysCTL_KERN     = 0x1\n\tsysCTL_VM       = 0x2\n\tsysCTL_VFS      = 0x3\n\tsysCTL_NET      = 0x4\n\tsysCTL_DEBUG    = 0x5\n\tsysCTL_HW       = 0x6\n\tsysCTL_MACHDEP  = 0x7\n\tsysCTL_USER     = 0x8\n\tsysCTL_DDB      = 0x9\n\tsysCTL_PROC     = 0xa\n\tsysCTL_VENDOR   = 0xb\n\tsysCTL_EMUL     = 0xc\n\tsysCTL_SECURITY = 0xd\n\tsysCTL_MAXID    = 0xe\n)\n\nconst (\n\tsysRTM_VERSION = 0x4\n\n\tsysRTM_ADD        = 0x1\n\tsysRTM_DELETE     = 0x2\n\tsysRTM_CHANGE     = 0x3\n\tsysRTM_GET        = 0x4\n\tsysRTM_LOSING     = 0x5\n\tsysRTM_REDIRECT   = 0x6\n\tsysRTM_MISS       = 0x7\n\tsysRTM_LOCK       = 0x8\n\tsysRTM_OLDADD     = 0x9\n\tsysRTM_OLDDEL     = 0xa\n\tsysRTM_RESOLVE    = 0xb\n\tsysRTM_NEWADDR    = 0xc\n\tsysRTM_DELADDR    = 0xd\n\tsysRTM_IFANNOUNCE = 0x10\n\tsysRTM_IEEE80211  = 0x11\n\tsysRTM_SETGATE    = 0x12\n\tsysRTM_LLINFO_UPD = 0x13\n\tsysRTM_IFINFO     = 0x14\n\tsysRTM_CHGADDR    = 0x15\n\n\tsysRTA_DST     = 0x1\n\tsysRTA_GATEWAY = 0x2\n\tsysRTA_NETMASK = 0x4\n\tsysRTA_GENMASK = 0x8\n\tsysRTA_IFP     = 0x10\n\tsysRTA_IFA     = 0x20\n\tsysRTA_AUTHOR  = 0x40\n\tsysRTA_BRD     = 0x80\n\tsysRTA_TAG     = 0x100\n\n\tsysRTAX_DST     = 0x0\n\tsysRTAX_GATEWAY = 0x1\n\tsysRTAX_NETMASK = 0x2\n\tsysRTAX_GENMASK = 0x3\n\tsysRTAX_IFP     = 0x4\n\tsysRTAX_IFA     = 0x5\n\tsysRTAX_AUTHOR  = 0x6\n\tsysRTAX_BRD     = 0x7\n\tsysRTAX_TAG     = 0x8\n\tsysRTAX_MAX     = 0x9\n)\n\nconst (\n\tsizeofIfMsghdrNetBSD7         = 0x98\n\tsizeofIfaMsghdrNetBSD7        = 0x18\n\tsizeofIfAnnouncemsghdrNetBSD7 = 0x18\n\n\tsizeofRtMsghdrNetBSD7  = 
0x78\n\tsizeofRtMetricsNetBSD7 = 0x50\n\n\tsizeofSockaddrStorage = 0x80\n\tsizeofSockaddrInet    = 0x10\n\tsizeofSockaddrInet6   = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/route/zsys_openbsd.go",
    "content": "// Created by cgo -godefs - DO NOT EDIT\n// cgo -godefs defs_openbsd.go\n\npackage route\n\nconst (\n\tsysAF_UNSPEC = 0x0\n\tsysAF_INET   = 0x2\n\tsysAF_ROUTE  = 0x11\n\tsysAF_LINK   = 0x12\n\tsysAF_INET6  = 0x18\n\n\tsysSOCK_RAW = 0x3\n\n\tsysNET_RT_DUMP    = 0x1\n\tsysNET_RT_FLAGS   = 0x2\n\tsysNET_RT_IFLIST  = 0x3\n\tsysNET_RT_STATS   = 0x4\n\tsysNET_RT_TABLE   = 0x5\n\tsysNET_RT_IFNAMES = 0x6\n\tsysNET_RT_MAXID   = 0x7\n)\n\nconst (\n\tsysCTL_MAXNAME = 0xc\n\n\tsysCTL_UNSPEC  = 0x0\n\tsysCTL_KERN    = 0x1\n\tsysCTL_VM      = 0x2\n\tsysCTL_FS      = 0x3\n\tsysCTL_NET     = 0x4\n\tsysCTL_DEBUG   = 0x5\n\tsysCTL_HW      = 0x6\n\tsysCTL_MACHDEP = 0x7\n\tsysCTL_DDB     = 0x9\n\tsysCTL_VFS     = 0xa\n\tsysCTL_MAXID   = 0xb\n)\n\nconst (\n\tsysRTM_VERSION = 0x5\n\n\tsysRTM_ADD        = 0x1\n\tsysRTM_DELETE     = 0x2\n\tsysRTM_CHANGE     = 0x3\n\tsysRTM_GET        = 0x4\n\tsysRTM_LOSING     = 0x5\n\tsysRTM_REDIRECT   = 0x6\n\tsysRTM_MISS       = 0x7\n\tsysRTM_LOCK       = 0x8\n\tsysRTM_RESOLVE    = 0xb\n\tsysRTM_NEWADDR    = 0xc\n\tsysRTM_DELADDR    = 0xd\n\tsysRTM_IFINFO     = 0xe\n\tsysRTM_IFANNOUNCE = 0xf\n\tsysRTM_DESYNC     = 0x10\n\tsysRTM_INVALIDATE = 0x11\n\tsysRTM_BFD        = 0x12\n\tsysRTM_PROPOSAL   = 0x13\n\n\tsysRTA_DST     = 0x1\n\tsysRTA_GATEWAY = 0x2\n\tsysRTA_NETMASK = 0x4\n\tsysRTA_GENMASK = 0x8\n\tsysRTA_IFP     = 0x10\n\tsysRTA_IFA     = 0x20\n\tsysRTA_AUTHOR  = 0x40\n\tsysRTA_BRD     = 0x80\n\tsysRTA_SRC     = 0x100\n\tsysRTA_SRCMASK = 0x200\n\tsysRTA_LABEL   = 0x400\n\tsysRTA_BFD     = 0x800\n\tsysRTA_DNS     = 0x1000\n\tsysRTA_STATIC  = 0x2000\n\tsysRTA_SEARCH  = 0x4000\n\n\tsysRTAX_DST     = 0x0\n\tsysRTAX_GATEWAY = 0x1\n\tsysRTAX_NETMASK = 0x2\n\tsysRTAX_GENMASK = 0x3\n\tsysRTAX_IFP     = 0x4\n\tsysRTAX_IFA     = 0x5\n\tsysRTAX_AUTHOR  = 0x6\n\tsysRTAX_BRD     = 0x7\n\tsysRTAX_SRC     = 0x8\n\tsysRTAX_SRCMASK = 0x9\n\tsysRTAX_LABEL   = 0xa\n\tsysRTAX_BFD     = 0xb\n\tsysRTAX_DNS     = 0xc\n\tsysRTAX_STATIC  = 
0xd\n\tsysRTAX_SEARCH  = 0xe\n\tsysRTAX_MAX     = 0xf\n)\n\nconst (\n\tsizeofRtMsghdr = 0x60\n\n\tsizeofSockaddrStorage = 0x100\n\tsizeofSockaddrInet    = 0x10\n\tsizeofSockaddrInet6   = 0x1c\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/trace/events.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage trace\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"html/template\"\n\t\"io\"\n\t\"log\"\n\t\"net/http\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"text/tabwriter\"\n\t\"time\"\n)\n\nconst maxEventsPerLog = 100\n\ntype bucket struct {\n\tMaxErrAge time.Duration\n\tString    string\n}\n\nvar buckets = []bucket{\n\t{0, \"total\"},\n\t{10 * time.Second, \"errs<10s\"},\n\t{1 * time.Minute, \"errs<1m\"},\n\t{10 * time.Minute, \"errs<10m\"},\n\t{1 * time.Hour, \"errs<1h\"},\n\t{10 * time.Hour, \"errs<10h\"},\n\t{24000 * time.Hour, \"errors\"},\n}\n\n// RenderEvents renders the HTML page typically served at /debug/events.\n// It does not do any auth checking. The request may be nil.\n//\n// Most users will use the Events handler.\nfunc RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {\n\tnow := time.Now()\n\tdata := &struct {\n\t\tFamilies []string // family names\n\t\tBuckets  []bucket\n\t\tCounts   [][]int // eventLog count per family/bucket\n\n\t\t// Set when a bucket has been selected.\n\t\tFamily    string\n\t\tBucket    int\n\t\tEventLogs eventLogs\n\t\tExpanded  bool\n\t}{\n\t\tBuckets: buckets,\n\t}\n\n\tdata.Families = make([]string, 0, len(families))\n\tfamMu.RLock()\n\tfor name := range families {\n\t\tdata.Families = append(data.Families, name)\n\t}\n\tfamMu.RUnlock()\n\tsort.Strings(data.Families)\n\n\t// Count the number of eventLogs in each family for each error age.\n\tdata.Counts = make([][]int, len(data.Families))\n\tfor i, name := range data.Families {\n\t\t// TODO(sameer): move this loop under the family lock.\n\t\tf := getEventFamily(name)\n\t\tdata.Counts[i] = make([]int, len(data.Buckets))\n\t\tfor j, b := range data.Buckets {\n\t\t\tdata.Counts[i][j] = f.Count(now, b.MaxErrAge)\n\t\t}\n\t}\n\n\tif 
req != nil {\n\t\tvar ok bool\n\t\tdata.Family, data.Bucket, ok = parseEventsArgs(req)\n\t\tif !ok {\n\t\t\t// No-op\n\t\t} else {\n\t\t\tdata.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)\n\t\t}\n\t\tif data.EventLogs != nil {\n\t\t\tdefer data.EventLogs.Free()\n\t\t\tsort.Sort(data.EventLogs)\n\t\t}\n\t\tif exp, err := strconv.ParseBool(req.FormValue(\"exp\")); err == nil {\n\t\t\tdata.Expanded = exp\n\t\t}\n\t}\n\n\tfamMu.RLock()\n\tdefer famMu.RUnlock()\n\tif err := eventsTmpl().Execute(w, data); err != nil {\n\t\tlog.Printf(\"net/trace: Failed executing template: %v\", err)\n\t}\n}\n\nfunc parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {\n\tfam, bStr := req.FormValue(\"fam\"), req.FormValue(\"b\")\n\tif fam == \"\" || bStr == \"\" {\n\t\treturn \"\", 0, false\n\t}\n\tb, err := strconv.Atoi(bStr)\n\tif err != nil || b < 0 || b >= len(buckets) {\n\t\treturn \"\", 0, false\n\t}\n\treturn fam, b, true\n}\n\n// An EventLog provides a log of events associated with a specific object.\ntype EventLog interface {\n\t// Printf formats its arguments with fmt.Sprintf and adds the\n\t// result to the event log.\n\tPrintf(format string, a ...interface{})\n\n\t// Errorf is like Printf, but it marks this event as an error.\n\tErrorf(format string, a ...interface{})\n\n\t// Finish declares that this event log is complete.\n\t// The event log should not be used after calling this method.\n\tFinish()\n}\n\n// NewEventLog returns a new EventLog with the specified family name\n// and title.\nfunc NewEventLog(family, title string) EventLog {\n\tel := newEventLog()\n\tel.ref()\n\tel.Family, el.Title = family, title\n\tel.Start = time.Now()\n\tel.events = make([]logEntry, 0, maxEventsPerLog)\n\tel.stack = make([]uintptr, 32)\n\tn := runtime.Callers(2, el.stack)\n\tel.stack = el.stack[:n]\n\n\tgetEventFamily(family).add(el)\n\treturn el\n}\n\nfunc (el *eventLog) Finish() {\n\tgetEventFamily(el.Family).remove(el)\n\tel.unref() // 
matches ref in New\n}\n\nvar (\n\tfamMu    sync.RWMutex\n\tfamilies = make(map[string]*eventFamily) // family name => family\n)\n\nfunc getEventFamily(fam string) *eventFamily {\n\tfamMu.Lock()\n\tdefer famMu.Unlock()\n\tf := families[fam]\n\tif f == nil {\n\t\tf = &eventFamily{}\n\t\tfamilies[fam] = f\n\t}\n\treturn f\n}\n\ntype eventFamily struct {\n\tmu        sync.RWMutex\n\teventLogs eventLogs\n}\n\nfunc (f *eventFamily) add(el *eventLog) {\n\tf.mu.Lock()\n\tf.eventLogs = append(f.eventLogs, el)\n\tf.mu.Unlock()\n}\n\nfunc (f *eventFamily) remove(el *eventLog) {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\tfor i, el0 := range f.eventLogs {\n\t\tif el == el0 {\n\t\t\tcopy(f.eventLogs[i:], f.eventLogs[i+1:])\n\t\t\tf.eventLogs = f.eventLogs[:len(f.eventLogs)-1]\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\tfor _, el := range f.eventLogs {\n\t\tif el.hasRecentError(now, maxErrAge) {\n\t\t\tn++\n\t\t}\n\t}\n\treturn\n}\n\nfunc (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {\n\tf.mu.RLock()\n\tdefer f.mu.RUnlock()\n\tels = make(eventLogs, 0, len(f.eventLogs))\n\tfor _, el := range f.eventLogs {\n\t\tif el.hasRecentError(now, maxErrAge) {\n\t\t\tel.ref()\n\t\t\tels = append(els, el)\n\t\t}\n\t}\n\treturn\n}\n\ntype eventLogs []*eventLog\n\n// Free calls unref on each element of the list.\nfunc (els eventLogs) Free() {\n\tfor _, el := range els {\n\t\tel.unref()\n\t}\n}\n\n// eventLogs may be sorted in reverse chronological order.\nfunc (els eventLogs) Len() int           { return len(els) }\nfunc (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }\nfunc (els eventLogs) Swap(i, j int)      { els[i], els[j] = els[j], els[i] }\n\n// A logEntry is a timestamped log entry in an event log.\ntype logEntry struct {\n\tWhen    time.Time\n\tElapsed time.Duration // since previous event in log\n\tNewDay  bool         
 // whether this event is on a different day to the previous event\n\tWhat    string\n\tIsErr   bool\n}\n\n// WhenString returns a string representation of the elapsed time of the event.\n// It will include the date if midnight was crossed.\nfunc (e logEntry) WhenString() string {\n\tif e.NewDay {\n\t\treturn e.When.Format(\"2006/01/02 15:04:05.000000\")\n\t}\n\treturn e.When.Format(\"15:04:05.000000\")\n}\n\n// An eventLog represents an active event log.\ntype eventLog struct {\n\t// Family is the top-level grouping of event logs to which this belongs.\n\tFamily string\n\n\t// Title is the title of this event log.\n\tTitle string\n\n\t// Timing information.\n\tStart time.Time\n\n\t// Call stack where this event log was created.\n\tstack []uintptr\n\n\t// Append-only sequence of events.\n\t//\n\t// TODO(sameer): change this to a ring buffer to avoid the array copy\n\t// when we hit maxEventsPerLog.\n\tmu            sync.RWMutex\n\tevents        []logEntry\n\tLastErrorTime time.Time\n\tdiscarded     int\n\n\trefs int32 // how many buckets this is in\n}\n\nfunc (el *eventLog) reset() {\n\t// Clear all but the mutex. 
Mutexes may not be copied, even when unlocked.\n\tel.Family = \"\"\n\tel.Title = \"\"\n\tel.Start = time.Time{}\n\tel.stack = nil\n\tel.events = nil\n\tel.LastErrorTime = time.Time{}\n\tel.discarded = 0\n\tel.refs = 0\n}\n\nfunc (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {\n\tif maxErrAge == 0 {\n\t\treturn true\n\t}\n\tel.mu.RLock()\n\tdefer el.mu.RUnlock()\n\treturn now.Sub(el.LastErrorTime) < maxErrAge\n}\n\n// delta returns the elapsed time since the last event or the log start,\n// and whether it spans midnight.\n// L >= el.mu\nfunc (el *eventLog) delta(t time.Time) (time.Duration, bool) {\n\tif len(el.events) == 0 {\n\t\treturn t.Sub(el.Start), false\n\t}\n\tprev := el.events[len(el.events)-1].When\n\treturn t.Sub(prev), prev.Day() != t.Day()\n\n}\n\nfunc (el *eventLog) Printf(format string, a ...interface{}) {\n\tel.printf(false, format, a...)\n}\n\nfunc (el *eventLog) Errorf(format string, a ...interface{}) {\n\tel.printf(true, format, a...)\n}\n\nfunc (el *eventLog) printf(isErr bool, format string, a ...interface{}) {\n\te := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}\n\tel.mu.Lock()\n\te.Elapsed, e.NewDay = el.delta(e.When)\n\tif len(el.events) < maxEventsPerLog {\n\t\tel.events = append(el.events, e)\n\t} else {\n\t\t// Discard the oldest event.\n\t\tif el.discarded == 0 {\n\t\t\t// el.discarded starts at two to count for the event it\n\t\t\t// is replacing, plus the next one that we are about to\n\t\t\t// drop.\n\t\t\tel.discarded = 2\n\t\t} else {\n\t\t\tel.discarded++\n\t\t}\n\t\t// TODO(sameer): if this causes allocations on a critical path,\n\t\t// change eventLog.What to be a fmt.Stringer, as in trace.go.\n\t\tel.events[0].What = fmt.Sprintf(\"(%d events discarded)\", el.discarded)\n\t\t// The timestamp of the discarded meta-event should be\n\t\t// the time of the last event it is representing.\n\t\tel.events[0].When = el.events[1].When\n\t\tcopy(el.events[1:], 
el.events[2:])\n\t\tel.events[maxEventsPerLog-1] = e\n\t}\n\tif e.IsErr {\n\t\tel.LastErrorTime = e.When\n\t}\n\tel.mu.Unlock()\n}\n\nfunc (el *eventLog) ref() {\n\tatomic.AddInt32(&el.refs, 1)\n}\n\nfunc (el *eventLog) unref() {\n\tif atomic.AddInt32(&el.refs, -1) == 0 {\n\t\tfreeEventLog(el)\n\t}\n}\n\nfunc (el *eventLog) When() string {\n\treturn el.Start.Format(\"2006/01/02 15:04:05.000000\")\n}\n\nfunc (el *eventLog) ElapsedTime() string {\n\telapsed := time.Since(el.Start)\n\treturn fmt.Sprintf(\"%.6f\", elapsed.Seconds())\n}\n\nfunc (el *eventLog) Stack() string {\n\tbuf := new(bytes.Buffer)\n\ttw := tabwriter.NewWriter(buf, 1, 8, 1, '\\t', 0)\n\tprintStackRecord(tw, el.stack)\n\ttw.Flush()\n\treturn buf.String()\n}\n\n// printStackRecord prints the function + source line information\n// for a single stack trace.\n// Adapted from runtime/pprof/pprof.go.\nfunc printStackRecord(w io.Writer, stk []uintptr) {\n\tfor _, pc := range stk {\n\t\tf := runtime.FuncForPC(pc)\n\t\tif f == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfile, line := f.FileLine(pc)\n\t\tname := f.Name()\n\t\t// Hide runtime.goexit and any runtime functions at the beginning.\n\t\tif strings.HasPrefix(name, \"runtime.\") {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(w, \"#   %s\\t%s:%d\\n\", name, file, line)\n\t}\n}\n\nfunc (el *eventLog) Events() []logEntry {\n\tel.mu.RLock()\n\tdefer el.mu.RUnlock()\n\treturn el.events\n}\n\n// freeEventLogs is a freelist of *eventLog\nvar freeEventLogs = make(chan *eventLog, 1000)\n\n// newEventLog returns a event log ready to use.\nfunc newEventLog() *eventLog {\n\tselect {\n\tcase el := <-freeEventLogs:\n\t\treturn el\n\tdefault:\n\t\treturn new(eventLog)\n\t}\n}\n\n// freeEventLog adds el to freeEventLogs if there's room.\n// This is non-blocking.\nfunc freeEventLog(el *eventLog) {\n\tel.reset()\n\tselect {\n\tcase freeEventLogs <- el:\n\tdefault:\n\t}\n}\n\nvar eventsTmplCache *template.Template\nvar eventsTmplOnce sync.Once\n\nfunc eventsTmpl() *template.Template 
{\n\teventsTmplOnce.Do(func() {\n\t\teventsTmplCache = template.Must(template.New(\"events\").Funcs(template.FuncMap{\n\t\t\t\"elapsed\":   elapsed,\n\t\t\t\"trimSpace\": strings.TrimSpace,\n\t\t}).Parse(eventsHTML))\n\t})\n\treturn eventsTmplCache\n}\n\nconst eventsHTML = `\n<html>\n\t<head>\n\t\t<title>events</title>\n\t</head>\n\t<style type=\"text/css\">\n\t\tbody {\n\t\t\tfont-family: sans-serif;\n\t\t}\n\t\ttable#req-status td.family {\n\t\t\tpadding-right: 2em;\n\t\t}\n\t\ttable#req-status td.active {\n\t\t\tpadding-right: 1em;\n\t\t}\n\t\ttable#req-status td.empty {\n\t\t\tcolor: #aaa;\n\t\t}\n\t\ttable#reqs {\n\t\t\tmargin-top: 1em;\n\t\t}\n\t\ttable#reqs tr.first {\n\t\t\t{{if $.Expanded}}font-weight: bold;{{end}}\n\t\t}\n\t\ttable#reqs td {\n\t\t\tfont-family: monospace;\n\t\t}\n\t\ttable#reqs td.when {\n\t\t\ttext-align: right;\n\t\t\twhite-space: nowrap;\n\t\t}\n\t\ttable#reqs td.elapsed {\n\t\t\tpadding: 0 0.5em;\n\t\t\ttext-align: right;\n\t\t\twhite-space: pre;\n\t\t\twidth: 10em;\n\t\t}\n\t\taddress {\n\t\t\tfont-size: smaller;\n\t\t\tmargin-top: 5em;\n\t\t}\n\t</style>\n\t<body>\n\n<h1>/debug/events</h1>\n\n<table id=\"req-status\">\n\t{{range $i, $fam := .Families}}\n\t<tr>\n\t\t<td class=\"family\">{{$fam}}</td>\n\n\t        {{range $j, $bucket := $.Buckets}}\n\t        {{$n := index $.Counts $i $j}}\n\t\t<td class=\"{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}\">\n\t                {{if $n}}<a href=\"?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}\">{{end}}\n\t\t        [{{$n}} {{$bucket.String}}]\n\t\t\t{{if $n}}</a>{{end}}\n\t\t</td>\n                {{end}}\n\n\t</tr>{{end}}\n</table>\n\n{{if $.EventLogs}}\n<hr />\n<h3>Family: {{$.Family}}</h3>\n\n{{if $.Expanded}}<a href=\"?fam={{$.Family}}&b={{$.Bucket}}\">{{end}}\n[Summary]{{if $.Expanded}}</a>{{end}}\n\n{{if not $.Expanded}}<a href=\"?fam={{$.Family}}&b={{$.Bucket}}&exp=1\">{{end}}\n[Expanded]{{if not $.Expanded}}</a>{{end}}\n\n<table 
id=\"reqs\">\n\t<tr><th>When</th><th>Elapsed</th></tr>\n\t{{range $el := $.EventLogs}}\n\t<tr class=\"first\">\n\t\t<td class=\"when\">{{$el.When}}</td>\n\t\t<td class=\"elapsed\">{{$el.ElapsedTime}}</td>\n\t\t<td>{{$el.Title}}\n\t</tr>\n\t{{if $.Expanded}}\n\t<tr>\n\t\t<td class=\"when\"></td>\n\t\t<td class=\"elapsed\"></td>\n\t\t<td><pre>{{$el.Stack|trimSpace}}</pre></td>\n\t</tr>\n\t{{range $el.Events}}\n\t<tr>\n\t\t<td class=\"when\">{{.WhenString}}</td>\n\t\t<td class=\"elapsed\">{{elapsed .Elapsed}}</td>\n\t\t<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>\n\t</tr>\n\t{{end}}\n\t{{end}}\n\t{{end}}\n</table>\n{{end}}\n\t</body>\n</html>\n`\n"
  },
  {
    "path": "vendor/golang.org/x/net/trace/histogram.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage trace\n\n// This file implements histogramming for RPC statistics collection.\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"html/template\"\n\t\"log\"\n\t\"math\"\n\t\"sync\"\n\n\t\"golang.org/x/net/internal/timeseries\"\n)\n\nconst (\n\tbucketCount = 38\n)\n\n// histogram keeps counts of values in buckets that are spaced\n// out in powers of 2: 0-1, 2-3, 4-7...\n// histogram implements timeseries.Observable\ntype histogram struct {\n\tsum          int64   // running total of measurements\n\tsumOfSquares float64 // square of running total\n\tbuckets      []int64 // bucketed values for histogram\n\tvalue        int     // holds a single value as an optimization\n\tvalueCount   int64   // number of values recorded for single value\n}\n\n// AddMeasurement records a value measurement observation to the histogram.\nfunc (h *histogram) addMeasurement(value int64) {\n\t// TODO: assert invariant\n\th.sum += value\n\th.sumOfSquares += float64(value) * float64(value)\n\n\tbucketIndex := getBucket(value)\n\n\tif h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {\n\t\th.value = bucketIndex\n\t\th.valueCount++\n\t} else {\n\t\th.allocateBuckets()\n\t\th.buckets[bucketIndex]++\n\t}\n}\n\nfunc (h *histogram) allocateBuckets() {\n\tif h.buckets == nil {\n\t\th.buckets = make([]int64, bucketCount)\n\t\th.buckets[h.value] = h.valueCount\n\t\th.value = 0\n\t\th.valueCount = -1\n\t}\n}\n\nfunc log2(i int64) int {\n\tn := 0\n\tfor ; i >= 0x100; i >>= 8 {\n\t\tn += 8\n\t}\n\tfor ; i > 0; i >>= 1 {\n\t\tn += 1\n\t}\n\treturn n\n}\n\nfunc getBucket(i int64) (index int) {\n\tindex = log2(i) - 1\n\tif index < 0 {\n\t\tindex = 0\n\t}\n\tif index >= bucketCount {\n\t\tindex = bucketCount - 1\n\t}\n\treturn\n}\n\n// Total returns the number of recorded observations.\nfunc (h *histogram) total() 
(total int64) {\n\tif h.valueCount >= 0 {\n\t\ttotal = h.valueCount\n\t}\n\tfor _, val := range h.buckets {\n\t\ttotal += int64(val)\n\t}\n\treturn\n}\n\n// Average returns the average value of recorded observations.\nfunc (h *histogram) average() float64 {\n\tt := h.total()\n\tif t == 0 {\n\t\treturn 0\n\t}\n\treturn float64(h.sum) / float64(t)\n}\n\n// Variance returns the variance of recorded observations.\nfunc (h *histogram) variance() float64 {\n\tt := float64(h.total())\n\tif t == 0 {\n\t\treturn 0\n\t}\n\ts := float64(h.sum) / t\n\treturn h.sumOfSquares/t - s*s\n}\n\n// StandardDeviation returns the standard deviation of recorded observations.\nfunc (h *histogram) standardDeviation() float64 {\n\treturn math.Sqrt(h.variance())\n}\n\n// PercentileBoundary estimates the value that the given fraction of recorded\n// observations are less than.\nfunc (h *histogram) percentileBoundary(percentile float64) int64 {\n\ttotal := h.total()\n\n\t// Corner cases (make sure result is strictly less than Total())\n\tif total == 0 {\n\t\treturn 0\n\t} else if total == 1 {\n\t\treturn int64(h.average())\n\t}\n\n\tpercentOfTotal := round(float64(total) * percentile)\n\tvar runningTotal int64\n\n\tfor i := range h.buckets {\n\t\tvalue := h.buckets[i]\n\t\trunningTotal += value\n\t\tif runningTotal == percentOfTotal {\n\t\t\t// We hit an exact bucket boundary. If the next bucket has data, it is a\n\t\t\t// good estimate of the value. If the bucket is empty, we interpolate the\n\t\t\t// midpoint between the next bucket's boundary and the next non-zero\n\t\t\t// bucket. 
If the remaining buckets are all empty, then we use the\n\t\t\t// boundary for the next bucket as the estimate.\n\t\t\tj := uint8(i + 1)\n\t\t\tmin := bucketBoundary(j)\n\t\t\tif runningTotal < total {\n\t\t\t\tfor h.buckets[j] == 0 {\n\t\t\t\t\tj++\n\t\t\t\t}\n\t\t\t}\n\t\t\tmax := bucketBoundary(j)\n\t\t\treturn min + round(float64(max-min)/2)\n\t\t} else if runningTotal > percentOfTotal {\n\t\t\t// The value is in this bucket. Interpolate the value.\n\t\t\tdelta := runningTotal - percentOfTotal\n\t\t\tpercentBucket := float64(value-delta) / float64(value)\n\t\t\tbucketMin := bucketBoundary(uint8(i))\n\t\t\tnextBucketMin := bucketBoundary(uint8(i + 1))\n\t\t\tbucketSize := nextBucketMin - bucketMin\n\t\t\treturn bucketMin + round(percentBucket*float64(bucketSize))\n\t\t}\n\t}\n\treturn bucketBoundary(bucketCount - 1)\n}\n\n// Median returns the estimated median of the observed values.\nfunc (h *histogram) median() int64 {\n\treturn h.percentileBoundary(0.5)\n}\n\n// Add adds other to h.\nfunc (h *histogram) Add(other timeseries.Observable) {\n\to := other.(*histogram)\n\tif o.valueCount == 0 {\n\t\t// Other histogram is empty\n\t} else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {\n\t\t// Both have a single bucketed value, aggregate them\n\t\th.valueCount += o.valueCount\n\t} else {\n\t\t// Two different values necessitate buckets in this histogram\n\t\th.allocateBuckets()\n\t\tif o.valueCount >= 0 {\n\t\t\th.buckets[o.value] += o.valueCount\n\t\t} else {\n\t\t\tfor i := range h.buckets {\n\t\t\t\th.buckets[i] += o.buckets[i]\n\t\t\t}\n\t\t}\n\t}\n\th.sumOfSquares += o.sumOfSquares\n\th.sum += o.sum\n}\n\n// Clear resets the histogram to an empty state, removing all observed values.\nfunc (h *histogram) Clear() {\n\th.buckets = nil\n\th.value = 0\n\th.valueCount = 0\n\th.sum = 0\n\th.sumOfSquares = 0\n}\n\n// CopyFrom copies from other, which must be a *histogram, into h.\nfunc (h *histogram) CopyFrom(other timeseries.Observable) {\n\to := 
other.(*histogram)\n\tif o.valueCount == -1 {\n\t\th.allocateBuckets()\n\t\tcopy(h.buckets, o.buckets)\n\t}\n\th.sum = o.sum\n\th.sumOfSquares = o.sumOfSquares\n\th.value = o.value\n\th.valueCount = o.valueCount\n}\n\n// Multiply scales the histogram by the specified ratio.\nfunc (h *histogram) Multiply(ratio float64) {\n\tif h.valueCount == -1 {\n\t\tfor i := range h.buckets {\n\t\t\th.buckets[i] = int64(float64(h.buckets[i]) * ratio)\n\t\t}\n\t} else {\n\t\th.valueCount = int64(float64(h.valueCount) * ratio)\n\t}\n\th.sum = int64(float64(h.sum) * ratio)\n\th.sumOfSquares = h.sumOfSquares * ratio\n}\n\n// New creates a new histogram.\nfunc (h *histogram) New() timeseries.Observable {\n\tr := new(histogram)\n\tr.Clear()\n\treturn r\n}\n\nfunc (h *histogram) String() string {\n\treturn fmt.Sprintf(\"%d, %f, %d, %d, %v\",\n\t\th.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)\n}\n\n// round returns the closest int64 to the argument\nfunc round(in float64) int64 {\n\treturn int64(math.Floor(in + 0.5))\n}\n\n// bucketBoundary returns the first value in the bucket.\nfunc bucketBoundary(bucket uint8) int64 {\n\tif bucket == 0 {\n\t\treturn 0\n\t}\n\treturn 1 << bucket\n}\n\n// bucketData holds data about a specific bucket for use in distTmpl.\ntype bucketData struct {\n\tLower, Upper       int64\n\tN                  int64\n\tPct, CumulativePct float64\n\tGraphWidth         int\n}\n\n// data holds data about a Distribution for use in distTmpl.\ntype data struct {\n\tBuckets                 []*bucketData\n\tCount, Median           int64\n\tMean, StandardDeviation float64\n}\n\n// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.\nconst maxHTMLBarWidth = 350.0\n\n// newData returns data representing h for use in distTmpl.\nfunc (h *histogram) newData() *data {\n\t// Force the allocation of buckets to simplify the rendering implementation\n\th.allocateBuckets()\n\t// We scale the bars on the right so that the largest bar is\n\t// 
maxHTMLBarWidth pixels in width.\n\tmaxBucket := int64(0)\n\tfor _, n := range h.buckets {\n\t\tif n > maxBucket {\n\t\t\tmaxBucket = n\n\t\t}\n\t}\n\ttotal := h.total()\n\tbarsizeMult := maxHTMLBarWidth / float64(maxBucket)\n\tvar pctMult float64\n\tif total == 0 {\n\t\tpctMult = 1.0\n\t} else {\n\t\tpctMult = 100.0 / float64(total)\n\t}\n\n\tbuckets := make([]*bucketData, len(h.buckets))\n\trunningTotal := int64(0)\n\tfor i, n := range h.buckets {\n\t\tif n == 0 {\n\t\t\tcontinue\n\t\t}\n\t\trunningTotal += n\n\t\tvar upperBound int64\n\t\tif i < bucketCount-1 {\n\t\t\tupperBound = bucketBoundary(uint8(i + 1))\n\t\t} else {\n\t\t\tupperBound = math.MaxInt64\n\t\t}\n\t\tbuckets[i] = &bucketData{\n\t\t\tLower:         bucketBoundary(uint8(i)),\n\t\t\tUpper:         upperBound,\n\t\t\tN:             n,\n\t\t\tPct:           float64(n) * pctMult,\n\t\t\tCumulativePct: float64(runningTotal) * pctMult,\n\t\t\tGraphWidth:    int(float64(n) * barsizeMult),\n\t\t}\n\t}\n\treturn &data{\n\t\tBuckets:           buckets,\n\t\tCount:             total,\n\t\tMedian:            h.median(),\n\t\tMean:              h.average(),\n\t\tStandardDeviation: h.standardDeviation(),\n\t}\n}\n\nfunc (h *histogram) html() template.HTML {\n\tbuf := new(bytes.Buffer)\n\tif err := distTmpl().Execute(buf, h.newData()); err != nil {\n\t\tbuf.Reset()\n\t\tlog.Printf(\"net/trace: couldn't execute template: %v\", err)\n\t}\n\treturn template.HTML(buf.String())\n}\n\nvar distTmplCache *template.Template\nvar distTmplOnce sync.Once\n\nfunc distTmpl() *template.Template {\n\tdistTmplOnce.Do(func() {\n\t\t// Input: data\n\t\tdistTmplCache = template.Must(template.New(\"distTmpl\").Parse(`\n<table>\n<tr>\n    <td style=\"padding:0.25em\">Count: {{.Count}}</td>\n    <td style=\"padding:0.25em\">Mean: {{printf \"%.0f\" .Mean}}</td>\n    <td style=\"padding:0.25em\">StdDev: {{printf \"%.0f\" .StandardDeviation}}</td>\n    <td style=\"padding:0.25em\">Median: 
{{.Median}}</td>\n</tr>\n</table>\n<hr>\n<table>\n{{range $b := .Buckets}}\n{{if $b}}\n  <tr>\n    <td style=\"padding:0 0 0 0.25em\">[</td>\n    <td style=\"text-align:right;padding:0 0.25em\">{{.Lower}},</td>\n    <td style=\"text-align:right;padding:0 0.25em\">{{.Upper}})</td>\n    <td style=\"text-align:right;padding:0 0.25em\">{{.N}}</td>\n    <td style=\"text-align:right;padding:0 0.25em\">{{printf \"%#.3f\" .Pct}}%</td>\n    <td style=\"text-align:right;padding:0 0.25em\">{{printf \"%#.3f\" .CumulativePct}}%</td>\n    <td><div style=\"background-color: blue; height: 1em; width: {{.GraphWidth}};\"></div></td>\n  </tr>\n{{end}}\n{{end}}\n</table>\n`))\n\t})\n\treturn distTmplCache\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/trace/histogram_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage trace\n\nimport (\n\t\"math\"\n\t\"testing\"\n)\n\ntype sumTest struct {\n\tvalue        int64\n\tsum          int64\n\tsumOfSquares float64\n\ttotal        int64\n}\n\nvar sumTests = []sumTest{\n\t{100, 100, 10000, 1},\n\t{50, 150, 12500, 2},\n\t{50, 200, 15000, 3},\n\t{50, 250, 17500, 4},\n}\n\ntype bucketingTest struct {\n\tin     int64\n\tlog    int\n\tbucket int\n}\n\nvar bucketingTests = []bucketingTest{\n\t{0, 0, 0},\n\t{1, 1, 0},\n\t{2, 2, 1},\n\t{3, 2, 1},\n\t{4, 3, 2},\n\t{1000, 10, 9},\n\t{1023, 10, 9},\n\t{1024, 11, 10},\n\t{1000000, 20, 19},\n}\n\ntype multiplyTest struct {\n\tin                   int64\n\tratio                float64\n\texpectedSum          int64\n\texpectedTotal        int64\n\texpectedSumOfSquares float64\n}\n\nvar multiplyTests = []multiplyTest{\n\t{15, 2.5, 37, 2, 562.5},\n\t{128, 4.6, 758, 13, 77953.9},\n}\n\ntype percentileTest struct {\n\tfraction float64\n\texpected int64\n}\n\nvar percentileTests = []percentileTest{\n\t{0.25, 48},\n\t{0.5, 96},\n\t{0.6, 109},\n\t{0.75, 128},\n\t{0.90, 205},\n\t{0.95, 230},\n\t{0.99, 256},\n}\n\nfunc TestSum(t *testing.T) {\n\tvar h histogram\n\n\tfor _, test := range sumTests {\n\t\th.addMeasurement(test.value)\n\t\tsum := h.sum\n\t\tif sum != test.sum {\n\t\t\tt.Errorf(\"h.Sum = %v WANT: %v\", sum, test.sum)\n\t\t}\n\n\t\tsumOfSquares := h.sumOfSquares\n\t\tif sumOfSquares != test.sumOfSquares {\n\t\t\tt.Errorf(\"h.SumOfSquares = %v WANT: %v\", sumOfSquares, test.sumOfSquares)\n\t\t}\n\n\t\ttotal := h.total()\n\t\tif total != test.total {\n\t\t\tt.Errorf(\"h.Total = %v WANT: %v\", total, test.total)\n\t\t}\n\t}\n}\n\nfunc TestMultiply(t *testing.T) {\n\tvar h histogram\n\tfor i, test := range multiplyTests {\n\t\th.addMeasurement(test.in)\n\t\th.Multiply(test.ratio)\n\t\tif h.sum != test.expectedSum 
{\n\t\t\tt.Errorf(\"#%v: h.sum = %v WANT: %v\", i, h.sum, test.expectedSum)\n\t\t}\n\t\tif h.total() != test.expectedTotal {\n\t\t\tt.Errorf(\"#%v: h.total = %v WANT: %v\", i, h.total(), test.expectedTotal)\n\t\t}\n\t\tif h.sumOfSquares != test.expectedSumOfSquares {\n\t\t\tt.Errorf(\"#%v: h.SumOfSquares = %v WANT: %v\", i, test.expectedSumOfSquares, h.sumOfSquares)\n\t\t}\n\t}\n}\n\nfunc TestBucketingFunctions(t *testing.T) {\n\tfor _, test := range bucketingTests {\n\t\tlog := log2(test.in)\n\t\tif log != test.log {\n\t\t\tt.Errorf(\"log2 = %v WANT: %v\", log, test.log)\n\t\t}\n\n\t\tbucket := getBucket(test.in)\n\t\tif bucket != test.bucket {\n\t\t\tt.Errorf(\"getBucket = %v WANT: %v\", bucket, test.bucket)\n\t\t}\n\t}\n}\n\nfunc TestAverage(t *testing.T) {\n\ta := new(histogram)\n\taverage := a.average()\n\tif average != 0 {\n\t\tt.Errorf(\"Average of empty histogram was %v WANT: 0\", average)\n\t}\n\n\ta.addMeasurement(1)\n\ta.addMeasurement(1)\n\ta.addMeasurement(3)\n\tconst expected = float64(5) / float64(3)\n\taverage = a.average()\n\n\tif !isApproximate(average, expected) {\n\t\tt.Errorf(\"Average = %g WANT: %v\", average, expected)\n\t}\n}\n\nfunc TestStandardDeviation(t *testing.T) {\n\ta := new(histogram)\n\tadd(a, 10, 1<<4)\n\tadd(a, 10, 1<<5)\n\tadd(a, 10, 1<<6)\n\tstdDev := a.standardDeviation()\n\tconst expected = 19.95\n\n\tif !isApproximate(stdDev, expected) {\n\t\tt.Errorf(\"StandardDeviation = %v WANT: %v\", stdDev, expected)\n\t}\n\n\t// No values\n\ta = new(histogram)\n\tstdDev = a.standardDeviation()\n\n\tif !isApproximate(stdDev, 0) {\n\t\tt.Errorf(\"StandardDeviation = %v WANT: 0\", stdDev)\n\t}\n\n\tadd(a, 1, 1<<4)\n\tif !isApproximate(stdDev, 0) {\n\t\tt.Errorf(\"StandardDeviation = %v WANT: 0\", stdDev)\n\t}\n\n\tadd(a, 10, 1<<4)\n\tif !isApproximate(stdDev, 0) {\n\t\tt.Errorf(\"StandardDeviation = %v WANT: 0\", stdDev)\n\t}\n}\n\nfunc TestPercentileBoundary(t *testing.T) {\n\ta := new(histogram)\n\tadd(a, 5, 1<<4)\n\tadd(a, 10, 
1<<6)\n\tadd(a, 5, 1<<7)\n\n\tfor _, test := range percentileTests {\n\t\tpercentile := a.percentileBoundary(test.fraction)\n\t\tif percentile != test.expected {\n\t\t\tt.Errorf(\"h.PercentileBoundary (fraction=%v) = %v WANT: %v\", test.fraction, percentile, test.expected)\n\t\t}\n\t}\n}\n\nfunc TestCopyFrom(t *testing.T) {\n\ta := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,\n\t\t19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}\n\tb := histogram{6, 36, []int64{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n\t\t20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}, 5, -1}\n\n\ta.CopyFrom(&b)\n\n\tif a.String() != b.String() {\n\t\tt.Errorf(\"a.String = %s WANT: %s\", a.String(), b.String())\n\t}\n}\n\nfunc TestClear(t *testing.T) {\n\ta := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,\n\t\t19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}\n\n\ta.Clear()\n\n\texpected := \"0, 0.000000, 0, 0, []\"\n\tif a.String() != expected {\n\t\tt.Errorf(\"a.String = %s WANT %s\", a.String(), expected)\n\t}\n}\n\nfunc TestNew(t *testing.T) {\n\ta := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,\n\t\t19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}\n\tb := a.New()\n\n\texpected := \"0, 0.000000, 0, 0, []\"\n\tif b.(*histogram).String() != expected {\n\t\tt.Errorf(\"b.(*histogram).String = %s WANT: %s\", b.(*histogram).String(), expected)\n\t}\n}\n\nfunc TestAdd(t *testing.T) {\n\t// The tests here depend on the associativity of addMeasurement and Add.\n\t// Add empty observation\n\ta := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,\n\t\t19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1}\n\tb := a.New()\n\n\texpected := 
a.String()\n\ta.Add(b)\n\tif a.String() != expected {\n\t\tt.Errorf(\"a.String = %s WANT: %s\", a.String(), expected)\n\t}\n\n\t// Add same bucketed value, no new buckets\n\tc := new(histogram)\n\td := new(histogram)\n\te := new(histogram)\n\tc.addMeasurement(12)\n\td.addMeasurement(11)\n\te.addMeasurement(12)\n\te.addMeasurement(11)\n\tc.Add(d)\n\tif c.String() != e.String() {\n\t\tt.Errorf(\"c.String = %s WANT: %s\", c.String(), e.String())\n\t}\n\n\t// Add bucketed values\n\tf := new(histogram)\n\tg := new(histogram)\n\th := new(histogram)\n\tf.addMeasurement(4)\n\tf.addMeasurement(12)\n\tf.addMeasurement(100)\n\tg.addMeasurement(18)\n\tg.addMeasurement(36)\n\tg.addMeasurement(255)\n\th.addMeasurement(4)\n\th.addMeasurement(12)\n\th.addMeasurement(100)\n\th.addMeasurement(18)\n\th.addMeasurement(36)\n\th.addMeasurement(255)\n\tf.Add(g)\n\tif f.String() != h.String() {\n\t\tt.Errorf(\"f.String = %q WANT: %q\", f.String(), h.String())\n\t}\n\n\t// add buckets to no buckets\n\ti := new(histogram)\n\tj := new(histogram)\n\tk := new(histogram)\n\tj.addMeasurement(18)\n\tj.addMeasurement(36)\n\tj.addMeasurement(255)\n\tk.addMeasurement(18)\n\tk.addMeasurement(36)\n\tk.addMeasurement(255)\n\ti.Add(j)\n\tif i.String() != k.String() {\n\t\tt.Errorf(\"i.String = %q WANT: %q\", i.String(), k.String())\n\t}\n\n\t// add buckets to single value (no overlap)\n\tl := new(histogram)\n\tm := new(histogram)\n\tn := new(histogram)\n\tl.addMeasurement(0)\n\tm.addMeasurement(18)\n\tm.addMeasurement(36)\n\tm.addMeasurement(255)\n\tn.addMeasurement(0)\n\tn.addMeasurement(18)\n\tn.addMeasurement(36)\n\tn.addMeasurement(255)\n\tl.Add(m)\n\tif l.String() != n.String() {\n\t\tt.Errorf(\"l.String = %q WANT: %q\", l.String(), n.String())\n\t}\n\n\t// mixed order\n\to := new(histogram)\n\tp := new(histogram)\n\to.addMeasurement(0)\n\to.addMeasurement(2)\n\to.addMeasurement(0)\n\tp.addMeasurement(0)\n\tp.addMeasurement(0)\n\tp.addMeasurement(2)\n\tif o.String() != p.String() 
{\n\t\tt.Errorf(\"o.String = %q WANT: %q\", o.String(), p.String())\n\t}\n}\n\nfunc add(h *histogram, times int, val int64) {\n\tfor i := 0; i < times; i++ {\n\t\th.addMeasurement(val)\n\t}\n}\n\nfunc isApproximate(x, y float64) bool {\n\treturn math.Abs(x-y) < 1e-2\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/trace/trace.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n/*\nPackage trace implements tracing of requests and long-lived objects.\nIt exports HTTP interfaces on /debug/requests and /debug/events.\n\nA trace.Trace provides tracing for short-lived objects, usually requests.\nA request handler might be implemented like this:\n\n\tfunc fooHandler(w http.ResponseWriter, req *http.Request) {\n\t\ttr := trace.New(\"mypkg.Foo\", req.URL.Path)\n\t\tdefer tr.Finish()\n\t\t...\n\t\ttr.LazyPrintf(\"some event %q happened\", str)\n\t\t...\n\t\tif err := somethingImportant(); err != nil {\n\t\t\ttr.LazyPrintf(\"somethingImportant failed: %v\", err)\n\t\t\ttr.SetError()\n\t\t}\n\t}\n\nThe /debug/requests HTTP endpoint organizes the traces by family,\nerrors, and duration.  It also provides histogram of request duration\nfor each family.\n\nA trace.EventLog provides tracing for long-lived objects, such as RPC\nconnections.\n\n\t// A Fetcher fetches URL paths for a single domain.\n\ttype Fetcher struct {\n\t\tdomain string\n\t\tevents trace.EventLog\n\t}\n\n\tfunc NewFetcher(domain string) *Fetcher {\n\t\treturn &Fetcher{\n\t\t\tdomain,\n\t\t\ttrace.NewEventLog(\"mypkg.Fetcher\", domain),\n\t\t}\n\t}\n\n\tfunc (f *Fetcher) Fetch(path string) (string, error) {\n\t\tresp, err := http.Get(\"http://\" + f.domain + \"/\" + path)\n\t\tif err != nil {\n\t\t\tf.events.Errorf(\"Get(%q) = %v\", path, err)\n\t\t\treturn \"\", err\n\t\t}\n\t\tf.events.Printf(\"Get(%q) = %s\", path, resp.Status)\n\t\t...\n\t}\n\n\tfunc (f *Fetcher) Close() error {\n\t\tf.events.Finish()\n\t\treturn nil\n\t}\n\nThe /debug/events HTTP endpoint organizes the event logs by family and\nby time since the last error.  
The expanded view displays recent log\nentries and the log's call stack.\n*/\npackage trace // import \"golang.org/x/net/trace\"\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"html/template\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"golang.org/x/net/internal/timeseries\"\n)\n\n// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing.\n// FOR DEBUGGING ONLY. This will slow down the program.\nvar DebugUseAfterFinish = false\n\n// AuthRequest determines whether a specific request is permitted to load the\n// /debug/requests or /debug/events pages.\n//\n// It returns two bools; the first indicates whether the page may be viewed at all,\n// and the second indicates whether sensitive events will be shown.\n//\n// AuthRequest may be replaced by a program to customize its authorization requirements.\n//\n// The default AuthRequest function returns (true, true) if and only if the request\n// comes from localhost/127.0.0.1/[::1].\nvar AuthRequest = func(req *http.Request) (any, sensitive bool) {\n\t// RemoteAddr is commonly in the form \"IP\" or \"IP:port\".\n\t// If it is in the form \"IP:port\", split off the port.\n\thost, _, err := net.SplitHostPort(req.RemoteAddr)\n\tif err != nil {\n\t\thost = req.RemoteAddr\n\t}\n\tswitch host {\n\tcase \"localhost\", \"127.0.0.1\", \"::1\":\n\t\treturn true, true\n\tdefault:\n\t\treturn false, false\n\t}\n}\n\nfunc init() {\n\t// TODO(jbd): Serve Traces from /debug/traces in the future?\n\t// There is no requirement for a request to be present to have traces.\n\thttp.HandleFunc(\"/debug/requests\", Traces)\n\thttp.HandleFunc(\"/debug/events\", Events)\n}\n\n// Traces responds with traces from the program.\n// The package initialization registers it in http.DefaultServeMux\n// at /debug/requests.\n//\n// It performs authorization by running AuthRequest.\nfunc Traces(w http.ResponseWriter, req *http.Request) 
{\n\tany, sensitive := AuthRequest(req)\n\tif !any {\n\t\thttp.Error(w, \"not allowed\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\tRender(w, req, sensitive)\n}\n\n// Events responds with a page of events collected by EventLogs.\n// The package initialization registers it in http.DefaultServeMux\n// at /debug/events.\n//\n// It performs authorization by running AuthRequest.\nfunc Events(w http.ResponseWriter, req *http.Request) {\n\tany, sensitive := AuthRequest(req)\n\tif !any {\n\t\thttp.Error(w, \"not allowed\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\tRenderEvents(w, req, sensitive)\n}\n\n// Render renders the HTML page typically served at /debug/requests.\n// It does not do any auth checking. The request may be nil.\n//\n// Most users will use the Traces handler.\nfunc Render(w io.Writer, req *http.Request, sensitive bool) {\n\tdata := &struct {\n\t\tFamilies         []string\n\t\tActiveTraceCount map[string]int\n\t\tCompletedTraces  map[string]*family\n\n\t\t// Set when a bucket has been selected.\n\t\tTraces        traceList\n\t\tFamily        string\n\t\tBucket        int\n\t\tExpanded      bool\n\t\tTraced        bool\n\t\tActive        bool\n\t\tShowSensitive bool // whether to show sensitive events\n\n\t\tHistogram       template.HTML\n\t\tHistogramWindow string // e.g. 
\"last minute\", \"last hour\", \"all time\"\n\n\t\t// If non-zero, the set of traces is a partial set,\n\t\t// and this is the total number.\n\t\tTotal int\n\t}{\n\t\tCompletedTraces: completedTraces,\n\t}\n\n\tdata.ShowSensitive = sensitive\n\tif req != nil {\n\t\t// Allow show_sensitive=0 to force hiding of sensitive data for testing.\n\t\t// This only goes one way; you can't use show_sensitive=1 to see things.\n\t\tif req.FormValue(\"show_sensitive\") == \"0\" {\n\t\t\tdata.ShowSensitive = false\n\t\t}\n\n\t\tif exp, err := strconv.ParseBool(req.FormValue(\"exp\")); err == nil {\n\t\t\tdata.Expanded = exp\n\t\t}\n\t\tif exp, err := strconv.ParseBool(req.FormValue(\"rtraced\")); err == nil {\n\t\t\tdata.Traced = exp\n\t\t}\n\t}\n\n\tcompletedMu.RLock()\n\tdata.Families = make([]string, 0, len(completedTraces))\n\tfor fam := range completedTraces {\n\t\tdata.Families = append(data.Families, fam)\n\t}\n\tcompletedMu.RUnlock()\n\tsort.Strings(data.Families)\n\n\t// We are careful here to minimize the time spent locking activeMu,\n\t// since that lock is required every time an RPC starts and finishes.\n\tdata.ActiveTraceCount = make(map[string]int, len(data.Families))\n\tactiveMu.RLock()\n\tfor fam, s := range activeTraces {\n\t\tdata.ActiveTraceCount[fam] = s.Len()\n\t}\n\tactiveMu.RUnlock()\n\n\tvar ok bool\n\tdata.Family, data.Bucket, ok = parseArgs(req)\n\tswitch {\n\tcase !ok:\n\t\t// No-op\n\tcase data.Bucket == -1:\n\t\tdata.Active = true\n\t\tn := data.ActiveTraceCount[data.Family]\n\t\tdata.Traces = getActiveTraces(data.Family)\n\t\tif len(data.Traces) < n {\n\t\t\tdata.Total = n\n\t\t}\n\tcase data.Bucket < bucketsPerFamily:\n\t\tif b := lookupBucket(data.Family, data.Bucket); b != nil {\n\t\t\tdata.Traces = b.Copy(data.Traced)\n\t\t}\n\tdefault:\n\t\tif f := getFamily(data.Family, false); f != nil {\n\t\t\tvar obs timeseries.Observable\n\t\t\tf.LatencyMu.RLock()\n\t\t\tswitch o := data.Bucket - bucketsPerFamily; o {\n\t\t\tcase 0:\n\t\t\t\tobs = 
f.Latency.Minute()\n\t\t\t\tdata.HistogramWindow = \"last minute\"\n\t\t\tcase 1:\n\t\t\t\tobs = f.Latency.Hour()\n\t\t\t\tdata.HistogramWindow = \"last hour\"\n\t\t\tcase 2:\n\t\t\t\tobs = f.Latency.Total()\n\t\t\t\tdata.HistogramWindow = \"all time\"\n\t\t\t}\n\t\t\tf.LatencyMu.RUnlock()\n\t\t\tif obs != nil {\n\t\t\t\tdata.Histogram = obs.(*histogram).html()\n\t\t\t}\n\t\t}\n\t}\n\n\tif data.Traces != nil {\n\t\tdefer data.Traces.Free()\n\t\tsort.Sort(data.Traces)\n\t}\n\n\tcompletedMu.RLock()\n\tdefer completedMu.RUnlock()\n\tif err := pageTmpl().ExecuteTemplate(w, \"Page\", data); err != nil {\n\t\tlog.Printf(\"net/trace: Failed executing template: %v\", err)\n\t}\n}\n\nfunc parseArgs(req *http.Request) (fam string, b int, ok bool) {\n\tif req == nil {\n\t\treturn \"\", 0, false\n\t}\n\tfam, bStr := req.FormValue(\"fam\"), req.FormValue(\"b\")\n\tif fam == \"\" || bStr == \"\" {\n\t\treturn \"\", 0, false\n\t}\n\tb, err := strconv.Atoi(bStr)\n\tif err != nil || b < -1 {\n\t\treturn \"\", 0, false\n\t}\n\n\treturn fam, b, true\n}\n\nfunc lookupBucket(fam string, b int) *traceBucket {\n\tf := getFamily(fam, false)\n\tif f == nil || b < 0 || b >= len(f.Buckets) {\n\t\treturn nil\n\t}\n\treturn f.Buckets[b]\n}\n\ntype contextKeyT string\n\nvar contextKey = contextKeyT(\"golang.org/x/net/trace.Trace\")\n\n// Trace represents an active request.\ntype Trace interface {\n\t// LazyLog adds x to the event log. It will be evaluated each time the\n\t// /debug/requests page is rendered. Any memory referenced by x will be\n\t// pinned until the trace is finished and later discarded.\n\tLazyLog(x fmt.Stringer, sensitive bool)\n\n\t// LazyPrintf evaluates its arguments with fmt.Sprintf each time the\n\t// /debug/requests page is rendered. 
Any memory referenced by a will be\n\t// pinned until the trace is finished and later discarded.\n\tLazyPrintf(format string, a ...interface{})\n\n\t// SetError declares that this trace resulted in an error.\n\tSetError()\n\n\t// SetRecycler sets a recycler for the trace.\n\t// f will be called for each event passed to LazyLog at a time when\n\t// it is no longer required, whether while the trace is still active\n\t// and the event is discarded, or when a completed trace is discarded.\n\tSetRecycler(f func(interface{}))\n\n\t// SetTraceInfo sets the trace info for the trace.\n\t// This is currently unused.\n\tSetTraceInfo(traceID, spanID uint64)\n\n\t// SetMaxEvents sets the maximum number of events that will be stored\n\t// in the trace. This has no effect if any events have already been\n\t// added to the trace.\n\tSetMaxEvents(m int)\n\n\t// Finish declares that this trace is complete.\n\t// The trace should not be used after calling this method.\n\tFinish()\n}\n\ntype lazySprintf struct {\n\tformat string\n\ta      []interface{}\n}\n\nfunc (l *lazySprintf) String() string {\n\treturn fmt.Sprintf(l.format, l.a...)\n}\n\n// New returns a new Trace with the specified family and title.\nfunc New(family, title string) Trace {\n\ttr := newTrace()\n\ttr.ref()\n\ttr.Family, tr.Title = family, title\n\ttr.Start = time.Now()\n\ttr.maxEvents = maxEventsPerTrace\n\ttr.events = tr.eventsBuf[:0]\n\n\tactiveMu.RLock()\n\ts := activeTraces[tr.Family]\n\tactiveMu.RUnlock()\n\tif s == nil {\n\t\tactiveMu.Lock()\n\t\ts = activeTraces[tr.Family] // check again\n\t\tif s == nil {\n\t\t\ts = new(traceSet)\n\t\t\tactiveTraces[tr.Family] = s\n\t\t}\n\t\tactiveMu.Unlock()\n\t}\n\ts.Add(tr)\n\n\t// Trigger allocation of the completed trace structure for this family.\n\t// This will cause the family to be present in the request page during\n\t// the first trace of this family. 
We don't care about the return value,\n\t// nor is there any need for this to run inline, so we execute it in its\n\t// own goroutine, but only if the family isn't allocated yet.\n\tcompletedMu.RLock()\n\tif _, ok := completedTraces[tr.Family]; !ok {\n\t\tgo allocFamily(tr.Family)\n\t}\n\tcompletedMu.RUnlock()\n\n\treturn tr\n}\n\nfunc (tr *trace) Finish() {\n\ttr.Elapsed = time.Now().Sub(tr.Start)\n\tif DebugUseAfterFinish {\n\t\tbuf := make([]byte, 4<<10) // 4 KB should be enough\n\t\tn := runtime.Stack(buf, false)\n\t\ttr.finishStack = buf[:n]\n\t}\n\n\tactiveMu.RLock()\n\tm := activeTraces[tr.Family]\n\tactiveMu.RUnlock()\n\tm.Remove(tr)\n\n\tf := getFamily(tr.Family, true)\n\tfor _, b := range f.Buckets {\n\t\tif b.Cond.match(tr) {\n\t\t\tb.Add(tr)\n\t\t}\n\t}\n\t// Add a sample of elapsed time as microseconds to the family's timeseries\n\th := new(histogram)\n\th.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3)\n\tf.LatencyMu.Lock()\n\tf.Latency.Add(h)\n\tf.LatencyMu.Unlock()\n\n\ttr.unref() // matches ref in New\n}\n\nconst (\n\tbucketsPerFamily    = 9\n\ttracesPerBucket     = 10\n\tmaxActiveTraces     = 20 // Maximum number of active traces to show.\n\tmaxEventsPerTrace   = 10\n\tnumHistogramBuckets = 38\n)\n\nvar (\n\t// The active traces.\n\tactiveMu     sync.RWMutex\n\tactiveTraces = make(map[string]*traceSet) // family -> traces\n\n\t// Families of completed traces.\n\tcompletedMu     sync.RWMutex\n\tcompletedTraces = make(map[string]*family) // family -> traces\n)\n\ntype traceSet struct {\n\tmu sync.RWMutex\n\tm  map[*trace]bool\n\n\t// We could avoid the entire map scan in FirstN by having a slice of all the traces\n\t// ordered by start time, and an index into that from the trace struct, with a periodic\n\t// repack of the slice after enough traces finish; we could also use a skip list or similar.\n\t// However, that would shift some of the expense from /debug/requests time to RPC time,\n\t// which is probably the wrong trade-off.\n}\n\nfunc (ts 
*traceSet) Len() int {\n\tts.mu.RLock()\n\tdefer ts.mu.RUnlock()\n\treturn len(ts.m)\n}\n\nfunc (ts *traceSet) Add(tr *trace) {\n\tts.mu.Lock()\n\tif ts.m == nil {\n\t\tts.m = make(map[*trace]bool)\n\t}\n\tts.m[tr] = true\n\tts.mu.Unlock()\n}\n\nfunc (ts *traceSet) Remove(tr *trace) {\n\tts.mu.Lock()\n\tdelete(ts.m, tr)\n\tts.mu.Unlock()\n}\n\n// FirstN returns the first n traces ordered by time.\nfunc (ts *traceSet) FirstN(n int) traceList {\n\tts.mu.RLock()\n\tdefer ts.mu.RUnlock()\n\n\tif n > len(ts.m) {\n\t\tn = len(ts.m)\n\t}\n\ttrl := make(traceList, 0, n)\n\n\t// Fast path for when no selectivity is needed.\n\tif n == len(ts.m) {\n\t\tfor tr := range ts.m {\n\t\t\ttr.ref()\n\t\t\ttrl = append(trl, tr)\n\t\t}\n\t\tsort.Sort(trl)\n\t\treturn trl\n\t}\n\n\t// Pick the oldest n traces.\n\t// This is inefficient. See the comment in the traceSet struct.\n\tfor tr := range ts.m {\n\t\t// Put the first n traces into trl in the order they occur.\n\t\t// When we have n, sort trl, and thereafter maintain its order.\n\t\tif len(trl) < n {\n\t\t\ttr.ref()\n\t\t\ttrl = append(trl, tr)\n\t\t\tif len(trl) == n {\n\t\t\t\t// This is guaranteed to happen exactly once during this loop.\n\t\t\t\tsort.Sort(trl)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif tr.Start.After(trl[n-1].Start) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Find where to insert this one.\n\t\ttr.ref()\n\t\ti := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) })\n\t\ttrl[n-1].unref()\n\t\tcopy(trl[i+1:], trl[i:])\n\t\ttrl[i] = tr\n\t}\n\n\treturn trl\n}\n\nfunc getActiveTraces(fam string) traceList {\n\tactiveMu.RLock()\n\ts := activeTraces[fam]\n\tactiveMu.RUnlock()\n\tif s == nil {\n\t\treturn nil\n\t}\n\treturn s.FirstN(maxActiveTraces)\n}\n\nfunc getFamily(fam string, allocNew bool) *family {\n\tcompletedMu.RLock()\n\tf := completedTraces[fam]\n\tcompletedMu.RUnlock()\n\tif f == nil && allocNew {\n\t\tf = allocFamily(fam)\n\t}\n\treturn f\n}\n\nfunc allocFamily(fam string) *family 
{\n\tcompletedMu.Lock()\n\tdefer completedMu.Unlock()\n\tf := completedTraces[fam]\n\tif f == nil {\n\t\tf = newFamily()\n\t\tcompletedTraces[fam] = f\n\t}\n\treturn f\n}\n\n// family represents a set of trace buckets and associated latency information.\ntype family struct {\n\t// traces may occur in multiple buckets.\n\tBuckets [bucketsPerFamily]*traceBucket\n\n\t// latency time series\n\tLatencyMu sync.RWMutex\n\tLatency   *timeseries.MinuteHourSeries\n}\n\nfunc newFamily() *family {\n\treturn &family{\n\t\tBuckets: [bucketsPerFamily]*traceBucket{\n\t\t\t{Cond: minCond(0)},\n\t\t\t{Cond: minCond(50 * time.Millisecond)},\n\t\t\t{Cond: minCond(100 * time.Millisecond)},\n\t\t\t{Cond: minCond(200 * time.Millisecond)},\n\t\t\t{Cond: minCond(500 * time.Millisecond)},\n\t\t\t{Cond: minCond(1 * time.Second)},\n\t\t\t{Cond: minCond(10 * time.Second)},\n\t\t\t{Cond: minCond(100 * time.Second)},\n\t\t\t{Cond: errorCond{}},\n\t\t},\n\t\tLatency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }),\n\t}\n}\n\n// traceBucket represents a size-capped bucket of historic traces,\n// along with a condition for a trace to belong to the bucket.\ntype traceBucket struct {\n\tCond cond\n\n\t// Ring buffer implementation of a fixed-size FIFO queue.\n\tmu     sync.RWMutex\n\tbuf    [tracesPerBucket]*trace\n\tstart  int // < tracesPerBucket\n\tlength int // <= tracesPerBucket\n}\n\nfunc (b *traceBucket) Add(tr *trace) {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\ti := b.start + b.length\n\tif i >= tracesPerBucket {\n\t\ti -= tracesPerBucket\n\t}\n\tif b.length == tracesPerBucket {\n\t\t// \"Remove\" an element from the bucket.\n\t\tb.buf[i].unref()\n\t\tb.start++\n\t\tif b.start == tracesPerBucket {\n\t\t\tb.start = 0\n\t\t}\n\t}\n\tb.buf[i] = tr\n\tif b.length < tracesPerBucket {\n\t\tb.length++\n\t}\n\ttr.ref()\n}\n\n// Copy returns a copy of the traces in the bucket.\n// If tracedOnly is true, only the traces with trace information will be 
returned.\n// The logs will be ref'd before returning; the caller should call\n// the Free method when it is done with them.\n// TODO(dsymonds): keep track of traced requests in separate buckets.\nfunc (b *traceBucket) Copy(tracedOnly bool) traceList {\n\tb.mu.RLock()\n\tdefer b.mu.RUnlock()\n\n\ttrl := make(traceList, 0, b.length)\n\tfor i, x := 0, b.start; i < b.length; i++ {\n\t\ttr := b.buf[x]\n\t\tif !tracedOnly || tr.spanID != 0 {\n\t\t\ttr.ref()\n\t\t\ttrl = append(trl, tr)\n\t\t}\n\t\tx++\n\t\tif x == b.length {\n\t\t\tx = 0\n\t\t}\n\t}\n\treturn trl\n}\n\nfunc (b *traceBucket) Empty() bool {\n\tb.mu.RLock()\n\tdefer b.mu.RUnlock()\n\treturn b.length == 0\n}\n\n// cond represents a condition on a trace.\ntype cond interface {\n\tmatch(t *trace) bool\n\tString() string\n}\n\ntype minCond time.Duration\n\nfunc (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) }\nfunc (m minCond) String() string      { return fmt.Sprintf(\"≥%gs\", time.Duration(m).Seconds()) }\n\ntype errorCond struct{}\n\nfunc (e errorCond) match(t *trace) bool { return t.IsError }\nfunc (e errorCond) String() string      { return \"errors\" }\n\ntype traceList []*trace\n\n// Free calls unref on each element of the list.\nfunc (trl traceList) Free() {\n\tfor _, t := range trl {\n\t\tt.unref()\n\t}\n}\n\n// traceList may be sorted in reverse chronological order.\nfunc (trl traceList) Len() int           { return len(trl) }\nfunc (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) }\nfunc (trl traceList) Swap(i, j int)      { trl[i], trl[j] = trl[j], trl[i] }\n\n// An event is a timestamped log entry in a trace.\ntype event struct {\n\tWhen       time.Time\n\tElapsed    time.Duration // since previous event in trace\n\tNewDay     bool          // whether this event is on a different day to the previous event\n\tRecyclable bool          // whether this event was passed via LazyLog\n\tSensitive  bool          // whether this event contains sensitive 
information\n\tWhat       interface{}   // string or fmt.Stringer\n}\n\n// WhenString returns a string representation of the elapsed time of the event.\n// It will include the date if midnight was crossed.\nfunc (e event) WhenString() string {\n\tif e.NewDay {\n\t\treturn e.When.Format(\"2006/01/02 15:04:05.000000\")\n\t}\n\treturn e.When.Format(\"15:04:05.000000\")\n}\n\n// discarded represents a number of discarded events.\n// It is stored as *discarded to make it easier to update in-place.\ntype discarded int\n\nfunc (d *discarded) String() string {\n\treturn fmt.Sprintf(\"(%d events discarded)\", int(*d))\n}\n\n// trace represents an active or complete request,\n// either sent or received by this program.\ntype trace struct {\n\t// Family is the top-level grouping of traces to which this belongs.\n\tFamily string\n\n\t// Title is the title of this trace.\n\tTitle string\n\n\t// Timing information.\n\tStart   time.Time\n\tElapsed time.Duration // zero while active\n\n\t// Trace information if non-zero.\n\ttraceID uint64\n\tspanID  uint64\n\n\t// Whether this trace resulted in an error.\n\tIsError bool\n\n\t// Append-only sequence of events (modulo discards).\n\tmu        sync.RWMutex\n\tevents    []event\n\tmaxEvents int\n\n\trefs     int32 // how many buckets this is in\n\trecycler func(interface{})\n\tdisc     discarded // scratch space to avoid allocation\n\n\tfinishStack []byte // where finish was called, if DebugUseAfterFinish is set\n\n\teventsBuf [4]event // preallocated buffer in case we only log a few events\n}\n\nfunc (tr *trace) reset() {\n\t// Clear all but the mutex. 
Mutexes may not be copied, even when unlocked.\n\ttr.Family = \"\"\n\ttr.Title = \"\"\n\ttr.Start = time.Time{}\n\ttr.Elapsed = 0\n\ttr.traceID = 0\n\ttr.spanID = 0\n\ttr.IsError = false\n\ttr.maxEvents = 0\n\ttr.events = nil\n\ttr.refs = 0\n\ttr.recycler = nil\n\ttr.disc = 0\n\ttr.finishStack = nil\n\tfor i := range tr.eventsBuf {\n\t\ttr.eventsBuf[i] = event{}\n\t}\n}\n\n// delta returns the elapsed time since the last event or the trace start,\n// and whether it spans midnight.\n// L >= tr.mu\nfunc (tr *trace) delta(t time.Time) (time.Duration, bool) {\n\tif len(tr.events) == 0 {\n\t\treturn t.Sub(tr.Start), false\n\t}\n\tprev := tr.events[len(tr.events)-1].When\n\treturn t.Sub(prev), prev.Day() != t.Day()\n}\n\nfunc (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {\n\tif DebugUseAfterFinish && tr.finishStack != nil {\n\t\tbuf := make([]byte, 4<<10) // 4 KB should be enough\n\t\tn := runtime.Stack(buf, false)\n\t\tlog.Printf(\"net/trace: trace used after finish:\\nFinished at:\\n%s\\nUsed at:\\n%s\", tr.finishStack, buf[:n])\n\t}\n\n\t/*\n\t\tNOTE TO DEBUGGERS\n\n\t\tIf you are here because your program panicked in this code,\n\t\tit is almost definitely the fault of code using this package,\n\t\tand very unlikely to be the fault of this code.\n\n\t\tThe most likely scenario is that some code elsewhere is using\n\t\ta trace.Trace after its Finish method is called.\n\t\tYou can temporarily set the DebugUseAfterFinish var\n\t\tto help discover where that is; do not leave that var set,\n\t\tsince it makes this package much less efficient.\n\t*/\n\n\te := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive}\n\ttr.mu.Lock()\n\te.Elapsed, e.NewDay = tr.delta(e.When)\n\tif len(tr.events) < tr.maxEvents {\n\t\ttr.events = append(tr.events, e)\n\t} else {\n\t\t// Discard the middle events.\n\t\tdi := int((tr.maxEvents - 1) / 2)\n\t\tif d, ok := tr.events[di].What.(*discarded); ok {\n\t\t\t(*d)++\n\t\t} else {\n\t\t\t// disc 
starts at two to count for the event it is replacing,\n\t\t\t// plus the next one that we are about to drop.\n\t\t\ttr.disc = 2\n\t\t\tif tr.recycler != nil && tr.events[di].Recyclable {\n\t\t\t\tgo tr.recycler(tr.events[di].What)\n\t\t\t}\n\t\t\ttr.events[di].What = &tr.disc\n\t\t}\n\t\t// The timestamp of the discarded meta-event should be\n\t\t// the time of the last event it is representing.\n\t\ttr.events[di].When = tr.events[di+1].When\n\n\t\tif tr.recycler != nil && tr.events[di+1].Recyclable {\n\t\t\tgo tr.recycler(tr.events[di+1].What)\n\t\t}\n\t\tcopy(tr.events[di+1:], tr.events[di+2:])\n\t\ttr.events[tr.maxEvents-1] = e\n\t}\n\ttr.mu.Unlock()\n}\n\nfunc (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) {\n\ttr.addEvent(x, true, sensitive)\n}\n\nfunc (tr *trace) LazyPrintf(format string, a ...interface{}) {\n\ttr.addEvent(&lazySprintf{format, a}, false, false)\n}\n\nfunc (tr *trace) SetError() { tr.IsError = true }\n\nfunc (tr *trace) SetRecycler(f func(interface{})) {\n\ttr.recycler = f\n}\n\nfunc (tr *trace) SetTraceInfo(traceID, spanID uint64) {\n\ttr.traceID, tr.spanID = traceID, spanID\n}\n\nfunc (tr *trace) SetMaxEvents(m int) {\n\t// Always keep at least three events: first, discarded count, last.\n\tif len(tr.events) == 0 && m > 3 {\n\t\ttr.maxEvents = m\n\t}\n}\n\nfunc (tr *trace) ref() {\n\tatomic.AddInt32(&tr.refs, 1)\n}\n\nfunc (tr *trace) unref() {\n\tif atomic.AddInt32(&tr.refs, -1) == 0 {\n\t\tif tr.recycler != nil {\n\t\t\t// freeTrace clears tr, so we hold tr.recycler and tr.events here.\n\t\t\tgo func(f func(interface{}), es []event) {\n\t\t\t\tfor _, e := range es {\n\t\t\t\t\tif e.Recyclable {\n\t\t\t\t\t\tf(e.What)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(tr.recycler, tr.events)\n\t\t}\n\n\t\tfreeTrace(tr)\n\t}\n}\n\nfunc (tr *trace) When() string {\n\treturn tr.Start.Format(\"2006/01/02 15:04:05.000000\")\n}\n\nfunc (tr *trace) ElapsedTime() string {\n\tt := tr.Elapsed\n\tif t == 0 {\n\t\t// Active trace.\n\t\tt = 
time.Since(tr.Start)\n\t}\n\treturn fmt.Sprintf(\"%.6f\", t.Seconds())\n}\n\nfunc (tr *trace) Events() []event {\n\ttr.mu.RLock()\n\tdefer tr.mu.RUnlock()\n\treturn tr.events\n}\n\nvar traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?\n\n// newTrace returns a trace ready to use.\nfunc newTrace() *trace {\n\tselect {\n\tcase tr := <-traceFreeList:\n\t\treturn tr\n\tdefault:\n\t\treturn new(trace)\n\t}\n}\n\n// freeTrace adds tr to traceFreeList if there's room.\n// This is non-blocking.\nfunc freeTrace(tr *trace) {\n\tif DebugUseAfterFinish {\n\t\treturn // never reuse\n\t}\n\ttr.reset()\n\tselect {\n\tcase traceFreeList <- tr:\n\tdefault:\n\t}\n}\n\nfunc elapsed(d time.Duration) string {\n\tb := []byte(fmt.Sprintf(\"%.6f\", d.Seconds()))\n\n\t// For subsecond durations, blank all zeros before decimal point,\n\t// and all zeros between the decimal point and the first non-zero digit.\n\tif d < time.Second {\n\t\tdot := bytes.IndexByte(b, '.')\n\t\tfor i := 0; i < dot; i++ {\n\t\t\tb[i] = ' '\n\t\t}\n\t\tfor i := dot + 1; i < len(b); i++ {\n\t\t\tif b[i] == '0' {\n\t\t\t\tb[i] = ' '\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn string(b)\n}\n\nvar pageTmplCache *template.Template\nvar pageTmplOnce sync.Once\n\nfunc pageTmpl() *template.Template {\n\tpageTmplOnce.Do(func() {\n\t\tpageTmplCache = template.Must(template.New(\"Page\").Funcs(template.FuncMap{\n\t\t\t\"elapsed\": elapsed,\n\t\t\t\"add\":     func(a, b int) int { return a + b },\n\t\t}).Parse(pageHTML))\n\t})\n\treturn pageTmplCache\n}\n\nconst pageHTML = `\n{{template \"Prolog\" .}}\n{{template \"StatusTable\" .}}\n{{template \"Epilog\" .}}\n\n{{define \"Prolog\"}}\n<html>\n\t<head>\n\t<title>/debug/requests</title>\n\t<style type=\"text/css\">\n\t\tbody {\n\t\t\tfont-family: sans-serif;\n\t\t}\n\t\ttable#tr-status td.family {\n\t\t\tpadding-right: 2em;\n\t\t}\n\t\ttable#tr-status td.active {\n\t\t\tpadding-right: 1em;\n\t\t}\n\t\ttable#tr-status td.latency-first 
{\n\t\t\tpadding-left: 1em;\n\t\t}\n\t\ttable#tr-status td.empty {\n\t\t\tcolor: #aaa;\n\t\t}\n\t\ttable#reqs {\n\t\t\tmargin-top: 1em;\n\t\t}\n\t\ttable#reqs tr.first {\n\t\t\t{{if $.Expanded}}font-weight: bold;{{end}}\n\t\t}\n\t\ttable#reqs td {\n\t\t\tfont-family: monospace;\n\t\t}\n\t\ttable#reqs td.when {\n\t\t\ttext-align: right;\n\t\t\twhite-space: nowrap;\n\t\t}\n\t\ttable#reqs td.elapsed {\n\t\t\tpadding: 0 0.5em;\n\t\t\ttext-align: right;\n\t\t\twhite-space: pre;\n\t\t\twidth: 10em;\n\t\t}\n\t\taddress {\n\t\t\tfont-size: smaller;\n\t\t\tmargin-top: 5em;\n\t\t}\n\t</style>\n\t</head>\n\t<body>\n\n<h1>/debug/requests</h1>\n{{end}} {{/* end of Prolog */}}\n\n{{define \"StatusTable\"}}\n<table id=\"tr-status\">\n\t{{range $fam := .Families}}\n\t<tr>\n\t\t<td class=\"family\">{{$fam}}</td>\n\n\t\t{{$n := index $.ActiveTraceCount $fam}}\n\t\t<td class=\"active {{if not $n}}empty{{end}}\">\n\t\t\t{{if $n}}<a href=\"?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}\">{{end}}\n\t\t\t[{{$n}} active]\n\t\t\t{{if $n}}</a>{{end}}\n\t\t</td>\n\n\t\t{{$f := index $.CompletedTraces $fam}}\n\t\t{{range $i, $b := $f.Buckets}}\n\t\t{{$empty := $b.Empty}}\n\t\t<td {{if $empty}}class=\"empty\"{{end}}>\n\t\t{{if not $empty}}<a href=\"?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}\">{{end}}\n\t\t[{{.Cond}}]\n\t\t{{if not $empty}}</a>{{end}}\n\t\t</td>\n\t\t{{end}}\n\n\t\t{{$nb := len $f.Buckets}}\n\t\t<td class=\"latency-first\">\n\t\t<a href=\"?fam={{$fam}}&b={{$nb}}\">[minute]</a>\n\t\t</td>\n\t\t<td>\n\t\t<a href=\"?fam={{$fam}}&b={{add $nb 1}}\">[hour]</a>\n\t\t</td>\n\t\t<td>\n\t\t<a href=\"?fam={{$fam}}&b={{add $nb 2}}\">[total]</a>\n\t\t</td>\n\n\t</tr>\n\t{{end}}\n</table>\n{{end}} {{/* end of StatusTable */}}\n\n{{define \"Epilog\"}}\n{{if $.Traces}}\n<hr />\n<h3>Family: {{$.Family}}</h3>\n\n{{if or $.Expanded $.Traced}}\n  <a href=\"?fam={{$.Family}}&b={{$.Bucket}}\">[Normal/Summary]</a>\n{{else}}\n  [Normal/Summary]\n{{end}}\n\n{{if or (not $.Expanded) 
$.Traced}}\n  <a href=\"?fam={{$.Family}}&b={{$.Bucket}}&exp=1\">[Normal/Expanded]</a>\n{{else}}\n  [Normal/Expanded]\n{{end}}\n\n{{if not $.Active}}\n\t{{if or $.Expanded (not $.Traced)}}\n\t<a href=\"?fam={{$.Family}}&b={{$.Bucket}}&rtraced=1\">[Traced/Summary]</a>\n\t{{else}}\n\t[Traced/Summary]\n\t{{end}}\n\t{{if or (not $.Expanded) (not $.Traced)}}\n\t<a href=\"?fam={{$.Family}}&b={{$.Bucket}}&exp=1&rtraced=1\">[Traced/Expanded]</a>\n        {{else}}\n\t[Traced/Expanded]\n\t{{end}}\n{{end}}\n\n{{if $.Total}}\n<p><em>Showing <b>{{len $.Traces}}</b> of <b>{{$.Total}}</b> traces.</em></p>\n{{end}}\n\n<table id=\"reqs\">\n\t<caption>\n\t\t{{if $.Active}}Active{{else}}Completed{{end}} Requests\n\t</caption>\n\t<tr><th>When</th><th>Elapsed&nbsp;(s)</th></tr>\n\t{{range $tr := $.Traces}}\n\t<tr class=\"first\">\n\t\t<td class=\"when\">{{$tr.When}}</td>\n\t\t<td class=\"elapsed\">{{$tr.ElapsedTime}}</td>\n\t\t<td>{{$tr.Title}}</td>\n\t\t{{/* TODO: include traceID/spanID */}}\n\t</tr>\n\t{{if $.Expanded}}\n\t{{range $tr.Events}}\n\t<tr>\n\t\t<td class=\"when\">{{.WhenString}}</td>\n\t\t<td class=\"elapsed\">{{elapsed .Elapsed}}</td>\n\t\t<td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}<em>[redacted]</em>{{end}}</td>\n\t</tr>\n\t{{end}}\n\t{{end}}\n\t{{end}}\n</table>\n{{end}} {{/* if $.Traces */}}\n\n{{if $.Histogram}}\n<h4>Latency (&micro;s) of {{$.Family}} over {{$.HistogramWindow}}</h4>\n{{$.Histogram}}\n{{end}} {{/* if $.Histogram */}}\n\n\t</body>\n</html>\n{{end}} {{/* end of Epilog */}}\n`\n"
  },
  {
    "path": "vendor/golang.org/x/net/trace/trace_go16.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.7\n\npackage trace\n\nimport \"golang.org/x/net/context\"\n\n// NewContext returns a copy of the parent context\n// and associates it with a Trace.\nfunc NewContext(ctx context.Context, tr Trace) context.Context {\n\treturn context.WithValue(ctx, contextKey, tr)\n}\n\n// FromContext returns the Trace bound to the context, if any.\nfunc FromContext(ctx context.Context) (tr Trace, ok bool) {\n\ttr, ok = ctx.Value(contextKey).(Trace)\n\treturn\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/trace/trace_go17.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.7\n\npackage trace\n\nimport \"context\"\n\n// NewContext returns a copy of the parent context\n// and associates it with a Trace.\nfunc NewContext(ctx context.Context, tr Trace) context.Context {\n\treturn context.WithValue(ctx, contextKey, tr)\n}\n\n// FromContext returns the Trace bound to the context, if any.\nfunc FromContext(ctx context.Context) (tr Trace, ok bool) {\n\ttr, ok = ctx.Value(contextKey).(Trace)\n\treturn\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/trace/trace_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage trace\n\nimport (\n\t\"net/http\"\n\t\"reflect\"\n\t\"testing\"\n)\n\ntype s struct{}\n\nfunc (s) String() string { return \"lazy string\" }\n\n// TestReset checks whether all the fields are zeroed after reset.\nfunc TestReset(t *testing.T) {\n\ttr := New(\"foo\", \"bar\")\n\ttr.LazyLog(s{}, false)\n\ttr.LazyPrintf(\"%d\", 1)\n\ttr.SetRecycler(func(_ interface{}) {})\n\ttr.SetTraceInfo(3, 4)\n\ttr.SetMaxEvents(100)\n\ttr.SetError()\n\ttr.Finish()\n\n\ttr.(*trace).reset()\n\n\tif !reflect.DeepEqual(tr, new(trace)) {\n\t\tt.Errorf(\"reset didn't clear all fields: %+v\", tr)\n\t}\n}\n\n// TestResetLog checks whether all the fields are zeroed after reset.\nfunc TestResetLog(t *testing.T) {\n\tel := NewEventLog(\"foo\", \"bar\")\n\tel.Printf(\"message\")\n\tel.Errorf(\"error\")\n\tel.Finish()\n\n\tel.(*eventLog).reset()\n\n\tif !reflect.DeepEqual(el, new(eventLog)) {\n\t\tt.Errorf(\"reset didn't clear all fields: %+v\", el)\n\t}\n}\n\nfunc TestAuthRequest(t *testing.T) {\n\ttestCases := []struct {\n\t\thost string\n\t\twant bool\n\t}{\n\t\t{host: \"192.168.23.1\", want: false},\n\t\t{host: \"192.168.23.1:8080\", want: false},\n\t\t{host: \"malformed remote addr\", want: false},\n\t\t{host: \"localhost\", want: true},\n\t\t{host: \"localhost:8080\", want: true},\n\t\t{host: \"127.0.0.1\", want: true},\n\t\t{host: \"127.0.0.1:8080\", want: true},\n\t\t{host: \"::1\", want: true},\n\t\t{host: \"[::1]:8080\", want: true},\n\t}\n\tfor _, tt := range testCases {\n\t\treq := &http.Request{RemoteAddr: tt.host}\n\t\tany, sensitive := AuthRequest(req)\n\t\tif any != tt.want || sensitive != tt.want {\n\t\t\tt.Errorf(\"AuthRequest(%q) = %t, %t; want %t, %t\", tt.host, any, sensitive, tt.want, tt.want)\n\t\t}\n\t}\n}\n\n// TestParseTemplate checks that all templates used by this package are 
valid\n// as they are parsed on first usage\nfunc TestParseTemplate(t *testing.T) {\n\tif tmpl := distTmpl(); tmpl == nil {\n\t\tt.Error(\"invalid template returned from distTmpl()\")\n\t}\n\tif tmpl := pageTmpl(); tmpl == nil {\n\t\tt.Error(\"invalid template returned from pageTmpl()\")\n\t}\n\tif tmpl := eventsTmpl(); tmpl == nil {\n\t\tt.Error(\"invalid template returned from eventsTmpl()\")\n\t}\n}\n\nfunc benchmarkTrace(b *testing.B, maxEvents, numEvents int) {\n\tnumSpans := (b.N + numEvents + 1) / numEvents\n\n\tfor i := 0; i < numSpans; i++ {\n\t\ttr := New(\"test\", \"test\")\n\t\ttr.SetMaxEvents(maxEvents)\n\t\tfor j := 0; j < numEvents; j++ {\n\t\t\ttr.LazyPrintf(\"%d\", j)\n\t\t}\n\t\ttr.Finish()\n\t}\n}\n\nfunc BenchmarkTrace_Default_2(b *testing.B) {\n\tbenchmarkTrace(b, 0, 2)\n}\n\nfunc BenchmarkTrace_Default_10(b *testing.B) {\n\tbenchmarkTrace(b, 0, 10)\n}\n\nfunc BenchmarkTrace_Default_100(b *testing.B) {\n\tbenchmarkTrace(b, 0, 100)\n}\n\nfunc BenchmarkTrace_Default_1000(b *testing.B) {\n\tbenchmarkTrace(b, 0, 1000)\n}\n\nfunc BenchmarkTrace_Default_10000(b *testing.B) {\n\tbenchmarkTrace(b, 0, 10000)\n}\n\nfunc BenchmarkTrace_10_2(b *testing.B) {\n\tbenchmarkTrace(b, 10, 2)\n}\n\nfunc BenchmarkTrace_10_10(b *testing.B) {\n\tbenchmarkTrace(b, 10, 10)\n}\n\nfunc BenchmarkTrace_10_100(b *testing.B) {\n\tbenchmarkTrace(b, 10, 100)\n}\n\nfunc BenchmarkTrace_10_1000(b *testing.B) {\n\tbenchmarkTrace(b, 10, 1000)\n}\n\nfunc BenchmarkTrace_10_10000(b *testing.B) {\n\tbenchmarkTrace(b, 10, 10000)\n}\n\nfunc BenchmarkTrace_100_2(b *testing.B) {\n\tbenchmarkTrace(b, 100, 2)\n}\n\nfunc BenchmarkTrace_100_10(b *testing.B) {\n\tbenchmarkTrace(b, 100, 10)\n}\n\nfunc BenchmarkTrace_100_100(b *testing.B) {\n\tbenchmarkTrace(b, 100, 100)\n}\n\nfunc BenchmarkTrace_100_1000(b *testing.B) {\n\tbenchmarkTrace(b, 100, 1000)\n}\n\nfunc BenchmarkTrace_100_10000(b *testing.B) {\n\tbenchmarkTrace(b, 100, 10000)\n}\n\nfunc BenchmarkTrace_1000_2(b *testing.B) 
{\n\tbenchmarkTrace(b, 1000, 2)\n}\n\nfunc BenchmarkTrace_1000_10(b *testing.B) {\n\tbenchmarkTrace(b, 1000, 10)\n}\n\nfunc BenchmarkTrace_1000_100(b *testing.B) {\n\tbenchmarkTrace(b, 1000, 100)\n}\n\nfunc BenchmarkTrace_1000_1000(b *testing.B) {\n\tbenchmarkTrace(b, 1000, 1000)\n}\n\nfunc BenchmarkTrace_1000_10000(b *testing.B) {\n\tbenchmarkTrace(b, 1000, 10000)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/file.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage webdav\n\nimport (\n\t\"encoding/xml\"\n\t\"io\"\n\t\"net/http\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n)\n\n// slashClean is equivalent to but slightly more efficient than\n// path.Clean(\"/\" + name).\nfunc slashClean(name string) string {\n\tif name == \"\" || name[0] != '/' {\n\t\tname = \"/\" + name\n\t}\n\treturn path.Clean(name)\n}\n\n// A FileSystem implements access to a collection of named files. The elements\n// in a file path are separated by slash ('/', U+002F) characters, regardless\n// of host operating system convention.\n//\n// Each method has the same semantics as the os package's function of the same\n// name.\n//\n// Note that the os.Rename documentation says that \"OS-specific restrictions\n// might apply\". 
In particular, whether or not renaming a file or directory\n// overwriting another existing file or directory is an error is OS-dependent.\ntype FileSystem interface {\n\tMkdir(ctx context.Context, name string, perm os.FileMode) error\n\tOpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error)\n\tRemoveAll(ctx context.Context, name string) error\n\tRename(ctx context.Context, oldName, newName string) error\n\tStat(ctx context.Context, name string) (os.FileInfo, error)\n}\n\n// A File is returned by a FileSystem's OpenFile method and can be served by a\n// Handler.\n//\n// A File may optionally implement the DeadPropsHolder interface, if it can\n// load and save dead properties.\ntype File interface {\n\thttp.File\n\tio.Writer\n}\n\n// A Dir implements FileSystem using the native file system restricted to a\n// specific directory tree.\n//\n// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's\n// string value is a filename on the native file system, not a URL, so it is\n// separated by filepath.Separator, which isn't necessarily '/'.\n//\n// An empty Dir is treated as \".\".\ntype Dir string\n\nfunc (d Dir) resolve(name string) string {\n\t// This implementation is based on Dir.Open's code in the standard net/http package.\n\tif filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||\n\t\tstrings.Contains(name, \"\\x00\") {\n\t\treturn \"\"\n\t}\n\tdir := string(d)\n\tif dir == \"\" {\n\t\tdir = \".\"\n\t}\n\treturn filepath.Join(dir, filepath.FromSlash(slashClean(name)))\n}\n\nfunc (d Dir) Mkdir(ctx context.Context, name string, perm os.FileMode) error {\n\tif name = d.resolve(name); name == \"\" {\n\t\treturn os.ErrNotExist\n\t}\n\treturn os.Mkdir(name, perm)\n}\n\nfunc (d Dir) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {\n\tif name = d.resolve(name); name == \"\" {\n\t\treturn nil, os.ErrNotExist\n\t}\n\tf, err := os.OpenFile(name, flag, 
perm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn f, nil\n}\n\nfunc (d Dir) RemoveAll(ctx context.Context, name string) error {\n\tif name = d.resolve(name); name == \"\" {\n\t\treturn os.ErrNotExist\n\t}\n\tif name == filepath.Clean(string(d)) {\n\t\t// Prohibit removing the virtual root directory.\n\t\treturn os.ErrInvalid\n\t}\n\treturn os.RemoveAll(name)\n}\n\nfunc (d Dir) Rename(ctx context.Context, oldName, newName string) error {\n\tif oldName = d.resolve(oldName); oldName == \"\" {\n\t\treturn os.ErrNotExist\n\t}\n\tif newName = d.resolve(newName); newName == \"\" {\n\t\treturn os.ErrNotExist\n\t}\n\tif root := filepath.Clean(string(d)); root == oldName || root == newName {\n\t\t// Prohibit renaming from or to the virtual root directory.\n\t\treturn os.ErrInvalid\n\t}\n\treturn os.Rename(oldName, newName)\n}\n\nfunc (d Dir) Stat(ctx context.Context, name string) (os.FileInfo, error) {\n\tif name = d.resolve(name); name == \"\" {\n\t\treturn nil, os.ErrNotExist\n\t}\n\treturn os.Stat(name)\n}\n\n// NewMemFS returns a new in-memory FileSystem implementation.\nfunc NewMemFS() FileSystem {\n\treturn &memFS{\n\t\troot: memFSNode{\n\t\t\tchildren: make(map[string]*memFSNode),\n\t\t\tmode:     0660 | os.ModeDir,\n\t\t\tmodTime:  time.Now(),\n\t\t},\n\t}\n}\n\n// A memFS implements FileSystem, storing all metadata and actual file data\n// in-memory. No limits on filesystem size are used, so it is not recommended\n// this be used where the clients are untrusted.\n//\n// Concurrent access is permitted. The tree structure is protected by a mutex,\n// and each node's contents and metadata are protected by a per-node mutex.\n//\n// TODO: Enforce file permissions.\ntype memFS struct {\n\tmu   sync.Mutex\n\troot memFSNode\n}\n\n// TODO: clean up and rationalize the walk/find code.\n\n// walk walks the directory tree for the fullname, calling f at each step. 
If f\n// returns an error, the walk will be aborted and return that same error.\n//\n// dir is the directory at that step, frag is the name fragment, and final is\n// whether it is the final step. For example, walking \"/foo/bar/x\" will result\n// in 3 calls to f:\n//   - \"/\", \"foo\", false\n//   - \"/foo/\", \"bar\", false\n//   - \"/foo/bar/\", \"x\", true\n// The frag argument will be empty only if dir is the root node and the walk\n// ends at that root node.\nfunc (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error {\n\toriginal := fullname\n\tfullname = slashClean(fullname)\n\n\t// Strip any leading \"/\"s to make fullname a relative path, as the walk\n\t// starts at fs.root.\n\tif fullname[0] == '/' {\n\t\tfullname = fullname[1:]\n\t}\n\tdir := &fs.root\n\n\tfor {\n\t\tfrag, remaining := fullname, \"\"\n\t\ti := strings.IndexRune(fullname, '/')\n\t\tfinal := i < 0\n\t\tif !final {\n\t\t\tfrag, remaining = fullname[:i], fullname[i+1:]\n\t\t}\n\t\tif frag == \"\" && dir != &fs.root {\n\t\t\tpanic(\"webdav: empty path fragment for a clean path\")\n\t\t}\n\t\tif err := f(dir, frag, final); err != nil {\n\t\t\treturn &os.PathError{\n\t\t\t\tOp:   op,\n\t\t\t\tPath: original,\n\t\t\t\tErr:  err,\n\t\t\t}\n\t\t}\n\t\tif final {\n\t\t\tbreak\n\t\t}\n\t\tchild := dir.children[frag]\n\t\tif child == nil {\n\t\t\treturn &os.PathError{\n\t\t\t\tOp:   op,\n\t\t\t\tPath: original,\n\t\t\t\tErr:  os.ErrNotExist,\n\t\t\t}\n\t\t}\n\t\tif !child.mode.IsDir() {\n\t\t\treturn &os.PathError{\n\t\t\t\tOp:   op,\n\t\t\t\tPath: original,\n\t\t\t\tErr:  os.ErrInvalid,\n\t\t\t}\n\t\t}\n\t\tdir, fullname = child, remaining\n\t}\n\treturn nil\n}\n\n// find returns the parent of the named node and the relative name fragment\n// from the parent to the child. 
For example, if finding \"/foo/bar/baz\" then\n// parent will be the node for \"/foo/bar\" and frag will be \"baz\".\n//\n// If the fullname names the root node, then parent, frag and err will be zero.\n//\n// find returns an error if the parent does not already exist or the parent\n// isn't a directory, but it will not return an error per se if the child does\n// not already exist. The error returned is either nil or an *os.PathError\n// whose Op is op.\nfunc (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) {\n\terr = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error {\n\t\tif !final {\n\t\t\treturn nil\n\t\t}\n\t\tif frag0 != \"\" {\n\t\t\tparent, frag = parent0, frag0\n\t\t}\n\t\treturn nil\n\t})\n\treturn parent, frag, err\n}\n\nfunc (fs *memFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\tdir, frag, err := fs.find(\"mkdir\", name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif dir == nil {\n\t\t// We can't create the root.\n\t\treturn os.ErrInvalid\n\t}\n\tif _, ok := dir.children[frag]; ok {\n\t\treturn os.ErrExist\n\t}\n\tdir.children[frag] = &memFSNode{\n\t\tchildren: make(map[string]*memFSNode),\n\t\tmode:     perm.Perm() | os.ModeDir,\n\t\tmodTime:  time.Now(),\n\t}\n\treturn nil\n}\n\nfunc (fs *memFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\tdir, frag, err := fs.find(\"open\", name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar n *memFSNode\n\tif dir == nil {\n\t\t// We're opening the root.\n\t\tif flag&(os.O_WRONLY|os.O_RDWR) != 0 {\n\t\t\treturn nil, os.ErrPermission\n\t\t}\n\t\tn, frag = &fs.root, \"/\"\n\n\t} else {\n\t\tn = dir.children[frag]\n\t\tif flag&(os.O_SYNC|os.O_APPEND) != 0 {\n\t\t\t// memFile doesn't support these flags yet.\n\t\t\treturn nil, os.ErrInvalid\n\t\t}\n\t\tif flag&os.O_CREATE != 0 {\n\t\t\tif 
flag&os.O_EXCL != 0 && n != nil {\n\t\t\t\treturn nil, os.ErrExist\n\t\t\t}\n\t\t\tif n == nil {\n\t\t\t\tn = &memFSNode{\n\t\t\t\t\tmode: perm.Perm(),\n\t\t\t\t}\n\t\t\t\tdir.children[frag] = n\n\t\t\t}\n\t\t}\n\t\tif n == nil {\n\t\t\treturn nil, os.ErrNotExist\n\t\t}\n\t\tif flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 {\n\t\t\tn.mu.Lock()\n\t\t\tn.data = nil\n\t\t\tn.mu.Unlock()\n\t\t}\n\t}\n\n\tchildren := make([]os.FileInfo, 0, len(n.children))\n\tfor cName, c := range n.children {\n\t\tchildren = append(children, c.stat(cName))\n\t}\n\treturn &memFile{\n\t\tn:                n,\n\t\tnameSnapshot:     frag,\n\t\tchildrenSnapshot: children,\n\t}, nil\n}\n\nfunc (fs *memFS) RemoveAll(ctx context.Context, name string) error {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\tdir, frag, err := fs.find(\"remove\", name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif dir == nil {\n\t\t// We can't remove the root.\n\t\treturn os.ErrInvalid\n\t}\n\tdelete(dir.children, frag)\n\treturn nil\n}\n\nfunc (fs *memFS) Rename(ctx context.Context, oldName, newName string) error {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\toldName = slashClean(oldName)\n\tnewName = slashClean(newName)\n\tif oldName == newName {\n\t\treturn nil\n\t}\n\tif strings.HasPrefix(newName, oldName+\"/\") {\n\t\t// We can't rename oldName to be a sub-directory of itself.\n\t\treturn os.ErrInvalid\n\t}\n\n\toDir, oFrag, err := fs.find(\"rename\", oldName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif oDir == nil {\n\t\t// We can't rename from the root.\n\t\treturn os.ErrInvalid\n\t}\n\n\tnDir, nFrag, err := fs.find(\"rename\", newName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif nDir == nil {\n\t\t// We can't rename to the root.\n\t\treturn os.ErrInvalid\n\t}\n\n\toNode, ok := oDir.children[oFrag]\n\tif !ok {\n\t\treturn os.ErrNotExist\n\t}\n\tif oNode.children != nil {\n\t\tif nNode, ok := nDir.children[nFrag]; ok {\n\t\t\tif nNode.children == nil {\n\t\t\t\treturn 
errNotADirectory\n\t\t\t}\n\t\t\tif len(nNode.children) != 0 {\n\t\t\t\treturn errDirectoryNotEmpty\n\t\t\t}\n\t\t}\n\t}\n\tdelete(oDir.children, oFrag)\n\tnDir.children[nFrag] = oNode\n\treturn nil\n}\n\nfunc (fs *memFS) Stat(ctx context.Context, name string) (os.FileInfo, error) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\tdir, frag, err := fs.find(\"stat\", name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif dir == nil {\n\t\t// We're stat'ting the root.\n\t\treturn fs.root.stat(\"/\"), nil\n\t}\n\tif n, ok := dir.children[frag]; ok {\n\t\treturn n.stat(path.Base(name)), nil\n\t}\n\treturn nil, os.ErrNotExist\n}\n\n// A memFSNode represents a single entry in the in-memory filesystem and also\n// implements os.FileInfo.\ntype memFSNode struct {\n\t// children is protected by memFS.mu.\n\tchildren map[string]*memFSNode\n\n\tmu        sync.Mutex\n\tdata      []byte\n\tmode      os.FileMode\n\tmodTime   time.Time\n\tdeadProps map[xml.Name]Property\n}\n\nfunc (n *memFSNode) stat(name string) *memFileInfo {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\treturn &memFileInfo{\n\t\tname:    name,\n\t\tsize:    int64(len(n.data)),\n\t\tmode:    n.mode,\n\t\tmodTime: n.modTime,\n\t}\n}\n\nfunc (n *memFSNode) DeadProps() (map[xml.Name]Property, error) {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\tif len(n.deadProps) == 0 {\n\t\treturn nil, nil\n\t}\n\tret := make(map[xml.Name]Property, len(n.deadProps))\n\tfor k, v := range n.deadProps {\n\t\tret[k] = v\n\t}\n\treturn ret, nil\n}\n\nfunc (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\tpstat := Propstat{Status: http.StatusOK}\n\tfor _, patch := range patches {\n\t\tfor _, p := range patch.Props {\n\t\t\tpstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})\n\t\t\tif patch.Remove {\n\t\t\t\tdelete(n.deadProps, p.XMLName)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif n.deadProps == nil {\n\t\t\t\tn.deadProps = map[xml.Name]Property{}\n\t\t\t}\n\t\t\tn.deadProps[p.XMLName] 
= p\n\t\t}\n\t}\n\treturn []Propstat{pstat}, nil\n}\n\ntype memFileInfo struct {\n\tname    string\n\tsize    int64\n\tmode    os.FileMode\n\tmodTime time.Time\n}\n\nfunc (f *memFileInfo) Name() string       { return f.name }\nfunc (f *memFileInfo) Size() int64        { return f.size }\nfunc (f *memFileInfo) Mode() os.FileMode  { return f.mode }\nfunc (f *memFileInfo) ModTime() time.Time { return f.modTime }\nfunc (f *memFileInfo) IsDir() bool        { return f.mode.IsDir() }\nfunc (f *memFileInfo) Sys() interface{}   { return nil }\n\n// A memFile is a File implementation for a memFSNode. It is a per-file (not\n// per-node) read/write position, and a snapshot of the memFS' tree structure\n// (a node's name and children) for that node.\ntype memFile struct {\n\tn                *memFSNode\n\tnameSnapshot     string\n\tchildrenSnapshot []os.FileInfo\n\t// pos is protected by n.mu.\n\tpos int\n}\n\n// A *memFile implements the optional DeadPropsHolder interface.\nvar _ DeadPropsHolder = (*memFile)(nil)\n\nfunc (f *memFile) DeadProps() (map[xml.Name]Property, error)     { return f.n.DeadProps() }\nfunc (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) }\n\nfunc (f *memFile) Close() error {\n\treturn nil\n}\n\nfunc (f *memFile) Read(p []byte) (int, error) {\n\tf.n.mu.Lock()\n\tdefer f.n.mu.Unlock()\n\tif f.n.mode.IsDir() {\n\t\treturn 0, os.ErrInvalid\n\t}\n\tif f.pos >= len(f.n.data) {\n\t\treturn 0, io.EOF\n\t}\n\tn := copy(p, f.n.data[f.pos:])\n\tf.pos += n\n\treturn n, nil\n}\n\nfunc (f *memFile) Readdir(count int) ([]os.FileInfo, error) {\n\tf.n.mu.Lock()\n\tdefer f.n.mu.Unlock()\n\tif !f.n.mode.IsDir() {\n\t\treturn nil, os.ErrInvalid\n\t}\n\told := f.pos\n\tif old >= len(f.childrenSnapshot) {\n\t\t// The os.File Readdir docs say that at the end of a directory,\n\t\t// the error is io.EOF if count > 0 and nil if count <= 0.\n\t\tif count > 0 {\n\t\t\treturn nil, io.EOF\n\t\t}\n\t\treturn nil, nil\n\t}\n\tif count > 0 
{\n\t\tf.pos += count\n\t\tif f.pos > len(f.childrenSnapshot) {\n\t\t\tf.pos = len(f.childrenSnapshot)\n\t\t}\n\t} else {\n\t\tf.pos = len(f.childrenSnapshot)\n\t\told = 0\n\t}\n\treturn f.childrenSnapshot[old:f.pos], nil\n}\n\nfunc (f *memFile) Seek(offset int64, whence int) (int64, error) {\n\tf.n.mu.Lock()\n\tdefer f.n.mu.Unlock()\n\tnpos := f.pos\n\t// TODO: How to handle offsets greater than the size of system int?\n\tswitch whence {\n\tcase os.SEEK_SET:\n\t\tnpos = int(offset)\n\tcase os.SEEK_CUR:\n\t\tnpos += int(offset)\n\tcase os.SEEK_END:\n\t\tnpos = len(f.n.data) + int(offset)\n\tdefault:\n\t\tnpos = -1\n\t}\n\tif npos < 0 {\n\t\treturn 0, os.ErrInvalid\n\t}\n\tf.pos = npos\n\treturn int64(f.pos), nil\n}\n\nfunc (f *memFile) Stat() (os.FileInfo, error) {\n\treturn f.n.stat(f.nameSnapshot), nil\n}\n\nfunc (f *memFile) Write(p []byte) (int, error) {\n\tlenp := len(p)\n\tf.n.mu.Lock()\n\tdefer f.n.mu.Unlock()\n\n\tif f.n.mode.IsDir() {\n\t\treturn 0, os.ErrInvalid\n\t}\n\tif f.pos < len(f.n.data) {\n\t\tn := copy(f.n.data[f.pos:], p)\n\t\tf.pos += n\n\t\tp = p[n:]\n\t} else if f.pos > len(f.n.data) {\n\t\t// Write permits the creation of holes, if we've seek'ed past the\n\t\t// existing end of file.\n\t\tif f.pos <= cap(f.n.data) {\n\t\t\toldLen := len(f.n.data)\n\t\t\tf.n.data = f.n.data[:f.pos]\n\t\t\thole := f.n.data[oldLen:]\n\t\t\tfor i := range hole {\n\t\t\t\thole[i] = 0\n\t\t\t}\n\t\t} else {\n\t\t\td := make([]byte, f.pos, f.pos+len(p))\n\t\t\tcopy(d, f.n.data)\n\t\t\tf.n.data = d\n\t\t}\n\t}\n\n\tif len(p) > 0 {\n\t\t// We should only get here if f.pos == len(f.n.data).\n\t\tf.n.data = append(f.n.data, p...)\n\t\tf.pos = len(f.n.data)\n\t}\n\tf.n.modTime = time.Now()\n\treturn lenp, nil\n}\n\n// moveFiles moves files and/or directories from src to dst.\n//\n// See section 9.9.4 for when various HTTP status codes apply.\nfunc moveFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool) (status int, err error) {\n\tcreated := 
false\n\tif _, err := fs.Stat(ctx, dst); err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn http.StatusForbidden, err\n\t\t}\n\t\tcreated = true\n\t} else if overwrite {\n\t\t// Section 9.9.3 says that \"If a resource exists at the destination\n\t\t// and the Overwrite header is \"T\", then prior to performing the move,\n\t\t// the server must perform a DELETE with \"Depth: infinity\" on the\n\t\t// destination resource.\n\t\tif err := fs.RemoveAll(ctx, dst); err != nil {\n\t\t\treturn http.StatusForbidden, err\n\t\t}\n\t} else {\n\t\treturn http.StatusPreconditionFailed, os.ErrExist\n\t}\n\tif err := fs.Rename(ctx, src, dst); err != nil {\n\t\treturn http.StatusForbidden, err\n\t}\n\tif created {\n\t\treturn http.StatusCreated, nil\n\t}\n\treturn http.StatusNoContent, nil\n}\n\nfunc copyProps(dst, src File) error {\n\td, ok := dst.(DeadPropsHolder)\n\tif !ok {\n\t\treturn nil\n\t}\n\ts, ok := src.(DeadPropsHolder)\n\tif !ok {\n\t\treturn nil\n\t}\n\tm, err := s.DeadProps()\n\tif err != nil {\n\t\treturn err\n\t}\n\tprops := make([]Property, 0, len(m))\n\tfor _, prop := range m {\n\t\tprops = append(props, prop)\n\t}\n\t_, err = d.Patch([]Proppatch{{Props: props}})\n\treturn err\n}\n\n// copyFiles copies files and/or directories from src to dst.\n//\n// See section 9.8.5 for when various HTTP status codes apply.\nfunc copyFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) {\n\tif recursion == 1000 {\n\t\treturn http.StatusInternalServerError, errRecursionTooDeep\n\t}\n\trecursion++\n\n\t// TODO: section 9.8.3 says that \"Note that an infinite-depth COPY of /A/\n\t// into /A/B/ could lead to infinite recursion if not handled correctly.\"\n\n\tsrcFile, err := fs.OpenFile(ctx, src, os.O_RDONLY, 0)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn http.StatusNotFound, err\n\t\t}\n\t\treturn http.StatusInternalServerError, err\n\t}\n\tdefer srcFile.Close()\n\tsrcStat, err := 
srcFile.Stat()\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn http.StatusNotFound, err\n\t\t}\n\t\treturn http.StatusInternalServerError, err\n\t}\n\tsrcPerm := srcStat.Mode() & os.ModePerm\n\n\tcreated := false\n\tif _, err := fs.Stat(ctx, dst); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tcreated = true\n\t\t} else {\n\t\t\treturn http.StatusForbidden, err\n\t\t}\n\t} else {\n\t\tif !overwrite {\n\t\t\treturn http.StatusPreconditionFailed, os.ErrExist\n\t\t}\n\t\tif err := fs.RemoveAll(ctx, dst); err != nil && !os.IsNotExist(err) {\n\t\t\treturn http.StatusForbidden, err\n\t\t}\n\t}\n\n\tif srcStat.IsDir() {\n\t\tif err := fs.Mkdir(ctx, dst, srcPerm); err != nil {\n\t\t\treturn http.StatusForbidden, err\n\t\t}\n\t\tif depth == infiniteDepth {\n\t\t\tchildren, err := srcFile.Readdir(-1)\n\t\t\tif err != nil {\n\t\t\t\treturn http.StatusForbidden, err\n\t\t\t}\n\t\t\tfor _, c := range children {\n\t\t\t\tname := c.Name()\n\t\t\t\ts := path.Join(src, name)\n\t\t\t\td := path.Join(dst, name)\n\t\t\t\tcStatus, cErr := copyFiles(ctx, fs, s, d, overwrite, depth, recursion)\n\t\t\t\tif cErr != nil {\n\t\t\t\t\t// TODO: MultiStatus.\n\t\t\t\t\treturn cStatus, cErr\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t} else {\n\t\tdstFile, err := fs.OpenFile(ctx, dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm)\n\t\tif err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\treturn http.StatusConflict, err\n\t\t\t}\n\t\t\treturn http.StatusForbidden, err\n\n\t\t}\n\t\t_, copyErr := io.Copy(dstFile, srcFile)\n\t\tpropsErr := copyProps(dstFile, srcFile)\n\t\tcloseErr := dstFile.Close()\n\t\tif copyErr != nil {\n\t\t\treturn http.StatusInternalServerError, copyErr\n\t\t}\n\t\tif propsErr != nil {\n\t\t\treturn http.StatusInternalServerError, propsErr\n\t\t}\n\t\tif closeErr != nil {\n\t\t\treturn http.StatusInternalServerError, closeErr\n\t\t}\n\t}\n\n\tif created {\n\t\treturn http.StatusCreated, nil\n\t}\n\treturn http.StatusNoContent, nil\n}\n\n// walkFS traverses filesystem fs 
starting at name up to depth levels.\n//\n// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node,\n// walkFS calls walkFn. If a visited file system node is a directory and\n// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node.\nfunc walkFS(ctx context.Context, fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error {\n\t// This implementation is based on Walk's code in the standard path/filepath package.\n\terr := walkFn(name, info, nil)\n\tif err != nil {\n\t\tif info.IsDir() && err == filepath.SkipDir {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tif !info.IsDir() || depth == 0 {\n\t\treturn nil\n\t}\n\tif depth == 1 {\n\t\tdepth = 0\n\t}\n\n\t// Read directory names.\n\tf, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn walkFn(name, info, err)\n\t}\n\tfileInfos, err := f.Readdir(0)\n\tf.Close()\n\tif err != nil {\n\t\treturn walkFn(name, info, err)\n\t}\n\n\tfor _, fileInfo := range fileInfos {\n\t\tfilename := path.Join(name, fileInfo.Name())\n\t\tfileInfo, err := fs.Stat(ctx, filename)\n\t\tif err != nil {\n\t\t\tif err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\terr = walkFS(ctx, fs, depth, filename, fileInfo, walkFn)\n\t\t\tif err != nil {\n\t\t\t\tif !fileInfo.IsDir() || err != filepath.SkipDir {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/file_go1.6.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !go1.7\n\npackage webdav\n\nimport (\n\t\"net/http\"\n\n\t\"golang.org/x/net/context\"\n)\n\nfunc getContext(r *http.Request) context.Context {\n\treturn context.Background()\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/file_go1.7.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build go1.7\n\npackage webdav\n\nimport (\n\t\"context\"\n\t\"net/http\"\n)\n\nfunc getContext(r *http.Request) context.Context {\n\treturn r.Context()\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/file_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage webdav\n\nimport (\n\t\"encoding/xml\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang.org/x/net/context\"\n)\n\nfunc TestSlashClean(t *testing.T) {\n\ttestCases := []string{\n\t\t\"\",\n\t\t\".\",\n\t\t\"/\",\n\t\t\"/./\",\n\t\t\"//\",\n\t\t\"//.\",\n\t\t\"//a\",\n\t\t\"/a\",\n\t\t\"/a/b/c\",\n\t\t\"/a//b/./../c/d/\",\n\t\t\"a\",\n\t\t\"a/b/c\",\n\t}\n\tfor _, tc := range testCases {\n\t\tgot := slashClean(tc)\n\t\twant := path.Clean(\"/\" + tc)\n\t\tif got != want {\n\t\t\tt.Errorf(\"tc=%q: got %q, want %q\", tc, got, want)\n\t\t}\n\t}\n}\n\nfunc TestDirResolve(t *testing.T) {\n\ttestCases := []struct {\n\t\tdir, name, want string\n\t}{\n\t\t{\"/\", \"\", \"/\"},\n\t\t{\"/\", \"/\", \"/\"},\n\t\t{\"/\", \".\", \"/\"},\n\t\t{\"/\", \"./a\", \"/a\"},\n\t\t{\"/\", \"..\", \"/\"},\n\t\t{\"/\", \"..\", \"/\"},\n\t\t{\"/\", \"../\", \"/\"},\n\t\t{\"/\", \"../.\", \"/\"},\n\t\t{\"/\", \"../a\", \"/a\"},\n\t\t{\"/\", \"../..\", \"/\"},\n\t\t{\"/\", \"../bar/a\", \"/bar/a\"},\n\t\t{\"/\", \"../baz/a\", \"/baz/a\"},\n\t\t{\"/\", \"...\", \"/...\"},\n\t\t{\"/\", \".../a\", \"/.../a\"},\n\t\t{\"/\", \".../..\", \"/\"},\n\t\t{\"/\", \"a\", \"/a\"},\n\t\t{\"/\", \"a/./b\", \"/a/b\"},\n\t\t{\"/\", \"a/../../b\", \"/b\"},\n\t\t{\"/\", \"a/../b\", \"/b\"},\n\t\t{\"/\", \"a/b\", \"/a/b\"},\n\t\t{\"/\", \"a/b/c/../../d\", \"/a/d\"},\n\t\t{\"/\", \"a/b/c/../../../d\", \"/d\"},\n\t\t{\"/\", \"a/b/c/../../../../d\", \"/d\"},\n\t\t{\"/\", \"a/b/c/d\", \"/a/b/c/d\"},\n\n\t\t{\"/foo/bar\", \"\", \"/foo/bar\"},\n\t\t{\"/foo/bar\", \"/\", \"/foo/bar\"},\n\t\t{\"/foo/bar\", \".\", \"/foo/bar\"},\n\t\t{\"/foo/bar\", \"./a\", \"/foo/bar/a\"},\n\t\t{\"/foo/bar\", \"..\", 
\"/foo/bar\"},\n\t\t{\"/foo/bar\", \"../\", \"/foo/bar\"},\n\t\t{\"/foo/bar\", \"../.\", \"/foo/bar\"},\n\t\t{\"/foo/bar\", \"../a\", \"/foo/bar/a\"},\n\t\t{\"/foo/bar\", \"../..\", \"/foo/bar\"},\n\t\t{\"/foo/bar\", \"../bar/a\", \"/foo/bar/bar/a\"},\n\t\t{\"/foo/bar\", \"../baz/a\", \"/foo/bar/baz/a\"},\n\t\t{\"/foo/bar\", \"...\", \"/foo/bar/...\"},\n\t\t{\"/foo/bar\", \".../a\", \"/foo/bar/.../a\"},\n\t\t{\"/foo/bar\", \".../..\", \"/foo/bar\"},\n\t\t{\"/foo/bar\", \"a\", \"/foo/bar/a\"},\n\t\t{\"/foo/bar\", \"a/./b\", \"/foo/bar/a/b\"},\n\t\t{\"/foo/bar\", \"a/../../b\", \"/foo/bar/b\"},\n\t\t{\"/foo/bar\", \"a/../b\", \"/foo/bar/b\"},\n\t\t{\"/foo/bar\", \"a/b\", \"/foo/bar/a/b\"},\n\t\t{\"/foo/bar\", \"a/b/c/../../d\", \"/foo/bar/a/d\"},\n\t\t{\"/foo/bar\", \"a/b/c/../../../d\", \"/foo/bar/d\"},\n\t\t{\"/foo/bar\", \"a/b/c/../../../../d\", \"/foo/bar/d\"},\n\t\t{\"/foo/bar\", \"a/b/c/d\", \"/foo/bar/a/b/c/d\"},\n\n\t\t{\"/foo/bar/\", \"\", \"/foo/bar\"},\n\t\t{\"/foo/bar/\", \"/\", \"/foo/bar\"},\n\t\t{\"/foo/bar/\", \".\", \"/foo/bar\"},\n\t\t{\"/foo/bar/\", \"./a\", \"/foo/bar/a\"},\n\t\t{\"/foo/bar/\", \"..\", \"/foo/bar\"},\n\n\t\t{\"/foo//bar///\", \"\", \"/foo/bar\"},\n\t\t{\"/foo//bar///\", \"/\", \"/foo/bar\"},\n\t\t{\"/foo//bar///\", \".\", \"/foo/bar\"},\n\t\t{\"/foo//bar///\", \"./a\", \"/foo/bar/a\"},\n\t\t{\"/foo//bar///\", \"..\", \"/foo/bar\"},\n\n\t\t{\"/x/y/z\", \"ab/c\\x00d/ef\", \"\"},\n\n\t\t{\".\", \"\", \".\"},\n\t\t{\".\", \"/\", \".\"},\n\t\t{\".\", \".\", \".\"},\n\t\t{\".\", \"./a\", \"a\"},\n\t\t{\".\", \"..\", \".\"},\n\t\t{\".\", \"..\", \".\"},\n\t\t{\".\", \"../\", \".\"},\n\t\t{\".\", \"../.\", \".\"},\n\t\t{\".\", \"../a\", \"a\"},\n\t\t{\".\", \"../..\", \".\"},\n\t\t{\".\", \"../bar/a\", \"bar/a\"},\n\t\t{\".\", \"../baz/a\", \"baz/a\"},\n\t\t{\".\", \"...\", \"...\"},\n\t\t{\".\", \".../a\", \".../a\"},\n\t\t{\".\", \".../..\", \".\"},\n\t\t{\".\", \"a\", \"a\"},\n\t\t{\".\", \"a/./b\", \"a/b\"},\n\t\t{\".\", 
\"a/../../b\", \"b\"},\n\t\t{\".\", \"a/../b\", \"b\"},\n\t\t{\".\", \"a/b\", \"a/b\"},\n\t\t{\".\", \"a/b/c/../../d\", \"a/d\"},\n\t\t{\".\", \"a/b/c/../../../d\", \"d\"},\n\t\t{\".\", \"a/b/c/../../../../d\", \"d\"},\n\t\t{\".\", \"a/b/c/d\", \"a/b/c/d\"},\n\n\t\t{\"\", \"\", \".\"},\n\t\t{\"\", \"/\", \".\"},\n\t\t{\"\", \".\", \".\"},\n\t\t{\"\", \"./a\", \"a\"},\n\t\t{\"\", \"..\", \".\"},\n\t}\n\n\tfor _, tc := range testCases {\n\t\td := Dir(filepath.FromSlash(tc.dir))\n\t\tif got := filepath.ToSlash(d.resolve(tc.name)); got != tc.want {\n\t\t\tt.Errorf(\"dir=%q, name=%q: got %q, want %q\", tc.dir, tc.name, got, tc.want)\n\t\t}\n\t}\n}\n\nfunc TestWalk(t *testing.T) {\n\ttype walkStep struct {\n\t\tname, frag string\n\t\tfinal      bool\n\t}\n\n\ttestCases := []struct {\n\t\tdir  string\n\t\twant []walkStep\n\t}{\n\t\t{\"\", []walkStep{\n\t\t\t{\"\", \"\", true},\n\t\t}},\n\t\t{\"/\", []walkStep{\n\t\t\t{\"\", \"\", true},\n\t\t}},\n\t\t{\"/a\", []walkStep{\n\t\t\t{\"\", \"a\", true},\n\t\t}},\n\t\t{\"/a/\", []walkStep{\n\t\t\t{\"\", \"a\", true},\n\t\t}},\n\t\t{\"/a/b\", []walkStep{\n\t\t\t{\"\", \"a\", false},\n\t\t\t{\"a\", \"b\", true},\n\t\t}},\n\t\t{\"/a/b/\", []walkStep{\n\t\t\t{\"\", \"a\", false},\n\t\t\t{\"a\", \"b\", true},\n\t\t}},\n\t\t{\"/a/b/c\", []walkStep{\n\t\t\t{\"\", \"a\", false},\n\t\t\t{\"a\", \"b\", false},\n\t\t\t{\"b\", \"c\", true},\n\t\t}},\n\t\t// The following test case is the one mentioned explicitly\n\t\t// in the method description.\n\t\t{\"/foo/bar/x\", []walkStep{\n\t\t\t{\"\", \"foo\", false},\n\t\t\t{\"foo\", \"bar\", false},\n\t\t\t{\"bar\", \"x\", true},\n\t\t}},\n\t}\n\n\tctx := context.Background()\n\n\tfor _, tc := range testCases {\n\t\tfs := NewMemFS().(*memFS)\n\n\t\tparts := strings.Split(tc.dir, \"/\")\n\t\tfor p := 2; p < len(parts); p++ {\n\t\t\td := strings.Join(parts[:p], \"/\")\n\t\t\tif err := fs.Mkdir(ctx, d, 0666); err != nil {\n\t\t\t\tt.Errorf(\"tc.dir=%q: mkdir: %q: %v\", tc.dir, d, 
err)\n\t\t\t}\n\t\t}\n\n\t\ti, prevFrag := 0, \"\"\n\t\terr := fs.walk(\"test\", tc.dir, func(dir *memFSNode, frag string, final bool) error {\n\t\t\tgot := walkStep{\n\t\t\t\tname:  prevFrag,\n\t\t\t\tfrag:  frag,\n\t\t\t\tfinal: final,\n\t\t\t}\n\t\t\twant := tc.want[i]\n\n\t\t\tif got != want {\n\t\t\t\treturn fmt.Errorf(\"got %+v, want %+v\", got, want)\n\t\t\t}\n\t\t\ti, prevFrag = i+1, frag\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Errorf(\"tc.dir=%q: %v\", tc.dir, err)\n\t\t}\n\t}\n}\n\n// find appends to ss the names of the named file and its children. It is\n// analogous to the Unix find command.\n//\n// The returned strings are not guaranteed to be in any particular order.\nfunc find(ctx context.Context, ss []string, fs FileSystem, name string) ([]string, error) {\n\tstat, err := fs.Stat(ctx, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tss = append(ss, name)\n\tif stat.IsDir() {\n\t\tf, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer f.Close()\n\t\tchildren, err := f.Readdir(-1)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, c := range children {\n\t\t\tss, err = find(ctx, ss, fs, path.Join(name, c.Name()))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\treturn ss, nil\n}\n\nfunc testFS(t *testing.T, fs FileSystem) {\n\terrStr := func(err error) string {\n\t\tswitch {\n\t\tcase os.IsExist(err):\n\t\t\treturn \"errExist\"\n\t\tcase os.IsNotExist(err):\n\t\t\treturn \"errNotExist\"\n\t\tcase err != nil:\n\t\t\treturn \"err\"\n\t\t}\n\t\treturn \"ok\"\n\t}\n\n\t// The non-\"find\" non-\"stat\" test cases should change the file system state. 
The\n\t// indentation of the \"find\"s and \"stat\"s helps distinguish such test cases.\n\ttestCases := []string{\n\t\t\"  stat / want dir\",\n\t\t\"  stat /a want errNotExist\",\n\t\t\"  stat /d want errNotExist\",\n\t\t\"  stat /d/e want errNotExist\",\n\t\t\"create /a A want ok\",\n\t\t\"  stat /a want 1\",\n\t\t\"create /d/e EEE want errNotExist\",\n\t\t\"mk-dir /a want errExist\",\n\t\t\"mk-dir /d/m want errNotExist\",\n\t\t\"mk-dir /d want ok\",\n\t\t\"  stat /d want dir\",\n\t\t\"create /d/e EEE want ok\",\n\t\t\"  stat /d/e want 3\",\n\t\t\"  find / /a /d /d/e\",\n\t\t\"create /d/f FFFF want ok\",\n\t\t\"create /d/g GGGGGGG want ok\",\n\t\t\"mk-dir /d/m want ok\",\n\t\t\"mk-dir /d/m want errExist\",\n\t\t\"create /d/m/p PPPPP want ok\",\n\t\t\"  stat /d/e want 3\",\n\t\t\"  stat /d/f want 4\",\n\t\t\"  stat /d/g want 7\",\n\t\t\"  stat /d/h want errNotExist\",\n\t\t\"  stat /d/m want dir\",\n\t\t\"  stat /d/m/p want 5\",\n\t\t\"  find / /a /d /d/e /d/f /d/g /d/m /d/m/p\",\n\t\t\"rm-all /d want ok\",\n\t\t\"  stat /a want 1\",\n\t\t\"  stat /d want errNotExist\",\n\t\t\"  stat /d/e want errNotExist\",\n\t\t\"  stat /d/f want errNotExist\",\n\t\t\"  stat /d/g want errNotExist\",\n\t\t\"  stat /d/m want errNotExist\",\n\t\t\"  stat /d/m/p want errNotExist\",\n\t\t\"  find / /a\",\n\t\t\"mk-dir /d/m want errNotExist\",\n\t\t\"mk-dir /d want ok\",\n\t\t\"create /d/f FFFF want ok\",\n\t\t\"rm-all /d/f want ok\",\n\t\t\"mk-dir /d/m want ok\",\n\t\t\"rm-all /z want ok\",\n\t\t\"rm-all / want err\",\n\t\t\"create /b BB want ok\",\n\t\t\"  stat / want dir\",\n\t\t\"  stat /a want 1\",\n\t\t\"  stat /b want 2\",\n\t\t\"  stat /c want errNotExist\",\n\t\t\"  stat /d want dir\",\n\t\t\"  stat /d/m want dir\",\n\t\t\"  find / /a /b /d /d/m\",\n\t\t\"move__ o=F /b /c want ok\",\n\t\t\"  stat /b want errNotExist\",\n\t\t\"  stat /c want 2\",\n\t\t\"  stat /d/m want dir\",\n\t\t\"  stat /d/n want errNotExist\",\n\t\t\"  find / /a /c /d /d/m\",\n\t\t\"move__ o=F /d/m /d/n 
want ok\",\n\t\t\"create /d/n/q QQQQ want ok\",\n\t\t\"  stat /d/m want errNotExist\",\n\t\t\"  stat /d/n want dir\",\n\t\t\"  stat /d/n/q want 4\",\n\t\t\"move__ o=F /d /d/n/z want err\",\n\t\t\"move__ o=T /c /d/n/q want ok\",\n\t\t\"  stat /c want errNotExist\",\n\t\t\"  stat /d/n/q want 2\",\n\t\t\"  find / /a /d /d/n /d/n/q\",\n\t\t\"create /d/n/r RRRRR want ok\",\n\t\t\"mk-dir /u want ok\",\n\t\t\"mk-dir /u/v want ok\",\n\t\t\"move__ o=F /d/n /u want errExist\",\n\t\t\"create /t TTTTTT want ok\",\n\t\t\"move__ o=F /d/n /t want errExist\",\n\t\t\"rm-all /t want ok\",\n\t\t\"move__ o=F /d/n /t want ok\",\n\t\t\"  stat /d want dir\",\n\t\t\"  stat /d/n want errNotExist\",\n\t\t\"  stat /d/n/r want errNotExist\",\n\t\t\"  stat /t want dir\",\n\t\t\"  stat /t/q want 2\",\n\t\t\"  stat /t/r want 5\",\n\t\t\"  find / /a /d /t /t/q /t/r /u /u/v\",\n\t\t\"move__ o=F /t / want errExist\",\n\t\t\"move__ o=T /t /u/v want ok\",\n\t\t\"  stat /u/v/r want 5\",\n\t\t\"move__ o=F / /z want err\",\n\t\t\"  find / /a /d /u /u/v /u/v/q /u/v/r\",\n\t\t\"  stat /a want 1\",\n\t\t\"  stat /b want errNotExist\",\n\t\t\"  stat /c want errNotExist\",\n\t\t\"  stat /u/v/r want 5\",\n\t\t\"copy__ o=F d=0 /a /b want ok\",\n\t\t\"copy__ o=T d=0 /a /c want ok\",\n\t\t\"  stat /a want 1\",\n\t\t\"  stat /b want 1\",\n\t\t\"  stat /c want 1\",\n\t\t\"  stat /u/v/r want 5\",\n\t\t\"copy__ o=F d=0 /u/v/r /b want errExist\",\n\t\t\"  stat /b want 1\",\n\t\t\"copy__ o=T d=0 /u/v/r /b want ok\",\n\t\t\"  stat /a want 1\",\n\t\t\"  stat /b want 5\",\n\t\t\"  stat /u/v/r want 5\",\n\t\t\"rm-all /a want ok\",\n\t\t\"rm-all /b want ok\",\n\t\t\"mk-dir /u/v/w want ok\",\n\t\t\"create /u/v/w/s SSSSSSSS want ok\",\n\t\t\"  stat /d want dir\",\n\t\t\"  stat /d/x want errNotExist\",\n\t\t\"  stat /d/y want errNotExist\",\n\t\t\"  stat /u/v/r want 5\",\n\t\t\"  stat /u/v/w/s want 8\",\n\t\t\"  find / /c /d /u /u/v /u/v/q /u/v/r /u/v/w /u/v/w/s\",\n\t\t\"copy__ o=T d=0 /u/v /d/x want ok\",\n\t\t\"copy__ o=T 
d=∞ /u/v /d/y want ok\",\n\t\t\"rm-all /u want ok\",\n\t\t\"  stat /d/x want dir\",\n\t\t\"  stat /d/x/q want errNotExist\",\n\t\t\"  stat /d/x/r want errNotExist\",\n\t\t\"  stat /d/x/w want errNotExist\",\n\t\t\"  stat /d/x/w/s want errNotExist\",\n\t\t\"  stat /d/y want dir\",\n\t\t\"  stat /d/y/q want 2\",\n\t\t\"  stat /d/y/r want 5\",\n\t\t\"  stat /d/y/w want dir\",\n\t\t\"  stat /d/y/w/s want 8\",\n\t\t\"  stat /u want errNotExist\",\n\t\t\"  find / /c /d /d/x /d/y /d/y/q /d/y/r /d/y/w /d/y/w/s\",\n\t\t\"copy__ o=F d=∞ /d/y /d/x want errExist\",\n\t}\n\n\tctx := context.Background()\n\n\tfor i, tc := range testCases {\n\t\ttc = strings.TrimSpace(tc)\n\t\tj := strings.IndexByte(tc, ' ')\n\t\tif j < 0 {\n\t\t\tt.Fatalf(\"test case #%d %q: invalid command\", i, tc)\n\t\t}\n\t\top, arg := tc[:j], tc[j+1:]\n\n\t\tswitch op {\n\t\tdefault:\n\t\t\tt.Fatalf(\"test case #%d %q: invalid operation %q\", i, tc, op)\n\n\t\tcase \"create\":\n\t\t\tparts := strings.Split(arg, \" \")\n\t\t\tif len(parts) != 4 || parts[2] != \"want\" {\n\t\t\t\tt.Fatalf(\"test case #%d %q: invalid write\", i, tc)\n\t\t\t}\n\t\t\tf, opErr := fs.OpenFile(ctx, parts[0], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)\n\t\t\tif got := errStr(opErr); got != parts[3] {\n\t\t\t\tt.Fatalf(\"test case #%d %q: OpenFile: got %q (%v), want %q\", i, tc, got, opErr, parts[3])\n\t\t\t}\n\t\t\tif f != nil {\n\t\t\t\tif _, err := f.Write([]byte(parts[1])); err != nil {\n\t\t\t\t\tt.Fatalf(\"test case #%d %q: Write: %v\", i, tc, err)\n\t\t\t\t}\n\t\t\t\tif err := f.Close(); err != nil {\n\t\t\t\t\tt.Fatalf(\"test case #%d %q: Close: %v\", i, tc, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"find\":\n\t\t\tgot, err := find(ctx, nil, fs, \"/\")\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"test case #%d %q: find: %v\", i, tc, err)\n\t\t\t}\n\t\t\tsort.Strings(got)\n\t\t\twant := strings.Split(arg, \" \")\n\t\t\tif !reflect.DeepEqual(got, want) {\n\t\t\t\tt.Fatalf(\"test case #%d %q:\\ngot  %s\\nwant %s\", i, tc, got, 
want)\n\t\t\t}\n\n\t\tcase \"copy__\", \"mk-dir\", \"move__\", \"rm-all\", \"stat\":\n\t\t\tnParts := 3\n\t\t\tswitch op {\n\t\t\tcase \"copy__\":\n\t\t\t\tnParts = 6\n\t\t\tcase \"move__\":\n\t\t\t\tnParts = 5\n\t\t\t}\n\t\t\tparts := strings.Split(arg, \" \")\n\t\t\tif len(parts) != nParts {\n\t\t\t\tt.Fatalf(\"test case #%d %q: invalid %s\", i, tc, op)\n\t\t\t}\n\n\t\t\tgot, opErr := \"\", error(nil)\n\t\t\tswitch op {\n\t\t\tcase \"copy__\":\n\t\t\t\tdepth := 0\n\t\t\t\tif parts[1] == \"d=∞\" {\n\t\t\t\t\tdepth = infiniteDepth\n\t\t\t\t}\n\t\t\t\t_, opErr = copyFiles(ctx, fs, parts[2], parts[3], parts[0] == \"o=T\", depth, 0)\n\t\t\tcase \"mk-dir\":\n\t\t\t\topErr = fs.Mkdir(ctx, parts[0], 0777)\n\t\t\tcase \"move__\":\n\t\t\t\t_, opErr = moveFiles(ctx, fs, parts[1], parts[2], parts[0] == \"o=T\")\n\t\t\tcase \"rm-all\":\n\t\t\t\topErr = fs.RemoveAll(ctx, parts[0])\n\t\t\tcase \"stat\":\n\t\t\t\tvar stat os.FileInfo\n\t\t\t\tfileName := parts[0]\n\t\t\t\tif stat, opErr = fs.Stat(ctx, fileName); opErr == nil {\n\t\t\t\t\tif stat.IsDir() {\n\t\t\t\t\t\tgot = \"dir\"\n\t\t\t\t\t} else {\n\t\t\t\t\t\tgot = strconv.Itoa(int(stat.Size()))\n\t\t\t\t\t}\n\n\t\t\t\t\tif fileName == \"/\" {\n\t\t\t\t\t\t// For a Dir FileSystem, the virtual file system root maps to a\n\t\t\t\t\t\t// real file system name like \"/tmp/webdav-test012345\", which does\n\t\t\t\t\t\t// not end with \"/\". 
We skip such cases.\n\t\t\t\t\t} else if statName := stat.Name(); path.Base(fileName) != statName {\n\t\t\t\t\t\tt.Fatalf(\"test case #%d %q: file name %q inconsistent with stat name %q\",\n\t\t\t\t\t\t\ti, tc, fileName, statName)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif got == \"\" {\n\t\t\t\tgot = errStr(opErr)\n\t\t\t}\n\n\t\t\tif parts[len(parts)-2] != \"want\" {\n\t\t\t\tt.Fatalf(\"test case #%d %q: invalid %s\", i, tc, op)\n\t\t\t}\n\t\t\tif want := parts[len(parts)-1]; got != want {\n\t\t\t\tt.Fatalf(\"test case #%d %q: got %q (%v), want %q\", i, tc, got, opErr, want)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestDir(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"nacl\":\n\t\tt.Skip(\"see golang.org/issue/12004\")\n\tcase \"plan9\":\n\t\tt.Skip(\"see golang.org/issue/11453\")\n\t}\n\n\ttd, err := ioutil.TempDir(\"\", \"webdav-test\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(td)\n\ttestFS(t, Dir(td))\n}\n\nfunc TestMemFS(t *testing.T) {\n\ttestFS(t, NewMemFS())\n}\n\nfunc TestMemFSRoot(t *testing.T) {\n\tctx := context.Background()\n\tfs := NewMemFS()\n\tfor i := 0; i < 5; i++ {\n\t\tstat, err := fs.Stat(ctx, \"/\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"i=%d: Stat: %v\", i, err)\n\t\t}\n\t\tif !stat.IsDir() {\n\t\t\tt.Fatalf(\"i=%d: Stat.IsDir is false, want true\", i)\n\t\t}\n\n\t\tf, err := fs.OpenFile(ctx, \"/\", os.O_RDONLY, 0)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"i=%d: OpenFile: %v\", i, err)\n\t\t}\n\t\tdefer f.Close()\n\t\tchildren, err := f.Readdir(-1)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"i=%d: Readdir: %v\", i, err)\n\t\t}\n\t\tif len(children) != i {\n\t\t\tt.Fatalf(\"i=%d: got %d children, want %d\", i, len(children), i)\n\t\t}\n\n\t\tif _, err := f.Write(make([]byte, 1)); err == nil {\n\t\t\tt.Fatalf(\"i=%d: Write: got nil error, want non-nil\", i)\n\t\t}\n\n\t\tif err := fs.Mkdir(ctx, fmt.Sprintf(\"/dir%d\", i), 0777); err != nil {\n\t\t\tt.Fatalf(\"i=%d: Mkdir: %v\", i, err)\n\t\t}\n\t}\n}\n\nfunc TestMemFileReaddir(t 
*testing.T) {\n\tctx := context.Background()\n\tfs := NewMemFS()\n\tif err := fs.Mkdir(ctx, \"/foo\", 0777); err != nil {\n\t\tt.Fatalf(\"Mkdir: %v\", err)\n\t}\n\treaddir := func(count int) ([]os.FileInfo, error) {\n\t\tf, err := fs.OpenFile(ctx, \"/foo\", os.O_RDONLY, 0)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"OpenFile: %v\", err)\n\t\t}\n\t\tdefer f.Close()\n\t\treturn f.Readdir(count)\n\t}\n\tif got, err := readdir(-1); len(got) != 0 || err != nil {\n\t\tt.Fatalf(\"readdir(-1): got %d fileInfos with err=%v, want 0, <nil>\", len(got), err)\n\t}\n\tif got, err := readdir(+1); len(got) != 0 || err != io.EOF {\n\t\tt.Fatalf(\"readdir(+1): got %d fileInfos with err=%v, want 0, EOF\", len(got), err)\n\t}\n}\n\nfunc TestMemFile(t *testing.T) {\n\ttestCases := []string{\n\t\t\"wantData \",\n\t\t\"wantSize 0\",\n\t\t\"write abc\",\n\t\t\"wantData abc\",\n\t\t\"write de\",\n\t\t\"wantData abcde\",\n\t\t\"wantSize 5\",\n\t\t\"write 5*x\",\n\t\t\"write 4*y+2*z\",\n\t\t\"write 3*st\",\n\t\t\"wantData abcdexxxxxyyyyzzststst\",\n\t\t\"wantSize 22\",\n\t\t\"seek set 4 want 4\",\n\t\t\"write EFG\",\n\t\t\"wantData abcdEFGxxxyyyyzzststst\",\n\t\t\"wantSize 22\",\n\t\t\"seek set 2 want 2\",\n\t\t\"read cdEF\",\n\t\t\"read Gx\",\n\t\t\"seek cur 0 want 8\",\n\t\t\"seek cur 2 want 10\",\n\t\t\"seek cur -1 want 9\",\n\t\t\"write J\",\n\t\t\"wantData abcdEFGxxJyyyyzzststst\",\n\t\t\"wantSize 22\",\n\t\t\"seek cur -4 want 6\",\n\t\t\"write ghijk\",\n\t\t\"wantData abcdEFghijkyyyzzststst\",\n\t\t\"wantSize 22\",\n\t\t\"read yyyz\",\n\t\t\"seek cur 0 want 15\",\n\t\t\"write \",\n\t\t\"seek cur 0 want 15\",\n\t\t\"read \",\n\t\t\"seek cur 0 want 15\",\n\t\t\"seek end -3 want 19\",\n\t\t\"write ZZ\",\n\t\t\"wantData abcdEFghijkyyyzzstsZZt\",\n\t\t\"wantSize 22\",\n\t\t\"write 4*A\",\n\t\t\"wantData abcdEFghijkyyyzzstsZZAAAA\",\n\t\t\"wantSize 25\",\n\t\t\"seek end 0 want 25\",\n\t\t\"seek end -5 want 20\",\n\t\t\"read Z+4*A\",\n\t\t\"write 5*B\",\n\t\t\"wantData 
abcdEFghijkyyyzzstsZZAAAABBBBB\",\n\t\t\"wantSize 30\",\n\t\t\"seek end 10 want 40\",\n\t\t\"write C\",\n\t\t\"wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........C\",\n\t\t\"wantSize 41\",\n\t\t\"write D\",\n\t\t\"wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD\",\n\t\t\"wantSize 42\",\n\t\t\"seek set 43 want 43\",\n\t\t\"write E\",\n\t\t\"wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD.E\",\n\t\t\"wantSize 44\",\n\t\t\"seek set 0 want 0\",\n\t\t\"write 5*123456789_\",\n\t\t\"wantData 123456789_123456789_123456789_123456789_123456789_\",\n\t\t\"wantSize 50\",\n\t\t\"seek cur 0 want 50\",\n\t\t\"seek cur -99 want err\",\n\t}\n\n\tctx := context.Background()\n\n\tconst filename = \"/foo\"\n\tfs := NewMemFS()\n\tf, err := fs.OpenFile(ctx, filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)\n\tif err != nil {\n\t\tt.Fatalf(\"OpenFile: %v\", err)\n\t}\n\tdefer f.Close()\n\n\tfor i, tc := range testCases {\n\t\tj := strings.IndexByte(tc, ' ')\n\t\tif j < 0 {\n\t\t\tt.Fatalf(\"test case #%d %q: invalid command\", i, tc)\n\t\t}\n\t\top, arg := tc[:j], tc[j+1:]\n\n\t\t// Expand an arg like \"3*a+2*b\" to \"aaabb\".\n\t\tparts := strings.Split(arg, \"+\")\n\t\tfor j, part := range parts {\n\t\t\tif k := strings.IndexByte(part, '*'); k >= 0 {\n\t\t\t\trepeatCount, repeatStr := part[:k], part[k+1:]\n\t\t\t\tn, err := strconv.Atoi(repeatCount)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"test case #%d %q: invalid repeat count %q\", i, tc, repeatCount)\n\t\t\t\t}\n\t\t\t\tparts[j] = strings.Repeat(repeatStr, n)\n\t\t\t}\n\t\t}\n\t\targ = strings.Join(parts, \"\")\n\n\t\tswitch op {\n\t\tdefault:\n\t\t\tt.Fatalf(\"test case #%d %q: invalid operation %q\", i, tc, op)\n\n\t\tcase \"read\":\n\t\t\tbuf := make([]byte, len(arg))\n\t\t\tif _, err := io.ReadFull(f, buf); err != nil {\n\t\t\t\tt.Fatalf(\"test case #%d %q: ReadFull: %v\", i, tc, err)\n\t\t\t}\n\t\t\tif got := string(buf); got != arg {\n\t\t\t\tt.Fatalf(\"test case #%d %q:\\ngot  %q\\nwant %q\", i, tc, got, 
arg)\n\t\t\t}\n\n\t\tcase \"seek\":\n\t\t\tparts := strings.Split(arg, \" \")\n\t\t\tif len(parts) != 4 {\n\t\t\t\tt.Fatalf(\"test case #%d %q: invalid seek\", i, tc)\n\t\t\t}\n\n\t\t\twhence := 0\n\t\t\tswitch parts[0] {\n\t\t\tdefault:\n\t\t\t\tt.Fatalf(\"test case #%d %q: invalid seek whence\", i, tc)\n\t\t\tcase \"set\":\n\t\t\t\twhence = os.SEEK_SET\n\t\t\tcase \"cur\":\n\t\t\t\twhence = os.SEEK_CUR\n\t\t\tcase \"end\":\n\t\t\t\twhence = os.SEEK_END\n\t\t\t}\n\t\t\toffset, err := strconv.Atoi(parts[1])\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"test case #%d %q: invalid offset %q\", i, tc, parts[1])\n\t\t\t}\n\n\t\t\tif parts[2] != \"want\" {\n\t\t\t\tt.Fatalf(\"test case #%d %q: invalid seek\", i, tc)\n\t\t\t}\n\t\t\tif parts[3] == \"err\" {\n\t\t\t\t_, err := f.Seek(int64(offset), whence)\n\t\t\t\tif err == nil {\n\t\t\t\t\tt.Fatalf(\"test case #%d %q: Seek returned nil error, want non-nil\", i, tc)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tgot, err := f.Seek(int64(offset), whence)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"test case #%d %q: Seek: %v\", i, tc, err)\n\t\t\t\t}\n\t\t\t\twant, err := strconv.Atoi(parts[3])\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"test case #%d %q: invalid want %q\", i, tc, parts[3])\n\t\t\t\t}\n\t\t\t\tif got != int64(want) {\n\t\t\t\t\tt.Fatalf(\"test case #%d %q: got %d, want %d\", i, tc, got, want)\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"write\":\n\t\t\tn, err := f.Write([]byte(arg))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"test case #%d %q: write: %v\", i, tc, err)\n\t\t\t}\n\t\t\tif n != len(arg) {\n\t\t\t\tt.Fatalf(\"test case #%d %q: write returned %d bytes, want %d\", i, tc, n, len(arg))\n\t\t\t}\n\n\t\tcase \"wantData\":\n\t\t\tg, err := fs.OpenFile(ctx, filename, os.O_RDONLY, 0666)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"test case #%d %q: OpenFile: %v\", i, tc, err)\n\t\t\t}\n\t\t\tgotBytes, err := ioutil.ReadAll(g)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"test case #%d %q: ReadAll: %v\", i, tc, 
err)\n\t\t\t}\n\t\t\tfor i, c := range gotBytes {\n\t\t\t\tif c == '\\x00' {\n\t\t\t\t\tgotBytes[i] = '.'\n\t\t\t\t}\n\t\t\t}\n\t\t\tgot := string(gotBytes)\n\t\t\tif got != arg {\n\t\t\t\tt.Fatalf(\"test case #%d %q:\\ngot  %q\\nwant %q\", i, tc, got, arg)\n\t\t\t}\n\t\t\tif err := g.Close(); err != nil {\n\t\t\t\tt.Fatalf(\"test case #%d %q: Close: %v\", i, tc, err)\n\t\t\t}\n\n\t\tcase \"wantSize\":\n\t\t\tn, err := strconv.Atoi(arg)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"test case #%d %q: invalid size %q\", i, tc, arg)\n\t\t\t}\n\t\t\tfi, err := fs.Stat(ctx, filename)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"test case #%d %q: Stat: %v\", i, tc, err)\n\t\t\t}\n\t\t\tif got, want := fi.Size(), int64(n); got != want {\n\t\t\t\tt.Fatalf(\"test case #%d %q: got %d, want %d\", i, tc, got, want)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// TestMemFileWriteAllocs tests that writing N consecutive 1KiB chunks to a\n// memFile doesn't allocate a new buffer for each of those N times. Otherwise,\n// calling io.Copy(aMemFile, src) is likely to have quadratic complexity.\nfunc TestMemFileWriteAllocs(t *testing.T) {\n\tif runtime.Compiler == \"gccgo\" {\n\t\tt.Skip(\"gccgo allocates here\")\n\t}\n\tctx := context.Background()\n\tfs := NewMemFS()\n\tf, err := fs.OpenFile(ctx, \"/xxx\", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)\n\tif err != nil {\n\t\tt.Fatalf(\"OpenFile: %v\", err)\n\t}\n\tdefer f.Close()\n\n\txxx := make([]byte, 1024)\n\tfor i := range xxx {\n\t\txxx[i] = 'x'\n\t}\n\n\ta := testing.AllocsPerRun(100, func() {\n\t\tf.Write(xxx)\n\t})\n\t// AllocsPerRun returns an integral value, so we compare the rounded-down\n\t// number to zero.\n\tif a > 0 {\n\t\tt.Fatalf(\"%v allocs per run, want 0\", a)\n\t}\n}\n\nfunc BenchmarkMemFileWrite(b *testing.B) {\n\tctx := context.Background()\n\tfs := NewMemFS()\n\txxx := make([]byte, 1024)\n\tfor i := range xxx {\n\t\txxx[i] = 'x'\n\t}\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tf, err := fs.OpenFile(ctx, \"/xxx\", 
os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"OpenFile: %v\", err)\n\t\t}\n\t\tfor j := 0; j < 100; j++ {\n\t\t\tf.Write(xxx)\n\t\t}\n\t\tif err := f.Close(); err != nil {\n\t\t\tb.Fatalf(\"Close: %v\", err)\n\t\t}\n\t\tif err := fs.RemoveAll(ctx, \"/xxx\"); err != nil {\n\t\t\tb.Fatalf(\"RemoveAll: %v\", err)\n\t\t}\n\t}\n}\n\nfunc TestCopyMoveProps(t *testing.T) {\n\tctx := context.Background()\n\tfs := NewMemFS()\n\tcreate := func(name string) error {\n\t\tf, err := fs.OpenFile(ctx, name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, wErr := f.Write([]byte(\"contents\"))\n\t\tcErr := f.Close()\n\t\tif wErr != nil {\n\t\t\treturn wErr\n\t\t}\n\t\treturn cErr\n\t}\n\tpatch := func(name string, patches ...Proppatch) error {\n\t\tf, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, pErr := f.(DeadPropsHolder).Patch(patches)\n\t\tcErr := f.Close()\n\t\tif pErr != nil {\n\t\t\treturn pErr\n\t\t}\n\t\treturn cErr\n\t}\n\tprops := func(name string) (map[xml.Name]Property, error) {\n\t\tf, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tm, pErr := f.(DeadPropsHolder).DeadProps()\n\t\tcErr := f.Close()\n\t\tif pErr != nil {\n\t\t\treturn nil, pErr\n\t\t}\n\t\tif cErr != nil {\n\t\t\treturn nil, cErr\n\t\t}\n\t\treturn m, nil\n\t}\n\n\tp0 := Property{\n\t\tXMLName:  xml.Name{Space: \"x:\", Local: \"boat\"},\n\t\tInnerXML: []byte(\"pea-green\"),\n\t}\n\tp1 := Property{\n\t\tXMLName:  xml.Name{Space: \"x:\", Local: \"ring\"},\n\t\tInnerXML: []byte(\"1 shilling\"),\n\t}\n\tp2 := Property{\n\t\tXMLName:  xml.Name{Space: \"x:\", Local: \"spoon\"},\n\t\tInnerXML: []byte(\"runcible\"),\n\t}\n\tp3 := Property{\n\t\tXMLName:  xml.Name{Space: \"x:\", Local: \"moon\"},\n\t\tInnerXML: []byte(\"light\"),\n\t}\n\n\tif err := create(\"/src\"); err != nil {\n\t\tt.Fatalf(\"create /src: %v\", 
err)\n\t}\n\tif err := patch(\"/src\", Proppatch{Props: []Property{p0, p1}}); err != nil {\n\t\tt.Fatalf(\"patch /src +p0 +p1: %v\", err)\n\t}\n\tif _, err := copyFiles(ctx, fs, \"/src\", \"/tmp\", true, infiniteDepth, 0); err != nil {\n\t\tt.Fatalf(\"copyFiles /src /tmp: %v\", err)\n\t}\n\tif _, err := moveFiles(ctx, fs, \"/tmp\", \"/dst\", true); err != nil {\n\t\tt.Fatalf(\"moveFiles /tmp /dst: %v\", err)\n\t}\n\tif err := patch(\"/src\", Proppatch{Props: []Property{p0}, Remove: true}); err != nil {\n\t\tt.Fatalf(\"patch /src -p0: %v\", err)\n\t}\n\tif err := patch(\"/src\", Proppatch{Props: []Property{p2}}); err != nil {\n\t\tt.Fatalf(\"patch /src +p2: %v\", err)\n\t}\n\tif err := patch(\"/dst\", Proppatch{Props: []Property{p1}, Remove: true}); err != nil {\n\t\tt.Fatalf(\"patch /dst -p1: %v\", err)\n\t}\n\tif err := patch(\"/dst\", Proppatch{Props: []Property{p3}}); err != nil {\n\t\tt.Fatalf(\"patch /dst +p3: %v\", err)\n\t}\n\n\tgotSrc, err := props(\"/src\")\n\tif err != nil {\n\t\tt.Fatalf(\"props /src: %v\", err)\n\t}\n\twantSrc := map[xml.Name]Property{\n\t\tp1.XMLName: p1,\n\t\tp2.XMLName: p2,\n\t}\n\tif !reflect.DeepEqual(gotSrc, wantSrc) {\n\t\tt.Fatalf(\"props /src:\\ngot  %v\\nwant %v\", gotSrc, wantSrc)\n\t}\n\n\tgotDst, err := props(\"/dst\")\n\tif err != nil {\n\t\tt.Fatalf(\"props /dst: %v\", err)\n\t}\n\twantDst := map[xml.Name]Property{\n\t\tp0.XMLName: p0,\n\t\tp3.XMLName: p3,\n\t}\n\tif !reflect.DeepEqual(gotDst, wantDst) {\n\t\tt.Fatalf(\"props /dst:\\ngot  %v\\nwant %v\", gotDst, wantDst)\n\t}\n}\n\nfunc TestWalkFS(t *testing.T) {\n\ttestCases := []struct {\n\t\tdesc    string\n\t\tbuildfs []string\n\t\tstartAt string\n\t\tdepth   int\n\t\twalkFn  filepath.WalkFunc\n\t\twant    []string\n\t}{{\n\t\t\"just root\",\n\t\t[]string{},\n\t\t\"/\",\n\t\tinfiniteDepth,\n\t\tnil,\n\t\t[]string{\n\t\t\t\"/\",\n\t\t},\n\t}, {\n\t\t\"infinite walk from root\",\n\t\t[]string{\n\t\t\t\"mkdir /a\",\n\t\t\t\"mkdir /a/b\",\n\t\t\t\"touch 
/a/b/c\",\n\t\t\t\"mkdir /a/d\",\n\t\t\t\"mkdir /e\",\n\t\t\t\"touch /f\",\n\t\t},\n\t\t\"/\",\n\t\tinfiniteDepth,\n\t\tnil,\n\t\t[]string{\n\t\t\t\"/\",\n\t\t\t\"/a\",\n\t\t\t\"/a/b\",\n\t\t\t\"/a/b/c\",\n\t\t\t\"/a/d\",\n\t\t\t\"/e\",\n\t\t\t\"/f\",\n\t\t},\n\t}, {\n\t\t\"infinite walk from subdir\",\n\t\t[]string{\n\t\t\t\"mkdir /a\",\n\t\t\t\"mkdir /a/b\",\n\t\t\t\"touch /a/b/c\",\n\t\t\t\"mkdir /a/d\",\n\t\t\t\"mkdir /e\",\n\t\t\t\"touch /f\",\n\t\t},\n\t\t\"/a\",\n\t\tinfiniteDepth,\n\t\tnil,\n\t\t[]string{\n\t\t\t\"/a\",\n\t\t\t\"/a/b\",\n\t\t\t\"/a/b/c\",\n\t\t\t\"/a/d\",\n\t\t},\n\t}, {\n\t\t\"depth 1 walk from root\",\n\t\t[]string{\n\t\t\t\"mkdir /a\",\n\t\t\t\"mkdir /a/b\",\n\t\t\t\"touch /a/b/c\",\n\t\t\t\"mkdir /a/d\",\n\t\t\t\"mkdir /e\",\n\t\t\t\"touch /f\",\n\t\t},\n\t\t\"/\",\n\t\t1,\n\t\tnil,\n\t\t[]string{\n\t\t\t\"/\",\n\t\t\t\"/a\",\n\t\t\t\"/e\",\n\t\t\t\"/f\",\n\t\t},\n\t}, {\n\t\t\"depth 1 walk from subdir\",\n\t\t[]string{\n\t\t\t\"mkdir /a\",\n\t\t\t\"mkdir /a/b\",\n\t\t\t\"touch /a/b/c\",\n\t\t\t\"mkdir /a/b/g\",\n\t\t\t\"mkdir /a/b/g/h\",\n\t\t\t\"touch /a/b/g/i\",\n\t\t\t\"touch /a/b/g/h/j\",\n\t\t},\n\t\t\"/a/b\",\n\t\t1,\n\t\tnil,\n\t\t[]string{\n\t\t\t\"/a/b\",\n\t\t\t\"/a/b/c\",\n\t\t\t\"/a/b/g\",\n\t\t},\n\t}, {\n\t\t\"depth 0 walk from subdir\",\n\t\t[]string{\n\t\t\t\"mkdir /a\",\n\t\t\t\"mkdir /a/b\",\n\t\t\t\"touch /a/b/c\",\n\t\t\t\"mkdir /a/b/g\",\n\t\t\t\"mkdir /a/b/g/h\",\n\t\t\t\"touch /a/b/g/i\",\n\t\t\t\"touch /a/b/g/h/j\",\n\t\t},\n\t\t\"/a/b\",\n\t\t0,\n\t\tnil,\n\t\t[]string{\n\t\t\t\"/a/b\",\n\t\t},\n\t}, {\n\t\t\"infinite walk from file\",\n\t\t[]string{\n\t\t\t\"mkdir /a\",\n\t\t\t\"touch /a/b\",\n\t\t\t\"touch /a/c\",\n\t\t},\n\t\t\"/a/b\",\n\t\t0,\n\t\tnil,\n\t\t[]string{\n\t\t\t\"/a/b\",\n\t\t},\n\t}, {\n\t\t\"infinite walk with skipped subdir\",\n\t\t[]string{\n\t\t\t\"mkdir /a\",\n\t\t\t\"mkdir /a/b\",\n\t\t\t\"touch /a/b/c\",\n\t\t\t\"mkdir /a/b/g\",\n\t\t\t\"mkdir /a/b/g/h\",\n\t\t\t\"touch 
/a/b/g/i\",\n\t\t\t\"touch /a/b/g/h/j\",\n\t\t\t\"touch /a/b/z\",\n\t\t},\n\t\t\"/\",\n\t\tinfiniteDepth,\n\t\tfunc(path string, info os.FileInfo, err error) error {\n\t\t\tif path == \"/a/b/g\" {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t\t[]string{\n\t\t\t\"/\",\n\t\t\t\"/a\",\n\t\t\t\"/a/b\",\n\t\t\t\"/a/b/c\",\n\t\t\t\"/a/b/z\",\n\t\t},\n\t}}\n\tctx := context.Background()\n\tfor _, tc := range testCases {\n\t\tfs, err := buildTestFS(tc.buildfs)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: cannot create test filesystem: %v\", tc.desc, err)\n\t\t}\n\t\tvar got []string\n\t\ttraceFn := func(path string, info os.FileInfo, err error) error {\n\t\t\tif tc.walkFn != nil {\n\t\t\t\terr = tc.walkFn(path, info, err)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tgot = append(got, path)\n\t\t\treturn nil\n\t\t}\n\t\tfi, err := fs.Stat(ctx, tc.startAt)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: cannot stat: %v\", tc.desc, err)\n\t\t}\n\t\terr = walkFS(ctx, fs, tc.depth, tc.startAt, fi, traceFn)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s:\\ngot error %v, want nil\", tc.desc, err)\n\t\t\tcontinue\n\t\t}\n\t\tsort.Strings(got)\n\t\tsort.Strings(tc.want)\n\t\tif !reflect.DeepEqual(got, tc.want) {\n\t\t\tt.Errorf(\"%s:\\ngot  %q\\nwant %q\", tc.desc, got, tc.want)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc buildTestFS(buildfs []string) (FileSystem, error) {\n\t// TODO: Could this be merged with the build logic in TestFS?\n\n\tctx := context.Background()\n\tfs := NewMemFS()\n\tfor _, b := range buildfs {\n\t\top := strings.Split(b, \" \")\n\t\tswitch op[0] {\n\t\tcase \"mkdir\":\n\t\t\terr := fs.Mkdir(ctx, op[1], os.ModeDir|0777)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase \"touch\":\n\t\t\tf, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE, 0666)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tf.Close()\n\t\tcase \"write\":\n\t\t\tf, err := fs.OpenFile(ctx, op[1], 
os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t_, err = f.Write([]byte(op[2]))\n\t\t\tf.Close()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unknown file operation %q\", op[0])\n\t\t}\n\t}\n\treturn fs, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/if.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage webdav\n\n// The If header is covered by Section 10.4.\n// http://www.webdav.org/specs/rfc4918.html#HEADER_If\n\nimport (\n\t\"strings\"\n)\n\n// ifHeader is a disjunction (OR) of ifLists.\ntype ifHeader struct {\n\tlists []ifList\n}\n\n// ifList is a conjunction (AND) of Conditions, and an optional resource tag.\ntype ifList struct {\n\tresourceTag string\n\tconditions  []Condition\n}\n\n// parseIfHeader parses the \"If: foo bar\" HTTP header. The httpHeader string\n// should omit the \"If:\" prefix and have any \"\\r\\n\"s collapsed to a \" \", as is\n// returned by req.Header.Get(\"If\") for a http.Request req.\nfunc parseIfHeader(httpHeader string) (h ifHeader, ok bool) {\n\ts := strings.TrimSpace(httpHeader)\n\tswitch tokenType, _, _ := lex(s); tokenType {\n\tcase '(':\n\t\treturn parseNoTagLists(s)\n\tcase angleTokenType:\n\t\treturn parseTaggedLists(s)\n\tdefault:\n\t\treturn ifHeader{}, false\n\t}\n}\n\nfunc parseNoTagLists(s string) (h ifHeader, ok bool) {\n\tfor {\n\t\tl, remaining, ok := parseList(s)\n\t\tif !ok {\n\t\t\treturn ifHeader{}, false\n\t\t}\n\t\th.lists = append(h.lists, l)\n\t\tif remaining == \"\" {\n\t\t\treturn h, true\n\t\t}\n\t\ts = remaining\n\t}\n}\n\nfunc parseTaggedLists(s string) (h ifHeader, ok bool) {\n\tresourceTag, n := \"\", 0\n\tfor first := true; ; first = false {\n\t\ttokenType, tokenStr, remaining := lex(s)\n\t\tswitch tokenType {\n\t\tcase angleTokenType:\n\t\t\tif !first && n == 0 {\n\t\t\t\treturn ifHeader{}, false\n\t\t\t}\n\t\t\tresourceTag, n = tokenStr, 0\n\t\t\ts = remaining\n\t\tcase '(':\n\t\t\tn++\n\t\t\tl, remaining, ok := parseList(s)\n\t\t\tif !ok {\n\t\t\t\treturn ifHeader{}, false\n\t\t\t}\n\t\t\tl.resourceTag = resourceTag\n\t\t\th.lists = append(h.lists, l)\n\t\t\tif remaining == \"\" {\n\t\t\t\treturn h, 
true\n\t\t\t}\n\t\t\ts = remaining\n\t\tdefault:\n\t\t\treturn ifHeader{}, false\n\t\t}\n\t}\n}\n\nfunc parseList(s string) (l ifList, remaining string, ok bool) {\n\ttokenType, _, s := lex(s)\n\tif tokenType != '(' {\n\t\treturn ifList{}, \"\", false\n\t}\n\tfor {\n\t\ttokenType, _, remaining = lex(s)\n\t\tif tokenType == ')' {\n\t\t\tif len(l.conditions) == 0 {\n\t\t\t\treturn ifList{}, \"\", false\n\t\t\t}\n\t\t\treturn l, remaining, true\n\t\t}\n\t\tc, remaining, ok := parseCondition(s)\n\t\tif !ok {\n\t\t\treturn ifList{}, \"\", false\n\t\t}\n\t\tl.conditions = append(l.conditions, c)\n\t\ts = remaining\n\t}\n}\n\nfunc parseCondition(s string) (c Condition, remaining string, ok bool) {\n\ttokenType, tokenStr, s := lex(s)\n\tif tokenType == notTokenType {\n\t\tc.Not = true\n\t\ttokenType, tokenStr, s = lex(s)\n\t}\n\tswitch tokenType {\n\tcase strTokenType, angleTokenType:\n\t\tc.Token = tokenStr\n\tcase squareTokenType:\n\t\tc.ETag = tokenStr\n\tdefault:\n\t\treturn Condition{}, \"\", false\n\t}\n\treturn c, s, true\n}\n\n// Single-rune tokens like '(' or ')' have a token type equal to their rune.\n// All other tokens have a negative token type.\nconst (\n\terrTokenType    = rune(-1)\n\teofTokenType    = rune(-2)\n\tstrTokenType    = rune(-3)\n\tnotTokenType    = rune(-4)\n\tangleTokenType  = rune(-5)\n\tsquareTokenType = rune(-6)\n)\n\nfunc lex(s string) (tokenType rune, tokenStr string, remaining string) {\n\t// The net/textproto Reader that parses the HTTP header will collapse\n\t// Linear White Space that spans multiple \"\\r\\n\" lines to a single \" \",\n\t// so we don't need to look for '\\r' or '\\n'.\n\tfor len(s) > 0 && (s[0] == '\\t' || s[0] == ' ') {\n\t\ts = s[1:]\n\t}\n\tif len(s) == 0 {\n\t\treturn eofTokenType, \"\", \"\"\n\t}\n\ti := 0\nloop:\n\tfor ; i < len(s); i++ {\n\t\tswitch s[i] {\n\t\tcase '\\t', ' ', '(', ')', '<', '>', '[', ']':\n\t\t\tbreak loop\n\t\t}\n\t}\n\n\tif i != 0 {\n\t\ttokenStr, remaining = s[:i], s[i:]\n\t\tif tokenStr == 
\"Not\" {\n\t\t\treturn notTokenType, \"\", remaining\n\t\t}\n\t\treturn strTokenType, tokenStr, remaining\n\t}\n\n\tj := 0\n\tswitch s[0] {\n\tcase '<':\n\t\tj, tokenType = strings.IndexByte(s, '>'), angleTokenType\n\tcase '[':\n\t\tj, tokenType = strings.IndexByte(s, ']'), squareTokenType\n\tdefault:\n\t\treturn rune(s[0]), \"\", s[1:]\n\t}\n\tif j < 0 {\n\t\treturn errTokenType, \"\", \"\"\n\t}\n\treturn tokenType, s[1:j], s[j+1:]\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/if_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage webdav\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestParseIfHeader(t *testing.T) {\n\t// The \"section x.y.z\" test cases come from section x.y.z of the spec at\n\t// http://www.webdav.org/specs/rfc4918.html\n\ttestCases := []struct {\n\t\tdesc  string\n\t\tinput string\n\t\twant  ifHeader\n\t}{{\n\t\t\"bad: empty\",\n\t\t``,\n\t\tifHeader{},\n\t}, {\n\t\t\"bad: no parens\",\n\t\t`foobar`,\n\t\tifHeader{},\n\t}, {\n\t\t\"bad: empty list #1\",\n\t\t`()`,\n\t\tifHeader{},\n\t}, {\n\t\t\"bad: empty list #2\",\n\t\t`(a) (b c) () (d)`,\n\t\tifHeader{},\n\t}, {\n\t\t\"bad: no list after resource #1\",\n\t\t`<foo>`,\n\t\tifHeader{},\n\t}, {\n\t\t\"bad: no list after resource #2\",\n\t\t`<foo> <bar> (a)`,\n\t\tifHeader{},\n\t}, {\n\t\t\"bad: no list after resource #3\",\n\t\t`<foo> (a) (b) <bar>`,\n\t\tifHeader{},\n\t}, {\n\t\t\"bad: no-tag-list followed by tagged-list\",\n\t\t`(a) (b) <foo> (c)`,\n\t\tifHeader{},\n\t}, {\n\t\t\"bad: unfinished list\",\n\t\t`(a`,\n\t\tifHeader{},\n\t}, {\n\t\t\"bad: unfinished ETag\",\n\t\t`([b`,\n\t\tifHeader{},\n\t}, {\n\t\t\"bad: unfinished Notted list\",\n\t\t`(Not a`,\n\t\tifHeader{},\n\t}, {\n\t\t\"bad: double Not\",\n\t\t`(Not Not a)`,\n\t\tifHeader{},\n\t}, {\n\t\t\"good: one list with a Token\",\n\t\t`(a)`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tToken: `a`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}, {\n\t\t\"good: one list with an ETag\",\n\t\t`([a])`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tETag: `a`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}, {\n\t\t\"good: one list with three Nots\",\n\t\t`(Not a Not b Not [d])`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tNot:   true,\n\t\t\t\t\tToken: 
`a`,\n\t\t\t\t}, {\n\t\t\t\t\tNot:   true,\n\t\t\t\t\tToken: `b`,\n\t\t\t\t}, {\n\t\t\t\t\tNot:  true,\n\t\t\t\t\tETag: `d`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}, {\n\t\t\"good: two lists\",\n\t\t`(a) (b)`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tToken: `a`,\n\t\t\t\t}},\n\t\t\t}, {\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tToken: `b`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}, {\n\t\t\"good: two Notted lists\",\n\t\t`(Not a) (Not b)`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tNot:   true,\n\t\t\t\t\tToken: `a`,\n\t\t\t\t}},\n\t\t\t}, {\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tNot:   true,\n\t\t\t\t\tToken: `b`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}, {\n\t\t\"section 7.5.1\",\n\t\t`<http://www.example.com/users/f/fielding/index.html> \n\t\t\t(<urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6>)`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tresourceTag: `http://www.example.com/users/f/fielding/index.html`,\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tToken: `urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}, {\n\t\t\"section 7.5.2 #1\",\n\t\t`(<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tToken: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}, {\n\t\t\"section 7.5.2 #2\",\n\t\t`<http://example.com/locked/>\n\t\t\t(<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tresourceTag: `http://example.com/locked/`,\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tToken: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}, {\n\t\t\"section 7.5.2 #3\",\n\t\t`<http://example.com/locked/member>\n\t\t\t(<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tresourceTag: 
`http://example.com/locked/member`,\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tToken: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}, {\n\t\t\"section 9.9.6\",\n\t\t`(<urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4>) \n\t\t\t(<urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77>)`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tToken: `urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4`,\n\t\t\t\t}},\n\t\t\t}, {\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tToken: `urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}, {\n\t\t\"section 9.10.8\",\n\t\t`(<urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4>)`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tToken: `urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}, {\n\t\t\"section 10.4.6\",\n\t\t`(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2> \n\t\t\t[\"I am an ETag\"])\n\t\t\t([\"I am another ETag\"])`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tToken: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,\n\t\t\t\t}, {\n\t\t\t\t\tETag: `\"I am an ETag\"`,\n\t\t\t\t}},\n\t\t\t}, {\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tETag: `\"I am another ETag\"`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}, {\n\t\t\"section 10.4.7\",\n\t\t`(Not <urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2> \n\t\t\t<urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092>)`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tNot:   true,\n\t\t\t\t\tToken: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,\n\t\t\t\t}, {\n\t\t\t\t\tToken: `urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}, {\n\t\t\"section 10.4.8\",\n\t\t`(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>) \n\t\t\t(Not <DAV:no-lock>)`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tconditions: 
[]Condition{{\n\t\t\t\t\tToken: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,\n\t\t\t\t}},\n\t\t\t}, {\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tNot:   true,\n\t\t\t\t\tToken: `DAV:no-lock`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}, {\n\t\t\"section 10.4.9\",\n\t\t`</resource1> \n\t\t\t(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2> \n\t\t\t[W/\"A weak ETag\"]) ([\"strong ETag\"])`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tresourceTag: `/resource1`,\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tToken: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,\n\t\t\t\t}, {\n\t\t\t\t\tETag: `W/\"A weak ETag\"`,\n\t\t\t\t}},\n\t\t\t}, {\n\t\t\t\tresourceTag: `/resource1`,\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tETag: `\"strong ETag\"`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}, {\n\t\t\"section 10.4.10\",\n\t\t`<http://www.example.com/specs/> \n\t\t\t(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>)`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tresourceTag: `http://www.example.com/specs/`,\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tToken: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}, {\n\t\t\"section 10.4.11 #1\",\n\t\t`</specs/rfc2518.doc> ([\"4217\"])`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tresourceTag: `/specs/rfc2518.doc`,\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tETag: `\"4217\"`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}, {\n\t\t\"section 10.4.11 #2\",\n\t\t`</specs/rfc2518.doc> (Not [\"4217\"])`,\n\t\tifHeader{\n\t\t\tlists: []ifList{{\n\t\t\t\tresourceTag: `/specs/rfc2518.doc`,\n\t\t\t\tconditions: []Condition{{\n\t\t\t\t\tNot:  true,\n\t\t\t\t\tETag: `\"4217\"`,\n\t\t\t\t}},\n\t\t\t}},\n\t\t},\n\t}}\n\n\tfor _, tc := range testCases {\n\t\tgot, ok := parseIfHeader(strings.Replace(tc.input, \"\\n\", \"\", -1))\n\t\tif gotEmpty := reflect.DeepEqual(got, ifHeader{}); gotEmpty == ok {\n\t\t\tt.Errorf(\"%s: should be different: empty header == %t, ok == %t\", tc.desc, gotEmpty, 
ok)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(got, tc.want) {\n\t\t\tt.Errorf(\"%s:\\ngot  %v\\nwant %v\", tc.desc, got, tc.want)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/internal/xml/README",
    "content": "This is a fork of the encoding/xml package at ca1d6c4, the last commit before\nhttps://go.googlesource.com/go/+/c0d6d33 \"encoding/xml: restore Go 1.4 name\nspace behavior\" made late in the lead-up to the Go 1.5 release.\n\nThe list of encoding/xml changes is at\nhttps://go.googlesource.com/go/+log/master/src/encoding/xml\n\nThis fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is\nreleased.\n\nSee http://golang.org/issue/11841\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/internal/xml/atom_test.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage xml\n\nimport \"time\"\n\nvar atomValue = &Feed{\n\tXMLName: Name{\"http://www.w3.org/2005/Atom\", \"feed\"},\n\tTitle:   \"Example Feed\",\n\tLink:    []Link{{Href: \"http://example.org/\"}},\n\tUpdated: ParseTime(\"2003-12-13T18:30:02Z\"),\n\tAuthor:  Person{Name: \"John Doe\"},\n\tId:      \"urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6\",\n\n\tEntry: []Entry{\n\t\t{\n\t\t\tTitle:   \"Atom-Powered Robots Run Amok\",\n\t\t\tLink:    []Link{{Href: \"http://example.org/2003/12/13/atom03\"}},\n\t\t\tId:      \"urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a\",\n\t\t\tUpdated: ParseTime(\"2003-12-13T18:30:02Z\"),\n\t\t\tSummary: NewText(\"Some text.\"),\n\t\t},\n\t},\n}\n\nvar atomXml = `` +\n\t`<feed xmlns=\"http://www.w3.org/2005/Atom\" updated=\"2003-12-13T18:30:02Z\">` +\n\t`<title>Example Feed</title>` +\n\t`<id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>` +\n\t`<link href=\"http://example.org/\"></link>` +\n\t`<author><name>John Doe</name><uri></uri><email></email></author>` +\n\t`<entry>` +\n\t`<title>Atom-Powered Robots Run Amok</title>` +\n\t`<id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>` +\n\t`<link href=\"http://example.org/2003/12/13/atom03\"></link>` +\n\t`<updated>2003-12-13T18:30:02Z</updated>` +\n\t`<author><name></name><uri></uri><email></email></author>` +\n\t`<summary>Some text.</summary>` +\n\t`</entry>` +\n\t`</feed>`\n\nfunc ParseTime(str string) time.Time {\n\tt, err := time.Parse(time.RFC3339, str)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn t\n}\n\nfunc NewText(text string) Text {\n\treturn Text{\n\t\tBody: text,\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/internal/xml/example_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage xml_test\n\nimport (\n\t\"encoding/xml\"\n\t\"fmt\"\n\t\"os\"\n)\n\nfunc ExampleMarshalIndent() {\n\ttype Address struct {\n\t\tCity, State string\n\t}\n\ttype Person struct {\n\t\tXMLName   xml.Name `xml:\"person\"`\n\t\tId        int      `xml:\"id,attr\"`\n\t\tFirstName string   `xml:\"name>first\"`\n\t\tLastName  string   `xml:\"name>last\"`\n\t\tAge       int      `xml:\"age\"`\n\t\tHeight    float32  `xml:\"height,omitempty\"`\n\t\tMarried   bool\n\t\tAddress\n\t\tComment string `xml:\",comment\"`\n\t}\n\n\tv := &Person{Id: 13, FirstName: \"John\", LastName: \"Doe\", Age: 42}\n\tv.Comment = \" Need more details. \"\n\tv.Address = Address{\"Hanga Roa\", \"Easter Island\"}\n\n\toutput, err := xml.MarshalIndent(v, \"  \", \"    \")\n\tif err != nil {\n\t\tfmt.Printf(\"error: %v\\n\", err)\n\t}\n\n\tos.Stdout.Write(output)\n\t// Output:\n\t//   <person id=\"13\">\n\t//       <name>\n\t//           <first>John</first>\n\t//           <last>Doe</last>\n\t//       </name>\n\t//       <age>42</age>\n\t//       <Married>false</Married>\n\t//       <City>Hanga Roa</City>\n\t//       <State>Easter Island</State>\n\t//       <!-- Need more details. -->\n\t//   </person>\n}\n\nfunc ExampleEncoder() {\n\ttype Address struct {\n\t\tCity, State string\n\t}\n\ttype Person struct {\n\t\tXMLName   xml.Name `xml:\"person\"`\n\t\tId        int      `xml:\"id,attr\"`\n\t\tFirstName string   `xml:\"name>first\"`\n\t\tLastName  string   `xml:\"name>last\"`\n\t\tAge       int      `xml:\"age\"`\n\t\tHeight    float32  `xml:\"height,omitempty\"`\n\t\tMarried   bool\n\t\tAddress\n\t\tComment string `xml:\",comment\"`\n\t}\n\n\tv := &Person{Id: 13, FirstName: \"John\", LastName: \"Doe\", Age: 42}\n\tv.Comment = \" Need more details. 
\"\n\tv.Address = Address{\"Hanga Roa\", \"Easter Island\"}\n\n\tenc := xml.NewEncoder(os.Stdout)\n\tenc.Indent(\"  \", \"    \")\n\tif err := enc.Encode(v); err != nil {\n\t\tfmt.Printf(\"error: %v\\n\", err)\n\t}\n\n\t// Output:\n\t//   <person id=\"13\">\n\t//       <name>\n\t//           <first>John</first>\n\t//           <last>Doe</last>\n\t//       </name>\n\t//       <age>42</age>\n\t//       <Married>false</Married>\n\t//       <City>Hanga Roa</City>\n\t//       <State>Easter Island</State>\n\t//       <!-- Need more details. -->\n\t//   </person>\n}\n\n// This example demonstrates unmarshaling an XML excerpt into a value with\n// some preset fields. Note that the Phone field isn't modified and that\n// the XML <Company> element is ignored. Also, the Groups field is assigned\n// considering the element path provided in its tag.\nfunc ExampleUnmarshal() {\n\ttype Email struct {\n\t\tWhere string `xml:\"where,attr\"`\n\t\tAddr  string\n\t}\n\ttype Address struct {\n\t\tCity, State string\n\t}\n\ttype Result struct {\n\t\tXMLName xml.Name `xml:\"Person\"`\n\t\tName    string   `xml:\"FullName\"`\n\t\tPhone   string\n\t\tEmail   []Email\n\t\tGroups  []string `xml:\"Group>Value\"`\n\t\tAddress\n\t}\n\tv := Result{Name: \"none\", Phone: \"none\"}\n\n\tdata := `\n\t\t<Person>\n\t\t\t<FullName>Grace R. 
Emlin</FullName>\n\t\t\t<Company>Example Inc.</Company>\n\t\t\t<Email where=\"home\">\n\t\t\t\t<Addr>gre@example.com</Addr>\n\t\t\t</Email>\n\t\t\t<Email where='work'>\n\t\t\t\t<Addr>gre@work.com</Addr>\n\t\t\t</Email>\n\t\t\t<Group>\n\t\t\t\t<Value>Friends</Value>\n\t\t\t\t<Value>Squash</Value>\n\t\t\t</Group>\n\t\t\t<City>Hanga Roa</City>\n\t\t\t<State>Easter Island</State>\n\t\t</Person>\n\t`\n\terr := xml.Unmarshal([]byte(data), &v)\n\tif err != nil {\n\t\tfmt.Printf(\"error: %v\", err)\n\t\treturn\n\t}\n\tfmt.Printf(\"XMLName: %#v\\n\", v.XMLName)\n\tfmt.Printf(\"Name: %q\\n\", v.Name)\n\tfmt.Printf(\"Phone: %q\\n\", v.Phone)\n\tfmt.Printf(\"Email: %v\\n\", v.Email)\n\tfmt.Printf(\"Groups: %v\\n\", v.Groups)\n\tfmt.Printf(\"Address: %v\\n\", v.Address)\n\t// Output:\n\t// XMLName: xml.Name{Space:\"\", Local:\"Person\"}\n\t// Name: \"Grace R. Emlin\"\n\t// Phone: \"none\"\n\t// Email: [{home gre@example.com} {work gre@work.com}]\n\t// Groups: [Friends Squash]\n\t// Address: {Hanga Roa Easter Island}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/internal/xml/marshal.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage xml\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\t// A generic XML header suitable for use with the output of Marshal.\n\t// This is not automatically added to any output of this package,\n\t// it is provided as a convenience.\n\tHeader = `<?xml version=\"1.0\" encoding=\"UTF-8\"?>` + \"\\n\"\n)\n\n// Marshal returns the XML encoding of v.\n//\n// Marshal handles an array or slice by marshalling each of the elements.\n// Marshal handles a pointer by marshalling the value it points at or, if the\n// pointer is nil, by writing nothing. Marshal handles an interface value by\n// marshalling the value it contains or, if the interface value is nil, by\n// writing nothing. Marshal handles all other data by writing one or more XML\n// elements containing the data.\n//\n// The name for the XML elements is taken from, in order of preference:\n//     - the tag on the XMLName field, if the data is a struct\n//     - the value of the XMLName field of type xml.Name\n//     - the tag of the struct field used to obtain the data\n//     - the name of the struct field used to obtain the data\n//     - the name of the marshalled type\n//\n// The XML element for a struct contains marshalled elements for each of the\n// exported fields of the struct, with these exceptions:\n//     - the XMLName field, described above, is omitted.\n//     - a field with tag \"-\" is omitted.\n//     - a field with tag \"name,attr\" becomes an attribute with\n//       the given name in the XML element.\n//     - a field with tag \",attr\" becomes an attribute with the\n//       field name in the XML element.\n//     - a field with tag \",chardata\" is written as character data,\n//       not as an XML element.\n//     - a field 
with tag \",innerxml\" is written verbatim, not subject\n//       to the usual marshalling procedure.\n//     - a field with tag \",comment\" is written as an XML comment, not\n//       subject to the usual marshalling procedure. It must not contain\n//       the \"--\" string within it.\n//     - a field with a tag including the \"omitempty\" option is omitted\n//       if the field value is empty. The empty values are false, 0, any\n//       nil pointer or interface value, and any array, slice, map, or\n//       string of length zero.\n//     - an anonymous struct field is handled as if the fields of its\n//       value were part of the outer struct.\n//\n// If a field uses a tag \"a>b>c\", then the element c will be nested inside\n// parent elements a and b. Fields that appear next to each other that name\n// the same parent will be enclosed in one XML element.\n//\n// See MarshalIndent for an example.\n//\n// Marshal will return an error if asked to marshal a channel, function, or map.\nfunc Marshal(v interface{}) ([]byte, error) {\n\tvar b bytes.Buffer\n\tif err := NewEncoder(&b).Encode(v); err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.Bytes(), nil\n}\n\n// Marshaler is the interface implemented by objects that can marshal\n// themselves into valid XML elements.\n//\n// MarshalXML encodes the receiver as zero or more XML elements.\n// By convention, arrays or slices are typically encoded as a sequence\n// of elements, one per entry.\n// Using start as the element tag is not required, but doing so\n// will enable Unmarshal to match the XML elements to the correct\n// struct field.\n// One common implementation strategy is to construct a separate\n// value with a layout corresponding to the desired XML and then\n// to encode it using e.EncodeElement.\n// Another common strategy is to use repeated calls to e.EncodeToken\n// to generate the XML output one token at a time.\n// The sequence of encoded tokens must make up zero or more valid\n// XML 
elements.\ntype Marshaler interface {\n\tMarshalXML(e *Encoder, start StartElement) error\n}\n\n// MarshalerAttr is the interface implemented by objects that can marshal\n// themselves into valid XML attributes.\n//\n// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver.\n// Using name as the attribute name is not required, but doing so\n// will enable Unmarshal to match the attribute to the correct\n// struct field.\n// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute\n// will be generated in the output.\n// MarshalXMLAttr is used only for struct fields with the\n// \"attr\" option in the field tag.\ntype MarshalerAttr interface {\n\tMarshalXMLAttr(name Name) (Attr, error)\n}\n\n// MarshalIndent works like Marshal, but each XML element begins on a new\n// indented line that starts with prefix and is followed by one or more\n// copies of indent according to the nesting depth.\nfunc MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {\n\tvar b bytes.Buffer\n\tenc := NewEncoder(&b)\n\tenc.Indent(prefix, indent)\n\tif err := enc.Encode(v); err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.Bytes(), nil\n}\n\n// An Encoder writes XML data to an output stream.\ntype Encoder struct {\n\tp printer\n}\n\n// NewEncoder returns a new encoder that writes to w.\nfunc NewEncoder(w io.Writer) *Encoder {\n\te := &Encoder{printer{Writer: bufio.NewWriter(w)}}\n\te.p.encoder = e\n\treturn e\n}\n\n// Indent sets the encoder to generate XML in which each element\n// begins on a new indented line that starts with prefix and is followed by\n// one or more copies of indent according to the nesting depth.\nfunc (enc *Encoder) Indent(prefix, indent string) {\n\tenc.p.prefix = prefix\n\tenc.p.indent = indent\n}\n\n// Encode writes the XML encoding of v to the stream.\n//\n// See the documentation for Marshal for details about the conversion\n// of Go values to XML.\n//\n// Encode calls Flush before returning.\nfunc (enc *Encoder) 
Encode(v interface{}) error {\n\terr := enc.p.marshalValue(reflect.ValueOf(v), nil, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn enc.p.Flush()\n}\n\n// EncodeElement writes the XML encoding of v to the stream,\n// using start as the outermost tag in the encoding.\n//\n// See the documentation for Marshal for details about the conversion\n// of Go values to XML.\n//\n// EncodeElement calls Flush before returning.\nfunc (enc *Encoder) EncodeElement(v interface{}, start StartElement) error {\n\terr := enc.p.marshalValue(reflect.ValueOf(v), nil, &start)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn enc.p.Flush()\n}\n\nvar (\n\tbegComment   = []byte(\"<!--\")\n\tendComment   = []byte(\"-->\")\n\tendProcInst  = []byte(\"?>\")\n\tendDirective = []byte(\">\")\n)\n\n// EncodeToken writes the given XML token to the stream.\n// It returns an error if StartElement and EndElement tokens are not\n// properly matched.\n//\n// EncodeToken does not call Flush, because usually it is part of a\n// larger operation such as Encode or EncodeElement (or a custom\n// Marshaler's MarshalXML invoked during those), and those will call\n// Flush when finished. 
Callers that create an Encoder and then invoke\n// EncodeToken directly, without using Encode or EncodeElement, need to\n// call Flush when finished to ensure that the XML is written to the\n// underlying writer.\n//\n// EncodeToken allows writing a ProcInst with Target set to \"xml\" only\n// as the first token in the stream.\n//\n// When encoding a StartElement holding an XML namespace prefix\n// declaration for a prefix that is not already declared, contained\n// elements (including the StartElement itself) will use the declared\n// prefix when encoding names with matching namespace URIs.\nfunc (enc *Encoder) EncodeToken(t Token) error {\n\n\tp := &enc.p\n\tswitch t := t.(type) {\n\tcase StartElement:\n\t\tif err := p.writeStart(&t); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase EndElement:\n\t\tif err := p.writeEnd(t.Name); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase CharData:\n\t\tescapeText(p, t, false)\n\tcase Comment:\n\t\tif bytes.Contains(t, endComment) {\n\t\t\treturn fmt.Errorf(\"xml: EncodeToken of Comment containing --> marker\")\n\t\t}\n\t\tp.WriteString(\"<!--\")\n\t\tp.Write(t)\n\t\tp.WriteString(\"-->\")\n\t\treturn p.cachedWriteError()\n\tcase ProcInst:\n\t\t// First token to be encoded which is also a ProcInst with target of xml\n\t\t// is the xml declaration. 
The only ProcInst where target of xml is allowed.\n\t\tif t.Target == \"xml\" && p.Buffered() != 0 {\n\t\t\treturn fmt.Errorf(\"xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded\")\n\t\t}\n\t\tif !isNameString(t.Target) {\n\t\t\treturn fmt.Errorf(\"xml: EncodeToken of ProcInst with invalid Target\")\n\t\t}\n\t\tif bytes.Contains(t.Inst, endProcInst) {\n\t\t\treturn fmt.Errorf(\"xml: EncodeToken of ProcInst containing ?> marker\")\n\t\t}\n\t\tp.WriteString(\"<?\")\n\t\tp.WriteString(t.Target)\n\t\tif len(t.Inst) > 0 {\n\t\t\tp.WriteByte(' ')\n\t\t\tp.Write(t.Inst)\n\t\t}\n\t\tp.WriteString(\"?>\")\n\tcase Directive:\n\t\tif !isValidDirective(t) {\n\t\t\treturn fmt.Errorf(\"xml: EncodeToken of Directive containing wrong < or > markers\")\n\t\t}\n\t\tp.WriteString(\"<!\")\n\t\tp.Write(t)\n\t\tp.WriteString(\">\")\n\tdefault:\n\t\treturn fmt.Errorf(\"xml: EncodeToken of invalid token type\")\n\n\t}\n\treturn p.cachedWriteError()\n}\n\n// isValidDirective reports whether dir is a valid directive text,\n// meaning angle brackets are matched, ignoring comments and strings.\nfunc isValidDirective(dir Directive) bool {\n\tvar (\n\t\tdepth     int\n\t\tinquote   uint8\n\t\tincomment bool\n\t)\n\tfor i, c := range dir {\n\t\tswitch {\n\t\tcase incomment:\n\t\t\tif c == '>' {\n\t\t\t\tif n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) {\n\t\t\t\t\tincomment = false\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Just ignore anything in comment\n\t\tcase inquote != 0:\n\t\t\tif c == inquote {\n\t\t\t\tinquote = 0\n\t\t\t}\n\t\t\t// Just ignore anything within quotes\n\t\tcase c == '\\'' || c == '\"':\n\t\t\tinquote = c\n\t\tcase c == '<':\n\t\t\tif i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) {\n\t\t\t\tincomment = true\n\t\t\t} else {\n\t\t\t\tdepth++\n\t\t\t}\n\t\tcase c == '>':\n\t\t\tif depth == 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tdepth--\n\t\t}\n\t}\n\treturn depth == 0 && 
inquote == 0 && !incomment\n}\n\n// Flush flushes any buffered XML to the underlying writer.\n// See the EncodeToken documentation for details about when it is necessary.\nfunc (enc *Encoder) Flush() error {\n\treturn enc.p.Flush()\n}\n\ntype printer struct {\n\t*bufio.Writer\n\tencoder    *Encoder\n\tseq        int\n\tindent     string\n\tprefix     string\n\tdepth      int\n\tindentedIn bool\n\tputNewline bool\n\tdefaultNS  string\n\tattrNS     map[string]string // map prefix -> name space\n\tattrPrefix map[string]string // map name space -> prefix\n\tprefixes   []printerPrefix\n\ttags       []Name\n}\n\n// printerPrefix holds a namespace undo record.\n// When an element is popped, the prefix record\n// is set back to the recorded URL. The empty\n// prefix records the URL for the default name space.\n//\n// The start of an element is recorded with an element\n// that has mark=true.\ntype printerPrefix struct {\n\tprefix string\n\turl    string\n\tmark   bool\n}\n\nfunc (p *printer) prefixForNS(url string, isAttr bool) string {\n\t// The \"http://www.w3.org/XML/1998/namespace\" name space is predefined as \"xml\"\n\t// and must be referred to that way.\n\t// (The \"http://www.w3.org/2000/xmlns/\" name space is also predefined as \"xmlns\",\n\t// but users should not be trying to use that one directly - that's our job.)\n\tif url == xmlURL {\n\t\treturn \"xml\"\n\t}\n\tif !isAttr && url == p.defaultNS {\n\t\t// We can use the default name space.\n\t\treturn \"\"\n\t}\n\treturn p.attrPrefix[url]\n}\n\n// defineNS pushes any namespace definition found in the given attribute.\n// If ignoreNonEmptyDefault is true, an xmlns=\"nonempty\"\n// attribute will be ignored.\nfunc (p *printer) defineNS(attr Attr, ignoreNonEmptyDefault bool) error {\n\tvar prefix string\n\tif attr.Name.Local == \"xmlns\" {\n\t\tif attr.Name.Space != \"\" && attr.Name.Space != \"xml\" && attr.Name.Space != xmlURL {\n\t\t\treturn fmt.Errorf(\"xml: cannot redefine xmlns attribute 
prefix\")\n\t\t}\n\t} else if attr.Name.Space == \"xmlns\" && attr.Name.Local != \"\" {\n\t\tprefix = attr.Name.Local\n\t\tif attr.Value == \"\" {\n\t\t\t// Technically, an empty XML namespace is allowed for an attribute.\n\t\t\t// From http://www.w3.org/TR/xml-names11/#scoping-defaulting:\n\t\t\t//\n\t\t\t// \tThe attribute value in a namespace declaration for a prefix may be\n\t\t\t//\tempty. This has the effect, within the scope of the declaration, of removing\n\t\t\t//\tany association of the prefix with a namespace name.\n\t\t\t//\n\t\t\t// However our namespace prefixes here are used only as hints. There's\n\t\t\t// no need to respect the removal of a namespace prefix, so we ignore it.\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\t// Ignore: it's not a namespace definition\n\t\treturn nil\n\t}\n\tif prefix == \"\" {\n\t\tif attr.Value == p.defaultNS {\n\t\t\t// No need for redefinition.\n\t\t\treturn nil\n\t\t}\n\t\tif attr.Value != \"\" && ignoreNonEmptyDefault {\n\t\t\t// We have an xmlns=\"...\" value but\n\t\t\t// it can't define a name space in this context,\n\t\t\t// probably because the element has an empty\n\t\t\t// name space. In this case, we just ignore\n\t\t\t// the name space declaration.\n\t\t\treturn nil\n\t\t}\n\t} else if _, ok := p.attrPrefix[attr.Value]; ok {\n\t\t// There's already a prefix for the given name space,\n\t\t// so use that. 
This prevents us from\n\t\t// having two prefixes for the same name space\n\t\t// so attrNS and attrPrefix can remain bijective.\n\t\treturn nil\n\t}\n\tp.pushPrefix(prefix, attr.Value)\n\treturn nil\n}\n\n// createNSPrefix creates a name space prefix attribute\n// to use for the given name space, defining a new prefix\n// if necessary.\n// If isAttr is true, the prefix is to be created for an attribute\n// prefix, which means that the default name space cannot\n// be used.\nfunc (p *printer) createNSPrefix(url string, isAttr bool) {\n\tif _, ok := p.attrPrefix[url]; ok {\n\t\t// We already have a prefix for the given URL.\n\t\treturn\n\t}\n\tswitch {\n\tcase !isAttr && url == p.defaultNS:\n\t\t// We can use the default name space.\n\t\treturn\n\tcase url == \"\":\n\t\t// The only way we can encode names in the empty\n\t\t// name space is by using the default name space,\n\t\t// so we must use that.\n\t\tif p.defaultNS != \"\" {\n\t\t\t// The default namespace is non-empty, so we\n\t\t\t// need to set it to empty.\n\t\t\tp.pushPrefix(\"\", \"\")\n\t\t}\n\t\treturn\n\tcase url == xmlURL:\n\t\treturn\n\t}\n\t// TODO If the URL is an existing prefix, we could\n\t// use it as is. That would enable the\n\t// marshaling of elements that had been unmarshaled\n\t// and with a name space prefix that was not found.\n\t// although technically it would be incorrect.\n\n\t// Pick a name. We try to use the final element of the path\n\t// but fall back to _.\n\tprefix := strings.TrimRight(url, \"/\")\n\tif i := strings.LastIndex(prefix, \"/\"); i >= 0 {\n\t\tprefix = prefix[i+1:]\n\t}\n\tif prefix == \"\" || !isName([]byte(prefix)) || strings.Contains(prefix, \":\") {\n\t\tprefix = \"_\"\n\t}\n\tif strings.HasPrefix(prefix, \"xml\") {\n\t\t// xmlanything is reserved.\n\t\tprefix = \"_\" + prefix\n\t}\n\tif p.attrNS[prefix] != \"\" {\n\t\t// Name is taken. 
Find a better one.\n\t\tfor p.seq++; ; p.seq++ {\n\t\t\tif id := prefix + \"_\" + strconv.Itoa(p.seq); p.attrNS[id] == \"\" {\n\t\t\t\tprefix = id\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tp.pushPrefix(prefix, url)\n}\n\n// writeNamespaces writes xmlns attributes for all the\n// namespace prefixes that have been defined in\n// the current element.\nfunc (p *printer) writeNamespaces() {\n\tfor i := len(p.prefixes) - 1; i >= 0; i-- {\n\t\tprefix := p.prefixes[i]\n\t\tif prefix.mark {\n\t\t\treturn\n\t\t}\n\t\tp.WriteString(\" \")\n\t\tif prefix.prefix == \"\" {\n\t\t\t// Default name space.\n\t\t\tp.WriteString(`xmlns=\"`)\n\t\t} else {\n\t\t\tp.WriteString(\"xmlns:\")\n\t\t\tp.WriteString(prefix.prefix)\n\t\t\tp.WriteString(`=\"`)\n\t\t}\n\t\tEscapeText(p, []byte(p.nsForPrefix(prefix.prefix)))\n\t\tp.WriteString(`\"`)\n\t}\n}\n\n// pushPrefix pushes a new prefix on the prefix stack\n// without checking to see if it is already defined.\nfunc (p *printer) pushPrefix(prefix, url string) {\n\tp.prefixes = append(p.prefixes, printerPrefix{\n\t\tprefix: prefix,\n\t\turl:    p.nsForPrefix(prefix),\n\t})\n\tp.setAttrPrefix(prefix, url)\n}\n\n// nsForPrefix returns the name space for the given\n// prefix. 
Note that this is not valid for the\n// empty attribute prefix, which always has an empty\n// name space.\nfunc (p *printer) nsForPrefix(prefix string) string {\n\tif prefix == \"\" {\n\t\treturn p.defaultNS\n\t}\n\treturn p.attrNS[prefix]\n}\n\n// markPrefix marks the start of an element on the prefix\n// stack.\nfunc (p *printer) markPrefix() {\n\tp.prefixes = append(p.prefixes, printerPrefix{\n\t\tmark: true,\n\t})\n}\n\n// popPrefix pops all defined prefixes for the current\n// element.\nfunc (p *printer) popPrefix() {\n\tfor len(p.prefixes) > 0 {\n\t\tprefix := p.prefixes[len(p.prefixes)-1]\n\t\tp.prefixes = p.prefixes[:len(p.prefixes)-1]\n\t\tif prefix.mark {\n\t\t\tbreak\n\t\t}\n\t\tp.setAttrPrefix(prefix.prefix, prefix.url)\n\t}\n}\n\n// setAttrPrefix sets an attribute name space prefix.\n// If url is empty, the attribute is removed.\n// If prefix is empty, the default name space is set.\nfunc (p *printer) setAttrPrefix(prefix, url string) {\n\tif prefix == \"\" {\n\t\tp.defaultNS = url\n\t\treturn\n\t}\n\tif url == \"\" {\n\t\tdelete(p.attrPrefix, p.attrNS[prefix])\n\t\tdelete(p.attrNS, prefix)\n\t\treturn\n\t}\n\tif p.attrPrefix == nil {\n\t\t// Need to define a new name space.\n\t\tp.attrPrefix = make(map[string]string)\n\t\tp.attrNS = make(map[string]string)\n\t}\n\t// Remove any old prefix value. 
This is OK because we maintain a\n\t// strict one-to-one mapping between prefix and URL (see\n\t// defineNS)\n\tdelete(p.attrPrefix, p.attrNS[prefix])\n\tp.attrPrefix[url] = prefix\n\tp.attrNS[prefix] = url\n}\n\nvar (\n\tmarshalerType     = reflect.TypeOf((*Marshaler)(nil)).Elem()\n\tmarshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem()\n\ttextMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()\n)\n\n// marshalValue writes one or more XML elements representing val.\n// If val was obtained from a struct field, finfo must have its details.\nfunc (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error {\n\tif startTemplate != nil && startTemplate.Name.Local == \"\" {\n\t\treturn fmt.Errorf(\"xml: EncodeElement of StartElement with missing name\")\n\t}\n\n\tif !val.IsValid() {\n\t\treturn nil\n\t}\n\tif finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) {\n\t\treturn nil\n\t}\n\n\t// Drill into interfaces and pointers.\n\t// This can turn into an infinite loop given a cyclic chain,\n\t// but it matches the Go 1 behavior.\n\tfor val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {\n\t\tif val.IsNil() {\n\t\t\treturn nil\n\t\t}\n\t\tval = val.Elem()\n\t}\n\n\tkind := val.Kind()\n\ttyp := val.Type()\n\n\t// Check for marshaler.\n\tif val.CanInterface() && typ.Implements(marshalerType) {\n\t\treturn p.marshalInterface(val.Interface().(Marshaler), p.defaultStart(typ, finfo, startTemplate))\n\t}\n\tif val.CanAddr() {\n\t\tpv := val.Addr()\n\t\tif pv.CanInterface() && pv.Type().Implements(marshalerType) {\n\t\t\treturn p.marshalInterface(pv.Interface().(Marshaler), p.defaultStart(pv.Type(), finfo, startTemplate))\n\t\t}\n\t}\n\n\t// Check for text marshaler.\n\tif val.CanInterface() && typ.Implements(textMarshalerType) {\n\t\treturn p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), p.defaultStart(typ, finfo, startTemplate))\n\t}\n\tif val.CanAddr() {\n\t\tpv := 
val.Addr()\n\t\tif pv.CanInterface() && pv.Type().Implements(textMarshalerType) {\n\t\t\treturn p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), p.defaultStart(pv.Type(), finfo, startTemplate))\n\t\t}\n\t}\n\n\t// Slices and arrays iterate over the elements. They do not have an enclosing tag.\n\tif (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 {\n\t\tfor i, n := 0, val.Len(); i < n; i++ {\n\t\t\tif err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\ttinfo, err := getTypeInfo(typ)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create start element.\n\t// Precedence for the XML element name is:\n\t// 0. startTemplate\n\t// 1. XMLName field in underlying struct;\n\t// 2. field name/tag in the struct field; and\n\t// 3. type name\n\tvar start StartElement\n\n\t// explicitNS records whether the element's name space has been\n\t// explicitly set (for example an XMLName field).\n\texplicitNS := false\n\n\tif startTemplate != nil {\n\t\tstart.Name = startTemplate.Name\n\t\texplicitNS = true\n\t\tstart.Attr = append(start.Attr, startTemplate.Attr...)\n\t} else if tinfo.xmlname != nil {\n\t\txmlname := tinfo.xmlname\n\t\tif xmlname.name != \"\" {\n\t\t\tstart.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name\n\t\t} else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != \"\" {\n\t\t\tstart.Name = v\n\t\t}\n\t\texplicitNS = true\n\t}\n\tif start.Name.Local == \"\" && finfo != nil {\n\t\tstart.Name.Local = finfo.name\n\t\tif finfo.xmlns != \"\" {\n\t\t\tstart.Name.Space = finfo.xmlns\n\t\t\texplicitNS = true\n\t\t}\n\t}\n\tif start.Name.Local == \"\" {\n\t\tname := typ.Name()\n\t\tif name == \"\" {\n\t\t\treturn &UnsupportedTypeError{typ}\n\t\t}\n\t\tstart.Name.Local = name\n\t}\n\n\t// defaultNS records the default name space as set by a xmlns=\"...\"\n\t// attribute. 
We don't set p.defaultNS because we want to let\n\t// the attribute writing code (in p.defineNS) be solely responsible\n\t// for maintaining that.\n\tdefaultNS := p.defaultNS\n\n\t// Attributes\n\tfor i := range tinfo.fields {\n\t\tfinfo := &tinfo.fields[i]\n\t\tif finfo.flags&fAttr == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tattr, err := p.fieldAttr(finfo, val)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif attr.Name.Local == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tstart.Attr = append(start.Attr, attr)\n\t\tif attr.Name.Space == \"\" && attr.Name.Local == \"xmlns\" {\n\t\t\tdefaultNS = attr.Value\n\t\t}\n\t}\n\tif !explicitNS {\n\t\t// Historic behavior: elements use the default name space\n\t\t// they are contained in by default.\n\t\tstart.Name.Space = defaultNS\n\t}\n\t// Historic behaviour: an element that's in a namespace sets\n\t// the default namespace for all elements contained within it.\n\tstart.setDefaultNamespace()\n\n\tif err := p.writeStart(&start); err != nil {\n\t\treturn err\n\t}\n\n\tif val.Kind() == reflect.Struct {\n\t\terr = p.marshalStruct(tinfo, val)\n\t} else {\n\t\ts, b, err1 := p.marshalSimple(typ, val)\n\t\tif err1 != nil {\n\t\t\terr = err1\n\t\t} else if b != nil {\n\t\t\tEscapeText(p, b)\n\t\t} else {\n\t\t\tp.EscapeString(s)\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := p.writeEnd(start.Name); err != nil {\n\t\treturn err\n\t}\n\n\treturn p.cachedWriteError()\n}\n\n// fieldAttr returns the attribute of the given field.\n// If the returned attribute has an empty Name.Local,\n// it should not be used.\n// The given value holds the value containing the field.\nfunc (p *printer) fieldAttr(finfo *fieldInfo, val reflect.Value) (Attr, error) {\n\tfv := finfo.value(val)\n\tname := Name{Space: finfo.xmlns, Local: finfo.name}\n\tif finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) {\n\t\treturn Attr{}, nil\n\t}\n\tif fv.Kind() == reflect.Interface && fv.IsNil() {\n\t\treturn Attr{}, nil\n\t}\n\tif fv.CanInterface() && 
fv.Type().Implements(marshalerAttrType) {\n\t\tattr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name)\n\t\treturn attr, err\n\t}\n\tif fv.CanAddr() {\n\t\tpv := fv.Addr()\n\t\tif pv.CanInterface() && pv.Type().Implements(marshalerAttrType) {\n\t\t\tattr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name)\n\t\t\treturn attr, err\n\t\t}\n\t}\n\tif fv.CanInterface() && fv.Type().Implements(textMarshalerType) {\n\t\ttext, err := fv.Interface().(encoding.TextMarshaler).MarshalText()\n\t\tif err != nil {\n\t\t\treturn Attr{}, err\n\t\t}\n\t\treturn Attr{name, string(text)}, nil\n\t}\n\tif fv.CanAddr() {\n\t\tpv := fv.Addr()\n\t\tif pv.CanInterface() && pv.Type().Implements(textMarshalerType) {\n\t\t\ttext, err := pv.Interface().(encoding.TextMarshaler).MarshalText()\n\t\t\tif err != nil {\n\t\t\t\treturn Attr{}, err\n\t\t\t}\n\t\t\treturn Attr{name, string(text)}, nil\n\t\t}\n\t}\n\t// Dereference or skip nil pointer, interface values.\n\tswitch fv.Kind() {\n\tcase reflect.Ptr, reflect.Interface:\n\t\tif fv.IsNil() {\n\t\t\treturn Attr{}, nil\n\t\t}\n\t\tfv = fv.Elem()\n\t}\n\ts, b, err := p.marshalSimple(fv.Type(), fv)\n\tif err != nil {\n\t\treturn Attr{}, err\n\t}\n\tif b != nil {\n\t\ts = string(b)\n\t}\n\treturn Attr{name, s}, nil\n}\n\n// defaultStart returns the default start element to use,\n// given the reflect type, field info, and start template.\nfunc (p *printer) defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement {\n\tvar start StartElement\n\t// Precedence for the XML element name is as above,\n\t// except that we do not look inside structs for the first field.\n\tif startTemplate != nil {\n\t\tstart.Name = startTemplate.Name\n\t\tstart.Attr = append(start.Attr, startTemplate.Attr...)\n\t} else if finfo != nil && finfo.name != \"\" {\n\t\tstart.Name.Local = finfo.name\n\t\tstart.Name.Space = finfo.xmlns\n\t} else if typ.Name() != \"\" {\n\t\tstart.Name.Local = typ.Name()\n\t} else {\n\t\t// Must be a 
pointer to a named type,\n\t\t// since it has the Marshaler methods.\n\t\tstart.Name.Local = typ.Elem().Name()\n\t}\n\t// Historic behaviour: elements use the name space of\n\t// the element they are contained in by default.\n\tif start.Name.Space == \"\" {\n\t\tstart.Name.Space = p.defaultNS\n\t}\n\tstart.setDefaultNamespace()\n\treturn start\n}\n\n// marshalInterface marshals a Marshaler interface value.\nfunc (p *printer) marshalInterface(val Marshaler, start StartElement) error {\n\t// Push a marker onto the tag stack so that MarshalXML\n\t// cannot close the XML tags that it did not open.\n\tp.tags = append(p.tags, Name{})\n\tn := len(p.tags)\n\n\terr := val.MarshalXML(p.encoder, start)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark.\n\tif len(p.tags) > n {\n\t\treturn fmt.Errorf(\"xml: %s.MarshalXML wrote invalid XML: <%s> not closed\", receiverType(val), p.tags[len(p.tags)-1].Local)\n\t}\n\tp.tags = p.tags[:n-1]\n\treturn nil\n}\n\n// marshalTextInterface marshals a TextMarshaler interface value.\nfunc (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error {\n\tif err := p.writeStart(&start); err != nil {\n\t\treturn err\n\t}\n\ttext, err := val.MarshalText()\n\tif err != nil {\n\t\treturn err\n\t}\n\tEscapeText(p, text)\n\treturn p.writeEnd(start.Name)\n}\n\n// writeStart writes the given start element.\nfunc (p *printer) writeStart(start *StartElement) error {\n\tif start.Name.Local == \"\" {\n\t\treturn fmt.Errorf(\"xml: start tag with no name\")\n\t}\n\n\tp.tags = append(p.tags, start.Name)\n\tp.markPrefix()\n\t// Define any name spaces explicitly declared in the attributes.\n\t// We do this as a separate pass so that explicitly declared prefixes\n\t// will take precedence over implicitly declared prefixes\n\t// regardless of the order of the attributes.\n\tignoreNonEmptyDefault := start.Name.Space == \"\"\n\tfor _, attr := range start.Attr {\n\t\tif err 
:= p.defineNS(attr, ignoreNonEmptyDefault); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// Define any new name spaces implied by the attributes.\n\tfor _, attr := range start.Attr {\n\t\tname := attr.Name\n\t\t// From http://www.w3.org/TR/xml-names11/#defaulting\n\t\t// \"Default namespace declarations do not apply directly\n\t\t// to attribute names; the interpretation of unprefixed\n\t\t// attributes is determined by the element on which they\n\t\t// appear.\"\n\t\t// This means we don't need to create a new namespace\n\t\t// when an attribute name space is empty.\n\t\tif name.Space != \"\" && !name.isNamespace() {\n\t\t\tp.createNSPrefix(name.Space, true)\n\t\t}\n\t}\n\tp.createNSPrefix(start.Name.Space, false)\n\n\tp.writeIndent(1)\n\tp.WriteByte('<')\n\tp.writeName(start.Name, false)\n\tp.writeNamespaces()\n\tfor _, attr := range start.Attr {\n\t\tname := attr.Name\n\t\tif name.Local == \"\" || name.isNamespace() {\n\t\t\t// Namespaces have already been written by writeNamespaces above.\n\t\t\tcontinue\n\t\t}\n\t\tp.WriteByte(' ')\n\t\tp.writeName(name, true)\n\t\tp.WriteString(`=\"`)\n\t\tp.EscapeString(attr.Value)\n\t\tp.WriteByte('\"')\n\t}\n\tp.WriteByte('>')\n\treturn nil\n}\n\n// writeName writes the given name. 
It assumes\n// that p.createNSPrefix(name) has already been called.\nfunc (p *printer) writeName(name Name, isAttr bool) {\n\tif prefix := p.prefixForNS(name.Space, isAttr); prefix != \"\" {\n\t\tp.WriteString(prefix)\n\t\tp.WriteByte(':')\n\t}\n\tp.WriteString(name.Local)\n}\n\nfunc (p *printer) writeEnd(name Name) error {\n\tif name.Local == \"\" {\n\t\treturn fmt.Errorf(\"xml: end tag with no name\")\n\t}\n\tif len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == \"\" {\n\t\treturn fmt.Errorf(\"xml: end tag </%s> without start tag\", name.Local)\n\t}\n\tif top := p.tags[len(p.tags)-1]; top != name {\n\t\tif top.Local != name.Local {\n\t\t\treturn fmt.Errorf(\"xml: end tag </%s> does not match start tag <%s>\", name.Local, top.Local)\n\t\t}\n\t\treturn fmt.Errorf(\"xml: end tag </%s> in namespace %s does not match start tag <%s> in namespace %s\", name.Local, name.Space, top.Local, top.Space)\n\t}\n\tp.tags = p.tags[:len(p.tags)-1]\n\n\tp.writeIndent(-1)\n\tp.WriteByte('<')\n\tp.WriteByte('/')\n\tp.writeName(name, false)\n\tp.WriteByte('>')\n\tp.popPrefix()\n\treturn nil\n}\n\nfunc (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) {\n\tswitch val.Kind() {\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn strconv.FormatInt(val.Int(), 10), nil, nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\treturn strconv.FormatUint(val.Uint(), 10), nil, nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil\n\tcase reflect.String:\n\t\treturn val.String(), nil, nil\n\tcase reflect.Bool:\n\t\treturn strconv.FormatBool(val.Bool()), nil, nil\n\tcase reflect.Array:\n\t\tif typ.Elem().Kind() != reflect.Uint8 {\n\t\t\tbreak\n\t\t}\n\t\t// [...]byte\n\t\tvar bytes []byte\n\t\tif val.CanAddr() {\n\t\t\tbytes = val.Slice(0, val.Len()).Bytes()\n\t\t} else {\n\t\t\tbytes = 
make([]byte, val.Len())\n\t\t\treflect.Copy(reflect.ValueOf(bytes), val)\n\t\t}\n\t\treturn \"\", bytes, nil\n\tcase reflect.Slice:\n\t\tif typ.Elem().Kind() != reflect.Uint8 {\n\t\t\tbreak\n\t\t}\n\t\t// []byte\n\t\treturn \"\", val.Bytes(), nil\n\t}\n\treturn \"\", nil, &UnsupportedTypeError{typ}\n}\n\nvar ddBytes = []byte(\"--\")\n\nfunc (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error {\n\ts := parentStack{p: p}\n\tfor i := range tinfo.fields {\n\t\tfinfo := &tinfo.fields[i]\n\t\tif finfo.flags&fAttr != 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvf := finfo.value(val)\n\n\t\t// Dereference or skip nil pointer, interface values.\n\t\tswitch vf.Kind() {\n\t\tcase reflect.Ptr, reflect.Interface:\n\t\t\tif !vf.IsNil() {\n\t\t\t\tvf = vf.Elem()\n\t\t\t}\n\t\t}\n\n\t\tswitch finfo.flags & fMode {\n\t\tcase fCharData:\n\t\t\tif err := s.setParents(&noField, reflect.Value{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif vf.CanInterface() && vf.Type().Implements(textMarshalerType) {\n\t\t\t\tdata, err := vf.Interface().(encoding.TextMarshaler).MarshalText()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tEscape(p, data)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif vf.CanAddr() {\n\t\t\t\tpv := vf.Addr()\n\t\t\t\tif pv.CanInterface() && pv.Type().Implements(textMarshalerType) {\n\t\t\t\t\tdata, err := pv.Interface().(encoding.TextMarshaler).MarshalText()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tEscape(p, data)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tvar scratch [64]byte\n\t\t\tswitch vf.Kind() {\n\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\tEscape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10))\n\t\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\t\t\tEscape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10))\n\t\t\tcase reflect.Float32, reflect.Float64:\n\t\t\t\tEscape(p, 
strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits()))\n\t\t\tcase reflect.Bool:\n\t\t\t\tEscape(p, strconv.AppendBool(scratch[:0], vf.Bool()))\n\t\t\tcase reflect.String:\n\t\t\t\tif err := EscapeText(p, []byte(vf.String())); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tcase reflect.Slice:\n\t\t\t\tif elem, ok := vf.Interface().([]byte); ok {\n\t\t\t\t\tif err := EscapeText(p, elem); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\n\t\tcase fComment:\n\t\t\tif err := s.setParents(&noField, reflect.Value{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tk := vf.Kind()\n\t\t\tif !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) {\n\t\t\t\treturn fmt.Errorf(\"xml: bad type for comment field of %s\", val.Type())\n\t\t\t}\n\t\t\tif vf.Len() == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tp.writeIndent(0)\n\t\t\tp.WriteString(\"<!--\")\n\t\t\tdashDash := false\n\t\t\tdashLast := false\n\t\t\tswitch k {\n\t\t\tcase reflect.String:\n\t\t\t\ts := vf.String()\n\t\t\t\tdashDash = strings.Index(s, \"--\") >= 0\n\t\t\t\tdashLast = s[len(s)-1] == '-'\n\t\t\t\tif !dashDash {\n\t\t\t\t\tp.WriteString(s)\n\t\t\t\t}\n\t\t\tcase reflect.Slice:\n\t\t\t\tb := vf.Bytes()\n\t\t\t\tdashDash = bytes.Index(b, ddBytes) >= 0\n\t\t\t\tdashLast = b[len(b)-1] == '-'\n\t\t\t\tif !dashDash {\n\t\t\t\t\tp.Write(b)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tpanic(\"can't happen\")\n\t\t\t}\n\t\t\tif dashDash {\n\t\t\t\treturn fmt.Errorf(`xml: comments must not contain \"--\"`)\n\t\t\t}\n\t\t\tif dashLast {\n\t\t\t\t// \"--->\" is invalid grammar. 
Make it \"- -->\"\n\t\t\t\tp.WriteByte(' ')\n\t\t\t}\n\t\t\tp.WriteString(\"-->\")\n\t\t\tcontinue\n\n\t\tcase fInnerXml:\n\t\t\tiface := vf.Interface()\n\t\t\tswitch raw := iface.(type) {\n\t\t\tcase []byte:\n\t\t\t\tp.Write(raw)\n\t\t\t\tcontinue\n\t\t\tcase string:\n\t\t\t\tp.WriteString(raw)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\tcase fElement, fElement | fAny:\n\t\t\tif err := s.setParents(finfo, vf); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif err := p.marshalValue(vf, finfo, nil); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := s.setParents(&noField, reflect.Value{}); err != nil {\n\t\treturn err\n\t}\n\treturn p.cachedWriteError()\n}\n\nvar noField fieldInfo\n\n// return the bufio Writer's cached write error\nfunc (p *printer) cachedWriteError() error {\n\t_, err := p.Write(nil)\n\treturn err\n}\n\nfunc (p *printer) writeIndent(depthDelta int) {\n\tif len(p.prefix) == 0 && len(p.indent) == 0 {\n\t\treturn\n\t}\n\tif depthDelta < 0 {\n\t\tp.depth--\n\t\tif p.indentedIn {\n\t\t\tp.indentedIn = false\n\t\t\treturn\n\t\t}\n\t\tp.indentedIn = false\n\t}\n\tif p.putNewline {\n\t\tp.WriteByte('\\n')\n\t} else {\n\t\tp.putNewline = true\n\t}\n\tif len(p.prefix) > 0 {\n\t\tp.WriteString(p.prefix)\n\t}\n\tif len(p.indent) > 0 {\n\t\tfor i := 0; i < p.depth; i++ {\n\t\t\tp.WriteString(p.indent)\n\t\t}\n\t}\n\tif depthDelta > 0 {\n\t\tp.depth++\n\t\tp.indentedIn = true\n\t}\n}\n\ntype parentStack struct {\n\tp       *printer\n\txmlns   string\n\tparents []string\n}\n\n// setParents sets the stack of current parents to those found in finfo.\n// It only writes the start elements if vf holds a non-nil value.\n// If finfo is &noField, it pops all elements.\nfunc (s *parentStack) setParents(finfo *fieldInfo, vf reflect.Value) error {\n\txmlns := s.p.defaultNS\n\tif finfo.xmlns != \"\" {\n\t\txmlns = finfo.xmlns\n\t}\n\tcommonParents := 0\n\tif xmlns == s.xmlns {\n\t\tfor ; commonParents < len(finfo.parents) && commonParents < len(s.parents); commonParents++ 
{\n\t\t\tif finfo.parents[commonParents] != s.parents[commonParents] {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\t// Pop off any parents that aren't in common with the previous field.\n\tfor i := len(s.parents) - 1; i >= commonParents; i-- {\n\t\tif err := s.p.writeEnd(Name{\n\t\t\tSpace: s.xmlns,\n\t\t\tLocal: s.parents[i],\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\ts.parents = finfo.parents\n\ts.xmlns = xmlns\n\tif commonParents >= len(s.parents) {\n\t\t// No new elements to push.\n\t\treturn nil\n\t}\n\tif (vf.Kind() == reflect.Ptr || vf.Kind() == reflect.Interface) && vf.IsNil() {\n\t\t// The element is nil, so no need for the start elements.\n\t\ts.parents = s.parents[:commonParents]\n\t\treturn nil\n\t}\n\t// Push any new parents required.\n\tfor _, name := range s.parents[commonParents:] {\n\t\tstart := &StartElement{\n\t\t\tName: Name{\n\t\t\t\tSpace: s.xmlns,\n\t\t\t\tLocal: name,\n\t\t\t},\n\t\t}\n\t\t// Set the default name space for parent elements\n\t\t// to match what we do with other elements.\n\t\tif s.xmlns != s.p.defaultNS {\n\t\t\tstart.setDefaultNamespace()\n\t\t}\n\t\tif err := s.p.writeStart(start); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// A MarshalXMLError is returned when Marshal encounters a type\n// that cannot be converted into XML.\ntype UnsupportedTypeError struct {\n\tType reflect.Type\n}\n\nfunc (e *UnsupportedTypeError) Error() string {\n\treturn \"xml: unsupported type: \" + e.Type.String()\n}\n\nfunc isEmptyValue(v reflect.Value) bool {\n\tswitch v.Kind() {\n\tcase reflect.Array, reflect.Map, reflect.Slice, reflect.String:\n\t\treturn v.Len() == 0\n\tcase reflect.Bool:\n\t\treturn !v.Bool()\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn v.Int() == 0\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\treturn v.Uint() == 0\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn v.Float() == 0\n\tcase 
reflect.Interface, reflect.Ptr:\n\t\treturn v.IsNil()\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage xml\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\ntype DriveType int\n\nconst (\n\tHyperDrive DriveType = iota\n\tImprobabilityDrive\n)\n\ntype Passenger struct {\n\tName   []string `xml:\"name\"`\n\tWeight float32  `xml:\"weight\"`\n}\n\ntype Ship struct {\n\tXMLName struct{} `xml:\"spaceship\"`\n\n\tName      string       `xml:\"name,attr\"`\n\tPilot     string       `xml:\"pilot,attr\"`\n\tDrive     DriveType    `xml:\"drive\"`\n\tAge       uint         `xml:\"age\"`\n\tPassenger []*Passenger `xml:\"passenger\"`\n\tsecret    string\n}\n\ntype NamedType string\n\ntype Port struct {\n\tXMLName struct{} `xml:\"port\"`\n\tType    string   `xml:\"type,attr,omitempty\"`\n\tComment string   `xml:\",comment\"`\n\tNumber  string   `xml:\",chardata\"`\n}\n\ntype Domain struct {\n\tXMLName struct{} `xml:\"domain\"`\n\tCountry string   `xml:\",attr,omitempty\"`\n\tName    []byte   `xml:\",chardata\"`\n\tComment []byte   `xml:\",comment\"`\n}\n\ntype Book struct {\n\tXMLName struct{} `xml:\"book\"`\n\tTitle   string   `xml:\",chardata\"`\n}\n\ntype Event struct {\n\tXMLName struct{} `xml:\"event\"`\n\tYear    int      `xml:\",chardata\"`\n}\n\ntype Movie struct {\n\tXMLName struct{} `xml:\"movie\"`\n\tLength  uint     `xml:\",chardata\"`\n}\n\ntype Pi struct {\n\tXMLName       struct{} `xml:\"pi\"`\n\tApproximation float32  `xml:\",chardata\"`\n}\n\ntype Universe struct {\n\tXMLName struct{} `xml:\"universe\"`\n\tVisible float64  `xml:\",chardata\"`\n}\n\ntype Particle struct {\n\tXMLName struct{} `xml:\"particle\"`\n\tHasMass bool     `xml:\",chardata\"`\n}\n\ntype Departure struct {\n\tXMLName struct{}  `xml:\"departure\"`\n\tWhen    time.Time `xml:\",chardata\"`\n}\n\ntype SecretAgent 
struct {\n\tXMLName   struct{} `xml:\"agent\"`\n\tHandle    string   `xml:\"handle,attr\"`\n\tIdentity  string\n\tObfuscate string `xml:\",innerxml\"`\n}\n\ntype NestedItems struct {\n\tXMLName struct{} `xml:\"result\"`\n\tItems   []string `xml:\">item\"`\n\tItem1   []string `xml:\"Items>item1\"`\n}\n\ntype NestedOrder struct {\n\tXMLName struct{} `xml:\"result\"`\n\tField1  string   `xml:\"parent>c\"`\n\tField2  string   `xml:\"parent>b\"`\n\tField3  string   `xml:\"parent>a\"`\n}\n\ntype MixedNested struct {\n\tXMLName struct{} `xml:\"result\"`\n\tA       string   `xml:\"parent1>a\"`\n\tB       string   `xml:\"b\"`\n\tC       string   `xml:\"parent1>parent2>c\"`\n\tD       string   `xml:\"parent1>d\"`\n}\n\ntype NilTest struct {\n\tA interface{} `xml:\"parent1>parent2>a\"`\n\tB interface{} `xml:\"parent1>b\"`\n\tC interface{} `xml:\"parent1>parent2>c\"`\n}\n\ntype Service struct {\n\tXMLName struct{} `xml:\"service\"`\n\tDomain  *Domain  `xml:\"host>domain\"`\n\tPort    *Port    `xml:\"host>port\"`\n\tExtra1  interface{}\n\tExtra2  interface{} `xml:\"host>extra2\"`\n}\n\nvar nilStruct *Ship\n\ntype EmbedA struct {\n\tEmbedC\n\tEmbedB EmbedB\n\tFieldA string\n}\n\ntype EmbedB struct {\n\tFieldB string\n\t*EmbedC\n}\n\ntype EmbedC struct {\n\tFieldA1 string `xml:\"FieldA>A1\"`\n\tFieldA2 string `xml:\"FieldA>A2\"`\n\tFieldB  string\n\tFieldC  string\n}\n\ntype NameCasing struct {\n\tXMLName struct{} `xml:\"casing\"`\n\tXy      string\n\tXY      string\n\tXyA     string `xml:\"Xy,attr\"`\n\tXYA     string `xml:\"XY,attr\"`\n}\n\ntype NamePrecedence struct {\n\tXMLName     Name              `xml:\"Parent\"`\n\tFromTag     XMLNameWithoutTag `xml:\"InTag\"`\n\tFromNameVal XMLNameWithoutTag\n\tFromNameTag XMLNameWithTag\n\tInFieldName string\n}\n\ntype XMLNameWithTag struct {\n\tXMLName Name   `xml:\"InXMLNameTag\"`\n\tValue   string `xml:\",chardata\"`\n}\n\ntype XMLNameWithNSTag struct {\n\tXMLName Name   `xml:\"ns InXMLNameWithNSTag\"`\n\tValue   string 
`xml:\",chardata\"`\n}\n\ntype XMLNameWithoutTag struct {\n\tXMLName Name\n\tValue   string `xml:\",chardata\"`\n}\n\ntype NameInField struct {\n\tFoo Name `xml:\"ns foo\"`\n}\n\ntype AttrTest struct {\n\tInt   int     `xml:\",attr\"`\n\tNamed int     `xml:\"int,attr\"`\n\tFloat float64 `xml:\",attr\"`\n\tUint8 uint8   `xml:\",attr\"`\n\tBool  bool    `xml:\",attr\"`\n\tStr   string  `xml:\",attr\"`\n\tBytes []byte  `xml:\",attr\"`\n}\n\ntype OmitAttrTest struct {\n\tInt   int     `xml:\",attr,omitempty\"`\n\tNamed int     `xml:\"int,attr,omitempty\"`\n\tFloat float64 `xml:\",attr,omitempty\"`\n\tUint8 uint8   `xml:\",attr,omitempty\"`\n\tBool  bool    `xml:\",attr,omitempty\"`\n\tStr   string  `xml:\",attr,omitempty\"`\n\tBytes []byte  `xml:\",attr,omitempty\"`\n}\n\ntype OmitFieldTest struct {\n\tInt   int           `xml:\",omitempty\"`\n\tNamed int           `xml:\"int,omitempty\"`\n\tFloat float64       `xml:\",omitempty\"`\n\tUint8 uint8         `xml:\",omitempty\"`\n\tBool  bool          `xml:\",omitempty\"`\n\tStr   string        `xml:\",omitempty\"`\n\tBytes []byte        `xml:\",omitempty\"`\n\tPtr   *PresenceTest `xml:\",omitempty\"`\n}\n\ntype AnyTest struct {\n\tXMLName  struct{}  `xml:\"a\"`\n\tNested   string    `xml:\"nested>value\"`\n\tAnyField AnyHolder `xml:\",any\"`\n}\n\ntype AnyOmitTest struct {\n\tXMLName  struct{}   `xml:\"a\"`\n\tNested   string     `xml:\"nested>value\"`\n\tAnyField *AnyHolder `xml:\",any,omitempty\"`\n}\n\ntype AnySliceTest struct {\n\tXMLName  struct{}    `xml:\"a\"`\n\tNested   string      `xml:\"nested>value\"`\n\tAnyField []AnyHolder `xml:\",any\"`\n}\n\ntype AnyHolder struct {\n\tXMLName Name\n\tXML     string `xml:\",innerxml\"`\n}\n\ntype RecurseA struct {\n\tA string\n\tB *RecurseB\n}\n\ntype RecurseB struct {\n\tA *RecurseA\n\tB string\n}\n\ntype PresenceTest struct {\n\tExists *struct{}\n}\n\ntype IgnoreTest struct {\n\tPublicSecret string `xml:\"-\"`\n}\n\ntype MyBytes []byte\n\ntype Data struct {\n\tBytes  
[]byte\n\tAttr   []byte `xml:\",attr\"`\n\tCustom MyBytes\n}\n\ntype Plain struct {\n\tV interface{}\n}\n\ntype MyInt int\n\ntype EmbedInt struct {\n\tMyInt\n}\n\ntype Strings struct {\n\tX []string `xml:\"A>B,omitempty\"`\n}\n\ntype PointerFieldsTest struct {\n\tXMLName  Name    `xml:\"dummy\"`\n\tName     *string `xml:\"name,attr\"`\n\tAge      *uint   `xml:\"age,attr\"`\n\tEmpty    *string `xml:\"empty,attr\"`\n\tContents *string `xml:\",chardata\"`\n}\n\ntype ChardataEmptyTest struct {\n\tXMLName  Name    `xml:\"test\"`\n\tContents *string `xml:\",chardata\"`\n}\n\ntype MyMarshalerTest struct {\n}\n\nvar _ Marshaler = (*MyMarshalerTest)(nil)\n\nfunc (m *MyMarshalerTest) MarshalXML(e *Encoder, start StartElement) error {\n\te.EncodeToken(start)\n\te.EncodeToken(CharData([]byte(\"hello world\")))\n\te.EncodeToken(EndElement{start.Name})\n\treturn nil\n}\n\ntype MyMarshalerAttrTest struct{}\n\nvar _ MarshalerAttr = (*MyMarshalerAttrTest)(nil)\n\nfunc (m *MyMarshalerAttrTest) MarshalXMLAttr(name Name) (Attr, error) {\n\treturn Attr{name, \"hello world\"}, nil\n}\n\ntype MyMarshalerValueAttrTest struct{}\n\nvar _ MarshalerAttr = MyMarshalerValueAttrTest{}\n\nfunc (m MyMarshalerValueAttrTest) MarshalXMLAttr(name Name) (Attr, error) {\n\treturn Attr{name, \"hello world\"}, nil\n}\n\ntype MarshalerStruct struct {\n\tFoo MyMarshalerAttrTest `xml:\",attr\"`\n}\n\ntype MarshalerValueStruct struct {\n\tFoo MyMarshalerValueAttrTest `xml:\",attr\"`\n}\n\ntype InnerStruct struct {\n\tXMLName Name `xml:\"testns outer\"`\n}\n\ntype OuterStruct struct {\n\tInnerStruct\n\tIntAttr int `xml:\"int,attr\"`\n}\n\ntype OuterNamedStruct struct {\n\tInnerStruct\n\tXMLName Name `xml:\"outerns test\"`\n\tIntAttr int  `xml:\"int,attr\"`\n}\n\ntype OuterNamedOrderedStruct struct {\n\tXMLName Name `xml:\"outerns test\"`\n\tInnerStruct\n\tIntAttr int `xml:\"int,attr\"`\n}\n\ntype OuterOuterStruct struct {\n\tOuterStruct\n}\n\ntype NestedAndChardata struct {\n\tAB       []string 
`xml:\"A>B\"`\n\tChardata string   `xml:\",chardata\"`\n}\n\ntype NestedAndComment struct {\n\tAB      []string `xml:\"A>B\"`\n\tComment string   `xml:\",comment\"`\n}\n\ntype XMLNSFieldStruct struct {\n\tNs   string `xml:\"xmlns,attr\"`\n\tBody string\n}\n\ntype NamedXMLNSFieldStruct struct {\n\tXMLName struct{} `xml:\"testns test\"`\n\tNs      string   `xml:\"xmlns,attr\"`\n\tBody    string\n}\n\ntype XMLNSFieldStructWithOmitEmpty struct {\n\tNs   string `xml:\"xmlns,attr,omitempty\"`\n\tBody string\n}\n\ntype NamedXMLNSFieldStructWithEmptyNamespace struct {\n\tXMLName struct{} `xml:\"test\"`\n\tNs      string   `xml:\"xmlns,attr\"`\n\tBody    string\n}\n\ntype RecursiveXMLNSFieldStruct struct {\n\tNs   string                     `xml:\"xmlns,attr\"`\n\tBody *RecursiveXMLNSFieldStruct `xml:\",omitempty\"`\n\tText string                     `xml:\",omitempty\"`\n}\n\nfunc ifaceptr(x interface{}) interface{} {\n\treturn &x\n}\n\nvar (\n\tnameAttr     = \"Sarah\"\n\tageAttr      = uint(12)\n\tcontentsAttr = \"lorem ipsum\"\n)\n\n// Unless explicitly stated as such (or *Plain), all of the\n// tests below are two-way tests. 
When introducing new tests,\n// please try to make them two-way as well to ensure that\n// marshalling and unmarshalling are as symmetrical as feasible.\nvar marshalTests = []struct {\n\tValue         interface{}\n\tExpectXML     string\n\tMarshalOnly   bool\n\tUnmarshalOnly bool\n}{\n\t// Test nil marshals to nothing\n\t{Value: nil, ExpectXML: ``, MarshalOnly: true},\n\t{Value: nilStruct, ExpectXML: ``, MarshalOnly: true},\n\n\t// Test value types\n\t{Value: &Plain{true}, ExpectXML: `<Plain><V>true</V></Plain>`},\n\t{Value: &Plain{false}, ExpectXML: `<Plain><V>false</V></Plain>`},\n\t{Value: &Plain{int(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},\n\t{Value: &Plain{int8(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},\n\t{Value: &Plain{int16(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},\n\t{Value: &Plain{int32(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},\n\t{Value: &Plain{uint(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},\n\t{Value: &Plain{uint8(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},\n\t{Value: &Plain{uint16(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},\n\t{Value: &Plain{uint32(42)}, ExpectXML: `<Plain><V>42</V></Plain>`},\n\t{Value: &Plain{float32(1.25)}, ExpectXML: `<Plain><V>1.25</V></Plain>`},\n\t{Value: &Plain{float64(1.25)}, ExpectXML: `<Plain><V>1.25</V></Plain>`},\n\t{Value: &Plain{uintptr(0xFFDD)}, ExpectXML: `<Plain><V>65501</V></Plain>`},\n\t{Value: &Plain{\"gopher\"}, ExpectXML: `<Plain><V>gopher</V></Plain>`},\n\t{Value: &Plain{[]byte(\"gopher\")}, ExpectXML: `<Plain><V>gopher</V></Plain>`},\n\t{Value: &Plain{\"</>\"}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`},\n\t{Value: &Plain{[]byte(\"</>\")}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`},\n\t{Value: &Plain{[3]byte{'<', '/', '>'}}, ExpectXML: `<Plain><V>&lt;/&gt;</V></Plain>`},\n\t{Value: &Plain{NamedType(\"potato\")}, ExpectXML: `<Plain><V>potato</V></Plain>`},\n\t{Value: &Plain{[]int{1, 2, 3}}, ExpectXML: `<Plain><V>1</V><V>2</V><V>3</V></Plain>`},\n\t{Value: &Plain{[3]int{1, 2, 3}}, 
ExpectXML: `<Plain><V>1</V><V>2</V><V>3</V></Plain>`},\n\t{Value: ifaceptr(true), MarshalOnly: true, ExpectXML: `<bool>true</bool>`},\n\n\t// Test time.\n\t{\n\t\tValue:     &Plain{time.Unix(1e9, 123456789).UTC()},\n\t\tExpectXML: `<Plain><V>2001-09-09T01:46:40.123456789Z</V></Plain>`,\n\t},\n\n\t// A pointer to struct{} may be used to test for an element's presence.\n\t{\n\t\tValue:     &PresenceTest{new(struct{})},\n\t\tExpectXML: `<PresenceTest><Exists></Exists></PresenceTest>`,\n\t},\n\t{\n\t\tValue:     &PresenceTest{},\n\t\tExpectXML: `<PresenceTest></PresenceTest>`,\n\t},\n\n\t// A pointer to struct{} may be used to test for an element's presence.\n\t{\n\t\tValue:     &PresenceTest{new(struct{})},\n\t\tExpectXML: `<PresenceTest><Exists></Exists></PresenceTest>`,\n\t},\n\t{\n\t\tValue:     &PresenceTest{},\n\t\tExpectXML: `<PresenceTest></PresenceTest>`,\n\t},\n\n\t// A []byte field is only nil if the element was not found.\n\t{\n\t\tValue:         &Data{},\n\t\tExpectXML:     `<Data></Data>`,\n\t\tUnmarshalOnly: true,\n\t},\n\t{\n\t\tValue:         &Data{Bytes: []byte{}, Custom: MyBytes{}, Attr: []byte{}},\n\t\tExpectXML:     `<Data Attr=\"\"><Bytes></Bytes><Custom></Custom></Data>`,\n\t\tUnmarshalOnly: true,\n\t},\n\n\t// Check that []byte works, including named []byte types.\n\t{\n\t\tValue:     &Data{Bytes: []byte(\"ab\"), Custom: MyBytes(\"cd\"), Attr: []byte{'v'}},\n\t\tExpectXML: `<Data Attr=\"v\"><Bytes>ab</Bytes><Custom>cd</Custom></Data>`,\n\t},\n\n\t// Test innerxml\n\t{\n\t\tValue: &SecretAgent{\n\t\t\tHandle:    \"007\",\n\t\t\tIdentity:  \"James Bond\",\n\t\t\tObfuscate: \"<redacted/>\",\n\t\t},\n\t\tExpectXML:   `<agent handle=\"007\"><Identity>James Bond</Identity><redacted/></agent>`,\n\t\tMarshalOnly: true,\n\t},\n\t{\n\t\tValue: &SecretAgent{\n\t\t\tHandle:    \"007\",\n\t\t\tIdentity:  \"James Bond\",\n\t\t\tObfuscate: \"<Identity>James Bond</Identity><redacted/>\",\n\t\t},\n\t\tExpectXML:     `<agent handle=\"007\"><Identity>James 
Bond</Identity><redacted/></agent>`,\n\t\tUnmarshalOnly: true,\n\t},\n\n\t// Test structs\n\t{Value: &Port{Type: \"ssl\", Number: \"443\"}, ExpectXML: `<port type=\"ssl\">443</port>`},\n\t{Value: &Port{Number: \"443\"}, ExpectXML: `<port>443</port>`},\n\t{Value: &Port{Type: \"<unix>\"}, ExpectXML: `<port type=\"&lt;unix&gt;\"></port>`},\n\t{Value: &Port{Number: \"443\", Comment: \"https\"}, ExpectXML: `<port><!--https-->443</port>`},\n\t{Value: &Port{Number: \"443\", Comment: \"add space-\"}, ExpectXML: `<port><!--add space- -->443</port>`, MarshalOnly: true},\n\t{Value: &Domain{Name: []byte(\"google.com&friends\")}, ExpectXML: `<domain>google.com&amp;friends</domain>`},\n\t{Value: &Domain{Name: []byte(\"google.com\"), Comment: []byte(\" &friends \")}, ExpectXML: `<domain>google.com<!-- &friends --></domain>`},\n\t{Value: &Book{Title: \"Pride & Prejudice\"}, ExpectXML: `<book>Pride &amp; Prejudice</book>`},\n\t{Value: &Event{Year: -3114}, ExpectXML: `<event>-3114</event>`},\n\t{Value: &Movie{Length: 13440}, ExpectXML: `<movie>13440</movie>`},\n\t{Value: &Pi{Approximation: 3.14159265}, ExpectXML: `<pi>3.1415927</pi>`},\n\t{Value: &Universe{Visible: 9.3e13}, ExpectXML: `<universe>9.3e+13</universe>`},\n\t{Value: &Particle{HasMass: true}, ExpectXML: `<particle>true</particle>`},\n\t{Value: &Departure{When: ParseTime(\"2013-01-09T00:15:00-09:00\")}, ExpectXML: `<departure>2013-01-09T00:15:00-09:00</departure>`},\n\t{Value: atomValue, ExpectXML: atomXml},\n\t{\n\t\tValue: &Ship{\n\t\t\tName:  \"Heart of Gold\",\n\t\t\tPilot: \"Computer\",\n\t\t\tAge:   1,\n\t\t\tDrive: ImprobabilityDrive,\n\t\t\tPassenger: []*Passenger{\n\t\t\t\t{\n\t\t\t\t\tName:   []string{\"Zaphod\", \"Beeblebrox\"},\n\t\t\t\t\tWeight: 7.25,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   []string{\"Trisha\", \"McMillen\"},\n\t\t\t\t\tWeight: 5.5,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   []string{\"Ford\", \"Prefect\"},\n\t\t\t\t\tWeight: 7,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   []string{\"Arthur\", 
\"Dent\"},\n\t\t\t\t\tWeight: 6.75,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExpectXML: `<spaceship name=\"Heart of Gold\" pilot=\"Computer\">` +\n\t\t\t`<drive>` + strconv.Itoa(int(ImprobabilityDrive)) + `</drive>` +\n\t\t\t`<age>1</age>` +\n\t\t\t`<passenger>` +\n\t\t\t`<name>Zaphod</name>` +\n\t\t\t`<name>Beeblebrox</name>` +\n\t\t\t`<weight>7.25</weight>` +\n\t\t\t`</passenger>` +\n\t\t\t`<passenger>` +\n\t\t\t`<name>Trisha</name>` +\n\t\t\t`<name>McMillen</name>` +\n\t\t\t`<weight>5.5</weight>` +\n\t\t\t`</passenger>` +\n\t\t\t`<passenger>` +\n\t\t\t`<name>Ford</name>` +\n\t\t\t`<name>Prefect</name>` +\n\t\t\t`<weight>7</weight>` +\n\t\t\t`</passenger>` +\n\t\t\t`<passenger>` +\n\t\t\t`<name>Arthur</name>` +\n\t\t\t`<name>Dent</name>` +\n\t\t\t`<weight>6.75</weight>` +\n\t\t\t`</passenger>` +\n\t\t\t`</spaceship>`,\n\t},\n\n\t// Test a>b\n\t{\n\t\tValue: &NestedItems{Items: nil, Item1: nil},\n\t\tExpectXML: `<result>` +\n\t\t\t`<Items>` +\n\t\t\t`</Items>` +\n\t\t\t`</result>`,\n\t},\n\t{\n\t\tValue: &NestedItems{Items: []string{}, Item1: []string{}},\n\t\tExpectXML: `<result>` +\n\t\t\t`<Items>` +\n\t\t\t`</Items>` +\n\t\t\t`</result>`,\n\t\tMarshalOnly: true,\n\t},\n\t{\n\t\tValue: &NestedItems{Items: nil, Item1: []string{\"A\"}},\n\t\tExpectXML: `<result>` +\n\t\t\t`<Items>` +\n\t\t\t`<item1>A</item1>` +\n\t\t\t`</Items>` +\n\t\t\t`</result>`,\n\t},\n\t{\n\t\tValue: &NestedItems{Items: []string{\"A\", \"B\"}, Item1: nil},\n\t\tExpectXML: `<result>` +\n\t\t\t`<Items>` +\n\t\t\t`<item>A</item>` +\n\t\t\t`<item>B</item>` +\n\t\t\t`</Items>` +\n\t\t\t`</result>`,\n\t},\n\t{\n\t\tValue: &NestedItems{Items: []string{\"A\", \"B\"}, Item1: []string{\"C\"}},\n\t\tExpectXML: `<result>` +\n\t\t\t`<Items>` +\n\t\t\t`<item>A</item>` +\n\t\t\t`<item>B</item>` +\n\t\t\t`<item1>C</item1>` +\n\t\t\t`</Items>` +\n\t\t\t`</result>`,\n\t},\n\t{\n\t\tValue: &NestedOrder{Field1: \"C\", Field2: \"B\", Field3: \"A\"},\n\t\tExpectXML: `<result>` +\n\t\t\t`<parent>` +\n\t\t\t`<c>C</c>` 
+\n\t\t\t`<b>B</b>` +\n\t\t\t`<a>A</a>` +\n\t\t\t`</parent>` +\n\t\t\t`</result>`,\n\t},\n\t{\n\t\tValue: &NilTest{A: \"A\", B: nil, C: \"C\"},\n\t\tExpectXML: `<NilTest>` +\n\t\t\t`<parent1>` +\n\t\t\t`<parent2><a>A</a></parent2>` +\n\t\t\t`<parent2><c>C</c></parent2>` +\n\t\t\t`</parent1>` +\n\t\t\t`</NilTest>`,\n\t\tMarshalOnly: true, // Uses interface{}\n\t},\n\t{\n\t\tValue: &MixedNested{A: \"A\", B: \"B\", C: \"C\", D: \"D\"},\n\t\tExpectXML: `<result>` +\n\t\t\t`<parent1><a>A</a></parent1>` +\n\t\t\t`<b>B</b>` +\n\t\t\t`<parent1>` +\n\t\t\t`<parent2><c>C</c></parent2>` +\n\t\t\t`<d>D</d>` +\n\t\t\t`</parent1>` +\n\t\t\t`</result>`,\n\t},\n\t{\n\t\tValue:     &Service{Port: &Port{Number: \"80\"}},\n\t\tExpectXML: `<service><host><port>80</port></host></service>`,\n\t},\n\t{\n\t\tValue:     &Service{},\n\t\tExpectXML: `<service></service>`,\n\t},\n\t{\n\t\tValue: &Service{Port: &Port{Number: \"80\"}, Extra1: \"A\", Extra2: \"B\"},\n\t\tExpectXML: `<service>` +\n\t\t\t`<host><port>80</port></host>` +\n\t\t\t`<Extra1>A</Extra1>` +\n\t\t\t`<host><extra2>B</extra2></host>` +\n\t\t\t`</service>`,\n\t\tMarshalOnly: true,\n\t},\n\t{\n\t\tValue: &Service{Port: &Port{Number: \"80\"}, Extra2: \"example\"},\n\t\tExpectXML: `<service>` +\n\t\t\t`<host><port>80</port></host>` +\n\t\t\t`<host><extra2>example</extra2></host>` +\n\t\t\t`</service>`,\n\t\tMarshalOnly: true,\n\t},\n\t{\n\t\tValue: &struct {\n\t\t\tXMLName struct{} `xml:\"space top\"`\n\t\t\tA       string   `xml:\"x>a\"`\n\t\t\tB       string   `xml:\"x>b\"`\n\t\t\tC       string   `xml:\"space x>c\"`\n\t\t\tC1      string   `xml:\"space1 x>c\"`\n\t\t\tD1      string   `xml:\"space1 x>d\"`\n\t\t\tE1      string   `xml:\"x>e\"`\n\t\t}{\n\t\t\tA:  \"a\",\n\t\t\tB:  \"b\",\n\t\t\tC:  \"c\",\n\t\t\tC1: \"c1\",\n\t\t\tD1: \"d1\",\n\t\t\tE1: \"e1\",\n\t\t},\n\t\tExpectXML: `<top xmlns=\"space\">` +\n\t\t\t`<x><a>a</a><b>b</b><c>c</c></x>` +\n\t\t\t`<x xmlns=\"space1\">` +\n\t\t\t`<c>c1</c>` +\n\t\t\t`<d>d1</d>` 
+\n\t\t\t`</x>` +\n\t\t\t`<x>` +\n\t\t\t`<e>e1</e>` +\n\t\t\t`</x>` +\n\t\t\t`</top>`,\n\t},\n\t{\n\t\tValue: &struct {\n\t\t\tXMLName Name\n\t\t\tA       string `xml:\"x>a\"`\n\t\t\tB       string `xml:\"x>b\"`\n\t\t\tC       string `xml:\"space x>c\"`\n\t\t\tC1      string `xml:\"space1 x>c\"`\n\t\t\tD1      string `xml:\"space1 x>d\"`\n\t\t}{\n\t\t\tXMLName: Name{\n\t\t\t\tSpace: \"space0\",\n\t\t\t\tLocal: \"top\",\n\t\t\t},\n\t\t\tA:  \"a\",\n\t\t\tB:  \"b\",\n\t\t\tC:  \"c\",\n\t\t\tC1: \"c1\",\n\t\t\tD1: \"d1\",\n\t\t},\n\t\tExpectXML: `<top xmlns=\"space0\">` +\n\t\t\t`<x><a>a</a><b>b</b></x>` +\n\t\t\t`<x xmlns=\"space\"><c>c</c></x>` +\n\t\t\t`<x xmlns=\"space1\">` +\n\t\t\t`<c>c1</c>` +\n\t\t\t`<d>d1</d>` +\n\t\t\t`</x>` +\n\t\t\t`</top>`,\n\t},\n\t{\n\t\tValue: &struct {\n\t\t\tXMLName struct{} `xml:\"top\"`\n\t\t\tB       string   `xml:\"space x>b\"`\n\t\t\tB1      string   `xml:\"space1 x>b\"`\n\t\t}{\n\t\t\tB:  \"b\",\n\t\t\tB1: \"b1\",\n\t\t},\n\t\tExpectXML: `<top>` +\n\t\t\t`<x xmlns=\"space\"><b>b</b></x>` +\n\t\t\t`<x xmlns=\"space1\"><b>b1</b></x>` +\n\t\t\t`</top>`,\n\t},\n\n\t// Test struct embedding\n\t{\n\t\tValue: &EmbedA{\n\t\t\tEmbedC: EmbedC{\n\t\t\t\tFieldA1: \"\", // Shadowed by A.A\n\t\t\t\tFieldA2: \"\", // Shadowed by A.A\n\t\t\t\tFieldB:  \"A.C.B\",\n\t\t\t\tFieldC:  \"A.C.C\",\n\t\t\t},\n\t\t\tEmbedB: EmbedB{\n\t\t\t\tFieldB: \"A.B.B\",\n\t\t\t\tEmbedC: &EmbedC{\n\t\t\t\t\tFieldA1: \"A.B.C.A1\",\n\t\t\t\t\tFieldA2: \"A.B.C.A2\",\n\t\t\t\t\tFieldB:  \"\", // Shadowed by A.B.B\n\t\t\t\t\tFieldC:  \"A.B.C.C\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tFieldA: \"A.A\",\n\t\t},\n\t\tExpectXML: `<EmbedA>` +\n\t\t\t`<FieldB>A.C.B</FieldB>` +\n\t\t\t`<FieldC>A.C.C</FieldC>` +\n\t\t\t`<EmbedB>` +\n\t\t\t`<FieldB>A.B.B</FieldB>` +\n\t\t\t`<FieldA>` +\n\t\t\t`<A1>A.B.C.A1</A1>` +\n\t\t\t`<A2>A.B.C.A2</A2>` +\n\t\t\t`</FieldA>` +\n\t\t\t`<FieldC>A.B.C.C</FieldC>` +\n\t\t\t`</EmbedB>` +\n\t\t\t`<FieldA>A.A</FieldA>` +\n\t\t\t`</EmbedA>`,\n\t},\n\n\t// 
Test that name casing matters\n\t{\n\t\tValue:     &NameCasing{Xy: \"mixed\", XY: \"upper\", XyA: \"mixedA\", XYA: \"upperA\"},\n\t\tExpectXML: `<casing Xy=\"mixedA\" XY=\"upperA\"><Xy>mixed</Xy><XY>upper</XY></casing>`,\n\t},\n\n\t// Test the order in which the XML element name is chosen\n\t{\n\t\tValue: &NamePrecedence{\n\t\t\tFromTag:     XMLNameWithoutTag{Value: \"A\"},\n\t\t\tFromNameVal: XMLNameWithoutTag{XMLName: Name{Local: \"InXMLName\"}, Value: \"B\"},\n\t\t\tFromNameTag: XMLNameWithTag{Value: \"C\"},\n\t\t\tInFieldName: \"D\",\n\t\t},\n\t\tExpectXML: `<Parent>` +\n\t\t\t`<InTag>A</InTag>` +\n\t\t\t`<InXMLName>B</InXMLName>` +\n\t\t\t`<InXMLNameTag>C</InXMLNameTag>` +\n\t\t\t`<InFieldName>D</InFieldName>` +\n\t\t\t`</Parent>`,\n\t\tMarshalOnly: true,\n\t},\n\t{\n\t\tValue: &NamePrecedence{\n\t\t\tXMLName:     Name{Local: \"Parent\"},\n\t\t\tFromTag:     XMLNameWithoutTag{XMLName: Name{Local: \"InTag\"}, Value: \"A\"},\n\t\t\tFromNameVal: XMLNameWithoutTag{XMLName: Name{Local: \"FromNameVal\"}, Value: \"B\"},\n\t\t\tFromNameTag: XMLNameWithTag{XMLName: Name{Local: \"InXMLNameTag\"}, Value: \"C\"},\n\t\t\tInFieldName: \"D\",\n\t\t},\n\t\tExpectXML: `<Parent>` +\n\t\t\t`<InTag>A</InTag>` +\n\t\t\t`<FromNameVal>B</FromNameVal>` +\n\t\t\t`<InXMLNameTag>C</InXMLNameTag>` +\n\t\t\t`<InFieldName>D</InFieldName>` +\n\t\t\t`</Parent>`,\n\t\tUnmarshalOnly: true,\n\t},\n\n\t// xml.Name works in a plain field as well.\n\t{\n\t\tValue:     &NameInField{Name{Space: \"ns\", Local: \"foo\"}},\n\t\tExpectXML: `<NameInField><foo xmlns=\"ns\"></foo></NameInField>`,\n\t},\n\t{\n\t\tValue:         &NameInField{Name{Space: \"ns\", Local: \"foo\"}},\n\t\tExpectXML:     `<NameInField><foo xmlns=\"ns\"><ignore></ignore></foo></NameInField>`,\n\t\tUnmarshalOnly: true,\n\t},\n\n\t// Marshaling zero xml.Name uses the tag or field name.\n\t{\n\t\tValue:       &NameInField{},\n\t\tExpectXML:   `<NameInField><foo xmlns=\"ns\"></foo></NameInField>`,\n\t\tMarshalOnly: true,\n\t},\n\n\t// 
Test attributes\n\t{\n\t\tValue: &AttrTest{\n\t\t\tInt:   8,\n\t\t\tNamed: 9,\n\t\t\tFloat: 23.5,\n\t\t\tUint8: 255,\n\t\t\tBool:  true,\n\t\t\tStr:   \"str\",\n\t\t\tBytes: []byte(\"byt\"),\n\t\t},\n\t\tExpectXML: `<AttrTest Int=\"8\" int=\"9\" Float=\"23.5\" Uint8=\"255\"` +\n\t\t\t` Bool=\"true\" Str=\"str\" Bytes=\"byt\"></AttrTest>`,\n\t},\n\t{\n\t\tValue: &AttrTest{Bytes: []byte{}},\n\t\tExpectXML: `<AttrTest Int=\"0\" int=\"0\" Float=\"0\" Uint8=\"0\"` +\n\t\t\t` Bool=\"false\" Str=\"\" Bytes=\"\"></AttrTest>`,\n\t},\n\t{\n\t\tValue: &OmitAttrTest{\n\t\t\tInt:   8,\n\t\t\tNamed: 9,\n\t\t\tFloat: 23.5,\n\t\t\tUint8: 255,\n\t\t\tBool:  true,\n\t\t\tStr:   \"str\",\n\t\t\tBytes: []byte(\"byt\"),\n\t\t},\n\t\tExpectXML: `<OmitAttrTest Int=\"8\" int=\"9\" Float=\"23.5\" Uint8=\"255\"` +\n\t\t\t` Bool=\"true\" Str=\"str\" Bytes=\"byt\"></OmitAttrTest>`,\n\t},\n\t{\n\t\tValue:     &OmitAttrTest{},\n\t\tExpectXML: `<OmitAttrTest></OmitAttrTest>`,\n\t},\n\n\t// pointer fields\n\t{\n\t\tValue:       &PointerFieldsTest{Name: &nameAttr, Age: &ageAttr, Contents: &contentsAttr},\n\t\tExpectXML:   `<dummy name=\"Sarah\" age=\"12\">lorem ipsum</dummy>`,\n\t\tMarshalOnly: true,\n\t},\n\n\t// empty chardata pointer field\n\t{\n\t\tValue:       &ChardataEmptyTest{},\n\t\tExpectXML:   `<test></test>`,\n\t\tMarshalOnly: true,\n\t},\n\n\t// omitempty on fields\n\t{\n\t\tValue: &OmitFieldTest{\n\t\t\tInt:   8,\n\t\t\tNamed: 9,\n\t\t\tFloat: 23.5,\n\t\t\tUint8: 255,\n\t\t\tBool:  true,\n\t\t\tStr:   \"str\",\n\t\t\tBytes: []byte(\"byt\"),\n\t\t\tPtr:   &PresenceTest{},\n\t\t},\n\t\tExpectXML: `<OmitFieldTest>` +\n\t\t\t`<Int>8</Int>` +\n\t\t\t`<int>9</int>` +\n\t\t\t`<Float>23.5</Float>` +\n\t\t\t`<Uint8>255</Uint8>` +\n\t\t\t`<Bool>true</Bool>` +\n\t\t\t`<Str>str</Str>` +\n\t\t\t`<Bytes>byt</Bytes>` +\n\t\t\t`<Ptr></Ptr>` +\n\t\t\t`</OmitFieldTest>`,\n\t},\n\t{\n\t\tValue:     &OmitFieldTest{},\n\t\tExpectXML: `<OmitFieldTest></OmitFieldTest>`,\n\t},\n\n\t// Test 
\",any\"\n\t{\n\t\tExpectXML: `<a><nested><value>known</value></nested><other><sub>unknown</sub></other></a>`,\n\t\tValue: &AnyTest{\n\t\t\tNested: \"known\",\n\t\t\tAnyField: AnyHolder{\n\t\t\t\tXMLName: Name{Local: \"other\"},\n\t\t\t\tXML:     \"<sub>unknown</sub>\",\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tValue: &AnyTest{Nested: \"known\",\n\t\t\tAnyField: AnyHolder{\n\t\t\t\tXML:     \"<unknown/>\",\n\t\t\t\tXMLName: Name{Local: \"AnyField\"},\n\t\t\t},\n\t\t},\n\t\tExpectXML: `<a><nested><value>known</value></nested><AnyField><unknown/></AnyField></a>`,\n\t},\n\t{\n\t\tExpectXML: `<a><nested><value>b</value></nested></a>`,\n\t\tValue: &AnyOmitTest{\n\t\t\tNested: \"b\",\n\t\t},\n\t},\n\t{\n\t\tExpectXML: `<a><nested><value>b</value></nested><c><d>e</d></c><g xmlns=\"f\"><h>i</h></g></a>`,\n\t\tValue: &AnySliceTest{\n\t\t\tNested: \"b\",\n\t\t\tAnyField: []AnyHolder{\n\t\t\t\t{\n\t\t\t\t\tXMLName: Name{Local: \"c\"},\n\t\t\t\t\tXML:     \"<d>e</d>\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tXMLName: Name{Space: \"f\", Local: \"g\"},\n\t\t\t\t\tXML:     \"<h>i</h>\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tExpectXML: `<a><nested><value>b</value></nested></a>`,\n\t\tValue: &AnySliceTest{\n\t\t\tNested: \"b\",\n\t\t},\n\t},\n\n\t// Test recursive types.\n\t{\n\t\tValue: &RecurseA{\n\t\t\tA: \"a1\",\n\t\t\tB: &RecurseB{\n\t\t\t\tA: &RecurseA{\"a2\", nil},\n\t\t\t\tB: \"b1\",\n\t\t\t},\n\t\t},\n\t\tExpectXML: `<RecurseA><A>a1</A><B><A><A>a2</A></A><B>b1</B></B></RecurseA>`,\n\t},\n\n\t// Test ignoring fields via \"-\" tag\n\t{\n\t\tExpectXML: `<IgnoreTest></IgnoreTest>`,\n\t\tValue:     &IgnoreTest{},\n\t},\n\t{\n\t\tExpectXML:   `<IgnoreTest></IgnoreTest>`,\n\t\tValue:       &IgnoreTest{PublicSecret: \"can't tell\"},\n\t\tMarshalOnly: true,\n\t},\n\t{\n\t\tExpectXML:     `<IgnoreTest><PublicSecret>ignore me</PublicSecret></IgnoreTest>`,\n\t\tValue:         &IgnoreTest{},\n\t\tUnmarshalOnly: true,\n\t},\n\n\t// Test escaping.\n\t{\n\t\tExpectXML: 
`<a><nested><value>dquote: &#34;; squote: &#39;; ampersand: &amp;; less: &lt;; greater: &gt;;</value></nested><empty></empty></a>`,\n\t\tValue: &AnyTest{\n\t\t\tNested:   `dquote: \"; squote: '; ampersand: &; less: <; greater: >;`,\n\t\t\tAnyField: AnyHolder{XMLName: Name{Local: \"empty\"}},\n\t\t},\n\t},\n\t{\n\t\tExpectXML: `<a><nested><value>newline: &#xA;; cr: &#xD;; tab: &#x9;;</value></nested><AnyField></AnyField></a>`,\n\t\tValue: &AnyTest{\n\t\t\tNested:   \"newline: \\n; cr: \\r; tab: \\t;\",\n\t\t\tAnyField: AnyHolder{XMLName: Name{Local: \"AnyField\"}},\n\t\t},\n\t},\n\t{\n\t\tExpectXML: \"<a><nested><value>1\\r2\\r\\n3\\n\\r4\\n5</value></nested></a>\",\n\t\tValue: &AnyTest{\n\t\t\tNested: \"1\\n2\\n3\\n\\n4\\n5\",\n\t\t},\n\t\tUnmarshalOnly: true,\n\t},\n\t{\n\t\tExpectXML: `<EmbedInt><MyInt>42</MyInt></EmbedInt>`,\n\t\tValue: &EmbedInt{\n\t\t\tMyInt: 42,\n\t\t},\n\t},\n\t// Test omitempty with parent chain; see golang.org/issue/4168.\n\t{\n\t\tExpectXML: `<Strings><A></A></Strings>`,\n\t\tValue:     &Strings{},\n\t},\n\t// Custom marshalers.\n\t{\n\t\tExpectXML: `<MyMarshalerTest>hello world</MyMarshalerTest>`,\n\t\tValue:     &MyMarshalerTest{},\n\t},\n\t{\n\t\tExpectXML: `<MarshalerStruct Foo=\"hello world\"></MarshalerStruct>`,\n\t\tValue:     &MarshalerStruct{},\n\t},\n\t{\n\t\tExpectXML: `<MarshalerValueStruct Foo=\"hello world\"></MarshalerValueStruct>`,\n\t\tValue:     &MarshalerValueStruct{},\n\t},\n\t{\n\t\tExpectXML: `<outer xmlns=\"testns\" int=\"10\"></outer>`,\n\t\tValue:     &OuterStruct{IntAttr: 10},\n\t},\n\t{\n\t\tExpectXML: `<test xmlns=\"outerns\" int=\"10\"></test>`,\n\t\tValue:     &OuterNamedStruct{XMLName: Name{Space: \"outerns\", Local: \"test\"}, IntAttr: 10},\n\t},\n\t{\n\t\tExpectXML: `<test xmlns=\"outerns\" int=\"10\"></test>`,\n\t\tValue:     &OuterNamedOrderedStruct{XMLName: Name{Space: \"outerns\", Local: \"test\"}, IntAttr: 10},\n\t},\n\t{\n\t\tExpectXML: `<outer xmlns=\"testns\" int=\"10\"></outer>`,\n\t\tValue:     
&OuterOuterStruct{OuterStruct{IntAttr: 10}},\n\t},\n\t{\n\t\tExpectXML: `<NestedAndChardata><A><B></B><B></B></A>test</NestedAndChardata>`,\n\t\tValue:     &NestedAndChardata{AB: make([]string, 2), Chardata: \"test\"},\n\t},\n\t{\n\t\tExpectXML: `<NestedAndComment><A><B></B><B></B></A><!--test--></NestedAndComment>`,\n\t\tValue:     &NestedAndComment{AB: make([]string, 2), Comment: \"test\"},\n\t},\n\t{\n\t\tExpectXML: `<XMLNSFieldStruct xmlns=\"http://example.com/ns\"><Body>hello world</Body></XMLNSFieldStruct>`,\n\t\tValue:     &XMLNSFieldStruct{Ns: \"http://example.com/ns\", Body: \"hello world\"},\n\t},\n\t{\n\t\tExpectXML: `<testns:test xmlns:testns=\"testns\" xmlns=\"http://example.com/ns\"><Body>hello world</Body></testns:test>`,\n\t\tValue:     &NamedXMLNSFieldStruct{Ns: \"http://example.com/ns\", Body: \"hello world\"},\n\t},\n\t{\n\t\tExpectXML: `<testns:test xmlns:testns=\"testns\"><Body>hello world</Body></testns:test>`,\n\t\tValue:     &NamedXMLNSFieldStruct{Ns: \"\", Body: \"hello world\"},\n\t},\n\t{\n\t\tExpectXML: `<XMLNSFieldStructWithOmitEmpty><Body>hello world</Body></XMLNSFieldStructWithOmitEmpty>`,\n\t\tValue:     &XMLNSFieldStructWithOmitEmpty{Body: \"hello world\"},\n\t},\n\t{\n\t\t// The xmlns attribute must be ignored because the <test>\n\t\t// element is in the empty namespace, so it's not possible\n\t\t// to set the default namespace to something non-empty.\n\t\tExpectXML:   `<test><Body>hello world</Body></test>`,\n\t\tValue:       &NamedXMLNSFieldStructWithEmptyNamespace{Ns: \"foo\", Body: \"hello world\"},\n\t\tMarshalOnly: true,\n\t},\n\t{\n\t\tExpectXML: `<RecursiveXMLNSFieldStruct xmlns=\"foo\"><Body xmlns=\"\"><Text>hello world</Text></Body></RecursiveXMLNSFieldStruct>`,\n\t\tValue: &RecursiveXMLNSFieldStruct{\n\t\t\tNs: \"foo\",\n\t\t\tBody: &RecursiveXMLNSFieldStruct{\n\t\t\t\tText: \"hello world\",\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestMarshal(t *testing.T) {\n\tfor idx, test := range marshalTests {\n\t\tif test.UnmarshalOnly 
{\n\t\t\tcontinue\n\t\t}\n\t\tdata, err := Marshal(test.Value)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"#%d: marshal(%#v): %s\", idx, test.Value, err)\n\t\t\tcontinue\n\t\t}\n\t\tif got, want := string(data), test.ExpectXML; got != want {\n\t\t\tif strings.Contains(want, \"\\n\") {\n\t\t\t\tt.Errorf(\"#%d: marshal(%#v):\\nHAVE:\\n%s\\nWANT:\\n%s\", idx, test.Value, got, want)\n\t\t\t} else {\n\t\t\t\tt.Errorf(\"#%d: marshal(%#v):\\nhave %#q\\nwant %#q\", idx, test.Value, got, want)\n\t\t\t}\n\t\t}\n\t}\n}\n\ntype AttrParent struct {\n\tX string `xml:\"X>Y,attr\"`\n}\n\ntype BadAttr struct {\n\tName []string `xml:\"name,attr\"`\n}\n\nvar marshalErrorTests = []struct {\n\tValue interface{}\n\tErr   string\n\tKind  reflect.Kind\n}{\n\t{\n\t\tValue: make(chan bool),\n\t\tErr:   \"xml: unsupported type: chan bool\",\n\t\tKind:  reflect.Chan,\n\t},\n\t{\n\t\tValue: map[string]string{\n\t\t\t\"question\": \"What do you get when you multiply six by nine?\",\n\t\t\t\"answer\":   \"42\",\n\t\t},\n\t\tErr:  \"xml: unsupported type: map[string]string\",\n\t\tKind: reflect.Map,\n\t},\n\t{\n\t\tValue: map[*Ship]bool{nil: false},\n\t\tErr:   \"xml: unsupported type: map[*xml.Ship]bool\",\n\t\tKind:  reflect.Map,\n\t},\n\t{\n\t\tValue: &Domain{Comment: []byte(\"f--bar\")},\n\t\tErr:   `xml: comments must not contain \"--\"`,\n\t},\n\t// Reject parent chain with attr, never worked; see golang.org/issue/5033.\n\t{\n\t\tValue: &AttrParent{},\n\t\tErr:   `xml: X>Y chain not valid with attr flag`,\n\t},\n\t{\n\t\tValue: BadAttr{[]string{\"X\", \"Y\"}},\n\t\tErr:   `xml: unsupported type: []string`,\n\t},\n}\n\nvar marshalIndentTests = []struct {\n\tValue     interface{}\n\tPrefix    string\n\tIndent    string\n\tExpectXML string\n}{\n\t{\n\t\tValue: &SecretAgent{\n\t\t\tHandle:    \"007\",\n\t\t\tIdentity:  \"James Bond\",\n\t\t\tObfuscate: \"<redacted/>\",\n\t\t},\n\t\tPrefix:    \"\",\n\t\tIndent:    \"\\t\",\n\t\tExpectXML: fmt.Sprintf(\"<agent handle=\\\"007\\\">\\n\\t<Identity>James 
Bond</Identity><redacted/>\\n</agent>\"),\n\t},\n}\n\nfunc TestMarshalErrors(t *testing.T) {\n\tfor idx, test := range marshalErrorTests {\n\t\tdata, err := Marshal(test.Value)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"#%d: marshal(%#v) = [success] %q, want error %v\", idx, test.Value, data, test.Err)\n\t\t\tcontinue\n\t\t}\n\t\tif err.Error() != test.Err {\n\t\t\tt.Errorf(\"#%d: marshal(%#v) = [error] %v, want %v\", idx, test.Value, err, test.Err)\n\t\t}\n\t\tif test.Kind != reflect.Invalid {\n\t\t\tif kind := err.(*UnsupportedTypeError).Type.Kind(); kind != test.Kind {\n\t\t\t\tt.Errorf(\"#%d: marshal(%#v) = [error kind] %s, want %s\", idx, test.Value, kind, test.Kind)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Do invertibility testing on the various structures that we test\nfunc TestUnmarshal(t *testing.T) {\n\tfor i, test := range marshalTests {\n\t\tif test.MarshalOnly {\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := test.Value.(*Plain); ok {\n\t\t\tcontinue\n\t\t}\n\t\tvt := reflect.TypeOf(test.Value)\n\t\tdest := reflect.New(vt.Elem()).Interface()\n\t\terr := Unmarshal([]byte(test.ExpectXML), dest)\n\n\t\tswitch fix := dest.(type) {\n\t\tcase *Feed:\n\t\t\tfix.Author.InnerXML = \"\"\n\t\t\tfor i := range fix.Entry {\n\t\t\t\tfix.Entry[i].Author.InnerXML = \"\"\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"#%d: unexpected error: %#v\", i, err)\n\t\t} else if got, want := dest, test.Value; !reflect.DeepEqual(got, want) {\n\t\t\tt.Errorf(\"#%d: unmarshal(%q):\\nhave %#v\\nwant %#v\", i, test.ExpectXML, got, want)\n\t\t}\n\t}\n}\n\nfunc TestMarshalIndent(t *testing.T) {\n\tfor i, test := range marshalIndentTests {\n\t\tdata, err := MarshalIndent(test.Value, test.Prefix, test.Indent)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"#%d: Error: %s\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif got, want := string(data), test.ExpectXML; got != want {\n\t\t\tt.Errorf(\"#%d: MarshalIndent:\\nGot:%s\\nWant:\\n%s\", i, got, want)\n\t\t}\n\t}\n}\n\ntype limitedBytesWriter struct {\n\tw      
io.Writer\n\tremain int // until writes fail\n}\n\nfunc (lw *limitedBytesWriter) Write(p []byte) (n int, err error) {\n\tif lw.remain <= 0 {\n\t\tprintln(\"error\")\n\t\treturn 0, errors.New(\"write limit hit\")\n\t}\n\tif len(p) > lw.remain {\n\t\tp = p[:lw.remain]\n\t\tn, _ = lw.w.Write(p)\n\t\tlw.remain = 0\n\t\treturn n, errors.New(\"write limit hit\")\n\t}\n\tn, err = lw.w.Write(p)\n\tlw.remain -= n\n\treturn n, err\n}\n\nfunc TestMarshalWriteErrors(t *testing.T) {\n\tvar buf bytes.Buffer\n\tconst writeCap = 1024\n\tw := &limitedBytesWriter{&buf, writeCap}\n\tenc := NewEncoder(w)\n\tvar err error\n\tvar i int\n\tconst n = 4000\n\tfor i = 1; i <= n; i++ {\n\t\terr = enc.Encode(&Passenger{\n\t\t\tName:   []string{\"Alice\", \"Bob\"},\n\t\t\tWeight: 5,\n\t\t})\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err == nil {\n\t\tt.Error(\"expected an error\")\n\t}\n\tif i == n {\n\t\tt.Errorf(\"expected to fail before the end\")\n\t}\n\tif buf.Len() != writeCap {\n\t\tt.Errorf(\"buf.Len() = %d; want %d\", buf.Len(), writeCap)\n\t}\n}\n\nfunc TestMarshalWriteIOErrors(t *testing.T) {\n\tenc := NewEncoder(errWriter{})\n\n\texpectErr := \"unwritable\"\n\terr := enc.Encode(&Passenger{})\n\tif err == nil || err.Error() != expectErr {\n\t\tt.Errorf(\"EscapeTest = [error] %v, want %v\", err, expectErr)\n\t}\n}\n\nfunc TestMarshalFlush(t *testing.T) {\n\tvar buf bytes.Buffer\n\tenc := NewEncoder(&buf)\n\tif err := enc.EncodeToken(CharData(\"hello world\")); err != nil {\n\t\tt.Fatalf(\"enc.EncodeToken: %v\", err)\n\t}\n\tif buf.Len() > 0 {\n\t\tt.Fatalf(\"enc.EncodeToken caused actual write: %q\", buf.Bytes())\n\t}\n\tif err := enc.Flush(); err != nil {\n\t\tt.Fatalf(\"enc.Flush: %v\", err)\n\t}\n\tif buf.String() != \"hello world\" {\n\t\tt.Fatalf(\"after enc.Flush, buf.String() = %q, want %q\", buf.String(), \"hello world\")\n\t}\n}\n\nvar encodeElementTests = []struct {\n\tdesc      string\n\tvalue     interface{}\n\tstart     StartElement\n\texpectXML 
string\n}{{\n\tdesc:  \"simple string\",\n\tvalue: \"hello\",\n\tstart: StartElement{\n\t\tName: Name{Local: \"a\"},\n\t},\n\texpectXML: `<a>hello</a>`,\n}, {\n\tdesc:  \"string with added attributes\",\n\tvalue: \"hello\",\n\tstart: StartElement{\n\t\tName: Name{Local: \"a\"},\n\t\tAttr: []Attr{{\n\t\t\tName:  Name{Local: \"x\"},\n\t\t\tValue: \"y\",\n\t\t}, {\n\t\t\tName:  Name{Local: \"foo\"},\n\t\t\tValue: \"bar\",\n\t\t}},\n\t},\n\texpectXML: `<a x=\"y\" foo=\"bar\">hello</a>`,\n}, {\n\tdesc: \"start element with default name space\",\n\tvalue: struct {\n\t\tFoo XMLNameWithNSTag\n\t}{\n\t\tFoo: XMLNameWithNSTag{\n\t\t\tValue: \"hello\",\n\t\t},\n\t},\n\tstart: StartElement{\n\t\tName: Name{Space: \"ns\", Local: \"a\"},\n\t\tAttr: []Attr{{\n\t\t\tName: Name{Local: \"xmlns\"},\n\t\t\t// \"ns\" is the name space defined in XMLNameWithNSTag\n\t\t\tValue: \"ns\",\n\t\t}},\n\t},\n\texpectXML: `<a xmlns=\"ns\"><InXMLNameWithNSTag>hello</InXMLNameWithNSTag></a>`,\n}, {\n\tdesc: \"start element in name space with different default name space\",\n\tvalue: struct {\n\t\tFoo XMLNameWithNSTag\n\t}{\n\t\tFoo: XMLNameWithNSTag{\n\t\t\tValue: \"hello\",\n\t\t},\n\t},\n\tstart: StartElement{\n\t\tName: Name{Space: \"ns2\", Local: \"a\"},\n\t\tAttr: []Attr{{\n\t\t\tName: Name{Local: \"xmlns\"},\n\t\t\t// \"ns\" is the name space defined in XMLNameWithNSTag\n\t\t\tValue: \"ns\",\n\t\t}},\n\t},\n\texpectXML: `<ns2:a xmlns:ns2=\"ns2\" xmlns=\"ns\"><InXMLNameWithNSTag>hello</InXMLNameWithNSTag></ns2:a>`,\n}, {\n\tdesc:  \"XMLMarshaler with start element with default name space\",\n\tvalue: &MyMarshalerTest{},\n\tstart: StartElement{\n\t\tName: Name{Space: \"ns2\", Local: \"a\"},\n\t\tAttr: []Attr{{\n\t\t\tName: Name{Local: \"xmlns\"},\n\t\t\t// \"ns\" is the name space defined in XMLNameWithNSTag\n\t\t\tValue: \"ns\",\n\t\t}},\n\t},\n\texpectXML: `<ns2:a xmlns:ns2=\"ns2\" xmlns=\"ns\">hello world</ns2:a>`,\n}}\n\nfunc TestEncodeElement(t *testing.T) {\n\tfor idx, test := range 
encodeElementTests {\n\t\tvar buf bytes.Buffer\n\t\tenc := NewEncoder(&buf)\n\t\terr := enc.EncodeElement(test.value, test.start)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"enc.EncodeElement: %v\", err)\n\t\t}\n\t\terr = enc.Flush()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"enc.Flush: %v\", err)\n\t\t}\n\t\tif got, want := buf.String(), test.expectXML; got != want {\n\t\t\tt.Errorf(\"#%d(%s): EncodeElement(%#v, %#v):\\nhave %#q\\nwant %#q\", idx, test.desc, test.value, test.start, got, want)\n\t\t}\n\t}\n}\n\nfunc BenchmarkMarshal(b *testing.B) {\n\tb.ReportAllocs()\n\tfor i := 0; i < b.N; i++ {\n\t\tMarshal(atomValue)\n\t}\n}\n\nfunc BenchmarkUnmarshal(b *testing.B) {\n\tb.ReportAllocs()\n\txml := []byte(atomXml)\n\tfor i := 0; i < b.N; i++ {\n\t\tUnmarshal(xml, &Feed{})\n\t}\n}\n\n// golang.org/issue/6556\nfunc TestStructPointerMarshal(t *testing.T) {\n\ttype A struct {\n\t\tXMLName string `xml:\"a\"`\n\t\tB       []interface{}\n\t}\n\ttype C struct {\n\t\tXMLName Name\n\t\tValue   string `xml:\"value\"`\n\t}\n\n\ta := new(A)\n\ta.B = append(a.B, &C{\n\t\tXMLName: Name{Local: \"c\"},\n\t\tValue:   \"x\",\n\t})\n\n\tb, err := Marshal(a)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif x := string(b); x != \"<a><c><value>x</value></c></a>\" {\n\t\tt.Fatal(x)\n\t}\n\tvar v A\n\terr = Unmarshal(b, &v)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nvar encodeTokenTests = []struct {\n\tdesc string\n\ttoks []Token\n\twant string\n\terr  string\n}{{\n\tdesc: \"start element with name space\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"space\", \"local\"}, nil},\n\t},\n\twant: `<space:local xmlns:space=\"space\">`,\n}, {\n\tdesc: \"start element with no name\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"space\", \"\"}, nil},\n\t},\n\terr: \"xml: start tag with no name\",\n}, {\n\tdesc: \"end element with no name\",\n\ttoks: []Token{\n\t\tEndElement{Name{\"space\", \"\"}},\n\t},\n\terr: \"xml: end tag with no name\",\n}, {\n\tdesc: \"char data\",\n\ttoks: 
[]Token{\n\t\tCharData(\"foo\"),\n\t},\n\twant: `foo`,\n}, {\n\tdesc: \"char data with escaped chars\",\n\ttoks: []Token{\n\t\tCharData(\" \\t\\n\"),\n\t},\n\twant: \" &#x9;\\n\",\n}, {\n\tdesc: \"comment\",\n\ttoks: []Token{\n\t\tComment(\"foo\"),\n\t},\n\twant: `<!--foo-->`,\n}, {\n\tdesc: \"comment with invalid content\",\n\ttoks: []Token{\n\t\tComment(\"foo-->\"),\n\t},\n\terr: \"xml: EncodeToken of Comment containing --> marker\",\n}, {\n\tdesc: \"proc instruction\",\n\ttoks: []Token{\n\t\tProcInst{\"Target\", []byte(\"Instruction\")},\n\t},\n\twant: `<?Target Instruction?>`,\n}, {\n\tdesc: \"proc instruction with empty target\",\n\ttoks: []Token{\n\t\tProcInst{\"\", []byte(\"Instruction\")},\n\t},\n\terr: \"xml: EncodeToken of ProcInst with invalid Target\",\n}, {\n\tdesc: \"proc instruction with bad content\",\n\ttoks: []Token{\n\t\tProcInst{\"\", []byte(\"Instruction?>\")},\n\t},\n\terr: \"xml: EncodeToken of ProcInst with invalid Target\",\n}, {\n\tdesc: \"directive\",\n\ttoks: []Token{\n\t\tDirective(\"foo\"),\n\t},\n\twant: `<!foo>`,\n}, {\n\tdesc: \"more complex directive\",\n\ttoks: []Token{\n\t\tDirective(\"DOCTYPE doc [ <!ELEMENT doc '>'> <!-- com>ment --> ]\"),\n\t},\n\twant: `<!DOCTYPE doc [ <!ELEMENT doc '>'> <!-- com>ment --> ]>`,\n}, {\n\tdesc: \"directive instruction with bad name\",\n\ttoks: []Token{\n\t\tDirective(\"foo>\"),\n\t},\n\terr: \"xml: EncodeToken of Directive containing wrong < or > markers\",\n}, {\n\tdesc: \"end tag without start tag\",\n\ttoks: []Token{\n\t\tEndElement{Name{\"foo\", \"bar\"}},\n\t},\n\terr: \"xml: end tag </bar> without start tag\",\n}, {\n\tdesc: \"mismatching end tag local name\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"\", \"foo\"}, nil},\n\t\tEndElement{Name{\"\", \"bar\"}},\n\t},\n\terr:  \"xml: end tag </bar> does not match start tag <foo>\",\n\twant: `<foo>`,\n}, {\n\tdesc: \"mismatching end tag namespace\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"space\", \"foo\"}, 
nil},\n\t\tEndElement{Name{\"another\", \"foo\"}},\n\t},\n\terr:  \"xml: end tag </foo> in namespace another does not match start tag <foo> in namespace space\",\n\twant: `<space:foo xmlns:space=\"space\">`,\n}, {\n\tdesc: \"start element with explicit namespace\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"space\", \"local\"}, []Attr{\n\t\t\t{Name{\"xmlns\", \"x\"}, \"space\"},\n\t\t\t{Name{\"space\", \"foo\"}, \"value\"},\n\t\t}},\n\t},\n\twant: `<x:local xmlns:x=\"space\" x:foo=\"value\">`,\n}, {\n\tdesc: \"start element with explicit namespace and colliding prefix\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"space\", \"local\"}, []Attr{\n\t\t\t{Name{\"xmlns\", \"x\"}, \"space\"},\n\t\t\t{Name{\"space\", \"foo\"}, \"value\"},\n\t\t\t{Name{\"x\", \"bar\"}, \"other\"},\n\t\t}},\n\t},\n\twant: `<x:local xmlns:x_1=\"x\" xmlns:x=\"space\" x:foo=\"value\" x_1:bar=\"other\">`,\n}, {\n\tdesc: \"start element using previously defined namespace\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"\", \"local\"}, []Attr{\n\t\t\t{Name{\"xmlns\", \"x\"}, \"space\"},\n\t\t}},\n\t\tStartElement{Name{\"space\", \"foo\"}, []Attr{\n\t\t\t{Name{\"space\", \"x\"}, \"y\"},\n\t\t}},\n\t},\n\twant: `<local xmlns:x=\"space\"><x:foo x:x=\"y\">`,\n}, {\n\tdesc: \"nested name space with same prefix\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"\", \"foo\"}, []Attr{\n\t\t\t{Name{\"xmlns\", \"x\"}, \"space1\"},\n\t\t}},\n\t\tStartElement{Name{\"\", \"foo\"}, []Attr{\n\t\t\t{Name{\"xmlns\", \"x\"}, \"space2\"},\n\t\t}},\n\t\tStartElement{Name{\"\", \"foo\"}, []Attr{\n\t\t\t{Name{\"space1\", \"a\"}, \"space1 value\"},\n\t\t\t{Name{\"space2\", \"b\"}, \"space2 value\"},\n\t\t}},\n\t\tEndElement{Name{\"\", \"foo\"}},\n\t\tEndElement{Name{\"\", \"foo\"}},\n\t\tStartElement{Name{\"\", \"foo\"}, []Attr{\n\t\t\t{Name{\"space1\", \"a\"}, \"space1 value\"},\n\t\t\t{Name{\"space2\", \"b\"}, \"space2 value\"},\n\t\t}},\n\t},\n\twant: `<foo xmlns:x=\"space1\"><foo xmlns:x=\"space2\"><foo 
xmlns:space1=\"space1\" space1:a=\"space1 value\" x:b=\"space2 value\"></foo></foo><foo xmlns:space2=\"space2\" x:a=\"space1 value\" space2:b=\"space2 value\">`,\n}, {\n\tdesc: \"start element defining several prefixes for the same name space\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"space\", \"foo\"}, []Attr{\n\t\t\t{Name{\"xmlns\", \"a\"}, \"space\"},\n\t\t\t{Name{\"xmlns\", \"b\"}, \"space\"},\n\t\t\t{Name{\"space\", \"x\"}, \"value\"},\n\t\t}},\n\t},\n\twant: `<a:foo xmlns:a=\"space\" a:x=\"value\">`,\n}, {\n\tdesc: \"nested element redefines name space\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"\", \"foo\"}, []Attr{\n\t\t\t{Name{\"xmlns\", \"x\"}, \"space\"},\n\t\t}},\n\t\tStartElement{Name{\"space\", \"foo\"}, []Attr{\n\t\t\t{Name{\"xmlns\", \"y\"}, \"space\"},\n\t\t\t{Name{\"space\", \"a\"}, \"value\"},\n\t\t}},\n\t},\n\twant: `<foo xmlns:x=\"space\"><x:foo x:a=\"value\">`,\n}, {\n\tdesc: \"nested element creates alias for default name space\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"space\", \"foo\"}, []Attr{\n\t\t\t{Name{\"\", \"xmlns\"}, \"space\"},\n\t\t}},\n\t\tStartElement{Name{\"space\", \"foo\"}, []Attr{\n\t\t\t{Name{\"xmlns\", \"y\"}, \"space\"},\n\t\t\t{Name{\"space\", \"a\"}, \"value\"},\n\t\t}},\n\t},\n\twant: `<foo xmlns=\"space\"><foo xmlns:y=\"space\" y:a=\"value\">`,\n}, {\n\tdesc: \"nested element defines default name space with existing prefix\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"\", \"foo\"}, []Attr{\n\t\t\t{Name{\"xmlns\", \"x\"}, \"space\"},\n\t\t}},\n\t\tStartElement{Name{\"space\", \"foo\"}, []Attr{\n\t\t\t{Name{\"\", \"xmlns\"}, \"space\"},\n\t\t\t{Name{\"space\", \"a\"}, \"value\"},\n\t\t}},\n\t},\n\twant: `<foo xmlns:x=\"space\"><foo xmlns=\"space\" x:a=\"value\">`,\n}, {\n\tdesc: \"nested element uses empty attribute name space when default ns defined\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"space\", \"foo\"}, []Attr{\n\t\t\t{Name{\"\", \"xmlns\"}, \"space\"},\n\t\t}},\n\t\tStartElement{Name{\"space\", 
\"foo\"}, []Attr{\n\t\t\t{Name{\"\", \"attr\"}, \"value\"},\n\t\t}},\n\t},\n\twant: `<foo xmlns=\"space\"><foo attr=\"value\">`,\n}, {\n\tdesc: \"redefine xmlns\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"\", \"foo\"}, []Attr{\n\t\t\t{Name{\"foo\", \"xmlns\"}, \"space\"},\n\t\t}},\n\t},\n\terr: `xml: cannot redefine xmlns attribute prefix`,\n}, {\n\tdesc: \"xmlns with explicit name space #1\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"space\", \"foo\"}, []Attr{\n\t\t\t{Name{\"xml\", \"xmlns\"}, \"space\"},\n\t\t}},\n\t},\n\twant: `<foo xmlns=\"space\">`,\n}, {\n\tdesc: \"xmlns with explicit name space #2\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"space\", \"foo\"}, []Attr{\n\t\t\t{Name{xmlURL, \"xmlns\"}, \"space\"},\n\t\t}},\n\t},\n\twant: `<foo xmlns=\"space\">`,\n}, {\n\tdesc: \"empty name space declaration is ignored\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"\", \"foo\"}, []Attr{\n\t\t\t{Name{\"xmlns\", \"foo\"}, \"\"},\n\t\t}},\n\t},\n\twant: `<foo>`,\n}, {\n\tdesc: \"attribute with no name is ignored\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"\", \"foo\"}, []Attr{\n\t\t\t{Name{\"\", \"\"}, \"value\"},\n\t\t}},\n\t},\n\twant: `<foo>`,\n}, {\n\tdesc: \"namespace URL with non-valid name\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"/34\", \"foo\"}, []Attr{\n\t\t\t{Name{\"/34\", \"x\"}, \"value\"},\n\t\t}},\n\t},\n\twant: `<_:foo xmlns:_=\"/34\" _:x=\"value\">`,\n}, {\n\tdesc: \"nested element resets default namespace to empty\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"space\", \"foo\"}, []Attr{\n\t\t\t{Name{\"\", \"xmlns\"}, \"space\"},\n\t\t}},\n\t\tStartElement{Name{\"\", \"foo\"}, []Attr{\n\t\t\t{Name{\"\", \"xmlns\"}, \"\"},\n\t\t\t{Name{\"\", \"x\"}, \"value\"},\n\t\t\t{Name{\"space\", \"x\"}, \"value\"},\n\t\t}},\n\t},\n\twant: `<foo xmlns=\"space\"><foo xmlns:space=\"space\" xmlns=\"\" x=\"value\" space:x=\"value\">`,\n}, {\n\tdesc: \"nested element requires empty default name space\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"space\", 
\"foo\"}, []Attr{\n\t\t\t{Name{\"\", \"xmlns\"}, \"space\"},\n\t\t}},\n\t\tStartElement{Name{\"\", \"foo\"}, nil},\n\t},\n\twant: `<foo xmlns=\"space\"><foo xmlns=\"\">`,\n}, {\n\tdesc: \"attribute uses name space from xmlns\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"some/space\", \"foo\"}, []Attr{\n\t\t\t{Name{\"\", \"attr\"}, \"value\"},\n\t\t\t{Name{\"some/space\", \"other\"}, \"other value\"},\n\t\t}},\n\t},\n\twant: `<space:foo xmlns:space=\"some/space\" attr=\"value\" space:other=\"other value\">`,\n}, {\n\tdesc: \"default name space should not be used by attributes\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"space\", \"foo\"}, []Attr{\n\t\t\t{Name{\"\", \"xmlns\"}, \"space\"},\n\t\t\t{Name{\"xmlns\", \"bar\"}, \"space\"},\n\t\t\t{Name{\"space\", \"baz\"}, \"foo\"},\n\t\t}},\n\t\tStartElement{Name{\"space\", \"baz\"}, nil},\n\t\tEndElement{Name{\"space\", \"baz\"}},\n\t\tEndElement{Name{\"space\", \"foo\"}},\n\t},\n\twant: `<foo xmlns:bar=\"space\" xmlns=\"space\" bar:baz=\"foo\"><baz></baz></foo>`,\n}, {\n\tdesc: \"default name space not used by attributes, not explicitly defined\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"space\", \"foo\"}, []Attr{\n\t\t\t{Name{\"\", \"xmlns\"}, \"space\"},\n\t\t\t{Name{\"space\", \"baz\"}, \"foo\"},\n\t\t}},\n\t\tStartElement{Name{\"space\", \"baz\"}, nil},\n\t\tEndElement{Name{\"space\", \"baz\"}},\n\t\tEndElement{Name{\"space\", \"foo\"}},\n\t},\n\twant: `<foo xmlns:space=\"space\" xmlns=\"space\" space:baz=\"foo\"><baz></baz></foo>`,\n}, {\n\tdesc: \"impossible xmlns declaration\",\n\ttoks: []Token{\n\t\tStartElement{Name{\"\", \"foo\"}, []Attr{\n\t\t\t{Name{\"\", \"xmlns\"}, \"space\"},\n\t\t}},\n\t\tStartElement{Name{\"space\", \"bar\"}, []Attr{\n\t\t\t{Name{\"space\", \"attr\"}, \"value\"},\n\t\t}},\n\t},\n\twant: `<foo><space:bar xmlns:space=\"space\" space:attr=\"value\">`,\n}}\n\nfunc TestEncodeToken(t *testing.T) {\nloop:\n\tfor i, tt := range encodeTokenTests {\n\t\tvar buf bytes.Buffer\n\t\tenc := 
NewEncoder(&buf)\n\t\tvar err error\n\t\tfor j, tok := range tt.toks {\n\t\t\terr = enc.EncodeToken(tok)\n\t\t\tif err != nil && j < len(tt.toks)-1 {\n\t\t\t\tt.Errorf(\"#%d %s token #%d: %v\", i, tt.desc, j, err)\n\t\t\t\tcontinue loop\n\t\t\t}\n\t\t}\n\t\terrorf := func(f string, a ...interface{}) {\n\t\t\tt.Errorf(\"#%d %s token #%d:%s\", i, tt.desc, len(tt.toks)-1, fmt.Sprintf(f, a...))\n\t\t}\n\t\tswitch {\n\t\tcase tt.err != \"\" && err == nil:\n\t\t\terrorf(\" expected error; got none\")\n\t\t\tcontinue\n\t\tcase tt.err == \"\" && err != nil:\n\t\t\terrorf(\" got error: %v\", err)\n\t\t\tcontinue\n\t\tcase tt.err != \"\" && err != nil && tt.err != err.Error():\n\t\t\terrorf(\" error mismatch; got %v, want %v\", err, tt.err)\n\t\t\tcontinue\n\t\t}\n\t\tif err := enc.Flush(); err != nil {\n\t\t\terrorf(\" %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif got := buf.String(); got != tt.want {\n\t\t\terrorf(\"\\ngot  %v\\nwant %v\", got, tt.want)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc TestProcInstEncodeToken(t *testing.T) {\n\tvar buf bytes.Buffer\n\tenc := NewEncoder(&buf)\n\n\tif err := enc.EncodeToken(ProcInst{\"xml\", []byte(\"Instruction\")}); err != nil {\n\t\tt.Fatalf(\"enc.EncodeToken: expected to be able to encode xml target ProcInst as first token, %s\", err)\n\t}\n\n\tif err := enc.EncodeToken(ProcInst{\"Target\", []byte(\"Instruction\")}); err != nil {\n\t\tt.Fatalf(\"enc.EncodeToken: expected to be able to add non-xml target ProcInst\")\n\t}\n\n\tif err := enc.EncodeToken(ProcInst{\"xml\", []byte(\"Instruction\")}); err == nil {\n\t\tt.Fatalf(\"enc.EncodeToken: expected to not be allowed to encode xml target ProcInst when not first token\")\n\t}\n}\n\nfunc TestDecodeEncode(t *testing.T) {\n\tvar in, out bytes.Buffer\n\tin.WriteString(`<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<?Target Instruction?>\n<root>\n</root>\t\n`)\n\tdec := NewDecoder(&in)\n\tenc := NewEncoder(&out)\n\tfor tok, err := dec.Token(); err == nil; tok, err = dec.Token() {\n\t\terr = 
enc.EncodeToken(tok)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"enc.EncodeToken: Unable to encode token (%#v), %v\", tok, err)\n\t\t}\n\t}\n}\n\n// Issue 9796. Used to fail with GORACE=\"halt_on_error=1\" -race.\nfunc TestRace9796(t *testing.T) {\n\ttype A struct{}\n\ttype B struct {\n\t\tC []A `xml:\"X>Y\"`\n\t}\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 2; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tMarshal(B{[]A{{}}})\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n}\n\nfunc TestIsValidDirective(t *testing.T) {\n\ttestOK := []string{\n\t\t\"<>\",\n\t\t\"< < > >\",\n\t\t\"<!DOCTYPE '<' '>' '>' <!--nothing-->>\",\n\t\t\"<!DOCTYPE doc [ <!ELEMENT doc ANY> <!ELEMENT doc ANY> ]>\",\n\t\t\"<!DOCTYPE doc [ <!ELEMENT doc \\\"ANY> '<' <!E\\\" LEMENT '>' doc ANY> ]>\",\n\t\t\"<!DOCTYPE doc <!-- just>>>> a < comment --> [ <!ITEM anything> ] >\",\n\t}\n\ttestKO := []string{\n\t\t\"<\",\n\t\t\">\",\n\t\t\"<!--\",\n\t\t\"-->\",\n\t\t\"< > > < < >\",\n\t\t\"<!dummy <!-- > -->\",\n\t\t\"<!DOCTYPE doc '>\",\n\t\t\"<!DOCTYPE doc '>'\",\n\t\t\"<!DOCTYPE doc <!--comment>\",\n\t}\n\tfor _, s := range testOK {\n\t\tif !isValidDirective(Directive(s)) {\n\t\t\tt.Errorf(\"Directive %q is expected to be valid\", s)\n\t\t}\n\t}\n\tfor _, s := range testKO {\n\t\tif isValidDirective(Directive(s)) {\n\t\t\tt.Errorf(\"Directive %q is expected to be invalid\", s)\n\t\t}\n\t}\n}\n\n// Issue 11719. 
EncodeToken used to silently eat tokens with an invalid type.\nfunc TestSimpleUseOfEncodeToken(t *testing.T) {\n\tvar buf bytes.Buffer\n\tenc := NewEncoder(&buf)\n\tif err := enc.EncodeToken(&StartElement{Name: Name{\"\", \"object1\"}}); err == nil {\n\t\tt.Errorf(\"enc.EncodeToken: pointer type should be rejected\")\n\t}\n\tif err := enc.EncodeToken(&EndElement{Name: Name{\"\", \"object1\"}}); err == nil {\n\t\tt.Errorf(\"enc.EncodeToken: pointer type should be rejected\")\n\t}\n\tif err := enc.EncodeToken(StartElement{Name: Name{\"\", \"object2\"}}); err != nil {\n\t\tt.Errorf(\"enc.EncodeToken: StartElement %s\", err)\n\t}\n\tif err := enc.EncodeToken(EndElement{Name: Name{\"\", \"object2\"}}); err != nil {\n\t\tt.Errorf(\"enc.EncodeToken: EndElement %s\", err)\n\t}\n\tif err := enc.EncodeToken(Universe{}); err == nil {\n\t\tt.Errorf(\"enc.EncodeToken: invalid type not caught\")\n\t}\n\tif err := enc.Flush(); err != nil {\n\t\tt.Errorf(\"enc.Flush: %s\", err)\n\t}\n\tif buf.Len() == 0 {\n\t\tt.Errorf(\"enc.EncodeToken: empty buffer\")\n\t}\n\twant := \"<object2></object2>\"\n\tif buf.String() != want {\n\t\tt.Errorf(\"enc.EncodeToken: expected %q; got %q\", want, buf.String())\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/internal/xml/read.go",
    "content": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage xml\n\nimport (\n\t\"bytes\"\n\t\"encoding\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// BUG(rsc): Mapping between XML elements and data structures is inherently flawed:\n// an XML element is an order-dependent collection of anonymous\n// values, while a data structure is an order-independent collection\n// of named values.\n// See package json for a textual representation more suitable\n// to data structures.\n\n// Unmarshal parses the XML-encoded data and stores the result in\n// the value pointed to by v, which must be an arbitrary struct,\n// slice, or string. Well-formed data that does not fit into v is\n// discarded.\n//\n// Because Unmarshal uses the reflect package, it can only assign\n// to exported (upper case) fields. Unmarshal uses a case-sensitive\n// comparison to match XML element names to tag values and struct\n// field names.\n//\n// Unmarshal maps an XML element to a struct using the following rules.\n// In the rules, the tag of a field refers to the value associated with the\n// key 'xml' in the struct field's tag (see the example above).\n//\n//   * If the struct has a field of type []byte or string with tag\n//      \",innerxml\", Unmarshal accumulates the raw XML nested inside the\n//      element in that field. 
The rest of the rules still apply.\n//\n//   * If the struct has a field named XMLName of type xml.Name,\n//      Unmarshal records the element name in that field.\n//\n//   * If the XMLName field has an associated tag of the form\n//      \"name\" or \"namespace-URL name\", the XML element must have\n//      the given name (and, optionally, name space) or else Unmarshal\n//      returns an error.\n//\n//   * If the XML element has an attribute whose name matches a\n//      struct field name with an associated tag containing \",attr\" or\n//      the explicit name in a struct field tag of the form \"name,attr\",\n//      Unmarshal records the attribute value in that field.\n//\n//   * If the XML element contains character data, that data is\n//      accumulated in the first struct field that has tag \",chardata\".\n//      The struct field may have type []byte or string.\n//      If there is no such field, the character data is discarded.\n//\n//   * If the XML element contains comments, they are accumulated in\n//      the first struct field that has tag \",comment\".  The struct\n//      field may have type []byte or string. If there is no such\n//      field, the comments are discarded.\n//\n//   * If the XML element contains a sub-element whose name matches\n//      the prefix of a tag formatted as \"a\" or \"a>b>c\", unmarshal\n//      will descend into the XML structure looking for elements with the\n//      given names, and will map the innermost elements to that struct\n//      field. 
A tag starting with \">\" is equivalent to one starting\n//      with the field name followed by \">\".\n//\n//   * If the XML element contains a sub-element whose name matches\n//      a struct field's XMLName tag and the struct field has no\n//      explicit name tag as per the previous rule, unmarshal maps\n//      the sub-element to that struct field.\n//\n//   * If the XML element contains a sub-element whose name matches a\n//      field without any mode flags (\",attr\", \",chardata\", etc), Unmarshal\n//      maps the sub-element to that struct field.\n//\n//   * If the XML element contains a sub-element that hasn't matched any\n//      of the above rules and the struct has a field with tag \",any\",\n//      unmarshal maps the sub-element to that struct field.\n//\n//   * An anonymous struct field is handled as if the fields of its\n//      value were part of the outer struct.\n//\n//   * A struct field with tag \"-\" is never unmarshalled into.\n//\n// Unmarshal maps an XML element to a string or []byte by saving the\n// concatenation of that element's character data in the string or\n// []byte. The saved []byte is never nil.\n//\n// Unmarshal maps an attribute value to a string or []byte by saving\n// the value in the string or slice.\n//\n// Unmarshal maps an XML element to a slice by extending the length of\n// the slice and mapping the element to the newly created value.\n//\n// Unmarshal maps an XML element or attribute value to a bool by\n// setting it to the boolean value represented by the string.\n//\n// Unmarshal maps an XML element or attribute value to an integer or\n// floating-point field by setting the field to the result of\n// interpreting the string value in decimal. 
There is no check for\n// overflow.\n//\n// Unmarshal maps an XML element to an xml.Name by recording the\n// element name.\n//\n// Unmarshal maps an XML element to a pointer by setting the pointer\n// to a freshly allocated value and then mapping the element to that value.\n//\nfunc Unmarshal(data []byte, v interface{}) error {\n\treturn NewDecoder(bytes.NewReader(data)).Decode(v)\n}\n\n// Decode works like xml.Unmarshal, except it reads the decoder\n// stream to find the start element.\nfunc (d *Decoder) Decode(v interface{}) error {\n\treturn d.DecodeElement(v, nil)\n}\n\n// DecodeElement works like xml.Unmarshal except that it takes\n// a pointer to the start XML element to decode into v.\n// It is useful when a client reads some raw XML tokens itself\n// but also wants to defer to Unmarshal for some elements.\nfunc (d *Decoder) DecodeElement(v interface{}, start *StartElement) error {\n\tval := reflect.ValueOf(v)\n\tif val.Kind() != reflect.Ptr {\n\t\treturn errors.New(\"non-pointer passed to Unmarshal\")\n\t}\n\treturn d.unmarshal(val.Elem(), start)\n}\n\n// An UnmarshalError represents an error in the unmarshalling process.\ntype UnmarshalError string\n\nfunc (e UnmarshalError) Error() string { return string(e) }\n\n// Unmarshaler is the interface implemented by objects that can unmarshal\n// an XML element description of themselves.\n//\n// UnmarshalXML decodes a single XML element\n// beginning with the given start element.\n// If it returns an error, the outer call to Unmarshal stops and\n// returns that error.\n// UnmarshalXML must consume exactly one XML element.\n// One common implementation strategy is to unmarshal into\n// a separate value with a layout matching the expected XML\n// using d.DecodeElement,  and then to copy the data from\n// that value into the receiver.\n// Another common strategy is to use d.Token to process the\n// XML object one token at a time.\n// UnmarshalXML may not use d.RawToken.\ntype Unmarshaler interface 
{\n\tUnmarshalXML(d *Decoder, start StartElement) error\n}\n\n// UnmarshalerAttr is the interface implemented by objects that can unmarshal\n// an XML attribute description of themselves.\n//\n// UnmarshalXMLAttr decodes a single XML attribute.\n// If it returns an error, the outer call to Unmarshal stops and\n// returns that error.\n// UnmarshalXMLAttr is used only for struct fields with the\n// \"attr\" option in the field tag.\ntype UnmarshalerAttr interface {\n\tUnmarshalXMLAttr(attr Attr) error\n}\n\n// receiverType returns the receiver type to use in an expression like \"%s.MethodName\".\nfunc receiverType(val interface{}) string {\n\tt := reflect.TypeOf(val)\n\tif t.Name() != \"\" {\n\t\treturn t.String()\n\t}\n\treturn \"(\" + t.String() + \")\"\n}\n\n// unmarshalInterface unmarshals a single XML element into val.\n// start is the opening tag of the element.\nfunc (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error {\n\t// Record that decoder must stop at end tag corresponding to start.\n\tp.pushEOF()\n\n\tp.unmarshalDepth++\n\terr := val.UnmarshalXML(p, *start)\n\tp.unmarshalDepth--\n\tif err != nil {\n\t\tp.popEOF()\n\t\treturn err\n\t}\n\n\tif !p.popEOF() {\n\t\treturn fmt.Errorf(\"xml: %s.UnmarshalXML did not consume entire <%s> element\", receiverType(val), start.Name.Local)\n\t}\n\n\treturn nil\n}\n\n// unmarshalTextInterface unmarshals a single XML element into val.\n// The chardata contained in the element (but not its children)\n// is passed to the text unmarshaler.\nfunc (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error {\n\tvar buf []byte\n\tdepth := 1\n\tfor depth > 0 {\n\t\tt, err := p.Token()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tswitch t := t.(type) {\n\t\tcase CharData:\n\t\t\tif depth == 1 {\n\t\t\t\tbuf = append(buf, t...)\n\t\t\t}\n\t\tcase StartElement:\n\t\t\tdepth++\n\t\tcase EndElement:\n\t\t\tdepth--\n\t\t}\n\t}\n\treturn val.UnmarshalText(buf)\n}\n\n// 
unmarshalAttr unmarshals a single XML attribute into val.\nfunc (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error {\n\tif val.Kind() == reflect.Ptr {\n\t\tif val.IsNil() {\n\t\t\tval.Set(reflect.New(val.Type().Elem()))\n\t\t}\n\t\tval = val.Elem()\n\t}\n\n\tif val.CanInterface() && val.Type().Implements(unmarshalerAttrType) {\n\t\t// This is an unmarshaler with a non-pointer receiver,\n\t\t// so it's likely to be incorrect, but we do what we're told.\n\t\treturn val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)\n\t}\n\tif val.CanAddr() {\n\t\tpv := val.Addr()\n\t\tif pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) {\n\t\t\treturn pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)\n\t\t}\n\t}\n\n\t// Not an UnmarshalerAttr; try encoding.TextUnmarshaler.\n\tif val.CanInterface() && val.Type().Implements(textUnmarshalerType) {\n\t\t// This is an unmarshaler with a non-pointer receiver,\n\t\t// so it's likely to be incorrect, but we do what we're told.\n\t\treturn val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))\n\t}\n\tif val.CanAddr() {\n\t\tpv := val.Addr()\n\t\tif pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {\n\t\t\treturn pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))\n\t\t}\n\t}\n\n\tcopyValue(val, []byte(attr.Value))\n\treturn nil\n}\n\nvar (\n\tunmarshalerType     = reflect.TypeOf((*Unmarshaler)(nil)).Elem()\n\tunmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem()\n\ttextUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()\n)\n\n// Unmarshal a single XML element into val.\nfunc (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error {\n\t// Find start element if we need it.\n\tif start == nil {\n\t\tfor {\n\t\t\ttok, err := p.Token()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif t, ok := tok.(StartElement); ok {\n\t\t\t\tstart = &t\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// Load 
value from interface, but only if the result will be\n\t// usefully addressable.\n\tif val.Kind() == reflect.Interface && !val.IsNil() {\n\t\te := val.Elem()\n\t\tif e.Kind() == reflect.Ptr && !e.IsNil() {\n\t\t\tval = e\n\t\t}\n\t}\n\n\tif val.Kind() == reflect.Ptr {\n\t\tif val.IsNil() {\n\t\t\tval.Set(reflect.New(val.Type().Elem()))\n\t\t}\n\t\tval = val.Elem()\n\t}\n\n\tif val.CanInterface() && val.Type().Implements(unmarshalerType) {\n\t\t// This is an unmarshaler with a non-pointer receiver,\n\t\t// so it's likely to be incorrect, but we do what we're told.\n\t\treturn p.unmarshalInterface(val.Interface().(Unmarshaler), start)\n\t}\n\n\tif val.CanAddr() {\n\t\tpv := val.Addr()\n\t\tif pv.CanInterface() && pv.Type().Implements(unmarshalerType) {\n\t\t\treturn p.unmarshalInterface(pv.Interface().(Unmarshaler), start)\n\t\t}\n\t}\n\n\tif val.CanInterface() && val.Type().Implements(textUnmarshalerType) {\n\t\treturn p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start)\n\t}\n\n\tif val.CanAddr() {\n\t\tpv := val.Addr()\n\t\tif pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {\n\t\t\treturn p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start)\n\t\t}\n\t}\n\n\tvar (\n\t\tdata         []byte\n\t\tsaveData     reflect.Value\n\t\tcomment      []byte\n\t\tsaveComment  reflect.Value\n\t\tsaveXML      reflect.Value\n\t\tsaveXMLIndex int\n\t\tsaveXMLData  []byte\n\t\tsaveAny      reflect.Value\n\t\tsv           reflect.Value\n\t\ttinfo        *typeInfo\n\t\terr          error\n\t)\n\n\tswitch v := val; v.Kind() {\n\tdefault:\n\t\treturn errors.New(\"unknown type \" + v.Type().String())\n\n\tcase reflect.Interface:\n\t\t// TODO: For now, simply ignore the field. 
In the near\n\t\t//       future we may choose to unmarshal the start\n\t\t//       element on it, if not nil.\n\t\treturn p.Skip()\n\n\tcase reflect.Slice:\n\t\ttyp := v.Type()\n\t\tif typ.Elem().Kind() == reflect.Uint8 {\n\t\t\t// []byte\n\t\t\tsaveData = v\n\t\t\tbreak\n\t\t}\n\n\t\t// Slice of element values.\n\t\t// Grow slice.\n\t\tn := v.Len()\n\t\tif n >= v.Cap() {\n\t\t\tncap := 2 * n\n\t\t\tif ncap < 4 {\n\t\t\t\tncap = 4\n\t\t\t}\n\t\t\tnew := reflect.MakeSlice(typ, n, ncap)\n\t\t\treflect.Copy(new, v)\n\t\t\tv.Set(new)\n\t\t}\n\t\tv.SetLen(n + 1)\n\n\t\t// Recur to read element into slice.\n\t\tif err := p.unmarshal(v.Index(n), start); err != nil {\n\t\t\tv.SetLen(n)\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\n\tcase reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String:\n\t\tsaveData = v\n\n\tcase reflect.Struct:\n\t\ttyp := v.Type()\n\t\tif typ == nameType {\n\t\t\tv.Set(reflect.ValueOf(start.Name))\n\t\t\tbreak\n\t\t}\n\n\t\tsv = v\n\t\ttinfo, err = getTypeInfo(typ)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Validate and assign element name.\n\t\tif tinfo.xmlname != nil {\n\t\t\tfinfo := tinfo.xmlname\n\t\t\tif finfo.name != \"\" && finfo.name != start.Name.Local {\n\t\t\t\treturn UnmarshalError(\"expected element type <\" + finfo.name + \"> but have <\" + start.Name.Local + \">\")\n\t\t\t}\n\t\t\tif finfo.xmlns != \"\" && finfo.xmlns != start.Name.Space {\n\t\t\t\te := \"expected element <\" + finfo.name + \"> in name space \" + finfo.xmlns + \" but have \"\n\t\t\t\tif start.Name.Space == \"\" {\n\t\t\t\t\te += \"no name space\"\n\t\t\t\t} else {\n\t\t\t\t\te += start.Name.Space\n\t\t\t\t}\n\t\t\t\treturn UnmarshalError(e)\n\t\t\t}\n\t\t\tfv := finfo.value(sv)\n\t\t\tif _, ok := fv.Interface().(Name); ok 
{\n\t\t\t\tfv.Set(reflect.ValueOf(start.Name))\n\t\t\t}\n\t\t}\n\n\t\t// Assign attributes.\n\t\t// Also, determine whether we need to save character data or comments.\n\t\tfor i := range tinfo.fields {\n\t\t\tfinfo := &tinfo.fields[i]\n\t\t\tswitch finfo.flags & fMode {\n\t\t\tcase fAttr:\n\t\t\t\tstrv := finfo.value(sv)\n\t\t\t\t// Look for attribute.\n\t\t\t\tfor _, a := range start.Attr {\n\t\t\t\t\tif a.Name.Local == finfo.name && (finfo.xmlns == \"\" || finfo.xmlns == a.Name.Space) {\n\t\t\t\t\t\tif err := p.unmarshalAttr(strv, a); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase fCharData:\n\t\t\t\tif !saveData.IsValid() {\n\t\t\t\t\tsaveData = finfo.value(sv)\n\t\t\t\t}\n\n\t\t\tcase fComment:\n\t\t\t\tif !saveComment.IsValid() {\n\t\t\t\t\tsaveComment = finfo.value(sv)\n\t\t\t\t}\n\n\t\t\tcase fAny, fAny | fElement:\n\t\t\t\tif !saveAny.IsValid() {\n\t\t\t\t\tsaveAny = finfo.value(sv)\n\t\t\t\t}\n\n\t\t\tcase fInnerXml:\n\t\t\t\tif !saveXML.IsValid() {\n\t\t\t\t\tsaveXML = finfo.value(sv)\n\t\t\t\t\tif p.saved == nil {\n\t\t\t\t\t\tsaveXMLIndex = 0\n\t\t\t\t\t\tp.saved = new(bytes.Buffer)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tsaveXMLIndex = p.savedOffset()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Find end element.\n\t// Process sub-elements along the way.\nLoop:\n\tfor {\n\t\tvar savedOffset int\n\t\tif saveXML.IsValid() {\n\t\t\tsavedOffset = p.savedOffset()\n\t\t}\n\t\ttok, err := p.Token()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tswitch t := tok.(type) {\n\t\tcase StartElement:\n\t\t\tconsumed := false\n\t\t\tif sv.IsValid() {\n\t\t\t\tconsumed, err = p.unmarshalPath(tinfo, sv, nil, &t)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !consumed && saveAny.IsValid() {\n\t\t\t\t\tconsumed = true\n\t\t\t\t\tif err := p.unmarshal(saveAny, &t); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !consumed {\n\t\t\t\tif err := 
p.Skip(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase EndElement:\n\t\t\tif saveXML.IsValid() {\n\t\t\t\tsaveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset]\n\t\t\t\tif saveXMLIndex == 0 {\n\t\t\t\t\tp.saved = nil\n\t\t\t\t}\n\t\t\t}\n\t\t\tbreak Loop\n\n\t\tcase CharData:\n\t\t\tif saveData.IsValid() {\n\t\t\t\tdata = append(data, t...)\n\t\t\t}\n\n\t\tcase Comment:\n\t\t\tif saveComment.IsValid() {\n\t\t\t\tcomment = append(comment, t...)\n\t\t\t}\n\t\t}\n\t}\n\n\tif saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) {\n\t\tif err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsaveData = reflect.Value{}\n\t}\n\n\tif saveData.IsValid() && saveData.CanAddr() {\n\t\tpv := saveData.Addr()\n\t\tif pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {\n\t\t\tif err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsaveData = reflect.Value{}\n\t\t}\n\t}\n\n\tif err := copyValue(saveData, data); err != nil {\n\t\treturn err\n\t}\n\n\tswitch t := saveComment; t.Kind() {\n\tcase reflect.String:\n\t\tt.SetString(string(comment))\n\tcase reflect.Slice:\n\t\tt.Set(reflect.ValueOf(comment))\n\t}\n\n\tswitch t := saveXML; t.Kind() {\n\tcase reflect.String:\n\t\tt.SetString(string(saveXMLData))\n\tcase reflect.Slice:\n\t\tt.Set(reflect.ValueOf(saveXMLData))\n\t}\n\n\treturn nil\n}\n\nfunc copyValue(dst reflect.Value, src []byte) (err error) {\n\tdst0 := dst\n\n\tif dst.Kind() == reflect.Ptr {\n\t\tif dst.IsNil() {\n\t\t\tdst.Set(reflect.New(dst.Type().Elem()))\n\t\t}\n\t\tdst = dst.Elem()\n\t}\n\n\t// Save accumulated data.\n\tswitch dst.Kind() {\n\tcase reflect.Invalid:\n\t\t// Probably a comment.\n\tdefault:\n\t\treturn errors.New(\"cannot unmarshal into \" + dst0.Type().String())\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, 
reflect.Int64:\n\t\titmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdst.SetInt(itmp)\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\tutmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdst.SetUint(utmp)\n\tcase reflect.Float32, reflect.Float64:\n\t\tftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdst.SetFloat(ftmp)\n\tcase reflect.Bool:\n\t\tvalue, err := strconv.ParseBool(strings.TrimSpace(string(src)))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdst.SetBool(value)\n\tcase reflect.String:\n\t\tdst.SetString(string(src))\n\tcase reflect.Slice:\n\t\tif len(src) == 0 {\n\t\t\t// non-nil to flag presence\n\t\t\tsrc = []byte{}\n\t\t}\n\t\tdst.SetBytes(src)\n\t}\n\treturn nil\n}\n\n// unmarshalPath walks down an XML structure looking for wanted\n// paths, and calls unmarshal on them.\n// The consumed result tells whether XML elements have been consumed\n// from the Decoder until start's matching end element, or if it's\n// still untouched because start is uninteresting for sv's fields.\nfunc (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) {\n\trecurse := false\nLoop:\n\tfor i := range tinfo.fields {\n\t\tfinfo := &tinfo.fields[i]\n\t\tif finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != \"\" && finfo.xmlns != start.Name.Space {\n\t\t\tcontinue\n\t\t}\n\t\tfor j := range parents {\n\t\t\tif parents[j] != finfo.parents[j] {\n\t\t\t\tcontinue Loop\n\t\t\t}\n\t\t}\n\t\tif len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {\n\t\t\t// It's a perfect match, unmarshal the field.\n\t\t\treturn true, p.unmarshal(finfo.value(sv), start)\n\t\t}\n\t\tif len(finfo.parents) > len(parents) && 
finfo.parents[len(parents)] == start.Name.Local {\n\t\t\t// It's a prefix for the field. Break and recurse\n\t\t\t// since it's not ok for one field path to be itself\n\t\t\t// the prefix for another field path.\n\t\t\trecurse = true\n\n\t\t\t// We can reuse the same slice as long as we\n\t\t\t// don't try to append to it.\n\t\t\tparents = finfo.parents[:len(parents)+1]\n\t\t\tbreak\n\t\t}\n\t}\n\tif !recurse {\n\t\t// We have no business with this element.\n\t\treturn false, nil\n\t}\n\t// The element is not a perfect match for any field, but one\n\t// or more fields have the path to this element as a parent\n\t// prefix. Recurse and attempt to match these.\n\tfor {\n\t\tvar tok Token\n\t\ttok, err = p.Token()\n\t\tif err != nil {\n\t\t\treturn true, err\n\t\t}\n\t\tswitch t := tok.(type) {\n\t\tcase StartElement:\n\t\t\tconsumed2, err := p.unmarshalPath(tinfo, sv, parents, &t)\n\t\t\tif err != nil {\n\t\t\t\treturn true, err\n\t\t\t}\n\t\t\tif !consumed2 {\n\t\t\t\tif err := p.Skip(); err != nil {\n\t\t\t\t\treturn true, err\n\t\t\t\t}\n\t\t\t}\n\t\tcase EndElement:\n\t\t\treturn true, nil\n\t\t}\n\t}\n}\n\n// Skip reads tokens until it has consumed the end element\n// matching the most recent start element already consumed.\n// It recurs if it encounters a start element, so it can be used to\n// skip nested structures.\n// It returns nil if it finds an end element matching the start\n// element; otherwise it returns an error describing the problem.\nfunc (d *Decoder) Skip() error {\n\tfor {\n\t\ttok, err := d.Token()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tswitch tok.(type) {\n\t\tcase StartElement:\n\t\t\tif err := d.Skip(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase EndElement:\n\t\t\treturn nil\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/internal/xml/read_test.go",
    "content": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage xml\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\n// Stripped down Atom feed data structures.\n\nfunc TestUnmarshalFeed(t *testing.T) {\n\tvar f Feed\n\tif err := Unmarshal([]byte(atomFeedString), &f); err != nil {\n\t\tt.Fatalf(\"Unmarshal: %s\", err)\n\t}\n\tif !reflect.DeepEqual(f, atomFeed) {\n\t\tt.Fatalf(\"have %#v\\nwant %#v\", f, atomFeed)\n\t}\n}\n\n// hget http://codereview.appspot.com/rss/mine/rsc\nconst atomFeedString = `\n<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<feed xmlns=\"http://www.w3.org/2005/Atom\" xml:lang=\"en-us\" updated=\"2009-10-04T01:35:58+00:00\"><title>Code Review - My issues</title><link href=\"http://codereview.appspot.com/\" rel=\"alternate\"></link><link href=\"http://codereview.appspot.com/rss/mine/rsc\" rel=\"self\"></link><id>http://codereview.appspot.com/</id><author><name>rietveld&lt;&gt;</name></author><entry><title>rietveld: an attempt at pubsubhubbub\n</title><link href=\"http://codereview.appspot.com/126085\" rel=\"alternate\"></link><updated>2009-10-04T01:35:58+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:134d9179c41f806be79b3a5f7877d19a</id><summary type=\"html\">\n  An attempt at adding pubsubhubbub support to Rietveld.\nhttp://code.google.com/p/pubsubhubbub\nhttp://code.google.com/p/rietveld/issues/detail?id=155\n\nThe server side of the protocol is trivial:\n  1. add a &amp;lt;link rel=&amp;quot;hub&amp;quot; href=&amp;quot;hub-server&amp;quot;&amp;gt; tag to all\n     feeds that will be pubsubhubbubbed.\n  2. 
every time one of those feeds changes, tell the hub\n     with a simple POST request.\n\nI have tested this by adding debug prints to a local hub\nserver and checking that the server got the right publish\nrequests.\n\nI can&amp;#39;t quite get the server to work, but I think the bug\nis not in my code.  I think that the server expects to be\nable to grab the feed and see the feed&amp;#39;s actual URL in\nthe link rel=&amp;quot;self&amp;quot;, but the default value for that drops\nthe :port from the URL, and I cannot for the life of me\nfigure out how to get the Atom generator deep inside\ndjango not to do that, or even where it is doing that,\nor even what code is running to generate the Atom feed.\n(I thought I knew but I added some assert False statements\nand it kept running!)\n\nIgnoring that particular problem, I would appreciate\nfeedback on the right way to get the two values at\nthe top of feeds.py marked NOTE(rsc).\n\n\n</summary></entry><entry><title>rietveld: correct tab handling\n</title><link href=\"http://codereview.appspot.com/124106\" rel=\"alternate\"></link><updated>2009-10-03T23:02:17+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:0a2a4f19bb815101f0ba2904aed7c35a</id><summary type=\"html\">\n  This fixes the buggy tab rendering that can be seen at\nhttp://codereview.appspot.com/116075/diff/1/2\n\nThe fundamental problem was that the tab code was\nnot being told what column the text began in, so it\ndidn&amp;#39;t know where to put the tab stops.  Another problem\nwas that some of the code assumed that string byte\noffsets were the same as column offsets, which is only\ntrue if there are no tabs.\n\nIn the process of fixing this, I cleaned up the arguments\nto Fold and ExpandTabs and renamed them Break and\n_ExpandTabs so that I could be sure that I found all the\ncall sites.  
I also wanted to verify that ExpandTabs was\nnot being used from outside intra_region_diff.py.\n\n\n</summary></entry></feed> \t   `\n\ntype Feed struct {\n\tXMLName Name      `xml:\"http://www.w3.org/2005/Atom feed\"`\n\tTitle   string    `xml:\"title\"`\n\tId      string    `xml:\"id\"`\n\tLink    []Link    `xml:\"link\"`\n\tUpdated time.Time `xml:\"updated,attr\"`\n\tAuthor  Person    `xml:\"author\"`\n\tEntry   []Entry   `xml:\"entry\"`\n}\n\ntype Entry struct {\n\tTitle   string    `xml:\"title\"`\n\tId      string    `xml:\"id\"`\n\tLink    []Link    `xml:\"link\"`\n\tUpdated time.Time `xml:\"updated\"`\n\tAuthor  Person    `xml:\"author\"`\n\tSummary Text      `xml:\"summary\"`\n}\n\ntype Link struct {\n\tRel  string `xml:\"rel,attr,omitempty\"`\n\tHref string `xml:\"href,attr\"`\n}\n\ntype Person struct {\n\tName     string `xml:\"name\"`\n\tURI      string `xml:\"uri\"`\n\tEmail    string `xml:\"email\"`\n\tInnerXML string `xml:\",innerxml\"`\n}\n\ntype Text struct {\n\tType string `xml:\"type,attr,omitempty\"`\n\tBody string `xml:\",chardata\"`\n}\n\nvar atomFeed = Feed{\n\tXMLName: Name{\"http://www.w3.org/2005/Atom\", \"feed\"},\n\tTitle:   \"Code Review - My issues\",\n\tLink: []Link{\n\t\t{Rel: \"alternate\", Href: \"http://codereview.appspot.com/\"},\n\t\t{Rel: \"self\", Href: \"http://codereview.appspot.com/rss/mine/rsc\"},\n\t},\n\tId:      \"http://codereview.appspot.com/\",\n\tUpdated: ParseTime(\"2009-10-04T01:35:58+00:00\"),\n\tAuthor: Person{\n\t\tName:     \"rietveld<>\",\n\t\tInnerXML: \"<name>rietveld&lt;&gt;</name>\",\n\t},\n\tEntry: []Entry{\n\t\t{\n\t\t\tTitle: \"rietveld: an attempt at pubsubhubbub\\n\",\n\t\t\tLink: []Link{\n\t\t\t\t{Rel: \"alternate\", Href: \"http://codereview.appspot.com/126085\"},\n\t\t\t},\n\t\t\tUpdated: ParseTime(\"2009-10-04T01:35:58+00:00\"),\n\t\t\tAuthor: Person{\n\t\t\t\tName:     \"email-address-removed\",\n\t\t\t\tInnerXML: \"<name>email-address-removed</name>\",\n\t\t\t},\n\t\t\tId: 
\"urn:md5:134d9179c41f806be79b3a5f7877d19a\",\n\t\t\tSummary: Text{\n\t\t\t\tType: \"html\",\n\t\t\t\tBody: `\n  An attempt at adding pubsubhubbub support to Rietveld.\nhttp://code.google.com/p/pubsubhubbub\nhttp://code.google.com/p/rietveld/issues/detail?id=155\n\nThe server side of the protocol is trivial:\n  1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all\n     feeds that will be pubsubhubbubbed.\n  2. every time one of those feeds changes, tell the hub\n     with a simple POST request.\n\nI have tested this by adding debug prints to a local hub\nserver and checking that the server got the right publish\nrequests.\n\nI can&#39;t quite get the server to work, but I think the bug\nis not in my code.  I think that the server expects to be\nable to grab the feed and see the feed&#39;s actual URL in\nthe link rel=&quot;self&quot;, but the default value for that drops\nthe :port from the URL, and I cannot for the life of me\nfigure out how to get the Atom generator deep inside\ndjango not to do that, or even where it is doing that,\nor even what code is running to generate the Atom feed.\n(I thought I knew but I added some assert False statements\nand it kept running!)\n\nIgnoring that particular problem, I would appreciate\nfeedback on the right way to get the two values at\nthe top of feeds.py marked NOTE(rsc).\n\n\n`,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tTitle: \"rietveld: correct tab handling\\n\",\n\t\t\tLink: []Link{\n\t\t\t\t{Rel: \"alternate\", Href: \"http://codereview.appspot.com/124106\"},\n\t\t\t},\n\t\t\tUpdated: ParseTime(\"2009-10-03T23:02:17+00:00\"),\n\t\t\tAuthor: Person{\n\t\t\t\tName:     \"email-address-removed\",\n\t\t\t\tInnerXML: \"<name>email-address-removed</name>\",\n\t\t\t},\n\t\t\tId: \"urn:md5:0a2a4f19bb815101f0ba2904aed7c35a\",\n\t\t\tSummary: Text{\n\t\t\t\tType: \"html\",\n\t\t\t\tBody: `\n  This fixes the buggy tab rendering that can be seen at\nhttp://codereview.appspot.com/116075/diff/1/2\n\nThe 
fundamental problem was that the tab code was\nnot being told what column the text began in, so it\ndidn&#39;t know where to put the tab stops.  Another problem\nwas that some of the code assumed that string byte\noffsets were the same as column offsets, which is only\ntrue if there are no tabs.\n\nIn the process of fixing this, I cleaned up the arguments\nto Fold and ExpandTabs and renamed them Break and\n_ExpandTabs so that I could be sure that I found all the\ncall sites.  I also wanted to verify that ExpandTabs was\nnot being used from outside intra_region_diff.py.\n\n\n`,\n\t\t\t},\n\t\t},\n\t},\n}\n\nconst pathTestString = `\n<Result>\n    <Before>1</Before>\n    <Items>\n        <Item1>\n            <Value>A</Value>\n        </Item1>\n        <Item2>\n            <Value>B</Value>\n        </Item2>\n        <Item1>\n            <Value>C</Value>\n            <Value>D</Value>\n        </Item1>\n        <_>\n            <Value>E</Value>\n        </_>\n    </Items>\n    <After>2</After>\n</Result>\n`\n\ntype PathTestItem struct {\n\tValue string\n}\n\ntype PathTestA struct {\n\tItems         []PathTestItem `xml:\">Item1\"`\n\tBefore, After string\n}\n\ntype PathTestB struct {\n\tOther         []PathTestItem `xml:\"Items>Item1\"`\n\tBefore, After string\n}\n\ntype PathTestC struct {\n\tValues1       []string `xml:\"Items>Item1>Value\"`\n\tValues2       []string `xml:\"Items>Item2>Value\"`\n\tBefore, After string\n}\n\ntype PathTestSet struct {\n\tItem1 []PathTestItem\n}\n\ntype PathTestD struct {\n\tOther         PathTestSet `xml:\"Items\"`\n\tBefore, After string\n}\n\ntype PathTestE struct {\n\tUnderline     string `xml:\"Items>_>Value\"`\n\tBefore, After string\n}\n\nvar pathTests = []interface{}{\n\t&PathTestA{Items: []PathTestItem{{\"A\"}, {\"D\"}}, Before: \"1\", After: \"2\"},\n\t&PathTestB{Other: []PathTestItem{{\"A\"}, {\"D\"}}, Before: \"1\", After: \"2\"},\n\t&PathTestC{Values1: []string{\"A\", \"C\", \"D\"}, Values2: []string{\"B\"}, Before: \"1\", 
After: \"2\"},\n\t&PathTestD{Other: PathTestSet{Item1: []PathTestItem{{\"A\"}, {\"D\"}}}, Before: \"1\", After: \"2\"},\n\t&PathTestE{Underline: \"E\", Before: \"1\", After: \"2\"},\n}\n\nfunc TestUnmarshalPaths(t *testing.T) {\n\tfor _, pt := range pathTests {\n\t\tv := reflect.New(reflect.TypeOf(pt).Elem()).Interface()\n\t\tif err := Unmarshal([]byte(pathTestString), v); err != nil {\n\t\t\tt.Fatalf(\"Unmarshal: %s\", err)\n\t\t}\n\t\tif !reflect.DeepEqual(v, pt) {\n\t\t\tt.Fatalf(\"have %#v\\nwant %#v\", v, pt)\n\t\t}\n\t}\n}\n\ntype BadPathTestA struct {\n\tFirst  string `xml:\"items>item1\"`\n\tOther  string `xml:\"items>item2\"`\n\tSecond string `xml:\"items\"`\n}\n\ntype BadPathTestB struct {\n\tOther  string `xml:\"items>item2>value\"`\n\tFirst  string `xml:\"items>item1\"`\n\tSecond string `xml:\"items>item1>value\"`\n}\n\ntype BadPathTestC struct {\n\tFirst  string\n\tSecond string `xml:\"First\"`\n}\n\ntype BadPathTestD struct {\n\tBadPathEmbeddedA\n\tBadPathEmbeddedB\n}\n\ntype BadPathEmbeddedA struct {\n\tFirst string\n}\n\ntype BadPathEmbeddedB struct {\n\tSecond string `xml:\"First\"`\n}\n\nvar badPathTests = []struct {\n\tv, e interface{}\n}{\n\t{&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), \"First\", \"items>item1\", \"Second\", \"items\"}},\n\t{&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), \"First\", \"items>item1\", \"Second\", \"items>item1>value\"}},\n\t{&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), \"First\", \"\", \"Second\", \"First\"}},\n\t{&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), \"First\", \"\", \"Second\", \"First\"}},\n}\n\nfunc TestUnmarshalBadPaths(t *testing.T) {\n\tfor _, tt := range badPathTests {\n\t\terr := Unmarshal([]byte(pathTestString), tt.v)\n\t\tif !reflect.DeepEqual(err, tt.e) {\n\t\t\tt.Fatalf(\"Unmarshal with %#v didn't fail properly:\\nhave %#v,\\nwant %#v\", tt.v, err, tt.e)\n\t\t}\n\t}\n}\n\nconst OK = \"OK\"\nconst withoutNameTypeData = 
`\n<?xml version=\"1.0\" charset=\"utf-8\"?>\n<Test3 Attr=\"OK\" />`\n\ntype TestThree struct {\n\tXMLName Name   `xml:\"Test3\"`\n\tAttr    string `xml:\",attr\"`\n}\n\nfunc TestUnmarshalWithoutNameType(t *testing.T) {\n\tvar x TestThree\n\tif err := Unmarshal([]byte(withoutNameTypeData), &x); err != nil {\n\t\tt.Fatalf(\"Unmarshal: %s\", err)\n\t}\n\tif x.Attr != OK {\n\t\tt.Fatalf(\"have %v\\nwant %v\", x.Attr, OK)\n\t}\n}\n\nfunc TestUnmarshalAttr(t *testing.T) {\n\ttype ParamVal struct {\n\t\tInt int `xml:\"int,attr\"`\n\t}\n\n\ttype ParamPtr struct {\n\t\tInt *int `xml:\"int,attr\"`\n\t}\n\n\ttype ParamStringPtr struct {\n\t\tInt *string `xml:\"int,attr\"`\n\t}\n\n\tx := []byte(`<Param int=\"1\" />`)\n\n\tp1 := &ParamPtr{}\n\tif err := Unmarshal(x, p1); err != nil {\n\t\tt.Fatalf(\"Unmarshal: %s\", err)\n\t}\n\tif p1.Int == nil {\n\t\tt.Fatalf(\"Unmarshal failed in to *int field\")\n\t} else if *p1.Int != 1 {\n\t\tt.Fatalf(\"Unmarshal with %s failed:\\nhave %#v,\\n want %#v\", x, p1.Int, 1)\n\t}\n\n\tp2 := &ParamVal{}\n\tif err := Unmarshal(x, p2); err != nil {\n\t\tt.Fatalf(\"Unmarshal: %s\", err)\n\t}\n\tif p2.Int != 1 {\n\t\tt.Fatalf(\"Unmarshal with %s failed:\\nhave %#v,\\n want %#v\", x, p2.Int, 1)\n\t}\n\n\tp3 := &ParamStringPtr{}\n\tif err := Unmarshal(x, p3); err != nil {\n\t\tt.Fatalf(\"Unmarshal: %s\", err)\n\t}\n\tif p3.Int == nil {\n\t\tt.Fatalf(\"Unmarshal failed in to *string field\")\n\t} else if *p3.Int != \"1\" {\n\t\tt.Fatalf(\"Unmarshal with %s failed:\\nhave %#v,\\n want %#v\", x, p3.Int, 1)\n\t}\n}\n\ntype Tables struct {\n\tHTable string `xml:\"http://www.w3.org/TR/html4/ table\"`\n\tFTable string `xml:\"http://www.w3schools.com/furniture table\"`\n}\n\nvar tables = []struct {\n\txml string\n\ttab Tables\n\tns  string\n}{\n\t{\n\t\txml: `<Tables>` +\n\t\t\t`<table xmlns=\"http://www.w3.org/TR/html4/\">hello</table>` +\n\t\t\t`<table xmlns=\"http://www.w3schools.com/furniture\">world</table>` +\n\t\t\t`</Tables>`,\n\t\ttab: 
Tables{\"hello\", \"world\"},\n\t},\n\t{\n\t\txml: `<Tables>` +\n\t\t\t`<table xmlns=\"http://www.w3schools.com/furniture\">world</table>` +\n\t\t\t`<table xmlns=\"http://www.w3.org/TR/html4/\">hello</table>` +\n\t\t\t`</Tables>`,\n\t\ttab: Tables{\"hello\", \"world\"},\n\t},\n\t{\n\t\txml: `<Tables xmlns:f=\"http://www.w3schools.com/furniture\" xmlns:h=\"http://www.w3.org/TR/html4/\">` +\n\t\t\t`<f:table>world</f:table>` +\n\t\t\t`<h:table>hello</h:table>` +\n\t\t\t`</Tables>`,\n\t\ttab: Tables{\"hello\", \"world\"},\n\t},\n\t{\n\t\txml: `<Tables>` +\n\t\t\t`<table>bogus</table>` +\n\t\t\t`</Tables>`,\n\t\ttab: Tables{},\n\t},\n\t{\n\t\txml: `<Tables>` +\n\t\t\t`<table>only</table>` +\n\t\t\t`</Tables>`,\n\t\ttab: Tables{HTable: \"only\"},\n\t\tns:  \"http://www.w3.org/TR/html4/\",\n\t},\n\t{\n\t\txml: `<Tables>` +\n\t\t\t`<table>only</table>` +\n\t\t\t`</Tables>`,\n\t\ttab: Tables{FTable: \"only\"},\n\t\tns:  \"http://www.w3schools.com/furniture\",\n\t},\n\t{\n\t\txml: `<Tables>` +\n\t\t\t`<table>only</table>` +\n\t\t\t`</Tables>`,\n\t\ttab: Tables{},\n\t\tns:  \"something else entirely\",\n\t},\n}\n\nfunc TestUnmarshalNS(t *testing.T) {\n\tfor i, tt := range tables {\n\t\tvar dst Tables\n\t\tvar err error\n\t\tif tt.ns != \"\" {\n\t\t\td := NewDecoder(strings.NewReader(tt.xml))\n\t\t\td.DefaultSpace = tt.ns\n\t\t\terr = d.Decode(&dst)\n\t\t} else {\n\t\t\terr = Unmarshal([]byte(tt.xml), &dst)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Errorf(\"#%d: Unmarshal: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\twant := tt.tab\n\t\tif dst != want {\n\t\t\tt.Errorf(\"#%d: dst=%+v, want %+v\", i, dst, want)\n\t\t}\n\t}\n}\n\nfunc TestRoundTrip(t *testing.T) {\n\t// From issue 7535\n\tconst s = `<ex:element xmlns:ex=\"http://example.com/schema\"></ex:element>`\n\tin := bytes.NewBufferString(s)\n\tfor i := 0; i < 10; i++ {\n\t\tout := &bytes.Buffer{}\n\t\td := NewDecoder(in)\n\t\te := NewEncoder(out)\n\n\t\tfor {\n\t\t\tt, err := d.Token()\n\t\t\tif err == io.EOF 
{\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"failed:\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\te.EncodeToken(t)\n\t\t}\n\t\te.Flush()\n\t\tin = out\n\t}\n\tif got := in.String(); got != s {\n\t\tt.Errorf(\"have: %q\\nwant: %q\\n\", got, s)\n\t}\n}\n\nfunc TestMarshalNS(t *testing.T) {\n\tdst := Tables{\"hello\", \"world\"}\n\tdata, err := Marshal(&dst)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal: %v\", err)\n\t}\n\twant := `<Tables><table xmlns=\"http://www.w3.org/TR/html4/\">hello</table><table xmlns=\"http://www.w3schools.com/furniture\">world</table></Tables>`\n\tstr := string(data)\n\tif str != want {\n\t\tt.Errorf(\"have: %q\\nwant: %q\\n\", str, want)\n\t}\n}\n\ntype TableAttrs struct {\n\tTAttr TAttr\n}\n\ntype TAttr struct {\n\tHTable string `xml:\"http://www.w3.org/TR/html4/ table,attr\"`\n\tFTable string `xml:\"http://www.w3schools.com/furniture table,attr\"`\n\tLang   string `xml:\"http://www.w3.org/XML/1998/namespace lang,attr,omitempty\"`\n\tOther1 string `xml:\"http://golang.org/xml/ other,attr,omitempty\"`\n\tOther2 string `xml:\"http://golang.org/xmlfoo/ other,attr,omitempty\"`\n\tOther3 string `xml:\"http://golang.org/json/ other,attr,omitempty\"`\n\tOther4 string `xml:\"http://golang.org/2/json/ other,attr,omitempty\"`\n}\n\nvar tableAttrs = []struct {\n\txml string\n\ttab TableAttrs\n\tns  string\n}{\n\t{\n\t\txml: `<TableAttrs xmlns:f=\"http://www.w3schools.com/furniture\" xmlns:h=\"http://www.w3.org/TR/html4/\"><TAttr ` +\n\t\t\t`h:table=\"hello\" f:table=\"world\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{TAttr{HTable: \"hello\", FTable: \"world\"}},\n\t},\n\t{\n\t\txml: `<TableAttrs><TAttr xmlns:f=\"http://www.w3schools.com/furniture\" xmlns:h=\"http://www.w3.org/TR/html4/\" ` +\n\t\t\t`h:table=\"hello\" f:table=\"world\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{TAttr{HTable: \"hello\", FTable: \"world\"}},\n\t},\n\t{\n\t\txml: `<TableAttrs><TAttr ` +\n\t\t\t`h:table=\"hello\" f:table=\"world\" 
xmlns:f=\"http://www.w3schools.com/furniture\" xmlns:h=\"http://www.w3.org/TR/html4/\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{TAttr{HTable: \"hello\", FTable: \"world\"}},\n\t},\n\t{\n\t\t// Default space does not apply to attribute names.\n\t\txml: `<TableAttrs xmlns=\"http://www.w3schools.com/furniture\" xmlns:h=\"http://www.w3.org/TR/html4/\"><TAttr ` +\n\t\t\t`h:table=\"hello\" table=\"world\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{TAttr{HTable: \"hello\", FTable: \"\"}},\n\t},\n\t{\n\t\t// Default space does not apply to attribute names.\n\t\txml: `<TableAttrs xmlns:f=\"http://www.w3schools.com/furniture\"><TAttr xmlns=\"http://www.w3.org/TR/html4/\" ` +\n\t\t\t`table=\"hello\" f:table=\"world\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{TAttr{HTable: \"\", FTable: \"world\"}},\n\t},\n\t{\n\t\txml: `<TableAttrs><TAttr ` +\n\t\t\t`table=\"bogus\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{},\n\t},\n\t{\n\t\t// Default space does not apply to attribute names.\n\t\txml: `<TableAttrs xmlns:h=\"http://www.w3.org/TR/html4/\"><TAttr ` +\n\t\t\t`h:table=\"hello\" table=\"world\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{TAttr{HTable: \"hello\", FTable: \"\"}},\n\t\tns:  \"http://www.w3schools.com/furniture\",\n\t},\n\t{\n\t\t// Default space does not apply to attribute names.\n\t\txml: `<TableAttrs xmlns:f=\"http://www.w3schools.com/furniture\"><TAttr ` +\n\t\t\t`table=\"hello\" f:table=\"world\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{TAttr{HTable: \"\", FTable: \"world\"}},\n\t\tns:  \"http://www.w3.org/TR/html4/\",\n\t},\n\t{\n\t\txml: `<TableAttrs><TAttr ` +\n\t\t\t`table=\"bogus\" ` +\n\t\t\t`/></TableAttrs>`,\n\t\ttab: TableAttrs{},\n\t\tns:  \"something else entirely\",\n\t},\n}\n\nfunc TestUnmarshalNSAttr(t *testing.T) {\n\tfor i, tt := range tableAttrs {\n\t\tvar dst TableAttrs\n\t\tvar err error\n\t\tif tt.ns != \"\" {\n\t\t\td := NewDecoder(strings.NewReader(tt.xml))\n\t\t\td.DefaultSpace = 
tt.ns\n\t\t\terr = d.Decode(&dst)\n\t\t} else {\n\t\t\terr = Unmarshal([]byte(tt.xml), &dst)\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Errorf(\"#%d: Unmarshal: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\twant := tt.tab\n\t\tif dst != want {\n\t\t\tt.Errorf(\"#%d: dst=%+v, want %+v\", i, dst, want)\n\t\t}\n\t}\n}\n\nfunc TestMarshalNSAttr(t *testing.T) {\n\tsrc := TableAttrs{TAttr{\"hello\", \"world\", \"en_US\", \"other1\", \"other2\", \"other3\", \"other4\"}}\n\tdata, err := Marshal(&src)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal: %v\", err)\n\t}\n\twant := `<TableAttrs><TAttr xmlns:json_1=\"http://golang.org/2/json/\" xmlns:json=\"http://golang.org/json/\" xmlns:_xmlfoo=\"http://golang.org/xmlfoo/\" xmlns:_xml=\"http://golang.org/xml/\" xmlns:furniture=\"http://www.w3schools.com/furniture\" xmlns:html4=\"http://www.w3.org/TR/html4/\" html4:table=\"hello\" furniture:table=\"world\" xml:lang=\"en_US\" _xml:other=\"other1\" _xmlfoo:other=\"other2\" json:other=\"other3\" json_1:other=\"other4\"></TAttr></TableAttrs>`\n\tstr := string(data)\n\tif str != want {\n\t\tt.Errorf(\"Marshal:\\nhave: %#q\\nwant: %#q\\n\", str, want)\n\t}\n\n\tvar dst TableAttrs\n\tif err := Unmarshal(data, &dst); err != nil {\n\t\tt.Errorf(\"Unmarshal: %v\", err)\n\t}\n\n\tif dst != src {\n\t\tt.Errorf(\"Unmarshal = %q, want %q\", dst, src)\n\t}\n}\n\ntype MyCharData struct {\n\tbody string\n}\n\nfunc (m *MyCharData) UnmarshalXML(d *Decoder, start StartElement) error {\n\tfor {\n\t\tt, err := d.Token()\n\t\tif err == io.EOF { // found end of element\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif char, ok := t.(CharData); ok {\n\t\t\tm.body += string(char)\n\t\t}\n\t}\n\treturn nil\n}\n\nvar _ Unmarshaler = (*MyCharData)(nil)\n\nfunc (m *MyCharData) UnmarshalXMLAttr(attr Attr) error {\n\tpanic(\"must not call\")\n}\n\ntype MyAttr struct {\n\tattr string\n}\n\nfunc (m *MyAttr) UnmarshalXMLAttr(attr Attr) error {\n\tm.attr = attr.Value\n\treturn nil\n}\n\nvar _ 
UnmarshalerAttr = (*MyAttr)(nil)\n\ntype MyStruct struct {\n\tData *MyCharData\n\tAttr *MyAttr `xml:\",attr\"`\n\n\tData2 MyCharData\n\tAttr2 MyAttr `xml:\",attr\"`\n}\n\nfunc TestUnmarshaler(t *testing.T) {\n\txml := `<?xml version=\"1.0\" encoding=\"utf-8\"?>\n\t\t<MyStruct Attr=\"attr1\" Attr2=\"attr2\">\n\t\t<Data>hello <!-- comment -->world</Data>\n\t\t<Data2>howdy <!-- comment -->world</Data2>\n\t\t</MyStruct>\n\t`\n\n\tvar m MyStruct\n\tif err := Unmarshal([]byte(xml), &m); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif m.Data == nil || m.Attr == nil || m.Data.body != \"hello world\" || m.Attr.attr != \"attr1\" || m.Data2.body != \"howdy world\" || m.Attr2.attr != \"attr2\" {\n\t\tt.Errorf(\"m=%#+v\\n\", m)\n\t}\n}\n\ntype Pea struct {\n\tCotelydon string\n}\n\ntype Pod struct {\n\tPea interface{} `xml:\"Pea\"`\n}\n\n// https://golang.org/issue/6836\nfunc TestUnmarshalIntoInterface(t *testing.T) {\n\tpod := new(Pod)\n\tpod.Pea = new(Pea)\n\txml := `<Pod><Pea><Cotelydon>Green stuff</Cotelydon></Pea></Pod>`\n\terr := Unmarshal([]byte(xml), pod)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to unmarshal %q: %v\", xml, err)\n\t}\n\tpea, ok := pod.Pea.(*Pea)\n\tif !ok {\n\t\tt.Fatalf(\"unmarshalled into wrong type: have %T want *Pea\", pod.Pea)\n\t}\n\thave, want := pea.Cotelydon, \"Green stuff\"\n\tif have != want {\n\t\tt.Errorf(\"failed to unmarshal into interface, have %q want %q\", have, want)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage xml\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n)\n\n// typeInfo holds details for the xml representation of a type.\ntype typeInfo struct {\n\txmlname *fieldInfo\n\tfields  []fieldInfo\n}\n\n// fieldInfo holds details for the xml representation of a single field.\ntype fieldInfo struct {\n\tidx     []int\n\tname    string\n\txmlns   string\n\tflags   fieldFlags\n\tparents []string\n}\n\ntype fieldFlags int\n\nconst (\n\tfElement fieldFlags = 1 << iota\n\tfAttr\n\tfCharData\n\tfInnerXml\n\tfComment\n\tfAny\n\n\tfOmitEmpty\n\n\tfMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny\n)\n\nvar tinfoMap = make(map[reflect.Type]*typeInfo)\nvar tinfoLock sync.RWMutex\n\nvar nameType = reflect.TypeOf(Name{})\n\n// getTypeInfo returns the typeInfo structure with details necessary\n// for marshalling and unmarshalling typ.\nfunc getTypeInfo(typ reflect.Type) (*typeInfo, error) {\n\ttinfoLock.RLock()\n\ttinfo, ok := tinfoMap[typ]\n\ttinfoLock.RUnlock()\n\tif ok {\n\t\treturn tinfo, nil\n\t}\n\ttinfo = &typeInfo{}\n\tif typ.Kind() == reflect.Struct && typ != nameType {\n\t\tn := typ.NumField()\n\t\tfor i := 0; i < n; i++ {\n\t\t\tf := typ.Field(i)\n\t\t\tif f.PkgPath != \"\" || f.Tag.Get(\"xml\") == \"-\" {\n\t\t\t\tcontinue // Private field\n\t\t\t}\n\n\t\t\t// For embedded structs, embed its fields.\n\t\t\tif f.Anonymous {\n\t\t\t\tt := f.Type\n\t\t\t\tif t.Kind() == reflect.Ptr {\n\t\t\t\t\tt = t.Elem()\n\t\t\t\t}\n\t\t\t\tif t.Kind() == reflect.Struct {\n\t\t\t\t\tinner, err := getTypeInfo(t)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\tif tinfo.xmlname == nil {\n\t\t\t\t\t\ttinfo.xmlname = inner.xmlname\n\t\t\t\t\t}\n\t\t\t\t\tfor _, finfo := range inner.fields {\n\t\t\t\t\t\tfinfo.idx = append([]int{i}, 
finfo.idx...)\n\t\t\t\t\t\tif err := addFieldInfo(typ, tinfo, &finfo); err != nil {\n\t\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfinfo, err := structFieldInfo(typ, &f)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif f.Name == \"XMLName\" {\n\t\t\t\ttinfo.xmlname = finfo\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Add the field if it doesn't conflict with other fields.\n\t\t\tif err := addFieldInfo(typ, tinfo, finfo); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\ttinfoLock.Lock()\n\ttinfoMap[typ] = tinfo\n\ttinfoLock.Unlock()\n\treturn tinfo, nil\n}\n\n// structFieldInfo builds and returns a fieldInfo for f.\nfunc structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) {\n\tfinfo := &fieldInfo{idx: f.Index}\n\n\t// Split the tag from the xml namespace if necessary.\n\ttag := f.Tag.Get(\"xml\")\n\tif i := strings.Index(tag, \" \"); i >= 0 {\n\t\tfinfo.xmlns, tag = tag[:i], tag[i+1:]\n\t}\n\n\t// Parse flags.\n\ttokens := strings.Split(tag, \",\")\n\tif len(tokens) == 1 {\n\t\tfinfo.flags = fElement\n\t} else {\n\t\ttag = tokens[0]\n\t\tfor _, flag := range tokens[1:] {\n\t\t\tswitch flag {\n\t\t\tcase \"attr\":\n\t\t\t\tfinfo.flags |= fAttr\n\t\t\tcase \"chardata\":\n\t\t\t\tfinfo.flags |= fCharData\n\t\t\tcase \"innerxml\":\n\t\t\t\tfinfo.flags |= fInnerXml\n\t\t\tcase \"comment\":\n\t\t\t\tfinfo.flags |= fComment\n\t\t\tcase \"any\":\n\t\t\t\tfinfo.flags |= fAny\n\t\t\tcase \"omitempty\":\n\t\t\t\tfinfo.flags |= fOmitEmpty\n\t\t\t}\n\t\t}\n\n\t\t// Validate the flags used.\n\t\tvalid := true\n\t\tswitch mode := finfo.flags & fMode; mode {\n\t\tcase 0:\n\t\t\tfinfo.flags |= fElement\n\t\tcase fAttr, fCharData, fInnerXml, fComment, fAny:\n\t\t\tif f.Name == \"XMLName\" || tag != \"\" && mode != fAttr {\n\t\t\t\tvalid = false\n\t\t\t}\n\t\tdefault:\n\t\t\t// This will also catch multiple modes in a single field.\n\t\t\tvalid = false\n\t\t}\n\t\tif 
finfo.flags&fMode == fAny {\n\t\t\tfinfo.flags |= fElement\n\t\t}\n\t\tif finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 {\n\t\t\tvalid = false\n\t\t}\n\t\tif !valid {\n\t\t\treturn nil, fmt.Errorf(\"xml: invalid tag in field %s of type %s: %q\",\n\t\t\t\tf.Name, typ, f.Tag.Get(\"xml\"))\n\t\t}\n\t}\n\n\t// Use of xmlns without a name is not allowed.\n\tif finfo.xmlns != \"\" && tag == \"\" {\n\t\treturn nil, fmt.Errorf(\"xml: namespace without name in field %s of type %s: %q\",\n\t\t\tf.Name, typ, f.Tag.Get(\"xml\"))\n\t}\n\n\tif f.Name == \"XMLName\" {\n\t\t// The XMLName field records the XML element name. Don't\n\t\t// process it as usual because its name should default to\n\t\t// empty rather than to the field name.\n\t\tfinfo.name = tag\n\t\treturn finfo, nil\n\t}\n\n\tif tag == \"\" {\n\t\t// If the name part of the tag is completely empty, get\n\t\t// default from XMLName of underlying struct if feasible,\n\t\t// or field name otherwise.\n\t\tif xmlname := lookupXMLName(f.Type); xmlname != nil {\n\t\t\tfinfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name\n\t\t} else {\n\t\t\tfinfo.name = f.Name\n\t\t}\n\t\treturn finfo, nil\n\t}\n\n\tif finfo.xmlns == \"\" && finfo.flags&fAttr == 0 {\n\t\t// If it's an element no namespace specified, get the default\n\t\t// from the XMLName of enclosing struct if possible.\n\t\tif xmlname := lookupXMLName(typ); xmlname != nil {\n\t\t\tfinfo.xmlns = xmlname.xmlns\n\t\t}\n\t}\n\n\t// Prepare field name and parents.\n\tparents := strings.Split(tag, \">\")\n\tif parents[0] == \"\" {\n\t\tparents[0] = f.Name\n\t}\n\tif parents[len(parents)-1] == \"\" {\n\t\treturn nil, fmt.Errorf(\"xml: trailing '>' in field %s of type %s\", f.Name, typ)\n\t}\n\tfinfo.name = parents[len(parents)-1]\n\tif len(parents) > 1 {\n\t\tif (finfo.flags & fElement) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"xml: %s chain not valid with %s flag\", tag, strings.Join(tokens[1:], \",\"))\n\t\t}\n\t\tfinfo.parents = 
parents[:len(parents)-1]\n\t}\n\n\t// If the field type has an XMLName field, the names must match\n\t// so that the behavior of both marshalling and unmarshalling\n\t// is straightforward and unambiguous.\n\tif finfo.flags&fElement != 0 {\n\t\tftyp := f.Type\n\t\txmlname := lookupXMLName(ftyp)\n\t\tif xmlname != nil && xmlname.name != finfo.name {\n\t\t\treturn nil, fmt.Errorf(\"xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName\",\n\t\t\t\tfinfo.name, typ, f.Name, xmlname.name, ftyp)\n\t\t}\n\t}\n\treturn finfo, nil\n}\n\n// lookupXMLName returns the fieldInfo for typ's XMLName field\n// in case it exists and has a valid xml field tag, otherwise\n// it returns nil.\nfunc lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) {\n\tfor typ.Kind() == reflect.Ptr {\n\t\ttyp = typ.Elem()\n\t}\n\tif typ.Kind() != reflect.Struct {\n\t\treturn nil\n\t}\n\tfor i, n := 0, typ.NumField(); i < n; i++ {\n\t\tf := typ.Field(i)\n\t\tif f.Name != \"XMLName\" {\n\t\t\tcontinue\n\t\t}\n\t\tfinfo, err := structFieldInfo(typ, &f)\n\t\tif finfo.name != \"\" && err == nil {\n\t\t\treturn finfo\n\t\t}\n\t\t// Also consider errors as a non-existent field tag\n\t\t// and let getTypeInfo itself report the error.\n\t\tbreak\n\t}\n\treturn nil\n}\n\nfunc min(a, b int) int {\n\tif a <= b {\n\t\treturn a\n\t}\n\treturn b\n}\n\n// addFieldInfo adds finfo to tinfo.fields if there are no\n// conflicts, or if conflicts arise from previous fields that were\n// obtained from deeper embedded structures than finfo. In the latter\n// case, the conflicting entries are dropped.\n// A conflict occurs when the path (parent + name) to a field is\n// itself a prefix of another path, or when two paths match exactly.\n// It is okay for field paths to share a common, shorter prefix.\nfunc addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error {\n\tvar conflicts []int\nLoop:\n\t// First, figure all conflicts. 
Most working code will have none.\n\tfor i := range tinfo.fields {\n\t\toldf := &tinfo.fields[i]\n\t\tif oldf.flags&fMode != newf.flags&fMode {\n\t\t\tcontinue\n\t\t}\n\t\tif oldf.xmlns != \"\" && newf.xmlns != \"\" && oldf.xmlns != newf.xmlns {\n\t\t\tcontinue\n\t\t}\n\t\tminl := min(len(newf.parents), len(oldf.parents))\n\t\tfor p := 0; p < minl; p++ {\n\t\t\tif oldf.parents[p] != newf.parents[p] {\n\t\t\t\tcontinue Loop\n\t\t\t}\n\t\t}\n\t\tif len(oldf.parents) > len(newf.parents) {\n\t\t\tif oldf.parents[len(newf.parents)] == newf.name {\n\t\t\t\tconflicts = append(conflicts, i)\n\t\t\t}\n\t\t} else if len(oldf.parents) < len(newf.parents) {\n\t\t\tif newf.parents[len(oldf.parents)] == oldf.name {\n\t\t\t\tconflicts = append(conflicts, i)\n\t\t\t}\n\t\t} else {\n\t\t\tif newf.name == oldf.name {\n\t\t\t\tconflicts = append(conflicts, i)\n\t\t\t}\n\t\t}\n\t}\n\t// Without conflicts, add the new field and return.\n\tif conflicts == nil {\n\t\ttinfo.fields = append(tinfo.fields, *newf)\n\t\treturn nil\n\t}\n\n\t// If any conflict is shallower, ignore the new field.\n\t// This matches the Go field resolution on embedding.\n\tfor _, i := range conflicts {\n\t\tif len(tinfo.fields[i].idx) < len(newf.idx) {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// Otherwise, if any of them is at the same depth level, it's an error.\n\tfor _, i := range conflicts {\n\t\toldf := &tinfo.fields[i]\n\t\tif len(oldf.idx) == len(newf.idx) {\n\t\t\tf1 := typ.FieldByIndex(oldf.idx)\n\t\t\tf2 := typ.FieldByIndex(newf.idx)\n\t\t\treturn &TagPathError{typ, f1.Name, f1.Tag.Get(\"xml\"), f2.Name, f2.Tag.Get(\"xml\")}\n\t\t}\n\t}\n\n\t// Otherwise, the new field is shallower, and thus takes precedence,\n\t// so drop the conflicting fields from tinfo and append the new one.\n\tfor c := len(conflicts) - 1; c >= 0; c-- {\n\t\ti := conflicts[c]\n\t\tcopy(tinfo.fields[i:], tinfo.fields[i+1:])\n\t\ttinfo.fields = tinfo.fields[:len(tinfo.fields)-1]\n\t}\n\ttinfo.fields = append(tinfo.fields, *newf)\n\treturn 
nil\n}\n\n// A TagPathError represents an error in the unmarshalling process\n// caused by the use of field tags with conflicting paths.\ntype TagPathError struct {\n\tStruct       reflect.Type\n\tField1, Tag1 string\n\tField2, Tag2 string\n}\n\nfunc (e *TagPathError) Error() string {\n\treturn fmt.Sprintf(\"%s field %q with tag %q conflicts with field %q with tag %q\", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2)\n}\n\n// value returns v's field value corresponding to finfo.\n// It's equivalent to v.FieldByIndex(finfo.idx), but initializes\n// and dereferences pointers as necessary.\nfunc (finfo *fieldInfo) value(v reflect.Value) reflect.Value {\n\tfor i, x := range finfo.idx {\n\t\tif i > 0 {\n\t\t\tt := v.Type()\n\t\t\tif t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {\n\t\t\t\tif v.IsNil() {\n\t\t\t\t\tv.Set(reflect.New(v.Type().Elem()))\n\t\t\t\t}\n\t\t\t\tv = v.Elem()\n\t\t\t}\n\t\t}\n\t\tv = v.Field(x)\n\t}\n\treturn v\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/internal/xml/xml.go",
    "content": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package xml implements a simple XML 1.0 parser that\n// understands XML name spaces.\npackage xml\n\n// References:\n//    Annotated XML spec: http://www.xml.com/axml/testaxml.htm\n//    XML name spaces: http://www.w3.org/TR/REC-xml-names/\n\n// TODO(rsc):\n//\tTest error handling.\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n)\n\n// A SyntaxError represents a syntax error in the XML input stream.\ntype SyntaxError struct {\n\tMsg  string\n\tLine int\n}\n\nfunc (e *SyntaxError) Error() string {\n\treturn \"XML syntax error on line \" + strconv.Itoa(e.Line) + \": \" + e.Msg\n}\n\n// A Name represents an XML name (Local) annotated with a name space\n// identifier (Space). In tokens returned by Decoder.Token, the Space\n// identifier is given as a canonical URL, not the short prefix used in\n// the document being parsed.\n//\n// As a special case, XML namespace declarations will use the literal\n// string \"xmlns\" for the Space field instead of the fully resolved URL.\n// See Encoder.EncodeToken for more information on namespace encoding\n// behaviour.\ntype Name struct {\n\tSpace, Local string\n}\n\n// isNamespace reports whether the name is a namespace-defining name.\nfunc (name Name) isNamespace() bool {\n\treturn name.Local == \"xmlns\" || name.Space == \"xmlns\"\n}\n\n// An Attr represents an attribute in an XML element (Name=Value).\ntype Attr struct {\n\tName  Name\n\tValue string\n}\n\n// A Token is an interface holding one of the token types:\n// StartElement, EndElement, CharData, Comment, ProcInst, or Directive.\ntype Token interface{}\n\n// A StartElement represents an XML start element.\ntype StartElement struct {\n\tName Name\n\tAttr []Attr\n}\n\nfunc (e StartElement) Copy() 
StartElement {\n\tattrs := make([]Attr, len(e.Attr))\n\tcopy(attrs, e.Attr)\n\te.Attr = attrs\n\treturn e\n}\n\n// End returns the corresponding XML end element.\nfunc (e StartElement) End() EndElement {\n\treturn EndElement{e.Name}\n}\n\n// setDefaultNamespace sets the namespace of the element\n// as the default for all elements contained within it.\nfunc (e *StartElement) setDefaultNamespace() {\n\tif e.Name.Space == \"\" {\n\t\t// If there's no namespace on the element, don't\n\t\t// set the default. Strictly speaking this might be wrong, as\n\t\t// we can't tell if the element had no namespace set\n\t\t// or was just using the default namespace.\n\t\treturn\n\t}\n\t// Don't add a default name space if there's already one set.\n\tfor _, attr := range e.Attr {\n\t\tif attr.Name.Space == \"\" && attr.Name.Local == \"xmlns\" {\n\t\t\treturn\n\t\t}\n\t}\n\te.Attr = append(e.Attr, Attr{\n\t\tName: Name{\n\t\t\tLocal: \"xmlns\",\n\t\t},\n\t\tValue: e.Name.Space,\n\t})\n}\n\n// An EndElement represents an XML end element.\ntype EndElement struct {\n\tName Name\n}\n\n// A CharData represents XML character data (raw text),\n// in which XML escape sequences have been replaced by\n// the characters they represent.\ntype CharData []byte\n\nfunc makeCopy(b []byte) []byte {\n\tb1 := make([]byte, len(b))\n\tcopy(b1, b)\n\treturn b1\n}\n\nfunc (c CharData) Copy() CharData { return CharData(makeCopy(c)) }\n\n// A Comment represents an XML comment of the form <!--comment-->.\n// The bytes do not include the <!-- and --> comment markers.\ntype Comment []byte\n\nfunc (c Comment) Copy() Comment { return Comment(makeCopy(c)) }\n\n// A ProcInst represents an XML processing instruction of the form <?target inst?>\ntype ProcInst struct {\n\tTarget string\n\tInst   []byte\n}\n\nfunc (p ProcInst) Copy() ProcInst {\n\tp.Inst = makeCopy(p.Inst)\n\treturn p\n}\n\n// A Directive represents an XML directive of the form <!text>.\n// The bytes do not include the <! 
and > markers.\ntype Directive []byte\n\nfunc (d Directive) Copy() Directive { return Directive(makeCopy(d)) }\n\n// CopyToken returns a copy of a Token.\nfunc CopyToken(t Token) Token {\n\tswitch v := t.(type) {\n\tcase CharData:\n\t\treturn v.Copy()\n\tcase Comment:\n\t\treturn v.Copy()\n\tcase Directive:\n\t\treturn v.Copy()\n\tcase ProcInst:\n\t\treturn v.Copy()\n\tcase StartElement:\n\t\treturn v.Copy()\n\t}\n\treturn t\n}\n\n// A Decoder represents an XML parser reading a particular input stream.\n// The parser assumes that its input is encoded in UTF-8.\ntype Decoder struct {\n\t// Strict defaults to true, enforcing the requirements\n\t// of the XML specification.\n\t// If set to false, the parser allows input containing common\n\t// mistakes:\n\t//\t* If an element is missing an end tag, the parser invents\n\t//\t  end tags as necessary to keep the return values from Token\n\t//\t  properly balanced.\n\t//\t* In attribute values and character data, unknown or malformed\n\t//\t  character entities (sequences beginning with &) are left alone.\n\t//\n\t// Setting:\n\t//\n\t//\td.Strict = false;\n\t//\td.AutoClose = HTMLAutoClose;\n\t//\td.Entity = HTMLEntity\n\t//\n\t// creates a parser that can handle typical HTML.\n\t//\n\t// Strict mode does not enforce the requirements of the XML name spaces TR.\n\t// In particular it does not reject name space tags using undefined prefixes.\n\t// Such tags are recorded with the unknown prefix as the name space URL.\n\tStrict bool\n\n\t// When Strict == false, AutoClose indicates a set of elements to\n\t// consider closed immediately after they are opened, regardless\n\t// of whether an end element is present.\n\tAutoClose []string\n\n\t// Entity can be used to map non-standard entity names to string replacements.\n\t// The parser behaves as if these standard mappings are present in the map,\n\t// regardless of the actual map content:\n\t//\n\t//\t\"lt\": \"<\",\n\t//\t\"gt\": \">\",\n\t//\t\"amp\": \"&\",\n\t//\t\"apos\": 
\"'\",\n\t//\t\"quot\": `\"`,\n\tEntity map[string]string\n\n\t// CharsetReader, if non-nil, defines a function to generate\n\t// charset-conversion readers, converting from the provided\n\t// non-UTF-8 charset into UTF-8. If CharsetReader is nil or\n\t// returns an error, parsing stops with an error. One of the\n\t// the CharsetReader's result values must be non-nil.\n\tCharsetReader func(charset string, input io.Reader) (io.Reader, error)\n\n\t// DefaultSpace sets the default name space used for unadorned tags,\n\t// as if the entire XML stream were wrapped in an element containing\n\t// the attribute xmlns=\"DefaultSpace\".\n\tDefaultSpace string\n\n\tr              io.ByteReader\n\tbuf            bytes.Buffer\n\tsaved          *bytes.Buffer\n\tstk            *stack\n\tfree           *stack\n\tneedClose      bool\n\ttoClose        Name\n\tnextToken      Token\n\tnextByte       int\n\tns             map[string]string\n\terr            error\n\tline           int\n\toffset         int64\n\tunmarshalDepth int\n}\n\n// NewDecoder creates a new XML parser reading from r.\n// If r does not implement io.ByteReader, NewDecoder will\n// do its own buffering.\nfunc NewDecoder(r io.Reader) *Decoder {\n\td := &Decoder{\n\t\tns:       make(map[string]string),\n\t\tnextByte: -1,\n\t\tline:     1,\n\t\tStrict:   true,\n\t}\n\td.switchToReader(r)\n\treturn d\n}\n\n// Token returns the next XML token in the input stream.\n// At the end of the input stream, Token returns nil, io.EOF.\n//\n// Slices of bytes in the returned token data refer to the\n// parser's internal buffer and remain valid only until the next\n// call to Token. 
To acquire a copy of the bytes, call CopyToken\n// or the token's Copy method.\n//\n// Token expands self-closing elements such as <br/>\n// into separate start and end elements returned by successive calls.\n//\n// Token guarantees that the StartElement and EndElement\n// tokens it returns are properly nested and matched:\n// if Token encounters an unexpected end element,\n// it will return an error.\n//\n// Token implements XML name spaces as described by\n// http://www.w3.org/TR/REC-xml-names/.  Each of the\n// Name structures contained in the Token has the Space\n// set to the URL identifying its name space when known.\n// If Token encounters an unrecognized name space prefix,\n// it uses the prefix as the Space rather than report an error.\nfunc (d *Decoder) Token() (t Token, err error) {\n\tif d.stk != nil && d.stk.kind == stkEOF {\n\t\terr = io.EOF\n\t\treturn\n\t}\n\tif d.nextToken != nil {\n\t\tt = d.nextToken\n\t\td.nextToken = nil\n\t} else if t, err = d.rawToken(); err != nil {\n\t\treturn\n\t}\n\n\tif !d.Strict {\n\t\tif t1, ok := d.autoClose(t); ok {\n\t\t\td.nextToken = t\n\t\t\tt = t1\n\t\t}\n\t}\n\tswitch t1 := t.(type) {\n\tcase StartElement:\n\t\t// In XML name spaces, the translations listed in the\n\t\t// attributes apply to the element name and\n\t\t// to the other attribute names, so process\n\t\t// the translations first.\n\t\tfor _, a := range t1.Attr {\n\t\t\tif a.Name.Space == \"xmlns\" {\n\t\t\t\tv, ok := d.ns[a.Name.Local]\n\t\t\t\td.pushNs(a.Name.Local, v, ok)\n\t\t\t\td.ns[a.Name.Local] = a.Value\n\t\t\t}\n\t\t\tif a.Name.Space == \"\" && a.Name.Local == \"xmlns\" {\n\t\t\t\t// Default space for untagged names\n\t\t\t\tv, ok := d.ns[\"\"]\n\t\t\t\td.pushNs(\"\", v, ok)\n\t\t\t\td.ns[\"\"] = a.Value\n\t\t\t}\n\t\t}\n\n\t\td.translate(&t1.Name, true)\n\t\tfor i := range t1.Attr {\n\t\t\td.translate(&t1.Attr[i].Name, false)\n\t\t}\n\t\td.pushElement(t1.Name)\n\t\tt = t1\n\n\tcase EndElement:\n\t\td.translate(&t1.Name, true)\n\t\tif 
!d.popElement(&t1) {\n\t\t\treturn nil, d.err\n\t\t}\n\t\tt = t1\n\t}\n\treturn\n}\n\nconst xmlURL = \"http://www.w3.org/XML/1998/namespace\"\n\n// Apply name space translation to name n.\n// The default name space (for Space==\"\")\n// applies only to element names, not to attribute names.\nfunc (d *Decoder) translate(n *Name, isElementName bool) {\n\tswitch {\n\tcase n.Space == \"xmlns\":\n\t\treturn\n\tcase n.Space == \"\" && !isElementName:\n\t\treturn\n\tcase n.Space == \"xml\":\n\t\tn.Space = xmlURL\n\tcase n.Space == \"\" && n.Local == \"xmlns\":\n\t\treturn\n\t}\n\tif v, ok := d.ns[n.Space]; ok {\n\t\tn.Space = v\n\t} else if n.Space == \"\" {\n\t\tn.Space = d.DefaultSpace\n\t}\n}\n\nfunc (d *Decoder) switchToReader(r io.Reader) {\n\t// Get efficient byte at a time reader.\n\t// Assume that if reader has its own\n\t// ReadByte, it's efficient enough.\n\t// Otherwise, use bufio.\n\tif rb, ok := r.(io.ByteReader); ok {\n\t\td.r = rb\n\t} else {\n\t\td.r = bufio.NewReader(r)\n\t}\n}\n\n// Parsing state - stack holds old name space translations\n// and the current set of open elements. 
The translations to pop when\n// ending a given tag are *below* it on the stack, which is\n// more work but forced on us by XML.\ntype stack struct {\n\tnext *stack\n\tkind int\n\tname Name\n\tok   bool\n}\n\nconst (\n\tstkStart = iota\n\tstkNs\n\tstkEOF\n)\n\nfunc (d *Decoder) push(kind int) *stack {\n\ts := d.free\n\tif s != nil {\n\t\td.free = s.next\n\t} else {\n\t\ts = new(stack)\n\t}\n\ts.next = d.stk\n\ts.kind = kind\n\td.stk = s\n\treturn s\n}\n\nfunc (d *Decoder) pop() *stack {\n\ts := d.stk\n\tif s != nil {\n\t\td.stk = s.next\n\t\ts.next = d.free\n\t\td.free = s\n\t}\n\treturn s\n}\n\n// Record that after the current element is finished\n// (that element is already pushed on the stack)\n// Token should return EOF until popEOF is called.\nfunc (d *Decoder) pushEOF() {\n\t// Walk down stack to find Start.\n\t// It might not be the top, because there might be stkNs\n\t// entries above it.\n\tstart := d.stk\n\tfor start.kind != stkStart {\n\t\tstart = start.next\n\t}\n\t// The stkNs entries below a start are associated with that\n\t// element too; skip over them.\n\tfor start.next != nil && start.next.kind == stkNs {\n\t\tstart = start.next\n\t}\n\ts := d.free\n\tif s != nil {\n\t\td.free = s.next\n\t} else {\n\t\ts = new(stack)\n\t}\n\ts.kind = stkEOF\n\ts.next = start.next\n\tstart.next = s\n}\n\n// Undo a pushEOF.\n// The element must have been finished, so the EOF should be at the top of the stack.\nfunc (d *Decoder) popEOF() bool {\n\tif d.stk == nil || d.stk.kind != stkEOF {\n\t\treturn false\n\t}\n\td.pop()\n\treturn true\n}\n\n// Record that we are starting an element with the given name.\nfunc (d *Decoder) pushElement(name Name) {\n\ts := d.push(stkStart)\n\ts.name = name\n}\n\n// Record that we are changing the value of ns[local].\n// The old value is url, ok.\nfunc (d *Decoder) pushNs(local string, url string, ok bool) {\n\ts := d.push(stkNs)\n\ts.name.Local = local\n\ts.name.Space = url\n\ts.ok = ok\n}\n\n// Creates a SyntaxError with the current 
line number.\nfunc (d *Decoder) syntaxError(msg string) error {\n\treturn &SyntaxError{Msg: msg, Line: d.line}\n}\n\n// Record that we are ending an element with the given name.\n// The name must match the record at the top of the stack,\n// which must be a pushElement record.\n// After popping the element, apply any undo records from\n// the stack to restore the name translations that existed\n// before we saw this element.\nfunc (d *Decoder) popElement(t *EndElement) bool {\n\ts := d.pop()\n\tname := t.Name\n\tswitch {\n\tcase s == nil || s.kind != stkStart:\n\t\td.err = d.syntaxError(\"unexpected end element </\" + name.Local + \">\")\n\t\treturn false\n\tcase s.name.Local != name.Local:\n\t\tif !d.Strict {\n\t\t\td.needClose = true\n\t\t\td.toClose = t.Name\n\t\t\tt.Name = s.name\n\t\t\treturn true\n\t\t}\n\t\td.err = d.syntaxError(\"element <\" + s.name.Local + \"> closed by </\" + name.Local + \">\")\n\t\treturn false\n\tcase s.name.Space != name.Space:\n\t\td.err = d.syntaxError(\"element <\" + s.name.Local + \"> in space \" + s.name.Space +\n\t\t\t\"closed by </\" + name.Local + \"> in space \" + name.Space)\n\t\treturn false\n\t}\n\n\t// Pop stack until a Start or EOF is on the top, undoing the\n\t// translations that were associated with the element we just closed.\n\tfor d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF {\n\t\ts := d.pop()\n\t\tif s.ok {\n\t\t\td.ns[s.name.Local] = s.name.Space\n\t\t} else {\n\t\t\tdelete(d.ns, s.name.Local)\n\t\t}\n\t}\n\n\treturn true\n}\n\n// If the top element on the stack is autoclosing and\n// t is not the end tag, invent the end tag.\nfunc (d *Decoder) autoClose(t Token) (Token, bool) {\n\tif d.stk == nil || d.stk.kind != stkStart {\n\t\treturn nil, false\n\t}\n\tname := strings.ToLower(d.stk.name.Local)\n\tfor _, s := range d.AutoClose {\n\t\tif strings.ToLower(s) == name {\n\t\t\t// This one should be auto closed if t doesn't close it.\n\t\t\tet, ok := t.(EndElement)\n\t\t\tif !ok || et.Name.Local 
!= name {\n\t\t\t\treturn EndElement{d.stk.name}, true\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil, false\n}\n\nvar errRawToken = errors.New(\"xml: cannot use RawToken from UnmarshalXML method\")\n\n// RawToken is like Token but does not verify that\n// start and end elements match and does not translate\n// name space prefixes to their corresponding URLs.\nfunc (d *Decoder) RawToken() (Token, error) {\n\tif d.unmarshalDepth > 0 {\n\t\treturn nil, errRawToken\n\t}\n\treturn d.rawToken()\n}\n\nfunc (d *Decoder) rawToken() (Token, error) {\n\tif d.err != nil {\n\t\treturn nil, d.err\n\t}\n\tif d.needClose {\n\t\t// The last element we read was self-closing and\n\t\t// we returned just the StartElement half.\n\t\t// Return the EndElement half now.\n\t\td.needClose = false\n\t\treturn EndElement{d.toClose}, nil\n\t}\n\n\tb, ok := d.getc()\n\tif !ok {\n\t\treturn nil, d.err\n\t}\n\n\tif b != '<' {\n\t\t// Text section.\n\t\td.ungetc(b)\n\t\tdata := d.text(-1, false)\n\t\tif data == nil {\n\t\t\treturn nil, d.err\n\t\t}\n\t\treturn CharData(data), nil\n\t}\n\n\tif b, ok = d.mustgetc(); !ok {\n\t\treturn nil, d.err\n\t}\n\tswitch b {\n\tcase '/':\n\t\t// </: End element\n\t\tvar name Name\n\t\tif name, ok = d.nsname(); !ok {\n\t\t\tif d.err == nil {\n\t\t\t\td.err = d.syntaxError(\"expected element name after </\")\n\t\t\t}\n\t\t\treturn nil, d.err\n\t\t}\n\t\td.space()\n\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\treturn nil, d.err\n\t\t}\n\t\tif b != '>' {\n\t\t\td.err = d.syntaxError(\"invalid characters between </\" + name.Local + \" and >\")\n\t\t\treturn nil, d.err\n\t\t}\n\t\treturn EndElement{name}, nil\n\n\tcase '?':\n\t\t// <?: Processing instruction.\n\t\tvar target string\n\t\tif target, ok = d.name(); !ok {\n\t\t\tif d.err == nil {\n\t\t\t\td.err = d.syntaxError(\"expected target name after <?\")\n\t\t\t}\n\t\t\treturn nil, d.err\n\t\t}\n\t\td.space()\n\t\td.buf.Reset()\n\t\tvar b0 byte\n\t\tfor {\n\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\treturn nil, 
d.err\n\t\t\t}\n\t\t\td.buf.WriteByte(b)\n\t\t\tif b0 == '?' && b == '>' {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tb0 = b\n\t\t}\n\t\tdata := d.buf.Bytes()\n\t\tdata = data[0 : len(data)-2] // chop ?>\n\n\t\tif target == \"xml\" {\n\t\t\tcontent := string(data)\n\t\t\tver := procInst(\"version\", content)\n\t\t\tif ver != \"\" && ver != \"1.0\" {\n\t\t\t\td.err = fmt.Errorf(\"xml: unsupported version %q; only version 1.0 is supported\", ver)\n\t\t\t\treturn nil, d.err\n\t\t\t}\n\t\t\tenc := procInst(\"encoding\", content)\n\t\t\tif enc != \"\" && enc != \"utf-8\" && enc != \"UTF-8\" {\n\t\t\t\tif d.CharsetReader == nil {\n\t\t\t\t\td.err = fmt.Errorf(\"xml: encoding %q declared but Decoder.CharsetReader is nil\", enc)\n\t\t\t\t\treturn nil, d.err\n\t\t\t\t}\n\t\t\t\tnewr, err := d.CharsetReader(enc, d.r.(io.Reader))\n\t\t\t\tif err != nil {\n\t\t\t\t\td.err = fmt.Errorf(\"xml: opening charset %q: %v\", enc, err)\n\t\t\t\t\treturn nil, d.err\n\t\t\t\t}\n\t\t\t\tif newr == nil {\n\t\t\t\t\tpanic(\"CharsetReader returned a nil Reader for charset \" + enc)\n\t\t\t\t}\n\t\t\t\td.switchToReader(newr)\n\t\t\t}\n\t\t}\n\t\treturn ProcInst{target, data}, nil\n\n\tcase '!':\n\t\t// <!: Maybe comment, maybe CDATA.\n\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\treturn nil, d.err\n\t\t}\n\t\tswitch b {\n\t\tcase '-': // <!-\n\t\t\t// Probably <!-- for a comment.\n\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\treturn nil, d.err\n\t\t\t}\n\t\t\tif b != '-' {\n\t\t\t\td.err = d.syntaxError(\"invalid sequence <!- not part of <!--\")\n\t\t\t\treturn nil, d.err\n\t\t\t}\n\t\t\t// Look for terminator.\n\t\t\td.buf.Reset()\n\t\t\tvar b0, b1 byte\n\t\t\tfor {\n\t\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\t\treturn nil, d.err\n\t\t\t\t}\n\t\t\t\td.buf.WriteByte(b)\n\t\t\t\tif b0 == '-' && b1 == '-' && b == '>' {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tb0, b1 = b1, b\n\t\t\t}\n\t\t\tdata := d.buf.Bytes()\n\t\t\tdata = data[0 : len(data)-3] // chop -->\n\t\t\treturn Comment(data), nil\n\n\t\tcase '[': // 
<![\n\t\t\t// Probably <![CDATA[.\n\t\t\tfor i := 0; i < 6; i++ {\n\t\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\t\treturn nil, d.err\n\t\t\t\t}\n\t\t\t\tif b != \"CDATA[\"[i] {\n\t\t\t\t\td.err = d.syntaxError(\"invalid <![ sequence\")\n\t\t\t\t\treturn nil, d.err\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Have <![CDATA[.  Read text until ]]>.\n\t\t\tdata := d.text(-1, true)\n\t\t\tif data == nil {\n\t\t\t\treturn nil, d.err\n\t\t\t}\n\t\t\treturn CharData(data), nil\n\t\t}\n\n\t\t// Probably a directive: <!DOCTYPE ...>, <!ENTITY ...>, etc.\n\t\t// We don't care, but accumulate for caller. Quoted angle\n\t\t// brackets do not count for nesting.\n\t\td.buf.Reset()\n\t\td.buf.WriteByte(b)\n\t\tinquote := uint8(0)\n\t\tdepth := 0\n\t\tfor {\n\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\treturn nil, d.err\n\t\t\t}\n\t\t\tif inquote == 0 && b == '>' && depth == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\tHandleB:\n\t\t\td.buf.WriteByte(b)\n\t\t\tswitch {\n\t\t\tcase b == inquote:\n\t\t\t\tinquote = 0\n\n\t\t\tcase inquote != 0:\n\t\t\t\t// in quotes, no special action\n\n\t\t\tcase b == '\\'' || b == '\"':\n\t\t\t\tinquote = b\n\n\t\t\tcase b == '>' && inquote == 0:\n\t\t\t\tdepth--\n\n\t\t\tcase b == '<' && inquote == 0:\n\t\t\t\t// Look for <!-- to begin comment.\n\t\t\t\ts := \"!--\"\n\t\t\t\tfor i := 0; i < len(s); i++ {\n\t\t\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\t\t\treturn nil, d.err\n\t\t\t\t\t}\n\t\t\t\t\tif b != s[i] {\n\t\t\t\t\t\tfor j := 0; j < i; j++ {\n\t\t\t\t\t\t\td.buf.WriteByte(s[j])\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdepth++\n\t\t\t\t\t\tgoto HandleB\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Remove < that was written above.\n\t\t\t\td.buf.Truncate(d.buf.Len() - 1)\n\n\t\t\t\t// Look for terminator.\n\t\t\t\tvar b0, b1 byte\n\t\t\t\tfor {\n\t\t\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\t\t\treturn nil, d.err\n\t\t\t\t\t}\n\t\t\t\t\tif b0 == '-' && b1 == '-' && b == '>' {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tb0, b1 = b1, b\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn 
Directive(d.buf.Bytes()), nil\n\t}\n\n\t// Must be an open element like <a href=\"foo\">\n\td.ungetc(b)\n\n\tvar (\n\t\tname  Name\n\t\tempty bool\n\t\tattr  []Attr\n\t)\n\tif name, ok = d.nsname(); !ok {\n\t\tif d.err == nil {\n\t\t\td.err = d.syntaxError(\"expected element name after <\")\n\t\t}\n\t\treturn nil, d.err\n\t}\n\n\tattr = []Attr{}\n\tfor {\n\t\td.space()\n\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\treturn nil, d.err\n\t\t}\n\t\tif b == '/' {\n\t\t\tempty = true\n\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\treturn nil, d.err\n\t\t\t}\n\t\t\tif b != '>' {\n\t\t\t\td.err = d.syntaxError(\"expected /> in element\")\n\t\t\t\treturn nil, d.err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif b == '>' {\n\t\t\tbreak\n\t\t}\n\t\td.ungetc(b)\n\n\t\tn := len(attr)\n\t\tif n >= cap(attr) {\n\t\t\tnCap := 2 * cap(attr)\n\t\t\tif nCap == 0 {\n\t\t\t\tnCap = 4\n\t\t\t}\n\t\t\tnattr := make([]Attr, n, nCap)\n\t\t\tcopy(nattr, attr)\n\t\t\tattr = nattr\n\t\t}\n\t\tattr = attr[0 : n+1]\n\t\ta := &attr[n]\n\t\tif a.Name, ok = d.nsname(); !ok {\n\t\t\tif d.err == nil {\n\t\t\t\td.err = d.syntaxError(\"expected attribute name in element\")\n\t\t\t}\n\t\t\treturn nil, d.err\n\t\t}\n\t\td.space()\n\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\treturn nil, d.err\n\t\t}\n\t\tif b != '=' {\n\t\t\tif d.Strict {\n\t\t\t\td.err = d.syntaxError(\"attribute name without = in element\")\n\t\t\t\treturn nil, d.err\n\t\t\t} else {\n\t\t\t\td.ungetc(b)\n\t\t\t\ta.Value = a.Name.Local\n\t\t\t}\n\t\t} else {\n\t\t\td.space()\n\t\t\tdata := d.attrval()\n\t\t\tif data == nil {\n\t\t\t\treturn nil, d.err\n\t\t\t}\n\t\t\ta.Value = string(data)\n\t\t}\n\t}\n\tif empty {\n\t\td.needClose = true\n\t\td.toClose = name\n\t}\n\treturn StartElement{name, attr}, nil\n}\n\nfunc (d *Decoder) attrval() []byte {\n\tb, ok := d.mustgetc()\n\tif !ok {\n\t\treturn nil\n\t}\n\t// Handle quoted attribute values\n\tif b == '\"' || b == '\\'' {\n\t\treturn d.text(int(b), false)\n\t}\n\t// Handle unquoted attribute values for 
strict parsers\n\tif d.Strict {\n\t\td.err = d.syntaxError(\"unquoted or missing attribute value in element\")\n\t\treturn nil\n\t}\n\t// Handle unquoted attribute values for unstrict parsers\n\td.ungetc(b)\n\td.buf.Reset()\n\tfor {\n\t\tb, ok = d.mustgetc()\n\t\tif !ok {\n\t\t\treturn nil\n\t\t}\n\t\t// http://www.w3.org/TR/REC-html40/intro/sgmltut.html#h-3.2.2\n\t\tif 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' ||\n\t\t\t'0' <= b && b <= '9' || b == '_' || b == ':' || b == '-' {\n\t\t\td.buf.WriteByte(b)\n\t\t} else {\n\t\t\td.ungetc(b)\n\t\t\tbreak\n\t\t}\n\t}\n\treturn d.buf.Bytes()\n}\n\n// Skip spaces if any\nfunc (d *Decoder) space() {\n\tfor {\n\t\tb, ok := d.getc()\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tswitch b {\n\t\tcase ' ', '\\r', '\\n', '\\t':\n\t\tdefault:\n\t\t\td.ungetc(b)\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// Read a single byte.\n// If there is no byte to read, return ok==false\n// and leave the error in d.err.\n// Maintain line number.\nfunc (d *Decoder) getc() (b byte, ok bool) {\n\tif d.err != nil {\n\t\treturn 0, false\n\t}\n\tif d.nextByte >= 0 {\n\t\tb = byte(d.nextByte)\n\t\td.nextByte = -1\n\t} else {\n\t\tb, d.err = d.r.ReadByte()\n\t\tif d.err != nil {\n\t\t\treturn 0, false\n\t\t}\n\t\tif d.saved != nil {\n\t\t\td.saved.WriteByte(b)\n\t\t}\n\t}\n\tif b == '\\n' {\n\t\td.line++\n\t}\n\td.offset++\n\treturn b, true\n}\n\n// InputOffset returns the input stream byte offset of the current decoder position.\n// The offset gives the location of the end of the most recently returned token\n// and the beginning of the next token.\nfunc (d *Decoder) InputOffset() int64 {\n\treturn d.offset\n}\n\n// Return saved offset.\n// If we did ungetc (nextByte >= 0), have to back up one.\nfunc (d *Decoder) savedOffset() int {\n\tn := d.saved.Len()\n\tif d.nextByte >= 0 {\n\t\tn--\n\t}\n\treturn n\n}\n\n// Must read a single byte.\n// If there is no byte to read,\n// set d.err to SyntaxError(\"unexpected EOF\")\n// and return ok==false\nfunc (d *Decoder) 
mustgetc() (b byte, ok bool) {\n\tif b, ok = d.getc(); !ok {\n\t\tif d.err == io.EOF {\n\t\t\td.err = d.syntaxError(\"unexpected EOF\")\n\t\t}\n\t}\n\treturn\n}\n\n// Unread a single byte.\nfunc (d *Decoder) ungetc(b byte) {\n\tif b == '\\n' {\n\t\td.line--\n\t}\n\td.nextByte = int(b)\n\td.offset--\n}\n\nvar entity = map[string]int{\n\t\"lt\":   '<',\n\t\"gt\":   '>',\n\t\"amp\":  '&',\n\t\"apos\": '\\'',\n\t\"quot\": '\"',\n}\n\n// Read plain text section (XML calls it character data).\n// If quote >= 0, we are in a quoted string and need to find the matching quote.\n// If cdata == true, we are in a <![CDATA[ section and need to find ]]>.\n// On failure return nil and leave the error in d.err.\nfunc (d *Decoder) text(quote int, cdata bool) []byte {\n\tvar b0, b1 byte\n\tvar trunc int\n\td.buf.Reset()\nInput:\n\tfor {\n\t\tb, ok := d.getc()\n\t\tif !ok {\n\t\t\tif cdata {\n\t\t\t\tif d.err == io.EOF {\n\t\t\t\t\td.err = d.syntaxError(\"unexpected EOF in CDATA section\")\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tbreak Input\n\t\t}\n\n\t\t// <![CDATA[ section ends with ]]>.\n\t\t// It is an error for ]]> to appear in ordinary text.\n\t\tif b0 == ']' && b1 == ']' && b == '>' {\n\t\t\tif cdata {\n\t\t\t\ttrunc = 2\n\t\t\t\tbreak Input\n\t\t\t}\n\t\t\td.err = d.syntaxError(\"unescaped ]]> not in CDATA section\")\n\t\t\treturn nil\n\t\t}\n\n\t\t// Stop reading text if we see a <.\n\t\tif b == '<' && !cdata {\n\t\t\tif quote >= 0 {\n\t\t\t\td.err = d.syntaxError(\"unescaped < inside quoted string\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\td.ungetc('<')\n\t\t\tbreak Input\n\t\t}\n\t\tif quote >= 0 && b == byte(quote) {\n\t\t\tbreak Input\n\t\t}\n\t\tif b == '&' && !cdata {\n\t\t\t// Read escaped character expression up to semicolon.\n\t\t\t// XML in all its glory allows a document to define and use\n\t\t\t// its own character names with <!ENTITY ...> directives.\n\t\t\t// Parsers are required to recognize lt, gt, amp, apos, and quot\n\t\t\t// even if they have not been 
declared.\n\t\t\tbefore := d.buf.Len()\n\t\t\td.buf.WriteByte('&')\n\t\t\tvar ok bool\n\t\t\tvar text string\n\t\t\tvar haveText bool\n\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif b == '#' {\n\t\t\t\td.buf.WriteByte(b)\n\t\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tbase := 10\n\t\t\t\tif b == 'x' {\n\t\t\t\t\tbase = 16\n\t\t\t\t\td.buf.WriteByte(b)\n\t\t\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tstart := d.buf.Len()\n\t\t\t\tfor '0' <= b && b <= '9' ||\n\t\t\t\t\tbase == 16 && 'a' <= b && b <= 'f' ||\n\t\t\t\t\tbase == 16 && 'A' <= b && b <= 'F' {\n\t\t\t\t\td.buf.WriteByte(b)\n\t\t\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif b != ';' {\n\t\t\t\t\td.ungetc(b)\n\t\t\t\t} else {\n\t\t\t\t\ts := string(d.buf.Bytes()[start:])\n\t\t\t\t\td.buf.WriteByte(';')\n\t\t\t\t\tn, err := strconv.ParseUint(s, base, 64)\n\t\t\t\t\tif err == nil && n <= unicode.MaxRune {\n\t\t\t\t\t\ttext = string(n)\n\t\t\t\t\t\thaveText = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\td.ungetc(b)\n\t\t\t\tif !d.readName() {\n\t\t\t\t\tif d.err != nil {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\tok = false\n\t\t\t\t}\n\t\t\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif b != ';' {\n\t\t\t\t\td.ungetc(b)\n\t\t\t\t} else {\n\t\t\t\t\tname := d.buf.Bytes()[before+1:]\n\t\t\t\t\td.buf.WriteByte(';')\n\t\t\t\t\tif isName(name) {\n\t\t\t\t\t\ts := string(name)\n\t\t\t\t\t\tif r, ok := entity[s]; ok {\n\t\t\t\t\t\t\ttext = string(r)\n\t\t\t\t\t\t\thaveText = true\n\t\t\t\t\t\t} else if d.Entity != nil {\n\t\t\t\t\t\t\ttext, haveText = d.Entity[s]\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif haveText {\n\t\t\t\td.buf.Truncate(before)\n\t\t\t\td.buf.Write([]byte(text))\n\t\t\t\tb0, b1 = 0, 0\n\t\t\t\tcontinue Input\n\t\t\t}\n\t\t\tif !d.Strict {\n\t\t\t\tb0, b1 = 0, 
0\n\t\t\t\tcontinue Input\n\t\t\t}\n\t\t\tent := string(d.buf.Bytes()[before:])\n\t\t\tif ent[len(ent)-1] != ';' {\n\t\t\t\tent += \" (no semicolon)\"\n\t\t\t}\n\t\t\td.err = d.syntaxError(\"invalid character entity \" + ent)\n\t\t\treturn nil\n\t\t}\n\n\t\t// We must rewrite unescaped \\r and \\r\\n into \\n.\n\t\tif b == '\\r' {\n\t\t\td.buf.WriteByte('\\n')\n\t\t} else if b1 == '\\r' && b == '\\n' {\n\t\t\t// Skip \\r\\n--we already wrote \\n.\n\t\t} else {\n\t\t\td.buf.WriteByte(b)\n\t\t}\n\n\t\tb0, b1 = b1, b\n\t}\n\tdata := d.buf.Bytes()\n\tdata = data[0 : len(data)-trunc]\n\n\t// Inspect each rune for being a disallowed character.\n\tbuf := data\n\tfor len(buf) > 0 {\n\t\tr, size := utf8.DecodeRune(buf)\n\t\tif r == utf8.RuneError && size == 1 {\n\t\t\td.err = d.syntaxError(\"invalid UTF-8\")\n\t\t\treturn nil\n\t\t}\n\t\tbuf = buf[size:]\n\t\tif !isInCharacterRange(r) {\n\t\t\td.err = d.syntaxError(fmt.Sprintf(\"illegal character code %U\", r))\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn data\n}\n\n// Decide whether the given rune is in the XML Character Range, per\n// the Char production of http://www.xml.com/axml/testaxml.htm,\n// Section 2.2 Characters.\nfunc isInCharacterRange(r rune) (inrange bool) {\n\treturn r == 0x09 ||\n\t\tr == 0x0A ||\n\t\tr == 0x0D ||\n\t\tr >= 0x20 && r <= 0xDF77 ||\n\t\tr >= 0xE000 && r <= 0xFFFD ||\n\t\tr >= 0x10000 && r <= 0x10FFFF\n}\n\n// Get name space name: name with a : stuck in the middle.\n// The part before the : is the name space identifier.\nfunc (d *Decoder) nsname() (name Name, ok bool) {\n\ts, ok := d.name()\n\tif !ok {\n\t\treturn\n\t}\n\ti := strings.Index(s, \":\")\n\tif i < 0 {\n\t\tname.Local = s\n\t} else {\n\t\tname.Space = s[0:i]\n\t\tname.Local = s[i+1:]\n\t}\n\treturn name, true\n}\n\n// Get name: /first(first|second)*/\n// Do not set d.err if the name is missing (unless unexpected EOF is received):\n// let the caller provide better context.\nfunc (d *Decoder) name() (s string, ok bool) 
{\n\td.buf.Reset()\n\tif !d.readName() {\n\t\treturn \"\", false\n\t}\n\n\t// Now we check the characters.\n\tb := d.buf.Bytes()\n\tif !isName(b) {\n\t\td.err = d.syntaxError(\"invalid XML name: \" + string(b))\n\t\treturn \"\", false\n\t}\n\treturn string(b), true\n}\n\n// Read a name and append its bytes to d.buf.\n// The name is delimited by any single-byte character not valid in names.\n// All multi-byte characters are accepted; the caller must check their validity.\nfunc (d *Decoder) readName() (ok bool) {\n\tvar b byte\n\tif b, ok = d.mustgetc(); !ok {\n\t\treturn\n\t}\n\tif b < utf8.RuneSelf && !isNameByte(b) {\n\t\td.ungetc(b)\n\t\treturn false\n\t}\n\td.buf.WriteByte(b)\n\n\tfor {\n\t\tif b, ok = d.mustgetc(); !ok {\n\t\t\treturn\n\t\t}\n\t\tif b < utf8.RuneSelf && !isNameByte(b) {\n\t\t\td.ungetc(b)\n\t\t\tbreak\n\t\t}\n\t\td.buf.WriteByte(b)\n\t}\n\treturn true\n}\n\nfunc isNameByte(c byte) bool {\n\treturn 'A' <= c && c <= 'Z' ||\n\t\t'a' <= c && c <= 'z' ||\n\t\t'0' <= c && c <= '9' ||\n\t\tc == '_' || c == ':' || c == '.' 
|| c == '-'\n}\n\nfunc isName(s []byte) bool {\n\tif len(s) == 0 {\n\t\treturn false\n\t}\n\tc, n := utf8.DecodeRune(s)\n\tif c == utf8.RuneError && n == 1 {\n\t\treturn false\n\t}\n\tif !unicode.Is(first, c) {\n\t\treturn false\n\t}\n\tfor n < len(s) {\n\t\ts = s[n:]\n\t\tc, n = utf8.DecodeRune(s)\n\t\tif c == utf8.RuneError && n == 1 {\n\t\t\treturn false\n\t\t}\n\t\tif !unicode.Is(first, c) && !unicode.Is(second, c) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc isNameString(s string) bool {\n\tif len(s) == 0 {\n\t\treturn false\n\t}\n\tc, n := utf8.DecodeRuneInString(s)\n\tif c == utf8.RuneError && n == 1 {\n\t\treturn false\n\t}\n\tif !unicode.Is(first, c) {\n\t\treturn false\n\t}\n\tfor n < len(s) {\n\t\ts = s[n:]\n\t\tc, n = utf8.DecodeRuneInString(s)\n\t\tif c == utf8.RuneError && n == 1 {\n\t\t\treturn false\n\t\t}\n\t\tif !unicode.Is(first, c) && !unicode.Is(second, c) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// These tables were generated by cut and paste from Appendix B of\n// the XML spec at http://www.xml.com/axml/testaxml.htm\n// and then reformatting. 
First corresponds to (Letter | '_' | ':')\n// and second corresponds to NameChar.\n\nvar first = &unicode.RangeTable{\n\tR16: []unicode.Range16{\n\t\t{0x003A, 0x003A, 1},\n\t\t{0x0041, 0x005A, 1},\n\t\t{0x005F, 0x005F, 1},\n\t\t{0x0061, 0x007A, 1},\n\t\t{0x00C0, 0x00D6, 1},\n\t\t{0x00D8, 0x00F6, 1},\n\t\t{0x00F8, 0x00FF, 1},\n\t\t{0x0100, 0x0131, 1},\n\t\t{0x0134, 0x013E, 1},\n\t\t{0x0141, 0x0148, 1},\n\t\t{0x014A, 0x017E, 1},\n\t\t{0x0180, 0x01C3, 1},\n\t\t{0x01CD, 0x01F0, 1},\n\t\t{0x01F4, 0x01F5, 1},\n\t\t{0x01FA, 0x0217, 1},\n\t\t{0x0250, 0x02A8, 1},\n\t\t{0x02BB, 0x02C1, 1},\n\t\t{0x0386, 0x0386, 1},\n\t\t{0x0388, 0x038A, 1},\n\t\t{0x038C, 0x038C, 1},\n\t\t{0x038E, 0x03A1, 1},\n\t\t{0x03A3, 0x03CE, 1},\n\t\t{0x03D0, 0x03D6, 1},\n\t\t{0x03DA, 0x03E0, 2},\n\t\t{0x03E2, 0x03F3, 1},\n\t\t{0x0401, 0x040C, 1},\n\t\t{0x040E, 0x044F, 1},\n\t\t{0x0451, 0x045C, 1},\n\t\t{0x045E, 0x0481, 1},\n\t\t{0x0490, 0x04C4, 1},\n\t\t{0x04C7, 0x04C8, 1},\n\t\t{0x04CB, 0x04CC, 1},\n\t\t{0x04D0, 0x04EB, 1},\n\t\t{0x04EE, 0x04F5, 1},\n\t\t{0x04F8, 0x04F9, 1},\n\t\t{0x0531, 0x0556, 1},\n\t\t{0x0559, 0x0559, 1},\n\t\t{0x0561, 0x0586, 1},\n\t\t{0x05D0, 0x05EA, 1},\n\t\t{0x05F0, 0x05F2, 1},\n\t\t{0x0621, 0x063A, 1},\n\t\t{0x0641, 0x064A, 1},\n\t\t{0x0671, 0x06B7, 1},\n\t\t{0x06BA, 0x06BE, 1},\n\t\t{0x06C0, 0x06CE, 1},\n\t\t{0x06D0, 0x06D3, 1},\n\t\t{0x06D5, 0x06D5, 1},\n\t\t{0x06E5, 0x06E6, 1},\n\t\t{0x0905, 0x0939, 1},\n\t\t{0x093D, 0x093D, 1},\n\t\t{0x0958, 0x0961, 1},\n\t\t{0x0985, 0x098C, 1},\n\t\t{0x098F, 0x0990, 1},\n\t\t{0x0993, 0x09A8, 1},\n\t\t{0x09AA, 0x09B0, 1},\n\t\t{0x09B2, 0x09B2, 1},\n\t\t{0x09B6, 0x09B9, 1},\n\t\t{0x09DC, 0x09DD, 1},\n\t\t{0x09DF, 0x09E1, 1},\n\t\t{0x09F0, 0x09F1, 1},\n\t\t{0x0A05, 0x0A0A, 1},\n\t\t{0x0A0F, 0x0A10, 1},\n\t\t{0x0A13, 0x0A28, 1},\n\t\t{0x0A2A, 0x0A30, 1},\n\t\t{0x0A32, 0x0A33, 1},\n\t\t{0x0A35, 0x0A36, 1},\n\t\t{0x0A38, 0x0A39, 1},\n\t\t{0x0A59, 0x0A5C, 1},\n\t\t{0x0A5E, 0x0A5E, 1},\n\t\t{0x0A72, 0x0A74, 1},\n\t\t{0x0A85, 0x0A8B, 
1},\n\t\t{0x0A8D, 0x0A8D, 1},\n\t\t{0x0A8F, 0x0A91, 1},\n\t\t{0x0A93, 0x0AA8, 1},\n\t\t{0x0AAA, 0x0AB0, 1},\n\t\t{0x0AB2, 0x0AB3, 1},\n\t\t{0x0AB5, 0x0AB9, 1},\n\t\t{0x0ABD, 0x0AE0, 0x23},\n\t\t{0x0B05, 0x0B0C, 1},\n\t\t{0x0B0F, 0x0B10, 1},\n\t\t{0x0B13, 0x0B28, 1},\n\t\t{0x0B2A, 0x0B30, 1},\n\t\t{0x0B32, 0x0B33, 1},\n\t\t{0x0B36, 0x0B39, 1},\n\t\t{0x0B3D, 0x0B3D, 1},\n\t\t{0x0B5C, 0x0B5D, 1},\n\t\t{0x0B5F, 0x0B61, 1},\n\t\t{0x0B85, 0x0B8A, 1},\n\t\t{0x0B8E, 0x0B90, 1},\n\t\t{0x0B92, 0x0B95, 1},\n\t\t{0x0B99, 0x0B9A, 1},\n\t\t{0x0B9C, 0x0B9C, 1},\n\t\t{0x0B9E, 0x0B9F, 1},\n\t\t{0x0BA3, 0x0BA4, 1},\n\t\t{0x0BA8, 0x0BAA, 1},\n\t\t{0x0BAE, 0x0BB5, 1},\n\t\t{0x0BB7, 0x0BB9, 1},\n\t\t{0x0C05, 0x0C0C, 1},\n\t\t{0x0C0E, 0x0C10, 1},\n\t\t{0x0C12, 0x0C28, 1},\n\t\t{0x0C2A, 0x0C33, 1},\n\t\t{0x0C35, 0x0C39, 1},\n\t\t{0x0C60, 0x0C61, 1},\n\t\t{0x0C85, 0x0C8C, 1},\n\t\t{0x0C8E, 0x0C90, 1},\n\t\t{0x0C92, 0x0CA8, 1},\n\t\t{0x0CAA, 0x0CB3, 1},\n\t\t{0x0CB5, 0x0CB9, 1},\n\t\t{0x0CDE, 0x0CDE, 1},\n\t\t{0x0CE0, 0x0CE1, 1},\n\t\t{0x0D05, 0x0D0C, 1},\n\t\t{0x0D0E, 0x0D10, 1},\n\t\t{0x0D12, 0x0D28, 1},\n\t\t{0x0D2A, 0x0D39, 1},\n\t\t{0x0D60, 0x0D61, 1},\n\t\t{0x0E01, 0x0E2E, 1},\n\t\t{0x0E30, 0x0E30, 1},\n\t\t{0x0E32, 0x0E33, 1},\n\t\t{0x0E40, 0x0E45, 1},\n\t\t{0x0E81, 0x0E82, 1},\n\t\t{0x0E84, 0x0E84, 1},\n\t\t{0x0E87, 0x0E88, 1},\n\t\t{0x0E8A, 0x0E8D, 3},\n\t\t{0x0E94, 0x0E97, 1},\n\t\t{0x0E99, 0x0E9F, 1},\n\t\t{0x0EA1, 0x0EA3, 1},\n\t\t{0x0EA5, 0x0EA7, 2},\n\t\t{0x0EAA, 0x0EAB, 1},\n\t\t{0x0EAD, 0x0EAE, 1},\n\t\t{0x0EB0, 0x0EB0, 1},\n\t\t{0x0EB2, 0x0EB3, 1},\n\t\t{0x0EBD, 0x0EBD, 1},\n\t\t{0x0EC0, 0x0EC4, 1},\n\t\t{0x0F40, 0x0F47, 1},\n\t\t{0x0F49, 0x0F69, 1},\n\t\t{0x10A0, 0x10C5, 1},\n\t\t{0x10D0, 0x10F6, 1},\n\t\t{0x1100, 0x1100, 1},\n\t\t{0x1102, 0x1103, 1},\n\t\t{0x1105, 0x1107, 1},\n\t\t{0x1109, 0x1109, 1},\n\t\t{0x110B, 0x110C, 1},\n\t\t{0x110E, 0x1112, 1},\n\t\t{0x113C, 0x1140, 2},\n\t\t{0x114C, 0x1150, 2},\n\t\t{0x1154, 0x1155, 1},\n\t\t{0x1159, 0x1159, 1},\n\t\t{0x115F, 
0x1161, 1},\n\t\t{0x1163, 0x1169, 2},\n\t\t{0x116D, 0x116E, 1},\n\t\t{0x1172, 0x1173, 1},\n\t\t{0x1175, 0x119E, 0x119E - 0x1175},\n\t\t{0x11A8, 0x11AB, 0x11AB - 0x11A8},\n\t\t{0x11AE, 0x11AF, 1},\n\t\t{0x11B7, 0x11B8, 1},\n\t\t{0x11BA, 0x11BA, 1},\n\t\t{0x11BC, 0x11C2, 1},\n\t\t{0x11EB, 0x11F0, 0x11F0 - 0x11EB},\n\t\t{0x11F9, 0x11F9, 1},\n\t\t{0x1E00, 0x1E9B, 1},\n\t\t{0x1EA0, 0x1EF9, 1},\n\t\t{0x1F00, 0x1F15, 1},\n\t\t{0x1F18, 0x1F1D, 1},\n\t\t{0x1F20, 0x1F45, 1},\n\t\t{0x1F48, 0x1F4D, 1},\n\t\t{0x1F50, 0x1F57, 1},\n\t\t{0x1F59, 0x1F5B, 0x1F5B - 0x1F59},\n\t\t{0x1F5D, 0x1F5D, 1},\n\t\t{0x1F5F, 0x1F7D, 1},\n\t\t{0x1F80, 0x1FB4, 1},\n\t\t{0x1FB6, 0x1FBC, 1},\n\t\t{0x1FBE, 0x1FBE, 1},\n\t\t{0x1FC2, 0x1FC4, 1},\n\t\t{0x1FC6, 0x1FCC, 1},\n\t\t{0x1FD0, 0x1FD3, 1},\n\t\t{0x1FD6, 0x1FDB, 1},\n\t\t{0x1FE0, 0x1FEC, 1},\n\t\t{0x1FF2, 0x1FF4, 1},\n\t\t{0x1FF6, 0x1FFC, 1},\n\t\t{0x2126, 0x2126, 1},\n\t\t{0x212A, 0x212B, 1},\n\t\t{0x212E, 0x212E, 1},\n\t\t{0x2180, 0x2182, 1},\n\t\t{0x3007, 0x3007, 1},\n\t\t{0x3021, 0x3029, 1},\n\t\t{0x3041, 0x3094, 1},\n\t\t{0x30A1, 0x30FA, 1},\n\t\t{0x3105, 0x312C, 1},\n\t\t{0x4E00, 0x9FA5, 1},\n\t\t{0xAC00, 0xD7A3, 1},\n\t},\n}\n\nvar second = &unicode.RangeTable{\n\tR16: []unicode.Range16{\n\t\t{0x002D, 0x002E, 1},\n\t\t{0x0030, 0x0039, 1},\n\t\t{0x00B7, 0x00B7, 1},\n\t\t{0x02D0, 0x02D1, 1},\n\t\t{0x0300, 0x0345, 1},\n\t\t{0x0360, 0x0361, 1},\n\t\t{0x0387, 0x0387, 1},\n\t\t{0x0483, 0x0486, 1},\n\t\t{0x0591, 0x05A1, 1},\n\t\t{0x05A3, 0x05B9, 1},\n\t\t{0x05BB, 0x05BD, 1},\n\t\t{0x05BF, 0x05BF, 1},\n\t\t{0x05C1, 0x05C2, 1},\n\t\t{0x05C4, 0x0640, 0x0640 - 0x05C4},\n\t\t{0x064B, 0x0652, 1},\n\t\t{0x0660, 0x0669, 1},\n\t\t{0x0670, 0x0670, 1},\n\t\t{0x06D6, 0x06DC, 1},\n\t\t{0x06DD, 0x06DF, 1},\n\t\t{0x06E0, 0x06E4, 1},\n\t\t{0x06E7, 0x06E8, 1},\n\t\t{0x06EA, 0x06ED, 1},\n\t\t{0x06F0, 0x06F9, 1},\n\t\t{0x0901, 0x0903, 1},\n\t\t{0x093C, 0x093C, 1},\n\t\t{0x093E, 0x094C, 1},\n\t\t{0x094D, 0x094D, 1},\n\t\t{0x0951, 0x0954, 1},\n\t\t{0x0962, 0x0963, 
1},\n\t\t{0x0966, 0x096F, 1},\n\t\t{0x0981, 0x0983, 1},\n\t\t{0x09BC, 0x09BC, 1},\n\t\t{0x09BE, 0x09BF, 1},\n\t\t{0x09C0, 0x09C4, 1},\n\t\t{0x09C7, 0x09C8, 1},\n\t\t{0x09CB, 0x09CD, 1},\n\t\t{0x09D7, 0x09D7, 1},\n\t\t{0x09E2, 0x09E3, 1},\n\t\t{0x09E6, 0x09EF, 1},\n\t\t{0x0A02, 0x0A3C, 0x3A},\n\t\t{0x0A3E, 0x0A3F, 1},\n\t\t{0x0A40, 0x0A42, 1},\n\t\t{0x0A47, 0x0A48, 1},\n\t\t{0x0A4B, 0x0A4D, 1},\n\t\t{0x0A66, 0x0A6F, 1},\n\t\t{0x0A70, 0x0A71, 1},\n\t\t{0x0A81, 0x0A83, 1},\n\t\t{0x0ABC, 0x0ABC, 1},\n\t\t{0x0ABE, 0x0AC5, 1},\n\t\t{0x0AC7, 0x0AC9, 1},\n\t\t{0x0ACB, 0x0ACD, 1},\n\t\t{0x0AE6, 0x0AEF, 1},\n\t\t{0x0B01, 0x0B03, 1},\n\t\t{0x0B3C, 0x0B3C, 1},\n\t\t{0x0B3E, 0x0B43, 1},\n\t\t{0x0B47, 0x0B48, 1},\n\t\t{0x0B4B, 0x0B4D, 1},\n\t\t{0x0B56, 0x0B57, 1},\n\t\t{0x0B66, 0x0B6F, 1},\n\t\t{0x0B82, 0x0B83, 1},\n\t\t{0x0BBE, 0x0BC2, 1},\n\t\t{0x0BC6, 0x0BC8, 1},\n\t\t{0x0BCA, 0x0BCD, 1},\n\t\t{0x0BD7, 0x0BD7, 1},\n\t\t{0x0BE7, 0x0BEF, 1},\n\t\t{0x0C01, 0x0C03, 1},\n\t\t{0x0C3E, 0x0C44, 1},\n\t\t{0x0C46, 0x0C48, 1},\n\t\t{0x0C4A, 0x0C4D, 1},\n\t\t{0x0C55, 0x0C56, 1},\n\t\t{0x0C66, 0x0C6F, 1},\n\t\t{0x0C82, 0x0C83, 1},\n\t\t{0x0CBE, 0x0CC4, 1},\n\t\t{0x0CC6, 0x0CC8, 1},\n\t\t{0x0CCA, 0x0CCD, 1},\n\t\t{0x0CD5, 0x0CD6, 1},\n\t\t{0x0CE6, 0x0CEF, 1},\n\t\t{0x0D02, 0x0D03, 1},\n\t\t{0x0D3E, 0x0D43, 1},\n\t\t{0x0D46, 0x0D48, 1},\n\t\t{0x0D4A, 0x0D4D, 1},\n\t\t{0x0D57, 0x0D57, 1},\n\t\t{0x0D66, 0x0D6F, 1},\n\t\t{0x0E31, 0x0E31, 1},\n\t\t{0x0E34, 0x0E3A, 1},\n\t\t{0x0E46, 0x0E46, 1},\n\t\t{0x0E47, 0x0E4E, 1},\n\t\t{0x0E50, 0x0E59, 1},\n\t\t{0x0EB1, 0x0EB1, 1},\n\t\t{0x0EB4, 0x0EB9, 1},\n\t\t{0x0EBB, 0x0EBC, 1},\n\t\t{0x0EC6, 0x0EC6, 1},\n\t\t{0x0EC8, 0x0ECD, 1},\n\t\t{0x0ED0, 0x0ED9, 1},\n\t\t{0x0F18, 0x0F19, 1},\n\t\t{0x0F20, 0x0F29, 1},\n\t\t{0x0F35, 0x0F39, 2},\n\t\t{0x0F3E, 0x0F3F, 1},\n\t\t{0x0F71, 0x0F84, 1},\n\t\t{0x0F86, 0x0F8B, 1},\n\t\t{0x0F90, 0x0F95, 1},\n\t\t{0x0F97, 0x0F97, 1},\n\t\t{0x0F99, 0x0FAD, 1},\n\t\t{0x0FB1, 0x0FB7, 1},\n\t\t{0x0FB9, 0x0FB9, 1},\n\t\t{0x20D0, 
0x20DC, 1},\n\t\t{0x20E1, 0x3005, 0x3005 - 0x20E1},\n\t\t{0x302A, 0x302F, 1},\n\t\t{0x3031, 0x3035, 1},\n\t\t{0x3099, 0x309A, 1},\n\t\t{0x309D, 0x309E, 1},\n\t\t{0x30FC, 0x30FE, 1},\n\t},\n}\n\n// HTMLEntity is an entity map containing translations for the\n// standard HTML entity characters.\nvar HTMLEntity = htmlEntity\n\nvar htmlEntity = map[string]string{\n\t/*\n\t\thget http://www.w3.org/TR/html4/sgml/entities.html |\n\t\tssam '\n\t\t\t,y /\\&gt;/ x/\\&lt;(.|\\n)+/ s/\\n/ /g\n\t\t\t,x v/^\\&lt;!ENTITY/d\n\t\t\t,s/\\&lt;!ENTITY ([^ ]+) .*U\\+([0-9A-F][0-9A-F][0-9A-F][0-9A-F]) .+/\t\"\\1\": \"\\\\u\\2\",/g\n\t\t'\n\t*/\n\t\"nbsp\":     \"\\u00A0\",\n\t\"iexcl\":    \"\\u00A1\",\n\t\"cent\":     \"\\u00A2\",\n\t\"pound\":    \"\\u00A3\",\n\t\"curren\":   \"\\u00A4\",\n\t\"yen\":      \"\\u00A5\",\n\t\"brvbar\":   \"\\u00A6\",\n\t\"sect\":     \"\\u00A7\",\n\t\"uml\":      \"\\u00A8\",\n\t\"copy\":     \"\\u00A9\",\n\t\"ordf\":     \"\\u00AA\",\n\t\"laquo\":    \"\\u00AB\",\n\t\"not\":      \"\\u00AC\",\n\t\"shy\":      \"\\u00AD\",\n\t\"reg\":      \"\\u00AE\",\n\t\"macr\":     \"\\u00AF\",\n\t\"deg\":      \"\\u00B0\",\n\t\"plusmn\":   \"\\u00B1\",\n\t\"sup2\":     \"\\u00B2\",\n\t\"sup3\":     \"\\u00B3\",\n\t\"acute\":    \"\\u00B4\",\n\t\"micro\":    \"\\u00B5\",\n\t\"para\":     \"\\u00B6\",\n\t\"middot\":   \"\\u00B7\",\n\t\"cedil\":    \"\\u00B8\",\n\t\"sup1\":     \"\\u00B9\",\n\t\"ordm\":     \"\\u00BA\",\n\t\"raquo\":    \"\\u00BB\",\n\t\"frac14\":   \"\\u00BC\",\n\t\"frac12\":   \"\\u00BD\",\n\t\"frac34\":   \"\\u00BE\",\n\t\"iquest\":   \"\\u00BF\",\n\t\"Agrave\":   \"\\u00C0\",\n\t\"Aacute\":   \"\\u00C1\",\n\t\"Acirc\":    \"\\u00C2\",\n\t\"Atilde\":   \"\\u00C3\",\n\t\"Auml\":     \"\\u00C4\",\n\t\"Aring\":    \"\\u00C5\",\n\t\"AElig\":    \"\\u00C6\",\n\t\"Ccedil\":   \"\\u00C7\",\n\t\"Egrave\":   \"\\u00C8\",\n\t\"Eacute\":   \"\\u00C9\",\n\t\"Ecirc\":    \"\\u00CA\",\n\t\"Euml\":     \"\\u00CB\",\n\t\"Igrave\":   \"\\u00CC\",\n\t\"Iacute\":   
\"\\u00CD\",\n\t\"Icirc\":    \"\\u00CE\",\n\t\"Iuml\":     \"\\u00CF\",\n\t\"ETH\":      \"\\u00D0\",\n\t\"Ntilde\":   \"\\u00D1\",\n\t\"Ograve\":   \"\\u00D2\",\n\t\"Oacute\":   \"\\u00D3\",\n\t\"Ocirc\":    \"\\u00D4\",\n\t\"Otilde\":   \"\\u00D5\",\n\t\"Ouml\":     \"\\u00D6\",\n\t\"times\":    \"\\u00D7\",\n\t\"Oslash\":   \"\\u00D8\",\n\t\"Ugrave\":   \"\\u00D9\",\n\t\"Uacute\":   \"\\u00DA\",\n\t\"Ucirc\":    \"\\u00DB\",\n\t\"Uuml\":     \"\\u00DC\",\n\t\"Yacute\":   \"\\u00DD\",\n\t\"THORN\":    \"\\u00DE\",\n\t\"szlig\":    \"\\u00DF\",\n\t\"agrave\":   \"\\u00E0\",\n\t\"aacute\":   \"\\u00E1\",\n\t\"acirc\":    \"\\u00E2\",\n\t\"atilde\":   \"\\u00E3\",\n\t\"auml\":     \"\\u00E4\",\n\t\"aring\":    \"\\u00E5\",\n\t\"aelig\":    \"\\u00E6\",\n\t\"ccedil\":   \"\\u00E7\",\n\t\"egrave\":   \"\\u00E8\",\n\t\"eacute\":   \"\\u00E9\",\n\t\"ecirc\":    \"\\u00EA\",\n\t\"euml\":     \"\\u00EB\",\n\t\"igrave\":   \"\\u00EC\",\n\t\"iacute\":   \"\\u00ED\",\n\t\"icirc\":    \"\\u00EE\",\n\t\"iuml\":     \"\\u00EF\",\n\t\"eth\":      \"\\u00F0\",\n\t\"ntilde\":   \"\\u00F1\",\n\t\"ograve\":   \"\\u00F2\",\n\t\"oacute\":   \"\\u00F3\",\n\t\"ocirc\":    \"\\u00F4\",\n\t\"otilde\":   \"\\u00F5\",\n\t\"ouml\":     \"\\u00F6\",\n\t\"divide\":   \"\\u00F7\",\n\t\"oslash\":   \"\\u00F8\",\n\t\"ugrave\":   \"\\u00F9\",\n\t\"uacute\":   \"\\u00FA\",\n\t\"ucirc\":    \"\\u00FB\",\n\t\"uuml\":     \"\\u00FC\",\n\t\"yacute\":   \"\\u00FD\",\n\t\"thorn\":    \"\\u00FE\",\n\t\"yuml\":     \"\\u00FF\",\n\t\"fnof\":     \"\\u0192\",\n\t\"Alpha\":    \"\\u0391\",\n\t\"Beta\":     \"\\u0392\",\n\t\"Gamma\":    \"\\u0393\",\n\t\"Delta\":    \"\\u0394\",\n\t\"Epsilon\":  \"\\u0395\",\n\t\"Zeta\":     \"\\u0396\",\n\t\"Eta\":      \"\\u0397\",\n\t\"Theta\":    \"\\u0398\",\n\t\"Iota\":     \"\\u0399\",\n\t\"Kappa\":    \"\\u039A\",\n\t\"Lambda\":   \"\\u039B\",\n\t\"Mu\":       \"\\u039C\",\n\t\"Nu\":       \"\\u039D\",\n\t\"Xi\":       \"\\u039E\",\n\t\"Omicron\":  
\"\\u039F\",\n\t\"Pi\":       \"\\u03A0\",\n\t\"Rho\":      \"\\u03A1\",\n\t\"Sigma\":    \"\\u03A3\",\n\t\"Tau\":      \"\\u03A4\",\n\t\"Upsilon\":  \"\\u03A5\",\n\t\"Phi\":      \"\\u03A6\",\n\t\"Chi\":      \"\\u03A7\",\n\t\"Psi\":      \"\\u03A8\",\n\t\"Omega\":    \"\\u03A9\",\n\t\"alpha\":    \"\\u03B1\",\n\t\"beta\":     \"\\u03B2\",\n\t\"gamma\":    \"\\u03B3\",\n\t\"delta\":    \"\\u03B4\",\n\t\"epsilon\":  \"\\u03B5\",\n\t\"zeta\":     \"\\u03B6\",\n\t\"eta\":      \"\\u03B7\",\n\t\"theta\":    \"\\u03B8\",\n\t\"iota\":     \"\\u03B9\",\n\t\"kappa\":    \"\\u03BA\",\n\t\"lambda\":   \"\\u03BB\",\n\t\"mu\":       \"\\u03BC\",\n\t\"nu\":       \"\\u03BD\",\n\t\"xi\":       \"\\u03BE\",\n\t\"omicron\":  \"\\u03BF\",\n\t\"pi\":       \"\\u03C0\",\n\t\"rho\":      \"\\u03C1\",\n\t\"sigmaf\":   \"\\u03C2\",\n\t\"sigma\":    \"\\u03C3\",\n\t\"tau\":      \"\\u03C4\",\n\t\"upsilon\":  \"\\u03C5\",\n\t\"phi\":      \"\\u03C6\",\n\t\"chi\":      \"\\u03C7\",\n\t\"psi\":      \"\\u03C8\",\n\t\"omega\":    \"\\u03C9\",\n\t\"thetasym\": \"\\u03D1\",\n\t\"upsih\":    \"\\u03D2\",\n\t\"piv\":      \"\\u03D6\",\n\t\"bull\":     \"\\u2022\",\n\t\"hellip\":   \"\\u2026\",\n\t\"prime\":    \"\\u2032\",\n\t\"Prime\":    \"\\u2033\",\n\t\"oline\":    \"\\u203E\",\n\t\"frasl\":    \"\\u2044\",\n\t\"weierp\":   \"\\u2118\",\n\t\"image\":    \"\\u2111\",\n\t\"real\":     \"\\u211C\",\n\t\"trade\":    \"\\u2122\",\n\t\"alefsym\":  \"\\u2135\",\n\t\"larr\":     \"\\u2190\",\n\t\"uarr\":     \"\\u2191\",\n\t\"rarr\":     \"\\u2192\",\n\t\"darr\":     \"\\u2193\",\n\t\"harr\":     \"\\u2194\",\n\t\"crarr\":    \"\\u21B5\",\n\t\"lArr\":     \"\\u21D0\",\n\t\"uArr\":     \"\\u21D1\",\n\t\"rArr\":     \"\\u21D2\",\n\t\"dArr\":     \"\\u21D3\",\n\t\"hArr\":     \"\\u21D4\",\n\t\"forall\":   \"\\u2200\",\n\t\"part\":     \"\\u2202\",\n\t\"exist\":    \"\\u2203\",\n\t\"empty\":    \"\\u2205\",\n\t\"nabla\":    \"\\u2207\",\n\t\"isin\":     \"\\u2208\",\n\t\"notin\":    
\"\\u2209\",\n\t\"ni\":       \"\\u220B\",\n\t\"prod\":     \"\\u220F\",\n\t\"sum\":      \"\\u2211\",\n\t\"minus\":    \"\\u2212\",\n\t\"lowast\":   \"\\u2217\",\n\t\"radic\":    \"\\u221A\",\n\t\"prop\":     \"\\u221D\",\n\t\"infin\":    \"\\u221E\",\n\t\"ang\":      \"\\u2220\",\n\t\"and\":      \"\\u2227\",\n\t\"or\":       \"\\u2228\",\n\t\"cap\":      \"\\u2229\",\n\t\"cup\":      \"\\u222A\",\n\t\"int\":      \"\\u222B\",\n\t\"there4\":   \"\\u2234\",\n\t\"sim\":      \"\\u223C\",\n\t\"cong\":     \"\\u2245\",\n\t\"asymp\":    \"\\u2248\",\n\t\"ne\":       \"\\u2260\",\n\t\"equiv\":    \"\\u2261\",\n\t\"le\":       \"\\u2264\",\n\t\"ge\":       \"\\u2265\",\n\t\"sub\":      \"\\u2282\",\n\t\"sup\":      \"\\u2283\",\n\t\"nsub\":     \"\\u2284\",\n\t\"sube\":     \"\\u2286\",\n\t\"supe\":     \"\\u2287\",\n\t\"oplus\":    \"\\u2295\",\n\t\"otimes\":   \"\\u2297\",\n\t\"perp\":     \"\\u22A5\",\n\t\"sdot\":     \"\\u22C5\",\n\t\"lceil\":    \"\\u2308\",\n\t\"rceil\":    \"\\u2309\",\n\t\"lfloor\":   \"\\u230A\",\n\t\"rfloor\":   \"\\u230B\",\n\t\"lang\":     \"\\u2329\",\n\t\"rang\":     \"\\u232A\",\n\t\"loz\":      \"\\u25CA\",\n\t\"spades\":   \"\\u2660\",\n\t\"clubs\":    \"\\u2663\",\n\t\"hearts\":   \"\\u2665\",\n\t\"diams\":    \"\\u2666\",\n\t\"quot\":     \"\\u0022\",\n\t\"amp\":      \"\\u0026\",\n\t\"lt\":       \"\\u003C\",\n\t\"gt\":       \"\\u003E\",\n\t\"OElig\":    \"\\u0152\",\n\t\"oelig\":    \"\\u0153\",\n\t\"Scaron\":   \"\\u0160\",\n\t\"scaron\":   \"\\u0161\",\n\t\"Yuml\":     \"\\u0178\",\n\t\"circ\":     \"\\u02C6\",\n\t\"tilde\":    \"\\u02DC\",\n\t\"ensp\":     \"\\u2002\",\n\t\"emsp\":     \"\\u2003\",\n\t\"thinsp\":   \"\\u2009\",\n\t\"zwnj\":     \"\\u200C\",\n\t\"zwj\":      \"\\u200D\",\n\t\"lrm\":      \"\\u200E\",\n\t\"rlm\":      \"\\u200F\",\n\t\"ndash\":    \"\\u2013\",\n\t\"mdash\":    \"\\u2014\",\n\t\"lsquo\":    \"\\u2018\",\n\t\"rsquo\":    \"\\u2019\",\n\t\"sbquo\":    \"\\u201A\",\n\t\"ldquo\":    
\"\\u201C\",\n\t\"rdquo\":    \"\\u201D\",\n\t\"bdquo\":    \"\\u201E\",\n\t\"dagger\":   \"\\u2020\",\n\t\"Dagger\":   \"\\u2021\",\n\t\"permil\":   \"\\u2030\",\n\t\"lsaquo\":   \"\\u2039\",\n\t\"rsaquo\":   \"\\u203A\",\n\t\"euro\":     \"\\u20AC\",\n}\n\n// HTMLAutoClose is the set of HTML elements that\n// should be considered to close automatically.\nvar HTMLAutoClose = htmlAutoClose\n\nvar htmlAutoClose = []string{\n\t/*\n\t\thget http://www.w3.org/TR/html4/loose.dtd |\n\t\t9 sed -n 's/<!ELEMENT ([^ ]*) +- O EMPTY.+/\t\"\\1\",/p' | tr A-Z a-z\n\t*/\n\t\"basefont\",\n\t\"br\",\n\t\"area\",\n\t\"link\",\n\t\"img\",\n\t\"param\",\n\t\"hr\",\n\t\"input\",\n\t\"col\",\n\t\"frame\",\n\t\"isindex\",\n\t\"base\",\n\t\"meta\",\n}\n\nvar (\n\tesc_quot = []byte(\"&#34;\") // shorter than \"&quot;\"\n\tesc_apos = []byte(\"&#39;\") // shorter than \"&apos;\"\n\tesc_amp  = []byte(\"&amp;\")\n\tesc_lt   = []byte(\"&lt;\")\n\tesc_gt   = []byte(\"&gt;\")\n\tesc_tab  = []byte(\"&#x9;\")\n\tesc_nl   = []byte(\"&#xA;\")\n\tesc_cr   = []byte(\"&#xD;\")\n\tesc_fffd = []byte(\"\\uFFFD\") // Unicode replacement character\n)\n\n// EscapeText writes to w the properly escaped XML equivalent\n// of the plain text data s.\nfunc EscapeText(w io.Writer, s []byte) error {\n\treturn escapeText(w, s, true)\n}\n\n// escapeText writes to w the properly escaped XML equivalent\n// of the plain text data s. 
If escapeNewline is true, newline\n// characters will be escaped.\nfunc escapeText(w io.Writer, s []byte, escapeNewline bool) error {\n\tvar esc []byte\n\tlast := 0\n\tfor i := 0; i < len(s); {\n\t\tr, width := utf8.DecodeRune(s[i:])\n\t\ti += width\n\t\tswitch r {\n\t\tcase '\"':\n\t\t\tesc = esc_quot\n\t\tcase '\\'':\n\t\t\tesc = esc_apos\n\t\tcase '&':\n\t\t\tesc = esc_amp\n\t\tcase '<':\n\t\t\tesc = esc_lt\n\t\tcase '>':\n\t\t\tesc = esc_gt\n\t\tcase '\\t':\n\t\t\tesc = esc_tab\n\t\tcase '\\n':\n\t\t\tif !escapeNewline {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tesc = esc_nl\n\t\tcase '\\r':\n\t\t\tesc = esc_cr\n\t\tdefault:\n\t\t\tif !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {\n\t\t\t\tesc = esc_fffd\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif _, err := w.Write(s[last : i-width]); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := w.Write(esc); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlast = i\n\t}\n\tif _, err := w.Write(s[last:]); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// EscapeString writes to p the properly escaped XML equivalent\n// of the plain text data s.\nfunc (p *printer) EscapeString(s string) {\n\tvar esc []byte\n\tlast := 0\n\tfor i := 0; i < len(s); {\n\t\tr, width := utf8.DecodeRuneInString(s[i:])\n\t\ti += width\n\t\tswitch r {\n\t\tcase '\"':\n\t\t\tesc = esc_quot\n\t\tcase '\\'':\n\t\t\tesc = esc_apos\n\t\tcase '&':\n\t\t\tesc = esc_amp\n\t\tcase '<':\n\t\t\tesc = esc_lt\n\t\tcase '>':\n\t\t\tesc = esc_gt\n\t\tcase '\\t':\n\t\t\tesc = esc_tab\n\t\tcase '\\n':\n\t\t\tesc = esc_nl\n\t\tcase '\\r':\n\t\t\tesc = esc_cr\n\t\tdefault:\n\t\t\tif !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {\n\t\t\t\tesc = esc_fffd\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tp.WriteString(s[last : i-width])\n\t\tp.Write(esc)\n\t\tlast = i\n\t}\n\tp.WriteString(s[last:])\n}\n\n// Escape is like EscapeText but omits the error return value.\n// It is provided for backwards compatibility with Go 1.0.\n// Code 
targeting Go 1.1 or later should use EscapeText.\nfunc Escape(w io.Writer, s []byte) {\n\tEscapeText(w, s)\n}\n\n// procInst parses the `param=\"...\"` or `param='...'`\n// value out of the provided string, returning \"\" if not found.\nfunc procInst(param, s string) string {\n\t// TODO: this parsing is somewhat lame and not exact.\n\t// It works for all actual cases, though.\n\tparam = param + \"=\"\n\tidx := strings.Index(s, param)\n\tif idx == -1 {\n\t\treturn \"\"\n\t}\n\tv := s[idx+len(param):]\n\tif v == \"\" {\n\t\treturn \"\"\n\t}\n\tif v[0] != '\\'' && v[0] != '\"' {\n\t\treturn \"\"\n\t}\n\tidx = strings.IndexRune(v[1:], rune(v[0]))\n\tif idx == -1 {\n\t\treturn \"\"\n\t}\n\treturn v[1 : idx+1]\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/internal/xml/xml_test.go",
    "content": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage xml\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"unicode/utf8\"\n)\n\nconst testInput = `\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n  \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n<body xmlns:foo=\"ns1\" xmlns=\"ns2\" xmlns:tag=\"ns3\" ` +\n\t\"\\r\\n\\t\" + `  >\n  <hello lang=\"en\">World &lt;&gt;&apos;&quot; &#x767d;&#40300;翔</hello>\n  <query>&何; &is-it;</query>\n  <goodbye />\n  <outer foo:attr=\"value\" xmlns:tag=\"ns4\">\n    <inner/>\n  </outer>\n  <tag:name>\n    <![CDATA[Some text here.]]>\n  </tag:name>\n</body><!-- missing final newline -->`\n\nvar testEntity = map[string]string{\"何\": \"What\", \"is-it\": \"is it?\"}\n\nvar rawTokens = []Token{\n\tCharData(\"\\n\"),\n\tProcInst{\"xml\", []byte(`version=\"1.0\" encoding=\"UTF-8\"`)},\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n  \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\"`),\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"body\"}, []Attr{{Name{\"xmlns\", \"foo\"}, \"ns1\"}, {Name{\"\", \"xmlns\"}, \"ns2\"}, {Name{\"xmlns\", \"tag\"}, \"ns3\"}}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"\", \"hello\"}, []Attr{{Name{\"\", \"lang\"}, \"en\"}}},\n\tCharData(\"World <>'\\\" 白鵬翔\"),\n\tEndElement{Name{\"\", \"hello\"}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"\", \"query\"}, []Attr{}},\n\tCharData(\"What is it?\"),\n\tEndElement{Name{\"\", \"query\"}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"\", \"goodbye\"}, []Attr{}},\n\tEndElement{Name{\"\", \"goodbye\"}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"\", \"outer\"}, []Attr{{Name{\"foo\", \"attr\"}, \"value\"}, {Name{\"xmlns\", \"tag\"}, 
\"ns4\"}}},\n\tCharData(\"\\n    \"),\n\tStartElement{Name{\"\", \"inner\"}, []Attr{}},\n\tEndElement{Name{\"\", \"inner\"}},\n\tCharData(\"\\n  \"),\n\tEndElement{Name{\"\", \"outer\"}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"tag\", \"name\"}, []Attr{}},\n\tCharData(\"\\n    \"),\n\tCharData(\"Some text here.\"),\n\tCharData(\"\\n  \"),\n\tEndElement{Name{\"tag\", \"name\"}},\n\tCharData(\"\\n\"),\n\tEndElement{Name{\"\", \"body\"}},\n\tComment(\" missing final newline \"),\n}\n\nvar cookedTokens = []Token{\n\tCharData(\"\\n\"),\n\tProcInst{\"xml\", []byte(`version=\"1.0\" encoding=\"UTF-8\"`)},\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n  \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\"`),\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"ns2\", \"body\"}, []Attr{{Name{\"xmlns\", \"foo\"}, \"ns1\"}, {Name{\"\", \"xmlns\"}, \"ns2\"}, {Name{\"xmlns\", \"tag\"}, \"ns3\"}}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"ns2\", \"hello\"}, []Attr{{Name{\"\", \"lang\"}, \"en\"}}},\n\tCharData(\"World <>'\\\" 白鵬翔\"),\n\tEndElement{Name{\"ns2\", \"hello\"}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"ns2\", \"query\"}, []Attr{}},\n\tCharData(\"What is it?\"),\n\tEndElement{Name{\"ns2\", \"query\"}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"ns2\", \"goodbye\"}, []Attr{}},\n\tEndElement{Name{\"ns2\", \"goodbye\"}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"ns2\", \"outer\"}, []Attr{{Name{\"ns1\", \"attr\"}, \"value\"}, {Name{\"xmlns\", \"tag\"}, \"ns4\"}}},\n\tCharData(\"\\n    \"),\n\tStartElement{Name{\"ns2\", \"inner\"}, []Attr{}},\n\tEndElement{Name{\"ns2\", \"inner\"}},\n\tCharData(\"\\n  \"),\n\tEndElement{Name{\"ns2\", \"outer\"}},\n\tCharData(\"\\n  \"),\n\tStartElement{Name{\"ns3\", \"name\"}, []Attr{}},\n\tCharData(\"\\n    \"),\n\tCharData(\"Some text here.\"),\n\tCharData(\"\\n  \"),\n\tEndElement{Name{\"ns3\", \"name\"}},\n\tCharData(\"\\n\"),\n\tEndElement{Name{\"ns2\", 
\"body\"}},\n\tComment(\" missing final newline \"),\n}\n\nconst testInputAltEncoding = `\n<?xml version=\"1.0\" encoding=\"x-testing-uppercase\"?>\n<TAG>VALUE</TAG>`\n\nvar rawTokensAltEncoding = []Token{\n\tCharData(\"\\n\"),\n\tProcInst{\"xml\", []byte(`version=\"1.0\" encoding=\"x-testing-uppercase\"`)},\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"value\"),\n\tEndElement{Name{\"\", \"tag\"}},\n}\n\nvar xmlInput = []string{\n\t// unexpected EOF cases\n\t\"<\",\n\t\"<t\",\n\t\"<t \",\n\t\"<t/\",\n\t\"<!\",\n\t\"<!-\",\n\t\"<!--\",\n\t\"<!--c-\",\n\t\"<!--c--\",\n\t\"<!d\",\n\t\"<t></\",\n\t\"<t></t\",\n\t\"<?\",\n\t\"<?p\",\n\t\"<t a\",\n\t\"<t a=\",\n\t\"<t a='\",\n\t\"<t a=''\",\n\t\"<t/><![\",\n\t\"<t/><![C\",\n\t\"<t/><![CDATA[d\",\n\t\"<t/><![CDATA[d]\",\n\t\"<t/><![CDATA[d]]\",\n\n\t// other Syntax errors\n\t\"<>\",\n\t\"<t/a\",\n\t\"<0 />\",\n\t\"<?0 >\",\n\t//\t\"<!0 >\",\t// let the Token() caller handle\n\t\"</0>\",\n\t\"<t 0=''>\",\n\t\"<t a='&'>\",\n\t\"<t a='<'>\",\n\t\"<t>&nbspc;</t>\",\n\t\"<t a>\",\n\t\"<t a=>\",\n\t\"<t a=v>\",\n\t//\t\"<![CDATA[d]]>\",\t// let the Token() caller handle\n\t\"<t></e>\",\n\t\"<t></>\",\n\t\"<t></t!\",\n\t\"<t>cdata]]></t>\",\n}\n\nfunc TestRawToken(t *testing.T) {\n\td := NewDecoder(strings.NewReader(testInput))\n\td.Entity = testEntity\n\ttestRawToken(t, d, testInput, rawTokens)\n}\n\nconst nonStrictInput = `\n<tag>non&entity</tag>\n<tag>&unknown;entity</tag>\n<tag>&#123</tag>\n<tag>&#zzz;</tag>\n<tag>&なまえ3;</tag>\n<tag>&lt-gt;</tag>\n<tag>&;</tag>\n<tag>&0a;</tag>\n`\n\nvar nonStringEntity = map[string]string{\"\": \"oops!\", \"0a\": \"oops!\"}\n\nvar nonStrictTokens = []Token{\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"non&entity\"),\n\tEndElement{Name{\"\", \"tag\"}},\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"&unknown;entity\"),\n\tEndElement{Name{\"\", 
\"tag\"}},\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"&#123\"),\n\tEndElement{Name{\"\", \"tag\"}},\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"&#zzz;\"),\n\tEndElement{Name{\"\", \"tag\"}},\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"&なまえ3;\"),\n\tEndElement{Name{\"\", \"tag\"}},\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"&lt-gt;\"),\n\tEndElement{Name{\"\", \"tag\"}},\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"&;\"),\n\tEndElement{Name{\"\", \"tag\"}},\n\tCharData(\"\\n\"),\n\tStartElement{Name{\"\", \"tag\"}, []Attr{}},\n\tCharData(\"&0a;\"),\n\tEndElement{Name{\"\", \"tag\"}},\n\tCharData(\"\\n\"),\n}\n\nfunc TestNonStrictRawToken(t *testing.T) {\n\td := NewDecoder(strings.NewReader(nonStrictInput))\n\td.Strict = false\n\ttestRawToken(t, d, nonStrictInput, nonStrictTokens)\n}\n\ntype downCaser struct {\n\tt *testing.T\n\tr io.ByteReader\n}\n\nfunc (d *downCaser) ReadByte() (c byte, err error) {\n\tc, err = d.r.ReadByte()\n\tif c >= 'A' && c <= 'Z' {\n\t\tc += 'a' - 'A'\n\t}\n\treturn\n}\n\nfunc (d *downCaser) Read(p []byte) (int, error) {\n\td.t.Fatalf(\"unexpected Read call on downCaser reader\")\n\tpanic(\"unreachable\")\n}\n\nfunc TestRawTokenAltEncoding(t *testing.T) {\n\td := NewDecoder(strings.NewReader(testInputAltEncoding))\n\td.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {\n\t\tif charset != \"x-testing-uppercase\" {\n\t\t\tt.Fatalf(\"unexpected charset %q\", charset)\n\t\t}\n\t\treturn &downCaser{t, input.(io.ByteReader)}, nil\n\t}\n\ttestRawToken(t, d, testInputAltEncoding, rawTokensAltEncoding)\n}\n\nfunc TestRawTokenAltEncodingNoConverter(t *testing.T) {\n\td := NewDecoder(strings.NewReader(testInputAltEncoding))\n\ttoken, err := d.RawToken()\n\tif token == nil {\n\t\tt.Fatalf(\"expected a token on first RawToken 
call\")\n\t}\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttoken, err = d.RawToken()\n\tif token != nil {\n\t\tt.Errorf(\"expected a nil token; got %#v\", token)\n\t}\n\tif err == nil {\n\t\tt.Fatalf(\"expected an error on second RawToken call\")\n\t}\n\tconst encoding = \"x-testing-uppercase\"\n\tif !strings.Contains(err.Error(), encoding) {\n\t\tt.Errorf(\"expected error to contain %q; got error: %v\",\n\t\t\tencoding, err)\n\t}\n}\n\nfunc testRawToken(t *testing.T, d *Decoder, raw string, rawTokens []Token) {\n\tlastEnd := int64(0)\n\tfor i, want := range rawTokens {\n\t\tstart := d.InputOffset()\n\t\thave, err := d.RawToken()\n\t\tend := d.InputOffset()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"token %d: unexpected error: %s\", i, err)\n\t\t}\n\t\tif !reflect.DeepEqual(have, want) {\n\t\t\tvar shave, swant string\n\t\t\tif _, ok := have.(CharData); ok {\n\t\t\t\tshave = fmt.Sprintf(\"CharData(%q)\", have)\n\t\t\t} else {\n\t\t\t\tshave = fmt.Sprintf(\"%#v\", have)\n\t\t\t}\n\t\t\tif _, ok := want.(CharData); ok {\n\t\t\t\tswant = fmt.Sprintf(\"CharData(%q)\", want)\n\t\t\t} else {\n\t\t\t\tswant = fmt.Sprintf(\"%#v\", want)\n\t\t\t}\n\t\t\tt.Errorf(\"token %d = %s, want %s\", i, shave, swant)\n\t\t}\n\n\t\t// Check that InputOffset returned actual token.\n\t\tswitch {\n\t\tcase start < lastEnd:\n\t\t\tt.Errorf(\"token %d: position [%d,%d) for %T is before previous token\", i, start, end, have)\n\t\tcase start >= end:\n\t\t\t// Special case: EndElement can be synthesized.\n\t\t\tif start == end && end == lastEnd {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.Errorf(\"token %d: position [%d,%d) for %T is empty\", i, start, end, have)\n\t\tcase end > int64(len(raw)):\n\t\t\tt.Errorf(\"token %d: position [%d,%d) for %T extends beyond input\", i, start, end, have)\n\t\tdefault:\n\t\t\ttext := raw[start:end]\n\t\t\tif strings.ContainsAny(text, \"<>\") && (!strings.HasPrefix(text, \"<\") || !strings.HasSuffix(text, \">\")) {\n\t\t\t\tt.Errorf(\"token %d: misaligned raw token %#q for 
%T\", i, text, have)\n\t\t\t}\n\t\t}\n\t\tlastEnd = end\n\t}\n}\n\n// Ensure that directives (specifically !DOCTYPE) include the complete\n// text of any nested directives, noting that < and > do not change\n// nesting depth if they are in single or double quotes.\n\nvar nestedDirectivesInput = `\n<!DOCTYPE [<!ENTITY rdf \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">]>\n<!DOCTYPE [<!ENTITY xlt \">\">]>\n<!DOCTYPE [<!ENTITY xlt \"<\">]>\n<!DOCTYPE [<!ENTITY xlt '>'>]>\n<!DOCTYPE [<!ENTITY xlt '<'>]>\n<!DOCTYPE [<!ENTITY xlt '\">'>]>\n<!DOCTYPE [<!ENTITY xlt \"'<\">]>\n`\n\nvar nestedDirectivesTokens = []Token{\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY rdf \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">]`),\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY xlt \">\">]`),\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY xlt \"<\">]`),\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY xlt '>'>]`),\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY xlt '<'>]`),\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY xlt '\">'>]`),\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY xlt \"'<\">]`),\n\tCharData(\"\\n\"),\n}\n\nfunc TestNestedDirectives(t *testing.T) {\n\td := NewDecoder(strings.NewReader(nestedDirectivesInput))\n\n\tfor i, want := range nestedDirectivesTokens {\n\t\thave, err := d.Token()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"token %d: unexpected error: %s\", i, err)\n\t\t}\n\t\tif !reflect.DeepEqual(have, want) {\n\t\t\tt.Errorf(\"token %d = %#v want %#v\", i, have, want)\n\t\t}\n\t}\n}\n\nfunc TestToken(t *testing.T) {\n\td := NewDecoder(strings.NewReader(testInput))\n\td.Entity = testEntity\n\n\tfor i, want := range cookedTokens {\n\t\thave, err := d.Token()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"token %d: unexpected error: %s\", i, err)\n\t\t}\n\t\tif !reflect.DeepEqual(have, want) {\n\t\t\tt.Errorf(\"token %d = %#v want %#v\", i, have, want)\n\t\t}\n\t}\n}\n\nfunc TestSyntax(t *testing.T) {\n\tfor i := range xmlInput 
{\n\t\td := NewDecoder(strings.NewReader(xmlInput[i]))\n\t\tvar err error\n\t\tfor _, err = d.Token(); err == nil; _, err = d.Token() {\n\t\t}\n\t\tif _, ok := err.(*SyntaxError); !ok {\n\t\t\tt.Fatalf(`xmlInput \"%s\": expected SyntaxError not received`, xmlInput[i])\n\t\t}\n\t}\n}\n\ntype allScalars struct {\n\tTrue1     bool\n\tTrue2     bool\n\tFalse1    bool\n\tFalse2    bool\n\tInt       int\n\tInt8      int8\n\tInt16     int16\n\tInt32     int32\n\tInt64     int64\n\tUint      int\n\tUint8     uint8\n\tUint16    uint16\n\tUint32    uint32\n\tUint64    uint64\n\tUintptr   uintptr\n\tFloat32   float32\n\tFloat64   float64\n\tString    string\n\tPtrString *string\n}\n\nvar all = allScalars{\n\tTrue1:     true,\n\tTrue2:     true,\n\tFalse1:    false,\n\tFalse2:    false,\n\tInt:       1,\n\tInt8:      -2,\n\tInt16:     3,\n\tInt32:     -4,\n\tInt64:     5,\n\tUint:      6,\n\tUint8:     7,\n\tUint16:    8,\n\tUint32:    9,\n\tUint64:    10,\n\tUintptr:   11,\n\tFloat32:   13.0,\n\tFloat64:   14.0,\n\tString:    \"15\",\n\tPtrString: &sixteen,\n}\n\nvar sixteen = \"16\"\n\nconst testScalarsInput = `<allscalars>\n\t<True1>true</True1>\n\t<True2>1</True2>\n\t<False1>false</False1>\n\t<False2>0</False2>\n\t<Int>1</Int>\n\t<Int8>-2</Int8>\n\t<Int16>3</Int16>\n\t<Int32>-4</Int32>\n\t<Int64>5</Int64>\n\t<Uint>6</Uint>\n\t<Uint8>7</Uint8>\n\t<Uint16>8</Uint16>\n\t<Uint32>9</Uint32>\n\t<Uint64>10</Uint64>\n\t<Uintptr>11</Uintptr>\n\t<Float>12.0</Float>\n\t<Float32>13.0</Float32>\n\t<Float64>14.0</Float64>\n\t<String>15</String>\n\t<PtrString>16</PtrString>\n</allscalars>`\n\nfunc TestAllScalars(t *testing.T) {\n\tvar a allScalars\n\terr := Unmarshal([]byte(testScalarsInput), &a)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !reflect.DeepEqual(a, all) {\n\t\tt.Errorf(\"have %+v want %+v\", a, all)\n\t}\n}\n\ntype item struct {\n\tField_a string\n}\n\nfunc TestIssue569(t *testing.T) {\n\tdata := `<item><Field_a>abcd</Field_a></item>`\n\tvar i item\n\terr := 
Unmarshal([]byte(data), &i)\n\n\tif err != nil || i.Field_a != \"abcd\" {\n\t\tt.Fatal(\"Expecting abcd\")\n\t}\n}\n\nfunc TestUnquotedAttrs(t *testing.T) {\n\tdata := \"<tag attr=azAZ09:-_\\t>\"\n\td := NewDecoder(strings.NewReader(data))\n\td.Strict = false\n\ttoken, err := d.Token()\n\tif _, ok := err.(*SyntaxError); ok {\n\t\tt.Errorf(\"Unexpected error: %v\", err)\n\t}\n\tif token.(StartElement).Name.Local != \"tag\" {\n\t\tt.Errorf(\"Unexpected tag name: %v\", token.(StartElement).Name.Local)\n\t}\n\tattr := token.(StartElement).Attr[0]\n\tif attr.Value != \"azAZ09:-_\" {\n\t\tt.Errorf(\"Unexpected attribute value: %v\", attr.Value)\n\t}\n\tif attr.Name.Local != \"attr\" {\n\t\tt.Errorf(\"Unexpected attribute name: %v\", attr.Name.Local)\n\t}\n}\n\nfunc TestValuelessAttrs(t *testing.T) {\n\ttests := [][3]string{\n\t\t{\"<p nowrap>\", \"p\", \"nowrap\"},\n\t\t{\"<p nowrap >\", \"p\", \"nowrap\"},\n\t\t{\"<input checked/>\", \"input\", \"checked\"},\n\t\t{\"<input checked />\", \"input\", \"checked\"},\n\t}\n\tfor _, test := range tests {\n\t\td := NewDecoder(strings.NewReader(test[0]))\n\t\td.Strict = false\n\t\ttoken, err := d.Token()\n\t\tif _, ok := err.(*SyntaxError); ok {\n\t\t\tt.Errorf(\"Unexpected error: %v\", err)\n\t\t}\n\t\tif token.(StartElement).Name.Local != test[1] {\n\t\t\tt.Errorf(\"Unexpected tag name: %v\", token.(StartElement).Name.Local)\n\t\t}\n\t\tattr := token.(StartElement).Attr[0]\n\t\tif attr.Value != test[2] {\n\t\t\tt.Errorf(\"Unexpected attribute value: %v\", attr.Value)\n\t\t}\n\t\tif attr.Name.Local != test[2] {\n\t\t\tt.Errorf(\"Unexpected attribute name: %v\", attr.Name.Local)\n\t\t}\n\t}\n}\n\nfunc TestCopyTokenCharData(t *testing.T) {\n\tdata := []byte(\"same data\")\n\tvar tok1 Token = CharData(data)\n\ttok2 := CopyToken(tok1)\n\tif !reflect.DeepEqual(tok1, tok2) {\n\t\tt.Error(\"CopyToken(CharData) != CharData\")\n\t}\n\tdata[1] = 'o'\n\tif reflect.DeepEqual(tok1, tok2) {\n\t\tt.Error(\"CopyToken(CharData) uses same 
buffer.\")\n\t}\n}\n\nfunc TestCopyTokenStartElement(t *testing.T) {\n\telt := StartElement{Name{\"\", \"hello\"}, []Attr{{Name{\"\", \"lang\"}, \"en\"}}}\n\tvar tok1 Token = elt\n\ttok2 := CopyToken(tok1)\n\tif tok1.(StartElement).Attr[0].Value != \"en\" {\n\t\tt.Error(\"CopyToken overwrote Attr[0]\")\n\t}\n\tif !reflect.DeepEqual(tok1, tok2) {\n\t\tt.Error(\"CopyToken(StartElement) != StartElement\")\n\t}\n\ttok1.(StartElement).Attr[0] = Attr{Name{\"\", \"lang\"}, \"de\"}\n\tif reflect.DeepEqual(tok1, tok2) {\n\t\tt.Error(\"CopyToken(CharData) uses same buffer.\")\n\t}\n}\n\nfunc TestSyntaxErrorLineNum(t *testing.T) {\n\ttestInput := \"<P>Foo<P>\\n\\n<P>Bar</>\\n\"\n\td := NewDecoder(strings.NewReader(testInput))\n\tvar err error\n\tfor _, err = d.Token(); err == nil; _, err = d.Token() {\n\t}\n\tsynerr, ok := err.(*SyntaxError)\n\tif !ok {\n\t\tt.Error(\"Expected SyntaxError.\")\n\t}\n\tif synerr.Line != 3 {\n\t\tt.Error(\"SyntaxError didn't have correct line number.\")\n\t}\n}\n\nfunc TestTrailingRawToken(t *testing.T) {\n\tinput := `<FOO></FOO>  `\n\td := NewDecoder(strings.NewReader(input))\n\tvar err error\n\tfor _, err = d.RawToken(); err == nil; _, err = d.RawToken() {\n\t}\n\tif err != io.EOF {\n\t\tt.Fatalf(\"d.RawToken() = _, %v, want _, io.EOF\", err)\n\t}\n}\n\nfunc TestTrailingToken(t *testing.T) {\n\tinput := `<FOO></FOO>  `\n\td := NewDecoder(strings.NewReader(input))\n\tvar err error\n\tfor _, err = d.Token(); err == nil; _, err = d.Token() {\n\t}\n\tif err != io.EOF {\n\t\tt.Fatalf(\"d.Token() = _, %v, want _, io.EOF\", err)\n\t}\n}\n\nfunc TestEntityInsideCDATA(t *testing.T) {\n\tinput := `<test><![CDATA[ &val=foo ]]></test>`\n\td := NewDecoder(strings.NewReader(input))\n\tvar err error\n\tfor _, err = d.Token(); err == nil; _, err = d.Token() {\n\t}\n\tif err != io.EOF {\n\t\tt.Fatalf(\"d.Token() = _, %v, want _, io.EOF\", err)\n\t}\n}\n\nvar characterTests = []struct {\n\tin  string\n\terr string\n}{\n\t{\"\\x12<doc/>\", \"illegal character 
code U+0012\"},\n\t{\"<?xml version=\\\"1.0\\\"?>\\x0b<doc/>\", \"illegal character code U+000B\"},\n\t{\"\\xef\\xbf\\xbe<doc/>\", \"illegal character code U+FFFE\"},\n\t{\"<?xml version=\\\"1.0\\\"?><doc>\\r\\n<hiya/>\\x07<toots/></doc>\", \"illegal character code U+0007\"},\n\t{\"<?xml version=\\\"1.0\\\"?><doc \\x12='value'>what's up</doc>\", \"expected attribute name in element\"},\n\t{\"<doc>&abc\\x01;</doc>\", \"invalid character entity &abc (no semicolon)\"},\n\t{\"<doc>&\\x01;</doc>\", \"invalid character entity & (no semicolon)\"},\n\t{\"<doc>&\\xef\\xbf\\xbe;</doc>\", \"invalid character entity &\\uFFFE;\"},\n\t{\"<doc>&hello;</doc>\", \"invalid character entity &hello;\"},\n}\n\nfunc TestDisallowedCharacters(t *testing.T) {\n\n\tfor i, tt := range characterTests {\n\t\td := NewDecoder(strings.NewReader(tt.in))\n\t\tvar err error\n\n\t\tfor err == nil {\n\t\t\t_, err = d.Token()\n\t\t}\n\t\tsynerr, ok := err.(*SyntaxError)\n\t\tif !ok {\n\t\t\tt.Fatalf(\"input %d d.Token() = _, %v, want _, *SyntaxError\", i, err)\n\t\t}\n\t\tif synerr.Msg != tt.err {\n\t\t\tt.Fatalf(\"input %d synerr.Msg wrong: want %q, got %q\", i, tt.err, synerr.Msg)\n\t\t}\n\t}\n}\n\ntype procInstEncodingTest struct {\n\texpect, got string\n}\n\nvar procInstTests = []struct {\n\tinput  string\n\texpect [2]string\n}{\n\t{`version=\"1.0\" encoding=\"utf-8\"`, [2]string{\"1.0\", \"utf-8\"}},\n\t{`version=\"1.0\" encoding='utf-8'`, [2]string{\"1.0\", \"utf-8\"}},\n\t{`version=\"1.0\" encoding='utf-8' `, [2]string{\"1.0\", \"utf-8\"}},\n\t{`version=\"1.0\" encoding=utf-8`, [2]string{\"1.0\", \"\"}},\n\t{`encoding=\"FOO\" `, [2]string{\"\", \"FOO\"}},\n}\n\nfunc TestProcInstEncoding(t *testing.T) {\n\tfor _, test := range procInstTests {\n\t\tif got := procInst(\"version\", test.input); got != test.expect[0] {\n\t\t\tt.Errorf(\"procInst(version, %q) = %q; want %q\", test.input, got, test.expect[0])\n\t\t}\n\t\tif got := procInst(\"encoding\", test.input); got != test.expect[1] 
{\n\t\t\tt.Errorf(\"procInst(encoding, %q) = %q; want %q\", test.input, got, test.expect[1])\n\t\t}\n\t}\n}\n\n// Ensure that directives with comments include the complete\n// text of any nested directives.\n\nvar directivesWithCommentsInput = `\n<!DOCTYPE [<!-- a comment --><!ENTITY rdf \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">]>\n<!DOCTYPE [<!ENTITY go \"Golang\"><!-- a comment-->]>\n<!DOCTYPE <!-> <!> <!----> <!-->--> <!--->--> [<!ENTITY go \"Golang\"><!-- a comment-->]>\n`\n\nvar directivesWithCommentsTokens = []Token{\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY rdf \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">]`),\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE [<!ENTITY go \"Golang\">]`),\n\tCharData(\"\\n\"),\n\tDirective(`DOCTYPE <!-> <!>    [<!ENTITY go \"Golang\">]`),\n\tCharData(\"\\n\"),\n}\n\nfunc TestDirectivesWithComments(t *testing.T) {\n\td := NewDecoder(strings.NewReader(directivesWithCommentsInput))\n\n\tfor i, want := range directivesWithCommentsTokens {\n\t\thave, err := d.Token()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"token %d: unexpected error: %s\", i, err)\n\t\t}\n\t\tif !reflect.DeepEqual(have, want) {\n\t\t\tt.Errorf(\"token %d = %#v want %#v\", i, have, want)\n\t\t}\n\t}\n}\n\n// Writer whose Write method always returns an error.\ntype errWriter struct{}\n\nfunc (errWriter) Write(p []byte) (n int, err error) { return 0, fmt.Errorf(\"unwritable\") }\n\nfunc TestEscapeTextIOErrors(t *testing.T) {\n\texpectErr := \"unwritable\"\n\terr := EscapeText(errWriter{}, []byte{'A'})\n\n\tif err == nil || err.Error() != expectErr {\n\t\tt.Errorf(\"have %v, want %v\", err, expectErr)\n\t}\n}\n\nfunc TestEscapeTextInvalidChar(t *testing.T) {\n\tinput := []byte(\"A \\x00 terminated string.\")\n\texpected := \"A \\uFFFD terminated string.\"\n\n\tbuff := new(bytes.Buffer)\n\tif err := EscapeText(buff, input); err != nil {\n\t\tt.Fatalf(\"have %v, want nil\", err)\n\t}\n\ttext := buff.String()\n\n\tif text != expected {\n\t\tt.Errorf(\"have 
%v, want %v\", text, expected)\n\t}\n}\n\nfunc TestIssue5880(t *testing.T) {\n\ttype T []byte\n\tdata, err := Marshal(T{192, 168, 0, 1})\n\tif err != nil {\n\t\tt.Errorf(\"Marshal error: %v\", err)\n\t}\n\tif !utf8.Valid(data) {\n\t\tt.Errorf(\"Marshal generated invalid UTF-8: %x\", data)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/litmus_test_server.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n/*\nThis program is a server for the WebDAV 'litmus' compliance test at\nhttp://www.webdav.org/neon/litmus/\nTo run the test:\n\ngo run litmus_test_server.go\n\nand separately, from the downloaded litmus-xxx directory:\n\nmake URL=http://localhost:9999/ check\n*/\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net/http\"\n\t\"net/url\"\n\n\t\"golang.org/x/net/webdav\"\n)\n\nvar port = flag.Int(\"port\", 9999, \"server port\")\n\nfunc main() {\n\tflag.Parse()\n\tlog.SetFlags(0)\n\th := &webdav.Handler{\n\t\tFileSystem: webdav.NewMemFS(),\n\t\tLockSystem: webdav.NewMemLS(),\n\t\tLogger: func(r *http.Request, err error) {\n\t\t\tlitmus := r.Header.Get(\"X-Litmus\")\n\t\t\tif len(litmus) > 19 {\n\t\t\t\tlitmus = litmus[:16] + \"...\"\n\t\t\t}\n\n\t\t\tswitch r.Method {\n\t\t\tcase \"COPY\", \"MOVE\":\n\t\t\t\tdst := \"\"\n\t\t\t\tif u, err := url.Parse(r.Header.Get(\"Destination\")); err == nil {\n\t\t\t\t\tdst = u.Path\n\t\t\t\t}\n\t\t\t\to := r.Header.Get(\"Overwrite\")\n\t\t\t\tlog.Printf(\"%-20s%-10s%-30s%-30so=%-2s%v\", litmus, r.Method, r.URL.Path, dst, o, err)\n\t\t\tdefault:\n\t\t\t\tlog.Printf(\"%-20s%-10s%-30s%v\", litmus, r.Method, r.URL.Path, err)\n\t\t\t}\n\t\t},\n\t}\n\n\t// The next line would normally be:\n\t//\thttp.Handle(\"/\", h)\n\t// but we wrap that HTTP handler h to cater for a special case.\n\t//\n\t// The propfind_invalid2 litmus test case expects an empty namespace prefix\n\t// declaration to be an error. 
The FAQ in the webdav litmus test says:\n\t//\n\t// \"What does the \"propfind_invalid2\" test check for?...\n\t//\n\t// If a request was sent with an XML body which included an empty namespace\n\t// prefix declaration (xmlns:ns1=\"\"), then the server must reject that with\n\t// a \"400 Bad Request\" response, as it is invalid according to the XML\n\t// Namespace specification.\"\n\t//\n\t// On the other hand, the Go standard library's encoding/xml package\n\t// accepts an empty xmlns namespace, as per the discussion at\n\t// https://github.com/golang/go/issues/8068\n\t//\n\t// Empty namespaces seem disallowed in the second (2006) edition of the XML\n\t// standard, but allowed in a later edition. The grammar differs between\n\t// http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and\n\t// http://www.w3.org/TR/REC-xml-names/#dt-prefix\n\t//\n\t// Thus, we assume that the propfind_invalid2 test is obsolete, and\n\t// hard-code the 400 Bad Request response that the test expects.\n\thttp.Handle(\"/\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Header.Get(\"X-Litmus\") == \"props: 3 (propfind_invalid2)\" {\n\t\t\thttp.Error(w, \"400 Bad Request\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\th.ServeHTTP(w, r)\n\t}))\n\n\taddr := fmt.Sprintf(\":%d\", *port)\n\tlog.Printf(\"Serving %v\", addr)\n\tlog.Fatal(http.ListenAndServe(addr, nil))\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/lock.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage webdav\n\nimport (\n\t\"container/heap\"\n\t\"errors\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\t// ErrConfirmationFailed is returned by a LockSystem's Confirm method.\n\tErrConfirmationFailed = errors.New(\"webdav: confirmation failed\")\n\t// ErrForbidden is returned by a LockSystem's Unlock method.\n\tErrForbidden = errors.New(\"webdav: forbidden\")\n\t// ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods.\n\tErrLocked = errors.New(\"webdav: locked\")\n\t// ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods.\n\tErrNoSuchLock = errors.New(\"webdav: no such lock\")\n)\n\n// Condition can match a WebDAV resource, based on a token or ETag.\n// Exactly one of Token and ETag should be non-empty.\ntype Condition struct {\n\tNot   bool\n\tToken string\n\tETag  string\n}\n\n// LockSystem manages access to a collection of named resources. The elements\n// in a lock name are separated by slash ('/', U+002F) characters, regardless\n// of host operating system convention.\ntype LockSystem interface {\n\t// Confirm confirms that the caller can claim all of the locks specified by\n\t// the given conditions, and that holding the union of all of those locks\n\t// gives exclusive access to all of the named resources. Up to two resources\n\t// can be named. Empty names are ignored.\n\t//\n\t// Exactly one of release and err will be non-nil. If release is non-nil,\n\t// all of the requested locks are held until release is called. 
Calling\n\t// release does not unlock the lock, in the WebDAV UNLOCK sense, but once\n\t// Confirm has confirmed that a lock claim is valid, that lock cannot be\n\t// Confirmed again until it has been released.\n\t//\n\t// If Confirm returns ErrConfirmationFailed then the Handler will continue\n\t// to try any other set of locks presented (a WebDAV HTTP request can\n\t// present more than one set of locks). If it returns any other non-nil\n\t// error, the Handler will write a \"500 Internal Server Error\" HTTP status.\n\tConfirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error)\n\n\t// Create creates a lock with the given depth, duration, owner and root\n\t// (name). The depth will either be negative (meaning infinite) or zero.\n\t//\n\t// If Create returns ErrLocked then the Handler will write a \"423 Locked\"\n\t// HTTP status. If it returns any other non-nil error, the Handler will\n\t// write a \"500 Internal Server Error\" HTTP status.\n\t//\n\t// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for\n\t// when to use each error.\n\t//\n\t// The token returned identifies the created lock. It should be an absolute\n\t// URI as defined by RFC 3986, Section 4.3. In particular, it should not\n\t// contain whitespace.\n\tCreate(now time.Time, details LockDetails) (token string, err error)\n\n\t// Refresh refreshes the lock with the given token.\n\t//\n\t// If Refresh returns ErrLocked then the Handler will write a \"423 Locked\"\n\t// HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write\n\t// a \"412 Precondition Failed\" HTTP Status. 
If it returns any other non-nil\n\t// error, the Handler will write a \"500 Internal Server Error\" HTTP status.\n\t//\n\t// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for\n\t// when to use each error.\n\tRefresh(now time.Time, token string, duration time.Duration) (LockDetails, error)\n\n\t// Unlock unlocks the lock with the given token.\n\t//\n\t// If Unlock returns ErrForbidden then the Handler will write a \"403\n\t// Forbidden\" HTTP Status. If Unlock returns ErrLocked then the Handler\n\t// will write a \"423 Locked\" HTTP status. If Unlock returns ErrNoSuchLock\n\t// then the Handler will write a \"409 Conflict\" HTTP Status. If it returns\n\t// any other non-nil error, the Handler will write a \"500 Internal Server\n\t// Error\" HTTP status.\n\t//\n\t// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for\n\t// when to use each error.\n\tUnlock(now time.Time, token string) error\n}\n\n// LockDetails are a lock's metadata.\ntype LockDetails struct {\n\t// Root is the root resource name being locked. For a zero-depth lock, the\n\t// root is the only resource being locked.\n\tRoot string\n\t// Duration is the lock timeout. A negative duration means infinite.\n\tDuration time.Duration\n\t// OwnerXML is the verbatim <owner> XML given in a LOCK HTTP request.\n\t//\n\t// TODO: does the \"verbatim\" nature play well with XML namespaces?\n\t// Does the OwnerXML field need to have more structure? See\n\t// https://codereview.appspot.com/175140043/#msg2\n\tOwnerXML string\n\t// ZeroDepth is whether the lock has zero depth. 
If it does not have zero\n\t// depth, it has infinite depth.\n\tZeroDepth bool\n}\n\n// NewMemLS returns a new in-memory LockSystem.\nfunc NewMemLS() LockSystem {\n\treturn &memLS{\n\t\tbyName:  make(map[string]*memLSNode),\n\t\tbyToken: make(map[string]*memLSNode),\n\t\tgen:     uint64(time.Now().Unix()),\n\t}\n}\n\ntype memLS struct {\n\tmu      sync.Mutex\n\tbyName  map[string]*memLSNode\n\tbyToken map[string]*memLSNode\n\tgen     uint64\n\t// byExpiry only contains those nodes whose LockDetails have a finite\n\t// Duration and are yet to expire.\n\tbyExpiry byExpiry\n}\n\nfunc (m *memLS) nextToken() string {\n\tm.gen++\n\treturn strconv.FormatUint(m.gen, 10)\n}\n\nfunc (m *memLS) collectExpiredNodes(now time.Time) {\n\tfor len(m.byExpiry) > 0 {\n\t\tif now.Before(m.byExpiry[0].expiry) {\n\t\t\tbreak\n\t\t}\n\t\tm.remove(m.byExpiry[0])\n\t}\n}\n\nfunc (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tm.collectExpiredNodes(now)\n\n\tvar n0, n1 *memLSNode\n\tif name0 != \"\" {\n\t\tif n0 = m.lookup(slashClean(name0), conditions...); n0 == nil {\n\t\t\treturn nil, ErrConfirmationFailed\n\t\t}\n\t}\n\tif name1 != \"\" {\n\t\tif n1 = m.lookup(slashClean(name1), conditions...); n1 == nil {\n\t\t\treturn nil, ErrConfirmationFailed\n\t\t}\n\t}\n\n\t// Don't hold the same node twice.\n\tif n1 == n0 {\n\t\tn1 = nil\n\t}\n\n\tif n0 != nil {\n\t\tm.hold(n0)\n\t}\n\tif n1 != nil {\n\t\tm.hold(n1)\n\t}\n\treturn func() {\n\t\tm.mu.Lock()\n\t\tdefer m.mu.Unlock()\n\t\tif n1 != nil {\n\t\t\tm.unhold(n1)\n\t\t}\n\t\tif n0 != nil {\n\t\t\tm.unhold(n0)\n\t\t}\n\t}, nil\n}\n\n// lookup returns the node n that locks the named resource, provided that n\n// matches at least one of the given conditions and that lock isn't held by\n// another party. 
Otherwise, it returns nil.\n//\n// n may be a parent of the named resource, if n is an infinite depth lock.\nfunc (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) {\n\t// TODO: support Condition.Not and Condition.ETag.\n\tfor _, c := range conditions {\n\t\tn = m.byToken[c.Token]\n\t\tif n == nil || n.held {\n\t\t\tcontinue\n\t\t}\n\t\tif name == n.details.Root {\n\t\t\treturn n\n\t\t}\n\t\tif n.details.ZeroDepth {\n\t\t\tcontinue\n\t\t}\n\t\tif n.details.Root == \"/\" || strings.HasPrefix(name, n.details.Root+\"/\") {\n\t\t\treturn n\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (m *memLS) hold(n *memLSNode) {\n\tif n.held {\n\t\tpanic(\"webdav: memLS inconsistent held state\")\n\t}\n\tn.held = true\n\tif n.details.Duration >= 0 && n.byExpiryIndex >= 0 {\n\t\theap.Remove(&m.byExpiry, n.byExpiryIndex)\n\t}\n}\n\nfunc (m *memLS) unhold(n *memLSNode) {\n\tif !n.held {\n\t\tpanic(\"webdav: memLS inconsistent held state\")\n\t}\n\tn.held = false\n\tif n.details.Duration >= 0 {\n\t\theap.Push(&m.byExpiry, n)\n\t}\n}\n\nfunc (m *memLS) Create(now time.Time, details LockDetails) (string, error) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tm.collectExpiredNodes(now)\n\tdetails.Root = slashClean(details.Root)\n\n\tif !m.canCreate(details.Root, details.ZeroDepth) {\n\t\treturn \"\", ErrLocked\n\t}\n\tn := m.create(details.Root)\n\tn.token = m.nextToken()\n\tm.byToken[n.token] = n\n\tn.details = details\n\tif n.details.Duration >= 0 {\n\t\tn.expiry = now.Add(n.details.Duration)\n\t\theap.Push(&m.byExpiry, n)\n\t}\n\treturn n.token, nil\n}\n\nfunc (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tm.collectExpiredNodes(now)\n\n\tn := m.byToken[token]\n\tif n == nil {\n\t\treturn LockDetails{}, ErrNoSuchLock\n\t}\n\tif n.held {\n\t\treturn LockDetails{}, ErrLocked\n\t}\n\tif n.byExpiryIndex >= 0 {\n\t\theap.Remove(&m.byExpiry, n.byExpiryIndex)\n\t}\n\tn.details.Duration = 
duration\n\tif n.details.Duration >= 0 {\n\t\tn.expiry = now.Add(n.details.Duration)\n\t\theap.Push(&m.byExpiry, n)\n\t}\n\treturn n.details, nil\n}\n\nfunc (m *memLS) Unlock(now time.Time, token string) error {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tm.collectExpiredNodes(now)\n\n\tn := m.byToken[token]\n\tif n == nil {\n\t\treturn ErrNoSuchLock\n\t}\n\tif n.held {\n\t\treturn ErrLocked\n\t}\n\tm.remove(n)\n\treturn nil\n}\n\nfunc (m *memLS) canCreate(name string, zeroDepth bool) bool {\n\treturn walkToRoot(name, func(name0 string, first bool) bool {\n\t\tn := m.byName[name0]\n\t\tif n == nil {\n\t\t\treturn true\n\t\t}\n\t\tif first {\n\t\t\tif n.token != \"\" {\n\t\t\t\t// The target node is already locked.\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif !zeroDepth {\n\t\t\t\t// The requested lock depth is infinite, and the fact that n exists\n\t\t\t\t// (n != nil) means that a descendent of the target node is locked.\n\t\t\t\treturn false\n\t\t\t}\n\t\t} else if n.token != \"\" && !n.details.ZeroDepth {\n\t\t\t// An ancestor of the target node is locked with infinite depth.\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t})\n}\n\nfunc (m *memLS) create(name string) (ret *memLSNode) {\n\twalkToRoot(name, func(name0 string, first bool) bool {\n\t\tn := m.byName[name0]\n\t\tif n == nil {\n\t\t\tn = &memLSNode{\n\t\t\t\tdetails: LockDetails{\n\t\t\t\t\tRoot: name0,\n\t\t\t\t},\n\t\t\t\tbyExpiryIndex: -1,\n\t\t\t}\n\t\t\tm.byName[name0] = n\n\t\t}\n\t\tn.refCount++\n\t\tif first {\n\t\t\tret = n\n\t\t}\n\t\treturn true\n\t})\n\treturn ret\n}\n\nfunc (m *memLS) remove(n *memLSNode) {\n\tdelete(m.byToken, n.token)\n\tn.token = \"\"\n\twalkToRoot(n.details.Root, func(name0 string, first bool) bool {\n\t\tx := m.byName[name0]\n\t\tx.refCount--\n\t\tif x.refCount == 0 {\n\t\t\tdelete(m.byName, name0)\n\t\t}\n\t\treturn true\n\t})\n\tif n.byExpiryIndex >= 0 {\n\t\theap.Remove(&m.byExpiry, n.byExpiryIndex)\n\t}\n}\n\nfunc walkToRoot(name string, f func(name0 string, first bool) 
bool) bool {\n\tfor first := true; ; first = false {\n\t\tif !f(name, first) {\n\t\t\treturn false\n\t\t}\n\t\tif name == \"/\" {\n\t\t\tbreak\n\t\t}\n\t\tname = name[:strings.LastIndex(name, \"/\")]\n\t\tif name == \"\" {\n\t\t\tname = \"/\"\n\t\t}\n\t}\n\treturn true\n}\n\ntype memLSNode struct {\n\t// details are the lock metadata. Even if this node's name is not explicitly locked,\n\t// details.Root will still equal the node's name.\n\tdetails LockDetails\n\t// token is the unique identifier for this node's lock. An empty token means that\n\t// this node is not explicitly locked.\n\ttoken string\n\t// refCount is the number of self-or-descendent nodes that are explicitly locked.\n\trefCount int\n\t// expiry is when this node's lock expires.\n\texpiry time.Time\n\t// byExpiryIndex is the index of this node in memLS.byExpiry. It is -1\n\t// if this node does not expire, or has expired.\n\tbyExpiryIndex int\n\t// held is whether this node's lock is actively held by a Confirm call.\n\theld bool\n}\n\ntype byExpiry []*memLSNode\n\nfunc (b *byExpiry) Len() int {\n\treturn len(*b)\n}\n\nfunc (b *byExpiry) Less(i, j int) bool {\n\treturn (*b)[i].expiry.Before((*b)[j].expiry)\n}\n\nfunc (b *byExpiry) Swap(i, j int) {\n\t(*b)[i], (*b)[j] = (*b)[j], (*b)[i]\n\t(*b)[i].byExpiryIndex = i\n\t(*b)[j].byExpiryIndex = j\n}\n\nfunc (b *byExpiry) Push(x interface{}) {\n\tn := x.(*memLSNode)\n\tn.byExpiryIndex = len(*b)\n\t*b = append(*b, n)\n}\n\nfunc (b *byExpiry) Pop() interface{} {\n\ti := len(*b) - 1\n\tn := (*b)[i]\n\t(*b)[i] = nil\n\tn.byExpiryIndex = -1\n\t*b = (*b)[:i]\n\treturn n\n}\n\nconst infiniteTimeout = -1\n\n// parseTimeout parses the Timeout HTTP header, as per section 10.7. 
If s is\n// empty, an infiniteTimeout is returned.\nfunc parseTimeout(s string) (time.Duration, error) {\n\tif s == \"\" {\n\t\treturn infiniteTimeout, nil\n\t}\n\tif i := strings.IndexByte(s, ','); i >= 0 {\n\t\ts = s[:i]\n\t}\n\ts = strings.TrimSpace(s)\n\tif s == \"Infinite\" {\n\t\treturn infiniteTimeout, nil\n\t}\n\tconst pre = \"Second-\"\n\tif !strings.HasPrefix(s, pre) {\n\t\treturn 0, errInvalidTimeout\n\t}\n\ts = s[len(pre):]\n\tif s == \"\" || s[0] < '0' || '9' < s[0] {\n\t\treturn 0, errInvalidTimeout\n\t}\n\tn, err := strconv.ParseInt(s, 10, 64)\n\tif err != nil || 1<<32-1 < n {\n\t\treturn 0, errInvalidTimeout\n\t}\n\treturn time.Duration(n) * time.Second, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/lock_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage webdav\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"path\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestWalkToRoot(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\twant []string\n\t}{{\n\t\t\"/a/b/c/d\",\n\t\t[]string{\n\t\t\t\"/a/b/c/d\",\n\t\t\t\"/a/b/c\",\n\t\t\t\"/a/b\",\n\t\t\t\"/a\",\n\t\t\t\"/\",\n\t\t},\n\t}, {\n\t\t\"/a\",\n\t\t[]string{\n\t\t\t\"/a\",\n\t\t\t\"/\",\n\t\t},\n\t}, {\n\t\t\"/\",\n\t\t[]string{\n\t\t\t\"/\",\n\t\t},\n\t}}\n\n\tfor _, tc := range testCases {\n\t\tvar got []string\n\t\tif !walkToRoot(tc.name, func(name0 string, first bool) bool {\n\t\t\tif first != (len(got) == 0) {\n\t\t\t\tt.Errorf(\"name=%q: first=%t but len(got)==%d\", tc.name, first, len(got))\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tgot = append(got, name0)\n\t\t\treturn true\n\t\t}) {\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(got, tc.want) {\n\t\t\tt.Errorf(\"name=%q:\\ngot  %q\\nwant %q\", tc.name, got, tc.want)\n\t\t}\n\t}\n}\n\nvar lockTestDurations = []time.Duration{\n\tinfiniteTimeout, // infiniteTimeout means to never expire.\n\t0,               // A zero duration means to expire immediately.\n\t100 * time.Hour, // A very large duration will not expire in these tests.\n}\n\n// lockTestNames are the names of a set of mutually compatible locks. 
For each\n// name fragment:\n//\t- _ means no explicit lock.\n//\t- i means an infinite-depth lock,\n//\t- z means a zero-depth lock,\nvar lockTestNames = []string{\n\t\"/_/_/_/_/z\",\n\t\"/_/_/i\",\n\t\"/_/z\",\n\t\"/_/z/i\",\n\t\"/_/z/z\",\n\t\"/_/z/_/i\",\n\t\"/_/z/_/z\",\n\t\"/i\",\n\t\"/z\",\n\t\"/z/_/i\",\n\t\"/z/_/z\",\n}\n\nfunc lockTestZeroDepth(name string) bool {\n\tswitch name[len(name)-1] {\n\tcase 'i':\n\t\treturn false\n\tcase 'z':\n\t\treturn true\n\t}\n\tpanic(fmt.Sprintf(\"lock name %q did not end with 'i' or 'z'\", name))\n}\n\nfunc TestMemLSCanCreate(t *testing.T) {\n\tnow := time.Unix(0, 0)\n\tm := NewMemLS().(*memLS)\n\n\tfor _, name := range lockTestNames {\n\t\t_, err := m.Create(now, LockDetails{\n\t\t\tRoot:      name,\n\t\t\tDuration:  infiniteTimeout,\n\t\t\tZeroDepth: lockTestZeroDepth(name),\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"creating lock for %q: %v\", name, err)\n\t\t}\n\t}\n\n\twantCanCreate := func(name string, zeroDepth bool) bool {\n\t\tfor _, n := range lockTestNames {\n\t\t\tswitch {\n\t\t\tcase n == name:\n\t\t\t\t// An existing lock has the same name as the proposed lock.\n\t\t\t\treturn false\n\t\t\tcase strings.HasPrefix(n, name):\n\t\t\t\t// An existing lock would be a child of the proposed lock,\n\t\t\t\t// which conflicts if the proposed lock has infinite depth.\n\t\t\t\tif !zeroDepth {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\tcase strings.HasPrefix(name, n):\n\t\t\t\t// An existing lock would be an ancestor of the proposed lock,\n\t\t\t\t// which conflicts if the ancestor has infinite depth.\n\t\t\t\tif n[len(n)-1] == 'i' {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\tvar check func(int, string)\n\tcheck = func(recursion int, name string) {\n\t\tfor _, zeroDepth := range []bool{false, true} {\n\t\t\tgot := m.canCreate(name, zeroDepth)\n\t\t\twant := wantCanCreate(name, zeroDepth)\n\t\t\tif got != want {\n\t\t\t\tt.Errorf(\"canCreate name=%q zeroDepth=%t: got %t, want %t\", 
name, zeroDepth, got, want)\n\t\t\t}\n\t\t}\n\t\tif recursion == 6 {\n\t\t\treturn\n\t\t}\n\t\tif name != \"/\" {\n\t\t\tname += \"/\"\n\t\t}\n\t\tfor _, c := range \"_iz\" {\n\t\t\tcheck(recursion+1, name+string(c))\n\t\t}\n\t}\n\tcheck(0, \"/\")\n}\n\nfunc TestMemLSLookup(t *testing.T) {\n\tnow := time.Unix(0, 0)\n\tm := NewMemLS().(*memLS)\n\n\tbadToken := m.nextToken()\n\tt.Logf(\"badToken=%q\", badToken)\n\n\tfor _, name := range lockTestNames {\n\t\ttoken, err := m.Create(now, LockDetails{\n\t\t\tRoot:      name,\n\t\t\tDuration:  infiniteTimeout,\n\t\t\tZeroDepth: lockTestZeroDepth(name),\n\t\t})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"creating lock for %q: %v\", name, err)\n\t\t}\n\t\tt.Logf(\"%-15q -> node=%p token=%q\", name, m.byName[name], token)\n\t}\n\n\tbaseNames := append([]string{\"/a\", \"/b/c\"}, lockTestNames...)\n\tfor _, baseName := range baseNames {\n\t\tfor _, suffix := range []string{\"\", \"/0\", \"/1/2/3\"} {\n\t\t\tname := baseName + suffix\n\n\t\t\tgoodToken := \"\"\n\t\t\tbase := m.byName[baseName]\n\t\t\tif base != nil && (suffix == \"\" || !lockTestZeroDepth(baseName)) {\n\t\t\t\tgoodToken = base.token\n\t\t\t}\n\n\t\t\tfor _, token := range []string{badToken, goodToken} {\n\t\t\t\tif token == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tgot := m.lookup(name, Condition{Token: token})\n\t\t\t\twant := base\n\t\t\t\tif token == badToken {\n\t\t\t\t\twant = nil\n\t\t\t\t}\n\t\t\t\tif got != want {\n\t\t\t\t\tt.Errorf(\"name=%-20qtoken=%q (bad=%t): got %p, want %p\",\n\t\t\t\t\t\tname, token, token == badToken, got, want)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestMemLSConfirm(t *testing.T) {\n\tnow := time.Unix(0, 0)\n\tm := NewMemLS().(*memLS)\n\talice, err := m.Create(now, LockDetails{\n\t\tRoot:      \"/alice\",\n\t\tDuration:  infiniteTimeout,\n\t\tZeroDepth: false,\n\t})\n\ttweedle, err := m.Create(now, LockDetails{\n\t\tRoot:      \"/tweedle\",\n\t\tDuration:  infiniteTimeout,\n\t\tZeroDepth: false,\n\t})\n\tif err != nil 
{\n\t\tt.Fatalf(\"Create: %v\", err)\n\t}\n\tif err := m.consistent(); err != nil {\n\t\tt.Fatalf(\"Create: inconsistent state: %v\", err)\n\t}\n\n\t// Test a mismatch between name and condition.\n\t_, err = m.Confirm(now, \"/tweedle/dee\", \"\", Condition{Token: alice})\n\tif err != ErrConfirmationFailed {\n\t\tt.Fatalf(\"Confirm (mismatch): got %v, want ErrConfirmationFailed\", err)\n\t}\n\tif err := m.consistent(); err != nil {\n\t\tt.Fatalf(\"Confirm (mismatch): inconsistent state: %v\", err)\n\t}\n\n\t// Test two names (that fall under the same lock) in the one Confirm call.\n\trelease, err := m.Confirm(now, \"/tweedle/dee\", \"/tweedle/dum\", Condition{Token: tweedle})\n\tif err != nil {\n\t\tt.Fatalf(\"Confirm (twins): %v\", err)\n\t}\n\tif err := m.consistent(); err != nil {\n\t\tt.Fatalf(\"Confirm (twins): inconsistent state: %v\", err)\n\t}\n\trelease()\n\tif err := m.consistent(); err != nil {\n\t\tt.Fatalf(\"release (twins): inconsistent state: %v\", err)\n\t}\n\n\t// Test the same two names in overlapping Confirm / release calls.\n\treleaseDee, err := m.Confirm(now, \"/tweedle/dee\", \"\", Condition{Token: tweedle})\n\tif err != nil {\n\t\tt.Fatalf(\"Confirm (sequence #0): %v\", err)\n\t}\n\tif err := m.consistent(); err != nil {\n\t\tt.Fatalf(\"Confirm (sequence #0): inconsistent state: %v\", err)\n\t}\n\n\t_, err = m.Confirm(now, \"/tweedle/dum\", \"\", Condition{Token: tweedle})\n\tif err != ErrConfirmationFailed {\n\t\tt.Fatalf(\"Confirm (sequence #1): got %v, want ErrConfirmationFailed\", err)\n\t}\n\tif err := m.consistent(); err != nil {\n\t\tt.Fatalf(\"Confirm (sequence #1): inconsistent state: %v\", err)\n\t}\n\n\treleaseDee()\n\tif err := m.consistent(); err != nil {\n\t\tt.Fatalf(\"release (sequence #2): inconsistent state: %v\", err)\n\t}\n\n\treleaseDum, err := m.Confirm(now, \"/tweedle/dum\", \"\", Condition{Token: tweedle})\n\tif err != nil {\n\t\tt.Fatalf(\"Confirm (sequence #3): %v\", err)\n\t}\n\tif err := m.consistent(); err != nil 
{\n\t\tt.Fatalf(\"Confirm (sequence #3): inconsistent state: %v\", err)\n\t}\n\n\t// Test that you can't unlock a held lock.\n\terr = m.Unlock(now, tweedle)\n\tif err != ErrLocked {\n\t\tt.Fatalf(\"Unlock (sequence #4): got %v, want ErrLocked\", err)\n\t}\n\n\treleaseDum()\n\tif err := m.consistent(); err != nil {\n\t\tt.Fatalf(\"release (sequence #5): inconsistent state: %v\", err)\n\t}\n\n\terr = m.Unlock(now, tweedle)\n\tif err != nil {\n\t\tt.Fatalf(\"Unlock (sequence #6): %v\", err)\n\t}\n\tif err := m.consistent(); err != nil {\n\t\tt.Fatalf(\"Unlock (sequence #6): inconsistent state: %v\", err)\n\t}\n}\n\nfunc TestMemLSNonCanonicalRoot(t *testing.T) {\n\tnow := time.Unix(0, 0)\n\tm := NewMemLS().(*memLS)\n\ttoken, err := m.Create(now, LockDetails{\n\t\tRoot:     \"/foo/./bar//\",\n\t\tDuration: 1 * time.Second,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Create: %v\", err)\n\t}\n\tif err := m.consistent(); err != nil {\n\t\tt.Fatalf(\"Create: inconsistent state: %v\", err)\n\t}\n\tif err := m.Unlock(now, token); err != nil {\n\t\tt.Fatalf(\"Unlock: %v\", err)\n\t}\n\tif err := m.consistent(); err != nil {\n\t\tt.Fatalf(\"Unlock: inconsistent state: %v\", err)\n\t}\n}\n\nfunc TestMemLSExpiry(t *testing.T) {\n\tm := NewMemLS().(*memLS)\n\ttestCases := []string{\n\t\t\"setNow 0\",\n\t\t\"create /a.5\",\n\t\t\"want /a.5\",\n\t\t\"create /c.6\",\n\t\t\"want /a.5 /c.6\",\n\t\t\"create /a/b.7\",\n\t\t\"want /a.5 /a/b.7 /c.6\",\n\t\t\"setNow 4\",\n\t\t\"want /a.5 /a/b.7 /c.6\",\n\t\t\"setNow 5\",\n\t\t\"want /a/b.7 /c.6\",\n\t\t\"setNow 6\",\n\t\t\"want /a/b.7\",\n\t\t\"setNow 7\",\n\t\t\"want \",\n\t\t\"setNow 8\",\n\t\t\"want \",\n\t\t\"create /a.12\",\n\t\t\"create /b.13\",\n\t\t\"create /c.15\",\n\t\t\"create /a/d.16\",\n\t\t\"want /a.12 /a/d.16 /b.13 /c.15\",\n\t\t\"refresh /a.14\",\n\t\t\"want /a.14 /a/d.16 /b.13 /c.15\",\n\t\t\"setNow 12\",\n\t\t\"want /a.14 /a/d.16 /b.13 /c.15\",\n\t\t\"setNow 13\",\n\t\t\"want /a.14 /a/d.16 /c.15\",\n\t\t\"setNow 
14\",\n\t\t\"want /a/d.16 /c.15\",\n\t\t\"refresh /a/d.20\",\n\t\t\"refresh /c.20\",\n\t\t\"want /a/d.20 /c.20\",\n\t\t\"setNow 20\",\n\t\t\"want \",\n\t}\n\n\ttokens := map[string]string{}\n\tzTime := time.Unix(0, 0)\n\tnow := zTime\n\tfor i, tc := range testCases {\n\t\tj := strings.IndexByte(tc, ' ')\n\t\tif j < 0 {\n\t\t\tt.Fatalf(\"test case #%d %q: invalid command\", i, tc)\n\t\t}\n\t\top, arg := tc[:j], tc[j+1:]\n\t\tswitch op {\n\t\tdefault:\n\t\t\tt.Fatalf(\"test case #%d %q: invalid operation %q\", i, tc, op)\n\n\t\tcase \"create\", \"refresh\":\n\t\t\tparts := strings.Split(arg, \".\")\n\t\t\tif len(parts) != 2 {\n\t\t\t\tt.Fatalf(\"test case #%d %q: invalid create\", i, tc)\n\t\t\t}\n\t\t\troot := parts[0]\n\t\t\td, err := strconv.Atoi(parts[1])\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"test case #%d %q: invalid duration\", i, tc)\n\t\t\t}\n\t\t\tdur := time.Unix(0, 0).Add(time.Duration(d) * time.Second).Sub(now)\n\n\t\t\tswitch op {\n\t\t\tcase \"create\":\n\t\t\t\ttoken, err := m.Create(now, LockDetails{\n\t\t\t\t\tRoot:      root,\n\t\t\t\t\tDuration:  dur,\n\t\t\t\t\tZeroDepth: true,\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"test case #%d %q: Create: %v\", i, tc, err)\n\t\t\t\t}\n\t\t\t\ttokens[root] = token\n\n\t\t\tcase \"refresh\":\n\t\t\t\ttoken := tokens[root]\n\t\t\t\tif token == \"\" {\n\t\t\t\t\tt.Fatalf(\"test case #%d %q: no token for %q\", i, tc, root)\n\t\t\t\t}\n\t\t\t\tgot, err := m.Refresh(now, token, dur)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"test case #%d %q: Refresh: %v\", i, tc, err)\n\t\t\t\t}\n\t\t\t\twant := LockDetails{\n\t\t\t\t\tRoot:      root,\n\t\t\t\t\tDuration:  dur,\n\t\t\t\t\tZeroDepth: true,\n\t\t\t\t}\n\t\t\t\tif got != want {\n\t\t\t\t\tt.Fatalf(\"test case #%d %q:\\ngot  %v\\nwant %v\", i, tc, got, want)\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"setNow\":\n\t\t\td, err := strconv.Atoi(arg)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"test case #%d %q: invalid duration\", i, tc)\n\t\t\t}\n\t\t\tnow 
= time.Unix(0, 0).Add(time.Duration(d) * time.Second)\n\n\t\tcase \"want\":\n\t\t\tm.mu.Lock()\n\t\t\tm.collectExpiredNodes(now)\n\t\t\tgot := make([]string, 0, len(m.byToken))\n\t\t\tfor _, n := range m.byToken {\n\t\t\t\tgot = append(got, fmt.Sprintf(\"%s.%d\",\n\t\t\t\t\tn.details.Root, n.expiry.Sub(zTime)/time.Second))\n\t\t\t}\n\t\t\tm.mu.Unlock()\n\t\t\tsort.Strings(got)\n\t\t\twant := []string{}\n\t\t\tif arg != \"\" {\n\t\t\t\twant = strings.Split(arg, \" \")\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, want) {\n\t\t\t\tt.Fatalf(\"test case #%d %q:\\ngot  %q\\nwant %q\", i, tc, got, want)\n\t\t\t}\n\t\t}\n\n\t\tif err := m.consistent(); err != nil {\n\t\t\tt.Fatalf(\"test case #%d %q: inconsistent state: %v\", i, tc, err)\n\t\t}\n\t}\n}\n\nfunc TestMemLS(t *testing.T) {\n\tnow := time.Unix(0, 0)\n\tm := NewMemLS().(*memLS)\n\trng := rand.New(rand.NewSource(0))\n\ttokens := map[string]string{}\n\tnConfirm, nCreate, nRefresh, nUnlock := 0, 0, 0, 0\n\tconst N = 2000\n\n\tfor i := 0; i < N; i++ {\n\t\tname := lockTestNames[rng.Intn(len(lockTestNames))]\n\t\tduration := lockTestDurations[rng.Intn(len(lockTestDurations))]\n\t\tconfirmed, unlocked := false, false\n\n\t\t// If the name was already locked, we randomly confirm/release, refresh\n\t\t// or unlock it. 
Otherwise, we create a lock.\n\t\ttoken := tokens[name]\n\t\tif token != \"\" {\n\t\t\tswitch rng.Intn(3) {\n\t\t\tcase 0:\n\t\t\t\tconfirmed = true\n\t\t\t\tnConfirm++\n\t\t\t\trelease, err := m.Confirm(now, name, \"\", Condition{Token: token})\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"iteration #%d: Confirm %q: %v\", i, name, err)\n\t\t\t\t}\n\t\t\t\tif err := m.consistent(); err != nil {\n\t\t\t\t\tt.Fatalf(\"iteration #%d: inconsistent state: %v\", i, err)\n\t\t\t\t}\n\t\t\t\trelease()\n\n\t\t\tcase 1:\n\t\t\t\tnRefresh++\n\t\t\t\tif _, err := m.Refresh(now, token, duration); err != nil {\n\t\t\t\t\tt.Fatalf(\"iteration #%d: Refresh %q: %v\", i, name, err)\n\t\t\t\t}\n\n\t\t\tcase 2:\n\t\t\t\tunlocked = true\n\t\t\t\tnUnlock++\n\t\t\t\tif err := m.Unlock(now, token); err != nil {\n\t\t\t\t\tt.Fatalf(\"iteration #%d: Unlock %q: %v\", i, name, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t} else {\n\t\t\tnCreate++\n\t\t\tvar err error\n\t\t\ttoken, err = m.Create(now, LockDetails{\n\t\t\t\tRoot:      name,\n\t\t\t\tDuration:  duration,\n\t\t\t\tZeroDepth: lockTestZeroDepth(name),\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"iteration #%d: Create %q: %v\", i, name, err)\n\t\t\t}\n\t\t}\n\n\t\tif !confirmed {\n\t\t\tif duration == 0 || unlocked {\n\t\t\t\t// A zero-duration lock should expire immediately and is\n\t\t\t\t// effectively equivalent to being unlocked.\n\t\t\t\ttokens[name] = \"\"\n\t\t\t} else {\n\t\t\t\ttokens[name] = token\n\t\t\t}\n\t\t}\n\n\t\tif err := m.consistent(); err != nil {\n\t\t\tt.Fatalf(\"iteration #%d: inconsistent state: %v\", i, err)\n\t\t}\n\t}\n\n\tif nConfirm < N/10 {\n\t\tt.Fatalf(\"too few Confirm calls: got %d, want >= %d\", nConfirm, N/10)\n\t}\n\tif nCreate < N/10 {\n\t\tt.Fatalf(\"too few Create calls: got %d, want >= %d\", nCreate, N/10)\n\t}\n\tif nRefresh < N/10 {\n\t\tt.Fatalf(\"too few Refresh calls: got %d, want >= %d\", nRefresh, N/10)\n\t}\n\tif nUnlock < N/10 {\n\t\tt.Fatalf(\"too few Unlock calls: got %d, want >= 
%d\", nUnlock, N/10)\n\t}\n}\n\nfunc (m *memLS) consistent() error {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\t// If m.byName is non-empty, then it must contain an entry for the root \"/\",\n\t// and its refCount should equal the number of locked nodes.\n\tif len(m.byName) > 0 {\n\t\tn := m.byName[\"/\"]\n\t\tif n == nil {\n\t\t\treturn fmt.Errorf(`non-empty m.byName does not contain the root \"/\"`)\n\t\t}\n\t\tif n.refCount != len(m.byToken) {\n\t\t\treturn fmt.Errorf(\"root node refCount=%d, differs from len(m.byToken)=%d\", n.refCount, len(m.byToken))\n\t\t}\n\t}\n\n\tfor name, n := range m.byName {\n\t\t// The map keys should be consistent with the node's copy of the key.\n\t\tif n.details.Root != name {\n\t\t\treturn fmt.Errorf(\"node name %q != byName map key %q\", n.details.Root, name)\n\t\t}\n\n\t\t// A name must be clean, and start with a \"/\".\n\t\tif len(name) == 0 || name[0] != '/' {\n\t\t\treturn fmt.Errorf(`node name %q does not start with \"/\"`, name)\n\t\t}\n\t\tif name != path.Clean(name) {\n\t\t\treturn fmt.Errorf(`node name %q is not clean`, name)\n\t\t}\n\n\t\t// A node's refCount should be positive.\n\t\tif n.refCount <= 0 {\n\t\t\treturn fmt.Errorf(\"non-positive refCount for node at name %q\", name)\n\t\t}\n\n\t\t// A node's refCount should be the number of self-or-descendents that\n\t\t// are locked (i.e. 
have a non-empty token).\n\t\tvar list []string\n\t\tfor name0, n0 := range m.byName {\n\t\t\t// All of lockTestNames' name fragments are one byte long: '_', 'i' or 'z',\n\t\t\t// so strings.HasPrefix is equivalent to self-or-descendent name match.\n\t\t\t// We don't have to worry about \"/foo/bar\" being a false positive match\n\t\t\t// for \"/foo/b\".\n\t\t\tif strings.HasPrefix(name0, name) && n0.token != \"\" {\n\t\t\t\tlist = append(list, name0)\n\t\t\t}\n\t\t}\n\t\tif n.refCount != len(list) {\n\t\t\tsort.Strings(list)\n\t\t\treturn fmt.Errorf(\"node at name %q has refCount %d but locked self-or-descendents are %q (len=%d)\",\n\t\t\t\tname, n.refCount, list, len(list))\n\t\t}\n\n\t\t// A node n is in m.byToken if it has a non-empty token.\n\t\tif n.token != \"\" {\n\t\t\tif _, ok := m.byToken[n.token]; !ok {\n\t\t\t\treturn fmt.Errorf(\"node at name %q has token %q but not in m.byToken\", name, n.token)\n\t\t\t}\n\t\t}\n\n\t\t// A node n is in m.byExpiry if it has a non-negative byExpiryIndex.\n\t\tif n.byExpiryIndex >= 0 {\n\t\t\tif n.byExpiryIndex >= len(m.byExpiry) {\n\t\t\t\treturn fmt.Errorf(\"node at name %q has byExpiryIndex %d but m.byExpiry has length %d\", name, n.byExpiryIndex, len(m.byExpiry))\n\t\t\t}\n\t\t\tif n != m.byExpiry[n.byExpiryIndex] {\n\t\t\t\treturn fmt.Errorf(\"node at name %q has byExpiryIndex %d but that indexes a different node\", name, n.byExpiryIndex)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor token, n := range m.byToken {\n\t\t// The map keys should be consistent with the node's copy of the key.\n\t\tif n.token != token {\n\t\t\treturn fmt.Errorf(\"node token %q != byToken map key %q\", n.token, token)\n\t\t}\n\n\t\t// Every node in m.byToken is in m.byName.\n\t\tif _, ok := m.byName[n.details.Root]; !ok {\n\t\t\treturn fmt.Errorf(\"node at name %q in m.byToken but not in m.byName\", n.details.Root)\n\t\t}\n\t}\n\n\tfor i, n := range m.byExpiry {\n\t\t// The slice indices should be consistent with the node's copy of the index.\n\t\tif 
n.byExpiryIndex != i {\n\t\t\treturn fmt.Errorf(\"node byExpiryIndex %d != byExpiry slice index %d\", n.byExpiryIndex, i)\n\t\t}\n\n\t\t// Every node in m.byExpiry is in m.byName.\n\t\tif _, ok := m.byName[n.details.Root]; !ok {\n\t\t\treturn fmt.Errorf(\"node at name %q in m.byExpiry but not in m.byName\", n.details.Root)\n\t\t}\n\n\t\t// No node in m.byExpiry should be held.\n\t\tif n.held {\n\t\t\treturn fmt.Errorf(\"node at name %q in m.byExpiry is held\", n.details.Root)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc TestParseTimeout(t *testing.T) {\n\ttestCases := []struct {\n\t\ts       string\n\t\twant    time.Duration\n\t\twantErr error\n\t}{{\n\t\t\"\",\n\t\tinfiniteTimeout,\n\t\tnil,\n\t}, {\n\t\t\"Infinite\",\n\t\tinfiniteTimeout,\n\t\tnil,\n\t}, {\n\t\t\"Infinitesimal\",\n\t\t0,\n\t\terrInvalidTimeout,\n\t}, {\n\t\t\"infinite\",\n\t\t0,\n\t\terrInvalidTimeout,\n\t}, {\n\t\t\"Second-0\",\n\t\t0 * time.Second,\n\t\tnil,\n\t}, {\n\t\t\"Second-123\",\n\t\t123 * time.Second,\n\t\tnil,\n\t}, {\n\t\t\"  Second-456    \",\n\t\t456 * time.Second,\n\t\tnil,\n\t}, {\n\t\t\"Second-4100000000\",\n\t\t4100000000 * time.Second,\n\t\tnil,\n\t}, {\n\t\t\"junk\",\n\t\t0,\n\t\terrInvalidTimeout,\n\t}, {\n\t\t\"Second-\",\n\t\t0,\n\t\terrInvalidTimeout,\n\t}, {\n\t\t\"Second--1\",\n\t\t0,\n\t\terrInvalidTimeout,\n\t}, {\n\t\t\"Second--123\",\n\t\t0,\n\t\terrInvalidTimeout,\n\t}, {\n\t\t\"Second-+123\",\n\t\t0,\n\t\terrInvalidTimeout,\n\t}, {\n\t\t\"Second-0x123\",\n\t\t0,\n\t\terrInvalidTimeout,\n\t}, {\n\t\t\"second-123\",\n\t\t0,\n\t\terrInvalidTimeout,\n\t}, {\n\t\t\"Second-4294967295\",\n\t\t4294967295 * time.Second,\n\t\tnil,\n\t}, {\n\t\t// Section 10.7 says that \"The timeout value for TimeType \"Second\"\n\t\t// must not be greater than 2^32-1.\"\n\t\t\"Second-4294967296\",\n\t\t0,\n\t\terrInvalidTimeout,\n\t}, {\n\t\t// This test case comes from section 9.10.9 of the spec. 
It says,\n\t\t//\n\t\t// \"In this request, the client has specified that it desires an\n\t\t// infinite-length lock, if available, otherwise a timeout of 4.1\n\t\t// billion seconds, if available.\"\n\t\t//\n\t\t// The Go WebDAV package always supports infinite length locks,\n\t\t// and ignores the fallback after the comma.\n\t\t\"Infinite, Second-4100000000\",\n\t\tinfiniteTimeout,\n\t\tnil,\n\t}}\n\n\tfor _, tc := range testCases {\n\t\tgot, gotErr := parseTimeout(tc.s)\n\t\tif got != tc.want || gotErr != tc.wantErr {\n\t\t\tt.Errorf(\"parsing %q:\\ngot  %v, %v\\nwant %v, %v\", tc.s, got, gotErr, tc.want, tc.wantErr)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/prop.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage webdav\n\nimport (\n\t\"bytes\"\n\t\"encoding/xml\"\n\t\"fmt\"\n\t\"io\"\n\t\"mime\"\n\t\"net/http\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strconv\"\n\n\t\"golang.org/x/net/context\"\n)\n\n// Proppatch describes a property update instruction as defined in RFC 4918.\n// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH\ntype Proppatch struct {\n\t// Remove specifies whether this patch removes properties. If it does not\n\t// remove them, it sets them.\n\tRemove bool\n\t// Props contains the properties to be set or removed.\n\tProps []Property\n}\n\n// Propstat describes a XML propstat element as defined in RFC 4918.\n// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat\ntype Propstat struct {\n\t// Props contains the properties for which Status applies.\n\tProps []Property\n\n\t// Status defines the HTTP status code of the properties in Prop.\n\t// Allowed values include, but are not limited to the WebDAV status\n\t// code extensions for HTTP/1.1.\n\t// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11\n\tStatus int\n\n\t// XMLError contains the XML representation of the optional error element.\n\t// XML content within this field must not rely on any predefined\n\t// namespace declarations or prefixes. If empty, the XML error element\n\t// is omitted.\n\tXMLError string\n\n\t// ResponseDescription contains the contents of the optional\n\t// responsedescription field. If empty, the XML element is omitted.\n\tResponseDescription string\n}\n\n// makePropstats returns a slice containing those of x and y whose Props slice\n// is non-empty. 
If both are empty, it returns a slice containing an otherwise\n// zero Propstat whose HTTP status code is 200 OK.\nfunc makePropstats(x, y Propstat) []Propstat {\n\tpstats := make([]Propstat, 0, 2)\n\tif len(x.Props) != 0 {\n\t\tpstats = append(pstats, x)\n\t}\n\tif len(y.Props) != 0 {\n\t\tpstats = append(pstats, y)\n\t}\n\tif len(pstats) == 0 {\n\t\tpstats = append(pstats, Propstat{\n\t\t\tStatus: http.StatusOK,\n\t\t})\n\t}\n\treturn pstats\n}\n\n// DeadPropsHolder holds the dead properties of a resource.\n//\n// Dead properties are those properties that are explicitly defined. In\n// comparison, live properties, such as DAV:getcontentlength, are implicitly\n// defined by the underlying resource, and cannot be explicitly overridden or\n// removed. See the Terminology section of\n// http://www.webdav.org/specs/rfc4918.html#rfc.section.3\n//\n// There is a whitelist of the names of live properties. This package handles\n// all live properties, and will only pass non-whitelisted names to the Patch\n// method of DeadPropsHolder implementations.\ntype DeadPropsHolder interface {\n\t// DeadProps returns a copy of the dead properties held.\n\tDeadProps() (map[xml.Name]Property, error)\n\n\t// Patch patches the dead properties held.\n\t//\n\t// Patching is atomic; either all or no patches succeed. It returns (nil,\n\t// non-nil) if an internal server error occurred, otherwise the Propstats\n\t// collectively contain one Property for each proposed patch Property. If\n\t// all patches succeed, Patch returns a slice of length one and a Propstat\n\t// element with a 200 OK HTTP status code. 
If none succeed, for reasons\n\t// other than an internal server error, no Propstat has status 200 OK.\n\t//\n\t// For more details on when various HTTP status codes apply, see\n\t// http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status\n\tPatch([]Proppatch) ([]Propstat, error)\n}\n\n// liveProps contains all supported, protected DAV: properties.\nvar liveProps = map[xml.Name]struct {\n\t// findFn implements the propfind function of this property. If nil,\n\t// it indicates a hidden property.\n\tfindFn func(context.Context, FileSystem, LockSystem, string, os.FileInfo) (string, error)\n\t// dir is true if the property applies to directories.\n\tdir bool\n}{\n\t{Space: \"DAV:\", Local: \"resourcetype\"}: {\n\t\tfindFn: findResourceType,\n\t\tdir:    true,\n\t},\n\t{Space: \"DAV:\", Local: \"displayname\"}: {\n\t\tfindFn: findDisplayName,\n\t\tdir:    true,\n\t},\n\t{Space: \"DAV:\", Local: \"getcontentlength\"}: {\n\t\tfindFn: findContentLength,\n\t\tdir:    false,\n\t},\n\t{Space: \"DAV:\", Local: \"getlastmodified\"}: {\n\t\tfindFn: findLastModified,\n\t\t// http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified\n\t\t// suggests that getlastmodified should only apply to GETable\n\t\t// resources, and this package does not support GET on directories.\n\t\t//\n\t\t// Nonetheless, some WebDAV clients expect child directories to be\n\t\t// sortable by getlastmodified date, so this value is true, not false.\n\t\t// See golang.org/issue/15334.\n\t\tdir: true,\n\t},\n\t{Space: \"DAV:\", Local: \"creationdate\"}: {\n\t\tfindFn: nil,\n\t\tdir:    false,\n\t},\n\t{Space: \"DAV:\", Local: \"getcontentlanguage\"}: {\n\t\tfindFn: nil,\n\t\tdir:    false,\n\t},\n\t{Space: \"DAV:\", Local: \"getcontenttype\"}: {\n\t\tfindFn: findContentType,\n\t\tdir:    false,\n\t},\n\t{Space: \"DAV:\", Local: \"getetag\"}: {\n\t\tfindFn: findETag,\n\t\t// findETag implements ETag as the concatenated hex values of a file's\n\t\t// modification time and size. 
This is not a reliable synchronization\n\t\t// mechanism for directories, so we do not advertise getetag for DAV\n\t\t// collections.\n\t\tdir: false,\n\t},\n\n\t// TODO: The lockdiscovery property requires LockSystem to list the\n\t// active locks on a resource.\n\t{Space: \"DAV:\", Local: \"lockdiscovery\"}: {},\n\t{Space: \"DAV:\", Local: \"supportedlock\"}: {\n\t\tfindFn: findSupportedLock,\n\t\tdir:    true,\n\t},\n}\n\n// TODO(nigeltao) merge props and allprop?\n\n// Props returns the status of the properties named pnames for resource name.\n//\n// Each Propstat has a unique status and each property name will only be part\n// of one Propstat element.\nfunc props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) {\n\tf, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tisDir := fi.IsDir()\n\n\tvar deadProps map[xml.Name]Property\n\tif dph, ok := f.(DeadPropsHolder); ok {\n\t\tdeadProps, err = dph.DeadProps()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tpstatOK := Propstat{Status: http.StatusOK}\n\tpstatNotFound := Propstat{Status: http.StatusNotFound}\n\tfor _, pn := range pnames {\n\t\t// If this file has dead properties, check if they contain pn.\n\t\tif dp, ok := deadProps[pn]; ok {\n\t\t\tpstatOK.Props = append(pstatOK.Props, dp)\n\t\t\tcontinue\n\t\t}\n\t\t// Otherwise, it must either be a live property or we don't know it.\n\t\tif prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) {\n\t\t\tinnerXML, err := prop.findFn(ctx, fs, ls, name, fi)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tpstatOK.Props = append(pstatOK.Props, Property{\n\t\t\t\tXMLName:  pn,\n\t\t\t\tInnerXML: []byte(innerXML),\n\t\t\t})\n\t\t} else {\n\t\t\tpstatNotFound.Props = append(pstatNotFound.Props, Property{\n\t\t\t\tXMLName: 
pn,\n\t\t\t})\n\t\t}\n\t}\n\treturn makePropstats(pstatOK, pstatNotFound), nil\n}\n\n// Propnames returns the property names defined for resource name.\nfunc propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) {\n\tf, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tisDir := fi.IsDir()\n\n\tvar deadProps map[xml.Name]Property\n\tif dph, ok := f.(DeadPropsHolder); ok {\n\t\tdeadProps, err = dph.DeadProps()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tpnames := make([]xml.Name, 0, len(liveProps)+len(deadProps))\n\tfor pn, prop := range liveProps {\n\t\tif prop.findFn != nil && (prop.dir || !isDir) {\n\t\t\tpnames = append(pnames, pn)\n\t\t}\n\t}\n\tfor pn := range deadProps {\n\t\tpnames = append(pnames, pn)\n\t}\n\treturn pnames, nil\n}\n\n// Allprop returns the properties defined for resource name and the properties\n// named in include.\n//\n// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined\n// within the RFC plus dead properties. Other live properties should only be\n// returned if they are named in 'include'.\n//\n// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND\nfunc allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) {\n\tpnames, err := propnames(ctx, fs, ls, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Add names from include if they are not already covered in pnames.\n\tnameset := make(map[xml.Name]bool)\n\tfor _, pn := range pnames {\n\t\tnameset[pn] = true\n\t}\n\tfor _, pn := range include {\n\t\tif !nameset[pn] {\n\t\t\tpnames = append(pnames, pn)\n\t\t}\n\t}\n\treturn props(ctx, fs, ls, name, pnames)\n}\n\n// Patch patches the properties of resource name. 
The return values are\n// constrained in the same manner as DeadPropsHolder.Patch.\nfunc patch(ctx context.Context, fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) {\n\tconflict := false\nloop:\n\tfor _, patch := range patches {\n\t\tfor _, p := range patch.Props {\n\t\t\tif _, ok := liveProps[p.XMLName]; ok {\n\t\t\t\tconflict = true\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t}\n\t}\n\tif conflict {\n\t\tpstatForbidden := Propstat{\n\t\t\tStatus:   http.StatusForbidden,\n\t\t\tXMLError: `<D:cannot-modify-protected-property xmlns:D=\"DAV:\"/>`,\n\t\t}\n\t\tpstatFailedDep := Propstat{\n\t\t\tStatus: StatusFailedDependency,\n\t\t}\n\t\tfor _, patch := range patches {\n\t\t\tfor _, p := range patch.Props {\n\t\t\t\tif _, ok := liveProps[p.XMLName]; ok {\n\t\t\t\t\tpstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName})\n\t\t\t\t} else {\n\t\t\t\t\tpstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn makePropstats(pstatForbidden, pstatFailedDep), nil\n\t}\n\n\tf, err := fs.OpenFile(ctx, name, os.O_RDWR, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tif dph, ok := f.(DeadPropsHolder); ok {\n\t\tret, err := dph.Patch(patches)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that\n\t\t// \"The contents of the prop XML element must only list the names of\n\t\t// properties to which the result in the status element applies.\"\n\t\tfor _, pstat := range ret {\n\t\t\tfor i, p := range pstat.Props {\n\t\t\t\tpstat.Props[i] = Property{XMLName: p.XMLName}\n\t\t\t}\n\t\t}\n\t\treturn ret, nil\n\t}\n\t// The file doesn't implement the optional DeadPropsHolder interface, so\n\t// all patches are forbidden.\n\tpstat := Propstat{Status: http.StatusForbidden}\n\tfor _, patch := range patches {\n\t\tfor _, p := range patch.Props {\n\t\t\tpstat.Props = append(pstat.Props, 
Property{XMLName: p.XMLName})\n\t\t}\n\t}\n\treturn []Propstat{pstat}, nil\n}\n\nfunc escapeXML(s string) string {\n\tfor i := 0; i < len(s); i++ {\n\t\t// As an optimization, if s contains only ASCII letters, digits or a\n\t\t// few special characters, the escaped value is s itself and we don't\n\t\t// need to allocate a buffer and convert between string and []byte.\n\t\tswitch c := s[i]; {\n\t\tcase c == ' ' || c == '_' ||\n\t\t\t('+' <= c && c <= '9') || // Digits as well as + , - . and /\n\t\t\t('A' <= c && c <= 'Z') ||\n\t\t\t('a' <= c && c <= 'z'):\n\t\t\tcontinue\n\t\t}\n\t\t// Otherwise, go through the full escaping process.\n\t\tvar buf bytes.Buffer\n\t\txml.EscapeText(&buf, []byte(s))\n\t\treturn buf.String()\n\t}\n\treturn s\n}\n\nfunc findResourceType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {\n\tif fi.IsDir() {\n\t\treturn `<D:collection xmlns:D=\"DAV:\"/>`, nil\n\t}\n\treturn \"\", nil\n}\n\nfunc findDisplayName(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {\n\tif slashClean(name) == \"/\" {\n\t\t// Hide the real name of a possibly prefixed root directory.\n\t\treturn \"\", nil\n\t}\n\treturn escapeXML(fi.Name()), nil\n}\n\nfunc findContentLength(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {\n\treturn strconv.FormatInt(fi.Size(), 10), nil\n}\n\nfunc findLastModified(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {\n\treturn fi.ModTime().Format(http.TimeFormat), nil\n}\n\nfunc findContentType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {\n\tf, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer f.Close()\n\t// This implementation is based on serveContent's code in the standard net/http package.\n\tctype := 
mime.TypeByExtension(filepath.Ext(name))\n\tif ctype != \"\" {\n\t\treturn ctype, nil\n\t}\n\t// Read a chunk to decide between utf-8 text and binary.\n\tvar buf [512]byte\n\tn, err := io.ReadFull(f, buf[:])\n\tif err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {\n\t\treturn \"\", err\n\t}\n\tctype = http.DetectContentType(buf[:n])\n\t// Rewind file.\n\t_, err = f.Seek(0, os.SEEK_SET)\n\treturn ctype, err\n}\n\nfunc findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {\n\t// The Apache http 2.4 web server by default concatenates the\n\t// modification time and size of a file. We replicate the heuristic\n\t// with nanosecond granularity.\n\treturn fmt.Sprintf(`\"%x%x\"`, fi.ModTime().UnixNano(), fi.Size()), nil\n}\n\nfunc findSupportedLock(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {\n\treturn `` +\n\t\t`<D:lockentry xmlns:D=\"DAV:\">` +\n\t\t`<D:lockscope><D:exclusive/></D:lockscope>` +\n\t\t`<D:locktype><D:write/></D:locktype>` +\n\t\t`</D:lockentry>`, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/prop_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage webdav\n\nimport (\n\t\"encoding/xml\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"os\"\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n\n\t\"golang.org/x/net/context\"\n)\n\nfunc TestMemPS(t *testing.T) {\n\tctx := context.Background()\n\t// calcProps calculates the getlastmodified and getetag DAV: property\n\t// values in pstats for resource name in file-system fs.\n\tcalcProps := func(name string, fs FileSystem, ls LockSystem, pstats []Propstat) error {\n\t\tfi, err := fs.Stat(ctx, name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, pst := range pstats {\n\t\t\tfor i, p := range pst.Props {\n\t\t\t\tswitch p.XMLName {\n\t\t\t\tcase xml.Name{Space: \"DAV:\", Local: \"getlastmodified\"}:\n\t\t\t\t\tp.InnerXML = []byte(fi.ModTime().Format(http.TimeFormat))\n\t\t\t\t\tpst.Props[i] = p\n\t\t\t\tcase xml.Name{Space: \"DAV:\", Local: \"getetag\"}:\n\t\t\t\t\tif fi.IsDir() {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tetag, err := findETag(ctx, fs, ls, name, fi)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tp.InnerXML = []byte(etag)\n\t\t\t\t\tpst.Props[i] = p\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tconst (\n\t\tlockEntry = `` +\n\t\t\t`<D:lockentry xmlns:D=\"DAV:\">` +\n\t\t\t`<D:lockscope><D:exclusive/></D:lockscope>` +\n\t\t\t`<D:locktype><D:write/></D:locktype>` +\n\t\t\t`</D:lockentry>`\n\t\tstatForbiddenError = `<D:cannot-modify-protected-property xmlns:D=\"DAV:\"/>`\n\t)\n\n\ttype propOp struct {\n\t\top            string\n\t\tname          string\n\t\tpnames        []xml.Name\n\t\tpatches       []Proppatch\n\t\twantPnames    []xml.Name\n\t\twantPropstats []Propstat\n\t}\n\n\ttestCases := []struct {\n\t\tdesc        string\n\t\tnoDeadProps bool\n\t\tbuildfs     []string\n\t\tpropOp      []propOp\n\t}{{\n\t\tdesc:    
\"propname\",\n\t\tbuildfs: []string{\"mkdir /dir\", \"touch /file\"},\n\t\tpropOp: []propOp{{\n\t\t\top:   \"propname\",\n\t\t\tname: \"/dir\",\n\t\t\twantPnames: []xml.Name{\n\t\t\t\t{Space: \"DAV:\", Local: \"resourcetype\"},\n\t\t\t\t{Space: \"DAV:\", Local: \"displayname\"},\n\t\t\t\t{Space: \"DAV:\", Local: \"supportedlock\"},\n\t\t\t\t{Space: \"DAV:\", Local: \"getlastmodified\"},\n\t\t\t},\n\t\t}, {\n\t\t\top:   \"propname\",\n\t\t\tname: \"/file\",\n\t\t\twantPnames: []xml.Name{\n\t\t\t\t{Space: \"DAV:\", Local: \"resourcetype\"},\n\t\t\t\t{Space: \"DAV:\", Local: \"displayname\"},\n\t\t\t\t{Space: \"DAV:\", Local: \"getcontentlength\"},\n\t\t\t\t{Space: \"DAV:\", Local: \"getlastmodified\"},\n\t\t\t\t{Space: \"DAV:\", Local: \"getcontenttype\"},\n\t\t\t\t{Space: \"DAV:\", Local: \"getetag\"},\n\t\t\t\t{Space: \"DAV:\", Local: \"supportedlock\"},\n\t\t\t},\n\t\t}},\n\t}, {\n\t\tdesc:    \"allprop dir and file\",\n\t\tbuildfs: []string{\"mkdir /dir\", \"write /file foobarbaz\"},\n\t\tpropOp: []propOp{{\n\t\t\top:   \"allprop\",\n\t\t\tname: \"/dir\",\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusOK,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"resourcetype\"},\n\t\t\t\t\tInnerXML: []byte(`<D:collection xmlns:D=\"DAV:\"/>`),\n\t\t\t\t}, {\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"displayname\"},\n\t\t\t\t\tInnerXML: []byte(\"dir\"),\n\t\t\t\t}, {\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"getlastmodified\"},\n\t\t\t\t\tInnerXML: nil, // Calculated during test.\n\t\t\t\t}, {\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"supportedlock\"},\n\t\t\t\t\tInnerXML: []byte(lockEntry),\n\t\t\t\t}},\n\t\t\t}},\n\t\t}, {\n\t\t\top:   \"allprop\",\n\t\t\tname: \"/file\",\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusOK,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"resourcetype\"},\n\t\t\t\t\tInnerXML: 
[]byte(\"\"),\n\t\t\t\t}, {\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"displayname\"},\n\t\t\t\t\tInnerXML: []byte(\"file\"),\n\t\t\t\t}, {\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"getcontentlength\"},\n\t\t\t\t\tInnerXML: []byte(\"9\"),\n\t\t\t\t}, {\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"getlastmodified\"},\n\t\t\t\t\tInnerXML: nil, // Calculated during test.\n\t\t\t\t}, {\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"getcontenttype\"},\n\t\t\t\t\tInnerXML: []byte(\"text/plain; charset=utf-8\"),\n\t\t\t\t}, {\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"getetag\"},\n\t\t\t\t\tInnerXML: nil, // Calculated during test.\n\t\t\t\t}, {\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"supportedlock\"},\n\t\t\t\t\tInnerXML: []byte(lockEntry),\n\t\t\t\t}},\n\t\t\t}},\n\t\t}, {\n\t\t\top:   \"allprop\",\n\t\t\tname: \"/file\",\n\t\t\tpnames: []xml.Name{\n\t\t\t\t{\"DAV:\", \"resourcetype\"},\n\t\t\t\t{\"foo\", \"bar\"},\n\t\t\t},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusOK,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"resourcetype\"},\n\t\t\t\t\tInnerXML: []byte(\"\"),\n\t\t\t\t}, {\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"displayname\"},\n\t\t\t\t\tInnerXML: []byte(\"file\"),\n\t\t\t\t}, {\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"getcontentlength\"},\n\t\t\t\t\tInnerXML: []byte(\"9\"),\n\t\t\t\t}, {\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"getlastmodified\"},\n\t\t\t\t\tInnerXML: nil, // Calculated during test.\n\t\t\t\t}, {\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"getcontenttype\"},\n\t\t\t\t\tInnerXML: []byte(\"text/plain; charset=utf-8\"),\n\t\t\t\t}, {\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"getetag\"},\n\t\t\t\t\tInnerXML: nil, // Calculated during test.\n\t\t\t\t}, {\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: 
\"supportedlock\"},\n\t\t\t\t\tInnerXML: []byte(lockEntry),\n\t\t\t\t}}}, {\n\t\t\t\tStatus: http.StatusNotFound,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t}}},\n\t\t\t},\n\t\t}},\n\t}, {\n\t\tdesc:    \"propfind DAV:resourcetype\",\n\t\tbuildfs: []string{\"mkdir /dir\", \"touch /file\"},\n\t\tpropOp: []propOp{{\n\t\t\top:     \"propfind\",\n\t\t\tname:   \"/dir\",\n\t\t\tpnames: []xml.Name{{\"DAV:\", \"resourcetype\"}},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusOK,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"resourcetype\"},\n\t\t\t\t\tInnerXML: []byte(`<D:collection xmlns:D=\"DAV:\"/>`),\n\t\t\t\t}},\n\t\t\t}},\n\t\t}, {\n\t\t\top:     \"propfind\",\n\t\t\tname:   \"/file\",\n\t\t\tpnames: []xml.Name{{\"DAV:\", \"resourcetype\"}},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusOK,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"resourcetype\"},\n\t\t\t\t\tInnerXML: []byte(\"\"),\n\t\t\t\t}},\n\t\t\t}},\n\t\t}},\n\t}, {\n\t\tdesc:    \"propfind unsupported DAV properties\",\n\t\tbuildfs: []string{\"mkdir /dir\"},\n\t\tpropOp: []propOp{{\n\t\t\top:     \"propfind\",\n\t\t\tname:   \"/dir\",\n\t\t\tpnames: []xml.Name{{\"DAV:\", \"getcontentlanguage\"}},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusNotFound,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"DAV:\", Local: \"getcontentlanguage\"},\n\t\t\t\t}},\n\t\t\t}},\n\t\t}, {\n\t\t\top:     \"propfind\",\n\t\t\tname:   \"/dir\",\n\t\t\tpnames: []xml.Name{{\"DAV:\", \"creationdate\"}},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusNotFound,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"DAV:\", Local: \"creationdate\"},\n\t\t\t\t}},\n\t\t\t}},\n\t\t}},\n\t}, {\n\t\tdesc:    \"propfind getetag for files but not for directories\",\n\t\tbuildfs: []string{\"mkdir /dir\", 
\"touch /file\"},\n\t\tpropOp: []propOp{{\n\t\t\top:     \"propfind\",\n\t\t\tname:   \"/dir\",\n\t\t\tpnames: []xml.Name{{\"DAV:\", \"getetag\"}},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusNotFound,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"DAV:\", Local: \"getetag\"},\n\t\t\t\t}},\n\t\t\t}},\n\t\t}, {\n\t\t\top:     \"propfind\",\n\t\t\tname:   \"/file\",\n\t\t\tpnames: []xml.Name{{\"DAV:\", \"getetag\"}},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusOK,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"getetag\"},\n\t\t\t\t\tInnerXML: nil, // Calculated during test.\n\t\t\t\t}},\n\t\t\t}},\n\t\t}},\n\t}, {\n\t\tdesc:        \"proppatch property on no-dead-properties file system\",\n\t\tbuildfs:     []string{\"mkdir /dir\"},\n\t\tnoDeadProps: true,\n\t\tpropOp: []propOp{{\n\t\t\top:   \"proppatch\",\n\t\t\tname: \"/dir\",\n\t\t\tpatches: []Proppatch{{\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t}},\n\t\t\t}},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusForbidden,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t}},\n\t\t\t}},\n\t\t}, {\n\t\t\top:   \"proppatch\",\n\t\t\tname: \"/dir\",\n\t\t\tpatches: []Proppatch{{\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"DAV:\", Local: \"getetag\"},\n\t\t\t\t}},\n\t\t\t}},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus:   http.StatusForbidden,\n\t\t\t\tXMLError: statForbiddenError,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"DAV:\", Local: \"getetag\"},\n\t\t\t\t}},\n\t\t\t}},\n\t\t}},\n\t}, {\n\t\tdesc:    \"proppatch dead property\",\n\t\tbuildfs: []string{\"mkdir /dir\"},\n\t\tpropOp: []propOp{{\n\t\t\top:   \"proppatch\",\n\t\t\tname: \"/dir\",\n\t\t\tpatches: []Proppatch{{\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName:  xml.Name{Space: \"foo\", 
Local: \"bar\"},\n\t\t\t\t\tInnerXML: []byte(\"baz\"),\n\t\t\t\t}},\n\t\t\t}},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusOK,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t}},\n\t\t\t}},\n\t\t}, {\n\t\t\top:     \"propfind\",\n\t\t\tname:   \"/dir\",\n\t\t\tpnames: []xml.Name{{Space: \"foo\", Local: \"bar\"}},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusOK,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName:  xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t\tInnerXML: []byte(\"baz\"),\n\t\t\t\t}},\n\t\t\t}},\n\t\t}},\n\t}, {\n\t\tdesc:    \"proppatch dead property with failed dependency\",\n\t\tbuildfs: []string{\"mkdir /dir\"},\n\t\tpropOp: []propOp{{\n\t\t\top:   \"proppatch\",\n\t\t\tname: \"/dir\",\n\t\t\tpatches: []Proppatch{{\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName:  xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t\tInnerXML: []byte(\"baz\"),\n\t\t\t\t}},\n\t\t\t}, {\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName:  xml.Name{Space: \"DAV:\", Local: \"displayname\"},\n\t\t\t\t\tInnerXML: []byte(\"xxx\"),\n\t\t\t\t}},\n\t\t\t}},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus:   http.StatusForbidden,\n\t\t\t\tXMLError: statForbiddenError,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"DAV:\", Local: \"displayname\"},\n\t\t\t\t}},\n\t\t\t}, {\n\t\t\t\tStatus: StatusFailedDependency,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t}},\n\t\t\t}},\n\t\t}, {\n\t\t\top:     \"propfind\",\n\t\t\tname:   \"/dir\",\n\t\t\tpnames: []xml.Name{{Space: \"foo\", Local: \"bar\"}},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusNotFound,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t}},\n\t\t\t}},\n\t\t}},\n\t}, {\n\t\tdesc:    \"proppatch remove dead property\",\n\t\tbuildfs: []string{\"mkdir /dir\"},\n\t\tpropOp: 
[]propOp{{\n\t\t\top:   \"proppatch\",\n\t\t\tname: \"/dir\",\n\t\t\tpatches: []Proppatch{{\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName:  xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t\tInnerXML: []byte(\"baz\"),\n\t\t\t\t}, {\n\t\t\t\t\tXMLName:  xml.Name{Space: \"spam\", Local: \"ham\"},\n\t\t\t\t\tInnerXML: []byte(\"eggs\"),\n\t\t\t\t}},\n\t\t\t}},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusOK,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t}, {\n\t\t\t\t\tXMLName: xml.Name{Space: \"spam\", Local: \"ham\"},\n\t\t\t\t}},\n\t\t\t}},\n\t\t}, {\n\t\t\top:   \"propfind\",\n\t\t\tname: \"/dir\",\n\t\t\tpnames: []xml.Name{\n\t\t\t\t{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t{Space: \"spam\", Local: \"ham\"},\n\t\t\t},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusOK,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName:  xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t\tInnerXML: []byte(\"baz\"),\n\t\t\t\t}, {\n\t\t\t\t\tXMLName:  xml.Name{Space: \"spam\", Local: \"ham\"},\n\t\t\t\t\tInnerXML: []byte(\"eggs\"),\n\t\t\t\t}},\n\t\t\t}},\n\t\t}, {\n\t\t\top:   \"proppatch\",\n\t\t\tname: \"/dir\",\n\t\t\tpatches: []Proppatch{{\n\t\t\t\tRemove: true,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t}},\n\t\t\t}},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusOK,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t}},\n\t\t\t}},\n\t\t}, {\n\t\t\top:   \"propfind\",\n\t\t\tname: \"/dir\",\n\t\t\tpnames: []xml.Name{\n\t\t\t\t{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t{Space: \"spam\", Local: \"ham\"},\n\t\t\t},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusNotFound,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t}},\n\t\t\t}, {\n\t\t\t\tStatus: http.StatusOK,\n\t\t\t\tProps: 
[]Property{{\n\t\t\t\t\tXMLName:  xml.Name{Space: \"spam\", Local: \"ham\"},\n\t\t\t\t\tInnerXML: []byte(\"eggs\"),\n\t\t\t\t}},\n\t\t\t}},\n\t\t}},\n\t}, {\n\t\tdesc:    \"propname with dead property\",\n\t\tbuildfs: []string{\"touch /file\"},\n\t\tpropOp: []propOp{{\n\t\t\top:   \"proppatch\",\n\t\t\tname: \"/file\",\n\t\t\tpatches: []Proppatch{{\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName:  xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t\tInnerXML: []byte(\"baz\"),\n\t\t\t\t}},\n\t\t\t}},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusOK,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t}},\n\t\t\t}},\n\t\t}, {\n\t\t\top:   \"propname\",\n\t\t\tname: \"/file\",\n\t\t\twantPnames: []xml.Name{\n\t\t\t\t{Space: \"DAV:\", Local: \"resourcetype\"},\n\t\t\t\t{Space: \"DAV:\", Local: \"displayname\"},\n\t\t\t\t{Space: \"DAV:\", Local: \"getcontentlength\"},\n\t\t\t\t{Space: \"DAV:\", Local: \"getlastmodified\"},\n\t\t\t\t{Space: \"DAV:\", Local: \"getcontenttype\"},\n\t\t\t\t{Space: \"DAV:\", Local: \"getetag\"},\n\t\t\t\t{Space: \"DAV:\", Local: \"supportedlock\"},\n\t\t\t\t{Space: \"foo\", Local: \"bar\"},\n\t\t\t},\n\t\t}},\n\t}, {\n\t\tdesc:    \"proppatch remove unknown dead property\",\n\t\tbuildfs: []string{\"mkdir /dir\"},\n\t\tpropOp: []propOp{{\n\t\t\top:   \"proppatch\",\n\t\t\tname: \"/dir\",\n\t\t\tpatches: []Proppatch{{\n\t\t\t\tRemove: true,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t}},\n\t\t\t}},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusOK,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"foo\", Local: \"bar\"},\n\t\t\t\t}},\n\t\t\t}},\n\t\t}},\n\t}, {\n\t\tdesc:    \"bad: propfind unknown property\",\n\t\tbuildfs: []string{\"mkdir /dir\"},\n\t\tpropOp: []propOp{{\n\t\t\top:     \"propfind\",\n\t\t\tname:   \"/dir\",\n\t\t\tpnames: []xml.Name{{\"foo:\", 
\"bar\"}},\n\t\t\twantPropstats: []Propstat{{\n\t\t\t\tStatus: http.StatusNotFound,\n\t\t\t\tProps: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"foo:\", Local: \"bar\"},\n\t\t\t\t}},\n\t\t\t}},\n\t\t}},\n\t}}\n\n\tfor _, tc := range testCases {\n\t\tfs, err := buildTestFS(tc.buildfs)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%s: cannot create test filesystem: %v\", tc.desc, err)\n\t\t}\n\t\tif tc.noDeadProps {\n\t\t\tfs = noDeadPropsFS{fs}\n\t\t}\n\t\tls := NewMemLS()\n\t\tfor _, op := range tc.propOp {\n\t\t\tdesc := fmt.Sprintf(\"%s: %s %s\", tc.desc, op.op, op.name)\n\t\t\tif err = calcProps(op.name, fs, ls, op.wantPropstats); err != nil {\n\t\t\t\tt.Fatalf(\"%s: calcProps: %v\", desc, err)\n\t\t\t}\n\n\t\t\t// Call property system.\n\t\t\tvar propstats []Propstat\n\t\t\tswitch op.op {\n\t\t\tcase \"propname\":\n\t\t\t\tpnames, err := propnames(ctx, fs, ls, op.name)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"%s: got error %v, want nil\", desc, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tsort.Sort(byXMLName(pnames))\n\t\t\t\tsort.Sort(byXMLName(op.wantPnames))\n\t\t\t\tif !reflect.DeepEqual(pnames, op.wantPnames) {\n\t\t\t\t\tt.Errorf(\"%s: pnames\\ngot  %q\\nwant %q\", desc, pnames, op.wantPnames)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\tcase \"allprop\":\n\t\t\t\tpropstats, err = allprop(ctx, fs, ls, op.name, op.pnames)\n\t\t\tcase \"propfind\":\n\t\t\t\tpropstats, err = props(ctx, fs, ls, op.name, op.pnames)\n\t\t\tcase \"proppatch\":\n\t\t\t\tpropstats, err = patch(ctx, fs, ls, op.name, op.patches)\n\t\t\tdefault:\n\t\t\t\tt.Fatalf(\"%s: %s not implemented\", desc, op.op)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"%s: got error %v, want nil\", desc, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Compare return values from allprop, propfind or proppatch.\n\t\t\tfor _, pst := range propstats {\n\t\t\t\tsort.Sort(byPropname(pst.Props))\n\t\t\t}\n\t\t\tfor _, pst := range op.wantPropstats 
{\n\t\t\t\tsort.Sort(byPropname(pst.Props))\n\t\t\t}\n\t\t\tsort.Sort(byStatus(propstats))\n\t\t\tsort.Sort(byStatus(op.wantPropstats))\n\t\t\tif !reflect.DeepEqual(propstats, op.wantPropstats) {\n\t\t\t\tt.Errorf(\"%s: propstat\\ngot  %q\\nwant %q\", desc, propstats, op.wantPropstats)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc cmpXMLName(a, b xml.Name) bool {\n\tif a.Space != b.Space {\n\t\treturn a.Space < b.Space\n\t}\n\treturn a.Local < b.Local\n}\n\ntype byXMLName []xml.Name\n\nfunc (b byXMLName) Len() int           { return len(b) }\nfunc (b byXMLName) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }\nfunc (b byXMLName) Less(i, j int) bool { return cmpXMLName(b[i], b[j]) }\n\ntype byPropname []Property\n\nfunc (b byPropname) Len() int           { return len(b) }\nfunc (b byPropname) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }\nfunc (b byPropname) Less(i, j int) bool { return cmpXMLName(b[i].XMLName, b[j].XMLName) }\n\ntype byStatus []Propstat\n\nfunc (b byStatus) Len() int           { return len(b) }\nfunc (b byStatus) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }\nfunc (b byStatus) Less(i, j int) bool { return b[i].Status < b[j].Status }\n\ntype noDeadPropsFS struct {\n\tFileSystem\n}\n\nfunc (fs noDeadPropsFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {\n\tf, err := fs.FileSystem.OpenFile(ctx, name, flag, perm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn noDeadPropsFile{f}, nil\n}\n\n// noDeadPropsFile wraps a File but strips any optional DeadPropsHolder methods\n// provided by the underlying File implementation.\ntype noDeadPropsFile struct {\n\tf File\n}\n\nfunc (f noDeadPropsFile) Close() error                              { return f.f.Close() }\nfunc (f noDeadPropsFile) Read(p []byte) (int, error)                { return f.f.Read(p) }\nfunc (f noDeadPropsFile) Readdir(count int) ([]os.FileInfo, error)  { return f.f.Readdir(count) }\nfunc (f noDeadPropsFile) Seek(off int64, whence int) (int64, error) { 
return f.f.Seek(off, whence) }\nfunc (f noDeadPropsFile) Stat() (os.FileInfo, error)                { return f.f.Stat() }\nfunc (f noDeadPropsFile) Write(p []byte) (int, error)               { return f.f.Write(p) }\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/webdav.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package webdav provides a WebDAV server implementation.\npackage webdav // import \"golang.org/x/net/webdav\"\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Handler struct {\n\t// Prefix is the URL path prefix to strip from WebDAV resource paths.\n\tPrefix string\n\t// FileSystem is the virtual file system.\n\tFileSystem FileSystem\n\t// LockSystem is the lock management system.\n\tLockSystem LockSystem\n\t// Logger is an optional error logger. If non-nil, it will be called\n\t// for all HTTP requests.\n\tLogger func(*http.Request, error)\n}\n\nfunc (h *Handler) stripPrefix(p string) (string, int, error) {\n\tif h.Prefix == \"\" {\n\t\treturn p, http.StatusOK, nil\n\t}\n\tif r := strings.TrimPrefix(p, h.Prefix); len(r) < len(p) {\n\t\treturn r, http.StatusOK, nil\n\t}\n\treturn p, http.StatusNotFound, errPrefixMismatch\n}\n\nfunc (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tstatus, err := http.StatusBadRequest, errUnsupportedMethod\n\tif h.FileSystem == nil {\n\t\tstatus, err = http.StatusInternalServerError, errNoFileSystem\n\t} else if h.LockSystem == nil {\n\t\tstatus, err = http.StatusInternalServerError, errNoLockSystem\n\t} else {\n\t\tswitch r.Method {\n\t\tcase \"OPTIONS\":\n\t\t\tstatus, err = h.handleOptions(w, r)\n\t\tcase \"GET\", \"HEAD\", \"POST\":\n\t\t\tstatus, err = h.handleGetHeadPost(w, r)\n\t\tcase \"DELETE\":\n\t\t\tstatus, err = h.handleDelete(w, r)\n\t\tcase \"PUT\":\n\t\t\tstatus, err = h.handlePut(w, r)\n\t\tcase \"MKCOL\":\n\t\t\tstatus, err = h.handleMkcol(w, r)\n\t\tcase \"COPY\", \"MOVE\":\n\t\t\tstatus, err = h.handleCopyMove(w, r)\n\t\tcase \"LOCK\":\n\t\t\tstatus, err = h.handleLock(w, r)\n\t\tcase \"UNLOCK\":\n\t\t\tstatus, err = 
h.handleUnlock(w, r)\n\t\tcase \"PROPFIND\":\n\t\t\tstatus, err = h.handlePropfind(w, r)\n\t\tcase \"PROPPATCH\":\n\t\t\tstatus, err = h.handleProppatch(w, r)\n\t\t}\n\t}\n\n\tif status != 0 {\n\t\tw.WriteHeader(status)\n\t\tif status != http.StatusNoContent {\n\t\t\tw.Write([]byte(StatusText(status)))\n\t\t}\n\t}\n\tif h.Logger != nil {\n\t\th.Logger(r, err)\n\t}\n}\n\nfunc (h *Handler) lock(now time.Time, root string) (token string, status int, err error) {\n\ttoken, err = h.LockSystem.Create(now, LockDetails{\n\t\tRoot:      root,\n\t\tDuration:  infiniteTimeout,\n\t\tZeroDepth: true,\n\t})\n\tif err != nil {\n\t\tif err == ErrLocked {\n\t\t\treturn \"\", StatusLocked, err\n\t\t}\n\t\treturn \"\", http.StatusInternalServerError, err\n\t}\n\treturn token, 0, nil\n}\n\nfunc (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) {\n\thdr := r.Header.Get(\"If\")\n\tif hdr == \"\" {\n\t\t// An empty If header means that the client hasn't previously created locks.\n\t\t// Even if this client doesn't care about locks, we still need to check that\n\t\t// the resources aren't locked by another client, so we create temporary\n\t\t// locks that would conflict with another client's locks. 
These temporary\n\t\t// locks are unlocked at the end of the HTTP request.\n\t\tnow, srcToken, dstToken := time.Now(), \"\", \"\"\n\t\tif src != \"\" {\n\t\t\tsrcToken, status, err = h.lock(now, src)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, status, err\n\t\t\t}\n\t\t}\n\t\tif dst != \"\" {\n\t\t\tdstToken, status, err = h.lock(now, dst)\n\t\t\tif err != nil {\n\t\t\t\tif srcToken != \"\" {\n\t\t\t\t\th.LockSystem.Unlock(now, srcToken)\n\t\t\t\t}\n\t\t\t\treturn nil, status, err\n\t\t\t}\n\t\t}\n\n\t\treturn func() {\n\t\t\tif dstToken != \"\" {\n\t\t\t\th.LockSystem.Unlock(now, dstToken)\n\t\t\t}\n\t\t\tif srcToken != \"\" {\n\t\t\t\th.LockSystem.Unlock(now, srcToken)\n\t\t\t}\n\t\t}, 0, nil\n\t}\n\n\tih, ok := parseIfHeader(hdr)\n\tif !ok {\n\t\treturn nil, http.StatusBadRequest, errInvalidIfHeader\n\t}\n\t// ih is a disjunction (OR) of ifLists, so any ifList will do.\n\tfor _, l := range ih.lists {\n\t\tlsrc := l.resourceTag\n\t\tif lsrc == \"\" {\n\t\t\tlsrc = src\n\t\t} else {\n\t\t\tu, err := url.Parse(lsrc)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif u.Host != r.Host {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlsrc, status, err = h.stripPrefix(u.Path)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, status, err\n\t\t\t}\n\t\t}\n\t\trelease, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...)\n\t\tif err == ErrConfirmationFailed {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, http.StatusInternalServerError, err\n\t\t}\n\t\treturn release, 0, nil\n\t}\n\t// Section 10.4.1 says that \"If this header is evaluated and all state lists\n\t// fail, then the request must fail with a 412 (Precondition Failed) status.\"\n\t// We follow the spec even though the cond_put_corrupt_token test case from\n\t// the litmus test warns on seeing a 412 instead of a 423 (Locked).\n\treturn nil, http.StatusPreconditionFailed, ErrLocked\n}\n\nfunc (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) 
{\n\treqPath, status, err := h.stripPrefix(r.URL.Path)\n\tif err != nil {\n\t\treturn status, err\n\t}\n\tctx := getContext(r)\n\tallow := \"OPTIONS, LOCK, PUT, MKCOL\"\n\tif fi, err := h.FileSystem.Stat(ctx, reqPath); err == nil {\n\t\tif fi.IsDir() {\n\t\t\tallow = \"OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND\"\n\t\t} else {\n\t\t\tallow = \"OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT\"\n\t\t}\n\t}\n\tw.Header().Set(\"Allow\", allow)\n\t// http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes\n\tw.Header().Set(\"DAV\", \"1, 2\")\n\t// http://msdn.microsoft.com/en-au/library/cc250217.aspx\n\tw.Header().Set(\"MS-Author-Via\", \"DAV\")\n\treturn 0, nil\n}\n\nfunc (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) {\n\treqPath, status, err := h.stripPrefix(r.URL.Path)\n\tif err != nil {\n\t\treturn status, err\n\t}\n\t// TODO: check locks for read-only access??\n\tctx := getContext(r)\n\tf, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn http.StatusNotFound, err\n\t}\n\tdefer f.Close()\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn http.StatusNotFound, err\n\t}\n\tif fi.IsDir() {\n\t\treturn http.StatusMethodNotAllowed, nil\n\t}\n\tetag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)\n\tif err != nil {\n\t\treturn http.StatusInternalServerError, err\n\t}\n\tw.Header().Set(\"ETag\", etag)\n\t// Let ServeContent determine the Content-Type header.\n\thttp.ServeContent(w, r, reqPath, fi.ModTime(), f)\n\treturn 0, nil\n}\n\nfunc (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status int, err error) {\n\treqPath, status, err := h.stripPrefix(r.URL.Path)\n\tif err != nil {\n\t\treturn status, err\n\t}\n\trelease, status, err := h.confirmLocks(r, reqPath, \"\")\n\tif err != nil {\n\t\treturn status, err\n\t}\n\tdefer release()\n\n\tctx := getContext(r)\n\n\t// TODO: return 
MultiStatus where appropriate.\n\n\t// \"godoc os RemoveAll\" says that \"If the path does not exist, RemoveAll\n\t// returns nil (no error).\" WebDAV semantics are that it should return a\n\t// \"404 Not Found\". We therefore have to Stat before we RemoveAll.\n\tif _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn http.StatusNotFound, err\n\t\t}\n\t\treturn http.StatusMethodNotAllowed, err\n\t}\n\tif err := h.FileSystem.RemoveAll(ctx, reqPath); err != nil {\n\t\treturn http.StatusMethodNotAllowed, err\n\t}\n\treturn http.StatusNoContent, nil\n}\n\nfunc (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, err error) {\n\treqPath, status, err := h.stripPrefix(r.URL.Path)\n\tif err != nil {\n\t\treturn status, err\n\t}\n\trelease, status, err := h.confirmLocks(r, reqPath, \"\")\n\tif err != nil {\n\t\treturn status, err\n\t}\n\tdefer release()\n\t// TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz'\n\t// comments in http.checkEtag.\n\tctx := getContext(r)\n\n\tf, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)\n\tif err != nil {\n\t\treturn http.StatusNotFound, err\n\t}\n\t_, copyErr := io.Copy(f, r.Body)\n\tfi, statErr := f.Stat()\n\tcloseErr := f.Close()\n\t// TODO(rost): Returning 405 Method Not Allowed might not be appropriate.\n\tif copyErr != nil {\n\t\treturn http.StatusMethodNotAllowed, copyErr\n\t}\n\tif statErr != nil {\n\t\treturn http.StatusMethodNotAllowed, statErr\n\t}\n\tif closeErr != nil {\n\t\treturn http.StatusMethodNotAllowed, closeErr\n\t}\n\tetag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)\n\tif err != nil {\n\t\treturn http.StatusInternalServerError, err\n\t}\n\tw.Header().Set(\"ETag\", etag)\n\treturn http.StatusCreated, nil\n}\n\nfunc (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status int, err error) {\n\treqPath, status, err := h.stripPrefix(r.URL.Path)\n\tif err != nil 
{\n\t\treturn status, err\n\t}\n\trelease, status, err := h.confirmLocks(r, reqPath, \"\")\n\tif err != nil {\n\t\treturn status, err\n\t}\n\tdefer release()\n\n\tctx := getContext(r)\n\n\tif r.ContentLength > 0 {\n\t\treturn http.StatusUnsupportedMediaType, nil\n\t}\n\tif err := h.FileSystem.Mkdir(ctx, reqPath, 0777); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn http.StatusConflict, err\n\t\t}\n\t\treturn http.StatusMethodNotAllowed, err\n\t}\n\treturn http.StatusCreated, nil\n}\n\nfunc (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) {\n\thdr := r.Header.Get(\"Destination\")\n\tif hdr == \"\" {\n\t\treturn http.StatusBadRequest, errInvalidDestination\n\t}\n\tu, err := url.Parse(hdr)\n\tif err != nil {\n\t\treturn http.StatusBadRequest, errInvalidDestination\n\t}\n\tif u.Host != r.Host {\n\t\treturn http.StatusBadGateway, errInvalidDestination\n\t}\n\n\tsrc, status, err := h.stripPrefix(r.URL.Path)\n\tif err != nil {\n\t\treturn status, err\n\t}\n\n\tdst, status, err := h.stripPrefix(u.Path)\n\tif err != nil {\n\t\treturn status, err\n\t}\n\n\tif dst == \"\" {\n\t\treturn http.StatusBadGateway, errInvalidDestination\n\t}\n\tif dst == src {\n\t\treturn http.StatusForbidden, errDestinationEqualsSource\n\t}\n\n\tctx := getContext(r)\n\n\tif r.Method == \"COPY\" {\n\t\t// Section 7.5.1 says that a COPY only needs to lock the destination,\n\t\t// not both destination and source. Strictly speaking, this is racy,\n\t\t// even though a COPY doesn't modify the source, if a concurrent\n\t\t// operation modifies the source. 
However, the litmus test explicitly\n\t\t// checks that COPYing a locked-by-another source is OK.\n\t\trelease, status, err := h.confirmLocks(r, \"\", dst)\n\t\tif err != nil {\n\t\t\treturn status, err\n\t\t}\n\t\tdefer release()\n\n\t\t// Section 9.8.3 says that \"The COPY method on a collection without a Depth\n\t\t// header must act as if a Depth header with value \"infinity\" was included\".\n\t\tdepth := infiniteDepth\n\t\tif hdr := r.Header.Get(\"Depth\"); hdr != \"\" {\n\t\t\tdepth = parseDepth(hdr)\n\t\t\tif depth != 0 && depth != infiniteDepth {\n\t\t\t\t// Section 9.8.3 says that \"A client may submit a Depth header on a\n\t\t\t\t// COPY on a collection with a value of \"0\" or \"infinity\".\"\n\t\t\t\treturn http.StatusBadRequest, errInvalidDepth\n\t\t\t}\n\t\t}\n\t\treturn copyFiles(ctx, h.FileSystem, src, dst, r.Header.Get(\"Overwrite\") != \"F\", depth, 0)\n\t}\n\n\trelease, status, err := h.confirmLocks(r, src, dst)\n\tif err != nil {\n\t\treturn status, err\n\t}\n\tdefer release()\n\n\t// Section 9.9.2 says that \"The MOVE method on a collection must act as if\n\t// a \"Depth: infinity\" header was used on it. 
A client must not submit a\n\t// Depth header on a MOVE on a collection with any value but \"infinity\".\"\n\tif hdr := r.Header.Get(\"Depth\"); hdr != \"\" {\n\t\tif parseDepth(hdr) != infiniteDepth {\n\t\t\treturn http.StatusBadRequest, errInvalidDepth\n\t\t}\n\t}\n\treturn moveFiles(ctx, h.FileSystem, src, dst, r.Header.Get(\"Overwrite\") == \"T\")\n}\n\nfunc (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) {\n\tduration, err := parseTimeout(r.Header.Get(\"Timeout\"))\n\tif err != nil {\n\t\treturn http.StatusBadRequest, err\n\t}\n\tli, status, err := readLockInfo(r.Body)\n\tif err != nil {\n\t\treturn status, err\n\t}\n\n\tctx := getContext(r)\n\ttoken, ld, now, created := \"\", LockDetails{}, time.Now(), false\n\tif li == (lockInfo{}) {\n\t\t// An empty lockInfo means to refresh the lock.\n\t\tih, ok := parseIfHeader(r.Header.Get(\"If\"))\n\t\tif !ok {\n\t\t\treturn http.StatusBadRequest, errInvalidIfHeader\n\t\t}\n\t\tif len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 {\n\t\t\ttoken = ih.lists[0].conditions[0].Token\n\t\t}\n\t\tif token == \"\" {\n\t\t\treturn http.StatusBadRequest, errInvalidLockToken\n\t\t}\n\t\tld, err = h.LockSystem.Refresh(now, token, duration)\n\t\tif err != nil {\n\t\t\tif err == ErrNoSuchLock {\n\t\t\t\treturn http.StatusPreconditionFailed, err\n\t\t\t}\n\t\t\treturn http.StatusInternalServerError, err\n\t\t}\n\n\t} else {\n\t\t// Section 9.10.3 says that \"If no Depth header is submitted on a LOCK request,\n\t\t// then the request MUST act as if a \"Depth:infinity\" had been submitted.\"\n\t\tdepth := infiniteDepth\n\t\tif hdr := r.Header.Get(\"Depth\"); hdr != \"\" {\n\t\t\tdepth = parseDepth(hdr)\n\t\t\tif depth != 0 && depth != infiniteDepth {\n\t\t\t\t// Section 9.10.3 says that \"Values other than 0 or infinity must not be\n\t\t\t\t// used with the Depth header on a LOCK method\".\n\t\t\t\treturn http.StatusBadRequest, errInvalidDepth\n\t\t\t}\n\t\t}\n\t\treqPath, status, err 
:= h.stripPrefix(r.URL.Path)\n\t\tif err != nil {\n\t\t\treturn status, err\n\t\t}\n\t\tld = LockDetails{\n\t\t\tRoot:      reqPath,\n\t\t\tDuration:  duration,\n\t\t\tOwnerXML:  li.Owner.InnerXML,\n\t\t\tZeroDepth: depth == 0,\n\t\t}\n\t\ttoken, err = h.LockSystem.Create(now, ld)\n\t\tif err != nil {\n\t\t\tif err == ErrLocked {\n\t\t\t\treturn StatusLocked, err\n\t\t\t}\n\t\t\treturn http.StatusInternalServerError, err\n\t\t}\n\t\tdefer func() {\n\t\t\tif retErr != nil {\n\t\t\t\th.LockSystem.Unlock(now, token)\n\t\t\t}\n\t\t}()\n\n\t\t// Create the resource if it didn't previously exist.\n\t\tif _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {\n\t\t\tf, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)\n\t\t\tif err != nil {\n\t\t\t\t// TODO: detect missing intermediate dirs and return http.StatusConflict?\n\t\t\t\treturn http.StatusInternalServerError, err\n\t\t\t}\n\t\t\tf.Close()\n\t\t\tcreated = true\n\t\t}\n\n\t\t// http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the\n\t\t// Lock-Token value is a Coded-URL. We add angle brackets.\n\t\tw.Header().Set(\"Lock-Token\", \"<\"+token+\">\")\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/xml; charset=utf-8\")\n\tif created {\n\t\t// This is \"w.WriteHeader(http.StatusCreated)\" and not \"return\n\t\t// http.StatusCreated, nil\" because we write our own (XML) response to w\n\t\t// and Handler.ServeHTTP would otherwise write \"Created\".\n\t\tw.WriteHeader(http.StatusCreated)\n\t}\n\twriteLockInfo(w, token, ld)\n\treturn 0, nil\n}\n\nfunc (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) {\n\t// http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the\n\t// Lock-Token value is a Coded-URL. 
We strip its angle brackets.\n\tt := r.Header.Get(\"Lock-Token\")\n\tif len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' {\n\t\treturn http.StatusBadRequest, errInvalidLockToken\n\t}\n\tt = t[1 : len(t)-1]\n\n\tswitch err = h.LockSystem.Unlock(time.Now(), t); err {\n\tcase nil:\n\t\treturn http.StatusNoContent, err\n\tcase ErrForbidden:\n\t\treturn http.StatusForbidden, err\n\tcase ErrLocked:\n\t\treturn StatusLocked, err\n\tcase ErrNoSuchLock:\n\t\treturn http.StatusConflict, err\n\tdefault:\n\t\treturn http.StatusInternalServerError, err\n\t}\n}\n\nfunc (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) {\n\treqPath, status, err := h.stripPrefix(r.URL.Path)\n\tif err != nil {\n\t\treturn status, err\n\t}\n\tctx := getContext(r)\n\tfi, err := h.FileSystem.Stat(ctx, reqPath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn http.StatusNotFound, err\n\t\t}\n\t\treturn http.StatusMethodNotAllowed, err\n\t}\n\tdepth := infiniteDepth\n\tif hdr := r.Header.Get(\"Depth\"); hdr != \"\" {\n\t\tdepth = parseDepth(hdr)\n\t\tif depth == invalidDepth {\n\t\t\treturn http.StatusBadRequest, errInvalidDepth\n\t\t}\n\t}\n\tpf, status, err := readPropfind(r.Body)\n\tif err != nil {\n\t\treturn status, err\n\t}\n\n\tmw := multistatusWriter{w: w}\n\n\twalkFn := func(reqPath string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar pstats []Propstat\n\t\tif pf.Propname != nil {\n\t\t\tpnames, err := propnames(ctx, h.FileSystem, h.LockSystem, reqPath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tpstat := Propstat{Status: http.StatusOK}\n\t\t\tfor _, xmlname := range pnames {\n\t\t\t\tpstat.Props = append(pstat.Props, Property{XMLName: xmlname})\n\t\t\t}\n\t\t\tpstats = append(pstats, pstat)\n\t\t} else if pf.Allprop != nil {\n\t\t\tpstats, err = allprop(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop)\n\t\t} else {\n\t\t\tpstats, err = props(ctx, h.FileSystem, h.LockSystem, 
reqPath, pf.Prop)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn mw.write(makePropstatResponse(path.Join(h.Prefix, reqPath), pstats))\n\t}\n\n\twalkErr := walkFS(ctx, h.FileSystem, depth, reqPath, fi, walkFn)\n\tcloseErr := mw.close()\n\tif walkErr != nil {\n\t\treturn http.StatusInternalServerError, walkErr\n\t}\n\tif closeErr != nil {\n\t\treturn http.StatusInternalServerError, closeErr\n\t}\n\treturn 0, nil\n}\n\nfunc (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) {\n\treqPath, status, err := h.stripPrefix(r.URL.Path)\n\tif err != nil {\n\t\treturn status, err\n\t}\n\trelease, status, err := h.confirmLocks(r, reqPath, \"\")\n\tif err != nil {\n\t\treturn status, err\n\t}\n\tdefer release()\n\n\tctx := getContext(r)\n\n\tif _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn http.StatusNotFound, err\n\t\t}\n\t\treturn http.StatusMethodNotAllowed, err\n\t}\n\tpatches, status, err := readProppatch(r.Body)\n\tif err != nil {\n\t\treturn status, err\n\t}\n\tpstats, err := patch(ctx, h.FileSystem, h.LockSystem, reqPath, patches)\n\tif err != nil {\n\t\treturn http.StatusInternalServerError, err\n\t}\n\tmw := multistatusWriter{w: w}\n\twriteErr := mw.write(makePropstatResponse(r.URL.Path, pstats))\n\tcloseErr := mw.close()\n\tif writeErr != nil {\n\t\treturn http.StatusInternalServerError, writeErr\n\t}\n\tif closeErr != nil {\n\t\treturn http.StatusInternalServerError, closeErr\n\t}\n\treturn 0, nil\n}\n\nfunc makePropstatResponse(href string, pstats []Propstat) *response {\n\tresp := response{\n\t\tHref:     []string{(&url.URL{Path: href}).EscapedPath()},\n\t\tPropstat: make([]propstat, 0, len(pstats)),\n\t}\n\tfor _, p := range pstats {\n\t\tvar xmlErr *xmlError\n\t\tif p.XMLError != \"\" {\n\t\t\txmlErr = &xmlError{InnerXML: []byte(p.XMLError)}\n\t\t}\n\t\tresp.Propstat = append(resp.Propstat, propstat{\n\t\t\tStatus:              fmt.Sprintf(\"HTTP/1.1 %d 
%s\", p.Status, StatusText(p.Status)),\n\t\t\tProp:                p.Props,\n\t\t\tResponseDescription: p.ResponseDescription,\n\t\t\tError:               xmlErr,\n\t\t})\n\t}\n\treturn &resp\n}\n\nconst (\n\tinfiniteDepth = -1\n\tinvalidDepth  = -2\n)\n\n// parseDepth maps the strings \"0\", \"1\" and \"infinity\" to 0, 1 and\n// infiniteDepth. Parsing any other string returns invalidDepth.\n//\n// Different WebDAV methods have further constraints on valid depths:\n//\t- PROPFIND has no further restrictions, as per section 9.1.\n//\t- COPY accepts only \"0\" or \"infinity\", as per section 9.8.3.\n//\t- MOVE accepts only \"infinity\", as per section 9.9.2.\n//\t- LOCK accepts only \"0\" or \"infinity\", as per section 9.10.3.\n// These constraints are enforced by the handleXxx methods.\nfunc parseDepth(s string) int {\n\tswitch s {\n\tcase \"0\":\n\t\treturn 0\n\tcase \"1\":\n\t\treturn 1\n\tcase \"infinity\":\n\t\treturn infiniteDepth\n\t}\n\treturn invalidDepth\n}\n\n// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11\nconst (\n\tStatusMulti               = 207\n\tStatusUnprocessableEntity = 422\n\tStatusLocked              = 423\n\tStatusFailedDependency    = 424\n\tStatusInsufficientStorage = 507\n)\n\nfunc StatusText(code int) string {\n\tswitch code {\n\tcase StatusMulti:\n\t\treturn \"Multi-Status\"\n\tcase StatusUnprocessableEntity:\n\t\treturn \"Unprocessable Entity\"\n\tcase StatusLocked:\n\t\treturn \"Locked\"\n\tcase StatusFailedDependency:\n\t\treturn \"Failed Dependency\"\n\tcase StatusInsufficientStorage:\n\t\treturn \"Insufficient Storage\"\n\t}\n\treturn http.StatusText(code)\n}\n\nvar (\n\terrDestinationEqualsSource = errors.New(\"webdav: destination equals source\")\n\terrDirectoryNotEmpty       = errors.New(\"webdav: directory not empty\")\n\terrInvalidDepth            = errors.New(\"webdav: invalid depth\")\n\terrInvalidDestination      = errors.New(\"webdav: invalid destination\")\n\terrInvalidIfHeader         = 
errors.New(\"webdav: invalid If header\")\n\terrInvalidLockInfo         = errors.New(\"webdav: invalid lock info\")\n\terrInvalidLockToken        = errors.New(\"webdav: invalid lock token\")\n\terrInvalidPropfind         = errors.New(\"webdav: invalid propfind\")\n\terrInvalidProppatch        = errors.New(\"webdav: invalid proppatch\")\n\terrInvalidResponse         = errors.New(\"webdav: invalid response\")\n\terrInvalidTimeout          = errors.New(\"webdav: invalid timeout\")\n\terrNoFileSystem            = errors.New(\"webdav: no file system\")\n\terrNoLockSystem            = errors.New(\"webdav: no lock system\")\n\terrNotADirectory           = errors.New(\"webdav: not a directory\")\n\terrPrefixMismatch          = errors.New(\"webdav: prefix mismatch\")\n\terrRecursionTooDeep        = errors.New(\"webdav: recursion too deep\")\n\terrUnsupportedLockInfo     = errors.New(\"webdav: unsupported lock info\")\n\terrUnsupportedMethod       = errors.New(\"webdav: unsupported method\")\n)\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/webdav_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage webdav\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang.org/x/net/context\"\n)\n\n// TODO: add tests to check XML responses with the expected prefix path\nfunc TestPrefix(t *testing.T) {\n\tconst dst, blah = \"Destination\", \"blah blah blah\"\n\n\t// createLockBody comes from the example in Section 9.10.7.\n\tconst createLockBody = `<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n\t\t<D:lockinfo xmlns:D='DAV:'>\n\t\t\t<D:lockscope><D:exclusive/></D:lockscope>\n\t\t\t<D:locktype><D:write/></D:locktype>\n\t\t\t<D:owner>\n\t\t\t\t<D:href>http://example.org/~ejw/contact.html</D:href>\n\t\t\t</D:owner>\n\t\t</D:lockinfo>\n\t`\n\n\tdo := func(method, urlStr string, body string, wantStatusCode int, headers ...string) (http.Header, error) {\n\t\tvar bodyReader io.Reader\n\t\tif body != \"\" {\n\t\t\tbodyReader = strings.NewReader(body)\n\t\t}\n\t\treq, err := http.NewRequest(method, urlStr, bodyReader)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor len(headers) >= 2 {\n\t\t\treq.Header.Add(headers[0], headers[1])\n\t\t\theaders = headers[2:]\n\t\t}\n\t\tres, err := http.DefaultTransport.RoundTrip(req)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer res.Body.Close()\n\t\tif res.StatusCode != wantStatusCode {\n\t\t\treturn nil, fmt.Errorf(\"got status code %d, want %d\", res.StatusCode, wantStatusCode)\n\t\t}\n\t\treturn res.Header, nil\n\t}\n\n\tprefixes := []string{\n\t\t\"/\",\n\t\t\"/a/\",\n\t\t\"/a/b/\",\n\t\t\"/a/b/c/\",\n\t}\n\tctx := context.Background()\n\tfor _, prefix := range prefixes {\n\t\tfs := NewMemFS()\n\t\th := &Handler{\n\t\t\tFileSystem: fs,\n\t\t\tLockSystem: 
NewMemLS(),\n\t\t}\n\t\tmux := http.NewServeMux()\n\t\tif prefix != \"/\" {\n\t\t\th.Prefix = prefix\n\t\t}\n\t\tmux.Handle(prefix, h)\n\t\tsrv := httptest.NewServer(mux)\n\t\tdefer srv.Close()\n\n\t\t// The script is:\n\t\t//\tMKCOL /a\n\t\t//\tMKCOL /a/b\n\t\t//\tPUT   /a/b/c\n\t\t//\tCOPY  /a/b/c /a/b/d\n\t\t//\tMKCOL /a/b/e\n\t\t//\tMOVE  /a/b/d /a/b/e/f\n\t\t//\tLOCK  /a/b/e/g\n\t\t//\tPUT   /a/b/e/g\n\t\t// which should yield the (possibly stripped) filenames /a/b/c,\n\t\t// /a/b/e/f and /a/b/e/g, plus their parent directories.\n\n\t\twantA := map[string]int{\n\t\t\t\"/\":       http.StatusCreated,\n\t\t\t\"/a/\":     http.StatusMovedPermanently,\n\t\t\t\"/a/b/\":   http.StatusNotFound,\n\t\t\t\"/a/b/c/\": http.StatusNotFound,\n\t\t}[prefix]\n\t\tif _, err := do(\"MKCOL\", srv.URL+\"/a\", \"\", wantA); err != nil {\n\t\t\tt.Errorf(\"prefix=%-9q MKCOL /a: %v\", prefix, err)\n\t\t\tcontinue\n\t\t}\n\n\t\twantB := map[string]int{\n\t\t\t\"/\":       http.StatusCreated,\n\t\t\t\"/a/\":     http.StatusCreated,\n\t\t\t\"/a/b/\":   http.StatusMovedPermanently,\n\t\t\t\"/a/b/c/\": http.StatusNotFound,\n\t\t}[prefix]\n\t\tif _, err := do(\"MKCOL\", srv.URL+\"/a/b\", \"\", wantB); err != nil {\n\t\t\tt.Errorf(\"prefix=%-9q MKCOL /a/b: %v\", prefix, err)\n\t\t\tcontinue\n\t\t}\n\n\t\twantC := map[string]int{\n\t\t\t\"/\":       http.StatusCreated,\n\t\t\t\"/a/\":     http.StatusCreated,\n\t\t\t\"/a/b/\":   http.StatusCreated,\n\t\t\t\"/a/b/c/\": http.StatusMovedPermanently,\n\t\t}[prefix]\n\t\tif _, err := do(\"PUT\", srv.URL+\"/a/b/c\", blah, wantC); err != nil {\n\t\t\tt.Errorf(\"prefix=%-9q PUT /a/b/c: %v\", prefix, err)\n\t\t\tcontinue\n\t\t}\n\n\t\twantD := map[string]int{\n\t\t\t\"/\":       http.StatusCreated,\n\t\t\t\"/a/\":     http.StatusCreated,\n\t\t\t\"/a/b/\":   http.StatusCreated,\n\t\t\t\"/a/b/c/\": http.StatusMovedPermanently,\n\t\t}[prefix]\n\t\tif _, err := do(\"COPY\", srv.URL+\"/a/b/c\", \"\", wantD, dst, srv.URL+\"/a/b/d\"); err != nil 
{\n\t\t\tt.Errorf(\"prefix=%-9q COPY /a/b/c /a/b/d: %v\", prefix, err)\n\t\t\tcontinue\n\t\t}\n\n\t\twantE := map[string]int{\n\t\t\t\"/\":       http.StatusCreated,\n\t\t\t\"/a/\":     http.StatusCreated,\n\t\t\t\"/a/b/\":   http.StatusCreated,\n\t\t\t\"/a/b/c/\": http.StatusNotFound,\n\t\t}[prefix]\n\t\tif _, err := do(\"MKCOL\", srv.URL+\"/a/b/e\", \"\", wantE); err != nil {\n\t\t\tt.Errorf(\"prefix=%-9q MKCOL /a/b/e: %v\", prefix, err)\n\t\t\tcontinue\n\t\t}\n\n\t\twantF := map[string]int{\n\t\t\t\"/\":       http.StatusCreated,\n\t\t\t\"/a/\":     http.StatusCreated,\n\t\t\t\"/a/b/\":   http.StatusCreated,\n\t\t\t\"/a/b/c/\": http.StatusNotFound,\n\t\t}[prefix]\n\t\tif _, err := do(\"MOVE\", srv.URL+\"/a/b/d\", \"\", wantF, dst, srv.URL+\"/a/b/e/f\"); err != nil {\n\t\t\tt.Errorf(\"prefix=%-9q MOVE /a/b/d /a/b/e/f: %v\", prefix, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar lockToken string\n\t\twantG := map[string]int{\n\t\t\t\"/\":       http.StatusCreated,\n\t\t\t\"/a/\":     http.StatusCreated,\n\t\t\t\"/a/b/\":   http.StatusCreated,\n\t\t\t\"/a/b/c/\": http.StatusNotFound,\n\t\t}[prefix]\n\t\tif h, err := do(\"LOCK\", srv.URL+\"/a/b/e/g\", createLockBody, wantG); err != nil {\n\t\t\tt.Errorf(\"prefix=%-9q LOCK /a/b/e/g: %v\", prefix, err)\n\t\t\tcontinue\n\t\t} else {\n\t\t\tlockToken = h.Get(\"Lock-Token\")\n\t\t}\n\n\t\tifHeader := fmt.Sprintf(\"<%s/a/b/e/g> (%s)\", srv.URL, lockToken)\n\t\twantH := map[string]int{\n\t\t\t\"/\":       http.StatusCreated,\n\t\t\t\"/a/\":     http.StatusCreated,\n\t\t\t\"/a/b/\":   http.StatusCreated,\n\t\t\t\"/a/b/c/\": http.StatusNotFound,\n\t\t}[prefix]\n\t\tif _, err := do(\"PUT\", srv.URL+\"/a/b/e/g\", blah, wantH, \"If\", ifHeader); err != nil {\n\t\t\tt.Errorf(\"prefix=%-9q PUT /a/b/e/g: %v\", prefix, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tgot, err := find(ctx, nil, fs, \"/\")\n\t\tif err != nil {\n\t\t\tt.Errorf(\"prefix=%-9q find: %v\", prefix, err)\n\t\t\tcontinue\n\t\t}\n\t\tsort.Strings(got)\n\t\twant := 
map[string][]string{\n\t\t\t\"/\":       {\"/\", \"/a\", \"/a/b\", \"/a/b/c\", \"/a/b/e\", \"/a/b/e/f\", \"/a/b/e/g\"},\n\t\t\t\"/a/\":     {\"/\", \"/b\", \"/b/c\", \"/b/e\", \"/b/e/f\", \"/b/e/g\"},\n\t\t\t\"/a/b/\":   {\"/\", \"/c\", \"/e\", \"/e/f\", \"/e/g\"},\n\t\t\t\"/a/b/c/\": {\"/\"},\n\t\t}[prefix]\n\t\tif !reflect.DeepEqual(got, want) {\n\t\t\tt.Errorf(\"prefix=%-9q find:\\ngot  %v\\nwant %v\", prefix, got, want)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc TestEscapeXML(t *testing.T) {\n\t// These test cases aren't exhaustive, and there is more than one way to\n\t// escape e.g. a quot (as \"&#34;\" or \"&quot;\") or an apos. We presume that\n\t// the encoding/xml package tests xml.EscapeText more thoroughly. This test\n\t// here is just a sanity check for this package's escapeXML function, and\n\t// its attempt to provide a fast path (and avoid a bytes.Buffer allocation)\n\t// when escaping filenames is obviously a no-op.\n\ttestCases := map[string]string{\n\t\t\"\":              \"\",\n\t\t\" \":             \" \",\n\t\t\"&\":             \"&amp;\",\n\t\t\"*\":             \"*\",\n\t\t\"+\":             \"+\",\n\t\t\",\":             \",\",\n\t\t\"-\":             \"-\",\n\t\t\".\":             \".\",\n\t\t\"/\":             \"/\",\n\t\t\"0\":             \"0\",\n\t\t\"9\":             \"9\",\n\t\t\":\":             \":\",\n\t\t\"<\":             \"&lt;\",\n\t\t\">\":             \"&gt;\",\n\t\t\"A\":             \"A\",\n\t\t\"_\":             \"_\",\n\t\t\"a\":             \"a\",\n\t\t\"~\":             \"~\",\n\t\t\"\\u0201\":        \"\\u0201\",\n\t\t\"&amp;\":         \"&amp;amp;\",\n\t\t\"foo&<b/ar>baz\": \"foo&amp;&lt;b/ar&gt;baz\",\n\t}\n\n\tfor in, want := range testCases {\n\t\tif got := escapeXML(in); got != want {\n\t\t\tt.Errorf(\"in=%q: got %q, want %q\", in, got, want)\n\t\t}\n\t}\n}\n\nfunc TestFilenameEscape(t *testing.T) {\n\threfRe := regexp.MustCompile(`<D:href>([^<]*)</D:href>`)\n\tdisplayNameRe := 
regexp.MustCompile(`<D:displayname>([^<]*)</D:displayname>`)\n\tdo := func(method, urlStr string) (string, string, error) {\n\t\treq, err := http.NewRequest(method, urlStr, nil)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\tres, err := http.DefaultClient.Do(req)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\tdefer res.Body.Close()\n\n\t\tb, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\threfMatch := hrefRe.FindStringSubmatch(string(b))\n\t\tif len(hrefMatch) != 2 {\n\t\t\treturn \"\", \"\", errors.New(\"D:href not found\")\n\t\t}\n\t\tdisplayNameMatch := displayNameRe.FindStringSubmatch(string(b))\n\t\tif len(displayNameMatch) != 2 {\n\t\t\treturn \"\", \"\", errors.New(\"D:displayname not found\")\n\t\t}\n\n\t\treturn hrefMatch[1], displayNameMatch[1], nil\n\t}\n\n\ttestCases := []struct {\n\t\tname, wantHref, wantDisplayName string\n\t}{{\n\t\tname:            `/foo%bar`,\n\t\twantHref:        `/foo%25bar`,\n\t\twantDisplayName: `foo%bar`,\n\t}, {\n\t\tname:            `/こんにちわ世界`,\n\t\twantHref:        `/%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F%E4%B8%96%E7%95%8C`,\n\t\twantDisplayName: `こんにちわ世界`,\n\t}, {\n\t\tname:            `/Program Files/`,\n\t\twantHref:        `/Program%20Files`,\n\t\twantDisplayName: `Program Files`,\n\t}, {\n\t\tname:            `/go+lang`,\n\t\twantHref:        `/go+lang`,\n\t\twantDisplayName: `go+lang`,\n\t}, {\n\t\tname:            `/go&lang`,\n\t\twantHref:        `/go&amp;lang`,\n\t\twantDisplayName: `go&amp;lang`,\n\t}, {\n\t\tname:            `/go<lang`,\n\t\twantHref:        `/go%3Clang`,\n\t\twantDisplayName: `go&lt;lang`,\n\t}}\n\tctx := context.Background()\n\tfs := NewMemFS()\n\tfor _, tc := range testCases {\n\t\tif strings.HasSuffix(tc.name, \"/\") {\n\t\t\tif err := fs.Mkdir(ctx, tc.name, 0755); err != nil {\n\t\t\t\tt.Fatalf(\"name=%q: Mkdir: %v\", tc.name, err)\n\t\t\t}\n\t\t} else {\n\t\t\tf, err := fs.OpenFile(ctx, tc.name, 
os.O_CREATE, 0644)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"name=%q: OpenFile: %v\", tc.name, err)\n\t\t\t}\n\t\t\tf.Close()\n\t\t}\n\t}\n\n\tsrv := httptest.NewServer(&Handler{\n\t\tFileSystem: fs,\n\t\tLockSystem: NewMemLS(),\n\t})\n\tdefer srv.Close()\n\n\tu, err := url.Parse(srv.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, tc := range testCases {\n\t\tu.Path = tc.name\n\t\tgotHref, gotDisplayName, err := do(\"PROPFIND\", u.String())\n\t\tif err != nil {\n\t\t\tt.Errorf(\"name=%q: PROPFIND: %v\", tc.name, err)\n\t\t\tcontinue\n\t\t}\n\t\tif gotHref != tc.wantHref {\n\t\t\tt.Errorf(\"name=%q: got href %q, want %q\", tc.name, gotHref, tc.wantHref)\n\t\t}\n\t\tif gotDisplayName != tc.wantDisplayName {\n\t\t\tt.Errorf(\"name=%q: got dispayname %q, want %q\", tc.name, gotDisplayName, tc.wantDisplayName)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/xml.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage webdav\n\n// The XML encoding is covered by Section 14.\n// http://www.webdav.org/specs/rfc4918.html#xml.element.definitions\n\nimport (\n\t\"bytes\"\n\t\"encoding/xml\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"time\"\n\n\t// As of https://go-review.googlesource.com/#/c/12772/ which was submitted\n\t// in July 2015, this package uses an internal fork of the standard\n\t// library's encoding/xml package, due to changes in the way namespaces\n\t// were encoded. Such changes were introduced in the Go 1.5 cycle, but were\n\t// rolled back in response to https://github.com/golang/go/issues/11841\n\t//\n\t// However, this package's exported API, specifically the Property and\n\t// DeadPropsHolder types, need to refer to the standard library's version\n\t// of the xml.Name type, as code that imports this package cannot refer to\n\t// the internal version.\n\t//\n\t// This file therefore imports both the internal and external versions, as\n\t// ixml and xml, and converts between them.\n\t//\n\t// In the long term, this package should use the standard library's version\n\t// only, and the internal fork deleted, once\n\t// https://github.com/golang/go/issues/13400 is resolved.\n\tixml \"golang.org/x/net/webdav/internal/xml\"\n)\n\n// http://www.webdav.org/specs/rfc4918.html#ELEMENT_lockinfo\ntype lockInfo struct {\n\tXMLName   ixml.Name `xml:\"lockinfo\"`\n\tExclusive *struct{} `xml:\"lockscope>exclusive\"`\n\tShared    *struct{} `xml:\"lockscope>shared\"`\n\tWrite     *struct{} `xml:\"locktype>write\"`\n\tOwner     owner     `xml:\"owner\"`\n}\n\n// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner\ntype owner struct {\n\tInnerXML string `xml:\",innerxml\"`\n}\n\nfunc readLockInfo(r io.Reader) (li lockInfo, status int, err error) {\n\tc := &countingReader{r: r}\n\tif err = 
ixml.NewDecoder(c).Decode(&li); err != nil {\n\t\tif err == io.EOF {\n\t\t\tif c.n == 0 {\n\t\t\t\t// An empty body means to refresh the lock.\n\t\t\t\t// http://www.webdav.org/specs/rfc4918.html#refreshing-locks\n\t\t\t\treturn lockInfo{}, 0, nil\n\t\t\t}\n\t\t\terr = errInvalidLockInfo\n\t\t}\n\t\treturn lockInfo{}, http.StatusBadRequest, err\n\t}\n\t// We only support exclusive (non-shared) write locks. In practice, these are\n\t// the only types of locks that seem to matter.\n\tif li.Exclusive == nil || li.Shared != nil || li.Write == nil {\n\t\treturn lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo\n\t}\n\treturn li, 0, nil\n}\n\ntype countingReader struct {\n\tn int\n\tr io.Reader\n}\n\nfunc (c *countingReader) Read(p []byte) (int, error) {\n\tn, err := c.r.Read(p)\n\tc.n += n\n\treturn n, err\n}\n\nfunc writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) {\n\tdepth := \"infinity\"\n\tif ld.ZeroDepth {\n\t\tdepth = \"0\"\n\t}\n\ttimeout := ld.Duration / time.Second\n\treturn fmt.Fprintf(w, \"<?xml version=\\\"1.0\\\" encoding=\\\"utf-8\\\"?>\\n\"+\n\t\t\"<D:prop xmlns:D=\\\"DAV:\\\"><D:lockdiscovery><D:activelock>\\n\"+\n\t\t\"\t<D:locktype><D:write/></D:locktype>\\n\"+\n\t\t\"\t<D:lockscope><D:exclusive/></D:lockscope>\\n\"+\n\t\t\"\t<D:depth>%s</D:depth>\\n\"+\n\t\t\"\t<D:owner>%s</D:owner>\\n\"+\n\t\t\"\t<D:timeout>Second-%d</D:timeout>\\n\"+\n\t\t\"\t<D:locktoken><D:href>%s</D:href></D:locktoken>\\n\"+\n\t\t\"\t<D:lockroot><D:href>%s</D:href></D:lockroot>\\n\"+\n\t\t\"</D:activelock></D:lockdiscovery></D:prop>\",\n\t\tdepth, ld.OwnerXML, timeout, escape(token), escape(ld.Root),\n\t)\n}\n\nfunc escape(s string) string {\n\tfor i := 0; i < len(s); i++ {\n\t\tswitch s[i] {\n\t\tcase '\"', '&', '\\'', '<', '>':\n\t\t\tb := bytes.NewBuffer(nil)\n\t\t\tixml.EscapeText(b, []byte(s))\n\t\t\treturn b.String()\n\t\t}\n\t}\n\treturn s\n}\n\n// Next returns the next token, if any, in the XML stream of d.\n// RFC 4918 requires to ignore 
comments, processing instructions\n// and directives.\n// http://www.webdav.org/specs/rfc4918.html#property_values\n// http://www.webdav.org/specs/rfc4918.html#xml-extensibility\nfunc next(d *ixml.Decoder) (ixml.Token, error) {\n\tfor {\n\t\tt, err := d.Token()\n\t\tif err != nil {\n\t\t\treturn t, err\n\t\t}\n\t\tswitch t.(type) {\n\t\tcase ixml.Comment, ixml.Directive, ixml.ProcInst:\n\t\t\tcontinue\n\t\tdefault:\n\t\t\treturn t, nil\n\t\t}\n\t}\n}\n\n// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind)\ntype propfindProps []xml.Name\n\n// UnmarshalXML appends the property names enclosed within start to pn.\n//\n// It returns an error if start does not contain any properties or if\n// properties contain values. Character data between properties is ignored.\nfunc (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {\n\tfor {\n\t\tt, err := next(d)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tswitch t.(type) {\n\t\tcase ixml.EndElement:\n\t\t\tif len(*pn) == 0 {\n\t\t\t\treturn fmt.Errorf(\"%s must not be empty\", start.Name.Local)\n\t\t\t}\n\t\t\treturn nil\n\t\tcase ixml.StartElement:\n\t\t\tname := t.(ixml.StartElement).Name\n\t\t\tt, err = next(d)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, ok := t.(ixml.EndElement); !ok {\n\t\t\t\treturn fmt.Errorf(\"unexpected token %T\", t)\n\t\t\t}\n\t\t\t*pn = append(*pn, xml.Name(name))\n\t\t}\n\t}\n}\n\n// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind\ntype propfind struct {\n\tXMLName  ixml.Name     `xml:\"DAV: propfind\"`\n\tAllprop  *struct{}     `xml:\"DAV: allprop\"`\n\tPropname *struct{}     `xml:\"DAV: propname\"`\n\tProp     propfindProps `xml:\"DAV: prop\"`\n\tInclude  propfindProps `xml:\"DAV: include\"`\n}\n\nfunc readPropfind(r io.Reader) (pf propfind, status int, err error) {\n\tc := countingReader{r: r}\n\tif err = ixml.NewDecoder(&c).Decode(&pf); err != nil {\n\t\tif err == io.EOF {\n\t\t\tif c.n == 0 {\n\t\t\t\t// An 
empty body means to propfind allprop.\n\t\t\t\t// http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND\n\t\t\t\treturn propfind{Allprop: new(struct{})}, 0, nil\n\t\t\t}\n\t\t\terr = errInvalidPropfind\n\t\t}\n\t\treturn propfind{}, http.StatusBadRequest, err\n\t}\n\n\tif pf.Allprop == nil && pf.Include != nil {\n\t\treturn propfind{}, http.StatusBadRequest, errInvalidPropfind\n\t}\n\tif pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) {\n\t\treturn propfind{}, http.StatusBadRequest, errInvalidPropfind\n\t}\n\tif pf.Prop != nil && pf.Propname != nil {\n\t\treturn propfind{}, http.StatusBadRequest, errInvalidPropfind\n\t}\n\tif pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil {\n\t\treturn propfind{}, http.StatusBadRequest, errInvalidPropfind\n\t}\n\treturn pf, 0, nil\n}\n\n// Property represents a single DAV resource property as defined in RFC 4918.\n// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties\ntype Property struct {\n\t// XMLName is the fully qualified name that identifies this property.\n\tXMLName xml.Name\n\n\t// Lang is an optional xml:lang attribute.\n\tLang string `xml:\"xml:lang,attr,omitempty\"`\n\n\t// InnerXML contains the XML representation of the property value.\n\t// See http://www.webdav.org/specs/rfc4918.html#property_values\n\t//\n\t// Property values of complex type or mixed-content must have fully\n\t// expanded XML namespaces or be self-contained with according\n\t// XML namespace declarations. 
They must not rely on any XML\n\t// namespace declarations within the scope of the XML document,\n\t// even including the DAV: namespace.\n\tInnerXML []byte `xml:\",innerxml\"`\n}\n\n// ixmlProperty is the same as the Property type except it holds an ixml.Name\n// instead of an xml.Name.\ntype ixmlProperty struct {\n\tXMLName  ixml.Name\n\tLang     string `xml:\"xml:lang,attr,omitempty\"`\n\tInnerXML []byte `xml:\",innerxml\"`\n}\n\n// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error\n// See multistatusWriter for the \"D:\" namespace prefix.\ntype xmlError struct {\n\tXMLName  ixml.Name `xml:\"D:error\"`\n\tInnerXML []byte    `xml:\",innerxml\"`\n}\n\n// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat\n// See multistatusWriter for the \"D:\" namespace prefix.\ntype propstat struct {\n\tProp                []Property `xml:\"D:prop>_ignored_\"`\n\tStatus              string     `xml:\"D:status\"`\n\tError               *xmlError  `xml:\"D:error\"`\n\tResponseDescription string     `xml:\"D:responsedescription,omitempty\"`\n}\n\n// ixmlPropstat is the same as the propstat type except it holds an ixml.Name\n// instead of an xml.Name.\ntype ixmlPropstat struct {\n\tProp                []ixmlProperty `xml:\"D:prop>_ignored_\"`\n\tStatus              string         `xml:\"D:status\"`\n\tError               *xmlError      `xml:\"D:error\"`\n\tResponseDescription string         `xml:\"D:responsedescription,omitempty\"`\n}\n\n// MarshalXML prepends the \"D:\" namespace prefix on properties in the DAV: namespace\n// before encoding. 
See multistatusWriter.\nfunc (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error {\n\t// Convert from a propstat to an ixmlPropstat.\n\tixmlPs := ixmlPropstat{\n\t\tProp:                make([]ixmlProperty, len(ps.Prop)),\n\t\tStatus:              ps.Status,\n\t\tError:               ps.Error,\n\t\tResponseDescription: ps.ResponseDescription,\n\t}\n\tfor k, prop := range ps.Prop {\n\t\tixmlPs.Prop[k] = ixmlProperty{\n\t\t\tXMLName:  ixml.Name(prop.XMLName),\n\t\t\tLang:     prop.Lang,\n\t\t\tInnerXML: prop.InnerXML,\n\t\t}\n\t}\n\n\tfor k, prop := range ixmlPs.Prop {\n\t\tif prop.XMLName.Space == \"DAV:\" {\n\t\t\tprop.XMLName = ixml.Name{Space: \"\", Local: \"D:\" + prop.XMLName.Local}\n\t\t\tixmlPs.Prop[k] = prop\n\t\t}\n\t}\n\t// Distinct type to avoid infinite recursion of MarshalXML.\n\ttype newpropstat ixmlPropstat\n\treturn e.EncodeElement(newpropstat(ixmlPs), start)\n}\n\n// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response\n// See multistatusWriter for the \"D:\" namespace prefix.\ntype response struct {\n\tXMLName             ixml.Name  `xml:\"D:response\"`\n\tHref                []string   `xml:\"D:href\"`\n\tPropstat            []propstat `xml:\"D:propstat\"`\n\tStatus              string     `xml:\"D:status,omitempty\"`\n\tError               *xmlError  `xml:\"D:error\"`\n\tResponseDescription string     `xml:\"D:responsedescription,omitempty\"`\n}\n\n// MultistatusWriter marshals one or more Responses into a XML\n// multistatus response.\n// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus\n// TODO(rsto, mpl): As a workaround, the \"D:\" namespace prefix, defined as\n// \"DAV:\" on this element, is prepended on the nested response, as well as on all\n// its nested elements. All property names in the DAV: namespace are prefixed as\n// well. This is because some versions of Mini-Redirector (on windows 7) ignore\n// elements with a default namespace (no prefixed namespace). 
A less intrusive fix\n// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177\ntype multistatusWriter struct {\n\t// ResponseDescription contains the optional responsedescription\n\t// of the multistatus XML element. Only the latest content before\n\t// close will be emitted. Empty response descriptions are not\n\t// written.\n\tresponseDescription string\n\n\tw   http.ResponseWriter\n\tenc *ixml.Encoder\n}\n\n// Write validates and emits a DAV response as part of a multistatus response\n// element.\n//\n// It sets the HTTP status code of its underlying http.ResponseWriter to 207\n// (Multi-Status) and populates the Content-Type header. If r is the\n// first, valid response to be written, Write prepends the XML representation\n// of r with a multistatus tag. Callers must call close after the last response\n// has been written.\nfunc (w *multistatusWriter) write(r *response) error {\n\tswitch len(r.Href) {\n\tcase 0:\n\t\treturn errInvalidResponse\n\tcase 1:\n\t\tif len(r.Propstat) > 0 != (r.Status == \"\") {\n\t\t\treturn errInvalidResponse\n\t\t}\n\tdefault:\n\t\tif len(r.Propstat) > 0 || r.Status == \"\" {\n\t\t\treturn errInvalidResponse\n\t\t}\n\t}\n\terr := w.writeHeader()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn w.enc.Encode(r)\n}\n\n// writeHeader writes a XML multistatus start element on w's underlying\n// http.ResponseWriter and returns the result of the write operation.\n// After the first write attempt, writeHeader becomes a no-op.\nfunc (w *multistatusWriter) writeHeader() error {\n\tif w.enc != nil {\n\t\treturn nil\n\t}\n\tw.w.Header().Add(\"Content-Type\", \"text/xml; charset=utf-8\")\n\tw.w.WriteHeader(StatusMulti)\n\t_, err := fmt.Fprintf(w.w, `<?xml version=\"1.0\" encoding=\"UTF-8\"?>`)\n\tif err != nil {\n\t\treturn err\n\t}\n\tw.enc = ixml.NewEncoder(w.w)\n\treturn w.enc.EncodeToken(ixml.StartElement{\n\t\tName: ixml.Name{\n\t\t\tSpace: \"DAV:\",\n\t\t\tLocal: \"multistatus\",\n\t\t},\n\t\tAttr: 
[]ixml.Attr{{\n\t\t\tName:  ixml.Name{Space: \"xmlns\", Local: \"D\"},\n\t\t\tValue: \"DAV:\",\n\t\t}},\n\t})\n}\n\n// Close completes the marshalling of the multistatus response. It returns\n// an error if the multistatus response could not be completed. If both the\n// return value and field enc of w are nil, then no multistatus response has\n// been written.\nfunc (w *multistatusWriter) close() error {\n\tif w.enc == nil {\n\t\treturn nil\n\t}\n\tvar end []ixml.Token\n\tif w.responseDescription != \"\" {\n\t\tname := ixml.Name{Space: \"DAV:\", Local: \"responsedescription\"}\n\t\tend = append(end,\n\t\t\tixml.StartElement{Name: name},\n\t\t\tixml.CharData(w.responseDescription),\n\t\t\tixml.EndElement{Name: name},\n\t\t)\n\t}\n\tend = append(end, ixml.EndElement{\n\t\tName: ixml.Name{Space: \"DAV:\", Local: \"multistatus\"},\n\t})\n\tfor _, t := range end {\n\t\terr := w.enc.EncodeToken(t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn w.enc.Flush()\n}\n\nvar xmlLangName = ixml.Name{Space: \"http://www.w3.org/XML/1998/namespace\", Local: \"lang\"}\n\nfunc xmlLang(s ixml.StartElement, d string) string {\n\tfor _, attr := range s.Attr {\n\t\tif attr.Name == xmlLangName {\n\t\t\treturn attr.Value\n\t\t}\n\t}\n\treturn d\n}\n\ntype xmlValue []byte\n\nfunc (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {\n\t// The XML value of a property can be arbitrary, mixed-content XML.\n\t// To make sure that the unmarshalled value contains all required\n\t// namespaces, we encode all the property value XML tokens into a\n\t// buffer. 
This forces the encoder to redeclare any used namespaces.\n\tvar b bytes.Buffer\n\te := ixml.NewEncoder(&b)\n\tfor {\n\t\tt, err := next(d)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif e, ok := t.(ixml.EndElement); ok && e.Name == start.Name {\n\t\t\tbreak\n\t\t}\n\t\tif err = e.EncodeToken(t); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr := e.Flush()\n\tif err != nil {\n\t\treturn err\n\t}\n\t*v = b.Bytes()\n\treturn nil\n}\n\n// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch)\ntype proppatchProps []Property\n\n// UnmarshalXML appends the property names and values enclosed within start\n// to ps.\n//\n// An xml:lang attribute that is defined either on the DAV:prop or property\n// name XML element is propagated to the property's Lang field.\n//\n// UnmarshalXML returns an error if start does not contain any properties or if\n// property values contain syntactically incorrect XML.\nfunc (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {\n\tlang := xmlLang(start, \"\")\n\tfor {\n\t\tt, err := next(d)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tswitch elem := t.(type) {\n\t\tcase ixml.EndElement:\n\t\t\tif len(*ps) == 0 {\n\t\t\t\treturn fmt.Errorf(\"%s must not be empty\", start.Name.Local)\n\t\t\t}\n\t\t\treturn nil\n\t\tcase ixml.StartElement:\n\t\t\tp := Property{\n\t\t\t\tXMLName: xml.Name(t.(ixml.StartElement).Name),\n\t\t\t\tLang:    xmlLang(t.(ixml.StartElement), lang),\n\t\t\t}\n\t\t\terr = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t*ps = append(*ps, p)\n\t\t}\n\t}\n}\n\n// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set\n// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove\ntype setRemove struct {\n\tXMLName ixml.Name\n\tLang    string         `xml:\"xml:lang,attr,omitempty\"`\n\tProp    proppatchProps `xml:\"DAV: prop\"`\n}\n\n// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate\ntype 
propertyupdate struct {\n\tXMLName   ixml.Name   `xml:\"DAV: propertyupdate\"`\n\tLang      string      `xml:\"xml:lang,attr,omitempty\"`\n\tSetRemove []setRemove `xml:\",any\"`\n}\n\nfunc readProppatch(r io.Reader) (patches []Proppatch, status int, err error) {\n\tvar pu propertyupdate\n\tif err = ixml.NewDecoder(r).Decode(&pu); err != nil {\n\t\treturn nil, http.StatusBadRequest, err\n\t}\n\tfor _, op := range pu.SetRemove {\n\t\tremove := false\n\t\tswitch op.XMLName {\n\t\tcase ixml.Name{Space: \"DAV:\", Local: \"set\"}:\n\t\t\t// No-op.\n\t\tcase ixml.Name{Space: \"DAV:\", Local: \"remove\"}:\n\t\t\tfor _, p := range op.Prop {\n\t\t\t\tif len(p.InnerXML) > 0 {\n\t\t\t\t\treturn nil, http.StatusBadRequest, errInvalidProppatch\n\t\t\t\t}\n\t\t\t}\n\t\t\tremove = true\n\t\tdefault:\n\t\t\treturn nil, http.StatusBadRequest, errInvalidProppatch\n\t\t}\n\t\tpatches = append(patches, Proppatch{Remove: remove, Props: op.Prop})\n\t}\n\treturn patches, 0, nil\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/webdav/xml_test.go",
    "content": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage webdav\n\nimport (\n\t\"bytes\"\n\t\"encoding/xml\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\n\tixml \"golang.org/x/net/webdav/internal/xml\"\n)\n\nfunc TestReadLockInfo(t *testing.T) {\n\t// The \"section x.y.z\" test cases come from section x.y.z of the spec at\n\t// http://www.webdav.org/specs/rfc4918.html\n\ttestCases := []struct {\n\t\tdesc       string\n\t\tinput      string\n\t\twantLI     lockInfo\n\t\twantStatus int\n\t}{{\n\t\t\"bad: junk\",\n\t\t\"xxx\",\n\t\tlockInfo{},\n\t\thttp.StatusBadRequest,\n\t}, {\n\t\t\"bad: invalid owner XML\",\n\t\t\"\" +\n\t\t\t\"<D:lockinfo xmlns:D='DAV:'>\\n\" +\n\t\t\t\"  <D:lockscope><D:exclusive/></D:lockscope>\\n\" +\n\t\t\t\"  <D:locktype><D:write/></D:locktype>\\n\" +\n\t\t\t\"  <D:owner>\\n\" +\n\t\t\t\"    <D:href>   no end tag   \\n\" +\n\t\t\t\"  </D:owner>\\n\" +\n\t\t\t\"</D:lockinfo>\",\n\t\tlockInfo{},\n\t\thttp.StatusBadRequest,\n\t}, {\n\t\t\"bad: invalid UTF-8\",\n\t\t\"\" +\n\t\t\t\"<D:lockinfo xmlns:D='DAV:'>\\n\" +\n\t\t\t\"  <D:lockscope><D:exclusive/></D:lockscope>\\n\" +\n\t\t\t\"  <D:locktype><D:write/></D:locktype>\\n\" +\n\t\t\t\"  <D:owner>\\n\" +\n\t\t\t\"    <D:href>   \\xff   </D:href>\\n\" +\n\t\t\t\"  </D:owner>\\n\" +\n\t\t\t\"</D:lockinfo>\",\n\t\tlockInfo{},\n\t\thttp.StatusBadRequest,\n\t}, {\n\t\t\"bad: unfinished XML #1\",\n\t\t\"\" +\n\t\t\t\"<D:lockinfo xmlns:D='DAV:'>\\n\" +\n\t\t\t\"  <D:lockscope><D:exclusive/></D:lockscope>\\n\" +\n\t\t\t\"  <D:locktype><D:write/></D:locktype>\\n\",\n\t\tlockInfo{},\n\t\thttp.StatusBadRequest,\n\t}, {\n\t\t\"bad: unfinished XML #2\",\n\t\t\"\" +\n\t\t\t\"<D:lockinfo xmlns:D='DAV:'>\\n\" +\n\t\t\t\"  <D:lockscope><D:exclusive/></D:lockscope>\\n\" +\n\t\t\t\"  
<D:locktype><D:write/></D:locktype>\\n\" +\n\t\t\t\"  <D:owner>\\n\",\n\t\tlockInfo{},\n\t\thttp.StatusBadRequest,\n\t}, {\n\t\t\"good: empty\",\n\t\t\"\",\n\t\tlockInfo{},\n\t\t0,\n\t}, {\n\t\t\"good: plain-text owner\",\n\t\t\"\" +\n\t\t\t\"<D:lockinfo xmlns:D='DAV:'>\\n\" +\n\t\t\t\"  <D:lockscope><D:exclusive/></D:lockscope>\\n\" +\n\t\t\t\"  <D:locktype><D:write/></D:locktype>\\n\" +\n\t\t\t\"  <D:owner>gopher</D:owner>\\n\" +\n\t\t\t\"</D:lockinfo>\",\n\t\tlockInfo{\n\t\t\tXMLName:   ixml.Name{Space: \"DAV:\", Local: \"lockinfo\"},\n\t\t\tExclusive: new(struct{}),\n\t\t\tWrite:     new(struct{}),\n\t\t\tOwner: owner{\n\t\t\t\tInnerXML: \"gopher\",\n\t\t\t},\n\t\t},\n\t\t0,\n\t}, {\n\t\t\"section 9.10.7\",\n\t\t\"\" +\n\t\t\t\"<D:lockinfo xmlns:D='DAV:'>\\n\" +\n\t\t\t\"  <D:lockscope><D:exclusive/></D:lockscope>\\n\" +\n\t\t\t\"  <D:locktype><D:write/></D:locktype>\\n\" +\n\t\t\t\"  <D:owner>\\n\" +\n\t\t\t\"    <D:href>http://example.org/~ejw/contact.html</D:href>\\n\" +\n\t\t\t\"  </D:owner>\\n\" +\n\t\t\t\"</D:lockinfo>\",\n\t\tlockInfo{\n\t\t\tXMLName:   ixml.Name{Space: \"DAV:\", Local: \"lockinfo\"},\n\t\t\tExclusive: new(struct{}),\n\t\t\tWrite:     new(struct{}),\n\t\t\tOwner: owner{\n\t\t\t\tInnerXML: \"\\n    <D:href>http://example.org/~ejw/contact.html</D:href>\\n  \",\n\t\t\t},\n\t\t},\n\t\t0,\n\t}}\n\n\tfor _, tc := range testCases {\n\t\tli, status, err := readLockInfo(strings.NewReader(tc.input))\n\t\tif tc.wantStatus != 0 {\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"%s: got nil error, want non-nil\", tc.desc)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\tt.Errorf(\"%s: %v\", tc.desc, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(li, tc.wantLI) || status != tc.wantStatus {\n\t\t\tt.Errorf(\"%s:\\ngot  lockInfo=%v, status=%v\\nwant lockInfo=%v, status=%v\",\n\t\t\t\ttc.desc, li, status, tc.wantLI, tc.wantStatus)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc TestReadPropfind(t *testing.T) {\n\ttestCases := []struct {\n\t\tdesc   
    string\n\t\tinput      string\n\t\twantPF     propfind\n\t\twantStatus int\n\t}{{\n\t\tdesc: \"propfind: propname\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <A:propname/>\\n\" +\n\t\t\t\"</A:propfind>\",\n\t\twantPF: propfind{\n\t\t\tXMLName:  ixml.Name{Space: \"DAV:\", Local: \"propfind\"},\n\t\t\tPropname: new(struct{}),\n\t\t},\n\t}, {\n\t\tdesc:  \"propfind: empty body means allprop\",\n\t\tinput: \"\",\n\t\twantPF: propfind{\n\t\t\tAllprop: new(struct{}),\n\t\t},\n\t}, {\n\t\tdesc: \"propfind: allprop\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"   <A:allprop/>\\n\" +\n\t\t\t\"</A:propfind>\",\n\t\twantPF: propfind{\n\t\t\tXMLName: ixml.Name{Space: \"DAV:\", Local: \"propfind\"},\n\t\t\tAllprop: new(struct{}),\n\t\t},\n\t}, {\n\t\tdesc: \"propfind: allprop followed by include\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <A:allprop/>\\n\" +\n\t\t\t\"  <A:include><A:displayname/></A:include>\\n\" +\n\t\t\t\"</A:propfind>\",\n\t\twantPF: propfind{\n\t\t\tXMLName: ixml.Name{Space: \"DAV:\", Local: \"propfind\"},\n\t\t\tAllprop: new(struct{}),\n\t\t\tInclude: propfindProps{xml.Name{Space: \"DAV:\", Local: \"displayname\"}},\n\t\t},\n\t}, {\n\t\tdesc: \"propfind: include followed by allprop\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <A:include><A:displayname/></A:include>\\n\" +\n\t\t\t\"  <A:allprop/>\\n\" +\n\t\t\t\"</A:propfind>\",\n\t\twantPF: propfind{\n\t\t\tXMLName: ixml.Name{Space: \"DAV:\", Local: \"propfind\"},\n\t\t\tAllprop: new(struct{}),\n\t\t\tInclude: propfindProps{xml.Name{Space: \"DAV:\", Local: \"displayname\"}},\n\t\t},\n\t}, {\n\t\tdesc: \"propfind: propfind\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <A:prop><A:displayname/></A:prop>\\n\" +\n\t\t\t\"</A:propfind>\",\n\t\twantPF: propfind{\n\t\t\tXMLName: ixml.Name{Space: \"DAV:\", Local: \"propfind\"},\n\t\t\tProp:    
propfindProps{xml.Name{Space: \"DAV:\", Local: \"displayname\"}},\n\t\t},\n\t}, {\n\t\tdesc: \"propfind: prop with ignored comments\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <A:prop>\\n\" +\n\t\t\t\"    <!-- ignore -->\\n\" +\n\t\t\t\"    <A:displayname><!-- ignore --></A:displayname>\\n\" +\n\t\t\t\"  </A:prop>\\n\" +\n\t\t\t\"</A:propfind>\",\n\t\twantPF: propfind{\n\t\t\tXMLName: ixml.Name{Space: \"DAV:\", Local: \"propfind\"},\n\t\t\tProp:    propfindProps{xml.Name{Space: \"DAV:\", Local: \"displayname\"}},\n\t\t},\n\t}, {\n\t\tdesc: \"propfind: propfind with ignored whitespace\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <A:prop>   <A:displayname/></A:prop>\\n\" +\n\t\t\t\"</A:propfind>\",\n\t\twantPF: propfind{\n\t\t\tXMLName: ixml.Name{Space: \"DAV:\", Local: \"propfind\"},\n\t\t\tProp:    propfindProps{xml.Name{Space: \"DAV:\", Local: \"displayname\"}},\n\t\t},\n\t}, {\n\t\tdesc: \"propfind: propfind with ignored mixed-content\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <A:prop>foo<A:displayname/>bar</A:prop>\\n\" +\n\t\t\t\"</A:propfind>\",\n\t\twantPF: propfind{\n\t\t\tXMLName: ixml.Name{Space: \"DAV:\", Local: \"propfind\"},\n\t\t\tProp:    propfindProps{xml.Name{Space: \"DAV:\", Local: \"displayname\"}},\n\t\t},\n\t}, {\n\t\tdesc: \"propfind: propname with ignored element (section A.4)\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <A:propname/>\\n\" +\n\t\t\t\"  <E:leave-out xmlns:E='E:'>*boss*</E:leave-out>\\n\" +\n\t\t\t\"</A:propfind>\",\n\t\twantPF: propfind{\n\t\t\tXMLName:  ixml.Name{Space: \"DAV:\", Local: \"propfind\"},\n\t\t\tPropname: new(struct{}),\n\t\t},\n\t}, {\n\t\tdesc:       \"propfind: bad: junk\",\n\t\tinput:      \"xxx\",\n\t\twantStatus: http.StatusBadRequest,\n\t}, {\n\t\tdesc: \"propfind: bad: propname and allprop (section A.3)\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\" 
 <A:propname/>\" +\n\t\t\t\"  <A:allprop/>\" +\n\t\t\t\"</A:propfind>\",\n\t\twantStatus: http.StatusBadRequest,\n\t}, {\n\t\tdesc: \"propfind: bad: propname and prop\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <A:prop><A:displayname/></A:prop>\\n\" +\n\t\t\t\"  <A:propname/>\\n\" +\n\t\t\t\"</A:propfind>\",\n\t\twantStatus: http.StatusBadRequest,\n\t}, {\n\t\tdesc: \"propfind: bad: allprop and prop\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <A:allprop/>\\n\" +\n\t\t\t\"  <A:prop><A:foo/><A:/prop>\\n\" +\n\t\t\t\"</A:propfind>\",\n\t\twantStatus: http.StatusBadRequest,\n\t}, {\n\t\tdesc: \"propfind: bad: empty propfind with ignored element (section A.4)\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <E:expired-props/>\\n\" +\n\t\t\t\"</A:propfind>\",\n\t\twantStatus: http.StatusBadRequest,\n\t}, {\n\t\tdesc: \"propfind: bad: empty prop\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <A:prop/>\\n\" +\n\t\t\t\"</A:propfind>\",\n\t\twantStatus: http.StatusBadRequest,\n\t}, {\n\t\tdesc: \"propfind: bad: prop with just chardata\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <A:prop>foo</A:prop>\\n\" +\n\t\t\t\"</A:propfind>\",\n\t\twantStatus: http.StatusBadRequest,\n\t}, {\n\t\tdesc: \"bad: interrupted prop\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <A:prop><A:foo></A:prop>\\n\",\n\t\twantStatus: http.StatusBadRequest,\n\t}, {\n\t\tdesc: \"bad: malformed end element prop\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <A:prop><A:foo/></A:bar></A:prop>\\n\",\n\t\twantStatus: http.StatusBadRequest,\n\t}, {\n\t\tdesc: \"propfind: bad: property with chardata value\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <A:prop><A:foo>bar</A:foo></A:prop>\\n\" +\n\t\t\t\"</A:propfind>\",\n\t\twantStatus: http.StatusBadRequest,\n\t}, 
{\n\t\tdesc: \"propfind: bad: property with whitespace value\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <A:prop><A:foo> </A:foo></A:prop>\\n\" +\n\t\t\t\"</A:propfind>\",\n\t\twantStatus: http.StatusBadRequest,\n\t}, {\n\t\tdesc: \"propfind: bad: include without allprop\",\n\t\tinput: \"\" +\n\t\t\t\"<A:propfind xmlns:A='DAV:'>\\n\" +\n\t\t\t\"  <A:include><A:foo/></A:include>\\n\" +\n\t\t\t\"</A:propfind>\",\n\t\twantStatus: http.StatusBadRequest,\n\t}}\n\n\tfor _, tc := range testCases {\n\t\tpf, status, err := readPropfind(strings.NewReader(tc.input))\n\t\tif tc.wantStatus != 0 {\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"%s: got nil error, want non-nil\", tc.desc)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\tt.Errorf(\"%s: %v\", tc.desc, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(pf, tc.wantPF) || status != tc.wantStatus {\n\t\t\tt.Errorf(\"%s:\\ngot  propfind=%v, status=%v\\nwant propfind=%v, status=%v\",\n\t\t\t\ttc.desc, pf, status, tc.wantPF, tc.wantStatus)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc TestMultistatusWriter(t *testing.T) {\n\t///The \"section x.y.z\" test cases come from section x.y.z of the spec at\n\t// http://www.webdav.org/specs/rfc4918.html\n\ttestCases := []struct {\n\t\tdesc        string\n\t\tresponses   []response\n\t\trespdesc    string\n\t\twriteHeader bool\n\t\twantXML     string\n\t\twantCode    int\n\t\twantErr     error\n\t}{{\n\t\tdesc: \"section 9.2.2 (failed dependency)\",\n\t\tresponses: []response{{\n\t\t\tHref: []string{\"http://example.com/foo\"},\n\t\t\tPropstat: []propstat{{\n\t\t\t\tProp: []Property{{\n\t\t\t\t\tXMLName: xml.Name{\n\t\t\t\t\t\tSpace: \"http://ns.example.com/\",\n\t\t\t\t\t\tLocal: \"Authors\",\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t\tStatus: \"HTTP/1.1 424 Failed Dependency\",\n\t\t\t}, {\n\t\t\t\tProp: []Property{{\n\t\t\t\t\tXMLName: xml.Name{\n\t\t\t\t\t\tSpace: \"http://ns.example.com/\",\n\t\t\t\t\t\tLocal: 
\"Copyright-Owner\",\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t\tStatus: \"HTTP/1.1 409 Conflict\",\n\t\t\t}},\n\t\t\tResponseDescription: \"Copyright Owner cannot be deleted or altered.\",\n\t\t}},\n\t\twantXML: `` +\n\t\t\t`<?xml version=\"1.0\" encoding=\"UTF-8\"?>` +\n\t\t\t`<multistatus xmlns=\"DAV:\">` +\n\t\t\t`  <response>` +\n\t\t\t`    <href>http://example.com/foo</href>` +\n\t\t\t`    <propstat>` +\n\t\t\t`      <prop>` +\n\t\t\t`        <Authors xmlns=\"http://ns.example.com/\"></Authors>` +\n\t\t\t`      </prop>` +\n\t\t\t`      <status>HTTP/1.1 424 Failed Dependency</status>` +\n\t\t\t`    </propstat>` +\n\t\t\t`    <propstat xmlns=\"DAV:\">` +\n\t\t\t`      <prop>` +\n\t\t\t`        <Copyright-Owner xmlns=\"http://ns.example.com/\"></Copyright-Owner>` +\n\t\t\t`      </prop>` +\n\t\t\t`      <status>HTTP/1.1 409 Conflict</status>` +\n\t\t\t`    </propstat>` +\n\t\t\t`  <responsedescription>Copyright Owner cannot be deleted or altered.</responsedescription>` +\n\t\t\t`</response>` +\n\t\t\t`</multistatus>`,\n\t\twantCode: StatusMulti,\n\t}, {\n\t\tdesc: \"section 9.6.2 (lock-token-submitted)\",\n\t\tresponses: []response{{\n\t\t\tHref:   []string{\"http://example.com/foo\"},\n\t\t\tStatus: \"HTTP/1.1 423 Locked\",\n\t\t\tError: &xmlError{\n\t\t\t\tInnerXML: []byte(`<lock-token-submitted xmlns=\"DAV:\"/>`),\n\t\t\t},\n\t\t}},\n\t\twantXML: `` +\n\t\t\t`<?xml version=\"1.0\" encoding=\"UTF-8\"?>` +\n\t\t\t`<multistatus xmlns=\"DAV:\">` +\n\t\t\t`  <response>` +\n\t\t\t`    <href>http://example.com/foo</href>` +\n\t\t\t`    <status>HTTP/1.1 423 Locked</status>` +\n\t\t\t`    <error><lock-token-submitted xmlns=\"DAV:\"/></error>` +\n\t\t\t`  </response>` +\n\t\t\t`</multistatus>`,\n\t\twantCode: StatusMulti,\n\t}, {\n\t\tdesc: \"section 9.1.3\",\n\t\tresponses: []response{{\n\t\t\tHref: []string{\"http://example.com/foo\"},\n\t\t\tPropstat: []propstat{{\n\t\t\t\tProp: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"http://ns.example.com/boxschema/\", Local: 
\"bigbox\"},\n\t\t\t\t\tInnerXML: []byte(`` +\n\t\t\t\t\t\t`<BoxType xmlns=\"http://ns.example.com/boxschema/\">` +\n\t\t\t\t\t\t`Box type A` +\n\t\t\t\t\t\t`</BoxType>`),\n\t\t\t\t}, {\n\t\t\t\t\tXMLName: xml.Name{Space: \"http://ns.example.com/boxschema/\", Local: \"author\"},\n\t\t\t\t\tInnerXML: []byte(`` +\n\t\t\t\t\t\t`<Name xmlns=\"http://ns.example.com/boxschema/\">` +\n\t\t\t\t\t\t`J.J. Johnson` +\n\t\t\t\t\t\t`</Name>`),\n\t\t\t\t}},\n\t\t\t\tStatus: \"HTTP/1.1 200 OK\",\n\t\t\t}, {\n\t\t\t\tProp: []Property{{\n\t\t\t\t\tXMLName: xml.Name{Space: \"http://ns.example.com/boxschema/\", Local: \"DingALing\"},\n\t\t\t\t}, {\n\t\t\t\t\tXMLName: xml.Name{Space: \"http://ns.example.com/boxschema/\", Local: \"Random\"},\n\t\t\t\t}},\n\t\t\t\tStatus:              \"HTTP/1.1 403 Forbidden\",\n\t\t\t\tResponseDescription: \"The user does not have access to the DingALing property.\",\n\t\t\t}},\n\t\t}},\n\t\trespdesc: \"There has been an access violation error.\",\n\t\twantXML: `` +\n\t\t\t`<?xml version=\"1.0\" encoding=\"UTF-8\"?>` +\n\t\t\t`<multistatus xmlns=\"DAV:\" xmlns:B=\"http://ns.example.com/boxschema/\">` +\n\t\t\t`  <response>` +\n\t\t\t`    <href>http://example.com/foo</href>` +\n\t\t\t`    <propstat>` +\n\t\t\t`      <prop>` +\n\t\t\t`        <B:bigbox><B:BoxType>Box type A</B:BoxType></B:bigbox>` +\n\t\t\t`        <B:author><B:Name>J.J. 
Johnson</B:Name></B:author>` +\n\t\t\t`      </prop>` +\n\t\t\t`      <status>HTTP/1.1 200 OK</status>` +\n\t\t\t`    </propstat>` +\n\t\t\t`    <propstat>` +\n\t\t\t`      <prop>` +\n\t\t\t`        <B:DingALing/>` +\n\t\t\t`        <B:Random/>` +\n\t\t\t`      </prop>` +\n\t\t\t`      <status>HTTP/1.1 403 Forbidden</status>` +\n\t\t\t`      <responsedescription>The user does not have access to the DingALing property.</responsedescription>` +\n\t\t\t`    </propstat>` +\n\t\t\t`  </response>` +\n\t\t\t`  <responsedescription>There has been an access violation error.</responsedescription>` +\n\t\t\t`</multistatus>`,\n\t\twantCode: StatusMulti,\n\t}, {\n\t\tdesc: \"no response written\",\n\t\t// default of http.responseWriter\n\t\twantCode: http.StatusOK,\n\t}, {\n\t\tdesc:     \"no response written (with description)\",\n\t\trespdesc: \"too bad\",\n\t\t// default of http.responseWriter\n\t\twantCode: http.StatusOK,\n\t}, {\n\t\tdesc:        \"empty multistatus with header\",\n\t\twriteHeader: true,\n\t\twantXML:     `<multistatus xmlns=\"DAV:\"></multistatus>`,\n\t\twantCode:    StatusMulti,\n\t}, {\n\t\tdesc: \"bad: no href\",\n\t\tresponses: []response{{\n\t\t\tPropstat: []propstat{{\n\t\t\t\tProp: []Property{{\n\t\t\t\t\tXMLName: xml.Name{\n\t\t\t\t\t\tSpace: \"http://example.com/\",\n\t\t\t\t\t\tLocal: \"foo\",\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t\tStatus: \"HTTP/1.1 200 OK\",\n\t\t\t}},\n\t\t}},\n\t\twantErr: errInvalidResponse,\n\t\t// default of http.responseWriter\n\t\twantCode: http.StatusOK,\n\t}, {\n\t\tdesc: \"bad: multiple hrefs and no status\",\n\t\tresponses: []response{{\n\t\t\tHref: []string{\"http://example.com/foo\", \"http://example.com/bar\"},\n\t\t}},\n\t\twantErr: errInvalidResponse,\n\t\t// default of http.responseWriter\n\t\twantCode: http.StatusOK,\n\t}, {\n\t\tdesc: \"bad: one href and no propstat\",\n\t\tresponses: []response{{\n\t\t\tHref: []string{\"http://example.com/foo\"},\n\t\t}},\n\t\twantErr: errInvalidResponse,\n\t\t// default of 
http.responseWriter\n\t\twantCode: http.StatusOK,\n\t}, {\n\t\tdesc: \"bad: status with one href and propstat\",\n\t\tresponses: []response{{\n\t\t\tHref: []string{\"http://example.com/foo\"},\n\t\t\tPropstat: []propstat{{\n\t\t\t\tProp: []Property{{\n\t\t\t\t\tXMLName: xml.Name{\n\t\t\t\t\t\tSpace: \"http://example.com/\",\n\t\t\t\t\t\tLocal: \"foo\",\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t\tStatus: \"HTTP/1.1 200 OK\",\n\t\t\t}},\n\t\t\tStatus: \"HTTP/1.1 200 OK\",\n\t\t}},\n\t\twantErr: errInvalidResponse,\n\t\t// default of http.responseWriter\n\t\twantCode: http.StatusOK,\n\t}, {\n\t\tdesc: \"bad: multiple hrefs and propstat\",\n\t\tresponses: []response{{\n\t\t\tHref: []string{\n\t\t\t\t\"http://example.com/foo\",\n\t\t\t\t\"http://example.com/bar\",\n\t\t\t},\n\t\t\tPropstat: []propstat{{\n\t\t\t\tProp: []Property{{\n\t\t\t\t\tXMLName: xml.Name{\n\t\t\t\t\t\tSpace: \"http://example.com/\",\n\t\t\t\t\t\tLocal: \"foo\",\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t\tStatus: \"HTTP/1.1 200 OK\",\n\t\t\t}},\n\t\t}},\n\t\twantErr: errInvalidResponse,\n\t\t// default of http.responseWriter\n\t\twantCode: http.StatusOK,\n\t}}\n\n\tn := xmlNormalizer{omitWhitespace: true}\nloop:\n\tfor _, tc := range testCases {\n\t\trec := httptest.NewRecorder()\n\t\tw := multistatusWriter{w: rec, responseDescription: tc.respdesc}\n\t\tif tc.writeHeader {\n\t\t\tif err := w.writeHeader(); err != nil {\n\t\t\t\tt.Errorf(\"%s: got writeHeader error %v, want nil\", tc.desc, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tfor _, r := range tc.responses {\n\t\t\tif err := w.write(&r); err != nil {\n\t\t\t\tif err != tc.wantErr {\n\t\t\t\t\tt.Errorf(\"%s: got write error %v, want %v\",\n\t\t\t\t\t\ttc.desc, err, tc.wantErr)\n\t\t\t\t}\n\t\t\t\tcontinue loop\n\t\t\t}\n\t\t}\n\t\tif err := w.close(); err != tc.wantErr {\n\t\t\tt.Errorf(\"%s: got close error %v, want %v\",\n\t\t\t\ttc.desc, err, tc.wantErr)\n\t\t\tcontinue\n\t\t}\n\t\tif rec.Code != tc.wantCode {\n\t\t\tt.Errorf(\"%s: got HTTP status code %d, 
want %d\\n\",\n\t\t\t\ttc.desc, rec.Code, tc.wantCode)\n\t\t\tcontinue\n\t\t}\n\t\tgotXML := rec.Body.String()\n\t\teq, err := n.equalXML(strings.NewReader(gotXML), strings.NewReader(tc.wantXML))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: equalXML: %v\", tc.desc, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !eq {\n\t\t\tt.Errorf(\"%s: XML body\\ngot  %s\\nwant %s\", tc.desc, gotXML, tc.wantXML)\n\t\t}\n\t}\n}\n\nfunc TestReadProppatch(t *testing.T) {\n\tppStr := func(pps []Proppatch) string {\n\t\tvar outer []string\n\t\tfor _, pp := range pps {\n\t\t\tvar inner []string\n\t\t\tfor _, p := range pp.Props {\n\t\t\t\tinner = append(inner, fmt.Sprintf(\"{XMLName: %q, Lang: %q, InnerXML: %q}\",\n\t\t\t\t\tp.XMLName, p.Lang, p.InnerXML))\n\t\t\t}\n\t\t\touter = append(outer, fmt.Sprintf(\"{Remove: %t, Props: [%s]}\",\n\t\t\t\tpp.Remove, strings.Join(inner, \", \")))\n\t\t}\n\t\treturn \"[\" + strings.Join(outer, \", \") + \"]\"\n\t}\n\n\ttestCases := []struct {\n\t\tdesc       string\n\t\tinput      string\n\t\twantPP     []Proppatch\n\t\twantStatus int\n\t}{{\n\t\tdesc: \"proppatch: section 9.2 (with simple property value)\",\n\t\tinput: `` +\n\t\t\t`<?xml version=\"1.0\" encoding=\"utf-8\" ?>` +\n\t\t\t`<D:propertyupdate xmlns:D=\"DAV:\"` +\n\t\t\t`                  xmlns:Z=\"http://ns.example.com/z/\">` +\n\t\t\t`    <D:set>` +\n\t\t\t`         <D:prop><Z:Authors>somevalue</Z:Authors></D:prop>` +\n\t\t\t`    </D:set>` +\n\t\t\t`    <D:remove>` +\n\t\t\t`         <D:prop><Z:Copyright-Owner/></D:prop>` +\n\t\t\t`    </D:remove>` +\n\t\t\t`</D:propertyupdate>`,\n\t\twantPP: []Proppatch{{\n\t\t\tProps: []Property{{\n\t\t\t\txml.Name{Space: \"http://ns.example.com/z/\", Local: \"Authors\"},\n\t\t\t\t\"\",\n\t\t\t\t[]byte(`somevalue`),\n\t\t\t}},\n\t\t}, {\n\t\t\tRemove: true,\n\t\t\tProps: []Property{{\n\t\t\t\txml.Name{Space: \"http://ns.example.com/z/\", Local: \"Copyright-Owner\"},\n\t\t\t\t\"\",\n\t\t\t\tnil,\n\t\t\t}},\n\t\t}},\n\t}, {\n\t\tdesc: \"proppatch: lang attribute 
on prop\",\n\t\tinput: `` +\n\t\t\t`<?xml version=\"1.0\" encoding=\"utf-8\" ?>` +\n\t\t\t`<D:propertyupdate xmlns:D=\"DAV:\">` +\n\t\t\t`    <D:set>` +\n\t\t\t`         <D:prop xml:lang=\"en\">` +\n\t\t\t`              <foo xmlns=\"http://example.com/ns\"/>` +\n\t\t\t`         </D:prop>` +\n\t\t\t`    </D:set>` +\n\t\t\t`</D:propertyupdate>`,\n\t\twantPP: []Proppatch{{\n\t\t\tProps: []Property{{\n\t\t\t\txml.Name{Space: \"http://example.com/ns\", Local: \"foo\"},\n\t\t\t\t\"en\",\n\t\t\t\tnil,\n\t\t\t}},\n\t\t}},\n\t}, {\n\t\tdesc: \"bad: remove with value\",\n\t\tinput: `` +\n\t\t\t`<?xml version=\"1.0\" encoding=\"utf-8\" ?>` +\n\t\t\t`<D:propertyupdate xmlns:D=\"DAV:\"` +\n\t\t\t`                  xmlns:Z=\"http://ns.example.com/z/\">` +\n\t\t\t`    <D:remove>` +\n\t\t\t`         <D:prop>` +\n\t\t\t`              <Z:Authors>` +\n\t\t\t`              <Z:Author>Jim Whitehead</Z:Author>` +\n\t\t\t`              </Z:Authors>` +\n\t\t\t`         </D:prop>` +\n\t\t\t`    </D:remove>` +\n\t\t\t`</D:propertyupdate>`,\n\t\twantStatus: http.StatusBadRequest,\n\t}, {\n\t\tdesc: \"bad: empty propertyupdate\",\n\t\tinput: `` +\n\t\t\t`<?xml version=\"1.0\" encoding=\"utf-8\" ?>` +\n\t\t\t`<D:propertyupdate xmlns:D=\"DAV:\"` +\n\t\t\t`</D:propertyupdate>`,\n\t\twantStatus: http.StatusBadRequest,\n\t}, {\n\t\tdesc: \"bad: empty prop\",\n\t\tinput: `` +\n\t\t\t`<?xml version=\"1.0\" encoding=\"utf-8\" ?>` +\n\t\t\t`<D:propertyupdate xmlns:D=\"DAV:\"` +\n\t\t\t`                  xmlns:Z=\"http://ns.example.com/z/\">` +\n\t\t\t`    <D:remove>` +\n\t\t\t`        <D:prop/>` +\n\t\t\t`    </D:remove>` +\n\t\t\t`</D:propertyupdate>`,\n\t\twantStatus: http.StatusBadRequest,\n\t}}\n\n\tfor _, tc := range testCases {\n\t\tpp, status, err := readProppatch(strings.NewReader(tc.input))\n\t\tif tc.wantStatus != 0 {\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"%s: got nil error, want non-nil\", tc.desc)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\tt.Errorf(\"%s: %v\", 
tc.desc, err)\n\t\t\tcontinue\n\t\t}\n\t\tif status != tc.wantStatus {\n\t\t\tt.Errorf(\"%s: got status %d, want %d\", tc.desc, status, tc.wantStatus)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(pp, tc.wantPP) || status != tc.wantStatus {\n\t\t\tt.Errorf(\"%s: proppatch\\ngot  %v\\nwant %v\", tc.desc, ppStr(pp), ppStr(tc.wantPP))\n\t\t}\n\t}\n}\n\nfunc TestUnmarshalXMLValue(t *testing.T) {\n\ttestCases := []struct {\n\t\tdesc    string\n\t\tinput   string\n\t\twantVal string\n\t}{{\n\t\tdesc:    \"simple char data\",\n\t\tinput:   \"<root>foo</root>\",\n\t\twantVal: \"foo\",\n\t}, {\n\t\tdesc:    \"empty element\",\n\t\tinput:   \"<root><foo/></root>\",\n\t\twantVal: \"<foo/>\",\n\t}, {\n\t\tdesc:    \"preserve namespace\",\n\t\tinput:   `<root><foo xmlns=\"bar\"/></root>`,\n\t\twantVal: `<foo xmlns=\"bar\"/>`,\n\t}, {\n\t\tdesc:    \"preserve root element namespace\",\n\t\tinput:   `<root xmlns:bar=\"bar\"><bar:foo/></root>`,\n\t\twantVal: `<foo xmlns=\"bar\"/>`,\n\t}, {\n\t\tdesc:    \"preserve whitespace\",\n\t\tinput:   \"<root>  \\t </root>\",\n\t\twantVal: \"  \\t \",\n\t}, {\n\t\tdesc:    \"preserve mixed content\",\n\t\tinput:   `<root xmlns=\"bar\">  <foo>a<bam xmlns=\"baz\"/> </foo> </root>`,\n\t\twantVal: `  <foo xmlns=\"bar\">a<bam xmlns=\"baz\"/> </foo> `,\n\t}, {\n\t\tdesc: \"section 9.2\",\n\t\tinput: `` +\n\t\t\t`<Z:Authors xmlns:Z=\"http://ns.example.com/z/\">` +\n\t\t\t`  <Z:Author>Jim Whitehead</Z:Author>` +\n\t\t\t`  <Z:Author>Roy Fielding</Z:Author>` +\n\t\t\t`</Z:Authors>`,\n\t\twantVal: `` +\n\t\t\t`  <Author xmlns=\"http://ns.example.com/z/\">Jim Whitehead</Author>` +\n\t\t\t`  <Author xmlns=\"http://ns.example.com/z/\">Roy Fielding</Author>`,\n\t}, {\n\t\tdesc: \"section 4.3.1 (mixed content)\",\n\t\tinput: `` +\n\t\t\t`<x:author ` +\n\t\t\t`    xmlns:x='http://example.com/ns' ` +\n\t\t\t`    xmlns:D=\"DAV:\">` +\n\t\t\t`  <x:name>Jane Doe</x:name>` +\n\t\t\t`  <!-- Jane's contact info -->` +\n\t\t\t`  <x:uri type='email'` +\n\t\t\t`  
       added='2005-11-26'>mailto:jane.doe@example.com</x:uri>` +\n\t\t\t`  <x:uri type='web'` +\n\t\t\t`         added='2005-11-27'>http://www.example.com</x:uri>` +\n\t\t\t`  <x:notes xmlns:h='http://www.w3.org/1999/xhtml'>` +\n\t\t\t`    Jane has been working way <h:em>too</h:em> long on the` +\n\t\t\t`    long-awaited revision of <![CDATA[<RFC2518>]]>.` +\n\t\t\t`  </x:notes>` +\n\t\t\t`</x:author>`,\n\t\twantVal: `` +\n\t\t\t`  <name xmlns=\"http://example.com/ns\">Jane Doe</name>` +\n\t\t\t`  ` +\n\t\t\t`  <uri type='email'` +\n\t\t\t`       xmlns=\"http://example.com/ns\" ` +\n\t\t\t`       added='2005-11-26'>mailto:jane.doe@example.com</uri>` +\n\t\t\t`  <uri added='2005-11-27'` +\n\t\t\t`       type='web'` +\n\t\t\t`       xmlns=\"http://example.com/ns\">http://www.example.com</uri>` +\n\t\t\t`  <notes xmlns=\"http://example.com/ns\" ` +\n\t\t\t`         xmlns:h=\"http://www.w3.org/1999/xhtml\">` +\n\t\t\t`    Jane has been working way <h:em>too</h:em> long on the` +\n\t\t\t`    long-awaited revision of &lt;RFC2518&gt;.` +\n\t\t\t`  </notes>`,\n\t}}\n\n\tvar n xmlNormalizer\n\tfor _, tc := range testCases {\n\t\td := ixml.NewDecoder(strings.NewReader(tc.input))\n\t\tvar v xmlValue\n\t\tif err := d.Decode(&v); err != nil {\n\t\t\tt.Errorf(\"%s: got error %v, want nil\", tc.desc, err)\n\t\t\tcontinue\n\t\t}\n\t\teq, err := n.equalXML(bytes.NewReader(v), strings.NewReader(tc.wantVal))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: equalXML: %v\", tc.desc, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !eq {\n\t\t\tt.Errorf(\"%s:\\ngot  %s\\nwant %s\", tc.desc, string(v), tc.wantVal)\n\t\t}\n\t}\n}\n\n// xmlNormalizer normalizes XML.\ntype xmlNormalizer struct {\n\t// omitWhitespace instructs to ignore whitespace between element tags.\n\tomitWhitespace bool\n\t// omitComments instructs to ignore XML comments.\n\tomitComments bool\n}\n\n// normalize writes the normalized XML content of r to w. 
It applies the\n// following rules\n//\n//     * Rename namespace prefixes according to an internal heuristic.\n//     * Remove unnecessary namespace declarations.\n//     * Sort attributes in XML start elements in lexical order of their\n//       fully qualified name.\n//     * Remove XML directives and processing instructions.\n//     * Remove CDATA between XML tags that only contains whitespace, if\n//       instructed to do so.\n//     * Remove comments, if instructed to do so.\n//\nfunc (n *xmlNormalizer) normalize(w io.Writer, r io.Reader) error {\n\td := ixml.NewDecoder(r)\n\te := ixml.NewEncoder(w)\n\tfor {\n\t\tt, err := d.Token()\n\t\tif err != nil {\n\t\t\tif t == nil && err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tswitch val := t.(type) {\n\t\tcase ixml.Directive, ixml.ProcInst:\n\t\t\tcontinue\n\t\tcase ixml.Comment:\n\t\t\tif n.omitComments {\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase ixml.CharData:\n\t\t\tif n.omitWhitespace && len(bytes.TrimSpace(val)) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase ixml.StartElement:\n\t\t\tstart, _ := ixml.CopyToken(val).(ixml.StartElement)\n\t\t\tattr := start.Attr[:0]\n\t\t\tfor _, a := range start.Attr {\n\t\t\t\tif a.Name.Space == \"xmlns\" || a.Name.Local == \"xmlns\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tattr = append(attr, a)\n\t\t\t}\n\t\t\tsort.Sort(byName(attr))\n\t\t\tstart.Attr = attr\n\t\t\tt = start\n\t\t}\n\t\terr = e.EncodeToken(t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn e.Flush()\n}\n\n// equalXML tests for equality of the normalized XML contents of a and b.\nfunc (n *xmlNormalizer) equalXML(a, b io.Reader) (bool, error) {\n\tvar buf bytes.Buffer\n\tif err := n.normalize(&buf, a); err != nil {\n\t\treturn false, err\n\t}\n\tnormA := buf.String()\n\tbuf.Reset()\n\tif err := n.normalize(&buf, b); err != nil {\n\t\treturn false, err\n\t}\n\tnormB := buf.String()\n\treturn normA == normB, nil\n}\n\ntype byName []ixml.Attr\n\nfunc (a byName) Len() int      { 
return len(a) }\nfunc (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a byName) Less(i, j int) bool {\n\tif a[i].Name.Space != a[j].Name.Space {\n\t\treturn a[i].Name.Space < a[j].Name.Space\n\t}\n\treturn a[i].Name.Local < a[j].Name.Local\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/websocket/client.go",
    "content": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage websocket\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n)\n\n// DialError is an error that occurs while dialling a websocket server.\ntype DialError struct {\n\t*Config\n\tErr error\n}\n\nfunc (e *DialError) Error() string {\n\treturn \"websocket.Dial \" + e.Config.Location.String() + \": \" + e.Err.Error()\n}\n\n// NewConfig creates a new WebSocket config for client connection.\nfunc NewConfig(server, origin string) (config *Config, err error) {\n\tconfig = new(Config)\n\tconfig.Version = ProtocolVersionHybi13\n\tconfig.Location, err = url.ParseRequestURI(server)\n\tif err != nil {\n\t\treturn\n\t}\n\tconfig.Origin, err = url.ParseRequestURI(origin)\n\tif err != nil {\n\t\treturn\n\t}\n\tconfig.Header = http.Header(make(map[string][]string))\n\treturn\n}\n\n// NewClient creates a new WebSocket client connection over rwc.\nfunc NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) {\n\tbr := bufio.NewReader(rwc)\n\tbw := bufio.NewWriter(rwc)\n\terr = hybiClientHandshake(config, br, bw)\n\tif err != nil {\n\t\treturn\n\t}\n\tbuf := bufio.NewReadWriter(br, bw)\n\tws = newHybiClientConn(config, buf, rwc)\n\treturn\n}\n\n// Dial opens a new client connection to a WebSocket.\nfunc Dial(url_, protocol, origin string) (ws *Conn, err error) {\n\tconfig, err := NewConfig(url_, origin)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif protocol != \"\" {\n\t\tconfig.Protocol = []string{protocol}\n\t}\n\treturn DialConfig(config)\n}\n\nvar portMap = map[string]string{\n\t\"ws\":  \"80\",\n\t\"wss\": \"443\",\n}\n\nfunc parseAuthority(location *url.URL) string {\n\tif _, ok := portMap[location.Scheme]; ok {\n\t\tif _, _, err := net.SplitHostPort(location.Host); err != nil {\n\t\t\treturn net.JoinHostPort(location.Host, 
portMap[location.Scheme])\n\t\t}\n\t}\n\treturn location.Host\n}\n\n// DialConfig opens a new client connection to a WebSocket with a config.\nfunc DialConfig(config *Config) (ws *Conn, err error) {\n\tvar client net.Conn\n\tif config.Location == nil {\n\t\treturn nil, &DialError{config, ErrBadWebSocketLocation}\n\t}\n\tif config.Origin == nil {\n\t\treturn nil, &DialError{config, ErrBadWebSocketOrigin}\n\t}\n\tdialer := config.Dialer\n\tif dialer == nil {\n\t\tdialer = &net.Dialer{}\n\t}\n\tclient, err = dialWithDialer(dialer, config)\n\tif err != nil {\n\t\tgoto Error\n\t}\n\tws, err = NewClient(config, client)\n\tif err != nil {\n\t\tclient.Close()\n\t\tgoto Error\n\t}\n\treturn\n\nError:\n\treturn nil, &DialError{config, err}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/websocket/dial.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage websocket\n\nimport (\n\t\"crypto/tls\"\n\t\"net\"\n)\n\nfunc dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) {\n\tswitch config.Location.Scheme {\n\tcase \"ws\":\n\t\tconn, err = dialer.Dial(\"tcp\", parseAuthority(config.Location))\n\n\tcase \"wss\":\n\t\tconn, err = tls.DialWithDialer(dialer, \"tcp\", parseAuthority(config.Location), config.TlsConfig)\n\n\tdefault:\n\t\terr = ErrBadScheme\n\t}\n\treturn\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/websocket/dial_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage websocket\n\nimport (\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\t\"time\"\n)\n\n// This test depend on Go 1.3+ because in earlier versions the Dialer won't be\n// used in TLS connections and a timeout won't be triggered.\nfunc TestDialConfigTLSWithDialer(t *testing.T) {\n\ttlsServer := httptest.NewTLSServer(nil)\n\ttlsServerAddr := tlsServer.Listener.Addr().String()\n\tlog.Print(\"Test TLS WebSocket server listening on \", tlsServerAddr)\n\tdefer tlsServer.Close()\n\tconfig, _ := NewConfig(fmt.Sprintf(\"wss://%s/echo\", tlsServerAddr), \"http://localhost\")\n\tconfig.Dialer = &net.Dialer{\n\t\tDeadline: time.Now().Add(-time.Minute),\n\t}\n\tconfig.TlsConfig = &tls.Config{\n\t\tInsecureSkipVerify: true,\n\t}\n\t_, err := DialConfig(config)\n\tdialerr, ok := err.(*DialError)\n\tif !ok {\n\t\tt.Fatalf(\"DialError expected, got %#v\", err)\n\t}\n\tneterr, ok := dialerr.Err.(*net.OpError)\n\tif !ok {\n\t\tt.Fatalf(\"net.OpError error expected, got %#v\", dialerr.Err)\n\t}\n\tif !neterr.Timeout() {\n\t\tt.Fatalf(\"expected timeout error, got %#v\", neterr)\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/websocket/exampledial_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage websocket_test\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\t\"golang.org/x/net/websocket\"\n)\n\n// This example demonstrates a trivial client.\nfunc ExampleDial() {\n\torigin := \"http://localhost/\"\n\turl := \"ws://localhost:12345/ws\"\n\tws, err := websocket.Dial(url, \"\", origin)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif _, err := ws.Write([]byte(\"hello, world!\\n\")); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar msg = make([]byte, 512)\n\tvar n int\n\tif n, err = ws.Read(msg); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"Received: %s.\\n\", msg[:n])\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/websocket/examplehandler_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage websocket_test\n\nimport (\n\t\"io\"\n\t\"net/http\"\n\n\t\"golang.org/x/net/websocket\"\n)\n\n// Echo the data received on the WebSocket.\nfunc EchoServer(ws *websocket.Conn) {\n\tio.Copy(ws, ws)\n}\n\n// This example demonstrates a trivial echo server.\nfunc ExampleHandler() {\n\thttp.Handle(\"/echo\", websocket.Handler(EchoServer))\n\terr := http.ListenAndServe(\":12345\", nil)\n\tif err != nil {\n\t\tpanic(\"ListenAndServe: \" + err.Error())\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/websocket/hybi.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage websocket\n\n// This file implements a protocol of hybi draft.\n// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto/rand\"\n\t\"crypto/sha1\"\n\t\"encoding/base64\"\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n)\n\nconst (\n\twebsocketGUID = \"258EAFA5-E914-47DA-95CA-C5AB0DC85B11\"\n\n\tcloseStatusNormal            = 1000\n\tcloseStatusGoingAway         = 1001\n\tcloseStatusProtocolError     = 1002\n\tcloseStatusUnsupportedData   = 1003\n\tcloseStatusFrameTooLarge     = 1004\n\tcloseStatusNoStatusRcvd      = 1005\n\tcloseStatusAbnormalClosure   = 1006\n\tcloseStatusBadMessageData    = 1007\n\tcloseStatusPolicyViolation   = 1008\n\tcloseStatusTooBigData        = 1009\n\tcloseStatusExtensionMismatch = 1010\n\n\tmaxControlFramePayloadLength = 125\n)\n\nvar (\n\tErrBadMaskingKey         = &ProtocolError{\"bad masking key\"}\n\tErrBadPongMessage        = &ProtocolError{\"bad pong message\"}\n\tErrBadClosingStatus      = &ProtocolError{\"bad closing status\"}\n\tErrUnsupportedExtensions = &ProtocolError{\"unsupported extensions\"}\n\tErrNotImplemented        = &ProtocolError{\"not implemented\"}\n\n\thandshakeHeader = map[string]bool{\n\t\t\"Host\":                   true,\n\t\t\"Upgrade\":                true,\n\t\t\"Connection\":             true,\n\t\t\"Sec-Websocket-Key\":      true,\n\t\t\"Sec-Websocket-Origin\":   true,\n\t\t\"Sec-Websocket-Version\":  true,\n\t\t\"Sec-Websocket-Protocol\": true,\n\t\t\"Sec-Websocket-Accept\":   true,\n\t}\n)\n\n// A hybiFrameHeader is a frame header as defined in hybi draft.\ntype hybiFrameHeader struct {\n\tFin        bool\n\tRsv        [3]bool\n\tOpCode     byte\n\tLength     int64\n\tMaskingKey []byte\n\n\tdata 
*bytes.Buffer\n}\n\n// A hybiFrameReader is a reader for hybi frame.\ntype hybiFrameReader struct {\n\treader io.Reader\n\n\theader hybiFrameHeader\n\tpos    int64\n\tlength int\n}\n\nfunc (frame *hybiFrameReader) Read(msg []byte) (n int, err error) {\n\tn, err = frame.reader.Read(msg)\n\tif frame.header.MaskingKey != nil {\n\t\tfor i := 0; i < n; i++ {\n\t\t\tmsg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4]\n\t\t\tframe.pos++\n\t\t}\n\t}\n\treturn n, err\n}\n\nfunc (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode }\n\nfunc (frame *hybiFrameReader) HeaderReader() io.Reader {\n\tif frame.header.data == nil {\n\t\treturn nil\n\t}\n\tif frame.header.data.Len() == 0 {\n\t\treturn nil\n\t}\n\treturn frame.header.data\n}\n\nfunc (frame *hybiFrameReader) TrailerReader() io.Reader { return nil }\n\nfunc (frame *hybiFrameReader) Len() (n int) { return frame.length }\n\n// A hybiFrameReaderFactory creates new frame reader based on its frame type.\ntype hybiFrameReaderFactory struct {\n\t*bufio.Reader\n}\n\n// NewFrameReader reads a frame header from the connection, and creates new reader for the frame.\n// See Section 5.2 Base Framing protocol for detail.\n// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2\nfunc (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) {\n\thybiFrame := new(hybiFrameReader)\n\tframe = hybiFrame\n\tvar header []byte\n\tvar b byte\n\t// First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits)\n\tb, err = buf.ReadByte()\n\tif err != nil {\n\t\treturn\n\t}\n\theader = append(header, b)\n\thybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0\n\tfor i := 0; i < 3; i++ {\n\t\tj := uint(6 - i)\n\t\thybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0\n\t}\n\thybiFrame.header.OpCode = header[0] & 0x0f\n\n\t// Second byte. 
Mask/Payload len(7bits)\n\tb, err = buf.ReadByte()\n\tif err != nil {\n\t\treturn\n\t}\n\theader = append(header, b)\n\tmask := (b & 0x80) != 0\n\tb &= 0x7f\n\tlengthFields := 0\n\tswitch {\n\tcase b <= 125: // Payload length 7bits.\n\t\thybiFrame.header.Length = int64(b)\n\tcase b == 126: // Payload length 7+16bits\n\t\tlengthFields = 2\n\tcase b == 127: // Payload length 7+64bits\n\t\tlengthFields = 8\n\t}\n\tfor i := 0; i < lengthFields; i++ {\n\t\tb, err = buf.ReadByte()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits\n\t\t\tb &= 0x7f\n\t\t}\n\t\theader = append(header, b)\n\t\thybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b)\n\t}\n\tif mask {\n\t\t// Masking key. 4 bytes.\n\t\tfor i := 0; i < 4; i++ {\n\t\t\tb, err = buf.ReadByte()\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\theader = append(header, b)\n\t\t\thybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b)\n\t\t}\n\t}\n\thybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length)\n\thybiFrame.header.data = bytes.NewBuffer(header)\n\thybiFrame.length = len(header) + int(hybiFrame.header.Length)\n\treturn\n}\n\n// A HybiFrameWriter is a writer for hybi frame.\ntype hybiFrameWriter struct {\n\twriter *bufio.Writer\n\n\theader *hybiFrameHeader\n}\n\nfunc (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) {\n\tvar header []byte\n\tvar b byte\n\tif frame.header.Fin {\n\t\tb |= 0x80\n\t}\n\tfor i := 0; i < 3; i++ {\n\t\tif frame.header.Rsv[i] {\n\t\t\tj := uint(6 - i)\n\t\t\tb |= 1 << j\n\t\t}\n\t}\n\tb |= frame.header.OpCode\n\theader = append(header, b)\n\tif frame.header.MaskingKey != nil {\n\t\tb = 0x80\n\t} else {\n\t\tb = 0\n\t}\n\tlengthFields := 0\n\tlength := len(msg)\n\tswitch {\n\tcase length <= 125:\n\t\tb |= byte(length)\n\tcase length < 65536:\n\t\tb |= 126\n\t\tlengthFields = 2\n\tdefault:\n\t\tb |= 127\n\t\tlengthFields = 8\n\t}\n\theader = append(header, b)\n\tfor i 
:= 0; i < lengthFields; i++ {\n\t\tj := uint((lengthFields - i - 1) * 8)\n\t\tb = byte((length >> j) & 0xff)\n\t\theader = append(header, b)\n\t}\n\tif frame.header.MaskingKey != nil {\n\t\tif len(frame.header.MaskingKey) != 4 {\n\t\t\treturn 0, ErrBadMaskingKey\n\t\t}\n\t\theader = append(header, frame.header.MaskingKey...)\n\t\tframe.writer.Write(header)\n\t\tdata := make([]byte, length)\n\t\tfor i := range data {\n\t\t\tdata[i] = msg[i] ^ frame.header.MaskingKey[i%4]\n\t\t}\n\t\tframe.writer.Write(data)\n\t\terr = frame.writer.Flush()\n\t\treturn length, err\n\t}\n\tframe.writer.Write(header)\n\tframe.writer.Write(msg)\n\terr = frame.writer.Flush()\n\treturn length, err\n}\n\nfunc (frame *hybiFrameWriter) Close() error { return nil }\n\ntype hybiFrameWriterFactory struct {\n\t*bufio.Writer\n\tneedMaskingKey bool\n}\n\nfunc (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) {\n\tframeHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType}\n\tif buf.needMaskingKey {\n\t\tframeHeader.MaskingKey, err = generateMaskingKey()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil\n}\n\ntype hybiFrameHandler struct {\n\tconn        *Conn\n\tpayloadType byte\n}\n\nfunc (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) {\n\tif handler.conn.IsServerConn() {\n\t\t// The client MUST mask all frames sent to the server.\n\t\tif frame.(*hybiFrameReader).header.MaskingKey == nil {\n\t\t\thandler.WriteClose(closeStatusProtocolError)\n\t\t\treturn nil, io.EOF\n\t\t}\n\t} else {\n\t\t// The server MUST NOT mask all frames.\n\t\tif frame.(*hybiFrameReader).header.MaskingKey != nil {\n\t\t\thandler.WriteClose(closeStatusProtocolError)\n\t\t\treturn nil, io.EOF\n\t\t}\n\t}\n\tif header := frame.HeaderReader(); header != nil {\n\t\tio.Copy(ioutil.Discard, header)\n\t}\n\tswitch frame.PayloadType() {\n\tcase 
ContinuationFrame:\n\t\tframe.(*hybiFrameReader).header.OpCode = handler.payloadType\n\tcase TextFrame, BinaryFrame:\n\t\thandler.payloadType = frame.PayloadType()\n\tcase CloseFrame:\n\t\treturn nil, io.EOF\n\tcase PingFrame, PongFrame:\n\t\tb := make([]byte, maxControlFramePayloadLength)\n\t\tn, err := io.ReadFull(frame, b)\n\t\tif err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {\n\t\t\treturn nil, err\n\t\t}\n\t\tio.Copy(ioutil.Discard, frame)\n\t\tif frame.PayloadType() == PingFrame {\n\t\t\tif _, err := handler.WritePong(b[:n]); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\treturn nil, nil\n\t}\n\treturn frame, nil\n}\n\nfunc (handler *hybiFrameHandler) WriteClose(status int) (err error) {\n\thandler.conn.wio.Lock()\n\tdefer handler.conn.wio.Unlock()\n\tw, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame)\n\tif err != nil {\n\t\treturn err\n\t}\n\tmsg := make([]byte, 2)\n\tbinary.BigEndian.PutUint16(msg, uint16(status))\n\t_, err = w.Write(msg)\n\tw.Close()\n\treturn err\n}\n\nfunc (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) {\n\thandler.conn.wio.Lock()\n\tdefer handler.conn.wio.Unlock()\n\tw, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tn, err = w.Write(msg)\n\tw.Close()\n\treturn n, err\n}\n\n// newHybiConn creates a new WebSocket connection speaking hybi draft protocol.\nfunc newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {\n\tif buf == nil {\n\t\tbr := bufio.NewReader(rwc)\n\t\tbw := bufio.NewWriter(rwc)\n\t\tbuf = bufio.NewReadWriter(br, bw)\n\t}\n\tws := &Conn{config: config, request: request, buf: buf, rwc: rwc,\n\t\tframeReaderFactory: hybiFrameReaderFactory{buf.Reader},\n\t\tframeWriterFactory: hybiFrameWriterFactory{\n\t\t\tbuf.Writer, request == nil},\n\t\tPayloadType:        TextFrame,\n\t\tdefaultCloseStatus: closeStatusNormal}\n\tws.frameHandler = 
&hybiFrameHandler{conn: ws}\n\treturn ws\n}\n\n// generateMaskingKey generates a masking key for a frame.\nfunc generateMaskingKey() (maskingKey []byte, err error) {\n\tmaskingKey = make([]byte, 4)\n\tif _, err = io.ReadFull(rand.Reader, maskingKey); err != nil {\n\t\treturn\n\t}\n\treturn\n}\n\n// generateNonce generates a nonce consisting of a randomly selected 16-byte\n// value that has been base64-encoded.\nfunc generateNonce() (nonce []byte) {\n\tkey := make([]byte, 16)\n\tif _, err := io.ReadFull(rand.Reader, key); err != nil {\n\t\tpanic(err)\n\t}\n\tnonce = make([]byte, 24)\n\tbase64.StdEncoding.Encode(nonce, key)\n\treturn\n}\n\n// removeZone removes IPv6 zone identifer from host.\n// E.g., \"[fe80::1%en0]:8080\" to \"[fe80::1]:8080\"\nfunc removeZone(host string) string {\n\tif !strings.HasPrefix(host, \"[\") {\n\t\treturn host\n\t}\n\ti := strings.LastIndex(host, \"]\")\n\tif i < 0 {\n\t\treturn host\n\t}\n\tj := strings.LastIndex(host[:i], \"%\")\n\tif j < 0 {\n\t\treturn host\n\t}\n\treturn host[:j] + host[i:]\n}\n\n// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of\n// the nonce (\"Sec-WebSocket-Key\" value) with the websocket GUID string.\nfunc getNonceAccept(nonce []byte) (expected []byte, err error) {\n\th := sha1.New()\n\tif _, err = h.Write(nonce); err != nil {\n\t\treturn\n\t}\n\tif _, err = h.Write([]byte(websocketGUID)); err != nil {\n\t\treturn\n\t}\n\texpected = make([]byte, 28)\n\tbase64.StdEncoding.Encode(expected, h.Sum(nil))\n\treturn\n}\n\n// Client handshake described in draft-ietf-hybi-thewebsocket-protocol-17\nfunc hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) {\n\tbw.WriteString(\"GET \" + config.Location.RequestURI() + \" HTTP/1.1\\r\\n\")\n\n\t// According to RFC 6874, an HTTP client, proxy, or other\n\t// intermediary must remove any IPv6 zone identifier attached\n\t// to an outgoing URI.\n\tbw.WriteString(\"Host: \" + removeZone(config.Location.Host) + 
\"\\r\\n\")\n\tbw.WriteString(\"Upgrade: websocket\\r\\n\")\n\tbw.WriteString(\"Connection: Upgrade\\r\\n\")\n\tnonce := generateNonce()\n\tif config.handshakeData != nil {\n\t\tnonce = []byte(config.handshakeData[\"key\"])\n\t}\n\tbw.WriteString(\"Sec-WebSocket-Key: \" + string(nonce) + \"\\r\\n\")\n\tbw.WriteString(\"Origin: \" + strings.ToLower(config.Origin.String()) + \"\\r\\n\")\n\n\tif config.Version != ProtocolVersionHybi13 {\n\t\treturn ErrBadProtocolVersion\n\t}\n\n\tbw.WriteString(\"Sec-WebSocket-Version: \" + fmt.Sprintf(\"%d\", config.Version) + \"\\r\\n\")\n\tif len(config.Protocol) > 0 {\n\t\tbw.WriteString(\"Sec-WebSocket-Protocol: \" + strings.Join(config.Protocol, \", \") + \"\\r\\n\")\n\t}\n\t// TODO(ukai): send Sec-WebSocket-Extensions.\n\terr = config.Header.WriteSubset(bw, handshakeHeader)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbw.WriteString(\"\\r\\n\")\n\tif err = bw.Flush(); err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := http.ReadResponse(br, &http.Request{Method: \"GET\"})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode != 101 {\n\t\treturn ErrBadStatus\n\t}\n\tif strings.ToLower(resp.Header.Get(\"Upgrade\")) != \"websocket\" ||\n\t\tstrings.ToLower(resp.Header.Get(\"Connection\")) != \"upgrade\" {\n\t\treturn ErrBadUpgrade\n\t}\n\texpectedAccept, err := getNonceAccept(nonce)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.Header.Get(\"Sec-WebSocket-Accept\") != string(expectedAccept) {\n\t\treturn ErrChallengeResponse\n\t}\n\tif resp.Header.Get(\"Sec-WebSocket-Extensions\") != \"\" {\n\t\treturn ErrUnsupportedExtensions\n\t}\n\tofferedProtocol := resp.Header.Get(\"Sec-WebSocket-Protocol\")\n\tif offeredProtocol != \"\" {\n\t\tprotocolMatched := false\n\t\tfor i := 0; i < len(config.Protocol); i++ {\n\t\t\tif config.Protocol[i] == offeredProtocol {\n\t\t\t\tprotocolMatched = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !protocolMatched {\n\t\t\treturn ErrBadWebSocketProtocol\n\t\t}\n\t\tconfig.Protocol = 
[]string{offeredProtocol}\n\t}\n\n\treturn nil\n}\n\n// newHybiClientConn creates a client WebSocket connection after handshake.\nfunc newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn {\n\treturn newHybiConn(config, buf, rwc, nil)\n}\n\n// A HybiServerHandshaker performs a server handshake using hybi draft protocol.\ntype hybiServerHandshaker struct {\n\t*Config\n\taccept []byte\n}\n\nfunc (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) {\n\tc.Version = ProtocolVersionHybi13\n\tif req.Method != \"GET\" {\n\t\treturn http.StatusMethodNotAllowed, ErrBadRequestMethod\n\t}\n\t// HTTP version can be safely ignored.\n\n\tif strings.ToLower(req.Header.Get(\"Upgrade\")) != \"websocket\" ||\n\t\t!strings.Contains(strings.ToLower(req.Header.Get(\"Connection\")), \"upgrade\") {\n\t\treturn http.StatusBadRequest, ErrNotWebSocket\n\t}\n\n\tkey := req.Header.Get(\"Sec-Websocket-Key\")\n\tif key == \"\" {\n\t\treturn http.StatusBadRequest, ErrChallengeResponse\n\t}\n\tversion := req.Header.Get(\"Sec-Websocket-Version\")\n\tswitch version {\n\tcase \"13\":\n\t\tc.Version = ProtocolVersionHybi13\n\tdefault:\n\t\treturn http.StatusBadRequest, ErrBadWebSocketVersion\n\t}\n\tvar scheme string\n\tif req.TLS != nil {\n\t\tscheme = \"wss\"\n\t} else {\n\t\tscheme = \"ws\"\n\t}\n\tc.Location, err = url.ParseRequestURI(scheme + \"://\" + req.Host + req.URL.RequestURI())\n\tif err != nil {\n\t\treturn http.StatusBadRequest, err\n\t}\n\tprotocol := strings.TrimSpace(req.Header.Get(\"Sec-Websocket-Protocol\"))\n\tif protocol != \"\" {\n\t\tprotocols := strings.Split(protocol, \",\")\n\t\tfor i := 0; i < len(protocols); i++ {\n\t\t\tc.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i]))\n\t\t}\n\t}\n\tc.accept, err = getNonceAccept([]byte(key))\n\tif err != nil {\n\t\treturn http.StatusInternalServerError, err\n\t}\n\treturn http.StatusSwitchingProtocols, nil\n}\n\n// Origin parses the Origin 
header in req.\n// If the Origin header is not set, it returns nil and nil.\nfunc Origin(config *Config, req *http.Request) (*url.URL, error) {\n\tvar origin string\n\tswitch config.Version {\n\tcase ProtocolVersionHybi13:\n\t\torigin = req.Header.Get(\"Origin\")\n\t}\n\tif origin == \"\" {\n\t\treturn nil, nil\n\t}\n\treturn url.ParseRequestURI(origin)\n}\n\nfunc (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) {\n\tif len(c.Protocol) > 0 {\n\t\tif len(c.Protocol) != 1 {\n\t\t\t// You need choose a Protocol in Handshake func in Server.\n\t\t\treturn ErrBadWebSocketProtocol\n\t\t}\n\t}\n\tbuf.WriteString(\"HTTP/1.1 101 Switching Protocols\\r\\n\")\n\tbuf.WriteString(\"Upgrade: websocket\\r\\n\")\n\tbuf.WriteString(\"Connection: Upgrade\\r\\n\")\n\tbuf.WriteString(\"Sec-WebSocket-Accept: \" + string(c.accept) + \"\\r\\n\")\n\tif len(c.Protocol) > 0 {\n\t\tbuf.WriteString(\"Sec-WebSocket-Protocol: \" + c.Protocol[0] + \"\\r\\n\")\n\t}\n\t// TODO(ukai): send Sec-WebSocket-Extensions.\n\tif c.Header != nil {\n\t\terr := c.Header.WriteSubset(buf, handshakeHeader)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tbuf.WriteString(\"\\r\\n\")\n\treturn buf.Flush()\n}\n\nfunc (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {\n\treturn newHybiServerConn(c.Config, buf, rwc, request)\n}\n\n// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol.\nfunc newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {\n\treturn newHybiConn(config, buf, rwc, request)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/websocket/hybi_test.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage websocket\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\t\"testing\"\n)\n\n// Test the getNonceAccept function with values in\n// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17\nfunc TestSecWebSocketAccept(t *testing.T) {\n\tnonce := []byte(\"dGhlIHNhbXBsZSBub25jZQ==\")\n\texpected := []byte(\"s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\")\n\taccept, err := getNonceAccept(nonce)\n\tif err != nil {\n\t\tt.Errorf(\"getNonceAccept: returned error %v\", err)\n\t\treturn\n\t}\n\tif !bytes.Equal(expected, accept) {\n\t\tt.Errorf(\"getNonceAccept: expected %q got %q\", expected, accept)\n\t}\n}\n\nfunc TestHybiClientHandshake(t *testing.T) {\n\ttype test struct {\n\t\turl, host string\n\t}\n\ttests := []test{\n\t\t{\"ws://server.example.com/chat\", \"server.example.com\"},\n\t\t{\"ws://127.0.0.1/chat\", \"127.0.0.1\"},\n\t}\n\tif _, err := url.ParseRequestURI(\"http://[fe80::1%25lo0]\"); err == nil {\n\t\ttests = append(tests, test{\"ws://[fe80::1%25lo0]/chat\", \"[fe80::1]\"})\n\t}\n\n\tfor _, tt := range tests {\n\t\tvar b bytes.Buffer\n\t\tbw := bufio.NewWriter(&b)\n\t\tbr := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\nSec-WebSocket-Protocol: chat\n\n`))\n\t\tvar err error\n\t\tvar config Config\n\t\tconfig.Location, err = url.ParseRequestURI(tt.url)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"location url\", err)\n\t\t}\n\t\tconfig.Origin, err = url.ParseRequestURI(\"http://example.com\")\n\t\tif err != nil {\n\t\t\tt.Fatal(\"origin url\", err)\n\t\t}\n\t\tconfig.Protocol = append(config.Protocol, \"chat\")\n\t\tconfig.Protocol = append(config.Protocol, \"superchat\")\n\t\tconfig.Version = 
ProtocolVersionHybi13\n\t\tconfig.handshakeData = map[string]string{\n\t\t\t\"key\": \"dGhlIHNhbXBsZSBub25jZQ==\",\n\t\t}\n\t\tif err := hybiClientHandshake(&config, br, bw); err != nil {\n\t\t\tt.Fatal(\"handshake\", err)\n\t\t}\n\t\treq, err := http.ReadRequest(bufio.NewReader(&b))\n\t\tif err != nil {\n\t\t\tt.Fatal(\"read request\", err)\n\t\t}\n\t\tif req.Method != \"GET\" {\n\t\t\tt.Errorf(\"request method expected GET, but got %s\", req.Method)\n\t\t}\n\t\tif req.URL.Path != \"/chat\" {\n\t\t\tt.Errorf(\"request path expected /chat, but got %s\", req.URL.Path)\n\t\t}\n\t\tif req.Proto != \"HTTP/1.1\" {\n\t\t\tt.Errorf(\"request proto expected HTTP/1.1, but got %s\", req.Proto)\n\t\t}\n\t\tif req.Host != tt.host {\n\t\t\tt.Errorf(\"request host expected %s, but got %s\", tt.host, req.Host)\n\t\t}\n\t\tvar expectedHeader = map[string]string{\n\t\t\t\"Connection\":             \"Upgrade\",\n\t\t\t\"Upgrade\":                \"websocket\",\n\t\t\t\"Sec-Websocket-Key\":      config.handshakeData[\"key\"],\n\t\t\t\"Origin\":                 config.Origin.String(),\n\t\t\t\"Sec-Websocket-Protocol\": \"chat, superchat\",\n\t\t\t\"Sec-Websocket-Version\":  fmt.Sprintf(\"%d\", ProtocolVersionHybi13),\n\t\t}\n\t\tfor k, v := range expectedHeader {\n\t\t\tif req.Header.Get(k) != v {\n\t\t\t\tt.Errorf(\"%s expected %s, but got %v\", k, v, req.Header.Get(k))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestHybiClientHandshakeWithHeader(t *testing.T) {\n\tb := bytes.NewBuffer([]byte{})\n\tbw := bufio.NewWriter(b)\n\tbr := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\nSec-WebSocket-Protocol: chat\n\n`))\n\tvar err error\n\tconfig := new(Config)\n\tconfig.Location, err = url.ParseRequestURI(\"ws://server.example.com/chat\")\n\tif err != nil {\n\t\tt.Fatal(\"location url\", err)\n\t}\n\tconfig.Origin, err = url.ParseRequestURI(\"http://example.com\")\n\tif err != nil 
{\n\t\tt.Fatal(\"origin url\", err)\n\t}\n\tconfig.Protocol = append(config.Protocol, \"chat\")\n\tconfig.Protocol = append(config.Protocol, \"superchat\")\n\tconfig.Version = ProtocolVersionHybi13\n\tconfig.Header = http.Header(make(map[string][]string))\n\tconfig.Header.Add(\"User-Agent\", \"test\")\n\n\tconfig.handshakeData = map[string]string{\n\t\t\"key\": \"dGhlIHNhbXBsZSBub25jZQ==\",\n\t}\n\terr = hybiClientHandshake(config, br, bw)\n\tif err != nil {\n\t\tt.Errorf(\"handshake failed: %v\", err)\n\t}\n\treq, err := http.ReadRequest(bufio.NewReader(b))\n\tif err != nil {\n\t\tt.Fatalf(\"read request: %v\", err)\n\t}\n\tif req.Method != \"GET\" {\n\t\tt.Errorf(\"request method expected GET, but got %q\", req.Method)\n\t}\n\tif req.URL.Path != \"/chat\" {\n\t\tt.Errorf(\"request path expected /chat, but got %q\", req.URL.Path)\n\t}\n\tif req.Proto != \"HTTP/1.1\" {\n\t\tt.Errorf(\"request proto expected HTTP/1.1, but got %q\", req.Proto)\n\t}\n\tif req.Host != \"server.example.com\" {\n\t\tt.Errorf(\"request Host expected server.example.com, but got %v\", req.Host)\n\t}\n\tvar expectedHeader = map[string]string{\n\t\t\"Connection\":             \"Upgrade\",\n\t\t\"Upgrade\":                \"websocket\",\n\t\t\"Sec-Websocket-Key\":      config.handshakeData[\"key\"],\n\t\t\"Origin\":                 config.Origin.String(),\n\t\t\"Sec-Websocket-Protocol\": \"chat, superchat\",\n\t\t\"Sec-Websocket-Version\":  fmt.Sprintf(\"%d\", ProtocolVersionHybi13),\n\t\t\"User-Agent\":             \"test\",\n\t}\n\tfor k, v := range expectedHeader {\n\t\tif req.Header.Get(k) != v {\n\t\t\tt.Errorf(fmt.Sprintf(\"%s expected %q but got %q\", k, v, req.Header.Get(k)))\n\t\t}\n\t}\n}\n\nfunc TestHybiServerHandshake(t *testing.T) {\n\tconfig := new(Config)\n\thandshaker := &hybiServerHandshaker{Config: config}\n\tbr := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: 
dGhlIHNhbXBsZSBub25jZQ==\nOrigin: http://example.com\nSec-WebSocket-Protocol: chat, superchat\nSec-WebSocket-Version: 13\n\n`))\n\treq, err := http.ReadRequest(br)\n\tif err != nil {\n\t\tt.Fatal(\"request\", err)\n\t}\n\tcode, err := handshaker.ReadHandshake(br, req)\n\tif err != nil {\n\t\tt.Errorf(\"handshake failed: %v\", err)\n\t}\n\tif code != http.StatusSwitchingProtocols {\n\t\tt.Errorf(\"status expected %q but got %q\", http.StatusSwitchingProtocols, code)\n\t}\n\texpectedProtocols := []string{\"chat\", \"superchat\"}\n\tif fmt.Sprintf(\"%v\", config.Protocol) != fmt.Sprintf(\"%v\", expectedProtocols) {\n\t\tt.Errorf(\"protocol expected %q but got %q\", expectedProtocols, config.Protocol)\n\t}\n\tb := bytes.NewBuffer([]byte{})\n\tbw := bufio.NewWriter(b)\n\n\tconfig.Protocol = config.Protocol[:1]\n\n\terr = handshaker.AcceptHandshake(bw)\n\tif err != nil {\n\t\tt.Errorf(\"handshake response failed: %v\", err)\n\t}\n\texpectedResponse := strings.Join([]string{\n\t\t\"HTTP/1.1 101 Switching Protocols\",\n\t\t\"Upgrade: websocket\",\n\t\t\"Connection: Upgrade\",\n\t\t\"Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\",\n\t\t\"Sec-WebSocket-Protocol: chat\",\n\t\t\"\", \"\"}, \"\\r\\n\")\n\n\tif b.String() != expectedResponse {\n\t\tt.Errorf(\"handshake expected %q but got %q\", expectedResponse, b.String())\n\t}\n}\n\nfunc TestHybiServerHandshakeNoSubProtocol(t *testing.T) {\n\tconfig := new(Config)\n\thandshaker := &hybiServerHandshaker{Config: config}\n\tbr := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\nOrigin: http://example.com\nSec-WebSocket-Version: 13\n\n`))\n\treq, err := http.ReadRequest(br)\n\tif err != nil {\n\t\tt.Fatal(\"request\", err)\n\t}\n\tcode, err := handshaker.ReadHandshake(br, req)\n\tif err != nil {\n\t\tt.Errorf(\"handshake failed: %v\", err)\n\t}\n\tif code != http.StatusSwitchingProtocols 
{\n\t\tt.Errorf(\"status expected %q but got %q\", http.StatusSwitchingProtocols, code)\n\t}\n\tif len(config.Protocol) != 0 {\n\t\tt.Errorf(\"len(config.Protocol) expected 0, but got %q\", len(config.Protocol))\n\t}\n\tb := bytes.NewBuffer([]byte{})\n\tbw := bufio.NewWriter(b)\n\n\terr = handshaker.AcceptHandshake(bw)\n\tif err != nil {\n\t\tt.Errorf(\"handshake response failed: %v\", err)\n\t}\n\texpectedResponse := strings.Join([]string{\n\t\t\"HTTP/1.1 101 Switching Protocols\",\n\t\t\"Upgrade: websocket\",\n\t\t\"Connection: Upgrade\",\n\t\t\"Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\",\n\t\t\"\", \"\"}, \"\\r\\n\")\n\n\tif b.String() != expectedResponse {\n\t\tt.Errorf(\"handshake expected %q but got %q\", expectedResponse, b.String())\n\t}\n}\n\nfunc TestHybiServerHandshakeHybiBadVersion(t *testing.T) {\n\tconfig := new(Config)\n\thandshaker := &hybiServerHandshaker{Config: config}\n\tbr := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\nSec-WebSocket-Origin: http://example.com\nSec-WebSocket-Protocol: chat, superchat\nSec-WebSocket-Version: 9\n\n`))\n\treq, err := http.ReadRequest(br)\n\tif err != nil {\n\t\tt.Fatal(\"request\", err)\n\t}\n\tcode, err := handshaker.ReadHandshake(br, req)\n\tif err != ErrBadWebSocketVersion {\n\t\tt.Errorf(\"handshake expected err %q but got %q\", ErrBadWebSocketVersion, err)\n\t}\n\tif code != http.StatusBadRequest {\n\t\tt.Errorf(\"status expected %q but got %q\", http.StatusBadRequest, code)\n\t}\n}\n\nfunc testHybiFrame(t *testing.T, testHeader, testPayload, testMaskedPayload []byte, frameHeader *hybiFrameHeader) {\n\tb := bytes.NewBuffer([]byte{})\n\tframeWriterFactory := &hybiFrameWriterFactory{bufio.NewWriter(b), false}\n\tw, _ := frameWriterFactory.NewFrameWriter(TextFrame)\n\tw.(*hybiFrameWriter).header = frameHeader\n\t_, err := w.Write(testPayload)\n\tw.Close()\n\tif err != nil 
{\n\t\tt.Errorf(\"Write error %q\", err)\n\t}\n\tvar expectedFrame []byte\n\texpectedFrame = append(expectedFrame, testHeader...)\n\texpectedFrame = append(expectedFrame, testMaskedPayload...)\n\tif !bytes.Equal(expectedFrame, b.Bytes()) {\n\t\tt.Errorf(\"frame expected %q got %q\", expectedFrame, b.Bytes())\n\t}\n\tframeReaderFactory := &hybiFrameReaderFactory{bufio.NewReader(b)}\n\tr, err := frameReaderFactory.NewFrameReader()\n\tif err != nil {\n\t\tt.Errorf(\"Read error %q\", err)\n\t}\n\tif header := r.HeaderReader(); header == nil {\n\t\tt.Errorf(\"no header\")\n\t} else {\n\t\tactualHeader := make([]byte, r.Len())\n\t\tn, err := header.Read(actualHeader)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Read header error %q\", err)\n\t\t} else {\n\t\t\tif n < len(testHeader) {\n\t\t\t\tt.Errorf(\"header too short %q got %q\", testHeader, actualHeader[:n])\n\t\t\t}\n\t\t\tif !bytes.Equal(testHeader, actualHeader[:n]) {\n\t\t\t\tt.Errorf(\"header expected %q got %q\", testHeader, actualHeader[:n])\n\t\t\t}\n\t\t}\n\t}\n\tif trailer := r.TrailerReader(); trailer != nil {\n\t\tt.Errorf(\"unexpected trailer %q\", trailer)\n\t}\n\tframe := r.(*hybiFrameReader)\n\tif frameHeader.Fin != frame.header.Fin ||\n\t\tframeHeader.OpCode != frame.header.OpCode ||\n\t\tlen(testPayload) != int(frame.header.Length) {\n\t\tt.Errorf(\"mismatch %v (%d) vs %v\", frameHeader, len(testPayload), frame)\n\t}\n\tpayload := make([]byte, len(testPayload))\n\t_, err = r.Read(payload)\n\tif err != nil && err != io.EOF {\n\t\tt.Errorf(\"read %v\", err)\n\t}\n\tif !bytes.Equal(testPayload, payload) {\n\t\tt.Errorf(\"payload %q vs %q\", testPayload, payload)\n\t}\n}\n\nfunc TestHybiShortTextFrame(t *testing.T) {\n\tframeHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame}\n\tpayload := []byte(\"hello\")\n\ttestHybiFrame(t, []byte{0x81, 0x05}, payload, payload, frameHeader)\n\n\tpayload = make([]byte, 125)\n\ttestHybiFrame(t, []byte{0x81, 125}, payload, payload, frameHeader)\n}\n\nfunc 
TestHybiShortMaskedTextFrame(t *testing.T) {\n\tframeHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame,\n\t\tMaskingKey: []byte{0xcc, 0x55, 0x80, 0x20}}\n\tpayload := []byte(\"hello\")\n\tmaskedPayload := []byte{0xa4, 0x30, 0xec, 0x4c, 0xa3}\n\theader := []byte{0x81, 0x85}\n\theader = append(header, frameHeader.MaskingKey...)\n\ttestHybiFrame(t, header, payload, maskedPayload, frameHeader)\n}\n\nfunc TestHybiShortBinaryFrame(t *testing.T) {\n\tframeHeader := &hybiFrameHeader{Fin: true, OpCode: BinaryFrame}\n\tpayload := []byte(\"hello\")\n\ttestHybiFrame(t, []byte{0x82, 0x05}, payload, payload, frameHeader)\n\n\tpayload = make([]byte, 125)\n\ttestHybiFrame(t, []byte{0x82, 125}, payload, payload, frameHeader)\n}\n\nfunc TestHybiControlFrame(t *testing.T) {\n\tpayload := []byte(\"hello\")\n\n\tframeHeader := &hybiFrameHeader{Fin: true, OpCode: PingFrame}\n\ttestHybiFrame(t, []byte{0x89, 0x05}, payload, payload, frameHeader)\n\n\tframeHeader = &hybiFrameHeader{Fin: true, OpCode: PingFrame}\n\ttestHybiFrame(t, []byte{0x89, 0x00}, nil, nil, frameHeader)\n\n\tframeHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame}\n\ttestHybiFrame(t, []byte{0x8A, 0x05}, payload, payload, frameHeader)\n\n\tframeHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame}\n\ttestHybiFrame(t, []byte{0x8A, 0x00}, nil, nil, frameHeader)\n\n\tframeHeader = &hybiFrameHeader{Fin: true, OpCode: CloseFrame}\n\tpayload = []byte{0x03, 0xe8} // 1000\n\ttestHybiFrame(t, []byte{0x88, 0x02}, payload, payload, frameHeader)\n}\n\nfunc TestHybiLongFrame(t *testing.T) {\n\tframeHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame}\n\tpayload := make([]byte, 126)\n\ttestHybiFrame(t, []byte{0x81, 126, 0x00, 126}, payload, payload, frameHeader)\n\n\tpayload = make([]byte, 65535)\n\ttestHybiFrame(t, []byte{0x81, 126, 0xff, 0xff}, payload, payload, frameHeader)\n\n\tpayload = make([]byte, 65536)\n\ttestHybiFrame(t, []byte{0x81, 127, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00}, payload, payload, 
frameHeader)\n}\n\nfunc TestHybiClientRead(t *testing.T) {\n\twireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o',\n\t\t0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping\n\t\t0x81, 0x05, 'w', 'o', 'r', 'l', 'd'}\n\tbr := bufio.NewReader(bytes.NewBuffer(wireData))\n\tbw := bufio.NewWriter(bytes.NewBuffer([]byte{}))\n\tconn := newHybiConn(newConfig(t, \"/\"), bufio.NewReadWriter(br, bw), nil, nil)\n\n\tmsg := make([]byte, 512)\n\tn, err := conn.Read(msg)\n\tif err != nil {\n\t\tt.Errorf(\"read 1st frame, error %q\", err)\n\t}\n\tif n != 5 {\n\t\tt.Errorf(\"read 1st frame, expect 5, got %d\", n)\n\t}\n\tif !bytes.Equal(wireData[2:7], msg[:n]) {\n\t\tt.Errorf(\"read 1st frame %v, got %v\", wireData[2:7], msg[:n])\n\t}\n\tn, err = conn.Read(msg)\n\tif err != nil {\n\t\tt.Errorf(\"read 2nd frame, error %q\", err)\n\t}\n\tif n != 5 {\n\t\tt.Errorf(\"read 2nd frame, expect 5, got %d\", n)\n\t}\n\tif !bytes.Equal(wireData[16:21], msg[:n]) {\n\t\tt.Errorf(\"read 2nd frame %v, got %v\", wireData[16:21], msg[:n])\n\t}\n\tn, err = conn.Read(msg)\n\tif err == nil {\n\t\tt.Errorf(\"read not EOF\")\n\t}\n\tif n != 0 {\n\t\tt.Errorf(\"expect read 0, got %d\", n)\n\t}\n}\n\nfunc TestHybiShortRead(t *testing.T) {\n\twireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o',\n\t\t0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping\n\t\t0x81, 0x05, 'w', 'o', 'r', 'l', 'd'}\n\tbr := bufio.NewReader(bytes.NewBuffer(wireData))\n\tbw := bufio.NewWriter(bytes.NewBuffer([]byte{}))\n\tconn := newHybiConn(newConfig(t, \"/\"), bufio.NewReadWriter(br, bw), nil, nil)\n\n\tstep := 0\n\tpos := 0\n\texpectedPos := []int{2, 5, 16, 19}\n\texpectedLen := []int{3, 2, 3, 2}\n\tfor {\n\t\tmsg := make([]byte, 3)\n\t\tn, err := conn.Read(msg)\n\t\tif step >= len(expectedPos) {\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"read not EOF\")\n\t\t\t}\n\t\t\tif n != 0 {\n\t\t\t\tt.Errorf(\"expect read 0, got %d\", n)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tpos = expectedPos[step]\n\t\tendPos := pos + expectedLen[step]\n\t\tif err 
!= nil {\n\t\t\tt.Errorf(\"read from %d, got error %q\", pos, err)\n\t\t\treturn\n\t\t}\n\t\tif n != endPos-pos {\n\t\t\tt.Errorf(\"read from %d, expect %d, got %d\", pos, endPos-pos, n)\n\t\t}\n\t\tif !bytes.Equal(wireData[pos:endPos], msg[:n]) {\n\t\t\tt.Errorf(\"read from %d, frame %v, got %v\", pos, wireData[pos:endPos], msg[:n])\n\t\t}\n\t\tstep++\n\t}\n}\n\nfunc TestHybiServerRead(t *testing.T) {\n\twireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20,\n\t\t0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello\n\t\t0x89, 0x85, 0xcc, 0x55, 0x80, 0x20,\n\t\t0xa4, 0x30, 0xec, 0x4c, 0xa3, // ping: hello\n\t\t0x81, 0x85, 0xed, 0x83, 0xb4, 0x24,\n\t\t0x9a, 0xec, 0xc6, 0x48, 0x89, // world\n\t}\n\tbr := bufio.NewReader(bytes.NewBuffer(wireData))\n\tbw := bufio.NewWriter(bytes.NewBuffer([]byte{}))\n\tconn := newHybiConn(newConfig(t, \"/\"), bufio.NewReadWriter(br, bw), nil, new(http.Request))\n\n\texpected := [][]byte{[]byte(\"hello\"), []byte(\"world\")}\n\n\tmsg := make([]byte, 512)\n\tn, err := conn.Read(msg)\n\tif err != nil {\n\t\tt.Errorf(\"read 1st frame, error %q\", err)\n\t}\n\tif n != 5 {\n\t\tt.Errorf(\"read 1st frame, expect 5, got %d\", n)\n\t}\n\tif !bytes.Equal(expected[0], msg[:n]) {\n\t\tt.Errorf(\"read 1st frame %q, got %q\", expected[0], msg[:n])\n\t}\n\n\tn, err = conn.Read(msg)\n\tif err != nil {\n\t\tt.Errorf(\"read 2nd frame, error %q\", err)\n\t}\n\tif n != 5 {\n\t\tt.Errorf(\"read 2nd frame, expect 5, got %d\", n)\n\t}\n\tif !bytes.Equal(expected[1], msg[:n]) {\n\t\tt.Errorf(\"read 2nd frame %q, got %q\", expected[1], msg[:n])\n\t}\n\n\tn, err = conn.Read(msg)\n\tif err == nil {\n\t\tt.Errorf(\"read not EOF\")\n\t}\n\tif n != 0 {\n\t\tt.Errorf(\"expect read 0, got %d\", n)\n\t}\n}\n\nfunc TestHybiServerReadWithoutMasking(t *testing.T) {\n\twireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o'}\n\tbr := bufio.NewReader(bytes.NewBuffer(wireData))\n\tbw := bufio.NewWriter(bytes.NewBuffer([]byte{}))\n\tconn := newHybiConn(newConfig(t, \"/\"), 
bufio.NewReadWriter(br, bw), nil, new(http.Request))\n\t// server MUST close the connection upon receiving a non-masked frame.\n\tmsg := make([]byte, 512)\n\t_, err := conn.Read(msg)\n\tif err != io.EOF {\n\t\tt.Errorf(\"read 1st frame, expect %q, but got %q\", io.EOF, err)\n\t}\n}\n\nfunc TestHybiClientReadWithMasking(t *testing.T) {\n\twireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20,\n\t\t0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello\n\t}\n\tbr := bufio.NewReader(bytes.NewBuffer(wireData))\n\tbw := bufio.NewWriter(bytes.NewBuffer([]byte{}))\n\tconn := newHybiConn(newConfig(t, \"/\"), bufio.NewReadWriter(br, bw), nil, nil)\n\n\t// client MUST close the connection upon receiving a masked frame.\n\tmsg := make([]byte, 512)\n\t_, err := conn.Read(msg)\n\tif err != io.EOF {\n\t\tt.Errorf(\"read 1st frame, expect %q, but got %q\", io.EOF, err)\n\t}\n}\n\n// Test the hybiServerHandshaker supports firefox implementation and\n// checks Connection request header include (but it's not necessary\n// equal to) \"upgrade\"\nfunc TestHybiServerFirefoxHandshake(t *testing.T) {\n\tconfig := new(Config)\n\thandshaker := &hybiServerHandshaker{Config: config}\n\tbr := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: keep-alive, upgrade\nSec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\nOrigin: http://example.com\nSec-WebSocket-Protocol: chat, superchat\nSec-WebSocket-Version: 13\n\n`))\n\treq, err := http.ReadRequest(br)\n\tif err != nil {\n\t\tt.Fatal(\"request\", err)\n\t}\n\tcode, err := handshaker.ReadHandshake(br, req)\n\tif err != nil {\n\t\tt.Errorf(\"handshake failed: %v\", err)\n\t}\n\tif code != http.StatusSwitchingProtocols {\n\t\tt.Errorf(\"status expected %q but got %q\", http.StatusSwitchingProtocols, code)\n\t}\n\tb := bytes.NewBuffer([]byte{})\n\tbw := bufio.NewWriter(b)\n\n\tconfig.Protocol = []string{\"chat\"}\n\n\terr = handshaker.AcceptHandshake(bw)\n\tif err != nil {\n\t\tt.Errorf(\"handshake response 
failed: %v\", err)\n\t}\n\texpectedResponse := strings.Join([]string{\n\t\t\"HTTP/1.1 101 Switching Protocols\",\n\t\t\"Upgrade: websocket\",\n\t\t\"Connection: Upgrade\",\n\t\t\"Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\",\n\t\t\"Sec-WebSocket-Protocol: chat\",\n\t\t\"\", \"\"}, \"\\r\\n\")\n\n\tif b.String() != expectedResponse {\n\t\tt.Errorf(\"handshake expected %q but got %q\", expectedResponse, b.String())\n\t}\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/websocket/server.go",
    "content": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage websocket\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n)\n\nfunc newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) {\n\tvar hs serverHandshaker = &hybiServerHandshaker{Config: config}\n\tcode, err := hs.ReadHandshake(buf.Reader, req)\n\tif err == ErrBadWebSocketVersion {\n\t\tfmt.Fprintf(buf, \"HTTP/1.1 %03d %s\\r\\n\", code, http.StatusText(code))\n\t\tfmt.Fprintf(buf, \"Sec-WebSocket-Version: %s\\r\\n\", SupportedProtocolVersion)\n\t\tbuf.WriteString(\"\\r\\n\")\n\t\tbuf.WriteString(err.Error())\n\t\tbuf.Flush()\n\t\treturn\n\t}\n\tif err != nil {\n\t\tfmt.Fprintf(buf, \"HTTP/1.1 %03d %s\\r\\n\", code, http.StatusText(code))\n\t\tbuf.WriteString(\"\\r\\n\")\n\t\tbuf.WriteString(err.Error())\n\t\tbuf.Flush()\n\t\treturn\n\t}\n\tif handshake != nil {\n\t\terr = handshake(config, req)\n\t\tif err != nil {\n\t\t\tcode = http.StatusForbidden\n\t\t\tfmt.Fprintf(buf, \"HTTP/1.1 %03d %s\\r\\n\", code, http.StatusText(code))\n\t\t\tbuf.WriteString(\"\\r\\n\")\n\t\t\tbuf.Flush()\n\t\t\treturn\n\t\t}\n\t}\n\terr = hs.AcceptHandshake(buf.Writer)\n\tif err != nil {\n\t\tcode = http.StatusBadRequest\n\t\tfmt.Fprintf(buf, \"HTTP/1.1 %03d %s\\r\\n\", code, http.StatusText(code))\n\t\tbuf.WriteString(\"\\r\\n\")\n\t\tbuf.Flush()\n\t\treturn\n\t}\n\tconn = hs.NewServerConn(buf, rwc, req)\n\treturn\n}\n\n// Server represents a server of a WebSocket.\ntype Server struct {\n\t// Config is a WebSocket configuration for new WebSocket connection.\n\tConfig\n\n\t// Handshake is an optional function in WebSocket handshake.\n\t// For example, you can check, or don't check Origin header.\n\t// Another example, you can select config.Protocol.\n\tHandshake func(*Config, 
*http.Request) error\n\n\t// Handler handles a WebSocket connection.\n\tHandler\n}\n\n// ServeHTTP implements the http.Handler interface for a WebSocket\nfunc (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\ts.serveWebSocket(w, req)\n}\n\nfunc (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) {\n\trwc, buf, err := w.(http.Hijacker).Hijack()\n\tif err != nil {\n\t\tpanic(\"Hijack failed: \" + err.Error())\n\t}\n\t// The server should abort the WebSocket connection if it finds\n\t// the client did not send a handshake that matches with protocol\n\t// specification.\n\tdefer rwc.Close()\n\tconn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake)\n\tif err != nil {\n\t\treturn\n\t}\n\tif conn == nil {\n\t\tpanic(\"unexpected nil conn\")\n\t}\n\ts.Handler(conn)\n}\n\n// Handler is a simple interface to a WebSocket browser client.\n// It checks if Origin header is valid URL by default.\n// You might want to verify websocket.Conn.Config().Origin in the func.\n// If you use Server instead of Handler, you could call websocket.Origin and\n// check the origin in your Handshake func. So, if you want to accept\n// non-browser clients, which do not send an Origin header, set a\n// Server.Handshake that does not check the origin.\ntype Handler func(*Conn)\n\nfunc checkOrigin(config *Config, req *http.Request) (err error) {\n\tconfig.Origin, err = Origin(config, req)\n\tif err == nil && config.Origin == nil {\n\t\treturn fmt.Errorf(\"null origin\")\n\t}\n\treturn err\n}\n\n// ServeHTTP implements the http.Handler interface for a WebSocket\nfunc (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\ts := Server{Handler: h, Handshake: checkOrigin}\n\ts.serveWebSocket(w, req)\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/websocket/websocket.go",
    "content": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package websocket implements a client and server for the WebSocket protocol\n// as specified in RFC 6455.\n//\n// This package currently lacks some features found in an alternative\n// and more actively maintained WebSocket package:\n//\n//     https://godoc.org/github.com/gorilla/websocket\n//\npackage websocket // import \"golang.org/x/net/websocket\"\n\nimport (\n\t\"bufio\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tProtocolVersionHybi13    = 13\n\tProtocolVersionHybi      = ProtocolVersionHybi13\n\tSupportedProtocolVersion = \"13\"\n\n\tContinuationFrame = 0\n\tTextFrame         = 1\n\tBinaryFrame       = 2\n\tCloseFrame        = 8\n\tPingFrame         = 9\n\tPongFrame         = 10\n\tUnknownFrame      = 255\n\n\tDefaultMaxPayloadBytes = 32 << 20 // 32MB\n)\n\n// ProtocolError represents WebSocket protocol errors.\ntype ProtocolError struct {\n\tErrorString string\n}\n\nfunc (err *ProtocolError) Error() string { return err.ErrorString }\n\nvar (\n\tErrBadProtocolVersion   = &ProtocolError{\"bad protocol version\"}\n\tErrBadScheme            = &ProtocolError{\"bad scheme\"}\n\tErrBadStatus            = &ProtocolError{\"bad status\"}\n\tErrBadUpgrade           = &ProtocolError{\"missing or bad upgrade\"}\n\tErrBadWebSocketOrigin   = &ProtocolError{\"missing or bad WebSocket-Origin\"}\n\tErrBadWebSocketLocation = &ProtocolError{\"missing or bad WebSocket-Location\"}\n\tErrBadWebSocketProtocol = &ProtocolError{\"missing or bad WebSocket-Protocol\"}\n\tErrBadWebSocketVersion  = &ProtocolError{\"missing or bad WebSocket Version\"}\n\tErrChallengeResponse    = &ProtocolError{\"mismatch challenge/response\"}\n\tErrBadFrame             = &ProtocolError{\"bad 
frame\"}\n\tErrBadFrameBoundary     = &ProtocolError{\"not on frame boundary\"}\n\tErrNotWebSocket         = &ProtocolError{\"not websocket protocol\"}\n\tErrBadRequestMethod     = &ProtocolError{\"bad method\"}\n\tErrNotSupported         = &ProtocolError{\"not supported\"}\n)\n\n// ErrFrameTooLarge is returned by Codec's Receive method if payload size\n// exceeds limit set by Conn.MaxPayloadBytes\nvar ErrFrameTooLarge = errors.New(\"websocket: frame payload size exceeds limit\")\n\n// Addr is an implementation of net.Addr for WebSocket.\ntype Addr struct {\n\t*url.URL\n}\n\n// Network returns the network type for a WebSocket, \"websocket\".\nfunc (addr *Addr) Network() string { return \"websocket\" }\n\n// Config is a WebSocket configuration\ntype Config struct {\n\t// A WebSocket server address.\n\tLocation *url.URL\n\n\t// A Websocket client origin.\n\tOrigin *url.URL\n\n\t// WebSocket subprotocols.\n\tProtocol []string\n\n\t// WebSocket protocol version.\n\tVersion int\n\n\t// TLS config for secure WebSocket (wss).\n\tTlsConfig *tls.Config\n\n\t// Additional header fields to be sent in WebSocket opening handshake.\n\tHeader http.Header\n\n\t// Dialer used when opening websocket connections.\n\tDialer *net.Dialer\n\n\thandshakeData map[string]string\n}\n\n// serverHandshaker is an interface to handle WebSocket server side handshake.\ntype serverHandshaker interface {\n\t// ReadHandshake reads handshake request message from client.\n\t// Returns http response code and error if any.\n\tReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error)\n\n\t// AcceptHandshake accepts the client handshake request and sends\n\t// handshake response back to client.\n\tAcceptHandshake(buf *bufio.Writer) (err error)\n\n\t// NewServerConn creates a new WebSocket connection.\n\tNewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn)\n}\n\n// frameReader is an interface to read a WebSocket frame.\ntype frameReader interface 
{\n\t// Reader is to read payload of the frame.\n\tio.Reader\n\n\t// PayloadType returns payload type.\n\tPayloadType() byte\n\n\t// HeaderReader returns a reader to read header of the frame.\n\tHeaderReader() io.Reader\n\n\t// TrailerReader returns a reader to read trailer of the frame.\n\t// If it returns nil, there is no trailer in the frame.\n\tTrailerReader() io.Reader\n\n\t// Len returns total length of the frame, including header and trailer.\n\tLen() int\n}\n\n// frameReaderFactory is an interface to creates new frame reader.\ntype frameReaderFactory interface {\n\tNewFrameReader() (r frameReader, err error)\n}\n\n// frameWriter is an interface to write a WebSocket frame.\ntype frameWriter interface {\n\t// Writer is to write payload of the frame.\n\tio.WriteCloser\n}\n\n// frameWriterFactory is an interface to create new frame writer.\ntype frameWriterFactory interface {\n\tNewFrameWriter(payloadType byte) (w frameWriter, err error)\n}\n\ntype frameHandler interface {\n\tHandleFrame(frame frameReader) (r frameReader, err error)\n\tWriteClose(status int) (err error)\n}\n\n// Conn represents a WebSocket connection.\n//\n// Multiple goroutines may invoke methods on a Conn simultaneously.\ntype Conn struct {\n\tconfig  *Config\n\trequest *http.Request\n\n\tbuf *bufio.ReadWriter\n\trwc io.ReadWriteCloser\n\n\trio sync.Mutex\n\tframeReaderFactory\n\tframeReader\n\n\twio sync.Mutex\n\tframeWriterFactory\n\n\tframeHandler\n\tPayloadType        byte\n\tdefaultCloseStatus int\n\n\t// MaxPayloadBytes limits the size of frame payload received over Conn\n\t// by Codec's Receive method. 
If zero, DefaultMaxPayloadBytes is used.\n\tMaxPayloadBytes int\n}\n\n// Read implements the io.Reader interface:\n// it reads data of a frame from the WebSocket connection.\n// if msg is not large enough for the frame data, it fills the msg and next Read\n// will read the rest of the frame data.\n// it reads Text frame or Binary frame.\nfunc (ws *Conn) Read(msg []byte) (n int, err error) {\n\tws.rio.Lock()\n\tdefer ws.rio.Unlock()\nagain:\n\tif ws.frameReader == nil {\n\t\tframe, err := ws.frameReaderFactory.NewFrameReader()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tws.frameReader, err = ws.frameHandler.HandleFrame(frame)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tif ws.frameReader == nil {\n\t\t\tgoto again\n\t\t}\n\t}\n\tn, err = ws.frameReader.Read(msg)\n\tif err == io.EOF {\n\t\tif trailer := ws.frameReader.TrailerReader(); trailer != nil {\n\t\t\tio.Copy(ioutil.Discard, trailer)\n\t\t}\n\t\tws.frameReader = nil\n\t\tgoto again\n\t}\n\treturn n, err\n}\n\n// Write implements the io.Writer interface:\n// it writes data as a frame to the WebSocket connection.\nfunc (ws *Conn) Write(msg []byte) (n int, err error) {\n\tws.wio.Lock()\n\tdefer ws.wio.Unlock()\n\tw, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tn, err = w.Write(msg)\n\tw.Close()\n\treturn n, err\n}\n\n// Close implements the io.Closer interface.\nfunc (ws *Conn) Close() error {\n\terr := ws.frameHandler.WriteClose(ws.defaultCloseStatus)\n\terr1 := ws.rwc.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn err1\n}\n\nfunc (ws *Conn) IsClientConn() bool { return ws.request == nil }\nfunc (ws *Conn) IsServerConn() bool { return ws.request != nil }\n\n// LocalAddr returns the WebSocket Origin for the connection for client, or\n// the WebSocket location for server.\nfunc (ws *Conn) LocalAddr() net.Addr {\n\tif ws.IsClientConn() {\n\t\treturn &Addr{ws.config.Origin}\n\t}\n\treturn &Addr{ws.config.Location}\n}\n\n// 
RemoteAddr returns the WebSocket location for the connection for client, or\n// the Websocket Origin for server.\nfunc (ws *Conn) RemoteAddr() net.Addr {\n\tif ws.IsClientConn() {\n\t\treturn &Addr{ws.config.Location}\n\t}\n\treturn &Addr{ws.config.Origin}\n}\n\nvar errSetDeadline = errors.New(\"websocket: cannot set deadline: not using a net.Conn\")\n\n// SetDeadline sets the connection's network read & write deadlines.\nfunc (ws *Conn) SetDeadline(t time.Time) error {\n\tif conn, ok := ws.rwc.(net.Conn); ok {\n\t\treturn conn.SetDeadline(t)\n\t}\n\treturn errSetDeadline\n}\n\n// SetReadDeadline sets the connection's network read deadline.\nfunc (ws *Conn) SetReadDeadline(t time.Time) error {\n\tif conn, ok := ws.rwc.(net.Conn); ok {\n\t\treturn conn.SetReadDeadline(t)\n\t}\n\treturn errSetDeadline\n}\n\n// SetWriteDeadline sets the connection's network write deadline.\nfunc (ws *Conn) SetWriteDeadline(t time.Time) error {\n\tif conn, ok := ws.rwc.(net.Conn); ok {\n\t\treturn conn.SetWriteDeadline(t)\n\t}\n\treturn errSetDeadline\n}\n\n// Config returns the WebSocket config.\nfunc (ws *Conn) Config() *Config { return ws.config }\n\n// Request returns the http request upgraded to the WebSocket.\n// It is nil for client side.\nfunc (ws *Conn) Request() *http.Request { return ws.request }\n\n// Codec represents a symmetric pair of functions that implement a codec.\ntype Codec struct {\n\tMarshal   func(v interface{}) (data []byte, payloadType byte, err error)\n\tUnmarshal func(data []byte, payloadType byte, v interface{}) (err error)\n}\n\n// Send sends v marshaled by cd.Marshal as single frame to ws.\nfunc (cd Codec) Send(ws *Conn, v interface{}) (err error) {\n\tdata, payloadType, err := cd.Marshal(v)\n\tif err != nil {\n\t\treturn err\n\t}\n\tws.wio.Lock()\n\tdefer ws.wio.Unlock()\n\tw, err := ws.frameWriterFactory.NewFrameWriter(payloadType)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(data)\n\tw.Close()\n\treturn err\n}\n\n// Receive receives 
single frame from ws, unmarshaled by cd.Unmarshal and stores\n// in v. The whole frame payload is read to an in-memory buffer; max size of\n// payload is defined by ws.MaxPayloadBytes. If frame payload size exceeds\n// limit, ErrFrameTooLarge is returned; in this case frame is not read off wire\n// completely. The next call to Receive would read and discard leftover data of\n// previous oversized frame before processing next frame.\nfunc (cd Codec) Receive(ws *Conn, v interface{}) (err error) {\n\tws.rio.Lock()\n\tdefer ws.rio.Unlock()\n\tif ws.frameReader != nil {\n\t\t_, err = io.Copy(ioutil.Discard, ws.frameReader)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tws.frameReader = nil\n\t}\nagain:\n\tframe, err := ws.frameReaderFactory.NewFrameReader()\n\tif err != nil {\n\t\treturn err\n\t}\n\tframe, err = ws.frameHandler.HandleFrame(frame)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif frame == nil {\n\t\tgoto again\n\t}\n\tmaxPayloadBytes := ws.MaxPayloadBytes\n\tif maxPayloadBytes == 0 {\n\t\tmaxPayloadBytes = DefaultMaxPayloadBytes\n\t}\n\tif hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) {\n\t\t// payload size exceeds limit, no need to call Unmarshal\n\t\t//\n\t\t// set frameReader to current oversized frame so that\n\t\t// the next call to this function can drain leftover\n\t\t// data before processing the next frame\n\t\tws.frameReader = frame\n\t\treturn ErrFrameTooLarge\n\t}\n\tpayloadType := frame.PayloadType()\n\tdata, err := ioutil.ReadAll(frame)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn cd.Unmarshal(data, payloadType, v)\n}\n\nfunc marshal(v interface{}) (msg []byte, payloadType byte, err error) {\n\tswitch data := v.(type) {\n\tcase string:\n\t\treturn []byte(data), TextFrame, nil\n\tcase []byte:\n\t\treturn data, BinaryFrame, nil\n\t}\n\treturn nil, UnknownFrame, ErrNotSupported\n}\n\nfunc unmarshal(msg []byte, payloadType byte, v interface{}) (err error) {\n\tswitch data := v.(type) {\n\tcase 
*string:\n\t\t*data = string(msg)\n\t\treturn nil\n\tcase *[]byte:\n\t\t*data = msg\n\t\treturn nil\n\t}\n\treturn ErrNotSupported\n}\n\n/*\nMessage is a codec to send/receive text/binary data in a frame on WebSocket connection.\nTo send/receive text frame, use string type.\nTo send/receive binary frame, use []byte type.\n\nTrivial usage:\n\n\timport \"websocket\"\n\n\t// receive text frame\n\tvar message string\n\twebsocket.Message.Receive(ws, &message)\n\n\t// send text frame\n\tmessage = \"hello\"\n\twebsocket.Message.Send(ws, message)\n\n\t// receive binary frame\n\tvar data []byte\n\twebsocket.Message.Receive(ws, &data)\n\n\t// send binary frame\n\tdata = []byte{0, 1, 2}\n\twebsocket.Message.Send(ws, data)\n\n*/\nvar Message = Codec{marshal, unmarshal}\n\nfunc jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) {\n\tmsg, err = json.Marshal(v)\n\treturn msg, TextFrame, err\n}\n\nfunc jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) {\n\treturn json.Unmarshal(msg, v)\n}\n\n/*\nJSON is a codec to send/receive JSON data in a frame from a WebSocket connection.\n\nTrivial usage:\n\n\timport \"websocket\"\n\n\ttype T struct {\n\t\tMsg string\n\t\tCount int\n\t}\n\n\t// receive JSON type T\n\tvar data T\n\twebsocket.JSON.Receive(ws, &data)\n\n\t// send JSON type T\n\twebsocket.JSON.Send(ws, data)\n*/\nvar JSON = Codec{jsonMarshal, jsonUnmarshal}\n"
  },
  {
    "path": "vendor/golang.org/x/net/websocket/websocket_test.go",
    "content": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage websocket\n\nimport (\n\t\"bytes\"\n\t\"crypto/rand\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar serverAddr string\nvar once sync.Once\n\nfunc echoServer(ws *Conn) {\n\tdefer ws.Close()\n\tio.Copy(ws, ws)\n}\n\ntype Count struct {\n\tS string\n\tN int\n}\n\nfunc countServer(ws *Conn) {\n\tdefer ws.Close()\n\tfor {\n\t\tvar count Count\n\t\terr := JSON.Receive(ws, &count)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tcount.N++\n\t\tcount.S = strings.Repeat(count.S, count.N)\n\t\terr = JSON.Send(ws, count)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n}\n\ntype testCtrlAndDataHandler struct {\n\thybiFrameHandler\n}\n\nfunc (h *testCtrlAndDataHandler) WritePing(b []byte) (int, error) {\n\th.hybiFrameHandler.conn.wio.Lock()\n\tdefer h.hybiFrameHandler.conn.wio.Unlock()\n\tw, err := h.hybiFrameHandler.conn.frameWriterFactory.NewFrameWriter(PingFrame)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tn, err := w.Write(b)\n\tw.Close()\n\treturn n, err\n}\n\nfunc ctrlAndDataServer(ws *Conn) {\n\tdefer ws.Close()\n\th := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}}\n\tws.frameHandler = h\n\n\tgo func() {\n\t\tfor i := 0; ; i++ {\n\t\t\tvar b []byte\n\t\t\tif i%2 != 0 { // with or without payload\n\t\t\t\tb = []byte(fmt.Sprintf(\"#%d-CONTROL-FRAME-FROM-SERVER\", i))\n\t\t\t}\n\t\t\tif _, err := h.WritePing(b); err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif _, err := h.WritePong(b); err != nil { // unsolicited pong\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\t}()\n\n\tb := make([]byte, 128)\n\tfor {\n\t\tn, err := ws.Read(b)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif _, err := 
ws.Write(b[:n]); err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc subProtocolHandshake(config *Config, req *http.Request) error {\n\tfor _, proto := range config.Protocol {\n\t\tif proto == \"chat\" {\n\t\t\tconfig.Protocol = []string{proto}\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn ErrBadWebSocketProtocol\n}\n\nfunc subProtoServer(ws *Conn) {\n\tfor _, proto := range ws.Config().Protocol {\n\t\tio.WriteString(ws, proto)\n\t}\n}\n\nfunc startServer() {\n\thttp.Handle(\"/echo\", Handler(echoServer))\n\thttp.Handle(\"/count\", Handler(countServer))\n\thttp.Handle(\"/ctrldata\", Handler(ctrlAndDataServer))\n\tsubproto := Server{\n\t\tHandshake: subProtocolHandshake,\n\t\tHandler:   Handler(subProtoServer),\n\t}\n\thttp.Handle(\"/subproto\", subproto)\n\tserver := httptest.NewServer(nil)\n\tserverAddr = server.Listener.Addr().String()\n\tlog.Print(\"Test WebSocket server listening on \", serverAddr)\n}\n\nfunc newConfig(t *testing.T, path string) *Config {\n\tconfig, _ := NewConfig(fmt.Sprintf(\"ws://%s%s\", serverAddr, path), \"http://localhost\")\n\treturn config\n}\n\nfunc TestEcho(t *testing.T) {\n\tonce.Do(startServer)\n\n\t// websocket.Dial()\n\tclient, err := net.Dial(\"tcp\", serverAddr)\n\tif err != nil {\n\t\tt.Fatal(\"dialing\", err)\n\t}\n\tconn, err := NewClient(newConfig(t, \"/echo\"), client)\n\tif err != nil {\n\t\tt.Errorf(\"WebSocket handshake error: %v\", err)\n\t\treturn\n\t}\n\n\tmsg := []byte(\"hello, world\\n\")\n\tif _, err := conn.Write(msg); err != nil {\n\t\tt.Errorf(\"Write: %v\", err)\n\t}\n\tvar actual_msg = make([]byte, 512)\n\tn, err := conn.Read(actual_msg)\n\tif err != nil {\n\t\tt.Errorf(\"Read: %v\", err)\n\t}\n\tactual_msg = actual_msg[0:n]\n\tif !bytes.Equal(msg, actual_msg) {\n\t\tt.Errorf(\"Echo: expected %q got %q\", msg, actual_msg)\n\t}\n\tconn.Close()\n}\n\nfunc TestAddr(t *testing.T) {\n\tonce.Do(startServer)\n\n\t// websocket.Dial()\n\tclient, err := net.Dial(\"tcp\", serverAddr)\n\tif err != nil {\n\t\tt.Fatal(\"dialing\", 
err)\n\t}\n\tconn, err := NewClient(newConfig(t, \"/echo\"), client)\n\tif err != nil {\n\t\tt.Errorf(\"WebSocket handshake error: %v\", err)\n\t\treturn\n\t}\n\n\tra := conn.RemoteAddr().String()\n\tif !strings.HasPrefix(ra, \"ws://\") || !strings.HasSuffix(ra, \"/echo\") {\n\t\tt.Errorf(\"Bad remote addr: %v\", ra)\n\t}\n\tla := conn.LocalAddr().String()\n\tif !strings.HasPrefix(la, \"http://\") {\n\t\tt.Errorf(\"Bad local addr: %v\", la)\n\t}\n\tconn.Close()\n}\n\nfunc TestCount(t *testing.T) {\n\tonce.Do(startServer)\n\n\t// websocket.Dial()\n\tclient, err := net.Dial(\"tcp\", serverAddr)\n\tif err != nil {\n\t\tt.Fatal(\"dialing\", err)\n\t}\n\tconn, err := NewClient(newConfig(t, \"/count\"), client)\n\tif err != nil {\n\t\tt.Errorf(\"WebSocket handshake error: %v\", err)\n\t\treturn\n\t}\n\n\tvar count Count\n\tcount.S = \"hello\"\n\tif err := JSON.Send(conn, count); err != nil {\n\t\tt.Errorf(\"Write: %v\", err)\n\t}\n\tif err := JSON.Receive(conn, &count); err != nil {\n\t\tt.Errorf(\"Read: %v\", err)\n\t}\n\tif count.N != 1 {\n\t\tt.Errorf(\"count: expected %d got %d\", 1, count.N)\n\t}\n\tif count.S != \"hello\" {\n\t\tt.Errorf(\"count: expected %q got %q\", \"hello\", count.S)\n\t}\n\tif err := JSON.Send(conn, count); err != nil {\n\t\tt.Errorf(\"Write: %v\", err)\n\t}\n\tif err := JSON.Receive(conn, &count); err != nil {\n\t\tt.Errorf(\"Read: %v\", err)\n\t}\n\tif count.N != 2 {\n\t\tt.Errorf(\"count: expected %d got %d\", 2, count.N)\n\t}\n\tif count.S != \"hellohello\" {\n\t\tt.Errorf(\"count: expected %q got %q\", \"hellohello\", count.S)\n\t}\n\tconn.Close()\n}\n\nfunc TestWithQuery(t *testing.T) {\n\tonce.Do(startServer)\n\n\tclient, err := net.Dial(\"tcp\", serverAddr)\n\tif err != nil {\n\t\tt.Fatal(\"dialing\", err)\n\t}\n\n\tconfig := newConfig(t, \"/echo\")\n\tconfig.Location, err = url.ParseRequestURI(fmt.Sprintf(\"ws://%s/echo?q=v\", serverAddr))\n\tif err != nil {\n\t\tt.Fatal(\"location url\", err)\n\t}\n\n\tws, err := NewClient(config, 
client)\n\tif err != nil {\n\t\tt.Errorf(\"WebSocket handshake: %v\", err)\n\t\treturn\n\t}\n\tws.Close()\n}\n\nfunc testWithProtocol(t *testing.T, subproto []string) (string, error) {\n\tonce.Do(startServer)\n\n\tclient, err := net.Dial(\"tcp\", serverAddr)\n\tif err != nil {\n\t\tt.Fatal(\"dialing\", err)\n\t}\n\n\tconfig := newConfig(t, \"/subproto\")\n\tconfig.Protocol = subproto\n\n\tws, err := NewClient(config, client)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tmsg := make([]byte, 16)\n\tn, err := ws.Read(msg)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tws.Close()\n\treturn string(msg[:n]), nil\n}\n\nfunc TestWithProtocol(t *testing.T) {\n\tproto, err := testWithProtocol(t, []string{\"chat\"})\n\tif err != nil {\n\t\tt.Errorf(\"SubProto: unexpected error: %v\", err)\n\t}\n\tif proto != \"chat\" {\n\t\tt.Errorf(\"SubProto: expected %q, got %q\", \"chat\", proto)\n\t}\n}\n\nfunc TestWithTwoProtocol(t *testing.T) {\n\tproto, err := testWithProtocol(t, []string{\"test\", \"chat\"})\n\tif err != nil {\n\t\tt.Errorf(\"SubProto: unexpected error: %v\", err)\n\t}\n\tif proto != \"chat\" {\n\t\tt.Errorf(\"SubProto: expected %q, got %q\", \"chat\", proto)\n\t}\n}\n\nfunc TestWithBadProtocol(t *testing.T) {\n\t_, err := testWithProtocol(t, []string{\"test\"})\n\tif err != ErrBadStatus {\n\t\tt.Errorf(\"SubProto: expected %v, got %v\", ErrBadStatus, err)\n\t}\n}\n\nfunc TestHTTP(t *testing.T) {\n\tonce.Do(startServer)\n\n\t// If the client did not send a handshake that matches the protocol\n\t// specification, the server MUST return an HTTP response with an\n\t// appropriate error code (such as 400 Bad Request)\n\tresp, err := http.Get(fmt.Sprintf(\"http://%s/echo\", serverAddr))\n\tif err != nil {\n\t\tt.Errorf(\"Get: error %#v\", err)\n\t\treturn\n\t}\n\tif resp == nil {\n\t\tt.Error(\"Get: resp is null\")\n\t\treturn\n\t}\n\tif resp.StatusCode != http.StatusBadRequest {\n\t\tt.Errorf(\"Get: expected %q got %q\", http.StatusBadRequest, 
resp.StatusCode)\n\t}\n}\n\nfunc TestTrailingSpaces(t *testing.T) {\n\t// http://code.google.com/p/go/issues/detail?id=955\n\t// The last runs of this create keys with trailing spaces that should not be\n\t// generated by the client.\n\tonce.Do(startServer)\n\tconfig := newConfig(t, \"/echo\")\n\tfor i := 0; i < 30; i++ {\n\t\t// body\n\t\tws, err := DialConfig(config)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Dial #%d failed: %v\", i, err)\n\t\t\tbreak\n\t\t}\n\t\tws.Close()\n\t}\n}\n\nfunc TestDialConfigBadVersion(t *testing.T) {\n\tonce.Do(startServer)\n\tconfig := newConfig(t, \"/echo\")\n\tconfig.Version = 1234\n\n\t_, err := DialConfig(config)\n\n\tif dialerr, ok := err.(*DialError); ok {\n\t\tif dialerr.Err != ErrBadProtocolVersion {\n\t\t\tt.Errorf(\"dial expected err %q but got %q\", ErrBadProtocolVersion, dialerr.Err)\n\t\t}\n\t}\n}\n\nfunc TestDialConfigWithDialer(t *testing.T) {\n\tonce.Do(startServer)\n\tconfig := newConfig(t, \"/echo\")\n\tconfig.Dialer = &net.Dialer{\n\t\tDeadline: time.Now().Add(-time.Minute),\n\t}\n\t_, err := DialConfig(config)\n\tdialerr, ok := err.(*DialError)\n\tif !ok {\n\t\tt.Fatalf(\"DialError expected, got %#v\", err)\n\t}\n\tneterr, ok := dialerr.Err.(*net.OpError)\n\tif !ok {\n\t\tt.Fatalf(\"net.OpError error expected, got %#v\", dialerr.Err)\n\t}\n\tif !neterr.Timeout() {\n\t\tt.Fatalf(\"expected timeout error, got %#v\", neterr)\n\t}\n}\n\nfunc TestSmallBuffer(t *testing.T) {\n\t// http://code.google.com/p/go/issues/detail?id=1145\n\t// Read should be able to handle reading a fragment of a frame.\n\tonce.Do(startServer)\n\n\t// websocket.Dial()\n\tclient, err := net.Dial(\"tcp\", serverAddr)\n\tif err != nil {\n\t\tt.Fatal(\"dialing\", err)\n\t}\n\tconn, err := NewClient(newConfig(t, \"/echo\"), client)\n\tif err != nil {\n\t\tt.Errorf(\"WebSocket handshake error: %v\", err)\n\t\treturn\n\t}\n\n\tmsg := []byte(\"hello, world\\n\")\n\tif _, err := conn.Write(msg); err != nil {\n\t\tt.Errorf(\"Write: %v\", err)\n\t}\n\tvar 
small_msg = make([]byte, 8)\n\tn, err := conn.Read(small_msg)\n\tif err != nil {\n\t\tt.Errorf(\"Read: %v\", err)\n\t}\n\tif !bytes.Equal(msg[:len(small_msg)], small_msg) {\n\t\tt.Errorf(\"Echo: expected %q got %q\", msg[:len(small_msg)], small_msg)\n\t}\n\tvar second_msg = make([]byte, len(msg))\n\tn, err = conn.Read(second_msg)\n\tif err != nil {\n\t\tt.Errorf(\"Read: %v\", err)\n\t}\n\tsecond_msg = second_msg[0:n]\n\tif !bytes.Equal(msg[len(small_msg):], second_msg) {\n\t\tt.Errorf(\"Echo: expected %q got %q\", msg[len(small_msg):], second_msg)\n\t}\n\tconn.Close()\n}\n\nvar parseAuthorityTests = []struct {\n\tin  *url.URL\n\tout string\n}{\n\t{\n\t\t&url.URL{\n\t\t\tScheme: \"ws\",\n\t\t\tHost:   \"www.google.com\",\n\t\t},\n\t\t\"www.google.com:80\",\n\t},\n\t{\n\t\t&url.URL{\n\t\t\tScheme: \"wss\",\n\t\t\tHost:   \"www.google.com\",\n\t\t},\n\t\t\"www.google.com:443\",\n\t},\n\t{\n\t\t&url.URL{\n\t\t\tScheme: \"ws\",\n\t\t\tHost:   \"www.google.com:80\",\n\t\t},\n\t\t\"www.google.com:80\",\n\t},\n\t{\n\t\t&url.URL{\n\t\t\tScheme: \"wss\",\n\t\t\tHost:   \"www.google.com:443\",\n\t\t},\n\t\t\"www.google.com:443\",\n\t},\n\t// some invalid ones for parseAuthority. 
parseAuthority doesn't\n\t// concern itself with the scheme unless it actually knows about it\n\t{\n\t\t&url.URL{\n\t\t\tScheme: \"http\",\n\t\t\tHost:   \"www.google.com\",\n\t\t},\n\t\t\"www.google.com\",\n\t},\n\t{\n\t\t&url.URL{\n\t\t\tScheme: \"http\",\n\t\t\tHost:   \"www.google.com:80\",\n\t\t},\n\t\t\"www.google.com:80\",\n\t},\n\t{\n\t\t&url.URL{\n\t\t\tScheme: \"asdf\",\n\t\t\tHost:   \"127.0.0.1\",\n\t\t},\n\t\t\"127.0.0.1\",\n\t},\n\t{\n\t\t&url.URL{\n\t\t\tScheme: \"asdf\",\n\t\t\tHost:   \"www.google.com\",\n\t\t},\n\t\t\"www.google.com\",\n\t},\n}\n\nfunc TestParseAuthority(t *testing.T) {\n\tfor _, tt := range parseAuthorityTests {\n\t\tout := parseAuthority(tt.in)\n\t\tif out != tt.out {\n\t\t\tt.Errorf(\"got %v; want %v\", out, tt.out)\n\t\t}\n\t}\n}\n\ntype closerConn struct {\n\tnet.Conn\n\tclosed int // count of the number of times Close was called\n}\n\nfunc (c *closerConn) Close() error {\n\tc.closed++\n\treturn c.Conn.Close()\n}\n\nfunc TestClose(t *testing.T) {\n\tif runtime.GOOS == \"plan9\" {\n\t\tt.Skip(\"see golang.org/issue/11454\")\n\t}\n\n\tonce.Do(startServer)\n\n\tconn, err := net.Dial(\"tcp\", serverAddr)\n\tif err != nil {\n\t\tt.Fatal(\"dialing\", err)\n\t}\n\n\tcc := closerConn{Conn: conn}\n\n\tclient, err := NewClient(newConfig(t, \"/echo\"), &cc)\n\tif err != nil {\n\t\tt.Fatalf(\"WebSocket handshake: %v\", err)\n\t}\n\n\t// set the deadline to ten minutes ago, which will have expired by the time\n\t// client.Close sends the close status frame.\n\tconn.SetDeadline(time.Now().Add(-10 * time.Minute))\n\n\tif err := client.Close(); err == nil {\n\t\tt.Errorf(\"ws.Close(): expected error, got %v\", err)\n\t}\n\tif cc.closed < 1 {\n\t\tt.Fatalf(\"ws.Close(): expected underlying ws.rwc.Close to be called > 0 times, got: %v\", cc.closed)\n\t}\n}\n\nvar originTests = []struct {\n\treq    *http.Request\n\torigin *url.URL\n}{\n\t{\n\t\treq: &http.Request{\n\t\t\tHeader: http.Header{\n\t\t\t\t\"Origin\": 
[]string{\"http://www.example.com\"},\n\t\t\t},\n\t\t},\n\t\torigin: &url.URL{\n\t\t\tScheme: \"http\",\n\t\t\tHost:   \"www.example.com\",\n\t\t},\n\t},\n\t{\n\t\treq: &http.Request{},\n\t},\n}\n\nfunc TestOrigin(t *testing.T) {\n\tconf := newConfig(t, \"/echo\")\n\tconf.Version = ProtocolVersionHybi13\n\tfor i, tt := range originTests {\n\t\torigin, err := Origin(conf, tt.req)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(origin, tt.origin) {\n\t\t\tt.Errorf(\"#%d: got origin %v; want %v\", i, origin, tt.origin)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc TestCtrlAndData(t *testing.T) {\n\tonce.Do(startServer)\n\n\tc, err := net.Dial(\"tcp\", serverAddr)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tws, err := NewClient(newConfig(t, \"/ctrldata\"), c)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer ws.Close()\n\n\th := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}}\n\tws.frameHandler = h\n\n\tb := make([]byte, 128)\n\tfor i := 0; i < 2; i++ {\n\t\tdata := []byte(fmt.Sprintf(\"#%d-DATA-FRAME-FROM-CLIENT\", i))\n\t\tif _, err := ws.Write(data); err != nil {\n\t\t\tt.Fatalf(\"#%d: %v\", i, err)\n\t\t}\n\t\tvar ctrl []byte\n\t\tif i%2 != 0 { // with or without payload\n\t\t\tctrl = []byte(fmt.Sprintf(\"#%d-CONTROL-FRAME-FROM-CLIENT\", i))\n\t\t}\n\t\tif _, err := h.WritePing(ctrl); err != nil {\n\t\t\tt.Fatalf(\"#%d: %v\", i, err)\n\t\t}\n\t\tn, err := ws.Read(b)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"#%d: %v\", i, err)\n\t\t}\n\t\tif !bytes.Equal(b[:n], data) {\n\t\t\tt.Fatalf(\"#%d: got %v; want %v\", i, b[:n], data)\n\t\t}\n\t}\n}\n\nfunc TestCodec_ReceiveLimited(t *testing.T) {\n\tconst limit = 2048\n\tvar payloads [][]byte\n\tfor _, size := range []int{\n\t\t1024,\n\t\t2048,\n\t\t4096, // receive of this message would be interrupted due to limit\n\t\t2048, // this one is to make sure next receive recovers discarding leftovers\n\t} {\n\t\tb := make([]byte, size)\n\t\trand.Read(b)\n\t\tpayloads = 
append(payloads, b)\n\t}\n\thandlerDone := make(chan struct{})\n\tlimitedHandler := func(ws *Conn) {\n\t\tdefer close(handlerDone)\n\t\tws.MaxPayloadBytes = limit\n\t\tdefer ws.Close()\n\t\tfor i, p := range payloads {\n\t\t\tt.Logf(\"payload #%d (size %d, exceeds limit: %v)\", i, len(p), len(p) > limit)\n\t\t\tvar recv []byte\n\t\t\terr := Message.Receive(ws, &recv)\n\t\t\tswitch err {\n\t\t\tcase nil:\n\t\t\tcase ErrFrameTooLarge:\n\t\t\t\tif len(p) <= limit {\n\t\t\t\t\tt.Fatalf(\"unexpected frame size limit: expected %d bytes of payload having limit at %d\", len(p), limit)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\tdefault:\n\t\t\t\tt.Fatalf(\"unexpected error: %v (want either nil or ErrFrameTooLarge)\", err)\n\t\t\t}\n\t\t\tif len(recv) > limit {\n\t\t\t\tt.Fatalf(\"received %d bytes of payload having limit at %d\", len(recv), limit)\n\t\t\t}\n\t\t\tif !bytes.Equal(p, recv) {\n\t\t\t\tt.Fatalf(\"received payload differs:\\ngot:\\t%v\\nwant:\\t%v\", recv, p)\n\t\t\t}\n\t\t}\n\t}\n\tserver := httptest.NewServer(Handler(limitedHandler))\n\tdefer server.CloseClientConnections()\n\tdefer server.Close()\n\taddr := server.Listener.Addr().String()\n\tws, err := Dial(\"ws://\"+addr+\"/\", \"\", \"http://localhost/\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer ws.Close()\n\tfor i, p := range payloads {\n\t\tif err := Message.Send(ws, p); err != nil {\n\t\t\tt.Fatalf(\"payload #%d (size %d): %v\", i, len(p), err)\n\t\t}\n\t}\n\t<-handlerDone\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/xsrftoken/xsrf.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package xsrftoken provides methods for generating and validating secure XSRF tokens.\npackage xsrftoken // import \"golang.org/x/net/xsrftoken\"\n\nimport (\n\t\"crypto/hmac\"\n\t\"crypto/sha1\"\n\t\"crypto/subtle\"\n\t\"encoding/base64\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n// Timeout is the duration for which XSRF tokens are valid.\n// It is exported so clients may set cookie timeouts that match generated tokens.\nconst Timeout = 24 * time.Hour\n\n// clean sanitizes a string for inclusion in a token by replacing all \":\"s.\nfunc clean(s string) string {\n\treturn strings.Replace(s, \":\", \"_\", -1)\n}\n\n// Generate returns a URL-safe secure XSRF token that expires in 24 hours.\n//\n// key is a secret key for your application; it must be non-empty.\n// userID is an optional unique identifier for the user.\n// actionID is an optional action the user is taking (e.g. 
POSTing to a particular path).\nfunc Generate(key, userID, actionID string) string {\n\treturn generateTokenAtTime(key, userID, actionID, time.Now())\n}\n\n// generateTokenAtTime is like Generate, but returns a token that expires 24 hours from now.\nfunc generateTokenAtTime(key, userID, actionID string, now time.Time) string {\n\tif len(key) == 0 {\n\t\tpanic(\"zero length xsrf secret key\")\n\t}\n\t// Round time up and convert to milliseconds.\n\tmilliTime := (now.UnixNano() + 1e6 - 1) / 1e6\n\n\th := hmac.New(sha1.New, []byte(key))\n\tfmt.Fprintf(h, \"%s:%s:%d\", clean(userID), clean(actionID), milliTime)\n\n\t// Get the padded base64 string then removing the padding.\n\ttok := string(h.Sum(nil))\n\ttok = base64.URLEncoding.EncodeToString([]byte(tok))\n\ttok = strings.TrimRight(tok, \"=\")\n\n\treturn fmt.Sprintf(\"%s:%d\", tok, milliTime)\n}\n\n// Valid reports whether a token is a valid, unexpired token returned by Generate.\nfunc Valid(token, key, userID, actionID string) bool {\n\treturn validTokenAtTime(token, key, userID, actionID, time.Now())\n}\n\n// validTokenAtTime reports whether a token is valid at the given time.\nfunc validTokenAtTime(token, key, userID, actionID string, now time.Time) bool {\n\tif len(key) == 0 {\n\t\tpanic(\"zero length xsrf secret key\")\n\t}\n\t// Extract the issue time of the token.\n\tsep := strings.LastIndex(token, \":\")\n\tif sep < 0 {\n\t\treturn false\n\t}\n\tmillis, err := strconv.ParseInt(token[sep+1:], 10, 64)\n\tif err != nil {\n\t\treturn false\n\t}\n\tissueTime := time.Unix(0, millis*1e6)\n\n\t// Check that the token is not expired.\n\tif now.Sub(issueTime) >= Timeout {\n\t\treturn false\n\t}\n\n\t// Check that the token is not from the future.\n\t// Allow 1 minute grace period in case the token is being verified on a\n\t// machine whose clock is behind the machine that issued the token.\n\tif issueTime.After(now.Add(1 * time.Minute)) {\n\t\treturn false\n\t}\n\n\texpected := generateTokenAtTime(key, userID, 
actionID, issueTime)\n\n\t// Check that the token matches the expected value.\n\t// Use constant time comparison to avoid timing attacks.\n\treturn subtle.ConstantTimeCompare([]byte(token), []byte(expected)) == 1\n}\n"
  },
  {
    "path": "vendor/golang.org/x/net/xsrftoken/xsrf_test.go",
    "content": "// Copyright 2012 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage xsrftoken\n\nimport (\n\t\"encoding/base64\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst (\n\tkey      = \"quay\"\n\tuserID   = \"12345678\"\n\tactionID = \"POST /form\"\n)\n\nvar (\n\tnow              = time.Now()\n\toneMinuteFromNow = now.Add(1 * time.Minute)\n)\n\nfunc TestValidToken(t *testing.T) {\n\ttok := generateTokenAtTime(key, userID, actionID, now)\n\tif !validTokenAtTime(tok, key, userID, actionID, oneMinuteFromNow) {\n\t\tt.Error(\"One second later: Expected token to be valid\")\n\t}\n\tif !validTokenAtTime(tok, key, userID, actionID, now.Add(Timeout-1*time.Nanosecond)) {\n\t\tt.Error(\"Just before timeout: Expected token to be valid\")\n\t}\n\tif !validTokenAtTime(tok, key, userID, actionID, now.Add(-1*time.Minute+1*time.Millisecond)) {\n\t\tt.Error(\"One minute in the past: Expected token to be valid\")\n\t}\n}\n\n// TestSeparatorReplacement tests that separators are being correctly substituted\nfunc TestSeparatorReplacement(t *testing.T) {\n\ttok := generateTokenAtTime(\"foo:bar\", \"baz\", \"wah\", now)\n\ttok2 := generateTokenAtTime(\"foo\", \"bar:baz\", \"wah\", now)\n\tif tok == tok2 {\n\t\tt.Errorf(\"Expected generated tokens to be different\")\n\t}\n}\n\nfunc TestInvalidToken(t *testing.T) {\n\tinvalidTokenTests := []struct {\n\t\tname, key, userID, actionID string\n\t\tt                           time.Time\n\t}{\n\t\t{\"Bad key\", \"foobar\", userID, actionID, oneMinuteFromNow},\n\t\t{\"Bad userID\", key, \"foobar\", actionID, oneMinuteFromNow},\n\t\t{\"Bad actionID\", key, userID, \"foobar\", oneMinuteFromNow},\n\t\t{\"Expired\", key, userID, actionID, now.Add(Timeout + 1*time.Millisecond)},\n\t\t{\"More than 1 minute from the future\", key, userID, actionID, now.Add(-1*time.Nanosecond - 1*time.Minute)},\n\t}\n\n\ttok := generateTokenAtTime(key, userID, 
actionID, now)\n\tfor _, itt := range invalidTokenTests {\n\t\tif validTokenAtTime(tok, itt.key, itt.userID, itt.actionID, itt.t) {\n\t\t\tt.Errorf(\"%v: Expected token to be invalid\", itt.name)\n\t\t}\n\t}\n}\n\n// TestValidateBadData primarily tests that no unexpected panics are triggered\n// during parsing\nfunc TestValidateBadData(t *testing.T) {\n\tbadDataTests := []struct {\n\t\tname, tok string\n\t}{\n\t\t{\"Invalid Base64\", \"ASDab24(@)$*==\"},\n\t\t{\"No delimiter\", base64.URLEncoding.EncodeToString([]byte(\"foobar12345678\"))},\n\t\t{\"Invalid time\", base64.URLEncoding.EncodeToString([]byte(\"foobar:foobar\"))},\n\t\t{\"Wrong length\", \"1234\" + generateTokenAtTime(key, userID, actionID, now)},\n\t}\n\n\tfor _, bdt := range badDataTests {\n\t\tif validTokenAtTime(bdt.tok, key, userID, actionID, oneMinuteFromNow) {\n\t\t\tt.Errorf(\"%v: Expected token to be invalid\", bdt.name)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vsphere-influxdb.go",
    "content": "/*  Copyright 2016-2018 Adrian Todorov, Oxalide ato@oxalide.com\nOriginal project author: https://github.com/cblomart\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program.  If not, see <http://www.gnu.org/licenses/>.\n*/\n\npackage main\n\nimport (\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"net/url\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n\tinfluxclient \"github.com/influxdata/influxdb/client/v2\"\n\t\"github.com/vmware/govmomi\"\n\t\"github.com/vmware/govmomi/property\"\n\t\"github.com/vmware/govmomi/vim25/methods\"\n\t\"github.com/vmware/govmomi/vim25/mo\"\n\t\"github.com/vmware/govmomi/vim25/types\"\n\t\"golang.org/x/net/context\"\n)\n\nconst (\n\t// name of the service\n\tname        = \"vsphere-influxdb\"\n\tdescription = \"send vsphere stats to influxdb\"\n)\n\n// Configuration is used to store config data\ntype Configuration struct {\n\tVCenters             []*VCenter\n\tMetrics              []Metric\n\tInterval             int\n\tDomain               string\n\tRemoveHostDomainName bool\n\tInfluxDB             InfluxDB\n}\n\n// InfluxDB is used for InfluxDB connections\ntype InfluxDB struct {\n\tHostname string\n\tUsername string\n\tPassword string\n\tDatabase string\n\tPrefix   string\n}\n\n// VCenter for VMware vCenter connections\ntype VCenter struct {\n\tHostname     string\n\tUsername     string\n\tPassword     
string\n\tMetricGroups []*MetricGroup\n\tclient       *govmomi.Client\n}\n\n// MetricDef metric definition\ntype MetricDef struct {\n\tMetric    string\n\tInstances string\n\tKey       int32\n}\n\n// Metric is used for metrics retrieval\ntype Metric struct {\n\tObjectType []string\n\tDefinition []MetricDef\n}\n\n// MetricGroup is used for grouping metrics retrieval\ntype MetricGroup struct {\n\tObjectType string\n\tMetrics    []MetricDef\n\tMor        []types.ManagedObjectReference\n}\n\n// EntityQuery are informations to query about an entity\ntype EntityQuery struct {\n\tName    string\n\tEntity  types.ManagedObjectReference\n\tMetrics []int32\n}\n\nvar getversion, debug, test bool\nvar stdlog, errlog *log.Logger\nvar version = \"master\"\n\n// Connect to the actual vCenter connection used to query data\nfunc (vcenter *VCenter) Connect() error {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tstdlog.Println(\"Connecting to vcenter:\", vcenter.Hostname)\n\tu, err := url.Parse(\"https://\" + vcenter.Hostname + \"/sdk\")\n\tu.User = url.UserPassword(vcenter.Username, vcenter.Password)\n\tif err != nil {\n\t\terrlog.Println(\"Could not parse vcenter url:\", vcenter.Hostname)\n\t\terrlog.Println(\"Error:\", err)\n\t\treturn err\n\t}\n\n\tclient, err := govmomi.NewClient(ctx, u, true)\n\tif err != nil {\n\t\terrlog.Println(\"Could not connect to vcenter:\", vcenter.Hostname)\n\t\terrlog.Println(\"Error:\", err)\n\t\treturn err\n\t}\n\n\tvcenter.client = client\n\n\treturn nil\n}\n\n// Disconnect from the vCenter\nfunc (vcenter *VCenter) Disconnect() error {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tif vcenter.client != nil {\n\t\tif err := vcenter.client.Logout(ctx); err != nil {\n\t\t\terrlog.Println(\"Could not disconnect properly from vcenter:\", vcenter.Hostname, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// Init the VCenter connection\nfunc (vcenter *VCenter) Init(config 
Configuration) error {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tclient := vcenter.client\n\n\t// Print version\n\tif debug {\n\t\taboutInfo := client.Client.ServiceContent.About\n\t\tstdlog.Println(\"Version:\", aboutInfo.FullName)\n\t}\n\n\tvar perfmanager mo.PerformanceManager\n\terr := client.RetrieveOne(ctx, *client.ServiceContent.PerfManager, nil, &perfmanager)\n\tif err != nil {\n\t\terrlog.Println(\"Could not get performance manager\")\n\t\terrlog.Println(\"Error:\", err)\n\t\treturn err\n\t}\n\n\t// Print PerformanceManager interval collection level\n\tif debug {\n\t\tstdlog.Println(\"PerformanceManager interval collection level\")\n\t\tspew.Dump(perfmanager.HistoricalInterval)\n\t}\n\n\tfor _, perf := range perfmanager.PerfCounter {\n\t\tgroupinfo := perf.GroupInfo.GetElementDescription()\n\t\tnameinfo := perf.NameInfo.GetElementDescription()\n\t\tidentifier := groupinfo.Key + \".\" + nameinfo.Key + \".\" + fmt.Sprint(perf.RollupType)\n\t\tfor _, metric := range config.Metrics {\n\t\t\tfor _, metricdef := range metric.Definition {\n\t\t\t\tif metricdef.Metric == identifier {\n\t\t\t\t\tmetricd := MetricDef{Metric: metricdef.Metric, Instances: metricdef.Instances, Key: perf.Key}\n\t\t\t\t\tfor _, mtype := range metric.ObjectType {\n\t\t\t\t\t\tadded := false\n\t\t\t\t\t\tfor _, metricgroup := range vcenter.MetricGroups {\n\t\t\t\t\t\t\tif metricgroup.ObjectType == mtype {\n\t\t\t\t\t\t\t\tmetricgroup.Metrics = append(metricgroup.Metrics, metricd)\n\t\t\t\t\t\t\t\tadded = true\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif added == false {\n\t\t\t\t\t\t\tmetricgroup := MetricGroup{ObjectType: mtype, Metrics: []MetricDef{metricd}}\n\t\t\t\t\t\t\tvcenter.MetricGroups = append(vcenter.MetricGroups, &metricgroup)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// Query a vcenter\nfunc (vcenter *VCenter) Query(config Configuration, InfluxDBClient influxclient.Client, 
nowTime time.Time) {\n\tstdlog.Println(\"Setting up query inventory of vcenter:\", vcenter.Hostname)\n\n\t// Create the contect\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\t// Get the client\n\tclient := vcenter.client\n\n\t// Create the view manager\n\tvar viewManager mo.ViewManager\n\terr := client.RetrieveOne(ctx, *client.ServiceContent.ViewManager, nil, &viewManager)\n\tif err != nil {\n\t\terrlog.Println(\"Could not get view manager from vcenter:\", vcenter.Hostname)\n\t\terrlog.Println(\"Error: \", err)\n\t\treturn\n\t}\n\n\t// Get the Datacenters from root folder\n\tvar rootFolder mo.Folder\n\terr = client.RetrieveOne(ctx, client.ServiceContent.RootFolder, nil, &rootFolder)\n\tif err != nil {\n\t\terrlog.Println(\"Could not get root folder from vcenter:\", vcenter.Hostname)\n\t\terrlog.Println(\"Error:\", err)\n\t\treturn\n\t}\n\n\tdatacenters := []types.ManagedObjectReference{}\n\tfor _, child := range rootFolder.ChildEntity {\n\t\t//if child.Type == \"Datacenter\" {\n\t\tdatacenters = append(datacenters, child)\n\t\t//}\n\t}\n\t// Get intresting object types from specified queries\n\tobjectTypes := []string{}\n\tfor _, group := range vcenter.MetricGroups {\n\t\tobjectTypes = append(objectTypes, group.ObjectType)\n\t}\n\tobjectTypes = append(objectTypes, \"ClusterComputeResource\")\n\tobjectTypes = append(objectTypes, \"ResourcePool\")\n\tobjectTypes = append(objectTypes, \"Datastore\")\n\n\t// Loop trought datacenters and create the intersting object reference list\n\tmors := []types.ManagedObjectReference{}\n\tfor _, datacenter := range datacenters {\n\t\t// Create the CreateContentView request\n\t\treq := types.CreateContainerView{This: viewManager.Reference(), Container: datacenter, Type: objectTypes, Recursive: true}\n\t\tres, err := methods.CreateContainerView(ctx, client.RoundTripper, &req)\n\t\tif err != nil {\n\t\t\terrlog.Println(\"Could not create container view from vcenter:\", 
vcenter.Hostname)\n\t\t\terrlog.Println(\"Error:\", err)\n\t\t\tcontinue\n\t\t}\n\t\t// Retrieve the created ContentView\n\t\tvar containerView mo.ContainerView\n\t\terr = client.RetrieveOne(ctx, res.Returnval, nil, &containerView)\n\t\tif err != nil {\n\t\t\terrlog.Println(\"Could not get container view from vcenter:\", vcenter.Hostname)\n\t\t\terrlog.Println(\"Error:\", err)\n\t\t\tcontinue\n\t\t}\n\t\t// Add found object to object list\n\t\tmors = append(mors, containerView.View...)\n\t}\n\n\t// Create MORS for each object type\n\tvmRefs := []types.ManagedObjectReference{}\n\thostRefs := []types.ManagedObjectReference{}\n\tclusterRefs := []types.ManagedObjectReference{}\n\trespoolRefs := []types.ManagedObjectReference{}\n\tdatastoreRefs := []types.ManagedObjectReference{}\n\n\tnewMors := []types.ManagedObjectReference{}\n\n\tif debug {\n\t\tspew.Dump(mors)\n\t}\n\t// Assign each MORS type to a specific array\n\tfor _, mor := range mors {\n\t\tif mor.Type == \"VirtualMachine\" {\n\t\t\tvmRefs = append(vmRefs, mor)\n\t\t\tnewMors = append(newMors, mor)\n\t\t} else if mor.Type == \"HostSystem\" {\n\t\t\thostRefs = append(hostRefs, mor)\n\t\t\tnewMors = append(newMors, mor)\n\t\t} else if mor.Type == \"ClusterComputeResource\" {\n\t\t\tclusterRefs = append(clusterRefs, mor)\n\t\t} else if mor.Type == \"ResourcePool\" {\n\t\t\trespoolRefs = append(respoolRefs, mor)\n\t\t} else if mor.Type == \"Datastore\" {\n\t\t\tdatastoreRefs = append(datastoreRefs, mor)\n\t\t}\n\t}\n\n\t// Copy the mors without the clusters\n\tmors = newMors\n\tpc := property.DefaultCollector(client.Client)\n\n\t// govmomi segfaults when the list objects to retrieve is empty, so check everything\n\n\t// Retrieve properties for all vms\n\tvar vmmo []mo.VirtualMachine\n\tif len(vmRefs) > 0 {\n\t\terr = pc.Retrieve(ctx, vmRefs, []string{\"summary\"}, &vmmo)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Retrieve properties for hosts\n\tvar hsmo []mo.HostSystem\n\tif 
len(hostRefs) > 0 {\n\t\terr = pc.Retrieve(ctx, hostRefs, []string{\"parent\", \"summary\"}, &hsmo)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t//Retrieve properties for Cluster(s)\n\tvar clmo []mo.ClusterComputeResource\n\tif len(clusterRefs) > 0 {\n\t\terr = pc.Retrieve(ctx, clusterRefs, []string{\"name\", \"configuration\", \"host\"}, &clmo)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t//Retrieve properties for ResourcePool\n\tvar rpmo []mo.ResourcePool\n\tif len(respoolRefs) > 0 {\n\t\terr = pc.Retrieve(ctx, respoolRefs, []string{\"summary\"}, &rpmo)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Retrieve summary property for all datastores\n\tvar dss []mo.Datastore\n\tif len(datastoreRefs) > 0 {\n\t\terr = pc.Retrieve(ctx, datastoreRefs, []string{\"summary\"}, &dss)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Initialize the map that will hold the VM MOR to ResourcePool reference\n\tvmToPool := make(map[types.ManagedObjectReference]string)\n\n\tvar respool []mo.ResourcePool\n\t// Retrieve properties for ResourcePools\n\tif len(respoolRefs) > 0 {\n\t\tif debug {\n\t\t\tstdlog.Println(\"Going inside ResourcePools\")\n\t\t}\n\t\terr = pc.Retrieve(ctx, respoolRefs, []string{\"name\", \"config\", \"vm\"}, &respool)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\t\tfor _, pool := range respool {\n\t\t\tif debug {\n\t\t\t\tstdlog.Println(\"---resourcepool name - you should see every resourcepool here (+VMs inside)----\")\n\t\t\t\tstdlog.Println(pool.Name)\n\t\t\t\tstdlog.Println(pool.Config.MemoryAllocation.GetResourceAllocationInfo().Limit)\n\t\t\t\tstdlog.Println(pool.Config.CpuAllocation.GetResourceAllocationInfo().Limit)\n\t\t\t}\n\t\t\tfor _, vm := range pool.Vm {\n\t\t\t\tif debug {\n\t\t\t\t\tstdlog.Println(\"--VM ID - you should see every VM ID 
here--\")\n\t\t\t\t\tstdlog.Println(vm)\n\t\t\t\t}\n\t\t\t\tvmToPool[vm] = pool.Name\n\t\t\t}\n\t\t}\n\t}\n\n\t// Initialize the map that will hold the VM MOR to cluster reference\n\tvmToCluster := make(map[types.ManagedObjectReference]string)\n\n\t// Initialize the map that will hold the host MOR to cluster reference\n\thostToCluster := make(map[types.ManagedObjectReference]string)\n\n\t// Initialize the map that will hold the vDisk UUID per VM MOR to datastore reference\n\t//\tvDiskToDatastore := make(map[types.ManagedObjectReference]map[string]string)\n\n\t// Retrieve properties for clusters, if any\n\tif len(clusterRefs) > 0 {\n\t\tif debug {\n\t\t\tstdlog.Println(\"Going inside clusters\")\n\t\t}\n\n\t\t// Step 1 : Get ObjectContents and Host info for VM\n\t\t//          The host is found under the runtime structure.\n\n\t\t// Step 2 : Step 2: Get the ManagedObjectReference from the Host we just got.\n\n\t\t// Step 3 : Get a list all the clusters that vCenter knows about, and for each one, also get the host\n\n\t\t// Step 4 : Loop through all clusters that exist (which we got in step 3), and loop through each host\n\t\t//          and see if that host matches the host we got in step 2 as the host of the vm.\n\t\t//          If we find it, return it, otherwise we return null.\n\n\t\tfor _, vm := range vmmo {\n\t\t\t// check if VM is a clone in progress and skip it\n\t\t\tif vm.Summary.Runtime.Host == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvmhost := vm.Summary.Runtime.Host\n\n\t\t\tfor _, cl := range clmo {\n\t\t\t\tfor _, host := range cl.Host {\n\t\t\t\t\thostToCluster[host] = cl.Name\n\n\t\t\t\t\tif *vmhost == host {\n\t\t\t\t\t\tvmToCluster[vm.Self] = cl.Name\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// Retrieve properties for the pools\n\trespoolSummary := make(map[types.ManagedObjectReference]map[string]string)\n\tfor _, pools := range rpmo {\n\t\trespoolSummary[pools.Self] = make(map[string]string)\n\t\trespoolSummary[pools.Self][\"name\"] = 
pools.Summary.GetResourcePoolSummary().Name\n\t}\n\n\t// Initialize the maps that will hold the extra tags and metrics for VMs\n\thostSummary := make(map[types.ManagedObjectReference]map[string]string)\n\thostExtraMetrics := make(map[types.ManagedObjectReference]map[string]int64)\n\n\tfor _, host := range hsmo {\n\n\t\t// Extra tags per host\n\t\thostSummary[host.Self] = make(map[string]string)\n\t\thostSummary[host.Self][\"name\"] = host.Summary.Config.Name\n\t\t// Remove Domain Name from Host\n\t\tif config.RemoveHostDomainName {\n\t\t\thostSummary[host.Self][\"name\"] = strings.Replace(host.Summary.Config.Name, config.Domain, \"\", -1)\n\t\t}\n\t\thostSummary[host.Self][\"cluster\"] = hostToCluster[host.Self]\n\n\t\t// Extra metrics per host\n\t\thostExtraMetrics[host.Self] = make(map[string]int64)\n\t\thostExtraMetrics[host.Self][\"uptime\"] = int64(host.Summary.QuickStats.Uptime)\n\t\thostExtraMetrics[host.Self][\"cpu_corecount_total\"] = int64(host.Summary.Hardware.NumCpuThreads)\n\t}\n\n\t// Initialize the maps that will hold the extra tags and metrics for VMs\n\tvmSummary := make(map[types.ManagedObjectReference]map[string]string)\n\tvmExtraMetrics := make(map[types.ManagedObjectReference]map[string]int64)\n\n\t// Assign extra details per VM in vmSummary\n\tfor _, vm := range vmmo {\n\t\t// extra tags per VM\n\t\tvmSummary[vm.Self] = make(map[string]string)\n\t\t// Ugly way to extract datastore value\n\t\tre, err := regexp.Compile(`\\[(.*?)\\]`)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\tvmSummary[vm.Self][\"datastore\"] = strings.Replace(strings.Replace(re.FindString(fmt.Sprintln(vm.Summary.Config)), \"[\", \"\", -1), \"]\", \"\", -1)\n\n\t\t// List all devices to get vDisks\n\t\t//\t\tfor _, device := range vm.Config.Hardware.Device {\n\t\t//\t\t\t// Hacky way to check if it's a vDisk and if it's datastore is different than the main one for VM\n\t\t//\t\t\tif device.Backing.FileName != nil && device.Backing.Datastore.Name != 
vmSummary[vm.Self][\"datastore\"] {\n\t\t//\t\t\t\tif vDiskToDatastore[vm.Self] == nil {\n\t\t//\t\t\t\t\tvDiskToDatastore[vm.Self] = make(map[string]string)\n\t\t//\t\t\t\t}\n\t\t//\t\t\t\tvDiskToDatastore[vm.Self][device.diskObjectId] = device.Backing.Datastore.Name\n\t\t//\t\t\t}\n\t\t//\t\t}\n\n\t\tif vmToCluster[vm.Self] != \"\" {\n\t\t\tvmSummary[vm.Self][\"cluster\"] = vmToCluster[vm.Self]\n\t\t}\n\t\tif vmToPool[vm.Self] != \"\" {\n\t\t\tvmSummary[vm.Self][\"respool\"] = vmToPool[vm.Self]\n\t\t}\n\t\tif vm.Summary.Runtime.Host != nil {\n\t\t\tvmSummary[vm.Self][\"esx\"] = hostSummary[*vm.Summary.Runtime.Host][\"name\"]\n\t\t}\n\n\t\t// Extra metrics per VM\n\t\tvmExtraMetrics[vm.Self] = make(map[string]int64)\n\t\tvmExtraMetrics[vm.Self][\"uptime\"] = int64(vm.Summary.QuickStats.UptimeSeconds)\n\t}\n\t//\tfmt.Println(\"vDiskDatastore:\")\n\t//\tspew.Dump(vDiskToDatastore)\n\t// get object names\n\tobjects := []mo.ManagedEntity{}\n\n\t//object for propery collection\n\tpropSpec := &types.PropertySpec{Type: \"ManagedEntity\", PathSet: []string{\"name\"}}\n\tvar objectSet []types.ObjectSpec\n\tfor _, mor := range mors {\n\t\tobjectSet = append(objectSet, types.ObjectSpec{Obj: mor, Skip: types.NewBool(false)})\n\t}\n\n\t//retrieve name property\n\tpropreq := types.RetrieveProperties{SpecSet: []types.PropertyFilterSpec{{ObjectSet: objectSet, PropSet: []types.PropertySpec{*propSpec}}}}\n\tpropres, err := client.PropertyCollector().RetrieveProperties(ctx, propreq)\n\tif err != nil {\n\t\terrlog.Println(\"Could not retrieve object names from vcenter:\", vcenter.Hostname)\n\t\terrlog.Println(\"Error:\", err)\n\t\treturn\n\t}\n\n\t//load retrieved properties\n\terr = mo.LoadRetrievePropertiesResponse(propres, &objects)\n\tif err != nil {\n\t\terrlog.Println(\"Could not retrieve object names from vcenter:\", vcenter.Hostname)\n\t\terrlog.Println(\"Error:\", err)\n\t\treturn\n\t}\n\n\t//create a map to resolve object names\n\tmorToName := 
make(map[types.ManagedObjectReference]string)\n\tfor _, object := range objects {\n\t\tmorToName[object.Self] = object.Name\n\t}\n\n\t//create a map to resolve metric names\n\tmetricToName := make(map[int32]string)\n\tfor _, metricgroup := range vcenter.MetricGroups {\n\t\tfor _, metricdef := range metricgroup.Metrics {\n\t\t\tmetricToName[metricdef.Key] = metricdef.Metric\n\t\t}\n\t}\n\n\t// Create Queries from interesting objects and requested metrics\n\n\tqueries := []types.PerfQuerySpec{}\n\n\t// Common parameters\n\tintervalIDint := 20\n\tvar intervalID int32\n\tintervalID = int32(intervalIDint)\n\n\tendTime := time.Now().Add(time.Duration(-1) * time.Second)\n\tstartTime := endTime.Add(time.Duration(-config.Interval) * time.Second)\n\n\t// Parse objects\n\tfor _, mor := range mors {\n\t\tmetricIds := []types.PerfMetricId{}\n\t\tfor _, metricgroup := range vcenter.MetricGroups {\n\t\t\tif metricgroup.ObjectType == mor.Type {\n\t\t\t\tfor _, metricdef := range metricgroup.Metrics {\n\t\t\t\t\tmetricIds = append(metricIds, types.PerfMetricId{CounterId: metricdef.Key, Instance: metricdef.Instances})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tqueries = append(queries, types.PerfQuerySpec{Entity: mor, StartTime: &startTime, EndTime: &endTime, MetricId: metricIds, IntervalId: intervalID})\n\t}\n\n\t// Query the performances\n\tperfreq := types.QueryPerf{This: *client.ServiceContent.PerfManager, QuerySpec: queries}\n\tperfres, err := methods.QueryPerf(ctx, client.RoundTripper, &perfreq)\n\tif err != nil {\n\t\terrlog.Println(\"Could not request perfs from vcenter:\", vcenter.Hostname)\n\t\terrlog.Println(\"Error:\", err)\n\t\treturn\n\t}\n\n\t// Get the result\n\tvcName := strings.Replace(vcenter.Hostname, config.Domain, \"\", -1)\n\n\t//Influx batch points\n\tbp, err := influxclient.NewBatchPoints(influxclient.BatchPointsConfig{\n\t\tDatabase:  config.InfluxDB.Database,\n\t\tPrecision: \"s\",\n\t})\n\tif err != nil {\n\t\terrlog.Println(err)\n\t\treturn\n\t}\n\n\tfor _, base := 
range perfres.Returnval {\n\t\tpem := base.(*types.PerfEntityMetric)\n\t\tentityName := strings.ToLower(pem.Entity.Type)\n\t\tname := strings.ToLower(strings.Replace(morToName[pem.Entity], config.Domain, \"\", -1))\n\n\t\t//Create map for InfluxDB fields\n\t\tfields := make(map[string]interface{})\n\n\t\t// Create map for InfluxDB tags\n\t\ttags := map[string]string{\"host\": vcName, \"name\": name}\n\n\t\t// Add extra per VM tags\n\t\tif summary, ok := vmSummary[pem.Entity]; ok {\n\t\t\tfor key, tag := range summary {\n\t\t\t\ttags[key] = tag\n\t\t\t}\n\t\t}\n\t\tif summary, ok := hostSummary[pem.Entity]; ok {\n\t\t\tfor key, tag := range summary {\n\t\t\t\ttags[key] = tag\n\t\t\t}\n\t\t}\n\n\t\tif summary, ok := respoolSummary[pem.Entity]; ok {\n\t\t\tfor key, tag := range summary {\n\t\t\t\ttags[key] = tag\n\t\t\t}\n\t\t}\n\n\t\tspecialFields := make(map[string]map[string]map[string]map[string]interface{})\n\t\tspecialTags := make(map[string]map[string]map[string]map[string]string)\n\t\tnowTime := time.Now()\n\t\tfor _, baseserie := range pem.Value {\n\t\t\tserie := baseserie.(*types.PerfMetricIntSeries)\n\t\t\tmetricName := strings.ToLower(metricToName[serie.Id.CounterId])\n\t\t\tinfluxMetricName := strings.Replace(metricName, \".\", \"_\", -1)\n\t\t\tinstanceName := strings.ToLower(strings.Replace(serie.Id.Instance, \".\", \"_\", -1))\n\t\t\tmeasurementName := strings.Split(metricName, \".\")[0]\n\n\t\t\tif strings.Index(influxMetricName, \"datastore\") != -1 {\n\t\t\t\tinstanceName = \"\"\n\t\t\t}\n\n\t\t\tvar value int64 = -1\n\t\t\tif strings.HasSuffix(metricName, \".average\") {\n\t\t\t\tvalue = average(serie.Value...)\n\t\t\t} else if strings.HasSuffix(metricName, \".maximum\") {\n\t\t\t\tvalue = max(serie.Value...)\n\t\t\t} else if strings.HasSuffix(metricName, \".minimum\") {\n\t\t\t\tvalue = min(serie.Value...)\n\t\t\t} else if strings.HasSuffix(metricName, \".latest\") {\n\t\t\t\tvalue = serie.Value[len(serie.Value)-1]\n\t\t\t} else if 
strings.HasSuffix(metricName, \".summation\") {\n\t\t\t\tvalue = sum(serie.Value...)\n\t\t\t}\n\n\t\t\tif instanceName == \"\" {\n\t\t\t\tfields[influxMetricName] = value\n\t\t\t} else {\n\t\t\t\t// init maps\n\t\t\t\tif specialFields[measurementName] == nil {\n\t\t\t\t\tspecialFields[measurementName] = make(map[string]map[string]map[string]interface{})\n\t\t\t\t\tspecialTags[measurementName] = make(map[string]map[string]map[string]string)\n\n\t\t\t\t}\n\n\t\t\t\tif specialFields[measurementName][tags[\"name\"]] == nil {\n\t\t\t\t\tspecialFields[measurementName][tags[\"name\"]] = make(map[string]map[string]interface{})\n\t\t\t\t\tspecialTags[measurementName][tags[\"name\"]] = make(map[string]map[string]string)\n\t\t\t\t}\n\n\t\t\t\tif specialFields[measurementName][tags[\"name\"]][instanceName] == nil {\n\t\t\t\t\tspecialFields[measurementName][tags[\"name\"]][instanceName] = make(map[string]interface{})\n\t\t\t\t\tspecialTags[measurementName][tags[\"name\"]][instanceName] = make(map[string]string)\n\n\t\t\t\t}\n\n\t\t\t\tspecialFields[measurementName][tags[\"name\"]][instanceName][influxMetricName] = value\n\n\t\t\t\tfor k, v := range tags {\n\t\t\t\t\tspecialTags[measurementName][tags[\"name\"]][instanceName][k] = v\n\t\t\t\t}\n\t\t\t\tspecialTags[measurementName][tags[\"name\"]][instanceName][\"instance\"] = instanceName\n\t\t\t}\n\t\t}\n\t\t// Create the fields for the hostExtraMetrics\n\t\tif metrics, ok := hostExtraMetrics[pem.Entity]; ok {\n\t\t\tfor key, value := range metrics {\n\t\t\t\tfields[key] = value\n\t\t\t}\n\t\t}\n\t\t// Create the fields for the vmExtraMetrics\n\t\tif metrics, ok := vmExtraMetrics[pem.Entity]; ok {\n\t\t\tfor key, value := range metrics {\n\t\t\t\tfields[key] = value\n\t\t\t}\n\t\t}\n\n\t\t//create InfluxDB points\n\t\tpt, err := influxclient.NewPoint(config.InfluxDB.Prefix+entityName, tags, fields, nowTime)\n\t\tif err != nil {\n\t\t\terrlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tbp.AddPoint(pt)\n\n\t\tfor measurement, v := 
range specialFields {\n\t\t\tfor name, metric := range v {\n\t\t\t\tfor instance, value := range metric {\n\t\t\t\t\tpt2, err := influxclient.NewPoint(config.InfluxDB.Prefix+measurement, specialTags[measurement][name][instance], value, time.Now())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\terrlog.Println(err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tbp.AddPoint(pt2)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, pool := range respool {\n\t\t\trespoolFields := map[string]interface{}{\n\t\t\t\t\"cpu_limit\":    pool.Config.CpuAllocation.GetResourceAllocationInfo().Limit,\n\t\t\t\t\"memory_limit\": pool.Config.MemoryAllocation.GetResourceAllocationInfo().Limit,\n\t\t\t}\n\t\t\trespoolTags := map[string]string{\"pool_name\": pool.Name}\n\t\t\tpt3, err := influxclient.NewPoint(config.InfluxDB.Prefix+\"resourcepool\", respoolTags, respoolFields, time.Now())\n\t\t\tif err != nil {\n\t\t\t\terrlog.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbp.AddPoint(pt3)\n\t\t}\n\n\t\tfor _, datastore := range dss {\n\t\t\tdatastoreFields := map[string]interface{}{\n\t\t\t\t\"capacity\":   datastore.Summary.Capacity,\n\t\t\t\t\"free_space\": datastore.Summary.FreeSpace,\n\t\t\t\t\"usage\":      1.0 - (float64(datastore.Summary.FreeSpace) / float64(datastore.Summary.Capacity)),\n\t\t\t}\n\t\t\tdatastoreTags := map[string]string{\"ds_name\": datastore.Summary.Name, \"host\": vcName}\n\t\t\tpt4, err := influxclient.NewPoint(config.InfluxDB.Prefix+\"datastore\", datastoreTags, datastoreFields, time.Now())\n\t\t\tif err != nil {\n\t\t\t\terrlog.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbp.AddPoint(pt4)\n\t\t}\n\n\t}\n\n\t//InfluxDB send if not in test mode\n\tif test != true {\n\t\terr = InfluxDBClient.Write(bp)\n\t\tif err != nil {\n\t\t\terrlog.Println(err)\n\t\t\treturn\n\t\t}\n\t\tstdlog.Println(\"Sent data to Influxdb from:\", vcenter.Hostname)\n\t} else {\n\t\tspew.Dump(bp)\n\t}\n}\n\nfunc min(n ...int64) int64 {\n\tvar min int64 = -1\n\tfor _, i := range n {\n\t\tif i >= 0 
{\n\t\t\tif min == -1 {\n\t\t\t\tmin = i\n\t\t\t} else {\n\t\t\t\tif i < min {\n\t\t\t\t\tmin = i\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn min\n}\nfunc max(n ...int64) int64 {\n\tvar max int64 = -1\n\tfor _, i := range n {\n\t\tif i >= 0 {\n\t\t\tif max == -1 {\n\t\t\t\tmax = i\n\t\t\t} else {\n\t\t\t\tif i > max {\n\t\t\t\t\tmax = i\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn max\n}\n\nfunc sum(n ...int64) int64 {\n\tvar total int64\n\tfor _, i := range n {\n\t\tif i > 0 {\n\t\t\ttotal += i\n\t\t}\n\t}\n\treturn total\n}\n\nfunc average(n ...int64) int64 {\n\tvar total int64\n\tvar count int64\n\tfor _, i := range n {\n\t\tif i >= 0 {\n\t\t\tcount++\n\t\t\ttotal += i\n\t\t}\n\t}\n\tfavg := float64(total) / float64(count)\n\treturn int64(math.Floor(favg + .5))\n}\n\nfunc worker(id int, config Configuration, influxDBClient influxclient.Client, nowTime time.Time, vcenters <-chan *VCenter, results chan<- bool) {\n\tfor vcenter := range vcenters {\n\t\tif debug {\n\t\t\tstdlog.Println(\"Worker\", id, \"received vcenter\", vcenter.Hostname)\n\t\t}\n\n\t\tif err := vcenter.Connect(); err != nil {\n\t\t\terrlog.Println(\"Could not initialize connection to vcenter\", vcenter.Hostname, err)\n\t\t\tresults <- true\n\t\t\tcontinue\n\t\t}\n\t\tif err := vcenter.Init(config); err == nil {\n\t\t\tvcenter.Query(config, influxDBClient, nowTime)\n\t\t}\n\n\t\tvcenter.Disconnect()\n\t\tresults <- true\n\t}\n}\n\nfunc main() {\n\tbaseName := path.Base(os.Args[0])\n\tstdlog = log.New(os.Stdout, \"\", log.Ldate|log.Ltime)\n\terrlog = log.New(os.Stderr, \"\", log.Ldate|log.Ltime)\n\n\tflag.BoolVar(&debug, \"debug\", false, \"Debug mode\")\n\tflag.BoolVar(&test, \"test\", false, \"Test mode, data will be collected from vCenters, but nothing will be written to InfluxDB, only printed to stdout\")\n\tflag.BoolVar(&getversion, \"version\", false, \"Get version and exit\")\n\tworkerCount := flag.Int(\"workers\", 4, \"Number of concurrent workers to query vcenters\")\n\tcfgFile := 
flag.String(\"config\", \"/etc/\"+baseName+\".json\", \"Config file to use\")\n\tflag.Parse()\n\n\tif getversion {\n\t\tfmt.Println(\"Version:\", version)\n\t\tos.Exit(0)\n\t}\n\n\tstdlog.Println(\"Starting\", baseName, \"with config file\", *cfgFile)\n\n\t// read the configuration\n\tfile, err := os.Open(*cfgFile)\n\tif err != nil {\n\t\terrlog.Println(\"Could not open configuration file\", *cfgFile)\n\t\terrlog.Fatalln(err)\n\t}\n\n\tjsondec := json.NewDecoder(file)\n\tconfig := Configuration{}\n\terr = jsondec.Decode(&config)\n\tif err != nil {\n\t\terrlog.Println(\"Could not decode configuration file\", *cfgFile)\n\t\terrlog.Fatalln(err)\n\t}\n\n\t// Support environemt variables / overrides for Influx Connection\n\tif ihostname := os.Getenv(\"INFLUX_HOSTNAME\"); ihostname != \"\" {\n\t\tconfig.InfluxDB.Hostname = os.Getenv(\"INFLUX_HOSTNAME\")\n\t\tconfig.InfluxDB.Username = os.Getenv(\"INFLUX_USERNAME\")\n\t\tconfig.InfluxDB.Password = os.Getenv(\"INFLUX_PASSWORD\")\n\t\tconfig.InfluxDB.Database = os.Getenv(\"INFLUX_DATABASE\")\n\t}\n\t// Support environment variables for VSphere\n\t// Currently ony one server is supported and added to the list of vSphere servers\n\tif vhostname := os.Getenv(\"VSPHERE_HOSTNAME\"); vhostname != \"\" {\n\t\tvc := VCenter{\n\t\t\tHostname: os.Getenv(\"VSPHERE_HOSTNAME\"),\n\t\t\tUsername: os.Getenv(\"VSPHERE_USERNAME\"),\n\t\t\tPassword: os.Getenv(\"VSPHERE_PASSWORD\"),\n\t\t}\n\t\tconfig.VCenters = append(config.VCenters, &vc)\n\t}\n\n\t// Print configuration in debug mode\n\tif debug {\n\t\tstdlog.Println(\"---Configuration - you should see the config here---\")\n\t\tspew.Dump(config)\n\t}\n\n\t// Initialize InfluxDB and connect to database\n\tInfluxDBClient, err := influxclient.NewHTTPClient(influxclient.HTTPConfig{\n\t\tAddr:     config.InfluxDB.Hostname,\n\t\tUsername: config.InfluxDB.Username,\n\t\tPassword: config.InfluxDB.Password,\n\t})\n\tif err != nil {\n\t\terrlog.Println(\"Could not initialize InfluxDB 
client\")\n\t\terrlog.Fatalln(err)\n\t}\n\n\tif _, _, err := InfluxDBClient.Ping(0); err != nil {\n\t\terrlog.Println(\"Could not connect to InfluxDB\")\n\t\terrlog.Fatalln(err)\n\t}\n\n\tdefer InfluxDBClient.Close()\n\n\tstdlog.Println(\"Successfully connected to Influx\")\n\n\t// make the channels, get the time, launch the goroutines\n\tvcenterCount := len(config.VCenters)\n\tvcenters := make(chan *VCenter, vcenterCount)\n\tresults := make(chan bool, vcenterCount)\n\tnowTime := time.Now()\n\n\tfor i := 0; i < *workerCount; i++ {\n\t\tgo worker(i, config, InfluxDBClient, nowTime, vcenters, results)\n\t}\n\n\tfor _, vcenter := range config.VCenters {\n\t\tvcenters <- vcenter\n\t}\n\tclose(vcenters)\n\n\tfor i := 0; i < vcenterCount; i++ {\n\t\t<-results\n\t}\n\n}\n"
  },
  {
    "path": "vsphere-influxdb.json.sample",
    "content": "{\n  \"Domain\": \".lab\",\n  \"RemoveHostDomainName\": false,\n  \"Interval\": 60,\n  \"VCenters\": [\n    { \"Username\": \"monitoring\", \"Password\": \"monixx\", \"Hostname\": \"vcenter-01.dc-01.lab\" },\n    { \"Username\": \"monitoring\", \"Password\": \"monixx\", \"Hostname\": \"vcenter-01.dc-02.lab\" },\n    { \"Username\": \"monitoring\", \"Password\": \"monixx\", \"Hostname\": \"vcenter-02.dc-02.lab\" },\n    { \"Username\": \"monitoring\", \"Password\": \"monixx\", \"Hostname\": \"vcenter-01.home.lab\" }\n  ],\n\n  \"InfluxDB\": {\n    \"Prefix\": \"vsphere_\",\n    \"Hostname\": \"http://influxdb-01.dc-01.lab:8086\",\n    \"Username\": \"vm\",\n    \"Password\": \"vmware334\",\n    \"Database\": \"vmware_performance\"\n  },\n\n  \"Metrics\": [\n    { \n      \"ObjectType\": [ \"VirtualMachine\", \"HostSystem\" ], \n      \"Definition\": [\n        { \"Metric\": \"cpu.usage.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"cpu.usage.maximum\", \"Instances\": \"*\" },\n        { \"Metric\": \"cpu.usagemhz.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"cpu.usagemhz.maximum\", \"Instances\": \"*\" },\n        { \"Metric\": \"cpu.wait.summation\", \"Instances\": \"*\" },\n        { \"Metric\": \"cpu.system.summation\", \"Instances\": \"*\" },\n        { \"Metric\": \"cpu.ready.summation\", \"Instances\": \"*\" },\n        { \"Metric\": \"mem.usage.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"mem.usage.maximum\", \"Instances\": \"*\" },\n        { \"Metric\": \"mem.consumed.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"mem.consumed.maximum\", \"Instances\": \"*\" },\n        { \"Metric\": \"mem.active.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"mem.active.maximum\", \"Instances\": \"*\" },\n        { \"Metric\": \"mem.vmmemctl.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"mem.vmmemctl.maximum\", \"Instances\": \"*\" },\n        { \"Metric\": 
\"mem.totalCapacity.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"net.packetsRx.summation\", \"Instances\": \"*\" },\n        { \"Metric\": \"net.packetsTx.summation\", \"Instances\": \"*\" },\n        { \"Metric\": \"net.throughput.usage.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"net.received.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"net.transmitted.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"net.throughput.usage.nfs.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"datastore.numberReadAveraged.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"datastore.numberWriteAveraged.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"datastore.read.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"datastore.write.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"datastore.totalReadLatency.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"datastore.totalWriteLatency.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"mem.capacity.provisioned.average\", \"Instances\": \"*\"},\n        { \"Metric\": \"cpu.corecount.provisioned.average\", \"Instances\": \"*\" }\n      ]\n    },\n\n    { \n      \"ObjectType\": [ \"VirtualMachine\" ], \n      \"Definition\": [\n      { \"Metric\": \"datastore.datastoreVMObservedLatency.latest\", \"Instances\": \"*\" }\n      ]\n    },\n\n    { \n      \"ObjectType\": [ \"HostSystem\" ], \n      \"Definition\": [\n        { \"Metric\": \"disk.maxTotalLatency.latest\", \"Instances\": \"\" },\n        { \"Metric\": \"disk.numberReadAveraged.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"disk.numberWriteAveraged.average\", \"Instances\": \"*\" },\n        { \"Metric\": \"net.throughput.contention.summation\", \"Instances\": \"*\" }\n      ]\n    },\n\n    {\n      \"ObjectType\": [ \"Datastore\" ],\n      \"Definition\": [\n        { \"Metric\": \"disk.capacity.latest\", \"Instances\": \"*\" },\n        { 
\"Metric\": \"disk.used.latest\", \"Instances\": \"*\" }\n      ]\n    }\n  ]\n}\n"
  }
]